summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.ci/scripts/windows/scan_dll.py2
-rw-r--r--.codespellrc6
-rw-r--r--.github/workflows/codespell.yml17
-rw-r--r--CMakeLists.txt4
-rw-r--r--CMakeModules/FindFFmpeg.cmake2
-rw-r--r--src/CMakeLists.txt2
-rw-r--r--src/audio_core/audio_out_manager.h2
-rw-r--r--src/audio_core/device/audio_buffer.h2
-rw-r--r--src/audio_core/renderer/adsp/audio_renderer.cpp4
-rw-r--r--src/audio_core/renderer/behavior/behavior_info.h2
-rw-r--r--src/audio_core/renderer/effect/effect_info_base.h4
-rw-r--r--src/audio_core/renderer/memory/memory_pool_info.h2
-rw-r--r--src/audio_core/renderer/mix/mix_context.h2
-rw-r--r--src/audio_core/renderer/performance/performance_detail.h4
-rw-r--r--src/audio_core/renderer/performance/performance_entry.h4
-rw-r--r--src/audio_core/renderer/performance/performance_frame_header.h4
-rw-r--r--src/audio_core/renderer/splitter/splitter_context.h2
-rw-r--r--src/audio_core/renderer/splitter/splitter_destinations_data.h6
-rw-r--r--src/audio_core/renderer/splitter/splitter_info.h4
-rw-r--r--src/audio_core/renderer/system.h4
-rw-r--r--src/audio_core/renderer/system_manager.h6
-rw-r--r--src/audio_core/renderer/voice/voice_info.h4
-rw-r--r--src/common/announce_multiplayer_room.h2
-rw-r--r--src/common/fiber.cpp2
-rw-r--r--src/common/fixed_point.h2
-rw-r--r--src/common/host_memory.cpp4
-rw-r--r--src/common/input.h8
-rw-r--r--src/common/steady_clock.cpp25
-rw-r--r--src/common/steady_clock.h11
-rw-r--r--src/common/swap.h12
-rw-r--r--src/common/x64/native_clock.cpp38
-rw-r--r--src/common/x64/native_clock.h5
-rw-r--r--src/core/CMakeLists.txt3
-rw-r--r--src/core/core.cpp6
-rw-r--r--src/core/core.h2
-rw-r--r--src/core/core_timing.cpp2
-rw-r--r--src/core/core_timing.h2
-rw-r--r--src/core/cpu_manager.cpp2
-rw-r--r--src/core/crypto/ctr_encryption_layer.h2
-rw-r--r--src/core/crypto/key_manager.h2
-rw-r--r--src/core/crypto/xts_encryption_layer.h2
-rw-r--r--src/core/debugger/gdbstub.cpp14
-rw-r--r--src/core/debugger/gdbstub_arch.cpp4
-rw-r--r--src/core/file_sys/content_archive.h2
-rw-r--r--src/core/file_sys/registered_cache.h2
-rw-r--r--src/core/file_sys/vfs.h16
-rw-r--r--src/core/file_sys/vfs_real.h2
-rw-r--r--src/core/frontend/emu_window.h2
-rw-r--r--src/core/hid/emulated_controller.h6
-rw-r--r--src/core/hid/emulated_devices.h4
-rw-r--r--src/core/hid/input_converter.cpp4
-rw-r--r--src/core/hid/motion_input.h2
-rw-r--r--src/core/hle/kernel/board/nintendo/nx/k_memory_layout.cpp (renamed from src/core/hle/kernel/k_memory_layout.board.nintendo_nx.cpp)0
-rw-r--r--src/core/hle/kernel/global_scheduler_context.cpp33
-rw-r--r--src/core/hle/kernel/global_scheduler_context.h29
-rw-r--r--src/core/hle/kernel/k_address_arbiter.cpp115
-rw-r--r--src/core/hle/kernel/k_address_arbiter.h45
-rw-r--r--src/core/hle/kernel/k_affinity_mask.h20
-rw-r--r--src/core/hle/kernel/k_auto_object.cpp4
-rw-r--r--src/core/hle/kernel/k_auto_object.h17
-rw-r--r--src/core/hle/kernel/k_capabilities.cpp2
-rw-r--r--src/core/hle/kernel/k_capabilities.h2
-rw-r--r--src/core/hle/kernel/k_client_port.cpp83
-rw-r--r--src/core/hle/kernel/k_client_port.h23
-rw-r--r--src/core/hle/kernel/k_client_session.cpp13
-rw-r--r--src/core/hle/kernel/k_client_session.h13
-rw-r--r--src/core/hle/kernel/k_code_memory.cpp24
-rw-r--r--src/core/hle/kernel/k_code_memory.h4
-rw-r--r--src/core/hle/kernel/k_condition_variable.cpp78
-rw-r--r--src/core/hle/kernel/k_condition_variable.h24
-rw-r--r--src/core/hle/kernel/k_debug.h4
-rw-r--r--src/core/hle/kernel/k_device_address_space.cpp4
-rw-r--r--src/core/hle/kernel/k_event.cpp8
-rw-r--r--src/core/hle/kernel/k_event.h2
-rw-r--r--src/core/hle/kernel/k_light_condition_variable.cpp18
-rw-r--r--src/core/hle/kernel/k_light_condition_variable.h6
-rw-r--r--src/core/hle/kernel/k_light_lock.cpp31
-rw-r--r--src/core/hle/kernel/k_light_lock.h8
-rw-r--r--src/core/hle/kernel/k_linked_list.h238
-rw-r--r--src/core/hle/kernel/k_memory_block.h30
-rw-r--r--src/core/hle/kernel/k_memory_layout.cpp19
-rw-r--r--src/core/hle/kernel/k_memory_layout.h34
-rw-r--r--src/core/hle/kernel/k_memory_region.h70
-rw-r--r--src/core/hle/kernel/k_object_name.h2
-rw-r--r--src/core/hle/kernel/k_page_buffer.h2
-rw-r--r--src/core/hle/kernel/k_page_table.h2
-rw-r--r--src/core/hle/kernel/k_page_table_slab_heap.h3
-rw-r--r--src/core/hle/kernel/k_port.cpp47
-rw-r--r--src/core/hle/kernel/k_port.h28
-rw-r--r--src/core/hle/kernel/k_priority_queue.h114
-rw-r--r--src/core/hle/kernel/k_process.cpp346
-rw-r--r--src/core/hle/kernel/k_process.h196
-rw-r--r--src/core/hle/kernel/k_readable_event.cpp20
-rw-r--r--src/core/hle/kernel/k_readable_event.h2
-rw-r--r--src/core/hle/kernel/k_resource_limit.cpp104
-rw-r--r--src/core/hle/kernel/k_resource_limit.h22
-rw-r--r--src/core/hle/kernel/k_scheduler.cpp80
-rw-r--r--src/core/hle/kernel/k_scheduler.h14
-rw-r--r--src/core/hle/kernel/k_scheduler_lock.h55
-rw-r--r--src/core/hle/kernel/k_scoped_lock.h12
-rw-r--r--src/core/hle/kernel/k_scoped_resource_reservation.h36
-rw-r--r--src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h29
-rw-r--r--src/core/hle/kernel/k_server_port.cpp35
-rw-r--r--src/core/hle/kernel/k_server_port.h12
-rw-r--r--src/core/hle/kernel/k_server_session.cpp77
-rw-r--r--src/core/hle/kernel/k_server_session.h14
-rw-r--r--src/core/hle/kernel/k_session.cpp56
-rw-r--r--src/core/hle/kernel/k_session.h41
-rw-r--r--src/core/hle/kernel/k_session_request.cpp16
-rw-r--r--src/core/hle/kernel/k_session_request.h20
-rw-r--r--src/core/hle/kernel/k_shared_memory.cpp69
-rw-r--r--src/core/hle/kernel/k_shared_memory.h30
-rw-r--r--src/core/hle/kernel/k_shared_memory_info.h17
-rw-r--r--src/core/hle/kernel/k_slab_heap.h3
-rw-r--r--src/core/hle/kernel/k_spin_lock.cpp6
-rw-r--r--src/core/hle/kernel/k_spin_lock.h14
-rw-r--r--src/core/hle/kernel/k_synchronization_object.cpp37
-rw-r--r--src/core/hle/kernel/k_synchronization_object.h27
-rw-r--r--src/core/hle/kernel/k_system_resource.cpp9
-rw-r--r--src/core/hle/kernel/k_system_resource.h8
-rw-r--r--src/core/hle/kernel/k_thread.cpp679
-rw-r--r--src/core/hle/kernel/k_thread.h553
-rw-r--r--src/core/hle/kernel/k_thread_local_page.cpp2
-rw-r--r--src/core/hle/kernel/k_thread_queue.cpp20
-rw-r--r--src/core/hle/kernel/k_thread_queue.h14
-rw-r--r--src/core/hle/kernel/k_transfer_memory.cpp27
-rw-r--r--src/core/hle/kernel/k_transfer_memory.h24
-rw-r--r--src/core/hle/kernel/k_worker_task.h2
-rw-r--r--src/core/hle/kernel/k_worker_task_manager.cpp2
-rw-r--r--src/core/hle/kernel/k_worker_task_manager.h2
-rw-r--r--src/core/hle/kernel/kernel.cpp22
-rw-r--r--src/core/hle/kernel/kernel.h1
-rw-r--r--src/core/hle/kernel/physical_core.cpp41
-rw-r--r--src/core/hle/kernel/physical_core.h36
-rw-r--r--src/core/hle/kernel/slab_helpers.h22
-rw-r--r--src/core/hle/kernel/svc.cpp180
-rw-r--r--src/core/hle/kernel/svc/svc_address_arbiter.cpp45
-rw-r--r--src/core/hle/kernel/svc/svc_code_memory.cpp10
-rw-r--r--src/core/hle/kernel/svc/svc_condition_variable.cpp15
-rw-r--r--src/core/hle/kernel/svc/svc_event.cpp22
-rw-r--r--src/core/hle/kernel/svc/svc_info.cpp107
-rw-r--r--src/core/hle/kernel/svc/svc_ipc.cpp8
-rw-r--r--src/core/hle/kernel/svc/svc_lock.cpp27
-rw-r--r--src/core/hle/kernel/svc/svc_memory.cpp30
-rw-r--r--src/core/hle/kernel/svc/svc_physical_memory.cpp36
-rw-r--r--src/core/hle/kernel/svc/svc_port.cpp2
-rw-r--r--src/core/hle/kernel/svc/svc_process.cpp18
-rw-r--r--src/core/hle/kernel/svc/svc_process_memory.cpp50
-rw-r--r--src/core/hle/kernel/svc/svc_query_memory.cpp8
-rw-r--r--src/core/hle/kernel/svc/svc_resource_limit.cpp14
-rw-r--r--src/core/hle/kernel/svc/svc_secure_monitor_call.cpp4
-rw-r--r--src/core/hle/kernel/svc/svc_session.cpp29
-rw-r--r--src/core/hle/kernel/svc/svc_shared_memory.cpp13
-rw-r--r--src/core/hle/kernel/svc/svc_synchronization.cpp12
-rw-r--r--src/core/hle/kernel/svc/svc_thread.cpp74
-rw-r--r--src/core/hle/kernel/svc/svc_transfer_memory.cpp6
-rw-r--r--src/core/hle/kernel/svc_generator.py6
-rw-r--r--src/core/hle/service/acc/acc.cpp2
-rw-r--r--src/core/hle/service/acc/profile_manager.cpp6
-rw-r--r--src/core/hle/service/am/am.cpp2
-rw-r--r--src/core/hle/service/am/applets/applet_cabinet.cpp4
-rw-r--r--src/core/hle/service/glue/arp.cpp4
-rw-r--r--src/core/hle/service/hid/controllers/gesture.cpp2
-rw-r--r--src/core/hle/service/hid/controllers/stubbed.cpp2
-rw-r--r--src/core/hle/service/hid/controllers/touchscreen.cpp8
-rw-r--r--src/core/hle/service/hid/hid.cpp2
-rw-r--r--src/core/hle/service/hid/irsensor/image_transfer_processor.cpp2
-rw-r--r--src/core/hle/service/hle_ipc.cpp2
-rw-r--r--src/core/hle/service/ipc_helpers.h2
-rw-r--r--src/core/hle/service/nfp/amiibo_crypto.cpp16
-rw-r--r--src/core/hle/service/nfp/amiibo_crypto.h2
-rw-r--r--src/core/hle/service/nfp/nfp_device.cpp210
-rw-r--r--src/core/hle/service/nfp/nfp_device.h9
-rw-r--r--src/core/hle/service/nfp/nfp_types.h49
-rw-r--r--src/core/hle/service/ns/iplatform_service_manager.cpp2
-rw-r--r--src/core/hle/service/nvdrv/devices/nvdevice.h2
-rw-r--r--src/core/hle/service/nvdrv/devices/nvmap.cpp2
-rw-r--r--src/core/hle/service/pm/pm.cpp20
-rw-r--r--src/core/hle/service/server_manager.cpp2
-rw-r--r--src/core/hle/service/sm/sm.cpp4
-rw-r--r--src/core/hle/service/sm/sm_controller.cpp2
-rw-r--r--src/core/internal_network/network.cpp4
-rw-r--r--src/core/memory/cheat_engine.cpp2
-rw-r--r--src/core/perf_stats.cpp4
-rw-r--r--src/input_common/drivers/gc_adapter.cpp2
-rw-r--r--src/input_common/drivers/joycon.h2
-rw-r--r--src/input_common/drivers/keyboard.cpp2
-rw-r--r--src/input_common/drivers/mouse.cpp4
-rw-r--r--src/input_common/drivers/sdl_driver.cpp21
-rw-r--r--src/input_common/drivers/virtual_amiibo.cpp2
-rw-r--r--src/input_common/helpers/joycon_driver.cpp4
-rw-r--r--src/input_common/helpers/joycon_driver.h4
-rw-r--r--src/input_common/helpers/joycon_protocol/common_protocol.h4
-rw-r--r--src/input_common/helpers/udp_protocol.cpp2
-rw-r--r--src/input_common/main.h2
-rw-r--r--src/network/packet.h2
-rw-r--r--src/network/room.cpp2
-rw-r--r--src/network/room_member.h16
-rw-r--r--src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp80
-rw-r--r--src/shader_recompiler/backend/glsl/glsl_emit_context.h2
-rw-r--r--src/tests/common/ring_buffer.cpp2
-rw-r--r--src/tests/common/scratch_buffer.cpp2
-rw-r--r--src/video_core/control/channel_state_cache.h2
-rw-r--r--src/video_core/engines/draw_manager.cpp1
-rw-r--r--src/video_core/engines/sw_blitter/blitter.cpp2
-rw-r--r--src/video_core/gpu_thread.cpp2
-rw-r--r--src/video_core/host_shaders/astc_decoder.comp2
-rw-r--r--src/video_core/host_shaders/opengl_smaa.glsl2
-rw-r--r--src/video_core/memory_manager.h4
-rw-r--r--src/video_core/query_cache.h2
-rw-r--r--src/video_core/renderer_opengl/blit_image.cpp3
-rw-r--r--src/video_core/renderer_opengl/gl_fence_manager.cpp4
-rw-r--r--src/video_core/renderer_opengl/gl_graphics_pipeline.cpp5
-rw-r--r--src/video_core/renderer_opengl/gl_rasterizer.cpp48
-rw-r--r--src/video_core/renderer_opengl/gl_rasterizer.h20
-rw-r--r--src/video_core/renderer_opengl/gl_resource_manager.cpp10
-rw-r--r--src/video_core/renderer_opengl/gl_resource_manager.h3
-rw-r--r--src/video_core/renderer_opengl/gl_texture_cache.cpp28
-rw-r--r--src/video_core/renderer_opengl/gl_texture_cache.h6
-rw-r--r--src/video_core/renderer_vulkan/fixed_pipeline_state.cpp6
-rw-r--r--src/video_core/renderer_vulkan/vk_command_pool.cpp4
-rw-r--r--src/video_core/renderer_vulkan/vk_rasterizer.cpp250
-rw-r--r--src/video_core/renderer_vulkan/vk_rasterizer.h5
-rw-r--r--src/video_core/renderer_vulkan/vk_resource_pool.cpp4
-rw-r--r--src/video_core/renderer_vulkan/vk_swapchain.cpp2
-rw-r--r--src/video_core/renderer_vulkan/vk_texture_cache.cpp22
-rw-r--r--src/video_core/renderer_vulkan/vk_texture_cache.h6
-rw-r--r--src/video_core/renderer_vulkan/vk_update_descriptor.cpp2
-rw-r--r--src/video_core/texture_cache/image_base.h2
-rw-r--r--src/video_core/texture_cache/texture_cache.h86
-rw-r--r--src/video_core/texture_cache/texture_cache_base.h10
-rw-r--r--src/video_core/textures/astc.cpp2
-rw-r--r--src/video_core/vulkan_common/vulkan_device.cpp2
-rw-r--r--src/video_core/vulkan_common/vulkan_device.h8
-rw-r--r--src/video_core/vulkan_common/vulkan_wrapper.h14
-rw-r--r--src/yuzu/applets/qt_web_browser.h2
-rw-r--r--src/yuzu/compatdb.cpp6
-rw-r--r--src/yuzu/configuration/configure_audio.cpp15
-rw-r--r--src/yuzu/configuration/configure_audio.ui45
-rw-r--r--src/yuzu/configuration/configure_general.cpp2
-rw-r--r--src/yuzu/configuration/configure_general.ui7
-rw-r--r--src/yuzu/configuration/configure_hotkeys.h2
-rw-r--r--src/yuzu/configuration/configure_input_player.h2
-rw-r--r--src/yuzu/configuration/configure_input_player_widget.h2
-rw-r--r--src/yuzu/configuration/configure_system.cpp30
-rw-r--r--src/yuzu/configuration/configure_system.h2
-rw-r--r--src/yuzu/configuration/configure_system.ui61
-rw-r--r--src/yuzu/debugger/wait_tree.cpp72
-rw-r--r--src/yuzu/debugger/wait_tree.h36
-rw-r--r--src/yuzu/loading_screen.cpp2
-rw-r--r--src/yuzu/main.cpp4
-rw-r--r--src/yuzu/multiplayer/lobby.cpp2
-rw-r--r--src/yuzu/multiplayer/state.cpp2
-rw-r--r--src/yuzu/multiplayer/state.h4
-rw-r--r--src/yuzu/startup_checks.cpp2
-rw-r--r--src/yuzu/util/overlay_dialog.h2
256 files changed, 3055 insertions, 3387 deletions
diff --git a/.ci/scripts/windows/scan_dll.py b/.ci/scripts/windows/scan_dll.py
index f374e0d78..a536f7375 100644
--- a/.ci/scripts/windows/scan_dll.py
+++ b/.ci/scripts/windows/scan_dll.py
@@ -40,7 +40,7 @@ def parse_imports(file_name):
40 40
41def parse_imports_recursive(file_name, path_list=[]): 41def parse_imports_recursive(file_name, path_list=[]):
42 q = queue.Queue() # create a FIFO queue 42 q = queue.Queue() # create a FIFO queue
43 # file_name can be a string or a list for the convience 43 # file_name can be a string or a list for the convenience
44 if isinstance(file_name, str): 44 if isinstance(file_name, str):
45 q.put(file_name) 45 q.put(file_name)
46 elif isinstance(file_name, list): 46 elif isinstance(file_name, list):
diff --git a/.codespellrc b/.codespellrc
new file mode 100644
index 000000000..786a991eb
--- /dev/null
+++ b/.codespellrc
@@ -0,0 +1,6 @@
1; SPDX-FileCopyrightText: 2023 yuzu Emulator Project
2; SPDX-License-Identifier: GPL-2.0-or-later
3
4[codespell]
5skip = ./.git,./build,./dist,./Doxyfile,./externals,./LICENSES
6ignore-words-list = aci,allright,ba,deques,froms,hda,inout,lod,masia,nam,nax,nd,pullrequests,pullrequest,te,transfered,unstall,uscaled,zink
diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml
new file mode 100644
index 000000000..d873fb725
--- /dev/null
+++ b/.github/workflows/codespell.yml
@@ -0,0 +1,17 @@
1# SPDX-FileCopyrightText: 2023 yuzu Emulator Project
2# SPDX-License-Identifier: GPL-2.0-or-later
3# GitHub Action to automate the identification of common misspellings in text files.
4# https://github.com/codespell-project/actions-codespell
5# https://github.com/codespell-project/codespell
6name: codespell
7on: pull_request
8permissions: {}
9jobs:
10 codespell:
11 name: Check for spelling errors
12 runs-on: ubuntu-latest
13 steps:
14 - uses: actions/checkout@v3
15 with:
16 persist-credentials: false
17 - uses: codespell-project/actions-codespell@master
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 91ec50bef..6932b6fab 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -344,12 +344,12 @@ if(ENABLE_QT)
344 find_package(PkgConfig REQUIRED) 344 find_package(PkgConfig REQUIRED)
345 pkg_check_modules(QT_DEP_GLU QUIET glu>=9.0.0) 345 pkg_check_modules(QT_DEP_GLU QUIET glu>=9.0.0)
346 if (NOT QT_DEP_GLU_FOUND) 346 if (NOT QT_DEP_GLU_FOUND)
347 message(FATAL_ERROR "Qt bundled pacakge dependency `glu` not found. \ 347 message(FATAL_ERROR "Qt bundled package dependency `glu` not found. \
348 Perhaps `libglu1-mesa-dev` needs to be installed?") 348 Perhaps `libglu1-mesa-dev` needs to be installed?")
349 endif() 349 endif()
350 pkg_check_modules(QT_DEP_MESA QUIET dri>=20.0.8) 350 pkg_check_modules(QT_DEP_MESA QUIET dri>=20.0.8)
351 if (NOT QT_DEP_MESA_FOUND) 351 if (NOT QT_DEP_MESA_FOUND)
352 message(FATAL_ERROR "Qt bundled pacakge dependency `dri` not found. \ 352 message(FATAL_ERROR "Qt bundled package dependency `dri` not found. \
353 Perhaps `mesa-common-dev` needs to be installed?") 353 Perhaps `mesa-common-dev` needs to be installed?")
354 endif() 354 endif()
355 355
diff --git a/CMakeModules/FindFFmpeg.cmake b/CMakeModules/FindFFmpeg.cmake
index eedf28aea..5cb1f3c8a 100644
--- a/CMakeModules/FindFFmpeg.cmake
+++ b/CMakeModules/FindFFmpeg.cmake
@@ -14,7 +14,7 @@
14# FFmpeg_LIBRARIES: aggregate all the paths to the libraries 14# FFmpeg_LIBRARIES: aggregate all the paths to the libraries
15# FFmpeg_FOUND: True if all components have been found 15# FFmpeg_FOUND: True if all components have been found
16# 16#
17# This module defines the following targets, which are prefered over variables: 17# This module defines the following targets, which are preferred over variables:
18# 18#
19# FFmpeg::<component>: Target to use <component> directly, with include path, 19# FFmpeg::<component>: Target to use <component> directly, with include path,
20# library and dependencies set up. If you are using a static build, you are 20# library and dependencies set up. If you are using a static build, you are
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index c7283e82c..0eca8e90e 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -83,7 +83,7 @@ if (MSVC)
83 ) 83 )
84 84
85 if (USE_CCACHE OR YUZU_USE_PRECOMPILED_HEADERS) 85 if (USE_CCACHE OR YUZU_USE_PRECOMPILED_HEADERS)
86 # when caching, we need to use /Z7 to downgrade debug info to use an older but more cachable format 86 # when caching, we need to use /Z7 to downgrade debug info to use an older but more cacheable format
87 # Precompiled headers are deleted if not using /Z7. See https://github.com/nanoant/CMakePCHCompiler/issues/21 87 # Precompiled headers are deleted if not using /Z7. See https://github.com/nanoant/CMakePCHCompiler/issues/21
88 add_compile_options(/Z7) 88 add_compile_options(/Z7)
89 else() 89 else()
diff --git a/src/audio_core/audio_out_manager.h b/src/audio_core/audio_out_manager.h
index 24981e08f..1e05ec5ed 100644
--- a/src/audio_core/audio_out_manager.h
+++ b/src/audio_core/audio_out_manager.h
@@ -58,7 +58,7 @@ public:
58 /** 58 /**
59 * Get a list of audio out device names. 59 * Get a list of audio out device names.
60 * 60 *
61 * @oaram names - Output container to write names to. 61 * @param names - Output container to write names to.
62 * @return Number of names written. 62 * @return Number of names written.
63 */ 63 */
64 u32 GetAudioOutDeviceNames( 64 u32 GetAudioOutDeviceNames(
diff --git a/src/audio_core/device/audio_buffer.h b/src/audio_core/device/audio_buffer.h
index 7128ef72a..4eb80c2ba 100644
--- a/src/audio_core/device/audio_buffer.h
+++ b/src/audio_core/device/audio_buffer.h
@@ -16,7 +16,7 @@ struct AudioBuffer {
16 s64 played_timestamp; 16 s64 played_timestamp;
17 /// Game memory address for these samples. 17 /// Game memory address for these samples.
18 VAddr samples; 18 VAddr samples;
19 /// Unqiue identifier for this buffer. 19 /// Unique identifier for this buffer.
20 u64 tag; 20 u64 tag;
21 /// Size of the samples buffer. 21 /// Size of the samples buffer.
22 u64 size; 22 u64 size;
diff --git a/src/audio_core/renderer/adsp/audio_renderer.cpp b/src/audio_core/renderer/adsp/audio_renderer.cpp
index 78c15629b..42b4b167a 100644
--- a/src/audio_core/renderer/adsp/audio_renderer.cpp
+++ b/src/audio_core/renderer/adsp/audio_renderer.cpp
@@ -135,7 +135,7 @@ void AudioRenderer::ThreadFunc() {
135 static constexpr char name[]{"AudioRenderer"}; 135 static constexpr char name[]{"AudioRenderer"};
136 MicroProfileOnThreadCreate(name); 136 MicroProfileOnThreadCreate(name);
137 Common::SetCurrentThreadName(name); 137 Common::SetCurrentThreadName(name);
138 Common::SetCurrentThreadPriority(Common::ThreadPriority::Critical); 138 Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
139 if (mailbox->ADSPWaitMessage() != RenderMessage::AudioRenderer_InitializeOK) { 139 if (mailbox->ADSPWaitMessage() != RenderMessage::AudioRenderer_InitializeOK) {
140 LOG_ERROR(Service_Audio, 140 LOG_ERROR(Service_Audio,
141 "ADSP Audio Renderer -- Failed to receive initialize message from host!"); 141 "ADSP Audio Renderer -- Failed to receive initialize message from host!");
@@ -165,7 +165,7 @@ void AudioRenderer::ThreadFunc() {
165 // Check this buffer is valid, as it may not be used. 165 // Check this buffer is valid, as it may not be used.
166 if (command_buffer.buffer != 0) { 166 if (command_buffer.buffer != 0) {
167 // If there are no remaining commands (from the previous list), 167 // If there are no remaining commands (from the previous list),
168 // this is a new command list, initalize it. 168 // this is a new command list, initialize it.
169 if (command_buffer.remaining_command_count == 0) { 169 if (command_buffer.remaining_command_count == 0) {
170 command_list_processor.Initialize(system, command_buffer.buffer, 170 command_list_processor.Initialize(system, command_buffer.buffer,
171 command_buffer.size, streams[index]); 171 command_buffer.size, streams[index]);
diff --git a/src/audio_core/renderer/behavior/behavior_info.h b/src/audio_core/renderer/behavior/behavior_info.h
index 15c948344..b52340229 100644
--- a/src/audio_core/renderer/behavior/behavior_info.h
+++ b/src/audio_core/renderer/behavior/behavior_info.h
@@ -155,7 +155,7 @@ public:
155 /** 155 /**
156 * Check if a variadic command buffer is supported. 156 * Check if a variadic command buffer is supported.
157 * As of Rev 5 with the added optional performance metric logging, the command 157 * As of Rev 5 with the added optional performance metric logging, the command
158 * buffer can be a variable size, so take that into account for calcualting its size. 158 * buffer can be a variable size, so take that into account for calculating its size.
159 * 159 *
160 * @return True if supported, otherwise false. 160 * @return True if supported, otherwise false.
161 */ 161 */
diff --git a/src/audio_core/renderer/effect/effect_info_base.h b/src/audio_core/renderer/effect/effect_info_base.h
index 8525fde05..dbdccf278 100644
--- a/src/audio_core/renderer/effect/effect_info_base.h
+++ b/src/audio_core/renderer/effect/effect_info_base.h
@@ -192,7 +192,7 @@ public:
192 /** 192 /**
193 * Get this effect's parameter data. 193 * Get this effect's parameter data.
194 * 194 *
195 * @return Pointer to the parametter, must be cast to the correct type. 195 * @return Pointer to the parameter, must be cast to the correct type.
196 */ 196 */
197 u8* GetParameter() { 197 u8* GetParameter() {
198 return parameter.data(); 198 return parameter.data();
@@ -201,7 +201,7 @@ public:
201 /** 201 /**
202 * Get this effect's parameter data. 202 * Get this effect's parameter data.
203 * 203 *
204 * @return Pointer to the parametter, must be cast to the correct type. 204 * @return Pointer to the parameter, must be cast to the correct type.
205 */ 205 */
206 u8* GetStateBuffer() { 206 u8* GetStateBuffer() {
207 return state.data(); 207 return state.data();
diff --git a/src/audio_core/renderer/memory/memory_pool_info.h b/src/audio_core/renderer/memory/memory_pool_info.h
index 537a466ec..80c571bc1 100644
--- a/src/audio_core/renderer/memory/memory_pool_info.h
+++ b/src/audio_core/renderer/memory/memory_pool_info.h
@@ -29,7 +29,7 @@ public:
29 */ 29 */
30 enum class State { 30 enum class State {
31 Invalid, 31 Invalid,
32 Aquired, 32 Acquired,
33 RequestDetach, 33 RequestDetach,
34 Detached, 34 Detached,
35 RequestAttach, 35 RequestAttach,
diff --git a/src/audio_core/renderer/mix/mix_context.h b/src/audio_core/renderer/mix/mix_context.h
index da3aa2829..bcd9637da 100644
--- a/src/audio_core/renderer/mix/mix_context.h
+++ b/src/audio_core/renderer/mix/mix_context.h
@@ -93,7 +93,7 @@ public:
93 * Splitter sort, traverse the splitter node graph and sort the sorted mixes from results. 93 * Splitter sort, traverse the splitter node graph and sort the sorted mixes from results.
94 * 94 *
95 * @param splitter_context - Splitter context for the sort. 95 * @param splitter_context - Splitter context for the sort.
96 * @return True if the sort was successful, othewise false. 96 * @return True if the sort was successful, otherwise false.
97 */ 97 */
98 bool TSortInfo(const SplitterContext& splitter_context); 98 bool TSortInfo(const SplitterContext& splitter_context);
99 99
diff --git a/src/audio_core/renderer/performance/performance_detail.h b/src/audio_core/renderer/performance/performance_detail.h
index 3a4897e60..f603b9026 100644
--- a/src/audio_core/renderer/performance/performance_detail.h
+++ b/src/audio_core/renderer/performance/performance_detail.h
@@ -33,7 +33,7 @@ struct PerformanceDetailVersion1 {
33 /* 0x0D */ PerformanceEntryType entry_type; 33 /* 0x0D */ PerformanceEntryType entry_type;
34}; 34};
35static_assert(sizeof(PerformanceDetailVersion1) == 0x10, 35static_assert(sizeof(PerformanceDetailVersion1) == 0x10,
36 "PerformanceDetailVersion1 has the worng size!"); 36 "PerformanceDetailVersion1 has the wrong size!");
37 37
38struct PerformanceDetailVersion2 { 38struct PerformanceDetailVersion2 {
39 /* 0x00 */ u32 node_id; 39 /* 0x00 */ u32 node_id;
@@ -45,6 +45,6 @@ struct PerformanceDetailVersion2 {
45 /* 0x14 */ char unk14[0x4]; 45 /* 0x14 */ char unk14[0x4];
46}; 46};
47static_assert(sizeof(PerformanceDetailVersion2) == 0x18, 47static_assert(sizeof(PerformanceDetailVersion2) == 0x18,
48 "PerformanceDetailVersion2 has the worng size!"); 48 "PerformanceDetailVersion2 has the wrong size!");
49 49
50} // namespace AudioCore::AudioRenderer 50} // namespace AudioCore::AudioRenderer
diff --git a/src/audio_core/renderer/performance/performance_entry.h b/src/audio_core/renderer/performance/performance_entry.h
index d1b21406b..d6b1158db 100644
--- a/src/audio_core/renderer/performance/performance_entry.h
+++ b/src/audio_core/renderer/performance/performance_entry.h
@@ -22,7 +22,7 @@ struct PerformanceEntryVersion1 {
22 /* 0x0C */ PerformanceEntryType entry_type; 22 /* 0x0C */ PerformanceEntryType entry_type;
23}; 23};
24static_assert(sizeof(PerformanceEntryVersion1) == 0x10, 24static_assert(sizeof(PerformanceEntryVersion1) == 0x10,
25 "PerformanceEntryVersion1 has the worng size!"); 25 "PerformanceEntryVersion1 has the wrong size!");
26 26
27struct PerformanceEntryVersion2 { 27struct PerformanceEntryVersion2 {
28 /* 0x00 */ u32 node_id; 28 /* 0x00 */ u32 node_id;
@@ -32,6 +32,6 @@ struct PerformanceEntryVersion2 {
32 /* 0x0D */ char unk0D[0xB]; 32 /* 0x0D */ char unk0D[0xB];
33}; 33};
34static_assert(sizeof(PerformanceEntryVersion2) == 0x18, 34static_assert(sizeof(PerformanceEntryVersion2) == 0x18,
35 "PerformanceEntryVersion2 has the worng size!"); 35 "PerformanceEntryVersion2 has the wrong size!");
36 36
37} // namespace AudioCore::AudioRenderer 37} // namespace AudioCore::AudioRenderer
diff --git a/src/audio_core/renderer/performance/performance_frame_header.h b/src/audio_core/renderer/performance/performance_frame_header.h
index 707cc0afb..b1848284e 100644
--- a/src/audio_core/renderer/performance/performance_frame_header.h
+++ b/src/audio_core/renderer/performance/performance_frame_header.h
@@ -16,7 +16,7 @@ struct PerformanceFrameHeaderVersion1 {
16 /* 0x14 */ u32 frame_index; 16 /* 0x14 */ u32 frame_index;
17}; 17};
18static_assert(sizeof(PerformanceFrameHeaderVersion1) == 0x18, 18static_assert(sizeof(PerformanceFrameHeaderVersion1) == 0x18,
19 "PerformanceFrameHeaderVersion1 has the worng size!"); 19 "PerformanceFrameHeaderVersion1 has the wrong size!");
20 20
21struct PerformanceFrameHeaderVersion2 { 21struct PerformanceFrameHeaderVersion2 {
22 /* 0x00 */ u32 magic; // "PERF" 22 /* 0x00 */ u32 magic; // "PERF"
@@ -31,6 +31,6 @@ struct PerformanceFrameHeaderVersion2 {
31 /* 0x25 */ char unk25[0xB]; 31 /* 0x25 */ char unk25[0xB];
32}; 32};
33static_assert(sizeof(PerformanceFrameHeaderVersion2) == 0x30, 33static_assert(sizeof(PerformanceFrameHeaderVersion2) == 0x30,
34 "PerformanceFrameHeaderVersion2 has the worng size!"); 34 "PerformanceFrameHeaderVersion2 has the wrong size!");
35 35
36} // namespace AudioCore::AudioRenderer 36} // namespace AudioCore::AudioRenderer
diff --git a/src/audio_core/renderer/splitter/splitter_context.h b/src/audio_core/renderer/splitter/splitter_context.h
index cfd092b4f..1a63db1d3 100644
--- a/src/audio_core/renderer/splitter/splitter_context.h
+++ b/src/audio_core/renderer/splitter/splitter_context.h
@@ -55,7 +55,7 @@ public:
55 /** 55 /**
56 * Get the total number of splitter destinations. 56 * Get the total number of splitter destinations.
57 * 57 *
58 * @return Number of destiantions. 58 * @return Number of destinations.
59 */ 59 */
60 u32 GetDataCount() const; 60 u32 GetDataCount() const;
61 61
diff --git a/src/audio_core/renderer/splitter/splitter_destinations_data.h b/src/audio_core/renderer/splitter/splitter_destinations_data.h
index bd3d55748..d55ce0ad3 100644
--- a/src/audio_core/renderer/splitter/splitter_destinations_data.h
+++ b/src/audio_core/renderer/splitter/splitter_destinations_data.h
@@ -87,7 +87,7 @@ public:
87 /** 87 /**
88 * Update this destination. 88 * Update this destination.
89 * 89 *
90 * @param params - Inpout parameters to update the destination. 90 * @param params - Input parameters to update the destination.
91 */ 91 */
92 void Update(const InParameter& params); 92 void Update(const InParameter& params);
93 93
@@ -126,9 +126,9 @@ private:
126 std::array<f32, MaxMixBuffers> prev_mix_volumes{0.0f}; 126 std::array<f32, MaxMixBuffers> prev_mix_volumes{0.0f};
127 /// Next destination in the mix chain 127 /// Next destination in the mix chain
128 SplitterDestinationData* next{}; 128 SplitterDestinationData* next{};
129 /// Is this destiantion in use? 129 /// Is this destination in use?
130 bool in_use{}; 130 bool in_use{};
131 /// Does this destiantion need its volumes updated? 131 /// Does this destination need its volumes updated?
132 bool need_update{}; 132 bool need_update{};
133}; 133};
134 134
diff --git a/src/audio_core/renderer/splitter/splitter_info.h b/src/audio_core/renderer/splitter/splitter_info.h
index d1d75064c..b0ad01fe0 100644
--- a/src/audio_core/renderer/splitter/splitter_info.h
+++ b/src/audio_core/renderer/splitter/splitter_info.h
@@ -49,14 +49,14 @@ public:
49 /** 49 /**
50 * Get the number of destinations in this splitter. 50 * Get the number of destinations in this splitter.
51 * 51 *
52 * @return The number of destiantions. 52 * @return The number of destinations.
53 */ 53 */
54 u32 GetDestinationCount() const; 54 u32 GetDestinationCount() const;
55 55
56 /** 56 /**
57 * Set the number of destinations in this splitter. 57 * Set the number of destinations in this splitter.
58 * 58 *
59 * @param count - The new number of destiantions. 59 * @param count - The new number of destinations.
60 */ 60 */
61 void SetDestinationCount(u32 count); 61 void SetDestinationCount(u32 count);
62 62
diff --git a/src/audio_core/renderer/system.h b/src/audio_core/renderer/system.h
index 429196e41..e328783b6 100644
--- a/src/audio_core/renderer/system.h
+++ b/src/audio_core/renderer/system.h
@@ -154,7 +154,7 @@ public:
154 ExecutionMode GetExecutionMode() const; 154 ExecutionMode GetExecutionMode() const;
155 155
156 /** 156 /**
157 * Get the rendering deivce for this system. 157 * Get the rendering device for this system.
158 * This is unused. 158 * This is unused.
159 * 159 *
160 * @return Rendering device for this system. 160 * @return Rendering device for this system.
@@ -241,7 +241,7 @@ private:
241 std::span<u8> command_workbuffer{}; 241 std::span<u8> command_workbuffer{};
242 /// Size of command workbuffer 242 /// Size of command workbuffer
243 u64 command_workbuffer_size{}; 243 u64 command_workbuffer_size{};
244 /// Numebr of commands in the workbuffer 244 /// Number of commands in the workbuffer
245 u64 command_buffer_size{}; 245 u64 command_buffer_size{};
246 /// Manager for upsamplers 246 /// Manager for upsamplers
247 UpsamplerManager* upsampler_manager{}; 247 UpsamplerManager* upsampler_manager{};
diff --git a/src/audio_core/renderer/system_manager.h b/src/audio_core/renderer/system_manager.h
index 81457a3a1..415ddb74f 100644
--- a/src/audio_core/renderer/system_manager.h
+++ b/src/audio_core/renderer/system_manager.h
@@ -36,7 +36,7 @@ public:
36 /** 36 /**
37 * Initialize the system manager, called when any system is registered. 37 * Initialize the system manager, called when any system is registered.
38 * 38 *
39 * @return True if sucessfully initialized, otherwise false. 39 * @return True if successfully initialized, otherwise false.
40 */ 40 */
41 bool InitializeUnsafe(); 41 bool InitializeUnsafe();
42 42
@@ -50,7 +50,7 @@ public:
50 * The manager does not own the system, so do not free it without calling Remove. 50 * The manager does not own the system, so do not free it without calling Remove.
51 * 51 *
52 * @param system - The system to add. 52 * @param system - The system to add.
53 * @return True if succesfully added, otherwise false. 53 * @return True if successfully added, otherwise false.
54 */ 54 */
55 bool Add(System& system); 55 bool Add(System& system);
56 56
@@ -58,7 +58,7 @@ public:
58 * Remove an audio render system from the manager. 58 * Remove an audio render system from the manager.
59 * 59 *
60 * @param system - The system to remove. 60 * @param system - The system to remove.
61 * @return True if succesfully removed, otherwise false. 61 * @return True if successfully removed, otherwise false.
62 */ 62 */
63 bool Remove(System& system); 63 bool Remove(System& system);
64 64
diff --git a/src/audio_core/renderer/voice/voice_info.h b/src/audio_core/renderer/voice/voice_info.h
index 930180895..3c5d3e04f 100644
--- a/src/audio_core/renderer/voice/voice_info.h
+++ b/src/audio_core/renderer/voice/voice_info.h
@@ -183,7 +183,7 @@ public:
183 void Initialize(); 183 void Initialize();
184 184
185 /** 185 /**
186 * Does this voice ned an update? 186 * Does this voice need an update?
187 * 187 *
188 * @param params - Input parameters to check matching. 188 * @param params - Input parameters to check matching.
189 * 189 *
@@ -236,7 +236,7 @@ public:
236 * 236 *
237 * @param error_info - Output array of errors. 237 * @param error_info - Output array of errors.
238 * @param wave_buffer - The wavebuffer to be updated. 238 * @param wave_buffer - The wavebuffer to be updated.
239 * @param wave_buffer_internal - Input parametters to be used for the update. 239 * @param wave_buffer_internal - Input parameters to be used for the update.
240 * @param sample_format - Sample format of the wavebuffer. 240 * @param sample_format - Sample format of the wavebuffer.
241 * @param valid - Is this wavebuffer valid? 241 * @param valid - Is this wavebuffer valid?
242 * @param pool_mapper - Used to map the wavebuffers. 242 * @param pool_mapper - Used to map the wavebuffers.
diff --git a/src/common/announce_multiplayer_room.h b/src/common/announce_multiplayer_room.h
index 4a3100fa4..f32060196 100644
--- a/src/common/announce_multiplayer_room.h
+++ b/src/common/announce_multiplayer_room.h
@@ -66,7 +66,7 @@ public:
66 * @param description The room description 66 * @param description The room description
67 * @param port The port of the room 67 * @param port The port of the room
68 * @param net_version The version of the libNetwork that gets used 68 * @param net_version The version of the libNetwork that gets used
69 * @param has_password True if the room is passowrd protected 69 * @param has_password True if the room is password protected
70 * @param preferred_game The preferred game of the room 70 * @param preferred_game The preferred game of the room
71 * @param preferred_game_id The title id of the preferred game 71 * @param preferred_game_id The title id of the preferred game
72 */ 72 */
diff --git a/src/common/fiber.cpp b/src/common/fiber.cpp
index bc92b360b..c991b7cf1 100644
--- a/src/common/fiber.cpp
+++ b/src/common/fiber.cpp
@@ -90,7 +90,7 @@ Fiber::~Fiber() {
90} 90}
91 91
92void Fiber::Exit() { 92void Fiber::Exit() {
93 ASSERT_MSG(impl->is_thread_fiber, "Exitting non main thread fiber"); 93 ASSERT_MSG(impl->is_thread_fiber, "Exiting non main thread fiber");
94 if (!impl->is_thread_fiber) { 94 if (!impl->is_thread_fiber) {
95 return; 95 return;
96 } 96 }
diff --git a/src/common/fixed_point.h b/src/common/fixed_point.h
index f899b0d54..b0f3ae2cc 100644
--- a/src/common/fixed_point.h
+++ b/src/common/fixed_point.h
@@ -22,7 +22,7 @@ class FixedPoint;
22namespace detail { 22namespace detail {
23 23
24// helper templates to make magic with types :) 24// helper templates to make magic with types :)
25// these allow us to determine resonable types from 25// these allow us to determine reasonable types from
26// a desired size, they also let us infer the next largest type 26// a desired size, they also let us infer the next largest type
27// from a type which is nice for the division op 27// from a type which is nice for the division op
28template <size_t T> 28template <size_t T>
diff --git a/src/common/host_memory.cpp b/src/common/host_memory.cpp
index 611c7d1a3..8e4f1f97a 100644
--- a/src/common/host_memory.cpp
+++ b/src/common/host_memory.cpp
@@ -322,7 +322,7 @@ private:
322 } 322 }
323 323
324 /// Return true when a given memory region is a "nieche" and the placeholders don't have to be 324 /// Return true when a given memory region is a "nieche" and the placeholders don't have to be
325 /// splitted. 325 /// split.
326 bool IsNiechePlaceholder(size_t virtual_offset, size_t length) const { 326 bool IsNiechePlaceholder(size_t virtual_offset, size_t length) const {
327 const auto it = placeholders.upper_bound({virtual_offset, virtual_offset + length}); 327 const auto it = placeholders.upper_bound({virtual_offset, virtual_offset + length});
328 if (it != placeholders.end() && it->lower() == virtual_offset + length) { 328 if (it != placeholders.end() && it->lower() == virtual_offset + length) {
@@ -484,7 +484,7 @@ class HostMemory::Impl {
484public: 484public:
485 explicit Impl(size_t /*backing_size */, size_t /* virtual_size */) { 485 explicit Impl(size_t /*backing_size */, size_t /* virtual_size */) {
486 // This is just a place holder. 486 // This is just a place holder.
487 // Please implement fastmem in a propper way on your platform. 487 // Please implement fastmem in a proper way on your platform.
488 throw std::bad_alloc{}; 488 throw std::bad_alloc{};
489 } 489 }
490 490
diff --git a/src/common/input.h b/src/common/input.h
index 98e934685..51b277c1f 100644
--- a/src/common/input.h
+++ b/src/common/input.h
@@ -15,7 +15,7 @@
15 15
16namespace Common::Input { 16namespace Common::Input {
17 17
18// Type of data that is expected to recieve or send 18// Type of data that is expected to receive or send
19enum class InputType { 19enum class InputType {
20 None, 20 None,
21 Battery, 21 Battery,
@@ -103,7 +103,7 @@ enum class VibrationAmplificationType {
103struct AnalogProperties { 103struct AnalogProperties {
104 // Anything below this value will be detected as zero 104 // Anything below this value will be detected as zero
105 float deadzone{}; 105 float deadzone{};
106 // Anyting above this values will be detected as one 106 // Anything above this value will be detected as one
107 float range{1.0f}; 107 float range{1.0f};
108 // Minimum value to be detected as active 108 // Minimum value to be detected as active
109 float threshold{0.5f}; 109 float threshold{0.5f};
@@ -209,7 +209,7 @@ struct LedStatus {
209 bool led_4{}; 209 bool led_4{};
210}; 210};
211 211
212// Raw data fom camera 212// Raw data from camera
213struct CameraStatus { 213struct CameraStatus {
214 CameraFormat format{CameraFormat::None}; 214 CameraFormat format{CameraFormat::None};
215 std::vector<u8> data{}; 215 std::vector<u8> data{};
@@ -428,7 +428,7 @@ inline void UnregisterOutputFactory(const std::string& name) {
428} 428}
429 429
430/** 430/**
431 * Create an input device from given paramters. 431 * Create an input device from given parameters.
432 * @tparam InputDeviceType the type of input devices to create 432 * @tparam InputDeviceType the type of input devices to create
433 * @param params a serialized ParamPackage string that contains all parameters for creating the 433 * @param params a serialized ParamPackage string that contains all parameters for creating the
434 * device 434 * device
diff --git a/src/common/steady_clock.cpp b/src/common/steady_clock.cpp
index 0d5908aa7..782859196 100644
--- a/src/common/steady_clock.cpp
+++ b/src/common/steady_clock.cpp
@@ -23,6 +23,19 @@ static s64 WindowsQueryPerformanceCounter() {
23 QueryPerformanceCounter(&counter); 23 QueryPerformanceCounter(&counter);
24 return counter.QuadPart; 24 return counter.QuadPart;
25} 25}
26
27static s64 GetSystemTimeNS() {
28 // GetSystemTimePreciseAsFileTime returns the file time in 100ns units.
29 static constexpr s64 Multiplier = 100;
30 // Convert Windows epoch to Unix epoch.
31 static constexpr s64 WindowsEpochToUnixEpochNS = 0x19DB1DED53E8000LL;
32
33 FILETIME filetime;
34 GetSystemTimePreciseAsFileTime(&filetime);
35 return Multiplier * ((static_cast<s64>(filetime.dwHighDateTime) << 32) +
36 static_cast<s64>(filetime.dwLowDateTime)) -
37 WindowsEpochToUnixEpochNS;
38}
26#endif 39#endif
27 40
28SteadyClock::time_point SteadyClock::Now() noexcept { 41SteadyClock::time_point SteadyClock::Now() noexcept {
@@ -53,4 +66,16 @@ SteadyClock::time_point SteadyClock::Now() noexcept {
53#endif 66#endif
54} 67}
55 68
69RealTimeClock::time_point RealTimeClock::Now() noexcept {
70#if defined(_WIN32)
71 return time_point{duration{GetSystemTimeNS()}};
72#elif defined(__APPLE__)
73 return time_point{duration{clock_gettime_nsec_np(CLOCK_REALTIME)}};
74#else
75 timespec ts;
76 clock_gettime(CLOCK_REALTIME, &ts);
77 return time_point{std::chrono::seconds{ts.tv_sec} + std::chrono::nanoseconds{ts.tv_nsec}};
78#endif
79}
80
56}; // namespace Common 81}; // namespace Common
diff --git a/src/common/steady_clock.h b/src/common/steady_clock.h
index 9497cf865..dbd0e2513 100644
--- a/src/common/steady_clock.h
+++ b/src/common/steady_clock.h
@@ -20,4 +20,15 @@ struct SteadyClock {
20 [[nodiscard]] static time_point Now() noexcept; 20 [[nodiscard]] static time_point Now() noexcept;
21}; 21};
22 22
23struct RealTimeClock {
24 using rep = s64;
25 using period = std::nano;
26 using duration = std::chrono::nanoseconds;
27 using time_point = std::chrono::time_point<RealTimeClock>;
28
29 static constexpr bool is_steady = false;
30
31 [[nodiscard]] static time_point Now() noexcept;
32};
33
23} // namespace Common 34} // namespace Common
diff --git a/src/common/swap.h b/src/common/swap.h
index 037b82781..085baaf9a 100644
--- a/src/common/swap.h
+++ b/src/common/swap.h
@@ -229,7 +229,7 @@ public:
229 value = swap(swap() - 1); 229 value = swap(swap() - 1);
230 return old; 230 return old;
231 } 231 }
232 // Comparaison 232 // Comparison
233 // v == i 233 // v == i
234 bool operator==(const swapped_t& i) const { 234 bool operator==(const swapped_t& i) const {
235 return swap() == i.swap(); 235 return swap() == i.swap();
@@ -368,7 +368,7 @@ public:
368 // Member 368 // Member
369 /** todo **/ 369 /** todo **/
370 370
371 // Arithmetics 371 // Arithmetic
372 template <typename S, typename T2, typename F2> 372 template <typename S, typename T2, typename F2>
373 friend S operator+(const S& p, const swapped_t v); 373 friend S operator+(const S& p, const swapped_t v);
374 374
@@ -384,7 +384,7 @@ public:
384 template <typename S, typename T2, typename F2> 384 template <typename S, typename T2, typename F2>
385 friend S operator%(const S& p, const swapped_t v); 385 friend S operator%(const S& p, const swapped_t v);
386 386
387 // Arithmetics + assignments 387 // Arithmetic + assignments
388 template <typename S, typename T2, typename F2> 388 template <typename S, typename T2, typename F2>
389 friend S operator+=(const S& p, const swapped_t v); 389 friend S operator+=(const S& p, const swapped_t v);
390 390
@@ -415,7 +415,7 @@ public:
415 friend bool operator==(const S& p, const swapped_t v); 415 friend bool operator==(const S& p, const swapped_t v);
416}; 416};
417 417
418// Arithmetics 418// Arithmetic
419template <typename S, typename T, typename F> 419template <typename S, typename T, typename F>
420S operator+(const S& i, const swap_struct_t<T, F> v) { 420S operator+(const S& i, const swap_struct_t<T, F> v) {
421 return i + v.swap(); 421 return i + v.swap();
@@ -441,7 +441,7 @@ S operator%(const S& i, const swap_struct_t<T, F> v) {
441 return i % v.swap(); 441 return i % v.swap();
442} 442}
443 443
444// Arithmetics + assignments 444// Arithmetic + assignments
445template <typename S, typename T, typename F> 445template <typename S, typename T, typename F>
446S& operator+=(S& i, const swap_struct_t<T, F> v) { 446S& operator+=(S& i, const swap_struct_t<T, F> v) {
447 i += v.swap(); 447 i += v.swap();
@@ -465,7 +465,7 @@ S operator&(const swap_struct_t<T, F> v, const S& i) {
465 return static_cast<S>(v.swap() & i); 465 return static_cast<S>(v.swap() & i);
466} 466}
467 467
468// Comparaison 468// Comparison
469template <typename S, typename T, typename F> 469template <typename S, typename T, typename F>
470bool operator<(const S& p, const swap_struct_t<T, F> v) { 470bool operator<(const S& p, const swap_struct_t<T, F> v) {
471 return p < v.swap(); 471 return p < v.swap();
diff --git a/src/common/x64/native_clock.cpp b/src/common/x64/native_clock.cpp
index bc1a973b0..76c66e7ee 100644
--- a/src/common/x64/native_clock.cpp
+++ b/src/common/x64/native_clock.cpp
@@ -53,11 +53,11 @@ u64 EstimateRDTSCFrequency() {
53 FencedRDTSC(); 53 FencedRDTSC();
54 54
55 // Get the current time. 55 // Get the current time.
56 const auto start_time = Common::SteadyClock::Now(); 56 const auto start_time = Common::RealTimeClock::Now();
57 const u64 tsc_start = FencedRDTSC(); 57 const u64 tsc_start = FencedRDTSC();
58 // Wait for 250 milliseconds. 58 // Wait for 250 milliseconds.
59 std::this_thread::sleep_for(std::chrono::milliseconds{250}); 59 std::this_thread::sleep_for(std::chrono::milliseconds{250});
60 const auto end_time = Common::SteadyClock::Now(); 60 const auto end_time = Common::RealTimeClock::Now();
61 const u64 tsc_end = FencedRDTSC(); 61 const u64 tsc_end = FencedRDTSC();
62 // Calculate differences. 62 // Calculate differences.
63 const u64 timer_diff = static_cast<u64>( 63 const u64 timer_diff = static_cast<u64>(
@@ -72,13 +72,29 @@ NativeClock::NativeClock(u64 emulated_cpu_frequency_, u64 emulated_clock_frequen
72 u64 rtsc_frequency_) 72 u64 rtsc_frequency_)
73 : WallClock(emulated_cpu_frequency_, emulated_clock_frequency_, true), rtsc_frequency{ 73 : WallClock(emulated_cpu_frequency_, emulated_clock_frequency_, true), rtsc_frequency{
74 rtsc_frequency_} { 74 rtsc_frequency_} {
75 // Thread to re-adjust the RDTSC frequency after 10 seconds has elapsed.
76 time_sync_thread = std::jthread{[this](std::stop_token token) {
77 // Get the current time.
78 const auto start_time = Common::RealTimeClock::Now();
79 const u64 tsc_start = FencedRDTSC();
80 // Wait for 10 seconds.
81 if (!Common::StoppableTimedWait(token, std::chrono::seconds{10})) {
82 return;
83 }
84 const auto end_time = Common::RealTimeClock::Now();
85 const u64 tsc_end = FencedRDTSC();
86 // Calculate differences.
87 const u64 timer_diff = static_cast<u64>(
88 std::chrono::duration_cast<std::chrono::nanoseconds>(end_time - start_time).count());
89 const u64 tsc_diff = tsc_end - tsc_start;
90 const u64 tsc_freq = MultiplyAndDivide64(tsc_diff, 1000000000ULL, timer_diff);
91 rtsc_frequency = tsc_freq;
92 CalculateAndSetFactors();
93 }};
94
75 time_point.inner.last_measure = FencedRDTSC(); 95 time_point.inner.last_measure = FencedRDTSC();
76 time_point.inner.accumulated_ticks = 0U; 96 time_point.inner.accumulated_ticks = 0U;
77 ns_rtsc_factor = GetFixedPoint64Factor(NS_RATIO, rtsc_frequency); 97 CalculateAndSetFactors();
78 us_rtsc_factor = GetFixedPoint64Factor(US_RATIO, rtsc_frequency);
79 ms_rtsc_factor = GetFixedPoint64Factor(MS_RATIO, rtsc_frequency);
80 clock_rtsc_factor = GetFixedPoint64Factor(emulated_clock_frequency, rtsc_frequency);
81 cpu_rtsc_factor = GetFixedPoint64Factor(emulated_cpu_frequency, rtsc_frequency);
82} 98}
83 99
84u64 NativeClock::GetRTSC() { 100u64 NativeClock::GetRTSC() {
@@ -138,6 +154,14 @@ u64 NativeClock::GetCPUCycles() {
138 return MultiplyHigh(rtsc_value, cpu_rtsc_factor); 154 return MultiplyHigh(rtsc_value, cpu_rtsc_factor);
139} 155}
140 156
157void NativeClock::CalculateAndSetFactors() {
158 ns_rtsc_factor = GetFixedPoint64Factor(NS_RATIO, rtsc_frequency);
159 us_rtsc_factor = GetFixedPoint64Factor(US_RATIO, rtsc_frequency);
160 ms_rtsc_factor = GetFixedPoint64Factor(MS_RATIO, rtsc_frequency);
161 clock_rtsc_factor = GetFixedPoint64Factor(emulated_clock_frequency, rtsc_frequency);
162 cpu_rtsc_factor = GetFixedPoint64Factor(emulated_cpu_frequency, rtsc_frequency);
163}
164
141} // namespace X64 165} // namespace X64
142 166
143} // namespace Common 167} // namespace Common
diff --git a/src/common/x64/native_clock.h b/src/common/x64/native_clock.h
index 38ae7a462..03ca291d8 100644
--- a/src/common/x64/native_clock.h
+++ b/src/common/x64/native_clock.h
@@ -3,6 +3,7 @@
3 3
4#pragma once 4#pragma once
5 5
6#include "common/polyfill_thread.h"
6#include "common/wall_clock.h" 7#include "common/wall_clock.h"
7 8
8namespace Common { 9namespace Common {
@@ -28,6 +29,8 @@ public:
28private: 29private:
29 u64 GetRTSC(); 30 u64 GetRTSC();
30 31
32 void CalculateAndSetFactors();
33
31 union alignas(16) TimePoint { 34 union alignas(16) TimePoint {
32 TimePoint() : pack{} {} 35 TimePoint() : pack{} {}
33 u128 pack{}; 36 u128 pack{};
@@ -47,6 +50,8 @@ private:
47 u64 ms_rtsc_factor{}; 50 u64 ms_rtsc_factor{};
48 51
49 u64 rtsc_frequency; 52 u64 rtsc_frequency;
53
54 std::jthread time_sync_thread;
50}; 55};
51} // namespace X64 56} // namespace X64
52 57
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 75e0c4f38..378e6c023 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -158,6 +158,7 @@ add_library(core STATIC
158 hid/motion_input.h 158 hid/motion_input.h
159 hle/api_version.h 159 hle/api_version.h
160 hle/ipc.h 160 hle/ipc.h
161 hle/kernel/board/nintendo/nx/k_memory_layout.cpp
161 hle/kernel/board/nintendo/nx/k_memory_layout.h 162 hle/kernel/board/nintendo/nx/k_memory_layout.h
162 hle/kernel/board/nintendo/nx/k_system_control.cpp 163 hle/kernel/board/nintendo/nx/k_system_control.cpp
163 hle/kernel/board/nintendo/nx/k_system_control.h 164 hle/kernel/board/nintendo/nx/k_system_control.h
@@ -211,12 +212,10 @@ add_library(core STATIC
211 hle/kernel/k_light_condition_variable.h 212 hle/kernel/k_light_condition_variable.h
212 hle/kernel/k_light_lock.cpp 213 hle/kernel/k_light_lock.cpp
213 hle/kernel/k_light_lock.h 214 hle/kernel/k_light_lock.h
214 hle/kernel/k_linked_list.h
215 hle/kernel/k_memory_block.h 215 hle/kernel/k_memory_block.h
216 hle/kernel/k_memory_block_manager.cpp 216 hle/kernel/k_memory_block_manager.cpp
217 hle/kernel/k_memory_block_manager.h 217 hle/kernel/k_memory_block_manager.h
218 hle/kernel/k_memory_layout.cpp 218 hle/kernel/k_memory_layout.cpp
219 hle/kernel/k_memory_layout.board.nintendo_nx.cpp
220 hle/kernel/k_memory_layout.h 219 hle/kernel/k_memory_layout.h
221 hle/kernel/k_memory_manager.cpp 220 hle/kernel/k_memory_manager.cpp
222 hle/kernel/k_memory_manager.h 221 hle/kernel/k_memory_manager.h
diff --git a/src/core/core.cpp b/src/core/core.cpp
index 4a1372d15..d2b597068 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -358,7 +358,7 @@ struct System::Impl {
358 void ShutdownMainProcess() { 358 void ShutdownMainProcess() {
359 SetShuttingDown(true); 359 SetShuttingDown(true);
360 360
361 // Log last frame performance stats if game was loded 361 // Log last frame performance stats if game was loaded
362 if (perf_stats) { 362 if (perf_stats) {
363 const auto perf_results = GetAndResetPerfStats(); 363 const auto perf_results = GetAndResetPerfStats();
364 constexpr auto performance = Common::Telemetry::FieldType::Performance; 364 constexpr auto performance = Common::Telemetry::FieldType::Performance;
@@ -434,7 +434,7 @@ struct System::Impl {
434 } 434 }
435 435
436 Service::Glue::ApplicationLaunchProperty launch{}; 436 Service::Glue::ApplicationLaunchProperty launch{};
437 launch.title_id = process.GetProgramID(); 437 launch.title_id = process.GetProgramId();
438 438
439 FileSys::PatchManager pm{launch.title_id, fs_controller, *content_provider}; 439 FileSys::PatchManager pm{launch.title_id, fs_controller, *content_provider};
440 launch.version = pm.GetGameVersion().value_or(0); 440 launch.version = pm.GetGameVersion().value_or(0);
@@ -762,7 +762,7 @@ const Core::SpeedLimiter& System::SpeedLimiter() const {
762} 762}
763 763
764u64 System::GetApplicationProcessProgramID() const { 764u64 System::GetApplicationProcessProgramID() const {
765 return impl->kernel.ApplicationProcess()->GetProgramID(); 765 return impl->kernel.ApplicationProcess()->GetProgramId();
766} 766}
767 767
768Loader::ResultStatus System::GetGameName(std::string& out) const { 768Loader::ResultStatus System::GetGameName(std::string& out) const {
diff --git a/src/core/core.h b/src/core/core.h
index 91e78672e..5843696d4 100644
--- a/src/core/core.h
+++ b/src/core/core.h
@@ -146,7 +146,7 @@ public:
146 146
147 /** 147 /**
148 * Initializes the system 148 * Initializes the system
149 * This function will initialize core functionaility used for system emulation 149 * This function will initialize core functionality used for system emulation
150 */ 150 */
151 void Initialize(); 151 void Initialize();
152 152
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp
index 742cfb996..cd4df4522 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -53,7 +53,7 @@ void CoreTiming::ThreadEntry(CoreTiming& instance) {
53 static constexpr char name[] = "HostTiming"; 53 static constexpr char name[] = "HostTiming";
54 MicroProfileOnThreadCreate(name); 54 MicroProfileOnThreadCreate(name);
55 Common::SetCurrentThreadName(name); 55 Common::SetCurrentThreadName(name);
56 Common::SetCurrentThreadPriority(Common::ThreadPriority::Critical); 56 Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
57 instance.on_thread_init(); 57 instance.on_thread_init();
58 instance.ThreadLoop(); 58 instance.ThreadLoop();
59 MicroProfileOnThreadExit(); 59 MicroProfileOnThreadExit();
diff --git a/src/core/core_timing.h b/src/core/core_timing.h
index 4b89c0c39..e7c4a949f 100644
--- a/src/core/core_timing.h
+++ b/src/core/core_timing.h
@@ -150,7 +150,7 @@ private:
150 // The queue is a min-heap using std::make_heap/push_heap/pop_heap. 150 // The queue is a min-heap using std::make_heap/push_heap/pop_heap.
151 // We don't use std::priority_queue because we need to be able to serialize, unserialize and 151 // We don't use std::priority_queue because we need to be able to serialize, unserialize and
152 // erase arbitrary events (RemoveEvent()) regardless of the queue order. These aren't 152 // erase arbitrary events (RemoveEvent()) regardless of the queue order. These aren't
153 // accomodated by the standard adaptor class. 153 // accommodated by the standard adaptor class.
154 std::vector<Event> event_queue; 154 std::vector<Event> event_queue;
155 u64 event_fifo_id = 0; 155 u64 event_fifo_id = 0;
156 156
diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp
index 04a11f444..980bb97f9 100644
--- a/src/core/cpu_manager.cpp
+++ b/src/core/cpu_manager.cpp
@@ -192,7 +192,7 @@ void CpuManager::RunThread(std::stop_token token, std::size_t core) {
192 } 192 }
193 MicroProfileOnThreadCreate(name.c_str()); 193 MicroProfileOnThreadCreate(name.c_str());
194 Common::SetCurrentThreadName(name.c_str()); 194 Common::SetCurrentThreadName(name.c_str());
195 Common::SetCurrentThreadPriority(Common::ThreadPriority::High); 195 Common::SetCurrentThreadPriority(Common::ThreadPriority::Critical);
196 auto& data = core_data[core]; 196 auto& data = core_data[core];
197 data.host_context = Common::Fiber::ThreadToFiber(); 197 data.host_context = Common::Fiber::ThreadToFiber();
198 198
diff --git a/src/core/crypto/ctr_encryption_layer.h b/src/core/crypto/ctr_encryption_layer.h
index 77f08d776..d85ad8f78 100644
--- a/src/core/crypto/ctr_encryption_layer.h
+++ b/src/core/crypto/ctr_encryption_layer.h
@@ -11,7 +11,7 @@
11 11
12namespace Core::Crypto { 12namespace Core::Crypto {
13 13
14// Sits on top of a VirtualFile and provides CTR-mode AES decription. 14// Sits on top of a VirtualFile and provides CTR-mode AES decryption.
15class CTREncryptionLayer : public EncryptionLayer { 15class CTREncryptionLayer : public EncryptionLayer {
16public: 16public:
17 using IVData = std::array<u8, 16>; 17 using IVData = std::array<u8, 16>;
diff --git a/src/core/crypto/key_manager.h b/src/core/crypto/key_manager.h
index dbf9ebfe4..673cec463 100644
--- a/src/core/crypto/key_manager.h
+++ b/src/core/crypto/key_manager.h
@@ -249,7 +249,7 @@ public:
249 249
250 static bool KeyFileExists(bool title); 250 static bool KeyFileExists(bool title);
251 251
252 // Call before using the sd seed to attempt to derive it if it dosen't exist. Needs system 252 // Call before using the sd seed to attempt to derive it if it doesn't exist. Needs system
253 // save 8*43 and the private file to exist. 253 // save 8*43 and the private file to exist.
254 void DeriveSDSeedLazy(); 254 void DeriveSDSeedLazy();
255 255
diff --git a/src/core/crypto/xts_encryption_layer.h b/src/core/crypto/xts_encryption_layer.h
index 735e660cb..68b5643b1 100644
--- a/src/core/crypto/xts_encryption_layer.h
+++ b/src/core/crypto/xts_encryption_layer.h
@@ -9,7 +9,7 @@
9 9
10namespace Core::Crypto { 10namespace Core::Crypto {
11 11
12// Sits on top of a VirtualFile and provides XTS-mode AES decription. 12// Sits on top of a VirtualFile and provides XTS-mode AES decryption.
13class XTSEncryptionLayer : public EncryptionLayer { 13class XTSEncryptionLayer : public EncryptionLayer {
14public: 14public:
15 XTSEncryptionLayer(FileSys::VirtualFile base, Key256 key); 15 XTSEncryptionLayer(FileSys::VirtualFile base, Key256 key);
diff --git a/src/core/debugger/gdbstub.cpp b/src/core/debugger/gdbstub.cpp
index 18afe97e1..b2fe6bd7d 100644
--- a/src/core/debugger/gdbstub.cpp
+++ b/src/core/debugger/gdbstub.cpp
@@ -421,7 +421,7 @@ void GDBStub::HandleBreakpointRemove(std::string_view command) {
421static std::optional<std::string> GetNameFromThreadType32(Core::Memory::Memory& memory, 421static std::optional<std::string> GetNameFromThreadType32(Core::Memory::Memory& memory,
422 const Kernel::KThread* thread) { 422 const Kernel::KThread* thread) {
423 // Read thread type from TLS 423 // Read thread type from TLS
424 const VAddr tls_thread_type{memory.Read32(thread->GetTLSAddress() + 0x1fc)}; 424 const VAddr tls_thread_type{memory.Read32(thread->GetTlsAddress() + 0x1fc)};
425 const VAddr argument_thread_type{thread->GetArgument()}; 425 const VAddr argument_thread_type{thread->GetArgument()};
426 426
427 if (argument_thread_type && tls_thread_type != argument_thread_type) { 427 if (argument_thread_type && tls_thread_type != argument_thread_type) {
@@ -452,7 +452,7 @@ static std::optional<std::string> GetNameFromThreadType32(Core::Memory::Memory&
452static std::optional<std::string> GetNameFromThreadType64(Core::Memory::Memory& memory, 452static std::optional<std::string> GetNameFromThreadType64(Core::Memory::Memory& memory,
453 const Kernel::KThread* thread) { 453 const Kernel::KThread* thread) {
454 // Read thread type from TLS 454 // Read thread type from TLS
455 const VAddr tls_thread_type{memory.Read64(thread->GetTLSAddress() + 0x1f8)}; 455 const VAddr tls_thread_type{memory.Read64(thread->GetTlsAddress() + 0x1f8)};
456 const VAddr argument_thread_type{thread->GetArgument()}; 456 const VAddr argument_thread_type{thread->GetArgument()};
457 457
458 if (argument_thread_type && tls_thread_type != argument_thread_type) { 458 if (argument_thread_type && tls_thread_type != argument_thread_type) {
@@ -576,7 +576,7 @@ void GDBStub::HandleQuery(std::string_view command) {
576 const auto& threads = system.ApplicationProcess()->GetThreadList(); 576 const auto& threads = system.ApplicationProcess()->GetThreadList();
577 std::vector<std::string> thread_ids; 577 std::vector<std::string> thread_ids;
578 for (const auto& thread : threads) { 578 for (const auto& thread : threads) {
579 thread_ids.push_back(fmt::format("{:x}", thread->GetThreadID())); 579 thread_ids.push_back(fmt::format("{:x}", thread->GetThreadId()));
580 } 580 }
581 SendReply(fmt::format("m{}", fmt::join(thread_ids, ","))); 581 SendReply(fmt::format("m{}", fmt::join(thread_ids, ",")));
582 } else if (command.starts_with("sThreadInfo")) { 582 } else if (command.starts_with("sThreadInfo")) {
@@ -591,11 +591,11 @@ void GDBStub::HandleQuery(std::string_view command) {
591 for (const auto* thread : threads) { 591 for (const auto* thread : threads) {
592 auto thread_name{GetThreadName(system, thread)}; 592 auto thread_name{GetThreadName(system, thread)};
593 if (!thread_name) { 593 if (!thread_name) {
594 thread_name = fmt::format("Thread {:d}", thread->GetThreadID()); 594 thread_name = fmt::format("Thread {:d}", thread->GetThreadId());
595 } 595 }
596 596
597 buffer += fmt::format(R"(<thread id="{:x}" core="{:d}" name="{}">{}</thread>)", 597 buffer += fmt::format(R"(<thread id="{:x}" core="{:d}" name="{}">{}</thread>)",
598 thread->GetThreadID(), thread->GetActiveCore(), 598 thread->GetThreadId(), thread->GetActiveCore(),
599 EscapeXML(*thread_name), GetThreadState(thread)); 599 EscapeXML(*thread_name), GetThreadState(thread));
600 } 600 }
601 601
@@ -756,7 +756,7 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
756 756
757 reply = fmt::format("Process: {:#x} ({})\n" 757 reply = fmt::format("Process: {:#x} ({})\n"
758 "Program Id: {:#018x}\n", 758 "Program Id: {:#018x}\n",
759 process->GetProcessID(), process->GetName(), process->GetProgramID()); 759 process->GetProcessId(), process->GetName(), process->GetProgramId());
760 reply += 760 reply +=
761 fmt::format("Layout:\n" 761 fmt::format("Layout:\n"
762 " Alias: {:#012x} - {:#012x}\n" 762 " Alias: {:#012x} - {:#012x}\n"
@@ -819,7 +819,7 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
819Kernel::KThread* GDBStub::GetThreadByID(u64 thread_id) { 819Kernel::KThread* GDBStub::GetThreadByID(u64 thread_id) {
820 const auto& threads{system.ApplicationProcess()->GetThreadList()}; 820 const auto& threads{system.ApplicationProcess()->GetThreadList()};
821 for (auto* thread : threads) { 821 for (auto* thread : threads) {
822 if (thread->GetThreadID() == thread_id) { 822 if (thread->GetThreadId() == thread_id) {
823 return thread; 823 return thread;
824 } 824 }
825 } 825 }
diff --git a/src/core/debugger/gdbstub_arch.cpp b/src/core/debugger/gdbstub_arch.cpp
index 831c48513..75c94a91a 100644
--- a/src/core/debugger/gdbstub_arch.cpp
+++ b/src/core/debugger/gdbstub_arch.cpp
@@ -259,7 +259,7 @@ void GDBStubA64::WriteRegisters(Kernel::KThread* thread, std::string_view regist
259std::string GDBStubA64::ThreadStatus(const Kernel::KThread* thread, u8 signal) const { 259std::string GDBStubA64::ThreadStatus(const Kernel::KThread* thread, u8 signal) const {
260 return fmt::format("T{:02x}{:02x}:{};{:02x}:{};{:02x}:{};thread:{:x};", signal, PC_REGISTER, 260 return fmt::format("T{:02x}{:02x}:{};{:02x}:{};{:02x}:{};thread:{:x};", signal, PC_REGISTER,
261 RegRead(thread, PC_REGISTER), SP_REGISTER, RegRead(thread, SP_REGISTER), 261 RegRead(thread, PC_REGISTER), SP_REGISTER, RegRead(thread, SP_REGISTER),
262 LR_REGISTER, RegRead(thread, LR_REGISTER), thread->GetThreadID()); 262 LR_REGISTER, RegRead(thread, LR_REGISTER), thread->GetThreadId());
263} 263}
264 264
265u32 GDBStubA64::BreakpointInstruction() const { 265u32 GDBStubA64::BreakpointInstruction() const {
@@ -469,7 +469,7 @@ void GDBStubA32::WriteRegisters(Kernel::KThread* thread, std::string_view regist
469std::string GDBStubA32::ThreadStatus(const Kernel::KThread* thread, u8 signal) const { 469std::string GDBStubA32::ThreadStatus(const Kernel::KThread* thread, u8 signal) const {
470 return fmt::format("T{:02x}{:02x}:{};{:02x}:{};{:02x}:{};thread:{:x};", signal, PC_REGISTER, 470 return fmt::format("T{:02x}{:02x}:{};{:02x}:{};{:02x}:{};thread:{:x};", signal, PC_REGISTER,
471 RegRead(thread, PC_REGISTER), SP_REGISTER, RegRead(thread, SP_REGISTER), 471 RegRead(thread, PC_REGISTER), SP_REGISTER, RegRead(thread, SP_REGISTER),
472 LR_REGISTER, RegRead(thread, LR_REGISTER), thread->GetThreadID()); 472 LR_REGISTER, RegRead(thread, LR_REGISTER), thread->GetThreadId());
473} 473}
474 474
475u32 GDBStubA32::BreakpointInstruction() const { 475u32 GDBStubA32::BreakpointInstruction() const {
diff --git a/src/core/file_sys/content_archive.h b/src/core/file_sys/content_archive.h
index 7fdc45ea7..20f524f80 100644
--- a/src/core/file_sys/content_archive.h
+++ b/src/core/file_sys/content_archive.h
@@ -93,7 +93,7 @@ inline bool IsDirectoryLogoPartition(const VirtualDir& pfs) {
93 pfs->GetFile("StartupMovie.gif") != nullptr; 93 pfs->GetFile("StartupMovie.gif") != nullptr;
94} 94}
95 95
96// An implementation of VfsDirectory that represents a Nintendo Content Archive (NCA) conatiner. 96// An implementation of VfsDirectory that represents a Nintendo Content Archive (NCA) container.
97// After construction, use GetStatus to determine if the file is valid and ready to be used. 97// After construction, use GetStatus to determine if the file is valid and ready to be used.
98class NCA : public ReadOnlyVfsDirectory { 98class NCA : public ReadOnlyVfsDirectory {
99public: 99public:
diff --git a/src/core/file_sys/registered_cache.h b/src/core/file_sys/registered_cache.h
index 587f8cae8..bd7f53eaf 100644
--- a/src/core/file_sys/registered_cache.h
+++ b/src/core/file_sys/registered_cache.h
@@ -162,7 +162,7 @@ public:
162 InstallResult InstallEntry(const NSP& nsp, bool overwrite_if_exists = false, 162 InstallResult InstallEntry(const NSP& nsp, bool overwrite_if_exists = false,
163 const VfsCopyFunction& copy = &VfsRawCopy); 163 const VfsCopyFunction& copy = &VfsRawCopy);
164 164
165 // Due to the fact that we must use Meta-type NCAs to determine the existance of files, this 165 // Due to the fact that we must use Meta-type NCAs to determine the existence of files, this
166 // poses quite a challenge. Instead of creating a new meta NCA for this file, yuzu will create a 166 // poses quite a challenge. Instead of creating a new meta NCA for this file, yuzu will create a
167 // dir inside the NAND called 'yuzu_meta' and store the raw CNMT there. 167 // dir inside the NAND called 'yuzu_meta' and store the raw CNMT there.
168 // TODO(DarkLordZach): Author real meta-type NCAs and install those. 168 // TODO(DarkLordZach): Author real meta-type NCAs and install those.
diff --git a/src/core/file_sys/vfs.h b/src/core/file_sys/vfs.h
index 8fc1738a4..a93e21f67 100644
--- a/src/core/file_sys/vfs.h
+++ b/src/core/file_sys/vfs.h
@@ -45,7 +45,7 @@ public:
45 // Return whether or not the user has write permission on this filesystem. 45 // Return whether or not the user has write permission on this filesystem.
46 virtual bool IsWritable() const; 46 virtual bool IsWritable() const;
47 47
48 // Determine if the entry at path is non-existant, a file, or a directory. 48 // Determine if the entry at path is non-existent, a file, or a directory.
49 virtual VfsEntryType GetEntryType(std::string_view path) const; 49 virtual VfsEntryType GetEntryType(std::string_view path) const;
50 50
51 // Opens the file with path relative to root. If it doesn't exist, returns nullptr. 51 // Opens the file with path relative to root. If it doesn't exist, returns nullptr.
@@ -58,7 +58,7 @@ public:
58 // Moves the file from old_path to new_path, returning the moved file on success and nullptr on 58 // Moves the file from old_path to new_path, returning the moved file on success and nullptr on
59 // failure. 59 // failure.
60 virtual VirtualFile MoveFile(std::string_view old_path, std::string_view new_path); 60 virtual VirtualFile MoveFile(std::string_view old_path, std::string_view new_path);
61 // Deletes the file with path relative to root, returing true on success. 61 // Deletes the file with path relative to root, returning true on success.
62 virtual bool DeleteFile(std::string_view path); 62 virtual bool DeleteFile(std::string_view path);
63 63
64 // Opens the directory with path relative to root. If it doesn't exist, returns nullptr. 64 // Opens the directory with path relative to root. If it doesn't exist, returns nullptr.
@@ -71,7 +71,7 @@ public:
71 // Moves the directory from old_path to new_path, returning the moved directory on success and 71 // Moves the directory from old_path to new_path, returning the moved directory on success and
72 // nullptr on failure. 72 // nullptr on failure.
73 virtual VirtualDir MoveDirectory(std::string_view old_path, std::string_view new_path); 73 virtual VirtualDir MoveDirectory(std::string_view old_path, std::string_view new_path);
74 // Deletes the directory with path relative to root, returing true on success. 74 // Deletes the directory with path relative to root, returning true on success.
75 virtual bool DeleteDirectory(std::string_view path); 75 virtual bool DeleteDirectory(std::string_view path);
76 76
77protected: 77protected:
@@ -144,7 +144,7 @@ public:
144 return Read(reinterpret_cast<u8*>(data), sizeof(T), offset); 144 return Read(reinterpret_cast<u8*>(data), sizeof(T), offset);
145 } 145 }
146 146
147 // Writes exactly one byte to offset in file and retuns whether or not the byte was written 147 // Writes exactly one byte to offset in file and returns whether or not the byte was written
148 // successfully. 148 // successfully.
149 virtual bool WriteByte(u8 data, std::size_t offset = 0); 149 virtual bool WriteByte(u8 data, std::size_t offset = 0);
150 // Writes a vector of bytes to offset in file and returns the number of bytes successfully 150 // Writes a vector of bytes to offset in file and returns the number of bytes successfully
@@ -191,13 +191,13 @@ public:
191 VfsDirectory() = default; 191 VfsDirectory() = default;
192 virtual ~VfsDirectory(); 192 virtual ~VfsDirectory();
193 193
194 // Retrives the file located at path as if the current directory was root. Returns nullptr if 194 // Retrieves the file located at path as if the current directory was root. Returns nullptr if
195 // not found. 195 // not found.
196 virtual VirtualFile GetFileRelative(std::string_view path) const; 196 virtual VirtualFile GetFileRelative(std::string_view path) const;
197 // Calls GetFileRelative(path) on the root of the current directory. 197 // Calls GetFileRelative(path) on the root of the current directory.
198 virtual VirtualFile GetFileAbsolute(std::string_view path) const; 198 virtual VirtualFile GetFileAbsolute(std::string_view path) const;
199 199
200 // Retrives the directory located at path as if the current directory was root. Returns nullptr 200 // Retrieves the directory located at path as if the current directory was root. Returns nullptr
201 // if not found. 201 // if not found.
202 virtual VirtualDir GetDirectoryRelative(std::string_view path) const; 202 virtual VirtualDir GetDirectoryRelative(std::string_view path) const;
203 // Calls GetDirectoryRelative(path) on the root of the current directory. 203 // Calls GetDirectoryRelative(path) on the root of the current directory.
@@ -205,7 +205,7 @@ public:
205 205
206 // Returns a vector containing all of the files in this directory. 206 // Returns a vector containing all of the files in this directory.
207 virtual std::vector<VirtualFile> GetFiles() const = 0; 207 virtual std::vector<VirtualFile> GetFiles() const = 0;
208 // Returns the file with filename matching name. Returns nullptr if directory dosen't have a 208 // Returns the file with filename matching name. Returns nullptr if directory doesn't have a
209 // file with name. 209 // file with name.
210 virtual VirtualFile GetFile(std::string_view name) const; 210 virtual VirtualFile GetFile(std::string_view name) const;
211 211
@@ -214,7 +214,7 @@ public:
214 214
215 // Returns a vector containing all of the subdirectories in this directory. 215 // Returns a vector containing all of the subdirectories in this directory.
216 virtual std::vector<VirtualDir> GetSubdirectories() const = 0; 216 virtual std::vector<VirtualDir> GetSubdirectories() const = 0;
217 // Returns the directory with name matching name. Returns nullptr if directory dosen't have a 217 // Returns the directory with name matching name. Returns nullptr if directory doesn't have a
218 // directory with name. 218 // directory with name.
219 virtual VirtualDir GetSubdirectory(std::string_view name) const; 219 virtual VirtualDir GetSubdirectory(std::string_view name) const;
220 220
diff --git a/src/core/file_sys/vfs_real.h b/src/core/file_sys/vfs_real.h
index acde1ac89..b92c84316 100644
--- a/src/core/file_sys/vfs_real.h
+++ b/src/core/file_sys/vfs_real.h
@@ -38,7 +38,7 @@ private:
38 boost::container::flat_map<std::string, std::weak_ptr<Common::FS::IOFile>> cache; 38 boost::container::flat_map<std::string, std::weak_ptr<Common::FS::IOFile>> cache;
39}; 39};
40 40
41// An implmentation of VfsFile that represents a file on the user's computer. 41// An implementation of VfsFile that represents a file on the user's computer.
42class RealVfsFile : public VfsFile { 42class RealVfsFile : public VfsFile {
43 friend class RealVfsDirectory; 43 friend class RealVfsDirectory;
44 friend class RealVfsFilesystem; 44 friend class RealVfsFilesystem;
diff --git a/src/core/frontend/emu_window.h b/src/core/frontend/emu_window.h
index cf85ba29e..1093800f6 100644
--- a/src/core/frontend/emu_window.h
+++ b/src/core/frontend/emu_window.h
@@ -205,7 +205,7 @@ protected:
205 } 205 }
206 206
207 /** 207 /**
208 * Converts a screen postion into the equivalent touchscreen position. 208 * Converts a screen position into the equivalent touchscreen position.
209 */ 209 */
210 std::pair<f32, f32> MapToTouchScreen(u32 framebuffer_x, u32 framebuffer_y) const; 210 std::pair<f32, f32> MapToTouchScreen(u32 framebuffer_x, u32 framebuffer_y) const;
211 211
diff --git a/src/core/hid/emulated_controller.h b/src/core/hid/emulated_controller.h
index a9da465a2..429655355 100644
--- a/src/core/hid/emulated_controller.h
+++ b/src/core/hid/emulated_controller.h
@@ -132,7 +132,7 @@ struct ControllerStatus {
132 RingAnalogValue ring_analog_value{}; 132 RingAnalogValue ring_analog_value{};
133 NfcValues nfc_values{}; 133 NfcValues nfc_values{};
134 134
135 // Data for HID serices 135 // Data for HID services
136 HomeButtonState home_button_state{}; 136 HomeButtonState home_button_state{};
137 CaptureButtonState capture_button_state{}; 137 CaptureButtonState capture_button_state{};
138 NpadButtonState npad_button_state{}; 138 NpadButtonState npad_button_state{};
@@ -357,7 +357,7 @@ public:
357 357
358 /** 358 /**
359 * Sends a small vibration to the output device 359 * Sends a small vibration to the output device
360 * @return true if SetVibration was successfull 360 * @return true if SetVibration was successful
361 */ 361 */
362 bool IsVibrationEnabled(std::size_t device_index); 362 bool IsVibrationEnabled(std::size_t device_index);
363 363
@@ -373,7 +373,7 @@ public:
373 /** 373 /**
374 * Sets the desired camera format to be polled from a controller 374 * Sets the desired camera format to be polled from a controller
375 * @param camera_format size of each frame 375 * @param camera_format size of each frame
376 * @return true if SetCameraFormat was successfull 376 * @return true if SetCameraFormat was successful
377 */ 377 */
378 bool SetCameraFormat(Core::IrSensor::ImageTransferProcessorFormat camera_format); 378 bool SetCameraFormat(Core::IrSensor::ImageTransferProcessorFormat camera_format);
379 379
diff --git a/src/core/hid/emulated_devices.h b/src/core/hid/emulated_devices.h
index caf2ca659..5eab693e4 100644
--- a/src/core/hid/emulated_devices.h
+++ b/src/core/hid/emulated_devices.h
@@ -53,7 +53,7 @@ struct DeviceStatus {
53 MouseWheelValues mouse_wheel_values{}; 53 MouseWheelValues mouse_wheel_values{};
54 MouseStickValue mouse_stick_value{}; 54 MouseStickValue mouse_stick_value{};
55 55
56 // Data for HID serices 56 // Data for HID services
57 KeyboardKey keyboard_state{}; 57 KeyboardKey keyboard_state{};
58 KeyboardModifier keyboard_moddifier_state{}; 58 KeyboardModifier keyboard_moddifier_state{};
59 MouseButton mouse_button_state{}; 59 MouseButton mouse_button_state{};
@@ -75,7 +75,7 @@ struct InterfaceUpdateCallback {
75class EmulatedDevices { 75class EmulatedDevices {
76public: 76public:
77 /** 77 /**
78 * Contains all input data related to external devices that aren't necesarily a controller 78 * Contains all input data related to external devices that aren't necessarily a controller
79 * This includes devices such as the keyboard or mouse 79 * This includes devices such as the keyboard or mouse
80 */ 80 */
81 explicit EmulatedDevices(); 81 explicit EmulatedDevices();
diff --git a/src/core/hid/input_converter.cpp b/src/core/hid/input_converter.cpp
index 3f7b8c090..7cee39a53 100644
--- a/src/core/hid/input_converter.cpp
+++ b/src/core/hid/input_converter.cpp
@@ -328,7 +328,7 @@ void SanitizeAnalog(Common::Input::AnalogStatus& analog, bool clamp_value) {
328 // Apply center offset 328 // Apply center offset
329 raw_value -= properties.offset; 329 raw_value -= properties.offset;
330 330
331 // Set initial values to be formated 331 // Set initial values to be formatted
332 value = raw_value; 332 value = raw_value;
333 333
334 // Calculate vector size 334 // Calculate vector size
@@ -398,7 +398,7 @@ void SanitizeStick(Common::Input::AnalogStatus& analog_x, Common::Input::AnalogS
398 raw_x = properties_x.inverted ? -raw_x : raw_x; 398 raw_x = properties_x.inverted ? -raw_x : raw_x;
399 raw_y = properties_y.inverted ? -raw_y : raw_y; 399 raw_y = properties_y.inverted ? -raw_y : raw_y;
400 400
401 // Set initial values to be formated 401 // Set initial values to be formatted
402 x = raw_x; 402 x = raw_x;
403 y = raw_y; 403 y = raw_y;
404 404
diff --git a/src/core/hid/motion_input.h b/src/core/hid/motion_input.h
index e2c1bbf95..9f3fc1cf7 100644
--- a/src/core/hid/motion_input.h
+++ b/src/core/hid/motion_input.h
@@ -84,7 +84,7 @@ private:
84 // Gyroscope vector measurement in radians/s. 84 // Gyroscope vector measurement in radians/s.
85 Common::Vec3f gyro; 85 Common::Vec3f gyro;
86 86
87 // Vector to be substracted from gyro measurements 87 // Vector to be subtracted from gyro measurements
88 Common::Vec3f gyro_bias; 88 Common::Vec3f gyro_bias;
89 89
90 // Minimum gyro amplitude to detect if the device is moving 90 // Minimum gyro amplitude to detect if the device is moving
diff --git a/src/core/hle/kernel/k_memory_layout.board.nintendo_nx.cpp b/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.cpp
index 098ba6eac..098ba6eac 100644
--- a/src/core/hle/kernel/k_memory_layout.board.nintendo_nx.cpp
+++ b/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.cpp
diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp
index fd911a3a5..7b090ccb5 100644
--- a/src/core/hle/kernel/global_scheduler_context.cpp
+++ b/src/core/hle/kernel/global_scheduler_context.cpp
@@ -12,20 +12,19 @@
12 12
13namespace Kernel { 13namespace Kernel {
14 14
15GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel_) 15GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel)
16 : kernel{kernel_}, scheduler_lock{kernel_} {} 16 : m_kernel{kernel}, m_scheduler_lock{kernel} {}
17 17
18GlobalSchedulerContext::~GlobalSchedulerContext() = default; 18GlobalSchedulerContext::~GlobalSchedulerContext() = default;
19 19
20void GlobalSchedulerContext::AddThread(KThread* thread) { 20void GlobalSchedulerContext::AddThread(KThread* thread) {
21 std::scoped_lock lock{global_list_guard}; 21 std::scoped_lock lock{m_global_list_guard};
22 thread_list.push_back(thread); 22 m_thread_list.push_back(thread);
23} 23}
24 24
25void GlobalSchedulerContext::RemoveThread(KThread* thread) { 25void GlobalSchedulerContext::RemoveThread(KThread* thread) {
26 std::scoped_lock lock{global_list_guard}; 26 std::scoped_lock lock{m_global_list_guard};
27 thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), 27 std::erase(m_thread_list, thread);
28 thread_list.end());
29} 28}
30 29
31void GlobalSchedulerContext::PreemptThreads() { 30void GlobalSchedulerContext::PreemptThreads() {
@@ -38,37 +37,37 @@ void GlobalSchedulerContext::PreemptThreads() {
38 63, 37 63,
39 }; 38 };
40 39
41 ASSERT(IsLocked()); 40 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
42 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { 41 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
43 const u32 priority = preemption_priorities[core_id]; 42 const u32 priority = preemption_priorities[core_id];
44 KScheduler::RotateScheduledQueue(kernel, core_id, priority); 43 KScheduler::RotateScheduledQueue(m_kernel, core_id, priority);
45 } 44 }
46} 45}
47 46
48bool GlobalSchedulerContext::IsLocked() const { 47bool GlobalSchedulerContext::IsLocked() const {
49 return scheduler_lock.IsLockedByCurrentThread(); 48 return m_scheduler_lock.IsLockedByCurrentThread();
50} 49}
51 50
52void GlobalSchedulerContext::RegisterDummyThreadForWakeup(KThread* thread) { 51void GlobalSchedulerContext::RegisterDummyThreadForWakeup(KThread* thread) {
53 ASSERT(IsLocked()); 52 ASSERT(this->IsLocked());
54 53
55 woken_dummy_threads.insert(thread); 54 m_woken_dummy_threads.insert(thread);
56} 55}
57 56
58void GlobalSchedulerContext::UnregisterDummyThreadForWakeup(KThread* thread) { 57void GlobalSchedulerContext::UnregisterDummyThreadForWakeup(KThread* thread) {
59 ASSERT(IsLocked()); 58 ASSERT(this->IsLocked());
60 59
61 woken_dummy_threads.erase(thread); 60 m_woken_dummy_threads.erase(thread);
62} 61}
63 62
64void GlobalSchedulerContext::WakeupWaitingDummyThreads() { 63void GlobalSchedulerContext::WakeupWaitingDummyThreads() {
65 ASSERT(IsLocked()); 64 ASSERT(this->IsLocked());
66 65
67 for (auto* thread : woken_dummy_threads) { 66 for (auto* thread : m_woken_dummy_threads) {
68 thread->DummyThreadEndWait(); 67 thread->DummyThreadEndWait();
69 } 68 }
70 69
71 woken_dummy_threads.clear(); 70 m_woken_dummy_threads.clear();
72} 71}
73 72
74} // namespace Kernel 73} // namespace Kernel
diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h
index 220ed6192..c48e8cd12 100644
--- a/src/core/hle/kernel/global_scheduler_context.h
+++ b/src/core/hle/kernel/global_scheduler_context.h
@@ -33,7 +33,7 @@ class GlobalSchedulerContext final {
33public: 33public:
34 using LockType = KAbstractSchedulerLock<KScheduler>; 34 using LockType = KAbstractSchedulerLock<KScheduler>;
35 35
36 explicit GlobalSchedulerContext(KernelCore& kernel_); 36 explicit GlobalSchedulerContext(KernelCore& kernel);
37 ~GlobalSchedulerContext(); 37 ~GlobalSchedulerContext();
38 38
39 /// Adds a new thread to the scheduler 39 /// Adds a new thread to the scheduler
@@ -43,8 +43,9 @@ public:
43 void RemoveThread(KThread* thread); 43 void RemoveThread(KThread* thread);
44 44
45 /// Returns a list of all threads managed by the scheduler 45 /// Returns a list of all threads managed by the scheduler
46 [[nodiscard]] const std::vector<KThread*>& GetThreadList() const { 46 /// This is only safe to iterate while holding the scheduler lock
47 return thread_list; 47 const std::vector<KThread*>& GetThreadList() const {
48 return m_thread_list;
48 } 49 }
49 50
50 /** 51 /**
@@ -63,30 +64,26 @@ public:
63 void RegisterDummyThreadForWakeup(KThread* thread); 64 void RegisterDummyThreadForWakeup(KThread* thread);
64 void WakeupWaitingDummyThreads(); 65 void WakeupWaitingDummyThreads();
65 66
66 [[nodiscard]] LockType& SchedulerLock() { 67 LockType& SchedulerLock() {
67 return scheduler_lock; 68 return m_scheduler_lock;
68 }
69
70 [[nodiscard]] const LockType& SchedulerLock() const {
71 return scheduler_lock;
72 } 69 }
73 70
74private: 71private:
75 friend class KScopedSchedulerLock; 72 friend class KScopedSchedulerLock;
76 friend class KScopedSchedulerLockAndSleep; 73 friend class KScopedSchedulerLockAndSleep;
77 74
78 KernelCore& kernel; 75 KernelCore& m_kernel;
79 76
80 std::atomic_bool scheduler_update_needed{}; 77 std::atomic_bool m_scheduler_update_needed{};
81 KSchedulerPriorityQueue priority_queue; 78 KSchedulerPriorityQueue m_priority_queue;
82 LockType scheduler_lock; 79 LockType m_scheduler_lock;
83 80
84 /// Lists dummy threads pending wakeup on lock release 81 /// Lists dummy threads pending wakeup on lock release
85 std::set<KThread*> woken_dummy_threads; 82 std::set<KThread*> m_woken_dummy_threads;
86 83
87 /// Lists all thread ids that aren't deleted/etc. 84 /// Lists all thread ids that aren't deleted/etc.
88 std::vector<KThread*> thread_list; 85 std::vector<KThread*> m_thread_list;
89 std::mutex global_list_guard; 86 std::mutex m_global_list_guard;
90}; 87};
91 88
92} // namespace Kernel 89} // namespace Kernel
diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
index fb86451ea..30a4e6edb 100644
--- a/src/core/hle/kernel/k_address_arbiter.cpp
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
@@ -14,8 +14,8 @@
14 14
15namespace Kernel { 15namespace Kernel {
16 16
17KAddressArbiter::KAddressArbiter(Core::System& system_) 17KAddressArbiter::KAddressArbiter(Core::System& system)
18 : system{system_}, kernel{system.Kernel()} {} 18 : m_system{system}, m_kernel{system.Kernel()} {}
19KAddressArbiter::~KAddressArbiter() = default; 19KAddressArbiter::~KAddressArbiter() = default;
20 20
21namespace { 21namespace {
@@ -90,8 +90,8 @@ bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32
90 90
91class ThreadQueueImplForKAddressArbiter final : public KThreadQueue { 91class ThreadQueueImplForKAddressArbiter final : public KThreadQueue {
92public: 92public:
93 explicit ThreadQueueImplForKAddressArbiter(KernelCore& kernel_, KAddressArbiter::ThreadTree* t) 93 explicit ThreadQueueImplForKAddressArbiter(KernelCore& kernel, KAddressArbiter::ThreadTree* t)
94 : KThreadQueue(kernel_), m_tree(t) {} 94 : KThreadQueue(kernel), m_tree(t) {}
95 95
96 void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override { 96 void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
97 // If the thread is waiting on an address arbiter, remove it from the tree. 97 // If the thread is waiting on an address arbiter, remove it from the tree.
@@ -105,7 +105,7 @@ public:
105 } 105 }
106 106
107private: 107private:
108 KAddressArbiter::ThreadTree* m_tree; 108 KAddressArbiter::ThreadTree* m_tree{};
109}; 109};
110 110
111} // namespace 111} // namespace
@@ -114,10 +114,10 @@ Result KAddressArbiter::Signal(VAddr addr, s32 count) {
114 // Perform signaling. 114 // Perform signaling.
115 s32 num_waiters{}; 115 s32 num_waiters{};
116 { 116 {
117 KScopedSchedulerLock sl(kernel); 117 KScopedSchedulerLock sl(m_kernel);
118 118
119 auto it = thread_tree.nfind_key({addr, -1}); 119 auto it = m_tree.nfind_key({addr, -1});
120 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && 120 while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) &&
121 (it->GetAddressArbiterKey() == addr)) { 121 (it->GetAddressArbiterKey() == addr)) {
122 // End the thread's wait. 122 // End the thread's wait.
123 KThread* target_thread = std::addressof(*it); 123 KThread* target_thread = std::addressof(*it);
@@ -126,31 +126,27 @@ Result KAddressArbiter::Signal(VAddr addr, s32 count) {
126 ASSERT(target_thread->IsWaitingForAddressArbiter()); 126 ASSERT(target_thread->IsWaitingForAddressArbiter());
127 target_thread->ClearAddressArbiter(); 127 target_thread->ClearAddressArbiter();
128 128
129 it = thread_tree.erase(it); 129 it = m_tree.erase(it);
130 ++num_waiters; 130 ++num_waiters;
131 } 131 }
132 } 132 }
133 return ResultSuccess; 133 R_SUCCEED();
134} 134}
135 135
136Result KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) { 136Result KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) {
137 // Perform signaling. 137 // Perform signaling.
138 s32 num_waiters{}; 138 s32 num_waiters{};
139 { 139 {
140 KScopedSchedulerLock sl(kernel); 140 KScopedSchedulerLock sl(m_kernel);
141 141
142 // Check the userspace value. 142 // Check the userspace value.
143 s32 user_value{}; 143 s32 user_value{};
144 if (!UpdateIfEqual(system, &user_value, addr, value, value + 1)) { 144 R_UNLESS(UpdateIfEqual(m_system, std::addressof(user_value), addr, value, value + 1),
145 LOG_ERROR(Kernel, "Invalid current memory!"); 145 ResultInvalidCurrentMemory);
146 return ResultInvalidCurrentMemory; 146 R_UNLESS(user_value == value, ResultInvalidState);
147 }
148 if (user_value != value) {
149 return ResultInvalidState;
150 }
151 147
152 auto it = thread_tree.nfind_key({addr, -1}); 148 auto it = m_tree.nfind_key({addr, -1});
153 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && 149 while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) &&
154 (it->GetAddressArbiterKey() == addr)) { 150 (it->GetAddressArbiterKey() == addr)) {
155 // End the thread's wait. 151 // End the thread's wait.
156 KThread* target_thread = std::addressof(*it); 152 KThread* target_thread = std::addressof(*it);
@@ -159,33 +155,33 @@ Result KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 cou
159 ASSERT(target_thread->IsWaitingForAddressArbiter()); 155 ASSERT(target_thread->IsWaitingForAddressArbiter());
160 target_thread->ClearAddressArbiter(); 156 target_thread->ClearAddressArbiter();
161 157
162 it = thread_tree.erase(it); 158 it = m_tree.erase(it);
163 ++num_waiters; 159 ++num_waiters;
164 } 160 }
165 } 161 }
166 return ResultSuccess; 162 R_SUCCEED();
167} 163}
168 164
169Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) { 165Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) {
170 // Perform signaling. 166 // Perform signaling.
171 s32 num_waiters{}; 167 s32 num_waiters{};
172 { 168 {
173 [[maybe_unused]] const KScopedSchedulerLock sl(kernel); 169 KScopedSchedulerLock sl(m_kernel);
174 170
175 auto it = thread_tree.nfind_key({addr, -1}); 171 auto it = m_tree.nfind_key({addr, -1});
176 // Determine the updated value. 172 // Determine the updated value.
177 s32 new_value{}; 173 s32 new_value{};
178 if (count <= 0) { 174 if (count <= 0) {
179 if (it != thread_tree.end() && it->GetAddressArbiterKey() == addr) { 175 if (it != m_tree.end() && it->GetAddressArbiterKey() == addr) {
180 new_value = value - 2; 176 new_value = value - 2;
181 } else { 177 } else {
182 new_value = value + 1; 178 new_value = value + 1;
183 } 179 }
184 } else { 180 } else {
185 if (it != thread_tree.end() && it->GetAddressArbiterKey() == addr) { 181 if (it != m_tree.end() && it->GetAddressArbiterKey() == addr) {
186 auto tmp_it = it; 182 auto tmp_it = it;
187 s32 tmp_num_waiters{}; 183 s32 tmp_num_waiters{};
188 while (++tmp_it != thread_tree.end() && tmp_it->GetAddressArbiterKey() == addr) { 184 while (++tmp_it != m_tree.end() && tmp_it->GetAddressArbiterKey() == addr) {
189 if (tmp_num_waiters++ >= count) { 185 if (tmp_num_waiters++ >= count) {
190 break; 186 break;
191 } 187 }
@@ -205,20 +201,15 @@ Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 val
205 s32 user_value{}; 201 s32 user_value{};
206 bool succeeded{}; 202 bool succeeded{};
207 if (value != new_value) { 203 if (value != new_value) {
208 succeeded = UpdateIfEqual(system, &user_value, addr, value, new_value); 204 succeeded = UpdateIfEqual(m_system, std::addressof(user_value), addr, value, new_value);
209 } else { 205 } else {
210 succeeded = ReadFromUser(system, &user_value, addr); 206 succeeded = ReadFromUser(m_system, std::addressof(user_value), addr);
211 } 207 }
212 208
213 if (!succeeded) { 209 R_UNLESS(succeeded, ResultInvalidCurrentMemory);
214 LOG_ERROR(Kernel, "Invalid current memory!"); 210 R_UNLESS(user_value == value, ResultInvalidState);
215 return ResultInvalidCurrentMemory;
216 }
217 if (user_value != value) {
218 return ResultInvalidState;
219 }
220 211
221 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && 212 while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) &&
222 (it->GetAddressArbiterKey() == addr)) { 213 (it->GetAddressArbiterKey() == addr)) {
223 // End the thread's wait. 214 // End the thread's wait.
224 KThread* target_thread = std::addressof(*it); 215 KThread* target_thread = std::addressof(*it);
@@ -227,58 +218,60 @@ Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 val
227 ASSERT(target_thread->IsWaitingForAddressArbiter()); 218 ASSERT(target_thread->IsWaitingForAddressArbiter());
228 target_thread->ClearAddressArbiter(); 219 target_thread->ClearAddressArbiter();
229 220
230 it = thread_tree.erase(it); 221 it = m_tree.erase(it);
231 ++num_waiters; 222 ++num_waiters;
232 } 223 }
233 } 224 }
234 return ResultSuccess; 225 R_SUCCEED();
235} 226}
236 227
237Result KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) { 228Result KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
238 // Prepare to wait. 229 // Prepare to wait.
239 KThread* cur_thread = GetCurrentThreadPointer(kernel); 230 KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
240 ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree)); 231 KHardwareTimer* timer{};
232 ThreadQueueImplForKAddressArbiter wait_queue(m_kernel, std::addressof(m_tree));
241 233
242 { 234 {
243 KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout}; 235 KScopedSchedulerLockAndSleep slp{m_kernel, std::addressof(timer), cur_thread, timeout};
244 236
245 // Check that the thread isn't terminating. 237 // Check that the thread isn't terminating.
246 if (cur_thread->IsTerminationRequested()) { 238 if (cur_thread->IsTerminationRequested()) {
247 slp.CancelSleep(); 239 slp.CancelSleep();
248 return ResultTerminationRequested; 240 R_THROW(ResultTerminationRequested);
249 } 241 }
250 242
251 // Read the value from userspace. 243 // Read the value from userspace.
252 s32 user_value{}; 244 s32 user_value{};
253 bool succeeded{}; 245 bool succeeded{};
254 if (decrement) { 246 if (decrement) {
255 succeeded = DecrementIfLessThan(system, &user_value, addr, value); 247 succeeded = DecrementIfLessThan(m_system, std::addressof(user_value), addr, value);
256 } else { 248 } else {
257 succeeded = ReadFromUser(system, &user_value, addr); 249 succeeded = ReadFromUser(m_system, std::addressof(user_value), addr);
258 } 250 }
259 251
260 if (!succeeded) { 252 if (!succeeded) {
261 slp.CancelSleep(); 253 slp.CancelSleep();
262 return ResultInvalidCurrentMemory; 254 R_THROW(ResultInvalidCurrentMemory);
263 } 255 }
264 256
265 // Check that the value is less than the specified one. 257 // Check that the value is less than the specified one.
266 if (user_value >= value) { 258 if (user_value >= value) {
267 slp.CancelSleep(); 259 slp.CancelSleep();
268 return ResultInvalidState; 260 R_THROW(ResultInvalidState);
269 } 261 }
270 262
271 // Check that the timeout is non-zero. 263 // Check that the timeout is non-zero.
272 if (timeout == 0) { 264 if (timeout == 0) {
273 slp.CancelSleep(); 265 slp.CancelSleep();
274 return ResultTimedOut; 266 R_THROW(ResultTimedOut);
275 } 267 }
276 268
277 // Set the arbiter. 269 // Set the arbiter.
278 cur_thread->SetAddressArbiter(&thread_tree, addr); 270 cur_thread->SetAddressArbiter(std::addressof(m_tree), addr);
279 thread_tree.insert(*cur_thread); 271 m_tree.insert(*cur_thread);
280 272
281 // Wait for the thread to finish. 273 // Wait for the thread to finish.
274 wait_queue.SetHardwareTimer(timer);
282 cur_thread->BeginWait(std::addressof(wait_queue)); 275 cur_thread->BeginWait(std::addressof(wait_queue));
283 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration); 276 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
284 } 277 }
@@ -289,42 +282,44 @@ Result KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s6
289 282
290Result KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) { 283Result KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
291 // Prepare to wait. 284 // Prepare to wait.
292 KThread* cur_thread = GetCurrentThreadPointer(kernel); 285 KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
293 ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree)); 286 KHardwareTimer* timer{};
287 ThreadQueueImplForKAddressArbiter wait_queue(m_kernel, std::addressof(m_tree));
294 288
295 { 289 {
296 KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout}; 290 KScopedSchedulerLockAndSleep slp{m_kernel, std::addressof(timer), cur_thread, timeout};
297 291
298 // Check that the thread isn't terminating. 292 // Check that the thread isn't terminating.
299 if (cur_thread->IsTerminationRequested()) { 293 if (cur_thread->IsTerminationRequested()) {
300 slp.CancelSleep(); 294 slp.CancelSleep();
301 return ResultTerminationRequested; 295 R_THROW(ResultTerminationRequested);
302 } 296 }
303 297
304 // Read the value from userspace. 298 // Read the value from userspace.
305 s32 user_value{}; 299 s32 user_value{};
306 if (!ReadFromUser(system, &user_value, addr)) { 300 if (!ReadFromUser(m_system, std::addressof(user_value), addr)) {
307 slp.CancelSleep(); 301 slp.CancelSleep();
308 return ResultInvalidCurrentMemory; 302 R_THROW(ResultInvalidCurrentMemory);
309 } 303 }
310 304
311 // Check that the value is equal. 305 // Check that the value is equal.
312 if (value != user_value) { 306 if (value != user_value) {
313 slp.CancelSleep(); 307 slp.CancelSleep();
314 return ResultInvalidState; 308 R_THROW(ResultInvalidState);
315 } 309 }
316 310
317 // Check that the timeout is non-zero. 311 // Check that the timeout is non-zero.
318 if (timeout == 0) { 312 if (timeout == 0) {
319 slp.CancelSleep(); 313 slp.CancelSleep();
320 return ResultTimedOut; 314 R_THROW(ResultTimedOut);
321 } 315 }
322 316
323 // Set the arbiter. 317 // Set the arbiter.
324 cur_thread->SetAddressArbiter(&thread_tree, addr); 318 cur_thread->SetAddressArbiter(std::addressof(m_tree), addr);
325 thread_tree.insert(*cur_thread); 319 m_tree.insert(*cur_thread);
326 320
327 // Wait for the thread to finish. 321 // Wait for the thread to finish.
322 wait_queue.SetHardwareTimer(timer);
328 cur_thread->BeginWait(std::addressof(wait_queue)); 323 cur_thread->BeginWait(std::addressof(wait_queue));
329 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration); 324 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
330 } 325 }
diff --git a/src/core/hle/kernel/k_address_arbiter.h b/src/core/hle/kernel/k_address_arbiter.h
index e4085ae22..9a8c1ae94 100644
--- a/src/core/hle/kernel/k_address_arbiter.h
+++ b/src/core/hle/kernel/k_address_arbiter.h
@@ -22,47 +22,46 @@ class KAddressArbiter {
22public: 22public:
23 using ThreadTree = KConditionVariable::ThreadTree; 23 using ThreadTree = KConditionVariable::ThreadTree;
24 24
25 explicit KAddressArbiter(Core::System& system_); 25 explicit KAddressArbiter(Core::System& system);
26 ~KAddressArbiter(); 26 ~KAddressArbiter();
27 27
28 [[nodiscard]] Result SignalToAddress(VAddr addr, Svc::SignalType type, s32 value, s32 count) { 28 Result SignalToAddress(VAddr addr, Svc::SignalType type, s32 value, s32 count) {
29 switch (type) { 29 switch (type) {
30 case Svc::SignalType::Signal: 30 case Svc::SignalType::Signal:
31 return Signal(addr, count); 31 R_RETURN(this->Signal(addr, count));
32 case Svc::SignalType::SignalAndIncrementIfEqual: 32 case Svc::SignalType::SignalAndIncrementIfEqual:
33 return SignalAndIncrementIfEqual(addr, value, count); 33 R_RETURN(this->SignalAndIncrementIfEqual(addr, value, count));
34 case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual: 34 case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
35 return SignalAndModifyByWaitingCountIfEqual(addr, value, count); 35 R_RETURN(this->SignalAndModifyByWaitingCountIfEqual(addr, value, count));
36 default:
37 UNREACHABLE();
36 } 38 }
37 ASSERT(false);
38 return ResultUnknown;
39 } 39 }
40 40
41 [[nodiscard]] Result WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value, 41 Result WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value, s64 timeout) {
42 s64 timeout) {
43 switch (type) { 42 switch (type) {
44 case Svc::ArbitrationType::WaitIfLessThan: 43 case Svc::ArbitrationType::WaitIfLessThan:
45 return WaitIfLessThan(addr, value, false, timeout); 44 R_RETURN(WaitIfLessThan(addr, value, false, timeout));
46 case Svc::ArbitrationType::DecrementAndWaitIfLessThan: 45 case Svc::ArbitrationType::DecrementAndWaitIfLessThan:
47 return WaitIfLessThan(addr, value, true, timeout); 46 R_RETURN(WaitIfLessThan(addr, value, true, timeout));
48 case Svc::ArbitrationType::WaitIfEqual: 47 case Svc::ArbitrationType::WaitIfEqual:
49 return WaitIfEqual(addr, value, timeout); 48 R_RETURN(WaitIfEqual(addr, value, timeout));
49 default:
50 UNREACHABLE();
50 } 51 }
51 ASSERT(false);
52 return ResultUnknown;
53 } 52 }
54 53
55private: 54private:
56 [[nodiscard]] Result Signal(VAddr addr, s32 count); 55 Result Signal(VAddr addr, s32 count);
57 [[nodiscard]] Result SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count); 56 Result SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
58 [[nodiscard]] Result SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count); 57 Result SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
59 [[nodiscard]] Result WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout); 58 Result WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
60 [[nodiscard]] Result WaitIfEqual(VAddr addr, s32 value, s64 timeout); 59 Result WaitIfEqual(VAddr addr, s32 value, s64 timeout);
61 60
62 ThreadTree thread_tree; 61private:
63 62 ThreadTree m_tree;
64 Core::System& system; 63 Core::System& m_system;
65 KernelCore& kernel; 64 KernelCore& m_kernel;
66}; 65};
67 66
68} // namespace Kernel 67} // namespace Kernel
diff --git a/src/core/hle/kernel/k_affinity_mask.h b/src/core/hle/kernel/k_affinity_mask.h
index b58716e90..07a5a822c 100644
--- a/src/core/hle/kernel/k_affinity_mask.h
+++ b/src/core/hle/kernel/k_affinity_mask.h
@@ -13,40 +13,40 @@ class KAffinityMask {
13public: 13public:
14 constexpr KAffinityMask() = default; 14 constexpr KAffinityMask() = default;
15 15
16 [[nodiscard]] constexpr u64 GetAffinityMask() const { 16 constexpr u64 GetAffinityMask() const {
17 return this->mask; 17 return m_mask;
18 } 18 }
19 19
20 constexpr void SetAffinityMask(u64 new_mask) { 20 constexpr void SetAffinityMask(u64 new_mask) {
21 ASSERT((new_mask & ~AllowedAffinityMask) == 0); 21 ASSERT((new_mask & ~AllowedAffinityMask) == 0);
22 this->mask = new_mask; 22 m_mask = new_mask;
23 } 23 }
24 24
25 [[nodiscard]] constexpr bool GetAffinity(s32 core) const { 25 constexpr bool GetAffinity(s32 core) const {
26 return (this->mask & GetCoreBit(core)) != 0; 26 return (m_mask & GetCoreBit(core)) != 0;
27 } 27 }
28 28
29 constexpr void SetAffinity(s32 core, bool set) { 29 constexpr void SetAffinity(s32 core, bool set) {
30 if (set) { 30 if (set) {
31 this->mask |= GetCoreBit(core); 31 m_mask |= GetCoreBit(core);
32 } else { 32 } else {
33 this->mask &= ~GetCoreBit(core); 33 m_mask &= ~GetCoreBit(core);
34 } 34 }
35 } 35 }
36 36
37 constexpr void SetAll() { 37 constexpr void SetAll() {
38 this->mask = AllowedAffinityMask; 38 m_mask = AllowedAffinityMask;
39 } 39 }
40 40
41private: 41private:
42 [[nodiscard]] static constexpr u64 GetCoreBit(s32 core) { 42 static constexpr u64 GetCoreBit(s32 core) {
43 ASSERT(0 <= core && core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES)); 43 ASSERT(0 <= core && core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
44 return (1ULL << core); 44 return (1ULL << core);
45 } 45 }
46 46
47 static constexpr u64 AllowedAffinityMask = (1ULL << Core::Hardware::NUM_CPU_CORES) - 1; 47 static constexpr u64 AllowedAffinityMask = (1ULL << Core::Hardware::NUM_CPU_CORES) - 1;
48 48
49 u64 mask{}; 49 u64 m_mask{};
50}; 50};
51 51
52} // namespace Kernel 52} // namespace Kernel
diff --git a/src/core/hle/kernel/k_auto_object.cpp b/src/core/hle/kernel/k_auto_object.cpp
index 691af8ccb..0ae42c95c 100644
--- a/src/core/hle/kernel/k_auto_object.cpp
+++ b/src/core/hle/kernel/k_auto_object.cpp
@@ -12,11 +12,11 @@ KAutoObject* KAutoObject::Create(KAutoObject* obj) {
12} 12}
13 13
14void KAutoObject::RegisterWithKernel() { 14void KAutoObject::RegisterWithKernel() {
15 kernel.RegisterKernelObject(this); 15 m_kernel.RegisterKernelObject(this);
16} 16}
17 17
18void KAutoObject::UnregisterWithKernel() { 18void KAutoObject::UnregisterWithKernel() {
19 kernel.UnregisterKernelObject(this); 19 m_kernel.UnregisterKernelObject(this);
20} 20}
21 21
22} // namespace Kernel 22} // namespace Kernel
diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h
index e8118c2b8..9b71fe371 100644
--- a/src/core/hle/kernel/k_auto_object.h
+++ b/src/core/hle/kernel/k_auto_object.h
@@ -80,7 +80,7 @@ private:
80 KERNEL_AUTOOBJECT_TRAITS_IMPL(KAutoObject, KAutoObject, const); 80 KERNEL_AUTOOBJECT_TRAITS_IMPL(KAutoObject, KAutoObject, const);
81 81
82public: 82public:
83 explicit KAutoObject(KernelCore& kernel_) : kernel(kernel_) { 83 explicit KAutoObject(KernelCore& kernel) : m_kernel(kernel) {
84 RegisterWithKernel(); 84 RegisterWithKernel();
85 } 85 }
86 virtual ~KAutoObject() = default; 86 virtual ~KAutoObject() = default;
@@ -164,17 +164,12 @@ public:
164 } 164 }
165 } 165 }
166 166
167 const std::string& GetName() const {
168 return name;
169 }
170
171private: 167private:
172 void RegisterWithKernel(); 168 void RegisterWithKernel();
173 void UnregisterWithKernel(); 169 void UnregisterWithKernel();
174 170
175protected: 171protected:
176 KernelCore& kernel; 172 KernelCore& m_kernel;
177 std::string name;
178 173
179private: 174private:
180 std::atomic<u32> m_ref_count{}; 175 std::atomic<u32> m_ref_count{};
@@ -184,7 +179,7 @@ class KAutoObjectWithListContainer;
184 179
185class KAutoObjectWithList : public KAutoObject, public boost::intrusive::set_base_hook<> { 180class KAutoObjectWithList : public KAutoObject, public boost::intrusive::set_base_hook<> {
186public: 181public:
187 explicit KAutoObjectWithList(KernelCore& kernel_) : KAutoObject(kernel_) {} 182 explicit KAutoObjectWithList(KernelCore& kernel) : KAutoObject(kernel) {}
188 183
189 static int Compare(const KAutoObjectWithList& lhs, const KAutoObjectWithList& rhs) { 184 static int Compare(const KAutoObjectWithList& lhs, const KAutoObjectWithList& rhs) {
190 const u64 lid = lhs.GetId(); 185 const u64 lid = lhs.GetId();
@@ -200,7 +195,7 @@ public:
200 } 195 }
201 196
202 friend bool operator<(const KAutoObjectWithList& left, const KAutoObjectWithList& right) { 197 friend bool operator<(const KAutoObjectWithList& left, const KAutoObjectWithList& right) {
203 return &left < &right; 198 return KAutoObjectWithList::Compare(left, right) < 0;
204 } 199 }
205 200
206public: 201public:
@@ -208,10 +203,6 @@ public:
208 return reinterpret_cast<u64>(this); 203 return reinterpret_cast<u64>(this);
209 } 204 }
210 205
211 virtual const std::string& GetName() const {
212 return name;
213 }
214
215private: 206private:
216 friend class KAutoObjectWithListContainer; 207 friend class KAutoObjectWithListContainer;
217}; 208};
diff --git a/src/core/hle/kernel/k_capabilities.cpp b/src/core/hle/kernel/k_capabilities.cpp
index 2907cc6e3..90e4e8fb0 100644
--- a/src/core/hle/kernel/k_capabilities.cpp
+++ b/src/core/hle/kernel/k_capabilities.cpp
@@ -11,7 +11,7 @@
11 11
12namespace Kernel { 12namespace Kernel {
13 13
14Result KCapabilities::InitializeForKIP(std::span<const u32> kern_caps, KPageTable* page_table) { 14Result KCapabilities::InitializeForKip(std::span<const u32> kern_caps, KPageTable* page_table) {
15 // We're initializing an initial process. 15 // We're initializing an initial process.
16 m_svc_access_flags.reset(); 16 m_svc_access_flags.reset();
17 m_irq_access_flags.reset(); 17 m_irq_access_flags.reset();
diff --git a/src/core/hle/kernel/k_capabilities.h b/src/core/hle/kernel/k_capabilities.h
index cd96f8d23..de766c811 100644
--- a/src/core/hle/kernel/k_capabilities.h
+++ b/src/core/hle/kernel/k_capabilities.h
@@ -22,7 +22,7 @@ class KCapabilities {
22public: 22public:
23 constexpr explicit KCapabilities() = default; 23 constexpr explicit KCapabilities() = default;
24 24
25 Result InitializeForKIP(std::span<const u32> kern_caps, KPageTable* page_table); 25 Result InitializeForKip(std::span<const u32> kern_caps, KPageTable* page_table);
26 Result InitializeForUser(std::span<const u32> user_caps, KPageTable* page_table); 26 Result InitializeForUser(std::span<const u32> user_caps, KPageTable* page_table);
27 27
28 static Result CheckCapabilities(KernelCore& kernel, std::span<const u32> user_caps); 28 static Result CheckCapabilities(KernelCore& kernel, std::span<const u32> user_caps);
diff --git a/src/core/hle/kernel/k_client_port.cpp b/src/core/hle/kernel/k_client_port.cpp
index 700ae71e3..40e09e532 100644
--- a/src/core/hle/kernel/k_client_port.cpp
+++ b/src/core/hle/kernel/k_client_port.cpp
@@ -11,26 +11,21 @@
11 11
12namespace Kernel { 12namespace Kernel {
13 13
14KClientPort::KClientPort(KernelCore& kernel_) : KSynchronizationObject{kernel_} {} 14KClientPort::KClientPort(KernelCore& kernel) : KSynchronizationObject{kernel} {}
15KClientPort::~KClientPort() = default; 15KClientPort::~KClientPort() = default;
16 16
17void KClientPort::Initialize(KPort* parent_port_, s32 max_sessions_, std::string&& name_) { 17void KClientPort::Initialize(KPort* parent, s32 max_sessions) {
18 // Set member variables. 18 // Set member variables.
19 num_sessions = 0; 19 m_num_sessions = 0;
20 peak_sessions = 0; 20 m_peak_sessions = 0;
21 parent = parent_port_; 21 m_parent = parent;
22 max_sessions = max_sessions_; 22 m_max_sessions = max_sessions;
23 name = std::move(name_);
24} 23}
25 24
26void KClientPort::OnSessionFinalized() { 25void KClientPort::OnSessionFinalized() {
27 KScopedSchedulerLock sl{kernel}; 26 KScopedSchedulerLock sl{m_kernel};
28 27
29 // This might happen if a session was improperly used with this port. 28 if (const auto prev = m_num_sessions--; prev == m_max_sessions) {
30 ASSERT_MSG(num_sessions > 0, "num_sessions is invalid");
31
32 const auto prev = num_sessions--;
33 if (prev == max_sessions) {
34 this->NotifyAvailable(); 29 this->NotifyAvailable();
35 } 30 }
36} 31}
@@ -47,81 +42,81 @@ bool KClientPort::IsServerClosed() const {
47 42
48void KClientPort::Destroy() { 43void KClientPort::Destroy() {
49 // Note with our parent that we're closed. 44 // Note with our parent that we're closed.
50 parent->OnClientClosed(); 45 m_parent->OnClientClosed();
51 46
52 // Close our reference to our parent. 47 // Close our reference to our parent.
53 parent->Close(); 48 m_parent->Close();
54} 49}
55 50
56bool KClientPort::IsSignaled() const { 51bool KClientPort::IsSignaled() const {
57 return num_sessions < max_sessions; 52 return m_num_sessions.load() < m_max_sessions;
58} 53}
59 54
60Result KClientPort::CreateSession(KClientSession** out) { 55Result KClientPort::CreateSession(KClientSession** out) {
56 // Declare the session we're going to allocate.
57 KSession* session{};
58
61 // Reserve a new session from the resource limit. 59 // Reserve a new session from the resource limit.
62 //! FIXME: we are reserving this from the wrong resource limit! 60 //! FIXME: we are reserving this from the wrong resource limit!
63 KScopedResourceReservation session_reservation(kernel.ApplicationProcess()->GetResourceLimit(), 61 KScopedResourceReservation session_reservation(
64 LimitableResource::SessionCountMax); 62 m_kernel.ApplicationProcess()->GetResourceLimit(), LimitableResource::SessionCountMax);
65 R_UNLESS(session_reservation.Succeeded(), ResultLimitReached); 63 R_UNLESS(session_reservation.Succeeded(), ResultLimitReached);
66 64
65 // Allocate a session normally.
66 session = KSession::Create(m_kernel);
67
68 // Check that we successfully created a session.
69 R_UNLESS(session != nullptr, ResultOutOfResource);
70
67 // Update the session counts. 71 // Update the session counts.
68 { 72 {
73 ON_RESULT_FAILURE {
74 session->Close();
75 };
76
69 // Atomically increment the number of sessions. 77 // Atomically increment the number of sessions.
70 s32 new_sessions{}; 78 s32 new_sessions{};
71 { 79 {
72 const auto max = max_sessions; 80 const auto max = m_max_sessions;
73 auto cur_sessions = num_sessions.load(std::memory_order_acquire); 81 auto cur_sessions = m_num_sessions.load(std::memory_order_acquire);
74 do { 82 do {
75 R_UNLESS(cur_sessions < max, ResultOutOfSessions); 83 R_UNLESS(cur_sessions < max, ResultOutOfSessions);
76 new_sessions = cur_sessions + 1; 84 new_sessions = cur_sessions + 1;
77 } while (!num_sessions.compare_exchange_weak(cur_sessions, new_sessions, 85 } while (!m_num_sessions.compare_exchange_weak(cur_sessions, new_sessions,
78 std::memory_order_relaxed)); 86 std::memory_order_relaxed));
79 } 87 }
80 88
81 // Atomically update the peak session tracking. 89 // Atomically update the peak session tracking.
82 { 90 {
83 auto peak = peak_sessions.load(std::memory_order_acquire); 91 auto peak = m_peak_sessions.load(std::memory_order_acquire);
84 do { 92 do {
85 if (peak >= new_sessions) { 93 if (peak >= new_sessions) {
86 break; 94 break;
87 } 95 }
88 } while (!peak_sessions.compare_exchange_weak(peak, new_sessions, 96 } while (!m_peak_sessions.compare_exchange_weak(peak, new_sessions,
89 std::memory_order_relaxed)); 97 std::memory_order_relaxed));
90 } 98 }
91 } 99 }
92 100
93 // Create a new session.
94 KSession* session = KSession::Create(kernel);
95 if (session == nullptr) {
96 // Decrement the session count.
97 const auto prev = num_sessions--;
98 if (prev == max_sessions) {
99 this->NotifyAvailable();
100 }
101
102 return ResultOutOfResource;
103 }
104
105 // Initialize the session. 101 // Initialize the session.
106 session->Initialize(this, parent->GetName()); 102 session->Initialize(this, m_parent->GetName());
107 103
108 // Commit the session reservation. 104 // Commit the session reservation.
109 session_reservation.Commit(); 105 session_reservation.Commit();
110 106
111 // Register the session. 107 // Register the session.
112 KSession::Register(kernel, session); 108 KSession::Register(m_kernel, session);
113 auto session_guard = SCOPE_GUARD({ 109 ON_RESULT_FAILURE {
114 session->GetClientSession().Close(); 110 session->GetClientSession().Close();
115 session->GetServerSession().Close(); 111 session->GetServerSession().Close();
116 }); 112 };
117 113
118 // Enqueue the session with our parent. 114 // Enqueue the session with our parent.
119 R_TRY(parent->EnqueueSession(std::addressof(session->GetServerSession()))); 115 R_TRY(m_parent->EnqueueSession(std::addressof(session->GetServerSession())));
120 116
121 // We succeeded, so set the output. 117 // We succeeded, so set the output.
122 session_guard.Cancel();
123 *out = std::addressof(session->GetClientSession()); 118 *out = std::addressof(session->GetClientSession());
124 return ResultSuccess; 119 R_SUCCEED();
125} 120}
126 121
127} // namespace Kernel 122} // namespace Kernel
diff --git a/src/core/hle/kernel/k_client_port.h b/src/core/hle/kernel/k_client_port.h
index a757cf9cd..23db06ddf 100644
--- a/src/core/hle/kernel/k_client_port.h
+++ b/src/core/hle/kernel/k_client_port.h
@@ -4,7 +4,6 @@
4#pragma once 4#pragma once
5 5
6#include <memory> 6#include <memory>
7#include <string>
8 7
9#include "common/common_types.h" 8#include "common/common_types.h"
10#include "core/hle/kernel/k_synchronization_object.h" 9#include "core/hle/kernel/k_synchronization_object.h"
@@ -20,28 +19,28 @@ class KClientPort final : public KSynchronizationObject {
20 KERNEL_AUTOOBJECT_TRAITS(KClientPort, KSynchronizationObject); 19 KERNEL_AUTOOBJECT_TRAITS(KClientPort, KSynchronizationObject);
21 20
22public: 21public:
23 explicit KClientPort(KernelCore& kernel_); 22 explicit KClientPort(KernelCore& kernel);
24 ~KClientPort() override; 23 ~KClientPort() override;
25 24
26 void Initialize(KPort* parent_, s32 max_sessions_, std::string&& name_); 25 void Initialize(KPort* parent, s32 max_sessions);
27 void OnSessionFinalized(); 26 void OnSessionFinalized();
28 void OnServerClosed(); 27 void OnServerClosed();
29 28
30 const KPort* GetParent() const { 29 const KPort* GetParent() const {
31 return parent; 30 return m_parent;
32 } 31 }
33 KPort* GetParent() { 32 KPort* GetParent() {
34 return parent; 33 return m_parent;
35 } 34 }
36 35
37 s32 GetNumSessions() const { 36 s32 GetNumSessions() const {
38 return num_sessions; 37 return m_num_sessions;
39 } 38 }
40 s32 GetPeakSessions() const { 39 s32 GetPeakSessions() const {
41 return peak_sessions; 40 return m_peak_sessions;
42 } 41 }
43 s32 GetMaxSessions() const { 42 s32 GetMaxSessions() const {
44 return max_sessions; 43 return m_max_sessions;
45 } 44 }
46 45
47 bool IsLight() const; 46 bool IsLight() const;
@@ -54,10 +53,10 @@ public:
54 Result CreateSession(KClientSession** out); 53 Result CreateSession(KClientSession** out);
55 54
56private: 55private:
57 std::atomic<s32> num_sessions{}; 56 std::atomic<s32> m_num_sessions{};
58 std::atomic<s32> peak_sessions{}; 57 std::atomic<s32> m_peak_sessions{};
59 s32 max_sessions{}; 58 s32 m_max_sessions{};
60 KPort* parent{}; 59 KPort* m_parent{};
61}; 60};
62 61
63} // namespace Kernel 62} // namespace Kernel
diff --git a/src/core/hle/kernel/k_client_session.cpp b/src/core/hle/kernel/k_client_session.cpp
index da0c9ac8c..d998b2be2 100644
--- a/src/core/hle/kernel/k_client_session.cpp
+++ b/src/core/hle/kernel/k_client_session.cpp
@@ -12,28 +12,27 @@ namespace Kernel {
12 12
13static constexpr u32 MessageBufferSize = 0x100; 13static constexpr u32 MessageBufferSize = 0x100;
14 14
15KClientSession::KClientSession(KernelCore& kernel_) 15KClientSession::KClientSession(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {}
16 : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
17KClientSession::~KClientSession() = default; 16KClientSession::~KClientSession() = default;
18 17
19void KClientSession::Destroy() { 18void KClientSession::Destroy() {
20 parent->OnClientClosed(); 19 m_parent->OnClientClosed();
21 parent->Close(); 20 m_parent->Close();
22} 21}
23 22
24void KClientSession::OnServerClosed() {} 23void KClientSession::OnServerClosed() {}
25 24
26Result KClientSession::SendSyncRequest() { 25Result KClientSession::SendSyncRequest() {
27 // Create a session request. 26 // Create a session request.
28 KSessionRequest* request = KSessionRequest::Create(kernel); 27 KSessionRequest* request = KSessionRequest::Create(m_kernel);
29 R_UNLESS(request != nullptr, ResultOutOfResource); 28 R_UNLESS(request != nullptr, ResultOutOfResource);
30 SCOPE_EXIT({ request->Close(); }); 29 SCOPE_EXIT({ request->Close(); });
31 30
32 // Initialize the request. 31 // Initialize the request.
33 request->Initialize(nullptr, GetCurrentThread(kernel).GetTLSAddress(), MessageBufferSize); 32 request->Initialize(nullptr, GetCurrentThread(m_kernel).GetTlsAddress(), MessageBufferSize);
34 33
35 // Send the request. 34 // Send the request.
36 return parent->GetServerSession().OnRequest(request); 35 R_RETURN(m_parent->GetServerSession().OnRequest(request));
37} 36}
38 37
39} // namespace Kernel 38} // namespace Kernel
diff --git a/src/core/hle/kernel/k_client_session.h b/src/core/hle/kernel/k_client_session.h
index b4a19c546..9b62e55e4 100644
--- a/src/core/hle/kernel/k_client_session.h
+++ b/src/core/hle/kernel/k_client_session.h
@@ -30,20 +30,19 @@ class KClientSession final
30 KERNEL_AUTOOBJECT_TRAITS(KClientSession, KAutoObject); 30 KERNEL_AUTOOBJECT_TRAITS(KClientSession, KAutoObject);
31 31
32public: 32public:
33 explicit KClientSession(KernelCore& kernel_); 33 explicit KClientSession(KernelCore& kernel);
34 ~KClientSession() override; 34 ~KClientSession() override;
35 35
36 void Initialize(KSession* parent_session_, std::string&& name_) { 36 void Initialize(KSession* parent) {
37 // Set member variables. 37 // Set member variables.
38 parent = parent_session_; 38 m_parent = parent;
39 name = std::move(name_);
40 } 39 }
41 40
42 void Destroy() override; 41 void Destroy() override;
43 static void PostDestroy([[maybe_unused]] uintptr_t arg) {} 42 static void PostDestroy(uintptr_t arg) {}
44 43
45 KSession* GetParent() const { 44 KSession* GetParent() const {
46 return parent; 45 return m_parent;
47 } 46 }
48 47
49 Result SendSyncRequest(); 48 Result SendSyncRequest();
@@ -51,7 +50,7 @@ public:
51 void OnServerClosed(); 50 void OnServerClosed();
52 51
53private: 52private:
54 KSession* parent{}; 53 KSession* m_parent{};
55}; 54};
56 55
57} // namespace Kernel 56} // namespace Kernel
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index 6c44a9e99..89df6b5d8 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -16,18 +16,18 @@
16 16
17namespace Kernel { 17namespace Kernel {
18 18
19KCodeMemory::KCodeMemory(KernelCore& kernel_) 19KCodeMemory::KCodeMemory(KernelCore& kernel)
20 : KAutoObjectWithSlabHeapAndContainer{kernel_}, m_lock(kernel_) {} 20 : KAutoObjectWithSlabHeapAndContainer{kernel}, m_lock(kernel) {}
21 21
22Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, size_t size) { 22Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, size_t size) {
23 // Set members. 23 // Set members.
24 m_owner = GetCurrentProcessPointer(kernel); 24 m_owner = GetCurrentProcessPointer(m_kernel);
25 25
26 // Get the owner page table. 26 // Get the owner page table.
27 auto& page_table = m_owner->PageTable(); 27 auto& page_table = m_owner->PageTable();
28 28
29 // Construct the page group. 29 // Construct the page group.
30 m_page_group.emplace(kernel, page_table.GetBlockInfoManager()); 30 m_page_group.emplace(m_kernel, page_table.GetBlockInfoManager());
31 31
32 // Lock the memory. 32 // Lock the memory.
33 R_TRY(page_table.LockForCodeMemory(std::addressof(*m_page_group), addr, size)) 33 R_TRY(page_table.LockForCodeMemory(std::addressof(*m_page_group), addr, size))
@@ -45,7 +45,7 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
45 m_is_mapped = false; 45 m_is_mapped = false;
46 46
47 // We succeeded. 47 // We succeeded.
48 return ResultSuccess; 48 R_SUCCEED();
49} 49}
50 50
51void KCodeMemory::Finalize() { 51void KCodeMemory::Finalize() {
@@ -74,13 +74,13 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
74 R_UNLESS(!m_is_mapped, ResultInvalidState); 74 R_UNLESS(!m_is_mapped, ResultInvalidState);
75 75
76 // Map the memory. 76 // Map the memory.
77 R_TRY(GetCurrentProcess(kernel).PageTable().MapPageGroup( 77 R_TRY(GetCurrentProcess(m_kernel).PageTable().MapPageGroup(
78 address, *m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite)); 78 address, *m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));
79 79
80 // Mark ourselves as mapped. 80 // Mark ourselves as mapped.
81 m_is_mapped = true; 81 m_is_mapped = true;
82 82
83 return ResultSuccess; 83 R_SUCCEED();
84} 84}
85 85
86Result KCodeMemory::Unmap(VAddr address, size_t size) { 86Result KCodeMemory::Unmap(VAddr address, size_t size) {
@@ -91,13 +91,13 @@ Result KCodeMemory::Unmap(VAddr address, size_t size) {
91 KScopedLightLock lk(m_lock); 91 KScopedLightLock lk(m_lock);
92 92
93 // Unmap the memory. 93 // Unmap the memory.
94 R_TRY(GetCurrentProcess(kernel).PageTable().UnmapPageGroup(address, *m_page_group, 94 R_TRY(GetCurrentProcess(m_kernel).PageTable().UnmapPageGroup(address, *m_page_group,
95 KMemoryState::CodeOut)); 95 KMemoryState::CodeOut));
96 96
97 // Mark ourselves as unmapped. 97 // Mark ourselves as unmapped.
98 m_is_mapped = false; 98 m_is_mapped = false;
99 99
100 return ResultSuccess; 100 R_SUCCEED();
101} 101}
102 102
103Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) { 103Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
@@ -131,7 +131,7 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission
131 // Mark ourselves as mapped. 131 // Mark ourselves as mapped.
132 m_is_owner_mapped = true; 132 m_is_owner_mapped = true;
133 133
134 return ResultSuccess; 134 R_SUCCEED();
135} 135}
136 136
137Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) { 137Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
@@ -147,7 +147,7 @@ Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
147 // Mark ourselves as unmapped. 147 // Mark ourselves as unmapped.
148 m_is_owner_mapped = false; 148 m_is_owner_mapped = false;
149 149
150 return ResultSuccess; 150 R_SUCCEED();
151} 151}
152 152
153} // namespace Kernel 153} // namespace Kernel
diff --git a/src/core/hle/kernel/k_code_memory.h b/src/core/hle/kernel/k_code_memory.h
index 5b260b385..23cbb283b 100644
--- a/src/core/hle/kernel/k_code_memory.h
+++ b/src/core/hle/kernel/k_code_memory.h
@@ -29,7 +29,7 @@ class KCodeMemory final
29 KERNEL_AUTOOBJECT_TRAITS(KCodeMemory, KAutoObject); 29 KERNEL_AUTOOBJECT_TRAITS(KCodeMemory, KAutoObject);
30 30
31public: 31public:
32 explicit KCodeMemory(KernelCore& kernel_); 32 explicit KCodeMemory(KernelCore& kernel);
33 33
34 Result Initialize(Core::DeviceMemory& device_memory, VAddr address, size_t size); 34 Result Initialize(Core::DeviceMemory& device_memory, VAddr address, size_t size);
35 void Finalize() override; 35 void Finalize() override;
@@ -42,7 +42,7 @@ public:
42 bool IsInitialized() const override { 42 bool IsInitialized() const override {
43 return m_is_initialized; 43 return m_is_initialized;
44 } 44 }
45 static void PostDestroy([[maybe_unused]] uintptr_t arg) {} 45 static void PostDestroy(uintptr_t arg) {}
46 46
47 KProcess* GetOwner() const override { 47 KProcess* GetOwner() const override {
48 return m_owner; 48 return m_owner;
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index f40cf92b1..58b8609d8 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -4,7 +4,6 @@
4#include "core/arm/exclusive_monitor.h" 4#include "core/arm/exclusive_monitor.h"
5#include "core/core.h" 5#include "core/core.h"
6#include "core/hle/kernel/k_condition_variable.h" 6#include "core/hle/kernel/k_condition_variable.h"
7#include "core/hle/kernel/k_linked_list.h"
8#include "core/hle/kernel/k_process.h" 7#include "core/hle/kernel/k_process.h"
9#include "core/hle/kernel/k_scheduler.h" 8#include "core/hle/kernel/k_scheduler.h"
10#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" 9#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
@@ -58,8 +57,8 @@ bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero
58 57
59class ThreadQueueImplForKConditionVariableWaitForAddress final : public KThreadQueue { 58class ThreadQueueImplForKConditionVariableWaitForAddress final : public KThreadQueue {
60public: 59public:
61 explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel_) 60 explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel)
62 : KThreadQueue(kernel_) {} 61 : KThreadQueue(kernel) {}
63 62
64 void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override { 63 void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
65 // Remove the thread as a waiter from its owner. 64 // Remove the thread as a waiter from its owner.
@@ -76,8 +75,8 @@ private:
76 75
77public: 76public:
78 explicit ThreadQueueImplForKConditionVariableWaitConditionVariable( 77 explicit ThreadQueueImplForKConditionVariableWaitConditionVariable(
79 KernelCore& kernel_, KConditionVariable::ThreadTree* t) 78 KernelCore& kernel, KConditionVariable::ThreadTree* t)
80 : KThreadQueue(kernel_), m_tree(t) {} 79 : KThreadQueue(kernel), m_tree(t) {}
81 80
82 void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override { 81 void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
83 // Remove the thread as a waiter from its owner. 82 // Remove the thread as a waiter from its owner.
@@ -98,17 +97,17 @@ public:
98 97
99} // namespace 98} // namespace
100 99
101KConditionVariable::KConditionVariable(Core::System& system_) 100KConditionVariable::KConditionVariable(Core::System& system)
102 : system{system_}, kernel{system.Kernel()} {} 101 : m_system{system}, m_kernel{system.Kernel()} {}
103 102
104KConditionVariable::~KConditionVariable() = default; 103KConditionVariable::~KConditionVariable() = default;
105 104
106Result KConditionVariable::SignalToAddress(VAddr addr) { 105Result KConditionVariable::SignalToAddress(VAddr addr) {
107 KThread* owner_thread = GetCurrentThreadPointer(kernel); 106 KThread* owner_thread = GetCurrentThreadPointer(m_kernel);
108 107
109 // Signal the address. 108 // Signal the address.
110 { 109 {
111 KScopedSchedulerLock sl(kernel); 110 KScopedSchedulerLock sl(m_kernel);
112 111
113 // Remove waiter thread. 112 // Remove waiter thread.
114 bool has_waiters{}; 113 bool has_waiters{};
@@ -129,7 +128,7 @@ Result KConditionVariable::SignalToAddress(VAddr addr) {
129 128
130 // Write the value to userspace. 129 // Write the value to userspace.
131 Result result{ResultSuccess}; 130 Result result{ResultSuccess};
132 if (WriteToUser(system, addr, std::addressof(next_value))) [[likely]] { 131 if (WriteToUser(m_system, addr, std::addressof(next_value))) [[likely]] {
133 result = ResultSuccess; 132 result = ResultSuccess;
134 } else { 133 } else {
135 result = ResultInvalidCurrentMemory; 134 result = ResultInvalidCurrentMemory;
@@ -145,26 +144,27 @@ Result KConditionVariable::SignalToAddress(VAddr addr) {
145} 144}
146 145
147Result KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) { 146Result KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
148 KThread* cur_thread = GetCurrentThreadPointer(kernel); 147 KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
149 ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel); 148 ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(m_kernel);
150 149
151 // Wait for the address. 150 // Wait for the address.
152 KThread* owner_thread{}; 151 KThread* owner_thread{};
153 { 152 {
154 KScopedSchedulerLock sl(kernel); 153 KScopedSchedulerLock sl(m_kernel);
155 154
156 // Check if the thread should terminate. 155 // Check if the thread should terminate.
157 R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested); 156 R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested);
158 157
159 // Read the tag from userspace. 158 // Read the tag from userspace.
160 u32 test_tag{}; 159 u32 test_tag{};
161 R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr), ResultInvalidCurrentMemory); 160 R_UNLESS(ReadFromUser(m_system, std::addressof(test_tag), addr),
161 ResultInvalidCurrentMemory);
162 162
163 // If the tag isn't the handle (with wait mask), we're done. 163 // If the tag isn't the handle (with wait mask), we're done.
164 R_SUCCEED_IF(test_tag != (handle | Svc::HandleWaitMask)); 164 R_SUCCEED_IF(test_tag != (handle | Svc::HandleWaitMask));
165 165
166 // Get the lock owner thread. 166 // Get the lock owner thread.
167 owner_thread = GetCurrentProcess(kernel) 167 owner_thread = GetCurrentProcess(m_kernel)
168 .GetHandleTable() 168 .GetHandleTable()
169 .GetObjectWithoutPseudoHandle<KThread>(handle) 169 .GetObjectWithoutPseudoHandle<KThread>(handle)
170 .ReleasePointerUnsafe(); 170 .ReleasePointerUnsafe();
@@ -177,19 +177,18 @@ Result KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value)
177 // Begin waiting. 177 // Begin waiting.
178 cur_thread->BeginWait(std::addressof(wait_queue)); 178 cur_thread->BeginWait(std::addressof(wait_queue));
179 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar); 179 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
180 cur_thread->SetMutexWaitAddressForDebugging(addr);
181 } 180 }
182 181
183 // Close our reference to the owner thread, now that the wait is over. 182 // Close our reference to the owner thread, now that the wait is over.
184 owner_thread->Close(); 183 owner_thread->Close();
185 184
186 // Get the wait result. 185 // Get the wait result.
187 return cur_thread->GetWaitResult(); 186 R_RETURN(cur_thread->GetWaitResult());
188} 187}
189 188
190void KConditionVariable::SignalImpl(KThread* thread) { 189void KConditionVariable::SignalImpl(KThread* thread) {
191 // Check pre-conditions. 190 // Check pre-conditions.
192 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 191 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
193 192
194 // Update the tag. 193 // Update the tag.
195 VAddr address = thread->GetAddressKey(); 194 VAddr address = thread->GetAddressKey();
@@ -204,7 +203,7 @@ void KConditionVariable::SignalImpl(KThread* thread) {
204 // TODO(bunnei): We should call CanAccessAtomic(..) here. 203 // TODO(bunnei): We should call CanAccessAtomic(..) here.
205 can_access = true; 204 can_access = true;
206 if (can_access) [[likely]] { 205 if (can_access) [[likely]] {
207 UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag, 206 UpdateLockAtomic(m_system, std::addressof(prev_tag), address, own_tag,
208 Svc::HandleWaitMask); 207 Svc::HandleWaitMask);
209 } 208 }
210 } 209 }
@@ -215,7 +214,7 @@ void KConditionVariable::SignalImpl(KThread* thread) {
215 thread->EndWait(ResultSuccess); 214 thread->EndWait(ResultSuccess);
216 } else { 215 } else {
217 // Get the previous owner. 216 // Get the previous owner.
218 KThread* owner_thread = GetCurrentProcess(kernel) 217 KThread* owner_thread = GetCurrentProcess(m_kernel)
219 .GetHandleTable() 218 .GetHandleTable()
220 .GetObjectWithoutPseudoHandle<KThread>( 219 .GetObjectWithoutPseudoHandle<KThread>(
221 static_cast<Handle>(prev_tag & ~Svc::HandleWaitMask)) 220 static_cast<Handle>(prev_tag & ~Svc::HandleWaitMask))
@@ -240,14 +239,14 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
240 // Perform signaling. 239 // Perform signaling.
241 s32 num_waiters{}; 240 s32 num_waiters{};
242 { 241 {
243 KScopedSchedulerLock sl(kernel); 242 KScopedSchedulerLock sl(m_kernel);
244 243
245 auto it = thread_tree.nfind_key({cv_key, -1}); 244 auto it = m_tree.nfind_key({cv_key, -1});
246 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && 245 while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) &&
247 (it->GetConditionVariableKey() == cv_key)) { 246 (it->GetConditionVariableKey() == cv_key)) {
248 KThread* target_thread = std::addressof(*it); 247 KThread* target_thread = std::addressof(*it);
249 248
250 it = thread_tree.erase(it); 249 it = m_tree.erase(it);
251 target_thread->ClearConditionVariable(); 250 target_thread->ClearConditionVariable();
252 251
253 this->SignalImpl(target_thread); 252 this->SignalImpl(target_thread);
@@ -256,26 +255,27 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
256 } 255 }
257 256
258 // If we have no waiters, clear the has waiter flag. 257 // If we have no waiters, clear the has waiter flag.
259 if (it == thread_tree.end() || it->GetConditionVariableKey() != cv_key) { 258 if (it == m_tree.end() || it->GetConditionVariableKey() != cv_key) {
260 const u32 has_waiter_flag{}; 259 const u32 has_waiter_flag{};
261 WriteToUser(system, cv_key, std::addressof(has_waiter_flag)); 260 WriteToUser(m_system, cv_key, std::addressof(has_waiter_flag));
262 } 261 }
263 } 262 }
264} 263}
265 264
266Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) { 265Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
267 // Prepare to wait. 266 // Prepare to wait.
268 KThread* cur_thread = GetCurrentThreadPointer(kernel); 267 KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
269 ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue( 268 KHardwareTimer* timer{};
270 kernel, std::addressof(thread_tree)); 269 ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue(m_kernel,
270 std::addressof(m_tree));
271 271
272 { 272 {
273 KScopedSchedulerLockAndSleep slp(kernel, cur_thread, timeout); 273 KScopedSchedulerLockAndSleep slp(m_kernel, std::addressof(timer), cur_thread, timeout);
274 274
275 // Check that the thread isn't terminating. 275 // Check that the thread isn't terminating.
276 if (cur_thread->IsTerminationRequested()) { 276 if (cur_thread->IsTerminationRequested()) {
277 slp.CancelSleep(); 277 slp.CancelSleep();
278 return ResultTerminationRequested; 278 R_THROW(ResultTerminationRequested);
279 } 279 }
280 280
281 // Update the value and process for the next owner. 281 // Update the value and process for the next owner.
@@ -301,14 +301,14 @@ Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
301 // Write to the cv key. 301 // Write to the cv key.
302 { 302 {
303 const u32 has_waiter_flag = 1; 303 const u32 has_waiter_flag = 1;
304 WriteToUser(system, key, std::addressof(has_waiter_flag)); 304 WriteToUser(m_system, key, std::addressof(has_waiter_flag));
305 // TODO(bunnei): We should call DataMemoryBarrier(..) here. 305 std::atomic_thread_fence(std::memory_order_seq_cst);
306 } 306 }
307 307
308 // Write the value to userspace. 308 // Write the value to userspace.
309 if (!WriteToUser(system, addr, std::addressof(next_value))) { 309 if (!WriteToUser(m_system, addr, std::addressof(next_value))) {
310 slp.CancelSleep(); 310 slp.CancelSleep();
311 return ResultInvalidCurrentMemory; 311 R_THROW(ResultInvalidCurrentMemory);
312 } 312 }
313 } 313 }
314 314
@@ -316,17 +316,17 @@ Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
316 R_UNLESS(timeout != 0, ResultTimedOut); 316 R_UNLESS(timeout != 0, ResultTimedOut);
317 317
318 // Update condition variable tracking. 318 // Update condition variable tracking.
319 cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value); 319 cur_thread->SetConditionVariable(std::addressof(m_tree), addr, key, value);
320 thread_tree.insert(*cur_thread); 320 m_tree.insert(*cur_thread);
321 321
322 // Begin waiting. 322 // Begin waiting.
323 wait_queue.SetHardwareTimer(timer);
323 cur_thread->BeginWait(std::addressof(wait_queue)); 324 cur_thread->BeginWait(std::addressof(wait_queue));
324 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar); 325 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
325 cur_thread->SetMutexWaitAddressForDebugging(addr);
326 } 326 }
327 327
328 // Get the wait result. 328 // Get the wait result.
329 return cur_thread->GetWaitResult(); 329 R_RETURN(cur_thread->GetWaitResult());
330} 330}
331 331
332} // namespace Kernel 332} // namespace Kernel
diff --git a/src/core/hle/kernel/k_condition_variable.h b/src/core/hle/kernel/k_condition_variable.h
index fad4ed011..fbd2c1fc0 100644
--- a/src/core/hle/kernel/k_condition_variable.h
+++ b/src/core/hle/kernel/k_condition_variable.h
@@ -21,36 +21,36 @@ class KConditionVariable {
21public: 21public:
22 using ThreadTree = typename KThread::ConditionVariableThreadTreeType; 22 using ThreadTree = typename KThread::ConditionVariableThreadTreeType;
23 23
24 explicit KConditionVariable(Core::System& system_); 24 explicit KConditionVariable(Core::System& system);
25 ~KConditionVariable(); 25 ~KConditionVariable();
26 26
27 // Arbitration 27 // Arbitration
28 [[nodiscard]] Result SignalToAddress(VAddr addr); 28 Result SignalToAddress(VAddr addr);
29 [[nodiscard]] Result WaitForAddress(Handle handle, VAddr addr, u32 value); 29 Result WaitForAddress(Handle handle, VAddr addr, u32 value);
30 30
31 // Condition variable 31 // Condition variable
32 void Signal(u64 cv_key, s32 count); 32 void Signal(u64 cv_key, s32 count);
33 [[nodiscard]] Result Wait(VAddr addr, u64 key, u32 value, s64 timeout); 33 Result Wait(VAddr addr, u64 key, u32 value, s64 timeout);
34 34
35private: 35private:
36 void SignalImpl(KThread* thread); 36 void SignalImpl(KThread* thread);
37 37
38 ThreadTree thread_tree; 38private:
39 39 Core::System& m_system;
40 Core::System& system; 40 KernelCore& m_kernel;
41 KernelCore& kernel; 41 ThreadTree m_tree{};
42}; 42};
43 43
44inline void BeforeUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree, 44inline void BeforeUpdatePriority(KernelCore& kernel, KConditionVariable::ThreadTree* tree,
45 KThread* thread) { 45 KThread* thread) {
46 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 46 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
47 47
48 tree->erase(tree->iterator_to(*thread)); 48 tree->erase(tree->iterator_to(*thread));
49} 49}
50 50
51inline void AfterUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree, 51inline void AfterUpdatePriority(KernelCore& kernel, KConditionVariable::ThreadTree* tree,
52 KThread* thread) { 52 KThread* thread) {
53 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 53 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
54 54
55 tree->insert(*thread); 55 tree->insert(*thread);
56} 56}
diff --git a/src/core/hle/kernel/k_debug.h b/src/core/hle/kernel/k_debug.h
index e3a0689c8..2290e3bca 100644
--- a/src/core/hle/kernel/k_debug.h
+++ b/src/core/hle/kernel/k_debug.h
@@ -12,9 +12,9 @@ class KDebug final : public KAutoObjectWithSlabHeapAndContainer<KDebug, KAutoObj
12 KERNEL_AUTOOBJECT_TRAITS(KDebug, KAutoObject); 12 KERNEL_AUTOOBJECT_TRAITS(KDebug, KAutoObject);
13 13
14public: 14public:
15 explicit KDebug(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {} 15 explicit KDebug(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {}
16 16
17 static void PostDestroy([[maybe_unused]] uintptr_t arg) {} 17 static void PostDestroy(uintptr_t arg) {}
18}; 18};
19 19
20} // namespace Kernel 20} // namespace Kernel
diff --git a/src/core/hle/kernel/k_device_address_space.cpp b/src/core/hle/kernel/k_device_address_space.cpp
index 27659ea3b..a2fc4fe1f 100644
--- a/src/core/hle/kernel/k_device_address_space.cpp
+++ b/src/core/hle/kernel/k_device_address_space.cpp
@@ -9,8 +9,8 @@
9 9
10namespace Kernel { 10namespace Kernel {
11 11
12KDeviceAddressSpace::KDeviceAddressSpace(KernelCore& kernel_) 12KDeviceAddressSpace::KDeviceAddressSpace(KernelCore& kernel)
13 : KAutoObjectWithSlabHeapAndContainer(kernel_), m_lock(kernel_), m_is_initialized(false) {} 13 : KAutoObjectWithSlabHeapAndContainer(kernel), m_lock(kernel), m_is_initialized(false) {}
14KDeviceAddressSpace::~KDeviceAddressSpace() = default; 14KDeviceAddressSpace::~KDeviceAddressSpace() = default;
15 15
16void KDeviceAddressSpace::Initialize() { 16void KDeviceAddressSpace::Initialize() {
diff --git a/src/core/hle/kernel/k_event.cpp b/src/core/hle/kernel/k_event.cpp
index d973853ab..d92b491f8 100644
--- a/src/core/hle/kernel/k_event.cpp
+++ b/src/core/hle/kernel/k_event.cpp
@@ -7,8 +7,8 @@
7 7
8namespace Kernel { 8namespace Kernel {
9 9
10KEvent::KEvent(KernelCore& kernel_) 10KEvent::KEvent(KernelCore& kernel)
11 : KAutoObjectWithSlabHeapAndContainer{kernel_}, m_readable_event{kernel_} {} 11 : KAutoObjectWithSlabHeapAndContainer{kernel}, m_readable_event{kernel} {}
12 12
13KEvent::~KEvent() = default; 13KEvent::~KEvent() = default;
14 14
@@ -36,7 +36,7 @@ void KEvent::Finalize() {
36} 36}
37 37
38Result KEvent::Signal() { 38Result KEvent::Signal() {
39 KScopedSchedulerLock sl{kernel}; 39 KScopedSchedulerLock sl{m_kernel};
40 40
41 R_SUCCEED_IF(m_readable_event_destroyed); 41 R_SUCCEED_IF(m_readable_event_destroyed);
42 42
@@ -44,7 +44,7 @@ Result KEvent::Signal() {
44} 44}
45 45
46Result KEvent::Clear() { 46Result KEvent::Clear() {
47 KScopedSchedulerLock sl{kernel}; 47 KScopedSchedulerLock sl{m_kernel};
48 48
49 R_SUCCEED_IF(m_readable_event_destroyed); 49 R_SUCCEED_IF(m_readable_event_destroyed);
50 50
diff --git a/src/core/hle/kernel/k_event.h b/src/core/hle/kernel/k_event.h
index 48ce7d9a0..f522b0a84 100644
--- a/src/core/hle/kernel/k_event.h
+++ b/src/core/hle/kernel/k_event.h
@@ -16,7 +16,7 @@ class KEvent final : public KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObj
16 KERNEL_AUTOOBJECT_TRAITS(KEvent, KAutoObject); 16 KERNEL_AUTOOBJECT_TRAITS(KEvent, KAutoObject);
17 17
18public: 18public:
19 explicit KEvent(KernelCore& kernel_); 19 explicit KEvent(KernelCore& kernel);
20 ~KEvent() override; 20 ~KEvent() override;
21 21
22 void Initialize(KProcess* owner); 22 void Initialize(KProcess* owner);
diff --git a/src/core/hle/kernel/k_light_condition_variable.cpp b/src/core/hle/kernel/k_light_condition_variable.cpp
index cade99cfd..6d5a815aa 100644
--- a/src/core/hle/kernel/k_light_condition_variable.cpp
+++ b/src/core/hle/kernel/k_light_condition_variable.cpp
@@ -13,9 +13,9 @@ namespace {
13 13
14class ThreadQueueImplForKLightConditionVariable final : public KThreadQueue { 14class ThreadQueueImplForKLightConditionVariable final : public KThreadQueue {
15public: 15public:
16 ThreadQueueImplForKLightConditionVariable(KernelCore& kernel_, KThread::WaiterList* wl, 16 ThreadQueueImplForKLightConditionVariable(KernelCore& kernel, KThread::WaiterList* wl,
17 bool term) 17 bool term)
18 : KThreadQueue(kernel_), m_wait_list(wl), m_allow_terminating_thread(term) {} 18 : KThreadQueue(kernel), m_wait_list(wl), m_allow_terminating_thread(term) {}
19 19
20 void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override { 20 void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
21 // Only process waits if we're allowed to. 21 // Only process waits if we're allowed to.
@@ -39,14 +39,15 @@ private:
39 39
40void KLightConditionVariable::Wait(KLightLock* lock, s64 timeout, bool allow_terminating_thread) { 40void KLightConditionVariable::Wait(KLightLock* lock, s64 timeout, bool allow_terminating_thread) {
41 // Create thread queue. 41 // Create thread queue.
42 KThread* owner = GetCurrentThreadPointer(kernel); 42 KThread* owner = GetCurrentThreadPointer(m_kernel);
43 KHardwareTimer* timer{};
43 44
44 ThreadQueueImplForKLightConditionVariable wait_queue(kernel, std::addressof(wait_list), 45 ThreadQueueImplForKLightConditionVariable wait_queue(m_kernel, std::addressof(m_wait_list),
45 allow_terminating_thread); 46 allow_terminating_thread);
46 47
47 // Sleep the thread. 48 // Sleep the thread.
48 { 49 {
49 KScopedSchedulerLockAndSleep lk(kernel, owner, timeout); 50 KScopedSchedulerLockAndSleep lk(m_kernel, std::addressof(timer), owner, timeout);
50 51
51 if (!allow_terminating_thread && owner->IsTerminationRequested()) { 52 if (!allow_terminating_thread && owner->IsTerminationRequested()) {
52 lk.CancelSleep(); 53 lk.CancelSleep();
@@ -56,9 +57,10 @@ void KLightConditionVariable::Wait(KLightLock* lock, s64 timeout, bool allow_ter
56 lock->Unlock(); 57 lock->Unlock();
57 58
58 // Add the thread to the queue. 59 // Add the thread to the queue.
59 wait_list.push_back(*owner); 60 m_wait_list.push_back(*owner);
60 61
61 // Begin waiting. 62 // Begin waiting.
63 wait_queue.SetHardwareTimer(timer);
62 owner->BeginWait(std::addressof(wait_queue)); 64 owner->BeginWait(std::addressof(wait_queue));
63 } 65 }
64 66
@@ -67,10 +69,10 @@ void KLightConditionVariable::Wait(KLightLock* lock, s64 timeout, bool allow_ter
67} 69}
68 70
69void KLightConditionVariable::Broadcast() { 71void KLightConditionVariable::Broadcast() {
70 KScopedSchedulerLock lk(kernel); 72 KScopedSchedulerLock lk(m_kernel);
71 73
72 // Signal all threads. 74 // Signal all threads.
73 for (auto it = wait_list.begin(); it != wait_list.end(); it = wait_list.erase(it)) { 75 for (auto it = m_wait_list.begin(); it != m_wait_list.end(); it = m_wait_list.erase(it)) {
74 it->EndWait(ResultSuccess); 76 it->EndWait(ResultSuccess);
75 } 77 }
76} 78}
diff --git a/src/core/hle/kernel/k_light_condition_variable.h b/src/core/hle/kernel/k_light_condition_variable.h
index 3cabd6b4f..ab612426d 100644
--- a/src/core/hle/kernel/k_light_condition_variable.h
+++ b/src/core/hle/kernel/k_light_condition_variable.h
@@ -13,13 +13,13 @@ class KLightLock;
13 13
14class KLightConditionVariable { 14class KLightConditionVariable {
15public: 15public:
16 explicit KLightConditionVariable(KernelCore& kernel_) : kernel{kernel_} {} 16 explicit KLightConditionVariable(KernelCore& kernel) : m_kernel{kernel} {}
17 17
18 void Wait(KLightLock* lock, s64 timeout = -1, bool allow_terminating_thread = true); 18 void Wait(KLightLock* lock, s64 timeout = -1, bool allow_terminating_thread = true);
19 void Broadcast(); 19 void Broadcast();
20 20
21private: 21private:
22 KernelCore& kernel; 22 KernelCore& m_kernel;
23 KThread::WaiterList wait_list{}; 23 KThread::WaiterList m_wait_list{};
24}; 24};
25} // namespace Kernel 25} // namespace Kernel
diff --git a/src/core/hle/kernel/k_light_lock.cpp b/src/core/hle/kernel/k_light_lock.cpp
index 14cb615da..e87ee8b65 100644
--- a/src/core/hle/kernel/k_light_lock.cpp
+++ b/src/core/hle/kernel/k_light_lock.cpp
@@ -13,7 +13,7 @@ namespace {
13 13
14class ThreadQueueImplForKLightLock final : public KThreadQueue { 14class ThreadQueueImplForKLightLock final : public KThreadQueue {
15public: 15public:
16 explicit ThreadQueueImplForKLightLock(KernelCore& kernel_) : KThreadQueue(kernel_) {} 16 explicit ThreadQueueImplForKLightLock(KernelCore& kernel) : KThreadQueue(kernel) {}
17 17
18 void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override { 18 void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
19 // Remove the thread as a waiter from its owner. 19 // Remove the thread as a waiter from its owner.
@@ -29,13 +29,13 @@ public:
29} // namespace 29} // namespace
30 30
31void KLightLock::Lock() { 31void KLightLock::Lock() {
32 const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel)); 32 const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(m_kernel));
33 33
34 while (true) { 34 while (true) {
35 uintptr_t old_tag = tag.load(std::memory_order_relaxed); 35 uintptr_t old_tag = m_tag.load(std::memory_order_relaxed);
36 36
37 while (!tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : (old_tag | 1), 37 while (!m_tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : (old_tag | 1),
38 std::memory_order_acquire)) { 38 std::memory_order_acquire)) {
39 } 39 }
40 40
41 if (old_tag == 0 || this->LockSlowPath(old_tag | 1, cur_thread)) { 41 if (old_tag == 0 || this->LockSlowPath(old_tag | 1, cur_thread)) {
@@ -45,30 +45,30 @@ void KLightLock::Lock() {
45} 45}
46 46
47void KLightLock::Unlock() { 47void KLightLock::Unlock() {
48 const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel)); 48 const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(m_kernel));
49 49
50 uintptr_t expected = cur_thread; 50 uintptr_t expected = cur_thread;
51 if (!tag.compare_exchange_strong(expected, 0, std::memory_order_release)) { 51 if (!m_tag.compare_exchange_strong(expected, 0, std::memory_order_release)) {
52 this->UnlockSlowPath(cur_thread); 52 this->UnlockSlowPath(cur_thread);
53 } 53 }
54} 54}
55 55
56bool KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) { 56bool KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
57 KThread* cur_thread = reinterpret_cast<KThread*>(_cur_thread); 57 KThread* cur_thread = reinterpret_cast<KThread*>(_cur_thread);
58 ThreadQueueImplForKLightLock wait_queue(kernel); 58 ThreadQueueImplForKLightLock wait_queue(m_kernel);
59 59
60 // Pend the current thread waiting on the owner thread. 60 // Pend the current thread waiting on the owner thread.
61 { 61 {
62 KScopedSchedulerLock sl{kernel}; 62 KScopedSchedulerLock sl{m_kernel};
63 63
64 // Ensure we actually have locking to do. 64 // Ensure we actually have locking to do.
65 if (tag.load(std::memory_order_relaxed) != _owner) { 65 if (m_tag.load(std::memory_order_relaxed) != _owner) {
66 return false; 66 return false;
67 } 67 }
68 68
69 // Add the current thread as a waiter on the owner. 69 // Add the current thread as a waiter on the owner.
70 KThread* owner_thread = reinterpret_cast<KThread*>(_owner & ~1ULL); 70 KThread* owner_thread = reinterpret_cast<KThread*>(_owner & ~1ULL);
71 cur_thread->SetKernelAddressKey(reinterpret_cast<uintptr_t>(std::addressof(tag))); 71 cur_thread->SetKernelAddressKey(reinterpret_cast<uintptr_t>(std::addressof(m_tag)));
72 owner_thread->AddWaiter(cur_thread); 72 owner_thread->AddWaiter(cur_thread);
73 73
74 // Begin waiting to hold the lock. 74 // Begin waiting to hold the lock.
@@ -87,12 +87,12 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
87 87
88 // Unlock. 88 // Unlock.
89 { 89 {
90 KScopedSchedulerLock sl(kernel); 90 KScopedSchedulerLock sl(m_kernel);
91 91
92 // Get the next owner. 92 // Get the next owner.
93 bool has_waiters; 93 bool has_waiters;
94 KThread* next_owner = owner_thread->RemoveKernelWaiterByKey( 94 KThread* next_owner = owner_thread->RemoveKernelWaiterByKey(
95 std::addressof(has_waiters), reinterpret_cast<uintptr_t>(std::addressof(tag))); 95 std::addressof(has_waiters), reinterpret_cast<uintptr_t>(std::addressof(m_tag)));
96 96
97 // Pass the lock to the next owner. 97 // Pass the lock to the next owner.
98 uintptr_t next_tag = 0; 98 uintptr_t next_tag = 0;
@@ -114,12 +114,13 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
114 } 114 }
115 115
116 // Write the new tag value. 116 // Write the new tag value.
117 tag.store(next_tag, std::memory_order_release); 117 m_tag.store(next_tag, std::memory_order_release);
118 } 118 }
119} 119}
120 120
121bool KLightLock::IsLockedByCurrentThread() const { 121bool KLightLock::IsLockedByCurrentThread() const {
122 return (tag | 1ULL) == (reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel)) | 1ULL); 122 return (m_tag.load() | 1ULL) ==
123 (reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(m_kernel)) | 1ULL);
123} 124}
124 125
125} // namespace Kernel 126} // namespace Kernel
diff --git a/src/core/hle/kernel/k_light_lock.h b/src/core/hle/kernel/k_light_lock.h
index 7edd950c0..626f57596 100644
--- a/src/core/hle/kernel/k_light_lock.h
+++ b/src/core/hle/kernel/k_light_lock.h
@@ -13,7 +13,7 @@ class KernelCore;
13 13
14class KLightLock { 14class KLightLock {
15public: 15public:
16 explicit KLightLock(KernelCore& kernel_) : kernel{kernel_} {} 16 explicit KLightLock(KernelCore& kernel) : m_kernel{kernel} {}
17 17
18 void Lock(); 18 void Lock();
19 19
@@ -24,14 +24,14 @@ public:
24 void UnlockSlowPath(uintptr_t cur_thread); 24 void UnlockSlowPath(uintptr_t cur_thread);
25 25
26 bool IsLocked() const { 26 bool IsLocked() const {
27 return tag != 0; 27 return m_tag.load() != 0;
28 } 28 }
29 29
30 bool IsLockedByCurrentThread() const; 30 bool IsLockedByCurrentThread() const;
31 31
32private: 32private:
33 std::atomic<uintptr_t> tag{}; 33 std::atomic<uintptr_t> m_tag{};
34 KernelCore& kernel; 34 KernelCore& m_kernel;
35}; 35};
36 36
37using KScopedLightLock = KScopedLock<KLightLock>; 37using KScopedLightLock = KScopedLock<KLightLock>;
diff --git a/src/core/hle/kernel/k_linked_list.h b/src/core/hle/kernel/k_linked_list.h
deleted file mode 100644
index 29ebd16b7..000000000
--- a/src/core/hle/kernel/k_linked_list.h
+++ /dev/null
@@ -1,238 +0,0 @@
1// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include <boost/intrusive/list.hpp>
7
8#include "common/assert.h"
9#include "core/hle/kernel/slab_helpers.h"
10
11namespace Kernel {
12
13class KernelCore;
14
15class KLinkedListNode : public boost::intrusive::list_base_hook<>,
16 public KSlabAllocated<KLinkedListNode> {
17
18public:
19 explicit KLinkedListNode(KernelCore&) {}
20 KLinkedListNode() = default;
21
22 void Initialize(void* it) {
23 m_item = it;
24 }
25
26 void* GetItem() const {
27 return m_item;
28 }
29
30private:
31 void* m_item = nullptr;
32};
33
34template <typename T>
35class KLinkedList : private boost::intrusive::list<KLinkedListNode> {
36private:
37 using BaseList = boost::intrusive::list<KLinkedListNode>;
38
39public:
40 template <bool Const>
41 class Iterator;
42
43 using value_type = T;
44 using size_type = size_t;
45 using difference_type = ptrdiff_t;
46 using pointer = value_type*;
47 using const_pointer = const value_type*;
48 using reference = value_type&;
49 using const_reference = const value_type&;
50 using iterator = Iterator<false>;
51 using const_iterator = Iterator<true>;
52 using reverse_iterator = std::reverse_iterator<iterator>;
53 using const_reverse_iterator = std::reverse_iterator<const_iterator>;
54
55 template <bool Const>
56 class Iterator {
57 private:
58 using BaseIterator = BaseList::iterator;
59 friend class KLinkedList;
60
61 public:
62 using iterator_category = std::bidirectional_iterator_tag;
63 using value_type = typename KLinkedList::value_type;
64 using difference_type = typename KLinkedList::difference_type;
65 using pointer = std::conditional_t<Const, KLinkedList::const_pointer, KLinkedList::pointer>;
66 using reference =
67 std::conditional_t<Const, KLinkedList::const_reference, KLinkedList::reference>;
68
69 public:
70 explicit Iterator(BaseIterator it) : m_base_it(it) {}
71
72 pointer GetItem() const {
73 return static_cast<pointer>(m_base_it->GetItem());
74 }
75
76 bool operator==(const Iterator& rhs) const {
77 return m_base_it == rhs.m_base_it;
78 }
79
80 bool operator!=(const Iterator& rhs) const {
81 return !(*this == rhs);
82 }
83
84 pointer operator->() const {
85 return this->GetItem();
86 }
87
88 reference operator*() const {
89 return *this->GetItem();
90 }
91
92 Iterator& operator++() {
93 ++m_base_it;
94 return *this;
95 }
96
97 Iterator& operator--() {
98 --m_base_it;
99 return *this;
100 }
101
102 Iterator operator++(int) {
103 const Iterator it{*this};
104 ++(*this);
105 return it;
106 }
107
108 Iterator operator--(int) {
109 const Iterator it{*this};
110 --(*this);
111 return it;
112 }
113
114 operator Iterator<true>() const {
115 return Iterator<true>(m_base_it);
116 }
117
118 private:
119 BaseIterator m_base_it;
120 };
121
122public:
123 constexpr KLinkedList(KernelCore& kernel_) : BaseList(), kernel{kernel_} {}
124
125 ~KLinkedList() {
126 // Erase all elements.
127 for (auto it = begin(); it != end(); it = erase(it)) {
128 }
129
130 // Ensure we succeeded.
131 ASSERT(this->empty());
132 }
133
134 // Iterator accessors.
135 iterator begin() {
136 return iterator(BaseList::begin());
137 }
138
139 const_iterator begin() const {
140 return const_iterator(BaseList::begin());
141 }
142
143 iterator end() {
144 return iterator(BaseList::end());
145 }
146
147 const_iterator end() const {
148 return const_iterator(BaseList::end());
149 }
150
151 const_iterator cbegin() const {
152 return this->begin();
153 }
154
155 const_iterator cend() const {
156 return this->end();
157 }
158
159 reverse_iterator rbegin() {
160 return reverse_iterator(this->end());
161 }
162
163 const_reverse_iterator rbegin() const {
164 return const_reverse_iterator(this->end());
165 }
166
167 reverse_iterator rend() {
168 return reverse_iterator(this->begin());
169 }
170
171 const_reverse_iterator rend() const {
172 return const_reverse_iterator(this->begin());
173 }
174
175 const_reverse_iterator crbegin() const {
176 return this->rbegin();
177 }
178
179 const_reverse_iterator crend() const {
180 return this->rend();
181 }
182
183 // Content management.
184 using BaseList::empty;
185 using BaseList::size;
186
187 reference back() {
188 return *(--this->end());
189 }
190
191 const_reference back() const {
192 return *(--this->end());
193 }
194
195 reference front() {
196 return *this->begin();
197 }
198
199 const_reference front() const {
200 return *this->begin();
201 }
202
203 iterator insert(const_iterator pos, reference ref) {
204 KLinkedListNode* new_node = KLinkedListNode::Allocate(kernel);
205 ASSERT(new_node != nullptr);
206 new_node->Initialize(std::addressof(ref));
207 return iterator(BaseList::insert(pos.m_base_it, *new_node));
208 }
209
210 void push_back(reference ref) {
211 this->insert(this->end(), ref);
212 }
213
214 void push_front(reference ref) {
215 this->insert(this->begin(), ref);
216 }
217
218 void pop_back() {
219 this->erase(--this->end());
220 }
221
222 void pop_front() {
223 this->erase(this->begin());
224 }
225
226 iterator erase(const iterator pos) {
227 KLinkedListNode* freed_node = std::addressof(*pos.m_base_it);
228 iterator ret = iterator(BaseList::erase(pos.m_base_it));
229 KLinkedListNode::Free(kernel, freed_node);
230
231 return ret;
232 }
233
234private:
235 KernelCore& kernel;
236};
237
238} // namespace Kernel
diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h
index 87ca65592..e01929da6 100644
--- a/src/core/hle/kernel/k_memory_block.h
+++ b/src/core/hle/kernel/k_memory_block.h
@@ -471,8 +471,8 @@ public:
471 m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight); 471 m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight);
472 } 472 }
473 473
474 constexpr void UpdateDeviceDisableMergeStateForShareLeft( 474 constexpr void UpdateDeviceDisableMergeStateForShareLeft(KMemoryPermission new_perm, bool left,
475 [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { 475 bool right) {
476 // New permission/right aren't used. 476 // New permission/right aren't used.
477 if (left) { 477 if (left) {
478 m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( 478 m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
@@ -482,8 +482,8 @@ public:
482 } 482 }
483 } 483 }
484 484
485 constexpr void UpdateDeviceDisableMergeStateForShareRight( 485 constexpr void UpdateDeviceDisableMergeStateForShareRight(KMemoryPermission new_perm, bool left,
486 [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) { 486 bool right) {
487 // New permission/left aren't used. 487 // New permission/left aren't used.
488 if (right) { 488 if (right) {
489 m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( 489 m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
@@ -499,8 +499,7 @@ public:
499 this->UpdateDeviceDisableMergeStateForShareRight(new_perm, left, right); 499 this->UpdateDeviceDisableMergeStateForShareRight(new_perm, left, right);
500 } 500 }
501 501
502 constexpr void ShareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left, 502 constexpr void ShareToDevice(KMemoryPermission new_perm, bool left, bool right) {
503 bool right) {
504 // New permission isn't used. 503 // New permission isn't used.
505 504
506 // We must either be shared or have a zero lock count. 505 // We must either be shared or have a zero lock count.
@@ -516,8 +515,8 @@ public:
516 this->UpdateDeviceDisableMergeStateForShare(new_perm, left, right); 515 this->UpdateDeviceDisableMergeStateForShare(new_perm, left, right);
517 } 516 }
518 517
519 constexpr void UpdateDeviceDisableMergeStateForUnshareLeft( 518 constexpr void UpdateDeviceDisableMergeStateForUnshareLeft(KMemoryPermission new_perm,
520 [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { 519 bool left, bool right) {
521 // New permission/right aren't used. 520 // New permission/right aren't used.
522 521
523 if (left) { 522 if (left) {
@@ -536,8 +535,8 @@ public:
536 } 535 }
537 } 536 }
538 537
539 constexpr void UpdateDeviceDisableMergeStateForUnshareRight( 538 constexpr void UpdateDeviceDisableMergeStateForUnshareRight(KMemoryPermission new_perm,
540 [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) { 539 bool left, bool right) {
541 // New permission/left aren't used. 540 // New permission/left aren't used.
542 541
543 if (right) { 542 if (right) {
@@ -556,8 +555,7 @@ public:
556 this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right); 555 this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
557 } 556 }
558 557
559 constexpr void UnshareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left, 558 constexpr void UnshareToDevice(KMemoryPermission new_perm, bool left, bool right) {
560 bool right) {
561 // New permission isn't used. 559 // New permission isn't used.
562 560
563 // We must be shared. 561 // We must be shared.
@@ -575,8 +573,7 @@ public:
575 this->UpdateDeviceDisableMergeStateForUnshare(new_perm, left, right); 573 this->UpdateDeviceDisableMergeStateForUnshare(new_perm, left, right);
576 } 574 }
577 575
578 constexpr void UnshareToDeviceRight([[maybe_unused]] KMemoryPermission new_perm, bool left, 576 constexpr void UnshareToDeviceRight(KMemoryPermission new_perm, bool left, bool right) {
579 bool right) {
580 // New permission isn't used. 577 // New permission isn't used.
581 578
582 // We must be shared. 579 // We must be shared.
@@ -594,7 +591,7 @@ public:
594 this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right); 591 this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
595 } 592 }
596 593
597 constexpr void LockForIpc(KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { 594 constexpr void LockForIpc(KMemoryPermission new_perm, bool left, bool right) {
598 // We must either be locked or have a zero lock count. 595 // We must either be locked or have a zero lock count.
599 ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked || 596 ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked ||
600 m_ipc_lock_count == 0); 597 m_ipc_lock_count == 0);
@@ -626,8 +623,7 @@ public:
626 } 623 }
627 } 624 }
628 625
629 constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left, 626 constexpr void UnlockForIpc(KMemoryPermission new_perm, bool left, bool right) {
630 [[maybe_unused]] bool right) {
631 // New permission isn't used. 627 // New permission isn't used.
632 628
633 // We must be locked. 629 // We must be locked.
diff --git a/src/core/hle/kernel/k_memory_layout.cpp b/src/core/hle/kernel/k_memory_layout.cpp
index 72c3ee4b7..9ff751119 100644
--- a/src/core/hle/kernel/k_memory_layout.cpp
+++ b/src/core/hle/kernel/k_memory_layout.cpp
@@ -18,11 +18,11 @@ KMemoryRegion* AllocateRegion(KMemoryRegionAllocator& memory_region_allocator, A
18 18
19} // namespace 19} // namespace
20 20
21KMemoryRegionTree::KMemoryRegionTree(KMemoryRegionAllocator& memory_region_allocator_) 21KMemoryRegionTree::KMemoryRegionTree(KMemoryRegionAllocator& memory_region_allocator)
22 : memory_region_allocator{memory_region_allocator_} {} 22 : m_memory_region_allocator{memory_region_allocator} {}
23 23
24void KMemoryRegionTree::InsertDirectly(u64 address, u64 last_address, u32 attr, u32 type_id) { 24void KMemoryRegionTree::InsertDirectly(u64 address, u64 last_address, u32 attr, u32 type_id) {
25 this->insert(*AllocateRegion(memory_region_allocator, address, last_address, attr, type_id)); 25 this->insert(*AllocateRegion(m_memory_region_allocator, address, last_address, attr, type_id));
26} 26}
27 27
28bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_attr, u32 old_attr) { 28bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_attr, u32 old_attr) {
@@ -69,7 +69,7 @@ bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_at
69 const u64 new_pair = (old_pair != std::numeric_limits<u64>::max()) 69 const u64 new_pair = (old_pair != std::numeric_limits<u64>::max())
70 ? old_pair + (address - old_address) 70 ? old_pair + (address - old_address)
71 : old_pair; 71 : old_pair;
72 this->insert(*AllocateRegion(memory_region_allocator, address, inserted_region_last, 72 this->insert(*AllocateRegion(m_memory_region_allocator, address, inserted_region_last,
73 new_pair, new_attr, type_id)); 73 new_pair, new_attr, type_id));
74 } 74 }
75 75
@@ -78,7 +78,7 @@ bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_at
78 const u64 after_pair = (old_pair != std::numeric_limits<u64>::max()) 78 const u64 after_pair = (old_pair != std::numeric_limits<u64>::max())
79 ? old_pair + (inserted_region_end - old_address) 79 ? old_pair + (inserted_region_end - old_address)
80 : old_pair; 80 : old_pair;
81 this->insert(*AllocateRegion(memory_region_allocator, inserted_region_end, old_last, 81 this->insert(*AllocateRegion(m_memory_region_allocator, inserted_region_end, old_last,
82 after_pair, old_attr, old_type)); 82 after_pair, old_attr, old_type));
83 } 83 }
84 84
@@ -126,14 +126,15 @@ VAddr KMemoryRegionTree::GetRandomAlignedRegion(size_t size, size_t alignment, u
126} 126}
127 127
128KMemoryLayout::KMemoryLayout() 128KMemoryLayout::KMemoryLayout()
129 : virtual_tree{memory_region_allocator}, physical_tree{memory_region_allocator}, 129 : m_virtual_tree{m_memory_region_allocator}, m_physical_tree{m_memory_region_allocator},
130 virtual_linear_tree{memory_region_allocator}, physical_linear_tree{memory_region_allocator} {} 130 m_virtual_linear_tree{m_memory_region_allocator}, m_physical_linear_tree{
131 m_memory_region_allocator} {}
131 132
132void KMemoryLayout::InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start, 133void KMemoryLayout::InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start,
133 VAddr linear_virtual_start) { 134 VAddr linear_virtual_start) {
134 // Set static differences. 135 // Set static differences.
135 linear_phys_to_virt_diff = linear_virtual_start - aligned_linear_phys_start; 136 m_linear_phys_to_virt_diff = linear_virtual_start - aligned_linear_phys_start;
136 linear_virt_to_phys_diff = aligned_linear_phys_start - linear_virtual_start; 137 m_linear_virt_to_phys_diff = aligned_linear_phys_start - linear_virtual_start;
137 138
138 // Initialize linear trees. 139 // Initialize linear trees.
139 for (auto& region : GetPhysicalMemoryRegionTree()) { 140 for (auto& region : GetPhysicalMemoryRegionTree()) {
diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h
index 17fa1a6ed..551b7a0e4 100644
--- a/src/core/hle/kernel/k_memory_layout.h
+++ b/src/core/hle/kernel/k_memory_layout.h
@@ -80,35 +80,35 @@ public:
80 KMemoryLayout(); 80 KMemoryLayout();
81 81
82 KMemoryRegionTree& GetVirtualMemoryRegionTree() { 82 KMemoryRegionTree& GetVirtualMemoryRegionTree() {
83 return virtual_tree; 83 return m_virtual_tree;
84 } 84 }
85 const KMemoryRegionTree& GetVirtualMemoryRegionTree() const { 85 const KMemoryRegionTree& GetVirtualMemoryRegionTree() const {
86 return virtual_tree; 86 return m_virtual_tree;
87 } 87 }
88 KMemoryRegionTree& GetPhysicalMemoryRegionTree() { 88 KMemoryRegionTree& GetPhysicalMemoryRegionTree() {
89 return physical_tree; 89 return m_physical_tree;
90 } 90 }
91 const KMemoryRegionTree& GetPhysicalMemoryRegionTree() const { 91 const KMemoryRegionTree& GetPhysicalMemoryRegionTree() const {
92 return physical_tree; 92 return m_physical_tree;
93 } 93 }
94 KMemoryRegionTree& GetVirtualLinearMemoryRegionTree() { 94 KMemoryRegionTree& GetVirtualLinearMemoryRegionTree() {
95 return virtual_linear_tree; 95 return m_virtual_linear_tree;
96 } 96 }
97 const KMemoryRegionTree& GetVirtualLinearMemoryRegionTree() const { 97 const KMemoryRegionTree& GetVirtualLinearMemoryRegionTree() const {
98 return virtual_linear_tree; 98 return m_virtual_linear_tree;
99 } 99 }
100 KMemoryRegionTree& GetPhysicalLinearMemoryRegionTree() { 100 KMemoryRegionTree& GetPhysicalLinearMemoryRegionTree() {
101 return physical_linear_tree; 101 return m_physical_linear_tree;
102 } 102 }
103 const KMemoryRegionTree& GetPhysicalLinearMemoryRegionTree() const { 103 const KMemoryRegionTree& GetPhysicalLinearMemoryRegionTree() const {
104 return physical_linear_tree; 104 return m_physical_linear_tree;
105 } 105 }
106 106
107 VAddr GetLinearVirtualAddress(PAddr address) const { 107 VAddr GetLinearVirtualAddress(PAddr address) const {
108 return address + linear_phys_to_virt_diff; 108 return address + m_linear_phys_to_virt_diff;
109 } 109 }
110 PAddr GetLinearPhysicalAddress(VAddr address) const { 110 PAddr GetLinearPhysicalAddress(VAddr address) const {
111 return address + linear_virt_to_phys_diff; 111 return address + m_linear_virt_to_phys_diff;
112 } 112 }
113 113
114 const KMemoryRegion* FindVirtual(VAddr address) const { 114 const KMemoryRegion* FindVirtual(VAddr address) const {
@@ -391,13 +391,13 @@ private:
391 } 391 }
392 392
393private: 393private:
394 u64 linear_phys_to_virt_diff{}; 394 u64 m_linear_phys_to_virt_diff{};
395 u64 linear_virt_to_phys_diff{}; 395 u64 m_linear_virt_to_phys_diff{};
396 KMemoryRegionAllocator memory_region_allocator; 396 KMemoryRegionAllocator m_memory_region_allocator;
397 KMemoryRegionTree virtual_tree; 397 KMemoryRegionTree m_virtual_tree;
398 KMemoryRegionTree physical_tree; 398 KMemoryRegionTree m_physical_tree;
399 KMemoryRegionTree virtual_linear_tree; 399 KMemoryRegionTree m_virtual_linear_tree;
400 KMemoryRegionTree physical_linear_tree; 400 KMemoryRegionTree m_physical_linear_tree;
401}; 401};
402 402
403namespace Init { 403namespace Init {
diff --git a/src/core/hle/kernel/k_memory_region.h b/src/core/hle/kernel/k_memory_region.h
index 5037e657f..cfe86fb82 100644
--- a/src/core/hle/kernel/k_memory_region.h
+++ b/src/core/hle/kernel/k_memory_region.h
@@ -21,15 +21,15 @@ public:
21 YUZU_NON_MOVEABLE(KMemoryRegion); 21 YUZU_NON_MOVEABLE(KMemoryRegion);
22 22
23 constexpr KMemoryRegion() = default; 23 constexpr KMemoryRegion() = default;
24 constexpr KMemoryRegion(u64 address_, u64 last_address_) 24 constexpr KMemoryRegion(u64 address, u64 last_address)
25 : address{address_}, last_address{last_address_} {} 25 : m_address{address}, m_last_address{last_address} {}
26 constexpr KMemoryRegion(u64 address_, u64 last_address_, u64 pair_address_, u32 attributes_, 26 constexpr KMemoryRegion(u64 address, u64 last_address, u64 pair_address, u32 attributes,
27 u32 type_id_) 27 u32 type_id)
28 : address(address_), last_address(last_address_), pair_address(pair_address_), 28 : m_address(address), m_last_address(last_address), m_pair_address(pair_address),
29 attributes(attributes_), type_id(type_id_) {} 29 m_attributes(attributes), m_type_id(type_id) {}
30 constexpr KMemoryRegion(u64 address_, u64 last_address_, u32 attributes_, u32 type_id_) 30 constexpr KMemoryRegion(u64 address, u64 last_address, u32 attributes, u32 type_id)
31 : KMemoryRegion(address_, last_address_, std::numeric_limits<u64>::max(), attributes_, 31 : KMemoryRegion(address, last_address, std::numeric_limits<u64>::max(), attributes,
32 type_id_) {} 32 type_id) {}
33 33
34 ~KMemoryRegion() = default; 34 ~KMemoryRegion() = default;
35 35
@@ -44,15 +44,15 @@ public:
44 } 44 }
45 45
46 constexpr u64 GetAddress() const { 46 constexpr u64 GetAddress() const {
47 return address; 47 return m_address;
48 } 48 }
49 49
50 constexpr u64 GetPairAddress() const { 50 constexpr u64 GetPairAddress() const {
51 return pair_address; 51 return m_pair_address;
52 } 52 }
53 53
54 constexpr u64 GetLastAddress() const { 54 constexpr u64 GetLastAddress() const {
55 return last_address; 55 return m_last_address;
56 } 56 }
57 57
58 constexpr u64 GetEndAddress() const { 58 constexpr u64 GetEndAddress() const {
@@ -64,16 +64,16 @@ public:
64 } 64 }
65 65
66 constexpr u32 GetAttributes() const { 66 constexpr u32 GetAttributes() const {
67 return attributes; 67 return m_attributes;
68 } 68 }
69 69
70 constexpr u32 GetType() const { 70 constexpr u32 GetType() const {
71 return type_id; 71 return m_type_id;
72 } 72 }
73 73
74 constexpr void SetType(u32 type) { 74 constexpr void SetType(u32 type) {
75 ASSERT(this->CanDerive(type)); 75 ASSERT(this->CanDerive(type));
76 type_id = type; 76 m_type_id = type;
77 } 77 }
78 78
79 constexpr bool Contains(u64 addr) const { 79 constexpr bool Contains(u64 addr) const {
@@ -94,27 +94,27 @@ public:
94 } 94 }
95 95
96 constexpr void SetPairAddress(u64 a) { 96 constexpr void SetPairAddress(u64 a) {
97 pair_address = a; 97 m_pair_address = a;
98 } 98 }
99 99
100 constexpr void SetTypeAttribute(u32 attr) { 100 constexpr void SetTypeAttribute(u32 attr) {
101 type_id |= attr; 101 m_type_id |= attr;
102 } 102 }
103 103
104private: 104private:
105 constexpr void Reset(u64 a, u64 la, u64 p, u32 r, u32 t) { 105 constexpr void Reset(u64 a, u64 la, u64 p, u32 r, u32 t) {
106 address = a; 106 m_address = a;
107 pair_address = p; 107 m_pair_address = p;
108 last_address = la; 108 m_last_address = la;
109 attributes = r; 109 m_attributes = r;
110 type_id = t; 110 m_type_id = t;
111 } 111 }
112 112
113 u64 address{}; 113 u64 m_address{};
114 u64 last_address{}; 114 u64 m_last_address{};
115 u64 pair_address{}; 115 u64 m_pair_address{};
116 u32 attributes{}; 116 u32 m_attributes{};
117 u32 type_id{}; 117 u32 m_type_id{};
118}; 118};
119 119
120class KMemoryRegionTree final { 120class KMemoryRegionTree final {
@@ -322,7 +322,7 @@ public:
322 322
323private: 323private:
324 TreeType m_tree{}; 324 TreeType m_tree{};
325 KMemoryRegionAllocator& memory_region_allocator; 325 KMemoryRegionAllocator& m_memory_region_allocator;
326}; 326};
327 327
328class KMemoryRegionAllocator final { 328class KMemoryRegionAllocator final {
@@ -338,18 +338,18 @@ public:
338 template <typename... Args> 338 template <typename... Args>
339 KMemoryRegion* Allocate(Args&&... args) { 339 KMemoryRegion* Allocate(Args&&... args) {
340 // Ensure we stay within the bounds of our heap. 340 // Ensure we stay within the bounds of our heap.
341 ASSERT(this->num_regions < MaxMemoryRegions); 341 ASSERT(m_num_regions < MaxMemoryRegions);
342 342
343 // Create the new region. 343 // Create the new region.
344 KMemoryRegion* region = std::addressof(this->region_heap[this->num_regions++]); 344 KMemoryRegion* region = std::addressof(m_region_heap[m_num_regions++]);
345 new (region) KMemoryRegion(std::forward<Args>(args)...); 345 std::construct_at(region, std::forward<Args>(args)...);
346 346
347 return region; 347 return region;
348 } 348 }
349 349
350private: 350private:
351 std::array<KMemoryRegion, MaxMemoryRegions> region_heap{}; 351 std::array<KMemoryRegion, MaxMemoryRegions> m_region_heap{};
352 size_t num_regions{}; 352 size_t m_num_regions{};
353}; 353};
354 354
355} // namespace Kernel 355} // namespace Kernel
diff --git a/src/core/hle/kernel/k_object_name.h b/src/core/hle/kernel/k_object_name.h
index b7f943134..2d97fc777 100644
--- a/src/core/hle/kernel/k_object_name.h
+++ b/src/core/hle/kernel/k_object_name.h
@@ -41,7 +41,7 @@ public:
41 // Check that the object is closed. 41 // Check that the object is closed.
42 R_UNLESS(derived->IsServerClosed(), ResultInvalidState); 42 R_UNLESS(derived->IsServerClosed(), ResultInvalidState);
43 43
44 return Delete(kernel, obj.GetPointerUnsafe(), name); 44 R_RETURN(Delete(kernel, obj.GetPointerUnsafe(), name));
45 } 45 }
46 46
47 template <typename Derived> 47 template <typename Derived>
diff --git a/src/core/hle/kernel/k_page_buffer.h b/src/core/hle/kernel/k_page_buffer.h
index cfedaae61..b7a3ccb4a 100644
--- a/src/core/hle/kernel/k_page_buffer.h
+++ b/src/core/hle/kernel/k_page_buffer.h
@@ -29,7 +29,7 @@ public:
29 static KPageBuffer* FromPhysicalAddress(Core::System& system, PAddr phys_addr); 29 static KPageBuffer* FromPhysicalAddress(Core::System& system, PAddr phys_addr);
30 30
31private: 31private:
32 [[maybe_unused]] alignas(PageSize) std::array<u8, PageSize> m_buffer{}; 32 alignas(PageSize) std::array<u8, PageSize> m_buffer{};
33}; 33};
34static_assert(sizeof(KPageBuffer) == KPageBufferSlabHeap::BufferSize); 34static_assert(sizeof(KPageBuffer) == KPageBufferSlabHeap::BufferSize);
35 35
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 367dab613..5c5356338 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -484,7 +484,7 @@ private:
484 } 484 }
485 485
486 PageLinkedList* GetPageList() { 486 PageLinkedList* GetPageList() {
487 return &m_ll; 487 return std::addressof(m_ll);
488 } 488 }
489 }; 489 };
490 490
diff --git a/src/core/hle/kernel/k_page_table_slab_heap.h b/src/core/hle/kernel/k_page_table_slab_heap.h
index a9543cbd0..9a8d77316 100644
--- a/src/core/hle/kernel/k_page_table_slab_heap.h
+++ b/src/core/hle/kernel/k_page_table_slab_heap.h
@@ -20,7 +20,8 @@ public:
20 PageTablePage() = default; 20 PageTablePage() = default;
21 21
22private: 22private:
23 std::array<u8, PageSize> m_buffer{}; 23 // Initializer intentionally skipped
24 std::array<u8, PageSize> m_buffer;
24}; 25};
25static_assert(sizeof(PageTablePage) == PageSize); 26static_assert(sizeof(PageTablePage) == PageSize);
26 27
diff --git a/src/core/hle/kernel/k_port.cpp b/src/core/hle/kernel/k_port.cpp
index 0a45ffd57..1621ca1d3 100644
--- a/src/core/hle/kernel/k_port.cpp
+++ b/src/core/hle/kernel/k_port.cpp
@@ -7,56 +7,55 @@
7 7
8namespace Kernel { 8namespace Kernel {
9 9
10KPort::KPort(KernelCore& kernel_) 10KPort::KPort(KernelCore& kernel)
11 : KAutoObjectWithSlabHeapAndContainer{kernel_}, server{kernel_}, client{kernel_} {} 11 : KAutoObjectWithSlabHeapAndContainer{kernel}, m_server{kernel}, m_client{kernel} {}
12 12
13KPort::~KPort() = default; 13KPort::~KPort() = default;
14 14
15void KPort::Initialize(s32 max_sessions_, bool is_light_, const std::string& name_) { 15void KPort::Initialize(s32 max_sessions, bool is_light, uintptr_t name) {
16 // Open a new reference count to the initialized port. 16 // Open a new reference count to the initialized port.
17 Open(); 17 this->Open();
18 18
19 // Create and initialize our server/client pair. 19 // Create and initialize our server/client pair.
20 KAutoObject::Create(std::addressof(server)); 20 KAutoObject::Create(std::addressof(m_server));
21 KAutoObject::Create(std::addressof(client)); 21 KAutoObject::Create(std::addressof(m_client));
22 server.Initialize(this, name_ + ":Server"); 22 m_server.Initialize(this);
23 client.Initialize(this, max_sessions_, name_ + ":Client"); 23 m_client.Initialize(this, max_sessions);
24 24
25 // Set our member variables. 25 // Set our member variables.
26 is_light = is_light_; 26 m_is_light = is_light;
27 name = name_; 27 m_name = name;
28 state = State::Normal; 28 m_state = State::Normal;
29} 29}
30 30
31void KPort::OnClientClosed() { 31void KPort::OnClientClosed() {
32 KScopedSchedulerLock sl{kernel}; 32 KScopedSchedulerLock sl{m_kernel};
33 33
34 if (state == State::Normal) { 34 if (m_state == State::Normal) {
35 state = State::ClientClosed; 35 m_state = State::ClientClosed;
36 } 36 }
37} 37}
38 38
39void KPort::OnServerClosed() { 39void KPort::OnServerClosed() {
40 KScopedSchedulerLock sl{kernel}; 40 KScopedSchedulerLock sl{m_kernel};
41 41
42 if (state == State::Normal) { 42 if (m_state == State::Normal) {
43 state = State::ServerClosed; 43 m_state = State::ServerClosed;
44 } 44 }
45} 45}
46 46
47bool KPort::IsServerClosed() const { 47bool KPort::IsServerClosed() const {
48 KScopedSchedulerLock sl{kernel}; 48 KScopedSchedulerLock sl{m_kernel};
49 return state == State::ServerClosed; 49 return m_state == State::ServerClosed;
50} 50}
51 51
52Result KPort::EnqueueSession(KServerSession* session) { 52Result KPort::EnqueueSession(KServerSession* session) {
53 KScopedSchedulerLock sl{kernel}; 53 KScopedSchedulerLock sl{m_kernel};
54 54
55 R_UNLESS(state == State::Normal, ResultPortClosed); 55 R_UNLESS(m_state == State::Normal, ResultPortClosed);
56 56
57 server.EnqueueSession(session); 57 m_server.EnqueueSession(session);
58 58 R_SUCCEED();
59 return ResultSuccess;
60} 59}
61 60
62} // namespace Kernel 61} // namespace Kernel
diff --git a/src/core/hle/kernel/k_port.h b/src/core/hle/kernel/k_port.h
index 0cfc16dab..991be27ab 100644
--- a/src/core/hle/kernel/k_port.h
+++ b/src/core/hle/kernel/k_port.h
@@ -19,17 +19,20 @@ class KPort final : public KAutoObjectWithSlabHeapAndContainer<KPort, KAutoObjec
19 KERNEL_AUTOOBJECT_TRAITS(KPort, KAutoObject); 19 KERNEL_AUTOOBJECT_TRAITS(KPort, KAutoObject);
20 20
21public: 21public:
22 explicit KPort(KernelCore& kernel_); 22 explicit KPort(KernelCore& kernel);
23 ~KPort() override; 23 ~KPort() override;
24 24
25 static void PostDestroy([[maybe_unused]] uintptr_t arg) {} 25 static void PostDestroy(uintptr_t arg) {}
26 26
27 void Initialize(s32 max_sessions_, bool is_light_, const std::string& name_); 27 void Initialize(s32 max_sessions, bool is_light, uintptr_t name);
28 void OnClientClosed(); 28 void OnClientClosed();
29 void OnServerClosed(); 29 void OnServerClosed();
30 30
31 uintptr_t GetName() const {
32 return m_name;
33 }
31 bool IsLight() const { 34 bool IsLight() const {
32 return is_light; 35 return m_is_light;
33 } 36 }
34 37
35 bool IsServerClosed() const; 38 bool IsServerClosed() const;
@@ -37,16 +40,16 @@ public:
37 Result EnqueueSession(KServerSession* session); 40 Result EnqueueSession(KServerSession* session);
38 41
39 KClientPort& GetClientPort() { 42 KClientPort& GetClientPort() {
40 return client; 43 return m_client;
41 } 44 }
42 KServerPort& GetServerPort() { 45 KServerPort& GetServerPort() {
43 return server; 46 return m_server;
44 } 47 }
45 const KClientPort& GetClientPort() const { 48 const KClientPort& GetClientPort() const {
46 return client; 49 return m_client;
47 } 50 }
48 const KServerPort& GetServerPort() const { 51 const KServerPort& GetServerPort() const {
49 return server; 52 return m_server;
50 } 53 }
51 54
52private: 55private:
@@ -57,10 +60,11 @@ private:
57 ServerClosed = 3, 60 ServerClosed = 3,
58 }; 61 };
59 62
60 KServerPort server; 63 KServerPort m_server;
61 KClientPort client; 64 KClientPort m_client;
62 State state{State::Invalid}; 65 uintptr_t m_name;
63 bool is_light{}; 66 State m_state{State::Invalid};
67 bool m_is_light{};
64}; 68};
65 69
66} // namespace Kernel 70} // namespace Kernel
diff --git a/src/core/hle/kernel/k_priority_queue.h b/src/core/hle/kernel/k_priority_queue.h
index 645c5b531..26677ec65 100644
--- a/src/core/hle/kernel/k_priority_queue.h
+++ b/src/core/hle/kernel/k_priority_queue.h
@@ -77,11 +77,11 @@ private:
77public: 77public:
78 class KPerCoreQueue { 78 class KPerCoreQueue {
79 private: 79 private:
80 std::array<Entry, NumCores> root{}; 80 std::array<Entry, NumCores> m_root{};
81 81
82 public: 82 public:
83 constexpr KPerCoreQueue() { 83 constexpr KPerCoreQueue() {
84 for (auto& per_core_root : root) { 84 for (auto& per_core_root : m_root) {
85 per_core_root.Initialize(); 85 per_core_root.Initialize();
86 } 86 }
87 } 87 }
@@ -91,15 +91,15 @@ public:
91 Entry& member_entry = member->GetPriorityQueueEntry(core); 91 Entry& member_entry = member->GetPriorityQueueEntry(core);
92 92
93 // Get the entry associated with the end of the queue. 93 // Get the entry associated with the end of the queue.
94 Member* tail = this->root[core].GetPrev(); 94 Member* tail = m_root[core].GetPrev();
95 Entry& tail_entry = 95 Entry& tail_entry =
96 (tail != nullptr) ? tail->GetPriorityQueueEntry(core) : this->root[core]; 96 (tail != nullptr) ? tail->GetPriorityQueueEntry(core) : m_root[core];
97 97
98 // Link the entries. 98 // Link the entries.
99 member_entry.SetPrev(tail); 99 member_entry.SetPrev(tail);
100 member_entry.SetNext(nullptr); 100 member_entry.SetNext(nullptr);
101 tail_entry.SetNext(member); 101 tail_entry.SetNext(member);
102 this->root[core].SetPrev(member); 102 m_root[core].SetPrev(member);
103 103
104 return tail == nullptr; 104 return tail == nullptr;
105 } 105 }
@@ -109,15 +109,15 @@ public:
109 Entry& member_entry = member->GetPriorityQueueEntry(core); 109 Entry& member_entry = member->GetPriorityQueueEntry(core);
110 110
111 // Get the entry associated with the front of the queue. 111 // Get the entry associated with the front of the queue.
112 Member* head = this->root[core].GetNext(); 112 Member* head = m_root[core].GetNext();
113 Entry& head_entry = 113 Entry& head_entry =
114 (head != nullptr) ? head->GetPriorityQueueEntry(core) : this->root[core]; 114 (head != nullptr) ? head->GetPriorityQueueEntry(core) : m_root[core];
115 115
116 // Link the entries. 116 // Link the entries.
117 member_entry.SetPrev(nullptr); 117 member_entry.SetPrev(nullptr);
118 member_entry.SetNext(head); 118 member_entry.SetNext(head);
119 head_entry.SetPrev(member); 119 head_entry.SetPrev(member);
120 this->root[core].SetNext(member); 120 m_root[core].SetNext(member);
121 121
122 return (head == nullptr); 122 return (head == nullptr);
123 } 123 }
@@ -130,9 +130,9 @@ public:
130 Member* prev = member_entry.GetPrev(); 130 Member* prev = member_entry.GetPrev();
131 Member* next = member_entry.GetNext(); 131 Member* next = member_entry.GetNext();
132 Entry& prev_entry = 132 Entry& prev_entry =
133 (prev != nullptr) ? prev->GetPriorityQueueEntry(core) : this->root[core]; 133 (prev != nullptr) ? prev->GetPriorityQueueEntry(core) : m_root[core];
134 Entry& next_entry = 134 Entry& next_entry =
135 (next != nullptr) ? next->GetPriorityQueueEntry(core) : this->root[core]; 135 (next != nullptr) ? next->GetPriorityQueueEntry(core) : m_root[core];
136 136
137 // Unlink. 137 // Unlink.
138 prev_entry.SetNext(next); 138 prev_entry.SetNext(next);
@@ -142,7 +142,7 @@ public:
142 } 142 }
143 143
144 constexpr Member* GetFront(s32 core) const { 144 constexpr Member* GetFront(s32 core) const {
145 return this->root[core].GetNext(); 145 return m_root[core].GetNext();
146 } 146 }
147 }; 147 };
148 148
@@ -158,8 +158,8 @@ public:
158 return; 158 return;
159 } 159 }
160 160
161 if (this->queues[priority].PushBack(core, member)) { 161 if (m_queues[priority].PushBack(core, member)) {
162 this->available_priorities[core].SetBit(priority); 162 m_available_priorities[core].SetBit(priority);
163 } 163 }
164 } 164 }
165 165
@@ -171,8 +171,8 @@ public:
171 return; 171 return;
172 } 172 }
173 173
174 if (this->queues[priority].PushFront(core, member)) { 174 if (m_queues[priority].PushFront(core, member)) {
175 this->available_priorities[core].SetBit(priority); 175 m_available_priorities[core].SetBit(priority);
176 } 176 }
177 } 177 }
178 178
@@ -184,18 +184,17 @@ public:
184 return; 184 return;
185 } 185 }
186 186
187 if (this->queues[priority].Remove(core, member)) { 187 if (m_queues[priority].Remove(core, member)) {
188 this->available_priorities[core].ClearBit(priority); 188 m_available_priorities[core].ClearBit(priority);
189 } 189 }
190 } 190 }
191 191
192 constexpr Member* GetFront(s32 core) const { 192 constexpr Member* GetFront(s32 core) const {
193 ASSERT(IsValidCore(core)); 193 ASSERT(IsValidCore(core));
194 194
195 const s32 priority = 195 const s32 priority = static_cast<s32>(m_available_priorities[core].CountLeadingZero());
196 static_cast<s32>(this->available_priorities[core].CountLeadingZero());
197 if (priority <= LowestPriority) { 196 if (priority <= LowestPriority) {
198 return this->queues[priority].GetFront(core); 197 return m_queues[priority].GetFront(core);
199 } else { 198 } else {
200 return nullptr; 199 return nullptr;
201 } 200 }
@@ -206,7 +205,7 @@ public:
206 ASSERT(IsValidPriority(priority)); 205 ASSERT(IsValidPriority(priority));
207 206
208 if (priority <= LowestPriority) { 207 if (priority <= LowestPriority) {
209 return this->queues[priority].GetFront(core); 208 return m_queues[priority].GetFront(core);
210 } else { 209 } else {
211 return nullptr; 210 return nullptr;
212 } 211 }
@@ -218,9 +217,9 @@ public:
218 Member* next = member->GetPriorityQueueEntry(core).GetNext(); 217 Member* next = member->GetPriorityQueueEntry(core).GetNext();
219 if (next == nullptr) { 218 if (next == nullptr) {
220 const s32 priority = static_cast<s32>( 219 const s32 priority = static_cast<s32>(
221 this->available_priorities[core].GetNextSet(member->GetPriority())); 220 m_available_priorities[core].GetNextSet(member->GetPriority()));
222 if (priority <= LowestPriority) { 221 if (priority <= LowestPriority) {
223 next = this->queues[priority].GetFront(core); 222 next = m_queues[priority].GetFront(core);
224 } 223 }
225 } 224 }
226 return next; 225 return next;
@@ -231,8 +230,8 @@ public:
231 ASSERT(IsValidPriority(priority)); 230 ASSERT(IsValidPriority(priority));
232 231
233 if (priority <= LowestPriority) { 232 if (priority <= LowestPriority) {
234 this->queues[priority].Remove(core, member); 233 m_queues[priority].Remove(core, member);
235 this->queues[priority].PushFront(core, member); 234 m_queues[priority].PushFront(core, member);
236 } 235 }
237 } 236 }
238 237
@@ -241,29 +240,29 @@ public:
241 ASSERT(IsValidPriority(priority)); 240 ASSERT(IsValidPriority(priority));
242 241
243 if (priority <= LowestPriority) { 242 if (priority <= LowestPriority) {
244 this->queues[priority].Remove(core, member); 243 m_queues[priority].Remove(core, member);
245 this->queues[priority].PushBack(core, member); 244 m_queues[priority].PushBack(core, member);
246 return this->queues[priority].GetFront(core); 245 return m_queues[priority].GetFront(core);
247 } else { 246 } else {
248 return nullptr; 247 return nullptr;
249 } 248 }
250 } 249 }
251 250
252 private: 251 private:
253 std::array<KPerCoreQueue, NumPriority> queues{}; 252 std::array<KPerCoreQueue, NumPriority> m_queues{};
254 std::array<Common::BitSet64<NumPriority>, NumCores> available_priorities{}; 253 std::array<Common::BitSet64<NumPriority>, NumCores> m_available_priorities{};
255 }; 254 };
256 255
257private: 256private:
258 KPriorityQueueImpl scheduled_queue; 257 KPriorityQueueImpl m_scheduled_queue;
259 KPriorityQueueImpl suggested_queue; 258 KPriorityQueueImpl m_suggested_queue;
260 259
261private: 260private:
262 constexpr void ClearAffinityBit(u64& affinity, s32 core) { 261 static constexpr void ClearAffinityBit(u64& affinity, s32 core) {
263 affinity &= ~(UINT64_C(1) << core); 262 affinity &= ~(UINT64_C(1) << core);
264 } 263 }
265 264
266 constexpr s32 GetNextCore(u64& affinity) { 265 static constexpr s32 GetNextCore(u64& affinity) {
267 const s32 core = std::countr_zero(affinity); 266 const s32 core = std::countr_zero(affinity);
268 ClearAffinityBit(affinity, core); 267 ClearAffinityBit(affinity, core);
269 return core; 268 return core;
@@ -275,13 +274,13 @@ private:
275 // Push onto the scheduled queue for its core, if we can. 274 // Push onto the scheduled queue for its core, if we can.
276 u64 affinity = member->GetAffinityMask().GetAffinityMask(); 275 u64 affinity = member->GetAffinityMask().GetAffinityMask();
277 if (const s32 core = member->GetActiveCore(); core >= 0) { 276 if (const s32 core = member->GetActiveCore(); core >= 0) {
278 this->scheduled_queue.PushBack(priority, core, member); 277 m_scheduled_queue.PushBack(priority, core, member);
279 ClearAffinityBit(affinity, core); 278 ClearAffinityBit(affinity, core);
280 } 279 }
281 280
282 // And suggest the thread for all other cores. 281 // And suggest the thread for all other cores.
283 while (affinity) { 282 while (affinity) {
284 this->suggested_queue.PushBack(priority, GetNextCore(affinity), member); 283 m_suggested_queue.PushBack(priority, GetNextCore(affinity), member);
285 } 284 }
286 } 285 }
287 286
@@ -291,14 +290,14 @@ private:
291 // Push onto the scheduled queue for its core, if we can. 290 // Push onto the scheduled queue for its core, if we can.
292 u64 affinity = member->GetAffinityMask().GetAffinityMask(); 291 u64 affinity = member->GetAffinityMask().GetAffinityMask();
293 if (const s32 core = member->GetActiveCore(); core >= 0) { 292 if (const s32 core = member->GetActiveCore(); core >= 0) {
294 this->scheduled_queue.PushFront(priority, core, member); 293 m_scheduled_queue.PushFront(priority, core, member);
295 ClearAffinityBit(affinity, core); 294 ClearAffinityBit(affinity, core);
296 } 295 }
297 296
298 // And suggest the thread for all other cores. 297 // And suggest the thread for all other cores.
299 // Note: Nintendo pushes onto the back of the suggested queue, not the front. 298 // Note: Nintendo pushes onto the back of the suggested queue, not the front.
300 while (affinity) { 299 while (affinity) {
301 this->suggested_queue.PushBack(priority, GetNextCore(affinity), member); 300 m_suggested_queue.PushBack(priority, GetNextCore(affinity), member);
302 } 301 }
303 } 302 }
304 303
@@ -308,13 +307,13 @@ private:
308 // Remove from the scheduled queue for its core. 307 // Remove from the scheduled queue for its core.
309 u64 affinity = member->GetAffinityMask().GetAffinityMask(); 308 u64 affinity = member->GetAffinityMask().GetAffinityMask();
310 if (const s32 core = member->GetActiveCore(); core >= 0) { 309 if (const s32 core = member->GetActiveCore(); core >= 0) {
311 this->scheduled_queue.Remove(priority, core, member); 310 m_scheduled_queue.Remove(priority, core, member);
312 ClearAffinityBit(affinity, core); 311 ClearAffinityBit(affinity, core);
313 } 312 }
314 313
315 // Remove from the suggested queue for all other cores. 314 // Remove from the suggested queue for all other cores.
316 while (affinity) { 315 while (affinity) {
317 this->suggested_queue.Remove(priority, GetNextCore(affinity), member); 316 m_suggested_queue.Remove(priority, GetNextCore(affinity), member);
318 } 317 }
319 } 318 }
320 319
@@ -323,27 +322,27 @@ public:
323 322
324 // Getters. 323 // Getters.
325 constexpr Member* GetScheduledFront(s32 core) const { 324 constexpr Member* GetScheduledFront(s32 core) const {
326 return this->scheduled_queue.GetFront(core); 325 return m_scheduled_queue.GetFront(core);
327 } 326 }
328 327
329 constexpr Member* GetScheduledFront(s32 core, s32 priority) const { 328 constexpr Member* GetScheduledFront(s32 core, s32 priority) const {
330 return this->scheduled_queue.GetFront(priority, core); 329 return m_scheduled_queue.GetFront(priority, core);
331 } 330 }
332 331
333 constexpr Member* GetSuggestedFront(s32 core) const { 332 constexpr Member* GetSuggestedFront(s32 core) const {
334 return this->suggested_queue.GetFront(core); 333 return m_suggested_queue.GetFront(core);
335 } 334 }
336 335
337 constexpr Member* GetSuggestedFront(s32 core, s32 priority) const { 336 constexpr Member* GetSuggestedFront(s32 core, s32 priority) const {
338 return this->suggested_queue.GetFront(priority, core); 337 return m_suggested_queue.GetFront(priority, core);
339 } 338 }
340 339
341 constexpr Member* GetScheduledNext(s32 core, const Member* member) const { 340 constexpr Member* GetScheduledNext(s32 core, const Member* member) const {
342 return this->scheduled_queue.GetNext(core, member); 341 return m_scheduled_queue.GetNext(core, member);
343 } 342 }
344 343
345 constexpr Member* GetSuggestedNext(s32 core, const Member* member) const { 344 constexpr Member* GetSuggestedNext(s32 core, const Member* member) const {
346 return this->suggested_queue.GetNext(core, member); 345 return m_suggested_queue.GetNext(core, member);
347 } 346 }
348 347
349 constexpr Member* GetSamePriorityNext(s32 core, const Member* member) const { 348 constexpr Member* GetSamePriorityNext(s32 core, const Member* member) const {
@@ -375,7 +374,7 @@ public:
375 return; 374 return;
376 } 375 }
377 376
378 this->scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member); 377 m_scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member);
379 } 378 }
380 379
381 constexpr KThread* MoveToScheduledBack(Member* member) { 380 constexpr KThread* MoveToScheduledBack(Member* member) {
@@ -384,8 +383,7 @@ public:
384 return {}; 383 return {};
385 } 384 }
386 385
387 return this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(), 386 return m_scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(), member);
388 member);
389 } 387 }
390 388
391 // First class fancy operations. 389 // First class fancy operations.
@@ -425,9 +423,9 @@ public:
425 for (s32 core = 0; core < static_cast<s32>(NumCores); core++) { 423 for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
426 if (prev_affinity.GetAffinity(core)) { 424 if (prev_affinity.GetAffinity(core)) {
427 if (core == prev_core) { 425 if (core == prev_core) {
428 this->scheduled_queue.Remove(priority, core, member); 426 m_scheduled_queue.Remove(priority, core, member);
429 } else { 427 } else {
430 this->suggested_queue.Remove(priority, core, member); 428 m_suggested_queue.Remove(priority, core, member);
431 } 429 }
432 } 430 }
433 } 431 }
@@ -436,9 +434,9 @@ public:
436 for (s32 core = 0; core < static_cast<s32>(NumCores); core++) { 434 for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
437 if (new_affinity.GetAffinity(core)) { 435 if (new_affinity.GetAffinity(core)) {
438 if (core == new_core) { 436 if (core == new_core) {
439 this->scheduled_queue.PushBack(priority, core, member); 437 m_scheduled_queue.PushBack(priority, core, member);
440 } else { 438 } else {
441 this->suggested_queue.PushBack(priority, core, member); 439 m_suggested_queue.PushBack(priority, core, member);
442 } 440 }
443 } 441 }
444 } 442 }
@@ -458,22 +456,22 @@ public:
458 if (prev_core != new_core) { 456 if (prev_core != new_core) {
459 // Remove from the scheduled queue for the previous core. 457 // Remove from the scheduled queue for the previous core.
460 if (prev_core >= 0) { 458 if (prev_core >= 0) {
461 this->scheduled_queue.Remove(priority, prev_core, member); 459 m_scheduled_queue.Remove(priority, prev_core, member);
462 } 460 }
463 461
464 // Remove from the suggested queue and add to the scheduled queue for the new core. 462 // Remove from the suggested queue and add to the scheduled queue for the new core.
465 if (new_core >= 0) { 463 if (new_core >= 0) {
466 this->suggested_queue.Remove(priority, new_core, member); 464 m_suggested_queue.Remove(priority, new_core, member);
467 if (to_front) { 465 if (to_front) {
468 this->scheduled_queue.PushFront(priority, new_core, member); 466 m_scheduled_queue.PushFront(priority, new_core, member);
469 } else { 467 } else {
470 this->scheduled_queue.PushBack(priority, new_core, member); 468 m_scheduled_queue.PushBack(priority, new_core, member);
471 } 469 }
472 } 470 }
473 471
474 // Add to the suggested queue for the previous core. 472 // Add to the suggested queue for the previous core.
475 if (prev_core >= 0) { 473 if (prev_core >= 0) {
476 this->suggested_queue.PushBack(priority, prev_core, member); 474 m_suggested_queue.PushBack(priority, prev_core, member);
477 } 475 }
478 } 476 }
479 } 477 }
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index d44f6e921..9d18f4049 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -44,14 +44,14 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
44 SCOPE_EXIT({ thread->Close(); }); 44 SCOPE_EXIT({ thread->Close(); });
45 45
46 ASSERT(KThread::InitializeUserThread(system, thread, entry_point, 0, stack_top, priority, 46 ASSERT(KThread::InitializeUserThread(system, thread, entry_point, 0, stack_top, priority,
47 owner_process.GetIdealCoreId(), &owner_process) 47 owner_process.GetIdealCoreId(),
48 std::addressof(owner_process))
48 .IsSuccess()); 49 .IsSuccess());
49 50
50 // Register 1 must be a handle to the main thread 51 // Register 1 must be a handle to the main thread
51 Handle thread_handle{}; 52 Handle thread_handle{};
52 owner_process.GetHandleTable().Add(&thread_handle, thread); 53 owner_process.GetHandleTable().Add(std::addressof(thread_handle), thread);
53 54
54 thread->SetName("main");
55 thread->GetContext32().cpu_registers[0] = 0; 55 thread->GetContext32().cpu_registers[0] = 0;
56 thread->GetContext64().cpu_registers[0] = 0; 56 thread->GetContext64().cpu_registers[0] = 0;
57 thread->GetContext32().cpu_registers[1] = thread_handle; 57 thread->GetContext32().cpu_registers[1] = thread_handle;
@@ -71,32 +71,32 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string
71 auto& kernel = system.Kernel(); 71 auto& kernel = system.Kernel();
72 72
73 process->name = std::move(process_name); 73 process->name = std::move(process_name);
74 process->resource_limit = res_limit; 74 process->m_resource_limit = res_limit;
75 process->system_resource_address = 0; 75 process->m_system_resource_address = 0;
76 process->state = State::Created; 76 process->m_state = State::Created;
77 process->program_id = 0; 77 process->m_program_id = 0;
78 process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID() 78 process->m_process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
79 : kernel.CreateNewUserProcessID(); 79 : kernel.CreateNewUserProcessID();
80 process->capabilities.InitializeForMetadatalessProcess(); 80 process->m_capabilities.InitializeForMetadatalessProcess();
81 process->is_initialized = true; 81 process->m_is_initialized = true;
82 82
83 std::mt19937 rng(Settings::values.rng_seed.GetValue().value_or(std::time(nullptr))); 83 std::mt19937 rng(Settings::values.rng_seed.GetValue().value_or(std::time(nullptr)));
84 std::uniform_int_distribution<u64> distribution; 84 std::uniform_int_distribution<u64> distribution;
85 std::generate(process->random_entropy.begin(), process->random_entropy.end(), 85 std::generate(process->m_random_entropy.begin(), process->m_random_entropy.end(),
86 [&] { return distribution(rng); }); 86 [&] { return distribution(rng); });
87 87
88 kernel.AppendNewProcess(process); 88 kernel.AppendNewProcess(process);
89 89
90 // Clear remaining fields. 90 // Clear remaining fields.
91 process->num_running_threads = 0; 91 process->m_num_running_threads = 0;
92 process->is_signaled = false; 92 process->m_is_signaled = false;
93 process->exception_thread = nullptr; 93 process->m_exception_thread = nullptr;
94 process->is_suspended = false; 94 process->m_is_suspended = false;
95 process->schedule_count = 0; 95 process->m_schedule_count = 0;
96 process->is_handle_table_initialized = false; 96 process->m_is_handle_table_initialized = false;
97 97
98 // Open a reference to the resource limit. 98 // Open a reference to the resource limit.
99 process->resource_limit->Open(); 99 process->m_resource_limit->Open();
100 100
101 R_SUCCEED(); 101 R_SUCCEED();
102} 102}
@@ -106,65 +106,65 @@ void KProcess::DoWorkerTaskImpl() {
106} 106}
107 107
108KResourceLimit* KProcess::GetResourceLimit() const { 108KResourceLimit* KProcess::GetResourceLimit() const {
109 return resource_limit; 109 return m_resource_limit;
110} 110}
111 111
112void KProcess::IncrementRunningThreadCount() { 112void KProcess::IncrementRunningThreadCount() {
113 ASSERT(num_running_threads.load() >= 0); 113 ASSERT(m_num_running_threads.load() >= 0);
114 ++num_running_threads; 114 ++m_num_running_threads;
115} 115}
116 116
117void KProcess::DecrementRunningThreadCount() { 117void KProcess::DecrementRunningThreadCount() {
118 ASSERT(num_running_threads.load() > 0); 118 ASSERT(m_num_running_threads.load() > 0);
119 119
120 if (const auto prev = num_running_threads--; prev == 1) { 120 if (const auto prev = m_num_running_threads--; prev == 1) {
121 // TODO(bunnei): Process termination to be implemented when multiprocess is supported. 121 // TODO(bunnei): Process termination to be implemented when multiprocess is supported.
122 } 122 }
123} 123}
124 124
125u64 KProcess::GetTotalPhysicalMemoryAvailable() { 125u64 KProcess::GetTotalPhysicalMemoryAvailable() {
126 const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemoryMax) + 126 const u64 capacity{m_resource_limit->GetFreeValue(LimitableResource::PhysicalMemoryMax) +
127 page_table.GetNormalMemorySize() + GetSystemResourceSize() + image_size + 127 m_page_table.GetNormalMemorySize() + GetSystemResourceSize() + m_image_size +
128 main_thread_stack_size}; 128 m_main_thread_stack_size};
129 if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application); 129 if (const auto pool_size = m_kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application);
130 capacity != pool_size) { 130 capacity != pool_size) {
131 LOG_WARNING(Kernel, "capacity {} != application pool size {}", capacity, pool_size); 131 LOG_WARNING(Kernel, "capacity {} != application pool size {}", capacity, pool_size);
132 } 132 }
133 if (capacity < memory_usage_capacity) { 133 if (capacity < m_memory_usage_capacity) {
134 return capacity; 134 return capacity;
135 } 135 }
136 return memory_usage_capacity; 136 return m_memory_usage_capacity;
137} 137}
138 138
139u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() { 139u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() {
140 return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize(); 140 return this->GetTotalPhysicalMemoryAvailable() - this->GetSystemResourceSize();
141} 141}
142 142
143u64 KProcess::GetTotalPhysicalMemoryUsed() { 143u64 KProcess::GetTotalPhysicalMemoryUsed() {
144 return image_size + main_thread_stack_size + page_table.GetNormalMemorySize() + 144 return m_image_size + m_main_thread_stack_size + m_page_table.GetNormalMemorySize() +
145 GetSystemResourceSize(); 145 this->GetSystemResourceSize();
146} 146}
147 147
148u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() { 148u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() {
149 return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage(); 149 return this->GetTotalPhysicalMemoryUsed() - this->GetSystemResourceUsage();
150} 150}
151 151
152bool KProcess::ReleaseUserException(KThread* thread) { 152bool KProcess::ReleaseUserException(KThread* thread) {
153 KScopedSchedulerLock sl{kernel}; 153 KScopedSchedulerLock sl{m_kernel};
154 154
155 if (exception_thread == thread) { 155 if (m_exception_thread == thread) {
156 exception_thread = nullptr; 156 m_exception_thread = nullptr;
157 157
158 // Remove waiter thread. 158 // Remove waiter thread.
159 bool has_waiters{}; 159 bool has_waiters{};
160 if (KThread* next = thread->RemoveKernelWaiterByKey( 160 if (KThread* next = thread->RemoveKernelWaiterByKey(
161 std::addressof(has_waiters), 161 std::addressof(has_waiters),
162 reinterpret_cast<uintptr_t>(std::addressof(exception_thread))); 162 reinterpret_cast<uintptr_t>(std::addressof(m_exception_thread)));
163 next != nullptr) { 163 next != nullptr) {
164 next->EndWait(ResultSuccess); 164 next->EndWait(ResultSuccess);
165 } 165 }
166 166
167 KScheduler::SetSchedulerUpdateNeeded(kernel); 167 KScheduler::SetSchedulerUpdateNeeded(m_kernel);
168 168
169 return true; 169 return true;
170 } else { 170 } else {
@@ -173,72 +173,72 @@ bool KProcess::ReleaseUserException(KThread* thread) {
173} 173}
174 174
175void KProcess::PinCurrentThread(s32 core_id) { 175void KProcess::PinCurrentThread(s32 core_id) {
176 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 176 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
177 177
178 // Get the current thread. 178 // Get the current thread.
179 KThread* cur_thread = 179 KThread* cur_thread =
180 kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread(); 180 m_kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
181 181
182 // If the thread isn't terminated, pin it. 182 // If the thread isn't terminated, pin it.
183 if (!cur_thread->IsTerminationRequested()) { 183 if (!cur_thread->IsTerminationRequested()) {
184 // Pin it. 184 // Pin it.
185 PinThread(core_id, cur_thread); 185 this->PinThread(core_id, cur_thread);
186 cur_thread->Pin(core_id); 186 cur_thread->Pin(core_id);
187 187
188 // An update is needed. 188 // An update is needed.
189 KScheduler::SetSchedulerUpdateNeeded(kernel); 189 KScheduler::SetSchedulerUpdateNeeded(m_kernel);
190 } 190 }
191} 191}
192 192
193void KProcess::UnpinCurrentThread(s32 core_id) { 193void KProcess::UnpinCurrentThread(s32 core_id) {
194 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 194 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
195 195
196 // Get the current thread. 196 // Get the current thread.
197 KThread* cur_thread = 197 KThread* cur_thread =
198 kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread(); 198 m_kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
199 199
200 // Unpin it. 200 // Unpin it.
201 cur_thread->Unpin(); 201 cur_thread->Unpin();
202 UnpinThread(core_id, cur_thread); 202 this->UnpinThread(core_id, cur_thread);
203 203
204 // An update is needed. 204 // An update is needed.
205 KScheduler::SetSchedulerUpdateNeeded(kernel); 205 KScheduler::SetSchedulerUpdateNeeded(m_kernel);
206} 206}
207 207
208void KProcess::UnpinThread(KThread* thread) { 208void KProcess::UnpinThread(KThread* thread) {
209 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 209 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
210 210
211 // Get the thread's core id. 211 // Get the thread's core id.
212 const auto core_id = thread->GetActiveCore(); 212 const auto core_id = thread->GetActiveCore();
213 213
214 // Unpin it. 214 // Unpin it.
215 UnpinThread(core_id, thread); 215 this->UnpinThread(core_id, thread);
216 thread->Unpin(); 216 thread->Unpin();
217 217
218 // An update is needed. 218 // An update is needed.
219 KScheduler::SetSchedulerUpdateNeeded(kernel); 219 KScheduler::SetSchedulerUpdateNeeded(m_kernel);
220} 220}
221 221
222Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address, 222Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
223 [[maybe_unused]] size_t size) { 223 [[maybe_unused]] size_t size) {
224 // Lock ourselves, to prevent concurrent access. 224 // Lock ourselves, to prevent concurrent access.
225 KScopedLightLock lk(state_lock); 225 KScopedLightLock lk(m_state_lock);
226 226
227 // Try to find an existing info for the memory. 227 // Try to find an existing info for the memory.
228 KSharedMemoryInfo* shemen_info = nullptr; 228 KSharedMemoryInfo* shemen_info = nullptr;
229 const auto iter = std::find_if( 229 const auto iter = std::find_if(
230 shared_memory_list.begin(), shared_memory_list.end(), 230 m_shared_memory_list.begin(), m_shared_memory_list.end(),
231 [shmem](const KSharedMemoryInfo* info) { return info->GetSharedMemory() == shmem; }); 231 [shmem](const KSharedMemoryInfo* info) { return info->GetSharedMemory() == shmem; });
232 if (iter != shared_memory_list.end()) { 232 if (iter != m_shared_memory_list.end()) {
233 shemen_info = *iter; 233 shemen_info = *iter;
234 } 234 }
235 235
236 if (shemen_info == nullptr) { 236 if (shemen_info == nullptr) {
237 shemen_info = KSharedMemoryInfo::Allocate(kernel); 237 shemen_info = KSharedMemoryInfo::Allocate(m_kernel);
238 R_UNLESS(shemen_info != nullptr, ResultOutOfMemory); 238 R_UNLESS(shemen_info != nullptr, ResultOutOfMemory);
239 239
240 shemen_info->Initialize(shmem); 240 shemen_info->Initialize(shmem);
241 shared_memory_list.push_back(shemen_info); 241 m_shared_memory_list.push_back(shemen_info);
242 } 242 }
243 243
244 // Open a reference to the shared memory and its info. 244 // Open a reference to the shared memory and its info.
@@ -251,21 +251,21 @@ Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr ad
251void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address, 251void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
252 [[maybe_unused]] size_t size) { 252 [[maybe_unused]] size_t size) {
253 // Lock ourselves, to prevent concurrent access. 253 // Lock ourselves, to prevent concurrent access.
254 KScopedLightLock lk(state_lock); 254 KScopedLightLock lk(m_state_lock);
255 255
256 KSharedMemoryInfo* shemen_info = nullptr; 256 KSharedMemoryInfo* shemen_info = nullptr;
257 const auto iter = std::find_if( 257 const auto iter = std::find_if(
258 shared_memory_list.begin(), shared_memory_list.end(), 258 m_shared_memory_list.begin(), m_shared_memory_list.end(),
259 [shmem](const KSharedMemoryInfo* info) { return info->GetSharedMemory() == shmem; }); 259 [shmem](const KSharedMemoryInfo* info) { return info->GetSharedMemory() == shmem; });
260 if (iter != shared_memory_list.end()) { 260 if (iter != m_shared_memory_list.end()) {
261 shemen_info = *iter; 261 shemen_info = *iter;
262 } 262 }
263 263
264 ASSERT(shemen_info != nullptr); 264 ASSERT(shemen_info != nullptr);
265 265
266 if (shemen_info->Close()) { 266 if (shemen_info->Close()) {
267 shared_memory_list.erase(iter); 267 m_shared_memory_list.erase(iter);
268 KSharedMemoryInfo::Free(kernel, shemen_info); 268 KSharedMemoryInfo::Free(m_kernel, shemen_info);
269 } 269 }
270 270
271 // Close a reference to the shared memory. 271 // Close a reference to the shared memory.
@@ -273,22 +273,22 @@ void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr a
273} 273}
274 274
275void KProcess::RegisterThread(KThread* thread) { 275void KProcess::RegisterThread(KThread* thread) {
276 KScopedLightLock lk{list_lock}; 276 KScopedLightLock lk{m_list_lock};
277 277
278 thread_list.push_back(thread); 278 m_thread_list.push_back(thread);
279} 279}
280 280
281void KProcess::UnregisterThread(KThread* thread) { 281void KProcess::UnregisterThread(KThread* thread) {
282 KScopedLightLock lk{list_lock}; 282 KScopedLightLock lk{m_list_lock};
283 283
284 thread_list.remove(thread); 284 m_thread_list.remove(thread);
285} 285}
286 286
287u64 KProcess::GetFreeThreadCount() const { 287u64 KProcess::GetFreeThreadCount() const {
288 if (resource_limit != nullptr) { 288 if (m_resource_limit != nullptr) {
289 const auto current_value = 289 const auto current_value =
290 resource_limit->GetCurrentValue(LimitableResource::ThreadCountMax); 290 m_resource_limit->GetCurrentValue(LimitableResource::ThreadCountMax);
291 const auto limit_value = resource_limit->GetLimitValue(LimitableResource::ThreadCountMax); 291 const auto limit_value = m_resource_limit->GetLimitValue(LimitableResource::ThreadCountMax);
292 return limit_value - current_value; 292 return limit_value - current_value;
293 } else { 293 } else {
294 return 0; 294 return 0;
@@ -297,84 +297,85 @@ u64 KProcess::GetFreeThreadCount() const {
297 297
298Result KProcess::Reset() { 298Result KProcess::Reset() {
299 // Lock the process and the scheduler. 299 // Lock the process and the scheduler.
300 KScopedLightLock lk(state_lock); 300 KScopedLightLock lk(m_state_lock);
301 KScopedSchedulerLock sl{kernel}; 301 KScopedSchedulerLock sl{m_kernel};
302 302
303 // Validate that we're in a state that we can reset. 303 // Validate that we're in a state that we can reset.
304 R_UNLESS(state != State::Terminated, ResultInvalidState); 304 R_UNLESS(m_state != State::Terminated, ResultInvalidState);
305 R_UNLESS(is_signaled, ResultInvalidState); 305 R_UNLESS(m_is_signaled, ResultInvalidState);
306 306
307 // Clear signaled. 307 // Clear signaled.
308 is_signaled = false; 308 m_is_signaled = false;
309 R_SUCCEED(); 309 R_SUCCEED();
310} 310}
311 311
312Result KProcess::SetActivity(ProcessActivity activity) { 312Result KProcess::SetActivity(ProcessActivity activity) {
313 // Lock ourselves and the scheduler. 313 // Lock ourselves and the scheduler.
314 KScopedLightLock lk{state_lock}; 314 KScopedLightLock lk{m_state_lock};
315 KScopedLightLock list_lk{list_lock}; 315 KScopedLightLock list_lk{m_list_lock};
316 KScopedSchedulerLock sl{kernel}; 316 KScopedSchedulerLock sl{m_kernel};
317 317
318 // Validate our state. 318 // Validate our state.
319 R_UNLESS(state != State::Terminating, ResultInvalidState); 319 R_UNLESS(m_state != State::Terminating, ResultInvalidState);
320 R_UNLESS(state != State::Terminated, ResultInvalidState); 320 R_UNLESS(m_state != State::Terminated, ResultInvalidState);
321 321
322 // Either pause or resume. 322 // Either pause or resume.
323 if (activity == ProcessActivity::Paused) { 323 if (activity == ProcessActivity::Paused) {
324 // Verify that we're not suspended. 324 // Verify that we're not suspended.
325 R_UNLESS(!is_suspended, ResultInvalidState); 325 R_UNLESS(!m_is_suspended, ResultInvalidState);
326 326
327 // Suspend all threads. 327 // Suspend all threads.
328 for (auto* thread : GetThreadList()) { 328 for (auto* thread : this->GetThreadList()) {
329 thread->RequestSuspend(SuspendType::Process); 329 thread->RequestSuspend(SuspendType::Process);
330 } 330 }
331 331
332 // Set ourselves as suspended. 332 // Set ourselves as suspended.
333 SetSuspended(true); 333 this->SetSuspended(true);
334 } else { 334 } else {
335 ASSERT(activity == ProcessActivity::Runnable); 335 ASSERT(activity == ProcessActivity::Runnable);
336 336
337 // Verify that we're suspended. 337 // Verify that we're suspended.
338 R_UNLESS(is_suspended, ResultInvalidState); 338 R_UNLESS(m_is_suspended, ResultInvalidState);
339 339
340 // Resume all threads. 340 // Resume all threads.
341 for (auto* thread : GetThreadList()) { 341 for (auto* thread : this->GetThreadList()) {
342 thread->Resume(SuspendType::Process); 342 thread->Resume(SuspendType::Process);
343 } 343 }
344 344
345 // Set ourselves as resumed. 345 // Set ourselves as resumed.
346 SetSuspended(false); 346 this->SetSuspended(false);
347 } 347 }
348 348
349 R_SUCCEED(); 349 R_SUCCEED();
350} 350}
351 351
352Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) { 352Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) {
353 program_id = metadata.GetTitleID(); 353 m_program_id = metadata.GetTitleID();
354 ideal_core = metadata.GetMainThreadCore(); 354 m_ideal_core = metadata.GetMainThreadCore();
355 is_64bit_process = metadata.Is64BitProgram(); 355 m_is_64bit_process = metadata.Is64BitProgram();
356 system_resource_size = metadata.GetSystemResourceSize(); 356 m_system_resource_size = metadata.GetSystemResourceSize();
357 image_size = code_size; 357 m_image_size = code_size;
358 358
359 KScopedResourceReservation memory_reservation( 359 KScopedResourceReservation memory_reservation(
360 resource_limit, LimitableResource::PhysicalMemoryMax, code_size + system_resource_size); 360 m_resource_limit, LimitableResource::PhysicalMemoryMax, code_size + m_system_resource_size);
361 if (!memory_reservation.Succeeded()) { 361 if (!memory_reservation.Succeeded()) {
362 LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes", 362 LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes",
363 code_size + system_resource_size); 363 code_size + m_system_resource_size);
364 R_RETURN(ResultLimitReached); 364 R_RETURN(ResultLimitReached);
365 } 365 }
366 // Initialize proces address space 366 // Initialize process address space
367 if (const Result result{page_table.InitializeForProcess( 367 if (const Result result{m_page_table.InitializeForProcess(
368 metadata.GetAddressSpaceType(), false, false, false, KMemoryManager::Pool::Application, 368 metadata.GetAddressSpaceType(), false, false, false, KMemoryManager::Pool::Application,
369 0x8000000, code_size, &kernel.GetAppSystemResource(), resource_limit)}; 369 0x8000000, code_size, std::addressof(m_kernel.GetAppSystemResource()),
370 m_resource_limit)};
370 result.IsError()) { 371 result.IsError()) {
371 R_RETURN(result); 372 R_RETURN(result);
372 } 373 }
373 374
374 // Map process code region 375 // Map process code region
375 if (const Result result{page_table.MapProcessCode(page_table.GetCodeRegionStart(), 376 if (const Result result{m_page_table.MapProcessCode(m_page_table.GetCodeRegionStart(),
376 code_size / PageSize, KMemoryState::Code, 377 code_size / PageSize, KMemoryState::Code,
377 KMemoryPermission::None)}; 378 KMemoryPermission::None)};
378 result.IsError()) { 379 result.IsError()) {
379 R_RETURN(result); 380 R_RETURN(result);
380 } 381 }
@@ -382,7 +383,7 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
382 // Initialize process capabilities 383 // Initialize process capabilities
383 const auto& caps{metadata.GetKernelCapabilities()}; 384 const auto& caps{metadata.GetKernelCapabilities()};
384 if (const Result result{ 385 if (const Result result{
385 capabilities.InitializeForUserProcess(caps.data(), caps.size(), page_table)}; 386 m_capabilities.InitializeForUserProcess(caps.data(), caps.size(), m_page_table)};
386 result.IsError()) { 387 result.IsError()) {
387 R_RETURN(result); 388 R_RETURN(result);
388 } 389 }
@@ -392,12 +393,14 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
392 case FileSys::ProgramAddressSpaceType::Is32Bit: 393 case FileSys::ProgramAddressSpaceType::Is32Bit:
393 case FileSys::ProgramAddressSpaceType::Is36Bit: 394 case FileSys::ProgramAddressSpaceType::Is36Bit:
394 case FileSys::ProgramAddressSpaceType::Is39Bit: 395 case FileSys::ProgramAddressSpaceType::Is39Bit:
395 memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart(); 396 m_memory_usage_capacity =
397 m_page_table.GetHeapRegionEnd() - m_page_table.GetHeapRegionStart();
396 break; 398 break;
397 399
398 case FileSys::ProgramAddressSpaceType::Is32BitNoMap: 400 case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
399 memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart() + 401 m_memory_usage_capacity =
400 page_table.GetAliasRegionEnd() - page_table.GetAliasRegionStart(); 402 m_page_table.GetHeapRegionEnd() - m_page_table.GetHeapRegionStart() +
403 m_page_table.GetAliasRegionEnd() - m_page_table.GetAliasRegionStart();
401 break; 404 break;
402 405
403 default: 406 default:
@@ -406,33 +409,34 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
406 } 409 }
407 410
408 // Create TLS region 411 // Create TLS region
409 R_TRY(this->CreateThreadLocalRegion(std::addressof(plr_address))); 412 R_TRY(this->CreateThreadLocalRegion(std::addressof(m_plr_address)));
410 memory_reservation.Commit(); 413 memory_reservation.Commit();
411 414
412 R_RETURN(handle_table.Initialize(capabilities.GetHandleTableSize())); 415 R_RETURN(m_handle_table.Initialize(m_capabilities.GetHandleTableSize()));
413} 416}
414 417
415void KProcess::Run(s32 main_thread_priority, u64 stack_size) { 418void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
416 ASSERT(AllocateMainThreadStack(stack_size) == ResultSuccess); 419 ASSERT(this->AllocateMainThreadStack(stack_size) == ResultSuccess);
417 resource_limit->Reserve(LimitableResource::ThreadCountMax, 1); 420 m_resource_limit->Reserve(LimitableResource::ThreadCountMax, 1);
418 421
419 const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)}; 422 const std::size_t heap_capacity{m_memory_usage_capacity -
420 ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError()); 423 (m_main_thread_stack_size + m_image_size)};
424 ASSERT(!m_page_table.SetMaxHeapSize(heap_capacity).IsError());
421 425
422 ChangeState(State::Running); 426 this->ChangeState(State::Running);
423 427
424 SetupMainThread(kernel.System(), *this, main_thread_priority, main_thread_stack_top); 428 SetupMainThread(m_kernel.System(), *this, main_thread_priority, m_main_thread_stack_top);
425} 429}
426 430
427void KProcess::PrepareForTermination() { 431void KProcess::PrepareForTermination() {
428 ChangeState(State::Terminating); 432 this->ChangeState(State::Terminating);
429 433
430 const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) { 434 const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) {
431 for (auto* thread : in_thread_list) { 435 for (auto* thread : in_thread_list) {
432 if (thread->GetOwnerProcess() != this) 436 if (thread->GetOwnerProcess() != this)
433 continue; 437 continue;
434 438
435 if (thread == GetCurrentThreadPointer(kernel)) 439 if (thread == GetCurrentThreadPointer(m_kernel))
436 continue; 440 continue;
437 441
438 // TODO(Subv): When are the other running/ready threads terminated? 442 // TODO(Subv): When are the other running/ready threads terminated?
@@ -443,24 +447,24 @@ void KProcess::PrepareForTermination() {
443 } 447 }
444 }; 448 };
445 449
446 stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList()); 450 stop_threads(m_kernel.System().GlobalSchedulerContext().GetThreadList());
447 451
448 this->DeleteThreadLocalRegion(plr_address); 452 this->DeleteThreadLocalRegion(m_plr_address);
449 plr_address = 0; 453 m_plr_address = 0;
450 454
451 if (resource_limit) { 455 if (m_resource_limit) {
452 resource_limit->Release(LimitableResource::PhysicalMemoryMax, 456 m_resource_limit->Release(LimitableResource::PhysicalMemoryMax,
453 main_thread_stack_size + image_size); 457 m_main_thread_stack_size + m_image_size);
454 } 458 }
455 459
456 ChangeState(State::Terminated); 460 this->ChangeState(State::Terminated);
457} 461}
458 462
459void KProcess::Finalize() { 463void KProcess::Finalize() {
460 // Free all shared memory infos. 464 // Free all shared memory infos.
461 { 465 {
462 auto it = shared_memory_list.begin(); 466 auto it = m_shared_memory_list.begin();
463 while (it != shared_memory_list.end()) { 467 while (it != m_shared_memory_list.end()) {
464 KSharedMemoryInfo* info = *it; 468 KSharedMemoryInfo* info = *it;
465 KSharedMemory* shmem = info->GetSharedMemory(); 469 KSharedMemory* shmem = info->GetSharedMemory();
466 470
@@ -470,22 +474,22 @@ void KProcess::Finalize() {
470 474
471 shmem->Close(); 475 shmem->Close();
472 476
473 it = shared_memory_list.erase(it); 477 it = m_shared_memory_list.erase(it);
474 KSharedMemoryInfo::Free(kernel, info); 478 KSharedMemoryInfo::Free(m_kernel, info);
475 } 479 }
476 } 480 }
477 481
478 // Release memory to the resource limit. 482 // Release memory to the resource limit.
479 if (resource_limit != nullptr) { 483 if (m_resource_limit != nullptr) {
480 resource_limit->Close(); 484 m_resource_limit->Close();
481 resource_limit = nullptr; 485 m_resource_limit = nullptr;
482 } 486 }
483 487
484 // Finalize the page table. 488 // Finalize the page table.
485 page_table.Finalize(); 489 m_page_table.Finalize();
486 490
487 // Perform inherited finalization. 491 // Perform inherited finalization.
488 KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize(); 492 KSynchronizationObject::Finalize();
489} 493}
490 494
491Result KProcess::CreateThreadLocalRegion(VAddr* out) { 495Result KProcess::CreateThreadLocalRegion(VAddr* out) {
@@ -494,16 +498,16 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
494 498
495 // See if we can get a region from a partially used TLP. 499 // See if we can get a region from a partially used TLP.
496 { 500 {
497 KScopedSchedulerLock sl{kernel}; 501 KScopedSchedulerLock sl{m_kernel};
498 502
499 if (auto it = partially_used_tlp_tree.begin(); it != partially_used_tlp_tree.end()) { 503 if (auto it = m_partially_used_tlp_tree.begin(); it != m_partially_used_tlp_tree.end()) {
500 tlr = it->Reserve(); 504 tlr = it->Reserve();
501 ASSERT(tlr != 0); 505 ASSERT(tlr != 0);
502 506
503 if (it->IsAllUsed()) { 507 if (it->IsAllUsed()) {
504 tlp = std::addressof(*it); 508 tlp = std::addressof(*it);
505 partially_used_tlp_tree.erase(it); 509 m_partially_used_tlp_tree.erase(it);
506 fully_used_tlp_tree.insert(*tlp); 510 m_fully_used_tlp_tree.insert(*tlp);
507 } 511 }
508 512
509 *out = tlr; 513 *out = tlr;
@@ -512,12 +516,12 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
512 } 516 }
513 517
514 // Allocate a new page. 518 // Allocate a new page.
515 tlp = KThreadLocalPage::Allocate(kernel); 519 tlp = KThreadLocalPage::Allocate(m_kernel);
516 R_UNLESS(tlp != nullptr, ResultOutOfMemory); 520 R_UNLESS(tlp != nullptr, ResultOutOfMemory);
517 auto tlp_guard = SCOPE_GUARD({ KThreadLocalPage::Free(kernel, tlp); }); 521 auto tlp_guard = SCOPE_GUARD({ KThreadLocalPage::Free(m_kernel, tlp); });
518 522
519 // Initialize the new page. 523 // Initialize the new page.
520 R_TRY(tlp->Initialize(kernel, this)); 524 R_TRY(tlp->Initialize(m_kernel, this));
521 525
522 // Reserve a TLR. 526 // Reserve a TLR.
523 tlr = tlp->Reserve(); 527 tlr = tlp->Reserve();
@@ -525,11 +529,11 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
525 529
526 // Insert into our tree. 530 // Insert into our tree.
527 { 531 {
528 KScopedSchedulerLock sl{kernel}; 532 KScopedSchedulerLock sl{m_kernel};
529 if (tlp->IsAllUsed()) { 533 if (tlp->IsAllUsed()) {
530 fully_used_tlp_tree.insert(*tlp); 534 m_fully_used_tlp_tree.insert(*tlp);
531 } else { 535 } else {
532 partially_used_tlp_tree.insert(*tlp); 536 m_partially_used_tlp_tree.insert(*tlp);
533 } 537 }
534 } 538 }
535 539
@@ -544,25 +548,25 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
544 548
545 // Release the region. 549 // Release the region.
546 { 550 {
547 KScopedSchedulerLock sl{kernel}; 551 KScopedSchedulerLock sl{m_kernel};
548 552
549 // Try to find the page in the partially used list. 553 // Try to find the page in the partially used list.
550 auto it = partially_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize)); 554 auto it = m_partially_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize));
551 if (it == partially_used_tlp_tree.end()) { 555 if (it == m_partially_used_tlp_tree.end()) {
552 // If we don't find it, it has to be in the fully used list. 556 // If we don't find it, it has to be in the fully used list.
553 it = fully_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize)); 557 it = m_fully_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize));
554 R_UNLESS(it != fully_used_tlp_tree.end(), ResultInvalidAddress); 558 R_UNLESS(it != m_fully_used_tlp_tree.end(), ResultInvalidAddress);
555 559
556 // Release the region. 560 // Release the region.
557 it->Release(addr); 561 it->Release(addr);
558 562
559 // Move the page out of the fully used list. 563 // Move the page out of the fully used list.
560 KThreadLocalPage* tlp = std::addressof(*it); 564 KThreadLocalPage* tlp = std::addressof(*it);
561 fully_used_tlp_tree.erase(it); 565 m_fully_used_tlp_tree.erase(it);
562 if (tlp->IsAllFree()) { 566 if (tlp->IsAllFree()) {
563 page_to_free = tlp; 567 page_to_free = tlp;
564 } else { 568 } else {
565 partially_used_tlp_tree.insert(*tlp); 569 m_partially_used_tlp_tree.insert(*tlp);
566 } 570 }
567 } else { 571 } else {
568 // Release the region. 572 // Release the region.
@@ -571,7 +575,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
571 // Handle the all-free case. 575 // Handle the all-free case.
572 KThreadLocalPage* tlp = std::addressof(*it); 576 KThreadLocalPage* tlp = std::addressof(*it);
573 if (tlp->IsAllFree()) { 577 if (tlp->IsAllFree()) {
574 partially_used_tlp_tree.erase(it); 578 m_partially_used_tlp_tree.erase(it);
575 page_to_free = tlp; 579 page_to_free = tlp;
576 } 580 }
577 } 581 }
@@ -581,7 +585,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
581 if (page_to_free != nullptr) { 585 if (page_to_free != nullptr) {
582 page_to_free->Finalize(); 586 page_to_free->Finalize();
583 587
584 KThreadLocalPage::Free(kernel, page_to_free); 588 KThreadLocalPage::Free(m_kernel, page_to_free);
585 } 589 }
586 590
587 R_SUCCEED(); 591 R_SUCCEED();
@@ -589,11 +593,11 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
589 593
590bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size, 594bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
591 DebugWatchpointType type) { 595 DebugWatchpointType type) {
592 const auto watch{std::find_if(watchpoints.begin(), watchpoints.end(), [&](const auto& wp) { 596 const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) {
593 return wp.type == DebugWatchpointType::None; 597 return wp.type == DebugWatchpointType::None;
594 })}; 598 })};
595 599
596 if (watch == watchpoints.end()) { 600 if (watch == m_watchpoints.end()) {
597 return false; 601 return false;
598 } 602 }
599 603
@@ -602,7 +606,7 @@ bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
602 watch->type = type; 606 watch->type = type;
603 607
604 for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) { 608 for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) {
605 debug_page_refcounts[page]++; 609 m_debug_page_refcounts[page]++;
606 system.Memory().MarkRegionDebug(page, PageSize, true); 610 system.Memory().MarkRegionDebug(page, PageSize, true);
607 } 611 }
608 612
@@ -611,11 +615,11 @@ bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
611 615
612bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size, 616bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
613 DebugWatchpointType type) { 617 DebugWatchpointType type) {
614 const auto watch{std::find_if(watchpoints.begin(), watchpoints.end(), [&](const auto& wp) { 618 const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) {
615 return wp.start_address == addr && wp.end_address == addr + size && wp.type == type; 619 return wp.start_address == addr && wp.end_address == addr + size && wp.type == type;
616 })}; 620 })};
617 621
618 if (watch == watchpoints.end()) { 622 if (watch == m_watchpoints.end()) {
619 return false; 623 return false;
620 } 624 }
621 625
@@ -624,8 +628,8 @@ bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
624 watch->type = DebugWatchpointType::None; 628 watch->type = DebugWatchpointType::None;
625 629
626 for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) { 630 for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) {
627 debug_page_refcounts[page]--; 631 m_debug_page_refcounts[page]--;
628 if (!debug_page_refcounts[page]) { 632 if (!m_debug_page_refcounts[page]) {
629 system.Memory().MarkRegionDebug(page, PageSize, false); 633 system.Memory().MarkRegionDebug(page, PageSize, false);
630 } 634 }
631 } 635 }
@@ -636,11 +640,11 @@ bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
636void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) { 640void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
637 const auto ReprotectSegment = [&](const CodeSet::Segment& segment, 641 const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
638 Svc::MemoryPermission permission) { 642 Svc::MemoryPermission permission) {
639 page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission); 643 m_page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
640 }; 644 };
641 645
642 kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(), 646 m_kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(),
643 code_set.memory.size()); 647 code_set.memory.size());
644 648
645 ReprotectSegment(code_set.CodeSegment(), Svc::MemoryPermission::ReadExecute); 649 ReprotectSegment(code_set.CodeSegment(), Svc::MemoryPermission::ReadExecute);
646 ReprotectSegment(code_set.RODataSegment(), Svc::MemoryPermission::Read); 650 ReprotectSegment(code_set.RODataSegment(), Svc::MemoryPermission::Read);
@@ -648,35 +652,35 @@ void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
648} 652}
649 653
650bool KProcess::IsSignaled() const { 654bool KProcess::IsSignaled() const {
651 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 655 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
652 return is_signaled; 656 return m_is_signaled;
653} 657}
654 658
655KProcess::KProcess(KernelCore& kernel_) 659KProcess::KProcess(KernelCore& kernel)
656 : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{kernel_.System()}, 660 : KAutoObjectWithSlabHeapAndContainer{kernel}, m_page_table{m_kernel.System()},
657 handle_table{kernel_}, address_arbiter{kernel_.System()}, condition_var{kernel_.System()}, 661 m_handle_table{m_kernel}, m_address_arbiter{m_kernel.System()},
658 state_lock{kernel_}, list_lock{kernel_} {} 662 m_condition_var{m_kernel.System()}, m_state_lock{m_kernel}, m_list_lock{m_kernel} {}
659 663
660KProcess::~KProcess() = default; 664KProcess::~KProcess() = default;
661 665
662void KProcess::ChangeState(State new_state) { 666void KProcess::ChangeState(State new_state) {
663 if (state == new_state) { 667 if (m_state == new_state) {
664 return; 668 return;
665 } 669 }
666 670
667 state = new_state; 671 m_state = new_state;
668 is_signaled = true; 672 m_is_signaled = true;
669 NotifyAvailable(); 673 this->NotifyAvailable();
670} 674}
671 675
672Result KProcess::AllocateMainThreadStack(std::size_t stack_size) { 676Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
673 // Ensure that we haven't already allocated stack. 677 // Ensure that we haven't already allocated stack.
674 ASSERT(main_thread_stack_size == 0); 678 ASSERT(m_main_thread_stack_size == 0);
675 679
676 // Ensure that we're allocating a valid stack. 680 // Ensure that we're allocating a valid stack.
677 stack_size = Common::AlignUp(stack_size, PageSize); 681 stack_size = Common::AlignUp(stack_size, PageSize);
678 // R_UNLESS(stack_size + image_size <= m_max_process_memory, ResultOutOfMemory); 682 // R_UNLESS(stack_size + image_size <= m_max_process_memory, ResultOutOfMemory);
679 R_UNLESS(stack_size + image_size >= image_size, ResultOutOfMemory); 683 R_UNLESS(stack_size + m_image_size >= m_image_size, ResultOutOfMemory);
680 684
681 // Place a tentative reservation of memory for our new stack. 685 // Place a tentative reservation of memory for our new stack.
682 KScopedResourceReservation mem_reservation(this, Svc::LimitableResource::PhysicalMemoryMax, 686 KScopedResourceReservation mem_reservation(this, Svc::LimitableResource::PhysicalMemoryMax,
@@ -686,11 +690,11 @@ Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
686 // Allocate and map our stack. 690 // Allocate and map our stack.
687 if (stack_size) { 691 if (stack_size) {
688 KProcessAddress stack_bottom; 692 KProcessAddress stack_bottom;
689 R_TRY(page_table.MapPages(std::addressof(stack_bottom), stack_size / PageSize, 693 R_TRY(m_page_table.MapPages(std::addressof(stack_bottom), stack_size / PageSize,
690 KMemoryState::Stack, KMemoryPermission::UserReadWrite)); 694 KMemoryState::Stack, KMemoryPermission::UserReadWrite));
691 695
692 main_thread_stack_top = stack_bottom + stack_size; 696 m_main_thread_stack_top = stack_bottom + stack_size;
693 main_thread_stack_size = stack_size; 697 m_main_thread_stack_size = stack_size;
694 } 698 }
695 699
696 // We succeeded! Commit our memory reservation. 700 // We succeeded! Commit our memory reservation.
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index 549809000..7b7a971b8 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -68,7 +68,7 @@ class KProcess final : public KAutoObjectWithSlabHeapAndContainer<KProcess, KWor
68 KERNEL_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject); 68 KERNEL_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject);
69 69
70public: 70public:
71 explicit KProcess(KernelCore& kernel_); 71 explicit KProcess(KernelCore& kernel);
72 ~KProcess() override; 72 ~KProcess() override;
73 73
74 enum class State { 74 enum class State {
@@ -107,66 +107,76 @@ public:
107 107
108 /// Gets a reference to the process' page table. 108 /// Gets a reference to the process' page table.
109 KPageTable& PageTable() { 109 KPageTable& PageTable() {
110 return page_table; 110 return m_page_table;
111 } 111 }
112 112
113 /// Gets const a reference to the process' page table. 113 /// Gets const a reference to the process' page table.
114 const KPageTable& PageTable() const { 114 const KPageTable& PageTable() const {
115 return page_table; 115 return m_page_table;
116 }
117
118 /// Gets a reference to the process' page table.
119 KPageTable& GetPageTable() {
120 return m_page_table;
121 }
122
123 /// Gets const a reference to the process' page table.
124 const KPageTable& GetPageTable() const {
125 return m_page_table;
116 } 126 }
117 127
118 /// Gets a reference to the process' handle table. 128 /// Gets a reference to the process' handle table.
119 KHandleTable& GetHandleTable() { 129 KHandleTable& GetHandleTable() {
120 return handle_table; 130 return m_handle_table;
121 } 131 }
122 132
123 /// Gets a const reference to the process' handle table. 133 /// Gets a const reference to the process' handle table.
124 const KHandleTable& GetHandleTable() const { 134 const KHandleTable& GetHandleTable() const {
125 return handle_table; 135 return m_handle_table;
126 } 136 }
127 137
128 Result SignalToAddress(VAddr address) { 138 Result SignalToAddress(VAddr address) {
129 return condition_var.SignalToAddress(address); 139 return m_condition_var.SignalToAddress(address);
130 } 140 }
131 141
132 Result WaitForAddress(Handle handle, VAddr address, u32 tag) { 142 Result WaitForAddress(Handle handle, VAddr address, u32 tag) {
133 return condition_var.WaitForAddress(handle, address, tag); 143 return m_condition_var.WaitForAddress(handle, address, tag);
134 } 144 }
135 145
136 void SignalConditionVariable(u64 cv_key, int32_t count) { 146 void SignalConditionVariable(u64 cv_key, int32_t count) {
137 return condition_var.Signal(cv_key, count); 147 return m_condition_var.Signal(cv_key, count);
138 } 148 }
139 149
140 Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) { 150 Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
141 R_RETURN(condition_var.Wait(address, cv_key, tag, ns)); 151 R_RETURN(m_condition_var.Wait(address, cv_key, tag, ns));
142 } 152 }
143 153
144 Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) { 154 Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) {
145 R_RETURN(address_arbiter.SignalToAddress(address, signal_type, value, count)); 155 R_RETURN(m_address_arbiter.SignalToAddress(address, signal_type, value, count));
146 } 156 }
147 157
148 Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value, 158 Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
149 s64 timeout) { 159 s64 timeout) {
150 R_RETURN(address_arbiter.WaitForAddress(address, arb_type, value, timeout)); 160 R_RETURN(m_address_arbiter.WaitForAddress(address, arb_type, value, timeout));
151 } 161 }
152 162
153 VAddr GetProcessLocalRegionAddress() const { 163 VAddr GetProcessLocalRegionAddress() const {
154 return plr_address; 164 return m_plr_address;
155 } 165 }
156 166
157 /// Gets the current status of the process 167 /// Gets the current status of the process
158 State GetState() const { 168 State GetState() const {
159 return state; 169 return m_state;
160 } 170 }
161 171
162 /// Gets the unique ID that identifies this particular process. 172 /// Gets the unique ID that identifies this particular process.
163 u64 GetProcessID() const { 173 u64 GetProcessId() const {
164 return process_id; 174 return m_process_id;
165 } 175 }
166 176
167 /// Gets the program ID corresponding to this process. 177 /// Gets the program ID corresponding to this process.
168 u64 GetProgramID() const { 178 u64 GetProgramId() const {
169 return program_id; 179 return m_program_id;
170 } 180 }
171 181
172 /// Gets the resource limit descriptor for this process 182 /// Gets the resource limit descriptor for this process
@@ -174,7 +184,7 @@ public:
174 184
175 /// Gets the ideal CPU core ID for this process 185 /// Gets the ideal CPU core ID for this process
176 u8 GetIdealCoreId() const { 186 u8 GetIdealCoreId() const {
177 return ideal_core; 187 return m_ideal_core;
178 } 188 }
179 189
180 /// Checks if the specified thread priority is valid. 190 /// Checks if the specified thread priority is valid.
@@ -184,17 +194,17 @@ public:
184 194
185 /// Gets the bitmask of allowed cores that this process' threads can run on. 195 /// Gets the bitmask of allowed cores that this process' threads can run on.
186 u64 GetCoreMask() const { 196 u64 GetCoreMask() const {
187 return capabilities.GetCoreMask(); 197 return m_capabilities.GetCoreMask();
188 } 198 }
189 199
190 /// Gets the bitmask of allowed thread priorities. 200 /// Gets the bitmask of allowed thread priorities.
191 u64 GetPriorityMask() const { 201 u64 GetPriorityMask() const {
192 return capabilities.GetPriorityMask(); 202 return m_capabilities.GetPriorityMask();
193 } 203 }
194 204
195 /// Gets the amount of secure memory to allocate for memory management. 205 /// Gets the amount of secure memory to allocate for memory management.
196 u32 GetSystemResourceSize() const { 206 u32 GetSystemResourceSize() const {
197 return system_resource_size; 207 return m_system_resource_size;
198 } 208 }
199 209
200 /// Gets the amount of secure memory currently in use for memory management. 210 /// Gets the amount of secure memory currently in use for memory management.
@@ -214,67 +224,67 @@ public:
214 224
215 /// Whether this process is an AArch64 or AArch32 process. 225 /// Whether this process is an AArch64 or AArch32 process.
216 bool Is64BitProcess() const { 226 bool Is64BitProcess() const {
217 return is_64bit_process; 227 return m_is_64bit_process;
218 } 228 }
219 229
220 [[nodiscard]] bool IsSuspended() const { 230 bool IsSuspended() const {
221 return is_suspended; 231 return m_is_suspended;
222 } 232 }
223 233
224 void SetSuspended(bool suspended) { 234 void SetSuspended(bool suspended) {
225 is_suspended = suspended; 235 m_is_suspended = suspended;
226 } 236 }
227 237
228 /// Gets the total running time of the process instance in ticks. 238 /// Gets the total running time of the process instance in ticks.
229 u64 GetCPUTimeTicks() const { 239 u64 GetCPUTimeTicks() const {
230 return total_process_running_time_ticks; 240 return m_total_process_running_time_ticks;
231 } 241 }
232 242
233 /// Updates the total running time, adding the given ticks to it. 243 /// Updates the total running time, adding the given ticks to it.
234 void UpdateCPUTimeTicks(u64 ticks) { 244 void UpdateCPUTimeTicks(u64 ticks) {
235 total_process_running_time_ticks += ticks; 245 m_total_process_running_time_ticks += ticks;
236 } 246 }
237 247
238 /// Gets the process schedule count, used for thread yelding 248 /// Gets the process schedule count, used for thread yielding
239 s64 GetScheduledCount() const { 249 s64 GetScheduledCount() const {
240 return schedule_count; 250 return m_schedule_count;
241 } 251 }
242 252
243 /// Increments the process schedule count, used for thread yielding. 253 /// Increments the process schedule count, used for thread yielding.
244 void IncrementScheduledCount() { 254 void IncrementScheduledCount() {
245 ++schedule_count; 255 ++m_schedule_count;
246 } 256 }
247 257
248 void IncrementRunningThreadCount(); 258 void IncrementRunningThreadCount();
249 void DecrementRunningThreadCount(); 259 void DecrementRunningThreadCount();
250 260
251 void SetRunningThread(s32 core, KThread* thread, u64 idle_count) { 261 void SetRunningThread(s32 core, KThread* thread, u64 idle_count) {
252 running_threads[core] = thread; 262 m_running_threads[core] = thread;
253 running_thread_idle_counts[core] = idle_count; 263 m_running_thread_idle_counts[core] = idle_count;
254 } 264 }
255 265
256 void ClearRunningThread(KThread* thread) { 266 void ClearRunningThread(KThread* thread) {
257 for (size_t i = 0; i < running_threads.size(); ++i) { 267 for (size_t i = 0; i < m_running_threads.size(); ++i) {
258 if (running_threads[i] == thread) { 268 if (m_running_threads[i] == thread) {
259 running_threads[i] = nullptr; 269 m_running_threads[i] = nullptr;
260 } 270 }
261 } 271 }
262 } 272 }
263 273
264 [[nodiscard]] KThread* GetRunningThread(s32 core) const { 274 [[nodiscard]] KThread* GetRunningThread(s32 core) const {
265 return running_threads[core]; 275 return m_running_threads[core];
266 } 276 }
267 277
268 bool ReleaseUserException(KThread* thread); 278 bool ReleaseUserException(KThread* thread);
269 279
270 [[nodiscard]] KThread* GetPinnedThread(s32 core_id) const { 280 [[nodiscard]] KThread* GetPinnedThread(s32 core_id) const {
271 ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES)); 281 ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
272 return pinned_threads[core_id]; 282 return m_pinned_threads[core_id];
273 } 283 }
274 284
275 /// Gets 8 bytes of random data for svcGetInfo RandomEntropy 285 /// Gets 8 bytes of random data for svcGetInfo RandomEntropy
276 u64 GetRandomEntropy(std::size_t index) const { 286 u64 GetRandomEntropy(std::size_t index) const {
277 return random_entropy.at(index); 287 return m_random_entropy.at(index);
278 } 288 }
279 289
280 /// Retrieves the total physical memory available to this process in bytes. 290 /// Retrieves the total physical memory available to this process in bytes.
@@ -293,7 +303,7 @@ public:
293 303
294 /// Gets the list of all threads created with this process as their owner. 304 /// Gets the list of all threads created with this process as their owner.
295 std::list<KThread*>& GetThreadList() { 305 std::list<KThread*>& GetThreadList() {
296 return thread_list; 306 return m_thread_list;
297 } 307 }
298 308
299 /// Registers a thread as being created under this process, 309 /// Registers a thread as being created under this process,
@@ -345,15 +355,15 @@ public:
345 void LoadModule(CodeSet code_set, VAddr base_addr); 355 void LoadModule(CodeSet code_set, VAddr base_addr);
346 356
347 bool IsInitialized() const override { 357 bool IsInitialized() const override {
348 return is_initialized; 358 return m_is_initialized;
349 } 359 }
350 360
351 static void PostDestroy([[maybe_unused]] uintptr_t arg) {} 361 static void PostDestroy(uintptr_t arg) {}
352 362
353 void Finalize() override; 363 void Finalize() override;
354 364
355 u64 GetId() const override { 365 u64 GetId() const override {
356 return GetProcessID(); 366 return GetProcessId();
357 } 367 }
358 368
359 bool IsSignaled() const override; 369 bool IsSignaled() const override;
@@ -367,7 +377,7 @@ public:
367 void UnpinThread(KThread* thread); 377 void UnpinThread(KThread* thread);
368 378
369 KLightLock& GetStateLock() { 379 KLightLock& GetStateLock() {
370 return state_lock; 380 return m_state_lock;
371 } 381 }
372 382
373 Result AddSharedMemory(KSharedMemory* shmem, VAddr address, size_t size); 383 Result AddSharedMemory(KSharedMemory* shmem, VAddr address, size_t size);
@@ -392,30 +402,34 @@ public:
392 bool RemoveWatchpoint(Core::System& system, VAddr addr, u64 size, DebugWatchpointType type); 402 bool RemoveWatchpoint(Core::System& system, VAddr addr, u64 size, DebugWatchpointType type);
393 403
394 const std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>& GetWatchpoints() const { 404 const std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>& GetWatchpoints() const {
395 return watchpoints; 405 return m_watchpoints;
406 }
407
408 const std::string& GetName() {
409 return name;
396 } 410 }
397 411
398private: 412private:
399 void PinThread(s32 core_id, KThread* thread) { 413 void PinThread(s32 core_id, KThread* thread) {
400 ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES)); 414 ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
401 ASSERT(thread != nullptr); 415 ASSERT(thread != nullptr);
402 ASSERT(pinned_threads[core_id] == nullptr); 416 ASSERT(m_pinned_threads[core_id] == nullptr);
403 pinned_threads[core_id] = thread; 417 m_pinned_threads[core_id] = thread;
404 } 418 }
405 419
406 void UnpinThread(s32 core_id, KThread* thread) { 420 void UnpinThread(s32 core_id, KThread* thread) {
407 ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES)); 421 ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
408 ASSERT(thread != nullptr); 422 ASSERT(thread != nullptr);
409 ASSERT(pinned_threads[core_id] == thread); 423 ASSERT(m_pinned_threads[core_id] == thread);
410 pinned_threads[core_id] = nullptr; 424 m_pinned_threads[core_id] = nullptr;
411 } 425 }
412 426
413 void FinalizeHandleTable() { 427 void FinalizeHandleTable() {
414 // Finalize the table. 428 // Finalize the table.
415 handle_table.Finalize(); 429 m_handle_table.Finalize();
416 430
417 // Note that the table is finalized. 431 // Note that the table is finalized.
418 is_handle_table_initialized = false; 432 m_is_handle_table_initialized = false;
419 } 433 }
420 434
421 void ChangeState(State new_state); 435 void ChangeState(State new_state);
@@ -424,105 +438,107 @@ private:
424 Result AllocateMainThreadStack(std::size_t stack_size); 438 Result AllocateMainThreadStack(std::size_t stack_size);
425 439
426 /// Memory manager for this process 440 /// Memory manager for this process
427 KPageTable page_table; 441 KPageTable m_page_table;
428 442
429 /// Current status of the process 443 /// Current status of the process
430 State state{}; 444 State m_state{};
431 445
432 /// The ID of this process 446 /// The ID of this process
433 u64 process_id = 0; 447 u64 m_process_id = 0;
434 448
435 /// Title ID corresponding to the process 449 /// Title ID corresponding to the process
436 u64 program_id = 0; 450 u64 m_program_id = 0;
437 451
438 /// Specifies additional memory to be reserved for the process's memory management by the 452 /// Specifies additional memory to be reserved for the process's memory management by the
439 /// system. When this is non-zero, secure memory is allocated and used for page table allocation 453 /// system. When this is non-zero, secure memory is allocated and used for page table allocation
440 /// instead of using the normal global page tables/memory block management. 454 /// instead of using the normal global page tables/memory block management.
441 u32 system_resource_size = 0; 455 u32 m_system_resource_size = 0;
442 456
443 /// Resource limit descriptor for this process 457 /// Resource limit descriptor for this process
444 KResourceLimit* resource_limit{}; 458 KResourceLimit* m_resource_limit{};
445 459
446 VAddr system_resource_address{}; 460 VAddr m_system_resource_address{};
447 461
448 /// The ideal CPU core for this process, threads are scheduled on this core by default. 462 /// The ideal CPU core for this process, threads are scheduled on this core by default.
449 u8 ideal_core = 0; 463 u8 m_ideal_core = 0;
450 464
451 /// Contains the parsed process capability descriptors. 465 /// Contains the parsed process capability descriptors.
452 ProcessCapabilities capabilities; 466 ProcessCapabilities m_capabilities;
453 467
454 /// Whether or not this process is AArch64, or AArch32. 468 /// Whether or not this process is AArch64, or AArch32.
455 /// By default, we currently assume this is true, unless otherwise 469 /// By default, we currently assume this is true, unless otherwise
456 /// specified by metadata provided to the process during loading. 470 /// specified by metadata provided to the process during loading.
457 bool is_64bit_process = true; 471 bool m_is_64bit_process = true;
458 472
459 /// Total running time for the process in ticks. 473 /// Total running time for the process in ticks.
460 std::atomic<u64> total_process_running_time_ticks = 0; 474 std::atomic<u64> m_total_process_running_time_ticks = 0;
461 475
462 /// Per-process handle table for storing created object handles in. 476 /// Per-process handle table for storing created object handles in.
463 KHandleTable handle_table; 477 KHandleTable m_handle_table;
464 478
465 /// Per-process address arbiter. 479 /// Per-process address arbiter.
466 KAddressArbiter address_arbiter; 480 KAddressArbiter m_address_arbiter;
467 481
468 /// The per-process mutex lock instance used for handling various 482 /// The per-process mutex lock instance used for handling various
469 /// forms of services, such as lock arbitration, and condition 483 /// forms of services, such as lock arbitration, and condition
470 /// variable related facilities. 484 /// variable related facilities.
471 KConditionVariable condition_var; 485 KConditionVariable m_condition_var;
472 486
473 /// Address indicating the location of the process' dedicated TLS region. 487 /// Address indicating the location of the process' dedicated TLS region.
474 VAddr plr_address = 0; 488 VAddr m_plr_address = 0;
475 489
476 /// Random values for svcGetInfo RandomEntropy 490 /// Random values for svcGetInfo RandomEntropy
477 std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{}; 491 std::array<u64, RANDOM_ENTROPY_SIZE> m_random_entropy{};
478 492
479 /// List of threads that are running with this process as their owner. 493 /// List of threads that are running with this process as their owner.
480 std::list<KThread*> thread_list; 494 std::list<KThread*> m_thread_list;
481 495
482 /// List of shared memory that are running with this process as their owner. 496 /// List of shared memory that are running with this process as their owner.
483 std::list<KSharedMemoryInfo*> shared_memory_list; 497 std::list<KSharedMemoryInfo*> m_shared_memory_list;
484 498
485 /// Address of the top of the main thread's stack 499 /// Address of the top of the main thread's stack
486 VAddr main_thread_stack_top{}; 500 VAddr m_main_thread_stack_top{};
487 501
488 /// Size of the main thread's stack 502 /// Size of the main thread's stack
489 std::size_t main_thread_stack_size{}; 503 std::size_t m_main_thread_stack_size{};
490 504
491 /// Memory usage capacity for the process 505 /// Memory usage capacity for the process
492 std::size_t memory_usage_capacity{}; 506 std::size_t m_memory_usage_capacity{};
493 507
494 /// Process total image size 508 /// Process total image size
495 std::size_t image_size{}; 509 std::size_t m_image_size{};
496 510
497 /// Schedule count of this process 511 /// Schedule count of this process
498 s64 schedule_count{}; 512 s64 m_schedule_count{};
513
514 size_t m_memory_release_hint{};
499 515
500 size_t memory_release_hint{}; 516 std::string name{};
501 517
502 bool is_signaled{}; 518 bool m_is_signaled{};
503 bool is_suspended{}; 519 bool m_is_suspended{};
504 bool is_immortal{}; 520 bool m_is_immortal{};
505 bool is_handle_table_initialized{}; 521 bool m_is_handle_table_initialized{};
506 bool is_initialized{}; 522 bool m_is_initialized{};
507 523
508 std::atomic<u16> num_running_threads{}; 524 std::atomic<u16> m_num_running_threads{};
509 525
510 std::array<KThread*, Core::Hardware::NUM_CPU_CORES> running_threads{}; 526 std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_running_threads{};
511 std::array<u64, Core::Hardware::NUM_CPU_CORES> running_thread_idle_counts{}; 527 std::array<u64, Core::Hardware::NUM_CPU_CORES> m_running_thread_idle_counts{};
512 std::array<KThread*, Core::Hardware::NUM_CPU_CORES> pinned_threads{}; 528 std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_pinned_threads{};
513 std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS> watchpoints{}; 529 std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS> m_watchpoints{};
514 std::map<VAddr, u64> debug_page_refcounts; 530 std::map<VAddr, u64> m_debug_page_refcounts;
515 531
516 KThread* exception_thread{}; 532 KThread* m_exception_thread{};
517 533
518 KLightLock state_lock; 534 KLightLock m_state_lock;
519 KLightLock list_lock; 535 KLightLock m_list_lock;
520 536
521 using TLPTree = 537 using TLPTree =
522 Common::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>; 538 Common::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>;
523 using TLPIterator = TLPTree::iterator; 539 using TLPIterator = TLPTree::iterator;
524 TLPTree fully_used_tlp_tree; 540 TLPTree m_fully_used_tlp_tree;
525 TLPTree partially_used_tlp_tree; 541 TLPTree m_partially_used_tlp_tree;
526}; 542};
527 543
528} // namespace Kernel 544} // namespace Kernel
diff --git a/src/core/hle/kernel/k_readable_event.cpp b/src/core/hle/kernel/k_readable_event.cpp
index 5c942d47c..c30662666 100644
--- a/src/core/hle/kernel/k_readable_event.cpp
+++ b/src/core/hle/kernel/k_readable_event.cpp
@@ -11,7 +11,7 @@
11 11
12namespace Kernel { 12namespace Kernel {
13 13
14KReadableEvent::KReadableEvent(KernelCore& kernel_) : KSynchronizationObject{kernel_} {} 14KReadableEvent::KReadableEvent(KernelCore& kernel) : KSynchronizationObject{kernel} {}
15 15
16KReadableEvent::~KReadableEvent() = default; 16KReadableEvent::~KReadableEvent() = default;
17 17
@@ -25,7 +25,7 @@ void KReadableEvent::Initialize(KEvent* parent) {
25} 25}
26 26
27bool KReadableEvent::IsSignaled() const { 27bool KReadableEvent::IsSignaled() const {
28 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); 28 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
29 29
30 return m_is_signaled; 30 return m_is_signaled;
31} 31}
@@ -33,7 +33,7 @@ bool KReadableEvent::IsSignaled() const {
33void KReadableEvent::Destroy() { 33void KReadableEvent::Destroy() {
34 if (m_parent) { 34 if (m_parent) {
35 { 35 {
36 KScopedSchedulerLock sl{kernel}; 36 KScopedSchedulerLock sl{m_kernel};
37 m_parent->OnReadableEventDestroyed(); 37 m_parent->OnReadableEventDestroyed();
38 } 38 }
39 m_parent->Close(); 39 m_parent->Close();
@@ -41,31 +41,29 @@ void KReadableEvent::Destroy() {
41} 41}
42 42
43Result KReadableEvent::Signal() { 43Result KReadableEvent::Signal() {
44 KScopedSchedulerLock lk{kernel}; 44 KScopedSchedulerLock lk{m_kernel};
45 45
46 if (!m_is_signaled) { 46 if (!m_is_signaled) {
47 m_is_signaled = true; 47 m_is_signaled = true;
48 this->NotifyAvailable(); 48 this->NotifyAvailable();
49 } 49 }
50 50
51 return ResultSuccess; 51 R_SUCCEED();
52} 52}
53 53
54Result KReadableEvent::Clear() { 54Result KReadableEvent::Clear() {
55 this->Reset(); 55 this->Reset();
56 56
57 return ResultSuccess; 57 R_SUCCEED();
58} 58}
59 59
60Result KReadableEvent::Reset() { 60Result KReadableEvent::Reset() {
61 KScopedSchedulerLock lk{kernel}; 61 KScopedSchedulerLock lk{m_kernel};
62 62
63 if (!m_is_signaled) { 63 R_UNLESS(m_is_signaled, ResultInvalidState);
64 return ResultInvalidState;
65 }
66 64
67 m_is_signaled = false; 65 m_is_signaled = false;
68 return ResultSuccess; 66 R_SUCCEED();
69} 67}
70 68
71} // namespace Kernel 69} // namespace Kernel
diff --git a/src/core/hle/kernel/k_readable_event.h b/src/core/hle/kernel/k_readable_event.h
index 743f96bf5..d2ec36323 100644
--- a/src/core/hle/kernel/k_readable_event.h
+++ b/src/core/hle/kernel/k_readable_event.h
@@ -17,7 +17,7 @@ class KReadableEvent : public KSynchronizationObject {
17 KERNEL_AUTOOBJECT_TRAITS(KReadableEvent, KSynchronizationObject); 17 KERNEL_AUTOOBJECT_TRAITS(KReadableEvent, KSynchronizationObject);
18 18
19public: 19public:
20 explicit KReadableEvent(KernelCore& kernel_); 20 explicit KReadableEvent(KernelCore& kernel);
21 ~KReadableEvent() override; 21 ~KReadableEvent() override;
22 22
23 void Initialize(KEvent* parent); 23 void Initialize(KEvent* parent);
diff --git a/src/core/hle/kernel/k_resource_limit.cpp b/src/core/hle/kernel/k_resource_limit.cpp
index 626517619..fcee26a29 100644
--- a/src/core/hle/kernel/k_resource_limit.cpp
+++ b/src/core/hle/kernel/k_resource_limit.cpp
@@ -11,12 +11,12 @@
11namespace Kernel { 11namespace Kernel {
12constexpr s64 DefaultTimeout = 10000000000; // 10 seconds 12constexpr s64 DefaultTimeout = 10000000000; // 10 seconds
13 13
14KResourceLimit::KResourceLimit(KernelCore& kernel_) 14KResourceLimit::KResourceLimit(KernelCore& kernel)
15 : KAutoObjectWithSlabHeapAndContainer{kernel_}, lock{kernel_}, cond_var{kernel_} {} 15 : KAutoObjectWithSlabHeapAndContainer{kernel}, m_lock{m_kernel}, m_cond_var{m_kernel} {}
16KResourceLimit::~KResourceLimit() = default; 16KResourceLimit::~KResourceLimit() = default;
17 17
18void KResourceLimit::Initialize(const Core::Timing::CoreTiming* core_timing_) { 18void KResourceLimit::Initialize(const Core::Timing::CoreTiming* core_timing) {
19 core_timing = core_timing_; 19 m_core_timing = core_timing;
20} 20}
21 21
22void KResourceLimit::Finalize() {} 22void KResourceLimit::Finalize() {}
@@ -25,11 +25,11 @@ s64 KResourceLimit::GetLimitValue(LimitableResource which) const {
25 const auto index = static_cast<std::size_t>(which); 25 const auto index = static_cast<std::size_t>(which);
26 s64 value{}; 26 s64 value{};
27 { 27 {
28 KScopedLightLock lk{lock}; 28 KScopedLightLock lk{m_lock};
29 value = limit_values[index]; 29 value = m_limit_values[index];
30 ASSERT(value >= 0); 30 ASSERT(value >= 0);
31 ASSERT(current_values[index] <= limit_values[index]); 31 ASSERT(m_current_values[index] <= m_limit_values[index]);
32 ASSERT(current_hints[index] <= current_values[index]); 32 ASSERT(m_current_hints[index] <= m_current_values[index]);
33 } 33 }
34 return value; 34 return value;
35} 35}
@@ -38,11 +38,11 @@ s64 KResourceLimit::GetCurrentValue(LimitableResource which) const {
38 const auto index = static_cast<std::size_t>(which); 38 const auto index = static_cast<std::size_t>(which);
39 s64 value{}; 39 s64 value{};
40 { 40 {
41 KScopedLightLock lk{lock}; 41 KScopedLightLock lk{m_lock};
42 value = current_values[index]; 42 value = m_current_values[index];
43 ASSERT(value >= 0); 43 ASSERT(value >= 0);
44 ASSERT(current_values[index] <= limit_values[index]); 44 ASSERT(m_current_values[index] <= m_limit_values[index]);
45 ASSERT(current_hints[index] <= current_values[index]); 45 ASSERT(m_current_hints[index] <= m_current_values[index]);
46 } 46 }
47 return value; 47 return value;
48} 48}
@@ -51,11 +51,11 @@ s64 KResourceLimit::GetPeakValue(LimitableResource which) const {
51 const auto index = static_cast<std::size_t>(which); 51 const auto index = static_cast<std::size_t>(which);
52 s64 value{}; 52 s64 value{};
53 { 53 {
54 KScopedLightLock lk{lock}; 54 KScopedLightLock lk{m_lock};
55 value = peak_values[index]; 55 value = m_peak_values[index];
56 ASSERT(value >= 0); 56 ASSERT(value >= 0);
57 ASSERT(current_values[index] <= limit_values[index]); 57 ASSERT(m_current_values[index] <= m_limit_values[index]);
58 ASSERT(current_hints[index] <= current_values[index]); 58 ASSERT(m_current_hints[index] <= m_current_values[index]);
59 } 59 }
60 return value; 60 return value;
61} 61}
@@ -64,11 +64,11 @@ s64 KResourceLimit::GetFreeValue(LimitableResource which) const {
64 const auto index = static_cast<std::size_t>(which); 64 const auto index = static_cast<std::size_t>(which);
65 s64 value{}; 65 s64 value{};
66 { 66 {
67 KScopedLightLock lk(lock); 67 KScopedLightLock lk(m_lock);
68 ASSERT(current_values[index] >= 0); 68 ASSERT(m_current_values[index] >= 0);
69 ASSERT(current_values[index] <= limit_values[index]); 69 ASSERT(m_current_values[index] <= m_limit_values[index]);
70 ASSERT(current_hints[index] <= current_values[index]); 70 ASSERT(m_current_hints[index] <= m_current_values[index]);
71 value = limit_values[index] - current_values[index]; 71 value = m_limit_values[index] - m_current_values[index];
72 } 72 }
73 73
74 return value; 74 return value;
@@ -76,51 +76,51 @@ s64 KResourceLimit::GetFreeValue(LimitableResource which) const {
76 76
77Result KResourceLimit::SetLimitValue(LimitableResource which, s64 value) { 77Result KResourceLimit::SetLimitValue(LimitableResource which, s64 value) {
78 const auto index = static_cast<std::size_t>(which); 78 const auto index = static_cast<std::size_t>(which);
79 KScopedLightLock lk(lock); 79 KScopedLightLock lk(m_lock);
80 R_UNLESS(current_values[index] <= value, ResultInvalidState); 80 R_UNLESS(m_current_values[index] <= value, ResultInvalidState);
81 81
82 limit_values[index] = value; 82 m_limit_values[index] = value;
83 peak_values[index] = current_values[index]; 83 m_peak_values[index] = m_current_values[index];
84 84
85 return ResultSuccess; 85 R_SUCCEED();
86} 86}
87 87
88bool KResourceLimit::Reserve(LimitableResource which, s64 value) { 88bool KResourceLimit::Reserve(LimitableResource which, s64 value) {
89 return Reserve(which, value, core_timing->GetGlobalTimeNs().count() + DefaultTimeout); 89 return Reserve(which, value, m_core_timing->GetGlobalTimeNs().count() + DefaultTimeout);
90} 90}
91 91
92bool KResourceLimit::Reserve(LimitableResource which, s64 value, s64 timeout) { 92bool KResourceLimit::Reserve(LimitableResource which, s64 value, s64 timeout) {
93 ASSERT(value >= 0); 93 ASSERT(value >= 0);
94 const auto index = static_cast<std::size_t>(which); 94 const auto index = static_cast<std::size_t>(which);
95 KScopedLightLock lk(lock); 95 KScopedLightLock lk(m_lock);
96 96
97 ASSERT(current_hints[index] <= current_values[index]); 97 ASSERT(m_current_hints[index] <= m_current_values[index]);
98 if (current_hints[index] >= limit_values[index]) { 98 if (m_current_hints[index] >= m_limit_values[index]) {
99 return false; 99 return false;
100 } 100 }
101 101
102 // Loop until we reserve or run out of time. 102 // Loop until we reserve or run out of time.
103 while (true) { 103 while (true) {
104 ASSERT(current_values[index] <= limit_values[index]); 104 ASSERT(m_current_values[index] <= m_limit_values[index]);
105 ASSERT(current_hints[index] <= current_values[index]); 105 ASSERT(m_current_hints[index] <= m_current_values[index]);
106 106
107 // If we would overflow, don't allow to succeed. 107 // If we would overflow, don't allow to succeed.
108 if (Common::WrappingAdd(current_values[index], value) <= current_values[index]) { 108 if (Common::WrappingAdd(m_current_values[index], value) <= m_current_values[index]) {
109 break; 109 break;
110 } 110 }
111 111
112 if (current_values[index] + value <= limit_values[index]) { 112 if (m_current_values[index] + value <= m_limit_values[index]) {
113 current_values[index] += value; 113 m_current_values[index] += value;
114 current_hints[index] += value; 114 m_current_hints[index] += value;
115 peak_values[index] = std::max(peak_values[index], current_values[index]); 115 m_peak_values[index] = std::max(m_peak_values[index], m_current_values[index]);
116 return true; 116 return true;
117 } 117 }
118 118
119 if (current_hints[index] + value <= limit_values[index] && 119 if (m_current_hints[index] + value <= m_limit_values[index] &&
120 (timeout < 0 || core_timing->GetGlobalTimeNs().count() < timeout)) { 120 (timeout < 0 || m_core_timing->GetGlobalTimeNs().count() < timeout)) {
121 waiter_count++; 121 m_waiter_count++;
122 cond_var.Wait(&lock, timeout, false); 122 m_cond_var.Wait(std::addressof(m_lock), timeout, false);
123 waiter_count--; 123 m_waiter_count--;
124 } else { 124 } else {
125 break; 125 break;
126 } 126 }
@@ -138,23 +138,23 @@ void KResourceLimit::Release(LimitableResource which, s64 value, s64 hint) {
138 ASSERT(hint >= 0); 138 ASSERT(hint >= 0);
139 139
140 const auto index = static_cast<std::size_t>(which); 140 const auto index = static_cast<std::size_t>(which);
141 KScopedLightLock lk(lock); 141 KScopedLightLock lk(m_lock);
142 ASSERT(current_values[index] <= limit_values[index]); 142 ASSERT(m_current_values[index] <= m_limit_values[index]);
143 ASSERT(current_hints[index] <= current_values[index]); 143 ASSERT(m_current_hints[index] <= m_current_values[index]);
144 ASSERT(value <= current_values[index]); 144 ASSERT(value <= m_current_values[index]);
145 ASSERT(hint <= current_hints[index]); 145 ASSERT(hint <= m_current_hints[index]);
146 146
147 current_values[index] -= value; 147 m_current_values[index] -= value;
148 current_hints[index] -= hint; 148 m_current_hints[index] -= hint;
149 149
150 if (waiter_count != 0) { 150 if (m_waiter_count != 0) {
151 cond_var.Broadcast(); 151 m_cond_var.Broadcast();
152 } 152 }
153} 153}
154 154
155KResourceLimit* CreateResourceLimitForProcess(Core::System& system, s64 physical_memory_size) { 155KResourceLimit* CreateResourceLimitForProcess(Core::System& system, s64 physical_memory_size) {
156 auto* resource_limit = KResourceLimit::Create(system.Kernel()); 156 auto* resource_limit = KResourceLimit::Create(system.Kernel());
157 resource_limit->Initialize(&system.CoreTiming()); 157 resource_limit->Initialize(std::addressof(system.CoreTiming()));
158 158
159 // Initialize default resource limit values. 159 // Initialize default resource limit values.
160 // TODO(bunnei): These values are the system defaults, the limits for service processes are 160 // TODO(bunnei): These values are the system defaults, the limits for service processes are
diff --git a/src/core/hle/kernel/k_resource_limit.h b/src/core/hle/kernel/k_resource_limit.h
index 2573d1b7c..15e69af56 100644
--- a/src/core/hle/kernel/k_resource_limit.h
+++ b/src/core/hle/kernel/k_resource_limit.h
@@ -28,10 +28,10 @@ class KResourceLimit final
28 KERNEL_AUTOOBJECT_TRAITS(KResourceLimit, KAutoObject); 28 KERNEL_AUTOOBJECT_TRAITS(KResourceLimit, KAutoObject);
29 29
30public: 30public:
31 explicit KResourceLimit(KernelCore& kernel_); 31 explicit KResourceLimit(KernelCore& kernel);
32 ~KResourceLimit() override; 32 ~KResourceLimit() override;
33 33
34 void Initialize(const Core::Timing::CoreTiming* core_timing_); 34 void Initialize(const Core::Timing::CoreTiming* core_timing);
35 void Finalize() override; 35 void Finalize() override;
36 36
37 s64 GetLimitValue(LimitableResource which) const; 37 s64 GetLimitValue(LimitableResource which) const;
@@ -46,18 +46,18 @@ public:
46 void Release(LimitableResource which, s64 value); 46 void Release(LimitableResource which, s64 value);
47 void Release(LimitableResource which, s64 value, s64 hint); 47 void Release(LimitableResource which, s64 value, s64 hint);
48 48
49 static void PostDestroy([[maybe_unused]] uintptr_t arg) {} 49 static void PostDestroy(uintptr_t arg) {}
50 50
51private: 51private:
52 using ResourceArray = std::array<s64, static_cast<std::size_t>(LimitableResource::Count)>; 52 using ResourceArray = std::array<s64, static_cast<std::size_t>(LimitableResource::Count)>;
53 ResourceArray limit_values{}; 53 ResourceArray m_limit_values{};
54 ResourceArray current_values{}; 54 ResourceArray m_current_values{};
55 ResourceArray current_hints{}; 55 ResourceArray m_current_hints{};
56 ResourceArray peak_values{}; 56 ResourceArray m_peak_values{};
57 mutable KLightLock lock; 57 mutable KLightLock m_lock;
58 s32 waiter_count{}; 58 s32 m_waiter_count{};
59 KLightConditionVariable cond_var; 59 KLightConditionVariable m_cond_var;
60 const Core::Timing::CoreTiming* core_timing{}; 60 const Core::Timing::CoreTiming* m_core_timing{};
61}; 61};
62 62
63KResourceLimit* CreateResourceLimitForProcess(Core::System& system, s64 physical_memory_size); 63KResourceLimit* CreateResourceLimitForProcess(Core::System& system, s64 physical_memory_size);
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index d6c214237..ecadf2916 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -27,7 +27,7 @@ static void IncrementScheduledCount(Kernel::KThread* thread) {
27 } 27 }
28} 28}
29 29
30KScheduler::KScheduler(KernelCore& kernel_) : kernel{kernel_} { 30KScheduler::KScheduler(KernelCore& kernel) : m_kernel{kernel} {
31 m_switch_fiber = std::make_shared<Common::Fiber>([this] { 31 m_switch_fiber = std::make_shared<Common::Fiber>([this] {
32 while (true) { 32 while (true) {
33 ScheduleImplFiber(); 33 ScheduleImplFiber();
@@ -47,7 +47,7 @@ void KScheduler::SetInterruptTaskRunnable() {
47void KScheduler::RequestScheduleOnInterrupt() { 47void KScheduler::RequestScheduleOnInterrupt() {
48 m_state.needs_scheduling = true; 48 m_state.needs_scheduling = true;
49 49
50 if (CanSchedule(kernel)) { 50 if (CanSchedule(m_kernel)) {
51 ScheduleOnInterrupt(); 51 ScheduleOnInterrupt();
52 } 52 }
53} 53}
@@ -97,50 +97,50 @@ u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
97} 97}
98 98
99void KScheduler::Schedule() { 99void KScheduler::Schedule() {
100 ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1); 100 ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() == 1);
101 ASSERT(m_core_id == GetCurrentCoreId(kernel)); 101 ASSERT(m_core_id == GetCurrentCoreId(m_kernel));
102 102
103 ScheduleImpl(); 103 ScheduleImpl();
104} 104}
105 105
106void KScheduler::ScheduleOnInterrupt() { 106void KScheduler::ScheduleOnInterrupt() {
107 GetCurrentThread(kernel).DisableDispatch(); 107 GetCurrentThread(m_kernel).DisableDispatch();
108 Schedule(); 108 Schedule();
109 GetCurrentThread(kernel).EnableDispatch(); 109 GetCurrentThread(m_kernel).EnableDispatch();
110} 110}
111 111
112void KScheduler::PreemptSingleCore() { 112void KScheduler::PreemptSingleCore() {
113 GetCurrentThread(kernel).DisableDispatch(); 113 GetCurrentThread(m_kernel).DisableDispatch();
114 114
115 auto* thread = GetCurrentThreadPointer(kernel); 115 auto* thread = GetCurrentThreadPointer(m_kernel);
116 auto& previous_scheduler = kernel.Scheduler(thread->GetCurrentCore()); 116 auto& previous_scheduler = m_kernel.Scheduler(thread->GetCurrentCore());
117 previous_scheduler.Unload(thread); 117 previous_scheduler.Unload(thread);
118 118
119 Common::Fiber::YieldTo(thread->GetHostContext(), *m_switch_fiber); 119 Common::Fiber::YieldTo(thread->GetHostContext(), *m_switch_fiber);
120 120
121 GetCurrentThread(kernel).EnableDispatch(); 121 GetCurrentThread(m_kernel).EnableDispatch();
122} 122}
123 123
124void KScheduler::RescheduleCurrentCore() { 124void KScheduler::RescheduleCurrentCore() {
125 ASSERT(!kernel.IsPhantomModeForSingleCore()); 125 ASSERT(!m_kernel.IsPhantomModeForSingleCore());
126 ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1); 126 ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() == 1);
127 127
128 GetCurrentThread(kernel).EnableDispatch(); 128 GetCurrentThread(m_kernel).EnableDispatch();
129 129
130 if (m_state.needs_scheduling.load()) { 130 if (m_state.needs_scheduling.load()) {
131 // Disable interrupts, and then check again if rescheduling is needed. 131 // Disable interrupts, and then check again if rescheduling is needed.
132 // KScopedInterruptDisable intr_disable; 132 // KScopedInterruptDisable intr_disable;
133 133
134 kernel.CurrentScheduler()->RescheduleCurrentCoreImpl(); 134 m_kernel.CurrentScheduler()->RescheduleCurrentCoreImpl();
135 } 135 }
136} 136}
137 137
138void KScheduler::RescheduleCurrentCoreImpl() { 138void KScheduler::RescheduleCurrentCoreImpl() {
139 // Check that scheduling is needed. 139 // Check that scheduling is needed.
140 if (m_state.needs_scheduling.load()) [[likely]] { 140 if (m_state.needs_scheduling.load()) [[likely]] {
141 GetCurrentThread(kernel).DisableDispatch(); 141 GetCurrentThread(m_kernel).DisableDispatch();
142 Schedule(); 142 Schedule();
143 GetCurrentThread(kernel).EnableDispatch(); 143 GetCurrentThread(m_kernel).EnableDispatch();
144 } 144 }
145} 145}
146 146
@@ -149,18 +149,18 @@ void KScheduler::Initialize(KThread* main_thread, KThread* idle_thread, s32 core
149 m_core_id = core_id; 149 m_core_id = core_id;
150 m_idle_thread = idle_thread; 150 m_idle_thread = idle_thread;
151 // m_state.idle_thread_stack = m_idle_thread->GetStackTop(); 151 // m_state.idle_thread_stack = m_idle_thread->GetStackTop();
152 // m_state.interrupt_task_manager = &kernel.GetInterruptTaskManager(); 152 // m_state.interrupt_task_manager = std::addressof(kernel.GetInterruptTaskManager());
153 153
154 // Insert the main thread into the priority queue. 154 // Insert the main thread into the priority queue.
155 // { 155 // {
156 // KScopedSchedulerLock lk{kernel}; 156 // KScopedSchedulerLock lk{m_kernel};
157 // GetPriorityQueue(kernel).PushBack(GetCurrentThreadPointer(kernel)); 157 // GetPriorityQueue(m_kernel).PushBack(GetCurrentThreadPointer(m_kernel));
158 // SetSchedulerUpdateNeeded(kernel); 158 // SetSchedulerUpdateNeeded(m_kernel);
159 // } 159 // }
160 160
161 // Bind interrupt handler. 161 // Bind interrupt handler.
162 // kernel.GetInterruptManager().BindHandler( 162 // kernel.GetInterruptManager().BindHandler(
163 // GetSchedulerInterruptHandler(kernel), KInterruptName::Scheduler, m_core_id, 163 // GetSchedulerInterruptHandler(m_kernel), KInterruptName::Scheduler, m_core_id,
164 // KInterruptController::PriorityLevel::Scheduler, false, false); 164 // KInterruptController::PriorityLevel::Scheduler, false, false);
165 165
166 // Set the current thread. 166 // Set the current thread.
@@ -168,7 +168,7 @@ void KScheduler::Initialize(KThread* main_thread, KThread* idle_thread, s32 core
168} 168}
169 169
170void KScheduler::Activate() { 170void KScheduler::Activate() {
171 ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1); 171 ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() == 1);
172 172
173 // m_state.should_count_idle = KTargetSystem::IsDebugMode(); 173 // m_state.should_count_idle = KTargetSystem::IsDebugMode();
174 m_is_active = true; 174 m_is_active = true;
@@ -176,7 +176,7 @@ void KScheduler::Activate() {
176} 176}
177 177
178void KScheduler::OnThreadStart() { 178void KScheduler::OnThreadStart() {
179 GetCurrentThread(kernel).EnableDispatch(); 179 GetCurrentThread(m_kernel).EnableDispatch();
180} 180}
181 181
182u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) { 182u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
@@ -184,7 +184,7 @@ u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
184 prev_highest_thread != highest_thread) [[likely]] { 184 prev_highest_thread != highest_thread) [[likely]] {
185 if (prev_highest_thread != nullptr) [[likely]] { 185 if (prev_highest_thread != nullptr) [[likely]] {
186 IncrementScheduledCount(prev_highest_thread); 186 IncrementScheduledCount(prev_highest_thread);
187 prev_highest_thread->SetLastScheduledTick(kernel.System().CoreTiming().GetCPUTicks()); 187 prev_highest_thread->SetLastScheduledTick(m_kernel.System().CoreTiming().GetCPUTicks());
188 } 188 }
189 if (m_state.should_count_idle) { 189 if (m_state.should_count_idle) {
190 if (highest_thread != nullptr) [[likely]] { 190 if (highest_thread != nullptr) [[likely]] {
@@ -328,8 +328,8 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
328} 328}
329 329
330void KScheduler::SwitchThread(KThread* next_thread) { 330void KScheduler::SwitchThread(KThread* next_thread) {
331 KProcess* const cur_process = GetCurrentProcessPointer(kernel); 331 KProcess* const cur_process = GetCurrentProcessPointer(m_kernel);
332 KThread* const cur_thread = GetCurrentThreadPointer(kernel); 332 KThread* const cur_thread = GetCurrentThreadPointer(m_kernel);
333 333
334 // We never want to schedule a null thread, so use the idle thread if we don't have a next. 334 // We never want to schedule a null thread, so use the idle thread if we don't have a next.
335 if (next_thread == nullptr) { 335 if (next_thread == nullptr) {
@@ -351,7 +351,7 @@ void KScheduler::SwitchThread(KThread* next_thread) {
351 351
352 // Update the CPU time tracking variables. 352 // Update the CPU time tracking variables.
353 const s64 prev_tick = m_last_context_switch_time; 353 const s64 prev_tick = m_last_context_switch_time;
354 const s64 cur_tick = kernel.System().CoreTiming().GetCPUTicks(); 354 const s64 cur_tick = m_kernel.System().CoreTiming().GetCPUTicks();
355 const s64 tick_diff = cur_tick - prev_tick; 355 const s64 tick_diff = cur_tick - prev_tick;
356 cur_thread->AddCpuTime(m_core_id, tick_diff); 356 cur_thread->AddCpuTime(m_core_id, tick_diff);
357 if (cur_process != nullptr) { 357 if (cur_process != nullptr) {
@@ -375,7 +375,7 @@ void KScheduler::SwitchThread(KThread* next_thread) {
375 // } 375 // }
376 376
377 // Set the new thread. 377 // Set the new thread.
378 SetCurrentThread(kernel, next_thread); 378 SetCurrentThread(m_kernel, next_thread);
379 m_current_thread = next_thread; 379 m_current_thread = next_thread;
380 380
381 // Set the new Thread Local region. 381 // Set the new Thread Local region.
@@ -388,7 +388,7 @@ void KScheduler::ScheduleImpl() {
388 std::atomic_thread_fence(std::memory_order_seq_cst); 388 std::atomic_thread_fence(std::memory_order_seq_cst);
389 389
390 // Load the appropriate thread pointers for scheduling. 390 // Load the appropriate thread pointers for scheduling.
391 KThread* const cur_thread{GetCurrentThreadPointer(kernel)}; 391 KThread* const cur_thread{GetCurrentThreadPointer(m_kernel)};
392 KThread* highest_priority_thread{m_state.highest_priority_thread}; 392 KThread* highest_priority_thread{m_state.highest_priority_thread};
393 393
394 // Check whether there are runnable interrupt tasks. 394 // Check whether there are runnable interrupt tasks.
@@ -411,7 +411,7 @@ void KScheduler::ScheduleImpl() {
411 m_switch_cur_thread = cur_thread; 411 m_switch_cur_thread = cur_thread;
412 m_switch_highest_priority_thread = highest_priority_thread; 412 m_switch_highest_priority_thread = highest_priority_thread;
413 m_switch_from_schedule = true; 413 m_switch_from_schedule = true;
414 Common::Fiber::YieldTo(cur_thread->host_context, *m_switch_fiber); 414 Common::Fiber::YieldTo(cur_thread->m_host_context, *m_switch_fiber);
415 415
416 // Returning from ScheduleImpl occurs after this thread has been scheduled again. 416 // Returning from ScheduleImpl occurs after this thread has been scheduled again.
417} 417}
@@ -450,7 +450,7 @@ void KScheduler::ScheduleImplFiber() {
450 450
451 // We want to try to lock the highest priority thread's context. 451 // We want to try to lock the highest priority thread's context.
452 // Try to take it. 452 // Try to take it.
453 while (!highest_priority_thread->context_guard.try_lock()) { 453 while (!highest_priority_thread->m_context_guard.try_lock()) {
454 // The highest priority thread's context is already locked. 454 // The highest priority thread's context is already locked.
455 // Check if we need scheduling. If we don't, we can retry directly. 455 // Check if we need scheduling. If we don't, we can retry directly.
456 if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) { 456 if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
@@ -468,7 +468,7 @@ void KScheduler::ScheduleImplFiber() {
468 if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) { 468 if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
469 // Our switch failed. 469 // Our switch failed.
470 // We should unlock the thread context, and then retry. 470 // We should unlock the thread context, and then retry.
471 highest_priority_thread->context_guard.unlock(); 471 highest_priority_thread->m_context_guard.unlock();
472 goto retry; 472 goto retry;
473 } else { 473 } else {
474 break; 474 break;
@@ -489,30 +489,30 @@ void KScheduler::ScheduleImplFiber() {
489 Reload(highest_priority_thread); 489 Reload(highest_priority_thread);
490 490
491 // Reload the host thread. 491 // Reload the host thread.
492 Common::Fiber::YieldTo(m_switch_fiber, *highest_priority_thread->host_context); 492 Common::Fiber::YieldTo(m_switch_fiber, *highest_priority_thread->m_host_context);
493} 493}
494 494
495void KScheduler::Unload(KThread* thread) { 495void KScheduler::Unload(KThread* thread) {
496 auto& cpu_core = kernel.System().ArmInterface(m_core_id); 496 auto& cpu_core = m_kernel.System().ArmInterface(m_core_id);
497 cpu_core.SaveContext(thread->GetContext32()); 497 cpu_core.SaveContext(thread->GetContext32());
498 cpu_core.SaveContext(thread->GetContext64()); 498 cpu_core.SaveContext(thread->GetContext64());
499 // Save the TPIDR_EL0 system register in case it was modified. 499 // Save the TPIDR_EL0 system register in case it was modified.
500 thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); 500 thread->SetTpidrEl0(cpu_core.GetTPIDR_EL0());
501 cpu_core.ClearExclusiveState(); 501 cpu_core.ClearExclusiveState();
502 502
503 // Check if the thread is terminated by checking the DPC flags. 503 // Check if the thread is terminated by checking the DPC flags.
504 if ((thread->GetStackParameters().dpc_flags & static_cast<u32>(DpcFlag::Terminated)) == 0) { 504 if ((thread->GetStackParameters().dpc_flags & static_cast<u32>(DpcFlag::Terminated)) == 0) {
505 // The thread isn't terminated, so we want to unlock it. 505 // The thread isn't terminated, so we want to unlock it.
506 thread->context_guard.unlock(); 506 thread->m_context_guard.unlock();
507 } 507 }
508} 508}
509 509
510void KScheduler::Reload(KThread* thread) { 510void KScheduler::Reload(KThread* thread) {
511 auto& cpu_core = kernel.System().ArmInterface(m_core_id); 511 auto& cpu_core = m_kernel.System().ArmInterface(m_core_id);
512 cpu_core.LoadContext(thread->GetContext32()); 512 cpu_core.LoadContext(thread->GetContext32());
513 cpu_core.LoadContext(thread->GetContext64()); 513 cpu_core.LoadContext(thread->GetContext64());
514 cpu_core.SetTlsAddress(thread->GetTLSAddress()); 514 cpu_core.SetTlsAddress(thread->GetTlsAddress());
515 cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0()); 515 cpu_core.SetTPIDR_EL0(thread->GetTpidrEl0());
516 cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints()); 516 cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints());
517 cpu_core.ClearExclusiveState(); 517 cpu_core.ClearExclusiveState();
518} 518}
@@ -891,7 +891,7 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) {
891 891
892void KScheduler::RescheduleOtherCores(u64 cores_needing_scheduling) { 892void KScheduler::RescheduleOtherCores(u64 cores_needing_scheduling) {
893 if (const u64 core_mask = cores_needing_scheduling & ~(1ULL << m_core_id); core_mask != 0) { 893 if (const u64 core_mask = cores_needing_scheduling & ~(1ULL << m_core_id); core_mask != 0) {
894 RescheduleCores(kernel, core_mask); 894 RescheduleCores(m_kernel, core_mask);
895 } 895 }
896} 896}
897 897
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 534321d8d..d85a0c040 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -80,17 +80,17 @@ public:
80 return GetCurrentThread(kernel).GetDisableDispatchCount() == 0; 80 return GetCurrentThread(kernel).GetDisableDispatchCount() == 0;
81 } 81 }
82 static bool IsSchedulerLockedByCurrentThread(KernelCore& kernel) { 82 static bool IsSchedulerLockedByCurrentThread(KernelCore& kernel) {
83 return kernel.GlobalSchedulerContext().scheduler_lock.IsLockedByCurrentThread(); 83 return kernel.GlobalSchedulerContext().m_scheduler_lock.IsLockedByCurrentThread();
84 } 84 }
85 85
86 static bool IsSchedulerUpdateNeeded(KernelCore& kernel) { 86 static bool IsSchedulerUpdateNeeded(KernelCore& kernel) {
87 return kernel.GlobalSchedulerContext().scheduler_update_needed; 87 return kernel.GlobalSchedulerContext().m_scheduler_update_needed;
88 } 88 }
89 static void SetSchedulerUpdateNeeded(KernelCore& kernel) { 89 static void SetSchedulerUpdateNeeded(KernelCore& kernel) {
90 kernel.GlobalSchedulerContext().scheduler_update_needed = true; 90 kernel.GlobalSchedulerContext().m_scheduler_update_needed = true;
91 } 91 }
92 static void ClearSchedulerUpdateNeeded(KernelCore& kernel) { 92 static void ClearSchedulerUpdateNeeded(KernelCore& kernel) {
93 kernel.GlobalSchedulerContext().scheduler_update_needed = false; 93 kernel.GlobalSchedulerContext().m_scheduler_update_needed = false;
94 } 94 }
95 95
96 static void DisableScheduling(KernelCore& kernel); 96 static void DisableScheduling(KernelCore& kernel);
@@ -115,7 +115,7 @@ public:
115private: 115private:
116 // Static private API. 116 // Static private API.
117 static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel) { 117 static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel) {
118 return kernel.GlobalSchedulerContext().priority_queue; 118 return kernel.GlobalSchedulerContext().m_priority_queue;
119 } 119 }
120 static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel); 120 static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel);
121 121
@@ -149,7 +149,7 @@ private:
149 KInterruptTaskManager* interrupt_task_manager{nullptr}; 149 KInterruptTaskManager* interrupt_task_manager{nullptr};
150 }; 150 };
151 151
152 KernelCore& kernel; 152 KernelCore& m_kernel;
153 SchedulingState m_state; 153 SchedulingState m_state;
154 bool m_is_active{false}; 154 bool m_is_active{false};
155 s32 m_core_id{0}; 155 s32 m_core_id{0};
@@ -166,7 +166,7 @@ private:
166class KScopedSchedulerLock : public KScopedLock<KScheduler::LockType> { 166class KScopedSchedulerLock : public KScopedLock<KScheduler::LockType> {
167public: 167public:
168 explicit KScopedSchedulerLock(KernelCore& kernel) 168 explicit KScopedSchedulerLock(KernelCore& kernel)
169 : KScopedLock(kernel.GlobalSchedulerContext().scheduler_lock) {} 169 : KScopedLock(kernel.GlobalSchedulerContext().m_scheduler_lock) {}
170 ~KScopedSchedulerLock() = default; 170 ~KScopedSchedulerLock() = default;
171}; 171};
172 172
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h
index 13463717f..caa1404f1 100644
--- a/src/core/hle/kernel/k_scheduler_lock.h
+++ b/src/core/hle/kernel/k_scheduler_lock.h
@@ -14,74 +14,67 @@
14namespace Kernel { 14namespace Kernel {
15 15
16class KernelCore; 16class KernelCore;
17class GlobalSchedulerContext;
17 18
18template <typename SchedulerType> 19template <typename SchedulerType>
19class KAbstractSchedulerLock { 20class KAbstractSchedulerLock {
20public: 21public:
21 explicit KAbstractSchedulerLock(KernelCore& kernel_) : kernel{kernel_} {} 22 explicit KAbstractSchedulerLock(KernelCore& kernel) : m_kernel{kernel} {}
22 23
23 bool IsLockedByCurrentThread() const { 24 bool IsLockedByCurrentThread() const {
24 return owner_thread == GetCurrentThreadPointer(kernel); 25 return m_owner_thread == GetCurrentThreadPointer(m_kernel);
25 } 26 }
26 27
27 void Lock() { 28 void Lock() {
28 // If we are shutting down the kernel, none of this is relevant anymore. 29 if (this->IsLockedByCurrentThread()) {
29 if (kernel.IsShuttingDown()) {
30 return;
31 }
32
33 if (IsLockedByCurrentThread()) {
34 // If we already own the lock, the lock count should be > 0. 30 // If we already own the lock, the lock count should be > 0.
35 // For debug, ensure this is true. 31 // For debug, ensure this is true.
36 ASSERT(lock_count > 0); 32 ASSERT(m_lock_count > 0);
37 } else { 33 } else {
38 // Otherwise, we want to disable scheduling and acquire the spinlock. 34 // Otherwise, we want to disable scheduling and acquire the spinlock.
39 SchedulerType::DisableScheduling(kernel); 35 SchedulerType::DisableScheduling(m_kernel);
40 spin_lock.Lock(); 36 m_spin_lock.Lock();
41 37
42 ASSERT(lock_count == 0); 38 ASSERT(m_lock_count == 0);
43 ASSERT(owner_thread == nullptr); 39 ASSERT(m_owner_thread == nullptr);
44 40
45 // Take ownership of the lock. 41 // Take ownership of the lock.
46 owner_thread = GetCurrentThreadPointer(kernel); 42 m_owner_thread = GetCurrentThreadPointer(m_kernel);
47 } 43 }
48 44
49 // Increment the lock count. 45 // Increment the lock count.
50 lock_count++; 46 m_lock_count++;
51 } 47 }
52 48
53 void Unlock() { 49 void Unlock() {
54 // If we are shutting down the kernel, none of this is relevant anymore. 50 ASSERT(this->IsLockedByCurrentThread());
55 if (kernel.IsShuttingDown()) { 51 ASSERT(m_lock_count > 0);
56 return;
57 }
58
59 ASSERT(IsLockedByCurrentThread());
60 ASSERT(lock_count > 0);
61 52
62 // Release an instance of the lock. 53 // Release an instance of the lock.
63 if ((--lock_count) == 0) { 54 if ((--m_lock_count) == 0) {
64 // Perform a memory barrier here. 55 // Perform a memory barrier here.
65 std::atomic_thread_fence(std::memory_order_seq_cst); 56 std::atomic_thread_fence(std::memory_order_seq_cst);
66 57
67 // We're no longer going to hold the lock. Take note of what cores need scheduling. 58 // We're no longer going to hold the lock. Take note of what cores need scheduling.
68 const u64 cores_needing_scheduling = 59 const u64 cores_needing_scheduling =
69 SchedulerType::UpdateHighestPriorityThreads(kernel); 60 SchedulerType::UpdateHighestPriorityThreads(m_kernel);
70 61
71 // Note that we no longer hold the lock, and unlock the spinlock. 62 // Note that we no longer hold the lock, and unlock the spinlock.
72 owner_thread = nullptr; 63 m_owner_thread = nullptr;
73 spin_lock.Unlock(); 64 m_spin_lock.Unlock();
74 65
75 // Enable scheduling, and perform a rescheduling operation. 66 // Enable scheduling, and perform a rescheduling operation.
76 SchedulerType::EnableScheduling(kernel, cores_needing_scheduling); 67 SchedulerType::EnableScheduling(m_kernel, cores_needing_scheduling);
77 } 68 }
78 } 69 }
79 70
80private: 71private:
81 KernelCore& kernel; 72 friend class GlobalSchedulerContext;
82 KAlignedSpinLock spin_lock{}; 73
83 s32 lock_count{}; 74 KernelCore& m_kernel;
84 std::atomic<KThread*> owner_thread{}; 75 KAlignedSpinLock m_spin_lock{};
76 s32 m_lock_count{};
77 std::atomic<KThread*> m_owner_thread{};
85}; 78};
86 79
87} // namespace Kernel 80} // namespace Kernel
diff --git a/src/core/hle/kernel/k_scoped_lock.h b/src/core/hle/kernel/k_scoped_lock.h
index a15640fd2..629a7d20d 100644
--- a/src/core/hle/kernel/k_scoped_lock.h
+++ b/src/core/hle/kernel/k_scoped_lock.h
@@ -18,15 +18,15 @@ std::is_reference_v<T>&& requires(T& t) {
18 18
19template <typename T> 19template <typename T>
20 requires KLockable<T> 20 requires KLockable<T>
21class [[nodiscard]] KScopedLock { 21class KScopedLock {
22public: 22public:
23 explicit KScopedLock(T* l) : lock_ptr(l) { 23 explicit KScopedLock(T* l) : m_lock(*l) {}
24 this->lock_ptr->Lock(); 24 explicit KScopedLock(T& l) : m_lock(l) {
25 m_lock.Lock();
25 } 26 }
26 explicit KScopedLock(T& l) : KScopedLock(std::addressof(l)) {}
27 27
28 ~KScopedLock() { 28 ~KScopedLock() {
29 this->lock_ptr->Unlock(); 29 m_lock.Unlock();
30 } 30 }
31 31
32 KScopedLock(const KScopedLock&) = delete; 32 KScopedLock(const KScopedLock&) = delete;
@@ -36,7 +36,7 @@ public:
36 KScopedLock& operator=(KScopedLock&&) = delete; 36 KScopedLock& operator=(KScopedLock&&) = delete;
37 37
38private: 38private:
39 T* lock_ptr; 39 T& m_lock;
40}; 40};
41 41
42} // namespace Kernel 42} // namespace Kernel
diff --git a/src/core/hle/kernel/k_scoped_resource_reservation.h b/src/core/hle/kernel/k_scoped_resource_reservation.h
index 436bcf9fe..2cc464612 100644
--- a/src/core/hle/kernel/k_scoped_resource_reservation.h
+++ b/src/core/hle/kernel/k_scoped_resource_reservation.h
@@ -12,20 +12,20 @@ namespace Kernel {
12class KScopedResourceReservation { 12class KScopedResourceReservation {
13public: 13public:
14 explicit KScopedResourceReservation(KResourceLimit* l, LimitableResource r, s64 v, s64 timeout) 14 explicit KScopedResourceReservation(KResourceLimit* l, LimitableResource r, s64 v, s64 timeout)
15 : resource_limit(std::move(l)), value(v), resource(r) { 15 : m_limit(l), m_value(v), m_resource(r) {
16 if (resource_limit && value) { 16 if (m_limit && m_value) {
17 success = resource_limit->Reserve(resource, value, timeout); 17 m_succeeded = m_limit->Reserve(m_resource, m_value, timeout);
18 } else { 18 } else {
19 success = true; 19 m_succeeded = true;
20 } 20 }
21 } 21 }
22 22
23 explicit KScopedResourceReservation(KResourceLimit* l, LimitableResource r, s64 v = 1) 23 explicit KScopedResourceReservation(KResourceLimit* l, LimitableResource r, s64 v = 1)
24 : resource_limit(std::move(l)), value(v), resource(r) { 24 : m_limit(l), m_value(v), m_resource(r) {
25 if (resource_limit && value) { 25 if (m_limit && m_value) {
26 success = resource_limit->Reserve(resource, value); 26 m_succeeded = m_limit->Reserve(m_resource, m_value);
27 } else { 27 } else {
28 success = true; 28 m_succeeded = true;
29 } 29 }
30 } 30 }
31 31
@@ -36,26 +36,26 @@ public:
36 : KScopedResourceReservation(p->GetResourceLimit(), r, v) {} 36 : KScopedResourceReservation(p->GetResourceLimit(), r, v) {}
37 37
38 ~KScopedResourceReservation() noexcept { 38 ~KScopedResourceReservation() noexcept {
39 if (resource_limit && value && success) { 39 if (m_limit && m_value && m_succeeded) {
40 // resource was not committed, release the reservation. 40 // Resource was not committed, release the reservation.
41 resource_limit->Release(resource, value); 41 m_limit->Release(m_resource, m_value);
42 } 42 }
43 } 43 }
44 44
45 /// Commit the resource reservation, destruction of this object does not release the resource 45 /// Commit the resource reservation, destruction of this object does not release the resource
46 void Commit() { 46 void Commit() {
47 resource_limit = nullptr; 47 m_limit = nullptr;
48 } 48 }
49 49
50 [[nodiscard]] bool Succeeded() const { 50 bool Succeeded() const {
51 return success; 51 return m_succeeded;
52 } 52 }
53 53
54private: 54private:
55 KResourceLimit* resource_limit{}; 55 KResourceLimit* m_limit{};
56 s64 value; 56 s64 m_value{};
57 LimitableResource resource; 57 LimitableResource m_resource{};
58 bool success; 58 bool m_succeeded{};
59}; 59};
60 60
61} // namespace Kernel 61} // namespace Kernel
diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
index 76db65a4d..c485022f5 100644
--- a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
+++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
@@ -11,32 +11,39 @@
11 11
12namespace Kernel { 12namespace Kernel {
13 13
14class [[nodiscard]] KScopedSchedulerLockAndSleep { 14class KScopedSchedulerLockAndSleep {
15public: 15public:
16 explicit KScopedSchedulerLockAndSleep(KernelCore& kernel_, KThread* t, s64 timeout) 16 explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, KHardwareTimer** out_timer,
17 : kernel(kernel_), thread(t), timeout_tick(timeout) { 17 KThread* thread, s64 timeout_tick)
18 : m_kernel(kernel), m_timeout_tick(timeout_tick), m_thread(thread), m_timer() {
18 // Lock the scheduler. 19 // Lock the scheduler.
19 kernel.GlobalSchedulerContext().scheduler_lock.Lock(); 20 kernel.GlobalSchedulerContext().m_scheduler_lock.Lock();
21
22 // Set our timer only if the time is positive.
23 m_timer = (timeout_tick > 0) ? std::addressof(kernel.HardwareTimer()) : nullptr;
24
25 *out_timer = m_timer;
20 } 26 }
21 27
22 ~KScopedSchedulerLockAndSleep() { 28 ~KScopedSchedulerLockAndSleep() {
23 // Register the sleep. 29 // Register the sleep.
24 if (timeout_tick > 0) { 30 if (m_timeout_tick > 0) {
25 kernel.HardwareTimer().RegisterTask(thread, timeout_tick); 31 m_timer->RegisterTask(m_thread, m_timeout_tick);
26 } 32 }
27 33
28 // Unlock the scheduler. 34 // Unlock the scheduler.
29 kernel.GlobalSchedulerContext().scheduler_lock.Unlock(); 35 m_kernel.GlobalSchedulerContext().m_scheduler_lock.Unlock();
30 } 36 }
31 37
32 void CancelSleep() { 38 void CancelSleep() {
33 timeout_tick = 0; 39 m_timeout_tick = 0;
34 } 40 }
35 41
36private: 42private:
37 KernelCore& kernel; 43 KernelCore& m_kernel;
38 KThread* thread{}; 44 s64 m_timeout_tick{};
39 s64 timeout_tick{}; 45 KThread* m_thread{};
46 KHardwareTimer* m_timer{};
40}; 47};
41 48
42} // namespace Kernel 49} // namespace Kernel
diff --git a/src/core/hle/kernel/k_server_port.cpp b/src/core/hle/kernel/k_server_port.cpp
index 16968ba97..a29d34bc1 100644
--- a/src/core/hle/kernel/k_server_port.cpp
+++ b/src/core/hle/kernel/k_server_port.cpp
@@ -12,13 +12,12 @@
12 12
13namespace Kernel { 13namespace Kernel {
14 14
15KServerPort::KServerPort(KernelCore& kernel_) : KSynchronizationObject{kernel_} {} 15KServerPort::KServerPort(KernelCore& kernel) : KSynchronizationObject{kernel} {}
16KServerPort::~KServerPort() = default; 16KServerPort::~KServerPort() = default;
17 17
18void KServerPort::Initialize(KPort* parent_port_, std::string&& name_) { 18void KServerPort::Initialize(KPort* parent) {
19 // Set member variables. 19 // Set member variables.
20 parent = parent_port_; 20 m_parent = parent;
21 name = std::move(name_);
22} 21}
23 22
24bool KServerPort::IsLight() const { 23bool KServerPort::IsLight() const {
@@ -36,10 +35,10 @@ void KServerPort::CleanupSessions() {
36 // Get the last session in the list 35 // Get the last session in the list
37 KServerSession* session = nullptr; 36 KServerSession* session = nullptr;
38 { 37 {
39 KScopedSchedulerLock sl{kernel}; 38 KScopedSchedulerLock sl{m_kernel};
40 if (!session_list.empty()) { 39 if (!m_session_list.empty()) {
41 session = std::addressof(session_list.front()); 40 session = std::addressof(m_session_list.front());
42 session_list.pop_front(); 41 m_session_list.pop_front();
43 } 42 }
44 } 43 }
45 44
@@ -54,13 +53,13 @@ void KServerPort::CleanupSessions() {
54 53
55void KServerPort::Destroy() { 54void KServerPort::Destroy() {
56 // Note with our parent that we're closed. 55 // Note with our parent that we're closed.
57 parent->OnServerClosed(); 56 m_parent->OnServerClosed();
58 57
59 // Perform necessary cleanup of our session lists. 58 // Perform necessary cleanup of our session lists.
60 this->CleanupSessions(); 59 this->CleanupSessions();
61 60
62 // Close our reference to our parent. 61 // Close our reference to our parent.
63 parent->Close(); 62 m_parent->Close();
64} 63}
65 64
66bool KServerPort::IsSignaled() const { 65bool KServerPort::IsSignaled() const {
@@ -68,18 +67,18 @@ bool KServerPort::IsSignaled() const {
68 UNIMPLEMENTED(); 67 UNIMPLEMENTED();
69 return false; 68 return false;
70 } else { 69 } else {
71 return !session_list.empty(); 70 return !m_session_list.empty();
72 } 71 }
73} 72}
74 73
75void KServerPort::EnqueueSession(KServerSession* session) { 74void KServerPort::EnqueueSession(KServerSession* session) {
76 ASSERT(!this->IsLight()); 75 ASSERT(!this->IsLight());
77 76
78 KScopedSchedulerLock sl{kernel}; 77 KScopedSchedulerLock sl{m_kernel};
79 78
80 // Add the session to our queue. 79 // Add the session to our queue.
81 session_list.push_back(*session); 80 m_session_list.push_back(*session);
82 if (session_list.size() == 1) { 81 if (m_session_list.size() == 1) {
83 this->NotifyAvailable(); 82 this->NotifyAvailable();
84 } 83 }
85} 84}
@@ -87,15 +86,15 @@ void KServerPort::EnqueueSession(KServerSession* session) {
87KServerSession* KServerPort::AcceptSession() { 86KServerSession* KServerPort::AcceptSession() {
88 ASSERT(!this->IsLight()); 87 ASSERT(!this->IsLight());
89 88
90 KScopedSchedulerLock sl{kernel}; 89 KScopedSchedulerLock sl{m_kernel};
91 90
92 // Return the first session in the list. 91 // Return the first session in the list.
93 if (session_list.empty()) { 92 if (m_session_list.empty()) {
94 return nullptr; 93 return nullptr;
95 } 94 }
96 95
97 KServerSession* session = std::addressof(session_list.front()); 96 KServerSession* session = std::addressof(m_session_list.front());
98 session_list.pop_front(); 97 m_session_list.pop_front();
99 return session; 98 return session;
100} 99}
101 100
diff --git a/src/core/hle/kernel/k_server_port.h b/src/core/hle/kernel/k_server_port.h
index 5fc7ee683..21c040e62 100644
--- a/src/core/hle/kernel/k_server_port.h
+++ b/src/core/hle/kernel/k_server_port.h
@@ -22,17 +22,17 @@ class KServerPort final : public KSynchronizationObject {
22 KERNEL_AUTOOBJECT_TRAITS(KServerPort, KSynchronizationObject); 22 KERNEL_AUTOOBJECT_TRAITS(KServerPort, KSynchronizationObject);
23 23
24public: 24public:
25 explicit KServerPort(KernelCore& kernel_); 25 explicit KServerPort(KernelCore& kernel);
26 ~KServerPort() override; 26 ~KServerPort() override;
27 27
28 void Initialize(KPort* parent_port_, std::string&& name_); 28 void Initialize(KPort* parent);
29 29
30 void EnqueueSession(KServerSession* pending_session); 30 void EnqueueSession(KServerSession* session);
31 31
32 KServerSession* AcceptSession(); 32 KServerSession* AcceptSession();
33 33
34 const KPort* GetParent() const { 34 const KPort* GetParent() const {
35 return parent; 35 return m_parent;
36 } 36 }
37 37
38 bool IsLight() const; 38 bool IsLight() const;
@@ -46,8 +46,8 @@ private:
46 46
47 void CleanupSessions(); 47 void CleanupSessions();
48 48
49 SessionList session_list; 49 SessionList m_session_list{};
50 KPort* parent{}; 50 KPort* m_parent{};
51}; 51};
52 52
53} // namespace Kernel 53} // namespace Kernel
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index 01591af5b..2288ee435 100644
--- a/src/core/hle/kernel/k_server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
@@ -28,23 +28,17 @@ namespace Kernel {
28 28
29using ThreadQueueImplForKServerSessionRequest = KThreadQueue; 29using ThreadQueueImplForKServerSessionRequest = KThreadQueue;
30 30
31KServerSession::KServerSession(KernelCore& kernel_) 31KServerSession::KServerSession(KernelCore& kernel)
32 : KSynchronizationObject{kernel_}, m_lock{kernel_} {} 32 : KSynchronizationObject{kernel}, m_lock{m_kernel} {}
33 33
34KServerSession::~KServerSession() = default; 34KServerSession::~KServerSession() = default;
35 35
36void KServerSession::Initialize(KSession* parent_session_, std::string&& name_) {
37 // Set member variables.
38 parent = parent_session_;
39 name = std::move(name_);
40}
41
42void KServerSession::Destroy() { 36void KServerSession::Destroy() {
43 parent->OnServerClosed(); 37 m_parent->OnServerClosed();
44 38
45 this->CleanupRequests(); 39 this->CleanupRequests();
46 40
47 parent->Close(); 41 m_parent->Close();
48} 42}
49 43
50void KServerSession::OnClientClosed() { 44void KServerSession::OnClientClosed() {
@@ -62,7 +56,7 @@ void KServerSession::OnClientClosed() {
62 56
63 // Get the next request. 57 // Get the next request.
64 { 58 {
65 KScopedSchedulerLock sl{kernel}; 59 KScopedSchedulerLock sl{m_kernel};
66 60
67 if (m_current_request != nullptr && m_current_request != prev_request) { 61 if (m_current_request != nullptr && m_current_request != prev_request) {
68 // Set the request, open a reference as we process it. 62 // Set the request, open a reference as we process it.
@@ -121,7 +115,7 @@ void KServerSession::OnClientClosed() {
121 115
122 // // Get the process and page table. 116 // // Get the process and page table.
123 // KProcess *client_process = thread->GetOwnerProcess(); 117 // KProcess *client_process = thread->GetOwnerProcess();
124 // auto &client_pt = client_process->GetPageTable(); 118 // auto& client_pt = client_process->GetPageTable();
125 119
126 // // Reply to the request. 120 // // Reply to the request.
127 // ReplyAsyncError(client_process, request->GetAddress(), request->GetSize(), 121 // ReplyAsyncError(client_process, request->GetAddress(), request->GetSize(),
@@ -141,10 +135,10 @@ void KServerSession::OnClientClosed() {
141} 135}
142 136
143bool KServerSession::IsSignaled() const { 137bool KServerSession::IsSignaled() const {
144 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); 138 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
145 139
146 // If the client is closed, we're always signaled. 140 // If the client is closed, we're always signaled.
147 if (parent->IsClientClosed()) { 141 if (m_parent->IsClientClosed()) {
148 return true; 142 return true;
149 } 143 }
150 144
@@ -154,17 +148,17 @@ bool KServerSession::IsSignaled() const {
154 148
155Result KServerSession::OnRequest(KSessionRequest* request) { 149Result KServerSession::OnRequest(KSessionRequest* request) {
156 // Create the wait queue. 150 // Create the wait queue.
157 ThreadQueueImplForKServerSessionRequest wait_queue{kernel}; 151 ThreadQueueImplForKServerSessionRequest wait_queue{m_kernel};
158 152
159 { 153 {
160 // Lock the scheduler. 154 // Lock the scheduler.
161 KScopedSchedulerLock sl{kernel}; 155 KScopedSchedulerLock sl{m_kernel};
162 156
163 // Ensure that we can handle new requests. 157 // Ensure that we can handle new requests.
164 R_UNLESS(!parent->IsServerClosed(), ResultSessionClosed); 158 R_UNLESS(!m_parent->IsServerClosed(), ResultSessionClosed);
165 159
166 // Check that we're not terminating. 160 // Check that we're not terminating.
167 R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), ResultTerminationRequested); 161 R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(), ResultTerminationRequested);
168 162
169 // Get whether we're empty. 163 // Get whether we're empty.
170 const bool was_empty = m_request_list.empty(); 164 const bool was_empty = m_request_list.empty();
@@ -182,11 +176,11 @@ Result KServerSession::OnRequest(KSessionRequest* request) {
182 R_SUCCEED_IF(request->GetEvent() != nullptr); 176 R_SUCCEED_IF(request->GetEvent() != nullptr);
183 177
184 // This is a synchronous request, so we should wait for our request to complete. 178 // This is a synchronous request, so we should wait for our request to complete.
185 GetCurrentThread(kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC); 179 GetCurrentThread(m_kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
186 GetCurrentThread(kernel).BeginWait(&wait_queue); 180 GetCurrentThread(m_kernel).BeginWait(std::addressof(wait_queue));
187 } 181 }
188 182
189 return GetCurrentThread(kernel).GetWaitResult(); 183 return GetCurrentThread(m_kernel).GetWaitResult();
190} 184}
191 185
192Result KServerSession::SendReply(bool is_hle) { 186Result KServerSession::SendReply(bool is_hle) {
@@ -196,7 +190,7 @@ Result KServerSession::SendReply(bool is_hle) {
196 // Get the request. 190 // Get the request.
197 KSessionRequest* request; 191 KSessionRequest* request;
198 { 192 {
199 KScopedSchedulerLock sl{kernel}; 193 KScopedSchedulerLock sl{m_kernel};
200 194
201 // Get the current request. 195 // Get the current request.
202 request = m_current_request; 196 request = m_current_request;
@@ -219,7 +213,7 @@ Result KServerSession::SendReply(bool is_hle) {
219 KEvent* event = request->GetEvent(); 213 KEvent* event = request->GetEvent();
220 214
221 // Check whether we're closed. 215 // Check whether we're closed.
222 const bool closed = (client_thread == nullptr || parent->IsClientClosed()); 216 const bool closed = (client_thread == nullptr || m_parent->IsClientClosed());
223 217
224 Result result = ResultSuccess; 218 Result result = ResultSuccess;
225 if (!closed) { 219 if (!closed) {
@@ -228,11 +222,11 @@ Result KServerSession::SendReply(bool is_hle) {
228 // HLE servers write directly to a pointer to the thread command buffer. Therefore 222 // HLE servers write directly to a pointer to the thread command buffer. Therefore
229 // the reply has already been written in this case. 223 // the reply has already been written in this case.
230 } else { 224 } else {
231 Core::Memory::Memory& memory{kernel.System().Memory()}; 225 Core::Memory::Memory& memory{m_kernel.System().Memory()};
232 KThread* server_thread{GetCurrentThreadPointer(kernel)}; 226 KThread* server_thread{GetCurrentThreadPointer(m_kernel)};
233 UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess()); 227 UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess());
234 228
235 auto* src_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress()); 229 auto* src_msg_buffer = memory.GetPointer(server_thread->GetTlsAddress());
236 auto* dst_msg_buffer = memory.GetPointer(client_message); 230 auto* dst_msg_buffer = memory.GetPointer(client_message);
237 std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size); 231 std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size);
238 } 232 }
@@ -254,7 +248,7 @@ Result KServerSession::SendReply(bool is_hle) {
254 if (event != nullptr) { 248 if (event != nullptr) {
255 // // Get the client process/page table. 249 // // Get the client process/page table.
256 // KProcess *client_process = client_thread->GetOwnerProcess(); 250 // KProcess *client_process = client_thread->GetOwnerProcess();
257 // KPageTable *client_page_table = &client_process->PageTable(); 251 // KPageTable *client_page_table = std::addressof(client_process->PageTable());
258 252
259 // // If we need to, reply with an async error. 253 // // If we need to, reply with an async error.
260 // if (R_FAILED(client_result)) { 254 // if (R_FAILED(client_result)) {
@@ -270,7 +264,7 @@ Result KServerSession::SendReply(bool is_hle) {
270 event->Signal(); 264 event->Signal();
271 } else { 265 } else {
272 // End the client thread's wait. 266 // End the client thread's wait.
273 KScopedSchedulerLock sl{kernel}; 267 KScopedSchedulerLock sl{m_kernel};
274 268
275 if (!client_thread->IsTerminationRequested()) { 269 if (!client_thread->IsTerminationRequested()) {
276 client_thread->EndWait(client_result); 270 client_thread->EndWait(client_result);
@@ -278,7 +272,7 @@ Result KServerSession::SendReply(bool is_hle) {
278 } 272 }
279 } 273 }
280 274
281 return result; 275 R_RETURN(result);
282} 276}
283 277
284Result KServerSession::ReceiveRequest(std::shared_ptr<Service::HLERequestContext>* out_context, 278Result KServerSession::ReceiveRequest(std::shared_ptr<Service::HLERequestContext>* out_context,
@@ -291,10 +285,10 @@ Result KServerSession::ReceiveRequest(std::shared_ptr<Service::HLERequestContext
291 KThread* client_thread; 285 KThread* client_thread;
292 286
293 { 287 {
294 KScopedSchedulerLock sl{kernel}; 288 KScopedSchedulerLock sl{m_kernel};
295 289
296 // Ensure that we can service the request. 290 // Ensure that we can service the request.
297 R_UNLESS(!parent->IsClientClosed(), ResultSessionClosed); 291 R_UNLESS(!m_parent->IsClientClosed(), ResultSessionClosed);
298 292
299 // Ensure we aren't already servicing a request. 293 // Ensure we aren't already servicing a request.
300 R_UNLESS(m_current_request == nullptr, ResultNotFound); 294 R_UNLESS(m_current_request == nullptr, ResultNotFound);
@@ -303,7 +297,7 @@ Result KServerSession::ReceiveRequest(std::shared_ptr<Service::HLERequestContext
303 R_UNLESS(!m_request_list.empty(), ResultNotFound); 297 R_UNLESS(!m_request_list.empty(), ResultNotFound);
304 298
305 // Pop the first request from the list. 299 // Pop the first request from the list.
306 request = &m_request_list.front(); 300 request = std::addressof(m_request_list.front());
307 m_request_list.pop_front(); 301 m_request_list.pop_front();
308 302
309 // Get the thread for the request. 303 // Get the thread for the request.
@@ -325,27 +319,27 @@ Result KServerSession::ReceiveRequest(std::shared_ptr<Service::HLERequestContext
325 // bool recv_list_broken = false; 319 // bool recv_list_broken = false;
326 320
327 // Receive the message. 321 // Receive the message.
328 Core::Memory::Memory& memory{kernel.System().Memory()}; 322 Core::Memory::Memory& memory{m_kernel.System().Memory()};
329 if (out_context != nullptr) { 323 if (out_context != nullptr) {
330 // HLE request. 324 // HLE request.
331 u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(client_message))}; 325 u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(client_message))};
332 *out_context = 326 *out_context =
333 std::make_shared<Service::HLERequestContext>(kernel, memory, this, client_thread); 327 std::make_shared<Service::HLERequestContext>(m_kernel, memory, this, client_thread);
334 (*out_context)->SetSessionRequestManager(manager); 328 (*out_context)->SetSessionRequestManager(manager);
335 (*out_context) 329 (*out_context)
336 ->PopulateFromIncomingCommandBuffer(client_thread->GetOwnerProcess()->GetHandleTable(), 330 ->PopulateFromIncomingCommandBuffer(client_thread->GetOwnerProcess()->GetHandleTable(),
337 cmd_buf); 331 cmd_buf);
338 } else { 332 } else {
339 KThread* server_thread{GetCurrentThreadPointer(kernel)}; 333 KThread* server_thread{GetCurrentThreadPointer(m_kernel)};
340 UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess()); 334 UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess());
341 335
342 auto* src_msg_buffer = memory.GetPointer(client_message); 336 auto* src_msg_buffer = memory.GetPointer(client_message);
343 auto* dst_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress()); 337 auto* dst_msg_buffer = memory.GetPointer(server_thread->GetTlsAddress());
344 std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size); 338 std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size);
345 } 339 }
346 340
347 // We succeeded. 341 // We succeeded.
348 return ResultSuccess; 342 R_SUCCEED();
349} 343}
350 344
351void KServerSession::CleanupRequests() { 345void KServerSession::CleanupRequests() {
@@ -356,7 +350,7 @@ void KServerSession::CleanupRequests() {
356 // Get the next request. 350 // Get the next request.
357 KSessionRequest* request = nullptr; 351 KSessionRequest* request = nullptr;
358 { 352 {
359 KScopedSchedulerLock sl{kernel}; 353 KScopedSchedulerLock sl{m_kernel};
360 354
361 if (m_current_request) { 355 if (m_current_request) {
362 // Choose the current request if we have one. 356 // Choose the current request if we have one.
@@ -364,7 +358,7 @@ void KServerSession::CleanupRequests() {
364 m_current_request = nullptr; 358 m_current_request = nullptr;
365 } else if (!m_request_list.empty()) { 359 } else if (!m_request_list.empty()) {
366 // Pop the request from the front of the list. 360 // Pop the request from the front of the list.
367 request = &m_request_list.front(); 361 request = std::addressof(m_request_list.front());
368 m_request_list.pop_front(); 362 m_request_list.pop_front();
369 } 363 }
370 } 364 }
@@ -387,7 +381,8 @@ void KServerSession::CleanupRequests() {
387 // KProcess *client_process = (client_thread != nullptr) ? 381 // KProcess *client_process = (client_thread != nullptr) ?
388 // client_thread->GetOwnerProcess() : nullptr; 382 // client_thread->GetOwnerProcess() : nullptr;
389 // KProcessPageTable *client_page_table = (client_process != nullptr) ? 383 // KProcessPageTable *client_page_table = (client_process != nullptr) ?
390 // &client_process->GetPageTable() : nullptr; 384 // std::addressof(client_process->GetPageTable())
385 // : nullptr;
391 386
392 // Cleanup the mappings. 387 // Cleanup the mappings.
393 // Result result = CleanupMap(request, server_process, client_page_table); 388 // Result result = CleanupMap(request, server_process, client_page_table);
@@ -407,7 +402,7 @@ void KServerSession::CleanupRequests() {
407 event->Signal(); 402 event->Signal();
408 } else { 403 } else {
409 // End the client thread's wait. 404 // End the client thread's wait.
410 KScopedSchedulerLock sl{kernel}; 405 KScopedSchedulerLock sl{m_kernel};
411 406
412 if (!client_thread->IsTerminationRequested()) { 407 if (!client_thread->IsTerminationRequested()) {
413 client_thread->EndWait(ResultSessionClosed); 408 client_thread->EndWait(ResultSessionClosed);
diff --git a/src/core/hle/kernel/k_server_session.h b/src/core/hle/kernel/k_server_session.h
index 33f380352..5ee02f556 100644
--- a/src/core/hle/kernel/k_server_session.h
+++ b/src/core/hle/kernel/k_server_session.h
@@ -33,19 +33,17 @@ class KServerSession final : public KSynchronizationObject,
33 friend class ServiceThread; 33 friend class ServiceThread;
34 34
35public: 35public:
36 explicit KServerSession(KernelCore& kernel_); 36 explicit KServerSession(KernelCore& kernel);
37 ~KServerSession() override; 37 ~KServerSession() override;
38 38
39 void Destroy() override; 39 void Destroy() override;
40 40
41 void Initialize(KSession* parent_session_, std::string&& name_); 41 void Initialize(KSession* p) {
42 42 m_parent = p;
43 KSession* GetParent() {
44 return parent;
45 } 43 }
46 44
47 const KSession* GetParent() const { 45 const KSession* GetParent() const {
48 return parent; 46 return m_parent;
49 } 47 }
50 48
51 bool IsSignaled() const override; 49 bool IsSignaled() const override;
@@ -66,10 +64,10 @@ private:
66 void CleanupRequests(); 64 void CleanupRequests();
67 65
68 /// KSession that owns this KServerSession 66 /// KSession that owns this KServerSession
69 KSession* parent{}; 67 KSession* m_parent{};
70 68
71 /// List of threads which are pending a reply. 69 /// List of threads which are pending a reply.
72 boost::intrusive::list<KSessionRequest> m_request_list; 70 boost::intrusive::list<KSessionRequest> m_request_list{};
73 KSessionRequest* m_current_request{}; 71 KSessionRequest* m_current_request{};
74 72
75 KLightLock m_lock; 73 KLightLock m_lock;
diff --git a/src/core/hle/kernel/k_session.cpp b/src/core/hle/kernel/k_session.cpp
index 7e677c028..44d7a8f02 100644
--- a/src/core/hle/kernel/k_session.cpp
+++ b/src/core/hle/kernel/k_session.cpp
@@ -9,69 +9,63 @@
9 9
10namespace Kernel { 10namespace Kernel {
11 11
12KSession::KSession(KernelCore& kernel_) 12KSession::KSession(KernelCore& kernel)
13 : KAutoObjectWithSlabHeapAndContainer{kernel_}, server{kernel_}, client{kernel_} {} 13 : KAutoObjectWithSlabHeapAndContainer{kernel}, m_server{kernel}, m_client{kernel} {}
14KSession::~KSession() = default; 14KSession::~KSession() = default;
15 15
16void KSession::Initialize(KClientPort* port_, const std::string& name_) { 16void KSession::Initialize(KClientPort* client_port, uintptr_t name) {
17 // Increment reference count. 17 // Increment reference count.
18 // Because reference count is one on creation, this will result 18 // Because reference count is one on creation, this will result
19 // in a reference count of two. Thus, when both server and client are closed 19 // in a reference count of two. Thus, when both server and client are closed
20 // this object will be destroyed. 20 // this object will be destroyed.
21 Open(); 21 this->Open();
22 22
23 // Create our sub sessions. 23 // Create our sub sessions.
24 KAutoObject::Create(std::addressof(server)); 24 KAutoObject::Create(std::addressof(m_server));
25 KAutoObject::Create(std::addressof(client)); 25 KAutoObject::Create(std::addressof(m_client));
26 26
27 // Initialize our sub sessions. 27 // Initialize our sub sessions.
28 server.Initialize(this, name_ + ":Server"); 28 m_server.Initialize(this);
29 client.Initialize(this, name_ + ":Client"); 29 m_client.Initialize(this);
30 30
31 // Set state and name. 31 // Set state and name.
32 SetState(State::Normal); 32 this->SetState(State::Normal);
33 name = name_; 33 m_name = name;
34 34
35 // Set our owner process. 35 // Set our owner process.
36 //! FIXME: this is the wrong process! 36 //! FIXME: this is the wrong process!
37 process = kernel.ApplicationProcess(); 37 m_process = m_kernel.ApplicationProcess();
38 process->Open(); 38 m_process->Open();
39 39
40 // Set our port. 40 // Set our port.
41 port = port_; 41 m_port = client_port;
42 if (port != nullptr) { 42 if (m_port != nullptr) {
43 port->Open(); 43 m_port->Open();
44 } 44 }
45 45
46 // Mark initialized. 46 // Mark initialized.
47 initialized = true; 47 m_initialized = true;
48} 48}
49 49
50void KSession::Finalize() { 50void KSession::Finalize() {
51 if (port == nullptr) { 51 if (m_port != nullptr) {
52 return; 52 m_port->OnSessionFinalized();
53 m_port->Close();
53 } 54 }
54
55 port->OnSessionFinalized();
56 port->Close();
57} 55}
58 56
59void KSession::OnServerClosed() { 57void KSession::OnServerClosed() {
60 if (GetState() != State::Normal) { 58 if (this->GetState() == State::Normal) {
61 return; 59 this->SetState(State::ServerClosed);
60 m_client.OnServerClosed();
62 } 61 }
63
64 SetState(State::ServerClosed);
65 client.OnServerClosed();
66} 62}
67 63
68void KSession::OnClientClosed() { 64void KSession::OnClientClosed() {
69 if (GetState() != State::Normal) { 65 if (this->GetState() == State::Normal) {
70 return; 66 SetState(State::ClientClosed);
67 m_server.OnClientClosed();
71 } 68 }
72
73 SetState(State::ClientClosed);
74 server.OnClientClosed();
75} 69}
76 70
77void KSession::PostDestroy(uintptr_t arg) { 71void KSession::PostDestroy(uintptr_t arg) {
diff --git a/src/core/hle/kernel/k_session.h b/src/core/hle/kernel/k_session.h
index 93e5e6f71..f69bab088 100644
--- a/src/core/hle/kernel/k_session.h
+++ b/src/core/hle/kernel/k_session.h
@@ -18,19 +18,18 @@ class KSession final : public KAutoObjectWithSlabHeapAndContainer<KSession, KAut
18 KERNEL_AUTOOBJECT_TRAITS(KSession, KAutoObject); 18 KERNEL_AUTOOBJECT_TRAITS(KSession, KAutoObject);
19 19
20public: 20public:
21 explicit KSession(KernelCore& kernel_); 21 explicit KSession(KernelCore& kernel);
22 ~KSession() override; 22 ~KSession() override;
23 23
24 void Initialize(KClientPort* port_, const std::string& name_); 24 void Initialize(KClientPort* port, uintptr_t name);
25
26 void Finalize() override; 25 void Finalize() override;
27 26
28 bool IsInitialized() const override { 27 bool IsInitialized() const override {
29 return initialized; 28 return m_initialized;
30 } 29 }
31 30
32 uintptr_t GetPostDestroyArgument() const override { 31 uintptr_t GetPostDestroyArgument() const override {
33 return reinterpret_cast<uintptr_t>(process); 32 return reinterpret_cast<uintptr_t>(m_process);
34 } 33 }
35 34
36 static void PostDestroy(uintptr_t arg); 35 static void PostDestroy(uintptr_t arg);
@@ -48,27 +47,23 @@ public:
48 } 47 }
49 48
50 KClientSession& GetClientSession() { 49 KClientSession& GetClientSession() {
51 return client; 50 return m_client;
52 } 51 }
53 52
54 KServerSession& GetServerSession() { 53 KServerSession& GetServerSession() {
55 return server; 54 return m_server;
56 } 55 }
57 56
58 const KClientSession& GetClientSession() const { 57 const KClientSession& GetClientSession() const {
59 return client; 58 return m_client;
60 } 59 }
61 60
62 const KServerSession& GetServerSession() const { 61 const KServerSession& GetServerSession() const {
63 return server; 62 return m_server;
64 } 63 }
65 64
66 const KClientPort* GetParent() const { 65 const KClientPort* GetParent() const {
67 return port; 66 return m_port;
68 }
69
70 KClientPort* GetParent() {
71 return port;
72 } 67 }
73 68
74private: 69private:
@@ -80,20 +75,20 @@ private:
80 }; 75 };
81 76
82 void SetState(State state) { 77 void SetState(State state) {
83 atomic_state = static_cast<u8>(state); 78 m_atomic_state = static_cast<u8>(state);
84 } 79 }
85 80
86 State GetState() const { 81 State GetState() const {
87 return static_cast<State>(atomic_state.load(std::memory_order_relaxed)); 82 return static_cast<State>(m_atomic_state.load());
88 } 83 }
89 84
90 KServerSession server; 85 KServerSession m_server;
91 KClientSession client; 86 KClientSession m_client;
92 std::atomic<std::underlying_type_t<State>> atomic_state{ 87 KClientPort* m_port{};
93 static_cast<std::underlying_type_t<State>>(State::Invalid)}; 88 uintptr_t m_name{};
94 KClientPort* port{}; 89 KProcess* m_process{};
95 KProcess* process{}; 90 std::atomic<u8> m_atomic_state{static_cast<u8>(State::Invalid)};
96 bool initialized{}; 91 bool m_initialized{};
97}; 92};
98 93
99} // namespace Kernel 94} // namespace Kernel
diff --git a/src/core/hle/kernel/k_session_request.cpp b/src/core/hle/kernel/k_session_request.cpp
index 520da6aa7..a329e5690 100644
--- a/src/core/hle/kernel/k_session_request.cpp
+++ b/src/core/hle/kernel/k_session_request.cpp
@@ -14,46 +14,46 @@ Result KSessionRequest::SessionMappings::PushMap(VAddr client, VAddr server, siz
14 // Get the mapping. 14 // Get the mapping.
15 Mapping* mapping; 15 Mapping* mapping;
16 if (index < NumStaticMappings) { 16 if (index < NumStaticMappings) {
17 mapping = &m_static_mappings[index]; 17 mapping = std::addressof(m_static_mappings[index]);
18 } else { 18 } else {
19 // Allocate a page for the extra mappings. 19 // Allocate a page for the extra mappings.
20 if (m_mappings == nullptr) { 20 if (m_mappings == nullptr) {
21 KPageBuffer* page_buffer = KPageBuffer::Allocate(kernel); 21 KPageBuffer* page_buffer = KPageBuffer::Allocate(m_kernel);
22 R_UNLESS(page_buffer != nullptr, ResultOutOfMemory); 22 R_UNLESS(page_buffer != nullptr, ResultOutOfMemory);
23 23
24 m_mappings = reinterpret_cast<Mapping*>(page_buffer); 24 m_mappings = reinterpret_cast<Mapping*>(page_buffer);
25 } 25 }
26 26
27 mapping = &m_mappings[index - NumStaticMappings]; 27 mapping = std::addressof(m_mappings[index - NumStaticMappings]);
28 } 28 }
29 29
30 // Set the mapping. 30 // Set the mapping.
31 mapping->Set(client, server, size, state); 31 mapping->Set(client, server, size, state);
32 32
33 return ResultSuccess; 33 R_SUCCEED();
34} 34}
35 35
36Result KSessionRequest::SessionMappings::PushSend(VAddr client, VAddr server, size_t size, 36Result KSessionRequest::SessionMappings::PushSend(VAddr client, VAddr server, size_t size,
37 KMemoryState state) { 37 KMemoryState state) {
38 ASSERT(m_num_recv == 0); 38 ASSERT(m_num_recv == 0);
39 ASSERT(m_num_exch == 0); 39 ASSERT(m_num_exch == 0);
40 return this->PushMap(client, server, size, state, m_num_send++); 40 R_RETURN(this->PushMap(client, server, size, state, m_num_send++));
41} 41}
42 42
43Result KSessionRequest::SessionMappings::PushReceive(VAddr client, VAddr server, size_t size, 43Result KSessionRequest::SessionMappings::PushReceive(VAddr client, VAddr server, size_t size,
44 KMemoryState state) { 44 KMemoryState state) {
45 ASSERT(m_num_exch == 0); 45 ASSERT(m_num_exch == 0);
46 return this->PushMap(client, server, size, state, m_num_send + m_num_recv++); 46 R_RETURN(this->PushMap(client, server, size, state, m_num_send + m_num_recv++));
47} 47}
48 48
49Result KSessionRequest::SessionMappings::PushExchange(VAddr client, VAddr server, size_t size, 49Result KSessionRequest::SessionMappings::PushExchange(VAddr client, VAddr server, size_t size,
50 KMemoryState state) { 50 KMemoryState state) {
51 return this->PushMap(client, server, size, state, m_num_send + m_num_recv + m_num_exch++); 51 R_RETURN(this->PushMap(client, server, size, state, m_num_send + m_num_recv + m_num_exch++));
52} 52}
53 53
54void KSessionRequest::SessionMappings::Finalize() { 54void KSessionRequest::SessionMappings::Finalize() {
55 if (m_mappings) { 55 if (m_mappings) {
56 KPageBuffer::Free(kernel, reinterpret_cast<KPageBuffer*>(m_mappings)); 56 KPageBuffer::Free(m_kernel, reinterpret_cast<KPageBuffer*>(m_mappings));
57 m_mappings = nullptr; 57 m_mappings = nullptr;
58 } 58 }
59} 59}
diff --git a/src/core/hle/kernel/k_session_request.h b/src/core/hle/kernel/k_session_request.h
index e5558bc2c..5685048ba 100644
--- a/src/core/hle/kernel/k_session_request.h
+++ b/src/core/hle/kernel/k_session_request.h
@@ -47,14 +47,14 @@ public:
47 } 47 }
48 48
49 private: 49 private:
50 VAddr m_client_address; 50 VAddr m_client_address{};
51 VAddr m_server_address; 51 VAddr m_server_address{};
52 size_t m_size; 52 size_t m_size{};
53 KMemoryState m_state; 53 KMemoryState m_state{};
54 }; 54 };
55 55
56 public: 56 public:
57 explicit SessionMappings(KernelCore& kernel_) : kernel(kernel_) {} 57 explicit SessionMappings(KernelCore& kernel) : m_kernel(kernel) {}
58 58
59 void Initialize() {} 59 void Initialize() {}
60 void Finalize(); 60 void Finalize();
@@ -149,8 +149,8 @@ public:
149 } 149 }
150 150
151 private: 151 private:
152 KernelCore& kernel; 152 KernelCore& m_kernel;
153 std::array<Mapping, NumStaticMappings> m_static_mappings; 153 std::array<Mapping, NumStaticMappings> m_static_mappings{};
154 Mapping* m_mappings{}; 154 Mapping* m_mappings{};
155 u8 m_num_send{}; 155 u8 m_num_send{};
156 u8 m_num_recv{}; 156 u8 m_num_recv{};
@@ -158,7 +158,7 @@ public:
158 }; 158 };
159 159
160public: 160public:
161 explicit KSessionRequest(KernelCore& kernel_) : KAutoObject(kernel_), m_mappings(kernel_) {} 161 explicit KSessionRequest(KernelCore& kernel) : KAutoObject(kernel), m_mappings(kernel) {}
162 162
163 static KSessionRequest* Create(KernelCore& kernel) { 163 static KSessionRequest* Create(KernelCore& kernel) {
164 KSessionRequest* req = KSessionRequest::Allocate(kernel); 164 KSessionRequest* req = KSessionRequest::Allocate(kernel);
@@ -170,13 +170,13 @@ public:
170 170
171 void Destroy() override { 171 void Destroy() override {
172 this->Finalize(); 172 this->Finalize();
173 KSessionRequest::Free(kernel, this); 173 KSessionRequest::Free(m_kernel, this);
174 } 174 }
175 175
176 void Initialize(KEvent* event, uintptr_t address, size_t size) { 176 void Initialize(KEvent* event, uintptr_t address, size_t size) {
177 m_mappings.Initialize(); 177 m_mappings.Initialize();
178 178
179 m_thread = GetCurrentThreadPointer(kernel); 179 m_thread = GetCurrentThreadPointer(m_kernel);
180 m_event = event; 180 m_event = event;
181 m_address = address; 181 m_address = address;
182 m_size = size; 182 m_size = size;
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp
index df505edfe..954e5befe 100644
--- a/src/core/hle/kernel/k_shared_memory.cpp
+++ b/src/core/hle/kernel/k_shared_memory.cpp
@@ -12,29 +12,27 @@
12 12
13namespace Kernel { 13namespace Kernel {
14 14
15KSharedMemory::KSharedMemory(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {} 15KSharedMemory::KSharedMemory(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {}
16KSharedMemory::~KSharedMemory() = default; 16KSharedMemory::~KSharedMemory() = default;
17 17
18Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_, 18Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory, KProcess* owner_process,
19 Svc::MemoryPermission owner_permission_, 19 Svc::MemoryPermission owner_permission,
20 Svc::MemoryPermission user_permission_, std::size_t size_, 20 Svc::MemoryPermission user_permission, std::size_t size) {
21 std::string name_) {
22 // Set members. 21 // Set members.
23 owner_process = owner_process_; 22 m_owner_process = owner_process;
24 device_memory = &device_memory_; 23 m_device_memory = std::addressof(device_memory);
25 owner_permission = owner_permission_; 24 m_owner_permission = owner_permission;
26 user_permission = user_permission_; 25 m_user_permission = user_permission;
27 size = Common::AlignUp(size_, PageSize); 26 m_size = Common::AlignUp(size, PageSize);
28 name = std::move(name_);
29 27
30 const size_t num_pages = Common::DivideUp(size, PageSize); 28 const size_t num_pages = Common::DivideUp(size, PageSize);
31 29
32 // Get the resource limit. 30 // Get the resource limit.
33 KResourceLimit* reslimit = kernel.GetSystemResourceLimit(); 31 KResourceLimit* reslimit = m_kernel.GetSystemResourceLimit();
34 32
35 // Reserve memory for ourselves. 33 // Reserve memory for ourselves.
36 KScopedResourceReservation memory_reservation(reslimit, LimitableResource::PhysicalMemoryMax, 34 KScopedResourceReservation memory_reservation(reslimit, LimitableResource::PhysicalMemoryMax,
37 size_); 35 size);
38 R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); 36 R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
39 37
40 // Allocate the memory. 38 // Allocate the memory.
@@ -42,67 +40,66 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
42 //! HACK: Open continuous mapping from sysmodule pool. 40 //! HACK: Open continuous mapping from sysmodule pool.
43 auto option = KMemoryManager::EncodeOption(KMemoryManager::Pool::Secure, 41 auto option = KMemoryManager::EncodeOption(KMemoryManager::Pool::Secure,
44 KMemoryManager::Direction::FromBack); 42 KMemoryManager::Direction::FromBack);
45 physical_address = kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, option); 43 m_physical_address = m_kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, option);
46 R_UNLESS(physical_address != 0, ResultOutOfMemory); 44 R_UNLESS(m_physical_address != 0, ResultOutOfMemory);
47 45
48 //! Insert the result into our page group. 46 //! Insert the result into our page group.
49 page_group.emplace(kernel, &kernel.GetSystemSystemResource().GetBlockInfoManager()); 47 m_page_group.emplace(m_kernel,
50 page_group->AddBlock(physical_address, num_pages); 48 std::addressof(m_kernel.GetSystemSystemResource().GetBlockInfoManager()));
49 m_page_group->AddBlock(m_physical_address, num_pages);
51 50
52 // Commit our reservation. 51 // Commit our reservation.
53 memory_reservation.Commit(); 52 memory_reservation.Commit();
54 53
55 // Set our resource limit. 54 // Set our resource limit.
56 resource_limit = reslimit; 55 m_resource_limit = reslimit;
57 resource_limit->Open(); 56 m_resource_limit->Open();
58 57
59 // Mark initialized. 58 // Mark initialized.
60 is_initialized = true; 59 m_is_initialized = true;
61 60
62 // Clear all pages in the memory. 61 // Clear all pages in the memory.
63 for (const auto& block : *page_group) { 62 for (const auto& block : *m_page_group) {
64 std::memset(device_memory_.GetPointer<void>(block.GetAddress()), 0, block.GetSize()); 63 std::memset(m_device_memory->GetPointer<void>(block.GetAddress()), 0, block.GetSize());
65 } 64 }
66 65
67 return ResultSuccess; 66 R_SUCCEED();
68} 67}
69 68
70void KSharedMemory::Finalize() { 69void KSharedMemory::Finalize() {
71 // Close and finalize the page group. 70 // Close and finalize the page group.
72 page_group->Close(); 71 m_page_group->Close();
73 page_group->Finalize(); 72 m_page_group->Finalize();
74 73
75 // Release the memory reservation. 74 // Release the memory reservation.
76 resource_limit->Release(LimitableResource::PhysicalMemoryMax, size); 75 m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, m_size);
77 resource_limit->Close(); 76 m_resource_limit->Close();
78
79 // Perform inherited finalization.
80 KAutoObjectWithSlabHeapAndContainer<KSharedMemory, KAutoObjectWithList>::Finalize();
81} 77}
82 78
83Result KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t map_size, 79Result KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t map_size,
84 Svc::MemoryPermission map_perm) { 80 Svc::MemoryPermission map_perm) {
85 // Validate the size. 81 // Validate the size.
86 R_UNLESS(size == map_size, ResultInvalidSize); 82 R_UNLESS(m_size == map_size, ResultInvalidSize);
87 83
88 // Validate the permission. 84 // Validate the permission.
89 const Svc::MemoryPermission test_perm = 85 const Svc::MemoryPermission test_perm =
90 &target_process == owner_process ? owner_permission : user_permission; 86 std::addressof(target_process) == m_owner_process ? m_owner_permission : m_user_permission;
91 if (test_perm == Svc::MemoryPermission::DontCare) { 87 if (test_perm == Svc::MemoryPermission::DontCare) {
92 ASSERT(map_perm == Svc::MemoryPermission::Read || map_perm == Svc::MemoryPermission::Write); 88 ASSERT(map_perm == Svc::MemoryPermission::Read || map_perm == Svc::MemoryPermission::Write);
93 } else { 89 } else {
94 R_UNLESS(map_perm == test_perm, ResultInvalidNewMemoryPermission); 90 R_UNLESS(map_perm == test_perm, ResultInvalidNewMemoryPermission);
95 } 91 }
96 92
97 return target_process.PageTable().MapPageGroup(address, *page_group, KMemoryState::Shared, 93 R_RETURN(target_process.PageTable().MapPageGroup(address, *m_page_group, KMemoryState::Shared,
98 ConvertToKMemoryPermission(map_perm)); 94 ConvertToKMemoryPermission(map_perm)));
99} 95}
100 96
101Result KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size) { 97Result KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size) {
102 // Validate the size. 98 // Validate the size.
103 R_UNLESS(size == unmap_size, ResultInvalidSize); 99 R_UNLESS(m_size == unmap_size, ResultInvalidSize);
104 100
105 return target_process.PageTable().UnmapPageGroup(address, *page_group, KMemoryState::Shared); 101 R_RETURN(
102 target_process.PageTable().UnmapPageGroup(address, *m_page_group, KMemoryState::Shared));
106} 103}
107 104
108} // namespace Kernel 105} // namespace Kernel
diff --git a/src/core/hle/kernel/k_shared_memory.h b/src/core/hle/kernel/k_shared_memory.h
index 8b29f0b4a..b4c4125bb 100644
--- a/src/core/hle/kernel/k_shared_memory.h
+++ b/src/core/hle/kernel/k_shared_memory.h
@@ -23,12 +23,12 @@ class KSharedMemory final
23 KERNEL_AUTOOBJECT_TRAITS(KSharedMemory, KAutoObject); 23 KERNEL_AUTOOBJECT_TRAITS(KSharedMemory, KAutoObject);
24 24
25public: 25public:
26 explicit KSharedMemory(KernelCore& kernel_); 26 explicit KSharedMemory(KernelCore& kernel);
27 ~KSharedMemory() override; 27 ~KSharedMemory() override;
28 28
29 Result Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_, 29 Result Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
30 Svc::MemoryPermission owner_permission_, 30 Svc::MemoryPermission owner_permission_,
31 Svc::MemoryPermission user_permission_, std::size_t size_, std::string name_); 31 Svc::MemoryPermission user_permission_, std::size_t size_);
32 32
33 /** 33 /**
34 * Maps a shared memory block to an address in the target process' address space 34 * Maps a shared memory block to an address in the target process' address space
@@ -54,7 +54,7 @@ public:
54 * @return A pointer to the shared memory block from the specified offset 54 * @return A pointer to the shared memory block from the specified offset
55 */ 55 */
56 u8* GetPointer(std::size_t offset = 0) { 56 u8* GetPointer(std::size_t offset = 0) {
57 return device_memory->GetPointer<u8>(physical_address + offset); 57 return m_device_memory->GetPointer<u8>(m_physical_address + offset);
58 } 58 }
59 59
60 /** 60 /**
@@ -63,26 +63,26 @@ public:
63 * @return A pointer to the shared memory block from the specified offset 63 * @return A pointer to the shared memory block from the specified offset
64 */ 64 */
65 const u8* GetPointer(std::size_t offset = 0) const { 65 const u8* GetPointer(std::size_t offset = 0) const {
66 return device_memory->GetPointer<u8>(physical_address + offset); 66 return m_device_memory->GetPointer<u8>(m_physical_address + offset);
67 } 67 }
68 68
69 void Finalize() override; 69 void Finalize() override;
70 70
71 bool IsInitialized() const override { 71 bool IsInitialized() const override {
72 return is_initialized; 72 return m_is_initialized;
73 } 73 }
74 static void PostDestroy([[maybe_unused]] uintptr_t arg) {} 74 static void PostDestroy(uintptr_t arg) {}
75 75
76private: 76private:
77 Core::DeviceMemory* device_memory{}; 77 Core::DeviceMemory* m_device_memory{};
78 KProcess* owner_process{}; 78 KProcess* m_owner_process{};
79 std::optional<KPageGroup> page_group{}; 79 std::optional<KPageGroup> m_page_group{};
80 Svc::MemoryPermission owner_permission{}; 80 Svc::MemoryPermission m_owner_permission{};
81 Svc::MemoryPermission user_permission{}; 81 Svc::MemoryPermission m_user_permission{};
82 PAddr physical_address{}; 82 PAddr m_physical_address{};
83 std::size_t size{}; 83 std::size_t m_size{};
84 KResourceLimit* resource_limit{}; 84 KResourceLimit* m_resource_limit{};
85 bool is_initialized{}; 85 bool m_is_initialized{};
86}; 86};
87 87
88} // namespace Kernel 88} // namespace Kernel
diff --git a/src/core/hle/kernel/k_shared_memory_info.h b/src/core/hle/kernel/k_shared_memory_info.h
index 2bb6b6d08..75b73ba39 100644
--- a/src/core/hle/kernel/k_shared_memory_info.h
+++ b/src/core/hle/kernel/k_shared_memory_info.h
@@ -18,25 +18,28 @@ public:
18 explicit KSharedMemoryInfo(KernelCore&) {} 18 explicit KSharedMemoryInfo(KernelCore&) {}
19 KSharedMemoryInfo() = default; 19 KSharedMemoryInfo() = default;
20 20
21 constexpr void Initialize(KSharedMemory* shmem) { 21 constexpr void Initialize(KSharedMemory* m) {
22 shared_memory = shmem; 22 m_shared_memory = m;
23 m_reference_count = 0;
23 } 24 }
24 25
25 constexpr KSharedMemory* GetSharedMemory() const { 26 constexpr KSharedMemory* GetSharedMemory() const {
26 return shared_memory; 27 return m_shared_memory;
27 } 28 }
28 29
29 constexpr void Open() { 30 constexpr void Open() {
30 ++reference_count; 31 ++m_reference_count;
32 ASSERT(m_reference_count > 0);
31 } 33 }
32 34
33 constexpr bool Close() { 35 constexpr bool Close() {
34 return (--reference_count) == 0; 36 ASSERT(m_reference_count > 0);
37 return (--m_reference_count) == 0;
35 } 38 }
36 39
37private: 40private:
38 KSharedMemory* shared_memory{}; 41 KSharedMemory* m_shared_memory{};
39 size_t reference_count{}; 42 size_t m_reference_count{};
40}; 43};
41 44
42} // namespace Kernel 45} // namespace Kernel
diff --git a/src/core/hle/kernel/k_slab_heap.h b/src/core/hle/kernel/k_slab_heap.h
index 68469b041..334afebb7 100644
--- a/src/core/hle/kernel/k_slab_heap.h
+++ b/src/core/hle/kernel/k_slab_heap.h
@@ -89,7 +89,8 @@ private:
89 if (alloc_peak <= cur_peak) { 89 if (alloc_peak <= cur_peak) {
90 break; 90 break;
91 } 91 }
92 } while (!Common::AtomicCompareAndSwap(&m_peak, alloc_peak, cur_peak, cur_peak)); 92 } while (
93 !Common::AtomicCompareAndSwap(std::addressof(m_peak), alloc_peak, cur_peak, cur_peak));
93 } 94 }
94 95
95public: 96public:
diff --git a/src/core/hle/kernel/k_spin_lock.cpp b/src/core/hle/kernel/k_spin_lock.cpp
index 6e16a1849..852532037 100644
--- a/src/core/hle/kernel/k_spin_lock.cpp
+++ b/src/core/hle/kernel/k_spin_lock.cpp
@@ -6,15 +6,15 @@
6namespace Kernel { 6namespace Kernel {
7 7
8void KSpinLock::Lock() { 8void KSpinLock::Lock() {
9 lck.lock(); 9 m_lock.lock();
10} 10}
11 11
12void KSpinLock::Unlock() { 12void KSpinLock::Unlock() {
13 lck.unlock(); 13 m_lock.unlock();
14} 14}
15 15
16bool KSpinLock::TryLock() { 16bool KSpinLock::TryLock() {
17 return lck.try_lock(); 17 return m_lock.try_lock();
18} 18}
19 19
20} // namespace Kernel 20} // namespace Kernel
diff --git a/src/core/hle/kernel/k_spin_lock.h b/src/core/hle/kernel/k_spin_lock.h
index 397a93d21..094a1e6be 100644
--- a/src/core/hle/kernel/k_spin_lock.h
+++ b/src/core/hle/kernel/k_spin_lock.h
@@ -5,26 +5,24 @@
5 5
6#include <mutex> 6#include <mutex>
7 7
8#include "common/common_funcs.h"
8#include "core/hle/kernel/k_scoped_lock.h" 9#include "core/hle/kernel/k_scoped_lock.h"
9 10
10namespace Kernel { 11namespace Kernel {
11 12
12class KSpinLock { 13class KSpinLock {
13public: 14public:
14 KSpinLock() = default; 15 explicit KSpinLock() = default;
15 16
16 KSpinLock(const KSpinLock&) = delete; 17 YUZU_NON_COPYABLE(KSpinLock);
17 KSpinLock& operator=(const KSpinLock&) = delete; 18 YUZU_NON_MOVEABLE(KSpinLock);
18
19 KSpinLock(KSpinLock&&) = delete;
20 KSpinLock& operator=(KSpinLock&&) = delete;
21 19
22 void Lock(); 20 void Lock();
23 void Unlock(); 21 void Unlock();
24 [[nodiscard]] bool TryLock(); 22 bool TryLock();
25 23
26private: 24private:
27 std::mutex lck; 25 std::mutex m_lock;
28}; 26};
29 27
30// TODO(bunnei): Alias for now, in case we want to implement these accurately in the future. 28// TODO(bunnei): Alias for now, in case we want to implement these accurately in the future.
diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp
index 802dca046..b7da3eee7 100644
--- a/src/core/hle/kernel/k_synchronization_object.cpp
+++ b/src/core/hle/kernel/k_synchronization_object.cpp
@@ -17,9 +17,9 @@ namespace {
17 17
18class ThreadQueueImplForKSynchronizationObjectWait final : public KThreadQueueWithoutEndWait { 18class ThreadQueueImplForKSynchronizationObjectWait final : public KThreadQueueWithoutEndWait {
19public: 19public:
20 ThreadQueueImplForKSynchronizationObjectWait(KernelCore& kernel_, KSynchronizationObject** o, 20 ThreadQueueImplForKSynchronizationObjectWait(KernelCore& kernel, KSynchronizationObject** o,
21 KSynchronizationObject::ThreadListNode* n, s32 c) 21 KSynchronizationObject::ThreadListNode* n, s32 c)
22 : KThreadQueueWithoutEndWait(kernel_), m_objects(o), m_nodes(n), m_count(c) {} 22 : KThreadQueueWithoutEndWait(kernel), m_objects(o), m_nodes(n), m_count(c) {}
23 23
24 void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object, 24 void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
25 Result wait_result) override { 25 Result wait_result) override {
@@ -71,25 +71,26 @@ void KSynchronizationObject::Finalize() {
71 KAutoObject::Finalize(); 71 KAutoObject::Finalize();
72} 72}
73 73
74Result KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index, 74Result KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
75 KSynchronizationObject** objects, const s32 num_objects, 75 KSynchronizationObject** objects, const s32 num_objects,
76 s64 timeout) { 76 s64 timeout) {
77 // Allocate space on stack for thread nodes. 77 // Allocate space on stack for thread nodes.
78 std::vector<ThreadListNode> thread_nodes(num_objects); 78 std::vector<ThreadListNode> thread_nodes(num_objects);
79 79
80 // Prepare for wait. 80 // Prepare for wait.
81 KThread* thread = GetCurrentThreadPointer(kernel_ctx); 81 KThread* thread = GetCurrentThreadPointer(kernel);
82 ThreadQueueImplForKSynchronizationObjectWait wait_queue(kernel_ctx, objects, 82 KHardwareTimer* timer{};
83 thread_nodes.data(), num_objects); 83 ThreadQueueImplForKSynchronizationObjectWait wait_queue(kernel, objects, thread_nodes.data(),
84 num_objects);
84 85
85 { 86 {
86 // Setup the scheduling lock and sleep. 87 // Setup the scheduling lock and sleep.
87 KScopedSchedulerLockAndSleep slp(kernel_ctx, thread, timeout); 88 KScopedSchedulerLockAndSleep slp(kernel, std::addressof(timer), thread, timeout);
88 89
89 // Check if the thread should terminate. 90 // Check if the thread should terminate.
90 if (thread->IsTerminationRequested()) { 91 if (thread->IsTerminationRequested()) {
91 slp.CancelSleep(); 92 slp.CancelSleep();
92 return ResultTerminationRequested; 93 R_THROW(ResultTerminationRequested);
93 } 94 }
94 95
95 // Check if any of the objects are already signaled. 96 // Check if any of the objects are already signaled.
@@ -99,21 +100,21 @@ Result KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
99 if (objects[i]->IsSignaled()) { 100 if (objects[i]->IsSignaled()) {
100 *out_index = i; 101 *out_index = i;
101 slp.CancelSleep(); 102 slp.CancelSleep();
102 return ResultSuccess; 103 R_THROW(ResultSuccess);
103 } 104 }
104 } 105 }
105 106
106 // Check if the timeout is zero. 107 // Check if the timeout is zero.
107 if (timeout == 0) { 108 if (timeout == 0) {
108 slp.CancelSleep(); 109 slp.CancelSleep();
109 return ResultTimedOut; 110 R_THROW(ResultTimedOut);
110 } 111 }
111 112
112 // Check if waiting was canceled. 113 // Check if waiting was canceled.
113 if (thread->IsWaitCancelled()) { 114 if (thread->IsWaitCancelled()) {
114 slp.CancelSleep(); 115 slp.CancelSleep();
115 thread->ClearWaitCancelled(); 116 thread->ClearWaitCancelled();
116 return ResultCancelled; 117 R_THROW(ResultCancelled);
117 } 118 }
118 119
119 // Add the waiters. 120 // Add the waiters.
@@ -131,6 +132,7 @@ Result KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
131 thread->SetSyncedIndex(-1); 132 thread->SetSyncedIndex(-1);
132 133
133 // Wait for an object to be signaled. 134 // Wait for an object to be signaled.
135 wait_queue.SetHardwareTimer(timer);
134 thread->BeginWait(std::addressof(wait_queue)); 136 thread->BeginWait(std::addressof(wait_queue));
135 thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Synchronization); 137 thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Synchronization);
136 } 138 }
@@ -139,16 +141,15 @@ Result KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
139 *out_index = thread->GetSyncedIndex(); 141 *out_index = thread->GetSyncedIndex();
140 142
141 // Get the wait result. 143 // Get the wait result.
142 return thread->GetWaitResult(); 144 R_RETURN(thread->GetWaitResult());
143} 145}
144 146
145KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_) 147KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : KAutoObjectWithList{kernel} {}
146 : KAutoObjectWithList{kernel_} {}
147 148
148KSynchronizationObject::~KSynchronizationObject() = default; 149KSynchronizationObject::~KSynchronizationObject() = default;
149 150
150void KSynchronizationObject::NotifyAvailable(Result result) { 151void KSynchronizationObject::NotifyAvailable(Result result) {
151 KScopedSchedulerLock sl(kernel); 152 KScopedSchedulerLock sl(m_kernel);
152 153
153 // If we're not signaled, we've nothing to notify. 154 // If we're not signaled, we've nothing to notify.
154 if (!this->IsSignaled()) { 155 if (!this->IsSignaled()) {
@@ -156,7 +157,7 @@ void KSynchronizationObject::NotifyAvailable(Result result) {
156 } 157 }
157 158
158 // Iterate over each thread. 159 // Iterate over each thread.
159 for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) { 160 for (auto* cur_node = m_thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
160 cur_node->thread->NotifyAvailable(this, result); 161 cur_node->thread->NotifyAvailable(this, result);
161 } 162 }
162} 163}
@@ -166,8 +167,8 @@ std::vector<KThread*> KSynchronizationObject::GetWaitingThreadsForDebugging() co
166 167
167 // If debugging, dump the list of waiters. 168 // If debugging, dump the list of waiters.
168 { 169 {
169 KScopedSchedulerLock lock(kernel); 170 KScopedSchedulerLock lock(m_kernel);
170 for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) { 171 for (auto* cur_node = m_thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
171 threads.emplace_back(cur_node->thread); 172 threads.emplace_back(cur_node->thread);
172 } 173 }
173 } 174 }
diff --git a/src/core/hle/kernel/k_synchronization_object.h b/src/core/hle/kernel/k_synchronization_object.h
index 8d8122ab7..d55a2673d 100644
--- a/src/core/hle/kernel/k_synchronization_object.h
+++ b/src/core/hle/kernel/k_synchronization_object.h
@@ -24,31 +24,30 @@ public:
24 KThread* thread{}; 24 KThread* thread{};
25 }; 25 };
26 26
27 [[nodiscard]] static Result Wait(KernelCore& kernel, s32* out_index, 27 static Result Wait(KernelCore& kernel, s32* out_index, KSynchronizationObject** objects,
28 KSynchronizationObject** objects, const s32 num_objects, 28 const s32 num_objects, s64 timeout);
29 s64 timeout);
30 29
31 void Finalize() override; 30 void Finalize() override;
32 31
33 [[nodiscard]] virtual bool IsSignaled() const = 0; 32 virtual bool IsSignaled() const = 0;
34 33
35 [[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const; 34 std::vector<KThread*> GetWaitingThreadsForDebugging() const;
36 35
37 void LinkNode(ThreadListNode* node_) { 36 void LinkNode(ThreadListNode* node_) {
38 // Link the node to the list. 37 // Link the node to the list.
39 if (thread_list_tail == nullptr) { 38 if (m_thread_list_tail == nullptr) {
40 thread_list_head = node_; 39 m_thread_list_head = node_;
41 } else { 40 } else {
42 thread_list_tail->next = node_; 41 m_thread_list_tail->next = node_;
43 } 42 }
44 43
45 thread_list_tail = node_; 44 m_thread_list_tail = node_;
46 } 45 }
47 46
48 void UnlinkNode(ThreadListNode* node_) { 47 void UnlinkNode(ThreadListNode* node_) {
49 // Unlink the node from the list. 48 // Unlink the node from the list.
50 ThreadListNode* prev_ptr = 49 ThreadListNode* prev_ptr =
51 reinterpret_cast<ThreadListNode*>(std::addressof(thread_list_head)); 50 reinterpret_cast<ThreadListNode*>(std::addressof(m_thread_list_head));
52 ThreadListNode* prev_val = nullptr; 51 ThreadListNode* prev_val = nullptr;
53 ThreadListNode *prev, *tail_prev; 52 ThreadListNode *prev, *tail_prev;
54 53
@@ -59,8 +58,8 @@ public:
59 prev_val = prev_ptr; 58 prev_val = prev_ptr;
60 } while (prev_ptr != node_); 59 } while (prev_ptr != node_);
61 60
62 if (thread_list_tail == node_) { 61 if (m_thread_list_tail == node_) {
63 thread_list_tail = tail_prev; 62 m_thread_list_tail = tail_prev;
64 } 63 }
65 64
66 prev->next = node_->next; 65 prev->next = node_->next;
@@ -78,8 +77,8 @@ protected:
78 } 77 }
79 78
80private: 79private:
81 ThreadListNode* thread_list_head{}; 80 ThreadListNode* m_thread_list_head{};
82 ThreadListNode* thread_list_tail{}; 81 ThreadListNode* m_thread_list_tail{};
83}; 82};
84 83
85} // namespace Kernel 84} // namespace Kernel
diff --git a/src/core/hle/kernel/k_system_resource.cpp b/src/core/hle/kernel/k_system_resource.cpp
index 4cc377a6c..e6c8d589a 100644
--- a/src/core/hle/kernel/k_system_resource.cpp
+++ b/src/core/hle/kernel/k_system_resource.cpp
@@ -5,9 +5,8 @@
5 5
6namespace Kernel { 6namespace Kernel {
7 7
8Result KSecureSystemResource::Initialize([[maybe_unused]] size_t size, 8Result KSecureSystemResource::Initialize(size_t size, KResourceLimit* resource_limit,
9 [[maybe_unused]] KResourceLimit* resource_limit, 9 KMemoryManager::Pool pool) {
10 [[maybe_unused]] KMemoryManager::Pool pool) {
11 // Unimplemented 10 // Unimplemented
12 UNREACHABLE(); 11 UNREACHABLE();
13} 12}
@@ -17,8 +16,8 @@ void KSecureSystemResource::Finalize() {
17 UNREACHABLE(); 16 UNREACHABLE();
18} 17}
19 18
20size_t KSecureSystemResource::CalculateRequiredSecureMemorySize( 19size_t KSecureSystemResource::CalculateRequiredSecureMemorySize(size_t size,
21 [[maybe_unused]] size_t size, [[maybe_unused]] KMemoryManager::Pool pool) { 20 KMemoryManager::Pool pool) {
22 // Unimplemented 21 // Unimplemented
23 UNREACHABLE(); 22 UNREACHABLE();
24} 23}
diff --git a/src/core/hle/kernel/k_system_resource.h b/src/core/hle/kernel/k_system_resource.h
index 9a991f725..d36aaa9bd 100644
--- a/src/core/hle/kernel/k_system_resource.h
+++ b/src/core/hle/kernel/k_system_resource.h
@@ -21,7 +21,7 @@ class KSystemResource : public KAutoObject {
21 KERNEL_AUTOOBJECT_TRAITS(KSystemResource, KAutoObject); 21 KERNEL_AUTOOBJECT_TRAITS(KSystemResource, KAutoObject);
22 22
23public: 23public:
24 explicit KSystemResource(KernelCore& kernel_) : KAutoObject(kernel_) {} 24 explicit KSystemResource(KernelCore& kernel) : KAutoObject(kernel) {}
25 25
26protected: 26protected:
27 void SetSecureResource() { 27 void SetSecureResource() {
@@ -87,8 +87,8 @@ private:
87class KSecureSystemResource final 87class KSecureSystemResource final
88 : public KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource> { 88 : public KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource> {
89public: 89public:
90 explicit KSecureSystemResource(KernelCore& kernel_) 90 explicit KSecureSystemResource(KernelCore& kernel)
91 : KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource>(kernel_) { 91 : KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource>(kernel) {
92 // Mark ourselves as being a secure resource. 92 // Mark ourselves as being a secure resource.
93 this->SetSecureResource(); 93 this->SetSecureResource();
94 } 94 }
@@ -99,7 +99,7 @@ public:
99 bool IsInitialized() const { 99 bool IsInitialized() const {
100 return m_is_initialized; 100 return m_is_initialized;
101 } 101 }
102 static void PostDestroy([[maybe_unused]] uintptr_t arg) {} 102 static void PostDestroy(uintptr_t arg) {}
103 103
104 size_t CalculateRequiredSecureMemorySize() const { 104 size_t CalculateRequiredSecureMemorySize() const {
105 return CalculateRequiredSecureMemorySize(m_resource_size, m_resource_pool); 105 return CalculateRequiredSecureMemorySize(m_resource_size, m_resource_pool);
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 15ae652f9..c0e3ecb45 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -35,15 +35,11 @@
35#include "core/hle/result.h" 35#include "core/hle/result.h"
36#include "core/memory.h" 36#include "core/memory.h"
37 37
38#ifdef ARCHITECTURE_x86_64
39#include "core/arm/dynarmic/arm_dynarmic_32.h"
40#endif
41
42namespace { 38namespace {
43 39
44constexpr inline s32 TerminatingThreadPriority = Kernel::Svc::SystemThreadPriorityHighest - 1; 40constexpr inline s32 TerminatingThreadPriority = Kernel::Svc::SystemThreadPriorityHighest - 1;
45 41
46static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top, 42static void ResetThreadContext32(Kernel::KThread::ThreadContext32& context, u32 stack_top,
47 u32 entry_point, u32 arg) { 43 u32 entry_point, u32 arg) {
48 context = {}; 44 context = {};
49 context.cpu_registers[0] = arg; 45 context.cpu_registers[0] = arg;
@@ -52,7 +48,7 @@ static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context,
52 context.fpscr = 0; 48 context.fpscr = 0;
53} 49}
54 50
55static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context, VAddr stack_top, 51static void ResetThreadContext64(Kernel::KThread::ThreadContext64& context, VAddr stack_top,
56 VAddr entry_point, u64 arg) { 52 VAddr entry_point, u64 arg) {
57 context = {}; 53 context = {};
58 context.cpu_registers[0] = arg; 54 context.cpu_registers[0] = arg;
@@ -77,14 +73,14 @@ struct ThreadLocalRegion {
77 73
78class ThreadQueueImplForKThreadSleep final : public KThreadQueueWithoutEndWait { 74class ThreadQueueImplForKThreadSleep final : public KThreadQueueWithoutEndWait {
79public: 75public:
80 explicit ThreadQueueImplForKThreadSleep(KernelCore& kernel_) 76 explicit ThreadQueueImplForKThreadSleep(KernelCore& kernel)
81 : KThreadQueueWithoutEndWait(kernel_) {} 77 : KThreadQueueWithoutEndWait(kernel) {}
82}; 78};
83 79
84class ThreadQueueImplForKThreadSetProperty final : public KThreadQueue { 80class ThreadQueueImplForKThreadSetProperty final : public KThreadQueue {
85public: 81public:
86 explicit ThreadQueueImplForKThreadSetProperty(KernelCore& kernel_, KThread::WaiterList* wl) 82 explicit ThreadQueueImplForKThreadSetProperty(KernelCore& kernel, KThread::WaiterList* wl)
87 : KThreadQueue(kernel_), m_wait_list(wl) {} 83 : KThreadQueue(kernel), m_wait_list(wl) {}
88 84
89 void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override { 85 void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
90 // Remove the thread from the wait list. 86 // Remove the thread from the wait list.
@@ -95,13 +91,13 @@ public:
95 } 91 }
96 92
97private: 93private:
98 KThread::WaiterList* m_wait_list; 94 KThread::WaiterList* m_wait_list{};
99}; 95};
100 96
101} // namespace 97} // namespace
102 98
103KThread::KThread(KernelCore& kernel_) 99KThread::KThread(KernelCore& kernel)
104 : KAutoObjectWithSlabHeapAndContainer{kernel_}, activity_pause_lock{kernel_} {} 100 : KAutoObjectWithSlabHeapAndContainer{kernel}, m_activity_pause_lock{kernel} {}
105KThread::~KThread() = default; 101KThread::~KThread() = default;
106 102
107Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio, 103Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
@@ -117,7 +113,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
117 ASSERT(0 <= phys_core && phys_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES)); 113 ASSERT(0 <= phys_core && phys_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
118 114
119 // First, clear the TLS address. 115 // First, clear the TLS address.
120 tls_address = {}; 116 m_tls_address = {};
121 117
122 // Next, assert things based on the type. 118 // Next, assert things based on the type.
123 switch (type) { 119 switch (type) {
@@ -141,110 +137,110 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
141 ASSERT_MSG(false, "KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type)); 137 ASSERT_MSG(false, "KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type));
142 break; 138 break;
143 } 139 }
144 thread_type = type; 140 m_thread_type = type;
145 141
146 // Set the ideal core ID and affinity mask. 142 // Set the ideal core ID and affinity mask.
147 virtual_ideal_core_id = virt_core; 143 m_virtual_ideal_core_id = virt_core;
148 physical_ideal_core_id = phys_core; 144 m_physical_ideal_core_id = phys_core;
149 virtual_affinity_mask = 1ULL << virt_core; 145 m_virtual_affinity_mask = 1ULL << virt_core;
150 physical_affinity_mask.SetAffinity(phys_core, true); 146 m_physical_affinity_mask.SetAffinity(phys_core, true);
151 147
152 // Set the thread state. 148 // Set the thread state.
153 thread_state = (type == ThreadType::Main || type == ThreadType::Dummy) 149 m_thread_state = (type == ThreadType::Main || type == ThreadType::Dummy)
154 ? ThreadState::Runnable 150 ? ThreadState::Runnable
155 : ThreadState::Initialized; 151 : ThreadState::Initialized;
156 152
157 // Set TLS address. 153 // Set TLS address.
158 tls_address = 0; 154 m_tls_address = 0;
159 155
160 // Set parent and condvar tree. 156 // Set parent and condvar tree.
161 parent = nullptr; 157 m_parent = nullptr;
162 condvar_tree = nullptr; 158 m_condvar_tree = nullptr;
163 159
164 // Set sync booleans. 160 // Set sync booleans.
165 signaled = false; 161 m_signaled = false;
166 termination_requested = false; 162 m_termination_requested = false;
167 wait_cancelled = false; 163 m_wait_cancelled = false;
168 cancellable = false; 164 m_cancellable = false;
169 165
170 // Set core ID and wait result. 166 // Set core ID and wait result.
171 core_id = phys_core; 167 m_core_id = phys_core;
172 wait_result = ResultNoSynchronizationObject; 168 m_wait_result = ResultNoSynchronizationObject;
173 169
174 // Set priorities. 170 // Set priorities.
175 priority = prio; 171 m_priority = prio;
176 base_priority = prio; 172 m_base_priority = prio;
177 173
178 // Initialize sleeping queue. 174 // Initialize sleeping queue.
179 wait_queue = nullptr; 175 m_wait_queue = nullptr;
180 176
181 // Set suspend flags. 177 // Set suspend flags.
182 suspend_request_flags = 0; 178 m_suspend_request_flags = 0;
183 suspend_allowed_flags = static_cast<u32>(ThreadState::SuspendFlagMask); 179 m_suspend_allowed_flags = static_cast<u32>(ThreadState::SuspendFlagMask);
184 180
185 // We're neither debug attached, nor are we nesting our priority inheritance. 181 // We're neither debug attached, nor are we nesting our priority inheritance.
186 debug_attached = false; 182 m_debug_attached = false;
187 priority_inheritance_count = 0; 183 m_priority_inheritance_count = 0;
188 184
189 // We haven't been scheduled, and we have done no light IPC. 185 // We haven't been scheduled, and we have done no light IPC.
190 schedule_count = -1; 186 m_schedule_count = -1;
191 last_scheduled_tick = 0; 187 m_last_scheduled_tick = 0;
192 light_ipc_data = nullptr; 188 m_light_ipc_data = nullptr;
193 189
194 // We're not waiting for a lock, and we haven't disabled migration. 190 // We're not waiting for a lock, and we haven't disabled migration.
195 waiting_lock_info = nullptr; 191 m_waiting_lock_info = nullptr;
196 num_core_migration_disables = 0; 192 m_num_core_migration_disables = 0;
197 193
198 // We have no waiters, but we do have an entrypoint. 194 // We have no waiters, but we do have an entrypoint.
199 num_kernel_waiters = 0; 195 m_num_kernel_waiters = 0;
200 196
201 // Set our current core id. 197 // Set our current core id.
202 current_core_id = phys_core; 198 m_current_core_id = phys_core;
203 199
204 // We haven't released our resource limit hint, and we've spent no time on the cpu. 200 // We haven't released our resource limit hint, and we've spent no time on the cpu.
205 resource_limit_release_hint = false; 201 m_resource_limit_release_hint = false;
206 cpu_time = 0; 202 m_cpu_time = 0;
207 203
208 // Set debug context. 204 // Set debug context.
209 stack_top = user_stack_top; 205 m_stack_top = user_stack_top;
210 argument = arg; 206 m_argument = arg;
211 207
212 // Clear our stack parameters. 208 // Clear our stack parameters.
213 std::memset(static_cast<void*>(std::addressof(GetStackParameters())), 0, 209 std::memset(static_cast<void*>(std::addressof(this->GetStackParameters())), 0,
214 sizeof(StackParameters)); 210 sizeof(StackParameters));
215 211
216 // Set parent, if relevant. 212 // Set parent, if relevant.
217 if (owner != nullptr) { 213 if (owner != nullptr) {
218 // Setup the TLS, if needed. 214 // Setup the TLS, if needed.
219 if (type == ThreadType::User) { 215 if (type == ThreadType::User) {
220 R_TRY(owner->CreateThreadLocalRegion(std::addressof(tls_address))); 216 R_TRY(owner->CreateThreadLocalRegion(std::addressof(m_tls_address)));
221 } 217 }
222 218
223 parent = owner; 219 m_parent = owner;
224 parent->Open(); 220 m_parent->Open();
225 } 221 }
226 222
227 // Initialize thread context. 223 // Initialize thread context.
228 ResetThreadContext64(thread_context_64, user_stack_top, func, arg); 224 ResetThreadContext64(m_thread_context_64, user_stack_top, func, arg);
229 ResetThreadContext32(thread_context_32, static_cast<u32>(user_stack_top), 225 ResetThreadContext32(m_thread_context_32, static_cast<u32>(user_stack_top),
230 static_cast<u32>(func), static_cast<u32>(arg)); 226 static_cast<u32>(func), static_cast<u32>(arg));
231 227
232 // Setup the stack parameters. 228 // Setup the stack parameters.
233 StackParameters& sp = GetStackParameters(); 229 StackParameters& sp = this->GetStackParameters();
234 sp.cur_thread = this; 230 sp.cur_thread = this;
235 sp.disable_count = 1; 231 sp.disable_count = 1;
236 SetInExceptionHandler(); 232 this->SetInExceptionHandler();
237 233
238 // Set thread ID. 234 // Set thread ID.
239 thread_id = kernel.CreateNewThreadID(); 235 m_thread_id = m_kernel.CreateNewThreadID();
240 236
241 // We initialized! 237 // We initialized!
242 initialized = true; 238 m_initialized = true;
243 239
244 // Register ourselves with our parent process. 240 // Register ourselves with our parent process.
245 if (parent != nullptr) { 241 if (m_parent != nullptr) {
246 parent->RegisterThread(this); 242 m_parent->RegisterThread(this);
247 if (parent->IsSuspended()) { 243 if (m_parent->IsSuspended()) {
248 RequestSuspend(SuspendType::Process); 244 RequestSuspend(SuspendType::Process);
249 } 245 }
250 } 246 }
@@ -259,8 +255,7 @@ Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_
259 R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type)); 255 R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
260 256
261 // Initialize emulation parameters. 257 // Initialize emulation parameters.
262 thread->host_context = std::make_shared<Common::Fiber>(std::move(init_func)); 258 thread->m_host_context = std::make_shared<Common::Fiber>(std::move(init_func));
263 thread->is_single_core = !Settings::values.use_multi_core.GetValue();
264 259
265 R_SUCCEED(); 260 R_SUCCEED();
266} 261}
@@ -270,7 +265,7 @@ Result KThread::InitializeDummyThread(KThread* thread, KProcess* owner) {
270 R_TRY(thread->Initialize({}, {}, {}, DummyThreadPriority, 3, owner, ThreadType::Dummy)); 265 R_TRY(thread->Initialize({}, {}, {}, DummyThreadPriority, 3, owner, ThreadType::Dummy));
271 266
272 // Initialize emulation parameters. 267 // Initialize emulation parameters.
273 thread->stack_parameters.disable_count = 0; 268 thread->m_stack_parameters.disable_count = 0;
274 269
275 R_SUCCEED(); 270 R_SUCCEED();
276} 271}
@@ -331,25 +326,25 @@ void KThread::PostDestroy(uintptr_t arg) {
331 326
332void KThread::Finalize() { 327void KThread::Finalize() {
333 // If the thread has an owner process, unregister it. 328 // If the thread has an owner process, unregister it.
334 if (parent != nullptr) { 329 if (m_parent != nullptr) {
335 parent->UnregisterThread(this); 330 m_parent->UnregisterThread(this);
336 } 331 }
337 332
338 // If the thread has a local region, delete it. 333 // If the thread has a local region, delete it.
339 if (tls_address != 0) { 334 if (m_tls_address != 0) {
340 ASSERT(parent->DeleteThreadLocalRegion(tls_address).IsSuccess()); 335 ASSERT(m_parent->DeleteThreadLocalRegion(m_tls_address).IsSuccess());
341 } 336 }
342 337
343 // Release any waiters. 338 // Release any waiters.
344 { 339 {
345 ASSERT(waiting_lock_info == nullptr); 340 ASSERT(m_waiting_lock_info == nullptr);
346 KScopedSchedulerLock sl{kernel}; 341 KScopedSchedulerLock sl{m_kernel};
347 342
348 // Check that we have no kernel waiters. 343 // Check that we have no kernel waiters.
349 ASSERT(num_kernel_waiters == 0); 344 ASSERT(m_num_kernel_waiters == 0);
350 345
351 auto it = held_lock_info_list.begin(); 346 auto it = m_held_lock_info_list.begin();
352 while (it != held_lock_info_list.end()) { 347 while (it != m_held_lock_info_list.end()) {
353 // Get the lock info. 348 // Get the lock info.
354 auto* const lock_info = std::addressof(*it); 349 auto* const lock_info = std::addressof(*it);
355 350
@@ -371,70 +366,70 @@ void KThread::Finalize() {
371 } 366 }
372 367
373 // Remove the held lock from our list. 368 // Remove the held lock from our list.
374 it = held_lock_info_list.erase(it); 369 it = m_held_lock_info_list.erase(it);
375 370
376 // Free the lock info. 371 // Free the lock info.
377 LockWithPriorityInheritanceInfo::Free(kernel, lock_info); 372 LockWithPriorityInheritanceInfo::Free(m_kernel, lock_info);
378 } 373 }
379 } 374 }
380 375
381 // Release host emulation members. 376 // Release host emulation members.
382 host_context.reset(); 377 m_host_context.reset();
383 378
384 // Perform inherited finalization. 379 // Perform inherited finalization.
385 KSynchronizationObject::Finalize(); 380 KSynchronizationObject::Finalize();
386} 381}
387 382
388bool KThread::IsSignaled() const { 383bool KThread::IsSignaled() const {
389 return signaled; 384 return m_signaled;
390} 385}
391 386
392void KThread::OnTimer() { 387void KThread::OnTimer() {
393 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 388 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
394 389
395 // If we're waiting, cancel the wait. 390 // If we're waiting, cancel the wait.
396 if (GetState() == ThreadState::Waiting) { 391 if (this->GetState() == ThreadState::Waiting) {
397 wait_queue->CancelWait(this, ResultTimedOut, false); 392 m_wait_queue->CancelWait(this, ResultTimedOut, false);
398 } 393 }
399} 394}
400 395
401void KThread::StartTermination() { 396void KThread::StartTermination() {
402 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 397 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
403 398
404 // Release user exception and unpin, if relevant. 399 // Release user exception and unpin, if relevant.
405 if (parent != nullptr) { 400 if (m_parent != nullptr) {
406 parent->ReleaseUserException(this); 401 m_parent->ReleaseUserException(this);
407 if (parent->GetPinnedThread(GetCurrentCoreId(kernel)) == this) { 402 if (m_parent->GetPinnedThread(GetCurrentCoreId(m_kernel)) == this) {
408 parent->UnpinCurrentThread(core_id); 403 m_parent->UnpinCurrentThread(m_core_id);
409 } 404 }
410 } 405 }
411 406
412 // Set state to terminated. 407 // Set state to terminated.
413 SetState(ThreadState::Terminated); 408 this->SetState(ThreadState::Terminated);
414 409
415 // Clear the thread's status as running in parent. 410 // Clear the thread's status as running in parent.
416 if (parent != nullptr) { 411 if (m_parent != nullptr) {
417 parent->ClearRunningThread(this); 412 m_parent->ClearRunningThread(this);
418 } 413 }
419 414
420 // Signal. 415 // Signal.
421 signaled = true; 416 m_signaled = true;
422 KSynchronizationObject::NotifyAvailable(); 417 KSynchronizationObject::NotifyAvailable();
423 418
424 // Clear previous thread in KScheduler. 419 // Clear previous thread in KScheduler.
425 KScheduler::ClearPreviousThread(kernel, this); 420 KScheduler::ClearPreviousThread(m_kernel, this);
426 421
427 // Register terminated dpc flag. 422 // Register terminated dpc flag.
428 RegisterDpc(DpcFlag::Terminated); 423 this->RegisterDpc(DpcFlag::Terminated);
429} 424}
430 425
431void KThread::FinishTermination() { 426void KThread::FinishTermination() {
432 // Ensure that the thread is not executing on any core. 427 // Ensure that the thread is not executing on any core.
433 if (parent != nullptr) { 428 if (m_parent != nullptr) {
434 for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) { 429 for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) {
435 KThread* core_thread{}; 430 KThread* core_thread{};
436 do { 431 do {
437 core_thread = kernel.Scheduler(i).GetSchedulerCurrentThread(); 432 core_thread = m_kernel.Scheduler(i).GetSchedulerCurrentThread();
438 } while (core_thread == this); 433 } while (core_thread == this);
439 } 434 }
440 } 435 }
@@ -449,182 +444,183 @@ void KThread::DoWorkerTaskImpl() {
449} 444}
450 445
451void KThread::Pin(s32 current_core) { 446void KThread::Pin(s32 current_core) {
452 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 447 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
453 448
454 // Set ourselves as pinned. 449 // Set ourselves as pinned.
455 GetStackParameters().is_pinned = true; 450 GetStackParameters().is_pinned = true;
456 451
457 // Disable core migration. 452 // Disable core migration.
458 ASSERT(num_core_migration_disables == 0); 453 ASSERT(m_num_core_migration_disables == 0);
459 { 454 {
460 ++num_core_migration_disables; 455 ++m_num_core_migration_disables;
461 456
462 // Save our ideal state to restore when we're unpinned. 457 // Save our ideal state to restore when we're unpinned.
463 original_physical_ideal_core_id = physical_ideal_core_id; 458 m_original_physical_ideal_core_id = m_physical_ideal_core_id;
464 original_physical_affinity_mask = physical_affinity_mask; 459 m_original_physical_affinity_mask = m_physical_affinity_mask;
465 460
466 // Bind ourselves to this core. 461 // Bind ourselves to this core.
467 const s32 active_core = GetActiveCore(); 462 const s32 active_core = this->GetActiveCore();
468 463
469 SetActiveCore(current_core); 464 this->SetActiveCore(current_core);
470 physical_ideal_core_id = current_core; 465 m_physical_ideal_core_id = current_core;
471 physical_affinity_mask.SetAffinityMask(1ULL << current_core); 466 m_physical_affinity_mask.SetAffinityMask(1ULL << current_core);
472 467
473 if (active_core != current_core || physical_affinity_mask.GetAffinityMask() != 468 if (active_core != current_core ||
474 original_physical_affinity_mask.GetAffinityMask()) { 469 m_physical_affinity_mask.GetAffinityMask() !=
475 KScheduler::OnThreadAffinityMaskChanged(kernel, this, original_physical_affinity_mask, 470 m_original_physical_affinity_mask.GetAffinityMask()) {
476 active_core); 471 KScheduler::OnThreadAffinityMaskChanged(m_kernel, this,
472 m_original_physical_affinity_mask, active_core);
477 } 473 }
478 } 474 }
479 475
480 // Disallow performing thread suspension. 476 // Disallow performing thread suspension.
481 { 477 {
482 // Update our allow flags. 478 // Update our allow flags.
483 suspend_allowed_flags &= ~(1 << (static_cast<u32>(SuspendType::Thread) + 479 m_suspend_allowed_flags &= ~(1 << (static_cast<u32>(SuspendType::Thread) +
484 static_cast<u32>(ThreadState::SuspendShift))); 480 static_cast<u32>(ThreadState::SuspendShift)));
485 481
486 // Update our state. 482 // Update our state.
487 UpdateState(); 483 this->UpdateState();
488 } 484 }
489 485
490 // TODO(bunnei): Update our SVC access permissions. 486 // TODO(bunnei): Update our SVC access permissions.
491 ASSERT(parent != nullptr); 487 ASSERT(m_parent != nullptr);
492} 488}
493 489
494void KThread::Unpin() { 490void KThread::Unpin() {
495 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 491 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
496 492
497 // Set ourselves as unpinned. 493 // Set ourselves as unpinned.
498 GetStackParameters().is_pinned = false; 494 this->GetStackParameters().is_pinned = false;
499 495
500 // Enable core migration. 496 // Enable core migration.
501 ASSERT(num_core_migration_disables == 1); 497 ASSERT(m_num_core_migration_disables == 1);
502 { 498 {
503 num_core_migration_disables--; 499 m_num_core_migration_disables--;
504 500
505 // Restore our original state. 501 // Restore our original state.
506 const KAffinityMask old_mask = physical_affinity_mask; 502 const KAffinityMask old_mask = m_physical_affinity_mask;
507 503
508 physical_ideal_core_id = original_physical_ideal_core_id; 504 m_physical_ideal_core_id = m_original_physical_ideal_core_id;
509 physical_affinity_mask = original_physical_affinity_mask; 505 m_physical_affinity_mask = m_original_physical_affinity_mask;
510 506
511 if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) { 507 if (m_physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
512 const s32 active_core = GetActiveCore(); 508 const s32 active_core = this->GetActiveCore();
513 509
514 if (!physical_affinity_mask.GetAffinity(active_core)) { 510 if (!m_physical_affinity_mask.GetAffinity(active_core)) {
515 if (physical_ideal_core_id >= 0) { 511 if (m_physical_ideal_core_id >= 0) {
516 SetActiveCore(physical_ideal_core_id); 512 this->SetActiveCore(m_physical_ideal_core_id);
517 } else { 513 } else {
518 SetActiveCore(static_cast<s32>( 514 this->SetActiveCore(static_cast<s32>(
519 Common::BitSize<u64>() - 1 - 515 Common::BitSize<u64>() - 1 -
520 std::countl_zero(physical_affinity_mask.GetAffinityMask()))); 516 std::countl_zero(m_physical_affinity_mask.GetAffinityMask())));
521 } 517 }
522 } 518 }
523 KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_mask, active_core); 519 KScheduler::OnThreadAffinityMaskChanged(m_kernel, this, old_mask, active_core);
524 } 520 }
525 } 521 }
526 522
527 // Allow performing thread suspension (if termination hasn't been requested). 523 // Allow performing thread suspension (if termination hasn't been requested).
528 if (!IsTerminationRequested()) { 524 if (!this->IsTerminationRequested()) {
529 // Update our allow flags. 525 // Update our allow flags.
530 suspend_allowed_flags |= (1 << (static_cast<u32>(SuspendType::Thread) + 526 m_suspend_allowed_flags |= (1 << (static_cast<u32>(SuspendType::Thread) +
531 static_cast<u32>(ThreadState::SuspendShift))); 527 static_cast<u32>(ThreadState::SuspendShift)));
532 528
533 // Update our state. 529 // Update our state.
534 UpdateState(); 530 this->UpdateState();
535 } 531 }
536 532
537 // TODO(bunnei): Update our SVC access permissions. 533 // TODO(bunnei): Update our SVC access permissions.
538 ASSERT(parent != nullptr); 534 ASSERT(m_parent != nullptr);
539 535
540 // Resume any threads that began waiting on us while we were pinned. 536 // Resume any threads that began waiting on us while we were pinned.
541 for (auto it = pinned_waiter_list.begin(); it != pinned_waiter_list.end(); ++it) { 537 for (auto it = m_pinned_waiter_list.begin(); it != m_pinned_waiter_list.end(); ++it) {
542 it->EndWait(ResultSuccess); 538 it->EndWait(ResultSuccess);
543 } 539 }
544} 540}
545 541
546u16 KThread::GetUserDisableCount() const { 542u16 KThread::GetUserDisableCount() const {
547 if (!IsUserThread()) { 543 if (!this->IsUserThread()) {
548 // We only emulate TLS for user threads 544 // We only emulate TLS for user threads
549 return {}; 545 return {};
550 } 546 }
551 547
552 auto& memory = kernel.System().Memory(); 548 auto& memory = m_kernel.System().Memory();
553 return memory.Read16(tls_address + offsetof(ThreadLocalRegion, disable_count)); 549 return memory.Read16(m_tls_address + offsetof(ThreadLocalRegion, disable_count));
554} 550}
555 551
556void KThread::SetInterruptFlag() { 552void KThread::SetInterruptFlag() {
557 if (!IsUserThread()) { 553 if (!this->IsUserThread()) {
558 // We only emulate TLS for user threads 554 // We only emulate TLS for user threads
559 return; 555 return;
560 } 556 }
561 557
562 auto& memory = kernel.System().Memory(); 558 auto& memory = m_kernel.System().Memory();
563 memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 1); 559 memory.Write16(m_tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 1);
564} 560}
565 561
566void KThread::ClearInterruptFlag() { 562void KThread::ClearInterruptFlag() {
567 if (!IsUserThread()) { 563 if (!this->IsUserThread()) {
568 // We only emulate TLS for user threads 564 // We only emulate TLS for user threads
569 return; 565 return;
570 } 566 }
571 567
572 auto& memory = kernel.System().Memory(); 568 auto& memory = m_kernel.System().Memory();
573 memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 0); 569 memory.Write16(m_tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 0);
574} 570}
575 571
576Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) { 572Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
577 KScopedSchedulerLock sl{kernel}; 573 KScopedSchedulerLock sl{m_kernel};
578 574
579 // Get the virtual mask. 575 // Get the virtual mask.
580 *out_ideal_core = virtual_ideal_core_id; 576 *out_ideal_core = m_virtual_ideal_core_id;
581 *out_affinity_mask = virtual_affinity_mask; 577 *out_affinity_mask = m_virtual_affinity_mask;
582 578
583 R_SUCCEED(); 579 R_SUCCEED();
584} 580}
585 581
586Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) { 582Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
587 KScopedSchedulerLock sl{kernel}; 583 KScopedSchedulerLock sl{m_kernel};
588 ASSERT(num_core_migration_disables >= 0); 584 ASSERT(m_num_core_migration_disables >= 0);
589 585
590 // Select between core mask and original core mask. 586 // Select between core mask and original core mask.
591 if (num_core_migration_disables == 0) { 587 if (m_num_core_migration_disables == 0) {
592 *out_ideal_core = physical_ideal_core_id; 588 *out_ideal_core = m_physical_ideal_core_id;
593 *out_affinity_mask = physical_affinity_mask.GetAffinityMask(); 589 *out_affinity_mask = m_physical_affinity_mask.GetAffinityMask();
594 } else { 590 } else {
595 *out_ideal_core = original_physical_ideal_core_id; 591 *out_ideal_core = m_original_physical_ideal_core_id;
596 *out_affinity_mask = original_physical_affinity_mask.GetAffinityMask(); 592 *out_affinity_mask = m_original_physical_affinity_mask.GetAffinityMask();
597 } 593 }
598 594
599 R_SUCCEED(); 595 R_SUCCEED();
600} 596}
601 597
602Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) { 598Result KThread::SetCoreMask(s32 core_id, u64 v_affinity_mask) {
603 ASSERT(parent != nullptr); 599 ASSERT(m_parent != nullptr);
604 ASSERT(v_affinity_mask != 0); 600 ASSERT(v_affinity_mask != 0);
605 KScopedLightLock lk(activity_pause_lock); 601 KScopedLightLock lk(m_activity_pause_lock);
606 602
607 // Set the core mask. 603 // Set the core mask.
608 u64 p_affinity_mask = 0; 604 u64 p_affinity_mask = 0;
609 { 605 {
610 KScopedSchedulerLock sl(kernel); 606 KScopedSchedulerLock sl(m_kernel);
611 ASSERT(num_core_migration_disables >= 0); 607 ASSERT(m_num_core_migration_disables >= 0);
612 608
613 // If we're updating, set our ideal virtual core. 609 // If we're updating, set our ideal virtual core.
614 if (core_id_ != Svc::IdealCoreNoUpdate) { 610 if (core_id != Svc::IdealCoreNoUpdate) {
615 virtual_ideal_core_id = core_id_; 611 m_virtual_ideal_core_id = core_id;
616 } else { 612 } else {
617 // Preserve our ideal core id. 613 // Preserve our ideal core id.
618 core_id_ = virtual_ideal_core_id; 614 core_id = m_virtual_ideal_core_id;
619 R_UNLESS(((1ULL << core_id_) & v_affinity_mask) != 0, ResultInvalidCombination); 615 R_UNLESS(((1ULL << core_id) & v_affinity_mask) != 0, ResultInvalidCombination);
620 } 616 }
621 617
622 // Set our affinity mask. 618 // Set our affinity mask.
623 virtual_affinity_mask = v_affinity_mask; 619 m_virtual_affinity_mask = v_affinity_mask;
624 620
625 // Translate the virtual core to a physical core. 621 // Translate the virtual core to a physical core.
626 if (core_id_ >= 0) { 622 if (core_id >= 0) {
627 core_id_ = Core::Hardware::VirtualToPhysicalCoreMap[core_id_]; 623 core_id = Core::Hardware::VirtualToPhysicalCoreMap[core_id];
628 } 624 }
629 625
630 // Translate the virtual affinity mask to a physical one. 626 // Translate the virtual affinity mask to a physical one.
@@ -635,43 +631,43 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
635 } 631 }
636 632
637 // If we haven't disabled migration, perform an affinity change. 633 // If we haven't disabled migration, perform an affinity change.
638 if (num_core_migration_disables == 0) { 634 if (m_num_core_migration_disables == 0) {
639 const KAffinityMask old_mask = physical_affinity_mask; 635 const KAffinityMask old_mask = m_physical_affinity_mask;
640 636
641 // Set our new ideals. 637 // Set our new ideals.
642 physical_ideal_core_id = core_id_; 638 m_physical_ideal_core_id = core_id;
643 physical_affinity_mask.SetAffinityMask(p_affinity_mask); 639 m_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
644 640
645 if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) { 641 if (m_physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
646 const s32 active_core = GetActiveCore(); 642 const s32 active_core = GetActiveCore();
647 643
648 if (active_core >= 0 && !physical_affinity_mask.GetAffinity(active_core)) { 644 if (active_core >= 0 && !m_physical_affinity_mask.GetAffinity(active_core)) {
649 const s32 new_core = static_cast<s32>( 645 const s32 new_core = static_cast<s32>(
650 physical_ideal_core_id >= 0 646 m_physical_ideal_core_id >= 0
651 ? physical_ideal_core_id 647 ? m_physical_ideal_core_id
652 : Common::BitSize<u64>() - 1 - 648 : Common::BitSize<u64>() - 1 -
653 std::countl_zero(physical_affinity_mask.GetAffinityMask())); 649 std::countl_zero(m_physical_affinity_mask.GetAffinityMask()));
654 SetActiveCore(new_core); 650 SetActiveCore(new_core);
655 } 651 }
656 KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_mask, active_core); 652 KScheduler::OnThreadAffinityMaskChanged(m_kernel, this, old_mask, active_core);
657 } 653 }
658 } else { 654 } else {
659 // Otherwise, we edit the original affinity for restoration later. 655 // Otherwise, we edit the original affinity for restoration later.
660 original_physical_ideal_core_id = core_id_; 656 m_original_physical_ideal_core_id = core_id;
661 original_physical_affinity_mask.SetAffinityMask(p_affinity_mask); 657 m_original_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
662 } 658 }
663 } 659 }
664 660
665 // Update the pinned waiter list. 661 // Update the pinned waiter list.
666 ThreadQueueImplForKThreadSetProperty wait_queue_(kernel, std::addressof(pinned_waiter_list)); 662 ThreadQueueImplForKThreadSetProperty wait_queue(m_kernel, std::addressof(m_pinned_waiter_list));
667 { 663 {
668 bool retry_update{}; 664 bool retry_update{};
669 do { 665 do {
670 // Lock the scheduler. 666 // Lock the scheduler.
671 KScopedSchedulerLock sl(kernel); 667 KScopedSchedulerLock sl(m_kernel);
672 668
673 // Don't do any further management if our termination has been requested. 669 // Don't do any further management if our termination has been requested.
674 R_SUCCEED_IF(IsTerminationRequested()); 670 R_SUCCEED_IF(this->IsTerminationRequested());
675 671
676 // By default, we won't need to retry. 672 // By default, we won't need to retry.
677 retry_update = false; 673 retry_update = false;
@@ -681,7 +677,7 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
681 s32 thread_core; 677 s32 thread_core;
682 for (thread_core = 0; thread_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); 678 for (thread_core = 0; thread_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES);
683 ++thread_core) { 679 ++thread_core) {
684 if (kernel.Scheduler(thread_core).GetSchedulerCurrentThread() == this) { 680 if (m_kernel.Scheduler(thread_core).GetSchedulerCurrentThread() == this) {
685 thread_is_current = true; 681 thread_is_current = true;
686 break; 682 break;
687 } 683 }
@@ -691,14 +687,14 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
691 // new mask. 687 // new mask.
692 if (thread_is_current && ((1ULL << thread_core) & p_affinity_mask) == 0) { 688 if (thread_is_current && ((1ULL << thread_core) & p_affinity_mask) == 0) {
693 // If the thread is pinned, we want to wait until it's not pinned. 689 // If the thread is pinned, we want to wait until it's not pinned.
694 if (GetStackParameters().is_pinned) { 690 if (this->GetStackParameters().is_pinned) {
695 // Verify that the current thread isn't terminating. 691 // Verify that the current thread isn't terminating.
696 R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), 692 R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(),
697 ResultTerminationRequested); 693 ResultTerminationRequested);
698 694
699 // Wait until the thread isn't pinned any more. 695 // Wait until the thread isn't pinned any more.
700 pinned_waiter_list.push_back(GetCurrentThread(kernel)); 696 m_pinned_waiter_list.push_back(GetCurrentThread(m_kernel));
701 GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue_)); 697 GetCurrentThread(m_kernel).BeginWait(std::addressof(wait_queue));
702 } else { 698 } else {
703 // If the thread isn't pinned, release the scheduler lock and retry until it's 699 // If the thread isn't pinned, release the scheduler lock and retry until it's
704 // not current. 700 // not current.
@@ -714,124 +710,124 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
714void KThread::SetBasePriority(s32 value) { 710void KThread::SetBasePriority(s32 value) {
715 ASSERT(Svc::HighestThreadPriority <= value && value <= Svc::LowestThreadPriority); 711 ASSERT(Svc::HighestThreadPriority <= value && value <= Svc::LowestThreadPriority);
716 712
717 KScopedSchedulerLock sl{kernel}; 713 KScopedSchedulerLock sl{m_kernel};
718 714
719 // Change our base priority. 715 // Change our base priority.
720 base_priority = value; 716 m_base_priority = value;
721 717
722 // Perform a priority restoration. 718 // Perform a priority restoration.
723 RestorePriority(kernel, this); 719 RestorePriority(m_kernel, this);
724} 720}
725 721
726KThread* KThread::GetLockOwner() const { 722KThread* KThread::GetLockOwner() const {
727 return waiting_lock_info != nullptr ? waiting_lock_info->GetOwner() : nullptr; 723 return m_waiting_lock_info != nullptr ? m_waiting_lock_info->GetOwner() : nullptr;
728} 724}
729 725
730void KThread::IncreaseBasePriority(s32 priority_) { 726void KThread::IncreaseBasePriority(s32 priority) {
731 ASSERT(Svc::HighestThreadPriority <= priority_ && priority_ <= Svc::LowestThreadPriority); 727 ASSERT(Svc::HighestThreadPriority <= priority && priority <= Svc::LowestThreadPriority);
732 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); 728 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
733 ASSERT(!this->GetStackParameters().is_pinned); 729 ASSERT(!this->GetStackParameters().is_pinned);
734 730
735 // Set our base priority. 731 // Set our base priority.
736 if (base_priority > priority_) { 732 if (m_base_priority > priority) {
737 base_priority = priority_; 733 m_base_priority = priority;
738 734
739 // Perform a priority restoration. 735 // Perform a priority restoration.
740 RestorePriority(kernel, this); 736 RestorePriority(m_kernel, this);
741 } 737 }
742} 738}
743 739
744void KThread::RequestSuspend(SuspendType type) { 740void KThread::RequestSuspend(SuspendType type) {
745 KScopedSchedulerLock sl{kernel}; 741 KScopedSchedulerLock sl{m_kernel};
746 742
747 // Note the request in our flags. 743 // Note the request in our flags.
748 suspend_request_flags |= 744 m_suspend_request_flags |=
749 (1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type))); 745 (1U << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)));
750 746
751 // Try to perform the suspend. 747 // Try to perform the suspend.
752 TrySuspend(); 748 this->TrySuspend();
753} 749}
754 750
755void KThread::Resume(SuspendType type) { 751void KThread::Resume(SuspendType type) {
756 KScopedSchedulerLock sl{kernel}; 752 KScopedSchedulerLock sl{m_kernel};
757 753
758 // Clear the request in our flags. 754 // Clear the request in our flags.
759 suspend_request_flags &= 755 m_suspend_request_flags &=
760 ~(1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type))); 756 ~(1U << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)));
761 757
762 // Update our state. 758 // Update our state.
763 this->UpdateState(); 759 this->UpdateState();
764} 760}
765 761
766void KThread::WaitCancel() { 762void KThread::WaitCancel() {
767 KScopedSchedulerLock sl{kernel}; 763 KScopedSchedulerLock sl{m_kernel};
768 764
769 // Check if we're waiting and cancellable. 765 // Check if we're waiting and cancellable.
770 if (this->GetState() == ThreadState::Waiting && cancellable) { 766 if (this->GetState() == ThreadState::Waiting && m_cancellable) {
771 wait_cancelled = false; 767 m_wait_cancelled = false;
772 wait_queue->CancelWait(this, ResultCancelled, true); 768 m_wait_queue->CancelWait(this, ResultCancelled, true);
773 } else { 769 } else {
774 // Otherwise, note that we cancelled a wait. 770 // Otherwise, note that we cancelled a wait.
775 wait_cancelled = true; 771 m_wait_cancelled = true;
776 } 772 }
777} 773}
778 774
779void KThread::TrySuspend() { 775void KThread::TrySuspend() {
780 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 776 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
781 ASSERT(IsSuspendRequested()); 777 ASSERT(this->IsSuspendRequested());
782 778
783 // Ensure that we have no waiters. 779 // Ensure that we have no waiters.
784 if (GetNumKernelWaiters() > 0) { 780 if (this->GetNumKernelWaiters() > 0) {
785 return; 781 return;
786 } 782 }
787 ASSERT(GetNumKernelWaiters() == 0); 783 ASSERT(this->GetNumKernelWaiters() == 0);
788 784
789 // Perform the suspend. 785 // Perform the suspend.
790 this->UpdateState(); 786 this->UpdateState();
791} 787}
792 788
793void KThread::UpdateState() { 789void KThread::UpdateState() {
794 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 790 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
795 791
796 // Set our suspend flags in state. 792 // Set our suspend flags in state.
797 const ThreadState old_state = thread_state.load(std::memory_order_relaxed); 793 const ThreadState old_state = m_thread_state.load(std::memory_order_relaxed);
798 const auto new_state = 794 const auto new_state =
799 static_cast<ThreadState>(this->GetSuspendFlags()) | (old_state & ThreadState::Mask); 795 static_cast<ThreadState>(this->GetSuspendFlags()) | (old_state & ThreadState::Mask);
800 thread_state.store(new_state, std::memory_order_relaxed); 796 m_thread_state.store(new_state, std::memory_order_relaxed);
801 797
802 // Note the state change in scheduler. 798 // Note the state change in scheduler.
803 if (new_state != old_state) { 799 if (new_state != old_state) {
804 KScheduler::OnThreadStateChanged(kernel, this, old_state); 800 KScheduler::OnThreadStateChanged(m_kernel, this, old_state);
805 } 801 }
806} 802}
807 803
808void KThread::Continue() { 804void KThread::Continue() {
809 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 805 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
810 806
811 // Clear our suspend flags in state. 807 // Clear our suspend flags in state.
812 const ThreadState old_state = thread_state.load(std::memory_order_relaxed); 808 const ThreadState old_state = m_thread_state.load(std::memory_order_relaxed);
813 thread_state.store(old_state & ThreadState::Mask, std::memory_order_relaxed); 809 m_thread_state.store(old_state & ThreadState::Mask, std::memory_order_relaxed);
814 810
815 // Note the state change in scheduler. 811 // Note the state change in scheduler.
816 KScheduler::OnThreadStateChanged(kernel, this, old_state); 812 KScheduler::OnThreadStateChanged(m_kernel, this, old_state);
817} 813}
818 814
819void KThread::CloneFpuStatus() { 815void KThread::CloneFpuStatus() {
820 // We shouldn't reach here when starting kernel threads. 816 // We shouldn't reach here when starting kernel threads.
821 ASSERT(this->GetOwnerProcess() != nullptr); 817 ASSERT(this->GetOwnerProcess() != nullptr);
822 ASSERT(this->GetOwnerProcess() == GetCurrentProcessPointer(kernel)); 818 ASSERT(this->GetOwnerProcess() == GetCurrentProcessPointer(m_kernel));
823 819
824 if (this->GetOwnerProcess()->Is64BitProcess()) { 820 if (this->GetOwnerProcess()->Is64BitProcess()) {
825 // Clone FPSR and FPCR. 821 // Clone FPSR and FPCR.
826 ThreadContext64 cur_ctx{}; 822 ThreadContext64 cur_ctx{};
827 kernel.System().CurrentArmInterface().SaveContext(cur_ctx); 823 m_kernel.System().CurrentArmInterface().SaveContext(cur_ctx);
828 824
829 this->GetContext64().fpcr = cur_ctx.fpcr; 825 this->GetContext64().fpcr = cur_ctx.fpcr;
830 this->GetContext64().fpsr = cur_ctx.fpsr; 826 this->GetContext64().fpsr = cur_ctx.fpsr;
831 } else { 827 } else {
832 // Clone FPSCR. 828 // Clone FPSCR.
833 ThreadContext32 cur_ctx{}; 829 ThreadContext32 cur_ctx{};
834 kernel.System().CurrentArmInterface().SaveContext(cur_ctx); 830 m_kernel.System().CurrentArmInterface().SaveContext(cur_ctx);
835 831
836 this->GetContext32().fpscr = cur_ctx.fpscr; 832 this->GetContext32().fpscr = cur_ctx.fpscr;
837 } 833 }
@@ -839,12 +835,12 @@ void KThread::CloneFpuStatus() {
839 835
840Result KThread::SetActivity(Svc::ThreadActivity activity) { 836Result KThread::SetActivity(Svc::ThreadActivity activity) {
841 // Lock ourselves. 837 // Lock ourselves.
842 KScopedLightLock lk(activity_pause_lock); 838 KScopedLightLock lk(m_activity_pause_lock);
843 839
844 // Set the activity. 840 // Set the activity.
845 { 841 {
846 // Lock the scheduler. 842 // Lock the scheduler.
847 KScopedSchedulerLock sl(kernel); 843 KScopedSchedulerLock sl(m_kernel);
848 844
849 // Verify our state. 845 // Verify our state.
850 const auto cur_state = this->GetState(); 846 const auto cur_state = this->GetState();
@@ -871,13 +867,13 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
871 867
872 // If the thread is now paused, update the pinned waiter list. 868 // If the thread is now paused, update the pinned waiter list.
873 if (activity == Svc::ThreadActivity::Paused) { 869 if (activity == Svc::ThreadActivity::Paused) {
874 ThreadQueueImplForKThreadSetProperty wait_queue_(kernel, 870 ThreadQueueImplForKThreadSetProperty wait_queue(m_kernel,
875 std::addressof(pinned_waiter_list)); 871 std::addressof(m_pinned_waiter_list));
876 872
877 bool thread_is_current; 873 bool thread_is_current{};
878 do { 874 do {
879 // Lock the scheduler. 875 // Lock the scheduler.
880 KScopedSchedulerLock sl(kernel); 876 KScopedSchedulerLock sl(m_kernel);
881 877
882 // Don't do any further management if our termination has been requested. 878 // Don't do any further management if our termination has been requested.
883 R_SUCCEED_IF(this->IsTerminationRequested()); 879 R_SUCCEED_IF(this->IsTerminationRequested());
@@ -888,17 +884,17 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
888 // Check whether the thread is pinned. 884 // Check whether the thread is pinned.
889 if (this->GetStackParameters().is_pinned) { 885 if (this->GetStackParameters().is_pinned) {
890 // Verify that the current thread isn't terminating. 886 // Verify that the current thread isn't terminating.
891 R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), 887 R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(),
892 ResultTerminationRequested); 888 ResultTerminationRequested);
893 889
894 // Wait until the thread isn't pinned any more. 890 // Wait until the thread isn't pinned any more.
895 pinned_waiter_list.push_back(GetCurrentThread(kernel)); 891 m_pinned_waiter_list.push_back(GetCurrentThread(m_kernel));
896 GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue_)); 892 GetCurrentThread(m_kernel).BeginWait(std::addressof(wait_queue));
897 } else { 893 } else {
898 // Check if the thread is currently running. 894 // Check if the thread is currently running.
899 // If it is, we'll need to retry. 895 // If it is, we'll need to retry.
900 for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) { 896 for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
901 if (kernel.Scheduler(i).GetSchedulerCurrentThread() == this) { 897 if (m_kernel.Scheduler(i).GetSchedulerCurrentThread() == this) {
902 thread_is_current = true; 898 thread_is_current = true;
903 break; 899 break;
904 } 900 }
@@ -912,32 +908,32 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
912 908
913Result KThread::GetThreadContext3(std::vector<u8>& out) { 909Result KThread::GetThreadContext3(std::vector<u8>& out) {
914 // Lock ourselves. 910 // Lock ourselves.
915 KScopedLightLock lk{activity_pause_lock}; 911 KScopedLightLock lk{m_activity_pause_lock};
916 912
917 // Get the context. 913 // Get the context.
918 { 914 {
919 // Lock the scheduler. 915 // Lock the scheduler.
920 KScopedSchedulerLock sl{kernel}; 916 KScopedSchedulerLock sl{m_kernel};
921 917
922 // Verify that we're suspended. 918 // Verify that we're suspended.
923 R_UNLESS(IsSuspendRequested(SuspendType::Thread), ResultInvalidState); 919 R_UNLESS(this->IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
924 920
925 // If we're not terminating, get the thread's user context. 921 // If we're not terminating, get the thread's user context.
926 if (!IsTerminationRequested()) { 922 if (!this->IsTerminationRequested()) {
927 if (parent->Is64BitProcess()) { 923 if (m_parent->Is64BitProcess()) {
928 // Mask away mode bits, interrupt bits, IL bit, and other reserved bits. 924 // Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
929 auto context = GetContext64(); 925 auto context = GetContext64();
930 context.pstate &= 0xFF0FFE20; 926 context.pstate &= 0xFF0FFE20;
931 927
932 out.resize(sizeof(context)); 928 out.resize(sizeof(context));
933 std::memcpy(out.data(), &context, sizeof(context)); 929 std::memcpy(out.data(), std::addressof(context), sizeof(context));
934 } else { 930 } else {
935 // Mask away mode bits, interrupt bits, IL bit, and other reserved bits. 931 // Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
936 auto context = GetContext32(); 932 auto context = GetContext32();
937 context.cpsr &= 0xFF0FFE20; 933 context.cpsr &= 0xFF0FFE20;
938 934
939 out.resize(sizeof(context)); 935 out.resize(sizeof(context));
940 std::memcpy(out.data(), &context, sizeof(context)); 936 std::memcpy(out.data(), std::addressof(context), sizeof(context));
941 } 937 }
942 } 938 }
943 } 939 }
@@ -946,23 +942,23 @@ Result KThread::GetThreadContext3(std::vector<u8>& out) {
946} 942}
947 943
948void KThread::AddHeldLock(LockWithPriorityInheritanceInfo* lock_info) { 944void KThread::AddHeldLock(LockWithPriorityInheritanceInfo* lock_info) {
949 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); 945 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
950 946
951 // Set ourselves as the lock's owner. 947 // Set ourselves as the lock's owner.
952 lock_info->SetOwner(this); 948 lock_info->SetOwner(this);
953 949
954 // Add the lock to our held list. 950 // Add the lock to our held list.
955 held_lock_info_list.push_front(*lock_info); 951 m_held_lock_info_list.push_front(*lock_info);
956} 952}
957 953
958KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(VAddr address_key_, 954KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(VAddr address_key,
959 bool is_kernel_address_key_) { 955 bool is_kernel_address_key) {
960 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); 956 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
961 957
962 // Try to find an existing held lock. 958 // Try to find an existing held lock.
963 for (auto& held_lock : held_lock_info_list) { 959 for (auto& held_lock : m_held_lock_info_list) {
964 if (held_lock.GetAddressKey() == address_key_ && 960 if (held_lock.GetAddressKey() == address_key &&
965 held_lock.GetIsKernelAddressKey() == is_kernel_address_key_) { 961 held_lock.GetIsKernelAddressKey() == is_kernel_address_key) {
966 return std::addressof(held_lock); 962 return std::addressof(held_lock);
967 } 963 }
968 } 964 }
@@ -971,25 +967,25 @@ KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(VAddr address_ke
971} 967}
972 968
973void KThread::AddWaiterImpl(KThread* thread) { 969void KThread::AddWaiterImpl(KThread* thread) {
974 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); 970 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
975 ASSERT(thread->GetConditionVariableTree() == nullptr); 971 ASSERT(thread->GetConditionVariableTree() == nullptr);
976 972
977 // Get the thread's address key. 973 // Get the thread's address key.
978 const auto address_key_ = thread->GetAddressKey(); 974 const auto address_key = thread->GetAddressKey();
979 const auto is_kernel_address_key_ = thread->GetIsKernelAddressKey(); 975 const auto is_kernel_address_key = thread->GetIsKernelAddressKey();
980 976
981 // Keep track of how many kernel waiters we have. 977 // Keep track of how many kernel waiters we have.
982 if (is_kernel_address_key_) { 978 if (is_kernel_address_key) {
983 ASSERT((num_kernel_waiters++) >= 0); 979 ASSERT((m_num_kernel_waiters++) >= 0);
984 KScheduler::SetSchedulerUpdateNeeded(kernel); 980 KScheduler::SetSchedulerUpdateNeeded(m_kernel);
985 } 981 }
986 982
987 // Get the relevant lock info. 983 // Get the relevant lock info.
988 auto* lock_info = this->FindHeldLock(address_key_, is_kernel_address_key_); 984 auto* lock_info = this->FindHeldLock(address_key, is_kernel_address_key);
989 if (lock_info == nullptr) { 985 if (lock_info == nullptr) {
990 // Create a new lock for the address key. 986 // Create a new lock for the address key.
991 lock_info = 987 lock_info =
992 LockWithPriorityInheritanceInfo::Create(kernel, address_key_, is_kernel_address_key_); 988 LockWithPriorityInheritanceInfo::Create(m_kernel, address_key, is_kernel_address_key);
993 989
994 // Add the new lock to our list. 990 // Add the new lock to our list.
995 this->AddHeldLock(lock_info); 991 this->AddHeldLock(lock_info);
@@ -1000,12 +996,12 @@ void KThread::AddWaiterImpl(KThread* thread) {
1000} 996}
1001 997
1002void KThread::RemoveWaiterImpl(KThread* thread) { 998void KThread::RemoveWaiterImpl(KThread* thread) {
1003 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); 999 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
1004 1000
1005 // Keep track of how many kernel waiters we have. 1001 // Keep track of how many kernel waiters we have.
1006 if (thread->GetIsKernelAddressKey()) { 1002 if (thread->GetIsKernelAddressKey()) {
1007 ASSERT((num_kernel_waiters--) > 0); 1003 ASSERT((m_num_kernel_waiters--) > 0);
1008 KScheduler::SetSchedulerUpdateNeeded(kernel); 1004 KScheduler::SetSchedulerUpdateNeeded(m_kernel);
1009 } 1005 }
1010 1006
1011 // Get the info for the lock the thread is waiting on. 1007 // Get the info for the lock the thread is waiting on.
@@ -1014,8 +1010,8 @@ void KThread::RemoveWaiterImpl(KThread* thread) {
1014 1010
1015 // Remove the waiter. 1011 // Remove the waiter.
1016 if (lock_info->RemoveWaiter(thread)) { 1012 if (lock_info->RemoveWaiter(thread)) {
1017 held_lock_info_list.erase(held_lock_info_list.iterator_to(*lock_info)); 1013 m_held_lock_info_list.erase(m_held_lock_info_list.iterator_to(*lock_info));
1018 LockWithPriorityInheritanceInfo::Free(kernel, lock_info); 1014 LockWithPriorityInheritanceInfo::Free(m_kernel, lock_info);
1019 } 1015 }
1020} 1016}
1021 1017
@@ -1025,7 +1021,7 @@ void KThread::RestorePriority(KernelCore& kernel, KThread* thread) {
1025 while (thread != nullptr) { 1021 while (thread != nullptr) {
1026 // We want to inherit priority where possible. 1022 // We want to inherit priority where possible.
1027 s32 new_priority = thread->GetBasePriority(); 1023 s32 new_priority = thread->GetBasePriority();
1028 for (const auto& held_lock : thread->held_lock_info_list) { 1024 for (const auto& held_lock : thread->m_held_lock_info_list) {
1029 new_priority = 1025 new_priority =
1030 std::min(new_priority, held_lock.GetHighestPriorityWaiter()->GetPriority()); 1026 std::min(new_priority, held_lock.GetHighestPriorityWaiter()->GetPriority());
1031 } 1027 }
@@ -1076,7 +1072,7 @@ void KThread::AddWaiter(KThread* thread) {
1076 1072
1077 // If the thread has a higher priority than us, we should inherit. 1073 // If the thread has a higher priority than us, we should inherit.
1078 if (thread->GetPriority() < this->GetPriority()) { 1074 if (thread->GetPriority() < this->GetPriority()) {
1079 RestorePriority(kernel, this); 1075 RestorePriority(m_kernel, this);
1080 } 1076 }
1081} 1077}
1082 1078
@@ -1087,12 +1083,12 @@ void KThread::RemoveWaiter(KThread* thread) {
1087 // lower priority. 1083 // lower priority.
1088 if (this->GetPriority() == thread->GetPriority() && 1084 if (this->GetPriority() == thread->GetPriority() &&
1089 this->GetPriority() < this->GetBasePriority()) { 1085 this->GetPriority() < this->GetBasePriority()) {
1090 RestorePriority(kernel, this); 1086 RestorePriority(m_kernel, this);
1091 } 1087 }
1092} 1088}
1093 1089
1094KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_kernel_address_key_) { 1090KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_kernel_address_key_) {
1095 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); 1091 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
1096 1092
1097 // Get the relevant lock info. 1093 // Get the relevant lock info.
1098 auto* lock_info = this->FindHeldLock(key, is_kernel_address_key_); 1094 auto* lock_info = this->FindHeldLock(key, is_kernel_address_key_);
@@ -1102,13 +1098,13 @@ KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_ke
1102 } 1098 }
1103 1099
1104 // Remove the lock info from our held list. 1100 // Remove the lock info from our held list.
1105 held_lock_info_list.erase(held_lock_info_list.iterator_to(*lock_info)); 1101 m_held_lock_info_list.erase(m_held_lock_info_list.iterator_to(*lock_info));
1106 1102
1107 // Keep track of how many kernel waiters we have. 1103 // Keep track of how many kernel waiters we have.
1108 if (lock_info->GetIsKernelAddressKey()) { 1104 if (lock_info->GetIsKernelAddressKey()) {
1109 num_kernel_waiters -= lock_info->GetWaiterCount(); 1105 m_num_kernel_waiters -= lock_info->GetWaiterCount();
1110 ASSERT(num_kernel_waiters >= 0); 1106 ASSERT(m_num_kernel_waiters >= 0);
1111 KScheduler::SetSchedulerUpdateNeeded(kernel); 1107 KScheduler::SetSchedulerUpdateNeeded(m_kernel);
1112 } 1108 }
1113 1109
1114 ASSERT(lock_info->GetWaiterCount() > 0); 1110 ASSERT(lock_info->GetWaiterCount() > 0);
@@ -1120,7 +1116,7 @@ KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_ke
1120 *out_has_waiters = false; 1116 *out_has_waiters = false;
1121 1117
1122 // Free the lock info, since it has no waiters. 1118 // Free the lock info, since it has no waiters.
1123 LockWithPriorityInheritanceInfo::Free(kernel, lock_info); 1119 LockWithPriorityInheritanceInfo::Free(m_kernel, lock_info);
1124 } else { 1120 } else {
1125 // There are additional waiters on the lock. 1121 // There are additional waiters on the lock.
1126 *out_has_waiters = true; 1122 *out_has_waiters = true;
@@ -1130,8 +1126,8 @@ KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_ke
1130 1126
1131 // Keep track of any kernel waiters for the new owner. 1127 // Keep track of any kernel waiters for the new owner.
1132 if (lock_info->GetIsKernelAddressKey()) { 1128 if (lock_info->GetIsKernelAddressKey()) {
1133 next_lock_owner->num_kernel_waiters += lock_info->GetWaiterCount(); 1129 next_lock_owner->m_num_kernel_waiters += lock_info->GetWaiterCount();
1134 ASSERT(next_lock_owner->num_kernel_waiters > 0); 1130 ASSERT(next_lock_owner->m_num_kernel_waiters > 0);
1135 1131
1136 // NOTE: No need to set scheduler update needed, because we will have already done so 1132 // NOTE: No need to set scheduler update needed, because we will have already done so
1137 // when removing earlier. 1133 // when removing earlier.
@@ -1142,7 +1138,7 @@ KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_ke
1142 // to lower priority. 1138 // to lower priority.
1143 if (this->GetPriority() == next_lock_owner->GetPriority() && 1139 if (this->GetPriority() == next_lock_owner->GetPriority() &&
1144 this->GetPriority() < this->GetBasePriority()) { 1140 this->GetPriority() < this->GetBasePriority()) {
1145 RestorePriority(kernel, this); 1141 RestorePriority(m_kernel, this);
1146 // NOTE: No need to restore priority on the next lock owner, because it was already the 1142 // NOTE: No need to restore priority on the next lock owner, because it was already the
1147 // highest priority waiter on the lock. 1143 // highest priority waiter on the lock.
1148 } 1144 }
@@ -1153,76 +1149,76 @@ KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_ke
1153 1149
1154Result KThread::Run() { 1150Result KThread::Run() {
1155 while (true) { 1151 while (true) {
1156 KScopedSchedulerLock lk{kernel}; 1152 KScopedSchedulerLock lk{m_kernel};
1157 1153
1158 // If either this thread or the current thread are requesting termination, note it. 1154 // If either this thread or the current thread are requesting termination, note it.
1159 R_UNLESS(!IsTerminationRequested(), ResultTerminationRequested); 1155 R_UNLESS(!this->IsTerminationRequested(), ResultTerminationRequested);
1160 R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), ResultTerminationRequested); 1156 R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(), ResultTerminationRequested);
1161 1157
1162 // Ensure our thread state is correct. 1158 // Ensure our thread state is correct.
1163 R_UNLESS(GetState() == ThreadState::Initialized, ResultInvalidState); 1159 R_UNLESS(this->GetState() == ThreadState::Initialized, ResultInvalidState);
1164 1160
1165 // If the current thread has been asked to suspend, suspend it and retry. 1161 // If the current thread has been asked to suspend, suspend it and retry.
1166 if (GetCurrentThread(kernel).IsSuspended()) { 1162 if (GetCurrentThread(m_kernel).IsSuspended()) {
1167 GetCurrentThread(kernel).UpdateState(); 1163 GetCurrentThread(m_kernel).UpdateState();
1168 continue; 1164 continue;
1169 } 1165 }
1170 1166
1171 // If we're not a kernel thread and we've been asked to suspend, suspend ourselves. 1167 // If we're not a kernel thread and we've been asked to suspend, suspend ourselves.
1172 if (KProcess* owner = this->GetOwnerProcess(); owner != nullptr) { 1168 if (KProcess* owner = this->GetOwnerProcess(); owner != nullptr) {
1173 if (IsUserThread() && IsSuspended()) { 1169 if (this->IsUserThread() && this->IsSuspended()) {
1174 this->UpdateState(); 1170 this->UpdateState();
1175 } 1171 }
1176 owner->IncrementRunningThreadCount(); 1172 owner->IncrementRunningThreadCount();
1177 } 1173 }
1178 1174
1179 // Set our state and finish. 1175 // Set our state and finish.
1180 SetState(ThreadState::Runnable); 1176 this->SetState(ThreadState::Runnable);
1181 1177
1182 R_SUCCEED(); 1178 R_SUCCEED();
1183 } 1179 }
1184} 1180}
1185 1181
1186void KThread::Exit() { 1182void KThread::Exit() {
1187 ASSERT(this == GetCurrentThreadPointer(kernel)); 1183 ASSERT(this == GetCurrentThreadPointer(m_kernel));
1188 1184
1189 // Release the thread resource hint, running thread count from parent. 1185 // Release the thread resource hint, running thread count from parent.
1190 if (parent != nullptr) { 1186 if (m_parent != nullptr) {
1191 parent->GetResourceLimit()->Release(Kernel::LimitableResource::ThreadCountMax, 0, 1); 1187 m_parent->GetResourceLimit()->Release(Kernel::LimitableResource::ThreadCountMax, 0, 1);
1192 resource_limit_release_hint = true; 1188 m_resource_limit_release_hint = true;
1193 parent->DecrementRunningThreadCount(); 1189 m_parent->DecrementRunningThreadCount();
1194 } 1190 }
1195 1191
1196 // Perform termination. 1192 // Perform termination.
1197 { 1193 {
1198 KScopedSchedulerLock sl{kernel}; 1194 KScopedSchedulerLock sl{m_kernel};
1199 1195
1200 // Disallow all suspension. 1196 // Disallow all suspension.
1201 suspend_allowed_flags = 0; 1197 m_suspend_allowed_flags = 0;
1202 this->UpdateState(); 1198 this->UpdateState();
1203 1199
1204 // Disallow all suspension. 1200 // Disallow all suspension.
1205 suspend_allowed_flags = 0; 1201 m_suspend_allowed_flags = 0;
1206 1202
1207 // Start termination. 1203 // Start termination.
1208 StartTermination(); 1204 this->StartTermination();
1209 1205
1210 // Register the thread as a work task. 1206 // Register the thread as a work task.
1211 KWorkerTaskManager::AddTask(kernel, KWorkerTaskManager::WorkerType::Exit, this); 1207 KWorkerTaskManager::AddTask(m_kernel, KWorkerTaskManager::WorkerType::Exit, this);
1212 } 1208 }
1213 1209
1214 UNREACHABLE_MSG("KThread::Exit() would return"); 1210 UNREACHABLE_MSG("KThread::Exit() would return");
1215} 1211}
1216 1212
1217Result KThread::Terminate() { 1213Result KThread::Terminate() {
1218 ASSERT(this != GetCurrentThreadPointer(kernel)); 1214 ASSERT(this != GetCurrentThreadPointer(m_kernel));
1219 1215
1220 // Request the thread terminate if it hasn't already. 1216 // Request the thread terminate if it hasn't already.
1221 if (const auto new_state = this->RequestTerminate(); new_state != ThreadState::Terminated) { 1217 if (const auto new_state = this->RequestTerminate(); new_state != ThreadState::Terminated) {
1222 // If the thread isn't terminated, wait for it to terminate. 1218 // If the thread isn't terminated, wait for it to terminate.
1223 s32 index; 1219 s32 index;
1224 KSynchronizationObject* objects[] = {this}; 1220 KSynchronizationObject* objects[] = {this};
1225 R_TRY(KSynchronizationObject::Wait(kernel, std::addressof(index), objects, 1, 1221 R_TRY(KSynchronizationObject::Wait(m_kernel, std::addressof(index), objects, 1,
1226 Svc::WaitInfinite)); 1222 Svc::WaitInfinite));
1227 } 1223 }
1228 1224
@@ -1230,22 +1226,22 @@ Result KThread::Terminate() {
1230} 1226}
1231 1227
1232ThreadState KThread::RequestTerminate() { 1228ThreadState KThread::RequestTerminate() {
1233 ASSERT(this != GetCurrentThreadPointer(kernel)); 1229 ASSERT(this != GetCurrentThreadPointer(m_kernel));
1234 1230
1235 KScopedSchedulerLock sl{kernel}; 1231 KScopedSchedulerLock sl{m_kernel};
1236 1232
1237 // Determine if this is the first termination request. 1233 // Determine if this is the first termination request.
1238 const bool first_request = [&]() -> bool { 1234 const bool first_request = [&]() -> bool {
1239 // Perform an atomic compare-and-swap from false to true. 1235 // Perform an atomic compare-and-swap from false to true.
1240 bool expected = false; 1236 bool expected = false;
1241 return termination_requested.compare_exchange_strong(expected, true); 1237 return m_termination_requested.compare_exchange_strong(expected, true);
1242 }(); 1238 }();
1243 1239
1244 // If this is the first request, start termination procedure. 1240 // If this is the first request, start termination procedure.
1245 if (first_request) { 1241 if (first_request) {
1246 // If the thread is in initialized state, just change state to terminated. 1242 // If the thread is in initialized state, just change state to terminated.
1247 if (this->GetState() == ThreadState::Initialized) { 1243 if (this->GetState() == ThreadState::Initialized) {
1248 thread_state = ThreadState::Terminated; 1244 m_thread_state = ThreadState::Terminated;
1249 return ThreadState::Terminated; 1245 return ThreadState::Terminated;
1250 } 1246 }
1251 1247
@@ -1259,7 +1255,7 @@ ThreadState KThread::RequestTerminate() {
1259 1255
1260 // If the thread is suspended, continue it. 1256 // If the thread is suspended, continue it.
1261 if (this->IsSuspended()) { 1257 if (this->IsSuspended()) {
1262 suspend_allowed_flags = 0; 1258 m_suspend_allowed_flags = 0;
1263 this->UpdateState(); 1259 this->UpdateState();
1264 } 1260 }
1265 1261
@@ -1268,16 +1264,16 @@ ThreadState KThread::RequestTerminate() {
1268 1264
1269 // If the thread is runnable, send a termination interrupt to other cores. 1265 // If the thread is runnable, send a termination interrupt to other cores.
1270 if (this->GetState() == ThreadState::Runnable) { 1266 if (this->GetState() == ThreadState::Runnable) {
1271 if (const u64 core_mask = 1267 if (const u64 core_mask = m_physical_affinity_mask.GetAffinityMask() &
1272 physical_affinity_mask.GetAffinityMask() & ~(1ULL << GetCurrentCoreId(kernel)); 1268 ~(1ULL << GetCurrentCoreId(m_kernel));
1273 core_mask != 0) { 1269 core_mask != 0) {
1274 Kernel::KInterruptManager::SendInterProcessorInterrupt(kernel, core_mask); 1270 Kernel::KInterruptManager::SendInterProcessorInterrupt(m_kernel, core_mask);
1275 } 1271 }
1276 } 1272 }
1277 1273
1278 // Wake up the thread. 1274 // Wake up the thread.
1279 if (this->GetState() == ThreadState::Waiting) { 1275 if (this->GetState() == ThreadState::Waiting) {
1280 wait_queue->CancelWait(this, ResultTerminationRequested, true); 1276 m_wait_queue->CancelWait(this, ResultTerminationRequested, true);
1281 } 1277 }
1282 } 1278 }
1283 1279
@@ -1285,14 +1281,15 @@ ThreadState KThread::RequestTerminate() {
1285} 1281}
1286 1282
1287Result KThread::Sleep(s64 timeout) { 1283Result KThread::Sleep(s64 timeout) {
1288 ASSERT(!kernel.GlobalSchedulerContext().IsLocked()); 1284 ASSERT(!KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
1289 ASSERT(this == GetCurrentThreadPointer(kernel)); 1285 ASSERT(this == GetCurrentThreadPointer(m_kernel));
1290 ASSERT(timeout > 0); 1286 ASSERT(timeout > 0);
1291 1287
1292 ThreadQueueImplForKThreadSleep wait_queue_(kernel); 1288 ThreadQueueImplForKThreadSleep wait_queue(m_kernel);
1289 KHardwareTimer* timer{};
1293 { 1290 {
1294 // Setup the scheduling lock and sleep. 1291 // Setup the scheduling lock and sleep.
1295 KScopedSchedulerLockAndSleep slp(kernel, this, timeout); 1292 KScopedSchedulerLockAndSleep slp(m_kernel, std::addressof(timer), this, timeout);
1296 1293
1297 // Check if the thread should terminate. 1294 // Check if the thread should terminate.
1298 if (this->IsTerminationRequested()) { 1295 if (this->IsTerminationRequested()) {
@@ -1301,102 +1298,102 @@ Result KThread::Sleep(s64 timeout) {
1301 } 1298 }
1302 1299
1303 // Wait for the sleep to end. 1300 // Wait for the sleep to end.
1304 this->BeginWait(std::addressof(wait_queue_)); 1301 wait_queue.SetHardwareTimer(timer);
1305 SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep); 1302 this->BeginWait(std::addressof(wait_queue));
1303 this->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
1306 } 1304 }
1307 1305
1308 R_SUCCEED(); 1306 R_SUCCEED();
1309} 1307}
1310 1308
1311void KThread::RequestDummyThreadWait() { 1309void KThread::RequestDummyThreadWait() {
1312 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); 1310 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
1313 ASSERT(this->IsDummyThread()); 1311 ASSERT(this->IsDummyThread());
1314 1312
1315 // We will block when the scheduler lock is released. 1313 // We will block when the scheduler lock is released.
1316 dummy_thread_runnable.store(false); 1314 m_dummy_thread_runnable.store(false);
1317} 1315}
1318 1316
1319void KThread::DummyThreadBeginWait() { 1317void KThread::DummyThreadBeginWait() {
1320 if (!this->IsDummyThread() || kernel.IsPhantomModeForSingleCore()) { 1318 if (!this->IsDummyThread() || m_kernel.IsPhantomModeForSingleCore()) {
1321 // Occurs in single core mode. 1319 // Occurs in single core mode.
1322 return; 1320 return;
1323 } 1321 }
1324 1322
1325 // Block until runnable is no longer false. 1323 // Block until runnable is no longer false.
1326 dummy_thread_runnable.wait(false); 1324 m_dummy_thread_runnable.wait(false);
1327} 1325}
1328 1326
1329void KThread::DummyThreadEndWait() { 1327void KThread::DummyThreadEndWait() {
1330 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); 1328 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
1331 ASSERT(this->IsDummyThread()); 1329 ASSERT(this->IsDummyThread());
1332 1330
1333 // Wake up the waiting thread. 1331 // Wake up the waiting thread.
1334 dummy_thread_runnable.store(true); 1332 m_dummy_thread_runnable.store(true);
1335 dummy_thread_runnable.notify_one(); 1333 m_dummy_thread_runnable.notify_one();
1336} 1334}
1337 1335
1338void KThread::BeginWait(KThreadQueue* queue) { 1336void KThread::BeginWait(KThreadQueue* queue) {
1339 // Set our state as waiting. 1337 // Set our state as waiting.
1340 SetState(ThreadState::Waiting); 1338 this->SetState(ThreadState::Waiting);
1341 1339
1342 // Set our wait queue. 1340 // Set our wait queue.
1343 wait_queue = queue; 1341 m_wait_queue = queue;
1344} 1342}
1345 1343
1346void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result_) { 1344void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result) {
1347 // Lock the scheduler. 1345 // Lock the scheduler.
1348 KScopedSchedulerLock sl(kernel); 1346 KScopedSchedulerLock sl(m_kernel);
1349 1347
1350 // If we're waiting, notify our queue that we're available. 1348 // If we're waiting, notify our queue that we're available.
1351 if (GetState() == ThreadState::Waiting) { 1349 if (this->GetState() == ThreadState::Waiting) {
1352 wait_queue->NotifyAvailable(this, signaled_object, wait_result_); 1350 m_wait_queue->NotifyAvailable(this, signaled_object, wait_result);
1353 } 1351 }
1354} 1352}
1355 1353
1356void KThread::EndWait(Result wait_result_) { 1354void KThread::EndWait(Result wait_result) {
1357 // Lock the scheduler. 1355 // Lock the scheduler.
1358 KScopedSchedulerLock sl(kernel); 1356 KScopedSchedulerLock sl(m_kernel);
1359 1357
1360 // If we're waiting, notify our queue that we're available. 1358 // If we're waiting, notify our queue that we're available.
1361 if (GetState() == ThreadState::Waiting) { 1359 if (this->GetState() == ThreadState::Waiting) {
1362 if (wait_queue == nullptr) { 1360 if (m_wait_queue == nullptr) {
1363 // This should never happen, but avoid a hard crash below to get this logged. 1361 // This should never happen, but avoid a hard crash below to get this logged.
1364 ASSERT_MSG(false, "wait_queue is nullptr!"); 1362 ASSERT_MSG(false, "wait_queue is nullptr!");
1365 return; 1363 return;
1366 } 1364 }
1367 1365
1368 wait_queue->EndWait(this, wait_result_); 1366 m_wait_queue->EndWait(this, wait_result);
1369 } 1367 }
1370} 1368}
1371 1369
1372void KThread::CancelWait(Result wait_result_, bool cancel_timer_task) { 1370void KThread::CancelWait(Result wait_result, bool cancel_timer_task) {
1373 // Lock the scheduler. 1371 // Lock the scheduler.
1374 KScopedSchedulerLock sl(kernel); 1372 KScopedSchedulerLock sl(m_kernel);
1375 1373
1376 // If we're waiting, notify our queue that we're available. 1374 // If we're waiting, notify our queue that we're available.
1377 if (GetState() == ThreadState::Waiting) { 1375 if (this->GetState() == ThreadState::Waiting) {
1378 wait_queue->CancelWait(this, wait_result_, cancel_timer_task); 1376 m_wait_queue->CancelWait(this, wait_result, cancel_timer_task);
1379 } 1377 }
1380} 1378}
1381 1379
1382void KThread::SetState(ThreadState state) { 1380void KThread::SetState(ThreadState state) {
1383 KScopedSchedulerLock sl{kernel}; 1381 KScopedSchedulerLock sl{m_kernel};
1384 1382
1385 // Clear debugging state 1383 // Clear debugging state
1386 SetMutexWaitAddressForDebugging({}); 1384 this->SetWaitReasonForDebugging({});
1387 SetWaitReasonForDebugging({});
1388 1385
1389 const ThreadState old_state = thread_state.load(std::memory_order_relaxed); 1386 const ThreadState old_state = m_thread_state.load(std::memory_order_relaxed);
1390 thread_state.store( 1387 m_thread_state.store(
1391 static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask)), 1388 static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask)),
1392 std::memory_order_relaxed); 1389 std::memory_order_relaxed);
1393 if (thread_state.load(std::memory_order_relaxed) != old_state) { 1390 if (m_thread_state.load(std::memory_order_relaxed) != old_state) {
1394 KScheduler::OnThreadStateChanged(kernel, this, old_state); 1391 KScheduler::OnThreadStateChanged(m_kernel, this, old_state);
1395 } 1392 }
1396} 1393}
1397 1394
1398std::shared_ptr<Common::Fiber>& KThread::GetHostContext() { 1395std::shared_ptr<Common::Fiber>& KThread::GetHostContext() {
1399 return host_context; 1396 return m_host_context;
1400} 1397}
1401 1398
1402void SetCurrentThread(KernelCore& kernel, KThread* thread) { 1399void SetCurrentThread(KernelCore& kernel, KThread* thread) {
@@ -1425,20 +1422,20 @@ s32 GetCurrentCoreId(KernelCore& kernel) {
1425 1422
1426KScopedDisableDispatch::~KScopedDisableDispatch() { 1423KScopedDisableDispatch::~KScopedDisableDispatch() {
1427 // If we are shutting down the kernel, none of this is relevant anymore. 1424 // If we are shutting down the kernel, none of this is relevant anymore.
1428 if (kernel.IsShuttingDown()) { 1425 if (m_kernel.IsShuttingDown()) {
1429 return; 1426 return;
1430 } 1427 }
1431 1428
1432 if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) { 1429 if (GetCurrentThread(m_kernel).GetDisableDispatchCount() <= 1) {
1433 auto* scheduler = kernel.CurrentScheduler(); 1430 auto* scheduler = m_kernel.CurrentScheduler();
1434 1431
1435 if (scheduler && !kernel.IsPhantomModeForSingleCore()) { 1432 if (scheduler && !m_kernel.IsPhantomModeForSingleCore()) {
1436 scheduler->RescheduleCurrentCore(); 1433 scheduler->RescheduleCurrentCore();
1437 } else { 1434 } else {
1438 KScheduler::RescheduleCurrentHLEThread(kernel); 1435 KScheduler::RescheduleCurrentHLEThread(m_kernel);
1439 } 1436 }
1440 } else { 1437 } else {
1441 GetCurrentThread(kernel).EnableDispatch(); 1438 GetCurrentThread(m_kernel).EnableDispatch();
1442 } 1439 }
1443} 1440}
1444 1441
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index 9423f08ca..53fa64369 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -108,11 +108,11 @@ enum class StepState : u32 {
108}; 108};
109 109
110void SetCurrentThread(KernelCore& kernel, KThread* thread); 110void SetCurrentThread(KernelCore& kernel, KThread* thread);
111[[nodiscard]] KThread* GetCurrentThreadPointer(KernelCore& kernel); 111KThread* GetCurrentThreadPointer(KernelCore& kernel);
112[[nodiscard]] KThread& GetCurrentThread(KernelCore& kernel); 112KThread& GetCurrentThread(KernelCore& kernel);
113[[nodiscard]] KProcess* GetCurrentProcessPointer(KernelCore& kernel); 113KProcess* GetCurrentProcessPointer(KernelCore& kernel);
114[[nodiscard]] KProcess& GetCurrentProcess(KernelCore& kernel); 114KProcess& GetCurrentProcess(KernelCore& kernel);
115[[nodiscard]] s32 GetCurrentCoreId(KernelCore& kernel); 115s32 GetCurrentCoreId(KernelCore& kernel);
116 116
117class KThread final : public KAutoObjectWithSlabHeapAndContainer<KThread, KWorkerTask>, 117class KThread final : public KAutoObjectWithSlabHeapAndContainer<KThread, KWorkerTask>,
118 public boost::intrusive::list_base_hook<>, 118 public boost::intrusive::list_base_hook<>,
@@ -128,7 +128,7 @@ public:
128 static constexpr s32 IdleThreadPriority = Svc::LowestThreadPriority + 1; 128 static constexpr s32 IdleThreadPriority = Svc::LowestThreadPriority + 1;
129 static constexpr s32 DummyThreadPriority = Svc::LowestThreadPriority + 2; 129 static constexpr s32 DummyThreadPriority = Svc::LowestThreadPriority + 2;
130 130
131 explicit KThread(KernelCore& kernel_); 131 explicit KThread(KernelCore& kernel);
132 ~KThread() override; 132 ~KThread() override;
133 133
134public: 134public:
@@ -136,16 +136,12 @@ public:
136 using ThreadContext64 = Core::ARM_Interface::ThreadContext64; 136 using ThreadContext64 = Core::ARM_Interface::ThreadContext64;
137 using WaiterList = boost::intrusive::list<KThread>; 137 using WaiterList = boost::intrusive::list<KThread>;
138 138
139 void SetName(std::string new_name) {
140 name = std::move(new_name);
141 }
142
143 /** 139 /**
144 * Gets the thread's current priority 140 * Gets the thread's current priority
145 * @return The current thread's priority 141 * @return The current thread's priority
146 */ 142 */
147 [[nodiscard]] s32 GetPriority() const { 143 s32 GetPriority() const {
148 return priority; 144 return m_priority;
149 } 145 }
150 146
151 /** 147 /**
@@ -153,23 +149,23 @@ public:
153 * @param priority The new priority. 149 * @param priority The new priority.
154 */ 150 */
155 void SetPriority(s32 value) { 151 void SetPriority(s32 value) {
156 priority = value; 152 m_priority = value;
157 } 153 }
158 154
159 /** 155 /**
160 * Gets the thread's nominal priority. 156 * Gets the thread's nominal priority.
161 * @return The current thread's nominal priority. 157 * @return The current thread's nominal priority.
162 */ 158 */
163 [[nodiscard]] s32 GetBasePriority() const { 159 s32 GetBasePriority() const {
164 return base_priority; 160 return m_base_priority;
165 } 161 }
166 162
167 /** 163 /**
168 * Gets the thread's thread ID 164 * Gets the thread's thread ID
169 * @return The thread's ID 165 * @return The thread's ID
170 */ 166 */
171 [[nodiscard]] u64 GetThreadID() const { 167 u64 GetThreadId() const {
172 return thread_id; 168 return m_thread_id;
173 } 169 }
174 170
175 void ContinueIfHasKernelWaiters() { 171 void ContinueIfHasKernelWaiters() {
@@ -180,7 +176,7 @@ public:
180 176
181 void SetBasePriority(s32 value); 177 void SetBasePriority(s32 value);
182 178
183 [[nodiscard]] Result Run(); 179 Result Run();
184 180
185 void Exit(); 181 void Exit();
186 182
@@ -188,22 +184,22 @@ public:
188 184
189 ThreadState RequestTerminate(); 185 ThreadState RequestTerminate();
190 186
191 [[nodiscard]] u32 GetSuspendFlags() const { 187 u32 GetSuspendFlags() const {
192 return suspend_allowed_flags & suspend_request_flags; 188 return m_suspend_allowed_flags & m_suspend_request_flags;
193 } 189 }
194 190
195 [[nodiscard]] bool IsSuspended() const { 191 bool IsSuspended() const {
196 return GetSuspendFlags() != 0; 192 return GetSuspendFlags() != 0;
197 } 193 }
198 194
199 [[nodiscard]] bool IsSuspendRequested(SuspendType type) const { 195 bool IsSuspendRequested(SuspendType type) const {
200 return (suspend_request_flags & 196 return (m_suspend_request_flags &
201 (1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)))) != 197 (1U << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)))) !=
202 0; 198 0;
203 } 199 }
204 200
205 [[nodiscard]] bool IsSuspendRequested() const { 201 bool IsSuspendRequested() const {
206 return suspend_request_flags != 0; 202 return m_suspend_request_flags != 0;
207 } 203 }
208 204
209 void RequestSuspend(SuspendType type); 205 void RequestSuspend(SuspendType type);
@@ -217,124 +213,124 @@ public:
217 void Continue(); 213 void Continue();
218 214
219 constexpr void SetSyncedIndex(s32 index) { 215 constexpr void SetSyncedIndex(s32 index) {
220 synced_index = index; 216 m_synced_index = index;
221 } 217 }
222 218
223 [[nodiscard]] constexpr s32 GetSyncedIndex() const { 219 constexpr s32 GetSyncedIndex() const {
224 return synced_index; 220 return m_synced_index;
225 } 221 }
226 222
227 constexpr void SetWaitResult(Result wait_res) { 223 constexpr void SetWaitResult(Result wait_res) {
228 wait_result = wait_res; 224 m_wait_result = wait_res;
229 } 225 }
230 226
231 [[nodiscard]] constexpr Result GetWaitResult() const { 227 constexpr Result GetWaitResult() const {
232 return wait_result; 228 return m_wait_result;
233 } 229 }
234 230
235 /* 231 /*
236 * Returns the Thread Local Storage address of the current thread 232 * Returns the Thread Local Storage address of the current thread
237 * @returns VAddr of the thread's TLS 233 * @returns VAddr of the thread's TLS
238 */ 234 */
239 [[nodiscard]] VAddr GetTLSAddress() const { 235 VAddr GetTlsAddress() const {
240 return tls_address; 236 return m_tls_address;
241 } 237 }
242 238
243 /* 239 /*
244 * Returns the value of the TPIDR_EL0 Read/Write system register for this thread. 240 * Returns the value of the TPIDR_EL0 Read/Write system register for this thread.
245 * @returns The value of the TPIDR_EL0 register. 241 * @returns The value of the TPIDR_EL0 register.
246 */ 242 */
247 [[nodiscard]] u64 GetTPIDR_EL0() const { 243 u64 GetTpidrEl0() const {
248 return thread_context_64.tpidr; 244 return m_thread_context_64.tpidr;
249 } 245 }
250 246
251 /// Sets the value of the TPIDR_EL0 Read/Write system register for this thread. 247 /// Sets the value of the TPIDR_EL0 Read/Write system register for this thread.
252 void SetTPIDR_EL0(u64 value) { 248 void SetTpidrEl0(u64 value) {
253 thread_context_64.tpidr = value; 249 m_thread_context_64.tpidr = value;
254 thread_context_32.tpidr = static_cast<u32>(value); 250 m_thread_context_32.tpidr = static_cast<u32>(value);
255 } 251 }
256 252
257 void CloneFpuStatus(); 253 void CloneFpuStatus();
258 254
259 [[nodiscard]] ThreadContext32& GetContext32() { 255 ThreadContext32& GetContext32() {
260 return thread_context_32; 256 return m_thread_context_32;
261 } 257 }
262 258
263 [[nodiscard]] const ThreadContext32& GetContext32() const { 259 const ThreadContext32& GetContext32() const {
264 return thread_context_32; 260 return m_thread_context_32;
265 } 261 }
266 262
267 [[nodiscard]] ThreadContext64& GetContext64() { 263 ThreadContext64& GetContext64() {
268 return thread_context_64; 264 return m_thread_context_64;
269 } 265 }
270 266
271 [[nodiscard]] const ThreadContext64& GetContext64() const { 267 const ThreadContext64& GetContext64() const {
272 return thread_context_64; 268 return m_thread_context_64;
273 } 269 }
274 270
275 [[nodiscard]] std::shared_ptr<Common::Fiber>& GetHostContext(); 271 std::shared_ptr<Common::Fiber>& GetHostContext();
276 272
277 [[nodiscard]] ThreadState GetState() const { 273 ThreadState GetState() const {
278 return thread_state.load(std::memory_order_relaxed) & ThreadState::Mask; 274 return m_thread_state.load(std::memory_order_relaxed) & ThreadState::Mask;
279 } 275 }
280 276
281 [[nodiscard]] ThreadState GetRawState() const { 277 ThreadState GetRawState() const {
282 return thread_state.load(std::memory_order_relaxed); 278 return m_thread_state.load(std::memory_order_relaxed);
283 } 279 }
284 280
285 void SetState(ThreadState state); 281 void SetState(ThreadState state);
286 282
287 [[nodiscard]] StepState GetStepState() const { 283 StepState GetStepState() const {
288 return step_state; 284 return m_step_state;
289 } 285 }
290 286
291 void SetStepState(StepState state) { 287 void SetStepState(StepState state) {
292 step_state = state; 288 m_step_state = state;
293 } 289 }
294 290
295 [[nodiscard]] s64 GetLastScheduledTick() const { 291 s64 GetLastScheduledTick() const {
296 return last_scheduled_tick; 292 return m_last_scheduled_tick;
297 } 293 }
298 294
299 void SetLastScheduledTick(s64 tick) { 295 void SetLastScheduledTick(s64 tick) {
300 last_scheduled_tick = tick; 296 m_last_scheduled_tick = tick;
301 } 297 }
302 298
303 void AddCpuTime([[maybe_unused]] s32 core_id_, s64 amount) { 299 void AddCpuTime(s32 core_id, s64 amount) {
304 cpu_time += amount; 300 m_cpu_time += amount;
305 // TODO(bunnei): Debug kernels track per-core tick counts. Should we? 301 // TODO(bunnei): Debug kernels track per-core tick counts. Should we?
306 } 302 }
307 303
308 [[nodiscard]] s64 GetCpuTime() const { 304 s64 GetCpuTime() const {
309 return cpu_time; 305 return m_cpu_time;
310 } 306 }
311 307
312 [[nodiscard]] s32 GetActiveCore() const { 308 s32 GetActiveCore() const {
313 return core_id; 309 return m_core_id;
314 } 310 }
315 311
316 void SetActiveCore(s32 core) { 312 void SetActiveCore(s32 core) {
317 core_id = core; 313 m_core_id = core;
318 } 314 }
319 315
320 [[nodiscard]] s32 GetCurrentCore() const { 316 s32 GetCurrentCore() const {
321 return current_core_id; 317 return m_current_core_id;
322 } 318 }
323 319
324 void SetCurrentCore(s32 core) { 320 void SetCurrentCore(s32 core) {
325 current_core_id = core; 321 m_current_core_id = core;
326 } 322 }
327 323
328 [[nodiscard]] KProcess* GetOwnerProcess() { 324 KProcess* GetOwnerProcess() {
329 return parent; 325 return m_parent;
330 } 326 }
331 327
332 [[nodiscard]] const KProcess* GetOwnerProcess() const { 328 const KProcess* GetOwnerProcess() const {
333 return parent; 329 return m_parent;
334 } 330 }
335 331
336 [[nodiscard]] bool IsUserThread() const { 332 bool IsUserThread() const {
337 return parent != nullptr; 333 return m_parent != nullptr;
338 } 334 }
339 335
340 u16 GetUserDisableCount() const; 336 u16 GetUserDisableCount() const;
@@ -343,69 +339,69 @@ public:
343 339
344 KThread* GetLockOwner() const; 340 KThread* GetLockOwner() const;
345 341
346 [[nodiscard]] const KAffinityMask& GetAffinityMask() const { 342 const KAffinityMask& GetAffinityMask() const {
347 return physical_affinity_mask; 343 return m_physical_affinity_mask;
348 } 344 }
349 345
350 [[nodiscard]] Result GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask); 346 Result GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
351 347
352 [[nodiscard]] Result GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask); 348 Result GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
353 349
354 [[nodiscard]] Result SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask); 350 Result SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask);
355 351
356 [[nodiscard]] Result SetActivity(Svc::ThreadActivity activity); 352 Result SetActivity(Svc::ThreadActivity activity);
357 353
358 [[nodiscard]] Result Sleep(s64 timeout); 354 Result Sleep(s64 timeout);
359 355
360 [[nodiscard]] s64 GetYieldScheduleCount() const { 356 s64 GetYieldScheduleCount() const {
361 return schedule_count; 357 return m_schedule_count;
362 } 358 }
363 359
364 void SetYieldScheduleCount(s64 count) { 360 void SetYieldScheduleCount(s64 count) {
365 schedule_count = count; 361 m_schedule_count = count;
366 } 362 }
367 363
368 void WaitCancel(); 364 void WaitCancel();
369 365
370 [[nodiscard]] bool IsWaitCancelled() const { 366 bool IsWaitCancelled() const {
371 return wait_cancelled; 367 return m_wait_cancelled;
372 } 368 }
373 369
374 void ClearWaitCancelled() { 370 void ClearWaitCancelled() {
375 wait_cancelled = false; 371 m_wait_cancelled = false;
376 } 372 }
377 373
378 [[nodiscard]] bool IsCancellable() const { 374 bool IsCancellable() const {
379 return cancellable; 375 return m_cancellable;
380 } 376 }
381 377
382 void SetCancellable() { 378 void SetCancellable() {
383 cancellable = true; 379 m_cancellable = true;
384 } 380 }
385 381
386 void ClearCancellable() { 382 void ClearCancellable() {
387 cancellable = false; 383 m_cancellable = false;
388 } 384 }
389 385
390 [[nodiscard]] bool IsTerminationRequested() const { 386 bool IsTerminationRequested() const {
391 return termination_requested || GetRawState() == ThreadState::Terminated; 387 return m_termination_requested || GetRawState() == ThreadState::Terminated;
392 } 388 }
393 389
394 [[nodiscard]] u64 GetId() const override { 390 u64 GetId() const override {
395 return this->GetThreadID(); 391 return this->GetThreadId();
396 } 392 }
397 393
398 [[nodiscard]] bool IsInitialized() const override { 394 bool IsInitialized() const override {
399 return initialized; 395 return m_initialized;
400 } 396 }
401 397
402 [[nodiscard]] uintptr_t GetPostDestroyArgument() const override { 398 uintptr_t GetPostDestroyArgument() const override {
403 return reinterpret_cast<uintptr_t>(parent) | (resource_limit_release_hint ? 1 : 0); 399 return reinterpret_cast<uintptr_t>(m_parent) | (m_resource_limit_release_hint ? 1 : 0);
404 } 400 }
405 401
406 void Finalize() override; 402 void Finalize() override;
407 403
408 [[nodiscard]] bool IsSignaled() const override; 404 bool IsSignaled() const override;
409 405
410 void OnTimer(); 406 void OnTimer();
411 407
@@ -413,26 +409,22 @@ public:
413 409
414 static void PostDestroy(uintptr_t arg); 410 static void PostDestroy(uintptr_t arg);
415 411
416 [[nodiscard]] static Result InitializeDummyThread(KThread* thread, KProcess* owner); 412 static Result InitializeDummyThread(KThread* thread, KProcess* owner);
417 413
418 [[nodiscard]] static Result InitializeMainThread(Core::System& system, KThread* thread, 414 static Result InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core);
419 s32 virt_core);
420 415
421 [[nodiscard]] static Result InitializeIdleThread(Core::System& system, KThread* thread, 416 static Result InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core);
422 s32 virt_core);
423 417
424 [[nodiscard]] static Result InitializeHighPriorityThread(Core::System& system, KThread* thread, 418 static Result InitializeHighPriorityThread(Core::System& system, KThread* thread,
425 KThreadFunction func, uintptr_t arg, 419 KThreadFunction func, uintptr_t arg, s32 virt_core);
426 s32 virt_core);
427 420
428 [[nodiscard]] static Result InitializeUserThread(Core::System& system, KThread* thread, 421 static Result InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func,
429 KThreadFunction func, uintptr_t arg, 422 uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core,
430 VAddr user_stack_top, s32 prio, s32 virt_core, 423 KProcess* owner);
431 KProcess* owner);
432 424
433 [[nodiscard]] static Result InitializeServiceThread(Core::System& system, KThread* thread, 425 static Result InitializeServiceThread(Core::System& system, KThread* thread,
434 std::function<void()>&& thread_func, 426 std::function<void()>&& thread_func, s32 prio,
435 s32 prio, s32 virt_core, KProcess* owner); 427 s32 virt_core, KProcess* owner);
436 428
437public: 429public:
438 struct StackParameters { 430 struct StackParameters {
@@ -446,12 +438,12 @@ public:
446 KThread* cur_thread; 438 KThread* cur_thread;
447 }; 439 };
448 440
449 [[nodiscard]] StackParameters& GetStackParameters() { 441 StackParameters& GetStackParameters() {
450 return stack_parameters; 442 return m_stack_parameters;
451 } 443 }
452 444
453 [[nodiscard]] const StackParameters& GetStackParameters() const { 445 const StackParameters& GetStackParameters() const {
454 return stack_parameters; 446 return m_stack_parameters;
455 } 447 }
456 448
457 class QueueEntry { 449 class QueueEntry {
@@ -459,47 +451,47 @@ public:
459 constexpr QueueEntry() = default; 451 constexpr QueueEntry() = default;
460 452
461 constexpr void Initialize() { 453 constexpr void Initialize() {
462 prev = nullptr; 454 m_prev = nullptr;
463 next = nullptr; 455 m_next = nullptr;
464 } 456 }
465 457
466 constexpr KThread* GetPrev() const { 458 constexpr KThread* GetPrev() const {
467 return prev; 459 return m_prev;
468 } 460 }
469 constexpr KThread* GetNext() const { 461 constexpr KThread* GetNext() const {
470 return next; 462 return m_next;
471 } 463 }
472 constexpr void SetPrev(KThread* thread) { 464 constexpr void SetPrev(KThread* thread) {
473 prev = thread; 465 m_prev = thread;
474 } 466 }
475 constexpr void SetNext(KThread* thread) { 467 constexpr void SetNext(KThread* thread) {
476 next = thread; 468 m_next = thread;
477 } 469 }
478 470
479 private: 471 private:
480 KThread* prev{}; 472 KThread* m_prev{};
481 KThread* next{}; 473 KThread* m_next{};
482 }; 474 };
483 475
484 [[nodiscard]] QueueEntry& GetPriorityQueueEntry(s32 core) { 476 QueueEntry& GetPriorityQueueEntry(s32 core) {
485 return per_core_priority_queue_entry[core]; 477 return m_per_core_priority_queue_entry[core];
486 } 478 }
487 479
488 [[nodiscard]] const QueueEntry& GetPriorityQueueEntry(s32 core) const { 480 const QueueEntry& GetPriorityQueueEntry(s32 core) const {
489 return per_core_priority_queue_entry[core]; 481 return m_per_core_priority_queue_entry[core];
490 } 482 }
491 483
492 [[nodiscard]] s32 GetDisableDispatchCount() const { 484 s32 GetDisableDispatchCount() const {
493 return this->GetStackParameters().disable_count; 485 return this->GetStackParameters().disable_count;
494 } 486 }
495 487
496 void DisableDispatch() { 488 void DisableDispatch() {
497 ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0); 489 ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() >= 0);
498 this->GetStackParameters().disable_count++; 490 this->GetStackParameters().disable_count++;
499 } 491 }
500 492
501 void EnableDispatch() { 493 void EnableDispatch() {
502 ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0); 494 ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() > 0);
503 this->GetStackParameters().disable_count--; 495 this->GetStackParameters().disable_count--;
504 } 496 }
505 497
@@ -515,7 +507,7 @@ public:
515 this->GetStackParameters().is_in_exception_handler = false; 507 this->GetStackParameters().is_in_exception_handler = false;
516 } 508 }
517 509
518 [[nodiscard]] bool IsInExceptionHandler() const { 510 bool IsInExceptionHandler() const {
519 return this->GetStackParameters().is_in_exception_handler; 511 return this->GetStackParameters().is_in_exception_handler;
520 } 512 }
521 513
@@ -527,11 +519,11 @@ public:
527 this->GetStackParameters().is_calling_svc = false; 519 this->GetStackParameters().is_calling_svc = false;
528 } 520 }
529 521
530 [[nodiscard]] bool IsCallingSvc() const { 522 bool IsCallingSvc() const {
531 return this->GetStackParameters().is_calling_svc; 523 return this->GetStackParameters().is_calling_svc;
532 } 524 }
533 525
534 [[nodiscard]] u8 GetSvcId() const { 526 u8 GetSvcId() const {
535 return this->GetStackParameters().current_svc_id; 527 return this->GetStackParameters().current_svc_id;
536 } 528 }
537 529
@@ -543,78 +535,54 @@ public:
543 this->GetStackParameters().dpc_flags &= ~static_cast<u8>(flag); 535 this->GetStackParameters().dpc_flags &= ~static_cast<u8>(flag);
544 } 536 }
545 537
546 [[nodiscard]] u8 GetDpc() const { 538 u8 GetDpc() const {
547 return this->GetStackParameters().dpc_flags; 539 return this->GetStackParameters().dpc_flags;
548 } 540 }
549 541
550 [[nodiscard]] bool HasDpc() const { 542 bool HasDpc() const {
551 return this->GetDpc() != 0; 543 return this->GetDpc() != 0;
552 } 544 }
553 545
554 void SetWaitReasonForDebugging(ThreadWaitReasonForDebugging reason) { 546 void SetWaitReasonForDebugging(ThreadWaitReasonForDebugging reason) {
555 wait_reason_for_debugging = reason; 547 m_wait_reason_for_debugging = reason;
556 }
557
558 [[nodiscard]] ThreadWaitReasonForDebugging GetWaitReasonForDebugging() const {
559 return wait_reason_for_debugging;
560 }
561
562 [[nodiscard]] ThreadType GetThreadType() const {
563 return thread_type;
564 }
565
566 [[nodiscard]] bool IsDummyThread() const {
567 return GetThreadType() == ThreadType::Dummy;
568 }
569
570 void SetWaitObjectsForDebugging(const std::span<KSynchronizationObject*>& objects) {
571 wait_objects_for_debugging.clear();
572 wait_objects_for_debugging.reserve(objects.size());
573 for (const auto& object : objects) {
574 wait_objects_for_debugging.emplace_back(object);
575 }
576 } 548 }
577 549
578 [[nodiscard]] const std::vector<KSynchronizationObject*>& GetWaitObjectsForDebugging() const { 550 ThreadWaitReasonForDebugging GetWaitReasonForDebugging() const {
579 return wait_objects_for_debugging; 551 return m_wait_reason_for_debugging;
580 } 552 }
581 553
582 void SetMutexWaitAddressForDebugging(VAddr address) { 554 ThreadType GetThreadType() const {
583 mutex_wait_address_for_debugging = address; 555 return m_thread_type;
584 } 556 }
585 557
586 [[nodiscard]] VAddr GetMutexWaitAddressForDebugging() const { 558 bool IsDummyThread() const {
587 return mutex_wait_address_for_debugging; 559 return this->GetThreadType() == ThreadType::Dummy;
588 }
589
590 [[nodiscard]] s32 GetIdealCoreForDebugging() const {
591 return virtual_ideal_core_id;
592 } 560 }
593 561
594 void AddWaiter(KThread* thread); 562 void AddWaiter(KThread* thread);
595 563
596 void RemoveWaiter(KThread* thread); 564 void RemoveWaiter(KThread* thread);
597 565
598 [[nodiscard]] Result GetThreadContext3(std::vector<u8>& out); 566 Result GetThreadContext3(std::vector<u8>& out);
599 567
600 [[nodiscard]] KThread* RemoveUserWaiterByKey(bool* out_has_waiters, VAddr key) { 568 KThread* RemoveUserWaiterByKey(bool* out_has_waiters, VAddr key) {
601 return this->RemoveWaiterByKey(out_has_waiters, key, false); 569 return this->RemoveWaiterByKey(out_has_waiters, key, false);
602 } 570 }
603 571
604 [[nodiscard]] KThread* RemoveKernelWaiterByKey(bool* out_has_waiters, VAddr key) { 572 KThread* RemoveKernelWaiterByKey(bool* out_has_waiters, VAddr key) {
605 return this->RemoveWaiterByKey(out_has_waiters, key, true); 573 return this->RemoveWaiterByKey(out_has_waiters, key, true);
606 } 574 }
607 575
608 [[nodiscard]] VAddr GetAddressKey() const { 576 VAddr GetAddressKey() const {
609 return address_key; 577 return m_address_key;
610 } 578 }
611 579
612 [[nodiscard]] u32 GetAddressKeyValue() const { 580 u32 GetAddressKeyValue() const {
613 return address_key_value; 581 return m_address_key_value;
614 } 582 }
615 583
616 [[nodiscard]] bool GetIsKernelAddressKey() const { 584 bool GetIsKernelAddressKey() const {
617 return is_kernel_address_key; 585 return m_is_kernel_address_key;
618 } 586 }
619 587
620 //! NB: intentional deviation from official kernel. 588 //! NB: intentional deviation from official kernel.
@@ -624,37 +592,37 @@ public:
624 // into things. 592 // into things.
625 593
626 void SetUserAddressKey(VAddr key, u32 val) { 594 void SetUserAddressKey(VAddr key, u32 val) {
627 ASSERT(waiting_lock_info == nullptr); 595 ASSERT(m_waiting_lock_info == nullptr);
628 address_key = key; 596 m_address_key = key;
629 address_key_value = val; 597 m_address_key_value = val;
630 is_kernel_address_key = false; 598 m_is_kernel_address_key = false;
631 } 599 }
632 600
633 void SetKernelAddressKey(VAddr key) { 601 void SetKernelAddressKey(VAddr key) {
634 ASSERT(waiting_lock_info == nullptr); 602 ASSERT(m_waiting_lock_info == nullptr);
635 address_key = key; 603 m_address_key = key;
636 is_kernel_address_key = true; 604 m_is_kernel_address_key = true;
637 } 605 }
638 606
639 void ClearWaitQueue() { 607 void ClearWaitQueue() {
640 wait_queue = nullptr; 608 m_wait_queue = nullptr;
641 } 609 }
642 610
643 void BeginWait(KThreadQueue* queue); 611 void BeginWait(KThreadQueue* queue);
644 void NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result_); 612 void NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result);
645 void EndWait(Result wait_result_); 613 void EndWait(Result wait_result);
646 void CancelWait(Result wait_result_, bool cancel_timer_task); 614 void CancelWait(Result wait_result, bool cancel_timer_task);
647 615
648 [[nodiscard]] s32 GetNumKernelWaiters() const { 616 s32 GetNumKernelWaiters() const {
649 return num_kernel_waiters; 617 return m_num_kernel_waiters;
650 } 618 }
651 619
652 [[nodiscard]] u64 GetConditionVariableKey() const { 620 u64 GetConditionVariableKey() const {
653 return condvar_key; 621 return m_condvar_key;
654 } 622 }
655 623
656 [[nodiscard]] u64 GetAddressArbiterKey() const { 624 u64 GetAddressArbiterKey() const {
657 return condvar_key; 625 return m_condvar_key;
658 } 626 }
659 627
660 // Dummy threads (used for HLE host threads) cannot wait based on the guest scheduler, and 628 // Dummy threads (used for HLE host threads) cannot wait based on the guest scheduler, and
@@ -665,17 +633,16 @@ public:
665 void DummyThreadBeginWait(); 633 void DummyThreadBeginWait();
666 void DummyThreadEndWait(); 634 void DummyThreadEndWait();
667 635
668 [[nodiscard]] uintptr_t GetArgument() const { 636 uintptr_t GetArgument() const {
669 return argument; 637 return m_argument;
670 } 638 }
671 639
672 [[nodiscard]] VAddr GetUserStackTop() const { 640 VAddr GetUserStackTop() const {
673 return stack_top; 641 return m_stack_top;
674 } 642 }
675 643
676private: 644private:
677 [[nodiscard]] KThread* RemoveWaiterByKey(bool* out_has_waiters, VAddr key, 645 KThread* RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_kernel_address_key);
678 bool is_kernel_address_key);
679 646
680 static constexpr size_t PriorityInheritanceCountMax = 10; 647 static constexpr size_t PriorityInheritanceCountMax = 10;
681 union SyncObjectBuffer { 648 union SyncObjectBuffer {
@@ -692,11 +659,11 @@ private:
692 u64 cv_key{}; 659 u64 cv_key{};
693 s32 priority{}; 660 s32 priority{};
694 661
695 [[nodiscard]] constexpr u64 GetConditionVariableKey() const { 662 constexpr u64 GetConditionVariableKey() const {
696 return cv_key; 663 return cv_key;
697 } 664 }
698 665
699 [[nodiscard]] constexpr s32 GetPriority() const { 666 constexpr s32 GetPriority() const {
700 return priority; 667 return priority;
701 } 668 }
702 }; 669 };
@@ -728,22 +695,21 @@ private:
728 695
729 void IncreaseBasePriority(s32 priority); 696 void IncreaseBasePriority(s32 priority);
730 697
731 [[nodiscard]] Result Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, 698 Result Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
732 s32 prio, s32 virt_core, KProcess* owner, ThreadType type); 699 s32 virt_core, KProcess* owner, ThreadType type);
733 700
734 [[nodiscard]] static Result InitializeThread(KThread* thread, KThreadFunction func, 701 static Result InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
735 uintptr_t arg, VAddr user_stack_top, s32 prio, 702 VAddr user_stack_top, s32 prio, s32 core, KProcess* owner,
736 s32 core, KProcess* owner, ThreadType type, 703 ThreadType type, std::function<void()>&& init_func);
737 std::function<void()>&& init_func);
738 704
739 // For core KThread implementation 705 // For core KThread implementation
740 ThreadContext32 thread_context_32{}; 706 ThreadContext32 m_thread_context_32{};
741 ThreadContext64 thread_context_64{}; 707 ThreadContext64 m_thread_context_64{};
742 Common::IntrusiveRedBlackTreeNode condvar_arbiter_tree_node{}; 708 Common::IntrusiveRedBlackTreeNode m_condvar_arbiter_tree_node{};
743 s32 priority{}; 709 s32 m_priority{};
744 using ConditionVariableThreadTreeTraits = 710 using ConditionVariableThreadTreeTraits =
745 Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert< 711 Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<
746 &KThread::condvar_arbiter_tree_node>; 712 &KThread::m_condvar_arbiter_tree_node>;
747 using ConditionVariableThreadTree = 713 using ConditionVariableThreadTree =
748 ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>; 714 ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>;
749 715
@@ -773,7 +739,7 @@ private:
773 739
774 using LockWithPriorityInheritanceThreadTreeTraits = 740 using LockWithPriorityInheritanceThreadTreeTraits =
775 Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert< 741 Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<
776 &KThread::condvar_arbiter_tree_node>; 742 &KThread::m_condvar_arbiter_tree_node>;
777 using LockWithPriorityInheritanceThreadTree = 743 using LockWithPriorityInheritanceThreadTree =
778 ConditionVariableThreadTreeTraits::TreeType<LockWithPriorityInheritanceComparator>; 744 ConditionVariableThreadTreeTraits::TreeType<LockWithPriorityInheritanceComparator>;
779 745
@@ -809,7 +775,7 @@ public:
809 waiter->SetWaitingLockInfo(this); 775 waiter->SetWaitingLockInfo(this);
810 } 776 }
811 777
812 [[nodiscard]] bool RemoveWaiter(KThread* waiter) { 778 bool RemoveWaiter(KThread* waiter) {
813 m_tree.erase(m_tree.iterator_to(*waiter)); 779 m_tree.erase(m_tree.iterator_to(*waiter));
814 780
815 waiter->SetWaitingLockInfo(nullptr); 781 waiter->SetWaitingLockInfo(nullptr);
@@ -853,11 +819,11 @@ public:
853 }; 819 };
854 820
855 void SetWaitingLockInfo(LockWithPriorityInheritanceInfo* lock) { 821 void SetWaitingLockInfo(LockWithPriorityInheritanceInfo* lock) {
856 waiting_lock_info = lock; 822 m_waiting_lock_info = lock;
857 } 823 }
858 824
859 LockWithPriorityInheritanceInfo* GetWaitingLockInfo() { 825 LockWithPriorityInheritanceInfo* GetWaitingLockInfo() {
860 return waiting_lock_info; 826 return m_waiting_lock_info;
861 } 827 }
862 828
863 void AddHeldLock(LockWithPriorityInheritanceInfo* lock_info); 829 void AddHeldLock(LockWithPriorityInheritanceInfo* lock_info);
@@ -867,111 +833,110 @@ private:
867 using LockWithPriorityInheritanceInfoList = 833 using LockWithPriorityInheritanceInfoList =
868 boost::intrusive::list<LockWithPriorityInheritanceInfo>; 834 boost::intrusive::list<LockWithPriorityInheritanceInfo>;
869 835
870 ConditionVariableThreadTree* condvar_tree{}; 836 ConditionVariableThreadTree* m_condvar_tree{};
871 u64 condvar_key{}; 837 u64 m_condvar_key{};
872 u64 virtual_affinity_mask{}; 838 u64 m_virtual_affinity_mask{};
873 KAffinityMask physical_affinity_mask{}; 839 KAffinityMask m_physical_affinity_mask{};
874 u64 thread_id{}; 840 u64 m_thread_id{};
875 std::atomic<s64> cpu_time{}; 841 std::atomic<s64> m_cpu_time{};
876 VAddr address_key{}; 842 VAddr m_address_key{};
877 KProcess* parent{}; 843 KProcess* m_parent{};
878 VAddr kernel_stack_top{}; 844 VAddr m_kernel_stack_top{};
879 u32* light_ipc_data{}; 845 u32* m_light_ipc_data{};
880 VAddr tls_address{}; 846 VAddr m_tls_address{};
881 KLightLock activity_pause_lock; 847 KLightLock m_activity_pause_lock;
882 s64 schedule_count{}; 848 s64 m_schedule_count{};
883 s64 last_scheduled_tick{}; 849 s64 m_last_scheduled_tick{};
884 std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{}; 850 std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> m_per_core_priority_queue_entry{};
885 KThreadQueue* wait_queue{}; 851 KThreadQueue* m_wait_queue{};
886 LockWithPriorityInheritanceInfoList held_lock_info_list{}; 852 LockWithPriorityInheritanceInfoList m_held_lock_info_list{};
887 LockWithPriorityInheritanceInfo* waiting_lock_info{}; 853 LockWithPriorityInheritanceInfo* m_waiting_lock_info{};
888 WaiterList pinned_waiter_list{}; 854 WaiterList m_pinned_waiter_list{};
889 u32 address_key_value{}; 855 u32 m_address_key_value{};
890 u32 suspend_request_flags{}; 856 u32 m_suspend_request_flags{};
891 u32 suspend_allowed_flags{}; 857 u32 m_suspend_allowed_flags{};
892 s32 synced_index{}; 858 s32 m_synced_index{};
893 Result wait_result{ResultSuccess}; 859 Result m_wait_result{ResultSuccess};
894 s32 base_priority{}; 860 s32 m_base_priority{};
895 s32 physical_ideal_core_id{}; 861 s32 m_physical_ideal_core_id{};
896 s32 virtual_ideal_core_id{}; 862 s32 m_virtual_ideal_core_id{};
897 s32 num_kernel_waiters{}; 863 s32 m_num_kernel_waiters{};
898 s32 current_core_id{}; 864 s32 m_current_core_id{};
899 s32 core_id{}; 865 s32 m_core_id{};
900 KAffinityMask original_physical_affinity_mask{}; 866 KAffinityMask m_original_physical_affinity_mask{};
901 s32 original_physical_ideal_core_id{}; 867 s32 m_original_physical_ideal_core_id{};
902 s32 num_core_migration_disables{}; 868 s32 m_num_core_migration_disables{};
903 std::atomic<ThreadState> thread_state{}; 869 std::atomic<ThreadState> m_thread_state{};
904 std::atomic<bool> termination_requested{}; 870 std::atomic<bool> m_termination_requested{};
905 bool wait_cancelled{}; 871 bool m_wait_cancelled{};
906 bool cancellable{}; 872 bool m_cancellable{};
907 bool signaled{}; 873 bool m_signaled{};
908 bool initialized{}; 874 bool m_initialized{};
909 bool debug_attached{}; 875 bool m_debug_attached{};
910 s8 priority_inheritance_count{}; 876 s8 m_priority_inheritance_count{};
911 bool resource_limit_release_hint{}; 877 bool m_resource_limit_release_hint{};
912 bool is_kernel_address_key{}; 878 bool m_is_kernel_address_key{};
913 StackParameters stack_parameters{}; 879 StackParameters m_stack_parameters{};
914 Common::SpinLock context_guard{}; 880 Common::SpinLock m_context_guard{};
915 881
916 // For emulation 882 // For emulation
917 std::shared_ptr<Common::Fiber> host_context{}; 883 std::shared_ptr<Common::Fiber> m_host_context{};
918 bool is_single_core{}; 884 ThreadType m_thread_type{};
919 ThreadType thread_type{}; 885 StepState m_step_state{};
920 StepState step_state{}; 886 std::atomic<bool> m_dummy_thread_runnable{true};
921 std::atomic<bool> dummy_thread_runnable{true};
922 887
923 // For debugging 888 // For debugging
924 std::vector<KSynchronizationObject*> wait_objects_for_debugging; 889 std::vector<KSynchronizationObject*> m_wait_objects_for_debugging{};
925 VAddr mutex_wait_address_for_debugging{}; 890 VAddr m_mutex_wait_address_for_debugging{};
926 ThreadWaitReasonForDebugging wait_reason_for_debugging{}; 891 ThreadWaitReasonForDebugging m_wait_reason_for_debugging{};
927 uintptr_t argument{}; 892 uintptr_t m_argument{};
928 VAddr stack_top{}; 893 VAddr m_stack_top{};
929 894
930public: 895public:
931 using ConditionVariableThreadTreeType = ConditionVariableThreadTree; 896 using ConditionVariableThreadTreeType = ConditionVariableThreadTree;
932 897
933 void SetConditionVariable(ConditionVariableThreadTree* tree, VAddr address, u64 cv_key, 898 void SetConditionVariable(ConditionVariableThreadTree* tree, VAddr address, u64 cv_key,
934 u32 value) { 899 u32 value) {
935 ASSERT(waiting_lock_info == nullptr); 900 ASSERT(m_waiting_lock_info == nullptr);
936 condvar_tree = tree; 901 m_condvar_tree = tree;
937 condvar_key = cv_key; 902 m_condvar_key = cv_key;
938 address_key = address; 903 m_address_key = address;
939 address_key_value = value; 904 m_address_key_value = value;
940 is_kernel_address_key = false; 905 m_is_kernel_address_key = false;
941 } 906 }
942 907
943 void ClearConditionVariable() { 908 void ClearConditionVariable() {
944 condvar_tree = nullptr; 909 m_condvar_tree = nullptr;
945 } 910 }
946 911
947 [[nodiscard]] bool IsWaitingForConditionVariable() const { 912 bool IsWaitingForConditionVariable() const {
948 return condvar_tree != nullptr; 913 return m_condvar_tree != nullptr;
949 } 914 }
950 915
951 void SetAddressArbiter(ConditionVariableThreadTree* tree, u64 address) { 916 void SetAddressArbiter(ConditionVariableThreadTree* tree, u64 address) {
952 ASSERT(waiting_lock_info == nullptr); 917 ASSERT(m_waiting_lock_info == nullptr);
953 condvar_tree = tree; 918 m_condvar_tree = tree;
954 condvar_key = address; 919 m_condvar_key = address;
955 } 920 }
956 921
957 void ClearAddressArbiter() { 922 void ClearAddressArbiter() {
958 condvar_tree = nullptr; 923 m_condvar_tree = nullptr;
959 } 924 }
960 925
961 [[nodiscard]] bool IsWaitingForAddressArbiter() const { 926 bool IsWaitingForAddressArbiter() const {
962 return condvar_tree != nullptr; 927 return m_condvar_tree != nullptr;
963 } 928 }
964 929
965 [[nodiscard]] ConditionVariableThreadTree* GetConditionVariableTree() const { 930 ConditionVariableThreadTree* GetConditionVariableTree() const {
966 return condvar_tree; 931 return m_condvar_tree;
967 } 932 }
968}; 933};
969 934
970class KScopedDisableDispatch { 935class KScopedDisableDispatch {
971public: 936public:
972 [[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel_) : kernel{kernel_} { 937 explicit KScopedDisableDispatch(KernelCore& kernel) : m_kernel{kernel} {
973 // If we are shutting down the kernel, none of this is relevant anymore. 938 // If we are shutting down the kernel, none of this is relevant anymore.
974 if (kernel.IsShuttingDown()) { 939 if (m_kernel.IsShuttingDown()) {
975 return; 940 return;
976 } 941 }
977 GetCurrentThread(kernel).DisableDispatch(); 942 GetCurrentThread(kernel).DisableDispatch();
@@ -980,7 +945,7 @@ public:
980 ~KScopedDisableDispatch(); 945 ~KScopedDisableDispatch();
981 946
982private: 947private:
983 KernelCore& kernel; 948 KernelCore& m_kernel;
984}; 949};
985 950
986inline void KTimerTask::OnTimer() { 951inline void KTimerTask::OnTimer() {
diff --git a/src/core/hle/kernel/k_thread_local_page.cpp b/src/core/hle/kernel/k_thread_local_page.cpp
index 563560114..c2af6898a 100644
--- a/src/core/hle/kernel/k_thread_local_page.cpp
+++ b/src/core/hle/kernel/k_thread_local_page.cpp
@@ -16,7 +16,7 @@ namespace Kernel {
16Result KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) { 16Result KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) {
17 // Set that this process owns us. 17 // Set that this process owns us.
18 m_owner = process; 18 m_owner = process;
19 m_kernel = &kernel; 19 m_kernel = std::addressof(kernel);
20 20
21 // Allocate a new page. 21 // Allocate a new page.
22 KPageBuffer* page_buf = KPageBuffer::Allocate(kernel); 22 KPageBuffer* page_buf = KPageBuffer::Allocate(kernel);
diff --git a/src/core/hle/kernel/k_thread_queue.cpp b/src/core/hle/kernel/k_thread_queue.cpp
index 5f1dc97eb..61488f4ce 100644
--- a/src/core/hle/kernel/k_thread_queue.cpp
+++ b/src/core/hle/kernel/k_thread_queue.cpp
@@ -7,9 +7,10 @@
7 7
8namespace Kernel { 8namespace Kernel {
9 9
10void KThreadQueue::NotifyAvailable([[maybe_unused]] KThread* waiting_thread, 10void KThreadQueue::NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
11 [[maybe_unused]] KSynchronizationObject* signaled_object, 11 Result wait_result) {
12 [[maybe_unused]] Result wait_result) {} 12 UNREACHABLE();
13}
13 14
14void KThreadQueue::EndWait(KThread* waiting_thread, Result wait_result) { 15void KThreadQueue::EndWait(KThread* waiting_thread, Result wait_result) {
15 // Set the thread's wait result. 16 // Set the thread's wait result.
@@ -22,7 +23,9 @@ void KThreadQueue::EndWait(KThread* waiting_thread, Result wait_result) {
22 waiting_thread->ClearWaitQueue(); 23 waiting_thread->ClearWaitQueue();
23 24
24 // Cancel the thread task. 25 // Cancel the thread task.
25 kernel.HardwareTimer().CancelTask(waiting_thread); 26 if (m_hardware_timer != nullptr) {
27 m_hardware_timer->CancelTask(waiting_thread);
28 }
26} 29}
27 30
28void KThreadQueue::CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) { 31void KThreadQueue::CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) {
@@ -36,12 +39,13 @@ void KThreadQueue::CancelWait(KThread* waiting_thread, Result wait_result, bool
36 waiting_thread->ClearWaitQueue(); 39 waiting_thread->ClearWaitQueue();
37 40
38 // Cancel the thread task. 41 // Cancel the thread task.
39 if (cancel_timer_task) { 42 if (cancel_timer_task && m_hardware_timer != nullptr) {
40 kernel.HardwareTimer().CancelTask(waiting_thread); 43 m_hardware_timer->CancelTask(waiting_thread);
41 } 44 }
42} 45}
43 46
44void KThreadQueueWithoutEndWait::EndWait([[maybe_unused]] KThread* waiting_thread, 47void KThreadQueueWithoutEndWait::EndWait(KThread* waiting_thread, Result wait_result) {
45 [[maybe_unused]] Result wait_result) {} 48 UNREACHABLE();
49}
46 50
47} // namespace Kernel 51} // namespace Kernel
diff --git a/src/core/hle/kernel/k_thread_queue.h b/src/core/hle/kernel/k_thread_queue.h
index 8d76ece81..117af0919 100644
--- a/src/core/hle/kernel/k_thread_queue.h
+++ b/src/core/hle/kernel/k_thread_queue.h
@@ -8,24 +8,30 @@
8 8
9namespace Kernel { 9namespace Kernel {
10 10
11class KHardwareTimer;
12
11class KThreadQueue { 13class KThreadQueue {
12public: 14public:
13 explicit KThreadQueue(KernelCore& kernel_) : kernel{kernel_} {} 15 explicit KThreadQueue(KernelCore& kernel) : m_kernel{kernel}, m_hardware_timer{} {}
14 virtual ~KThreadQueue() = default; 16 virtual ~KThreadQueue() = default;
15 17
18 void SetHardwareTimer(KHardwareTimer* timer) {
19 m_hardware_timer = timer;
20 }
21
16 virtual void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object, 22 virtual void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
17 Result wait_result); 23 Result wait_result);
18 virtual void EndWait(KThread* waiting_thread, Result wait_result); 24 virtual void EndWait(KThread* waiting_thread, Result wait_result);
19 virtual void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task); 25 virtual void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task);
20 26
21private: 27private:
22 KernelCore& kernel; 28 KernelCore& m_kernel;
23 KThread::WaiterList wait_list{}; 29 KHardwareTimer* m_hardware_timer{};
24}; 30};
25 31
26class KThreadQueueWithoutEndWait : public KThreadQueue { 32class KThreadQueueWithoutEndWait : public KThreadQueue {
27public: 33public:
28 explicit KThreadQueueWithoutEndWait(KernelCore& kernel_) : KThreadQueue(kernel_) {} 34 explicit KThreadQueueWithoutEndWait(KernelCore& kernel) : KThreadQueue(kernel) {}
29 35
30 void EndWait(KThread* waiting_thread, Result wait_result) override final; 36 void EndWait(KThread* waiting_thread, Result wait_result) override final;
31}; 37};
diff --git a/src/core/hle/kernel/k_transfer_memory.cpp b/src/core/hle/kernel/k_transfer_memory.cpp
index faa5c73b5..471349282 100644
--- a/src/core/hle/kernel/k_transfer_memory.cpp
+++ b/src/core/hle/kernel/k_transfer_memory.cpp
@@ -8,32 +8,29 @@
8 8
9namespace Kernel { 9namespace Kernel {
10 10
11KTransferMemory::KTransferMemory(KernelCore& kernel_) 11KTransferMemory::KTransferMemory(KernelCore& kernel)
12 : KAutoObjectWithSlabHeapAndContainer{kernel_} {} 12 : KAutoObjectWithSlabHeapAndContainer{kernel} {}
13 13
14KTransferMemory::~KTransferMemory() = default; 14KTransferMemory::~KTransferMemory() = default;
15 15
16Result KTransferMemory::Initialize(VAddr address_, std::size_t size_, 16Result KTransferMemory::Initialize(VAddr address, std::size_t size,
17 Svc::MemoryPermission owner_perm_) { 17 Svc::MemoryPermission owner_perm) {
18 // Set members. 18 // Set members.
19 owner = GetCurrentProcessPointer(kernel); 19 m_owner = GetCurrentProcessPointer(m_kernel);
20 20
21 // TODO(bunnei): Lock for transfer memory 21 // TODO(bunnei): Lock for transfer memory
22 22
23 // Set remaining tracking members. 23 // Set remaining tracking members.
24 owner->Open(); 24 m_owner->Open();
25 owner_perm = owner_perm_; 25 m_owner_perm = owner_perm;
26 address = address_; 26 m_address = address;
27 size = size_; 27 m_size = size;
28 is_initialized = true; 28 m_is_initialized = true;
29 29
30 return ResultSuccess; 30 R_SUCCEED();
31} 31}
32 32
33void KTransferMemory::Finalize() { 33void KTransferMemory::Finalize() {}
34 // Perform inherited finalization.
35 KAutoObjectWithSlabHeapAndContainer<KTransferMemory, KAutoObjectWithList>::Finalize();
36}
37 34
38void KTransferMemory::PostDestroy(uintptr_t arg) { 35void KTransferMemory::PostDestroy(uintptr_t arg) {
39 KProcess* owner = reinterpret_cast<KProcess*>(arg); 36 KProcess* owner = reinterpret_cast<KProcess*>(arg);
diff --git a/src/core/hle/kernel/k_transfer_memory.h b/src/core/hle/kernel/k_transfer_memory.h
index 85d508ee7..3d4d795a5 100644
--- a/src/core/hle/kernel/k_transfer_memory.h
+++ b/src/core/hle/kernel/k_transfer_memory.h
@@ -23,41 +23,41 @@ class KTransferMemory final
23 KERNEL_AUTOOBJECT_TRAITS(KTransferMemory, KAutoObject); 23 KERNEL_AUTOOBJECT_TRAITS(KTransferMemory, KAutoObject);
24 24
25public: 25public:
26 explicit KTransferMemory(KernelCore& kernel_); 26 explicit KTransferMemory(KernelCore& kernel);
27 ~KTransferMemory() override; 27 ~KTransferMemory() override;
28 28
29 Result Initialize(VAddr address_, std::size_t size_, Svc::MemoryPermission owner_perm_); 29 Result Initialize(VAddr address, std::size_t size, Svc::MemoryPermission owner_perm);
30 30
31 void Finalize() override; 31 void Finalize() override;
32 32
33 bool IsInitialized() const override { 33 bool IsInitialized() const override {
34 return is_initialized; 34 return m_is_initialized;
35 } 35 }
36 36
37 uintptr_t GetPostDestroyArgument() const override { 37 uintptr_t GetPostDestroyArgument() const override {
38 return reinterpret_cast<uintptr_t>(owner); 38 return reinterpret_cast<uintptr_t>(m_owner);
39 } 39 }
40 40
41 static void PostDestroy(uintptr_t arg); 41 static void PostDestroy(uintptr_t arg);
42 42
43 KProcess* GetOwner() const override { 43 KProcess* GetOwner() const override {
44 return owner; 44 return m_owner;
45 } 45 }
46 46
47 VAddr GetSourceAddress() const { 47 VAddr GetSourceAddress() const {
48 return address; 48 return m_address;
49 } 49 }
50 50
51 size_t GetSize() const { 51 size_t GetSize() const {
52 return is_initialized ? size : 0; 52 return m_is_initialized ? m_size : 0;
53 } 53 }
54 54
55private: 55private:
56 KProcess* owner{}; 56 KProcess* m_owner{};
57 VAddr address{}; 57 VAddr m_address{};
58 Svc::MemoryPermission owner_perm{}; 58 Svc::MemoryPermission m_owner_perm{};
59 size_t size{}; 59 size_t m_size{};
60 bool is_initialized{}; 60 bool m_is_initialized{};
61}; 61};
62 62
63} // namespace Kernel 63} // namespace Kernel
diff --git a/src/core/hle/kernel/k_worker_task.h b/src/core/hle/kernel/k_worker_task.h
index ef591d831..9a230c03c 100644
--- a/src/core/hle/kernel/k_worker_task.h
+++ b/src/core/hle/kernel/k_worker_task.h
@@ -9,7 +9,7 @@ namespace Kernel {
9 9
10class KWorkerTask : public KSynchronizationObject { 10class KWorkerTask : public KSynchronizationObject {
11public: 11public:
12 explicit KWorkerTask(KernelCore& kernel_); 12 explicit KWorkerTask(KernelCore& kernel);
13 13
14 void DoWorkerTask(); 14 void DoWorkerTask();
15}; 15};
diff --git a/src/core/hle/kernel/k_worker_task_manager.cpp b/src/core/hle/kernel/k_worker_task_manager.cpp
index 04042bf8f..8ead39591 100644
--- a/src/core/hle/kernel/k_worker_task_manager.cpp
+++ b/src/core/hle/kernel/k_worker_task_manager.cpp
@@ -10,7 +10,7 @@
10 10
11namespace Kernel { 11namespace Kernel {
12 12
13KWorkerTask::KWorkerTask(KernelCore& kernel_) : KSynchronizationObject{kernel_} {} 13KWorkerTask::KWorkerTask(KernelCore& kernel) : KSynchronizationObject{kernel} {}
14 14
15void KWorkerTask::DoWorkerTask() { 15void KWorkerTask::DoWorkerTask() {
16 if (auto* const thread = this->DynamicCast<KThread*>(); thread != nullptr) { 16 if (auto* const thread = this->DynamicCast<KThread*>(); thread != nullptr) {
diff --git a/src/core/hle/kernel/k_worker_task_manager.h b/src/core/hle/kernel/k_worker_task_manager.h
index f6618883e..8745a4ce2 100644
--- a/src/core/hle/kernel/k_worker_task_manager.h
+++ b/src/core/hle/kernel/k_worker_task_manager.h
@@ -20,7 +20,7 @@ public:
20 20
21 KWorkerTaskManager(); 21 KWorkerTaskManager();
22 22
23 static void AddTask(KernelCore& kernel_, WorkerType type, KWorkerTask* task); 23 static void AddTask(KernelCore& kernel, WorkerType type, KWorkerTask* task);
24 24
25private: 25private:
26 void AddTask(KernelCore& kernel, KWorkerTask* task); 26 void AddTask(KernelCore& kernel, KWorkerTask* task);
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index ef7057ff7..98ecaf12f 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -214,7 +214,6 @@ struct KernelCore::Impl {
214 cores[i] = std::make_unique<Kernel::PhysicalCore>(i, system, *schedulers[i]); 214 cores[i] = std::make_unique<Kernel::PhysicalCore>(i, system, *schedulers[i]);
215 215
216 auto* main_thread{Kernel::KThread::Create(system.Kernel())}; 216 auto* main_thread{Kernel::KThread::Create(system.Kernel())};
217 main_thread->SetName(fmt::format("MainThread:{}", core));
218 main_thread->SetCurrentCore(core); 217 main_thread->SetCurrentCore(core);
219 ASSERT(Kernel::KThread::InitializeMainThread(system, main_thread, core).IsSuccess()); 218 ASSERT(Kernel::KThread::InitializeMainThread(system, main_thread, core).IsSuccess());
220 219
@@ -356,7 +355,6 @@ struct KernelCore::Impl {
356 ASSERT(KThread::InitializeHighPriorityThread(system, shutdown_threads[core_id], {}, {}, 355 ASSERT(KThread::InitializeHighPriorityThread(system, shutdown_threads[core_id], {}, {},
357 core_id) 356 core_id)
358 .IsSuccess()); 357 .IsSuccess());
359 shutdown_threads[core_id]->SetName(fmt::format("SuspendThread:{}", core_id));
360 } 358 }
361 } 359 }
362 360
@@ -388,11 +386,10 @@ struct KernelCore::Impl {
388 386
389 // Gets the dummy KThread for the caller, allocating a new one if this is the first time 387 // Gets the dummy KThread for the caller, allocating a new one if this is the first time
390 KThread* GetHostDummyThread(KThread* existing_thread) { 388 KThread* GetHostDummyThread(KThread* existing_thread) {
391 auto initialize = [this](KThread* thread) { 389 const auto initialize{[](KThread* thread) {
392 ASSERT(KThread::InitializeDummyThread(thread, nullptr).IsSuccess()); 390 ASSERT(KThread::InitializeDummyThread(thread, nullptr).IsSuccess());
393 thread->SetName(fmt::format("DummyThread:{}", next_host_thread_id++));
394 return thread; 391 return thread;
395 }; 392 }};
396 393
397 thread_local KThread raw_thread{system.Kernel()}; 394 thread_local KThread raw_thread{system.Kernel()};
398 thread_local KThread* thread = existing_thread ? existing_thread : initialize(&raw_thread); 395 thread_local KThread* thread = existing_thread ? existing_thread : initialize(&raw_thread);
@@ -742,16 +739,15 @@ struct KernelCore::Impl {
742 hidbus_shared_mem = KSharedMemory::Create(system.Kernel()); 739 hidbus_shared_mem = KSharedMemory::Create(system.Kernel());
743 740
744 hid_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None, 741 hid_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None,
745 Svc::MemoryPermission::Read, hid_size, "HID:SharedMemory"); 742 Svc::MemoryPermission::Read, hid_size);
746 font_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None, 743 font_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None,
747 Svc::MemoryPermission::Read, font_size, "Font:SharedMemory"); 744 Svc::MemoryPermission::Read, font_size);
748 irs_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None, 745 irs_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None,
749 Svc::MemoryPermission::Read, irs_size, "IRS:SharedMemory"); 746 Svc::MemoryPermission::Read, irs_size);
750 time_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None, 747 time_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None,
751 Svc::MemoryPermission::Read, time_size, "Time:SharedMemory"); 748 Svc::MemoryPermission::Read, time_size);
752 hidbus_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None, 749 hidbus_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None,
753 Svc::MemoryPermission::Read, hidbus_size, 750 Svc::MemoryPermission::Read, hidbus_size);
754 "HidBus:SharedMemory");
755 } 751 }
756 752
757 std::mutex registered_objects_lock; 753 std::mutex registered_objects_lock;
@@ -1321,7 +1317,6 @@ const Core::System& KernelCore::System() const {
1321struct KernelCore::SlabHeapContainer { 1317struct KernelCore::SlabHeapContainer {
1322 KSlabHeap<KClientSession> client_session; 1318 KSlabHeap<KClientSession> client_session;
1323 KSlabHeap<KEvent> event; 1319 KSlabHeap<KEvent> event;
1324 KSlabHeap<KLinkedListNode> linked_list_node;
1325 KSlabHeap<KPort> port; 1320 KSlabHeap<KPort> port;
1326 KSlabHeap<KProcess> process; 1321 KSlabHeap<KProcess> process;
1327 KSlabHeap<KResourceLimit> resource_limit; 1322 KSlabHeap<KResourceLimit> resource_limit;
@@ -1348,8 +1343,6 @@ KSlabHeap<T>& KernelCore::SlabHeap() {
1348 return slab_heap_container->client_session; 1343 return slab_heap_container->client_session;
1349 } else if constexpr (std::is_same_v<T, KEvent>) { 1344 } else if constexpr (std::is_same_v<T, KEvent>) {
1350 return slab_heap_container->event; 1345 return slab_heap_container->event;
1351 } else if constexpr (std::is_same_v<T, KLinkedListNode>) {
1352 return slab_heap_container->linked_list_node;
1353 } else if constexpr (std::is_same_v<T, KPort>) { 1346 } else if constexpr (std::is_same_v<T, KPort>) {
1354 return slab_heap_container->port; 1347 return slab_heap_container->port;
1355 } else if constexpr (std::is_same_v<T, KProcess>) { 1348 } else if constexpr (std::is_same_v<T, KProcess>) {
@@ -1391,7 +1384,6 @@ KSlabHeap<T>& KernelCore::SlabHeap() {
1391 1384
1392template KSlabHeap<KClientSession>& KernelCore::SlabHeap(); 1385template KSlabHeap<KClientSession>& KernelCore::SlabHeap();
1393template KSlabHeap<KEvent>& KernelCore::SlabHeap(); 1386template KSlabHeap<KEvent>& KernelCore::SlabHeap();
1394template KSlabHeap<KLinkedListNode>& KernelCore::SlabHeap();
1395template KSlabHeap<KPort>& KernelCore::SlabHeap(); 1387template KSlabHeap<KPort>& KernelCore::SlabHeap();
1396template KSlabHeap<KProcess>& KernelCore::SlabHeap(); 1388template KSlabHeap<KProcess>& KernelCore::SlabHeap();
1397template KSlabHeap<KResourceLimit>& KernelCore::SlabHeap(); 1389template KSlabHeap<KResourceLimit>& KernelCore::SlabHeap();
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 1b380a07b..183a4d227 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -47,7 +47,6 @@ class KEvent;
47class KEventInfo; 47class KEventInfo;
48class KHandleTable; 48class KHandleTable;
49class KHardwareTimer; 49class KHardwareTimer;
50class KLinkedListNode;
51class KMemoryLayout; 50class KMemoryLayout;
52class KMemoryManager; 51class KMemoryManager;
53class KObjectName; 52class KObjectName;
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index 3044922ac..2e0c36129 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -10,14 +10,14 @@
10 10
11namespace Kernel { 11namespace Kernel {
12 12
13PhysicalCore::PhysicalCore(std::size_t core_index_, Core::System& system_, KScheduler& scheduler_) 13PhysicalCore::PhysicalCore(std::size_t core_index, Core::System& system, KScheduler& scheduler)
14 : core_index{core_index_}, system{system_}, scheduler{scheduler_} { 14 : m_core_index{core_index}, m_system{system}, m_scheduler{scheduler} {
15#if defined(ARCHITECTURE_x86_64) || defined(ARCHITECTURE_arm64) 15#if defined(ARCHITECTURE_x86_64) || defined(ARCHITECTURE_arm64)
16 // TODO(bunnei): Initialization relies on a core being available. We may later replace this with 16 // TODO(bunnei): Initialization relies on a core being available. We may later replace this with
17 // a 32-bit instance of Dynarmic. This should be abstracted out to a CPU manager. 17 // a 32-bit instance of Dynarmic. This should be abstracted out to a CPU manager.
18 auto& kernel = system.Kernel(); 18 auto& kernel = system.Kernel();
19 arm_interface = std::make_unique<Core::ARM_Dynarmic_64>( 19 m_arm_interface = std::make_unique<Core::ARM_Dynarmic_64>(
20 system, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index); 20 system, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), m_core_index);
21#else 21#else
22#error Platform not supported yet. 22#error Platform not supported yet.
23#endif 23#endif
@@ -25,13 +25,13 @@ PhysicalCore::PhysicalCore(std::size_t core_index_, Core::System& system_, KSche
25 25
26PhysicalCore::~PhysicalCore() = default; 26PhysicalCore::~PhysicalCore() = default;
27 27
28void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) { 28void PhysicalCore::Initialize(bool is_64_bit) {
29#if defined(ARCHITECTURE_x86_64) || defined(ARCHITECTURE_arm64) 29#if defined(ARCHITECTURE_x86_64) || defined(ARCHITECTURE_arm64)
30 auto& kernel = system.Kernel(); 30 auto& kernel = m_system.Kernel();
31 if (!is_64_bit) { 31 if (!is_64_bit) {
32 // We already initialized a 64-bit core, replace with a 32-bit one. 32 // We already initialized a 64-bit core, replace with a 32-bit one.
33 arm_interface = std::make_unique<Core::ARM_Dynarmic_32>( 33 m_arm_interface = std::make_unique<Core::ARM_Dynarmic_32>(
34 system, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index); 34 m_system, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), m_core_index);
35 } 35 }
36#else 36#else
37#error Platform not supported yet. 37#error Platform not supported yet.
@@ -39,31 +39,30 @@ void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) {
39} 39}
40 40
41void PhysicalCore::Run() { 41void PhysicalCore::Run() {
42 arm_interface->Run(); 42 m_arm_interface->Run();
43 arm_interface->ClearExclusiveState(); 43 m_arm_interface->ClearExclusiveState();
44} 44}
45 45
46void PhysicalCore::Idle() { 46void PhysicalCore::Idle() {
47 std::unique_lock lk{guard}; 47 std::unique_lock lk{m_guard};
48 on_interrupt.wait(lk, [this] { return is_interrupted; }); 48 m_on_interrupt.wait(lk, [this] { return m_is_interrupted; });
49} 49}
50 50
51bool PhysicalCore::IsInterrupted() const { 51bool PhysicalCore::IsInterrupted() const {
52 return is_interrupted; 52 return m_is_interrupted;
53} 53}
54 54
55void PhysicalCore::Interrupt() { 55void PhysicalCore::Interrupt() {
56 std::unique_lock lk{guard}; 56 std::unique_lock lk{m_guard};
57 is_interrupted = true; 57 m_is_interrupted = true;
58 arm_interface->SignalInterrupt(); 58 m_arm_interface->SignalInterrupt();
59 on_interrupt.notify_all(); 59 m_on_interrupt.notify_all();
60} 60}
61 61
62void PhysicalCore::ClearInterrupt() { 62void PhysicalCore::ClearInterrupt() {
63 std::unique_lock lk{guard}; 63 std::unique_lock lk{m_guard};
64 is_interrupted = false; 64 m_is_interrupted = false;
65 arm_interface->ClearInterrupt(); 65 m_arm_interface->ClearInterrupt();
66 on_interrupt.notify_all();
67} 66}
68 67
69} // namespace Kernel 68} // namespace Kernel
diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h
index fb8e7933e..5cb398fdc 100644
--- a/src/core/hle/kernel/physical_core.h
+++ b/src/core/hle/kernel/physical_core.h
@@ -47,46 +47,38 @@ public:
47 bool IsInterrupted() const; 47 bool IsInterrupted() const;
48 48
49 bool IsInitialized() const { 49 bool IsInitialized() const {
50 return arm_interface != nullptr; 50 return m_arm_interface != nullptr;
51 } 51 }
52 52
53 Core::ARM_Interface& ArmInterface() { 53 Core::ARM_Interface& ArmInterface() {
54 return *arm_interface; 54 return *m_arm_interface;
55 } 55 }
56 56
57 const Core::ARM_Interface& ArmInterface() const { 57 const Core::ARM_Interface& ArmInterface() const {
58 return *arm_interface; 58 return *m_arm_interface;
59 }
60
61 bool IsMainCore() const {
62 return core_index == 0;
63 }
64
65 bool IsSystemCore() const {
66 return core_index == 3;
67 } 59 }
68 60
69 std::size_t CoreIndex() const { 61 std::size_t CoreIndex() const {
70 return core_index; 62 return m_core_index;
71 } 63 }
72 64
73 Kernel::KScheduler& Scheduler() { 65 Kernel::KScheduler& Scheduler() {
74 return scheduler; 66 return m_scheduler;
75 } 67 }
76 68
77 const Kernel::KScheduler& Scheduler() const { 69 const Kernel::KScheduler& Scheduler() const {
78 return scheduler; 70 return m_scheduler;
79 } 71 }
80 72
81private: 73private:
82 const std::size_t core_index; 74 const std::size_t m_core_index;
83 Core::System& system; 75 Core::System& m_system;
84 Kernel::KScheduler& scheduler; 76 Kernel::KScheduler& m_scheduler;
85 77
86 std::mutex guard; 78 std::mutex m_guard;
87 std::condition_variable on_interrupt; 79 std::condition_variable m_on_interrupt;
88 std::unique_ptr<Core::ARM_Interface> arm_interface; 80 std::unique_ptr<Core::ARM_Interface> m_arm_interface;
89 bool is_interrupted{}; 81 bool m_is_interrupted{};
90}; 82};
91 83
92} // namespace Kernel 84} // namespace Kernel
diff --git a/src/core/hle/kernel/slab_helpers.h b/src/core/hle/kernel/slab_helpers.h
index 0228ce188..d1bbc7670 100644
--- a/src/core/hle/kernel/slab_helpers.h
+++ b/src/core/hle/kernel/slab_helpers.h
@@ -66,7 +66,7 @@ private:
66 } 66 }
67 67
68public: 68public:
69 explicit KAutoObjectWithSlabHeap(KernelCore& kernel_) : Base(kernel_), kernel(kernel_) {} 69 explicit KAutoObjectWithSlabHeap(KernelCore& kernel) : Base(kernel) {}
70 virtual ~KAutoObjectWithSlabHeap() = default; 70 virtual ~KAutoObjectWithSlabHeap() = default;
71 71
72 virtual void Destroy() override { 72 virtual void Destroy() override {
@@ -76,7 +76,7 @@ public:
76 arg = this->GetPostDestroyArgument(); 76 arg = this->GetPostDestroyArgument();
77 this->Finalize(); 77 this->Finalize();
78 } 78 }
79 Free(kernel, static_cast<Derived*>(this)); 79 Free(Base::m_kernel, static_cast<Derived*>(this));
80 if (is_initialized) { 80 if (is_initialized) {
81 Derived::PostDestroy(arg); 81 Derived::PostDestroy(arg);
82 } 82 }
@@ -90,7 +90,7 @@ public:
90 } 90 }
91 91
92 size_t GetSlabIndex() const { 92 size_t GetSlabIndex() const {
93 return SlabHeap<Derived>(kernel).GetObjectIndex(static_cast<const Derived*>(this)); 93 return SlabHeap<Derived>(Base::m_kernel).GetObjectIndex(static_cast<const Derived*>(this));
94 } 94 }
95 95
96public: 96public:
@@ -125,14 +125,11 @@ public:
125 static size_t GetNumRemaining(KernelCore& kernel) { 125 static size_t GetNumRemaining(KernelCore& kernel) {
126 return kernel.SlabHeap<Derived>().GetNumRemaining(); 126 return kernel.SlabHeap<Derived>().GetNumRemaining();
127 } 127 }
128
129protected:
130 KernelCore& kernel;
131}; 128};
132 129
133template <typename Derived, typename Base> 130template <typename Derived, typename Base>
134class KAutoObjectWithSlabHeapAndContainer : public Base { 131class KAutoObjectWithSlabHeapAndContainer : public Base {
135 static_assert(std::is_base_of<KAutoObjectWithList, Base>::value); 132 static_assert(std::is_base_of_v<KAutoObjectWithList, Base>);
136 133
137private: 134private:
138 static Derived* Allocate(KernelCore& kernel) { 135 static Derived* Allocate(KernelCore& kernel) {
@@ -144,18 +141,18 @@ private:
144 } 141 }
145 142
146public: 143public:
147 KAutoObjectWithSlabHeapAndContainer(KernelCore& kernel_) : Base(kernel_), kernel(kernel_) {} 144 KAutoObjectWithSlabHeapAndContainer(KernelCore& kernel) : Base(kernel) {}
148 virtual ~KAutoObjectWithSlabHeapAndContainer() {} 145 virtual ~KAutoObjectWithSlabHeapAndContainer() {}
149 146
150 virtual void Destroy() override { 147 virtual void Destroy() override {
151 const bool is_initialized = this->IsInitialized(); 148 const bool is_initialized = this->IsInitialized();
152 uintptr_t arg = 0; 149 uintptr_t arg = 0;
153 if (is_initialized) { 150 if (is_initialized) {
154 kernel.ObjectListContainer().Unregister(this); 151 Base::m_kernel.ObjectListContainer().Unregister(this);
155 arg = this->GetPostDestroyArgument(); 152 arg = this->GetPostDestroyArgument();
156 this->Finalize(); 153 this->Finalize();
157 } 154 }
158 Free(kernel, static_cast<Derived*>(this)); 155 Free(Base::m_kernel, static_cast<Derived*>(this));
159 if (is_initialized) { 156 if (is_initialized) {
160 Derived::PostDestroy(arg); 157 Derived::PostDestroy(arg);
161 } 158 }
@@ -169,7 +166,7 @@ public:
169 } 166 }
170 167
171 size_t GetSlabIndex() const { 168 size_t GetSlabIndex() const {
172 return SlabHeap<Derived>(kernel).GetObjectIndex(static_cast<const Derived*>(this)); 169 return SlabHeap<Derived>(Base::m_kernel).GetObjectIndex(static_cast<const Derived*>(this));
173 } 170 }
174 171
175public: 172public:
@@ -209,9 +206,6 @@ public:
209 static size_t GetNumRemaining(KernelCore& kernel) { 206 static size_t GetNumRemaining(KernelCore& kernel) {
210 return kernel.SlabHeap<Derived>().GetNumRemaining(); 207 return kernel.SlabHeap<Derived>().GetNumRemaining();
211 } 208 }
212
213protected:
214 KernelCore& kernel;
215}; 209};
216 210
217} // namespace Kernel 211} // namespace Kernel
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index a0bfd6bbc..871d541d4 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -36,9 +36,9 @@ static To Convert(const From& from) {
36 To to{}; 36 To to{};
37 37
38 if constexpr (sizeof(To) >= sizeof(From)) { 38 if constexpr (sizeof(To) >= sizeof(From)) {
39 std::memcpy(&to, &from, sizeof(From)); 39 std::memcpy(std::addressof(to), std::addressof(from), sizeof(From));
40 } else { 40 } else {
41 std::memcpy(&to, &from, sizeof(To)); 41 std::memcpy(std::addressof(to), std::addressof(from), sizeof(To));
42 } 42 }
43 43
44 return to; 44 return to;
@@ -87,7 +87,7 @@ static void SvcWrap_SetHeapSize64From32(Core::System& system) {
87 87
88 size = Convert<uint32_t>(GetReg32(system, 1)); 88 size = Convert<uint32_t>(GetReg32(system, 1));
89 89
90 ret = SetHeapSize64From32(system, &out_address, size); 90 ret = SetHeapSize64From32(system, std::addressof(out_address), size);
91 91
92 SetReg32(system, 0, Convert<uint32_t>(ret)); 92 SetReg32(system, 0, Convert<uint32_t>(ret));
93 SetReg32(system, 1, Convert<uint32_t>(out_address)); 93 SetReg32(system, 1, Convert<uint32_t>(out_address));
@@ -169,7 +169,7 @@ static void SvcWrap_QueryMemory64From32(Core::System& system) {
169 out_memory_info = Convert<uint32_t>(GetReg32(system, 0)); 169 out_memory_info = Convert<uint32_t>(GetReg32(system, 0));
170 address = Convert<uint32_t>(GetReg32(system, 2)); 170 address = Convert<uint32_t>(GetReg32(system, 2));
171 171
172 ret = QueryMemory64From32(system, out_memory_info, &out_page_info, address); 172 ret = QueryMemory64From32(system, out_memory_info, std::addressof(out_page_info), address);
173 173
174 SetReg32(system, 0, Convert<uint32_t>(ret)); 174 SetReg32(system, 0, Convert<uint32_t>(ret));
175 SetReg32(system, 1, Convert<uint32_t>(out_page_info)); 175 SetReg32(system, 1, Convert<uint32_t>(out_page_info));
@@ -195,7 +195,7 @@ static void SvcWrap_CreateThread64From32(Core::System& system) {
195 priority = Convert<int32_t>(GetReg32(system, 0)); 195 priority = Convert<int32_t>(GetReg32(system, 0));
196 core_id = Convert<int32_t>(GetReg32(system, 4)); 196 core_id = Convert<int32_t>(GetReg32(system, 4));
197 197
198 ret = CreateThread64From32(system, &out_handle, func, arg, stack_bottom, priority, core_id); 198 ret = CreateThread64From32(system, std::addressof(out_handle), func, arg, stack_bottom, priority, core_id);
199 199
200 SetReg32(system, 0, Convert<uint32_t>(ret)); 200 SetReg32(system, 0, Convert<uint32_t>(ret));
201 SetReg32(system, 1, Convert<uint32_t>(out_handle)); 201 SetReg32(system, 1, Convert<uint32_t>(out_handle));
@@ -236,7 +236,7 @@ static void SvcWrap_GetThreadPriority64From32(Core::System& system) {
236 236
237 thread_handle = Convert<Handle>(GetReg32(system, 1)); 237 thread_handle = Convert<Handle>(GetReg32(system, 1));
238 238
239 ret = GetThreadPriority64From32(system, &out_priority, thread_handle); 239 ret = GetThreadPriority64From32(system, std::addressof(out_priority), thread_handle);
240 240
241 SetReg32(system, 0, Convert<uint32_t>(ret)); 241 SetReg32(system, 0, Convert<uint32_t>(ret));
242 SetReg32(system, 1, Convert<uint32_t>(out_priority)); 242 SetReg32(system, 1, Convert<uint32_t>(out_priority));
@@ -265,7 +265,7 @@ static void SvcWrap_GetThreadCoreMask64From32(Core::System& system) {
265 265
266 thread_handle = Convert<Handle>(GetReg32(system, 2)); 266 thread_handle = Convert<Handle>(GetReg32(system, 2));
267 267
268 ret = GetThreadCoreMask64From32(system, &out_core_id, &out_affinity_mask, thread_handle); 268 ret = GetThreadCoreMask64From32(system, std::addressof(out_core_id), std::addressof(out_affinity_mask), thread_handle);
269 269
270 SetReg32(system, 0, Convert<uint32_t>(ret)); 270 SetReg32(system, 0, Convert<uint32_t>(ret));
271 SetReg32(system, 1, Convert<uint32_t>(out_core_id)); 271 SetReg32(system, 1, Convert<uint32_t>(out_core_id));
@@ -371,7 +371,7 @@ static void SvcWrap_CreateTransferMemory64From32(Core::System& system) {
371 size = Convert<uint32_t>(GetReg32(system, 2)); 371 size = Convert<uint32_t>(GetReg32(system, 2));
372 map_perm = Convert<MemoryPermission>(GetReg32(system, 3)); 372 map_perm = Convert<MemoryPermission>(GetReg32(system, 3));
373 373
374 ret = CreateTransferMemory64From32(system, &out_handle, address, size, map_perm); 374 ret = CreateTransferMemory64From32(system, std::addressof(out_handle), address, size, map_perm);
375 375
376 SetReg32(system, 0, Convert<uint32_t>(ret)); 376 SetReg32(system, 0, Convert<uint32_t>(ret));
377 SetReg32(system, 1, Convert<uint32_t>(out_handle)); 377 SetReg32(system, 1, Convert<uint32_t>(out_handle));
@@ -416,7 +416,7 @@ static void SvcWrap_WaitSynchronization64From32(Core::System& system) {
416 timeout_ns_gather[1] = GetReg32(system, 3); 416 timeout_ns_gather[1] = GetReg32(system, 3);
417 timeout_ns = Convert<int64_t>(timeout_ns_gather); 417 timeout_ns = Convert<int64_t>(timeout_ns_gather);
418 418
419 ret = WaitSynchronization64From32(system, &out_index, handles, num_handles, timeout_ns); 419 ret = WaitSynchronization64From32(system, std::addressof(out_index), handles, num_handles, timeout_ns);
420 420
421 SetReg32(system, 0, Convert<uint32_t>(ret)); 421 SetReg32(system, 0, Convert<uint32_t>(ret));
422 SetReg32(system, 1, Convert<uint32_t>(out_index)); 422 SetReg32(system, 1, Convert<uint32_t>(out_index));
@@ -511,7 +511,7 @@ static void SvcWrap_ConnectToNamedPort64From32(Core::System& system) {
511 511
512 name = Convert<uint32_t>(GetReg32(system, 1)); 512 name = Convert<uint32_t>(GetReg32(system, 1));
513 513
514 ret = ConnectToNamedPort64From32(system, &out_handle, name); 514 ret = ConnectToNamedPort64From32(system, std::addressof(out_handle), name);
515 515
516 SetReg32(system, 0, Convert<uint32_t>(ret)); 516 SetReg32(system, 0, Convert<uint32_t>(ret));
517 SetReg32(system, 1, Convert<uint32_t>(out_handle)); 517 SetReg32(system, 1, Convert<uint32_t>(out_handle));
@@ -557,7 +557,7 @@ static void SvcWrap_SendAsyncRequestWithUserBuffer64From32(Core::System& system)
557 message_buffer_size = Convert<uint32_t>(GetReg32(system, 2)); 557 message_buffer_size = Convert<uint32_t>(GetReg32(system, 2));
558 session_handle = Convert<Handle>(GetReg32(system, 3)); 558 session_handle = Convert<Handle>(GetReg32(system, 3));
559 559
560 ret = SendAsyncRequestWithUserBuffer64From32(system, &out_event_handle, message_buffer, message_buffer_size, session_handle); 560 ret = SendAsyncRequestWithUserBuffer64From32(system, std::addressof(out_event_handle), message_buffer, message_buffer_size, session_handle);
561 561
562 SetReg32(system, 0, Convert<uint32_t>(ret)); 562 SetReg32(system, 0, Convert<uint32_t>(ret));
563 SetReg32(system, 1, Convert<uint32_t>(out_event_handle)); 563 SetReg32(system, 1, Convert<uint32_t>(out_event_handle));
@@ -571,7 +571,7 @@ static void SvcWrap_GetProcessId64From32(Core::System& system) {
571 571
572 process_handle = Convert<Handle>(GetReg32(system, 1)); 572 process_handle = Convert<Handle>(GetReg32(system, 1));
573 573
574 ret = GetProcessId64From32(system, &out_process_id, process_handle); 574 ret = GetProcessId64From32(system, std::addressof(out_process_id), process_handle);
575 575
576 SetReg32(system, 0, Convert<uint32_t>(ret)); 576 SetReg32(system, 0, Convert<uint32_t>(ret));
577 auto out_process_id_scatter = Convert<std::array<uint32_t, 2>>(out_process_id); 577 auto out_process_id_scatter = Convert<std::array<uint32_t, 2>>(out_process_id);
@@ -587,7 +587,7 @@ static void SvcWrap_GetThreadId64From32(Core::System& system) {
587 587
588 thread_handle = Convert<Handle>(GetReg32(system, 1)); 588 thread_handle = Convert<Handle>(GetReg32(system, 1));
589 589
590 ret = GetThreadId64From32(system, &out_thread_id, thread_handle); 590 ret = GetThreadId64From32(system, std::addressof(out_thread_id), thread_handle);
591 591
592 SetReg32(system, 0, Convert<uint32_t>(ret)); 592 SetReg32(system, 0, Convert<uint32_t>(ret));
593 auto out_thread_id_scatter = Convert<std::array<uint32_t, 2>>(out_thread_id); 593 auto out_thread_id_scatter = Convert<std::array<uint32_t, 2>>(out_thread_id);
@@ -644,7 +644,7 @@ static void SvcWrap_GetInfo64From32(Core::System& system) {
644 info_subtype_gather[1] = GetReg32(system, 3); 644 info_subtype_gather[1] = GetReg32(system, 3);
645 info_subtype = Convert<uint64_t>(info_subtype_gather); 645 info_subtype = Convert<uint64_t>(info_subtype_gather);
646 646
647 ret = GetInfo64From32(system, &out, info_type, handle, info_subtype); 647 ret = GetInfo64From32(system, std::addressof(out), info_type, handle, info_subtype);
648 648
649 SetReg32(system, 0, Convert<uint32_t>(ret)); 649 SetReg32(system, 0, Convert<uint32_t>(ret));
650 auto out_scatter = Convert<std::array<uint32_t, 2>>(out); 650 auto out_scatter = Convert<std::array<uint32_t, 2>>(out);
@@ -712,7 +712,7 @@ static void SvcWrap_GetDebugFutureThreadInfo64From32(Core::System& system) {
712 ns_gather[1] = GetReg32(system, 1); 712 ns_gather[1] = GetReg32(system, 1);
713 ns = Convert<int64_t>(ns_gather); 713 ns = Convert<int64_t>(ns_gather);
714 714
715 ret = GetDebugFutureThreadInfo64From32(system, &out_context, &out_thread_id, debug_handle, ns); 715 ret = GetDebugFutureThreadInfo64From32(system, std::addressof(out_context), std::addressof(out_thread_id), debug_handle, ns);
716 716
717 SetReg32(system, 0, Convert<uint32_t>(ret)); 717 SetReg32(system, 0, Convert<uint32_t>(ret));
718 auto out_context_scatter = Convert<std::array<uint32_t, 4>>(out_context); 718 auto out_context_scatter = Convert<std::array<uint32_t, 4>>(out_context);
@@ -732,7 +732,7 @@ static void SvcWrap_GetLastThreadInfo64From32(Core::System& system) {
732 uint64_t out_tls_address{}; 732 uint64_t out_tls_address{};
733 uint32_t out_flags{}; 733 uint32_t out_flags{};
734 734
735 ret = GetLastThreadInfo64From32(system, &out_context, &out_tls_address, &out_flags); 735 ret = GetLastThreadInfo64From32(system, std::addressof(out_context), std::addressof(out_tls_address), std::addressof(out_flags));
736 736
737 SetReg32(system, 0, Convert<uint32_t>(ret)); 737 SetReg32(system, 0, Convert<uint32_t>(ret));
738 auto out_context_scatter = Convert<std::array<uint32_t, 4>>(out_context); 738 auto out_context_scatter = Convert<std::array<uint32_t, 4>>(out_context);
@@ -754,7 +754,7 @@ static void SvcWrap_GetResourceLimitLimitValue64From32(Core::System& system) {
754 resource_limit_handle = Convert<Handle>(GetReg32(system, 1)); 754 resource_limit_handle = Convert<Handle>(GetReg32(system, 1));
755 which = Convert<LimitableResource>(GetReg32(system, 2)); 755 which = Convert<LimitableResource>(GetReg32(system, 2));
756 756
757 ret = GetResourceLimitLimitValue64From32(system, &out_limit_value, resource_limit_handle, which); 757 ret = GetResourceLimitLimitValue64From32(system, std::addressof(out_limit_value), resource_limit_handle, which);
758 758
759 SetReg32(system, 0, Convert<uint32_t>(ret)); 759 SetReg32(system, 0, Convert<uint32_t>(ret));
760 auto out_limit_value_scatter = Convert<std::array<uint32_t, 2>>(out_limit_value); 760 auto out_limit_value_scatter = Convert<std::array<uint32_t, 2>>(out_limit_value);
@@ -772,7 +772,7 @@ static void SvcWrap_GetResourceLimitCurrentValue64From32(Core::System& system) {
772 resource_limit_handle = Convert<Handle>(GetReg32(system, 1)); 772 resource_limit_handle = Convert<Handle>(GetReg32(system, 1));
773 which = Convert<LimitableResource>(GetReg32(system, 2)); 773 which = Convert<LimitableResource>(GetReg32(system, 2));
774 774
775 ret = GetResourceLimitCurrentValue64From32(system, &out_current_value, resource_limit_handle, which); 775 ret = GetResourceLimitCurrentValue64From32(system, std::addressof(out_current_value), resource_limit_handle, which);
776 776
777 SetReg32(system, 0, Convert<uint32_t>(ret)); 777 SetReg32(system, 0, Convert<uint32_t>(ret));
778 auto out_current_value_scatter = Convert<std::array<uint32_t, 2>>(out_current_value); 778 auto out_current_value_scatter = Convert<std::array<uint32_t, 2>>(out_current_value);
@@ -861,7 +861,7 @@ static void SvcWrap_GetResourceLimitPeakValue64From32(Core::System& system) {
861 resource_limit_handle = Convert<Handle>(GetReg32(system, 1)); 861 resource_limit_handle = Convert<Handle>(GetReg32(system, 1));
862 which = Convert<LimitableResource>(GetReg32(system, 2)); 862 which = Convert<LimitableResource>(GetReg32(system, 2));
863 863
864 ret = GetResourceLimitPeakValue64From32(system, &out_peak_value, resource_limit_handle, which); 864 ret = GetResourceLimitPeakValue64From32(system, std::addressof(out_peak_value), resource_limit_handle, which);
865 865
866 SetReg32(system, 0, Convert<uint32_t>(ret)); 866 SetReg32(system, 0, Convert<uint32_t>(ret));
867 auto out_peak_value_scatter = Convert<std::array<uint32_t, 2>>(out_peak_value); 867 auto out_peak_value_scatter = Convert<std::array<uint32_t, 2>>(out_peak_value);
@@ -877,7 +877,7 @@ static void SvcWrap_CreateIoPool64From32(Core::System& system) {
877 877
878 which = Convert<IoPoolType>(GetReg32(system, 1)); 878 which = Convert<IoPoolType>(GetReg32(system, 1));
879 879
880 ret = CreateIoPool64From32(system, &out_handle, which); 880 ret = CreateIoPool64From32(system, std::addressof(out_handle), which);
881 881
882 SetReg32(system, 0, Convert<uint32_t>(ret)); 882 SetReg32(system, 0, Convert<uint32_t>(ret));
883 SetReg32(system, 1, Convert<uint32_t>(out_handle)); 883 SetReg32(system, 1, Convert<uint32_t>(out_handle));
@@ -902,7 +902,7 @@ static void SvcWrap_CreateIoRegion64From32(Core::System& system) {
902 mapping = Convert<MemoryMapping>(GetReg32(system, 4)); 902 mapping = Convert<MemoryMapping>(GetReg32(system, 4));
903 perm = Convert<MemoryPermission>(GetReg32(system, 5)); 903 perm = Convert<MemoryPermission>(GetReg32(system, 5));
904 904
905 ret = CreateIoRegion64From32(system, &out_handle, io_pool, physical_address, size, mapping, perm); 905 ret = CreateIoRegion64From32(system, std::addressof(out_handle), io_pool, physical_address, size, mapping, perm);
906 906
907 SetReg32(system, 0, Convert<uint32_t>(ret)); 907 SetReg32(system, 0, Convert<uint32_t>(ret));
908 SetReg32(system, 1, Convert<uint32_t>(out_handle)); 908 SetReg32(system, 1, Convert<uint32_t>(out_handle));
@@ -950,7 +950,7 @@ static void SvcWrap_CreateSession64From32(Core::System& system) {
950 is_light = Convert<bool>(GetReg32(system, 2)); 950 is_light = Convert<bool>(GetReg32(system, 2));
951 name = Convert<uint32_t>(GetReg32(system, 3)); 951 name = Convert<uint32_t>(GetReg32(system, 3));
952 952
953 ret = CreateSession64From32(system, &out_server_session_handle, &out_client_session_handle, is_light, name); 953 ret = CreateSession64From32(system, std::addressof(out_server_session_handle), std::addressof(out_client_session_handle), is_light, name);
954 954
955 SetReg32(system, 0, Convert<uint32_t>(ret)); 955 SetReg32(system, 0, Convert<uint32_t>(ret));
956 SetReg32(system, 1, Convert<uint32_t>(out_server_session_handle)); 956 SetReg32(system, 1, Convert<uint32_t>(out_server_session_handle));
@@ -965,7 +965,7 @@ static void SvcWrap_AcceptSession64From32(Core::System& system) {
965 965
966 port = Convert<Handle>(GetReg32(system, 1)); 966 port = Convert<Handle>(GetReg32(system, 1));
967 967
968 ret = AcceptSession64From32(system, &out_handle, port); 968 ret = AcceptSession64From32(system, std::addressof(out_handle), port);
969 969
970 SetReg32(system, 0, Convert<uint32_t>(ret)); 970 SetReg32(system, 0, Convert<uint32_t>(ret));
971 SetReg32(system, 1, Convert<uint32_t>(out_handle)); 971 SetReg32(system, 1, Convert<uint32_t>(out_handle));
@@ -988,7 +988,7 @@ static void SvcWrap_ReplyAndReceive64From32(Core::System& system) {
988 timeout_ns_gather[1] = GetReg32(system, 4); 988 timeout_ns_gather[1] = GetReg32(system, 4);
989 timeout_ns = Convert<int64_t>(timeout_ns_gather); 989 timeout_ns = Convert<int64_t>(timeout_ns_gather);
990 990
991 ret = ReplyAndReceive64From32(system, &out_index, handles, num_handles, reply_target, timeout_ns); 991 ret = ReplyAndReceive64From32(system, std::addressof(out_index), handles, num_handles, reply_target, timeout_ns);
992 992
993 SetReg32(system, 0, Convert<uint32_t>(ret)); 993 SetReg32(system, 0, Convert<uint32_t>(ret));
994 SetReg32(system, 1, Convert<uint32_t>(out_index)); 994 SetReg32(system, 1, Convert<uint32_t>(out_index));
@@ -1015,7 +1015,7 @@ static void SvcWrap_ReplyAndReceiveWithUserBuffer64From32(Core::System& system)
1015 timeout_ns_gather[1] = GetReg32(system, 6); 1015 timeout_ns_gather[1] = GetReg32(system, 6);
1016 timeout_ns = Convert<int64_t>(timeout_ns_gather); 1016 timeout_ns = Convert<int64_t>(timeout_ns_gather);
1017 1017
1018 ret = ReplyAndReceiveWithUserBuffer64From32(system, &out_index, message_buffer, message_buffer_size, handles, num_handles, reply_target, timeout_ns); 1018 ret = ReplyAndReceiveWithUserBuffer64From32(system, std::addressof(out_index), message_buffer, message_buffer_size, handles, num_handles, reply_target, timeout_ns);
1019 1019
1020 SetReg32(system, 0, Convert<uint32_t>(ret)); 1020 SetReg32(system, 0, Convert<uint32_t>(ret));
1021 SetReg32(system, 1, Convert<uint32_t>(out_index)); 1021 SetReg32(system, 1, Convert<uint32_t>(out_index));
@@ -1027,7 +1027,7 @@ static void SvcWrap_CreateEvent64From32(Core::System& system) {
1027 Handle out_write_handle{}; 1027 Handle out_write_handle{};
1028 Handle out_read_handle{}; 1028 Handle out_read_handle{};
1029 1029
1030 ret = CreateEvent64From32(system, &out_write_handle, &out_read_handle); 1030 ret = CreateEvent64From32(system, std::addressof(out_write_handle), std::addressof(out_read_handle));
1031 1031
1032 SetReg32(system, 0, Convert<uint32_t>(ret)); 1032 SetReg32(system, 0, Convert<uint32_t>(ret));
1033 SetReg32(system, 1, Convert<uint32_t>(out_write_handle)); 1033 SetReg32(system, 1, Convert<uint32_t>(out_write_handle));
@@ -1118,7 +1118,7 @@ static void SvcWrap_CreateCodeMemory64From32(Core::System& system) {
1118 address = Convert<uint32_t>(GetReg32(system, 1)); 1118 address = Convert<uint32_t>(GetReg32(system, 1));
1119 size = Convert<uint32_t>(GetReg32(system, 2)); 1119 size = Convert<uint32_t>(GetReg32(system, 2));
1120 1120
1121 ret = CreateCodeMemory64From32(system, &out_handle, address, size); 1121 ret = CreateCodeMemory64From32(system, std::addressof(out_handle), address, size);
1122 1122
1123 SetReg32(system, 0, Convert<uint32_t>(ret)); 1123 SetReg32(system, 0, Convert<uint32_t>(ret));
1124 SetReg32(system, 1, Convert<uint32_t>(out_handle)); 1124 SetReg32(system, 1, Convert<uint32_t>(out_handle));
@@ -1169,7 +1169,7 @@ static void SvcWrap_ReadWriteRegister64From32(Core::System& system) {
1169 mask = Convert<uint32_t>(GetReg32(system, 0)); 1169 mask = Convert<uint32_t>(GetReg32(system, 0));
1170 value = Convert<uint32_t>(GetReg32(system, 1)); 1170 value = Convert<uint32_t>(GetReg32(system, 1));
1171 1171
1172 ret = ReadWriteRegister64From32(system, &out_value, address, mask, value); 1172 ret = ReadWriteRegister64From32(system, std::addressof(out_value), address, mask, value);
1173 1173
1174 SetReg32(system, 0, Convert<uint32_t>(ret)); 1174 SetReg32(system, 0, Convert<uint32_t>(ret));
1175 SetReg32(system, 1, Convert<uint32_t>(out_value)); 1175 SetReg32(system, 1, Convert<uint32_t>(out_value));
@@ -1201,7 +1201,7 @@ static void SvcWrap_CreateSharedMemory64From32(Core::System& system) {
1201 owner_perm = Convert<MemoryPermission>(GetReg32(system, 2)); 1201 owner_perm = Convert<MemoryPermission>(GetReg32(system, 2));
1202 remote_perm = Convert<MemoryPermission>(GetReg32(system, 3)); 1202 remote_perm = Convert<MemoryPermission>(GetReg32(system, 3));
1203 1203
1204 ret = CreateSharedMemory64From32(system, &out_handle, size, owner_perm, remote_perm); 1204 ret = CreateSharedMemory64From32(system, std::addressof(out_handle), size, owner_perm, remote_perm);
1205 1205
1206 SetReg32(system, 0, Convert<uint32_t>(ret)); 1206 SetReg32(system, 0, Convert<uint32_t>(ret));
1207 SetReg32(system, 1, Convert<uint32_t>(out_handle)); 1207 SetReg32(system, 1, Convert<uint32_t>(out_handle));
@@ -1251,7 +1251,7 @@ static void SvcWrap_CreateInterruptEvent64From32(Core::System& system) {
1251 interrupt_id = Convert<int32_t>(GetReg32(system, 1)); 1251 interrupt_id = Convert<int32_t>(GetReg32(system, 1));
1252 interrupt_type = Convert<InterruptType>(GetReg32(system, 2)); 1252 interrupt_type = Convert<InterruptType>(GetReg32(system, 2));
1253 1253
1254 ret = CreateInterruptEvent64From32(system, &out_read_handle, interrupt_id, interrupt_type); 1254 ret = CreateInterruptEvent64From32(system, std::addressof(out_read_handle), interrupt_id, interrupt_type);
1255 1255
1256 SetReg32(system, 0, Convert<uint32_t>(ret)); 1256 SetReg32(system, 0, Convert<uint32_t>(ret));
1257 SetReg32(system, 1, Convert<uint32_t>(out_read_handle)); 1257 SetReg32(system, 1, Convert<uint32_t>(out_read_handle));
@@ -1265,7 +1265,7 @@ static void SvcWrap_QueryPhysicalAddress64From32(Core::System& system) {
1265 1265
1266 address = Convert<uint32_t>(GetReg32(system, 1)); 1266 address = Convert<uint32_t>(GetReg32(system, 1));
1267 1267
1268 ret = QueryPhysicalAddress64From32(system, &out_info, address); 1268 ret = QueryPhysicalAddress64From32(system, std::addressof(out_info), address);
1269 1269
1270 SetReg32(system, 0, Convert<uint32_t>(ret)); 1270 SetReg32(system, 0, Convert<uint32_t>(ret));
1271 auto out_info_scatter = Convert<std::array<uint32_t, 4>>(out_info); 1271 auto out_info_scatter = Convert<std::array<uint32_t, 4>>(out_info);
@@ -1289,7 +1289,7 @@ static void SvcWrap_QueryIoMapping64From32(Core::System& system) {
1289 physical_address = Convert<uint64_t>(physical_address_gather); 1289 physical_address = Convert<uint64_t>(physical_address_gather);
1290 size = Convert<uint32_t>(GetReg32(system, 0)); 1290 size = Convert<uint32_t>(GetReg32(system, 0));
1291 1291
1292 ret = QueryIoMapping64From32(system, &out_address, &out_size, physical_address, size); 1292 ret = QueryIoMapping64From32(system, std::addressof(out_address), std::addressof(out_size), physical_address, size);
1293 1293
1294 SetReg32(system, 0, Convert<uint32_t>(ret)); 1294 SetReg32(system, 0, Convert<uint32_t>(ret));
1295 SetReg32(system, 1, Convert<uint32_t>(out_address)); 1295 SetReg32(system, 1, Convert<uint32_t>(out_address));
@@ -1312,7 +1312,7 @@ static void SvcWrap_CreateDeviceAddressSpace64From32(Core::System& system) {
1312 das_size_gather[1] = GetReg32(system, 1); 1312 das_size_gather[1] = GetReg32(system, 1);
1313 das_size = Convert<uint64_t>(das_size_gather); 1313 das_size = Convert<uint64_t>(das_size_gather);
1314 1314
1315 ret = CreateDeviceAddressSpace64From32(system, &out_handle, das_address, das_size); 1315 ret = CreateDeviceAddressSpace64From32(system, std::addressof(out_handle), das_address, das_size);
1316 1316
1317 SetReg32(system, 0, Convert<uint32_t>(ret)); 1317 SetReg32(system, 0, Convert<uint32_t>(ret));
1318 SetReg32(system, 1, Convert<uint32_t>(out_handle)); 1318 SetReg32(system, 1, Convert<uint32_t>(out_handle));
@@ -1505,7 +1505,7 @@ static void SvcWrap_DebugActiveProcess64From32(Core::System& system) {
1505 process_id_gather[1] = GetReg32(system, 3); 1505 process_id_gather[1] = GetReg32(system, 3);
1506 process_id = Convert<uint64_t>(process_id_gather); 1506 process_id = Convert<uint64_t>(process_id_gather);
1507 1507
1508 ret = DebugActiveProcess64From32(system, &out_handle, process_id); 1508 ret = DebugActiveProcess64From32(system, std::addressof(out_handle), process_id);
1509 1509
1510 SetReg32(system, 0, Convert<uint32_t>(ret)); 1510 SetReg32(system, 0, Convert<uint32_t>(ret));
1511 SetReg32(system, 1, Convert<uint32_t>(out_handle)); 1511 SetReg32(system, 1, Convert<uint32_t>(out_handle));
@@ -1577,7 +1577,7 @@ static void SvcWrap_GetProcessList64From32(Core::System& system) {
1577 out_process_ids = Convert<uint32_t>(GetReg32(system, 1)); 1577 out_process_ids = Convert<uint32_t>(GetReg32(system, 1));
1578 max_out_count = Convert<int32_t>(GetReg32(system, 2)); 1578 max_out_count = Convert<int32_t>(GetReg32(system, 2));
1579 1579
1580 ret = GetProcessList64From32(system, &out_num_processes, out_process_ids, max_out_count); 1580 ret = GetProcessList64From32(system, std::addressof(out_num_processes), out_process_ids, max_out_count);
1581 1581
1582 SetReg32(system, 0, Convert<uint32_t>(ret)); 1582 SetReg32(system, 0, Convert<uint32_t>(ret));
1583 SetReg32(system, 1, Convert<uint32_t>(out_num_processes)); 1583 SetReg32(system, 1, Convert<uint32_t>(out_num_processes));
@@ -1595,7 +1595,7 @@ static void SvcWrap_GetThreadList64From32(Core::System& system) {
1595 max_out_count = Convert<int32_t>(GetReg32(system, 2)); 1595 max_out_count = Convert<int32_t>(GetReg32(system, 2));
1596 debug_handle = Convert<Handle>(GetReg32(system, 3)); 1596 debug_handle = Convert<Handle>(GetReg32(system, 3));
1597 1597
1598 ret = GetThreadList64From32(system, &out_num_threads, out_thread_ids, max_out_count, debug_handle); 1598 ret = GetThreadList64From32(system, std::addressof(out_num_threads), out_thread_ids, max_out_count, debug_handle);
1599 1599
1600 SetReg32(system, 0, Convert<uint32_t>(ret)); 1600 SetReg32(system, 0, Convert<uint32_t>(ret));
1601 SetReg32(system, 1, Convert<uint32_t>(out_num_threads)); 1601 SetReg32(system, 1, Convert<uint32_t>(out_num_threads));
@@ -1655,7 +1655,7 @@ static void SvcWrap_QueryDebugProcessMemory64From32(Core::System& system) {
1655 process_handle = Convert<Handle>(GetReg32(system, 2)); 1655 process_handle = Convert<Handle>(GetReg32(system, 2));
1656 address = Convert<uint32_t>(GetReg32(system, 3)); 1656 address = Convert<uint32_t>(GetReg32(system, 3));
1657 1657
1658 ret = QueryDebugProcessMemory64From32(system, out_memory_info, &out_page_info, process_handle, address); 1658 ret = QueryDebugProcessMemory64From32(system, out_memory_info, std::addressof(out_page_info), process_handle, address);
1659 1659
1660 SetReg32(system, 0, Convert<uint32_t>(ret)); 1660 SetReg32(system, 0, Convert<uint32_t>(ret));
1661 SetReg32(system, 1, Convert<uint32_t>(out_page_info)); 1661 SetReg32(system, 1, Convert<uint32_t>(out_page_info));
@@ -1735,7 +1735,7 @@ static void SvcWrap_GetDebugThreadParam64From32(Core::System& system) {
1735 thread_id = Convert<uint64_t>(thread_id_gather); 1735 thread_id = Convert<uint64_t>(thread_id_gather);
1736 param = Convert<DebugThreadParam>(GetReg32(system, 3)); 1736 param = Convert<DebugThreadParam>(GetReg32(system, 3));
1737 1737
1738 ret = GetDebugThreadParam64From32(system, &out_64, &out_32, debug_handle, thread_id, param); 1738 ret = GetDebugThreadParam64From32(system, std::addressof(out_64), std::addressof(out_32), debug_handle, thread_id, param);
1739 1739
1740 SetReg32(system, 0, Convert<uint32_t>(ret)); 1740 SetReg32(system, 0, Convert<uint32_t>(ret));
1741 auto out_64_scatter = Convert<std::array<uint32_t, 2>>(out_64); 1741 auto out_64_scatter = Convert<std::array<uint32_t, 2>>(out_64);
@@ -1759,7 +1759,7 @@ static void SvcWrap_GetSystemInfo64From32(Core::System& system) {
1759 info_subtype_gather[1] = GetReg32(system, 3); 1759 info_subtype_gather[1] = GetReg32(system, 3);
1760 info_subtype = Convert<uint64_t>(info_subtype_gather); 1760 info_subtype = Convert<uint64_t>(info_subtype_gather);
1761 1761
1762 ret = GetSystemInfo64From32(system, &out, info_type, handle, info_subtype); 1762 ret = GetSystemInfo64From32(system, std::addressof(out), info_type, handle, info_subtype);
1763 1763
1764 SetReg32(system, 0, Convert<uint32_t>(ret)); 1764 SetReg32(system, 0, Convert<uint32_t>(ret));
1765 auto out_scatter = Convert<std::array<uint32_t, 2>>(out); 1765 auto out_scatter = Convert<std::array<uint32_t, 2>>(out);
@@ -1780,7 +1780,7 @@ static void SvcWrap_CreatePort64From32(Core::System& system) {
1780 is_light = Convert<bool>(GetReg32(system, 3)); 1780 is_light = Convert<bool>(GetReg32(system, 3));
1781 name = Convert<uint32_t>(GetReg32(system, 0)); 1781 name = Convert<uint32_t>(GetReg32(system, 0));
1782 1782
1783 ret = CreatePort64From32(system, &out_server_handle, &out_client_handle, max_sessions, is_light, name); 1783 ret = CreatePort64From32(system, std::addressof(out_server_handle), std::addressof(out_client_handle), max_sessions, is_light, name);
1784 1784
1785 SetReg32(system, 0, Convert<uint32_t>(ret)); 1785 SetReg32(system, 0, Convert<uint32_t>(ret));
1786 SetReg32(system, 1, Convert<uint32_t>(out_server_handle)); 1786 SetReg32(system, 1, Convert<uint32_t>(out_server_handle));
@@ -1797,7 +1797,7 @@ static void SvcWrap_ManageNamedPort64From32(Core::System& system) {
1797 name = Convert<uint32_t>(GetReg32(system, 1)); 1797 name = Convert<uint32_t>(GetReg32(system, 1));
1798 max_sessions = Convert<int32_t>(GetReg32(system, 2)); 1798 max_sessions = Convert<int32_t>(GetReg32(system, 2));
1799 1799
1800 ret = ManageNamedPort64From32(system, &out_server_handle, name, max_sessions); 1800 ret = ManageNamedPort64From32(system, std::addressof(out_server_handle), name, max_sessions);
1801 1801
1802 SetReg32(system, 0, Convert<uint32_t>(ret)); 1802 SetReg32(system, 0, Convert<uint32_t>(ret));
1803 SetReg32(system, 1, Convert<uint32_t>(out_server_handle)); 1803 SetReg32(system, 1, Convert<uint32_t>(out_server_handle));
@@ -1811,7 +1811,7 @@ static void SvcWrap_ConnectToPort64From32(Core::System& system) {
1811 1811
1812 port = Convert<Handle>(GetReg32(system, 1)); 1812 port = Convert<Handle>(GetReg32(system, 1));
1813 1813
1814 ret = ConnectToPort64From32(system, &out_handle, port); 1814 ret = ConnectToPort64From32(system, std::addressof(out_handle), port);
1815 1815
1816 SetReg32(system, 0, Convert<uint32_t>(ret)); 1816 SetReg32(system, 0, Convert<uint32_t>(ret));
1817 SetReg32(system, 1, Convert<uint32_t>(out_handle)); 1817 SetReg32(system, 1, Convert<uint32_t>(out_handle));
@@ -1898,7 +1898,7 @@ static void SvcWrap_QueryProcessMemory64From32(Core::System& system) {
1898 address_gather[1] = GetReg32(system, 3); 1898 address_gather[1] = GetReg32(system, 3);
1899 address = Convert<uint64_t>(address_gather); 1899 address = Convert<uint64_t>(address_gather);
1900 1900
1901 ret = QueryProcessMemory64From32(system, out_memory_info, &out_page_info, process_handle, address); 1901 ret = QueryProcessMemory64From32(system, out_memory_info, std::addressof(out_page_info), process_handle, address);
1902 1902
1903 SetReg32(system, 0, Convert<uint32_t>(ret)); 1903 SetReg32(system, 0, Convert<uint32_t>(ret));
1904 SetReg32(system, 1, Convert<uint32_t>(out_page_info)); 1904 SetReg32(system, 1, Convert<uint32_t>(out_page_info));
@@ -1970,7 +1970,7 @@ static void SvcWrap_CreateProcess64From32(Core::System& system) {
1970 caps = Convert<uint32_t>(GetReg32(system, 2)); 1970 caps = Convert<uint32_t>(GetReg32(system, 2));
1971 num_caps = Convert<int32_t>(GetReg32(system, 3)); 1971 num_caps = Convert<int32_t>(GetReg32(system, 3));
1972 1972
1973 ret = CreateProcess64From32(system, &out_handle, parameters, caps, num_caps); 1973 ret = CreateProcess64From32(system, std::addressof(out_handle), parameters, caps, num_caps);
1974 1974
1975 SetReg32(system, 0, Convert<uint32_t>(ret)); 1975 SetReg32(system, 0, Convert<uint32_t>(ret));
1976 SetReg32(system, 1, Convert<uint32_t>(out_handle)); 1976 SetReg32(system, 1, Convert<uint32_t>(out_handle));
@@ -2019,7 +2019,7 @@ static void SvcWrap_GetProcessInfo64From32(Core::System& system) {
2019 process_handle = Convert<Handle>(GetReg32(system, 1)); 2019 process_handle = Convert<Handle>(GetReg32(system, 1));
2020 info_type = Convert<ProcessInfoType>(GetReg32(system, 2)); 2020 info_type = Convert<ProcessInfoType>(GetReg32(system, 2));
2021 2021
2022 ret = GetProcessInfo64From32(system, &out_info, process_handle, info_type); 2022 ret = GetProcessInfo64From32(system, std::addressof(out_info), process_handle, info_type);
2023 2023
2024 SetReg32(system, 0, Convert<uint32_t>(ret)); 2024 SetReg32(system, 0, Convert<uint32_t>(ret));
2025 auto out_info_scatter = Convert<std::array<uint32_t, 2>>(out_info); 2025 auto out_info_scatter = Convert<std::array<uint32_t, 2>>(out_info);
@@ -2032,7 +2032,7 @@ static void SvcWrap_CreateResourceLimit64From32(Core::System& system) {
2032 2032
2033 Handle out_handle{}; 2033 Handle out_handle{};
2034 2034
2035 ret = CreateResourceLimit64From32(system, &out_handle); 2035 ret = CreateResourceLimit64From32(system, std::addressof(out_handle));
2036 2036
2037 SetReg32(system, 0, Convert<uint32_t>(ret)); 2037 SetReg32(system, 0, Convert<uint32_t>(ret));
2038 SetReg32(system, 1, Convert<uint32_t>(out_handle)); 2038 SetReg32(system, 1, Convert<uint32_t>(out_handle));
@@ -2093,7 +2093,7 @@ static void SvcWrap_SetHeapSize64(Core::System& system) {
2093 2093
2094 size = Convert<uint64_t>(GetReg64(system, 1)); 2094 size = Convert<uint64_t>(GetReg64(system, 1));
2095 2095
2096 ret = SetHeapSize64(system, &out_address, size); 2096 ret = SetHeapSize64(system, std::addressof(out_address), size);
2097 2097
2098 SetReg64(system, 0, Convert<uint64_t>(ret)); 2098 SetReg64(system, 0, Convert<uint64_t>(ret));
2099 SetReg64(system, 1, Convert<uint64_t>(out_address)); 2099 SetReg64(system, 1, Convert<uint64_t>(out_address));
@@ -2175,7 +2175,7 @@ static void SvcWrap_QueryMemory64(Core::System& system) {
2175 out_memory_info = Convert<uint64_t>(GetReg64(system, 0)); 2175 out_memory_info = Convert<uint64_t>(GetReg64(system, 0));
2176 address = Convert<uint64_t>(GetReg64(system, 2)); 2176 address = Convert<uint64_t>(GetReg64(system, 2));
2177 2177
2178 ret = QueryMemory64(system, out_memory_info, &out_page_info, address); 2178 ret = QueryMemory64(system, out_memory_info, std::addressof(out_page_info), address);
2179 2179
2180 SetReg64(system, 0, Convert<uint64_t>(ret)); 2180 SetReg64(system, 0, Convert<uint64_t>(ret));
2181 SetReg64(system, 1, Convert<uint64_t>(out_page_info)); 2181 SetReg64(system, 1, Convert<uint64_t>(out_page_info));
@@ -2201,7 +2201,7 @@ static void SvcWrap_CreateThread64(Core::System& system) {
2201 priority = Convert<int32_t>(GetReg64(system, 4)); 2201 priority = Convert<int32_t>(GetReg64(system, 4));
2202 core_id = Convert<int32_t>(GetReg64(system, 5)); 2202 core_id = Convert<int32_t>(GetReg64(system, 5));
2203 2203
2204 ret = CreateThread64(system, &out_handle, func, arg, stack_bottom, priority, core_id); 2204 ret = CreateThread64(system, std::addressof(out_handle), func, arg, stack_bottom, priority, core_id);
2205 2205
2206 SetReg64(system, 0, Convert<uint64_t>(ret)); 2206 SetReg64(system, 0, Convert<uint64_t>(ret));
2207 SetReg64(system, 1, Convert<uint64_t>(out_handle)); 2207 SetReg64(system, 1, Convert<uint64_t>(out_handle));
@@ -2239,7 +2239,7 @@ static void SvcWrap_GetThreadPriority64(Core::System& system) {
2239 2239
2240 thread_handle = Convert<Handle>(GetReg64(system, 1)); 2240 thread_handle = Convert<Handle>(GetReg64(system, 1));
2241 2241
2242 ret = GetThreadPriority64(system, &out_priority, thread_handle); 2242 ret = GetThreadPriority64(system, std::addressof(out_priority), thread_handle);
2243 2243
2244 SetReg64(system, 0, Convert<uint64_t>(ret)); 2244 SetReg64(system, 0, Convert<uint64_t>(ret));
2245 SetReg64(system, 1, Convert<uint64_t>(out_priority)); 2245 SetReg64(system, 1, Convert<uint64_t>(out_priority));
@@ -2268,7 +2268,7 @@ static void SvcWrap_GetThreadCoreMask64(Core::System& system) {
2268 2268
2269 thread_handle = Convert<Handle>(GetReg64(system, 2)); 2269 thread_handle = Convert<Handle>(GetReg64(system, 2));
2270 2270
2271 ret = GetThreadCoreMask64(system, &out_core_id, &out_affinity_mask, thread_handle); 2271 ret = GetThreadCoreMask64(system, std::addressof(out_core_id), std::addressof(out_affinity_mask), thread_handle);
2272 2272
2273 SetReg64(system, 0, Convert<uint64_t>(ret)); 2273 SetReg64(system, 0, Convert<uint64_t>(ret));
2274 SetReg64(system, 1, Convert<uint64_t>(out_core_id)); 2274 SetReg64(system, 1, Convert<uint64_t>(out_core_id));
@@ -2369,7 +2369,7 @@ static void SvcWrap_CreateTransferMemory64(Core::System& system) {
2369 size = Convert<uint64_t>(GetReg64(system, 2)); 2369 size = Convert<uint64_t>(GetReg64(system, 2));
2370 map_perm = Convert<MemoryPermission>(GetReg64(system, 3)); 2370 map_perm = Convert<MemoryPermission>(GetReg64(system, 3));
2371 2371
2372 ret = CreateTransferMemory64(system, &out_handle, address, size, map_perm); 2372 ret = CreateTransferMemory64(system, std::addressof(out_handle), address, size, map_perm);
2373 2373
2374 SetReg64(system, 0, Convert<uint64_t>(ret)); 2374 SetReg64(system, 0, Convert<uint64_t>(ret));
2375 SetReg64(system, 1, Convert<uint64_t>(out_handle)); 2375 SetReg64(system, 1, Convert<uint64_t>(out_handle));
@@ -2411,7 +2411,7 @@ static void SvcWrap_WaitSynchronization64(Core::System& system) {
2411 num_handles = Convert<int32_t>(GetReg64(system, 2)); 2411 num_handles = Convert<int32_t>(GetReg64(system, 2));
2412 timeout_ns = Convert<int64_t>(GetReg64(system, 3)); 2412 timeout_ns = Convert<int64_t>(GetReg64(system, 3));
2413 2413
2414 ret = WaitSynchronization64(system, &out_index, handles, num_handles, timeout_ns); 2414 ret = WaitSynchronization64(system, std::addressof(out_index), handles, num_handles, timeout_ns);
2415 2415
2416 SetReg64(system, 0, Convert<uint64_t>(ret)); 2416 SetReg64(system, 0, Convert<uint64_t>(ret));
2417 SetReg64(system, 1, Convert<uint64_t>(out_index)); 2417 SetReg64(system, 1, Convert<uint64_t>(out_index));
@@ -2501,7 +2501,7 @@ static void SvcWrap_ConnectToNamedPort64(Core::System& system) {
2501 2501
2502 name = Convert<uint64_t>(GetReg64(system, 1)); 2502 name = Convert<uint64_t>(GetReg64(system, 1));
2503 2503
2504 ret = ConnectToNamedPort64(system, &out_handle, name); 2504 ret = ConnectToNamedPort64(system, std::addressof(out_handle), name);
2505 2505
2506 SetReg64(system, 0, Convert<uint64_t>(ret)); 2506 SetReg64(system, 0, Convert<uint64_t>(ret));
2507 SetReg64(system, 1, Convert<uint64_t>(out_handle)); 2507 SetReg64(system, 1, Convert<uint64_t>(out_handle));
@@ -2547,7 +2547,7 @@ static void SvcWrap_SendAsyncRequestWithUserBuffer64(Core::System& system) {
2547 message_buffer_size = Convert<uint64_t>(GetReg64(system, 2)); 2547 message_buffer_size = Convert<uint64_t>(GetReg64(system, 2));
2548 session_handle = Convert<Handle>(GetReg64(system, 3)); 2548 session_handle = Convert<Handle>(GetReg64(system, 3));
2549 2549
2550 ret = SendAsyncRequestWithUserBuffer64(system, &out_event_handle, message_buffer, message_buffer_size, session_handle); 2550 ret = SendAsyncRequestWithUserBuffer64(system, std::addressof(out_event_handle), message_buffer, message_buffer_size, session_handle);
2551 2551
2552 SetReg64(system, 0, Convert<uint64_t>(ret)); 2552 SetReg64(system, 0, Convert<uint64_t>(ret));
2553 SetReg64(system, 1, Convert<uint64_t>(out_event_handle)); 2553 SetReg64(system, 1, Convert<uint64_t>(out_event_handle));
@@ -2561,7 +2561,7 @@ static void SvcWrap_GetProcessId64(Core::System& system) {
2561 2561
2562 process_handle = Convert<Handle>(GetReg64(system, 1)); 2562 process_handle = Convert<Handle>(GetReg64(system, 1));
2563 2563
2564 ret = GetProcessId64(system, &out_process_id, process_handle); 2564 ret = GetProcessId64(system, std::addressof(out_process_id), process_handle);
2565 2565
2566 SetReg64(system, 0, Convert<uint64_t>(ret)); 2566 SetReg64(system, 0, Convert<uint64_t>(ret));
2567 SetReg64(system, 1, Convert<uint64_t>(out_process_id)); 2567 SetReg64(system, 1, Convert<uint64_t>(out_process_id));
@@ -2575,7 +2575,7 @@ static void SvcWrap_GetThreadId64(Core::System& system) {
2575 2575
2576 thread_handle = Convert<Handle>(GetReg64(system, 1)); 2576 thread_handle = Convert<Handle>(GetReg64(system, 1));
2577 2577
2578 ret = GetThreadId64(system, &out_thread_id, thread_handle); 2578 ret = GetThreadId64(system, std::addressof(out_thread_id), thread_handle);
2579 2579
2580 SetReg64(system, 0, Convert<uint64_t>(ret)); 2580 SetReg64(system, 0, Convert<uint64_t>(ret));
2581 SetReg64(system, 1, Convert<uint64_t>(out_thread_id)); 2581 SetReg64(system, 1, Convert<uint64_t>(out_thread_id));
@@ -2627,7 +2627,7 @@ static void SvcWrap_GetInfo64(Core::System& system) {
2627 handle = Convert<Handle>(GetReg64(system, 2)); 2627 handle = Convert<Handle>(GetReg64(system, 2));
2628 info_subtype = Convert<uint64_t>(GetReg64(system, 3)); 2628 info_subtype = Convert<uint64_t>(GetReg64(system, 3));
2629 2629
2630 ret = GetInfo64(system, &out, info_type, handle, info_subtype); 2630 ret = GetInfo64(system, std::addressof(out), info_type, handle, info_subtype);
2631 2631
2632 SetReg64(system, 0, Convert<uint64_t>(ret)); 2632 SetReg64(system, 0, Convert<uint64_t>(ret));
2633 SetReg64(system, 1, Convert<uint64_t>(out)); 2633 SetReg64(system, 1, Convert<uint64_t>(out));
@@ -2690,7 +2690,7 @@ static void SvcWrap_GetDebugFutureThreadInfo64(Core::System& system) {
2690 debug_handle = Convert<Handle>(GetReg64(system, 2)); 2690 debug_handle = Convert<Handle>(GetReg64(system, 2));
2691 ns = Convert<int64_t>(GetReg64(system, 3)); 2691 ns = Convert<int64_t>(GetReg64(system, 3));
2692 2692
2693 ret = GetDebugFutureThreadInfo64(system, &out_context, &out_thread_id, debug_handle, ns); 2693 ret = GetDebugFutureThreadInfo64(system, std::addressof(out_context), std::addressof(out_thread_id), debug_handle, ns);
2694 2694
2695 SetReg64(system, 0, Convert<uint64_t>(ret)); 2695 SetReg64(system, 0, Convert<uint64_t>(ret));
2696 auto out_context_scatter = Convert<std::array<uint64_t, 4>>(out_context); 2696 auto out_context_scatter = Convert<std::array<uint64_t, 4>>(out_context);
@@ -2708,7 +2708,7 @@ static void SvcWrap_GetLastThreadInfo64(Core::System& system) {
2708 uint64_t out_tls_address{}; 2708 uint64_t out_tls_address{};
2709 uint32_t out_flags{}; 2709 uint32_t out_flags{};
2710 2710
2711 ret = GetLastThreadInfo64(system, &out_context, &out_tls_address, &out_flags); 2711 ret = GetLastThreadInfo64(system, std::addressof(out_context), std::addressof(out_tls_address), std::addressof(out_flags));
2712 2712
2713 SetReg64(system, 0, Convert<uint64_t>(ret)); 2713 SetReg64(system, 0, Convert<uint64_t>(ret));
2714 auto out_context_scatter = Convert<std::array<uint64_t, 4>>(out_context); 2714 auto out_context_scatter = Convert<std::array<uint64_t, 4>>(out_context);
@@ -2730,7 +2730,7 @@ static void SvcWrap_GetResourceLimitLimitValue64(Core::System& system) {
2730 resource_limit_handle = Convert<Handle>(GetReg64(system, 1)); 2730 resource_limit_handle = Convert<Handle>(GetReg64(system, 1));
2731 which = Convert<LimitableResource>(GetReg64(system, 2)); 2731 which = Convert<LimitableResource>(GetReg64(system, 2));
2732 2732
2733 ret = GetResourceLimitLimitValue64(system, &out_limit_value, resource_limit_handle, which); 2733 ret = GetResourceLimitLimitValue64(system, std::addressof(out_limit_value), resource_limit_handle, which);
2734 2734
2735 SetReg64(system, 0, Convert<uint64_t>(ret)); 2735 SetReg64(system, 0, Convert<uint64_t>(ret));
2736 SetReg64(system, 1, Convert<uint64_t>(out_limit_value)); 2736 SetReg64(system, 1, Convert<uint64_t>(out_limit_value));
@@ -2746,7 +2746,7 @@ static void SvcWrap_GetResourceLimitCurrentValue64(Core::System& system) {
2746 resource_limit_handle = Convert<Handle>(GetReg64(system, 1)); 2746 resource_limit_handle = Convert<Handle>(GetReg64(system, 1));
2747 which = Convert<LimitableResource>(GetReg64(system, 2)); 2747 which = Convert<LimitableResource>(GetReg64(system, 2));
2748 2748
2749 ret = GetResourceLimitCurrentValue64(system, &out_current_value, resource_limit_handle, which); 2749 ret = GetResourceLimitCurrentValue64(system, std::addressof(out_current_value), resource_limit_handle, which);
2750 2750
2751 SetReg64(system, 0, Convert<uint64_t>(ret)); 2751 SetReg64(system, 0, Convert<uint64_t>(ret));
2752 SetReg64(system, 1, Convert<uint64_t>(out_current_value)); 2752 SetReg64(system, 1, Convert<uint64_t>(out_current_value));
@@ -2830,7 +2830,7 @@ static void SvcWrap_GetResourceLimitPeakValue64(Core::System& system) {
2830 resource_limit_handle = Convert<Handle>(GetReg64(system, 1)); 2830 resource_limit_handle = Convert<Handle>(GetReg64(system, 1));
2831 which = Convert<LimitableResource>(GetReg64(system, 2)); 2831 which = Convert<LimitableResource>(GetReg64(system, 2));
2832 2832
2833 ret = GetResourceLimitPeakValue64(system, &out_peak_value, resource_limit_handle, which); 2833 ret = GetResourceLimitPeakValue64(system, std::addressof(out_peak_value), resource_limit_handle, which);
2834 2834
2835 SetReg64(system, 0, Convert<uint64_t>(ret)); 2835 SetReg64(system, 0, Convert<uint64_t>(ret));
2836 SetReg64(system, 1, Convert<uint64_t>(out_peak_value)); 2836 SetReg64(system, 1, Convert<uint64_t>(out_peak_value));
@@ -2844,7 +2844,7 @@ static void SvcWrap_CreateIoPool64(Core::System& system) {
2844 2844
2845 which = Convert<IoPoolType>(GetReg64(system, 1)); 2845 which = Convert<IoPoolType>(GetReg64(system, 1));
2846 2846
2847 ret = CreateIoPool64(system, &out_handle, which); 2847 ret = CreateIoPool64(system, std::addressof(out_handle), which);
2848 2848
2849 SetReg64(system, 0, Convert<uint64_t>(ret)); 2849 SetReg64(system, 0, Convert<uint64_t>(ret));
2850 SetReg64(system, 1, Convert<uint64_t>(out_handle)); 2850 SetReg64(system, 1, Convert<uint64_t>(out_handle));
@@ -2866,7 +2866,7 @@ static void SvcWrap_CreateIoRegion64(Core::System& system) {
2866 mapping = Convert<MemoryMapping>(GetReg64(system, 4)); 2866 mapping = Convert<MemoryMapping>(GetReg64(system, 4));
2867 perm = Convert<MemoryPermission>(GetReg64(system, 5)); 2867 perm = Convert<MemoryPermission>(GetReg64(system, 5));
2868 2868
2869 ret = CreateIoRegion64(system, &out_handle, io_pool, physical_address, size, mapping, perm); 2869 ret = CreateIoRegion64(system, std::addressof(out_handle), io_pool, physical_address, size, mapping, perm);
2870 2870
2871 SetReg64(system, 0, Convert<uint64_t>(ret)); 2871 SetReg64(system, 0, Convert<uint64_t>(ret));
2872 SetReg64(system, 1, Convert<uint64_t>(out_handle)); 2872 SetReg64(system, 1, Convert<uint64_t>(out_handle));
@@ -2905,7 +2905,7 @@ static void SvcWrap_CreateSession64(Core::System& system) {
2905 is_light = Convert<bool>(GetReg64(system, 2)); 2905 is_light = Convert<bool>(GetReg64(system, 2));
2906 name = Convert<uint64_t>(GetReg64(system, 3)); 2906 name = Convert<uint64_t>(GetReg64(system, 3));
2907 2907
2908 ret = CreateSession64(system, &out_server_session_handle, &out_client_session_handle, is_light, name); 2908 ret = CreateSession64(system, std::addressof(out_server_session_handle), std::addressof(out_client_session_handle), is_light, name);
2909 2909
2910 SetReg64(system, 0, Convert<uint64_t>(ret)); 2910 SetReg64(system, 0, Convert<uint64_t>(ret));
2911 SetReg64(system, 1, Convert<uint64_t>(out_server_session_handle)); 2911 SetReg64(system, 1, Convert<uint64_t>(out_server_session_handle));
@@ -2920,7 +2920,7 @@ static void SvcWrap_AcceptSession64(Core::System& system) {
2920 2920
2921 port = Convert<Handle>(GetReg64(system, 1)); 2921 port = Convert<Handle>(GetReg64(system, 1));
2922 2922
2923 ret = AcceptSession64(system, &out_handle, port); 2923 ret = AcceptSession64(system, std::addressof(out_handle), port);
2924 2924
2925 SetReg64(system, 0, Convert<uint64_t>(ret)); 2925 SetReg64(system, 0, Convert<uint64_t>(ret));
2926 SetReg64(system, 1, Convert<uint64_t>(out_handle)); 2926 SetReg64(system, 1, Convert<uint64_t>(out_handle));
@@ -2940,7 +2940,7 @@ static void SvcWrap_ReplyAndReceive64(Core::System& system) {
2940 reply_target = Convert<Handle>(GetReg64(system, 3)); 2940 reply_target = Convert<Handle>(GetReg64(system, 3));
2941 timeout_ns = Convert<int64_t>(GetReg64(system, 4)); 2941 timeout_ns = Convert<int64_t>(GetReg64(system, 4));
2942 2942
2943 ret = ReplyAndReceive64(system, &out_index, handles, num_handles, reply_target, timeout_ns); 2943 ret = ReplyAndReceive64(system, std::addressof(out_index), handles, num_handles, reply_target, timeout_ns);
2944 2944
2945 SetReg64(system, 0, Convert<uint64_t>(ret)); 2945 SetReg64(system, 0, Convert<uint64_t>(ret));
2946 SetReg64(system, 1, Convert<uint64_t>(out_index)); 2946 SetReg64(system, 1, Convert<uint64_t>(out_index));
@@ -2964,7 +2964,7 @@ static void SvcWrap_ReplyAndReceiveWithUserBuffer64(Core::System& system) {
2964 reply_target = Convert<Handle>(GetReg64(system, 5)); 2964 reply_target = Convert<Handle>(GetReg64(system, 5));
2965 timeout_ns = Convert<int64_t>(GetReg64(system, 6)); 2965 timeout_ns = Convert<int64_t>(GetReg64(system, 6));
2966 2966
2967 ret = ReplyAndReceiveWithUserBuffer64(system, &out_index, message_buffer, message_buffer_size, handles, num_handles, reply_target, timeout_ns); 2967 ret = ReplyAndReceiveWithUserBuffer64(system, std::addressof(out_index), message_buffer, message_buffer_size, handles, num_handles, reply_target, timeout_ns);
2968 2968
2969 SetReg64(system, 0, Convert<uint64_t>(ret)); 2969 SetReg64(system, 0, Convert<uint64_t>(ret));
2970 SetReg64(system, 1, Convert<uint64_t>(out_index)); 2970 SetReg64(system, 1, Convert<uint64_t>(out_index));
@@ -2976,7 +2976,7 @@ static void SvcWrap_CreateEvent64(Core::System& system) {
2976 Handle out_write_handle{}; 2976 Handle out_write_handle{};
2977 Handle out_read_handle{}; 2977 Handle out_read_handle{};
2978 2978
2979 ret = CreateEvent64(system, &out_write_handle, &out_read_handle); 2979 ret = CreateEvent64(system, std::addressof(out_write_handle), std::addressof(out_read_handle));
2980 2980
2981 SetReg64(system, 0, Convert<uint64_t>(ret)); 2981 SetReg64(system, 0, Convert<uint64_t>(ret));
2982 SetReg64(system, 1, Convert<uint64_t>(out_write_handle)); 2982 SetReg64(system, 1, Convert<uint64_t>(out_write_handle));
@@ -3067,7 +3067,7 @@ static void SvcWrap_CreateCodeMemory64(Core::System& system) {
3067 address = Convert<uint64_t>(GetReg64(system, 1)); 3067 address = Convert<uint64_t>(GetReg64(system, 1));
3068 size = Convert<uint64_t>(GetReg64(system, 2)); 3068 size = Convert<uint64_t>(GetReg64(system, 2));
3069 3069
3070 ret = CreateCodeMemory64(system, &out_handle, address, size); 3070 ret = CreateCodeMemory64(system, std::addressof(out_handle), address, size);
3071 3071
3072 SetReg64(system, 0, Convert<uint64_t>(ret)); 3072 SetReg64(system, 0, Convert<uint64_t>(ret));
3073 SetReg64(system, 1, Convert<uint64_t>(out_handle)); 3073 SetReg64(system, 1, Convert<uint64_t>(out_handle));
@@ -3109,7 +3109,7 @@ static void SvcWrap_ReadWriteRegister64(Core::System& system) {
3109 mask = Convert<uint32_t>(GetReg64(system, 2)); 3109 mask = Convert<uint32_t>(GetReg64(system, 2));
3110 value = Convert<uint32_t>(GetReg64(system, 3)); 3110 value = Convert<uint32_t>(GetReg64(system, 3));
3111 3111
3112 ret = ReadWriteRegister64(system, &out_value, address, mask, value); 3112 ret = ReadWriteRegister64(system, std::addressof(out_value), address, mask, value);
3113 3113
3114 SetReg64(system, 0, Convert<uint64_t>(ret)); 3114 SetReg64(system, 0, Convert<uint64_t>(ret));
3115 SetReg64(system, 1, Convert<uint64_t>(out_value)); 3115 SetReg64(system, 1, Convert<uint64_t>(out_value));
@@ -3141,7 +3141,7 @@ static void SvcWrap_CreateSharedMemory64(Core::System& system) {
3141 owner_perm = Convert<MemoryPermission>(GetReg64(system, 2)); 3141 owner_perm = Convert<MemoryPermission>(GetReg64(system, 2));
3142 remote_perm = Convert<MemoryPermission>(GetReg64(system, 3)); 3142 remote_perm = Convert<MemoryPermission>(GetReg64(system, 3));
3143 3143
3144 ret = CreateSharedMemory64(system, &out_handle, size, owner_perm, remote_perm); 3144 ret = CreateSharedMemory64(system, std::addressof(out_handle), size, owner_perm, remote_perm);
3145 3145
3146 SetReg64(system, 0, Convert<uint64_t>(ret)); 3146 SetReg64(system, 0, Convert<uint64_t>(ret));
3147 SetReg64(system, 1, Convert<uint64_t>(out_handle)); 3147 SetReg64(system, 1, Convert<uint64_t>(out_handle));
@@ -3191,7 +3191,7 @@ static void SvcWrap_CreateInterruptEvent64(Core::System& system) {
3191 interrupt_id = Convert<int32_t>(GetReg64(system, 1)); 3191 interrupt_id = Convert<int32_t>(GetReg64(system, 1));
3192 interrupt_type = Convert<InterruptType>(GetReg64(system, 2)); 3192 interrupt_type = Convert<InterruptType>(GetReg64(system, 2));
3193 3193
3194 ret = CreateInterruptEvent64(system, &out_read_handle, interrupt_id, interrupt_type); 3194 ret = CreateInterruptEvent64(system, std::addressof(out_read_handle), interrupt_id, interrupt_type);
3195 3195
3196 SetReg64(system, 0, Convert<uint64_t>(ret)); 3196 SetReg64(system, 0, Convert<uint64_t>(ret));
3197 SetReg64(system, 1, Convert<uint64_t>(out_read_handle)); 3197 SetReg64(system, 1, Convert<uint64_t>(out_read_handle));
@@ -3205,7 +3205,7 @@ static void SvcWrap_QueryPhysicalAddress64(Core::System& system) {
3205 3205
3206 address = Convert<uint64_t>(GetReg64(system, 1)); 3206 address = Convert<uint64_t>(GetReg64(system, 1));
3207 3207
3208 ret = QueryPhysicalAddress64(system, &out_info, address); 3208 ret = QueryPhysicalAddress64(system, std::addressof(out_info), address);
3209 3209
3210 SetReg64(system, 0, Convert<uint64_t>(ret)); 3210 SetReg64(system, 0, Convert<uint64_t>(ret));
3211 auto out_info_scatter = Convert<std::array<uint64_t, 3>>(out_info); 3211 auto out_info_scatter = Convert<std::array<uint64_t, 3>>(out_info);
@@ -3225,7 +3225,7 @@ static void SvcWrap_QueryIoMapping64(Core::System& system) {
3225 physical_address = Convert<uint64_t>(GetReg64(system, 2)); 3225 physical_address = Convert<uint64_t>(GetReg64(system, 2));
3226 size = Convert<uint64_t>(GetReg64(system, 3)); 3226 size = Convert<uint64_t>(GetReg64(system, 3));
3227 3227
3228 ret = QueryIoMapping64(system, &out_address, &out_size, physical_address, size); 3228 ret = QueryIoMapping64(system, std::addressof(out_address), std::addressof(out_size), physical_address, size);
3229 3229
3230 SetReg64(system, 0, Convert<uint64_t>(ret)); 3230 SetReg64(system, 0, Convert<uint64_t>(ret));
3231 SetReg64(system, 1, Convert<uint64_t>(out_address)); 3231 SetReg64(system, 1, Convert<uint64_t>(out_address));
@@ -3242,7 +3242,7 @@ static void SvcWrap_CreateDeviceAddressSpace64(Core::System& system) {
3242 das_address = Convert<uint64_t>(GetReg64(system, 1)); 3242 das_address = Convert<uint64_t>(GetReg64(system, 1));
3243 das_size = Convert<uint64_t>(GetReg64(system, 2)); 3243 das_size = Convert<uint64_t>(GetReg64(system, 2));
3244 3244
3245 ret = CreateDeviceAddressSpace64(system, &out_handle, das_address, das_size); 3245 ret = CreateDeviceAddressSpace64(system, std::addressof(out_handle), das_address, das_size);
3246 3246
3247 SetReg64(system, 0, Convert<uint64_t>(ret)); 3247 SetReg64(system, 0, Convert<uint64_t>(ret));
3248 SetReg64(system, 1, Convert<uint64_t>(out_handle)); 3248 SetReg64(system, 1, Convert<uint64_t>(out_handle));
@@ -3396,7 +3396,7 @@ static void SvcWrap_DebugActiveProcess64(Core::System& system) {
3396 3396
3397 process_id = Convert<uint64_t>(GetReg64(system, 1)); 3397 process_id = Convert<uint64_t>(GetReg64(system, 1));
3398 3398
3399 ret = DebugActiveProcess64(system, &out_handle, process_id); 3399 ret = DebugActiveProcess64(system, std::addressof(out_handle), process_id);
3400 3400
3401 SetReg64(system, 0, Convert<uint64_t>(ret)); 3401 SetReg64(system, 0, Convert<uint64_t>(ret));
3402 SetReg64(system, 1, Convert<uint64_t>(out_handle)); 3402 SetReg64(system, 1, Convert<uint64_t>(out_handle));
@@ -3468,7 +3468,7 @@ static void SvcWrap_GetProcessList64(Core::System& system) {
3468 out_process_ids = Convert<uint64_t>(GetReg64(system, 1)); 3468 out_process_ids = Convert<uint64_t>(GetReg64(system, 1));
3469 max_out_count = Convert<int32_t>(GetReg64(system, 2)); 3469 max_out_count = Convert<int32_t>(GetReg64(system, 2));
3470 3470
3471 ret = GetProcessList64(system, &out_num_processes, out_process_ids, max_out_count); 3471 ret = GetProcessList64(system, std::addressof(out_num_processes), out_process_ids, max_out_count);
3472 3472
3473 SetReg64(system, 0, Convert<uint64_t>(ret)); 3473 SetReg64(system, 0, Convert<uint64_t>(ret));
3474 SetReg64(system, 1, Convert<uint64_t>(out_num_processes)); 3474 SetReg64(system, 1, Convert<uint64_t>(out_num_processes));
@@ -3486,7 +3486,7 @@ static void SvcWrap_GetThreadList64(Core::System& system) {
3486 max_out_count = Convert<int32_t>(GetReg64(system, 2)); 3486 max_out_count = Convert<int32_t>(GetReg64(system, 2));
3487 debug_handle = Convert<Handle>(GetReg64(system, 3)); 3487 debug_handle = Convert<Handle>(GetReg64(system, 3));
3488 3488
3489 ret = GetThreadList64(system, &out_num_threads, out_thread_ids, max_out_count, debug_handle); 3489 ret = GetThreadList64(system, std::addressof(out_num_threads), out_thread_ids, max_out_count, debug_handle);
3490 3490
3491 SetReg64(system, 0, Convert<uint64_t>(ret)); 3491 SetReg64(system, 0, Convert<uint64_t>(ret));
3492 SetReg64(system, 1, Convert<uint64_t>(out_num_threads)); 3492 SetReg64(system, 1, Convert<uint64_t>(out_num_threads));
@@ -3540,7 +3540,7 @@ static void SvcWrap_QueryDebugProcessMemory64(Core::System& system) {
3540 process_handle = Convert<Handle>(GetReg64(system, 2)); 3540 process_handle = Convert<Handle>(GetReg64(system, 2));
3541 address = Convert<uint64_t>(GetReg64(system, 3)); 3541 address = Convert<uint64_t>(GetReg64(system, 3));
3542 3542
3543 ret = QueryDebugProcessMemory64(system, out_memory_info, &out_page_info, process_handle, address); 3543 ret = QueryDebugProcessMemory64(system, out_memory_info, std::addressof(out_page_info), process_handle, address);
3544 3544
3545 SetReg64(system, 0, Convert<uint64_t>(ret)); 3545 SetReg64(system, 0, Convert<uint64_t>(ret));
3546 SetReg64(system, 1, Convert<uint64_t>(out_page_info)); 3546 SetReg64(system, 1, Convert<uint64_t>(out_page_info));
@@ -3611,7 +3611,7 @@ static void SvcWrap_GetDebugThreadParam64(Core::System& system) {
3611 thread_id = Convert<uint64_t>(GetReg64(system, 3)); 3611 thread_id = Convert<uint64_t>(GetReg64(system, 3));
3612 param = Convert<DebugThreadParam>(GetReg64(system, 4)); 3612 param = Convert<DebugThreadParam>(GetReg64(system, 4));
3613 3613
3614 ret = GetDebugThreadParam64(system, &out_64, &out_32, debug_handle, thread_id, param); 3614 ret = GetDebugThreadParam64(system, std::addressof(out_64), std::addressof(out_32), debug_handle, thread_id, param);
3615 3615
3616 SetReg64(system, 0, Convert<uint64_t>(ret)); 3616 SetReg64(system, 0, Convert<uint64_t>(ret));
3617 SetReg64(system, 1, Convert<uint64_t>(out_64)); 3617 SetReg64(system, 1, Convert<uint64_t>(out_64));
@@ -3630,7 +3630,7 @@ static void SvcWrap_GetSystemInfo64(Core::System& system) {
3630 handle = Convert<Handle>(GetReg64(system, 2)); 3630 handle = Convert<Handle>(GetReg64(system, 2));
3631 info_subtype = Convert<uint64_t>(GetReg64(system, 3)); 3631 info_subtype = Convert<uint64_t>(GetReg64(system, 3));
3632 3632
3633 ret = GetSystemInfo64(system, &out, info_type, handle, info_subtype); 3633 ret = GetSystemInfo64(system, std::addressof(out), info_type, handle, info_subtype);
3634 3634
3635 SetReg64(system, 0, Convert<uint64_t>(ret)); 3635 SetReg64(system, 0, Convert<uint64_t>(ret));
3636 SetReg64(system, 1, Convert<uint64_t>(out)); 3636 SetReg64(system, 1, Convert<uint64_t>(out));
@@ -3649,7 +3649,7 @@ static void SvcWrap_CreatePort64(Core::System& system) {
3649 is_light = Convert<bool>(GetReg64(system, 3)); 3649 is_light = Convert<bool>(GetReg64(system, 3));
3650 name = Convert<uint64_t>(GetReg64(system, 4)); 3650 name = Convert<uint64_t>(GetReg64(system, 4));
3651 3651
3652 ret = CreatePort64(system, &out_server_handle, &out_client_handle, max_sessions, is_light, name); 3652 ret = CreatePort64(system, std::addressof(out_server_handle), std::addressof(out_client_handle), max_sessions, is_light, name);
3653 3653
3654 SetReg64(system, 0, Convert<uint64_t>(ret)); 3654 SetReg64(system, 0, Convert<uint64_t>(ret));
3655 SetReg64(system, 1, Convert<uint64_t>(out_server_handle)); 3655 SetReg64(system, 1, Convert<uint64_t>(out_server_handle));
@@ -3666,7 +3666,7 @@ static void SvcWrap_ManageNamedPort64(Core::System& system) {
3666 name = Convert<uint64_t>(GetReg64(system, 1)); 3666 name = Convert<uint64_t>(GetReg64(system, 1));
3667 max_sessions = Convert<int32_t>(GetReg64(system, 2)); 3667 max_sessions = Convert<int32_t>(GetReg64(system, 2));
3668 3668
3669 ret = ManageNamedPort64(system, &out_server_handle, name, max_sessions); 3669 ret = ManageNamedPort64(system, std::addressof(out_server_handle), name, max_sessions);
3670 3670
3671 SetReg64(system, 0, Convert<uint64_t>(ret)); 3671 SetReg64(system, 0, Convert<uint64_t>(ret));
3672 SetReg64(system, 1, Convert<uint64_t>(out_server_handle)); 3672 SetReg64(system, 1, Convert<uint64_t>(out_server_handle));
@@ -3680,7 +3680,7 @@ static void SvcWrap_ConnectToPort64(Core::System& system) {
3680 3680
3681 port = Convert<Handle>(GetReg64(system, 1)); 3681 port = Convert<Handle>(GetReg64(system, 1));
3682 3682
3683 ret = ConnectToPort64(system, &out_handle, port); 3683 ret = ConnectToPort64(system, std::addressof(out_handle), port);
3684 3684
3685 SetReg64(system, 0, Convert<uint64_t>(ret)); 3685 SetReg64(system, 0, Convert<uint64_t>(ret));
3686 SetReg64(system, 1, Convert<uint64_t>(out_handle)); 3686 SetReg64(system, 1, Convert<uint64_t>(out_handle));
@@ -3752,7 +3752,7 @@ static void SvcWrap_QueryProcessMemory64(Core::System& system) {
3752 process_handle = Convert<Handle>(GetReg64(system, 2)); 3752 process_handle = Convert<Handle>(GetReg64(system, 2));
3753 address = Convert<uint64_t>(GetReg64(system, 3)); 3753 address = Convert<uint64_t>(GetReg64(system, 3));
3754 3754
3755 ret = QueryProcessMemory64(system, out_memory_info, &out_page_info, process_handle, address); 3755 ret = QueryProcessMemory64(system, out_memory_info, std::addressof(out_page_info), process_handle, address);
3756 3756
3757 SetReg64(system, 0, Convert<uint64_t>(ret)); 3757 SetReg64(system, 0, Convert<uint64_t>(ret));
3758 SetReg64(system, 1, Convert<uint64_t>(out_page_info)); 3758 SetReg64(system, 1, Convert<uint64_t>(out_page_info));
@@ -3806,7 +3806,7 @@ static void SvcWrap_CreateProcess64(Core::System& system) {
3806 caps = Convert<uint64_t>(GetReg64(system, 2)); 3806 caps = Convert<uint64_t>(GetReg64(system, 2));
3807 num_caps = Convert<int32_t>(GetReg64(system, 3)); 3807 num_caps = Convert<int32_t>(GetReg64(system, 3));
3808 3808
3809 ret = CreateProcess64(system, &out_handle, parameters, caps, num_caps); 3809 ret = CreateProcess64(system, std::addressof(out_handle), parameters, caps, num_caps);
3810 3810
3811 SetReg64(system, 0, Convert<uint64_t>(ret)); 3811 SetReg64(system, 0, Convert<uint64_t>(ret));
3812 SetReg64(system, 1, Convert<uint64_t>(out_handle)); 3812 SetReg64(system, 1, Convert<uint64_t>(out_handle));
@@ -3852,7 +3852,7 @@ static void SvcWrap_GetProcessInfo64(Core::System& system) {
3852 process_handle = Convert<Handle>(GetReg64(system, 1)); 3852 process_handle = Convert<Handle>(GetReg64(system, 1));
3853 info_type = Convert<ProcessInfoType>(GetReg64(system, 2)); 3853 info_type = Convert<ProcessInfoType>(GetReg64(system, 2));
3854 3854
3855 ret = GetProcessInfo64(system, &out_info, process_handle, info_type); 3855 ret = GetProcessInfo64(system, std::addressof(out_info), process_handle, info_type);
3856 3856
3857 SetReg64(system, 0, Convert<uint64_t>(ret)); 3857 SetReg64(system, 0, Convert<uint64_t>(ret));
3858 SetReg64(system, 1, Convert<uint64_t>(out_info)); 3858 SetReg64(system, 1, Convert<uint64_t>(out_info));
@@ -3863,7 +3863,7 @@ static void SvcWrap_CreateResourceLimit64(Core::System& system) {
3863 3863
3864 Handle out_handle{}; 3864 Handle out_handle{};
3865 3865
3866 ret = CreateResourceLimit64(system, &out_handle); 3866 ret = CreateResourceLimit64(system, std::addressof(out_handle));
3867 3867
3868 SetReg64(system, 0, Convert<uint64_t>(ret)); 3868 SetReg64(system, 0, Convert<uint64_t>(ret));
3869 SetReg64(system, 1, Convert<uint64_t>(out_handle)); 3869 SetReg64(system, 1, Convert<uint64_t>(out_handle));
diff --git a/src/core/hle/kernel/svc/svc_address_arbiter.cpp b/src/core/hle/kernel/svc/svc_address_arbiter.cpp
index 998bd3f22..22071731b 100644
--- a/src/core/hle/kernel/svc/svc_address_arbiter.cpp
+++ b/src/core/hle/kernel/svc/svc_address_arbiter.cpp
@@ -43,18 +43,9 @@ Result WaitForAddress(Core::System& system, VAddr address, ArbitrationType arb_t
43 address, arb_type, value, timeout_ns); 43 address, arb_type, value, timeout_ns);
44 44
45 // Validate input. 45 // Validate input.
46 if (IsKernelAddress(address)) { 46 R_UNLESS(!IsKernelAddress(address), ResultInvalidCurrentMemory);
47 LOG_ERROR(Kernel_SVC, "Attempting to wait on kernel address (address={:08X})", address); 47 R_UNLESS(Common::IsAligned(address, sizeof(s32)), ResultInvalidAddress);
48 return ResultInvalidCurrentMemory; 48 R_UNLESS(IsValidArbitrationType(arb_type), ResultInvalidEnumValue);
49 }
50 if (!Common::IsAligned(address, sizeof(s32))) {
51 LOG_ERROR(Kernel_SVC, "Wait address must be 4 byte aligned (address={:08X})", address);
52 return ResultInvalidAddress;
53 }
54 if (!IsValidArbitrationType(arb_type)) {
55 LOG_ERROR(Kernel_SVC, "Invalid arbitration type specified (type={})", arb_type);
56 return ResultInvalidEnumValue;
57 }
58 49
59 // Convert timeout from nanoseconds to ticks. 50 // Convert timeout from nanoseconds to ticks.
60 s64 timeout{}; 51 s64 timeout{};
@@ -72,7 +63,8 @@ Result WaitForAddress(Core::System& system, VAddr address, ArbitrationType arb_t
72 timeout = timeout_ns; 63 timeout = timeout_ns;
73 } 64 }
74 65
75 return GetCurrentProcess(system.Kernel()).WaitAddressArbiter(address, arb_type, value, timeout); 66 R_RETURN(
67 GetCurrentProcess(system.Kernel()).WaitAddressArbiter(address, arb_type, value, timeout));
76} 68}
77 69
78// Signals to an address (via Address Arbiter) 70// Signals to an address (via Address Arbiter)
@@ -82,41 +74,32 @@ Result SignalToAddress(Core::System& system, VAddr address, SignalType signal_ty
82 address, signal_type, value, count); 74 address, signal_type, value, count);
83 75
84 // Validate input. 76 // Validate input.
85 if (IsKernelAddress(address)) { 77 R_UNLESS(!IsKernelAddress(address), ResultInvalidCurrentMemory);
86 LOG_ERROR(Kernel_SVC, "Attempting to signal to a kernel address (address={:08X})", address); 78 R_UNLESS(Common::IsAligned(address, sizeof(s32)), ResultInvalidAddress);
87 return ResultInvalidCurrentMemory; 79 R_UNLESS(IsValidSignalType(signal_type), ResultInvalidEnumValue);
88 }
89 if (!Common::IsAligned(address, sizeof(s32))) {
90 LOG_ERROR(Kernel_SVC, "Signaled address must be 4 byte aligned (address={:08X})", address);
91 return ResultInvalidAddress;
92 }
93 if (!IsValidSignalType(signal_type)) {
94 LOG_ERROR(Kernel_SVC, "Invalid signal type specified (type={})", signal_type);
95 return ResultInvalidEnumValue;
96 }
97 80
98 return GetCurrentProcess(system.Kernel()) 81 R_RETURN(GetCurrentProcess(system.Kernel())
99 .SignalAddressArbiter(address, signal_type, value, count); 82 .SignalAddressArbiter(address, signal_type, value, count));
100} 83}
101 84
102Result WaitForAddress64(Core::System& system, VAddr address, ArbitrationType arb_type, s32 value, 85Result WaitForAddress64(Core::System& system, VAddr address, ArbitrationType arb_type, s32 value,
103 s64 timeout_ns) { 86 s64 timeout_ns) {
104 return WaitForAddress(system, address, arb_type, value, timeout_ns); 87 R_RETURN(WaitForAddress(system, address, arb_type, value, timeout_ns));
105} 88}
106 89
107Result SignalToAddress64(Core::System& system, VAddr address, SignalType signal_type, s32 value, 90Result SignalToAddress64(Core::System& system, VAddr address, SignalType signal_type, s32 value,
108 s32 count) { 91 s32 count) {
109 return SignalToAddress(system, address, signal_type, value, count); 92 R_RETURN(SignalToAddress(system, address, signal_type, value, count));
110} 93}
111 94
112Result WaitForAddress64From32(Core::System& system, u32 address, ArbitrationType arb_type, 95Result WaitForAddress64From32(Core::System& system, u32 address, ArbitrationType arb_type,
113 s32 value, s64 timeout_ns) { 96 s32 value, s64 timeout_ns) {
114 return WaitForAddress(system, address, arb_type, value, timeout_ns); 97 R_RETURN(WaitForAddress(system, address, arb_type, value, timeout_ns));
115} 98}
116 99
117Result SignalToAddress64From32(Core::System& system, u32 address, SignalType signal_type, s32 value, 100Result SignalToAddress64From32(Core::System& system, u32 address, SignalType signal_type, s32 value,
118 s32 count) { 101 s32 count) {
119 return SignalToAddress(system, address, signal_type, value, count); 102 R_RETURN(SignalToAddress(system, address, signal_type, value, count));
120} 103}
121 104
122} // namespace Kernel::Svc 105} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_code_memory.cpp b/src/core/hle/kernel/svc/svc_code_memory.cpp
index 8bed747af..43feab986 100644
--- a/src/core/hle/kernel/svc/svc_code_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_code_memory.cpp
@@ -1,6 +1,7 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project 1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later 2// SPDX-License-Identifier: GPL-2.0-or-later
3 3
4#include "common/scope_exit.h"
4#include "core/core.h" 5#include "core/core.h"
5#include "core/hle/kernel/k_code_memory.h" 6#include "core/hle/kernel/k_code_memory.h"
6#include "core/hle/kernel/k_process.h" 7#include "core/hle/kernel/k_process.h"
@@ -44,6 +45,7 @@ Result CreateCodeMemory(Core::System& system, Handle* out, VAddr address, uint64
44 45
45 KCodeMemory* code_mem = KCodeMemory::Create(kernel); 46 KCodeMemory* code_mem = KCodeMemory::Create(kernel);
46 R_UNLESS(code_mem != nullptr, ResultOutOfResource); 47 R_UNLESS(code_mem != nullptr, ResultOutOfResource);
48 SCOPE_EXIT({ code_mem->Close(); });
47 49
48 // Verify that the region is in range. 50 // Verify that the region is in range.
49 R_UNLESS(GetCurrentProcess(system.Kernel()).PageTable().Contains(address, size), 51 R_UNLESS(GetCurrentProcess(system.Kernel()).PageTable().Contains(address, size),
@@ -58,9 +60,7 @@ Result CreateCodeMemory(Core::System& system, Handle* out, VAddr address, uint64
58 // Add the code memory to the handle table. 60 // Add the code memory to the handle table.
59 R_TRY(GetCurrentProcess(system.Kernel()).GetHandleTable().Add(out, code_mem)); 61 R_TRY(GetCurrentProcess(system.Kernel()).GetHandleTable().Add(out, code_mem));
60 62
61 code_mem->Close(); 63 R_SUCCEED();
62
63 return ResultSuccess;
64} 64}
65 65
66Result ControlCodeMemory(Core::System& system, Handle code_memory_handle, 66Result ControlCodeMemory(Core::System& system, Handle code_memory_handle,
@@ -140,10 +140,10 @@ Result ControlCodeMemory(Core::System& system, Handle code_memory_handle,
140 R_TRY(code_mem->UnmapFromOwner(address, size)); 140 R_TRY(code_mem->UnmapFromOwner(address, size));
141 } break; 141 } break;
142 default: 142 default:
143 return ResultInvalidEnumValue; 143 R_THROW(ResultInvalidEnumValue);
144 } 144 }
145 145
146 return ResultSuccess; 146 R_SUCCEED();
147} 147}
148 148
149Result CreateCodeMemory64(Core::System& system, Handle* out_handle, uint64_t address, 149Result CreateCodeMemory64(Core::System& system, Handle* out_handle, uint64_t address,
diff --git a/src/core/hle/kernel/svc/svc_condition_variable.cpp b/src/core/hle/kernel/svc/svc_condition_variable.cpp
index 8ad1a0b8f..648ed23d0 100644
--- a/src/core/hle/kernel/svc/svc_condition_variable.cpp
+++ b/src/core/hle/kernel/svc/svc_condition_variable.cpp
@@ -17,14 +17,8 @@ Result WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_ke
17 cv_key, tag, timeout_ns); 17 cv_key, tag, timeout_ns);
18 18
19 // Validate input. 19 // Validate input.
20 if (IsKernelAddress(address)) { 20 R_UNLESS(!IsKernelAddress(address), ResultInvalidCurrentMemory);
21 LOG_ERROR(Kernel_SVC, "Attempted to wait on kernel address (address={:08X})", address); 21 R_UNLESS(Common::IsAligned(address, sizeof(s32)), ResultInvalidAddress);
22 return ResultInvalidCurrentMemory;
23 }
24 if (!Common::IsAligned(address, sizeof(s32))) {
25 LOG_ERROR(Kernel_SVC, "Address must be 4 byte aligned (address={:08X})", address);
26 return ResultInvalidAddress;
27 }
28 22
29 // Convert timeout from nanoseconds to ticks. 23 // Convert timeout from nanoseconds to ticks.
30 s64 timeout{}; 24 s64 timeout{};
@@ -43,8 +37,9 @@ Result WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_ke
43 } 37 }
44 38
45 // Wait on the condition variable. 39 // Wait on the condition variable.
46 return GetCurrentProcess(system.Kernel()) 40 R_RETURN(
47 .WaitConditionVariable(address, Common::AlignDown(cv_key, sizeof(u32)), tag, timeout); 41 GetCurrentProcess(system.Kernel())
42 .WaitConditionVariable(address, Common::AlignDown(cv_key, sizeof(u32)), tag, timeout));
48} 43}
49 44
50/// Signal process wide key 45/// Signal process wide key
diff --git a/src/core/hle/kernel/svc/svc_event.cpp b/src/core/hle/kernel/svc/svc_event.cpp
index a948493e8..901202e6a 100644
--- a/src/core/hle/kernel/svc/svc_event.cpp
+++ b/src/core/hle/kernel/svc/svc_event.cpp
@@ -21,7 +21,7 @@ Result SignalEvent(Core::System& system, Handle event_handle) {
21 KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle); 21 KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle);
22 R_UNLESS(event.IsNotNull(), ResultInvalidHandle); 22 R_UNLESS(event.IsNotNull(), ResultInvalidHandle);
23 23
24 return event->Signal(); 24 R_RETURN(event->Signal());
25} 25}
26 26
27Result ClearEvent(Core::System& system, Handle event_handle) { 27Result ClearEvent(Core::System& system, Handle event_handle) {
@@ -34,7 +34,7 @@ Result ClearEvent(Core::System& system, Handle event_handle) {
34 { 34 {
35 KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle); 35 KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle);
36 if (event.IsNotNull()) { 36 if (event.IsNotNull()) {
37 return event->Clear(); 37 R_RETURN(event->Clear());
38 } 38 }
39 } 39 }
40 40
@@ -42,13 +42,11 @@ Result ClearEvent(Core::System& system, Handle event_handle) {
42 { 42 {
43 KScopedAutoObject readable_event = handle_table.GetObject<KReadableEvent>(event_handle); 43 KScopedAutoObject readable_event = handle_table.GetObject<KReadableEvent>(event_handle);
44 if (readable_event.IsNotNull()) { 44 if (readable_event.IsNotNull()) {
45 return readable_event->Clear(); 45 R_RETURN(readable_event->Clear());
46 } 46 }
47 } 47 }
48 48
49 LOG_ERROR(Kernel_SVC, "Event handle does not exist, event_handle=0x{:08X}", event_handle); 49 R_THROW(ResultInvalidHandle);
50
51 return ResultInvalidHandle;
52} 50}
53 51
54Result CreateEvent(Core::System& system, Handle* out_write, Handle* out_read) { 52Result CreateEvent(Core::System& system, Handle* out_write, Handle* out_read) {
@@ -85,15 +83,13 @@ Result CreateEvent(Core::System& system, Handle* out_write, Handle* out_read) {
85 // Add the event to the handle table. 83 // Add the event to the handle table.
86 R_TRY(handle_table.Add(out_write, event)); 84 R_TRY(handle_table.Add(out_write, event));
87 85
88 // Ensure that we maintaing a clean handle state on exit. 86 // Ensure that we maintain a clean handle state on exit.
89 auto handle_guard = SCOPE_GUARD({ handle_table.Remove(*out_write); }); 87 ON_RESULT_FAILURE {
88 handle_table.Remove(*out_write);
89 };
90 90
91 // Add the readable event to the handle table. 91 // Add the readable event to the handle table.
92 R_TRY(handle_table.Add(out_read, std::addressof(event->GetReadableEvent()))); 92 R_RETURN(handle_table.Add(out_read, std::addressof(event->GetReadableEvent())));
93
94 // We succeeded.
95 handle_guard.Cancel();
96 return ResultSuccess;
97} 93}
98 94
99Result SignalEvent64(Core::System& system, Handle event_handle) { 95Result SignalEvent64(Core::System& system, Handle event_handle) {
diff --git a/src/core/hle/kernel/svc/svc_info.cpp b/src/core/hle/kernel/svc/svc_info.cpp
index cbed4dc8c..04b6d6964 100644
--- a/src/core/hle/kernel/svc/svc_info.cpp
+++ b/src/core/hle/kernel/svc/svc_info.cpp
@@ -38,126 +38,110 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
38 case InfoType::UsedNonSystemMemorySize: 38 case InfoType::UsedNonSystemMemorySize:
39 case InfoType::IsApplication: 39 case InfoType::IsApplication:
40 case InfoType::FreeThreadCount: { 40 case InfoType::FreeThreadCount: {
41 if (info_sub_id != 0) { 41 R_UNLESS(info_sub_id == 0, ResultInvalidEnumValue);
42 LOG_ERROR(Kernel_SVC, "Info sub id is non zero! info_id={}, info_sub_id={}", info_id,
43 info_sub_id);
44 return ResultInvalidEnumValue;
45 }
46 42
47 const auto& handle_table = GetCurrentProcess(system.Kernel()).GetHandleTable(); 43 const auto& handle_table = GetCurrentProcess(system.Kernel()).GetHandleTable();
48 KScopedAutoObject process = handle_table.GetObject<KProcess>(handle); 44 KScopedAutoObject process = handle_table.GetObject<KProcess>(handle);
49 if (process.IsNull()) { 45 R_UNLESS(process.IsNotNull(), ResultInvalidHandle);
50 LOG_ERROR(Kernel_SVC, "Process is not valid! info_id={}, info_sub_id={}, handle={:08X}",
51 info_id, info_sub_id, handle);
52 return ResultInvalidHandle;
53 }
54 46
55 switch (info_id_type) { 47 switch (info_id_type) {
56 case InfoType::CoreMask: 48 case InfoType::CoreMask:
57 *result = process->GetCoreMask(); 49 *result = process->GetCoreMask();
58 return ResultSuccess; 50 R_SUCCEED();
59 51
60 case InfoType::PriorityMask: 52 case InfoType::PriorityMask:
61 *result = process->GetPriorityMask(); 53 *result = process->GetPriorityMask();
62 return ResultSuccess; 54 R_SUCCEED();
63 55
64 case InfoType::AliasRegionAddress: 56 case InfoType::AliasRegionAddress:
65 *result = process->PageTable().GetAliasRegionStart(); 57 *result = process->PageTable().GetAliasRegionStart();
66 return ResultSuccess; 58 R_SUCCEED();
67 59
68 case InfoType::AliasRegionSize: 60 case InfoType::AliasRegionSize:
69 *result = process->PageTable().GetAliasRegionSize(); 61 *result = process->PageTable().GetAliasRegionSize();
70 return ResultSuccess; 62 R_SUCCEED();
71 63
72 case InfoType::HeapRegionAddress: 64 case InfoType::HeapRegionAddress:
73 *result = process->PageTable().GetHeapRegionStart(); 65 *result = process->PageTable().GetHeapRegionStart();
74 return ResultSuccess; 66 R_SUCCEED();
75 67
76 case InfoType::HeapRegionSize: 68 case InfoType::HeapRegionSize:
77 *result = process->PageTable().GetHeapRegionSize(); 69 *result = process->PageTable().GetHeapRegionSize();
78 return ResultSuccess; 70 R_SUCCEED();
79 71
80 case InfoType::AslrRegionAddress: 72 case InfoType::AslrRegionAddress:
81 *result = process->PageTable().GetAliasCodeRegionStart(); 73 *result = process->PageTable().GetAliasCodeRegionStart();
82 return ResultSuccess; 74 R_SUCCEED();
83 75
84 case InfoType::AslrRegionSize: 76 case InfoType::AslrRegionSize:
85 *result = process->PageTable().GetAliasCodeRegionSize(); 77 *result = process->PageTable().GetAliasCodeRegionSize();
86 return ResultSuccess; 78 R_SUCCEED();
87 79
88 case InfoType::StackRegionAddress: 80 case InfoType::StackRegionAddress:
89 *result = process->PageTable().GetStackRegionStart(); 81 *result = process->PageTable().GetStackRegionStart();
90 return ResultSuccess; 82 R_SUCCEED();
91 83
92 case InfoType::StackRegionSize: 84 case InfoType::StackRegionSize:
93 *result = process->PageTable().GetStackRegionSize(); 85 *result = process->PageTable().GetStackRegionSize();
94 return ResultSuccess; 86 R_SUCCEED();
95 87
96 case InfoType::TotalMemorySize: 88 case InfoType::TotalMemorySize:
97 *result = process->GetTotalPhysicalMemoryAvailable(); 89 *result = process->GetTotalPhysicalMemoryAvailable();
98 return ResultSuccess; 90 R_SUCCEED();
99 91
100 case InfoType::UsedMemorySize: 92 case InfoType::UsedMemorySize:
101 *result = process->GetTotalPhysicalMemoryUsed(); 93 *result = process->GetTotalPhysicalMemoryUsed();
102 return ResultSuccess; 94 R_SUCCEED();
103 95
104 case InfoType::SystemResourceSizeTotal: 96 case InfoType::SystemResourceSizeTotal:
105 *result = process->GetSystemResourceSize(); 97 *result = process->GetSystemResourceSize();
106 return ResultSuccess; 98 R_SUCCEED();
107 99
108 case InfoType::SystemResourceSizeUsed: 100 case InfoType::SystemResourceSizeUsed:
109 LOG_WARNING(Kernel_SVC, "(STUBBED) Attempted to query system resource usage"); 101 LOG_WARNING(Kernel_SVC, "(STUBBED) Attempted to query system resource usage");
110 *result = process->GetSystemResourceUsage(); 102 *result = process->GetSystemResourceUsage();
111 return ResultSuccess; 103 R_SUCCEED();
112 104
113 case InfoType::ProgramId: 105 case InfoType::ProgramId:
114 *result = process->GetProgramID(); 106 *result = process->GetProgramId();
115 return ResultSuccess; 107 R_SUCCEED();
116 108
117 case InfoType::UserExceptionContextAddress: 109 case InfoType::UserExceptionContextAddress:
118 *result = process->GetProcessLocalRegionAddress(); 110 *result = process->GetProcessLocalRegionAddress();
119 return ResultSuccess; 111 R_SUCCEED();
120 112
121 case InfoType::TotalNonSystemMemorySize: 113 case InfoType::TotalNonSystemMemorySize:
122 *result = process->GetTotalPhysicalMemoryAvailableWithoutSystemResource(); 114 *result = process->GetTotalPhysicalMemoryAvailableWithoutSystemResource();
123 return ResultSuccess; 115 R_SUCCEED();
124 116
125 case InfoType::UsedNonSystemMemorySize: 117 case InfoType::UsedNonSystemMemorySize:
126 *result = process->GetTotalPhysicalMemoryUsedWithoutSystemResource(); 118 *result = process->GetTotalPhysicalMemoryUsedWithoutSystemResource();
127 return ResultSuccess; 119 R_SUCCEED();
128 120
129 case InfoType::IsApplication: 121 case InfoType::IsApplication:
130 LOG_WARNING(Kernel_SVC, "(STUBBED) Assuming process is application"); 122 LOG_WARNING(Kernel_SVC, "(STUBBED) Assuming process is application");
131 *result = true; 123 *result = true;
132 return ResultSuccess; 124 R_SUCCEED();
133 125
134 case InfoType::FreeThreadCount: 126 case InfoType::FreeThreadCount:
135 *result = process->GetFreeThreadCount(); 127 *result = process->GetFreeThreadCount();
136 return ResultSuccess; 128 R_SUCCEED();
137 129
138 default: 130 default:
139 break; 131 break;
140 } 132 }
141 133
142 LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id); 134 LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id);
143 return ResultInvalidEnumValue; 135 R_THROW(ResultInvalidEnumValue);
144 } 136 }
145 137
146 case InfoType::DebuggerAttached: 138 case InfoType::DebuggerAttached:
147 *result = 0; 139 *result = 0;
148 return ResultSuccess; 140 R_SUCCEED();
149 141
150 case InfoType::ResourceLimit: { 142 case InfoType::ResourceLimit: {
151 if (handle != 0) { 143 R_UNLESS(handle == 0, ResultInvalidHandle);
152 LOG_ERROR(Kernel, "Handle is non zero! handle={:08X}", handle); 144 R_UNLESS(info_sub_id == 0, ResultInvalidCombination);
153 return ResultInvalidHandle;
154 }
155
156 if (info_sub_id != 0) {
157 LOG_ERROR(Kernel, "Info sub id is non zero! info_id={}, info_sub_id={}", info_id,
158 info_sub_id);
159 return ResultInvalidCombination;
160 }
161 145
162 KProcess* const current_process = GetCurrentProcessPointer(system.Kernel()); 146 KProcess* const current_process = GetCurrentProcessPointer(system.Kernel());
163 KHandleTable& handle_table = current_process->GetHandleTable(); 147 KHandleTable& handle_table = current_process->GetHandleTable();
@@ -165,44 +149,35 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
165 if (!resource_limit) { 149 if (!resource_limit) {
166 *result = Svc::InvalidHandle; 150 *result = Svc::InvalidHandle;
167 // Yes, the kernel considers this a successful operation. 151 // Yes, the kernel considers this a successful operation.
168 return ResultSuccess; 152 R_SUCCEED();
169 } 153 }
170 154
171 Handle resource_handle{}; 155 Handle resource_handle{};
172 R_TRY(handle_table.Add(&resource_handle, resource_limit)); 156 R_TRY(handle_table.Add(std::addressof(resource_handle), resource_limit));
173 157
174 *result = resource_handle; 158 *result = resource_handle;
175 return ResultSuccess; 159 R_SUCCEED();
176 } 160 }
177 161
178 case InfoType::RandomEntropy: 162 case InfoType::RandomEntropy:
179 if (handle != 0) { 163 R_UNLESS(handle == 0, ResultInvalidHandle);
180 LOG_ERROR(Kernel_SVC, "Process Handle is non zero, expected 0 result but got {:016X}", 164 R_UNLESS(info_sub_id < KProcess::RANDOM_ENTROPY_SIZE, ResultInvalidCombination);
181 handle);
182 return ResultInvalidHandle;
183 }
184
185 if (info_sub_id >= KProcess::RANDOM_ENTROPY_SIZE) {
186 LOG_ERROR(Kernel_SVC, "Entropy size is out of range, expected {} but got {}",
187 KProcess::RANDOM_ENTROPY_SIZE, info_sub_id);
188 return ResultInvalidCombination;
189 }
190 165
191 *result = GetCurrentProcess(system.Kernel()).GetRandomEntropy(info_sub_id); 166 *result = GetCurrentProcess(system.Kernel()).GetRandomEntropy(info_sub_id);
192 return ResultSuccess; 167 R_SUCCEED();
193 168
194 case InfoType::InitialProcessIdRange: 169 case InfoType::InitialProcessIdRange:
195 LOG_WARNING(Kernel_SVC, 170 LOG_WARNING(Kernel_SVC,
196 "(STUBBED) Attempted to query privileged process id bounds, returned 0"); 171 "(STUBBED) Attempted to query privileged process id bounds, returned 0");
197 *result = 0; 172 *result = 0;
198 return ResultSuccess; 173 R_SUCCEED();
199 174
200 case InfoType::ThreadTickCount: { 175 case InfoType::ThreadTickCount: {
201 constexpr u64 num_cpus = 4; 176 constexpr u64 num_cpus = 4;
202 if (info_sub_id != 0xFFFFFFFFFFFFFFFF && info_sub_id >= num_cpus) { 177 if (info_sub_id != 0xFFFFFFFFFFFFFFFF && info_sub_id >= num_cpus) {
203 LOG_ERROR(Kernel_SVC, "Core count is out of range, expected {} but got {}", num_cpus, 178 LOG_ERROR(Kernel_SVC, "Core count is out of range, expected {} but got {}", num_cpus,
204 info_sub_id); 179 info_sub_id);
205 return ResultInvalidCombination; 180 R_THROW(ResultInvalidCombination);
206 } 181 }
207 182
208 KScopedAutoObject thread = GetCurrentProcess(system.Kernel()) 183 KScopedAutoObject thread = GetCurrentProcess(system.Kernel())
@@ -211,7 +186,7 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
211 if (thread.IsNull()) { 186 if (thread.IsNull()) {
212 LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", 187 LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}",
213 static_cast<Handle>(handle)); 188 static_cast<Handle>(handle));
214 return ResultInvalidHandle; 189 R_THROW(ResultInvalidHandle);
215 } 190 }
216 191
217 const auto& core_timing = system.CoreTiming(); 192 const auto& core_timing = system.CoreTiming();
@@ -230,7 +205,7 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
230 } 205 }
231 206
232 *result = out_ticks; 207 *result = out_ticks;
233 return ResultSuccess; 208 R_SUCCEED();
234 } 209 }
235 case InfoType::IdleTickCount: { 210 case InfoType::IdleTickCount: {
236 // Verify the input handle is invalid. 211 // Verify the input handle is invalid.
@@ -244,7 +219,7 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
244 219
245 // Get the idle tick count. 220 // Get the idle tick count.
246 *result = system.Kernel().CurrentScheduler()->GetIdleThread()->GetCpuTime(); 221 *result = system.Kernel().CurrentScheduler()->GetIdleThread()->GetCpuTime();
247 return ResultSuccess; 222 R_SUCCEED();
248 } 223 }
249 case InfoType::MesosphereCurrentProcess: { 224 case InfoType::MesosphereCurrentProcess: {
250 // Verify the input handle is invalid. 225 // Verify the input handle is invalid.
@@ -259,17 +234,17 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
259 234
260 // Get a new handle for the current process. 235 // Get a new handle for the current process.
261 Handle tmp; 236 Handle tmp;
262 R_TRY(handle_table.Add(&tmp, current_process)); 237 R_TRY(handle_table.Add(std::addressof(tmp), current_process));
263 238
264 // Set the output. 239 // Set the output.
265 *result = tmp; 240 *result = tmp;
266 241
267 // We succeeded. 242 // We succeeded.
268 return ResultSuccess; 243 R_SUCCEED();
269 } 244 }
270 default: 245 default:
271 LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id); 246 LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id);
272 return ResultInvalidEnumValue; 247 R_THROW(ResultInvalidEnumValue);
273 } 248 }
274} 249}
275 250
diff --git a/src/core/hle/kernel/svc/svc_ipc.cpp b/src/core/hle/kernel/svc/svc_ipc.cpp
index a7a2c3b92..46fd0f2ea 100644
--- a/src/core/hle/kernel/svc/svc_ipc.cpp
+++ b/src/core/hle/kernel/svc/svc_ipc.cpp
@@ -19,7 +19,7 @@ Result SendSyncRequest(Core::System& system, Handle handle) {
19 19
20 LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName()); 20 LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
21 21
22 return session->SendSyncRequest(); 22 R_RETURN(session->SendSyncRequest());
23} 23}
24 24
25Result SendSyncRequestWithUserBuffer(Core::System& system, uint64_t message_buffer, 25Result SendSyncRequestWithUserBuffer(Core::System& system, uint64_t message_buffer,
@@ -79,10 +79,10 @@ Result ReplyAndReceive(Core::System& system, s32* out_index, uint64_t handles_ad
79 while (true) { 79 while (true) {
80 // Wait for an object. 80 // Wait for an object.
81 s32 index; 81 s32 index;
82 Result result = KSynchronizationObject::Wait(kernel, &index, objs.data(), 82 Result result = KSynchronizationObject::Wait(kernel, std::addressof(index), objs.data(),
83 static_cast<s32>(objs.size()), timeout_ns); 83 static_cast<s32>(objs.size()), timeout_ns);
84 if (result == ResultTimedOut) { 84 if (result == ResultTimedOut) {
85 return result; 85 R_RETURN(result);
86 } 86 }
87 87
88 // Receive the request. 88 // Receive the request.
@@ -97,7 +97,7 @@ Result ReplyAndReceive(Core::System& system, s32* out_index, uint64_t handles_ad
97 } 97 }
98 98
99 *out_index = index; 99 *out_index = index;
100 return result; 100 R_RETURN(result);
101 } 101 }
102} 102}
103 103
diff --git a/src/core/hle/kernel/svc/svc_lock.cpp b/src/core/hle/kernel/svc/svc_lock.cpp
index f3d3e140b..3681279d6 100644
--- a/src/core/hle/kernel/svc/svc_lock.cpp
+++ b/src/core/hle/kernel/svc/svc_lock.cpp
@@ -14,17 +14,10 @@ Result ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address,
14 thread_handle, address, tag); 14 thread_handle, address, tag);
15 15
16 // Validate the input address. 16 // Validate the input address.
17 if (IsKernelAddress(address)) { 17 R_UNLESS(!IsKernelAddress(address), ResultInvalidCurrentMemory);
18 LOG_ERROR(Kernel_SVC, "Attempting to arbitrate a lock on a kernel address (address={:08X})", 18 R_UNLESS(Common::IsAligned(address, sizeof(u32)), ResultInvalidAddress);
19 address);
20 return ResultInvalidCurrentMemory;
21 }
22 if (!Common::IsAligned(address, sizeof(u32))) {
23 LOG_ERROR(Kernel_SVC, "Input address must be 4 byte aligned (address: {:08X})", address);
24 return ResultInvalidAddress;
25 }
26 19
27 return GetCurrentProcess(system.Kernel()).WaitForAddress(thread_handle, address, tag); 20 R_RETURN(GetCurrentProcess(system.Kernel()).WaitForAddress(thread_handle, address, tag));
28} 21}
29 22
30/// Unlock a mutex 23/// Unlock a mutex
@@ -32,18 +25,10 @@ Result ArbitrateUnlock(Core::System& system, VAddr address) {
32 LOG_TRACE(Kernel_SVC, "called address=0x{:X}", address); 25 LOG_TRACE(Kernel_SVC, "called address=0x{:X}", address);
33 26
34 // Validate the input address. 27 // Validate the input address.
35 if (IsKernelAddress(address)) { 28 R_UNLESS(!IsKernelAddress(address), ResultInvalidCurrentMemory);
36 LOG_ERROR(Kernel_SVC, 29 R_UNLESS(Common::IsAligned(address, sizeof(u32)), ResultInvalidAddress);
37 "Attempting to arbitrate an unlock on a kernel address (address={:08X})",
38 address);
39 return ResultInvalidCurrentMemory;
40 }
41 if (!Common::IsAligned(address, sizeof(u32))) {
42 LOG_ERROR(Kernel_SVC, "Input address must be 4 byte aligned (address: {:08X})", address);
43 return ResultInvalidAddress;
44 }
45 30
46 return GetCurrentProcess(system.Kernel()).SignalToAddress(address); 31 R_RETURN(GetCurrentProcess(system.Kernel()).SignalToAddress(address));
47} 32}
48 33
49Result ArbitrateLock64(Core::System& system, Handle thread_handle, uint64_t address, uint32_t tag) { 34Result ArbitrateLock64(Core::System& system, Handle thread_handle, uint64_t address, uint32_t tag) {
diff --git a/src/core/hle/kernel/svc/svc_memory.cpp b/src/core/hle/kernel/svc/svc_memory.cpp
index 214bcd073..4db25a3b7 100644
--- a/src/core/hle/kernel/svc/svc_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_memory.cpp
@@ -33,49 +33,49 @@ Result MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, VAd
33 u64 size) { 33 u64 size) {
34 if (!Common::Is4KBAligned(dst_addr)) { 34 if (!Common::Is4KBAligned(dst_addr)) {
35 LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr); 35 LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr);
36 return ResultInvalidAddress; 36 R_THROW(ResultInvalidAddress);
37 } 37 }
38 38
39 if (!Common::Is4KBAligned(src_addr)) { 39 if (!Common::Is4KBAligned(src_addr)) {
40 LOG_ERROR(Kernel_SVC, "Source address is not aligned to 4KB, 0x{:016X}", src_addr); 40 LOG_ERROR(Kernel_SVC, "Source address is not aligned to 4KB, 0x{:016X}", src_addr);
41 return ResultInvalidSize; 41 R_THROW(ResultInvalidSize);
42 } 42 }
43 43
44 if (size == 0) { 44 if (size == 0) {
45 LOG_ERROR(Kernel_SVC, "Size is 0"); 45 LOG_ERROR(Kernel_SVC, "Size is 0");
46 return ResultInvalidSize; 46 R_THROW(ResultInvalidSize);
47 } 47 }
48 48
49 if (!Common::Is4KBAligned(size)) { 49 if (!Common::Is4KBAligned(size)) {
50 LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:016X}", size); 50 LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:016X}", size);
51 return ResultInvalidSize; 51 R_THROW(ResultInvalidSize);
52 } 52 }
53 53
54 if (!IsValidAddressRange(dst_addr, size)) { 54 if (!IsValidAddressRange(dst_addr, size)) {
55 LOG_ERROR(Kernel_SVC, 55 LOG_ERROR(Kernel_SVC,
56 "Destination is not a valid address range, addr=0x{:016X}, size=0x{:016X}", 56 "Destination is not a valid address range, addr=0x{:016X}, size=0x{:016X}",
57 dst_addr, size); 57 dst_addr, size);
58 return ResultInvalidCurrentMemory; 58 R_THROW(ResultInvalidCurrentMemory);
59 } 59 }
60 60
61 if (!IsValidAddressRange(src_addr, size)) { 61 if (!IsValidAddressRange(src_addr, size)) {
62 LOG_ERROR(Kernel_SVC, "Source is not a valid address range, addr=0x{:016X}, size=0x{:016X}", 62 LOG_ERROR(Kernel_SVC, "Source is not a valid address range, addr=0x{:016X}, size=0x{:016X}",
63 src_addr, size); 63 src_addr, size);
64 return ResultInvalidCurrentMemory; 64 R_THROW(ResultInvalidCurrentMemory);
65 } 65 }
66 66
67 if (!manager.IsInsideAddressSpace(src_addr, size)) { 67 if (!manager.IsInsideAddressSpace(src_addr, size)) {
68 LOG_ERROR(Kernel_SVC, 68 LOG_ERROR(Kernel_SVC,
69 "Source is not within the address space, addr=0x{:016X}, size=0x{:016X}", 69 "Source is not within the address space, addr=0x{:016X}, size=0x{:016X}",
70 src_addr, size); 70 src_addr, size);
71 return ResultInvalidCurrentMemory; 71 R_THROW(ResultInvalidCurrentMemory);
72 } 72 }
73 73
74 if (manager.IsOutsideStackRegion(dst_addr, size)) { 74 if (manager.IsOutsideStackRegion(dst_addr, size)) {
75 LOG_ERROR(Kernel_SVC, 75 LOG_ERROR(Kernel_SVC,
76 "Destination is not within the stack region, addr=0x{:016X}, size=0x{:016X}", 76 "Destination is not within the stack region, addr=0x{:016X}, size=0x{:016X}",
77 dst_addr, size); 77 dst_addr, size);
78 return ResultInvalidMemoryRegion; 78 R_THROW(ResultInvalidMemoryRegion);
79 } 79 }
80 80
81 if (manager.IsInsideHeapRegion(dst_addr, size)) { 81 if (manager.IsInsideHeapRegion(dst_addr, size)) {
@@ -83,7 +83,7 @@ Result MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, VAd
83 "Destination does not fit within the heap region, addr=0x{:016X}, " 83 "Destination does not fit within the heap region, addr=0x{:016X}, "
84 "size=0x{:016X}", 84 "size=0x{:016X}",
85 dst_addr, size); 85 dst_addr, size);
86 return ResultInvalidMemoryRegion; 86 R_THROW(ResultInvalidMemoryRegion);
87 } 87 }
88 88
89 if (manager.IsInsideAliasRegion(dst_addr, size)) { 89 if (manager.IsInsideAliasRegion(dst_addr, size)) {
@@ -91,10 +91,10 @@ Result MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, VAd
91 "Destination does not fit within the map region, addr=0x{:016X}, " 91 "Destination does not fit within the map region, addr=0x{:016X}, "
92 "size=0x{:016X}", 92 "size=0x{:016X}",
93 dst_addr, size); 93 dst_addr, size);
94 return ResultInvalidMemoryRegion; 94 R_THROW(ResultInvalidMemoryRegion);
95 } 95 }
96 96
97 return ResultSuccess; 97 R_SUCCEED();
98} 98}
99 99
100} // namespace 100} // namespace
@@ -117,7 +117,7 @@ Result SetMemoryPermission(Core::System& system, VAddr address, u64 size, Memory
117 R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory); 117 R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
118 118
119 // Set the memory attribute. 119 // Set the memory attribute.
120 return page_table.SetMemoryPermission(address, size, perm); 120 R_RETURN(page_table.SetMemoryPermission(address, size, perm));
121} 121}
122 122
123Result SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask, u32 attr) { 123Result SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask, u32 attr) {
@@ -141,7 +141,7 @@ Result SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mas
141 R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory); 141 R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
142 142
143 // Set the memory attribute. 143 // Set the memory attribute.
144 return page_table.SetMemoryAttribute(address, size, mask, attr); 144 R_RETURN(page_table.SetMemoryAttribute(address, size, mask, attr));
145} 145}
146 146
147/// Maps a memory range into a different range. 147/// Maps a memory range into a different range.
@@ -156,7 +156,7 @@ Result MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size)
156 return result; 156 return result;
157 } 157 }
158 158
159 return page_table.MapMemory(dst_addr, src_addr, size); 159 R_RETURN(page_table.MapMemory(dst_addr, src_addr, size));
160} 160}
161 161
162/// Unmaps a region that was previously mapped with svcMapMemory 162/// Unmaps a region that was previously mapped with svcMapMemory
@@ -171,7 +171,7 @@ Result UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 siz
171 return result; 171 return result;
172 } 172 }
173 173
174 return page_table.UnmapMemory(dst_addr, src_addr, size); 174 R_RETURN(page_table.UnmapMemory(dst_addr, src_addr, size));
175} 175}
176 176
177Result SetMemoryPermission64(Core::System& system, uint64_t address, uint64_t size, 177Result SetMemoryPermission64(Core::System& system, uint64_t address, uint64_t size,
diff --git a/src/core/hle/kernel/svc/svc_physical_memory.cpp b/src/core/hle/kernel/svc/svc_physical_memory.cpp
index ed6a624ac..63196e1ed 100644
--- a/src/core/hle/kernel/svc/svc_physical_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_physical_memory.cpp
@@ -16,9 +16,7 @@ Result SetHeapSize(Core::System& system, VAddr* out_address, u64 size) {
16 R_UNLESS(size < MainMemorySizeMax, ResultInvalidSize); 16 R_UNLESS(size < MainMemorySizeMax, ResultInvalidSize);
17 17
18 // Set the heap size. 18 // Set the heap size.
19 R_TRY(GetCurrentProcess(system.Kernel()).PageTable().SetHeapSize(out_address, size)); 19 R_RETURN(GetCurrentProcess(system.Kernel()).PageTable().SetHeapSize(out_address, size));
20
21 return ResultSuccess;
22} 20}
23 21
24/// Maps memory at a desired address 22/// Maps memory at a desired address
@@ -27,22 +25,22 @@ Result MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
27 25
28 if (!Common::Is4KBAligned(addr)) { 26 if (!Common::Is4KBAligned(addr)) {
29 LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr); 27 LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr);
30 return ResultInvalidAddress; 28 R_THROW(ResultInvalidAddress);
31 } 29 }
32 30
33 if (!Common::Is4KBAligned(size)) { 31 if (!Common::Is4KBAligned(size)) {
34 LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size); 32 LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size);
35 return ResultInvalidSize; 33 R_THROW(ResultInvalidSize);
36 } 34 }
37 35
38 if (size == 0) { 36 if (size == 0) {
39 LOG_ERROR(Kernel_SVC, "Size is zero"); 37 LOG_ERROR(Kernel_SVC, "Size is zero");
40 return ResultInvalidSize; 38 R_THROW(ResultInvalidSize);
41 } 39 }
42 40
43 if (!(addr < addr + size)) { 41 if (!(addr < addr + size)) {
44 LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address"); 42 LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address");
45 return ResultInvalidMemoryRegion; 43 R_THROW(ResultInvalidMemoryRegion);
46 } 44 }
47 45
48 KProcess* const current_process{GetCurrentProcessPointer(system.Kernel())}; 46 KProcess* const current_process{GetCurrentProcessPointer(system.Kernel())};
@@ -50,24 +48,24 @@ Result MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
50 48
51 if (current_process->GetSystemResourceSize() == 0) { 49 if (current_process->GetSystemResourceSize() == 0) {
52 LOG_ERROR(Kernel_SVC, "System Resource Size is zero"); 50 LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
53 return ResultInvalidState; 51 R_THROW(ResultInvalidState);
54 } 52 }
55 53
56 if (!page_table.IsInsideAddressSpace(addr, size)) { 54 if (!page_table.IsInsideAddressSpace(addr, size)) {
57 LOG_ERROR(Kernel_SVC, 55 LOG_ERROR(Kernel_SVC,
58 "Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr, 56 "Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr,
59 size); 57 size);
60 return ResultInvalidMemoryRegion; 58 R_THROW(ResultInvalidMemoryRegion);
61 } 59 }
62 60
63 if (page_table.IsOutsideAliasRegion(addr, size)) { 61 if (page_table.IsOutsideAliasRegion(addr, size)) {
64 LOG_ERROR(Kernel_SVC, 62 LOG_ERROR(Kernel_SVC,
65 "Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr, 63 "Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr,
66 size); 64 size);
67 return ResultInvalidMemoryRegion; 65 R_THROW(ResultInvalidMemoryRegion);
68 } 66 }
69 67
70 return page_table.MapPhysicalMemory(addr, size); 68 R_RETURN(page_table.MapPhysicalMemory(addr, size));
71} 69}
72 70
73/// Unmaps memory previously mapped via MapPhysicalMemory 71/// Unmaps memory previously mapped via MapPhysicalMemory
@@ -76,22 +74,22 @@ Result UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
76 74
77 if (!Common::Is4KBAligned(addr)) { 75 if (!Common::Is4KBAligned(addr)) {
78 LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr); 76 LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr);
79 return ResultInvalidAddress; 77 R_THROW(ResultInvalidAddress);
80 } 78 }
81 79
82 if (!Common::Is4KBAligned(size)) { 80 if (!Common::Is4KBAligned(size)) {
83 LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size); 81 LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size);
84 return ResultInvalidSize; 82 R_THROW(ResultInvalidSize);
85 } 83 }
86 84
87 if (size == 0) { 85 if (size == 0) {
88 LOG_ERROR(Kernel_SVC, "Size is zero"); 86 LOG_ERROR(Kernel_SVC, "Size is zero");
89 return ResultInvalidSize; 87 R_THROW(ResultInvalidSize);
90 } 88 }
91 89
92 if (!(addr < addr + size)) { 90 if (!(addr < addr + size)) {
93 LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address"); 91 LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address");
94 return ResultInvalidMemoryRegion; 92 R_THROW(ResultInvalidMemoryRegion);
95 } 93 }
96 94
97 KProcess* const current_process{GetCurrentProcessPointer(system.Kernel())}; 95 KProcess* const current_process{GetCurrentProcessPointer(system.Kernel())};
@@ -99,24 +97,24 @@ Result UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
99 97
100 if (current_process->GetSystemResourceSize() == 0) { 98 if (current_process->GetSystemResourceSize() == 0) {
101 LOG_ERROR(Kernel_SVC, "System Resource Size is zero"); 99 LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
102 return ResultInvalidState; 100 R_THROW(ResultInvalidState);
103 } 101 }
104 102
105 if (!page_table.IsInsideAddressSpace(addr, size)) { 103 if (!page_table.IsInsideAddressSpace(addr, size)) {
106 LOG_ERROR(Kernel_SVC, 104 LOG_ERROR(Kernel_SVC,
107 "Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr, 105 "Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr,
108 size); 106 size);
109 return ResultInvalidMemoryRegion; 107 R_THROW(ResultInvalidMemoryRegion);
110 } 108 }
111 109
112 if (page_table.IsOutsideAliasRegion(addr, size)) { 110 if (page_table.IsOutsideAliasRegion(addr, size)) {
113 LOG_ERROR(Kernel_SVC, 111 LOG_ERROR(Kernel_SVC,
114 "Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr, 112 "Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr,
115 size); 113 size);
116 return ResultInvalidMemoryRegion; 114 R_THROW(ResultInvalidMemoryRegion);
117 } 115 }
118 116
119 return page_table.UnmapPhysicalMemory(addr, size); 117 R_RETURN(page_table.UnmapPhysicalMemory(addr, size));
120} 118}
121 119
122Result MapPhysicalMemoryUnsafe(Core::System& system, uint64_t address, uint64_t size) { 120Result MapPhysicalMemoryUnsafe(Core::System& system, uint64_t address, uint64_t size) {
diff --git a/src/core/hle/kernel/svc/svc_port.cpp b/src/core/hle/kernel/svc/svc_port.cpp
index 78c2a8d17..0b5556bc4 100644
--- a/src/core/hle/kernel/svc/svc_port.cpp
+++ b/src/core/hle/kernel/svc/svc_port.cpp
@@ -81,7 +81,7 @@ Result ManageNamedPort(Core::System& system, Handle* out_server_handle, uint64_t
81 R_UNLESS(port != nullptr, ResultOutOfResource); 81 R_UNLESS(port != nullptr, ResultOutOfResource);
82 82
83 // Initialize the new port. 83 // Initialize the new port.
84 port->Initialize(max_sessions, false, ""); 84 port->Initialize(max_sessions, false, 0);
85 85
86 // Register the port. 86 // Register the port.
87 KPort::Register(system.Kernel(), port); 87 KPort::Register(system.Kernel(), port);
diff --git a/src/core/hle/kernel/svc/svc_process.cpp b/src/core/hle/kernel/svc/svc_process.cpp
index c35d2be76..b538c37e7 100644
--- a/src/core/hle/kernel/svc/svc_process.cpp
+++ b/src/core/hle/kernel/svc/svc_process.cpp
@@ -11,7 +11,7 @@ namespace Kernel::Svc {
11void ExitProcess(Core::System& system) { 11void ExitProcess(Core::System& system) {
12 auto* current_process = GetCurrentProcessPointer(system.Kernel()); 12 auto* current_process = GetCurrentProcessPointer(system.Kernel());
13 13
14 LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID()); 14 LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessId());
15 ASSERT_MSG(current_process->GetState() == KProcess::State::Running, 15 ASSERT_MSG(current_process->GetState() == KProcess::State::Running,
16 "Process has already exited"); 16 "Process has already exited");
17 17
@@ -47,7 +47,7 @@ Result GetProcessId(Core::System& system, u64* out_process_id, Handle handle) {
47 // Get the process id. 47 // Get the process id.
48 *out_process_id = process->GetId(); 48 *out_process_id = process->GetId();
49 49
50 return ResultSuccess; 50 R_SUCCEED();
51} 51}
52 52
53Result GetProcessList(Core::System& system, s32* out_num_processes, VAddr out_process_ids, 53Result GetProcessList(Core::System& system, s32* out_num_processes, VAddr out_process_ids,
@@ -60,7 +60,7 @@ Result GetProcessList(Core::System& system, s32* out_num_processes, VAddr out_pr
60 LOG_ERROR(Kernel_SVC, 60 LOG_ERROR(Kernel_SVC,
61 "Supplied size outside [0, 0x0FFFFFFF] range. out_process_ids_size={}", 61 "Supplied size outside [0, 0x0FFFFFFF] range. out_process_ids_size={}",
62 out_process_ids_size); 62 out_process_ids_size);
63 return ResultOutOfRange; 63 R_THROW(ResultOutOfRange);
64 } 64 }
65 65
66 auto& kernel = system.Kernel(); 66 auto& kernel = system.Kernel();
@@ -70,7 +70,7 @@ Result GetProcessList(Core::System& system, s32* out_num_processes, VAddr out_pr
70 out_process_ids, total_copy_size)) { 70 out_process_ids, total_copy_size)) {
71 LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}", 71 LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
72 out_process_ids, out_process_ids + total_copy_size); 72 out_process_ids, out_process_ids + total_copy_size);
73 return ResultInvalidCurrentMemory; 73 R_THROW(ResultInvalidCurrentMemory);
74 } 74 }
75 75
76 auto& memory = system.Memory(); 76 auto& memory = system.Memory();
@@ -80,12 +80,12 @@ Result GetProcessList(Core::System& system, s32* out_num_processes, VAddr out_pr
80 std::min(static_cast<std::size_t>(out_process_ids_size), num_processes); 80 std::min(static_cast<std::size_t>(out_process_ids_size), num_processes);
81 81
82 for (std::size_t i = 0; i < copy_amount; ++i) { 82 for (std::size_t i = 0; i < copy_amount; ++i) {
83 memory.Write64(out_process_ids, process_list[i]->GetProcessID()); 83 memory.Write64(out_process_ids, process_list[i]->GetProcessId());
84 out_process_ids += sizeof(u64); 84 out_process_ids += sizeof(u64);
85 } 85 }
86 86
87 *out_num_processes = static_cast<u32>(num_processes); 87 *out_num_processes = static_cast<u32>(num_processes);
88 return ResultSuccess; 88 R_SUCCEED();
89} 89}
90 90
91Result GetProcessInfo(Core::System& system, s64* out, Handle process_handle, 91Result GetProcessInfo(Core::System& system, s64* out, Handle process_handle,
@@ -97,17 +97,17 @@ Result GetProcessInfo(Core::System& system, s64* out, Handle process_handle,
97 if (process.IsNull()) { 97 if (process.IsNull()) {
98 LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}", 98 LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}",
99 process_handle); 99 process_handle);
100 return ResultInvalidHandle; 100 R_THROW(ResultInvalidHandle);
101 } 101 }
102 102
103 if (info_type != ProcessInfoType::ProcessState) { 103 if (info_type != ProcessInfoType::ProcessState) {
104 LOG_ERROR(Kernel_SVC, "Expected info_type to be ProcessState but got {} instead", 104 LOG_ERROR(Kernel_SVC, "Expected info_type to be ProcessState but got {} instead",
105 info_type); 105 info_type);
106 return ResultInvalidEnumValue; 106 R_THROW(ResultInvalidEnumValue);
107 } 107 }
108 108
109 *out = static_cast<s64>(process->GetState()); 109 *out = static_cast<s64>(process->GetState());
110 return ResultSuccess; 110 R_SUCCEED();
111} 111}
112 112
113Result CreateProcess(Core::System& system, Handle* out_handle, uint64_t parameters, uint64_t caps, 113Result CreateProcess(Core::System& system, Handle* out_handle, uint64_t parameters, uint64_t caps,
diff --git a/src/core/hle/kernel/svc/svc_process_memory.cpp b/src/core/hle/kernel/svc/svc_process_memory.cpp
index 8e2fb4092..f9210ca1e 100644
--- a/src/core/hle/kernel/svc/svc_process_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_process_memory.cpp
@@ -53,7 +53,7 @@ Result SetProcessMemoryPermission(Core::System& system, Handle process_handle, V
53 R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory); 53 R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
54 54
55 // Set the memory permission. 55 // Set the memory permission.
56 return page_table.SetProcessMemoryPermission(address, size, perm); 56 R_RETURN(page_table.SetProcessMemoryPermission(address, size, perm));
57} 57}
58 58
59Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle, 59Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
@@ -93,10 +93,8 @@ Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle process_
93 KMemoryAttribute::All, KMemoryAttribute::None)); 93 KMemoryAttribute::All, KMemoryAttribute::None));
94 94
95 // Map the group. 95 // Map the group.
96 R_TRY(dst_pt.MapPageGroup(dst_address, pg, KMemoryState::SharedCode, 96 R_RETURN(dst_pt.MapPageGroup(dst_address, pg, KMemoryState::SharedCode,
97 KMemoryPermission::UserReadWrite)); 97 KMemoryPermission::UserReadWrite));
98
99 return ResultSuccess;
100} 98}
101 99
102Result UnmapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle, 100Result UnmapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
@@ -129,9 +127,7 @@ Result UnmapProcessMemory(Core::System& system, VAddr dst_address, Handle proces
129 ResultInvalidMemoryRegion); 127 ResultInvalidMemoryRegion);
130 128
131 // Unmap the memory. 129 // Unmap the memory.
132 R_TRY(dst_pt.UnmapProcessMemory(dst_address, size, src_pt, src_address)); 130 R_RETURN(dst_pt.UnmapProcessMemory(dst_address, size, src_pt, src_address));
133
134 return ResultSuccess;
135} 131}
136 132
137Result MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address, 133Result MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
@@ -144,18 +140,18 @@ Result MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst
144 if (!Common::Is4KBAligned(src_address)) { 140 if (!Common::Is4KBAligned(src_address)) {
145 LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).", 141 LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).",
146 src_address); 142 src_address);
147 return ResultInvalidAddress; 143 R_THROW(ResultInvalidAddress);
148 } 144 }
149 145
150 if (!Common::Is4KBAligned(dst_address)) { 146 if (!Common::Is4KBAligned(dst_address)) {
151 LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).", 147 LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).",
152 dst_address); 148 dst_address);
153 return ResultInvalidAddress; 149 R_THROW(ResultInvalidAddress);
154 } 150 }
155 151
156 if (size == 0 || !Common::Is4KBAligned(size)) { 152 if (size == 0 || !Common::Is4KBAligned(size)) {
157 LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X})", size); 153 LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X})", size);
158 return ResultInvalidSize; 154 R_THROW(ResultInvalidSize);
159 } 155 }
160 156
161 if (!IsValidAddressRange(dst_address, size)) { 157 if (!IsValidAddressRange(dst_address, size)) {
@@ -163,7 +159,7 @@ Result MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst
163 "Destination address range overflows the address space (dst_address=0x{:016X}, " 159 "Destination address range overflows the address space (dst_address=0x{:016X}, "
164 "size=0x{:016X}).", 160 "size=0x{:016X}).",
165 dst_address, size); 161 dst_address, size);
166 return ResultInvalidCurrentMemory; 162 R_THROW(ResultInvalidCurrentMemory);
167 } 163 }
168 164
169 if (!IsValidAddressRange(src_address, size)) { 165 if (!IsValidAddressRange(src_address, size)) {
@@ -171,7 +167,7 @@ Result MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst
171 "Source address range overflows the address space (src_address=0x{:016X}, " 167 "Source address range overflows the address space (src_address=0x{:016X}, "
172 "size=0x{:016X}).", 168 "size=0x{:016X}).",
173 src_address, size); 169 src_address, size);
174 return ResultInvalidCurrentMemory; 170 R_THROW(ResultInvalidCurrentMemory);
175 } 171 }
176 172
177 const auto& handle_table = GetCurrentProcess(system.Kernel()).GetHandleTable(); 173 const auto& handle_table = GetCurrentProcess(system.Kernel()).GetHandleTable();
@@ -179,7 +175,7 @@ Result MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst
179 if (process.IsNull()) { 175 if (process.IsNull()) {
180 LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).", 176 LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
181 process_handle); 177 process_handle);
182 return ResultInvalidHandle; 178 R_THROW(ResultInvalidHandle);
183 } 179 }
184 180
185 auto& page_table = process->PageTable(); 181 auto& page_table = process->PageTable();
@@ -188,7 +184,7 @@ Result MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst
188 "Source address range is not within the address space (src_address=0x{:016X}, " 184 "Source address range is not within the address space (src_address=0x{:016X}, "
189 "size=0x{:016X}).", 185 "size=0x{:016X}).",
190 src_address, size); 186 src_address, size);
191 return ResultInvalidCurrentMemory; 187 R_THROW(ResultInvalidCurrentMemory);
192 } 188 }
193 189
194 if (!page_table.IsInsideASLRRegion(dst_address, size)) { 190 if (!page_table.IsInsideASLRRegion(dst_address, size)) {
@@ -196,10 +192,10 @@ Result MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst
196 "Destination address range is not within the ASLR region (dst_address=0x{:016X}, " 192 "Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
197 "size=0x{:016X}).", 193 "size=0x{:016X}).",
198 dst_address, size); 194 dst_address, size);
199 return ResultInvalidMemoryRegion; 195 R_THROW(ResultInvalidMemoryRegion);
200 } 196 }
201 197
202 return page_table.MapCodeMemory(dst_address, src_address, size); 198 R_RETURN(page_table.MapCodeMemory(dst_address, src_address, size));
203} 199}
204 200
205Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address, 201Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
@@ -212,18 +208,18 @@ Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 d
212 if (!Common::Is4KBAligned(dst_address)) { 208 if (!Common::Is4KBAligned(dst_address)) {
213 LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).", 209 LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).",
214 dst_address); 210 dst_address);
215 return ResultInvalidAddress; 211 R_THROW(ResultInvalidAddress);
216 } 212 }
217 213
218 if (!Common::Is4KBAligned(src_address)) { 214 if (!Common::Is4KBAligned(src_address)) {
219 LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).", 215 LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).",
220 src_address); 216 src_address);
221 return ResultInvalidAddress; 217 R_THROW(ResultInvalidAddress);
222 } 218 }
223 219
224 if (size == 0 || !Common::Is4KBAligned(size)) { 220 if (size == 0 || !Common::Is4KBAligned(size)) {
225 LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X}).", size); 221 LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X}).", size);
226 return ResultInvalidSize; 222 R_THROW(ResultInvalidSize);
227 } 223 }
228 224
229 if (!IsValidAddressRange(dst_address, size)) { 225 if (!IsValidAddressRange(dst_address, size)) {
@@ -231,7 +227,7 @@ Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 d
231 "Destination address range overflows the address space (dst_address=0x{:016X}, " 227 "Destination address range overflows the address space (dst_address=0x{:016X}, "
232 "size=0x{:016X}).", 228 "size=0x{:016X}).",
233 dst_address, size); 229 dst_address, size);
234 return ResultInvalidCurrentMemory; 230 R_THROW(ResultInvalidCurrentMemory);
235 } 231 }
236 232
237 if (!IsValidAddressRange(src_address, size)) { 233 if (!IsValidAddressRange(src_address, size)) {
@@ -239,7 +235,7 @@ Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 d
239 "Source address range overflows the address space (src_address=0x{:016X}, " 235 "Source address range overflows the address space (src_address=0x{:016X}, "
240 "size=0x{:016X}).", 236 "size=0x{:016X}).",
241 src_address, size); 237 src_address, size);
242 return ResultInvalidCurrentMemory; 238 R_THROW(ResultInvalidCurrentMemory);
243 } 239 }
244 240
245 const auto& handle_table = GetCurrentProcess(system.Kernel()).GetHandleTable(); 241 const auto& handle_table = GetCurrentProcess(system.Kernel()).GetHandleTable();
@@ -247,7 +243,7 @@ Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 d
247 if (process.IsNull()) { 243 if (process.IsNull()) {
248 LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).", 244 LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
249 process_handle); 245 process_handle);
250 return ResultInvalidHandle; 246 R_THROW(ResultInvalidHandle);
251 } 247 }
252 248
253 auto& page_table = process->PageTable(); 249 auto& page_table = process->PageTable();
@@ -256,7 +252,7 @@ Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 d
256 "Source address range is not within the address space (src_address=0x{:016X}, " 252 "Source address range is not within the address space (src_address=0x{:016X}, "
257 "size=0x{:016X}).", 253 "size=0x{:016X}).",
258 src_address, size); 254 src_address, size);
259 return ResultInvalidCurrentMemory; 255 R_THROW(ResultInvalidCurrentMemory);
260 } 256 }
261 257
262 if (!page_table.IsInsideASLRRegion(dst_address, size)) { 258 if (!page_table.IsInsideASLRRegion(dst_address, size)) {
@@ -264,11 +260,11 @@ Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 d
264 "Destination address range is not within the ASLR region (dst_address=0x{:016X}, " 260 "Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
265 "size=0x{:016X}).", 261 "size=0x{:016X}).",
266 dst_address, size); 262 dst_address, size);
267 return ResultInvalidMemoryRegion; 263 R_THROW(ResultInvalidMemoryRegion);
268 } 264 }
269 265
270 return page_table.UnmapCodeMemory(dst_address, src_address, size, 266 R_RETURN(page_table.UnmapCodeMemory(dst_address, src_address, size,
271 KPageTable::ICacheInvalidationStrategy::InvalidateAll); 267 KPageTable::ICacheInvalidationStrategy::InvalidateAll));
272} 268}
273 269
274Result SetProcessMemoryPermission64(Core::System& system, Handle process_handle, uint64_t address, 270Result SetProcessMemoryPermission64(Core::System& system, Handle process_handle, uint64_t address,
diff --git a/src/core/hle/kernel/svc/svc_query_memory.cpp b/src/core/hle/kernel/svc/svc_query_memory.cpp
index ee75ad370..457ebf950 100644
--- a/src/core/hle/kernel/svc/svc_query_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_query_memory.cpp
@@ -15,8 +15,8 @@ Result QueryMemory(Core::System& system, uint64_t out_memory_info, PageInfo* out
15 out_memory_info, query_address); 15 out_memory_info, query_address);
16 16
17 // Query memory is just QueryProcessMemory on the current process. 17 // Query memory is just QueryProcessMemory on the current process.
18 return QueryProcessMemory(system, out_memory_info, out_page_info, CurrentProcess, 18 R_RETURN(
19 query_address); 19 QueryProcessMemory(system, out_memory_info, out_page_info, CurrentProcess, query_address));
20} 20}
21 21
22Result QueryProcessMemory(Core::System& system, uint64_t out_memory_info, PageInfo* out_page_info, 22Result QueryProcessMemory(Core::System& system, uint64_t out_memory_info, PageInfo* out_page_info,
@@ -27,13 +27,13 @@ Result QueryProcessMemory(Core::System& system, uint64_t out_memory_info, PageIn
27 if (process.IsNull()) { 27 if (process.IsNull()) {
28 LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}", 28 LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}",
29 process_handle); 29 process_handle);
30 return ResultInvalidHandle; 30 R_THROW(ResultInvalidHandle);
31 } 31 }
32 32
33 auto& memory{system.Memory()}; 33 auto& memory{system.Memory()};
34 const auto memory_info{process->PageTable().QueryInfo(address).GetSvcMemoryInfo()}; 34 const auto memory_info{process->PageTable().QueryInfo(address).GetSvcMemoryInfo()};
35 35
36 memory.WriteBlock(out_memory_info, &memory_info, sizeof(memory_info)); 36 memory.WriteBlock(out_memory_info, std::addressof(memory_info), sizeof(memory_info));
37 37
38 //! This is supposed to be part of the QueryInfo call. 38 //! This is supposed to be part of the QueryInfo call.
39 *out_page_info = {}; 39 *out_page_info = {};
diff --git a/src/core/hle/kernel/svc/svc_resource_limit.cpp b/src/core/hle/kernel/svc/svc_resource_limit.cpp
index 88166299e..732bc017e 100644
--- a/src/core/hle/kernel/svc/svc_resource_limit.cpp
+++ b/src/core/hle/kernel/svc/svc_resource_limit.cpp
@@ -21,15 +21,13 @@ Result CreateResourceLimit(Core::System& system, Handle* out_handle) {
21 SCOPE_EXIT({ resource_limit->Close(); }); 21 SCOPE_EXIT({ resource_limit->Close(); });
22 22
23 // Initialize the resource limit. 23 // Initialize the resource limit.
24 resource_limit->Initialize(&system.CoreTiming()); 24 resource_limit->Initialize(std::addressof(system.CoreTiming()));
25 25
26 // Register the limit. 26 // Register the limit.
27 KResourceLimit::Register(kernel, resource_limit); 27 KResourceLimit::Register(kernel, resource_limit);
28 28
29 // Add the limit to the handle table. 29 // Add the limit to the handle table.
30 R_TRY(GetCurrentProcess(kernel).GetHandleTable().Add(out_handle, resource_limit)); 30 R_RETURN(GetCurrentProcess(kernel).GetHandleTable().Add(out_handle, resource_limit));
31
32 return ResultSuccess;
33} 31}
34 32
35Result GetResourceLimitLimitValue(Core::System& system, s64* out_limit_value, 33Result GetResourceLimitLimitValue(Core::System& system, s64* out_limit_value,
@@ -49,7 +47,7 @@ Result GetResourceLimitLimitValue(Core::System& system, s64* out_limit_value,
49 // Get the limit value. 47 // Get the limit value.
50 *out_limit_value = resource_limit->GetLimitValue(which); 48 *out_limit_value = resource_limit->GetLimitValue(which);
51 49
52 return ResultSuccess; 50 R_SUCCEED();
53} 51}
54 52
55Result GetResourceLimitCurrentValue(Core::System& system, s64* out_current_value, 53Result GetResourceLimitCurrentValue(Core::System& system, s64* out_current_value,
@@ -69,7 +67,7 @@ Result GetResourceLimitCurrentValue(Core::System& system, s64* out_current_value
69 // Get the current value. 67 // Get the current value.
70 *out_current_value = resource_limit->GetCurrentValue(which); 68 *out_current_value = resource_limit->GetCurrentValue(which);
71 69
72 return ResultSuccess; 70 R_SUCCEED();
73} 71}
74 72
75Result SetResourceLimitLimitValue(Core::System& system, Handle resource_limit_handle, 73Result SetResourceLimitLimitValue(Core::System& system, Handle resource_limit_handle,
@@ -87,9 +85,7 @@ Result SetResourceLimitLimitValue(Core::System& system, Handle resource_limit_ha
87 R_UNLESS(resource_limit.IsNotNull(), ResultInvalidHandle); 85 R_UNLESS(resource_limit.IsNotNull(), ResultInvalidHandle);
88 86
89 // Set the limit value. 87 // Set the limit value.
90 R_TRY(resource_limit->SetLimitValue(which, limit_value)); 88 R_RETURN(resource_limit->SetLimitValue(which, limit_value));
91
92 return ResultSuccess;
93} 89}
94 90
95Result GetResourceLimitPeakValue(Core::System& system, int64_t* out_peak_value, 91Result GetResourceLimitPeakValue(Core::System& system, int64_t* out_peak_value,
diff --git a/src/core/hle/kernel/svc/svc_secure_monitor_call.cpp b/src/core/hle/kernel/svc/svc_secure_monitor_call.cpp
index 20f6ec643..62c781551 100644
--- a/src/core/hle/kernel/svc/svc_secure_monitor_call.cpp
+++ b/src/core/hle/kernel/svc/svc_secure_monitor_call.cpp
@@ -29,7 +29,7 @@ void SvcWrap_CallSecureMonitor64(Core::System& system) {
29 args.r[i] = core.GetReg(i); 29 args.r[i] = core.GetReg(i);
30 } 30 }
31 31
32 CallSecureMonitor64(system, &args); 32 CallSecureMonitor64(system, std::addressof(args));
33 33
34 for (int i = 0; i < 8; i++) { 34 for (int i = 0; i < 8; i++) {
35 core.SetReg(i, args.r[i]); 35 core.SetReg(i, args.r[i]);
@@ -43,7 +43,7 @@ void SvcWrap_CallSecureMonitor64From32(Core::System& system) {
43 args.r[i] = static_cast<u32>(core.GetReg(i)); 43 args.r[i] = static_cast<u32>(core.GetReg(i));
44 } 44 }
45 45
46 CallSecureMonitor64From32(system, &args); 46 CallSecureMonitor64From32(system, std::addressof(args));
47 47
48 for (int i = 0; i < 8; i++) { 48 for (int i = 0; i < 8; i++) {
49 core.SetReg(i, args.r[i]); 49 core.SetReg(i, args.r[i]);
diff --git a/src/core/hle/kernel/svc/svc_session.cpp b/src/core/hle/kernel/svc/svc_session.cpp
index 00fd1605e..01b8a52ad 100644
--- a/src/core/hle/kernel/svc/svc_session.cpp
+++ b/src/core/hle/kernel/svc/svc_session.cpp
@@ -12,7 +12,7 @@ namespace Kernel::Svc {
12namespace { 12namespace {
13 13
14template <typename T> 14template <typename T>
15Result CreateSession(Core::System& system, Handle* out_server, Handle* out_client, u64 name) { 15Result CreateSession(Core::System& system, Handle* out_server, Handle* out_client, uint64_t name) {
16 auto& process = GetCurrentProcess(system.Kernel()); 16 auto& process = GetCurrentProcess(system.Kernel());
17 auto& handle_table = process.GetHandleTable(); 17 auto& handle_table = process.GetHandleTable();
18 18
@@ -21,16 +21,17 @@ Result CreateSession(Core::System& system, Handle* out_server, Handle* out_clien
21 21
22 // Reserve a new session from the process resource limit. 22 // Reserve a new session from the process resource limit.
23 // FIXME: LimitableResource_SessionCountMax 23 // FIXME: LimitableResource_SessionCountMax
24 KScopedResourceReservation session_reservation(&process, LimitableResource::SessionCountMax); 24 KScopedResourceReservation session_reservation(std::addressof(process),
25 LimitableResource::SessionCountMax);
25 if (session_reservation.Succeeded()) { 26 if (session_reservation.Succeeded()) {
26 session = T::Create(system.Kernel()); 27 session = T::Create(system.Kernel());
27 } else { 28 } else {
28 return ResultLimitReached; 29 R_THROW(ResultLimitReached);
29 30
30 // // We couldn't reserve a session. Check that we support dynamically expanding the 31 // // We couldn't reserve a session. Check that we support dynamically expanding the
31 // // resource limit. 32 // // resource limit.
32 // R_UNLESS(process.GetResourceLimit() == 33 // R_UNLESS(process.GetResourceLimit() ==
33 // &system.Kernel().GetSystemResourceLimit(), ResultLimitReached); 34 // std::addressof(system.Kernel().GetSystemResourceLimit()), ResultLimitReached);
34 // R_UNLESS(KTargetSystem::IsDynamicResourceLimitsEnabled(), ResultLimitReached()); 35 // R_UNLESS(KTargetSystem::IsDynamicResourceLimitsEnabled(), ResultLimitReached());
35 36
36 // // Try to allocate a session from unused slab memory. 37 // // Try to allocate a session from unused slab memory.
@@ -59,7 +60,7 @@ Result CreateSession(Core::System& system, Handle* out_server, Handle* out_clien
59 R_UNLESS(session != nullptr, ResultOutOfResource); 60 R_UNLESS(session != nullptr, ResultOutOfResource);
60 61
61 // Initialize the session. 62 // Initialize the session.
62 session->Initialize(nullptr, fmt::format("{}", name)); 63 session->Initialize(nullptr, name);
63 64
64 // Commit the session reservation. 65 // Commit the session reservation.
65 session_reservation.Commit(); 66 session_reservation.Commit();
@@ -75,17 +76,15 @@ Result CreateSession(Core::System& system, Handle* out_server, Handle* out_clien
75 T::Register(system.Kernel(), session); 76 T::Register(system.Kernel(), session);
76 77
77 // Add the server session to the handle table. 78 // Add the server session to the handle table.
78 R_TRY(handle_table.Add(out_server, &session->GetServerSession())); 79 R_TRY(handle_table.Add(out_server, std::addressof(session->GetServerSession())));
79 80
80 // Add the client session to the handle table. 81 // Ensure that we maintain a clean handle state on exit.
81 const auto result = handle_table.Add(out_client, &session->GetClientSession()); 82 ON_RESULT_FAILURE {
82
83 if (!R_SUCCEEDED(result)) {
84 // Ensure that we maintaing a clean handle state on exit.
85 handle_table.Remove(*out_server); 83 handle_table.Remove(*out_server);
86 } 84 };
87 85
88 return result; 86 // Add the client session to the handle table.
87 R_RETURN(handle_table.Add(out_client, std::addressof(session->GetClientSession())));
89} 88}
90 89
91} // namespace 90} // namespace
@@ -94,9 +93,9 @@ Result CreateSession(Core::System& system, Handle* out_server, Handle* out_clien
94 u64 name) { 93 u64 name) {
95 if (is_light) { 94 if (is_light) {
96 // return CreateSession<KLightSession>(system, out_server, out_client, name); 95 // return CreateSession<KLightSession>(system, out_server, out_client, name);
97 return ResultNotImplemented; 96 R_THROW(ResultNotImplemented);
98 } else { 97 } else {
99 return CreateSession<KSession>(system, out_server, out_client, name); 98 R_RETURN(CreateSession<KSession>(system, out_server, out_client, name));
100 } 99 }
101} 100}
102 101
diff --git a/src/core/hle/kernel/svc/svc_shared_memory.cpp b/src/core/hle/kernel/svc/svc_shared_memory.cpp
index 18e0dc904..40d878f17 100644
--- a/src/core/hle/kernel/svc/svc_shared_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_shared_memory.cpp
@@ -56,15 +56,12 @@ Result MapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address,
56 R_TRY(process.AddSharedMemory(shmem.GetPointerUnsafe(), address, size)); 56 R_TRY(process.AddSharedMemory(shmem.GetPointerUnsafe(), address, size));
57 57
58 // Ensure that we clean up the shared memory if we fail to map it. 58 // Ensure that we clean up the shared memory if we fail to map it.
59 auto guard = 59 ON_RESULT_FAILURE {
60 SCOPE_GUARD({ process.RemoveSharedMemory(shmem.GetPointerUnsafe(), address, size); }); 60 process.RemoveSharedMemory(shmem.GetPointerUnsafe(), address, size);
61 };
61 62
62 // Map the shared memory. 63 // Map the shared memory.
63 R_TRY(shmem->Map(process, address, size, map_perm)); 64 R_RETURN(shmem->Map(process, address, size, map_perm));
64
65 // We succeeded.
66 guard.Cancel();
67 return ResultSuccess;
68} 65}
69 66
70Result UnmapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address, u64 size) { 67Result UnmapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address, u64 size) {
@@ -91,7 +88,7 @@ Result UnmapSharedMemory(Core::System& system, Handle shmem_handle, VAddr addres
91 // Remove the shared memory from the process. 88 // Remove the shared memory from the process.
92 process.RemoveSharedMemory(shmem.GetPointerUnsafe(), address, size); 89 process.RemoveSharedMemory(shmem.GetPointerUnsafe(), address, size);
93 90
94 return ResultSuccess; 91 R_SUCCEED();
95} 92}
96 93
97Result CreateSharedMemory(Core::System& system, Handle* out_handle, uint64_t size, 94Result CreateSharedMemory(Core::System& system, Handle* out_handle, uint64_t size,
diff --git a/src/core/hle/kernel/svc/svc_synchronization.cpp b/src/core/hle/kernel/svc/svc_synchronization.cpp
index 9e7bf9530..660b45c23 100644
--- a/src/core/hle/kernel/svc/svc_synchronization.cpp
+++ b/src/core/hle/kernel/svc/svc_synchronization.cpp
@@ -17,7 +17,7 @@ Result CloseHandle(Core::System& system, Handle handle) {
17 R_UNLESS(GetCurrentProcess(system.Kernel()).GetHandleTable().Remove(handle), 17 R_UNLESS(GetCurrentProcess(system.Kernel()).GetHandleTable().Remove(handle),
18 ResultInvalidHandle); 18 ResultInvalidHandle);
19 19
20 return ResultSuccess; 20 R_SUCCEED();
21} 21}
22 22
23/// Clears the signaled state of an event or process. 23/// Clears the signaled state of an event or process.
@@ -31,7 +31,7 @@ Result ResetSignal(Core::System& system, Handle handle) {
31 { 31 {
32 KScopedAutoObject readable_event = handle_table.GetObject<KReadableEvent>(handle); 32 KScopedAutoObject readable_event = handle_table.GetObject<KReadableEvent>(handle);
33 if (readable_event.IsNotNull()) { 33 if (readable_event.IsNotNull()) {
34 return readable_event->Reset(); 34 R_RETURN(readable_event->Reset());
35 } 35 }
36 } 36 }
37 37
@@ -39,13 +39,11 @@ Result ResetSignal(Core::System& system, Handle handle) {
39 { 39 {
40 KScopedAutoObject process = handle_table.GetObject<KProcess>(handle); 40 KScopedAutoObject process = handle_table.GetObject<KProcess>(handle);
41 if (process.IsNotNull()) { 41 if (process.IsNotNull()) {
42 return process->Reset(); 42 R_RETURN(process->Reset());
43 } 43 }
44 } 44 }
45 45
46 LOG_ERROR(Kernel_SVC, "invalid handle (0x{:08X})", handle); 46 R_THROW(ResultInvalidHandle);
47
48 return ResultInvalidHandle;
49} 47}
50 48
51static Result WaitSynchronization(Core::System& system, int32_t* out_index, const Handle* handles, 49static Result WaitSynchronization(Core::System& system, int32_t* out_index, const Handle* handles,
@@ -109,7 +107,7 @@ Result CancelSynchronization(Core::System& system, Handle handle) {
109 107
110 // Cancel the thread's wait. 108 // Cancel the thread's wait.
111 thread->WaitCancel(); 109 thread->WaitCancel();
112 return ResultSuccess; 110 R_SUCCEED();
113} 111}
114 112
115void SynchronizePreemptionState(Core::System& system) { 113void SynchronizePreemptionState(Core::System& system) {
diff --git a/src/core/hle/kernel/svc/svc_thread.cpp b/src/core/hle/kernel/svc/svc_thread.cpp
index 9bc1ebe74..50991fb62 100644
--- a/src/core/hle/kernel/svc/svc_thread.cpp
+++ b/src/core/hle/kernel/svc/svc_thread.cpp
@@ -34,51 +34,31 @@ Result CreateThread(Core::System& system, Handle* out_handle, VAddr entry_point,
34 } 34 }
35 35
36 // Validate arguments. 36 // Validate arguments.
37 if (!IsValidVirtualCoreId(core_id)) { 37 R_UNLESS(IsValidVirtualCoreId(core_id), ResultInvalidCoreId);
38 LOG_ERROR(Kernel_SVC, "Invalid Core ID specified (id={})", core_id); 38 R_UNLESS(((1ull << core_id) & process.GetCoreMask()) != 0, ResultInvalidCoreId);
39 return ResultInvalidCoreId;
40 }
41 if (((1ULL << core_id) & process.GetCoreMask()) == 0) {
42 LOG_ERROR(Kernel_SVC, "Core ID doesn't fall within allowable cores (id={})", core_id);
43 return ResultInvalidCoreId;
44 }
45 39
46 if (HighestThreadPriority > priority || priority > LowestThreadPriority) { 40 R_UNLESS(HighestThreadPriority <= priority && priority <= LowestThreadPriority,
47 LOG_ERROR(Kernel_SVC, "Invalid priority specified (priority={})", priority); 41 ResultInvalidPriority);
48 return ResultInvalidPriority; 42 R_UNLESS(process.CheckThreadPriority(priority), ResultInvalidPriority);
49 }
50 if (!process.CheckThreadPriority(priority)) {
51 LOG_ERROR(Kernel_SVC, "Invalid allowable thread priority (priority={})", priority);
52 return ResultInvalidPriority;
53 }
54 43
55 // Reserve a new thread from the process resource limit (waiting up to 100ms). 44 // Reserve a new thread from the process resource limit (waiting up to 100ms).
56 KScopedResourceReservation thread_reservation(&process, LimitableResource::ThreadCountMax, 1, 45 KScopedResourceReservation thread_reservation(
57 system.CoreTiming().GetGlobalTimeNs().count() + 46 std::addressof(process), LimitableResource::ThreadCountMax, 1,
58 100000000); 47 system.CoreTiming().GetGlobalTimeNs().count() + 100000000);
59 if (!thread_reservation.Succeeded()) { 48 R_UNLESS(thread_reservation.Succeeded(), ResultLimitReached);
60 LOG_ERROR(Kernel_SVC, "Could not reserve a new thread");
61 return ResultLimitReached;
62 }
63 49
64 // Create the thread. 50 // Create the thread.
65 KThread* thread = KThread::Create(kernel); 51 KThread* thread = KThread::Create(kernel);
66 if (!thread) { 52 R_UNLESS(thread != nullptr, ResultOutOfResource)
67 LOG_ERROR(Kernel_SVC, "Unable to create new threads. Thread creation limit reached.");
68 return ResultOutOfResource;
69 }
70 SCOPE_EXIT({ thread->Close(); }); 53 SCOPE_EXIT({ thread->Close(); });
71 54
72 // Initialize the thread. 55 // Initialize the thread.
73 { 56 {
74 KScopedLightLock lk{process.GetStateLock()}; 57 KScopedLightLock lk{process.GetStateLock()};
75 R_TRY(KThread::InitializeUserThread(system, thread, entry_point, arg, stack_bottom, 58 R_TRY(KThread::InitializeUserThread(system, thread, entry_point, arg, stack_bottom,
76 priority, core_id, &process)); 59 priority, core_id, std::addressof(process)));
77 } 60 }
78 61
79 // Set the thread name for debugging purposes.
80 thread->SetName(fmt::format("thread[entry_point={:X}, handle={:X}]", entry_point, *out_handle));
81
82 // Commit the thread reservation. 62 // Commit the thread reservation.
83 thread_reservation.Commit(); 63 thread_reservation.Commit();
84 64
@@ -89,9 +69,7 @@ Result CreateThread(Core::System& system, Handle* out_handle, VAddr entry_point,
89 KThread::Register(kernel, thread); 69 KThread::Register(kernel, thread);
90 70
91 // Add the thread to the handle table. 71 // Add the thread to the handle table.
92 R_TRY(process.GetHandleTable().Add(out_handle, thread)); 72 R_RETURN(process.GetHandleTable().Add(out_handle, thread));
93
94 return ResultSuccess;
95} 73}
96 74
97/// Starts the thread for the provided handle 75/// Starts the thread for the provided handle
@@ -110,7 +88,7 @@ Result StartThread(Core::System& system, Handle thread_handle) {
110 thread->Open(); 88 thread->Open();
111 system.Kernel().RegisterInUseObject(thread.GetPointerUnsafe()); 89 system.Kernel().RegisterInUseObject(thread.GetPointerUnsafe());
112 90
113 return ResultSuccess; 91 R_SUCCEED();
114} 92}
115 93
116/// Called when a thread exits 94/// Called when a thread exits
@@ -202,10 +180,8 @@ Result GetThreadContext3(Core::System& system, VAddr out_context, Handle thread_
202 // Copy the thread context to user space. 180 // Copy the thread context to user space.
203 system.Memory().WriteBlock(out_context, context.data(), context.size()); 181 system.Memory().WriteBlock(out_context, context.data(), context.size());
204 182
205 return ResultSuccess; 183 R_SUCCEED();
206 } 184 }
207
208 return ResultSuccess;
209} 185}
210 186
211/// Gets the priority for the specified thread 187/// Gets the priority for the specified thread
@@ -219,7 +195,7 @@ Result GetThreadPriority(Core::System& system, s32* out_priority, Handle handle)
219 195
220 // Get the thread's priority. 196 // Get the thread's priority.
221 *out_priority = thread->GetPriority(); 197 *out_priority = thread->GetPriority();
222 return ResultSuccess; 198 R_SUCCEED();
223} 199}
224 200
225/// Sets the priority for the specified thread 201/// Sets the priority for the specified thread
@@ -238,7 +214,7 @@ Result SetThreadPriority(Core::System& system, Handle thread_handle, s32 priorit
238 214
239 // Set the thread priority. 215 // Set the thread priority.
240 thread->SetBasePriority(priority); 216 thread->SetBasePriority(priority);
241 return ResultSuccess; 217 R_SUCCEED();
242} 218}
243 219
244Result GetThreadList(Core::System& system, s32* out_num_threads, VAddr out_thread_ids, 220Result GetThreadList(Core::System& system, s32* out_num_threads, VAddr out_thread_ids,
@@ -253,7 +229,7 @@ Result GetThreadList(Core::System& system, s32* out_num_threads, VAddr out_threa
253 if ((out_thread_ids_size & 0xF0000000) != 0) { 229 if ((out_thread_ids_size & 0xF0000000) != 0) {
254 LOG_ERROR(Kernel_SVC, "Supplied size outside [0, 0x0FFFFFFF] range. size={}", 230 LOG_ERROR(Kernel_SVC, "Supplied size outside [0, 0x0FFFFFFF] range. size={}",
255 out_thread_ids_size); 231 out_thread_ids_size);
256 return ResultOutOfRange; 232 R_THROW(ResultOutOfRange);
257 } 233 }
258 234
259 auto* const current_process = GetCurrentProcessPointer(system.Kernel()); 235 auto* const current_process = GetCurrentProcessPointer(system.Kernel());
@@ -263,7 +239,7 @@ Result GetThreadList(Core::System& system, s32* out_num_threads, VAddr out_threa
263 !current_process->PageTable().IsInsideAddressSpace(out_thread_ids, total_copy_size)) { 239 !current_process->PageTable().IsInsideAddressSpace(out_thread_ids, total_copy_size)) {
264 LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}", 240 LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
265 out_thread_ids, out_thread_ids + total_copy_size); 241 out_thread_ids, out_thread_ids + total_copy_size);
266 return ResultInvalidCurrentMemory; 242 R_THROW(ResultInvalidCurrentMemory);
267 } 243 }
268 244
269 auto& memory = system.Memory(); 245 auto& memory = system.Memory();
@@ -273,12 +249,12 @@ Result GetThreadList(Core::System& system, s32* out_num_threads, VAddr out_threa
273 249
274 auto list_iter = thread_list.cbegin(); 250 auto list_iter = thread_list.cbegin();
275 for (std::size_t i = 0; i < copy_amount; ++i, ++list_iter) { 251 for (std::size_t i = 0; i < copy_amount; ++i, ++list_iter) {
276 memory.Write64(out_thread_ids, (*list_iter)->GetThreadID()); 252 memory.Write64(out_thread_ids, (*list_iter)->GetThreadId());
277 out_thread_ids += sizeof(u64); 253 out_thread_ids += sizeof(u64);
278 } 254 }
279 255
280 *out_num_threads = static_cast<u32>(num_threads); 256 *out_num_threads = static_cast<u32>(num_threads);
281 return ResultSuccess; 257 R_SUCCEED();
282} 258}
283 259
284Result GetThreadCoreMask(Core::System& system, s32* out_core_id, u64* out_affinity_mask, 260Result GetThreadCoreMask(Core::System& system, s32* out_core_id, u64* out_affinity_mask,
@@ -291,9 +267,7 @@ Result GetThreadCoreMask(Core::System& system, s32* out_core_id, u64* out_affini
291 R_UNLESS(thread.IsNotNull(), ResultInvalidHandle); 267 R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
292 268
293 // Get the core mask. 269 // Get the core mask.
294 R_TRY(thread->GetCoreMask(out_core_id, out_affinity_mask)); 270 R_RETURN(thread->GetCoreMask(out_core_id, out_affinity_mask));
295
296 return ResultSuccess;
297} 271}
298 272
299Result SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id, 273Result SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id,
@@ -323,9 +297,7 @@ Result SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id
323 R_UNLESS(thread.IsNotNull(), ResultInvalidHandle); 297 R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
324 298
325 // Set the core mask. 299 // Set the core mask.
326 R_TRY(thread->SetCoreMask(core_id, affinity_mask)); 300 R_RETURN(thread->SetCoreMask(core_id, affinity_mask));
327
328 return ResultSuccess;
329} 301}
330 302
331/// Get the ID for the specified thread. 303/// Get the ID for the specified thread.
@@ -337,7 +309,7 @@ Result GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handl
337 309
338 // Get the thread's id. 310 // Get the thread's id.
339 *out_thread_id = thread->GetId(); 311 *out_thread_id = thread->GetId();
340 return ResultSuccess; 312 R_SUCCEED();
341} 313}
342 314
343Result CreateThread64(Core::System& system, Handle* out_handle, uint64_t func, uint64_t arg, 315Result CreateThread64(Core::System& system, Handle* out_handle, uint64_t func, uint64_t arg,
diff --git a/src/core/hle/kernel/svc/svc_transfer_memory.cpp b/src/core/hle/kernel/svc/svc_transfer_memory.cpp
index 7ffc24adf..394f06728 100644
--- a/src/core/hle/kernel/svc/svc_transfer_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_transfer_memory.cpp
@@ -43,7 +43,7 @@ Result CreateTransferMemory(Core::System& system, Handle* out, VAddr address, u6
43 auto& handle_table = process.GetHandleTable(); 43 auto& handle_table = process.GetHandleTable();
44 44
45 // Reserve a new transfer memory from the process resource limit. 45 // Reserve a new transfer memory from the process resource limit.
46 KScopedResourceReservation trmem_reservation(&process, 46 KScopedResourceReservation trmem_reservation(std::addressof(process),
47 LimitableResource::TransferMemoryCountMax); 47 LimitableResource::TransferMemoryCountMax);
48 R_UNLESS(trmem_reservation.Succeeded(), ResultLimitReached); 48 R_UNLESS(trmem_reservation.Succeeded(), ResultLimitReached);
49 49
@@ -67,9 +67,7 @@ Result CreateTransferMemory(Core::System& system, Handle* out, VAddr address, u6
67 KTransferMemory::Register(kernel, trmem); 67 KTransferMemory::Register(kernel, trmem);
68 68
69 // Add the transfer memory to the handle table. 69 // Add the transfer memory to the handle table.
70 R_TRY(handle_table.Add(out, trmem)); 70 R_RETURN(handle_table.Add(out, trmem));
71
72 return ResultSuccess;
73} 71}
74 72
75Result MapTransferMemory(Core::System& system, Handle trmem_handle, uint64_t address, uint64_t size, 73Result MapTransferMemory(Core::System& system, Handle trmem_handle, uint64_t address, uint64_t size,
diff --git a/src/core/hle/kernel/svc_generator.py b/src/core/hle/kernel/svc_generator.py
index 0cce69e85..7fcbb1ba1 100644
--- a/src/core/hle/kernel/svc_generator.py
+++ b/src/core/hle/kernel/svc_generator.py
@@ -460,7 +460,7 @@ def emit_wrapper(wrapped_fn, suffix, register_info, arguments, byte_size):
460 call_arguments = ["system"] 460 call_arguments = ["system"]
461 for arg in arguments: 461 for arg in arguments:
462 if arg.is_output and not arg.is_outptr: 462 if arg.is_output and not arg.is_outptr:
463 call_arguments.append(f"&{arg.var_name}") 463 call_arguments.append(f"std::addressof({arg.var_name})")
464 else: 464 else:
465 call_arguments.append(arg.var_name) 465 call_arguments.append(arg.var_name)
466 466
@@ -574,9 +574,9 @@ static To Convert(const From& from) {
574 To to{}; 574 To to{};
575 575
576 if constexpr (sizeof(To) >= sizeof(From)) { 576 if constexpr (sizeof(To) >= sizeof(From)) {
577 std::memcpy(&to, &from, sizeof(From)); 577 std::memcpy(std::addressof(to), std::addressof(from), sizeof(From));
578 } else { 578 } else {
579 std::memcpy(&to, &from, sizeof(To)); 579 std::memcpy(std::addressof(to), std::addressof(from), sizeof(To));
580 } 580 }
581 581
582 return to; 582 return to;
diff --git a/src/core/hle/service/acc/acc.cpp b/src/core/hle/service/acc/acc.cpp
index 120282aa4..6c29cb613 100644
--- a/src/core/hle/service/acc/acc.cpp
+++ b/src/core/hle/service/acc/acc.cpp
@@ -789,7 +789,7 @@ Result Module::Interface::InitializeApplicationInfoBase() {
789 } 789 }
790 790
791 LOG_WARNING(Service_ACC, "ApplicationInfo init required"); 791 LOG_WARNING(Service_ACC, "ApplicationInfo init required");
792 // TODO(ogniK): Actual initalization here 792 // TODO(ogniK): Actual initialization here
793 793
794 return ResultSuccess; 794 return ResultSuccess;
795} 795}
diff --git a/src/core/hle/service/acc/profile_manager.cpp b/src/core/hle/service/acc/profile_manager.cpp
index 97f7c6688..63fd5bfd6 100644
--- a/src/core/hle/service/acc/profile_manager.cpp
+++ b/src/core/hle/service/acc/profile_manager.cpp
@@ -287,7 +287,7 @@ void ProfileManager::StoreOpenedUsers() {
287 }); 287 });
288} 288}
289 289
290/// Return the users profile base and the unknown arbitary data. 290/// Return the users profile base and the unknown arbitrary data.
291bool ProfileManager::GetProfileBaseAndData(std::optional<std::size_t> index, ProfileBase& profile, 291bool ProfileManager::GetProfileBaseAndData(std::optional<std::size_t> index, ProfileBase& profile,
292 UserData& data) const { 292 UserData& data) const {
293 if (GetProfileBase(index, profile)) { 293 if (GetProfileBase(index, profile)) {
@@ -297,13 +297,13 @@ bool ProfileManager::GetProfileBaseAndData(std::optional<std::size_t> index, Pro
297 return false; 297 return false;
298} 298}
299 299
300/// Return the users profile base and the unknown arbitary data. 300/// Return the users profile base and the unknown arbitrary data.
301bool ProfileManager::GetProfileBaseAndData(UUID uuid, ProfileBase& profile, UserData& data) const { 301bool ProfileManager::GetProfileBaseAndData(UUID uuid, ProfileBase& profile, UserData& data) const {
302 const auto idx = GetUserIndex(uuid); 302 const auto idx = GetUserIndex(uuid);
303 return GetProfileBaseAndData(idx, profile, data); 303 return GetProfileBaseAndData(idx, profile, data);
304} 304}
305 305
306/// Return the users profile base and the unknown arbitary data. 306/// Return the users profile base and the unknown arbitrary data.
307bool ProfileManager::GetProfileBaseAndData(const ProfileInfo& user, ProfileBase& profile, 307bool ProfileManager::GetProfileBaseAndData(const ProfileInfo& user, ProfileBase& profile,
308 UserData& data) const { 308 UserData& data) const {
309 return GetProfileBaseAndData(user.user_uuid, profile, data); 309 return GetProfileBaseAndData(user.user_uuid, profile, data);
diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp
index f17df5124..deeca925d 100644
--- a/src/core/hle/service/am/am.cpp
+++ b/src/core/hle/service/am/am.cpp
@@ -79,7 +79,7 @@ IWindowController::IWindowController(Core::System& system_)
79IWindowController::~IWindowController() = default; 79IWindowController::~IWindowController() = default;
80 80
81void IWindowController::GetAppletResourceUserId(HLERequestContext& ctx) { 81void IWindowController::GetAppletResourceUserId(HLERequestContext& ctx) {
82 const u64 process_id = system.ApplicationProcess()->GetProcessID(); 82 const u64 process_id = system.ApplicationProcess()->GetProcessId();
83 83
84 LOG_DEBUG(Service_AM, "called. Process ID=0x{:016X}", process_id); 84 LOG_DEBUG(Service_AM, "called. Process ID=0x{:016X}", process_id);
85 85
diff --git a/src/core/hle/service/am/applets/applet_cabinet.cpp b/src/core/hle/service/am/applets/applet_cabinet.cpp
index d0969b0f1..162687b29 100644
--- a/src/core/hle/service/am/applets/applet_cabinet.cpp
+++ b/src/core/hle/service/am/applets/applet_cabinet.cpp
@@ -119,7 +119,7 @@ void Cabinet::DisplayCompleted(bool apply_changes, std::string_view amiibo_name)
119 case Service::NFP::CabinetMode::StartNicknameAndOwnerSettings: { 119 case Service::NFP::CabinetMode::StartNicknameAndOwnerSettings: {
120 Service::NFP::AmiiboName name{}; 120 Service::NFP::AmiiboName name{};
121 std::memcpy(name.data(), amiibo_name.data(), std::min(amiibo_name.size(), name.size() - 1)); 121 std::memcpy(name.data(), amiibo_name.data(), std::min(amiibo_name.size(), name.size() - 1));
122 nfp_device->SetNicknameAndOwner(name); 122 nfp_device->SetRegisterInfoPrivate(name);
123 break; 123 break;
124 } 124 }
125 case Service::NFP::CabinetMode::StartGameDataEraser: 125 case Service::NFP::CabinetMode::StartGameDataEraser:
@@ -129,7 +129,7 @@ void Cabinet::DisplayCompleted(bool apply_changes, std::string_view amiibo_name)
129 nfp_device->RestoreAmiibo(); 129 nfp_device->RestoreAmiibo();
130 break; 130 break;
131 case Service::NFP::CabinetMode::StartFormatter: 131 case Service::NFP::CabinetMode::StartFormatter:
132 nfp_device->DeleteAllData(); 132 nfp_device->Format();
133 break; 133 break;
134 default: 134 default:
135 UNIMPLEMENTED_MSG("Unknown CabinetMode={}", applet_input_common.applet_mode); 135 UNIMPLEMENTED_MSG("Unknown CabinetMode={}", applet_input_common.applet_mode);
diff --git a/src/core/hle/service/glue/arp.cpp b/src/core/hle/service/glue/arp.cpp
index 929dcca0d..ed6fcb5f6 100644
--- a/src/core/hle/service/glue/arp.cpp
+++ b/src/core/hle/service/glue/arp.cpp
@@ -18,14 +18,14 @@ namespace {
18std::optional<u64> GetTitleIDForProcessID(const Core::System& system, u64 process_id) { 18std::optional<u64> GetTitleIDForProcessID(const Core::System& system, u64 process_id) {
19 const auto& list = system.Kernel().GetProcessList(); 19 const auto& list = system.Kernel().GetProcessList();
20 const auto iter = std::find_if(list.begin(), list.end(), [&process_id](const auto& process) { 20 const auto iter = std::find_if(list.begin(), list.end(), [&process_id](const auto& process) {
21 return process->GetProcessID() == process_id; 21 return process->GetProcessId() == process_id;
22 }); 22 });
23 23
24 if (iter == list.end()) { 24 if (iter == list.end()) {
25 return std::nullopt; 25 return std::nullopt;
26 } 26 }
27 27
28 return (*iter)->GetProgramID(); 28 return (*iter)->GetProgramId();
29} 29}
30} // Anonymous namespace 30} // Anonymous namespace
31 31
diff --git a/src/core/hle/service/hid/controllers/gesture.cpp b/src/core/hle/service/hid/controllers/gesture.cpp
index de0090cc5..03432f7cb 100644
--- a/src/core/hle/service/hid/controllers/gesture.cpp
+++ b/src/core/hle/service/hid/controllers/gesture.cpp
@@ -55,7 +55,7 @@ void Controller_Gesture::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
55 static_cast<f32>(shared_memory->gesture_lifo.timestamp - last_update_timestamp) / 55 static_cast<f32>(shared_memory->gesture_lifo.timestamp - last_update_timestamp) /
56 (1000 * 1000 * 1000); 56 (1000 * 1000 * 1000);
57 57
58 // Only update if necesary 58 // Only update if necessary
59 if (!ShouldUpdateGesture(gesture, time_difference)) { 59 if (!ShouldUpdateGesture(gesture, time_difference)) {
60 return; 60 return;
61 } 61 }
diff --git a/src/core/hle/service/hid/controllers/stubbed.cpp b/src/core/hle/service/hid/controllers/stubbed.cpp
index df9ee0c3f..9e2f3ab21 100644
--- a/src/core/hle/service/hid/controllers/stubbed.cpp
+++ b/src/core/hle/service/hid/controllers/stubbed.cpp
@@ -26,7 +26,7 @@ void Controller_Stubbed::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
26 } 26 }
27 27
28 CommonHeader header{}; 28 CommonHeader header{};
29 header.timestamp = core_timing.GetCPUTicks(); 29 header.timestamp = core_timing.GetGlobalTimeNs().count();
30 header.total_entry_count = 17; 30 header.total_entry_count = 17;
31 header.entry_count = 0; 31 header.entry_count = 0;
32 header.last_entry_index = 0; 32 header.last_entry_index = 0;
diff --git a/src/core/hle/service/hid/controllers/touchscreen.cpp b/src/core/hle/service/hid/controllers/touchscreen.cpp
index d90a4e732..3ef91df4b 100644
--- a/src/core/hle/service/hid/controllers/touchscreen.cpp
+++ b/src/core/hle/service/hid/controllers/touchscreen.cpp
@@ -32,7 +32,7 @@ void Controller_Touchscreen::OnInit() {}
32void Controller_Touchscreen::OnRelease() {} 32void Controller_Touchscreen::OnRelease() {}
33 33
34void Controller_Touchscreen::OnUpdate(const Core::Timing::CoreTiming& core_timing) { 34void Controller_Touchscreen::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
35 shared_memory->touch_screen_lifo.timestamp = core_timing.GetCPUTicks(); 35 shared_memory->touch_screen_lifo.timestamp = core_timing.GetGlobalTimeNs().count();
36 36
37 if (!IsControllerActivated()) { 37 if (!IsControllerActivated()) {
38 shared_memory->touch_screen_lifo.buffer_count = 0; 38 shared_memory->touch_screen_lifo.buffer_count = 0;
@@ -85,7 +85,7 @@ void Controller_Touchscreen::OnUpdate(const Core::Timing::CoreTiming& core_timin
85 const auto active_fingers_count = 85 const auto active_fingers_count =
86 static_cast<std::size_t>(std::distance(active_fingers.begin(), end_iter)); 86 static_cast<std::size_t>(std::distance(active_fingers.begin(), end_iter));
87 87
88 const u64 tick = core_timing.GetCPUTicks(); 88 const u64 timestamp = static_cast<u64>(core_timing.GetGlobalTimeNs().count());
89 const auto& last_entry = shared_memory->touch_screen_lifo.ReadCurrentEntry().state; 89 const auto& last_entry = shared_memory->touch_screen_lifo.ReadCurrentEntry().state;
90 90
91 next_state.sampling_number = last_entry.sampling_number + 1; 91 next_state.sampling_number = last_entry.sampling_number + 1;
@@ -102,8 +102,8 @@ void Controller_Touchscreen::OnUpdate(const Core::Timing::CoreTiming& core_timin
102 touch_entry.diameter_x = Settings::values.touchscreen.diameter_x; 102 touch_entry.diameter_x = Settings::values.touchscreen.diameter_x;
103 touch_entry.diameter_y = Settings::values.touchscreen.diameter_y; 103 touch_entry.diameter_y = Settings::values.touchscreen.diameter_y;
104 touch_entry.rotation_angle = Settings::values.touchscreen.rotation_angle; 104 touch_entry.rotation_angle = Settings::values.touchscreen.rotation_angle;
105 touch_entry.delta_time = tick - active_fingers[id].last_touch; 105 touch_entry.delta_time = timestamp - active_fingers[id].last_touch;
106 fingers[active_fingers[id].id].last_touch = tick; 106 fingers[active_fingers[id].id].last_touch = timestamp;
107 touch_entry.finger = active_fingers[id].id; 107 touch_entry.finger = active_fingers[id].id;
108 touch_entry.attribute.raw = active_fingers[id].attribute.raw; 108 touch_entry.attribute.raw = active_fingers[id].attribute.raw;
109 } else { 109 } else {
diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp
index 56c7275df..4529ad643 100644
--- a/src/core/hle/service/hid/hid.cpp
+++ b/src/core/hle/service/hid/hid.cpp
@@ -690,7 +690,7 @@ void Hid::ResetSixAxisSensorFusionParameters(HLERequestContext& ctx) {
690 690
691 const auto parameters{rp.PopRaw<Parameters>()}; 691 const auto parameters{rp.PopRaw<Parameters>()};
692 692
693 // Since these parameters are unknow just use what HW outputs 693 // Since these parameters are unknown just use what HW outputs
694 const Core::HID::SixAxisSensorFusionParameters fusion_parameters{ 694 const Core::HID::SixAxisSensorFusionParameters fusion_parameters{
695 .parameter1 = 0.03f, 695 .parameter1 = 0.03f,
696 .parameter2 = 0.4f, 696 .parameter2 = 0.4f,
diff --git a/src/core/hle/service/hid/irsensor/image_transfer_processor.cpp b/src/core/hle/service/hid/irsensor/image_transfer_processor.cpp
index bc896a1e3..a268750cd 100644
--- a/src/core/hle/service/hid/irsensor/image_transfer_processor.cpp
+++ b/src/core/hle/service/hid/irsensor/image_transfer_processor.cpp
@@ -51,7 +51,7 @@ void ImageTransferProcessor::OnControllerUpdate(Core::HID::ControllerTriggerType
51 51
52 const auto camera_data = npad_device->GetCamera(); 52 const auto camera_data = npad_device->GetCamera();
53 53
54 // This indicates how much ambient light is precent 54 // This indicates how much ambient light is present
55 processor_state.ambient_noise_level = Core::IrSensor::CameraAmbientNoiseLevel::Low; 55 processor_state.ambient_noise_level = Core::IrSensor::CameraAmbientNoiseLevel::Low;
56 processor_state.sampling_number = camera_data.sample; 56 processor_state.sampling_number = camera_data.sample;
57 57
diff --git a/src/core/hle/service/hle_ipc.cpp b/src/core/hle/service/hle_ipc.cpp
index c221ffe11..cca697c64 100644
--- a/src/core/hle/service/hle_ipc.cpp
+++ b/src/core/hle/service/hle_ipc.cpp
@@ -303,7 +303,7 @@ Result HLERequestContext::WriteToOutgoingCommandBuffer(Kernel::KThread& requesti
303 } 303 }
304 304
305 // Copy the translated command buffer back into the thread's command buffer area. 305 // Copy the translated command buffer back into the thread's command buffer area.
306 memory.WriteBlock(owner_process, requesting_thread.GetTLSAddress(), cmd_buf.data(), 306 memory.WriteBlock(owner_process, requesting_thread.GetTlsAddress(), cmd_buf.data(),
307 write_size * sizeof(u32)); 307 write_size * sizeof(u32));
308 308
309 return ResultSuccess; 309 return ResultSuccess;
diff --git a/src/core/hle/service/ipc_helpers.h b/src/core/hle/service/ipc_helpers.h
index 8703b57ca..e4cb4e1f2 100644
--- a/src/core/hle/service/ipc_helpers.h
+++ b/src/core/hle/service/ipc_helpers.h
@@ -155,7 +155,7 @@ public:
155 Kernel::LimitableResource::SessionCountMax, 1); 155 Kernel::LimitableResource::SessionCountMax, 1);
156 156
157 auto* session = Kernel::KSession::Create(kernel); 157 auto* session = Kernel::KSession::Create(kernel);
158 session->Initialize(nullptr, iface->GetServiceName()); 158 session->Initialize(nullptr, 0);
159 159
160 auto next_manager = std::make_shared<Service::SessionRequestManager>( 160 auto next_manager = std::make_shared<Service::SessionRequestManager>(
161 kernel, manager->GetServerManager()); 161 kernel, manager->GetServerManager());
diff --git a/src/core/hle/service/nfp/amiibo_crypto.cpp b/src/core/hle/service/nfp/amiibo_crypto.cpp
index ffb2f959c..ad73edbda 100644
--- a/src/core/hle/service/nfp/amiibo_crypto.cpp
+++ b/src/core/hle/service/nfp/amiibo_crypto.cpp
@@ -80,13 +80,16 @@ NTAG215File NfcDataToEncodedData(const EncryptedNTAG215File& nfc_data) {
80 encoded_data.hmac_data = nfc_data.user_memory.hmac_data; 80 encoded_data.hmac_data = nfc_data.user_memory.hmac_data;
81 encoded_data.constant_value = nfc_data.user_memory.constant_value; 81 encoded_data.constant_value = nfc_data.user_memory.constant_value;
82 encoded_data.write_counter = nfc_data.user_memory.write_counter; 82 encoded_data.write_counter = nfc_data.user_memory.write_counter;
83 encoded_data.amiibo_version = nfc_data.user_memory.amiibo_version;
83 encoded_data.settings = nfc_data.user_memory.settings; 84 encoded_data.settings = nfc_data.user_memory.settings;
84 encoded_data.owner_mii = nfc_data.user_memory.owner_mii; 85 encoded_data.owner_mii = nfc_data.user_memory.owner_mii;
85 encoded_data.title_id = nfc_data.user_memory.title_id; 86 encoded_data.application_id = nfc_data.user_memory.application_id;
86 encoded_data.applicaton_write_counter = nfc_data.user_memory.applicaton_write_counter; 87 encoded_data.application_write_counter = nfc_data.user_memory.application_write_counter;
87 encoded_data.application_area_id = nfc_data.user_memory.application_area_id; 88 encoded_data.application_area_id = nfc_data.user_memory.application_area_id;
89 encoded_data.application_id_byte = nfc_data.user_memory.application_id_byte;
88 encoded_data.unknown = nfc_data.user_memory.unknown; 90 encoded_data.unknown = nfc_data.user_memory.unknown;
89 encoded_data.unknown2 = nfc_data.user_memory.unknown2; 91 encoded_data.unknown2 = nfc_data.user_memory.unknown2;
92 encoded_data.application_area_crc = nfc_data.user_memory.application_area_crc;
90 encoded_data.application_area = nfc_data.user_memory.application_area; 93 encoded_data.application_area = nfc_data.user_memory.application_area;
91 encoded_data.hmac_tag = nfc_data.user_memory.hmac_tag; 94 encoded_data.hmac_tag = nfc_data.user_memory.hmac_tag;
92 encoded_data.lock_bytes = nfc_data.uuid.lock_bytes; 95 encoded_data.lock_bytes = nfc_data.uuid.lock_bytes;
@@ -111,13 +114,16 @@ EncryptedNTAG215File EncodedDataToNfcData(const NTAG215File& encoded_data) {
111 nfc_data.user_memory.hmac_data = encoded_data.hmac_data; 114 nfc_data.user_memory.hmac_data = encoded_data.hmac_data;
112 nfc_data.user_memory.constant_value = encoded_data.constant_value; 115 nfc_data.user_memory.constant_value = encoded_data.constant_value;
113 nfc_data.user_memory.write_counter = encoded_data.write_counter; 116 nfc_data.user_memory.write_counter = encoded_data.write_counter;
117 nfc_data.user_memory.amiibo_version = encoded_data.amiibo_version;
114 nfc_data.user_memory.settings = encoded_data.settings; 118 nfc_data.user_memory.settings = encoded_data.settings;
115 nfc_data.user_memory.owner_mii = encoded_data.owner_mii; 119 nfc_data.user_memory.owner_mii = encoded_data.owner_mii;
116 nfc_data.user_memory.title_id = encoded_data.title_id; 120 nfc_data.user_memory.application_id = encoded_data.application_id;
117 nfc_data.user_memory.applicaton_write_counter = encoded_data.applicaton_write_counter; 121 nfc_data.user_memory.application_write_counter = encoded_data.application_write_counter;
118 nfc_data.user_memory.application_area_id = encoded_data.application_area_id; 122 nfc_data.user_memory.application_area_id = encoded_data.application_area_id;
123 nfc_data.user_memory.application_id_byte = encoded_data.application_id_byte;
119 nfc_data.user_memory.unknown = encoded_data.unknown; 124 nfc_data.user_memory.unknown = encoded_data.unknown;
120 nfc_data.user_memory.unknown2 = encoded_data.unknown2; 125 nfc_data.user_memory.unknown2 = encoded_data.unknown2;
126 nfc_data.user_memory.application_area_crc = encoded_data.application_area_crc;
121 nfc_data.user_memory.application_area = encoded_data.application_area; 127 nfc_data.user_memory.application_area = encoded_data.application_area;
122 nfc_data.user_memory.hmac_tag = encoded_data.hmac_tag; 128 nfc_data.user_memory.hmac_tag = encoded_data.hmac_tag;
123 nfc_data.user_memory.model_info = encoded_data.model_info; 129 nfc_data.user_memory.model_info = encoded_data.model_info;
@@ -131,7 +137,7 @@ EncryptedNTAG215File EncodedDataToNfcData(const NTAG215File& encoded_data) {
131} 137}
132 138
133u32 GetTagPassword(const TagUuid& uuid) { 139u32 GetTagPassword(const TagUuid& uuid) {
134 // Verifiy that the generated password is correct 140 // Verify that the generated password is correct
135 u32 password = 0xAA ^ (uuid.uid[1] ^ uuid.uid[3]); 141 u32 password = 0xAA ^ (uuid.uid[1] ^ uuid.uid[3]);
136 password &= (0x55 ^ (uuid.uid[2] ^ uuid.uid[4])) << 8; 142 password &= (0x55 ^ (uuid.uid[2] ^ uuid.uid[4])) << 8;
137 password &= (0xAA ^ (uuid.uid[3] ^ uuid.uid[5])) << 16; 143 password &= (0xAA ^ (uuid.uid[3] ^ uuid.uid[5])) << 16;
diff --git a/src/core/hle/service/nfp/amiibo_crypto.h b/src/core/hle/service/nfp/amiibo_crypto.h
index 1fa61174e..c9fd67a39 100644
--- a/src/core/hle/service/nfp/amiibo_crypto.h
+++ b/src/core/hle/service/nfp/amiibo_crypto.h
@@ -94,7 +94,7 @@ bool LoadKeys(InternalKey& locked_secret, InternalKey& unfixed_info);
94/// Returns true if key_retail.bin exist 94/// Returns true if key_retail.bin exist
95bool IsKeyAvailable(); 95bool IsKeyAvailable();
96 96
97/// Decodes encripted amiibo data returns true if output is valid 97/// Decodes encrypted amiibo data returns true if output is valid
98bool DecodeAmiibo(const EncryptedNTAG215File& encrypted_tag_data, NTAG215File& tag_data); 98bool DecodeAmiibo(const EncryptedNTAG215File& encrypted_tag_data, NTAG215File& tag_data);
99 99
100/// Encodes plain amiibo data returns true if output is valid 100/// Encodes plain amiibo data returns true if output is valid
diff --git a/src/core/hle/service/nfp/nfp_device.cpp b/src/core/hle/service/nfp/nfp_device.cpp
index 1bdc42741..ddff90d6a 100644
--- a/src/core/hle/service/nfp/nfp_device.cpp
+++ b/src/core/hle/service/nfp/nfp_device.cpp
@@ -174,8 +174,8 @@ Result NfpDevice::StopDetection() {
174 174
175 if (device_state == DeviceState::TagFound || device_state == DeviceState::TagMounted) { 175 if (device_state == DeviceState::TagFound || device_state == DeviceState::TagMounted) {
176 CloseAmiibo(); 176 CloseAmiibo();
177 return ResultSuccess;
178 } 177 }
178
179 if (device_state == DeviceState::SearchingForTag || device_state == DeviceState::TagRemoved) { 179 if (device_state == DeviceState::SearchingForTag || device_state == DeviceState::TagRemoved) {
180 device_state = DeviceState::Initialized; 180 device_state = DeviceState::Initialized;
181 return ResultSuccess; 181 return ResultSuccess;
@@ -204,9 +204,7 @@ Result NfpDevice::Flush() {
204 const auto& current_date = GetAmiiboDate(current_posix_time); 204 const auto& current_date = GetAmiiboDate(current_posix_time);
205 if (settings.write_date.raw_date != current_date.raw_date) { 205 if (settings.write_date.raw_date != current_date.raw_date) {
206 settings.write_date = current_date; 206 settings.write_date = current_date;
207 settings.crc_counter++; 207 UpdateSettingsCrc();
208 // TODO: Find how to calculate the crc check
209 // settings.crc = CalculateCRC(settings);
210 } 208 }
211 209
212 tag_data.write_counter++; 210 tag_data.write_counter++;
@@ -318,7 +316,7 @@ Result NfpDevice::GetCommonInfo(CommonInfo& common_info) const {
318 common_info = { 316 common_info = {
319 .last_write_date = settings.write_date.GetWriteDate(), 317 .last_write_date = settings.write_date.GetWriteDate(),
320 .write_counter = tag_data.write_counter, 318 .write_counter = tag_data.write_counter,
321 .version = 0, 319 .version = tag_data.amiibo_version,
322 .application_area_size = sizeof(ApplicationArea), 320 .application_area_size = sizeof(ApplicationArea),
323 }; 321 };
324 return ResultSuccess; 322 return ResultSuccess;
@@ -370,13 +368,95 @@ Result NfpDevice::GetRegisterInfo(RegisterInfo& register_info) const {
370 .mii_char_info = manager.ConvertV3ToCharInfo(tag_data.owner_mii), 368 .mii_char_info = manager.ConvertV3ToCharInfo(tag_data.owner_mii),
371 .creation_date = settings.init_date.GetWriteDate(), 369 .creation_date = settings.init_date.GetWriteDate(),
372 .amiibo_name = GetAmiiboName(settings), 370 .amiibo_name = GetAmiiboName(settings),
373 .font_region = {}, 371 .font_region = settings.settings.font_region,
372 };
373
374 return ResultSuccess;
375}
376
377Result NfpDevice::GetAdminInfo(AdminInfo& admin_info) const {
378 if (device_state != DeviceState::TagMounted) {
379 LOG_ERROR(Service_NFC, "Wrong device state {}", device_state);
380 if (device_state == DeviceState::TagRemoved) {
381 return TagRemoved;
382 }
383 return WrongDeviceState;
384 }
385
386 if (mount_target == MountTarget::None || mount_target == MountTarget::Rom) {
387 LOG_ERROR(Service_NFC, "Amiibo is read only", device_state);
388 return WrongDeviceState;
389 }
390
391 u8 flags = static_cast<u8>(tag_data.settings.settings.raw >> 0x4);
392 if (tag_data.settings.settings.amiibo_initialized == 0) {
393 flags = flags & 0xfe;
394 }
395
396 u64 application_id = 0;
397 u32 application_area_id = 0;
398 AppAreaVersion app_area_version = AppAreaVersion::NotSet;
399 if (tag_data.settings.settings.appdata_initialized != 0) {
400 application_id = tag_data.application_id;
401 app_area_version =
402 static_cast<AppAreaVersion>(application_id >> application_id_version_offset & 0xf);
403
404 // Restore application id to original value
405 if (application_id >> 0x38 != 0) {
406 const u8 application_byte = tag_data.application_id_byte & 0xf;
407 application_id = RemoveVersionByte(application_id) |
408 (static_cast<u64>(application_byte) << application_id_version_offset);
409 }
410
411 application_area_id = tag_data.application_area_id;
412 }
413
414 // TODO: Validate this data
415 admin_info = {
416 .application_id = application_id,
417 .application_area_id = application_area_id,
418 .crc_change_counter = tag_data.settings.crc_counter,
419 .flags = flags,
420 .tag_type = PackedTagType::Type2,
421 .app_area_version = app_area_version,
374 }; 422 };
375 423
376 return ResultSuccess; 424 return ResultSuccess;
377} 425}
378 426
379Result NfpDevice::SetNicknameAndOwner(const AmiiboName& amiibo_name) { 427Result NfpDevice::DeleteRegisterInfo() {
428 if (device_state != DeviceState::TagMounted) {
429 LOG_ERROR(Service_NFC, "Wrong device state {}", device_state);
430 if (device_state == DeviceState::TagRemoved) {
431 return TagRemoved;
432 }
433 return WrongDeviceState;
434 }
435
436 if (mount_target == MountTarget::None || mount_target == MountTarget::Rom) {
437 LOG_ERROR(Service_NFC, "Amiibo is read only", device_state);
438 return WrongDeviceState;
439 }
440
441 if (tag_data.settings.settings.amiibo_initialized == 0) {
442 return RegistrationIsNotInitialized;
443 }
444
445 Common::TinyMT rng{};
446 rng.GenerateRandomBytes(&tag_data.owner_mii, sizeof(tag_data.owner_mii));
447 rng.GenerateRandomBytes(&tag_data.settings.amiibo_name, sizeof(tag_data.settings.amiibo_name));
448 rng.GenerateRandomBytes(&tag_data.unknown, sizeof(u8));
449 rng.GenerateRandomBytes(&tag_data.unknown2[0], sizeof(u32));
450 rng.GenerateRandomBytes(&tag_data.unknown2[1], sizeof(u32));
451 rng.GenerateRandomBytes(&tag_data.application_area_crc, sizeof(u32));
452 rng.GenerateRandomBytes(&tag_data.settings.init_date, sizeof(u32));
453 tag_data.settings.settings.font_region.Assign(0);
454 tag_data.settings.settings.amiibo_initialized.Assign(0);
455
456 return Flush();
457}
458
459Result NfpDevice::SetRegisterInfoPrivate(const AmiiboName& amiibo_name) {
380 if (device_state != DeviceState::TagMounted) { 460 if (device_state != DeviceState::TagMounted) {
381 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); 461 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
382 if (device_state == DeviceState::TagRemoved) { 462 if (device_state == DeviceState::TagRemoved) {
@@ -393,16 +473,23 @@ Result NfpDevice::SetNicknameAndOwner(const AmiiboName& amiibo_name) {
393 Service::Mii::MiiManager manager; 473 Service::Mii::MiiManager manager;
394 auto& settings = tag_data.settings; 474 auto& settings = tag_data.settings;
395 475
396 settings.init_date = GetAmiiboDate(current_posix_time); 476 if (tag_data.settings.settings.amiibo_initialized == 0) {
397 settings.write_date = GetAmiiboDate(current_posix_time); 477 settings.init_date = GetAmiiboDate(current_posix_time);
398 settings.crc_counter++; 478 settings.write_date.raw_date = 0;
399 // TODO: Find how to calculate the crc check 479 }
400 // settings.crc = CalculateCRC(settings);
401 480
402 SetAmiiboName(settings, amiibo_name); 481 SetAmiiboName(settings, amiibo_name);
403 tag_data.owner_mii = manager.ConvertCharInfoToV3(manager.BuildDefault(0)); 482 tag_data.owner_mii = manager.ConvertCharInfoToV3(manager.BuildDefault(0));
483 tag_data.unknown = 0;
484 tag_data.unknown2[6] = 0;
485 settings.country_code_id = 0;
486 settings.settings.font_region.Assign(0);
404 settings.settings.amiibo_initialized.Assign(1); 487 settings.settings.amiibo_initialized.Assign(1);
405 488
489 // TODO: this is a mix of tag.file input
490 std::array<u8, 0x7e> unknown_input{};
491 tag_data.application_area_crc = CalculateCrc(unknown_input);
492
406 return Flush(); 493 return Flush();
407} 494}
408 495
@@ -425,23 +512,17 @@ Result NfpDevice::RestoreAmiibo() {
425 return ResultSuccess; 512 return ResultSuccess;
426} 513}
427 514
428Result NfpDevice::DeleteAllData() { 515Result NfpDevice::Format() {
429 const auto result = DeleteApplicationArea(); 516 auto result1 = DeleteApplicationArea();
430 if (result.IsError()) { 517 auto result2 = DeleteRegisterInfo();
431 return result;
432 }
433 518
434 if (device_state != DeviceState::TagMounted) { 519 if (result1.IsError()) {
435 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); 520 return result1;
436 if (device_state == DeviceState::TagRemoved) {
437 return TagRemoved;
438 }
439 return WrongDeviceState;
440 } 521 }
441 522
442 Common::TinyMT rng{}; 523 if (result2.IsError()) {
443 rng.GenerateRandomBytes(&tag_data.owner_mii, sizeof(tag_data.owner_mii)); 524 return result2;
444 tag_data.settings.settings.amiibo_initialized.Assign(0); 525 }
445 526
446 return Flush(); 527 return Flush();
447} 528}
@@ -569,7 +650,10 @@ Result NfpDevice::SetApplicationArea(std::span<const u8> data) {
569 rng.GenerateRandomBytes(tag_data.application_area.data() + data.size(), 650 rng.GenerateRandomBytes(tag_data.application_area.data() + data.size(),
570 sizeof(ApplicationArea) - data.size()); 651 sizeof(ApplicationArea) - data.size());
571 652
572 tag_data.applicaton_write_counter++; 653 if (tag_data.application_write_counter != counter_limit) {
654 tag_data.application_write_counter++;
655 }
656
573 is_data_moddified = true; 657 is_data_moddified = true;
574 658
575 return ResultSuccess; 659 return ResultSuccess;
@@ -617,14 +701,25 @@ Result NfpDevice::RecreateApplicationArea(u32 access_id, std::span<const u8> dat
617 rng.GenerateRandomBytes(tag_data.application_area.data() + data.size(), 701 rng.GenerateRandomBytes(tag_data.application_area.data() + data.size(),
618 sizeof(ApplicationArea) - data.size()); 702 sizeof(ApplicationArea) - data.size());
619 703
620 // TODO: Investigate why the title id needs to be moddified 704 if (tag_data.application_write_counter != counter_limit) {
621 tag_data.title_id = system.GetApplicationProcessProgramID(); 705 tag_data.application_write_counter++;
622 tag_data.title_id = tag_data.title_id | 0x30000000ULL; 706 }
707
708 const u64 application_id = system.GetApplicationProcessProgramID();
709
710 tag_data.application_id_byte =
711 static_cast<u8>(application_id >> application_id_version_offset & 0xf);
712 tag_data.application_id =
713 RemoveVersionByte(application_id) |
714 (static_cast<u64>(AppAreaVersion::NintendoSwitch) << application_id_version_offset);
623 tag_data.settings.settings.appdata_initialized.Assign(1); 715 tag_data.settings.settings.appdata_initialized.Assign(1);
624 tag_data.application_area_id = access_id; 716 tag_data.application_area_id = access_id;
625 tag_data.applicaton_write_counter++;
626 tag_data.unknown = {}; 717 tag_data.unknown = {};
627 718
719 // TODO: this is a mix of tag_data input
720 std::array<u8, 0x7e> unknown_input{};
721 tag_data.application_area_crc = CalculateCrc(unknown_input);
722
628 return Flush(); 723 return Flush();
629} 724}
630 725
@@ -642,12 +737,20 @@ Result NfpDevice::DeleteApplicationArea() {
642 return WrongDeviceState; 737 return WrongDeviceState;
643 } 738 }
644 739
740 if (tag_data.settings.settings.appdata_initialized == 0) {
741 return ApplicationAreaIsNotInitialized;
742 }
743
744 if (tag_data.application_write_counter != counter_limit) {
745 tag_data.application_write_counter++;
746 }
747
645 Common::TinyMT rng{}; 748 Common::TinyMT rng{};
646 rng.GenerateRandomBytes(tag_data.application_area.data(), sizeof(ApplicationArea)); 749 rng.GenerateRandomBytes(tag_data.application_area.data(), sizeof(ApplicationArea));
647 rng.GenerateRandomBytes(&tag_data.title_id, sizeof(u64)); 750 rng.GenerateRandomBytes(&tag_data.application_id, sizeof(u64));
648 rng.GenerateRandomBytes(&tag_data.application_area_id, sizeof(u32)); 751 rng.GenerateRandomBytes(&tag_data.application_area_id, sizeof(u32));
752 rng.GenerateRandomBytes(&tag_data.application_id_byte, sizeof(u8));
649 tag_data.settings.settings.appdata_initialized.Assign(0); 753 tag_data.settings.settings.appdata_initialized.Assign(0);
650 tag_data.applicaton_write_counter++;
651 tag_data.unknown = {}; 754 tag_data.unknown = {};
652 755
653 return Flush(); 756 return Flush();
@@ -719,4 +822,45 @@ AmiiboDate NfpDevice::GetAmiiboDate(s64 posix_time) const {
719 return amiibo_date; 822 return amiibo_date;
720} 823}
721 824
825u64 NfpDevice::RemoveVersionByte(u64 application_id) const {
826 return application_id & ~(0xfULL << application_id_version_offset);
827}
828
829void NfpDevice::UpdateSettingsCrc() {
830 auto& settings = tag_data.settings;
831
832 if (settings.crc_counter != counter_limit) {
833 settings.crc_counter++;
834 }
835
836 // TODO: this reads data from a global, find what it is
837 std::array<u8, 8> unknown_input{};
838 settings.crc = CalculateCrc(unknown_input);
839}
840
841u32 NfpDevice::CalculateCrc(std::span<const u8> data) {
842 constexpr u32 magic = 0xedb88320;
843 u32 crc = 0xffffffff;
844
845 if (data.size() == 0) {
846 return 0;
847 }
848
849 for (u8 input : data) {
850 u32 temp = (crc ^ input) >> 1;
851 if (((crc ^ input) & 1) != 0) {
852 temp = temp ^ magic;
853 }
854
855 for (std::size_t step = 0; step < 7; ++step) {
856 crc = temp >> 1;
857 if ((temp & 1) != 0) {
858 crc = temp >> 1 ^ magic;
859 }
860 }
861 }
862
863 return ~crc;
864}
865
722} // namespace Service::NFP 866} // namespace Service::NFP
diff --git a/src/core/hle/service/nfp/nfp_device.h b/src/core/hle/service/nfp/nfp_device.h
index b6a46f2ac..06386401d 100644
--- a/src/core/hle/service/nfp/nfp_device.h
+++ b/src/core/hle/service/nfp/nfp_device.h
@@ -47,10 +47,12 @@ public:
47 Result GetCommonInfo(CommonInfo& common_info) const; 47 Result GetCommonInfo(CommonInfo& common_info) const;
48 Result GetModelInfo(ModelInfo& model_info) const; 48 Result GetModelInfo(ModelInfo& model_info) const;
49 Result GetRegisterInfo(RegisterInfo& register_info) const; 49 Result GetRegisterInfo(RegisterInfo& register_info) const;
50 Result GetAdminInfo(AdminInfo& admin_info) const;
50 51
51 Result SetNicknameAndOwner(const AmiiboName& amiibo_name); 52 Result DeleteRegisterInfo();
53 Result SetRegisterInfoPrivate(const AmiiboName& amiibo_name);
52 Result RestoreAmiibo(); 54 Result RestoreAmiibo();
53 Result DeleteAllData(); 55 Result Format();
54 56
55 Result OpenApplicationArea(u32 access_id); 57 Result OpenApplicationArea(u32 access_id);
56 Result GetApplicationAreaId(u32& application_area_id) const; 58 Result GetApplicationAreaId(u32& application_area_id) const;
@@ -76,6 +78,9 @@ private:
76 AmiiboName GetAmiiboName(const AmiiboSettings& settings) const; 78 AmiiboName GetAmiiboName(const AmiiboSettings& settings) const;
77 void SetAmiiboName(AmiiboSettings& settings, const AmiiboName& amiibo_name); 79 void SetAmiiboName(AmiiboSettings& settings, const AmiiboName& amiibo_name);
78 AmiiboDate GetAmiiboDate(s64 posix_time) const; 80 AmiiboDate GetAmiiboDate(s64 posix_time) const;
81 u64 RemoveVersionByte(u64 application_id) const;
82 void UpdateSettingsCrc();
83 u32 CalculateCrc(std::span<const u8>);
79 84
80 bool is_controller_set{}; 85 bool is_controller_set{};
81 int callback_key; 86 int callback_key;
diff --git a/src/core/hle/service/nfp/nfp_types.h b/src/core/hle/service/nfp/nfp_types.h
index fc228c2b2..142343d6e 100644
--- a/src/core/hle/service/nfp/nfp_types.h
+++ b/src/core/hle/service/nfp/nfp_types.h
@@ -10,6 +10,8 @@
10 10
11namespace Service::NFP { 11namespace Service::NFP {
12static constexpr std::size_t amiibo_name_length = 0xA; 12static constexpr std::size_t amiibo_name_length = 0xA;
13static constexpr std::size_t application_id_version_offset = 0x1c;
14static constexpr std::size_t counter_limit = 0xffff;
13 15
14enum class ServiceType : u32 { 16enum class ServiceType : u32 {
15 User, 17 User,
@@ -99,6 +101,14 @@ enum class TagProtocol : u32 {
99 All = 0xFFFFFFFFU, 101 All = 0xFFFFFFFFU,
100}; 102};
101 103
104enum class AppAreaVersion : u8 {
105 Nintendo3DS = 0,
106 NintendoWiiU = 1,
107 Nintendo3DSv2 = 2,
108 NintendoSwitch = 3,
109 NotSet = 0xFF,
110};
111
102enum class CabinetMode : u8 { 112enum class CabinetMode : u8 {
103 StartNicknameAndOwnerSettings, 113 StartNicknameAndOwnerSettings,
104 StartGameDataEraser, 114 StartGameDataEraser,
@@ -197,6 +207,7 @@ struct Settings {
197 union { 207 union {
198 u8 raw{}; 208 u8 raw{};
199 209
210 BitField<0, 4, u8> font_region;
200 BitField<4, 1, u8> amiibo_initialized; 211 BitField<4, 1, u8> amiibo_initialized;
201 BitField<5, 1, u8> appdata_initialized; 212 BitField<5, 1, u8> appdata_initialized;
202 }; 213 };
@@ -236,18 +247,20 @@ static_assert(sizeof(NTAG215Password) == 0x8, "NTAG215Password is an invalid siz
236struct EncryptedAmiiboFile { 247struct EncryptedAmiiboFile {
237 u8 constant_value; // Must be A5 248 u8 constant_value; // Must be A5
238 u16_be write_counter; // Number of times the amiibo has been written? 249 u16_be write_counter; // Number of times the amiibo has been written?
239 INSERT_PADDING_BYTES(0x1); // Unknown 1 250 u8 amiibo_version; // Amiibo file version
240 AmiiboSettings settings; // Encrypted amiibo settings 251 AmiiboSettings settings; // Encrypted amiibo settings
241 HashData hmac_tag; // Hash 252 HashData hmac_tag; // Hash
242 AmiiboModelInfo model_info; // Encrypted amiibo model info 253 AmiiboModelInfo model_info; // Encrypted amiibo model info
243 HashData keygen_salt; // Salt 254 HashData keygen_salt; // Salt
244 HashData hmac_data; // Hash 255 HashData hmac_data; // Hash
245 Service::Mii::Ver3StoreData owner_mii; // Encrypted Mii data 256 Service::Mii::Ver3StoreData owner_mii; // Encrypted Mii data
246 u64_be title_id; // Encrypted Game id 257 u64_be application_id; // Encrypted Game id
247 u16_be applicaton_write_counter; // Encrypted Counter 258 u16_be application_write_counter; // Encrypted Counter
248 u32_be application_area_id; // Encrypted Game id 259 u32_be application_area_id; // Encrypted Game id
249 std::array<u8, 0x2> unknown; 260 u8 application_id_byte;
250 std::array<u32, 0x8> unknown2; 261 u8 unknown;
262 std::array<u32, 0x7> unknown2;
263 u32_be application_area_crc;
251 ApplicationArea application_area; // Encrypted Game data 264 ApplicationArea application_area; // Encrypted Game data
252}; 265};
253static_assert(sizeof(EncryptedAmiiboFile) == 0x1F8, "AmiiboFile is an invalid size"); 266static_assert(sizeof(EncryptedAmiiboFile) == 0x1F8, "AmiiboFile is an invalid size");
@@ -259,14 +272,16 @@ struct NTAG215File {
259 HashData hmac_data; // Hash 272 HashData hmac_data; // Hash
260 u8 constant_value; // Must be A5 273 u8 constant_value; // Must be A5
261 u16_be write_counter; // Number of times the amiibo has been written? 274 u16_be write_counter; // Number of times the amiibo has been written?
262 INSERT_PADDING_BYTES(0x1); // Unknown 1 275 u8 amiibo_version; // Amiibo file version
263 AmiiboSettings settings; 276 AmiiboSettings settings;
264 Service::Mii::Ver3StoreData owner_mii; // Encrypted Mii data 277 Service::Mii::Ver3StoreData owner_mii; // Mii data
265 u64_be title_id; 278 u64_be application_id; // Game id
266 u16_be applicaton_write_counter; // Encrypted Counter 279 u16_be application_write_counter; // Counter
267 u32_be application_area_id; 280 u32_be application_area_id;
268 std::array<u8, 0x2> unknown; 281 u8 application_id_byte;
269 std::array<u32, 0x8> unknown2; 282 u8 unknown;
283 std::array<u32, 0x7> unknown2;
284 u32_be application_area_crc;
270 ApplicationArea application_area; // Encrypted Game data 285 ApplicationArea application_area; // Encrypted Game data
271 HashData hmac_tag; // Hash 286 HashData hmac_tag; // Hash
272 UniqueSerialNumber uid; // Unique serial number 287 UniqueSerialNumber uid; // Unique serial number
@@ -336,6 +351,18 @@ struct RegisterInfo {
336}; 351};
337static_assert(sizeof(RegisterInfo) == 0x100, "RegisterInfo is an invalid size"); 352static_assert(sizeof(RegisterInfo) == 0x100, "RegisterInfo is an invalid size");
338 353
354struct AdminInfo {
355 u64 application_id;
356 u32 application_area_id;
357 u16 crc_change_counter;
358 u8 flags;
359 PackedTagType tag_type;
360 AppAreaVersion app_area_version;
361 INSERT_PADDING_BYTES(0x7);
362 INSERT_PADDING_BYTES(0x28);
363};
364static_assert(sizeof(AdminInfo) == 0x40, "AdminInfo is an invalid size");
365
339struct SectorKey { 366struct SectorKey {
340 MifareCmd command; 367 MifareCmd command;
341 u8 unknown; // Usually 1 368 u8 unknown; // Usually 1
diff --git a/src/core/hle/service/ns/iplatform_service_manager.cpp b/src/core/hle/service/ns/iplatform_service_manager.cpp
index cd2705881..6c2f5e70b 100644
--- a/src/core/hle/service/ns/iplatform_service_manager.cpp
+++ b/src/core/hle/service/ns/iplatform_service_manager.cpp
@@ -119,7 +119,7 @@ struct IPlatformServiceManager::Impl {
119 break; 119 break;
120 } 120 }
121 121
122 // Derive key withing inverse xor 122 // Derive key within inverse xor
123 const u32 KEY = GetU32Swapped(input.data() + cur_offset) ^ EXPECTED_MAGIC; 123 const u32 KEY = GetU32Swapped(input.data() + cur_offset) ^ EXPECTED_MAGIC;
124 const u32 SIZE = GetU32Swapped(input.data() + cur_offset + 4) ^ KEY; 124 const u32 SIZE = GetU32Swapped(input.data() + cur_offset + 4) ^ KEY;
125 shared_font_regions.push_back(FontRegion{cur_offset + 8, SIZE}); 125 shared_font_regions.push_back(FontRegion{cur_offset + 8, SIZE});
diff --git a/src/core/hle/service/nvdrv/devices/nvdevice.h b/src/core/hle/service/nvdrv/devices/nvdevice.h
index c562e04d2..ab1f30f9e 100644
--- a/src/core/hle/service/nvdrv/devices/nvdevice.h
+++ b/src/core/hle/service/nvdrv/devices/nvdevice.h
@@ -59,7 +59,7 @@ public:
59 std::vector<u8>& output, std::vector<u8>& inline_output) = 0; 59 std::vector<u8>& output, std::vector<u8>& inline_output) = 0;
60 60
61 /** 61 /**
62 * Called once a device is openned 62 * Called once a device is opened
63 * @param fd The device fd 63 * @param fd The device fd
64 */ 64 */
65 virtual void OnOpen(DeviceFD fd) = 0; 65 virtual void OnOpen(DeviceFD fd) = 0;
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp
index 277afe0b4..07417f045 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp
@@ -264,7 +264,7 @@ NvResult nvmap::IocFree(std::span<const u8> input, std::vector<u8>& output) {
264 params.flags.raw = 0; 264 params.flags.raw = 0;
265 params.flags.map_uncached.Assign(freeInfo->was_uncached); 265 params.flags.map_uncached.Assign(freeInfo->was_uncached);
266 } else { 266 } else {
267 // This is possible when there's internel dups or other duplicates. 267 // This is possible when there's internal dups or other duplicates.
268 } 268 }
269 269
270 std::memcpy(output.data(), &params, sizeof(params)); 270 std::memcpy(output.data(), &params, sizeof(params));
diff --git a/src/core/hle/service/pm/pm.cpp b/src/core/hle/service/pm/pm.cpp
index ea249c26f..f9cf2dda3 100644
--- a/src/core/hle/service/pm/pm.cpp
+++ b/src/core/hle/service/pm/pm.cpp
@@ -37,12 +37,12 @@ std::optional<Kernel::KProcess*> SearchProcessList(
37void GetApplicationPidGeneric(HLERequestContext& ctx, 37void GetApplicationPidGeneric(HLERequestContext& ctx,
38 const std::vector<Kernel::KProcess*>& process_list) { 38 const std::vector<Kernel::KProcess*>& process_list) {
39 const auto process = SearchProcessList(process_list, [](const auto& proc) { 39 const auto process = SearchProcessList(process_list, [](const auto& proc) {
40 return proc->GetProcessID() == Kernel::KProcess::ProcessIDMin; 40 return proc->GetProcessId() == Kernel::KProcess::ProcessIDMin;
41 }); 41 });
42 42
43 IPC::ResponseBuilder rb{ctx, 4}; 43 IPC::ResponseBuilder rb{ctx, 4};
44 rb.Push(ResultSuccess); 44 rb.Push(ResultSuccess);
45 rb.Push(process.has_value() ? (*process)->GetProcessID() : NO_PROCESS_FOUND_PID); 45 rb.Push(process.has_value() ? (*process)->GetProcessId() : NO_PROCESS_FOUND_PID);
46} 46}
47 47
48} // Anonymous namespace 48} // Anonymous namespace
@@ -108,7 +108,7 @@ private:
108 108
109 const auto process = 109 const auto process =
110 SearchProcessList(kernel.GetProcessList(), [program_id](const auto& proc) { 110 SearchProcessList(kernel.GetProcessList(), [program_id](const auto& proc) {
111 return proc->GetProgramID() == program_id; 111 return proc->GetProgramId() == program_id;
112 }); 112 });
113 113
114 if (!process.has_value()) { 114 if (!process.has_value()) {
@@ -119,7 +119,7 @@ private:
119 119
120 IPC::ResponseBuilder rb{ctx, 4}; 120 IPC::ResponseBuilder rb{ctx, 4};
121 rb.Push(ResultSuccess); 121 rb.Push(ResultSuccess);
122 rb.Push((*process)->GetProcessID()); 122 rb.Push((*process)->GetProcessId());
123 } 123 }
124 124
125 void GetApplicationProcessId(HLERequestContext& ctx) { 125 void GetApplicationProcessId(HLERequestContext& ctx) {
@@ -136,7 +136,7 @@ private:
136 LOG_WARNING(Service_PM, "(Partial Implementation) called, pid={:016X}", pid); 136 LOG_WARNING(Service_PM, "(Partial Implementation) called, pid={:016X}", pid);
137 137
138 const auto process = SearchProcessList(kernel.GetProcessList(), [pid](const auto& proc) { 138 const auto process = SearchProcessList(kernel.GetProcessList(), [pid](const auto& proc) {
139 return proc->GetProcessID() == pid; 139 return proc->GetProcessId() == pid;
140 }); 140 });
141 141
142 if (!process.has_value()) { 142 if (!process.has_value()) {
@@ -159,7 +159,7 @@ private:
159 159
160 OverrideStatus override_status{}; 160 OverrideStatus override_status{};
161 ProgramLocation program_location{ 161 ProgramLocation program_location{
162 .program_id = (*process)->GetProgramID(), 162 .program_id = (*process)->GetProgramId(),
163 .storage_id = 0, 163 .storage_id = 0,
164 }; 164 };
165 165
@@ -194,7 +194,7 @@ private:
194 LOG_DEBUG(Service_PM, "called, process_id={:016X}", process_id); 194 LOG_DEBUG(Service_PM, "called, process_id={:016X}", process_id);
195 195
196 const auto process = SearchProcessList(process_list, [process_id](const auto& proc) { 196 const auto process = SearchProcessList(process_list, [process_id](const auto& proc) {
197 return proc->GetProcessID() == process_id; 197 return proc->GetProcessId() == process_id;
198 }); 198 });
199 199
200 if (!process.has_value()) { 200 if (!process.has_value()) {
@@ -205,7 +205,7 @@ private:
205 205
206 IPC::ResponseBuilder rb{ctx, 4}; 206 IPC::ResponseBuilder rb{ctx, 4};
207 rb.Push(ResultSuccess); 207 rb.Push(ResultSuccess);
208 rb.Push((*process)->GetProgramID()); 208 rb.Push((*process)->GetProgramId());
209 } 209 }
210 210
211 void AtmosphereGetProcessId(HLERequestContext& ctx) { 211 void AtmosphereGetProcessId(HLERequestContext& ctx) {
@@ -215,7 +215,7 @@ private:
215 LOG_DEBUG(Service_PM, "called, program_id={:016X}", program_id); 215 LOG_DEBUG(Service_PM, "called, program_id={:016X}", program_id);
216 216
217 const auto process = SearchProcessList(process_list, [program_id](const auto& proc) { 217 const auto process = SearchProcessList(process_list, [program_id](const auto& proc) {
218 return proc->GetProgramID() == program_id; 218 return proc->GetProgramId() == program_id;
219 }); 219 });
220 220
221 if (!process.has_value()) { 221 if (!process.has_value()) {
@@ -226,7 +226,7 @@ private:
226 226
227 IPC::ResponseBuilder rb{ctx, 4}; 227 IPC::ResponseBuilder rb{ctx, 4};
228 rb.Push(ResultSuccess); 228 rb.Push(ResultSuccess);
229 rb.Push((*process)->GetProcessID()); 229 rb.Push((*process)->GetProcessId());
230 } 230 }
231 231
232 const std::vector<Kernel::KProcess*>& process_list; 232 const std::vector<Kernel::KProcess*>& process_list;
diff --git a/src/core/hle/service/server_manager.cpp b/src/core/hle/service/server_manager.cpp
index bd04cd023..6b4a1291e 100644
--- a/src/core/hle/service/server_manager.cpp
+++ b/src/core/hle/service/server_manager.cpp
@@ -124,7 +124,7 @@ Result ServerManager::ManageNamedPort(const std::string& service_name,
124 124
125 // Create a new port. 125 // Create a new port.
126 auto* port = Kernel::KPort::Create(m_system.Kernel()); 126 auto* port = Kernel::KPort::Create(m_system.Kernel());
127 port->Initialize(max_sessions, false, service_name); 127 port->Initialize(max_sessions, false, 0);
128 128
129 // Register the port. 129 // Register the port.
130 Kernel::KPort::Register(m_system.Kernel(), port); 130 Kernel::KPort::Register(m_system.Kernel(), port);
diff --git a/src/core/hle/service/sm/sm.cpp b/src/core/hle/service/sm/sm.cpp
index b4046d3ce..c45be5726 100644
--- a/src/core/hle/service/sm/sm.cpp
+++ b/src/core/hle/service/sm/sm.cpp
@@ -62,7 +62,7 @@ Result ServiceManager::RegisterService(std::string name, u32 max_sessions,
62 } 62 }
63 63
64 auto* port = Kernel::KPort::Create(kernel); 64 auto* port = Kernel::KPort::Create(kernel);
65 port->Initialize(ServerSessionCountMax, false, name); 65 port->Initialize(ServerSessionCountMax, false, 0);
66 66
67 service_ports.emplace(name, port); 67 service_ports.emplace(name, port);
68 registered_services.emplace(name, handler); 68 registered_services.emplace(name, handler);
@@ -211,7 +211,7 @@ void SM::RegisterService(HLERequestContext& ctx) {
211 } 211 }
212 212
213 auto* port = Kernel::KPort::Create(kernel); 213 auto* port = Kernel::KPort::Create(kernel);
214 port->Initialize(ServerSessionCountMax, is_light, name); 214 port->Initialize(ServerSessionCountMax, is_light, 0);
215 SCOPE_EXIT({ port->GetClientPort().Close(); }); 215 SCOPE_EXIT({ port->GetClientPort().Close(); });
216 216
217 IPC::ResponseBuilder rb{ctx, 2, 0, 1, IPC::ResponseBuilder::Flags::AlwaysMoveHandles}; 217 IPC::ResponseBuilder rb{ctx, 2, 0, 1, IPC::ResponseBuilder::Flags::AlwaysMoveHandles};
diff --git a/src/core/hle/service/sm/sm_controller.cpp b/src/core/hle/service/sm/sm_controller.cpp
index 0111c8d7f..419c1df2b 100644
--- a/src/core/hle/service/sm/sm_controller.cpp
+++ b/src/core/hle/service/sm/sm_controller.cpp
@@ -44,7 +44,7 @@ void Controller::CloneCurrentObject(HLERequestContext& ctx) {
44 ASSERT(session != nullptr); 44 ASSERT(session != nullptr);
45 45
46 // Initialize the session. 46 // Initialize the session.
47 session->Initialize(nullptr, ""); 47 session->Initialize(nullptr, 0);
48 48
49 // Commit the session reservation. 49 // Commit the session reservation.
50 session_reservation.Commit(); 50 session_reservation.Commit();
diff --git a/src/core/internal_network/network.cpp b/src/core/internal_network/network.cpp
index f85c73ca6..bf97b0ebc 100644
--- a/src/core/internal_network/network.cpp
+++ b/src/core/internal_network/network.cpp
@@ -572,7 +572,7 @@ std::pair<s32, Errno> Socket::SendTo(u32 flags, std::span<const u8> message,
572 ASSERT(flags == 0); 572 ASSERT(flags == 0);
573 573
574 const sockaddr* to = nullptr; 574 const sockaddr* to = nullptr;
575 const int tolen = addr ? sizeof(sockaddr) : 0; 575 const int to_len = addr ? sizeof(sockaddr) : 0;
576 sockaddr host_addr_in; 576 sockaddr host_addr_in;
577 577
578 if (addr) { 578 if (addr) {
@@ -581,7 +581,7 @@ std::pair<s32, Errno> Socket::SendTo(u32 flags, std::span<const u8> message,
581 } 581 }
582 582
583 const auto result = sendto(fd, reinterpret_cast<const char*>(message.data()), 583 const auto result = sendto(fd, reinterpret_cast<const char*>(message.data()),
584 static_cast<int>(message.size()), 0, to, tolen); 584 static_cast<int>(message.size()), 0, to, to_len);
585 if (result != SOCKET_ERROR) { 585 if (result != SOCKET_ERROR) {
586 return {static_cast<s32>(result), Errno::SUCCESS}; 586 return {static_cast<s32>(result), Errno::SUCCESS};
587 } 587 }
diff --git a/src/core/memory/cheat_engine.cpp b/src/core/memory/cheat_engine.cpp
index c2d96bbec..de729955f 100644
--- a/src/core/memory/cheat_engine.cpp
+++ b/src/core/memory/cheat_engine.cpp
@@ -196,7 +196,7 @@ void CheatEngine::Initialize() {
196 }); 196 });
197 core_timing.ScheduleLoopingEvent(CHEAT_ENGINE_NS, CHEAT_ENGINE_NS, event); 197 core_timing.ScheduleLoopingEvent(CHEAT_ENGINE_NS, CHEAT_ENGINE_NS, event);
198 198
199 metadata.process_id = system.ApplicationProcess()->GetProcessID(); 199 metadata.process_id = system.ApplicationProcess()->GetProcessId();
200 metadata.title_id = system.GetApplicationProcessProgramID(); 200 metadata.title_id = system.GetApplicationProcessProgramID();
201 201
202 const auto& page_table = system.ApplicationProcess()->PageTable(); 202 const auto& page_table = system.ApplicationProcess()->PageTable();
diff --git a/src/core/perf_stats.cpp b/src/core/perf_stats.cpp
index f09c176f8..1231c0dc8 100644
--- a/src/core/perf_stats.cpp
+++ b/src/core/perf_stats.cpp
@@ -126,8 +126,8 @@ double PerfStats::GetLastFrameTimeScale() const {
126} 126}
127 127
128void SpeedLimiter::DoSpeedLimiting(microseconds current_system_time_us) { 128void SpeedLimiter::DoSpeedLimiting(microseconds current_system_time_us) {
129 if (!Settings::values.use_speed_limit.GetValue() || 129 if (Settings::values.use_multi_core.GetValue() ||
130 Settings::values.use_multi_core.GetValue()) { 130 !Settings::values.use_speed_limit.GetValue()) {
131 return; 131 return;
132 } 132 }
133 133
diff --git a/src/input_common/drivers/gc_adapter.cpp b/src/input_common/drivers/gc_adapter.cpp
index d09ff178b..3ad34884d 100644
--- a/src/input_common/drivers/gc_adapter.cpp
+++ b/src/input_common/drivers/gc_adapter.cpp
@@ -344,7 +344,7 @@ bool GCAdapter::IsVibrationEnabled([[maybe_unused]] const PadIdentifier& identif
344 344
345void GCAdapter::UpdateVibrations() { 345void GCAdapter::UpdateVibrations() {
346 // Use 8 states to keep the switching between on/off fast enough for 346 // Use 8 states to keep the switching between on/off fast enough for
347 // a human to feel different vibration strenght 347 // a human to feel different vibration strength
348 // More states == more rumble strengths == slower update time 348 // More states == more rumble strengths == slower update time
349 constexpr u8 vibration_states = 8; 349 constexpr u8 vibration_states = 8;
350 350
diff --git a/src/input_common/drivers/joycon.h b/src/input_common/drivers/joycon.h
index 473ba1b9e..5b40817e2 100644
--- a/src/input_common/drivers/joycon.h
+++ b/src/input_common/drivers/joycon.h
@@ -62,7 +62,7 @@ private:
62 /// Registers controllers, clears all data and starts the scan thread 62 /// Registers controllers, clears all data and starts the scan thread
63 void Setup(); 63 void Setup();
64 64
65 /// Actively searchs for new devices 65 /// Actively searches for new devices
66 void ScanThread(std::stop_token stop_token); 66 void ScanThread(std::stop_token stop_token);
67 67
68 /// Returns true if device is valid and not registered 68 /// Returns true if device is valid and not registered
diff --git a/src/input_common/drivers/keyboard.cpp b/src/input_common/drivers/keyboard.cpp
index 71e612fbf..2567df9af 100644
--- a/src/input_common/drivers/keyboard.cpp
+++ b/src/input_common/drivers/keyboard.cpp
@@ -24,7 +24,7 @@ constexpr PadIdentifier keyboard_modifier_identifier = {
24}; 24};
25 25
26Keyboard::Keyboard(std::string input_engine_) : InputEngine(std::move(input_engine_)) { 26Keyboard::Keyboard(std::string input_engine_) : InputEngine(std::move(input_engine_)) {
27 // Keyboard is broken into 3 diferent sets: 27 // Keyboard is broken into 3 different sets:
28 // key: Unfiltered intended for controllers. 28 // key: Unfiltered intended for controllers.
29 // keyboard_key: Allows only Settings::NativeKeyboard::Keys intended for keyboard emulation. 29 // keyboard_key: Allows only Settings::NativeKeyboard::Keys intended for keyboard emulation.
30 // keyboard_modifier: Allows only Settings::NativeKeyboard::Modifiers intended for keyboard 30 // keyboard_modifier: Allows only Settings::NativeKeyboard::Modifiers intended for keyboard
diff --git a/src/input_common/drivers/mouse.cpp b/src/input_common/drivers/mouse.cpp
index 94e92c37d..4fb2a6cfa 100644
--- a/src/input_common/drivers/mouse.cpp
+++ b/src/input_common/drivers/mouse.cpp
@@ -142,7 +142,7 @@ void Mouse::Move(int x, int y, int center_x, int center_y) {
142 return; 142 return;
143 } 143 }
144 144
145 // Make slow movements at least 3 units on lenght 145 // Make slow movements at least 3 units on length
146 if (move_distance < 3.0f) { 146 if (move_distance < 3.0f) {
147 // Normalize value 147 // Normalize value
148 mouse_change /= move_distance; 148 mouse_change /= move_distance;
@@ -154,7 +154,7 @@ void Mouse::Move(int x, int y, int center_x, int center_y) {
154 154
155 const auto last_move_distance = last_mouse_change.Length(); 155 const auto last_move_distance = last_mouse_change.Length();
156 156
157 // Make fast movements clamp to 8 units on lenght 157 // Make fast movements clamp to 8 units on length
158 if (last_move_distance > 8.0f) { 158 if (last_move_distance > 8.0f) {
159 // Normalize value 159 // Normalize value
160 last_mouse_change /= last_move_distance; 160 last_mouse_change /= last_move_distance;
diff --git a/src/input_common/drivers/sdl_driver.cpp b/src/input_common/drivers/sdl_driver.cpp
index 5c20b3426..7f9e8dbb9 100644
--- a/src/input_common/drivers/sdl_driver.cpp
+++ b/src/input_common/drivers/sdl_driver.cpp
@@ -652,12 +652,27 @@ bool SDLDriver::IsVibrationEnabled(const PadIdentifier& identifier) {
652} 652}
653 653
654void SDLDriver::SendVibrations() { 654void SDLDriver::SendVibrations() {
655 std::vector<VibrationRequest> filtered_vibrations{};
655 while (!vibration_queue.Empty()) { 656 while (!vibration_queue.Empty()) {
656 VibrationRequest request; 657 VibrationRequest request;
657 vibration_queue.Pop(request); 658 vibration_queue.Pop(request);
658 const auto joystick = GetSDLJoystickByGUID(request.identifier.guid.RawString(), 659 const auto joystick = GetSDLJoystickByGUID(request.identifier.guid.RawString(),
659 static_cast<int>(request.identifier.port)); 660 static_cast<int>(request.identifier.port));
660 joystick->RumblePlay(request.vibration); 661 const auto it = std::find_if(filtered_vibrations.begin(), filtered_vibrations.end(),
662 [request](VibrationRequest vibration) {
663 return vibration.identifier == request.identifier;
664 });
665 if (it == filtered_vibrations.end()) {
666 filtered_vibrations.push_back(std::move(request));
667 continue;
668 }
669 *it = request;
670 }
671
672 for (const auto& vibration : filtered_vibrations) {
673 const auto joystick = GetSDLJoystickByGUID(vibration.identifier.guid.RawString(),
674 static_cast<int>(vibration.identifier.port));
675 joystick->RumblePlay(vibration.vibration);
661 } 676 }
662} 677}
663 678
@@ -748,7 +763,7 @@ ButtonMapping SDLDriver::GetButtonMappingForDevice(const Common::ParamPackage& p
748 763
749 // This list is missing ZL/ZR since those are not considered buttons in SDL GameController. 764 // This list is missing ZL/ZR since those are not considered buttons in SDL GameController.
750 // We will add those afterwards 765 // We will add those afterwards
751 // This list also excludes Screenshot since theres not really a mapping for that 766 // This list also excludes Screenshot since there's not really a mapping for that
752 ButtonBindings switch_to_sdl_button; 767 ButtonBindings switch_to_sdl_button;
753 768
754 if (SDL_GameControllerGetType(controller) == SDL_CONTROLLER_TYPE_NINTENDO_SWITCH_PRO) { 769 if (SDL_GameControllerGetType(controller) == SDL_CONTROLLER_TYPE_NINTENDO_SWITCH_PRO) {
@@ -1007,7 +1022,7 @@ MotionMapping SDLDriver::GetMotionMappingForDevice(const Common::ParamPackage& p
1007 1022
1008Common::Input::ButtonNames SDLDriver::GetUIName(const Common::ParamPackage& params) const { 1023Common::Input::ButtonNames SDLDriver::GetUIName(const Common::ParamPackage& params) const {
1009 if (params.Has("button")) { 1024 if (params.Has("button")) {
1010 // TODO(German77): Find how to substitue the values for real button names 1025 // TODO(German77): Find how to substitute the values for real button names
1011 return Common::Input::ButtonNames::Value; 1026 return Common::Input::ButtonNames::Value;
1012 } 1027 }
1013 if (params.Has("hat")) { 1028 if (params.Has("hat")) {
diff --git a/src/input_common/drivers/virtual_amiibo.cpp b/src/input_common/drivers/virtual_amiibo.cpp
index 4a0268a4d..304f4c70b 100644
--- a/src/input_common/drivers/virtual_amiibo.cpp
+++ b/src/input_common/drivers/virtual_amiibo.cpp
@@ -57,7 +57,7 @@ Common::Input::NfcState VirtualAmiibo::WriteNfcData(
57 } 57 }
58 58
59 if (!nfc_file.Write(data)) { 59 if (!nfc_file.Write(data)) {
60 LOG_ERROR(Service_NFP, "Error writting to file"); 60 LOG_ERROR(Service_NFP, "Error writing to file");
61 return Common::Input::NfcState::WriteFailed; 61 return Common::Input::NfcState::WriteFailed;
62 } 62 }
63 63
diff --git a/src/input_common/helpers/joycon_driver.cpp b/src/input_common/helpers/joycon_driver.cpp
index 78cc5893c..83429a336 100644
--- a/src/input_common/helpers/joycon_driver.cpp
+++ b/src/input_common/helpers/joycon_driver.cpp
@@ -164,8 +164,8 @@ void JoyconDriver::InputThread(std::stop_token stop_token) {
164void JoyconDriver::OnNewData(std::span<u8> buffer) { 164void JoyconDriver::OnNewData(std::span<u8> buffer) {
165 const auto report_mode = static_cast<ReportMode>(buffer[0]); 165 const auto report_mode = static_cast<ReportMode>(buffer[0]);
166 166
167 // Packages can be a litte bit inconsistent. Average the delta time to provide a smoother motion 167 // Packages can be a little bit inconsistent. Average the delta time to provide a smoother
168 // experience 168 // motion experience
169 switch (report_mode) { 169 switch (report_mode) {
170 case ReportMode::STANDARD_FULL_60HZ: 170 case ReportMode::STANDARD_FULL_60HZ:
171 case ReportMode::NFC_IR_MODE_60HZ: 171 case ReportMode::NFC_IR_MODE_60HZ:
diff --git a/src/input_common/helpers/joycon_driver.h b/src/input_common/helpers/joycon_driver.h
index b52a13ecf..72a9e71dc 100644
--- a/src/input_common/helpers/joycon_driver.h
+++ b/src/input_common/helpers/joycon_driver.h
@@ -73,7 +73,7 @@ private:
73 /// Main thread, actively request new data from the handle 73 /// Main thread, actively request new data from the handle
74 void InputThread(std::stop_token stop_token); 74 void InputThread(std::stop_token stop_token);
75 75
76 /// Called everytime a valid package arrives 76 /// Called every time a valid package arrives
77 void OnNewData(std::span<u8> buffer); 77 void OnNewData(std::span<u8> buffer);
78 78
79 /// Updates device configuration to enable or disable features 79 /// Updates device configuration to enable or disable features
@@ -110,7 +110,7 @@ private:
110 bool amiibo_detected{}; 110 bool amiibo_detected{};
111 bool is_ring_disabled_by_irs{}; 111 bool is_ring_disabled_by_irs{};
112 112
113 // Harware configuration 113 // Hardware configuration
114 u8 leds{}; 114 u8 leds{};
115 ReportMode mode{}; 115 ReportMode mode{};
116 bool passive_enabled{}; // Low power mode, Ideal for multiple controllers at the same time 116 bool passive_enabled{}; // Low power mode, Ideal for multiple controllers at the same time
diff --git a/src/input_common/helpers/joycon_protocol/common_protocol.h b/src/input_common/helpers/joycon_protocol/common_protocol.h
index f44f73ba4..62cae739a 100644
--- a/src/input_common/helpers/joycon_protocol/common_protocol.h
+++ b/src/input_common/helpers/joycon_protocol/common_protocol.h
@@ -68,7 +68,7 @@ public:
68 } 68 }
69 69
70 /** 70 /**
71 * Waits for incoming data of the joycon device that matchs the subcommand 71 * Waits for incoming data of the joycon device that matches the subcommand
72 * @param sub_command type of data to be returned 72 * @param sub_command type of data to be returned
73 * @returns a buffer containing the response 73 * @returns a buffer containing the response
74 */ 74 */
@@ -137,7 +137,7 @@ public:
137 DriverResult EnableMCU(bool enable); 137 DriverResult EnableMCU(bool enable);
138 138
139 /** 139 /**
140 * Configures the MCU to the correspoinding mode 140 * Configures the MCU to the corresponding mode
141 * @param MCUConfig configuration 141 * @param MCUConfig configuration
142 */ 142 */
143 DriverResult ConfigureMCU(const MCUConfig& config); 143 DriverResult ConfigureMCU(const MCUConfig& config);
diff --git a/src/input_common/helpers/udp_protocol.cpp b/src/input_common/helpers/udp_protocol.cpp
index 994380d21..e54a8fce1 100644
--- a/src/input_common/helpers/udp_protocol.cpp
+++ b/src/input_common/helpers/udp_protocol.cpp
@@ -25,7 +25,7 @@ namespace Response {
25/** 25/**
26 * Returns Type if the packet is valid, else none 26 * Returns Type if the packet is valid, else none
27 * 27 *
28 * Note: Modifies the buffer to zero out the crc (since thats the easiest way to check without 28 * Note: Modifies the buffer to zero out the crc (since that's the easiest way to check without
29 * copying the buffer) 29 * copying the buffer)
30 */ 30 */
31std::optional<Type> Validate(u8* data, std::size_t size) { 31std::optional<Type> Validate(u8* data, std::size_t size) {
diff --git a/src/input_common/main.h b/src/input_common/main.h
index 1207d786c..d64a6cb4c 100644
--- a/src/input_common/main.h
+++ b/src/input_common/main.h
@@ -132,7 +132,7 @@ public:
132 /// Retrieves the motion mappings for the given device. 132 /// Retrieves the motion mappings for the given device.
133 [[nodiscard]] MotionMapping GetMotionMappingForDevice(const Common::ParamPackage& device) const; 133 [[nodiscard]] MotionMapping GetMotionMappingForDevice(const Common::ParamPackage& device) const;
134 134
135 /// Returns an enum contaning the name to be displayed from the input engine. 135 /// Returns an enum containing the name to be displayed from the input engine.
136 [[nodiscard]] Common::Input::ButtonNames GetButtonName( 136 [[nodiscard]] Common::Input::ButtonNames GetButtonName(
137 const Common::ParamPackage& params) const; 137 const Common::ParamPackage& params) const;
138 138
diff --git a/src/network/packet.h b/src/network/packet.h
index e69217488..9aa2a2c9c 100644
--- a/src/network/packet.h
+++ b/src/network/packet.h
@@ -9,7 +9,7 @@
9 9
10namespace Network { 10namespace Network {
11 11
12/// A class that serializes data for network transfer. It also handles endianess 12/// A class that serializes data for network transfer. It also handles endianness
13class Packet { 13class Packet {
14public: 14public:
15 Packet() = default; 15 Packet() = default;
diff --git a/src/network/room.cpp b/src/network/room.cpp
index dc5dbce7f..e456ea09c 100644
--- a/src/network/room.cpp
+++ b/src/network/room.cpp
@@ -27,7 +27,7 @@ public:
27 std::atomic<State> state{State::Closed}; ///< Current state of the room. 27 std::atomic<State> state{State::Closed}; ///< Current state of the room.
28 RoomInformation room_information; ///< Information about this room. 28 RoomInformation room_information; ///< Information about this room.
29 29
30 std::string verify_uid; ///< A GUID which may be used for verfication. 30 std::string verify_uid; ///< A GUID which may be used for verification.
31 mutable std::mutex verify_uid_mutex; ///< Mutex for verify_uid 31 mutable std::mutex verify_uid_mutex; ///< Mutex for verify_uid
32 32
33 std::string password; ///< The password required to connect to this room. 33 std::string password; ///< The password required to connect to this room.
diff --git a/src/network/room_member.h b/src/network/room_member.h
index 0d6417294..33ac18e72 100644
--- a/src/network/room_member.h
+++ b/src/network/room_member.h
@@ -71,7 +71,7 @@ public:
71 Idle, ///< Default state (i.e. not connected) 71 Idle, ///< Default state (i.e. not connected)
72 Joining, ///< The client is attempting to join a room. 72 Joining, ///< The client is attempting to join a room.
73 Joined, ///< The client is connected to the room and is ready to send/receive packets. 73 Joined, ///< The client is connected to the room and is ready to send/receive packets.
74 Moderator, ///< The client is connnected to the room and is granted mod permissions. 74 Moderator, ///< The client is connected to the room and is granted mod permissions.
75 }; 75 };
76 76
77 enum class Error : u8 { 77 enum class Error : u8 {
@@ -201,7 +201,7 @@ public:
201 201
202 /** 202 /**
203 * Binds a function to an event that will be triggered every time the State of the member 203 * Binds a function to an event that will be triggered every time the State of the member
204 * changed. The function wil be called every time the event is triggered. The callback function 204 * changed. The function will be called every time the event is triggered. The callback function
205 * must not bind or unbind a function. Doing so will cause a deadlock 205 * must not bind or unbind a function. Doing so will cause a deadlock
206 * @param callback The function to call 206 * @param callback The function to call
207 * @return A handle used for removing the function from the registered list 207 * @return A handle used for removing the function from the registered list
@@ -210,8 +210,8 @@ public:
210 210
211 /** 211 /**
212 * Binds a function to an event that will be triggered every time an error happened. The 212 * Binds a function to an event that will be triggered every time an error happened. The
213 * function wil be called every time the event is triggered. The callback function must not bind 213 * function will be called every time the event is triggered. The callback function must not
214 * or unbind a function. Doing so will cause a deadlock 214 * bind or unbind a function. Doing so will cause a deadlock
215 * @param callback The function to call 215 * @param callback The function to call
216 * @return A handle used for removing the function from the registered list 216 * @return A handle used for removing the function from the registered list
217 */ 217 */
@@ -219,7 +219,7 @@ public:
219 219
220 /** 220 /**
221 * Binds a function to an event that will be triggered every time a ProxyPacket is received. 221 * Binds a function to an event that will be triggered every time a ProxyPacket is received.
222 * The function wil be called everytime the event is triggered. 222 * The function will be called every time the event is triggered.
223 * The callback function must not bind or unbind a function. Doing so will cause a deadlock 223 * The callback function must not bind or unbind a function. Doing so will cause a deadlock
224 * @param callback The function to call 224 * @param callback The function to call
225 * @return A handle used for removing the function from the registered list 225 * @return A handle used for removing the function from the registered list
@@ -229,7 +229,7 @@ public:
229 229
230 /** 230 /**
231 * Binds a function to an event that will be triggered every time an LDNPacket is received. 231 * Binds a function to an event that will be triggered every time an LDNPacket is received.
232 * The function wil be called everytime the event is triggered. 232 * The function will be called every time the event is triggered.
233 * The callback function must not bind or unbind a function. Doing so will cause a deadlock 233 * The callback function must not bind or unbind a function. Doing so will cause a deadlock
234 * @param callback The function to call 234 * @param callback The function to call
235 * @return A handle used for removing the function from the registered list 235 * @return A handle used for removing the function from the registered list
@@ -239,7 +239,7 @@ public:
239 239
240 /** 240 /**
241 * Binds a function to an event that will be triggered every time the RoomInformation changes. 241 * Binds a function to an event that will be triggered every time the RoomInformation changes.
242 * The function wil be called every time the event is triggered. 242 * The function will be called every time the event is triggered.
243 * The callback function must not bind or unbind a function. Doing so will cause a deadlock 243 * The callback function must not bind or unbind a function. Doing so will cause a deadlock
244 * @param callback The function to call 244 * @param callback The function to call
245 * @return A handle used for removing the function from the registered list 245 * @return A handle used for removing the function from the registered list
@@ -249,7 +249,7 @@ public:
249 249
250 /** 250 /**
251 * Binds a function to an event that will be triggered every time a ChatMessage is received. 251 * Binds a function to an event that will be triggered every time a ChatMessage is received.
252 * The function wil be called every time the event is triggered. 252 * The function will be called every time the event is triggered.
253 * The callback function must not bind or unbind a function. Doing so will cause a deadlock 253 * The callback function must not bind or unbind a function. Doing so will cause a deadlock
254 * @param callback The function to call 254 * @param callback The function to call
255 * @return A handle used for removing the function from the registered list 255 * @return A handle used for removing the function from the registered list
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
index 911181c43..376a05827 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
@@ -398,162 +398,162 @@ void EmitStorageAtomicMaxF32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value
398} 398}
399 399
400void EmitGlobalAtomicIAdd32(EmitContext&) { 400void EmitGlobalAtomicIAdd32(EmitContext&) {
401 throw NotImplementedException("GLSL Instrucion"); 401 throw NotImplementedException("GLSL Instruction");
402} 402}
403 403
404void EmitGlobalAtomicSMin32(EmitContext&) { 404void EmitGlobalAtomicSMin32(EmitContext&) {
405 throw NotImplementedException("GLSL Instrucion"); 405 throw NotImplementedException("GLSL Instruction");
406} 406}
407 407
408void EmitGlobalAtomicUMin32(EmitContext&) { 408void EmitGlobalAtomicUMin32(EmitContext&) {
409 throw NotImplementedException("GLSL Instrucion"); 409 throw NotImplementedException("GLSL Instruction");
410} 410}
411 411
412void EmitGlobalAtomicSMax32(EmitContext&) { 412void EmitGlobalAtomicSMax32(EmitContext&) {
413 throw NotImplementedException("GLSL Instrucion"); 413 throw NotImplementedException("GLSL Instruction");
414} 414}
415 415
416void EmitGlobalAtomicUMax32(EmitContext&) { 416void EmitGlobalAtomicUMax32(EmitContext&) {
417 throw NotImplementedException("GLSL Instrucion"); 417 throw NotImplementedException("GLSL Instruction");
418} 418}
419 419
420void EmitGlobalAtomicInc32(EmitContext&) { 420void EmitGlobalAtomicInc32(EmitContext&) {
421 throw NotImplementedException("GLSL Instrucion"); 421 throw NotImplementedException("GLSL Instruction");
422} 422}
423 423
424void EmitGlobalAtomicDec32(EmitContext&) { 424void EmitGlobalAtomicDec32(EmitContext&) {
425 throw NotImplementedException("GLSL Instrucion"); 425 throw NotImplementedException("GLSL Instruction");
426} 426}
427 427
428void EmitGlobalAtomicAnd32(EmitContext&) { 428void EmitGlobalAtomicAnd32(EmitContext&) {
429 throw NotImplementedException("GLSL Instrucion"); 429 throw NotImplementedException("GLSL Instruction");
430} 430}
431 431
432void EmitGlobalAtomicOr32(EmitContext&) { 432void EmitGlobalAtomicOr32(EmitContext&) {
433 throw NotImplementedException("GLSL Instrucion"); 433 throw NotImplementedException("GLSL Instruction");
434} 434}
435 435
436void EmitGlobalAtomicXor32(EmitContext&) { 436void EmitGlobalAtomicXor32(EmitContext&) {
437 throw NotImplementedException("GLSL Instrucion"); 437 throw NotImplementedException("GLSL Instruction");
438} 438}
439 439
440void EmitGlobalAtomicExchange32(EmitContext&) { 440void EmitGlobalAtomicExchange32(EmitContext&) {
441 throw NotImplementedException("GLSL Instrucion"); 441 throw NotImplementedException("GLSL Instruction");
442} 442}
443 443
444void EmitGlobalAtomicIAdd64(EmitContext&) { 444void EmitGlobalAtomicIAdd64(EmitContext&) {
445 throw NotImplementedException("GLSL Instrucion"); 445 throw NotImplementedException("GLSL Instruction");
446} 446}
447 447
448void EmitGlobalAtomicSMin64(EmitContext&) { 448void EmitGlobalAtomicSMin64(EmitContext&) {
449 throw NotImplementedException("GLSL Instrucion"); 449 throw NotImplementedException("GLSL Instruction");
450} 450}
451 451
452void EmitGlobalAtomicUMin64(EmitContext&) { 452void EmitGlobalAtomicUMin64(EmitContext&) {
453 throw NotImplementedException("GLSL Instrucion"); 453 throw NotImplementedException("GLSL Instruction");
454} 454}
455 455
456void EmitGlobalAtomicSMax64(EmitContext&) { 456void EmitGlobalAtomicSMax64(EmitContext&) {
457 throw NotImplementedException("GLSL Instrucion"); 457 throw NotImplementedException("GLSL Instruction");
458} 458}
459 459
460void EmitGlobalAtomicUMax64(EmitContext&) { 460void EmitGlobalAtomicUMax64(EmitContext&) {
461 throw NotImplementedException("GLSL Instrucion"); 461 throw NotImplementedException("GLSL Instruction");
462} 462}
463 463
464void EmitGlobalAtomicInc64(EmitContext&) { 464void EmitGlobalAtomicInc64(EmitContext&) {
465 throw NotImplementedException("GLSL Instrucion"); 465 throw NotImplementedException("GLSL Instruction");
466} 466}
467 467
468void EmitGlobalAtomicDec64(EmitContext&) { 468void EmitGlobalAtomicDec64(EmitContext&) {
469 throw NotImplementedException("GLSL Instrucion"); 469 throw NotImplementedException("GLSL Instruction");
470} 470}
471 471
472void EmitGlobalAtomicAnd64(EmitContext&) { 472void EmitGlobalAtomicAnd64(EmitContext&) {
473 throw NotImplementedException("GLSL Instrucion"); 473 throw NotImplementedException("GLSL Instruction");
474} 474}
475 475
476void EmitGlobalAtomicOr64(EmitContext&) { 476void EmitGlobalAtomicOr64(EmitContext&) {
477 throw NotImplementedException("GLSL Instrucion"); 477 throw NotImplementedException("GLSL Instruction");
478} 478}
479 479
480void EmitGlobalAtomicXor64(EmitContext&) { 480void EmitGlobalAtomicXor64(EmitContext&) {
481 throw NotImplementedException("GLSL Instrucion"); 481 throw NotImplementedException("GLSL Instruction");
482} 482}
483 483
484void EmitGlobalAtomicExchange64(EmitContext&) { 484void EmitGlobalAtomicExchange64(EmitContext&) {
485 throw NotImplementedException("GLSL Instrucion"); 485 throw NotImplementedException("GLSL Instruction");
486} 486}
487 487
488void EmitGlobalAtomicIAdd32x2(EmitContext&) { 488void EmitGlobalAtomicIAdd32x2(EmitContext&) {
489 throw NotImplementedException("GLSL Instrucion"); 489 throw NotImplementedException("GLSL Instruction");
490} 490}
491 491
492void EmitGlobalAtomicSMin32x2(EmitContext&) { 492void EmitGlobalAtomicSMin32x2(EmitContext&) {
493 throw NotImplementedException("GLSL Instrucion"); 493 throw NotImplementedException("GLSL Instruction");
494} 494}
495 495
496void EmitGlobalAtomicUMin32x2(EmitContext&) { 496void EmitGlobalAtomicUMin32x2(EmitContext&) {
497 throw NotImplementedException("GLSL Instrucion"); 497 throw NotImplementedException("GLSL Instruction");
498} 498}
499 499
500void EmitGlobalAtomicSMax32x2(EmitContext&) { 500void EmitGlobalAtomicSMax32x2(EmitContext&) {
501 throw NotImplementedException("GLSL Instrucion"); 501 throw NotImplementedException("GLSL Instruction");
502} 502}
503 503
504void EmitGlobalAtomicUMax32x2(EmitContext&) { 504void EmitGlobalAtomicUMax32x2(EmitContext&) {
505 throw NotImplementedException("GLSL Instrucion"); 505 throw NotImplementedException("GLSL Instruction");
506} 506}
507 507
508void EmitGlobalAtomicInc32x2(EmitContext&) { 508void EmitGlobalAtomicInc32x2(EmitContext&) {
509 throw NotImplementedException("GLSL Instrucion"); 509 throw NotImplementedException("GLSL Instruction");
510} 510}
511 511
512void EmitGlobalAtomicDec32x2(EmitContext&) { 512void EmitGlobalAtomicDec32x2(EmitContext&) {
513 throw NotImplementedException("GLSL Instrucion"); 513 throw NotImplementedException("GLSL Instruction");
514} 514}
515 515
516void EmitGlobalAtomicAnd32x2(EmitContext&) { 516void EmitGlobalAtomicAnd32x2(EmitContext&) {
517 throw NotImplementedException("GLSL Instrucion"); 517 throw NotImplementedException("GLSL Instruction");
518} 518}
519 519
520void EmitGlobalAtomicOr32x2(EmitContext&) { 520void EmitGlobalAtomicOr32x2(EmitContext&) {
521 throw NotImplementedException("GLSL Instrucion"); 521 throw NotImplementedException("GLSL Instruction");
522} 522}
523 523
524void EmitGlobalAtomicXor32x2(EmitContext&) { 524void EmitGlobalAtomicXor32x2(EmitContext&) {
525 throw NotImplementedException("GLSL Instrucion"); 525 throw NotImplementedException("GLSL Instruction");
526} 526}
527 527
528void EmitGlobalAtomicExchange32x2(EmitContext&) { 528void EmitGlobalAtomicExchange32x2(EmitContext&) {
529 throw NotImplementedException("GLSL Instrucion"); 529 throw NotImplementedException("GLSL Instruction");
530} 530}
531 531
532void EmitGlobalAtomicAddF32(EmitContext&) { 532void EmitGlobalAtomicAddF32(EmitContext&) {
533 throw NotImplementedException("GLSL Instrucion"); 533 throw NotImplementedException("GLSL Instruction");
534} 534}
535 535
536void EmitGlobalAtomicAddF16x2(EmitContext&) { 536void EmitGlobalAtomicAddF16x2(EmitContext&) {
537 throw NotImplementedException("GLSL Instrucion"); 537 throw NotImplementedException("GLSL Instruction");
538} 538}
539 539
540void EmitGlobalAtomicAddF32x2(EmitContext&) { 540void EmitGlobalAtomicAddF32x2(EmitContext&) {
541 throw NotImplementedException("GLSL Instrucion"); 541 throw NotImplementedException("GLSL Instruction");
542} 542}
543 543
544void EmitGlobalAtomicMinF16x2(EmitContext&) { 544void EmitGlobalAtomicMinF16x2(EmitContext&) {
545 throw NotImplementedException("GLSL Instrucion"); 545 throw NotImplementedException("GLSL Instruction");
546} 546}
547 547
548void EmitGlobalAtomicMinF32x2(EmitContext&) { 548void EmitGlobalAtomicMinF32x2(EmitContext&) {
549 throw NotImplementedException("GLSL Instrucion"); 549 throw NotImplementedException("GLSL Instruction");
550} 550}
551 551
552void EmitGlobalAtomicMaxF16x2(EmitContext&) { 552void EmitGlobalAtomicMaxF16x2(EmitContext&) {
553 throw NotImplementedException("GLSL Instrucion"); 553 throw NotImplementedException("GLSL Instruction");
554} 554}
555 555
556void EmitGlobalAtomicMaxF32x2(EmitContext&) { 556void EmitGlobalAtomicMaxF32x2(EmitContext&) {
557 throw NotImplementedException("GLSL Instrucion"); 557 throw NotImplementedException("GLSL Instruction");
558} 558}
559} // namespace Shader::Backend::GLSL 559} // namespace Shader::Backend::GLSL
diff --git a/src/shader_recompiler/backend/glsl/glsl_emit_context.h b/src/shader_recompiler/backend/glsl/glsl_emit_context.h
index dfd10ac28..7587f7bab 100644
--- a/src/shader_recompiler/backend/glsl/glsl_emit_context.h
+++ b/src/shader_recompiler/backend/glsl/glsl_emit_context.h
@@ -49,7 +49,7 @@ public:
49 void Add(const char* format_str, IR::Inst& inst, Args&&... args) { 49 void Add(const char* format_str, IR::Inst& inst, Args&&... args) {
50 const auto var_def{var_alloc.AddDefine(inst, type)}; 50 const auto var_def{var_alloc.AddDefine(inst, type)};
51 if (var_def.empty()) { 51 if (var_def.empty()) {
52 // skip assigment. 52 // skip assignment.
53 code += fmt::format(fmt::runtime(format_str + 3), std::forward<Args>(args)...); 53 code += fmt::format(fmt::runtime(format_str + 3), std::forward<Args>(args)...);
54 } else { 54 } else {
55 code += fmt::format(fmt::runtime(format_str), var_def, std::forward<Args>(args)...); 55 code += fmt::format(fmt::runtime(format_str), var_def, std::forward<Args>(args)...);
diff --git a/src/tests/common/ring_buffer.cpp b/src/tests/common/ring_buffer.cpp
index 7dee988c8..e85f9977b 100644
--- a/src/tests/common/ring_buffer.cpp
+++ b/src/tests/common/ring_buffer.cpp
@@ -52,7 +52,7 @@ TEST_CASE("RingBuffer: Basic Tests", "[common]") {
52 52
53 REQUIRE(buf.Size() == 1U); 53 REQUIRE(buf.Size() == 1U);
54 54
55 // Pushing more values than space available should partially suceed. 55 // Pushing more values than space available should partially succeed.
56 { 56 {
57 std::vector<char> to_push(6); 57 std::vector<char> to_push(6);
58 std::iota(to_push.begin(), to_push.end(), 88); 58 std::iota(to_push.begin(), to_push.end(), 88);
diff --git a/src/tests/common/scratch_buffer.cpp b/src/tests/common/scratch_buffer.cpp
index 132f139fa..26e401760 100644
--- a/src/tests/common/scratch_buffer.cpp
+++ b/src/tests/common/scratch_buffer.cpp
@@ -191,7 +191,7 @@ TEST_CASE("ScratchBuffer: Span Writes", "[common]") {
191 191
192 for (size_t i = 0; i < buf_span.size(); ++i) { 192 for (size_t i = 0; i < buf_span.size(); ++i) {
193 const auto new_value = static_cast<u8>(i + 1U); 193 const auto new_value = static_cast<u8>(i + 1U);
194 // Writes to a span of the scratch buffer will propogate to the buffer itself 194 // Writes to a span of the scratch buffer will propagate to the buffer itself
195 buf_span[i] = new_value; 195 buf_span[i] = new_value;
196 REQUIRE(buf[i] == new_value); 196 REQUIRE(buf[i] == new_value);
197 } 197 }
diff --git a/src/video_core/control/channel_state_cache.h b/src/video_core/control/channel_state_cache.h
index cdaf4f8d5..46bc9e322 100644
--- a/src/video_core/control/channel_state_cache.h
+++ b/src/video_core/control/channel_state_cache.h
@@ -44,7 +44,7 @@ public:
44template <class P> 44template <class P>
45class ChannelSetupCaches { 45class ChannelSetupCaches {
46public: 46public:
47 /// Operations for seting the channel of execution. 47 /// Operations for setting the channel of execution.
48 virtual ~ChannelSetupCaches(); 48 virtual ~ChannelSetupCaches();
49 49
50 /// Create channel state. 50 /// Create channel state.
diff --git a/src/video_core/engines/draw_manager.cpp b/src/video_core/engines/draw_manager.cpp
index 1d22d25f1..0e94c521a 100644
--- a/src/video_core/engines/draw_manager.cpp
+++ b/src/video_core/engines/draw_manager.cpp
@@ -164,6 +164,7 @@ void DrawManager::DrawEnd(u32 instance_count, bool force_draw) {
164 draw_state.index_buffer.count = 164 draw_state.index_buffer.count =
165 static_cast<u32>(draw_state.inline_index_draw_indexes.size() / 4); 165 static_cast<u32>(draw_state.inline_index_draw_indexes.size() / 4);
166 draw_state.index_buffer.format = Maxwell3D::Regs::IndexFormat::UnsignedInt; 166 draw_state.index_buffer.format = Maxwell3D::Regs::IndexFormat::UnsignedInt;
167 maxwell3d->dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
167 ProcessDraw(true, instance_count); 168 ProcessDraw(true, instance_count);
168 draw_state.inline_index_draw_indexes.clear(); 169 draw_state.inline_index_draw_indexes.clear();
169 break; 170 break;
diff --git a/src/video_core/engines/sw_blitter/blitter.cpp b/src/video_core/engines/sw_blitter/blitter.cpp
index 2f1ea4626..3c9f38559 100644
--- a/src/video_core/engines/sw_blitter/blitter.cpp
+++ b/src/video_core/engines/sw_blitter/blitter.cpp
@@ -193,7 +193,7 @@ bool SoftwareBlitEngine::Blit(Fermi2D::Surface& src, Fermi2D::Surface& dst,
193 output_converter->ConvertFrom(impl->intermediate_dst, impl->dst_buffer); 193 output_converter->ConvertFrom(impl->intermediate_dst, impl->dst_buffer);
194 }; 194 };
195 195
196 // Do actuall Blit 196 // Do actual Blit
197 197
198 impl->dst_buffer.resize(dst_copy_size); 198 impl->dst_buffer.resize(dst_copy_size);
199 if (src.linear == Fermi2D::MemoryLayout::BlockLinear) { 199 if (src.linear == Fermi2D::MemoryLayout::BlockLinear) {
diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp
index 9c103c0d4..050b11874 100644
--- a/src/video_core/gpu_thread.cpp
+++ b/src/video_core/gpu_thread.cpp
@@ -25,7 +25,7 @@ static void RunThread(std::stop_token stop_token, Core::System& system,
25 SCOPE_EXIT({ MicroProfileOnThreadExit(); }); 25 SCOPE_EXIT({ MicroProfileOnThreadExit(); });
26 26
27 Common::SetCurrentThreadName(name.c_str()); 27 Common::SetCurrentThreadName(name.c_str());
28 Common::SetCurrentThreadPriority(Common::ThreadPriority::High); 28 Common::SetCurrentThreadPriority(Common::ThreadPriority::Critical);
29 system.RegisterHostThread(); 29 system.RegisterHostThread();
30 30
31 auto current_context = context.Acquire(); 31 auto current_context = context.Acquire();
diff --git a/src/video_core/host_shaders/astc_decoder.comp b/src/video_core/host_shaders/astc_decoder.comp
index d608678a3..bf2693559 100644
--- a/src/video_core/host_shaders/astc_decoder.comp
+++ b/src/video_core/host_shaders/astc_decoder.comp
@@ -125,7 +125,7 @@ uvec4 local_buff;
125uvec4 color_endpoint_data; 125uvec4 color_endpoint_data;
126int color_bitsread = 0; 126int color_bitsread = 0;
127 127
128// Four values, two endpoints, four maximum paritions 128// Four values, two endpoints, four maximum partitions
129uint color_values[32]; 129uint color_values[32];
130int colvals_index = 0; 130int colvals_index = 0;
131 131
diff --git a/src/video_core/host_shaders/opengl_smaa.glsl b/src/video_core/host_shaders/opengl_smaa.glsl
index 3cbe87bbf..419f89bca 100644
--- a/src/video_core/host_shaders/opengl_smaa.glsl
+++ b/src/video_core/host_shaders/opengl_smaa.glsl
@@ -97,7 +97,7 @@
97 * half-rate linear filtering on GCN. 97 * half-rate linear filtering on GCN.
98 * 98 *
99 * If SMAA is applied to 64-bit color buffers, switching to point filtering 99 * If SMAA is applied to 64-bit color buffers, switching to point filtering
100 * when accesing them will increase the performance. Search for 100 * when accessing them will increase the performance. Search for
101 * 'SMAASamplePoint' to see which textures may benefit from point 101 * 'SMAASamplePoint' to see which textures may benefit from point
102 * filtering, and where (which is basically the color input in the edge 102 * filtering, and where (which is basically the color input in the edge
103 * detection and resolve passes). 103 * detection and resolve passes).
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index cf56392ef..51ae2de68 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -103,8 +103,8 @@ public:
103 103
104 /** 104 /**
105 * Returns a vector with all the subranges of cpu addresses mapped beneath. 105 * Returns a vector with all the subranges of cpu addresses mapped beneath.
106 * if the region is continous, a single pair will be returned. If it's unmapped, an empty vector 106 * if the region is continuous, a single pair will be returned. If it's unmapped, an empty
107 * will be returned; 107 * vector will be returned;
108 */ 108 */
109 std::vector<std::pair<GPUVAddr, std::size_t>> GetSubmappedRange(GPUVAddr gpu_addr, 109 std::vector<std::pair<GPUVAddr, std::size_t>> GetSubmappedRange(GPUVAddr gpu_addr,
110 std::size_t size) const; 110 std::size_t size) const;
diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h
index 00ce53e3e..8906ba6d8 100644
--- a/src/video_core/query_cache.h
+++ b/src/video_core/query_cache.h
@@ -341,7 +341,7 @@ public:
341 341
342 /// Flushes the query to guest memory. 342 /// Flushes the query to guest memory.
343 virtual void Flush() { 343 virtual void Flush() {
344 // When counter is nullptr it means that it's just been reseted. We are supposed to write a 344 // When counter is nullptr it means that it's just been reset. We are supposed to write a
345 // zero in these cases. 345 // zero in these cases.
346 const u64 value = counter ? counter->Query() : 0; 346 const u64 value = counter ? counter->Query() : 0;
347 std::memcpy(host_ptr, &value, sizeof(u64)); 347 std::memcpy(host_ptr, &value, sizeof(u64));
diff --git a/src/video_core/renderer_opengl/blit_image.cpp b/src/video_core/renderer_opengl/blit_image.cpp
index 9a560a73b..3b03e8d5a 100644
--- a/src/video_core/renderer_opengl/blit_image.cpp
+++ b/src/video_core/renderer_opengl/blit_image.cpp
@@ -22,7 +22,7 @@ BlitImageHelper::~BlitImageHelper() = default;
22void BlitImageHelper::BlitColor(GLuint dst_framebuffer, GLuint src_image_view, GLuint src_sampler, 22void BlitImageHelper::BlitColor(GLuint dst_framebuffer, GLuint src_image_view, GLuint src_sampler,
23 const Region2D& dst_region, const Region2D& src_region, 23 const Region2D& dst_region, const Region2D& src_region,
24 const Extent3D& src_size) { 24 const Extent3D& src_size) {
25 glEnable(GL_CULL_FACE); 25 glDisable(GL_CULL_FACE);
26 glDisable(GL_COLOR_LOGIC_OP); 26 glDisable(GL_COLOR_LOGIC_OP);
27 glDisable(GL_DEPTH_TEST); 27 glDisable(GL_DEPTH_TEST);
28 glDisable(GL_STENCIL_TEST); 28 glDisable(GL_STENCIL_TEST);
@@ -31,7 +31,6 @@ void BlitImageHelper::BlitColor(GLuint dst_framebuffer, GLuint src_image_view, G
31 glDisable(GL_ALPHA_TEST); 31 glDisable(GL_ALPHA_TEST);
32 glDisablei(GL_BLEND, 0); 32 glDisablei(GL_BLEND, 0);
33 glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); 33 glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
34 glCullFace(GL_BACK);
35 glFrontFace(GL_CW); 34 glFrontFace(GL_CW);
36 glColorMaski(0, GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE); 35 glColorMaski(0, GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
37 glDepthRangeIndexed(0, 0.0, 0.0); 36 glDepthRangeIndexed(0, 0.0, 0.0);
diff --git a/src/video_core/renderer_opengl/gl_fence_manager.cpp b/src/video_core/renderer_opengl/gl_fence_manager.cpp
index 91463f854..5326172af 100644
--- a/src/video_core/renderer_opengl/gl_fence_manager.cpp
+++ b/src/video_core/renderer_opengl/gl_fence_manager.cpp
@@ -27,9 +27,7 @@ bool GLInnerFence::IsSignaled() const {
27 return true; 27 return true;
28 } 28 }
29 ASSERT(sync_object.handle != 0); 29 ASSERT(sync_object.handle != 0);
30 GLint sync_status; 30 return sync_object.IsSignaled();
31 glGetSynciv(sync_object.handle, GL_SYNC_STATUS, 1, nullptr, &sync_status);
32 return sync_status == GL_SIGNALED;
33} 31}
34 32
35void GLInnerFence::Wait() { 33void GLInnerFence::Wait() {
diff --git a/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp b/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp
index 29491e762..89000d6e0 100644
--- a/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp
+++ b/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp
@@ -621,10 +621,7 @@ bool GraphicsPipeline::IsBuilt() noexcept {
621 if (built_fence.handle == 0) { 621 if (built_fence.handle == 0) {
622 return false; 622 return false;
623 } 623 }
624 // Timeout of zero means this is non-blocking 624 is_built = built_fence.IsSignaled();
625 const auto sync_status = glClientWaitSync(built_fence.handle, 0, 0);
626 ASSERT(sync_status != GL_WAIT_FAILED);
627 is_built = sync_status != GL_TIMEOUT_EXPIRED;
628 return is_built; 625 return is_built;
629} 626}
630 627
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 7bced675c..90e35e307 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -63,7 +63,7 @@ RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra
63 buffer_cache(*this, cpu_memory_, buffer_cache_runtime), 63 buffer_cache(*this, cpu_memory_, buffer_cache_runtime),
64 shader_cache(*this, emu_window_, device, texture_cache, buffer_cache, program_manager, 64 shader_cache(*this, emu_window_, device, texture_cache, buffer_cache, program_manager,
65 state_tracker, gpu.ShaderNotify()), 65 state_tracker, gpu.ShaderNotify()),
66 query_cache(*this), accelerate_dma(buffer_cache), 66 query_cache(*this), accelerate_dma(buffer_cache, texture_cache),
67 fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache), 67 fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache),
68 blit_image(program_manager_) {} 68 blit_image(program_manager_) {}
69 69
@@ -357,6 +357,7 @@ void RasterizerOpenGL::DrawTexture() {
357 .y = static_cast<s32>(draw_texture_state.src_y1)}}; 357 .y = static_cast<s32>(draw_texture_state.src_y1)}};
358 blit_image.BlitColor(texture_cache.GetFramebuffer()->Handle(), texture.DefaultHandle(), 358 blit_image.BlitColor(texture_cache.GetFramebuffer()->Handle(), texture.DefaultHandle(),
359 sampler->Handle(), dst_region, src_region, texture.size); 359 sampler->Handle(), dst_region, src_region, texture.size);
360 state_tracker.InvalidateState();
360 } 361 }
361 362
362 ++num_queued_commands; 363 ++num_queued_commands;
@@ -576,7 +577,7 @@ bool RasterizerOpenGL::AccelerateConditionalRendering() {
576 // Reimplement Host conditional rendering. 577 // Reimplement Host conditional rendering.
577 return false; 578 return false;
578 } 579 }
579 // Medium / Low Hack: stub any checks on queries writen into the buffer cache. 580 // Medium / Low Hack: stub any checks on queries written into the buffer cache.
580 const GPUVAddr condition_address{maxwell3d->regs.render_enable.Address()}; 581 const GPUVAddr condition_address{maxwell3d->regs.render_enable.Address()};
581 Maxwell::ReportSemaphore::Compare cmp; 582 Maxwell::ReportSemaphore::Compare cmp;
582 if (gpu_memory->IsMemoryDirty(condition_address, sizeof(cmp), 583 if (gpu_memory->IsMemoryDirty(condition_address, sizeof(cmp),
@@ -1262,7 +1263,8 @@ void RasterizerOpenGL::ReleaseChannel(s32 channel_id) {
1262 query_cache.EraseChannel(channel_id); 1263 query_cache.EraseChannel(channel_id);
1263} 1264}
1264 1265
1265AccelerateDMA::AccelerateDMA(BufferCache& buffer_cache_) : buffer_cache{buffer_cache_} {} 1266AccelerateDMA::AccelerateDMA(BufferCache& buffer_cache_, TextureCache& texture_cache_)
1267 : buffer_cache{buffer_cache_}, texture_cache{texture_cache_} {}
1266 1268
1267bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) { 1269bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) {
1268 std::scoped_lock lock{buffer_cache.mutex}; 1270 std::scoped_lock lock{buffer_cache.mutex};
@@ -1274,4 +1276,44 @@ bool AccelerateDMA::BufferClear(GPUVAddr src_address, u64 amount, u32 value) {
1274 return buffer_cache.DMAClear(src_address, amount, value); 1276 return buffer_cache.DMAClear(src_address, amount, value);
1275} 1277}
1276 1278
1279template <bool IS_IMAGE_UPLOAD>
1280bool AccelerateDMA::DmaBufferImageCopy(const Tegra::DMA::ImageCopy& copy_info,
1281 const Tegra::DMA::BufferOperand& buffer_operand,
1282 const Tegra::DMA::ImageOperand& image_operand) {
1283 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
1284 const auto image_id = texture_cache.DmaImageId(image_operand);
1285 if (image_id == VideoCommon::NULL_IMAGE_ID) {
1286 return false;
1287 }
1288 const u32 buffer_size = static_cast<u32>(buffer_operand.pitch * buffer_operand.height);
1289 static constexpr auto sync_info = VideoCommon::ObtainBufferSynchronize::FullSynchronize;
1290 const auto post_op = IS_IMAGE_UPLOAD ? VideoCommon::ObtainBufferOperation::DoNothing
1291 : VideoCommon::ObtainBufferOperation::MarkAsWritten;
1292 const auto [buffer, offset] =
1293 buffer_cache.ObtainBuffer(buffer_operand.address, buffer_size, sync_info, post_op);
1294
1295 const auto [image, copy] = texture_cache.DmaBufferImageCopy(
1296 copy_info, buffer_operand, image_operand, image_id, IS_IMAGE_UPLOAD);
1297 const std::span copy_span{&copy, 1};
1298
1299 if constexpr (IS_IMAGE_UPLOAD) {
1300 image->UploadMemory(buffer->Handle(), offset, copy_span);
1301 } else {
1302 image->DownloadMemory(buffer->Handle(), offset, copy_span);
1303 }
1304 return true;
1305}
1306
1307bool AccelerateDMA::ImageToBuffer(const Tegra::DMA::ImageCopy& copy_info,
1308 const Tegra::DMA::ImageOperand& image_operand,
1309 const Tegra::DMA::BufferOperand& buffer_operand) {
1310 return DmaBufferImageCopy<false>(copy_info, buffer_operand, image_operand);
1311}
1312
1313bool AccelerateDMA::BufferToImage(const Tegra::DMA::ImageCopy& copy_info,
1314 const Tegra::DMA::BufferOperand& buffer_operand,
1315 const Tegra::DMA::ImageOperand& image_operand) {
1316 return DmaBufferImageCopy<true>(copy_info, buffer_operand, image_operand);
1317}
1318
1277} // namespace OpenGL 1319} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index 7e21fc43d..ad6978bd0 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -50,24 +50,26 @@ static_assert(sizeof(BindlessSSBO) * CHAR_BIT == 128);
50 50
51class AccelerateDMA : public Tegra::Engines::AccelerateDMAInterface { 51class AccelerateDMA : public Tegra::Engines::AccelerateDMAInterface {
52public: 52public:
53 explicit AccelerateDMA(BufferCache& buffer_cache); 53 explicit AccelerateDMA(BufferCache& buffer_cache, TextureCache& texture_cache);
54 54
55 bool BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) override; 55 bool BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) override;
56 56
57 bool BufferClear(GPUVAddr src_address, u64 amount, u32 value) override; 57 bool BufferClear(GPUVAddr src_address, u64 amount, u32 value) override;
58 58
59 bool ImageToBuffer(const Tegra::DMA::ImageCopy& copy_info, const Tegra::DMA::ImageOperand& src, 59 bool ImageToBuffer(const Tegra::DMA::ImageCopy& copy_info, const Tegra::DMA::ImageOperand& src,
60 const Tegra::DMA::BufferOperand& dst) override { 60 const Tegra::DMA::BufferOperand& dst) override;
61 return false;
62 }
63 61
64 bool BufferToImage(const Tegra::DMA::ImageCopy& copy_info, const Tegra::DMA::BufferOperand& src, 62 bool BufferToImage(const Tegra::DMA::ImageCopy& copy_info, const Tegra::DMA::BufferOperand& src,
65 const Tegra::DMA::ImageOperand& dst) override { 63 const Tegra::DMA::ImageOperand& dst) override;
66 return false;
67 }
68 64
69private: 65private:
66 template <bool IS_IMAGE_UPLOAD>
67 bool DmaBufferImageCopy(const Tegra::DMA::ImageCopy& copy_info,
68 const Tegra::DMA::BufferOperand& src,
69 const Tegra::DMA::ImageOperand& dst);
70
70 BufferCache& buffer_cache; 71 BufferCache& buffer_cache;
72 TextureCache& texture_cache;
71}; 73};
72 74
73class RasterizerOpenGL : public VideoCore::RasterizerAccelerated, 75class RasterizerOpenGL : public VideoCore::RasterizerAccelerated,
@@ -160,7 +162,7 @@ private:
160 /// Syncs the cull mode to match the guest state 162 /// Syncs the cull mode to match the guest state
161 void SyncCullMode(); 163 void SyncCullMode();
162 164
163 /// Syncs the primitve restart to match the guest state 165 /// Syncs the primitive restart to match the guest state
164 void SyncPrimitiveRestart(); 166 void SyncPrimitiveRestart();
165 167
166 /// Syncs the depth test state to match the guest state 168 /// Syncs the depth test state to match the guest state
@@ -244,7 +246,7 @@ private:
244 std::array<GLuint, MAX_TEXTURES> texture_handles{}; 246 std::array<GLuint, MAX_TEXTURES> texture_handles{};
245 std::array<GLuint, MAX_IMAGES> image_handles{}; 247 std::array<GLuint, MAX_IMAGES> image_handles{};
246 248
247 /// Number of commands queued to the OpenGL driver. Resetted on flush. 249 /// Number of commands queued to the OpenGL driver. Reset on flush.
248 size_t num_queued_commands = 0; 250 size_t num_queued_commands = 0;
249 bool has_written_global_memory = false; 251 bool has_written_global_memory = false;
250 252
diff --git a/src/video_core/renderer_opengl/gl_resource_manager.cpp b/src/video_core/renderer_opengl/gl_resource_manager.cpp
index 3a664fdec..eae8fd110 100644
--- a/src/video_core/renderer_opengl/gl_resource_manager.cpp
+++ b/src/video_core/renderer_opengl/gl_resource_manager.cpp
@@ -3,6 +3,7 @@
3 3
4#include <string_view> 4#include <string_view>
5#include <glad/glad.h> 5#include <glad/glad.h>
6#include "common/assert.h"
6#include "common/microprofile.h" 7#include "common/microprofile.h"
7#include "video_core/renderer_opengl/gl_resource_manager.h" 8#include "video_core/renderer_opengl/gl_resource_manager.h"
8#include "video_core/renderer_opengl/gl_shader_util.h" 9#include "video_core/renderer_opengl/gl_shader_util.h"
@@ -158,6 +159,15 @@ void OGLSync::Release() {
158 handle = 0; 159 handle = 0;
159} 160}
160 161
162bool OGLSync::IsSignaled() const noexcept {
163 // At least on Nvidia, glClientWaitSync with a timeout of 0
164 // is faster than glGetSynciv of GL_SYNC_STATUS.
165 // Timeout of 0 means this check is non-blocking.
166 const auto sync_status = glClientWaitSync(handle, 0, 0);
167 ASSERT(sync_status != GL_WAIT_FAILED);
168 return sync_status != GL_TIMEOUT_EXPIRED;
169}
170
161void OGLFramebuffer::Create() { 171void OGLFramebuffer::Create() {
162 if (handle != 0) 172 if (handle != 0)
163 return; 173 return;
diff --git a/src/video_core/renderer_opengl/gl_resource_manager.h b/src/video_core/renderer_opengl/gl_resource_manager.h
index bc05ba4bd..77362acd2 100644
--- a/src/video_core/renderer_opengl/gl_resource_manager.h
+++ b/src/video_core/renderer_opengl/gl_resource_manager.h
@@ -263,6 +263,9 @@ public:
263 /// Deletes the internal OpenGL resource 263 /// Deletes the internal OpenGL resource
264 void Release(); 264 void Release();
265 265
266 /// Checks if the sync has been signaled
267 bool IsSignaled() const noexcept;
268
266 GLsync handle = 0; 269 GLsync handle = 0;
267}; 270};
268 271
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.cpp b/src/video_core/renderer_opengl/gl_texture_cache.cpp
index d3eabd686..0b9c4a904 100644
--- a/src/video_core/renderer_opengl/gl_texture_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_texture_cache.cpp
@@ -717,9 +717,7 @@ std::optional<size_t> TextureCacheRuntime::StagingBuffers::FindBuffer(size_t req
717 continue; 717 continue;
718 } 718 }
719 if (syncs[index].handle != 0) { 719 if (syncs[index].handle != 0) {
720 GLint status; 720 if (!syncs[index].IsSignaled()) {
721 glGetSynciv(syncs[index].handle, GL_SYNC_STATUS, 1, nullptr, &status);
722 if (status != GL_SIGNALED) {
723 continue; 721 continue;
724 } 722 }
725 syncs[index].Release(); 723 syncs[index].Release();
@@ -765,14 +763,14 @@ Image::Image(const VideoCommon::NullImageParams& params) : VideoCommon::ImageBas
765 763
766Image::~Image() = default; 764Image::~Image() = default;
767 765
768void Image::UploadMemory(const ImageBufferMap& map, 766void Image::UploadMemory(GLuint buffer_handle, size_t buffer_offset,
769 std::span<const VideoCommon::BufferImageCopy> copies) { 767 std::span<const VideoCommon::BufferImageCopy> copies) {
770 const bool is_rescaled = True(flags & ImageFlagBits::Rescaled); 768 const bool is_rescaled = True(flags & ImageFlagBits::Rescaled);
771 if (is_rescaled) { 769 if (is_rescaled) {
772 ScaleDown(true); 770 ScaleDown(true);
773 } 771 }
774 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, map.buffer); 772 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, buffer_handle);
775 glFlushMappedBufferRange(GL_PIXEL_UNPACK_BUFFER, map.offset, unswizzled_size_bytes); 773 glFlushMappedBufferRange(GL_PIXEL_UNPACK_BUFFER, buffer_offset, unswizzled_size_bytes);
776 774
777 glPixelStorei(GL_UNPACK_ALIGNMENT, 1); 775 glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
778 776
@@ -791,21 +789,26 @@ void Image::UploadMemory(const ImageBufferMap& map,
791 current_image_height = copy.buffer_image_height; 789 current_image_height = copy.buffer_image_height;
792 glPixelStorei(GL_UNPACK_IMAGE_HEIGHT, current_image_height); 790 glPixelStorei(GL_UNPACK_IMAGE_HEIGHT, current_image_height);
793 } 791 }
794 CopyBufferToImage(copy, map.offset); 792 CopyBufferToImage(copy, buffer_offset);
795 } 793 }
796 if (is_rescaled) { 794 if (is_rescaled) {
797 ScaleUp(); 795 ScaleUp();
798 } 796 }
799} 797}
800 798
801void Image::DownloadMemory(ImageBufferMap& map, 799void Image::UploadMemory(const ImageBufferMap& map,
800 std::span<const VideoCommon::BufferImageCopy> copies) {
801 UploadMemory(map.buffer, map.offset, copies);
802}
803
804void Image::DownloadMemory(GLuint buffer_handle, size_t buffer_offset,
802 std::span<const VideoCommon::BufferImageCopy> copies) { 805 std::span<const VideoCommon::BufferImageCopy> copies) {
803 const bool is_rescaled = True(flags & ImageFlagBits::Rescaled); 806 const bool is_rescaled = True(flags & ImageFlagBits::Rescaled);
804 if (is_rescaled) { 807 if (is_rescaled) {
805 ScaleDown(); 808 ScaleDown();
806 } 809 }
807 glMemoryBarrier(GL_PIXEL_BUFFER_BARRIER_BIT); // TODO: Move this to its own API 810 glMemoryBarrier(GL_PIXEL_BUFFER_BARRIER_BIT); // TODO: Move this to its own API
808 glBindBuffer(GL_PIXEL_PACK_BUFFER, map.buffer); 811 glBindBuffer(GL_PIXEL_PACK_BUFFER, buffer_handle);
809 glPixelStorei(GL_PACK_ALIGNMENT, 1); 812 glPixelStorei(GL_PACK_ALIGNMENT, 1);
810 813
811 u32 current_row_length = std::numeric_limits<u32>::max(); 814 u32 current_row_length = std::numeric_limits<u32>::max();
@@ -823,13 +826,18 @@ void Image::DownloadMemory(ImageBufferMap& map,
823 current_image_height = copy.buffer_image_height; 826 current_image_height = copy.buffer_image_height;
824 glPixelStorei(GL_PACK_IMAGE_HEIGHT, current_image_height); 827 glPixelStorei(GL_PACK_IMAGE_HEIGHT, current_image_height);
825 } 828 }
826 CopyImageToBuffer(copy, map.offset); 829 CopyImageToBuffer(copy, buffer_offset);
827 } 830 }
828 if (is_rescaled) { 831 if (is_rescaled) {
829 ScaleUp(true); 832 ScaleUp(true);
830 } 833 }
831} 834}
832 835
836void Image::DownloadMemory(ImageBufferMap& map,
837 std::span<const VideoCommon::BufferImageCopy> copies) {
838 DownloadMemory(map.buffer, map.offset, copies);
839}
840
833GLuint Image::StorageHandle() noexcept { 841GLuint Image::StorageHandle() noexcept {
834 switch (info.format) { 842 switch (info.format) {
835 case PixelFormat::A8B8G8R8_SRGB: 843 case PixelFormat::A8B8G8R8_SRGB:
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.h b/src/video_core/renderer_opengl/gl_texture_cache.h
index e30875496..911e4607a 100644
--- a/src/video_core/renderer_opengl/gl_texture_cache.h
+++ b/src/video_core/renderer_opengl/gl_texture_cache.h
@@ -206,9 +206,15 @@ public:
206 Image(Image&&) = default; 206 Image(Image&&) = default;
207 Image& operator=(Image&&) = default; 207 Image& operator=(Image&&) = default;
208 208
209 void UploadMemory(GLuint buffer_handle, size_t buffer_offset,
210 std::span<const VideoCommon::BufferImageCopy> copies);
211
209 void UploadMemory(const ImageBufferMap& map, 212 void UploadMemory(const ImageBufferMap& map,
210 std::span<const VideoCommon::BufferImageCopy> copies); 213 std::span<const VideoCommon::BufferImageCopy> copies);
211 214
215 void DownloadMemory(GLuint buffer_handle, size_t buffer_offset,
216 std::span<const VideoCommon::BufferImageCopy> copies);
217
212 void DownloadMemory(ImageBufferMap& map, std::span<const VideoCommon::BufferImageCopy> copies); 218 void DownloadMemory(ImageBufferMap& map, std::span<const VideoCommon::BufferImageCopy> copies);
213 219
214 GLuint StorageHandle() noexcept; 220 GLuint StorageHandle() noexcept;
diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
index f8398b511..e7df32d84 100644
--- a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
+++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
@@ -271,7 +271,7 @@ bool FixedPipelineState::operator==(const FixedPipelineState& rhs) const noexcep
271 271
272u32 FixedPipelineState::PackComparisonOp(Maxwell::ComparisonOp op) noexcept { 272u32 FixedPipelineState::PackComparisonOp(Maxwell::ComparisonOp op) noexcept {
273 // OpenGL enums go from 0x200 to 0x207 and the others from 1 to 8 273 // OpenGL enums go from 0x200 to 0x207 and the others from 1 to 8
274 // If we substract 0x200 to OpenGL enums and 1 to the others we get a 0-7 range. 274 // If we subtract 0x200 to OpenGL enums and 1 to the others we get a 0-7 range.
275 // Perfect for a hash. 275 // Perfect for a hash.
276 const u32 value = static_cast<u32>(op); 276 const u32 value = static_cast<u32>(op);
277 return value - (value >= 0x200 ? 0x200 : 1); 277 return value - (value >= 0x200 ? 0x200 : 1);
@@ -322,8 +322,8 @@ Maxwell::StencilOp::Op FixedPipelineState::UnpackStencilOp(u32 packed) noexcept
322} 322}
323 323
324u32 FixedPipelineState::PackCullFace(Maxwell::CullFace cull) noexcept { 324u32 FixedPipelineState::PackCullFace(Maxwell::CullFace cull) noexcept {
325 // FrontAndBack is 0x408, by substracting 0x406 in it we get 2. 325 // FrontAndBack is 0x408, by subtracting 0x406 in it we get 2.
326 // Individual cull faces are in 0x404 and 0x405, substracting 0x404 we get 0 and 1. 326 // Individual cull faces are in 0x404 and 0x405, subtracting 0x404 we get 0 and 1.
327 const u32 value = static_cast<u32>(cull); 327 const u32 value = static_cast<u32>(cull);
328 return value - (value == 0x408 ? 0x406 : 0x404); 328 return value - (value == 0x408 ? 0x406 : 0x404);
329} 329}
diff --git a/src/video_core/renderer_vulkan/vk_command_pool.cpp b/src/video_core/renderer_vulkan/vk_command_pool.cpp
index 2f09de1c1..d0dbf7ca5 100644
--- a/src/video_core/renderer_vulkan/vk_command_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_command_pool.cpp
@@ -22,8 +22,8 @@ CommandPool::CommandPool(MasterSemaphore& master_semaphore_, const Device& devic
22CommandPool::~CommandPool() = default; 22CommandPool::~CommandPool() = default;
23 23
24void CommandPool::Allocate(size_t begin, size_t end) { 24void CommandPool::Allocate(size_t begin, size_t end) {
25 // Command buffers are going to be commited, recorded, executed every single usage cycle. 25 // Command buffers are going to be committed, recorded, executed every single usage cycle.
26 // They are also going to be reseted when commited. 26 // They are also going to be reset when committed.
27 Pool& pool = pools.emplace_back(); 27 Pool& pool = pools.emplace_back();
28 pool.handle = device.GetLogical().CreateCommandPool({ 28 pool.handle = device.GetLogical().CreateCommandPool({
29 .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, 29 .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 25965b684..673ab478e 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -671,7 +671,7 @@ bool RasterizerVulkan::AccelerateConditionalRendering() {
671 // TODO(Blinkhawk): Reimplement Host conditional rendering. 671 // TODO(Blinkhawk): Reimplement Host conditional rendering.
672 return false; 672 return false;
673 } 673 }
674 // Medium / Low Hack: stub any checks on queries writen into the buffer cache. 674 // Medium / Low Hack: stub any checks on queries written into the buffer cache.
675 const GPUVAddr condition_address{maxwell3d->regs.render_enable.Address()}; 675 const GPUVAddr condition_address{maxwell3d->regs.render_enable.Address()};
676 Maxwell::ReportSemaphore::Compare cmp; 676 Maxwell::ReportSemaphore::Compare cmp;
677 if (gpu_memory->IsMemoryDirty(condition_address, sizeof(cmp), 677 if (gpu_memory->IsMemoryDirty(condition_address, sizeof(cmp),
@@ -770,232 +770,44 @@ bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64
770 return buffer_cache.DMACopy(src_address, dest_address, amount); 770 return buffer_cache.DMACopy(src_address, dest_address, amount);
771} 771}
772 772
773bool AccelerateDMA::ImageToBuffer(const Tegra::DMA::ImageCopy& copy_info, 773template <bool IS_IMAGE_UPLOAD>
774 const Tegra::DMA::ImageOperand& src, 774bool AccelerateDMA::DmaBufferImageCopy(const Tegra::DMA::ImageCopy& copy_info,
775 const Tegra::DMA::BufferOperand& dst) { 775 const Tegra::DMA::BufferOperand& buffer_operand,
776 const Tegra::DMA::ImageOperand& image_operand) {
776 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; 777 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
777 auto query_image = texture_cache.ObtainImage(src, false); 778 const auto image_id = texture_cache.DmaImageId(image_operand);
778 if (!query_image) { 779 if (image_id == VideoCommon::NULL_IMAGE_ID) {
779 return false; 780 return false;
780 } 781 }
781 auto* image = query_image->first; 782 const u32 buffer_size = static_cast<u32>(buffer_operand.pitch * buffer_operand.height);
782 auto [level, base] = query_image->second; 783 static constexpr auto sync_info = VideoCommon::ObtainBufferSynchronize::FullSynchronize;
783 const u32 buffer_size = static_cast<u32>(dst.pitch * dst.height); 784 const auto post_op = IS_IMAGE_UPLOAD ? VideoCommon::ObtainBufferOperation::DoNothing
784 const auto [buffer, offset] = buffer_cache.ObtainBuffer( 785 : VideoCommon::ObtainBufferOperation::MarkAsWritten;
785 dst.address, buffer_size, VideoCommon::ObtainBufferSynchronize::FullSynchronize, 786 const auto [buffer, offset] =
786 VideoCommon::ObtainBufferOperation::MarkAsWritten); 787 buffer_cache.ObtainBuffer(buffer_operand.address, buffer_size, sync_info, post_op);
787 788
788 const bool is_rescaled = image->IsRescaled(); 789 const auto [image, copy] = texture_cache.DmaBufferImageCopy(
789 if (is_rescaled) { 790 copy_info, buffer_operand, image_operand, image_id, IS_IMAGE_UPLOAD);
790 image->ScaleDown(); 791 const std::span copy_span{&copy, 1};
791 } 792
792 VkImageSubresourceLayers subresources{ 793 if constexpr (IS_IMAGE_UPLOAD) {
793 .aspectMask = image->AspectMask(), 794 image->UploadMemory(buffer->Handle(), offset, copy_span);
794 .mipLevel = level, 795 } else {
795 .baseArrayLayer = base, 796 image->DownloadMemory(buffer->Handle(), offset, copy_span);
796 .layerCount = 1,
797 };
798 const u32 bpp = VideoCore::Surface::BytesPerBlock(image->info.format);
799 const auto convert = [old_bpp = src.bytes_per_pixel, bpp](u32 value) {
800 return (old_bpp * value) / bpp;
801 };
802 const u32 base_x = convert(src.params.origin.x.Value());
803 const u32 base_y = src.params.origin.y.Value();
804 const u32 length_x = convert(copy_info.length_x);
805 const u32 length_y = copy_info.length_y;
806 VkOffset3D image_offset{
807 .x = static_cast<s32>(base_x),
808 .y = static_cast<s32>(base_y),
809 .z = 0,
810 };
811 VkExtent3D image_extent{
812 .width = length_x,
813 .height = length_y,
814 .depth = 1,
815 };
816 auto buff_info(dst);
817 buff_info.pitch = convert(dst.pitch);
818 scheduler.RequestOutsideRenderPassOperationContext();
819 scheduler.Record([src_image = image->Handle(), dst_buffer = buffer->Handle(),
820 buffer_offset = offset, subresources, image_offset, image_extent,
821 buff_info](vk::CommandBuffer cmdbuf) {
822 const std::array buffer_copy_info{
823 VkBufferImageCopy{
824 .bufferOffset = buffer_offset,
825 .bufferRowLength = buff_info.pitch,
826 .bufferImageHeight = buff_info.height,
827 .imageSubresource = subresources,
828 .imageOffset = image_offset,
829 .imageExtent = image_extent,
830 },
831 };
832 const VkImageSubresourceRange range{
833 .aspectMask = subresources.aspectMask,
834 .baseMipLevel = subresources.mipLevel,
835 .levelCount = 1,
836 .baseArrayLayer = subresources.baseArrayLayer,
837 .layerCount = 1,
838 };
839 static constexpr VkMemoryBarrier WRITE_BARRIER{
840 .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
841 .pNext = nullptr,
842 .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
843 .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT,
844 };
845 const std::array pre_barriers{
846 VkImageMemoryBarrier{
847 .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
848 .pNext = nullptr,
849 .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
850 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
851 VK_ACCESS_TRANSFER_WRITE_BIT,
852 .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
853 .oldLayout = VK_IMAGE_LAYOUT_GENERAL,
854 .newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
855 .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
856 .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
857 .image = src_image,
858 .subresourceRange = range,
859 },
860 };
861 const std::array post_barriers{
862 VkImageMemoryBarrier{
863 .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
864 .pNext = nullptr,
865 .srcAccessMask = 0,
866 .dstAccessMask = 0,
867 .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
868 .newLayout = VK_IMAGE_LAYOUT_GENERAL,
869 .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
870 .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
871 .image = src_image,
872 .subresourceRange = range,
873 },
874 };
875 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
876 0, {}, {}, pre_barriers);
877 cmdbuf.CopyImageToBuffer(src_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst_buffer,
878 buffer_copy_info);
879 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
880 0, WRITE_BARRIER, nullptr, post_barriers);
881 });
882 if (is_rescaled) {
883 image->ScaleUp(true);
884 } 797 }
885 return true; 798 return true;
886} 799}
887 800
801bool AccelerateDMA::ImageToBuffer(const Tegra::DMA::ImageCopy& copy_info,
802 const Tegra::DMA::ImageOperand& image_operand,
803 const Tegra::DMA::BufferOperand& buffer_operand) {
804 return DmaBufferImageCopy<false>(copy_info, buffer_operand, image_operand);
805}
806
888bool AccelerateDMA::BufferToImage(const Tegra::DMA::ImageCopy& copy_info, 807bool AccelerateDMA::BufferToImage(const Tegra::DMA::ImageCopy& copy_info,
889 const Tegra::DMA::BufferOperand& src, 808 const Tegra::DMA::BufferOperand& buffer_operand,
890 const Tegra::DMA::ImageOperand& dst) { 809 const Tegra::DMA::ImageOperand& image_operand) {
891 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; 810 return DmaBufferImageCopy<true>(copy_info, buffer_operand, image_operand);
892 auto query_image = texture_cache.ObtainImage(dst, true);
893 if (!query_image) {
894 return false;
895 }
896 auto* image = query_image->first;
897 auto [level, base] = query_image->second;
898 const u32 buffer_size = static_cast<u32>(src.pitch * src.height);
899 const auto [buffer, offset] = buffer_cache.ObtainBuffer(
900 src.address, buffer_size, VideoCommon::ObtainBufferSynchronize::FullSynchronize,
901 VideoCommon::ObtainBufferOperation::DoNothing);
902 const bool is_rescaled = image->IsRescaled();
903 if (is_rescaled) {
904 image->ScaleDown(true);
905 }
906 VkImageSubresourceLayers subresources{
907 .aspectMask = image->AspectMask(),
908 .mipLevel = level,
909 .baseArrayLayer = base,
910 .layerCount = 1,
911 };
912 const u32 bpp = VideoCore::Surface::BytesPerBlock(image->info.format);
913 const auto convert = [old_bpp = dst.bytes_per_pixel, bpp](u32 value) {
914 return (old_bpp * value) / bpp;
915 };
916 const u32 base_x = convert(dst.params.origin.x.Value());
917 const u32 base_y = dst.params.origin.y.Value();
918 const u32 length_x = convert(copy_info.length_x);
919 const u32 length_y = copy_info.length_y;
920 VkOffset3D image_offset{
921 .x = static_cast<s32>(base_x),
922 .y = static_cast<s32>(base_y),
923 .z = 0,
924 };
925 VkExtent3D image_extent{
926 .width = length_x,
927 .height = length_y,
928 .depth = 1,
929 };
930 auto buff_info(src);
931 buff_info.pitch = convert(src.pitch);
932 scheduler.RequestOutsideRenderPassOperationContext();
933 scheduler.Record([dst_image = image->Handle(), src_buffer = buffer->Handle(),
934 buffer_offset = offset, subresources, image_offset, image_extent,
935 buff_info](vk::CommandBuffer cmdbuf) {
936 const std::array buffer_copy_info{
937 VkBufferImageCopy{
938 .bufferOffset = buffer_offset,
939 .bufferRowLength = buff_info.pitch,
940 .bufferImageHeight = buff_info.height,
941 .imageSubresource = subresources,
942 .imageOffset = image_offset,
943 .imageExtent = image_extent,
944 },
945 };
946 const VkImageSubresourceRange range{
947 .aspectMask = subresources.aspectMask,
948 .baseMipLevel = subresources.mipLevel,
949 .levelCount = 1,
950 .baseArrayLayer = subresources.baseArrayLayer,
951 .layerCount = 1,
952 };
953 static constexpr VkMemoryBarrier READ_BARRIER{
954 .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
955 .pNext = nullptr,
956 .srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT,
957 .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT,
958 };
959 const std::array pre_barriers{
960 VkImageMemoryBarrier{
961 .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
962 .pNext = nullptr,
963 .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
964 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
965 VK_ACCESS_TRANSFER_WRITE_BIT,
966 .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
967 .oldLayout = VK_IMAGE_LAYOUT_GENERAL,
968 .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
969 .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
970 .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
971 .image = dst_image,
972 .subresourceRange = range,
973 },
974 };
975 const std::array post_barriers{
976 VkImageMemoryBarrier{
977 .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
978 .pNext = nullptr,
979 .srcAccessMask = 0,
980 .dstAccessMask = 0,
981 .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
982 .newLayout = VK_IMAGE_LAYOUT_GENERAL,
983 .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
984 .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
985 .image = dst_image,
986 .subresourceRange = range,
987 },
988 };
989 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
990 0, READ_BARRIER, {}, pre_barriers);
991 cmdbuf.CopyBufferToImage(src_buffer, dst_image, VK_IMAGE_LAYOUT_GENERAL, buffer_copy_info);
992 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
993 0, nullptr, nullptr, post_barriers);
994 });
995 if (is_rescaled) {
996 image->ScaleUp();
997 }
998 return true;
999} 811}
1000 812
1001void RasterizerVulkan::UpdateDynamicStates() { 813void RasterizerVulkan::UpdateDynamicStates() {
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index 7746c5434..1659fbc13 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -59,6 +59,11 @@ public:
59 const Tegra::DMA::ImageOperand& dst) override; 59 const Tegra::DMA::ImageOperand& dst) override;
60 60
61private: 61private:
62 template <bool IS_IMAGE_UPLOAD>
63 bool DmaBufferImageCopy(const Tegra::DMA::ImageCopy& copy_info,
64 const Tegra::DMA::BufferOperand& src,
65 const Tegra::DMA::ImageOperand& dst);
66
62 BufferCache& buffer_cache; 67 BufferCache& buffer_cache;
63 TextureCache& texture_cache; 68 TextureCache& texture_cache;
64 Scheduler& scheduler; 69 Scheduler& scheduler;
diff --git a/src/video_core/renderer_vulkan/vk_resource_pool.cpp b/src/video_core/renderer_vulkan/vk_resource_pool.cpp
index 6c8ac22f4..6572f82ba 100644
--- a/src/video_core/renderer_vulkan/vk_resource_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_resource_pool.cpp
@@ -37,7 +37,7 @@ size_t ResourcePool::CommitResource() {
37 found = free_resource; 37 found = free_resource;
38 } 38 }
39 } 39 }
40 // Free iterator is hinted to the resource after the one that's been commited. 40 // Free iterator is hinted to the resource after the one that's been committed.
41 hint_iterator = (*found + 1) % ticks.size(); 41 hint_iterator = (*found + 1) % ticks.size();
42 return *found; 42 return *found;
43} 43}
@@ -46,7 +46,7 @@ size_t ResourcePool::ManageOverflow() {
46 const size_t old_capacity = ticks.size(); 46 const size_t old_capacity = ticks.size();
47 Grow(); 47 Grow();
48 48
49 // The last entry is guaranted to be free, since it's the first element of the freshly 49 // The last entry is guaranteed to be free, since it's the first element of the freshly
50 // allocated resources. 50 // allocated resources.
51 return old_capacity; 51 return old_capacity;
52} 52}
diff --git a/src/video_core/renderer_vulkan/vk_swapchain.cpp b/src/video_core/renderer_vulkan/vk_swapchain.cpp
index b6810eef9..85fdce6e5 100644
--- a/src/video_core/renderer_vulkan/vk_swapchain.cpp
+++ b/src/video_core/renderer_vulkan/vk_swapchain.cpp
@@ -159,7 +159,7 @@ void Swapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities, bo
159 present_mode = ChooseSwapPresentMode(present_modes); 159 present_mode = ChooseSwapPresentMode(present_modes);
160 160
161 u32 requested_image_count{capabilities.minImageCount + 1}; 161 u32 requested_image_count{capabilities.minImageCount + 1};
162 // Ensure Tripple buffering if possible. 162 // Ensure Triple buffering if possible.
163 if (capabilities.maxImageCount > 0) { 163 if (capabilities.maxImageCount > 0) {
164 if (requested_image_count > capabilities.maxImageCount) { 164 if (requested_image_count > capabilities.maxImageCount) {
165 requested_image_count = capabilities.maxImageCount; 165 requested_image_count = capabilities.maxImageCount;
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index e013d1c60..ae15f6976 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -1315,15 +1315,16 @@ Image::Image(const VideoCommon::NullImageParams& params) : VideoCommon::ImageBas
1315 1315
1316Image::~Image() = default; 1316Image::~Image() = default;
1317 1317
1318void Image::UploadMemory(const StagingBufferRef& map, std::span<const BufferImageCopy> copies) { 1318void Image::UploadMemory(VkBuffer buffer, VkDeviceSize offset,
1319 std::span<const VideoCommon::BufferImageCopy> copies) {
1319 // TODO: Move this to another API 1320 // TODO: Move this to another API
1320 const bool is_rescaled = True(flags & ImageFlagBits::Rescaled); 1321 const bool is_rescaled = True(flags & ImageFlagBits::Rescaled);
1321 if (is_rescaled) { 1322 if (is_rescaled) {
1322 ScaleDown(true); 1323 ScaleDown(true);
1323 } 1324 }
1324 scheduler->RequestOutsideRenderPassOperationContext(); 1325 scheduler->RequestOutsideRenderPassOperationContext();
1325 std::vector vk_copies = TransformBufferImageCopies(copies, map.offset, aspect_mask); 1326 std::vector vk_copies = TransformBufferImageCopies(copies, offset, aspect_mask);
1326 const VkBuffer src_buffer = map.buffer; 1327 const VkBuffer src_buffer = buffer;
1327 const VkImage vk_image = *original_image; 1328 const VkImage vk_image = *original_image;
1328 const VkImageAspectFlags vk_aspect_mask = aspect_mask; 1329 const VkImageAspectFlags vk_aspect_mask = aspect_mask;
1329 const bool is_initialized = std::exchange(initialized, true); 1330 const bool is_initialized = std::exchange(initialized, true);
@@ -1336,14 +1337,19 @@ void Image::UploadMemory(const StagingBufferRef& map, std::span<const BufferImag
1336 } 1337 }
1337} 1338}
1338 1339
1339void Image::DownloadMemory(const StagingBufferRef& map, std::span<const BufferImageCopy> copies) { 1340void Image::UploadMemory(const StagingBufferRef& map, std::span<const BufferImageCopy> copies) {
1341 UploadMemory(map.buffer, map.offset, copies);
1342}
1343
1344void Image::DownloadMemory(VkBuffer buffer, VkDeviceSize offset,
1345 std::span<const VideoCommon::BufferImageCopy> copies) {
1340 const bool is_rescaled = True(flags & ImageFlagBits::Rescaled); 1346 const bool is_rescaled = True(flags & ImageFlagBits::Rescaled);
1341 if (is_rescaled) { 1347 if (is_rescaled) {
1342 ScaleDown(); 1348 ScaleDown();
1343 } 1349 }
1344 std::vector vk_copies = TransformBufferImageCopies(copies, map.offset, aspect_mask); 1350 std::vector vk_copies = TransformBufferImageCopies(copies, offset, aspect_mask);
1345 scheduler->RequestOutsideRenderPassOperationContext(); 1351 scheduler->RequestOutsideRenderPassOperationContext();
1346 scheduler->Record([buffer = map.buffer, image = *original_image, aspect_mask = aspect_mask, 1352 scheduler->Record([buffer, image = *original_image, aspect_mask = aspect_mask,
1347 vk_copies](vk::CommandBuffer cmdbuf) { 1353 vk_copies](vk::CommandBuffer cmdbuf) {
1348 const VkImageMemoryBarrier read_barrier{ 1354 const VkImageMemoryBarrier read_barrier{
1349 .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, 1355 .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
@@ -1398,6 +1404,10 @@ void Image::DownloadMemory(const StagingBufferRef& map, std::span<const BufferIm
1398 } 1404 }
1399} 1405}
1400 1406
1407void Image::DownloadMemory(const StagingBufferRef& map, std::span<const BufferImageCopy> copies) {
1408 DownloadMemory(map.buffer, map.offset, copies);
1409}
1410
1401bool Image::IsRescaled() const noexcept { 1411bool Image::IsRescaled() const noexcept {
1402 return True(flags & ImageFlagBits::Rescaled); 1412 return True(flags & ImageFlagBits::Rescaled);
1403} 1413}
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index 0ce39616f..d5ee23f8d 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -132,9 +132,15 @@ public:
132 Image(Image&&) = default; 132 Image(Image&&) = default;
133 Image& operator=(Image&&) = default; 133 Image& operator=(Image&&) = default;
134 134
135 void UploadMemory(VkBuffer buffer, VkDeviceSize offset,
136 std::span<const VideoCommon::BufferImageCopy> copies);
137
135 void UploadMemory(const StagingBufferRef& map, 138 void UploadMemory(const StagingBufferRef& map,
136 std::span<const VideoCommon::BufferImageCopy> copies); 139 std::span<const VideoCommon::BufferImageCopy> copies);
137 140
141 void DownloadMemory(VkBuffer buffer, VkDeviceSize offset,
142 std::span<const VideoCommon::BufferImageCopy> copies);
143
138 void DownloadMemory(const StagingBufferRef& map, 144 void DownloadMemory(const StagingBufferRef& map,
139 std::span<const VideoCommon::BufferImageCopy> copies); 145 std::span<const VideoCommon::BufferImageCopy> copies);
140 146
diff --git a/src/video_core/renderer_vulkan/vk_update_descriptor.cpp b/src/video_core/renderer_vulkan/vk_update_descriptor.cpp
index 4d4a6753b..009dab0b6 100644
--- a/src/video_core/renderer_vulkan/vk_update_descriptor.cpp
+++ b/src/video_core/renderer_vulkan/vk_update_descriptor.cpp
@@ -25,7 +25,7 @@ void UpdateDescriptorQueue::TickFrame() {
25 25
26void UpdateDescriptorQueue::Acquire() { 26void UpdateDescriptorQueue::Acquire() {
27 // Minimum number of entries required. 27 // Minimum number of entries required.
28 // This is the maximum number of entries a single draw call migth use. 28 // This is the maximum number of entries a single draw call might use.
29 static constexpr size_t MIN_ENTRIES = 0x400; 29 static constexpr size_t MIN_ENTRIES = 0x400;
30 30
31 if (std::distance(payload.data(), payload_cursor) + MIN_ENTRIES >= payload.max_size()) { 31 if (std::distance(payload.data(), payload_cursor) + MIN_ENTRIES >= payload.max_size()) {
diff --git a/src/video_core/texture_cache/image_base.h b/src/video_core/texture_cache/image_base.h
index e8fa592d2..329396bb6 100644
--- a/src/video_core/texture_cache/image_base.h
+++ b/src/video_core/texture_cache/image_base.h
@@ -25,7 +25,7 @@ enum class ImageFlagBits : u32 {
25 Registered = 1 << 6, ///< True when the image is registered 25 Registered = 1 << 6, ///< True when the image is registered
26 Picked = 1 << 7, ///< Temporary flag to mark the image as picked 26 Picked = 1 << 7, ///< Temporary flag to mark the image as picked
27 Remapped = 1 << 8, ///< Image has been remapped. 27 Remapped = 1 << 8, ///< Image has been remapped.
28 Sparse = 1 << 9, ///< Image has non continous submemory. 28 Sparse = 1 << 9, ///< Image has non continuous submemory.
29 29
30 // Garbage Collection Flags 30 // Garbage Collection Flags
31 BadOverlap = 1 << 10, ///< This image overlaps other but doesn't fit, has higher 31 BadOverlap = 1 << 10, ///< This image overlaps other but doesn't fit, has higher
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 335338434..8e8b9a5e6 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -745,6 +745,25 @@ void TextureCache<P>::PopAsyncFlushes() {
745} 745}
746 746
747template <class P> 747template <class P>
748ImageId TextureCache<P>::DmaImageId(const Tegra::DMA::ImageOperand& operand) {
749 const ImageInfo dst_info(operand);
750 const ImageId dst_id = FindDMAImage(dst_info, operand.address);
751 if (!dst_id) {
752 return NULL_IMAGE_ID;
753 }
754 const auto& image = slot_images[dst_id];
755 if (False(image.flags & ImageFlagBits::GpuModified)) {
756 // No need to waste time on an image that's synced with guest
757 return NULL_IMAGE_ID;
758 }
759 const auto base = image.TryFindBase(operand.address);
760 if (!base) {
761 return NULL_IMAGE_ID;
762 }
763 return dst_id;
764}
765
766template <class P>
748bool TextureCache<P>::IsRescaling() const noexcept { 767bool TextureCache<P>::IsRescaling() const noexcept {
749 return is_rescaling; 768 return is_rescaling;
750} 769}
@@ -772,6 +791,49 @@ bool TextureCache<P>::IsRegionGpuModified(VAddr addr, size_t size) {
772} 791}
773 792
774template <class P> 793template <class P>
794std::pair<typename TextureCache<P>::Image*, BufferImageCopy> TextureCache<P>::DmaBufferImageCopy(
795 const Tegra::DMA::ImageCopy& copy_info, const Tegra::DMA::BufferOperand& buffer_operand,
796 const Tegra::DMA::ImageOperand& image_operand, ImageId image_id, bool modifies_image) {
797 const auto [level, base] = PrepareDmaImage(image_id, image_operand.address, modifies_image);
798 auto* image = &slot_images[image_id];
799 const u32 buffer_size = static_cast<u32>(buffer_operand.pitch * buffer_operand.height);
800 const u32 bpp = VideoCore::Surface::BytesPerBlock(image->info.format);
801 const auto convert = [old_bpp = image_operand.bytes_per_pixel, bpp](u32 value) {
802 return (old_bpp * value) / bpp;
803 };
804 const u32 base_x = convert(image_operand.params.origin.x.Value());
805 const u32 base_y = image_operand.params.origin.y.Value();
806 const u32 length_x = convert(copy_info.length_x);
807 const u32 length_y = copy_info.length_y;
808
809 const BufferImageCopy copy{
810 .buffer_offset = 0,
811 .buffer_size = buffer_size,
812 .buffer_row_length = convert(buffer_operand.pitch),
813 .buffer_image_height = buffer_operand.height,
814 .image_subresource =
815 {
816 .base_level = static_cast<s32>(level),
817 .base_layer = static_cast<s32>(base),
818 .num_layers = 1,
819 },
820 .image_offset =
821 {
822 .x = static_cast<s32>(base_x),
823 .y = static_cast<s32>(base_y),
824 .z = 0,
825 },
826 .image_extent =
827 {
828 .width = length_x,
829 .height = length_y,
830 .depth = 1,
831 },
832 };
833 return {image, copy};
834}
835
836template <class P>
775void TextureCache<P>::RefreshContents(Image& image, ImageId image_id) { 837void TextureCache<P>::RefreshContents(Image& image, ImageId image_id) {
776 if (False(image.flags & ImageFlagBits::CpuModified)) { 838 if (False(image.flags & ImageFlagBits::CpuModified)) {
777 // Only upload modified images 839 // Only upload modified images
@@ -1405,26 +1467,14 @@ ImageId TextureCache<P>::FindDMAImage(const ImageInfo& info, GPUVAddr gpu_addr)
1405} 1467}
1406 1468
1407template <class P> 1469template <class P>
1408std::optional<std::pair<typename TextureCache<P>::Image*, std::pair<u32, u32>>> 1470std::pair<u32, u32> TextureCache<P>::PrepareDmaImage(ImageId dst_id, GPUVAddr base_addr,
1409TextureCache<P>::ObtainImage(const Tegra::DMA::ImageOperand& operand, bool mark_as_modified) { 1471 bool mark_as_modified) {
1410 ImageInfo dst_info(operand); 1472 const auto& image = slot_images[dst_id];
1411 ImageId dst_id = FindDMAImage(dst_info, operand.address); 1473 const auto base = image.TryFindBase(base_addr);
1412 if (!dst_id) {
1413 return std::nullopt;
1414 }
1415 auto& image = slot_images[dst_id];
1416 auto base = image.TryFindBase(operand.address);
1417 if (!base) {
1418 return std::nullopt;
1419 }
1420 if (False(image.flags & ImageFlagBits::GpuModified)) {
1421 // No need to waste time on an image that's synced with guest
1422 return std::nullopt;
1423 }
1424 PrepareImage(dst_id, mark_as_modified, false); 1474 PrepareImage(dst_id, mark_as_modified, false);
1425 auto& new_image = slot_images[dst_id]; 1475 const auto& new_image = slot_images[dst_id];
1426 lru_cache.Touch(new_image.lru_index, frame_tick); 1476 lru_cache.Touch(new_image.lru_index, frame_tick);
1427 return std::make_pair(&new_image, std::make_pair(base->level, base->layer)); 1477 return std::make_pair(base->level, base->layer);
1428} 1478}
1429 1479
1430template <class P> 1480template <class P>
diff --git a/src/video_core/texture_cache/texture_cache_base.h b/src/video_core/texture_cache/texture_cache_base.h
index 848a5d9ea..5a5b4179c 100644
--- a/src/video_core/texture_cache/texture_cache_base.h
+++ b/src/video_core/texture_cache/texture_cache_base.h
@@ -209,8 +209,11 @@ public:
209 /// Pop asynchronous downloads 209 /// Pop asynchronous downloads
210 void PopAsyncFlushes(); 210 void PopAsyncFlushes();
211 211
212 [[nodiscard]] std::optional<std::pair<Image*, std::pair<u32, u32>>> ObtainImage( 212 [[nodiscard]] ImageId DmaImageId(const Tegra::DMA::ImageOperand& operand);
213 const Tegra::DMA::ImageOperand& operand, bool mark_as_modified); 213
214 [[nodiscard]] std::pair<Image*, BufferImageCopy> DmaBufferImageCopy(
215 const Tegra::DMA::ImageCopy& copy_info, const Tegra::DMA::BufferOperand& buffer_operand,
216 const Tegra::DMA::ImageOperand& image_operand, ImageId image_id, bool modifies_image);
214 217
215 /// Return true when a CPU region is modified from the GPU 218 /// Return true when a CPU region is modified from the GPU
216 [[nodiscard]] bool IsRegionGpuModified(VAddr addr, size_t size); 219 [[nodiscard]] bool IsRegionGpuModified(VAddr addr, size_t size);
@@ -386,6 +389,9 @@ private:
386 /// Returns true if the current clear parameters clear the whole image of a given image view 389 /// Returns true if the current clear parameters clear the whole image of a given image view
387 [[nodiscard]] bool IsFullClear(ImageViewId id); 390 [[nodiscard]] bool IsFullClear(ImageViewId id);
388 391
392 [[nodiscard]] std::pair<u32, u32> PrepareDmaImage(ImageId dst_id, GPUVAddr base_addr,
393 bool mark_as_modified);
394
389 bool ImageCanRescale(ImageBase& image); 395 bool ImageCanRescale(ImageBase& image);
390 void InvalidateScale(Image& image); 396 void InvalidateScale(Image& image);
391 bool ScaleUp(Image& image); 397 bool ScaleUp(Image& image);
diff --git a/src/video_core/textures/astc.cpp b/src/video_core/textures/astc.cpp
index 4381eed1d..a68bc0d77 100644
--- a/src/video_core/textures/astc.cpp
+++ b/src/video_core/textures/astc.cpp
@@ -1571,7 +1571,7 @@ static void DecompressBlock(std::span<const u8, 16> inBuf, const u32 blockWidth,
1571 assert(strm.GetBitsRead() + weightParams.GetPackedBitSize() == 128); 1571 assert(strm.GetBitsRead() + weightParams.GetPackedBitSize() == 128);
1572 1572
1573 // Decode both color data and texel weight data 1573 // Decode both color data and texel weight data
1574 u32 colorValues[32]; // Four values, two endpoints, four maximum paritions 1574 u32 colorValues[32]; // Four values, two endpoints, four maximum partitions
1575 DecodeColorValues(colorValues, colorEndpointData, colorEndpointMode, nPartitions, 1575 DecodeColorValues(colorValues, colorEndpointData, colorEndpointMode, nPartitions,
1576 colorDataBits); 1576 colorDataBits);
1577 1577
diff --git a/src/video_core/vulkan_common/vulkan_device.cpp b/src/video_core/vulkan_common/vulkan_device.cpp
index 48f1a3d14..df348af55 100644
--- a/src/video_core/vulkan_common/vulkan_device.cpp
+++ b/src/video_core/vulkan_common/vulkan_device.cpp
@@ -417,7 +417,7 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
417 417
418 sets_per_pool = 64; 418 sets_per_pool = 64;
419 if (is_amd_driver) { 419 if (is_amd_driver) {
420 // AMD drivers need a higher amount of Sets per Pool in certain circunstances like in XC2. 420 // AMD drivers need a higher amount of Sets per Pool in certain circumstances like in XC2.
421 sets_per_pool = 96; 421 sets_per_pool = 96;
422 // Disable VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT on AMD GCN4 and lower as it is broken. 422 // Disable VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT on AMD GCN4 and lower as it is broken.
423 if (!features.shader_float16_int8.shaderFloat16) { 423 if (!features.shader_float16_int8.shaderFloat16) {
diff --git a/src/video_core/vulkan_common/vulkan_device.h b/src/video_core/vulkan_common/vulkan_device.h
index 0662a2d9f..41b5da18a 100644
--- a/src/video_core/vulkan_common/vulkan_device.h
+++ b/src/video_core/vulkan_common/vulkan_device.h
@@ -180,7 +180,7 @@ public:
180 ~Device(); 180 ~Device();
181 181
182 /** 182 /**
183 * Returns a format supported by the device for the passed requeriments. 183 * Returns a format supported by the device for the passed requirements.
184 * @param wanted_format The ideal format to be returned. It may not be the returned format. 184 * @param wanted_format The ideal format to be returned. It may not be the returned format.
185 * @param wanted_usage The usage that must be fulfilled even if the format is not supported. 185 * @param wanted_usage The usage that must be fulfilled even if the format is not supported.
186 * @param format_type Format type usage. 186 * @param format_type Format type usage.
@@ -259,12 +259,12 @@ public:
259 259
260 bool ShouldBoostClocks() const; 260 bool ShouldBoostClocks() const;
261 261
262 /// Returns uniform buffer alignment requeriment. 262 /// Returns uniform buffer alignment requirement.
263 VkDeviceSize GetUniformBufferAlignment() const { 263 VkDeviceSize GetUniformBufferAlignment() const {
264 return properties.properties.limits.minUniformBufferOffsetAlignment; 264 return properties.properties.limits.minUniformBufferOffsetAlignment;
265 } 265 }
266 266
267 /// Returns storage alignment requeriment. 267 /// Returns storage alignment requirement.
268 VkDeviceSize GetStorageBufferAlignment() const { 268 VkDeviceSize GetStorageBufferAlignment() const {
269 return properties.properties.limits.minStorageBufferOffsetAlignment; 269 return properties.properties.limits.minStorageBufferOffsetAlignment;
270 } 270 }
@@ -656,7 +656,7 @@ private:
656 bool is_integrated{}; ///< Is GPU an iGPU. 656 bool is_integrated{}; ///< Is GPU an iGPU.
657 bool is_virtual{}; ///< Is GPU a virtual GPU. 657 bool is_virtual{}; ///< Is GPU a virtual GPU.
658 bool is_non_gpu{}; ///< Is SoftwareRasterizer, FPGA, non-GPU device. 658 bool is_non_gpu{}; ///< Is SoftwareRasterizer, FPGA, non-GPU device.
659 bool has_broken_cube_compatibility{}; ///< Has broken cube compatiblity bit 659 bool has_broken_cube_compatibility{}; ///< Has broken cube compatibility bit
660 bool has_renderdoc{}; ///< Has RenderDoc attached 660 bool has_renderdoc{}; ///< Has RenderDoc attached
661 bool has_nsight_graphics{}; ///< Has Nsight Graphics attached 661 bool has_nsight_graphics{}; ///< Has Nsight Graphics attached
662 bool supports_d24_depth{}; ///< Supports D24 depth buffers. 662 bool supports_d24_depth{}; ///< Supports D24 depth buffers.
diff --git a/src/video_core/vulkan_common/vulkan_wrapper.h b/src/video_core/vulkan_common/vulkan_wrapper.h
index e86f661cb..4ff328a21 100644
--- a/src/video_core/vulkan_common/vulkan_wrapper.h
+++ b/src/video_core/vulkan_common/vulkan_wrapper.h
@@ -68,7 +68,7 @@ public:
68 constexpr Span(const Range& range) : ptr{std::data(range)}, num{std::size(range)} {} 68 constexpr Span(const Range& range) : ptr{std::data(range)}, num{std::size(range)} {}
69 69
70 /// Construct a span from a pointer and a size. 70 /// Construct a span from a pointer and a size.
71 /// This is inteded for subranges. 71 /// This is intended for subranges.
72 constexpr Span(const T* ptr_, std::size_t num_) noexcept : ptr{ptr_}, num{num_} {} 72 constexpr Span(const T* ptr_, std::size_t num_) noexcept : ptr{ptr_}, num{num_} {}
73 73
74 /// Returns the data pointer by the span. 74 /// Returns the data pointer by the span.
@@ -390,11 +390,11 @@ public:
390 Handle(const Handle&) = delete; 390 Handle(const Handle&) = delete;
391 Handle& operator=(const Handle&) = delete; 391 Handle& operator=(const Handle&) = delete;
392 392
393 /// Construct a handle transfering the ownership from another handle. 393 /// Construct a handle transferring the ownership from another handle.
394 Handle(Handle&& rhs) noexcept 394 Handle(Handle&& rhs) noexcept
395 : handle{std::exchange(rhs.handle, nullptr)}, owner{rhs.owner}, dld{rhs.dld} {} 395 : handle{std::exchange(rhs.handle, nullptr)}, owner{rhs.owner}, dld{rhs.dld} {}
396 396
397 /// Assign the current handle transfering the ownership from another handle. 397 /// Assign the current handle transferring the ownership from another handle.
398 /// Destroys any previously held object. 398 /// Destroys any previously held object.
399 Handle& operator=(Handle&& rhs) noexcept { 399 Handle& operator=(Handle&& rhs) noexcept {
400 Release(); 400 Release();
@@ -463,10 +463,10 @@ public:
463 Handle(const Handle&) = delete; 463 Handle(const Handle&) = delete;
464 Handle& operator=(const Handle&) = delete; 464 Handle& operator=(const Handle&) = delete;
465 465
466 /// Construct a handle transfering ownership from another handle. 466 /// Construct a handle transferring ownership from another handle.
467 Handle(Handle&& rhs) noexcept : handle{std::exchange(rhs.handle, nullptr)}, dld{rhs.dld} {} 467 Handle(Handle&& rhs) noexcept : handle{std::exchange(rhs.handle, nullptr)}, dld{rhs.dld} {}
468 468
469 /// Assign the current handle transfering the ownership from another handle. 469 /// Assign the current handle transferring the ownership from another handle.
470 /// Destroys any previously held object. 470 /// Destroys any previously held object.
471 Handle& operator=(Handle&& rhs) noexcept { 471 Handle& operator=(Handle&& rhs) noexcept {
472 Release(); 472 Release();
@@ -533,12 +533,12 @@ public:
533 PoolAllocations(const PoolAllocations&) = delete; 533 PoolAllocations(const PoolAllocations&) = delete;
534 PoolAllocations& operator=(const PoolAllocations&) = delete; 534 PoolAllocations& operator=(const PoolAllocations&) = delete;
535 535
536 /// Construct an allocation transfering ownership from another allocation. 536 /// Construct an allocation transferring ownership from another allocation.
537 PoolAllocations(PoolAllocations&& rhs) noexcept 537 PoolAllocations(PoolAllocations&& rhs) noexcept
538 : allocations{std::move(rhs.allocations)}, num{rhs.num}, device{rhs.device}, pool{rhs.pool}, 538 : allocations{std::move(rhs.allocations)}, num{rhs.num}, device{rhs.device}, pool{rhs.pool},
539 dld{rhs.dld} {} 539 dld{rhs.dld} {}
540 540
541 /// Assign an allocation transfering ownership from another allocation. 541 /// Assign an allocation transferring ownership from another allocation.
542 PoolAllocations& operator=(PoolAllocations&& rhs) noexcept { 542 PoolAllocations& operator=(PoolAllocations&& rhs) noexcept {
543 allocations = std::move(rhs.allocations); 543 allocations = std::move(rhs.allocations);
544 num = rhs.num; 544 num = rhs.num;
diff --git a/src/yuzu/applets/qt_web_browser.h b/src/yuzu/applets/qt_web_browser.h
index e8fe511ed..ceae7926e 100644
--- a/src/yuzu/applets/qt_web_browser.h
+++ b/src/yuzu/applets/qt_web_browser.h
@@ -110,7 +110,7 @@ private:
110 /** 110 /**
111 * Handles button presses to execute functions assigned in yuzu_key_callbacks. 111 * Handles button presses to execute functions assigned in yuzu_key_callbacks.
112 * yuzu_key_callbacks contains specialized functions for the buttons in the window footer 112 * yuzu_key_callbacks contains specialized functions for the buttons in the window footer
113 * that can be overriden by games to achieve desired functionality. 113 * that can be overridden by games to achieve desired functionality.
114 * 114 *
115 * @tparam HIDButton The list of buttons contained in yuzu_key_callbacks 115 * @tparam HIDButton The list of buttons contained in yuzu_key_callbacks
116 */ 116 */
diff --git a/src/yuzu/compatdb.cpp b/src/yuzu/compatdb.cpp
index 05f49c0d2..a57a96a38 100644
--- a/src/yuzu/compatdb.cpp
+++ b/src/yuzu/compatdb.cpp
@@ -76,7 +76,7 @@ void CompatDB::Submit() {
76 compatibility_Graphical->addButton(ui->radioButton_Audio_Minor, 1); 76 compatibility_Graphical->addButton(ui->radioButton_Audio_Minor, 1);
77 compatibility_Audio->addButton(ui->radioButton_Audio_No, 2); 77 compatibility_Audio->addButton(ui->radioButton_Audio_No, 2);
78 78
79 const int compatiblity = static_cast<int>(CalculateCompatibility()); 79 const int compatibility = static_cast<int>(CalculateCompatibility());
80 80
81 switch ((static_cast<CompatDBPage>(currentId()))) { 81 switch ((static_cast<CompatDBPage>(currentId()))) {
82 case CompatDBPage::Intro: 82 case CompatDBPage::Intro:
@@ -113,9 +113,9 @@ void CompatDB::Submit() {
113 break; 113 break;
114 case CompatDBPage::Final: 114 case CompatDBPage::Final:
115 back(); 115 back();
116 LOG_INFO(Frontend, "Compatibility Rating: {}", compatiblity); 116 LOG_INFO(Frontend, "Compatibility Rating: {}", compatibility);
117 telemetry_session.AddField(Common::Telemetry::FieldType::UserFeedback, "Compatibility", 117 telemetry_session.AddField(Common::Telemetry::FieldType::UserFeedback, "Compatibility",
118 compatiblity); 118 compatibility);
119 119
120 button(NextButton)->setEnabled(false); 120 button(NextButton)->setEnabled(false);
121 button(NextButton)->setText(tr("Submitting")); 121 button(NextButton)->setText(tr("Submitting"));
diff --git a/src/yuzu/configuration/configure_audio.cpp b/src/yuzu/configuration/configure_audio.cpp
index 70cc6f84b..fcd6d61a0 100644
--- a/src/yuzu/configuration/configure_audio.cpp
+++ b/src/yuzu/configuration/configure_audio.cpp
@@ -10,6 +10,7 @@
10#include "ui_configure_audio.h" 10#include "ui_configure_audio.h"
11#include "yuzu/configuration/configuration_shared.h" 11#include "yuzu/configuration/configuration_shared.h"
12#include "yuzu/configuration/configure_audio.h" 12#include "yuzu/configuration/configure_audio.h"
13#include "yuzu/uisettings.h"
13 14
14ConfigureAudio::ConfigureAudio(const Core::System& system_, QWidget* parent) 15ConfigureAudio::ConfigureAudio(const Core::System& system_, QWidget* parent)
15 : QWidget(parent), ui(std::make_unique<Ui::ConfigureAudio>()), system{system_} { 16 : QWidget(parent), ui(std::make_unique<Ui::ConfigureAudio>()), system{system_} {
@@ -47,6 +48,7 @@ void ConfigureAudio::SetConfiguration() {
47 48
48 const auto volume_value = static_cast<int>(Settings::values.volume.GetValue()); 49 const auto volume_value = static_cast<int>(Settings::values.volume.GetValue());
49 ui->volume_slider->setValue(volume_value); 50 ui->volume_slider->setValue(volume_value);
51 ui->toggle_background_mute->setChecked(UISettings::values.mute_when_in_background.GetValue());
50 52
51 if (!Settings::IsConfiguringGlobal()) { 53 if (!Settings::IsConfiguringGlobal()) {
52 if (Settings::values.volume.UsingGlobal()) { 54 if (Settings::values.volume.UsingGlobal()) {
@@ -56,8 +58,13 @@ void ConfigureAudio::SetConfiguration() {
56 ui->volume_combo_box->setCurrentIndex(1); 58 ui->volume_combo_box->setCurrentIndex(1);
57 ui->volume_slider->setEnabled(true); 59 ui->volume_slider->setEnabled(true);
58 } 60 }
61 ConfigurationShared::SetPerGameSetting(ui->combo_sound, &Settings::values.sound_index);
62 ConfigurationShared::SetHighlight(ui->mode_label,
63 !Settings::values.sound_index.UsingGlobal());
59 ConfigurationShared::SetHighlight(ui->volume_layout, 64 ConfigurationShared::SetHighlight(ui->volume_layout,
60 !Settings::values.volume.UsingGlobal()); 65 !Settings::values.volume.UsingGlobal());
66 } else {
67 ui->combo_sound->setCurrentIndex(Settings::values.sound_index.GetValue());
61 } 68 }
62 SetVolumeIndicatorText(ui->volume_slider->sliderPosition()); 69 SetVolumeIndicatorText(ui->volume_slider->sliderPosition());
63} 70}
@@ -109,6 +116,8 @@ void ConfigureAudio::SetVolumeIndicatorText(int percentage) {
109} 116}
110 117
111void ConfigureAudio::ApplyConfiguration() { 118void ConfigureAudio::ApplyConfiguration() {
119 ConfigurationShared::ApplyPerGameSetting(&Settings::values.sound_index, ui->combo_sound);
120
112 if (Settings::IsConfiguringGlobal()) { 121 if (Settings::IsConfiguringGlobal()) {
113 Settings::values.sink_id = 122 Settings::values.sink_id =
114 ui->sink_combo_box->itemText(ui->sink_combo_box->currentIndex()).toStdString(); 123 ui->sink_combo_box->itemText(ui->sink_combo_box->currentIndex()).toStdString();
@@ -116,6 +125,7 @@ void ConfigureAudio::ApplyConfiguration() {
116 ui->output_combo_box->itemText(ui->output_combo_box->currentIndex()).toStdString()); 125 ui->output_combo_box->itemText(ui->output_combo_box->currentIndex()).toStdString());
117 Settings::values.audio_input_device_id.SetValue( 126 Settings::values.audio_input_device_id.SetValue(
118 ui->input_combo_box->itemText(ui->input_combo_box->currentIndex()).toStdString()); 127 ui->input_combo_box->itemText(ui->input_combo_box->currentIndex()).toStdString());
128 UISettings::values.mute_when_in_background = ui->toggle_background_mute->isChecked();
119 129
120 // Guard if during game and set to game-specific value 130 // Guard if during game and set to game-specific value
121 if (Settings::values.volume.UsingGlobal()) { 131 if (Settings::values.volume.UsingGlobal()) {
@@ -173,11 +183,14 @@ void ConfigureAudio::RetranslateUI() {
173 183
174void ConfigureAudio::SetupPerGameUI() { 184void ConfigureAudio::SetupPerGameUI() {
175 if (Settings::IsConfiguringGlobal()) { 185 if (Settings::IsConfiguringGlobal()) {
186 ui->combo_sound->setEnabled(Settings::values.sound_index.UsingGlobal());
176 ui->volume_slider->setEnabled(Settings::values.volume.UsingGlobal()); 187 ui->volume_slider->setEnabled(Settings::values.volume.UsingGlobal());
177
178 return; 188 return;
179 } 189 }
180 190
191 ConfigurationShared::SetColoredComboBox(ui->combo_sound, ui->mode_label,
192 Settings::values.sound_index.GetValue(true));
193
181 connect(ui->volume_combo_box, qOverload<int>(&QComboBox::activated), this, [this](int index) { 194 connect(ui->volume_combo_box, qOverload<int>(&QComboBox::activated), this, [this](int index) {
182 ui->volume_slider->setEnabled(index == 1); 195 ui->volume_slider->setEnabled(index == 1);
183 ConfigurationShared::SetHighlight(ui->volume_layout, index == 1); 196 ConfigurationShared::SetHighlight(ui->volume_layout, index == 1);
diff --git a/src/yuzu/configuration/configure_audio.ui b/src/yuzu/configuration/configure_audio.ui
index 6034d8581..4128c83ad 100644
--- a/src/yuzu/configuration/configure_audio.ui
+++ b/src/yuzu/configuration/configure_audio.ui
@@ -39,7 +39,7 @@
39 <item> 39 <item>
40 <widget class="QLabel" name="output_label"> 40 <widget class="QLabel" name="output_label">
41 <property name="text"> 41 <property name="text">
42 <string>Output Device</string> 42 <string>Output Device:</string>
43 </property> 43 </property>
44 </widget> 44 </widget>
45 </item> 45 </item>
@@ -53,7 +53,7 @@
53 <item> 53 <item>
54 <widget class="QLabel" name="input_label"> 54 <widget class="QLabel" name="input_label">
55 <property name="text"> 55 <property name="text">
56 <string>Input Device</string> 56 <string>Input Device:</string>
57 </property> 57 </property>
58 </widget> 58 </widget>
59 </item> 59 </item>
@@ -62,6 +62,36 @@
62 </item> 62 </item>
63 </layout> 63 </layout>
64 </item> 64 </item>
65 <item>
66 <layout class="QHBoxLayout" name="mode_layout">
67 <item>
68 <widget class="QLabel" name="mode_label">
69 <property name="text">
70 <string>Sound Output Mode:</string>
71 </property>
72 </widget>
73 </item>
74 <item>
75 <widget class="QComboBox" name="combo_sound">
76 <item>
77 <property name="text">
78 <string>Mono</string>
79 </property>
80 </item>
81 <item>
82 <property name="text">
83 <string>Stereo</string>
84 </property>
85 </item>
86 <item>
87 <property name="text">
88 <string>Surround</string>
89 </property>
90 </item>
91 </widget>
92 </item>
93 </layout>
94 </item>
65 <item> 95 <item>
66 <widget class="QWidget" name="volume_layout" native="true"> 96 <widget class="QWidget" name="volume_layout" native="true">
67 <layout class="QHBoxLayout" name="horizontalLayout_2"> 97 <layout class="QHBoxLayout" name="horizontalLayout_2">
@@ -149,6 +179,17 @@
149 </layout> 179 </layout>
150 </widget> 180 </widget>
151 </item> 181 </item>
182 <item>
183 <layout class="QHBoxLayout" name="mute_layout">
184 <item>
185 <widget class="QCheckBox" name="toggle_background_mute">
186 <property name="text">
187 <string>Mute audio when in background</string>
188 </property>
189 </widget>
190 </item>
191 </layout>
192 </item>
152 </layout> 193 </layout>
153 </widget> 194 </widget>
154 </item> 195 </item>
diff --git a/src/yuzu/configuration/configure_general.cpp b/src/yuzu/configuration/configure_general.cpp
index 7ade01ba6..207bcdc4d 100644
--- a/src/yuzu/configuration/configure_general.cpp
+++ b/src/yuzu/configuration/configure_general.cpp
@@ -42,7 +42,6 @@ void ConfigureGeneral::SetConfiguration() {
42 ui->toggle_check_exit->setChecked(UISettings::values.confirm_before_closing.GetValue()); 42 ui->toggle_check_exit->setChecked(UISettings::values.confirm_before_closing.GetValue());
43 ui->toggle_user_on_boot->setChecked(UISettings::values.select_user_on_boot.GetValue()); 43 ui->toggle_user_on_boot->setChecked(UISettings::values.select_user_on_boot.GetValue());
44 ui->toggle_background_pause->setChecked(UISettings::values.pause_when_in_background.GetValue()); 44 ui->toggle_background_pause->setChecked(UISettings::values.pause_when_in_background.GetValue());
45 ui->toggle_background_mute->setChecked(UISettings::values.mute_when_in_background.GetValue());
46 ui->toggle_hide_mouse->setChecked(UISettings::values.hide_mouse.GetValue()); 45 ui->toggle_hide_mouse->setChecked(UISettings::values.hide_mouse.GetValue());
47 46
48 ui->toggle_speed_limit->setChecked(Settings::values.use_speed_limit.GetValue()); 47 ui->toggle_speed_limit->setChecked(Settings::values.use_speed_limit.GetValue());
@@ -88,7 +87,6 @@ void ConfigureGeneral::ApplyConfiguration() {
88 UISettings::values.confirm_before_closing = ui->toggle_check_exit->isChecked(); 87 UISettings::values.confirm_before_closing = ui->toggle_check_exit->isChecked();
89 UISettings::values.select_user_on_boot = ui->toggle_user_on_boot->isChecked(); 88 UISettings::values.select_user_on_boot = ui->toggle_user_on_boot->isChecked();
90 UISettings::values.pause_when_in_background = ui->toggle_background_pause->isChecked(); 89 UISettings::values.pause_when_in_background = ui->toggle_background_pause->isChecked();
91 UISettings::values.mute_when_in_background = ui->toggle_background_mute->isChecked();
92 UISettings::values.hide_mouse = ui->toggle_hide_mouse->isChecked(); 90 UISettings::values.hide_mouse = ui->toggle_hide_mouse->isChecked();
93 91
94 // Guard if during game and set to game-specific value 92 // Guard if during game and set to game-specific value
diff --git a/src/yuzu/configuration/configure_general.ui b/src/yuzu/configuration/configure_general.ui
index 5b90b1109..6cd79673c 100644
--- a/src/yuzu/configuration/configure_general.ui
+++ b/src/yuzu/configuration/configure_general.ui
@@ -90,13 +90,6 @@
90 </widget> 90 </widget>
91 </item> 91 </item>
92 <item> 92 <item>
93 <widget class="QCheckBox" name="toggle_background_mute">
94 <property name="text">
95 <string>Mute audio when in background</string>
96 </property>
97 </widget>
98 </item>
99 <item>
100 <widget class="QCheckBox" name="toggle_hide_mouse"> 93 <widget class="QCheckBox" name="toggle_hide_mouse">
101 <property name="text"> 94 <property name="text">
102 <string>Hide mouse on inactivity</string> 95 <string>Hide mouse on inactivity</string>
diff --git a/src/yuzu/configuration/configure_hotkeys.h b/src/yuzu/configuration/configure_hotkeys.h
index b45ecb185..e8e414320 100644
--- a/src/yuzu/configuration/configure_hotkeys.h
+++ b/src/yuzu/configuration/configure_hotkeys.h
@@ -34,7 +34,7 @@ public:
34 34
35 /** 35 /**
36 * Populates the hotkey list widget using data from the provided registry. 36 * Populates the hotkey list widget using data from the provided registry.
37 * Called everytime the Configure dialog is opened. 37 * Called every time the Configure dialog is opened.
38 * @param registry The HotkeyRegistry whose data is used to populate the list. 38 * @param registry The HotkeyRegistry whose data is used to populate the list.
39 */ 39 */
40 void Populate(const HotkeyRegistry& registry); 40 void Populate(const HotkeyRegistry& registry);
diff --git a/src/yuzu/configuration/configure_input_player.h b/src/yuzu/configuration/configure_input_player.h
index 99a9c875d..d4df43d73 100644
--- a/src/yuzu/configuration/configure_input_player.h
+++ b/src/yuzu/configuration/configure_input_player.h
@@ -224,7 +224,7 @@ private:
224 224
225 /// Bottom row is where console wide settings are held, and its "owned" by the parent 225 /// Bottom row is where console wide settings are held, and its "owned" by the parent
226 /// ConfigureInput widget. On show, add this widget to the main layout. This will change the 226 /// ConfigureInput widget. On show, add this widget to the main layout. This will change the
227 /// parent of the widget to this widget (but thats fine). 227 /// parent of the widget to this widget (but that's fine).
228 QWidget* bottom_row; 228 QWidget* bottom_row;
229 229
230 Core::HID::HIDCore& hid_core; 230 Core::HID::HIDCore& hid_core;
diff --git a/src/yuzu/configuration/configure_input_player_widget.h b/src/yuzu/configuration/configure_input_player_widget.h
index 0e9e95e85..267d134de 100644
--- a/src/yuzu/configuration/configure_input_player_widget.h
+++ b/src/yuzu/configuration/configure_input_player_widget.h
@@ -43,7 +43,7 @@ public:
43 // Handles emulated controller events 43 // Handles emulated controller events
44 void ControllerUpdate(Core::HID::ControllerTriggerType type); 44 void ControllerUpdate(Core::HID::ControllerTriggerType type);
45 45
46 // Updates input on sheduled interval 46 // Updates input on scheduled interval
47 void UpdateInput(); 47 void UpdateInput();
48 48
49protected: 49protected:
diff --git a/src/yuzu/configuration/configure_system.cpp b/src/yuzu/configuration/configure_system.cpp
index 9ea4c02da..6af34f793 100644
--- a/src/yuzu/configuration/configure_system.cpp
+++ b/src/yuzu/configuration/configure_system.cpp
@@ -40,8 +40,6 @@ static bool IsValidLocale(u32 region_index, u32 language_index) {
40ConfigureSystem::ConfigureSystem(Core::System& system_, QWidget* parent) 40ConfigureSystem::ConfigureSystem(Core::System& system_, QWidget* parent)
41 : QWidget(parent), ui{std::make_unique<Ui::ConfigureSystem>()}, system{system_} { 41 : QWidget(parent), ui{std::make_unique<Ui::ConfigureSystem>()}, system{system_} {
42 ui->setupUi(this); 42 ui->setupUi(this);
43 connect(ui->button_regenerate_console_id, &QPushButton::clicked, this,
44 &ConfigureSystem::RefreshConsoleID);
45 43
46 connect(ui->rng_seed_checkbox, &QCheckBox::stateChanged, this, [this](int state) { 44 connect(ui->rng_seed_checkbox, &QCheckBox::stateChanged, this, [this](int state) {
47 ui->rng_seed_edit->setEnabled(state == Qt::Checked); 45 ui->rng_seed_edit->setEnabled(state == Qt::Checked);
@@ -76,9 +74,6 @@ ConfigureSystem::ConfigureSystem(Core::System& system_, QWidget* parent)
76 locale_check); 74 locale_check);
77 connect(ui->combo_region, qOverload<int>(&QComboBox::currentIndexChanged), this, locale_check); 75 connect(ui->combo_region, qOverload<int>(&QComboBox::currentIndexChanged), this, locale_check);
78 76
79 ui->label_console_id->setVisible(Settings::IsConfiguringGlobal());
80 ui->button_regenerate_console_id->setVisible(Settings::IsConfiguringGlobal());
81
82 SetupPerGameUI(); 77 SetupPerGameUI();
83 78
84 SetConfiguration(); 79 SetConfiguration();
@@ -121,14 +116,12 @@ void ConfigureSystem::SetConfiguration() {
121 ui->combo_language->setCurrentIndex(Settings::values.language_index.GetValue()); 116 ui->combo_language->setCurrentIndex(Settings::values.language_index.GetValue());
122 ui->combo_region->setCurrentIndex(Settings::values.region_index.GetValue()); 117 ui->combo_region->setCurrentIndex(Settings::values.region_index.GetValue());
123 ui->combo_time_zone->setCurrentIndex(Settings::values.time_zone_index.GetValue()); 118 ui->combo_time_zone->setCurrentIndex(Settings::values.time_zone_index.GetValue());
124 ui->combo_sound->setCurrentIndex(Settings::values.sound_index.GetValue());
125 } else { 119 } else {
126 ConfigurationShared::SetPerGameSetting(ui->combo_language, 120 ConfigurationShared::SetPerGameSetting(ui->combo_language,
127 &Settings::values.language_index); 121 &Settings::values.language_index);
128 ConfigurationShared::SetPerGameSetting(ui->combo_region, &Settings::values.region_index); 122 ConfigurationShared::SetPerGameSetting(ui->combo_region, &Settings::values.region_index);
129 ConfigurationShared::SetPerGameSetting(ui->combo_time_zone, 123 ConfigurationShared::SetPerGameSetting(ui->combo_time_zone,
130 &Settings::values.time_zone_index); 124 &Settings::values.time_zone_index);
131 ConfigurationShared::SetPerGameSetting(ui->combo_sound, &Settings::values.sound_index);
132 125
133 ConfigurationShared::SetHighlight(ui->label_language, 126 ConfigurationShared::SetHighlight(ui->label_language,
134 !Settings::values.language_index.UsingGlobal()); 127 !Settings::values.language_index.UsingGlobal());
@@ -136,8 +129,6 @@ void ConfigureSystem::SetConfiguration() {
136 !Settings::values.region_index.UsingGlobal()); 129 !Settings::values.region_index.UsingGlobal());
137 ConfigurationShared::SetHighlight(ui->label_timezone, 130 ConfigurationShared::SetHighlight(ui->label_timezone,
138 !Settings::values.time_zone_index.UsingGlobal()); 131 !Settings::values.time_zone_index.UsingGlobal());
139 ConfigurationShared::SetHighlight(ui->label_sound,
140 !Settings::values.sound_index.UsingGlobal());
141 } 132 }
142} 133}
143 134
@@ -169,7 +160,6 @@ void ConfigureSystem::ApplyConfiguration() {
169 ConfigurationShared::ApplyPerGameSetting(&Settings::values.region_index, ui->combo_region); 160 ConfigurationShared::ApplyPerGameSetting(&Settings::values.region_index, ui->combo_region);
170 ConfigurationShared::ApplyPerGameSetting(&Settings::values.time_zone_index, 161 ConfigurationShared::ApplyPerGameSetting(&Settings::values.time_zone_index,
171 ui->combo_time_zone); 162 ui->combo_time_zone);
172 ConfigurationShared::ApplyPerGameSetting(&Settings::values.sound_index, ui->combo_sound);
173 163
174 if (Settings::IsConfiguringGlobal()) { 164 if (Settings::IsConfiguringGlobal()) {
175 // Guard if during game and set to game-specific value 165 // Guard if during game and set to game-specific value
@@ -202,29 +192,11 @@ void ConfigureSystem::ApplyConfiguration() {
202 } 192 }
203} 193}
204 194
205void ConfigureSystem::RefreshConsoleID() {
206 QMessageBox::StandardButton reply;
207 QString warning_text = tr("This will replace your current virtual Switch with a new one. "
208 "Your current virtual Switch will not be recoverable. "
209 "This might have unexpected effects in games. This might fail, "
210 "if you use an outdated config savegame. Continue?");
211 reply = QMessageBox::critical(this, tr("Warning"), warning_text,
212 QMessageBox::No | QMessageBox::Yes);
213 if (reply == QMessageBox::No) {
214 return;
215 }
216
217 u64 console_id{};
218 ui->label_console_id->setText(
219 tr("Console ID: 0x%1").arg(QString::number(console_id, 16).toUpper()));
220}
221
222void ConfigureSystem::SetupPerGameUI() { 195void ConfigureSystem::SetupPerGameUI() {
223 if (Settings::IsConfiguringGlobal()) { 196 if (Settings::IsConfiguringGlobal()) {
224 ui->combo_language->setEnabled(Settings::values.language_index.UsingGlobal()); 197 ui->combo_language->setEnabled(Settings::values.language_index.UsingGlobal());
225 ui->combo_region->setEnabled(Settings::values.region_index.UsingGlobal()); 198 ui->combo_region->setEnabled(Settings::values.region_index.UsingGlobal());
226 ui->combo_time_zone->setEnabled(Settings::values.time_zone_index.UsingGlobal()); 199 ui->combo_time_zone->setEnabled(Settings::values.time_zone_index.UsingGlobal());
227 ui->combo_sound->setEnabled(Settings::values.sound_index.UsingGlobal());
228 ui->rng_seed_checkbox->setEnabled(Settings::values.rng_seed.UsingGlobal()); 200 ui->rng_seed_checkbox->setEnabled(Settings::values.rng_seed.UsingGlobal());
229 ui->rng_seed_edit->setEnabled(Settings::values.rng_seed.UsingGlobal()); 201 ui->rng_seed_edit->setEnabled(Settings::values.rng_seed.UsingGlobal());
230 202
@@ -237,8 +209,6 @@ void ConfigureSystem::SetupPerGameUI() {
237 Settings::values.region_index.GetValue(true)); 209 Settings::values.region_index.GetValue(true));
238 ConfigurationShared::SetColoredComboBox(ui->combo_time_zone, ui->label_timezone, 210 ConfigurationShared::SetColoredComboBox(ui->combo_time_zone, ui->label_timezone,
239 Settings::values.time_zone_index.GetValue(true)); 211 Settings::values.time_zone_index.GetValue(true));
240 ConfigurationShared::SetColoredComboBox(ui->combo_sound, ui->label_sound,
241 Settings::values.sound_index.GetValue(true));
242 212
243 ConfigurationShared::SetColoredTristate( 213 ConfigurationShared::SetColoredTristate(
244 ui->rng_seed_checkbox, Settings::values.rng_seed.UsingGlobal(), 214 ui->rng_seed_checkbox, Settings::values.rng_seed.UsingGlobal(),
diff --git a/src/yuzu/configuration/configure_system.h b/src/yuzu/configuration/configure_system.h
index a7f086258..ec28724a1 100644
--- a/src/yuzu/configuration/configure_system.h
+++ b/src/yuzu/configuration/configure_system.h
@@ -35,8 +35,6 @@ private:
35 35
36 void ReadSystemSettings(); 36 void ReadSystemSettings();
37 37
38 void RefreshConsoleID();
39
40 void SetupPerGameUI(); 38 void SetupPerGameUI();
41 39
42 std::unique_ptr<Ui::ConfigureSystem> ui; 40 std::unique_ptr<Ui::ConfigureSystem> ui;
diff --git a/src/yuzu/configuration/configure_system.ui b/src/yuzu/configuration/configure_system.ui
index 0459cd924..9e7bc3b93 100644
--- a/src/yuzu/configuration/configure_system.ui
+++ b/src/yuzu/configuration/configure_system.ui
@@ -411,7 +411,7 @@
411 </item> 411 </item>
412 </widget> 412 </widget>
413 </item> 413 </item>
414 <item row="5" column="0"> 414 <item row="4" column="0">
415 <widget class="QCheckBox" name="custom_rtc_checkbox"> 415 <widget class="QCheckBox" name="custom_rtc_checkbox">
416 <property name="text"> 416 <property name="text">
417 <string>Custom RTC</string> 417 <string>Custom RTC</string>
@@ -425,54 +425,21 @@
425 </property> 425 </property>
426 </widget> 426 </widget>
427 </item> 427 </item>
428 <item row="6" column="0"> 428 <item row="5" column="0">
429 <widget class="QCheckBox" name="rng_seed_checkbox"> 429 <widget class="QCheckBox" name="rng_seed_checkbox">
430 <property name="text"> 430 <property name="text">
431 <string>RNG Seed</string> 431 <string>RNG Seed</string>
432 </property> 432 </property>
433 </widget> 433 </widget>
434 </item> 434 </item>
435 <item row="7" column="0"> 435 <item row="6" column="0">
436 <widget class="QLabel" name="device_name_label"> 436 <widget class="QLabel" name="device_name_label">
437 <property name="text"> 437 <property name="text">
438 <string>Device Name</string> 438 <string>Device Name</string>
439 </property> 439 </property>
440 </widget> 440 </widget>
441 </item> 441 </item>
442 <item row="3" column="1"> 442 <item row="4" column="1">
443 <widget class="QComboBox" name="combo_sound">
444 <item>
445 <property name="text">
446 <string>Mono</string>
447 </property>
448 </item>
449 <item>
450 <property name="text">
451 <string>Stereo</string>
452 </property>
453 </item>
454 <item>
455 <property name="text">
456 <string>Surround</string>
457 </property>
458 </item>
459 </widget>
460 </item>
461 <item row="4" column="0">
462 <widget class="QLabel" name="label_console_id">
463 <property name="text">
464 <string>Console ID:</string>
465 </property>
466 </widget>
467 </item>
468 <item row="3" column="0">
469 <widget class="QLabel" name="label_sound">
470 <property name="text">
471 <string>Sound output mode</string>
472 </property>
473 </widget>
474 </item>
475 <item row="5" column="1">
476 <widget class="QDateTimeEdit" name="custom_rtc_edit"> 443 <widget class="QDateTimeEdit" name="custom_rtc_edit">
477 <property name="minimumDate"> 444 <property name="minimumDate">
478 <date> 445 <date>
@@ -483,14 +450,14 @@
483 </property> 450 </property>
484 </widget> 451 </widget>
485 </item> 452 </item>
486 <item row="7" column="1"> 453 <item row="6" column="1">
487 <widget class="QLineEdit" name="device_name_edit"> 454 <widget class="QLineEdit" name="device_name_edit">
488 <property name="maxLength"> 455 <property name="maxLength">
489 <number>128</number> 456 <number>128</number>
490 </property> 457 </property>
491 </widget> 458 </widget>
492 </item> 459 </item>
493 <item row="6" column="1"> 460 <item row="5" column="1">
494 <widget class="QLineEdit" name="rng_seed_edit"> 461 <widget class="QLineEdit" name="rng_seed_edit">
495 <property name="sizePolicy"> 462 <property name="sizePolicy">
496 <sizepolicy hsizetype="Minimum" vsizetype="Fixed"> 463 <sizepolicy hsizetype="Minimum" vsizetype="Fixed">
@@ -511,22 +478,6 @@
511 </property> 478 </property>
512 </widget> 479 </widget>
513 </item> 480 </item>
514 <item row="4" column="1">
515 <widget class="QPushButton" name="button_regenerate_console_id">
516 <property name="sizePolicy">
517 <sizepolicy hsizetype="Fixed" vsizetype="Fixed">
518 <horstretch>0</horstretch>
519 <verstretch>0</verstretch>
520 </sizepolicy>
521 </property>
522 <property name="layoutDirection">
523 <enum>Qt::RightToLeft</enum>
524 </property>
525 <property name="text">
526 <string>Regenerate</string>
527 </property>
528 </widget>
529 </item>
530 </layout> 481 </layout>
531 </item> 482 </item>
532 </layout> 483 </layout>
diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp
index 7f7c5fc42..0783a2430 100644
--- a/src/yuzu/debugger/wait_tree.cpp
+++ b/src/yuzu/debugger/wait_tree.cpp
@@ -112,33 +112,6 @@ QString WaitTreeText::GetText() const {
112 return text; 112 return text;
113} 113}
114 114
115WaitTreeMutexInfo::WaitTreeMutexInfo(VAddr mutex_address_, const Kernel::KHandleTable& handle_table,
116 Core::System& system_)
117 : mutex_address{mutex_address_}, system{system_} {
118 mutex_value = system.Memory().Read32(mutex_address);
119 owner_handle = static_cast<Kernel::Handle>(mutex_value & Kernel::Svc::HandleWaitMask);
120 owner = handle_table.GetObject<Kernel::KThread>(owner_handle).GetPointerUnsafe();
121}
122
123WaitTreeMutexInfo::~WaitTreeMutexInfo() = default;
124
125QString WaitTreeMutexInfo::GetText() const {
126 return tr("waiting for mutex 0x%1").arg(mutex_address, 16, 16, QLatin1Char{'0'});
127}
128
129std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeMutexInfo::GetChildren() const {
130 const bool has_waiters = (mutex_value & Kernel::Svc::HandleWaitMask) != 0;
131
132 std::vector<std::unique_ptr<WaitTreeItem>> list;
133 list.push_back(std::make_unique<WaitTreeText>(tr("has waiters: %1").arg(has_waiters)));
134 list.push_back(std::make_unique<WaitTreeText>(
135 tr("owner handle: 0x%1").arg(owner_handle, 8, 16, QLatin1Char{'0'})));
136 if (owner != nullptr) {
137 list.push_back(std::make_unique<WaitTreeThread>(*owner, system));
138 }
139 return list;
140}
141
142WaitTreeCallstack::WaitTreeCallstack(const Kernel::KThread& thread_, Core::System& system_) 115WaitTreeCallstack::WaitTreeCallstack(const Kernel::KThread& thread_, Core::System& system_)
143 : thread{thread_}, system{system_} {} 116 : thread{thread_}, system{system_} {}
144WaitTreeCallstack::~WaitTreeCallstack() = default; 117WaitTreeCallstack::~WaitTreeCallstack() = default;
@@ -182,10 +155,9 @@ bool WaitTreeExpandableItem::IsExpandable() const {
182} 155}
183 156
184QString WaitTreeSynchronizationObject::GetText() const { 157QString WaitTreeSynchronizationObject::GetText() const {
185 return tr("[%1] %2 %3") 158 return tr("[%1] %2")
186 .arg(object.GetId()) 159 .arg(object.GetId())
187 .arg(QString::fromStdString(object.GetTypeObj().GetName()), 160 .arg(QString::fromStdString(object.GetTypeObj().GetName()));
188 QString::fromStdString(object.GetName()));
189} 161}
190 162
191std::unique_ptr<WaitTreeSynchronizationObject> WaitTreeSynchronizationObject::make( 163std::unique_ptr<WaitTreeSynchronizationObject> WaitTreeSynchronizationObject::make(
@@ -217,26 +189,6 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeSynchronizationObject::GetChi
217 return list; 189 return list;
218} 190}
219 191
220WaitTreeObjectList::WaitTreeObjectList(const std::vector<Kernel::KSynchronizationObject*>& list,
221 bool w_all, Core::System& system_)
222 : object_list(list), wait_all(w_all), system{system_} {}
223
224WaitTreeObjectList::~WaitTreeObjectList() = default;
225
226QString WaitTreeObjectList::GetText() const {
227 if (wait_all)
228 return tr("waiting for all objects");
229 return tr("waiting for one of the following objects");
230}
231
232std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeObjectList::GetChildren() const {
233 std::vector<std::unique_ptr<WaitTreeItem>> list(object_list.size());
234 std::transform(object_list.begin(), object_list.end(), list.begin(), [this](const auto& t) {
235 return WaitTreeSynchronizationObject::make(*t, system);
236 });
237 return list;
238}
239
240WaitTreeThread::WaitTreeThread(const Kernel::KThread& thread, Core::System& system_) 192WaitTreeThread::WaitTreeThread(const Kernel::KThread& thread, Core::System& system_)
241 : WaitTreeSynchronizationObject(thread, system_), system{system_} {} 193 : WaitTreeSynchronizationObject(thread, system_), system{system_} {}
242WaitTreeThread::~WaitTreeThread() = default; 194WaitTreeThread::~WaitTreeThread() = default;
@@ -348,32 +300,14 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
348 300
349 list.push_back(std::make_unique<WaitTreeText>(tr("processor = %1").arg(processor))); 301 list.push_back(std::make_unique<WaitTreeText>(tr("processor = %1").arg(processor)));
350 list.push_back(std::make_unique<WaitTreeText>( 302 list.push_back(std::make_unique<WaitTreeText>(
351 tr("ideal core = %1").arg(thread.GetIdealCoreForDebugging())));
352 list.push_back(std::make_unique<WaitTreeText>(
353 tr("affinity mask = %1").arg(thread.GetAffinityMask().GetAffinityMask()))); 303 tr("affinity mask = %1").arg(thread.GetAffinityMask().GetAffinityMask())));
354 list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadID()))); 304 list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadId())));
355 list.push_back(std::make_unique<WaitTreeText>(tr("priority = %1(current) / %2(normal)") 305 list.push_back(std::make_unique<WaitTreeText>(tr("priority = %1(current) / %2(normal)")
356 .arg(thread.GetPriority()) 306 .arg(thread.GetPriority())
357 .arg(thread.GetBasePriority()))); 307 .arg(thread.GetBasePriority())));
358 list.push_back(std::make_unique<WaitTreeText>( 308 list.push_back(std::make_unique<WaitTreeText>(
359 tr("last running ticks = %1").arg(thread.GetLastScheduledTick()))); 309 tr("last running ticks = %1").arg(thread.GetLastScheduledTick())));
360 310
361 const VAddr mutex_wait_address = thread.GetMutexWaitAddressForDebugging();
362 if (mutex_wait_address != 0) {
363 const auto& handle_table = thread.GetOwnerProcess()->GetHandleTable();
364 list.push_back(
365 std::make_unique<WaitTreeMutexInfo>(mutex_wait_address, handle_table, system));
366 } else {
367 list.push_back(std::make_unique<WaitTreeText>(tr("not waiting for mutex")));
368 }
369
370 if (thread.GetState() == Kernel::ThreadState::Waiting &&
371 thread.GetWaitReasonForDebugging() ==
372 Kernel::ThreadWaitReasonForDebugging::Synchronization) {
373 list.push_back(std::make_unique<WaitTreeObjectList>(thread.GetWaitObjectsForDebugging(),
374 thread.IsCancellable(), system));
375 }
376
377 list.push_back(std::make_unique<WaitTreeCallstack>(thread, system)); 311 list.push_back(std::make_unique<WaitTreeCallstack>(thread, system));
378 312
379 return list; 313 return list;
diff --git a/src/yuzu/debugger/wait_tree.h b/src/yuzu/debugger/wait_tree.h
index 7e528b592..23c329fbe 100644
--- a/src/yuzu/debugger/wait_tree.h
+++ b/src/yuzu/debugger/wait_tree.h
@@ -74,25 +74,6 @@ public:
74 bool IsExpandable() const override; 74 bool IsExpandable() const override;
75}; 75};
76 76
77class WaitTreeMutexInfo : public WaitTreeExpandableItem {
78 Q_OBJECT
79public:
80 explicit WaitTreeMutexInfo(VAddr mutex_address_, const Kernel::KHandleTable& handle_table,
81 Core::System& system_);
82 ~WaitTreeMutexInfo() override;
83
84 QString GetText() const override;
85 std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
86
87private:
88 VAddr mutex_address{};
89 u32 mutex_value{};
90 Kernel::Handle owner_handle{};
91 Kernel::KThread* owner{};
92
93 Core::System& system;
94};
95
96class WaitTreeCallstack : public WaitTreeExpandableItem { 77class WaitTreeCallstack : public WaitTreeExpandableItem {
97 Q_OBJECT 78 Q_OBJECT
98public: 79public:
@@ -127,23 +108,6 @@ private:
127 Core::System& system; 108 Core::System& system;
128}; 109};
129 110
130class WaitTreeObjectList : public WaitTreeExpandableItem {
131 Q_OBJECT
132public:
133 WaitTreeObjectList(const std::vector<Kernel::KSynchronizationObject*>& list, bool wait_all,
134 Core::System& system_);
135 ~WaitTreeObjectList() override;
136
137 QString GetText() const override;
138 std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
139
140private:
141 const std::vector<Kernel::KSynchronizationObject*>& object_list;
142 bool wait_all;
143
144 Core::System& system;
145};
146
147class WaitTreeThread : public WaitTreeSynchronizationObject { 111class WaitTreeThread : public WaitTreeSynchronizationObject {
148 Q_OBJECT 112 Q_OBJECT
149public: 113public:
diff --git a/src/yuzu/loading_screen.cpp b/src/yuzu/loading_screen.cpp
index e263a07a7..b081fff6b 100644
--- a/src/yuzu/loading_screen.cpp
+++ b/src/yuzu/loading_screen.cpp
@@ -153,7 +153,7 @@ void LoadingScreen::OnLoadProgress(VideoCore::LoadCallbackStage stage, std::size
153 } 153 }
154 154
155 QString estimate; 155 QString estimate;
156 // If theres a drastic slowdown in the rate, then display an estimate 156 // If there's a drastic slowdown in the rate, then display an estimate
157 if (now - previous_time > milliseconds{50} || slow_shader_compile_start) { 157 if (now - previous_time > milliseconds{50} || slow_shader_compile_start) {
158 if (!slow_shader_compile_start) { 158 if (!slow_shader_compile_start) {
159 slow_shader_start = steady_clock::now(); 159 slow_shader_start = steady_clock::now();
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index c092507f4..ae14884b5 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -274,7 +274,7 @@ static QString PrettyProductName() {
274 274
275#ifdef _WIN32 275#ifdef _WIN32
276static void OverrideWindowsFont() { 276static void OverrideWindowsFont() {
277 // Qt5 chooses these fonts on Windows and they have fairly ugly alphanumeric/cyrllic characters 277 // Qt5 chooses these fonts on Windows and they have fairly ugly alphanumeric/cyrillic characters
278 // Asking to use "MS Shell Dlg 2" gives better other chars while leaving the Chinese Characters. 278 // Asking to use "MS Shell Dlg 2" gives better other chars while leaving the Chinese Characters.
279 const QString startup_font = QApplication::font().family(); 279 const QString startup_font = QApplication::font().family();
280 const QStringList ugly_fonts = {QStringLiteral("SimSun"), QStringLiteral("PMingLiU")}; 280 const QStringList ugly_fonts = {QStringLiteral("SimSun"), QStringLiteral("PMingLiU")};
@@ -3596,7 +3596,7 @@ bool GMainWindow::CreateShortcut(const std::string& shortcut_path, const std::st
3596 const std::string& command, const std::string& arguments, 3596 const std::string& command, const std::string& arguments,
3597 const std::string& categories, const std::string& keywords) { 3597 const std::string& categories, const std::string& keywords) {
3598#if defined(__linux__) || defined(__FreeBSD__) 3598#if defined(__linux__) || defined(__FreeBSD__)
3599 // This desktop file template was writting referencing 3599 // This desktop file template was writing referencing
3600 // https://specifications.freedesktop.org/desktop-entry-spec/desktop-entry-spec-1.0.html 3600 // https://specifications.freedesktop.org/desktop-entry-spec/desktop-entry-spec-1.0.html
3601 std::string shortcut_contents{}; 3601 std::string shortcut_contents{};
3602 shortcut_contents.append("[Desktop Entry]\n"); 3602 shortcut_contents.append("[Desktop Entry]\n");
diff --git a/src/yuzu/multiplayer/lobby.cpp b/src/yuzu/multiplayer/lobby.cpp
index 6c93e3511..387f6f7c9 100644
--- a/src/yuzu/multiplayer/lobby.cpp
+++ b/src/yuzu/multiplayer/lobby.cpp
@@ -278,7 +278,7 @@ void Lobby::OnRefreshLobby() {
278 } 278 }
279 } 279 }
280 280
281 // Reenable the refresh button and resize the columns 281 // Re-enable the refresh button and resize the columns
282 ui->refresh_list->setEnabled(true); 282 ui->refresh_list->setEnabled(true);
283 ui->refresh_list->setText(tr("Refresh List")); 283 ui->refresh_list->setText(tr("Refresh List"));
284 ui->room_list->header()->stretchLastSection(); 284 ui->room_list->header()->stretchLastSection();
diff --git a/src/yuzu/multiplayer/state.cpp b/src/yuzu/multiplayer/state.cpp
index 285bb150d..d82ca9aee 100644
--- a/src/yuzu/multiplayer/state.cpp
+++ b/src/yuzu/multiplayer/state.cpp
@@ -112,7 +112,7 @@ void MultiplayerState::SetNotificationStatus(NotificationStatus status) {
112 112
113void MultiplayerState::UpdateNotificationStatus() { 113void MultiplayerState::UpdateNotificationStatus() {
114 switch (notification_status) { 114 switch (notification_status) {
115 case NotificationStatus::Unitialized: 115 case NotificationStatus::Uninitialized:
116 status_icon->setPixmap(QIcon::fromTheme(QStringLiteral("disconnected")).pixmap(16)); 116 status_icon->setPixmap(QIcon::fromTheme(QStringLiteral("disconnected")).pixmap(16));
117 status_text->setText(tr("Not Connected. Click here to find a room!")); 117 status_text->setText(tr("Not Connected. Click here to find a room!"));
118 leave_room->setEnabled(false); 118 leave_room->setEnabled(false);
diff --git a/src/yuzu/multiplayer/state.h b/src/yuzu/multiplayer/state.h
index 5d681c5c6..d6149838f 100644
--- a/src/yuzu/multiplayer/state.h
+++ b/src/yuzu/multiplayer/state.h
@@ -23,7 +23,7 @@ class MultiplayerState : public QWidget {
23 23
24public: 24public:
25 enum class NotificationStatus { 25 enum class NotificationStatus {
26 Unitialized, 26 Uninitialized,
27 Disconnected, 27 Disconnected,
28 Connected, 28 Connected,
29 Notification, 29 Notification,
@@ -98,7 +98,7 @@ private:
98 QAction* show_room; 98 QAction* show_room;
99 std::shared_ptr<Core::AnnounceMultiplayerSession> announce_multiplayer_session; 99 std::shared_ptr<Core::AnnounceMultiplayerSession> announce_multiplayer_session;
100 Network::RoomMember::State current_state = Network::RoomMember::State::Uninitialized; 100 Network::RoomMember::State current_state = Network::RoomMember::State::Uninitialized;
101 NotificationStatus notification_status = NotificationStatus::Unitialized; 101 NotificationStatus notification_status = NotificationStatus::Uninitialized;
102 bool has_mod_perms = false; 102 bool has_mod_perms = false;
103 Network::RoomMember::CallbackHandle<Network::RoomMember::State> state_callback_handle; 103 Network::RoomMember::CallbackHandle<Network::RoomMember::State> state_callback_handle;
104 Network::RoomMember::CallbackHandle<Network::RoomMember::Error> error_callback_handle; 104 Network::RoomMember::CallbackHandle<Network::RoomMember::Error> error_callback_handle;
diff --git a/src/yuzu/startup_checks.cpp b/src/yuzu/startup_checks.cpp
index 9f702fe95..5e1f76339 100644
--- a/src/yuzu/startup_checks.cpp
+++ b/src/yuzu/startup_checks.cpp
@@ -86,7 +86,7 @@ bool StartupChecks(const char* arg0, bool* has_broken_vulkan, bool perform_vulka
86 return false; 86 return false;
87 } 87 }
88 88
89 // Wait until the processs exits and get exit code from it 89 // Wait until the process exits and get exit code from it
90 WaitForSingleObject(process_info.hProcess, INFINITE); 90 WaitForSingleObject(process_info.hProcess, INFINITE);
91 DWORD exit_code = STILL_ACTIVE; 91 DWORD exit_code = STILL_ACTIVE;
92 const int err = GetExitCodeProcess(process_info.hProcess, &exit_code); 92 const int err = GetExitCodeProcess(process_info.hProcess, &exit_code);
diff --git a/src/yuzu/util/overlay_dialog.h b/src/yuzu/util/overlay_dialog.h
index 872283d61..62f9da311 100644
--- a/src/yuzu/util/overlay_dialog.h
+++ b/src/yuzu/util/overlay_dialog.h
@@ -71,7 +71,7 @@ private:
71 const QString& left_button_text, const QString& right_button_text, 71 const QString& left_button_text, const QString& right_button_text,
72 Qt::Alignment alignment); 72 Qt::Alignment alignment);
73 73
74 /// Moves and resizes the dialog to be fully overlayed on top of the parent window. 74 /// Moves and resizes the dialog to be fully overlaid on top of the parent window.
75 void MoveAndResizeWindow(); 75 void MoveAndResizeWindow();
76 76
77 /** 77 /**