Diffstat (limited to 'src')
-rw-r--r-- src/CMakeLists.txt | 33
-rw-r--r-- src/audio_core/CMakeLists.txt | 11
-rw-r--r-- src/audio_core/in/audio_in_system.cpp | 2
-rw-r--r-- src/audio_core/in/audio_in_system.h | 2
-rw-r--r-- src/audio_core/out/audio_out_system.cpp | 4
-rw-r--r-- src/audio_core/out/audio_out_system.h | 4
-rw-r--r-- src/audio_core/renderer/behavior/info_updater.cpp | 2
-rw-r--r-- src/audio_core/renderer/command/effect/biquad_filter.cpp | 2
-rw-r--r-- src/audio_core/renderer/command/effect/multi_tap_biquad_filter.cpp | 2
-rw-r--r-- src/audio_core/renderer/system.cpp | 85
-rw-r--r-- src/audio_core/renderer/system.h | 16
-rw-r--r-- src/audio_core/renderer/voice/voice_context.cpp | 4
-rw-r--r-- src/common/CMakeLists.txt | 13
-rw-r--r-- src/common/bit_field.h | 15
-rw-r--r-- src/common/bounded_threadsafe_queue.h | 9
-rw-r--r-- src/common/concepts.h | 24
-rw-r--r-- src/common/fixed_point.h | 274
-rw-r--r-- src/common/fs/file.h | 12
-rw-r--r-- src/common/input.h | 5
-rw-r--r-- src/core/CMakeLists.txt | 13
-rw-r--r-- src/core/arm/arm_interface.cpp | 8
-rw-r--r-- src/core/arm/dynarmic/arm_dynarmic_32.cpp | 2
-rw-r--r-- src/core/arm/dynarmic/arm_dynarmic_64.cpp | 2
-rw-r--r-- src/core/core.cpp | 93
-rw-r--r-- src/core/core.h | 10
-rw-r--r-- src/core/core_timing.cpp | 34
-rw-r--r-- src/core/core_timing.h | 14
-rw-r--r-- src/core/device_memory.h | 10
-rw-r--r-- src/core/file_sys/card_image.cpp | 4
-rw-r--r-- src/core/file_sys/control_metadata.cpp | 43
-rw-r--r-- src/core/file_sys/control_metadata.h | 6
-rw-r--r-- src/core/file_sys/program_metadata.cpp | 2
-rw-r--r-- src/core/file_sys/savedata_factory.cpp | 58
-rw-r--r-- src/core/file_sys/savedata_factory.h | 4
-rw-r--r-- src/core/hid/emulated_controller.cpp | 68
-rw-r--r-- src/core/hid/emulated_controller.h | 5
-rw-r--r-- src/core/hle/ipc_helpers.h | 15
-rw-r--r-- src/core/hle/kernel/global_scheduler_context.cpp | 22
-rw-r--r-- src/core/hle/kernel/global_scheduler_context.h | 8
-rw-r--r-- src/core/hle/kernel/hle_ipc.cpp | 110
-rw-r--r-- src/core/hle/kernel/hle_ipc.h | 11
-rw-r--r-- src/core/hle/kernel/init/init_slab_setup.cpp | 8
-rw-r--r-- src/core/hle/kernel/k_client_session.cpp | 15
-rw-r--r-- src/core/hle/kernel/k_code_memory.cpp | 2
-rw-r--r-- src/core/hle/kernel/k_dynamic_page_manager.h | 136
-rw-r--r-- src/core/hle/kernel/k_dynamic_resource_manager.h | 58
-rw-r--r-- src/core/hle/kernel/k_dynamic_slab_heap.h | 122
-rw-r--r-- src/core/hle/kernel/k_interrupt_manager.cpp | 29
-rw-r--r-- src/core/hle/kernel/k_interrupt_manager.h | 4
-rw-r--r-- src/core/hle/kernel/k_linked_list.h | 1
-rw-r--r-- src/core/hle/kernel/k_memory_block.h | 506
-rw-r--r-- src/core/hle/kernel/k_memory_block_manager.cpp | 409
-rw-r--r-- src/core/hle/kernel/k_memory_block_manager.h | 145
-rw-r--r-- src/core/hle/kernel/k_memory_manager.cpp | 2
-rw-r--r-- src/core/hle/kernel/k_page_buffer.cpp | 2
-rw-r--r-- src/core/hle/kernel/k_page_buffer.h | 1
-rw-r--r-- src/core/hle/kernel/k_page_table.cpp | 1302
-rw-r--r-- src/core/hle/kernel/k_page_table.h | 319
-rw-r--r-- src/core/hle/kernel/k_process.cpp | 112
-rw-r--r-- src/core/hle/kernel/k_process.h | 83
-rw-r--r-- src/core/hle/kernel/k_scheduler.cpp | 26
-rw-r--r-- src/core/hle/kernel/k_server_session.cpp | 232
-rw-r--r-- src/core/hle/kernel/k_server_session.h | 41
-rw-r--r-- src/core/hle/kernel/k_session_request.cpp | 61
-rw-r--r-- src/core/hle/kernel/k_session_request.h | 306
-rw-r--r-- src/core/hle/kernel/k_shared_memory.cpp | 2
-rw-r--r-- src/core/hle/kernel/k_shared_memory.h | 4
-rw-r--r-- src/core/hle/kernel/k_shared_memory_info.h | 3
-rw-r--r-- src/core/hle/kernel/k_slab_heap.h | 27
-rw-r--r-- src/core/hle/kernel/k_thread.cpp | 153
-rw-r--r-- src/core/hle/kernel/k_thread.h | 12
-rw-r--r-- src/core/hle/kernel/k_thread_local_page.h | 2
-rw-r--r-- src/core/hle/kernel/kernel.cpp | 80
-rw-r--r-- src/core/hle/kernel/kernel.h | 14
-rw-r--r-- src/core/hle/kernel/slab_helpers.h | 2
-rw-r--r-- src/core/hle/kernel/svc.cpp | 10
-rw-r--r-- src/core/hle/kernel/svc_common.h | 7
-rw-r--r-- src/core/hle/kernel/svc_types.h | 13
-rw-r--r-- src/core/hle/result.h | 11
-rw-r--r-- src/core/hle/service/am/am.cpp | 13
-rw-r--r-- src/core/hle/service/am/am.h | 1
-rw-r--r-- src/core/hle/service/am/applets/applets.h | 2
-rw-r--r-- src/core/hle/service/audio/audctl.cpp | 16
-rw-r--r-- src/core/hle/service/audio/audin_u.cpp | 2
-rw-r--r-- src/core/hle/service/audio/audout_u.cpp | 2
-rw-r--r-- src/core/hle/service/audio/audren_u.cpp | 26
-rw-r--r-- src/core/hle/service/hid/controllers/npad.cpp | 29
-rw-r--r-- src/core/hle/service/ldr/ldr.cpp | 4
-rw-r--r-- src/core/hle/service/nfp/amiibo_crypto.cpp | 8
-rw-r--r-- src/core/hle/service/nfp/amiibo_crypto.h | 3
-rw-r--r-- src/core/hle/service/nfp/nfp_device.cpp | 9
-rw-r--r-- src/core/hle/service/nfp/nfp_device.h | 1
-rw-r--r-- src/core/hle/service/nfp/nfp_types.h | 5
-rw-r--r-- src/core/hle/service/nfp/nfp_user.cpp | 3
-rw-r--r-- src/core/hle/service/nfp/nfp_user.h | 8
-rw-r--r-- src/core/hle/service/nvdrv/core/nvmap.cpp | 5
-rw-r--r-- src/core/hle/service/nvdrv/core/nvmap.h | 1
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp | 12
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvmap.cpp | 13
-rw-r--r-- src/core/hle/service/nvdrv/nvdrv.cpp | 2
-rw-r--r-- src/core/hle/service/nvdrv/nvdrv.h | 6
-rw-r--r-- src/core/hle/service/nvflinger/buffer_queue_producer.cpp | 7
-rw-r--r-- src/core/hle/service/nvflinger/nvflinger.cpp | 25
-rw-r--r-- src/core/hle/service/nvflinger/nvflinger.h | 7
-rw-r--r-- src/core/hle/service/service.cpp | 4
-rw-r--r-- src/core/hle/service/service.h | 2
-rw-r--r-- src/core/hle/service/sm/sm.cpp | 1
-rw-r--r-- src/core/hle/service/sm/sm_controller.cpp | 5
-rw-r--r-- src/core/hle/service/vi/display/vi_display.h | 6
-rw-r--r-- src/core/hle/service/vi/vi.cpp | 8
-rw-r--r-- src/core/hle/service/vi/vi_results.h | 2
-rw-r--r-- src/core/memory.cpp | 43
-rw-r--r-- src/input_common/CMakeLists.txt | 9
-rw-r--r-- src/input_common/drivers/gc_adapter.cpp | 6
-rw-r--r-- src/input_common/drivers/gc_adapter.h | 4
-rw-r--r-- src/input_common/drivers/sdl_driver.cpp | 68
-rw-r--r-- src/input_common/drivers/sdl_driver.h | 4
-rw-r--r-- src/input_common/input_engine.h | 7
-rw-r--r-- src/input_common/input_poller.cpp | 24
-rw-r--r-- src/shader_recompiler/CMakeLists.txt | 14
-rw-r--r-- src/shader_recompiler/backend/glasm/emit_glasm_not_implemented.cpp | 4
-rw-r--r-- src/shader_recompiler/backend/glsl/emit_glsl_not_implemented.cpp | 4
-rw-r--r-- src/shader_recompiler/frontend/ir/microinstruction.cpp | 5
-rw-r--r-- src/shader_recompiler/frontend/ir/value.h | 4
-rw-r--r-- src/shader_recompiler/frontend/maxwell/translate_program.cpp | 47
-rw-r--r-- src/shader_recompiler/ir_opt/dead_code_elimination_pass.cpp | 98
-rw-r--r-- src/shader_recompiler/runtime_info.h | 2
-rw-r--r-- src/shader_recompiler/shader_info.h | 3
-rw-r--r-- src/tests/core/core_timing.cpp | 3
-rw-r--r-- src/tests/video_core/buffer_base.cpp | 2
-rw-r--r-- src/video_core/CMakeLists.txt | 9
-rw-r--r-- src/video_core/engines/maxwell_3d.cpp | 285
-rw-r--r-- src/video_core/engines/maxwell_3d.h | 48
-rw-r--r-- src/video_core/engines/maxwell_dma.cpp | 127
-rw-r--r-- src/video_core/engines/maxwell_dma.h | 2
-rw-r--r-- src/video_core/engines/puller.cpp | 5
-rw-r--r-- src/video_core/macro/macro_hle.cpp | 47
-rw-r--r-- src/video_core/macro/macro_interpreter.cpp | 2
-rw-r--r-- src/video_core/macro/macro_jit_x64.cpp | 2
-rw-r--r-- src/video_core/memory_manager.cpp | 65
-rw-r--r-- src/video_core/memory_manager.h | 21
-rw-r--r-- src/video_core/pte_kind.h | 264
-rw-r--r-- src/video_core/rasterizer_interface.h | 2
-rw-r--r-- src/video_core/renderer_opengl/gl_rasterizer.cpp | 17
-rw-r--r-- src/video_core/renderer_opengl/gl_rasterizer.h | 4
-rw-r--r-- src/video_core/renderer_opengl/gl_shader_cache.cpp | 1
-rw-r--r-- src/video_core/renderer_vulkan/vk_pipeline_cache.cpp | 1
-rw-r--r-- src/video_core/renderer_vulkan/vk_query_cache.cpp | 7
-rw-r--r-- src/video_core/renderer_vulkan/vk_rasterizer.cpp | 47
-rw-r--r-- src/video_core/renderer_vulkan/vk_rasterizer.h | 4
-rw-r--r-- src/video_core/texture_cache/format_lookup_table.cpp | 2
-rw-r--r-- src/video_core/texture_cache/texture_cache.h | 8
-rw-r--r-- src/video_core/textures/astc.cpp | 4
-rw-r--r-- src/video_core/textures/decoders.cpp | 2
-rw-r--r-- src/yuzu/applets/qt_controller.ui | 2
-rw-r--r-- src/yuzu/bootmanager.cpp | 4
-rw-r--r-- src/yuzu/main.cpp | 11
-rw-r--r-- src/yuzu/multiplayer/state.cpp | 2
-rw-r--r-- src/yuzu/startup_checks.cpp | 17
-rw-r--r-- src/yuzu_cmd/yuzu.cpp | 4
160 files changed, 4505 insertions(+), 2423 deletions(-)
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 3575a3cb3..0ac3d254e 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -58,13 +58,11 @@ if (MSVC)
 
         # Warnings
         /W3
-        /we4018 # 'expression': signed/unsigned mismatch
+        /WX
+
         /we4062 # Enumerator 'identifier' in a switch of enum 'enumeration' is not handled
-        /we4101 # 'identifier': unreferenced local variable
         /we4189 # 'identifier': local variable is initialized but not referenced
         /we4265 # 'class': class has virtual functions, but destructor is not virtual
-        /we4267 # 'var': conversion from 'size_t' to 'type', possible loss of data
-        /we4305 # 'context': truncation from 'type1' to 'type2'
         /we4388 # 'expression': signed/unsigned mismatch
         /we4389 # 'operator': signed/unsigned mismatch
         /we4456 # Declaration of 'identifier' hides previous local declaration
@@ -75,10 +73,13 @@ if (MSVC)
         /we4547 # 'operator': operator before comma has no effect; expected operator with side-effect
         /we4549 # 'operator1': operator before comma has no effect; did you intend 'operator2'?
         /we4555 # Expression has no effect; expected expression with side-effect
-        /we4715 # 'function': not all control paths return a value
-        /we4834 # Discarding return value of function with 'nodiscard' attribute
+        /we4826 # Conversion from 'type1' to 'type2' is sign-extended. This may cause unexpected runtime behavior.
         /we5038 # data member 'member1' will be initialized after data member 'member2'
+        /we5233 # explicit lambda capture 'identifier' is not used
         /we5245 # 'function': unreferenced function with internal linkage has been removed
+
+        /wd4100 # 'identifier': unreferenced formal parameter
+        /wd4324 # 'struct_name': structure was padded due to __declspec(align())
     )
 
     if (USE_CCACHE)
@@ -99,24 +100,18 @@ if (MSVC)
     set(CMAKE_EXE_LINKER_FLAGS_RELEASE "/DEBUG /MANIFEST:NO /INCREMENTAL:NO /OPT:REF,ICF" CACHE STRING "" FORCE)
 else()
     add_compile_options(
-        -Wall
-        -Werror=array-bounds
-        -Werror=implicit-fallthrough
+        -Werror=all
+        -Werror=extra
         -Werror=missing-declarations
-        -Werror=missing-field-initializers
-        -Werror=reorder
         -Werror=shadow
-        -Werror=sign-compare
-        -Werror=switch
-        -Werror=uninitialized
-        -Werror=unused-function
-        -Werror=unused-result
-        -Werror=unused-variable
-        -Wextra
-        -Wmissing-declarations
+        -Werror=unused
+
         -Wno-attributes
         -Wno-invalid-offsetof
         -Wno-unused-parameter
+
+        $<$<CXX_COMPILER_ID:Clang>:-Wno-braced-scalar-init>
+        $<$<CXX_COMPILER_ID:Clang>:-Wno-unused-private-field>
     )
 
     if (ARCHITECTURE_x86_64)
diff --git a/src/audio_core/CMakeLists.txt b/src/audio_core/CMakeLists.txt
index 144f1bab2..0a1f3bf18 100644
--- a/src/audio_core/CMakeLists.txt
+++ b/src/audio_core/CMakeLists.txt
@@ -206,20 +206,11 @@ if (MSVC)
         /we4244 # 'conversion': conversion from 'type1' to 'type2', possible loss of data
         /we4245 # 'conversion': conversion from 'type1' to 'type2', signed/unsigned mismatch
         /we4254 # 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data
-        /we4456 # Declaration of 'identifier' hides previous local declaration
-        /we4457 # Declaration of 'identifier' hides function parameter
-        /we4458 # Declaration of 'identifier' hides class member
-        /we4459 # Declaration of 'identifier' hides global declaration
+        /we4800 # Implicit conversion from 'type' to bool. Possible information loss
     )
 else()
     target_compile_options(audio_core PRIVATE
         -Werror=conversion
-        -Werror=ignored-qualifiers
-        -Werror=shadow
-        -Werror=unused-variable
-
-        $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter>
-        $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable>
 
         -Wno-sign-conversion
     )
diff --git a/src/audio_core/in/audio_in_system.cpp b/src/audio_core/in/audio_in_system.cpp
index 6b7e6715c..4324cafd8 100644
--- a/src/audio_core/in/audio_in_system.cpp
+++ b/src/audio_core/in/audio_in_system.cpp
@@ -56,7 +56,7 @@ Result System::IsConfigValid(const std::string_view device_name,
     return ResultSuccess;
 }
 
-Result System::Initialize(std::string& device_name, const AudioInParameter& in_params,
+Result System::Initialize(std::string device_name, const AudioInParameter& in_params,
                           const u32 handle_, const u64 applet_resource_user_id_) {
     auto result{IsConfigValid(device_name, in_params)};
     if (result.IsError()) {
diff --git a/src/audio_core/in/audio_in_system.h b/src/audio_core/in/audio_in_system.h
index b9dc0e60f..1c5154638 100644
--- a/src/audio_core/in/audio_in_system.h
+++ b/src/audio_core/in/audio_in_system.h
@@ -97,7 +97,7 @@ public:
      * @param applet_resource_user_id - Unused.
      * @return Result code.
      */
-    Result Initialize(std::string& device_name, const AudioInParameter& in_params, u32 handle,
+    Result Initialize(std::string device_name, const AudioInParameter& in_params, u32 handle,
                       u64 applet_resource_user_id);
 
     /**
diff --git a/src/audio_core/out/audio_out_system.cpp b/src/audio_core/out/audio_out_system.cpp
index 48a801923..a66208ed9 100644
--- a/src/audio_core/out/audio_out_system.cpp
+++ b/src/audio_core/out/audio_out_system.cpp
@@ -49,8 +49,8 @@ Result System::IsConfigValid(std::string_view device_name,
     return Service::Audio::ERR_INVALID_CHANNEL_COUNT;
 }
 
-Result System::Initialize(std::string& device_name, const AudioOutParameter& in_params, u32 handle_,
-                          u64& applet_resource_user_id_) {
+Result System::Initialize(std::string device_name, const AudioOutParameter& in_params, u32 handle_,
+                          u64 applet_resource_user_id_) {
     auto result = IsConfigValid(device_name, in_params);
     if (result.IsError()) {
         return result;
diff --git a/src/audio_core/out/audio_out_system.h b/src/audio_core/out/audio_out_system.h
index 0817b2f37..b95cb91be 100644
--- a/src/audio_core/out/audio_out_system.h
+++ b/src/audio_core/out/audio_out_system.h
@@ -88,8 +88,8 @@ public:
      * @param applet_resource_user_id - Unused.
      * @return Result code.
      */
-    Result Initialize(std::string& device_name, const AudioOutParameter& in_params, u32 handle,
-                      u64& applet_resource_user_id);
+    Result Initialize(std::string device_name, const AudioOutParameter& in_params, u32 handle,
+                      u64 applet_resource_user_id);
 
     /**
      * Start this system.
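
Note: both Initialize overloads above now take device_name by value (and applet_resource_user_id as a plain u64) rather than by non-const reference. A minimal sketch of why by-value fits a sink parameter; the System/Initialize here are simplified stand-ins, not the yuzu types:

    #include <string>
    #include <utility>

    struct System {
        std::string device_name;

        // A by-value parameter accepts lvalues, temporaries, and moved-from
        // strings alike; the callee then moves it into place.
        void Initialize(std::string name) {
            device_name = std::move(name);
        }
    };

    int main() {
        System s;
        s.Initialize("example_device"); // a temporary could not bind to std::string&
    }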
diff --git a/src/audio_core/renderer/behavior/info_updater.cpp b/src/audio_core/renderer/behavior/info_updater.cpp
index c0a307b89..574cf0982 100644
--- a/src/audio_core/renderer/behavior/info_updater.cpp
+++ b/src/audio_core/renderer/behavior/info_updater.cpp
@@ -91,7 +91,7 @@ Result InfoUpdater::UpdateVoices(VoiceContext& voice_context,
         voice_info.Initialize();
 
         for (u32 channel = 0; channel < in_param.channel_count; channel++) {
-            std::memset(voice_states[channel], 0, sizeof(VoiceState));
+            *voice_states[channel] = {};
         }
     }
 
diff --git a/src/audio_core/renderer/command/effect/biquad_filter.cpp b/src/audio_core/renderer/command/effect/biquad_filter.cpp
index 1baae74fd..edb30ce72 100644
--- a/src/audio_core/renderer/command/effect/biquad_filter.cpp
+++ b/src/audio_core/renderer/command/effect/biquad_filter.cpp
@@ -94,7 +94,7 @@ void BiquadFilterCommand::Dump([[maybe_unused]] const ADSP::CommandListProcessor
 void BiquadFilterCommand::Process(const ADSP::CommandListProcessor& processor) {
     auto state_{reinterpret_cast<VoiceState::BiquadFilterState*>(state)};
     if (needs_init) {
-        std::memset(state_, 0, sizeof(VoiceState::BiquadFilterState));
+        *state_ = {};
     }
 
     auto input_buffer{
diff --git a/src/audio_core/renderer/command/effect/multi_tap_biquad_filter.cpp b/src/audio_core/renderer/command/effect/multi_tap_biquad_filter.cpp
index b3c3ba4ba..48a7cba8a 100644
--- a/src/audio_core/renderer/command/effect/multi_tap_biquad_filter.cpp
+++ b/src/audio_core/renderer/command/effect/multi_tap_biquad_filter.cpp
@@ -30,7 +30,7 @@ void MultiTapBiquadFilterCommand::Process(const ADSP::CommandListProcessor& proc
     for (u32 i = 0; i < filter_tap_count; i++) {
         auto state{reinterpret_cast<VoiceState::BiquadFilterState*>(states[i])};
         if (needs_init[i]) {
-            std::memset(state, 0, sizeof(VoiceState::BiquadFilterState));
+            *state = {};
         }
 
         ApplyBiquadFilterFloat(output_buffer, input_buffer, biquads[i].b, biquads[i].a, *state,
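
Note: the three hunks above replace std::memset of trivially-copyable state structs with value-initialization (`*state = {};`). A standalone sketch of the equivalence, using a hypothetical BiquadState rather than the real VoiceState::BiquadFilterState:

    #include <cstring>

    struct BiquadState { // stand-in for VoiceState::BiquadFilterState
        float s0, s1, s2, s3;
    };

    void reset_memset(BiquadState* state) {
        std::memset(state, 0, sizeof(BiquadState)); // byte-wise zeroing
    }

    void reset_value_init(BiquadState* state) {
        *state = {}; // zeroes every member type-safely, and stays correct
                     // if the struct later gains members or padding changes
    }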
diff --git a/src/audio_core/renderer/system.cpp b/src/audio_core/renderer/system.cpp
index bde794cd1..4fac30c7c 100644
--- a/src/audio_core/renderer/system.cpp
+++ b/src/audio_core/renderer/system.cpp
@@ -98,9 +98,8 @@ System::System(Core::System& core_, Kernel::KEvent* adsp_rendered_event_)
     : core{core_}, adsp{core.AudioCore().GetADSP()}, adsp_rendered_event{adsp_rendered_event_} {}
 
 Result System::Initialize(const AudioRendererParameterInternal& params,
-                          Kernel::KTransferMemory* transfer_memory, const u64 transfer_memory_size,
-                          const u32 process_handle_, const u64 applet_resource_user_id_,
-                          const s32 session_id_) {
+                          Kernel::KTransferMemory* transfer_memory, u64 transfer_memory_size,
+                          u32 process_handle_, u64 applet_resource_user_id_, s32 session_id_) {
     if (!CheckValidRevision(params.revision)) {
         return Service::Audio::ERR_INVALID_REVISION;
     }
@@ -354,6 +353,8 @@ Result System::Initialize(const AudioRendererParameterInternal& params,
 
     render_time_limit_percent = 100;
     drop_voice = params.voice_drop_enabled && params.execution_mode == ExecutionMode::Auto;
+    drop_voice_param = 1.0f;
+    num_voices_dropped = 0;
 
     allocator.Align(0x40);
     command_workbuffer_size = allocator.GetRemainingSize();
@@ -547,7 +548,7 @@ u32 System::GetRenderingTimeLimit() const {
     return render_time_limit_percent;
 }
 
-void System::SetRenderingTimeLimit(const u32 limit) {
+void System::SetRenderingTimeLimit(u32 limit) {
     render_time_limit_percent = limit;
 }
 
@@ -635,7 +636,7 @@ void System::SendCommandToDsp() {
 }
 
 u64 System::GenerateCommand(std::span<u8> in_command_buffer,
-                            [[maybe_unused]] const u64 command_buffer_size_) {
+                            [[maybe_unused]] u64 command_buffer_size_) {
     PoolMapper::ClearUseState(memory_pool_workbuffer, memory_pool_count);
     const auto start_time{core.CoreTiming().GetClockTicks()};
 
@@ -693,7 +694,8 @@ u64 System::GenerateCommand(std::span<u8> in_command_buffer,
 
     voice_context.SortInfo();
 
-    const auto start_estimated_time{command_buffer.estimated_process_time};
+    const auto start_estimated_time{drop_voice_param *
+                                    static_cast<f32>(command_buffer.estimated_process_time)};
 
     command_generator.GenerateVoiceCommands();
     command_generator.GenerateSubMixCommands();
@@ -712,11 +714,16 @@ u64 System::GenerateCommand(std::span<u8> in_command_buffer,
                 render_context.behavior->IsAudioRendererProcessingTimeLimit70PercentSupported();
             time_limit_percent = 70.0f;
         }
+
+        const auto end_estimated_time{drop_voice_param *
+                                      static_cast<f32>(command_buffer.estimated_process_time)};
+        const auto estimated_time{start_estimated_time - end_estimated_time};
+
         const auto time_limit{static_cast<u32>(
-            static_cast<f32>(start_estimated_time - command_buffer.estimated_process_time) +
-            (((time_limit_percent / 100.0f) * 2'880'000.0) *
-             (static_cast<f32>(render_time_limit_percent) / 100.0f)))};
-        num_voices_dropped = DropVoices(command_buffer, start_estimated_time, time_limit);
+            estimated_time + (((time_limit_percent / 100.0f) * 2'880'000.0) *
+                              (static_cast<f32>(render_time_limit_percent) / 100.0f)))};
+        num_voices_dropped =
+            DropVoices(command_buffer, static_cast<u32>(start_estimated_time), time_limit);
     }
 
     command_list_header->buffer_size = command_buffer.size;
@@ -737,24 +744,33 @@ u64 System::GenerateCommand(std::span<u8> in_command_buffer,
     return command_buffer.size;
 }
 
-u32 System::DropVoices(CommandBuffer& command_buffer, const u32 estimated_process_time,
-                       const u32 time_limit) {
+f32 System::GetVoiceDropParameter() const {
+    return drop_voice_param;
+}
+
+void System::SetVoiceDropParameter(f32 voice_drop_) {
+    drop_voice_param = voice_drop_;
+}
+
+u32 System::DropVoices(CommandBuffer& command_buffer, u32 estimated_process_time, u32 time_limit) {
     u32 i{0};
     auto command_list{command_buffer.command_list.data() + sizeof(CommandListHeader)};
-    ICommand* cmd{};
+    ICommand* cmd{nullptr};
 
-    for (; i < command_buffer.count; i++) {
+    // Find a first valid voice to drop
+    while (i < command_buffer.count) {
         cmd = reinterpret_cast<ICommand*>(command_list);
-        if (cmd->type != CommandId::Performance &&
-            cmd->type != CommandId::DataSourcePcmInt16Version1 &&
-            cmd->type != CommandId::DataSourcePcmInt16Version2 &&
-            cmd->type != CommandId::DataSourcePcmFloatVersion1 &&
-            cmd->type != CommandId::DataSourcePcmFloatVersion2 &&
-            cmd->type != CommandId::DataSourceAdpcmVersion1 &&
-            cmd->type != CommandId::DataSourceAdpcmVersion2) {
+        if (cmd->type == CommandId::Performance ||
+            cmd->type == CommandId::DataSourcePcmInt16Version1 ||
+            cmd->type == CommandId::DataSourcePcmInt16Version2 ||
+            cmd->type == CommandId::DataSourcePcmFloatVersion1 ||
+            cmd->type == CommandId::DataSourcePcmFloatVersion2 ||
+            cmd->type == CommandId::DataSourceAdpcmVersion1 ||
+            cmd->type == CommandId::DataSourceAdpcmVersion2) {
             break;
         }
         command_list += cmd->size;
+        i++;
     }
 
     if (cmd == nullptr || command_buffer.count == 0 || i >= command_buffer.count) {
@@ -767,6 +783,7 @@ u32 System::DropVoices(CommandBuffer& command_buffer, const u32 estimated_proces
         const auto node_id_type{cmd->node_id >> 28};
         const auto node_id_base{cmd->node_id & 0xFFF};
 
+        // If the new estimated process time falls below the limit, we're done dropping.
         if (estimated_process_time <= time_limit) {
             break;
         }
@@ -775,6 +792,7 @@ u32 System::DropVoices(CommandBuffer& command_buffer, const u32 estimated_proces
             break;
         }
 
+        // Don't drop voices marked with the highest priority.
        auto& voice_info{voice_context.GetInfo(node_id_base)};
         if (voice_info.priority == HighestVoicePriority) {
             break;
@@ -783,18 +801,23 @@ u32 System::DropVoices(CommandBuffer& command_buffer, const u32 estimated_proces
         voices_dropped++;
         voice_info.voice_dropped = true;
 
-        if (i < command_buffer.count) {
-            while (cmd->node_id == node_id) {
-                if (cmd->type == CommandId::DepopPrepare) {
-                    cmd->enabled = true;
-                } else if (cmd->type == CommandId::Performance || !cmd->enabled) {
-                    cmd->enabled = false;
-                }
-                i++;
-                command_list += cmd->size;
-                cmd = reinterpret_cast<ICommand*>(command_list);
+        // First iteration should drop the voice, and then iterate through all of the commands tied
+        // to the voice. We don't need reverb on a voice which we've just removed, for example.
+        // Depops can't be removed otherwise we'll introduce audio popping, and we don't
+        // remove perf commands. Lower the estimated time for each command dropped.
+        while (i < command_buffer.count && cmd->node_id == node_id) {
+            if (cmd->type == CommandId::DepopPrepare) {
+                cmd->enabled = true;
+            } else if (cmd->enabled && cmd->type != CommandId::Performance) {
+                cmd->enabled = false;
+                estimated_process_time -= static_cast<u32>(
+                    drop_voice_param * static_cast<f32>(cmd->estimated_process_time));
             }
+            command_list += cmd->size;
+            cmd = reinterpret_cast<ICommand*>(command_list);
+            i++;
         }
+        i++;
     }
     return voices_dropped;
 }
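
Note: the reworked DropVoices above makes one pass over the command list: skip ahead to the first voice command, then, while the estimate exceeds the limit, disable each droppable command of the current voice and subtract its cost scaled by drop_voice_param. A condensed sketch of that accounting loop; Cmd and the flat vector are simplifications (the real list is a packed byte buffer of variable-size commands, and the priority and perf-command checks are omitted here):

    #include <cstdint>
    #include <vector>

    struct Cmd {
        uint32_t node_id;                // voice this command belongs to
        uint32_t estimated_process_time; // cost estimate for this command
        bool is_depop;                   // depops must stay enabled
        bool enabled{true};
    };

    uint32_t DropVoicesSketch(std::vector<Cmd>& cmds, uint32_t estimate,
                              uint32_t limit, float drop_param) {
        uint32_t dropped = 0;
        size_t i = 0;
        while (i < cmds.size() && estimate > limit) {
            const uint32_t voice = cmds[i].node_id;
            dropped++;
            // Disable every command tied to this voice except depops,
            // lowering the running estimate as we go.
            while (i < cmds.size() && cmds[i].node_id == voice) {
                if (!cmds[i].is_depop && cmds[i].enabled) {
                    cmds[i].enabled = false;
                    const auto cost = static_cast<uint32_t>(
                        drop_param * static_cast<float>(cmds[i].estimated_process_time));
                    estimate = cost > estimate ? 0 : estimate - cost;
                }
                i++;
            }
        }
        return dropped;
    }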
diff --git a/src/audio_core/renderer/system.h b/src/audio_core/renderer/system.h
index bcbe65b07..429196e41 100644
--- a/src/audio_core/renderer/system.h
+++ b/src/audio_core/renderer/system.h
@@ -196,6 +196,20 @@ public:
      */
     u32 DropVoices(CommandBuffer& command_buffer, u32 estimated_process_time, u32 time_limit);
 
+    /**
+     * Get the current voice drop parameter.
+     *
+     * @return The current voice drop.
+     */
+    f32 GetVoiceDropParameter() const;
+
+    /**
+     * Set the voice drop parameter.
+     *
+     * @param voice_drop - The new voice drop.
+     */
+    void SetVoiceDropParameter(f32 voice_drop);
+
 private:
     /// Core system
     Core::System& core;
@@ -301,6 +315,8 @@ private:
     u32 num_voices_dropped{};
     /// Tick that rendering started
     u64 render_start_tick{};
+    /// Parameter to control the threshold for dropping voices if the audio graph gets too large
+    f32 drop_voice_param{1.0f};
 };
 
 } // namespace AudioRenderer
diff --git a/src/audio_core/renderer/voice/voice_context.cpp b/src/audio_core/renderer/voice/voice_context.cpp
index eafb51b01..a501a677d 100644
--- a/src/audio_core/renderer/voice/voice_context.cpp
+++ b/src/audio_core/renderer/voice/voice_context.cpp
@@ -74,8 +74,8 @@ void VoiceContext::SortInfo() {
     }
 
     std::ranges::sort(sorted_voice_info, [](const VoiceInfo* a, const VoiceInfo* b) {
-        return a->priority != b->priority ? a->priority < b->priority
-                                          : a->sort_order < b->sort_order;
+        return a->priority != b->priority ? a->priority > b->priority
+                                          : a->sort_order > b->sort_order;
     });
 }
 
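
Note: the comparator above flips the sort from ascending to descending, so the highest-priority voices now come first, with sort_order breaking ties. A self-contained illustration of the same two-key descending comparator (VoiceInfo reduced to just the two keys used):

    #include <algorithm>
    #include <vector>

    struct VoiceInfo {
        int priority;
        int sort_order;
    };

    void SortVoices(std::vector<VoiceInfo*>& voices) {
        std::ranges::sort(voices, [](const VoiceInfo* a, const VoiceInfo* b) {
            // Primary key: higher priority first; tie-break on higher sort_order.
            return a->priority != b->priority ? a->priority > b->priority
                                              : a->sort_order > b->sort_order;
        });
    }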
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index a02696873..c0555f840 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -156,12 +156,13 @@ if (MSVC)
     )
     target_compile_options(common PRIVATE
         /W4
-        /WX
+
+        /we4242 # 'identifier': conversion from 'type1' to 'type2', possible loss of data
+        /we4254 # 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data
+        /we4800 # Implicit conversion from 'type' to bool. Possible information loss
     )
 else()
     target_compile_options(common PRIVATE
-        -Werror
-
         $<$<CXX_COMPILER_ID:Clang>:-fsized-deallocation>
     )
 endif()
@@ -169,7 +170,11 @@ endif()
 create_target_directory_groups(common)
 
 target_link_libraries(common PUBLIC ${Boost_LIBRARIES} fmt::fmt microprofile Threads::Threads)
-target_link_libraries(common PRIVATE lz4::lz4)
+if (TARGET lz4::lz4)
+    target_link_libraries(common PRIVATE lz4::lz4)
+else()
+    target_link_libraries(common PRIVATE LZ4::lz4_shared)
+endif()
 if (TARGET zstd::zstd)
     target_link_libraries(common PRIVATE zstd::zstd)
 else()
diff --git a/src/common/bit_field.h b/src/common/bit_field.h
index 7e1df62b1..e4e58ea45 100644
--- a/src/common/bit_field.h
+++ b/src/common/bit_field.h
@@ -141,10 +141,6 @@ public:
     constexpr BitField(BitField&&) noexcept = default;
     constexpr BitField& operator=(BitField&&) noexcept = default;
 
-    [[nodiscard]] constexpr operator T() const {
-        return Value();
-    }
-
     constexpr void Assign(const T& value) {
 #ifdef _MSC_VER
         storage = static_cast<StorageType>((storage & ~mask) | FormatValue(value));
@@ -162,6 +158,17 @@ public:
         return ExtractValue(storage);
     }
 
+    template <typename ConvertedToType>
+    [[nodiscard]] constexpr ConvertedToType As() const {
+        static_assert(!std::is_same_v<T, ConvertedToType>,
+                      "Unnecessary cast. Use Value() instead.");
+        return static_cast<ConvertedToType>(Value());
+    }
+
+    [[nodiscard]] constexpr operator T() const {
+        return Value();
+    }
+
     [[nodiscard]] constexpr explicit operator bool() const {
         return Value() != 0;
     }
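
Note: the new As<T>() gives an explicit, compile-time-checked alternative to the implicit operator T() when a field must widen or narrow to a different type; a same-type As() is rejected by the static_assert so callers use Value() instead. A usage sketch with a hypothetical register layout (not a real yuzu register):

    #include "common/bit_field.h"
    #include "common/common_types.h"

    union Register { // hypothetical layout, for illustration only
        u32 raw;
        BitField<0, 8, u32> count; // bits 0-7
        BitField<8, 4, u32> mode;  // bits 8-11
    };

    void Example() {
        Register reg{};
        reg.raw = 0x342;
        const u64 wide = reg.count.As<u64>(); // explicit widening cast
        const u32 same = reg.mode.Value();    // As<u32>() would static_assert
        (void)wide;
        (void)same;
    }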
diff --git a/src/common/bounded_threadsafe_queue.h b/src/common/bounded_threadsafe_queue.h
index 7e465549b..21217801e 100644
--- a/src/common/bounded_threadsafe_queue.h
+++ b/src/common/bounded_threadsafe_queue.h
@@ -21,11 +21,6 @@ constexpr size_t hardware_interference_size = std::hardware_destructive_interfer
 constexpr size_t hardware_interference_size = 64;
 #endif
 
-#ifdef _MSC_VER
-#pragma warning(push)
-#pragma warning(disable : 4324)
-#endif
-
 template <typename T, size_t capacity = 0x400>
 class MPSCQueue {
 public:
@@ -160,8 +155,4 @@ private:
     static_assert(std::is_nothrow_destructible_v<T>, "T must be nothrow destructible");
 };
 
-#ifdef _MSC_VER
-#pragma warning(pop)
-#endif
-
 } // namespace Common
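
Note: the pragma push/pop pair existed only to silence MSVC C4324 around the cache-line-aligned queue types; with /wd4324 now set globally in src/CMakeLists.txt (first hunk above), the per-file suppression is redundant. A minimal reproduction of what C4324 warns about (Slot is an illustrative type):

    // MSVC C4324: "structure was padded due to alignment specifier".
    // The padding is intentional for cache-line isolation, so the warning
    // is disabled project-wide instead of with pragma push/pop per file.
    struct alignas(64) Slot {
        int value; // padded out to 64 bytes
    };

    static_assert(sizeof(Slot) == 64);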
diff --git a/src/common/concepts.h b/src/common/concepts.h
index a97555f6a..a9acff3e7 100644
--- a/src/common/concepts.h
+++ b/src/common/concepts.h
@@ -3,24 +3,14 @@
 
 #pragma once
 
+#include <iterator>
 #include <type_traits>
 
 namespace Common {
 
-// Check if type is like an STL container
+// Check if type satisfies the ContiguousContainer named requirement.
 template <typename T>
-concept IsSTLContainer = requires(T t) {
-    typename T::value_type;
-    typename T::iterator;
-    typename T::const_iterator;
-    // TODO(ogniK): Replace below is std::same_as<void> when MSVC supports it.
-    t.begin();
-    t.end();
-    t.cbegin();
-    t.cend();
-    t.data();
-    t.size();
-};
+concept IsContiguousContainer = std::contiguous_iterator<typename T::iterator>;
 
 // TODO: Replace with std::derived_from when the <concepts> header
 // is available on all supported platforms.
@@ -34,4 +24,12 @@ concept DerivedFrom = requires {
 template <typename From, typename To>
 concept ConvertibleTo = std::is_convertible_v<From, To>;
 
+// No equivalents in the stdlib
+
+template <typename T>
+concept IsArithmetic = std::is_arithmetic_v<T>;
+
+template <typename T>
+concept IsIntegral = std::is_integral_v<T>;
+
 } // namespace Common
diff --git a/src/common/fixed_point.h b/src/common/fixed_point.h
index 4a0f72cc9..f899b0d54 100644
--- a/src/common/fixed_point.h
+++ b/src/common/fixed_point.h
@@ -4,14 +4,7 @@
 // From: https://github.com/eteran/cpp-utilities/blob/master/fixed/include/cpp-utilities/fixed.h
 // See also: http://stackoverflow.com/questions/79677/whats-the-best-way-to-do-fixed-point-math
 
-#ifndef FIXED_H_
-#define FIXED_H_
-
-#if __cplusplus >= 201402L
-#define CONSTEXPR14 constexpr
-#else
-#define CONSTEXPR14
-#endif
+#pragma once
 
 #include <cstddef> // for size_t
 #include <cstdint>
@@ -19,6 +12,8 @@
 #include <ostream>
 #include <type_traits>
 
+#include <common/concepts.h>
+
 namespace Common {
 
 template <size_t I, size_t F>
@@ -57,8 +52,8 @@ struct type_from_size<64> {
     static constexpr size_t size = 64;
 
     using value_type = int64_t;
-    using unsigned_type = std::make_unsigned<value_type>::type;
-    using signed_type = std::make_signed<value_type>::type;
+    using unsigned_type = std::make_unsigned_t<value_type>;
+    using signed_type = std::make_signed_t<value_type>;
     using next_size = type_from_size<128>;
 };
 
@@ -68,8 +63,8 @@ struct type_from_size<32> {
     static constexpr size_t size = 32;
 
     using value_type = int32_t;
-    using unsigned_type = std::make_unsigned<value_type>::type;
-    using signed_type = std::make_signed<value_type>::type;
+    using unsigned_type = std::make_unsigned_t<value_type>;
+    using signed_type = std::make_signed_t<value_type>;
     using next_size = type_from_size<64>;
 };
 
@@ -79,8 +74,8 @@ struct type_from_size<16> {
     static constexpr size_t size = 16;
 
     using value_type = int16_t;
-    using unsigned_type = std::make_unsigned<value_type>::type;
-    using signed_type = std::make_signed<value_type>::type;
+    using unsigned_type = std::make_unsigned_t<value_type>;
+    using signed_type = std::make_signed_t<value_type>;
     using next_size = type_from_size<32>;
 };
 
@@ -90,8 +85,8 @@ struct type_from_size<8> {
     static constexpr size_t size = 8;
 
     using value_type = int8_t;
-    using unsigned_type = std::make_unsigned<value_type>::type;
-    using signed_type = std::make_signed<value_type>::type;
+    using unsigned_type = std::make_unsigned_t<value_type>;
+    using signed_type = std::make_signed_t<value_type>;
     using next_size = type_from_size<16>;
 };
 
@@ -106,9 +101,9 @@ constexpr B next_to_base(N rhs) {
 struct divide_by_zero : std::exception {};
 
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> divide(
+constexpr FixedPoint<I, F> divide(
     FixedPoint<I, F> numerator, FixedPoint<I, F> denominator, FixedPoint<I, F>& remainder,
-    typename std::enable_if<type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+    std::enable_if_t<type_from_size<I + F>::next_size::is_specialized>* = nullptr) {
 
     using next_type = typename FixedPoint<I, F>::next_type;
     using base_type = typename FixedPoint<I, F>::base_type;
@@ -126,9 +121,9 @@ CONSTEXPR14 FixedPoint<I, F> divide(
 }
 
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> divide(
+constexpr FixedPoint<I, F> divide(
     FixedPoint<I, F> numerator, FixedPoint<I, F> denominator, FixedPoint<I, F>& remainder,
-    typename std::enable_if<!type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+    std::enable_if_t<!type_from_size<I + F>::next_size::is_specialized>* = nullptr) {
 
     using unsigned_type = typename FixedPoint<I, F>::unsigned_type;
 
@@ -196,9 +191,9 @@ CONSTEXPR14 FixedPoint<I, F> divide(
 
 // this is the usual implementation of multiplication
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> multiply(
+constexpr FixedPoint<I, F> multiply(
     FixedPoint<I, F> lhs, FixedPoint<I, F> rhs,
-    typename std::enable_if<type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+    std::enable_if_t<type_from_size<I + F>::next_size::is_specialized>* = nullptr) {
 
     using next_type = typename FixedPoint<I, F>::next_type;
     using base_type = typename FixedPoint<I, F>::base_type;
@@ -215,9 +210,9 @@ CONSTEXPR14 FixedPoint<I, F> multiply(
 // it is slightly slower, but is more robust since it doesn't
 // require and upgraded type
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> multiply(
+constexpr FixedPoint<I, F> multiply(
     FixedPoint<I, F> lhs, FixedPoint<I, F> rhs,
-    typename std::enable_if<!type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+    std::enable_if_t<!type_from_size<I + F>::next_size::is_specialized>* = nullptr) {
 
     using base_type = typename FixedPoint<I, F>::base_type;
 
@@ -272,19 +267,20 @@ public:
     static constexpr base_type one = base_type(1) << fractional_bits;
 
 public: // constructors
-    FixedPoint() = default;
-    FixedPoint(const FixedPoint&) = default;
-    FixedPoint(FixedPoint&&) = default;
-    FixedPoint& operator=(const FixedPoint&) = default;
+    constexpr FixedPoint() = default;
+
+    constexpr FixedPoint(const FixedPoint&) = default;
+    constexpr FixedPoint& operator=(const FixedPoint&) = default;
+
+    constexpr FixedPoint(FixedPoint&&) noexcept = default;
+    constexpr FixedPoint& operator=(FixedPoint&&) noexcept = default;
 
-    template <class Number>
-    constexpr FixedPoint(
-        Number n, typename std::enable_if<std::is_arithmetic<Number>::value>::type* = nullptr)
-        : data_(static_cast<base_type>(n * one)) {}
+    template <IsArithmetic Number>
+    constexpr FixedPoint(Number n) : data_(static_cast<base_type>(n * one)) {}
 
 public: // conversion
     template <size_t I2, size_t F2>
-    CONSTEXPR14 explicit FixedPoint(FixedPoint<I2, F2> other) {
+    constexpr explicit FixedPoint(FixedPoint<I2, F2> other) {
         static_assert(I2 <= I && F2 <= F, "Scaling conversion can only upgrade types");
         using T = FixedPoint<I2, F2>;
 
@@ -308,36 +304,14 @@ public:
     }
 
 public: // comparison operators
-    constexpr bool operator==(FixedPoint rhs) const {
-        return data_ == rhs.data_;
-    }
-
-    constexpr bool operator!=(FixedPoint rhs) const {
-        return data_ != rhs.data_;
-    }
-
-    constexpr bool operator<(FixedPoint rhs) const {
-        return data_ < rhs.data_;
-    }
-
-    constexpr bool operator>(FixedPoint rhs) const {
-        return data_ > rhs.data_;
-    }
-
-    constexpr bool operator<=(FixedPoint rhs) const {
-        return data_ <= rhs.data_;
-    }
-
-    constexpr bool operator>=(FixedPoint rhs) const {
-        return data_ >= rhs.data_;
-    }
+    friend constexpr auto operator<=>(FixedPoint lhs, FixedPoint rhs) = default;
 
 public: // unary operators
-    constexpr bool operator!() const {
+    [[nodiscard]] constexpr bool operator!() const {
         return !data_;
     }
 
-    constexpr FixedPoint operator~() const {
+    [[nodiscard]] constexpr FixedPoint operator~() const {
         // NOTE(eteran): this will often appear to "just negate" the value
         // that is not an error, it is because -x == (~x+1)
         // and that "+1" is adding an infinitesimally small fraction to the
@@ -345,89 +319,87 @@ public: // unary operators
         return FixedPoint::from_base(~data_);
     }
 
-    constexpr FixedPoint operator-() const {
+    [[nodiscard]] constexpr FixedPoint operator-() const {
         return FixedPoint::from_base(-data_);
     }
 
-    constexpr FixedPoint operator+() const {
+    [[nodiscard]] constexpr FixedPoint operator+() const {
        return FixedPoint::from_base(+data_);
     }
 
-    CONSTEXPR14 FixedPoint& operator++() {
+    constexpr FixedPoint& operator++() {
         data_ += one;
         return *this;
     }
 
-    CONSTEXPR14 FixedPoint& operator--() {
+    constexpr FixedPoint& operator--() {
        data_ -= one;
         return *this;
     }
 
-    CONSTEXPR14 FixedPoint operator++(int) {
+    constexpr FixedPoint operator++(int) {
         FixedPoint tmp(*this);
         data_ += one;
         return tmp;
     }
 
-    CONSTEXPR14 FixedPoint operator--(int) {
+    constexpr FixedPoint operator--(int) {
         FixedPoint tmp(*this);
         data_ -= one;
         return tmp;
     }
 
 public: // basic math operators
-    CONSTEXPR14 FixedPoint& operator+=(FixedPoint n) {
+    constexpr FixedPoint& operator+=(FixedPoint n) {
         data_ += n.data_;
         return *this;
     }
 
-    CONSTEXPR14 FixedPoint& operator-=(FixedPoint n) {
+    constexpr FixedPoint& operator-=(FixedPoint n) {
         data_ -= n.data_;
         return *this;
     }
 
-    CONSTEXPR14 FixedPoint& operator*=(FixedPoint n) {
+    constexpr FixedPoint& operator*=(FixedPoint n) {
         return assign(detail::multiply(*this, n));
     }
 
-    CONSTEXPR14 FixedPoint& operator/=(FixedPoint n) {
+    constexpr FixedPoint& operator/=(FixedPoint n) {
         FixedPoint temp;
         return assign(detail::divide(*this, n, temp));
     }
 
 private:
-    CONSTEXPR14 FixedPoint& assign(FixedPoint rhs) {
+    constexpr FixedPoint& assign(FixedPoint rhs) {
         data_ = rhs.data_;
         return *this;
     }
 
 public: // binary math operators, effects underlying bit pattern since these
         // don't really typically make sense for non-integer values
-    CONSTEXPR14 FixedPoint& operator&=(FixedPoint n) {
+    constexpr FixedPoint& operator&=(FixedPoint n) {
         data_ &= n.data_;
         return *this;
     }
 
-    CONSTEXPR14 FixedPoint& operator|=(FixedPoint n) {
+    constexpr FixedPoint& operator|=(FixedPoint n) {
         data_ |= n.data_;
         return *this;
     }
 
-    CONSTEXPR14 FixedPoint& operator^=(FixedPoint n) {
+    constexpr FixedPoint& operator^=(FixedPoint n) {
         data_ ^= n.data_;
         return *this;
     }
 
-    template <class Integer,
-              class = typename std::enable_if<std::is_integral<Integer>::value>::type>
-    CONSTEXPR14 FixedPoint& operator>>=(Integer n) {
+    template <IsIntegral Integer>
+    constexpr FixedPoint& operator>>=(Integer n) {
         data_ >>= n;
         return *this;
     }
 
-    template <class Integer,
-              class = typename std::enable_if<std::is_integral<Integer>::value>::type>
-    CONSTEXPR14 FixedPoint& operator<<=(Integer n) {
+    template <IsIntegral Integer>
+    constexpr FixedPoint& operator<<=(Integer n) {
         data_ <<= n;
         return *this;
     }
@@ -437,42 +409,42 @@ public: // conversion to basic types
         data_ += (data_ & fractional_mask) >> 1;
     }
 
-    constexpr int to_int() {
+    [[nodiscard]] constexpr int to_int() {
         round_up();
         return static_cast<int>((data_ & integer_mask) >> fractional_bits);
     }
 
-    constexpr unsigned int to_uint() const {
+    [[nodiscard]] constexpr unsigned int to_uint() {
         round_up();
         return static_cast<unsigned int>((data_ & integer_mask) >> fractional_bits);
     }
 
-    constexpr int64_t to_long() {
+    [[nodiscard]] constexpr int64_t to_long() {
         round_up();
         return static_cast<int64_t>((data_ & integer_mask) >> fractional_bits);
     }
 
-    constexpr int to_int_floor() const {
+    [[nodiscard]] constexpr int to_int_floor() const {
         return static_cast<int>((data_ & integer_mask) >> fractional_bits);
     }
 
-    constexpr int64_t to_long_floor() {
+    [[nodiscard]] constexpr int64_t to_long_floor() const {
         return static_cast<int64_t>((data_ & integer_mask) >> fractional_bits);
     }
 
-    constexpr unsigned int to_uint_floor() const {
+    [[nodiscard]] constexpr unsigned int to_uint_floor() const {
         return static_cast<unsigned int>((data_ & integer_mask) >> fractional_bits);
     }
 
-    constexpr float to_float() const {
+    [[nodiscard]] constexpr float to_float() const {
         return static_cast<float>(data_) / FixedPoint::one;
     }
 
-    constexpr double to_double() const {
+    [[nodiscard]] constexpr double to_double() const {
         return static_cast<double>(data_) / FixedPoint::one;
     }
 
-    constexpr base_type to_raw() const {
+    [[nodiscard]] constexpr base_type to_raw() const {
         return data_;
     }
 
@@ -480,27 +452,27 @@ public: // conversion to basic types
         data_ &= fractional_mask;
     }
 
-    constexpr base_type get_frac() const {
+    [[nodiscard]] constexpr base_type get_frac() const {
         return data_ & fractional_mask;
     }
 
 public:
-    CONSTEXPR14 void swap(FixedPoint& rhs) {
+    constexpr void swap(FixedPoint& rhs) noexcept {
         using std::swap;
         swap(data_, rhs.data_);
     }
 
 public:
-    base_type data_;
+    base_type data_{};
 };
 
 // if we have the same fractional portion, but differing integer portions, we trivially upgrade the
 // smaller type
 template <size_t I1, size_t I2, size_t F>
-CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
-operator+(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator+(
+    FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 
-    using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+    using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>;
 
     const T l = T::from_base(lhs.to_raw());
     const T r = T::from_base(rhs.to_raw());
@@ -508,10 +480,10 @@ operator+(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 }
 
 template <size_t I1, size_t I2, size_t F>
-CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
-operator-(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator-(
+    FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 
-    using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+    using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>;
 
     const T l = T::from_base(lhs.to_raw());
     const T r = T::from_base(rhs.to_raw());
@@ -519,10 +491,10 @@ operator-(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 }
 
 template <size_t I1, size_t I2, size_t F>
-CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
-operator*(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator*(
+    FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 
-    using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+    using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>;
 
     const T l = T::from_base(lhs.to_raw());
     const T r = T::from_base(rhs.to_raw());
@@ -530,10 +502,10 @@ operator*(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 }
 
 template <size_t I1, size_t I2, size_t F>
-CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
-operator/(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator/(
+    FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 
-    using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+    using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>;
 
     const T l = T::from_base(lhs.to_raw());
     const T r = T::from_base(rhs.to_raw());
@@ -548,159 +520,133 @@ std::ostream& operator<<(std::ostream& os, FixedPoint<I, F> f) {
 
 // basic math operators
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
+constexpr FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
     lhs += rhs;
     return lhs;
 }
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
+constexpr FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
     lhs -= rhs;
     return lhs;
 }
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
+constexpr FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
     lhs *= rhs;
     return lhs;
 }
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
+constexpr FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
     lhs /= rhs;
     return lhs;
 }
 
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, Number rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, Number rhs) {
     lhs += FixedPoint<I, F>(rhs);
     return lhs;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, Number rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, Number rhs) {
     lhs -= FixedPoint<I, F>(rhs);
     return lhs;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, Number rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, Number rhs) {
    lhs *= FixedPoint<I, F>(rhs);
     return lhs;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, Number rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, Number rhs) {
     lhs /= FixedPoint<I, F>(rhs);
     return lhs;
 }
 
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator+(Number lhs, FixedPoint<I, F> rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator+(Number lhs, FixedPoint<I, F> rhs) {
     FixedPoint<I, F> tmp(lhs);
     tmp += rhs;
     return tmp;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator-(Number lhs, FixedPoint<I, F> rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator-(Number lhs, FixedPoint<I, F> rhs) {
     FixedPoint<I, F> tmp(lhs);
     tmp -= rhs;
     return tmp;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator*(Number lhs, FixedPoint<I, F> rhs) {
612CONSTEXPR14 FixedPoint<I, F> operator*(Number lhs, FixedPoint<I, F> rhs) {
613 FixedPoint<I, F> tmp(lhs); 578 FixedPoint<I, F> tmp(lhs);
614 tmp *= rhs; 579 tmp *= rhs;
615 return tmp; 580 return tmp;
616} 581}
617template <size_t I, size_t F, class Number, 582template <size_t I, size_t F, IsArithmetic Number>
618 class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> 583constexpr FixedPoint<I, F> operator/(Number lhs, FixedPoint<I, F> rhs) {
619CONSTEXPR14 FixedPoint<I, F> operator/(Number lhs, FixedPoint<I, F> rhs) {
620 FixedPoint<I, F> tmp(lhs); 584 FixedPoint<I, F> tmp(lhs);
621 tmp /= rhs; 585 tmp /= rhs;
622 return tmp; 586 return tmp;
623} 587}
624 588
625// shift operators 589// shift operators
626template <size_t I, size_t F, class Integer, 590template <size_t I, size_t F, IsIntegral Integer>
627 class = typename std::enable_if<std::is_integral<Integer>::value>::type> 591constexpr FixedPoint<I, F> operator<<(FixedPoint<I, F> lhs, Integer rhs) {
628CONSTEXPR14 FixedPoint<I, F> operator<<(FixedPoint<I, F> lhs, Integer rhs) {
629 lhs <<= rhs; 592 lhs <<= rhs;
630 return lhs; 593 return lhs;
631} 594}
632template <size_t I, size_t F, class Integer, 595template <size_t I, size_t F, IsIntegral Integer>
633 class = typename std::enable_if<std::is_integral<Integer>::value>::type> 596constexpr FixedPoint<I, F> operator>>(FixedPoint<I, F> lhs, Integer rhs) {
634CONSTEXPR14 FixedPoint<I, F> operator>>(FixedPoint<I, F> lhs, Integer rhs) {
635 lhs >>= rhs; 597 lhs >>= rhs;
636 return lhs; 598 return lhs;
637} 599}
638 600
639// comparison operators 601// comparison operators
640template <size_t I, size_t F, class Number, 602template <size_t I, size_t F, IsArithmetic Number>
641 class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
642constexpr bool operator>(FixedPoint<I, F> lhs, Number rhs) { 603constexpr bool operator>(FixedPoint<I, F> lhs, Number rhs) {
643 return lhs > FixedPoint<I, F>(rhs); 604 return lhs > FixedPoint<I, F>(rhs);
644} 605}
645template <size_t I, size_t F, class Number, 606template <size_t I, size_t F, IsArithmetic Number>
646 class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
647constexpr bool operator<(FixedPoint<I, F> lhs, Number rhs) { 607constexpr bool operator<(FixedPoint<I, F> lhs, Number rhs) {
648 return lhs < FixedPoint<I, F>(rhs); 608 return lhs < FixedPoint<I, F>(rhs);
649} 609}
650template <size_t I, size_t F, class Number, 610template <size_t I, size_t F, IsArithmetic Number>
651 class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
652constexpr bool operator>=(FixedPoint<I, F> lhs, Number rhs) { 611constexpr bool operator>=(FixedPoint<I, F> lhs, Number rhs) {
653 return lhs >= FixedPoint<I, F>(rhs); 612 return lhs >= FixedPoint<I, F>(rhs);
654} 613}
655template <size_t I, size_t F, class Number, 614template <size_t I, size_t F, IsArithmetic Number>
656 class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
657constexpr bool operator<=(FixedPoint<I, F> lhs, Number rhs) { 615constexpr bool operator<=(FixedPoint<I, F> lhs, Number rhs) {
658 return lhs <= FixedPoint<I, F>(rhs); 616 return lhs <= FixedPoint<I, F>(rhs);
659} 617}
660template <size_t I, size_t F, class Number, 618template <size_t I, size_t F, IsArithmetic Number>
661 class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
662constexpr bool operator==(FixedPoint<I, F> lhs, Number rhs) { 619constexpr bool operator==(FixedPoint<I, F> lhs, Number rhs) {
663 return lhs == FixedPoint<I, F>(rhs); 620 return lhs == FixedPoint<I, F>(rhs);
664} 621}
665template <size_t I, size_t F, class Number, 622template <size_t I, size_t F, IsArithmetic Number>
666 class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
667constexpr bool operator!=(FixedPoint<I, F> lhs, Number rhs) { 623constexpr bool operator!=(FixedPoint<I, F> lhs, Number rhs) {
668 return lhs != FixedPoint<I, F>(rhs); 624 return lhs != FixedPoint<I, F>(rhs);
669} 625}
670 626
671template <size_t I, size_t F, class Number, 627template <size_t I, size_t F, IsArithmetic Number>
672 class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
673constexpr bool operator>(Number lhs, FixedPoint<I, F> rhs) { 628constexpr bool operator>(Number lhs, FixedPoint<I, F> rhs) {
674 return FixedPoint<I, F>(lhs) > rhs; 629 return FixedPoint<I, F>(lhs) > rhs;
675} 630}
676template <size_t I, size_t F, class Number, 631template <size_t I, size_t F, IsArithmetic Number>
677 class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
678constexpr bool operator<(Number lhs, FixedPoint<I, F> rhs) { 632constexpr bool operator<(Number lhs, FixedPoint<I, F> rhs) {
679 return FixedPoint<I, F>(lhs) < rhs; 633 return FixedPoint<I, F>(lhs) < rhs;
680} 634}
681template <size_t I, size_t F, class Number, 635template <size_t I, size_t F, IsArithmetic Number>
682 class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
683constexpr bool operator>=(Number lhs, FixedPoint<I, F> rhs) { 636constexpr bool operator>=(Number lhs, FixedPoint<I, F> rhs) {
684 return FixedPoint<I, F>(lhs) >= rhs; 637 return FixedPoint<I, F>(lhs) >= rhs;
685} 638}
686template <size_t I, size_t F, class Number, 639template <size_t I, size_t F, IsArithmetic Number>
687 class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
688constexpr bool operator<=(Number lhs, FixedPoint<I, F> rhs) { 640constexpr bool operator<=(Number lhs, FixedPoint<I, F> rhs) {
689 return FixedPoint<I, F>(lhs) <= rhs; 641 return FixedPoint<I, F>(lhs) <= rhs;
690} 642}
691template <size_t I, size_t F, class Number, 643template <size_t I, size_t F, IsArithmetic Number>
692 class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
693constexpr bool operator==(Number lhs, FixedPoint<I, F> rhs) { 644constexpr bool operator==(Number lhs, FixedPoint<I, F> rhs) {
694 return FixedPoint<I, F>(lhs) == rhs; 645 return FixedPoint<I, F>(lhs) == rhs;
695} 646}
696template <size_t I, size_t F, class Number, 647template <size_t I, size_t F, IsArithmetic Number>
697 class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
698constexpr bool operator!=(Number lhs, FixedPoint<I, F> rhs) { 648constexpr bool operator!=(Number lhs, FixedPoint<I, F> rhs) {
699 return FixedPoint<I, F>(lhs) != rhs; 649 return FixedPoint<I, F>(lhs) != rhs;
700} 650}
701 651
702} // namespace Common 652} // namespace Common
703
704#undef CONSTEXPR14
705
706#endif
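
The fixed_point.h hunks above are one mechanical migration: the CONSTEXPR14 compatibility macro becomes plain constexpr, typename std::conditional<...>::type becomes std::conditional_t, SFINAE constraints become C++20 concepts, and the old include-guard #endif is dropped. A minimal before/after sketch of the constraint style, assuming IsArithmetic is a thin wrapper over std::is_arithmetic_v as src/common/concepts.h (also touched in this change) suggests:

    #include <cstddef>
    #include <type_traits>

    template <std::size_t I, std::size_t F>
    class FixedPoint; // stand-in for the class defined earlier in fixed_point.h

    // Assumed shape of the concept; the real definition lives in common/concepts.h.
    template <typename T>
    concept IsArithmetic = std::is_arithmetic_v<T>;

    // Before: the constraint hides in a defaulted template parameter.
    template <std::size_t I, std::size_t F, class Number,
              class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
    FixedPoint<I, F> plus_old(FixedPoint<I, F> lhs, Number rhs);

    // After: the concept constrains the parameter directly and reads inline.
    template <std::size_t I, std::size_t F, IsArithmetic Number>
    constexpr FixedPoint<I, F> plus_new(FixedPoint<I, F> lhs, Number rhs);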
diff --git a/src/common/fs/file.h b/src/common/fs/file.h
index 69b53384c..167c4d826 100644
--- a/src/common/fs/file.h
+++ b/src/common/fs/file.h
@@ -209,8 +209,8 @@ public:
209 209
210 /** 210 /**
211 * Helper function which deduces the value type of a contiguous STL container used in ReadSpan. 211 * Helper function which deduces the value type of a contiguous STL container used in ReadSpan.
212 * If T is not a contiguous STL container as defined by the concept IsSTLContainer, this calls 212 * If T is not a contiguous container as defined by the concept IsContiguousContainer, this
213 * ReadObject and T must be a trivially copyable object. 213 * calls ReadObject and T must be a trivially copyable object.
214 * 214 *
215 * See ReadSpan for more details if T is a contiguous container. 215 * See ReadSpan for more details if T is a contiguous container.
216 * See ReadObject for more details if T is a trivially copyable object. 216 * See ReadObject for more details if T is a trivially copyable object.
@@ -223,7 +223,7 @@ public:
223 */ 223 */
224 template <typename T> 224 template <typename T>
225 [[nodiscard]] size_t Read(T& data) const { 225 [[nodiscard]] size_t Read(T& data) const {
226 if constexpr (IsSTLContainer<T>) { 226 if constexpr (IsContiguousContainer<T>) {
227 using ContiguousType = typename T::value_type; 227 using ContiguousType = typename T::value_type;
228 static_assert(std::is_trivially_copyable_v<ContiguousType>, 228 static_assert(std::is_trivially_copyable_v<ContiguousType>,
229 "Data type must be trivially copyable."); 229 "Data type must be trivially copyable.");
@@ -235,8 +235,8 @@ public:
235 235
236 /** 236 /**
237 * Helper function which deduces the value type of a contiguous STL container used in WriteSpan. 237 * Helper function which deduces the value type of a contiguous STL container used in WriteSpan.
238 * If T is not a contiguous STL container as defined by the concept IsSTLContainer, this calls 238 * If T is not a contiguous STL container as defined by the concept IsContiguousContainer, this
239 * WriteObject and T must be a trivially copyable object. 239 * calls WriteObject and T must be a trivially copyable object.
240 * 240 *
241 * See WriteSpan for more details if T is a contiguous container. 241 * See WriteSpan for more details if T is a contiguous container.
242 * See WriteObject for more details if T is a trivially copyable object. 242 * See WriteObject for more details if T is a trivially copyable object.
@@ -249,7 +249,7 @@ public:
249 */ 249 */
250 template <typename T> 250 template <typename T>
251 [[nodiscard]] size_t Write(const T& data) const { 251 [[nodiscard]] size_t Write(const T& data) const {
252 if constexpr (IsSTLContainer<T>) { 252 if constexpr (IsContiguousContainer<T>) {
253 using ContiguousType = typename T::value_type; 253 using ContiguousType = typename T::value_type;
254 static_assert(std::is_trivially_copyable_v<ContiguousType>, 254 static_assert(std::is_trivially_copyable_v<ContiguousType>,
255 "Data type must be trivially copyable."); 255 "Data type must be trivially copyable.");
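
The IsSTLContainer to IsContiguousContainer rename does not change the dispatch: one templated entry point still routes contiguous containers to the span overload and everything else to the object overload. A reduced free-standing sketch of that pattern; the concept body here is an assumed approximation of the real one in src/common/concepts.h, and the two helper declarations stand in for the IOFile members:

    #include <cstddef>
    #include <span>
    #include <type_traits>

    // Assumed approximation of the concept named in the diff.
    template <typename T>
    concept IsContiguousContainer = requires(T& t) {
        typename T::value_type;
        { t.data() };
        { t.size() };
    };

    template <typename U> std::size_t ReadSpan(std::span<U> data); // stand-in for IOFile::ReadSpan
    template <typename U> std::size_t ReadObject(U& data);         // stand-in for IOFile::ReadObject

    template <typename T>
    std::size_t Read(T& data) {
        if constexpr (IsContiguousContainer<T>) {
            using ContiguousType = typename T::value_type;
            static_assert(std::is_trivially_copyable_v<ContiguousType>,
                          "Data type must be trivially copyable.");
            return ReadSpan(std::span{data}); // vectors, arrays, strings, ...
        } else {
            return ReadObject(data); // single trivially copyable object
        }
    }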
diff --git a/src/common/input.h b/src/common/input.h
index b533f3844..cb30b7254 100644
--- a/src/common/input.h
+++ b/src/common/input.h
@@ -100,7 +100,6 @@ enum class CameraError {
100enum class VibrationAmplificationType { 100enum class VibrationAmplificationType {
101 Linear, 101 Linear,
102 Exponential, 102 Exponential,
103 Test,
104}; 103};
105 104
106// Analog properties for calibration 105// Analog properties for calibration
@@ -325,6 +324,10 @@ public:
325 return VibrationError::NotSupported; 324 return VibrationError::NotSupported;
326 } 325 }
327 326
327 virtual bool IsVibrationEnabled() {
328 return false;
329 }
330
328 virtual PollingError SetPollingMode([[maybe_unused]] PollingMode polling_mode) { 331 virtual PollingError SetPollingMode([[maybe_unused]] PollingMode polling_mode) {
329 return PollingError::NotSupported; 332 return PollingError::NotSupported;
330 } 333 }
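
With the Test amplification type removed, rumble support becomes a property the device reports rather than something probed with a test pulse. A sketch of a backend override; the derived class name and has_rumble flag are hypothetical, only the IsVibrationEnabled virtual comes from the header above:

    // Hypothetical backend device; only the overridden virtual is from input.h.
    class SDLOutputDevice final : public Common::Input::OutputDevice {
    public:
        bool IsVibrationEnabled() override {
            // Decided once when the device is opened, instead of sending a
            // short test vibration on every query.
            return has_rumble;
        }

    private:
        bool has_rumble{};
    };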
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index abeb5859b..113e663b5 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -190,6 +190,9 @@ add_library(core STATIC
190 hle/kernel/k_code_memory.h 190 hle/kernel/k_code_memory.h
191 hle/kernel/k_condition_variable.cpp 191 hle/kernel/k_condition_variable.cpp
192 hle/kernel/k_condition_variable.h 192 hle/kernel/k_condition_variable.h
193 hle/kernel/k_dynamic_page_manager.h
194 hle/kernel/k_dynamic_resource_manager.h
195 hle/kernel/k_dynamic_slab_heap.h
193 hle/kernel/k_event.cpp 196 hle/kernel/k_event.cpp
194 hle/kernel/k_event.h 197 hle/kernel/k_event.h
195 hle/kernel/k_handle_table.cpp 198 hle/kernel/k_handle_table.cpp
@@ -240,6 +243,8 @@ add_library(core STATIC
240 hle/kernel/k_server_session.h 243 hle/kernel/k_server_session.h
241 hle/kernel/k_session.cpp 244 hle/kernel/k_session.cpp
242 hle/kernel/k_session.h 245 hle/kernel/k_session.h
246 hle/kernel/k_session_request.cpp
247 hle/kernel/k_session_request.h
243 hle/kernel/k_shared_memory.cpp 248 hle/kernel/k_shared_memory.cpp
244 hle/kernel/k_shared_memory.h 249 hle/kernel/k_shared_memory.h
245 hle/kernel/k_shared_memory_info.h 250 hle/kernel/k_shared_memory_info.h
@@ -769,19 +774,15 @@ if (MSVC)
769 /we4244 # 'conversion': conversion from 'type1' to 'type2', possible loss of data 774 /we4244 # 'conversion': conversion from 'type1' to 'type2', possible loss of data
770 /we4245 # 'conversion': conversion from 'type1' to 'type2', signed/unsigned mismatch 775 /we4245 # 'conversion': conversion from 'type1' to 'type2', signed/unsigned mismatch
771 /we4254 # 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data 776 /we4254 # 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data
777 /we4800 # Implicit conversion from 'type' to bool. Possible information loss
772 ) 778 )
773else() 779else()
774 target_compile_options(core PRIVATE 780 target_compile_options(core PRIVATE
775 -Werror=conversion 781 -Werror=conversion
776 -Werror=ignored-qualifiers
777 782
778 $<$<CXX_COMPILER_ID:GNU>:-Werror=class-memaccess> 783 -Wno-sign-conversion
779 $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter>
780 $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable>
781 784
782 $<$<CXX_COMPILER_ID:Clang>:-fsized-deallocation> 785 $<$<CXX_COMPILER_ID:Clang>:-fsized-deallocation>
783
784 -Wno-sign-conversion
785 ) 786 )
786endif() 787endif()
787 788
diff --git a/src/core/arm/arm_interface.cpp b/src/core/arm/arm_interface.cpp
index 953d96439..29ba562dc 100644
--- a/src/core/arm/arm_interface.cpp
+++ b/src/core/arm/arm_interface.cpp
@@ -134,6 +134,14 @@ void ARM_Interface::Run() {
134 } 134 }
135 system.ExitDynarmicProfile(); 135 system.ExitDynarmicProfile();
136 136
137 // If the thread is scheduled for termination, exit the thread.
138 if (current_thread->HasDpc()) {
139 if (current_thread->IsTerminationRequested()) {
140 current_thread->Exit();
141 UNREACHABLE();
142 }
143 }
144
137 // Notify the debugger and go to sleep if a breakpoint was hit, 145 // Notify the debugger and go to sleep if a breakpoint was hit,
138 // or if the thread is unable to continue for any reason. 146 // or if the thread is unable to continue for any reason.
139 if (Has(hr, breakpoint) || Has(hr, no_execute)) { 147 if (Has(hr, breakpoint) || Has(hr, no_execute)) {
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index d1e70f19d..287ba102e 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -450,7 +450,7 @@ std::vector<ARM_Interface::BacktraceEntry> ARM_Dynarmic_32::GetBacktrace(Core::S
450 // Frame records are two words long: 450 // Frame records are two words long:
451 // fp+0 : pointer to previous frame record 451 // fp+0 : pointer to previous frame record
452 // fp+4 : value of lr for frame 452 // fp+4 : value of lr for frame
453 while (true) { 453 for (size_t i = 0; i < 256; i++) {
454 out.push_back({"", 0, lr, 0, ""}); 454 out.push_back({"", 0, lr, 0, ""});
455 if (!fp || (fp % 4 != 0) || !memory.IsValidVirtualAddressRange(fp, 8)) { 455 if (!fp || (fp % 4 != 0) || !memory.IsValidVirtualAddressRange(fp, 8)) {
456 break; 456 break;
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index 22b5d5656..afb7fb3a0 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -517,7 +517,7 @@ std::vector<ARM_Interface::BacktraceEntry> ARM_Dynarmic_64::GetBacktrace(Core::S
517 // Frame records are two words long: 517 // Frame records are two words long:
518 // fp+0 : pointer to previous frame record 518 // fp+0 : pointer to previous frame record
519 // fp+8 : value of lr for frame 519 // fp+8 : value of lr for frame
520 while (true) { 520 for (size_t i = 0; i < 256; i++) {
521 out.push_back({"", 0, lr, 0, ""}); 521 out.push_back({"", 0, lr, 0, ""});
522 if (!fp || (fp % 4 != 0) || !memory.IsValidVirtualAddressRange(fp, 16)) { 522 if (!fp || (fp % 4 != 0) || !memory.IsValidVirtualAddressRange(fp, 16)) {
523 break; 523 break;
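
Both backtrace walkers receive the same fix: the unbounded while (true) over frame records becomes a loop capped at 256 frames, so a cyclic or corrupted frame-pointer chain can no longer hang backtrace generation. The AArch64 shape of the bounded walk, reconstructed from the hunk and its frame-record comment (out, lr, fp and memory are the surrounding function's locals; Read64 is assumed to be the accessor the rest of the function uses):

    // fp+0 : pointer to previous frame record, fp+8 : value of lr for frame.
    constexpr std::size_t max_frames = 256; // guard against cyclic/corrupt chains
    for (std::size_t i = 0; i < max_frames; i++) {
        out.push_back({"", 0, lr, 0, ""});
        if (!fp || (fp % 4 != 0) || !memory.IsValidVirtualAddressRange(fp, 16)) {
            break;
        }
        lr = memory.Read64(fp + 8); // saved link register for this frame
        fp = memory.Read64(fp);     // follow the chain to the previous record
    }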
diff --git a/src/core/core.cpp b/src/core/core.cpp
index 1deeee154..40a610435 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -133,6 +133,50 @@ struct System::Impl {
133 : kernel{system}, fs_controller{system}, memory{system}, hid_core{}, room_network{}, 133 : kernel{system}, fs_controller{system}, memory{system}, hid_core{}, room_network{},
134 cpu_manager{system}, reporter{system}, applet_manager{system}, time_manager{system} {} 134 cpu_manager{system}, reporter{system}, applet_manager{system}, time_manager{system} {}
135 135
136 void Initialize(System& system) {
137 device_memory = std::make_unique<Core::DeviceMemory>();
138
139 is_multicore = Settings::values.use_multi_core.GetValue();
140
141 core_timing.SetMulticore(is_multicore);
142 core_timing.Initialize([&system]() { system.RegisterHostThread(); });
143
144 const auto posix_time = std::chrono::system_clock::now().time_since_epoch();
145 const auto current_time =
146 std::chrono::duration_cast<std::chrono::seconds>(posix_time).count();
147 Settings::values.custom_rtc_differential =
148 Settings::values.custom_rtc.value_or(current_time) - current_time;
149
150 // Create a default fs if one doesn't already exist.
151 if (virtual_filesystem == nullptr) {
152 virtual_filesystem = std::make_shared<FileSys::RealVfsFilesystem>();
153 }
154 if (content_provider == nullptr) {
155 content_provider = std::make_unique<FileSys::ContentProviderUnion>();
156 }
157
158 // Create default implementations of applets if one is not provided.
159 applet_manager.SetDefaultAppletsIfMissing();
160
161 is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue();
162
163 kernel.SetMulticore(is_multicore);
164 cpu_manager.SetMulticore(is_multicore);
165 cpu_manager.SetAsyncGpu(is_async_gpu);
166 }
167
168 void ReinitializeIfNecessary(System& system) {
169 if (is_multicore == Settings::values.use_multi_core.GetValue()) {
170 return;
171 }
172
173 LOG_DEBUG(Kernel, "Re-initializing");
174
175 is_multicore = Settings::values.use_multi_core.GetValue();
176
177 Initialize(system);
178 }
179
136 SystemResultStatus Run() { 180 SystemResultStatus Run() {
137 std::unique_lock<std::mutex> lk(suspend_guard); 181 std::unique_lock<std::mutex> lk(suspend_guard);
138 status = SystemResultStatus::Success; 182 status = SystemResultStatus::Success;
@@ -178,37 +222,14 @@ struct System::Impl {
178 debugger = std::make_unique<Debugger>(system, port); 222 debugger = std::make_unique<Debugger>(system, port);
179 } 223 }
180 224
181 SystemResultStatus Init(System& system, Frontend::EmuWindow& emu_window) { 225 SystemResultStatus SetupForMainProcess(System& system, Frontend::EmuWindow& emu_window) {
182 LOG_DEBUG(Core, "initialized OK"); 226 LOG_DEBUG(Core, "initialized OK");
183 227
184 device_memory = std::make_unique<Core::DeviceMemory>(); 228 // Setting changes may require a full system reinitialization (e.g., disabling multicore).
185 229 ReinitializeIfNecessary(system);
186 is_multicore = Settings::values.use_multi_core.GetValue();
187 is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue();
188
189 kernel.SetMulticore(is_multicore);
190 cpu_manager.SetMulticore(is_multicore);
191 cpu_manager.SetAsyncGpu(is_async_gpu);
192 core_timing.SetMulticore(is_multicore);
193 230
194 kernel.Initialize(); 231 kernel.Initialize();
195 cpu_manager.Initialize(); 232 cpu_manager.Initialize();
196 core_timing.Initialize([&system]() { system.RegisterHostThread(); });
197
198 const auto posix_time = std::chrono::system_clock::now().time_since_epoch();
199 const auto current_time =
200 std::chrono::duration_cast<std::chrono::seconds>(posix_time).count();
201 Settings::values.custom_rtc_differential =
202 Settings::values.custom_rtc.value_or(current_time) - current_time;
203
204 // Create a default fs if one doesn't already exist.
205 if (virtual_filesystem == nullptr)
206 virtual_filesystem = std::make_shared<FileSys::RealVfsFilesystem>();
207 if (content_provider == nullptr)
208 content_provider = std::make_unique<FileSys::ContentProviderUnion>();
209
210 /// Create default implementations of applets if one is not provided.
211 applet_manager.SetDefaultAppletsIfMissing();
212 233
213 /// Reset all glue registrations 234 /// Reset all glue registrations
214 arp_manager.ResetAll(); 235 arp_manager.ResetAll();
@@ -253,11 +274,11 @@ struct System::Impl {
253 return SystemResultStatus::ErrorGetLoader; 274 return SystemResultStatus::ErrorGetLoader;
254 } 275 }
255 276
256 SystemResultStatus init_result{Init(system, emu_window)}; 277 SystemResultStatus init_result{SetupForMainProcess(system, emu_window)};
257 if (init_result != SystemResultStatus::Success) { 278 if (init_result != SystemResultStatus::Success) {
258 LOG_CRITICAL(Core, "Failed to initialize system (Error {})!", 279 LOG_CRITICAL(Core, "Failed to initialize system (Error {})!",
259 static_cast<int>(init_result)); 280 static_cast<int>(init_result));
260 Shutdown(); 281 ShutdownMainProcess();
261 return init_result; 282 return init_result;
262 } 283 }
263 284
@@ -276,7 +297,7 @@ struct System::Impl {
276 const auto [load_result, load_parameters] = app_loader->Load(*main_process, system); 297 const auto [load_result, load_parameters] = app_loader->Load(*main_process, system);
277 if (load_result != Loader::ResultStatus::Success) { 298 if (load_result != Loader::ResultStatus::Success) {
278 LOG_CRITICAL(Core, "Failed to load ROM (Error {})!", load_result); 299 LOG_CRITICAL(Core, "Failed to load ROM (Error {})!", load_result);
279 Shutdown(); 300 ShutdownMainProcess();
280 301
281 return static_cast<SystemResultStatus>( 302 return static_cast<SystemResultStatus>(
282 static_cast<u32>(SystemResultStatus::ErrorLoader) + static_cast<u32>(load_result)); 303 static_cast<u32>(SystemResultStatus::ErrorLoader) + static_cast<u32>(load_result));
@@ -335,7 +356,7 @@ struct System::Impl {
335 return status; 356 return status;
336 } 357 }
337 358
338 void Shutdown() { 359 void ShutdownMainProcess() {
339 SetShuttingDown(true); 360 SetShuttingDown(true);
340 361
341 // Log last frame performance stats if game was loaded 362
@@ -363,13 +384,14 @@ struct System::Impl {
363 kernel.ShutdownCores(); 384 kernel.ShutdownCores();
364 cpu_manager.Shutdown(); 385 cpu_manager.Shutdown();
365 debugger.reset(); 386 debugger.reset();
387 services->KillNVNFlinger();
366 kernel.CloseServices(); 388 kernel.CloseServices();
367 services.reset(); 389 services.reset();
368 service_manager.reset(); 390 service_manager.reset();
369 cheat_engine.reset(); 391 cheat_engine.reset();
370 telemetry_session.reset(); 392 telemetry_session.reset();
371 time_manager.Shutdown(); 393 time_manager.Shutdown();
372 core_timing.Shutdown(); 394 core_timing.ClearPendingEvents();
373 app_loader.reset(); 395 app_loader.reset();
374 audio_core.reset(); 396 audio_core.reset();
375 gpu_core.reset(); 397 gpu_core.reset();
@@ -377,7 +399,6 @@ struct System::Impl {
377 perf_stats.reset(); 399 perf_stats.reset();
378 kernel.Shutdown(); 400 kernel.Shutdown();
379 memory.Reset(); 401 memory.Reset();
380 applet_manager.ClearAll();
381 402
382 if (auto room_member = room_network.GetRoomMember().lock()) { 403 if (auto room_member = room_network.GetRoomMember().lock()) {
383 Network::GameInfo game_info{}; 404 Network::GameInfo game_info{};
@@ -520,6 +541,10 @@ const CpuManager& System::GetCpuManager() const {
520 return impl->cpu_manager; 541 return impl->cpu_manager;
521} 542}
522 543
544void System::Initialize() {
545 impl->Initialize(*this);
546}
547
523SystemResultStatus System::Run() { 548SystemResultStatus System::Run() {
524 return impl->Run(); 549 return impl->Run();
525} 550}
@@ -540,8 +565,8 @@ void System::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) {
540 impl->kernel.InvalidateCpuInstructionCacheRange(addr, size); 565 impl->kernel.InvalidateCpuInstructionCacheRange(addr, size);
541} 566}
542 567
543void System::Shutdown() { 568void System::ShutdownMainProcess() {
544 impl->Shutdown(); 569 impl->ShutdownMainProcess();
545} 570}
546 571
547bool System::IsShuttingDown() const { 572bool System::IsShuttingDown() const {
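
Taken together, the core.cpp changes split the old monolithic Init/Shutdown pair: Initialize() now does the host-level setup that can outlive a game (device memory, core timing, default VFS and applets), SetupForMainProcess() does the per-game part, and ShutdownMainProcess() tears down only the game. A hypothetical frontend flow under the new API; the method names come from the diff, the surrounding calls are illustrative:

    Core::System system;
    system.Initialize(); // once per emulator session

    const auto result = system.Load(emu_window, game_path);
    if (result == Core::SystemResultStatus::Success) {
        system.Run(); // SetupForMainProcess already ran inside Load
        // ... emulation ...
        system.ShutdownMainProcess(); // game goes away, host state stays up
    }
    // A later Load() runs ReinitializeIfNecessary(), so toggling the multicore
    // setting between games now takes effect without restarting the emulator.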
diff --git a/src/core/core.h b/src/core/core.h
index 7843cc8ad..4ebedffd9 100644
--- a/src/core/core.h
+++ b/src/core/core.h
@@ -143,6 +143,12 @@ public:
143 System& operator=(System&&) = delete; 143 System& operator=(System&&) = delete;
144 144
145 /** 145 /**
146 * Initializes the system
147 * This function will initialize core functionality used for system emulation
148 */
149 void Initialize();
150
151 /**
146 * Run the OS and Application 152 * Run the OS and Application
147 * This function will start emulation and run the relevant devices 153 * This function will start emulation and run the relevant devices
148 */ 154 */
@@ -166,8 +172,8 @@ public:
166 172
167 void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size); 173 void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);
168 174
169 /// Shutdown the emulated system. 175 /// Shutdown the main emulated process.
170 void Shutdown(); 176 void ShutdownMainProcess();
171 177
172 /// Check if the core is shutting down. 178 /// Check if the core is shutting down.
173 [[nodiscard]] bool IsShuttingDown() const; 179 [[nodiscard]] bool IsShuttingDown() const;
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp
index 2678ce532..0e7b5f943 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -40,7 +40,9 @@ struct CoreTiming::Event {
40CoreTiming::CoreTiming() 40CoreTiming::CoreTiming()
41 : clock{Common::CreateBestMatchingClock(Hardware::BASE_CLOCK_RATE, Hardware::CNTFREQ)} {} 41 : clock{Common::CreateBestMatchingClock(Hardware::BASE_CLOCK_RATE, Hardware::CNTFREQ)} {}
42 42
43CoreTiming::~CoreTiming() = default; 43CoreTiming::~CoreTiming() {
44 Reset();
45}
44 46
45void CoreTiming::ThreadEntry(CoreTiming& instance) { 47void CoreTiming::ThreadEntry(CoreTiming& instance) {
46 constexpr char name[] = "HostTiming"; 48 constexpr char name[] = "HostTiming";
@@ -53,6 +55,7 @@ void CoreTiming::ThreadEntry(CoreTiming& instance) {
53} 55}
54 56
55void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) { 57void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
58 Reset();
56 on_thread_init = std::move(on_thread_init_); 59 on_thread_init = std::move(on_thread_init_);
57 event_fifo_id = 0; 60 event_fifo_id = 0;
58 shutting_down = false; 61 shutting_down = false;
@@ -65,17 +68,8 @@ void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
65 } 68 }
66} 69}
67 70
68void CoreTiming::Shutdown() { 71void CoreTiming::ClearPendingEvents() {
69 paused = true; 72 event_queue.clear();
70 shutting_down = true;
71 pause_event.Set();
72 event.Set();
73 if (timer_thread) {
74 timer_thread->join();
75 }
76 ClearPendingEvents();
77 timer_thread.reset();
78 has_started = false;
79} 73}
80 74
81void CoreTiming::Pause(bool is_paused) { 75void CoreTiming::Pause(bool is_paused) {
@@ -196,10 +190,6 @@ u64 CoreTiming::GetClockTicks() const {
196 return CpuCyclesToClockCycles(ticks); 190 return CpuCyclesToClockCycles(ticks);
197} 191}
198 192
199void CoreTiming::ClearPendingEvents() {
200 event_queue.clear();
201}
202
203void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) { 193void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
204 std::scoped_lock lock{basic_lock}; 194 std::scoped_lock lock{basic_lock};
205 195
@@ -307,6 +297,18 @@ void CoreTiming::ThreadLoop() {
307 } 297 }
308} 298}
309 299
300void CoreTiming::Reset() {
301 paused = true;
302 shutting_down = true;
303 pause_event.Set();
304 event.Set();
305 if (timer_thread) {
306 timer_thread->join();
307 }
308 timer_thread.reset();
309 has_started = false;
310}
311
310std::chrono::nanoseconds CoreTiming::GetGlobalTimeNs() const { 312std::chrono::nanoseconds CoreTiming::GetGlobalTimeNs() const {
311 if (is_multicore) { 313 if (is_multicore) {
312 return clock->GetTimeNS(); 314 return clock->GetTimeNS();
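
The net effect in core_timing is a re-entrant lifecycle: the teardown that used to live in Shutdown() moves into a private Reset() that both the destructor and Initialize() call, while ClearPendingEvents() becomes the public, minimal exit-time hook. In outline (simplified from the hunks above):

    class CoreTiming {
    public:
        ~CoreTiming() {
            Reset(); // always joins the timer thread, even without explicit shutdown
        }

        void Initialize(std::function<void()>&& on_thread_init_) {
            Reset(); // safe on a live instance: stops any previous timer thread
            // ... store the callback, clear state, spawn timer_thread ...
        }

        void ClearPendingEvents(); // now public; called from ShutdownMainProcess()

    private:
        void Reset(); // pauses, signals, joins and resets timer_thread
    };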
diff --git a/src/core/core_timing.h b/src/core/core_timing.h
index 3259397b2..b5925193c 100644
--- a/src/core/core_timing.h
+++ b/src/core/core_timing.h
@@ -61,19 +61,14 @@ public:
61 /// required to end slice - 1 and start slice 0 before the first cycle of code is executed. 61 /// required to end slice - 1 and start slice 0 before the first cycle of code is executed.
62 void Initialize(std::function<void()>&& on_thread_init_); 62 void Initialize(std::function<void()>&& on_thread_init_);
63 63
64 /// Tears down all timing related functionality. 64 /// Clear all pending events. This should ONLY be done on exit.
65 void Shutdown(); 65 void ClearPendingEvents();
66 66
67 /// Sets if emulation is multicore or single core, must be set before Initialize 67 /// Sets if emulation is multicore or single core, must be set before Initialize
68 void SetMulticore(bool is_multicore_) { 68 void SetMulticore(bool is_multicore_) {
69 is_multicore = is_multicore_; 69 is_multicore = is_multicore_;
70 } 70 }
71 71
72 /// Check if it's using host timing.
73 bool IsHostTiming() const {
74 return is_multicore;
75 }
76
77 /// Pauses/Unpauses the execution of the timer thread. 72 /// Pauses/Unpauses the execution of the timer thread.
78 void Pause(bool is_paused); 73 void Pause(bool is_paused);
79 74
@@ -136,12 +131,11 @@ public:
136private: 131private:
137 struct Event; 132 struct Event;
138 133
139 /// Clear all pending events. This should ONLY be done on exit.
140 void ClearPendingEvents();
141
142 static void ThreadEntry(CoreTiming& instance); 134 static void ThreadEntry(CoreTiming& instance);
143 void ThreadLoop(); 135 void ThreadLoop();
144 136
137 void Reset();
138
145 std::unique_ptr<Common::WallClock> clock; 139 std::unique_ptr<Common::WallClock> clock;
146 140
147 s64 global_timer = 0; 141 s64 global_timer = 0;
diff --git a/src/core/device_memory.h b/src/core/device_memory.h
index df61b0c0b..90510733c 100644
--- a/src/core/device_memory.h
+++ b/src/core/device_memory.h
@@ -31,12 +31,14 @@ public:
31 DramMemoryMap::Base; 31 DramMemoryMap::Base;
32 } 32 }
33 33
34 u8* GetPointer(PAddr addr) { 34 template <typename T>
35 return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base); 35 T* GetPointer(PAddr addr) {
36 return reinterpret_cast<T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base));
36 } 37 }
37 38
38 const u8* GetPointer(PAddr addr) const { 39 template <typename T>
39 return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base); 40 const T* GetPointer(PAddr addr) const {
41 return reinterpret_cast<const T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base));
40 } 42 }
41 43
42 Common::HostMemory buffer; 44 Common::HostMemory buffer;
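
Templating GetPointer moves the reinterpret_cast inside DeviceMemory, so callers name the pointee type instead of casting a raw u8* at every site. Usage sketch; PageBuffer is a made-up example type:

    // Before: cast at the call site.
    auto* page = reinterpret_cast<PageBuffer*>(device_memory.GetPointer(addr));

    // After: the template parameter performs the cast; u8 stays available for
    // byte-level access.
    auto* page  = device_memory.GetPointer<PageBuffer>(addr);
    u8*   bytes = device_memory.GetPointer<u8>(addr);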
diff --git a/src/core/file_sys/card_image.cpp b/src/core/file_sys/card_image.cpp
index f23d9373b..5d02865f4 100644
--- a/src/core/file_sys/card_image.cpp
+++ b/src/core/file_sys/card_image.cpp
@@ -232,8 +232,8 @@ const std::vector<std::shared_ptr<NCA>>& XCI::GetNCAs() const {
232 232
233std::shared_ptr<NCA> XCI::GetNCAByType(NCAContentType type) const { 233std::shared_ptr<NCA> XCI::GetNCAByType(NCAContentType type) const {
234 const auto program_id = secure_partition->GetProgramTitleID(); 234 const auto program_id = secure_partition->GetProgramTitleID();
235 const auto iter = std::find_if( 235 const auto iter =
236 ncas.begin(), ncas.end(), [this, type, program_id](const std::shared_ptr<NCA>& nca) { 236 std::find_if(ncas.begin(), ncas.end(), [type, program_id](const std::shared_ptr<NCA>& nca) {
237 return nca->GetType() == type && nca->GetTitleId() == program_id; 237 return nca->GetType() == type && nca->GetTitleId() == program_id;
238 }); 238 });
239 return iter == ncas.end() ? nullptr : *iter; 239 return iter == ncas.end() ? nullptr : *iter;
diff --git a/src/core/file_sys/control_metadata.cpp b/src/core/file_sys/control_metadata.cpp
index be25da2f6..50f44f598 100644
--- a/src/core/file_sys/control_metadata.cpp
+++ b/src/core/file_sys/control_metadata.cpp
@@ -1,6 +1,7 @@
1// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project 1// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later 2// SPDX-License-Identifier: GPL-2.0-or-later
3 3
4#include "common/settings.h"
4#include "common/string_util.h" 5#include "common/string_util.h"
5#include "common/swap.h" 6#include "common/swap.h"
6#include "core/file_sys/control_metadata.h" 7#include "core/file_sys/control_metadata.h"
@@ -37,6 +38,27 @@ std::string LanguageEntry::GetDeveloperName() const {
37 developer_name.size()); 38 developer_name.size());
38} 39}
39 40
41constexpr std::array<Language, 18> language_to_codes = {{
42 Language::Japanese,
43 Language::AmericanEnglish,
44 Language::French,
45 Language::German,
46 Language::Italian,
47 Language::Spanish,
48 Language::Chinese,
49 Language::Korean,
50 Language::Dutch,
51 Language::Portuguese,
52 Language::Russian,
53 Language::Taiwanese,
54 Language::BritishEnglish,
55 Language::CanadianFrench,
56 Language::LatinAmericanSpanish,
57 Language::Chinese,
58 Language::Taiwanese,
59 Language::BrazilianPortuguese,
60}};
61
40NACP::NACP() = default; 62NACP::NACP() = default;
41 63
42NACP::NACP(VirtualFile file) { 64NACP::NACP(VirtualFile file) {
@@ -45,9 +67,13 @@ NACP::NACP(VirtualFile file) {
45 67
46NACP::~NACP() = default; 68NACP::~NACP() = default;
47 69
48const LanguageEntry& NACP::GetLanguageEntry(Language language) const { 70const LanguageEntry& NACP::GetLanguageEntry() const {
49 if (language != Language::Default) { 71 Language language = language_to_codes[Settings::values.language_index.GetValue()];
50 return raw.language_entries.at(static_cast<u8>(language)); 72
73 {
74 const auto& language_entry = raw.language_entries.at(static_cast<u8>(language));
75 if (!language_entry.GetApplicationName().empty())
76 return language_entry;
51 } 77 }
52 78
53 for (const auto& language_entry : raw.language_entries) { 79 for (const auto& language_entry : raw.language_entries) {
@@ -55,16 +81,15 @@ const LanguageEntry& NACP::GetLanguageEntry(Language language) const {
55 return language_entry; 81 return language_entry;
56 } 82 }
57 83
58 // Fallback to English 84 return raw.language_entries.at(static_cast<u8>(Language::AmericanEnglish));
59 return GetLanguageEntry(Language::AmericanEnglish);
60} 85}
61 86
62std::string NACP::GetApplicationName(Language language) const { 87std::string NACP::GetApplicationName() const {
63 return GetLanguageEntry(language).GetApplicationName(); 88 return GetLanguageEntry().GetApplicationName();
64} 89}
65 90
66std::string NACP::GetDeveloperName(Language language) const { 91std::string NACP::GetDeveloperName() const {
67 return GetLanguageEntry(language).GetDeveloperName(); 92 return GetLanguageEntry().GetDeveloperName();
68} 93}
69 94
70u64 NACP::GetTitleId() const { 95u64 NACP::GetTitleId() const {
diff --git a/src/core/file_sys/control_metadata.h b/src/core/file_sys/control_metadata.h
index 75295519c..6a81873b1 100644
--- a/src/core/file_sys/control_metadata.h
+++ b/src/core/file_sys/control_metadata.h
@@ -101,9 +101,9 @@ public:
101 explicit NACP(VirtualFile file); 101 explicit NACP(VirtualFile file);
102 ~NACP(); 102 ~NACP();
103 103
104 const LanguageEntry& GetLanguageEntry(Language language = Language::Default) const; 104 const LanguageEntry& GetLanguageEntry() const;
105 std::string GetApplicationName(Language language = Language::Default) const; 105 std::string GetApplicationName() const;
106 std::string GetDeveloperName(Language language = Language::Default) const; 106 std::string GetDeveloperName() const;
107 u64 GetTitleId() const; 107 u64 GetTitleId() const;
108 u64 GetDLCBaseTitleId() const; 108 u64 GetDLCBaseTitleId() const;
109 std::string GetVersionString() const; 109 std::string GetVersionString() const;
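
For callers, the NACP accessors simply lose their Language parameter: the configured system language is mapped through the language_to_codes table, with fallback first to any non-empty entry and finally to AmericanEnglish. Call sites change like this (sketch):

    FileSys::NACP nacp(control_file);

    // Before: nacp.GetApplicationName(FileSys::Language::Default);
    const auto name      = nacp.GetApplicationName(); // honors Settings language_index
    const auto developer = nacp.GetDeveloperName();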
diff --git a/src/core/file_sys/program_metadata.cpp b/src/core/file_sys/program_metadata.cpp
index 08d489eab..f00479bd3 100644
--- a/src/core/file_sys/program_metadata.cpp
+++ b/src/core/file_sys/program_metadata.cpp
@@ -127,7 +127,7 @@ void ProgramMetadata::LoadManual(bool is_64_bit, ProgramAddressSpaceType address
127} 127}
128 128
129bool ProgramMetadata::Is64BitProgram() const { 129bool ProgramMetadata::Is64BitProgram() const {
130 return npdm_header.has_64_bit_instructions; 130 return npdm_header.has_64_bit_instructions.As<bool>();
131} 131}
132 132
133ProgramAddressSpaceType ProgramMetadata::GetAddressSpaceType() const { 133ProgramAddressSpaceType ProgramMetadata::GetAddressSpaceType() const {
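
This .As<bool>() call, like the batch in emulated_controller.cpp further down, pairs with the new MSVC /we4800 flag from the CMake hunk: implicit BitField-to-bool narrowing is now an error, so the conversion is spelled out. A sketch, assuming Common::BitField exposes an As<T>() accessor as these call sites imply:

    union NpdmFlags {
        u8 raw;
        Common::BitField<0, 1, u8> has_64_bit_instructions; // illustrative layout
    };

    bool Is64Bit(NpdmFlags flags) {
        // return flags.has_64_bit_instructions;        // now an error under /we4800
        return flags.has_64_bit_instructions.As<bool>(); // explicit, warning-clean
    }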
diff --git a/src/core/file_sys/savedata_factory.cpp b/src/core/file_sys/savedata_factory.cpp
index 8c1b2523c..1567da231 100644
--- a/src/core/file_sys/savedata_factory.cpp
+++ b/src/core/file_sys/savedata_factory.cpp
@@ -5,6 +5,7 @@
5#include "common/assert.h" 5#include "common/assert.h"
6#include "common/common_types.h" 6#include "common/common_types.h"
7#include "common/logging/log.h" 7#include "common/logging/log.h"
8#include "common/uuid.h"
8#include "core/core.h" 9#include "core/core.h"
9#include "core/file_sys/savedata_factory.h" 10#include "core/file_sys/savedata_factory.h"
10#include "core/file_sys/vfs.h" 11#include "core/file_sys/vfs.h"
@@ -59,6 +60,36 @@ bool ShouldSaveDataBeAutomaticallyCreated(SaveDataSpaceId space, const SaveDataA
59 attr.title_id == 0 && attr.save_id == 0); 60 attr.title_id == 0 && attr.save_id == 0);
60} 61}
61 62
63std::string GetFutureSaveDataPath(SaveDataSpaceId space_id, SaveDataType type, u64 title_id,
64 u128 user_id) {
65 // Only detect nand user saves.
66 const auto space_id_path = [space_id]() -> std::string_view {
67 switch (space_id) {
68 case SaveDataSpaceId::NandUser:
69 return "/user/save";
70 default:
71 return "";
72 }
73 }();
74
75 if (space_id_path.empty()) {
76 return "";
77 }
78
79 Common::UUID uuid;
80 std::memcpy(uuid.uuid.data(), user_id.data(), sizeof(Common::UUID));
81
82 // Only detect account/device saves from the future location.
83 switch (type) {
84 case SaveDataType::SaveData:
85 return fmt::format("{}/account/{}/{:016X}/1", space_id_path, uuid.RawString(), title_id);
86 case SaveDataType::DeviceSaveData:
87 return fmt::format("{}/device/{:016X}/1", space_id_path, title_id);
88 default:
89 return "";
90 }
91}
92
62} // Anonymous namespace 93} // Anonymous namespace
63 94
64std::string SaveDataAttribute::DebugInfo() const { 95std::string SaveDataAttribute::DebugInfo() const {
@@ -82,7 +113,7 @@ ResultVal<VirtualDir> SaveDataFactory::Create(SaveDataSpaceId space,
82 PrintSaveDataAttributeWarnings(meta); 113 PrintSaveDataAttributeWarnings(meta);
83 114
84 const auto save_directory = 115 const auto save_directory =
85 GetFullPath(system, space, meta.type, meta.title_id, meta.user_id, meta.save_id); 116 GetFullPath(system, dir, space, meta.type, meta.title_id, meta.user_id, meta.save_id);
86 117
87 auto out = dir->CreateDirectoryRelative(save_directory); 118 auto out = dir->CreateDirectoryRelative(save_directory);
88 119
@@ -99,7 +130,7 @@ ResultVal<VirtualDir> SaveDataFactory::Open(SaveDataSpaceId space,
99 const SaveDataAttribute& meta) const { 130 const SaveDataAttribute& meta) const {
100 131
101 const auto save_directory = 132 const auto save_directory =
102 GetFullPath(system, space, meta.type, meta.title_id, meta.user_id, meta.save_id); 133 GetFullPath(system, dir, space, meta.type, meta.title_id, meta.user_id, meta.save_id);
103 134
104 auto out = dir->GetDirectoryRelative(save_directory); 135 auto out = dir->GetDirectoryRelative(save_directory);
105 136
@@ -134,9 +165,9 @@ std::string SaveDataFactory::GetSaveDataSpaceIdPath(SaveDataSpaceId space) {
134 } 165 }
135} 166}
136 167
137std::string SaveDataFactory::GetFullPath(Core::System& system, SaveDataSpaceId space, 168std::string SaveDataFactory::GetFullPath(Core::System& system, VirtualDir dir,
138 SaveDataType type, u64 title_id, u128 user_id, 169 SaveDataSpaceId space, SaveDataType type, u64 title_id,
139 u64 save_id) { 170 u128 user_id, u64 save_id) {
140 // According to switchbrew, if a save is of type SaveData and the title id field is 0, it should 171 // According to switchbrew, if a save is of type SaveData and the title id field is 0, it should
141 // be interpreted as the title id of the current process. 172 // be interpreted as the title id of the current process.
142 if (type == SaveDataType::SaveData || type == SaveDataType::DeviceSaveData) { 173 if (type == SaveDataType::SaveData || type == SaveDataType::DeviceSaveData) {
@@ -145,6 +176,17 @@ std::string SaveDataFactory::GetFullPath(Core::System& system, SaveDataSpaceId s
145 } 176 }
146 } 177 }
147 178
179 // For compat with a future impl.
180 if (std::string future_path =
181 GetFutureSaveDataPath(space, type, title_id & ~(0xFFULL), user_id);
182 !future_path.empty()) {
183 // Check if this location exists, and prefer it over the old.
184 if (const auto future_dir = dir->GetDirectoryRelative(future_path); future_dir != nullptr) {
185 LOG_INFO(Service_FS, "Using save at new location: {}", future_path);
186 return future_path;
187 }
188 }
189
148 std::string out = GetSaveDataSpaceIdPath(space); 190 std::string out = GetSaveDataSpaceIdPath(space);
149 191
150 switch (type) { 192 switch (type) {
@@ -167,7 +209,8 @@ std::string SaveDataFactory::GetFullPath(Core::System& system, SaveDataSpaceId s
167 209
168SaveDataSize SaveDataFactory::ReadSaveDataSize(SaveDataType type, u64 title_id, 210SaveDataSize SaveDataFactory::ReadSaveDataSize(SaveDataType type, u64 title_id,
169 u128 user_id) const { 211 u128 user_id) const {
170 const auto path = GetFullPath(system, SaveDataSpaceId::NandUser, type, title_id, user_id, 0); 212 const auto path =
213 GetFullPath(system, dir, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
171 const auto relative_dir = GetOrCreateDirectoryRelative(dir, path); 214 const auto relative_dir = GetOrCreateDirectoryRelative(dir, path);
172 215
173 const auto size_file = relative_dir->GetFile(SAVE_DATA_SIZE_FILENAME); 216 const auto size_file = relative_dir->GetFile(SAVE_DATA_SIZE_FILENAME);
@@ -185,7 +228,8 @@ SaveDataSize SaveDataFactory::ReadSaveDataSize(SaveDataType type, u64 title_id,
185 228
186void SaveDataFactory::WriteSaveDataSize(SaveDataType type, u64 title_id, u128 user_id, 229void SaveDataFactory::WriteSaveDataSize(SaveDataType type, u64 title_id, u128 user_id,
187 SaveDataSize new_value) const { 230 SaveDataSize new_value) const {
188 const auto path = GetFullPath(system, SaveDataSpaceId::NandUser, type, title_id, user_id, 0); 231 const auto path =
232 GetFullPath(system, dir, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
189 const auto relative_dir = GetOrCreateDirectoryRelative(dir, path); 233 const auto relative_dir = GetOrCreateDirectoryRelative(dir, path);
190 234
191 const auto size_file = relative_dir->CreateFile(SAVE_DATA_SIZE_FILENAME); 235 const auto size_file = relative_dir->CreateFile(SAVE_DATA_SIZE_FILENAME);
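
Concretely, the new-location probe resolves to paths of the following shape before GetFullPath falls back to the legacy layout; note the masked low byte of the title id, so program variants share one save. Values here are made up, and uuid stands for the caller's account id:

    const u64 title_id = 0x0100ABCD12340000ULL; // made-up title id

    const auto account_path = fmt::format(
        "/user/save/account/{}/{:016X}/1", uuid.RawString(), title_id);
    // -> "/user/save/account/<user-uuid>/0100ABCD12340000/1"

    const auto device_path = fmt::format(
        "/user/save/device/{:016X}/1", title_id);
    // -> "/user/save/device/0100ABCD12340000/1"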
diff --git a/src/core/file_sys/savedata_factory.h b/src/core/file_sys/savedata_factory.h
index a763b94c8..d3633ef03 100644
--- a/src/core/file_sys/savedata_factory.h
+++ b/src/core/file_sys/savedata_factory.h
@@ -95,8 +95,8 @@ public:
95 VirtualDir GetSaveDataSpaceDirectory(SaveDataSpaceId space) const; 95 VirtualDir GetSaveDataSpaceDirectory(SaveDataSpaceId space) const;
96 96
97 static std::string GetSaveDataSpaceIdPath(SaveDataSpaceId space); 97 static std::string GetSaveDataSpaceIdPath(SaveDataSpaceId space);
98 static std::string GetFullPath(Core::System& system, SaveDataSpaceId space, SaveDataType type, 98 static std::string GetFullPath(Core::System& system, VirtualDir dir, SaveDataSpaceId space,
99 u64 title_id, u128 user_id, u64 save_id); 99 SaveDataType type, u64 title_id, u128 user_id, u64 save_id);
100 100
101 SaveDataSize ReadSaveDataSize(SaveDataType type, u64 title_id, u128 user_id) const; 101 SaveDataSize ReadSaveDataSize(SaveDataType type, u64 title_id, u128 user_id) const;
102 void WriteSaveDataSize(SaveDataType type, u64 title_id, u128 user_id, 102 void WriteSaveDataSize(SaveDataType type, u64 title_id, u128 user_id,
diff --git a/src/core/hid/emulated_controller.cpp b/src/core/hid/emulated_controller.cpp
index 025f1c78e..ec1364452 100644
--- a/src/core/hid/emulated_controller.cpp
+++ b/src/core/hid/emulated_controller.cpp
@@ -970,14 +970,7 @@ bool EmulatedController::SetVibration(std::size_t device_index, VibrationValue v
970 Common::Input::VibrationError::None; 970 Common::Input::VibrationError::None;
971} 971}
972 972
973bool EmulatedController::TestVibration(std::size_t device_index) { 973bool EmulatedController::IsVibrationEnabled(std::size_t device_index) {
974 if (device_index >= output_devices.size()) {
975 return false;
976 }
977 if (!output_devices[device_index]) {
978 return false;
979 }
980
981 const auto player_index = NpadIdTypeToIndex(npad_id_type); 974 const auto player_index = NpadIdTypeToIndex(npad_id_type);
982 const auto& player = Settings::values.players.GetValue()[player_index]; 975 const auto& player = Settings::values.players.GetValue()[player_index];
983 976
@@ -985,31 +978,15 @@ bool EmulatedController::TestVibration(std::size_t device_index) {
985 return false; 978 return false;
986 } 979 }
987 980
988 const Common::Input::VibrationStatus test_vibration = { 981 if (device_index >= output_devices.size()) {
989 .low_amplitude = 0.001f, 982 return false;
990 .low_frequency = DEFAULT_VIBRATION_VALUE.low_frequency, 983 }
991 .high_amplitude = 0.001f,
992 .high_frequency = DEFAULT_VIBRATION_VALUE.high_frequency,
993 .type = Common::Input::VibrationAmplificationType::Test,
994 };
995
996 const Common::Input::VibrationStatus zero_vibration = {
997 .low_amplitude = DEFAULT_VIBRATION_VALUE.low_amplitude,
998 .low_frequency = DEFAULT_VIBRATION_VALUE.low_frequency,
999 .high_amplitude = DEFAULT_VIBRATION_VALUE.high_amplitude,
1000 .high_frequency = DEFAULT_VIBRATION_VALUE.high_frequency,
1001 .type = Common::Input::VibrationAmplificationType::Test,
1002 };
1003
1004 // Send a slight vibration to test for rumble support
1005 output_devices[device_index]->SetVibration(test_vibration);
1006 984
1007 // Wait for about 15ms to ensure the controller is ready for the stop command 985 if (!output_devices[device_index]) {
1008 std::this_thread::sleep_for(std::chrono::milliseconds(15)); 986 return false;
987 }
1009 988
1010 // Stop any vibration and return the result 989 return output_devices[device_index]->IsVibrationEnabled();
1011 return output_devices[device_index]->SetVibration(zero_vibration) ==
1012 Common::Input::VibrationError::None;
1013} 990}
1014 991
1015bool EmulatedController::SetPollingMode(Common::Input::PollingMode polling_mode) { 992bool EmulatedController::SetPollingMode(Common::Input::PollingMode polling_mode) {
@@ -1048,6 +1025,7 @@ bool EmulatedController::HasNfc() const {
1048 case NpadStyleIndex::JoyconRight: 1025 case NpadStyleIndex::JoyconRight:
1049 case NpadStyleIndex::JoyconDual: 1026 case NpadStyleIndex::JoyconDual:
1050 case NpadStyleIndex::ProController: 1027 case NpadStyleIndex::ProController:
1028 case NpadStyleIndex::Handheld:
1051 break; 1029 break;
1052 default: 1030 default:
1053 return false; 1031 return false;
@@ -1158,27 +1136,27 @@ bool EmulatedController::IsControllerSupported(bool use_temporary_value) const {
1158 const auto type = is_configuring && use_temporary_value ? tmp_npad_type : npad_type; 1136 const auto type = is_configuring && use_temporary_value ? tmp_npad_type : npad_type;
1159 switch (type) { 1137 switch (type) {
1160 case NpadStyleIndex::ProController: 1138 case NpadStyleIndex::ProController:
1161 return supported_style_tag.fullkey; 1139 return supported_style_tag.fullkey.As<bool>();
1162 case NpadStyleIndex::Handheld: 1140 case NpadStyleIndex::Handheld:
1163 return supported_style_tag.handheld; 1141 return supported_style_tag.handheld.As<bool>();
1164 case NpadStyleIndex::JoyconDual: 1142 case NpadStyleIndex::JoyconDual:
1165 return supported_style_tag.joycon_dual; 1143 return supported_style_tag.joycon_dual.As<bool>();
1166 case NpadStyleIndex::JoyconLeft: 1144 case NpadStyleIndex::JoyconLeft:
1167 return supported_style_tag.joycon_left; 1145 return supported_style_tag.joycon_left.As<bool>();
1168 case NpadStyleIndex::JoyconRight: 1146 case NpadStyleIndex::JoyconRight:
1169 return supported_style_tag.joycon_right; 1147 return supported_style_tag.joycon_right.As<bool>();
1170 case NpadStyleIndex::GameCube: 1148 case NpadStyleIndex::GameCube:
1171 return supported_style_tag.gamecube; 1149 return supported_style_tag.gamecube.As<bool>();
1172 case NpadStyleIndex::Pokeball: 1150 case NpadStyleIndex::Pokeball:
1173 return supported_style_tag.palma; 1151 return supported_style_tag.palma.As<bool>();
1174 case NpadStyleIndex::NES: 1152 case NpadStyleIndex::NES:
1175 return supported_style_tag.lark; 1153 return supported_style_tag.lark.As<bool>();
1176 case NpadStyleIndex::SNES: 1154 case NpadStyleIndex::SNES:
1177 return supported_style_tag.lucia; 1155 return supported_style_tag.lucia.As<bool>();
1178 case NpadStyleIndex::N64: 1156 case NpadStyleIndex::N64:
1179 return supported_style_tag.lagoon; 1157 return supported_style_tag.lagoon.As<bool>();
1180 case NpadStyleIndex::SegaGenesis: 1158 case NpadStyleIndex::SegaGenesis:
1181 return supported_style_tag.lager; 1159 return supported_style_tag.lager.As<bool>();
1182 default: 1160 default:
1183 return false; 1161 return false;
1184 } 1162 }
@@ -1234,12 +1212,6 @@ bool EmulatedController::IsConnected(bool get_temporary_value) const {
1234 return is_connected; 1212 return is_connected;
1235} 1213}
1236 1214
1237bool EmulatedController::IsVibrationEnabled() const {
1238 const auto player_index = NpadIdTypeToIndex(npad_id_type);
1239 const auto& player = Settings::values.players.GetValue()[player_index];
1240 return player.vibration_enabled;
1241}
1242
1243NpadIdType EmulatedController::GetNpadIdType() const { 1215NpadIdType EmulatedController::GetNpadIdType() const {
1244 std::scoped_lock lock{mutex}; 1216 std::scoped_lock lock{mutex};
1245 return npad_id_type; 1217 return npad_id_type;
diff --git a/src/core/hid/emulated_controller.h b/src/core/hid/emulated_controller.h
index 319226bf8..d004ca56a 100644
--- a/src/core/hid/emulated_controller.h
+++ b/src/core/hid/emulated_controller.h
@@ -206,9 +206,6 @@ public:
206 */ 206 */
207 bool IsConnected(bool get_temporary_value = false) const; 207 bool IsConnected(bool get_temporary_value = false) const;
208 208
209 /// Returns true if vibration is enabled
210 bool IsVibrationEnabled() const;
211
212 /// Removes all callbacks created from input devices 209 /// Removes all callbacks created from input devices
213 void UnloadInput(); 210 void UnloadInput();
214 211
@@ -339,7 +336,7 @@ public:
339 * Sends a small vibration to the output device 336 * Sends a small vibration to the output device
340 * @return true if SetVibration was successful 337 */
341 */ 338 */
342 bool TestVibration(std::size_t device_index); 339 bool IsVibrationEnabled(std::size_t device_index);
343 340
344 /** 341 /**
345 * Sets the desired data to be polled from a controller 342 * Sets the desired data to be polled from a controller
diff --git a/src/core/hle/ipc_helpers.h b/src/core/hle/ipc_helpers.h
index 0cc26a211..18fde8bd6 100644
--- a/src/core/hle/ipc_helpers.h
+++ b/src/core/hle/ipc_helpers.h
@@ -86,13 +86,13 @@ public:
86 u32 num_domain_objects{}; 86 u32 num_domain_objects{};
87 const bool always_move_handles{ 87 const bool always_move_handles{
88 (static_cast<u32>(flags) & static_cast<u32>(Flags::AlwaysMoveHandles)) != 0}; 88 (static_cast<u32>(flags) & static_cast<u32>(Flags::AlwaysMoveHandles)) != 0};
89 if (!ctx.Session()->IsDomain() || always_move_handles) { 89 if (!ctx.Session()->GetSessionRequestManager()->IsDomain() || always_move_handles) {
90 num_handles_to_move = num_objects_to_move; 90 num_handles_to_move = num_objects_to_move;
91 } else { 91 } else {
92 num_domain_objects = num_objects_to_move; 92 num_domain_objects = num_objects_to_move;
93 } 93 }
94 94
95 if (ctx.Session()->IsDomain()) { 95 if (ctx.Session()->GetSessionRequestManager()->IsDomain()) {
96 raw_data_size += 96 raw_data_size +=
97 static_cast<u32>(sizeof(DomainMessageHeader) / sizeof(u32) + num_domain_objects); 97 static_cast<u32>(sizeof(DomainMessageHeader) / sizeof(u32) + num_domain_objects);
98 ctx.write_size += num_domain_objects; 98 ctx.write_size += num_domain_objects;
@@ -125,7 +125,8 @@ public:
125 if (!ctx.IsTipc()) { 125 if (!ctx.IsTipc()) {
126 AlignWithPadding(); 126 AlignWithPadding();
127 127
128 if (ctx.Session()->IsDomain() && ctx.HasDomainMessageHeader()) { 128 if (ctx.Session()->GetSessionRequestManager()->IsDomain() &&
129 ctx.HasDomainMessageHeader()) {
129 IPC::DomainMessageHeader domain_header{}; 130 IPC::DomainMessageHeader domain_header{};
130 domain_header.num_objects = num_domain_objects; 131 domain_header.num_objects = num_domain_objects;
131 PushRaw(domain_header); 132 PushRaw(domain_header);
@@ -145,7 +146,7 @@ public:
145 146
146 template <class T> 147 template <class T>
147 void PushIpcInterface(std::shared_ptr<T> iface) { 148 void PushIpcInterface(std::shared_ptr<T> iface) {
148 if (context->Session()->IsDomain()) { 149 if (context->Session()->GetSessionRequestManager()->IsDomain()) {
149 context->AddDomainObject(std::move(iface)); 150 context->AddDomainObject(std::move(iface));
150 } else { 151 } else {
151 kernel.CurrentProcess()->GetResourceLimit()->Reserve( 152 kernel.CurrentProcess()->GetResourceLimit()->Reserve(
@@ -386,7 +387,7 @@ public:
386 387
387 template <class T> 388 template <class T>
388 std::weak_ptr<T> PopIpcInterface() { 389 std::weak_ptr<T> PopIpcInterface() {
389 ASSERT(context->Session()->IsDomain()); 390 ASSERT(context->Session()->GetSessionRequestManager()->IsDomain());
390 ASSERT(context->GetDomainMessageHeader().input_object_count > 0); 391 ASSERT(context->GetDomainMessageHeader().input_object_count > 0);
391 return context->GetDomainHandler<T>(Pop<u32>() - 1); 392 return context->GetDomainHandler<T>(Pop<u32>() - 1);
392 } 393 }
@@ -405,7 +406,7 @@ inline s32 RequestParser::Pop() {
405} 406}
406 407
407// Ignore the -Wclass-memaccess warning on memcpy for non-trivially default constructible objects. 408// Ignore the -Wclass-memaccess warning on memcpy for non-trivially default constructible objects.
408#if defined(__GNUC__) 409#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER)
409#pragma GCC diagnostic push 410#pragma GCC diagnostic push
410#pragma GCC diagnostic ignored "-Wclass-memaccess" 411#pragma GCC diagnostic ignored "-Wclass-memaccess"
411#endif 412#endif
@@ -416,7 +417,7 @@ void RequestParser::PopRaw(T& value) {
416 std::memcpy(&value, cmdbuf + index, sizeof(T)); 417 std::memcpy(&value, cmdbuf + index, sizeof(T));
417 index += (sizeof(T) + 3) / 4; // round up to word length 418 index += (sizeof(T) + 3) / 4; // round up to word length
418} 419}
419#if defined(__GNUC__) 420#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER)
420#pragma GCC diagnostic pop 421#pragma GCC diagnostic pop
421#endif 422#endif
422 423
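
Aside: clang and the Intel compiler also define __GNUC__, but neither implements GCC's -Wclass-memaccess, so the previously unguarded pragma could itself raise unknown-pragma/unknown-warning-option diagnostics there. A self-contained sketch of the tightened guard pattern (the struct and function are illustrative, not yuzu code):

#include <cstring>

// A default member initializer makes the type non-trivially default
// constructible, the kind of memcpy destination -Wclass-memaccess flags.
struct NonTrivial {
    unsigned value{0xdeadbeefU};
};

#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#endif
inline void CopyRaw(NonTrivial& out, const void* src) {
    std::memcpy(&out, src, sizeof(NonTrivial)); // suppressed on true GCC only
}
#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER)
#pragma GCC diagnostic pop
#endif
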
diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp
index 65576b8c4..fd911a3a5 100644
--- a/src/core/hle/kernel/global_scheduler_context.cpp
+++ b/src/core/hle/kernel/global_scheduler_context.cpp
@@ -49,4 +49,26 @@ bool GlobalSchedulerContext::IsLocked() const {
49 return scheduler_lock.IsLockedByCurrentThread(); 49 return scheduler_lock.IsLockedByCurrentThread();
50} 50}
51 51
52void GlobalSchedulerContext::RegisterDummyThreadForWakeup(KThread* thread) {
53 ASSERT(IsLocked());
54
55 woken_dummy_threads.insert(thread);
56}
57
58void GlobalSchedulerContext::UnregisterDummyThreadForWakeup(KThread* thread) {
59 ASSERT(IsLocked());
60
61 woken_dummy_threads.erase(thread);
62}
63
64void GlobalSchedulerContext::WakeupWaitingDummyThreads() {
65 ASSERT(IsLocked());
66
67 for (auto* thread : woken_dummy_threads) {
68 thread->DummyThreadEndWait();
69 }
70
71 woken_dummy_threads.clear();
72}
73
52} // namespace Kernel 74} // namespace Kernel
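
Aside: the three new methods form a deferred-wakeup queue. While the scheduler lock is held, dummy threads (host threads that service HLE requests) which become runnable are only recorded in woken_dummy_threads; WakeupWaitingDummyThreads() then wakes the whole batch before the lock is dropped, so no wakeup is delivered while scheduler state is still being mutated. A stripped-down sketch of the pattern with hypothetical stand-in types:

#include <condition_variable>
#include <mutex>
#include <set>

struct DummyThread {
    std::mutex m;
    std::condition_variable cv;
    bool woken{false};

    void Wait() { // the dummy thread parks here while its request is serviced
        std::unique_lock lk{m};
        cv.wait(lk, [this] { return woken; });
        woken = false;
    }

    void EndWait() { // analogous to KThread::DummyThreadEndWait()
        std::scoped_lock lk{m};
        woken = true;
        cv.notify_one();
    }
};

class SchedulerContext {
public:
    // All three are called with the (conceptual) scheduler lock held.
    void RegisterForWakeup(DummyThread* t) { pending.insert(t); }
    void Unregister(DummyThread* t) { pending.erase(t); }

    void WakeupAll() { // drained once, just before the lock is released
        for (auto* t : pending) {
            t->EndWait();
        }
        pending.clear();
    }

private:
    std::set<DummyThread*> pending;
};
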
diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h
index 67bb9852d..220ed6192 100644
--- a/src/core/hle/kernel/global_scheduler_context.h
+++ b/src/core/hle/kernel/global_scheduler_context.h
@@ -4,6 +4,7 @@
4#pragma once 4#pragma once
5 5
6#include <atomic> 6#include <atomic>
7#include <set>
7#include <vector> 8#include <vector>
8 9
9#include "common/common_types.h" 10#include "common/common_types.h"
@@ -58,6 +59,10 @@ public:
58 /// Returns true if the global scheduler lock is acquired 59 /// Returns true if the global scheduler lock is acquired
59 bool IsLocked() const; 60 bool IsLocked() const;
60 61
62 void UnregisterDummyThreadForWakeup(KThread* thread);
63 void RegisterDummyThreadForWakeup(KThread* thread);
64 void WakeupWaitingDummyThreads();
65
61 [[nodiscard]] LockType& SchedulerLock() { 66 [[nodiscard]] LockType& SchedulerLock() {
62 return scheduler_lock; 67 return scheduler_lock;
63 } 68 }
@@ -76,6 +81,9 @@ private:
76 KSchedulerPriorityQueue priority_queue; 81 KSchedulerPriorityQueue priority_queue;
77 LockType scheduler_lock; 82 LockType scheduler_lock;
78 83
84 /// Lists dummy threads pending wakeup on lock release
85 std::set<KThread*> woken_dummy_threads;
86
79 /// Lists all thread ids that aren't deleted/etc. 87 /// Lists all thread ids that aren't deleted/etc.
80 std::vector<KThread*> thread_list; 88 std::vector<KThread*> thread_list;
81 std::mutex global_list_guard; 89 std::mutex global_list_guard;
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index 5b3feec66..e4f43a053 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -19,6 +19,7 @@
19#include "core/hle/kernel/k_server_session.h" 19#include "core/hle/kernel/k_server_session.h"
20#include "core/hle/kernel/k_thread.h" 20#include "core/hle/kernel/k_thread.h"
21#include "core/hle/kernel/kernel.h" 21#include "core/hle/kernel/kernel.h"
22#include "core/hle/kernel/service_thread.h"
22#include "core/memory.h" 23#include "core/memory.h"
23 24
24namespace Kernel { 25namespace Kernel {
@@ -56,16 +57,103 @@ bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& co
56 } 57 }
57} 58}
58 59
60Result SessionRequestManager::CompleteSyncRequest(KServerSession* server_session,
61 HLERequestContext& context) {
62 Result result = ResultSuccess;
63
64 // If the session has been converted to a domain, handle the domain request
65 if (this->HasSessionRequestHandler(context)) {
66 if (IsDomain() && context.HasDomainMessageHeader()) {
67 result = HandleDomainSyncRequest(server_session, context);
68 // If there is no domain header, the regular session handler is used
69 } else if (this->HasSessionHandler()) {
70 // If this manager has an associated HLE handler, forward the request to it.
71 result = this->SessionHandler().HandleSyncRequest(*server_session, context);
72 }
73 } else {
74 ASSERT_MSG(false, "Session handler is invalid, stubbing response!");
75 IPC::ResponseBuilder rb(context, 2);
76 rb.Push(ResultSuccess);
77 }
78
79 if (convert_to_domain) {
80 ASSERT_MSG(!IsDomain(), "ServerSession is already a domain instance.");
81 this->ConvertToDomain();
82 convert_to_domain = false;
83 }
84
85 return result;
86}
87
88Result SessionRequestManager::HandleDomainSyncRequest(KServerSession* server_session,
89 HLERequestContext& context) {
90 if (!context.HasDomainMessageHeader()) {
91 return ResultSuccess;
92 }
93
94 // Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs
95 context.SetSessionRequestManager(server_session->GetSessionRequestManager());
96
97 // If there is a DomainMessageHeader, then this is CommandType "Request"
98 const auto& domain_message_header = context.GetDomainMessageHeader();
99 const u32 object_id{domain_message_header.object_id};
100 switch (domain_message_header.command) {
101 case IPC::DomainMessageHeader::CommandType::SendMessage:
102 if (object_id > this->DomainHandlerCount()) {
103 LOG_CRITICAL(IPC,
104 "object_id {} is too big! This probably means a recent service call "
105 "needed to return a new interface!",
106 object_id);
107 ASSERT(false);
108 return ResultSuccess; // Ignore error if asserts are off
109 }
110 if (auto strong_ptr = this->DomainHandler(object_id - 1).lock()) {
111 return strong_ptr->HandleSyncRequest(*server_session, context);
112 } else {
113 ASSERT(false);
114 return ResultSuccess;
115 }
116
117 case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: {
118 LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id);
119
120 this->CloseDomainHandler(object_id - 1);
121
122 IPC::ResponseBuilder rb{context, 2};
123 rb.Push(ResultSuccess);
124 return ResultSuccess;
125 }
126 }
127
128 LOG_CRITICAL(IPC, "Unknown domain command={}", domain_message_header.command.Value());
129 ASSERT(false);
130 return ResultSuccess;
131}
132
133Result SessionRequestManager::QueueSyncRequest(KSession* parent,
134 std::shared_ptr<HLERequestContext>&& context) {
135 // Ensure we have a session request handler
136 if (this->HasSessionRequestHandler(*context)) {
137 if (auto strong_ptr = this->GetServiceThread().lock()) {
138 strong_ptr->QueueSyncRequest(*parent, std::move(context));
139 } else {
140 ASSERT_MSG(false, "strong_ptr is nullptr!");
141 }
142 } else {
143 ASSERT_MSG(false, "handler is invalid!");
144 }
145
146 return ResultSuccess;
147}
148
59void SessionRequestHandler::ClientConnected(KServerSession* session) { 149void SessionRequestHandler::ClientConnected(KServerSession* session) {
60 session->ClientConnected(shared_from_this()); 150 session->GetSessionRequestManager()->SetSessionHandler(shared_from_this());
61 151
62 // Ensure our server session is tracked globally. 152 // Ensure our server session is tracked globally.
63 kernel.RegisterServerObject(session); 153 kernel.RegisterServerObject(session);
64} 154}
65 155
66void SessionRequestHandler::ClientDisconnected(KServerSession* session) { 156void SessionRequestHandler::ClientDisconnected(KServerSession* session) {}
67 session->ClientDisconnected();
68}
69 157
70HLERequestContext::HLERequestContext(KernelCore& kernel_, Core::Memory::Memory& memory_, 158HLERequestContext::HLERequestContext(KernelCore& kernel_, Core::Memory::Memory& memory_,
71 KServerSession* server_session_, KThread* thread_) 159 KServerSession* server_session_, KThread* thread_)
@@ -126,7 +214,7 @@ void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32
126 // Padding to align to 16 bytes 214 // Padding to align to 16 bytes
127 rp.AlignWithPadding(); 215 rp.AlignWithPadding();
128 216
129 if (Session()->IsDomain() && 217 if (Session()->GetSessionRequestManager()->IsDomain() &&
130 ((command_header->type == IPC::CommandType::Request || 218 ((command_header->type == IPC::CommandType::Request ||
131 command_header->type == IPC::CommandType::RequestWithContext) || 219 command_header->type == IPC::CommandType::RequestWithContext) ||
132 !incoming)) { 220 !incoming)) {
@@ -135,7 +223,7 @@ void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32
135 if (incoming || domain_message_header) { 223 if (incoming || domain_message_header) {
136 domain_message_header = rp.PopRaw<IPC::DomainMessageHeader>(); 224 domain_message_header = rp.PopRaw<IPC::DomainMessageHeader>();
137 } else { 225 } else {
138 if (Session()->IsDomain()) { 226 if (Session()->GetSessionRequestManager()->IsDomain()) {
139 LOG_WARNING(IPC, "Domain request has no DomainMessageHeader!"); 227 LOG_WARNING(IPC, "Domain request has no DomainMessageHeader!");
140 } 228 }
141 } 229 }
@@ -228,12 +316,12 @@ Result HLERequestContext::WriteToOutgoingCommandBuffer(KThread& requesting_threa
228 // Write the domain objects to the command buffer, these go after the raw untranslated data. 316 // Write the domain objects to the command buffer, these go after the raw untranslated data.
229 // TODO(Subv): This completely ignores C buffers. 317 // TODO(Subv): This completely ignores C buffers.
230 318
231 if (Session()->IsDomain()) { 319 if (server_session->GetSessionRequestManager()->IsDomain()) {
232 current_offset = domain_offset - static_cast<u32>(outgoing_domain_objects.size()); 320 current_offset = domain_offset - static_cast<u32>(outgoing_domain_objects.size());
233 for (const auto& object : outgoing_domain_objects) { 321 for (auto& object : outgoing_domain_objects) {
234 server_session->AppendDomainHandler(object); 322 server_session->GetSessionRequestManager()->AppendDomainHandler(std::move(object));
235 cmd_buf[current_offset++] = 323 cmd_buf[current_offset++] = static_cast<u32_le>(
236 static_cast<u32_le>(server_session->NumDomainRequestHandlers()); 324 server_session->GetSessionRequestManager()->DomainHandlerCount());
237 } 325 }
238 } 326 }
239 327
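
Aside: CompleteSyncRequest, HandleDomainSyncRequest, and QueueSyncRequest move into SessionRequestManager, which already owns the handler table. Domain dispatch is by 1-based object_id: id 0 is invalid, slot lookup is object_id - 1, and CloseVirtualHandle nulls a slot rather than erasing it so later ids stay stable. A compact sketch of that scheme (hypothetical types, not yuzu's):

#include <memory>
#include <vector>

struct Handler {
    virtual ~Handler() = default;
    virtual int HandleSyncRequest() = 0;
};

class DomainTable {
public:
    unsigned Append(std::shared_ptr<Handler> h) { // returns a fresh 1-based id
        handlers.push_back(std::move(h));
        return static_cast<unsigned>(handlers.size());
    }

    void Close(unsigned object_id) {
        handlers.at(object_id - 1).reset(); // null the slot; later ids stay valid
    }

    int Dispatch(unsigned object_id) {
        if (object_id == 0 || object_id > handlers.size()) {
            return -1; // bad id: mirrors the LOG_CRITICAL + ASSERT path above
        }
        if (auto h = handlers[object_id - 1]) {
            return h->HandleSyncRequest();
        }
        return -1; // slot already closed
    }

private:
    std::vector<std::shared_ptr<Handler>> handlers;
};
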
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index e258e2cdf..1083638a9 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
@@ -121,6 +121,10 @@ public:
121 is_domain = true; 121 is_domain = true;
122 } 122 }
123 123
124 void ConvertToDomainOnRequestEnd() {
125 convert_to_domain = true;
126 }
127
124 std::size_t DomainHandlerCount() const { 128 std::size_t DomainHandlerCount() const {
125 return domain_handlers.size(); 129 return domain_handlers.size();
126 } 130 }
@@ -164,7 +168,12 @@ public:
164 168
165 bool HasSessionRequestHandler(const HLERequestContext& context) const; 169 bool HasSessionRequestHandler(const HLERequestContext& context) const;
166 170
171 Result HandleDomainSyncRequest(KServerSession* server_session, HLERequestContext& context);
172 Result CompleteSyncRequest(KServerSession* server_session, HLERequestContext& context);
173 Result QueueSyncRequest(KSession* parent, std::shared_ptr<HLERequestContext>&& context);
174
167private: 175private:
176 bool convert_to_domain{};
168 bool is_domain{}; 177 bool is_domain{};
169 SessionRequestHandlerPtr session_handler; 178 SessionRequestHandlerPtr session_handler;
170 std::vector<SessionRequestHandlerPtr> domain_handlers; 179 std::vector<SessionRequestHandlerPtr> domain_handlers;
@@ -295,7 +304,7 @@ public:
295 */ 304 */
296 template <typename T, typename = std::enable_if_t<!std::is_pointer_v<T>>> 305 template <typename T, typename = std::enable_if_t<!std::is_pointer_v<T>>>
297 std::size_t WriteBuffer(const T& data, std::size_t buffer_index = 0) const { 306 std::size_t WriteBuffer(const T& data, std::size_t buffer_index = 0) const {
298 if constexpr (Common::IsSTLContainer<T>) { 307 if constexpr (Common::IsContiguousContainer<T>) {
299 using ContiguousType = typename T::value_type; 308 using ContiguousType = typename T::value_type;
300 static_assert(std::is_trivially_copyable_v<ContiguousType>, 309 static_assert(std::is_trivially_copyable_v<ContiguousType>,
301 "Container to WriteBuffer must contain trivially copyable objects"); 310 "Container to WriteBuffer must contain trivially copyable objects");
diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp
index 9b6b284d0..477e4e407 100644
--- a/src/core/hle/kernel/init/init_slab_setup.cpp
+++ b/src/core/hle/kernel/init/init_slab_setup.cpp
@@ -18,6 +18,7 @@
18#include "core/hle/kernel/k_process.h" 18#include "core/hle/kernel/k_process.h"
19#include "core/hle/kernel/k_resource_limit.h" 19#include "core/hle/kernel/k_resource_limit.h"
20#include "core/hle/kernel/k_session.h" 20#include "core/hle/kernel/k_session.h"
21#include "core/hle/kernel/k_session_request.h"
21#include "core/hle/kernel/k_shared_memory.h" 22#include "core/hle/kernel/k_shared_memory.h"
22#include "core/hle/kernel/k_shared_memory_info.h" 23#include "core/hle/kernel/k_shared_memory_info.h"
23#include "core/hle/kernel/k_system_control.h" 24#include "core/hle/kernel/k_system_control.h"
@@ -34,6 +35,7 @@ namespace Kernel::Init {
34 HANDLER(KThread, (SLAB_COUNT(KThread)), ##__VA_ARGS__) \ 35 HANDLER(KThread, (SLAB_COUNT(KThread)), ##__VA_ARGS__) \
35 HANDLER(KEvent, (SLAB_COUNT(KEvent)), ##__VA_ARGS__) \ 36 HANDLER(KEvent, (SLAB_COUNT(KEvent)), ##__VA_ARGS__) \
36 HANDLER(KPort, (SLAB_COUNT(KPort)), ##__VA_ARGS__) \ 37 HANDLER(KPort, (SLAB_COUNT(KPort)), ##__VA_ARGS__) \
38 HANDLER(KSessionRequest, (SLAB_COUNT(KSession) * 2), ##__VA_ARGS__) \
37 HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ##__VA_ARGS__) \ 39 HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ##__VA_ARGS__) \
38 HANDLER(KSharedMemoryInfo, (SLAB_COUNT(KSharedMemory) * 8), ##__VA_ARGS__) \ 40 HANDLER(KSharedMemoryInfo, (SLAB_COUNT(KSharedMemory) * 8), ##__VA_ARGS__) \
39 HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__) \ 41 HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__) \
@@ -94,8 +96,8 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd
94 // TODO(bunnei): Fix this once we support the kernel virtual memory layout. 96 // TODO(bunnei): Fix this once we support the kernel virtual memory layout.
95 97
96 if (size > 0) { 98 if (size > 0) {
97 void* backing_kernel_memory{ 99 void* backing_kernel_memory{system.DeviceMemory().GetPointer<void>(
98 system.DeviceMemory().GetPointer(TranslateSlabAddrToPhysical(memory_layout, start))}; 100 TranslateSlabAddrToPhysical(memory_layout, start))};
99 101
100 const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1); 102 const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1);
101 ASSERT(region != nullptr); 103 ASSERT(region != nullptr);
@@ -181,7 +183,7 @@ void InitializeKPageBufferSlabHeap(Core::System& system) {
181 ASSERT(slab_address != 0); 183 ASSERT(slab_address != 0);
182 184
183 // Initialize the slabheap. 185 // Initialize the slabheap.
184 KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer(slab_address), 186 KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer<void>(slab_address),
185 slab_size); 187 slab_size);
186} 188}
187 189
diff --git a/src/core/hle/kernel/k_client_session.cpp b/src/core/hle/kernel/k_client_session.cpp
index 8892c5b7c..b4197a8d5 100644
--- a/src/core/hle/kernel/k_client_session.cpp
+++ b/src/core/hle/kernel/k_client_session.cpp
@@ -1,6 +1,7 @@
1// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project 1// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later 2// SPDX-License-Identifier: GPL-2.0-or-later
3 3
4#include "common/scope_exit.h"
4#include "core/hle/kernel/hle_ipc.h" 5#include "core/hle/kernel/hle_ipc.h"
5#include "core/hle/kernel/k_client_session.h" 6#include "core/hle/kernel/k_client_session.h"
6#include "core/hle/kernel/k_server_session.h" 7#include "core/hle/kernel/k_server_session.h"
@@ -10,6 +11,8 @@
10 11
11namespace Kernel { 12namespace Kernel {
12 13
14static constexpr u32 MessageBufferSize = 0x100;
15
13KClientSession::KClientSession(KernelCore& kernel_) 16KClientSession::KClientSession(KernelCore& kernel_)
14 : KAutoObjectWithSlabHeapAndContainer{kernel_} {} 17 : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
15KClientSession::~KClientSession() = default; 18KClientSession::~KClientSession() = default;
@@ -22,8 +25,16 @@ void KClientSession::Destroy() {
22void KClientSession::OnServerClosed() {} 25void KClientSession::OnServerClosed() {}
23 26
24Result KClientSession::SendSyncRequest() { 27Result KClientSession::SendSyncRequest() {
25 // Signal the server session that new data is available 28 // Create a session request.
26 return parent->GetServerSession().OnRequest(); 29 KSessionRequest* request = KSessionRequest::Create(kernel);
30 R_UNLESS(request != nullptr, ResultOutOfResource);
31 SCOPE_EXIT({ request->Close(); });
32
33 // Initialize the request.
34 request->Initialize(nullptr, GetCurrentThread(kernel).GetTLSAddress(), MessageBufferSize);
35
36 // Send the request.
37 return parent->GetServerSession().OnRequest(request);
27} 38}
28 39
29} // namespace Kernel 40} // namespace Kernel
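
Aside: SendSyncRequest now allocates a KSessionRequest and guarantees its release on every exit path via SCOPE_EXIT. The same discipline with an explicit RAII guard (Request is a hypothetical stand-in for the slab-allocated object):

#include <utility>

template <typename F>
class ScopeExit { // minimal equivalent of yuzu's SCOPE_EXIT macro
public:
    explicit ScopeExit(F f) : func(std::move(f)) {}
    ~ScopeExit() { func(); }
    ScopeExit(const ScopeExit&) = delete;
    ScopeExit& operator=(const ScopeExit&) = delete;

private:
    F func;
};

struct Request {
    static Request* Create() { return new Request; } // stands in for slab allocation
    void Close() { delete this; }                    // stands in for dropping the create reference
};

inline int SendSyncRequest() {
    Request* request = Request::Create();
    if (request == nullptr) {
        return -1; // mirrors R_UNLESS(request != nullptr, ResultOutOfResource)
    }
    ScopeExit guard{[&] { request->Close(); }};

    // ... initialize and send; Close() runs on every return path ...
    return 0;
}
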
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index da57ceb21..4b1c134d4 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -34,7 +34,7 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
34 34
35 // Clear the memory. 35 // Clear the memory.
36 for (const auto& block : m_page_group.Nodes()) { 36 for (const auto& block : m_page_group.Nodes()) {
37 std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize()); 37 std::memset(device_memory.GetPointer<void>(block.GetAddress()), 0xFF, block.GetSize());
38 } 38 }
39 39
40 // Set remaining tracking members. 40 // Set remaining tracking members.
diff --git a/src/core/hle/kernel/k_dynamic_page_manager.h b/src/core/hle/kernel/k_dynamic_page_manager.h
new file mode 100644
index 000000000..9076c8fa3
--- /dev/null
+++ b/src/core/hle/kernel/k_dynamic_page_manager.h
@@ -0,0 +1,136 @@
1// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "common/alignment.h"
7#include "common/common_types.h"
8#include "core/hle/kernel/k_page_bitmap.h"
9#include "core/hle/kernel/k_spin_lock.h"
10#include "core/hle/kernel/memory_types.h"
11#include "core/hle/kernel/svc_results.h"
12
13namespace Kernel {
14
15class KDynamicPageManager {
16public:
17 class PageBuffer {
18 private:
19 u8 m_buffer[PageSize];
20 };
21 static_assert(sizeof(PageBuffer) == PageSize);
22
23public:
24 KDynamicPageManager() = default;
25
26 template <typename T>
27 T* GetPointer(VAddr addr) {
28 return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address));
29 }
30
31 template <typename T>
32 const T* GetPointer(VAddr addr) const {
 33 return reinterpret_cast<const T*>(m_backing_memory.data() + (addr - m_address));
34 }
35
36 Result Initialize(VAddr addr, size_t sz) {
37 // We need to have positive size.
38 R_UNLESS(sz > 0, ResultOutOfMemory);
39 m_backing_memory.resize(sz);
40
41 // Calculate management overhead.
42 const size_t management_size =
43 KPageBitmap::CalculateManagementOverheadSize(sz / sizeof(PageBuffer));
44 const size_t allocatable_size = sz - management_size;
45
46 // Set tracking fields.
47 m_address = addr;
48 m_size = Common::AlignDown(allocatable_size, sizeof(PageBuffer));
49 m_count = allocatable_size / sizeof(PageBuffer);
50 R_UNLESS(m_count > 0, ResultOutOfMemory);
51
52 // Clear the management region.
53 u64* management_ptr = GetPointer<u64>(m_address + allocatable_size);
54 std::memset(management_ptr, 0, management_size);
55
56 // Initialize the bitmap.
57 m_page_bitmap.Initialize(management_ptr, m_count);
58
59 // Free the pages to the bitmap.
60 for (size_t i = 0; i < m_count; i++) {
61 // Ensure the freed page is all-zero.
62 std::memset(GetPointer<PageBuffer>(m_address) + i, 0, PageSize);
63
64 // Set the bit for the free page.
65 m_page_bitmap.SetBit(i);
66 }
67
68 R_SUCCEED();
69 }
70
71 VAddr GetAddress() const {
72 return m_address;
73 }
74 size_t GetSize() const {
75 return m_size;
76 }
77 size_t GetUsed() const {
78 return m_used;
79 }
80 size_t GetPeak() const {
81 return m_peak;
82 }
83 size_t GetCount() const {
84 return m_count;
85 }
86
87 PageBuffer* Allocate() {
88 // Take the lock.
89 // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
90 KScopedSpinLock lk(m_lock);
91
92 // Find a random free block.
93 s64 soffset = m_page_bitmap.FindFreeBlock(true);
94 if (soffset < 0) [[unlikely]] {
95 return nullptr;
96 }
97
98 const size_t offset = static_cast<size_t>(soffset);
99
100 // Update our tracking.
101 m_page_bitmap.ClearBit(offset);
102 m_peak = std::max(m_peak, (++m_used));
103
104 return GetPointer<PageBuffer>(m_address) + offset;
105 }
106
107 void Free(PageBuffer* pb) {
108 // Ensure all pages in the heap are zero.
109 std::memset(pb, 0, PageSize);
110
111 // Take the lock.
112 // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
113 KScopedSpinLock lk(m_lock);
114
115 // Set the bit for the free page.
 116 size_t offset = (reinterpret_cast<uintptr_t>(pb) - reinterpret_cast<uintptr_t>(m_backing_memory.data())) / sizeof(PageBuffer);
117 m_page_bitmap.SetBit(offset);
118
119 // Decrement our used count.
120 --m_used;
121 }
122
123private:
124 KSpinLock m_lock;
125 KPageBitmap m_page_bitmap;
126 size_t m_used{};
127 size_t m_peak{};
128 size_t m_count{};
129 VAddr m_address{};
130 size_t m_size{};
131
132 // TODO(bunnei): Back by host memory until we emulate kernel virtual address space.
133 std::vector<u8> m_backing_memory;
134};
135
136} // namespace Kernel
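
Aside: KDynamicPageManager is a page-granular allocator. A KPageBitmap stored at the end of the region records which PageBuffer slots are free (set bit = free); Allocate() clears a chosen set bit and Free() re-sets it after zeroing the page. The same bookkeeping in miniature, with std::bitset standing in for KPageBitmap and a linear scan instead of the randomized FindFreeBlock:

#include <algorithm>
#include <bitset>
#include <cstddef>
#include <optional>

template <std::size_t NumPages>
class TinyPageBitmap {
public:
    TinyPageBitmap() { free_map.set(); } // every page starts free

    std::optional<std::size_t> Allocate() {
        for (std::size_t i = 0; i < NumPages; ++i) {
            if (free_map.test(i)) {
                free_map.reset(i); // clear the bit: page i is now in use
                peak = std::max(peak, ++used);
                return i;
            }
        }
        return std::nullopt; // out of pages
    }

    void Free(std::size_t index) {
        free_map.set(index); // page is free again
        --used;
    }

private:
    std::bitset<NumPages> free_map;
    std::size_t used{};
    std::size_t peak{};
};
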
diff --git a/src/core/hle/kernel/k_dynamic_resource_manager.h b/src/core/hle/kernel/k_dynamic_resource_manager.h
new file mode 100644
index 000000000..1ce517e8e
--- /dev/null
+++ b/src/core/hle/kernel/k_dynamic_resource_manager.h
@@ -0,0 +1,58 @@
1// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "common/common_funcs.h"
7#include "core/hle/kernel/k_dynamic_slab_heap.h"
8#include "core/hle/kernel/k_memory_block.h"
9
10namespace Kernel {
11
12template <typename T, bool ClearNode = false>
13class KDynamicResourceManager {
14 YUZU_NON_COPYABLE(KDynamicResourceManager);
15 YUZU_NON_MOVEABLE(KDynamicResourceManager);
16
17public:
18 using DynamicSlabType = KDynamicSlabHeap<T, ClearNode>;
19
20public:
21 constexpr KDynamicResourceManager() = default;
22
23 constexpr size_t GetSize() const {
24 return m_slab_heap->GetSize();
25 }
26 constexpr size_t GetUsed() const {
27 return m_slab_heap->GetUsed();
28 }
29 constexpr size_t GetPeak() const {
30 return m_slab_heap->GetPeak();
31 }
32 constexpr size_t GetCount() const {
33 return m_slab_heap->GetCount();
34 }
35
36 void Initialize(KDynamicPageManager* page_allocator, DynamicSlabType* slab_heap) {
37 m_page_allocator = page_allocator;
38 m_slab_heap = slab_heap;
39 }
40
41 T* Allocate() const {
42 return m_slab_heap->Allocate(m_page_allocator);
43 }
44
45 void Free(T* t) const {
46 m_slab_heap->Free(t);
47 }
48
49private:
50 KDynamicPageManager* m_page_allocator{};
51 DynamicSlabType* m_slab_heap{};
52};
53
54class KMemoryBlockSlabManager : public KDynamicResourceManager<KMemoryBlock> {};
55
56using KMemoryBlockSlabHeap = typename KMemoryBlockSlabManager::DynamicSlabType;
57
58} // namespace Kernel
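
Aside: the new types compose into one allocator stack: KDynamicPageManager supplies whole pages, KDynamicSlabHeap carves them into fixed-size nodes, and KDynamicResourceManager front-ends both. A wiring sketch under the assumption that a caller owns all three objects (the helper and its parameters are illustrative; the concrete wiring elsewhere in the kernel may differ):

#include "common/assert.h"
#include "core/hle/kernel/k_dynamic_resource_manager.h"

// 1. page_manager.Initialize(region, region_size): bitmap-managed pages.
// 2. block_heap.Initialize(&page_manager, initial_blocks): prime the free list.
// 3. block_manager.Initialize(&page_manager, &block_heap): Allocate()/Free()
//    now service KMemoryBlock nodes, refilling from the page manager on demand.
void InitializeBlockAllocator(Kernel::KDynamicPageManager& page_manager,
                              Kernel::KMemoryBlockSlabHeap& block_heap,
                              Kernel::KMemoryBlockSlabManager& block_manager,
                              VAddr region, size_t region_size, size_t initial_blocks) {
    ASSERT(page_manager.Initialize(region, region_size).IsSuccess());
    block_heap.Initialize(&page_manager, initial_blocks);
    block_manager.Initialize(&page_manager, &block_heap);
}
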
diff --git a/src/core/hle/kernel/k_dynamic_slab_heap.h b/src/core/hle/kernel/k_dynamic_slab_heap.h
new file mode 100644
index 000000000..3a0ddd050
--- /dev/null
+++ b/src/core/hle/kernel/k_dynamic_slab_heap.h
@@ -0,0 +1,122 @@
1// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include <atomic>
7
8#include "common/common_funcs.h"
9#include "core/hle/kernel/k_dynamic_page_manager.h"
10#include "core/hle/kernel/k_slab_heap.h"
11
12namespace Kernel {
13
14template <typename T, bool ClearNode = false>
15class KDynamicSlabHeap : protected impl::KSlabHeapImpl {
16 YUZU_NON_COPYABLE(KDynamicSlabHeap);
17 YUZU_NON_MOVEABLE(KDynamicSlabHeap);
18
19public:
20 constexpr KDynamicSlabHeap() = default;
21
22 constexpr VAddr GetAddress() const {
23 return m_address;
24 }
25 constexpr size_t GetSize() const {
26 return m_size;
27 }
28 constexpr size_t GetUsed() const {
29 return m_used.load();
30 }
31 constexpr size_t GetPeak() const {
32 return m_peak.load();
33 }
34 constexpr size_t GetCount() const {
35 return m_count.load();
36 }
37
38 constexpr bool IsInRange(VAddr addr) const {
39 return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1;
40 }
41
42 void Initialize(KDynamicPageManager* page_allocator, size_t num_objects) {
43 ASSERT(page_allocator != nullptr);
44
45 // Initialize members.
46 m_address = page_allocator->GetAddress();
47 m_size = page_allocator->GetSize();
48
49 // Initialize the base allocator.
50 KSlabHeapImpl::Initialize();
51
52 // Allocate until we have the correct number of objects.
53 while (m_count.load() < num_objects) {
54 auto* allocated = reinterpret_cast<T*>(page_allocator->Allocate());
55 ASSERT(allocated != nullptr);
56
57 for (size_t i = 0; i < sizeof(PageBuffer) / sizeof(T); i++) {
58 KSlabHeapImpl::Free(allocated + i);
59 }
60
61 m_count += sizeof(PageBuffer) / sizeof(T);
62 }
63 }
64
65 T* Allocate(KDynamicPageManager* page_allocator) {
66 T* allocated = static_cast<T*>(KSlabHeapImpl::Allocate());
67
68 // If we successfully allocated and we should clear the node, do so.
69 if constexpr (ClearNode) {
70 if (allocated != nullptr) [[likely]] {
71 reinterpret_cast<KSlabHeapImpl::Node*>(allocated)->next = nullptr;
72 }
73 }
74
75 // If we fail to allocate, try to get a new page from our next allocator.
76 if (allocated == nullptr) [[unlikely]] {
77 if (page_allocator != nullptr) {
78 allocated = reinterpret_cast<T*>(page_allocator->Allocate());
79 if (allocated != nullptr) {
80 // If we succeeded in getting a page, free the rest to our slab.
81 for (size_t i = 1; i < sizeof(PageBuffer) / sizeof(T); i++) {
82 KSlabHeapImpl::Free(allocated + i);
83 }
84 m_count += sizeof(PageBuffer) / sizeof(T);
85 }
86 }
87 }
88
89 if (allocated != nullptr) [[likely]] {
90 // Construct the object.
91 std::construct_at(allocated);
92
93 // Update our tracking.
94 const size_t used = ++m_used;
95 size_t peak = m_peak.load();
96 while (peak < used) {
97 if (m_peak.compare_exchange_weak(peak, used, std::memory_order_relaxed)) {
98 break;
99 }
100 }
101 }
102
103 return allocated;
104 }
105
106 void Free(T* t) {
107 KSlabHeapImpl::Free(t);
108 --m_used;
109 }
110
111private:
112 using PageBuffer = KDynamicPageManager::PageBuffer;
113
114private:
115 std::atomic<size_t> m_used{};
116 std::atomic<size_t> m_peak{};
117 std::atomic<size_t> m_count{};
118 VAddr m_address{};
119 size_t m_size{};
120};
121
122} // namespace Kernel
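
Aside: the peak-tracking loop in Allocate() is the standard lock-free atomic-max idiom: retry compare_exchange_weak until either this thread raises the stored peak to `used` or another thread has already published a larger value. Extracted for clarity:

#include <atomic>
#include <cstddef>

inline void RaisePeak(std::atomic<std::size_t>& peak_counter, std::size_t used) {
    std::size_t peak = peak_counter.load();
    while (peak < used) {
        // On failure, compare_exchange_weak reloads `peak` with the current
        // value, and the loop condition re-checks whether a store is needed.
        if (peak_counter.compare_exchange_weak(peak, used, std::memory_order_relaxed)) {
            break;
        }
    }
}
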
diff --git a/src/core/hle/kernel/k_interrupt_manager.cpp b/src/core/hle/kernel/k_interrupt_manager.cpp
index 1b577a5b3..4a6b60d26 100644
--- a/src/core/hle/kernel/k_interrupt_manager.cpp
+++ b/src/core/hle/kernel/k_interrupt_manager.cpp
@@ -11,29 +11,34 @@
11namespace Kernel::KInterruptManager { 11namespace Kernel::KInterruptManager {
12 12
13void HandleInterrupt(KernelCore& kernel, s32 core_id) { 13void HandleInterrupt(KernelCore& kernel, s32 core_id) {
14 auto* process = kernel.CurrentProcess();
15 if (!process) {
16 return;
17 }
18
19 // Acknowledge the interrupt. 14 // Acknowledge the interrupt.
20 kernel.PhysicalCore(core_id).ClearInterrupt(); 15 kernel.PhysicalCore(core_id).ClearInterrupt();
21 16
22 auto& current_thread = GetCurrentThread(kernel); 17 auto& current_thread = GetCurrentThread(kernel);
23 18
24 // If the user disable count is set, we may need to pin the current thread. 19 if (auto* process = kernel.CurrentProcess(); process) {
25 if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) { 20 // If the user disable count is set, we may need to pin the current thread.
26 KScopedSchedulerLock sl{kernel}; 21 if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
22 KScopedSchedulerLock sl{kernel};
27 23
28 // Pin the current thread. 24 // Pin the current thread.
29 process->PinCurrentThread(core_id); 25 process->PinCurrentThread(core_id);
30 26
31 // Set the interrupt flag for the thread. 27 // Set the interrupt flag for the thread.
32 GetCurrentThread(kernel).SetInterruptFlag(); 28 GetCurrentThread(kernel).SetInterruptFlag();
29 }
33 } 30 }
34 31
35 // Request interrupt scheduling. 32 // Request interrupt scheduling.
36 kernel.CurrentScheduler()->RequestScheduleOnInterrupt(); 33 kernel.CurrentScheduler()->RequestScheduleOnInterrupt();
37} 34}
38 35
36void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask) {
37 for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; ++core_id) {
38 if (core_mask & (1ULL << core_id)) {
39 kernel.PhysicalCore(core_id).Interrupt();
40 }
41 }
42}
43
39} // namespace Kernel::KInterruptManager 44} // namespace Kernel::KInterruptManager
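
Aside: SendInterProcessorInterrupt treats the u64 argument as a per-core bitmask, with bit i selecting core i. The same walk in isolation (the callback stands in for PhysicalCore::Interrupt(), and the core count mirrors Core::Hardware::NUM_CPU_CORES, which is 4 for the emulated hardware):

#include <cstddef>
#include <cstdint>

constexpr std::size_t NumCpuCores = 4;

template <typename OnCore>
void ForEachCoreInMask(std::uint64_t core_mask, OnCore&& on_core) {
    for (std::size_t core_id = 0; core_id < NumCpuCores; ++core_id) {
        if (core_mask & (1ULL << core_id)) {
            on_core(core_id); // e.g. raise the interrupt line on this core
        }
    }
}
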
diff --git a/src/core/hle/kernel/k_interrupt_manager.h b/src/core/hle/kernel/k_interrupt_manager.h
index f103dfe3f..803dc9211 100644
--- a/src/core/hle/kernel/k_interrupt_manager.h
+++ b/src/core/hle/kernel/k_interrupt_manager.h
@@ -11,6 +11,8 @@ class KernelCore;
11 11
12namespace KInterruptManager { 12namespace KInterruptManager {
13void HandleInterrupt(KernelCore& kernel, s32 core_id); 13void HandleInterrupt(KernelCore& kernel, s32 core_id);
14} 14void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask);
15
16} // namespace KInterruptManager
15 17
16} // namespace Kernel 18} // namespace Kernel
diff --git a/src/core/hle/kernel/k_linked_list.h b/src/core/hle/kernel/k_linked_list.h
index 78859ced3..29ebd16b7 100644
--- a/src/core/hle/kernel/k_linked_list.h
+++ b/src/core/hle/kernel/k_linked_list.h
@@ -16,6 +16,7 @@ class KLinkedListNode : public boost::intrusive::list_base_hook<>,
16 public KSlabAllocated<KLinkedListNode> { 16 public KSlabAllocated<KLinkedListNode> {
17 17
18public: 18public:
19 explicit KLinkedListNode(KernelCore&) {}
19 KLinkedListNode() = default; 20 KLinkedListNode() = default;
20 21
21 void Initialize(void* it) { 22 void Initialize(void* it) {
diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h
index 18df1f836..9444f6bd2 100644
--- a/src/core/hle/kernel/k_memory_block.h
+++ b/src/core/hle/kernel/k_memory_block.h
@@ -6,6 +6,7 @@
6#include "common/alignment.h" 6#include "common/alignment.h"
7#include "common/assert.h" 7#include "common/assert.h"
8#include "common/common_types.h" 8#include "common/common_types.h"
9#include "common/intrusive_red_black_tree.h"
9#include "core/hle/kernel/memory_types.h" 10#include "core/hle/kernel/memory_types.h"
10#include "core/hle/kernel/svc_types.h" 11#include "core/hle/kernel/svc_types.h"
11 12
@@ -168,9 +169,8 @@ constexpr KMemoryPermission ConvertToKMemoryPermission(Svc::MemoryPermission per
168 169
169enum class KMemoryAttribute : u8 { 170enum class KMemoryAttribute : u8 {
170 None = 0x00, 171 None = 0x00,
171 Mask = 0x7F, 172 All = 0xFF,
172 All = Mask, 173 UserMask = All,
173 DontCareMask = 0x80,
174 174
175 Locked = static_cast<u8>(Svc::MemoryAttribute::Locked), 175 Locked = static_cast<u8>(Svc::MemoryAttribute::Locked),
176 IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked), 176 IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked),
@@ -178,76 +178,112 @@ enum class KMemoryAttribute : u8 {
178 Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached), 178 Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached),
179 179
180 SetMask = Uncached, 180 SetMask = Uncached,
181
182 IpcAndDeviceMapped = IpcLocked | DeviceShared,
183 LockedAndIpcLocked = Locked | IpcLocked,
184 DeviceSharedAndUncached = DeviceShared | Uncached
185}; 181};
186DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute); 182DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute);
187 183
188static_assert((static_cast<u8>(KMemoryAttribute::Mask) & 184enum class KMemoryBlockDisableMergeAttribute : u8 {
189 static_cast<u8>(KMemoryAttribute::DontCareMask)) == 0); 185 None = 0,
186 Normal = (1u << 0),
187 DeviceLeft = (1u << 1),
188 IpcLeft = (1u << 2),
189 Locked = (1u << 3),
190 DeviceRight = (1u << 4),
191
192 AllLeft = Normal | DeviceLeft | IpcLeft | Locked,
193 AllRight = DeviceRight,
194};
195DECLARE_ENUM_FLAG_OPERATORS(KMemoryBlockDisableMergeAttribute);
190 196
191struct KMemoryInfo { 197struct KMemoryInfo {
192 VAddr addr{}; 198 uintptr_t m_address;
193 std::size_t size{}; 199 size_t m_size;
194 KMemoryState state{}; 200 KMemoryState m_state;
195 KMemoryPermission perm{}; 201 u16 m_device_disable_merge_left_count;
196 KMemoryAttribute attribute{}; 202 u16 m_device_disable_merge_right_count;
197 KMemoryPermission original_perm{}; 203 u16 m_ipc_lock_count;
198 u16 ipc_lock_count{}; 204 u16 m_device_use_count;
199 u16 device_use_count{}; 205 u16 m_ipc_disable_merge_count;
206 KMemoryPermission m_permission;
207 KMemoryAttribute m_attribute;
208 KMemoryPermission m_original_permission;
209 KMemoryBlockDisableMergeAttribute m_disable_merge_attribute;
200 210
201 constexpr Svc::MemoryInfo GetSvcMemoryInfo() const { 211 constexpr Svc::MemoryInfo GetSvcMemoryInfo() const {
202 return { 212 return {
203 addr, 213 .addr = m_address,
204 size, 214 .size = m_size,
205 static_cast<Svc::MemoryState>(state & KMemoryState::Mask), 215 .state = static_cast<Svc::MemoryState>(m_state & KMemoryState::Mask),
206 static_cast<Svc::MemoryAttribute>(attribute & KMemoryAttribute::Mask), 216 .attr = static_cast<Svc::MemoryAttribute>(m_attribute & KMemoryAttribute::UserMask),
207 static_cast<Svc::MemoryPermission>(perm & KMemoryPermission::UserMask), 217 .perm = static_cast<Svc::MemoryPermission>(m_permission & KMemoryPermission::UserMask),
208 ipc_lock_count, 218 .ipc_refcount = m_ipc_lock_count,
209 device_use_count, 219 .device_refcount = m_device_use_count,
220 .padding = {},
210 }; 221 };
211 } 222 }
212 223
213 constexpr VAddr GetAddress() const { 224 constexpr uintptr_t GetAddress() const {
214 return addr; 225 return m_address;
226 }
227
228 constexpr size_t GetSize() const {
229 return m_size;
215 } 230 }
216 constexpr std::size_t GetSize() const { 231
217 return size; 232 constexpr size_t GetNumPages() const {
233 return this->GetSize() / PageSize;
218 } 234 }
219 constexpr std::size_t GetNumPages() const { 235
220 return GetSize() / PageSize; 236 constexpr uintptr_t GetEndAddress() const {
237 return this->GetAddress() + this->GetSize();
221 } 238 }
222 constexpr VAddr GetEndAddress() const { 239
223 return GetAddress() + GetSize(); 240 constexpr uintptr_t GetLastAddress() const {
241 return this->GetEndAddress() - 1;
224 } 242 }
225 constexpr VAddr GetLastAddress() const { 243
226 return GetEndAddress() - 1; 244 constexpr u16 GetIpcLockCount() const {
245 return m_ipc_lock_count;
227 } 246 }
247
248 constexpr u16 GetIpcDisableMergeCount() const {
249 return m_ipc_disable_merge_count;
250 }
251
228 constexpr KMemoryState GetState() const { 252 constexpr KMemoryState GetState() const {
229 return state; 253 return m_state;
254 }
255
256 constexpr KMemoryPermission GetPermission() const {
257 return m_permission;
230 } 258 }
259
260 constexpr KMemoryPermission GetOriginalPermission() const {
261 return m_original_permission;
262 }
263
231 constexpr KMemoryAttribute GetAttribute() const { 264 constexpr KMemoryAttribute GetAttribute() const {
232 return attribute; 265 return m_attribute;
233 } 266 }
234 constexpr KMemoryPermission GetPermission() const { 267
235 return perm; 268 constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const {
269 return m_disable_merge_attribute;
236 } 270 }
237}; 271};
238 272
239class KMemoryBlock final { 273class KMemoryBlock : public Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock> {
240 friend class KMemoryBlockManager;
241
242private: 274private:
243 VAddr addr{}; 275 u16 m_device_disable_merge_left_count;
244 std::size_t num_pages{}; 276 u16 m_device_disable_merge_right_count;
245 KMemoryState state{KMemoryState::None}; 277 VAddr m_address;
246 u16 ipc_lock_count{}; 278 size_t m_num_pages;
247 u16 device_use_count{}; 279 KMemoryState m_memory_state;
248 KMemoryPermission perm{KMemoryPermission::None}; 280 u16 m_ipc_lock_count;
249 KMemoryPermission original_perm{KMemoryPermission::None}; 281 u16 m_device_use_count;
250 KMemoryAttribute attribute{KMemoryAttribute::None}; 282 u16 m_ipc_disable_merge_count;
283 KMemoryPermission m_permission;
284 KMemoryPermission m_original_permission;
285 KMemoryAttribute m_attribute;
286 KMemoryBlockDisableMergeAttribute m_disable_merge_attribute;
251 287
252public: 288public:
253 static constexpr int Compare(const KMemoryBlock& lhs, const KMemoryBlock& rhs) { 289 static constexpr int Compare(const KMemoryBlock& lhs, const KMemoryBlock& rhs) {
@@ -261,113 +297,349 @@ public:
261 } 297 }
262 298
263public: 299public:
264 constexpr KMemoryBlock() = default;
265 constexpr KMemoryBlock(VAddr addr_, std::size_t num_pages_, KMemoryState state_,
266 KMemoryPermission perm_, KMemoryAttribute attribute_)
267 : addr{addr_}, num_pages(num_pages_), state{state_}, perm{perm_}, attribute{attribute_} {}
268
269 constexpr VAddr GetAddress() const { 300 constexpr VAddr GetAddress() const {
270 return addr; 301 return m_address;
271 } 302 }
272 303
273 constexpr std::size_t GetNumPages() const { 304 constexpr size_t GetNumPages() const {
274 return num_pages; 305 return m_num_pages;
275 } 306 }
276 307
277 constexpr std::size_t GetSize() const { 308 constexpr size_t GetSize() const {
278 return GetNumPages() * PageSize; 309 return this->GetNumPages() * PageSize;
279 } 310 }
280 311
281 constexpr VAddr GetEndAddress() const { 312 constexpr VAddr GetEndAddress() const {
282 return GetAddress() + GetSize(); 313 return this->GetAddress() + this->GetSize();
283 } 314 }
284 315
285 constexpr VAddr GetLastAddress() const { 316 constexpr VAddr GetLastAddress() const {
286 return GetEndAddress() - 1; 317 return this->GetEndAddress() - 1;
318 }
319
320 constexpr u16 GetIpcLockCount() const {
321 return m_ipc_lock_count;
322 }
323
324 constexpr u16 GetIpcDisableMergeCount() const {
325 return m_ipc_disable_merge_count;
326 }
327
328 constexpr KMemoryPermission GetPermission() const {
329 return m_permission;
330 }
331
332 constexpr KMemoryPermission GetOriginalPermission() const {
333 return m_original_permission;
334 }
335
336 constexpr KMemoryAttribute GetAttribute() const {
337 return m_attribute;
287 } 338 }
288 339
289 constexpr KMemoryInfo GetMemoryInfo() const { 340 constexpr KMemoryInfo GetMemoryInfo() const {
290 return { 341 return {
291 GetAddress(), GetSize(), state, perm, 342 .m_address = this->GetAddress(),
292 attribute, original_perm, ipc_lock_count, device_use_count, 343 .m_size = this->GetSize(),
344 .m_state = m_memory_state,
345 .m_device_disable_merge_left_count = m_device_disable_merge_left_count,
346 .m_device_disable_merge_right_count = m_device_disable_merge_right_count,
347 .m_ipc_lock_count = m_ipc_lock_count,
348 .m_device_use_count = m_device_use_count,
349 .m_ipc_disable_merge_count = m_ipc_disable_merge_count,
350 .m_permission = m_permission,
351 .m_attribute = m_attribute,
352 .m_original_permission = m_original_permission,
353 .m_disable_merge_attribute = m_disable_merge_attribute,
293 }; 354 };
294 } 355 }
295 356
296 void ShareToDevice(KMemoryPermission /*new_perm*/) { 357public:
297 ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared || 358 explicit KMemoryBlock() = default;
298 device_use_count == 0); 359
299 attribute |= KMemoryAttribute::DeviceShared; 360 constexpr KMemoryBlock(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
300 const u16 new_use_count{++device_use_count}; 361 KMemoryAttribute attr)
301 ASSERT(new_use_count > 0); 362 : Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock>(),
363 m_device_disable_merge_left_count(), m_device_disable_merge_right_count(),
364 m_address(addr), m_num_pages(np), m_memory_state(ms), m_ipc_lock_count(0),
365 m_device_use_count(0), m_ipc_disable_merge_count(), m_permission(p),
366 m_original_permission(KMemoryPermission::None), m_attribute(attr),
367 m_disable_merge_attribute() {}
368
369 constexpr void Initialize(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
370 KMemoryAttribute attr) {
371 m_device_disable_merge_left_count = 0;
372 m_device_disable_merge_right_count = 0;
373 m_address = addr;
374 m_num_pages = np;
375 m_memory_state = ms;
376 m_ipc_lock_count = 0;
377 m_device_use_count = 0;
378 m_permission = p;
379 m_original_permission = KMemoryPermission::None;
380 m_attribute = attr;
381 m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None;
382 }
383
384 constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const {
385 constexpr auto AttributeIgnoreMask =
386 KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
387 return m_memory_state == s && m_permission == p &&
388 (m_attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
389 }
390
391 constexpr bool HasSameProperties(const KMemoryBlock& rhs) const {
392 return m_memory_state == rhs.m_memory_state && m_permission == rhs.m_permission &&
393 m_original_permission == rhs.m_original_permission &&
394 m_attribute == rhs.m_attribute && m_ipc_lock_count == rhs.m_ipc_lock_count &&
395 m_device_use_count == rhs.m_device_use_count;
396 }
397
398 constexpr bool CanMergeWith(const KMemoryBlock& rhs) const {
399 return this->HasSameProperties(rhs) &&
400 (m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight) ==
401 KMemoryBlockDisableMergeAttribute::None &&
402 (rhs.m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft) ==
403 KMemoryBlockDisableMergeAttribute::None;
302 } 404 }
303 405
304 void UnshareToDevice(KMemoryPermission /*new_perm*/) { 406 constexpr bool Contains(VAddr addr) const {
305 ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared); 407 return this->GetAddress() <= addr && addr <= this->GetEndAddress();
306 const u16 prev_use_count{device_use_count--}; 408 }
307 ASSERT(prev_use_count > 0); 409
308 if (prev_use_count == 1) { 410 constexpr void Add(const KMemoryBlock& added_block) {
309 attribute &= ~KMemoryAttribute::DeviceShared; 411 ASSERT(added_block.GetNumPages() > 0);
412 ASSERT(this->GetAddress() + added_block.GetSize() - 1 <
413 this->GetEndAddress() + added_block.GetSize() - 1);
414
415 m_num_pages += added_block.GetNumPages();
416 m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
417 m_disable_merge_attribute | added_block.m_disable_merge_attribute);
418 m_device_disable_merge_right_count = added_block.m_device_disable_merge_right_count;
419 }
420
421 constexpr void Update(KMemoryState s, KMemoryPermission p, KMemoryAttribute a,
422 bool set_disable_merge_attr, u8 set_mask, u8 clear_mask) {
423 ASSERT(m_original_permission == KMemoryPermission::None);
424 ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None);
425
426 m_memory_state = s;
427 m_permission = p;
428 m_attribute = static_cast<KMemoryAttribute>(
429 a | (m_attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)));
430
431 if (set_disable_merge_attr && set_mask != 0) {
432 m_disable_merge_attribute = m_disable_merge_attribute |
433 static_cast<KMemoryBlockDisableMergeAttribute>(set_mask);
434 }
435 if (clear_mask != 0) {
436 m_disable_merge_attribute = m_disable_merge_attribute &
437 static_cast<KMemoryBlockDisableMergeAttribute>(~clear_mask);
310 } 438 }
311 } 439 }
312 440
313private: 441 constexpr void Split(KMemoryBlock* block, VAddr addr) {
314 constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const { 442 ASSERT(this->GetAddress() < addr);
315 constexpr KMemoryAttribute AttributeIgnoreMask{KMemoryAttribute::DontCareMask | 443 ASSERT(this->Contains(addr));
316 KMemoryAttribute::IpcLocked | 444 ASSERT(Common::IsAligned(addr, PageSize));
317 KMemoryAttribute::DeviceShared}; 445
318 return state == s && perm == p && 446 block->m_address = m_address;
319 (attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask); 447 block->m_num_pages = (addr - this->GetAddress()) / PageSize;
448 block->m_memory_state = m_memory_state;
449 block->m_ipc_lock_count = m_ipc_lock_count;
450 block->m_device_use_count = m_device_use_count;
451 block->m_permission = m_permission;
452 block->m_original_permission = m_original_permission;
453 block->m_attribute = m_attribute;
454 block->m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
455 m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft);
456 block->m_ipc_disable_merge_count = m_ipc_disable_merge_count;
457 block->m_device_disable_merge_left_count = m_device_disable_merge_left_count;
458 block->m_device_disable_merge_right_count = 0;
459
460 m_address = addr;
461 m_num_pages -= block->m_num_pages;
462
463 m_ipc_disable_merge_count = 0;
464 m_device_disable_merge_left_count = 0;
465 m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
466 m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight);
320 } 467 }
321 468
322 constexpr bool HasSameProperties(const KMemoryBlock& rhs) const { 469 constexpr void UpdateDeviceDisableMergeStateForShareLeft(
323 return state == rhs.state && perm == rhs.perm && original_perm == rhs.original_perm && 470 [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
324 attribute == rhs.attribute && ipc_lock_count == rhs.ipc_lock_count && 471 if (left) {
325 device_use_count == rhs.device_use_count; 472 m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
473 m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceLeft);
474 const u16 new_device_disable_merge_left_count = ++m_device_disable_merge_left_count;
475 ASSERT(new_device_disable_merge_left_count > 0);
476 }
326 } 477 }
327 478
328 constexpr bool Contains(VAddr start) const { 479 constexpr void UpdateDeviceDisableMergeStateForShareRight(
329 return GetAddress() <= start && start <= GetEndAddress(); 480 [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
481 if (right) {
482 m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
483 m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceRight);
484 const u16 new_device_disable_merge_right_count = ++m_device_disable_merge_right_count;
485 ASSERT(new_device_disable_merge_right_count > 0);
486 }
487 }
488
489 constexpr void UpdateDeviceDisableMergeStateForShare(KMemoryPermission new_perm, bool left,
490 bool right) {
491 this->UpdateDeviceDisableMergeStateForShareLeft(new_perm, left, right);
492 this->UpdateDeviceDisableMergeStateForShareRight(new_perm, left, right);
330 } 493 }
331 494
332 constexpr void Add(std::size_t count) { 495 constexpr void ShareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
333 ASSERT(count > 0); 496 bool right) {
334 ASSERT(GetAddress() + count * PageSize - 1 < GetEndAddress() + count * PageSize - 1); 497 // We must either be shared or have a zero lock count.
498 ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared ||
499 m_device_use_count == 0);
335 500
336 num_pages += count; 501 // Share.
502 const u16 new_count = ++m_device_use_count;
503 ASSERT(new_count > 0);
504
505 m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute::DeviceShared);
506
507 this->UpdateDeviceDisableMergeStateForShare(new_perm, left, right);
337 } 508 }
338 509
339 constexpr void Update(KMemoryState new_state, KMemoryPermission new_perm, 510 constexpr void UpdateDeviceDisableMergeStateForUnshareLeft(
340 KMemoryAttribute new_attribute) { 511 [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
341 ASSERT(original_perm == KMemoryPermission::None);
342 ASSERT((attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None);
343 512
344 state = new_state; 513 if (left) {
345 perm = new_perm; 514 if (!m_device_disable_merge_left_count) {
515 return;
516 }
517 --m_device_disable_merge_left_count;
518 }
346 519
347 attribute = static_cast<KMemoryAttribute>( 520 m_device_disable_merge_left_count =
348 new_attribute | 521 std::min(m_device_disable_merge_left_count, m_device_use_count);
349 (attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared))); 522
523 if (m_device_disable_merge_left_count == 0) {
524 m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
525 m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceLeft);
526 }
350 } 527 }
351 528
352 constexpr KMemoryBlock Split(VAddr split_addr) { 529 constexpr void UpdateDeviceDisableMergeStateForUnshareRight(
353 ASSERT(GetAddress() < split_addr); 530 [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
354 ASSERT(Contains(split_addr)); 531 if (right) {
355 ASSERT(Common::IsAligned(split_addr, PageSize)); 532 const u16 old_device_disable_merge_right_count = m_device_disable_merge_right_count--;
533 ASSERT(old_device_disable_merge_right_count > 0);
534 if (old_device_disable_merge_right_count == 1) {
535 m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
536 m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceRight);
537 }
538 }
539 }
356 540
357 KMemoryBlock block; 541 constexpr void UpdateDeviceDisableMergeStateForUnshare(KMemoryPermission new_perm, bool left,
358 block.addr = addr; 542 bool right) {
359 block.num_pages = (split_addr - GetAddress()) / PageSize; 543 this->UpdateDeviceDisableMergeStateForUnshareLeft(new_perm, left, right);
360 block.state = state; 544 this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
361 block.ipc_lock_count = ipc_lock_count; 545 }
362 block.device_use_count = device_use_count;
363 block.perm = perm;
364 block.original_perm = original_perm;
365 block.attribute = attribute;
366 546
367 addr = split_addr; 547 constexpr void UnshareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
368 num_pages -= block.num_pages; 548 bool right) {
549 // We must be shared.
550 ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
551
 552 // Unshare.
553 const u16 old_count = m_device_use_count--;
554 ASSERT(old_count > 0);
555
556 if (old_count == 1) {
557 m_attribute =
558 static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::DeviceShared);
559 }
560
561 this->UpdateDeviceDisableMergeStateForUnshare(new_perm, left, right);
562 }
563
564 constexpr void UnshareToDeviceRight([[maybe_unused]] KMemoryPermission new_perm, bool left,
565 bool right) {
566
567 // We must be shared.
568 ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
569
 570 // Unshare.
571 const u16 old_count = m_device_use_count--;
572 ASSERT(old_count > 0);
573
574 if (old_count == 1) {
575 m_attribute =
576 static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::DeviceShared);
577 }
578
579 this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
580 }
581
582 constexpr void LockForIpc(KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
583 // We must either be locked or have a zero lock count.
584 ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked ||
585 m_ipc_lock_count == 0);
586
587 // Lock.
588 const u16 new_lock_count = ++m_ipc_lock_count;
589 ASSERT(new_lock_count > 0);
590
591 // If this is our first lock, update our permissions.
592 if (new_lock_count == 1) {
593 ASSERT(m_original_permission == KMemoryPermission::None);
594 ASSERT((m_permission | new_perm | KMemoryPermission::NotMapped) ==
595 (m_permission | KMemoryPermission::NotMapped));
596 ASSERT((m_permission & KMemoryPermission::UserExecute) !=
597 KMemoryPermission::UserExecute ||
598 (new_perm == KMemoryPermission::UserRead));
599 m_original_permission = m_permission;
600 m_permission = static_cast<KMemoryPermission>(
601 (new_perm & KMemoryPermission::IpcLockChangeMask) |
602 (m_original_permission & ~KMemoryPermission::IpcLockChangeMask));
603 }
604 m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute::IpcLocked);
605
606 if (left) {
607 m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
608 m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::IpcLeft);
609 const u16 new_ipc_disable_merge_count = ++m_ipc_disable_merge_count;
610 ASSERT(new_ipc_disable_merge_count > 0);
611 }
612 }
613
614 constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left,
615 [[maybe_unused]] bool right) {
616 // We must be locked.
617 ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked);
618
619 // Unlock.
620 const u16 old_lock_count = m_ipc_lock_count--;
621 ASSERT(old_lock_count > 0);
622
623 // If this is our last unlock, update our permissions.
624 if (old_lock_count == 1) {
625 ASSERT(m_original_permission != KMemoryPermission::None);
626 m_permission = m_original_permission;
627 m_original_permission = KMemoryPermission::None;
628 m_attribute = static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::IpcLocked);
629 }
630
631 if (left) {
632 const u16 old_ipc_disable_merge_count = m_ipc_disable_merge_count--;
633 ASSERT(old_ipc_disable_merge_count > 0);
634 if (old_ipc_disable_merge_count == 1) {
635 m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
636 m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::IpcLeft);
637 }
638 }
639 }
369 640
370 return block; 641 constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const {
642 return m_disable_merge_attribute;
371 } 643 }
372}; 644};
373static_assert(std::is_trivially_destructible<KMemoryBlock>::value); 645static_assert(std::is_trivially_destructible<KMemoryBlock>::value);
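
Aside: Split() now writes the left portion of a block into a caller-provided node and keeps the right portion in *this, partitioning the disable-merge attributes so the AllLeft bits travel with the left block and the AllRight bits stay with the right one. A worked sketch of just the address math (PageSize assumed to be 0x1000 here):

#include <cassert>
#include <cstddef>

constexpr std::size_t kPageSize = 0x1000;

struct Range {
    std::size_t address;
    std::size_t num_pages;
};

// Returns the left piece; `self` keeps the right piece, as in Split() above.
inline Range SplitAt(Range& self, std::size_t addr) {
    assert(self.address < addr && addr % kPageSize == 0);
    Range left{self.address, (addr - self.address) / kPageSize};
    self.address = addr;
    self.num_pages -= left.num_pages;
    return left;
}

// Splitting [0x10000, 0x18000) at 0x13000 yields a 3-page left block and
// leaves this block holding the remaining 5 pages at 0x13000.
inline void SplitExample() {
    Range r{0x10000, 8};
    const Range left = SplitAt(r, 0x13000);
    assert(left.num_pages == 3 && r.address == 0x13000 && r.num_pages == 5);
}
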
diff --git a/src/core/hle/kernel/k_memory_block_manager.cpp b/src/core/hle/kernel/k_memory_block_manager.cpp
index 3ddb9984f..cf4c1e371 100644
--- a/src/core/hle/kernel/k_memory_block_manager.cpp
+++ b/src/core/hle/kernel/k_memory_block_manager.cpp
@@ -2,221 +2,336 @@
 // SPDX-License-Identifier: GPL-2.0-or-later

 #include "core/hle/kernel/k_memory_block_manager.h"
-#include "core/hle/kernel/memory_types.h"

 namespace Kernel {

-KMemoryBlockManager::KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_)
-    : start_addr{start_addr_}, end_addr{end_addr_} {
-    const u64 num_pages{(end_addr - start_addr) / PageSize};
-    memory_block_tree.emplace_back(start_addr, num_pages, KMemoryState::Free,
-                                   KMemoryPermission::None, KMemoryAttribute::None);
-}
+KMemoryBlockManager::KMemoryBlockManager() = default;

-KMemoryBlockManager::iterator KMemoryBlockManager::FindIterator(VAddr addr) {
-    auto node{memory_block_tree.begin()};
-    while (node != end()) {
-        const VAddr node_end_addr{node->GetNumPages() * PageSize + node->GetAddress()};
-        if (node->GetAddress() <= addr && node_end_addr - 1 >= addr) {
-            return node;
-        }
-        node = std::next(node);
-    }
-    return end();
-}
+Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager) {
+    // Allocate a block to encapsulate the address space, insert it into the tree.
+    KMemoryBlock* start_block = slab_manager->Allocate();
+    R_UNLESS(start_block != nullptr, ResultOutOfResource);
+
+    // Set our start and end.
+    m_start_address = st;
+    m_end_address = nd;
+    ASSERT(Common::IsAligned(m_start_address, PageSize));
+    ASSERT(Common::IsAligned(m_end_address, PageSize));
+
+    // Initialize and insert the block.
+    start_block->Initialize(m_start_address, (m_end_address - m_start_address) / PageSize,
+                            KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None);
+    m_memory_block_tree.insert(*start_block);
+
+    R_SUCCEED();
+}

-VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
-                                        std::size_t num_pages, std::size_t align,
-                                        std::size_t offset, std::size_t guard_pages) {
-    if (num_pages == 0) {
-        return {};
-    }
-
-    const VAddr region_end{region_start + region_num_pages * PageSize};
-    const VAddr region_last{region_end - 1};
-    for (auto it{FindIterator(region_start)}; it != memory_block_tree.cend(); it++) {
-        const auto info{it->GetMemoryInfo()};
-        if (region_last < info.GetAddress()) {
-            break;
-        }
-
-        if (info.state != KMemoryState::Free) {
-            continue;
-        }
-
-        VAddr area{(info.GetAddress() <= region_start) ? region_start : info.GetAddress()};
-        area += guard_pages * PageSize;
-
-        const VAddr offset_area{Common::AlignDown(area, align) + offset};
-        area = (area <= offset_area) ? offset_area : offset_area + align;
-
-        const VAddr area_end{area + num_pages * PageSize + guard_pages * PageSize};
-        const VAddr area_last{area_end - 1};
-
-        if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
-            area_last <= info.GetLastAddress()) {
-            return area;
-        }
-    }
-
-    return {};
-}
+void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager* slab_manager,
+                                   HostUnmapCallback&& host_unmap_callback) {
+    // Erase every block until we have none left.
+    auto it = m_memory_block_tree.begin();
+    while (it != m_memory_block_tree.end()) {
+        KMemoryBlock* block = std::addressof(*it);
+        it = m_memory_block_tree.erase(it);
+        slab_manager->Free(block);
+        host_unmap_callback(block->GetAddress(), block->GetSize());
+    }
+
+    ASSERT(m_memory_block_tree.empty());
+}
+
+VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pages,
+                                        size_t num_pages, size_t alignment, size_t offset,
+                                        size_t guard_pages) const {
+    if (num_pages > 0) {
+        const VAddr region_end = region_start + region_num_pages * PageSize;
+        const VAddr region_last = region_end - 1;
+        for (const_iterator it = this->FindIterator(region_start); it != m_memory_block_tree.cend();
+             it++) {
+            const KMemoryInfo info = it->GetMemoryInfo();
+            if (region_last < info.GetAddress()) {
+                break;
+            }
+            if (info.m_state != KMemoryState::Free) {
+                continue;
+            }
+
+            VAddr area = (info.GetAddress() <= region_start) ? region_start : info.GetAddress();
+            area += guard_pages * PageSize;
+
+            const VAddr offset_area = Common::AlignDown(area, alignment) + offset;
+            area = (area <= offset_area) ? offset_area : offset_area + alignment;
+
+            const VAddr area_end = area + num_pages * PageSize + guard_pages * PageSize;
+            const VAddr area_last = area_end - 1;
+
+            if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
+                area_last <= info.GetLastAddress()) {
+                return area;
+            }
+        }
+    }
+
+    return {};
+}

-void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state,
-                                 KMemoryPermission prev_perm, KMemoryAttribute prev_attribute,
-                                 KMemoryState state, KMemoryPermission perm,
-                                 KMemoryAttribute attribute) {
-    const VAddr update_end_addr{addr + num_pages * PageSize};
-    iterator node{memory_block_tree.begin()};
-
-    prev_attribute |= KMemoryAttribute::IpcAndDeviceMapped;
-
-    while (node != memory_block_tree.end()) {
-        KMemoryBlock* block{&(*node)};
-        iterator next_node{std::next(node)};
-        const VAddr cur_addr{block->GetAddress()};
-        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
-
-        if (addr < cur_end_addr && cur_addr < update_end_addr) {
-            if (!block->HasProperties(prev_state, prev_perm, prev_attribute)) {
-                node = next_node;
-                continue;
-            }
-
-            iterator new_node{node};
-            if (addr > cur_addr) {
-                memory_block_tree.insert(node, block->Split(addr));
-            }
-
-            if (update_end_addr < cur_end_addr) {
-                new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
-            }
-
-            new_node->Update(state, perm, attribute);
-
-            MergeAdjacent(new_node, next_node);
-        }
-
-        if (cur_end_addr - 1 >= update_end_addr - 1) {
-            break;
-        }
-
-        node = next_node;
-    }
-}
+void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator,
+                                            VAddr address, size_t num_pages) {
+    // Find the iterator now that we've updated.
+    iterator it = this->FindIterator(address);
+    if (address != m_start_address) {
+        it--;
+    }
+
+    // Coalesce blocks that we can.
+    while (true) {
+        iterator prev = it++;
+        if (it == m_memory_block_tree.end()) {
+            break;
+        }
+
+        if (prev->CanMergeWith(*it)) {
+            KMemoryBlock* block = std::addressof(*it);
+            m_memory_block_tree.erase(it);
+            prev->Add(*block);
+            allocator->Free(block);
+            it = prev;
+        }
+
+        if (address + num_pages * PageSize < it->GetMemoryInfo().GetEndAddress()) {
+            break;
+        }
+    }
+}
+
+void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+                                 size_t num_pages, KMemoryState state, KMemoryPermission perm,
+                                 KMemoryAttribute attr,
+                                 KMemoryBlockDisableMergeAttribute set_disable_attr,
+                                 KMemoryBlockDisableMergeAttribute clear_disable_attr) {
+    // Ensure for auditing that we never end up with an invalid tree.
+    KScopedMemoryBlockManagerAuditor auditor(this);
+    ASSERT(Common::IsAligned(address, PageSize));
+    ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
+           KMemoryAttribute::None);
+
+    VAddr cur_address = address;
+    size_t remaining_pages = num_pages;
+    iterator it = this->FindIterator(address);
+
+    while (remaining_pages > 0) {
+        const size_t remaining_size = remaining_pages * PageSize;
+        KMemoryInfo cur_info = it->GetMemoryInfo();
+        if (it->HasProperties(state, perm, attr)) {
+            // If we already have the right properties, just advance.
+            if (cur_address + remaining_size < cur_info.GetEndAddress()) {
+                remaining_pages = 0;
+                cur_address += remaining_size;
+            } else {
+                remaining_pages =
+                    (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
+                cur_address = cur_info.GetEndAddress();
+            }
+        } else {
+            // If we need to, create a new block before and insert it.
+            if (cur_info.GetAddress() != cur_address) {
+                KMemoryBlock* new_block = allocator->Allocate();
+
+                it->Split(new_block, cur_address);
+                it = m_memory_block_tree.insert(*new_block);
+                it++;
+
+                cur_info = it->GetMemoryInfo();
+                cur_address = cur_info.GetAddress();
+            }
+
+            // If we need to, create a new block after and insert it.
+            if (cur_info.GetSize() > remaining_size) {
+                KMemoryBlock* new_block = allocator->Allocate();
+
+                it->Split(new_block, cur_address + remaining_size);
+                it = m_memory_block_tree.insert(*new_block);
+
+                cur_info = it->GetMemoryInfo();
+            }
+
+            // Update block state.
+            it->Update(state, perm, attr, cur_address == address, static_cast<u8>(set_disable_attr),
+                       static_cast<u8>(clear_disable_attr));
+            cur_address += cur_info.GetSize();
+            remaining_pages -= cur_info.GetNumPages();
+        }
+        it++;
+    }
+
+    this->CoalesceForUpdate(allocator, address, num_pages);
+}

-void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState state,
-                                 KMemoryPermission perm, KMemoryAttribute attribute) {
-    const VAddr update_end_addr{addr + num_pages * PageSize};
-    iterator node{memory_block_tree.begin()};
-
-    while (node != memory_block_tree.end()) {
-        KMemoryBlock* block{&(*node)};
-        iterator next_node{std::next(node)};
-        const VAddr cur_addr{block->GetAddress()};
-        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
-
-        if (addr < cur_end_addr && cur_addr < update_end_addr) {
-            iterator new_node{node};
-
-            if (addr > cur_addr) {
-                memory_block_tree.insert(node, block->Split(addr));
-            }
-
-            if (update_end_addr < cur_end_addr) {
-                new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
-            }
-
-            new_node->Update(state, perm, attribute);
-
-            MergeAdjacent(new_node, next_node);
-        }
-
-        if (cur_end_addr - 1 >= update_end_addr - 1) {
-            break;
-        }
-
-        node = next_node;
-    }
-}
+void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator,
+                                        VAddr address, size_t num_pages, KMemoryState test_state,
+                                        KMemoryPermission test_perm, KMemoryAttribute test_attr,
+                                        KMemoryState state, KMemoryPermission perm,
+                                        KMemoryAttribute attr) {
+    // Ensure for auditing that we never end up with an invalid tree.
+    KScopedMemoryBlockManagerAuditor auditor(this);
+    ASSERT(Common::IsAligned(address, PageSize));
+    ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
+           KMemoryAttribute::None);
+
+    VAddr cur_address = address;
+    size_t remaining_pages = num_pages;
+    iterator it = this->FindIterator(address);
+
+    while (remaining_pages > 0) {
+        const size_t remaining_size = remaining_pages * PageSize;
+        KMemoryInfo cur_info = it->GetMemoryInfo();
+        if (it->HasProperties(test_state, test_perm, test_attr) &&
+            !it->HasProperties(state, perm, attr)) {
+            // If we need to, create a new block before and insert it.
+            if (cur_info.GetAddress() != cur_address) {
+                KMemoryBlock* new_block = allocator->Allocate();
+
+                it->Split(new_block, cur_address);
+                it = m_memory_block_tree.insert(*new_block);
+                it++;
+
+                cur_info = it->GetMemoryInfo();
+                cur_address = cur_info.GetAddress();
+            }
+
+            // If we need to, create a new block after and insert it.
+            if (cur_info.GetSize() > remaining_size) {
+                KMemoryBlock* new_block = allocator->Allocate();
+
+                it->Split(new_block, cur_address + remaining_size);
+                it = m_memory_block_tree.insert(*new_block);
+
+                cur_info = it->GetMemoryInfo();
+            }
+
+            // Update block state.
+            it->Update(state, perm, attr, false, 0, 0);
+            cur_address += cur_info.GetSize();
+            remaining_pages -= cur_info.GetNumPages();
+        } else {
+            // If we already have the right properties, just advance.
+            if (cur_address + remaining_size < cur_info.GetEndAddress()) {
+                remaining_pages = 0;
+                cur_address += remaining_size;
+            } else {
+                remaining_pages =
+                    (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
+                cur_address = cur_info.GetEndAddress();
+            }
+        }
+        it++;
+    }
+
+    this->CoalesceForUpdate(allocator, address, num_pages);
+}

-void KMemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
+void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+                                     size_t num_pages, MemoryBlockLockFunction lock_func,
                                      KMemoryPermission perm) {
-    const VAddr update_end_addr{addr + num_pages * PageSize};
-    iterator node{memory_block_tree.begin()};
-
-    while (node != memory_block_tree.end()) {
-        KMemoryBlock* block{&(*node)};
-        iterator next_node{std::next(node)};
-        const VAddr cur_addr{block->GetAddress()};
-        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
-
-        if (addr < cur_end_addr && cur_addr < update_end_addr) {
-            iterator new_node{node};
-
-            if (addr > cur_addr) {
-                memory_block_tree.insert(node, block->Split(addr));
-            }
-
-            if (update_end_addr < cur_end_addr) {
-                new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
-            }
-
-            lock_func(new_node, perm);
-
-            MergeAdjacent(new_node, next_node);
-        }
-
-        if (cur_end_addr - 1 >= update_end_addr - 1) {
-            break;
-        }
-
-        node = next_node;
-    }
-}
+    // Ensure for auditing that we never end up with an invalid tree.
+    KScopedMemoryBlockManagerAuditor auditor(this);
+    ASSERT(Common::IsAligned(address, PageSize));
+
+    VAddr cur_address = address;
+    size_t remaining_pages = num_pages;
+    iterator it = this->FindIterator(address);
+
+    const VAddr end_address = address + (num_pages * PageSize);
+
+    while (remaining_pages > 0) {
+        const size_t remaining_size = remaining_pages * PageSize;
+        KMemoryInfo cur_info = it->GetMemoryInfo();
+
+        // If we need to, create a new block before and insert it.
+        if (cur_info.m_address != cur_address) {
+            KMemoryBlock* new_block = allocator->Allocate();
+
+            it->Split(new_block, cur_address);
+            it = m_memory_block_tree.insert(*new_block);
+            it++;
+
+            cur_info = it->GetMemoryInfo();
+            cur_address = cur_info.GetAddress();
+        }
+
+        if (cur_info.GetSize() > remaining_size) {
+            // If we need to, create a new block after and insert it.
+            KMemoryBlock* new_block = allocator->Allocate();
+
+            it->Split(new_block, cur_address + remaining_size);
+            it = m_memory_block_tree.insert(*new_block);
+
+            cur_info = it->GetMemoryInfo();
+        }
+
+        // Call the locked update function.
+        (std::addressof(*it)->*lock_func)(perm, cur_info.GetAddress() == address,
+                                          cur_info.GetEndAddress() == end_address);
+        cur_address += cur_info.GetSize();
+        remaining_pages -= cur_info.GetNumPages();
+        it++;
+    }
+
+    this->CoalesceForUpdate(allocator, address, num_pages);
+}

-void KMemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) {
-    const_iterator it{FindIterator(start)};
-    KMemoryInfo info{};
-    do {
-        info = it->GetMemoryInfo();
-        func(info);
-        it = std::next(it);
-    } while (info.addr + info.size - 1 < end - 1 && it != cend());
-}
-
-void KMemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) {
-    KMemoryBlock* block{&(*it)};
-
-    auto EraseIt = [&](const iterator it_to_erase) {
-        if (next_it == it_to_erase) {
-            next_it = std::next(next_it);
-        }
-        memory_block_tree.erase(it_to_erase);
-    };
-
-    if (it != memory_block_tree.begin()) {
-        KMemoryBlock* prev{&(*std::prev(it))};
-
-        if (block->HasSameProperties(*prev)) {
-            const iterator prev_it{std::prev(it)};
-
-            prev->Add(block->GetNumPages());
-            EraseIt(it);
-
-            it = prev_it;
-            block = prev;
-        }
-    }
-
-    if (it != cend()) {
-        const KMemoryBlock* const next{&(*std::next(it))};
-
-        if (block->HasSameProperties(*next)) {
-            block->Add(next->GetNumPages());
-            EraseIt(std::next(it));
-        }
-    }
-}
+// Debug.
+bool KMemoryBlockManager::CheckState() const {
+    // Loop over every block, ensuring that we are sorted and coalesced.
+    auto it = m_memory_block_tree.cbegin();
+    auto prev = it++;
+    while (it != m_memory_block_tree.cend()) {
+        const KMemoryInfo prev_info = prev->GetMemoryInfo();
+        const KMemoryInfo cur_info = it->GetMemoryInfo();
+
+        // Sequential blocks which can be merged should be merged.
+        if (prev->CanMergeWith(*it)) {
+            return false;
+        }
+
+        // Sequential blocks should be sequential.
+        if (prev_info.GetEndAddress() != cur_info.GetAddress()) {
+            return false;
+        }
+
+        // If the block is ipc locked, it must have a count.
+        if ((cur_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None &&
+            cur_info.m_ipc_lock_count == 0) {
+            return false;
+        }
+
+        // If the block is device shared, it must have a count.
+        if ((cur_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None &&
+            cur_info.m_device_use_count == 0) {
+            return false;
+        }
+
+        // Advance the iterator.
+        prev = it++;
+    }
+
+    // Our loop will miss checking the last block, potentially, so check it.
+    if (prev != m_memory_block_tree.cend()) {
+        const KMemoryInfo prev_info = prev->GetMemoryInfo();
+        // If the block is ipc locked, it must have a count.
+        if ((prev_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None &&
+            prev_info.m_ipc_lock_count == 0) {
+            return false;
+        }
+
+        // If the block is device shared, it must have a count.
+        if ((prev_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None &&
+            prev_info.m_device_use_count == 0) {
+            return false;
+        }
+    }
+
+    return true;
+}

 } // namespace Kernel
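
The rewritten manager follows a split/update/coalesce shape: Update() first makes
both ends of the target range block boundaries, stamps the new state on every
block in between, and CoalesceForUpdate() then re-merges neighbors whose
properties match again. A toy version over a std::map keyed by base address
(hypothetical simplified state, not the kernel's intrusive tree) shows the same
three phases:

    #include <cstdint>
    #include <iterator>
    #include <map>

    using Addr = std::uint64_t;

    // Hypothetical block record: the map key is the base address.
    struct Extent {
        Addr size;
        int state;
    };
    using BlockMap = std::map<Addr, Extent>;

    // Make 'addr' a block boundary; assumes addr lies inside the managed range.
    void SplitAt(BlockMap& blocks, Addr addr) {
        auto it = std::prev(blocks.upper_bound(addr));
        if (it->first == addr || it->first + it->second.size == addr) {
            return; // Already a boundary.
        }
        const Addr left_size = addr - it->first;
        blocks[addr] = {it->second.size - left_size, it->second.state};
        it->second.size = left_size;
    }

    void Update(BlockMap& blocks, Addr addr, Addr size, int state) {
        // Phase 1: carve the range out of its containing blocks.
        SplitAt(blocks, addr);
        SplitAt(blocks, addr + size);
        // Phase 2: stamp the new state on every block inside the range.
        for (auto it = blocks.find(addr); it != blocks.end() && it->first < addr + size; ++it) {
            it->second.state = state;
        }
        // Phase 3: coalesce adjacent blocks whose state now matches.
        for (auto it = blocks.begin(); std::next(it) != blocks.end();) {
            auto next = std::next(it);
            if (it->second.state == next->second.state) {
                it->second.size += next->second.size;
                blocks.erase(next);
            } else {
                it = next;
            }
        }
    }

Seeding the map with one free extent covering the whole space mirrors what
Initialize() does above with its single start_block.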
diff --git a/src/core/hle/kernel/k_memory_block_manager.h b/src/core/hle/kernel/k_memory_block_manager.h
index e14741b89..9b5873883 100644
--- a/src/core/hle/kernel/k_memory_block_manager.h
+++ b/src/core/hle/kernel/k_memory_block_manager.h
@@ -4,63 +4,154 @@
 #pragma once

 #include <functional>
-#include <list>

+#include "common/common_funcs.h"
 #include "common/common_types.h"
+#include "core/hle/kernel/k_dynamic_resource_manager.h"
 #include "core/hle/kernel/k_memory_block.h"

 namespace Kernel {

+class KMemoryBlockManagerUpdateAllocator {
+public:
+    static constexpr size_t MaxBlocks = 2;
+
+private:
+    KMemoryBlock* m_blocks[MaxBlocks];
+    size_t m_index;
+    KMemoryBlockSlabManager* m_slab_manager;
+
+private:
+    Result Initialize(size_t num_blocks) {
+        // Check num blocks.
+        ASSERT(num_blocks <= MaxBlocks);
+
+        // Set index.
+        m_index = MaxBlocks - num_blocks;
+
+        // Allocate the blocks.
+        for (size_t i = 0; i < num_blocks && i < MaxBlocks; ++i) {
+            m_blocks[m_index + i] = m_slab_manager->Allocate();
+            R_UNLESS(m_blocks[m_index + i] != nullptr, ResultOutOfResource);
+        }
+
+        R_SUCCEED();
+    }
+
+public:
+    KMemoryBlockManagerUpdateAllocator(Result* out_result, KMemoryBlockSlabManager* sm,
+                                       size_t num_blocks = MaxBlocks)
+        : m_blocks(), m_index(MaxBlocks), m_slab_manager(sm) {
+        *out_result = this->Initialize(num_blocks);
+    }
+
+    ~KMemoryBlockManagerUpdateAllocator() {
+        for (const auto& block : m_blocks) {
+            if (block != nullptr) {
+                m_slab_manager->Free(block);
+            }
+        }
+    }
+
+    KMemoryBlock* Allocate() {
+        ASSERT(m_index < MaxBlocks);
+        ASSERT(m_blocks[m_index] != nullptr);
+        KMemoryBlock* block = nullptr;
+        std::swap(block, m_blocks[m_index++]);
+        return block;
+    }
+
+    void Free(KMemoryBlock* block) {
+        ASSERT(m_index <= MaxBlocks);
+        ASSERT(block != nullptr);
+        if (m_index == 0) {
+            m_slab_manager->Free(block);
+        } else {
+            m_blocks[--m_index] = block;
+        }
+    }
+};
+
 class KMemoryBlockManager final {
 public:
-    using MemoryBlockTree = std::list<KMemoryBlock>;
+    using MemoryBlockTree =
+        Common::IntrusiveRedBlackTreeBaseTraits<KMemoryBlock>::TreeType<KMemoryBlock>;
+    using MemoryBlockLockFunction = void (KMemoryBlock::*)(KMemoryPermission new_perm, bool left,
+                                                           bool right);
     using iterator = MemoryBlockTree::iterator;
     using const_iterator = MemoryBlockTree::const_iterator;

 public:
-    KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_);
+    KMemoryBlockManager();
+
+    using HostUnmapCallback = std::function<void(VAddr, u64)>;
+
+    Result Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager);
+    void Finalize(KMemoryBlockSlabManager* slab_manager, HostUnmapCallback&& host_unmap_callback);

     iterator end() {
-        return memory_block_tree.end();
+        return m_memory_block_tree.end();
     }
     const_iterator end() const {
-        return memory_block_tree.end();
+        return m_memory_block_tree.end();
     }
     const_iterator cend() const {
-        return memory_block_tree.cend();
+        return m_memory_block_tree.cend();
     }

-    iterator FindIterator(VAddr addr);
+    VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
+                       size_t alignment, size_t offset, size_t guard_pages) const;

-    VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
-                       std::size_t align, std::size_t offset, std::size_t guard_pages);
+    void Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
+                KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr,
+                KMemoryBlockDisableMergeAttribute set_disable_attr,
+                KMemoryBlockDisableMergeAttribute clear_disable_attr);
+    void UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
+                    MemoryBlockLockFunction lock_func, KMemoryPermission perm);

-    void Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state,
-                KMemoryPermission prev_perm, KMemoryAttribute prev_attribute, KMemoryState state,
-                KMemoryPermission perm, KMemoryAttribute attribute);
+    void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+                       size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm,
+                       KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm,
+                       KMemoryAttribute attr);

-    void Update(VAddr addr, std::size_t num_pages, KMemoryState state,
-                KMemoryPermission perm = KMemoryPermission::None,
-                KMemoryAttribute attribute = KMemoryAttribute::None);
-
-    using LockFunc = std::function<void(iterator, KMemoryPermission)>;
-    void UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
-                    KMemoryPermission perm);
+    iterator FindIterator(VAddr address) const {
+        return m_memory_block_tree.find(KMemoryBlock(
+            address, 1, KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None));
+    }

-    using IterateFunc = std::function<void(const KMemoryInfo&)>;
-    void IterateForRange(VAddr start, VAddr end, IterateFunc&& func);
+    const KMemoryBlock* FindBlock(VAddr address) const {
+        if (const_iterator it = this->FindIterator(address); it != m_memory_block_tree.end()) {
+            return std::addressof(*it);
+        }

-    KMemoryBlock& FindBlock(VAddr addr) {
-        return *FindIterator(addr);
+        return nullptr;
     }

+    // Debug.
+    bool CheckState() const;
+
 private:
-    void MergeAdjacent(iterator it, iterator& next_it);
+    void CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+                           size_t num_pages);

-    [[maybe_unused]] const VAddr start_addr;
-    [[maybe_unused]] const VAddr end_addr;
+    MemoryBlockTree m_memory_block_tree;
+    VAddr m_start_address{};
+    VAddr m_end_address{};
+};

-    MemoryBlockTree memory_block_tree;
+class KScopedMemoryBlockManagerAuditor {
+public:
+    explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager* m) : m_manager(m) {
+        ASSERT(m_manager->CheckState());
+    }
+    explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager& m)
+        : KScopedMemoryBlockManagerAuditor(std::addressof(m)) {}
+    ~KScopedMemoryBlockManagerAuditor() {
+        ASSERT(m_manager->CheckState());
+    }
+
+private:
+    KMemoryBlockManager* m_manager;
 };

 } // namespace Kernel
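
KMemoryBlockManagerUpdateAllocator exists so that a tree update can fail cleanly:
a single Update() needs at most MaxBlocks (two) fresh blocks, one split at each
end of the range, so callers reserve them before mutating anything. A usage
sketch of that preallocate-then-commit pattern, with placeholder names and
argument values rather than a real yuzu call site:

    Result MapSomething(KMemoryBlockManager& manager, KMemoryBlockSlabManager* slab_manager,
                        VAddr address, size_t num_pages) {
        // Reserve the worst-case number of blocks before touching the tree.
        Result allocator_result{ResultSuccess};
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                     slab_manager);
        R_TRY(allocator_result);

        // From here on, Update() cannot fail for lack of blocks: splits draw from the
        // allocator, and merges return blocks to it through Free().
        manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Normal,
                       KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
                       KMemoryBlockDisableMergeAttribute::Normal,
                       KMemoryBlockDisableMergeAttribute::None);
        R_SUCCEED();
    }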
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index 5b0a9963a..646711505 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -331,7 +331,7 @@ Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pag

     // Set all the allocated memory.
     for (const auto& block : out->Nodes()) {
-        std::memset(system.DeviceMemory().GetPointer(block.GetAddress()), fill_pattern,
+        std::memset(system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
                     block.GetSize());
     }

diff --git a/src/core/hle/kernel/k_page_buffer.cpp b/src/core/hle/kernel/k_page_buffer.cpp
index 1a0bf4439..0c16dded4 100644
--- a/src/core/hle/kernel/k_page_buffer.cpp
+++ b/src/core/hle/kernel/k_page_buffer.cpp
@@ -12,7 +12,7 @@ namespace Kernel {

 KPageBuffer* KPageBuffer::FromPhysicalAddress(Core::System& system, PAddr phys_addr) {
     ASSERT(Common::IsAligned(phys_addr, PageSize));
-    return reinterpret_cast<KPageBuffer*>(system.DeviceMemory().GetPointer(phys_addr));
+    return system.DeviceMemory().GetPointer<KPageBuffer>(phys_addr);
 }

 } // namespace Kernel
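
This hunk and the k_memory_manager.cpp hunk above both track the same interface
change: DeviceMemory::GetPointer is now templated on the pointee type, so the
cast lives in one place instead of at every call site. The shape of such an
accessor, sketched with illustrative offset arithmetic rather than yuzu's actual
mapping logic:

    #include <cstdint>

    using PAddr = std::uint64_t;

    class DeviceMemoryLike {
    public:
        template <typename T>
        T* GetPointer(PAddr addr) {
            // One reinterpret_cast here replaces casts at every caller.
            return reinterpret_cast<T*>(buffer + (addr - base));
        }

    private:
        std::uint8_t* buffer = nullptr; // Host backing allocation (illustrative).
        PAddr base = 0;                 // Physical base address it represents.
    };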
diff --git a/src/core/hle/kernel/k_page_buffer.h b/src/core/hle/kernel/k_page_buffer.h
index 7e50dc1d1..aef06e213 100644
--- a/src/core/hle/kernel/k_page_buffer.h
+++ b/src/core/hle/kernel/k_page_buffer.h
@@ -13,6 +13,7 @@ namespace Kernel {

 class KPageBuffer final : public KSlabAllocated<KPageBuffer> {
 public:
+    explicit KPageBuffer(KernelCore&) {}
     KPageBuffer() = default;

     static KPageBuffer* FromPhysicalAddress(Core::System& system, PAddr phys_addr);
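
The added constructor appears to exist for the kernel's generic slab-setup path
(init_slab_setup.cpp also changes in this set), which constructs every slab type
with a KernelCore& argument; KPageBuffer accepts and ignores it. A simplified
sketch of why a uniform constructor signature matters there (KernelCore and the
factory below are stand-ins, not the real initialization code):

    #include <new>

    struct KernelCore {}; // Simplified stand-in.

    // A generic slab initializer can only construct every type T the same way
    // if each T accepts KernelCore&, even when the argument goes unused.
    template <typename T>
    T* ConstructSlabObject(KernelCore& kernel, void* storage) {
        return new (storage) T(kernel);
    }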
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index d975de844..307e491cb 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -25,7 +25,7 @@ namespace {

 using namespace Common::Literals;

-constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) {
+constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) {
     switch (as_type) {
     case FileSys::ProgramAddressSpaceType::Is32Bit:
     case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
@@ -43,27 +43,29 @@ constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceT
 } // namespace

 KPageTable::KPageTable(Core::System& system_)
-    : general_lock{system_.Kernel()}, map_physical_memory_lock{system_.Kernel()}, system{system_} {}
+    : m_general_lock{system_.Kernel()},
+      m_map_physical_memory_lock{system_.Kernel()}, m_system{system_} {}

 KPageTable::~KPageTable() = default;

 Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
-                                        VAddr code_addr, std::size_t code_size,
+                                        VAddr code_addr, size_t code_size,
+                                        KMemoryBlockSlabManager* mem_block_slab_manager,
                                         KMemoryManager::Pool pool) {

     const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) {
-        return KAddressSpaceInfo::GetAddressSpaceStart(address_space_width, type);
+        return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type);
     };
     const auto GetSpaceSize = [this](KAddressSpaceInfo::Type type) {
-        return KAddressSpaceInfo::GetAddressSpaceSize(address_space_width, type);
+        return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type);
     };

     // Set our width and heap/alias sizes
-    address_space_width = GetAddressSpaceWidthFromType(as_type);
+    m_address_space_width = GetAddressSpaceWidthFromType(as_type);
     const VAddr start = 0;
-    const VAddr end{1ULL << address_space_width};
-    std::size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)};
-    std::size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)};
+    const VAddr end{1ULL << m_address_space_width};
+    size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)};
+    size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)};

     ASSERT(code_addr < code_addr + code_size);
     ASSERT(code_addr + code_size - 1 <= end - 1);
@@ -75,66 +77,65 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
     }

     // Set code regions and determine remaining
-    constexpr std::size_t RegionAlignment{2_MiB};
+    constexpr size_t RegionAlignment{2_MiB};
     VAddr process_code_start{};
     VAddr process_code_end{};
-    std::size_t stack_region_size{};
-    std::size_t kernel_map_region_size{};
+    size_t stack_region_size{};
+    size_t kernel_map_region_size{};

-    if (address_space_width == 39) {
+    if (m_address_space_width == 39) {
         alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias);
         heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap);
         stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack);
         kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
-        code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit);
-        code_region_end = code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit);
-        alias_code_region_start = code_region_start;
-        alias_code_region_end = code_region_end;
+        m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit);
+        m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit);
+        m_alias_code_region_start = m_code_region_start;
+        m_alias_code_region_end = m_code_region_end;
         process_code_start = Common::AlignDown(code_addr, RegionAlignment);
         process_code_end = Common::AlignUp(code_addr + code_size, RegionAlignment);
     } else {
         stack_region_size = 0;
         kernel_map_region_size = 0;
-        code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall);
-        code_region_end = code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
-        stack_region_start = code_region_start;
-        alias_code_region_start = code_region_start;
-        alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) +
-                                GetSpaceSize(KAddressSpaceInfo::Type::MapLarge);
-        stack_region_end = code_region_end;
-        kernel_map_region_start = code_region_start;
-        kernel_map_region_end = code_region_end;
-        process_code_start = code_region_start;
-        process_code_end = code_region_end;
+        m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall);
+        m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
+        m_stack_region_start = m_code_region_start;
+        m_alias_code_region_start = m_code_region_start;
+        m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) +
+                                  GetSpaceSize(KAddressSpaceInfo::Type::MapLarge);
+        m_stack_region_end = m_code_region_end;
+        m_kernel_map_region_start = m_code_region_start;
+        m_kernel_map_region_end = m_code_region_end;
+        process_code_start = m_code_region_start;
+        process_code_end = m_code_region_end;
     }

     // Set other basic fields
-    is_aslr_enabled = enable_aslr;
-    address_space_start = start;
-    address_space_end = end;
-    is_kernel = false;
+    m_enable_aslr = enable_aslr;
+    m_enable_device_address_space_merge = false;
+    m_address_space_start = start;
+    m_address_space_end = end;
+    m_is_kernel = false;
+    m_memory_block_slab_manager = mem_block_slab_manager;

     // Determine the region we can place our undetermineds in
     VAddr alloc_start{};
-    std::size_t alloc_size{};
-    if ((process_code_start - code_region_start) >= (end - process_code_end)) {
-        alloc_start = code_region_start;
-        alloc_size = process_code_start - code_region_start;
+    size_t alloc_size{};
+    if ((process_code_start - m_code_region_start) >= (end - process_code_end)) {
+        alloc_start = m_code_region_start;
+        alloc_size = process_code_start - m_code_region_start;
     } else {
         alloc_start = process_code_end;
         alloc_size = end - process_code_end;
     }
-    const std::size_t needed_size{
-        (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)};
-    if (alloc_size < needed_size) {
-        ASSERT(false);
-        return ResultOutOfMemory;
-    }
+    const size_t needed_size =
+        (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size);
+    R_UNLESS(alloc_size >= needed_size, ResultOutOfMemory);

-    const std::size_t remaining_size{alloc_size - needed_size};
+    const size_t remaining_size{alloc_size - needed_size};

     // Determine random placements for each region
-    std::size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{};
+    size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{};
     if (enable_aslr) {
         alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
                     RegionAlignment;
@@ -147,117 +148,130 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
     }

     // Setup heap and alias regions
-    alias_region_start = alloc_start + alias_rnd;
-    alias_region_end = alias_region_start + alias_region_size;
-    heap_region_start = alloc_start + heap_rnd;
-    heap_region_end = heap_region_start + heap_region_size;
+    m_alias_region_start = alloc_start + alias_rnd;
+    m_alias_region_end = m_alias_region_start + alias_region_size;
+    m_heap_region_start = alloc_start + heap_rnd;
+    m_heap_region_end = m_heap_region_start + heap_region_size;

     if (alias_rnd <= heap_rnd) {
-        heap_region_start += alias_region_size;
-        heap_region_end += alias_region_size;
+        m_heap_region_start += alias_region_size;
+        m_heap_region_end += alias_region_size;
     } else {
-        alias_region_start += heap_region_size;
-        alias_region_end += heap_region_size;
+        m_alias_region_start += heap_region_size;
+        m_alias_region_end += heap_region_size;
     }

     // Setup stack region
     if (stack_region_size) {
-        stack_region_start = alloc_start + stack_rnd;
-        stack_region_end = stack_region_start + stack_region_size;
+        m_stack_region_start = alloc_start + stack_rnd;
+        m_stack_region_end = m_stack_region_start + stack_region_size;

         if (alias_rnd < stack_rnd) {
-            stack_region_start += alias_region_size;
-            stack_region_end += alias_region_size;
+            m_stack_region_start += alias_region_size;
+            m_stack_region_end += alias_region_size;
         } else {
-            alias_region_start += stack_region_size;
-            alias_region_end += stack_region_size;
+            m_alias_region_start += stack_region_size;
+            m_alias_region_end += stack_region_size;
         }

         if (heap_rnd < stack_rnd) {
-            stack_region_start += heap_region_size;
-            stack_region_end += heap_region_size;
+            m_stack_region_start += heap_region_size;
+            m_stack_region_end += heap_region_size;
         } else {
-            heap_region_start += stack_region_size;
-            heap_region_end += stack_region_size;
+            m_heap_region_start += stack_region_size;
+            m_heap_region_end += stack_region_size;
         }
     }

     // Setup kernel map region
     if (kernel_map_region_size) {
-        kernel_map_region_start = alloc_start + kmap_rnd;
-        kernel_map_region_end = kernel_map_region_start + kernel_map_region_size;
+        m_kernel_map_region_start = alloc_start + kmap_rnd;
+        m_kernel_map_region_end = m_kernel_map_region_start + kernel_map_region_size;

         if (alias_rnd < kmap_rnd) {
-            kernel_map_region_start += alias_region_size;
-            kernel_map_region_end += alias_region_size;
+            m_kernel_map_region_start += alias_region_size;
+            m_kernel_map_region_end += alias_region_size;
         } else {
-            alias_region_start += kernel_map_region_size;
-            alias_region_end += kernel_map_region_size;
+            m_alias_region_start += kernel_map_region_size;
+            m_alias_region_end += kernel_map_region_size;
         }

         if (heap_rnd < kmap_rnd) {
-            kernel_map_region_start += heap_region_size;
-            kernel_map_region_end += heap_region_size;
+            m_kernel_map_region_start += heap_region_size;
+            m_kernel_map_region_end += heap_region_size;
         } else {
-            heap_region_start += kernel_map_region_size;
-            heap_region_end += kernel_map_region_size;
+            m_heap_region_start += kernel_map_region_size;
+            m_heap_region_end += kernel_map_region_size;
         }

         if (stack_region_size) {
             if (stack_rnd < kmap_rnd) {
-                kernel_map_region_start += stack_region_size;
-                kernel_map_region_end += stack_region_size;
+                m_kernel_map_region_start += stack_region_size;
+                m_kernel_map_region_end += stack_region_size;
             } else {
-                stack_region_start += kernel_map_region_size;
-                stack_region_end += kernel_map_region_size;
+                m_stack_region_start += kernel_map_region_size;
+                m_stack_region_end += kernel_map_region_size;
             }
         }
     }

     // Set heap members
-    current_heap_end = heap_region_start;
-    max_heap_size = 0;
-    max_physical_memory_size = 0;
+    m_current_heap_end = m_heap_region_start;
+    m_max_heap_size = 0;
+    m_max_physical_memory_size = 0;

     // Ensure that we regions inside our address space
     auto IsInAddressSpace = [&](VAddr addr) {
-        return address_space_start <= addr && addr <= address_space_end;
+        return m_address_space_start <= addr && addr <= m_address_space_end;
     };
-    ASSERT(IsInAddressSpace(alias_region_start));
-    ASSERT(IsInAddressSpace(alias_region_end));
-    ASSERT(IsInAddressSpace(heap_region_start));
-    ASSERT(IsInAddressSpace(heap_region_end));
-    ASSERT(IsInAddressSpace(stack_region_start));
-    ASSERT(IsInAddressSpace(stack_region_end));
-    ASSERT(IsInAddressSpace(kernel_map_region_start));
-    ASSERT(IsInAddressSpace(kernel_map_region_end));
+    ASSERT(IsInAddressSpace(m_alias_region_start));
+    ASSERT(IsInAddressSpace(m_alias_region_end));
+    ASSERT(IsInAddressSpace(m_heap_region_start));
+    ASSERT(IsInAddressSpace(m_heap_region_end));
+    ASSERT(IsInAddressSpace(m_stack_region_start));
+    ASSERT(IsInAddressSpace(m_stack_region_end));
+    ASSERT(IsInAddressSpace(m_kernel_map_region_start));
+    ASSERT(IsInAddressSpace(m_kernel_map_region_end));

     // Ensure that we selected regions that don't overlap
-    const VAddr alias_start{alias_region_start};
-    const VAddr alias_last{alias_region_end - 1};
-    const VAddr heap_start{heap_region_start};
-    const VAddr heap_last{heap_region_end - 1};
-    const VAddr stack_start{stack_region_start};
-    const VAddr stack_last{stack_region_end - 1};
-    const VAddr kmap_start{kernel_map_region_start};
-    const VAddr kmap_last{kernel_map_region_end - 1};
+    const VAddr alias_start{m_alias_region_start};
+    const VAddr alias_last{m_alias_region_end - 1};
+    const VAddr heap_start{m_heap_region_start};
+    const VAddr heap_last{m_heap_region_end - 1};
+    const VAddr stack_start{m_stack_region_start};
+    const VAddr stack_last{m_stack_region_end - 1};
+    const VAddr kmap_start{m_kernel_map_region_start};
+    const VAddr kmap_last{m_kernel_map_region_end - 1};
     ASSERT(alias_last < heap_start || heap_last < alias_start);
     ASSERT(alias_last < stack_start || stack_last < alias_start);
     ASSERT(alias_last < kmap_start || kmap_last < alias_start);
     ASSERT(heap_last < stack_start || stack_last < heap_start);
     ASSERT(heap_last < kmap_start || kmap_last < heap_start);

-    current_heap_end = heap_region_start;
-    max_heap_size = 0;
-    mapped_physical_memory_size = 0;
-    memory_pool = pool;
+    m_current_heap_end = m_heap_region_start;
+    m_max_heap_size = 0;
+    m_mapped_physical_memory_size = 0;
+    m_memory_pool = pool;
+
+    m_page_table_impl = std::make_unique<Common::PageTable>();
+    m_page_table_impl->Resize(m_address_space_width, PageBits);
+
+    // Initialize our memory block manager.
+    R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end,
+                                               m_memory_block_slab_manager));
+}

-    page_table_impl.Resize(address_space_width, PageBits);
+void KPageTable::Finalize() {
+    // Finalize memory blocks.
+    m_memory_block_manager.Finalize(m_memory_block_slab_manager, [&](VAddr addr, u64 size) {
+        m_system.Memory().UnmapRegion(*m_page_table_impl, addr, size);
+    });

-    return InitializeMemoryLayout(start, end);
+    // Close the backing page table, as the destructor is not called for guest objects.
+    m_page_table_impl.reset();
 }

-Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryState state,
+Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState state,
                                   KMemoryPermission perm) {
     const u64 size{num_pages * PageSize};

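
The region setup above gives each of the alias, heap, stack, and kernel-map
regions a RegionAlignment-aligned random offset inside the slack left after the
code region, then displaces later regions past earlier ones so they never
overlap. The core placement step, isolated as a sketch (GenerateRandomRange
below is a stand-in for KSystemControl::GenerateRandomRange):

    #include <cstddef>
    #include <cstdint>
    #include <random>

    using VAddr = std::uint64_t;

    constexpr std::size_t RegionAlignment = 2ULL << 20; // 2 MiB

    // Illustrative stand-in for KSystemControl::GenerateRandomRange().
    std::size_t GenerateRandomRange(std::size_t min, std::size_t max) {
        static std::mt19937_64 rng{std::random_device{}()};
        return std::uniform_int_distribution<std::size_t>{min, max}(rng);
    }

    // Pick an aligned random base for a region inside the leftover span
    // [alloc_start, alloc_start + remaining_size), as the hunk above does.
    VAddr PlaceRegion(VAddr alloc_start, std::size_t remaining_size) {
        const std::size_t rnd =
            GenerateRandomRange(0, remaining_size / RegionAlignment) * RegionAlignment;
        return alloc_start + rnd;
    }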
@@ -265,52 +279,76 @@ Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryStat
     R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);

     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);

     // Verify that the destination memory is unmapped.
     R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
                                  KMemoryPermission::None, KMemoryPermission::None,
                                  KMemoryAttribute::None, KMemoryAttribute::None));
+
+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager);
+
+    // Allocate and open.
     KPageGroup pg;
-    R_TRY(system.Kernel().MemoryManager().AllocateAndOpen(
+    R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
         &pg, num_pages,
-        KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, allocation_option)));
+        KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocation_option)));

     R_TRY(Operate(addr, num_pages, pg, OperationType::MapGroup));

-    block_manager->Update(addr, num_pages, state, perm);
+    // Update the blocks.
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);

-    return ResultSuccess;
+    R_SUCCEED();
 }

-Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) {
+Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size) {
     // Validate the mapping request.
     R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
              ResultInvalidMemoryRegion);

     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);

     // Verify that the source memory is normal heap.
     KMemoryState src_state{};
     KMemoryPermission src_perm{};
-    std::size_t num_src_allocator_blocks{};
+    size_t num_src_allocator_blocks{};
     R_TRY(this->CheckMemoryState(&src_state, &src_perm, nullptr, &num_src_allocator_blocks,
                                  src_address, size, KMemoryState::All, KMemoryState::Normal,
                                  KMemoryPermission::All, KMemoryPermission::UserReadWrite,
                                  KMemoryAttribute::All, KMemoryAttribute::None));

     // Verify that the destination memory is unmapped.
-    std::size_t num_dst_allocator_blocks{};
+    size_t num_dst_allocator_blocks{};
     R_TRY(this->CheckMemoryState(&num_dst_allocator_blocks, dst_address, size, KMemoryState::All,
                                  KMemoryState::Free, KMemoryPermission::None,
                                  KMemoryPermission::None, KMemoryAttribute::None,
                                  KMemoryAttribute::None));

+    // Create an update allocator for the source.
+    Result src_allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+                                                     m_memory_block_slab_manager,
+                                                     num_src_allocator_blocks);
+    R_TRY(src_allocator_result);
+
+    // Create an update allocator for the destination.
+    Result dst_allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+                                                     m_memory_block_slab_manager,
+                                                     num_dst_allocator_blocks);
+    R_TRY(dst_allocator_result);
+
     // Map the code memory.
     {
         // Determine the number of pages being operated on.
-        const std::size_t num_pages = size / PageSize;
+        const size_t num_pages = size / PageSize;

         // Create page groups for the memory being mapped.
         KPageGroup pg;
@@ -335,33 +373,37 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size
         unprot_guard.Cancel();

         // Apply the memory block updates.
-        block_manager->Update(src_address, num_pages, src_state, new_perm,
-                              KMemoryAttribute::Locked);
-        block_manager->Update(dst_address, num_pages, KMemoryState::AliasCode, new_perm,
-                              KMemoryAttribute::None);
+        m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
+                                      src_state, new_perm, KMemoryAttribute::Locked,
+                                      KMemoryBlockDisableMergeAttribute::Locked,
+                                      KMemoryBlockDisableMergeAttribute::None);
+        m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
+                                      KMemoryState::AliasCode, new_perm, KMemoryAttribute::None,
+                                      KMemoryBlockDisableMergeAttribute::Normal,
+                                      KMemoryBlockDisableMergeAttribute::None);
     }

-    return ResultSuccess;
+    R_SUCCEED();
 }

-Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
+Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size,
                                    ICacheInvalidationStrategy icache_invalidation_strategy) {
     // Validate the mapping request.
     R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
              ResultInvalidMemoryRegion);

     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);

     // Verify that the source memory is locked normal heap.
-    std::size_t num_src_allocator_blocks{};
+    size_t num_src_allocator_blocks{};
     R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size,
                                  KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None,
                                  KMemoryPermission::None, KMemoryAttribute::All,
                                  KMemoryAttribute::Locked));

     // Verify that the destination memory is aliasable code.
-    std::size_t num_dst_allocator_blocks{};
+    size_t num_dst_allocator_blocks{};
     R_TRY(this->CheckMemoryStateContiguous(
         std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias,
         KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
@@ -370,7 +412,7 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si
     // Determine whether any pages being unmapped are code.
     bool any_code_pages = false;
     {
-        KMemoryBlockManager::const_iterator it = block_manager->FindIterator(dst_address);
+        KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address);
         while (true) {
             // Get the memory info.
             const KMemoryInfo info = it->GetMemoryInfo();
@@ -396,9 +438,9 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si
     SCOPE_EXIT({
         if (reprotected_pages && any_code_pages) {
             if (icache_invalidation_strategy == ICacheInvalidationStrategy::InvalidateRange) {
-                system.InvalidateCpuInstructionCacheRange(dst_address, size);
+                m_system.InvalidateCpuInstructionCacheRange(dst_address, size);
             } else {
-                system.InvalidateCpuInstructionCaches();
+                m_system.InvalidateCpuInstructionCaches();
             }
         }
     });
@@ -406,7 +448,21 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si
     // Unmap.
     {
         // Determine the number of pages being operated on.
-        const std::size_t num_pages = size / PageSize;
+        const size_t num_pages = size / PageSize;
+
+        // Create an update allocator for the source.
+        Result src_allocator_result{ResultSuccess};
+        KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+                                                         m_memory_block_slab_manager,
+                                                         num_src_allocator_blocks);
+        R_TRY(src_allocator_result);
+
+        // Create an update allocator for the destination.
+        Result dst_allocator_result{ResultSuccess};
+        KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+                                                         m_memory_block_slab_manager,
+                                                         num_dst_allocator_blocks);
+        R_TRY(dst_allocator_result);

         // Unmap the aliased copy of the pages.
         R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap));
@@ -416,73 +472,34 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si
416 OperationType::ChangePermissions)); 472 OperationType::ChangePermissions));
417 473
418 // Apply the memory block updates. 474 // Apply the memory block updates.
419 block_manager->Update(dst_address, num_pages, KMemoryState::None); 475 m_memory_block_manager.Update(
420 block_manager->Update(src_address, num_pages, KMemoryState::Normal, 476 std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
421 KMemoryPermission::UserReadWrite); 477 KMemoryPermission::None, KMemoryAttribute::None,
478 KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
479 m_memory_block_manager.Update(
480 std::addressof(src_allocator), src_address, num_pages, KMemoryState::Normal,
481 KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
482 KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
422 483
423 // Note that we reprotected pages. 484 // Note that we reprotected pages.
424 reprotected_pages = true; 485 reprotected_pages = true;
425 } 486 }
426 487
427 return ResultSuccess; 488 R_SUCCEED();
428} 489}
429 490
-VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
-                               std::size_t num_pages, std::size_t alignment, std::size_t offset,
-                               std::size_t guard_pages) {
+VAddr KPageTable::FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
+                               size_t alignment, size_t offset, size_t guard_pages) {
     VAddr address = 0;
 
     if (num_pages <= region_num_pages) {
         if (this->IsAslrEnabled()) {
-            // Try to directly find a free area up to 8 times.
-            for (std::size_t i = 0; i < 8; i++) {
-                const std::size_t random_offset =
-                    KSystemControl::GenerateRandomRange(
-                        0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) *
-                    alignment;
-                const VAddr candidate =
-                    Common::AlignDown((region_start + random_offset), alignment) + offset;
-
-                KMemoryInfo info = this->QueryInfoImpl(candidate);
-
-                if (info.state != KMemoryState::Free) {
-                    continue;
-                }
-                if (region_start > candidate) {
-                    continue;
-                }
-                if (info.GetAddress() + guard_pages * PageSize > candidate) {
-                    continue;
-                }
-
-                const VAddr candidate_end = candidate + (num_pages + guard_pages) * PageSize - 1;
-                if (candidate_end > info.GetLastAddress()) {
-                    continue;
-                }
-                if (candidate_end > region_start + region_num_pages * PageSize - 1) {
-                    continue;
-                }
-
-                address = candidate;
-                break;
-            }
-            // Fall back to finding the first free area with a random offset.
-            if (address == 0) {
-                // NOTE: Nintendo does not account for guard pages here.
-                // This may theoretically cause an offset to be chosen that cannot be mapped. We
-                // will account for guard pages.
-                const std::size_t offset_pages = KSystemControl::GenerateRandomRange(
-                    0, region_num_pages - num_pages - guard_pages);
-                address = block_manager->FindFreeArea(region_start + offset_pages * PageSize,
-                                                      region_num_pages - offset_pages, num_pages,
-                                                      alignment, offset, guard_pages);
-            }
+            UNIMPLEMENTED();
         }
-
         // Find the first free area.
         if (address == 0) {
-            address = block_manager->FindFreeArea(region_start, region_num_pages, num_pages,
-                                                  alignment, offset, guard_pages);
+            address = m_memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages,
+                                                          alignment, offset, guard_pages);
        }
    }
 
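For reference, the ASLR path deleted above derived each candidate by aligning a random offset down to the requested alignment and rejecting any candidate whose guarded range leaked out of the region. A compact restatement of that arithmetic, with illustrative helper names rather than yuzu APIs:

    #include <cstddef>
    #include <cstdint>

    using VAddr = std::uint64_t;
    constexpr std::size_t PageSize = 0x1000;

    constexpr VAddr AlignDown(VAddr value, std::size_t align) {
        return value - (value % align);
    }
    static_assert(AlignDown(0x1234, 0x1000) == 0x1000);

    // Mirrors the candidate checks from the removed ASLR loop: the candidate plus
    // its guard pages must still fit entirely inside the search region.
    bool CandidateFits(VAddr region_start, std::size_t region_num_pages, VAddr candidate,
                       std::size_t num_pages, std::size_t guard_pages) {
        const VAddr region_last = region_start + region_num_pages * PageSize - 1;
        const VAddr candidate_end = candidate + (num_pages + guard_pages) * PageSize - 1;
        return candidate >= region_start && candidate_end <= region_last;
    }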
@@ -500,7 +517,8 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     // Begin traversal.
     Common::PageTable::TraversalContext context;
     Common::PageTable::TraversalEntry next_entry;
-    R_UNLESS(page_table_impl.BeginTraversal(next_entry, context, addr), ResultInvalidCurrentMemory);
+    R_UNLESS(m_page_table_impl->BeginTraversal(next_entry, context, addr),
+             ResultInvalidCurrentMemory);
 
     // Prepare tracking variables.
     PAddr cur_addr = next_entry.phys_addr;
@@ -508,9 +526,9 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     size_t tot_size = cur_size;
 
     // Iterate, adding to group as we go.
-    const auto& memory_layout = system.Kernel().MemoryLayout();
+    const auto& memory_layout = m_system.Kernel().MemoryLayout();
     while (tot_size < size) {
-        R_UNLESS(page_table_impl.ContinueTraversal(next_entry, context),
+        R_UNLESS(m_page_table_impl->ContinueTraversal(next_entry, context),
                  ResultInvalidCurrentMemory);
 
         if (next_entry.phys_addr != (cur_addr + cur_size)) {
@@ -538,7 +556,7 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
     R_TRY(pg.AddBlock(cur_addr, cur_pages));
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) {
@@ -546,7 +564,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu
 
     const size_t size = num_pages * PageSize;
     const auto& pg = pg_ll.Nodes();
-    const auto& memory_layout = system.Kernel().MemoryLayout();
+    const auto& memory_layout = m_system.Kernel().MemoryLayout();
 
     // Empty groups are necessarily invalid.
     if (pg.empty()) {
@@ -573,7 +591,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu
     // Begin traversal.
     Common::PageTable::TraversalContext context;
     Common::PageTable::TraversalEntry next_entry;
-    if (!page_table_impl.BeginTraversal(next_entry, context, addr)) {
+    if (!m_page_table_impl->BeginTraversal(next_entry, context, addr)) {
         return false;
     }
 
@@ -584,7 +602,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu
 
     // Iterate, comparing expected to actual.
     while (tot_size < size) {
-        if (!page_table_impl.ContinueTraversal(next_entry, context)) {
+        if (!m_page_table_impl->ContinueTraversal(next_entry, context)) {
             return false;
         }
 
@@ -630,11 +648,11 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu
     return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
 }
 
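MakePageGroup's traversal loop only calls pg.AddBlock() when the next traversal entry is not physically contiguous with the current run, so each block in the group is a maximal contiguous extent; IsValidPageGroup replays the same walk and compares block by block. The coalescing step in isolation, over a plain list of extents (types here are illustrative):

    #include <cstdint>
    #include <vector>

    using PAddr = std::uint64_t;

    struct Extent {
        PAddr addr;
        std::uint64_t size;
    };

    // Merge physically contiguous extents into maximal runs, the same shape of
    // loop MakePageGroup uses while traversing the page-table impl.
    std::vector<Extent> CoalesceRuns(const std::vector<Extent>& entries) {
        std::vector<Extent> runs;
        for (const Extent& e : entries) {
            if (!runs.empty() && runs.back().addr + runs.back().size == e.addr) {
                runs.back().size += e.size; // extends the current contiguous run
            } else {
                runs.push_back(e); // discontiguity: start a new block
            }
        }
        return runs;
    }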
-Result KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
+Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table,
                                       VAddr src_addr) {
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
-    const std::size_t num_pages{size / PageSize};
+    const size_t num_pages{size / PageSize};
 
     // Check that the memory is mapped in the destination process.
     size_t num_allocator_blocks;
@@ -649,43 +667,51 @@ Result KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTab
                                  KMemoryPermission::None, KMemoryAttribute::All,
                                  KMemoryAttribute::None));
 
+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
     CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap));
 
     // Apply the memory block update.
-    block_manager->Update(dst_addr, num_pages, KMemoryState::Free, KMemoryPermission::None,
-                          KMemoryAttribute::None);
+    m_memory_block_manager.Update(std::addressof(allocator), dst_addr, num_pages,
+                                  KMemoryState::Free, KMemoryPermission::None,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::Normal);
 
-    system.InvalidateCpuInstructionCaches();
+    m_system.InvalidateCpuInstructionCaches();
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
-Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
+Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
     // Lock the physical memory lock.
-    KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
+    KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
 
     // Calculate the last address for convenience.
     const VAddr last_address = address + size - 1;
 
     // Define iteration variables.
     VAddr cur_address;
-    std::size_t mapped_size;
+    size_t mapped_size;
 
     // The entire mapping process can be retried.
     while (true) {
         // Check if the memory is already mapped.
         {
             // Lock the table.
-            KScopedLightLock lk(general_lock);
+            KScopedLightLock lk(m_general_lock);
 
             // Iterate over the memory.
             cur_address = address;
             mapped_size = 0;
 
-            auto it = block_manager->FindIterator(cur_address);
+            auto it = m_memory_block_manager.FindIterator(cur_address);
             while (true) {
                 // Check that the iterator is valid.
-                ASSERT(it != block_manager->end());
+                ASSERT(it != m_memory_block_manager.end());
 
                 // Get the memory info.
                 const KMemoryInfo info = it->GetMemoryInfo();
@@ -716,20 +742,20 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
         {
             // Reserve the memory from the process resource limit.
             KScopedResourceReservation memory_reservation(
-                system.Kernel().CurrentProcess()->GetResourceLimit(),
+                m_system.Kernel().CurrentProcess()->GetResourceLimit(),
                 LimitableResource::PhysicalMemory, size - mapped_size);
             R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
 
             // Allocate pages for the new memory.
             KPageGroup pg;
-            R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess(
+            R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess(
                 &pg, (size - mapped_size) / PageSize,
-                KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0));
+                KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0));
 
             // Map the memory.
             {
                 // Lock the table.
-                KScopedLightLock lk(general_lock);
+                KScopedLightLock lk(m_general_lock);
 
                 size_t num_allocator_blocks = 0;
 
@@ -739,10 +765,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
                     size_t checked_mapped_size = 0;
                     cur_address = address;
 
-                    auto it = block_manager->FindIterator(cur_address);
+                    auto it = m_memory_block_manager.FindIterator(cur_address);
                     while (true) {
                         // Check that the iterator is valid.
-                        ASSERT(it != block_manager->end());
+                        ASSERT(it != m_memory_block_manager.end());
 
                         // Get the memory info.
                         const KMemoryInfo info = it->GetMemoryInfo();
@@ -782,6 +808,14 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
                     }
                 }
 
+                // Create an update allocator.
+                ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
+                Result allocator_result{ResultSuccess};
+                KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                             m_memory_block_slab_manager,
+                                                             num_allocator_blocks);
+                R_TRY(allocator_result);
+
                 // Reset the current tracking address, and make sure we clean up on failure.
                 cur_address = address;
                 auto unmap_guard = detail::ScopeExit([&] {
@@ -791,10 +825,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
                         // Iterate, unmapping the pages.
                         cur_address = address;
 
-                        auto it = block_manager->FindIterator(cur_address);
+                        auto it = m_memory_block_manager.FindIterator(cur_address);
                         while (true) {
                             // Check that the iterator is valid.
-                            ASSERT(it != block_manager->end());
+                            ASSERT(it != m_memory_block_manager.end());
 
                             // Get the memory info.
                             const KMemoryInfo info = it->GetMemoryInfo();
@@ -830,10 +864,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
                 PAddr pg_phys_addr = pg_it->GetAddress();
                 size_t pg_pages = pg_it->GetNumPages();
 
-                auto it = block_manager->FindIterator(cur_address);
+                auto it = m_memory_block_manager.FindIterator(cur_address);
                 while (true) {
                     // Check that the iterator is valid.
-                    ASSERT(it != block_manager->end());
+                    ASSERT(it != m_memory_block_manager.end());
 
                     // Get the memory info.
                     const KMemoryInfo info = it->GetMemoryInfo();
@@ -886,37 +920,37 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
                 memory_reservation.Commit();
 
                 // Increase our tracked mapped size.
-                mapped_physical_memory_size += (size - mapped_size);
+                m_mapped_physical_memory_size += (size - mapped_size);
 
                 // Update the relevant memory blocks.
-                block_manager->Update(address, size / PageSize, KMemoryState::Free,
-                                      KMemoryPermission::None, KMemoryAttribute::None,
-                                      KMemoryState::Normal, KMemoryPermission::UserReadWrite,
-                                      KMemoryAttribute::None);
+                m_memory_block_manager.UpdateIfMatch(
+                    std::addressof(allocator), address, size / PageSize, KMemoryState::Free,
+                    KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal,
+                    KMemoryPermission::UserReadWrite, KMemoryAttribute::None);
 
                 // Cancel our guard.
                 unmap_guard.Cancel();
 
-                return ResultSuccess;
+                R_SUCCEED();
             }
         }
     }
 }
 
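MapPhysicalMemory reserves against the process resource limit before allocating anything, and only memory_reservation.Commit()s once the mapping has fully succeeded; on any failure path the reservation is handed back when the object leaves scope. A stand-alone sketch of that commit-or-release shape (a simplified stand-in, not the real KScopedResourceReservation):

    #include <cstddef>

    class ResourceLimit {
    public:
        explicit ResourceLimit(std::size_t cap) : m_cap(cap) {}
        bool Reserve(std::size_t bytes) {
            if (m_used + bytes > m_cap) {
                return false;
            }
            m_used += bytes;
            return true;
        }
        void Release(std::size_t bytes) { m_used -= bytes; }

    private:
        std::size_t m_cap;
        std::size_t m_used = 0;
    };

    // Commit-or-release guard: the reservation is returned on destruction
    // unless Commit() was reached on the success path.
    class ScopedReservation {
    public:
        ScopedReservation(ResourceLimit* limit, std::size_t bytes)
            : m_limit(limit), m_bytes(bytes), m_succeeded(limit->Reserve(bytes)) {}
        ~ScopedReservation() {
            if (m_succeeded && !m_committed) {
                m_limit->Release(m_bytes); // failure path: give the memory back
            }
        }
        bool Succeeded() const { return m_succeeded; }
        void Commit() { m_committed = true; } // success path: keep the reservation

    private:
        ResourceLimit* m_limit;
        std::size_t m_bytes;
        bool m_succeeded;
        bool m_committed = false;
    };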
-Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
+Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
     // Lock the physical memory lock.
-    KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
+    KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Calculate the last address for convenience.
     const VAddr last_address = address + size - 1;
 
     // Define iteration variables.
     VAddr cur_address = 0;
-    std::size_t mapped_size = 0;
-    std::size_t num_allocator_blocks = 0;
+    size_t mapped_size = 0;
+    size_t num_allocator_blocks = 0;
 
     // Check if the memory is mapped.
     {
@@ -924,10 +958,10 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
         cur_address = address;
         mapped_size = 0;
 
-        auto it = block_manager->FindIterator(cur_address);
+        auto it = m_memory_block_manager.FindIterator(cur_address);
         while (true) {
             // Check that the iterator is valid.
-            ASSERT(it != block_manager->end());
+            ASSERT(it != m_memory_block_manager.end());
 
             // Get the memory info.
             const KMemoryInfo info = it->GetMemoryInfo();
@@ -1022,6 +1056,13 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
     }
     ASSERT(pg.GetNumPages() == mapped_size / PageSize);
 
+    // Create an update allocator.
+    ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
     // Reset the current tracking address, and make sure we clean up on failure.
     cur_address = address;
     auto remap_guard = detail::ScopeExit([&] {
@@ -1030,7 +1071,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
             cur_address = address;
 
             // Iterate over the memory we unmapped.
-            auto it = block_manager->FindIterator(cur_address);
+            auto it = m_memory_block_manager.FindIterator(cur_address);
             auto pg_it = pg.Nodes().begin();
             PAddr pg_phys_addr = pg_it->GetAddress();
             size_t pg_pages = pg_it->GetNumPages();
@@ -1085,10 +1126,10 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
     });
 
     // Iterate over the memory, unmapping as we go.
-    auto it = block_manager->FindIterator(cur_address);
+    auto it = m_memory_block_manager.FindIterator(cur_address);
     while (true) {
         // Check that the iterator is valid.
-        ASSERT(it != block_manager->end());
+        ASSERT(it != m_memory_block_manager.end());
 
         // Get the memory info.
         const KMemoryInfo info = it->GetMemoryInfo();
@@ -1115,104 +1156,159 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
     }
 
     // Release the memory resource.
-    mapped_physical_memory_size -= mapped_size;
-    auto process{system.Kernel().CurrentProcess()};
+    m_mapped_physical_memory_size -= mapped_size;
+    auto process{m_system.Kernel().CurrentProcess()};
     process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size);
 
     // Update memory blocks.
-    block_manager->Update(address, size / PageSize, KMemoryState::Free, KMemoryPermission::None,
-                          KMemoryAttribute::None);
+    m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize,
                                  KMemoryState::Free, KMemoryPermission::None,
                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
                                  KMemoryBlockDisableMergeAttribute::None);
 
     // TODO(bunnei): This is a workaround until the next set of changes, where we add reference
     // counting for mapped pages. Until then, we must manually close the reference to the page
     // group.
-    system.Kernel().MemoryManager().Close(pg);
+    m_system.Kernel().MemoryManager().Close(pg);
 
     // We succeeded.
     remap_guard.Cancel();
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
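Both physical-memory paths hang their failure handling off detail::ScopeExit guards (unmap_guard above, remap_guard here): the guard's callback undoes partial work unless Cancel() is reached at the end of the happy path. A minimal cancelable guard with the same contract, as a C++17 sketch rather than yuzu's actual implementation:

    #include <utility>

    // Runs the callback on scope exit unless Cancel() was called first.
    template <typename F>
    class ScopeExit {
    public:
        explicit ScopeExit(F&& f) : m_f(std::forward<F>(f)) {}
        ~ScopeExit() {
            if (m_active) {
                m_f();
            }
        }
        void Cancel() { m_active = false; }

        ScopeExit(const ScopeExit&) = delete;
        ScopeExit& operator=(const ScopeExit&) = delete;

    private:
        F m_f;
        bool m_active = true;
    };

    // Usage mirrors the kernel code: arm the rollback, perform fallible work,
    // then cancel the guard once every step has succeeded.
    //   auto guard = ScopeExit([&] { /* remap or unmap the partial work */ });
    //   ...fallible operations...
    //   guard.Cancel();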
-Result KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
-    KScopedLightLock lk(general_lock);
-
-    KMemoryState src_state{};
-    CASCADE_CODE(CheckMemoryState(
-        &src_state, nullptr, nullptr, nullptr, src_addr, size, KMemoryState::FlagCanAlias,
-        KMemoryState::FlagCanAlias, KMemoryPermission::All, KMemoryPermission::UserReadWrite,
-        KMemoryAttribute::Mask, KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
+Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size) {
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Validate that the source address's state is valid.
+    KMemoryState src_state;
+    size_t num_src_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr,
+                                 std::addressof(num_src_allocator_blocks), src_address, size,
+                                 KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
+                                 KMemoryPermission::All, KMemoryPermission::UserReadWrite,
+                                 KMemoryAttribute::All, KMemoryAttribute::None));
 
-    if (IsRegionMapped(dst_addr, size)) {
-        return ResultInvalidCurrentMemory;
-    }
+    // Validate that the dst address's state is valid.
+    size_t num_dst_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size,
+                                 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+                                 KMemoryPermission::None, KMemoryAttribute::None,
+                                 KMemoryAttribute::None));
 
+    // Create an update allocator for the source.
+    Result src_allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+                                                     m_memory_block_slab_manager,
+                                                     num_src_allocator_blocks);
+    R_TRY(src_allocator_result);
+
+    // Create an update allocator for the destination.
+    Result dst_allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+                                                     m_memory_block_slab_manager,
+                                                     num_dst_allocator_blocks);
+    R_TRY(dst_allocator_result);
+
+    // Map the memory.
     KPageGroup page_linked_list;
-    const std::size_t num_pages{size / PageSize};
-
-    AddRegionToPages(src_addr, num_pages, page_linked_list);
+    const size_t num_pages{size / PageSize};
+    const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
+        KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
+    const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked;
 
+    AddRegionToPages(src_address, num_pages, page_linked_list);
     {
+        // Reprotect the source as kernel-read/not mapped.
         auto block_guard = detail::ScopeExit([&] {
-            Operate(src_addr, num_pages, KMemoryPermission::UserReadWrite,
+            Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
                     OperationType::ChangePermissions);
         });
-
-        CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::None,
-                             OperationType::ChangePermissions));
-        CASCADE_CODE(MapPages(dst_addr, page_linked_list, KMemoryPermission::UserReadWrite));
+        R_TRY(Operate(src_address, num_pages, new_src_perm, OperationType::ChangePermissions));
+        R_TRY(MapPages(dst_address, page_linked_list, KMemoryPermission::UserReadWrite));
 
         block_guard.Cancel();
     }
 
-    block_manager->Update(src_addr, num_pages, src_state, KMemoryPermission::None,
-                          KMemoryAttribute::Locked);
-    block_manager->Update(dst_addr, num_pages, KMemoryState::Stack,
-                          KMemoryPermission::UserReadWrite);
-
-    return ResultSuccess;
+    // Apply the memory block updates.
+    m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state,
+                                  new_src_perm, new_src_attr,
+                                  KMemoryBlockDisableMergeAttribute::Locked,
+                                  KMemoryBlockDisableMergeAttribute::None);
+    m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
+                                  KMemoryState::Stack, KMemoryPermission::UserReadWrite,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);
+
+    R_SUCCEED();
 }
 
-Result KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
-    KScopedLightLock lk(general_lock);
+Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size) {
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Validate that the source address's state is valid.
+    KMemoryState src_state;
+    size_t num_src_allocator_blocks;
+    R_TRY(this->CheckMemoryState(
+        std::addressof(src_state), nullptr, nullptr, std::addressof(num_src_allocator_blocks),
+        src_address, size, KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
+        KMemoryPermission::All, KMemoryPermission::NotMapped | KMemoryPermission::KernelRead,
+        KMemoryAttribute::All, KMemoryAttribute::Locked));
+
+    // Validate that the dst address's state is valid.
+    KMemoryPermission dst_perm;
+    size_t num_dst_allocator_blocks;
+    R_TRY(this->CheckMemoryState(
+        nullptr, std::addressof(dst_perm), nullptr, std::addressof(num_dst_allocator_blocks),
+        dst_address, size, KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None,
+        KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None));
 
-    KMemoryState src_state{};
-    CASCADE_CODE(CheckMemoryState(
-        &src_state, nullptr, nullptr, nullptr, src_addr, size, KMemoryState::FlagCanAlias,
-        KMemoryState::FlagCanAlias, KMemoryPermission::All, KMemoryPermission::None,
-        KMemoryAttribute::Mask, KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped));
+    // Create an update allocator for the source.
+    Result src_allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+                                                     m_memory_block_slab_manager,
+                                                     num_src_allocator_blocks);
+    R_TRY(src_allocator_result);
 
-    KMemoryPermission dst_perm{};
-    CASCADE_CODE(CheckMemoryState(nullptr, &dst_perm, nullptr, nullptr, dst_addr, size,
-                                  KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None,
-                                  KMemoryPermission::None, KMemoryAttribute::Mask,
-                                  KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
+    // Create an update allocator for the destination.
+    Result dst_allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+                                                     m_memory_block_slab_manager,
+                                                     num_dst_allocator_blocks);
+    R_TRY(dst_allocator_result);
 
     KPageGroup src_pages;
     KPageGroup dst_pages;
-    const std::size_t num_pages{size / PageSize};
+    const size_t num_pages{size / PageSize};
 
-    AddRegionToPages(src_addr, num_pages, src_pages);
-    AddRegionToPages(dst_addr, num_pages, dst_pages);
+    AddRegionToPages(src_address, num_pages, src_pages);
+    AddRegionToPages(dst_address, num_pages, dst_pages);
 
-    if (!dst_pages.IsEqual(src_pages)) {
-        return ResultInvalidMemoryRegion;
-    }
+    R_UNLESS(dst_pages.IsEqual(src_pages), ResultInvalidMemoryRegion);
 
     {
-        auto block_guard = detail::ScopeExit([&] { MapPages(dst_addr, dst_pages, dst_perm); });
+        auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); });
 
-        CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap));
-        CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::UserReadWrite,
-                             OperationType::ChangePermissions));
+        R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap));
+        R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
+                      OperationType::ChangePermissions));
 
         block_guard.Cancel();
     }
 
-    block_manager->Update(src_addr, num_pages, src_state, KMemoryPermission::UserReadWrite);
-    block_manager->Update(dst_addr, num_pages, KMemoryState::Free);
-
-    return ResultSuccess;
+    // Apply the memory block updates.
+    m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state,
                                  KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
                                  KMemoryBlockDisableMergeAttribute::None,
                                  KMemoryBlockDisableMergeAttribute::Locked);
+    m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
+                                  KMemoryState::None, KMemoryPermission::None,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::Normal);
+
+    R_SUCCEED();
 }
 
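UnmapMemory refuses to proceed unless dst_pages.IsEqual(src_pages), i.e. unless the alias being removed still resolves to exactly the physical runs backing the source. A sketch of what such an ordered block-list comparison amounts to; the types and helper below are illustrative, the real check lives in KPageGroup:

    #include <cstdint>
    #include <list>

    struct BlockSketch {
        std::uint64_t addr;
        std::uint64_t num_pages;
    };

    // Two page groups match when they list the same physical runs in the same
    // order, which is the intent of the IsEqual check in the hunk above.
    bool GroupsEqual(const std::list<BlockSketch>& a, const std::list<BlockSketch>& b) {
        if (a.size() != b.size()) {
            return false;
        }
        auto ita = a.begin();
        auto itb = b.begin();
        for (; ita != a.end(); ++ita, ++itb) {
            if (ita->addr != itb->addr || ita->num_pages != itb->num_pages) {
                return false;
            }
        }
        return true;
    }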
 Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
@@ -1225,48 +1321,54 @@ Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
         if (const auto result{
                 Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())};
             result.IsError()) {
-            const std::size_t num_pages{(addr - cur_addr) / PageSize};
+            const size_t num_pages{(addr - cur_addr) / PageSize};
 
             ASSERT(Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap)
                        .IsSuccess());
 
-            return result;
+            R_RETURN(result);
         }
 
         cur_addr += node.GetNumPages() * PageSize;
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
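When one node fails mid-walk, this MapPages overload rolls back by unmapping only the prefix it already mapped, counted in pages from the base address. The same control flow in isolation, with the map/unmap primitives passed in as callbacks; every name here is hypothetical:

    #include <cstdint>
    #include <functional>
    #include <vector>

    constexpr std::uint64_t kPageSize = 0x1000; // assumption for the sketch

    struct NodeSketch {
        std::uint64_t num_pages;
    };

    // On a mid-iteration failure, unmap exactly the already-mapped prefix:
    // (cur_addr - base) / kPageSize pages starting at base.
    bool MapGroup(std::uint64_t base, const std::vector<NodeSketch>& nodes,
                  const std::function<bool(std::uint64_t, const NodeSketch&)>& map_one,
                  const std::function<void(std::uint64_t, std::uint64_t)>& unmap_range) {
        std::uint64_t cur_addr = base;
        for (const NodeSketch& node : nodes) {
            if (!map_one(cur_addr, node)) {
                unmap_range(base, (cur_addr - base) / kPageSize);
                return false;
            }
            cur_addr += node.num_pages * kPageSize;
        }
        return true;
    }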
 Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state,
                             KMemoryPermission perm) {
     // Check that the map is in range.
-    const std::size_t num_pages{page_linked_list.GetNumPages()};
-    const std::size_t size{num_pages * PageSize};
+    const size_t num_pages{page_linked_list.GetNumPages()};
+    const size_t size{num_pages * PageSize};
     R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Check the memory state.
     R_TRY(this->CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free,
                                  KMemoryPermission::None, KMemoryPermission::None,
                                  KMemoryAttribute::None, KMemoryAttribute::None));
 
+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager);
+
     // Map the pages.
     R_TRY(MapPages(address, page_linked_list, perm));
 
     // Update the blocks.
-    block_manager->Update(address, num_pages, state, perm);
+    m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm,
                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
                                  KMemoryBlockDisableMergeAttribute::None);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
-Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
-                            PAddr phys_addr, bool is_pa_valid, VAddr region_start,
-                            std::size_t region_num_pages, KMemoryState state,
-                            KMemoryPermission perm) {
+Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
+                            bool is_pa_valid, VAddr region_start, size_t region_num_pages,
+                            KMemoryState state, KMemoryPermission perm) {
     ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);
 
     // Ensure this is a valid map request.
@@ -1275,7 +1377,7 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t
     R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Find a random address to map at.
     VAddr addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0,
@@ -1288,6 +1390,11 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t
                                  KMemoryAttribute::None, KMemoryAttribute::None)
                .IsSuccess());
 
+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager);
+
     // Perform mapping operation.
     if (is_pa_valid) {
         R_TRY(this->Operate(addr, num_pages, perm, OperationType::Map, phys_addr));
@@ -1296,11 +1403,13 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t
     }
 
     // Update the blocks.
-    block_manager->Update(addr, num_pages, state, perm);
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
                                  KMemoryBlockDisableMergeAttribute::None);
 
     // We successfully mapped the pages.
     *out_addr = addr;
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
@@ -1312,60 +1421,80 @@ Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
         if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None,
                                       OperationType::Unmap)};
             result.IsError()) {
-            return result;
+            R_RETURN(result);
         }
 
         cur_addr += node.GetNumPages() * PageSize;
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
-Result KPageTable::UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state) {
+Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state) {
     // Check that the unmap is in range.
-    const std::size_t num_pages{page_linked_list.GetNumPages()};
-    const std::size_t size{num_pages * PageSize};
-    R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
+    const size_t num_pages{page_linked_list.GetNumPages()};
+    const size_t size{num_pages * PageSize};
+    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Check the memory state.
-    R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, state, KMemoryPermission::None,
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+                                 KMemoryState::All, state, KMemoryPermission::None,
                                  KMemoryPermission::None, KMemoryAttribute::All,
                                  KMemoryAttribute::None));
 
+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
     // Perform the unmap.
-    R_TRY(UnmapPages(addr, page_linked_list));
+    R_TRY(UnmapPages(address, page_linked_list));
 
     // Update the blocks.
-    block_manager->Update(addr, num_pages, state, KMemoryPermission::None);
+    m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
                                  KMemoryPermission::None, KMemoryAttribute::None,
                                  KMemoryBlockDisableMergeAttribute::None,
                                  KMemoryBlockDisableMergeAttribute::Normal);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
-Result KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state) {
+Result KPageTable::UnmapPages(VAddr address, size_t num_pages, KMemoryState state) {
     // Check that the unmap is in range.
-    const std::size_t size = num_pages * PageSize;
+    const size_t size = num_pages * PageSize;
     R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Check the memory state.
-    std::size_t num_allocator_blocks{};
+    size_t num_allocator_blocks{};
     R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
                                  KMemoryState::All, state, KMemoryPermission::None,
                                  KMemoryPermission::None, KMemoryAttribute::All,
                                  KMemoryAttribute::None));
 
+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
     // Perform the unmap.
     R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap));
 
     // Update the blocks.
-    block_manager->Update(address, num_pages, KMemoryState::Free, KMemoryPermission::None);
+    m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
                                  KMemoryPermission::None, KMemoryAttribute::None,
                                  KMemoryBlockDisableMergeAttribute::None,
                                  KMemoryBlockDisableMergeAttribute::Normal);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
@@ -1380,7 +1509,7 @@ Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t n
     R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Check if state allows us to create the group.
     R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted,
@@ -1390,15 +1519,15 @@ Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t n
     // Create a new page group for the region.
     R_TRY(this->MakePageGroup(*out, address, num_pages));
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
-Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
+Result KPageTable::SetProcessMemoryPermission(VAddr addr, size_t size,
                                               Svc::MemoryPermission svc_perm) {
     const size_t num_pages = size / PageSize;
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Verify we can change the memory permission.
     KMemoryState old_state;
@@ -1435,105 +1564,101 @@ Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
     // Succeed if there's nothing to do.
     R_SUCCEED_IF(old_perm == new_perm && old_state == new_state);
 
+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
     // Perform mapping operation.
     const auto operation =
         was_x ? OperationType::ChangePermissionsAndRefresh : OperationType::ChangePermissions;
     R_TRY(Operate(addr, num_pages, new_perm, operation));
 
     // Update the blocks.
-    block_manager->Update(addr, num_pages, new_state, new_perm, KMemoryAttribute::None);
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm,
                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
                                  KMemoryBlockDisableMergeAttribute::None);
 
     // Ensure cache coherency, if we're setting pages as executable.
     if (is_x) {
-        system.InvalidateCpuInstructionCacheRange(addr, size);
+        m_system.InvalidateCpuInstructionCacheRange(addr, size);
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 KMemoryInfo KPageTable::QueryInfoImpl(VAddr addr) {
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
-    return block_manager->FindBlock(addr).GetMemoryInfo();
+    return m_memory_block_manager.FindBlock(addr)->GetMemoryInfo();
 }
 
 KMemoryInfo KPageTable::QueryInfo(VAddr addr) {
     if (!Contains(addr, 1)) {
-        return {address_space_end, 0 - address_space_end, KMemoryState::Inaccessible,
-                KMemoryPermission::None, KMemoryAttribute::None, KMemoryPermission::None};
+        return {
+            .m_address = m_address_space_end,
+            .m_size = 0 - m_address_space_end,
+            .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible),
+            .m_device_disable_merge_left_count = 0,
+            .m_device_disable_merge_right_count = 0,
+            .m_ipc_lock_count = 0,
+            .m_device_use_count = 0,
+            .m_ipc_disable_merge_count = 0,
+            .m_permission = KMemoryPermission::None,
+            .m_attribute = KMemoryAttribute::None,
+            .m_original_permission = KMemoryPermission::None,
+            .m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None,
+        };
     }
 
     return QueryInfoImpl(addr);
 }
 
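For an out-of-range query, QueryInfo now fabricates a designated-initializer pseudo-block spanning everything from m_address_space_end to the top of the 64-bit range; with unsigned arithmetic, `0 - m_address_space_end` wraps around to exactly the byte count of that tail. The arithmetic checked in isolation:

    #include <cstdint>

    // Size of the pseudo-block [address_space_end, 2^64): unsigned wraparound
    // makes 0 - end the byte count of that tail region.
    constexpr std::uint64_t TailSize(std::uint64_t address_space_end) {
        return std::uint64_t{0} - address_space_end;
    }

    static_assert(TailSize(0x0000'8000'0000'0000ULL) == 0xFFFF'8000'0000'0000ULL);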
-Result KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm) {
-    KScopedLightLock lk(general_lock);
-
-    KMemoryState state{};
-    KMemoryAttribute attribute{};
-
-    R_TRY(CheckMemoryState(&state, nullptr, &attribute, nullptr, addr, size,
-                           KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
-                           KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
-                           KMemoryPermission::All, KMemoryPermission::UserReadWrite,
-                           KMemoryAttribute::Mask, KMemoryAttribute::None,
-                           KMemoryAttribute::IpcAndDeviceMapped));
-
-    block_manager->Update(addr, size / PageSize, state, perm, attribute | KMemoryAttribute::Locked);
-
-    return ResultSuccess;
-}
-
-Result KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
-    KScopedLightLock lk(general_lock);
-
-    KMemoryState state{};
-
-    R_TRY(CheckMemoryState(&state, nullptr, nullptr, nullptr, addr, size,
-                           KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
-                           KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
-                           KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::Mask,
-                           KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped));
-
-    block_manager->Update(addr, size / PageSize, state, KMemoryPermission::UserReadWrite);
-    return ResultSuccess;
-}
-
-Result KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
-                                       Svc::MemoryPermission svc_perm) {
+Result KPageTable::SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm) {
     const size_t num_pages = size / PageSize;
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Verify we can change the memory permission.
     KMemoryState old_state;
     KMemoryPermission old_perm;
-    R_TRY(this->CheckMemoryState(
-        std::addressof(old_state), std::addressof(old_perm), nullptr, nullptr, addr, size,
-        KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect, KMemoryPermission::None,
-        KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None));
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr,
+                                 std::addressof(num_allocator_blocks), addr, size,
+                                 KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect,
+                                 KMemoryPermission::None, KMemoryPermission::None,
+                                 KMemoryAttribute::All, KMemoryAttribute::None));
 
     // Determine new perm.
     const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
     R_SUCCEED_IF(old_perm == new_perm);
 
+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
     // Perform mapping operation.
     R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
 
     // Update the blocks.
-    block_manager->Update(addr, num_pages, old_state, new_perm, KMemoryAttribute::None);
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
                                  KMemoryBlockDisableMergeAttribute::None);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
-Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr) {
+Result KPageTable::SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr) {
     const size_t num_pages = size / PageSize;
     ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) ==
            KMemoryAttribute::SetMask);
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Verify we can change the memory attribute.
     KMemoryState old_state;
@@ -1548,6 +1673,12 @@ Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u3
         KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
         AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
 
+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
     // Determine the new attribute.
     const KMemoryAttribute new_attr =
         static_cast<KMemoryAttribute>(((old_attr & static_cast<KMemoryAttribute>(~mask)) |
@@ -1557,123 +1688,142 @@ Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u3
     this->Operate(addr, num_pages, old_perm, OperationType::ChangePermissionsAndRefresh);
 
     // Update the blocks.
-    block_manager->Update(addr, num_pages, old_state, old_perm, new_attr);
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, old_perm,
                                  new_attr, KMemoryBlockDisableMergeAttribute::None,
                                  KMemoryBlockDisableMergeAttribute::None);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
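SetMemoryAttribute's new_attr computation is the classic mask-merge: bits selected by mask come from attr, all other bits keep their old value. The same expression in isolation:

    #include <cstdint>

    // Mask-merge used by SetMemoryAttribute: bits set in `mask` are taken from
    // `attr`, all other bits retain their previous value.
    constexpr std::uint32_t MergeAttribute(std::uint32_t old_attr, std::uint32_t mask,
                                           std::uint32_t attr) {
        return (old_attr & ~mask) | (attr & mask);
    }

    static_assert(MergeAttribute(0b1010, 0b0110, 0b0100) == 0b1100);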
-Result KPageTable::SetMaxHeapSize(std::size_t size) {
+Result KPageTable::SetMaxHeapSize(size_t size) {
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Only process page tables are allowed to set heap size.
     ASSERT(!this->IsKernel());
 
-    max_heap_size = size;
+    m_max_heap_size = size;
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
-Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
+Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
     // Lock the physical memory mutex.
-    KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
+    KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
 
     // Try to perform a reduction in heap, instead of an extension.
     VAddr cur_address{};
-    std::size_t allocation_size{};
+    size_t allocation_size{};
     {
         // Lock the table.
-        KScopedLightLock lk(general_lock);
+        KScopedLightLock lk(m_general_lock);
 
         // Validate that setting heap size is possible at all.
-        R_UNLESS(!is_kernel, ResultOutOfMemory);
-        R_UNLESS(size <= static_cast<std::size_t>(heap_region_end - heap_region_start),
+        R_UNLESS(!m_is_kernel, ResultOutOfMemory);
+        R_UNLESS(size <= static_cast<size_t>(m_heap_region_end - m_heap_region_start),
                  ResultOutOfMemory);
-        R_UNLESS(size <= max_heap_size, ResultOutOfMemory);
+        R_UNLESS(size <= m_max_heap_size, ResultOutOfMemory);
 
         if (size < GetHeapSize()) {
             // The size being requested is less than the current size, so we need to free the end of
             // the heap.
 
             // Validate memory state.
-            std::size_t num_allocator_blocks;
+            size_t num_allocator_blocks;
             R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks),
-                                         heap_region_start + size, GetHeapSize() - size,
+                                         m_heap_region_start + size, GetHeapSize() - size,
                                          KMemoryState::All, KMemoryState::Normal,
                                          KMemoryPermission::All, KMemoryPermission::UserReadWrite,
                                          KMemoryAttribute::All, KMemoryAttribute::None));
 
+            // Create an update allocator.
+            Result allocator_result{ResultSuccess};
+            KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                         m_memory_block_slab_manager,
+                                                         num_allocator_blocks);
+            R_TRY(allocator_result);
+
             // Unmap the end of the heap.
             const auto num_pages = (GetHeapSize() - size) / PageSize;
-            R_TRY(Operate(heap_region_start + size, num_pages, KMemoryPermission::None,
+            R_TRY(Operate(m_heap_region_start + size, num_pages, KMemoryPermission::None,
                           OperationType::Unmap));
 
             // Release the memory from the resource limit.
-            system.Kernel().CurrentProcess()->GetResourceLimit()->Release(
+            m_system.Kernel().CurrentProcess()->GetResourceLimit()->Release(
                 LimitableResource::PhysicalMemory, num_pages * PageSize);
 
             // Apply the memory block update.
-            block_manager->Update(heap_region_start + size, num_pages, KMemoryState::Free,
-                                  KMemoryPermission::None, KMemoryAttribute::None);
+            m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size,
+                                          num_pages, KMemoryState::Free, KMemoryPermission::None,
+                                          KMemoryAttribute::None,
+                                          KMemoryBlockDisableMergeAttribute::None,
+                                          size == 0 ? KMemoryBlockDisableMergeAttribute::Normal
+                                                    : KMemoryBlockDisableMergeAttribute::None);
 
             // Update the current heap end.
-            current_heap_end = heap_region_start + size;
+            m_current_heap_end = m_heap_region_start + size;
 
             // Set the output.
-            *out = heap_region_start;
-            return ResultSuccess;
+            *out = m_heap_region_start;
+            R_SUCCEED();
         } else if (size == GetHeapSize()) {
             // The size requested is exactly the current size.
-            *out = heap_region_start;
-            return ResultSuccess;
+            *out = m_heap_region_start;
+            R_SUCCEED();
         } else {
             // We have to allocate memory. Determine how much to allocate and where while the table
             // is locked.
-            cur_address = current_heap_end;
+            cur_address = m_current_heap_end;
             allocation_size = size - GetHeapSize();
         }
     }
 
     // Reserve memory for the heap extension.
     KScopedResourceReservation memory_reservation(
-        system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
+        m_system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
         allocation_size);
     R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
 
     // Allocate pages for the heap extension.
     KPageGroup pg;
-    R_TRY(system.Kernel().MemoryManager().AllocateAndOpen(
+    R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
         &pg, allocation_size / PageSize,
-        KMemoryManager::EncodeOption(memory_pool, allocation_option)));
+        KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
 
     // Clear all the newly allocated pages.
     for (const auto& it : pg.Nodes()) {
-        std::memset(system.DeviceMemory().GetPointer(it.GetAddress()), heap_fill_value,
+        std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
                     it.GetSize());
     }
 
     // Map the pages.
     {
         // Lock the table.
-        KScopedLightLock lk(general_lock);
+        KScopedLightLock lk(m_general_lock);
 
         // Ensure that the heap hasn't changed since we began executing.
-        ASSERT(cur_address == current_heap_end);
+        ASSERT(cur_address == m_current_heap_end);
 
         // Check the memory state.
-        std::size_t num_allocator_blocks{};
-        R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), current_heap_end,
+        size_t num_allocator_blocks{};
+        R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), m_current_heap_end,
                                      allocation_size, KMemoryState::All, KMemoryState::Free,
                                      KMemoryPermission::None, KMemoryPermission::None,
                                      KMemoryAttribute::None, KMemoryAttribute::None));
 
+        // Create an update allocator.
1815 Result allocator_result{ResultSuccess};
1816 KMemoryBlockManagerUpdateAllocator allocator(
1817 std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
1818 R_TRY(allocator_result);
1819
1670 // Map the pages. 1820 // Map the pages.
1671 const auto num_pages = allocation_size / PageSize; 1821 const auto num_pages = allocation_size / PageSize;
1672 R_TRY(Operate(current_heap_end, num_pages, pg, OperationType::MapGroup)); 1822 R_TRY(Operate(m_current_heap_end, num_pages, pg, OperationType::MapGroup));
1673 1823
1674 // Clear all the newly allocated pages. 1824 // Clear all the newly allocated pages.
1675 for (std::size_t cur_page = 0; cur_page < num_pages; ++cur_page) { 1825 for (size_t cur_page = 0; cur_page < num_pages; ++cur_page) {
1676 std::memset(system.Memory().GetPointer(current_heap_end + (cur_page * PageSize)), 0, 1826 std::memset(m_system.Memory().GetPointer(m_current_heap_end + (cur_page * PageSize)), 0,
1677 PageSize); 1827 PageSize);
1678 } 1828 }
1679 1829
@@ -1681,133 +1831,172 @@ Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
1681 memory_reservation.Commit(); 1831 memory_reservation.Commit();
1682 1832
1683 // Apply the memory block update. 1833 // Apply the memory block update.
1684 block_manager->Update(current_heap_end, num_pages, KMemoryState::Normal, 1834 m_memory_block_manager.Update(
1685 KMemoryPermission::UserReadWrite, KMemoryAttribute::None); 1835 std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState::Normal,
1836 KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
1837 m_heap_region_start == m_current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal
1838 : KMemoryBlockDisableMergeAttribute::None,
1839 KMemoryBlockDisableMergeAttribute::None);
1686 1840
1687 // Update the current heap end. 1841 // Update the current heap end.
1688 current_heap_end = heap_region_start + size; 1842 m_current_heap_end = m_heap_region_start + size;
1689 1843
1690 // Set the output. 1844 // Set the output.
1691 *out = heap_region_start; 1845 *out = m_heap_region_start;
1692 return ResultSuccess; 1846 R_SUCCEED();
1693 } 1847 }
1694} 1848}
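
The recurring pattern in this change is the update allocator: CheckMemoryState now also reports how many memory blocks an update might need, a KMemoryBlockManagerUpdateAllocator pre-allocates that many from the slab manager while failure can still be reported cleanly, and only then is m_memory_block_manager mutated. A minimal self-contained sketch of that idea, with illustrative names rather than the real slab-backed API:

    #include <cstddef>
    #include <new>
    #include <vector>

    struct Block {}; // stand-in for KMemoryBlock

    // Pre-allocates the worst-case number of blocks an update can consume, so
    // the mutation that follows never has to handle allocation failure midway.
    class UpdateAllocator {
    public:
        explicit UpdateAllocator(std::size_t num_blocks) {
            m_blocks.reserve(num_blocks);
            for (std::size_t i = 0; i < num_blocks; ++i) {
                Block* const block = new (std::nothrow) Block;
                if (block == nullptr) {
                    return; // m_ok stays false; the caller bails out before mutating
                }
                m_blocks.push_back(block);
            }
            m_ok = true;
        }
        ~UpdateAllocator() {
            for (Block* const block : m_blocks) {
                delete block; // blocks the update did not claim are returned
            }
        }
        bool Succeeded() const { return m_ok; }
        Block* Allocate() { // infallible once Succeeded() returned true
            Block* const block = m_blocks.back();
            m_blocks.pop_back();
            return block;
        }
    private:
        std::vector<Block*> m_blocks;
        bool m_ok = false;
    };

    int main() {
        UpdateAllocator allocator(2);
        if (!allocator.Succeeded()) {
            return 1; // would map to R_TRY(allocator_result) in the real code
        }
        delete allocator.Allocate(); // ownership of claimed blocks moves to the caller
        return 0;
    }

The same check / pre-allocate / operate / update sequence repeats in the device-address-space functions and in LockMemoryAndOpen and UnlockMemory below.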
1695 1849
1696ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align, 1850ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_t align,
1697 bool is_map_only, VAddr region_start, 1851 bool is_map_only, VAddr region_start,
1698 std::size_t region_num_pages, KMemoryState state, 1852 size_t region_num_pages, KMemoryState state,
1699 KMemoryPermission perm, PAddr map_addr) { 1853 KMemoryPermission perm, PAddr map_addr) {
1700 KScopedLightLock lk(general_lock); 1854 KScopedLightLock lk(m_general_lock);
1701
1702 if (!CanContain(region_start, region_num_pages * PageSize, state)) {
1703 return ResultInvalidCurrentMemory;
1704 }
1705
1706 if (region_num_pages <= needed_num_pages) {
1707 return ResultOutOfMemory;
1708 }
1709 1855
1856 R_UNLESS(CanContain(region_start, region_num_pages * PageSize, state),
1857 ResultInvalidCurrentMemory);
1858 R_UNLESS(region_num_pages > needed_num_pages, ResultOutOfMemory);
1710 const VAddr addr{ 1859 const VAddr addr{
1711 AllocateVirtualMemory(region_start, region_num_pages, needed_num_pages, align)}; 1860 AllocateVirtualMemory(region_start, region_num_pages, needed_num_pages, align)};
1712 if (!addr) { 1861 R_UNLESS(addr, ResultOutOfMemory);
1713 return ResultOutOfMemory; 1862
1714 } 1863 // Create an update allocator.
1864 Result allocator_result{ResultSuccess};
1865 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
1866 m_memory_block_slab_manager);
1715 1867
1716 if (is_map_only) { 1868 if (is_map_only) {
1717 R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)); 1869 R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
1718 } else { 1870 } else {
1719 KPageGroup page_group; 1871 KPageGroup page_group;
1720 R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess( 1872 R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess(
1721 &page_group, needed_num_pages, 1873 &page_group, needed_num_pages,
1722 KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0)); 1874 KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0));
1723 R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)); 1875 R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup));
1724 } 1876 }
1725 1877
1726 block_manager->Update(addr, needed_num_pages, state, perm); 1878 // Update the blocks.
1879 m_memory_block_manager.Update(std::addressof(allocator), addr, needed_num_pages, state, perm,
1880 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
1881 KMemoryBlockDisableMergeAttribute::None);
1727 1882
1728 return addr; 1883 return addr;
1729} 1884}
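
AllocateAndMapMemory also shows the mechanical conversion from if (...) { return result; } blocks to R_UNLESS, and from plain returns to R_SUCCEED/R_RETURN. A simplified, compilable model of those guards (the Result type and macros here are stand-ins; the real ones live in the project's result header but produce the same control flow):

    #include <cstddef>
    #include <cstdio>

    struct Result {
        int code;
        bool IsError() const { return code != 0; }
    };
    constexpr Result ResultSuccess{0};
    constexpr Result ResultOutOfMemory{1};

    #define R_UNLESS(cond, res)        \
        do {                           \
            if (!(cond)) return (res); \
        } while (0)

    #define R_TRY(expr)                          \
        do {                                     \
            const Result r_try_ = (expr);        \
            if (r_try_.IsError()) return r_try_; \
        } while (0)

    #define R_SUCCEED() return ResultSuccess

    Result AllocateAndMap(std::size_t region_pages, std::size_t needed_pages) {
        // Guards read top to bottom; each failure exits with a specific result.
        R_UNLESS(region_pages > needed_pages, ResultOutOfMemory);
        R_TRY(Result{needed_pages == 0 ? 1 : 0}); // e.g. the Operate() call
        R_SUCCEED();
    }

    int main() {
        std::printf("%d\n", AllocateAndMap(16, 4).code); // 0: success
        std::printf("%d\n", AllocateAndMap(4, 16).code); // 1: out of memory
    }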
1730 1885
1731Result KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) { 1886Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm,
1732 KScopedLightLock lk(general_lock); 1887 bool is_aligned) {
1733 1888 // Lightly validate the range before doing anything else.
1734 KMemoryPermission perm{}; 1889 const size_t num_pages = size / PageSize;
1735 if (const Result result{CheckMemoryState( 1890 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
1736 nullptr, &perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute,
1737 KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
1738 KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None,
1739 KMemoryAttribute::DeviceSharedAndUncached)};
1740 result.IsError()) {
1741 return result;
1742 }
1743 1891
1744 block_manager->UpdateLock( 1892 // Lock the table.
1745 addr, size / PageSize, 1893 KScopedLightLock lk(m_general_lock);
1746 [](KMemoryBlockManager::iterator block, KMemoryPermission permission) {
1747 block->ShareToDevice(permission);
1748 },
1749 perm);
1750 1894
1751 return ResultSuccess; 1895 // Check the memory state.
1896 const auto test_state =
1897 (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap);
1898 size_t num_allocator_blocks;
1899 R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, test_state,
1900 test_state, perm, perm,
1901 KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked,
1902 KMemoryAttribute::None, KMemoryAttribute::DeviceShared));
1903
1904 // Create an update allocator.
1905 Result allocator_result{ResultSuccess};
1906 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
1907 m_memory_block_slab_manager, num_allocator_blocks);
1908 R_TRY(allocator_result);
1909
1910 // Update the memory blocks.
1911 m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
1912 &KMemoryBlock::ShareToDevice, KMemoryPermission::None);
1913
1914 R_SUCCEED();
1752} 1915}
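
LockForMapDeviceAddressSpace ends by applying &KMemoryBlock::ShareToDevice to every block in the range. Conceptually, a block tracks how many device mappings reference it and derives the DeviceShared attribute from that count; a toy model of that bookkeeping (member names are assumptions, not the real KMemoryBlock interface):

    #include <cstdint>
    #include <cstdio>

    class BlockShareState {
    public:
        void ShareToDevice() {
            ++m_device_use_count;
            m_attribute |= DeviceShared; // the attribute appears on the first share
        }
        void UnshareToDevice() {
            if (--m_device_use_count == 0) {
                m_attribute &= ~DeviceShared; // and clears with the last unshare
            }
        }
        bool IsDeviceShared() const { return (m_attribute & DeviceShared) != 0; }
    private:
        static constexpr std::uint32_t DeviceShared = 1u << 0;
        std::uint16_t m_device_use_count = 0;
        std::uint32_t m_attribute = 0;
    };

    int main() {
        BlockShareState block;
        block.ShareToDevice();
        block.ShareToDevice();
        block.UnshareToDevice();
        std::printf("%d\n", block.IsDeviceShared()); // 1: one device still maps it
    }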
1753 1916
1754Result KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) { 1917Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) {
1755 KScopedLightLock lk(general_lock); 1918 // Lightly validate the range before doing anything else.
1756 1919 const size_t num_pages = size / PageSize;
1757 KMemoryPermission perm{}; 1920 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
1758 if (const Result result{CheckMemoryState(
1759 nullptr, &perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute,
1760 KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
1761 KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None,
1762 KMemoryAttribute::DeviceSharedAndUncached)};
1763 result.IsError()) {
1764 return result;
1765 }
1766 1921
1767 block_manager->UpdateLock( 1922 // Lock the table.
1768 addr, size / PageSize, 1923 KScopedLightLock lk(m_general_lock);
1769 [](KMemoryBlockManager::iterator block, KMemoryPermission permission) {
1770 block->UnshareToDevice(permission);
1771 },
1772 perm);
1773 1924
1774 return ResultSuccess; 1925 // Check the memory state.
1926 size_t num_allocator_blocks;
1927 R_TRY(this->CheckMemoryStateContiguous(
1928 std::addressof(num_allocator_blocks), address, size,
1929 KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap,
1930 KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap,
1931 KMemoryPermission::None, KMemoryPermission::None,
1932 KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
1933
1934 // Create an update allocator.
1935 Result allocator_result{ResultSuccess};
1936 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
1937 m_memory_block_slab_manager, num_allocator_blocks);
1938 R_TRY(allocator_result);
1939
1940 // Update the memory blocks.
1941 const KMemoryBlockManager::MemoryBlockLockFunction lock_func =
1942 m_enable_device_address_space_merge
1943 ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare
1944 : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight;
1945 m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func,
1946 KMemoryPermission::None);
1947
1948 R_SUCCEED();
1775} 1949}
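
Where the old code passed lambdas to UpdateLock, the new code selects a pointer-to-member-function up front (merge-aware or not, depending on m_enable_device_address_space_merge) and lets UpdateLock dispatch through it. Reduced to a standalone sketch:

    #include <vector>

    struct Block {
        int device_shares = 0;
        void Share(int) { ++device_shares; }
        void Unshare(int) { --device_shares; }
    };

    // Assumed shape of MemoryBlockLockFunction: a pointer to member function
    // that UpdateLock applies to every block in the range (the real one also
    // threads a KMemoryPermission through).
    using LockFunction = void (Block::*)(int);

    void UpdateLock(std::vector<Block>& blocks, LockFunction func, int perm) {
        for (Block& block : blocks) {
            (block.*func)(perm); // dispatch through the member pointer
        }
    }

    int main() {
        std::vector<Block> blocks(4);
        const bool merge_enabled = true;
        // Select the behavior once, then apply it uniformly, as UpdateLock does.
        const LockFunction func = merge_enabled ? &Block::Share : &Block::Unshare;
        UpdateLock(blocks, func, /*perm=*/0);
    }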
1776 1950
1777Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size) { 1951Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, size_t size) {
1778 return this->LockMemoryAndOpen( 1952 // Lightly validate the range before doing anything else.
1953 const size_t num_pages = size / PageSize;
1954 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
1955
1956 // Lock the table.
1957 KScopedLightLock lk(m_general_lock);
1958
1959 // Check the memory state.
1960 size_t num_allocator_blocks;
1961 R_TRY(this->CheckMemoryStateContiguous(
1962 std::addressof(num_allocator_blocks), address, size, KMemoryState::FlagCanDeviceMap,
1963 KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None,
1964 KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
1965
1966 // Create an update allocator.
1967 Result allocator_result{ResultSuccess};
1968 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
1969 m_memory_block_slab_manager, num_allocator_blocks);
1970 R_TRY(allocator_result);
1971
1972 // Update the memory blocks.
1973 m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
1974 &KMemoryBlock::UnshareToDevice, KMemoryPermission::None);
1975
1976 R_SUCCEED();
1977}
1978
1979Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size) {
1980 R_RETURN(this->LockMemoryAndOpen(
1779 out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, 1981 out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
1780 KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All, 1982 KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
1781 KMemoryAttribute::None, 1983 KMemoryAttribute::None,
1782 static_cast<KMemoryPermission>(KMemoryPermission::NotMapped | 1984 static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
1783 KMemoryPermission::KernelReadWrite), 1985 KMemoryPermission::KernelReadWrite),
1784 KMemoryAttribute::Locked); 1986 KMemoryAttribute::Locked));
1785} 1987}
1786 1988
1787Result KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg) { 1989Result KPageTable::UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg) {
1788 return this->UnlockMemory( 1990 R_RETURN(this->UnlockMemory(
1789 addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, 1991 addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
1790 KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All, 1992 KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
1791 KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg); 1993 KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg));
1792}
1793
1794Result KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
1795 block_manager = std::make_unique<KMemoryBlockManager>(start, end);
1796
1797 return ResultSuccess;
1798}
1799
1800bool KPageTable::IsRegionMapped(VAddr address, u64 size) {
1801 return CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free,
1802 KMemoryPermission::All, KMemoryPermission::None, KMemoryAttribute::Mask,
1803 KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped)
1804 .IsError();
1805} 1994}
1806 1995
1807bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const { 1996bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const {
1808 auto start_ptr = system.Memory().GetPointer(addr); 1997 auto start_ptr = m_system.DeviceMemory().GetPointer<u8>(addr);
1809 for (u64 offset{}; offset < size; offset += PageSize) { 1998 for (u64 offset{}; offset < size; offset += PageSize) {
1810 if (start_ptr != system.Memory().GetPointer(addr + offset)) { 1999 if (start_ptr != m_system.DeviceMemory().GetPointer<u8>(addr + offset)) {
1811 return false; 2000 return false;
1812 } 2001 }
1813 start_ptr += PageSize; 2002 start_ptr += PageSize;
@@ -1815,8 +2004,7 @@ bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const {
1815 return true; 2004 return true;
1816} 2005}
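
IsRegionContiguous now resolves pages through the device-memory view instead of the guest-memory view, but the test itself is unchanged: every page in the range must map to a host pointer exactly PageSize past the previous one. A self-contained version, with translate as a hypothetical stand-in for DeviceMemory::GetPointer<u8>:

    #include <cstddef>
    #include <cstdint>

    constexpr std::size_t PageSize = 0x1000;

    using Translate = std::uint8_t* (*)(std::uint64_t vaddr);

    // True when every page in [addr, addr + size) resolves to host memory
    // that advances in lockstep with the guest address.
    bool IsRegionContiguous(std::uint64_t addr, std::uint64_t size, Translate translate) {
        std::uint8_t* expected = translate(addr);
        for (std::uint64_t offset = 0; offset < size; offset += PageSize) {
            if (translate(addr + offset) != expected) {
                return false;
            }
            expected += PageSize;
        }
        return true;
    }

    int main() {
        // Identity translation: every page is contiguous by construction.
        const Translate identity = [](std::uint64_t vaddr) {
            return reinterpret_cast<std::uint8_t*>(vaddr);
        };
        return IsRegionContiguous(0x10000, 4 * PageSize, identity) ? 0 : 1;
    }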
1817 2006
1818void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages, 2007void KPageTable::AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list) {
1819 KPageGroup& page_linked_list) {
1820 VAddr addr{start}; 2008 VAddr addr{start};
1821 while (addr < start + (num_pages * PageSize)) { 2009 while (addr < start + (num_pages * PageSize)) {
1822 const PAddr paddr{GetPhysicalAddr(addr)}; 2010 const PAddr paddr{GetPhysicalAddr(addr)};
@@ -1826,16 +2014,16 @@ void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages,
1826 } 2014 }
1827} 2015}
1828 2016
1829VAddr KPageTable::AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, 2017VAddr KPageTable::AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages,
1830 u64 needed_num_pages, std::size_t align) { 2018 size_t align) {
1831 if (is_aslr_enabled) { 2019 if (m_enable_aslr) {
1832 UNIMPLEMENTED(); 2020 UNIMPLEMENTED();
1833 } 2021 }
1834 return block_manager->FindFreeArea(start, region_num_pages, needed_num_pages, align, 0, 2022 return m_memory_block_manager.FindFreeArea(start, region_num_pages, needed_num_pages, align, 0,
1835 IsKernel() ? 1 : 4); 2023 IsKernel() ? 1 : 4);
1836} 2024}
1837 2025
1838Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group, 2026Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group,
1839 OperationType operation) { 2027 OperationType operation) {
1840 ASSERT(this->IsLockedByCurrentThread()); 2028 ASSERT(this->IsLockedByCurrentThread());
1841 2029
@@ -1844,11 +2032,11 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup&
1844 ASSERT(num_pages == page_group.GetNumPages()); 2032 ASSERT(num_pages == page_group.GetNumPages());
1845 2033
1846 for (const auto& node : page_group.Nodes()) { 2034 for (const auto& node : page_group.Nodes()) {
1847 const std::size_t size{node.GetNumPages() * PageSize}; 2035 const size_t size{node.GetNumPages() * PageSize};
1848 2036
1849 switch (operation) { 2037 switch (operation) {
1850 case OperationType::MapGroup: 2038 case OperationType::MapGroup:
1851 system.Memory().MapMemoryRegion(page_table_impl, addr, size, node.GetAddress()); 2039 m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress());
1852 break; 2040 break;
1853 default: 2041 default:
1854 ASSERT(false); 2042 ASSERT(false);
@@ -1857,10 +2045,10 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup&
1857 addr += size; 2045 addr += size;
1858 } 2046 }
1859 2047
1860 return ResultSuccess; 2048 R_SUCCEED();
1861} 2049}
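
The MapGroup operation walks the page group's nodes, mapping each physically contiguous run at a steadily advancing virtual address. The traversal in isolation (illustrative node type; the real loop calls Memory::MapMemoryRegion per node):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    constexpr std::uint64_t PageSize = 0x1000;

    // Minimal model of a page group: runs of physically contiguous pages,
    // mapped back to back at a single virtual address.
    struct Node {
        std::uint64_t phys_addr;
        std::uint64_t num_pages;
    };

    void MapGroup(std::uint64_t vaddr, const std::vector<Node>& nodes) {
        for (const Node& node : nodes) {
            const std::uint64_t size = node.num_pages * PageSize;
            std::printf("map virt 0x%llx -> phys 0x%llx (+0x%llx)\n",
                        static_cast<unsigned long long>(vaddr),
                        static_cast<unsigned long long>(node.phys_addr),
                        static_cast<unsigned long long>(size));
            vaddr += size; // the next run continues where this one ended
        }
    }

    int main() {
        MapGroup(0x8000000, {{0x100000, 2}, {0x400000, 1}});
    }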
1862 2050
1863Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm, 2051Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm,
1864 OperationType operation, PAddr map_addr) { 2052 OperationType operation, PAddr map_addr) {
1865 ASSERT(this->IsLockedByCurrentThread()); 2053 ASSERT(this->IsLockedByCurrentThread());
1866 2054
@@ -1870,12 +2058,12 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission
1870 2058
1871 switch (operation) { 2059 switch (operation) {
1872 case OperationType::Unmap: 2060 case OperationType::Unmap:
1873 system.Memory().UnmapRegion(page_table_impl, addr, num_pages * PageSize); 2061 m_system.Memory().UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize);
1874 break; 2062 break;
1875 case OperationType::Map: { 2063 case OperationType::Map: {
1876 ASSERT(map_addr); 2064 ASSERT(map_addr);
1877 ASSERT(Common::IsAligned(map_addr, PageSize)); 2065 ASSERT(Common::IsAligned(map_addr, PageSize));
1878 system.Memory().MapMemoryRegion(page_table_impl, addr, num_pages * PageSize, map_addr); 2066 m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr);
1879 break; 2067 break;
1880 } 2068 }
1881 case OperationType::ChangePermissions: 2069 case OperationType::ChangePermissions:
@@ -1884,25 +2072,25 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission
1884 default: 2072 default:
1885 ASSERT(false); 2073 ASSERT(false);
1886 } 2074 }
1887 return ResultSuccess; 2075 R_SUCCEED();
1888} 2076}
1889 2077
1890VAddr KPageTable::GetRegionAddress(KMemoryState state) const { 2078VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
1891 switch (state) { 2079 switch (state) {
1892 case KMemoryState::Free: 2080 case KMemoryState::Free:
1893 case KMemoryState::Kernel: 2081 case KMemoryState::Kernel:
1894 return address_space_start; 2082 return m_address_space_start;
1895 case KMemoryState::Normal: 2083 case KMemoryState::Normal:
1896 return heap_region_start; 2084 return m_heap_region_start;
1897 case KMemoryState::Ipc: 2085 case KMemoryState::Ipc:
1898 case KMemoryState::NonSecureIpc: 2086 case KMemoryState::NonSecureIpc:
1899 case KMemoryState::NonDeviceIpc: 2087 case KMemoryState::NonDeviceIpc:
1900 return alias_region_start; 2088 return m_alias_region_start;
1901 case KMemoryState::Stack: 2089 case KMemoryState::Stack:
1902 return stack_region_start; 2090 return m_stack_region_start;
1903 case KMemoryState::Static: 2091 case KMemoryState::Static:
1904 case KMemoryState::ThreadLocal: 2092 case KMemoryState::ThreadLocal:
1905 return kernel_map_region_start; 2093 return m_kernel_map_region_start;
1906 case KMemoryState::Io: 2094 case KMemoryState::Io:
1907 case KMemoryState::Shared: 2095 case KMemoryState::Shared:
1908 case KMemoryState::AliasCode: 2096 case KMemoryState::AliasCode:
@@ -1913,31 +2101,31 @@ VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
1913 case KMemoryState::GeneratedCode: 2101 case KMemoryState::GeneratedCode:
1914 case KMemoryState::CodeOut: 2102 case KMemoryState::CodeOut:
1915 case KMemoryState::Coverage: 2103 case KMemoryState::Coverage:
1916 return alias_code_region_start; 2104 return m_alias_code_region_start;
1917 case KMemoryState::Code: 2105 case KMemoryState::Code:
1918 case KMemoryState::CodeData: 2106 case KMemoryState::CodeData:
1919 return code_region_start; 2107 return m_code_region_start;
1920 default: 2108 default:
1921 UNREACHABLE(); 2109 UNREACHABLE();
1922 } 2110 }
1923} 2111}
1924 2112
1925std::size_t KPageTable::GetRegionSize(KMemoryState state) const { 2113size_t KPageTable::GetRegionSize(KMemoryState state) const {
1926 switch (state) { 2114 switch (state) {
1927 case KMemoryState::Free: 2115 case KMemoryState::Free:
1928 case KMemoryState::Kernel: 2116 case KMemoryState::Kernel:
1929 return address_space_end - address_space_start; 2117 return m_address_space_end - m_address_space_start;
1930 case KMemoryState::Normal: 2118 case KMemoryState::Normal:
1931 return heap_region_end - heap_region_start; 2119 return m_heap_region_end - m_heap_region_start;
1932 case KMemoryState::Ipc: 2120 case KMemoryState::Ipc:
1933 case KMemoryState::NonSecureIpc: 2121 case KMemoryState::NonSecureIpc:
1934 case KMemoryState::NonDeviceIpc: 2122 case KMemoryState::NonDeviceIpc:
1935 return alias_region_end - alias_region_start; 2123 return m_alias_region_end - m_alias_region_start;
1936 case KMemoryState::Stack: 2124 case KMemoryState::Stack:
1937 return stack_region_end - stack_region_start; 2125 return m_stack_region_end - m_stack_region_start;
1938 case KMemoryState::Static: 2126 case KMemoryState::Static:
1939 case KMemoryState::ThreadLocal: 2127 case KMemoryState::ThreadLocal:
1940 return kernel_map_region_end - kernel_map_region_start; 2128 return m_kernel_map_region_end - m_kernel_map_region_start;
1941 case KMemoryState::Io: 2129 case KMemoryState::Io:
1942 case KMemoryState::Shared: 2130 case KMemoryState::Shared:
1943 case KMemoryState::AliasCode: 2131 case KMemoryState::AliasCode:
@@ -1948,16 +2136,16 @@ std::size_t KPageTable::GetRegionSize(KMemoryState state) const {
1948 case KMemoryState::GeneratedCode: 2136 case KMemoryState::GeneratedCode:
1949 case KMemoryState::CodeOut: 2137 case KMemoryState::CodeOut:
1950 case KMemoryState::Coverage: 2138 case KMemoryState::Coverage:
1951 return alias_code_region_end - alias_code_region_start; 2139 return m_alias_code_region_end - m_alias_code_region_start;
1952 case KMemoryState::Code: 2140 case KMemoryState::Code:
1953 case KMemoryState::CodeData: 2141 case KMemoryState::CodeData:
1954 return code_region_end - code_region_start; 2142 return m_code_region_end - m_code_region_start;
1955 default: 2143 default:
1956 UNREACHABLE(); 2144 UNREACHABLE();
1957 } 2145 }
1958} 2146}
1959 2147
1960bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) const { 2148bool KPageTable::CanContain(VAddr addr, size_t size, KMemoryState state) const {
1961 const VAddr end = addr + size; 2149 const VAddr end = addr + size;
1962 const VAddr last = end - 1; 2150 const VAddr last = end - 1;
1963 2151
@@ -1966,10 +2154,10 @@ bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) co
1966 2154
1967 const bool is_in_region = 2155 const bool is_in_region =
1968 region_start <= addr && addr < end && last <= region_start + region_size - 1; 2156 region_start <= addr && addr < end && last <= region_start + region_size - 1;
1969 const bool is_in_heap = !(end <= heap_region_start || heap_region_end <= addr || 2157 const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr ||
1970 heap_region_start == heap_region_end); 2158 m_heap_region_start == m_heap_region_end);
1971 const bool is_in_alias = !(end <= alias_region_start || alias_region_end <= addr || 2159 const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr ||
1972 alias_region_start == alias_region_end); 2160 m_alias_region_start == m_alias_region_end);
1973 switch (state) { 2161 switch (state) {
1974 case KMemoryState::Free: 2162 case KMemoryState::Free:
1975 case KMemoryState::Kernel: 2163 case KMemoryState::Kernel:
@@ -2008,23 +2196,23 @@ Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_
2008 KMemoryPermission perm, KMemoryAttribute attr_mask, 2196 KMemoryPermission perm, KMemoryAttribute attr_mask,
2009 KMemoryAttribute attr) const { 2197 KMemoryAttribute attr) const {
2010 // Validate the states match expectation. 2198 // Validate the states match expectation.
2011 R_UNLESS((info.state & state_mask) == state, ResultInvalidCurrentMemory); 2199 R_UNLESS((info.m_state & state_mask) == state, ResultInvalidCurrentMemory);
2012 R_UNLESS((info.perm & perm_mask) == perm, ResultInvalidCurrentMemory); 2200 R_UNLESS((info.m_permission & perm_mask) == perm, ResultInvalidCurrentMemory);
2013 R_UNLESS((info.attribute & attr_mask) == attr, ResultInvalidCurrentMemory); 2201 R_UNLESS((info.m_attribute & attr_mask) == attr, ResultInvalidCurrentMemory);
2014 2202
2015 return ResultSuccess; 2203 R_SUCCEED();
2016} 2204}
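
All three checks above use the same masked-equality idiom: bits selected by the mask must equal the expected value, and everything outside the mask is ignored. A small worked example with made-up attribute flags:

    #include <cstdint>
    #include <cstdio>

    enum Attr : std::uint32_t {
        None   = 0,
        Locked = 1u << 0,
        Shared = 1u << 1,
        Cached = 1u << 2,
        All    = ~0u,
    };

    // Bits selected by `mask` must equal `expected`; other bits are ignored.
    bool Matches(std::uint32_t value, std::uint32_t mask, std::uint32_t expected) {
        return (value & mask) == expected;
    }

    int main() {
        const std::uint32_t attr = Locked | Cached;
        std::printf("%d\n", Matches(attr, Locked, Locked)); // 1: Locked is set
        std::printf("%d\n", Matches(attr, All, None));      // 0: attributes present
        std::printf("%d\n", Matches(attr, Shared, None));   // 1: Shared is clear
    }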
2017 2205
2018Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, 2206Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size,
2019 std::size_t size, KMemoryState state_mask, 2207 KMemoryState state_mask, KMemoryState state,
2020 KMemoryState state, KMemoryPermission perm_mask, 2208 KMemoryPermission perm_mask, KMemoryPermission perm,
2021 KMemoryPermission perm, KMemoryAttribute attr_mask, 2209 KMemoryAttribute attr_mask,
2022 KMemoryAttribute attr) const { 2210 KMemoryAttribute attr) const {
2023 ASSERT(this->IsLockedByCurrentThread()); 2211 ASSERT(this->IsLockedByCurrentThread());
2024 2212
2025 // Get information about the first block. 2213 // Get information about the first block.
2026 const VAddr last_addr = addr + size - 1; 2214 const VAddr last_addr = addr + size - 1;
2027 KMemoryBlockManager::const_iterator it = block_manager->FindIterator(addr); 2215 KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
2028 KMemoryInfo info = it->GetMemoryInfo(); 2216 KMemoryInfo info = it->GetMemoryInfo();
2029 2217
2030 // If the start address isn't aligned, we need a block. 2218 // If the start address isn't aligned, we need a block.
@@ -2042,7 +2230,7 @@ Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VA
2042 2230
2043 // Advance our iterator. 2231 // Advance our iterator.
2044 it++; 2232 it++;
2045 ASSERT(it != block_manager->cend()); 2233 ASSERT(it != m_memory_block_manager.cend());
2046 info = it->GetMemoryInfo(); 2234 info = it->GetMemoryInfo();
2047 } 2235 }
2048 2236
@@ -2054,12 +2242,12 @@ Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VA
2054 *out_blocks_needed = blocks_for_start_align + blocks_for_end_align; 2242 *out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
2055 } 2243 }
2056 2244
2057 return ResultSuccess; 2245 R_SUCCEED();
2058} 2246}
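
The out_blocks_needed bookkeeping counts potential block splits: an update can split at most the first block it touches (if the range starts past that block's start) and the last one (if the range ends before that block's end), and each split costs one pre-allocated block. Assuming the usual end-alignment test, the arithmetic looks like this:

    #include <cstdint>
    #include <cstdio>

    constexpr std::uint64_t PageSize = 0x1000;

    constexpr std::uint64_t AlignDown(std::uint64_t value, std::uint64_t align) {
        return value & ~(align - 1);
    }

    // Each potential split consumes one extra pre-allocated block.
    std::uint64_t BlocksNeededForAlignment(std::uint64_t addr, std::uint64_t size,
                                           std::uint64_t first_block_addr,
                                           std::uint64_t last_block_end) {
        const std::uint64_t last_addr = addr + size - 1;
        const std::uint64_t start_split =
            (AlignDown(addr, PageSize) != first_block_addr) ? 1 : 0;
        const std::uint64_t end_split =
            (AlignDown(last_addr, PageSize) + PageSize != last_block_end) ? 1 : 0;
        return start_split + end_split;
    }

    int main() {
        // The range starts and ends inside larger blocks: both ends may split.
        std::printf("%llu\n", static_cast<unsigned long long>(
                                  BlocksNeededForAlignment(0x1000, 0x3000, 0x0, 0x8000)));
    }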
2059 2247
2060Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, 2248Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
2061 KMemoryAttribute* out_attr, std::size_t* out_blocks_needed, 2249 KMemoryAttribute* out_attr, size_t* out_blocks_needed,
2062 VAddr addr, std::size_t size, KMemoryState state_mask, 2250 VAddr addr, size_t size, KMemoryState state_mask,
2063 KMemoryState state, KMemoryPermission perm_mask, 2251 KMemoryState state, KMemoryPermission perm_mask,
2064 KMemoryPermission perm, KMemoryAttribute attr_mask, 2252 KMemoryPermission perm, KMemoryAttribute attr_mask,
2065 KMemoryAttribute attr, KMemoryAttribute ignore_attr) const { 2253 KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
@@ -2067,7 +2255,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
2067 2255
2068 // Get information about the first block. 2256 // Get information about the first block.
2069 const VAddr last_addr = addr + size - 1; 2257 const VAddr last_addr = addr + size - 1;
2070 KMemoryBlockManager::const_iterator it = block_manager->FindIterator(addr); 2258 KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
2071 KMemoryInfo info = it->GetMemoryInfo(); 2259 KMemoryInfo info = it->GetMemoryInfo();
2072 2260
2073 // If the start address isn't aligned, we need a block. 2261 // If the start address isn't aligned, we need a block.
@@ -2075,14 +2263,14 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
2075 (Common::AlignDown(addr, PageSize) != info.GetAddress()) ? 1 : 0; 2263 (Common::AlignDown(addr, PageSize) != info.GetAddress()) ? 1 : 0;
2076 2264
2077 // Validate all blocks in the range have correct state. 2265 // Validate all blocks in the range have correct state.
2078 const KMemoryState first_state = info.state; 2266 const KMemoryState first_state = info.m_state;
2079 const KMemoryPermission first_perm = info.perm; 2267 const KMemoryPermission first_perm = info.m_permission;
2080 const KMemoryAttribute first_attr = info.attribute; 2268 const KMemoryAttribute first_attr = info.m_attribute;
2081 while (true) { 2269 while (true) {
2082 // Validate the current block. 2270 // Validate the current block.
2083 R_UNLESS(info.state == first_state, ResultInvalidCurrentMemory); 2271 R_UNLESS(info.m_state == first_state, ResultInvalidCurrentMemory);
2084 R_UNLESS(info.perm == first_perm, ResultInvalidCurrentMemory); 2272 R_UNLESS(info.m_permission == first_perm, ResultInvalidCurrentMemory);
2085 R_UNLESS((info.attribute | ignore_attr) == (first_attr | ignore_attr), 2273 R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr),
2086 ResultInvalidCurrentMemory); 2274 ResultInvalidCurrentMemory);
2087 2275
2088 // Validate against the provided masks. 2276 // Validate against the provided masks.
@@ -2095,7 +2283,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
2095 2283
2096 // Advance our iterator. 2284 // Advance our iterator.
2097 it++; 2285 it++;
2098 ASSERT(it != block_manager->cend()); 2286 ASSERT(it != m_memory_block_manager.cend());
2099 info = it->GetMemoryInfo(); 2287 info = it->GetMemoryInfo();
2100 } 2288 }
2101 2289
@@ -2116,7 +2304,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
2116 if (out_blocks_needed != nullptr) { 2304 if (out_blocks_needed != nullptr) {
2117 *out_blocks_needed = blocks_for_start_align + blocks_for_end_align; 2305 *out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
2118 } 2306 }
2119 return ResultSuccess; 2307 R_SUCCEED();
2120} 2308}
2121 2309
2122Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size, 2310Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
@@ -2134,7 +2322,7 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr
2134 R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); 2322 R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
2135 2323
2136 // Lock the table. 2324 // Lock the table.
2137 KScopedLightLock lk(general_lock); 2325 KScopedLightLock lk(m_general_lock);
2138 2326
2139 // Check that the output page group is empty, if it exists. 2327 // Check that the output page group is empty, if it exists.
2140 if (out_pg) { 2328 if (out_pg) {
@@ -2162,6 +2350,12 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr
2162 R_TRY(this->MakePageGroup(*out_pg, addr, num_pages)); 2350 R_TRY(this->MakePageGroup(*out_pg, addr, num_pages));
2163 } 2351 }
2164 2352
2353 // Create an update allocator.
2354 Result allocator_result{ResultSuccess};
2355 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
2356 m_memory_block_slab_manager, num_allocator_blocks);
2357 R_TRY(allocator_result);
2358
2165 // Decide on new perm and attr. 2359 // Decide on new perm and attr.
2166 new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm; 2360 new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
2167 KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr | lock_attr); 2361 KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr | lock_attr);
@@ -2172,9 +2366,11 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr
2172 } 2366 }
2173 2367
2174 // Apply the memory block updates. 2368 // Apply the memory block updates.
2175 block_manager->Update(addr, num_pages, old_state, new_perm, new_attr); 2369 m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
2370 new_attr, KMemoryBlockDisableMergeAttribute::Locked,
2371 KMemoryBlockDisableMergeAttribute::None);
2176 2372
2177 return ResultSuccess; 2373 R_SUCCEED();
2178} 2374}
2179 2375
2180Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, 2376Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask,
@@ -2191,7 +2387,7 @@ Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask
2191 R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); 2387 R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
2192 2388
2193 // Lock the table. 2389 // Lock the table.
2194 KScopedLightLock lk(general_lock); 2390 KScopedLightLock lk(m_general_lock);
2195 2391
2196 // Check the state. 2392 // Check the state.
2197 KMemoryState old_state{}; 2393 KMemoryState old_state{};
@@ -2213,15 +2409,23 @@ Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask
2213 new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm; 2409 new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
2214 KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr & ~lock_attr); 2410 KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr & ~lock_attr);
2215 2411
2412 // Create an update allocator.
2413 Result allocator_result{ResultSuccess};
2414 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
2415 m_memory_block_slab_manager, num_allocator_blocks);
2416 R_TRY(allocator_result);
2417
2216 // Update permission, if we need to. 2418 // Update permission, if we need to.
2217 if (new_perm != old_perm) { 2419 if (new_perm != old_perm) {
2218 R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions)); 2420 R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
2219 } 2421 }
2220 2422
2221 // Apply the memory block updates. 2423 // Apply the memory block updates.
2222 block_manager->Update(addr, num_pages, old_state, new_perm, new_attr); 2424 m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
2425 new_attr, KMemoryBlockDisableMergeAttribute::None,
2426 KMemoryBlockDisableMergeAttribute::Locked);
2223 2427
2224 return ResultSuccess; 2428 R_SUCCEED();
2225} 2429}
2226 2430
2227} // namespace Kernel 2431} // namespace Kernel
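
Taken together, the rewritten functions in this file converge on one locking discipline. A condensed, compilable model of it, with std::mutex standing in for KLightLock and every step stubbed out:

    #include <cstddef>
    #include <mutex>

    struct Table {
        std::mutex general_lock;

        bool CheckState(std::size_t* out_blocks) { *out_blocks = 2; return true; }
        bool MakeAllocator(std::size_t) { return true; } // may fail: bail before mutating
        void Operate() {}      // touches the page tables
        void UpdateBlocks() {} // infallible bookkeeping

        bool DoOperation() {
            std::scoped_lock lk{general_lock};            // 1. lock the table
            std::size_t num_blocks{};
            if (!CheckState(&num_blocks)) return false;   // 2. validate memory state
            if (!MakeAllocator(num_blocks)) return false; // 3. pre-allocate blocks
            Operate();                                    // 4. perform the operation
            UpdateBlocks();                               // 5. record the new state
            return true;
        }
    };

    int main() {
        Table table;
        return table.DoOperation() ? 0 : 1;
    }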
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 25774f232..c6aeacd96 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -9,8 +9,10 @@
9#include "common/common_types.h" 9#include "common/common_types.h"
10#include "common/page_table.h" 10#include "common/page_table.h"
11#include "core/file_sys/program_metadata.h" 11#include "core/file_sys/program_metadata.h"
12#include "core/hle/kernel/k_dynamic_resource_manager.h"
12#include "core/hle/kernel/k_light_lock.h" 13#include "core/hle/kernel/k_light_lock.h"
13#include "core/hle/kernel/k_memory_block.h" 14#include "core/hle/kernel/k_memory_block.h"
15#include "core/hle/kernel/k_memory_block_manager.h"
14#include "core/hle/kernel/k_memory_layout.h" 16#include "core/hle/kernel/k_memory_layout.h"
15#include "core/hle/kernel/k_memory_manager.h" 17#include "core/hle/kernel/k_memory_manager.h"
16#include "core/hle/result.h" 18#include "core/hle/result.h"
@@ -34,58 +36,66 @@ public:
34 ~KPageTable(); 36 ~KPageTable();
35 37
36 Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, 38 Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
37 VAddr code_addr, std::size_t code_size, KMemoryManager::Pool pool); 39 VAddr code_addr, size_t code_size,
38 Result MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state, 40 KMemoryBlockSlabManager* mem_block_slab_manager,
41 KMemoryManager::Pool pool);
42
43 void Finalize();
44
45 Result MapProcessCode(VAddr addr, size_t pages_count, KMemoryState state,
39 KMemoryPermission perm); 46 KMemoryPermission perm);
40 Result MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size); 47 Result MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size);
41 Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size, 48 Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size,
42 ICacheInvalidationStrategy icache_invalidation_strategy); 49 ICacheInvalidationStrategy icache_invalidation_strategy);
43 Result UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table, 50 Result UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table,
44 VAddr src_addr); 51 VAddr src_addr);
45 Result MapPhysicalMemory(VAddr addr, std::size_t size); 52 Result MapPhysicalMemory(VAddr addr, size_t size);
46 Result UnmapPhysicalMemory(VAddr addr, std::size_t size); 53 Result UnmapPhysicalMemory(VAddr addr, size_t size);
47 Result MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); 54 Result MapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
48 Result UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); 55 Result UnmapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
49 Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state, 56 Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state,
50 KMemoryPermission perm); 57 KMemoryPermission perm);
51 Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr, 58 Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
52 KMemoryState state, KMemoryPermission perm) { 59 KMemoryState state, KMemoryPermission perm) {
53 return this->MapPages(out_addr, num_pages, alignment, phys_addr, true, 60 R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
54 this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize, 61 this->GetRegionAddress(state),
55 state, perm); 62 this->GetRegionSize(state) / PageSize, state, perm));
56 } 63 }
57 Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state); 64 Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state);
58 Result UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state); 65 Result UnmapPages(VAddr address, size_t num_pages, KMemoryState state);
59 Result SetProcessMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission svc_perm); 66 Result SetProcessMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm);
60 KMemoryInfo QueryInfo(VAddr addr); 67 KMemoryInfo QueryInfo(VAddr addr);
61 Result ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm); 68 Result SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission perm);
62 Result ResetTransferMemory(VAddr addr, std::size_t size); 69 Result SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr);
63 Result SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission perm); 70 Result SetMaxHeapSize(size_t size);
64 Result SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr); 71 Result SetHeapSize(VAddr* out, size_t size);
65 Result SetMaxHeapSize(std::size_t size); 72 ResultVal<VAddr> AllocateAndMapMemory(size_t needed_num_pages, size_t align, bool is_map_only,
66 Result SetHeapSize(VAddr* out, std::size_t size); 73 VAddr region_start, size_t region_num_pages,
67 ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align, 74 KMemoryState state, KMemoryPermission perm,
68 bool is_map_only, VAddr region_start, 75 PAddr map_addr = 0);
69 std::size_t region_num_pages, KMemoryState state, 76
70 KMemoryPermission perm, PAddr map_addr = 0); 77 Result LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm,
71 Result LockForDeviceAddressSpace(VAddr addr, std::size_t size); 78 bool is_aligned);
72 Result UnlockForDeviceAddressSpace(VAddr addr, std::size_t size); 79 Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size);
73 Result LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size); 80
74 Result UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg); 81 Result UnlockForDeviceAddressSpace(VAddr addr, size_t size);
82
83 Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size);
84 Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg);
75 Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages, 85 Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
76 KMemoryState state_mask, KMemoryState state, 86 KMemoryState state_mask, KMemoryState state,
77 KMemoryPermission perm_mask, KMemoryPermission perm, 87 KMemoryPermission perm_mask, KMemoryPermission perm,
78 KMemoryAttribute attr_mask, KMemoryAttribute attr); 88 KMemoryAttribute attr_mask, KMemoryAttribute attr);
79 89
80 Common::PageTable& PageTableImpl() { 90 Common::PageTable& PageTableImpl() {
81 return page_table_impl; 91 return *m_page_table_impl;
82 } 92 }
83 93
84 const Common::PageTable& PageTableImpl() const { 94 const Common::PageTable& PageTableImpl() const {
85 return page_table_impl; 95 return *m_page_table_impl;
86 } 96 }
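
On the header side, the Common::PageTable member is now owned through a std::unique_ptr and the accessors dereference it, so call sites keep receiving references. The pattern in isolation (illustrative types):

    #include <memory>

    struct Impl {
        int data = 0; // stand-in for Common::PageTable's state
    };

    class Table {
    public:
        Table() : m_impl{std::make_unique<Impl>()} {}

        // Accessors hand out references, so callers are unchanged even though
        // the storage moved from a direct member to the heap.
        Impl& GetImpl() { return *m_impl; }
        const Impl& GetImpl() const { return *m_impl; }

    private:
        std::unique_ptr<Impl> m_impl;
    };

    int main() {
        Table table;
        table.GetImpl().data = 42;
        return table.GetImpl().data == 42 ? 0 : 1;
    }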
87 97
88 bool CanContain(VAddr addr, std::size_t size, KMemoryState state) const; 98 bool CanContain(VAddr addr, size_t size, KMemoryState state) const;
89 99
90private: 100private:
91 enum class OperationType : u32 { 101 enum class OperationType : u32 {
@@ -96,67 +106,65 @@ private:
96 ChangePermissionsAndRefresh, 106 ChangePermissionsAndRefresh,
97 }; 107 };
98 108
99 static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = KMemoryAttribute::DontCareMask | 109 static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
100 KMemoryAttribute::IpcLocked | 110 KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
101 KMemoryAttribute::DeviceShared;
102 111
103 Result InitializeMemoryLayout(VAddr start, VAddr end);
104 Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm); 112 Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm);
105 Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr, 113 Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
106 bool is_pa_valid, VAddr region_start, std::size_t region_num_pages, 114 bool is_pa_valid, VAddr region_start, size_t region_num_pages,
107 KMemoryState state, KMemoryPermission perm); 115 KMemoryState state, KMemoryPermission perm);
108 Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list); 116 Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list);
109 bool IsRegionMapped(VAddr address, u64 size);
110 bool IsRegionContiguous(VAddr addr, u64 size) const; 117 bool IsRegionContiguous(VAddr addr, u64 size) const;
111 void AddRegionToPages(VAddr start, std::size_t num_pages, KPageGroup& page_linked_list); 118 void AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list);
112 KMemoryInfo QueryInfoImpl(VAddr addr); 119 KMemoryInfo QueryInfoImpl(VAddr addr);
113 VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages, 120 VAddr AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages,
114 std::size_t align); 121 size_t align);
115 Result Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group, 122 Result Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group,
116 OperationType operation); 123 OperationType operation);
117 Result Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm, 124 Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation,
118 OperationType operation, PAddr map_addr = 0); 125 PAddr map_addr = 0);
119 VAddr GetRegionAddress(KMemoryState state) const; 126 VAddr GetRegionAddress(KMemoryState state) const;
120 std::size_t GetRegionSize(KMemoryState state) const; 127 size_t GetRegionSize(KMemoryState state) const;
121 128
122 VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages, 129 VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
123 std::size_t alignment, std::size_t offset, std::size_t guard_pages); 130 size_t alignment, size_t offset, size_t guard_pages);
124 131
125 Result CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, std::size_t size, 132 Result CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size,
126 KMemoryState state_mask, KMemoryState state, 133 KMemoryState state_mask, KMemoryState state,
127 KMemoryPermission perm_mask, KMemoryPermission perm, 134 KMemoryPermission perm_mask, KMemoryPermission perm,
128 KMemoryAttribute attr_mask, KMemoryAttribute attr) const; 135 KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
129 Result CheckMemoryStateContiguous(VAddr addr, std::size_t size, KMemoryState state_mask, 136 Result CheckMemoryStateContiguous(VAddr addr, size_t size, KMemoryState state_mask,
130 KMemoryState state, KMemoryPermission perm_mask, 137 KMemoryState state, KMemoryPermission perm_mask,
131 KMemoryPermission perm, KMemoryAttribute attr_mask, 138 KMemoryPermission perm, KMemoryAttribute attr_mask,
132 KMemoryAttribute attr) const { 139 KMemoryAttribute attr) const {
133 return this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask, 140 R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
134 perm, attr_mask, attr); 141 perm, attr_mask, attr));
135 } 142 }
136 143
137 Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state, 144 Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
138 KMemoryPermission perm_mask, KMemoryPermission perm, 145 KMemoryPermission perm_mask, KMemoryPermission perm,
139 KMemoryAttribute attr_mask, KMemoryAttribute attr) const; 146 KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
140 Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, 147 Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
141 KMemoryAttribute* out_attr, std::size_t* out_blocks_needed, VAddr addr, 148 KMemoryAttribute* out_attr, size_t* out_blocks_needed, VAddr addr,
142 std::size_t size, KMemoryState state_mask, KMemoryState state, 149 size_t size, KMemoryState state_mask, KMemoryState state,
143 KMemoryPermission perm_mask, KMemoryPermission perm, 150 KMemoryPermission perm_mask, KMemoryPermission perm,
144 KMemoryAttribute attr_mask, KMemoryAttribute attr, 151 KMemoryAttribute attr_mask, KMemoryAttribute attr,
145 KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const; 152 KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
146 Result CheckMemoryState(std::size_t* out_blocks_needed, VAddr addr, std::size_t size, 153 Result CheckMemoryState(size_t* out_blocks_needed, VAddr addr, size_t size,
147 KMemoryState state_mask, KMemoryState state, 154 KMemoryState state_mask, KMemoryState state,
148 KMemoryPermission perm_mask, KMemoryPermission perm, 155 KMemoryPermission perm_mask, KMemoryPermission perm,
149 KMemoryAttribute attr_mask, KMemoryAttribute attr, 156 KMemoryAttribute attr_mask, KMemoryAttribute attr,
150 KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { 157 KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
151 return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size, 158 R_RETURN(CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
152 state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr); 159 state_mask, state, perm_mask, perm, attr_mask, attr,
160 ignore_attr));
153 } 161 }
154 Result CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask, 162 Result CheckMemoryState(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
155 KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, 163 KMemoryPermission perm_mask, KMemoryPermission perm,
156 KMemoryAttribute attr_mask, KMemoryAttribute attr, 164 KMemoryAttribute attr_mask, KMemoryAttribute attr,
157 KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { 165 KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
158 return this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm, 166 R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
159 attr_mask, attr, ignore_attr); 167 attr_mask, attr, ignore_attr));
160 } 168 }
161 169
162 Result LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size, 170 Result LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
@@ -174,13 +182,13 @@ private:
174 bool IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages); 182 bool IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages);
175 183
176 bool IsLockedByCurrentThread() const { 184 bool IsLockedByCurrentThread() const {
177 return general_lock.IsLockedByCurrentThread(); 185 return m_general_lock.IsLockedByCurrentThread();
178 } 186 }
179 187
180 bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) { 188 bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) {
181 ASSERT(this->IsLockedByCurrentThread()); 189 ASSERT(this->IsLockedByCurrentThread());
182 190
183 return layout.IsHeapPhysicalAddress(cached_physical_heap_region, phys_addr); 191 return layout.IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
184 } 192 }
185 193
186 bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const { 194 bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const {
@@ -191,95 +199,93 @@ private:
191 return *out != 0; 199 return *out != 0;
192 } 200 }
193 201
194 mutable KLightLock general_lock; 202 mutable KLightLock m_general_lock;
195 mutable KLightLock map_physical_memory_lock; 203 mutable KLightLock m_map_physical_memory_lock;
196
197 std::unique_ptr<KMemoryBlockManager> block_manager;
198 204
199public: 205public:
200 constexpr VAddr GetAddressSpaceStart() const { 206 constexpr VAddr GetAddressSpaceStart() const {
201 return address_space_start; 207 return m_address_space_start;
202 } 208 }
203 constexpr VAddr GetAddressSpaceEnd() const { 209 constexpr VAddr GetAddressSpaceEnd() const {
204 return address_space_end; 210 return m_address_space_end;
205 } 211 }
206 constexpr std::size_t GetAddressSpaceSize() const { 212 constexpr size_t GetAddressSpaceSize() const {
207 return address_space_end - address_space_start; 213 return m_address_space_end - m_address_space_start;
208 } 214 }
209 constexpr VAddr GetHeapRegionStart() const { 215 constexpr VAddr GetHeapRegionStart() const {
210 return heap_region_start; 216 return m_heap_region_start;
211 } 217 }
212 constexpr VAddr GetHeapRegionEnd() const { 218 constexpr VAddr GetHeapRegionEnd() const {
213 return heap_region_end; 219 return m_heap_region_end;
214 } 220 }
215 constexpr std::size_t GetHeapRegionSize() const { 221 constexpr size_t GetHeapRegionSize() const {
216 return heap_region_end - heap_region_start; 222 return m_heap_region_end - m_heap_region_start;
217 } 223 }
218 constexpr VAddr GetAliasRegionStart() const { 224 constexpr VAddr GetAliasRegionStart() const {
219 return alias_region_start; 225 return m_alias_region_start;
220 } 226 }
221 constexpr VAddr GetAliasRegionEnd() const { 227 constexpr VAddr GetAliasRegionEnd() const {
222 return alias_region_end; 228 return m_alias_region_end;
223 } 229 }
224 constexpr std::size_t GetAliasRegionSize() const { 230 constexpr size_t GetAliasRegionSize() const {
225 return alias_region_end - alias_region_start; 231 return m_alias_region_end - m_alias_region_start;
226 } 232 }
227 constexpr VAddr GetStackRegionStart() const { 233 constexpr VAddr GetStackRegionStart() const {
228 return stack_region_start; 234 return m_stack_region_start;
229 } 235 }
230 constexpr VAddr GetStackRegionEnd() const { 236 constexpr VAddr GetStackRegionEnd() const {
231 return stack_region_end; 237 return m_stack_region_end;
232 } 238 }
233 constexpr std::size_t GetStackRegionSize() const { 239 constexpr size_t GetStackRegionSize() const {
234 return stack_region_end - stack_region_start; 240 return m_stack_region_end - m_stack_region_start;
235 } 241 }
236 constexpr VAddr GetKernelMapRegionStart() const { 242 constexpr VAddr GetKernelMapRegionStart() const {
237 return kernel_map_region_start; 243 return m_kernel_map_region_start;
238 } 244 }
239 constexpr VAddr GetKernelMapRegionEnd() const { 245 constexpr VAddr GetKernelMapRegionEnd() const {
240 return kernel_map_region_end; 246 return m_kernel_map_region_end;
241 } 247 }
242 constexpr VAddr GetCodeRegionStart() const { 248 constexpr VAddr GetCodeRegionStart() const {
243 return code_region_start; 249 return m_code_region_start;
244 } 250 }
245 constexpr VAddr GetCodeRegionEnd() const { 251 constexpr VAddr GetCodeRegionEnd() const {
246 return code_region_end; 252 return m_code_region_end;
247 } 253 }
248 constexpr VAddr GetAliasCodeRegionStart() const { 254 constexpr VAddr GetAliasCodeRegionStart() const {
249 return alias_code_region_start; 255 return m_alias_code_region_start;
250 } 256 }
251 constexpr VAddr GetAliasCodeRegionSize() const { 257 constexpr VAddr GetAliasCodeRegionSize() const {
252 return alias_code_region_end - alias_code_region_start; 258 return m_alias_code_region_end - m_alias_code_region_start;
253 } 259 }
254 std::size_t GetNormalMemorySize() { 260 size_t GetNormalMemorySize() {
255 KScopedLightLock lk(general_lock); 261 KScopedLightLock lk(m_general_lock);
256 return GetHeapSize() + mapped_physical_memory_size; 262 return GetHeapSize() + m_mapped_physical_memory_size;
257 } 263 }
258 constexpr std::size_t GetAddressSpaceWidth() const { 264 constexpr size_t GetAddressSpaceWidth() const {
259 return address_space_width; 265 return m_address_space_width;
260 } 266 }
261 constexpr std::size_t GetHeapSize() const { 267 constexpr size_t GetHeapSize() const {
262 return current_heap_end - heap_region_start; 268 return m_current_heap_end - m_heap_region_start;
263 } 269 }
264 constexpr bool IsInsideAddressSpace(VAddr address, std::size_t size) const { 270 constexpr bool IsInsideAddressSpace(VAddr address, size_t size) const {
265 return address_space_start <= address && address + size - 1 <= address_space_end - 1; 271 return m_address_space_start <= address && address + size - 1 <= m_address_space_end - 1;
266 } 272 }
267 constexpr bool IsOutsideAliasRegion(VAddr address, std::size_t size) const { 273 constexpr bool IsOutsideAliasRegion(VAddr address, size_t size) const {
268 return alias_region_start > address || address + size - 1 > alias_region_end - 1; 274 return m_alias_region_start > address || address + size - 1 > m_alias_region_end - 1;
269 } 275 }
270 constexpr bool IsOutsideStackRegion(VAddr address, std::size_t size) const { 276 constexpr bool IsOutsideStackRegion(VAddr address, size_t size) const {
271 return stack_region_start > address || address + size - 1 > stack_region_end - 1; 277 return m_stack_region_start > address || address + size - 1 > m_stack_region_end - 1;
272 } 278 }
273 constexpr bool IsInvalidRegion(VAddr address, std::size_t size) const { 279 constexpr bool IsInvalidRegion(VAddr address, size_t size) const {
274 return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1; 280 return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1;
275 } 281 }
276 constexpr bool IsInsideHeapRegion(VAddr address, std::size_t size) const { 282 constexpr bool IsInsideHeapRegion(VAddr address, size_t size) const {
277 return address + size > heap_region_start && heap_region_end > address; 283 return address + size > m_heap_region_start && m_heap_region_end > address;
278 } 284 }
279 constexpr bool IsInsideAliasRegion(VAddr address, std::size_t size) const { 285 constexpr bool IsInsideAliasRegion(VAddr address, size_t size) const {
280 return address + size > alias_region_start && alias_region_end > address; 286 return address + size > m_alias_region_start && m_alias_region_end > address;
281 } 287 }
282 constexpr bool IsOutsideASLRRegion(VAddr address, std::size_t size) const { 288 constexpr bool IsOutsideASLRRegion(VAddr address, size_t size) const {
283 if (IsInvalidRegion(address, size)) { 289 if (IsInvalidRegion(address, size)) {
284 return true; 290 return true;
285 } 291 }
@@ -291,73 +297,78 @@ public:
291 } 297 }
292 return {}; 298 return {};
293 } 299 }
294 constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const { 300 constexpr bool IsInsideASLRRegion(VAddr address, size_t size) const {
295 return !IsOutsideASLRRegion(address, size); 301 return !IsOutsideASLRRegion(address, size);
296 } 302 }
297 constexpr std::size_t GetNumGuardPages() const { 303 constexpr size_t GetNumGuardPages() const {
298 return IsKernel() ? 1 : 4; 304 return IsKernel() ? 1 : 4;
299 } 305 }
300 PAddr GetPhysicalAddr(VAddr addr) const { 306 PAddr GetPhysicalAddr(VAddr addr) const {
301 const auto backing_addr = page_table_impl.backing_addr[addr >> PageBits]; 307 const auto backing_addr = m_page_table_impl->backing_addr[addr >> PageBits];
302 ASSERT(backing_addr); 308 ASSERT(backing_addr);
303 return backing_addr + addr; 309 return backing_addr + addr;
304 } 310 }
305 constexpr bool Contains(VAddr addr) const { 311 constexpr bool Contains(VAddr addr) const {
306 return address_space_start <= addr && addr <= address_space_end - 1; 312 return m_address_space_start <= addr && addr <= m_address_space_end - 1;
307 } 313 }
308 constexpr bool Contains(VAddr addr, std::size_t size) const { 314 constexpr bool Contains(VAddr addr, size_t size) const {
309 return address_space_start <= addr && addr < addr + size && 315 return m_address_space_start <= addr && addr < addr + size &&
310 addr + size - 1 <= address_space_end - 1; 316 addr + size - 1 <= m_address_space_end - 1;
311 } 317 }
312 318
313private: 319private:
314 constexpr bool IsKernel() const { 320 constexpr bool IsKernel() const {
315 return is_kernel; 321 return m_is_kernel;
316 } 322 }
317 constexpr bool IsAslrEnabled() const { 323 constexpr bool IsAslrEnabled() const {
318 return is_aslr_enabled; 324 return m_enable_aslr;
319 } 325 }
320 326
321 constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const { 327 constexpr bool ContainsPages(VAddr addr, size_t num_pages) const {
322 return (address_space_start <= addr) && 328 return (m_address_space_start <= addr) &&
323 (num_pages <= (address_space_end - address_space_start) / PageSize) && 329 (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) &&
324 (addr + num_pages * PageSize - 1 <= address_space_end - 1); 330 (addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
325 } 331 }
326 332
327private: 333private:
328 VAddr address_space_start{}; 334 VAddr m_address_space_start{};
329 VAddr address_space_end{}; 335 VAddr m_address_space_end{};
330 VAddr heap_region_start{}; 336 VAddr m_heap_region_start{};
331 VAddr heap_region_end{}; 337 VAddr m_heap_region_end{};
332 VAddr current_heap_end{}; 338 VAddr m_current_heap_end{};
333 VAddr alias_region_start{}; 339 VAddr m_alias_region_start{};
334 VAddr alias_region_end{}; 340 VAddr m_alias_region_end{};
335 VAddr stack_region_start{}; 341 VAddr m_stack_region_start{};
336 VAddr stack_region_end{}; 342 VAddr m_stack_region_end{};
337 VAddr kernel_map_region_start{}; 343 VAddr m_kernel_map_region_start{};
338 VAddr kernel_map_region_end{}; 344 VAddr m_kernel_map_region_end{};
339 VAddr code_region_start{}; 345 VAddr m_code_region_start{};
340 VAddr code_region_end{}; 346 VAddr m_code_region_end{};
341 VAddr alias_code_region_start{}; 347 VAddr m_alias_code_region_start{};
342 VAddr alias_code_region_end{}; 348 VAddr m_alias_code_region_end{};
343 349
344 std::size_t mapped_physical_memory_size{}; 350 size_t m_mapped_physical_memory_size{};
345 std::size_t max_heap_size{}; 351 size_t m_max_heap_size{};
346 std::size_t max_physical_memory_size{}; 352 size_t m_max_physical_memory_size{};
347 std::size_t address_space_width{}; 353 size_t m_address_space_width{};
348 354
349 bool is_kernel{}; 355 KMemoryBlockManager m_memory_block_manager;
350 bool is_aslr_enabled{}; 356
351 357 bool m_is_kernel{};
352 u32 heap_fill_value{}; 358 bool m_enable_aslr{};
353 const KMemoryRegion* cached_physical_heap_region{}; 359 bool m_enable_device_address_space_merge{};
354 360
355 KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application}; 361 KMemoryBlockSlabManager* m_memory_block_slab_manager{};
356 KMemoryManager::Direction allocation_option{KMemoryManager::Direction::FromFront}; 362
357 363 u32 m_heap_fill_value{};
358 Common::PageTable page_table_impl; 364 const KMemoryRegion* m_cached_physical_heap_region{};
359 365
360 Core::System& system; 366 KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application};
367 KMemoryManager::Direction m_allocation_option{KMemoryManager::Direction::FromFront};
368
369 std::unique_ptr<Common::PageTable> m_page_table_impl;
370
371 Core::System& m_system;
361}; 372};
362 373
363} // namespace Kernel 374} // namespace Kernel
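
The region accessors above all share one convention: a region is [start, end), and every range check compares last bytes ("addr + size - 1 <= end - 1") rather than one-past-the-end sums, so a range ending exactly at the top of the address space never overflows. A minimal standalone sketch of that pattern (the VAddr alias and the 39-bit bound are illustrative assumptions, not the real layout):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    using VAddr = std::uint64_t;

    // Mirrors the shape of IsInsideAddressSpace(): compare the last byte of
    // the range against the last valid byte of the region, so "addr + size"
    // never has to be representable. Assumes size >= 1, as the kernel does.
    constexpr bool IsInside(VAddr region_start, VAddr region_end, VAddr addr,
                            std::size_t size) {
        return region_start <= addr && addr + size - 1 <= region_end - 1;
    }

    int main() {
        constexpr VAddr start = 0x8000000;  // illustrative region base
        constexpr VAddr end = 1ULL << 39;   // illustrative 39-bit space top
        assert(IsInside(start, end, 0x8000000, 0x1000));
        assert(!IsInside(start, end, end - 0x800, 0x1000));
        return 0;
    }

Note that Contains() above also checks "addr < addr + size" separately, which catches the one case this comparison alone cannot: a size large enough to wrap the address around zero.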
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index d3e99665f..8c3495e5a 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -72,7 +72,8 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string
72 72
73 process->name = std::move(process_name); 73 process->name = std::move(process_name);
74 process->resource_limit = res_limit; 74 process->resource_limit = res_limit;
75 process->status = ProcessStatus::Created; 75 process->system_resource_address = 0;
76 process->state = State::Created;
76 process->program_id = 0; 77 process->program_id = 0;
77 process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID() 78 process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
78 : kernel.CreateNewUserProcessID(); 79 : kernel.CreateNewUserProcessID();
@@ -92,11 +93,12 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string
92 process->exception_thread = nullptr; 93 process->exception_thread = nullptr;
93 process->is_suspended = false; 94 process->is_suspended = false;
94 process->schedule_count = 0; 95 process->schedule_count = 0;
96 process->is_handle_table_initialized = false;
95 97
96 // Open a reference to the resource limit. 98 // Open a reference to the resource limit.
97 process->resource_limit->Open(); 99 process->resource_limit->Open();
98 100
99 return ResultSuccess; 101 R_SUCCEED();
100} 102}
101 103
102void KProcess::DoWorkerTaskImpl() { 104void KProcess::DoWorkerTaskImpl() {
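
The returns rewritten throughout this file use the R_* macro family from core/hle/result.h. The real definitions are not part of this patch; minimal stand-ins with the same control flow look roughly like this (the Result stub and the error value are assumptions for illustration only):

    #include <cstdint>

    // Illustrative stand-in; yuzu's real Result carries module/description.
    struct Result {
        std::uint32_t raw;
        constexpr bool IsError() const { return raw != 0; }
    };
    inline constexpr Result ResultSuccess{0};
    inline constexpr Result ResultInvalidState{0xD401};  // made-up value

    #define R_SUCCEED() return ResultSuccess
    #define R_RETURN(res_expr) return (res_expr)
    #define R_UNLESS(expr, res) \
        do {                    \
            if (!(expr)) {      \
                return (res);   \
            }                   \
        } while (false)

    // Same shape as the R_UNLESS/R_SUCCEED sites in this file.
    Result Reset(bool terminated, bool is_signaled) {
        R_UNLESS(!terminated, ResultInvalidState);
        R_UNLESS(is_signaled, ResultInvalidState);
        R_SUCCEED();
    }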
@@ -121,9 +123,9 @@ void KProcess::DecrementRunningThreadCount() {
121 } 123 }
122} 124}
123 125
124u64 KProcess::GetTotalPhysicalMemoryAvailable() const { 126u64 KProcess::GetTotalPhysicalMemoryAvailable() {
125 const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) + 127 const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) +
126 page_table->GetNormalMemorySize() + GetSystemResourceSize() + image_size + 128 page_table.GetNormalMemorySize() + GetSystemResourceSize() + image_size +
127 main_thread_stack_size}; 129 main_thread_stack_size};
128 if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application); 130 if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application);
129 capacity != pool_size) { 131 capacity != pool_size) {
@@ -135,16 +137,16 @@ u64 KProcess::GetTotalPhysicalMemoryAvailable() const {
135 return memory_usage_capacity; 137 return memory_usage_capacity;
136} 138}
137 139
138u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const { 140u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() {
139 return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize(); 141 return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize();
140} 142}
141 143
142u64 KProcess::GetTotalPhysicalMemoryUsed() const { 144u64 KProcess::GetTotalPhysicalMemoryUsed() {
143 return image_size + main_thread_stack_size + page_table->GetNormalMemorySize() + 145 return image_size + main_thread_stack_size + page_table.GetNormalMemorySize() +
144 GetSystemResourceSize(); 146 GetSystemResourceSize();
145} 147}
146 148
147u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() const { 149u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() {
148 return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage(); 150 return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage();
149} 151}
150 152
@@ -244,7 +246,7 @@ Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr ad
244 shmem->Open(); 246 shmem->Open();
245 shemen_info->Open(); 247 shemen_info->Open();
246 248
247 return ResultSuccess; 249 R_SUCCEED();
248} 250}
249 251
250void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address, 252void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
@@ -289,12 +291,12 @@ Result KProcess::Reset() {
289 KScopedSchedulerLock sl{kernel}; 291 KScopedSchedulerLock sl{kernel};
290 292
291 // Validate that we're in a state that we can reset. 293 // Validate that we're in a state that we can reset.
292 R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState); 294 R_UNLESS(state != State::Terminated, ResultInvalidState);
293 R_UNLESS(is_signaled, ResultInvalidState); 295 R_UNLESS(is_signaled, ResultInvalidState);
294 296
295 // Clear signaled. 297 // Clear signaled.
296 is_signaled = false; 298 is_signaled = false;
297 return ResultSuccess; 299 R_SUCCEED();
298} 300}
299 301
300Result KProcess::SetActivity(ProcessActivity activity) { 302Result KProcess::SetActivity(ProcessActivity activity) {
@@ -304,15 +306,13 @@ Result KProcess::SetActivity(ProcessActivity activity) {
304 KScopedSchedulerLock sl{kernel}; 306 KScopedSchedulerLock sl{kernel};
305 307
306 // Validate our state. 308 // Validate our state.
307 R_UNLESS(status != ProcessStatus::Exiting, ResultInvalidState); 309 R_UNLESS(state != State::Terminating, ResultInvalidState);
308 R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState); 310 R_UNLESS(state != State::Terminated, ResultInvalidState);
309 311
310 // Either pause or resume. 312 // Either pause or resume.
311 if (activity == ProcessActivity::Paused) { 313 if (activity == ProcessActivity::Paused) {
312 // Verify that we're not suspended. 314 // Verify that we're not suspended.
313 if (is_suspended) { 315 R_UNLESS(!is_suspended, ResultInvalidState);
314 return ResultInvalidState;
315 }
316 316
317 // Suspend all threads. 317 // Suspend all threads.
318 for (auto* thread : GetThreadList()) { 318 for (auto* thread : GetThreadList()) {
@@ -325,9 +325,7 @@ Result KProcess::SetActivity(ProcessActivity activity) {
325 ASSERT(activity == ProcessActivity::Runnable); 325 ASSERT(activity == ProcessActivity::Runnable);
326 326
327 // Verify that we're suspended. 327 // Verify that we're suspended.
328 if (!is_suspended) { 328 R_UNLESS(is_suspended, ResultInvalidState);
329 return ResultInvalidState;
330 }
331 329
332 // Resume all threads. 330 // Resume all threads.
333 for (auto* thread : GetThreadList()) { 331 for (auto* thread : GetThreadList()) {
@@ -338,7 +336,7 @@ Result KProcess::SetActivity(ProcessActivity activity) {
338 SetSuspended(false); 336 SetSuspended(false);
339 } 337 }
340 338
341 return ResultSuccess; 339 R_SUCCEED();
342} 340}
343 341
344Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) { 342Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) {
@@ -348,35 +346,38 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
348 system_resource_size = metadata.GetSystemResourceSize(); 346 system_resource_size = metadata.GetSystemResourceSize();
349 image_size = code_size; 347 image_size = code_size;
350 348
 349 // We currently do not support the process-specific system resource
350 UNIMPLEMENTED_IF(system_resource_size != 0);
351
351 KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory, 352 KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory,
352 code_size + system_resource_size); 353 code_size + system_resource_size);
353 if (!memory_reservation.Succeeded()) { 354 if (!memory_reservation.Succeeded()) {
354 LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes", 355 LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes",
355 code_size + system_resource_size); 356 code_size + system_resource_size);
356 return ResultLimitReached; 357 R_RETURN(ResultLimitReached);
357 } 358 }
 358 // Initialize process address space 359 // Initialize process address space
359 if (const Result result{page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false, 360 if (const Result result{page_table.InitializeForProcess(
360 0x8000000, code_size, 361 metadata.GetAddressSpaceType(), false, 0x8000000, code_size,
361 KMemoryManager::Pool::Application)}; 362 &kernel.GetApplicationMemoryBlockManager(), KMemoryManager::Pool::Application)};
362 result.IsError()) { 363 result.IsError()) {
363 return result; 364 R_RETURN(result);
364 } 365 }
365 366
366 // Map process code region 367 // Map process code region
367 if (const Result result{page_table->MapProcessCode(page_table->GetCodeRegionStart(), 368 if (const Result result{page_table.MapProcessCode(page_table.GetCodeRegionStart(),
368 code_size / PageSize, KMemoryState::Code, 369 code_size / PageSize, KMemoryState::Code,
369 KMemoryPermission::None)}; 370 KMemoryPermission::None)};
370 result.IsError()) { 371 result.IsError()) {
371 return result; 372 R_RETURN(result);
372 } 373 }
373 374
374 // Initialize process capabilities 375 // Initialize process capabilities
375 const auto& caps{metadata.GetKernelCapabilities()}; 376 const auto& caps{metadata.GetKernelCapabilities()};
376 if (const Result result{ 377 if (const Result result{
377 capabilities.InitializeForUserProcess(caps.data(), caps.size(), *page_table)}; 378 capabilities.InitializeForUserProcess(caps.data(), caps.size(), page_table)};
378 result.IsError()) { 379 result.IsError()) {
379 return result; 380 R_RETURN(result);
380 } 381 }
381 382
382 // Set memory usage capacity 383 // Set memory usage capacity
@@ -384,12 +385,12 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
384 case FileSys::ProgramAddressSpaceType::Is32Bit: 385 case FileSys::ProgramAddressSpaceType::Is32Bit:
385 case FileSys::ProgramAddressSpaceType::Is36Bit: 386 case FileSys::ProgramAddressSpaceType::Is36Bit:
386 case FileSys::ProgramAddressSpaceType::Is39Bit: 387 case FileSys::ProgramAddressSpaceType::Is39Bit:
387 memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart(); 388 memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart();
388 break; 389 break;
389 390
390 case FileSys::ProgramAddressSpaceType::Is32BitNoMap: 391 case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
391 memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart() + 392 memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart() +
392 page_table->GetAliasRegionEnd() - page_table->GetAliasRegionStart(); 393 page_table.GetAliasRegionEnd() - page_table.GetAliasRegionStart();
393 break; 394 break;
394 395
395 default: 396 default:
@@ -397,10 +398,10 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
397 } 398 }
398 399
399 // Create TLS region 400 // Create TLS region
400 R_TRY(this->CreateThreadLocalRegion(std::addressof(tls_region_address))); 401 R_TRY(this->CreateThreadLocalRegion(std::addressof(plr_address)));
401 memory_reservation.Commit(); 402 memory_reservation.Commit();
402 403
403 return handle_table.Initialize(capabilities.GetHandleTableSize()); 404 R_RETURN(handle_table.Initialize(capabilities.GetHandleTableSize()));
404} 405}
405 406
406void KProcess::Run(s32 main_thread_priority, u64 stack_size) { 407void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
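
The switch just above encodes a simple capacity rule: address-space types with a mapped alias region are capped at the heap region size, while the 32-bit "no map" type also counts the alias region. A hedged, self-contained restatement (the enum and the region values below are illustrative, not the real memory layout):

    #include <cstdint>

    using VAddr = std::uint64_t;

    enum class AddressSpaceType { Is32Bit, Is36Bit, Is39Bit, Is32BitNoMap };

    // Capacity = heap region size, plus the alias region for the "no map"
    // 32-bit type, matching the switch in LoadFromMetadata().
    constexpr std::uint64_t UsageCapacity(AddressSpaceType type, VAddr heap_start,
                                          VAddr heap_end, VAddr alias_start,
                                          VAddr alias_end) {
        const std::uint64_t heap = heap_end - heap_start;
        if (type == AddressSpaceType::Is32BitNoMap) {
            return heap + (alias_end - alias_start);
        }
        return heap;
    }

    static_assert(UsageCapacity(AddressSpaceType::Is32BitNoMap, 0x10000000,
                                0x20000000, 0x30000000, 0x38000000) ==
                  0x10000000 + 0x8000000);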
@@ -409,15 +410,15 @@ void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
409 resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size); 410 resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size);
410 411
411 const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)}; 412 const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)};
412 ASSERT(!page_table->SetMaxHeapSize(heap_capacity).IsError()); 413 ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError());
413 414
414 ChangeStatus(ProcessStatus::Running); 415 ChangeState(State::Running);
415 416
416 SetupMainThread(kernel.System(), *this, main_thread_priority, main_thread_stack_top); 417 SetupMainThread(kernel.System(), *this, main_thread_priority, main_thread_stack_top);
417} 418}
418 419
419void KProcess::PrepareForTermination() { 420void KProcess::PrepareForTermination() {
420 ChangeStatus(ProcessStatus::Exiting); 421 ChangeState(State::Terminating);
421 422
422 const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) { 423 const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) {
423 for (auto* thread : in_thread_list) { 424 for (auto* thread : in_thread_list) {
@@ -437,15 +438,15 @@ void KProcess::PrepareForTermination() {
437 438
438 stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList()); 439 stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList());
439 440
440 this->DeleteThreadLocalRegion(tls_region_address); 441 this->DeleteThreadLocalRegion(plr_address);
441 tls_region_address = 0; 442 plr_address = 0;
442 443
443 if (resource_limit) { 444 if (resource_limit) {
444 resource_limit->Release(LimitableResource::PhysicalMemory, 445 resource_limit->Release(LimitableResource::PhysicalMemory,
445 main_thread_stack_size + image_size); 446 main_thread_stack_size + image_size);
446 } 447 }
447 448
448 ChangeStatus(ProcessStatus::Exited); 449 ChangeState(State::Terminated);
449} 450}
450 451
451void KProcess::Finalize() { 452void KProcess::Finalize() {
@@ -474,7 +475,7 @@ void KProcess::Finalize() {
474 } 475 }
475 476
476 // Finalize the page table. 477 // Finalize the page table.
477 page_table.reset(); 478 page_table.Finalize();
478 479
479 // Perform inherited finalization. 480 // Perform inherited finalization.
480 KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize(); 481 KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize();
@@ -499,7 +500,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
499 } 500 }
500 501
501 *out = tlr; 502 *out = tlr;
502 return ResultSuccess; 503 R_SUCCEED();
503 } 504 }
504 } 505 }
505 506
@@ -528,7 +529,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
528 // We succeeded! 529 // We succeeded!
529 tlp_guard.Cancel(); 530 tlp_guard.Cancel();
530 *out = tlr; 531 *out = tlr;
531 return ResultSuccess; 532 R_SUCCEED();
532} 533}
533 534
534Result KProcess::DeleteThreadLocalRegion(VAddr addr) { 535Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
@@ -576,7 +577,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
576 KThreadLocalPage::Free(kernel, page_to_free); 577 KThreadLocalPage::Free(kernel, page_to_free);
577 } 578 }
578 579
579 return ResultSuccess; 580 R_SUCCEED();
580} 581}
581 582
582bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size, 583bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
@@ -628,7 +629,7 @@ bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
628void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) { 629void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
629 const auto ReprotectSegment = [&](const CodeSet::Segment& segment, 630 const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
630 Svc::MemoryPermission permission) { 631 Svc::MemoryPermission permission) {
631 page_table->SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission); 632 page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
632 }; 633 };
633 634
634 kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(), 635 kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(),
@@ -645,19 +646,18 @@ bool KProcess::IsSignaled() const {
645} 646}
646 647
647KProcess::KProcess(KernelCore& kernel_) 648KProcess::KProcess(KernelCore& kernel_)
648 : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{std::make_unique<KPageTable>( 649 : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{kernel_.System()},
649 kernel_.System())},
650 handle_table{kernel_}, address_arbiter{kernel_.System()}, condition_var{kernel_.System()}, 650 handle_table{kernel_}, address_arbiter{kernel_.System()}, condition_var{kernel_.System()},
651 state_lock{kernel_}, list_lock{kernel_} {} 651 state_lock{kernel_}, list_lock{kernel_} {}
652 652
653KProcess::~KProcess() = default; 653KProcess::~KProcess() = default;
654 654
655void KProcess::ChangeStatus(ProcessStatus new_status) { 655void KProcess::ChangeState(State new_state) {
656 if (status == new_status) { 656 if (state == new_state) {
657 return; 657 return;
658 } 658 }
659 659
660 status = new_status; 660 state = new_state;
661 is_signaled = true; 661 is_signaled = true;
662 NotifyAvailable(); 662 NotifyAvailable();
663} 663}
@@ -668,17 +668,17 @@ Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
668 // The kernel always ensures that the given stack size is page aligned. 668 // The kernel always ensures that the given stack size is page aligned.
669 main_thread_stack_size = Common::AlignUp(stack_size, PageSize); 669 main_thread_stack_size = Common::AlignUp(stack_size, PageSize);
670 670
671 const VAddr start{page_table->GetStackRegionStart()}; 671 const VAddr start{page_table.GetStackRegionStart()};
672 const std::size_t size{page_table->GetStackRegionEnd() - start}; 672 const std::size_t size{page_table.GetStackRegionEnd() - start};
673 673
674 CASCADE_RESULT(main_thread_stack_top, 674 CASCADE_RESULT(main_thread_stack_top,
675 page_table->AllocateAndMapMemory( 675 page_table.AllocateAndMapMemory(
676 main_thread_stack_size / PageSize, PageSize, false, start, size / PageSize, 676 main_thread_stack_size / PageSize, PageSize, false, start, size / PageSize,
677 KMemoryState::Stack, KMemoryPermission::UserReadWrite)); 677 KMemoryState::Stack, KMemoryPermission::UserReadWrite));
678 678
679 main_thread_stack_top += main_thread_stack_size; 679 main_thread_stack_top += main_thread_stack_size;
680 680
681 return ResultSuccess; 681 R_SUCCEED();
682} 682}
683 683
684} // namespace Kernel 684} // namespace Kernel
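
AllocateMainThreadStack above rounds the requested size up to a page boundary with Common::AlignUp before reserving and mapping it, then points main_thread_stack_top at the high end of the mapping. A sketch of that rounding, assuming the usual power-of-two bit trick (yuzu's real helper lives in common/alignment.h):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    constexpr std::size_t PageSize = 0x1000;

    // Round value up to the next multiple of align; align must be a
    // power of two.
    template <typename T>
    constexpr T AlignUp(T value, std::size_t align) {
        return static_cast<T>((value + align - 1) & ~static_cast<T>(align - 1));
    }

    int main() {
        assert(AlignUp<std::uint64_t>(0x1234, PageSize) == 0x2000);
        assert(AlignUp<std::uint64_t>(0x2000, PageSize) == 0x2000);
        return 0;
    }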
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index d56d73bab..2e0cc3d0b 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -13,6 +13,7 @@
13#include "core/hle/kernel/k_auto_object.h" 13#include "core/hle/kernel/k_auto_object.h"
14#include "core/hle/kernel/k_condition_variable.h" 14#include "core/hle/kernel/k_condition_variable.h"
15#include "core/hle/kernel/k_handle_table.h" 15#include "core/hle/kernel/k_handle_table.h"
16#include "core/hle/kernel/k_page_table.h"
16#include "core/hle/kernel/k_synchronization_object.h" 17#include "core/hle/kernel/k_synchronization_object.h"
17#include "core/hle/kernel/k_thread_local_page.h" 18#include "core/hle/kernel/k_thread_local_page.h"
18#include "core/hle/kernel/k_worker_task.h" 19#include "core/hle/kernel/k_worker_task.h"
@@ -31,7 +32,6 @@ class ProgramMetadata;
31namespace Kernel { 32namespace Kernel {
32 33
33class KernelCore; 34class KernelCore;
34class KPageTable;
35class KResourceLimit; 35class KResourceLimit;
36class KThread; 36class KThread;
37class KSharedMemoryInfo; 37class KSharedMemoryInfo;
@@ -45,24 +45,6 @@ enum class MemoryRegion : u16 {
45 BASE = 3, 45 BASE = 3,
46}; 46};
47 47
48/**
49 * Indicates the status of a Process instance.
50 *
51 * @note These match the values as used by kernel,
52 * so new entries should only be added if RE
53 * shows that a new value has been introduced.
54 */
55enum class ProcessStatus {
56 Created,
57 CreatedWithDebuggerAttached,
58 Running,
59 WaitingForDebuggerToAttach,
60 DebuggerAttached,
61 Exiting,
62 Exited,
63 DebugBreak,
64};
65
66enum class ProcessActivity : u32 { 48enum class ProcessActivity : u32 {
67 Runnable, 49 Runnable,
68 Paused, 50 Paused,
@@ -89,6 +71,17 @@ public:
89 explicit KProcess(KernelCore& kernel_); 71 explicit KProcess(KernelCore& kernel_);
90 ~KProcess() override; 72 ~KProcess() override;
91 73
74 enum class State {
75 Created = static_cast<u32>(Svc::ProcessState::Created),
76 CreatedAttached = static_cast<u32>(Svc::ProcessState::CreatedAttached),
77 Running = static_cast<u32>(Svc::ProcessState::Running),
78 Crashed = static_cast<u32>(Svc::ProcessState::Crashed),
79 RunningAttached = static_cast<u32>(Svc::ProcessState::RunningAttached),
80 Terminating = static_cast<u32>(Svc::ProcessState::Terminating),
81 Terminated = static_cast<u32>(Svc::ProcessState::Terminated),
82 DebugBreak = static_cast<u32>(Svc::ProcessState::DebugBreak),
83 };
84
92 enum : u64 { 85 enum : u64 {
93 /// Lowest allowed process ID for a kernel initial process. 86 /// Lowest allowed process ID for a kernel initial process.
94 InitialKIPIDMin = 1, 87 InitialKIPIDMin = 1,
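
Defining each KProcess::State enumerator as its Svc::ProcessState counterpart, as the new enum above does, means reporting the state to userland degenerates to a cast. A hedged sketch of that correspondence (both enums abbreviated here; the ToSvc helper is illustrative and not part of the patch):

    #include <cstdint>

    using u32 = std::uint32_t;

    namespace Svc {
    enum class ProcessState : u32 {
        Created, CreatedAttached, Running, Crashed,
        RunningAttached, Terminating, Terminated, DebugBreak,
    };
    }

    enum class State : u32 {
        Created = static_cast<u32>(Svc::ProcessState::Created),
        Terminating = static_cast<u32>(Svc::ProcessState::Terminating),
        Terminated = static_cast<u32>(Svc::ProcessState::Terminated),
    };

    // Because the numeric values coincide, no translation table is needed.
    constexpr Svc::ProcessState ToSvc(State s) {
        return static_cast<Svc::ProcessState>(s);
    }

    static_assert(ToSvc(State::Terminated) == Svc::ProcessState::Terminated);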
@@ -114,12 +107,12 @@ public:
114 107
115 /// Gets a reference to the process' page table. 108 /// Gets a reference to the process' page table.
116 KPageTable& PageTable() { 109 KPageTable& PageTable() {
117 return *page_table; 110 return page_table;
118 } 111 }
119 112
 120 /// Gets a const reference to the process' page table. 113 /// Gets a const reference to the process' page table.
121 const KPageTable& PageTable() const { 114 const KPageTable& PageTable() const {
122 return *page_table; 115 return page_table;
123 } 116 }
124 117
125 /// Gets a reference to the process' handle table. 118 /// Gets a reference to the process' handle table.
@@ -145,26 +138,25 @@ public:
145 } 138 }
146 139
147 Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) { 140 Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
148 return condition_var.Wait(address, cv_key, tag, ns); 141 R_RETURN(condition_var.Wait(address, cv_key, tag, ns));
149 } 142 }
150 143
151 Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) { 144 Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) {
152 return address_arbiter.SignalToAddress(address, signal_type, value, count); 145 R_RETURN(address_arbiter.SignalToAddress(address, signal_type, value, count));
153 } 146 }
154 147
155 Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value, 148 Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
156 s64 timeout) { 149 s64 timeout) {
157 return address_arbiter.WaitForAddress(address, arb_type, value, timeout); 150 R_RETURN(address_arbiter.WaitForAddress(address, arb_type, value, timeout));
158 } 151 }
159 152
160 /// Gets the address to the process' dedicated TLS region. 153 VAddr GetProcessLocalRegionAddress() const {
161 VAddr GetTLSRegionAddress() const { 154 return plr_address;
162 return tls_region_address;
163 } 155 }
164 156
165 /// Gets the current status of the process 157 /// Gets the current status of the process
166 ProcessStatus GetStatus() const { 158 State GetState() const {
167 return status; 159 return state;
168 } 160 }
169 161
170 /// Gets the unique ID that identifies this particular process. 162 /// Gets the unique ID that identifies this particular process.
@@ -286,18 +278,18 @@ public:
286 } 278 }
287 279
288 /// Retrieves the total physical memory available to this process in bytes. 280 /// Retrieves the total physical memory available to this process in bytes.
289 u64 GetTotalPhysicalMemoryAvailable() const; 281 u64 GetTotalPhysicalMemoryAvailable();
290 282
291 /// Retrieves the total physical memory available to this process in bytes, 283 /// Retrieves the total physical memory available to this process in bytes,
292 /// without the size of the personal system resource heap added to it. 284 /// without the size of the personal system resource heap added to it.
293 u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource() const; 285 u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource();
294 286
295 /// Retrieves the total physical memory used by this process in bytes. 287 /// Retrieves the total physical memory used by this process in bytes.
296 u64 GetTotalPhysicalMemoryUsed() const; 288 u64 GetTotalPhysicalMemoryUsed();
297 289
298 /// Retrieves the total physical memory used by this process in bytes, 290 /// Retrieves the total physical memory used by this process in bytes,
299 /// without the size of the personal system resource heap added to it. 291 /// without the size of the personal system resource heap added to it.
300 u64 GetTotalPhysicalMemoryUsedWithoutSystemResource() const; 292 u64 GetTotalPhysicalMemoryUsedWithoutSystemResource();
301 293
302 /// Gets the list of all threads created with this process as their owner. 294 /// Gets the list of all threads created with this process as their owner.
303 std::list<KThread*>& GetThreadList() { 295 std::list<KThread*>& GetThreadList() {
@@ -415,19 +407,24 @@ private:
415 pinned_threads[core_id] = nullptr; 407 pinned_threads[core_id] = nullptr;
416 } 408 }
417 409
418 /// Changes the process status. If the status is different 410 void FinalizeHandleTable() {
419 /// from the current process status, then this will trigger 411 // Finalize the table.
420 /// a process signal. 412 handle_table.Finalize();
421 void ChangeStatus(ProcessStatus new_status); 413
414 // Note that the table is finalized.
415 is_handle_table_initialized = false;
416 }
417
418 void ChangeState(State new_state);
422 419
423 /// Allocates the main thread stack for the process, given the stack size in bytes. 420 /// Allocates the main thread stack for the process, given the stack size in bytes.
424 Result AllocateMainThreadStack(std::size_t stack_size); 421 Result AllocateMainThreadStack(std::size_t stack_size);
425 422
426 /// Memory manager for this process 423 /// Memory manager for this process
427 std::unique_ptr<KPageTable> page_table; 424 KPageTable page_table;
428 425
429 /// Current status of the process 426 /// Current status of the process
430 ProcessStatus status{}; 427 State state{};
431 428
432 /// The ID of this process 429 /// The ID of this process
433 u64 process_id = 0; 430 u64 process_id = 0;
@@ -443,6 +440,8 @@ private:
443 /// Resource limit descriptor for this process 440 /// Resource limit descriptor for this process
444 KResourceLimit* resource_limit{}; 441 KResourceLimit* resource_limit{};
445 442
443 VAddr system_resource_address{};
444
446 /// The ideal CPU core for this process, threads are scheduled on this core by default. 445 /// The ideal CPU core for this process, threads are scheduled on this core by default.
447 u8 ideal_core = 0; 446 u8 ideal_core = 0;
448 447
@@ -469,7 +468,7 @@ private:
469 KConditionVariable condition_var; 468 KConditionVariable condition_var;
470 469
471 /// Address indicating the location of the process' dedicated TLS region. 470 /// Address indicating the location of the process' dedicated TLS region.
472 VAddr tls_region_address = 0; 471 VAddr plr_address = 0;
473 472
474 /// Random values for svcGetInfo RandomEntropy 473 /// Random values for svcGetInfo RandomEntropy
475 std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{}; 474 std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{};
@@ -495,8 +494,12 @@ private:
495 /// Schedule count of this process 494 /// Schedule count of this process
496 s64 schedule_count{}; 495 s64 schedule_count{};
497 496
497 size_t memory_release_hint{};
498
498 bool is_signaled{}; 499 bool is_signaled{};
499 bool is_suspended{}; 500 bool is_suspended{};
501 bool is_immortal{};
502 bool is_handle_table_initialized{};
500 bool is_initialized{}; 503 bool is_initialized{};
501 504
502 std::atomic<u16> num_running_threads{}; 505 std::atomic<u16> num_running_threads{};
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index c34ce7a17..b1cabbca0 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -81,8 +81,8 @@ void KScheduler::RescheduleCurrentHLEThread(KernelCore& kernel) {
81 // HACK: we cannot schedule from this thread, it is not a core thread 81 // HACK: we cannot schedule from this thread, it is not a core thread
82 ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1); 82 ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);
83 83
 84 // Special case to ensure dummy threads that are waiting block 84 // Ensure that waiting dummy threads actually block.
85 GetCurrentThread(kernel).IfDummyThreadTryWait(); 85 GetCurrentThread(kernel).DummyThreadBeginWait();
86 86
87 ASSERT(GetCurrentThread(kernel).GetState() != ThreadState::Waiting); 87 ASSERT(GetCurrentThread(kernel).GetState() != ThreadState::Waiting);
88 GetCurrentThread(kernel).EnableDispatch(); 88 GetCurrentThread(kernel).EnableDispatch();
@@ -314,6 +314,16 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
314 idle_cores &= ~(1ULL << core_id); 314 idle_cores &= ~(1ULL << core_id);
315 } 315 }
316 316
317 // HACK: any waiting dummy threads can wake up now.
318 kernel.GlobalSchedulerContext().WakeupWaitingDummyThreads();
319
 320 // HACK: if we are a dummy thread and need to go to sleep, indicate
321 // that for when the lock is released.
322 KThread* const cur_thread = GetCurrentThreadPointer(kernel);
323 if (cur_thread->IsDummyThread() && cur_thread->GetState() != ThreadState::Runnable) {
324 cur_thread->RequestDummyThreadWait();
325 }
326
317 return cores_needing_scheduling; 327 return cores_needing_scheduling;
318} 328}
319 329
@@ -531,11 +541,23 @@ void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, Threa
531 GetPriorityQueue(kernel).Remove(thread); 541 GetPriorityQueue(kernel).Remove(thread);
532 IncrementScheduledCount(thread); 542 IncrementScheduledCount(thread);
533 SetSchedulerUpdateNeeded(kernel); 543 SetSchedulerUpdateNeeded(kernel);
544
545 if (thread->IsDummyThread()) {
546 // HACK: if this is a dummy thread, it should no longer wake up when the
547 // scheduler lock is released.
548 kernel.GlobalSchedulerContext().UnregisterDummyThreadForWakeup(thread);
549 }
534 } else if (cur_state == ThreadState::Runnable) { 550 } else if (cur_state == ThreadState::Runnable) {
535 // If we're now runnable, then we weren't previously, and we should add. 551 // If we're now runnable, then we weren't previously, and we should add.
536 GetPriorityQueue(kernel).PushBack(thread); 552 GetPriorityQueue(kernel).PushBack(thread);
537 IncrementScheduledCount(thread); 553 IncrementScheduledCount(thread);
538 SetSchedulerUpdateNeeded(kernel); 554 SetSchedulerUpdateNeeded(kernel);
555
556 if (thread->IsDummyThread()) {
557 // HACK: if this is a dummy thread, it should wake up when the scheduler
558 // lock is released.
559 kernel.GlobalSchedulerContext().RegisterDummyThreadForWakeup(thread);
560 }
539 } 561 }
540} 562}
541 563
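
The dummy-thread hacks above split blocking into two phases: while the scheduler lock is held, a dummy (host-backed) thread only records that it must sleep (RequestDummyThreadWait); the actual block (DummyThreadBeginWait) happens after the lock is released, and the wakeup list undoes it. A plausible host-side sketch of that contract, assuming a condition-variable implementation (the class and method names below are illustrative, not the patch's real code):

    #include <condition_variable>
    #include <mutex>

    class DummyThreadWaiter {
    public:
        // Called with the scheduler lock held: note that this host thread
        // must sleep once the lock is dropped.
        void RequestWait() {
            std::scoped_lock lk{m_mutex};
            m_wait_requested = true;
        }

        // Called after the scheduler lock is released: block until woken.
        void BeginWait() {
            std::unique_lock lk{m_mutex};
            m_cv.wait(lk, [this] { return !m_wait_requested; });
        }

        // Called when the emulated thread becomes runnable again.
        void Wakeup() {
            {
                std::scoped_lock lk{m_mutex};
                m_wait_requested = false;
            }
            m_cv.notify_one();
        }

    private:
        std::mutex m_mutex;
        std::condition_variable m_cv;
        bool m_wait_requested{};
    };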
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index 4252c9adb..faf03fcc8 100644
--- a/src/core/hle/kernel/k_server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
@@ -22,15 +22,12 @@
22#include "core/hle/kernel/k_thread.h" 22#include "core/hle/kernel/k_thread.h"
23#include "core/hle/kernel/k_thread_queue.h" 23#include "core/hle/kernel/k_thread_queue.h"
24#include "core/hle/kernel/kernel.h" 24#include "core/hle/kernel/kernel.h"
25#include "core/hle/kernel/service_thread.h"
26#include "core/memory.h" 25#include "core/memory.h"
27 26
28namespace Kernel { 27namespace Kernel {
29 28
30using ThreadQueueImplForKServerSessionRequest = KThreadQueue; 29using ThreadQueueImplForKServerSessionRequest = KThreadQueue;
31 30
32static constexpr u32 MessageBufferSize = 0x100;
33
34KServerSession::KServerSession(KernelCore& kernel_) 31KServerSession::KServerSession(KernelCore& kernel_)
35 : KSynchronizationObject{kernel_}, m_lock{kernel_} {} 32 : KSynchronizationObject{kernel_}, m_lock{kernel_} {}
36 33
@@ -73,59 +70,7 @@ bool KServerSession::IsSignaled() const {
73 } 70 }
74 71
75 // Otherwise, we're signaled if we have a request and aren't handling one. 72 // Otherwise, we're signaled if we have a request and aren't handling one.
76 return !m_thread_request_list.empty() && m_current_thread_request == nullptr; 73 return !m_request_list.empty() && m_current_request == nullptr;
77}
78
79void KServerSession::AppendDomainHandler(SessionRequestHandlerPtr handler) {
80 manager->AppendDomainHandler(std::move(handler));
81}
82
83std::size_t KServerSession::NumDomainRequestHandlers() const {
84 return manager->DomainHandlerCount();
85}
86
87Result KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) {
88 if (!context.HasDomainMessageHeader()) {
89 return ResultSuccess;
90 }
91
92 // Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs
93 context.SetSessionRequestManager(manager);
94
95 // If there is a DomainMessageHeader, then this is CommandType "Request"
96 const auto& domain_message_header = context.GetDomainMessageHeader();
97 const u32 object_id{domain_message_header.object_id};
98 switch (domain_message_header.command) {
99 case IPC::DomainMessageHeader::CommandType::SendMessage:
100 if (object_id > manager->DomainHandlerCount()) {
101 LOG_CRITICAL(IPC,
102 "object_id {} is too big! This probably means a recent service call "
103 "to {} needed to return a new interface!",
104 object_id, name);
105 ASSERT(false);
106 return ResultSuccess; // Ignore error if asserts are off
107 }
108 if (auto strong_ptr = manager->DomainHandler(object_id - 1).lock()) {
109 return strong_ptr->HandleSyncRequest(*this, context);
110 } else {
111 ASSERT(false);
112 return ResultSuccess;
113 }
114
115 case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: {
116 LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id);
117
118 manager->CloseDomainHandler(object_id - 1);
119
120 IPC::ResponseBuilder rb{context, 2};
121 rb.Push(ResultSuccess);
122 return ResultSuccess;
123 }
124 }
125
126 LOG_CRITICAL(IPC, "Unknown domain command={}", domain_message_header.command.Value());
127 ASSERT(false);
128 return ResultSuccess;
129} 74}
130 75
131Result KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) { 76Result KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) {
@@ -134,43 +79,11 @@ Result KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& m
134 79
135 context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf); 80 context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf);
136 81
137 // Ensure we have a session request handler 82 return manager->QueueSyncRequest(parent, std::move(context));
138 if (manager->HasSessionRequestHandler(*context)) {
139 if (auto strong_ptr = manager->GetServiceThread().lock()) {
140 strong_ptr->QueueSyncRequest(*parent, std::move(context));
141 } else {
142 ASSERT_MSG(false, "strong_ptr is nullptr!");
143 }
144 } else {
145 ASSERT_MSG(false, "handler is invalid!");
146 }
147
148 return ResultSuccess;
149} 83}
150 84
151Result KServerSession::CompleteSyncRequest(HLERequestContext& context) { 85Result KServerSession::CompleteSyncRequest(HLERequestContext& context) {
152 Result result = ResultSuccess; 86 Result result = manager->CompleteSyncRequest(this, context);
153
154 // If the session has been converted to a domain, handle the domain request
155 if (manager->HasSessionRequestHandler(context)) {
156 if (IsDomain() && context.HasDomainMessageHeader()) {
157 result = HandleDomainSyncRequest(context);
158 // If there is no domain header, the regular session handler is used
159 } else if (manager->HasSessionHandler()) {
160 // If this ServerSession has an associated HLE handler, forward the request to it.
161 result = manager->SessionHandler().HandleSyncRequest(*this, context);
162 }
163 } else {
164 ASSERT_MSG(false, "Session handler is invalid, stubbing response!");
165 IPC::ResponseBuilder rb(context, 2);
166 rb.Push(ResultSuccess);
167 }
168
169 if (convert_to_domain) {
170 ASSERT_MSG(!IsDomain(), "ServerSession is already a domain instance.");
171 manager->ConvertToDomain();
172 convert_to_domain = false;
173 }
174 87
175 // The calling thread is waiting for this request to complete, so wake it up. 88 // The calling thread is waiting for this request to complete, so wake it up.
176 context.GetThread().EndWait(result); 89 context.GetThread().EndWait(result);
@@ -178,7 +91,7 @@ Result KServerSession::CompleteSyncRequest(HLERequestContext& context) {
178 return result; 91 return result;
179} 92}
180 93
181Result KServerSession::OnRequest() { 94Result KServerSession::OnRequest(KSessionRequest* request) {
182 // Create the wait queue. 95 // Create the wait queue.
183 ThreadQueueImplForKServerSessionRequest wait_queue{kernel}; 96 ThreadQueueImplForKServerSessionRequest wait_queue{kernel};
184 97
@@ -198,14 +111,13 @@ Result KServerSession::OnRequest() {
198 this->QueueSyncRequest(GetCurrentThreadPointer(kernel), memory); 111 this->QueueSyncRequest(GetCurrentThreadPointer(kernel), memory);
199 } else { 112 } else {
200 // Non-HLE request. 113 // Non-HLE request.
201 auto* thread{GetCurrentThreadPointer(kernel)};
202 114
203 // Get whether we're empty. 115 // Get whether we're empty.
204 const bool was_empty = m_thread_request_list.empty(); 116 const bool was_empty = m_request_list.empty();
205 117
206 // Add the thread to the list. 118 // Add the request to the list.
207 thread->Open(); 119 request->Open();
208 m_thread_request_list.push_back(thread); 120 m_request_list.push_back(*request);
209 121
210 // If we were empty, signal. 122 // If we were empty, signal.
211 if (was_empty) { 123 if (was_empty) {
@@ -213,6 +125,9 @@ Result KServerSession::OnRequest() {
213 } 125 }
214 } 126 }
215 127
128 // If we have a request event, this is asynchronous, and we don't need to wait.
129 R_SUCCEED_IF(request->GetEvent() != nullptr);
130
216 // This is a synchronous request, so we should wait for our request to complete. 131 // This is a synchronous request, so we should wait for our request to complete.
217 GetCurrentThread(kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC); 132 GetCurrentThread(kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
218 GetCurrentThread(kernel).BeginWait(&wait_queue); 133 GetCurrentThread(kernel).BeginWait(&wait_queue);
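
OnRequest() above only calls NotifyAvailable() when the request list transitions from empty to non-empty, so a waiter is signaled once per batch rather than once per queued request. A tiny sketch of that edge-triggered pattern (std::deque and the bool return stand in for the kernel's intrusive list and synchronization-object signaling):

    #include <deque>

    struct Request {};

    class RequestQueue {
    public:
        // Returns true when the consumer should be signaled, i.e. only on
        // the empty -> non-empty transition, as in KServerSession::OnRequest.
        bool Push(Request* request) {
            const bool was_empty = m_list.empty();
            m_list.push_back(request);
            return was_empty;
        }

    private:
        std::deque<Request*> m_list;
    };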
@@ -223,32 +138,32 @@ Result KServerSession::OnRequest() {
223 138
224Result KServerSession::SendReply() { 139Result KServerSession::SendReply() {
225 // Lock the session. 140 // Lock the session.
226 KScopedLightLock lk(m_lock); 141 KScopedLightLock lk{m_lock};
227 142
228 // Get the request. 143 // Get the request.
229 KThread* client_thread; 144 KSessionRequest* request;
230 { 145 {
231 KScopedSchedulerLock sl{kernel}; 146 KScopedSchedulerLock sl{kernel};
232 147
233 // Get the current request. 148 // Get the current request.
234 client_thread = m_current_thread_request; 149 request = m_current_request;
235 R_UNLESS(client_thread != nullptr, ResultInvalidState); 150 R_UNLESS(request != nullptr, ResultInvalidState);
236 151
237 // Clear the current request, since we're processing it. 152 // Clear the current request, since we're processing it.
238 m_current_thread_request = nullptr; 153 m_current_request = nullptr;
239 if (!m_thread_request_list.empty()) { 154 if (!m_request_list.empty()) {
240 this->NotifyAvailable(); 155 this->NotifyAvailable();
241 } 156 }
242 } 157 }
243 158
244 // Close reference to the request once we're done processing it. 159 // Close reference to the request once we're done processing it.
245 SCOPE_EXIT({ client_thread->Close(); }); 160 SCOPE_EXIT({ request->Close(); });
246 161
247 // Extract relevant information from the request. 162 // Extract relevant information from the request.
248 // const uintptr_t client_message = request->GetAddress(); 163 const uintptr_t client_message = request->GetAddress();
249 // const size_t client_buffer_size = request->GetSize(); 164 const size_t client_buffer_size = request->GetSize();
250 // KThread *client_thread = request->GetThread(); 165 KThread* client_thread = request->GetThread();
251 // KEvent *event = request->GetEvent(); 166 KEvent* event = request->GetEvent();
252 167
253 // Check whether we're closed. 168 // Check whether we're closed.
254 const bool closed = (client_thread == nullptr || parent->IsClientClosed()); 169 const bool closed = (client_thread == nullptr || parent->IsClientClosed());
@@ -261,8 +176,8 @@ Result KServerSession::SendReply() {
261 UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess()); 176 UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess());
262 177
263 auto* src_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress()); 178 auto* src_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress());
264 auto* dst_msg_buffer = memory.GetPointer(client_thread->GetTLSAddress()); 179 auto* dst_msg_buffer = memory.GetPointer(client_message);
265 std::memcpy(dst_msg_buffer, src_msg_buffer, MessageBufferSize); 180 std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size);
266 } else { 181 } else {
267 result = ResultSessionClosed; 182 result = ResultSessionClosed;
268 } 183 }
@@ -278,11 +193,30 @@ Result KServerSession::SendReply() {
278 193
279 // If there's a client thread, update it. 194 // If there's a client thread, update it.
280 if (client_thread != nullptr) { 195 if (client_thread != nullptr) {
281 // End the client thread's wait. 196 if (event != nullptr) {
282 KScopedSchedulerLock sl{kernel}; 197 // // Get the client process/page table.
198 // KProcess *client_process = client_thread->GetOwnerProcess();
199 // KPageTable *client_page_table = &client_process->PageTable();
200
201 // // If we need to, reply with an async error.
202 // if (R_FAILED(client_result)) {
203 // ReplyAsyncError(client_process, client_message, client_buffer_size,
204 // client_result);
205 // }
206
207 // // Unlock the client buffer.
208 // // NOTE: Nintendo does not check the result of this.
209 // client_page_table->UnlockForIpcUserBuffer(client_message, client_buffer_size);
210
211 // Signal the event.
212 event->Signal();
213 } else {
214 // End the client thread's wait.
215 KScopedSchedulerLock sl{kernel};
283 216
284 if (!client_thread->IsTerminationRequested()) { 217 if (!client_thread->IsTerminationRequested()) {
285 client_thread->EndWait(client_result); 218 client_thread->EndWait(client_result);
219 }
286 } 220 }
287 } 221 }
288 222
@@ -291,10 +225,10 @@ Result KServerSession::SendReply() {
291 225
292Result KServerSession::ReceiveRequest() { 226Result KServerSession::ReceiveRequest() {
293 // Lock the session. 227 // Lock the session.
294 KScopedLightLock lk(m_lock); 228 KScopedLightLock lk{m_lock};
295 229
296 // Get the request and client thread. 230 // Get the request and client thread.
297 // KSessionRequest *request; 231 KSessionRequest* request;
298 KThread* client_thread; 232 KThread* client_thread;
299 233
300 { 234 {
@@ -304,35 +238,41 @@ Result KServerSession::ReceiveRequest() {
304 R_UNLESS(!parent->IsClientClosed(), ResultSessionClosed); 238 R_UNLESS(!parent->IsClientClosed(), ResultSessionClosed);
305 239
306 // Ensure we aren't already servicing a request. 240 // Ensure we aren't already servicing a request.
307 R_UNLESS(m_current_thread_request == nullptr, ResultNotFound); 241 R_UNLESS(m_current_request == nullptr, ResultNotFound);
308 242
309 // Ensure we have a request to service. 243 // Ensure we have a request to service.
310 R_UNLESS(!m_thread_request_list.empty(), ResultNotFound); 244 R_UNLESS(!m_request_list.empty(), ResultNotFound);
311 245
312 // Pop the first request from the list. 246 // Pop the first request from the list.
313 client_thread = m_thread_request_list.front(); 247 request = &m_request_list.front();
314 m_thread_request_list.pop_front(); 248 m_request_list.pop_front();
315 249
316 // Get the thread for the request. 250 // Get the thread for the request.
251 client_thread = request->GetThread();
317 R_UNLESS(client_thread != nullptr, ResultSessionClosed); 252 R_UNLESS(client_thread != nullptr, ResultSessionClosed);
318 253
319 // Open the client thread. 254 // Open the client thread.
320 client_thread->Open(); 255 client_thread->Open();
321 } 256 }
322 257
323 // SCOPE_EXIT({ client_thread->Close(); }); 258 SCOPE_EXIT({ client_thread->Close(); });
324 259
325 // Set the request as our current. 260 // Set the request as our current.
326 m_current_thread_request = client_thread; 261 m_current_request = request;
262
263 // Get the client address.
264 uintptr_t client_message = request->GetAddress();
265 size_t client_buffer_size = request->GetSize();
266 // bool recv_list_broken = false;
327 267
328 // Receive the message. 268 // Receive the message.
329 Core::Memory::Memory& memory{kernel.System().Memory()}; 269 Core::Memory::Memory& memory{kernel.System().Memory()};
330 KThread* server_thread{GetCurrentThreadPointer(kernel)}; 270 KThread* server_thread{GetCurrentThreadPointer(kernel)};
331 UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess()); 271 UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess());
332 272
333 auto* src_msg_buffer = memory.GetPointer(client_thread->GetTLSAddress()); 273 auto* src_msg_buffer = memory.GetPointer(client_message);
334 auto* dst_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress()); 274 auto* dst_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress());
335 std::memcpy(dst_msg_buffer, src_msg_buffer, MessageBufferSize); 275 std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size);
336 276
337 // We succeeded. 277 // We succeeded.
338 return ResultSuccess; 278 return ResultSuccess;
@@ -344,35 +284,34 @@ void KServerSession::CleanupRequests() {
344 // Clean up any pending requests. 284 // Clean up any pending requests.
345 while (true) { 285 while (true) {
346 // Get the next request. 286 // Get the next request.
347 // KSessionRequest *request = nullptr; 287 KSessionRequest* request = nullptr;
348 KThread* client_thread = nullptr;
349 { 288 {
350 KScopedSchedulerLock sl{kernel}; 289 KScopedSchedulerLock sl{kernel};
351 290
352 if (m_current_thread_request) { 291 if (m_current_request) {
353 // Choose the current request if we have one. 292 // Choose the current request if we have one.
354 client_thread = m_current_thread_request; 293 request = m_current_request;
355 m_current_thread_request = nullptr; 294 m_current_request = nullptr;
356 } else if (!m_thread_request_list.empty()) { 295 } else if (!m_request_list.empty()) {
357 // Pop the request from the front of the list. 296 // Pop the request from the front of the list.
358 client_thread = m_thread_request_list.front(); 297 request = &m_request_list.front();
359 m_thread_request_list.pop_front(); 298 m_request_list.pop_front();
360 } 299 }
361 } 300 }
362 301
363 // If there's no request, we're done. 302 // If there's no request, we're done.
364 if (client_thread == nullptr) { 303 if (request == nullptr) {
365 break; 304 break;
366 } 305 }
367 306
368 // Close a reference to the request once it's cleaned up. 307 // Close a reference to the request once it's cleaned up.
369 SCOPE_EXIT({ client_thread->Close(); }); 308 SCOPE_EXIT({ request->Close(); });
370 309
371 // Extract relevant information from the request. 310 // Extract relevant information from the request.
372 // const uintptr_t client_message = request->GetAddress(); 311 // const uintptr_t client_message = request->GetAddress();
373 // const size_t client_buffer_size = request->GetSize(); 312 // const size_t client_buffer_size = request->GetSize();
374 // KThread *client_thread = request->GetThread(); 313 KThread* client_thread = request->GetThread();
375 // KEvent *event = request->GetEvent(); 314 KEvent* event = request->GetEvent();
376 315
377 // KProcess *server_process = request->GetServerProcess(); 316 // KProcess *server_process = request->GetServerProcess();
378 // KProcess *client_process = (client_thread != nullptr) ? 317 // KProcess *client_process = (client_thread != nullptr) ?
@@ -385,11 +324,24 @@ void KServerSession::CleanupRequests() {
385 324
386 // If there's a client thread, update it. 325 // If there's a client thread, update it.
387 if (client_thread != nullptr) { 326 if (client_thread != nullptr) {
388 // End the client thread's wait. 327 if (event != nullptr) {
389 KScopedSchedulerLock sl{kernel}; 328 // // We need to reply async.
390 329 // ReplyAsyncError(client_process, client_message, client_buffer_size,
391 if (!client_thread->IsTerminationRequested()) { 330 // (R_SUCCEEDED(result) ? ResultSessionClosed : result));
392 client_thread->EndWait(ResultSessionClosed); 331
332 // // Unlock the client buffer.
333 // NOTE: Nintendo does not check the result of this.
334 // client_page_table->UnlockForIpcUserBuffer(client_message, client_buffer_size);
335
336 // Signal the event.
337 event->Signal();
338 } else {
339 // End the client thread's wait.
340 KScopedSchedulerLock sl{kernel};
341
342 if (!client_thread->IsTerminationRequested()) {
343 client_thread->EndWait(ResultSessionClosed);
344 }
393 } 345 }
394 } 346 }
395 } 347 }
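
The cleanup path above now branches on whether the request carries an event: a request with an event is asynchronous and is completed by signalling it, while a synchronous request wakes the blocked client thread directly. A minimal standalone sketch of that split, with hypothetical stand-in types and an illustrative result value (not the kernel's actual constant):

    #include <cstdio>

    struct Event {
        void Signal() { std::puts("event signalled"); }
    };
    struct Waiter {
        void EndWait(int result) { std::printf("wait ended: 0x%x\n", result); }
    };

    constexpr int ResultSessionClosed = 0xF601; // Illustrative value only.

    void CleanupRequest(Waiter* client_thread, Event* event) {
        if (client_thread == nullptr) {
            return; // No client to wake.
        }
        if (event != nullptr) {
            event->Signal(); // Async request: the client observes the event.
        } else {
            client_thread->EndWait(ResultSessionClosed); // Sync request: wake it.
        }
    }
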
diff --git a/src/core/hle/kernel/k_server_session.h b/src/core/hle/kernel/k_server_session.h
index 748d52826..188aef4af 100644
--- a/src/core/hle/kernel/k_server_session.h
+++ b/src/core/hle/kernel/k_server_session.h
@@ -12,6 +12,7 @@
12 12
13#include "core/hle/kernel/hle_ipc.h" 13#include "core/hle/kernel/hle_ipc.h"
14#include "core/hle/kernel/k_light_lock.h" 14#include "core/hle/kernel/k_light_lock.h"
15#include "core/hle/kernel/k_session_request.h"
15#include "core/hle/kernel/k_synchronization_object.h" 16#include "core/hle/kernel/k_synchronization_object.h"
16#include "core/hle/result.h" 17#include "core/hle/result.h"
17 18
@@ -57,44 +58,15 @@ public:
57 } 58 }
58 59
59 bool IsSignaled() const override; 60 bool IsSignaled() const override;
60
61 void OnClientClosed(); 61 void OnClientClosed();
62 62
63 void ClientConnected(SessionRequestHandlerPtr handler) {
64 if (manager) {
65 manager->SetSessionHandler(std::move(handler));
66 }
67 }
68
69 void ClientDisconnected() {
70 manager = nullptr;
71 }
72
73 /// Adds a new domain request handler to the collection of request handlers within
74 /// this ServerSession instance.
75 void AppendDomainHandler(SessionRequestHandlerPtr handler);
76
77 /// Retrieves the total number of domain request handlers that have been
78 /// appended to this ServerSession instance.
79 std::size_t NumDomainRequestHandlers() const;
80
81 /// Returns true if the session has been converted to a domain, otherwise False
82 bool IsDomain() const {
83 return manager && manager->IsDomain();
84 }
85
86 /// Converts the session to a domain at the end of the current command
87 void ConvertToDomain() {
88 convert_to_domain = true;
89 }
90
91 /// Gets the session request manager, which forwards requests to the underlying service 63 /// Gets the session request manager, which forwards requests to the underlying service
92 std::shared_ptr<SessionRequestManager>& GetSessionRequestManager() { 64 std::shared_ptr<SessionRequestManager>& GetSessionRequestManager() {
93 return manager; 65 return manager;
94 } 66 }
95 67
96 /// TODO: flesh these out to match the real kernel 68 /// TODO: flesh these out to match the real kernel
97 Result OnRequest(); 69 Result OnRequest(KSessionRequest* request);
98 Result SendReply(); 70 Result SendReply();
99 Result ReceiveRequest(); 71 Result ReceiveRequest();
100 72
@@ -108,10 +80,6 @@ private:
108 /// Completes a sync request from the emulated application. 80 /// Completes a sync request from the emulated application.
109 Result CompleteSyncRequest(HLERequestContext& context); 81 Result CompleteSyncRequest(HLERequestContext& context);
110 82
111 /// Handles a SyncRequest to a domain, forwarding the request to the proper object or closing an
112 /// object handle.
113 Result HandleDomainSyncRequest(Kernel::HLERequestContext& context);
114
115 /// This session's HLE request handlers; if nullptr, this is not an HLE server 83 /// This session's HLE request handlers; if nullptr, this is not an HLE server
116 std::shared_ptr<SessionRequestManager> manager; 84 std::shared_ptr<SessionRequestManager> manager;
117 85
@@ -122,9 +90,8 @@ private:
122 KSession* parent{}; 90 KSession* parent{};
123 91
124 /// List of threads which are pending a reply. 92 /// List of threads which are pending a reply.
125 /// FIXME: KSessionRequest 93 boost::intrusive::list<KSessionRequest> m_request_list;
126 std::list<KThread*> m_thread_request_list; 94 KSessionRequest* m_current_request{};
127 KThread* m_current_thread_request{};
128 95
129 KLightLock m_lock; 96 KLightLock m_lock;
130}; 97};
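
Switching the pending-request list from std::list<KThread*> to boost::intrusive::list<KSessionRequest> means the list node is embedded in the request object itself via list_base_hook, so queueing a request performs no allocation. A minimal sketch of the pattern, with illustrative names:

    #include <boost/intrusive/list.hpp>

    class PendingRequest : public boost::intrusive::list_base_hook<> {
    public:
        explicit PendingRequest(int id) : m_id{id} {}
        int GetId() const { return m_id; }
    private:
        int m_id;
    };

    void Example() {
        PendingRequest a{1};
        PendingRequest b{2};

        boost::intrusive::list<PendingRequest> queue;
        queue.push_back(a); // Links the hook embedded in 'a'; no allocation.
        queue.push_back(b);

        PendingRequest& front = queue.front();
        (void)front.GetId();
        queue.pop_front(); // Unlinks 'a'; the object itself is untouched.
        queue.clear();     // Unlink 'b' before the elements go out of scope.
    }
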
diff --git a/src/core/hle/kernel/k_session_request.cpp b/src/core/hle/kernel/k_session_request.cpp
new file mode 100644
index 000000000..520da6aa7
--- /dev/null
+++ b/src/core/hle/kernel/k_session_request.cpp
@@ -0,0 +1,61 @@
1// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "core/hle/kernel/k_page_buffer.h"
5#include "core/hle/kernel/k_session_request.h"
6
7namespace Kernel {
8
9Result KSessionRequest::SessionMappings::PushMap(VAddr client, VAddr server, size_t size,
10 KMemoryState state, size_t index) {
11 // At most 15 buffers of each type (4-bit descriptor counts).
12 ASSERT(index < ((1ul << 4) - 1) * 3);
13
14 // Get the mapping.
15 Mapping* mapping;
16 if (index < NumStaticMappings) {
17 mapping = &m_static_mappings[index];
18 } else {
19 // Allocate a page for the extra mappings.
20 if (m_mappings == nullptr) {
21 KPageBuffer* page_buffer = KPageBuffer::Allocate(kernel);
22 R_UNLESS(page_buffer != nullptr, ResultOutOfMemory);
23
24 m_mappings = reinterpret_cast<Mapping*>(page_buffer);
25 }
26
27 mapping = &m_mappings[index - NumStaticMappings];
28 }
29
30 // Set the mapping.
31 mapping->Set(client, server, size, state);
32
33 return ResultSuccess;
34}
35
36Result KSessionRequest::SessionMappings::PushSend(VAddr client, VAddr server, size_t size,
37 KMemoryState state) {
38 ASSERT(m_num_recv == 0);
39 ASSERT(m_num_exch == 0);
40 return this->PushMap(client, server, size, state, m_num_send++);
41}
42
43Result KSessionRequest::SessionMappings::PushReceive(VAddr client, VAddr server, size_t size,
44 KMemoryState state) {
45 ASSERT(m_num_exch == 0);
46 return this->PushMap(client, server, size, state, m_num_send + m_num_recv++);
47}
48
49Result KSessionRequest::SessionMappings::PushExchange(VAddr client, VAddr server, size_t size,
50 KMemoryState state) {
51 return this->PushMap(client, server, size, state, m_num_send + m_num_recv + m_num_exch++);
52}
53
54void KSessionRequest::SessionMappings::Finalize() {
55 if (m_mappings) {
56 KPageBuffer::Free(kernel, reinterpret_cast<KPageBuffer*>(m_mappings));
57 m_mappings = nullptr;
58 }
59}
60
61} // namespace Kernel
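
PushMap stores all three mapping kinds in one flat array: send mappings first, then receive, then exchange. That layout is why PushReceive asserts no exchanges exist yet and indexes at m_num_send + m_num_recv++. A small worked check of the indexing (values are illustrative):

    #include <cstddef>

    // type: 0 = send, 1 = receive, 2 = exchange.
    constexpr std::size_t FlatIndex(std::size_t num_send, std::size_t num_recv,
                                    int type, std::size_t i) {
        switch (type) {
        case 0:  return i;                       // PushSend
        case 1:  return num_send + i;            // PushReceive
        default: return num_send + num_recv + i; // PushExchange
        }
    }

    // With 2 sends and 3 receives queued, the first exchange lands at slot 5.
    static_assert(FlatIndex(2, 3, 2, 0) == 5);
    // 4-bit descriptor counts cap each kind at 15, hence the assert bound of 45.
    static_assert(((1ul << 4) - 1) * 3 == 45);
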
diff --git a/src/core/hle/kernel/k_session_request.h b/src/core/hle/kernel/k_session_request.h
new file mode 100644
index 000000000..e5558bc2c
--- /dev/null
+++ b/src/core/hle/kernel/k_session_request.h
@@ -0,0 +1,306 @@
1// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include <array>
7
8#include "core/hle/kernel/k_auto_object.h"
9#include "core/hle/kernel/k_event.h"
10#include "core/hle/kernel/k_memory_block.h"
11#include "core/hle/kernel/k_process.h"
12#include "core/hle/kernel/k_thread.h"
13#include "core/hle/kernel/slab_helpers.h"
14
15namespace Kernel {
16
17class KSessionRequest final : public KSlabAllocated<KSessionRequest>,
18 public KAutoObject,
19 public boost::intrusive::list_base_hook<> {
20 KERNEL_AUTOOBJECT_TRAITS(KSessionRequest, KAutoObject);
21
22public:
23 class SessionMappings {
24 private:
25 static constexpr size_t NumStaticMappings = 8;
26
27 class Mapping {
28 public:
29 constexpr void Set(VAddr c, VAddr s, size_t sz, KMemoryState st) {
30 m_client_address = c;
31 m_server_address = s;
32 m_size = sz;
33 m_state = st;
34 }
35
36 constexpr VAddr GetClientAddress() const {
37 return m_client_address;
38 }
39 constexpr VAddr GetServerAddress() const {
40 return m_server_address;
41 }
42 constexpr size_t GetSize() const {
43 return m_size;
44 }
45 constexpr KMemoryState GetMemoryState() const {
46 return m_state;
47 }
48
49 private:
50 VAddr m_client_address;
51 VAddr m_server_address;
52 size_t m_size;
53 KMemoryState m_state;
54 };
55
56 public:
57 explicit SessionMappings(KernelCore& kernel_) : kernel(kernel_) {}
58
59 void Initialize() {}
60 void Finalize();
61
62 size_t GetSendCount() const {
63 return m_num_send;
64 }
65 size_t GetReceiveCount() const {
66 return m_num_recv;
67 }
68 size_t GetExchangeCount() const {
69 return m_num_exch;
70 }
71
72 Result PushSend(VAddr client, VAddr server, size_t size, KMemoryState state);
73 Result PushReceive(VAddr client, VAddr server, size_t size, KMemoryState state);
74 Result PushExchange(VAddr client, VAddr server, size_t size, KMemoryState state);
75
76 VAddr GetSendClientAddress(size_t i) const {
77 return GetSendMapping(i).GetClientAddress();
78 }
79 VAddr GetSendServerAddress(size_t i) const {
80 return GetSendMapping(i).GetServerAddress();
81 }
82 size_t GetSendSize(size_t i) const {
83 return GetSendMapping(i).GetSize();
84 }
85 KMemoryState GetSendMemoryState(size_t i) const {
86 return GetSendMapping(i).GetMemoryState();
87 }
88
89 VAddr GetReceiveClientAddress(size_t i) const {
90 return GetReceiveMapping(i).GetClientAddress();
91 }
92 VAddr GetReceiveServerAddress(size_t i) const {
93 return GetReceiveMapping(i).GetServerAddress();
94 }
95 size_t GetReceiveSize(size_t i) const {
96 return GetReceiveMapping(i).GetSize();
97 }
98 KMemoryState GetReceiveMemoryState(size_t i) const {
99 return GetReceiveMapping(i).GetMemoryState();
100 }
101
102 VAddr GetExchangeClientAddress(size_t i) const {
103 return GetExchangeMapping(i).GetClientAddress();
104 }
105 VAddr GetExchangeServerAddress(size_t i) const {
106 return GetExchangeMapping(i).GetServerAddress();
107 }
108 size_t GetExchangeSize(size_t i) const {
109 return GetExchangeMapping(i).GetSize();
110 }
111 KMemoryState GetExchangeMemoryState(size_t i) const {
112 return GetExchangeMapping(i).GetMemoryState();
113 }
114
115 private:
116 Result PushMap(VAddr client, VAddr server, size_t size, KMemoryState state, size_t index);
117
118 const Mapping& GetSendMapping(size_t i) const {
119 ASSERT(i < m_num_send);
120
121 const size_t index = i;
122 if (index < NumStaticMappings) {
123 return m_static_mappings[index];
124 } else {
125 return m_mappings[index - NumStaticMappings];
126 }
127 }
128
129 const Mapping& GetReceiveMapping(size_t i) const {
130 ASSERT(i < m_num_recv);
131
132 const size_t index = m_num_send + i;
133 if (index < NumStaticMappings) {
134 return m_static_mappings[index];
135 } else {
136 return m_mappings[index - NumStaticMappings];
137 }
138 }
139
140 const Mapping& GetExchangeMapping(size_t i) const {
141 ASSERT(i < m_num_exch);
142
143 const size_t index = m_num_send + m_num_recv + i;
144 if (index < NumStaticMappings) {
145 return m_static_mappings[index];
146 } else {
147 return m_mappings[index - NumStaticMappings];
148 }
149 }
150
151 private:
152 KernelCore& kernel;
153 std::array<Mapping, NumStaticMappings> m_static_mappings;
154 Mapping* m_mappings{};
155 u8 m_num_send{};
156 u8 m_num_recv{};
157 u8 m_num_exch{};
158 };
159
160public:
161 explicit KSessionRequest(KernelCore& kernel_) : KAutoObject(kernel_), m_mappings(kernel_) {}
162
163 static KSessionRequest* Create(KernelCore& kernel) {
164 KSessionRequest* req = KSessionRequest::Allocate(kernel);
165 if (req != nullptr) [[likely]] {
166 KAutoObject::Create(req);
167 }
168 return req;
169 }
170
171 void Destroy() override {
172 this->Finalize();
173 KSessionRequest::Free(kernel, this);
174 }
175
176 void Initialize(KEvent* event, uintptr_t address, size_t size) {
177 m_mappings.Initialize();
178
179 m_thread = GetCurrentThreadPointer(kernel);
180 m_event = event;
181 m_address = address;
182 m_size = size;
183
184 m_thread->Open();
185 if (m_event != nullptr) {
186 m_event->Open();
187 }
188 }
189
190 static void PostDestroy(uintptr_t arg) {}
191
192 KThread* GetThread() const {
193 return m_thread;
194 }
195 KEvent* GetEvent() const {
196 return m_event;
197 }
198 uintptr_t GetAddress() const {
199 return m_address;
200 }
201 size_t GetSize() const {
202 return m_size;
203 }
204 KProcess* GetServerProcess() const {
205 return m_server;
206 }
207
208 void SetServerProcess(KProcess* process) {
209 m_server = process;
210 m_server->Open();
211 }
212
213 void ClearThread() {
214 m_thread = nullptr;
215 }
216 void ClearEvent() {
217 m_event = nullptr;
218 }
219
220 size_t GetSendCount() const {
221 return m_mappings.GetSendCount();
222 }
223 size_t GetReceiveCount() const {
224 return m_mappings.GetReceiveCount();
225 }
226 size_t GetExchangeCount() const {
227 return m_mappings.GetExchangeCount();
228 }
229
230 Result PushSend(VAddr client, VAddr server, size_t size, KMemoryState state) {
231 return m_mappings.PushSend(client, server, size, state);
232 }
233
234 Result PushReceive(VAddr client, VAddr server, size_t size, KMemoryState state) {
235 return m_mappings.PushReceive(client, server, size, state);
236 }
237
238 Result PushExchange(VAddr client, VAddr server, size_t size, KMemoryState state) {
239 return m_mappings.PushExchange(client, server, size, state);
240 }
241
242 VAddr GetSendClientAddress(size_t i) const {
243 return m_mappings.GetSendClientAddress(i);
244 }
245 VAddr GetSendServerAddress(size_t i) const {
246 return m_mappings.GetSendServerAddress(i);
247 }
248 size_t GetSendSize(size_t i) const {
249 return m_mappings.GetSendSize(i);
250 }
251 KMemoryState GetSendMemoryState(size_t i) const {
252 return m_mappings.GetSendMemoryState(i);
253 }
254
255 VAddr GetReceiveClientAddress(size_t i) const {
256 return m_mappings.GetReceiveClientAddress(i);
257 }
258 VAddr GetReceiveServerAddress(size_t i) const {
259 return m_mappings.GetReceiveServerAddress(i);
260 }
261 size_t GetReceiveSize(size_t i) const {
262 return m_mappings.GetReceiveSize(i);
263 }
264 KMemoryState GetReceiveMemoryState(size_t i) const {
265 return m_mappings.GetReceiveMemoryState(i);
266 }
267
268 VAddr GetExchangeClientAddress(size_t i) const {
269 return m_mappings.GetExchangeClientAddress(i);
270 }
271 VAddr GetExchangeServerAddress(size_t i) const {
272 return m_mappings.GetExchangeServerAddress(i);
273 }
274 size_t GetExchangeSize(size_t i) const {
275 return m_mappings.GetExchangeSize(i);
276 }
277 KMemoryState GetExchangeMemoryState(size_t i) const {
278 return m_mappings.GetExchangeMemoryState(i);
279 }
280
281private:
282 // NOTE: This is public and virtual in Nintendo's kernel.
283 void Finalize() override {
284 m_mappings.Finalize();
285
286 if (m_thread) {
287 m_thread->Close();
288 }
289 if (m_event) {
290 m_event->Close();
291 }
292 if (m_server) {
293 m_server->Close();
294 }
295 }
296
297private:
298 SessionMappings m_mappings;
299 KThread* m_thread{};
300 KProcess* m_server{};
301 KEvent* m_event{};
302 uintptr_t m_address{};
303 size_t m_size{};
304};
305
306} // namespace Kernel
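
Initialize opens a reference on the captured thread (and event, if any), and Finalize closes exactly those references, so the request keeps its dependents alive for its whole lifetime. A generic sketch of that ownership rule, using hypothetical stand-in types rather than the kernel's auto-object machinery:

    #include <atomic>
    #include <cassert>

    struct RefCounted {
        std::atomic<int> refs{1};
        void Open() { refs.fetch_add(1, std::memory_order_relaxed); }
        void Close() { refs.fetch_sub(1, std::memory_order_acq_rel); }
    };

    struct Request {
        RefCounted* thread{};
        RefCounted* event{};

        void Initialize(RefCounted* t, RefCounted* e) {
            thread = t;
            thread->Open(); // The request now keeps the thread alive.
            if ((event = e) != nullptr) {
                event->Open();
            }
        }
        void Finalize() { // Mirrors Initialize: close what was opened.
            if (thread) thread->Close();
            if (event) event->Close();
        }
    };

    int main() {
        RefCounted thread, event;
        Request req;
        req.Initialize(&thread, &event);
        assert(thread.refs == 2 && event.refs == 2);
        req.Finalize();
        assert(thread.refs == 1 && event.refs == 1);
    }
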
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp
index 8ff1545b6..a039cc591 100644
--- a/src/core/hle/kernel/k_shared_memory.cpp
+++ b/src/core/hle/kernel/k_shared_memory.cpp
@@ -50,7 +50,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
50 is_initialized = true; 50 is_initialized = true;
51 51
52 // Clear all pages in the memory. 52 // Clear all pages in the memory.
53 std::memset(device_memory_.GetPointer(physical_address_), 0, size_); 53 std::memset(device_memory_.GetPointer<void>(physical_address_), 0, size_);
54 54
55 return ResultSuccess; 55 return ResultSuccess;
56} 56}
diff --git a/src/core/hle/kernel/k_shared_memory.h b/src/core/hle/kernel/k_shared_memory.h
index 34cb98456..5620c3660 100644
--- a/src/core/hle/kernel/k_shared_memory.h
+++ b/src/core/hle/kernel/k_shared_memory.h
@@ -54,7 +54,7 @@ public:
54 * @return A pointer to the shared memory block from the specified offset 54 * @return A pointer to the shared memory block from the specified offset
55 */ 55 */
56 u8* GetPointer(std::size_t offset = 0) { 56 u8* GetPointer(std::size_t offset = 0) {
57 return device_memory->GetPointer(physical_address + offset); 57 return device_memory->GetPointer<u8>(physical_address + offset);
58 } 58 }
59 59
60 /** 60 /**
@@ -63,7 +63,7 @@ public:
63 * @return A pointer to the shared memory block from the specified offset 63 * @return A pointer to the shared memory block from the specified offset
64 */ 64 */
65 const u8* GetPointer(std::size_t offset = 0) const { 65 const u8* GetPointer(std::size_t offset = 0) const {
66 return device_memory->GetPointer(physical_address + offset); 66 return device_memory->GetPointer<u8>(physical_address + offset);
67 } 67 }
68 68
69 void Finalize() override; 69 void Finalize() override;
diff --git a/src/core/hle/kernel/k_shared_memory_info.h b/src/core/hle/kernel/k_shared_memory_info.h
index e43db8515..2bb6b6d08 100644
--- a/src/core/hle/kernel/k_shared_memory_info.h
+++ b/src/core/hle/kernel/k_shared_memory_info.h
@@ -15,7 +15,8 @@ class KSharedMemoryInfo final : public KSlabAllocated<KSharedMemoryInfo>,
15 public boost::intrusive::list_base_hook<> { 15 public boost::intrusive::list_base_hook<> {
16 16
17public: 17public:
18 explicit KSharedMemoryInfo() = default; 18 explicit KSharedMemoryInfo(KernelCore&) {}
19 KSharedMemoryInfo() = default;
19 20
20 constexpr void Initialize(KSharedMemory* shmem) { 21 constexpr void Initialize(KSharedMemory* shmem) {
21 shared_memory = shmem; 22 shared_memory = shmem;
diff --git a/src/core/hle/kernel/k_slab_heap.h b/src/core/hle/kernel/k_slab_heap.h
index 2b303537e..a8c77a7d4 100644
--- a/src/core/hle/kernel/k_slab_heap.h
+++ b/src/core/hle/kernel/k_slab_heap.h
@@ -8,6 +8,7 @@
8#include "common/assert.h" 8#include "common/assert.h"
9#include "common/common_funcs.h" 9#include "common/common_funcs.h"
10#include "common/common_types.h" 10#include "common/common_types.h"
11#include "common/spin_lock.h"
11 12
12namespace Kernel { 13namespace Kernel {
13 14
@@ -36,28 +37,34 @@ public:
36 } 37 }
37 38
38 void* Allocate() { 39 void* Allocate() {
39 Node* ret = m_head.load(); 40 // KScopedInterruptDisable di;
40 41
41 do { 42 m_lock.lock();
42 if (ret == nullptr) { 43
43 break; 44 Node* ret = m_head;
44 } 45 if (ret != nullptr) [[likely]] {
45 } while (!m_head.compare_exchange_weak(ret, ret->next)); 46 m_head = ret->next;
47 }
46 48
49 m_lock.unlock();
47 return ret; 50 return ret;
48 } 51 }
49 52
50 void Free(void* obj) { 53 void Free(void* obj) {
54 // KScopedInterruptDisable di;
55
56 m_lock.lock();
57
51 Node* node = static_cast<Node*>(obj); 58 Node* node = static_cast<Node*>(obj);
59 node->next = m_head;
60 m_head = node;
52 61
53 Node* cur_head = m_head.load(); 62 m_lock.unlock();
54 do {
55 node->next = cur_head;
56 } while (!m_head.compare_exchange_weak(cur_head, node));
57 } 63 }
58 64
59private: 65private:
60 std::atomic<Node*> m_head{}; 66 std::atomic<Node*> m_head{};
67 Common::SpinLock m_lock;
61}; 68};
62 69
63} // namespace impl 70} // namespace impl
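
The old compare_exchange_weak loop was a Treiber-stack pop/push, which is vulnerable to the ABA problem when a node is freed and reallocated between the load and the CAS; guarding the free list with a lock (presumably the motivation here) trades lock-freedom for correctness and simplicity. A minimal sketch of the locked version, with std::mutex standing in for Common::SpinLock:

    #include <mutex>

    struct LockedFreeList {
        struct Node { Node* next; };

        void* Allocate() {
            std::scoped_lock lk{m_lock};
            Node* ret = m_head;
            if (ret != nullptr) {
                m_head = ret->next; // Pop the head under the lock.
            }
            return ret;
        }

        void Free(void* obj) {
            std::scoped_lock lk{m_lock};
            Node* node = static_cast<Node*>(obj);
            node->next = m_head; // Push onto the head under the lock.
            m_head = node;
        }

        Node* m_head{};
        std::mutex m_lock;
    };
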
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 174afc80d..d57b42fdf 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -30,6 +30,7 @@
30#include "core/hle/kernel/k_worker_task_manager.h" 30#include "core/hle/kernel/k_worker_task_manager.h"
31#include "core/hle/kernel/kernel.h" 31#include "core/hle/kernel/kernel.h"
32#include "core/hle/kernel/svc_results.h" 32#include "core/hle/kernel/svc_results.h"
33#include "core/hle/kernel/svc_types.h"
33#include "core/hle/result.h" 34#include "core/hle/result.h"
34#include "core/memory.h" 35#include "core/memory.h"
35 36
@@ -38,6 +39,9 @@
38#endif 39#endif
39 40
40namespace { 41namespace {
42
43constexpr inline s32 TerminatingThreadPriority = Kernel::Svc::SystemThreadPriorityHighest - 1;
44
41static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top, 45static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
42 u32 entry_point, u32 arg) { 46 u32 entry_point, u32 arg) {
43 context = {}; 47 context = {};
@@ -144,7 +148,9 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
144 physical_affinity_mask.SetAffinity(phys_core, true); 148 physical_affinity_mask.SetAffinity(phys_core, true);
145 149
146 // Set the thread state. 150 // Set the thread state.
147 thread_state = (type == ThreadType::Main) ? ThreadState::Runnable : ThreadState::Initialized; 151 thread_state = (type == ThreadType::Main || type == ThreadType::Dummy)
152 ? ThreadState::Runnable
153 : ThreadState::Initialized;
148 154
149 // Set TLS address. 155 // Set TLS address.
150 tls_address = 0; 156 tls_address = 0;
@@ -241,7 +247,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
241 } 247 }
242 } 248 }
243 249
244 return ResultSuccess; 250 R_SUCCEED();
245} 251}
246 252
247Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg, 253Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
@@ -254,7 +260,7 @@ Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_
254 thread->host_context = std::make_shared<Common::Fiber>(std::move(init_func)); 260 thread->host_context = std::make_shared<Common::Fiber>(std::move(init_func));
255 thread->is_single_core = !Settings::values.use_multi_core.GetValue(); 261 thread->is_single_core = !Settings::values.use_multi_core.GetValue();
256 262
257 return ResultSuccess; 263 R_SUCCEED();
258} 264}
259 265
260Result KThread::InitializeDummyThread(KThread* thread) { 266Result KThread::InitializeDummyThread(KThread* thread) {
@@ -264,31 +270,32 @@ Result KThread::InitializeDummyThread(KThread* thread) {
264 // Initialize emulation parameters. 270 // Initialize emulation parameters.
265 thread->stack_parameters.disable_count = 0; 271 thread->stack_parameters.disable_count = 0;
266 272
267 return ResultSuccess; 273 R_SUCCEED();
268} 274}
269 275
270Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core) { 276Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core) {
271 return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main, 277 R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {},
272 system.GetCpuManager().GetGuestActivateFunc()); 278 ThreadType::Main, system.GetCpuManager().GetGuestActivateFunc()));
273} 279}
274 280
275Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) { 281Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
276 return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main, 282 R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {},
277 system.GetCpuManager().GetIdleThreadStartFunc()); 283 ThreadType::Main, system.GetCpuManager().GetIdleThreadStartFunc()));
278} 284}
279 285
280Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread, 286Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
281 KThreadFunction func, uintptr_t arg, s32 virt_core) { 287 KThreadFunction func, uintptr_t arg, s32 virt_core) {
282 return InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr, ThreadType::HighPriority, 288 R_RETURN(InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr,
283 system.GetCpuManager().GetShutdownThreadStartFunc()); 289 ThreadType::HighPriority,
290 system.GetCpuManager().GetShutdownThreadStartFunc()));
284} 291}
285 292
286Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func, 293Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func,
287 uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core, 294 uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core,
288 KProcess* owner) { 295 KProcess* owner) {
289 system.Kernel().GlobalSchedulerContext().AddThread(thread); 296 system.Kernel().GlobalSchedulerContext().AddThread(thread);
290 return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner, 297 R_RETURN(InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
291 ThreadType::User, system.GetCpuManager().GetGuestThreadFunc()); 298 ThreadType::User, system.GetCpuManager().GetGuestThreadFunc()));
292} 299}
293 300
294void KThread::PostDestroy(uintptr_t arg) { 301void KThread::PostDestroy(uintptr_t arg) {
@@ -538,7 +545,7 @@ Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
538 *out_ideal_core = virtual_ideal_core_id; 545 *out_ideal_core = virtual_ideal_core_id;
539 *out_affinity_mask = virtual_affinity_mask; 546 *out_affinity_mask = virtual_affinity_mask;
540 547
541 return ResultSuccess; 548 R_SUCCEED();
542} 549}
543 550
544Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) { 551Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
@@ -554,7 +561,7 @@ Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask)
554 *out_affinity_mask = original_physical_affinity_mask.GetAffinityMask(); 561 *out_affinity_mask = original_physical_affinity_mask.GetAffinityMask();
555 } 562 }
556 563
557 return ResultSuccess; 564 R_SUCCEED();
558} 565}
559 566
560Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) { 567Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
@@ -666,7 +673,7 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
666 } while (retry_update); 673 } while (retry_update);
667 } 674 }
668 675
669 return ResultSuccess; 676 R_SUCCEED();
670} 677}
671 678
672void KThread::SetBasePriority(s32 value) { 679void KThread::SetBasePriority(s32 value) {
@@ -839,7 +846,7 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
839 } while (thread_is_current); 846 } while (thread_is_current);
840 } 847 }
841 848
842 return ResultSuccess; 849 R_SUCCEED();
843} 850}
844 851
845Result KThread::GetThreadContext3(std::vector<u8>& out) { 852Result KThread::GetThreadContext3(std::vector<u8>& out) {
@@ -874,7 +881,7 @@ Result KThread::GetThreadContext3(std::vector<u8>& out) {
874 } 881 }
875 } 882 }
876 883
877 return ResultSuccess; 884 R_SUCCEED();
878} 885}
879 886
880void KThread::AddWaiterImpl(KThread* thread) { 887void KThread::AddWaiterImpl(KThread* thread) {
@@ -1038,7 +1045,7 @@ Result KThread::Run() {
1038 // Set our state and finish. 1045 // Set our state and finish.
1039 SetState(ThreadState::Runnable); 1046 SetState(ThreadState::Runnable);
1040 1047
1041 return ResultSuccess; 1048 R_SUCCEED();
1042 } 1049 }
1043} 1050}
1044 1051
@@ -1073,6 +1080,78 @@ void KThread::Exit() {
1073 UNREACHABLE_MSG("KThread::Exit() would return"); 1080 UNREACHABLE_MSG("KThread::Exit() would return");
1074} 1081}
1075 1082
1083Result KThread::Terminate() {
1084 ASSERT(this != GetCurrentThreadPointer(kernel));
1085
1086 // Request the thread terminate if it hasn't already.
1087 if (const auto new_state = this->RequestTerminate(); new_state != ThreadState::Terminated) {
1088 // If the thread isn't terminated, wait for it to terminate.
1089 s32 index;
1090 KSynchronizationObject* objects[] = {this};
1091 R_TRY(KSynchronizationObject::Wait(kernel, std::addressof(index), objects, 1,
1092 Svc::WaitInfinite));
1093 }
1094
1095 R_SUCCEED();
1096}
1097
1098ThreadState KThread::RequestTerminate() {
1099 ASSERT(this != GetCurrentThreadPointer(kernel));
1100
1101 KScopedSchedulerLock sl{kernel};
1102
1103 // Determine if this is the first termination request.
1104 const bool first_request = [&]() -> bool {
1105 // Perform an atomic compare-and-swap from false to true.
1106 bool expected = false;
1107 return termination_requested.compare_exchange_strong(expected, true);
1108 }();
1109
1110 // If this is the first request, start termination procedure.
1111 if (first_request) {
1112 // If the thread is in initialized state, just change state to terminated.
1113 if (this->GetState() == ThreadState::Initialized) {
1114 thread_state = ThreadState::Terminated;
1115 return ThreadState::Terminated;
1116 }
1117
1118 // Register the terminating dpc.
1119 this->RegisterDpc(DpcFlag::Terminating);
1120
1121 // If the thread is pinned, unpin it.
1122 if (this->GetStackParameters().is_pinned) {
1123 this->GetOwnerProcess()->UnpinThread(this);
1124 }
1125
1126 // If the thread is suspended, continue it.
1127 if (this->IsSuspended()) {
1128 suspend_allowed_flags = 0;
1129 this->UpdateState();
1130 }
1131
1132 // Change the thread's priority to be higher than any system thread's.
1133 if (this->GetBasePriority() >= Svc::SystemThreadPriorityHighest) {
1134 this->SetBasePriority(TerminatingThreadPriority);
1135 }
1136
1137 // If the thread is runnable, send a termination interrupt to other cores.
1138 if (this->GetState() == ThreadState::Runnable) {
1139 if (const u64 core_mask =
1140 physical_affinity_mask.GetAffinityMask() & ~(1ULL << GetCurrentCoreId(kernel));
1141 core_mask != 0) {
1142 Kernel::KInterruptManager::SendInterProcessorInterrupt(kernel, core_mask);
1143 }
1144 }
1145
1146 // Wake up the thread.
1147 if (this->GetState() == ThreadState::Waiting) {
1148 wait_queue->CancelWait(this, ResultTerminationRequested, true);
1149 }
1150 }
1151
1152 return this->GetState();
1153}
1154
1076Result KThread::Sleep(s64 timeout) { 1155Result KThread::Sleep(s64 timeout) {
1077 ASSERT(!kernel.GlobalSchedulerContext().IsLocked()); 1156 ASSERT(!kernel.GlobalSchedulerContext().IsLocked());
1078 ASSERT(this == GetCurrentThreadPointer(kernel)); 1157 ASSERT(this == GetCurrentThreadPointer(kernel));
@@ -1086,7 +1165,7 @@ Result KThread::Sleep(s64 timeout) {
1086 // Check if the thread should terminate. 1165 // Check if the thread should terminate.
1087 if (this->IsTerminationRequested()) { 1166 if (this->IsTerminationRequested()) {
1088 slp.CancelSleep(); 1167 slp.CancelSleep();
1089 return ResultTerminationRequested; 1168 R_THROW(ResultTerminationRequested);
1090 } 1169 }
1091 1170
1092 // Wait for the sleep to end. 1171 // Wait for the sleep to end.
@@ -1094,33 +1173,32 @@ Result KThread::Sleep(s64 timeout) {
1094 SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep); 1173 SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
1095 } 1174 }
1096 1175
1097 return ResultSuccess; 1176 R_SUCCEED();
1098} 1177}
1099 1178
1100void KThread::IfDummyThreadTryWait() { 1179void KThread::RequestDummyThreadWait() {
1101 if (!IsDummyThread()) { 1180 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
1102 return; 1181 ASSERT(this->IsDummyThread());
1103 }
1104 1182
1105 if (GetState() != ThreadState::Waiting) { 1183 // We will block when the scheduler lock is released.
1106 return; 1184 dummy_thread_runnable.store(false);
1107 } 1185}
1108 1186
1187void KThread::DummyThreadBeginWait() {
1188 ASSERT(this->IsDummyThread());
1109 ASSERT(!kernel.IsPhantomModeForSingleCore()); 1189 ASSERT(!kernel.IsPhantomModeForSingleCore());
1110 1190
1111 // Block until we are no longer waiting. 1191 // Block until runnable is no longer false.
1112 std::unique_lock lk(dummy_wait_lock); 1192 dummy_thread_runnable.wait(false);
1113 dummy_wait_cv.wait(
1114 lk, [&] { return GetState() != ThreadState::Waiting || kernel.IsShuttingDown(); });
1115} 1193}
1116 1194
1117void KThread::IfDummyThreadEndWait() { 1195void KThread::DummyThreadEndWait() {
1118 if (!IsDummyThread()) { 1196 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
1119 return; 1197 ASSERT(this->IsDummyThread());
1120 }
1121 1198
1122 // Wake up the waiting thread. 1199 // Wake up the waiting thread.
1123 dummy_wait_cv.notify_one(); 1200 dummy_thread_runnable.store(true);
1201 dummy_thread_runnable.notify_one();
1124} 1202}
1125 1203
1126void KThread::BeginWait(KThreadQueue* queue) { 1204void KThread::BeginWait(KThreadQueue* queue) {
@@ -1154,9 +1232,6 @@ void KThread::EndWait(Result wait_result_) {
1154 } 1232 }
1155 1233
1156 wait_queue->EndWait(this, wait_result_); 1234 wait_queue->EndWait(this, wait_result_);
1157
1158 // Special case for dummy threads to wakeup if necessary.
1159 IfDummyThreadEndWait();
1160 } 1235 }
1161} 1236}
1162 1237
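
The dummy-thread wait also moves from a std::mutex plus std::condition_variable pair to a single C++20 std::atomic<bool> using wait/notify_one, which drops the lock entirely and cannot miss a wakeup: a store of true before the waiter blocks makes wait(false) return immediately. A self-contained sketch of the handshake, with illustrative free functions in place of the KThread members:

    #include <atomic>
    #include <thread>

    std::atomic<bool> runnable{true};

    void RequestWait() {
        runnable.store(false); // The waiter will block at its next BeginWait().
    }

    void BeginWait() {
        runnable.wait(false); // Blocks only while the value is still 'false'.
    }

    void EndWait() {
        runnable.store(true);
        runnable.notify_one(); // Wake the blocked waiter, if any.
    }

    int main() {
        RequestWait();
        std::thread waiter{BeginWait};
        EndWait();
        waiter.join();
    }
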
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index 9ee20208e..30aa10c9a 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -180,6 +180,10 @@ public:
180 180
181 void Exit(); 181 void Exit();
182 182
183 Result Terminate();
184
185 ThreadState RequestTerminate();
186
183 [[nodiscard]] u32 GetSuspendFlags() const { 187 [[nodiscard]] u32 GetSuspendFlags() const {
184 return suspend_allowed_flags & suspend_request_flags; 188 return suspend_allowed_flags & suspend_request_flags;
185 } 189 }
@@ -639,8 +643,9 @@ public:
639 // therefore will not block on guest kernel synchronization primitives. These methods handle 643 // therefore will not block on guest kernel synchronization primitives. These methods handle
640 // blocking as needed. 644 // blocking as needed.
641 645
642 void IfDummyThreadTryWait(); 646 void RequestDummyThreadWait();
643 void IfDummyThreadEndWait(); 647 void DummyThreadBeginWait();
648 void DummyThreadEndWait();
644 649
645 [[nodiscard]] uintptr_t GetArgument() const { 650 [[nodiscard]] uintptr_t GetArgument() const {
646 return argument; 651 return argument;
@@ -773,8 +778,7 @@ private:
773 bool is_single_core{}; 778 bool is_single_core{};
774 ThreadType thread_type{}; 779 ThreadType thread_type{};
775 StepState step_state{}; 780 StepState step_state{};
776 std::mutex dummy_wait_lock; 781 std::atomic<bool> dummy_thread_runnable{true};
777 std::condition_variable dummy_wait_cv;
778 782
779 // For debugging 783 // For debugging
780 std::vector<KSynchronizationObject*> wait_objects_for_debugging; 784 std::vector<KSynchronizationObject*> wait_objects_for_debugging;
diff --git a/src/core/hle/kernel/k_thread_local_page.h b/src/core/hle/kernel/k_thread_local_page.h
index 0a7f22680..5d466ace7 100644
--- a/src/core/hle/kernel/k_thread_local_page.h
+++ b/src/core/hle/kernel/k_thread_local_page.h
@@ -26,7 +26,7 @@ public:
26 static_assert(RegionsPerPage > 0); 26 static_assert(RegionsPerPage > 0);
27 27
28public: 28public:
29 constexpr explicit KThreadLocalPage(VAddr addr = {}) : m_virt_addr(addr) { 29 constexpr explicit KThreadLocalPage(KernelCore&, VAddr addr = {}) : m_virt_addr(addr) {
30 m_is_region_free.fill(true); 30 m_is_region_free.fill(true);
31 } 31 }
32 32
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 9251f29ad..fdc774e30 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -24,6 +24,7 @@
24#include "core/hardware_properties.h" 24#include "core/hardware_properties.h"
25#include "core/hle/kernel/init/init_slab_setup.h" 25#include "core/hle/kernel/init/init_slab_setup.h"
26#include "core/hle/kernel/k_client_port.h" 26#include "core/hle/kernel/k_client_port.h"
27#include "core/hle/kernel/k_dynamic_resource_manager.h"
27#include "core/hle/kernel/k_handle_table.h" 28#include "core/hle/kernel/k_handle_table.h"
28#include "core/hle/kernel/k_memory_layout.h" 29#include "core/hle/kernel/k_memory_layout.h"
29#include "core/hle/kernel/k_memory_manager.h" 30#include "core/hle/kernel/k_memory_manager.h"
@@ -47,8 +48,8 @@ namespace Kernel {
47 48
48struct KernelCore::Impl { 49struct KernelCore::Impl {
49 explicit Impl(Core::System& system_, KernelCore& kernel_) 50 explicit Impl(Core::System& system_, KernelCore& kernel_)
50 : time_manager{system_}, 51 : time_manager{system_}, service_threads_manager{1, "ServiceThreadsManager"},
51 service_threads_manager{1, "ServiceThreadsManager"}, system{system_} {} 52 service_thread_barrier{2}, system{system_} {}
52 53
53 void SetMulticore(bool is_multi) { 54 void SetMulticore(bool is_multi) {
54 is_multicore = is_multi; 55 is_multicore = is_multi;
@@ -73,8 +74,16 @@ struct KernelCore::Impl {
73 InitializeMemoryLayout(); 74 InitializeMemoryLayout();
74 Init::InitializeKPageBufferSlabHeap(system); 75 Init::InitializeKPageBufferSlabHeap(system);
75 InitializeShutdownThreads(); 76 InitializeShutdownThreads();
76 InitializePreemption(kernel);
77 InitializePhysicalCores(); 77 InitializePhysicalCores();
78 InitializePreemption(kernel);
79
80 // Initialize the Dynamic Slab Heaps.
81 {
82 const auto& pt_heap_region = memory_layout->GetPageTableHeapRegion();
83 ASSERT(pt_heap_region.GetEndAddress() != 0);
84
85 InitializeResourceManagers(pt_heap_region.GetAddress(), pt_heap_region.GetSize());
86 }
78 87
79 RegisterHostThread(); 88 RegisterHostThread();
80 } 89 }
@@ -86,6 +95,15 @@ struct KernelCore::Impl {
86 } 95 }
87 } 96 }
88 97
98 void CloseCurrentProcess() {
99 (*current_process).Finalize();
100 // current_process->Close();
101 // TODO: The current process should be destroyed based on accurate ref counting after
102 // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
103 (*current_process).Destroy();
104 current_process = nullptr;
105 }
106
89 void Shutdown() { 107 void Shutdown() {
90 is_shutting_down.store(true, std::memory_order_relaxed); 108 is_shutting_down.store(true, std::memory_order_relaxed);
91 SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); }); 109 SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); });
@@ -99,10 +117,6 @@ struct KernelCore::Impl {
99 next_user_process_id = KProcess::ProcessIDMin; 117 next_user_process_id = KProcess::ProcessIDMin;
100 next_thread_id = 1; 118 next_thread_id = 1;
101 119
102 for (auto& core : cores) {
103 core = nullptr;
104 }
105
106 global_handle_table->Finalize(); 120 global_handle_table->Finalize();
107 global_handle_table.reset(); 121 global_handle_table.reset();
108 122
@@ -152,15 +166,7 @@ struct KernelCore::Impl {
152 } 166 }
153 } 167 }
154 168
155 // Shutdown all processes. 169 CloseCurrentProcess();
156 if (current_process) {
157 (*current_process).Finalize();
158 // current_process->Close();
159 // TODO: The current process should be destroyed based on accurate ref counting after
160 // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
161 (*current_process).Destroy();
162 current_process = nullptr;
163 }
164 170
165 // Track kernel objects that were not freed on shutdown 171 // Track kernel objects that were not freed on shutdown
166 { 172 {
@@ -257,6 +263,18 @@ struct KernelCore::Impl {
257 system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event); 263 system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event);
258 } 264 }
259 265
266 void InitializeResourceManagers(VAddr address, size_t size) {
267 dynamic_page_manager = std::make_unique<KDynamicPageManager>();
268 memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>();
269 app_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>();
270
271 dynamic_page_manager->Initialize(address, size);
272 static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000;
273 memory_block_heap->Initialize(dynamic_page_manager.get(),
274 ApplicationMemoryBlockSlabHeapSize);
275 app_memory_block_manager->Initialize(nullptr, memory_block_heap.get());
276 }
277
260 void InitializeShutdownThreads() { 278 void InitializeShutdownThreads() {
261 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { 279 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
262 shutdown_threads[core_id] = KThread::Create(system.Kernel()); 280 shutdown_threads[core_id] = KThread::Create(system.Kernel());
@@ -344,11 +362,6 @@ struct KernelCore::Impl {
344 static inline thread_local KThread* current_thread{nullptr}; 362 static inline thread_local KThread* current_thread{nullptr};
345 363
346 KThread* GetCurrentEmuThread() { 364 KThread* GetCurrentEmuThread() {
347 // If we are shutting down the kernel, none of this is relevant anymore.
348 if (IsShuttingDown()) {
349 return {};
350 }
351
352 const auto thread_id = GetCurrentHostThreadID(); 365 const auto thread_id = GetCurrentHostThreadID();
353 if (thread_id >= Core::Hardware::NUM_CPU_CORES) { 366 if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
354 return GetHostDummyThread(); 367 return GetHostDummyThread();
@@ -724,7 +737,12 @@ struct KernelCore::Impl {
724 } 737 }
725 738
726 void ClearServiceThreads() { 739 void ClearServiceThreads() {
727 service_threads_manager.QueueWork([this]() { service_threads.clear(); }); 740 service_threads_manager.QueueWork([this] {
741 service_threads.clear();
742 default_service_thread.reset();
743 service_thread_barrier.Sync();
744 });
745 service_thread_barrier.Sync();
728 } 746 }
729 747
730 std::mutex server_objects_lock; 748 std::mutex server_objects_lock;
@@ -770,6 +788,11 @@ struct KernelCore::Impl {
770 // Kernel memory management 788 // Kernel memory management
771 std::unique_ptr<KMemoryManager> memory_manager; 789 std::unique_ptr<KMemoryManager> memory_manager;
772 790
791 // Dynamic slab managers
792 std::unique_ptr<KDynamicPageManager> dynamic_page_manager;
793 std::unique_ptr<KMemoryBlockSlabHeap> memory_block_heap;
794 std::unique_ptr<KMemoryBlockSlabManager> app_memory_block_manager;
795
773 // Shared memory for services 796 // Shared memory for services
774 Kernel::KSharedMemory* hid_shared_mem{}; 797 Kernel::KSharedMemory* hid_shared_mem{};
775 Kernel::KSharedMemory* font_shared_mem{}; 798 Kernel::KSharedMemory* font_shared_mem{};
@@ -784,6 +807,7 @@ struct KernelCore::Impl {
784 std::unordered_set<std::shared_ptr<ServiceThread>> service_threads; 807 std::unordered_set<std::shared_ptr<ServiceThread>> service_threads;
785 std::weak_ptr<ServiceThread> default_service_thread; 808 std::weak_ptr<ServiceThread> default_service_thread;
786 Common::ThreadWorker service_threads_manager; 809 Common::ThreadWorker service_threads_manager;
810 Common::Barrier service_thread_barrier;
787 811
788 std::array<KThread*, Core::Hardware::NUM_CPU_CORES> shutdown_threads; 812 std::array<KThread*, Core::Hardware::NUM_CPU_CORES> shutdown_threads;
789 std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{}; 813 std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
@@ -853,6 +877,10 @@ const KProcess* KernelCore::CurrentProcess() const {
853 return impl->current_process; 877 return impl->current_process;
854} 878}
855 879
880void KernelCore::CloseCurrentProcess() {
881 impl->CloseCurrentProcess();
882}
883
856const std::vector<KProcess*>& KernelCore::GetProcessList() const { 884const std::vector<KProcess*>& KernelCore::GetProcessList() const {
857 return impl->process_list; 885 return impl->process_list;
858} 886}
@@ -1041,6 +1069,14 @@ const KMemoryManager& KernelCore::MemoryManager() const {
1041 return *impl->memory_manager; 1069 return *impl->memory_manager;
1042} 1070}
1043 1071
1072KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() {
1073 return *impl->app_memory_block_manager;
1074}
1075
1076const KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() const {
1077 return *impl->app_memory_block_manager;
1078}
1079
1044Kernel::KSharedMemory& KernelCore::GetHidSharedMem() { 1080Kernel::KSharedMemory& KernelCore::GetHidSharedMem() {
1045 return *impl->hid_shared_mem; 1081 return *impl->hid_shared_mem;
1046} 1082}
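
ClearServiceThreads now synchronizes with the worker through a two-party barrier: both sides call Sync(), so the caller returns only once the worker has actually cleared the threads rather than merely queued the work. A sketch of that rendezvous with std::barrier (C++20) standing in for Common::Barrier:

    #include <barrier>
    #include <thread>

    int main() {
        std::barrier sync_point{2};

        std::thread worker{[&] {
            // ... clear the service threads here ...
            sync_point.arrive_and_wait(); // Worker-side Sync().
        }};

        // Caller-side Sync(): returns only after the worker has finished.
        sync_point.arrive_and_wait();
        worker.join();
    }
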
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 0847cbcbf..266be2bc4 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -37,6 +37,7 @@ class KClientSession;
37class KEvent; 37class KEvent;
38class KHandleTable; 38class KHandleTable;
39class KLinkedListNode; 39class KLinkedListNode;
40class KMemoryBlockSlabManager;
40class KMemoryLayout; 41class KMemoryLayout;
41class KMemoryManager; 42class KMemoryManager;
42class KPageBuffer; 43class KPageBuffer;
@@ -46,6 +47,7 @@ class KResourceLimit;
46class KScheduler; 47class KScheduler;
47class KServerSession; 48class KServerSession;
48class KSession; 49class KSession;
50class KSessionRequest;
49class KSharedMemory; 51class KSharedMemory;
50class KSharedMemoryInfo; 52class KSharedMemoryInfo;
51class KThread; 53class KThread;
@@ -130,6 +132,9 @@ public:
130 /// Retrieves a const pointer to the current process. 132 /// Retrieves a const pointer to the current process.
131 const KProcess* CurrentProcess() const; 133 const KProcess* CurrentProcess() const;
132 134
135 /// Closes the current process.
136 void CloseCurrentProcess();
137
133 /// Retrieves the list of processes. 138 /// Retrieves the list of processes.
134 const std::vector<KProcess*>& GetProcessList() const; 139 const std::vector<KProcess*>& GetProcessList() const;
135 140
@@ -238,6 +243,12 @@ public:
238 /// Gets the virtual memory manager for the kernel. 243 /// Gets the virtual memory manager for the kernel.
239 const KMemoryManager& MemoryManager() const; 244 const KMemoryManager& MemoryManager() const;
240 245
246 /// Gets the application memory block manager for the kernel.
247 KMemoryBlockSlabManager& GetApplicationMemoryBlockManager();
248
249 /// Gets the application memory block manager for the kernel.
250 const KMemoryBlockSlabManager& GetApplicationMemoryBlockManager() const;
251
241 /// Gets the shared memory object for HID services. 252 /// Gets the shared memory object for HID services.
242 Kernel::KSharedMemory& GetHidSharedMem(); 253 Kernel::KSharedMemory& GetHidSharedMem();
243 254
@@ -350,6 +361,8 @@ public:
350 return slab_heap_container->page_buffer; 361 return slab_heap_container->page_buffer;
351 } else if constexpr (std::is_same_v<T, KThreadLocalPage>) { 362 } else if constexpr (std::is_same_v<T, KThreadLocalPage>) {
352 return slab_heap_container->thread_local_page; 363 return slab_heap_container->thread_local_page;
364 } else if constexpr (std::is_same_v<T, KSessionRequest>) {
365 return slab_heap_container->session_request;
353 } 366 }
354 } 367 }
355 368
@@ -412,6 +425,7 @@ private:
412 KSlabHeap<KCodeMemory> code_memory; 425 KSlabHeap<KCodeMemory> code_memory;
413 KSlabHeap<KPageBuffer> page_buffer; 426 KSlabHeap<KPageBuffer> page_buffer;
414 KSlabHeap<KThreadLocalPage> thread_local_page; 427 KSlabHeap<KThreadLocalPage> thread_local_page;
428 KSlabHeap<KSessionRequest> session_request;
415 }; 429 };
416 430
417 std::unique_ptr<SlabHeapContainer> slab_heap_container; 431 std::unique_ptr<SlabHeapContainer> slab_heap_container;
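
Registering KSessionRequest in the slab-heap container only takes one new member plus one if constexpr branch in the type-dispatching accessor. A stripped-down sketch of that dispatch, with placeholder object types in place of the kernel's:

    #include <type_traits>

    template <typename T>
    struct SlabHeap {};

    struct Foo {};
    struct Bar {};

    struct SlabHeapContainer {
        SlabHeap<Foo> foo;
        SlabHeap<Bar> bar;
    };

    template <typename T>
    SlabHeap<T>& GetSlabHeap(SlabHeapContainer& c) {
        if constexpr (std::is_same_v<T, Foo>) {
            return c.foo;
        } else {
            static_assert(std::is_same_v<T, Bar>, "no slab heap registered for T");
            return c.bar;
        }
    }
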
diff --git a/src/core/hle/kernel/slab_helpers.h b/src/core/hle/kernel/slab_helpers.h
index 299a981a8..06b51e919 100644
--- a/src/core/hle/kernel/slab_helpers.h
+++ b/src/core/hle/kernel/slab_helpers.h
@@ -24,7 +24,7 @@ public:
24 } 24 }
25 25
26 static Derived* Allocate(KernelCore& kernel) { 26 static Derived* Allocate(KernelCore& kernel) {
27 return kernel.SlabHeap<Derived>().Allocate(); 27 return kernel.SlabHeap<Derived>().Allocate(kernel);
28 } 28 }
29 29
30 static void Free(KernelCore& kernel, Derived* obj) { 30 static void Free(KernelCore& kernel, Derived* obj) {
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 1d145ea91..4aca5b27d 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -751,8 +751,8 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
751 } 751 }
752 752
753 system.GetReporter().SaveSvcBreakReport( 753 system.GetReporter().SaveSvcBreakReport(
754 static_cast<u32>(break_reason.break_type.Value()), break_reason.signal_debugger, info1, 754 static_cast<u32>(break_reason.break_type.Value()), break_reason.signal_debugger.As<bool>(),
755 info2, has_dumped_buffer ? std::make_optional(debug_buffer) : std::nullopt); 755 info1, info2, has_dumped_buffer ? std::make_optional(debug_buffer) : std::nullopt);
756 756
757 if (!break_reason.signal_debugger) { 757 if (!break_reason.signal_debugger) {
758 LOG_CRITICAL( 758 LOG_CRITICAL(
@@ -933,7 +933,7 @@ static Result GetInfo(Core::System& system, u64* result, u64 info_id, Handle han
933 return ResultSuccess; 933 return ResultSuccess;
934 934
935 case GetInfoType::UserExceptionContextAddr: 935 case GetInfoType::UserExceptionContextAddr:
936 *result = process->GetTLSRegionAddress(); 936 *result = process->GetProcessLocalRegionAddress();
937 return ResultSuccess; 937 return ResultSuccess;
938 938
939 case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource: 939 case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource:
@@ -1888,7 +1888,7 @@ static void ExitProcess(Core::System& system) {
1888 auto* current_process = system.Kernel().CurrentProcess(); 1888 auto* current_process = system.Kernel().CurrentProcess();
1889 1889
1890 LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID()); 1890 LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID());
1891 ASSERT_MSG(current_process->GetStatus() == ProcessStatus::Running, 1891 ASSERT_MSG(current_process->GetState() == KProcess::State::Running,
1892 "Process has already exited"); 1892 "Process has already exited");
1893 1893
1894 system.Exit(); 1894 system.Exit();
@@ -2557,7 +2557,7 @@ static Result GetProcessInfo(Core::System& system, u64* out, Handle process_hand
2557 return ResultInvalidEnumValue; 2557 return ResultInvalidEnumValue;
2558 } 2558 }
2559 2559
2560 *out = static_cast<u64>(process->GetStatus()); 2560 *out = static_cast<u64>(process->GetState());
2561 return ResultSuccess; 2561 return ResultSuccess;
2562} 2562}
2563 2563
diff --git a/src/core/hle/kernel/svc_common.h b/src/core/hle/kernel/svc_common.h
index 95750c3eb..85506710e 100644
--- a/src/core/hle/kernel/svc_common.h
+++ b/src/core/hle/kernel/svc_common.h
@@ -14,8 +14,11 @@ namespace Kernel::Svc {
14 14
15using namespace Common::Literals; 15using namespace Common::Literals;
16 16
17constexpr s32 ArgumentHandleCountMax = 0x40; 17constexpr inline s32 ArgumentHandleCountMax = 0x40;
18constexpr u32 HandleWaitMask{1u << 30}; 18
19constexpr inline u32 HandleWaitMask = 1u << 30;
20
21constexpr inline s64 WaitInfinite = -1;
19 22
20constexpr inline std::size_t HeapSizeAlignment = 2_MiB; 23constexpr inline std::size_t HeapSizeAlignment = 2_MiB;
21 24
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h
index 79e15183a..abb9847fe 100644
--- a/src/core/hle/kernel/svc_types.h
+++ b/src/core/hle/kernel/svc_types.h
@@ -95,6 +95,19 @@ constexpr inline s32 IdealCoreNoUpdate = -3;
95constexpr inline s32 LowestThreadPriority = 63; 95constexpr inline s32 LowestThreadPriority = 63;
96constexpr inline s32 HighestThreadPriority = 0; 96constexpr inline s32 HighestThreadPriority = 0;
97 97
98constexpr inline s32 SystemThreadPriorityHighest = 16;
99
100enum class ProcessState : u32 {
101 Created = 0,
102 CreatedAttached = 1,
103 Running = 2,
104 Crashed = 3,
105 RunningAttached = 4,
106 Terminating = 5,
107 Terminated = 6,
108 DebugBreak = 7,
109};
110
98constexpr inline size_t ThreadLocalRegionSize = 0x200; 111constexpr inline size_t ThreadLocalRegionSize = 0x200;
99 112
100} // namespace Kernel::Svc 113} // namespace Kernel::Svc
diff --git a/src/core/hle/result.h b/src/core/hle/result.h
index d67e68bae..ef4b2d417 100644
--- a/src/core/hle/result.h
+++ b/src/core/hle/result.h
@@ -135,6 +135,14 @@ union Result {
135 [[nodiscard]] constexpr bool IsFailure() const { 135 [[nodiscard]] constexpr bool IsFailure() const {
136 return !IsSuccess(); 136 return !IsSuccess();
137 } 137 }
138
139 [[nodiscard]] constexpr u32 GetInnerValue() const {
140 return static_cast<u32>(module.Value()) | (description << module.bits);
141 }
142
143 [[nodiscard]] constexpr bool Includes(Result result) const {
144 return GetInnerValue() == result.GetInnerValue();
145 }
138}; 146};
139static_assert(std::is_trivial_v<Result>); 147static_assert(std::is_trivial_v<Result>);
140 148
@@ -462,9 +470,6 @@ constexpr inline Result __TmpCurrentResultReference = ResultSuccess;
462#define R_UNLESS(expr, res) \ 470#define R_UNLESS(expr, res) \
463 { \ 471 { \
464 if (!(expr)) { \ 472 if (!(expr)) { \
465 if (res.IsError()) { \
466 LOG_ERROR(Kernel, "Failed with result: {}", res.raw); \
467 } \
468 R_THROW(res); \ 473 R_THROW(res); \
469 } \ 474 } \
470 } 475 }
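
GetInnerValue packs the module into the low bits and shifts the description above it; Includes then compares those packed values, ignoring any remaining bits of the raw word. A worked check of the packing, assuming the customary 9-bit module field of Horizon result codes (treat the width as an assumption here):

    #include <cstdint>

    constexpr std::uint32_t ModuleBits = 9; // Assumed field width.

    constexpr std::uint32_t MakeInner(std::uint32_t module, std::uint32_t description) {
        return module | (description << ModuleBits);
    }

    // Module sits in the low bits, description shifted above it.
    static_assert(MakeInner(1, 123) == (1u | (123u << 9)));
    // Includes() is an equality test on these packed values.
    static_assert(MakeInner(1, 123) == MakeInner(1, 123));
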
diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp
index e55233054..8ea7fd760 100644
--- a/src/core/hle/service/am/am.cpp
+++ b/src/core/hle/service/am/am.cpp
@@ -299,7 +299,7 @@ ISelfController::ISelfController(Core::System& system_, NVFlinger::NVFlinger& nv
299 {100, &ISelfController::SetAlbumImageTakenNotificationEnabled, "SetAlbumImageTakenNotificationEnabled"}, 299 {100, &ISelfController::SetAlbumImageTakenNotificationEnabled, "SetAlbumImageTakenNotificationEnabled"},
300 {110, nullptr, "SetApplicationAlbumUserData"}, 300 {110, nullptr, "SetApplicationAlbumUserData"},
301 {120, &ISelfController::SaveCurrentScreenshot, "SaveCurrentScreenshot"}, 301 {120, &ISelfController::SaveCurrentScreenshot, "SaveCurrentScreenshot"},
302 {130, nullptr, "SetRecordVolumeMuted"}, 302 {130, &ISelfController::SetRecordVolumeMuted, "SetRecordVolumeMuted"},
303 {1000, nullptr, "GetDebugStorageChannel"}, 303 {1000, nullptr, "GetDebugStorageChannel"},
304 }; 304 };
305 // clang-format on 305 // clang-format on
@@ -597,6 +597,17 @@ void ISelfController::SaveCurrentScreenshot(Kernel::HLERequestContext& ctx) {
597 rb.Push(ResultSuccess); 597 rb.Push(ResultSuccess);
598} 598}
599 599
600void ISelfController::SetRecordVolumeMuted(Kernel::HLERequestContext& ctx) {
601 IPC::RequestParser rp{ctx};
602
603 const auto is_record_volume_muted = rp.Pop<bool>();
604
605 LOG_WARNING(Service_AM, "(STUBBED) called. is_record_volume_muted={}", is_record_volume_muted);
606
607 IPC::ResponseBuilder rb{ctx, 2};
608 rb.Push(ResultSuccess);
609}
610
600AppletMessageQueue::AppletMessageQueue(Core::System& system) 611AppletMessageQueue::AppletMessageQueue(Core::System& system)
601 : service_context{system, "AppletMessageQueue"} { 612 : service_context{system, "AppletMessageQueue"} {
602 on_new_message = service_context.CreateEvent("AMMessageQueue:OnMessageReceived"); 613 on_new_message = service_context.CreateEvent("AMMessageQueue:OnMessageReceived");
diff --git a/src/core/hle/service/am/am.h b/src/core/hle/service/am/am.h
index bb75c6281..a0fbfcfc5 100644
--- a/src/core/hle/service/am/am.h
+++ b/src/core/hle/service/am/am.h
@@ -182,6 +182,7 @@ private:
182 void GetAccumulatedSuspendedTickChangedEvent(Kernel::HLERequestContext& ctx); 182 void GetAccumulatedSuspendedTickChangedEvent(Kernel::HLERequestContext& ctx);
183 void SetAlbumImageTakenNotificationEnabled(Kernel::HLERequestContext& ctx); 183 void SetAlbumImageTakenNotificationEnabled(Kernel::HLERequestContext& ctx);
184 void SaveCurrentScreenshot(Kernel::HLERequestContext& ctx); 184 void SaveCurrentScreenshot(Kernel::HLERequestContext& ctx);
185 void SetRecordVolumeMuted(Kernel::HLERequestContext& ctx);
185 186
186 enum class ScreenshotPermission : u32 { 187 enum class ScreenshotPermission : u32 {
187 Inherit = 0, 188 Inherit = 0,
diff --git a/src/core/hle/service/am/applets/applets.h b/src/core/hle/service/am/applets/applets.h
index e78a57657..12c6a5b1a 100644
--- a/src/core/hle/service/am/applets/applets.h
+++ b/src/core/hle/service/am/applets/applets.h
@@ -164,7 +164,7 @@ protected:
164 u32_le size; 164 u32_le size;
165 u32_le library_version; 165 u32_le library_version;
166 u32_le theme_color; 166 u32_le theme_color;
167 u8 play_startup_sound; 167 bool play_startup_sound;
168 u64_le system_tick; 168 u64_le system_tick;
169 }; 169 };
170 static_assert(sizeof(CommonArguments) == 0x20, "CommonArguments has incorrect size."); 170 static_assert(sizeof(CommonArguments) == 0x20, "CommonArguments has incorrect size.");
diff --git a/src/core/hle/service/audio/audctl.cpp b/src/core/hle/service/audio/audctl.cpp
index 4a2ae5f88..5abf22ba4 100644
--- a/src/core/hle/service/audio/audctl.cpp
+++ b/src/core/hle/service/audio/audctl.cpp
@@ -45,9 +45,25 @@ AudCtl::AudCtl(Core::System& system_) : ServiceFramework{system_, "audctl"} {
         {32, nullptr, "GetActiveOutputTarget"},
         {33, nullptr, "GetTargetDeviceInfo"},
         {34, nullptr, "AcquireTargetNotification"},
+        {35, nullptr, "SetHearingProtectionSafeguardTimerRemainingTimeForDebug"},
+        {36, nullptr, "GetHearingProtectionSafeguardTimerRemainingTimeForDebug"},
+        {37, nullptr, "SetHearingProtectionSafeguardEnabled"},
+        {38, nullptr, "IsHearingProtectionSafeguardEnabled"},
+        {39, nullptr, "IsHearingProtectionSafeguardMonitoringOutputForDebug"},
+        {40, nullptr, "GetSystemInformationForDebug"},
+        {41, nullptr, "SetVolumeButtonLongPressTime"},
+        {42, nullptr, "SetNativeVolumeForDebug"},
         {10000, nullptr, "NotifyAudioOutputTargetForPlayReport"},
         {10001, nullptr, "NotifyAudioOutputChannelCountForPlayReport"},
         {10002, nullptr, "NotifyUnsupportedUsbOutputDeviceAttachedForPlayReport"},
+        {10100, nullptr, "GetAudioVolumeDataForPlayReport"},
+        {10101, nullptr, "BindAudioVolumeUpdateEventForPlayReport"},
+        {10102, nullptr, "BindAudioOutputTargetUpdateEventForPlayReport"},
+        {10103, nullptr, "GetAudioOutputTargetForPlayReport"},
+        {10104, nullptr, "GetAudioOutputChannelCountForPlayReport"},
+        {10105, nullptr, "BindAudioOutputChannelCountUpdateEventForPlayReport"},
+        {10106, nullptr, "GetDefaultAudioOutputTargetForPlayReport"},
+        {50000, nullptr, "SetAnalogInputBoostGainForPrototyping"},
     };
     // clang-format on

diff --git a/src/core/hle/service/audio/audin_u.cpp b/src/core/hle/service/audio/audin_u.cpp
index 48a9a73a0..608925dfc 100644
--- a/src/core/hle/service/audio/audin_u.cpp
+++ b/src/core/hle/service/audio/audin_u.cpp
@@ -17,7 +17,7 @@ using namespace AudioCore::AudioIn;
 class IAudioIn final : public ServiceFramework<IAudioIn> {
 public:
     explicit IAudioIn(Core::System& system_, Manager& manager, size_t session_id,
-                      std::string& device_name, const AudioInParameter& in_params, u32 handle,
+                      const std::string& device_name, const AudioInParameter& in_params, u32 handle,
                       u64 applet_resource_user_id)
         : ServiceFramework{system_, "IAudioIn"},
           service_context{system_, "IAudioIn"}, event{service_context.CreateEvent("AudioInEvent")},
diff --git a/src/core/hle/service/audio/audout_u.cpp b/src/core/hle/service/audio/audout_u.cpp
index 49c092301..122290c6a 100644
--- a/src/core/hle/service/audio/audout_u.cpp
+++ b/src/core/hle/service/audio/audout_u.cpp
@@ -24,7 +24,7 @@ using namespace AudioCore::AudioOut;
 class IAudioOut final : public ServiceFramework<IAudioOut> {
 public:
     explicit IAudioOut(Core::System& system_, AudioCore::AudioOut::Manager& manager,
-                       size_t session_id, std::string& device_name,
+                       size_t session_id, const std::string& device_name,
                        const AudioOutParameter& in_params, u32 handle, u64 applet_resource_user_id)
         : ServiceFramework{system_, "IAudioOut", ServiceThreadType::CreateNew},
           service_context{system_, "IAudioOut"}, event{service_context.CreateEvent(
diff --git a/src/core/hle/service/audio/audren_u.cpp b/src/core/hle/service/audio/audren_u.cpp
index 60c30cd5b..13423dca6 100644
--- a/src/core/hle/service/audio/audren_u.cpp
+++ b/src/core/hle/service/audio/audren_u.cpp
@@ -52,6 +52,8 @@ public:
         {9, &IAudioRenderer::GetRenderingTimeLimit, "GetRenderingTimeLimit"},
         {10, &IAudioRenderer::RequestUpdate, "RequestUpdateAuto"},
         {11, nullptr, "ExecuteAudioRendererRendering"},
+        {12, &IAudioRenderer::SetVoiceDropParameter, "SetVoiceDropParameter"},
+        {13, &IAudioRenderer::GetVoiceDropParameter, "GetVoiceDropParameter"},
     };
     // clang-format on
     RegisterHandlers(functions);
@@ -205,6 +207,30 @@ private:
         LOG_DEBUG(Service_Audio, "called");
     }

+    void SetVoiceDropParameter(Kernel::HLERequestContext& ctx) {
+        LOG_DEBUG(Service_Audio, "called");
+
+        IPC::RequestParser rp{ctx};
+        auto voice_drop_param{rp.Pop<f32>()};
+
+        auto& system_ = impl->GetSystem();
+        system_.SetVoiceDropParameter(voice_drop_param);
+
+        IPC::ResponseBuilder rb{ctx, 2};
+        rb.Push(ResultSuccess);
+    }
+
+    void GetVoiceDropParameter(Kernel::HLERequestContext& ctx) {
+        LOG_DEBUG(Service_Audio, "called");
+
+        auto& system_ = impl->GetSystem();
+        auto voice_drop_param{system_.GetVoiceDropParameter()};
+
+        IPC::ResponseBuilder rb{ctx, 3};
+        rb.Push(ResultSuccess);
+        rb.Push(voice_drop_param);
+    }
+
     KernelHelpers::ServiceContext service_context;
     Kernel::KEvent* rendered_event;
     Manager& manager;
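
The pair of handlers added above follows yuzu's usual HLE shape: SetVoiceDropParameter pops a raw f32 from the request and replies with a result code alone (2 response words), while GetVoiceDropParameter replies with the result code plus the value (3 words). A minimal sketch of that request/response layout, using hypothetical stand-ins for IPC::RequestParser and IPC::ResponseBuilder rather than yuzu's real types:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Stand-in for IPC::RequestParser: pops an f32 from raw request words.
    struct Parser {
        const std::uint32_t* words;
        float PopF32() {
            float v;
            std::memcpy(&v, words++, sizeof(v));
            return v;
        }
    };

    // Stand-in for IPC::ResponseBuilder: pushes result code and payload words.
    struct Builder {
        std::vector<std::uint32_t> out;
        void Push(std::uint32_t word) { out.push_back(word); }
        void PushF32(float v) {
            std::uint32_t raw;
            std::memcpy(&raw, &v, sizeof(raw));
            out.push_back(raw);
        }
    };

    int main() {
        float voice_drop = 1.0f; // renderer-side state

        const std::uint32_t request[] = {0x3F000000}; // f32 0.5 as raw bits
        Parser rp{request};
        voice_drop = rp.PopF32(); // what SetVoiceDropParameter does

        Builder rb;
        rb.Push(0);              // ResultSuccess
        rb.PushF32(voice_drop);  // GetVoiceDropParameter's extra payload word
        return rb.out.size() == 2 ? 0 : 1;
    }
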
diff --git a/src/core/hle/service/hid/controllers/npad.cpp b/src/core/hle/service/hid/controllers/npad.cpp
index 98e4f2af7..2f871de31 100644
--- a/src/core/hle/service/hid/controllers/npad.cpp
+++ b/src/core/hle/service/hid/controllers/npad.cpp
@@ -745,8 +745,9 @@ void Controller_NPad::SetSupportedNpadIdTypes(u8* data, std::size_t length) {
 }

 void Controller_NPad::GetSupportedNpadIdTypes(u32* data, std::size_t max_length) {
-    ASSERT(max_length < supported_npad_id_types.size());
-    std::memcpy(data, supported_npad_id_types.data(), supported_npad_id_types.size());
+    const auto copy_amount = supported_npad_id_types.size() * sizeof(u32);
+    ASSERT(max_length <= copy_amount);
+    std::memcpy(data, supported_npad_id_types.data(), copy_amount);
 }

 std::size_t Controller_NPad::GetSupportedNpadIdTypesSize() const {
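
The bug fixed here is a classic unit mismatch: std::memcpy takes a byte count, but the old code passed the element count of a u32 array (and asserted against it), so only a quarter of the table was ever copied. A small self-contained repro of the pitfall and the fix, with stand-in data rather than the npad types:

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    int main() {
        std::vector<std::uint32_t> supported{1, 2, 3, 4, 5, 6, 7, 8};
        std::vector<std::uint32_t> out(supported.size()); // zero-initialized

        // Buggy: copies size() bytes, i.e. only size()/4 complete u32 elements.
        std::memcpy(out.data(), supported.data(), supported.size());
        assert(out[7] == 0); // tail elements were never copied

        // Fixed: scale the element count to a byte count.
        const std::size_t copy_amount = supported.size() * sizeof(std::uint32_t);
        std::memcpy(out.data(), supported.data(), copy_amount);
        assert(out[7] == supported[7]);
        return 0;
    }
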
@@ -867,7 +868,7 @@ bool Controller_NPad::VibrateControllerAtIndex(Core::HID::NpadIdType npad_id,
         return false;
     }

-    if (!controller.device->IsVibrationEnabled()) {
+    if (!controller.device->IsVibrationEnabled(device_index)) {
         if (controller.vibration[device_index].latest_vibration_value.low_amplitude != 0.0f ||
             controller.vibration[device_index].latest_vibration_value.high_amplitude != 0.0f) {
             // Send an empty vibration to stop any vibrations.
@@ -1000,7 +1001,7 @@ void Controller_NPad::InitializeVibrationDeviceAtIndex(Core::HID::NpadIdType npa
     }

     controller.vibration[device_index].device_mounted =
-        controller.device->TestVibration(device_index);
+        controller.device->IsVibrationEnabled(device_index);
 }

 void Controller_NPad::SetPermitVibrationSession(bool permit_vibration_session) {
@@ -1501,25 +1502,25 @@ bool Controller_NPad::IsControllerSupported(Core::HID::NpadStyleIndex controller
     Core::HID::NpadStyleTag style = GetSupportedStyleSet();
     switch (controller) {
     case Core::HID::NpadStyleIndex::ProController:
-        return style.fullkey;
+        return style.fullkey.As<bool>();
     case Core::HID::NpadStyleIndex::JoyconDual:
-        return style.joycon_dual;
+        return style.joycon_dual.As<bool>();
     case Core::HID::NpadStyleIndex::JoyconLeft:
-        return style.joycon_left;
+        return style.joycon_left.As<bool>();
     case Core::HID::NpadStyleIndex::JoyconRight:
-        return style.joycon_right;
+        return style.joycon_right.As<bool>();
     case Core::HID::NpadStyleIndex::GameCube:
-        return style.gamecube;
+        return style.gamecube.As<bool>();
     case Core::HID::NpadStyleIndex::Pokeball:
-        return style.palma;
+        return style.palma.As<bool>();
     case Core::HID::NpadStyleIndex::NES:
-        return style.lark;
+        return style.lark.As<bool>();
     case Core::HID::NpadStyleIndex::SNES:
-        return style.lucia;
+        return style.lucia.As<bool>();
     case Core::HID::NpadStyleIndex::N64:
-        return style.lagoon;
+        return style.lagoon.As<bool>();
     case Core::HID::NpadStyleIndex::SegaGenesis:
-        return style.lager;
+        return style.lager.As<bool>();
     default:
         return false;
     }
diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp
index becd6d1b9..652441bc2 100644
--- a/src/core/hle/service/ldr/ldr.cpp
+++ b/src/core/hle/service/ldr/ldr.cpp
@@ -290,7 +290,7 @@ public:
         const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize};
         const auto start_info{page_table.QueryInfo(start - 1)};

-        if (start_info.state != Kernel::KMemoryState::Free) {
+        if (start_info.GetState() != Kernel::KMemoryState::Free) {
             return {};
         }

@@ -300,7 +300,7 @@ public:

         const auto end_info{page_table.QueryInfo(start + size)};

-        if (end_info.state != Kernel::KMemoryState::Free) {
+        if (end_info.GetState() != Kernel::KMemoryState::Free) {
             return {};
         }

diff --git a/src/core/hle/service/nfp/amiibo_crypto.cpp b/src/core/hle/service/nfp/amiibo_crypto.cpp
index c32a6816b..167e29572 100644
--- a/src/core/hle/service/nfp/amiibo_crypto.cpp
+++ b/src/core/hle/service/nfp/amiibo_crypto.cpp
@@ -9,6 +9,7 @@
 #include <mbedtls/hmac_drbg.h>

 #include "common/fs/file.h"
+#include "common/fs/fs.h"
 #include "common/fs/path_util.h"
 #include "common/logging/log.h"
 #include "core/hle/service/mii/mii_manager.h"
@@ -279,7 +280,7 @@ bool LoadKeys(InternalKey& locked_secret, InternalKey& unfixed_info) {
                         Common::FS::FileType::BinaryFile};

     if (!keys_file.IsOpen()) {
-        LOG_ERROR(Service_NFP, "No keys detected");
+        LOG_ERROR(Service_NFP, "Failed to open key file");
         return false;
     }

@@ -295,6 +296,11 @@ bool LoadKeys(InternalKey& locked_secret, InternalKey& unfixed_info) {
     return true;
 }

+bool IsKeyAvailable() {
+    const auto yuzu_keys_dir = Common::FS::GetYuzuPath(Common::FS::YuzuPath::KeysDir);
+    return Common::FS::Exists(yuzu_keys_dir / "key_retail.bin");
+}
+
 bool DecodeAmiibo(const EncryptedNTAG215File& encrypted_tag_data, NTAG215File& tag_data) {
     InternalKey locked_secret{};
     InternalKey unfixed_info{};
diff --git a/src/core/hle/service/nfp/amiibo_crypto.h b/src/core/hle/service/nfp/amiibo_crypto.h
index 0175ced91..1fa61174e 100644
--- a/src/core/hle/service/nfp/amiibo_crypto.h
+++ b/src/core/hle/service/nfp/amiibo_crypto.h
@@ -91,6 +91,9 @@ void Cipher(const DerivedKeys& keys, const NTAG215File& in_data, NTAG215File& ou
 /// Loads both amiibo keys from key_retail.bin
 bool LoadKeys(InternalKey& locked_secret, InternalKey& unfixed_info);

+/// Returns true if key_retail.bin exists
+bool IsKeyAvailable();
+
 /// Decodes encrypted amiibo data; returns true if output is valid
 bool DecodeAmiibo(const EncryptedNTAG215File& encrypted_tag_data, NTAG215File& tag_data);

diff --git a/src/core/hle/service/nfp/nfp_device.cpp b/src/core/hle/service/nfp/nfp_device.cpp
index 76f8a267a..b19672560 100644
--- a/src/core/hle/service/nfp/nfp_device.cpp
+++ b/src/core/hle/service/nfp/nfp_device.cpp
@@ -17,6 +17,7 @@
 #include "core/hle/ipc_helpers.h"
 #include "core/hle/kernel/k_event.h"
 #include "core/hle/service/mii/mii_manager.h"
+#include "core/hle/service/mii/types.h"
 #include "core/hle/service/nfp/amiibo_crypto.h"
 #include "core/hle/service/nfp/nfp.h"
 #include "core/hle/service/nfp/nfp_device.h"
@@ -233,6 +234,14 @@ Result NfpDevice::Mount(MountTarget mount_target_) {
         return NotAnAmiibo;
     }

+    // Mark amiibos as read only when keys are missing
+    if (!AmiiboCrypto::IsKeyAvailable()) {
+        LOG_ERROR(Service_NFP, "No keys detected");
+        device_state = DeviceState::TagMounted;
+        mount_target = MountTarget::Rom;
+        return ResultSuccess;
+    }
+
     if (!AmiiboCrypto::DecodeAmiibo(encrypted_tag_data, tag_data)) {
         LOG_ERROR(Service_NFP, "Can't decode amiibo {}", device_state);
         return CorruptedData;
diff --git a/src/core/hle/service/nfp/nfp_device.h b/src/core/hle/service/nfp/nfp_device.h
index a5b72cf19..76d0e9ae4 100644
--- a/src/core/hle/service/nfp/nfp_device.h
+++ b/src/core/hle/service/nfp/nfp_device.h
@@ -8,7 +8,6 @@

 #include "common/common_funcs.h"
 #include "core/hle/service/kernel_helpers.h"
-#include "core/hle/service/mii/types.h"
 #include "core/hle/service/nfp/nfp_types.h"
 #include "core/hle/service/service.h"

diff --git a/src/core/hle/service/nfp/nfp_types.h b/src/core/hle/service/nfp/nfp_types.h
index c09f9ddb6..63d5917cb 100644
--- a/src/core/hle/service/nfp/nfp_types.h
+++ b/src/core/hle/service/nfp/nfp_types.h
@@ -17,11 +17,6 @@ enum class ServiceType : u32 {
     System,
 };

-enum class State : u32 {
-    NonInitialized,
-    Initialized,
-};
-
 enum class DeviceState : u32 {
     Initialized,
     SearchingForTag,
diff --git a/src/core/hle/service/nfp/nfp_user.cpp b/src/core/hle/service/nfp/nfp_user.cpp
index 4ed53b534..33e2ef518 100644
--- a/src/core/hle/service/nfp/nfp_user.cpp
+++ b/src/core/hle/service/nfp/nfp_user.cpp
@@ -6,12 +6,9 @@

 #include "common/logging/log.h"
 #include "core/core.h"
-#include "core/hid/emulated_controller.h"
-#include "core/hid/hid_core.h"
 #include "core/hid/hid_types.h"
 #include "core/hle/ipc_helpers.h"
 #include "core/hle/kernel/k_event.h"
-#include "core/hle/service/mii/mii_manager.h"
 #include "core/hle/service/nfp/nfp_device.h"
 #include "core/hle/service/nfp/nfp_result.h"
 #include "core/hle/service/nfp/nfp_user.h"
diff --git a/src/core/hle/service/nfp/nfp_user.h b/src/core/hle/service/nfp/nfp_user.h
index 68c60ae82..47aff3695 100644
--- a/src/core/hle/service/nfp/nfp_user.h
+++ b/src/core/hle/service/nfp/nfp_user.h
@@ -4,8 +4,7 @@
 #pragma once

 #include "core/hle/service/kernel_helpers.h"
-#include "core/hle/service/nfp/nfp.h"
-#include "core/hle/service/nfp/nfp_types.h"
+#include "core/hle/service/service.h"

 namespace Service::NFP {
 class NfpDevice;
@@ -15,6 +14,11 @@ public:
     explicit IUser(Core::System& system_);

 private:
+    enum class State : u32 {
+        NonInitialized,
+        Initialized,
+    };
+
     void Initialize(Kernel::HLERequestContext& ctx);
     void Finalize(Kernel::HLERequestContext& ctx);
     void ListDevices(Kernel::HLERequestContext& ctx);
diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp
index fbd8a74a5..a51ca5444 100644
--- a/src/core/hle/service/nvdrv/core/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/core/nvmap.cpp
@@ -255,15 +255,16 @@ std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool interna
             .address = handle_description->address,
             .size = handle_description->size,
             .was_uncached = handle_description->flags.map_uncached.Value() != 0,
+            .can_unlock = true,
         };
     } else {
         return std::nullopt;
     }

-    // Handle hasn't been freed from memory, set address to 0 to mark that the handle wasn't freed
+    // If the handle hasn't been freed from memory, mark that it can't be unlocked yet
     if (!hWeak.expired()) {
         LOG_DEBUG(Service_NVDRV, "nvmap handle: {} wasn't freed as it is still in use", handle);
-        freeInfo.address = 0;
+        freeInfo.can_unlock = false;
     }

     return freeInfo;
diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h
index b9dd3801f..a8e573890 100644
--- a/src/core/hle/service/nvdrv/core/nvmap.h
+++ b/src/core/hle/service/nvdrv/core/nvmap.h
@@ -105,6 +105,7 @@ public:
         u64 address;       //!< Address the handle referred to before deletion
         u64 size;          //!< Page-aligned handle size
         bool was_uncached; //!< If the handle was allocated as uncached
+        bool can_unlock;   //!< If the address region is ready to be unlocked
     };

     explicit NvMap(Tegra::Host1x::Host1x& host1x);
110 explicit NvMap(Tegra::Host1x::Host1x& host1x); 111 explicit NvMap(Tegra::Host1x::Host1x& host1x);
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
index 6411dbf43..b635e6ed1 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
@@ -311,7 +311,8 @@ NvResult nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& out
                 handle->address +
                 (static_cast<u64>(entry.handle_offset_big_pages) << vm.big_page_size_bits))};

-            gmmu->Map(virtual_address, cpu_address, size, use_big_pages);
+            gmmu->Map(virtual_address, cpu_address, size, static_cast<Tegra::PTEKind>(entry.kind),
+                      use_big_pages);
         }
     }

@@ -350,7 +351,8 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
         u64 gpu_address{static_cast<u64>(params.offset + params.buffer_offset)};
         VAddr cpu_address{mapping->ptr + params.buffer_offset};

-        gmmu->Map(gpu_address, cpu_address, params.mapping_size, mapping->big_page);
+        gmmu->Map(gpu_address, cpu_address, params.mapping_size,
+                  static_cast<Tegra::PTEKind>(params.kind), mapping->big_page);

         return NvResult::Success;
     } catch (const std::out_of_range&) {
@@ -389,7 +391,8 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
         }

         const bool use_big_pages = alloc->second.big_pages && big_page;
-        gmmu->Map(params.offset, cpu_address, size, use_big_pages);
+        gmmu->Map(params.offset, cpu_address, size, static_cast<Tegra::PTEKind>(params.kind),
+                  use_big_pages);

         auto mapping{std::make_shared<Mapping>(cpu_address, params.offset, size, true,
                                                use_big_pages, alloc->second.sparse)};
@@ -409,7 +412,8 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
             return NvResult::InsufficientMemory;
         }

-        gmmu->Map(params.offset, cpu_address, Common::AlignUp(size, page_size), big_page);
+        gmmu->Map(params.offset, cpu_address, Common::AlignUp(size, page_size),
+                  static_cast<Tegra::PTEKind>(params.kind), big_page);

         auto mapping{
             std::make_shared<Mapping>(cpu_address, params.offset, size, false, big_page, false)};
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp
index ddf273b5e..44388655d 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp
@@ -128,7 +128,8 @@ NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output)
     }
     ASSERT(system.CurrentProcess()
                ->PageTable()
-               .LockForDeviceAddressSpace(handle_description->address, handle_description->size)
+               .LockForMapDeviceAddressSpace(handle_description->address, handle_description->size,
+                                             Kernel::KMemoryPermission::None, true)
                .IsSuccess());
     std::memcpy(output.data(), &params, sizeof(params));
     return result;
@@ -250,10 +251,12 @@ NvResult nvmap::IocFree(const std::vector<u8>& input, std::vector<u8>& output) {
     }

     if (auto freeInfo{file.FreeHandle(params.handle, false)}) {
-        ASSERT(system.CurrentProcess()
-                   ->PageTable()
-                   .UnlockForDeviceAddressSpace(freeInfo->address, freeInfo->size)
-                   .IsSuccess());
+        if (freeInfo->can_unlock) {
+            ASSERT(system.CurrentProcess()
+                       ->PageTable()
+                       .UnlockForDeviceAddressSpace(freeInfo->address, freeInfo->size)
+                       .IsSuccess());
+        }
         params.address = freeInfo->address;
         params.size = static_cast<u32>(freeInfo->size);
         params.flags.raw = 0;
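
Before this change, FreeHandle signalled "still in use" by zeroing freeInfo.address, and IocFree then reported that zeroed address back to the guest; the new can_unlock flag keeps the real address in the reply while letting the ioctl skip the page-table unlock. A reduced sketch of the sentinel-versus-flag distinction (types and values illustrative, not the real NvMap API):

    #include <cstdint>
    #include <optional>

    // Illustrative reduction of NvMap::FreeInfo: an explicit flag preserves
    // the original address for the caller instead of overloading address == 0.
    struct FreeInfo {
        std::uint64_t address;
        std::uint64_t size;
        bool can_unlock; // replaces the old "address = 0" sentinel
    };

    std::optional<FreeInfo> FreeHandle(bool still_in_use) {
        FreeInfo info{0xDEADB000, 0x1000, true};
        if (still_in_use) {
            info.can_unlock = false; // the address stays reportable
        }
        return info;
    }

    int main() {
        const auto info = FreeHandle(/*still_in_use=*/true);
        if (info && info->can_unlock) {
            // UnlockForDeviceAddressSpace(info->address, info->size) would go here.
        }
        // info->address is still 0xDEADB000 for the ioctl reply either way.
        return 0;
    }
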
diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp
index 9d9924395..9f4c7c99a 100644
--- a/src/core/hle/service/nvdrv/nvdrv.cpp
+++ b/src/core/hle/service/nvdrv/nvdrv.cpp
@@ -53,7 +53,7 @@ void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger
 }

 Module::Module(Core::System& system)
-    : service_context{system, "nvdrv"}, events_interface{*this}, container{system.Host1x()} {
+    : container{system.Host1x()}, service_context{system, "nvdrv"}, events_interface{*this} {
     builders["/dev/nvhost-as-gpu"] = [this, &system](DeviceFD fd) {
         std::shared_ptr<Devices::nvdevice> device =
             std::make_shared<Devices::nvhost_as_gpu>(system, *this, container);
diff --git a/src/core/hle/service/nvdrv/nvdrv.h b/src/core/hle/service/nvdrv/nvdrv.h
index 146d046a9..f3c81bd88 100644
--- a/src/core/hle/service/nvdrv/nvdrv.h
+++ b/src/core/hle/service/nvdrv/nvdrv.h
@@ -97,6 +97,9 @@ private:
     friend class EventInterface;
     friend class Service::NVFlinger::NVFlinger;

+    /// Manages syncpoints on the host
+    NvCore::Container container;
+
     /// Id to use for the next open file descriptor.
     DeviceFD next_fd = 1;

@@ -108,9 +111,6 @@ private:

     EventInterface events_interface;

-    /// Manages syncpoints on the host
-    NvCore::Container container;
-
     std::unordered_map<std::string, std::function<FilesContainerType::iterator(DeviceFD)>> builders;
 };

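
Moving the container declaration above events_interface is not cosmetic: C++ constructs non-static data members in declaration order, ignoring the order written in the constructor's initializer list, so a member that other members touch during construction must be declared first. A compilable illustration of the rule with hypothetical types:

    #include <iostream>

    struct Container {
        Container() { std::cout << "container\n"; }
    };
    struct EventInterface {
        explicit EventInterface(Container&) { std::cout << "events\n"; }
    };

    // Declaration order, not initializer-list order, decides construction
    // order (GCC/Clang warn about mismatches via -Wreorder).
    struct Module {
        Module() : events{container} {}
        Container container;   // constructed first
        EventInterface events; // safe: container is already alive
    };

    int main() {
        Module m; // prints "container" then "events"
        return 0;
    }
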
diff --git a/src/core/hle/service/nvflinger/buffer_queue_producer.cpp b/src/core/hle/service/nvflinger/buffer_queue_producer.cpp
index 77ddbb6ef..41ba44b21 100644
--- a/src/core/hle/service/nvflinger/buffer_queue_producer.cpp
+++ b/src/core/hle/service/nvflinger/buffer_queue_producer.cpp
@@ -742,6 +742,13 @@ Status BufferQueueProducer::Disconnect(NativeWindowApi api) {
         return Status::NoError;
     }

+    // HACK: We are not Android. Remove handle for items in queue, and clear queue.
+    // Allows synchronous destruction of nvmap handles.
+    for (auto& item : core->queue) {
+        nvmap.FreeHandle(item.graphic_buffer->BufferId(), true);
+    }
+    core->queue.clear();
+
     switch (api) {
     case NativeWindowApi::Egl:
     case NativeWindowApi::Cpu:
diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp
index aa14d2cbc..c3af12c90 100644
--- a/src/core/hle/service/nvflinger/nvflinger.cpp
+++ b/src/core/hle/service/nvflinger/nvflinger.cpp
@@ -102,15 +102,19 @@ NVFlinger::~NVFlinger() {
         system.CoreTiming().UnscheduleEvent(single_composition_event, {});
     }

+    ShutdownLayers();
+
+    if (nvdrv) {
+        nvdrv->Close(disp_fd);
+    }
+}
+
+void NVFlinger::ShutdownLayers() {
     for (auto& display : displays) {
         for (size_t layer = 0; layer < display.GetNumLayers(); ++layer) {
             display.GetLayer(layer).Core().NotifyShutdown();
         }
     }
-
-    if (nvdrv) {
-        nvdrv->Close(disp_fd);
-    }
 }

 void NVFlinger::SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance) {
@@ -134,6 +138,19 @@ std::optional<u64> NVFlinger::OpenDisplay(std::string_view name) {
     return itr->GetID();
 }

+bool NVFlinger::CloseDisplay(u64 display_id) {
+    const auto lock_guard = Lock();
+    auto* const display = FindDisplay(display_id);
+
+    if (display == nullptr) {
+        return false;
+    }
+
+    display->Reset();
+
+    return true;
+}
+
 std::optional<u64> NVFlinger::CreateLayer(u64 display_id) {
     const auto lock_guard = Lock();
     auto* const display = FindDisplay(display_id);
diff --git a/src/core/hle/service/nvflinger/nvflinger.h b/src/core/hle/service/nvflinger/nvflinger.h
index 99509bc5b..460bef976 100644
--- a/src/core/hle/service/nvflinger/nvflinger.h
+++ b/src/core/hle/service/nvflinger/nvflinger.h
@@ -48,6 +48,8 @@ public:
     explicit NVFlinger(Core::System& system_, HosBinderDriverServer& hos_binder_driver_server_);
     ~NVFlinger();

+    void ShutdownLayers();
+
     /// Sets the NVDrv module instance to use to send buffers to the GPU.
     void SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance);

@@ -56,6 +58,11 @@ public:
     /// If an invalid display name is provided, then an empty optional is returned.
     [[nodiscard]] std::optional<u64> OpenDisplay(std::string_view name);

+    /// Closes the specified display by its ID.
+    ///
+    /// Returns false if an invalid display ID is provided.
+    [[nodiscard]] bool CloseDisplay(u64 display_id);
+
     /// Creates a layer on the specified display and returns the layer ID.
     ///
     /// If an invalid display ID is specified, then an empty optional is returned.
diff --git a/src/core/hle/service/service.cpp b/src/core/hle/service/service.cpp
index dadaf897f..5db6588e4 100644
--- a/src/core/hle/service/service.cpp
+++ b/src/core/hle/service/service.cpp
@@ -303,4 +303,8 @@ Services::Services(std::shared_ptr<SM::ServiceManager>& sm, Core::System& system

 Services::~Services() = default;

+void Services::KillNVNFlinger() {
+    nv_flinger->ShutdownLayers();
+}
+
 } // namespace Service
diff --git a/src/core/hle/service/service.h b/src/core/hle/service/service.h
index 5bf197c51..ec9deeee4 100644
--- a/src/core/hle/service/service.h
+++ b/src/core/hle/service/service.h
@@ -238,6 +238,8 @@ public:
     explicit Services(std::shared_ptr<SM::ServiceManager>& sm, Core::System& system);
     ~Services();

+    void KillNVNFlinger();
+
 private:
     std::unique_ptr<NVFlinger::HosBinderDriverServer> hos_binder_driver_server;
     std::unique_ptr<NVFlinger::NVFlinger> nv_flinger;
diff --git a/src/core/hle/service/sm/sm.cpp b/src/core/hle/service/sm/sm.cpp
index 48e70f93c..e2b8d8720 100644
--- a/src/core/hle/service/sm/sm.cpp
+++ b/src/core/hle/service/sm/sm.cpp
@@ -80,7 +80,6 @@ ResultVal<Kernel::KPort*> ServiceManager::GetServicePort(const std::string& name
     }

     auto* port = Kernel::KPort::Create(kernel);
-    SCOPE_EXIT({ port->Close(); });

     port->Initialize(ServerSessionCountMax, false, name);
     auto handler = it->second;
diff --git a/src/core/hle/service/sm/sm_controller.cpp b/src/core/hle/service/sm/sm_controller.cpp
index 2a4bd64ab..273f79568 100644
--- a/src/core/hle/service/sm/sm_controller.cpp
+++ b/src/core/hle/service/sm/sm_controller.cpp
@@ -15,9 +15,10 @@
 namespace Service::SM {

 void Controller::ConvertCurrentObjectToDomain(Kernel::HLERequestContext& ctx) {
-    ASSERT_MSG(!ctx.Session()->IsDomain(), "Session is already a domain");
+    ASSERT_MSG(!ctx.Session()->GetSessionRequestManager()->IsDomain(),
+               "Session is already a domain");
     LOG_DEBUG(Service, "called, server_session={}", ctx.Session()->GetId());
-    ctx.Session()->ConvertToDomain();
+    ctx.Session()->GetSessionRequestManager()->ConvertToDomainOnRequestEnd();

     IPC::ResponseBuilder rb{ctx, 3};
     rb.Push(ResultSuccess);
diff --git a/src/core/hle/service/vi/display/vi_display.h b/src/core/hle/service/vi/display/vi_display.h
index 33d5f398c..0b65a65da 100644
--- a/src/core/hle/service/vi/display/vi_display.h
+++ b/src/core/hle/service/vi/display/vi_display.h
@@ -106,6 +106,12 @@ public:
     ///
     void CloseLayer(u64 layer_id);

+    /// Resets the display for a new connection.
+    void Reset() {
+        layers.clear();
+        got_vsync_event = false;
+    }
+
     /// Attempts to find a layer with the given ID.
     ///
     /// @param layer_id The layer ID.
diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp
index 9c917cacf..bb283e74e 100644
--- a/src/core/hle/service/vi/vi.cpp
+++ b/src/core/hle/service/vi/vi.cpp
@@ -324,10 +324,10 @@ private:
         IPC::RequestParser rp{ctx};
         const u64 display = rp.Pop<u64>();

-        LOG_WARNING(Service_VI, "(STUBBED) called. display=0x{:016X}", display);
+        const Result rc = nv_flinger.CloseDisplay(display) ? ResultSuccess : ResultUnknown;

         IPC::ResponseBuilder rb{ctx, 2};
-        rb.Push(ResultSuccess);
+        rb.Push(rc);
     }

333 void CreateManagedLayer(Kernel::HLERequestContext& ctx) { 333 void CreateManagedLayer(Kernel::HLERequestContext& ctx) {
@@ -508,10 +508,10 @@ private:
508 IPC::RequestParser rp{ctx}; 508 IPC::RequestParser rp{ctx};
509 const u64 display_id = rp.Pop<u64>(); 509 const u64 display_id = rp.Pop<u64>();
510 510
511 LOG_WARNING(Service_VI, "(STUBBED) called. display_id=0x{:016X}", display_id); 511 const Result rc = nv_flinger.CloseDisplay(display_id) ? ResultSuccess : ResultUnknown;
512 512
513 IPC::ResponseBuilder rb{ctx, 2}; 513 IPC::ResponseBuilder rb{ctx, 2};
514 rb.Push(ResultSuccess); 514 rb.Push(rc);
515 } 515 }
516 516
517 // This literally does nothing internally in the actual service itself, 517 // This literally does nothing internally in the actual service itself,
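
Both CloseDisplay stubs are now real: they forward to NVFlinger::CloseDisplay, which resets the display, and translate the returned bool into a Result. A reduced model of that flow (Result values and Display shape illustrative only):

    #include <cstdint>
    #include <vector>

    using Result = std::uint32_t;
    constexpr Result ResultSuccess = 0;
    constexpr Result ResultUnknown = 0xFFFF;

    struct Display {
        std::uint64_t id;
        std::vector<int> layers;
        bool got_vsync_event = false;
        void Reset() {
            layers.clear();
            got_vsync_event = false;
        }
    };

    std::vector<Display> displays{{0}, {1}};

    bool CloseDisplay(std::uint64_t display_id) {
        for (auto& display : displays) {
            if (display.id == display_id) {
                display.Reset(); // ready for the next connection
                return true;
            }
        }
        return false; // unknown display id
    }

    int main() {
        const Result rc = CloseDisplay(7) ? ResultSuccess : ResultUnknown;
        return rc == ResultUnknown ? 0 : 1;
    }
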
diff --git a/src/core/hle/service/vi/vi_results.h b/src/core/hle/service/vi/vi_results.h
index a46c247d2..22bac799f 100644
--- a/src/core/hle/service/vi/vi_results.h
+++ b/src/core/hle/service/vi/vi_results.h
@@ -1,6 +1,8 @@
 // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later

+#pragma once
+
 #include "core/hle/result.h"

 namespace Service::VI {
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 2ac792566..3ca80c8ff 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -65,7 +65,7 @@ struct Memory::Impl {
             return {};
         }

-        return system.DeviceMemory().GetPointer(paddr) + vaddr;
+        return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
     }

     [[nodiscard]] u8* GetPointerFromDebugMemory(VAddr vaddr) const {
@@ -75,7 +75,7 @@ struct Memory::Impl {
             return {};
         }

-        return system.DeviceMemory().GetPointer(paddr) + vaddr;
+        return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
     }

     u8 Read8(const VAddr addr) {
@@ -233,18 +233,17 @@ struct Memory::Impl {
                           current_vaddr, src_addr, size);
                 std::memset(dest_buffer, 0, copy_amount);
             },
-            [&dest_buffer](const std::size_t copy_amount, const u8* const src_ptr) {
+            [&](const std::size_t copy_amount, const u8* const src_ptr) {
                 std::memcpy(dest_buffer, src_ptr, copy_amount);
             },
-            [&system = system, &dest_buffer](const VAddr current_vaddr,
-                                             const std::size_t copy_amount,
-                                             const u8* const host_ptr) {
+            [&](const VAddr current_vaddr, const std::size_t copy_amount,
+                const u8* const host_ptr) {
                 if constexpr (!UNSAFE) {
                     system.GPU().FlushRegion(current_vaddr, copy_amount);
                 }
                 std::memcpy(dest_buffer, host_ptr, copy_amount);
             },
-            [&dest_buffer](const std::size_t copy_amount) {
+            [&](const std::size_t copy_amount) {
                 dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
             });
     }
@@ -267,17 +266,16 @@ struct Memory::Impl {
267 "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", 266 "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
268 current_vaddr, dest_addr, size); 267 current_vaddr, dest_addr, size);
269 }, 268 },
270 [&src_buffer](const std::size_t copy_amount, u8* const dest_ptr) { 269 [&](const std::size_t copy_amount, u8* const dest_ptr) {
271 std::memcpy(dest_ptr, src_buffer, copy_amount); 270 std::memcpy(dest_ptr, src_buffer, copy_amount);
272 }, 271 },
273 [&system = system, &src_buffer](const VAddr current_vaddr, 272 [&](const VAddr current_vaddr, const std::size_t copy_amount, u8* const host_ptr) {
274 const std::size_t copy_amount, u8* const host_ptr) {
275 if constexpr (!UNSAFE) { 273 if constexpr (!UNSAFE) {
276 system.GPU().InvalidateRegion(current_vaddr, copy_amount); 274 system.GPU().InvalidateRegion(current_vaddr, copy_amount);
277 } 275 }
278 std::memcpy(host_ptr, src_buffer, copy_amount); 276 std::memcpy(host_ptr, src_buffer, copy_amount);
279 }, 277 },
280 [&src_buffer](const std::size_t copy_amount) { 278 [&](const std::size_t copy_amount) {
281 src_buffer = static_cast<const u8*>(src_buffer) + copy_amount; 279 src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
282 }); 280 });
283 } 281 }
@@ -301,8 +299,7 @@ struct Memory::Impl {
             [](const std::size_t copy_amount, u8* const dest_ptr) {
                 std::memset(dest_ptr, 0, copy_amount);
             },
-            [&system = system](const VAddr current_vaddr, const std::size_t copy_amount,
-                               u8* const host_ptr) {
+            [&](const VAddr current_vaddr, const std::size_t copy_amount, u8* const host_ptr) {
                 system.GPU().InvalidateRegion(current_vaddr, copy_amount);
                 std::memset(host_ptr, 0, copy_amount);
             },
@@ -313,22 +310,20 @@ struct Memory::Impl {
                    const std::size_t size) {
         WalkBlock(
             process, dest_addr, size,
-            [this, &process, &dest_addr, &src_addr, size](const std::size_t copy_amount,
-                                                          const VAddr current_vaddr) {
+            [&](const std::size_t copy_amount, const VAddr current_vaddr) {
                 LOG_ERROR(HW_Memory,
                           "Unmapped CopyBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                           current_vaddr, src_addr, size);
                 ZeroBlock(process, dest_addr, copy_amount);
             },
-            [this, &process, &dest_addr](const std::size_t copy_amount, const u8* const src_ptr) {
+            [&](const std::size_t copy_amount, const u8* const src_ptr) {
                 WriteBlockImpl<false>(process, dest_addr, src_ptr, copy_amount);
             },
-            [this, &system = system, &process, &dest_addr](
-                const VAddr current_vaddr, const std::size_t copy_amount, u8* const host_ptr) {
+            [&](const VAddr current_vaddr, const std::size_t copy_amount, u8* const host_ptr) {
                 system.GPU().FlushRegion(current_vaddr, copy_amount);
                 WriteBlockImpl<false>(process, dest_addr, host_ptr, copy_amount);
             },
-            [&dest_addr, &src_addr](const std::size_t copy_amount) {
+            [&](const std::size_t copy_amount) {
                 dest_addr += static_cast<VAddr>(copy_amount);
                 src_addr += static_cast<VAddr>(copy_amount);
             });
     }
@@ -499,7 +494,7 @@ struct Memory::Impl {
         } else {
             while (base != end) {
                 page_table.pointers[base].Store(
-                    system.DeviceMemory().GetPointer(target) - (base << YUZU_PAGEBITS), type);
+                    system.DeviceMemory().GetPointer<u8>(target) - (base << YUZU_PAGEBITS), type);
                 page_table.backing_addr[base] = target - (base << YUZU_PAGEBITS);

                 ASSERT_MSG(page_table.pointers[base].Pointer(),
@@ -575,7 +570,7 @@ struct Memory::Impl {
             [vaddr]() {
                 LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:016X}", sizeof(T) * 8, vaddr);
             },
-            [&system = system, vaddr]() { system.GPU().FlushRegion(vaddr, sizeof(T)); });
+            [&]() { system.GPU().FlushRegion(vaddr, sizeof(T)); });
         if (ptr) {
             std::memcpy(&result, ptr, sizeof(T));
         }
@@ -599,7 +594,7 @@ struct Memory::Impl {
                 LOG_ERROR(HW_Memory, "Unmapped Write{} @ 0x{:016X} = 0x{:016X}", sizeof(T) * 8,
                           vaddr, static_cast<u64>(data));
             },
-            [&system = system, vaddr]() { system.GPU().InvalidateRegion(vaddr, sizeof(T)); });
+            [&]() { system.GPU().InvalidateRegion(vaddr, sizeof(T)); });
         if (ptr) {
             std::memcpy(ptr, &data, sizeof(T));
         }
@@ -613,7 +608,7 @@ struct Memory::Impl {
                 LOG_ERROR(HW_Memory, "Unmapped WriteExclusive{} @ 0x{:016X} = 0x{:016X}",
                           sizeof(T) * 8, vaddr, static_cast<u64>(data));
             },
-            [&system = system, vaddr]() { system.GPU().InvalidateRegion(vaddr, sizeof(T)); });
+            [&]() { system.GPU().InvalidateRegion(vaddr, sizeof(T)); });
         if (ptr) {
             const auto volatile_pointer = reinterpret_cast<volatile T*>(ptr);
             return Common::AtomicCompareAndSwap(volatile_pointer, data, expected);
@@ -628,7 +623,7 @@ struct Memory::Impl {
                 LOG_ERROR(HW_Memory, "Unmapped WriteExclusive128 @ 0x{:016X} = 0x{:016X}{:016X}",
                           vaddr, static_cast<u64>(data[1]), static_cast<u64>(data[0]));
             },
-            [&system = system, vaddr]() { system.GPU().InvalidateRegion(vaddr, sizeof(u128)); });
+            [&]() { system.GPU().InvalidateRegion(vaddr, sizeof(u128)); });
         if (ptr) {
             const auto volatile_pointer = reinterpret_cast<volatile u64*>(ptr);
             return Common::AtomicCompareAndSwap(volatile_pointer, data, expected);
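
A side note on the capture lists above: the enumerated captures such as [&system = system, vaddr] were replaced with a blanket [&], which is equivalent for lambdas invoked before the enclosing scope exits and considerably less noisy. A minimal illustration with plain locals (not yuzu's Memory::Impl members):

    #include <cstdio>

    int main() {
        int system = 1;
        int vaddr = 2;

        // Enumerated captures name each variable explicitly...
        auto enumerated = [&system, vaddr]() { return system + vaddr; };
        // ...while [&] captures whatever the body uses, by reference.
        auto blanket = [&]() { return system + vaddr; };

        std::printf("%d %d\n", enumerated(), blanket()); // prints "3 3"
        return enumerated() == blanket() ? 0 : 1;
    }
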
diff --git a/src/input_common/CMakeLists.txt b/src/input_common/CMakeLists.txt
index 2cf9eb97f..cc6f0ffc0 100644
--- a/src/input_common/CMakeLists.txt
+++ b/src/input_common/CMakeLists.txt
@@ -39,21 +39,14 @@ add_library(input_common STATIC
 if (MSVC)
     target_compile_options(input_common PRIVATE
         /W4
-        /WX

         /we4242 # 'identifier': conversion from 'type1' to 'type2', possible loss of data
-        /we4244 # 'conversion': conversion from 'type1' to 'type2', possible loss of data
-        /we4245 # 'conversion': conversion from 'type1' to 'type2', signed/unsigned mismatch
         /we4254 # 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data
+        /we4800 # Implicit conversion from 'type' to bool. Possible information loss
     )
 else()
     target_compile_options(input_common PRIVATE
-        -Werror
         -Werror=conversion
-        -Werror=ignored-qualifiers
-        $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter>
-        $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable>
-        -Werror=unused-variable
     )
 endif()

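
This hunk trades blanket warnings-as-errors (/WX, -Werror) for a curated error set, and newly promotes MSVC's C4800 "implicit conversion to bool" warning to an error; that is what forces the explicit "!= 0" comparisons in input_poller.cpp later in this diff. A minimal sketch of the pattern C4800 flags, with a hypothetical Get standing in for ParamPackage::Get:

    // Minimal illustration of MSVC C4800 (promoted to an error by /we4800).
    int Get(const char*, int fallback) {
        return fallback; // stand-in for ParamPackage::Get, which returns int
    }

    int main() {
        const bool implicit = Get("toggle", 0);          // C4800: int implicitly narrowed to bool
        const bool explicit_cmp = Get("toggle", 0) != 0; // quiet: intent is spelled out
        return implicit == explicit_cmp ? 0 : 1;
    }
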
diff --git a/src/input_common/drivers/gc_adapter.cpp b/src/input_common/drivers/gc_adapter.cpp
index f4dd24e7d..826fa2109 100644
--- a/src/input_common/drivers/gc_adapter.cpp
+++ b/src/input_common/drivers/gc_adapter.cpp
@@ -324,7 +324,7 @@ bool GCAdapter::GetGCEndpoint(libusb_device* device) {
     return true;
 }

-Common::Input::VibrationError GCAdapter::SetRumble(
+Common::Input::VibrationError GCAdapter::SetVibration(
     const PadIdentifier& identifier, const Common::Input::VibrationStatus& vibration) {
     const auto mean_amplitude = (vibration.low_amplitude + vibration.high_amplitude) * 0.5f;
     const auto processed_amplitude =
@@ -338,6 +338,10 @@ Common::Input::VibrationError GCAdapter::SetRumble(
     return Common::Input::VibrationError::None;
 }

+bool GCAdapter::IsVibrationEnabled([[maybe_unused]] const PadIdentifier& identifier) {
+    return rumble_enabled;
+}
+
 void GCAdapter::UpdateVibrations() {
     // Use 8 states to keep the switching between on/off fast enough for
     // a human to feel different vibration strength
diff --git a/src/input_common/drivers/gc_adapter.h b/src/input_common/drivers/gc_adapter.h
index 8682da847..7f81767f7 100644
--- a/src/input_common/drivers/gc_adapter.h
+++ b/src/input_common/drivers/gc_adapter.h
@@ -25,9 +25,11 @@ public:
     explicit GCAdapter(std::string input_engine_);
     ~GCAdapter() override;

-    Common::Input::VibrationError SetRumble(
+    Common::Input::VibrationError SetVibration(
         const PadIdentifier& identifier, const Common::Input::VibrationStatus& vibration) override;

+    bool IsVibrationEnabled(const PadIdentifier& identifier) override;
+
     /// Used for automapping features
     std::vector<Common::ParamPackage> GetInputDevices() const override;
     ButtonMapping GetButtonMappingForDevice(const Common::ParamPackage& params) override;
diff --git a/src/input_common/drivers/sdl_driver.cpp b/src/input_common/drivers/sdl_driver.cpp
index b72e4b397..45ce588f0 100644
--- a/src/input_common/drivers/sdl_driver.cpp
+++ b/src/input_common/drivers/sdl_driver.cpp
@@ -40,8 +40,8 @@ public:
     void EnableMotion() {
         if (sdl_controller) {
             SDL_GameController* controller = sdl_controller.get();
-            has_accel = SDL_GameControllerHasSensor(controller, SDL_SENSOR_ACCEL);
-            has_gyro = SDL_GameControllerHasSensor(controller, SDL_SENSOR_GYRO);
+            has_accel = SDL_GameControllerHasSensor(controller, SDL_SENSOR_ACCEL) == SDL_TRUE;
+            has_gyro = SDL_GameControllerHasSensor(controller, SDL_SENSOR_GYRO) == SDL_TRUE;
             if (has_accel) {
                 SDL_GameControllerSetSensorEnabled(controller, SDL_SENSOR_ACCEL, SDL_TRUE);
             }
@@ -114,6 +114,20 @@ public:
         }
         return false;
     }
+
+    void EnableVibration(bool is_enabled) {
+        has_vibration = is_enabled;
+        is_vibration_tested = true;
+    }
+
+    bool HasVibration() const {
+        return has_vibration;
+    }
+
+    bool IsVibrationTested() const {
+        return is_vibration_tested;
+    }
+
     /**
      * The Pad identifier of the joystick
      */
@@ -236,6 +250,8 @@ private:
     u64 last_motion_update{};
     bool has_gyro{false};
     bool has_accel{false};
+    bool has_vibration{false};
+    bool is_vibration_tested{false};
     BasicMotion motion;
 };
241 257
@@ -517,7 +533,7 @@ std::vector<Common::ParamPackage> SDLDriver::GetInputDevices() const {
517 return devices; 533 return devices;
518} 534}
519 535
520Common::Input::VibrationError SDLDriver::SetRumble( 536Common::Input::VibrationError SDLDriver::SetVibration(
521 const PadIdentifier& identifier, const Common::Input::VibrationStatus& vibration) { 537 const PadIdentifier& identifier, const Common::Input::VibrationStatus& vibration) {
522 const auto joystick = 538 const auto joystick =
523 GetSDLJoystickByGUID(identifier.guid.RawString(), static_cast<int>(identifier.port)); 539 GetSDLJoystickByGUID(identifier.guid.RawString(), static_cast<int>(identifier.port));
@@ -546,13 +562,6 @@ Common::Input::VibrationError SDLDriver::SetRumble(
         .type = Common::Input::VibrationAmplificationType::Exponential,
     };

-    if (vibration.type == Common::Input::VibrationAmplificationType::Test) {
-        if (!joystick->RumblePlay(new_vibration)) {
-            return Common::Input::VibrationError::Unknown;
-        }
-        return Common::Input::VibrationError::None;
-    }
-
     vibration_queue.Push(VibrationRequest{
         .identifier = identifier,
         .vibration = new_vibration,
@@ -561,6 +570,45 @@ Common::Input::VibrationError SDLDriver::SetRumble(
     return Common::Input::VibrationError::None;
 }

+bool SDLDriver::IsVibrationEnabled(const PadIdentifier& identifier) {
+    const auto joystick =
+        GetSDLJoystickByGUID(identifier.guid.RawString(), static_cast<int>(identifier.port));
+
+    constexpr Common::Input::VibrationStatus test_vibration{
+        .low_amplitude = 1,
+        .low_frequency = 160.0f,
+        .high_amplitude = 1,
+        .high_frequency = 320.0f,
+        .type = Common::Input::VibrationAmplificationType::Exponential,
+    };
+
+    constexpr Common::Input::VibrationStatus zero_vibration{
+        .low_amplitude = 0,
+        .low_frequency = 160.0f,
+        .high_amplitude = 0,
+        .high_frequency = 320.0f,
+        .type = Common::Input::VibrationAmplificationType::Exponential,
+    };
+
+    if (joystick->IsVibrationTested()) {
+        return joystick->HasVibration();
+    }
+
+    // First vibration might fail
+    joystick->RumblePlay(test_vibration);
+
+    // Wait for about 15ms to ensure the controller is ready for the stop command
+    std::this_thread::sleep_for(std::chrono::milliseconds(15));
+
+    if (!joystick->RumblePlay(zero_vibration)) {
+        joystick->EnableVibration(false);
+        return false;
+    }
+
+    joystick->EnableVibration(true);
+    return true;
+}
+
 void SDLDriver::SendVibrations() {
     while (!vibration_queue.Empty()) {
         VibrationRequest request;
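
IsVibrationEnabled probes each controller once: it plays a strong test rumble, waits roughly 15 ms, then checks whether the stop command is accepted, and caches the verdict through EnableVibration/IsVibrationTested so later queries cost nothing. A sketch of that probe-once-then-cache pattern against a stand-in rumble call (not the real SDL API):

    #include <chrono>
    #include <optional>
    #include <thread>

    // Stand-in for an SDL_JoystickRumble-style call; returns false on failure.
    bool RumblePlay(int /*low*/, int /*high*/) {
        return true; // pretend the hardware accepted the command
    }

    // Probe once, then serve cached answers: mirrors the has_vibration /
    // is_vibration_tested pair added to SDLJoystick (names illustrative).
    class VibrationProbe {
    public:
        bool IsVibrationEnabled() {
            if (tested_result) {
                return *tested_result; // cached from a previous probe
            }
            RumblePlay(0xFFFF, 0xFFFF); // first command may be dropped
            std::this_thread::sleep_for(std::chrono::milliseconds(15));
            tested_result = RumblePlay(0, 0); // can we issue the stop command?
            return *tested_result;
        }

    private:
        std::optional<bool> tested_result;
    };

    int main() {
        VibrationProbe probe;
        const bool first = probe.IsVibrationEnabled();  // performs the probe
        const bool second = probe.IsVibrationEnabled(); // cached, no rumble
        return first == second ? 0 : 1;
    }
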
diff --git a/src/input_common/drivers/sdl_driver.h b/src/input_common/drivers/sdl_driver.h
index fc3a44572..d1b4471cf 100644
--- a/src/input_common/drivers/sdl_driver.h
+++ b/src/input_common/drivers/sdl_driver.h
@@ -61,9 +61,11 @@ public:

     bool IsStickInverted(const Common::ParamPackage& params) override;

-    Common::Input::VibrationError SetRumble(
+    Common::Input::VibrationError SetVibration(
         const PadIdentifier& identifier, const Common::Input::VibrationStatus& vibration) override;

+    bool IsVibrationEnabled(const PadIdentifier& identifier) override;
+
 private:
     struct VibrationRequest {
         PadIdentifier identifier;
diff --git a/src/input_common/input_engine.h b/src/input_common/input_engine.h
index cfbdb26bd..d4c264a8e 100644
--- a/src/input_common/input_engine.h
+++ b/src/input_common/input_engine.h
@@ -108,12 +108,17 @@ public:
                            [[maybe_unused]] const Common::Input::LedStatus& led_status) {}

     // Sets rumble to a controller
-    virtual Common::Input::VibrationError SetRumble(
+    virtual Common::Input::VibrationError SetVibration(
         [[maybe_unused]] const PadIdentifier& identifier,
         [[maybe_unused]] const Common::Input::VibrationStatus& vibration) {
         return Common::Input::VibrationError::NotSupported;
     }

+    // Returns true if the device supports vibration
+    virtual bool IsVibrationEnabled([[maybe_unused]] const PadIdentifier& identifier) {
+        return false;
+    }
+
     // Sets polling mode to a controller
     virtual Common::Input::PollingError SetPollingMode(
         [[maybe_unused]] const PadIdentifier& identifier,
diff --git a/src/input_common/input_poller.cpp b/src/input_common/input_poller.cpp
index ca33fb4eb..4ac182147 100644
--- a/src/input_common/input_poller.cpp
+++ b/src/input_common/input_poller.cpp
@@ -763,7 +763,11 @@ public:

     Common::Input::VibrationError SetVibration(
         const Common::Input::VibrationStatus& vibration_status) override {
-        return input_engine->SetRumble(identifier, vibration_status);
+        return input_engine->SetVibration(identifier, vibration_status);
+    }
+
+    bool IsVibrationEnabled() override {
+        return input_engine->IsVibrationEnabled(identifier);
     }

     Common::Input::PollingError SetPollingMode(Common::Input::PollingMode polling_mode) override {
@@ -797,8 +801,8 @@ std::unique_ptr<Common::Input::InputDevice> InputFactory::CreateButtonDevice(
797 801
798 const auto button_id = params.Get("button", 0); 802 const auto button_id = params.Get("button", 0);
799 const auto keyboard_key = params.Get("code", 0); 803 const auto keyboard_key = params.Get("code", 0);
800 const auto toggle = params.Get("toggle", false); 804 const auto toggle = params.Get("toggle", false) != 0;
801 const auto inverted = params.Get("inverted", false); 805 const auto inverted = params.Get("inverted", false) != 0;
802 input_engine->PreSetController(identifier); 806 input_engine->PreSetController(identifier);
803 input_engine->PreSetButton(identifier, button_id); 807 input_engine->PreSetButton(identifier, button_id);
804 input_engine->PreSetButton(identifier, keyboard_key); 808 input_engine->PreSetButton(identifier, keyboard_key);
@@ -820,8 +824,8 @@ std::unique_ptr<Common::Input::InputDevice> InputFactory::CreateHatButtonDevice(
820 824
821 const auto button_id = params.Get("hat", 0); 825 const auto button_id = params.Get("hat", 0);
822 const auto direction = input_engine->GetHatButtonId(params.Get("direction", "")); 826 const auto direction = input_engine->GetHatButtonId(params.Get("direction", ""));
823 const auto toggle = params.Get("toggle", false); 827 const auto toggle = params.Get("toggle", false) != 0;
824 const auto inverted = params.Get("inverted", false); 828 const auto inverted = params.Get("inverted", false) != 0;
825 829
826 input_engine->PreSetController(identifier); 830 input_engine->PreSetController(identifier);
827 input_engine->PreSetHatButton(identifier, button_id); 831 input_engine->PreSetHatButton(identifier, button_id);
@@ -879,7 +883,7 @@ std::unique_ptr<Common::Input::InputDevice> InputFactory::CreateAnalogDevice(
879 .threshold = std::clamp(params.Get("threshold", 0.5f), 0.0f, 1.0f), 883 .threshold = std::clamp(params.Get("threshold", 0.5f), 0.0f, 1.0f),
880 .offset = std::clamp(params.Get("offset", 0.0f), -1.0f, 1.0f), 884 .offset = std::clamp(params.Get("offset", 0.0f), -1.0f, 1.0f),
881 .inverted = params.Get("invert", "+") == "-", 885 .inverted = params.Get("invert", "+") == "-",
882 .toggle = static_cast<bool>(params.Get("toggle", false)), 886 .toggle = params.Get("toggle", false) != 0,
883 }; 887 };
884 input_engine->PreSetController(identifier); 888 input_engine->PreSetController(identifier);
885 input_engine->PreSetAxis(identifier, axis); 889 input_engine->PreSetAxis(identifier, axis);
@@ -895,8 +899,8 @@ std::unique_ptr<Common::Input::InputDevice> InputFactory::CreateTriggerDevice(
895 }; 899 };
896 900
897 const auto button = params.Get("button", 0); 901 const auto button = params.Get("button", 0);
898 const auto toggle = params.Get("toggle", false); 902 const auto toggle = params.Get("toggle", false) != 0;
899 const auto inverted = params.Get("inverted", false); 903 const auto inverted = params.Get("inverted", false) != 0;
900 904
901 const auto axis = params.Get("axis", 0); 905 const auto axis = params.Get("axis", 0);
902 const Common::Input::AnalogProperties properties = { 906 const Common::Input::AnalogProperties properties = {
@@ -926,8 +930,8 @@ std::unique_ptr<Common::Input::InputDevice> InputFactory::CreateTouchDevice(
926 }; 930 };
927 931
928 const auto button = params.Get("button", 0); 932 const auto button = params.Get("button", 0);
929 const auto toggle = params.Get("toggle", false); 933 const auto toggle = params.Get("toggle", false) != 0;
930 const auto inverted = params.Get("inverted", false); 934 const auto inverted = params.Get("inverted", false) != 0;
931 935
932 const auto axis_x = params.Get("axis_x", 0); 936 const auto axis_x = params.Get("axis_x", 0);
933 const Common::Input::AnalogProperties properties_x = { 937 const Common::Input::AnalogProperties properties_x = {
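The repeated `!= 0` changes above share one motive: ParamPackage::Get with an int default returns an int, and assigning that straight to a bool is an implicit narrowing that MSVC flags as C4800 (promoted to an error elsewhere in this patch). A self-contained sketch of the pattern, with a hypothetical Get standing in for the real accessor:

#include <string>
#include <unordered_map>

// Hypothetical stand-in for Common::ParamPackage::Get(key, int default).
int Get(const std::unordered_map<std::string, int>& params, const std::string& key, int fallback) {
    const auto it = params.find(key);
    return it == params.end() ? fallback : it->second;
}

void Example(const std::unordered_map<std::string, int>& params) {
    // bool toggle = Get(params, "toggle", 0);         // implicit int -> bool, warns under /we4800
    const bool toggle = Get(params, "toggle", 0) != 0; // explicit comparison, warning-free
    (void)toggle;
}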
diff --git a/src/shader_recompiler/CMakeLists.txt b/src/shader_recompiler/CMakeLists.txt
index af8e51fe8..bcdd60db9 100644
--- a/src/shader_recompiler/CMakeLists.txt
+++ b/src/shader_recompiler/CMakeLists.txt
@@ -241,24 +241,14 @@ target_link_libraries(shader_recompiler PUBLIC common fmt::fmt sirit)
241if (MSVC) 241if (MSVC)
242 target_compile_options(shader_recompiler PRIVATE 242 target_compile_options(shader_recompiler PRIVATE
243 /W4 243 /W4
244 /WX 244
245 /we4018 # 'expression' : signed/unsigned mismatch 245 /we4242 # 'identifier': conversion from 'type1' to 'type2', possible loss of data
246 /we4244 # 'argument' : conversion from 'type1' to 'type2', possible loss of data (floating-point)
247 /we4245 # 'conversion' : conversion from 'type1' to 'type2', signed/unsigned mismatch
248 /we4254 # 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data 246 /we4254 # 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data
249 /we4267 # 'var' : conversion from 'size_t' to 'type', possible loss of data
250 /we4305 # 'context' : truncation from 'type1' to 'type2'
251 /we4800 # Implicit conversion from 'type' to bool. Possible information loss 247 /we4800 # Implicit conversion from 'type' to bool. Possible information loss
252 /we4826 # Conversion from 'type1' to 'type2' is sign-extended. This may cause unexpected runtime behavior.
253 ) 248 )
254else() 249else()
255 target_compile_options(shader_recompiler PRIVATE 250 target_compile_options(shader_recompiler PRIVATE
256 -Werror
257 -Werror=conversion 251 -Werror=conversion
258 -Werror=ignored-qualifiers
259 $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter>
260 $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable>
261 -Werror=unused-variable
262 252
263 # Bracket depth determines maximum size of a fold expression in Clang since 9c9974c3ccb6. 253 # Bracket depth determines maximum size of a fold expression in Clang since 9c9974c3ccb6.
264 # And this in turn limits the size of a std::array. 254
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_not_implemented.cpp b/src/shader_recompiler/backend/glasm/emit_glasm_not_implemented.cpp
index 7094d8e42..1f4ffdd62 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm_not_implemented.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_glasm_not_implemented.cpp
@@ -5,10 +5,6 @@
5#include "shader_recompiler/backend/glasm/glasm_emit_context.h" 5#include "shader_recompiler/backend/glasm/glasm_emit_context.h"
6#include "shader_recompiler/frontend/ir/value.h" 6#include "shader_recompiler/frontend/ir/value.h"
7 7
8#ifdef _MSC_VER
9#pragma warning(disable : 4100)
10#endif
11
12namespace Shader::Backend::GLASM { 8namespace Shader::Backend::GLASM {
13 9
14#define NotImplemented() throw NotImplementedException("GLASM instruction {}", __LINE__) 10#define NotImplemented() throw NotImplementedException("GLASM instruction {}", __LINE__)
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_not_implemented.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_not_implemented.cpp
index b03a8ba1e..9f1ed95a4 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_not_implemented.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_not_implemented.cpp
@@ -7,10 +7,6 @@
7#include "shader_recompiler/backend/glsl/glsl_emit_context.h" 7#include "shader_recompiler/backend/glsl/glsl_emit_context.h"
8#include "shader_recompiler/frontend/ir/value.h" 8#include "shader_recompiler/frontend/ir/value.h"
9 9
10#ifdef _MSC_VER
11#pragma warning(disable : 4100)
12#endif
13
14namespace Shader::Backend::GLSL { 10namespace Shader::Backend::GLSL {
15 11
16void EmitGetRegister(EmitContext& ctx) { 12void EmitGetRegister(EmitContext& ctx) {
diff --git a/src/shader_recompiler/frontend/ir/microinstruction.cpp b/src/shader_recompiler/frontend/ir/microinstruction.cpp
index 468782eb1..84417980b 100644
--- a/src/shader_recompiler/frontend/ir/microinstruction.cpp
+++ b/src/shader_recompiler/frontend/ir/microinstruction.cpp
@@ -325,11 +325,6 @@ void Inst::AddPhiOperand(Block* predecessor, const Value& value) {
325 phi_args.emplace_back(predecessor, value); 325 phi_args.emplace_back(predecessor, value);
326} 326}
327 327
328void Inst::ErasePhiOperand(size_t index) {
329 const auto operand_it{phi_args.begin() + static_cast<ptrdiff_t>(index)};
330 phi_args.erase(operand_it);
331}
332
333void Inst::OrderPhiArgs() { 328void Inst::OrderPhiArgs() {
334 if (op != Opcode::Phi) { 329 if (op != Opcode::Phi) {
335 throw LogicError("{} is not a Phi instruction", op); 330 throw LogicError("{} is not a Phi instruction", op);
diff --git a/src/shader_recompiler/frontend/ir/value.h b/src/shader_recompiler/frontend/ir/value.h
index 1a2e4ccb6..6a673ca05 100644
--- a/src/shader_recompiler/frontend/ir/value.h
+++ b/src/shader_recompiler/frontend/ir/value.h
@@ -178,13 +178,9 @@ public:
178 178
179 /// Get a pointer to the block of a phi argument. 179 /// Get a pointer to the block of a phi argument.
180 [[nodiscard]] Block* PhiBlock(size_t index) const; 180 [[nodiscard]] Block* PhiBlock(size_t index) const;
181
182 /// Add phi operand to a phi instruction. 181 /// Add phi operand to a phi instruction.
183 void AddPhiOperand(Block* predecessor, const Value& value); 182 void AddPhiOperand(Block* predecessor, const Value& value);
184 183
185 // Erase the phi operand at the given index.
186 void ErasePhiOperand(size_t index);
187
188 /// Orders the Phi arguments from farthest away to nearest. 184 /// Orders the Phi arguments from farthest away to nearest.
189 void OrderPhiArgs(); 185 void OrderPhiArgs();
190 186
diff --git a/src/shader_recompiler/frontend/maxwell/translate_program.cpp b/src/shader_recompiler/frontend/maxwell/translate_program.cpp
index 77efb4f57..b58741d4d 100644
--- a/src/shader_recompiler/frontend/maxwell/translate_program.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate_program.cpp
@@ -137,28 +137,35 @@ bool IsLegacyAttribute(IR::Attribute attribute) {
137} 137}
138 138
139std::map<IR::Attribute, IR::Attribute> GenerateLegacyToGenericMappings( 139std::map<IR::Attribute, IR::Attribute> GenerateLegacyToGenericMappings(
140 const VaryingState& state, std::queue<IR::Attribute> ununsed_generics) { 140 const VaryingState& state, std::queue<IR::Attribute> unused_generics,
141 const std::map<IR::Attribute, IR::Attribute>& previous_stage_mapping) {
141 std::map<IR::Attribute, IR::Attribute> mapping; 142 std::map<IR::Attribute, IR::Attribute> mapping;
143 auto update_mapping = [&mapping, &unused_generics, previous_stage_mapping](IR::Attribute attr,
144 size_t count) {
145 if (previous_stage_mapping.find(attr) != previous_stage_mapping.end()) {
146 for (size_t i = 0; i < count; ++i) {
147 mapping.insert({attr + i, previous_stage_mapping.at(attr + i)});
148 }
149 } else {
150 for (size_t i = 0; i < count; ++i) {
151 mapping.insert({attr + i, unused_generics.front() + i});
152 }
153 unused_generics.pop();
154 }
155 };
142 for (size_t index = 0; index < 4; ++index) { 156 for (size_t index = 0; index < 4; ++index) {
143 auto attr = IR::Attribute::ColorFrontDiffuseR + index * 4; 157 auto attr = IR::Attribute::ColorFrontDiffuseR + index * 4;
144 if (state.AnyComponent(attr)) { 158 if (state.AnyComponent(attr)) {
145 for (size_t i = 0; i < 4; ++i) { 159 update_mapping(attr, 4);
146 mapping.insert({attr + i, ununsed_generics.front() + i});
147 }
148 ununsed_generics.pop();
149 } 160 }
150 } 161 }
151 if (state[IR::Attribute::FogCoordinate]) { 162 if (state[IR::Attribute::FogCoordinate]) {
152 mapping.insert({IR::Attribute::FogCoordinate, ununsed_generics.front()}); 163 update_mapping(IR::Attribute::FogCoordinate, 1);
153 ununsed_generics.pop();
154 } 164 }
155 for (size_t index = 0; index < IR::NUM_FIXEDFNCTEXTURE; ++index) { 165 for (size_t index = 0; index < IR::NUM_FIXEDFNCTEXTURE; ++index) {
156 auto attr = IR::Attribute::FixedFncTexture0S + index * 4; 166 auto attr = IR::Attribute::FixedFncTexture0S + index * 4;
157 if (state.AnyComponent(attr)) { 167 if (state.AnyComponent(attr)) {
158 for (size_t i = 0; i < 4; ++i) { 168 update_mapping(attr, 4);
159 mapping.insert({attr + i, ununsed_generics.front() + i});
160 }
161 ununsed_generics.pop();
162 } 169 }
163 } 170 }
164 return mapping; 171 return mapping;
@@ -265,21 +272,22 @@ IR::Program MergeDualVertexPrograms(IR::Program& vertex_a, IR::Program& vertex_b
265void ConvertLegacyToGeneric(IR::Program& program, const Shader::RuntimeInfo& runtime_info) { 272void ConvertLegacyToGeneric(IR::Program& program, const Shader::RuntimeInfo& runtime_info) {
266 auto& stores = program.info.stores; 273 auto& stores = program.info.stores;
267 if (stores.Legacy()) { 274 if (stores.Legacy()) {
268 std::queue<IR::Attribute> ununsed_output_generics{}; 275 std::queue<IR::Attribute> unused_output_generics{};
269 for (size_t index = 0; index < IR::NUM_GENERICS; ++index) { 276 for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
270 if (!stores.Generic(index)) { 277 if (!stores.Generic(index)) {
271 ununsed_output_generics.push(IR::Attribute::Generic0X + index * 4); 278 unused_output_generics.push(IR::Attribute::Generic0X + index * 4);
272 } 279 }
273 } 280 }
274 auto mappings = GenerateLegacyToGenericMappings(stores, ununsed_output_generics); 281 program.info.legacy_stores_mapping =
282 GenerateLegacyToGenericMappings(stores, unused_output_generics, {});
275 for (IR::Block* const block : program.post_order_blocks) { 283 for (IR::Block* const block : program.post_order_blocks) {
276 for (IR::Inst& inst : block->Instructions()) { 284 for (IR::Inst& inst : block->Instructions()) {
277 switch (inst.GetOpcode()) { 285 switch (inst.GetOpcode()) {
278 case IR::Opcode::SetAttribute: { 286 case IR::Opcode::SetAttribute: {
279 const auto attr = inst.Arg(0).Attribute(); 287 const auto attr = inst.Arg(0).Attribute();
280 if (IsLegacyAttribute(attr)) { 288 if (IsLegacyAttribute(attr)) {
281 stores.Set(mappings[attr], true); 289 stores.Set(program.info.legacy_stores_mapping[attr], true);
282 inst.SetArg(0, Shader::IR::Value(mappings[attr])); 290 inst.SetArg(0, Shader::IR::Value(program.info.legacy_stores_mapping[attr]));
283 } 291 }
284 break; 292 break;
285 } 293 }
@@ -292,15 +300,16 @@ void ConvertLegacyToGeneric(IR::Program& program, const Shader::RuntimeInfo& run
292 300
293 auto& loads = program.info.loads; 301 auto& loads = program.info.loads;
294 if (loads.Legacy()) { 302 if (loads.Legacy()) {
295 std::queue<IR::Attribute> ununsed_input_generics{}; 303 std::queue<IR::Attribute> unused_input_generics{};
296 for (size_t index = 0; index < IR::NUM_GENERICS; ++index) { 304 for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
297 const AttributeType input_type{runtime_info.generic_input_types[index]}; 305 const AttributeType input_type{runtime_info.generic_input_types[index]};
298 if (!runtime_info.previous_stage_stores.Generic(index) || !loads.Generic(index) || 306 if (!runtime_info.previous_stage_stores.Generic(index) || !loads.Generic(index) ||
299 input_type == AttributeType::Disabled) { 307 input_type == AttributeType::Disabled) {
300 ununsed_input_generics.push(IR::Attribute::Generic0X + index * 4); 308 unused_input_generics.push(IR::Attribute::Generic0X + index * 4);
301 } 309 }
302 } 310 }
303 auto mappings = GenerateLegacyToGenericMappings(loads, ununsed_input_generics); 311 auto mappings = GenerateLegacyToGenericMappings(
312 loads, unused_input_generics, runtime_info.previous_stage_legacy_stores_mapping);
304 for (IR::Block* const block : program.post_order_blocks) { 313 for (IR::Block* const block : program.post_order_blocks) {
305 for (IR::Inst& inst : block->Instructions()) { 314 for (IR::Inst& inst : block->Instructions()) {
306 switch (inst.GetOpcode()) { 315 switch (inst.GetOpcode()) {
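The reworked mapping helper has two paths: if the previous pipeline stage already assigned a generic slot to a legacy attribute, the same slot is reused so this stage's loads line up with the earlier stage's stores; otherwise the next unused generic is consumed. A reduced sketch of that decision with plain ints standing in for IR::Attribute (illustrative only):

#include <map>
#include <queue>

// Map `count` consecutive legacy attributes starting at `attr` to generic
// slots. Slots chosen by the previous stage win; otherwise take a fresh one.
void MapLegacy(std::map<int, int>& mapping, std::queue<int>& unused_generics,
               const std::map<int, int>& previous_stage_mapping, int attr, int count) {
    if (previous_stage_mapping.count(attr) != 0) {
        for (int i = 0; i < count; ++i) {
            mapping.emplace(attr + i, previous_stage_mapping.at(attr + i)); // reuse
        }
    } else {
        for (int i = 0; i < count; ++i) {
            mapping.emplace(attr + i, unused_generics.front() + i); // fresh slot
        }
        unused_generics.pop(); // one generic (4 components) consumed
    }
}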
diff --git a/src/shader_recompiler/ir_opt/dead_code_elimination_pass.cpp b/src/shader_recompiler/ir_opt/dead_code_elimination_pass.cpp
index 9a7d47344..1bd8afd6f 100644
--- a/src/shader_recompiler/ir_opt/dead_code_elimination_pass.cpp
+++ b/src/shader_recompiler/ir_opt/dead_code_elimination_pass.cpp
@@ -1,104 +1,24 @@
1// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project 1// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later 2// SPDX-License-Identifier: GPL-2.0-or-later
3 3
4#include <algorithm>
5
6#include <boost/container/small_vector.hpp>
7
8#include "shader_recompiler/frontend/ir/basic_block.h" 4#include "shader_recompiler/frontend/ir/basic_block.h"
9#include "shader_recompiler/frontend/ir/value.h" 5#include "shader_recompiler/frontend/ir/value.h"
10#include "shader_recompiler/ir_opt/passes.h" 6#include "shader_recompiler/ir_opt/passes.h"
11 7
12namespace Shader::Optimization { 8namespace Shader::Optimization {
13namespace { 9
14template <bool TEST_USES> 10void DeadCodeEliminationPass(IR::Program& program) {
15void DeadInstElimination(IR::Block* const block) {
16 // We iterate over the instructions in reverse order. 11 // We iterate over the instructions in reverse order.
17 // This is because removing an instruction reduces the number of uses for earlier instructions. 12 // This is because removing an instruction reduces the number of uses for earlier instructions.
18 auto it{block->end()}; 13 for (IR::Block* const block : program.post_order_blocks) {
19 while (it != block->begin()) { 14 auto it{block->end()};
20 --it; 15 while (it != block->begin()) {
21 if constexpr (TEST_USES) { 16 --it;
22 if (it->HasUses() || it->MayHaveSideEffects()) { 17 if (!it->HasUses() && !it->MayHaveSideEffects()) {
23 continue; 18 it->Invalidate();
24 } 19 it = block->Instructions().erase(it);
25 }
26 it->Invalidate();
27 it = block->Instructions().erase(it);
28 }
29}
30
31void DeletedPhiArgElimination(IR::Program& program, std::span<const IR::Block*> dead_blocks) {
32 for (IR::Block* const block : program.blocks) {
33 for (IR::Inst& phi : *block) {
34 if (!IR::IsPhi(phi)) {
35 continue;
36 }
37 for (size_t i = 0; i < phi.NumArgs(); ++i) {
38 if (std::ranges::find(dead_blocks, phi.PhiBlock(i)) == dead_blocks.end()) {
39 continue;
40 }
41 // Phi operand at this index is an unreachable block
42 phi.ErasePhiOperand(i);
43 --i;
44 }
45 }
46 }
47}
48
49void DeadBranchElimination(IR::Program& program) {
50 boost::container::small_vector<const IR::Block*, 3> dead_blocks;
51 const auto begin_it{program.syntax_list.begin()};
52 for (auto node_it = begin_it; node_it != program.syntax_list.end(); ++node_it) {
53 if (node_it->type != IR::AbstractSyntaxNode::Type::If) {
54 continue;
55 }
56 IR::Inst* const cond_ref{node_it->data.if_node.cond.Inst()};
57 const IR::U1 cond{cond_ref->Arg(0)};
58 if (!cond.IsImmediate()) {
59 continue;
60 }
61 if (cond.U1()) {
62 continue;
63 }
64 // False immediate condition. Remove condition ref, erase the entire branch.
65 cond_ref->Invalidate();
66 // Account for nested if-statements within the if(false) branch
67 u32 nested_ifs{1u};
68 while (node_it->type != IR::AbstractSyntaxNode::Type::EndIf || nested_ifs > 0) {
69 node_it = program.syntax_list.erase(node_it);
70 switch (node_it->type) {
71 case IR::AbstractSyntaxNode::Type::If:
72 ++nested_ifs;
73 break;
74 case IR::AbstractSyntaxNode::Type::EndIf:
75 --nested_ifs;
76 break;
77 case IR::AbstractSyntaxNode::Type::Block: {
78 IR::Block* const block{node_it->data.block};
79 DeadInstElimination<false>(block);
80 dead_blocks.push_back(block);
81 break;
82 }
83 default:
84 break;
85 } 20 }
86 } 21 }
87 // Erase EndIf node of the if(false) branch
88 node_it = program.syntax_list.erase(node_it);
89 // Account for loop increment
90 --node_it;
91 }
92 if (!dead_blocks.empty()) {
93 DeletedPhiArgElimination(program, std::span(dead_blocks.data(), dead_blocks.size()));
94 }
95}
96} // namespace
97
98void DeadCodeEliminationPass(IR::Program& program) {
99 DeadBranchElimination(program);
100 for (IR::Block* const block : program.post_order_blocks) {
101 DeadInstElimination<true>(block);
102 } 22 }
103} 23}
104 24
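With dead-branch elimination removed, the pass is a single reverse sweep per block: visiting instructions back-to-front means every user of an instruction is tested (and possibly erased) before the instruction itself, so its use count is final by the time it is examined. The same shape over a toy instruction list (types are illustrative, not the real IR):

#include <list>

struct Inst {
    int uses = 0;
    bool side_effects = false;
    Inst* operand = nullptr; // single operand, for brevity
};

// Reverse sweep: erasing an instruction first releases its operand's use,
// so earlier instructions may become dead by the time the sweep reaches them.
void DeadCodeElimination(std::list<Inst>& block) {
    auto it = block.end();
    while (it != block.begin()) {
        --it;
        if (it->uses == 0 && !it->side_effects) {
            if (it->operand != nullptr) {
                --it->operand->uses; // the role Invalidate() plays above
            }
            it = block.erase(it);
        }
    }
}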
diff --git a/src/shader_recompiler/runtime_info.h b/src/shader_recompiler/runtime_info.h
index dcb5ab158..549b81ef7 100644
--- a/src/shader_recompiler/runtime_info.h
+++ b/src/shader_recompiler/runtime_info.h
@@ -4,6 +4,7 @@
4#pragma once 4#pragma once
5 5
6#include <array> 6#include <array>
7#include <map>
7#include <optional> 8#include <optional>
8#include <vector> 9#include <vector>
9 10
@@ -60,6 +61,7 @@ struct TransformFeedbackVarying {
60struct RuntimeInfo { 61struct RuntimeInfo {
61 std::array<AttributeType, 32> generic_input_types{}; 62 std::array<AttributeType, 32> generic_input_types{};
62 VaryingState previous_stage_stores; 63 VaryingState previous_stage_stores;
64 std::map<IR::Attribute, IR::Attribute> previous_stage_legacy_stores_mapping;
63 65
64 bool convert_depth_mode{}; 66 bool convert_depth_mode{};
65 bool force_early_z{}; 67 bool force_early_z{};
diff --git a/src/shader_recompiler/shader_info.h b/src/shader_recompiler/shader_info.h
index cc596da4f..81097bf1a 100644
--- a/src/shader_recompiler/shader_info.h
+++ b/src/shader_recompiler/shader_info.h
@@ -5,6 +5,7 @@
5 5
6#include <array> 6#include <array>
7#include <bitset> 7#include <bitset>
8#include <map>
8 9
9#include "common/common_types.h" 10#include "common/common_types.h"
10#include "shader_recompiler/frontend/ir/type.h" 11#include "shader_recompiler/frontend/ir/type.h"
@@ -127,6 +128,8 @@ struct Info {
127 VaryingState stores; 128 VaryingState stores;
128 VaryingState passthrough; 129 VaryingState passthrough;
129 130
131 std::map<IR::Attribute, IR::Attribute> legacy_stores_mapping;
132
130 bool loads_indexed_attributes{}; 133 bool loads_indexed_attributes{};
131 134
132 std::array<bool, 8> stores_frag_color{}; 135 std::array<bool, 8> stores_frag_color{};
diff --git a/src/tests/core/core_timing.cpp b/src/tests/core/core_timing.cpp
index 7c432a63c..284b2ae66 100644
--- a/src/tests/core/core_timing.cpp
+++ b/src/tests/core/core_timing.cpp
@@ -40,9 +40,6 @@ struct ScopeInit final {
40 core_timing.SetMulticore(true); 40 core_timing.SetMulticore(true);
41 core_timing.Initialize([]() {}); 41 core_timing.Initialize([]() {});
42 } 42 }
43 ~ScopeInit() {
44 core_timing.Shutdown();
45 }
46 43
47 Core::Timing::CoreTiming core_timing; 44 Core::Timing::CoreTiming core_timing;
48}; 45};
diff --git a/src/tests/video_core/buffer_base.cpp b/src/tests/video_core/buffer_base.cpp
index 71121e42a..f7236afab 100644
--- a/src/tests/video_core/buffer_base.cpp
+++ b/src/tests/video_core/buffer_base.cpp
@@ -44,7 +44,7 @@ public:
44 44
45 [[nodiscard]] unsigned Count() const noexcept { 45 [[nodiscard]] unsigned Count() const noexcept {
46 unsigned count = 0; 46 unsigned count = 0;
47 for (const auto [index, value] : page_table) { 47 for (const auto& [index, value] : page_table) {
48 count += value; 48 count += value;
49 } 49 }
50 return count; 50 return count;
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index 40e6d1ec4..106991969 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -82,6 +82,7 @@ add_library(video_core STATIC
82 gpu_thread.h 82 gpu_thread.h
83 memory_manager.cpp 83 memory_manager.cpp
84 memory_manager.h 84 memory_manager.h
85 pte_kind.h
85 query_cache.h 86 query_cache.h
86 rasterizer_accelerated.cpp 87 rasterizer_accelerated.cpp
87 rasterizer_accelerated.h 88 rasterizer_accelerated.h
@@ -278,14 +279,8 @@ if (MSVC)
278else() 279else()
279 target_compile_options(video_core PRIVATE 280 target_compile_options(video_core PRIVATE
280 -Werror=conversion 281 -Werror=conversion
281 -Wno-error=sign-conversion
282 -Werror=pessimizing-move
283 -Werror=redundant-move
284 -Werror=type-limits
285 282
286 $<$<CXX_COMPILER_ID:GNU>:-Werror=class-memaccess> 283 -Wno-sign-conversion
287 $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter>
288 $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable>
289 ) 284 )
290endif() 285endif()
291 286
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index 89a9d1f5a..f9794dfe4 100644
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -117,10 +117,15 @@ void Maxwell3D::InitializeRegisterDefaults() {
117 117
118 shadow_state = regs; 118 shadow_state = regs;
119 119
120 mme_inline[MAXWELL3D_REG_INDEX(draw.end)] = true; 120 draw_command[MAXWELL3D_REG_INDEX(draw.end)] = true;
121 mme_inline[MAXWELL3D_REG_INDEX(draw.begin)] = true; 121 draw_command[MAXWELL3D_REG_INDEX(draw.begin)] = true;
122 mme_inline[MAXWELL3D_REG_INDEX(vertex_buffer.count)] = true; 122 draw_command[MAXWELL3D_REG_INDEX(vertex_buffer.first)] = true;
123 mme_inline[MAXWELL3D_REG_INDEX(index_buffer.count)] = true; 123 draw_command[MAXWELL3D_REG_INDEX(vertex_buffer.count)] = true;
124 draw_command[MAXWELL3D_REG_INDEX(index_buffer.first)] = true;
125 draw_command[MAXWELL3D_REG_INDEX(index_buffer.count)] = true;
126 draw_command[MAXWELL3D_REG_INDEX(draw_inline_index)] = true;
127 draw_command[MAXWELL3D_REG_INDEX(inline_index_2x16.even)] = true;
128 draw_command[MAXWELL3D_REG_INDEX(inline_index_4x8.index0)] = true;
124} 129}
125 130
126void Maxwell3D::ProcessMacro(u32 method, const u32* base_start, u32 amount, bool is_last_call) { 131void Maxwell3D::ProcessMacro(u32 method, const u32* base_start, u32 amount, bool is_last_call) {
@@ -208,25 +213,21 @@ void Maxwell3D::ProcessMethodCall(u32 method, u32 argument, u32 nonshadow_argume
208 return ProcessCBBind(3); 213 return ProcessCBBind(3);
209 case MAXWELL3D_REG_INDEX(bind_groups[4].raw_config): 214 case MAXWELL3D_REG_INDEX(bind_groups[4].raw_config):
210 return ProcessCBBind(4); 215 return ProcessCBBind(4);
211 case MAXWELL3D_REG_INDEX(draw.end):
212 return DrawArrays();
213 case MAXWELL3D_REG_INDEX(index_buffer32_first): 216 case MAXWELL3D_REG_INDEX(index_buffer32_first):
214 regs.index_buffer.count = regs.index_buffer32_first.count; 217 regs.index_buffer.count = regs.index_buffer32_first.count;
215 regs.index_buffer.first = regs.index_buffer32_first.first; 218 regs.index_buffer.first = regs.index_buffer32_first.first;
216 dirty.flags[VideoCommon::Dirty::IndexBuffer] = true; 219 dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
217 return DrawArrays(); 220 return ProcessDraw();
218 case MAXWELL3D_REG_INDEX(index_buffer16_first): 221 case MAXWELL3D_REG_INDEX(index_buffer16_first):
219 regs.index_buffer.count = regs.index_buffer16_first.count; 222 regs.index_buffer.count = regs.index_buffer16_first.count;
220 regs.index_buffer.first = regs.index_buffer16_first.first; 223 regs.index_buffer.first = regs.index_buffer16_first.first;
221 dirty.flags[VideoCommon::Dirty::IndexBuffer] = true; 224 dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
222 return DrawArrays(); 225 return ProcessDraw();
223 case MAXWELL3D_REG_INDEX(index_buffer8_first): 226 case MAXWELL3D_REG_INDEX(index_buffer8_first):
224 regs.index_buffer.count = regs.index_buffer8_first.count; 227 regs.index_buffer.count = regs.index_buffer8_first.count;
225 regs.index_buffer.first = regs.index_buffer8_first.first; 228 regs.index_buffer.first = regs.index_buffer8_first.first;
226 dirty.flags[VideoCommon::Dirty::IndexBuffer] = true; 229 dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
227 // a macro calls this one over and over, should it increase instancing? 230 return ProcessDraw();
228 // Used by Hades and likely other Vulkan games.
229 return DrawArrays();
230 case MAXWELL3D_REG_INDEX(topology_override): 231 case MAXWELL3D_REG_INDEX(topology_override):
231 use_topology_override = true; 232 use_topology_override = true;
232 return; 233 return;
@@ -261,14 +262,13 @@ void Maxwell3D::CallMacroMethod(u32 method, const std::vector<u32>& parameters)
261 262
262 // Execute the current macro. 263 // Execute the current macro.
263 macro_engine->Execute(macro_positions[entry], parameters); 264 macro_engine->Execute(macro_positions[entry], parameters);
264 if (mme_draw.current_mode != MMEDrawMode::Undefined) { 265
265 FlushMMEInlineDraw(); 266 ProcessDeferredDraw();
266 }
267} 267}
268 268
269void Maxwell3D::CallMethod(u32 method, u32 method_argument, bool is_last_call) { 269void Maxwell3D::CallMethod(u32 method, u32 method_argument, bool is_last_call) {
270 // It is an error to write to a register other than the current macro's ARG register before it 270 // It is an error to write to a register other than the current macro's ARG register before
271 // has finished execution. 271 // it has finished execution.
272 if (executing_macro != 0) { 272 if (executing_macro != 0) {
273 ASSERT(method == executing_macro + 1); 273 ASSERT(method == executing_macro + 1);
274 } 274 }
@@ -283,9 +283,33 @@ void Maxwell3D::CallMethod(u32 method, u32 method_argument, bool is_last_call) {
283 ASSERT_MSG(method < Regs::NUM_REGS, 283 ASSERT_MSG(method < Regs::NUM_REGS,
284 "Invalid Maxwell3D register, increase the size of the Regs structure"); 284 "Invalid Maxwell3D register, increase the size of the Regs structure");
285 285
286 const u32 argument = ProcessShadowRam(method, method_argument); 286 if (draw_command[method]) {
287 ProcessDirtyRegisters(method, argument); 287 regs.reg_array[method] = method_argument;
288 ProcessMethodCall(method, argument, method_argument, is_last_call); 288 deferred_draw_method.push_back(method);
289 auto u32_to_u8 = [&](const u32 argument) {
290 inline_index_draw_indexes.push_back(static_cast<u8>(argument & 0x000000ff));
291 inline_index_draw_indexes.push_back(static_cast<u8>((argument & 0x0000ff00) >> 8));
292 inline_index_draw_indexes.push_back(static_cast<u8>((argument & 0x00ff0000) >> 16));
293 inline_index_draw_indexes.push_back(static_cast<u8>((argument & 0xff000000) >> 24));
294 };
295 if (MAXWELL3D_REG_INDEX(draw_inline_index) == method) {
296 u32_to_u8(method_argument);
297 } else if (MAXWELL3D_REG_INDEX(inline_index_2x16.even) == method) {
298 u32_to_u8(regs.inline_index_2x16.even);
299 u32_to_u8(regs.inline_index_2x16.odd);
300 } else if (MAXWELL3D_REG_INDEX(inline_index_4x8.index0) == method) {
301 u32_to_u8(regs.inline_index_4x8.index0);
302 u32_to_u8(regs.inline_index_4x8.index1);
303 u32_to_u8(regs.inline_index_4x8.index2);
304 u32_to_u8(regs.inline_index_4x8.index3);
305 }
306 } else {
307 ProcessDeferredDraw();
308
309 const u32 argument = ProcessShadowRam(method, method_argument);
310 ProcessDirtyRegisters(method, argument);
311 ProcessMethodCall(method, argument, method_argument, is_last_call);
312 }
289} 313}
290 314
291void Maxwell3D::CallMultiMethod(u32 method, const u32* base_start, u32 amount, 315void Maxwell3D::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
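The u32_to_u8 helper above unpacks each 32-bit draw argument into four bytes, least significant first, so inline_index_draw_indexes accumulates a little-endian byte stream of indices. In isolation (names illustrative):

#include <cstdint>
#include <vector>

// Append the four bytes of `value` to `out`, least-significant byte first,
// mirroring how inline index arguments are accumulated above.
void PushU32AsBytes(std::vector<std::uint8_t>& out, std::uint32_t value) {
    out.push_back(static_cast<std::uint8_t>(value & 0xff));
    out.push_back(static_cast<std::uint8_t>((value >> 8) & 0xff));
    out.push_back(static_cast<std::uint8_t>((value >> 16) & 0xff));
    out.push_back(static_cast<std::uint8_t>((value >> 24) & 0xff));
}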
@@ -326,55 +350,6 @@ void Maxwell3D::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
326 } 350 }
327} 351}
328 352
329void Maxwell3D::StepInstance(const MMEDrawMode expected_mode, const u32 count) {
330 if (mme_draw.current_mode == MMEDrawMode::Undefined) {
331 if (mme_draw.gl_begin_consume) {
332 mme_draw.current_mode = expected_mode;
333 mme_draw.current_count = count;
334 mme_draw.instance_count = 1;
335 mme_draw.gl_begin_consume = false;
336 mme_draw.gl_end_count = 0;
337 }
338 return;
339 } else {
340 if (mme_draw.current_mode == expected_mode && count == mme_draw.current_count &&
341 mme_draw.instance_mode && mme_draw.gl_begin_consume) {
342 mme_draw.instance_count++;
343 mme_draw.gl_begin_consume = false;
344 return;
345 } else {
346 FlushMMEInlineDraw();
347 }
348 }
349 // Tail call in case it needs to retry.
350 StepInstance(expected_mode, count);
351}
352
353void Maxwell3D::CallMethodFromMME(u32 method, u32 method_argument) {
354 if (mme_inline[method]) {
355 regs.reg_array[method] = method_argument;
356 if (method == MAXWELL3D_REG_INDEX(vertex_buffer.count) ||
357 method == MAXWELL3D_REG_INDEX(index_buffer.count)) {
358 const MMEDrawMode expected_mode = method == MAXWELL3D_REG_INDEX(vertex_buffer.count)
359 ? MMEDrawMode::Array
360 : MMEDrawMode::Indexed;
361 StepInstance(expected_mode, method_argument);
362 } else if (method == MAXWELL3D_REG_INDEX(draw.begin)) {
363 mme_draw.instance_mode =
364 (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Subsequent) ||
365 (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Unchanged);
366 mme_draw.gl_begin_consume = true;
367 } else {
368 mme_draw.gl_end_count++;
369 }
370 } else {
371 if (mme_draw.current_mode != MMEDrawMode::Undefined) {
372 FlushMMEInlineDraw();
373 }
374 CallMethod(method, method_argument, true);
375 }
376}
377
378void Maxwell3D::ProcessTopologyOverride() { 353void Maxwell3D::ProcessTopologyOverride() {
379 using PrimitiveTopology = Maxwell3D::Regs::PrimitiveTopology; 354 using PrimitiveTopology = Maxwell3D::Regs::PrimitiveTopology;
380 using PrimitiveTopologyOverride = Maxwell3D::Regs::PrimitiveTopologyOverride; 355 using PrimitiveTopologyOverride = Maxwell3D::Regs::PrimitiveTopologyOverride;
@@ -404,41 +379,6 @@ void Maxwell3D::ProcessTopologyOverride() {
404 } 379 }
405} 380}
406 381
407void Maxwell3D::FlushMMEInlineDraw() {
408 LOG_TRACE(HW_GPU, "called, topology={}, count={}", regs.draw.topology.Value(),
409 regs.vertex_buffer.count);
410 ASSERT_MSG(!(regs.index_buffer.count && regs.vertex_buffer.count), "Both indexed and direct?");
411 ASSERT(mme_draw.instance_count == mme_draw.gl_end_count);
412
413 // Both instance configuration registers can not be set at the same time.
414 ASSERT_MSG(regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::First ||
415 regs.draw.instance_id != Maxwell3D::Regs::Draw::InstanceId::Unchanged,
416 "Illegal combination of instancing parameters");
417
418 ProcessTopologyOverride();
419
420 const bool is_indexed = mme_draw.current_mode == MMEDrawMode::Indexed;
421 if (ShouldExecute()) {
422 rasterizer->Draw(is_indexed, true);
423 }
424
425 // TODO(bunnei): Below, we reset vertex count so that we can use these registers to determine if
426 // the game is trying to draw indexed or direct mode. This needs to be verified on HW still -
427 // it's possible that it is incorrect and that there is some other register used to specify the
428 // drawing mode.
429 if (is_indexed) {
430 regs.index_buffer.count = 0;
431 } else {
432 regs.vertex_buffer.count = 0;
433 }
434 mme_draw.current_mode = MMEDrawMode::Undefined;
435 mme_draw.current_count = 0;
436 mme_draw.instance_count = 0;
437 mme_draw.instance_mode = false;
438 mme_draw.gl_begin_consume = false;
439 mme_draw.gl_end_count = 0;
440}
441
442void Maxwell3D::ProcessMacroUpload(u32 data) { 382void Maxwell3D::ProcessMacroUpload(u32 data) {
443 macro_engine->AddCode(regs.load_mme.instruction_ptr++, data); 383 macro_engine->AddCode(regs.load_mme.instruction_ptr++, data);
444} 384}
@@ -473,9 +413,7 @@ void Maxwell3D::ProcessQueryGet() {
473 413
474 switch (regs.report_semaphore.query.operation) { 414 switch (regs.report_semaphore.query.operation) {
475 case Regs::ReportSemaphore::Operation::Release: 415 case Regs::ReportSemaphore::Operation::Release:
476 if (regs.report_semaphore.query.release == 416 if (regs.report_semaphore.query.short_query != 0) {
477 Regs::ReportSemaphore::Release::AfterAllPreceedingWrites ||
478 regs.report_semaphore.query.short_query != 0) {
479 const GPUVAddr sequence_address{regs.report_semaphore.Address()}; 417 const GPUVAddr sequence_address{regs.report_semaphore.Address()};
480 const u32 payload = regs.report_semaphore.payload; 418 const u32 payload = regs.report_semaphore.payload;
481 std::function<void()> operation([this, sequence_address, payload] { 419 std::function<void()> operation([this, sequence_address, payload] {
@@ -489,11 +427,10 @@ void Maxwell3D::ProcessQueryGet() {
489 }; 427 };
490 const GPUVAddr sequence_address{regs.report_semaphore.Address()}; 428 const GPUVAddr sequence_address{regs.report_semaphore.Address()};
491 const u32 payload = regs.report_semaphore.payload; 429 const u32 payload = regs.report_semaphore.payload;
492 std::function<void()> operation([this, sequence_address, payload] { 430 [this, sequence_address, payload] {
493 memory_manager.Write<u64>(sequence_address + sizeof(u64), system.GPU().GetTicks()); 431 memory_manager.Write<u64>(sequence_address + sizeof(u64), system.GPU().GetTicks());
494 memory_manager.Write<u64>(sequence_address, payload); 432 memory_manager.Write<u64>(sequence_address, payload);
495 }); 433 }();
496 rasterizer->SyncOperation(std::move(operation));
497 } 434 }
498 break; 435 break;
499 case Regs::ReportSemaphore::Operation::Acquire: 436 case Regs::ReportSemaphore::Operation::Acquire:
@@ -569,47 +506,11 @@ void Maxwell3D::ProcessCounterReset() {
569 506
570void Maxwell3D::ProcessSyncPoint() { 507void Maxwell3D::ProcessSyncPoint() {
571 const u32 sync_point = regs.sync_info.sync_point.Value(); 508 const u32 sync_point = regs.sync_info.sync_point.Value();
572 const auto condition = regs.sync_info.condition.Value(); 509 const u32 cache_flush = regs.sync_info.clean_l2.Value();
573 [[maybe_unused]] const u32 cache_flush = regs.sync_info.clean_l2.Value(); 510 if (cache_flush != 0) {
574 if (condition == Regs::SyncInfo::Condition::RopWritesDone) { 511 rasterizer->InvalidateGPUCache();
575 rasterizer->SignalSyncPoint(sync_point);
576 }
577}
578
579void Maxwell3D::DrawArrays() {
580 LOG_TRACE(HW_GPU, "called, topology={}, count={}", regs.draw.topology.Value(),
581 regs.vertex_buffer.count);
582 ASSERT_MSG(!(regs.index_buffer.count && regs.vertex_buffer.count), "Both indexed and direct?");
583
584 // Both instance configuration registers can not be set at the same time.
585 ASSERT_MSG(regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::First ||
586 regs.draw.instance_id != Maxwell3D::Regs::Draw::InstanceId::Unchanged,
587 "Illegal combination of instancing parameters");
588
589 ProcessTopologyOverride();
590
591 if (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Subsequent) {
592 // Increment the current instance *before* drawing.
593 state.current_instance++;
594 } else if (regs.draw.instance_id != Maxwell3D::Regs::Draw::InstanceId::Unchanged) {
595 // Reset the current instance to 0.
596 state.current_instance = 0;
597 }
598
599 const bool is_indexed{regs.index_buffer.count && !regs.vertex_buffer.count};
600 if (ShouldExecute()) {
601 rasterizer->Draw(is_indexed, false);
602 }
603
604 // TODO(bunnei): Below, we reset vertex count so that we can use these registers to determine if
605 // the game is trying to draw indexed or direct mode. This needs to be verified on HW still -
606 // it's possible that it is incorrect and that there is some other register used to specify the
607 // drawing mode.
608 if (is_indexed) {
609 regs.index_buffer.count = 0;
610 } else {
611 regs.vertex_buffer.count = 0;
612 } 512 }
513 rasterizer->SignalSyncPoint(sync_point);
613} 514}
614 515
615std::optional<u64> Maxwell3D::GetQueryResult() { 516std::optional<u64> Maxwell3D::GetQueryResult() {
@@ -694,4 +595,90 @@ void Maxwell3D::ProcessClearBuffers() {
694 rasterizer->Clear(); 595 rasterizer->Clear();
695} 596}
696 597
598void Maxwell3D::ProcessDraw(u32 instance_count) {
599 LOG_TRACE(HW_GPU, "called, topology={}, count={}", regs.draw.topology.Value(),
600 regs.vertex_buffer.count);
601
602 ASSERT_MSG(!(regs.index_buffer.count && regs.vertex_buffer.count), "Both indexed and direct?");
603
604 // Both instance configuration registers cannot be set at the same time.
605 ASSERT_MSG(regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::First ||
606 regs.draw.instance_id != Maxwell3D::Regs::Draw::InstanceId::Unchanged,
607 "Illegal combination of instancing parameters");
608
609 ProcessTopologyOverride();
610
611 const bool is_indexed = regs.index_buffer.count && !regs.vertex_buffer.count;
612 if (ShouldExecute()) {
613 rasterizer->Draw(is_indexed, instance_count);
614 }
615
616 if (is_indexed) {
617 regs.index_buffer.count = 0;
618 } else {
619 regs.vertex_buffer.count = 0;
620 }
621}
622
623void Maxwell3D::ProcessDeferredDraw() {
624 if (deferred_draw_method.empty()) {
625 return;
626 }
627
628 enum class DrawMode {
629 Undefined,
630 General,
631 Instance,
632 };
633 DrawMode draw_mode{DrawMode::Undefined};
634 u32 instance_count = 1;
635
636 u32 index = 0;
637 u32 method = 0;
638 u32 method_count = static_cast<u32>(deferred_draw_method.size());
639 for (; index < method_count &&
640 (method = deferred_draw_method[index]) != MAXWELL3D_REG_INDEX(draw.begin);
641 ++index)
642 ;
643
644 if (MAXWELL3D_REG_INDEX(draw.begin) != method) {
645 return;
646 }
647
648 // At least 3 methods [draw.begin -> vertex(index) count(first) -> draw.end] must be
649 // queued for a draw; bail out early to avoid errors in indexed-mode drawing
650 if ((method_count - index) < 3) {
651 return;
652 }
653 draw_mode = (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Subsequent) ||
654 (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Unchanged)
655 ? DrawMode::Instance
656 : DrawMode::General;
657
658 // Drawing only begins with a draw.begin or index_buffer method; any other
659 // method simply clears the deferred queue
660 if (draw_mode == DrawMode::Undefined) {
661 deferred_draw_method.clear();
662 return;
663 }
664
665 if (draw_mode == DrawMode::Instance) {
666 ASSERT_MSG(deferred_draw_method.size() % 4 == 0, "Instance mode method size error");
667 instance_count = static_cast<u32>(method_count - index) / 4;
668 } else {
669 method = deferred_draw_method[index + 1];
670 if (MAXWELL3D_REG_INDEX(draw_inline_index) == method ||
671 MAXWELL3D_REG_INDEX(inline_index_2x16.even) == method ||
672 MAXWELL3D_REG_INDEX(inline_index_4x8.index0) == method) {
673 regs.index_buffer.count = static_cast<u32>(inline_index_draw_indexes.size() / 4);
674 regs.index_buffer.format = Regs::IndexFormat::UnsignedInt;
675 }
676 }
677
678 ProcessDraw(instance_count);
679
680 deferred_draw_method.clear();
681 inline_index_draw_indexes.clear();
682}
683
697} // namespace Tegra::Engines 684} // namespace Tegra::Engines
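ProcessDeferredDraw's instance detection boils down to arithmetic on the queued methods: in instance mode every repetition of draw.begin -> first -> count -> draw.end contributes one instance, so the count is the number of queued methods after the first draw.begin divided by four. A reduced sketch of that classification (hypothetical names, assuming the four-methods-per-instance layout noted above, and a valid begin_index):

#include <cstddef>
#include <cstdint>
#include <vector>

enum class InstanceId { First, Subsequent, Unchanged };

// Decide how many instances the deferred methods encode, given the index of
// the first draw.begin. Returns 0 when there are too few methods to draw.
std::uint32_t CountInstances(const std::vector<std::uint32_t>& methods, std::size_t begin_index,
                             InstanceId id) {
    if (methods.size() - begin_index < 3) {
        return 0; // draw.begin -> count -> draw.end is the minimum
    }
    const bool instanced = id == InstanceId::Subsequent || id == InstanceId::Unchanged;
    if (!instanced) {
        return 1; // a single general draw
    }
    return static_cast<std::uint32_t>(methods.size() - begin_index) / 4;
}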
diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h
index 75e3b868d..a948fcb14 100644
--- a/src/video_core/engines/maxwell_3d.h
+++ b/src/video_core/engines/maxwell_3d.h
@@ -1739,14 +1739,11 @@ public:
1739 Footprint_1x1_Virtual = 2, 1739 Footprint_1x1_Virtual = 2,
1740 }; 1740 };
1741 1741
1742 struct InlineIndex4x8Align { 1742 struct InlineIndex4x8 {
1743 union { 1743 union {
1744 BitField<0, 30, u32> count; 1744 BitField<0, 30, u32> count;
1745 BitField<30, 2, u32> start; 1745 BitField<30, 2, u32> start;
1746 }; 1746 };
1747 };
1748
1749 struct InlineIndex4x8Index {
1750 union { 1747 union {
1751 BitField<0, 8, u32> index0; 1748 BitField<0, 8, u32> index0;
1752 BitField<8, 8, u32> index1; 1749 BitField<8, 8, u32> index1;
@@ -2836,8 +2833,7 @@ public:
2836 u32 depth_write_enabled; ///< 0x12E8 2833 u32 depth_write_enabled; ///< 0x12E8
2837 u32 alpha_test_enabled; ///< 0x12EC 2834 u32 alpha_test_enabled; ///< 0x12EC
2838 INSERT_PADDING_BYTES_NOINIT(0x10); 2835 INSERT_PADDING_BYTES_NOINIT(0x10);
2839 InlineIndex4x8Align inline_index_4x8_align; ///< 0x1300 2836 InlineIndex4x8 inline_index_4x8; ///< 0x1300
2840 InlineIndex4x8Index inline_index_4x8_index; ///< 0x1304
2841 D3DCullMode d3d_cull_mode; ///< 0x1308 2837 D3DCullMode d3d_cull_mode; ///< 0x1308
2842 ComparisonOp depth_test_func; ///< 0x130C 2838 ComparisonOp depth_test_func; ///< 0x130C
2843 f32 alpha_test_ref; ///< 0x1310 2839 f32 alpha_test_ref; ///< 0x1310
@@ -3048,8 +3044,6 @@ public:
3048 }; 3044 };
3049 3045
3050 std::array<ShaderStageInfo, Regs::MaxShaderStage> shader_stages; 3046 std::array<ShaderStageInfo, Regs::MaxShaderStage> shader_stages;
3051
3052 u32 current_instance = 0; ///< Current instance to be used to simulate instanced rendering.
3053 }; 3047 };
3054 3048
3055 State state{}; 3049 State state{};
@@ -3064,11 +3058,6 @@ public:
3064 void CallMultiMethod(u32 method, const u32* base_start, u32 amount, 3058 void CallMultiMethod(u32 method, const u32* base_start, u32 amount,
3065 u32 methods_pending) override; 3059 u32 methods_pending) override;
3066 3060
3067 /// Write the value to the register identified by method.
3068 void CallMethodFromMME(u32 method, u32 method_argument);
3069
3070 void FlushMMEInlineDraw();
3071
3072 bool ShouldExecute() const { 3061 bool ShouldExecute() const {
3073 return execute_on; 3062 return execute_on;
3074 } 3063 }
@@ -3081,21 +3070,6 @@ public:
3081 return *rasterizer; 3070 return *rasterizer;
3082 } 3071 }
3083 3072
3084 enum class MMEDrawMode : u32 {
3085 Undefined,
3086 Array,
3087 Indexed,
3088 };
3089
3090 struct MMEDrawState {
3091 MMEDrawMode current_mode{MMEDrawMode::Undefined};
3092 u32 current_count{};
3093 u32 instance_count{};
3094 bool instance_mode{};
3095 bool gl_begin_consume{};
3096 u32 gl_end_count{};
3097 } mme_draw;
3098
3099 struct DirtyState { 3073 struct DirtyState {
3100 using Flags = std::bitset<std::numeric_limits<u8>::max()>; 3074 using Flags = std::bitset<std::numeric_limits<u8>::max()>;
3101 using Table = std::array<u8, Regs::NUM_REGS>; 3075 using Table = std::array<u8, Regs::NUM_REGS>;
@@ -3105,6 +3079,8 @@ public:
3105 Tables tables{}; 3079 Tables tables{};
3106 } dirty; 3080 } dirty;
3107 3081
3082 std::vector<u8> inline_index_draw_indexes;
3083
3108private: 3084private:
3109 void InitializeRegisterDefaults(); 3085 void InitializeRegisterDefaults();
3110 3086
@@ -3164,14 +3140,12 @@ private:
3164 /// Handles a write to the CB_BIND register. 3140 /// Handles a write to the CB_BIND register.
3165 void ProcessCBBind(size_t stage_index); 3141 void ProcessCBBind(size_t stage_index);
3166 3142
3167 /// Handles a write to the VERTEX_END_GL register, triggering a draw.
3168 void DrawArrays();
3169
3170 /// Handles use of topology overrides (e.g., to avoid using a topology assigned from a macro) 3143 /// Handles use of topology overrides (e.g., to avoid using a topology assigned from a macro)
3171 void ProcessTopologyOverride(); 3144 void ProcessTopologyOverride();
3172 3145
3173 // Handles a instance drawcall from MME 3146 void ProcessDraw(u32 instance_count = 1);
3174 void StepInstance(MMEDrawMode expected_mode, u32 count); 3147
3148 void ProcessDeferredDraw();
3175 3149
3176 /// Returns a query's value or an empty object if the value will be deferred through a cache. 3150 /// Returns a query's value or an empty object if the value will be deferred through a cache.
3177 std::optional<u64> GetQueryResult(); 3151 std::optional<u64> GetQueryResult();
@@ -3184,8 +3158,6 @@ private:
3184 /// Start offsets of each macro in macro_memory 3158 /// Start offsets of each macro in macro_memory
3185 std::array<u32, 0x80> macro_positions{}; 3159 std::array<u32, 0x80> macro_positions{};
3186 3160
3187 std::array<bool, Regs::NUM_REGS> mme_inline{};
3188
3189 /// Macro method that is currently being executed / being fed parameters. 3161 /// Macro method that is currently being executed / being fed parameters.
3190 u32 executing_macro = 0; 3162 u32 executing_macro = 0;
3191 /// Parameters that have been submitted to the macro call so far. 3163 /// Parameters that have been submitted to the macro call so far.
@@ -3198,6 +3170,9 @@ private:
3198 3170
3199 bool execute_on{true}; 3171 bool execute_on{true};
3200 bool use_topology_override{false}; 3172 bool use_topology_override{false};
3173
3174 std::array<bool, Regs::NUM_REGS> draw_command{};
3175 std::vector<u32> deferred_draw_method;
3201}; 3176};
3202 3177
3203#define ASSERT_REG_POSITION(field_name, position) \ 3178#define ASSERT_REG_POSITION(field_name, position) \
@@ -3402,8 +3377,7 @@ ASSERT_REG_POSITION(alpha_to_coverage_dither, 0x12E0);
3402ASSERT_REG_POSITION(blend_per_target_enabled, 0x12E4); 3377ASSERT_REG_POSITION(blend_per_target_enabled, 0x12E4);
3403ASSERT_REG_POSITION(depth_write_enabled, 0x12E8); 3378ASSERT_REG_POSITION(depth_write_enabled, 0x12E8);
3404ASSERT_REG_POSITION(alpha_test_enabled, 0x12EC); 3379ASSERT_REG_POSITION(alpha_test_enabled, 0x12EC);
3405ASSERT_REG_POSITION(inline_index_4x8_align, 0x1300); 3380ASSERT_REG_POSITION(inline_index_4x8, 0x1300);
3406ASSERT_REG_POSITION(inline_index_4x8_index, 0x1304);
3407ASSERT_REG_POSITION(d3d_cull_mode, 0x1308); 3381ASSERT_REG_POSITION(d3d_cull_mode, 0x1308);
3408ASSERT_REG_POSITION(depth_test_func, 0x130C); 3382ASSERT_REG_POSITION(depth_test_func, 0x130C);
3409ASSERT_REG_POSITION(alpha_test_ref, 0x1310); 3383ASSERT_REG_POSITION(alpha_test_ref, 0x1310);
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index 3909d36c1..4eb7a100d 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -56,66 +56,85 @@ void MaxwellDMA::Launch() {
56 ASSERT(launch.interrupt_type == LaunchDMA::InterruptType::NONE); 56 ASSERT(launch.interrupt_type == LaunchDMA::InterruptType::NONE);
57 ASSERT(launch.data_transfer_type == LaunchDMA::DataTransferType::NON_PIPELINED); 57 ASSERT(launch.data_transfer_type == LaunchDMA::DataTransferType::NON_PIPELINED);
58 58
59 const bool is_src_pitch = launch.src_memory_layout == LaunchDMA::MemoryLayout::PITCH; 59 if (launch.multi_line_enable) {
60 const bool is_dst_pitch = launch.dst_memory_layout == LaunchDMA::MemoryLayout::PITCH; 60 const bool is_src_pitch = launch.src_memory_layout == LaunchDMA::MemoryLayout::PITCH;
61 61 const bool is_dst_pitch = launch.dst_memory_layout == LaunchDMA::MemoryLayout::PITCH;
62 if (!is_src_pitch && !is_dst_pitch) { 62
63 // If both the source and the destination are in block layout, assert. 63 if (!is_src_pitch && !is_dst_pitch) {
64 UNIMPLEMENTED_MSG("Tiled->Tiled DMA transfers are not yet implemented"); 64 // If both the source and the destination are in block layout, assert.
65 return; 65 UNIMPLEMENTED_MSG("Tiled->Tiled DMA transfers are not yet implemented");
66 } 66 return;
67 }
67 68
68 if (is_src_pitch && is_dst_pitch) { 69 if (is_src_pitch && is_dst_pitch) {
69 CopyPitchToPitch(); 70 for (u32 line = 0; line < regs.line_count; ++line) {
71 const GPUVAddr source_line =
72 regs.offset_in + static_cast<size_t>(line) * regs.pitch_in;
73 const GPUVAddr dest_line =
74 regs.offset_out + static_cast<size_t>(line) * regs.pitch_out;
75 memory_manager.CopyBlock(dest_line, source_line, regs.line_length_in);
76 }
77 } else {
78 if (!is_src_pitch && is_dst_pitch) {
79 CopyBlockLinearToPitch();
80 } else {
81 CopyPitchToBlockLinear();
82 }
83 }
70 } else { 84 } else {
71 ASSERT(launch.multi_line_enable == 1); 85 // TODO: allow multisized components.
72 86 auto& accelerate = rasterizer->AccessAccelerateDMA();
73 if (!is_src_pitch && is_dst_pitch) { 87 const bool is_const_a_dst = regs.remap_const.dst_x == RemapConst::Swizzle::CONST_A;
74 CopyBlockLinearToPitch(); 88 if (regs.launch_dma.remap_enable != 0 && is_const_a_dst) {
89 ASSERT(regs.remap_const.component_size_minus_one == 3);
90 accelerate.BufferClear(regs.offset_out, regs.line_length_in, regs.remap_consta_value);
91 std::vector<u32> tmp_buffer(regs.line_length_in, regs.remap_consta_value);
92 memory_manager.WriteBlockUnsafe(regs.offset_out,
93 reinterpret_cast<u8*>(tmp_buffer.data()),
94 regs.line_length_in * sizeof(u32));
75 } else { 95 } else {
76 CopyPitchToBlockLinear(); 96 auto convert_linear_2_blocklinear_addr = [](u64 address) {
97 return (address & ~0x1f0ULL) | ((address & 0x40) >> 2) | ((address & 0x10) << 1) |
98 ((address & 0x180) >> 1) | ((address & 0x20) << 3);
99 };
100 auto src_kind = memory_manager.GetPageKind(regs.offset_in);
101 auto dst_kind = memory_manager.GetPageKind(regs.offset_out);
102 const bool is_src_pitch = IsPitchKind(static_cast<PTEKind>(src_kind));
103 const bool is_dst_pitch = IsPitchKind(static_cast<PTEKind>(dst_kind));
104 if (!is_src_pitch && is_dst_pitch) {
105 std::vector<u8> tmp_buffer(regs.line_length_in);
106 std::vector<u8> dst_buffer(regs.line_length_in);
107 memory_manager.ReadBlockUnsafe(regs.offset_in, tmp_buffer.data(),
108 regs.line_length_in);
109 for (u32 offset = 0; offset < regs.line_length_in; ++offset) {
110 dst_buffer[offset] =
111 tmp_buffer[convert_linear_2_blocklinear_addr(regs.offset_in + offset) -
112 regs.offset_in];
113 }
114 memory_manager.WriteBlock(regs.offset_out, dst_buffer.data(), regs.line_length_in);
115 } else if (is_src_pitch && !is_dst_pitch) {
116 std::vector<u8> tmp_buffer(regs.line_length_in);
117 std::vector<u8> dst_buffer(regs.line_length_in);
118 memory_manager.ReadBlockUnsafe(regs.offset_in, tmp_buffer.data(),
119 regs.line_length_in);
120 for (u32 offset = 0; offset < regs.line_length_in; ++offset) {
121 dst_buffer[convert_linear_2_blocklinear_addr(regs.offset_out + offset) -
122 regs.offset_out] = tmp_buffer[offset];
123 }
124 memory_manager.WriteBlock(regs.offset_out, dst_buffer.data(), regs.line_length_in);
125 } else {
126 if (!accelerate.BufferCopy(regs.offset_in, regs.offset_out, regs.line_length_in)) {
127 std::vector<u8> tmp_buffer(regs.line_length_in);
128 memory_manager.ReadBlockUnsafe(regs.offset_in, tmp_buffer.data(),
129 regs.line_length_in);
130 memory_manager.WriteBlock(regs.offset_out, tmp_buffer.data(),
131 regs.line_length_in);
132 }
133 }
77 } 134 }
78 } 135 }
79 ReleaseSemaphore();
80}
81 136
82void MaxwellDMA::CopyPitchToPitch() { 137 ReleaseSemaphore();
83 // When `multi_line_enable` bit is enabled we copy a 2D image of dimensions
84 // (line_length_in, line_count).
85 // Otherwise the copy is performed as if we were copying a 1D buffer of length line_length_in.
86 const bool remap_enabled = regs.launch_dma.remap_enable != 0;
87 if (regs.launch_dma.multi_line_enable) {
88 UNIMPLEMENTED_IF(remap_enabled);
89
90 // Perform a line-by-line copy.
91 // We're going to take a subrect of size (line_length_in, line_count) from the source
92 // rectangle. There is no need to manually flush/invalidate the regions because CopyBlock
93 // does that for us.
94 for (u32 line = 0; line < regs.line_count; ++line) {
95 const GPUVAddr source_line = regs.offset_in + static_cast<size_t>(line) * regs.pitch_in;
96 const GPUVAddr dest_line = regs.offset_out + static_cast<size_t>(line) * regs.pitch_out;
97 memory_manager.CopyBlock(dest_line, source_line, regs.line_length_in);
98 }
99 return;
100 }
101 // TODO: allow multisized components.
102 auto& accelerate = rasterizer->AccessAccelerateDMA();
103 const bool is_const_a_dst = regs.remap_const.dst_x == RemapConst::Swizzle::CONST_A;
104 const bool is_buffer_clear = remap_enabled && is_const_a_dst;
105 if (is_buffer_clear) {
106 ASSERT(regs.remap_const.component_size_minus_one == 3);
107 accelerate.BufferClear(regs.offset_out, regs.line_length_in, regs.remap_consta_value);
108 std::vector<u32> tmp_buffer(regs.line_length_in, regs.remap_consta_value);
109 memory_manager.WriteBlockUnsafe(regs.offset_out, reinterpret_cast<u8*>(tmp_buffer.data()),
110 regs.line_length_in * sizeof(u32));
111 return;
112 }
113 UNIMPLEMENTED_IF(remap_enabled);
114 if (!accelerate.BufferCopy(regs.offset_in, regs.offset_out, regs.line_length_in)) {
115 std::vector<u8> tmp_buffer(regs.line_length_in);
116 memory_manager.ReadBlockUnsafe(regs.offset_in, tmp_buffer.data(), regs.line_length_in);
117 memory_manager.WriteBlock(regs.offset_out, tmp_buffer.data(), regs.line_length_in);
118 }
119} 138}
120 139
121void MaxwellDMA::CopyBlockLinearToPitch() { 140void MaxwellDMA::CopyBlockLinearToPitch() {
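The convert_linear_2_blocklinear_addr lambda above is a pure permutation of address bits 4..8 (bit 6 -> 4, bit 4 -> 5, bits 7..8 -> 6..7, bit 5 -> 8); all other bits pass through. Restated standalone, with a brute-force check that the mapping really is a bijection (a sketch mirroring the lambda, not new behavior):

#include <cassert>
#include <cstdint>

// Permute bits 4..8 of a byte address, leaving everything else untouched.
std::uint64_t LinearToBlockLinear(std::uint64_t address) {
    return (address & ~0x1f0ULL)       // keep all bits outside 4..8
           | ((address & 0x40) >> 2)   // bit 6 -> bit 4
           | ((address & 0x10) << 1)   // bit 4 -> bit 5
           | ((address & 0x180) >> 1)  // bits 7..8 -> bits 6..7
           | ((address & 0x20) << 3);  // bit 5 -> bit 8
}

int main() {
    // A bit permutation must be a bijection over the 9-bit window.
    bool seen[0x200] = {};
    for (std::uint64_t a = 0; a < 0x200; ++a) {
        const std::uint64_t b = LinearToBlockLinear(a);
        assert(b < 0x200 && !seen[b]);
        seen[b] = true;
    }
    return 0;
}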
diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h
index bc48320ce..953e34adc 100644
--- a/src/video_core/engines/maxwell_dma.h
+++ b/src/video_core/engines/maxwell_dma.h
@@ -219,8 +219,6 @@ private:
219 /// registers. 219 /// registers.
220 void Launch(); 220 void Launch();
221 221
222 void CopyPitchToPitch();
223
224 void CopyBlockLinearToPitch(); 222 void CopyBlockLinearToPitch();
225 223
226 void CopyPitchToBlockLinear(); 224 void CopyPitchToBlockLinear();
diff --git a/src/video_core/engines/puller.cpp b/src/video_core/engines/puller.cpp
index cca890792..3977bb0fb 100644
--- a/src/video_core/engines/puller.cpp
+++ b/src/video_core/engines/puller.cpp
@@ -75,11 +75,10 @@ void Puller::ProcessSemaphoreTriggerMethod() {
75 if (op == GpuSemaphoreOperation::WriteLong) { 75 if (op == GpuSemaphoreOperation::WriteLong) {
76 const GPUVAddr sequence_address{regs.semaphore_address.SemaphoreAddress()}; 76 const GPUVAddr sequence_address{regs.semaphore_address.SemaphoreAddress()};
77 const u32 payload = regs.semaphore_sequence; 77 const u32 payload = regs.semaphore_sequence;
78 std::function<void()> operation([this, sequence_address, payload] { 78 [this, sequence_address, payload] {
79 memory_manager.Write<u64>(sequence_address + sizeof(u64), gpu.GetTicks()); 79 memory_manager.Write<u64>(sequence_address + sizeof(u64), gpu.GetTicks());
80 memory_manager.Write<u64>(sequence_address, payload); 80 memory_manager.Write<u64>(sequence_address, payload);
81 }); 81 }();
82 rasterizer->SignalFence(std::move(operation));
83 } else { 82 } else {
84 do { 83 do {
85 const u32 word{memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress())}; 84 const u32 word{memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress())};
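The WriteLong path above no longer wraps its two writes in a std::function handed to SignalFence; it runs the same body on the spot via an immediately invoked lambda. The difference in miniature (placeholder names, not the emulator's types):

    #include <functional>

    int value = 0;

    void Deferred() {
        std::function<void()> operation([] { value = 1; });
        operation(); // previously handed off, to run when the fence signalled
    }

    void Immediate() {
        [] { value = 2; }(); // same body, executed immediately
    }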
diff --git a/src/video_core/macro/macro_hle.cpp b/src/video_core/macro/macro_hle.cpp
index 8a8adbb42..f896591bf 100644
--- a/src/video_core/macro/macro_hle.cpp
+++ b/src/video_core/macro/macro_hle.cpp
@@ -22,35 +22,29 @@ void HLE_771BB18C62444DA0(Engines::Maxwell3D& maxwell3d, const std::vector<u32>&
22 maxwell3d.regs.draw.topology.Assign( 22 maxwell3d.regs.draw.topology.Assign(
23 static_cast<Tegra::Engines::Maxwell3D::Regs::PrimitiveTopology>(parameters[0] & 0x3ffffff)); 23 static_cast<Tegra::Engines::Maxwell3D::Regs::PrimitiveTopology>(parameters[0] & 0x3ffffff));
24 maxwell3d.regs.global_base_instance_index = parameters[5]; 24 maxwell3d.regs.global_base_instance_index = parameters[5];
25 maxwell3d.mme_draw.instance_count = instance_count;
26 maxwell3d.regs.global_base_vertex_index = parameters[3]; 25 maxwell3d.regs.global_base_vertex_index = parameters[3];
27 maxwell3d.regs.index_buffer.count = parameters[1]; 26 maxwell3d.regs.index_buffer.count = parameters[1];
28 maxwell3d.regs.index_buffer.first = parameters[4]; 27 maxwell3d.regs.index_buffer.first = parameters[4];
29 28
30 if (maxwell3d.ShouldExecute()) { 29 if (maxwell3d.ShouldExecute()) {
31 maxwell3d.Rasterizer().Draw(true, true); 30 maxwell3d.Rasterizer().Draw(true, instance_count);
32 } 31 }
33 maxwell3d.regs.index_buffer.count = 0; 32 maxwell3d.regs.index_buffer.count = 0;
34 maxwell3d.mme_draw.instance_count = 0;
35 maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined;
36} 33}
37 34
38void HLE_0D61FC9FAAC9FCAD(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& parameters) { 35void HLE_0D61FC9FAAC9FCAD(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& parameters) {
39 const u32 count = (maxwell3d.GetRegisterValue(0xD1B) & parameters[2]); 36 const u32 instance_count = (maxwell3d.GetRegisterValue(0xD1B) & parameters[2]);
40 37
41 maxwell3d.regs.vertex_buffer.first = parameters[3]; 38 maxwell3d.regs.vertex_buffer.first = parameters[3];
42 maxwell3d.regs.vertex_buffer.count = parameters[1]; 39 maxwell3d.regs.vertex_buffer.count = parameters[1];
43 maxwell3d.regs.global_base_instance_index = parameters[4]; 40 maxwell3d.regs.global_base_instance_index = parameters[4];
44 maxwell3d.regs.draw.topology.Assign( 41 maxwell3d.regs.draw.topology.Assign(
45 static_cast<Tegra::Engines::Maxwell3D::Regs::PrimitiveTopology>(parameters[0])); 42 static_cast<Tegra::Engines::Maxwell3D::Regs::PrimitiveTopology>(parameters[0]));
46 maxwell3d.mme_draw.instance_count = count;
47 43
48 if (maxwell3d.ShouldExecute()) { 44 if (maxwell3d.ShouldExecute()) {
49 maxwell3d.Rasterizer().Draw(false, true); 45 maxwell3d.Rasterizer().Draw(false, instance_count);
50 } 46 }
51 maxwell3d.regs.vertex_buffer.count = 0; 47 maxwell3d.regs.vertex_buffer.count = 0;
52 maxwell3d.mme_draw.instance_count = 0;
53 maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined;
54} 48}
55 49
56void HLE_0217920100488FF7(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& parameters) { 50void HLE_0217920100488FF7(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& parameters) {
@@ -63,24 +57,21 @@ void HLE_0217920100488FF7(Engines::Maxwell3D& maxwell3d, const std::vector<u32>&
63 maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true; 57 maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
64 maxwell3d.regs.global_base_vertex_index = element_base; 58 maxwell3d.regs.global_base_vertex_index = element_base;
65 maxwell3d.regs.global_base_instance_index = base_instance; 59 maxwell3d.regs.global_base_instance_index = base_instance;
66 maxwell3d.mme_draw.instance_count = instance_count; 60 maxwell3d.CallMethod(0x8e3, 0x640, true);
67 maxwell3d.CallMethodFromMME(0x8e3, 0x640); 61 maxwell3d.CallMethod(0x8e4, element_base, true);
68 maxwell3d.CallMethodFromMME(0x8e4, element_base); 62 maxwell3d.CallMethod(0x8e5, base_instance, true);
69 maxwell3d.CallMethodFromMME(0x8e5, base_instance);
70 maxwell3d.regs.draw.topology.Assign( 63 maxwell3d.regs.draw.topology.Assign(
71 static_cast<Tegra::Engines::Maxwell3D::Regs::PrimitiveTopology>(parameters[0])); 64 static_cast<Tegra::Engines::Maxwell3D::Regs::PrimitiveTopology>(parameters[0]));
72 if (maxwell3d.ShouldExecute()) { 65 if (maxwell3d.ShouldExecute()) {
73 maxwell3d.Rasterizer().Draw(true, true); 66 maxwell3d.Rasterizer().Draw(true, instance_count);
74 } 67 }
75 maxwell3d.regs.vertex_id_base = 0x0; 68 maxwell3d.regs.vertex_id_base = 0x0;
76 maxwell3d.regs.index_buffer.count = 0; 69 maxwell3d.regs.index_buffer.count = 0;
77 maxwell3d.regs.global_base_vertex_index = 0x0; 70 maxwell3d.regs.global_base_vertex_index = 0x0;
78 maxwell3d.regs.global_base_instance_index = 0x0; 71 maxwell3d.regs.global_base_instance_index = 0x0;
79 maxwell3d.mme_draw.instance_count = 0; 72 maxwell3d.CallMethod(0x8e3, 0x640, true);
80 maxwell3d.CallMethodFromMME(0x8e3, 0x640); 73 maxwell3d.CallMethod(0x8e4, 0x0, true);
81 maxwell3d.CallMethodFromMME(0x8e4, 0x0); 74 maxwell3d.CallMethod(0x8e5, 0x0, true);
82 maxwell3d.CallMethodFromMME(0x8e5, 0x0);
83 maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined;
84} 75}
85 76
86// Multidraw Indirect 77// Multidraw Indirect
@@ -91,11 +82,9 @@ void HLE_3F5E74B9C9A50164(Engines::Maxwell3D& maxwell3d, const std::vector<u32>&
91 maxwell3d.regs.index_buffer.count = 0; 82 maxwell3d.regs.index_buffer.count = 0;
92 maxwell3d.regs.global_base_vertex_index = 0x0; 83 maxwell3d.regs.global_base_vertex_index = 0x0;
93 maxwell3d.regs.global_base_instance_index = 0x0; 84 maxwell3d.regs.global_base_instance_index = 0x0;
94 maxwell3d.mme_draw.instance_count = 0; 85 maxwell3d.CallMethod(0x8e3, 0x640, true);
95 maxwell3d.CallMethodFromMME(0x8e3, 0x640); 86 maxwell3d.CallMethod(0x8e4, 0x0, true);
96 maxwell3d.CallMethodFromMME(0x8e4, 0x0); 87 maxwell3d.CallMethod(0x8e5, 0x0, true);
97 maxwell3d.CallMethodFromMME(0x8e5, 0x0);
98 maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined;
99 maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true; 88 maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
100 }); 89 });
101 const u32 start_indirect = parameters[0]; 90 const u32 start_indirect = parameters[0];
@@ -127,15 +116,13 @@ void HLE_3F5E74B9C9A50164(Engines::Maxwell3D& maxwell3d, const std::vector<u32>&
127 maxwell3d.regs.index_buffer.count = num_vertices; 116 maxwell3d.regs.index_buffer.count = num_vertices;
128 maxwell3d.regs.global_base_vertex_index = base_vertex; 117 maxwell3d.regs.global_base_vertex_index = base_vertex;
129 maxwell3d.regs.global_base_instance_index = base_instance; 118 maxwell3d.regs.global_base_instance_index = base_instance;
130 maxwell3d.mme_draw.instance_count = instance_count; 119 maxwell3d.CallMethod(0x8e3, 0x640, true);
131 maxwell3d.CallMethodFromMME(0x8e3, 0x640); 120 maxwell3d.CallMethod(0x8e4, base_vertex, true);
132 maxwell3d.CallMethodFromMME(0x8e4, base_vertex); 121 maxwell3d.CallMethod(0x8e5, base_instance, true);
133 maxwell3d.CallMethodFromMME(0x8e5, base_instance);
134 maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true; 122 maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
135 if (maxwell3d.ShouldExecute()) { 123 if (maxwell3d.ShouldExecute()) {
136 maxwell3d.Rasterizer().Draw(true, true); 124 maxwell3d.Rasterizer().Draw(true, instance_count);
137 } 125 }
138 maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined;
139 } 126 }
140} 127}
141 128
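These HLE draw macros now hand the masked instance count straight to Rasterizer().Draw() instead of staging it in mme_draw and passing a bool. The call-site change reduced to its shape (hypothetical stand-ins, not the real interface):

    #include <cstdio>

    static void Draw(bool is_indexed, unsigned instance_count) {
        std::printf("indexed=%d instances=%u\n", is_indexed, instance_count);
    }

    int main() {
        const unsigned instance_count = 4; // e.g. a register value & parameters[2]
        Draw(true, instance_count);        // was Draw(true, true) plus engine state
        return 0;
    }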
diff --git a/src/video_core/macro/macro_interpreter.cpp b/src/video_core/macro/macro_interpreter.cpp
index f670b1bca..c0d32c112 100644
--- a/src/video_core/macro/macro_interpreter.cpp
+++ b/src/video_core/macro/macro_interpreter.cpp
@@ -335,7 +335,7 @@ void MacroInterpreterImpl::SetMethodAddress(u32 address) {
335} 335}
336 336
337void MacroInterpreterImpl::Send(u32 value) { 337void MacroInterpreterImpl::Send(u32 value) {
338 maxwell3d.CallMethodFromMME(method_address.address, value); 338 maxwell3d.CallMethod(method_address.address, value, true);
339 // Increment the method address by the method increment. 339 // Increment the method address by the method increment.
340 method_address.address.Assign(method_address.address.Value() + 340 method_address.address.Assign(method_address.address.Value() +
341 method_address.increment.Value()); 341 method_address.increment.Value());
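Send() still advances the packed method address after each call. A rough model of that bookkeeping, assuming the usual 12-bit address / 6-bit increment split (the field widths are an assumption here, not taken from the register definitions):

    #include <cstdint>

    struct MethodAddress {
        std::uint32_t raw = 0;
        std::uint32_t address() const { return raw & 0xfffu; }
        std::uint32_t increment() const { return (raw >> 12) & 0x3fu; }
        // Mirror of the Assign() in the hunk: bump the address field by the
        // increment field, leaving the rest of the word untouched.
        void Advance() {
            raw = (raw & ~0xfffu) | ((address() + increment()) & 0xfffu);
        }
    };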
diff --git a/src/video_core/macro/macro_jit_x64.cpp b/src/video_core/macro/macro_jit_x64.cpp
index a302a9603..25c1ce798 100644
--- a/src/video_core/macro/macro_jit_x64.cpp
+++ b/src/video_core/macro/macro_jit_x64.cpp
@@ -346,7 +346,7 @@ void MacroJITx64Impl::Compile_Read(Macro::Opcode opcode) {
346} 346}
347 347
348void Send(Engines::Maxwell3D* maxwell3d, Macro::MethodAddress method_address, u32 value) { 348void Send(Engines::Maxwell3D* maxwell3d, Macro::MethodAddress method_address, u32 value) {
349 maxwell3d->CallMethodFromMME(method_address.address, value); 349 maxwell3d->CallMethod(method_address.address, value, true);
350} 350}
351 351
352void MacroJITx64Impl::Compile_Send(Xbyak::Reg32 value) { 352void MacroJITx64Impl::Compile_Send(Xbyak::Reg32 value) {
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index cca401c74..384350dbd 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -41,7 +41,11 @@ MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64
41 big_entries.resize(big_page_table_size / 32, 0); 41 big_entries.resize(big_page_table_size / 32, 0);
42 big_page_table_cpu.resize(big_page_table_size); 42 big_page_table_cpu.resize(big_page_table_size);
43 big_page_continous.resize(big_page_table_size / continous_bits, 0); 43 big_page_continous.resize(big_page_table_size / continous_bits, 0);
44 std::array<PTEKind, 32> kind_values;
45 kind_values.fill(PTEKind::INVALID);
46 big_kinds.resize(big_page_table_size / 32, kind_values);
44 entries.resize(page_table_size / 32, 0); 47 entries.resize(page_table_size / 32, 0);
48 kinds.resize(page_table_size / 32, kind_values);
45} 49}
46 50
47MemoryManager::~MemoryManager() = default; 51MemoryManager::~MemoryManager() = default;
@@ -78,6 +82,41 @@ void MemoryManager::SetEntry(size_t position, MemoryManager::EntryType entry) {
78 } 82 }
79} 83}
80 84
85PTEKind MemoryManager::GetPageKind(GPUVAddr gpu_addr) const {
86 auto entry = GetEntry<true>(gpu_addr);
87 if (entry == EntryType::Mapped || entry == EntryType::Reserved) [[likely]] {
88 return GetKind<true>(gpu_addr);
89 } else {
90 return GetKind<false>(gpu_addr);
91 }
92}
93
94template <bool is_big_page>
95PTEKind MemoryManager::GetKind(size_t position) const {
96 if constexpr (is_big_page) {
97 position = position >> big_page_bits;
98 const size_t sub_index = position % 32;
99 return big_kinds[position / 32][sub_index];
100 } else {
101 position = position >> page_bits;
102 const size_t sub_index = position % 32;
103 return kinds[position / 32][sub_index];
104 }
105}
106
107template <bool is_big_page>
108void MemoryManager::SetKind(size_t position, PTEKind kind) {
109 if constexpr (is_big_page) {
110 position = position >> big_page_bits;
111 const size_t sub_index = position % 32;
112 big_kinds[position / 32][sub_index] = kind;
113 } else {
114 position = position >> page_bits;
115 const size_t sub_index = position % 32;
116 kinds[position / 32][sub_index] = kind;
117 }
118}
119
81inline bool MemoryManager::IsBigPageContinous(size_t big_page_index) const { 120inline bool MemoryManager::IsBigPageContinous(size_t big_page_index) const {
82 const u64 entry_mask = big_page_continous[big_page_index / continous_bits]; 121 const u64 entry_mask = big_page_continous[big_page_index / continous_bits];
83 const size_t sub_index = big_page_index % continous_bits; 122 const size_t sub_index = big_page_index % continous_bits;
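GetKind/SetKind above pack one PTEKind per page into chunks of 32, mirroring how the entry bits are stored. The indexing in isolation (page_bits and the trimmed enum are placeholders, not the emulator's values):

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    enum class PTEKind : std::uint8_t { INVALID = 0xff, PITCH = 0x00 };

    constexpr std::size_t page_bits = 16; // illustrative page size only

    class KindTable {
    public:
        explicit KindTable(std::size_t num_pages) {
            std::array<PTEKind, 32> fill;
            fill.fill(PTEKind::INVALID);
            kinds.resize((num_pages + 31) / 32, fill);
        }
        PTEKind Get(std::uint64_t gpu_addr) const {
            const std::size_t page = gpu_addr >> page_bits;
            return kinds[page / 32][page % 32];
        }
        void Set(std::uint64_t gpu_addr, PTEKind kind) {
            const std::size_t page = gpu_addr >> page_bits;
            kinds[page / 32][page % 32] = kind;
        }
    private:
        std::vector<std::array<PTEKind, 32>> kinds;
    };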
@@ -92,9 +131,9 @@ inline void MemoryManager::SetBigPageContinous(size_t big_page_index, bool value
92} 131}
93 132
94template <MemoryManager::EntryType entry_type> 133template <MemoryManager::EntryType entry_type>
95GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, 134GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size,
96 size_t size) { 135 PTEKind kind) {
97 u64 remaining_size{size}; 136 [[maybe_unused]] u64 remaining_size{size};
98 if constexpr (entry_type == EntryType::Mapped) { 137 if constexpr (entry_type == EntryType::Mapped) {
99 page_table.ReserveRange(gpu_addr, size); 138 page_table.ReserveRange(gpu_addr, size);
100 } 139 }
@@ -102,6 +141,7 @@ GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cp
102 const GPUVAddr current_gpu_addr = gpu_addr + offset; 141 const GPUVAddr current_gpu_addr = gpu_addr + offset;
103 [[maybe_unused]] const auto current_entry_type = GetEntry<false>(current_gpu_addr); 142 [[maybe_unused]] const auto current_entry_type = GetEntry<false>(current_gpu_addr);
104 SetEntry<false>(current_gpu_addr, entry_type); 143 SetEntry<false>(current_gpu_addr, entry_type);
144 SetKind<false>(current_gpu_addr, kind);
105 if (current_entry_type != entry_type) { 145 if (current_entry_type != entry_type) {
106 rasterizer->ModifyGPUMemory(unique_identifier, gpu_addr, page_size); 146 rasterizer->ModifyGPUMemory(unique_identifier, gpu_addr, page_size);
107 } 147 }
@@ -118,12 +158,13 @@ GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cp
118 158
119template <MemoryManager::EntryType entry_type> 159template <MemoryManager::EntryType entry_type>
120GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, 160GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr,
121 size_t size) { 161 size_t size, PTEKind kind) {
122 u64 remaining_size{size}; 162 [[maybe_unused]] u64 remaining_size{size};
123 for (u64 offset{}; offset < size; offset += big_page_size) { 163 for (u64 offset{}; offset < size; offset += big_page_size) {
124 const GPUVAddr current_gpu_addr = gpu_addr + offset; 164 const GPUVAddr current_gpu_addr = gpu_addr + offset;
125 [[maybe_unused]] const auto current_entry_type = GetEntry<true>(current_gpu_addr); 165 [[maybe_unused]] const auto current_entry_type = GetEntry<true>(current_gpu_addr);
126 SetEntry<true>(current_gpu_addr, entry_type); 166 SetEntry<true>(current_gpu_addr, entry_type);
167 SetKind<true>(current_gpu_addr, kind);
127 if (current_entry_type != entry_type) { 168 if (current_entry_type != entry_type) {
128 rasterizer->ModifyGPUMemory(unique_identifier, gpu_addr, big_page_size); 169 rasterizer->ModifyGPUMemory(unique_identifier, gpu_addr, big_page_size);
129 } 170 }
@@ -159,19 +200,19 @@ void MemoryManager::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_)
159 rasterizer = rasterizer_; 200 rasterizer = rasterizer_;
160} 201}
161 202
162GPUVAddr MemoryManager::Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size, 203GPUVAddr MemoryManager::Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size, PTEKind kind,
163 bool is_big_pages) { 204 bool is_big_pages) {
164 if (is_big_pages) [[likely]] { 205 if (is_big_pages) [[likely]] {
165 return BigPageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size); 206 return BigPageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size, kind);
166 } 207 }
167 return PageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size); 208 return PageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size, kind);
168} 209}
169 210
170GPUVAddr MemoryManager::MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages) { 211GPUVAddr MemoryManager::MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages) {
171 if (is_big_pages) [[likely]] { 212 if (is_big_pages) [[likely]] {
172 return BigPageTableOp<EntryType::Reserved>(gpu_addr, 0, size); 213 return BigPageTableOp<EntryType::Reserved>(gpu_addr, 0, size, PTEKind::INVALID);
173 } 214 }
174 return PageTableOp<EntryType::Reserved>(gpu_addr, 0, size); 215 return PageTableOp<EntryType::Reserved>(gpu_addr, 0, size, PTEKind::INVALID);
175} 216}
176 217
177void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) { 218void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
@@ -188,8 +229,8 @@ void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
188 rasterizer->UnmapMemory(*cpu_addr, map_size); 229 rasterizer->UnmapMemory(*cpu_addr, map_size);
189 } 230 }
190 231
191 BigPageTableOp<EntryType::Free>(gpu_addr, 0, size); 232 BigPageTableOp<EntryType::Free>(gpu_addr, 0, size, PTEKind::INVALID);
192 PageTableOp<EntryType::Free>(gpu_addr, 0, size); 233 PageTableOp<EntryType::Free>(gpu_addr, 0, size, PTEKind::INVALID);
193} 234}
194 235
195std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const { 236std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const {
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index f992e29f3..ab4bc9ec6 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -11,6 +11,7 @@
11#include "common/common_types.h" 11#include "common/common_types.h"
12#include "common/multi_level_page_table.h" 12#include "common/multi_level_page_table.h"
13#include "common/virtual_buffer.h" 13#include "common/virtual_buffer.h"
14#include "video_core/pte_kind.h"
14 15
15namespace VideoCore { 16namespace VideoCore {
16class RasterizerInterface; 17class RasterizerInterface;
@@ -98,7 +99,8 @@ public:
98 std::vector<std::pair<GPUVAddr, std::size_t>> GetSubmappedRange(GPUVAddr gpu_addr, 99 std::vector<std::pair<GPUVAddr, std::size_t>> GetSubmappedRange(GPUVAddr gpu_addr,
99 std::size_t size) const; 100 std::size_t size) const;
100 101
101 GPUVAddr Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size, bool is_big_pages = true); 102 GPUVAddr Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size,
103 PTEKind kind = PTEKind::INVALID, bool is_big_pages = true);
102 GPUVAddr MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages = true); 104 GPUVAddr MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages = true);
103 void Unmap(GPUVAddr gpu_addr, std::size_t size); 105 void Unmap(GPUVAddr gpu_addr, std::size_t size);
104 106
@@ -114,6 +116,8 @@ public:
114 return gpu_addr < address_space_size; 116 return gpu_addr < address_space_size;
115 } 117 }
116 118
119 PTEKind GetPageKind(GPUVAddr gpu_addr) const;
120
117private: 121private:
118 template <bool is_big_pages, typename FuncMapped, typename FuncReserved, typename FuncUnmapped> 122 template <bool is_big_pages, typename FuncMapped, typename FuncReserved, typename FuncUnmapped>
119 inline void MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size, FuncMapped&& func_mapped, 123 inline void MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size, FuncMapped&& func_mapped,
@@ -166,10 +170,12 @@ private:
166 std::vector<u64> big_entries; 170 std::vector<u64> big_entries;
167 171
168 template <EntryType entry_type> 172 template <EntryType entry_type>
169 GPUVAddr PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size); 173 GPUVAddr PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size,
174 PTEKind kind);
170 175
171 template <EntryType entry_type> 176 template <EntryType entry_type>
172 GPUVAddr BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size); 177 GPUVAddr BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size,
178 PTEKind kind);
173 179
174 template <bool is_big_page> 180 template <bool is_big_page>
175 inline EntryType GetEntry(size_t position) const; 181 inline EntryType GetEntry(size_t position) const;
@@ -177,6 +183,15 @@ private:
177 template <bool is_big_page> 183 template <bool is_big_page>
178 inline void SetEntry(size_t position, EntryType entry); 184 inline void SetEntry(size_t position, EntryType entry);
179 185
186 std::vector<std::array<PTEKind, 32>> kinds;
187 std::vector<std::array<PTEKind, 32>> big_kinds;
188
189 template <bool is_big_page>
190 inline PTEKind GetKind(size_t position) const;
191
192 template <bool is_big_page>
193 inline void SetKind(size_t position, PTEKind kind);
194
180 Common::MultiLevelPageTable<u32> page_table; 195 Common::MultiLevelPageTable<u32> page_table;
181 Common::VirtualBuffer<u32> big_page_table_cpu; 196 Common::VirtualBuffer<u32> big_page_table_cpu;
182 197
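Because the new kind parameter is defaulted, existing Map() callers compile unchanged while mapping code can forward the PTE kind it decoded. Example call sites (sketch only, assuming a MemoryManager instance named memory_manager):

    memory_manager.Map(gpu_addr, cpu_addr, size);                 // kind = PTEKind::INVALID
    memory_manager.Map(gpu_addr, cpu_addr, size, PTEKind::PITCH); // explicit kind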
diff --git a/src/video_core/pte_kind.h b/src/video_core/pte_kind.h
new file mode 100644
index 000000000..591d7214b
--- /dev/null
+++ b/src/video_core/pte_kind.h
@@ -0,0 +1,264 @@
1// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "common/common_types.h"
7
8namespace Tegra {
9
10// https://github.com/NVIDIA/open-gpu-doc/blob/master/manuals/volta/gv100/dev_mmu.ref.txt
11enum class PTEKind : u8 {
12 INVALID = 0xff,
13 PITCH = 0x00,
14 Z16 = 0x01,
15 Z16_2C = 0x02,
16 Z16_MS2_2C = 0x03,
17 Z16_MS4_2C = 0x04,
18 Z16_MS8_2C = 0x05,
19 Z16_MS16_2C = 0x06,
20 Z16_2Z = 0x07,
21 Z16_MS2_2Z = 0x08,
22 Z16_MS4_2Z = 0x09,
23 Z16_MS8_2Z = 0x0a,
24 Z16_MS16_2Z = 0x0b,
25 Z16_2CZ = 0x36,
26 Z16_MS2_2CZ = 0x37,
27 Z16_MS4_2CZ = 0x38,
28 Z16_MS8_2CZ = 0x39,
29 Z16_MS16_2CZ = 0x5f,
30 Z16_4CZ = 0x0c,
31 Z16_MS2_4CZ = 0x0d,
32 Z16_MS4_4CZ = 0x0e,
33 Z16_MS8_4CZ = 0x0f,
34 Z16_MS16_4CZ = 0x10,
35 S8Z24 = 0x11,
36 S8Z24_1Z = 0x12,
37 S8Z24_MS2_1Z = 0x13,
38 S8Z24_MS4_1Z = 0x14,
39 S8Z24_MS8_1Z = 0x15,
40 S8Z24_MS16_1Z = 0x16,
41 S8Z24_2CZ = 0x17,
42 S8Z24_MS2_2CZ = 0x18,
43 S8Z24_MS4_2CZ = 0x19,
44 S8Z24_MS8_2CZ = 0x1a,
45 S8Z24_MS16_2CZ = 0x1b,
46 S8Z24_2CS = 0x1c,
47 S8Z24_MS2_2CS = 0x1d,
48 S8Z24_MS4_2CS = 0x1e,
49 S8Z24_MS8_2CS = 0x1f,
50 S8Z24_MS16_2CS = 0x20,
51 S8Z24_4CSZV = 0x21,
52 S8Z24_MS2_4CSZV = 0x22,
53 S8Z24_MS4_4CSZV = 0x23,
54 S8Z24_MS8_4CSZV = 0x24,
55 S8Z24_MS16_4CSZV = 0x25,
56 V8Z24_MS4_VC12 = 0x26,
57 V8Z24_MS4_VC4 = 0x27,
58 V8Z24_MS8_VC8 = 0x28,
59 V8Z24_MS8_VC24 = 0x29,
60 V8Z24_MS4_VC12_1ZV = 0x2e,
61 V8Z24_MS4_VC4_1ZV = 0x2f,
62 V8Z24_MS8_VC8_1ZV = 0x30,
63 V8Z24_MS8_VC24_1ZV = 0x31,
64 V8Z24_MS4_VC12_2CS = 0x32,
65 V8Z24_MS4_VC4_2CS = 0x33,
66 V8Z24_MS8_VC8_2CS = 0x34,
67 V8Z24_MS8_VC24_2CS = 0x35,
68 V8Z24_MS4_VC12_2CZV = 0x3a,
69 V8Z24_MS4_VC4_2CZV = 0x3b,
70 V8Z24_MS8_VC8_2CZV = 0x3c,
71 V8Z24_MS8_VC24_2CZV = 0x3d,
72 V8Z24_MS4_VC12_2ZV = 0x3e,
73 V8Z24_MS4_VC4_2ZV = 0x3f,
74 V8Z24_MS8_VC8_2ZV = 0x40,
75 V8Z24_MS8_VC24_2ZV = 0x41,
76 V8Z24_MS4_VC12_4CSZV = 0x42,
77 V8Z24_MS4_VC4_4CSZV = 0x43,
78 V8Z24_MS8_VC8_4CSZV = 0x44,
79 V8Z24_MS8_VC24_4CSZV = 0x45,
80 Z24S8 = 0x46,
81 Z24S8_1Z = 0x47,
82 Z24S8_MS2_1Z = 0x48,
83 Z24S8_MS4_1Z = 0x49,
84 Z24S8_MS8_1Z = 0x4a,
85 Z24S8_MS16_1Z = 0x4b,
86 Z24S8_2CS = 0x4c,
87 Z24S8_MS2_2CS = 0x4d,
88 Z24S8_MS4_2CS = 0x4e,
89 Z24S8_MS8_2CS = 0x4f,
90 Z24S8_MS16_2CS = 0x50,
91 Z24S8_2CZ = 0x51,
92 Z24S8_MS2_2CZ = 0x52,
93 Z24S8_MS4_2CZ = 0x53,
94 Z24S8_MS8_2CZ = 0x54,
95 Z24S8_MS16_2CZ = 0x55,
96 Z24S8_4CSZV = 0x56,
97 Z24S8_MS2_4CSZV = 0x57,
98 Z24S8_MS4_4CSZV = 0x58,
99 Z24S8_MS8_4CSZV = 0x59,
100 Z24S8_MS16_4CSZV = 0x5a,
101 Z24V8_MS4_VC12 = 0x5b,
102 Z24V8_MS4_VC4 = 0x5c,
103 Z24V8_MS8_VC8 = 0x5d,
104 Z24V8_MS8_VC24 = 0x5e,
105 YUV_B8C1_2Y = 0x60,
106 YUV_B8C2_2Y = 0x61,
107 YUV_B10C1_2Y = 0x62,
108 YUV_B10C2_2Y = 0x6b,
109 YUV_B12C1_2Y = 0x6c,
110 YUV_B12C2_2Y = 0x6d,
111 Z24V8_MS4_VC12_1ZV = 0x63,
112 Z24V8_MS4_VC4_1ZV = 0x64,
113 Z24V8_MS8_VC8_1ZV = 0x65,
114 Z24V8_MS8_VC24_1ZV = 0x66,
115 Z24V8_MS4_VC12_2CS = 0x67,
116 Z24V8_MS4_VC4_2CS = 0x68,
117 Z24V8_MS8_VC8_2CS = 0x69,
118 Z24V8_MS8_VC24_2CS = 0x6a,
119 Z24V8_MS4_VC12_2CZV = 0x6f,
120 Z24V8_MS4_VC4_2CZV = 0x70,
121 Z24V8_MS8_VC8_2CZV = 0x71,
122 Z24V8_MS8_VC24_2CZV = 0x72,
123 Z24V8_MS4_VC12_2ZV = 0x73,
124 Z24V8_MS4_VC4_2ZV = 0x74,
125 Z24V8_MS8_VC8_2ZV = 0x75,
126 Z24V8_MS8_VC24_2ZV = 0x76,
127 Z24V8_MS4_VC12_4CSZV = 0x77,
128 Z24V8_MS4_VC4_4CSZV = 0x78,
129 Z24V8_MS8_VC8_4CSZV = 0x79,
130 Z24V8_MS8_VC24_4CSZV = 0x7a,
131 ZF32 = 0x7b,
132 ZF32_1Z = 0x7c,
133 ZF32_MS2_1Z = 0x7d,
134 ZF32_MS4_1Z = 0x7e,
135 ZF32_MS8_1Z = 0x7f,
136 ZF32_MS16_1Z = 0x80,
137 ZF32_2CS = 0x81,
138 ZF32_MS2_2CS = 0x82,
139 ZF32_MS4_2CS = 0x83,
140 ZF32_MS8_2CS = 0x84,
141 ZF32_MS16_2CS = 0x85,
142 ZF32_2CZ = 0x86,
143 ZF32_MS2_2CZ = 0x87,
144 ZF32_MS4_2CZ = 0x88,
145 ZF32_MS8_2CZ = 0x89,
146 ZF32_MS16_2CZ = 0x8a,
147 X8Z24_X16V8S8_MS4_VC12 = 0x8b,
148 X8Z24_X16V8S8_MS4_VC4 = 0x8c,
149 X8Z24_X16V8S8_MS8_VC8 = 0x8d,
150 X8Z24_X16V8S8_MS8_VC24 = 0x8e,
151 X8Z24_X16V8S8_MS4_VC12_1CS = 0x8f,
152 X8Z24_X16V8S8_MS4_VC4_1CS = 0x90,
153 X8Z24_X16V8S8_MS8_VC8_1CS = 0x91,
154 X8Z24_X16V8S8_MS8_VC24_1CS = 0x92,
155 X8Z24_X16V8S8_MS4_VC12_1ZV = 0x97,
156 X8Z24_X16V8S8_MS4_VC4_1ZV = 0x98,
157 X8Z24_X16V8S8_MS8_VC8_1ZV = 0x99,
158 X8Z24_X16V8S8_MS8_VC24_1ZV = 0x9a,
159 X8Z24_X16V8S8_MS4_VC12_1CZV = 0x9b,
160 X8Z24_X16V8S8_MS4_VC4_1CZV = 0x9c,
161 X8Z24_X16V8S8_MS8_VC8_1CZV = 0x9d,
162 X8Z24_X16V8S8_MS8_VC24_1CZV = 0x9e,
163 X8Z24_X16V8S8_MS4_VC12_2CS = 0x9f,
164 X8Z24_X16V8S8_MS4_VC4_2CS = 0xa0,
165 X8Z24_X16V8S8_MS8_VC8_2CS = 0xa1,
166 X8Z24_X16V8S8_MS8_VC24_2CS = 0xa2,
167 X8Z24_X16V8S8_MS4_VC12_2CSZV = 0xa3,
168 X8Z24_X16V8S8_MS4_VC4_2CSZV = 0xa4,
169 X8Z24_X16V8S8_MS8_VC8_2CSZV = 0xa5,
170 X8Z24_X16V8S8_MS8_VC24_2CSZV = 0xa6,
171 ZF32_X16V8S8_MS4_VC12 = 0xa7,
172 ZF32_X16V8S8_MS4_VC4 = 0xa8,
173 ZF32_X16V8S8_MS8_VC8 = 0xa9,
174 ZF32_X16V8S8_MS8_VC24 = 0xaa,
175 ZF32_X16V8S8_MS4_VC12_1CS = 0xab,
176 ZF32_X16V8S8_MS4_VC4_1CS = 0xac,
177 ZF32_X16V8S8_MS8_VC8_1CS = 0xad,
178 ZF32_X16V8S8_MS8_VC24_1CS = 0xae,
179 ZF32_X16V8S8_MS4_VC12_1ZV = 0xb3,
180 ZF32_X16V8S8_MS4_VC4_1ZV = 0xb4,
181 ZF32_X16V8S8_MS8_VC8_1ZV = 0xb5,
182 ZF32_X16V8S8_MS8_VC24_1ZV = 0xb6,
183 ZF32_X16V8S8_MS4_VC12_1CZV = 0xb7,
184 ZF32_X16V8S8_MS4_VC4_1CZV = 0xb8,
185 ZF32_X16V8S8_MS8_VC8_1CZV = 0xb9,
186 ZF32_X16V8S8_MS8_VC24_1CZV = 0xba,
187 ZF32_X16V8S8_MS4_VC12_2CS = 0xbb,
188 ZF32_X16V8S8_MS4_VC4_2CS = 0xbc,
189 ZF32_X16V8S8_MS8_VC8_2CS = 0xbd,
190 ZF32_X16V8S8_MS8_VC24_2CS = 0xbe,
191 ZF32_X16V8S8_MS4_VC12_2CSZV = 0xbf,
192 ZF32_X16V8S8_MS4_VC4_2CSZV = 0xc0,
193 ZF32_X16V8S8_MS8_VC8_2CSZV = 0xc1,
194 ZF32_X16V8S8_MS8_VC24_2CSZV = 0xc2,
195 ZF32_X24S8 = 0xc3,
196 ZF32_X24S8_1CS = 0xc4,
197 ZF32_X24S8_MS2_1CS = 0xc5,
198 ZF32_X24S8_MS4_1CS = 0xc6,
199 ZF32_X24S8_MS8_1CS = 0xc7,
200 ZF32_X24S8_MS16_1CS = 0xc8,
201 ZF32_X24S8_2CSZV = 0xce,
202 ZF32_X24S8_MS2_2CSZV = 0xcf,
203 ZF32_X24S8_MS4_2CSZV = 0xd0,
204 ZF32_X24S8_MS8_2CSZV = 0xd1,
205 ZF32_X24S8_MS16_2CSZV = 0xd2,
206 ZF32_X24S8_2CS = 0xd3,
207 ZF32_X24S8_MS2_2CS = 0xd4,
208 ZF32_X24S8_MS4_2CS = 0xd5,
209 ZF32_X24S8_MS8_2CS = 0xd6,
210 ZF32_X24S8_MS16_2CS = 0xd7,
211 S8 = 0x2a,
212 S8_2S = 0x2b,
213 GENERIC_16BX2 = 0xfe,
214 C32_2C = 0xd8,
215 C32_2CBR = 0xd9,
216 C32_2CBA = 0xda,
217 C32_2CRA = 0xdb,
218 C32_2BRA = 0xdc,
219 C32_MS2_2C = 0xdd,
220 C32_MS2_2CBR = 0xde,
221 C32_MS2_4CBRA = 0xcc,
222 C32_MS4_2C = 0xdf,
223 C32_MS4_2CBR = 0xe0,
224 C32_MS4_2CBA = 0xe1,
225 C32_MS4_2CRA = 0xe2,
226 C32_MS4_2BRA = 0xe3,
227 C32_MS4_4CBRA = 0x2c,
228 C32_MS8_MS16_2C = 0xe4,
229 C32_MS8_MS16_2CRA = 0xe5,
230 C64_2C = 0xe6,
231 C64_2CBR = 0xe7,
232 C64_2CBA = 0xe8,
233 C64_2CRA = 0xe9,
234 C64_2BRA = 0xea,
235 C64_MS2_2C = 0xeb,
236 C64_MS2_2CBR = 0xec,
237 C64_MS2_4CBRA = 0xcd,
238 C64_MS4_2C = 0xed,
239 C64_MS4_2CBR = 0xee,
240 C64_MS4_2CBA = 0xef,
241 C64_MS4_2CRA = 0xf0,
242 C64_MS4_2BRA = 0xf1,
243 C64_MS4_4CBRA = 0x2d,
244 C64_MS8_MS16_2C = 0xf2,
245 C64_MS8_MS16_2CRA = 0xf3,
246 C128_2C = 0xf4,
247 C128_2CR = 0xf5,
248 C128_MS2_2C = 0xf6,
249 C128_MS2_2CR = 0xf7,
250 C128_MS4_2C = 0xf8,
251 C128_MS4_2CR = 0xf9,
252 C128_MS8_MS16_2C = 0xfa,
253 C128_MS8_MS16_2CR = 0xfb,
254 X8C24 = 0xfc,
255 PITCH_NO_SWIZZLE = 0xfd,
256 SMSKED_MESSAGE = 0xca,
257 SMHOST_MESSAGE = 0xcb,
258};
259
260constexpr bool IsPitchKind(PTEKind kind) {
261 return kind == PTEKind::PITCH || kind == PTEKind::PITCH_NO_SWIZZLE;
262}
263
264} // namespace Tegra
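A quick exercise of the helper at the bottom of the new header: only the two pitch kinds report as pitch-linear, everything else is treated as block linear. (Enum trimmed to three values for the sketch.)

    #include <cstdint>
    #include <cstdio>

    enum class PTEKind : std::uint8_t { PITCH = 0x00, Z16 = 0x01, PITCH_NO_SWIZZLE = 0xfd };

    constexpr bool IsPitchKind(PTEKind kind) {
        return kind == PTEKind::PITCH || kind == PTEKind::PITCH_NO_SWIZZLE;
    }

    int main() {
        std::printf("%d %d %d\n", IsPitchKind(PTEKind::PITCH),
                    IsPitchKind(PTEKind::Z16),
                    IsPitchKind(PTEKind::PITCH_NO_SWIZZLE)); // prints 1 0 1
        return 0;
    }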
diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h
index d2d40884c..1cbfef090 100644
--- a/src/video_core/rasterizer_interface.h
+++ b/src/video_core/rasterizer_interface.h
@@ -40,7 +40,7 @@ public:
40 virtual ~RasterizerInterface() = default; 40 virtual ~RasterizerInterface() = default;
41 41
42 /// Dispatches a draw invocation 42 /// Dispatches a draw invocation
43 virtual void Draw(bool is_indexed, bool is_instanced) = 0; 43 virtual void Draw(bool is_indexed, u32 instance_count) = 0;
44 44
45 /// Clear the current framebuffer 45 /// Clear the current framebuffer
46 virtual void Clear() = 0; 46 virtual void Clear() = 0;
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index e5c09a969..1590b21de 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -205,7 +205,7 @@ void RasterizerOpenGL::Clear() {
205 ++num_queued_commands; 205 ++num_queued_commands;
206} 206}
207 207
208void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) { 208void RasterizerOpenGL::Draw(bool is_indexed, u32 instance_count) {
209 MICROPROFILE_SCOPE(OpenGL_Drawing); 209 MICROPROFILE_SCOPE(OpenGL_Drawing);
210 210
211 SCOPE_EXIT({ gpu.TickWork(); }); 211 SCOPE_EXIT({ gpu.TickWork(); });
@@ -222,14 +222,15 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) {
222 pipeline->SetEngine(maxwell3d, gpu_memory); 222 pipeline->SetEngine(maxwell3d, gpu_memory);
223 pipeline->Configure(is_indexed); 223 pipeline->Configure(is_indexed);
224 224
225 BindInlineIndexBuffer();
226
225 SyncState(); 227 SyncState();
226 228
227 const GLenum primitive_mode = MaxwellToGL::PrimitiveTopology(maxwell3d->regs.draw.topology); 229 const GLenum primitive_mode = MaxwellToGL::PrimitiveTopology(maxwell3d->regs.draw.topology);
228 BeginTransformFeedback(pipeline, primitive_mode); 230 BeginTransformFeedback(pipeline, primitive_mode);
229 231
230 const GLuint base_instance = static_cast<GLuint>(maxwell3d->regs.global_base_instance_index); 232 const GLuint base_instance = static_cast<GLuint>(maxwell3d->regs.global_base_instance_index);
231 const GLsizei num_instances = 233 const GLsizei num_instances = static_cast<GLsizei>(instance_count);
232 static_cast<GLsizei>(is_instanced ? maxwell3d->mme_draw.instance_count : 1);
233 if (is_indexed) { 234 if (is_indexed) {
234 const GLint base_vertex = static_cast<GLint>(maxwell3d->regs.global_base_vertex_index); 235 const GLint base_vertex = static_cast<GLint>(maxwell3d->regs.global_base_vertex_index);
235 const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d->regs.index_buffer.count); 236 const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d->regs.index_buffer.count);
@@ -1129,6 +1130,16 @@ void RasterizerOpenGL::ReleaseChannel(s32 channel_id) {
1129 query_cache.EraseChannel(channel_id); 1130 query_cache.EraseChannel(channel_id);
1130} 1131}
1131 1132
1133void RasterizerOpenGL::BindInlineIndexBuffer() {
1134 if (maxwell3d->inline_index_draw_indexes.empty()) {
1135 return;
1136 }
1137 const auto data_count = static_cast<u32>(maxwell3d->inline_index_draw_indexes.size());
1138 auto buffer = Buffer(buffer_cache_runtime, *this, 0, data_count);
1139 buffer.ImmediateUpload(0, maxwell3d->inline_index_draw_indexes);
1140 buffer_cache_runtime.BindIndexBuffer(buffer, 0, data_count);
1141}
1142
1132AccelerateDMA::AccelerateDMA(BufferCache& buffer_cache_) : buffer_cache{buffer_cache_} {} 1143AccelerateDMA::AccelerateDMA(BufferCache& buffer_cache_) : buffer_cache{buffer_cache_} {}
1133 1144
1134bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) { 1145bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) {
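Both backends gain the same early-out shape for inline index draws: do nothing unless the engine recorded inline indexes, otherwise upload them and bind the result as the index buffer. The pattern with stub types standing in for the buffer-cache runtimes:

    #include <cstdint>
    #include <vector>

    struct Runtime {
        void Upload(const std::vector<std::uint8_t>&) {}
        void BindIndexBuffer(std::uint32_t /*count*/) {}
    };

    void BindInlineIndexBuffer(Runtime& runtime,
                               const std::vector<std::uint8_t>& inline_indexes) {
        if (inline_indexes.empty()) {
            return; // no inline-index draw pending
        }
        const auto data_count = static_cast<std::uint32_t>(inline_indexes.size());
        runtime.Upload(inline_indexes);
        runtime.BindIndexBuffer(data_count);
    }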
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index 45131b785..793e0d608 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -68,7 +68,7 @@ public:
68 StateTracker& state_tracker_); 68 StateTracker& state_tracker_);
69 ~RasterizerOpenGL() override; 69 ~RasterizerOpenGL() override;
70 70
71 void Draw(bool is_indexed, bool is_instanced) override; 71 void Draw(bool is_indexed, u32 instance_count) override;
72 void Clear() override; 72 void Clear() override;
73 void DispatchCompute() override; 73 void DispatchCompute() override;
74 void ResetCounter(VideoCore::QueryType type) override; 74 void ResetCounter(VideoCore::QueryType type) override;
@@ -199,6 +199,8 @@ private:
199 /// End a transform feedback 199 /// End a transform feedback
200 void EndTransformFeedback(); 200 void EndTransformFeedback();
201 201
202 void BindInlineIndexBuffer();
203
202 Tegra::GPU& gpu; 204 Tegra::GPU& gpu;
203 205
204 const Device& device; 206 const Device& device;
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index 609f0a772..e94cfdb1a 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -63,6 +63,7 @@ Shader::RuntimeInfo MakeRuntimeInfo(const GraphicsPipelineKey& key,
63 Shader::RuntimeInfo info; 63 Shader::RuntimeInfo info;
64 if (previous_program) { 64 if (previous_program) {
65 info.previous_stage_stores = previous_program->info.stores; 65 info.previous_stage_stores = previous_program->info.stores;
66 info.previous_stage_legacy_stores_mapping = previous_program->info.legacy_stores_mapping;
66 } else { 67 } else {
67 // Mark all stores as available for vertex shaders 68 // Mark all stores as available for vertex shaders
68 info.previous_stage_stores.mask.set(); 69 info.previous_stage_stores.mask.set();
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index 20f1d6584..13d5a1f67 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -134,6 +134,7 @@ Shader::RuntimeInfo MakeRuntimeInfo(std::span<const Shader::IR::Program> program
134 Shader::RuntimeInfo info; 134 Shader::RuntimeInfo info;
135 if (previous_program) { 135 if (previous_program) {
136 info.previous_stage_stores = previous_program->info.stores; 136 info.previous_stage_stores = previous_program->info.stores;
137 info.previous_stage_legacy_stores_mapping = previous_program->info.legacy_stores_mapping;
137 if (previous_program->is_geometry_passthrough) { 138 if (previous_program->is_geometry_passthrough) {
138 info.previous_stage_stores.mask |= previous_program->info.passthrough.mask; 139 info.previous_stage_stores.mask |= previous_program->info.passthrough.mask;
139 } 140 }
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp
index 7cb02631c..4b15c0f85 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp
@@ -59,10 +59,11 @@ void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) {
59 std::find_if(pools.begin(), pools.end(), [query_pool = query.first](vk::QueryPool& pool) { 59 std::find_if(pools.begin(), pools.end(), [query_pool = query.first](vk::QueryPool& pool) {
60 return query_pool == *pool; 60 return query_pool == *pool;
61 }); 61 });
62 ASSERT(it != std::end(pools));
63 62
64 const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it); 63 if (it != std::end(pools)) {
65 usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false; 64 const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it);
65 usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false;
66 }
66} 67}
67 68
68QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_, 69QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_,
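Reserve() now tolerates a query pool that is no longer tracked instead of asserting. The guarded-lookup pattern on its own:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    void MarkFree(std::vector<int>& pools, std::vector<bool>& usage, int pool) {
        const auto it = std::find_if(pools.begin(), pools.end(),
                                     [pool](int p) { return p == pool; });
        if (it == pools.end()) {
            return; // pool already gone; nothing to release
        }
        usage[static_cast<std::size_t>(it - pools.begin())] = false;
    }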
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 47dfb45a1..6ab68892c 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -127,11 +127,10 @@ VkRect2D GetScissorState(const Maxwell& regs, size_t index, u32 up_scale = 1, u3
127 return scissor; 127 return scissor;
128} 128}
129 129
130DrawParams MakeDrawParams(const Maxwell& regs, u32 num_instances, bool is_instanced, 130DrawParams MakeDrawParams(const Maxwell& regs, u32 num_instances, bool is_indexed) {
131 bool is_indexed) {
132 DrawParams params{ 131 DrawParams params{
133 .base_instance = regs.global_base_instance_index, 132 .base_instance = regs.global_base_instance_index,
134 .num_instances = is_instanced ? num_instances : 1, 133 .num_instances = num_instances,
135 .base_vertex = is_indexed ? regs.global_base_vertex_index : regs.vertex_buffer.first, 134 .base_vertex = is_indexed ? regs.global_base_vertex_index : regs.vertex_buffer.first,
136 .num_vertices = is_indexed ? regs.index_buffer.count : regs.vertex_buffer.count, 135 .num_vertices = is_indexed ? regs.index_buffer.count : regs.vertex_buffer.count,
137 .first_index = is_indexed ? regs.index_buffer.first : 0, 136 .first_index = is_indexed ? regs.index_buffer.first : 0,
@@ -177,7 +176,7 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra
177 176
178RasterizerVulkan::~RasterizerVulkan() = default; 177RasterizerVulkan::~RasterizerVulkan() = default;
179 178
180void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) { 179void RasterizerVulkan::Draw(bool is_indexed, u32 instance_count) {
181 MICROPROFILE_SCOPE(Vulkan_Drawing); 180 MICROPROFILE_SCOPE(Vulkan_Drawing);
182 181
183 SCOPE_EXIT({ gpu.TickWork(); }); 182 SCOPE_EXIT({ gpu.TickWork(); });
@@ -194,13 +193,15 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
194 pipeline->SetEngine(maxwell3d, gpu_memory); 193 pipeline->SetEngine(maxwell3d, gpu_memory);
195 pipeline->Configure(is_indexed); 194 pipeline->Configure(is_indexed);
196 195
196 BindInlineIndexBuffer();
197
197 BeginTransformFeedback(); 198 BeginTransformFeedback();
198 199
199 UpdateDynamicStates(); 200 UpdateDynamicStates();
200 201
201 const auto& regs{maxwell3d->regs}; 202 const auto& regs{maxwell3d->regs};
202 const u32 num_instances{maxwell3d->mme_draw.instance_count}; 203 const u32 num_instances{instance_count};
203 const DrawParams draw_params{MakeDrawParams(regs, num_instances, is_instanced, is_indexed)}; 204 const DrawParams draw_params{MakeDrawParams(regs, num_instances, is_indexed)};
204 scheduler.Record([draw_params](vk::CommandBuffer cmdbuf) { 205 scheduler.Record([draw_params](vk::CommandBuffer cmdbuf) {
205 if (draw_params.is_indexed) { 206 if (draw_params.is_indexed) {
206 cmdbuf.DrawIndexed(draw_params.num_vertices, draw_params.num_instances, 207 cmdbuf.DrawIndexed(draw_params.num_vertices, draw_params.num_instances,
@@ -304,14 +305,19 @@ void RasterizerVulkan::Clear() {
304 } 305 }
305 } 306 }
306 307
307 scheduler.Record([color_attachment, clear_value, clear_rect](vk::CommandBuffer cmdbuf) { 308 if (regs.clear_surface.R && regs.clear_surface.G && regs.clear_surface.B &&
308 const VkClearAttachment attachment{ 309 regs.clear_surface.A) {
309 .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, 310 scheduler.Record([color_attachment, clear_value, clear_rect](vk::CommandBuffer cmdbuf) {
310 .colorAttachment = color_attachment, 311 const VkClearAttachment attachment{
311 .clearValue = clear_value, 312 .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
312 }; 313 .colorAttachment = color_attachment,
313 cmdbuf.ClearAttachments(attachment, clear_rect); 314 .clearValue = clear_value,
314 }); 315 };
316 cmdbuf.ClearAttachments(attachment, clear_rect);
317 });
318 } else {
319 UNIMPLEMENTED_MSG("Unimplemented Clear only the specified channel");
320 }
315 } 321 }
316 322
317 if (!use_depth && !use_stencil) { 323 if (!use_depth && !use_stencil) {
@@ -1009,4 +1015,17 @@ void RasterizerVulkan::ReleaseChannel(s32 channel_id) {
1009 query_cache.EraseChannel(channel_id); 1015 query_cache.EraseChannel(channel_id);
1010} 1016}
1011 1017
1018void RasterizerVulkan::BindInlineIndexBuffer() {
1019 if (maxwell3d->inline_index_draw_indexes.empty()) {
1020 return;
1021 }
1022 const auto data_count = static_cast<u32>(maxwell3d->inline_index_draw_indexes.size());
1023 auto buffer = buffer_cache_runtime.UploadStagingBuffer(data_count);
1024 std::memcpy(buffer.mapped_span.data(), maxwell3d->inline_index_draw_indexes.data(), data_count);
1025 buffer_cache_runtime.BindIndexBuffer(
1026 maxwell3d->regs.draw.topology, maxwell3d->regs.index_buffer.format,
1027 maxwell3d->regs.index_buffer.first, maxwell3d->regs.index_buffer.count, buffer.buffer,
1028 static_cast<u32>(buffer.offset), data_count);
1029}
1030
1012} // namespace Vulkan 1031} // namespace Vulkan
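The Clear() change only forwards a clear to vkCmdClearAttachments when every color channel is selected; a partial write mask would need per-channel handling and is flagged unimplemented instead. The guard by itself:

    struct ClearSurface {
        bool R, G, B, A;
    };

    // vkCmdClearAttachments always writes all channels, so it is only valid
    // for a full RGBA clear.
    constexpr bool CanUseClearAttachments(const ClearSurface& s) {
        return s.R && s.G && s.B && s.A;
    }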
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index 4cde3c983..e2fdc7611 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -64,7 +64,7 @@ public:
64 StateTracker& state_tracker_, Scheduler& scheduler_); 64 StateTracker& state_tracker_, Scheduler& scheduler_);
65 ~RasterizerVulkan() override; 65 ~RasterizerVulkan() override;
66 66
67 void Draw(bool is_indexed, bool is_instanced) override; 67 void Draw(bool is_indexed, u32 instance_count) override;
68 void Clear() override; 68 void Clear() override;
69 void DispatchCompute() override; 69 void DispatchCompute() override;
70 void ResetCounter(VideoCore::QueryType type) override; 70 void ResetCounter(VideoCore::QueryType type) override;
@@ -141,6 +141,8 @@ private:
141 141
142 void UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs); 142 void UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs);
143 143
144 void BindInlineIndexBuffer();
145
144 Tegra::GPU& gpu; 146 Tegra::GPU& gpu;
145 147
146 ScreenInfo& screen_info; 148 ScreenInfo& screen_info;
diff --git a/src/video_core/texture_cache/format_lookup_table.cpp b/src/video_core/texture_cache/format_lookup_table.cpp
index ad935d386..08aa8ca33 100644
--- a/src/video_core/texture_cache/format_lookup_table.cpp
+++ b/src/video_core/texture_cache/format_lookup_table.cpp
@@ -150,6 +150,8 @@ PixelFormat PixelFormatFromTextureInfo(TextureFormat format, ComponentType red,
150 return PixelFormat::D24_UNORM_S8_UINT; 150 return PixelFormat::D24_UNORM_S8_UINT;
151 case Hash(TextureFormat::D32S8, FLOAT, UINT, UNORM, UNORM, LINEAR): 151 case Hash(TextureFormat::D32S8, FLOAT, UINT, UNORM, UNORM, LINEAR):
152 return PixelFormat::D32_FLOAT_S8_UINT; 152 return PixelFormat::D32_FLOAT_S8_UINT;
153 case Hash(TextureFormat::R32_B24G8, FLOAT, UINT, UNORM, UNORM, LINEAR):
154 return PixelFormat::D32_FLOAT_S8_UINT;
153 case Hash(TextureFormat::BC1_RGBA, UNORM, LINEAR): 155 case Hash(TextureFormat::BC1_RGBA, UNORM, LINEAR):
154 return PixelFormat::BC1_RGBA_UNORM; 156 return PixelFormat::BC1_RGBA_UNORM;
155 case Hash(TextureFormat::BC1_RGBA, UNORM, SRGB): 157 case Hash(TextureFormat::BC1_RGBA, UNORM, SRGB):
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 0e0fd410f..8ef75fe73 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -442,7 +442,7 @@ void TextureCache<P>::WriteMemory(VAddr cpu_addr, size_t size) {
442template <class P> 442template <class P>
443void TextureCache<P>::DownloadMemory(VAddr cpu_addr, size_t size) { 443void TextureCache<P>::DownloadMemory(VAddr cpu_addr, size_t size) {
444 std::vector<ImageId> images; 444 std::vector<ImageId> images;
445 ForEachImageInRegion(cpu_addr, size, [this, &images](ImageId image_id, ImageBase& image) { 445 ForEachImageInRegion(cpu_addr, size, [&images](ImageId image_id, ImageBase& image) {
446 if (!image.IsSafeDownload()) { 446 if (!image.IsSafeDownload()) {
447 return; 447 return;
448 } 448 }
@@ -1502,9 +1502,9 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
1502 image.flags &= ~ImageFlagBits::BadOverlap; 1502 image.flags &= ~ImageFlagBits::BadOverlap;
1503 lru_cache.Free(image.lru_index); 1503 lru_cache.Free(image.lru_index);
1504 const auto& clear_page_table = 1504 const auto& clear_page_table =
1505 [this, image_id](u64 page, 1505 [image_id](u64 page,
1506 std::unordered_map<u64, std::vector<ImageId>, Common::IdentityHash<u64>>& 1506 std::unordered_map<u64, std::vector<ImageId>, Common::IdentityHash<u64>>&
1507 selected_page_table) { 1507 selected_page_table) {
1508 const auto page_it = selected_page_table.find(page); 1508 const auto page_it = selected_page_table.find(page);
1509 if (page_it == selected_page_table.end()) { 1509 if (page_it == selected_page_table.end()) {
1510 ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << YUZU_PAGEBITS); 1510 ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << YUZU_PAGEBITS);
diff --git a/src/video_core/textures/astc.cpp b/src/video_core/textures/astc.cpp
index 15b9d4182..69a32819a 100644
--- a/src/video_core/textures/astc.cpp
+++ b/src/video_core/textures/astc.cpp
@@ -1661,8 +1661,8 @@ void Decompress(std::span<const uint8_t> data, uint32_t width, uint32_t height,
1661 for (u32 z = 0; z < depth; ++z) { 1661 for (u32 z = 0; z < depth; ++z) {
1662 const u32 depth_offset = z * height * width * 4; 1662 const u32 depth_offset = z * height * width * 4;
1663 for (u32 y_index = 0; y_index < rows; ++y_index) { 1663 for (u32 y_index = 0; y_index < rows; ++y_index) {
1664 auto decompress_stride = [data, width, height, depth, block_width, block_height, output, 1664 auto decompress_stride = [data, width, height, block_width, block_height, output, rows,
1665 rows, cols, z, depth_offset, y_index] { 1665 cols, z, depth_offset, y_index] {
1666 const u32 y = y_index * block_height; 1666 const u32 y = y_index * block_height;
1667 for (u32 x_index = 0; x_index < cols; ++x_index) { 1667 for (u32 x_index = 0; x_index < cols; ++x_index) {
1668 const u32 block_index = (z * rows * cols) + (y_index * cols) + x_index; 1668 const u32 block_index = (z * rows * cols) + (y_index * cols) + x_index;
diff --git a/src/video_core/textures/decoders.cpp b/src/video_core/textures/decoders.cpp
index 52d067a2d..fd1a4b987 100644
--- a/src/video_core/textures/decoders.cpp
+++ b/src/video_core/textures/decoders.cpp
@@ -21,7 +21,7 @@ constexpr u32 pdep(u32 value) {
21 u32 m = mask; 21 u32 m = mask;
22 for (u32 bit = 1; m; bit += bit) { 22 for (u32 bit = 1; m; bit += bit) {
23 if (value & bit) 23 if (value & bit)
24 result |= m & -m; 24 result |= m & (~m + 1);
25 m &= m - 1; 25 m &= m - 1;
26 } 26 }
27 return result; 27 return result;
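The pdep tweak swaps m & -m for m & (~m + 1): identical lowest-set-bit extraction, written without negating an unsigned operand (which some compilers warn on). Worked example:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const std::uint32_t m = 0b101100;          // 0x2c
        const std::uint32_t lowest = m & (~m + 1); // two's-complement negate by hand
        std::printf("0x%x\n", lowest);             // prints 0x4
        return 0;
    }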
diff --git a/src/yuzu/applets/qt_controller.ui b/src/yuzu/applets/qt_controller.ui
index c8cb6bcf3..f5eccba70 100644
--- a/src/yuzu/applets/qt_controller.ui
+++ b/src/yuzu/applets/qt_controller.ui
@@ -2300,7 +2300,7 @@
2300 <item> 2300 <item>
2301 <widget class="QRadioButton" name="radioUndocked"> 2301 <widget class="QRadioButton" name="radioUndocked">
2302 <property name="text"> 2302 <property name="text">
2303 <string>Undocked</string> 2303 <string>Handheld</string>
2304 </property> 2304 </property>
2305 </widget> 2305 </widget>
2306 </item> 2306 </item>
diff --git a/src/yuzu/bootmanager.cpp b/src/yuzu/bootmanager.cpp
index 24251247d..6acfb7b06 100644
--- a/src/yuzu/bootmanager.cpp
+++ b/src/yuzu/bootmanager.cpp
@@ -120,8 +120,8 @@ void EmuThread::run() {
120 } 120 }
121 } 121 }
122 122
123 // Shutdown the core emulation 123 // Shutdown the main emulated process
124 system.Shutdown(); 124 system.ShutdownMainProcess();
125 125
126#if MICROPROFILE_ENABLED 126#if MICROPROFILE_ENABLED
127 MicroProfileOnThreadExit(); 127 MicroProfileOnThreadExit();
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index a94624be6..7b16d7f7e 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -294,6 +294,7 @@ GMainWindow::GMainWindow(std::unique_ptr<Config> config_, bool has_broken_vulkan
294#ifdef __linux__ 294#ifdef __linux__
295 SetupSigInterrupts(); 295 SetupSigInterrupts();
296#endif 296#endif
297 system->Initialize();
297 298
298 Common::Log::Initialize(); 299 Common::Log::Initialize();
299 LoadTranslation(); 300 LoadTranslation();
@@ -1895,6 +1896,8 @@ void GMainWindow::OnGameListOpenFolder(u64 program_id, GameListOpenTarget target
1895 case GameListOpenTarget::SaveData: { 1896 case GameListOpenTarget::SaveData: {
1896 open_target = tr("Save Data"); 1897 open_target = tr("Save Data");
1897 const auto nand_dir = Common::FS::GetYuzuPath(Common::FS::YuzuPath::NANDDir); 1898 const auto nand_dir = Common::FS::GetYuzuPath(Common::FS::YuzuPath::NANDDir);
1899 auto vfs_nand_dir =
1900 vfs->OpenDirectory(Common::FS::PathToUTF8String(nand_dir), FileSys::Mode::Read);
1898 1901
1899 if (has_user_save) { 1902 if (has_user_save) {
1900 // User save data 1903 // User save data
@@ -1921,15 +1924,15 @@ void GMainWindow::OnGameListOpenFolder(u64 program_id, GameListOpenTarget target
1921 ASSERT(user_id); 1924 ASSERT(user_id);
1922 1925
1923 const auto user_save_data_path = FileSys::SaveDataFactory::GetFullPath( 1926 const auto user_save_data_path = FileSys::SaveDataFactory::GetFullPath(
1924 *system, FileSys::SaveDataSpaceId::NandUser, FileSys::SaveDataType::SaveData, 1927 *system, vfs_nand_dir, FileSys::SaveDataSpaceId::NandUser,
1925 program_id, user_id->AsU128(), 0); 1928 FileSys::SaveDataType::SaveData, program_id, user_id->AsU128(), 0);
1926 1929
1927 path = Common::FS::ConcatPathSafe(nand_dir, user_save_data_path); 1930 path = Common::FS::ConcatPathSafe(nand_dir, user_save_data_path);
1928 } else { 1931 } else {
1929 // Device save data 1932 // Device save data
1930 const auto device_save_data_path = FileSys::SaveDataFactory::GetFullPath( 1933 const auto device_save_data_path = FileSys::SaveDataFactory::GetFullPath(
1931 *system, FileSys::SaveDataSpaceId::NandUser, FileSys::SaveDataType::SaveData, 1934 *system, vfs_nand_dir, FileSys::SaveDataSpaceId::NandUser,
1932 program_id, {}, 0); 1935 FileSys::SaveDataType::SaveData, program_id, {}, 0);
1933 1936
1934 path = Common::FS::ConcatPathSafe(nand_dir, device_save_data_path); 1937 path = Common::FS::ConcatPathSafe(nand_dir, device_save_data_path);
1935 } 1938 }
diff --git a/src/yuzu/multiplayer/state.cpp b/src/yuzu/multiplayer/state.cpp
index ae2738ad4..285bb150d 100644
--- a/src/yuzu/multiplayer/state.cpp
+++ b/src/yuzu/multiplayer/state.cpp
@@ -268,7 +268,7 @@ bool MultiplayerState::OnCloseRoom() {
268 return true; 268 return true;
269 } 269 }
270 // Save ban list 270 // Save ban list
271 UISettings::values.multiplayer_ban_list = std::move(room->GetBanList()); 271 UISettings::values.multiplayer_ban_list = room->GetBanList();
272 272
273 room->Destroy(); 273 room->Destroy();
274 announce_multiplayer_session->Stop(); 274 announce_multiplayer_session->Stop();
diff --git a/src/yuzu/startup_checks.cpp b/src/yuzu/startup_checks.cpp
index fc2693f9d..6a91212e2 100644
--- a/src/yuzu/startup_checks.cpp
+++ b/src/yuzu/startup_checks.cpp
@@ -49,7 +49,7 @@ bool CheckEnvVars(bool* is_child) {
49 *is_child = true; 49 *is_child = true;
50 return false; 50 return false;
51 } else if (!SetEnvironmentVariableA(IS_CHILD_ENV_VAR, ENV_VAR_ENABLED_TEXT)) { 51 } else if (!SetEnvironmentVariableA(IS_CHILD_ENV_VAR, ENV_VAR_ENABLED_TEXT)) {
52 std::fprintf(stderr, "SetEnvironmentVariableA failed to set %s with error %d\n", 52 std::fprintf(stderr, "SetEnvironmentVariableA failed to set %s with error %lu\n",
53 IS_CHILD_ENV_VAR, GetLastError()); 53 IS_CHILD_ENV_VAR, GetLastError());
54 return true; 54 return true;
55 } 55 }
@@ -62,7 +62,7 @@ bool StartupChecks(const char* arg0, bool* has_broken_vulkan, bool perform_vulka
62 // Set the startup variable for child processes 62 // Set the startup variable for child processes
63 const bool env_var_set = SetEnvironmentVariableA(STARTUP_CHECK_ENV_VAR, ENV_VAR_ENABLED_TEXT); 63 const bool env_var_set = SetEnvironmentVariableA(STARTUP_CHECK_ENV_VAR, ENV_VAR_ENABLED_TEXT);
64 if (!env_var_set) { 64 if (!env_var_set) {
65 std::fprintf(stderr, "SetEnvironmentVariableA failed to set %s with error %d\n", 65 std::fprintf(stderr, "SetEnvironmentVariableA failed to set %s with error %lu\n",
66 STARTUP_CHECK_ENV_VAR, GetLastError()); 66 STARTUP_CHECK_ENV_VAR, GetLastError());
67 return false; 67 return false;
68 } 68 }
@@ -81,22 +81,22 @@ bool StartupChecks(const char* arg0, bool* has_broken_vulkan, bool perform_vulka
81 DWORD exit_code = STILL_ACTIVE; 81 DWORD exit_code = STILL_ACTIVE;
82 const int err = GetExitCodeProcess(process_info.hProcess, &exit_code); 82 const int err = GetExitCodeProcess(process_info.hProcess, &exit_code);
83 if (err == 0) { 83 if (err == 0) {
84 std::fprintf(stderr, "GetExitCodeProcess failed with error %d\n", GetLastError()); 84 std::fprintf(stderr, "GetExitCodeProcess failed with error %lu\n", GetLastError());
85 } 85 }
86 86
87 // Vulkan is broken if the child crashed (return value is not zero) 87 // Vulkan is broken if the child crashed (return value is not zero)
88 *has_broken_vulkan = (exit_code != 0); 88 *has_broken_vulkan = (exit_code != 0);
89 89
90 if (CloseHandle(process_info.hProcess) == 0) { 90 if (CloseHandle(process_info.hProcess) == 0) {
91 std::fprintf(stderr, "CloseHandle failed with error %d\n", GetLastError()); 91 std::fprintf(stderr, "CloseHandle failed with error %lu\n", GetLastError());
92 } 92 }
93 if (CloseHandle(process_info.hThread) == 0) { 93 if (CloseHandle(process_info.hThread) == 0) {
94 std::fprintf(stderr, "CloseHandle failed with error %d\n", GetLastError()); 94 std::fprintf(stderr, "CloseHandle failed with error %lu\n", GetLastError());
95 } 95 }
96 } 96 }
97 97
98 if (!SetEnvironmentVariableA(STARTUP_CHECK_ENV_VAR, nullptr)) { 98 if (!SetEnvironmentVariableA(STARTUP_CHECK_ENV_VAR, nullptr)) {
99 std::fprintf(stderr, "SetEnvironmentVariableA failed to clear %s with error %d\n", 99 std::fprintf(stderr, "SetEnvironmentVariableA failed to clear %s with error %lu\n",
100 STARTUP_CHECK_ENV_VAR, GetLastError()); 100 STARTUP_CHECK_ENV_VAR, GetLastError());
101 } 101 }
102 102
@@ -135,7 +135,8 @@ bool SpawnChild(const char* arg0, PROCESS_INFORMATION* pi, int flags) {
135 startup_info.cb = sizeof(startup_info); 135 startup_info.cb = sizeof(startup_info);
136 136
137 char p_name[255]; 137 char p_name[255];
138 std::strncpy(p_name, arg0, 255); 138 std::strncpy(p_name, arg0, 254);
139 p_name[254] = '\0';
139 140
140 const bool process_created = CreateProcessA(nullptr, // lpApplicationName 141 const bool process_created = CreateProcessA(nullptr, // lpApplicationName
141 p_name, // lpCommandLine 142 p_name, // lpCommandLine
@@ -149,7 +150,7 @@ bool SpawnChild(const char* arg0, PROCESS_INFORMATION* pi, int flags) {
149 pi // lpProcessInformation 150 pi // lpProcessInformation
150 ); 151 );
151 if (!process_created) { 152 if (!process_created) {
152 std::fprintf(stderr, "CreateProcessA failed with error %d\n", GetLastError()); 153 std::fprintf(stderr, "CreateProcessA failed with error %lu\n", GetLastError());
153 return false; 154 return false;
154 } 155 }
155 156
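The strncpy change matters because strncpy(dst, src, n) leaves dst without a terminator whenever strlen(src) >= n; copying n-1 bytes and terminating by hand avoids reading past the buffer later. (The companion %d-to-%lu changes simply match DWORD, which is unsigned long on Windows.) In miniature:

    #include <cstdio>
    #include <cstring>

    int main() {
        char buf[8];
        std::strncpy(buf, "a_long_program_name", sizeof(buf) - 1);
        buf[sizeof(buf) - 1] = '\0'; // explicit terminator after truncation
        std::puts(buf);              // prints "a_long_"
        return 0;
    }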
diff --git a/src/yuzu_cmd/yuzu.cpp b/src/yuzu_cmd/yuzu.cpp
index 3a0f33cba..e16f79eb4 100644
--- a/src/yuzu_cmd/yuzu.cpp
+++ b/src/yuzu_cmd/yuzu.cpp
@@ -302,6 +302,8 @@ int main(int argc, char** argv) {
302 } 302 }
303 303
304 Core::System system{}; 304 Core::System system{};
305 system.Initialize();
306
305 InputCommon::InputSubsystem input_subsystem{}; 307 InputCommon::InputSubsystem input_subsystem{};
306 308
307 // Apply the command line arguments 309 // Apply the command line arguments
@@ -392,7 +394,7 @@ int main(int argc, char** argv) {
392 } 394 }
393 system.DetachDebugger(); 395 system.DetachDebugger();
394 void(system.Pause()); 396 void(system.Pause());
395 system.Shutdown(); 397 system.ShutdownMainProcess();
396 398
397 detached_tasks.WaitForAllTasks(); 399 detached_tasks.WaitForAllTasks();
398 return 0; 400 return 0;
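Both frontends now follow the same lifecycle: construct the system, call Initialize() before use, and tear down with ShutdownMainProcess() rather than a whole-system Shutdown(). The ordering with a stub type (illustrative only):

    struct System {
        void Initialize() {}
        void ShutdownMainProcess() {}
    };

    int main() {
        System system{};
        system.Initialize();          // required before the frontend touches it
        // ... load and run ...
        system.ShutdownMainProcess(); // replaces system.Shutdown()
        return 0;
    }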