path: root/src
Diffstat (limited to 'src')
-rw-r--r--  src/audio_core/in/audio_in_system.cpp | 6
-rw-r--r--  src/audio_core/out/audio_out_system.cpp | 6
-rw-r--r--  src/audio_core/renderer/system.cpp | 89
-rw-r--r--  src/audio_core/renderer/system.h | 16
-rw-r--r--  src/audio_core/renderer/voice/voice_context.cpp | 4
-rw-r--r--  src/audio_core/sink/cubeb_sink.cpp | 31
-rw-r--r--  src/audio_core/sink/cubeb_sink.h | 7
-rw-r--r--  src/audio_core/sink/sdl2_sink.cpp | 14
-rw-r--r--  src/audio_core/sink/sdl2_sink.h | 7
-rw-r--r--  src/audio_core/sink/sink_details.cpp | 66
-rw-r--r--  src/audio_core/sink/sink_details.h | 2
-rw-r--r--  src/common/CMakeLists.txt | 6
-rw-r--r--  src/common/concepts.h | 8
-rw-r--r--  src/common/fixed_point.h | 274
-rw-r--r--  src/common/settings.h | 2
-rw-r--r--  src/core/CMakeLists.txt | 7
-rw-r--r--  src/core/arm/arm_interface.cpp | 8
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_64.cpp | 1
-rw-r--r--  src/core/core.cpp | 92
-rw-r--r--  src/core/core.h | 10
-rw-r--r--  src/core/core_timing.cpp | 38
-rw-r--r--  src/core/core_timing.h | 14
-rw-r--r--  src/core/device_memory.h | 10
-rw-r--r--  src/core/file_sys/savedata_factory.cpp | 58
-rw-r--r--  src/core/file_sys/savedata_factory.h | 4
-rw-r--r--  src/core/frontend/framebuffer_layout.cpp | 2
-rw-r--r--  src/core/frontend/framebuffer_layout.h | 1
-rw-r--r--  src/core/hid/irs_types.h | 20
-rw-r--r--  src/core/hle/ipc_helpers.h | 14
-rw-r--r--  src/core/hle/kernel/hle_ipc.cpp | 110
-rw-r--r--  src/core/hle/kernel/hle_ipc.h | 11
-rw-r--r--  src/core/hle/kernel/init/init_slab_setup.cpp | 8
-rw-r--r--  src/core/hle/kernel/k_class_token.cpp | 12
-rw-r--r--  src/core/hle/kernel/k_class_token.h | 1
-rw-r--r--  src/core/hle/kernel/k_client_session.cpp | 18
-rw-r--r--  src/core/hle/kernel/k_client_session.h | 3
-rw-r--r--  src/core/hle/kernel/k_code_memory.cpp | 2
-rw-r--r--  src/core/hle/kernel/k_dynamic_page_manager.h | 136
-rw-r--r--  src/core/hle/kernel/k_dynamic_resource_manager.h | 58
-rw-r--r--  src/core/hle/kernel/k_dynamic_slab_heap.h | 122
-rw-r--r--  src/core/hle/kernel/k_event.cpp | 44
-rw-r--r--  src/core/hle/kernel/k_event.h | 31
-rw-r--r--  src/core/hle/kernel/k_interrupt_manager.cpp | 29
-rw-r--r--  src/core/hle/kernel/k_interrupt_manager.h | 4
-rw-r--r--  src/core/hle/kernel/k_linked_list.h | 1
-rw-r--r--  src/core/hle/kernel/k_memory_block.h | 506
-rw-r--r--  src/core/hle/kernel/k_memory_block_manager.cpp | 409
-rw-r--r--  src/core/hle/kernel/k_memory_block_manager.h | 145
-rw-r--r--  src/core/hle/kernel/k_memory_manager.cpp | 2
-rw-r--r--  src/core/hle/kernel/k_page_buffer.cpp | 2
-rw-r--r--  src/core/hle/kernel/k_page_buffer.h | 1
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp | 1302
-rw-r--r--  src/core/hle/kernel/k_page_table.h | 319
-rw-r--r--  src/core/hle/kernel/k_process.cpp | 112
-rw-r--r--  src/core/hle/kernel/k_process.h | 83
-rw-r--r--  src/core/hle/kernel/k_readable_event.cpp | 33
-rw-r--r--  src/core/hle/kernel/k_readable_event.h | 17
-rw-r--r--  src/core/hle/kernel/k_server_session.cpp | 343
-rw-r--r--  src/core/hle/kernel/k_server_session.h | 62
-rw-r--r--  src/core/hle/kernel/k_session_request.cpp | 61
-rw-r--r--  src/core/hle/kernel/k_session_request.h | 306
-rw-r--r--  src/core/hle/kernel/k_shared_memory.cpp | 2
-rw-r--r--  src/core/hle/kernel/k_shared_memory.h | 4
-rw-r--r--  src/core/hle/kernel/k_shared_memory_info.h | 3
-rw-r--r--  src/core/hle/kernel/k_slab_heap.h | 27
-rw-r--r--  src/core/hle/kernel/k_thread.cpp | 115
-rw-r--r--  src/core/hle/kernel/k_thread.h | 4
-rw-r--r--  src/core/hle/kernel/k_thread_local_page.h | 2
-rw-r--r--  src/core/hle/kernel/k_writable_event.cpp | 35
-rw-r--r--  src/core/hle/kernel/k_writable_event.h | 39
-rw-r--r--  src/core/hle/kernel/kernel.cpp | 68
-rw-r--r--  src/core/hle/kernel/kernel.h | 18
-rw-r--r--  src/core/hle/kernel/slab_helpers.h | 2
-rw-r--r--  src/core/hle/kernel/svc.cpp | 201
-rw-r--r--  src/core/hle/kernel/svc_common.h | 7
-rw-r--r--  src/core/hle/kernel/svc_types.h | 13
-rw-r--r--  src/core/hle/kernel/svc_wrap.h | 32
-rw-r--r--  src/core/hle/result.h | 130
-rw-r--r--  src/core/hle/service/acc/async_context.cpp | 2
-rw-r--r--  src/core/hle/service/am/am.cpp | 12
-rw-r--r--  src/core/hle/service/am/applets/applets.cpp | 10
-rw-r--r--  src/core/hle/service/audio/audctl.cpp | 16
-rw-r--r--  src/core/hle/service/audio/audren_u.cpp | 30
-rw-r--r--  src/core/hle/service/bcat/backend/backend.cpp | 2
-rw-r--r--  src/core/hle/service/friend/friend.cpp | 13
-rw-r--r--  src/core/hle/service/hid/controllers/npad.cpp | 10
-rw-r--r--  src/core/hle/service/hid/controllers/palma.cpp | 16
-rw-r--r--  src/core/hle/service/hid/hid.cpp | 2
-rw-r--r--  src/core/hle/service/hid/hidbus/ringcon.cpp | 8
-rw-r--r--  src/core/hle/service/hid/irsensor/pointing_processor.h | 4
-rw-r--r--  src/core/hle/service/kernel_helpers.cpp | 5
-rw-r--r--  src/core/hle/service/ldn/ldn.cpp | 2
-rw-r--r--  src/core/hle/service/ldr/ldr.cpp | 4
-rw-r--r--  src/core/hle/service/nfp/nfp_device.cpp | 6
-rw-r--r--  src/core/hle/service/nim/nim.cpp | 4
-rw-r--r--  src/core/hle/service/ns/ns.cpp | 30
-rw-r--r--  src/core/hle/service/ns/ns.h | 3
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp | 12
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp | 5
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvmap.cpp | 3
-rw-r--r--  src/core/hle/service/nvdrv/nvdrv.cpp | 1
-rw-r--r--  src/core/hle/service/nvdrv/nvdrv_interface.h | 4
-rw-r--r--  src/core/hle/service/nvflinger/buffer_queue_producer.cpp | 9
-rw-r--r--  src/core/hle/service/nvflinger/buffer_queue_producer.h | 1
-rw-r--r--  src/core/hle/service/nvflinger/nvflinger.h | 1
-rw-r--r--  src/core/hle/service/ptm/psm.cpp | 6
-rw-r--r--  src/core/hle/service/ptm/ts.cpp | 15
-rw-r--r--  src/core/hle/service/ptm/ts.h | 1
-rw-r--r--  src/core/hle/service/set/set_sys.cpp | 79
-rw-r--r--  src/core/hle/service/set/set_sys.h | 2
-rw-r--r--  src/core/hle/service/sm/sm.cpp | 3
-rw-r--r--  src/core/hle/service/sm/sm_controller.cpp | 5
-rw-r--r--  src/core/hle/service/time/system_clock_context_update_callback.cpp | 10
-rw-r--r--  src/core/hle/service/time/system_clock_context_update_callback.h | 6
-rw-r--r--  src/core/hle/service/vi/display/vi_display.cpp | 3
-rw-r--r--  src/core/hle/service/vi/vi_results.h | 2
-rw-r--r--  src/core/memory.cpp | 6
-rw-r--r--  src/tests/core/core_timing.cpp | 3
-rw-r--r--  src/video_core/CMakeLists.txt | 1
-rw-r--r--  src/video_core/dirty_flags.cpp | 2
-rw-r--r--  src/video_core/engines/maxwell_3d.cpp | 8
-rw-r--r--  src/video_core/engines/maxwell_3d.h | 26
-rw-r--r--  src/video_core/engines/maxwell_dma.cpp | 127
-rw-r--r--  src/video_core/engines/maxwell_dma.h | 2
-rw-r--r--  src/video_core/host1x/syncpoint_manager.cpp | 12
-rw-r--r--  src/video_core/memory_manager.cpp | 61
-rw-r--r--  src/video_core/memory_manager.h | 21
-rw-r--r--  src/video_core/pte_kind.h | 264
-rw-r--r--  src/video_core/renderer_base.cpp | 8
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.cpp | 17
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_cache.cpp | 4
-rw-r--r--  src/video_core/renderer_opengl/gl_state_tracker.cpp | 14
-rw-r--r--  src/video_core/renderer_vulkan/fixed_pipeline_state.cpp | 10
-rw-r--r--  src/video_core/renderer_vulkan/vk_query_cache.cpp | 7
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp | 14
-rw-r--r--  src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp | 84
-rw-r--r--  src/video_core/renderer_vulkan/vk_staging_buffer_pool.h | 3
-rw-r--r--  src/video_core/renderer_vulkan/vk_state_tracker.cpp | 12
-rw-r--r--  src/video_core/texture_cache/descriptor_table.h | 2
-rw-r--r--  src/video_core/texture_cache/format_lookup_table.cpp | 2
-rw-r--r--  src/video_core/texture_cache/texture_cache.h | 8
-rw-r--r--  src/yuzu/applets/qt_controller.ui | 2
-rw-r--r--  src/yuzu/bootmanager.cpp | 4
-rw-r--r--  src/yuzu/configuration/configure_audio.cpp | 4
-rw-r--r--  src/yuzu/configuration/configure_graphics.ui | 5
-rw-r--r--  src/yuzu/main.cpp | 13
-rw-r--r--  src/yuzu_cmd/yuzu.cpp | 4
147 files changed, 4789 insertions, 2120 deletions
diff --git a/src/audio_core/in/audio_in_system.cpp b/src/audio_core/in/audio_in_system.cpp
index e7f918a47..6b7e6715c 100644
--- a/src/audio_core/in/audio_in_system.cpp
+++ b/src/audio_core/in/audio_in_system.cpp
@@ -23,7 +23,7 @@ System::~System() {
 void System::Finalize() {
     Stop();
     session->Finalize();
-    buffer_event->GetWritableEvent().Signal();
+    buffer_event->Signal();
 }
 
 void System::StartSession() {
@@ -142,7 +142,7 @@ void System::ReleaseBuffers() {
 
     if (signal) {
         // Signal if any buffer was released, or if none are registered, we need more.
-        buffer_event->GetWritableEvent().Signal();
+        buffer_event->Signal();
     }
 }
 
@@ -159,7 +159,7 @@ bool System::FlushAudioInBuffers() {
     buffers.FlushBuffers(buffers_released);
 
     if (buffers_released > 0) {
-        buffer_event->GetWritableEvent().Signal();
+        buffer_event->Signal();
     }
     return true;
 }
diff --git a/src/audio_core/out/audio_out_system.cpp b/src/audio_core/out/audio_out_system.cpp
index 8b907590a..48a801923 100644
--- a/src/audio_core/out/audio_out_system.cpp
+++ b/src/audio_core/out/audio_out_system.cpp
@@ -24,7 +24,7 @@ System::~System() {
 void System::Finalize() {
     Stop();
     session->Finalize();
-    buffer_event->GetWritableEvent().Signal();
+    buffer_event->Signal();
 }
 
 std::string_view System::GetDefaultOutputDeviceName() const {
@@ -141,7 +141,7 @@ void System::ReleaseBuffers() {
     bool signal{buffers.ReleaseBuffers(system.CoreTiming(), *session)};
     if (signal) {
         // Signal if any buffer was released, or if none are registered, we need more.
-        buffer_event->GetWritableEvent().Signal();
+        buffer_event->Signal();
     }
 }
 
@@ -158,7 +158,7 @@ bool System::FlushAudioOutBuffers() {
     buffers.FlushBuffers(buffers_released);
 
     if (buffers_released > 0) {
-        buffer_event->GetWritableEvent().Signal();
+        buffer_event->Signal();
     }
     return true;
 }
diff --git a/src/audio_core/renderer/system.cpp b/src/audio_core/renderer/system.cpp
index 7a217969e..4fac30c7c 100644
--- a/src/audio_core/renderer/system.cpp
+++ b/src/audio_core/renderer/system.cpp
@@ -98,9 +98,8 @@ System::System(Core::System& core_, Kernel::KEvent* adsp_rendered_event_)
     : core{core_}, adsp{core.AudioCore().GetADSP()}, adsp_rendered_event{adsp_rendered_event_} {}
 
 Result System::Initialize(const AudioRendererParameterInternal& params,
-                          Kernel::KTransferMemory* transfer_memory, const u64 transfer_memory_size,
-                          const u32 process_handle_, const u64 applet_resource_user_id_,
-                          const s32 session_id_) {
+                          Kernel::KTransferMemory* transfer_memory, u64 transfer_memory_size,
+                          u32 process_handle_, u64 applet_resource_user_id_, s32 session_id_) {
     if (!CheckValidRevision(params.revision)) {
         return Service::Audio::ERR_INVALID_REVISION;
     }
@@ -354,6 +353,8 @@ Result System::Initialize(const AudioRendererParameterInternal& params,
 
     render_time_limit_percent = 100;
     drop_voice = params.voice_drop_enabled && params.execution_mode == ExecutionMode::Auto;
+    drop_voice_param = 1.0f;
+    num_voices_dropped = 0;
 
     allocator.Align(0x40);
     command_workbuffer_size = allocator.GetRemainingSize();
@@ -534,7 +535,7 @@ Result System::Update(std::span<const u8> input, std::span<u8> performance, std:
         return result;
     }
 
-    adsp_rendered_event->GetWritableEvent().Clear();
+    adsp_rendered_event->Clear();
     num_times_updated++;
 
     const auto end_time{core.CoreTiming().GetClockTicks()};
@@ -547,7 +548,7 @@ u32 System::GetRenderingTimeLimit() const {
     return render_time_limit_percent;
 }
 
-void System::SetRenderingTimeLimit(const u32 limit) {
+void System::SetRenderingTimeLimit(u32 limit) {
     render_time_limit_percent = limit;
 }
 
@@ -625,7 +626,7 @@ void System::SendCommandToDsp() {
         reset_command_buffers = false;
         command_buffer_size = command_size;
         if (remaining_command_count == 0) {
-            adsp_rendered_event->GetWritableEvent().Signal();
+            adsp_rendered_event->Signal();
         }
     } else {
         adsp.ClearRemainCount(session_id);
@@ -635,7 +636,7 @@ void System::SendCommandToDsp() {
 }
 
 u64 System::GenerateCommand(std::span<u8> in_command_buffer,
-                            [[maybe_unused]] const u64 command_buffer_size_) {
+                            [[maybe_unused]] u64 command_buffer_size_) {
     PoolMapper::ClearUseState(memory_pool_workbuffer, memory_pool_count);
     const auto start_time{core.CoreTiming().GetClockTicks()};
 
@@ -693,7 +694,8 @@ u64 System::GenerateCommand(std::span<u8> in_command_buffer,
 
     voice_context.SortInfo();
 
-    const auto start_estimated_time{command_buffer.estimated_process_time};
+    const auto start_estimated_time{drop_voice_param *
+                                    static_cast<f32>(command_buffer.estimated_process_time)};
 
     command_generator.GenerateVoiceCommands();
     command_generator.GenerateSubMixCommands();
@@ -712,11 +714,16 @@ u64 System::GenerateCommand(std::span<u8> in_command_buffer,
             render_context.behavior->IsAudioRendererProcessingTimeLimit70PercentSupported();
             time_limit_percent = 70.0f;
         }
+
+        const auto end_estimated_time{drop_voice_param *
+                                      static_cast<f32>(command_buffer.estimated_process_time)};
+        const auto estimated_time{start_estimated_time - end_estimated_time};
+
         const auto time_limit{static_cast<u32>(
-            static_cast<f32>(start_estimated_time - command_buffer.estimated_process_time) +
-            (((time_limit_percent / 100.0f) * 2'880'000.0) *
-             (static_cast<f32>(render_time_limit_percent) / 100.0f)))};
-        num_voices_dropped = DropVoices(command_buffer, start_estimated_time, time_limit);
+            estimated_time + (((time_limit_percent / 100.0f) * 2'880'000.0) *
+                              (static_cast<f32>(render_time_limit_percent) / 100.0f)))};
+        num_voices_dropped =
+            DropVoices(command_buffer, static_cast<u32>(start_estimated_time), time_limit);
     }
 
     command_list_header->buffer_size = command_buffer.size;
@@ -737,24 +744,33 @@ u64 System::GenerateCommand(std::span<u8> in_command_buffer,
     return command_buffer.size;
 }
 
-u32 System::DropVoices(CommandBuffer& command_buffer, const u32 estimated_process_time,
-                       const u32 time_limit) {
+f32 System::GetVoiceDropParameter() const {
+    return drop_voice_param;
+}
+
+void System::SetVoiceDropParameter(f32 voice_drop_) {
+    drop_voice_param = voice_drop_;
+}
+
+u32 System::DropVoices(CommandBuffer& command_buffer, u32 estimated_process_time, u32 time_limit) {
     u32 i{0};
     auto command_list{command_buffer.command_list.data() + sizeof(CommandListHeader)};
-    ICommand* cmd{};
+    ICommand* cmd{nullptr};
 
-    for (; i < command_buffer.count; i++) {
+    // Find a first valid voice to drop
+    while (i < command_buffer.count) {
         cmd = reinterpret_cast<ICommand*>(command_list);
-        if (cmd->type != CommandId::Performance &&
-            cmd->type != CommandId::DataSourcePcmInt16Version1 &&
-            cmd->type != CommandId::DataSourcePcmInt16Version2 &&
-            cmd->type != CommandId::DataSourcePcmFloatVersion1 &&
-            cmd->type != CommandId::DataSourcePcmFloatVersion2 &&
-            cmd->type != CommandId::DataSourceAdpcmVersion1 &&
-            cmd->type != CommandId::DataSourceAdpcmVersion2) {
+        if (cmd->type == CommandId::Performance ||
+            cmd->type == CommandId::DataSourcePcmInt16Version1 ||
+            cmd->type == CommandId::DataSourcePcmInt16Version2 ||
+            cmd->type == CommandId::DataSourcePcmFloatVersion1 ||
+            cmd->type == CommandId::DataSourcePcmFloatVersion2 ||
+            cmd->type == CommandId::DataSourceAdpcmVersion1 ||
+            cmd->type == CommandId::DataSourceAdpcmVersion2) {
             break;
         }
         command_list += cmd->size;
+        i++;
     }
 
     if (cmd == nullptr || command_buffer.count == 0 || i >= command_buffer.count) {
@@ -767,6 +783,7 @@ u32 System::DropVoices(CommandBuffer& command_buffer, const u32 estimated_proces
         const auto node_id_type{cmd->node_id >> 28};
         const auto node_id_base{cmd->node_id & 0xFFF};
 
+        // If the new estimated process time falls below the limit, we're done dropping.
         if (estimated_process_time <= time_limit) {
             break;
         }
@@ -775,6 +792,7 @@ u32 System::DropVoices(CommandBuffer& command_buffer, const u32 estimated_proces
             break;
         }
 
+        // Don't drop voices marked with the highest priority.
         auto& voice_info{voice_context.GetInfo(node_id_base)};
         if (voice_info.priority == HighestVoicePriority) {
             break;
@@ -783,18 +801,23 @@ u32 System::DropVoices(CommandBuffer& command_buffer, const u32 estimated_proces
         voices_dropped++;
         voice_info.voice_dropped = true;
 
-        if (i < command_buffer.count) {
-            while (cmd->node_id == node_id) {
-                if (cmd->type == CommandId::DepopPrepare) {
-                    cmd->enabled = true;
-                } else if (cmd->type == CommandId::Performance || !cmd->enabled) {
-                    cmd->enabled = false;
-                }
-                i++;
-                command_list += cmd->size;
-                cmd = reinterpret_cast<ICommand*>(command_list);
+        // First iteration should drop the voice, and then iterate through all of the commands tied
+        // to the voice. We don't need reverb on a voice which we've just removed, for example.
+        // Depops can't be removed otherwise we'll introduce audio popping, and we don't
+        // remove perf commands. Lower the estimated time for each command dropped.
+        while (i < command_buffer.count && cmd->node_id == node_id) {
+            if (cmd->type == CommandId::DepopPrepare) {
+                cmd->enabled = true;
+            } else if (cmd->enabled && cmd->type != CommandId::Performance) {
+                cmd->enabled = false;
+                estimated_process_time -= static_cast<u32>(
+                    drop_voice_param * static_cast<f32>(cmd->estimated_process_time));
             }
+            command_list += cmd->size;
+            cmd = reinterpret_cast<ICommand*>(command_list);
+            i++;
         }
+        i++;
     }
     return voices_dropped;
 }
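
For reference, the new time-limit computation in GenerateCommand() boils down to the following standalone sketch. ComputeDropTimeLimit is a hypothetical name; u32/f32 stand in for yuzu's common type aliases, and the 2'880'000-tick budget is taken verbatim from the diff above:

    #include <cstdint>
    using u32 = std::uint32_t;
    using f32 = float;

    u32 ComputeDropTimeLimit(f32 start_estimated_time, f32 end_estimated_time,
                             f32 time_limit_percent, u32 render_time_limit_percent) {
        // Cost added by this frame's commands, already scaled by drop_voice_param.
        const f32 estimated_time = start_estimated_time - end_estimated_time;
        // Grant a slice of the 2'880'000-tick frame budget, further scaled by the
        // user-configurable rendering time limit percentage.
        return static_cast<u32>(estimated_time +
                                (((time_limit_percent / 100.0f) * 2'880'000.0f) *
                                 (static_cast<f32>(render_time_limit_percent) / 100.0f)));
    }

Voices whose commands push the estimated process time past this limit are then disabled by DropVoices(), lowest priority first.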
diff --git a/src/audio_core/renderer/system.h b/src/audio_core/renderer/system.h
index bcbe65b07..429196e41 100644
--- a/src/audio_core/renderer/system.h
+++ b/src/audio_core/renderer/system.h
@@ -196,6 +196,20 @@ public:
      */
     u32 DropVoices(CommandBuffer& command_buffer, u32 estimated_process_time, u32 time_limit);
 
+    /**
+     * Get the current voice drop parameter.
+     *
+     * @return The current voice drop.
+     */
+    f32 GetVoiceDropParameter() const;
+
+    /**
+     * Set the voice drop parameter.
+     *
+     * @param voice_drop The new voice drop.
+     */
+    void SetVoiceDropParameter(f32 voice_drop);
+
 private:
     /// Core system
     Core::System& core;
@@ -301,6 +315,8 @@ private:
     u32 num_voices_dropped{};
     /// Tick that rendering started
     u64 render_start_tick{};
+    /// Parameter to control the threshold for dropping voices if the audio graph gets too large
+    f32 drop_voice_param{1.0f};
 };
 
 } // namespace AudioRenderer
diff --git a/src/audio_core/renderer/voice/voice_context.cpp b/src/audio_core/renderer/voice/voice_context.cpp
index eafb51b01..a501a677d 100644
--- a/src/audio_core/renderer/voice/voice_context.cpp
+++ b/src/audio_core/renderer/voice/voice_context.cpp
@@ -74,8 +74,8 @@ void VoiceContext::SortInfo() {
     }
 
     std::ranges::sort(sorted_voice_info, [](const VoiceInfo* a, const VoiceInfo* b) {
-        return a->priority != b->priority ? a->priority < b->priority
-                                          : a->sort_order < b->sort_order;
+        return a->priority != b->priority ? a->priority > b->priority
+                                          : a->sort_order > b->sort_order;
     });
 }
 
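
The comparator change above flips the sort to descending order, so voices with larger priority and sort_order values now come first. A minimal sketch, with a hypothetical VoiceStub in place of the real VoiceInfo:

    #include <algorithm>
    #include <vector>

    struct VoiceStub {
        int priority;
        int sort_order;
    };

    void SortVoices(std::vector<VoiceStub*>& voices) {
        // Descending by priority, ties broken by descending sort_order,
        // mirroring the new VoiceContext::SortInfo() comparator.
        std::ranges::sort(voices, [](const VoiceStub* a, const VoiceStub* b) {
            return a->priority != b->priority ? a->priority > b->priority
                                              : a->sort_order > b->sort_order;
        });
    }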
diff --git a/src/audio_core/sink/cubeb_sink.cpp b/src/audio_core/sink/cubeb_sink.cpp
index 36b115ad6..32c1b1cb3 100644
--- a/src/audio_core/sink/cubeb_sink.cpp
+++ b/src/audio_core/sink/cubeb_sink.cpp
@@ -66,10 +66,10 @@ public:
         const auto latency_error = cubeb_get_min_latency(ctx, &params, &minimum_latency);
         if (latency_error != CUBEB_OK) {
             LOG_CRITICAL(Audio_Sink, "Error getting minimum latency, error: {}", latency_error);
-            minimum_latency = 256U;
+            minimum_latency = TargetSampleCount * 2;
         }
 
-        minimum_latency = std::max(minimum_latency, 256u);
+        minimum_latency = std::max(minimum_latency, TargetSampleCount * 2);
 
         LOG_INFO(Service_Audio,
                  "Opening cubeb stream {} type {} with: rate {} channels {} (system channels {}) "
@@ -326,4 +326,31 @@ std::vector<std::string> ListCubebSinkDevices(bool capture) {
     return device_list;
 }
 
+u32 GetCubebLatency() {
+    cubeb* ctx;
+
+    if (cubeb_init(&ctx, "yuzu Latency Getter", nullptr) != CUBEB_OK) {
+        LOG_CRITICAL(Audio_Sink, "cubeb_init failed");
+        // Return a large latency so we choose SDL instead.
+        return 10000u;
+    }
+
+    cubeb_stream_params params{};
+    params.rate = TargetSampleRate;
+    params.channels = 2;
+    params.format = CUBEB_SAMPLE_S16LE;
+    params.prefs = CUBEB_STREAM_PREF_NONE;
+    params.layout = CUBEB_LAYOUT_STEREO;
+
+    u32 latency{0};
+    const auto latency_error = cubeb_get_min_latency(ctx, &params, &latency);
+    if (latency_error != CUBEB_OK) {
+        LOG_CRITICAL(Audio_Sink, "Error getting minimum latency, error: {}", latency_error);
+        latency = TargetSampleCount * 2;
+    }
+    latency = std::max(latency, TargetSampleCount * 2);
+    cubeb_destroy(ctx);
+    return latency;
+}
+
 } // namespace AudioCore::Sink
diff --git a/src/audio_core/sink/cubeb_sink.h b/src/audio_core/sink/cubeb_sink.h
index 4b0cb160d..3302cb98d 100644
--- a/src/audio_core/sink/cubeb_sink.h
+++ b/src/audio_core/sink/cubeb_sink.h
@@ -96,4 +96,11 @@ private:
  */
 std::vector<std::string> ListCubebSinkDevices(bool capture);
 
+/**
+ * Get the reported latency for this sink.
+ *
+ * @return Minimum latency for this sink.
+ */
+u32 GetCubebLatency();
+
 } // namespace AudioCore::Sink
diff --git a/src/audio_core/sink/sdl2_sink.cpp b/src/audio_core/sink/sdl2_sink.cpp
index 1bd001b94..c138dc628 100644
--- a/src/audio_core/sink/sdl2_sink.cpp
+++ b/src/audio_core/sink/sdl2_sink.cpp
@@ -47,11 +47,7 @@ public:
         spec.freq = TargetSampleRate;
         spec.channels = static_cast<u8>(device_channels);
         spec.format = AUDIO_S16SYS;
-        if (type == StreamType::Render) {
-            spec.samples = TargetSampleCount;
-        } else {
-            spec.samples = 1024;
-        }
+        spec.samples = TargetSampleCount * 2;
         spec.callback = &SDLSinkStream::DataCallback;
         spec.userdata = this;
 
@@ -234,10 +230,16 @@ std::vector<std::string> ListSDLSinkDevices(bool capture) {
 
     const int device_count = SDL_GetNumAudioDevices(capture);
     for (int i = 0; i < device_count; ++i) {
-        device_list.emplace_back(SDL_GetAudioDeviceName(i, 0));
+        if (const char* name = SDL_GetAudioDeviceName(i, capture)) {
+            device_list.emplace_back(name);
+        }
     }
 
     return device_list;
 }
 
+u32 GetSDLLatency() {
+    return TargetSampleCount * 2;
+}
+
 } // namespace AudioCore::Sink
diff --git a/src/audio_core/sink/sdl2_sink.h b/src/audio_core/sink/sdl2_sink.h
index f01eddc1b..27ed1ab94 100644
--- a/src/audio_core/sink/sdl2_sink.h
+++ b/src/audio_core/sink/sdl2_sink.h
@@ -87,4 +87,11 @@ private:
  */
 std::vector<std::string> ListSDLSinkDevices(bool capture);
 
+/**
+ * Get the reported latency for this sink.
+ *
+ * @return Minimum latency for this sink.
+ */
+u32 GetSDLLatency();
+
 } // namespace AudioCore::Sink
diff --git a/src/audio_core/sink/sink_details.cpp b/src/audio_core/sink/sink_details.cpp
index 67bdab779..39ea6d91b 100644
--- a/src/audio_core/sink/sink_details.cpp
+++ b/src/audio_core/sink/sink_details.cpp
@@ -21,58 +21,80 @@ namespace {
 struct SinkDetails {
     using FactoryFn = std::unique_ptr<Sink> (*)(std::string_view);
     using ListDevicesFn = std::vector<std::string> (*)(bool);
+    using LatencyFn = u32 (*)();
 
     /// Name for this sink.
-    const char* id;
+    std::string_view id;
     /// A method to call to construct an instance of this type of sink.
     FactoryFn factory;
     /// A method to call to list available devices.
     ListDevicesFn list_devices;
+    /// Method to get the latency of this backend.
+    LatencyFn latency;
 };
 
 // sink_details is ordered in terms of desirability, with the best choice at the top.
 constexpr SinkDetails sink_details[] = {
 #ifdef HAVE_CUBEB
-    SinkDetails{"cubeb",
-                [](std::string_view device_id) -> std::unique_ptr<Sink> {
-                    return std::make_unique<CubebSink>(device_id);
-                },
-                &ListCubebSinkDevices},
+    SinkDetails{
+        "cubeb",
+        [](std::string_view device_id) -> std::unique_ptr<Sink> {
+            return std::make_unique<CubebSink>(device_id);
+        },
+        &ListCubebSinkDevices,
+        &GetCubebLatency,
+    },
 #endif
 #ifdef HAVE_SDL2
-    SinkDetails{"sdl2",
-                [](std::string_view device_id) -> std::unique_ptr<Sink> {
-                    return std::make_unique<SDLSink>(device_id);
-                },
-                &ListSDLSinkDevices},
+    SinkDetails{
+        "sdl2",
+        [](std::string_view device_id) -> std::unique_ptr<Sink> {
+            return std::make_unique<SDLSink>(device_id);
+        },
+        &ListSDLSinkDevices,
+        &GetSDLLatency,
+    },
 #endif
     SinkDetails{"null",
                 [](std::string_view device_id) -> std::unique_ptr<Sink> {
                     return std::make_unique<NullSink>(device_id);
                 },
-                [](bool capture) { return std::vector<std::string>{"null"}; }},
+                [](bool capture) { return std::vector<std::string>{"null"}; }, []() { return 0u; }},
 };
 
 const SinkDetails& GetOutputSinkDetails(std::string_view sink_id) {
-    auto iter =
-        std::find_if(std::begin(sink_details), std::end(sink_details),
-                     [sink_id](const auto& sink_detail) { return sink_detail.id == sink_id; });
+    const auto find_backend{[](std::string_view id) {
+        return std::find_if(std::begin(sink_details), std::end(sink_details),
+                            [&id](const auto& sink_detail) { return sink_detail.id == id; });
+    }};
 
-    if (sink_id == "auto" || iter == std::end(sink_details)) {
-        if (sink_id != "auto") {
-            LOG_ERROR(Audio, "Invalid sink_id {}", sink_id);
+    auto iter = find_backend(sink_id);
+
+    if (sink_id == "auto") {
+        // Auto-select a backend. Prefer CubeB, but it may report a large minimum latency which
+        // causes audio issues, in that case go with SDL.
+#if defined(HAVE_CUBEB) && defined(HAVE_SDL2)
+        iter = find_backend("cubeb");
+        if (iter->latency() > TargetSampleCount * 3) {
+            iter = find_backend("sdl2");
         }
-        // Auto-select.
-        // sink_details is ordered in terms of desirability, with the best choice at the front.
+#else
         iter = std::begin(sink_details);
+#endif
+        LOG_INFO(Service_Audio, "Auto-selecting the {} backend", iter->id);
+    }
+
+    if (iter == std::end(sink_details)) {
+        LOG_ERROR(Audio, "Invalid sink_id {}", sink_id);
+        iter = find_backend("null");
     }
 
     return *iter;
 }
 } // Anonymous namespace
 
-std::vector<const char*> GetSinkIDs() {
-    std::vector<const char*> sink_ids(std::size(sink_details));
+std::vector<std::string_view> GetSinkIDs() {
+    std::vector<std::string_view> sink_ids(std::size(sink_details));
 
     std::transform(std::begin(sink_details), std::end(sink_details), std::begin(sink_ids),
                    [](const auto& sink) { return sink.id; });
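
The auto-selection rule introduced above reduces to a one-liner. A sketch under the assumption that TargetSampleCount is yuzu's 240-sample render granularity (AutoSelectBackend is a hypothetical name, not part of the diff):

    #include <cstdint>
    #include <string_view>

    using u32 = std::uint32_t;
    constexpr u32 TargetSampleCount = 240; // assumption: yuzu's render granularity

    // Prefer cubeb, but fall back to SDL2 when cubeb's reported minimum
    // latency exceeds three target-sample buffers.
    std::string_view AutoSelectBackend(u32 cubeb_min_latency) {
        return cubeb_min_latency > TargetSampleCount * 3 ? "sdl2" : "cubeb";
    }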
diff --git a/src/audio_core/sink/sink_details.h b/src/audio_core/sink/sink_details.h
index 3ebdb1e30..e75932898 100644
--- a/src/audio_core/sink/sink_details.h
+++ b/src/audio_core/sink/sink_details.h
@@ -19,7 +19,7 @@ class Sink;
  *
  * @return Vector of available sink names.
  */
-std::vector<const char*> GetSinkIDs();
+std::vector<std::string_view> GetSinkIDs();
 
 /**
  * Gets the list of devices for a particular sink identified by the given ID.
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index a02696873..46cf75fde 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -169,7 +169,11 @@ endif()
 create_target_directory_groups(common)
 
 target_link_libraries(common PUBLIC ${Boost_LIBRARIES} fmt::fmt microprofile Threads::Threads)
-target_link_libraries(common PRIVATE lz4::lz4)
+if (TARGET lz4::lz4)
+    target_link_libraries(common PRIVATE lz4::lz4)
+else()
+    target_link_libraries(common PRIVATE LZ4::lz4_shared)
+endif()
 if (TARGET zstd::zstd)
     target_link_libraries(common PRIVATE zstd::zstd)
 else()
diff --git a/src/common/concepts.h b/src/common/concepts.h
index a97555f6a..e8ce30dfe 100644
--- a/src/common/concepts.h
+++ b/src/common/concepts.h
@@ -34,4 +34,12 @@ concept DerivedFrom = requires {
 template <typename From, typename To>
 concept ConvertibleTo = std::is_convertible_v<From, To>;
 
+// No equivalents in the stdlib
+
+template <typename T>
+concept IsArithmetic = std::is_arithmetic_v<T>;
+
+template <typename T>
+concept IsIntegral = std::is_integral_v<T>;
+
 } // namespace Common
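
These two concepts let the templates in fixed_point.h below trade std::enable_if boilerplate for a constrained template parameter. A small usage sketch (Scale is a hypothetical function, not part of the diff):

    #include <type_traits>

    template <typename T>
    concept IsArithmetic = std::is_arithmetic_v<T>;

    // Overload participates only for arithmetic types; no enable_if needed.
    template <IsArithmetic Number>
    constexpr Number Scale(Number value, Number factor) {
        return value * factor;
    }

    static_assert(Scale(2.0, 1.5) == 3.0);
    // Scale("2", "3") would fail to compile: const char* is not arithmetic.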
diff --git a/src/common/fixed_point.h b/src/common/fixed_point.h
index 4a0f72cc9..f899b0d54 100644
--- a/src/common/fixed_point.h
+++ b/src/common/fixed_point.h
@@ -4,14 +4,7 @@
 // From: https://github.com/eteran/cpp-utilities/blob/master/fixed/include/cpp-utilities/fixed.h
 // See also: http://stackoverflow.com/questions/79677/whats-the-best-way-to-do-fixed-point-math
 
-#ifndef FIXED_H_
-#define FIXED_H_
-
-#if __cplusplus >= 201402L
-#define CONSTEXPR14 constexpr
-#else
-#define CONSTEXPR14
-#endif
+#pragma once
 
 #include <cstddef> // for size_t
 #include <cstdint>
@@ -19,6 +12,8 @@
 #include <ostream>
 #include <type_traits>
 
+#include <common/concepts.h>
+
 namespace Common {
 
 template <size_t I, size_t F>
@@ -57,8 +52,8 @@ struct type_from_size<64> {
     static constexpr size_t size = 64;
 
     using value_type = int64_t;
-    using unsigned_type = std::make_unsigned<value_type>::type;
-    using signed_type = std::make_signed<value_type>::type;
+    using unsigned_type = std::make_unsigned_t<value_type>;
+    using signed_type = std::make_signed_t<value_type>;
     using next_size = type_from_size<128>;
 };
 
@@ -68,8 +63,8 @@ struct type_from_size<32> {
     static constexpr size_t size = 32;
 
     using value_type = int32_t;
-    using unsigned_type = std::make_unsigned<value_type>::type;
-    using signed_type = std::make_signed<value_type>::type;
+    using unsigned_type = std::make_unsigned_t<value_type>;
+    using signed_type = std::make_signed_t<value_type>;
     using next_size = type_from_size<64>;
 };
 
@@ -79,8 +74,8 @@ struct type_from_size<16> {
     static constexpr size_t size = 16;
 
     using value_type = int16_t;
-    using unsigned_type = std::make_unsigned<value_type>::type;
-    using signed_type = std::make_signed<value_type>::type;
+    using unsigned_type = std::make_unsigned_t<value_type>;
+    using signed_type = std::make_signed_t<value_type>;
     using next_size = type_from_size<32>;
 };
 
@@ -90,8 +85,8 @@ struct type_from_size<8> {
     static constexpr size_t size = 8;
 
     using value_type = int8_t;
-    using unsigned_type = std::make_unsigned<value_type>::type;
-    using signed_type = std::make_signed<value_type>::type;
+    using unsigned_type = std::make_unsigned_t<value_type>;
+    using signed_type = std::make_signed_t<value_type>;
     using next_size = type_from_size<16>;
 };
 
@@ -106,9 +101,9 @@ constexpr B next_to_base(N rhs) {
 struct divide_by_zero : std::exception {};
 
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> divide(
+constexpr FixedPoint<I, F> divide(
     FixedPoint<I, F> numerator, FixedPoint<I, F> denominator, FixedPoint<I, F>& remainder,
-    typename std::enable_if<type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+    std::enable_if_t<type_from_size<I + F>::next_size::is_specialized>* = nullptr) {
 
     using next_type = typename FixedPoint<I, F>::next_type;
     using base_type = typename FixedPoint<I, F>::base_type;
@@ -126,9 +121,9 @@ CONSTEXPR14 FixedPoint<I, F> divide(
 }
 
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> divide(
+constexpr FixedPoint<I, F> divide(
     FixedPoint<I, F> numerator, FixedPoint<I, F> denominator, FixedPoint<I, F>& remainder,
-    typename std::enable_if<!type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+    std::enable_if_t<!type_from_size<I + F>::next_size::is_specialized>* = nullptr) {
 
     using unsigned_type = typename FixedPoint<I, F>::unsigned_type;
 
@@ -196,9 +191,9 @@ CONSTEXPR14 FixedPoint<I, F> divide(
 
 // this is the usual implementation of multiplication
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> multiply(
+constexpr FixedPoint<I, F> multiply(
     FixedPoint<I, F> lhs, FixedPoint<I, F> rhs,
-    typename std::enable_if<type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+    std::enable_if_t<type_from_size<I + F>::next_size::is_specialized>* = nullptr) {
 
     using next_type = typename FixedPoint<I, F>::next_type;
     using base_type = typename FixedPoint<I, F>::base_type;
@@ -215,9 +210,9 @@ CONSTEXPR14 FixedPoint<I, F> multiply(
 // it is slightly slower, but is more robust since it doesn't
 // require and upgraded type
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> multiply(
+constexpr FixedPoint<I, F> multiply(
     FixedPoint<I, F> lhs, FixedPoint<I, F> rhs,
-    typename std::enable_if<!type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+    std::enable_if_t<!type_from_size<I + F>::next_size::is_specialized>* = nullptr) {
 
     using base_type = typename FixedPoint<I, F>::base_type;
 
@@ -272,19 +267,20 @@ public:
     static constexpr base_type one = base_type(1) << fractional_bits;
 
 public: // constructors
-    FixedPoint() = default;
-    FixedPoint(const FixedPoint&) = default;
-    FixedPoint(FixedPoint&&) = default;
-    FixedPoint& operator=(const FixedPoint&) = default;
+    constexpr FixedPoint() = default;
+
+    constexpr FixedPoint(const FixedPoint&) = default;
+    constexpr FixedPoint& operator=(const FixedPoint&) = default;
+
+    constexpr FixedPoint(FixedPoint&&) noexcept = default;
+    constexpr FixedPoint& operator=(FixedPoint&&) noexcept = default;
 
-    template <class Number>
-    constexpr FixedPoint(
-        Number n, typename std::enable_if<std::is_arithmetic<Number>::value>::type* = nullptr)
-        : data_(static_cast<base_type>(n * one)) {}
+    template <IsArithmetic Number>
+    constexpr FixedPoint(Number n) : data_(static_cast<base_type>(n * one)) {}
 
 public: // conversion
     template <size_t I2, size_t F2>
-    CONSTEXPR14 explicit FixedPoint(FixedPoint<I2, F2> other) {
+    constexpr explicit FixedPoint(FixedPoint<I2, F2> other) {
         static_assert(I2 <= I && F2 <= F, "Scaling conversion can only upgrade types");
         using T = FixedPoint<I2, F2>;
 
@@ -308,36 +304,14 @@ public:
     }
 
 public: // comparison operators
-    constexpr bool operator==(FixedPoint rhs) const {
-        return data_ == rhs.data_;
-    }
-
-    constexpr bool operator!=(FixedPoint rhs) const {
-        return data_ != rhs.data_;
-    }
-
-    constexpr bool operator<(FixedPoint rhs) const {
-        return data_ < rhs.data_;
-    }
-
-    constexpr bool operator>(FixedPoint rhs) const {
-        return data_ > rhs.data_;
-    }
-
-    constexpr bool operator<=(FixedPoint rhs) const {
-        return data_ <= rhs.data_;
-    }
-
-    constexpr bool operator>=(FixedPoint rhs) const {
-        return data_ >= rhs.data_;
-    }
+    friend constexpr auto operator<=>(FixedPoint lhs, FixedPoint rhs) = default;
 
 public: // unary operators
-    constexpr bool operator!() const {
+    [[nodiscard]] constexpr bool operator!() const {
         return !data_;
     }
 
-    constexpr FixedPoint operator~() const {
+    [[nodiscard]] constexpr FixedPoint operator~() const {
         // NOTE(eteran): this will often appear to "just negate" the value
         // that is not an error, it is because -x == (~x+1)
         // and that "+1" is adding an infinitesimally small fraction to the
@@ -345,89 +319,87 @@ public: // unary operators
         return FixedPoint::from_base(~data_);
     }
 
-    constexpr FixedPoint operator-() const {
+    [[nodiscard]] constexpr FixedPoint operator-() const {
         return FixedPoint::from_base(-data_);
     }
 
-    constexpr FixedPoint operator+() const {
+    [[nodiscard]] constexpr FixedPoint operator+() const {
         return FixedPoint::from_base(+data_);
     }
 
-    CONSTEXPR14 FixedPoint& operator++() {
+    constexpr FixedPoint& operator++() {
         data_ += one;
         return *this;
     }
 
-    CONSTEXPR14 FixedPoint& operator--() {
+    constexpr FixedPoint& operator--() {
         data_ -= one;
         return *this;
     }
 
-    CONSTEXPR14 FixedPoint operator++(int) {
+    constexpr FixedPoint operator++(int) {
         FixedPoint tmp(*this);
         data_ += one;
         return tmp;
     }
 
-    CONSTEXPR14 FixedPoint operator--(int) {
+    constexpr FixedPoint operator--(int) {
         FixedPoint tmp(*this);
         data_ -= one;
         return tmp;
     }
 
 public: // basic math operators
-    CONSTEXPR14 FixedPoint& operator+=(FixedPoint n) {
+    constexpr FixedPoint& operator+=(FixedPoint n) {
         data_ += n.data_;
         return *this;
     }
 
-    CONSTEXPR14 FixedPoint& operator-=(FixedPoint n) {
+    constexpr FixedPoint& operator-=(FixedPoint n) {
         data_ -= n.data_;
         return *this;
     }
 
-    CONSTEXPR14 FixedPoint& operator*=(FixedPoint n) {
+    constexpr FixedPoint& operator*=(FixedPoint n) {
         return assign(detail::multiply(*this, n));
     }
 
-    CONSTEXPR14 FixedPoint& operator/=(FixedPoint n) {
+    constexpr FixedPoint& operator/=(FixedPoint n) {
         FixedPoint temp;
         return assign(detail::divide(*this, n, temp));
     }
 
 private:
-    CONSTEXPR14 FixedPoint& assign(FixedPoint rhs) {
+    constexpr FixedPoint& assign(FixedPoint rhs) {
         data_ = rhs.data_;
         return *this;
     }
 
 public: // binary math operators, effects underlying bit pattern since these
         // don't really typically make sense for non-integer values
-    CONSTEXPR14 FixedPoint& operator&=(FixedPoint n) {
+    constexpr FixedPoint& operator&=(FixedPoint n) {
         data_ &= n.data_;
         return *this;
     }
 
-    CONSTEXPR14 FixedPoint& operator|=(FixedPoint n) {
+    constexpr FixedPoint& operator|=(FixedPoint n) {
         data_ |= n.data_;
         return *this;
     }
 
-    CONSTEXPR14 FixedPoint& operator^=(FixedPoint n) {
+    constexpr FixedPoint& operator^=(FixedPoint n) {
         data_ ^= n.data_;
         return *this;
     }
 
-    template <class Integer,
-              class = typename std::enable_if<std::is_integral<Integer>::value>::type>
-    CONSTEXPR14 FixedPoint& operator>>=(Integer n) {
+    template <IsIntegral Integer>
+    constexpr FixedPoint& operator>>=(Integer n) {
         data_ >>= n;
         return *this;
     }
 
-    template <class Integer,
-              class = typename std::enable_if<std::is_integral<Integer>::value>::type>
-    CONSTEXPR14 FixedPoint& operator<<=(Integer n) {
+    template <IsIntegral Integer>
+    constexpr FixedPoint& operator<<=(Integer n) {
         data_ <<= n;
         return *this;
     }
@@ -437,42 +409,42 @@ public: // conversion to basic types
         data_ += (data_ & fractional_mask) >> 1;
     }
 
-    constexpr int to_int() {
+    [[nodiscard]] constexpr int to_int() {
         round_up();
         return static_cast<int>((data_ & integer_mask) >> fractional_bits);
     }
 
-    constexpr unsigned int to_uint() const {
+    [[nodiscard]] constexpr unsigned int to_uint() {
         round_up();
         return static_cast<unsigned int>((data_ & integer_mask) >> fractional_bits);
     }
 
-    constexpr int64_t to_long() {
+    [[nodiscard]] constexpr int64_t to_long() {
         round_up();
         return static_cast<int64_t>((data_ & integer_mask) >> fractional_bits);
     }
 
-    constexpr int to_int_floor() const {
+    [[nodiscard]] constexpr int to_int_floor() const {
         return static_cast<int>((data_ & integer_mask) >> fractional_bits);
     }
 
-    constexpr int64_t to_long_floor() {
+    [[nodiscard]] constexpr int64_t to_long_floor() const {
         return static_cast<int64_t>((data_ & integer_mask) >> fractional_bits);
     }
 
-    constexpr unsigned int to_uint_floor() const {
+    [[nodiscard]] constexpr unsigned int to_uint_floor() const {
         return static_cast<unsigned int>((data_ & integer_mask) >> fractional_bits);
     }
 
-    constexpr float to_float() const {
+    [[nodiscard]] constexpr float to_float() const {
         return static_cast<float>(data_) / FixedPoint::one;
     }
 
-    constexpr double to_double() const {
+    [[nodiscard]] constexpr double to_double() const {
         return static_cast<double>(data_) / FixedPoint::one;
     }
 
-    constexpr base_type to_raw() const {
+    [[nodiscard]] constexpr base_type to_raw() const {
         return data_;
     }
 
@@ -480,27 +452,27 @@ public: // conversion to basic types
         data_ &= fractional_mask;
     }
 
-    constexpr base_type get_frac() const {
+    [[nodiscard]] constexpr base_type get_frac() const {
         return data_ & fractional_mask;
     }
 
 public:
-    CONSTEXPR14 void swap(FixedPoint& rhs) {
+    constexpr void swap(FixedPoint& rhs) noexcept {
         using std::swap;
         swap(data_, rhs.data_);
     }
 
 public:
-    base_type data_;
+    base_type data_{};
 };
 
 // if we have the same fractional portion, but differing integer portions, we trivially upgrade the
 // smaller type
 template <size_t I1, size_t I2, size_t F>
-CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
-operator+(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator+(
+    FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 
-    using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+    using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>;
 
     const T l = T::from_base(lhs.to_raw());
     const T r = T::from_base(rhs.to_raw());
@@ -508,10 +480,10 @@ operator+(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 }
 
 template <size_t I1, size_t I2, size_t F>
-CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
-operator-(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator-(
+    FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 
-    using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+    using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>;
 
     const T l = T::from_base(lhs.to_raw());
     const T r = T::from_base(rhs.to_raw());
@@ -519,10 +491,10 @@ operator-(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 }
 
 template <size_t I1, size_t I2, size_t F>
-CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
-operator*(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator*(
+    FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 
-    using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+    using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>;
 
     const T l = T::from_base(lhs.to_raw());
     const T r = T::from_base(rhs.to_raw());
@@ -530,10 +502,10 @@ operator*(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 }
 
 template <size_t I1, size_t I2, size_t F>
-CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
-operator/(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator/(
+    FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 
-    using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+    using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>;
 
     const T l = T::from_base(lhs.to_raw());
     const T r = T::from_base(rhs.to_raw());
@@ -548,159 +520,133 @@ std::ostream& operator<<(std::ostream& os, FixedPoint<I, F> f) {
 
 // basic math operators
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
+constexpr FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
     lhs += rhs;
     return lhs;
 }
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
+constexpr FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
     lhs -= rhs;
     return lhs;
 }
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
+constexpr FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
     lhs *= rhs;
     return lhs;
 }
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
+constexpr FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
     lhs /= rhs;
     return lhs;
 }
 
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, Number rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, Number rhs) {
     lhs += FixedPoint<I, F>(rhs);
     return lhs;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, Number rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, Number rhs) {
     lhs -= FixedPoint<I, F>(rhs);
     return lhs;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, Number rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, Number rhs) {
     lhs *= FixedPoint<I, F>(rhs);
     return lhs;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, Number rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, Number rhs) {
     lhs /= FixedPoint<I, F>(rhs);
     return lhs;
 }
 
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator+(Number lhs, FixedPoint<I, F> rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator+(Number lhs, FixedPoint<I, F> rhs) {
     FixedPoint<I, F> tmp(lhs);
     tmp += rhs;
     return tmp;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator-(Number lhs, FixedPoint<I, F> rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator-(Number lhs, FixedPoint<I, F> rhs) {
     FixedPoint<I, F> tmp(lhs);
     tmp -= rhs;
     return tmp;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator*(Number lhs, FixedPoint<I, F> rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator*(Number lhs, FixedPoint<I, F> rhs) {
     FixedPoint<I, F> tmp(lhs);
     tmp *= rhs;
     return tmp;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator/(Number lhs, FixedPoint<I, F> rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator/(Number lhs, FixedPoint<I, F> rhs) {
     FixedPoint<I, F> tmp(lhs);
     tmp /= rhs;
     return tmp;
 }
 
 // shift operators
-template <size_t I, size_t F, class Integer,
-          class = typename std::enable_if<std::is_integral<Integer>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator<<(FixedPoint<I, F> lhs, Integer rhs) {
+template <size_t I, size_t F, IsIntegral Integer>
+constexpr FixedPoint<I, F> operator<<(FixedPoint<I, F> lhs, Integer rhs) {
     lhs <<= rhs;
     return lhs;
 }
-template <size_t I, size_t F, class Integer,
-          class = typename std::enable_if<std::is_integral<Integer>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator>>(FixedPoint<I, F> lhs, Integer rhs) {
+template <size_t I, size_t F, IsIntegral Integer>
+constexpr FixedPoint<I, F> operator>>(FixedPoint<I, F> lhs, Integer rhs) {
     lhs >>= rhs;
     return lhs;
 }
 
 // comparison operators
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
 constexpr bool operator>(FixedPoint<I, F> lhs, Number rhs) {
     return lhs > FixedPoint<I, F>(rhs);
 }
645template <size_t I, size_t F, class Number, 606template <size_t I, size_t F, IsArithmetic Number>
646 class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
647constexpr bool operator<(FixedPoint<I, F> lhs, Number rhs) { 607constexpr bool operator<(FixedPoint<I, F> lhs, Number rhs) {
648 return lhs < FixedPoint<I, F>(rhs); 608 return lhs < FixedPoint<I, F>(rhs);
649} 609}
650template <size_t I, size_t F, class Number, 610template <size_t I, size_t F, IsArithmetic Number>
651 class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
652constexpr bool operator>=(FixedPoint<I, F> lhs, Number rhs) { 611constexpr bool operator>=(FixedPoint<I, F> lhs, Number rhs) {
653 return lhs >= FixedPoint<I, F>(rhs); 612 return lhs >= FixedPoint<I, F>(rhs);
654} 613}
655template <size_t I, size_t F, class Number, 614template <size_t I, size_t F, IsArithmetic Number>
656 class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
657constexpr bool operator<=(FixedPoint<I, F> lhs, Number rhs) { 615constexpr bool operator<=(FixedPoint<I, F> lhs, Number rhs) {
658 return lhs <= FixedPoint<I, F>(rhs); 616 return lhs <= FixedPoint<I, F>(rhs);
659} 617}
660template <size_t I, size_t F, class Number, 618template <size_t I, size_t F, IsArithmetic Number>
661 class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
662constexpr bool operator==(FixedPoint<I, F> lhs, Number rhs) { 619constexpr bool operator==(FixedPoint<I, F> lhs, Number rhs) {
663 return lhs == FixedPoint<I, F>(rhs); 620 return lhs == FixedPoint<I, F>(rhs);
664} 621}
665template <size_t I, size_t F, class Number, 622template <size_t I, size_t F, IsArithmetic Number>
666 class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
667constexpr bool operator!=(FixedPoint<I, F> lhs, Number rhs) { 623constexpr bool operator!=(FixedPoint<I, F> lhs, Number rhs) {
668 return lhs != FixedPoint<I, F>(rhs); 624 return lhs != FixedPoint<I, F>(rhs);
669} 625}
670 626
671template <size_t I, size_t F, class Number, 627template <size_t I, size_t F, IsArithmetic Number>
672 class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
673constexpr bool operator>(Number lhs, FixedPoint<I, F> rhs) { 628constexpr bool operator>(Number lhs, FixedPoint<I, F> rhs) {
674 return FixedPoint<I, F>(lhs) > rhs; 629 return FixedPoint<I, F>(lhs) > rhs;
675} 630}
676template <size_t I, size_t F, class Number, 631template <size_t I, size_t F, IsArithmetic Number>
677 class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
678constexpr bool operator<(Number lhs, FixedPoint<I, F> rhs) { 632constexpr bool operator<(Number lhs, FixedPoint<I, F> rhs) {
679 return FixedPoint<I, F>(lhs) < rhs; 633 return FixedPoint<I, F>(lhs) < rhs;
680} 634}
681template <size_t I, size_t F, class Number, 635template <size_t I, size_t F, IsArithmetic Number>
682 class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
683constexpr bool operator>=(Number lhs, FixedPoint<I, F> rhs) { 636constexpr bool operator>=(Number lhs, FixedPoint<I, F> rhs) {
684 return FixedPoint<I, F>(lhs) >= rhs; 637 return FixedPoint<I, F>(lhs) >= rhs;
685} 638}
686template <size_t I, size_t F, class Number, 639template <size_t I, size_t F, IsArithmetic Number>
687 class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
688constexpr bool operator<=(Number lhs, FixedPoint<I, F> rhs) { 640constexpr bool operator<=(Number lhs, FixedPoint<I, F> rhs) {
689 return FixedPoint<I, F>(lhs) <= rhs; 641 return FixedPoint<I, F>(lhs) <= rhs;
690} 642}
691template <size_t I, size_t F, class Number, 643template <size_t I, size_t F, IsArithmetic Number>
692 class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
693constexpr bool operator==(Number lhs, FixedPoint<I, F> rhs) { 644constexpr bool operator==(Number lhs, FixedPoint<I, F> rhs) {
694 return FixedPoint<I, F>(lhs) == rhs; 645 return FixedPoint<I, F>(lhs) == rhs;
695} 646}
696template <size_t I, size_t F, class Number, 647template <size_t I, size_t F, IsArithmetic Number>
697 class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
698constexpr bool operator!=(Number lhs, FixedPoint<I, F> rhs) { 648constexpr bool operator!=(Number lhs, FixedPoint<I, F> rhs) {
699 return FixedPoint<I, F>(lhs) != rhs; 649 return FixedPoint<I, F>(lhs) != rhs;
700} 650}
701 651
702} // namespace Common 652} // namespace Common
703
704#undef CONSTEXPR14
705
706#endif
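
For orientation, a minimal usage sketch of the new concept-constrained operators above (a hypothetical caller, not part of this commit; IsArithmetic and IsIntegral are the concepts named in the diff, from common/concepts.h):

    #include "common/fixed_point.h"

    Common::FixedPoint<50, 14> Mix(Common::FixedPoint<50, 14> sample) {
        // Mixed int/float operands resolve against the IsArithmetic overloads,
        // which wrap the raw operand in a FixedPoint before delegating to +=/*=.
        const auto scaled = sample * 2; // operator*(FixedPoint, Number)
        const auto biased = 1 + scaled; // operator+(Number, FixedPoint)
        return biased >> 1;             // operator>>(FixedPoint, Integer)
    }
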
diff --git a/src/common/settings.h b/src/common/settings.h
index d2452c93b..0eb98939c 100644
--- a/src/common/settings.h
+++ b/src/common/settings.h
@@ -431,7 +431,7 @@ struct Values {
         FullscreenMode::Exclusive,
 #endif
         FullscreenMode::Borderless, FullscreenMode::Exclusive, "fullscreen_mode"};
-    SwitchableSetting<int, true> aspect_ratio{0, 0, 3, "aspect_ratio"};
+    SwitchableSetting<int, true> aspect_ratio{0, 0, 4, "aspect_ratio"};
     SwitchableSetting<int, true> max_anisotropy{0, 0, 5, "max_anisotropy"};
     SwitchableSetting<bool> use_speed_limit{true, "use_speed_limit"};
     SwitchableSetting<u16, true> speed_limit{100, 0, 9999, "speed_limit"};
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 95302c419..055bea641 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -190,6 +190,9 @@ add_library(core STATIC
     hle/kernel/k_code_memory.h
     hle/kernel/k_condition_variable.cpp
     hle/kernel/k_condition_variable.h
+    hle/kernel/k_dynamic_page_manager.h
+    hle/kernel/k_dynamic_resource_manager.h
+    hle/kernel/k_dynamic_slab_heap.h
     hle/kernel/k_event.cpp
     hle/kernel/k_event.h
     hle/kernel/k_handle_table.cpp
@@ -240,6 +243,8 @@ add_library(core STATIC
     hle/kernel/k_server_session.h
     hle/kernel/k_session.cpp
     hle/kernel/k_session.h
+    hle/kernel/k_session_request.cpp
+    hle/kernel/k_session_request.h
     hle/kernel/k_shared_memory.cpp
     hle/kernel/k_shared_memory.h
     hle/kernel/k_shared_memory_info.h
@@ -261,8 +266,6 @@ add_library(core STATIC
     hle/kernel/k_worker_task.h
     hle/kernel/k_worker_task_manager.cpp
    hle/kernel/k_worker_task_manager.h
-    hle/kernel/k_writable_event.cpp
-    hle/kernel/k_writable_event.h
     hle/kernel/kernel.cpp
     hle/kernel/kernel.h
     hle/kernel/memory_types.h
diff --git a/src/core/arm/arm_interface.cpp b/src/core/arm/arm_interface.cpp
index 953d96439..29ba562dc 100644
--- a/src/core/arm/arm_interface.cpp
+++ b/src/core/arm/arm_interface.cpp
@@ -134,6 +134,14 @@ void ARM_Interface::Run() {
         }
         system.ExitDynarmicProfile();
 
+        // If the thread is scheduled for termination, exit the thread.
+        if (current_thread->HasDpc()) {
+            if (current_thread->IsTerminationRequested()) {
+                current_thread->Exit();
+                UNREACHABLE();
+            }
+        }
+
         // Notify the debugger and go to sleep if a breakpoint was hit,
         // or if the thread is unable to continue for any reason.
         if (Has(hr, breakpoint) || Has(hr, no_execute)) {
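
The added guard reads as two nested ifs, but only the combined condition matters: a thread with a pending deferred procedure call that is also marked for termination exits instead of re-entering guest code. A condensed, equivalent sketch (hypothetical restructuring, not the committed form):

    // Exit() never returns; that is exactly what the UNREACHABLE() asserts.
    if (current_thread->HasDpc() && current_thread->IsTerminationRequested()) {
        current_thread->Exit();
    }
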
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index 1d46f6d40..22b5d5656 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -111,6 +111,7 @@ public:
         LOG_ERROR(Core_ARM,
                   "Unimplemented instruction @ 0x{:X} for {} instructions (instr = {:08X})", pc,
                   num_instructions, memory.Read32(pc));
+        ReturnException(pc, ARM_Interface::no_execute);
     }
 
     void InstructionCacheOperationRaised(Dynarmic::A64::InstructionCacheOperation op,
diff --git a/src/core/core.cpp b/src/core/core.cpp
index 1deeee154..7fb8bc019 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -133,6 +133,50 @@ struct System::Impl {
         : kernel{system}, fs_controller{system}, memory{system}, hid_core{}, room_network{},
           cpu_manager{system}, reporter{system}, applet_manager{system}, time_manager{system} {}
 
+    void Initialize(System& system) {
+        device_memory = std::make_unique<Core::DeviceMemory>();
+
+        is_multicore = Settings::values.use_multi_core.GetValue();
+
+        core_timing.SetMulticore(is_multicore);
+        core_timing.Initialize([&system]() { system.RegisterHostThread(); });
+
+        const auto posix_time = std::chrono::system_clock::now().time_since_epoch();
+        const auto current_time =
+            std::chrono::duration_cast<std::chrono::seconds>(posix_time).count();
+        Settings::values.custom_rtc_differential =
+            Settings::values.custom_rtc.value_or(current_time) - current_time;
+
+        // Create a default fs if one doesn't already exist.
+        if (virtual_filesystem == nullptr) {
+            virtual_filesystem = std::make_shared<FileSys::RealVfsFilesystem>();
+        }
+        if (content_provider == nullptr) {
+            content_provider = std::make_unique<FileSys::ContentProviderUnion>();
+        }
+
+        // Create default implementations of applets if one is not provided.
+        applet_manager.SetDefaultAppletsIfMissing();
+
+        is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue();
+
+        kernel.SetMulticore(is_multicore);
+        cpu_manager.SetMulticore(is_multicore);
+        cpu_manager.SetAsyncGpu(is_async_gpu);
+    }
+
+    void ReinitializeIfNecessary(System& system) {
+        if (is_multicore == Settings::values.use_multi_core.GetValue()) {
+            return;
+        }
+
+        LOG_DEBUG(Kernel, "Re-initializing");
+
+        is_multicore = Settings::values.use_multi_core.GetValue();
+
+        Initialize(system);
+    }
+
     SystemResultStatus Run() {
         std::unique_lock<std::mutex> lk(suspend_guard);
         status = SystemResultStatus::Success;
@@ -178,37 +222,14 @@ struct System::Impl {
             debugger = std::make_unique<Debugger>(system, port);
     }
 
-    SystemResultStatus Init(System& system, Frontend::EmuWindow& emu_window) {
+    SystemResultStatus SetupForMainProcess(System& system, Frontend::EmuWindow& emu_window) {
         LOG_DEBUG(Core, "initialized OK");
 
-        device_memory = std::make_unique<Core::DeviceMemory>();
-
-        is_multicore = Settings::values.use_multi_core.GetValue();
-        is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue();
-
-        kernel.SetMulticore(is_multicore);
-        cpu_manager.SetMulticore(is_multicore);
-        cpu_manager.SetAsyncGpu(is_async_gpu);
-        core_timing.SetMulticore(is_multicore);
+        // Setting changes may require a full system reinitialization (e.g., disabling multicore).
+        ReinitializeIfNecessary(system);
 
         kernel.Initialize();
         cpu_manager.Initialize();
-        core_timing.Initialize([&system]() { system.RegisterHostThread(); });
-
-        const auto posix_time = std::chrono::system_clock::now().time_since_epoch();
-        const auto current_time =
-            std::chrono::duration_cast<std::chrono::seconds>(posix_time).count();
-        Settings::values.custom_rtc_differential =
-            Settings::values.custom_rtc.value_or(current_time) - current_time;
-
-        // Create a default fs if one doesn't already exist.
-        if (virtual_filesystem == nullptr)
-            virtual_filesystem = std::make_shared<FileSys::RealVfsFilesystem>();
-        if (content_provider == nullptr)
-            content_provider = std::make_unique<FileSys::ContentProviderUnion>();
-
-        /// Create default implementations of applets if one is not provided.
-        applet_manager.SetDefaultAppletsIfMissing();
 
         /// Reset all glue registrations
         arp_manager.ResetAll();
@@ -253,11 +274,11 @@ struct System::Impl {
             return SystemResultStatus::ErrorGetLoader;
         }
 
-        SystemResultStatus init_result{Init(system, emu_window)};
+        SystemResultStatus init_result{SetupForMainProcess(system, emu_window)};
         if (init_result != SystemResultStatus::Success) {
             LOG_CRITICAL(Core, "Failed to initialize system (Error {})!",
                          static_cast<int>(init_result));
-            Shutdown();
+            ShutdownMainProcess();
             return init_result;
         }
 
@@ -276,7 +297,7 @@ struct System::Impl {
         const auto [load_result, load_parameters] = app_loader->Load(*main_process, system);
         if (load_result != Loader::ResultStatus::Success) {
             LOG_CRITICAL(Core, "Failed to load ROM (Error {})!", load_result);
-            Shutdown();
+            ShutdownMainProcess();
 
             return static_cast<SystemResultStatus>(
                 static_cast<u32>(SystemResultStatus::ErrorLoader) + static_cast<u32>(load_result));
@@ -335,7 +356,7 @@ struct System::Impl {
         return status;
     }
 
-    void Shutdown() {
+    void ShutdownMainProcess() {
         SetShuttingDown(true);
 
         // Log last frame performance stats if game was loaded
@@ -369,7 +390,7 @@ struct System::Impl {
         cheat_engine.reset();
         telemetry_session.reset();
         time_manager.Shutdown();
-        core_timing.Shutdown();
+        core_timing.ClearPendingEvents();
         app_loader.reset();
         audio_core.reset();
         gpu_core.reset();
@@ -377,7 +398,6 @@ struct System::Impl {
         perf_stats.reset();
         kernel.Shutdown();
         memory.Reset();
-        applet_manager.ClearAll();
 
         if (auto room_member = room_network.GetRoomMember().lock()) {
             Network::GameInfo game_info{};
@@ -520,6 +540,10 @@ const CpuManager& System::GetCpuManager() const {
     return impl->cpu_manager;
 }
 
+void System::Initialize() {
+    impl->Initialize(*this);
+}
+
 SystemResultStatus System::Run() {
     return impl->Run();
 }
@@ -540,8 +564,8 @@ void System::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) {
     impl->kernel.InvalidateCpuInstructionCacheRange(addr, size);
 }
 
-void System::Shutdown() {
-    impl->Shutdown();
+void System::ShutdownMainProcess() {
+    impl->ShutdownMainProcess();
 }
 
 bool System::IsShuttingDown() const {
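
Taken together, the core.cpp changes split one-time host setup from per-game setup. A hedged sketch of the resulting frontend flow (method names from this diff; Load arguments abbreviated):

    Core::System system;
    system.Initialize();               // one-time: device memory, RTC differential, timing thread
    system.Load(emu_window, rom_path); // runs SetupForMainProcess(), which may Reinitialize
    system.Run();
    // ... emulation ...
    system.ShutdownMainProcess();      // tears down the game but keeps the System reusable
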
diff --git a/src/core/core.h b/src/core/core.h
index 7843cc8ad..4ebedffd9 100644
--- a/src/core/core.h
+++ b/src/core/core.h
@@ -143,6 +143,12 @@ public:
     System& operator=(System&&) = delete;
 
     /**
+     * Initializes the system
+     * This function will initialize core functionality used for system emulation
+     */
+    void Initialize();
+
+    /**
     * Run the OS and Application
      * This function will start emulation and run the relevant devices
      */
@@ -166,8 +172,8 @@ public:
 
     void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);
 
-    /// Shutdown the emulated system.
-    void Shutdown();
+    /// Shutdown the main emulated process.
+    void ShutdownMainProcess();
 
     /// Check if the core is shutting down.
     [[nodiscard]] bool IsShuttingDown() const;
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp
index 6c0fcb7b5..0e7b5f943 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -40,7 +40,9 @@ struct CoreTiming::Event {
 CoreTiming::CoreTiming()
     : clock{Common::CreateBestMatchingClock(Hardware::BASE_CLOCK_RATE, Hardware::CNTFREQ)} {}
 
-CoreTiming::~CoreTiming() = default;
+CoreTiming::~CoreTiming() {
+    Reset();
+}
 
 void CoreTiming::ThreadEntry(CoreTiming& instance) {
     constexpr char name[] = "HostTiming";
@@ -53,6 +55,7 @@ void CoreTiming::ThreadEntry(CoreTiming& instance) {
 }
 
 void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
+    Reset();
     on_thread_init = std::move(on_thread_init_);
     event_fifo_id = 0;
     shutting_down = false;
@@ -65,17 +68,8 @@ void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
     }
 }
 
-void CoreTiming::Shutdown() {
-    paused = true;
-    shutting_down = true;
-    pause_event.Set();
-    event.Set();
-    if (timer_thread) {
-        timer_thread->join();
-    }
-    ClearPendingEvents();
-    timer_thread.reset();
-    has_started = false;
+void CoreTiming::ClearPendingEvents() {
+    event_queue.clear();
 }
 
 void CoreTiming::Pause(bool is_paused) {
@@ -196,10 +190,6 @@ u64 CoreTiming::GetClockTicks() const {
     return CpuCyclesToClockCycles(ticks);
 }
 
-void CoreTiming::ClearPendingEvents() {
-    event_queue.clear();
-}
-
 void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
     std::scoped_lock lock{basic_lock};
 
@@ -270,6 +260,7 @@ void CoreTiming::ThreadLoop() {
             // There are more events left in the queue, wait until the next event.
             const auto wait_time = *next_time - GetGlobalTimeNs().count();
             if (wait_time > 0) {
+#ifdef _WIN32
                // Assume a timer resolution of 1ms.
                 static constexpr s64 TimerResolutionNS = 1000000;
 
@@ -287,6 +278,9 @@ void CoreTiming::ThreadLoop() {
                 if (event.IsSet()) {
                     event.Reset();
                 }
+#else
+                event.WaitFor(std::chrono::nanoseconds(wait_time));
+#endif
             }
         } else {
             // Queue is empty, wait until another event is scheduled and signals us to continue.
@@ -303,6 +297,18 @@ void CoreTiming::ThreadLoop() {
     }
 }
 
+void CoreTiming::Reset() {
+    paused = true;
+    shutting_down = true;
+    pause_event.Set();
+    event.Set();
+    if (timer_thread) {
+        timer_thread->join();
+    }
+    timer_thread.reset();
+    has_started = false;
+}
+
 std::chrono::nanoseconds CoreTiming::GetGlobalTimeNs() const {
     if (is_multicore) {
         return clock->GetTimeNS();
diff --git a/src/core/core_timing.h b/src/core/core_timing.h
index 3259397b2..b5925193c 100644
--- a/src/core/core_timing.h
+++ b/src/core/core_timing.h
@@ -61,19 +61,14 @@ public:
     /// required to end slice - 1 and start slice 0 before the first cycle of code is executed.
     void Initialize(std::function<void()>&& on_thread_init_);
 
-    /// Tears down all timing related functionality.
-    void Shutdown();
+    /// Clear all pending events. This should ONLY be done on exit.
+    void ClearPendingEvents();
 
     /// Sets if emulation is multicore or single core, must be set before Initialize
     void SetMulticore(bool is_multicore_) {
         is_multicore = is_multicore_;
     }
 
-    /// Check if it's using host timing.
-    bool IsHostTiming() const {
-        return is_multicore;
-    }
-
     /// Pauses/Unpauses the execution of the timer thread.
     void Pause(bool is_paused);
 
@@ -136,12 +131,11 @@ public:
 private:
     struct Event;
 
-    /// Clear all pending events. This should ONLY be done on exit.
-    void ClearPendingEvents();
-
     static void ThreadEntry(CoreTiming& instance);
     void ThreadLoop();
 
+    void Reset();
+
     std::unique_ptr<Common::WallClock> clock;
 
     s64 global_timer = 0;
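
With Shutdown() folded into a private Reset() that both Initialize() and the destructor call, CoreTiming becomes re-initializable. A minimal lifecycle sketch under those assumptions:

    Core::Timing::CoreTiming timing;
    timing.SetMulticore(true);                      // must precede Initialize()
    timing.Initialize([] { /* register host thread */ });
    // ... schedule and dispatch events ...
    timing.ClearPendingEvents();                    // exit path only, per the comment above
    timing.Initialize([] { /* safe: Initialize() Reset()s, joining the old timer thread */ });
    // ~CoreTiming() joins the timer thread via Reset()
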
diff --git a/src/core/device_memory.h b/src/core/device_memory.h
index df61b0c0b..90510733c 100644
--- a/src/core/device_memory.h
+++ b/src/core/device_memory.h
@@ -31,12 +31,14 @@ public:
                DramMemoryMap::Base;
     }
 
-    u8* GetPointer(PAddr addr) {
-        return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base);
+    template <typename T>
+    T* GetPointer(PAddr addr) {
+        return reinterpret_cast<T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base));
     }
 
-    const u8* GetPointer(PAddr addr) const {
-        return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base);
+    template <typename T>
+    const T* GetPointer(PAddr addr) const {
+        return reinterpret_cast<T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base));
     }
 
     Common::HostMemory buffer;
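
Call sites can now name the pointee type instead of casting a raw u8*. An illustrative sketch (the typed KPageBuffer use mirrors the init_slab_setup.cpp changes later in this commit; the variable names are hypothetical):

    // Before: reinterpret_cast<Kernel::KPageBuffer*>(device_memory.GetPointer(addr))
    auto* page = device_memory.GetPointer<Kernel::KPageBuffer>(addr);
    void* raw = device_memory.GetPointer<void>(addr); // byte-oriented users pass <void> or <u8>
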
diff --git a/src/core/file_sys/savedata_factory.cpp b/src/core/file_sys/savedata_factory.cpp
index 8c1b2523c..1567da231 100644
--- a/src/core/file_sys/savedata_factory.cpp
+++ b/src/core/file_sys/savedata_factory.cpp
@@ -5,6 +5,7 @@
 #include "common/assert.h"
 #include "common/common_types.h"
 #include "common/logging/log.h"
+#include "common/uuid.h"
 #include "core/core.h"
 #include "core/file_sys/savedata_factory.h"
 #include "core/file_sys/vfs.h"
@@ -59,6 +60,36 @@ bool ShouldSaveDataBeAutomaticallyCreated(SaveDataSpaceId space, const SaveDataA
            attr.title_id == 0 && attr.save_id == 0);
 }
 
+std::string GetFutureSaveDataPath(SaveDataSpaceId space_id, SaveDataType type, u64 title_id,
+                                  u128 user_id) {
+    // Only detect nand user saves.
+    const auto space_id_path = [space_id]() -> std::string_view {
+        switch (space_id) {
+        case SaveDataSpaceId::NandUser:
+            return "/user/save";
+        default:
+            return "";
+        }
+    }();
+
+    if (space_id_path.empty()) {
+        return "";
+    }
+
+    Common::UUID uuid;
+    std::memcpy(uuid.uuid.data(), user_id.data(), sizeof(Common::UUID));
+
+    // Only detect account/device saves from the future location.
+    switch (type) {
+    case SaveDataType::SaveData:
+        return fmt::format("{}/account/{}/{:016X}/1", space_id_path, uuid.RawString(), title_id);
+    case SaveDataType::DeviceSaveData:
+        return fmt::format("{}/device/{:016X}/1", space_id_path, title_id);
+    default:
+        return "";
+    }
+}
+
 } // Anonymous namespace
 
 std::string SaveDataAttribute::DebugInfo() const {
@@ -82,7 +113,7 @@ ResultVal<VirtualDir> SaveDataFactory::Create(SaveDataSpaceId space,
     PrintSaveDataAttributeWarnings(meta);
 
     const auto save_directory =
-        GetFullPath(system, space, meta.type, meta.title_id, meta.user_id, meta.save_id);
+        GetFullPath(system, dir, space, meta.type, meta.title_id, meta.user_id, meta.save_id);
 
     auto out = dir->CreateDirectoryRelative(save_directory);
 
@@ -99,7 +130,7 @@ ResultVal<VirtualDir> SaveDataFactory::Open(SaveDataSpaceId space,
                                             const SaveDataAttribute& meta) const {
 
     const auto save_directory =
-        GetFullPath(system, space, meta.type, meta.title_id, meta.user_id, meta.save_id);
+        GetFullPath(system, dir, space, meta.type, meta.title_id, meta.user_id, meta.save_id);
 
     auto out = dir->GetDirectoryRelative(save_directory);
 
@@ -134,9 +165,9 @@ std::string SaveDataFactory::GetSaveDataSpaceIdPath(SaveDataSpaceId space) {
     }
 }
 
-std::string SaveDataFactory::GetFullPath(Core::System& system, SaveDataSpaceId space,
-                                         SaveDataType type, u64 title_id, u128 user_id,
-                                         u64 save_id) {
+std::string SaveDataFactory::GetFullPath(Core::System& system, VirtualDir dir,
+                                         SaveDataSpaceId space, SaveDataType type, u64 title_id,
+                                         u128 user_id, u64 save_id) {
     // According to switchbrew, if a save is of type SaveData and the title id field is 0, it should
     // be interpreted as the title id of the current process.
     if (type == SaveDataType::SaveData || type == SaveDataType::DeviceSaveData) {
@@ -145,6 +176,17 @@ std::string SaveDataFactory::GetFullPath(Core::System& system, SaveDataSpaceId s
         }
     }
 
+    // For compat with a future impl.
+    if (std::string future_path =
+            GetFutureSaveDataPath(space, type, title_id & ~(0xFFULL), user_id);
+        !future_path.empty()) {
+        // Check if this location exists, and prefer it over the old.
+        if (const auto future_dir = dir->GetDirectoryRelative(future_path); future_dir != nullptr) {
+            LOG_INFO(Service_FS, "Using save at new location: {}", future_path);
+            return future_path;
+        }
+    }
+
     std::string out = GetSaveDataSpaceIdPath(space);
 
     switch (type) {
@@ -167,7 +209,8 @@ std::string SaveDataFactory::GetFullPath(Core::System& system, SaveDataSpaceId s
 
 SaveDataSize SaveDataFactory::ReadSaveDataSize(SaveDataType type, u64 title_id,
                                                u128 user_id) const {
-    const auto path = GetFullPath(system, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
+    const auto path =
+        GetFullPath(system, dir, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
     const auto relative_dir = GetOrCreateDirectoryRelative(dir, path);
 
     const auto size_file = relative_dir->GetFile(SAVE_DATA_SIZE_FILENAME);
@@ -185,7 +228,8 @@ SaveDataSize SaveDataFactory::ReadSaveDataSize(SaveDataType type, u64 title_id,
 
 void SaveDataFactory::WriteSaveDataSize(SaveDataType type, u64 title_id, u128 user_id,
                                         SaveDataSize new_value) const {
-    const auto path = GetFullPath(system, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
+    const auto path =
+        GetFullPath(system, dir, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
     const auto relative_dir = GetOrCreateDirectoryRelative(dir, path);
 
     const auto size_file = relative_dir->CreateFile(SAVE_DATA_SIZE_FILENAME);
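
The effect of the GetFullPath() change is a two-stage lookup. A hedged sketch of the order (paths illustrative; the legacy layout is whatever the existing GetSaveDataSpaceIdPath()-based code below it builds):

    // Probe the "future" layout first, masking the low title-id byte so
    // program variants share a save, and keep it only if it already exists.
    const auto future = GetFutureSaveDataPath(space, type, title_id & ~(0xFFULL), user_id);
    if (!future.empty() && dir->GetDirectoryRelative(future) != nullptr) {
        return future; // e.g. /user/save/account/<uuid>/<title_id>/1
    }
    // Otherwise fall through to the legacy path layout.
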
diff --git a/src/core/file_sys/savedata_factory.h b/src/core/file_sys/savedata_factory.h
index a763b94c8..d3633ef03 100644
--- a/src/core/file_sys/savedata_factory.h
+++ b/src/core/file_sys/savedata_factory.h
@@ -95,8 +95,8 @@ public:
     VirtualDir GetSaveDataSpaceDirectory(SaveDataSpaceId space) const;
 
     static std::string GetSaveDataSpaceIdPath(SaveDataSpaceId space);
-    static std::string GetFullPath(Core::System& system, SaveDataSpaceId space, SaveDataType type,
-                                   u64 title_id, u128 user_id, u64 save_id);
+    static std::string GetFullPath(Core::System& system, VirtualDir dir, SaveDataSpaceId space,
+                                   SaveDataType type, u64 title_id, u128 user_id, u64 save_id);
 
     SaveDataSize ReadSaveDataSize(SaveDataType type, u64 title_id, u128 user_id) const;
     void WriteSaveDataSize(SaveDataType type, u64 title_id, u128 user_id,
diff --git a/src/core/frontend/framebuffer_layout.cpp b/src/core/frontend/framebuffer_layout.cpp
index 90dd68ff1..b4081fc39 100644
--- a/src/core/frontend/framebuffer_layout.cpp
+++ b/src/core/frontend/framebuffer_layout.cpp
@@ -67,6 +67,8 @@ float EmulationAspectRatio(AspectRatio aspect, float window_aspect_ratio) {
         return 3.0f / 4.0f;
     case AspectRatio::R21_9:
         return 9.0f / 21.0f;
+    case AspectRatio::R16_10:
+        return 10.0f / 16.0f;
     case AspectRatio::StretchToWindow:
         return window_aspect_ratio;
     default:
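
EmulationAspectRatio() returns height over width, so the new entry maps 16:10 to 10.0f / 16.0f = 0.625f, in line with the existing 4:3 (0.75f) and 21:9 (~0.4286f) cases:

    // Illustrative call; Layout is the namespace assumed for this header pair.
    const float ar = Layout::EmulationAspectRatio(Layout::AspectRatio::R16_10, 16.0f / 9.0f);
    // ar == 0.625f regardless of the window ratio, since R16_10 is a fixed mode
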
diff --git a/src/core/frontend/framebuffer_layout.h b/src/core/frontend/framebuffer_layout.h
index 1561d994e..94683b30f 100644
--- a/src/core/frontend/framebuffer_layout.h
+++ b/src/core/frontend/framebuffer_layout.h
@@ -27,6 +27,7 @@ enum class AspectRatio {
     Default,
     R4_3,
     R21_9,
+    R16_10,
     StretchToWindow,
 };
 
diff --git a/src/core/hid/irs_types.h b/src/core/hid/irs_types.h
index 88c5b016d..0d1bfe53f 100644
--- a/src/core/hid/irs_types.h
+++ b/src/core/hid/irs_types.h
@@ -14,7 +14,7 @@ enum class CameraAmbientNoiseLevel : u32 {
     Low,
     Medium,
     High,
-    Unkown3, // This level can't be reached
+    Unknown3, // This level can't be reached
 };
 
 // This is nn::irsensor::CameraLightTarget
@@ -75,9 +75,9 @@ enum class IrCameraStatus : u32 {
 enum class IrCameraInternalStatus : u32 {
     Stopped,
     FirmwareUpdateNeeded,
-    Unkown2,
-    Unkown3,
-    Unkown4,
+    Unknown2,
+    Unknown3,
+    Unknown4,
     FirmwareVersionRequested,
     FirmwareVersionIsInvalid,
     Ready,
@@ -121,20 +121,20 @@ enum class IrSensorFunctionLevel : u8 {
 
 // This is nn::irsensor::MomentProcessorPreprocess
 enum class MomentProcessorPreprocess : u32 {
-    Unkown0,
-    Unkown1,
+    Unknown0,
+    Unknown1,
 };
 
 // This is nn::irsensor::PackedMomentProcessorPreprocess
 enum class PackedMomentProcessorPreprocess : u8 {
-    Unkown0,
-    Unkown1,
+    Unknown0,
+    Unknown1,
 };
 
 // This is nn::irsensor::PointingStatus
 enum class PointingStatus : u32 {
-    Unkown0,
-    Unkown1,
+    Unknown0,
+    Unknown1,
 };
 
 struct IrsRect {
diff --git a/src/core/hle/ipc_helpers.h b/src/core/hle/ipc_helpers.h
index d631c0357..aa27be767 100644
--- a/src/core/hle/ipc_helpers.h
+++ b/src/core/hle/ipc_helpers.h
@@ -86,13 +86,13 @@ public:
         u32 num_domain_objects{};
         const bool always_move_handles{
             (static_cast<u32>(flags) & static_cast<u32>(Flags::AlwaysMoveHandles)) != 0};
-        if (!ctx.Session()->IsDomain() || always_move_handles) {
+        if (!ctx.Session()->GetSessionRequestManager()->IsDomain() || always_move_handles) {
             num_handles_to_move = num_objects_to_move;
         } else {
             num_domain_objects = num_objects_to_move;
         }
 
-        if (ctx.Session()->IsDomain()) {
+        if (ctx.Session()->GetSessionRequestManager()->IsDomain()) {
             raw_data_size +=
                 static_cast<u32>(sizeof(DomainMessageHeader) / sizeof(u32) + num_domain_objects);
             ctx.write_size += num_domain_objects;
@@ -125,7 +125,8 @@ public:
         if (!ctx.IsTipc()) {
             AlignWithPadding();
 
-            if (ctx.Session()->IsDomain() && ctx.HasDomainMessageHeader()) {
+            if (ctx.Session()->GetSessionRequestManager()->IsDomain() &&
+                ctx.HasDomainMessageHeader()) {
                 IPC::DomainMessageHeader domain_header{};
                 domain_header.num_objects = num_domain_objects;
                 PushRaw(domain_header);
@@ -145,14 +146,15 @@ public:
 
     template <class T>
     void PushIpcInterface(std::shared_ptr<T> iface) {
-        if (context->Session()->IsDomain()) {
+        if (context->Session()->GetSessionRequestManager()->IsDomain()) {
             context->AddDomainObject(std::move(iface));
         } else {
             kernel.CurrentProcess()->GetResourceLimit()->Reserve(
                 Kernel::LimitableResource::Sessions, 1);
 
             auto* session = Kernel::KSession::Create(kernel);
-            session->Initialize(nullptr, iface->GetServiceName());
+            session->Initialize(nullptr, iface->GetServiceName(),
+                                std::make_shared<Kernel::SessionRequestManager>(kernel));
 
             context->AddMoveObject(&session->GetClientSession());
             iface->ClientConnected(&session->GetServerSession());
@@ -385,7 +387,7 @@ public:
 
     template <class T>
     std::weak_ptr<T> PopIpcInterface() {
-        ASSERT(context->Session()->IsDomain());
+        ASSERT(context->Session()->GetSessionRequestManager()->IsDomain());
         ASSERT(context->GetDomainMessageHeader().input_object_count > 0);
         return context->GetDomainHandler<T>(Pop<u32>() - 1);
     }
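
Typical service code exercising the rewritten PushIpcInterface() path looks like the following sketch (IFoo is hypothetical; the builder sizes follow the usual one-moved-object convention):

    IPC::ResponseBuilder rb{ctx, 2, 0, 1};
    rb.Push(ResultSuccess);
    rb.PushIpcInterface<IFoo>(std::make_shared<IFoo>(system));
    // On a domain session this records a domain object id; otherwise it now
    // creates a KSession seeded with its own SessionRequestManager.
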
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index 5b3feec66..e4f43a053 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -19,6 +19,7 @@
 #include "core/hle/kernel/k_server_session.h"
 #include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/service_thread.h"
 #include "core/memory.h"
 
 namespace Kernel {
@@ -56,16 +57,103 @@ bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& co
     }
 }
 
+Result SessionRequestManager::CompleteSyncRequest(KServerSession* server_session,
+                                                  HLERequestContext& context) {
+    Result result = ResultSuccess;
+
+    // If the session has been converted to a domain, handle the domain request
+    if (this->HasSessionRequestHandler(context)) {
+        if (IsDomain() && context.HasDomainMessageHeader()) {
+            result = HandleDomainSyncRequest(server_session, context);
+            // If there is no domain header, the regular session handler is used
+        } else if (this->HasSessionHandler()) {
+            // If this manager has an associated HLE handler, forward the request to it.
+            result = this->SessionHandler().HandleSyncRequest(*server_session, context);
+        }
+    } else {
+        ASSERT_MSG(false, "Session handler is invalid, stubbing response!");
+        IPC::ResponseBuilder rb(context, 2);
+        rb.Push(ResultSuccess);
+    }
+
+    if (convert_to_domain) {
+        ASSERT_MSG(!IsDomain(), "ServerSession is already a domain instance.");
+        this->ConvertToDomain();
+        convert_to_domain = false;
+    }
+
+    return result;
+}
+
+Result SessionRequestManager::HandleDomainSyncRequest(KServerSession* server_session,
+                                                      HLERequestContext& context) {
+    if (!context.HasDomainMessageHeader()) {
+        return ResultSuccess;
+    }
+
+    // Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs
+    context.SetSessionRequestManager(server_session->GetSessionRequestManager());
+
+    // If there is a DomainMessageHeader, then this is CommandType "Request"
+    const auto& domain_message_header = context.GetDomainMessageHeader();
+    const u32 object_id{domain_message_header.object_id};
+    switch (domain_message_header.command) {
+    case IPC::DomainMessageHeader::CommandType::SendMessage:
+        if (object_id > this->DomainHandlerCount()) {
+            LOG_CRITICAL(IPC,
+                         "object_id {} is too big! This probably means a recent service call "
+                         "needed to return a new interface!",
+                         object_id);
+            ASSERT(false);
+            return ResultSuccess; // Ignore error if asserts are off
+        }
+        if (auto strong_ptr = this->DomainHandler(object_id - 1).lock()) {
+            return strong_ptr->HandleSyncRequest(*server_session, context);
+        } else {
+            ASSERT(false);
+            return ResultSuccess;
+        }
+
+    case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: {
+        LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id);
+
+        this->CloseDomainHandler(object_id - 1);
+
+        IPC::ResponseBuilder rb{context, 2};
+        rb.Push(ResultSuccess);
+        return ResultSuccess;
+    }
+    }
+
+    LOG_CRITICAL(IPC, "Unknown domain command={}", domain_message_header.command.Value());
+    ASSERT(false);
+    return ResultSuccess;
+}
+
+Result SessionRequestManager::QueueSyncRequest(KSession* parent,
+                                               std::shared_ptr<HLERequestContext>&& context) {
+    // Ensure we have a session request handler
+    if (this->HasSessionRequestHandler(*context)) {
+        if (auto strong_ptr = this->GetServiceThread().lock()) {
+            strong_ptr->QueueSyncRequest(*parent, std::move(context));
+        } else {
+            ASSERT_MSG(false, "strong_ptr is nullptr!");
+        }
+    } else {
+        ASSERT_MSG(false, "handler is invalid!");
+    }
+
+    return ResultSuccess;
+}
+
 void SessionRequestHandler::ClientConnected(KServerSession* session) {
-    session->ClientConnected(shared_from_this());
+    session->GetSessionRequestManager()->SetSessionHandler(shared_from_this());
 
     // Ensure our server session is tracked globally.
     kernel.RegisterServerObject(session);
 }
 
-void SessionRequestHandler::ClientDisconnected(KServerSession* session) {
-    session->ClientDisconnected();
-}
+void SessionRequestHandler::ClientDisconnected(KServerSession* session) {}
 
 HLERequestContext::HLERequestContext(KernelCore& kernel_, Core::Memory::Memory& memory_,
                                      KServerSession* server_session_, KThread* thread_)
@@ -126,7 +214,7 @@ void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32
     // Padding to align to 16 bytes
     rp.AlignWithPadding();
 
-    if (Session()->IsDomain() &&
+    if (Session()->GetSessionRequestManager()->IsDomain() &&
         ((command_header->type == IPC::CommandType::Request ||
           command_header->type == IPC::CommandType::RequestWithContext) ||
          !incoming)) {
@@ -135,7 +223,7 @@ void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32
     if (incoming || domain_message_header) {
         domain_message_header = rp.PopRaw<IPC::DomainMessageHeader>();
     } else {
-        if (Session()->IsDomain()) {
+        if (Session()->GetSessionRequestManager()->IsDomain()) {
             LOG_WARNING(IPC, "Domain request has no DomainMessageHeader!");
         }
     }
@@ -228,12 +316,12 @@ Result HLERequestContext::WriteToOutgoingCommandBuffer(KThread& requesting_threa
     // Write the domain objects to the command buffer, these go after the raw untranslated data.
     // TODO(Subv): This completely ignores C buffers.
 
-    if (Session()->IsDomain()) {
+    if (server_session->GetSessionRequestManager()->IsDomain()) {
         current_offset = domain_offset - static_cast<u32>(outgoing_domain_objects.size());
-        for (const auto& object : outgoing_domain_objects) {
-            server_session->AppendDomainHandler(object);
-            cmd_buf[current_offset++] =
-                static_cast<u32_le>(server_session->NumDomainRequestHandlers());
+        for (auto& object : outgoing_domain_objects) {
+            server_session->GetSessionRequestManager()->AppendDomainHandler(std::move(object));
+            cmd_buf[current_offset++] = static_cast<u32_le>(
+                server_session->GetSessionRequestManager()->DomainHandlerCount());
         }
     }
 
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index 99265ce90..a0522bca0 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
@@ -43,13 +43,13 @@ class Domain;
 class HLERequestContext;
 class KAutoObject;
 class KernelCore;
+class KEvent;
 class KHandleTable;
 class KProcess;
 class KServerSession;
 class KThread;
 class KReadableEvent;
 class KSession;
-class KWritableEvent;
 class ServiceThread;
 
 enum class ThreadWakeupReason;
@@ -121,6 +121,10 @@ public:
         is_domain = true;
     }
 
+    void ConvertToDomainOnRequestEnd() {
+        convert_to_domain = true;
+    }
+
     std::size_t DomainHandlerCount() const {
         return domain_handlers.size();
     }
@@ -164,7 +168,12 @@ public:
 
     bool HasSessionRequestHandler(const HLERequestContext& context) const;
 
+    Result HandleDomainSyncRequest(KServerSession* server_session, HLERequestContext& context);
+    Result CompleteSyncRequest(KServerSession* server_session, HLERequestContext& context);
+    Result QueueSyncRequest(KSession* parent, std::shared_ptr<HLERequestContext>&& context);
+
 private:
+    bool convert_to_domain{};
     bool is_domain{};
     SessionRequestHandlerPtr session_handler;
     std::vector<SessionRequestHandlerPtr> domain_handlers;
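
ConvertToDomainOnRequestEnd() exists so a handler can request domain conversion without corrupting its own in-flight reply; CompleteSyncRequest() above applies the flag only after the response has been written. A hypothetical handler wiring under those assumptions:

    void IFoo::ConvertCurrentObjectToDomain(Kernel::HLERequestContext& ctx) {
        // Defer: the current reply is still marshalled in non-domain format.
        ctx.Session()->GetSessionRequestManager()->ConvertToDomainOnRequestEnd();
        IPC::ResponseBuilder rb{ctx, 3};
        rb.Push(ResultSuccess);
        rb.Push<u32>(1); // object id assigned to the converted session
    }
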
diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp
index 9b6b284d0..477e4e407 100644
--- a/src/core/hle/kernel/init/init_slab_setup.cpp
+++ b/src/core/hle/kernel/init/init_slab_setup.cpp
@@ -18,6 +18,7 @@
 #include "core/hle/kernel/k_process.h"
 #include "core/hle/kernel/k_resource_limit.h"
 #include "core/hle/kernel/k_session.h"
+#include "core/hle/kernel/k_session_request.h"
 #include "core/hle/kernel/k_shared_memory.h"
 #include "core/hle/kernel/k_shared_memory_info.h"
 #include "core/hle/kernel/k_system_control.h"
@@ -34,6 +35,7 @@ namespace Kernel::Init {
     HANDLER(KThread, (SLAB_COUNT(KThread)), ##__VA_ARGS__) \
     HANDLER(KEvent, (SLAB_COUNT(KEvent)), ##__VA_ARGS__) \
     HANDLER(KPort, (SLAB_COUNT(KPort)), ##__VA_ARGS__) \
+    HANDLER(KSessionRequest, (SLAB_COUNT(KSession) * 2), ##__VA_ARGS__) \
     HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ##__VA_ARGS__) \
     HANDLER(KSharedMemoryInfo, (SLAB_COUNT(KSharedMemory) * 8), ##__VA_ARGS__) \
     HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__) \
@@ -94,8 +96,8 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd
     // TODO(bunnei): Fix this once we support the kernel virtual memory layout.
 
     if (size > 0) {
-        void* backing_kernel_memory{
-            system.DeviceMemory().GetPointer(TranslateSlabAddrToPhysical(memory_layout, start))};
+        void* backing_kernel_memory{system.DeviceMemory().GetPointer<void>(
+            TranslateSlabAddrToPhysical(memory_layout, start))};
 
         const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1);
         ASSERT(region != nullptr);
@@ -181,7 +183,7 @@ void InitializeKPageBufferSlabHeap(Core::System& system) {
     ASSERT(slab_address != 0);
 
     // Initialize the slabheap.
-    KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer(slab_address),
+    KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer<void>(slab_address),
                                     slab_size);
 }
 
diff --git a/src/core/hle/kernel/k_class_token.cpp b/src/core/hle/kernel/k_class_token.cpp
index cc2a0f7ca..10265c23c 100644
--- a/src/core/hle/kernel/k_class_token.cpp
+++ b/src/core/hle/kernel/k_class_token.cpp
@@ -18,7 +18,6 @@
18#include "core/hle/kernel/k_synchronization_object.h" 18#include "core/hle/kernel/k_synchronization_object.h"
19#include "core/hle/kernel/k_thread.h" 19#include "core/hle/kernel/k_thread.h"
20#include "core/hle/kernel/k_transfer_memory.h" 20#include "core/hle/kernel/k_transfer_memory.h"
21#include "core/hle/kernel/k_writable_event.h"
22 21
23namespace Kernel { 22namespace Kernel {
24 23
@@ -42,13 +41,12 @@ static_assert(ClassToken<KPort> == 0b10000101'00000000);
42static_assert(ClassToken<KSession> == 0b00011001'00000000); 41static_assert(ClassToken<KSession> == 0b00011001'00000000);
43static_assert(ClassToken<KSharedMemory> == 0b00101001'00000000); 42static_assert(ClassToken<KSharedMemory> == 0b00101001'00000000);
44static_assert(ClassToken<KEvent> == 0b01001001'00000000); 43static_assert(ClassToken<KEvent> == 0b01001001'00000000);
45static_assert(ClassToken<KWritableEvent> == 0b10001001'00000000);
46// static_assert(ClassToken<KLightClientSession> == 0b00110001'00000000); 44// static_assert(ClassToken<KLightClientSession> == 0b00110001'00000000);
47// static_assert(ClassToken<KLightServerSession> == 0b01010001'00000000); 45// static_assert(ClassToken<KLightServerSession> == 0b01010001'00000000);
48static_assert(ClassToken<KTransferMemory> == 0b10010001'00000000); 46static_assert(ClassToken<KTransferMemory> == 0b01010001'00000000);
49// static_assert(ClassToken<KDeviceAddressSpace> == 0b01100001'00000000); 47// static_assert(ClassToken<KDeviceAddressSpace> == 0b01100001'00000000);
50// static_assert(ClassToken<KSessionRequest> == 0b10100001'00000000); 48// static_assert(ClassToken<KSessionRequest> == 0b10100001'00000000);
51static_assert(ClassToken<KCodeMemory> == 0b11000001'00000000); 49static_assert(ClassToken<KCodeMemory> == 0b10100001'00000000);
52 50
53// Ensure that the token hierarchy is correct. 51// Ensure that the token hierarchy is correct.
54 52
@@ -73,13 +71,12 @@ static_assert(ClassToken<KPort> == ((0b10000101 << 8) | ClassToken<KAutoObject>)
73static_assert(ClassToken<KSession> == ((0b00011001 << 8) | ClassToken<KAutoObject>)); 71static_assert(ClassToken<KSession> == ((0b00011001 << 8) | ClassToken<KAutoObject>));
74static_assert(ClassToken<KSharedMemory> == ((0b00101001 << 8) | ClassToken<KAutoObject>)); 72static_assert(ClassToken<KSharedMemory> == ((0b00101001 << 8) | ClassToken<KAutoObject>));
75static_assert(ClassToken<KEvent> == ((0b01001001 << 8) | ClassToken<KAutoObject>)); 73static_assert(ClassToken<KEvent> == ((0b01001001 << 8) | ClassToken<KAutoObject>));
76static_assert(ClassToken<KWritableEvent> == ((0b10001001 << 8) | ClassToken<KAutoObject>));
77// static_assert(ClassToken<KLightClientSession> == ((0b00110001 << 8) | ClassToken<KAutoObject>)); 74// static_assert(ClassToken<KLightClientSession> == ((0b00110001 << 8) | ClassToken<KAutoObject>));
78// static_assert(ClassToken<KLightServerSession> == ((0b01010001 << 8) | ClassToken<KAutoObject>)); 75// static_assert(ClassToken<KLightServerSession> == ((0b01010001 << 8) | ClassToken<KAutoObject>));
79static_assert(ClassToken<KTransferMemory> == ((0b10010001 << 8) | ClassToken<KAutoObject>)); 76static_assert(ClassToken<KTransferMemory> == ((0b01010001 << 8) | ClassToken<KAutoObject>));
80// static_assert(ClassToken<KDeviceAddressSpace> == ((0b01100001 << 8) | ClassToken<KAutoObject>)); 77// static_assert(ClassToken<KDeviceAddressSpace> == ((0b01100001 << 8) | ClassToken<KAutoObject>));
81// static_assert(ClassToken<KSessionRequest> == ((0b10100001 << 8) | ClassToken<KAutoObject>)); 78// static_assert(ClassToken<KSessionRequest> == ((0b10100001 << 8) | ClassToken<KAutoObject>));
82static_assert(ClassToken<KCodeMemory> == ((0b11000001 << 8) | ClassToken<KAutoObject>)); 79static_assert(ClassToken<KCodeMemory> == ((0b10100001 << 8) | ClassToken<KAutoObject>));
83 80
84// Ensure that the token hierarchy reflects the class hierarchy. 81// Ensure that the token hierarchy reflects the class hierarchy.
85 82
@@ -110,7 +107,6 @@ static_assert(std::is_final_v<KPort> && std::is_base_of_v<KAutoObject, KPort>);
110static_assert(std::is_final_v<KSession> && std::is_base_of_v<KAutoObject, KSession>); 107static_assert(std::is_final_v<KSession> && std::is_base_of_v<KAutoObject, KSession>);
111static_assert(std::is_final_v<KSharedMemory> && std::is_base_of_v<KAutoObject, KSharedMemory>); 108static_assert(std::is_final_v<KSharedMemory> && std::is_base_of_v<KAutoObject, KSharedMemory>);
112static_assert(std::is_final_v<KEvent> && std::is_base_of_v<KAutoObject, KEvent>); 109static_assert(std::is_final_v<KEvent> && std::is_base_of_v<KAutoObject, KEvent>);
113static_assert(std::is_final_v<KWritableEvent> && std::is_base_of_v<KAutoObject, KWritableEvent>);
114// static_assert(std::is_final_v<KLightClientSession> && 110// static_assert(std::is_final_v<KLightClientSession> &&
115// std::is_base_of_v<KAutoObject, KLightClientSession>); 111// std::is_base_of_v<KAutoObject, KLightClientSession>);
116// static_assert(std::is_final_v<KLightServerSession> && 112// static_assert(std::is_final_v<KLightServerSession> &&
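
The retokenizing above works because each class token is the class's own 8-bit pattern shifted into the high byte and ORed with its parent's token, which is exactly what the second group of static_asserts verifies. A minimal standalone sketch of the idea, using an illustrative base token value rather than yuzu's real ClassToken machinery, shows why an "is-a" test then reduces to a single mask-and-compare:

#include <cassert>
#include <cstdint>

using ClassTokenType = std::uint16_t;

// Hypothetical base token for the root class; derived classes OR their own
// bit pattern (shifted into the high byte) over it.
constexpr ClassTokenType AutoObjectToken = 0b00000000'00000001;
constexpr ClassTokenType EventToken = (0b01001001 << 8) | AutoObjectToken;
constexpr ClassTokenType TransferMemoryToken = (0b01010001 << 8) | AutoObjectToken;

// An object "is" a class exactly when every bit of that class's token is set
// in the object's own token.
constexpr bool IsDerivedFrom(ClassTokenType object_token, ClassTokenType class_token) {
    return (object_token & class_token) == class_token;
}

static_assert(IsDerivedFrom(EventToken, AutoObjectToken));      // an event is an auto-object
static_assert(!IsDerivedFrom(EventToken, TransferMemoryToken)); // but not a transfer memory

int main() {
    assert(IsDerivedFrom(TransferMemoryToken, AutoObjectToken));
    return 0;
}
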
diff --git a/src/core/hle/kernel/k_class_token.h b/src/core/hle/kernel/k_class_token.h
index c9001ae3d..ab20e00ff 100644
--- a/src/core/hle/kernel/k_class_token.h
+++ b/src/core/hle/kernel/k_class_token.h
@@ -101,7 +101,6 @@ public:
101 KSession, 101 KSession,
102 KSharedMemory, 102 KSharedMemory,
103 KEvent, 103 KEvent,
104 KWritableEvent,
105 KLightClientSession, 104 KLightClientSession,
106 KLightServerSession, 105 KLightServerSession,
107 KTransferMemory, 106 KTransferMemory,
diff --git a/src/core/hle/kernel/k_client_session.cpp b/src/core/hle/kernel/k_client_session.cpp
index b2a887b14..b4197a8d5 100644
--- a/src/core/hle/kernel/k_client_session.cpp
+++ b/src/core/hle/kernel/k_client_session.cpp
@@ -1,6 +1,7 @@
1// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project 1// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later 2// SPDX-License-Identifier: GPL-2.0-or-later
3 3
4#include "common/scope_exit.h"
4#include "core/hle/kernel/hle_ipc.h" 5#include "core/hle/kernel/hle_ipc.h"
5#include "core/hle/kernel/k_client_session.h" 6#include "core/hle/kernel/k_client_session.h"
6#include "core/hle/kernel/k_server_session.h" 7#include "core/hle/kernel/k_server_session.h"
@@ -10,6 +11,8 @@
10 11
11namespace Kernel { 12namespace Kernel {
12 13
14static constexpr u32 MessageBufferSize = 0x100;
15
13KClientSession::KClientSession(KernelCore& kernel_) 16KClientSession::KClientSession(KernelCore& kernel_)
14 : KAutoObjectWithSlabHeapAndContainer{kernel_} {} 17 : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
15KClientSession::~KClientSession() = default; 18KClientSession::~KClientSession() = default;
@@ -21,10 +24,17 @@ void KClientSession::Destroy() {
21 24
22void KClientSession::OnServerClosed() {} 25void KClientSession::OnServerClosed() {}
23 26
24Result KClientSession::SendSyncRequest(KThread* thread, Core::Memory::Memory& memory, 27Result KClientSession::SendSyncRequest() {
25 Core::Timing::CoreTiming& core_timing) { 28 // Create a session request.
26 // Signal the server session that new data is available 29 KSessionRequest* request = KSessionRequest::Create(kernel);
27 return parent->GetServerSession().HandleSyncRequest(thread, memory, core_timing); 30 R_UNLESS(request != nullptr, ResultOutOfResource);
31 SCOPE_EXIT({ request->Close(); });
32
33 // Initialize the request.
34 request->Initialize(nullptr, GetCurrentThread(kernel).GetTLSAddress(), MessageBufferSize);
35
36 // Send the request.
37 return parent->GetServerSession().OnRequest(request);
28} 38}
29 39
30} // namespace Kernel 40} // namespace Kernel
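
The rewritten SendSyncRequest replaces the direct HandleSyncRequest call with a create/guard/forward lifecycle: allocate a KSessionRequest, make sure the local reference is dropped on every return path, then hand the request to the server session. A minimal sketch of that pattern, with simplified stand-ins for KSessionRequest, the SCOPE_EXIT macro, and the result type:

#include <cstdio>

// Stand-in for yuzu's SCOPE_EXIT macro: runs a callable on scope exit.
template <typename F>
struct ScopeExit {
    F f;
    ~ScopeExit() { f(); }
};
template <typename F>
ScopeExit(F) -> ScopeExit<F>;

// Stand-in for a refcounted KSessionRequest.
struct Request {
    int ref_count = 1;
    void Close() {
        if (--ref_count == 0) {
            std::puts("request destroyed");
            delete this;
        }
    }
};

int SendSyncRequest() {
    Request* request = new Request();           // KSessionRequest::Create analogue
    ScopeExit guard{[&] { request->Close(); }}; // drop our reference on any exit path

    // ... initialize the request with the TLS message buffer, then forward it
    // to the server session, which takes its own reference if it keeps it ...
    std::puts("request sent");
    return 0; // ResultSuccess analogue
}

int main() {
    return SendSyncRequest();
}
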
diff --git a/src/core/hle/kernel/k_client_session.h b/src/core/hle/kernel/k_client_session.h
index 0c750d756..b4a19c546 100644
--- a/src/core/hle/kernel/k_client_session.h
+++ b/src/core/hle/kernel/k_client_session.h
@@ -46,8 +46,7 @@ public:
46 return parent; 46 return parent;
47 } 47 }
48 48
49 Result SendSyncRequest(KThread* thread, Core::Memory::Memory& memory, 49 Result SendSyncRequest();
50 Core::Timing::CoreTiming& core_timing);
51 50
52 void OnServerClosed(); 51 void OnServerClosed();
53 52
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index da57ceb21..4b1c134d4 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -34,7 +34,7 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
34 34
35 // Clear the memory. 35 // Clear the memory.
36 for (const auto& block : m_page_group.Nodes()) { 36 for (const auto& block : m_page_group.Nodes()) {
37 std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize()); 37 std::memset(device_memory.GetPointer<void>(block.GetAddress()), 0xFF, block.GetSize());
38 } 38 }
39 39
40 // Set remaining tracking members. 40 // Set remaining tracking members.
diff --git a/src/core/hle/kernel/k_dynamic_page_manager.h b/src/core/hle/kernel/k_dynamic_page_manager.h
new file mode 100644
index 000000000..9076c8fa3
--- /dev/null
+++ b/src/core/hle/kernel/k_dynamic_page_manager.h
@@ -0,0 +1,136 @@
1// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "common/alignment.h"
7#include "common/common_types.h"
8#include "core/hle/kernel/k_page_bitmap.h"
9#include "core/hle/kernel/k_spin_lock.h"
10#include "core/hle/kernel/memory_types.h"
11#include "core/hle/kernel/svc_results.h"
12
13namespace Kernel {
14
15class KDynamicPageManager {
16public:
17 class PageBuffer {
18 private:
19 u8 m_buffer[PageSize];
20 };
21 static_assert(sizeof(PageBuffer) == PageSize);
22
23public:
24 KDynamicPageManager() = default;
25
26 template <typename T>
27 T* GetPointer(VAddr addr) {
28 return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address));
29 }
30
31 template <typename T>
32 const T* GetPointer(VAddr addr) const {
33 return reinterpret_cast<const T*>(m_backing_memory.data() + (addr - m_address));
34 }
35
36 Result Initialize(VAddr addr, size_t sz) {
37 // We need to have positive size.
38 R_UNLESS(sz > 0, ResultOutOfMemory);
39 m_backing_memory.resize(sz);
40
41 // Calculate management overhead.
42 const size_t management_size =
43 KPageBitmap::CalculateManagementOverheadSize(sz / sizeof(PageBuffer));
44 const size_t allocatable_size = sz - management_size;
45
46 // Set tracking fields.
47 m_address = addr;
48 m_size = Common::AlignDown(allocatable_size, sizeof(PageBuffer));
49 m_count = allocatable_size / sizeof(PageBuffer);
50 R_UNLESS(m_count > 0, ResultOutOfMemory);
51
52 // Clear the management region.
53 u64* management_ptr = GetPointer<u64>(m_address + allocatable_size);
54 std::memset(management_ptr, 0, management_size);
55
56 // Initialize the bitmap.
57 m_page_bitmap.Initialize(management_ptr, m_count);
58
59 // Free the pages to the bitmap.
60 for (size_t i = 0; i < m_count; i++) {
61 // Ensure the freed page is all-zero.
62 std::memset(GetPointer<PageBuffer>(m_address) + i, 0, PageSize);
63
64 // Set the bit for the free page.
65 m_page_bitmap.SetBit(i);
66 }
67
68 R_SUCCEED();
69 }
70
71 VAddr GetAddress() const {
72 return m_address;
73 }
74 size_t GetSize() const {
75 return m_size;
76 }
77 size_t GetUsed() const {
78 return m_used;
79 }
80 size_t GetPeak() const {
81 return m_peak;
82 }
83 size_t GetCount() const {
84 return m_count;
85 }
86
87 PageBuffer* Allocate() {
88 // Take the lock.
89 // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
90 KScopedSpinLock lk(m_lock);
91
92 // Find a random free block.
93 s64 soffset = m_page_bitmap.FindFreeBlock(true);
94 if (soffset < 0) [[unlikely]] {
95 return nullptr;
96 }
97
98 const size_t offset = static_cast<size_t>(soffset);
99
100 // Update our tracking.
101 m_page_bitmap.ClearBit(offset);
102 m_peak = std::max(m_peak, (++m_used));
103
104 return GetPointer<PageBuffer>(m_address) + offset;
105 }
106
107 void Free(PageBuffer* pb) {
108 // Ensure the freed page is all-zero.
109 std::memset(pb, 0, PageSize);
110
111 // Take the lock.
112 // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
113 KScopedSpinLock lk(m_lock);
114
115 // Set the bit for the free page.
116 const size_t offset = static_cast<size_t>(pb - GetPointer<PageBuffer>(m_address));
117 m_page_bitmap.SetBit(offset);
118
119 // Decrement our used count.
120 --m_used;
121 }
122
123private:
124 KSpinLock m_lock;
125 KPageBitmap m_page_bitmap;
126 size_t m_used{};
127 size_t m_peak{};
128 size_t m_count{};
129 VAddr m_address{};
130 size_t m_size{};
131
132 // TODO(bunnei): Back by host memory until we emulate kernel virtual address space.
133 std::vector<u8> m_backing_memory;
134};
135
136} // namespace Kernel
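
KDynamicPageManager keeps its free pages in a KPageBitmap: Allocate asks the bitmap for a free bit and clears it, Free zeroes the page and sets the bit again. A self-contained sketch of the same bookkeeping, shrunk to one 64-bit word and taking the lowest free bit rather than the random free block the real bitmap returns:

#include <bit>
#include <cstdint>
#include <cstdio>

class TinyPageAllocator {
public:
    // Bit i set => page i is free. Supports exactly 64 pages.
    void Initialize() {
        m_free_bitmap = ~0ULL;
    }

    // Returns a free page index, or -1 when exhausted.
    int Allocate() {
        if (m_free_bitmap == 0) {
            return -1;
        }
        const int index = std::countr_zero(m_free_bitmap); // lowest free page
        m_free_bitmap &= ~(1ULL << index);                 // mark it in use
        return index;
    }

    void Free(int index) {
        m_free_bitmap |= (1ULL << index); // mark it free again
    }

private:
    std::uint64_t m_free_bitmap{};
};

int main() {
    TinyPageAllocator allocator;
    allocator.Initialize();
    const int a = allocator.Allocate();
    const int b = allocator.Allocate();
    std::printf("allocated pages %d and %d\n", a, b); // 0 and 1
    allocator.Free(a);
    std::printf("reused page %d\n", allocator.Allocate()); // 0 again
    return 0;
}
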
diff --git a/src/core/hle/kernel/k_dynamic_resource_manager.h b/src/core/hle/kernel/k_dynamic_resource_manager.h
new file mode 100644
index 000000000..1ce517e8e
--- /dev/null
+++ b/src/core/hle/kernel/k_dynamic_resource_manager.h
@@ -0,0 +1,58 @@
1// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "common/common_funcs.h"
7#include "core/hle/kernel/k_dynamic_slab_heap.h"
8#include "core/hle/kernel/k_memory_block.h"
9
10namespace Kernel {
11
12template <typename T, bool ClearNode = false>
13class KDynamicResourceManager {
14 YUZU_NON_COPYABLE(KDynamicResourceManager);
15 YUZU_NON_MOVEABLE(KDynamicResourceManager);
16
17public:
18 using DynamicSlabType = KDynamicSlabHeap<T, ClearNode>;
19
20public:
21 constexpr KDynamicResourceManager() = default;
22
23 constexpr size_t GetSize() const {
24 return m_slab_heap->GetSize();
25 }
26 constexpr size_t GetUsed() const {
27 return m_slab_heap->GetUsed();
28 }
29 constexpr size_t GetPeak() const {
30 return m_slab_heap->GetPeak();
31 }
32 constexpr size_t GetCount() const {
33 return m_slab_heap->GetCount();
34 }
35
36 void Initialize(KDynamicPageManager* page_allocator, DynamicSlabType* slab_heap) {
37 m_page_allocator = page_allocator;
38 m_slab_heap = slab_heap;
39 }
40
41 T* Allocate() const {
42 return m_slab_heap->Allocate(m_page_allocator);
43 }
44
45 void Free(T* t) const {
46 m_slab_heap->Free(t);
47 }
48
49private:
50 KDynamicPageManager* m_page_allocator{};
51 DynamicSlabType* m_slab_heap{};
52};
53
54class KMemoryBlockSlabManager : public KDynamicResourceManager<KMemoryBlock> {};
55
56using KMemoryBlockSlabHeap = typename KMemoryBlockSlabManager::DynamicSlabType;
57
58} // namespace Kernel
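
KDynamicResourceManager owns no storage of its own: Initialize merely pairs an externally owned page allocator with an externally owned slab heap, and Allocate/Free delegate to the heap. A sketch of that wiring with simplified stand-in types (the real heap carves fresh pages from the page source when it runs dry):

#include <cassert>
#include <cstddef>

struct PageSource {}; // stand-in for KDynamicPageManager

template <typename T>
struct SlabHeap { // stand-in for KDynamicSlabHeap<T>
    std::size_t used = 0;
    T* Allocate(PageSource*) {
        ++used;
        return new T{}; // grossly simplified; the real heap uses page-backed slabs
    }
    void Free(T* t) {
        --used;
        delete t;
    }
};

template <typename T>
class ResourceManager {
public:
    void Initialize(PageSource* pages, SlabHeap<T>* heap) {
        m_pages = pages;
        m_heap = heap;
    }
    T* Allocate() const {
        return m_heap->Allocate(m_pages);
    }
    void Free(T* t) const {
        m_heap->Free(t);
    }

private:
    PageSource* m_pages{};
    SlabHeap<T>* m_heap{};
};

struct MemoryBlock {
    int dummy;
};

int main() {
    PageSource pages;
    SlabHeap<MemoryBlock> heap;
    ResourceManager<MemoryBlock> manager;
    manager.Initialize(&pages, &heap); // same wiring as Initialize() above
    MemoryBlock* block = manager.Allocate();
    assert(heap.used == 1);
    manager.Free(block);
    assert(heap.used == 0);
    return 0;
}
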
diff --git a/src/core/hle/kernel/k_dynamic_slab_heap.h b/src/core/hle/kernel/k_dynamic_slab_heap.h
new file mode 100644
index 000000000..3a0ddd050
--- /dev/null
+++ b/src/core/hle/kernel/k_dynamic_slab_heap.h
@@ -0,0 +1,122 @@
1// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include <atomic>
7
8#include "common/common_funcs.h"
9#include "core/hle/kernel/k_dynamic_page_manager.h"
10#include "core/hle/kernel/k_slab_heap.h"
11
12namespace Kernel {
13
14template <typename T, bool ClearNode = false>
15class KDynamicSlabHeap : protected impl::KSlabHeapImpl {
16 YUZU_NON_COPYABLE(KDynamicSlabHeap);
17 YUZU_NON_MOVEABLE(KDynamicSlabHeap);
18
19public:
20 constexpr KDynamicSlabHeap() = default;
21
22 constexpr VAddr GetAddress() const {
23 return m_address;
24 }
25 constexpr size_t GetSize() const {
26 return m_size;
27 }
28 constexpr size_t GetUsed() const {
29 return m_used.load();
30 }
31 constexpr size_t GetPeak() const {
32 return m_peak.load();
33 }
34 constexpr size_t GetCount() const {
35 return m_count.load();
36 }
37
38 constexpr bool IsInRange(VAddr addr) const {
39 return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1;
40 }
41
42 void Initialize(KDynamicPageManager* page_allocator, size_t num_objects) {
43 ASSERT(page_allocator != nullptr);
44
45 // Initialize members.
46 m_address = page_allocator->GetAddress();
47 m_size = page_allocator->GetSize();
48
49 // Initialize the base allocator.
50 KSlabHeapImpl::Initialize();
51
52 // Allocate until we have the correct number of objects.
53 while (m_count.load() < num_objects) {
54 auto* allocated = reinterpret_cast<T*>(page_allocator->Allocate());
55 ASSERT(allocated != nullptr);
56
57 for (size_t i = 0; i < sizeof(PageBuffer) / sizeof(T); i++) {
58 KSlabHeapImpl::Free(allocated + i);
59 }
60
61 m_count += sizeof(PageBuffer) / sizeof(T);
62 }
63 }
64
65 T* Allocate(KDynamicPageManager* page_allocator) {
66 T* allocated = static_cast<T*>(KSlabHeapImpl::Allocate());
67
68 // If we successfully allocated and we should clear the node, do so.
69 if constexpr (ClearNode) {
70 if (allocated != nullptr) [[likely]] {
71 reinterpret_cast<KSlabHeapImpl::Node*>(allocated)->next = nullptr;
72 }
73 }
74
75 // If we fail to allocate, try to get a new page from our next allocator.
76 if (allocated == nullptr) [[unlikely]] {
77 if (page_allocator != nullptr) {
78 allocated = reinterpret_cast<T*>(page_allocator->Allocate());
79 if (allocated != nullptr) {
80 // If we succeeded in getting a page, free the rest to our slab.
81 for (size_t i = 1; i < sizeof(PageBuffer) / sizeof(T); i++) {
82 KSlabHeapImpl::Free(allocated + i);
83 }
84 m_count += sizeof(PageBuffer) / sizeof(T);
85 }
86 }
87 }
88
89 if (allocated != nullptr) [[likely]] {
90 // Construct the object.
91 std::construct_at(allocated);
92
93 // Update our tracking.
94 const size_t used = ++m_used;
95 size_t peak = m_peak.load();
96 while (peak < used) {
97 if (m_peak.compare_exchange_weak(peak, used, std::memory_order_relaxed)) {
98 break;
99 }
100 }
101 }
102
103 return allocated;
104 }
105
106 void Free(T* t) {
107 KSlabHeapImpl::Free(t);
108 --m_used;
109 }
110
111private:
112 using PageBuffer = KDynamicPageManager::PageBuffer;
113
114private:
115 std::atomic<size_t> m_used{};
116 std::atomic<size_t> m_peak{};
117 std::atomic<size_t> m_count{};
118 VAddr m_address{};
119 size_t m_size{};
120};
121
122} // namespace Kernel
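
Two details of KDynamicSlabHeap deserve a note: when the free list is empty, Allocate refills it by carving a fresh page into sizeof(PageBuffer) / sizeof(T) objects, and the peak counter is maintained lock-free with a compare_exchange_weak loop. The sketch below isolates that peak-tracking idiom; the counter names and thread counts are illustrative:

#include <atomic>
#include <cstddef>
#include <cstdio>
#include <thread>
#include <vector>

std::atomic<std::size_t> used{0};
std::atomic<std::size_t> peak{0};

void TrackAllocation() {
    const std::size_t now_used = ++used;
    std::size_t observed_peak = peak.load(std::memory_order_relaxed);
    // Retry until we publish a new maximum or another thread already has;
    // a failed CAS reloads the current peak into observed_peak.
    while (observed_peak < now_used &&
           !peak.compare_exchange_weak(observed_peak, now_used, std::memory_order_relaxed)) {
    }
}

int main() {
    std::vector<std::thread> threads;
    for (int i = 0; i < 8; ++i) {
        threads.emplace_back([] {
            for (int j = 0; j < 1000; ++j) {
                TrackAllocation();
                --used; // matching free
            }
        });
    }
    for (auto& t : threads) {
        t.join();
    }
    std::printf("peak concurrent use: %zu (at most 8)\n", peak.load());
    return 0;
}
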
diff --git a/src/core/hle/kernel/k_event.cpp b/src/core/hle/kernel/k_event.cpp
index e52fafbc7..78ca59463 100644
--- a/src/core/hle/kernel/k_event.cpp
+++ b/src/core/hle/kernel/k_event.cpp
@@ -8,39 +8,45 @@
8namespace Kernel { 8namespace Kernel {
9 9
10KEvent::KEvent(KernelCore& kernel_) 10KEvent::KEvent(KernelCore& kernel_)
11 : KAutoObjectWithSlabHeapAndContainer{kernel_}, readable_event{kernel_}, writable_event{ 11 : KAutoObjectWithSlabHeapAndContainer{kernel_}, m_readable_event{kernel_} {}
12 kernel_} {}
13 12
14KEvent::~KEvent() = default; 13KEvent::~KEvent() = default;
15 14
16void KEvent::Initialize(std::string&& name_, KProcess* owner_) { 15void KEvent::Initialize(KProcess* owner) {
17 // Increment reference count. 16 // Create our readable event.
18 // Because reference count is one on creation, this will result 17 KAutoObject::Create(std::addressof(m_readable_event));
19 // in a reference count of two. Thus, when both readable and
20 // writable events are closed this object will be destroyed.
21 Open();
22 18
23 // Create our sub events. 19 // Initialize our readable event.
24 KAutoObject::Create(std::addressof(readable_event)); 20 m_readable_event.Initialize(this);
25 KAutoObject::Create(std::addressof(writable_event));
26
27 // Initialize our sub sessions.
28 readable_event.Initialize(this, name_ + ":Readable");
29 writable_event.Initialize(this, name_ + ":Writable");
30 21
31 // Set our owner process. 22 // Set our owner process.
32 owner = owner_; 23 m_owner = owner;
33 owner->Open(); 24 m_owner->Open();
34 25
35 // Mark initialized. 26 // Mark initialized.
36 name = std::move(name_); 27 m_initialized = true;
37 initialized = true;
38} 28}
39 29
40void KEvent::Finalize() { 30void KEvent::Finalize() {
41 KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObjectWithList>::Finalize(); 31 KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObjectWithList>::Finalize();
42} 32}
43 33
34Result KEvent::Signal() {
35 KScopedSchedulerLock sl{kernel};
36
37 R_SUCCEED_IF(m_readable_event_destroyed);
38
39 return m_readable_event.Signal();
40}
41
42Result KEvent::Clear() {
43 KScopedSchedulerLock sl{kernel};
44
45 R_SUCCEED_IF(m_readable_event_destroyed);
46
47 return m_readable_event.Clear();
48}
49
44void KEvent::PostDestroy(uintptr_t arg) { 50void KEvent::PostDestroy(uintptr_t arg) {
45 // Release the event count resource the owner process holds. 51 // Release the event count resource the owner process holds.
46 KProcess* owner = reinterpret_cast<KProcess*>(arg); 52 KProcess* owner = reinterpret_cast<KProcess*>(arg);
diff --git a/src/core/hle/kernel/k_event.h b/src/core/hle/kernel/k_event.h
index 2ff828feb..48ce7d9a0 100644
--- a/src/core/hle/kernel/k_event.h
+++ b/src/core/hle/kernel/k_event.h
@@ -4,14 +4,12 @@
4#pragma once 4#pragma once
5 5
6#include "core/hle/kernel/k_readable_event.h" 6#include "core/hle/kernel/k_readable_event.h"
7#include "core/hle/kernel/k_writable_event.h"
8#include "core/hle/kernel/slab_helpers.h" 7#include "core/hle/kernel/slab_helpers.h"
9 8
10namespace Kernel { 9namespace Kernel {
11 10
12class KernelCore; 11class KernelCore;
13class KReadableEvent; 12class KReadableEvent;
14class KWritableEvent;
15class KProcess; 13class KProcess;
16 14
17class KEvent final : public KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObjectWithList> { 15class KEvent final : public KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObjectWithList> {
@@ -21,37 +19,40 @@ public:
21 explicit KEvent(KernelCore& kernel_); 19 explicit KEvent(KernelCore& kernel_);
22 ~KEvent() override; 20 ~KEvent() override;
23 21
24 void Initialize(std::string&& name, KProcess* owner_); 22 void Initialize(KProcess* owner);
25 23
26 void Finalize() override; 24 void Finalize() override;
27 25
28 bool IsInitialized() const override { 26 bool IsInitialized() const override {
29 return initialized; 27 return m_initialized;
30 } 28 }
31 29
32 uintptr_t GetPostDestroyArgument() const override { 30 uintptr_t GetPostDestroyArgument() const override {
33 return reinterpret_cast<uintptr_t>(owner); 31 return reinterpret_cast<uintptr_t>(m_owner);
34 } 32 }
35 33
36 KProcess* GetOwner() const override { 34 KProcess* GetOwner() const override {
37 return owner; 35 return m_owner;
38 } 36 }
39 37
40 KReadableEvent& GetReadableEvent() { 38 KReadableEvent& GetReadableEvent() {
41 return readable_event; 39 return m_readable_event;
42 }
43
44 KWritableEvent& GetWritableEvent() {
45 return writable_event;
46 } 40 }
47 41
48 static void PostDestroy(uintptr_t arg); 42 static void PostDestroy(uintptr_t arg);
49 43
44 Result Signal();
45 Result Clear();
46
47 void OnReadableEventDestroyed() {
48 m_readable_event_destroyed = true;
49 }
50
50private: 51private:
51 KReadableEvent readable_event; 52 KReadableEvent m_readable_event;
52 KWritableEvent writable_event; 53 KProcess* m_owner{};
53 KProcess* owner{}; 54 bool m_initialized{};
54 bool initialized{}; 55 bool m_readable_event_destroyed{};
55}; 56};
56 57
57} // namespace Kernel 58} // namespace Kernel
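
With KWritableEvent gone, KEvent signals and clears its readable side directly, and the new m_readable_event_destroyed flag turns Signal/Clear into successful no-ops once that side has been destroyed. A single-threaded sketch of the guard (yuzu additionally takes the scheduler lock, elided here):

#include <cstdio>

struct ReadableEvent { // stand-in for KReadableEvent
    bool signaled = false;
    void Signal() { signaled = true; }
    void Clear() { signaled = false; }
};

class Event {
public:
    void Signal() {
        if (m_readable_destroyed) {
            return; // R_SUCCEED_IF analogue: nothing left to signal
        }
        m_readable.Signal();
    }

    void Clear() {
        if (m_readable_destroyed) {
            return;
        }
        m_readable.Clear();
    }

    void OnReadableEventDestroyed() {
        m_readable_destroyed = true;
    }

private:
    ReadableEvent m_readable;
    bool m_readable_destroyed = false;
};

int main() {
    Event event;
    event.Signal();
    event.OnReadableEventDestroyed();
    event.Clear(); // safely ignored after the readable side is gone
    std::puts("done");
    return 0;
}
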
diff --git a/src/core/hle/kernel/k_interrupt_manager.cpp b/src/core/hle/kernel/k_interrupt_manager.cpp
index 1b577a5b3..4a6b60d26 100644
--- a/src/core/hle/kernel/k_interrupt_manager.cpp
+++ b/src/core/hle/kernel/k_interrupt_manager.cpp
@@ -11,29 +11,34 @@
11namespace Kernel::KInterruptManager { 11namespace Kernel::KInterruptManager {
12 12
13void HandleInterrupt(KernelCore& kernel, s32 core_id) { 13void HandleInterrupt(KernelCore& kernel, s32 core_id) {
14 auto* process = kernel.CurrentProcess();
15 if (!process) {
16 return;
17 }
18
19 // Acknowledge the interrupt. 14 // Acknowledge the interrupt.
20 kernel.PhysicalCore(core_id).ClearInterrupt(); 15 kernel.PhysicalCore(core_id).ClearInterrupt();
21 16
22 auto& current_thread = GetCurrentThread(kernel); 17 auto& current_thread = GetCurrentThread(kernel);
23 18
24 // If the user disable count is set, we may need to pin the current thread. 19 if (auto* process = kernel.CurrentProcess(); process) {
25 if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) { 20 // If the user disable count is set, we may need to pin the current thread.
26 KScopedSchedulerLock sl{kernel}; 21 if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
22 KScopedSchedulerLock sl{kernel};
27 23
28 // Pin the current thread. 24 // Pin the current thread.
29 process->PinCurrentThread(core_id); 25 process->PinCurrentThread(core_id);
30 26
31 // Set the interrupt flag for the thread. 27 // Set the interrupt flag for the thread.
32 GetCurrentThread(kernel).SetInterruptFlag(); 28 GetCurrentThread(kernel).SetInterruptFlag();
29 }
33 } 30 }
34 31
35 // Request interrupt scheduling. 32 // Request interrupt scheduling.
36 kernel.CurrentScheduler()->RequestScheduleOnInterrupt(); 33 kernel.CurrentScheduler()->RequestScheduleOnInterrupt();
37} 34}
38 35
36void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask) {
37 for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; ++core_id) {
38 if (core_mask & (1ULL << core_id)) {
39 kernel.PhysicalCore(core_id).Interrupt();
40 }
41 }
42}
43
39} // namespace Kernel::KInterruptManager 44} // namespace Kernel::KInterruptManager
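
The new SendInterProcessorInterrupt tests each core's bit in the 64-bit mask and interrupts only the cores whose bits are set. A standalone sketch with a stand-in core count and interrupt hook:

#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr std::size_t NUM_CPU_CORES = 4; // stand-in for Core::Hardware::NUM_CPU_CORES

void InterruptCore(std::size_t core_id) { // stand-in for PhysicalCore::Interrupt
    std::printf("IPI -> core %zu\n", core_id);
}

void SendInterProcessorInterrupt(std::uint64_t core_mask) {
    for (std::size_t core_id = 0; core_id < NUM_CPU_CORES; ++core_id) {
        if (core_mask & (1ULL << core_id)) {
            InterruptCore(core_id);
        }
    }
}

int main() {
    SendInterProcessorInterrupt(0b1010); // interrupts cores 1 and 3
    return 0;
}
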
diff --git a/src/core/hle/kernel/k_interrupt_manager.h b/src/core/hle/kernel/k_interrupt_manager.h
index f103dfe3f..803dc9211 100644
--- a/src/core/hle/kernel/k_interrupt_manager.h
+++ b/src/core/hle/kernel/k_interrupt_manager.h
@@ -11,6 +11,8 @@ class KernelCore;
11 11
12namespace KInterruptManager { 12namespace KInterruptManager {
13void HandleInterrupt(KernelCore& kernel, s32 core_id); 13void HandleInterrupt(KernelCore& kernel, s32 core_id);
14} 14void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask);
15
16} // namespace KInterruptManager
15 17
16} // namespace Kernel 18} // namespace Kernel
diff --git a/src/core/hle/kernel/k_linked_list.h b/src/core/hle/kernel/k_linked_list.h
index 78859ced3..29ebd16b7 100644
--- a/src/core/hle/kernel/k_linked_list.h
+++ b/src/core/hle/kernel/k_linked_list.h
@@ -16,6 +16,7 @@ class KLinkedListNode : public boost::intrusive::list_base_hook<>,
16 public KSlabAllocated<KLinkedListNode> { 16 public KSlabAllocated<KLinkedListNode> {
17 17
18public: 18public:
19 explicit KLinkedListNode(KernelCore&) {}
19 KLinkedListNode() = default; 20 KLinkedListNode() = default;
20 21
21 void Initialize(void* it) { 22 void Initialize(void* it) {
diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h
index 18df1f836..9444f6bd2 100644
--- a/src/core/hle/kernel/k_memory_block.h
+++ b/src/core/hle/kernel/k_memory_block.h
@@ -6,6 +6,7 @@
6#include "common/alignment.h" 6#include "common/alignment.h"
7#include "common/assert.h" 7#include "common/assert.h"
8#include "common/common_types.h" 8#include "common/common_types.h"
9#include "common/intrusive_red_black_tree.h"
9#include "core/hle/kernel/memory_types.h" 10#include "core/hle/kernel/memory_types.h"
10#include "core/hle/kernel/svc_types.h" 11#include "core/hle/kernel/svc_types.h"
11 12
@@ -168,9 +169,8 @@ constexpr KMemoryPermission ConvertToKMemoryPermission(Svc::MemoryPermission per
168 169
169enum class KMemoryAttribute : u8 { 170enum class KMemoryAttribute : u8 {
170 None = 0x00, 171 None = 0x00,
171 Mask = 0x7F, 172 All = 0xFF,
172 All = Mask, 173 UserMask = All,
173 DontCareMask = 0x80,
174 174
175 Locked = static_cast<u8>(Svc::MemoryAttribute::Locked), 175 Locked = static_cast<u8>(Svc::MemoryAttribute::Locked),
176 IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked), 176 IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked),
@@ -178,76 +178,112 @@ enum class KMemoryAttribute : u8 {
178 Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached), 178 Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached),
179 179
180 SetMask = Uncached, 180 SetMask = Uncached,
181
182 IpcAndDeviceMapped = IpcLocked | DeviceShared,
183 LockedAndIpcLocked = Locked | IpcLocked,
184 DeviceSharedAndUncached = DeviceShared | Uncached
185}; 181};
186DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute); 182DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute);
187 183
188static_assert((static_cast<u8>(KMemoryAttribute::Mask) & 184enum class KMemoryBlockDisableMergeAttribute : u8 {
189 static_cast<u8>(KMemoryAttribute::DontCareMask)) == 0); 185 None = 0,
186 Normal = (1u << 0),
187 DeviceLeft = (1u << 1),
188 IpcLeft = (1u << 2),
189 Locked = (1u << 3),
190 DeviceRight = (1u << 4),
191
192 AllLeft = Normal | DeviceLeft | IpcLeft | Locked,
193 AllRight = DeviceRight,
194};
195DECLARE_ENUM_FLAG_OPERATORS(KMemoryBlockDisableMergeAttribute);
190 196
191struct KMemoryInfo { 197struct KMemoryInfo {
192 VAddr addr{}; 198 uintptr_t m_address;
193 std::size_t size{}; 199 size_t m_size;
194 KMemoryState state{}; 200 KMemoryState m_state;
195 KMemoryPermission perm{}; 201 u16 m_device_disable_merge_left_count;
196 KMemoryAttribute attribute{}; 202 u16 m_device_disable_merge_right_count;
197 KMemoryPermission original_perm{}; 203 u16 m_ipc_lock_count;
198 u16 ipc_lock_count{}; 204 u16 m_device_use_count;
199 u16 device_use_count{}; 205 u16 m_ipc_disable_merge_count;
206 KMemoryPermission m_permission;
207 KMemoryAttribute m_attribute;
208 KMemoryPermission m_original_permission;
209 KMemoryBlockDisableMergeAttribute m_disable_merge_attribute;
200 210
201 constexpr Svc::MemoryInfo GetSvcMemoryInfo() const { 211 constexpr Svc::MemoryInfo GetSvcMemoryInfo() const {
202 return { 212 return {
203 addr, 213 .addr = m_address,
204 size, 214 .size = m_size,
205 static_cast<Svc::MemoryState>(state & KMemoryState::Mask), 215 .state = static_cast<Svc::MemoryState>(m_state & KMemoryState::Mask),
206 static_cast<Svc::MemoryAttribute>(attribute & KMemoryAttribute::Mask), 216 .attr = static_cast<Svc::MemoryAttribute>(m_attribute & KMemoryAttribute::UserMask),
207 static_cast<Svc::MemoryPermission>(perm & KMemoryPermission::UserMask), 217 .perm = static_cast<Svc::MemoryPermission>(m_permission & KMemoryPermission::UserMask),
208 ipc_lock_count, 218 .ipc_refcount = m_ipc_lock_count,
209 device_use_count, 219 .device_refcount = m_device_use_count,
220 .padding = {},
210 }; 221 };
211 } 222 }
212 223
213 constexpr VAddr GetAddress() const { 224 constexpr uintptr_t GetAddress() const {
214 return addr; 225 return m_address;
226 }
227
228 constexpr size_t GetSize() const {
229 return m_size;
215 } 230 }
216 constexpr std::size_t GetSize() const { 231
217 return size; 232 constexpr size_t GetNumPages() const {
233 return this->GetSize() / PageSize;
218 } 234 }
219 constexpr std::size_t GetNumPages() const { 235
220 return GetSize() / PageSize; 236 constexpr uintptr_t GetEndAddress() const {
237 return this->GetAddress() + this->GetSize();
221 } 238 }
222 constexpr VAddr GetEndAddress() const { 239
223 return GetAddress() + GetSize(); 240 constexpr uintptr_t GetLastAddress() const {
241 return this->GetEndAddress() - 1;
224 } 242 }
225 constexpr VAddr GetLastAddress() const { 243
226 return GetEndAddress() - 1; 244 constexpr u16 GetIpcLockCount() const {
245 return m_ipc_lock_count;
227 } 246 }
247
248 constexpr u16 GetIpcDisableMergeCount() const {
249 return m_ipc_disable_merge_count;
250 }
251
228 constexpr KMemoryState GetState() const { 252 constexpr KMemoryState GetState() const {
229 return state; 253 return m_state;
254 }
255
256 constexpr KMemoryPermission GetPermission() const {
257 return m_permission;
230 } 258 }
259
260 constexpr KMemoryPermission GetOriginalPermission() const {
261 return m_original_permission;
262 }
263
231 constexpr KMemoryAttribute GetAttribute() const { 264 constexpr KMemoryAttribute GetAttribute() const {
232 return attribute; 265 return m_attribute;
233 } 266 }
234 constexpr KMemoryPermission GetPermission() const { 267
235 return perm; 268 constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const {
269 return m_disable_merge_attribute;
236 } 270 }
237}; 271};
238 272
239class KMemoryBlock final { 273class KMemoryBlock : public Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock> {
240 friend class KMemoryBlockManager;
241
242private: 274private:
243 VAddr addr{}; 275 u16 m_device_disable_merge_left_count;
244 std::size_t num_pages{}; 276 u16 m_device_disable_merge_right_count;
245 KMemoryState state{KMemoryState::None}; 277 VAddr m_address;
246 u16 ipc_lock_count{}; 278 size_t m_num_pages;
247 u16 device_use_count{}; 279 KMemoryState m_memory_state;
248 KMemoryPermission perm{KMemoryPermission::None}; 280 u16 m_ipc_lock_count;
249 KMemoryPermission original_perm{KMemoryPermission::None}; 281 u16 m_device_use_count;
250 KMemoryAttribute attribute{KMemoryAttribute::None}; 282 u16 m_ipc_disable_merge_count;
283 KMemoryPermission m_permission;
284 KMemoryPermission m_original_permission;
285 KMemoryAttribute m_attribute;
286 KMemoryBlockDisableMergeAttribute m_disable_merge_attribute;
251 287
252public: 288public:
253 static constexpr int Compare(const KMemoryBlock& lhs, const KMemoryBlock& rhs) { 289 static constexpr int Compare(const KMemoryBlock& lhs, const KMemoryBlock& rhs) {
@@ -261,113 +297,349 @@ public:
261 } 297 }
262 298
263public: 299public:
264 constexpr KMemoryBlock() = default;
265 constexpr KMemoryBlock(VAddr addr_, std::size_t num_pages_, KMemoryState state_,
266 KMemoryPermission perm_, KMemoryAttribute attribute_)
267 : addr{addr_}, num_pages(num_pages_), state{state_}, perm{perm_}, attribute{attribute_} {}
268
269 constexpr VAddr GetAddress() const { 300 constexpr VAddr GetAddress() const {
270 return addr; 301 return m_address;
271 } 302 }
272 303
273 constexpr std::size_t GetNumPages() const { 304 constexpr size_t GetNumPages() const {
274 return num_pages; 305 return m_num_pages;
275 } 306 }
276 307
277 constexpr std::size_t GetSize() const { 308 constexpr size_t GetSize() const {
278 return GetNumPages() * PageSize; 309 return this->GetNumPages() * PageSize;
279 } 310 }
280 311
281 constexpr VAddr GetEndAddress() const { 312 constexpr VAddr GetEndAddress() const {
282 return GetAddress() + GetSize(); 313 return this->GetAddress() + this->GetSize();
283 } 314 }
284 315
285 constexpr VAddr GetLastAddress() const { 316 constexpr VAddr GetLastAddress() const {
286 return GetEndAddress() - 1; 317 return this->GetEndAddress() - 1;
318 }
319
320 constexpr u16 GetIpcLockCount() const {
321 return m_ipc_lock_count;
322 }
323
324 constexpr u16 GetIpcDisableMergeCount() const {
325 return m_ipc_disable_merge_count;
326 }
327
328 constexpr KMemoryPermission GetPermission() const {
329 return m_permission;
330 }
331
332 constexpr KMemoryPermission GetOriginalPermission() const {
333 return m_original_permission;
334 }
335
336 constexpr KMemoryAttribute GetAttribute() const {
337 return m_attribute;
287 } 338 }
288 339
289 constexpr KMemoryInfo GetMemoryInfo() const { 340 constexpr KMemoryInfo GetMemoryInfo() const {
290 return { 341 return {
291 GetAddress(), GetSize(), state, perm, 342 .m_address = this->GetAddress(),
292 attribute, original_perm, ipc_lock_count, device_use_count, 343 .m_size = this->GetSize(),
344 .m_state = m_memory_state,
345 .m_device_disable_merge_left_count = m_device_disable_merge_left_count,
346 .m_device_disable_merge_right_count = m_device_disable_merge_right_count,
347 .m_ipc_lock_count = m_ipc_lock_count,
348 .m_device_use_count = m_device_use_count,
349 .m_ipc_disable_merge_count = m_ipc_disable_merge_count,
350 .m_permission = m_permission,
351 .m_attribute = m_attribute,
352 .m_original_permission = m_original_permission,
353 .m_disable_merge_attribute = m_disable_merge_attribute,
293 }; 354 };
294 } 355 }
295 356
296 void ShareToDevice(KMemoryPermission /*new_perm*/) { 357public:
297 ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared || 358 explicit KMemoryBlock() = default;
298 device_use_count == 0); 359
299 attribute |= KMemoryAttribute::DeviceShared; 360 constexpr KMemoryBlock(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
300 const u16 new_use_count{++device_use_count}; 361 KMemoryAttribute attr)
301 ASSERT(new_use_count > 0); 362 : Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock>(),
363 m_device_disable_merge_left_count(), m_device_disable_merge_right_count(),
364 m_address(addr), m_num_pages(np), m_memory_state(ms), m_ipc_lock_count(0),
365 m_device_use_count(0), m_ipc_disable_merge_count(), m_permission(p),
366 m_original_permission(KMemoryPermission::None), m_attribute(attr),
367 m_disable_merge_attribute() {}
368
369 constexpr void Initialize(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
370 KMemoryAttribute attr) {
371 m_device_disable_merge_left_count = 0;
372 m_device_disable_merge_right_count = 0;
373 m_address = addr;
374 m_num_pages = np;
375 m_memory_state = ms;
376 m_ipc_lock_count = 0;
377 m_device_use_count = 0;
378 m_permission = p;
379 m_original_permission = KMemoryPermission::None;
380 m_attribute = attr;
381 m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None;
382 }
383
384 constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const {
385 constexpr auto AttributeIgnoreMask =
386 KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
387 return m_memory_state == s && m_permission == p &&
388 (m_attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
389 }
390
391 constexpr bool HasSameProperties(const KMemoryBlock& rhs) const {
392 return m_memory_state == rhs.m_memory_state && m_permission == rhs.m_permission &&
393 m_original_permission == rhs.m_original_permission &&
394 m_attribute == rhs.m_attribute && m_ipc_lock_count == rhs.m_ipc_lock_count &&
395 m_device_use_count == rhs.m_device_use_count;
396 }
397
398 constexpr bool CanMergeWith(const KMemoryBlock& rhs) const {
399 return this->HasSameProperties(rhs) &&
400 (m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight) ==
401 KMemoryBlockDisableMergeAttribute::None &&
402 (rhs.m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft) ==
403 KMemoryBlockDisableMergeAttribute::None;
302 } 404 }
303 405
304 void UnshareToDevice(KMemoryPermission /*new_perm*/) { 406 constexpr bool Contains(VAddr addr) const {
305 ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared); 407 return this->GetAddress() <= addr && addr <= this->GetEndAddress();
306 const u16 prev_use_count{device_use_count--}; 408 }
307 ASSERT(prev_use_count > 0); 409
308 if (prev_use_count == 1) { 410 constexpr void Add(const KMemoryBlock& added_block) {
309 attribute &= ~KMemoryAttribute::DeviceShared; 411 ASSERT(added_block.GetNumPages() > 0);
412 ASSERT(this->GetAddress() + added_block.GetSize() - 1 <
413 this->GetEndAddress() + added_block.GetSize() - 1);
414
415 m_num_pages += added_block.GetNumPages();
416 m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
417 m_disable_merge_attribute | added_block.m_disable_merge_attribute);
418 m_device_disable_merge_right_count = added_block.m_device_disable_merge_right_count;
419 }
420
421 constexpr void Update(KMemoryState s, KMemoryPermission p, KMemoryAttribute a,
422 bool set_disable_merge_attr, u8 set_mask, u8 clear_mask) {
423 ASSERT(m_original_permission == KMemoryPermission::None);
424 ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None);
425
426 m_memory_state = s;
427 m_permission = p;
428 m_attribute = static_cast<KMemoryAttribute>(
429 a | (m_attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)));
430
431 if (set_disable_merge_attr && set_mask != 0) {
432 m_disable_merge_attribute = m_disable_merge_attribute |
433 static_cast<KMemoryBlockDisableMergeAttribute>(set_mask);
434 }
435 if (clear_mask != 0) {
436 m_disable_merge_attribute = m_disable_merge_attribute &
437 static_cast<KMemoryBlockDisableMergeAttribute>(~clear_mask);
310 } 438 }
311 } 439 }
312 440
313private: 441 constexpr void Split(KMemoryBlock* block, VAddr addr) {
314 constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const { 442 ASSERT(this->GetAddress() < addr);
315 constexpr KMemoryAttribute AttributeIgnoreMask{KMemoryAttribute::DontCareMask | 443 ASSERT(this->Contains(addr));
316 KMemoryAttribute::IpcLocked | 444 ASSERT(Common::IsAligned(addr, PageSize));
317 KMemoryAttribute::DeviceShared}; 445
318 return state == s && perm == p && 446 block->m_address = m_address;
319 (attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask); 447 block->m_num_pages = (addr - this->GetAddress()) / PageSize;
448 block->m_memory_state = m_memory_state;
449 block->m_ipc_lock_count = m_ipc_lock_count;
450 block->m_device_use_count = m_device_use_count;
451 block->m_permission = m_permission;
452 block->m_original_permission = m_original_permission;
453 block->m_attribute = m_attribute;
454 block->m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
455 m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft);
456 block->m_ipc_disable_merge_count = m_ipc_disable_merge_count;
457 block->m_device_disable_merge_left_count = m_device_disable_merge_left_count;
458 block->m_device_disable_merge_right_count = 0;
459
460 m_address = addr;
461 m_num_pages -= block->m_num_pages;
462
463 m_ipc_disable_merge_count = 0;
464 m_device_disable_merge_left_count = 0;
465 m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
466 m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight);
320 } 467 }
321 468
322 constexpr bool HasSameProperties(const KMemoryBlock& rhs) const { 469 constexpr void UpdateDeviceDisableMergeStateForShareLeft(
323 return state == rhs.state && perm == rhs.perm && original_perm == rhs.original_perm && 470 [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
324 attribute == rhs.attribute && ipc_lock_count == rhs.ipc_lock_count && 471 if (left) {
325 device_use_count == rhs.device_use_count; 472 m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
473 m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceLeft);
474 const u16 new_device_disable_merge_left_count = ++m_device_disable_merge_left_count;
475 ASSERT(new_device_disable_merge_left_count > 0);
476 }
326 } 477 }
327 478
328 constexpr bool Contains(VAddr start) const { 479 constexpr void UpdateDeviceDisableMergeStateForShareRight(
329 return GetAddress() <= start && start <= GetEndAddress(); 480 [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
481 if (right) {
482 m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
483 m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceRight);
484 const u16 new_device_disable_merge_right_count = ++m_device_disable_merge_right_count;
485 ASSERT(new_device_disable_merge_right_count > 0);
486 }
487 }
488
489 constexpr void UpdateDeviceDisableMergeStateForShare(KMemoryPermission new_perm, bool left,
490 bool right) {
491 this->UpdateDeviceDisableMergeStateForShareLeft(new_perm, left, right);
492 this->UpdateDeviceDisableMergeStateForShareRight(new_perm, left, right);
330 } 493 }
331 494
332 constexpr void Add(std::size_t count) { 495 constexpr void ShareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
333 ASSERT(count > 0); 496 bool right) {
334 ASSERT(GetAddress() + count * PageSize - 1 < GetEndAddress() + count * PageSize - 1); 497 // We must either be shared or have a zero lock count.
498 ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared ||
499 m_device_use_count == 0);
335 500
336 num_pages += count; 501 // Share.
502 const u16 new_count = ++m_device_use_count;
503 ASSERT(new_count > 0);
504
505 m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute::DeviceShared);
506
507 this->UpdateDeviceDisableMergeStateForShare(new_perm, left, right);
337 } 508 }
338 509
339 constexpr void Update(KMemoryState new_state, KMemoryPermission new_perm, 510 constexpr void UpdateDeviceDisableMergeStateForUnshareLeft(
340 KMemoryAttribute new_attribute) { 511 [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
341 ASSERT(original_perm == KMemoryPermission::None);
342 ASSERT((attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None);
343 512
344 state = new_state; 513 if (left) {
345 perm = new_perm; 514 if (!m_device_disable_merge_left_count) {
515 return;
516 }
517 --m_device_disable_merge_left_count;
518 }
346 519
347 attribute = static_cast<KMemoryAttribute>( 520 m_device_disable_merge_left_count =
348 new_attribute | 521 std::min(m_device_disable_merge_left_count, m_device_use_count);
349 (attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared))); 522
523 if (m_device_disable_merge_left_count == 0) {
524 m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
525 m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceLeft);
526 }
350 } 527 }
351 528
352 constexpr KMemoryBlock Split(VAddr split_addr) { 529 constexpr void UpdateDeviceDisableMergeStateForUnshareRight(
353 ASSERT(GetAddress() < split_addr); 530 [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
354 ASSERT(Contains(split_addr)); 531 if (right) {
355 ASSERT(Common::IsAligned(split_addr, PageSize)); 532 const u16 old_device_disable_merge_right_count = m_device_disable_merge_right_count--;
533 ASSERT(old_device_disable_merge_right_count > 0);
534 if (old_device_disable_merge_right_count == 1) {
535 m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
536 m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceRight);
537 }
538 }
539 }
356 540
357 KMemoryBlock block; 541 constexpr void UpdateDeviceDisableMergeStateForUnshare(KMemoryPermission new_perm, bool left,
358 block.addr = addr; 542 bool right) {
359 block.num_pages = (split_addr - GetAddress()) / PageSize; 543 this->UpdateDeviceDisableMergeStateForUnshareLeft(new_perm, left, right);
360 block.state = state; 544 this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
361 block.ipc_lock_count = ipc_lock_count; 545 }
362 block.device_use_count = device_use_count;
363 block.perm = perm;
364 block.original_perm = original_perm;
365 block.attribute = attribute;
366 546
367 addr = split_addr; 547 constexpr void UnshareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
368 num_pages -= block.num_pages; 548 bool right) {
549 // We must be shared.
550 ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
551
552 // Unshare.
553 const u16 old_count = m_device_use_count--;
554 ASSERT(old_count > 0);
555
556 if (old_count == 1) {
557 m_attribute =
558 static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::DeviceShared);
559 }
560
561 this->UpdateDeviceDisableMergeStateForUnshare(new_perm, left, right);
562 }
563
564 constexpr void UnshareToDeviceRight([[maybe_unused]] KMemoryPermission new_perm, bool left,
565 bool right) {
566
567 // We must be shared.
568 ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
569
570 // Unshare.
571 const u16 old_count = m_device_use_count--;
572 ASSERT(old_count > 0);
573
574 if (old_count == 1) {
575 m_attribute =
576 static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::DeviceShared);
577 }
578
579 this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
580 }
581
582 constexpr void LockForIpc(KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
583 // We must either be locked or have a zero lock count.
584 ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked ||
585 m_ipc_lock_count == 0);
586
587 // Lock.
588 const u16 new_lock_count = ++m_ipc_lock_count;
589 ASSERT(new_lock_count > 0);
590
591 // If this is our first lock, update our permissions.
592 if (new_lock_count == 1) {
593 ASSERT(m_original_permission == KMemoryPermission::None);
594 ASSERT((m_permission | new_perm | KMemoryPermission::NotMapped) ==
595 (m_permission | KMemoryPermission::NotMapped));
596 ASSERT((m_permission & KMemoryPermission::UserExecute) !=
597 KMemoryPermission::UserExecute ||
598 (new_perm == KMemoryPermission::UserRead));
599 m_original_permission = m_permission;
600 m_permission = static_cast<KMemoryPermission>(
601 (new_perm & KMemoryPermission::IpcLockChangeMask) |
602 (m_original_permission & ~KMemoryPermission::IpcLockChangeMask));
603 }
604 m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute::IpcLocked);
605
606 if (left) {
607 m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
608 m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::IpcLeft);
609 const u16 new_ipc_disable_merge_count = ++m_ipc_disable_merge_count;
610 ASSERT(new_ipc_disable_merge_count > 0);
611 }
612 }
613
614 constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left,
615 [[maybe_unused]] bool right) {
616 // We must be locked.
617 ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked);
618
619 // Unlock.
620 const u16 old_lock_count = m_ipc_lock_count--;
621 ASSERT(old_lock_count > 0);
622
623 // If this is our last unlock, update our permissions.
624 if (old_lock_count == 1) {
625 ASSERT(m_original_permission != KMemoryPermission::None);
626 m_permission = m_original_permission;
627 m_original_permission = KMemoryPermission::None;
628 m_attribute = static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::IpcLocked);
629 }
630
631 if (left) {
632 const u16 old_ipc_disable_merge_count = m_ipc_disable_merge_count--;
633 ASSERT(old_ipc_disable_merge_count > 0);
634 if (old_ipc_disable_merge_count == 1) {
635 m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
636 m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::IpcLeft);
637 }
638 }
639 }
369 640
370 return block; 641 constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const {
642 return m_disable_merge_attribute;
371 } 643 }
372}; 644};
373static_assert(std::is_trivially_destructible<KMemoryBlock>::value); 645static_assert(std::is_trivially_destructible<KMemoryBlock>::value);
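
The most intricate new member above is Split: the caller-supplied block receives the front of the range, the original block keeps the tail, and the disable-merge state is divided so left-edge attributes stay with the front and right-edge attributes with the tail. A sketch of the address arithmetic alone, with that merge bookkeeping omitted:

#include <cassert>
#include <cstddef>
#include <cstdio>

constexpr std::size_t PageSize = 0x1000;

struct Block {
    std::size_t address;
    std::size_t num_pages;

    std::size_t GetEndAddress() const {
        return address + num_pages * PageSize;
    }

    // Carve [address, split_addr) off into 'front'; this block keeps the tail.
    void Split(Block* front, std::size_t split_addr) {
        assert(address < split_addr && split_addr < GetEndAddress());
        assert(split_addr % PageSize == 0);

        front->address = address;
        front->num_pages = (split_addr - address) / PageSize;

        address = split_addr;
        num_pages -= front->num_pages;
    }
};

int main() {
    Block block{0x10000, 8}; // 8 pages starting at 0x10000
    Block front{};
    block.Split(&front, 0x13000);
    std::printf("front: 0x%zx, %zu pages; tail: 0x%zx, %zu pages\n",
                front.address, front.num_pages, block.address, block.num_pages);
    // front: 0x10000, 3 pages; tail: 0x13000, 5 pages
    return 0;
}
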
diff --git a/src/core/hle/kernel/k_memory_block_manager.cpp b/src/core/hle/kernel/k_memory_block_manager.cpp
index 3ddb9984f..cf4c1e371 100644
--- a/src/core/hle/kernel/k_memory_block_manager.cpp
+++ b/src/core/hle/kernel/k_memory_block_manager.cpp
@@ -2,221 +2,336 @@
2// SPDX-License-Identifier: GPL-2.0-or-later 2// SPDX-License-Identifier: GPL-2.0-or-later
3 3
4#include "core/hle/kernel/k_memory_block_manager.h" 4#include "core/hle/kernel/k_memory_block_manager.h"
5#include "core/hle/kernel/memory_types.h"
6 5
7namespace Kernel { 6namespace Kernel {
8 7
9KMemoryBlockManager::KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_) 8KMemoryBlockManager::KMemoryBlockManager() = default;
10 : start_addr{start_addr_}, end_addr{end_addr_} {
11 const u64 num_pages{(end_addr - start_addr) / PageSize};
12 memory_block_tree.emplace_back(start_addr, num_pages, KMemoryState::Free,
13 KMemoryPermission::None, KMemoryAttribute::None);
14}
15 9
16KMemoryBlockManager::iterator KMemoryBlockManager::FindIterator(VAddr addr) { 10Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager) {
17 auto node{memory_block_tree.begin()}; 11 // Allocate a block to encapsulate the address space, insert it into the tree.
18 while (node != end()) { 12 KMemoryBlock* start_block = slab_manager->Allocate();
19 const VAddr node_end_addr{node->GetNumPages() * PageSize + node->GetAddress()}; 13 R_UNLESS(start_block != nullptr, ResultOutOfResource);
20 if (node->GetAddress() <= addr && node_end_addr - 1 >= addr) { 14
21 return node; 15 // Set our start and end.
22 } 16 m_start_address = st;
23 node = std::next(node); 17 m_end_address = nd;
24 } 18 ASSERT(Common::IsAligned(m_start_address, PageSize));
25 return end(); 19 ASSERT(Common::IsAligned(m_end_address, PageSize));
20
21 // Initialize and insert the block.
22 start_block->Initialize(m_start_address, (m_end_address - m_start_address) / PageSize,
23 KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None);
24 m_memory_block_tree.insert(*start_block);
25
26 R_SUCCEED();
26} 27}
27 28
28VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_num_pages, 29void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager* slab_manager,
29 std::size_t num_pages, std::size_t align, 30 HostUnmapCallback&& host_unmap_callback) {
30 std::size_t offset, std::size_t guard_pages) { 31 // Erase every block until we have none left.
31 if (num_pages == 0) { 32 auto it = m_memory_block_tree.begin();
32 return {}; 33 while (it != m_memory_block_tree.end()) {
34 KMemoryBlock* block = std::addressof(*it);
35 it = m_memory_block_tree.erase(it);
36 slab_manager->Free(block);
37 host_unmap_callback(block->GetAddress(), block->GetSize());
33 } 38 }
34 39
35 const VAddr region_end{region_start + region_num_pages * PageSize}; 40 ASSERT(m_memory_block_tree.empty());
36 const VAddr region_last{region_end - 1}; 41}
37 for (auto it{FindIterator(region_start)}; it != memory_block_tree.cend(); it++) {
38 const auto info{it->GetMemoryInfo()};
39 if (region_last < info.GetAddress()) {
40 break;
41 }
42 42
43 if (info.state != KMemoryState::Free) { 43VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pages,
44 continue; 44 size_t num_pages, size_t alignment, size_t offset,
45 } 45 size_t guard_pages) const {
46 if (num_pages > 0) {
47 const VAddr region_end = region_start + region_num_pages * PageSize;
48 const VAddr region_last = region_end - 1;
49 for (const_iterator it = this->FindIterator(region_start); it != m_memory_block_tree.cend();
50 it++) {
51 const KMemoryInfo info = it->GetMemoryInfo();
52 if (region_last < info.GetAddress()) {
53 break;
54 }
55 if (info.m_state != KMemoryState::Free) {
56 continue;
57 }
46 58
47 VAddr area{(info.GetAddress() <= region_start) ? region_start : info.GetAddress()}; 59 VAddr area = (info.GetAddress() <= region_start) ? region_start : info.GetAddress();
48 area += guard_pages * PageSize; 60 area += guard_pages * PageSize;
49 61
50 const VAddr offset_area{Common::AlignDown(area, align) + offset}; 62 const VAddr offset_area = Common::AlignDown(area, alignment) + offset;
51 area = (area <= offset_area) ? offset_area : offset_area + align; 63 area = (area <= offset_area) ? offset_area : offset_area + alignment;
52 64
53 const VAddr area_end{area + num_pages * PageSize + guard_pages * PageSize}; 65 const VAddr area_end = area + num_pages * PageSize + guard_pages * PageSize;
54 const VAddr area_last{area_end - 1}; 66 const VAddr area_last = area_end - 1;
55 67
56 if (info.GetAddress() <= area && area < area_last && area_last <= region_last && 68 if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
57 area_last <= info.GetLastAddress()) { 69 area_last <= info.GetLastAddress()) {
58 return area; 70 return area;
71 }
59 } 72 }
60 } 73 }
61 74
62 return {}; 75 return {};
63} 76}
64 77
65void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state, 78void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator,
66 KMemoryPermission prev_perm, KMemoryAttribute prev_attribute, 79 VAddr address, size_t num_pages) {
67 KMemoryState state, KMemoryPermission perm, 80 // Find the iterator now that we've updated.
68 KMemoryAttribute attribute) { 81 iterator it = this->FindIterator(address);
69 const VAddr update_end_addr{addr + num_pages * PageSize}; 82 if (address != m_start_address) {
70 iterator node{memory_block_tree.begin()}; 83 it--;
84 }
71 85
-    prev_attribute |= KMemoryAttribute::IpcAndDeviceMapped;
-
-    while (node != memory_block_tree.end()) {
-        KMemoryBlock* block{&(*node)};
-        iterator next_node{std::next(node)};
-        const VAddr cur_addr{block->GetAddress()};
-        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
-
-        if (addr < cur_end_addr && cur_addr < update_end_addr) {
-            if (!block->HasProperties(prev_state, prev_perm, prev_attribute)) {
-                node = next_node;
-                continue;
-            }
-
-            iterator new_node{node};
-            if (addr > cur_addr) {
-                memory_block_tree.insert(node, block->Split(addr));
-            }
-
-            if (update_end_addr < cur_end_addr) {
-                new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
-            }
-
-            new_node->Update(state, perm, attribute);
-
-            MergeAdjacent(new_node, next_node);
-        }
-
-        if (cur_end_addr - 1 >= update_end_addr - 1) {
-            break;
-        }
-
-        node = next_node;
-    }
-}
-
-void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState state,
-                                 KMemoryPermission perm, KMemoryAttribute attribute) {
-    const VAddr update_end_addr{addr + num_pages * PageSize};
-    iterator node{memory_block_tree.begin()};
-
-    while (node != memory_block_tree.end()) {
-        KMemoryBlock* block{&(*node)};
-        iterator next_node{std::next(node)};
-        const VAddr cur_addr{block->GetAddress()};
-        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
-
-        if (addr < cur_end_addr && cur_addr < update_end_addr) {
-            iterator new_node{node};
-
-            if (addr > cur_addr) {
-                memory_block_tree.insert(node, block->Split(addr));
-            }
-
-            if (update_end_addr < cur_end_addr) {
-                new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
-            }
-
-            new_node->Update(state, perm, attribute);
-
-            MergeAdjacent(new_node, next_node);
-        }
-
-        if (cur_end_addr - 1 >= update_end_addr - 1) {
-            break;
-        }
-
-        node = next_node;
-    }
-}
-
-void KMemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
-                                     KMemoryPermission perm) {
-    const VAddr update_end_addr{addr + num_pages * PageSize};
-    iterator node{memory_block_tree.begin()};
-
-    while (node != memory_block_tree.end()) {
-        KMemoryBlock* block{&(*node)};
-        iterator next_node{std::next(node)};
-        const VAddr cur_addr{block->GetAddress()};
-        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
-
-        if (addr < cur_end_addr && cur_addr < update_end_addr) {
-            iterator new_node{node};
-
-            if (addr > cur_addr) {
-                memory_block_tree.insert(node, block->Split(addr));
-            }
-
-            if (update_end_addr < cur_end_addr) {
-                new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
-            }
-
-            lock_func(new_node, perm);
-
-            MergeAdjacent(new_node, next_node);
-        }
-
-        if (cur_end_addr - 1 >= update_end_addr - 1) {
-            break;
-        }
-
-        node = next_node;
-    }
-}
-
-void KMemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) {
-    const_iterator it{FindIterator(start)};
-    KMemoryInfo info{};
-    do {
-        info = it->GetMemoryInfo();
-        func(info);
-        it = std::next(it);
-    } while (info.addr + info.size - 1 < end - 1 && it != cend());
-}
-
-void KMemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) {
-    KMemoryBlock* block{&(*it)};
-
-    auto EraseIt = [&](const iterator it_to_erase) {
-        if (next_it == it_to_erase) {
-            next_it = std::next(next_it);
-        }
-        memory_block_tree.erase(it_to_erase);
-    };
-
-    if (it != memory_block_tree.begin()) {
-        KMemoryBlock* prev{&(*std::prev(it))};
-
-        if (block->HasSameProperties(*prev)) {
-            const iterator prev_it{std::prev(it)};
-
-            prev->Add(block->GetNumPages());
-            EraseIt(it);
-
-            it = prev_it;
-            block = prev;
-        }
-    }
-
-    if (it != cend()) {
-        const KMemoryBlock* const next{&(*std::next(it))};
-
-        if (block->HasSameProperties(*next)) {
-            block->Add(next->GetNumPages());
-            EraseIt(std::next(it));
-        }
-    }
-}
+    // Coalesce blocks that we can.
+    while (true) {
+        iterator prev = it++;
+        if (it == m_memory_block_tree.end()) {
+            break;
+        }
+
+        if (prev->CanMergeWith(*it)) {
+            KMemoryBlock* block = std::addressof(*it);
+            m_memory_block_tree.erase(it);
+            prev->Add(*block);
+            allocator->Free(block);
+            it = prev;
+        }
+
+        if (address + num_pages * PageSize < it->GetMemoryInfo().GetEndAddress()) {
+            break;
+        }
+    }
+}
+
+void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+                                 size_t num_pages, KMemoryState state, KMemoryPermission perm,
+                                 KMemoryAttribute attr,
+                                 KMemoryBlockDisableMergeAttribute set_disable_attr,
+                                 KMemoryBlockDisableMergeAttribute clear_disable_attr) {
+    // Ensure for auditing that we never end up with an invalid tree.
+    KScopedMemoryBlockManagerAuditor auditor(this);
+    ASSERT(Common::IsAligned(address, PageSize));
+    ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
+           KMemoryAttribute::None);
+
+    VAddr cur_address = address;
+    size_t remaining_pages = num_pages;
+    iterator it = this->FindIterator(address);
+
+    while (remaining_pages > 0) {
+        const size_t remaining_size = remaining_pages * PageSize;
+        KMemoryInfo cur_info = it->GetMemoryInfo();
+        if (it->HasProperties(state, perm, attr)) {
+            // If we already have the right properties, just advance.
+            if (cur_address + remaining_size < cur_info.GetEndAddress()) {
+                remaining_pages = 0;
+                cur_address += remaining_size;
+            } else {
+                remaining_pages =
+                    (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
+                cur_address = cur_info.GetEndAddress();
+            }
+        } else {
+            // If we need to, create a new block before and insert it.
+            if (cur_info.GetAddress() != cur_address) {
+                KMemoryBlock* new_block = allocator->Allocate();
+
+                it->Split(new_block, cur_address);
+                it = m_memory_block_tree.insert(*new_block);
+                it++;
+
+                cur_info = it->GetMemoryInfo();
+                cur_address = cur_info.GetAddress();
+            }
+
+            // If we need to, create a new block after and insert it.
+            if (cur_info.GetSize() > remaining_size) {
+                KMemoryBlock* new_block = allocator->Allocate();
+
+                it->Split(new_block, cur_address + remaining_size);
+                it = m_memory_block_tree.insert(*new_block);
+
+                cur_info = it->GetMemoryInfo();
+            }
+
+            // Update block state.
+            it->Update(state, perm, attr, cur_address == address, static_cast<u8>(set_disable_attr),
+                       static_cast<u8>(clear_disable_attr));
+            cur_address += cur_info.GetSize();
+            remaining_pages -= cur_info.GetNumPages();
+        }
+        it++;
+    }
+
+    this->CoalesceForUpdate(allocator, address, num_pages);
+}
+
+void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator,
+                                        VAddr address, size_t num_pages, KMemoryState test_state,
+                                        KMemoryPermission test_perm, KMemoryAttribute test_attr,
+                                        KMemoryState state, KMemoryPermission perm,
+                                        KMemoryAttribute attr) {
+    // Ensure for auditing that we never end up with an invalid tree.
+    KScopedMemoryBlockManagerAuditor auditor(this);
+    ASSERT(Common::IsAligned(address, PageSize));
+    ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
+           KMemoryAttribute::None);
+
+    VAddr cur_address = address;
+    size_t remaining_pages = num_pages;
+    iterator it = this->FindIterator(address);
+
+    while (remaining_pages > 0) {
+        const size_t remaining_size = remaining_pages * PageSize;
+        KMemoryInfo cur_info = it->GetMemoryInfo();
+        if (it->HasProperties(test_state, test_perm, test_attr) &&
+            !it->HasProperties(state, perm, attr)) {
+            // If we need to, create a new block before and insert it.
+            if (cur_info.GetAddress() != cur_address) {
+                KMemoryBlock* new_block = allocator->Allocate();
+
+                it->Split(new_block, cur_address);
+                it = m_memory_block_tree.insert(*new_block);
+                it++;
+
+                cur_info = it->GetMemoryInfo();
+                cur_address = cur_info.GetAddress();
+            }
+
+            // If we need to, create a new block after and insert it.
+            if (cur_info.GetSize() > remaining_size) {
+                KMemoryBlock* new_block = allocator->Allocate();
+
+                it->Split(new_block, cur_address + remaining_size);
+                it = m_memory_block_tree.insert(*new_block);
+
+                cur_info = it->GetMemoryInfo();
+            }
+
+            // Update block state.
+            it->Update(state, perm, attr, false, 0, 0);
+            cur_address += cur_info.GetSize();
+            remaining_pages -= cur_info.GetNumPages();
+        } else {
+            // If we already have the right properties, just advance.
+            if (cur_address + remaining_size < cur_info.GetEndAddress()) {
+                remaining_pages = 0;
+                cur_address += remaining_size;
+            } else {
+                remaining_pages =
+                    (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
+                cur_address = cur_info.GetEndAddress();
+            }
+        }
+        it++;
+    }
+
+    this->CoalesceForUpdate(allocator, address, num_pages);
+}
+
+void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+                                     size_t num_pages, MemoryBlockLockFunction lock_func,
+                                     KMemoryPermission perm) {
+    // Ensure for auditing that we never end up with an invalid tree.
+    KScopedMemoryBlockManagerAuditor auditor(this);
+    ASSERT(Common::IsAligned(address, PageSize));
+
+    VAddr cur_address = address;
+    size_t remaining_pages = num_pages;
+    iterator it = this->FindIterator(address);
+
+    const VAddr end_address = address + (num_pages * PageSize);
+
+    while (remaining_pages > 0) {
+        const size_t remaining_size = remaining_pages * PageSize;
+        KMemoryInfo cur_info = it->GetMemoryInfo();
+
+        // If we need to, create a new block before and insert it.
+        if (cur_info.m_address != cur_address) {
+            KMemoryBlock* new_block = allocator->Allocate();
+
+            it->Split(new_block, cur_address);
+            it = m_memory_block_tree.insert(*new_block);
+            it++;
+
+            cur_info = it->GetMemoryInfo();
+            cur_address = cur_info.GetAddress();
+        }
+
+        if (cur_info.GetSize() > remaining_size) {
+            // If we need to, create a new block after and insert it.
+            KMemoryBlock* new_block = allocator->Allocate();
+
+            it->Split(new_block, cur_address + remaining_size);
+            it = m_memory_block_tree.insert(*new_block);
+
+            cur_info = it->GetMemoryInfo();
+        }
+
+        // Call the locked update function.
+        (std::addressof(*it)->*lock_func)(perm, cur_info.GetAddress() == address,
+                                          cur_info.GetEndAddress() == end_address);
+        cur_address += cur_info.GetSize();
+        remaining_pages -= cur_info.GetNumPages();
+        it++;
+    }
+
+    this->CoalesceForUpdate(allocator, address, num_pages);
+}
+
+// Debug.
+bool KMemoryBlockManager::CheckState() const {
+    // Loop over every block, ensuring that we are sorted and coalesced.
+    auto it = m_memory_block_tree.cbegin();
+    auto prev = it++;
+    while (it != m_memory_block_tree.cend()) {
+        const KMemoryInfo prev_info = prev->GetMemoryInfo();
+        const KMemoryInfo cur_info = it->GetMemoryInfo();
+
+        // Sequential blocks which can be merged should be merged.
+        if (prev->CanMergeWith(*it)) {
+            return false;
+        }
+
+        // Sequential blocks should be sequential.
+        if (prev_info.GetEndAddress() != cur_info.GetAddress()) {
+            return false;
+        }
+
+        // If the block is ipc locked, it must have a count.
+        if ((cur_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None &&
+            cur_info.m_ipc_lock_count == 0) {
+            return false;
+        }
+
+        // If the block is device shared, it must have a count.
+        if ((cur_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None &&
+            cur_info.m_device_use_count == 0) {
+            return false;
+        }
+
+        // Advance the iterator.
+        prev = it++;
+    }
+
+    // Our loop will miss checking the last block, potentially, so check it.
+    if (prev != m_memory_block_tree.cend()) {
+        const KMemoryInfo prev_info = prev->GetMemoryInfo();
+        // If the block is ipc locked, it must have a count.
+        if ((prev_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None &&
+            prev_info.m_ipc_lock_count == 0) {
+            return false;
+        }
+
+        // If the block is device shared, it must have a count.
+        if ((prev_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None &&
+            prev_info.m_device_use_count == 0) {
+            return false;
+        }
+    }
+
+    return true;
+}
 
 } // namespace Kernel
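Editor's note on the new Update() path above: each call splits at most one block at the front of its range and one at the back before rewriting state and coalescing, which is why the update allocator introduced below pre-allocates only two blocks. The toy below is a minimal sketch of that flow, assuming a single affected block; ToyBlock, ToyManager and the std::map are hypothetical stand-ins for the kernel's intrusive red-black tree, not yuzu code.

#include <cstddef>
#include <cstdint>
#include <iterator>
#include <map>

using VAddr = std::uint64_t;
constexpr std::uint64_t PageSize = 0x1000;

struct ToyBlock {
    std::size_t num_pages{};
    int state{}; // stands in for KMemoryState/permission/attribute
};

struct ToyManager {
    std::map<VAddr, ToyBlock> blocks; // sorted by base address, like the RB tree

    // Split-before, split-after, update, coalesce; handles only the case
    // where [address, address + num_pages) lies inside one existing block.
    void Update(VAddr address, std::size_t num_pages, int state) {
        const VAddr end = address + num_pages * PageSize;
        auto it = std::prev(blocks.upper_bound(address)); // block containing address
        // Split before the target range if it starts mid-block.
        if (it->first != address) {
            ToyBlock right{it->second.num_pages - (address - it->first) / PageSize,
                           it->second.state};
            it->second.num_pages -= right.num_pages;
            it = blocks.emplace(address, right).first;
        }
        // Split after the target range if the block extends past it.
        const VAddr it_end = it->first + it->second.num_pages * PageSize;
        if (it_end > end) {
            ToyBlock right{(it_end - end) / PageSize, it->second.state};
            it->second.num_pages -= right.num_pages;
            blocks.emplace(end, right);
        }
        it->second.state = state;
        // Coalesce adjacent blocks whose properties became identical.
        for (auto cur = blocks.begin(); std::next(cur) != blocks.end();) {
            auto nxt = std::next(cur);
            if (cur->second.state == nxt->second.state) {
                cur->second.num_pages += nxt->second.num_pages;
                blocks.erase(nxt);
            } else {
                cur = nxt;
            }
        }
    }
};

// Usage: one free block covering the space, then one update.
//   ToyManager m;
//   m.blocks.emplace(0, ToyBlock{0x100, /*state=*/0});
//   m.Update(0x2000, 4, /*state=*/1); // yields [0,0x2000) [0x2000,0x6000) [0x6000,...)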
diff --git a/src/core/hle/kernel/k_memory_block_manager.h b/src/core/hle/kernel/k_memory_block_manager.h
index e14741b89..9b5873883 100644
--- a/src/core/hle/kernel/k_memory_block_manager.h
+++ b/src/core/hle/kernel/k_memory_block_manager.h
@@ -4,63 +4,154 @@
 #pragma once
 
 #include <functional>
-#include <list>
 
+#include "common/common_funcs.h"
 #include "common/common_types.h"
+#include "core/hle/kernel/k_dynamic_resource_manager.h"
 #include "core/hle/kernel/k_memory_block.h"
 
 namespace Kernel {
 
+class KMemoryBlockManagerUpdateAllocator {
+public:
+    static constexpr size_t MaxBlocks = 2;
+
+private:
+    KMemoryBlock* m_blocks[MaxBlocks];
+    size_t m_index;
+    KMemoryBlockSlabManager* m_slab_manager;
+
+private:
+    Result Initialize(size_t num_blocks) {
+        // Check num blocks.
+        ASSERT(num_blocks <= MaxBlocks);
+
+        // Set index.
+        m_index = MaxBlocks - num_blocks;
+
+        // Allocate the blocks.
+        for (size_t i = 0; i < num_blocks && i < MaxBlocks; ++i) {
+            m_blocks[m_index + i] = m_slab_manager->Allocate();
+            R_UNLESS(m_blocks[m_index + i] != nullptr, ResultOutOfResource);
+        }
+
+        R_SUCCEED();
+    }
+
+public:
+    KMemoryBlockManagerUpdateAllocator(Result* out_result, KMemoryBlockSlabManager* sm,
+                                       size_t num_blocks = MaxBlocks)
+        : m_blocks(), m_index(MaxBlocks), m_slab_manager(sm) {
+        *out_result = this->Initialize(num_blocks);
+    }
+
+    ~KMemoryBlockManagerUpdateAllocator() {
+        for (const auto& block : m_blocks) {
+            if (block != nullptr) {
+                m_slab_manager->Free(block);
+            }
+        }
+    }
+
+    KMemoryBlock* Allocate() {
+        ASSERT(m_index < MaxBlocks);
+        ASSERT(m_blocks[m_index] != nullptr);
+        KMemoryBlock* block = nullptr;
+        std::swap(block, m_blocks[m_index++]);
+        return block;
+    }
+
+    void Free(KMemoryBlock* block) {
+        ASSERT(m_index <= MaxBlocks);
+        ASSERT(block != nullptr);
+        if (m_index == 0) {
+            m_slab_manager->Free(block);
+        } else {
+            m_blocks[--m_index] = block;
+        }
+    }
+};
+
 class KMemoryBlockManager final {
 public:
-    using MemoryBlockTree = std::list<KMemoryBlock>;
+    using MemoryBlockTree =
+        Common::IntrusiveRedBlackTreeBaseTraits<KMemoryBlock>::TreeType<KMemoryBlock>;
+    using MemoryBlockLockFunction = void (KMemoryBlock::*)(KMemoryPermission new_perm, bool left,
+                                                           bool right);
     using iterator = MemoryBlockTree::iterator;
     using const_iterator = MemoryBlockTree::const_iterator;
 
 public:
-    KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_);
+    KMemoryBlockManager();
+
+    using HostUnmapCallback = std::function<void(VAddr, u64)>;
+
+    Result Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager);
+    void Finalize(KMemoryBlockSlabManager* slab_manager, HostUnmapCallback&& host_unmap_callback);
 
     iterator end() {
-        return memory_block_tree.end();
+        return m_memory_block_tree.end();
     }
     const_iterator end() const {
-        return memory_block_tree.end();
+        return m_memory_block_tree.end();
     }
     const_iterator cend() const {
-        return memory_block_tree.cend();
+        return m_memory_block_tree.cend();
     }
 
-    iterator FindIterator(VAddr addr);
+    VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
+                       size_t alignment, size_t offset, size_t guard_pages) const;
 
-    VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
-                       std::size_t align, std::size_t offset, std::size_t guard_pages);
+    void Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
+                KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr,
+                KMemoryBlockDisableMergeAttribute set_disable_attr,
+                KMemoryBlockDisableMergeAttribute clear_disable_attr);
+    void UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
+                    MemoryBlockLockFunction lock_func, KMemoryPermission perm);
 
-    void Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state,
-                KMemoryPermission prev_perm, KMemoryAttribute prev_attribute, KMemoryState state,
-                KMemoryPermission perm, KMemoryAttribute attribute);
+    void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+                       size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm,
+                       KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm,
+                       KMemoryAttribute attr);
 
-    void Update(VAddr addr, std::size_t num_pages, KMemoryState state,
-                KMemoryPermission perm = KMemoryPermission::None,
-                KMemoryAttribute attribute = KMemoryAttribute::None);
-
-    using LockFunc = std::function<void(iterator, KMemoryPermission)>;
-    void UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
-                    KMemoryPermission perm);
+    iterator FindIterator(VAddr address) const {
+        return m_memory_block_tree.find(KMemoryBlock(
+            address, 1, KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None));
+    }
 
-    using IterateFunc = std::function<void(const KMemoryInfo&)>;
-    void IterateForRange(VAddr start, VAddr end, IterateFunc&& func);
+    const KMemoryBlock* FindBlock(VAddr address) const {
+        if (const_iterator it = this->FindIterator(address); it != m_memory_block_tree.end()) {
+            return std::addressof(*it);
+        }
 
-    KMemoryBlock& FindBlock(VAddr addr) {
-        return *FindIterator(addr);
+        return nullptr;
     }
 
+    // Debug.
+    bool CheckState() const;
+
 private:
-    void MergeAdjacent(iterator it, iterator& next_it);
+    void CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+                           size_t num_pages);
 
-    [[maybe_unused]] const VAddr start_addr;
-    [[maybe_unused]] const VAddr end_addr;
+    MemoryBlockTree m_memory_block_tree;
+    VAddr m_start_address{};
+    VAddr m_end_address{};
+};
 
-    MemoryBlockTree memory_block_tree;
+class KScopedMemoryBlockManagerAuditor {
+public:
+    explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager* m) : m_manager(m) {
+        ASSERT(m_manager->CheckState());
+    }
+    explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager& m)
+        : KScopedMemoryBlockManagerAuditor(std::addressof(m)) {}
+    ~KScopedMemoryBlockManagerAuditor() {
+        ASSERT(m_manager->CheckState());
+    }
+
+private:
+    KMemoryBlockManager* m_manager;
 };
 
 } // namespace Kernel
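The allocator above exists so block splits cannot fail partway through an update: every block the update might need is claimed from the slab up front, and the update itself only consumes what was reserved. A condensed sketch of the caller-side pattern (it mirrors the k_page_table.cpp call sites later in this diff; the wrapper function name and surroundings are invented for illustration):

Result SetMemoryStateSketch(KMemoryBlockManager& manager,
                            KMemoryBlockSlabManager* slab_manager, VAddr address,
                            size_t num_pages, KMemoryState state, KMemoryPermission perm) {
    // Reserve the (at most two) blocks this update can need, so the update
    // itself cannot fail halfway through.
    Result allocator_result{ResultSuccess};
    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                 slab_manager);
    R_TRY(allocator_result);

    // Update consumes blocks via Allocate() and hands coalesced ones back via
    // Free(); anything left unused is returned to the slab in the destructor.
    manager.Update(std::addressof(allocator), address, num_pages, state, perm,
                   KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
                   KMemoryBlockDisableMergeAttribute::None);
    R_SUCCEED();
}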
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index 5b0a9963a..646711505 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -331,7 +331,7 @@ Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pag
 
     // Set all the allocated memory.
     for (const auto& block : out->Nodes()) {
-        std::memset(system.DeviceMemory().GetPointer(block.GetAddress()), fill_pattern,
+        std::memset(system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
                     block.GetSize());
     }
 
diff --git a/src/core/hle/kernel/k_page_buffer.cpp b/src/core/hle/kernel/k_page_buffer.cpp
index 1a0bf4439..0c16dded4 100644
--- a/src/core/hle/kernel/k_page_buffer.cpp
+++ b/src/core/hle/kernel/k_page_buffer.cpp
@@ -12,7 +12,7 @@ namespace Kernel {
 
 KPageBuffer* KPageBuffer::FromPhysicalAddress(Core::System& system, PAddr phys_addr) {
     ASSERT(Common::IsAligned(phys_addr, PageSize));
-    return reinterpret_cast<KPageBuffer*>(system.DeviceMemory().GetPointer(phys_addr));
+    return system.DeviceMemory().GetPointer<KPageBuffer>(phys_addr);
 }
 
 } // namespace Kernel
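This hunk and the k_memory_manager.cpp one above both follow from DeviceMemory::GetPointer becoming a template (see the device_memory.h entry in the diffstat): the cast now lives in one typed accessor instead of at every call site. Roughly the shape involved, as a simplified, self-contained stand-in rather than the real DeviceMemory:

#include <cstdint>

struct DeviceMemorySketch {
    std::uint8_t* backing{};   // host allocation backing guest DRAM
    std::uint64_t dram_base{}; // guest physical base address

    template <typename T>
    T* GetPointer(std::uint64_t phys_addr) {
        return reinterpret_cast<T*>(backing + (phys_addr - dram_base));
    }
};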
diff --git a/src/core/hle/kernel/k_page_buffer.h b/src/core/hle/kernel/k_page_buffer.h
index 7e50dc1d1..aef06e213 100644
--- a/src/core/hle/kernel/k_page_buffer.h
+++ b/src/core/hle/kernel/k_page_buffer.h
@@ -13,6 +13,7 @@ namespace Kernel {
 
 class KPageBuffer final : public KSlabAllocated<KPageBuffer> {
 public:
+    explicit KPageBuffer(KernelCore&) {}
     KPageBuffer() = default;
 
     static KPageBuffer* FromPhysicalAddress(Core::System& system, PAddr phys_addr);
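The new constructor takes an unused KernelCore&, presumably so KPageBuffer can be created by generic slab-initialization code that constructs every element the same way; this is an assumption drawn from the init_slab_setup.cpp entry in this diffstat, not something this hunk states. A hypothetical sketch of such an initializer:

#include <cstddef>
#include <new>

class KernelCore; // forward declaration; element types take it by reference

// Hypothetical: construct num_objects elements of T in-place in slab memory,
// passing each one the kernel reference. Requires T(KernelCore&).
template <typename T>
void InitializeSlabElements(KernelCore& kernel, void* memory, std::size_t num_objects) {
    T* objects = static_cast<T*>(memory);
    for (std::size_t i = 0; i < num_objects; ++i) {
        new (objects + i) T(kernel);
    }
}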
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index d975de844..307e491cb 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -25,7 +25,7 @@ namespace {
 
 using namespace Common::Literals;
 
-constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) {
+constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) {
     switch (as_type) {
     case FileSys::ProgramAddressSpaceType::Is32Bit:
     case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
@@ -43,27 +43,29 @@ constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceT
 } // namespace
 
 KPageTable::KPageTable(Core::System& system_)
-    : general_lock{system_.Kernel()}, map_physical_memory_lock{system_.Kernel()}, system{system_} {}
+    : m_general_lock{system_.Kernel()},
+      m_map_physical_memory_lock{system_.Kernel()}, m_system{system_} {}
 
 KPageTable::~KPageTable() = default;
 
 Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
-                                        VAddr code_addr, std::size_t code_size,
+                                        VAddr code_addr, size_t code_size,
+                                        KMemoryBlockSlabManager* mem_block_slab_manager,
                                         KMemoryManager::Pool pool) {
 
     const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) {
-        return KAddressSpaceInfo::GetAddressSpaceStart(address_space_width, type);
+        return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type);
     };
     const auto GetSpaceSize = [this](KAddressSpaceInfo::Type type) {
-        return KAddressSpaceInfo::GetAddressSpaceSize(address_space_width, type);
+        return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type);
     };
 
     // Set our width and heap/alias sizes
-    address_space_width = GetAddressSpaceWidthFromType(as_type);
+    m_address_space_width = GetAddressSpaceWidthFromType(as_type);
     const VAddr start = 0;
-    const VAddr end{1ULL << address_space_width};
-    std::size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)};
-    std::size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)};
+    const VAddr end{1ULL << m_address_space_width};
+    size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)};
+    size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)};
 
     ASSERT(code_addr < code_addr + code_size);
     ASSERT(code_addr + code_size - 1 <= end - 1);
@@ -75,66 +77,65 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
     }
 
     // Set code regions and determine remaining
-    constexpr std::size_t RegionAlignment{2_MiB};
+    constexpr size_t RegionAlignment{2_MiB};
     VAddr process_code_start{};
     VAddr process_code_end{};
-    std::size_t stack_region_size{};
-    std::size_t kernel_map_region_size{};
+    size_t stack_region_size{};
+    size_t kernel_map_region_size{};
 
-    if (address_space_width == 39) {
+    if (m_address_space_width == 39) {
         alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias);
         heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap);
         stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack);
         kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
-        code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit);
-        code_region_end = code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit);
-        alias_code_region_start = code_region_start;
-        alias_code_region_end = code_region_end;
+        m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit);
+        m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit);
+        m_alias_code_region_start = m_code_region_start;
+        m_alias_code_region_end = m_code_region_end;
         process_code_start = Common::AlignDown(code_addr, RegionAlignment);
         process_code_end = Common::AlignUp(code_addr + code_size, RegionAlignment);
     } else {
         stack_region_size = 0;
         kernel_map_region_size = 0;
-        code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall);
-        code_region_end = code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
-        stack_region_start = code_region_start;
-        alias_code_region_start = code_region_start;
-        alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) +
-                                GetSpaceSize(KAddressSpaceInfo::Type::MapLarge);
-        stack_region_end = code_region_end;
-        kernel_map_region_start = code_region_start;
-        kernel_map_region_end = code_region_end;
-        process_code_start = code_region_start;
-        process_code_end = code_region_end;
+        m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall);
+        m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
+        m_stack_region_start = m_code_region_start;
+        m_alias_code_region_start = m_code_region_start;
+        m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) +
+                                  GetSpaceSize(KAddressSpaceInfo::Type::MapLarge);
+        m_stack_region_end = m_code_region_end;
+        m_kernel_map_region_start = m_code_region_start;
+        m_kernel_map_region_end = m_code_region_end;
+        process_code_start = m_code_region_start;
+        process_code_end = m_code_region_end;
     }
 
     // Set other basic fields
-    is_aslr_enabled = enable_aslr;
-    address_space_start = start;
-    address_space_end = end;
-    is_kernel = false;
+    m_enable_aslr = enable_aslr;
+    m_enable_device_address_space_merge = false;
+    m_address_space_start = start;
+    m_address_space_end = end;
+    m_is_kernel = false;
+    m_memory_block_slab_manager = mem_block_slab_manager;
 
     // Determine the region we can place our undetermineds in
     VAddr alloc_start{};
-    std::size_t alloc_size{};
-    if ((process_code_start - code_region_start) >= (end - process_code_end)) {
-        alloc_start = code_region_start;
-        alloc_size = process_code_start - code_region_start;
+    size_t alloc_size{};
+    if ((process_code_start - m_code_region_start) >= (end - process_code_end)) {
+        alloc_start = m_code_region_start;
+        alloc_size = process_code_start - m_code_region_start;
     } else {
         alloc_start = process_code_end;
         alloc_size = end - process_code_end;
     }
-    const std::size_t needed_size{
-        (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)};
-    if (alloc_size < needed_size) {
-        ASSERT(false);
-        return ResultOutOfMemory;
-    }
+    const size_t needed_size =
+        (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size);
+    R_UNLESS(alloc_size >= needed_size, ResultOutOfMemory);
 
-    const std::size_t remaining_size{alloc_size - needed_size};
+    const size_t remaining_size{alloc_size - needed_size};
 
     // Determine random placements for each region
-    std::size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{};
+    size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{};
     if (enable_aslr) {
         alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
                     RegionAlignment;
@@ -147,117 +148,130 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
     }
 
     // Setup heap and alias regions
-    alias_region_start = alloc_start + alias_rnd;
-    alias_region_end = alias_region_start + alias_region_size;
-    heap_region_start = alloc_start + heap_rnd;
-    heap_region_end = heap_region_start + heap_region_size;
+    m_alias_region_start = alloc_start + alias_rnd;
+    m_alias_region_end = m_alias_region_start + alias_region_size;
+    m_heap_region_start = alloc_start + heap_rnd;
+    m_heap_region_end = m_heap_region_start + heap_region_size;
 
     if (alias_rnd <= heap_rnd) {
-        heap_region_start += alias_region_size;
-        heap_region_end += alias_region_size;
+        m_heap_region_start += alias_region_size;
+        m_heap_region_end += alias_region_size;
     } else {
-        alias_region_start += heap_region_size;
-        alias_region_end += heap_region_size;
+        m_alias_region_start += heap_region_size;
+        m_alias_region_end += heap_region_size;
     }
 
     // Setup stack region
     if (stack_region_size) {
-        stack_region_start = alloc_start + stack_rnd;
-        stack_region_end = stack_region_start + stack_region_size;
+        m_stack_region_start = alloc_start + stack_rnd;
+        m_stack_region_end = m_stack_region_start + stack_region_size;
 
         if (alias_rnd < stack_rnd) {
-            stack_region_start += alias_region_size;
-            stack_region_end += alias_region_size;
+            m_stack_region_start += alias_region_size;
+            m_stack_region_end += alias_region_size;
         } else {
-            alias_region_start += stack_region_size;
-            alias_region_end += stack_region_size;
+            m_alias_region_start += stack_region_size;
+            m_alias_region_end += stack_region_size;
         }
 
         if (heap_rnd < stack_rnd) {
-            stack_region_start += heap_region_size;
-            stack_region_end += heap_region_size;
+            m_stack_region_start += heap_region_size;
+            m_stack_region_end += heap_region_size;
         } else {
-            heap_region_start += stack_region_size;
-            heap_region_end += stack_region_size;
+            m_heap_region_start += stack_region_size;
+            m_heap_region_end += stack_region_size;
         }
     }
 
     // Setup kernel map region
     if (kernel_map_region_size) {
-        kernel_map_region_start = alloc_start + kmap_rnd;
-        kernel_map_region_end = kernel_map_region_start + kernel_map_region_size;
+        m_kernel_map_region_start = alloc_start + kmap_rnd;
+        m_kernel_map_region_end = m_kernel_map_region_start + kernel_map_region_size;
 
         if (alias_rnd < kmap_rnd) {
-            kernel_map_region_start += alias_region_size;
-            kernel_map_region_end += alias_region_size;
+            m_kernel_map_region_start += alias_region_size;
+            m_kernel_map_region_end += alias_region_size;
         } else {
-            alias_region_start += kernel_map_region_size;
-            alias_region_end += kernel_map_region_size;
+            m_alias_region_start += kernel_map_region_size;
+            m_alias_region_end += kernel_map_region_size;
        }
 
         if (heap_rnd < kmap_rnd) {
-            kernel_map_region_start += heap_region_size;
-            kernel_map_region_end += heap_region_size;
+            m_kernel_map_region_start += heap_region_size;
+            m_kernel_map_region_end += heap_region_size;
         } else {
-            heap_region_start += kernel_map_region_size;
-            heap_region_end += kernel_map_region_size;
+            m_heap_region_start += kernel_map_region_size;
+            m_heap_region_end += kernel_map_region_size;
         }
 
         if (stack_region_size) {
             if (stack_rnd < kmap_rnd) {
-                kernel_map_region_start += stack_region_size;
-                kernel_map_region_end += stack_region_size;
+                m_kernel_map_region_start += stack_region_size;
+                m_kernel_map_region_end += stack_region_size;
             } else {
-                stack_region_start += kernel_map_region_size;
-                stack_region_end += kernel_map_region_size;
+                m_stack_region_start += kernel_map_region_size;
+                m_stack_region_end += kernel_map_region_size;
             }
         }
     }
 
     // Set heap members
-    current_heap_end = heap_region_start;
-    max_heap_size = 0;
-    max_physical_memory_size = 0;
+    m_current_heap_end = m_heap_region_start;
+    m_max_heap_size = 0;
+    m_max_physical_memory_size = 0;
 
     // Ensure that we regions inside our address space
     auto IsInAddressSpace = [&](VAddr addr) {
-        return address_space_start <= addr && addr <= address_space_end;
+        return m_address_space_start <= addr && addr <= m_address_space_end;
     };
-    ASSERT(IsInAddressSpace(alias_region_start));
-    ASSERT(IsInAddressSpace(alias_region_end));
-    ASSERT(IsInAddressSpace(heap_region_start));
-    ASSERT(IsInAddressSpace(heap_region_end));
-    ASSERT(IsInAddressSpace(stack_region_start));
-    ASSERT(IsInAddressSpace(stack_region_end));
-    ASSERT(IsInAddressSpace(kernel_map_region_start));
-    ASSERT(IsInAddressSpace(kernel_map_region_end));
+    ASSERT(IsInAddressSpace(m_alias_region_start));
+    ASSERT(IsInAddressSpace(m_alias_region_end));
+    ASSERT(IsInAddressSpace(m_heap_region_start));
+    ASSERT(IsInAddressSpace(m_heap_region_end));
+    ASSERT(IsInAddressSpace(m_stack_region_start));
+    ASSERT(IsInAddressSpace(m_stack_region_end));
+    ASSERT(IsInAddressSpace(m_kernel_map_region_start));
+    ASSERT(IsInAddressSpace(m_kernel_map_region_end));
 
     // Ensure that we selected regions that don't overlap
-    const VAddr alias_start{alias_region_start};
-    const VAddr alias_last{alias_region_end - 1};
-    const VAddr heap_start{heap_region_start};
-    const VAddr heap_last{heap_region_end - 1};
-    const VAddr stack_start{stack_region_start};
-    const VAddr stack_last{stack_region_end - 1};
-    const VAddr kmap_start{kernel_map_region_start};
-    const VAddr kmap_last{kernel_map_region_end - 1};
+    const VAddr alias_start{m_alias_region_start};
+    const VAddr alias_last{m_alias_region_end - 1};
+    const VAddr heap_start{m_heap_region_start};
+    const VAddr heap_last{m_heap_region_end - 1};
+    const VAddr stack_start{m_stack_region_start};
+    const VAddr stack_last{m_stack_region_end - 1};
+    const VAddr kmap_start{m_kernel_map_region_start};
+    const VAddr kmap_last{m_kernel_map_region_end - 1};
     ASSERT(alias_last < heap_start || heap_last < alias_start);
     ASSERT(alias_last < stack_start || stack_last < alias_start);
     ASSERT(alias_last < kmap_start || kmap_last < alias_start);
     ASSERT(heap_last < stack_start || stack_last < heap_start);
     ASSERT(heap_last < kmap_start || kmap_last < heap_start);
 
-    current_heap_end = heap_region_start;
-    max_heap_size = 0;
-    mapped_physical_memory_size = 0;
-    memory_pool = pool;
+    m_current_heap_end = m_heap_region_start;
+    m_max_heap_size = 0;
+    m_mapped_physical_memory_size = 0;
+    m_memory_pool = pool;
+
+    m_page_table_impl = std::make_unique<Common::PageTable>();
+    m_page_table_impl->Resize(m_address_space_width, PageBits);
+
+    // Initialize our memory block manager.
+    R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end,
+                                               m_memory_block_slab_manager));
+}
 
-    page_table_impl.Resize(address_space_width, PageBits);
+void KPageTable::Finalize() {
+    // Finalize memory blocks.
+    m_memory_block_manager.Finalize(m_memory_block_slab_manager, [&](VAddr addr, u64 size) {
+        m_system.Memory().UnmapRegion(*m_page_table_impl, addr, size);
+    });
 
-    return InitializeMemoryLayout(start, end);
+    // Close the backing page table, as the destructor is not called for guest objects.
+    m_page_table_impl.reset();
 }
 
-Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryState state,
+Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState state,
                                   KMemoryPermission perm) {
     const u64 size{num_pages * PageSize};
 
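The displacement logic in the hunk above reduces to a simple pairwise rule: each region gets an independent random offset into the same free window, and for each pair, whichever drew the later offset is slid up by the other's size, which keeps the layout randomized while guaranteeing the ranges stay disjoint. Reduced to two regions (names local to this sketch, not yuzu code):

#include <cstdint>
#include <utility>

struct Region {
    std::uint64_t start{};
    std::uint64_t end{};
};

// a_off/b_off are random multiples of the region alignment within one window.
std::pair<Region, Region> PlaceTwo(std::uint64_t alloc_start, std::uint64_t a_off,
                                   std::uint64_t a_size, std::uint64_t b_off,
                                   std::uint64_t b_size) {
    Region a{alloc_start + a_off, alloc_start + a_off + a_size};
    Region b{alloc_start + b_off, alloc_start + b_off + b_size};
    if (a_off <= b_off) {
        b.start += a_size; // b drew the later offset: slide b past a
        b.end += a_size;
    } else {
        a.start += b_size; // a drew the later offset: slide a past b
        a.end += b_size;
    }
    return {a, b};
}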
@@ -265,52 +279,76 @@ Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryStat
     R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Verify that the destination memory is unmapped.
     R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
                                  KMemoryPermission::None, KMemoryPermission::None,
                                  KMemoryAttribute::None, KMemoryAttribute::None));
+
+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager);
+
+    // Allocate and open.
     KPageGroup pg;
-    R_TRY(system.Kernel().MemoryManager().AllocateAndOpen(
+    R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
         &pg, num_pages,
-        KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, allocation_option)));
+        KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocation_option)));
 
     R_TRY(Operate(addr, num_pages, pg, OperationType::MapGroup));
 
-    block_manager->Update(addr, num_pages, state, perm);
+    // Update the blocks.
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
-Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) {
+Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size) {
     // Validate the mapping request.
     R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
              ResultInvalidMemoryRegion);
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Verify that the source memory is normal heap.
     KMemoryState src_state{};
     KMemoryPermission src_perm{};
-    std::size_t num_src_allocator_blocks{};
+    size_t num_src_allocator_blocks{};
     R_TRY(this->CheckMemoryState(&src_state, &src_perm, nullptr, &num_src_allocator_blocks,
                                  src_address, size, KMemoryState::All, KMemoryState::Normal,
                                  KMemoryPermission::All, KMemoryPermission::UserReadWrite,
                                  KMemoryAttribute::All, KMemoryAttribute::None));
 
     // Verify that the destination memory is unmapped.
-    std::size_t num_dst_allocator_blocks{};
+    size_t num_dst_allocator_blocks{};
     R_TRY(this->CheckMemoryState(&num_dst_allocator_blocks, dst_address, size, KMemoryState::All,
                                  KMemoryState::Free, KMemoryPermission::None,
                                  KMemoryPermission::None, KMemoryAttribute::None,
                                  KMemoryAttribute::None));
 
+    // Create an update allocator for the source.
+    Result src_allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+                                                     m_memory_block_slab_manager,
+                                                     num_src_allocator_blocks);
+    R_TRY(src_allocator_result);
+
+    // Create an update allocator for the destination.
+    Result dst_allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+                                                     m_memory_block_slab_manager,
+                                                     num_dst_allocator_blocks);
+    R_TRY(dst_allocator_result);
+
     // Map the code memory.
     {
         // Determine the number of pages being operated on.
-        const std::size_t num_pages = size / PageSize;
+        const size_t num_pages = size / PageSize;
 
         // Create page groups for the memory being mapped.
         KPageGroup pg;
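A note on the result plumbing that replaces `return ResultSuccess;` throughout this file: R_SUCCEED(), R_TRY(expr), and R_UNLESS(cond, res) are yuzu's result-propagation macros. Conceptually they behave like the simplified sketch below; these are illustrative definitions only, not the actual macros, which add extra hooks:

// Simplified semantics only.
#define R_SUCCEED_SKETCH() return ResultSuccess
#define R_UNLESS_SKETCH(cond, res)                                                                 \
    do {                                                                                           \
        if (!(cond)) {                                                                             \
            return (res);                                                                          \
        }                                                                                          \
    } while (0)
#define R_TRY_SKETCH(expr)                                                                         \
    do {                                                                                           \
        if (const Result r = (expr); r.IsError()) {                                                \
            return r;                                                                              \
        }                                                                                          \
    } while (0)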
@@ -335,33 +373,37 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size
             unprot_guard.Cancel();
 
             // Apply the memory block updates.
-            block_manager->Update(src_address, num_pages, src_state, new_perm,
-                                  KMemoryAttribute::Locked);
-            block_manager->Update(dst_address, num_pages, KMemoryState::AliasCode, new_perm,
-                                  KMemoryAttribute::None);
+            m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
+                                          src_state, new_perm, KMemoryAttribute::Locked,
+                                          KMemoryBlockDisableMergeAttribute::Locked,
+                                          KMemoryBlockDisableMergeAttribute::None);
+            m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
+                                          KMemoryState::AliasCode, new_perm, KMemoryAttribute::None,
+                                          KMemoryBlockDisableMergeAttribute::Normal,
+                                          KMemoryBlockDisableMergeAttribute::None);
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
-Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
+Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size,
                                    ICacheInvalidationStrategy icache_invalidation_strategy) {
     // Validate the mapping request.
     R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
              ResultInvalidMemoryRegion);
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Verify that the source memory is locked normal heap.
-    std::size_t num_src_allocator_blocks{};
+    size_t num_src_allocator_blocks{};
     R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size,
                                  KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None,
                                  KMemoryPermission::None, KMemoryAttribute::All,
                                  KMemoryAttribute::Locked));
 
     // Verify that the destination memory is aliasable code.
-    std::size_t num_dst_allocator_blocks{};
+    size_t num_dst_allocator_blocks{};
     R_TRY(this->CheckMemoryStateContiguous(
         std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias,
         KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
@@ -370,7 +412,7 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si
     // Determine whether any pages being unmapped are code.
     bool any_code_pages = false;
     {
-        KMemoryBlockManager::const_iterator it = block_manager->FindIterator(dst_address);
+        KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address);
         while (true) {
             // Get the memory info.
             const KMemoryInfo info = it->GetMemoryInfo();
@@ -396,9 +438,9 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si
     SCOPE_EXIT({
         if (reprotected_pages && any_code_pages) {
             if (icache_invalidation_strategy == ICacheInvalidationStrategy::InvalidateRange) {
-                system.InvalidateCpuInstructionCacheRange(dst_address, size);
+                m_system.InvalidateCpuInstructionCacheRange(dst_address, size);
             } else {
-                system.InvalidateCpuInstructionCaches();
+                m_system.InvalidateCpuInstructionCaches();
             }
         }
     });
@@ -406,7 +448,21 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si
     // Unmap.
     {
         // Determine the number of pages being operated on.
-        const std::size_t num_pages = size / PageSize;
+        const size_t num_pages = size / PageSize;
+
+        // Create an update allocator for the source.
+        Result src_allocator_result{ResultSuccess};
+        KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+                                                         m_memory_block_slab_manager,
+                                                         num_src_allocator_blocks);
+        R_TRY(src_allocator_result);
+
+        // Create an update allocator for the destination.
+        Result dst_allocator_result{ResultSuccess};
+        KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+                                                         m_memory_block_slab_manager,
+                                                         num_dst_allocator_blocks);
+        R_TRY(dst_allocator_result);
 
         // Unmap the aliased copy of the pages.
         R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap));
@@ -416,73 +472,34 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si
                       OperationType::ChangePermissions));
 
         // Apply the memory block updates.
-        block_manager->Update(dst_address, num_pages, KMemoryState::None);
-        block_manager->Update(src_address, num_pages, KMemoryState::Normal,
-                              KMemoryPermission::UserReadWrite);
+        m_memory_block_manager.Update(
+            std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
+            KMemoryPermission::None, KMemoryAttribute::None,
+            KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
+        m_memory_block_manager.Update(
+            std::addressof(src_allocator), src_address, num_pages, KMemoryState::Normal,
+            KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+            KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
 
         // Note that we reprotected pages.
         reprotected_pages = true;
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
-VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
-                               std::size_t num_pages, std::size_t alignment, std::size_t offset,
-                               std::size_t guard_pages) {
+VAddr KPageTable::FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
+                               size_t alignment, size_t offset, size_t guard_pages) {
     VAddr address = 0;
 
     if (num_pages <= region_num_pages) {
         if (this->IsAslrEnabled()) {
-            // Try to directly find a free area up to 8 times.
-            for (std::size_t i = 0; i < 8; i++) {
-                const std::size_t random_offset =
-                    KSystemControl::GenerateRandomRange(
-                        0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) *
-                    alignment;
-                const VAddr candidate =
-                    Common::AlignDown((region_start + random_offset), alignment) + offset;
-
-                KMemoryInfo info = this->QueryInfoImpl(candidate);
-
-                if (info.state != KMemoryState::Free) {
-                    continue;
-                }
-                if (region_start > candidate) {
-                    continue;
-                }
-                if (info.GetAddress() + guard_pages * PageSize > candidate) {
-                    continue;
-                }
-
-                const VAddr candidate_end = candidate + (num_pages + guard_pages) * PageSize - 1;
-                if (candidate_end > info.GetLastAddress()) {
-                    continue;
-                }
-                if (candidate_end > region_start + region_num_pages * PageSize - 1) {
-                    continue;
-                }
-
-                address = candidate;
-                break;
-            }
-            // Fall back to finding the first free area with a random offset.
-            if (address == 0) {
-                // NOTE: Nintendo does not account for guard pages here.
-                // This may theoretically cause an offset to be chosen that cannot be mapped. We
-                // will account for guard pages.
-                const std::size_t offset_pages = KSystemControl::GenerateRandomRange(
-                    0, region_num_pages - num_pages - guard_pages);
-                address = block_manager->FindFreeArea(region_start + offset_pages * PageSize,
-                                                      region_num_pages - offset_pages, num_pages,
-                                                      alignment, offset, guard_pages);
-            }
+            UNIMPLEMENTED();
         }
-
         // Find the first free area.
         if (address == 0) {
-            address = block_manager->FindFreeArea(region_start, region_num_pages, num_pages,
-                                                  alignment, offset, guard_pages);
+            address = m_memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages,
                                                           alignment, offset, guard_pages);
        }
    }
 
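Note the behavioral change in this hunk: the randomized eight-probe search is now UNIMPLEMENTED(), so every lookup falls through to the block manager's first-fit FindFreeArea. For reference, the acceptance test the removed probe loop applied to each random candidate, distilled into a standalone predicate (all names local to this sketch):

#include <cstdint>

using VAddr = std::uint64_t;
constexpr std::uint64_t PageSize = 0x1000;

// The probe must start inside the region, clear the guard pages at the front
// of the free block, and fit (pages + trailing guard) inside both the free
// block and the region.
bool CandidateFits(VAddr candidate, VAddr region_start, std::uint64_t region_num_pages,
                   std::uint64_t num_pages, std::uint64_t guard_pages,
                   VAddr free_block_start, VAddr free_block_last) {
    if (candidate < region_start) {
        return false;
    }
    if (free_block_start + guard_pages * PageSize > candidate) {
        return false;
    }
    const VAddr candidate_last = candidate + (num_pages + guard_pages) * PageSize - 1;
    return candidate_last <= free_block_last &&
           candidate_last <= region_start + region_num_pages * PageSize - 1;
}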
@@ -500,7 +517,8 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     // Begin traversal.
     Common::PageTable::TraversalContext context;
     Common::PageTable::TraversalEntry next_entry;
-    R_UNLESS(page_table_impl.BeginTraversal(next_entry, context, addr), ResultInvalidCurrentMemory);
+    R_UNLESS(m_page_table_impl->BeginTraversal(next_entry, context, addr),
+             ResultInvalidCurrentMemory);
 
     // Prepare tracking variables.
     PAddr cur_addr = next_entry.phys_addr;
@@ -508,9 +526,9 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     size_t tot_size = cur_size;
 
     // Iterate, adding to group as we go.
-    const auto& memory_layout = system.Kernel().MemoryLayout();
+    const auto& memory_layout = m_system.Kernel().MemoryLayout();
     while (tot_size < size) {
-        R_UNLESS(page_table_impl.ContinueTraversal(next_entry, context),
+        R_UNLESS(m_page_table_impl->ContinueTraversal(next_entry, context),
                  ResultInvalidCurrentMemory);
 
         if (next_entry.phys_addr != (cur_addr + cur_size)) {
@@ -538,7 +556,7 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
     R_TRY(pg.AddBlock(cur_addr, cur_pages));
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) {
544bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) { 562bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) {
@@ -546,7 +564,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu
546 564
547 const size_t size = num_pages * PageSize; 565 const size_t size = num_pages * PageSize;
548 const auto& pg = pg_ll.Nodes(); 566 const auto& pg = pg_ll.Nodes();
549 const auto& memory_layout = system.Kernel().MemoryLayout(); 567 const auto& memory_layout = m_system.Kernel().MemoryLayout();
550 568
551 // Empty groups are necessarily invalid. 569 // Empty groups are necessarily invalid.
552 if (pg.empty()) { 570 if (pg.empty()) {
@@ -573,7 +591,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu
     // Begin traversal.
     Common::PageTable::TraversalContext context;
     Common::PageTable::TraversalEntry next_entry;
-    if (!page_table_impl.BeginTraversal(next_entry, context, addr)) {
+    if (!m_page_table_impl->BeginTraversal(next_entry, context, addr)) {
         return false;
     }
 
@@ -584,7 +602,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu
 
     // Iterate, comparing expected to actual.
     while (tot_size < size) {
-        if (!page_table_impl.ContinueTraversal(next_entry, context)) {
+        if (!m_page_table_impl->ContinueTraversal(next_entry, context)) {
             return false;
         }
 
@@ -630,11 +648,11 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu
     return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
 }
 
-Result KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
+Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table,
                                       VAddr src_addr) {
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
-    const std::size_t num_pages{size / PageSize};
+    const size_t num_pages{size / PageSize};
 
     // Check that the memory is mapped in the destination process.
     size_t num_allocator_blocks;
@@ -649,43 +667,51 @@ Result KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTab
                                  KMemoryPermission::None, KMemoryAttribute::All,
                                  KMemoryAttribute::None));
 
+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
     CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap));
 
     // Apply the memory block update.
-    block_manager->Update(dst_addr, num_pages, KMemoryState::Free, KMemoryPermission::None,
-                          KMemoryAttribute::None);
+    m_memory_block_manager.Update(std::addressof(allocator), dst_addr, num_pages,
+                                  KMemoryState::Free, KMemoryPermission::None,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::Normal);
 
-    system.InvalidateCpuInstructionCaches();
+    m_system.InvalidateCpuInstructionCaches();
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
-Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
+Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
     // Lock the physical memory lock.
-    KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
+    KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
 
     // Calculate the last address for convenience.
     const VAddr last_address = address + size - 1;
 
     // Define iteration variables.
     VAddr cur_address;
-    std::size_t mapped_size;
+    size_t mapped_size;
 
     // The entire mapping process can be retried.
     while (true) {
         // Check if the memory is already mapped.
         {
             // Lock the table.
-            KScopedLightLock lk(general_lock);
+            KScopedLightLock lk(m_general_lock);
 
             // Iterate over the memory.
             cur_address = address;
             mapped_size = 0;
 
-            auto it = block_manager->FindIterator(cur_address);
+            auto it = m_memory_block_manager.FindIterator(cur_address);
             while (true) {
                 // Check that the iterator is valid.
-                ASSERT(it != block_manager->end());
+                ASSERT(it != m_memory_block_manager.end());
 
                 // Get the memory info.
                 const KMemoryInfo info = it->GetMemoryInfo();
@@ -716,20 +742,20 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
716 { 742 {
717 // Reserve the memory from the process resource limit. 743 // Reserve the memory from the process resource limit.
718 KScopedResourceReservation memory_reservation( 744 KScopedResourceReservation memory_reservation(
719 system.Kernel().CurrentProcess()->GetResourceLimit(), 745 m_system.Kernel().CurrentProcess()->GetResourceLimit(),
720 LimitableResource::PhysicalMemory, size - mapped_size); 746 LimitableResource::PhysicalMemory, size - mapped_size);
721 R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); 747 R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
722 748
723 // Allocate pages for the new memory. 749 // Allocate pages for the new memory.
724 KPageGroup pg; 750 KPageGroup pg;
725 R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess( 751 R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess(
726 &pg, (size - mapped_size) / PageSize, 752 &pg, (size - mapped_size) / PageSize,
727 KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0)); 753 KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0));
728 754
729 // Map the memory. 755 // Map the memory.
730 { 756 {
731 // Lock the table. 757 // Lock the table.
732 KScopedLightLock lk(general_lock); 758 KScopedLightLock lk(m_general_lock);
733 759
734 size_t num_allocator_blocks = 0; 760 size_t num_allocator_blocks = 0;
735 761
@@ -739,10 +765,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
739 size_t checked_mapped_size = 0; 765 size_t checked_mapped_size = 0;
740 cur_address = address; 766 cur_address = address;
741 767
742 auto it = block_manager->FindIterator(cur_address); 768 auto it = m_memory_block_manager.FindIterator(cur_address);
743 while (true) { 769 while (true) {
744 // Check that the iterator is valid. 770 // Check that the iterator is valid.
745 ASSERT(it != block_manager->end()); 771 ASSERT(it != m_memory_block_manager.end());
746 772
747 // Get the memory info. 773 // Get the memory info.
748 const KMemoryInfo info = it->GetMemoryInfo(); 774 const KMemoryInfo info = it->GetMemoryInfo();
@@ -782,6 +808,14 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
782 } 808 }
783 } 809 }
784 810
811 // Create an update allocator.
812 ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
813 Result allocator_result{ResultSuccess};
814 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
815 m_memory_block_slab_manager,
816 num_allocator_blocks);
817 R_TRY(allocator_result);
818
785 // Reset the current tracking address, and make sure we clean up on failure. 819 // Reset the current tracking address, and make sure we clean up on failure.
786 cur_address = address; 820 cur_address = address;
787 auto unmap_guard = detail::ScopeExit([&] { 821 auto unmap_guard = detail::ScopeExit([&] {
@@ -791,10 +825,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
791 // Iterate, unmapping the pages. 825 // Iterate, unmapping the pages.
792 cur_address = address; 826 cur_address = address;
793 827
794 auto it = block_manager->FindIterator(cur_address); 828 auto it = m_memory_block_manager.FindIterator(cur_address);
795 while (true) { 829 while (true) {
796 // Check that the iterator is valid. 830 // Check that the iterator is valid.
797 ASSERT(it != block_manager->end()); 831 ASSERT(it != m_memory_block_manager.end());
798 832
799 // Get the memory info. 833 // Get the memory info.
800 const KMemoryInfo info = it->GetMemoryInfo(); 834 const KMemoryInfo info = it->GetMemoryInfo();
@@ -830,10 +864,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
830 PAddr pg_phys_addr = pg_it->GetAddress(); 864 PAddr pg_phys_addr = pg_it->GetAddress();
831 size_t pg_pages = pg_it->GetNumPages(); 865 size_t pg_pages = pg_it->GetNumPages();
832 866
833 auto it = block_manager->FindIterator(cur_address); 867 auto it = m_memory_block_manager.FindIterator(cur_address);
834 while (true) { 868 while (true) {
835 // Check that the iterator is valid. 869 // Check that the iterator is valid.
836 ASSERT(it != block_manager->end()); 870 ASSERT(it != m_memory_block_manager.end());
837 871
838 // Get the memory info. 872 // Get the memory info.
839 const KMemoryInfo info = it->GetMemoryInfo(); 873 const KMemoryInfo info = it->GetMemoryInfo();
@@ -886,37 +920,37 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
886 memory_reservation.Commit(); 920 memory_reservation.Commit();
887 921
888 // Increase our tracked mapped size. 922 // Increase our tracked mapped size.
889 mapped_physical_memory_size += (size - mapped_size); 923 m_mapped_physical_memory_size += (size - mapped_size);
890 924
891 // Update the relevant memory blocks. 925 // Update the relevant memory blocks.
892 block_manager->Update(address, size / PageSize, KMemoryState::Free, 926 m_memory_block_manager.UpdateIfMatch(
893 KMemoryPermission::None, KMemoryAttribute::None, 927 std::addressof(allocator), address, size / PageSize, KMemoryState::Free,
894 KMemoryState::Normal, KMemoryPermission::UserReadWrite, 928 KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal,
895 KMemoryAttribute::None); 929 KMemoryPermission::UserReadWrite, KMemoryAttribute::None);
896 930
897 // Cancel our guard. 931 // Cancel our guard.
898 unmap_guard.Cancel(); 932 unmap_guard.Cancel();
899 933
900 return ResultSuccess; 934 R_SUCCEED();
901 } 935 }
902 } 936 }
903 } 937 }
904} 938}
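
MapPhysicalMemory above leans on detail::ScopeExit for its unmap_guard: the guard's callback undoes the partial mapping on every early exit, and unmap_guard.Cancel() at the end suppresses it once the whole operation has succeeded. A minimal sketch of such a cancelable guard (illustrative, not yuzu's helper):

    #include <cstdio>
    #include <utility>

    template <typename F>
    class ScopeExit {
    public:
        explicit ScopeExit(F&& f) : func_(std::forward<F>(f)) {}
        ~ScopeExit() {
            if (active_) {
                func_(); // rollback path: runs unless the operation succeeded
            }
        }
        void Cancel() { active_ = false; } // commit: suppress the rollback
    private:
        F func_;
        bool active_ = true;
    };

    bool MapSomething() {
        auto rollback = ScopeExit([] { std::puts("rolling back mapping"); });
        // ... perform the mapping steps; any early return triggers rollback ...
        rollback.Cancel(); // everything succeeded, keep the mapping
        return true;
    }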
905 939
906Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { 940Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
907 // Lock the physical memory lock. 941 // Lock the physical memory lock.
908 KScopedLightLock map_phys_mem_lk(map_physical_memory_lock); 942 KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
909 943
910 // Lock the table. 944 // Lock the table.
911 KScopedLightLock lk(general_lock); 945 KScopedLightLock lk(m_general_lock);
912 946
913 // Calculate the last address for convenience. 947 // Calculate the last address for convenience.
914 const VAddr last_address = address + size - 1; 948 const VAddr last_address = address + size - 1;
915 949
916 // Define iteration variables. 950 // Define iteration variables.
917 VAddr cur_address = 0; 951 VAddr cur_address = 0;
918 std::size_t mapped_size = 0; 952 size_t mapped_size = 0;
919 std::size_t num_allocator_blocks = 0; 953 size_t num_allocator_blocks = 0;
920 954
921 // Check if the memory is mapped. 955 // Check if the memory is mapped.
922 { 956 {
@@ -924,10 +958,10 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
924 cur_address = address; 958 cur_address = address;
925 mapped_size = 0; 959 mapped_size = 0;
926 960
927 auto it = block_manager->FindIterator(cur_address); 961 auto it = m_memory_block_manager.FindIterator(cur_address);
928 while (true) { 962 while (true) {
929 // Check that the iterator is valid. 963 // Check that the iterator is valid.
930 ASSERT(it != block_manager->end()); 964 ASSERT(it != m_memory_block_manager.end());
931 965
932 // Get the memory info. 966 // Get the memory info.
933 const KMemoryInfo info = it->GetMemoryInfo(); 967 const KMemoryInfo info = it->GetMemoryInfo();
@@ -1022,6 +1056,13 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
1022 } 1056 }
1023 ASSERT(pg.GetNumPages() == mapped_size / PageSize); 1057 ASSERT(pg.GetNumPages() == mapped_size / PageSize);
1024 1058
1059 // Create an update allocator.
1060 ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
1061 Result allocator_result{ResultSuccess};
1062 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
1063 m_memory_block_slab_manager, num_allocator_blocks);
1064 R_TRY(allocator_result);
1065
1025 // Reset the current tracking address, and make sure we clean up on failure. 1066 // Reset the current tracking address, and make sure we clean up on failure.
1026 cur_address = address; 1067 cur_address = address;
1027 auto remap_guard = detail::ScopeExit([&] { 1068 auto remap_guard = detail::ScopeExit([&] {
@@ -1030,7 +1071,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
1030 cur_address = address; 1071 cur_address = address;
1031 1072
1032 // Iterate over the memory we unmapped. 1073 // Iterate over the memory we unmapped.
1033 auto it = block_manager->FindIterator(cur_address); 1074 auto it = m_memory_block_manager.FindIterator(cur_address);
1034 auto pg_it = pg.Nodes().begin(); 1075 auto pg_it = pg.Nodes().begin();
1035 PAddr pg_phys_addr = pg_it->GetAddress(); 1076 PAddr pg_phys_addr = pg_it->GetAddress();
1036 size_t pg_pages = pg_it->GetNumPages(); 1077 size_t pg_pages = pg_it->GetNumPages();
@@ -1085,10 +1126,10 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
1085 }); 1126 });
1086 1127
1087 // Iterate over the memory, unmapping as we go. 1128 // Iterate over the memory, unmapping as we go.
1088 auto it = block_manager->FindIterator(cur_address); 1129 auto it = m_memory_block_manager.FindIterator(cur_address);
1089 while (true) { 1130 while (true) {
1090 // Check that the iterator is valid. 1131 // Check that the iterator is valid.
1091 ASSERT(it != block_manager->end()); 1132 ASSERT(it != m_memory_block_manager.end());
1092 1133
1093 // Get the memory info. 1134 // Get the memory info.
1094 const KMemoryInfo info = it->GetMemoryInfo(); 1135 const KMemoryInfo info = it->GetMemoryInfo();
@@ -1115,104 +1156,159 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
1115 } 1156 }
1116 1157
1117 // Release the memory resource. 1158 // Release the memory resource.
1118 mapped_physical_memory_size -= mapped_size; 1159 m_mapped_physical_memory_size -= mapped_size;
1119 auto process{system.Kernel().CurrentProcess()}; 1160 auto process{m_system.Kernel().CurrentProcess()};
1120 process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size); 1161 process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size);
1121 1162
1122 // Update memory blocks. 1163 // Update memory blocks.
1123 block_manager->Update(address, size / PageSize, KMemoryState::Free, KMemoryPermission::None, 1164 m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize,
1124 KMemoryAttribute::None); 1165 KMemoryState::Free, KMemoryPermission::None,
1166 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
1167 KMemoryBlockDisableMergeAttribute::None);
1125 1168
1126 // TODO(bunnei): This is a workaround until the next set of changes, where we add reference 1169 // TODO(bunnei): This is a workaround until the next set of changes, where we add reference
1127 // counting for mapped pages. Until then, we must manually close the reference to the page 1170 // counting for mapped pages. Until then, we must manually close the reference to the page
1128 // group. 1171 // group.
1129 system.Kernel().MemoryManager().Close(pg); 1172 m_system.Kernel().MemoryManager().Close(pg);
1130 1173
1131 // We succeeded. 1174 // We succeeded.
1132 remap_guard.Cancel(); 1175 remap_guard.Cancel();
1133 1176
1134 return ResultSuccess; 1177 R_SUCCEED();
1135} 1178}
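
Throughout this patch, bare `return ResultSuccess;` becomes R_SUCCEED(), early returns become R_RETURN(result), and if-checks become R_UNLESS(cond, rc). A plausible simplified reading of those macros, assuming yuzu's real definitions layer extra bookkeeping on top:

    enum class Result { Success, LimitReached };
    constexpr Result ResultSuccess = Result::Success;

    #define R_SUCCEED() return ResultSuccess
    #define R_RETURN(expr) return (expr)
    #define R_UNLESS(cond, rc) \
        do {                   \
            if (!(cond)) {     \
                R_RETURN(rc);  \
            }                  \
        } while (0)

    Result Reserve(bool reservation_succeeded) {
        R_UNLESS(reservation_succeeded, Result::LimitReached);
        R_SUCCEED();
    }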
1136 1179
1137Result KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) { 1180Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size) {
1138 KScopedLightLock lk(general_lock); 1181 // Lock the table.
1139 1182 KScopedLightLock lk(m_general_lock);
1140 KMemoryState src_state{}; 1183
1141 CASCADE_CODE(CheckMemoryState( 1184 // Validate that the source address's state is valid.
1142 &src_state, nullptr, nullptr, nullptr, src_addr, size, KMemoryState::FlagCanAlias, 1185 KMemoryState src_state;
1143 KMemoryState::FlagCanAlias, KMemoryPermission::All, KMemoryPermission::UserReadWrite, 1186 size_t num_src_allocator_blocks;
1144 KMemoryAttribute::Mask, KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped)); 1187 R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr,
1188 std::addressof(num_src_allocator_blocks), src_address, size,
1189 KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
1190 KMemoryPermission::All, KMemoryPermission::UserReadWrite,
1191 KMemoryAttribute::All, KMemoryAttribute::None));
1145 1192
1146 if (IsRegionMapped(dst_addr, size)) { 1193 // Validate that the dst address's state is valid.
1147 return ResultInvalidCurrentMemory; 1194 size_t num_dst_allocator_blocks;
1148 } 1195 R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size,
1196 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
1197 KMemoryPermission::None, KMemoryAttribute::None,
1198 KMemoryAttribute::None));
1149 1199
1200 // Create an update allocator for the source.
1201 Result src_allocator_result{ResultSuccess};
1202 KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
1203 m_memory_block_slab_manager,
1204 num_src_allocator_blocks);
1205 R_TRY(src_allocator_result);
1206
1207 // Create an update allocator for the destination.
1208 Result dst_allocator_result{ResultSuccess};
1209 KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
1210 m_memory_block_slab_manager,
1211 num_dst_allocator_blocks);
1212 R_TRY(dst_allocator_result);
1213
1214 // Map the memory.
1150 KPageGroup page_linked_list; 1215 KPageGroup page_linked_list;
1151 const std::size_t num_pages{size / PageSize}; 1216 const size_t num_pages{size / PageSize};
1152 1217 const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
1153 AddRegionToPages(src_addr, num_pages, page_linked_list); 1218 KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
1219 const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked;
1154 1220
1221 AddRegionToPages(src_address, num_pages, page_linked_list);
1155 { 1222 {
1223 // Reprotect the source as kernel-read/not mapped.
1156 auto block_guard = detail::ScopeExit([&] { 1224 auto block_guard = detail::ScopeExit([&] {
1157 Operate(src_addr, num_pages, KMemoryPermission::UserReadWrite, 1225 Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
1158 OperationType::ChangePermissions); 1226 OperationType::ChangePermissions);
1159 }); 1227 });
1160 1228 R_TRY(Operate(src_address, num_pages, new_src_perm, OperationType::ChangePermissions));
1161 CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::None, 1229 R_TRY(MapPages(dst_address, page_linked_list, KMemoryPermission::UserReadWrite));
1162 OperationType::ChangePermissions));
1163 CASCADE_CODE(MapPages(dst_addr, page_linked_list, KMemoryPermission::UserReadWrite));
1164 1230
1165 block_guard.Cancel(); 1231 block_guard.Cancel();
1166 } 1232 }
1167 1233
1168 block_manager->Update(src_addr, num_pages, src_state, KMemoryPermission::None, 1234 // Apply the memory block updates.
1169 KMemoryAttribute::Locked); 1235 m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state,
1170 block_manager->Update(dst_addr, num_pages, KMemoryState::Stack, 1236 new_src_perm, new_src_attr,
1171 KMemoryPermission::UserReadWrite); 1237 KMemoryBlockDisableMergeAttribute::Locked,
1172 1238 KMemoryBlockDisableMergeAttribute::None);
1173 return ResultSuccess; 1239 m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
1240 KMemoryState::Stack, KMemoryPermission::UserReadWrite,
1241 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
1242 KMemoryBlockDisableMergeAttribute::None);
1243
1244 R_SUCCEED();
1174} 1245}
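
The new_src_perm computation above ORs enum-class flags (KernelRead | NotMapped), which only works because the permission enum defines bitwise operators. A small sketch of that flag-enum idiom with stand-in names, not yuzu's KMemoryPermission:

    #include <cstdint>

    enum class Perm : std::uint32_t {
        None       = 0,
        KernelRead = 1u << 0,
        NotMapped  = 1u << 1,
    };

    constexpr Perm operator|(Perm a, Perm b) {
        return static_cast<Perm>(static_cast<std::uint32_t>(a) |
                                 static_cast<std::uint32_t>(b));
    }
    constexpr bool HasFlag(Perm value, Perm flag) {
        return (static_cast<std::uint32_t>(value) &
                static_cast<std::uint32_t>(flag)) != 0;
    }

    static_assert(HasFlag(Perm::KernelRead | Perm::NotMapped, Perm::NotMapped));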
1175 1246
1176Result KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) { 1247Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size) {
1177 KScopedLightLock lk(general_lock); 1248 // Lock the table.
1249 KScopedLightLock lk(m_general_lock);
1250
1251 // Validate that the source address's state is valid.
1252 KMemoryState src_state;
1253 size_t num_src_allocator_blocks;
1254 R_TRY(this->CheckMemoryState(
1255 std::addressof(src_state), nullptr, nullptr, std::addressof(num_src_allocator_blocks),
1256 src_address, size, KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
1257 KMemoryPermission::All, KMemoryPermission::NotMapped | KMemoryPermission::KernelRead,
1258 KMemoryAttribute::All, KMemoryAttribute::Locked));
1259
1260 // Validate that the dst address's state is valid.
1261 KMemoryPermission dst_perm;
1262 size_t num_dst_allocator_blocks;
1263 R_TRY(this->CheckMemoryState(
1264 nullptr, std::addressof(dst_perm), nullptr, std::addressof(num_dst_allocator_blocks),
1265 dst_address, size, KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None,
1266 KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None));
1178 1267
1179 KMemoryState src_state{}; 1268 // Create an update allocator for the source.
1180 CASCADE_CODE(CheckMemoryState( 1269 Result src_allocator_result{ResultSuccess};
1181 &src_state, nullptr, nullptr, nullptr, src_addr, size, KMemoryState::FlagCanAlias, 1270 KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
1182 KMemoryState::FlagCanAlias, KMemoryPermission::All, KMemoryPermission::None, 1271 m_memory_block_slab_manager,
1183 KMemoryAttribute::Mask, KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped)); 1272 num_src_allocator_blocks);
1273 R_TRY(src_allocator_result);
1184 1274
1185 KMemoryPermission dst_perm{}; 1275 // Create an update allocator for the destination.
1186 CASCADE_CODE(CheckMemoryState(nullptr, &dst_perm, nullptr, nullptr, dst_addr, size, 1276 Result dst_allocator_result{ResultSuccess};
1187 KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None, 1277 KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
1188 KMemoryPermission::None, KMemoryAttribute::Mask, 1278 m_memory_block_slab_manager,
1189 KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped)); 1279 num_dst_allocator_blocks);
1280 R_TRY(dst_allocator_result);
1190 1281
1191 KPageGroup src_pages; 1282 KPageGroup src_pages;
1192 KPageGroup dst_pages; 1283 KPageGroup dst_pages;
1193 const std::size_t num_pages{size / PageSize}; 1284 const size_t num_pages{size / PageSize};
1194 1285
1195 AddRegionToPages(src_addr, num_pages, src_pages); 1286 AddRegionToPages(src_address, num_pages, src_pages);
1196 AddRegionToPages(dst_addr, num_pages, dst_pages); 1287 AddRegionToPages(dst_address, num_pages, dst_pages);
1197 1288
1198 if (!dst_pages.IsEqual(src_pages)) { 1289 R_UNLESS(dst_pages.IsEqual(src_pages), ResultInvalidMemoryRegion);
1199 return ResultInvalidMemoryRegion;
1200 }
1201 1290
1202 { 1291 {
1203 auto block_guard = detail::ScopeExit([&] { MapPages(dst_addr, dst_pages, dst_perm); }); 1292 auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); });
1204 1293
1205 CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap)); 1294 R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap));
1206 CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::UserReadWrite, 1295 R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
1207 OperationType::ChangePermissions)); 1296 OperationType::ChangePermissions));
1208 1297
1209 block_guard.Cancel(); 1298 block_guard.Cancel();
1210 } 1299 }
1211 1300
1212 block_manager->Update(src_addr, num_pages, src_state, KMemoryPermission::UserReadWrite); 1301 // Apply the memory block updates.
1213 block_manager->Update(dst_addr, num_pages, KMemoryState::Free); 1302 m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state,
1214 1303 KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
1215 return ResultSuccess; 1304 KMemoryBlockDisableMergeAttribute::None,
1305 KMemoryBlockDisableMergeAttribute::Locked);
1306 m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
1307 KMemoryState::None, KMemoryPermission::None,
1308 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
1309 KMemoryBlockDisableMergeAttribute::Normal);
1310
1311 R_SUCCEED();
1216} 1312}
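
The rewritten UnmapMemory keeps the original safety check, now phrased as R_UNLESS(dst_pages.IsEqual(src_pages), ...): the alias may only be torn down if the destination still maps exactly the physical pages gathered from the source. A sketch of such an equality check over (physical address, page count) runs, using stand-in types rather than KPageGroup:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct PageRun {
        std::uint64_t phys_addr; // first physical address of the run
        std::size_t num_pages;   // contiguous pages starting there
        bool operator==(const PageRun&) const = default; // C++20
    };

    using PageGroup = std::vector<PageRun>;

    // Two groups describe the same backing memory iff they contain the
    // same runs in the same order.
    bool IsEqual(const PageGroup& a, const PageGroup& b) {
        return a == b;
    }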
1217 1313
1218Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list, 1314Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
@@ -1225,48 +1321,54 @@ Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
1225 if (const auto result{ 1321 if (const auto result{
1226 Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())}; 1322 Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())};
1227 result.IsError()) { 1323 result.IsError()) {
1228 const std::size_t num_pages{(addr - cur_addr) / PageSize}; 1324 const size_t num_pages{(addr - cur_addr) / PageSize};
1229 1325
1230 ASSERT(Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap) 1326 ASSERT(Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap)
1231 .IsSuccess()); 1327 .IsSuccess());
1232 1328
1233 return result; 1329 R_RETURN(result);
1234 } 1330 }
1235 1331
1236 cur_addr += node.GetNumPages() * PageSize; 1332 cur_addr += node.GetNumPages() * PageSize;
1237 } 1333 }
1238 1334
1239 return ResultSuccess; 1335 R_SUCCEED();
1240} 1336}
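
This MapPages overload maps a page group run by run and, if one Operate call fails, unmaps whatever was mapped so far before propagating the error, so a failed call never leaves a half-applied group behind. A sketch of that loop shape, with hypothetical MapOne/UnmapRange primitives standing in for Operate:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    using VAddr = std::uint64_t;
    constexpr std::size_t PageSize = 0x1000;

    struct Run {
        std::uint64_t phys;
        std::size_t pages;
    };

    // Hypothetical primitives standing in for Operate(..., Map/Unmap).
    bool MapOne(VAddr, const Run& run) { return run.phys != 0; }
    void UnmapRange(VAddr, std::size_t) {}

    bool MapGroup(VAddr addr, const std::vector<Run>& group) {
        VAddr cur = addr;
        for (const Run& run : group) {
            if (!MapOne(cur, run)) {
                // Undo exactly the pages mapped so far: [addr, cur).
                UnmapRange(addr, (cur - addr) / PageSize);
                return false;
            }
            cur += run.pages * PageSize;
        }
        return true;
    }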
1241 1337
1242Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state, 1338Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state,
1243 KMemoryPermission perm) { 1339 KMemoryPermission perm) {
1244 // Check that the map is in range. 1340 // Check that the map is in range.
1245 const std::size_t num_pages{page_linked_list.GetNumPages()}; 1341 const size_t num_pages{page_linked_list.GetNumPages()};
1246 const std::size_t size{num_pages * PageSize}; 1342 const size_t size{num_pages * PageSize};
1247 R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory); 1343 R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
1248 1344
1249 // Lock the table. 1345 // Lock the table.
1250 KScopedLightLock lk(general_lock); 1346 KScopedLightLock lk(m_general_lock);
1251 1347
1252 // Check the memory state. 1348 // Check the memory state.
1253 R_TRY(this->CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free, 1349 R_TRY(this->CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free,
1254 KMemoryPermission::None, KMemoryPermission::None, 1350 KMemoryPermission::None, KMemoryPermission::None,
1255 KMemoryAttribute::None, KMemoryAttribute::None)); 1351 KMemoryAttribute::None, KMemoryAttribute::None));
1256 1352
1353 // Create an update allocator.
1354 Result allocator_result{ResultSuccess};
1355 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
1356 m_memory_block_slab_manager);
1357
1257 // Map the pages. 1358 // Map the pages.
1258 R_TRY(MapPages(address, page_linked_list, perm)); 1359 R_TRY(MapPages(address, page_linked_list, perm));
1259 1360
1260 // Update the blocks. 1361 // Update the blocks.
1261 block_manager->Update(address, num_pages, state, perm); 1362 m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm,
1363 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
1364 KMemoryBlockDisableMergeAttribute::None);
1262 1365
1263 return ResultSuccess; 1366 R_SUCCEED();
1264} 1367}
1265 1368
1266Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, 1369Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
1267 PAddr phys_addr, bool is_pa_valid, VAddr region_start, 1370 bool is_pa_valid, VAddr region_start, size_t region_num_pages,
1268 std::size_t region_num_pages, KMemoryState state, 1371 KMemoryState state, KMemoryPermission perm) {
1269 KMemoryPermission perm) {
1270 ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize); 1372 ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);
1271 1373
1272 // Ensure this is a valid map request. 1374 // Ensure this is a valid map request.
@@ -1275,7 +1377,7 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t
1275 R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory); 1377 R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
1276 1378
1277 // Lock the table. 1379 // Lock the table.
1278 KScopedLightLock lk(general_lock); 1380 KScopedLightLock lk(m_general_lock);
1279 1381
1280 // Find a random address to map at. 1382 // Find a random address to map at.
1281 VAddr addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, 1383 VAddr addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0,
@@ -1288,6 +1390,11 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t
1288 KMemoryAttribute::None, KMemoryAttribute::None) 1390 KMemoryAttribute::None, KMemoryAttribute::None)
1289 .IsSuccess()); 1391 .IsSuccess());
1290 1392
1393 // Create an update allocator.
1394 Result allocator_result{ResultSuccess};
1395 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
1396 m_memory_block_slab_manager);
1397
1291 // Perform mapping operation. 1398 // Perform mapping operation.
1292 if (is_pa_valid) { 1399 if (is_pa_valid) {
1293 R_TRY(this->Operate(addr, num_pages, perm, OperationType::Map, phys_addr)); 1400 R_TRY(this->Operate(addr, num_pages, perm, OperationType::Map, phys_addr));
@@ -1296,11 +1403,13 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t
1296 } 1403 }
1297 1404
1298 // Update the blocks. 1405 // Update the blocks.
1299 block_manager->Update(addr, num_pages, state, perm); 1406 m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
1407 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
1408 KMemoryBlockDisableMergeAttribute::None);
1300 1409
1301 // We successfully mapped the pages. 1410 // We successfully mapped the pages.
1302 *out_addr = addr; 1411 *out_addr = addr;
1303 return ResultSuccess; 1412 R_SUCCEED();
1304} 1413}
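
This overload picks its own mapping address: FindFreeArea searches the region for an aligned gap large enough for num_pages (randomized in the real table for ASLR), and the chosen address is then re-validated against the memory state. A naive, non-randomized sketch of such a search over sorted used ranges, assuming alignment is a power of two:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    using VAddr = std::uint64_t;
    constexpr std::size_t PageSize = 0x1000;

    struct Range { VAddr begin; VAddr end; }; // [begin, end), sorted, disjoint

    VAddr FindFreeArea(const std::vector<Range>& used, VAddr region_start,
                       std::size_t region_pages, std::size_t num_pages,
                       std::size_t alignment) {
        const VAddr region_end = region_start + region_pages * PageSize;
        VAddr candidate = (region_start + alignment - 1) & ~(alignment - 1);
        for (const auto& r : used) {
            if (candidate + num_pages * PageSize <= r.begin) {
                break; // the gap before this range is big enough
            }
            if (r.end > candidate) {
                // Skip past the occupied range, keeping alignment.
                candidate = (r.end + alignment - 1) & ~(alignment - 1);
            }
        }
        // 0 means no slot; the real code reports ResultOutOfMemory instead.
        return (candidate + num_pages * PageSize <= region_end) ? candidate : 0;
    }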
1305 1414
1306Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) { 1415Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
@@ -1312,60 +1421,80 @@ Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
1312 if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None, 1421 if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None,
1313 OperationType::Unmap)}; 1422 OperationType::Unmap)};
1314 result.IsError()) { 1423 result.IsError()) {
1315 return result; 1424 R_RETURN(result);
1316 } 1425 }
1317 1426
1318 cur_addr += node.GetNumPages() * PageSize; 1427 cur_addr += node.GetNumPages() * PageSize;
1319 } 1428 }
1320 1429
1321 return ResultSuccess; 1430 R_SUCCEED();
1322} 1431}
1323 1432
1324Result KPageTable::UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state) { 1433Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state) {
1325 // Check that the unmap is in range. 1434 // Check that the unmap is in range.
1326 const std::size_t num_pages{page_linked_list.GetNumPages()}; 1435 const size_t num_pages{page_linked_list.GetNumPages()};
1327 const std::size_t size{num_pages * PageSize}; 1436 const size_t size{num_pages * PageSize};
1328 R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); 1437 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
1329 1438
1330 // Lock the table. 1439 // Lock the table.
1331 KScopedLightLock lk(general_lock); 1440 KScopedLightLock lk(m_general_lock);
1332 1441
1333 // Check the memory state. 1442 // Check the memory state.
1334 R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, state, KMemoryPermission::None, 1443 size_t num_allocator_blocks;
1444 R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
1445 KMemoryState::All, state, KMemoryPermission::None,
1335 KMemoryPermission::None, KMemoryAttribute::All, 1446 KMemoryPermission::None, KMemoryAttribute::All,
1336 KMemoryAttribute::None)); 1447 KMemoryAttribute::None));
1337 1448
1449 // Create an update allocator.
1450 Result allocator_result{ResultSuccess};
1451 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
1452 m_memory_block_slab_manager, num_allocator_blocks);
1453 R_TRY(allocator_result);
1454
1338 // Perform the unmap. 1455 // Perform the unmap.
1339 R_TRY(UnmapPages(addr, page_linked_list)); 1456 R_TRY(UnmapPages(address, page_linked_list));
1340 1457
1341 // Update the blocks. 1458 // Update the blocks.
1342 block_manager->Update(addr, num_pages, state, KMemoryPermission::None); 1459 m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
1460 KMemoryPermission::None, KMemoryAttribute::None,
1461 KMemoryBlockDisableMergeAttribute::None,
1462 KMemoryBlockDisableMergeAttribute::Normal);
1343 1463
1344 return ResultSuccess; 1464 R_SUCCEED();
1345} 1465}
1346 1466
1347Result KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state) { 1467Result KPageTable::UnmapPages(VAddr address, size_t num_pages, KMemoryState state) {
1348 // Check that the unmap is in range. 1468 // Check that the unmap is in range.
1349 const std::size_t size = num_pages * PageSize; 1469 const size_t size = num_pages * PageSize;
1350 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); 1470 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
1351 1471
1352 // Lock the table. 1472 // Lock the table.
1353 KScopedLightLock lk(general_lock); 1473 KScopedLightLock lk(m_general_lock);
1354 1474
1355 // Check the memory state. 1475 // Check the memory state.
1356 std::size_t num_allocator_blocks{}; 1476 size_t num_allocator_blocks{};
1357 R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, 1477 R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
1358 KMemoryState::All, state, KMemoryPermission::None, 1478 KMemoryState::All, state, KMemoryPermission::None,
1359 KMemoryPermission::None, KMemoryAttribute::All, 1479 KMemoryPermission::None, KMemoryAttribute::All,
1360 KMemoryAttribute::None)); 1480 KMemoryAttribute::None));
1361 1481
1482 // Create an update allocator.
1483 Result allocator_result{ResultSuccess};
1484 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
1485 m_memory_block_slab_manager, num_allocator_blocks);
1486 R_TRY(allocator_result);
1487
1362 // Perform the unmap. 1488 // Perform the unmap.
1363 R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap)); 1489 R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap));
1364 1490
1365 // Update the blocks. 1491 // Update the blocks.
1366 block_manager->Update(address, num_pages, KMemoryState::Free, KMemoryPermission::None); 1492 m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
1493 KMemoryPermission::None, KMemoryAttribute::None,
1494 KMemoryBlockDisableMergeAttribute::None,
1495 KMemoryBlockDisableMergeAttribute::Normal);
1367 1496
1368 return ResultSuccess; 1497 R_SUCCEED();
1369} 1498}
1370 1499
1371Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages, 1500Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
@@ -1380,7 +1509,7 @@ Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t n
1380 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); 1509 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
1381 1510
1382 // Lock the table. 1511 // Lock the table.
1383 KScopedLightLock lk(general_lock); 1512 KScopedLightLock lk(m_general_lock);
1384 1513
1385 // Check if state allows us to create the group. 1514 // Check if state allows us to create the group.
1386 R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted, 1515 R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted,
@@ -1390,15 +1519,15 @@ Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t n
1390 // Create a new page group for the region. 1519 // Create a new page group for the region.
1391 R_TRY(this->MakePageGroup(*out, address, num_pages)); 1520 R_TRY(this->MakePageGroup(*out, address, num_pages));
1392 1521
1393 return ResultSuccess; 1522 R_SUCCEED();
1394} 1523}
1395 1524
1396Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size, 1525Result KPageTable::SetProcessMemoryPermission(VAddr addr, size_t size,
1397 Svc::MemoryPermission svc_perm) { 1526 Svc::MemoryPermission svc_perm) {
1398 const size_t num_pages = size / PageSize; 1527 const size_t num_pages = size / PageSize;
1399 1528
1400 // Lock the table. 1529 // Lock the table.
1401 KScopedLightLock lk(general_lock); 1530 KScopedLightLock lk(m_general_lock);
1402 1531
1403 // Verify we can change the memory permission. 1532 // Verify we can change the memory permission.
1404 KMemoryState old_state; 1533 KMemoryState old_state;
@@ -1435,105 +1564,101 @@ Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
1435 // Succeed if there's nothing to do. 1564 // Succeed if there's nothing to do.
1436 R_SUCCEED_IF(old_perm == new_perm && old_state == new_state); 1565 R_SUCCEED_IF(old_perm == new_perm && old_state == new_state);
1437 1566
1567 // Create an update allocator.
1568 Result allocator_result{ResultSuccess};
1569 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
1570 m_memory_block_slab_manager, num_allocator_blocks);
1571 R_TRY(allocator_result);
1572
1438 // Perform mapping operation. 1573 // Perform mapping operation.
1439 const auto operation = 1574 const auto operation =
1440 was_x ? OperationType::ChangePermissionsAndRefresh : OperationType::ChangePermissions; 1575 was_x ? OperationType::ChangePermissionsAndRefresh : OperationType::ChangePermissions;
1441 R_TRY(Operate(addr, num_pages, new_perm, operation)); 1576 R_TRY(Operate(addr, num_pages, new_perm, operation));
1442 1577
1443 // Update the blocks. 1578 // Update the blocks.
1444 block_manager->Update(addr, num_pages, new_state, new_perm, KMemoryAttribute::None); 1579 m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm,
1580 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
1581 KMemoryBlockDisableMergeAttribute::None);
1445 1582
1446 // Ensure cache coherency, if we're setting pages as executable. 1583 // Ensure cache coherency, if we're setting pages as executable.
1447 if (is_x) { 1584 if (is_x) {
1448 system.InvalidateCpuInstructionCacheRange(addr, size); 1585 m_system.InvalidateCpuInstructionCacheRange(addr, size);
1449 } 1586 }
1450 1587
1451 return ResultSuccess; 1588 R_SUCCEED();
1452} 1589}
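
The cache-coherency step above matters because pages that just became executable may still hold stale instructions in the guest's instruction caches. On the host side, the analogous operation for JIT or self-modifying code is exposed by GCC and Clang as __builtin___clear_cache; a tiny hedged sketch of that host-side technique (not yuzu's InvalidateCpuInstructionCacheRange):

    #include <cstddef>

    void MakeExecutableAndSync(char* code, std::size_t size) {
        // ... reprotect the range executable (mprotect/VirtualProtect, omitted) ...
    #if defined(__GNUC__)
        __builtin___clear_cache(code, code + size); // flush icache for the range
    #endif
    }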
1453 1590
1454KMemoryInfo KPageTable::QueryInfoImpl(VAddr addr) { 1591KMemoryInfo KPageTable::QueryInfoImpl(VAddr addr) {
1455 KScopedLightLock lk(general_lock); 1592 KScopedLightLock lk(m_general_lock);
1456 1593
1457 return block_manager->FindBlock(addr).GetMemoryInfo(); 1594 return m_memory_block_manager.FindBlock(addr)->GetMemoryInfo();
1458} 1595}
1459 1596
1460KMemoryInfo KPageTable::QueryInfo(VAddr addr) { 1597KMemoryInfo KPageTable::QueryInfo(VAddr addr) {
1461 if (!Contains(addr, 1)) { 1598 if (!Contains(addr, 1)) {
1462 return {address_space_end, 0 - address_space_end, KMemoryState::Inaccessible, 1599 return {
1463 KMemoryPermission::None, KMemoryAttribute::None, KMemoryPermission::None}; 1600 .m_address = m_address_space_end,
1601 .m_size = 0 - m_address_space_end,
1602 .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible),
1603 .m_device_disable_merge_left_count = 0,
1604 .m_device_disable_merge_right_count = 0,
1605 .m_ipc_lock_count = 0,
1606 .m_device_use_count = 0,
1607 .m_ipc_disable_merge_count = 0,
1608 .m_permission = KMemoryPermission::None,
1609 .m_attribute = KMemoryAttribute::None,
1610 .m_original_permission = KMemoryPermission::None,
1611 .m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None,
1612 };
1464 } 1613 }
1465 1614
1466 return QueryInfoImpl(addr); 1615 return QueryInfoImpl(addr);
1467} 1616}
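
For an address outside the table, QueryInfo now returns an explicit sentinel block whose size is written as 0 - m_address_space_end: unsigned wrap-around makes that exactly the distance from address_space_end to the top of the 64-bit address space. A check of that arithmetic with an assumed example value:

    #include <cstdint>

    using VAddr = std::uint64_t;

    // Example value only; the real end depends on the address space layout.
    constexpr VAddr kAddressSpaceEnd = 0x0000'8000'0000'0000;

    // Unsigned wrap-around: 0 - end == 2^64 - end.
    constexpr std::uint64_t kSentinelSize = 0 - kAddressSpaceEnd;

    // The sentinel block spans [end, 2^64), i.e. up to the top of memory.
    static_assert(kAddressSpaceEnd + kSentinelSize == 0);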
1468 1617
1469Result KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm) { 1618Result KPageTable::SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm) {
1470 KScopedLightLock lk(general_lock);
1471
1472 KMemoryState state{};
1473 KMemoryAttribute attribute{};
1474
1475 R_TRY(CheckMemoryState(&state, nullptr, &attribute, nullptr, addr, size,
1476 KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
1477 KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
1478 KMemoryPermission::All, KMemoryPermission::UserReadWrite,
1479 KMemoryAttribute::Mask, KMemoryAttribute::None,
1480 KMemoryAttribute::IpcAndDeviceMapped));
1481
1482 block_manager->Update(addr, size / PageSize, state, perm, attribute | KMemoryAttribute::Locked);
1483
1484 return ResultSuccess;
1485}
1486
1487Result KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
1488 KScopedLightLock lk(general_lock);
1489
1490 KMemoryState state{};
1491
1492 R_TRY(CheckMemoryState(&state, nullptr, nullptr, nullptr, addr, size,
1493 KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
1494 KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
1495 KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::Mask,
1496 KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped));
1497
1498 block_manager->Update(addr, size / PageSize, state, KMemoryPermission::UserReadWrite);
1499 return ResultSuccess;
1500}
1501
1502Result KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
1503 Svc::MemoryPermission svc_perm) {
1504 const size_t num_pages = size / PageSize; 1619 const size_t num_pages = size / PageSize;
1505 1620
1506 // Lock the table. 1621 // Lock the table.
1507 KScopedLightLock lk(general_lock); 1622 KScopedLightLock lk(m_general_lock);
1508 1623
1509 // Verify we can change the memory permission. 1624 // Verify we can change the memory permission.
1510 KMemoryState old_state; 1625 KMemoryState old_state;
1511 KMemoryPermission old_perm; 1626 KMemoryPermission old_perm;
1512 R_TRY(this->CheckMemoryState( 1627 size_t num_allocator_blocks;
1513 std::addressof(old_state), std::addressof(old_perm), nullptr, nullptr, addr, size, 1628 R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr,
1514 KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect, KMemoryPermission::None, 1629 std::addressof(num_allocator_blocks), addr, size,
1515 KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None)); 1630 KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect,
1631 KMemoryPermission::None, KMemoryPermission::None,
1632 KMemoryAttribute::All, KMemoryAttribute::None));
1516 1633
1517 // Determine new perm. 1634 // Determine new perm.
1518 const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm); 1635 const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
1519 R_SUCCEED_IF(old_perm == new_perm); 1636 R_SUCCEED_IF(old_perm == new_perm);
1520 1637
1638 // Create an update allocator.
1639 Result allocator_result{ResultSuccess};
1640 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
1641 m_memory_block_slab_manager, num_allocator_blocks);
1642 R_TRY(allocator_result);
1643
1521 // Perform mapping operation. 1644 // Perform mapping operation.
1522 R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions)); 1645 R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
1523 1646
1524 // Update the blocks. 1647 // Update the blocks.
1525 block_manager->Update(addr, num_pages, old_state, new_perm, KMemoryAttribute::None); 1648 m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
1649 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
1650 KMemoryBlockDisableMergeAttribute::None);
1526 1651
1527 return ResultSuccess; 1652 R_SUCCEED();
1528} 1653}
1529 1654
1530Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr) { 1655Result KPageTable::SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr) {
1531 const size_t num_pages = size / PageSize; 1656 const size_t num_pages = size / PageSize;
1532 ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) == 1657 ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) ==
1533 KMemoryAttribute::SetMask); 1658 KMemoryAttribute::SetMask);
1534 1659
1535 // Lock the table. 1660 // Lock the table.
1536 KScopedLightLock lk(general_lock); 1661 KScopedLightLock lk(m_general_lock);
1537 1662
1538 // Verify we can change the memory attribute. 1663 // Verify we can change the memory attribute.
1539 KMemoryState old_state; 1664 KMemoryState old_state;
@@ -1548,6 +1673,12 @@ Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u3
1548 KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None, 1673 KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
1549 AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask)); 1674 AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
1550 1675
1676 // Create an update allocator.
1677 Result allocator_result{ResultSuccess};
1678 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
1679 m_memory_block_slab_manager, num_allocator_blocks);
1680 R_TRY(allocator_result);
1681
1551 // Determine the new attribute. 1682 // Determine the new attribute.
1552 const KMemoryAttribute new_attr = 1683 const KMemoryAttribute new_attr =
1553 static_cast<KMemoryAttribute>(((old_attr & static_cast<KMemoryAttribute>(~mask)) | 1684 static_cast<KMemoryAttribute>(((old_attr & static_cast<KMemoryAttribute>(~mask)) |
@@ -1557,123 +1688,142 @@ Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u3
1557 this->Operate(addr, num_pages, old_perm, OperationType::ChangePermissionsAndRefresh); 1688 this->Operate(addr, num_pages, old_perm, OperationType::ChangePermissionsAndRefresh);
1558 1689
1559 // Update the blocks. 1690 // Update the blocks.
1560 block_manager->Update(addr, num_pages, old_state, old_perm, new_attr); 1691 m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, old_perm,
1692 new_attr, KMemoryBlockDisableMergeAttribute::None,
1693 KMemoryBlockDisableMergeAttribute::None);
1561 1694
1562 return ResultSuccess; 1695 R_SUCCEED();
1563} 1696}
1564 1697
1565Result KPageTable::SetMaxHeapSize(std::size_t size) { 1698Result KPageTable::SetMaxHeapSize(size_t size) {
1566 // Lock the table. 1699 // Lock the table.
1567 KScopedLightLock lk(general_lock); 1700 KScopedLightLock lk(m_general_lock);
1568 1701
1569 // Only process page tables are allowed to set heap size. 1702 // Only process page tables are allowed to set heap size.
1570 ASSERT(!this->IsKernel()); 1703 ASSERT(!this->IsKernel());
1571 1704
1572 max_heap_size = size; 1705 m_max_heap_size = size;
1573 1706
1574 return ResultSuccess; 1707 R_SUCCEED();
1575} 1708}
1576 1709
1577Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) { 1710Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
1578 // Lock the physical memory mutex. 1711 // Lock the physical memory mutex.
1579 KScopedLightLock map_phys_mem_lk(map_physical_memory_lock); 1712 KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
1580 1713
1581 // Try to perform a reduction in heap, instead of an extension. 1714 // Try to perform a reduction in heap, instead of an extension.
1582 VAddr cur_address{}; 1715 VAddr cur_address{};
1583 std::size_t allocation_size{}; 1716 size_t allocation_size{};
1584 { 1717 {
1585 // Lock the table. 1718 // Lock the table.
1586 KScopedLightLock lk(general_lock); 1719 KScopedLightLock lk(m_general_lock);
1587 1720
1588 // Validate that setting heap size is possible at all. 1721 // Validate that setting heap size is possible at all.
1589 R_UNLESS(!is_kernel, ResultOutOfMemory); 1722 R_UNLESS(!m_is_kernel, ResultOutOfMemory);
1590 R_UNLESS(size <= static_cast<std::size_t>(heap_region_end - heap_region_start), 1723 R_UNLESS(size <= static_cast<size_t>(m_heap_region_end - m_heap_region_start),
1591 ResultOutOfMemory); 1724 ResultOutOfMemory);
1592 R_UNLESS(size <= max_heap_size, ResultOutOfMemory); 1725 R_UNLESS(size <= m_max_heap_size, ResultOutOfMemory);
1593 1726
1594 if (size < GetHeapSize()) { 1727 if (size < GetHeapSize()) {
1595 // The size being requested is less than the current size, so we need to free the end of 1728 // The size being requested is less than the current size, so we need to free the end of
1596 // the heap. 1729 // the heap.
1597 1730
1598 // Validate memory state. 1731 // Validate memory state.
1599 std::size_t num_allocator_blocks; 1732 size_t num_allocator_blocks;
1600 R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), 1733 R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks),
1601 heap_region_start + size, GetHeapSize() - size, 1734 m_heap_region_start + size, GetHeapSize() - size,
1602 KMemoryState::All, KMemoryState::Normal, 1735 KMemoryState::All, KMemoryState::Normal,
1603 KMemoryPermission::All, KMemoryPermission::UserReadWrite, 1736 KMemoryPermission::All, KMemoryPermission::UserReadWrite,
1604 KMemoryAttribute::All, KMemoryAttribute::None)); 1737 KMemoryAttribute::All, KMemoryAttribute::None));
1605 1738
1739 // Create an update allocator.
1740 Result allocator_result{ResultSuccess};
1741 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
1742 m_memory_block_slab_manager,
1743 num_allocator_blocks);
1744 R_TRY(allocator_result);
1745
1606 // Unmap the end of the heap. 1746 // Unmap the end of the heap.
1607 const auto num_pages = (GetHeapSize() - size) / PageSize; 1747 const auto num_pages = (GetHeapSize() - size) / PageSize;
1608 R_TRY(Operate(heap_region_start + size, num_pages, KMemoryPermission::None, 1748 R_TRY(Operate(m_heap_region_start + size, num_pages, KMemoryPermission::None,
1609 OperationType::Unmap)); 1749 OperationType::Unmap));
1610 1750
1611 // Release the memory from the resource limit. 1751 // Release the memory from the resource limit.
1612 system.Kernel().CurrentProcess()->GetResourceLimit()->Release( 1752 m_system.Kernel().CurrentProcess()->GetResourceLimit()->Release(
1613 LimitableResource::PhysicalMemory, num_pages * PageSize); 1753 LimitableResource::PhysicalMemory, num_pages * PageSize);
1614 1754
1615 // Apply the memory block update. 1755 // Apply the memory block update.
1616 block_manager->Update(heap_region_start + size, num_pages, KMemoryState::Free, 1756 m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size,
1617 KMemoryPermission::None, KMemoryAttribute::None); 1757 num_pages, KMemoryState::Free, KMemoryPermission::None,
1758 KMemoryAttribute::None,
1759 KMemoryBlockDisableMergeAttribute::None,
1760 size == 0 ? KMemoryBlockDisableMergeAttribute::Normal
1761 : KMemoryBlockDisableMergeAttribute::None);
1618 1762
1619 // Update the current heap end. 1763 // Update the current heap end.
1620 current_heap_end = heap_region_start + size; 1764 m_current_heap_end = m_heap_region_start + size;
1621 1765
1622 // Set the output. 1766 // Set the output.
1623 *out = heap_region_start; 1767 *out = m_heap_region_start;
1624 return ResultSuccess; 1768 R_SUCCEED();
1625 } else if (size == GetHeapSize()) { 1769 } else if (size == GetHeapSize()) {
1626 // The size requested is exactly the current size. 1770 // The size requested is exactly the current size.
1627 *out = heap_region_start; 1771 *out = m_heap_region_start;
1628 return ResultSuccess; 1772 R_SUCCEED();
1629 } else { 1773 } else {
1630 // We have to allocate memory. Determine how much to allocate and where while the table 1774 // We have to allocate memory. Determine how much to allocate and where while the table
1631 // is locked. 1775 // is locked.
1632 cur_address = current_heap_end; 1776 cur_address = m_current_heap_end;
1633 allocation_size = size - GetHeapSize(); 1777 allocation_size = size - GetHeapSize();
1634 } 1778 }
1635 } 1779 }
1636 1780
1637 // Reserve memory for the heap extension. 1781 // Reserve memory for the heap extension.
1638 KScopedResourceReservation memory_reservation( 1782 KScopedResourceReservation memory_reservation(
1639 system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory, 1783 m_system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
1640 allocation_size); 1784 allocation_size);
1641 R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); 1785 R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
1642 1786
1643 // Allocate pages for the heap extension. 1787 // Allocate pages for the heap extension.
1644 KPageGroup pg; 1788 KPageGroup pg;
1645 R_TRY(system.Kernel().MemoryManager().AllocateAndOpen( 1789 R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
1646 &pg, allocation_size / PageSize, 1790 &pg, allocation_size / PageSize,
1647 KMemoryManager::EncodeOption(memory_pool, allocation_option))); 1791 KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
1648 1792
1649 // Clear all the newly allocated pages. 1793 // Clear all the newly allocated pages.
1650 for (const auto& it : pg.Nodes()) { 1794 for (const auto& it : pg.Nodes()) {
1651 std::memset(system.DeviceMemory().GetPointer(it.GetAddress()), heap_fill_value, 1795 std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
1652 it.GetSize()); 1796 it.GetSize());
1653 } 1797 }
1654 1798
1655 // Map the pages. 1799 // Map the pages.
1656 { 1800 {
1657 // Lock the table. 1801 // Lock the table.
1658 KScopedLightLock lk(general_lock); 1802 KScopedLightLock lk(m_general_lock);
1659 1803
1660 // Ensure that the heap hasn't changed since we began executing. 1804 // Ensure that the heap hasn't changed since we began executing.
1661 ASSERT(cur_address == current_heap_end); 1805 ASSERT(cur_address == m_current_heap_end);
1662 1806
1663 // Check the memory state. 1807 // Check the memory state.
1664 std::size_t num_allocator_blocks{}; 1808 size_t num_allocator_blocks{};
1665 R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), current_heap_end, 1809 R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), m_current_heap_end,
1666 allocation_size, KMemoryState::All, KMemoryState::Free, 1810 allocation_size, KMemoryState::All, KMemoryState::Free,
1667 KMemoryPermission::None, KMemoryPermission::None, 1811 KMemoryPermission::None, KMemoryPermission::None,
1668 KMemoryAttribute::None, KMemoryAttribute::None)); 1812 KMemoryAttribute::None, KMemoryAttribute::None));
1669 1813
1814 // Create an update allocator.
1815 Result allocator_result{ResultSuccess};
1816 KMemoryBlockManagerUpdateAllocator allocator(
1817 std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
1818 R_TRY(allocator_result);
1819
1670 // Map the pages. 1820 // Map the pages.
1671 const auto num_pages = allocation_size / PageSize; 1821 const auto num_pages = allocation_size / PageSize;
1672 R_TRY(Operate(current_heap_end, num_pages, pg, OperationType::MapGroup)); 1822 R_TRY(Operate(m_current_heap_end, num_pages, pg, OperationType::MapGroup));
1673 1823
1674 // Clear all the newly allocated pages. 1824 // Clear all the newly allocated pages.
1675 for (std::size_t cur_page = 0; cur_page < num_pages; ++cur_page) { 1825 for (size_t cur_page = 0; cur_page < num_pages; ++cur_page) {
1676 std::memset(system.Memory().GetPointer(current_heap_end + (cur_page * PageSize)), 0, 1826 std::memset(m_system.Memory().GetPointer(m_current_heap_end + (cur_page * PageSize)), 0,
1677 PageSize); 1827 PageSize);
1678 } 1828 }
1679 1829
@@ -1681,133 +1831,172 @@ Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
1681 memory_reservation.Commit(); 1831 memory_reservation.Commit();
1682 1832
1683 // Apply the memory block update. 1833 // Apply the memory block update.
1684 block_manager->Update(current_heap_end, num_pages, KMemoryState::Normal, 1834 m_memory_block_manager.Update(
1685 KMemoryPermission::UserReadWrite, KMemoryAttribute::None); 1835 std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState::Normal,
1836 KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
1837 m_heap_region_start == m_current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal
1838 : KMemoryBlockDisableMergeAttribute::None,
1839 KMemoryBlockDisableMergeAttribute::None);
1686 1840
1687 // Update the current heap end. 1841 // Update the current heap end.
1688 current_heap_end = heap_region_start + size; 1842 m_current_heap_end = m_heap_region_start + size;
1689 1843
1690 // Set the output. 1844 // Set the output.
1691 *out = heap_region_start; 1845 *out = m_heap_region_start;
1692 return ResultSuccess; 1846 R_SUCCEED();
1693 } 1847 }
1694} 1848}
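
SetHeapSize is deliberately two-phase: it samples the heap end under the table lock, drops that lock to reserve and allocate pages, then re-takes it and asserts the heap end has not moved. The assert is sound only because the outer m_map_physical_memory_lock is held for the whole call and serializes heap operations. A minimal sketch of that shape, with std::mutex standing in for KScopedLightLock:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <mutex>
    #include <vector>

    struct Heap {
        std::mutex phys_lock;   // outer lock, held for the whole operation
        std::mutex table_lock;  // inner lock, dropped around allocation
        std::uint64_t heap_end = 0x10000;

        bool Extend(std::uint64_t new_end) {
            std::scoped_lock outer(phys_lock); // like m_map_physical_memory_lock
            std::uint64_t snapshot;
            {
                std::scoped_lock lk(table_lock);
                snapshot = heap_end;           // decide what to do while locked
            }
            // Allocate without holding the table lock (stand-in for page
            // allocation through the memory manager).
            std::vector<char> pages(static_cast<std::size_t>(new_end - snapshot));
            {
                std::scoped_lock lk(table_lock);
                assert(snapshot == heap_end);  // safe: outer lock serializes heap ops
                heap_end = new_end;            // commit under the lock
            }
            return !pages.empty();
        }
    };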
1695 1849
1696ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align, 1850ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_t align,
1697 bool is_map_only, VAddr region_start, 1851 bool is_map_only, VAddr region_start,
1698 std::size_t region_num_pages, KMemoryState state, 1852 size_t region_num_pages, KMemoryState state,
1699 KMemoryPermission perm, PAddr map_addr) { 1853 KMemoryPermission perm, PAddr map_addr) {
1700 KScopedLightLock lk(general_lock); 1854 KScopedLightLock lk(m_general_lock);
1701
1702 if (!CanContain(region_start, region_num_pages * PageSize, state)) {
1703 return ResultInvalidCurrentMemory;
1704 }
1705
1706 if (region_num_pages <= needed_num_pages) {
1707 return ResultOutOfMemory;
1708 }
1709 1855
1856 R_UNLESS(CanContain(region_start, region_num_pages * PageSize, state),
1857 ResultInvalidCurrentMemory);
1858 R_UNLESS(region_num_pages > needed_num_pages, ResultOutOfMemory);
1710 const VAddr addr{ 1859 const VAddr addr{
1711 AllocateVirtualMemory(region_start, region_num_pages, needed_num_pages, align)}; 1860 AllocateVirtualMemory(region_start, region_num_pages, needed_num_pages, align)};
1712 if (!addr) { 1861 R_UNLESS(addr, ResultOutOfMemory);
1713 return ResultOutOfMemory; 1862
1714 } 1863 // Create an update allocator.
1864 Result allocator_result{ResultSuccess};
1865 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
1866 m_memory_block_slab_manager);
1715 1867
1716 if (is_map_only) { 1868 if (is_map_only) {
1717 R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)); 1869 R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
1718 } else { 1870 } else {
1719 KPageGroup page_group; 1871 KPageGroup page_group;
1720 R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess( 1872 R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess(
1721 &page_group, needed_num_pages, 1873 &page_group, needed_num_pages,
1722 KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0)); 1874 KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0));
1723 R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)); 1875 R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup));
1724 } 1876 }
1725 1877
1726 block_manager->Update(addr, needed_num_pages, state, perm); 1878 // Update the blocks.
1879 m_memory_block_manager.Update(std::addressof(allocator), addr, needed_num_pages, state, perm,
1880 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
1881 KMemoryBlockDisableMergeAttribute::None);
1727 1882
1728 return addr; 1883 return addr;
1729} 1884}
1730 1885
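AllocateAndMapMemory above also shows the move from explicit `if (...) return ...;` error handling to the R_UNLESS / R_TRY / R_SUCCEED / R_RETURN macros. A simplified sketch of how such macros can be defined; yuzu's actual definitions live in its result headers and differ in detail, so treat the bodies below as assumptions.

// Guard-clause result macros: fail fast, propagate errors, fall through on
// success.
#include <cstddef>
#include <cstdio>

struct Result {
    int code{0};
    bool IsError() const { return code != 0; }
    bool IsSuccess() const { return code == 0; }
};
constexpr Result ResultSuccess{0};
constexpr Result ResultInvalidSize{101};

// Return `res` unless `cond` holds.
#define R_UNLESS(cond, res)                                                    \
    do {                                                                       \
        if (!(cond)) {                                                         \
            return res;                                                        \
        }                                                                      \
    } while (0)

// Propagate any error produced by a sub-operation.
#define R_TRY(expr)                                                            \
    do {                                                                       \
        if (const Result r_try_ = (expr); r_try_.IsError()) {                  \
            return r_try_;                                                     \
        }                                                                      \
    } while (0)

#define R_SUCCEED() return ResultSuccess
#define R_RETURN(expr) return (expr)

Result Validate(std::size_t size) {
    R_UNLESS(size > 0, ResultInvalidSize); // one-line guard clause
    R_SUCCEED();
}

Result DoWork(std::size_t size) {
    R_TRY(Validate(size)); // early-out on error
    R_SUCCEED();
}

int main() {
    std::printf("ok=%d err=%d\n", DoWork(8).IsSuccess(), DoWork(0).IsError());
}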
1731Result KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) { 1886Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm,
1732 KScopedLightLock lk(general_lock); 1887 bool is_aligned) {
1733 1888 // Lightly validate the range before doing anything else.
1734 KMemoryPermission perm{}; 1889 const size_t num_pages = size / PageSize;
1735 if (const Result result{CheckMemoryState( 1890 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
1736 nullptr, &perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute,
1737 KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
1738 KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None,
1739 KMemoryAttribute::DeviceSharedAndUncached)};
1740 result.IsError()) {
1741 return result;
1742 }
1743 1891
1744 block_manager->UpdateLock( 1892 // Lock the table.
1745 addr, size / PageSize, 1893 KScopedLightLock lk(m_general_lock);
1746 [](KMemoryBlockManager::iterator block, KMemoryPermission permission) {
1747 block->ShareToDevice(permission);
1748 },
1749 perm);
1750 1894
1751 return ResultSuccess; 1895 // Check the memory state.
1896 const auto test_state =
1897 (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap);
1898 size_t num_allocator_blocks;
1899 R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, test_state,
1900 test_state, perm, perm,
1901 KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked,
1902 KMemoryAttribute::None, KMemoryAttribute::DeviceShared));
1903
1904 // Create an update allocator.
1905 Result allocator_result{ResultSuccess};
1906 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
1907 m_memory_block_slab_manager, num_allocator_blocks);
1908 R_TRY(allocator_result);
1909
1910 // Update the memory blocks.
1911 m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
1912 &KMemoryBlock::ShareToDevice, KMemoryPermission::None);
1913
1914 R_SUCCEED();
1752} 1915}
1753 1916
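Note how the UpdateLock call above now passes &KMemoryBlock::ShareToDevice directly instead of wrapping it in a lambda. A small sketch of that pointer-to-member-function dispatch, with toy types standing in for KMemoryBlock and KMemoryBlockManager.

// Member-function-pointer dispatch, mirroring the
// KMemoryBlockManager::MemoryBlockLockFunction parameter.
#include <cstdio>
#include <functional>

enum class Permission { None, ReadWrite };

struct Block {
    int device_use_count{0};
    void ShareToDevice(Permission) { ++device_use_count; }
    void UnshareToDevice(Permission) { --device_use_count; }
};

using LockFunction = void (Block::*)(Permission);

void UpdateLock(Block& block, LockFunction func, Permission perm) {
    std::invoke(func, block, perm); // dispatch through the member pointer
}

int main() {
    Block block;
    UpdateLock(block, &Block::ShareToDevice, Permission::None);
    UpdateLock(block, &Block::UnshareToDevice, Permission::None);
    std::printf("device_use_count=%d\n", block.device_use_count);
}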
1754Result KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) { 1917Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) {
1755 KScopedLightLock lk(general_lock); 1918 // Lightly validate the range before doing anything else.
1756 1919 const size_t num_pages = size / PageSize;
1757 KMemoryPermission perm{}; 1920 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
1758 if (const Result result{CheckMemoryState(
1759 nullptr, &perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute,
1760 KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
1761 KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None,
1762 KMemoryAttribute::DeviceSharedAndUncached)};
1763 result.IsError()) {
1764 return result;
1765 }
1766 1921
1767 block_manager->UpdateLock( 1922 // Lock the table.
1768 addr, size / PageSize, 1923 KScopedLightLock lk(m_general_lock);
1769 [](KMemoryBlockManager::iterator block, KMemoryPermission permission) {
1770 block->UnshareToDevice(permission);
1771 },
1772 perm);
1773 1924
1774 return ResultSuccess; 1925 // Check the memory state.
1926 size_t num_allocator_blocks;
1927 R_TRY(this->CheckMemoryStateContiguous(
1928 std::addressof(num_allocator_blocks), address, size,
1929 KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap,
1930 KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap,
1931 KMemoryPermission::None, KMemoryPermission::None,
1932 KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
1933
1934 // Create an update allocator.
1935 Result allocator_result{ResultSuccess};
1936 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
1937 m_memory_block_slab_manager, num_allocator_blocks);
1938 R_TRY(allocator_result);
1939
1940 // Update the memory blocks.
1941 const KMemoryBlockManager::MemoryBlockLockFunction lock_func =
1942 m_enable_device_address_space_merge
1943 ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare
1944 : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight;
1945 m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func,
1946 KMemoryPermission::None);
1947
1948 R_SUCCEED();
1775} 1949}
1776 1950
1777Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size) { 1951Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, size_t size) {
1778 return this->LockMemoryAndOpen( 1952 // Lightly validate the range before doing anything else.
1953 const size_t num_pages = size / PageSize;
1954 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
1955
1956 // Lock the table.
1957 KScopedLightLock lk(m_general_lock);
1958
1959 // Check the memory state.
1960 size_t num_allocator_blocks;
1961 R_TRY(this->CheckMemoryStateContiguous(
1962 std::addressof(num_allocator_blocks), address, size, KMemoryState::FlagCanDeviceMap,
1963 KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None,
1964 KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
1965
1966 // Create an update allocator.
1967 Result allocator_result{ResultSuccess};
1968 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
1969 m_memory_block_slab_manager, num_allocator_blocks);
1970 R_TRY(allocator_result);
1971
1972 // Update the memory blocks.
1973 m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
1974 &KMemoryBlock::UnshareToDevice, KMemoryPermission::None);
1975
1976 R_SUCCEED();
1977}
1978
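The DeviceShared attribute checked and cleared above behaves like a share count: sharing a block to a device bumps a counter and sets the attribute, and the attribute only clears once the last unshare lands. The model below is an assumption-level illustration, not yuzu's actual KMemoryBlock implementation.

// Counted device sharing: attribute tracks "any shares outstanding".
#include <cassert>
#include <cstdint>

enum class Attribute : std::uint32_t { None = 0, DeviceShared = 1 << 0 };

struct Block {
    std::uint16_t device_use_count{0};
    Attribute attribute{Attribute::None};

    void ShareToDevice() {
        ++device_use_count;
        attribute = Attribute::DeviceShared;
    }
    void UnshareToDevice() {
        assert(device_use_count > 0);
        if (--device_use_count == 0) {
            attribute = Attribute::None; // last unshare clears the flag
        }
    }
};

int main() {
    Block b;
    b.ShareToDevice();
    b.ShareToDevice();
    b.UnshareToDevice();
    assert(b.attribute == Attribute::DeviceShared); // one share still live
    b.UnshareToDevice();
    assert(b.attribute == Attribute::None);
}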
1979Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size) {
1980 R_RETURN(this->LockMemoryAndOpen(
1779 out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, 1981 out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
1780 KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All, 1982 KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
1781 KMemoryAttribute::None, 1983 KMemoryAttribute::None,
1782 static_cast<KMemoryPermission>(KMemoryPermission::NotMapped | 1984 static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
1783 KMemoryPermission::KernelReadWrite), 1985 KMemoryPermission::KernelReadWrite),
1784 KMemoryAttribute::Locked); 1986 KMemoryAttribute::Locked));
1785} 1987}
1786 1988
1787Result KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg) { 1989Result KPageTable::UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg) {
1788 return this->UnlockMemory( 1990 R_RETURN(this->UnlockMemory(
1789 addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, 1991 addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
1790 KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All, 1992 KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
1791 KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg); 1993 KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg));
1792}
1793
1794Result KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
1795 block_manager = std::make_unique<KMemoryBlockManager>(start, end);
1796
1797 return ResultSuccess;
1798}
1799
1800bool KPageTable::IsRegionMapped(VAddr address, u64 size) {
1801 return CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free,
1802 KMemoryPermission::All, KMemoryPermission::None, KMemoryAttribute::Mask,
1803 KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped)
1804 .IsError();
1805} 1994}
1806 1995
1807bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const { 1996bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const {
1808 auto start_ptr = system.Memory().GetPointer(addr); 1997 auto start_ptr = m_system.DeviceMemory().GetPointer<u8>(addr);
1809 for (u64 offset{}; offset < size; offset += PageSize) { 1998 for (u64 offset{}; offset < size; offset += PageSize) {
1810 if (start_ptr != system.Memory().GetPointer(addr + offset)) { 1999 if (start_ptr != m_system.DeviceMemory().GetPointer<u8>(addr + offset)) {
1811 return false; 2000 return false;
1812 } 2001 }
1813 start_ptr += PageSize; 2002 start_ptr += PageSize;
@@ -1815,8 +2004,7 @@ bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const {
1815 return true; 2004 return true;
1816} 2005}
1817 2006
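IsRegionContiguous above (now reading through DeviceMemory() rather than Memory()) walks the range a page at a time and requires each page's host backing pointer to advance by exactly PageSize. A toy standalone version of that walk; GetHostPointer is a hypothetical stand-in for the real guest-to-host translation.

// Host-contiguity check: the backing pointer must advance by one page per
// guest page, with no jumps.
#include <cstdint>
#include <cstdio>

constexpr std::uint64_t PageSize = 0x1000;

std::uint8_t backing[4 * PageSize];
std::uint8_t* GetHostPointer(std::uint64_t addr) {
    return backing + (addr % (4 * PageSize)); // toy identity-style mapping
}

bool IsRegionContiguous(std::uint64_t addr, std::uint64_t size) {
    std::uint8_t* expected = GetHostPointer(addr);
    for (std::uint64_t offset = 0; offset < size; offset += PageSize) {
        if (GetHostPointer(addr + offset) != expected) {
            return false; // backing pointer jumped: not contiguous
        }
        expected += PageSize; // next page must follow immediately
    }
    return true;
}

int main() {
    std::printf("contiguous=%d\n", IsRegionContiguous(0, 2 * PageSize));
}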
1818void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages, 2007void KPageTable::AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list) {
1819 KPageGroup& page_linked_list) {
1820 VAddr addr{start}; 2008 VAddr addr{start};
1821 while (addr < start + (num_pages * PageSize)) { 2009 while (addr < start + (num_pages * PageSize)) {
1822 const PAddr paddr{GetPhysicalAddr(addr)}; 2010 const PAddr paddr{GetPhysicalAddr(addr)};
@@ -1826,16 +2014,16 @@ void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages,
1826 } 2014 }
1827} 2015}
1828 2016
1829VAddr KPageTable::AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, 2017VAddr KPageTable::AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages,
1830 u64 needed_num_pages, std::size_t align) { 2018 size_t align) {
1831 if (is_aslr_enabled) { 2019 if (m_enable_aslr) {
1832 UNIMPLEMENTED(); 2020 UNIMPLEMENTED();
1833 } 2021 }
1834 return block_manager->FindFreeArea(start, region_num_pages, needed_num_pages, align, 0, 2022 return m_memory_block_manager.FindFreeArea(start, region_num_pages, needed_num_pages, align, 0,
1835 IsKernel() ? 1 : 4); 2023 IsKernel() ? 1 : 4);
1836} 2024}
1837 2025
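AllocateVirtualMemory above passes `IsKernel() ? 1 : 4` guard pages into FindFreeArea, i.e. user allocations are padded with four unmapped pages on each side. A toy linear-scan sketch of a guard-page-aware search; the real FindFreeArea walks the block tree, so this is an assumption for illustration only.

// Free-area search that pads both sides of a candidate with guard pages.
#include <cstdint>
#include <cstdio>
#include <vector>

constexpr std::uint64_t PageSize = 0x1000;

// free_map[i] == true means page i of the region is unmapped.
std::uint64_t FindFreeArea(const std::vector<bool>& free_map,
                           std::uint64_t needed_pages, std::uint64_t guard_pages) {
    const std::uint64_t span = needed_pages + 2 * guard_pages; // pad both sides
    for (std::uint64_t base = 0; base + span <= free_map.size(); ++base) {
        bool ok = true;
        for (std::uint64_t i = 0; i < span && ok; ++i) {
            ok = free_map[base + i];
        }
        if (ok) {
            return (base + guard_pages) * PageSize; // skip the leading guard
        }
    }
    return 0; // no space
}

int main() {
    std::vector<bool> free_map(64, true);
    free_map[2] = false; // something already mapped near the start
    std::printf("0x%llx\n",
                static_cast<unsigned long long>(FindFreeArea(free_map, 4, 4)));
}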
1838Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group, 2026Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group,
1839 OperationType operation) { 2027 OperationType operation) {
1840 ASSERT(this->IsLockedByCurrentThread()); 2028 ASSERT(this->IsLockedByCurrentThread());
1841 2029
@@ -1844,11 +2032,11 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup&
1844 ASSERT(num_pages == page_group.GetNumPages()); 2032 ASSERT(num_pages == page_group.GetNumPages());
1845 2033
1846 for (const auto& node : page_group.Nodes()) { 2034 for (const auto& node : page_group.Nodes()) {
1847 const std::size_t size{node.GetNumPages() * PageSize}; 2035 const size_t size{node.GetNumPages() * PageSize};
1848 2036
1849 switch (operation) { 2037 switch (operation) {
1850 case OperationType::MapGroup: 2038 case OperationType::MapGroup:
1851 system.Memory().MapMemoryRegion(page_table_impl, addr, size, node.GetAddress()); 2039 m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress());
1852 break; 2040 break;
1853 default: 2041 default:
1854 ASSERT(false); 2042 ASSERT(false);
@@ -1857,10 +2045,10 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup&
1857 addr += size; 2045 addr += size;
1858 } 2046 }
1859 2047
1860 return ResultSuccess; 2048 R_SUCCEED();
1861} 2049}
1862 2050
1863Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm, 2051Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm,
1864 OperationType operation, PAddr map_addr) { 2052 OperationType operation, PAddr map_addr) {
1865 ASSERT(this->IsLockedByCurrentThread()); 2053 ASSERT(this->IsLockedByCurrentThread());
1866 2054
@@ -1870,12 +2058,12 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission
1870 2058
1871 switch (operation) { 2059 switch (operation) {
1872 case OperationType::Unmap: 2060 case OperationType::Unmap:
1873 system.Memory().UnmapRegion(page_table_impl, addr, num_pages * PageSize); 2061 m_system.Memory().UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize);
1874 break; 2062 break;
1875 case OperationType::Map: { 2063 case OperationType::Map: {
1876 ASSERT(map_addr); 2064 ASSERT(map_addr);
1877 ASSERT(Common::IsAligned(map_addr, PageSize)); 2065 ASSERT(Common::IsAligned(map_addr, PageSize));
1878 system.Memory().MapMemoryRegion(page_table_impl, addr, num_pages * PageSize, map_addr); 2066 m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr);
1879 break; 2067 break;
1880 } 2068 }
1881 case OperationType::ChangePermissions: 2069 case OperationType::ChangePermissions:
@@ -1884,25 +2072,25 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission
1884 default: 2072 default:
1885 ASSERT(false); 2073 ASSERT(false);
1886 } 2074 }
1887 return ResultSuccess; 2075 R_SUCCEED();
1888} 2076}
1889 2077
1890VAddr KPageTable::GetRegionAddress(KMemoryState state) const { 2078VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
1891 switch (state) { 2079 switch (state) {
1892 case KMemoryState::Free: 2080 case KMemoryState::Free:
1893 case KMemoryState::Kernel: 2081 case KMemoryState::Kernel:
1894 return address_space_start; 2082 return m_address_space_start;
1895 case KMemoryState::Normal: 2083 case KMemoryState::Normal:
1896 return heap_region_start; 2084 return m_heap_region_start;
1897 case KMemoryState::Ipc: 2085 case KMemoryState::Ipc:
1898 case KMemoryState::NonSecureIpc: 2086 case KMemoryState::NonSecureIpc:
1899 case KMemoryState::NonDeviceIpc: 2087 case KMemoryState::NonDeviceIpc:
1900 return alias_region_start; 2088 return m_alias_region_start;
1901 case KMemoryState::Stack: 2089 case KMemoryState::Stack:
1902 return stack_region_start; 2090 return m_stack_region_start;
1903 case KMemoryState::Static: 2091 case KMemoryState::Static:
1904 case KMemoryState::ThreadLocal: 2092 case KMemoryState::ThreadLocal:
1905 return kernel_map_region_start; 2093 return m_kernel_map_region_start;
1906 case KMemoryState::Io: 2094 case KMemoryState::Io:
1907 case KMemoryState::Shared: 2095 case KMemoryState::Shared:
1908 case KMemoryState::AliasCode: 2096 case KMemoryState::AliasCode:
@@ -1913,31 +2101,31 @@ VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
1913 case KMemoryState::GeneratedCode: 2101 case KMemoryState::GeneratedCode:
1914 case KMemoryState::CodeOut: 2102 case KMemoryState::CodeOut:
1915 case KMemoryState::Coverage: 2103 case KMemoryState::Coverage:
1916 return alias_code_region_start; 2104 return m_alias_code_region_start;
1917 case KMemoryState::Code: 2105 case KMemoryState::Code:
1918 case KMemoryState::CodeData: 2106 case KMemoryState::CodeData:
1919 return code_region_start; 2107 return m_code_region_start;
1920 default: 2108 default:
1921 UNREACHABLE(); 2109 UNREACHABLE();
1922 } 2110 }
1923} 2111}
1924 2112
1925std::size_t KPageTable::GetRegionSize(KMemoryState state) const { 2113size_t KPageTable::GetRegionSize(KMemoryState state) const {
1926 switch (state) { 2114 switch (state) {
1927 case KMemoryState::Free: 2115 case KMemoryState::Free:
1928 case KMemoryState::Kernel: 2116 case KMemoryState::Kernel:
1929 return address_space_end - address_space_start; 2117 return m_address_space_end - m_address_space_start;
1930 case KMemoryState::Normal: 2118 case KMemoryState::Normal:
1931 return heap_region_end - heap_region_start; 2119 return m_heap_region_end - m_heap_region_start;
1932 case KMemoryState::Ipc: 2120 case KMemoryState::Ipc:
1933 case KMemoryState::NonSecureIpc: 2121 case KMemoryState::NonSecureIpc:
1934 case KMemoryState::NonDeviceIpc: 2122 case KMemoryState::NonDeviceIpc:
1935 return alias_region_end - alias_region_start; 2123 return m_alias_region_end - m_alias_region_start;
1936 case KMemoryState::Stack: 2124 case KMemoryState::Stack:
1937 return stack_region_end - stack_region_start; 2125 return m_stack_region_end - m_stack_region_start;
1938 case KMemoryState::Static: 2126 case KMemoryState::Static:
1939 case KMemoryState::ThreadLocal: 2127 case KMemoryState::ThreadLocal:
1940 return kernel_map_region_end - kernel_map_region_start; 2128 return m_kernel_map_region_end - m_kernel_map_region_start;
1941 case KMemoryState::Io: 2129 case KMemoryState::Io:
1942 case KMemoryState::Shared: 2130 case KMemoryState::Shared:
1943 case KMemoryState::AliasCode: 2131 case KMemoryState::AliasCode:
@@ -1948,16 +2136,16 @@ std::size_t KPageTable::GetRegionSize(KMemoryState state) const {
1948 case KMemoryState::GeneratedCode: 2136 case KMemoryState::GeneratedCode:
1949 case KMemoryState::CodeOut: 2137 case KMemoryState::CodeOut:
1950 case KMemoryState::Coverage: 2138 case KMemoryState::Coverage:
1951 return alias_code_region_end - alias_code_region_start; 2139 return m_alias_code_region_end - m_alias_code_region_start;
1952 case KMemoryState::Code: 2140 case KMemoryState::Code:
1953 case KMemoryState::CodeData: 2141 case KMemoryState::CodeData:
1954 return code_region_end - code_region_start; 2142 return m_code_region_end - m_code_region_start;
1955 default: 2143 default:
1956 UNREACHABLE(); 2144 UNREACHABLE();
1957 } 2145 }
1958} 2146}
1959 2147
1960bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) const { 2148bool KPageTable::CanContain(VAddr addr, size_t size, KMemoryState state) const {
1961 const VAddr end = addr + size; 2149 const VAddr end = addr + size;
1962 const VAddr last = end - 1; 2150 const VAddr last = end - 1;
1963 2151
@@ -1966,10 +2154,10 @@ bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) co
1966 2154
1967 const bool is_in_region = 2155 const bool is_in_region =
1968 region_start <= addr && addr < end && last <= region_start + region_size - 1; 2156 region_start <= addr && addr < end && last <= region_start + region_size - 1;
1969 const bool is_in_heap = !(end <= heap_region_start || heap_region_end <= addr || 2157 const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr ||
1970 heap_region_start == heap_region_end); 2158 m_heap_region_start == m_heap_region_end);
1971 const bool is_in_alias = !(end <= alias_region_start || alias_region_end <= addr || 2159 const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr ||
1972 alias_region_start == alias_region_end); 2160 m_alias_region_start == m_alias_region_end);
1973 switch (state) { 2161 switch (state) {
1974 case KMemoryState::Free: 2162 case KMemoryState::Free:
1975 case KMemoryState::Kernel: 2163 case KMemoryState::Kernel:
@@ -2008,23 +2196,23 @@ Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_
2008 KMemoryPermission perm, KMemoryAttribute attr_mask, 2196 KMemoryPermission perm, KMemoryAttribute attr_mask,
2009 KMemoryAttribute attr) const { 2197 KMemoryAttribute attr) const {
2010 // Validate the states match expectation. 2198 // Validate the states match expectation.
2011 R_UNLESS((info.state & state_mask) == state, ResultInvalidCurrentMemory); 2199 R_UNLESS((info.m_state & state_mask) == state, ResultInvalidCurrentMemory);
2012 R_UNLESS((info.perm & perm_mask) == perm, ResultInvalidCurrentMemory); 2200 R_UNLESS((info.m_permission & perm_mask) == perm, ResultInvalidCurrentMemory);
2013 R_UNLESS((info.attribute & attr_mask) == attr, ResultInvalidCurrentMemory); 2201 R_UNLESS((info.m_attribute & attr_mask) == attr, ResultInvalidCurrentMemory);
2014 2202
2015 return ResultSuccess; 2203 R_SUCCEED();
2016} 2204}
2017 2205
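The three R_UNLESS checks above are the core masked-comparison idiom: a field matches only if its bits under the caller's mask equal the expected value. A compact standalone sketch with assumed bit values.

// Masked-field validation: only the bits selected by each mask participate.
#include <cstdint>
#include <cstdio>

struct Info {
    std::uint32_t state;
    std::uint32_t perm;
    std::uint32_t attr;
};

bool CheckMemoryState(const Info& info, std::uint32_t state_mask, std::uint32_t state,
                      std::uint32_t perm_mask, std::uint32_t perm,
                      std::uint32_t attr_mask, std::uint32_t attr) {
    return (info.state & state_mask) == state &&
           (info.perm & perm_mask) == perm &&
           (info.attr & attr_mask) == attr;
}

int main() {
    const Info info{/*state=*/0x5, /*perm=*/0x3, /*attr=*/0x0};
    // Require bit 0 of state set; ignore everything else.
    std::printf("match=%d\n", CheckMemoryState(info, 0x1, 0x1, 0x0, 0x0, 0x0, 0x0));
}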
2018Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, 2206Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size,
2019 std::size_t size, KMemoryState state_mask, 2207 KMemoryState state_mask, KMemoryState state,
2020 KMemoryState state, KMemoryPermission perm_mask, 2208 KMemoryPermission perm_mask, KMemoryPermission perm,
2021 KMemoryPermission perm, KMemoryAttribute attr_mask, 2209 KMemoryAttribute attr_mask,
2022 KMemoryAttribute attr) const { 2210 KMemoryAttribute attr) const {
2023 ASSERT(this->IsLockedByCurrentThread()); 2211 ASSERT(this->IsLockedByCurrentThread());
2024 2212
2025 // Get information about the first block. 2213 // Get information about the first block.
2026 const VAddr last_addr = addr + size - 1; 2214 const VAddr last_addr = addr + size - 1;
2027 KMemoryBlockManager::const_iterator it = block_manager->FindIterator(addr); 2215 KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
2028 KMemoryInfo info = it->GetMemoryInfo(); 2216 KMemoryInfo info = it->GetMemoryInfo();
2029 2217
2030 // If the start address isn't aligned, we need a block. 2218 // If the start address isn't aligned, we need a block.
@@ -2042,7 +2230,7 @@ Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VA
2042 2230
2043 // Advance our iterator. 2231 // Advance our iterator.
2044 it++; 2232 it++;
2045 ASSERT(it != block_manager->cend()); 2233 ASSERT(it != m_memory_block_manager.cend());
2046 info = it->GetMemoryInfo(); 2234 info = it->GetMemoryInfo();
2047 } 2235 }
2048 2236
@@ -2054,12 +2242,12 @@ Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VA
2054 *out_blocks_needed = blocks_for_start_align + blocks_for_end_align; 2242 *out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
2055 } 2243 }
2056 2244
2057 return ResultSuccess; 2245 R_SUCCEED();
2058} 2246}
2059 2247
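The blocks_for_start_align / blocks_for_end_align arithmetic above (and in CheckMemoryState below) counts the extra block-manager nodes an update may need: one if the range starts mid-block and must split the first block, and one more if it ends mid-block. A worked standalone sketch of that count, with assumed block bounds.

// Extra-node count for a range update: one split per unaligned edge.
#include <cstdio>

constexpr unsigned long long PageSize = 0x1000;

unsigned long long AlignDown(unsigned long long v, unsigned long long a) {
    return v & ~(a - 1);
}

// block_start/block_last describe the existing block containing the range.
unsigned BlocksNeeded(unsigned long long addr, unsigned long long size,
                      unsigned long long block_start, unsigned long long block_last) {
    const unsigned long long last_addr = addr + size - 1;
    const unsigned for_start = (AlignDown(addr, PageSize) != block_start) ? 1 : 0;
    const unsigned for_end =
        (AlignDown(last_addr, PageSize) + PageSize - 1 != block_last) ? 1 : 0;
    return for_start + for_end;
}

int main() {
    // Range starts and ends mid-block: both edges need a split.
    std::printf("%u\n", BlocksNeeded(0x3000, 0x2000, 0x1000, 0x7FFF)); // 2
    // Range exactly covers the block: no extra nodes required.
    std::printf("%u\n", BlocksNeeded(0x1000, 0x7000, 0x1000, 0x7FFF)); // 0
}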
2060Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, 2248Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
2061 KMemoryAttribute* out_attr, std::size_t* out_blocks_needed, 2249 KMemoryAttribute* out_attr, size_t* out_blocks_needed,
2062 VAddr addr, std::size_t size, KMemoryState state_mask, 2250 VAddr addr, size_t size, KMemoryState state_mask,
2063 KMemoryState state, KMemoryPermission perm_mask, 2251 KMemoryState state, KMemoryPermission perm_mask,
2064 KMemoryPermission perm, KMemoryAttribute attr_mask, 2252 KMemoryPermission perm, KMemoryAttribute attr_mask,
2065 KMemoryAttribute attr, KMemoryAttribute ignore_attr) const { 2253 KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
@@ -2067,7 +2255,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
2067 2255
2068 // Get information about the first block. 2256 // Get information about the first block.
2069 const VAddr last_addr = addr + size - 1; 2257 const VAddr last_addr = addr + size - 1;
2070 KMemoryBlockManager::const_iterator it = block_manager->FindIterator(addr); 2258 KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
2071 KMemoryInfo info = it->GetMemoryInfo(); 2259 KMemoryInfo info = it->GetMemoryInfo();
2072 2260
2073 // If the start address isn't aligned, we need a block. 2261 // If the start address isn't aligned, we need a block.
@@ -2075,14 +2263,14 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
2075 (Common::AlignDown(addr, PageSize) != info.GetAddress()) ? 1 : 0; 2263 (Common::AlignDown(addr, PageSize) != info.GetAddress()) ? 1 : 0;
2076 2264
2077 // Validate all blocks in the range have correct state. 2265 // Validate all blocks in the range have correct state.
2078 const KMemoryState first_state = info.state; 2266 const KMemoryState first_state = info.m_state;
2079 const KMemoryPermission first_perm = info.perm; 2267 const KMemoryPermission first_perm = info.m_permission;
2080 const KMemoryAttribute first_attr = info.attribute; 2268 const KMemoryAttribute first_attr = info.m_attribute;
2081 while (true) { 2269 while (true) {
2082 // Validate the current block. 2270 // Validate the current block.
2083 R_UNLESS(info.state == first_state, ResultInvalidCurrentMemory); 2271 R_UNLESS(info.m_state == first_state, ResultInvalidCurrentMemory);
2084 R_UNLESS(info.perm == first_perm, ResultInvalidCurrentMemory); 2272 R_UNLESS(info.m_permission == first_perm, ResultInvalidCurrentMemory);
2085 R_UNLESS((info.attribute | ignore_attr) == (first_attr | ignore_attr), 2273 R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr),
2086 ResultInvalidCurrentMemory); 2274 ResultInvalidCurrentMemory);
2087 2275
2088 // Validate against the provided masks. 2276 // Validate against the provided masks.
@@ -2095,7 +2283,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
2095 2283
2096 // Advance our iterator. 2284 // Advance our iterator.
2097 it++; 2285 it++;
2098 ASSERT(it != block_manager->cend()); 2286 ASSERT(it != m_memory_block_manager.cend());
2099 info = it->GetMemoryInfo(); 2287 info = it->GetMemoryInfo();
2100 } 2288 }
2101 2289
@@ -2116,7 +2304,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
2116 if (out_blocks_needed != nullptr) { 2304 if (out_blocks_needed != nullptr) {
2117 *out_blocks_needed = blocks_for_start_align + blocks_for_end_align; 2305 *out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
2118 } 2306 }
2119 return ResultSuccess; 2307 R_SUCCEED();
2120} 2308}
2121 2309
2122Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size, 2310Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
@@ -2134,7 +2322,7 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr
2134 R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); 2322 R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
2135 2323
2136 // Lock the table. 2324 // Lock the table.
2137 KScopedLightLock lk(general_lock); 2325 KScopedLightLock lk(m_general_lock);
2138 2326
2139 // Check that the output page group is empty, if it exists. 2327 // Check that the output page group is empty, if it exists.
2140 if (out_pg) { 2328 if (out_pg) {
@@ -2162,6 +2350,12 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr
2162 R_TRY(this->MakePageGroup(*out_pg, addr, num_pages)); 2350 R_TRY(this->MakePageGroup(*out_pg, addr, num_pages));
2163 } 2351 }
2164 2352
2353 // Create an update allocator.
2354 Result allocator_result{ResultSuccess};
2355 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
2356 m_memory_block_slab_manager, num_allocator_blocks);
2357 R_TRY(allocator_result);
2358
2165 // Decide on new perm and attr. 2359 // Decide on new perm and attr.
2166 new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm; 2360 new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
2167 KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr | lock_attr); 2361 KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr | lock_attr);
@@ -2172,9 +2366,11 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr
2172 } 2366 }
2173 2367
2174 // Apply the memory block updates. 2368 // Apply the memory block updates.
2175 block_manager->Update(addr, num_pages, old_state, new_perm, new_attr); 2369 m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
2370 new_attr, KMemoryBlockDisableMergeAttribute::Locked,
2371 KMemoryBlockDisableMergeAttribute::None);
2176 2372
2177 return ResultSuccess; 2373 R_SUCCEED();
2178} 2374}
2179 2375
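LockMemoryAndOpen above ORs the lock attribute into the block (`old_attr | lock_attr`), and UnlockMemory, further down, masks it back out (`old_attr & ~lock_attr`), leaving unrelated attributes untouched. A tiny sketch of that paired update with assumed bit values.

// Paired attribute update: set the lock bit on lock, clear only it on unlock.
#include <cassert>
#include <cstdint>

using Attr = std::uint32_t;
constexpr Attr Attr_Locked = 1u << 0;
constexpr Attr Attr_DeviceShared = 1u << 1;

Attr Lock(Attr old_attr, Attr lock_attr) {
    return old_attr | lock_attr; // block is now locked
}
Attr Unlock(Attr old_attr, Attr lock_attr) {
    return old_attr & ~lock_attr; // only the lock bit goes away
}

int main() {
    Attr a = Attr_DeviceShared;
    a = Lock(a, Attr_Locked);
    assert(a == (Attr_DeviceShared | Attr_Locked));
    a = Unlock(a, Attr_Locked);
    assert(a == Attr_DeviceShared); // unrelated attributes survive
}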
2180Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, 2376Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask,
@@ -2191,7 +2387,7 @@ Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask
2191 R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); 2387 R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
2192 2388
2193 // Lock the table. 2389 // Lock the table.
2194 KScopedLightLock lk(general_lock); 2390 KScopedLightLock lk(m_general_lock);
2195 2391
2196 // Check the state. 2392 // Check the state.
2197 KMemoryState old_state{}; 2393 KMemoryState old_state{};
@@ -2213,15 +2409,23 @@ Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask
2213 new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm; 2409 new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
2214 KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr & ~lock_attr); 2410 KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr & ~lock_attr);
2215 2411
2412 // Create an update allocator.
2413 Result allocator_result{ResultSuccess};
2414 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
2415 m_memory_block_slab_manager, num_allocator_blocks);
2416 R_TRY(allocator_result);
2417
2216 // Update permission, if we need to. 2418 // Update permission, if we need to.
2217 if (new_perm != old_perm) { 2419 if (new_perm != old_perm) {
2218 R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions)); 2420 R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
2219 } 2421 }
2220 2422
2221 // Apply the memory block updates. 2423 // Apply the memory block updates.
2222 block_manager->Update(addr, num_pages, old_state, new_perm, new_attr); 2424 m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
2425 new_attr, KMemoryBlockDisableMergeAttribute::None,
2426 KMemoryBlockDisableMergeAttribute::Locked);
2223 2427
2224 return ResultSuccess; 2428 R_SUCCEED();
2225} 2429}
2226 2430
2227} // namespace Kernel 2431} // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 25774f232..c6aeacd96 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -9,8 +9,10 @@
9#include "common/common_types.h" 9#include "common/common_types.h"
10#include "common/page_table.h" 10#include "common/page_table.h"
11#include "core/file_sys/program_metadata.h" 11#include "core/file_sys/program_metadata.h"
12#include "core/hle/kernel/k_dynamic_resource_manager.h"
12#include "core/hle/kernel/k_light_lock.h" 13#include "core/hle/kernel/k_light_lock.h"
13#include "core/hle/kernel/k_memory_block.h" 14#include "core/hle/kernel/k_memory_block.h"
15#include "core/hle/kernel/k_memory_block_manager.h"
14#include "core/hle/kernel/k_memory_layout.h" 16#include "core/hle/kernel/k_memory_layout.h"
15#include "core/hle/kernel/k_memory_manager.h" 17#include "core/hle/kernel/k_memory_manager.h"
16#include "core/hle/result.h" 18#include "core/hle/result.h"
@@ -34,58 +36,66 @@ public:
34 ~KPageTable(); 36 ~KPageTable();
35 37
36 Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, 38 Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
37 VAddr code_addr, std::size_t code_size, KMemoryManager::Pool pool); 39 VAddr code_addr, size_t code_size,
38 Result MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state, 40 KMemoryBlockSlabManager* mem_block_slab_manager,
41 KMemoryManager::Pool pool);
42
43 void Finalize();
44
45 Result MapProcessCode(VAddr addr, size_t pages_count, KMemoryState state,
39 KMemoryPermission perm); 46 KMemoryPermission perm);
40 Result MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size); 47 Result MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size);
41 Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size, 48 Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size,
42 ICacheInvalidationStrategy icache_invalidation_strategy); 49 ICacheInvalidationStrategy icache_invalidation_strategy);
43 Result UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table, 50 Result UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table,
44 VAddr src_addr); 51 VAddr src_addr);
45 Result MapPhysicalMemory(VAddr addr, std::size_t size); 52 Result MapPhysicalMemory(VAddr addr, size_t size);
46 Result UnmapPhysicalMemory(VAddr addr, std::size_t size); 53 Result UnmapPhysicalMemory(VAddr addr, size_t size);
47 Result MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); 54 Result MapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
48 Result UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); 55 Result UnmapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
49 Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state, 56 Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state,
50 KMemoryPermission perm); 57 KMemoryPermission perm);
51 Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr, 58 Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
52 KMemoryState state, KMemoryPermission perm) { 59 KMemoryState state, KMemoryPermission perm) {
53 return this->MapPages(out_addr, num_pages, alignment, phys_addr, true, 60 R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
54 this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize, 61 this->GetRegionAddress(state),
55 state, perm); 62 this->GetRegionSize(state) / PageSize, state, perm));
56 } 63 }
57 Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state); 64 Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state);
58 Result UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state); 65 Result UnmapPages(VAddr address, size_t num_pages, KMemoryState state);
59 Result SetProcessMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission svc_perm); 66 Result SetProcessMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm);
60 KMemoryInfo QueryInfo(VAddr addr); 67 KMemoryInfo QueryInfo(VAddr addr);
61 Result ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm); 68 Result SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission perm);
62 Result ResetTransferMemory(VAddr addr, std::size_t size); 69 Result SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr);
63 Result SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission perm); 70 Result SetMaxHeapSize(size_t size);
64 Result SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr); 71 Result SetHeapSize(VAddr* out, size_t size);
65 Result SetMaxHeapSize(std::size_t size); 72 ResultVal<VAddr> AllocateAndMapMemory(size_t needed_num_pages, size_t align, bool is_map_only,
66 Result SetHeapSize(VAddr* out, std::size_t size); 73 VAddr region_start, size_t region_num_pages,
67 ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align, 74 KMemoryState state, KMemoryPermission perm,
68 bool is_map_only, VAddr region_start, 75 PAddr map_addr = 0);
69 std::size_t region_num_pages, KMemoryState state, 76
70 KMemoryPermission perm, PAddr map_addr = 0); 77 Result LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm,
71 Result LockForDeviceAddressSpace(VAddr addr, std::size_t size); 78 bool is_aligned);
72 Result UnlockForDeviceAddressSpace(VAddr addr, std::size_t size); 79 Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size);
73 Result LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size); 80
74 Result UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg); 81 Result UnlockForDeviceAddressSpace(VAddr addr, size_t size);
82
83 Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size);
84 Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg);
75 Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages, 85 Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
76 KMemoryState state_mask, KMemoryState state, 86 KMemoryState state_mask, KMemoryState state,
77 KMemoryPermission perm_mask, KMemoryPermission perm, 87 KMemoryPermission perm_mask, KMemoryPermission perm,
78 KMemoryAttribute attr_mask, KMemoryAttribute attr); 88 KMemoryAttribute attr_mask, KMemoryAttribute attr);
79 89
80 Common::PageTable& PageTableImpl() { 90 Common::PageTable& PageTableImpl() {
81 return page_table_impl; 91 return *m_page_table_impl;
82 } 92 }
83 93
84 const Common::PageTable& PageTableImpl() const { 94 const Common::PageTable& PageTableImpl() const {
85 return page_table_impl; 95 return *m_page_table_impl;
86 } 96 }
87 97
88 bool CanContain(VAddr addr, std::size_t size, KMemoryState state) const; 98 bool CanContain(VAddr addr, size_t size, KMemoryState state) const;
89 99
90private: 100private:
91 enum class OperationType : u32 { 101 enum class OperationType : u32 {
@@ -96,67 +106,65 @@ private:
96 ChangePermissionsAndRefresh, 106 ChangePermissionsAndRefresh,
97 }; 107 };
98 108
99 static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = KMemoryAttribute::DontCareMask | 109 static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
100 KMemoryAttribute::IpcLocked | 110 KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
101 KMemoryAttribute::DeviceShared;
102 111
103 Result InitializeMemoryLayout(VAddr start, VAddr end);
104 Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm); 112 Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm);
105 Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr, 113 Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
106 bool is_pa_valid, VAddr region_start, std::size_t region_num_pages, 114 bool is_pa_valid, VAddr region_start, size_t region_num_pages,
107 KMemoryState state, KMemoryPermission perm); 115 KMemoryState state, KMemoryPermission perm);
108 Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list); 116 Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list);
109 bool IsRegionMapped(VAddr address, u64 size);
110 bool IsRegionContiguous(VAddr addr, u64 size) const; 117 bool IsRegionContiguous(VAddr addr, u64 size) const;
111 void AddRegionToPages(VAddr start, std::size_t num_pages, KPageGroup& page_linked_list); 118 void AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list);
112 KMemoryInfo QueryInfoImpl(VAddr addr); 119 KMemoryInfo QueryInfoImpl(VAddr addr);
113 VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages, 120 VAddr AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages,
114 std::size_t align); 121 size_t align);
115 Result Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group, 122 Result Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group,
116 OperationType operation); 123 OperationType operation);
117 Result Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm, 124 Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation,
118 OperationType operation, PAddr map_addr = 0); 125 PAddr map_addr = 0);
119 VAddr GetRegionAddress(KMemoryState state) const; 126 VAddr GetRegionAddress(KMemoryState state) const;
120 std::size_t GetRegionSize(KMemoryState state) const; 127 size_t GetRegionSize(KMemoryState state) const;
121 128
122 VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages, 129 VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
123 std::size_t alignment, std::size_t offset, std::size_t guard_pages); 130 size_t alignment, size_t offset, size_t guard_pages);
124 131
125 Result CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, std::size_t size, 132 Result CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size,
126 KMemoryState state_mask, KMemoryState state, 133 KMemoryState state_mask, KMemoryState state,
127 KMemoryPermission perm_mask, KMemoryPermission perm, 134 KMemoryPermission perm_mask, KMemoryPermission perm,
128 KMemoryAttribute attr_mask, KMemoryAttribute attr) const; 135 KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
129 Result CheckMemoryStateContiguous(VAddr addr, std::size_t size, KMemoryState state_mask, 136 Result CheckMemoryStateContiguous(VAddr addr, size_t size, KMemoryState state_mask,
130 KMemoryState state, KMemoryPermission perm_mask, 137 KMemoryState state, KMemoryPermission perm_mask,
131 KMemoryPermission perm, KMemoryAttribute attr_mask, 138 KMemoryPermission perm, KMemoryAttribute attr_mask,
132 KMemoryAttribute attr) const { 139 KMemoryAttribute attr) const {
133 return this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask, 140 R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
134 perm, attr_mask, attr); 141 perm, attr_mask, attr));
135 } 142 }
136 143
137 Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state, 144 Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
138 KMemoryPermission perm_mask, KMemoryPermission perm, 145 KMemoryPermission perm_mask, KMemoryPermission perm,
139 KMemoryAttribute attr_mask, KMemoryAttribute attr) const; 146 KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
140 Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, 147 Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
141 KMemoryAttribute* out_attr, std::size_t* out_blocks_needed, VAddr addr, 148 KMemoryAttribute* out_attr, size_t* out_blocks_needed, VAddr addr,
142 std::size_t size, KMemoryState state_mask, KMemoryState state, 149 size_t size, KMemoryState state_mask, KMemoryState state,
143 KMemoryPermission perm_mask, KMemoryPermission perm, 150 KMemoryPermission perm_mask, KMemoryPermission perm,
144 KMemoryAttribute attr_mask, KMemoryAttribute attr, 151 KMemoryAttribute attr_mask, KMemoryAttribute attr,
145 KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const; 152 KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
146 Result CheckMemoryState(std::size_t* out_blocks_needed, VAddr addr, std::size_t size, 153 Result CheckMemoryState(size_t* out_blocks_needed, VAddr addr, size_t size,
147 KMemoryState state_mask, KMemoryState state, 154 KMemoryState state_mask, KMemoryState state,
148 KMemoryPermission perm_mask, KMemoryPermission perm, 155 KMemoryPermission perm_mask, KMemoryPermission perm,
149 KMemoryAttribute attr_mask, KMemoryAttribute attr, 156 KMemoryAttribute attr_mask, KMemoryAttribute attr,
150 KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { 157 KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
151 return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size, 158 R_RETURN(CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
152 state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr); 159 state_mask, state, perm_mask, perm, attr_mask, attr,
160 ignore_attr));
153 } 161 }
154 Result CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask, 162 Result CheckMemoryState(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
155 KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, 163 KMemoryPermission perm_mask, KMemoryPermission perm,
156 KMemoryAttribute attr_mask, KMemoryAttribute attr, 164 KMemoryAttribute attr_mask, KMemoryAttribute attr,
157 KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { 165 KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
158 return this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm, 166 R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
159 attr_mask, attr, ignore_attr); 167 attr_mask, attr, ignore_attr));
160 } 168 }
161 169
162 Result LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size, 170 Result LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
@@ -174,13 +182,13 @@ private:
174 bool IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages); 182 bool IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages);
175 183
176 bool IsLockedByCurrentThread() const { 184 bool IsLockedByCurrentThread() const {
177 return general_lock.IsLockedByCurrentThread(); 185 return m_general_lock.IsLockedByCurrentThread();
178 } 186 }
179 187
180 bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) { 188 bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) {
181 ASSERT(this->IsLockedByCurrentThread()); 189 ASSERT(this->IsLockedByCurrentThread());
182 190
183 return layout.IsHeapPhysicalAddress(cached_physical_heap_region, phys_addr); 191 return layout.IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
184 } 192 }
185 193
186 bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const { 194 bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const {
@@ -191,95 +199,93 @@ private:
191 return *out != 0; 199 return *out != 0;
192 } 200 }
193 201
194 mutable KLightLock general_lock; 202 mutable KLightLock m_general_lock;
195 mutable KLightLock map_physical_memory_lock; 203 mutable KLightLock m_map_physical_memory_lock;
196
197 std::unique_ptr<KMemoryBlockManager> block_manager;
198 204
199public: 205public:
200 constexpr VAddr GetAddressSpaceStart() const { 206 constexpr VAddr GetAddressSpaceStart() const {
201 return address_space_start; 207 return m_address_space_start;
202 } 208 }
203 constexpr VAddr GetAddressSpaceEnd() const { 209 constexpr VAddr GetAddressSpaceEnd() const {
204 return address_space_end; 210 return m_address_space_end;
205 } 211 }
206 constexpr std::size_t GetAddressSpaceSize() const { 212 constexpr size_t GetAddressSpaceSize() const {
207 return address_space_end - address_space_start; 213 return m_address_space_end - m_address_space_start;
208 } 214 }
209 constexpr VAddr GetHeapRegionStart() const { 215 constexpr VAddr GetHeapRegionStart() const {
210 return heap_region_start; 216 return m_heap_region_start;
211 } 217 }
212 constexpr VAddr GetHeapRegionEnd() const { 218 constexpr VAddr GetHeapRegionEnd() const {
213 return heap_region_end; 219 return m_heap_region_end;
214 } 220 }
215 constexpr std::size_t GetHeapRegionSize() const { 221 constexpr size_t GetHeapRegionSize() const {
216 return heap_region_end - heap_region_start; 222 return m_heap_region_end - m_heap_region_start;
217 } 223 }
218 constexpr VAddr GetAliasRegionStart() const { 224 constexpr VAddr GetAliasRegionStart() const {
219 return alias_region_start; 225 return m_alias_region_start;
220 } 226 }
221 constexpr VAddr GetAliasRegionEnd() const { 227 constexpr VAddr GetAliasRegionEnd() const {
222 return alias_region_end; 228 return m_alias_region_end;
223 } 229 }
224 constexpr std::size_t GetAliasRegionSize() const { 230 constexpr size_t GetAliasRegionSize() const {
225 return alias_region_end - alias_region_start; 231 return m_alias_region_end - m_alias_region_start;
226 } 232 }
227 constexpr VAddr GetStackRegionStart() const { 233 constexpr VAddr GetStackRegionStart() const {
228 return stack_region_start; 234 return m_stack_region_start;
229 } 235 }
230 constexpr VAddr GetStackRegionEnd() const { 236 constexpr VAddr GetStackRegionEnd() const {
231 return stack_region_end; 237 return m_stack_region_end;
232 } 238 }
233 constexpr std::size_t GetStackRegionSize() const { 239 constexpr size_t GetStackRegionSize() const {
234 return stack_region_end - stack_region_start; 240 return m_stack_region_end - m_stack_region_start;
235 } 241 }
236 constexpr VAddr GetKernelMapRegionStart() const { 242 constexpr VAddr GetKernelMapRegionStart() const {
237 return kernel_map_region_start; 243 return m_kernel_map_region_start;
238 } 244 }
239 constexpr VAddr GetKernelMapRegionEnd() const { 245 constexpr VAddr GetKernelMapRegionEnd() const {
240 return kernel_map_region_end; 246 return m_kernel_map_region_end;
241 } 247 }
242 constexpr VAddr GetCodeRegionStart() const { 248 constexpr VAddr GetCodeRegionStart() const {
243 return code_region_start; 249 return m_code_region_start;
244 } 250 }
245 constexpr VAddr GetCodeRegionEnd() const { 251 constexpr VAddr GetCodeRegionEnd() const {
246 return code_region_end; 252 return m_code_region_end;
247 } 253 }
248 constexpr VAddr GetAliasCodeRegionStart() const { 254 constexpr VAddr GetAliasCodeRegionStart() const {
249 return alias_code_region_start; 255 return m_alias_code_region_start;
250 } 256 }
251 constexpr VAddr GetAliasCodeRegionSize() const { 257 constexpr VAddr GetAliasCodeRegionSize() const {
252 return alias_code_region_end - alias_code_region_start; 258 return m_alias_code_region_end - m_alias_code_region_start;
253 } 259 }
254 std::size_t GetNormalMemorySize() { 260 size_t GetNormalMemorySize() {
255 KScopedLightLock lk(general_lock); 261 KScopedLightLock lk(m_general_lock);
256 return GetHeapSize() + mapped_physical_memory_size; 262 return GetHeapSize() + m_mapped_physical_memory_size;
257 } 263 }
258 constexpr std::size_t GetAddressSpaceWidth() const { 264 constexpr size_t GetAddressSpaceWidth() const {
259 return address_space_width; 265 return m_address_space_width;
260 } 266 }
261 constexpr std::size_t GetHeapSize() const { 267 constexpr size_t GetHeapSize() const {
262 return current_heap_end - heap_region_start; 268 return m_current_heap_end - m_heap_region_start;
263 } 269 }
264 constexpr bool IsInsideAddressSpace(VAddr address, std::size_t size) const { 270 constexpr bool IsInsideAddressSpace(VAddr address, size_t size) const {
265 return address_space_start <= address && address + size - 1 <= address_space_end - 1; 271 return m_address_space_start <= address && address + size - 1 <= m_address_space_end - 1;
266 } 272 }
267 constexpr bool IsOutsideAliasRegion(VAddr address, std::size_t size) const { 273 constexpr bool IsOutsideAliasRegion(VAddr address, size_t size) const {
268 return alias_region_start > address || address + size - 1 > alias_region_end - 1; 274 return m_alias_region_start > address || address + size - 1 > m_alias_region_end - 1;
269 } 275 }
270 constexpr bool IsOutsideStackRegion(VAddr address, std::size_t size) const { 276 constexpr bool IsOutsideStackRegion(VAddr address, size_t size) const {
271 return stack_region_start > address || address + size - 1 > stack_region_end - 1; 277 return m_stack_region_start > address || address + size - 1 > m_stack_region_end - 1;
272 } 278 }
273 constexpr bool IsInvalidRegion(VAddr address, std::size_t size) const { 279 constexpr bool IsInvalidRegion(VAddr address, size_t size) const {
274 return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1; 280 return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1;
275 } 281 }
276 constexpr bool IsInsideHeapRegion(VAddr address, std::size_t size) const { 282 constexpr bool IsInsideHeapRegion(VAddr address, size_t size) const {
277 return address + size > heap_region_start && heap_region_end > address; 283 return address + size > m_heap_region_start && m_heap_region_end > address;
278 } 284 }
279 constexpr bool IsInsideAliasRegion(VAddr address, std::size_t size) const { 285 constexpr bool IsInsideAliasRegion(VAddr address, size_t size) const {
280 return address + size > alias_region_start && alias_region_end > address; 286 return address + size > m_alias_region_start && m_alias_region_end > address;
281 } 287 }
282 constexpr bool IsOutsideASLRRegion(VAddr address, std::size_t size) const { 288 constexpr bool IsOutsideASLRRegion(VAddr address, size_t size) const {
283 if (IsInvalidRegion(address, size)) { 289 if (IsInvalidRegion(address, size)) {
284 return true; 290 return true;
285 } 291 }
@@ -291,73 +297,78 @@ public:
291 } 297 }
292 return {}; 298 return {};
293 } 299 }
294 constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const { 300 constexpr bool IsInsideASLRRegion(VAddr address, size_t size) const {
295 return !IsOutsideASLRRegion(address, size); 301 return !IsOutsideASLRRegion(address, size);
296 } 302 }
297 constexpr std::size_t GetNumGuardPages() const { 303 constexpr size_t GetNumGuardPages() const {
298 return IsKernel() ? 1 : 4; 304 return IsKernel() ? 1 : 4;
299 } 305 }
300 PAddr GetPhysicalAddr(VAddr addr) const { 306 PAddr GetPhysicalAddr(VAddr addr) const {
301 const auto backing_addr = page_table_impl.backing_addr[addr >> PageBits]; 307 const auto backing_addr = m_page_table_impl->backing_addr[addr >> PageBits];
302 ASSERT(backing_addr); 308 ASSERT(backing_addr);
303 return backing_addr + addr; 309 return backing_addr + addr;
304 } 310 }
305 constexpr bool Contains(VAddr addr) const { 311 constexpr bool Contains(VAddr addr) const {
306 return address_space_start <= addr && addr <= address_space_end - 1; 312 return m_address_space_start <= addr && addr <= m_address_space_end - 1;
307 } 313 }
308 constexpr bool Contains(VAddr addr, std::size_t size) const { 314 constexpr bool Contains(VAddr addr, size_t size) const {
309 return address_space_start <= addr && addr < addr + size && 315 return m_address_space_start <= addr && addr < addr + size &&
310 addr + size - 1 <= address_space_end - 1; 316 addr + size - 1 <= m_address_space_end - 1;
311 } 317 }
312 318
313private: 319private:
314 constexpr bool IsKernel() const { 320 constexpr bool IsKernel() const {
315 return is_kernel; 321 return m_is_kernel;
316 } 322 }
317 constexpr bool IsAslrEnabled() const { 323 constexpr bool IsAslrEnabled() const {
318 return is_aslr_enabled; 324 return m_enable_aslr;
319 } 325 }
320 326
321 constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const { 327 constexpr bool ContainsPages(VAddr addr, size_t num_pages) const {
322 return (address_space_start <= addr) && 328 return (m_address_space_start <= addr) &&
323 (num_pages <= (address_space_end - address_space_start) / PageSize) && 329 (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) &&
324 (addr + num_pages * PageSize - 1 <= address_space_end - 1); 330 (addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
325 } 331 }
326 332
327private: 333private:
328 VAddr address_space_start{}; 334 VAddr m_address_space_start{};
329 VAddr address_space_end{}; 335 VAddr m_address_space_end{};
330 VAddr heap_region_start{}; 336 VAddr m_heap_region_start{};
331 VAddr heap_region_end{}; 337 VAddr m_heap_region_end{};
332 VAddr current_heap_end{}; 338 VAddr m_current_heap_end{};
333 VAddr alias_region_start{}; 339 VAddr m_alias_region_start{};
334 VAddr alias_region_end{}; 340 VAddr m_alias_region_end{};
335 VAddr stack_region_start{}; 341 VAddr m_stack_region_start{};
336 VAddr stack_region_end{}; 342 VAddr m_stack_region_end{};
337 VAddr kernel_map_region_start{}; 343 VAddr m_kernel_map_region_start{};
338 VAddr kernel_map_region_end{}; 344 VAddr m_kernel_map_region_end{};
339 VAddr code_region_start{}; 345 VAddr m_code_region_start{};
340 VAddr code_region_end{}; 346 VAddr m_code_region_end{};
341 VAddr alias_code_region_start{}; 347 VAddr m_alias_code_region_start{};
342 VAddr alias_code_region_end{}; 348 VAddr m_alias_code_region_end{};
343 349
344 std::size_t mapped_physical_memory_size{}; 350 size_t m_mapped_physical_memory_size{};
345 std::size_t max_heap_size{}; 351 size_t m_max_heap_size{};
346 std::size_t max_physical_memory_size{}; 352 size_t m_max_physical_memory_size{};
347 std::size_t address_space_width{}; 353 size_t m_address_space_width{};
348 354
349 bool is_kernel{}; 355 KMemoryBlockManager m_memory_block_manager;
350 bool is_aslr_enabled{}; 356
351 357 bool m_is_kernel{};
352 u32 heap_fill_value{}; 358 bool m_enable_aslr{};
353 const KMemoryRegion* cached_physical_heap_region{}; 359 bool m_enable_device_address_space_merge{};
354 360
355 KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application}; 361 KMemoryBlockSlabManager* m_memory_block_slab_manager{};
356 KMemoryManager::Direction allocation_option{KMemoryManager::Direction::FromFront}; 362
357 363 u32 m_heap_fill_value{};
358 Common::PageTable page_table_impl; 364 const KMemoryRegion* m_cached_physical_heap_region{};
359 365
360 Core::System& system; 366 KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application};
367 KMemoryManager::Direction m_allocation_option{KMemoryManager::Direction::FromFront};
368
369 std::unique_ptr<Common::PageTable> m_page_table_impl;
370
371 Core::System& m_system;
361}; 372};
362 373
363} // namespace Kernel 374} // namespace Kernel
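
The Contains() overloads above are written to be safe against unsigned wrap-around: `addr < addr + size` fails both for a zero size and for a range whose end overflows the address space. A self-contained sketch of the same check (ContainsRange is a hypothetical helper, not part of this patch; region_end is exclusive, like m_address_space_end):

    #include <cstddef>
    #include <cstdint>

    using VAddr = std::uint64_t;

    // Wrap-around-safe range check mirroring Contains(addr, size): unsigned
    // arithmetic wraps, so an overflowing addr + size compares less than addr
    // and the range is rejected, as is size == 0.
    constexpr bool ContainsRange(VAddr region_start, VAddr region_end, VAddr addr,
                                 std::size_t size) {
        return region_start <= addr &&            // range starts inside the region
               addr < addr + size &&              // rejects size == 0 and wrap-around
               addr + size - 1 <= region_end - 1; // last byte is inside the region
    }

    static_assert(ContainsRange(0x1000, 0x2000, 0x1800, 0x100));
    static_assert(!ContainsRange(0x1000, 0x2000, 0x1800, ~std::size_t{0})); // wraps
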
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index d3e99665f..8c3495e5a 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -72,7 +72,8 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string
72 72
73 process->name = std::move(process_name); 73 process->name = std::move(process_name);
74 process->resource_limit = res_limit; 74 process->resource_limit = res_limit;
75 process->status = ProcessStatus::Created; 75 process->system_resource_address = 0;
76 process->state = State::Created;
76 process->program_id = 0; 77 process->program_id = 0;
77 process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID() 78 process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
78 : kernel.CreateNewUserProcessID(); 79 : kernel.CreateNewUserProcessID();
@@ -92,11 +93,12 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string
92 process->exception_thread = nullptr; 93 process->exception_thread = nullptr;
93 process->is_suspended = false; 94 process->is_suspended = false;
94 process->schedule_count = 0; 95 process->schedule_count = 0;
96 process->is_handle_table_initialized = false;
95 97
96 // Open a reference to the resource limit. 98 // Open a reference to the resource limit.
97 process->resource_limit->Open(); 99 process->resource_limit->Open();
98 100
99 return ResultSuccess; 101 R_SUCCEED();
100} 102}
101 103
102void KProcess::DoWorkerTaskImpl() { 104void KProcess::DoWorkerTaskImpl() {
@@ -121,9 +123,9 @@ void KProcess::DecrementRunningThreadCount() {
121 } 123 }
122} 124}
123 125
124u64 KProcess::GetTotalPhysicalMemoryAvailable() const { 126u64 KProcess::GetTotalPhysicalMemoryAvailable() {
125 const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) + 127 const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) +
126 page_table->GetNormalMemorySize() + GetSystemResourceSize() + image_size + 128 page_table.GetNormalMemorySize() + GetSystemResourceSize() + image_size +
127 main_thread_stack_size}; 129 main_thread_stack_size};
128 if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application); 130 if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application);
129 capacity != pool_size) { 131 capacity != pool_size) {
@@ -135,16 +137,16 @@ u64 KProcess::GetTotalPhysicalMemoryAvailable() const {
135 return memory_usage_capacity; 137 return memory_usage_capacity;
136} 138}
137 139
138u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const { 140u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() {
139 return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize(); 141 return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize();
140} 142}
141 143
142u64 KProcess::GetTotalPhysicalMemoryUsed() const { 144u64 KProcess::GetTotalPhysicalMemoryUsed() {
143 return image_size + main_thread_stack_size + page_table->GetNormalMemorySize() + 145 return image_size + main_thread_stack_size + page_table.GetNormalMemorySize() +
144 GetSystemResourceSize(); 146 GetSystemResourceSize();
145} 147}
146 148
147u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() const { 149u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() {
148 return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage(); 150 return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage();
149} 151}
150 152
@@ -244,7 +246,7 @@ Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr ad
244 shmem->Open(); 246 shmem->Open();
245 shemen_info->Open(); 247 shemen_info->Open();
246 248
247 return ResultSuccess; 249 R_SUCCEED();
248} 250}
249 251
250void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address, 252void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
@@ -289,12 +291,12 @@ Result KProcess::Reset() {
289 KScopedSchedulerLock sl{kernel}; 291 KScopedSchedulerLock sl{kernel};
290 292
291 // Validate that we're in a state that we can reset. 293 // Validate that we're in a state that we can reset.
292 R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState); 294 R_UNLESS(state != State::Terminated, ResultInvalidState);
293 R_UNLESS(is_signaled, ResultInvalidState); 295 R_UNLESS(is_signaled, ResultInvalidState);
294 296
295 // Clear signaled. 297 // Clear signaled.
296 is_signaled = false; 298 is_signaled = false;
297 return ResultSuccess; 299 R_SUCCEED();
298} 300}
299 301
300Result KProcess::SetActivity(ProcessActivity activity) { 302Result KProcess::SetActivity(ProcessActivity activity) {
@@ -304,15 +306,13 @@ Result KProcess::SetActivity(ProcessActivity activity) {
304 KScopedSchedulerLock sl{kernel}; 306 KScopedSchedulerLock sl{kernel};
305 307
306 // Validate our state. 308 // Validate our state.
307 R_UNLESS(status != ProcessStatus::Exiting, ResultInvalidState); 309 R_UNLESS(state != State::Terminating, ResultInvalidState);
308 R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState); 310 R_UNLESS(state != State::Terminated, ResultInvalidState);
309 311
310 // Either pause or resume. 312 // Either pause or resume.
311 if (activity == ProcessActivity::Paused) { 313 if (activity == ProcessActivity::Paused) {
312 // Verify that we're not suspended. 314 // Verify that we're not suspended.
313 if (is_suspended) { 315 R_UNLESS(!is_suspended, ResultInvalidState);
314 return ResultInvalidState;
315 }
316 316
317 // Suspend all threads. 317 // Suspend all threads.
318 for (auto* thread : GetThreadList()) { 318 for (auto* thread : GetThreadList()) {
@@ -325,9 +325,7 @@ Result KProcess::SetActivity(ProcessActivity activity) {
325 ASSERT(activity == ProcessActivity::Runnable); 325 ASSERT(activity == ProcessActivity::Runnable);
326 326
327 // Verify that we're suspended. 327 // Verify that we're suspended.
328 if (!is_suspended) { 328 R_UNLESS(is_suspended, ResultInvalidState);
329 return ResultInvalidState;
330 }
331 329
332 // Resume all threads. 330 // Resume all threads.
333 for (auto* thread : GetThreadList()) { 331 for (auto* thread : GetThreadList()) {
@@ -338,7 +336,7 @@ Result KProcess::SetActivity(ProcessActivity activity) {
338 SetSuspended(false); 336 SetSuspended(false);
339 } 337 }
340 338
341 return ResultSuccess; 339 R_SUCCEED();
342} 340}
343 341
344Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) { 342Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) {
@@ -348,35 +346,38 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
348 system_resource_size = metadata.GetSystemResourceSize(); 346 system_resource_size = metadata.GetSystemResourceSize();
349 image_size = code_size; 347 image_size = code_size;
350 348
 349 // We currently do not support a process-specific system resource.
350 UNIMPLEMENTED_IF(system_resource_size != 0);
351
351 KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory, 352 KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory,
352 code_size + system_resource_size); 353 code_size + system_resource_size);
353 if (!memory_reservation.Succeeded()) { 354 if (!memory_reservation.Succeeded()) {
354 LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes", 355 LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes",
355 code_size + system_resource_size); 356 code_size + system_resource_size);
356 return ResultLimitReached; 357 R_RETURN(ResultLimitReached);
357 } 358 }
 358 // Initialize process address space 359 // Initialize process address space
359 if (const Result result{page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false, 360 if (const Result result{page_table.InitializeForProcess(
360 0x8000000, code_size, 361 metadata.GetAddressSpaceType(), false, 0x8000000, code_size,
361 KMemoryManager::Pool::Application)}; 362 &kernel.GetApplicationMemoryBlockManager(), KMemoryManager::Pool::Application)};
362 result.IsError()) { 363 result.IsError()) {
363 return result; 364 R_RETURN(result);
364 } 365 }
365 366
366 // Map process code region 367 // Map process code region
367 if (const Result result{page_table->MapProcessCode(page_table->GetCodeRegionStart(), 368 if (const Result result{page_table.MapProcessCode(page_table.GetCodeRegionStart(),
368 code_size / PageSize, KMemoryState::Code, 369 code_size / PageSize, KMemoryState::Code,
369 KMemoryPermission::None)}; 370 KMemoryPermission::None)};
370 result.IsError()) { 371 result.IsError()) {
371 return result; 372 R_RETURN(result);
372 } 373 }
373 374
374 // Initialize process capabilities 375 // Initialize process capabilities
375 const auto& caps{metadata.GetKernelCapabilities()}; 376 const auto& caps{metadata.GetKernelCapabilities()};
376 if (const Result result{ 377 if (const Result result{
377 capabilities.InitializeForUserProcess(caps.data(), caps.size(), *page_table)}; 378 capabilities.InitializeForUserProcess(caps.data(), caps.size(), page_table)};
378 result.IsError()) { 379 result.IsError()) {
379 return result; 380 R_RETURN(result);
380 } 381 }
381 382
382 // Set memory usage capacity 383 // Set memory usage capacity
@@ -384,12 +385,12 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
384 case FileSys::ProgramAddressSpaceType::Is32Bit: 385 case FileSys::ProgramAddressSpaceType::Is32Bit:
385 case FileSys::ProgramAddressSpaceType::Is36Bit: 386 case FileSys::ProgramAddressSpaceType::Is36Bit:
386 case FileSys::ProgramAddressSpaceType::Is39Bit: 387 case FileSys::ProgramAddressSpaceType::Is39Bit:
387 memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart(); 388 memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart();
388 break; 389 break;
389 390
390 case FileSys::ProgramAddressSpaceType::Is32BitNoMap: 391 case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
391 memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart() + 392 memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart() +
392 page_table->GetAliasRegionEnd() - page_table->GetAliasRegionStart(); 393 page_table.GetAliasRegionEnd() - page_table.GetAliasRegionStart();
393 break; 394 break;
394 395
395 default: 396 default:
@@ -397,10 +398,10 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
397 } 398 }
398 399
399 // Create TLS region 400 // Create TLS region
400 R_TRY(this->CreateThreadLocalRegion(std::addressof(tls_region_address))); 401 R_TRY(this->CreateThreadLocalRegion(std::addressof(plr_address)));
401 memory_reservation.Commit(); 402 memory_reservation.Commit();
402 403
403 return handle_table.Initialize(capabilities.GetHandleTableSize()); 404 R_RETURN(handle_table.Initialize(capabilities.GetHandleTableSize()));
404} 405}
405 406
406void KProcess::Run(s32 main_thread_priority, u64 stack_size) { 407void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
@@ -409,15 +410,15 @@ void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
409 resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size); 410 resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size);
410 411
411 const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)}; 412 const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)};
412 ASSERT(!page_table->SetMaxHeapSize(heap_capacity).IsError()); 413 ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError());
413 414
414 ChangeStatus(ProcessStatus::Running); 415 ChangeState(State::Running);
415 416
416 SetupMainThread(kernel.System(), *this, main_thread_priority, main_thread_stack_top); 417 SetupMainThread(kernel.System(), *this, main_thread_priority, main_thread_stack_top);
417} 418}
418 419
419void KProcess::PrepareForTermination() { 420void KProcess::PrepareForTermination() {
420 ChangeStatus(ProcessStatus::Exiting); 421 ChangeState(State::Terminating);
421 422
422 const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) { 423 const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) {
423 for (auto* thread : in_thread_list) { 424 for (auto* thread : in_thread_list) {
@@ -437,15 +438,15 @@ void KProcess::PrepareForTermination() {
437 438
438 stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList()); 439 stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList());
439 440
440 this->DeleteThreadLocalRegion(tls_region_address); 441 this->DeleteThreadLocalRegion(plr_address);
441 tls_region_address = 0; 442 plr_address = 0;
442 443
443 if (resource_limit) { 444 if (resource_limit) {
444 resource_limit->Release(LimitableResource::PhysicalMemory, 445 resource_limit->Release(LimitableResource::PhysicalMemory,
445 main_thread_stack_size + image_size); 446 main_thread_stack_size + image_size);
446 } 447 }
447 448
448 ChangeStatus(ProcessStatus::Exited); 449 ChangeState(State::Terminated);
449} 450}
450 451
451void KProcess::Finalize() { 452void KProcess::Finalize() {
@@ -474,7 +475,7 @@ void KProcess::Finalize() {
474 } 475 }
475 476
476 // Finalize the page table. 477 // Finalize the page table.
477 page_table.reset(); 478 page_table.Finalize();
478 479
479 // Perform inherited finalization. 480 // Perform inherited finalization.
480 KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize(); 481 KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize();
@@ -499,7 +500,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
499 } 500 }
500 501
501 *out = tlr; 502 *out = tlr;
502 return ResultSuccess; 503 R_SUCCEED();
503 } 504 }
504 } 505 }
505 506
@@ -528,7 +529,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
528 // We succeeded! 529 // We succeeded!
529 tlp_guard.Cancel(); 530 tlp_guard.Cancel();
530 *out = tlr; 531 *out = tlr;
531 return ResultSuccess; 532 R_SUCCEED();
532} 533}
533 534
534Result KProcess::DeleteThreadLocalRegion(VAddr addr) { 535Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
@@ -576,7 +577,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
576 KThreadLocalPage::Free(kernel, page_to_free); 577 KThreadLocalPage::Free(kernel, page_to_free);
577 } 578 }
578 579
579 return ResultSuccess; 580 R_SUCCEED();
580} 581}
581 582
582bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size, 583bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
@@ -628,7 +629,7 @@ bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
628void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) { 629void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
629 const auto ReprotectSegment = [&](const CodeSet::Segment& segment, 630 const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
630 Svc::MemoryPermission permission) { 631 Svc::MemoryPermission permission) {
631 page_table->SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission); 632 page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
632 }; 633 };
633 634
634 kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(), 635 kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(),
@@ -645,19 +646,18 @@ bool KProcess::IsSignaled() const {
645} 646}
646 647
647KProcess::KProcess(KernelCore& kernel_) 648KProcess::KProcess(KernelCore& kernel_)
648 : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{std::make_unique<KPageTable>( 649 : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{kernel_.System()},
649 kernel_.System())},
650 handle_table{kernel_}, address_arbiter{kernel_.System()}, condition_var{kernel_.System()}, 650 handle_table{kernel_}, address_arbiter{kernel_.System()}, condition_var{kernel_.System()},
651 state_lock{kernel_}, list_lock{kernel_} {} 651 state_lock{kernel_}, list_lock{kernel_} {}
652 652
653KProcess::~KProcess() = default; 653KProcess::~KProcess() = default;
654 654
655void KProcess::ChangeStatus(ProcessStatus new_status) { 655void KProcess::ChangeState(State new_state) {
656 if (status == new_status) { 656 if (state == new_state) {
657 return; 657 return;
658 } 658 }
659 659
660 status = new_status; 660 state = new_state;
661 is_signaled = true; 661 is_signaled = true;
662 NotifyAvailable(); 662 NotifyAvailable();
663} 663}
@@ -668,17 +668,17 @@ Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
668 // The kernel always ensures that the given stack size is page aligned. 668 // The kernel always ensures that the given stack size is page aligned.
669 main_thread_stack_size = Common::AlignUp(stack_size, PageSize); 669 main_thread_stack_size = Common::AlignUp(stack_size, PageSize);
670 670
671 const VAddr start{page_table->GetStackRegionStart()}; 671 const VAddr start{page_table.GetStackRegionStart()};
672 const std::size_t size{page_table->GetStackRegionEnd() - start}; 672 const std::size_t size{page_table.GetStackRegionEnd() - start};
673 673
674 CASCADE_RESULT(main_thread_stack_top, 674 CASCADE_RESULT(main_thread_stack_top,
675 page_table->AllocateAndMapMemory( 675 page_table.AllocateAndMapMemory(
676 main_thread_stack_size / PageSize, PageSize, false, start, size / PageSize, 676 main_thread_stack_size / PageSize, PageSize, false, start, size / PageSize,
677 KMemoryState::Stack, KMemoryPermission::UserReadWrite)); 677 KMemoryState::Stack, KMemoryPermission::UserReadWrite));
678 678
679 main_thread_stack_top += main_thread_stack_size; 679 main_thread_stack_top += main_thread_stack_size;
680 680
681 return ResultSuccess; 681 R_SUCCEED();
682} 682}
683 683
684} // namespace Kernel 684} // namespace Kernel
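
Throughout k_process.cpp, bare `return ResultSuccess;` / `return result;` statements are converted to the R_SUCCEED()/R_RETURN()/R_UNLESS()/R_TRY() macro family. A rough, self-contained sketch of how such macros behave; the Result stand-in and the error value below are illustrative only, and yuzu's real definitions do more:

    #include <cstdint>

    // Illustrative stand-ins only, not yuzu's actual Result machinery.
    struct Result {
        std::uint32_t raw{};
        constexpr bool IsError() const { return raw != 0; }
    };
    inline constexpr Result ResultSuccess{0};
    inline constexpr Result ResultInvalidState{0xE401}; // arbitrary value for the sketch

    #define R_SUCCEED() return ResultSuccess
    #define R_RETURN(expr) return (expr)
    #define R_UNLESS(cond, res) \
        do {                    \
            if (!(cond)) {      \
                return (res);   \
            }                   \
        } while (0)
    #define R_TRY(expr)                                           \
        do {                                                      \
            if (const Result r_try_ = (expr); r_try_.IsError()) { \
                return r_try_;                                    \
            }                                                     \
        } while (0)

    // Mirrors the KProcess::Reset() pattern above: fail unless signaled, else succeed.
    Result Reset(bool is_signaled) {
        R_UNLESS(is_signaled, ResultInvalidState);
        R_SUCCEED();
    }
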
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index d56d73bab..2e0cc3d0b 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -13,6 +13,7 @@
13#include "core/hle/kernel/k_auto_object.h" 13#include "core/hle/kernel/k_auto_object.h"
14#include "core/hle/kernel/k_condition_variable.h" 14#include "core/hle/kernel/k_condition_variable.h"
15#include "core/hle/kernel/k_handle_table.h" 15#include "core/hle/kernel/k_handle_table.h"
16#include "core/hle/kernel/k_page_table.h"
16#include "core/hle/kernel/k_synchronization_object.h" 17#include "core/hle/kernel/k_synchronization_object.h"
17#include "core/hle/kernel/k_thread_local_page.h" 18#include "core/hle/kernel/k_thread_local_page.h"
18#include "core/hle/kernel/k_worker_task.h" 19#include "core/hle/kernel/k_worker_task.h"
@@ -31,7 +32,6 @@ class ProgramMetadata;
31namespace Kernel { 32namespace Kernel {
32 33
33class KernelCore; 34class KernelCore;
34class KPageTable;
35class KResourceLimit; 35class KResourceLimit;
36class KThread; 36class KThread;
37class KSharedMemoryInfo; 37class KSharedMemoryInfo;
@@ -45,24 +45,6 @@ enum class MemoryRegion : u16 {
45 BASE = 3, 45 BASE = 3,
46}; 46};
47 47
48/**
49 * Indicates the status of a Process instance.
50 *
51 * @note These match the values as used by kernel,
52 * so new entries should only be added if RE
53 * shows that a new value has been introduced.
54 */
55enum class ProcessStatus {
56 Created,
57 CreatedWithDebuggerAttached,
58 Running,
59 WaitingForDebuggerToAttach,
60 DebuggerAttached,
61 Exiting,
62 Exited,
63 DebugBreak,
64};
65
66enum class ProcessActivity : u32 { 48enum class ProcessActivity : u32 {
67 Runnable, 49 Runnable,
68 Paused, 50 Paused,
@@ -89,6 +71,17 @@ public:
89 explicit KProcess(KernelCore& kernel_); 71 explicit KProcess(KernelCore& kernel_);
90 ~KProcess() override; 72 ~KProcess() override;
91 73
74 enum class State {
75 Created = static_cast<u32>(Svc::ProcessState::Created),
76 CreatedAttached = static_cast<u32>(Svc::ProcessState::CreatedAttached),
77 Running = static_cast<u32>(Svc::ProcessState::Running),
78 Crashed = static_cast<u32>(Svc::ProcessState::Crashed),
79 RunningAttached = static_cast<u32>(Svc::ProcessState::RunningAttached),
80 Terminating = static_cast<u32>(Svc::ProcessState::Terminating),
81 Terminated = static_cast<u32>(Svc::ProcessState::Terminated),
82 DebugBreak = static_cast<u32>(Svc::ProcessState::DebugBreak),
83 };
84
92 enum : u64 { 85 enum : u64 {
93 /// Lowest allowed process ID for a kernel initial process. 86 /// Lowest allowed process ID for a kernel initial process.
94 InitialKIPIDMin = 1, 87 InitialKIPIDMin = 1,
@@ -114,12 +107,12 @@ public:
114 107
115 /// Gets a reference to the process' page table. 108 /// Gets a reference to the process' page table.
116 KPageTable& PageTable() { 109 KPageTable& PageTable() {
117 return *page_table; 110 return page_table;
118 } 111 }
119 112
 120 /// Gets a const reference to the process' page table. 113 /// Gets a const reference to the process' page table.
121 const KPageTable& PageTable() const { 114 const KPageTable& PageTable() const {
122 return *page_table; 115 return page_table;
123 } 116 }
124 117
125 /// Gets a reference to the process' handle table. 118 /// Gets a reference to the process' handle table.
@@ -145,26 +138,25 @@ public:
145 } 138 }
146 139
147 Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) { 140 Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
148 return condition_var.Wait(address, cv_key, tag, ns); 141 R_RETURN(condition_var.Wait(address, cv_key, tag, ns));
149 } 142 }
150 143
151 Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) { 144 Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) {
152 return address_arbiter.SignalToAddress(address, signal_type, value, count); 145 R_RETURN(address_arbiter.SignalToAddress(address, signal_type, value, count));
153 } 146 }
154 147
155 Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value, 148 Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
156 s64 timeout) { 149 s64 timeout) {
157 return address_arbiter.WaitForAddress(address, arb_type, value, timeout); 150 R_RETURN(address_arbiter.WaitForAddress(address, arb_type, value, timeout));
158 } 151 }
159 152
160 /// Gets the address to the process' dedicated TLS region. 153 VAddr GetProcessLocalRegionAddress() const {
161 VAddr GetTLSRegionAddress() const { 154 return plr_address;
162 return tls_region_address;
163 } 155 }
164 156
 165 /// Gets the current status of the process 157 /// Gets the current state of the process
166 ProcessStatus GetStatus() const { 158 State GetState() const {
167 return status; 159 return state;
168 } 160 }
169 161
170 /// Gets the unique ID that identifies this particular process. 162 /// Gets the unique ID that identifies this particular process.
@@ -286,18 +278,18 @@ public:
286 } 278 }
287 279
288 /// Retrieves the total physical memory available to this process in bytes. 280 /// Retrieves the total physical memory available to this process in bytes.
289 u64 GetTotalPhysicalMemoryAvailable() const; 281 u64 GetTotalPhysicalMemoryAvailable();
290 282
291 /// Retrieves the total physical memory available to this process in bytes, 283 /// Retrieves the total physical memory available to this process in bytes,
292 /// without the size of the personal system resource heap added to it. 284 /// without the size of the personal system resource heap added to it.
293 u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource() const; 285 u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource();
294 286
295 /// Retrieves the total physical memory used by this process in bytes. 287 /// Retrieves the total physical memory used by this process in bytes.
296 u64 GetTotalPhysicalMemoryUsed() const; 288 u64 GetTotalPhysicalMemoryUsed();
297 289
298 /// Retrieves the total physical memory used by this process in bytes, 290 /// Retrieves the total physical memory used by this process in bytes,
299 /// without the size of the personal system resource heap added to it. 291 /// without the size of the personal system resource heap added to it.
300 u64 GetTotalPhysicalMemoryUsedWithoutSystemResource() const; 292 u64 GetTotalPhysicalMemoryUsedWithoutSystemResource();
301 293
302 /// Gets the list of all threads created with this process as their owner. 294 /// Gets the list of all threads created with this process as their owner.
303 std::list<KThread*>& GetThreadList() { 295 std::list<KThread*>& GetThreadList() {
@@ -415,19 +407,24 @@ private:
415 pinned_threads[core_id] = nullptr; 407 pinned_threads[core_id] = nullptr;
416 } 408 }
417 409
418 /// Changes the process status. If the status is different 410 void FinalizeHandleTable() {
419 /// from the current process status, then this will trigger 411 // Finalize the table.
420 /// a process signal. 412 handle_table.Finalize();
421 void ChangeStatus(ProcessStatus new_status); 413
414 // Note that the table is finalized.
415 is_handle_table_initialized = false;
416 }
417
418 void ChangeState(State new_state);
422 419
423 /// Allocates the main thread stack for the process, given the stack size in bytes. 420 /// Allocates the main thread stack for the process, given the stack size in bytes.
424 Result AllocateMainThreadStack(std::size_t stack_size); 421 Result AllocateMainThreadStack(std::size_t stack_size);
425 422
426 /// Memory manager for this process 423 /// Memory manager for this process
427 std::unique_ptr<KPageTable> page_table; 424 KPageTable page_table;
428 425
 429 /// Current status of the process 426 /// Current state of the process
430 ProcessStatus status{}; 427 State state{};
431 428
432 /// The ID of this process 429 /// The ID of this process
433 u64 process_id = 0; 430 u64 process_id = 0;
@@ -443,6 +440,8 @@ private:
443 /// Resource limit descriptor for this process 440 /// Resource limit descriptor for this process
444 KResourceLimit* resource_limit{}; 441 KResourceLimit* resource_limit{};
445 442
443 VAddr system_resource_address{};
444
446 /// The ideal CPU core for this process, threads are scheduled on this core by default. 445 /// The ideal CPU core for this process, threads are scheduled on this core by default.
447 u8 ideal_core = 0; 446 u8 ideal_core = 0;
448 447
@@ -469,7 +468,7 @@ private:
469 KConditionVariable condition_var; 468 KConditionVariable condition_var;
470 469
 471 /// Address indicating the location of the process' dedicated TLS region. 470 /// Address indicating the location of the process local region (PLR).
472 VAddr tls_region_address = 0; 471 VAddr plr_address = 0;
473 472
474 /// Random values for svcGetInfo RandomEntropy 473 /// Random values for svcGetInfo RandomEntropy
475 std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{}; 474 std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{};
@@ -495,8 +494,12 @@ private:
495 /// Schedule count of this process 494 /// Schedule count of this process
496 s64 schedule_count{}; 495 s64 schedule_count{};
497 496
497 size_t memory_release_hint{};
498
498 bool is_signaled{}; 499 bool is_signaled{};
499 bool is_suspended{}; 500 bool is_suspended{};
501 bool is_immortal{};
502 bool is_handle_table_initialized{};
500 bool is_initialized{}; 503 bool is_initialized{};
501 504
502 std::atomic<u16> num_running_threads{}; 505 std::atomic<u16> num_running_threads{};
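
The old ProcessStatus enum is replaced by KProcess::State, whose enumerators are cast directly from Svc::ProcessState, so the kernel-internal state and the SVC-visible state share numeric values. A self-contained sketch of the payoff, using local mirror enums (ToSvcState is a hypothetical helper):

    #include <cstdint>

    // Local mirrors for illustration; the real enums are Svc::ProcessState
    // and the KProcess::State introduced above.
    enum class ProcessState : std::uint32_t {
        Created, CreatedAttached, Running, Crashed,
        RunningAttached, Terminating, Terminated, DebugBreak,
    };
    enum class State : std::uint32_t {
        Created = static_cast<std::uint32_t>(ProcessState::Created),
        Terminating = static_cast<std::uint32_t>(ProcessState::Terminating),
        Terminated = static_cast<std::uint32_t>(ProcessState::Terminated),
    };

    // Because the enumerators share values, reporting the kernel-internal
    // state to guest code is one cast instead of a translation switch.
    constexpr ProcessState ToSvcState(State s) {
        return static_cast<ProcessState>(s);
    }

    static_assert(ToSvcState(State::Terminated) == ProcessState::Terminated);
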
diff --git a/src/core/hle/kernel/k_readable_event.cpp b/src/core/hle/kernel/k_readable_event.cpp
index 94c5464fe..5c942d47c 100644
--- a/src/core/hle/kernel/k_readable_event.cpp
+++ b/src/core/hle/kernel/k_readable_event.cpp
@@ -15,31 +15,44 @@ KReadableEvent::KReadableEvent(KernelCore& kernel_) : KSynchronizationObject{ker
15 15
16KReadableEvent::~KReadableEvent() = default; 16KReadableEvent::~KReadableEvent() = default;
17 17
18void KReadableEvent::Initialize(KEvent* parent) {
19 m_is_signaled = false;
20 m_parent = parent;
21
22 if (m_parent != nullptr) {
23 m_parent->Open();
24 }
25}
26
18bool KReadableEvent::IsSignaled() const { 27bool KReadableEvent::IsSignaled() const {
19 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 28 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
20 29
21 return is_signaled; 30 return m_is_signaled;
22} 31}
23 32
24void KReadableEvent::Destroy() { 33void KReadableEvent::Destroy() {
25 if (parent) { 34 if (m_parent) {
26 parent->Close(); 35 {
36 KScopedSchedulerLock sl{kernel};
37 m_parent->OnReadableEventDestroyed();
38 }
39 m_parent->Close();
27 } 40 }
28} 41}
29 42
30Result KReadableEvent::Signal() { 43Result KReadableEvent::Signal() {
31 KScopedSchedulerLock lk{kernel}; 44 KScopedSchedulerLock lk{kernel};
32 45
33 if (!is_signaled) { 46 if (!m_is_signaled) {
34 is_signaled = true; 47 m_is_signaled = true;
35 NotifyAvailable(); 48 this->NotifyAvailable();
36 } 49 }
37 50
38 return ResultSuccess; 51 return ResultSuccess;
39} 52}
40 53
41Result KReadableEvent::Clear() { 54Result KReadableEvent::Clear() {
42 Reset(); 55 this->Reset();
43 56
44 return ResultSuccess; 57 return ResultSuccess;
45} 58}
@@ -47,11 +60,11 @@ Result KReadableEvent::Clear() {
47Result KReadableEvent::Reset() { 60Result KReadableEvent::Reset() {
48 KScopedSchedulerLock lk{kernel}; 61 KScopedSchedulerLock lk{kernel};
49 62
50 if (!is_signaled) { 63 if (!m_is_signaled) {
51 return ResultInvalidState; 64 return ResultInvalidState;
52 } 65 }
53 66
54 is_signaled = false; 67 m_is_signaled = false;
55 return ResultSuccess; 68 return ResultSuccess;
56} 69}
57 70
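
Signal(), Clear(), and Reset() differ only in how they treat an event already in the target state: Signal() is idempotent, Clear() always succeeds, and Reset() fails unless the event is currently signaled. A toy model of just that state machine (no locking; the real methods take the scheduler lock and wake waiters via NotifyAvailable()):

    #include <cassert>

    class ReadableEventModel {
    public:
        void Signal() { m_is_signaled = true; }  // idempotent
        void Clear() { m_is_signaled = false; }  // always succeeds
        bool Reset() {                           // false models ResultInvalidState
            if (!m_is_signaled) {
                return false;
            }
            m_is_signaled = false;
            return true;
        }

    private:
        bool m_is_signaled{};
    };

    int main() {
        ReadableEventModel ev;
        assert(!ev.Reset()); // resetting an unsignaled event is an error
        ev.Signal();
        ev.Signal();         // signaling twice is harmless
        assert(ev.Reset());  // consumes the signal exactly once
        assert(!ev.Reset());
    }
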
diff --git a/src/core/hle/kernel/k_readable_event.h b/src/core/hle/kernel/k_readable_event.h
index 18dcad289..743f96bf5 100644
--- a/src/core/hle/kernel/k_readable_event.h
+++ b/src/core/hle/kernel/k_readable_event.h
@@ -20,26 +20,23 @@ public:
20 explicit KReadableEvent(KernelCore& kernel_); 20 explicit KReadableEvent(KernelCore& kernel_);
21 ~KReadableEvent() override; 21 ~KReadableEvent() override;
22 22
23 void Initialize(KEvent* parent_event_, std::string&& name_) { 23 void Initialize(KEvent* parent);
24 is_signaled = false;
25 parent = parent_event_;
26 name = std::move(name_);
27 }
28 24
29 KEvent* GetParent() const { 25 KEvent* GetParent() const {
30 return parent; 26 return m_parent;
31 } 27 }
32 28
29 Result Signal();
30 Result Clear();
31
33 bool IsSignaled() const override; 32 bool IsSignaled() const override;
34 void Destroy() override; 33 void Destroy() override;
35 34
36 Result Signal();
37 Result Clear();
38 Result Reset(); 35 Result Reset();
39 36
40private: 37private:
41 bool is_signaled{}; 38 bool m_is_signaled{};
42 KEvent* parent{}; 39 KEvent* m_parent{};
43}; 40};
44 41
45} // namespace Kernel 42} // namespace Kernel
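
Initialize(KEvent* parent) now opens a reference on the parent event, and Destroy() closes it, so the KEvent outlives every live KReadableEvent view of it. A minimal model of that pairing (RefCounted stands in for yuzu's intrusive reference counting):

    #include <cassert>

    struct RefCounted {
        int refs{1};
        void Open() { ++refs; }
        void Close() { --refs; }
    };

    struct ReadableEventModel {
        RefCounted* parent{};
        void Initialize(RefCounted* p) {
            parent = p;
            if (parent != nullptr) {
                parent->Open();  // reference taken in Initialize()...
            }
        }
        void Destroy() {
            if (parent != nullptr) {
                parent->Close(); // ...and released in Destroy()
            }
        }
    };

    int main() {
        RefCounted event{};
        ReadableEventModel readable;
        readable.Initialize(&event);
        assert(event.refs == 2); // creator + readable-event view
        readable.Destroy();
        assert(event.refs == 1);
    }
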
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index 802c646a6..faf03fcc8 100644
--- a/src/core/hle/kernel/k_server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
@@ -7,6 +7,8 @@
7#include "common/assert.h" 7#include "common/assert.h"
8#include "common/common_types.h" 8#include "common/common_types.h"
9#include "common/logging/log.h" 9#include "common/logging/log.h"
10#include "common/scope_exit.h"
11#include "core/core.h"
10#include "core/core_timing.h" 12#include "core/core_timing.h"
11#include "core/hle/ipc_helpers.h" 13#include "core/hle/ipc_helpers.h"
12#include "core/hle/kernel/hle_ipc.h" 14#include "core/hle/kernel/hle_ipc.h"
@@ -18,13 +20,16 @@
18#include "core/hle/kernel/k_server_session.h" 20#include "core/hle/kernel/k_server_session.h"
19#include "core/hle/kernel/k_session.h" 21#include "core/hle/kernel/k_session.h"
20#include "core/hle/kernel/k_thread.h" 22#include "core/hle/kernel/k_thread.h"
23#include "core/hle/kernel/k_thread_queue.h"
21#include "core/hle/kernel/kernel.h" 24#include "core/hle/kernel/kernel.h"
22#include "core/hle/kernel/service_thread.h"
23#include "core/memory.h" 25#include "core/memory.h"
24 26
25namespace Kernel { 27namespace Kernel {
26 28
27KServerSession::KServerSession(KernelCore& kernel_) : KSynchronizationObject{kernel_} {} 29using ThreadQueueImplForKServerSessionRequest = KThreadQueue;
30
31KServerSession::KServerSession(KernelCore& kernel_)
32 : KSynchronizationObject{kernel_}, m_lock{kernel_} {}
28 33
29KServerSession::~KServerSession() = default; 34KServerSession::~KServerSession() = default;
30 35
@@ -33,17 +38,14 @@ void KServerSession::Initialize(KSession* parent_session_, std::string&& name_,
33 // Set member variables. 38 // Set member variables.
34 parent = parent_session_; 39 parent = parent_session_;
35 name = std::move(name_); 40 name = std::move(name_);
36 41 manager = manager_;
37 if (manager_) {
38 manager = manager_;
39 } else {
40 manager = std::make_shared<SessionRequestManager>(kernel);
41 }
42} 42}
43 43
44void KServerSession::Destroy() { 44void KServerSession::Destroy() {
45 parent->OnServerClosed(); 45 parent->OnServerClosed();
46 46
47 this->CleanupRequests();
48
47 parent->Close(); 49 parent->Close();
48 50
49 // Release host emulation members. 51 // Release host emulation members.
@@ -54,13 +56,13 @@ void KServerSession::Destroy() {
54} 56}
55 57
56void KServerSession::OnClientClosed() { 58void KServerSession::OnClientClosed() {
57 if (manager->HasSessionHandler()) { 59 if (manager && manager->HasSessionHandler()) {
58 manager->SessionHandler().ClientDisconnected(this); 60 manager->SessionHandler().ClientDisconnected(this);
59 } 61 }
60} 62}
61 63
62bool KServerSession::IsSignaled() const { 64bool KServerSession::IsSignaled() const {
63 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 65 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
64 66
65 // If the client is closed, we're always signaled. 67 // If the client is closed, we're always signaled.
66 if (parent->IsClientClosed()) { 68 if (parent->IsClientClosed()) {
@@ -68,114 +70,281 @@ bool KServerSession::IsSignaled() const {
68 } 70 }
69 71
70 // Otherwise, we're signaled if we have a request and aren't handling one. 72 // Otherwise, we're signaled if we have a request and aren't handling one.
71 return false; 73 return !m_request_list.empty() && m_current_request == nullptr;
72} 74}
73 75
74void KServerSession::AppendDomainHandler(SessionRequestHandlerPtr handler) { 76Result KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) {
75 manager->AppendDomainHandler(std::move(handler)); 77 u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))};
78 auto context = std::make_shared<HLERequestContext>(kernel, memory, this, thread);
79
80 context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf);
81
82 return manager->QueueSyncRequest(parent, std::move(context));
76} 83}
77 84
78std::size_t KServerSession::NumDomainRequestHandlers() const { 85Result KServerSession::CompleteSyncRequest(HLERequestContext& context) {
79 return manager->DomainHandlerCount(); 86 Result result = manager->CompleteSyncRequest(this, context);
87
88 // The calling thread is waiting for this request to complete, so wake it up.
89 context.GetThread().EndWait(result);
90
91 return result;
80} 92}
81 93
82Result KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) { 94Result KServerSession::OnRequest(KSessionRequest* request) {
83 if (!context.HasDomainMessageHeader()) { 95 // Create the wait queue.
84 return ResultSuccess; 96 ThreadQueueImplForKServerSessionRequest wait_queue{kernel};
85 }
86 97
87 // Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs 98 {
88 context.SetSessionRequestManager(manager); 99 // Lock the scheduler.
89 100 KScopedSchedulerLock sl{kernel};
90 // If there is a DomainMessageHeader, then this is CommandType "Request" 101
91 const auto& domain_message_header = context.GetDomainMessageHeader(); 102 // Ensure that we can handle new requests.
92 const u32 object_id{domain_message_header.object_id}; 103 R_UNLESS(!parent->IsServerClosed(), ResultSessionClosed);
93 switch (domain_message_header.command) { 104
94 case IPC::DomainMessageHeader::CommandType::SendMessage: 105 // Check that we're not terminating.
95 if (object_id > manager->DomainHandlerCount()) { 106 R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), ResultTerminationRequested);
96 LOG_CRITICAL(IPC, 107
97 "object_id {} is too big! This probably means a recent service call " 108 if (manager) {
98 "to {} needed to return a new interface!", 109 // HLE request.
99 object_id, name); 110 auto& memory{kernel.System().Memory()};
100 ASSERT(false); 111 this->QueueSyncRequest(GetCurrentThreadPointer(kernel), memory);
101 return ResultSuccess; // Ignore error if asserts are off
102 }
103 if (auto strong_ptr = manager->DomainHandler(object_id - 1).lock()) {
104 return strong_ptr->HandleSyncRequest(*this, context);
105 } else { 112 } else {
106 ASSERT(false); 113 // Non-HLE request.
107 return ResultSuccess;
108 }
109 114
110 case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: { 115 // Get whether we're empty.
111 LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id); 116 const bool was_empty = m_request_list.empty();
112 117
113 manager->CloseDomainHandler(object_id - 1); 118 // Add the request to the list.
119 request->Open();
120 m_request_list.push_back(*request);
114 121
115 IPC::ResponseBuilder rb{context, 2}; 122 // If we were empty, signal.
116 rb.Push(ResultSuccess); 123 if (was_empty) {
117 return ResultSuccess; 124 this->NotifyAvailable();
118 } 125 }
126 }
127
128 // If we have a request event, this is asynchronous, and we don't need to wait.
129 R_SUCCEED_IF(request->GetEvent() != nullptr);
130
131 // This is a synchronous request, so we should wait for our request to complete.
132 GetCurrentThread(kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
133 GetCurrentThread(kernel).BeginWait(&wait_queue);
119 } 134 }
120 135
121 LOG_CRITICAL(IPC, "Unknown domain command={}", domain_message_header.command.Value()); 136 return GetCurrentThread(kernel).GetWaitResult();
122 ASSERT(false);
123 return ResultSuccess;
124} 137}
125 138
126Result KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) { 139Result KServerSession::SendReply() {
127 u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))}; 140 // Lock the session.
128 auto context = std::make_shared<HLERequestContext>(kernel, memory, this, thread); 141 KScopedLightLock lk{m_lock};
129 142
130 context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf); 143 // Get the request.
144 KSessionRequest* request;
145 {
146 KScopedSchedulerLock sl{kernel};
131 147
132 // Ensure we have a session request handler 148 // Get the current request.
133 if (manager->HasSessionRequestHandler(*context)) { 149 request = m_current_request;
134 if (auto strong_ptr = manager->GetServiceThread().lock()) { 150 R_UNLESS(request != nullptr, ResultInvalidState);
135 strong_ptr->QueueSyncRequest(*parent, std::move(context)); 151
136 } else { 152 // Clear the current request, since we're processing it.
137 ASSERT_MSG(false, "strong_ptr is nullptr!"); 153 m_current_request = nullptr;
154 if (!m_request_list.empty()) {
155 this->NotifyAvailable();
138 } 156 }
139 } else {
140 ASSERT_MSG(false, "handler is invalid!");
141 } 157 }
142 158
143 return ResultSuccess; 159 // Close reference to the request once we're done processing it.
144} 160 SCOPE_EXIT({ request->Close(); });
145 161
146Result KServerSession::CompleteSyncRequest(HLERequestContext& context) { 162 // Extract relevant information from the request.
147 Result result = ResultSuccess; 163 const uintptr_t client_message = request->GetAddress();
164 const size_t client_buffer_size = request->GetSize();
165 KThread* client_thread = request->GetThread();
166 KEvent* event = request->GetEvent();
148 167
149 // If the session has been converted to a domain, handle the domain request 168 // Check whether we're closed.
150 if (manager->HasSessionRequestHandler(context)) { 169 const bool closed = (client_thread == nullptr || parent->IsClientClosed());
151 if (IsDomain() && context.HasDomainMessageHeader()) { 170
152 result = HandleDomainSyncRequest(context); 171 Result result = ResultSuccess;
153 // If there is no domain header, the regular session handler is used 172 if (!closed) {
154 } else if (manager->HasSessionHandler()) { 173 // If we're not closed, send the reply.
155 // If this ServerSession has an associated HLE handler, forward the request to it. 174 Core::Memory::Memory& memory{kernel.System().Memory()};
156 result = manager->SessionHandler().HandleSyncRequest(*this, context); 175 KThread* server_thread{GetCurrentThreadPointer(kernel)};
157 } 176 UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess());
177
178 auto* src_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress());
179 auto* dst_msg_buffer = memory.GetPointer(client_message);
180 std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size);
158 } else { 181 } else {
159 ASSERT_MSG(false, "Session handler is invalid, stubbing response!"); 182 result = ResultSessionClosed;
160 IPC::ResponseBuilder rb(context, 2);
161 rb.Push(ResultSuccess);
162 } 183 }
163 184
164 if (convert_to_domain) { 185 // Select a result for the client.
165 ASSERT_MSG(!IsDomain(), "ServerSession is already a domain instance."); 186 Result client_result = result;
166 manager->ConvertToDomain(); 187 if (closed && R_SUCCEEDED(result)) {
167 convert_to_domain = false; 188 result = ResultSessionClosed;
189 client_result = ResultSessionClosed;
190 } else {
191 result = ResultSuccess;
168 } 192 }
169 193
170 // The calling thread is waiting for this request to complete, so wake it up. 194 // If there's a client thread, update it.
171 context.GetThread().EndWait(result); 195 if (client_thread != nullptr) {
196 if (event != nullptr) {
197 // // Get the client process/page table.
198 // KProcess *client_process = client_thread->GetOwnerProcess();
199 // KPageTable *client_page_table = &client_process->PageTable();
200
201 // // If we need to, reply with an async error.
202 // if (R_FAILED(client_result)) {
203 // ReplyAsyncError(client_process, client_message, client_buffer_size,
204 // client_result);
205 // }
206
207 // // Unlock the client buffer.
208 // // NOTE: Nintendo does not check the result of this.
209 // client_page_table->UnlockForIpcUserBuffer(client_message, client_buffer_size);
210
211 // Signal the event.
212 event->Signal();
213 } else {
214 // End the client thread's wait.
215 KScopedSchedulerLock sl{kernel};
216
217 if (!client_thread->IsTerminationRequested()) {
218 client_thread->EndWait(client_result);
219 }
220 }
221 }
172 222
173 return result; 223 return result;
174} 224}
175 225
176Result KServerSession::HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory, 226Result KServerSession::ReceiveRequest() {
177 Core::Timing::CoreTiming& core_timing) { 227 // Lock the session.
178 return QueueSyncRequest(thread, memory); 228 KScopedLightLock lk{m_lock};
229
230 // Get the request and client thread.
231 KSessionRequest* request;
232 KThread* client_thread;
233
234 {
235 KScopedSchedulerLock sl{kernel};
236
237 // Ensure that we can service the request.
238 R_UNLESS(!parent->IsClientClosed(), ResultSessionClosed);
239
240 // Ensure we aren't already servicing a request.
241 R_UNLESS(m_current_request == nullptr, ResultNotFound);
242
243 // Ensure we have a request to service.
244 R_UNLESS(!m_request_list.empty(), ResultNotFound);
245
246 // Pop the first request from the list.
247 request = &m_request_list.front();
248 m_request_list.pop_front();
249
250 // Get the thread for the request.
251 client_thread = request->GetThread();
252 R_UNLESS(client_thread != nullptr, ResultSessionClosed);
253
254 // Open the client thread.
255 client_thread->Open();
256 }
257
258 SCOPE_EXIT({ client_thread->Close(); });
259
260 // Set the request as our current.
261 m_current_request = request;
262
263 // Get the client address.
264 uintptr_t client_message = request->GetAddress();
265 size_t client_buffer_size = request->GetSize();
266 // bool recv_list_broken = false;
267
268 // Receive the message.
269 Core::Memory::Memory& memory{kernel.System().Memory()};
270 KThread* server_thread{GetCurrentThreadPointer(kernel)};
271 UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess());
272
273 auto* src_msg_buffer = memory.GetPointer(client_message);
274 auto* dst_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress());
275 std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size);
276
277 // We succeeded.
278 return ResultSuccess;
279}
280
281void KServerSession::CleanupRequests() {
282 KScopedLightLock lk(m_lock);
283
284 // Clean up any pending requests.
285 while (true) {
286 // Get the next request.
287 KSessionRequest* request = nullptr;
288 {
289 KScopedSchedulerLock sl{kernel};
290
291 if (m_current_request) {
292 // Choose the current request if we have one.
293 request = m_current_request;
294 m_current_request = nullptr;
295 } else if (!m_request_list.empty()) {
296 // Pop the request from the front of the list.
297 request = &m_request_list.front();
298 m_request_list.pop_front();
299 }
300 }
301
302 // If there's no request, we're done.
303 if (request == nullptr) {
304 break;
305 }
306
307 // Close a reference to the request once it's cleaned up.
308 SCOPE_EXIT({ request->Close(); });
309
310 // Extract relevant information from the request.
311 // const uintptr_t client_message = request->GetAddress();
312 // const size_t client_buffer_size = request->GetSize();
313 KThread* client_thread = request->GetThread();
314 KEvent* event = request->GetEvent();
315
316 // KProcess *server_process = request->GetServerProcess();
317 // KProcess *client_process = (client_thread != nullptr) ?
318 // client_thread->GetOwnerProcess() : nullptr;
319 // KProcessPageTable *client_page_table = (client_process != nullptr) ?
320 // &client_process->GetPageTable() : nullptr;
321
322 // Cleanup the mappings.
323 // Result result = CleanupMap(request, server_process, client_page_table);
324
325 // If there's a client thread, update it.
326 if (client_thread != nullptr) {
327 if (event != nullptr) {
328 // // We need to reply async.
329 // ReplyAsyncError(client_process, client_message, client_buffer_size,
330 // (R_SUCCEEDED(result) ? ResultSessionClosed : result));
331
332 // // Unlock the client buffer.
 333 // // NOTE: Nintendo does not check the result of this.
334 // client_page_table->UnlockForIpcUserBuffer(client_message, client_buffer_size);
335
336 // Signal the event.
337 event->Signal();
338 } else {
339 // End the client thread's wait.
340 KScopedSchedulerLock sl{kernel};
341
342 if (!client_thread->IsTerminationRequested()) {
343 client_thread->EndWait(ResultSessionClosed);
344 }
345 }
346 }
347 }
179} 348}
180 349
181} // namespace Kernel 350} // namespace Kernel
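
The new OnRequest()/ReceiveRequest()/SendReply() flow replaces the HLE-only path with the real kernel's queueing: a client queues a KSessionRequest and sleeps, the server pops it and services it, and the reply wakes the client. A toy model of just the queue/signal invariant behind IsSignaled() (ints stand in for KSessionRequest pointers; locking, thread waits, and the closed-client case are omitted):

    #include <cassert>
    #include <deque>

    struct SessionModel {
        std::deque<int> request_list; // queued requests
        bool has_current{};           // a request is being serviced

        // IsSignaled(): a request is pending and none is currently in flight.
        bool IsSignaled() const { return !request_list.empty() && !has_current; }

        void OnRequest(int r) { request_list.push_back(r); } // client side

        bool ReceiveRequest() {       // server side: pop front, mark in flight
            if (has_current || request_list.empty()) {
                return false;         // models ResultNotFound
            }
            request_list.pop_front();
            has_current = true;
            return true;
        }

        void SendReply() { has_current = false; } // completes the current request
    };

    int main() {
        SessionModel s;
        s.OnRequest(1);
        assert(s.IsSignaled());   // a request is pending and none is in flight
        assert(s.ReceiveRequest());
        s.OnRequest(2);
        assert(!s.IsSignaled());  // in flight: not signaled even with one queued
        s.SendReply();
        assert(s.IsSignaled());   // the queued request is now serviceable
    }
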
diff --git a/src/core/hle/kernel/k_server_session.h b/src/core/hle/kernel/k_server_session.h
index 6d0821945..32135473b 100644
--- a/src/core/hle/kernel/k_server_session.h
+++ b/src/core/hle/kernel/k_server_session.h
@@ -3,6 +3,7 @@
3 3
4#pragma once 4#pragma once
5 5
6#include <list>
6#include <memory> 7#include <memory>
7#include <string> 8#include <string>
8#include <utility> 9#include <utility>
@@ -10,6 +11,8 @@
10#include <boost/intrusive/list.hpp> 11#include <boost/intrusive/list.hpp>
11 12
12#include "core/hle/kernel/hle_ipc.h" 13#include "core/hle/kernel/hle_ipc.h"
14#include "core/hle/kernel/k_light_lock.h"
15#include "core/hle/kernel/k_session_request.h"
13#include "core/hle/kernel/k_synchronization_object.h" 16#include "core/hle/kernel/k_synchronization_object.h"
14#include "core/hle/result.h" 17#include "core/hle/result.h"
15 18
@@ -55,64 +58,29 @@ public:
55 } 58 }
56 59
57 bool IsSignaled() const override; 60 bool IsSignaled() const override;
58
59 void OnClientClosed(); 61 void OnClientClosed();
60 62
61 void ClientConnected(SessionRequestHandlerPtr handler) {
62 manager->SetSessionHandler(std::move(handler));
63 }
64
65 void ClientDisconnected() {
66 manager = nullptr;
67 }
68
69 /**
70 * Handle a sync request from the emulated application.
71 *
72 * @param thread Thread that initiated the request.
73 * @param memory Memory context to handle the sync request under.
74 * @param core_timing Core timing context to schedule the request event under.
75 *
76 * @returns Result from the operation.
77 */
78 Result HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
79 Core::Timing::CoreTiming& core_timing);
80
81 /// Adds a new domain request handler to the collection of request handlers within
82 /// this ServerSession instance.
83 void AppendDomainHandler(SessionRequestHandlerPtr handler);
84
85 /// Retrieves the total number of domain request handlers that have been
86 /// appended to this ServerSession instance.
87 std::size_t NumDomainRequestHandlers() const;
88
89 /// Returns true if the session has been converted to a domain, otherwise False
90 bool IsDomain() const {
91 return manager->IsDomain();
92 }
93
94 /// Converts the session to a domain at the end of the current command
95 void ConvertToDomain() {
96 convert_to_domain = true;
97 }
98
99 /// Gets the session request manager, which forwards requests to the underlying service 63 /// Gets the session request manager, which forwards requests to the underlying service
100 std::shared_ptr<SessionRequestManager>& GetSessionRequestManager() { 64 std::shared_ptr<SessionRequestManager>& GetSessionRequestManager() {
101 return manager; 65 return manager;
102 } 66 }
103 67
68 /// TODO: flesh these out to match the real kernel
69 Result OnRequest(KSessionRequest* request);
70 Result SendReply();
71 Result ReceiveRequest();
72
104private: 73private:
74 /// Frees up waiting client sessions when this server session is about to die
75 void CleanupRequests();
76
105 /// Queues a sync request from the emulated application. 77 /// Queues a sync request from the emulated application.
106 Result QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory); 78 Result QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory);
107 79
108 /// Completes a sync request from the emulated application. 80 /// Completes a sync request from the emulated application.
109 Result CompleteSyncRequest(HLERequestContext& context); 81 Result CompleteSyncRequest(HLERequestContext& context);
110 82
111 /// Handles a SyncRequest to a domain, forwarding the request to the proper object or closing an 83 /// This session's HLE request handlers; if nullptr, this is not an HLE server
112 /// object handle.
113 Result HandleDomainSyncRequest(Kernel::HLERequestContext& context);
114
115 /// This session's HLE request handlers
116 std::shared_ptr<SessionRequestManager> manager; 84 std::shared_ptr<SessionRequestManager> manager;
117 85
118 /// When set to True, converts the session to a domain at the end of the command 86 /// When set to True, converts the session to a domain at the end of the command
@@ -120,6 +88,12 @@ private:
120 88
121 /// KSession that owns this KServerSession 89 /// KSession that owns this KServerSession
122 KSession* parent{}; 90 KSession* parent{};
91
92 /// List of threads which are pending a reply.
93 boost::intrusive::list<KSessionRequest> m_request_list;
94 KSessionRequest* m_current_request;
95
96 KLightLock m_lock;
123}; 97};
124 98
125} // namespace Kernel 99} // namespace Kernel
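
m_request_list is a boost::intrusive::list: KSessionRequest joins it by inheriting list_base_hook<>, so linking costs no allocation, but a node must be unlinked before it is destroyed. A minimal sketch of the pattern under those assumptions:

    #include <boost/intrusive/list.hpp>
    #include <cassert>

    struct Request : boost::intrusive::list_base_hook<> {
        explicit Request(int id_) : id(id_) {}
        int id;
    };

    int main() {
        Request a{1};
        Request b{2};
        boost::intrusive::list<Request> pending;
        pending.push_back(a);            // links through the base hook; no allocation
        pending.push_back(b);
        assert(&pending.front() == &a);  // the list stores the objects themselves
        pending.clear();                 // unlink before the nodes go out of scope
    }
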
diff --git a/src/core/hle/kernel/k_session_request.cpp b/src/core/hle/kernel/k_session_request.cpp
new file mode 100644
index 000000000..520da6aa7
--- /dev/null
+++ b/src/core/hle/kernel/k_session_request.cpp
@@ -0,0 +1,61 @@
1// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "core/hle/kernel/k_page_buffer.h"
5#include "core/hle/kernel/k_session_request.h"
6
7namespace Kernel {
8
9Result KSessionRequest::SessionMappings::PushMap(VAddr client, VAddr server, size_t size,
10 KMemoryState state, size_t index) {
11 // At most 15 buffers of each type (4-bit descriptor counts).
12 ASSERT(index < ((1ul << 4) - 1) * 3);
13
14 // Get the mapping.
15 Mapping* mapping;
16 if (index < NumStaticMappings) {
17 mapping = &m_static_mappings[index];
18 } else {
19 // Allocate a page for the extra mappings.
20 if (m_mappings == nullptr) {
21 KPageBuffer* page_buffer = KPageBuffer::Allocate(kernel);
22 R_UNLESS(page_buffer != nullptr, ResultOutOfMemory);
23
24 m_mappings = reinterpret_cast<Mapping*>(page_buffer);
25 }
26
27 mapping = &m_mappings[index - NumStaticMappings];
28 }
29
30 // Set the mapping.
31 mapping->Set(client, server, size, state);
32
33 return ResultSuccess;
34}
35
36Result KSessionRequest::SessionMappings::PushSend(VAddr client, VAddr server, size_t size,
37 KMemoryState state) {
38 ASSERT(m_num_recv == 0);
39 ASSERT(m_num_exch == 0);
40 return this->PushMap(client, server, size, state, m_num_send++);
41}
42
43Result KSessionRequest::SessionMappings::PushReceive(VAddr client, VAddr server, size_t size,
44 KMemoryState state) {
45 ASSERT(m_num_exch == 0);
46 return this->PushMap(client, server, size, state, m_num_send + m_num_recv++);
47}
48
49Result KSessionRequest::SessionMappings::PushExchange(VAddr client, VAddr server, size_t size,
50 KMemoryState state) {
51 return this->PushMap(client, server, size, state, m_num_send + m_num_recv + m_num_exch++);
52}
53
54void KSessionRequest::SessionMappings::Finalize() {
55 if (m_mappings) {
56 KPageBuffer::Free(kernel, reinterpret_cast<KPageBuffer*>(m_mappings));
57 m_mappings = nullptr;
58 }
59}
60
61} // namespace Kernel
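
PushSend/PushReceive/PushExchange keep every mapping in one flat sequence ordered send, then receive, then exchange, which is why receive indices are offset by m_num_send and exchange indices by m_num_send + m_num_recv; the first eight land in static storage and the rest spill into the page-backed overflow array. A small sketch of that index arithmetic (helper names are illustrative only):

    #include <cstddef>

    constexpr std::size_t NumStaticMappings = 8;

    constexpr std::size_t ReceiveIndex(std::size_t num_send, std::size_t i) {
        return num_send + i;
    }
    constexpr std::size_t ExchangeIndex(std::size_t num_send, std::size_t num_recv,
                                        std::size_t i) {
        return num_send + num_recv + i;
    }
    constexpr bool IsStatic(std::size_t index) {
        return index < NumStaticMappings;
    }

    // With 6 sends and 3 receives, receive #2 lands at flat index 8, i.e. the
    // first slot of the overflow array rather than the static storage.
    static_assert(ReceiveIndex(6, 2) == 8);
    static_assert(!IsStatic(ReceiveIndex(6, 2)));
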
diff --git a/src/core/hle/kernel/k_session_request.h b/src/core/hle/kernel/k_session_request.h
new file mode 100644
index 000000000..e5558bc2c
--- /dev/null
+++ b/src/core/hle/kernel/k_session_request.h
@@ -0,0 +1,306 @@
1// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include <array>
7
8#include "core/hle/kernel/k_auto_object.h"
9#include "core/hle/kernel/k_event.h"
10#include "core/hle/kernel/k_memory_block.h"
11#include "core/hle/kernel/k_process.h"
12#include "core/hle/kernel/k_thread.h"
13#include "core/hle/kernel/slab_helpers.h"
14
15namespace Kernel {
16
17class KSessionRequest final : public KSlabAllocated<KSessionRequest>,
18 public KAutoObject,
19 public boost::intrusive::list_base_hook<> {
20 KERNEL_AUTOOBJECT_TRAITS(KSessionRequest, KAutoObject);
21
22public:
23 class SessionMappings {
24 private:
25 static constexpr size_t NumStaticMappings = 8;
26
27 class Mapping {
28 public:
29 constexpr void Set(VAddr c, VAddr s, size_t sz, KMemoryState st) {
30 m_client_address = c;
31 m_server_address = s;
32 m_size = sz;
33 m_state = st;
34 }
35
36 constexpr VAddr GetClientAddress() const {
37 return m_client_address;
38 }
39 constexpr VAddr GetServerAddress() const {
40 return m_server_address;
41 }
42 constexpr size_t GetSize() const {
43 return m_size;
44 }
45 constexpr KMemoryState GetMemoryState() const {
46 return m_state;
47 }
48
49 private:
50 VAddr m_client_address;
51 VAddr m_server_address;
52 size_t m_size;
53 KMemoryState m_state;
54 };
55
56 public:
57 explicit SessionMappings(KernelCore& kernel_) : kernel(kernel_) {}
58
59 void Initialize() {}
60 void Finalize();
61
62 size_t GetSendCount() const {
63 return m_num_send;
64 }
65 size_t GetReceiveCount() const {
66 return m_num_recv;
67 }
68 size_t GetExchangeCount() const {
69 return m_num_exch;
70 }
71
72 Result PushSend(VAddr client, VAddr server, size_t size, KMemoryState state);
73 Result PushReceive(VAddr client, VAddr server, size_t size, KMemoryState state);
74 Result PushExchange(VAddr client, VAddr server, size_t size, KMemoryState state);
75
76 VAddr GetSendClientAddress(size_t i) const {
77 return GetSendMapping(i).GetClientAddress();
78 }
79 VAddr GetSendServerAddress(size_t i) const {
80 return GetSendMapping(i).GetServerAddress();
81 }
82 size_t GetSendSize(size_t i) const {
83 return GetSendMapping(i).GetSize();
84 }
85 KMemoryState GetSendMemoryState(size_t i) const {
86 return GetSendMapping(i).GetMemoryState();
87 }
88
89 VAddr GetReceiveClientAddress(size_t i) const {
90 return GetReceiveMapping(i).GetClientAddress();
91 }
92 VAddr GetReceiveServerAddress(size_t i) const {
93 return GetReceiveMapping(i).GetServerAddress();
94 }
95 size_t GetReceiveSize(size_t i) const {
96 return GetReceiveMapping(i).GetSize();
97 }
98 KMemoryState GetReceiveMemoryState(size_t i) const {
99 return GetReceiveMapping(i).GetMemoryState();
100 }
101
102 VAddr GetExchangeClientAddress(size_t i) const {
103 return GetExchangeMapping(i).GetClientAddress();
104 }
105 VAddr GetExchangeServerAddress(size_t i) const {
106 return GetExchangeMapping(i).GetServerAddress();
107 }
108 size_t GetExchangeSize(size_t i) const {
109 return GetExchangeMapping(i).GetSize();
110 }
111 KMemoryState GetExchangeMemoryState(size_t i) const {
112 return GetExchangeMapping(i).GetMemoryState();
113 }
114
115 private:
116 Result PushMap(VAddr client, VAddr server, size_t size, KMemoryState state, size_t index);
117
118 const Mapping& GetSendMapping(size_t i) const {
119 ASSERT(i < m_num_send);
120
121 const size_t index = i;
122 if (index < NumStaticMappings) {
123 return m_static_mappings[index];
124 } else {
125 return m_mappings[index - NumStaticMappings];
126 }
127 }
128
129 const Mapping& GetReceiveMapping(size_t i) const {
130 ASSERT(i < m_num_recv);
131
132 const size_t index = m_num_send + i;
133 if (index < NumStaticMappings) {
134 return m_static_mappings[index];
135 } else {
136 return m_mappings[index - NumStaticMappings];
137 }
138 }
139
140 const Mapping& GetExchangeMapping(size_t i) const {
141 ASSERT(i < m_num_exch);
142
143 const size_t index = m_num_send + m_num_recv + i;
144 if (index < NumStaticMappings) {
145 return m_static_mappings[index];
146 } else {
147 return m_mappings[index - NumStaticMappings];
148 }
149 }
150
151 private:
152 KernelCore& kernel;
153 std::array<Mapping, NumStaticMappings> m_static_mappings;
154 Mapping* m_mappings{};
155 u8 m_num_send{};
156 u8 m_num_recv{};
157 u8 m_num_exch{};
158 };
159
160public:
161 explicit KSessionRequest(KernelCore& kernel_) : KAutoObject(kernel_), m_mappings(kernel_) {}
162
163 static KSessionRequest* Create(KernelCore& kernel) {
164 KSessionRequest* req = KSessionRequest::Allocate(kernel);
165 if (req != nullptr) [[likely]] {
166 KAutoObject::Create(req);
167 }
168 return req;
169 }
170
171 void Destroy() override {
172 this->Finalize();
173 KSessionRequest::Free(kernel, this);
174 }
175
176 void Initialize(KEvent* event, uintptr_t address, size_t size) {
177 m_mappings.Initialize();
178
179 m_thread = GetCurrentThreadPointer(kernel);
180 m_event = event;
181 m_address = address;
182 m_size = size;
183
184 m_thread->Open();
185 if (m_event != nullptr) {
186 m_event->Open();
187 }
188 }
189
190 static void PostDestroy(uintptr_t arg) {}
191
192 KThread* GetThread() const {
193 return m_thread;
194 }
195 KEvent* GetEvent() const {
196 return m_event;
197 }
198 uintptr_t GetAddress() const {
199 return m_address;
200 }
201 size_t GetSize() const {
202 return m_size;
203 }
204 KProcess* GetServerProcess() const {
205 return m_server;
206 }
207
208 void SetServerProcess(KProcess* process) {
209 m_server = process;
210 m_server->Open();
211 }
212
213 void ClearThread() {
214 m_thread = nullptr;
215 }
216 void ClearEvent() {
217 m_event = nullptr;
218 }
219
220 size_t GetSendCount() const {
221 return m_mappings.GetSendCount();
222 }
223 size_t GetReceiveCount() const {
224 return m_mappings.GetReceiveCount();
225 }
226 size_t GetExchangeCount() const {
227 return m_mappings.GetExchangeCount();
228 }
229
230 Result PushSend(VAddr client, VAddr server, size_t size, KMemoryState state) {
231 return m_mappings.PushSend(client, server, size, state);
232 }
233
234 Result PushReceive(VAddr client, VAddr server, size_t size, KMemoryState state) {
235 return m_mappings.PushReceive(client, server, size, state);
236 }
237
238 Result PushExchange(VAddr client, VAddr server, size_t size, KMemoryState state) {
239 return m_mappings.PushExchange(client, server, size, state);
240 }
241
242 VAddr GetSendClientAddress(size_t i) const {
243 return m_mappings.GetSendClientAddress(i);
244 }
245 VAddr GetSendServerAddress(size_t i) const {
246 return m_mappings.GetSendServerAddress(i);
247 }
248 size_t GetSendSize(size_t i) const {
249 return m_mappings.GetSendSize(i);
250 }
251 KMemoryState GetSendMemoryState(size_t i) const {
252 return m_mappings.GetSendMemoryState(i);
253 }
254
255 VAddr GetReceiveClientAddress(size_t i) const {
256 return m_mappings.GetReceiveClientAddress(i);
257 }
258 VAddr GetReceiveServerAddress(size_t i) const {
259 return m_mappings.GetReceiveServerAddress(i);
260 }
261 size_t GetReceiveSize(size_t i) const {
262 return m_mappings.GetReceiveSize(i);
263 }
264 KMemoryState GetReceiveMemoryState(size_t i) const {
265 return m_mappings.GetReceiveMemoryState(i);
266 }
267
268 VAddr GetExchangeClientAddress(size_t i) const {
269 return m_mappings.GetExchangeClientAddress(i);
270 }
271 VAddr GetExchangeServerAddress(size_t i) const {
272 return m_mappings.GetExchangeServerAddress(i);
273 }
274 size_t GetExchangeSize(size_t i) const {
275 return m_mappings.GetExchangeSize(i);
276 }
277 KMemoryState GetExchangeMemoryState(size_t i) const {
278 return m_mappings.GetExchangeMemoryState(i);
279 }
280
281private:
282 // NOTE: This is public and virtual in Nintendo's kernel.
283 void Finalize() override {
284 m_mappings.Finalize();
285
286 if (m_thread) {
287 m_thread->Close();
288 }
289 if (m_event) {
290 m_event->Close();
291 }
292 if (m_server) {
293 m_server->Close();
294 }
295 }
296
297private:
298 SessionMappings m_mappings;
299 KThread* m_thread{};
300 KProcess* m_server{};
301 KEvent* m_event{};
302 uintptr_t m_address{};
303 size_t m_size{};
304};
305
306} // namespace Kernel
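
The SessionMappings container above keeps every mapping in one flat sequence, sends first, then receives, then exchanges, which is why PushReceive computes its slot as m_num_send + m_num_recv++ and asserts that no exchange has been pushed yet. The first NumStaticMappings (8) slots live in the inline array; anything beyond that spills into the KPageBuffer-backed m_mappings array that Finalize frees. A minimal standalone sketch of the same indexing scheme, with an int standing in for Mapping and a std::vector standing in for the dynamic page:

#include <array>
#include <cassert>
#include <cstddef>
#include <vector>

// Sketch of the SessionMappings layout: one flat sequence of
// [sends | receives | exchanges], with the first NumStatic entries inline
// and the rest spilled to dynamic storage. An int stands in for Mapping.
class MappingIndexSketch {
public:
    static constexpr std::size_t NumStatic = 8;

    void PushSend(int m) {
        assert(num_recv == 0 && num_exch == 0); // sends must come first
        Insert(m, num_send++);
    }
    void PushReceive(int m) {
        assert(num_exch == 0); // receives precede exchanges, as asserted above
        Insert(m, num_send + num_recv++);
    }
    void PushExchange(int m) {
        Insert(m, num_send + num_recv + num_exch++);
    }

    int GetReceive(std::size_t i) const {
        assert(i < num_recv);
        return Get(num_send + i); // receives start right after the sends
    }

private:
    void Insert(int m, std::size_t index) {
        if (index < NumStatic) {
            inline_slots[index] = m;
        } else {
            // Pushes are strictly sequential in the flat index, so this
            // element lands exactly at index - NumStatic.
            spill.push_back(m);
        }
    }
    int Get(std::size_t index) const {
        return index < NumStatic ? inline_slots[index] : spill[index - NumStatic];
    }

    std::array<int, NumStatic> inline_slots{};
    std::vector<int> spill;
    std::size_t num_send{};
    std::size_t num_recv{};
    std::size_t num_exch{};
};
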
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp
index 8ff1545b6..a039cc591 100644
--- a/src/core/hle/kernel/k_shared_memory.cpp
+++ b/src/core/hle/kernel/k_shared_memory.cpp
@@ -50,7 +50,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
50 is_initialized = true; 50 is_initialized = true;
51 51
52 // Clear all pages in the memory. 52 // Clear all pages in the memory.
53 std::memset(device_memory_.GetPointer(physical_address_), 0, size_); 53 std::memset(device_memory_.GetPointer<void>(physical_address_), 0, size_);
54 54
55 return ResultSuccess; 55 return ResultSuccess;
56} 56}
diff --git a/src/core/hle/kernel/k_shared_memory.h b/src/core/hle/kernel/k_shared_memory.h
index 34cb98456..5620c3660 100644
--- a/src/core/hle/kernel/k_shared_memory.h
+++ b/src/core/hle/kernel/k_shared_memory.h
@@ -54,7 +54,7 @@ public:
54 * @return A pointer to the shared memory block from the specified offset 54 * @return A pointer to the shared memory block from the specified offset
55 */ 55 */
56 u8* GetPointer(std::size_t offset = 0) { 56 u8* GetPointer(std::size_t offset = 0) {
57 return device_memory->GetPointer(physical_address + offset); 57 return device_memory->GetPointer<u8>(physical_address + offset);
58 } 58 }
59 59
60 /** 60 /**
@@ -63,7 +63,7 @@ public:
63 * @return A pointer to the shared memory block from the specified offset 63 * @return A pointer to the shared memory block from the specified offset
64 */ 64 */
65 const u8* GetPointer(std::size_t offset = 0) const { 65 const u8* GetPointer(std::size_t offset = 0) const {
66 return device_memory->GetPointer(physical_address + offset); 66 return device_memory->GetPointer<u8>(physical_address + offset);
67 } 67 }
68 68
69 void Finalize() override; 69 void Finalize() override;
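
The k_shared_memory changes above track a DeviceMemory::GetPointer that is now templated on the pointee type instead of returning u8* only. Presumably the accessor reduces to a single offset computation with the cast chosen by the caller; a hypothetical simplification (the real signature lives in src/core/device_memory.h, which this diff also touches):

#include <cstdint>

// Hypothetical sketch of a templated device-memory accessor: the offset
// arithmetic is shared and the caller picks the pointee type.
class DeviceMemorySketch {
public:
    template <typename T>
    T* GetPointer(std::uint64_t physical_address) {
        return reinterpret_cast<T*>(backing + (physical_address - base_address));
    }

private:
    std::uint8_t* backing{};
    std::uint64_t base_address{};
};
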
diff --git a/src/core/hle/kernel/k_shared_memory_info.h b/src/core/hle/kernel/k_shared_memory_info.h
index e43db8515..2bb6b6d08 100644
--- a/src/core/hle/kernel/k_shared_memory_info.h
+++ b/src/core/hle/kernel/k_shared_memory_info.h
@@ -15,7 +15,8 @@ class KSharedMemoryInfo final : public KSlabAllocated<KSharedMemoryInfo>,
15 public boost::intrusive::list_base_hook<> { 15 public boost::intrusive::list_base_hook<> {
16 16
17public: 17public:
18 explicit KSharedMemoryInfo() = default; 18 explicit KSharedMemoryInfo(KernelCore&) {}
19 KSharedMemoryInfo() = default;
19 20
20 constexpr void Initialize(KSharedMemory* shmem) { 21 constexpr void Initialize(KSharedMemory* shmem) {
21 shared_memory = shmem; 22 shared_memory = shmem;
diff --git a/src/core/hle/kernel/k_slab_heap.h b/src/core/hle/kernel/k_slab_heap.h
index 2b303537e..a8c77a7d4 100644
--- a/src/core/hle/kernel/k_slab_heap.h
+++ b/src/core/hle/kernel/k_slab_heap.h
@@ -8,6 +8,7 @@
8#include "common/assert.h" 8#include "common/assert.h"
9#include "common/common_funcs.h" 9#include "common/common_funcs.h"
10#include "common/common_types.h" 10#include "common/common_types.h"
11#include "common/spin_lock.h"
11 12
12namespace Kernel { 13namespace Kernel {
13 14
@@ -36,28 +37,34 @@ public:
36 } 37 }
37 38
38 void* Allocate() { 39 void* Allocate() {
39 Node* ret = m_head.load(); 40 // KScopedInterruptDisable di;
40 41
41 do { 42 m_lock.lock();
42 if (ret == nullptr) { 43
43 break; 44 Node* ret = m_head;
44 } 45 if (ret != nullptr) [[likely]] {
45 } while (!m_head.compare_exchange_weak(ret, ret->next)); 46 m_head = ret->next;
47 }
46 48
49 m_lock.unlock();
47 return ret; 50 return ret;
48 } 51 }
49 52
50 void Free(void* obj) { 53 void Free(void* obj) {
54 // KScopedInterruptDisable di;
55
56 m_lock.lock();
57
51 Node* node = static_cast<Node*>(obj); 58 Node* node = static_cast<Node*>(obj);
59 node->next = m_head;
60 m_head = node;
52 61
53 Node* cur_head = m_head.load(); 62 m_lock.unlock();
54 do {
55 node->next = cur_head;
56 } while (!m_head.compare_exchange_weak(cur_head, node));
57 } 63 }
58 64
59private: 65private:
60 std::atomic<Node*> m_head{}; 66 std::atomic<Node*> m_head{};
67 Common::SpinLock m_lock;
61}; 68};
62 69
63} // namespace impl 70} // namespace impl
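
The k_slab_heap change above replaces the lock-free compare_exchange_weak pop with a plain lock (the commented-out KScopedInterruptDisable hints that the real kernel disables interrupts here instead). One likely motivation is the classic ABA hazard of a CAS-based free list: between loading m_head and the exchange, another thread can pop that node and push it back with a different next pointer, so the CAS succeeds against a stale snapshot. A standalone sketch of the locked version, with std::mutex standing in for Common::SpinLock:

#include <mutex>

// Sketch: intrusive free list guarded by a lock, as in the new
// Allocate/Free. The lock sidesteps the ABA hazard of the old CAS loop.
class FreeListSketch {
    struct Node {
        Node* next{};
    };

public:
    void* Allocate() {
        std::scoped_lock lk{m_lock};
        Node* ret = m_head; // pop the head node, if any
        if (ret != nullptr) {
            m_head = ret->next;
        }
        return ret;
    }

    void Free(void* obj) {
        std::scoped_lock lk{m_lock};
        Node* node = static_cast<Node*>(obj);
        node->next = m_head; // push the node back onto the list
        m_head = node;
    }

private:
    Node* m_head{};
    std::mutex m_lock;
};
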
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 174afc80d..b7bfcdce3 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -30,6 +30,7 @@
30#include "core/hle/kernel/k_worker_task_manager.h" 30#include "core/hle/kernel/k_worker_task_manager.h"
31#include "core/hle/kernel/kernel.h" 31#include "core/hle/kernel/kernel.h"
32#include "core/hle/kernel/svc_results.h" 32#include "core/hle/kernel/svc_results.h"
33#include "core/hle/kernel/svc_types.h"
33#include "core/hle/result.h" 34#include "core/hle/result.h"
34#include "core/memory.h" 35#include "core/memory.h"
35 36
@@ -38,6 +39,9 @@
38#endif 39#endif
39 40
40namespace { 41namespace {
42
43constexpr inline s32 TerminatingThreadPriority = Kernel::Svc::SystemThreadPriorityHighest - 1;
44
41static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top, 45static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
42 u32 entry_point, u32 arg) { 46 u32 entry_point, u32 arg) {
43 context = {}; 47 context = {};
@@ -241,7 +245,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
241 } 245 }
242 } 246 }
243 247
244 return ResultSuccess; 248 R_SUCCEED();
245} 249}
246 250
247Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg, 251Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
@@ -254,7 +258,7 @@ Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_
254 thread->host_context = std::make_shared<Common::Fiber>(std::move(init_func)); 258 thread->host_context = std::make_shared<Common::Fiber>(std::move(init_func));
255 thread->is_single_core = !Settings::values.use_multi_core.GetValue(); 259 thread->is_single_core = !Settings::values.use_multi_core.GetValue();
256 260
257 return ResultSuccess; 261 R_SUCCEED();
258} 262}
259 263
260Result KThread::InitializeDummyThread(KThread* thread) { 264Result KThread::InitializeDummyThread(KThread* thread) {
@@ -264,31 +268,32 @@ Result KThread::InitializeDummyThread(KThread* thread) {
264 // Initialize emulation parameters. 268 // Initialize emulation parameters.
265 thread->stack_parameters.disable_count = 0; 269 thread->stack_parameters.disable_count = 0;
266 270
267 return ResultSuccess; 271 R_SUCCEED();
268} 272}
269 273
270Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core) { 274Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core) {
271 return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main, 275 R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {},
272 system.GetCpuManager().GetGuestActivateFunc()); 276 ThreadType::Main, system.GetCpuManager().GetGuestActivateFunc()));
273} 277}
274 278
275Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) { 279Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
276 return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main, 280 R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {},
277 system.GetCpuManager().GetIdleThreadStartFunc()); 281 ThreadType::Main, system.GetCpuManager().GetIdleThreadStartFunc()));
278} 282}
279 283
280Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread, 284Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
281 KThreadFunction func, uintptr_t arg, s32 virt_core) { 285 KThreadFunction func, uintptr_t arg, s32 virt_core) {
282 return InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr, ThreadType::HighPriority, 286 R_RETURN(InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr,
283 system.GetCpuManager().GetShutdownThreadStartFunc()); 287 ThreadType::HighPriority,
288 system.GetCpuManager().GetShutdownThreadStartFunc()));
284} 289}
285 290
286Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func, 291Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func,
287 uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core, 292 uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core,
288 KProcess* owner) { 293 KProcess* owner) {
289 system.Kernel().GlobalSchedulerContext().AddThread(thread); 294 system.Kernel().GlobalSchedulerContext().AddThread(thread);
290 return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner, 295 R_RETURN(InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
291 ThreadType::User, system.GetCpuManager().GetGuestThreadFunc()); 296 ThreadType::User, system.GetCpuManager().GetGuestThreadFunc()));
292} 297}
293 298
294void KThread::PostDestroy(uintptr_t arg) { 299void KThread::PostDestroy(uintptr_t arg) {
@@ -538,7 +543,7 @@ Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
538 *out_ideal_core = virtual_ideal_core_id; 543 *out_ideal_core = virtual_ideal_core_id;
539 *out_affinity_mask = virtual_affinity_mask; 544 *out_affinity_mask = virtual_affinity_mask;
540 545
541 return ResultSuccess; 546 R_SUCCEED();
542} 547}
543 548
544Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) { 549Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
@@ -554,7 +559,7 @@ Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask)
554 *out_affinity_mask = original_physical_affinity_mask.GetAffinityMask(); 559 *out_affinity_mask = original_physical_affinity_mask.GetAffinityMask();
555 } 560 }
556 561
557 return ResultSuccess; 562 R_SUCCEED();
558} 563}
559 564
560Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) { 565Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
@@ -666,7 +671,7 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
666 } while (retry_update); 671 } while (retry_update);
667 } 672 }
668 673
669 return ResultSuccess; 674 R_SUCCEED();
670} 675}
671 676
672void KThread::SetBasePriority(s32 value) { 677void KThread::SetBasePriority(s32 value) {
@@ -839,7 +844,7 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
839 } while (thread_is_current); 844 } while (thread_is_current);
840 } 845 }
841 846
842 return ResultSuccess; 847 R_SUCCEED();
843} 848}
844 849
845Result KThread::GetThreadContext3(std::vector<u8>& out) { 850Result KThread::GetThreadContext3(std::vector<u8>& out) {
@@ -874,7 +879,7 @@ Result KThread::GetThreadContext3(std::vector<u8>& out) {
874 } 879 }
875 } 880 }
876 881
877 return ResultSuccess; 882 R_SUCCEED();
878} 883}
879 884
880void KThread::AddWaiterImpl(KThread* thread) { 885void KThread::AddWaiterImpl(KThread* thread) {
@@ -1038,7 +1043,7 @@ Result KThread::Run() {
1038 // Set our state and finish. 1043 // Set our state and finish.
1039 SetState(ThreadState::Runnable); 1044 SetState(ThreadState::Runnable);
1040 1045
1041 return ResultSuccess; 1046 R_SUCCEED();
1042 } 1047 }
1043} 1048}
1044 1049
@@ -1073,6 +1078,78 @@ void KThread::Exit() {
1073 UNREACHABLE_MSG("KThread::Exit() would return"); 1078 UNREACHABLE_MSG("KThread::Exit() would return");
1074} 1079}
1075 1080
1081Result KThread::Terminate() {
1082 ASSERT(this != GetCurrentThreadPointer(kernel));
1083
1084 // Request the thread terminate if it hasn't already.
1085 if (const auto new_state = this->RequestTerminate(); new_state != ThreadState::Terminated) {
1086 // If the thread isn't terminated, wait for it to terminate.
1087 s32 index;
1088 KSynchronizationObject* objects[] = {this};
1089 R_TRY(KSynchronizationObject::Wait(kernel, std::addressof(index), objects, 1,
1090 Svc::WaitInfinite));
1091 }
1092
1093 R_SUCCEED();
1094}
1095
1096ThreadState KThread::RequestTerminate() {
1097 ASSERT(this != GetCurrentThreadPointer(kernel));
1098
1099 KScopedSchedulerLock sl{kernel};
1100
1101 // Determine if this is the first termination request.
1102 const bool first_request = [&]() -> bool {
1103 // Perform an atomic compare-and-swap from false to true.
1104 bool expected = false;
1105 return termination_requested.compare_exchange_strong(expected, true);
1106 }();
1107
1108 // If this is the first request, start termination procedure.
1109 if (first_request) {
1110 // If the thread is in initialized state, just change state to terminated.
1111 if (this->GetState() == ThreadState::Initialized) {
1112 thread_state = ThreadState::Terminated;
1113 return ThreadState::Terminated;
1114 }
1115
1116 // Register the terminating dpc.
1117 this->RegisterDpc(DpcFlag::Terminating);
1118
1119 // If the thread is pinned, unpin it.
1120 if (this->GetStackParameters().is_pinned) {
1121 this->GetOwnerProcess()->UnpinThread(this);
1122 }
1123
1124 // If the thread is suspended, continue it.
1125 if (this->IsSuspended()) {
1126 suspend_allowed_flags = 0;
1127 this->UpdateState();
1128 }
1129
1130 // Change the thread's priority to be higher than any system thread's.
1131 if (this->GetBasePriority() >= Svc::SystemThreadPriorityHighest) {
1132 this->SetBasePriority(TerminatingThreadPriority);
1133 }
1134
1135 // If the thread is runnable, send a termination interrupt to other cores.
1136 if (this->GetState() == ThreadState::Runnable) {
1137 if (const u64 core_mask =
1138 physical_affinity_mask.GetAffinityMask() & ~(1ULL << GetCurrentCoreId(kernel));
1139 core_mask != 0) {
1140 Kernel::KInterruptManager::SendInterProcessorInterrupt(kernel, core_mask);
1141 }
1142 }
1143
1144 // Wake up the thread.
1145 if (this->GetState() == ThreadState::Waiting) {
1146 wait_queue->CancelWait(this, ResultTerminationRequested, true);
1147 }
1148 }
1149
1150 return this->GetState();
1151}
1152
1076Result KThread::Sleep(s64 timeout) { 1153Result KThread::Sleep(s64 timeout) {
1077 ASSERT(!kernel.GlobalSchedulerContext().IsLocked()); 1154 ASSERT(!kernel.GlobalSchedulerContext().IsLocked());
1078 ASSERT(this == GetCurrentThreadPointer(kernel)); 1155 ASSERT(this == GetCurrentThreadPointer(kernel));
@@ -1086,7 +1163,7 @@ Result KThread::Sleep(s64 timeout) {
1086 // Check if the thread should terminate. 1163 // Check if the thread should terminate.
1087 if (this->IsTerminationRequested()) { 1164 if (this->IsTerminationRequested()) {
1088 slp.CancelSleep(); 1165 slp.CancelSleep();
1089 return ResultTerminationRequested; 1166 R_THROW(ResultTerminationRequested);
1090 } 1167 }
1091 1168
1092 // Wait for the sleep to end. 1169 // Wait for the sleep to end.
@@ -1094,7 +1171,7 @@ Result KThread::Sleep(s64 timeout) {
1094 SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep); 1171 SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
1095 } 1172 }
1096 1173
1097 return ResultSuccess; 1174 R_SUCCEED();
1098} 1175}
1099 1176
1100void KThread::IfDummyThreadTryWait() { 1177void KThread::IfDummyThreadTryWait() {
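
KThread::Terminate and RequestTerminate above split termination into a one-shot request and a wait: the requester flips termination_requested with a compare-and-swap so the teardown work (DPC registration, unpinning, resume, priority boost above every system thread, IPI, wait cancellation) runs exactly once, then blocks on the thread itself as a synchronization object until it reaches Terminated. The same handshake modeled with standard primitives (a hypothetical sketch; the real wait goes through KSynchronizationObject::Wait):

#include <atomic>
#include <condition_variable>
#include <mutex>

// Sketch of the request-once-then-wait termination handshake.
class TerminationSketch {
public:
    // Requester side; never called on the thread being terminated.
    void Terminate() {
        bool expected = false;
        if (requested.compare_exchange_strong(expected, true)) {
            // First request only: the kernel version registers the
            // terminating DPC, unpins/resumes the target, boosts its
            // priority, and cancels any in-progress wait here.
        }
        std::unique_lock lk{mutex};
        finished_cv.wait(lk, [this] { return finished; });
    }

    // Target side: observe the request and acknowledge on exit.
    void ExitIfRequested() {
        if (requested.load()) {
            std::scoped_lock lk{mutex};
            finished = true;
            finished_cv.notify_all();
        }
    }

private:
    std::atomic<bool> requested{false};
    std::mutex mutex;
    std::condition_variable finished_cv;
    bool finished{};
};
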
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index 9ee20208e..e2a27d603 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -180,6 +180,10 @@ public:
180 180
181 void Exit(); 181 void Exit();
182 182
183 Result Terminate();
184
185 ThreadState RequestTerminate();
186
183 [[nodiscard]] u32 GetSuspendFlags() const { 187 [[nodiscard]] u32 GetSuspendFlags() const {
184 return suspend_allowed_flags & suspend_request_flags; 188 return suspend_allowed_flags & suspend_request_flags;
185 } 189 }
diff --git a/src/core/hle/kernel/k_thread_local_page.h b/src/core/hle/kernel/k_thread_local_page.h
index 0a7f22680..5d466ace7 100644
--- a/src/core/hle/kernel/k_thread_local_page.h
+++ b/src/core/hle/kernel/k_thread_local_page.h
@@ -26,7 +26,7 @@ public:
26 static_assert(RegionsPerPage > 0); 26 static_assert(RegionsPerPage > 0);
27 27
28public: 28public:
29 constexpr explicit KThreadLocalPage(VAddr addr = {}) : m_virt_addr(addr) { 29 constexpr explicit KThreadLocalPage(KernelCore&, VAddr addr = {}) : m_virt_addr(addr) {
30 m_is_region_free.fill(true); 30 m_is_region_free.fill(true);
31 } 31 }
32 32
diff --git a/src/core/hle/kernel/k_writable_event.cpp b/src/core/hle/kernel/k_writable_event.cpp
deleted file mode 100644
index ff88c5acd..000000000
--- a/src/core/hle/kernel/k_writable_event.cpp
+++ /dev/null
@@ -1,35 +0,0 @@
1// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "core/hle/kernel/k_event.h"
5#include "core/hle/kernel/k_readable_event.h"
6#include "core/hle/kernel/k_writable_event.h"
7
8namespace Kernel {
9
10KWritableEvent::KWritableEvent(KernelCore& kernel_)
11 : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
12
13KWritableEvent::~KWritableEvent() = default;
14
15void KWritableEvent::Initialize(KEvent* parent_event_, std::string&& name_) {
16 parent = parent_event_;
17 name = std::move(name_);
18 parent->GetReadableEvent().Open();
19}
20
21Result KWritableEvent::Signal() {
22 return parent->GetReadableEvent().Signal();
23}
24
25Result KWritableEvent::Clear() {
26 return parent->GetReadableEvent().Clear();
27}
28
29void KWritableEvent::Destroy() {
30 // Close our references.
31 parent->GetReadableEvent().Close();
32 parent->Close();
33}
34
35} // namespace Kernel
diff --git a/src/core/hle/kernel/k_writable_event.h b/src/core/hle/kernel/k_writable_event.h
deleted file mode 100644
index 3fd0c7d0a..000000000
--- a/src/core/hle/kernel/k_writable_event.h
+++ /dev/null
@@ -1,39 +0,0 @@
1// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "core/hle/kernel/k_auto_object.h"
7#include "core/hle/kernel/slab_helpers.h"
8#include "core/hle/result.h"
9
10namespace Kernel {
11
12class KernelCore;
13class KEvent;
14
15class KWritableEvent final
16 : public KAutoObjectWithSlabHeapAndContainer<KWritableEvent, KAutoObjectWithList> {
17 KERNEL_AUTOOBJECT_TRAITS(KWritableEvent, KAutoObject);
18
19public:
20 explicit KWritableEvent(KernelCore& kernel_);
21 ~KWritableEvent() override;
22
23 void Destroy() override;
24
25 static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
26
27 void Initialize(KEvent* parent_, std::string&& name_);
28 Result Signal();
29 Result Clear();
30
31 KEvent* GetParent() const {
32 return parent;
33 }
34
35private:
36 KEvent* parent{};
37};
38
39} // namespace Kernel
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 9251f29ad..eed2dc9f3 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -24,6 +24,7 @@
24#include "core/hardware_properties.h" 24#include "core/hardware_properties.h"
25#include "core/hle/kernel/init/init_slab_setup.h" 25#include "core/hle/kernel/init/init_slab_setup.h"
26#include "core/hle/kernel/k_client_port.h" 26#include "core/hle/kernel/k_client_port.h"
27#include "core/hle/kernel/k_dynamic_resource_manager.h"
27#include "core/hle/kernel/k_handle_table.h" 28#include "core/hle/kernel/k_handle_table.h"
28#include "core/hle/kernel/k_memory_layout.h" 29#include "core/hle/kernel/k_memory_layout.h"
29#include "core/hle/kernel/k_memory_manager.h" 30#include "core/hle/kernel/k_memory_manager.h"
@@ -73,8 +74,16 @@ struct KernelCore::Impl {
73 InitializeMemoryLayout(); 74 InitializeMemoryLayout();
74 Init::InitializeKPageBufferSlabHeap(system); 75 Init::InitializeKPageBufferSlabHeap(system);
75 InitializeShutdownThreads(); 76 InitializeShutdownThreads();
76 InitializePreemption(kernel);
77 InitializePhysicalCores(); 77 InitializePhysicalCores();
78 InitializePreemption(kernel);
79
80 // Initialize the Dynamic Slab Heaps.
81 {
82 const auto& pt_heap_region = memory_layout->GetPageTableHeapRegion();
83 ASSERT(pt_heap_region.GetEndAddress() != 0);
84
85 InitializeResourceManagers(pt_heap_region.GetAddress(), pt_heap_region.GetSize());
86 }
78 87
79 RegisterHostThread(); 88 RegisterHostThread();
80 } 89 }
@@ -86,6 +95,15 @@ struct KernelCore::Impl {
86 } 95 }
87 } 96 }
88 97
98 void CloseCurrentProcess() {
99 (*current_process).Finalize();
100 // current_process->Close();
101 // TODO: The current process should be destroyed based on accurate ref counting after
102 // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
103 (*current_process).Destroy();
104 current_process = nullptr;
105 }
106
89 void Shutdown() { 107 void Shutdown() {
90 is_shutting_down.store(true, std::memory_order_relaxed); 108 is_shutting_down.store(true, std::memory_order_relaxed);
91 SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); }); 109 SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); });
@@ -99,10 +117,6 @@ struct KernelCore::Impl {
99 next_user_process_id = KProcess::ProcessIDMin; 117 next_user_process_id = KProcess::ProcessIDMin;
100 next_thread_id = 1; 118 next_thread_id = 1;
101 119
102 for (auto& core : cores) {
103 core = nullptr;
104 }
105
106 global_handle_table->Finalize(); 120 global_handle_table->Finalize();
107 global_handle_table.reset(); 121 global_handle_table.reset();
108 122
@@ -152,15 +166,7 @@ struct KernelCore::Impl {
152 } 166 }
153 } 167 }
154 168
155 // Shutdown all processes. 169 CloseCurrentProcess();
156 if (current_process) {
157 (*current_process).Finalize();
158 // current_process->Close();
159 // TODO: The current process should be destroyed based on accurate ref counting after
160 // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
161 (*current_process).Destroy();
162 current_process = nullptr;
163 }
164 170
165 // Track kernel objects that were not freed on shutdown 171 // Track kernel objects that were not freed on shutdown
166 { 172 {
@@ -257,6 +263,18 @@ struct KernelCore::Impl {
257 system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event); 263 system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event);
258 } 264 }
259 265
266 void InitializeResourceManagers(VAddr address, size_t size) {
267 dynamic_page_manager = std::make_unique<KDynamicPageManager>();
268 memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>();
269 app_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>();
270
271 dynamic_page_manager->Initialize(address, size);
272 static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000;
273 memory_block_heap->Initialize(dynamic_page_manager.get(),
274 ApplicationMemoryBlockSlabHeapSize);
275 app_memory_block_manager->Initialize(nullptr, memory_block_heap.get());
276 }
277
260 void InitializeShutdownThreads() { 278 void InitializeShutdownThreads() {
261 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { 279 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
262 shutdown_threads[core_id] = KThread::Create(system.Kernel()); 280 shutdown_threads[core_id] = KThread::Create(system.Kernel());
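
InitializeResourceManagers above stacks three allocators: a KDynamicPageManager that owns the page-table heap region, a KMemoryBlockSlabHeap that carves those pages into 20000 KMemoryBlock slots, and the KMemoryBlockSlabManager that page tables actually allocate from. The wiring, reduced to hypothetical stand-in types:

#include <cstddef>
#include <cstdint>

// Stand-in types; only the initialization order and wiring mirror the diff.
struct PageManagerSketch {
    std::uintptr_t base{};
    std::size_t size{};
    void Initialize(std::uintptr_t address, std::size_t length) {
        base = address; // the page-table heap region becomes the page pool
        size = length;
    }
};

struct BlockHeapSketch {
    PageManagerSketch* pages{};
    std::size_t capacity{};
    void Initialize(PageManagerSketch* page_source, std::size_t num_objects) {
        pages = page_source;    // pages get carved into fixed-size block slots
        capacity = num_objects; // ApplicationMemoryBlockSlabHeapSize above
    }
};

struct BlockManagerSketch {
    BlockHeapSketch* heap{};
    void Initialize(void* /*owner*/, BlockHeapSketch* block_heap) {
        heap = block_heap; // page tables draw memory blocks from this heap
    }
};

inline void WireResourceManagersSketch(std::uintptr_t addr, std::size_t size) {
    static PageManagerSketch page_manager;
    static BlockHeapSketch block_heap;
    static BlockManagerSketch block_manager;

    page_manager.Initialize(addr, size);
    block_heap.Initialize(&page_manager, 20000);
    block_manager.Initialize(nullptr, &block_heap);
}
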
@@ -344,11 +362,6 @@ struct KernelCore::Impl {
344 static inline thread_local KThread* current_thread{nullptr}; 362 static inline thread_local KThread* current_thread{nullptr};
345 363
346 KThread* GetCurrentEmuThread() { 364 KThread* GetCurrentEmuThread() {
347 // If we are shutting down the kernel, none of this is relevant anymore.
348 if (IsShuttingDown()) {
349 return {};
350 }
351
352 const auto thread_id = GetCurrentHostThreadID(); 365 const auto thread_id = GetCurrentHostThreadID();
353 if (thread_id >= Core::Hardware::NUM_CPU_CORES) { 366 if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
354 return GetHostDummyThread(); 367 return GetHostDummyThread();
@@ -770,6 +783,11 @@ struct KernelCore::Impl {
770 // Kernel memory management 783 // Kernel memory management
771 std::unique_ptr<KMemoryManager> memory_manager; 784 std::unique_ptr<KMemoryManager> memory_manager;
772 785
786 // Dynamic slab managers
787 std::unique_ptr<KDynamicPageManager> dynamic_page_manager;
788 std::unique_ptr<KMemoryBlockSlabHeap> memory_block_heap;
789 std::unique_ptr<KMemoryBlockSlabManager> app_memory_block_manager;
790
773 // Shared memory for services 791 // Shared memory for services
774 Kernel::KSharedMemory* hid_shared_mem{}; 792 Kernel::KSharedMemory* hid_shared_mem{};
775 Kernel::KSharedMemory* font_shared_mem{}; 793 Kernel::KSharedMemory* font_shared_mem{};
@@ -853,6 +871,10 @@ const KProcess* KernelCore::CurrentProcess() const {
853 return impl->current_process; 871 return impl->current_process;
854} 872}
855 873
874void KernelCore::CloseCurrentProcess() {
875 impl->CloseCurrentProcess();
876}
877
856const std::vector<KProcess*>& KernelCore::GetProcessList() const { 878const std::vector<KProcess*>& KernelCore::GetProcessList() const {
857 return impl->process_list; 879 return impl->process_list;
858} 880}
@@ -1041,6 +1063,14 @@ const KMemoryManager& KernelCore::MemoryManager() const {
1041 return *impl->memory_manager; 1063 return *impl->memory_manager;
1042} 1064}
1043 1065
1066KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() {
1067 return *impl->app_memory_block_manager;
1068}
1069
1070const KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() const {
1071 return *impl->app_memory_block_manager;
1072}
1073
1044Kernel::KSharedMemory& KernelCore::GetHidSharedMem() { 1074Kernel::KSharedMemory& KernelCore::GetHidSharedMem() {
1045 return *impl->hid_shared_mem; 1075 return *impl->hid_shared_mem;
1046} 1076}
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index bcf016a97..266be2bc4 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -37,6 +37,7 @@ class KClientSession;
37class KEvent; 37class KEvent;
38class KHandleTable; 38class KHandleTable;
39class KLinkedListNode; 39class KLinkedListNode;
40class KMemoryBlockSlabManager;
40class KMemoryLayout; 41class KMemoryLayout;
41class KMemoryManager; 42class KMemoryManager;
42class KPageBuffer; 43class KPageBuffer;
@@ -46,13 +47,13 @@ class KResourceLimit;
46class KScheduler; 47class KScheduler;
47class KServerSession; 48class KServerSession;
48class KSession; 49class KSession;
50class KSessionRequest;
49class KSharedMemory; 51class KSharedMemory;
50class KSharedMemoryInfo; 52class KSharedMemoryInfo;
51class KThread; 53class KThread;
52class KThreadLocalPage; 54class KThreadLocalPage;
53class KTransferMemory; 55class KTransferMemory;
54class KWorkerTaskManager; 56class KWorkerTaskManager;
55class KWritableEvent;
56class KCodeMemory; 57class KCodeMemory;
57class PhysicalCore; 58class PhysicalCore;
58class ServiceThread; 59class ServiceThread;
@@ -131,6 +132,9 @@ public:
131 /// Retrieves a const pointer to the current process. 132 /// Retrieves a const pointer to the current process.
132 const KProcess* CurrentProcess() const; 133 const KProcess* CurrentProcess() const;
133 134
135 /// Closes the current process.
136 void CloseCurrentProcess();
137
134 /// Retrieves the list of processes. 138 /// Retrieves the list of processes.
135 const std::vector<KProcess*>& GetProcessList() const; 139 const std::vector<KProcess*>& GetProcessList() const;
136 140
@@ -239,6 +243,12 @@ public:
239 /// Gets the virtual memory manager for the kernel. 243 /// Gets the virtual memory manager for the kernel.
240 const KMemoryManager& MemoryManager() const; 244 const KMemoryManager& MemoryManager() const;
241 245
246 /// Gets the application memory block manager for the kernel.
247 KMemoryBlockSlabManager& GetApplicationMemoryBlockManager();
248
249 /// Gets the application memory block manager for the kernel.
250 const KMemoryBlockSlabManager& GetApplicationMemoryBlockManager() const;
251
242 /// Gets the shared memory object for HID services. 252 /// Gets the shared memory object for HID services.
243 Kernel::KSharedMemory& GetHidSharedMem(); 253 Kernel::KSharedMemory& GetHidSharedMem();
244 254
@@ -345,14 +355,14 @@ public:
345 return slab_heap_container->thread; 355 return slab_heap_container->thread;
346 } else if constexpr (std::is_same_v<T, KTransferMemory>) { 356 } else if constexpr (std::is_same_v<T, KTransferMemory>) {
347 return slab_heap_container->transfer_memory; 357 return slab_heap_container->transfer_memory;
348 } else if constexpr (std::is_same_v<T, KWritableEvent>) {
349 return slab_heap_container->writeable_event;
350 } else if constexpr (std::is_same_v<T, KCodeMemory>) { 358 } else if constexpr (std::is_same_v<T, KCodeMemory>) {
351 return slab_heap_container->code_memory; 359 return slab_heap_container->code_memory;
352 } else if constexpr (std::is_same_v<T, KPageBuffer>) { 360 } else if constexpr (std::is_same_v<T, KPageBuffer>) {
353 return slab_heap_container->page_buffer; 361 return slab_heap_container->page_buffer;
354 } else if constexpr (std::is_same_v<T, KThreadLocalPage>) { 362 } else if constexpr (std::is_same_v<T, KThreadLocalPage>) {
355 return slab_heap_container->thread_local_page; 363 return slab_heap_container->thread_local_page;
364 } else if constexpr (std::is_same_v<T, KSessionRequest>) {
365 return slab_heap_container->session_request;
356 } 366 }
357 } 367 }
358 368
@@ -412,10 +422,10 @@ private:
412 KSlabHeap<KSharedMemoryInfo> shared_memory_info; 422 KSlabHeap<KSharedMemoryInfo> shared_memory_info;
413 KSlabHeap<KThread> thread; 423 KSlabHeap<KThread> thread;
414 KSlabHeap<KTransferMemory> transfer_memory; 424 KSlabHeap<KTransferMemory> transfer_memory;
415 KSlabHeap<KWritableEvent> writeable_event;
416 KSlabHeap<KCodeMemory> code_memory; 425 KSlabHeap<KCodeMemory> code_memory;
417 KSlabHeap<KPageBuffer> page_buffer; 426 KSlabHeap<KPageBuffer> page_buffer;
418 KSlabHeap<KThreadLocalPage> thread_local_page; 427 KSlabHeap<KThreadLocalPage> thread_local_page;
428 KSlabHeap<KSessionRequest> session_request;
419 }; 429 };
420 430
421 std::unique_ptr<SlabHeapContainer> slab_heap_container; 431 std::unique_ptr<SlabHeapContainer> slab_heap_container;
diff --git a/src/core/hle/kernel/slab_helpers.h b/src/core/hle/kernel/slab_helpers.h
index 299a981a8..06b51e919 100644
--- a/src/core/hle/kernel/slab_helpers.h
+++ b/src/core/hle/kernel/slab_helpers.h
@@ -24,7 +24,7 @@ public:
24 } 24 }
25 25
26 static Derived* Allocate(KernelCore& kernel) { 26 static Derived* Allocate(KernelCore& kernel) {
27 return kernel.SlabHeap<Derived>().Allocate(); 27 return kernel.SlabHeap<Derived>().Allocate(kernel);
28 } 28 }
29 29
30 static void Free(KernelCore& kernel, Derived* obj) { 30 static void Free(KernelCore& kernel, Derived* obj) {
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 27e5a805d..b07ae3f02 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -29,12 +29,12 @@
29#include "core/hle/kernel/k_resource_limit.h" 29#include "core/hle/kernel/k_resource_limit.h"
30#include "core/hle/kernel/k_scheduler.h" 30#include "core/hle/kernel/k_scheduler.h"
31#include "core/hle/kernel/k_scoped_resource_reservation.h" 31#include "core/hle/kernel/k_scoped_resource_reservation.h"
32#include "core/hle/kernel/k_session.h"
32#include "core/hle/kernel/k_shared_memory.h" 33#include "core/hle/kernel/k_shared_memory.h"
33#include "core/hle/kernel/k_synchronization_object.h" 34#include "core/hle/kernel/k_synchronization_object.h"
34#include "core/hle/kernel/k_thread.h" 35#include "core/hle/kernel/k_thread.h"
35#include "core/hle/kernel/k_thread_queue.h" 36#include "core/hle/kernel/k_thread_queue.h"
36#include "core/hle/kernel/k_transfer_memory.h" 37#include "core/hle/kernel/k_transfer_memory.h"
37#include "core/hle/kernel/k_writable_event.h"
38#include "core/hle/kernel/kernel.h" 38#include "core/hle/kernel/kernel.h"
39#include "core/hle/kernel/physical_core.h" 39#include "core/hle/kernel/physical_core.h"
40#include "core/hle/kernel/svc.h" 40#include "core/hle/kernel/svc.h"
@@ -256,6 +256,93 @@ static Result UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u3
256 return UnmapMemory(system, dst_addr, src_addr, size); 256 return UnmapMemory(system, dst_addr, src_addr, size);
257} 257}
258 258
259template <typename T>
260Result CreateSession(Core::System& system, Handle* out_server, Handle* out_client, u64 name) {
261 auto& process = *system.CurrentProcess();
262 auto& handle_table = process.GetHandleTable();
263
264 // Declare the session we're going to allocate.
265 T* session;
266
267 // Reserve a new session from the process resource limit.
268 // FIXME: LimitableResource_SessionCountMax
269 KScopedResourceReservation session_reservation(&process, LimitableResource::Sessions);
270 if (session_reservation.Succeeded()) {
271 session = T::Create(system.Kernel());
272 } else {
273 return ResultLimitReached;
274
275 // // We couldn't reserve a session. Check that we support dynamically expanding the
276 // // resource limit.
277 // R_UNLESS(process.GetResourceLimit() ==
278 // &system.Kernel().GetSystemResourceLimit(), ResultLimitReached);
279 // R_UNLESS(KTargetSystem::IsDynamicResourceLimitsEnabled(), ResultLimitReached());
280
281 // // Try to allocate a session from unused slab memory.
282 // session = T::CreateFromUnusedSlabMemory();
283 // R_UNLESS(session != nullptr, ResultLimitReached);
284 // ON_RESULT_FAILURE { session->Close(); };
285
286 // // If we're creating a KSession, we want to add two KSessionRequests to the heap, to
287 // // prevent request exhaustion.
288 // // NOTE: Nintendo checks if session->DynamicCast<KSession *>() != nullptr, but there's
289 // // no reason to not do this statically.
290 // if constexpr (std::same_as<T, KSession>) {
291 // for (size_t i = 0; i < 2; i++) {
292 // KSessionRequest* request = KSessionRequest::CreateFromUnusedSlabMemory();
293 // R_UNLESS(request != nullptr, ResultLimitReached);
294 // request->Close();
295 // }
296 // }
297
298 // We successfully allocated a session, so add the object we allocated to the resource
299 // limit.
300 // system.Kernel().GetSystemResourceLimit().Reserve(LimitableResource::Sessions, 1);
301 }
302
303 // Check that we successfully created a session.
304 R_UNLESS(session != nullptr, ResultOutOfResource);
305
306 // Initialize the session.
307 session->Initialize(nullptr, fmt::format("{}", name));
308
309 // Commit the session reservation.
310 session_reservation.Commit();
311
312    // Ensure that we clean up the session (so that its only remaining references are in the
313    // handle table) on function end.
314 SCOPE_EXIT({
315 session->GetClientSession().Close();
316 session->GetServerSession().Close();
317 });
318
319 // Register the session.
320 T::Register(system.Kernel(), session);
321
322 // Add the server session to the handle table.
323 R_TRY(handle_table.Add(out_server, &session->GetServerSession()));
324
325 // Add the client session to the handle table.
326 const auto result = handle_table.Add(out_client, &session->GetClientSession());
327
328 if (!R_SUCCEEDED(result)) {
329        // Ensure that we maintain a clean handle state on exit.
330 handle_table.Remove(*out_server);
331 }
332
333 return result;
334}
335
336static Result CreateSession(Core::System& system, Handle* out_server, Handle* out_client,
337 u32 is_light, u64 name) {
338 if (is_light) {
339 // return CreateSession<KLightSession>(system, out_server, out_client, name);
340 return ResultUnknown;
341 } else {
342 return CreateSession<KSession>(system, out_server, out_client, name);
343 }
344}
345
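
CreateSession above follows the kernel's reserve/create/commit discipline: reserve a slot against the process resource limit, allocate only if the reservation succeeded, commit once the object is definitely live, and unwind the handle table by hand if the second Add fails. A reduced sketch of the reservation half, with a hypothetical guard standing in for KScopedResourceReservation:

// Hypothetical guard mirroring KScopedResourceReservation: the slot is
// given back automatically unless Commit() is reached.
class ReservationSketch {
public:
    explicit ReservationSketch(int* counter, int limit) : count{counter} {
        succeeded = *count < limit;
        if (succeeded) {
            ++*count; // take the slot up front
        }
    }
    ~ReservationSketch() {
        if (succeeded && !committed) {
            --*count; // creation failed somewhere: release the slot
        }
    }
    bool Succeeded() const { return succeeded; }
    void Commit() { committed = true; }

private:
    int* count;
    bool succeeded{};
    bool committed{};
};

bool CreateWithReservationSketch(int* session_count, int limit) {
    ReservationSketch reservation{session_count, limit};
    if (!reservation.Succeeded()) {
        return false; // ResultLimitReached in the SVC above
    }

    // ... allocate and initialize the object; an early return here would
    // let the destructor release the reservation ...

    reservation.Commit(); // object is live, so the slot stays accounted
    return true;
}
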
259/// Connect to an OS service given the port name, returns the handle to the port to out 346/// Connect to an OS service given the port name, returns the handle to the port to out
260static Result ConnectToNamedPort(Core::System& system, Handle* out, VAddr port_name_address) { 347static Result ConnectToNamedPort(Core::System& system, Handle* out, VAddr port_name_address) {
261 auto& memory = system.Memory(); 348 auto& memory = system.Memory();
@@ -295,7 +382,8 @@ static Result ConnectToNamedPort(Core::System& system, Handle* out, VAddr port_n
295 382
296 // Create a session. 383 // Create a session.
297 KClientSession* session{}; 384 KClientSession* session{};
298 R_TRY(port->CreateSession(std::addressof(session))); 385 R_TRY(port->CreateSession(std::addressof(session),
386 std::make_shared<SessionRequestManager>(kernel)));
299 port->Close(); 387 port->Close();
300 388
301 // Register the session in the table, close the extra reference. 389 // Register the session in the table, close the extra reference.
@@ -313,7 +401,7 @@ static Result ConnectToNamedPort32(Core::System& system, Handle* out_handle,
313 return ConnectToNamedPort(system, out_handle, port_name_address); 401 return ConnectToNamedPort(system, out_handle, port_name_address);
314} 402}
315 403
316/// Makes a blocking IPC call to an OS service. 404/// Makes a blocking IPC call to a service.
317static Result SendSyncRequest(Core::System& system, Handle handle) { 405static Result SendSyncRequest(Core::System& system, Handle handle) {
318 auto& kernel = system.Kernel(); 406 auto& kernel = system.Kernel();
319 407
@@ -327,22 +415,75 @@ static Result SendSyncRequest(Core::System& system, Handle handle) {
327 415
328 LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName()); 416 LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
329 417
330 { 418 return session->SendSyncRequest();
331 KScopedSchedulerLock lock(kernel);
332
333 // This is a synchronous request, so we should wait for our request to complete.
334 GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue));
335 GetCurrentThread(kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
336 session->SendSyncRequest(&GetCurrentThread(kernel), system.Memory(), system.CoreTiming());
337 }
338
339 return GetCurrentThread(kernel).GetWaitResult();
340} 419}
341 420
342static Result SendSyncRequest32(Core::System& system, Handle handle) { 421static Result SendSyncRequest32(Core::System& system, Handle handle) {
343 return SendSyncRequest(system, handle); 422 return SendSyncRequest(system, handle);
344} 423}
345 424
425static Result ReplyAndReceive(Core::System& system, s32* out_index, Handle* handles,
426 s32 num_handles, Handle reply_target, s64 timeout_ns) {
427 auto& kernel = system.Kernel();
428 auto& handle_table = GetCurrentThread(kernel).GetOwnerProcess()->GetHandleTable();
429
430 // Convert handle list to object table.
431 std::vector<KSynchronizationObject*> objs(num_handles);
432 R_UNLESS(
433 handle_table.GetMultipleObjects<KSynchronizationObject>(objs.data(), handles, num_handles),
434 ResultInvalidHandle);
435
436 // Ensure handles are closed when we're done.
437 SCOPE_EXIT({
438 for (auto i = 0; i < num_handles; ++i) {
439 objs[i]->Close();
440 }
441 });
442
443 // Reply to the target, if one is specified.
444 if (reply_target != InvalidHandle) {
445 KScopedAutoObject session = handle_table.GetObject<KServerSession>(reply_target);
446 R_UNLESS(session.IsNotNull(), ResultInvalidHandle);
447
448 // If we fail to reply, we want to set the output index to -1.
449 // ON_RESULT_FAILURE { *out_index = -1; };
450
451 // Send the reply.
452 // R_TRY(session->SendReply());
453
454 Result rc = session->SendReply();
455 if (!R_SUCCEEDED(rc)) {
456 *out_index = -1;
457 return rc;
458 }
459 }
460
461 // Wait for a message.
462 while (true) {
463 // Wait for an object.
464 s32 index;
465 Result result = KSynchronizationObject::Wait(kernel, &index, objs.data(),
466 static_cast<s32>(objs.size()), timeout_ns);
467 if (result == ResultTimedOut) {
468 return result;
469 }
470
471 // Receive the request.
472 if (R_SUCCEEDED(result)) {
473 KServerSession* session = objs[index]->DynamicCast<KServerSession*>();
474 if (session != nullptr) {
475 result = session->ReceiveRequest();
476 if (result == ResultNotFound) {
477 continue;
478 }
479 }
480 }
481
482 *out_index = index;
483 return result;
484 }
485}
486
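
ReplyAndReceive above is the server half of the IPC loop: optionally send a reply to reply_target, then wait on the whole handle set, retrying when ReceiveRequest reports ResultNotFound because another server thread claimed the message between the wakeup and the receive. The retry shape, modeled with standard containers (a hypothetical, single-threaded sketch, so the raced branch is illustrative only):

#include <cstddef>
#include <optional>
#include <queue>
#include <vector>

// Each session holds a queue of pending requests.
struct SessionSketch {
    std::queue<int> pending;
};

// Stand-in for KSynchronizationObject::Wait: index of a signaled session.
std::optional<std::size_t> WaitAnySketch(const std::vector<SessionSketch>& sessions) {
    for (std::size_t i = 0; i < sessions.size(); ++i) {
        if (!sessions[i].pending.empty()) {
            return i;
        }
    }
    return std::nullopt; // ResultTimedOut analogue
}

std::optional<int> ReceiveNextSketch(std::vector<SessionSketch>& sessions) {
    while (auto index = WaitAnySketch(sessions)) {
        auto& queue = sessions[*index].pending;
        if (queue.empty()) {
            // ResultNotFound analogue: another receiver won the race, so
            // wait again. (Unreachable single-threaded; kept for shape.)
            continue;
        }
        int request = queue.front();
        queue.pop();
        return request; // hand the request to the caller
    }
    return std::nullopt;
}
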
346/// Get the ID for the specified thread. 487/// Get the ID for the specified thread.
347static Result GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle) { 488static Result GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle) {
348 // Get the thread from its handle. 489 // Get the thread from its handle.
@@ -792,7 +933,7 @@ static Result GetInfo(Core::System& system, u64* result, u64 info_id, Handle han
792 return ResultSuccess; 933 return ResultSuccess;
793 934
794 case GetInfoType::UserExceptionContextAddr: 935 case GetInfoType::UserExceptionContextAddr:
795 *result = process->GetTLSRegionAddress(); 936 *result = process->GetProcessLocalRegionAddress();
796 return ResultSuccess; 937 return ResultSuccess;
797 938
798 case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource: 939 case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource:
@@ -1747,7 +1888,7 @@ static void ExitProcess(Core::System& system) {
1747 auto* current_process = system.Kernel().CurrentProcess(); 1888 auto* current_process = system.Kernel().CurrentProcess();
1748 1889
1749 LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID()); 1890 LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID());
1750 ASSERT_MSG(current_process->GetStatus() == ProcessStatus::Running, 1891 ASSERT_MSG(current_process->GetState() == KProcess::State::Running,
1751 "Process has already exited"); 1892 "Process has already exited");
1752 1893
1753 system.Exit(); 1894 system.Exit();
@@ -2303,11 +2444,11 @@ static Result SignalEvent(Core::System& system, Handle event_handle) {
2303 // Get the current handle table. 2444 // Get the current handle table.
2304 const KHandleTable& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 2445 const KHandleTable& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
2305 2446
2306 // Get the writable event. 2447 // Get the event.
2307 KScopedAutoObject writable_event = handle_table.GetObject<KWritableEvent>(event_handle); 2448 KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle);
2308 R_UNLESS(writable_event.IsNotNull(), ResultInvalidHandle); 2449 R_UNLESS(event.IsNotNull(), ResultInvalidHandle);
2309 2450
2310 return writable_event->Signal(); 2451 return event->Signal();
2311} 2452}
2312 2453
2313static Result SignalEvent32(Core::System& system, Handle event_handle) { 2454static Result SignalEvent32(Core::System& system, Handle event_handle) {
@@ -2322,9 +2463,9 @@ static Result ClearEvent(Core::System& system, Handle event_handle) {
2322 2463
2323 // Try to clear the writable event. 2464 // Try to clear the writable event.
2324 { 2465 {
2325 KScopedAutoObject writable_event = handle_table.GetObject<KWritableEvent>(event_handle); 2466 KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle);
2326 if (writable_event.IsNotNull()) { 2467 if (event.IsNotNull()) {
2327 return writable_event->Clear(); 2468 return event->Clear();
2328 } 2469 }
2329 } 2470 }
2330 2471
@@ -2362,24 +2503,24 @@ static Result CreateEvent(Core::System& system, Handle* out_write, Handle* out_r
2362 R_UNLESS(event != nullptr, ResultOutOfResource); 2503 R_UNLESS(event != nullptr, ResultOutOfResource);
2363 2504
2364 // Initialize the event. 2505 // Initialize the event.
2365 event->Initialize("CreateEvent", kernel.CurrentProcess()); 2506 event->Initialize(kernel.CurrentProcess());
2366 2507
2367 // Commit the thread reservation. 2508 // Commit the thread reservation.
2368 event_reservation.Commit(); 2509 event_reservation.Commit();
2369 2510
2370 // Ensure that we clean up the event (and its only references are handle table) on function end. 2511 // Ensure that we clean up the event (and its only references are handle table) on function end.
2371 SCOPE_EXIT({ 2512 SCOPE_EXIT({
2372 event->GetWritableEvent().Close();
2373 event->GetReadableEvent().Close(); 2513 event->GetReadableEvent().Close();
2514 event->Close();
2374 }); 2515 });
2375 2516
2376 // Register the event. 2517 // Register the event.
2377 KEvent::Register(kernel, event); 2518 KEvent::Register(kernel, event);
2378 2519
2379 // Add the writable event to the handle table. 2520 // Add the event to the handle table.
2380 R_TRY(handle_table.Add(out_write, std::addressof(event->GetWritableEvent()))); 2521 R_TRY(handle_table.Add(out_write, event));
2381 2522
2382        // Add the writable event to the handle table.                                           2523        // Ensure that we maintain a clean handle state on exit.
2383 auto handle_guard = SCOPE_GUARD({ handle_table.Remove(*out_write); }); 2524 auto handle_guard = SCOPE_GUARD({ handle_table.Remove(*out_write); });
2384 2525
2385 // Add the readable event to the handle table. 2526 // Add the readable event to the handle table.
@@ -2416,7 +2557,7 @@ static Result GetProcessInfo(Core::System& system, u64* out, Handle process_hand
2416 return ResultInvalidEnumValue; 2557 return ResultInvalidEnumValue;
2417 } 2558 }
2418 2559
2419 *out = static_cast<u64>(process->GetStatus()); 2560 *out = static_cast<u64>(process->GetState());
2420 return ResultSuccess; 2561 return ResultSuccess;
2421} 2562}
2422 2563
@@ -2860,10 +3001,10 @@ static const FunctionDef SVC_Table_64[] = {
2860 {0x3D, SvcWrap64<ChangeKernelTraceState>, "ChangeKernelTraceState"}, 3001 {0x3D, SvcWrap64<ChangeKernelTraceState>, "ChangeKernelTraceState"},
2861 {0x3E, nullptr, "Unknown3e"}, 3002 {0x3E, nullptr, "Unknown3e"},
2862 {0x3F, nullptr, "Unknown3f"}, 3003 {0x3F, nullptr, "Unknown3f"},
2863 {0x40, nullptr, "CreateSession"}, 3004 {0x40, SvcWrap64<CreateSession>, "CreateSession"},
2864 {0x41, nullptr, "AcceptSession"}, 3005 {0x41, nullptr, "AcceptSession"},
2865 {0x42, nullptr, "ReplyAndReceiveLight"}, 3006 {0x42, nullptr, "ReplyAndReceiveLight"},
2866 {0x43, nullptr, "ReplyAndReceive"}, 3007 {0x43, SvcWrap64<ReplyAndReceive>, "ReplyAndReceive"},
2867 {0x44, nullptr, "ReplyAndReceiveWithUserBuffer"}, 3008 {0x44, nullptr, "ReplyAndReceiveWithUserBuffer"},
2868 {0x45, SvcWrap64<CreateEvent>, "CreateEvent"}, 3009 {0x45, SvcWrap64<CreateEvent>, "CreateEvent"},
2869 {0x46, nullptr, "MapIoRegion"}, 3010 {0x46, nullptr, "MapIoRegion"},
diff --git a/src/core/hle/kernel/svc_common.h b/src/core/hle/kernel/svc_common.h
index 95750c3eb..85506710e 100644
--- a/src/core/hle/kernel/svc_common.h
+++ b/src/core/hle/kernel/svc_common.h
@@ -14,8 +14,11 @@ namespace Kernel::Svc {
14 14
15using namespace Common::Literals; 15using namespace Common::Literals;
16 16
17constexpr s32 ArgumentHandleCountMax = 0x40; 17constexpr inline s32 ArgumentHandleCountMax = 0x40;
18constexpr u32 HandleWaitMask{1u << 30}; 18
19constexpr inline u32 HandleWaitMask = 1u << 30;
20
21constexpr inline s64 WaitInfinite = -1;
19 22
20constexpr inline std::size_t HeapSizeAlignment = 2_MiB; 23constexpr inline std::size_t HeapSizeAlignment = 2_MiB;
21 24
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h
index 79e15183a..abb9847fe 100644
--- a/src/core/hle/kernel/svc_types.h
+++ b/src/core/hle/kernel/svc_types.h
@@ -95,6 +95,19 @@ constexpr inline s32 IdealCoreNoUpdate = -3;
95constexpr inline s32 LowestThreadPriority = 63; 95constexpr inline s32 LowestThreadPriority = 63;
96constexpr inline s32 HighestThreadPriority = 0; 96constexpr inline s32 HighestThreadPriority = 0;
97 97
98constexpr inline s32 SystemThreadPriorityHighest = 16;
99
100enum class ProcessState : u32 {
101 Created = 0,
102 CreatedAttached = 1,
103 Running = 2,
104 Crashed = 3,
105 RunningAttached = 4,
106 Terminating = 5,
107 Terminated = 6,
108 DebugBreak = 7,
109};
110
98constexpr inline size_t ThreadLocalRegionSize = 0x200; 111constexpr inline size_t ThreadLocalRegionSize = 0x200;
99 112
100} // namespace Kernel::Svc 113} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index 4bc49087e..272c54cf7 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -8,6 +8,7 @@
8#include "core/core.h" 8#include "core/core.h"
9#include "core/hle/kernel/svc_types.h" 9#include "core/hle/kernel/svc_types.h"
10#include "core/hle/result.h" 10#include "core/hle/result.h"
11#include "core/memory.h"
11 12
12namespace Kernel { 13namespace Kernel {
13 14
@@ -346,6 +347,37 @@ void SvcWrap64(Core::System& system) {
346 FuncReturn(system, retval); 347 FuncReturn(system, retval);
347} 348}
348 349
350// Used by CreateSession
351template <Result func(Core::System&, Handle*, Handle*, u32, u64)>
352void SvcWrap64(Core::System& system) {
353 Handle param_1 = 0;
354 Handle param_2 = 0;
355 const u32 retval = func(system, &param_1, &param_2, static_cast<u32>(Param(system, 2)),
356                            static_cast<u64>(Param(system, 3)))
357 .raw;
358
359 system.CurrentArmInterface().SetReg(1, param_1);
360 system.CurrentArmInterface().SetReg(2, param_2);
361 FuncReturn(system, retval);
362}
363
364// Used by ReplyAndReceive
365template <Result func(Core::System&, s32*, Handle*, s32, Handle, s64)>
366void SvcWrap64(Core::System& system) {
367 s32 param_1 = 0;
368 s32 num_handles = static_cast<s32>(Param(system, 2));
369
370 std::vector<Handle> handles(num_handles);
371 system.Memory().ReadBlock(Param(system, 1), handles.data(), num_handles * sizeof(Handle));
372
373 const u32 retval = func(system, &param_1, handles.data(), num_handles,
374 static_cast<s32>(Param(system, 3)), static_cast<s64>(Param(system, 4)))
375 .raw;
376
377 system.CurrentArmInterface().SetReg(1, param_1);
378 FuncReturn(system, retval);
379}
380
349// Used by WaitForAddress 381// Used by WaitForAddress
350template <Result func(Core::System&, u64, Svc::ArbitrationType, s32, s64)> 382template <Result func(Core::System&, u64, Svc::ArbitrationType, s32, s64)>
351void SvcWrap64(Core::System& system) { 383void SvcWrap64(Core::System& system) {
diff --git a/src/core/hle/result.h b/src/core/hle/result.h
index 47a1b829b..ef4b2d417 100644
--- a/src/core/hle/result.h
+++ b/src/core/hle/result.h
@@ -5,6 +5,7 @@
5 5
6#include "common/assert.h" 6#include "common/assert.h"
7#include "common/bit_field.h" 7#include "common/bit_field.h"
8#include "common/common_funcs.h"
8#include "common/common_types.h" 9#include "common/common_types.h"
9#include "common/expected.h" 10#include "common/expected.h"
10 11
@@ -130,6 +131,18 @@ union Result {
130 [[nodiscard]] constexpr bool IsError() const { 131 [[nodiscard]] constexpr bool IsError() const {
131 return !IsSuccess(); 132 return !IsSuccess();
132 } 133 }
134
135 [[nodiscard]] constexpr bool IsFailure() const {
136 return !IsSuccess();
137 }
138
139 [[nodiscard]] constexpr u32 GetInnerValue() const {
140 return static_cast<u32>(module.Value()) | (description << module.bits);
141 }
142
143 [[nodiscard]] constexpr bool Includes(Result result) const {
144 return GetInnerValue() == result.GetInnerValue();
145 }
133}; 146};
134static_assert(std::is_trivial_v<Result>); 147static_assert(std::is_trivial_v<Result>);
135 148
@@ -349,19 +362,115 @@ private:
349 } \ 362 } \
350 } while (false) 363 } while (false)
351 364
352#define R_SUCCEEDED(res) (res.IsSuccess()) 365#define R_SUCCEEDED(res) (static_cast<Result>(res).IsSuccess())
366#define R_FAILED(res) (static_cast<Result>(res).IsFailure())
353 367
354/// Evaluates a boolean expression, and succeeds if that expression is true. 368namespace ResultImpl {
355#define R_SUCCEED_IF(expr) R_UNLESS(!(expr), ResultSuccess) 369template <auto EvaluateResult, class F>
370class ScopedResultGuard {
371 YUZU_NON_COPYABLE(ScopedResultGuard);
372 YUZU_NON_MOVEABLE(ScopedResultGuard);
373
374private:
375 Result& m_ref;
376 F m_f;
377
378public:
379 constexpr ScopedResultGuard(Result& ref, F f) : m_ref(ref), m_f(std::move(f)) {}
380 constexpr ~ScopedResultGuard() {
381 if (EvaluateResult(m_ref)) {
382 m_f();
383 }
384 }
385};
386
387template <auto EvaluateResult>
388class ResultReferenceForScopedResultGuard {
389private:
390 Result& m_ref;
391
392public:
393 constexpr ResultReferenceForScopedResultGuard(Result& r) : m_ref(r) {}
394 constexpr operator Result&() const {
395 return m_ref;
396 }
397};
398
399template <auto EvaluateResult, typename F>
400constexpr ScopedResultGuard<EvaluateResult, F> operator+(
401 ResultReferenceForScopedResultGuard<EvaluateResult> ref, F&& f) {
402 return ScopedResultGuard<EvaluateResult, F>(static_cast<Result&>(ref), std::forward<F>(f));
403}
404
405constexpr bool EvaluateResultSuccess(const Result& r) {
406 return R_SUCCEEDED(r);
407}
408constexpr bool EvaluateResultFailure(const Result& r) {
409 return R_FAILED(r);
410}
411
412template <typename T>
413constexpr void UpdateCurrentResultReference(T result_reference, Result result) = delete;
414// Intentionally not defined
415
416template <>
417constexpr void UpdateCurrentResultReference<Result&>(Result& result_reference, Result result) {
418 result_reference = result;
419}
420
421template <>
422constexpr void UpdateCurrentResultReference<const Result>(Result result_reference, Result result) {}
423} // namespace ResultImpl
424
425#define DECLARE_CURRENT_RESULT_REFERENCE_AND_STORAGE(COUNTER_VALUE) \
426 [[maybe_unused]] constexpr bool HasPrevRef_##COUNTER_VALUE = \
427 std::same_as<decltype(__TmpCurrentResultReference), Result&>; \
428 [[maybe_unused]] auto& PrevRef_##COUNTER_VALUE = __TmpCurrentResultReference; \
429 [[maybe_unused]] Result __tmp_result_##COUNTER_VALUE = ResultSuccess; \
430 Result& __TmpCurrentResultReference = \
431 HasPrevRef_##COUNTER_VALUE ? PrevRef_##COUNTER_VALUE : __tmp_result_##COUNTER_VALUE
432
433#define ON_RESULT_RETURN_IMPL(...) \
434 static_assert(std::same_as<decltype(__TmpCurrentResultReference), Result&>); \
435 auto RESULT_GUARD_STATE_##__COUNTER__ = \
436 ResultImpl::ResultReferenceForScopedResultGuard<__VA_ARGS__>( \
437 __TmpCurrentResultReference) + \
438 [&]()
439
440#define ON_RESULT_FAILURE_2 ON_RESULT_RETURN_IMPL(ResultImpl::EvaluateResultFailure)
441
442#define ON_RESULT_FAILURE \
443 DECLARE_CURRENT_RESULT_REFERENCE_AND_STORAGE(__COUNTER__); \
444 ON_RESULT_FAILURE_2
445
446#define ON_RESULT_SUCCESS_2 ON_RESULT_RETURN_IMPL(ResultImpl::EvaluateResultSuccess)
447
448#define ON_RESULT_SUCCESS \
449 DECLARE_CURRENT_RESULT_REFERENCE_AND_STORAGE(__COUNTER__); \
450 ON_RESULT_SUCCESS_2
451
452constexpr inline Result __TmpCurrentResultReference = ResultSuccess;
453
454/// Returns a result.
455#define R_RETURN(res_expr) \
456 { \
457 const Result _tmp_r_throw_rc = (res_expr); \
458 ResultImpl::UpdateCurrentResultReference<decltype(__TmpCurrentResultReference)>( \
459 __TmpCurrentResultReference, _tmp_r_throw_rc); \
460 return _tmp_r_throw_rc; \
461 }
462
463/// Returns ResultSuccess()
464#define R_SUCCEED() R_RETURN(ResultSuccess)
465
466/// Throws a result.
467#define R_THROW(res_expr) R_RETURN(res_expr)
356 468
357/// Evaluates a boolean expression, and returns a result unless that expression is true. 469/// Evaluates a boolean expression, and returns a result unless that expression is true.
358#define R_UNLESS(expr, res) \ 470#define R_UNLESS(expr, res) \
359 { \ 471 { \
360 if (!(expr)) { \ 472 if (!(expr)) { \
361 if (res.IsError()) { \ 473 R_THROW(res); \
362 LOG_ERROR(Kernel, "Failed with result: {}", res.raw); \
363 } \
364 return res; \
365 } \ 474 } \
366 } 475 }
367 476
@@ -369,7 +478,10 @@ private:
369#define R_TRY(res_expr) \ 478#define R_TRY(res_expr) \
370 { \ 479 { \
371 const auto _tmp_r_try_rc = (res_expr); \ 480 const auto _tmp_r_try_rc = (res_expr); \
372 if (_tmp_r_try_rc.IsError()) { \ 481 if (R_FAILED(_tmp_r_try_rc)) { \
373 return _tmp_r_try_rc; \ 482 R_THROW(_tmp_r_try_rc); \
374 } \ 483 } \
375 } 484 }
485
486/// Evaluates a boolean expression, and succeeds if that expression is true.
487#define R_SUCCEED_IF(expr) R_UNLESS(!(expr), ResultSuccess)
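Note: the ResultImpl machinery above reduces to a scope guard that runs its cleanup lambda at scope exit, but only if the tracked Result is (or is not) an error at that point; R_RETURN writes into the tracked reference before returning so the guard observes the final value. A standalone miniature of the failure case, without the macro plumbing (OnFailureGuard, MapMemory, and MapTwoRegions are illustrative names, not yuzu APIs):

#include <cstdio>
#include <utility>

struct Result {
    unsigned raw;
    constexpr bool IsSuccess() const { return raw == 0; }
    constexpr bool IsFailure() const { return !IsSuccess(); }
};
inline constexpr Result ResultSuccess{0};

// Runs the cleanup lambda at scope exit, but only on the error path.
template <class F>
class OnFailureGuard {
public:
    OnFailureGuard(Result& r, F f) : m_r(r), m_f(std::move(f)) {}
    ~OnFailureGuard() {
        if (m_r.IsFailure()) {
            m_f();
        }
    }
private:
    Result& m_r;
    F m_f;
};

Result MapMemory(bool fail) { return fail ? Result{1} : ResultSuccess; }

Result MapTwoRegions(bool fail_second) {
    Result rc = MapMemory(false);
    if (rc.IsFailure()) return rc;

    // Equivalent in spirit to: ON_RESULT_FAILURE { /* unmap first region */ };
    OnFailureGuard undo(rc, [] { std::puts("rolling back first mapping"); });

    rc = MapMemory(fail_second);
    return rc;
}

int main() {
    MapTwoRegions(true);  // prints the rollback message
    MapTwoRegions(false); // silent: cleanup is skipped on success
}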
diff --git a/src/core/hle/service/acc/async_context.cpp b/src/core/hle/service/acc/async_context.cpp
index c85b2e43a..713689d8f 100644
--- a/src/core/hle/service/acc/async_context.cpp
+++ b/src/core/hle/service/acc/async_context.cpp
@@ -64,7 +64,7 @@ void IAsyncContext::GetResult(Kernel::HLERequestContext& ctx) {
64 64
65void IAsyncContext::MarkComplete() { 65void IAsyncContext::MarkComplete() {
66 is_complete.store(true); 66 is_complete.store(true);
67 completion_event->GetWritableEvent().Signal(); 67 completion_event->Signal();
68} 68}
69 69
70} // namespace Service::Account 70} // namespace Service::Account
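Note: this and the many call-site changes below are one mechanical rename: the writable half of an event pair disappears, and Signal/Clear move onto KEvent itself, forwarding to the readable end. A simplified stand-in (not the real kernel types) showing the before/after call shape:

#include <cstdio>

class KReadableEvent {
public:
    void Signal() { signaled = true; }
    void Clear() { signaled = false; }
    bool IsSignaled() const { return signaled; }
private:
    bool signaled = false;
};

class KEvent {
public:
    // Old call sites: event->GetWritableEvent().Signal();
    // New call sites: event->Signal();
    void Signal() { readable.Signal(); }
    void Clear() { readable.Clear(); }
    KReadableEvent& GetReadableEvent() { return readable; }
private:
    KReadableEvent readable;
};

int main() {
    KEvent completion_event;
    completion_event.Signal();
    std::printf("signaled=%d\n", completion_event.GetReadableEvent().IsSignaled());
}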
diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp
index 6fb7e198e..e55233054 100644
--- a/src/core/hle/service/am/am.cpp
+++ b/src/core/hle/service/am/am.cpp
@@ -316,7 +316,7 @@ ISelfController::ISelfController(Core::System& system_, NVFlinger::NVFlinger& nv
316 316
317 accumulated_suspended_tick_changed_event = 317 accumulated_suspended_tick_changed_event =
318 service_context.CreateEvent("ISelfController:AccumulatedSuspendedTickChangedEvent"); 318 service_context.CreateEvent("ISelfController:AccumulatedSuspendedTickChangedEvent");
319 accumulated_suspended_tick_changed_event->GetWritableEvent().Signal(); 319 accumulated_suspended_tick_changed_event->Signal();
320} 320}
321 321
322ISelfController::~ISelfController() { 322ISelfController::~ISelfController() {
@@ -378,7 +378,7 @@ void ISelfController::LeaveFatalSection(Kernel::HLERequestContext& ctx) {
378void ISelfController::GetLibraryAppletLaunchableEvent(Kernel::HLERequestContext& ctx) { 378void ISelfController::GetLibraryAppletLaunchableEvent(Kernel::HLERequestContext& ctx) {
379 LOG_WARNING(Service_AM, "(STUBBED) called"); 379 LOG_WARNING(Service_AM, "(STUBBED) called");
380 380
381 launchable_event->GetWritableEvent().Signal(); 381 launchable_event->Signal();
382 382
383 IPC::ResponseBuilder rb{ctx, 2, 1}; 383 IPC::ResponseBuilder rb{ctx, 2, 1};
384 rb.Push(ResultSuccess); 384 rb.Push(ResultSuccess);
@@ -618,18 +618,18 @@ Kernel::KReadableEvent& AppletMessageQueue::GetOperationModeChangedEvent() {
618 618
619void AppletMessageQueue::PushMessage(AppletMessage msg) { 619void AppletMessageQueue::PushMessage(AppletMessage msg) {
620 messages.push(msg); 620 messages.push(msg);
621 on_new_message->GetWritableEvent().Signal(); 621 on_new_message->Signal();
622} 622}
623 623
624AppletMessageQueue::AppletMessage AppletMessageQueue::PopMessage() { 624AppletMessageQueue::AppletMessage AppletMessageQueue::PopMessage() {
625 if (messages.empty()) { 625 if (messages.empty()) {
626 on_new_message->GetWritableEvent().Clear(); 626 on_new_message->Clear();
627 return AppletMessage::None; 627 return AppletMessage::None;
628 } 628 }
629 auto msg = messages.front(); 629 auto msg = messages.front();
630 messages.pop(); 630 messages.pop();
631 if (messages.empty()) { 631 if (messages.empty()) {
632 on_new_message->GetWritableEvent().Clear(); 632 on_new_message->Clear();
633 } 633 }
634 return msg; 634 return msg;
635} 635}
@@ -653,7 +653,7 @@ void AppletMessageQueue::FocusStateChanged() {
653void AppletMessageQueue::OperationModeChanged() { 653void AppletMessageQueue::OperationModeChanged() {
654 PushMessage(AppletMessage::OperationModeChanged); 654 PushMessage(AppletMessage::OperationModeChanged);
655 PushMessage(AppletMessage::PerformanceModeChanged); 655 PushMessage(AppletMessage::PerformanceModeChanged);
656 on_operation_mode_changed->GetWritableEvent().Signal(); 656 on_operation_mode_changed->Signal();
657} 657}
658 658
659ICommonStateGetter::ICommonStateGetter(Core::System& system_, 659ICommonStateGetter::ICommonStateGetter(Core::System& system_,
diff --git a/src/core/hle/service/am/applets/applets.cpp b/src/core/hle/service/am/applets/applets.cpp
index b5b8e4cad..7062df21c 100644
--- a/src/core/hle/service/am/applets/applets.cpp
+++ b/src/core/hle/service/am/applets/applets.cpp
@@ -65,7 +65,7 @@ std::shared_ptr<IStorage> AppletDataBroker::PopNormalDataToGame() {
65 65
66 auto out = std::move(out_channel.front()); 66 auto out = std::move(out_channel.front());
67 out_channel.pop_front(); 67 out_channel.pop_front();
68 pop_out_data_event->GetWritableEvent().Clear(); 68 pop_out_data_event->Clear();
69 return out; 69 return out;
70} 70}
71 71
@@ -84,7 +84,7 @@ std::shared_ptr<IStorage> AppletDataBroker::PopInteractiveDataToGame() {
84 84
85 auto out = std::move(out_interactive_channel.front()); 85 auto out = std::move(out_interactive_channel.front());
86 out_interactive_channel.pop_front(); 86 out_interactive_channel.pop_front();
87 pop_interactive_out_data_event->GetWritableEvent().Clear(); 87 pop_interactive_out_data_event->Clear();
88 return out; 88 return out;
89} 89}
90 90
@@ -103,7 +103,7 @@ void AppletDataBroker::PushNormalDataFromGame(std::shared_ptr<IStorage>&& storag
103 103
104void AppletDataBroker::PushNormalDataFromApplet(std::shared_ptr<IStorage>&& storage) { 104void AppletDataBroker::PushNormalDataFromApplet(std::shared_ptr<IStorage>&& storage) {
105 out_channel.emplace_back(std::move(storage)); 105 out_channel.emplace_back(std::move(storage));
106 pop_out_data_event->GetWritableEvent().Signal(); 106 pop_out_data_event->Signal();
107} 107}
108 108
109void AppletDataBroker::PushInteractiveDataFromGame(std::shared_ptr<IStorage>&& storage) { 109void AppletDataBroker::PushInteractiveDataFromGame(std::shared_ptr<IStorage>&& storage) {
@@ -112,11 +112,11 @@ void AppletDataBroker::PushInteractiveDataFromGame(std::shared_ptr<IStorage>&& s
112 112
113void AppletDataBroker::PushInteractiveDataFromApplet(std::shared_ptr<IStorage>&& storage) { 113void AppletDataBroker::PushInteractiveDataFromApplet(std::shared_ptr<IStorage>&& storage) {
114 out_interactive_channel.emplace_back(std::move(storage)); 114 out_interactive_channel.emplace_back(std::move(storage));
115 pop_interactive_out_data_event->GetWritableEvent().Signal(); 115 pop_interactive_out_data_event->Signal();
116} 116}
117 117
118void AppletDataBroker::SignalStateChanged() { 118void AppletDataBroker::SignalStateChanged() {
119 state_changed_event->GetWritableEvent().Signal(); 119 state_changed_event->Signal();
120 120
121 switch (applet_mode) { 121 switch (applet_mode) {
122 case LibraryAppletMode::AllForeground: 122 case LibraryAppletMode::AllForeground:
diff --git a/src/core/hle/service/audio/audctl.cpp b/src/core/hle/service/audio/audctl.cpp
index 4a2ae5f88..5abf22ba4 100644
--- a/src/core/hle/service/audio/audctl.cpp
+++ b/src/core/hle/service/audio/audctl.cpp
@@ -45,9 +45,25 @@ AudCtl::AudCtl(Core::System& system_) : ServiceFramework{system_, "audctl"} {
45 {32, nullptr, "GetActiveOutputTarget"}, 45 {32, nullptr, "GetActiveOutputTarget"},
46 {33, nullptr, "GetTargetDeviceInfo"}, 46 {33, nullptr, "GetTargetDeviceInfo"},
47 {34, nullptr, "AcquireTargetNotification"}, 47 {34, nullptr, "AcquireTargetNotification"},
48 {35, nullptr, "SetHearingProtectionSafeguardTimerRemainingTimeForDebug"},
49 {36, nullptr, "GetHearingProtectionSafeguardTimerRemainingTimeForDebug"},
50 {37, nullptr, "SetHearingProtectionSafeguardEnabled"},
51 {38, nullptr, "IsHearingProtectionSafeguardEnabled"},
52 {39, nullptr, "IsHearingProtectionSafeguardMonitoringOutputForDebug"},
53 {40, nullptr, "GetSystemInformationForDebug"},
54 {41, nullptr, "SetVolumeButtonLongPressTime"},
55 {42, nullptr, "SetNativeVolumeForDebug"},
48 {10000, nullptr, "NotifyAudioOutputTargetForPlayReport"}, 56 {10000, nullptr, "NotifyAudioOutputTargetForPlayReport"},
49 {10001, nullptr, "NotifyAudioOutputChannelCountForPlayReport"}, 57 {10001, nullptr, "NotifyAudioOutputChannelCountForPlayReport"},
50 {10002, nullptr, "NotifyUnsupportedUsbOutputDeviceAttachedForPlayReport"}, 58 {10002, nullptr, "NotifyUnsupportedUsbOutputDeviceAttachedForPlayReport"},
59 {10100, nullptr, "GetAudioVolumeDataForPlayReport"},
60 {10101, nullptr, "BindAudioVolumeUpdateEventForPlayReport"},
61 {10102, nullptr, "BindAudioOutputTargetUpdateEventForPlayReport"},
62 {10103, nullptr, "GetAudioOutputTargetForPlayReport"},
63 {10104, nullptr, "GetAudioOutputChannelCountForPlayReport"},
64 {10105, nullptr, "BindAudioOutputChannelCountUpdateEventForPlayReport"},
65 {10106, nullptr, "GetDefaultAudioOutputTargetForPlayReport"},
66 {50000, nullptr, "SetAnalogInputBoostGainForPrototyping"},
51 }; 67 };
52 // clang-format on 68 // clang-format on
53 69
diff --git a/src/core/hle/service/audio/audren_u.cpp b/src/core/hle/service/audio/audren_u.cpp
index 6fb07c37d..13423dca6 100644
--- a/src/core/hle/service/audio/audren_u.cpp
+++ b/src/core/hle/service/audio/audren_u.cpp
@@ -52,6 +52,8 @@ public:
52 {9, &IAudioRenderer::GetRenderingTimeLimit, "GetRenderingTimeLimit"}, 52 {9, &IAudioRenderer::GetRenderingTimeLimit, "GetRenderingTimeLimit"},
53 {10, &IAudioRenderer::RequestUpdate, "RequestUpdateAuto"}, 53 {10, &IAudioRenderer::RequestUpdate, "RequestUpdateAuto"},
54 {11, nullptr, "ExecuteAudioRendererRendering"}, 54 {11, nullptr, "ExecuteAudioRendererRendering"},
55 {12, &IAudioRenderer::SetVoiceDropParameter, "SetVoiceDropParameter"},
56 {13, &IAudioRenderer::GetVoiceDropParameter, "GetVoiceDropParameter"},
55 }; 57 };
56 // clang-format on 58 // clang-format on
57 RegisterHandlers(functions); 59 RegisterHandlers(functions);
@@ -205,6 +207,30 @@ private:
205 LOG_DEBUG(Service_Audio, "called"); 207 LOG_DEBUG(Service_Audio, "called");
206 } 208 }
207 209
210 void SetVoiceDropParameter(Kernel::HLERequestContext& ctx) {
211 LOG_DEBUG(Service_Audio, "called");
212
213 IPC::RequestParser rp{ctx};
214 auto voice_drop_param{rp.Pop<f32>()};
215
216 auto& system_ = impl->GetSystem();
217 system_.SetVoiceDropParameter(voice_drop_param);
218
219 IPC::ResponseBuilder rb{ctx, 2};
220 rb.Push(ResultSuccess);
221 }
222
223 void GetVoiceDropParameter(Kernel::HLERequestContext& ctx) {
224 LOG_DEBUG(Service_Audio, "called");
225
226 auto& system_ = impl->GetSystem();
227 auto voice_drop_param{system_.GetVoiceDropParameter()};
228
229 IPC::ResponseBuilder rb{ctx, 3};
230 rb.Push(ResultSuccess);
231 rb.Push(voice_drop_param);
232 }
233
208 KernelHelpers::ServiceContext service_context; 234 KernelHelpers::ServiceContext service_context;
209 Kernel::KEvent* rendered_event; 235 Kernel::KEvent* rendered_event;
210 Manager& manager; 236 Manager& manager;
@@ -239,7 +265,7 @@ public:
239 }; 265 };
240 RegisterHandlers(functions); 266 RegisterHandlers(functions);
241 267
242 event->GetWritableEvent().Signal(); 268 event->Signal();
243 } 269 }
244 270
245 ~IAudioDevice() override { 271 ~IAudioDevice() override {
@@ -325,7 +351,7 @@ private:
325 void QueryAudioDeviceSystemEvent(Kernel::HLERequestContext& ctx) { 351 void QueryAudioDeviceSystemEvent(Kernel::HLERequestContext& ctx) {
326 LOG_DEBUG(Service_Audio, "(STUBBED) called"); 352 LOG_DEBUG(Service_Audio, "(STUBBED) called");
327 353
328 event->GetWritableEvent().Signal(); 354 event->Signal();
329 355
330 IPC::ResponseBuilder rb{ctx, 2, 1}; 356 IPC::ResponseBuilder rb{ctx, 2, 1};
331 rb.Push(ResultSuccess); 357 rb.Push(ResultSuccess);
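Note: SetVoiceDropParameter/GetVoiceDropParameter follow the standard HLE command shape: pop typed arguments off the request, forward to the renderer system, then push a result word (plus payload for getters, hence the {ctx, 3} builder size). A simplified sketch with byte-buffer stand-ins for IPC::RequestParser and IPC::ResponseBuilder:

#include <cstdint>
#include <cstring>
#include <vector>

struct RequestParser {
    const std::vector<std::uint8_t>& raw;
    std::size_t offset = 0;
    template <typename T>
    T Pop() {
        T value{};
        std::memcpy(&value, raw.data() + offset, sizeof(T));
        offset += sizeof(T);
        return value;
    }
};

struct ResponseBuilder {
    std::vector<std::uint8_t> raw;
    template <typename T>
    void Push(T value) {
        const auto* bytes = reinterpret_cast<const std::uint8_t*>(&value);
        raw.insert(raw.end(), bytes, bytes + sizeof(T));
    }
};

float voice_drop_parameter = 1.0f; // stand-in for the renderer system's state

void SetVoiceDropParameter(RequestParser& rp, ResponseBuilder& rb) {
    voice_drop_parameter = rp.Pop<float>(); // f32 in the diff
    rb.Push<std::uint32_t>(0);              // result word: success
}

void GetVoiceDropParameter(ResponseBuilder& rb) {
    rb.Push<std::uint32_t>(0);       // result word: success
    rb.Push(voice_drop_parameter);   // payload follows the result
}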
diff --git a/src/core/hle/service/bcat/backend/backend.cpp b/src/core/hle/service/bcat/backend/backend.cpp
index cd0b405ff..847f76987 100644
--- a/src/core/hle/service/bcat/backend/backend.cpp
+++ b/src/core/hle/service/bcat/backend/backend.cpp
@@ -82,7 +82,7 @@ void ProgressServiceBackend::FinishDownload(Result result) {
82} 82}
83 83
84void ProgressServiceBackend::SignalUpdate() { 84void ProgressServiceBackend::SignalUpdate() {
85 update_event->GetWritableEvent().Signal(); 85 update_event->Signal();
86} 86}
87 87
88Backend::Backend(DirectoryGetter getter) : dir_getter(std::move(getter)) {} 88Backend::Backend(DirectoryGetter getter) : dir_getter(std::move(getter)) {}
diff --git a/src/core/hle/service/friend/friend.cpp b/src/core/hle/service/friend/friend.cpp
index e0db787fc..fad532115 100644
--- a/src/core/hle/service/friend/friend.cpp
+++ b/src/core/hle/service/friend/friend.cpp
@@ -26,7 +26,7 @@ public:
26 {10101, &IFriendService::GetFriendList, "GetFriendList"}, 26 {10101, &IFriendService::GetFriendList, "GetFriendList"},
27 {10102, nullptr, "UpdateFriendInfo"}, 27 {10102, nullptr, "UpdateFriendInfo"},
28 {10110, nullptr, "GetFriendProfileImage"}, 28 {10110, nullptr, "GetFriendProfileImage"},
29 {10120, nullptr, "IsFriendListCacheAvailable"}, 29 {10120, &IFriendService::CheckFriendListAvailability, "CheckFriendListAvailability"},
30 {10121, nullptr, "EnsureFriendListAvailable"}, 30 {10121, nullptr, "EnsureFriendListAvailable"},
31 {10200, nullptr, "SendFriendRequestForApplication"}, 31 {10200, nullptr, "SendFriendRequestForApplication"},
32 {10211, nullptr, "AddFacedFriendRequestForApplication"}, 32 {10211, nullptr, "AddFacedFriendRequestForApplication"},
@@ -194,6 +194,17 @@ private:
194 // TODO(ogniK): Return a buffer of u64s which are the "NetworkServiceAccountId" 194 // TODO(ogniK): Return a buffer of u64s which are the "NetworkServiceAccountId"
195 } 195 }
196 196
197 void CheckFriendListAvailability(Kernel::HLERequestContext& ctx) {
198 IPC::RequestParser rp{ctx};
199 const auto uuid{rp.PopRaw<Common::UUID>()};
200
201 LOG_WARNING(Service_Friend, "(STUBBED) called, uuid=0x{}", uuid.RawString());
202
203 IPC::ResponseBuilder rb{ctx, 3};
204 rb.Push(ResultSuccess);
205 rb.Push(true);
206 }
207
197 KernelHelpers::ServiceContext service_context; 208 KernelHelpers::ServiceContext service_context;
198 209
199 Kernel::KEvent* completion_event; 210 Kernel::KEvent* completion_event;
diff --git a/src/core/hle/service/hid/controllers/npad.cpp b/src/core/hle/service/hid/controllers/npad.cpp
index f8972ec7a..ba8a1f786 100644
--- a/src/core/hle/service/hid/controllers/npad.cpp
+++ b/src/core/hle/service/hid/controllers/npad.cpp
@@ -16,7 +16,6 @@
16#include "core/hid/hid_core.h" 16#include "core/hid/hid_core.h"
17#include "core/hle/kernel/k_event.h" 17#include "core/hle/kernel/k_event.h"
18#include "core/hle/kernel/k_readable_event.h" 18#include "core/hle/kernel/k_readable_event.h"
19#include "core/hle/kernel/k_writable_event.h"
20#include "core/hle/service/hid/controllers/npad.h" 19#include "core/hle/service/hid/controllers/npad.h"
21#include "core/hle/service/hid/errors.h" 20#include "core/hle/service/hid/errors.h"
22#include "core/hle/service/kernel_helpers.h" 21#include "core/hle/service/kernel_helpers.h"
@@ -167,7 +166,7 @@ void Controller_NPad::InitNewlyAddedController(Core::HID::NpadIdType npad_id) {
167 const auto& battery_level = controller.device->GetBattery(); 166 const auto& battery_level = controller.device->GetBattery();
168 auto* shared_memory = controller.shared_memory; 167 auto* shared_memory = controller.shared_memory;
169 if (controller_type == Core::HID::NpadStyleIndex::None) { 168 if (controller_type == Core::HID::NpadStyleIndex::None) {
170 controller.styleset_changed_event->GetWritableEvent().Signal(); 169 controller.styleset_changed_event->Signal();
171 return; 170 return;
172 } 171 }
173 172
@@ -746,8 +745,9 @@ void Controller_NPad::SetSupportedNpadIdTypes(u8* data, std::size_t length) {
746} 745}
747 746
748void Controller_NPad::GetSupportedNpadIdTypes(u32* data, std::size_t max_length) { 747void Controller_NPad::GetSupportedNpadIdTypes(u32* data, std::size_t max_length) {
749 ASSERT(max_length < supported_npad_id_types.size()); 748 const auto copy_amount = supported_npad_id_types.size() * sizeof(u32);
750 std::memcpy(data, supported_npad_id_types.data(), supported_npad_id_types.size()); 749 ASSERT(max_length <= copy_amount);
750 std::memcpy(data, supported_npad_id_types.data(), copy_amount);
751} 751}
752 752
753std::size_t Controller_NPad::GetSupportedNpadIdTypesSize() const { 753std::size_t Controller_NPad::GetSupportedNpadIdTypesSize() const {
@@ -1033,7 +1033,7 @@ Kernel::KReadableEvent& Controller_NPad::GetStyleSetChangedEvent(Core::HID::Npad
1033 1033
1034void Controller_NPad::SignalStyleSetChangedEvent(Core::HID::NpadIdType npad_id) const { 1034void Controller_NPad::SignalStyleSetChangedEvent(Core::HID::NpadIdType npad_id) const {
1035 const auto& controller = GetControllerFromNpadIdType(npad_id); 1035 const auto& controller = GetControllerFromNpadIdType(npad_id);
1036 controller.styleset_changed_event->GetWritableEvent().Signal(); 1036 controller.styleset_changed_event->Signal();
1037} 1037}
1038 1038
1039void Controller_NPad::AddNewControllerAt(Core::HID::NpadStyleIndex controller, 1039void Controller_NPad::AddNewControllerAt(Core::HID::NpadStyleIndex controller,
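Note: the GetSupportedNpadIdTypes fix above is about units: memcpy takes a byte count, and the old code passed the element count, truncating the u32 table to a quarter of its size. A sketch of the corrected arithmetic (CopySupportedIds is an illustrative name; the diff guards capacity with an ASSERT rather than a branch):

#include <cstdint>
#include <cstring>
#include <vector>

// Copy a u32 id table into a caller buffer. The copy size must be in bytes,
// i.e. element count * sizeof(u32) -- the quantity the fix computes.
void CopySupportedIds(std::uint32_t* out, std::size_t out_capacity_bytes,
                      const std::vector<std::uint32_t>& ids) {
    const std::size_t copy_bytes = ids.size() * sizeof(std::uint32_t);
    if (copy_bytes > out_capacity_bytes) {
        return; // caller buffer too small
    }
    std::memcpy(out, ids.data(), copy_bytes);
}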
diff --git a/src/core/hle/service/hid/controllers/palma.cpp b/src/core/hle/service/hid/controllers/palma.cpp
index 575d4e626..4564ea1e2 100644
--- a/src/core/hle/service/hid/controllers/palma.cpp
+++ b/src/core/hle/service/hid/controllers/palma.cpp
@@ -73,7 +73,7 @@ Result Controller_Palma::PlayPalmaActivity(const PalmaConnectionHandle& handle,
73 operation.operation = PalmaOperationType::PlayActivity; 73 operation.operation = PalmaOperationType::PlayActivity;
74 operation.result = PalmaResultSuccess; 74 operation.result = PalmaResultSuccess;
75 operation.data = {}; 75 operation.data = {};
76 operation_complete_event->GetWritableEvent().Signal(); 76 operation_complete_event->Signal();
77 return ResultSuccess; 77 return ResultSuccess;
78} 78}
79 79
@@ -93,7 +93,7 @@ Result Controller_Palma::ReadPalmaStep(const PalmaConnectionHandle& handle) {
93 operation.operation = PalmaOperationType::ReadStep; 93 operation.operation = PalmaOperationType::ReadStep;
94 operation.result = PalmaResultSuccess; 94 operation.result = PalmaResultSuccess;
95 operation.data = {}; 95 operation.data = {};
96 operation_complete_event->GetWritableEvent().Signal(); 96 operation_complete_event->Signal();
97 return ResultSuccess; 97 return ResultSuccess;
98} 98}
99 99
@@ -122,7 +122,7 @@ Result Controller_Palma::ReadPalmaUniqueCode(const PalmaConnectionHandle& handle
122 operation.operation = PalmaOperationType::ReadUniqueCode; 122 operation.operation = PalmaOperationType::ReadUniqueCode;
123 operation.result = PalmaResultSuccess; 123 operation.result = PalmaResultSuccess;
124 operation.data = {}; 124 operation.data = {};
125 operation_complete_event->GetWritableEvent().Signal(); 125 operation_complete_event->Signal();
126 return ResultSuccess; 126 return ResultSuccess;
127} 127}
128 128
@@ -133,7 +133,7 @@ Result Controller_Palma::SetPalmaUniqueCodeInvalid(const PalmaConnectionHandle&
133 operation.operation = PalmaOperationType::SetUniqueCodeInvalid; 133 operation.operation = PalmaOperationType::SetUniqueCodeInvalid;
134 operation.result = PalmaResultSuccess; 134 operation.result = PalmaResultSuccess;
135 operation.data = {}; 135 operation.data = {};
136 operation_complete_event->GetWritableEvent().Signal(); 136 operation_complete_event->Signal();
137 return ResultSuccess; 137 return ResultSuccess;
138} 138}
139 139
@@ -147,7 +147,7 @@ Result Controller_Palma::WritePalmaRgbLedPatternEntry(const PalmaConnectionHandl
147 operation.operation = PalmaOperationType::WriteRgbLedPatternEntry; 147 operation.operation = PalmaOperationType::WriteRgbLedPatternEntry;
148 operation.result = PalmaResultSuccess; 148 operation.result = PalmaResultSuccess;
149 operation.data = {}; 149 operation.data = {};
150 operation_complete_event->GetWritableEvent().Signal(); 150 operation_complete_event->Signal();
151 return ResultSuccess; 151 return ResultSuccess;
152} 152}
153 153
@@ -159,7 +159,7 @@ Result Controller_Palma::WritePalmaWaveEntry(const PalmaConnectionHandle& handle
159 operation.operation = PalmaOperationType::WriteWaveEntry; 159 operation.operation = PalmaOperationType::WriteWaveEntry;
160 operation.result = PalmaResultSuccess; 160 operation.result = PalmaResultSuccess;
161 operation.data = {}; 161 operation.data = {};
162 operation_complete_event->GetWritableEvent().Signal(); 162 operation_complete_event->Signal();
163 return ResultSuccess; 163 return ResultSuccess;
164} 164}
165 165
@@ -172,7 +172,7 @@ Result Controller_Palma::SetPalmaDataBaseIdentificationVersion(const PalmaConnec
172 operation.operation = PalmaOperationType::ReadDataBaseIdentificationVersion; 172 operation.operation = PalmaOperationType::ReadDataBaseIdentificationVersion;
173 operation.result = PalmaResultSuccess; 173 operation.result = PalmaResultSuccess;
174 operation.data[0] = {}; 174 operation.data[0] = {};
175 operation_complete_event->GetWritableEvent().Signal(); 175 operation_complete_event->Signal();
176 return ResultSuccess; 176 return ResultSuccess;
177} 177}
178 178
@@ -185,7 +185,7 @@ Result Controller_Palma::GetPalmaDataBaseIdentificationVersion(
185 operation.result = PalmaResultSuccess; 185 operation.result = PalmaResultSuccess;
186 operation.data = {}; 186 operation.data = {};
187 operation.data[0] = static_cast<u8>(database_id_version); 187 operation.data[0] = static_cast<u8>(database_id_version);
188 operation_complete_event->GetWritableEvent().Signal(); 188 operation_complete_event->Signal();
189 return ResultSuccess; 189 return ResultSuccess;
190} 190}
191 191
diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp
index 46bad7871..79375bd2f 100644
--- a/src/core/hle/service/hid/hid.cpp
+++ b/src/core/hle/service/hid/hid.cpp
@@ -2118,7 +2118,7 @@ void Hid::WritePalmaWaveEntry(Kernel::HLERequestContext& ctx) {
2118 ASSERT_MSG(t_mem->GetSize() == 0x3000, "t_mem has incorrect size"); 2118 ASSERT_MSG(t_mem->GetSize() == 0x3000, "t_mem has incorrect size");
2119 2119
2120 LOG_WARNING(Service_HID, 2120 LOG_WARNING(Service_HID,
2121 "(STUBBED) called, connection_handle={}, wave_set={}, unkown={}, " 2121 "(STUBBED) called, connection_handle={}, wave_set={}, unknown={}, "
2122 "t_mem_handle=0x{:08X}, t_mem_size={}, size={}", 2122 "t_mem_handle=0x{:08X}, t_mem_size={}, size={}",
2123 connection_handle.npad_id, wave_set, unknown, t_mem_handle, t_mem_size, size); 2123 connection_handle.npad_id, wave_set, unknown, t_mem_handle, t_mem_size, size);
2124 2124
diff --git a/src/core/hle/service/hid/hidbus/ringcon.cpp b/src/core/hle/service/hid/hidbus/ringcon.cpp
index ad223d649..57f1a2a26 100644
--- a/src/core/hle/service/hid/hidbus/ringcon.cpp
+++ b/src/core/hle/service/hid/hidbus/ringcon.cpp
@@ -131,12 +131,12 @@ bool RingController::SetCommand(const std::vector<u8>& data) {
131 case RingConCommands::ReadRepCount: 131 case RingConCommands::ReadRepCount:
132 case RingConCommands::ReadTotalPushCount: 132 case RingConCommands::ReadTotalPushCount:
133 ASSERT_MSG(data.size() == 0x4, "data.size is not 0x4 bytes"); 133 ASSERT_MSG(data.size() == 0x4, "data.size is not 0x4 bytes");
134 send_command_async_event->GetWritableEvent().Signal(); 134 send_command_async_event->Signal();
135 return true; 135 return true;
136 case RingConCommands::ResetRepCount: 136 case RingConCommands::ResetRepCount:
137 ASSERT_MSG(data.size() == 0x4, "data.size is not 0x4 bytes"); 137 ASSERT_MSG(data.size() == 0x4, "data.size is not 0x4 bytes");
138 total_rep_count = 0; 138 total_rep_count = 0;
139 send_command_async_event->GetWritableEvent().Signal(); 139 send_command_async_event->Signal();
140 return true; 140 return true;
141 case RingConCommands::SaveCalData: { 141 case RingConCommands::SaveCalData: {
142 ASSERT_MSG(data.size() == 0x14, "data.size is not 0x14 bytes"); 142 ASSERT_MSG(data.size() == 0x14, "data.size is not 0x14 bytes");
@@ -144,14 +144,14 @@ bool RingController::SetCommand(const std::vector<u8>& data) {
144 SaveCalData save_info{}; 144 SaveCalData save_info{};
145 std::memcpy(&save_info, data.data(), sizeof(SaveCalData)); 145 std::memcpy(&save_info, data.data(), sizeof(SaveCalData));
146 user_calibration = save_info.calibration; 146 user_calibration = save_info.calibration;
147 send_command_async_event->GetWritableEvent().Signal(); 147 send_command_async_event->Signal();
148 return true; 148 return true;
149 } 149 }
150 default: 150 default:
151 LOG_ERROR(Service_HID, "Command not implemented {}", command); 151 LOG_ERROR(Service_HID, "Command not implemented {}", command);
152 command = RingConCommands::Error; 152 command = RingConCommands::Error;
153 // Signal a reply to avoid softlocking the game 153 // Signal a reply to avoid softlocking the game
154 send_command_async_event->GetWritableEvent().Signal(); 154 send_command_async_event->Signal();
155 return false; 155 return false;
156 } 156 }
157} 157}
diff --git a/src/core/hle/service/hid/irsensor/pointing_processor.h b/src/core/hle/service/hid/irsensor/pointing_processor.h
index cf4930794..d63423aff 100644
--- a/src/core/hle/service/hid/irsensor/pointing_processor.h
+++ b/src/core/hle/service/hid/irsensor/pointing_processor.h
@@ -37,10 +37,10 @@ private:
37 u8 pointing_status; 37 u8 pointing_status;
38 INSERT_PADDING_BYTES(3); 38 INSERT_PADDING_BYTES(3);
39 u32 unknown; 39 u32 unknown;
40 float unkown_float1; 40 float unknown_float1;
41 float position_x; 41 float position_x;
42 float position_y; 42 float position_y;
43 float unkown_float2; 43 float unknown_float2;
44 Core::IrSensor::IrsRect window_of_interest; 44 Core::IrSensor::IrsRect window_of_interest;
45 }; 45 };
46 static_assert(sizeof(PointingProcessorMarkerData) == 0x20, 46 static_assert(sizeof(PointingProcessorMarkerData) == 0x20,
diff --git a/src/core/hle/service/kernel_helpers.cpp b/src/core/hle/service/kernel_helpers.cpp
index 3e317367b..af133af93 100644
--- a/src/core/hle/service/kernel_helpers.cpp
+++ b/src/core/hle/service/kernel_helpers.cpp
@@ -9,7 +9,6 @@
9#include "core/hle/kernel/k_readable_event.h" 9#include "core/hle/kernel/k_readable_event.h"
10#include "core/hle/kernel/k_resource_limit.h" 10#include "core/hle/kernel/k_resource_limit.h"
11#include "core/hle/kernel/k_scoped_resource_reservation.h" 11#include "core/hle/kernel/k_scoped_resource_reservation.h"
12#include "core/hle/kernel/k_writable_event.h"
13#include "core/hle/service/kernel_helpers.h" 12#include "core/hle/service/kernel_helpers.h"
14 13
15namespace Service::KernelHelpers { 14namespace Service::KernelHelpers {
@@ -46,7 +45,7 @@ Kernel::KEvent* ServiceContext::CreateEvent(std::string&& name) {
46 } 45 }
47 46
48 // Initialize the event. 47 // Initialize the event.
49 event->Initialize(std::move(name), process); 48 event->Initialize(process);
50 49
51 // Commit the thread reservation. 50 // Commit the thread reservation.
52 event_reservation.Commit(); 51 event_reservation.Commit();
@@ -59,7 +58,7 @@ Kernel::KEvent* ServiceContext::CreateEvent(std::string&& name) {
59 58
60void ServiceContext::CloseEvent(Kernel::KEvent* event) { 59void ServiceContext::CloseEvent(Kernel::KEvent* event) {
61 event->GetReadableEvent().Close(); 60 event->GetReadableEvent().Close();
62 event->GetWritableEvent().Close(); 61 event->Close();
63} 62}
64 63
65} // namespace Service::KernelHelpers 64} // namespace Service::KernelHelpers
diff --git a/src/core/hle/service/ldn/ldn.cpp b/src/core/hle/service/ldn/ldn.cpp
index ea3e7e55a..6df563136 100644
--- a/src/core/hle/service/ldn/ldn.cpp
+++ b/src/core/hle/service/ldn/ldn.cpp
@@ -165,7 +165,7 @@ public:
165 } 165 }
166 166
167 void OnEventFired() { 167 void OnEventFired() {
168 state_change_event->GetWritableEvent().Signal(); 168 state_change_event->Signal();
169 } 169 }
170 170
171 void GetState(Kernel::HLERequestContext& ctx) { 171 void GetState(Kernel::HLERequestContext& ctx) {
diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp
index becd6d1b9..652441bc2 100644
--- a/src/core/hle/service/ldr/ldr.cpp
+++ b/src/core/hle/service/ldr/ldr.cpp
@@ -290,7 +290,7 @@ public:
290 const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize}; 290 const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize};
291 const auto start_info{page_table.QueryInfo(start - 1)}; 291 const auto start_info{page_table.QueryInfo(start - 1)};
292 292
293 if (start_info.state != Kernel::KMemoryState::Free) { 293 if (start_info.GetState() != Kernel::KMemoryState::Free) {
294 return {}; 294 return {};
295 } 295 }
296 296
@@ -300,7 +300,7 @@ public:
300 300
301 const auto end_info{page_table.QueryInfo(start + size)}; 301 const auto end_info{page_table.QueryInfo(start + size)};
302 302
303 if (end_info.state != Kernel::KMemoryState::Free) { 303 if (end_info.GetState() != Kernel::KMemoryState::Free) {
304 return {}; 304 return {};
305 } 305 }
306 306
diff --git a/src/core/hle/service/nfp/nfp_device.cpp b/src/core/hle/service/nfp/nfp_device.cpp
index ec895ac01..76f8a267a 100644
--- a/src/core/hle/service/nfp/nfp_device.cpp
+++ b/src/core/hle/service/nfp/nfp_device.cpp
@@ -58,7 +58,7 @@ NfpDevice::~NfpDevice() {
58void NfpDevice::NpadUpdate(Core::HID::ControllerTriggerType type) { 58void NfpDevice::NpadUpdate(Core::HID::ControllerTriggerType type) {
59 if (type == Core::HID::ControllerTriggerType::Connected || 59 if (type == Core::HID::ControllerTriggerType::Connected ||
60 type == Core::HID::ControllerTriggerType::Disconnected) { 60 type == Core::HID::ControllerTriggerType::Disconnected) {
61 availability_change_event->GetWritableEvent().Signal(); 61 availability_change_event->Signal();
62 return; 62 return;
63 } 63 }
64 64
@@ -100,7 +100,7 @@ bool NfpDevice::LoadAmiibo(std::span<const u8> data) {
100 100
101 device_state = DeviceState::TagFound; 101 device_state = DeviceState::TagFound;
102 deactivate_event->GetReadableEvent().Clear(); 102 deactivate_event->GetReadableEvent().Clear();
103 activate_event->GetWritableEvent().Signal(); 103 activate_event->Signal();
104 return true; 104 return true;
105} 105}
106 106
@@ -115,7 +115,7 @@ void NfpDevice::CloseAmiibo() {
115 encrypted_tag_data = {}; 115 encrypted_tag_data = {};
116 tag_data = {}; 116 tag_data = {};
117 activate_event->GetReadableEvent().Clear(); 117 activate_event->GetReadableEvent().Clear();
118 deactivate_event->GetWritableEvent().Signal(); 118 deactivate_event->Signal();
119} 119}
120 120
121Kernel::KReadableEvent& NfpDevice::GetActivateEvent() const { 121Kernel::KReadableEvent& NfpDevice::GetActivateEvent() const {
diff --git a/src/core/hle/service/nim/nim.cpp b/src/core/hle/service/nim/nim.cpp
index b2bb7426d..5a8a91e0b 100644
--- a/src/core/hle/service/nim/nim.cpp
+++ b/src/core/hle/service/nim/nim.cpp
@@ -328,7 +328,7 @@ private:
328 void StartTask(Kernel::HLERequestContext& ctx) { 328 void StartTask(Kernel::HLERequestContext& ctx) {
329 // No need to connect to the internet, just finish the task straight away. 329 // No need to connect to the internet, just finish the task straight away.
330 LOG_DEBUG(Service_NIM, "called"); 330 LOG_DEBUG(Service_NIM, "called");
331 finished_event->GetWritableEvent().Signal(); 331 finished_event->Signal();
332 IPC::ResponseBuilder rb{ctx, 2}; 332 IPC::ResponseBuilder rb{ctx, 2};
333 rb.Push(ResultSuccess); 333 rb.Push(ResultSuccess);
334 } 334 }
@@ -350,7 +350,7 @@ private:
350 350
351 void Cancel(Kernel::HLERequestContext& ctx) { 351 void Cancel(Kernel::HLERequestContext& ctx) {
352 LOG_DEBUG(Service_NIM, "called"); 352 LOG_DEBUG(Service_NIM, "called");
353 finished_event->GetWritableEvent().Clear(); 353 finished_event->Clear();
354 IPC::ResponseBuilder rb{ctx, 2}; 354 IPC::ResponseBuilder rb{ctx, 2};
355 rb.Push(ResultSuccess); 355 rb.Push(ResultSuccess);
356 } 356 }
diff --git a/src/core/hle/service/ns/ns.cpp b/src/core/hle/service/ns/ns.cpp
index f7318c3cb..f59a1a63d 100644
--- a/src/core/hle/service/ns/ns.cpp
+++ b/src/core/hle/service/ns/ns.cpp
@@ -8,6 +8,7 @@
8#include "core/file_sys/patch_manager.h" 8#include "core/file_sys/patch_manager.h"
9#include "core/file_sys/vfs.h" 9#include "core/file_sys/vfs.h"
10#include "core/hle/ipc_helpers.h" 10#include "core/hle/ipc_helpers.h"
11#include "core/hle/service/glue/glue_manager.h"
11#include "core/hle/service/ns/errors.h" 12#include "core/hle/service/ns/errors.h"
12#include "core/hle/service/ns/iplatform_service_manager.h" 13#include "core/hle/service/ns/iplatform_service_manager.h"
13#include "core/hle/service/ns/language.h" 14#include "core/hle/service/ns/language.h"
@@ -581,7 +582,7 @@ IReadOnlyApplicationControlDataInterface::IReadOnlyApplicationControlDataInterfa
581 : ServiceFramework{system_, "IReadOnlyApplicationControlDataInterface"} { 582 : ServiceFramework{system_, "IReadOnlyApplicationControlDataInterface"} {
582 // clang-format off 583 // clang-format off
583 static const FunctionInfo functions[] = { 584 static const FunctionInfo functions[] = {
584 {0, nullptr, "GetApplicationControlData"}, 585 {0, &IReadOnlyApplicationControlDataInterface::GetApplicationControlData, "GetApplicationControlData"},
585 {1, nullptr, "GetApplicationDesiredLanguage"}, 586 {1, nullptr, "GetApplicationDesiredLanguage"},
586 {2, nullptr, "ConvertApplicationLanguageToLanguageCode"}, 587 {2, nullptr, "ConvertApplicationLanguageToLanguageCode"},
587 {3, nullptr, "ConvertLanguageCodeToApplicationLanguage"}, 588 {3, nullptr, "ConvertLanguageCodeToApplicationLanguage"},
@@ -594,6 +595,33 @@ IReadOnlyApplicationControlDataInterface::IReadOnlyApplicationControlDataInterfa
594 595
595IReadOnlyApplicationControlDataInterface::~IReadOnlyApplicationControlDataInterface() = default; 596IReadOnlyApplicationControlDataInterface::~IReadOnlyApplicationControlDataInterface() = default;
596 597
598void IReadOnlyApplicationControlDataInterface::GetApplicationControlData(
599 Kernel::HLERequestContext& ctx) {
600 enum class ApplicationControlSource : u8 {
601 CacheOnly,
602 Storage,
603 StorageOnly,
604 };
605
606 struct RequestParameters {
607 ApplicationControlSource source;
608 u64 application_id;
609 };
610 static_assert(sizeof(RequestParameters) == 0x10, "RequestParameters has incorrect size.");
611
612 IPC::RequestParser rp{ctx};
613 const auto parameters{rp.PopRaw<RequestParameters>()};
614 const auto nacp_data{system.GetARPManager().GetControlProperty(parameters.application_id)};
615 const auto result = nacp_data ? ResultSuccess : ResultUnknown;
616
617 if (nacp_data) {
618 ctx.WriteBuffer(nacp_data->data(), nacp_data->size());
619 }
620
621 IPC::ResponseBuilder rb{ctx, 2};
622 rb.Push(result);
623}
624
597NS::NS(const char* name, Core::System& system_) : ServiceFramework{system_, name} { 625NS::NS(const char* name, Core::System& system_) : ServiceFramework{system_, name} {
598 // clang-format off 626 // clang-format off
599 static const FunctionInfo functions[] = { 627 static const FunctionInfo functions[] = {
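Note: the static_assert in GetApplicationControlData holds because of alignment: the one-byte source enum is padded so the u64 application_id starts at offset 8, giving 0x10 bytes total, which must match what the client serialized. The layout in isolation:

#include <cstdint>

enum class ApplicationControlSource : std::uint8_t {
    CacheOnly,
    Storage,
    StorageOnly,
};

struct RequestParameters {
    ApplicationControlSource source; // offset 0x0, then 7 bytes of implicit padding
    std::uint64_t application_id;    // offset 0x8, naturally aligned
};
static_assert(sizeof(RequestParameters) == 0x10, "RequestParameters has incorrect size.");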
diff --git a/src/core/hle/service/ns/ns.h b/src/core/hle/service/ns/ns.h
index 4dc191518..9c18e935c 100644
--- a/src/core/hle/service/ns/ns.h
+++ b/src/core/hle/service/ns/ns.h
@@ -78,6 +78,9 @@ class IReadOnlyApplicationControlDataInterface final
78public: 78public:
79 explicit IReadOnlyApplicationControlDataInterface(Core::System& system_); 79 explicit IReadOnlyApplicationControlDataInterface(Core::System& system_);
80 ~IReadOnlyApplicationControlDataInterface() override; 80 ~IReadOnlyApplicationControlDataInterface() override;
81
82private:
83 void GetApplicationControlData(Kernel::HLERequestContext& ctx);
81}; 84};
82 85
83class NS final : public ServiceFramework<NS> { 86class NS final : public ServiceFramework<NS> {
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
index 6411dbf43..b635e6ed1 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
@@ -311,7 +311,8 @@ NvResult nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& out
311 handle->address + 311 handle->address +
312 (static_cast<u64>(entry.handle_offset_big_pages) << vm.big_page_size_bits))}; 312 (static_cast<u64>(entry.handle_offset_big_pages) << vm.big_page_size_bits))};
313 313
314 gmmu->Map(virtual_address, cpu_address, size, use_big_pages); 314 gmmu->Map(virtual_address, cpu_address, size, static_cast<Tegra::PTEKind>(entry.kind),
315 use_big_pages);
315 } 316 }
316 } 317 }
317 318
@@ -350,7 +351,8 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
350 u64 gpu_address{static_cast<u64>(params.offset + params.buffer_offset)}; 351 u64 gpu_address{static_cast<u64>(params.offset + params.buffer_offset)};
351 VAddr cpu_address{mapping->ptr + params.buffer_offset}; 352 VAddr cpu_address{mapping->ptr + params.buffer_offset};
352 353
353 gmmu->Map(gpu_address, cpu_address, params.mapping_size, mapping->big_page); 354 gmmu->Map(gpu_address, cpu_address, params.mapping_size,
355 static_cast<Tegra::PTEKind>(params.kind), mapping->big_page);
354 356
355 return NvResult::Success; 357 return NvResult::Success;
356 } catch (const std::out_of_range&) { 358 } catch (const std::out_of_range&) {
@@ -389,7 +391,8 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
389 } 391 }
390 392
391 const bool use_big_pages = alloc->second.big_pages && big_page; 393 const bool use_big_pages = alloc->second.big_pages && big_page;
392 gmmu->Map(params.offset, cpu_address, size, use_big_pages); 394 gmmu->Map(params.offset, cpu_address, size, static_cast<Tegra::PTEKind>(params.kind),
395 use_big_pages);
393 396
394 auto mapping{std::make_shared<Mapping>(cpu_address, params.offset, size, true, 397 auto mapping{std::make_shared<Mapping>(cpu_address, params.offset, size, true,
395 use_big_pages, alloc->second.sparse)}; 398 use_big_pages, alloc->second.sparse)};
@@ -409,7 +412,8 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
409 return NvResult::InsufficientMemory; 412 return NvResult::InsufficientMemory;
410 } 413 }
411 414
412 gmmu->Map(params.offset, cpu_address, Common::AlignUp(size, page_size), big_page); 415 gmmu->Map(params.offset, cpu_address, Common::AlignUp(size, page_size),
416 static_cast<Tegra::PTEKind>(params.kind), big_page);
413 417
414 auto mapping{ 418 auto mapping{
415 std::make_shared<Mapping>(cpu_address, params.offset, size, false, big_page, false)}; 419 std::make_shared<Mapping>(cpu_address, params.offset, size, false, big_page, false)};
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
index 5bee4a3d3..eee11fab8 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
@@ -12,7 +12,6 @@
12#include "common/scope_exit.h" 12#include "common/scope_exit.h"
13#include "core/core.h" 13#include "core/core.h"
14#include "core/hle/kernel/k_event.h" 14#include "core/hle/kernel/k_event.h"
15#include "core/hle/kernel/k_writable_event.h"
16#include "core/hle/service/nvdrv/core/container.h" 15#include "core/hle/service/nvdrv/core/container.h"
17#include "core/hle/service/nvdrv/core/syncpoint_manager.h" 16#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
18#include "core/hle/service/nvdrv/devices/nvhost_ctrl.h" 17#include "core/hle/service/nvdrv/devices/nvhost_ctrl.h"
@@ -206,7 +205,7 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector
206 auto& event_ = events[slot]; 205 auto& event_ = events[slot];
207 if (event_.status.exchange(EventState::Signalling, std::memory_order_acq_rel) == 206 if (event_.status.exchange(EventState::Signalling, std::memory_order_acq_rel) ==
208 EventState::Waiting) { 207 EventState::Waiting) {
209 event_.kevent->GetWritableEvent().Signal(); 208 event_.kevent->Signal();
210 } 209 }
211 event_.status.store(EventState::Signalled, std::memory_order_release); 210 event_.status.store(EventState::Signalled, std::memory_order_release);
212 }); 211 });
@@ -306,7 +305,7 @@ NvResult nvhost_ctrl::IocCtrlClearEventWait(const std::vector<u8>& input, std::v
306 } 305 }
307 event.fails++; 306 event.fails++;
308 event.status.store(EventState::Cancelled, std::memory_order_release); 307 event.status.store(EventState::Cancelled, std::memory_order_release);
309 event.kevent->GetWritableEvent().Clear(); 308 event.kevent->Clear();
310 309
311 return NvResult::Success; 310 return NvResult::Success;
312} 311}
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp
index ddf273b5e..b60679021 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp
@@ -128,7 +128,8 @@ NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output)
128 } 128 }
129 ASSERT(system.CurrentProcess() 129 ASSERT(system.CurrentProcess()
130 ->PageTable() 130 ->PageTable()
131 .LockForDeviceAddressSpace(handle_description->address, handle_description->size) 131 .LockForMapDeviceAddressSpace(handle_description->address, handle_description->size,
132 Kernel::KMemoryPermission::None, true)
132 .IsSuccess()); 133 .IsSuccess());
133 std::memcpy(output.data(), &params, sizeof(params)); 134 std::memcpy(output.data(), &params, sizeof(params));
134 return result; 135 return result;
diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp
index 5e7b7468f..9d9924395 100644
--- a/src/core/hle/service/nvdrv/nvdrv.cpp
+++ b/src/core/hle/service/nvdrv/nvdrv.cpp
@@ -8,7 +8,6 @@
8#include "core/core.h" 8#include "core/core.h"
9#include "core/hle/ipc_helpers.h" 9#include "core/hle/ipc_helpers.h"
10#include "core/hle/kernel/k_event.h" 10#include "core/hle/kernel/k_event.h"
11#include "core/hle/kernel/k_writable_event.h"
12#include "core/hle/service/nvdrv/core/container.h" 11#include "core/hle/service/nvdrv/core/container.h"
13#include "core/hle/service/nvdrv/devices/nvdevice.h" 12#include "core/hle/service/nvdrv/devices/nvdevice.h"
14#include "core/hle/service/nvdrv/devices/nvdisp_disp0.h" 13#include "core/hle/service/nvdrv/devices/nvdisp_disp0.h"
diff --git a/src/core/hle/service/nvdrv/nvdrv_interface.h b/src/core/hle/service/nvdrv/nvdrv_interface.h
index cd58a4f35..5ac06ee30 100644
--- a/src/core/hle/service/nvdrv/nvdrv_interface.h
+++ b/src/core/hle/service/nvdrv/nvdrv_interface.h
@@ -7,10 +7,6 @@
7#include "core/hle/service/nvdrv/nvdrv.h" 7#include "core/hle/service/nvdrv/nvdrv.h"
8#include "core/hle/service/service.h" 8#include "core/hle/service/service.h"
9 9
10namespace Kernel {
11class KWritableEvent;
12}
13
14namespace Service::Nvidia { 10namespace Service::Nvidia {
15 11
16class NVDRV final : public ServiceFramework<NVDRV> { 12class NVDRV final : public ServiceFramework<NVDRV> {
diff --git a/src/core/hle/service/nvflinger/buffer_queue_producer.cpp b/src/core/hle/service/nvflinger/buffer_queue_producer.cpp
index d4ab23a10..77ddbb6ef 100644
--- a/src/core/hle/service/nvflinger/buffer_queue_producer.cpp
+++ b/src/core/hle/service/nvflinger/buffer_queue_producer.cpp
@@ -11,7 +11,6 @@
11#include "core/hle/kernel/hle_ipc.h" 11#include "core/hle/kernel/hle_ipc.h"
12#include "core/hle/kernel/k_event.h" 12#include "core/hle/kernel/k_event.h"
13#include "core/hle/kernel/k_readable_event.h" 13#include "core/hle/kernel/k_readable_event.h"
14#include "core/hle/kernel/k_writable_event.h"
15#include "core/hle/kernel/kernel.h" 14#include "core/hle/kernel/kernel.h"
16#include "core/hle/service/kernel_helpers.h" 15#include "core/hle/service/kernel_helpers.h"
17#include "core/hle/service/nvdrv/core/nvmap.h" 16#include "core/hle/service/nvdrv/core/nvmap.h"
@@ -110,7 +109,7 @@ Status BufferQueueProducer::SetBufferCount(s32 buffer_count) {
110 109
111 core->override_max_buffer_count = buffer_count; 110 core->override_max_buffer_count = buffer_count;
112 core->SignalDequeueCondition(); 111 core->SignalDequeueCondition();
113 buffer_wait_event->GetWritableEvent().Signal(); 112 buffer_wait_event->Signal();
114 listener = core->consumer_listener; 113 listener = core->consumer_listener;
115 } 114 }
116 115
@@ -623,7 +622,7 @@ void BufferQueueProducer::CancelBuffer(s32 slot, const Fence& fence) {
623 slots[slot].fence = fence; 622 slots[slot].fence = fence;
624 623
625 core->SignalDequeueCondition(); 624 core->SignalDequeueCondition();
626 buffer_wait_event->GetWritableEvent().Signal(); 625 buffer_wait_event->Signal();
627} 626}
628 627
629Status BufferQueueProducer::Query(NativeWindow what, s32* out_value) { 628Status BufferQueueProducer::Query(NativeWindow what, s32* out_value) {
@@ -753,7 +752,7 @@ Status BufferQueueProducer::Disconnect(NativeWindowApi api) {
753 core->connected_producer_listener = nullptr; 752 core->connected_producer_listener = nullptr;
754 core->connected_api = NativeWindowApi::NoConnectedApi; 753 core->connected_api = NativeWindowApi::NoConnectedApi;
755 core->SignalDequeueCondition(); 754 core->SignalDequeueCondition();
756 buffer_wait_event->GetWritableEvent().Signal(); 755 buffer_wait_event->Signal();
757 listener = core->consumer_listener; 756 listener = core->consumer_listener;
758 } else { 757 } else {
759 LOG_ERROR(Service_NVFlinger, "still connected to another api (cur = {} req = {})", 758 LOG_ERROR(Service_NVFlinger, "still connected to another api (cur = {} req = {})",
@@ -802,7 +801,7 @@ Status BufferQueueProducer::SetPreallocatedBuffer(s32 slot,
802 } 801 }
803 802
804 core->SignalDequeueCondition(); 803 core->SignalDequeueCondition();
805 buffer_wait_event->GetWritableEvent().Signal(); 804 buffer_wait_event->Signal();
806 805
807 return Status::NoError; 806 return Status::NoError;
808} 807}
diff --git a/src/core/hle/service/nvflinger/buffer_queue_producer.h b/src/core/hle/service/nvflinger/buffer_queue_producer.h
index 0ba03a568..7526bf8ec 100644
--- a/src/core/hle/service/nvflinger/buffer_queue_producer.h
+++ b/src/core/hle/service/nvflinger/buffer_queue_producer.h
@@ -24,7 +24,6 @@ namespace Kernel {
24class KernelCore; 24class KernelCore;
25class KEvent; 25class KEvent;
26class KReadableEvent; 26class KReadableEvent;
27class KWritableEvent;
28} // namespace Kernel 27} // namespace Kernel
29 28
30namespace Service::KernelHelpers { 29namespace Service::KernelHelpers {
diff --git a/src/core/hle/service/nvflinger/nvflinger.h b/src/core/hle/service/nvflinger/nvflinger.h
index b62615de2..99509bc5b 100644
--- a/src/core/hle/service/nvflinger/nvflinger.h
+++ b/src/core/hle/service/nvflinger/nvflinger.h
@@ -25,7 +25,6 @@ struct EventType;
25 25
26namespace Kernel { 26namespace Kernel {
27class KReadableEvent; 27class KReadableEvent;
28class KWritableEvent;
29} // namespace Kernel 28} // namespace Kernel
30 29
31namespace Service::Nvidia { 30namespace Service::Nvidia {
diff --git a/src/core/hle/service/ptm/psm.cpp b/src/core/hle/service/ptm/psm.cpp
index 2c31e9485..1ac97fe31 100644
--- a/src/core/hle/service/ptm/psm.cpp
+++ b/src/core/hle/service/ptm/psm.cpp
@@ -37,19 +37,19 @@ public:
37 37
38 void SignalChargerTypeChanged() { 38 void SignalChargerTypeChanged() {
39 if (should_signal && should_signal_charger_type) { 39 if (should_signal && should_signal_charger_type) {
40 state_change_event->GetWritableEvent().Signal(); 40 state_change_event->Signal();
41 } 41 }
42 } 42 }
43 43
44 void SignalPowerSupplyChanged() { 44 void SignalPowerSupplyChanged() {
45 if (should_signal && should_signal_power_supply) { 45 if (should_signal && should_signal_power_supply) {
46 state_change_event->GetWritableEvent().Signal(); 46 state_change_event->Signal();
47 } 47 }
48 } 48 }
49 49
50 void SignalBatteryVoltageStateChanged() { 50 void SignalBatteryVoltageStateChanged() {
51 if (should_signal && should_signal_battery_voltage) { 51 if (should_signal && should_signal_battery_voltage) {
52 state_change_event->GetWritableEvent().Signal(); 52 state_change_event->Signal();
53 } 53 }
54 } 54 }
55 55
diff --git a/src/core/hle/service/ptm/ts.cpp b/src/core/hle/service/ptm/ts.cpp
index 65c3f135f..b1a0a5544 100644
--- a/src/core/hle/service/ptm/ts.cpp
+++ b/src/core/hle/service/ptm/ts.cpp
@@ -15,7 +15,7 @@ TS::TS(Core::System& system_) : ServiceFramework{system_, "ts"} {
15 {0, nullptr, "GetTemperatureRange"}, 15 {0, nullptr, "GetTemperatureRange"},
16 {1, &TS::GetTemperature, "GetTemperature"}, 16 {1, &TS::GetTemperature, "GetTemperature"},
17 {2, nullptr, "SetMeasurementMode"}, 17 {2, nullptr, "SetMeasurementMode"},
18 {3, nullptr, "GetTemperatureMilliC"}, 18 {3, &TS::GetTemperatureMilliC, "GetTemperatureMilliC"},
19 {4, nullptr, "OpenSession"}, 19 {4, nullptr, "OpenSession"},
20 }; 20 };
21 // clang-format on 21 // clang-format on
@@ -29,8 +29,6 @@ void TS::GetTemperature(Kernel::HLERequestContext& ctx) {
29 IPC::RequestParser rp{ctx}; 29 IPC::RequestParser rp{ctx};
30 const auto location{rp.PopEnum<Location>()}; 30 const auto location{rp.PopEnum<Location>()};
31 31
32 LOG_WARNING(Service_HID, "(STUBBED) called. location={}", location);
33
34 const s32 temperature = location == Location::Internal ? 35 : 20; 32 const s32 temperature = location == Location::Internal ? 35 : 20;
35 33
36 IPC::ResponseBuilder rb{ctx, 3}; 34 IPC::ResponseBuilder rb{ctx, 3};
@@ -38,4 +36,15 @@ void TS::GetTemperature(Kernel::HLERequestContext& ctx) {
38 rb.Push(temperature); 36 rb.Push(temperature);
39} 37}
40 38
39void TS::GetTemperatureMilliC(Kernel::HLERequestContext& ctx) {
40 IPC::RequestParser rp{ctx};
41 const auto location{rp.PopEnum<Location>()};
42
43 const s32 temperature = location == Location::Internal ? 35000 : 20000;
44
45 IPC::ResponseBuilder rb{ctx, 3};
46 rb.Push(ResultSuccess);
47 rb.Push(temperature);
48}
49
41} // namespace Service::PTM 50} // namespace Service::PTM
diff --git a/src/core/hle/service/ptm/ts.h b/src/core/hle/service/ptm/ts.h
index 39a734ef7..39d51847e 100644
--- a/src/core/hle/service/ptm/ts.h
+++ b/src/core/hle/service/ptm/ts.h
@@ -20,6 +20,7 @@ private:
20 }; 20 };
21 21
22 void GetTemperature(Kernel::HLERequestContext& ctx); 22 void GetTemperature(Kernel::HLERequestContext& ctx);
23 void GetTemperatureMilliC(Kernel::HLERequestContext& ctx);
23}; 24};
24 25
25} // namespace Service::PTM 26} // namespace Service::PTM
diff --git a/src/core/hle/service/set/set_sys.cpp b/src/core/hle/service/set/set_sys.cpp
index 2a0b812c1..d7cea6aac 100644
--- a/src/core/hle/service/set/set_sys.cpp
+++ b/src/core/hle/service/set/set_sys.cpp
@@ -101,6 +101,81 @@ void SET_SYS::SetColorSetId(Kernel::HLERequestContext& ctx) {
101 rb.Push(ResultSuccess); 101 rb.Push(ResultSuccess);
102} 102}
103 103
104// FIXME: implement support for the real system_settings.ini
105
106template <typename T>
107static std::vector<u8> ToBytes(const T& value) {
108 static_assert(std::is_trivially_copyable_v<T>);
109
110 const auto* begin = reinterpret_cast<const u8*>(&value);
111 const auto* end = begin + sizeof(T);
112
113 return std::vector<u8>(begin, end);
114}
115
116using Settings =
117 std::map<std::string, std::map<std::string, std::vector<u8>, std::less<>>, std::less<>>;
118
119static Settings GetSettings() {
120 Settings ret;
121
122 ret["hbloader"]["applet_heap_size"] = ToBytes(u64{0x0});
123 ret["hbloader"]["applet_heap_reservation_size"] = ToBytes(u64{0x8600000});
124
125 return ret;
126}
127
128void SET_SYS::GetSettingsItemValueSize(Kernel::HLERequestContext& ctx) {
129 LOG_DEBUG(Service_SET, "called");
130
131 // The category of the setting. This corresponds to the top-level keys of
132 // system_settings.ini.
133 const auto setting_category_buf{ctx.ReadBuffer(0)};
134 const std::string setting_category{setting_category_buf.begin(), setting_category_buf.end()};
135
136 // The name of the setting. This corresponds to the second-level keys of
137 // system_settings.ini.
138 const auto setting_name_buf{ctx.ReadBuffer(1)};
139 const std::string setting_name{setting_name_buf.begin(), setting_name_buf.end()};
140
141 auto settings{GetSettings()};
142 u64 response_size{0};
143
144 if (settings.contains(setting_category) && settings[setting_category].contains(setting_name)) {
145 response_size = settings[setting_category][setting_name].size();
146 }
147
148 IPC::ResponseBuilder rb{ctx, 4};
149 rb.Push(response_size == 0 ? ResultUnknown : ResultSuccess);
150 rb.Push(response_size);
151}
152
153void SET_SYS::GetSettingsItemValue(Kernel::HLERequestContext& ctx) {
154 LOG_DEBUG(Service_SET, "called");
155
156 // The category of the setting. This corresponds to the top-level keys of
157 // system_settings.ini.
158 const auto setting_category_buf{ctx.ReadBuffer(0)};
159 const std::string setting_category{setting_category_buf.begin(), setting_category_buf.end()};
160
161 // The name of the setting. This corresponds to the second-level keys of
162 // system_settings.ini.
163 const auto setting_name_buf{ctx.ReadBuffer(1)};
164 const std::string setting_name{setting_name_buf.begin(), setting_name_buf.end()};
165
166 auto settings{GetSettings()};
167 Result response{ResultUnknown};
168
169 if (settings.contains(setting_category) && settings[setting_category].contains(setting_name)) {
170 auto setting_value = settings[setting_category][setting_name];
171 ctx.WriteBuffer(setting_value.data(), setting_value.size());
172 response = ResultSuccess;
173 }
174
175 IPC::ResponseBuilder rb{ctx, 2};
176 rb.Push(response);
177}
178
104SET_SYS::SET_SYS(Core::System& system_) : ServiceFramework{system_, "set:sys"} { 179SET_SYS::SET_SYS(Core::System& system_) : ServiceFramework{system_, "set:sys"} {
105 // clang-format off 180 // clang-format off
106 static const FunctionInfo functions[] = { 181 static const FunctionInfo functions[] = {
@@ -138,8 +213,8 @@ SET_SYS::SET_SYS(Core::System& system_) : ServiceFramework{system_, "set:sys"} {
138 {32, nullptr, "SetAccountNotificationSettings"}, 213 {32, nullptr, "SetAccountNotificationSettings"},
139 {35, nullptr, "GetVibrationMasterVolume"}, 214 {35, nullptr, "GetVibrationMasterVolume"},
140 {36, nullptr, "SetVibrationMasterVolume"}, 215 {36, nullptr, "SetVibrationMasterVolume"},
141 {37, nullptr, "GetSettingsItemValueSize"}, 216 {37, &SET_SYS::GetSettingsItemValueSize, "GetSettingsItemValueSize"},
142 {38, nullptr, "GetSettingsItemValue"}, 217 {38, &SET_SYS::GetSettingsItemValue, "GetSettingsItemValue"},
143 {39, nullptr, "GetTvSettings"}, 218 {39, nullptr, "GetTvSettings"},
144 {40, nullptr, "SetTvSettings"}, 219 {40, nullptr, "SetTvSettings"},
145 {41, nullptr, "GetEdid"}, 220 {41, nullptr, "GetEdid"},
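
The two handlers above reduce to a nested-map lookup keyed by (category, name), with values stored as raw byte vectors via ToBytes. A minimal standalone sketch of that lookup outside the IPC plumbing; the main harness and prints are illustrative and not part of the service:

#include <cstdint>
#include <cstring>
#include <iostream>
#include <map>
#include <string>
#include <type_traits>
#include <vector>

using u8 = std::uint8_t;
using u64 = std::uint64_t;

// Same serialization helper shape as the diff's ToBytes.
template <typename T>
static std::vector<u8> ToBytes(const T& value) {
    static_assert(std::is_trivially_copyable_v<T>);
    const auto* begin = reinterpret_cast<const u8*>(&value);
    return std::vector<u8>(begin, begin + sizeof(T));
}

int main() {
    // category -> name -> raw bytes, as in the diff's Settings alias.
    std::map<std::string, std::map<std::string, std::vector<u8>, std::less<>>, std::less<>>
        settings;
    settings["hbloader"]["applet_heap_size"] = ToBytes(u64{0x0});
    settings["hbloader"]["applet_heap_reservation_size"] = ToBytes(u64{0x8600000});

    const std::string category{"hbloader"};
    const std::string name{"applet_heap_reservation_size"};

    // GetSettingsItemValueSize: report the byte count, or 0 when the key is missing.
    u64 size{0};
    if (settings.contains(category) && settings[category].contains(name)) {
        size = settings[category][name].size();
    }
    std::cout << "value size: " << size << '\n'; // 8 for a u64 entry

    // GetSettingsItemValue: copy the raw bytes back out (stand-in for ctx.WriteBuffer).
    u64 value{0};
    std::memcpy(&value, settings[category][name].data(), sizeof(value));
    std::cout << "value: 0x" << std::hex << value << '\n'; // 0x8600000
}
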
diff --git a/src/core/hle/service/set/set_sys.h b/src/core/hle/service/set/set_sys.h
index ac97772b7..258ef8c57 100644
--- a/src/core/hle/service/set/set_sys.h
+++ b/src/core/hle/service/set/set_sys.h
@@ -23,6 +23,8 @@ private:
23 BasicBlack = 1, 23 BasicBlack = 1,
24 }; 24 };
25 25
26 void GetSettingsItemValueSize(Kernel::HLERequestContext& ctx);
27 void GetSettingsItemValue(Kernel::HLERequestContext& ctx);
26 void GetFirmwareVersion(Kernel::HLERequestContext& ctx); 28 void GetFirmwareVersion(Kernel::HLERequestContext& ctx);
27 void GetFirmwareVersion2(Kernel::HLERequestContext& ctx); 29 void GetFirmwareVersion2(Kernel::HLERequestContext& ctx);
28 void GetColorSetId(Kernel::HLERequestContext& ctx); 30 void GetColorSetId(Kernel::HLERequestContext& ctx);
diff --git a/src/core/hle/service/sm/sm.cpp b/src/core/hle/service/sm/sm.cpp
index 246c94623..48e70f93c 100644
--- a/src/core/hle/service/sm/sm.cpp
+++ b/src/core/hle/service/sm/sm.cpp
@@ -156,7 +156,8 @@ ResultVal<Kernel::KClientSession*> SM::GetServiceImpl(Kernel::HLERequestContext&
156 156
157 // Create a new session. 157 // Create a new session.
158 Kernel::KClientSession* session{}; 158 Kernel::KClientSession* session{};
159 if (const auto result = port->GetClientPort().CreateSession(std::addressof(session)); 159 if (const auto result = port->GetClientPort().CreateSession(
160 std::addressof(session), std::make_shared<Kernel::SessionRequestManager>(kernel));
160 result.IsError()) { 161 result.IsError()) {
161 LOG_ERROR(Service_SM, "called service={} -> error 0x{:08X}", name, result.raw); 162 LOG_ERROR(Service_SM, "called service={} -> error 0x{:08X}", name, result.raw);
162 return result; 163 return result;
diff --git a/src/core/hle/service/sm/sm_controller.cpp b/src/core/hle/service/sm/sm_controller.cpp
index 2a4bd64ab..273f79568 100644
--- a/src/core/hle/service/sm/sm_controller.cpp
+++ b/src/core/hle/service/sm/sm_controller.cpp
@@ -15,9 +15,10 @@
15namespace Service::SM { 15namespace Service::SM {
16 16
17void Controller::ConvertCurrentObjectToDomain(Kernel::HLERequestContext& ctx) { 17void Controller::ConvertCurrentObjectToDomain(Kernel::HLERequestContext& ctx) {
18 ASSERT_MSG(!ctx.Session()->IsDomain(), "Session is already a domain"); 18 ASSERT_MSG(!ctx.Session()->GetSessionRequestManager()->IsDomain(),
19 "Session is already a domain");
19 LOG_DEBUG(Service, "called, server_session={}", ctx.Session()->GetId()); 20 LOG_DEBUG(Service, "called, server_session={}", ctx.Session()->GetId());
20 ctx.Session()->ConvertToDomain(); 21 ctx.Session()->GetSessionRequestManager()->ConvertToDomainOnRequestEnd();
21 22
22 IPC::ResponseBuilder rb{ctx, 3}; 23 IPC::ResponseBuilder rb{ctx, 3};
23 rb.Push(ResultSuccess); 24 rb.Push(ResultSuccess);
diff --git a/src/core/hle/service/time/system_clock_context_update_callback.cpp b/src/core/hle/service/time/system_clock_context_update_callback.cpp
index a649bed3a..cafc04ee7 100644
--- a/src/core/hle/service/time/system_clock_context_update_callback.cpp
+++ b/src/core/hle/service/time/system_clock_context_update_callback.cpp
@@ -1,7 +1,7 @@
1// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project 1// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later 2// SPDX-License-Identifier: GPL-2.0-or-later
3 3
4#include "core/hle/kernel/k_writable_event.h" 4#include "core/hle/kernel/k_event.h"
5#include "core/hle/service/time/errors.h" 5#include "core/hle/service/time/errors.h"
6#include "core/hle/service/time/system_clock_context_update_callback.h" 6#include "core/hle/service/time/system_clock_context_update_callback.h"
7 7
@@ -20,13 +20,13 @@ bool SystemClockContextUpdateCallback::NeedUpdate(const SystemClockContext& valu
20} 20}
21 21
22void SystemClockContextUpdateCallback::RegisterOperationEvent( 22void SystemClockContextUpdateCallback::RegisterOperationEvent(
23 std::shared_ptr<Kernel::KWritableEvent>&& writable_event) { 23 std::shared_ptr<Kernel::KEvent>&& event) {
24 operation_event_list.emplace_back(std::move(writable_event)); 24 operation_event_list.emplace_back(std::move(event));
25} 25}
26 26
27void SystemClockContextUpdateCallback::BroadcastOperationEvent() { 27void SystemClockContextUpdateCallback::BroadcastOperationEvent() {
28 for (const auto& writable_event : operation_event_list) { 28 for (const auto& event : operation_event_list) {
29 writable_event->Signal(); 29 event->Signal();
30 } 30 }
31} 31}
32 32
diff --git a/src/core/hle/service/time/system_clock_context_update_callback.h b/src/core/hle/service/time/system_clock_context_update_callback.h
index 9c6caf196..bf657acd9 100644
--- a/src/core/hle/service/time/system_clock_context_update_callback.h
+++ b/src/core/hle/service/time/system_clock_context_update_callback.h
@@ -9,7 +9,7 @@
9#include "core/hle/service/time/clock_types.h" 9#include "core/hle/service/time/clock_types.h"
10 10
11namespace Kernel { 11namespace Kernel {
12class KWritableEvent; 12class KEvent;
13} 13}
14 14
15namespace Service::Time::Clock { 15namespace Service::Time::Clock {
@@ -24,7 +24,7 @@ public:
24 24
25 bool NeedUpdate(const SystemClockContext& value) const; 25 bool NeedUpdate(const SystemClockContext& value) const;
26 26
27 void RegisterOperationEvent(std::shared_ptr<Kernel::KWritableEvent>&& writable_event); 27 void RegisterOperationEvent(std::shared_ptr<Kernel::KEvent>&& event);
28 28
29 void BroadcastOperationEvent(); 29 void BroadcastOperationEvent();
30 30
@@ -37,7 +37,7 @@ protected:
37 37
38private: 38private:
39 bool has_context{}; 39 bool has_context{};
40 std::vector<std::shared_ptr<Kernel::KWritableEvent>> operation_event_list; 40 std::vector<std::shared_ptr<Kernel::KEvent>> operation_event_list;
41}; 41};
42 42
43} // namespace Service::Time::Clock 43} // namespace Service::Time::Clock
diff --git a/src/core/hle/service/vi/display/vi_display.cpp b/src/core/hle/service/vi/display/vi_display.cpp
index 288aafaaf..8ef74f1f0 100644
--- a/src/core/hle/service/vi/display/vi_display.cpp
+++ b/src/core/hle/service/vi/display/vi_display.cpp
@@ -10,7 +10,6 @@
10#include "core/core.h" 10#include "core/core.h"
11#include "core/hle/kernel/k_event.h" 11#include "core/hle/kernel/k_event.h"
12#include "core/hle/kernel/k_readable_event.h" 12#include "core/hle/kernel/k_readable_event.h"
13#include "core/hle/kernel/k_writable_event.h"
14#include "core/hle/service/kernel_helpers.h" 13#include "core/hle/service/kernel_helpers.h"
15#include "core/hle/service/nvdrv/core/container.h" 14#include "core/hle/service/nvdrv/core/container.h"
16#include "core/hle/service/nvflinger/buffer_item_consumer.h" 15#include "core/hle/service/nvflinger/buffer_item_consumer.h"
@@ -74,7 +73,7 @@ Kernel::KReadableEvent* Display::GetVSyncEventUnchecked() {
74} 73}
75 74
76void Display::SignalVSyncEvent() { 75void Display::SignalVSyncEvent() {
77 vsync_event->GetWritableEvent().Signal(); 76 vsync_event->Signal();
78} 77}
79 78
80void Display::CreateLayer(u64 layer_id, u32 binder_id, 79void Display::CreateLayer(u64 layer_id, u32 binder_id,
diff --git a/src/core/hle/service/vi/vi_results.h b/src/core/hle/service/vi/vi_results.h
index a46c247d2..22bac799f 100644
--- a/src/core/hle/service/vi/vi_results.h
+++ b/src/core/hle/service/vi/vi_results.h
@@ -1,6 +1,8 @@
1// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project 1// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later 2// SPDX-License-Identifier: GPL-2.0-or-later
3 3
4#pragma once
5
4#include "core/hle/result.h" 6#include "core/hle/result.h"
5 7
6namespace Service::VI { 8namespace Service::VI {
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 2ac792566..9637cb5b1 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -65,7 +65,7 @@ struct Memory::Impl {
65 return {}; 65 return {};
66 } 66 }
67 67
68 return system.DeviceMemory().GetPointer(paddr) + vaddr; 68 return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
69 } 69 }
70 70
71 [[nodiscard]] u8* GetPointerFromDebugMemory(VAddr vaddr) const { 71 [[nodiscard]] u8* GetPointerFromDebugMemory(VAddr vaddr) const {
@@ -75,7 +75,7 @@ struct Memory::Impl {
75 return {}; 75 return {};
76 } 76 }
77 77
78 return system.DeviceMemory().GetPointer(paddr) + vaddr; 78 return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
79 } 79 }
80 80
81 u8 Read8(const VAddr addr) { 81 u8 Read8(const VAddr addr) {
@@ -499,7 +499,7 @@ struct Memory::Impl {
499 } else { 499 } else {
500 while (base != end) { 500 while (base != end) {
501 page_table.pointers[base].Store( 501 page_table.pointers[base].Store(
502 system.DeviceMemory().GetPointer(target) - (base << YUZU_PAGEBITS), type); 502 system.DeviceMemory().GetPointer<u8>(target) - (base << YUZU_PAGEBITS), type);
503 page_table.backing_addr[base] = target - (base << YUZU_PAGEBITS); 503 page_table.backing_addr[base] = target - (base << YUZU_PAGEBITS);
504 504
505 ASSERT_MSG(page_table.pointers[base].Pointer(), 505 ASSERT_MSG(page_table.pointers[base].Pointer(),
diff --git a/src/tests/core/core_timing.cpp b/src/tests/core/core_timing.cpp
index 7c432a63c..284b2ae66 100644
--- a/src/tests/core/core_timing.cpp
+++ b/src/tests/core/core_timing.cpp
@@ -40,9 +40,6 @@ struct ScopeInit final {
40 core_timing.SetMulticore(true); 40 core_timing.SetMulticore(true);
41 core_timing.Initialize([]() {}); 41 core_timing.Initialize([]() {});
42 } 42 }
43 ~ScopeInit() {
44 core_timing.Shutdown();
45 }
46 43
47 Core::Timing::CoreTiming core_timing; 44 Core::Timing::CoreTiming core_timing;
48}; 45};
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index 40e6d1ec4..cb8b46edf 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -82,6 +82,7 @@ add_library(video_core STATIC
82 gpu_thread.h 82 gpu_thread.h
83 memory_manager.cpp 83 memory_manager.cpp
84 memory_manager.h 84 memory_manager.h
85 pte_kind.h
85 query_cache.h 86 query_cache.h
86 rasterizer_accelerated.cpp 87 rasterizer_accelerated.cpp
87 rasterizer_accelerated.h 88 rasterizer_accelerated.h
diff --git a/src/video_core/dirty_flags.cpp b/src/video_core/dirty_flags.cpp
index 1039e036f..c2ecc12f5 100644
--- a/src/video_core/dirty_flags.cpp
+++ b/src/video_core/dirty_flags.cpp
@@ -61,7 +61,7 @@ void SetupDirtyRenderTargets(Maxwell3D::DirtyState::Tables& tables) {
61} 61}
62 62
63void SetupDirtyShaders(Maxwell3D::DirtyState::Tables& tables) { 63void SetupDirtyShaders(Maxwell3D::DirtyState::Tables& tables) {
64 FillBlock(tables[0], OFF(pipelines), NUM(pipelines) * Maxwell3D::Regs::MaxShaderProgram, 64 FillBlock(tables[0], OFF(pipelines), NUM(pipelines[0]) * Maxwell3D::Regs::MaxShaderProgram,
65 Shaders); 65 Shaders);
66} 66}
67} // Anonymous namespace 67} // Anonymous namespace
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index fdf470913..b1a22b76c 100644
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -74,15 +74,15 @@ void Maxwell3D::InitializeRegisterDefaults() {
74 regs.stencil_front_op.zfail = Regs::StencilOp::Op::Keep_D3D; 74 regs.stencil_front_op.zfail = Regs::StencilOp::Op::Keep_D3D;
75 regs.stencil_front_op.zpass = Regs::StencilOp::Op::Keep_D3D; 75 regs.stencil_front_op.zpass = Regs::StencilOp::Op::Keep_D3D;
76 regs.stencil_front_op.func = Regs::ComparisonOp::Always_GL; 76 regs.stencil_front_op.func = Regs::ComparisonOp::Always_GL;
77 regs.stencil_front_func.func_mask = 0xFFFFFFFF; 77 regs.stencil_front_func_mask = 0xFFFFFFFF;
78 regs.stencil_front_func.mask = 0xFFFFFFFF; 78 regs.stencil_front_mask = 0xFFFFFFFF;
79 regs.stencil_two_side_enable = 1; 79 regs.stencil_two_side_enable = 1;
80 regs.stencil_back_op.fail = Regs::StencilOp::Op::Keep_D3D; 80 regs.stencil_back_op.fail = Regs::StencilOp::Op::Keep_D3D;
81 regs.stencil_back_op.zfail = Regs::StencilOp::Op::Keep_D3D; 81 regs.stencil_back_op.zfail = Regs::StencilOp::Op::Keep_D3D;
82 regs.stencil_back_op.zpass = Regs::StencilOp::Op::Keep_D3D; 82 regs.stencil_back_op.zpass = Regs::StencilOp::Op::Keep_D3D;
83 regs.stencil_back_op.func = Regs::ComparisonOp::Always_GL; 83 regs.stencil_back_op.func = Regs::ComparisonOp::Always_GL;
84 regs.stencil_back_func.func_mask = 0xFFFFFFFF; 84 regs.stencil_back_func_mask = 0xFFFFFFFF;
85 regs.stencil_back_func.mask = 0xFFFFFFFF; 85 regs.stencil_back_mask = 0xFFFFFFFF;
86 86
87 regs.depth_test_func = Regs::ComparisonOp::Always_GL; 87 regs.depth_test_func = Regs::ComparisonOp::Always_GL;
88 regs.gl_front_face = Regs::FrontFace::CounterClockWise; 88 regs.gl_front_face = Regs::FrontFace::CounterClockWise;
diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h
index efe1073b0..75e3b868d 100644
--- a/src/video_core/engines/maxwell_3d.h
+++ b/src/video_core/engines/maxwell_3d.h
@@ -390,7 +390,7 @@ public:
390 FractionalEven = 2, 390 FractionalEven = 2,
391 }; 391 };
392 392
393 enum class OutputPrimitves : u32 { 393 enum class OutputPrimitives : u32 {
394 Points = 0, 394 Points = 0,
395 Lines = 1, 395 Lines = 1,
396 Triangles_CW = 2, 396 Triangles_CW = 2,
@@ -401,7 +401,7 @@ public:
401 union { 401 union {
402 BitField<0, 2, DomainType> domain_type; 402 BitField<0, 2, DomainType> domain_type;
403 BitField<4, 2, Spacing> spacing; 403 BitField<4, 2, Spacing> spacing;
404 BitField<8, 2, OutputPrimitves> output_primitives; 404 BitField<8, 2, OutputPrimitives> output_primitives;
405 }; 405 };
406 } params; 406 } params;
407 407
@@ -1795,12 +1795,6 @@ public:
1795 ComparisonOp func; 1795 ComparisonOp func;
1796 }; 1796 };
1797 1797
1798 struct StencilFunc {
1799 s32 ref;
1800 u32 func_mask;
1801 u32 mask;
1802 };
1803
1804 struct PsSaturate { 1798 struct PsSaturate {
1805 // Opposite of DepthMode 1799 // Opposite of DepthMode
1806 enum class Depth : u32 { 1800 enum class Depth : u32 {
@@ -2737,7 +2731,9 @@ public:
2737 u32 post_z_pixel_imask; ///< 0x0F1C 2731 u32 post_z_pixel_imask; ///< 0x0F1C
2738 INSERT_PADDING_BYTES_NOINIT(0x20); 2732 INSERT_PADDING_BYTES_NOINIT(0x20);
2739 ConstantColorRendering const_color_rendering; ///< 0x0F40 2733 ConstantColorRendering const_color_rendering; ///< 0x0F40
2740 StencilFunc stencil_back_func; ///< 0x0F54 2734 s32 stencil_back_ref; ///< 0x0F54
2735 u32 stencil_back_mask; ///< 0x0F58
2736 u32 stencil_back_func_mask; ///< 0x0F5C
2741 INSERT_PADDING_BYTES_NOINIT(0x24); 2737 INSERT_PADDING_BYTES_NOINIT(0x24);
2742 VertexStreamSubstitute vertex_stream_substitute; ///< 0x0F84 2738 VertexStreamSubstitute vertex_stream_substitute; ///< 0x0F84
2743 u32 line_mode_clip_generated_edge_do_not_draw; ///< 0x0F8C 2739 u32 line_mode_clip_generated_edge_do_not_draw; ///< 0x0F8C
@@ -2855,7 +2851,9 @@ public:
2855 Blend blend; ///< 0x133C 2851 Blend blend; ///< 0x133C
2856 u32 stencil_enable; ///< 0x1380 2852 u32 stencil_enable; ///< 0x1380
2857 StencilOp stencil_front_op; ///< 0x1384 2853 StencilOp stencil_front_op; ///< 0x1384
2858 StencilFunc stencil_front_func; ///< 0x1394 2854 s32 stencil_front_ref; ///< 0x1394
2855 s32 stencil_front_func_mask; ///< 0x1398
2856 s32 stencil_front_mask; ///< 0x139C
2859 INSERT_PADDING_BYTES_NOINIT(0x4); 2857 INSERT_PADDING_BYTES_NOINIT(0x4);
2860 u32 draw_auto_start_byte_count; ///< 0x13A4 2858 u32 draw_auto_start_byte_count; ///< 0x13A4
2861 PsSaturate frag_color_clamp; ///< 0x13A8 2859 PsSaturate frag_color_clamp; ///< 0x13A8
@@ -3311,7 +3309,9 @@ ASSERT_REG_POSITION(vpc_perf, 0x0F14);
3311ASSERT_REG_POSITION(pm_local_trigger, 0x0F18); 3309ASSERT_REG_POSITION(pm_local_trigger, 0x0F18);
3312ASSERT_REG_POSITION(post_z_pixel_imask, 0x0F1C); 3310ASSERT_REG_POSITION(post_z_pixel_imask, 0x0F1C);
3313ASSERT_REG_POSITION(const_color_rendering, 0x0F40); 3311ASSERT_REG_POSITION(const_color_rendering, 0x0F40);
3314ASSERT_REG_POSITION(stencil_back_func, 0x0F54); 3312ASSERT_REG_POSITION(stencil_back_ref, 0x0F54);
3313ASSERT_REG_POSITION(stencil_back_mask, 0x0F58);
3314ASSERT_REG_POSITION(stencil_back_func_mask, 0x0F5C);
3315ASSERT_REG_POSITION(vertex_stream_substitute, 0x0F84); 3315ASSERT_REG_POSITION(vertex_stream_substitute, 0x0F84);
3316ASSERT_REG_POSITION(line_mode_clip_generated_edge_do_not_draw, 0x0F8C); 3316ASSERT_REG_POSITION(line_mode_clip_generated_edge_do_not_draw, 0x0F8C);
3317ASSERT_REG_POSITION(color_mask_common, 0x0F90); 3317ASSERT_REG_POSITION(color_mask_common, 0x0F90);
@@ -3416,7 +3416,9 @@ ASSERT_REG_POSITION(invalidate_texture_data_cache_lines, 0x1338);
3416ASSERT_REG_POSITION(blend, 0x133C); 3416ASSERT_REG_POSITION(blend, 0x133C);
3417ASSERT_REG_POSITION(stencil_enable, 0x1380); 3417ASSERT_REG_POSITION(stencil_enable, 0x1380);
3418ASSERT_REG_POSITION(stencil_front_op, 0x1384); 3418ASSERT_REG_POSITION(stencil_front_op, 0x1384);
3419ASSERT_REG_POSITION(stencil_front_func, 0x1394); 3419ASSERT_REG_POSITION(stencil_front_ref, 0x1394);
3420ASSERT_REG_POSITION(stencil_front_func_mask, 0x1398);
3421ASSERT_REG_POSITION(stencil_front_mask, 0x139C);
3420ASSERT_REG_POSITION(draw_auto_start_byte_count, 0x13A4); 3422ASSERT_REG_POSITION(draw_auto_start_byte_count, 0x13A4);
3421ASSERT_REG_POSITION(frag_color_clamp, 0x13A8); 3423ASSERT_REG_POSITION(frag_color_clamp, 0x13A8);
3422ASSERT_REG_POSITION(window_origin, 0x13AC); 3424ASSERT_REG_POSITION(window_origin, 0x13AC);
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index 3909d36c1..4eb7a100d 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -56,66 +56,85 @@ void MaxwellDMA::Launch() {
56 ASSERT(launch.interrupt_type == LaunchDMA::InterruptType::NONE); 56 ASSERT(launch.interrupt_type == LaunchDMA::InterruptType::NONE);
57 ASSERT(launch.data_transfer_type == LaunchDMA::DataTransferType::NON_PIPELINED); 57 ASSERT(launch.data_transfer_type == LaunchDMA::DataTransferType::NON_PIPELINED);
58 58
59 const bool is_src_pitch = launch.src_memory_layout == LaunchDMA::MemoryLayout::PITCH; 59 if (launch.multi_line_enable) {
60 const bool is_dst_pitch = launch.dst_memory_layout == LaunchDMA::MemoryLayout::PITCH; 60 const bool is_src_pitch = launch.src_memory_layout == LaunchDMA::MemoryLayout::PITCH;
61 61 const bool is_dst_pitch = launch.dst_memory_layout == LaunchDMA::MemoryLayout::PITCH;
62 if (!is_src_pitch && !is_dst_pitch) { 62
63 // If both the source and the destination are in block layout, assert. 63 if (!is_src_pitch && !is_dst_pitch) {
64 UNIMPLEMENTED_MSG("Tiled->Tiled DMA transfers are not yet implemented"); 64 // If both the source and the destination are in block layout, assert.
65 return; 65 UNIMPLEMENTED_MSG("Tiled->Tiled DMA transfers are not yet implemented");
66 } 66 return;
67 }
67 68
68 if (is_src_pitch && is_dst_pitch) { 69 if (is_src_pitch && is_dst_pitch) {
69 CopyPitchToPitch(); 70 for (u32 line = 0; line < regs.line_count; ++line) {
71 const GPUVAddr source_line =
72 regs.offset_in + static_cast<size_t>(line) * regs.pitch_in;
73 const GPUVAddr dest_line =
74 regs.offset_out + static_cast<size_t>(line) * regs.pitch_out;
75 memory_manager.CopyBlock(dest_line, source_line, regs.line_length_in);
76 }
77 } else {
78 if (!is_src_pitch && is_dst_pitch) {
79 CopyBlockLinearToPitch();
80 } else {
81 CopyPitchToBlockLinear();
82 }
83 }
70 } else { 84 } else {
71 ASSERT(launch.multi_line_enable == 1); 85 // TODO: allow multisized components.
72 86 auto& accelerate = rasterizer->AccessAccelerateDMA();
73 if (!is_src_pitch && is_dst_pitch) { 87 const bool is_const_a_dst = regs.remap_const.dst_x == RemapConst::Swizzle::CONST_A;
74 CopyBlockLinearToPitch(); 88 if (regs.launch_dma.remap_enable != 0 && is_const_a_dst) {
89 ASSERT(regs.remap_const.component_size_minus_one == 3);
90 accelerate.BufferClear(regs.offset_out, regs.line_length_in, regs.remap_consta_value);
91 std::vector<u32> tmp_buffer(regs.line_length_in, regs.remap_consta_value);
92 memory_manager.WriteBlockUnsafe(regs.offset_out,
93 reinterpret_cast<u8*>(tmp_buffer.data()),
94 regs.line_length_in * sizeof(u32));
75 } else { 95 } else {
76 CopyPitchToBlockLinear(); 96 auto convert_linear_2_blocklinear_addr = [](u64 address) {
97 return (address & ~0x1f0ULL) | ((address & 0x40) >> 2) | ((address & 0x10) << 1) |
98 ((address & 0x180) >> 1) | ((address & 0x20) << 3);
99 };
100 auto src_kind = memory_manager.GetPageKind(regs.offset_in);
101 auto dst_kind = memory_manager.GetPageKind(regs.offset_out);
102 const bool is_src_pitch = IsPitchKind(static_cast<PTEKind>(src_kind));
103 const bool is_dst_pitch = IsPitchKind(static_cast<PTEKind>(dst_kind));
104 if (!is_src_pitch && is_dst_pitch) {
105 std::vector<u8> tmp_buffer(regs.line_length_in);
106 std::vector<u8> dst_buffer(regs.line_length_in);
107 memory_manager.ReadBlockUnsafe(regs.offset_in, tmp_buffer.data(),
108 regs.line_length_in);
109 for (u32 offset = 0; offset < regs.line_length_in; ++offset) {
110 dst_buffer[offset] =
111 tmp_buffer[convert_linear_2_blocklinear_addr(regs.offset_in + offset) -
112 regs.offset_in];
113 }
114 memory_manager.WriteBlock(regs.offset_out, dst_buffer.data(), regs.line_length_in);
115 } else if (is_src_pitch && !is_dst_pitch) {
116 std::vector<u8> tmp_buffer(regs.line_length_in);
117 std::vector<u8> dst_buffer(regs.line_length_in);
118 memory_manager.ReadBlockUnsafe(regs.offset_in, tmp_buffer.data(),
119 regs.line_length_in);
120 for (u32 offset = 0; offset < regs.line_length_in; ++offset) {
121 dst_buffer[convert_linear_2_blocklinear_addr(regs.offset_out + offset) -
122 regs.offset_out] = tmp_buffer[offset];
123 }
124 memory_manager.WriteBlock(regs.offset_out, dst_buffer.data(), regs.line_length_in);
125 } else {
126 if (!accelerate.BufferCopy(regs.offset_in, regs.offset_out, regs.line_length_in)) {
127 std::vector<u8> tmp_buffer(regs.line_length_in);
128 memory_manager.ReadBlockUnsafe(regs.offset_in, tmp_buffer.data(),
129 regs.line_length_in);
130 memory_manager.WriteBlock(regs.offset_out, tmp_buffer.data(),
131 regs.line_length_in);
132 }
133 }
77 } 134 }
78 } 135 }
79 ReleaseSemaphore();
80}
81 136
82void MaxwellDMA::CopyPitchToPitch() { 137 ReleaseSemaphore();
83 // When `multi_line_enable` bit is enabled we copy a 2D image of dimensions
84 // (line_length_in, line_count).
85 // Otherwise the copy is performed as if we were copying a 1D buffer of length line_length_in.
86 const bool remap_enabled = regs.launch_dma.remap_enable != 0;
87 if (regs.launch_dma.multi_line_enable) {
88 UNIMPLEMENTED_IF(remap_enabled);
89
90 // Perform a line-by-line copy.
91 // We're going to take a subrect of size (line_length_in, line_count) from the source
92 // rectangle. There is no need to manually flush/invalidate the regions because CopyBlock
93 // does that for us.
94 for (u32 line = 0; line < regs.line_count; ++line) {
95 const GPUVAddr source_line = regs.offset_in + static_cast<size_t>(line) * regs.pitch_in;
96 const GPUVAddr dest_line = regs.offset_out + static_cast<size_t>(line) * regs.pitch_out;
97 memory_manager.CopyBlock(dest_line, source_line, regs.line_length_in);
98 }
99 return;
100 }
101 // TODO: allow multisized components.
102 auto& accelerate = rasterizer->AccessAccelerateDMA();
103 const bool is_const_a_dst = regs.remap_const.dst_x == RemapConst::Swizzle::CONST_A;
104 const bool is_buffer_clear = remap_enabled && is_const_a_dst;
105 if (is_buffer_clear) {
106 ASSERT(regs.remap_const.component_size_minus_one == 3);
107 accelerate.BufferClear(regs.offset_out, regs.line_length_in, regs.remap_consta_value);
108 std::vector<u32> tmp_buffer(regs.line_length_in, regs.remap_consta_value);
109 memory_manager.WriteBlockUnsafe(regs.offset_out, reinterpret_cast<u8*>(tmp_buffer.data()),
110 regs.line_length_in * sizeof(u32));
111 return;
112 }
113 UNIMPLEMENTED_IF(remap_enabled);
114 if (!accelerate.BufferCopy(regs.offset_in, regs.offset_out, regs.line_length_in)) {
115 std::vector<u8> tmp_buffer(regs.line_length_in);
116 memory_manager.ReadBlockUnsafe(regs.offset_in, tmp_buffer.data(), regs.line_length_in);
117 memory_manager.WriteBlock(regs.offset_out, tmp_buffer.data(), regs.line_length_in);
118 }
119} 138}
120 139
121void MaxwellDMA::CopyBlockLinearToPitch() { 140void MaxwellDMA::CopyBlockLinearToPitch() {
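
CopyPitchToPitch is gone: its 2D line-by-line path now lives under multi_line_enable, and the single-line path deswizzles through convert_linear_2_blocklinear_addr when exactly one side is block linear. That lambda is a pure bit permutation (bits 4-8 are shuffled as 4->5, 5->8, 6->4, 7->6, 8->7; everything else passes through), which is what makes the byte-for-byte staging copy valid. A standalone sketch verifying the permutation is a bijection over a 512-byte window; the function name and check harness are mine, not from the diff:

#include <array>
#include <cstdint>
#include <iostream>

// Bit permutation from the diff's convert_linear_2_blocklinear_addr lambda.
static std::uint64_t ConvertLinearToBlockLinear(std::uint64_t address) {
    return (address & ~0x1f0ULL) | ((address & 0x40) >> 2) | ((address & 0x10) << 1) |
           ((address & 0x180) >> 1) | ((address & 0x20) << 3);
}

int main() {
    // Only bits 4-8 move, so any 512-byte aligned window maps onto itself.
    std::array<bool, 512> seen{};
    for (std::uint64_t offset = 0; offset < 512; ++offset) {
        seen[ConvertLinearToBlockLinear(offset)] = true;
    }
    bool bijective = true;
    for (const bool hit : seen) {
        bijective = bijective && hit;
    }
    std::cout << (bijective ? "bijective over 512 bytes" : "collision!") << '\n';
}
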
diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h
index bc48320ce..953e34adc 100644
--- a/src/video_core/engines/maxwell_dma.h
+++ b/src/video_core/engines/maxwell_dma.h
@@ -219,8 +219,6 @@ private:
219 /// registers. 219 /// registers.
220 void Launch(); 220 void Launch();
221 221
222 void CopyPitchToPitch();
223
224 void CopyBlockLinearToPitch(); 222 void CopyBlockLinearToPitch();
225 223
226 void CopyPitchToBlockLinear(); 224 void CopyPitchToBlockLinear();
diff --git a/src/video_core/host1x/syncpoint_manager.cpp b/src/video_core/host1x/syncpoint_manager.cpp
index 326e8355a..a44fc83d3 100644
--- a/src/video_core/host1x/syncpoint_manager.cpp
+++ b/src/video_core/host1x/syncpoint_manager.cpp
@@ -36,7 +36,17 @@ SyncpointManager::ActionHandle SyncpointManager::RegisterAction(
36void SyncpointManager::DeregisterAction(std::list<RegisteredAction>& action_storage, 36void SyncpointManager::DeregisterAction(std::list<RegisteredAction>& action_storage,
37 ActionHandle& handle) { 37 ActionHandle& handle) {
38 std::unique_lock lk(guard); 38 std::unique_lock lk(guard);
39 action_storage.erase(handle); 39
40 // We want to ensure the iterator still exists prior to erasing it
41 // Otherwise, if an invalid iterator was passed in then it could lead to UB
42 // It is important to avoid UB in that case since deregistration isn't called from a locked
43 // context
44 for (auto it = action_storage.begin(); it != action_storage.end(); it++) {
45 if (it == handle) {
46 action_storage.erase(it);
47 return;
48 }
49 }
40} 50}
41 51
42void SyncpointManager::DeregisterGuestAction(u32 syncpoint_id, ActionHandle& handle) { 52void SyncpointManager::DeregisterGuestAction(u32 syncpoint_id, ActionHandle& handle) {
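
DeregisterAction now validates the handle by scanning for a matching iterator before erasing, so a stale handle becomes a no-op instead of erasing garbage. A compact sketch of the same guard; SafeErase and the demo are mine, and note that comparing an already-erased std::list iterator is still formally unspecified, which the diff accepts as the lesser risk:

#include <iostream>
#include <iterator>
#include <list>

// The diff's guard, extracted: only erase when the handle matches a live element.
template <typename T>
void SafeErase(std::list<T>& storage, typename std::list<T>::iterator handle) {
    for (auto it = storage.begin(); it != storage.end(); ++it) {
        if (it == handle) {
            storage.erase(it);
            return;
        }
    }
    // No match: the element was already removed, so nothing happens.
}

int main() {
    std::list<int> actions{1, 2, 3};
    auto handle = std::next(actions.begin()); // points at 2

    SafeErase(actions, handle); // erases 2
    SafeErase(actions, handle); // stale handle: scan finds no match, nothing erased

    for (const int v : actions) {
        std::cout << v << ' '; // prints: 1 3
    }
    std::cout << '\n';
}
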
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index cca401c74..d07b21bd6 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -41,7 +41,11 @@ MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64
41 big_entries.resize(big_page_table_size / 32, 0); 41 big_entries.resize(big_page_table_size / 32, 0);
42 big_page_table_cpu.resize(big_page_table_size); 42 big_page_table_cpu.resize(big_page_table_size);
43 big_page_continous.resize(big_page_table_size / continous_bits, 0); 43 big_page_continous.resize(big_page_table_size / continous_bits, 0);
44 std::array<PTEKind, 32> kind_values;
45 kind_values.fill(PTEKind::INVALID);
46 big_kinds.resize(big_page_table_size / 32, kind_values);
44 entries.resize(page_table_size / 32, 0); 47 entries.resize(page_table_size / 32, 0);
48 kinds.resize(page_table_size / 32, kind_values);
45} 49}
46 50
47MemoryManager::~MemoryManager() = default; 51MemoryManager::~MemoryManager() = default;
@@ -78,6 +82,41 @@ void MemoryManager::SetEntry(size_t position, MemoryManager::EntryType entry) {
78 } 82 }
79} 83}
80 84
85PTEKind MemoryManager::GetPageKind(GPUVAddr gpu_addr) const {
86 auto entry = GetEntry<true>(gpu_addr);
87 if (entry == EntryType::Mapped || entry == EntryType::Reserved) [[likely]] {
88 return GetKind<true>(gpu_addr);
89 } else {
90 return GetKind<false>(gpu_addr);
91 }
92}
93
94template <bool is_big_page>
95PTEKind MemoryManager::GetKind(size_t position) const {
96 if constexpr (is_big_page) {
97 position = position >> big_page_bits;
98 const size_t sub_index = position % 32;
99 return big_kinds[position / 32][sub_index];
100 } else {
101 position = position >> page_bits;
102 const size_t sub_index = position % 32;
103 return kinds[position / 32][sub_index];
104 }
105}
106
107template <bool is_big_page>
108void MemoryManager::SetKind(size_t position, PTEKind kind) {
109 if constexpr (is_big_page) {
110 position = position >> big_page_bits;
111 const size_t sub_index = position % 32;
112 big_kinds[position / 32][sub_index] = kind;
113 } else {
114 position = position >> page_bits;
115 const size_t sub_index = position % 32;
116 kinds[position / 32][sub_index] = kind;
117 }
118}
119
81inline bool MemoryManager::IsBigPageContinous(size_t big_page_index) const { 120inline bool MemoryManager::IsBigPageContinous(size_t big_page_index) const {
82 const u64 entry_mask = big_page_continous[big_page_index / continous_bits]; 121 const u64 entry_mask = big_page_continous[big_page_index / continous_bits];
83 const size_t sub_index = big_page_index % continous_bits; 122 const size_t sub_index = big_page_index % continous_bits;
@@ -92,8 +131,8 @@ inline void MemoryManager::SetBigPageContinous(size_t big_page_index, bool value
92} 131}
93 132
94template <MemoryManager::EntryType entry_type> 133template <MemoryManager::EntryType entry_type>
95GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, 134GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size,
96 size_t size) { 135 PTEKind kind) {
97 u64 remaining_size{size}; 136 u64 remaining_size{size};
98 if constexpr (entry_type == EntryType::Mapped) { 137 if constexpr (entry_type == EntryType::Mapped) {
99 page_table.ReserveRange(gpu_addr, size); 138 page_table.ReserveRange(gpu_addr, size);
@@ -102,6 +141,7 @@ GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cp
102 const GPUVAddr current_gpu_addr = gpu_addr + offset; 141 const GPUVAddr current_gpu_addr = gpu_addr + offset;
103 [[maybe_unused]] const auto current_entry_type = GetEntry<false>(current_gpu_addr); 142 [[maybe_unused]] const auto current_entry_type = GetEntry<false>(current_gpu_addr);
104 SetEntry<false>(current_gpu_addr, entry_type); 143 SetEntry<false>(current_gpu_addr, entry_type);
144 SetKind<false>(current_gpu_addr, kind);
105 if (current_entry_type != entry_type) { 145 if (current_entry_type != entry_type) {
106 rasterizer->ModifyGPUMemory(unique_identifier, gpu_addr, page_size); 146 rasterizer->ModifyGPUMemory(unique_identifier, gpu_addr, page_size);
107 } 147 }
@@ -118,12 +158,13 @@ GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cp
118 158
119template <MemoryManager::EntryType entry_type> 159template <MemoryManager::EntryType entry_type>
120GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, 160GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr,
121 size_t size) { 161 size_t size, PTEKind kind) {
122 u64 remaining_size{size}; 162 u64 remaining_size{size};
123 for (u64 offset{}; offset < size; offset += big_page_size) { 163 for (u64 offset{}; offset < size; offset += big_page_size) {
124 const GPUVAddr current_gpu_addr = gpu_addr + offset; 164 const GPUVAddr current_gpu_addr = gpu_addr + offset;
125 [[maybe_unused]] const auto current_entry_type = GetEntry<true>(current_gpu_addr); 165 [[maybe_unused]] const auto current_entry_type = GetEntry<true>(current_gpu_addr);
126 SetEntry<true>(current_gpu_addr, entry_type); 166 SetEntry<true>(current_gpu_addr, entry_type);
167 SetKind<true>(current_gpu_addr, kind);
127 if (current_entry_type != entry_type) { 168 if (current_entry_type != entry_type) {
128 rasterizer->ModifyGPUMemory(unique_identifier, gpu_addr, big_page_size); 169 rasterizer->ModifyGPUMemory(unique_identifier, gpu_addr, big_page_size);
129 } 170 }
@@ -159,19 +200,19 @@ void MemoryManager::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_)
159 rasterizer = rasterizer_; 200 rasterizer = rasterizer_;
160} 201}
161 202
162GPUVAddr MemoryManager::Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size, 203GPUVAddr MemoryManager::Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size, PTEKind kind,
163 bool is_big_pages) { 204 bool is_big_pages) {
164 if (is_big_pages) [[likely]] { 205 if (is_big_pages) [[likely]] {
165 return BigPageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size); 206 return BigPageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size, kind);
166 } 207 }
167 return PageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size); 208 return PageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size, kind);
168} 209}
169 210
170GPUVAddr MemoryManager::MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages) { 211GPUVAddr MemoryManager::MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages) {
171 if (is_big_pages) [[likely]] { 212 if (is_big_pages) [[likely]] {
172 return BigPageTableOp<EntryType::Reserved>(gpu_addr, 0, size); 213 return BigPageTableOp<EntryType::Reserved>(gpu_addr, 0, size, PTEKind::INVALID);
173 } 214 }
174 return PageTableOp<EntryType::Reserved>(gpu_addr, 0, size); 215 return PageTableOp<EntryType::Reserved>(gpu_addr, 0, size, PTEKind::INVALID);
175} 216}
176 217
177void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) { 218void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
@@ -188,8 +229,8 @@ void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
188 rasterizer->UnmapMemory(*cpu_addr, map_size); 229 rasterizer->UnmapMemory(*cpu_addr, map_size);
189 } 230 }
190 231
191 BigPageTableOp<EntryType::Free>(gpu_addr, 0, size); 232 BigPageTableOp<EntryType::Free>(gpu_addr, 0, size, PTEKind::INVALID);
192 PageTableOp<EntryType::Free>(gpu_addr, 0, size); 233 PageTableOp<EntryType::Free>(gpu_addr, 0, size, PTEKind::INVALID);
193} 234}
194 235
195std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const { 236std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const {
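
The memory manager now carries a PTEKind per page in both page tables, packed 32 kinds per vector element so resize can broadcast an INVALID fill, and GetPageKind consults the big-page table first for mapped or reserved entries. A reduced model of that bookkeeping with toy page sizes; KindTable and the enum subset are mine, while the real tables sit beside entries/big_entries:

#include <array>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Subset of the diff's enum, enough to exercise the table.
enum class PTEKind : std::uint8_t { INVALID = 0xff, PITCH = 0x00, GENERIC_16BX2 = 0xfe };

// Reduced model of the diff's kind bookkeeping: one PTEKind per page, packed as
// vector<array<PTEKind, 32>> so resize can broadcast an INVALID-filled block.
class KindTable {
public:
    KindTable(std::size_t num_pages, std::size_t page_bits_) : page_bits{page_bits_} {
        std::array<PTEKind, 32> kind_values;
        kind_values.fill(PTEKind::INVALID);
        kinds.resize((num_pages + 31) / 32, kind_values);
    }

    void Set(std::uint64_t gpu_addr, PTEKind kind) {
        const std::size_t page = gpu_addr >> page_bits;
        kinds[page / 32][page % 32] = kind;
    }

    PTEKind Get(std::uint64_t gpu_addr) const {
        const std::size_t page = gpu_addr >> page_bits;
        return kinds[page / 32][page % 32];
    }

private:
    std::size_t page_bits;
    std::vector<std::array<PTEKind, 32>> kinds;
};

int main() {
    KindTable table(1024, 12); // 1024 pages of 4 KiB; sizes are illustrative
    table.Set(0x42000, PTEKind::GENERIC_16BX2);
    std::cout << std::hex << static_cast<int>(table.Get(0x42000)) << '\n'; // fe
    std::cout << std::hex << static_cast<int>(table.Get(0x1000)) << '\n';  // ff (INVALID)
}
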
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index f992e29f3..ab4bc9ec6 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -11,6 +11,7 @@
11#include "common/common_types.h" 11#include "common/common_types.h"
12#include "common/multi_level_page_table.h" 12#include "common/multi_level_page_table.h"
13#include "common/virtual_buffer.h" 13#include "common/virtual_buffer.h"
14#include "video_core/pte_kind.h"
14 15
15namespace VideoCore { 16namespace VideoCore {
16class RasterizerInterface; 17class RasterizerInterface;
@@ -98,7 +99,8 @@ public:
98 std::vector<std::pair<GPUVAddr, std::size_t>> GetSubmappedRange(GPUVAddr gpu_addr, 99 std::vector<std::pair<GPUVAddr, std::size_t>> GetSubmappedRange(GPUVAddr gpu_addr,
99 std::size_t size) const; 100 std::size_t size) const;
100 101
101 GPUVAddr Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size, bool is_big_pages = true); 102 GPUVAddr Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size,
103 PTEKind kind = PTEKind::INVALID, bool is_big_pages = true);
102 GPUVAddr MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages = true); 104 GPUVAddr MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages = true);
103 void Unmap(GPUVAddr gpu_addr, std::size_t size); 105 void Unmap(GPUVAddr gpu_addr, std::size_t size);
104 106
@@ -114,6 +116,8 @@ public:
114 return gpu_addr < address_space_size; 116 return gpu_addr < address_space_size;
115 } 117 }
116 118
119 PTEKind GetPageKind(GPUVAddr gpu_addr) const;
120
117private: 121private:
118 template <bool is_big_pages, typename FuncMapped, typename FuncReserved, typename FuncUnmapped> 122 template <bool is_big_pages, typename FuncMapped, typename FuncReserved, typename FuncUnmapped>
119 inline void MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size, FuncMapped&& func_mapped, 123 inline void MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size, FuncMapped&& func_mapped,
@@ -166,10 +170,12 @@ private:
166 std::vector<u64> big_entries; 170 std::vector<u64> big_entries;
167 171
168 template <EntryType entry_type> 172 template <EntryType entry_type>
169 GPUVAddr PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size); 173 GPUVAddr PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size,
174 PTEKind kind);
170 175
171 template <EntryType entry_type> 176 template <EntryType entry_type>
172 GPUVAddr BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size); 177 GPUVAddr BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size,
178 PTEKind kind);
173 179
174 template <bool is_big_page> 180 template <bool is_big_page>
175 inline EntryType GetEntry(size_t position) const; 181 inline EntryType GetEntry(size_t position) const;
@@ -177,6 +183,15 @@ private:
177 template <bool is_big_page> 183 template <bool is_big_page>
178 inline void SetEntry(size_t position, EntryType entry); 184 inline void SetEntry(size_t position, EntryType entry);
179 185
186 std::vector<std::array<PTEKind, 32>> kinds;
187 std::vector<std::array<PTEKind, 32>> big_kinds;
188
189 template <bool is_big_page>
190 inline PTEKind GetKind(size_t position) const;
191
192 template <bool is_big_page>
193 inline void SetKind(size_t position, PTEKind kind);
194
180 Common::MultiLevelPageTable<u32> page_table; 195 Common::MultiLevelPageTable<u32> page_table;
181 Common::VirtualBuffer<u32> big_page_table_cpu; 196 Common::VirtualBuffer<u32> big_page_table_cpu;
182 197
diff --git a/src/video_core/pte_kind.h b/src/video_core/pte_kind.h
new file mode 100644
index 000000000..591d7214b
--- /dev/null
+++ b/src/video_core/pte_kind.h
@@ -0,0 +1,264 @@
1// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "common/common_types.h"
7
8namespace Tegra {
9
10// https://github.com/NVIDIA/open-gpu-doc/blob/master/manuals/volta/gv100/dev_mmu.ref.txt
11enum class PTEKind : u8 {
12 INVALID = 0xff,
13 PITCH = 0x00,
14 Z16 = 0x01,
15 Z16_2C = 0x02,
16 Z16_MS2_2C = 0x03,
17 Z16_MS4_2C = 0x04,
18 Z16_MS8_2C = 0x05,
19 Z16_MS16_2C = 0x06,
20 Z16_2Z = 0x07,
21 Z16_MS2_2Z = 0x08,
22 Z16_MS4_2Z = 0x09,
23 Z16_MS8_2Z = 0x0a,
24 Z16_MS16_2Z = 0x0b,
25 Z16_2CZ = 0x36,
26 Z16_MS2_2CZ = 0x37,
27 Z16_MS4_2CZ = 0x38,
28 Z16_MS8_2CZ = 0x39,
29 Z16_MS16_2CZ = 0x5f,
30 Z16_4CZ = 0x0c,
31 Z16_MS2_4CZ = 0x0d,
32 Z16_MS4_4CZ = 0x0e,
33 Z16_MS8_4CZ = 0x0f,
34 Z16_MS16_4CZ = 0x10,
35 S8Z24 = 0x11,
36 S8Z24_1Z = 0x12,
37 S8Z24_MS2_1Z = 0x13,
38 S8Z24_MS4_1Z = 0x14,
39 S8Z24_MS8_1Z = 0x15,
40 S8Z24_MS16_1Z = 0x16,
41 S8Z24_2CZ = 0x17,
42 S8Z24_MS2_2CZ = 0x18,
43 S8Z24_MS4_2CZ = 0x19,
44 S8Z24_MS8_2CZ = 0x1a,
45 S8Z24_MS16_2CZ = 0x1b,
46 S8Z24_2CS = 0x1c,
47 S8Z24_MS2_2CS = 0x1d,
48 S8Z24_MS4_2CS = 0x1e,
49 S8Z24_MS8_2CS = 0x1f,
50 S8Z24_MS16_2CS = 0x20,
51 S8Z24_4CSZV = 0x21,
52 S8Z24_MS2_4CSZV = 0x22,
53 S8Z24_MS4_4CSZV = 0x23,
54 S8Z24_MS8_4CSZV = 0x24,
55 S8Z24_MS16_4CSZV = 0x25,
56 V8Z24_MS4_VC12 = 0x26,
57 V8Z24_MS4_VC4 = 0x27,
58 V8Z24_MS8_VC8 = 0x28,
59 V8Z24_MS8_VC24 = 0x29,
60 V8Z24_MS4_VC12_1ZV = 0x2e,
61 V8Z24_MS4_VC4_1ZV = 0x2f,
62 V8Z24_MS8_VC8_1ZV = 0x30,
63 V8Z24_MS8_VC24_1ZV = 0x31,
64 V8Z24_MS4_VC12_2CS = 0x32,
65 V8Z24_MS4_VC4_2CS = 0x33,
66 V8Z24_MS8_VC8_2CS = 0x34,
67 V8Z24_MS8_VC24_2CS = 0x35,
68 V8Z24_MS4_VC12_2CZV = 0x3a,
69 V8Z24_MS4_VC4_2CZV = 0x3b,
70 V8Z24_MS8_VC8_2CZV = 0x3c,
71 V8Z24_MS8_VC24_2CZV = 0x3d,
72 V8Z24_MS4_VC12_2ZV = 0x3e,
73 V8Z24_MS4_VC4_2ZV = 0x3f,
74 V8Z24_MS8_VC8_2ZV = 0x40,
75 V8Z24_MS8_VC24_2ZV = 0x41,
76 V8Z24_MS4_VC12_4CSZV = 0x42,
77 V8Z24_MS4_VC4_4CSZV = 0x43,
78 V8Z24_MS8_VC8_4CSZV = 0x44,
79 V8Z24_MS8_VC24_4CSZV = 0x45,
80 Z24S8 = 0x46,
81 Z24S8_1Z = 0x47,
82 Z24S8_MS2_1Z = 0x48,
83 Z24S8_MS4_1Z = 0x49,
84 Z24S8_MS8_1Z = 0x4a,
85 Z24S8_MS16_1Z = 0x4b,
86 Z24S8_2CS = 0x4c,
87 Z24S8_MS2_2CS = 0x4d,
88 Z24S8_MS4_2CS = 0x4e,
89 Z24S8_MS8_2CS = 0x4f,
90 Z24S8_MS16_2CS = 0x50,
91 Z24S8_2CZ = 0x51,
92 Z24S8_MS2_2CZ = 0x52,
93 Z24S8_MS4_2CZ = 0x53,
94 Z24S8_MS8_2CZ = 0x54,
95 Z24S8_MS16_2CZ = 0x55,
96 Z24S8_4CSZV = 0x56,
97 Z24S8_MS2_4CSZV = 0x57,
98 Z24S8_MS4_4CSZV = 0x58,
99 Z24S8_MS8_4CSZV = 0x59,
100 Z24S8_MS16_4CSZV = 0x5a,
101 Z24V8_MS4_VC12 = 0x5b,
102 Z24V8_MS4_VC4 = 0x5c,
103 Z24V8_MS8_VC8 = 0x5d,
104 Z24V8_MS8_VC24 = 0x5e,
105 YUV_B8C1_2Y = 0x60,
106 YUV_B8C2_2Y = 0x61,
107 YUV_B10C1_2Y = 0x62,
108 YUV_B10C2_2Y = 0x6b,
109 YUV_B12C1_2Y = 0x6c,
110 YUV_B12C2_2Y = 0x6d,
111 Z24V8_MS4_VC12_1ZV = 0x63,
112 Z24V8_MS4_VC4_1ZV = 0x64,
113 Z24V8_MS8_VC8_1ZV = 0x65,
114 Z24V8_MS8_VC24_1ZV = 0x66,
115 Z24V8_MS4_VC12_2CS = 0x67,
116 Z24V8_MS4_VC4_2CS = 0x68,
117 Z24V8_MS8_VC8_2CS = 0x69,
118 Z24V8_MS8_VC24_2CS = 0x6a,
119 Z24V8_MS4_VC12_2CZV = 0x6f,
120 Z24V8_MS4_VC4_2CZV = 0x70,
121 Z24V8_MS8_VC8_2CZV = 0x71,
122 Z24V8_MS8_VC24_2CZV = 0x72,
123 Z24V8_MS4_VC12_2ZV = 0x73,
124 Z24V8_MS4_VC4_2ZV = 0x74,
125 Z24V8_MS8_VC8_2ZV = 0x75,
126 Z24V8_MS8_VC24_2ZV = 0x76,
127 Z24V8_MS4_VC12_4CSZV = 0x77,
128 Z24V8_MS4_VC4_4CSZV = 0x78,
129 Z24V8_MS8_VC8_4CSZV = 0x79,
130 Z24V8_MS8_VC24_4CSZV = 0x7a,
131 ZF32 = 0x7b,
132 ZF32_1Z = 0x7c,
133 ZF32_MS2_1Z = 0x7d,
134 ZF32_MS4_1Z = 0x7e,
135 ZF32_MS8_1Z = 0x7f,
136 ZF32_MS16_1Z = 0x80,
137 ZF32_2CS = 0x81,
138 ZF32_MS2_2CS = 0x82,
139 ZF32_MS4_2CS = 0x83,
140 ZF32_MS8_2CS = 0x84,
141 ZF32_MS16_2CS = 0x85,
142 ZF32_2CZ = 0x86,
143 ZF32_MS2_2CZ = 0x87,
144 ZF32_MS4_2CZ = 0x88,
145 ZF32_MS8_2CZ = 0x89,
146 ZF32_MS16_2CZ = 0x8a,
147 X8Z24_X16V8S8_MS4_VC12 = 0x8b,
148 X8Z24_X16V8S8_MS4_VC4 = 0x8c,
149 X8Z24_X16V8S8_MS8_VC8 = 0x8d,
150 X8Z24_X16V8S8_MS8_VC24 = 0x8e,
151 X8Z24_X16V8S8_MS4_VC12_1CS = 0x8f,
152 X8Z24_X16V8S8_MS4_VC4_1CS = 0x90,
153 X8Z24_X16V8S8_MS8_VC8_1CS = 0x91,
154 X8Z24_X16V8S8_MS8_VC24_1CS = 0x92,
155 X8Z24_X16V8S8_MS4_VC12_1ZV = 0x97,
156 X8Z24_X16V8S8_MS4_VC4_1ZV = 0x98,
157 X8Z24_X16V8S8_MS8_VC8_1ZV = 0x99,
158 X8Z24_X16V8S8_MS8_VC24_1ZV = 0x9a,
159 X8Z24_X16V8S8_MS4_VC12_1CZV = 0x9b,
160 X8Z24_X16V8S8_MS4_VC4_1CZV = 0x9c,
161 X8Z24_X16V8S8_MS8_VC8_1CZV = 0x9d,
162 X8Z24_X16V8S8_MS8_VC24_1CZV = 0x9e,
163 X8Z24_X16V8S8_MS4_VC12_2CS = 0x9f,
164 X8Z24_X16V8S8_MS4_VC4_2CS = 0xa0,
165 X8Z24_X16V8S8_MS8_VC8_2CS = 0xa1,
166 X8Z24_X16V8S8_MS8_VC24_2CS = 0xa2,
167 X8Z24_X16V8S8_MS4_VC12_2CSZV = 0xa3,
168 X8Z24_X16V8S8_MS4_VC4_2CSZV = 0xa4,
169 X8Z24_X16V8S8_MS8_VC8_2CSZV = 0xa5,
170 X8Z24_X16V8S8_MS8_VC24_2CSZV = 0xa6,
171 ZF32_X16V8S8_MS4_VC12 = 0xa7,
172 ZF32_X16V8S8_MS4_VC4 = 0xa8,
173 ZF32_X16V8S8_MS8_VC8 = 0xa9,
174 ZF32_X16V8S8_MS8_VC24 = 0xaa,
175 ZF32_X16V8S8_MS4_VC12_1CS = 0xab,
176 ZF32_X16V8S8_MS4_VC4_1CS = 0xac,
177 ZF32_X16V8S8_MS8_VC8_1CS = 0xad,
178 ZF32_X16V8S8_MS8_VC24_1CS = 0xae,
179 ZF32_X16V8S8_MS4_VC12_1ZV = 0xb3,
180 ZF32_X16V8S8_MS4_VC4_1ZV = 0xb4,
181 ZF32_X16V8S8_MS8_VC8_1ZV = 0xb5,
182 ZF32_X16V8S8_MS8_VC24_1ZV = 0xb6,
183 ZF32_X16V8S8_MS4_VC12_1CZV = 0xb7,
184 ZF32_X16V8S8_MS4_VC4_1CZV = 0xb8,
185 ZF32_X16V8S8_MS8_VC8_1CZV = 0xb9,
186 ZF32_X16V8S8_MS8_VC24_1CZV = 0xba,
187 ZF32_X16V8S8_MS4_VC12_2CS = 0xbb,
188 ZF32_X16V8S8_MS4_VC4_2CS = 0xbc,
189 ZF32_X16V8S8_MS8_VC8_2CS = 0xbd,
190 ZF32_X16V8S8_MS8_VC24_2CS = 0xbe,
191 ZF32_X16V8S8_MS4_VC12_2CSZV = 0xbf,
192 ZF32_X16V8S8_MS4_VC4_2CSZV = 0xc0,
193 ZF32_X16V8S8_MS8_VC8_2CSZV = 0xc1,
194 ZF32_X16V8S8_MS8_VC24_2CSZV = 0xc2,
195 ZF32_X24S8 = 0xc3,
196 ZF32_X24S8_1CS = 0xc4,
197 ZF32_X24S8_MS2_1CS = 0xc5,
198 ZF32_X24S8_MS4_1CS = 0xc6,
199 ZF32_X24S8_MS8_1CS = 0xc7,
200 ZF32_X24S8_MS16_1CS = 0xc8,
201 ZF32_X24S8_2CSZV = 0xce,
202 ZF32_X24S8_MS2_2CSZV = 0xcf,
203 ZF32_X24S8_MS4_2CSZV = 0xd0,
204 ZF32_X24S8_MS8_2CSZV = 0xd1,
205 ZF32_X24S8_MS16_2CSZV = 0xd2,
206 ZF32_X24S8_2CS = 0xd3,
207 ZF32_X24S8_MS2_2CS = 0xd4,
208 ZF32_X24S8_MS4_2CS = 0xd5,
209 ZF32_X24S8_MS8_2CS = 0xd6,
210 ZF32_X24S8_MS16_2CS = 0xd7,
211 S8 = 0x2a,
212 S8_2S = 0x2b,
213 GENERIC_16BX2 = 0xfe,
214 C32_2C = 0xd8,
215 C32_2CBR = 0xd9,
216 C32_2CBA = 0xda,
217 C32_2CRA = 0xdb,
218 C32_2BRA = 0xdc,
219 C32_MS2_2C = 0xdd,
220 C32_MS2_2CBR = 0xde,
221 C32_MS2_4CBRA = 0xcc,
222 C32_MS4_2C = 0xdf,
223 C32_MS4_2CBR = 0xe0,
224 C32_MS4_2CBA = 0xe1,
225 C32_MS4_2CRA = 0xe2,
226 C32_MS4_2BRA = 0xe3,
227 C32_MS4_4CBRA = 0x2c,
228 C32_MS8_MS16_2C = 0xe4,
229 C32_MS8_MS16_2CRA = 0xe5,
230 C64_2C = 0xe6,
231 C64_2CBR = 0xe7,
232 C64_2CBA = 0xe8,
233 C64_2CRA = 0xe9,
234 C64_2BRA = 0xea,
235 C64_MS2_2C = 0xeb,
236 C64_MS2_2CBR = 0xec,
237 C64_MS2_4CBRA = 0xcd,
238 C64_MS4_2C = 0xed,
239 C64_MS4_2CBR = 0xee,
240 C64_MS4_2CBA = 0xef,
241 C64_MS4_2CRA = 0xf0,
242 C64_MS4_2BRA = 0xf1,
243 C64_MS4_4CBRA = 0x2d,
244 C64_MS8_MS16_2C = 0xf2,
245 C64_MS8_MS16_2CRA = 0xf3,
246 C128_2C = 0xf4,
247 C128_2CR = 0xf5,
248 C128_MS2_2C = 0xf6,
249 C128_MS2_2CR = 0xf7,
250 C128_MS4_2C = 0xf8,
251 C128_MS4_2CR = 0xf9,
252 C128_MS8_MS16_2C = 0xfa,
253 C128_MS8_MS16_2CR = 0xfb,
254 X8C24 = 0xfc,
255 PITCH_NO_SWIZZLE = 0xfd,
256 SMSKED_MESSAGE = 0xca,
257 SMHOST_MESSAGE = 0xcb,
258};
259
260constexpr bool IsPitchKind(PTEKind kind) {
261 return kind == PTEKind::PITCH || kind == PTEKind::PITCH_NO_SWIZZLE;
262}
263
264} // namespace Tegra
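
IsPitchKind is the predicate MaxwellDMA::Launch uses to choose between the plain copy and the deswizzle path. Because it is constexpr, the distinction can be checked at compile time; a tiny test, assuming the in-tree include path from this diff:

#include <iostream>
#include "video_core/pte_kind.h"

int main() {
    using Tegra::PTEKind;
    // Only the two pitch kinds skip the block-linear conversion path.
    static_assert(Tegra::IsPitchKind(PTEKind::PITCH));
    static_assert(Tegra::IsPitchKind(PTEKind::PITCH_NO_SWIZZLE));
    static_assert(!Tegra::IsPitchKind(PTEKind::Z16));
    static_assert(!Tegra::IsPitchKind(PTEKind::INVALID));
    std::cout << "pitch-kind checks hold at compile time\n";
}
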
diff --git a/src/video_core/renderer_base.cpp b/src/video_core/renderer_base.cpp
index 45791aa75..e8761a747 100644
--- a/src/video_core/renderer_base.cpp
+++ b/src/video_core/renderer_base.cpp
@@ -1,6 +1,8 @@
1// SPDX-FileCopyrightText: 2015 Citra Emulator Project 1// SPDX-FileCopyrightText: 2015 Citra Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later 2// SPDX-License-Identifier: GPL-2.0-or-later
3 3
4#include <thread>
5
4#include "common/logging/log.h" 6#include "common/logging/log.h"
5#include "core/frontend/emu_window.h" 7#include "core/frontend/emu_window.h"
6#include "video_core/renderer_base.h" 8#include "video_core/renderer_base.h"
@@ -35,8 +37,12 @@ void RendererBase::RequestScreenshot(void* data, std::function<void(bool)> callb
35 LOG_ERROR(Render, "A screenshot is already requested or in progress, ignoring the request"); 37 LOG_ERROR(Render, "A screenshot is already requested or in progress, ignoring the request");
36 return; 38 return;
37 } 39 }
40 auto async_callback{[callback = std::move(callback)](bool invert_y) {
41 std::thread t{callback, invert_y};
42 t.detach();
43 }};
38 renderer_settings.screenshot_bits = data; 44 renderer_settings.screenshot_bits = data;
39 renderer_settings.screenshot_complete_callback = std::move(callback); 45 renderer_settings.screenshot_complete_callback = async_callback;
40 renderer_settings.screenshot_framebuffer_layout = layout; 46 renderer_settings.screenshot_framebuffer_layout = layout;
41 renderer_settings.screenshot_requested = true; 47 renderer_settings.screenshot_requested = true;
42} 48}
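
The screenshot completion callback is now wrapped so invoking it spawns a detached thread, keeping frontend processing off the render thread. The same wrapping in isolation; the toy callback and the sleep are demo scaffolding, not part of the renderer:

#include <chrono>
#include <functional>
#include <iostream>
#include <thread>

int main() {
    std::function<void(bool)> callback = [](bool invert_y) {
        // Stand-in for the frontend's screenshot handling.
        std::cout << "screenshot done, invert_y=" << invert_y << '\n';
    };

    // Same shape as the diff: wrap the callback so each invocation runs on a
    // detached thread instead of inline on the caller's thread.
    auto async_callback = [callback = std::move(callback)](bool invert_y) {
        std::thread t{callback, invert_y};
        t.detach();
    };

    async_callback(true);
    // Give the detached thread time to finish before main exits (demo only).
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
}
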
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index cce00cea8..e5c09a969 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -658,8 +658,13 @@ void RasterizerOpenGL::SyncDepthClamp() {
658 } 658 }
659 flags[Dirty::DepthClampEnabled] = false; 659 flags[Dirty::DepthClampEnabled] = false;
660 660
661 oglEnable(GL_DEPTH_CLAMP, maxwell3d->regs.viewport_clip_control.geometry_clip != 661 bool depth_clamp_disabled{maxwell3d->regs.viewport_clip_control.geometry_clip ==
662 Maxwell::ViewportClipControl::GeometryClip::Passthrough); 662 Maxwell::ViewportClipControl::GeometryClip::Passthrough ||
663 maxwell3d->regs.viewport_clip_control.geometry_clip ==
664 Maxwell::ViewportClipControl::GeometryClip::FrustumXYZ ||
665 maxwell3d->regs.viewport_clip_control.geometry_clip ==
666 Maxwell::ViewportClipControl::GeometryClip::FrustumZ};
667 oglEnable(GL_DEPTH_CLAMP, !depth_clamp_disabled);
663} 668}
664 669
665void RasterizerOpenGL::SyncClipEnabled(u32 clip_mask) { 670void RasterizerOpenGL::SyncClipEnabled(u32 clip_mask) {
@@ -746,19 +751,19 @@ void RasterizerOpenGL::SyncStencilTestState() {
746 oglEnable(GL_STENCIL_TEST, regs.stencil_enable); 751 oglEnable(GL_STENCIL_TEST, regs.stencil_enable);
747 752
748 glStencilFuncSeparate(GL_FRONT, MaxwellToGL::ComparisonOp(regs.stencil_front_op.func), 753 glStencilFuncSeparate(GL_FRONT, MaxwellToGL::ComparisonOp(regs.stencil_front_op.func),
749 regs.stencil_front_func.ref, regs.stencil_front_func.func_mask); 754 regs.stencil_front_ref, regs.stencil_front_func_mask);
750 glStencilOpSeparate(GL_FRONT, MaxwellToGL::StencilOp(regs.stencil_front_op.fail), 755 glStencilOpSeparate(GL_FRONT, MaxwellToGL::StencilOp(regs.stencil_front_op.fail),
751 MaxwellToGL::StencilOp(regs.stencil_front_op.zfail), 756 MaxwellToGL::StencilOp(regs.stencil_front_op.zfail),
752 MaxwellToGL::StencilOp(regs.stencil_front_op.zpass)); 757 MaxwellToGL::StencilOp(regs.stencil_front_op.zpass));
753 glStencilMaskSeparate(GL_FRONT, regs.stencil_front_func.mask); 758 glStencilMaskSeparate(GL_FRONT, regs.stencil_front_mask);
754 759
755 if (regs.stencil_two_side_enable) { 760 if (regs.stencil_two_side_enable) {
756 glStencilFuncSeparate(GL_BACK, MaxwellToGL::ComparisonOp(regs.stencil_back_op.func), 761 glStencilFuncSeparate(GL_BACK, MaxwellToGL::ComparisonOp(regs.stencil_back_op.func),
757 regs.stencil_back_func.ref, regs.stencil_back_func.mask); 762 regs.stencil_back_ref, regs.stencil_back_mask);
758 glStencilOpSeparate(GL_BACK, MaxwellToGL::StencilOp(regs.stencil_back_op.fail), 763 glStencilOpSeparate(GL_BACK, MaxwellToGL::StencilOp(regs.stencil_back_op.fail),
759 MaxwellToGL::StencilOp(regs.stencil_back_op.zfail), 764 MaxwellToGL::StencilOp(regs.stencil_back_op.zfail),
760 MaxwellToGL::StencilOp(regs.stencil_back_op.zpass)); 765 MaxwellToGL::StencilOp(regs.stencil_back_op.zpass));
761 glStencilMaskSeparate(GL_BACK, regs.stencil_back_func.mask); 766 glStencilMaskSeparate(GL_BACK, regs.stencil_back_mask);
762 } else { 767 } else {
763 glStencilFuncSeparate(GL_BACK, GL_ALWAYS, 0, 0xFFFFFFFF); 768 glStencilFuncSeparate(GL_BACK, GL_ALWAYS, 0, 0xFFFFFFFF);
764 glStencilOpSeparate(GL_BACK, GL_KEEP, GL_KEEP, GL_KEEP); 769 glStencilOpSeparate(GL_BACK, GL_KEEP, GL_KEEP, GL_KEEP);
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index 6bdb0b645..609f0a772 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -317,8 +317,8 @@ GraphicsPipeline* ShaderCache::CurrentGraphicsPipeline() {
317 graphics_key.tessellation_primitive.Assign(regs.tessellation.params.domain_type.Value()); 317 graphics_key.tessellation_primitive.Assign(regs.tessellation.params.domain_type.Value());
318 graphics_key.tessellation_spacing.Assign(regs.tessellation.params.spacing.Value()); 318 graphics_key.tessellation_spacing.Assign(regs.tessellation.params.spacing.Value());
319 graphics_key.tessellation_clockwise.Assign( 319 graphics_key.tessellation_clockwise.Assign(
320 regs.tessellation.params.output_primitives.Value() != 320 regs.tessellation.params.output_primitives.Value() ==
321 Maxwell::Tessellation::OutputPrimitves::Triangles_CCW); 321 Maxwell::Tessellation::OutputPrimitives::Triangles_CW);
322 graphics_key.xfb_enabled.Assign(regs.transform_feedback_enabled != 0 ? 1 : 0); 322 graphics_key.xfb_enabled.Assign(regs.transform_feedback_enabled != 0 ? 1 : 0);
323 if (graphics_key.xfb_enabled) { 323 if (graphics_key.xfb_enabled) {
324 SetXfbState(graphics_key.xfb_state, regs); 324 SetXfbState(graphics_key.xfb_state, regs);
diff --git a/src/video_core/renderer_opengl/gl_state_tracker.cpp b/src/video_core/renderer_opengl/gl_state_tracker.cpp
index e2c709aac..a359f96f1 100644
--- a/src/video_core/renderer_opengl/gl_state_tracker.cpp
+++ b/src/video_core/renderer_opengl/gl_state_tracker.cpp
@@ -100,14 +100,12 @@ void SetupDirtyDepthTest(Tables& tables) {
100 100
101void SetupDirtyStencilTest(Tables& tables) { 101void SetupDirtyStencilTest(Tables& tables) {
102 static constexpr std::array offsets = { 102 static constexpr std::array offsets = {
103 OFF(stencil_enable), OFF(stencil_front_op.func), 103 OFF(stencil_enable), OFF(stencil_front_op.func), OFF(stencil_front_ref),
104 OFF(stencil_front_func.ref), OFF(stencil_front_func.func_mask), 104 OFF(stencil_front_func_mask), OFF(stencil_front_op.fail), OFF(stencil_front_op.zfail),
105 OFF(stencil_front_op.fail), OFF(stencil_front_op.zfail), 105 OFF(stencil_front_op.zpass), OFF(stencil_front_mask), OFF(stencil_two_side_enable),
106 OFF(stencil_front_op.zpass), OFF(stencil_front_func.mask), 106 OFF(stencil_back_op.func), OFF(stencil_back_ref), OFF(stencil_back_func_mask),
107 OFF(stencil_two_side_enable), OFF(stencil_back_op.func), 107 OFF(stencil_back_op.fail), OFF(stencil_back_op.zfail), OFF(stencil_back_op.zpass),
108 OFF(stencil_back_func.ref), OFF(stencil_back_func.func_mask), 108 OFF(stencil_back_mask)};
109 OFF(stencil_back_op.fail), OFF(stencil_back_op.zfail),
110 OFF(stencil_back_op.zpass), OFF(stencil_back_func.mask)};
111 for (const auto offset : offsets) { 109 for (const auto offset : offsets) {
112 tables[0][offset] = StencilTest; 110 tables[0][offset] = StencilTest;
113 } 111 }
diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
index eb7c22fd5..f85ed8e5b 100644
--- a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
+++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
@@ -63,14 +63,18 @@ void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d,
63 primitive_restart_enable.Assign(regs.primitive_restart.enabled != 0 ? 1 : 0); 63 primitive_restart_enable.Assign(regs.primitive_restart.enabled != 0 ? 1 : 0);
64 depth_bias_enable.Assign(enabled_lut[POLYGON_OFFSET_ENABLE_LUT[topology_index]] != 0 ? 1 : 0); 64 depth_bias_enable.Assign(enabled_lut[POLYGON_OFFSET_ENABLE_LUT[topology_index]] != 0 ? 1 : 0);
65 depth_clamp_disabled.Assign(regs.viewport_clip_control.geometry_clip == 65 depth_clamp_disabled.Assign(regs.viewport_clip_control.geometry_clip ==
66 Maxwell::ViewportClipControl::GeometryClip::Passthrough); 66 Maxwell::ViewportClipControl::GeometryClip::Passthrough ||
67 regs.viewport_clip_control.geometry_clip ==
68 Maxwell::ViewportClipControl::GeometryClip::FrustumXYZ ||
69 regs.viewport_clip_control.geometry_clip ==
70 Maxwell::ViewportClipControl::GeometryClip::FrustumZ);
67 ndc_minus_one_to_one.Assign(regs.depth_mode == Maxwell::DepthMode::MinusOneToOne ? 1 : 0); 71 ndc_minus_one_to_one.Assign(regs.depth_mode == Maxwell::DepthMode::MinusOneToOne ? 1 : 0);
68 polygon_mode.Assign(PackPolygonMode(regs.polygon_mode_front)); 72 polygon_mode.Assign(PackPolygonMode(regs.polygon_mode_front));
69 patch_control_points_minus_one.Assign(regs.patch_vertices - 1); 73 patch_control_points_minus_one.Assign(regs.patch_vertices - 1);
70 tessellation_primitive.Assign(static_cast<u32>(regs.tessellation.params.domain_type.Value())); 74 tessellation_primitive.Assign(static_cast<u32>(regs.tessellation.params.domain_type.Value()));
71 tessellation_spacing.Assign(static_cast<u32>(regs.tessellation.params.spacing.Value())); 75 tessellation_spacing.Assign(static_cast<u32>(regs.tessellation.params.spacing.Value()));
72 tessellation_clockwise.Assign(regs.tessellation.params.output_primitives.Value() != 76 tessellation_clockwise.Assign(regs.tessellation.params.output_primitives.Value() ==
73 Maxwell::Tessellation::OutputPrimitves::Triangles_CCW); 77 Maxwell::Tessellation::OutputPrimitives::Triangles_CW);
74 logic_op_enable.Assign(regs.logic_op.enable != 0 ? 1 : 0); 78 logic_op_enable.Assign(regs.logic_op.enable != 0 ? 1 : 0);
75 logic_op.Assign(PackLogicOp(regs.logic_op.op)); 79 logic_op.Assign(PackLogicOp(regs.logic_op.op));
76 topology.Assign(regs.draw.topology); 80 topology.Assign(regs.draw.topology);
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp
index 7cb02631c..4b15c0f85 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp
@@ -59,10 +59,11 @@ void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) {
     std::find_if(pools.begin(), pools.end(), [query_pool = query.first](vk::QueryPool& pool) {
         return query_pool == *pool;
     });
-    ASSERT(it != std::end(pools));
 
-    const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it);
-    usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false;
+    if (it != std::end(pools)) {
+        const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it);
+        usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false;
+    }
 }
 
 QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_,
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 892cd94a3..47dfb45a1 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -772,11 +772,10 @@ void RasterizerVulkan::UpdateStencilFaces(Tegra::Engines::Maxwell3D::Regs& regs)
     if (regs.stencil_two_side_enable) {
         // Separate values per face
         scheduler.Record(
-            [front_ref = regs.stencil_front_func.ref,
-             front_write_mask = regs.stencil_front_func.mask,
-             front_test_mask = regs.stencil_front_func.func_mask,
-             back_ref = regs.stencil_back_func.ref, back_write_mask = regs.stencil_back_func.mask,
-             back_test_mask = regs.stencil_back_func.func_mask](vk::CommandBuffer cmdbuf) {
+            [front_ref = regs.stencil_front_ref, front_write_mask = regs.stencil_front_mask,
+             front_test_mask = regs.stencil_front_func_mask, back_ref = regs.stencil_back_ref,
+             back_write_mask = regs.stencil_back_mask,
+             back_test_mask = regs.stencil_back_func_mask](vk::CommandBuffer cmdbuf) {
                 // Front face
                 cmdbuf.SetStencilReference(VK_STENCIL_FACE_FRONT_BIT, front_ref);
                 cmdbuf.SetStencilWriteMask(VK_STENCIL_FACE_FRONT_BIT, front_write_mask);
@@ -789,9 +788,8 @@ void RasterizerVulkan::UpdateStencilFaces(Tegra::Engines::Maxwell3D::Regs& regs)
         });
     } else {
         // Front face defines both faces
-        scheduler.Record([ref = regs.stencil_front_func.ref,
-                          write_mask = regs.stencil_front_func.mask,
-                          test_mask = regs.stencil_front_func.func_mask](vk::CommandBuffer cmdbuf) {
+        scheduler.Record([ref = regs.stencil_front_ref, write_mask = regs.stencil_front_mask,
+                          test_mask = regs.stencil_front_func_mask](vk::CommandBuffer cmdbuf) {
             cmdbuf.SetStencilReference(VK_STENCIL_FACE_FRONT_AND_BACK, ref);
             cmdbuf.SetStencilWriteMask(VK_STENCIL_FACE_FRONT_AND_BACK, write_mask);
             cmdbuf.SetStencilCompareMask(VK_STENCIL_FACE_FRONT_AND_BACK, test_mask);
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
index 7fb256953..06f68d09a 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
@@ -26,39 +26,20 @@ using namespace Common::Literals;
 constexpr VkDeviceSize MAX_ALIGNMENT = 256;
 // Maximum size to put elements in the stream buffer
 constexpr VkDeviceSize MAX_STREAM_BUFFER_REQUEST_SIZE = 8_MiB;
+// Stream buffer size in bytes
+constexpr VkDeviceSize STREAM_BUFFER_SIZE = 128_MiB;
+constexpr VkDeviceSize REGION_SIZE = STREAM_BUFFER_SIZE / StagingBufferPool::NUM_SYNCS;
 
 constexpr VkMemoryPropertyFlags HOST_FLAGS =
     VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
 constexpr VkMemoryPropertyFlags STREAM_FLAGS = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | HOST_FLAGS;
 
-static bool IsStreamHeap(VkMemoryHeap heap, size_t staging_buffer_size) noexcept {
-    return staging_buffer_size < (heap.size * 2) / 3;
-}
-
-static bool HasLargeDeviceLocalHostVisibleMemory(const VkPhysicalDeviceMemoryProperties& props) {
-    const auto flags{VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT};
-
-    for (u32 type_index = 0; type_index < props.memoryTypeCount; ++type_index) {
-        const auto& memory_type{props.memoryTypes[type_index]};
-
-        if ((memory_type.propertyFlags & flags) != flags) {
-            // Memory must be device local and host visible
-            continue;
-        }
-
-        const auto& heap{props.memoryHeaps[memory_type.heapIndex]};
-        if (heap.size >= 7168_MiB) {
-            // This is the right type of memory
-            return true;
-        }
-    }
-
-    return false;
+bool IsStreamHeap(VkMemoryHeap heap) noexcept {
+    return STREAM_BUFFER_SIZE < (heap.size * 2) / 3;
 }
 
 std::optional<u32> FindMemoryTypeIndex(const VkPhysicalDeviceMemoryProperties& props, u32 type_mask,
-                                       VkMemoryPropertyFlags flags,
-                                       size_t staging_buffer_size) noexcept {
+                                       VkMemoryPropertyFlags flags) noexcept {
     for (u32 type_index = 0; type_index < props.memoryTypeCount; ++type_index) {
         if (((type_mask >> type_index) & 1) == 0) {
             // Memory type is incompatible
@@ -69,7 +50,7 @@ std::optional<u32> FindMemoryTypeIndex(const VkPhysicalDeviceMemoryProperties& p
             // Memory type doesn't have the flags we want
             continue;
         }
-        if (!IsStreamHeap(props.memoryHeaps[memory_type.heapIndex], staging_buffer_size)) {
+        if (!IsStreamHeap(props.memoryHeaps[memory_type.heapIndex])) {
             // Memory heap is not suitable for streaming
             continue;
         }
@@ -80,17 +61,17 @@ std::optional<u32> FindMemoryTypeIndex(const VkPhysicalDeviceMemoryProperties& p
 }
 
 u32 FindMemoryTypeIndex(const VkPhysicalDeviceMemoryProperties& props, u32 type_mask,
-                        bool try_device_local, size_t staging_buffer_size) {
+                        bool try_device_local) {
     std::optional<u32> type;
     if (try_device_local) {
         // Try to find a DEVICE_LOCAL_BIT type, Nvidia and AMD have a dedicated heap for this
-        type = FindMemoryTypeIndex(props, type_mask, STREAM_FLAGS, staging_buffer_size);
+        type = FindMemoryTypeIndex(props, type_mask, STREAM_FLAGS);
         if (type) {
             return *type;
         }
     }
     // Otherwise try without the DEVICE_LOCAL_BIT
-    type = FindMemoryTypeIndex(props, type_mask, HOST_FLAGS, staging_buffer_size);
+    type = FindMemoryTypeIndex(props, type_mask, HOST_FLAGS);
     if (type) {
         return *type;
     }
@@ -98,32 +79,20 @@ u32 FindMemoryTypeIndex(const VkPhysicalDeviceMemoryProperties& props, u32 type_
     throw vk::Exception(VK_ERROR_OUT_OF_DEVICE_MEMORY);
 }
 
-size_t Region(size_t iterator, size_t region_size) noexcept {
-    return iterator / region_size;
+size_t Region(size_t iterator) noexcept {
+    return iterator / REGION_SIZE;
 }
 } // Anonymous namespace
 
 StagingBufferPool::StagingBufferPool(const Device& device_, MemoryAllocator& memory_allocator_,
                                      Scheduler& scheduler_)
     : device{device_}, memory_allocator{memory_allocator_}, scheduler{scheduler_} {
-
-    const auto memory_properties{device.GetPhysical().GetMemoryProperties().memoryProperties};
-    if (HasLargeDeviceLocalHostVisibleMemory(memory_properties)) {
-        // Possible on many integrated and newer discrete cards
-        staging_buffer_size = 1_GiB;
-    } else {
-        // Well-supported default size used by most Vulkan PC games
-        staging_buffer_size = 256_MiB;
-    }
-
-    region_size = staging_buffer_size / StagingBufferPool::NUM_SYNCS;
-
     const vk::Device& dev = device.GetLogical();
     stream_buffer = dev.CreateBuffer(VkBufferCreateInfo{
         .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
         .pNext = nullptr,
         .flags = 0,
-        .size = staging_buffer_size,
+        .size = STREAM_BUFFER_SIZE,
         .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
                  VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT,
         .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
@@ -148,18 +117,19 @@ StagingBufferPool::StagingBufferPool(const Device& device_, MemoryAllocator& mem
         .image = nullptr,
         .buffer = *stream_buffer,
     };
+    const auto memory_properties = device.GetPhysical().GetMemoryProperties().memoryProperties;
     VkMemoryAllocateInfo stream_memory_info{
         .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
         .pNext = make_dedicated ? &dedicated_info : nullptr,
         .allocationSize = requirements.size,
-        .memoryTypeIndex = FindMemoryTypeIndex(memory_properties, requirements.memoryTypeBits, true,
-                                               staging_buffer_size),
+        .memoryTypeIndex =
+            FindMemoryTypeIndex(memory_properties, requirements.memoryTypeBits, true),
     };
     stream_memory = dev.TryAllocateMemory(stream_memory_info);
     if (!stream_memory) {
         LOG_INFO(Render_Vulkan, "Dynamic memory allocation failed, trying with system memory");
-        stream_memory_info.memoryTypeIndex = FindMemoryTypeIndex(
-            memory_properties, requirements.memoryTypeBits, false, staging_buffer_size);
+        stream_memory_info.memoryTypeIndex =
+            FindMemoryTypeIndex(memory_properties, requirements.memoryTypeBits, false);
         stream_memory = dev.AllocateMemory(stream_memory_info);
     }
 
@@ -167,7 +137,7 @@ StagingBufferPool::StagingBufferPool(const Device& device_, MemoryAllocator& mem
         stream_memory.SetObjectNameEXT("Stream Buffer Memory");
     }
     stream_buffer.BindMemory(*stream_memory, 0);
-    stream_pointer = stream_memory.Map(0, staging_buffer_size);
+    stream_pointer = stream_memory.Map(0, STREAM_BUFFER_SIZE);
 }
 
 StagingBufferPool::~StagingBufferPool() = default;
@@ -188,25 +158,25 @@ void StagingBufferPool::TickFrame() {
 }
 
 StagingBufferRef StagingBufferPool::GetStreamBuffer(size_t size) {
-    if (AreRegionsActive(Region(free_iterator, region_size) + 1,
-                         std::min(Region(iterator + size, region_size) + 1, NUM_SYNCS))) {
+    if (AreRegionsActive(Region(free_iterator) + 1,
+                         std::min(Region(iterator + size) + 1, NUM_SYNCS))) {
         // Avoid waiting for the previous usages to be free
         return GetStagingBuffer(size, MemoryUsage::Upload);
     }
     const u64 current_tick = scheduler.CurrentTick();
-    std::fill(sync_ticks.begin() + Region(used_iterator, region_size),
-              sync_ticks.begin() + Region(iterator, region_size), current_tick);
+    std::fill(sync_ticks.begin() + Region(used_iterator), sync_ticks.begin() + Region(iterator),
+              current_tick);
     used_iterator = iterator;
     free_iterator = std::max(free_iterator, iterator + size);
 
-    if (iterator + size >= staging_buffer_size) {
-        std::fill(sync_ticks.begin() + Region(used_iterator, region_size),
-                  sync_ticks.begin() + NUM_SYNCS, current_tick);
+    if (iterator + size >= STREAM_BUFFER_SIZE) {
+        std::fill(sync_ticks.begin() + Region(used_iterator), sync_ticks.begin() + NUM_SYNCS,
+                  current_tick);
         used_iterator = 0;
         iterator = 0;
         free_iterator = size;
 
-        if (AreRegionsActive(0, Region(size, region_size) + 1)) {
+        if (AreRegionsActive(0, Region(size) + 1)) {
             // Avoid waiting for the previous usages to be free
             return GetStagingBuffer(size, MemoryUsage::Upload);
         }
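
Note: GetStreamBuffer treats the stream buffer as a ring split into NUM_SYNCS equal regions; a region is reusable once the scheduler tick recorded for it in sync_ticks has completed. A condensed, self-contained sketch of that bookkeeping (NUM_SYNCS = 16 is an assumption here; the real value lives in vk_staging_buffer_pool.h):

    #include <algorithm>
    #include <array>
    #include <cstddef>
    #include <cstdint>

    constexpr size_t NUM_SYNCS = 16;                    // assumed; see the header
    constexpr size_t STREAM_BUFFER_SIZE = 128ull << 20; // 128 MiB, as in the diff
    constexpr size_t REGION_SIZE = STREAM_BUFFER_SIZE / NUM_SYNCS;

    // Map a byte offset to the sync region that guards it
    constexpr size_t Region(size_t offset) noexcept {
        return offset / REGION_SIZE;
    }

    std::array<uint64_t, NUM_SYNCS> sync_ticks{};

    // Stamp every region covered by [begin, end) with the tick that must
    // complete before the range can be reused
    void MarkRegions(size_t begin, size_t end, uint64_t current_tick) {
        std::fill(sync_ticks.begin() + Region(begin), sync_ticks.begin() + Region(end),
                  current_tick);
    }

Making the buffer size a compile-time constant lets Region() divide by a constant, and it removes the per-device 1 GiB/256 MiB heuristic the commit deletes.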
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
index 90c67177f..91dc84da8 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
@@ -93,9 +93,6 @@ private:
     size_t free_iterator = 0;
     std::array<u64, NUM_SYNCS> sync_ticks{};
 
-    size_t staging_buffer_size = 0;
-    size_t region_size = 0;
-
     StagingBuffersCache device_local_cache;
     StagingBuffersCache upload_cache;
     StagingBuffersCache download_cache;
diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.cpp b/src/video_core/renderer_vulkan/vk_state_tracker.cpp
index ed98c8370..b87c3be66 100644
--- a/src/video_core/renderer_vulkan/vk_state_tracker.cpp
+++ b/src/video_core/renderer_vulkan/vk_state_tracker.cpp
@@ -77,12 +77,12 @@ void SetupDirtyDepthBounds(Tables& tables) {
 void SetupDirtyStencilProperties(Tables& tables) {
     auto& table = tables[0];
     table[OFF(stencil_two_side_enable)] = StencilProperties;
-    table[OFF(stencil_front_func.ref)] = StencilProperties;
-    table[OFF(stencil_front_func.mask)] = StencilProperties;
-    table[OFF(stencil_front_func.func_mask)] = StencilProperties;
-    table[OFF(stencil_back_func.ref)] = StencilProperties;
-    table[OFF(stencil_back_func.mask)] = StencilProperties;
-    table[OFF(stencil_back_func.func_mask)] = StencilProperties;
+    table[OFF(stencil_front_ref)] = StencilProperties;
+    table[OFF(stencil_front_mask)] = StencilProperties;
+    table[OFF(stencil_front_func_mask)] = StencilProperties;
+    table[OFF(stencil_back_ref)] = StencilProperties;
+    table[OFF(stencil_back_mask)] = StencilProperties;
+    table[OFF(stencil_back_func_mask)] = StencilProperties;
 }
 
 void SetupDirtyLineWidth(Tables& tables) {
diff --git a/src/video_core/texture_cache/descriptor_table.h b/src/video_core/texture_cache/descriptor_table.h
index b18e3838f..ee4240288 100644
--- a/src/video_core/texture_cache/descriptor_table.h
+++ b/src/video_core/texture_cache/descriptor_table.h
@@ -18,7 +18,7 @@ class DescriptorTable {
 public:
     explicit DescriptorTable(Tegra::MemoryManager& gpu_memory_) : gpu_memory{gpu_memory_} {}
 
-    [[nodiscard]] bool Synchornize(GPUVAddr gpu_addr, u32 limit) {
+    [[nodiscard]] bool Synchronize(GPUVAddr gpu_addr, u32 limit) {
         [[likely]] if (current_gpu_addr == gpu_addr && current_limit == limit) {
             return false;
         }
diff --git a/src/video_core/texture_cache/format_lookup_table.cpp b/src/video_core/texture_cache/format_lookup_table.cpp
index ad935d386..08aa8ca33 100644
--- a/src/video_core/texture_cache/format_lookup_table.cpp
+++ b/src/video_core/texture_cache/format_lookup_table.cpp
@@ -150,6 +150,8 @@ PixelFormat PixelFormatFromTextureInfo(TextureFormat format, ComponentType red,
         return PixelFormat::D24_UNORM_S8_UINT;
     case Hash(TextureFormat::D32S8, FLOAT, UINT, UNORM, UNORM, LINEAR):
         return PixelFormat::D32_FLOAT_S8_UINT;
+    case Hash(TextureFormat::R32_B24G8, FLOAT, UINT, UNORM, UNORM, LINEAR):
+        return PixelFormat::D32_FLOAT_S8_UINT;
     case Hash(TextureFormat::BC1_RGBA, UNORM, LINEAR):
         return PixelFormat::BC1_RGBA_UNORM;
     case Hash(TextureFormat::BC1_RGBA, UNORM, SRGB):
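
Note: this table dispatches on a constexpr Hash of the texture format and its per-component types, so supporting a new encoding such as R32_B24G8 is a single added case. A sketch of the packing technique (field widths here are illustrative assumptions, not the repository's actual layout):

    #include <cstdint>

    // Illustrative key packing: one integer per switch case. The real
    // Hash() in format_lookup_table.cpp may use different fields and widths.
    constexpr uint32_t Hash(uint32_t format, uint32_t red, uint32_t green,
                            uint32_t blue, uint32_t alpha, uint32_t srgb = 0) {
        return format | (red << 8) | (green << 11) | (blue << 14) |
               (alpha << 17) | (srgb << 20);
    }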
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 413baf730..0e0fd410f 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -193,11 +193,11 @@ void TextureCache<P>::SynchronizeGraphicsDescriptors() {
     const bool linked_tsc = maxwell3d->regs.sampler_binding == SamplerBinding::ViaHeaderBinding;
     const u32 tic_limit = maxwell3d->regs.tex_header.limit;
     const u32 tsc_limit = linked_tsc ? tic_limit : maxwell3d->regs.tex_sampler.limit;
-    if (channel_state->graphics_sampler_table.Synchornize(maxwell3d->regs.tex_sampler.Address(),
+    if (channel_state->graphics_sampler_table.Synchronize(maxwell3d->regs.tex_sampler.Address(),
                                                           tsc_limit)) {
         channel_state->graphics_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID);
     }
-    if (channel_state->graphics_image_table.Synchornize(maxwell3d->regs.tex_header.Address(),
+    if (channel_state->graphics_image_table.Synchronize(maxwell3d->regs.tex_header.Address(),
                                                         tic_limit)) {
         channel_state->graphics_image_view_ids.resize(tic_limit + 1, CORRUPT_ID);
     }
@@ -209,10 +209,10 @@ void TextureCache<P>::SynchronizeComputeDescriptors() {
     const u32 tic_limit = kepler_compute->regs.tic.limit;
     const u32 tsc_limit = linked_tsc ? tic_limit : kepler_compute->regs.tsc.limit;
     const GPUVAddr tsc_gpu_addr = kepler_compute->regs.tsc.Address();
-    if (channel_state->compute_sampler_table.Synchornize(tsc_gpu_addr, tsc_limit)) {
+    if (channel_state->compute_sampler_table.Synchronize(tsc_gpu_addr, tsc_limit)) {
         channel_state->compute_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID);
     }
-    if (channel_state->compute_image_table.Synchornize(kepler_compute->regs.tic.Address(),
+    if (channel_state->compute_image_table.Synchronize(kepler_compute->regs.tic.Address(),
                                                        tic_limit)) {
         channel_state->compute_image_view_ids.resize(tic_limit + 1, CORRUPT_ID);
     }
diff --git a/src/yuzu/applets/qt_controller.ui b/src/yuzu/applets/qt_controller.ui
index c8cb6bcf3..f5eccba70 100644
--- a/src/yuzu/applets/qt_controller.ui
+++ b/src/yuzu/applets/qt_controller.ui
@@ -2300,7 +2300,7 @@
          <item>
           <widget class="QRadioButton" name="radioUndocked">
            <property name="text">
-            <string>Undocked</string>
+            <string>Handheld</string>
            </property>
           </widget>
          </item>
diff --git a/src/yuzu/bootmanager.cpp b/src/yuzu/bootmanager.cpp
index 24251247d..6acfb7b06 100644
--- a/src/yuzu/bootmanager.cpp
+++ b/src/yuzu/bootmanager.cpp
@@ -120,8 +120,8 @@ void EmuThread::run() {
         }
     }
 
-    // Shutdown the core emulation
-    system.Shutdown();
+    // Shutdown the main emulated process
+    system.ShutdownMainProcess();
 
 #if MICROPROFILE_ENABLED
     MicroProfileOnThreadExit();
diff --git a/src/yuzu/configuration/configure_audio.cpp b/src/yuzu/configuration/configure_audio.cpp
index 19b8b15ef..70cc6f84b 100644
--- a/src/yuzu/configuration/configure_audio.cpp
+++ b/src/yuzu/configuration/configure_audio.cpp
@@ -161,8 +161,8 @@ void ConfigureAudio::InitializeAudioSinkComboBox() {
     ui->sink_combo_box->clear();
     ui->sink_combo_box->addItem(QString::fromUtf8(AudioCore::Sink::auto_device_name));
 
-    for (const char* id : AudioCore::Sink::GetSinkIDs()) {
-        ui->sink_combo_box->addItem(QString::fromUtf8(id));
+    for (const auto& id : AudioCore::Sink::GetSinkIDs()) {
+        ui->sink_combo_box->addItem(QString::fromUtf8(id.data(), static_cast<s32>(id.length())));
     }
 }
 
diff --git a/src/yuzu/configuration/configure_graphics.ui b/src/yuzu/configuration/configure_graphics.ui
index 1e4f74704..fdbb33372 100644
--- a/src/yuzu/configuration/configure_graphics.ui
+++ b/src/yuzu/configuration/configure_graphics.ui
@@ -301,6 +301,11 @@
             </item>
             <item>
              <property name="text">
+              <string>Force 16:10</string>
+             </property>
+            </item>
+            <item>
+             <property name="text">
               <string>Stretch to Window</string>
              </property>
             </item>
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index f45a25410..7b16d7f7e 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -294,6 +294,7 @@ GMainWindow::GMainWindow(std::unique_ptr<Config> config_, bool has_broken_vulkan
 #ifdef __linux__
     SetupSigInterrupts();
 #endif
+    system->Initialize();
 
     Common::Log::Initialize();
     LoadTranslation();
@@ -1895,6 +1896,8 @@ void GMainWindow::OnGameListOpenFolder(u64 program_id, GameListOpenTarget target
     case GameListOpenTarget::SaveData: {
         open_target = tr("Save Data");
         const auto nand_dir = Common::FS::GetYuzuPath(Common::FS::YuzuPath::NANDDir);
+        auto vfs_nand_dir =
+            vfs->OpenDirectory(Common::FS::PathToUTF8String(nand_dir), FileSys::Mode::Read);
 
         if (has_user_save) {
             // User save data
@@ -1921,15 +1924,15 @@ void GMainWindow::OnGameListOpenFolder(u64 program_id, GameListOpenTarget target
         ASSERT(user_id);
 
         const auto user_save_data_path = FileSys::SaveDataFactory::GetFullPath(
-            *system, FileSys::SaveDataSpaceId::NandUser, FileSys::SaveDataType::SaveData,
-            program_id, user_id->AsU128(), 0);
+            *system, vfs_nand_dir, FileSys::SaveDataSpaceId::NandUser,
+            FileSys::SaveDataType::SaveData, program_id, user_id->AsU128(), 0);
 
         path = Common::FS::ConcatPathSafe(nand_dir, user_save_data_path);
     } else {
         // Device save data
         const auto device_save_data_path = FileSys::SaveDataFactory::GetFullPath(
-            *system, FileSys::SaveDataSpaceId::NandUser, FileSys::SaveDataType::SaveData,
-            program_id, {}, 0);
+            *system, vfs_nand_dir, FileSys::SaveDataSpaceId::NandUser,
+            FileSys::SaveDataType::SaveData, program_id, {}, 0);
 
         path = Common::FS::ConcatPathSafe(nand_dir, device_save_data_path);
     }
@@ -3280,7 +3283,7 @@ void GMainWindow::LoadAmiibo(const QString& filename) {
         QMessageBox::warning(this, title, tr("The current game is not looking for amiibos"));
         break;
     case InputCommon::VirtualAmiibo::Info::Unknown:
-        QMessageBox::warning(this, title, tr("An unkown error occured"));
+        QMessageBox::warning(this, title, tr("An unknown error occurred"));
         break;
     default:
         break;
diff --git a/src/yuzu_cmd/yuzu.cpp b/src/yuzu_cmd/yuzu.cpp
index 3a0f33cba..e16f79eb4 100644
--- a/src/yuzu_cmd/yuzu.cpp
+++ b/src/yuzu_cmd/yuzu.cpp
@@ -302,6 +302,8 @@ int main(int argc, char** argv) {
     }
 
     Core::System system{};
+    system.Initialize();
+
     InputCommon::InputSubsystem input_subsystem{};
 
     // Apply the command line arguments
307 // Apply the command line arguments 309 // Apply the command line arguments
@@ -392,7 +394,7 @@ int main(int argc, char** argv) {
     }
     system.DetachDebugger();
     void(system.Pause());
-    system.Shutdown();
+    system.ShutdownMainProcess();
 
     detached_tasks.WaitForAllTasks();
     return 0;
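
Note: taken together, the frontend hunks converge on one lifecycle: construct Core::System, call the now-explicit Initialize() before use, and tear down with ShutdownMainProcess() in place of Shutdown(). Condensed (names straight from the diff; everything between init and teardown omitted):

    Core::System system{};
    system.Initialize();             // now required before the system is used
    // ... configure, load the guest, run the emulation loop ...
    system.DetachDebugger();
    void(system.Pause());
    system.ShutdownMainProcess();    // replaces the old system.Shutdown()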