85 files changed, 3808 insertions(+), 1770 deletions(-)
diff --git a/.ci/scripts/transifex/docker.sh b/.ci/scripts/transifex/docker.sh
index 6237b3f73..6ddbfd0dd 100755
--- a/.ci/scripts/transifex/docker.sh
+++ b/.ci/scripts/transifex/docker.sh
@@ -3,15 +3,6 @@
 # SPDX-FileCopyrightText: 2021 yuzu Emulator Project
 # SPDX-License-Identifier: GPL-2.0-or-later
 
-# Setup RC file for tx
-cat << EOF > ~/.transifexrc
-[https://www.transifex.com]
-hostname = https://www.transifex.com
-username = api
-password = $TRANSIFEX_API_TOKEN
-EOF
-
-
 set -x
 
 echo -e "\e[1m\e[33mBuild tools information:\e[0m"
@@ -19,9 +10,6 @@ cmake --version
 gcc -v
 tx --version
 
-# vcpkg needs these: curl zip unzip tar, have tar
-apt-get install -y curl zip unzip
-
 mkdir build && cd build
 cmake .. -DENABLE_QT_TRANSLATION=ON -DGENERATE_QT_TRANSLATION=ON -DCMAKE_BUILD_TYPE=Release -DENABLE_SDL2=OFF -DYUZU_TESTS=OFF -DYUZU_USE_BUNDLED_VCPKG=ON
 make translation
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index aa5824824..25ef1f078 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -19,11 +19,11 @@ jobs:
       - uses: actions/checkout@v3
         with:
           submodules: recursive
          fetch-depth: 0
      - name: Update Translation
        run: ./.ci/scripts/transifex/docker.sh
        env:
-          TRANSIFEX_API_TOKEN: ${{ secrets.TRANSIFEX_API_TOKEN }}
+          TX_TOKEN: ${{ secrets.TRANSIFEX_API_TOKEN }}
 
   reuse:
     runs-on: ubuntu-latest
diff --git a/dist/languages/.tx/config b/dist/languages/.tx/config
index 0d9b512ea..30e76b925 100644
--- a/dist/languages/.tx/config
+++ b/dist/languages/.tx/config
@@ -1,7 +1,7 @@
 [main]
 host = https://www.transifex.com
 
-[yuzu.emulator]
+[o:yuzu-emulator:p:yuzu:r:emulator]
 file_filter = <lang>.ts
 source_file = en.ts
 source_lang = en
diff --git a/dist/languages/README.md b/dist/languages/README.md
index 61981ab1d..c5ea1ada0 100644
--- a/dist/languages/README.md
+++ b/dist/languages/README.md
@@ -1 +1,3 @@
-This directory stores translation patches (TS files) for yuzu Qt frontend. This directory is linked with [yuzu project on transifex](https://www.transifex.com/yuzu-emulator/yuzu), so you can update the translation by executing `tx pull -a`. If you want to contribute to the translation, please go the transifex link and submit your translation there. This directory on the main repo will be synchronized with transifex periodically. Do not directly open PRs on github to modify the translation.
+This directory stores translation patches (TS files) for yuzu Qt frontend. This directory is linked with [yuzu project on transifex](https://www.transifex.com/yuzu-emulator/yuzu), so you can update the translation by executing `tx pull -t -a`. If you want to contribute to the translation, please go the transifex link and submit your translation there. This directory on the main repo will be synchronized with transifex periodically.
+
+Do not directly open PRs on github to modify the translation.
diff --git a/src/audio_core/renderer/system.cpp b/src/audio_core/renderer/system.cpp
index bde794cd1..4fac30c7c 100644
--- a/src/audio_core/renderer/system.cpp
+++ b/src/audio_core/renderer/system.cpp
@@ -98,9 +98,8 @@ System::System(Core::System& core_, Kernel::KEvent* adsp_rendered_event_)
     : core{core_}, adsp{core.AudioCore().GetADSP()}, adsp_rendered_event{adsp_rendered_event_} {}
 
 Result System::Initialize(const AudioRendererParameterInternal& params,
-                          Kernel::KTransferMemory* transfer_memory, const u64 transfer_memory_size,
-                          const u32 process_handle_, const u64 applet_resource_user_id_,
-                          const s32 session_id_) {
+                          Kernel::KTransferMemory* transfer_memory, u64 transfer_memory_size,
+                          u32 process_handle_, u64 applet_resource_user_id_, s32 session_id_) {
     if (!CheckValidRevision(params.revision)) {
         return Service::Audio::ERR_INVALID_REVISION;
     }
@@ -354,6 +353,8 @@ Result System::Initialize(const AudioRendererParameterInternal& params,
 
     render_time_limit_percent = 100;
     drop_voice = params.voice_drop_enabled && params.execution_mode == ExecutionMode::Auto;
+    drop_voice_param = 1.0f;
+    num_voices_dropped = 0;
 
     allocator.Align(0x40);
     command_workbuffer_size = allocator.GetRemainingSize();
@@ -547,7 +548,7 @@ u32 System::GetRenderingTimeLimit() const {
     return render_time_limit_percent;
 }
 
-void System::SetRenderingTimeLimit(const u32 limit) {
+void System::SetRenderingTimeLimit(u32 limit) {
     render_time_limit_percent = limit;
 }
 
@@ -635,7 +636,7 @@ void System::SendCommandToDsp() {
 }
 
 u64 System::GenerateCommand(std::span<u8> in_command_buffer,
-                            [[maybe_unused]] const u64 command_buffer_size_) {
+                            [[maybe_unused]] u64 command_buffer_size_) {
     PoolMapper::ClearUseState(memory_pool_workbuffer, memory_pool_count);
     const auto start_time{core.CoreTiming().GetClockTicks()};
 
@@ -693,7 +694,8 @@ u64 System::GenerateCommand(std::span<u8> in_command_buffer,
 
     voice_context.SortInfo();
 
-    const auto start_estimated_time{command_buffer.estimated_process_time};
+    const auto start_estimated_time{drop_voice_param *
+                                    static_cast<f32>(command_buffer.estimated_process_time)};
 
     command_generator.GenerateVoiceCommands();
     command_generator.GenerateSubMixCommands();
@@ -712,11 +714,16 @@ u64 System::GenerateCommand(std::span<u8> in_command_buffer,
             render_context.behavior->IsAudioRendererProcessingTimeLimit70PercentSupported();
         time_limit_percent = 70.0f;
     }
+
+    const auto end_estimated_time{drop_voice_param *
+                                  static_cast<f32>(command_buffer.estimated_process_time)};
+    const auto estimated_time{start_estimated_time - end_estimated_time};
+
     const auto time_limit{static_cast<u32>(
-        static_cast<f32>(start_estimated_time - command_buffer.estimated_process_time) +
-        (((time_limit_percent / 100.0f) * 2'880'000.0) *
-         (static_cast<f32>(render_time_limit_percent) / 100.0f)))};
-    num_voices_dropped = DropVoices(command_buffer, start_estimated_time, time_limit);
+        estimated_time + (((time_limit_percent / 100.0f) * 2'880'000.0) *
+                          (static_cast<f32>(render_time_limit_percent) / 100.0f)))};
+    num_voices_dropped =
+        DropVoices(command_buffer, static_cast<u32>(start_estimated_time), time_limit);
 }
 
 command_list_header->buffer_size = command_buffer.size;
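Note: the hunk above changes what gets fed into DropVoices. Below is a minimal standalone sketch of the new budget arithmetic; the 2'880'000-tick constant and the percentage handling come straight from the hunk, while every sample magnitude is invented for illustration:

    #include <cstdint>
    #include <cstdio>

    int main() {
        using u32 = std::uint32_t;
        using f32 = float;

        const f32 drop_voice_param = 1.0f;
        // Estimates captured before and after command generation, both scaled
        // by drop_voice_param exactly as in the hunk; magnitudes are made up.
        const f32 start_estimated_time = drop_voice_param * 1'000'000.0f;
        const f32 end_estimated_time = drop_voice_param * 1'500'000.0f;
        // Negative when command generation added work, which shrinks the budget.
        const f32 estimated_time = start_estimated_time - end_estimated_time;

        const f32 time_limit_percent = 70.0f;         // revision-dependent, per the hunk
        const f32 render_time_limit_percent = 100.0f; // SetRenderingTimeLimit() value

        const u32 time_limit = static_cast<u32>(
            estimated_time + (((time_limit_percent / 100.0f) * 2'880'000.0f) *
                              (render_time_limit_percent / 100.0f)));
        std::printf("time limit: %u ticks\n", time_limit); // 1516000 with these inputs
        return 0;
    }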
@@ -737,24 +744,33 @@ u64 System::GenerateCommand(std::span<u8> in_command_buffer,
     return command_buffer.size;
 }
 
-u32 System::DropVoices(CommandBuffer& command_buffer, const u32 estimated_process_time,
-                       const u32 time_limit) {
+f32 System::GetVoiceDropParameter() const {
+    return drop_voice_param;
+}
+
+void System::SetVoiceDropParameter(f32 voice_drop_) {
+    drop_voice_param = voice_drop_;
+}
+
+u32 System::DropVoices(CommandBuffer& command_buffer, u32 estimated_process_time, u32 time_limit) {
     u32 i{0};
     auto command_list{command_buffer.command_list.data() + sizeof(CommandListHeader)};
-    ICommand* cmd{};
+    ICommand* cmd{nullptr};
 
-    for (; i < command_buffer.count; i++) {
+    // Find a first valid voice to drop
+    while (i < command_buffer.count) {
         cmd = reinterpret_cast<ICommand*>(command_list);
-        if (cmd->type != CommandId::Performance &&
-            cmd->type != CommandId::DataSourcePcmInt16Version1 &&
-            cmd->type != CommandId::DataSourcePcmInt16Version2 &&
-            cmd->type != CommandId::DataSourcePcmFloatVersion1 &&
-            cmd->type != CommandId::DataSourcePcmFloatVersion2 &&
-            cmd->type != CommandId::DataSourceAdpcmVersion1 &&
-            cmd->type != CommandId::DataSourceAdpcmVersion2) {
+        if (cmd->type == CommandId::Performance ||
+            cmd->type == CommandId::DataSourcePcmInt16Version1 ||
+            cmd->type == CommandId::DataSourcePcmInt16Version2 ||
+            cmd->type == CommandId::DataSourcePcmFloatVersion1 ||
+            cmd->type == CommandId::DataSourcePcmFloatVersion2 ||
+            cmd->type == CommandId::DataSourceAdpcmVersion1 ||
+            cmd->type == CommandId::DataSourceAdpcmVersion2) {
             break;
         }
         command_list += cmd->size;
+        i++;
     }
 
     if (cmd == nullptr || command_buffer.count == 0 || i >= command_buffer.count) {
@@ -767,6 +783,7 @@ u32 System::DropVoices(CommandBuffer& command_buffer, const u32 estimated_proces
         const auto node_id_type{cmd->node_id >> 28};
         const auto node_id_base{cmd->node_id & 0xFFF};
 
+        // If the new estimated process time falls below the limit, we're done dropping.
         if (estimated_process_time <= time_limit) {
             break;
         }
@@ -775,6 +792,7 @@ u32 System::DropVoices(CommandBuffer& command_buffer, const u32 estimated_proces
             break;
         }
 
+        // Don't drop voices marked with the highest priority.
        auto& voice_info{voice_context.GetInfo(node_id_base)};
        if (voice_info.priority == HighestVoicePriority) {
            break;
@@ -783,18 +801,23 @@ u32 System::DropVoices(CommandBuffer& command_buffer, const u32 estimated_proces
         voices_dropped++;
         voice_info.voice_dropped = true;
 
-        if (i < command_buffer.count) {
-            while (cmd->node_id == node_id) {
-                if (cmd->type == CommandId::DepopPrepare) {
-                    cmd->enabled = true;
-                } else if (cmd->type == CommandId::Performance || !cmd->enabled) {
-                    cmd->enabled = false;
-                }
-                i++;
-                command_list += cmd->size;
-                cmd = reinterpret_cast<ICommand*>(command_list);
+        // First iteration should drop the voice, and then iterate through all of the commands tied
+        // to the voice. We don't need reverb on a voice which we've just removed, for example.
+        // Depops can't be removed otherwise we'll introduce audio popping, and we don't
+        // remove perf commands. Lower the estimated time for each command dropped.
+        while (i < command_buffer.count && cmd->node_id == node_id) {
+            if (cmd->type == CommandId::DepopPrepare) {
+                cmd->enabled = true;
+            } else if (cmd->enabled && cmd->type != CommandId::Performance) {
+                cmd->enabled = false;
+                estimated_process_time -= static_cast<u32>(
+                    drop_voice_param * static_cast<f32>(cmd->estimated_process_time));
             }
+            command_list += cmd->size;
+            cmd = reinterpret_cast<ICommand*>(command_list);
+            i++;
         }
+        i++;
     }
     return voices_dropped;
 }
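Note: the other behavioural change in this hunk is that DropVoices now credits back the estimated cost of every command it disables, so the `estimated_process_time <= time_limit` check can actually terminate the loop once enough work has been shed. A simplified sketch of that accounting follows; the depop/performance special cases are omitted, and FakeCommand is an invented stand-in for ICommand:

    #include <cstdint>
    #include <vector>

    struct FakeCommand { // invented stand-in for ICommand
        std::uint32_t node_id;
        std::uint32_t estimated_process_time;
        bool enabled{true};
    };

    std::uint32_t DropUntilUnderLimit(std::vector<FakeCommand>& cmds, std::uint32_t estimate,
                                      std::uint32_t limit, float drop_voice_param) {
        std::uint32_t voices_dropped = 0;
        std::size_t i = 0;
        while (i < cmds.size() && estimate > limit) {
            const auto voice = cmds[i].node_id;
            voices_dropped++;
            // Disable every command tied to this voice, crediting its cost back
            // so the loop can stop as soon as the estimate dips under the limit.
            while (i < cmds.size() && cmds[i].node_id == voice) {
                if (cmds[i].enabled) {
                    cmds[i].enabled = false;
                    estimate -= static_cast<std::uint32_t>(
                        drop_voice_param * static_cast<float>(cmds[i].estimated_process_time));
                }
                i++;
            }
        }
        return voices_dropped;
    }

    int main() {
        std::vector<FakeCommand> cmds{{0, 600}, {0, 400}, {1, 500}, {2, 500}};
        // Estimate 2000 against a limit of 900: dropping voice 0 (600 + 400)
        // brings the estimate to 1000, voice 1 brings it to 500 -> done after 2.
        return DropUntilUnderLimit(cmds, 2000, 900, 1.0f) == 2 ? 0 : 1;
    }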
diff --git a/src/audio_core/renderer/system.h b/src/audio_core/renderer/system.h
index bcbe65b07..429196e41 100644
--- a/src/audio_core/renderer/system.h
+++ b/src/audio_core/renderer/system.h
@@ -196,6 +196,20 @@ public:
      */
     u32 DropVoices(CommandBuffer& command_buffer, u32 estimated_process_time, u32 time_limit);
 
+    /**
+     * Get the current voice drop parameter.
+     *
+     * @return The current voice drop.
+     */
+    f32 GetVoiceDropParameter() const;
+
+    /**
+     * Set the voice drop parameter.
+     *
+     * @param The new voice drop.
+     */
+    void SetVoiceDropParameter(f32 voice_drop);
+
 private:
     /// Core system
     Core::System& core;
@@ -301,6 +315,8 @@ private:
     u32 num_voices_dropped{};
     /// Tick that rendering started
     u64 render_start_tick{};
+    /// Parameter to control the threshold for dropping voices if the audio graph gets too large
+    f32 drop_voice_param{1.0f};
 };
 
 } // namespace AudioRenderer
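Note: nothing in this diff calls the two new accessors yet; presumably a frontend hook or a later change does. A hypothetical caller-side sketch of the intent (everything here except the accessor itself is invented): since every cost estimate is multiplied by drop_voice_param, values above 1.0f make commands look more expensive so voices are dropped sooner, while 1.0f preserves stock behaviour.

    #include "audio_core/renderer/system.h"

    // Hypothetical usage; `system` is the AudioRenderer::System from this diff.
    void ApplyVoiceDropWorkaround(AudioRenderer::System& system, bool aggressive) {
        // 1.0f is the neutral default assigned in Initialize(); larger values
        // inflate per-command estimates so DropVoices() trims the graph earlier.
        system.SetVoiceDropParameter(aggressive ? 1.5f : 1.0f);
    }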
diff --git a/src/audio_core/renderer/voice/voice_context.cpp b/src/audio_core/renderer/voice/voice_context.cpp
index eafb51b01..a501a677d 100644
--- a/src/audio_core/renderer/voice/voice_context.cpp
+++ b/src/audio_core/renderer/voice/voice_context.cpp
@@ -74,8 +74,8 @@ void VoiceContext::SortInfo() {
     }
 
     std::ranges::sort(sorted_voice_info, [](const VoiceInfo* a, const VoiceInfo* b) {
-        return a->priority != b->priority ? a->priority < b->priority
-                                          : a->sort_order < b->sort_order;
+        return a->priority != b->priority ? a->priority > b->priority
+                                          : a->sort_order > b->sort_order;
     });
 }
 
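Note: flipping `<` to `>` reverses the sort so that numerically larger priorities come first. Assuming, as the DropVoices changes above suggest, that larger priority values mean less important voices (with HighestVoicePriority the one value never dropped), the drop pass now meets its candidates in the right order. A self-contained illustration with an invented two-field stand-in for VoiceInfo:

    #include <algorithm>
    #include <vector>

    struct Voice {
        int priority;   // stand-in for VoiceInfo::priority
        int sort_order; // stand-in for VoiceInfo::sort_order
    };

    int main() {
        std::vector<Voice> voices{{.priority = 0, .sort_order = 1},
                                  {.priority = 255, .sort_order = 0},
                                  {.priority = 5, .sort_order = 2}};
        // Same comparator as the hunk: descending priority, then descending
        // sort_order, so drop candidates sit at the front of the vector.
        std::ranges::sort(voices, [](const Voice& a, const Voice& b) {
            return a.priority != b.priority ? a.priority > b.priority
                                            : a.sort_order > b.sort_order;
        });
        return voices.front().priority == 255 ? 0 : 1;
    }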
diff --git a/src/audio_core/sink/sdl2_sink.cpp b/src/audio_core/sink/sdl2_sink.cpp
index f12ebf7fe..c138dc628 100644
--- a/src/audio_core/sink/sdl2_sink.cpp
+++ b/src/audio_core/sink/sdl2_sink.cpp
@@ -230,7 +230,9 @@ std::vector<std::string> ListSDLSinkDevices(bool capture) {
 
     const int device_count = SDL_GetNumAudioDevices(capture);
     for (int i = 0; i < device_count; ++i) {
-        device_list.emplace_back(SDL_GetAudioDeviceName(i, 0));
+        if (const char* name = SDL_GetAudioDeviceName(i, capture)) {
+            device_list.emplace_back(name);
+        }
     }
 
     return device_list;
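Note: the old loop had two bugs against the real SDL2 API. SDL_GetAudioDeviceName(i, 0) always asked for playback names even when enumerating capture devices, and its possibly-NULL return was passed straight into std::string construction, which is undefined behaviour. A standalone restatement of the corrected loop:

    #include <SDL.h>
    #include <string>
    #include <vector>

    // Same SDL2 calls as the hunk; assumes SDL_Init(SDL_INIT_AUDIO) succeeded.
    std::vector<std::string> ListDevices(bool capture) {
        std::vector<std::string> device_list;
        const int device_count = SDL_GetNumAudioDevices(capture);
        for (int i = 0; i < device_count; ++i) {
            // Forward `capture` and skip NULL names (invalid index or state).
            if (const char* name = SDL_GetAudioDeviceName(i, capture)) {
                device_list.emplace_back(name);
            }
        }
        return device_list;
    }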
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index a02696873..46cf75fde 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -169,7 +169,11 @@ endif()
 create_target_directory_groups(common)
 
 target_link_libraries(common PUBLIC ${Boost_LIBRARIES} fmt::fmt microprofile Threads::Threads)
-target_link_libraries(common PRIVATE lz4::lz4)
+if (TARGET lz4::lz4)
+    target_link_libraries(common PRIVATE lz4::lz4)
+else()
+    target_link_libraries(common PRIVATE LZ4::lz4_shared)
+endif()
 if (TARGET zstd::zstd)
     target_link_libraries(common PRIVATE zstd::zstd)
 else()
diff --git a/src/common/concepts.h b/src/common/concepts.h
index a97555f6a..e8ce30dfe 100644
--- a/src/common/concepts.h
+++ b/src/common/concepts.h
@@ -34,4 +34,12 @@ concept DerivedFrom = requires {
 template <typename From, typename To>
 concept ConvertibleTo = std::is_convertible_v<From, To>;
 
+// No equivalents in the stdlib
+
+template <typename T>
+concept IsArithmetic = std::is_arithmetic_v<T>;
+
+template <typename T>
+concept IsIntegral = std::is_integral_v<T>;
+
 } // namespace Common
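Note: the two new concepts exist to replace the `std::enable_if` idiom used throughout fixed_point.h below. A minimal before/after sketch (Scale is an invented example function; the include path assumes the repository's usual src/ include root):

    #include <type_traits>

    #include "common/concepts.h"

    // Before: the constraint hides in a defaulted template parameter.
    template <class Number,
              class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
    constexpr double ScaleOld(Number n) {
        return n * 2.0;
    }

    // After: the constraint is part of the signature and produces clearer errors.
    template <Common::IsArithmetic Number>
    constexpr double ScaleNew(Number n) {
        return n * 2.0;
    }

    static_assert(ScaleNew(21) == 42.0); // compiles only for arithmetic types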
diff --git a/src/common/fixed_point.h b/src/common/fixed_point.h
index 4a0f72cc9..f899b0d54 100644
--- a/src/common/fixed_point.h
+++ b/src/common/fixed_point.h
@@ -4,14 +4,7 @@
 // From: https://github.com/eteran/cpp-utilities/blob/master/fixed/include/cpp-utilities/fixed.h
 // See also: http://stackoverflow.com/questions/79677/whats-the-best-way-to-do-fixed-point-math
 
-#ifndef FIXED_H_
-#define FIXED_H_
-
-#if __cplusplus >= 201402L
-#define CONSTEXPR14 constexpr
-#else
-#define CONSTEXPR14
-#endif
+#pragma once
 
 #include <cstddef> // for size_t
 #include <cstdint>
@@ -19,6 +12,8 @@
 #include <ostream>
 #include <type_traits>
 
+#include <common/concepts.h>
+
 namespace Common {
 
 template <size_t I, size_t F>
@@ -57,8 +52,8 @@ struct type_from_size<64> {
     static constexpr size_t size = 64;
 
     using value_type = int64_t;
-    using unsigned_type = std::make_unsigned<value_type>::type;
-    using signed_type = std::make_signed<value_type>::type;
+    using unsigned_type = std::make_unsigned_t<value_type>;
+    using signed_type = std::make_signed_t<value_type>;
     using next_size = type_from_size<128>;
 };
 
@@ -68,8 +63,8 @@ struct type_from_size<32> {
     static constexpr size_t size = 32;
 
     using value_type = int32_t;
-    using unsigned_type = std::make_unsigned<value_type>::type;
-    using signed_type = std::make_signed<value_type>::type;
+    using unsigned_type = std::make_unsigned_t<value_type>;
+    using signed_type = std::make_signed_t<value_type>;
     using next_size = type_from_size<64>;
 };
 
@@ -79,8 +74,8 @@ struct type_from_size<16> {
     static constexpr size_t size = 16;
 
     using value_type = int16_t;
-    using unsigned_type = std::make_unsigned<value_type>::type;
-    using signed_type = std::make_signed<value_type>::type;
+    using unsigned_type = std::make_unsigned_t<value_type>;
+    using signed_type = std::make_signed_t<value_type>;
     using next_size = type_from_size<32>;
 };
 
@@ -90,8 +85,8 @@ struct type_from_size<8> {
     static constexpr size_t size = 8;
 
     using value_type = int8_t;
-    using unsigned_type = std::make_unsigned<value_type>::type;
-    using signed_type = std::make_signed<value_type>::type;
+    using unsigned_type = std::make_unsigned_t<value_type>;
+    using signed_type = std::make_signed_t<value_type>;
     using next_size = type_from_size<16>;
 };
 
@@ -106,9 +101,9 @@ constexpr B next_to_base(N rhs) {
 struct divide_by_zero : std::exception {};
 
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> divide(
+constexpr FixedPoint<I, F> divide(
     FixedPoint<I, F> numerator, FixedPoint<I, F> denominator, FixedPoint<I, F>& remainder,
-    typename std::enable_if<type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+    std::enable_if_t<type_from_size<I + F>::next_size::is_specialized>* = nullptr) {
 
     using next_type = typename FixedPoint<I, F>::next_type;
     using base_type = typename FixedPoint<I, F>::base_type;
@@ -126,9 +121,9 @@ CONSTEXPR14 FixedPoint<I, F> divide(
 }
 
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> divide(
+constexpr FixedPoint<I, F> divide(
     FixedPoint<I, F> numerator, FixedPoint<I, F> denominator, FixedPoint<I, F>& remainder,
-    typename std::enable_if<!type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+    std::enable_if_t<!type_from_size<I + F>::next_size::is_specialized>* = nullptr) {
 
     using unsigned_type = typename FixedPoint<I, F>::unsigned_type;
 
@@ -196,9 +191,9 @@ CONSTEXPR14 FixedPoint<I, F> divide(
 
 // this is the usual implementation of multiplication
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> multiply(
+constexpr FixedPoint<I, F> multiply(
     FixedPoint<I, F> lhs, FixedPoint<I, F> rhs,
-    typename std::enable_if<type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+    std::enable_if_t<type_from_size<I + F>::next_size::is_specialized>* = nullptr) {
 
     using next_type = typename FixedPoint<I, F>::next_type;
     using base_type = typename FixedPoint<I, F>::base_type;
@@ -215,9 +210,9 @@ CONSTEXPR14 FixedPoint<I, F> multiply(
 // it is slightly slower, but is more robust since it doesn't
 // require and upgraded type
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> multiply(
+constexpr FixedPoint<I, F> multiply(
     FixedPoint<I, F> lhs, FixedPoint<I, F> rhs,
-    typename std::enable_if<!type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+    std::enable_if_t<!type_from_size<I + F>::next_size::is_specialized>* = nullptr) {
 
     using base_type = typename FixedPoint<I, F>::base_type;
 
@@ -272,19 +267,20 @@ public:
     static constexpr base_type one = base_type(1) << fractional_bits;
 
 public: // constructors
-    FixedPoint() = default;
-    FixedPoint(const FixedPoint&) = default;
-    FixedPoint(FixedPoint&&) = default;
-    FixedPoint& operator=(const FixedPoint&) = default;
+    constexpr FixedPoint() = default;
+
+    constexpr FixedPoint(const FixedPoint&) = default;
+    constexpr FixedPoint& operator=(const FixedPoint&) = default;
+
+    constexpr FixedPoint(FixedPoint&&) noexcept = default;
+    constexpr FixedPoint& operator=(FixedPoint&&) noexcept = default;
 
-    template <class Number>
-    constexpr FixedPoint(
-        Number n, typename std::enable_if<std::is_arithmetic<Number>::value>::type* = nullptr)
-        : data_(static_cast<base_type>(n * one)) {}
+    template <IsArithmetic Number>
+    constexpr FixedPoint(Number n) : data_(static_cast<base_type>(n * one)) {}
 
 public: // conversion
     template <size_t I2, size_t F2>
-    CONSTEXPR14 explicit FixedPoint(FixedPoint<I2, F2> other) {
+    constexpr explicit FixedPoint(FixedPoint<I2, F2> other) {
         static_assert(I2 <= I && F2 <= F, "Scaling conversion can only upgrade types");
         using T = FixedPoint<I2, F2>;
 
@@ -308,36 +304,14 @@ public:
     }
 
 public: // comparison operators
-    constexpr bool operator==(FixedPoint rhs) const {
-        return data_ == rhs.data_;
-    }
-
-    constexpr bool operator!=(FixedPoint rhs) const {
-        return data_ != rhs.data_;
-    }
-
-    constexpr bool operator<(FixedPoint rhs) const {
-        return data_ < rhs.data_;
-    }
-
-    constexpr bool operator>(FixedPoint rhs) const {
-        return data_ > rhs.data_;
-    }
-
-    constexpr bool operator<=(FixedPoint rhs) const {
-        return data_ <= rhs.data_;
-    }
-
-    constexpr bool operator>=(FixedPoint rhs) const {
-        return data_ >= rhs.data_;
-    }
+    friend constexpr auto operator<=>(FixedPoint lhs, FixedPoint rhs) = default;
 
 public: // unary operators
-    constexpr bool operator!() const {
+    [[nodiscard]] constexpr bool operator!() const {
         return !data_;
     }
 
-    constexpr FixedPoint operator~() const {
+    [[nodiscard]] constexpr FixedPoint operator~() const {
         // NOTE(eteran): this will often appear to "just negate" the value
         // that is not an error, it is because -x == (~x+1)
         // and that "+1" is adding an infinitesimally small fraction to the
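Note: the single defaulted `operator<=>` can replace all six hand-written comparisons because a class that defaults its three-way comparison also gets an implicitly declared defaulted `operator==`, and the compiler rewrites `!=`, `<`, `>`, `<=` and `>=` in terms of those two. A compilable miniature (Fixed16 is an invented stand-in with one member, like FixedPoint's data_):

    #include <compare>

    struct Fixed16 {
        int data_;
        friend constexpr auto operator<=>(Fixed16 lhs, Fixed16 rhs) = default;
    };

    // All six comparisons are synthesized from <=> (and the implicit ==):
    static_assert(Fixed16{1} < Fixed16{2});
    static_assert(Fixed16{2} >= Fixed16{2});
    static_assert(Fixed16{3} != Fixed16{2});
    static_assert(Fixed16{2} == Fixed16{2});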
@@ -345,89 +319,87 @@ public: // unary operators
         return FixedPoint::from_base(~data_);
     }
 
-    constexpr FixedPoint operator-() const {
+    [[nodiscard]] constexpr FixedPoint operator-() const {
         return FixedPoint::from_base(-data_);
     }
 
-    constexpr FixedPoint operator+() const {
+    [[nodiscard]] constexpr FixedPoint operator+() const {
         return FixedPoint::from_base(+data_);
     }
 
-    CONSTEXPR14 FixedPoint& operator++() {
+    constexpr FixedPoint& operator++() {
         data_ += one;
         return *this;
     }
 
-    CONSTEXPR14 FixedPoint& operator--() {
+    constexpr FixedPoint& operator--() {
         data_ -= one;
         return *this;
     }
 
-    CONSTEXPR14 FixedPoint operator++(int) {
+    constexpr FixedPoint operator++(int) {
         FixedPoint tmp(*this);
         data_ += one;
         return tmp;
     }
 
-    CONSTEXPR14 FixedPoint operator--(int) {
+    constexpr FixedPoint operator--(int) {
         FixedPoint tmp(*this);
         data_ -= one;
         return tmp;
     }
 
 public: // basic math operators
-    CONSTEXPR14 FixedPoint& operator+=(FixedPoint n) {
+    constexpr FixedPoint& operator+=(FixedPoint n) {
         data_ += n.data_;
         return *this;
     }
 
-    CONSTEXPR14 FixedPoint& operator-=(FixedPoint n) {
+    constexpr FixedPoint& operator-=(FixedPoint n) {
         data_ -= n.data_;
         return *this;
     }
 
-    CONSTEXPR14 FixedPoint& operator*=(FixedPoint n) {
+    constexpr FixedPoint& operator*=(FixedPoint n) {
         return assign(detail::multiply(*this, n));
     }
 
-    CONSTEXPR14 FixedPoint& operator/=(FixedPoint n) {
+    constexpr FixedPoint& operator/=(FixedPoint n) {
         FixedPoint temp;
         return assign(detail::divide(*this, n, temp));
     }
 
 private:
-    CONSTEXPR14 FixedPoint& assign(FixedPoint rhs) {
+    constexpr FixedPoint& assign(FixedPoint rhs) {
         data_ = rhs.data_;
         return *this;
     }
 
 public: // binary math operators, effects underlying bit pattern since these
         // don't really typically make sense for non-integer values
-    CONSTEXPR14 FixedPoint& operator&=(FixedPoint n) {
+    constexpr FixedPoint& operator&=(FixedPoint n) {
         data_ &= n.data_;
         return *this;
     }
 
-    CONSTEXPR14 FixedPoint& operator|=(FixedPoint n) {
+    constexpr FixedPoint& operator|=(FixedPoint n) {
         data_ |= n.data_;
         return *this;
     }
 
-    CONSTEXPR14 FixedPoint& operator^=(FixedPoint n) {
+    constexpr FixedPoint& operator^=(FixedPoint n) {
         data_ ^= n.data_;
         return *this;
     }
 
-    template <class Integer,
-              class = typename std::enable_if<std::is_integral<Integer>::value>::type>
-    CONSTEXPR14 FixedPoint& operator>>=(Integer n) {
+    template <IsIntegral Integer>
+    constexpr FixedPoint& operator>>=(Integer n) {
         data_ >>= n;
         return *this;
     }
 
-    template <class Integer,
-              class = typename std::enable_if<std::is_integral<Integer>::value>::type>
-    CONSTEXPR14 FixedPoint& operator<<=(Integer n) {
+    template <IsIntegral Integer>
+    constexpr FixedPoint& operator<<=(Integer n) {
         data_ <<= n;
         return *this;
     }
@@ -437,42 +409,42 @@ public: // conversion to basic types
         data_ += (data_ & fractional_mask) >> 1;
     }
 
-    constexpr int to_int() {
+    [[nodiscard]] constexpr int to_int() {
         round_up();
         return static_cast<int>((data_ & integer_mask) >> fractional_bits);
     }
 
-    constexpr unsigned int to_uint() const {
+    [[nodiscard]] constexpr unsigned int to_uint() {
         round_up();
         return static_cast<unsigned int>((data_ & integer_mask) >> fractional_bits);
     }
 
-    constexpr int64_t to_long() {
+    [[nodiscard]] constexpr int64_t to_long() {
         round_up();
         return static_cast<int64_t>((data_ & integer_mask) >> fractional_bits);
     }
 
-    constexpr int to_int_floor() const {
+    [[nodiscard]] constexpr int to_int_floor() const {
         return static_cast<int>((data_ & integer_mask) >> fractional_bits);
     }
 
-    constexpr int64_t to_long_floor() {
+    [[nodiscard]] constexpr int64_t to_long_floor() const {
         return static_cast<int64_t>((data_ & integer_mask) >> fractional_bits);
     }
 
-    constexpr unsigned int to_uint_floor() const {
+    [[nodiscard]] constexpr unsigned int to_uint_floor() const {
         return static_cast<unsigned int>((data_ & integer_mask) >> fractional_bits);
     }
 
-    constexpr float to_float() const {
+    [[nodiscard]] constexpr float to_float() const {
         return static_cast<float>(data_) / FixedPoint::one;
     }
 
-    constexpr double to_double() const {
+    [[nodiscard]] constexpr double to_double() const {
         return static_cast<double>(data_) / FixedPoint::one;
     }
 
-    constexpr base_type to_raw() const {
+    [[nodiscard]] constexpr base_type to_raw() const {
         return data_;
     }
 
@@ -480,27 +452,27 @@ public: // conversion to basic types
         data_ &= fractional_mask;
     }
 
-    constexpr base_type get_frac() const {
+    [[nodiscard]] constexpr base_type get_frac() const {
         return data_ & fractional_mask;
     }
 
 public:
-    CONSTEXPR14 void swap(FixedPoint& rhs) {
+    constexpr void swap(FixedPoint& rhs) noexcept {
         using std::swap;
         swap(data_, rhs.data_);
     }
 
 public:
-    base_type data_;
+    base_type data_{};
 };
 
 // if we have the same fractional portion, but differing integer portions, we trivially upgrade the
 // smaller type
 template <size_t I1, size_t I2, size_t F>
-CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
-operator+(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator+(
+    FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 
-    using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+    using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>;
 
     const T l = T::from_base(lhs.to_raw());
     const T r = T::from_base(rhs.to_raw());
@@ -508,10 +480,10 @@ operator+(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 }
 
 template <size_t I1, size_t I2, size_t F>
-CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
-operator-(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator-(
+    FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 
-    using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+    using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>;
 
     const T l = T::from_base(lhs.to_raw());
     const T r = T::from_base(rhs.to_raw());
@@ -519,10 +491,10 @@ operator-(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 }
 
 template <size_t I1, size_t I2, size_t F>
-CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
-operator*(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator*(
+    FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 
-    using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+    using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>;
 
     const T l = T::from_base(lhs.to_raw());
     const T r = T::from_base(rhs.to_raw());
@@ -530,10 +502,10 @@ operator*(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 }
 
 template <size_t I1, size_t I2, size_t F>
-CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
-operator/(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator/(
+    FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 
-    using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+    using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>;
 
     const T l = T::from_base(lhs.to_raw());
     const T r = T::from_base(rhs.to_raw());
@@ -548,159 +520,133 @@ std::ostream& operator<<(std::ostream& os, FixedPoint<I, F> f) {
 
 // basic math operators
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
+constexpr FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
     lhs += rhs;
     return lhs;
 }
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
+constexpr FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
     lhs -= rhs;
     return lhs;
 }
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
+constexpr FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
     lhs *= rhs;
     return lhs;
 }
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
+constexpr FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
     lhs /= rhs;
     return lhs;
 }
 
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, Number rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, Number rhs) {
     lhs += FixedPoint<I, F>(rhs);
     return lhs;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, Number rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, Number rhs) {
     lhs -= FixedPoint<I, F>(rhs);
     return lhs;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, Number rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, Number rhs) {
     lhs *= FixedPoint<I, F>(rhs);
     return lhs;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, Number rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, Number rhs) {
     lhs /= FixedPoint<I, F>(rhs);
     return lhs;
 }
 
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator+(Number lhs, FixedPoint<I, F> rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator+(Number lhs, FixedPoint<I, F> rhs) {
     FixedPoint<I, F> tmp(lhs);
     tmp += rhs;
     return tmp;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator-(Number lhs, FixedPoint<I, F> rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator-(Number lhs, FixedPoint<I, F> rhs) {
     FixedPoint<I, F> tmp(lhs);
     tmp -= rhs;
     return tmp;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator*(Number lhs, FixedPoint<I, F> rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator*(Number lhs, FixedPoint<I, F> rhs) {
     FixedPoint<I, F> tmp(lhs);
     tmp *= rhs;
     return tmp;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator/(Number lhs, FixedPoint<I, F> rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator/(Number lhs, FixedPoint<I, F> rhs) {
     FixedPoint<I, F> tmp(lhs);
     tmp /= rhs;
     return tmp;
 }
 
 // shift operators
-template <size_t I, size_t F, class Integer,
-          class = typename std::enable_if<std::is_integral<Integer>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator<<(FixedPoint<I, F> lhs, Integer rhs) {
+template <size_t I, size_t F, IsIntegral Integer>
+constexpr FixedPoint<I, F> operator<<(FixedPoint<I, F> lhs, Integer rhs) {
     lhs <<= rhs;
     return lhs;
 }
-template <size_t I, size_t F, class Integer,
-          class = typename std::enable_if<std::is_integral<Integer>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator>>(FixedPoint<I, F> lhs, Integer rhs) {
+template <size_t I, size_t F, IsIntegral Integer>
+constexpr FixedPoint<I, F> operator>>(FixedPoint<I, F> lhs, Integer rhs) {
     lhs >>= rhs;
     return lhs;
 }
 
 // comparison operators
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
 constexpr bool operator>(FixedPoint<I, F> lhs, Number rhs) {
     return lhs > FixedPoint<I, F>(rhs);
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
 constexpr bool operator<(FixedPoint<I, F> lhs, Number rhs) {
     return lhs < FixedPoint<I, F>(rhs);
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
 constexpr bool operator>=(FixedPoint<I, F> lhs, Number rhs) {
     return lhs >= FixedPoint<I, F>(rhs);
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
 constexpr bool operator<=(FixedPoint<I, F> lhs, Number rhs) {
     return lhs <= FixedPoint<I, F>(rhs);
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
 constexpr bool operator==(FixedPoint<I, F> lhs, Number rhs) {
     return lhs == FixedPoint<I, F>(rhs);
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
 constexpr bool operator!=(FixedPoint<I, F> lhs, Number rhs) {
     return lhs != FixedPoint<I, F>(rhs);
 }
 
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
 constexpr bool operator>(Number lhs, FixedPoint<I, F> rhs) {
     return FixedPoint<I, F>(lhs) > rhs;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
 constexpr bool operator<(Number lhs, FixedPoint<I, F> rhs) {
     return FixedPoint<I, F>(lhs) < rhs;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
 constexpr bool operator>=(Number lhs, FixedPoint<I, F> rhs) {
     return FixedPoint<I, F>(lhs) >= rhs;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
 constexpr bool operator<=(Number lhs, FixedPoint<I, F> rhs) {
     return FixedPoint<I, F>(lhs) <= rhs;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
 constexpr bool operator==(Number lhs, FixedPoint<I, F> rhs) {
     return FixedPoint<I, F>(lhs) == rhs;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
 constexpr bool operator!=(Number lhs, FixedPoint<I, F> rhs) {
     return FixedPoint<I, F>(lhs) != rhs;
 }
 
 } // namespace Common
-
-#undef CONSTEXPR14
-
-#endif
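Note: a quick compile-time usage check of the modernized header, handy for confirming nothing regressed in the CONSTEXPR14-to-constexpr sweep; the 50.14 layout and the values are chosen arbitrarily for the example:

    #include "common/fixed_point.h"

    constexpr Common::FixedPoint<50, 14> half = 0.5; // concept-checked constructor
    constexpr auto x = half + 1;                     // mixed arithmetic via IsArithmetic
    static_assert(x.to_raw() == 3 << 13);            // 1.5 in the 50.14 layout
    static_assert(x > half);                         // from the defaulted operator<=>
    static_assert((x + x).to_double() == 3.0);       // constexpr all the way through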
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index abeb5859b..055bea641 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -190,6 +190,9 @@ add_library(core STATIC
     hle/kernel/k_code_memory.h
     hle/kernel/k_condition_variable.cpp
     hle/kernel/k_condition_variable.h
+    hle/kernel/k_dynamic_page_manager.h
+    hle/kernel/k_dynamic_resource_manager.h
+    hle/kernel/k_dynamic_slab_heap.h
     hle/kernel/k_event.cpp
     hle/kernel/k_event.h
     hle/kernel/k_handle_table.cpp
@@ -240,6 +243,8 @@ add_library(core STATIC
     hle/kernel/k_server_session.h
     hle/kernel/k_session.cpp
     hle/kernel/k_session.h
+    hle/kernel/k_session_request.cpp
+    hle/kernel/k_session_request.h
     hle/kernel/k_shared_memory.cpp
     hle/kernel/k_shared_memory.h
     hle/kernel/k_shared_memory_info.h
diff --git a/src/core/arm/arm_interface.cpp b/src/core/arm/arm_interface.cpp index 953d96439..29ba562dc 100644 --- a/src/core/arm/arm_interface.cpp +++ b/src/core/arm/arm_interface.cpp | |||
| @@ -134,6 +134,14 @@ void ARM_Interface::Run() { | |||
| 134 | } | 134 | } |
| 135 | system.ExitDynarmicProfile(); | 135 | system.ExitDynarmicProfile(); |
| 136 | 136 | ||
| 137 | // If the thread is scheduled for termination, exit the thread. | ||
| 138 | if (current_thread->HasDpc()) { | ||
| 139 | if (current_thread->IsTerminationRequested()) { | ||
| 140 | current_thread->Exit(); | ||
| 141 | UNREACHABLE(); | ||
| 142 | } | ||
| 143 | } | ||
| 144 | |||
| 137 | // Notify the debugger and go to sleep if a breakpoint was hit, | 145 | // Notify the debugger and go to sleep if a breakpoint was hit, |
| 138 | // or if the thread is unable to continue for any reason. | 146 | // or if the thread is unable to continue for any reason. |
| 139 | if (Has(hr, breakpoint) || Has(hr, no_execute)) { | 147 | if (Has(hr, breakpoint) || Has(hr, no_execute)) { |
diff --git a/src/core/core.cpp b/src/core/core.cpp index 1deeee154..7fb8bc019 100644 --- a/src/core/core.cpp +++ b/src/core/core.cpp | |||
| @@ -133,6 +133,50 @@ struct System::Impl { | |||
| 133 | : kernel{system}, fs_controller{system}, memory{system}, hid_core{}, room_network{}, | 133 | : kernel{system}, fs_controller{system}, memory{system}, hid_core{}, room_network{}, |
| 134 | cpu_manager{system}, reporter{system}, applet_manager{system}, time_manager{system} {} | 134 | cpu_manager{system}, reporter{system}, applet_manager{system}, time_manager{system} {} |
| 135 | 135 | ||
| 136 | void Initialize(System& system) { | ||
| 137 | device_memory = std::make_unique<Core::DeviceMemory>(); | ||
| 138 | |||
| 139 | is_multicore = Settings::values.use_multi_core.GetValue(); | ||
| 140 | |||
| 141 | core_timing.SetMulticore(is_multicore); | ||
| 142 | core_timing.Initialize([&system]() { system.RegisterHostThread(); }); | ||
| 143 | |||
| 144 | const auto posix_time = std::chrono::system_clock::now().time_since_epoch(); | ||
| 145 | const auto current_time = | ||
| 146 | std::chrono::duration_cast<std::chrono::seconds>(posix_time).count(); | ||
| 147 | Settings::values.custom_rtc_differential = | ||
| 148 | Settings::values.custom_rtc.value_or(current_time) - current_time; | ||
| 149 | |||
| 150 | // Create a default fs if one doesn't already exist. | ||
| 151 | if (virtual_filesystem == nullptr) { | ||
| 152 | virtual_filesystem = std::make_shared<FileSys::RealVfsFilesystem>(); | ||
| 153 | } | ||
| 154 | if (content_provider == nullptr) { | ||
| 155 | content_provider = std::make_unique<FileSys::ContentProviderUnion>(); | ||
| 156 | } | ||
| 157 | |||
| 158 | // Create default implementations of applets if one is not provided. | ||
| 159 | applet_manager.SetDefaultAppletsIfMissing(); | ||
| 160 | |||
| 161 | is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue(); | ||
| 162 | |||
| 163 | kernel.SetMulticore(is_multicore); | ||
| 164 | cpu_manager.SetMulticore(is_multicore); | ||
| 165 | cpu_manager.SetAsyncGpu(is_async_gpu); | ||
| 166 | } | ||
| 167 | |||
| 168 | void ReinitializeIfNecessary(System& system) { | ||
| 169 | if (is_multicore == Settings::values.use_multi_core.GetValue()) { | ||
| 170 | return; | ||
| 171 | } | ||
| 172 | |||
| 173 | LOG_DEBUG(Kernel, "Re-initializing"); | ||
| 174 | |||
| 175 | is_multicore = Settings::values.use_multi_core.GetValue(); | ||
| 176 | |||
| 177 | Initialize(system); | ||
| 178 | } | ||
| 179 | |||
| 136 | SystemResultStatus Run() { | 180 | SystemResultStatus Run() { |
| 137 | std::unique_lock<std::mutex> lk(suspend_guard); | 181 | std::unique_lock<std::mutex> lk(suspend_guard); |
| 138 | status = SystemResultStatus::Success; | 182 | status = SystemResultStatus::Success; |
| @@ -178,37 +222,14 @@ struct System::Impl { | |||
| 178 | debugger = std::make_unique<Debugger>(system, port); | 222 | debugger = std::make_unique<Debugger>(system, port); |
| 179 | } | 223 | } |
| 180 | 224 | ||
| 181 | SystemResultStatus Init(System& system, Frontend::EmuWindow& emu_window) { | 225 | SystemResultStatus SetupForMainProcess(System& system, Frontend::EmuWindow& emu_window) { |
| 182 | LOG_DEBUG(Core, "initialized OK"); | 226 | LOG_DEBUG(Core, "initialized OK"); |
| 183 | 227 | ||
| 184 | device_memory = std::make_unique<Core::DeviceMemory>(); | 228 | // Setting changes may require a full system reinitialization (e.g., disabling multicore). |
| 185 | 229 | ReinitializeIfNecessary(system); | |
| 186 | is_multicore = Settings::values.use_multi_core.GetValue(); | ||
| 187 | is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue(); | ||
| 188 | |||
| 189 | kernel.SetMulticore(is_multicore); | ||
| 190 | cpu_manager.SetMulticore(is_multicore); | ||
| 191 | cpu_manager.SetAsyncGpu(is_async_gpu); | ||
| 192 | core_timing.SetMulticore(is_multicore); | ||
| 193 | 230 | ||
| 194 | kernel.Initialize(); | 231 | kernel.Initialize(); |
| 195 | cpu_manager.Initialize(); | 232 | cpu_manager.Initialize(); |
| 196 | core_timing.Initialize([&system]() { system.RegisterHostThread(); }); | ||
| 197 | |||
| 198 | const auto posix_time = std::chrono::system_clock::now().time_since_epoch(); | ||
| 199 | const auto current_time = | ||
| 200 | std::chrono::duration_cast<std::chrono::seconds>(posix_time).count(); | ||
| 201 | Settings::values.custom_rtc_differential = | ||
| 202 | Settings::values.custom_rtc.value_or(current_time) - current_time; | ||
| 203 | |||
| 204 | // Create a default fs if one doesn't already exist. | ||
| 205 | if (virtual_filesystem == nullptr) | ||
| 206 | virtual_filesystem = std::make_shared<FileSys::RealVfsFilesystem>(); | ||
| 207 | if (content_provider == nullptr) | ||
| 208 | content_provider = std::make_unique<FileSys::ContentProviderUnion>(); | ||
| 209 | |||
| 210 | /// Create default implementations of applets if one is not provided. | ||
| 211 | applet_manager.SetDefaultAppletsIfMissing(); | ||
| 212 | 233 | ||
| 213 | /// Reset all glue registrations | 234 | /// Reset all glue registrations |
| 214 | arp_manager.ResetAll(); | 235 | arp_manager.ResetAll(); |
| @@ -253,11 +274,11 @@ struct System::Impl { | |||
| 253 | return SystemResultStatus::ErrorGetLoader; | 274 | return SystemResultStatus::ErrorGetLoader; |
| 254 | } | 275 | } |
| 255 | 276 | ||
| 256 | SystemResultStatus init_result{Init(system, emu_window)}; | 277 | SystemResultStatus init_result{SetupForMainProcess(system, emu_window)}; |
| 257 | if (init_result != SystemResultStatus::Success) { | 278 | if (init_result != SystemResultStatus::Success) { |
| 258 | LOG_CRITICAL(Core, "Failed to initialize system (Error {})!", | 279 | LOG_CRITICAL(Core, "Failed to initialize system (Error {})!", |
| 259 | static_cast<int>(init_result)); | 280 | static_cast<int>(init_result)); |
| 260 | Shutdown(); | 281 | ShutdownMainProcess(); |
| 261 | return init_result; | 282 | return init_result; |
| 262 | } | 283 | } |
| 263 | 284 | ||
| @@ -276,7 +297,7 @@ struct System::Impl { | |||
| 276 | const auto [load_result, load_parameters] = app_loader->Load(*main_process, system); | 297 | const auto [load_result, load_parameters] = app_loader->Load(*main_process, system); |
| 277 | if (load_result != Loader::ResultStatus::Success) { | 298 | if (load_result != Loader::ResultStatus::Success) { |
| 278 | LOG_CRITICAL(Core, "Failed to load ROM (Error {})!", load_result); | 299 | LOG_CRITICAL(Core, "Failed to load ROM (Error {})!", load_result); |
| 279 | Shutdown(); | 300 | ShutdownMainProcess(); |
| 280 | 301 | ||
| 281 | return static_cast<SystemResultStatus>( | 302 | return static_cast<SystemResultStatus>( |
| 282 | static_cast<u32>(SystemResultStatus::ErrorLoader) + static_cast<u32>(load_result)); | 303 | static_cast<u32>(SystemResultStatus::ErrorLoader) + static_cast<u32>(load_result)); |
| @@ -335,7 +356,7 @@ struct System::Impl { | |||
| 335 | return status; | 356 | return status; |
| 336 | } | 357 | } |
| 337 | 358 | ||
| 338 | void Shutdown() { | 359 | void ShutdownMainProcess() { |
| 339 | SetShuttingDown(true); | 360 | SetShuttingDown(true); |
| 340 | 361 | ||
| 341 | // Log last frame performance stats if game was loaded | 362 |
| @@ -369,7 +390,7 @@ struct System::Impl { | |||
| 369 | cheat_engine.reset(); | 390 | cheat_engine.reset(); |
| 370 | telemetry_session.reset(); | 391 | telemetry_session.reset(); |
| 371 | time_manager.Shutdown(); | 392 | time_manager.Shutdown(); |
| 372 | core_timing.Shutdown(); | 393 | core_timing.ClearPendingEvents(); |
| 373 | app_loader.reset(); | 394 | app_loader.reset(); |
| 374 | audio_core.reset(); | 395 | audio_core.reset(); |
| 375 | gpu_core.reset(); | 396 | gpu_core.reset(); |
| @@ -377,7 +398,6 @@ struct System::Impl { | |||
| 377 | perf_stats.reset(); | 398 | perf_stats.reset(); |
| 378 | kernel.Shutdown(); | 399 | kernel.Shutdown(); |
| 379 | memory.Reset(); | 400 | memory.Reset(); |
| 380 | applet_manager.ClearAll(); | ||
| 381 | 401 | ||
| 382 | if (auto room_member = room_network.GetRoomMember().lock()) { | 402 | if (auto room_member = room_network.GetRoomMember().lock()) { |
| 383 | Network::GameInfo game_info{}; | 403 | Network::GameInfo game_info{}; |
| @@ -520,6 +540,10 @@ const CpuManager& System::GetCpuManager() const { | |||
| 520 | return impl->cpu_manager; | 540 | return impl->cpu_manager; |
| 521 | } | 541 | } |
| 522 | 542 | ||
| 543 | void System::Initialize() { | ||
| 544 | impl->Initialize(*this); | ||
| 545 | } | ||
| 546 | |||
| 523 | SystemResultStatus System::Run() { | 547 | SystemResultStatus System::Run() { |
| 524 | return impl->Run(); | 548 | return impl->Run(); |
| 525 | } | 549 | } |
| @@ -540,8 +564,8 @@ void System::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) { | |||
| 540 | impl->kernel.InvalidateCpuInstructionCacheRange(addr, size); | 564 | impl->kernel.InvalidateCpuInstructionCacheRange(addr, size); |
| 541 | } | 565 | } |
| 542 | 566 | ||
| 543 | void System::Shutdown() { | 567 | void System::ShutdownMainProcess() { |
| 544 | impl->Shutdown(); | 568 | impl->ShutdownMainProcess(); |
| 545 | } | 569 | } |
| 546 | 570 | ||
| 547 | bool System::IsShuttingDown() const { | 571 | bool System::IsShuttingDown() const { |
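Net effect of the core.cpp changes: one-time setup moves out of the per-boot path. `Initialize()` now owns device memory, core timing, the RTC differential, and default VFS/applet creation; `SetupForMainProcess()` (the renamed `Init`) re-runs only the kernel and CPU-manager setup, with `ReinitializeIfNecessary()` triggering a full re-init when the multicore setting changed; `ShutdownMainProcess()` tears down the game while leaving the `System` reusable. A hedged sketch of the resulting frontend flow (the loop structure and variable names are illustrative, and `Load`'s signature is abbreviated):

```cpp
Core::System system;
system.Initialize(); // one-time: device memory, core timing, VFS, applets

while (user_picks_a_game) { // hypothetical frontend loop condition
    if (system.Load(emu_window, game_path) != Core::SystemResultStatus::Success) {
        break; // Load() runs SetupForMainProcess() internally
    }
    system.Run();
    // ... emulation until the frontend requests a stop ...
    system.ShutdownMainProcess(); // System object stays alive for the next boot
}
```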
diff --git a/src/core/core.h b/src/core/core.h index 7843cc8ad..4ebedffd9 100644 --- a/src/core/core.h +++ b/src/core/core.h | |||
| @@ -143,6 +143,12 @@ public: | |||
| 143 | System& operator=(System&&) = delete; | 143 | System& operator=(System&&) = delete; |
| 144 | 144 | ||
| 145 | /** | 145 | /** |
| 146 | * Initializes the system | ||
| 147 | * This function will initialize core functionality used for system emulation | ||
| 148 | */ | ||
| 149 | void Initialize(); | ||
| 150 | |||
| 151 | /** | ||
| 146 | * Run the OS and Application | 152 | * Run the OS and Application |
| 147 | * This function will start emulation and run the relevant devices | 153 | * This function will start emulation and run the relevant devices |
| 148 | */ | 154 | */ |
| @@ -166,8 +172,8 @@ public: | |||
| 166 | 172 | ||
| 167 | void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size); | 173 | void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size); |
| 168 | 174 | ||
| 169 | /// Shutdown the emulated system. | 175 | /// Shutdown the main emulated process. |
| 170 | void Shutdown(); | 176 | void ShutdownMainProcess(); |
| 171 | 177 | ||
| 172 | /// Check if the core is shutting down. | 178 | /// Check if the core is shutting down. |
| 173 | [[nodiscard]] bool IsShuttingDown() const; | 179 | [[nodiscard]] bool IsShuttingDown() const; |
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp index 2678ce532..0e7b5f943 100644 --- a/src/core/core_timing.cpp +++ b/src/core/core_timing.cpp | |||
| @@ -40,7 +40,9 @@ struct CoreTiming::Event { | |||
| 40 | CoreTiming::CoreTiming() | 40 | CoreTiming::CoreTiming() |
| 41 | : clock{Common::CreateBestMatchingClock(Hardware::BASE_CLOCK_RATE, Hardware::CNTFREQ)} {} | 41 | : clock{Common::CreateBestMatchingClock(Hardware::BASE_CLOCK_RATE, Hardware::CNTFREQ)} {} |
| 42 | 42 | ||
| 43 | CoreTiming::~CoreTiming() = default; | 43 | CoreTiming::~CoreTiming() { |
| 44 | Reset(); | ||
| 45 | } | ||
| 44 | 46 | ||
| 45 | void CoreTiming::ThreadEntry(CoreTiming& instance) { | 47 | void CoreTiming::ThreadEntry(CoreTiming& instance) { |
| 46 | constexpr char name[] = "HostTiming"; | 48 | constexpr char name[] = "HostTiming"; |
| @@ -53,6 +55,7 @@ void CoreTiming::ThreadEntry(CoreTiming& instance) { | |||
| 53 | } | 55 | } |
| 54 | 56 | ||
| 55 | void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) { | 57 | void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) { |
| 58 | Reset(); | ||
| 56 | on_thread_init = std::move(on_thread_init_); | 59 | on_thread_init = std::move(on_thread_init_); |
| 57 | event_fifo_id = 0; | 60 | event_fifo_id = 0; |
| 58 | shutting_down = false; | 61 | shutting_down = false; |
| @@ -65,17 +68,8 @@ void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) { | |||
| 65 | } | 68 | } |
| 66 | } | 69 | } |
| 67 | 70 | ||
| 68 | void CoreTiming::Shutdown() { | 71 | void CoreTiming::ClearPendingEvents() { |
| 69 | paused = true; | 72 | event_queue.clear(); |
| 70 | shutting_down = true; | ||
| 71 | pause_event.Set(); | ||
| 72 | event.Set(); | ||
| 73 | if (timer_thread) { | ||
| 74 | timer_thread->join(); | ||
| 75 | } | ||
| 76 | ClearPendingEvents(); | ||
| 77 | timer_thread.reset(); | ||
| 78 | has_started = false; | ||
| 79 | } | 73 | } |
| 80 | 74 | ||
| 81 | void CoreTiming::Pause(bool is_paused) { | 75 | void CoreTiming::Pause(bool is_paused) { |
| @@ -196,10 +190,6 @@ u64 CoreTiming::GetClockTicks() const { | |||
| 196 | return CpuCyclesToClockCycles(ticks); | 190 | return CpuCyclesToClockCycles(ticks); |
| 197 | } | 191 | } |
| 198 | 192 | ||
| 199 | void CoreTiming::ClearPendingEvents() { | ||
| 200 | event_queue.clear(); | ||
| 201 | } | ||
| 202 | |||
| 203 | void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) { | 193 | void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) { |
| 204 | std::scoped_lock lock{basic_lock}; | 194 | std::scoped_lock lock{basic_lock}; |
| 205 | 195 | ||
| @@ -307,6 +297,18 @@ void CoreTiming::ThreadLoop() { | |||
| 307 | } | 297 | } |
| 308 | } | 298 | } |
| 309 | 299 | ||
| 300 | void CoreTiming::Reset() { | ||
| 301 | paused = true; | ||
| 302 | shutting_down = true; | ||
| 303 | pause_event.Set(); | ||
| 304 | event.Set(); | ||
| 305 | if (timer_thread) { | ||
| 306 | timer_thread->join(); | ||
| 307 | } | ||
| 308 | timer_thread.reset(); | ||
| 309 | has_started = false; | ||
| 310 | } | ||
| 311 | |||
| 310 | std::chrono::nanoseconds CoreTiming::GetGlobalTimeNs() const { | 312 | std::chrono::nanoseconds CoreTiming::GetGlobalTimeNs() const { |
| 311 | if (is_multicore) { | 313 | if (is_multicore) { |
| 312 | return clock->GetTimeNS(); | 314 | return clock->GetTimeNS(); |
diff --git a/src/core/core_timing.h b/src/core/core_timing.h index 3259397b2..b5925193c 100644 --- a/src/core/core_timing.h +++ b/src/core/core_timing.h | |||
| @@ -61,19 +61,14 @@ public: | |||
| 61 | /// required to end slice - 1 and start slice 0 before the first cycle of code is executed. | 61 | /// required to end slice - 1 and start slice 0 before the first cycle of code is executed. |
| 62 | void Initialize(std::function<void()>&& on_thread_init_); | 62 | void Initialize(std::function<void()>&& on_thread_init_); |
| 63 | 63 | ||
| 64 | /// Tears down all timing related functionality. | 64 | /// Clear all pending events. This should ONLY be done on exit. |
| 65 | void Shutdown(); | 65 | void ClearPendingEvents(); |
| 66 | 66 | ||
| 67 | /// Sets if emulation is multicore or single core, must be set before Initialize | 67 | /// Sets if emulation is multicore or single core, must be set before Initialize |
| 68 | void SetMulticore(bool is_multicore_) { | 68 | void SetMulticore(bool is_multicore_) { |
| 69 | is_multicore = is_multicore_; | 69 | is_multicore = is_multicore_; |
| 70 | } | 70 | } |
| 71 | 71 | ||
| 72 | /// Check if it's using host timing. | ||
| 73 | bool IsHostTiming() const { | ||
| 74 | return is_multicore; | ||
| 75 | } | ||
| 76 | |||
| 77 | /// Pauses/Unpauses the execution of the timer thread. | 72 | /// Pauses/Unpauses the execution of the timer thread. |
| 78 | void Pause(bool is_paused); | 73 | void Pause(bool is_paused); |
| 79 | 74 | ||
| @@ -136,12 +131,11 @@ public: | |||
| 136 | private: | 131 | private: |
| 137 | struct Event; | 132 | struct Event; |
| 138 | 133 | ||
| 139 | /// Clear all pending events. This should ONLY be done on exit. | ||
| 140 | void ClearPendingEvents(); | ||
| 141 | |||
| 142 | static void ThreadEntry(CoreTiming& instance); | 134 | static void ThreadEntry(CoreTiming& instance); |
| 143 | void ThreadLoop(); | 135 | void ThreadLoop(); |
| 144 | 136 | ||
| 137 | void Reset(); | ||
| 138 | |||
| 145 | std::unique_ptr<Common::WallClock> clock; | 139 | std::unique_ptr<Common::WallClock> clock; |
| 146 | 140 | ||
| 147 | s64 global_timer = 0; | 141 | s64 global_timer = 0; |
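CoreTiming's teardown is folded into a private `Reset()` that both the destructor and `Initialize()` call, so the timer thread can be cycled across main-process boots without an explicit `Shutdown()`; only `ClearPendingEvents()` stays public, for use during process shutdown. A sketch of the new lifecycle, assuming the surrounding System wiring:

```cpp
Core::Timing::CoreTiming core_timing;
core_timing.SetMulticore(true);                         // must precede Initialize
core_timing.Initialize([] { /* RegisterHostThread() */ });

// ... first boot runs; ShutdownMainProcess() only drops the queue ...
core_timing.ClearPendingEvents();

// A second boot simply re-enters Initialize(); its leading Reset() joins the
// old timer thread before spawning a new one. The destructor does the same.
core_timing.Initialize([] { /* RegisterHostThread() */ });
```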
diff --git a/src/core/device_memory.h b/src/core/device_memory.h index df61b0c0b..90510733c 100644 --- a/src/core/device_memory.h +++ b/src/core/device_memory.h | |||
| @@ -31,12 +31,14 @@ public: | |||
| 31 | DramMemoryMap::Base; | 31 | DramMemoryMap::Base; |
| 32 | } | 32 | } |
| 33 | 33 | ||
| 34 | u8* GetPointer(PAddr addr) { | 34 | template <typename T> |
| 35 | return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base); | 35 | T* GetPointer(PAddr addr) { |
| 36 | return reinterpret_cast<T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base)); | ||
| 36 | } | 37 | } |
| 37 | 38 | ||
| 38 | const u8* GetPointer(PAddr addr) const { | 39 | template <typename T> |
| 39 | return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base); | 40 | const T* GetPointer(PAddr addr) const { |
| 41 | return reinterpret_cast<T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base)); | ||
| 40 | } | 42 | } |
| 41 | 43 | ||
| 42 | Common::HostMemory buffer; | 44 | Common::HostMemory buffer; |
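Templating `GetPointer` moves the `reinterpret_cast` that every caller previously wrote into the accessor itself, and the const overload now preserves constness. An illustrative call site; the address is made up, and `PAddr` is the project's physical-address alias:

```cpp
Core::DeviceMemory device_memory;
const PAddr addr = 0x8'0000'0000; // hypothetical DRAM physical address

// Before: reinterpret_cast<u64*>(device_memory.GetPointer(addr))
u64* words = device_memory.GetPointer<u64>(addr);

// Untyped uses now state it explicitly at the call site:
void* backing = device_memory.GetPointer<void>(addr);
```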
diff --git a/src/core/file_sys/savedata_factory.cpp b/src/core/file_sys/savedata_factory.cpp index 8c1b2523c..1567da231 100644 --- a/src/core/file_sys/savedata_factory.cpp +++ b/src/core/file_sys/savedata_factory.cpp | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | #include "common/assert.h" | 5 | #include "common/assert.h" |
| 6 | #include "common/common_types.h" | 6 | #include "common/common_types.h" |
| 7 | #include "common/logging/log.h" | 7 | #include "common/logging/log.h" |
| 8 | #include "common/uuid.h" | ||
| 8 | #include "core/core.h" | 9 | #include "core/core.h" |
| 9 | #include "core/file_sys/savedata_factory.h" | 10 | #include "core/file_sys/savedata_factory.h" |
| 10 | #include "core/file_sys/vfs.h" | 11 | #include "core/file_sys/vfs.h" |
| @@ -59,6 +60,36 @@ bool ShouldSaveDataBeAutomaticallyCreated(SaveDataSpaceId space, const SaveDataA | |||
| 59 | attr.title_id == 0 && attr.save_id == 0); | 60 | attr.title_id == 0 && attr.save_id == 0); |
| 60 | } | 61 | } |
| 61 | 62 | ||
| 63 | std::string GetFutureSaveDataPath(SaveDataSpaceId space_id, SaveDataType type, u64 title_id, | ||
| 64 | u128 user_id) { | ||
| 65 | // Only detect nand user saves. | ||
| 66 | const auto space_id_path = [space_id]() -> std::string_view { | ||
| 67 | switch (space_id) { | ||
| 68 | case SaveDataSpaceId::NandUser: | ||
| 69 | return "/user/save"; | ||
| 70 | default: | ||
| 71 | return ""; | ||
| 72 | } | ||
| 73 | }(); | ||
| 74 | |||
| 75 | if (space_id_path.empty()) { | ||
| 76 | return ""; | ||
| 77 | } | ||
| 78 | |||
| 79 | Common::UUID uuid; | ||
| 80 | std::memcpy(uuid.uuid.data(), user_id.data(), sizeof(Common::UUID)); | ||
| 81 | |||
| 82 | // Only detect account/device saves from the future location. | ||
| 83 | switch (type) { | ||
| 84 | case SaveDataType::SaveData: | ||
| 85 | return fmt::format("{}/account/{}/{:016X}/1", space_id_path, uuid.RawString(), title_id); | ||
| 86 | case SaveDataType::DeviceSaveData: | ||
| 87 | return fmt::format("{}/device/{:016X}/1", space_id_path, title_id); | ||
| 88 | default: | ||
| 89 | return ""; | ||
| 90 | } | ||
| 91 | } | ||
| 92 | |||
| 62 | } // Anonymous namespace | 93 | } // Anonymous namespace |
| 63 | 94 | ||
| 64 | std::string SaveDataAttribute::DebugInfo() const { | 95 | std::string SaveDataAttribute::DebugInfo() const { |
| @@ -82,7 +113,7 @@ ResultVal<VirtualDir> SaveDataFactory::Create(SaveDataSpaceId space, | |||
| 82 | PrintSaveDataAttributeWarnings(meta); | 113 | PrintSaveDataAttributeWarnings(meta); |
| 83 | 114 | ||
| 84 | const auto save_directory = | 115 | const auto save_directory = |
| 85 | GetFullPath(system, space, meta.type, meta.title_id, meta.user_id, meta.save_id); | 116 | GetFullPath(system, dir, space, meta.type, meta.title_id, meta.user_id, meta.save_id); |
| 86 | 117 | ||
| 87 | auto out = dir->CreateDirectoryRelative(save_directory); | 118 | auto out = dir->CreateDirectoryRelative(save_directory); |
| 88 | 119 | ||
| @@ -99,7 +130,7 @@ ResultVal<VirtualDir> SaveDataFactory::Open(SaveDataSpaceId space, | |||
| 99 | const SaveDataAttribute& meta) const { | 130 | const SaveDataAttribute& meta) const { |
| 100 | 131 | ||
| 101 | const auto save_directory = | 132 | const auto save_directory = |
| 102 | GetFullPath(system, space, meta.type, meta.title_id, meta.user_id, meta.save_id); | 133 | GetFullPath(system, dir, space, meta.type, meta.title_id, meta.user_id, meta.save_id); |
| 103 | 134 | ||
| 104 | auto out = dir->GetDirectoryRelative(save_directory); | 135 | auto out = dir->GetDirectoryRelative(save_directory); |
| 105 | 136 | ||
| @@ -134,9 +165,9 @@ std::string SaveDataFactory::GetSaveDataSpaceIdPath(SaveDataSpaceId space) { | |||
| 134 | } | 165 | } |
| 135 | } | 166 | } |
| 136 | 167 | ||
| 137 | std::string SaveDataFactory::GetFullPath(Core::System& system, SaveDataSpaceId space, | 168 | std::string SaveDataFactory::GetFullPath(Core::System& system, VirtualDir dir, |
| 138 | SaveDataType type, u64 title_id, u128 user_id, | 169 | SaveDataSpaceId space, SaveDataType type, u64 title_id, |
| 139 | u64 save_id) { | 170 | u128 user_id, u64 save_id) { |
| 140 | // According to switchbrew, if a save is of type SaveData and the title id field is 0, it should | 171 | // According to switchbrew, if a save is of type SaveData and the title id field is 0, it should |
| 141 | // be interpreted as the title id of the current process. | 172 | // be interpreted as the title id of the current process. |
| 142 | if (type == SaveDataType::SaveData || type == SaveDataType::DeviceSaveData) { | 173 | if (type == SaveDataType::SaveData || type == SaveDataType::DeviceSaveData) { |
| @@ -145,6 +176,17 @@ std::string SaveDataFactory::GetFullPath(Core::System& system, SaveDataSpaceId s | |||
| 145 | } | 176 | } |
| 146 | } | 177 | } |
| 147 | 178 | ||
| 179 | // For compat with a future impl. | ||
| 180 | if (std::string future_path = | ||
| 181 | GetFutureSaveDataPath(space, type, title_id & ~(0xFFULL), user_id); | ||
| 182 | !future_path.empty()) { | ||
| 183 | // Check if this location exists, and prefer it over the old. | ||
| 184 | if (const auto future_dir = dir->GetDirectoryRelative(future_path); future_dir != nullptr) { | ||
| 185 | LOG_INFO(Service_FS, "Using save at new location: {}", future_path); | ||
| 186 | return future_path; | ||
| 187 | } | ||
| 188 | } | ||
| 189 | |||
| 148 | std::string out = GetSaveDataSpaceIdPath(space); | 190 | std::string out = GetSaveDataSpaceIdPath(space); |
| 149 | 191 | ||
| 150 | switch (type) { | 192 | switch (type) { |
| @@ -167,7 +209,8 @@ std::string SaveDataFactory::GetFullPath(Core::System& system, SaveDataSpaceId s | |||
| 167 | 209 | ||
| 168 | SaveDataSize SaveDataFactory::ReadSaveDataSize(SaveDataType type, u64 title_id, | 210 | SaveDataSize SaveDataFactory::ReadSaveDataSize(SaveDataType type, u64 title_id, |
| 169 | u128 user_id) const { | 211 | u128 user_id) const { |
| 170 | const auto path = GetFullPath(system, SaveDataSpaceId::NandUser, type, title_id, user_id, 0); | 212 | const auto path = |
| 213 | GetFullPath(system, dir, SaveDataSpaceId::NandUser, type, title_id, user_id, 0); | ||
| 171 | const auto relative_dir = GetOrCreateDirectoryRelative(dir, path); | 214 | const auto relative_dir = GetOrCreateDirectoryRelative(dir, path); |
| 172 | 215 | ||
| 173 | const auto size_file = relative_dir->GetFile(SAVE_DATA_SIZE_FILENAME); | 216 | const auto size_file = relative_dir->GetFile(SAVE_DATA_SIZE_FILENAME); |
| @@ -185,7 +228,8 @@ SaveDataSize SaveDataFactory::ReadSaveDataSize(SaveDataType type, u64 title_id, | |||
| 185 | 228 | ||
| 186 | void SaveDataFactory::WriteSaveDataSize(SaveDataType type, u64 title_id, u128 user_id, | 229 | void SaveDataFactory::WriteSaveDataSize(SaveDataType type, u64 title_id, u128 user_id, |
| 187 | SaveDataSize new_value) const { | 230 | SaveDataSize new_value) const { |
| 188 | const auto path = GetFullPath(system, SaveDataSpaceId::NandUser, type, title_id, user_id, 0); | 231 | const auto path = |
| 232 | GetFullPath(system, dir, SaveDataSpaceId::NandUser, type, title_id, user_id, 0); | ||
| 189 | const auto relative_dir = GetOrCreateDirectoryRelative(dir, path); | 233 | const auto relative_dir = GetOrCreateDirectoryRelative(dir, path); |
| 190 | 234 | ||
| 191 | const auto size_file = relative_dir->CreateFile(SAVE_DATA_SIZE_FILENAME); | 235 | const auto size_file = relative_dir->CreateFile(SAVE_DATA_SIZE_FILENAME); |
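`GetFullPath` now takes the save-data root `dir` so it can probe for a save at the layout a future implementation will use, preferring it over the legacy path when it already exists; only NAND user account/device saves participate, and the caller masks the low byte of the title id (`title_id & ~0xFFULL`) before the lookup. A runnable sketch of the two path shapes, with a hypothetical title id and UUID:

```cpp
#include <cstdint>
#include <string>
#include <fmt/format.h>

int main() {
    const std::uint64_t title_id = 0x0100'F2C0'115B'6000ULL;        // hypothetical
    const std::string uuid = "9f0b6c3fa84c4f4f8cbe4e07f4a1b8d2";    // hypothetical

    // Account save: /user/save/account/<uuid>/<title_id>/1
    fmt::print("/user/save/account/{}/{:016X}/1\n", uuid, title_id);
    // Device save:  /user/save/device/<title_id>/1
    fmt::print("/user/save/device/{:016X}/1\n", title_id);
}
```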
diff --git a/src/core/file_sys/savedata_factory.h b/src/core/file_sys/savedata_factory.h index a763b94c8..d3633ef03 100644 --- a/src/core/file_sys/savedata_factory.h +++ b/src/core/file_sys/savedata_factory.h | |||
| @@ -95,8 +95,8 @@ public: | |||
| 95 | VirtualDir GetSaveDataSpaceDirectory(SaveDataSpaceId space) const; | 95 | VirtualDir GetSaveDataSpaceDirectory(SaveDataSpaceId space) const; |
| 96 | 96 | ||
| 97 | static std::string GetSaveDataSpaceIdPath(SaveDataSpaceId space); | 97 | static std::string GetSaveDataSpaceIdPath(SaveDataSpaceId space); |
| 98 | static std::string GetFullPath(Core::System& system, SaveDataSpaceId space, SaveDataType type, | 98 | static std::string GetFullPath(Core::System& system, VirtualDir dir, SaveDataSpaceId space, |
| 99 | u64 title_id, u128 user_id, u64 save_id); | 99 | SaveDataType type, u64 title_id, u128 user_id, u64 save_id); |
| 100 | 100 | ||
| 101 | SaveDataSize ReadSaveDataSize(SaveDataType type, u64 title_id, u128 user_id) const; | 101 | SaveDataSize ReadSaveDataSize(SaveDataType type, u64 title_id, u128 user_id) const; |
| 102 | void WriteSaveDataSize(SaveDataType type, u64 title_id, u128 user_id, | 102 | void WriteSaveDataSize(SaveDataType type, u64 title_id, u128 user_id, |
diff --git a/src/core/hid/irs_types.h b/src/core/hid/irs_types.h index 88c5b016d..0d1bfe53f 100644 --- a/src/core/hid/irs_types.h +++ b/src/core/hid/irs_types.h | |||
| @@ -14,7 +14,7 @@ enum class CameraAmbientNoiseLevel : u32 { | |||
| 14 | Low, | 14 | Low, |
| 15 | Medium, | 15 | Medium, |
| 16 | High, | 16 | High, |
| 17 | Unkown3, // This level can't be reached | 17 | Unknown3, // This level can't be reached |
| 18 | }; | 18 | }; |
| 19 | 19 | ||
| 20 | // This is nn::irsensor::CameraLightTarget | 20 | // This is nn::irsensor::CameraLightTarget |
| @@ -75,9 +75,9 @@ enum class IrCameraStatus : u32 { | |||
| 75 | enum class IrCameraInternalStatus : u32 { | 75 | enum class IrCameraInternalStatus : u32 { |
| 76 | Stopped, | 76 | Stopped, |
| 77 | FirmwareUpdateNeeded, | 77 | FirmwareUpdateNeeded, |
| 78 | Unkown2, | 78 | Unknown2, |
| 79 | Unkown3, | 79 | Unknown3, |
| 80 | Unkown4, | 80 | Unknown4, |
| 81 | FirmwareVersionRequested, | 81 | FirmwareVersionRequested, |
| 82 | FirmwareVersionIsInvalid, | 82 | FirmwareVersionIsInvalid, |
| 83 | Ready, | 83 | Ready, |
| @@ -121,20 +121,20 @@ enum class IrSensorFunctionLevel : u8 { | |||
| 121 | 121 | ||
| 122 | // This is nn::irsensor::MomentProcessorPreprocess | 122 | // This is nn::irsensor::MomentProcessorPreprocess |
| 123 | enum class MomentProcessorPreprocess : u32 { | 123 | enum class MomentProcessorPreprocess : u32 { |
| 124 | Unkown0, | 124 | Unknown0, |
| 125 | Unkown1, | 125 | Unknown1, |
| 126 | }; | 126 | }; |
| 127 | 127 | ||
| 128 | // This is nn::irsensor::PackedMomentProcessorPreprocess | 128 | // This is nn::irsensor::PackedMomentProcessorPreprocess |
| 129 | enum class PackedMomentProcessorPreprocess : u8 { | 129 | enum class PackedMomentProcessorPreprocess : u8 { |
| 130 | Unkown0, | 130 | Unknown0, |
| 131 | Unkown1, | 131 | Unknown1, |
| 132 | }; | 132 | }; |
| 133 | 133 | ||
| 134 | // This is nn::irsensor::PointingStatus | 134 | // This is nn::irsensor::PointingStatus |
| 135 | enum class PointingStatus : u32 { | 135 | enum class PointingStatus : u32 { |
| 136 | Unkown0, | 136 | Unknown0, |
| 137 | Unkown1, | 137 | Unknown1, |
| 138 | }; | 138 | }; |
| 139 | 139 | ||
| 140 | struct IrsRect { | 140 | struct IrsRect { |
diff --git a/src/core/hle/ipc_helpers.h b/src/core/hle/ipc_helpers.h index 0cc26a211..aa27be767 100644 --- a/src/core/hle/ipc_helpers.h +++ b/src/core/hle/ipc_helpers.h | |||
| @@ -86,13 +86,13 @@ public: | |||
| 86 | u32 num_domain_objects{}; | 86 | u32 num_domain_objects{}; |
| 87 | const bool always_move_handles{ | 87 | const bool always_move_handles{ |
| 88 | (static_cast<u32>(flags) & static_cast<u32>(Flags::AlwaysMoveHandles)) != 0}; | 88 | (static_cast<u32>(flags) & static_cast<u32>(Flags::AlwaysMoveHandles)) != 0}; |
| 89 | if (!ctx.Session()->IsDomain() || always_move_handles) { | 89 | if (!ctx.Session()->GetSessionRequestManager()->IsDomain() || always_move_handles) { |
| 90 | num_handles_to_move = num_objects_to_move; | 90 | num_handles_to_move = num_objects_to_move; |
| 91 | } else { | 91 | } else { |
| 92 | num_domain_objects = num_objects_to_move; | 92 | num_domain_objects = num_objects_to_move; |
| 93 | } | 93 | } |
| 94 | 94 | ||
| 95 | if (ctx.Session()->IsDomain()) { | 95 | if (ctx.Session()->GetSessionRequestManager()->IsDomain()) { |
| 96 | raw_data_size += | 96 | raw_data_size += |
| 97 | static_cast<u32>(sizeof(DomainMessageHeader) / sizeof(u32) + num_domain_objects); | 97 | static_cast<u32>(sizeof(DomainMessageHeader) / sizeof(u32) + num_domain_objects); |
| 98 | ctx.write_size += num_domain_objects; | 98 | ctx.write_size += num_domain_objects; |
| @@ -125,7 +125,8 @@ public: | |||
| 125 | if (!ctx.IsTipc()) { | 125 | if (!ctx.IsTipc()) { |
| 126 | AlignWithPadding(); | 126 | AlignWithPadding(); |
| 127 | 127 | ||
| 128 | if (ctx.Session()->IsDomain() && ctx.HasDomainMessageHeader()) { | 128 | if (ctx.Session()->GetSessionRequestManager()->IsDomain() && |
| 129 | ctx.HasDomainMessageHeader()) { | ||
| 129 | IPC::DomainMessageHeader domain_header{}; | 130 | IPC::DomainMessageHeader domain_header{}; |
| 130 | domain_header.num_objects = num_domain_objects; | 131 | domain_header.num_objects = num_domain_objects; |
| 131 | PushRaw(domain_header); | 132 | PushRaw(domain_header); |
| @@ -145,7 +146,7 @@ public: | |||
| 145 | 146 | ||
| 146 | template <class T> | 147 | template <class T> |
| 147 | void PushIpcInterface(std::shared_ptr<T> iface) { | 148 | void PushIpcInterface(std::shared_ptr<T> iface) { |
| 148 | if (context->Session()->IsDomain()) { | 149 | if (context->Session()->GetSessionRequestManager()->IsDomain()) { |
| 149 | context->AddDomainObject(std::move(iface)); | 150 | context->AddDomainObject(std::move(iface)); |
| 150 | } else { | 151 | } else { |
| 151 | kernel.CurrentProcess()->GetResourceLimit()->Reserve( | 152 | kernel.CurrentProcess()->GetResourceLimit()->Reserve( |
| @@ -386,7 +387,7 @@ public: | |||
| 386 | 387 | ||
| 387 | template <class T> | 388 | template <class T> |
| 388 | std::weak_ptr<T> PopIpcInterface() { | 389 | std::weak_ptr<T> PopIpcInterface() { |
| 389 | ASSERT(context->Session()->IsDomain()); | 390 | ASSERT(context->Session()->GetSessionRequestManager()->IsDomain()); |
| 390 | ASSERT(context->GetDomainMessageHeader().input_object_count > 0); | 391 | ASSERT(context->GetDomainMessageHeader().input_object_count > 0); |
| 391 | return context->GetDomainHandler<T>(Pop<u32>() - 1); | 392 | return context->GetDomainHandler<T>(Pop<u32>() - 1); |
| 392 | } | 393 | } |
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp index 5b3feec66..e4f43a053 100644 --- a/src/core/hle/kernel/hle_ipc.cpp +++ b/src/core/hle/kernel/hle_ipc.cpp | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include "core/hle/kernel/k_server_session.h" | 19 | #include "core/hle/kernel/k_server_session.h" |
| 20 | #include "core/hle/kernel/k_thread.h" | 20 | #include "core/hle/kernel/k_thread.h" |
| 21 | #include "core/hle/kernel/kernel.h" | 21 | #include "core/hle/kernel/kernel.h" |
| 22 | #include "core/hle/kernel/service_thread.h" | ||
| 22 | #include "core/memory.h" | 23 | #include "core/memory.h" |
| 23 | 24 | ||
| 24 | namespace Kernel { | 25 | namespace Kernel { |
| @@ -56,16 +57,103 @@ bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& co | |||
| 56 | } | 57 | } |
| 57 | } | 58 | } |
| 58 | 59 | ||
| 60 | Result SessionRequestManager::CompleteSyncRequest(KServerSession* server_session, | ||
| 61 | HLERequestContext& context) { | ||
| 62 | Result result = ResultSuccess; | ||
| 63 | |||
| 64 | // If the session has been converted to a domain, handle the domain request | ||
| 65 | if (this->HasSessionRequestHandler(context)) { | ||
| 66 | if (IsDomain() && context.HasDomainMessageHeader()) { | ||
| 67 | result = HandleDomainSyncRequest(server_session, context); | ||
| 68 | // If there is no domain header, the regular session handler is used | ||
| 69 | } else if (this->HasSessionHandler()) { | ||
| 70 | // If this manager has an associated HLE handler, forward the request to it. | ||
| 71 | result = this->SessionHandler().HandleSyncRequest(*server_session, context); | ||
| 72 | } | ||
| 73 | } else { | ||
| 74 | ASSERT_MSG(false, "Session handler is invalid, stubbing response!"); | ||
| 75 | IPC::ResponseBuilder rb(context, 2); | ||
| 76 | rb.Push(ResultSuccess); | ||
| 77 | } | ||
| 78 | |||
| 79 | if (convert_to_domain) { | ||
| 80 | ASSERT_MSG(!IsDomain(), "ServerSession is already a domain instance."); | ||
| 81 | this->ConvertToDomain(); | ||
| 82 | convert_to_domain = false; | ||
| 83 | } | ||
| 84 | |||
| 85 | return result; | ||
| 86 | } | ||
| 87 | |||
| 88 | Result SessionRequestManager::HandleDomainSyncRequest(KServerSession* server_session, | ||
| 89 | HLERequestContext& context) { | ||
| 90 | if (!context.HasDomainMessageHeader()) { | ||
| 91 | return ResultSuccess; | ||
| 92 | } | ||
| 93 | |||
| 94 | // Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs | ||
| 95 | context.SetSessionRequestManager(server_session->GetSessionRequestManager()); | ||
| 96 | |||
| 97 | // If there is a DomainMessageHeader, then this is CommandType "Request" | ||
| 98 | const auto& domain_message_header = context.GetDomainMessageHeader(); | ||
| 99 | const u32 object_id{domain_message_header.object_id}; | ||
| 100 | switch (domain_message_header.command) { | ||
| 101 | case IPC::DomainMessageHeader::CommandType::SendMessage: | ||
| 102 | if (object_id > this->DomainHandlerCount()) { | ||
| 103 | LOG_CRITICAL(IPC, | ||
| 104 | "object_id {} is too big! This probably means a recent service call " | ||
| 105 | "needed to return a new interface!", | ||
| 106 | object_id); | ||
| 107 | ASSERT(false); | ||
| 108 | return ResultSuccess; // Ignore error if asserts are off | ||
| 109 | } | ||
| 110 | if (auto strong_ptr = this->DomainHandler(object_id - 1).lock()) { | ||
| 111 | return strong_ptr->HandleSyncRequest(*server_session, context); | ||
| 112 | } else { | ||
| 113 | ASSERT(false); | ||
| 114 | return ResultSuccess; | ||
| 115 | } | ||
| 116 | |||
| 117 | case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: { | ||
| 118 | LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id); | ||
| 119 | |||
| 120 | this->CloseDomainHandler(object_id - 1); | ||
| 121 | |||
| 122 | IPC::ResponseBuilder rb{context, 2}; | ||
| 123 | rb.Push(ResultSuccess); | ||
| 124 | return ResultSuccess; | ||
| 125 | } | ||
| 126 | } | ||
| 127 | |||
| 128 | LOG_CRITICAL(IPC, "Unknown domain command={}", domain_message_header.command.Value()); | ||
| 129 | ASSERT(false); | ||
| 130 | return ResultSuccess; | ||
| 131 | } | ||
| 132 | |||
| 133 | Result SessionRequestManager::QueueSyncRequest(KSession* parent, | ||
| 134 | std::shared_ptr<HLERequestContext>&& context) { | ||
| 135 | // Ensure we have a session request handler | ||
| 136 | if (this->HasSessionRequestHandler(*context)) { | ||
| 137 | if (auto strong_ptr = this->GetServiceThread().lock()) { | ||
| 138 | strong_ptr->QueueSyncRequest(*parent, std::move(context)); | ||
| 139 | } else { | ||
| 140 | ASSERT_MSG(false, "strong_ptr is nullptr!"); | ||
| 141 | } | ||
| 142 | } else { | ||
| 143 | ASSERT_MSG(false, "handler is invalid!"); | ||
| 144 | } | ||
| 145 | |||
| 146 | return ResultSuccess; | ||
| 147 | } | ||
| 148 | |||
| 59 | void SessionRequestHandler::ClientConnected(KServerSession* session) { | 149 | void SessionRequestHandler::ClientConnected(KServerSession* session) { |
| 60 | session->ClientConnected(shared_from_this()); | 150 | session->GetSessionRequestManager()->SetSessionHandler(shared_from_this()); |
| 61 | 151 | ||
| 62 | // Ensure our server session is tracked globally. | 152 | // Ensure our server session is tracked globally. |
| 63 | kernel.RegisterServerObject(session); | 153 | kernel.RegisterServerObject(session); |
| 64 | } | 154 | } |
| 65 | 155 | ||
| 66 | void SessionRequestHandler::ClientDisconnected(KServerSession* session) { | 156 | void SessionRequestHandler::ClientDisconnected(KServerSession* session) {} |
| 67 | session->ClientDisconnected(); | ||
| 68 | } | ||
| 69 | 157 | ||
| 70 | HLERequestContext::HLERequestContext(KernelCore& kernel_, Core::Memory::Memory& memory_, | 158 | HLERequestContext::HLERequestContext(KernelCore& kernel_, Core::Memory::Memory& memory_, |
| 71 | KServerSession* server_session_, KThread* thread_) | 159 | KServerSession* server_session_, KThread* thread_) |
| @@ -126,7 +214,7 @@ void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32 | |||
| 126 | // Padding to align to 16 bytes | 214 | // Padding to align to 16 bytes |
| 127 | rp.AlignWithPadding(); | 215 | rp.AlignWithPadding(); |
| 128 | 216 | ||
| 129 | if (Session()->IsDomain() && | 217 | if (Session()->GetSessionRequestManager()->IsDomain() && |
| 130 | ((command_header->type == IPC::CommandType::Request || | 218 | ((command_header->type == IPC::CommandType::Request || |
| 131 | command_header->type == IPC::CommandType::RequestWithContext) || | 219 | command_header->type == IPC::CommandType::RequestWithContext) || |
| 132 | !incoming)) { | 220 | !incoming)) { |
| @@ -135,7 +223,7 @@ void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32 | |||
| 135 | if (incoming || domain_message_header) { | 223 | if (incoming || domain_message_header) { |
| 136 | domain_message_header = rp.PopRaw<IPC::DomainMessageHeader>(); | 224 | domain_message_header = rp.PopRaw<IPC::DomainMessageHeader>(); |
| 137 | } else { | 225 | } else { |
| 138 | if (Session()->IsDomain()) { | 226 | if (Session()->GetSessionRequestManager()->IsDomain()) { |
| 139 | LOG_WARNING(IPC, "Domain request has no DomainMessageHeader!"); | 227 | LOG_WARNING(IPC, "Domain request has no DomainMessageHeader!"); |
| 140 | } | 228 | } |
| 141 | } | 229 | } |
| @@ -228,12 +316,12 @@ Result HLERequestContext::WriteToOutgoingCommandBuffer(KThread& requesting_threa | |||
| 228 | // Write the domain objects to the command buffer, these go after the raw untranslated data. | 316 | // Write the domain objects to the command buffer, these go after the raw untranslated data. |
| 229 | // TODO(Subv): This completely ignores C buffers. | 317 | // TODO(Subv): This completely ignores C buffers. |
| 230 | 318 | ||
| 231 | if (Session()->IsDomain()) { | 319 | if (server_session->GetSessionRequestManager()->IsDomain()) { |
| 232 | current_offset = domain_offset - static_cast<u32>(outgoing_domain_objects.size()); | 320 | current_offset = domain_offset - static_cast<u32>(outgoing_domain_objects.size()); |
| 233 | for (const auto& object : outgoing_domain_objects) { | 321 | for (auto& object : outgoing_domain_objects) { |
| 234 | server_session->AppendDomainHandler(object); | 322 | server_session->GetSessionRequestManager()->AppendDomainHandler(std::move(object)); |
| 235 | cmd_buf[current_offset++] = | 323 | cmd_buf[current_offset++] = static_cast<u32_le>( |
| 236 | static_cast<u32_le>(server_session->NumDomainRequestHandlers()); | 324 | server_session->GetSessionRequestManager()->DomainHandlerCount()); |
| 237 | } | 325 | } |
| 238 | } | 326 | } |
| 239 | 327 | ||
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h index e258e2cdf..a0522bca0 100644 --- a/src/core/hle/kernel/hle_ipc.h +++ b/src/core/hle/kernel/hle_ipc.h | |||
| @@ -121,6 +121,10 @@ public: | |||
| 121 | is_domain = true; | 121 | is_domain = true; |
| 122 | } | 122 | } |
| 123 | 123 | ||
| 124 | void ConvertToDomainOnRequestEnd() { | ||
| 125 | convert_to_domain = true; | ||
| 126 | } | ||
| 127 | |||
| 124 | std::size_t DomainHandlerCount() const { | 128 | std::size_t DomainHandlerCount() const { |
| 125 | return domain_handlers.size(); | 129 | return domain_handlers.size(); |
| 126 | } | 130 | } |
| @@ -164,7 +168,12 @@ public: | |||
| 164 | 168 | ||
| 165 | bool HasSessionRequestHandler(const HLERequestContext& context) const; | 169 | bool HasSessionRequestHandler(const HLERequestContext& context) const; |
| 166 | 170 | ||
| 171 | Result HandleDomainSyncRequest(KServerSession* server_session, HLERequestContext& context); | ||
| 172 | Result CompleteSyncRequest(KServerSession* server_session, HLERequestContext& context); | ||
| 173 | Result QueueSyncRequest(KSession* parent, std::shared_ptr<HLERequestContext>&& context); | ||
| 174 | |||
| 167 | private: | 175 | private: |
| 176 | bool convert_to_domain{}; | ||
| 168 | bool is_domain{}; | 177 | bool is_domain{}; |
| 169 | SessionRequestHandlerPtr session_handler; | 178 | SessionRequestHandlerPtr session_handler; |
| 170 | std::vector<SessionRequestHandlerPtr> domain_handlers; | 179 | std::vector<SessionRequestHandlerPtr> domain_handlers; |
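Domain bookkeeping that previously lived on `KServerSession` (the handler list, the domain flag, request queueing) is now owned by `SessionRequestManager`, and conversion to a domain is deferred through the new `convert_to_domain` flag, which `CompleteSyncRequest` consumes after the reply is built. A sketch of how an HLE handler would use it; the service and command here are hypothetical:

```cpp
// Inside some HLE command handler (hypothetical service):
void IExampleInterface::ConvertToDomain(Kernel::HLERequestContext& ctx) {
    // Flag the conversion; it takes effect at the end of CompleteSyncRequest,
    // after this response has been written with the session still non-domain.
    ctx.Session()->GetSessionRequestManager()->ConvertToDomainOnRequestEnd();

    IPC::ResponseBuilder rb{ctx, 3};
    rb.Push(ResultSuccess);
    rb.Push<u32>(1); // object id assigned to the converted session
}
```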
diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp index 9b6b284d0..477e4e407 100644 --- a/src/core/hle/kernel/init/init_slab_setup.cpp +++ b/src/core/hle/kernel/init/init_slab_setup.cpp | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #include "core/hle/kernel/k_process.h" | 18 | #include "core/hle/kernel/k_process.h" |
| 19 | #include "core/hle/kernel/k_resource_limit.h" | 19 | #include "core/hle/kernel/k_resource_limit.h" |
| 20 | #include "core/hle/kernel/k_session.h" | 20 | #include "core/hle/kernel/k_session.h" |
| 21 | #include "core/hle/kernel/k_session_request.h" | ||
| 21 | #include "core/hle/kernel/k_shared_memory.h" | 22 | #include "core/hle/kernel/k_shared_memory.h" |
| 22 | #include "core/hle/kernel/k_shared_memory_info.h" | 23 | #include "core/hle/kernel/k_shared_memory_info.h" |
| 23 | #include "core/hle/kernel/k_system_control.h" | 24 | #include "core/hle/kernel/k_system_control.h" |
| @@ -34,6 +35,7 @@ namespace Kernel::Init { | |||
| 34 | HANDLER(KThread, (SLAB_COUNT(KThread)), ##__VA_ARGS__) \ | 35 | HANDLER(KThread, (SLAB_COUNT(KThread)), ##__VA_ARGS__) \ |
| 35 | HANDLER(KEvent, (SLAB_COUNT(KEvent)), ##__VA_ARGS__) \ | 36 | HANDLER(KEvent, (SLAB_COUNT(KEvent)), ##__VA_ARGS__) \ |
| 36 | HANDLER(KPort, (SLAB_COUNT(KPort)), ##__VA_ARGS__) \ | 37 | HANDLER(KPort, (SLAB_COUNT(KPort)), ##__VA_ARGS__) \ |
| 38 | HANDLER(KSessionRequest, (SLAB_COUNT(KSession) * 2), ##__VA_ARGS__) \ | ||
| 37 | HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ##__VA_ARGS__) \ | 39 | HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ##__VA_ARGS__) \ |
| 38 | HANDLER(KSharedMemoryInfo, (SLAB_COUNT(KSharedMemory) * 8), ##__VA_ARGS__) \ | 40 | HANDLER(KSharedMemoryInfo, (SLAB_COUNT(KSharedMemory) * 8), ##__VA_ARGS__) \ |
| 39 | HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__) \ | 41 | HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__) \ |
| @@ -94,8 +96,8 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd | |||
| 94 | // TODO(bunnei): Fix this once we support the kernel virtual memory layout. | 96 | // TODO(bunnei): Fix this once we support the kernel virtual memory layout. |
| 95 | 97 | ||
| 96 | if (size > 0) { | 98 | if (size > 0) { |
| 97 | void* backing_kernel_memory{ | 99 | void* backing_kernel_memory{system.DeviceMemory().GetPointer<void>( |
| 98 | system.DeviceMemory().GetPointer(TranslateSlabAddrToPhysical(memory_layout, start))}; | 100 | TranslateSlabAddrToPhysical(memory_layout, start))}; |
| 99 | 101 | ||
| 100 | const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1); | 102 | const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1); |
| 101 | ASSERT(region != nullptr); | 103 | ASSERT(region != nullptr); |
| @@ -181,7 +183,7 @@ void InitializeKPageBufferSlabHeap(Core::System& system) { | |||
| 181 | ASSERT(slab_address != 0); | 183 | ASSERT(slab_address != 0); |
| 182 | 184 | ||
| 183 | // Initialize the slabheap. | 185 | // Initialize the slabheap. |
| 184 | KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer(slab_address), | 186 | KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer<void>(slab_address), |
| 185 | slab_size); | 187 | slab_size); |
| 186 | } | 188 | } |
| 187 | 189 | ||
diff --git a/src/core/hle/kernel/k_client_session.cpp b/src/core/hle/kernel/k_client_session.cpp index 8892c5b7c..b4197a8d5 100644 --- a/src/core/hle/kernel/k_client_session.cpp +++ b/src/core/hle/kernel/k_client_session.cpp | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | 3 | ||
| 4 | #include "common/scope_exit.h" | ||
| 4 | #include "core/hle/kernel/hle_ipc.h" | 5 | #include "core/hle/kernel/hle_ipc.h" |
| 5 | #include "core/hle/kernel/k_client_session.h" | 6 | #include "core/hle/kernel/k_client_session.h" |
| 6 | #include "core/hle/kernel/k_server_session.h" | 7 | #include "core/hle/kernel/k_server_session.h" |
| @@ -10,6 +11,8 @@ | |||
| 10 | 11 | ||
| 11 | namespace Kernel { | 12 | namespace Kernel { |
| 12 | 13 | ||
| 14 | static constexpr u32 MessageBufferSize = 0x100; | ||
| 15 | |||
| 13 | KClientSession::KClientSession(KernelCore& kernel_) | 16 | KClientSession::KClientSession(KernelCore& kernel_) |
| 14 | : KAutoObjectWithSlabHeapAndContainer{kernel_} {} | 17 | : KAutoObjectWithSlabHeapAndContainer{kernel_} {} |
| 15 | KClientSession::~KClientSession() = default; | 18 | KClientSession::~KClientSession() = default; |
| @@ -22,8 +25,16 @@ void KClientSession::Destroy() { | |||
| 22 | void KClientSession::OnServerClosed() {} | 25 | void KClientSession::OnServerClosed() {} |
| 23 | 26 | ||
| 24 | Result KClientSession::SendSyncRequest() { | 27 | Result KClientSession::SendSyncRequest() { |
| 25 | // Signal the server session that new data is available | 28 | // Create a session request. |
| 26 | return parent->GetServerSession().OnRequest(); | 29 | KSessionRequest* request = KSessionRequest::Create(kernel); |
| 30 | R_UNLESS(request != nullptr, ResultOutOfResource); | ||
| 31 | SCOPE_EXIT({ request->Close(); }); | ||
| 32 | |||
| 33 | // Initialize the request. | ||
| 34 | request->Initialize(nullptr, GetCurrentThread(kernel).GetTLSAddress(), MessageBufferSize); | ||
| 35 | |||
| 36 | // Send the request. | ||
| 37 | return parent->GetServerSession().OnRequest(request); | ||
| 27 | } | 38 | } |
| 28 | 39 | ||
| 29 | } // namespace Kernel | 40 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp index da57ceb21..4b1c134d4 100644 --- a/src/core/hle/kernel/k_code_memory.cpp +++ b/src/core/hle/kernel/k_code_memory.cpp | |||
| @@ -34,7 +34,7 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si | |||
| 34 | 34 | ||
| 35 | // Clear the memory. | 35 | // Clear the memory. |
| 36 | for (const auto& block : m_page_group.Nodes()) { | 36 | for (const auto& block : m_page_group.Nodes()) { |
| 37 | std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize()); | 37 | std::memset(device_memory.GetPointer<void>(block.GetAddress()), 0xFF, block.GetSize()); |
| 38 | } | 38 | } |
| 39 | 39 | ||
| 40 | // Set remaining tracking members. | 40 | // Set remaining tracking members. |
diff --git a/src/core/hle/kernel/k_dynamic_page_manager.h b/src/core/hle/kernel/k_dynamic_page_manager.h new file mode 100644 index 000000000..9076c8fa3 --- /dev/null +++ b/src/core/hle/kernel/k_dynamic_page_manager.h | |||
| @@ -0,0 +1,136 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include "common/alignment.h" | ||
| 7 | #include "common/common_types.h" | ||
| 8 | #include "core/hle/kernel/k_page_bitmap.h" | ||
| 9 | #include "core/hle/kernel/k_spin_lock.h" | ||
| 10 | #include "core/hle/kernel/memory_types.h" | ||
| 11 | #include "core/hle/kernel/svc_results.h" | ||
| 12 | |||
| 13 | namespace Kernel { | ||
| 14 | |||
| 15 | class KDynamicPageManager { | ||
| 16 | public: | ||
| 17 | class PageBuffer { | ||
| 18 | private: | ||
| 19 | u8 m_buffer[PageSize]; | ||
| 20 | }; | ||
| 21 | static_assert(sizeof(PageBuffer) == PageSize); | ||
| 22 | |||
| 23 | public: | ||
| 24 | KDynamicPageManager() = default; | ||
| 25 | |||
| 26 | template <typename T> | ||
| 27 | T* GetPointer(VAddr addr) { | ||
| 28 | return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address)); | ||
| 29 | } | ||
| 30 | |||
| 31 | template <typename T> | ||
| 32 | const T* GetPointer(VAddr addr) const { | ||
| 33 | return reinterpret_cast<const T*>(m_backing_memory.data() + (addr - m_address)); | ||
| 34 | } | ||
| 35 | |||
| 36 | Result Initialize(VAddr addr, size_t sz) { | ||
| 37 | // We need to have positive size. | ||
| 38 | R_UNLESS(sz > 0, ResultOutOfMemory); | ||
| 39 | m_backing_memory.resize(sz); | ||
| 40 | |||
| 41 | // Calculate management overhead. | ||
| 42 | const size_t management_size = | ||
| 43 | KPageBitmap::CalculateManagementOverheadSize(sz / sizeof(PageBuffer)); | ||
| 44 | const size_t allocatable_size = sz - management_size; | ||
| 45 | |||
| 46 | // Set tracking fields. | ||
| 47 | m_address = addr; | ||
| 48 | m_size = Common::AlignDown(allocatable_size, sizeof(PageBuffer)); | ||
| 49 | m_count = allocatable_size / sizeof(PageBuffer); | ||
| 50 | R_UNLESS(m_count > 0, ResultOutOfMemory); | ||
| 51 | |||
| 52 | // Clear the management region. | ||
| 53 | u64* management_ptr = GetPointer<u64>(m_address + allocatable_size); | ||
| 54 | std::memset(management_ptr, 0, management_size); | ||
| 55 | |||
| 56 | // Initialize the bitmap. | ||
| 57 | m_page_bitmap.Initialize(management_ptr, m_count); | ||
| 58 | |||
| 59 | // Free the pages to the bitmap. | ||
| 60 | for (size_t i = 0; i < m_count; i++) { | ||
| 61 | // Ensure the freed page is all-zero. | ||
| 62 | std::memset(GetPointer<PageBuffer>(m_address) + i, 0, PageSize); | ||
| 63 | |||
| 64 | // Set the bit for the free page. | ||
| 65 | m_page_bitmap.SetBit(i); | ||
| 66 | } | ||
| 67 | |||
| 68 | R_SUCCEED(); | ||
| 69 | } | ||
| 70 | |||
| 71 | VAddr GetAddress() const { | ||
| 72 | return m_address; | ||
| 73 | } | ||
| 74 | size_t GetSize() const { | ||
| 75 | return m_size; | ||
| 76 | } | ||
| 77 | size_t GetUsed() const { | ||
| 78 | return m_used; | ||
| 79 | } | ||
| 80 | size_t GetPeak() const { | ||
| 81 | return m_peak; | ||
| 82 | } | ||
| 83 | size_t GetCount() const { | ||
| 84 | return m_count; | ||
| 85 | } | ||
| 86 | |||
| 87 | PageBuffer* Allocate() { | ||
| 88 | // Take the lock. | ||
| 89 | // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. | ||
| 90 | KScopedSpinLock lk(m_lock); | ||
| 91 | |||
| 92 | // Find a random free block. | ||
| 93 | s64 soffset = m_page_bitmap.FindFreeBlock(true); | ||
| 94 | if (soffset < 0) [[unlikely]] { | ||
| 95 | return nullptr; | ||
| 96 | } | ||
| 97 | |||
| 98 | const size_t offset = static_cast<size_t>(soffset); | ||
| 99 | |||
| 100 | // Update our tracking. | ||
| 101 | m_page_bitmap.ClearBit(offset); | ||
| 102 | m_peak = std::max(m_peak, (++m_used)); | ||
| 103 | |||
| 104 | return GetPointer<PageBuffer>(m_address) + offset; | ||
| 105 | } | ||
| 106 | |||
| 107 | void Free(PageBuffer* pb) { | ||
| 108 | // Ensure all pages in the heap are zero. | ||
| 109 | std::memset(pb, 0, PageSize); | ||
| 110 | |||
| 111 | // Take the lock. | ||
| 112 | // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. | ||
| 113 | KScopedSpinLock lk(m_lock); | ||
| 114 | |||
| 115 | // Set the bit for the free page. | ||
| 116 | const size_t offset = static_cast<size_t>(pb - GetPointer<PageBuffer>(m_address)); | ||
| 117 | m_page_bitmap.SetBit(offset); | ||
| 118 | |||
| 119 | // Decrement our used count. | ||
| 120 | --m_used; | ||
| 121 | } | ||
| 122 | |||
| 123 | private: | ||
| 124 | KSpinLock m_lock; | ||
| 125 | KPageBitmap m_page_bitmap; | ||
| 126 | size_t m_used{}; | ||
| 127 | size_t m_peak{}; | ||
| 128 | size_t m_count{}; | ||
| 129 | VAddr m_address{}; | ||
| 130 | size_t m_size{}; | ||
| 131 | |||
| 132 | // TODO(bunnei): Back by host memory until we emulate kernel virtual address space. | ||
| 133 | std::vector<u8> m_backing_memory; | ||
| 134 | }; | ||
| 135 | |||
| 136 | } // namespace Kernel | ||
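KDynamicPageManager hands out zeroed page-sized buffers tracked by a `KPageBitmap`, with the bitmap's own storage carved from the tail of the managed region; until kernel virtual memory is emulated it is backed by a host `std::vector`. A usage sketch with an illustrative region:

```cpp
Kernel::KDynamicPageManager page_manager;

// Region base/size are illustrative; bitmap management overhead is deducted
// from the tail, so the allocatable count is slightly below sz / 4 KiB.
ASSERT(R_SUCCEEDED(page_manager.Initialize(/*addr=*/0x6000'0000,
                                           /*sz=*/16 * 1024 * 1024)));

auto* page = page_manager.Allocate(); // zeroed 4 KiB buffer, bit cleared
ASSERT(page != nullptr);

// ... use the page ...

page_manager.Free(page); // re-zeroed, bit set, usage counter decremented
```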
diff --git a/src/core/hle/kernel/k_dynamic_resource_manager.h b/src/core/hle/kernel/k_dynamic_resource_manager.h new file mode 100644 index 000000000..1ce517e8e --- /dev/null +++ b/src/core/hle/kernel/k_dynamic_resource_manager.h | |||
| @@ -0,0 +1,58 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include "common/common_funcs.h" | ||
| 7 | #include "core/hle/kernel/k_dynamic_slab_heap.h" | ||
| 8 | #include "core/hle/kernel/k_memory_block.h" | ||
| 9 | |||
| 10 | namespace Kernel { | ||
| 11 | |||
| 12 | template <typename T, bool ClearNode = false> | ||
| 13 | class KDynamicResourceManager { | ||
| 14 | YUZU_NON_COPYABLE(KDynamicResourceManager); | ||
| 15 | YUZU_NON_MOVEABLE(KDynamicResourceManager); | ||
| 16 | |||
| 17 | public: | ||
| 18 | using DynamicSlabType = KDynamicSlabHeap<T, ClearNode>; | ||
| 19 | |||
| 20 | public: | ||
| 21 | constexpr KDynamicResourceManager() = default; | ||
| 22 | |||
| 23 | constexpr size_t GetSize() const { | ||
| 24 | return m_slab_heap->GetSize(); | ||
| 25 | } | ||
| 26 | constexpr size_t GetUsed() const { | ||
| 27 | return m_slab_heap->GetUsed(); | ||
| 28 | } | ||
| 29 | constexpr size_t GetPeak() const { | ||
| 30 | return m_slab_heap->GetPeak(); | ||
| 31 | } | ||
| 32 | constexpr size_t GetCount() const { | ||
| 33 | return m_slab_heap->GetCount(); | ||
| 34 | } | ||
| 35 | |||
| 36 | void Initialize(KDynamicPageManager* page_allocator, DynamicSlabType* slab_heap) { | ||
| 37 | m_page_allocator = page_allocator; | ||
| 38 | m_slab_heap = slab_heap; | ||
| 39 | } | ||
| 40 | |||
| 41 | T* Allocate() const { | ||
| 42 | return m_slab_heap->Allocate(m_page_allocator); | ||
| 43 | } | ||
| 44 | |||
| 45 | void Free(T* t) const { | ||
| 46 | m_slab_heap->Free(t); | ||
| 47 | } | ||
| 48 | |||
| 49 | private: | ||
| 50 | KDynamicPageManager* m_page_allocator{}; | ||
| 51 | DynamicSlabType* m_slab_heap{}; | ||
| 52 | }; | ||
| 53 | |||
| 54 | class KMemoryBlockSlabManager : public KDynamicResourceManager<KMemoryBlock> {}; | ||
| 55 | |||
| 56 | using KMemoryBlockSlabHeap = typename KMemoryBlockSlabManager::DynamicSlabType; | ||
| 57 | |||
| 58 | } // namespace Kernel | ||
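KDynamicResourceManager itself is just a thin pairing of a page allocator with a (possibly shared) slab heap, so several managers can draw objects from one heap. A hedged usage sketch built only from the interfaces in this patch; the page manager's own setup call is not shown in this file and is elided here:

    // Wire one page manager and one pre-filled slab heap to a manager.
    KDynamicPageManager page_manager;
    // page_manager.Initialize(...); // backing-store setup, assumed/elided

    KMemoryBlockSlabHeap slab_heap;
    slab_heap.Initialize(&page_manager, /*num_objects=*/1024); // pre-fill the free list

    KMemoryBlockSlabManager manager;
    manager.Initialize(&page_manager, &slab_heap);

    // Allocations come from the shared heap; a fresh page is pulled from the
    // page manager only when the heap's free list runs dry.
    KMemoryBlock* block = manager.Allocate();
    // ... use block ...
    manager.Free(block);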
diff --git a/src/core/hle/kernel/k_dynamic_slab_heap.h b/src/core/hle/kernel/k_dynamic_slab_heap.h new file mode 100644 index 000000000..3a0ddd050 --- /dev/null +++ b/src/core/hle/kernel/k_dynamic_slab_heap.h | |||
| @@ -0,0 +1,122 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <atomic> | ||
| 7 | |||
| 8 | #include "common/common_funcs.h" | ||
| 9 | #include "core/hle/kernel/k_dynamic_page_manager.h" | ||
| 10 | #include "core/hle/kernel/k_slab_heap.h" | ||
| 11 | |||
| 12 | namespace Kernel { | ||
| 13 | |||
| 14 | template <typename T, bool ClearNode = false> | ||
| 15 | class KDynamicSlabHeap : protected impl::KSlabHeapImpl { | ||
| 16 | YUZU_NON_COPYABLE(KDynamicSlabHeap); | ||
| 17 | YUZU_NON_MOVEABLE(KDynamicSlabHeap); | ||
| 18 | |||
| 19 | public: | ||
| 20 | constexpr KDynamicSlabHeap() = default; | ||
| 21 | |||
| 22 | constexpr VAddr GetAddress() const { | ||
| 23 | return m_address; | ||
| 24 | } | ||
| 25 | constexpr size_t GetSize() const { | ||
| 26 | return m_size; | ||
| 27 | } | ||
| 28 | constexpr size_t GetUsed() const { | ||
| 29 | return m_used.load(); | ||
| 30 | } | ||
| 31 | constexpr size_t GetPeak() const { | ||
| 32 | return m_peak.load(); | ||
| 33 | } | ||
| 34 | constexpr size_t GetCount() const { | ||
| 35 | return m_count.load(); | ||
| 36 | } | ||
| 37 | |||
| 38 | constexpr bool IsInRange(VAddr addr) const { | ||
| 39 | return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1; | ||
| 40 | } | ||
| 41 | |||
| 42 | void Initialize(KDynamicPageManager* page_allocator, size_t num_objects) { | ||
| 43 | ASSERT(page_allocator != nullptr); | ||
| 44 | |||
| 45 | // Initialize members. | ||
| 46 | m_address = page_allocator->GetAddress(); | ||
| 47 | m_size = page_allocator->GetSize(); | ||
| 48 | |||
| 49 | // Initialize the base allocator. | ||
| 50 | KSlabHeapImpl::Initialize(); | ||
| 51 | |||
| 52 | // Allocate until we have the correct number of objects. | ||
| 53 | while (m_count.load() < num_objects) { | ||
| 54 | auto* allocated = reinterpret_cast<T*>(page_allocator->Allocate()); | ||
| 55 | ASSERT(allocated != nullptr); | ||
| 56 | |||
| 57 | for (size_t i = 0; i < sizeof(PageBuffer) / sizeof(T); i++) { | ||
| 58 | KSlabHeapImpl::Free(allocated + i); | ||
| 59 | } | ||
| 60 | |||
| 61 | m_count += sizeof(PageBuffer) / sizeof(T); | ||
| 62 | } | ||
| 63 | } | ||
| 64 | |||
| 65 | T* Allocate(KDynamicPageManager* page_allocator) { | ||
| 66 | T* allocated = static_cast<T*>(KSlabHeapImpl::Allocate()); | ||
| 67 | |||
| 68 | // If we successfully allocated and we should clear the node, do so. | ||
| 69 | if constexpr (ClearNode) { | ||
| 70 | if (allocated != nullptr) [[likely]] { | ||
| 71 | reinterpret_cast<KSlabHeapImpl::Node*>(allocated)->next = nullptr; | ||
| 72 | } | ||
| 73 | } | ||
| 74 | |||
| 75 | // If we fail to allocate, try to get a new page from our next allocator. | ||
| 76 | if (allocated == nullptr) [[unlikely]] { | ||
| 77 | if (page_allocator != nullptr) { | ||
| 78 | allocated = reinterpret_cast<T*>(page_allocator->Allocate()); | ||
| 79 | if (allocated != nullptr) { | ||
| 80 | // If we succeeded in getting a page, free the rest to our slab. | ||
| 81 | for (size_t i = 1; i < sizeof(PageBuffer) / sizeof(T); i++) { | ||
| 82 | KSlabHeapImpl::Free(allocated + i); | ||
| 83 | } | ||
| 84 | m_count += sizeof(PageBuffer) / sizeof(T); | ||
| 85 | } | ||
| 86 | } | ||
| 87 | } | ||
| 88 | |||
| 89 | if (allocated != nullptr) [[likely]] { | ||
| 90 | // Construct the object. | ||
| 91 | std::construct_at(allocated); | ||
| 92 | |||
| 93 | // Update our tracking. | ||
| 94 | const size_t used = ++m_used; | ||
| 95 | size_t peak = m_peak.load(); | ||
| 96 | while (peak < used) { | ||
| 97 | if (m_peak.compare_exchange_weak(peak, used, std::memory_order_relaxed)) { | ||
| 98 | break; | ||
| 99 | } | ||
| 100 | } | ||
| 101 | } | ||
| 102 | |||
| 103 | return allocated; | ||
| 104 | } | ||
| 105 | |||
| 106 | void Free(T* t) { | ||
| 107 | KSlabHeapImpl::Free(t); | ||
| 108 | --m_used; | ||
| 109 | } | ||
| 110 | |||
| 111 | private: | ||
| 112 | using PageBuffer = KDynamicPageManager::PageBuffer; | ||
| 113 | |||
| 114 | private: | ||
| 115 | std::atomic<size_t> m_used{}; | ||
| 116 | std::atomic<size_t> m_peak{}; | ||
| 117 | std::atomic<size_t> m_count{}; | ||
| 118 | VAddr m_address{}; | ||
| 119 | size_t m_size{}; | ||
| 120 | }; | ||
| 121 | |||
| 122 | } // namespace Kernel | ||
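Note how Allocate() maintains m_peak without holding any lock: the compare-exchange loop retries until this thread publishes its own count or observes that another thread has already published a larger one. The same idiom in isolation, as a standalone sketch:

    #include <atomic>
    #include <cstddef>

    // Raise `peak` to at least `used`, racing safely with other threads.
    inline void UpdatePeak(std::atomic<std::size_t>& peak, std::size_t used) {
        std::size_t observed = peak.load();
        while (observed < used) {
            // On failure, compare_exchange_weak reloads `observed` with the
            // current value, so the loop re-checks against the newest peak.
            if (peak.compare_exchange_weak(observed, used, std::memory_order_relaxed)) {
                break;
            }
        }
    }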
diff --git a/src/core/hle/kernel/k_interrupt_manager.cpp b/src/core/hle/kernel/k_interrupt_manager.cpp index 1b577a5b3..4a6b60d26 100644 --- a/src/core/hle/kernel/k_interrupt_manager.cpp +++ b/src/core/hle/kernel/k_interrupt_manager.cpp | |||
| @@ -11,29 +11,34 @@ | |||
| 11 | namespace Kernel::KInterruptManager { | 11 | namespace Kernel::KInterruptManager { |
| 12 | 12 | ||
| 13 | void HandleInterrupt(KernelCore& kernel, s32 core_id) { | 13 | void HandleInterrupt(KernelCore& kernel, s32 core_id) { |
| 14 | auto* process = kernel.CurrentProcess(); | ||
| 15 | if (!process) { | ||
| 16 | return; | ||
| 17 | } | ||
| 18 | |||
| 19 | // Acknowledge the interrupt. | 14 | // Acknowledge the interrupt. |
| 20 | kernel.PhysicalCore(core_id).ClearInterrupt(); | 15 | kernel.PhysicalCore(core_id).ClearInterrupt(); |
| 21 | 16 | ||
| 22 | auto& current_thread = GetCurrentThread(kernel); | 17 | auto& current_thread = GetCurrentThread(kernel); |
| 23 | 18 | ||
| 24 | // If the user disable count is set, we may need to pin the current thread. | 19 | if (auto* process = kernel.CurrentProcess(); process) { |
| 25 | if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) { | 20 | // If the user disable count is set, we may need to pin the current thread. |
| 26 | KScopedSchedulerLock sl{kernel}; | 21 | if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) { |
| 22 | KScopedSchedulerLock sl{kernel}; | ||
| 27 | 23 | ||
| 28 | // Pin the current thread. | 24 | // Pin the current thread. |
| 29 | process->PinCurrentThread(core_id); | 25 | process->PinCurrentThread(core_id); |
| 30 | 26 | ||
| 31 | // Set the interrupt flag for the thread. | 27 | // Set the interrupt flag for the thread. |
| 32 | GetCurrentThread(kernel).SetInterruptFlag(); | 28 | GetCurrentThread(kernel).SetInterruptFlag(); |
| 29 | } | ||
| 33 | } | 30 | } |
| 34 | 31 | ||
| 35 | // Request interrupt scheduling. | 32 | // Request interrupt scheduling. |
| 36 | kernel.CurrentScheduler()->RequestScheduleOnInterrupt(); | 33 | kernel.CurrentScheduler()->RequestScheduleOnInterrupt(); |
| 37 | } | 34 | } |
| 38 | 35 | ||
| 36 | void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask) { | ||
| 37 | for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; ++core_id) { | ||
| 38 | if (core_mask & (1ULL << core_id)) { | ||
| 39 | kernel.PhysicalCore(core_id).Interrupt(); | ||
| 40 | } | ||
| 41 | } | ||
| 42 | } | ||
| 43 | |||
| 39 | } // namespace Kernel::KInterruptManager | 44 | } // namespace Kernel::KInterruptManager |
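SendInterProcessorInterrupt treats core_mask as one bit per core, so callers pick their targets by setting bits. A short hedged usage sketch, assuming `kernel` is a live KernelCore reference:

    // Interrupt cores 0 and 2 only; bit i selects physical core i.
    constexpr u64 core_mask = (1ULL << 0) | (1ULL << 2);
    Kernel::KInterruptManager::SendInterProcessorInterrupt(kernel, core_mask);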
diff --git a/src/core/hle/kernel/k_interrupt_manager.h b/src/core/hle/kernel/k_interrupt_manager.h index f103dfe3f..803dc9211 100644 --- a/src/core/hle/kernel/k_interrupt_manager.h +++ b/src/core/hle/kernel/k_interrupt_manager.h | |||
| @@ -11,6 +11,8 @@ class KernelCore; | |||
| 11 | 11 | ||
| 12 | namespace KInterruptManager { | 12 | namespace KInterruptManager { |
| 13 | void HandleInterrupt(KernelCore& kernel, s32 core_id); | 13 | void HandleInterrupt(KernelCore& kernel, s32 core_id); |
| 14 | } | 14 | void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask); |
| 15 | |||
| 16 | } // namespace KInterruptManager | ||
| 15 | 17 | ||
| 16 | } // namespace Kernel | 18 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/k_linked_list.h b/src/core/hle/kernel/k_linked_list.h index 78859ced3..29ebd16b7 100644 --- a/src/core/hle/kernel/k_linked_list.h +++ b/src/core/hle/kernel/k_linked_list.h | |||
| @@ -16,6 +16,7 @@ class KLinkedListNode : public boost::intrusive::list_base_hook<>, | |||
| 16 | public KSlabAllocated<KLinkedListNode> { | 16 | public KSlabAllocated<KLinkedListNode> { |
| 17 | 17 | ||
| 18 | public: | 18 | public: |
| 19 | explicit KLinkedListNode(KernelCore&) {} | ||
| 19 | KLinkedListNode() = default; | 20 | KLinkedListNode() = default; |
| 20 | 21 | ||
| 21 | void Initialize(void* it) { | 22 | void Initialize(void* it) { |
diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h index 18df1f836..9444f6bd2 100644 --- a/src/core/hle/kernel/k_memory_block.h +++ b/src/core/hle/kernel/k_memory_block.h | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | #include "common/alignment.h" | 6 | #include "common/alignment.h" |
| 7 | #include "common/assert.h" | 7 | #include "common/assert.h" |
| 8 | #include "common/common_types.h" | 8 | #include "common/common_types.h" |
| 9 | #include "common/intrusive_red_black_tree.h" | ||
| 9 | #include "core/hle/kernel/memory_types.h" | 10 | #include "core/hle/kernel/memory_types.h" |
| 10 | #include "core/hle/kernel/svc_types.h" | 11 | #include "core/hle/kernel/svc_types.h" |
| 11 | 12 | ||
| @@ -168,9 +169,8 @@ constexpr KMemoryPermission ConvertToKMemoryPermission(Svc::MemoryPermission per | |||
| 168 | 169 | ||
| 169 | enum class KMemoryAttribute : u8 { | 170 | enum class KMemoryAttribute : u8 { |
| 170 | None = 0x00, | 171 | None = 0x00, |
| 171 | Mask = 0x7F, | 172 | All = 0xFF, |
| 172 | All = Mask, | 173 | UserMask = All, |
| 173 | DontCareMask = 0x80, | ||
| 174 | 174 | ||
| 175 | Locked = static_cast<u8>(Svc::MemoryAttribute::Locked), | 175 | Locked = static_cast<u8>(Svc::MemoryAttribute::Locked), |
| 176 | IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked), | 176 | IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked), |
| @@ -178,76 +178,112 @@ enum class KMemoryAttribute : u8 { | |||
| 178 | Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached), | 178 | Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached), |
| 179 | 179 | ||
| 180 | SetMask = Uncached, | 180 | SetMask = Uncached, |
| 181 | |||
| 182 | IpcAndDeviceMapped = IpcLocked | DeviceShared, | ||
| 183 | LockedAndIpcLocked = Locked | IpcLocked, | ||
| 184 | DeviceSharedAndUncached = DeviceShared | Uncached | ||
| 185 | }; | 181 | }; |
| 186 | DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute); | 182 | DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute); |
| 187 | 183 | ||
| 188 | static_assert((static_cast<u8>(KMemoryAttribute::Mask) & | 184 | enum class KMemoryBlockDisableMergeAttribute : u8 { |
| 189 | static_cast<u8>(KMemoryAttribute::DontCareMask)) == 0); | 185 | None = 0, |
| 186 | Normal = (1u << 0), | ||
| 187 | DeviceLeft = (1u << 1), | ||
| 188 | IpcLeft = (1u << 2), | ||
| 189 | Locked = (1u << 3), | ||
| 190 | DeviceRight = (1u << 4), | ||
| 191 | |||
| 192 | AllLeft = Normal | DeviceLeft | IpcLeft | Locked, | ||
| 193 | AllRight = DeviceRight, | ||
| 194 | }; | ||
| 195 | DECLARE_ENUM_FLAG_OPERATORS(KMemoryBlockDisableMergeAttribute); | ||
| 190 | 196 | ||
| 191 | struct KMemoryInfo { | 197 | struct KMemoryInfo { |
| 192 | VAddr addr{}; | 198 | uintptr_t m_address; |
| 193 | std::size_t size{}; | 199 | size_t m_size; |
| 194 | KMemoryState state{}; | 200 | KMemoryState m_state; |
| 195 | KMemoryPermission perm{}; | 201 | u16 m_device_disable_merge_left_count; |
| 196 | KMemoryAttribute attribute{}; | 202 | u16 m_device_disable_merge_right_count; |
| 197 | KMemoryPermission original_perm{}; | 203 | u16 m_ipc_lock_count; |
| 198 | u16 ipc_lock_count{}; | 204 | u16 m_device_use_count; |
| 199 | u16 device_use_count{}; | 205 | u16 m_ipc_disable_merge_count; |
| 206 | KMemoryPermission m_permission; | ||
| 207 | KMemoryAttribute m_attribute; | ||
| 208 | KMemoryPermission m_original_permission; | ||
| 209 | KMemoryBlockDisableMergeAttribute m_disable_merge_attribute; | ||
| 200 | 210 | ||
| 201 | constexpr Svc::MemoryInfo GetSvcMemoryInfo() const { | 211 | constexpr Svc::MemoryInfo GetSvcMemoryInfo() const { |
| 202 | return { | 212 | return { |
| 203 | addr, | 213 | .addr = m_address, |
| 204 | size, | 214 | .size = m_size, |
| 205 | static_cast<Svc::MemoryState>(state & KMemoryState::Mask), | 215 | .state = static_cast<Svc::MemoryState>(m_state & KMemoryState::Mask), |
| 206 | static_cast<Svc::MemoryAttribute>(attribute & KMemoryAttribute::Mask), | 216 | .attr = static_cast<Svc::MemoryAttribute>(m_attribute & KMemoryAttribute::UserMask), |
| 207 | static_cast<Svc::MemoryPermission>(perm & KMemoryPermission::UserMask), | 217 | .perm = static_cast<Svc::MemoryPermission>(m_permission & KMemoryPermission::UserMask), |
| 208 | ipc_lock_count, | 218 | .ipc_refcount = m_ipc_lock_count, |
| 209 | device_use_count, | 219 | .device_refcount = m_device_use_count, |
| 220 | .padding = {}, | ||
| 210 | }; | 221 | }; |
| 211 | } | 222 | } |
| 212 | 223 | ||
| 213 | constexpr VAddr GetAddress() const { | 224 | constexpr uintptr_t GetAddress() const { |
| 214 | return addr; | 225 | return m_address; |
| 226 | } | ||
| 227 | |||
| 228 | constexpr size_t GetSize() const { | ||
| 229 | return m_size; | ||
| 215 | } | 230 | } |
| 216 | constexpr std::size_t GetSize() const { | 231 | |
| 217 | return size; | 232 | constexpr size_t GetNumPages() const { |
| 233 | return this->GetSize() / PageSize; | ||
| 218 | } | 234 | } |
| 219 | constexpr std::size_t GetNumPages() const { | 235 | |
| 220 | return GetSize() / PageSize; | 236 | constexpr uintptr_t GetEndAddress() const { |
| 237 | return this->GetAddress() + this->GetSize(); | ||
| 221 | } | 238 | } |
| 222 | constexpr VAddr GetEndAddress() const { | 239 | |
| 223 | return GetAddress() + GetSize(); | 240 | constexpr uintptr_t GetLastAddress() const { |
| 241 | return this->GetEndAddress() - 1; | ||
| 224 | } | 242 | } |
| 225 | constexpr VAddr GetLastAddress() const { | 243 | |
| 226 | return GetEndAddress() - 1; | 244 | constexpr u16 GetIpcLockCount() const { |
| 245 | return m_ipc_lock_count; | ||
| 227 | } | 246 | } |
| 247 | |||
| 248 | constexpr u16 GetIpcDisableMergeCount() const { | ||
| 249 | return m_ipc_disable_merge_count; | ||
| 250 | } | ||
| 251 | |||
| 228 | constexpr KMemoryState GetState() const { | 252 | constexpr KMemoryState GetState() const { |
| 229 | return state; | 253 | return m_state; |
| 254 | } | ||
| 255 | |||
| 256 | constexpr KMemoryPermission GetPermission() const { | ||
| 257 | return m_permission; | ||
| 230 | } | 258 | } |
| 259 | |||
| 260 | constexpr KMemoryPermission GetOriginalPermission() const { | ||
| 261 | return m_original_permission; | ||
| 262 | } | ||
| 263 | |||
| 231 | constexpr KMemoryAttribute GetAttribute() const { | 264 | constexpr KMemoryAttribute GetAttribute() const { |
| 232 | return attribute; | 265 | return m_attribute; |
| 233 | } | 266 | } |
| 234 | constexpr KMemoryPermission GetPermission() const { | 267 | |
| 235 | return perm; | 268 | constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const { |
| 269 | return m_disable_merge_attribute; | ||
| 236 | } | 270 | } |
| 237 | }; | 271 | }; |
| 238 | 272 | ||
| 239 | class KMemoryBlock final { | 273 | class KMemoryBlock : public Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock> { |
| 240 | friend class KMemoryBlockManager; | ||
| 241 | |||
| 242 | private: | 274 | private: |
| 243 | VAddr addr{}; | 275 | u16 m_device_disable_merge_left_count; |
| 244 | std::size_t num_pages{}; | 276 | u16 m_device_disable_merge_right_count; |
| 245 | KMemoryState state{KMemoryState::None}; | 277 | VAddr m_address; |
| 246 | u16 ipc_lock_count{}; | 278 | size_t m_num_pages; |
| 247 | u16 device_use_count{}; | 279 | KMemoryState m_memory_state; |
| 248 | KMemoryPermission perm{KMemoryPermission::None}; | 280 | u16 m_ipc_lock_count; |
| 249 | KMemoryPermission original_perm{KMemoryPermission::None}; | 281 | u16 m_device_use_count; |
| 250 | KMemoryAttribute attribute{KMemoryAttribute::None}; | 282 | u16 m_ipc_disable_merge_count; |
| 283 | KMemoryPermission m_permission; | ||
| 284 | KMemoryPermission m_original_permission; | ||
| 285 | KMemoryAttribute m_attribute; | ||
| 286 | KMemoryBlockDisableMergeAttribute m_disable_merge_attribute; | ||
| 251 | 287 | ||
| 252 | public: | 288 | public: |
| 253 | static constexpr int Compare(const KMemoryBlock& lhs, const KMemoryBlock& rhs) { | 289 | static constexpr int Compare(const KMemoryBlock& lhs, const KMemoryBlock& rhs) { |
| @@ -261,113 +297,349 @@ public: | |||
| 261 | } | 297 | } |
| 262 | 298 | ||
| 263 | public: | 299 | public: |
| 264 | constexpr KMemoryBlock() = default; | ||
| 265 | constexpr KMemoryBlock(VAddr addr_, std::size_t num_pages_, KMemoryState state_, | ||
| 266 | KMemoryPermission perm_, KMemoryAttribute attribute_) | ||
| 267 | : addr{addr_}, num_pages(num_pages_), state{state_}, perm{perm_}, attribute{attribute_} {} | ||
| 268 | |||
| 269 | constexpr VAddr GetAddress() const { | 300 | constexpr VAddr GetAddress() const { |
| 270 | return addr; | 301 | return m_address; |
| 271 | } | 302 | } |
| 272 | 303 | ||
| 273 | constexpr std::size_t GetNumPages() const { | 304 | constexpr size_t GetNumPages() const { |
| 274 | return num_pages; | 305 | return m_num_pages; |
| 275 | } | 306 | } |
| 276 | 307 | ||
| 277 | constexpr std::size_t GetSize() const { | 308 | constexpr size_t GetSize() const { |
| 278 | return GetNumPages() * PageSize; | 309 | return this->GetNumPages() * PageSize; |
| 279 | } | 310 | } |
| 280 | 311 | ||
| 281 | constexpr VAddr GetEndAddress() const { | 312 | constexpr VAddr GetEndAddress() const { |
| 282 | return GetAddress() + GetSize(); | 313 | return this->GetAddress() + this->GetSize(); |
| 283 | } | 314 | } |
| 284 | 315 | ||
| 285 | constexpr VAddr GetLastAddress() const { | 316 | constexpr VAddr GetLastAddress() const { |
| 286 | return GetEndAddress() - 1; | 317 | return this->GetEndAddress() - 1; |
| 318 | } | ||
| 319 | |||
| 320 | constexpr u16 GetIpcLockCount() const { | ||
| 321 | return m_ipc_lock_count; | ||
| 322 | } | ||
| 323 | |||
| 324 | constexpr u16 GetIpcDisableMergeCount() const { | ||
| 325 | return m_ipc_disable_merge_count; | ||
| 326 | } | ||
| 327 | |||
| 328 | constexpr KMemoryPermission GetPermission() const { | ||
| 329 | return m_permission; | ||
| 330 | } | ||
| 331 | |||
| 332 | constexpr KMemoryPermission GetOriginalPermission() const { | ||
| 333 | return m_original_permission; | ||
| 334 | } | ||
| 335 | |||
| 336 | constexpr KMemoryAttribute GetAttribute() const { | ||
| 337 | return m_attribute; | ||
| 287 | } | 338 | } |
| 288 | 339 | ||
| 289 | constexpr KMemoryInfo GetMemoryInfo() const { | 340 | constexpr KMemoryInfo GetMemoryInfo() const { |
| 290 | return { | 341 | return { |
| 291 | GetAddress(), GetSize(), state, perm, | 342 | .m_address = this->GetAddress(), |
| 292 | attribute, original_perm, ipc_lock_count, device_use_count, | 343 | .m_size = this->GetSize(), |
| 344 | .m_state = m_memory_state, | ||
| 345 | .m_device_disable_merge_left_count = m_device_disable_merge_left_count, | ||
| 346 | .m_device_disable_merge_right_count = m_device_disable_merge_right_count, | ||
| 347 | .m_ipc_lock_count = m_ipc_lock_count, | ||
| 348 | .m_device_use_count = m_device_use_count, | ||
| 349 | .m_ipc_disable_merge_count = m_ipc_disable_merge_count, | ||
| 350 | .m_permission = m_permission, | ||
| 351 | .m_attribute = m_attribute, | ||
| 352 | .m_original_permission = m_original_permission, | ||
| 353 | .m_disable_merge_attribute = m_disable_merge_attribute, | ||
| 293 | }; | 354 | }; |
| 294 | } | 355 | } |
| 295 | 356 | ||
| 296 | void ShareToDevice(KMemoryPermission /*new_perm*/) { | 357 | public: |
| 297 | ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared || | 358 | explicit KMemoryBlock() = default; |
| 298 | device_use_count == 0); | 359 | |
| 299 | attribute |= KMemoryAttribute::DeviceShared; | 360 | constexpr KMemoryBlock(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p, |
| 300 | const u16 new_use_count{++device_use_count}; | 361 | KMemoryAttribute attr) |
| 301 | ASSERT(new_use_count > 0); | 362 | : Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock>(), |
| 363 | m_device_disable_merge_left_count(), m_device_disable_merge_right_count(), | ||
| 364 | m_address(addr), m_num_pages(np), m_memory_state(ms), m_ipc_lock_count(0), | ||
| 365 | m_device_use_count(0), m_ipc_disable_merge_count(), m_permission(p), | ||
| 366 | m_original_permission(KMemoryPermission::None), m_attribute(attr), | ||
| 367 | m_disable_merge_attribute() {} | ||
| 368 | |||
| 369 | constexpr void Initialize(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p, | ||
| 370 | KMemoryAttribute attr) { | ||
| 371 | m_device_disable_merge_left_count = 0; | ||
| 372 | m_device_disable_merge_right_count = 0; | ||
| 373 | m_address = addr; | ||
| 374 | m_num_pages = np; | ||
| 375 | m_memory_state = ms; | ||
| 376 | m_ipc_lock_count = 0; | ||
| 377 | m_device_use_count = 0; | ||
| 378 | m_permission = p; | ||
| 379 | m_original_permission = KMemoryPermission::None; | ||
| 380 | m_attribute = attr; | ||
| 381 | m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None; | ||
| 382 | } | ||
| 383 | |||
| 384 | constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const { | ||
| 385 | constexpr auto AttributeIgnoreMask = | ||
| 386 | KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared; | ||
| 387 | return m_memory_state == s && m_permission == p && | ||
| 388 | (m_attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask); | ||
| 389 | } | ||
| 390 | |||
| 391 | constexpr bool HasSameProperties(const KMemoryBlock& rhs) const { | ||
| 392 | return m_memory_state == rhs.m_memory_state && m_permission == rhs.m_permission && | ||
| 393 | m_original_permission == rhs.m_original_permission && | ||
| 394 | m_attribute == rhs.m_attribute && m_ipc_lock_count == rhs.m_ipc_lock_count && | ||
| 395 | m_device_use_count == rhs.m_device_use_count; | ||
| 396 | } | ||
| 397 | |||
| 398 | constexpr bool CanMergeWith(const KMemoryBlock& rhs) const { | ||
| 399 | return this->HasSameProperties(rhs) && | ||
| 400 | (m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight) == | ||
| 401 | KMemoryBlockDisableMergeAttribute::None && | ||
| 402 | (rhs.m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft) == | ||
| 403 | KMemoryBlockDisableMergeAttribute::None; | ||
| 302 | } | 404 | } |
| 303 | 405 | ||
| 304 | void UnshareToDevice(KMemoryPermission /*new_perm*/) { | 406 | constexpr bool Contains(VAddr addr) const { |
| 305 | ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared); | 407 | return this->GetAddress() <= addr && addr <= this->GetEndAddress(); |
| 306 | const u16 prev_use_count{device_use_count--}; | 408 | } |
| 307 | ASSERT(prev_use_count > 0); | 409 | |
| 308 | if (prev_use_count == 1) { | 410 | constexpr void Add(const KMemoryBlock& added_block) { |
| 309 | attribute &= ~KMemoryAttribute::DeviceShared; | 411 | ASSERT(added_block.GetNumPages() > 0); |
| 412 | ASSERT(this->GetAddress() + added_block.GetSize() - 1 < | ||
| 413 | this->GetEndAddress() + added_block.GetSize() - 1); | ||
| 414 | |||
| 415 | m_num_pages += added_block.GetNumPages(); | ||
| 416 | m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( | ||
| 417 | m_disable_merge_attribute | added_block.m_disable_merge_attribute); | ||
| 418 | m_device_disable_merge_right_count = added_block.m_device_disable_merge_right_count; | ||
| 419 | } | ||
| 420 | |||
| 421 | constexpr void Update(KMemoryState s, KMemoryPermission p, KMemoryAttribute a, | ||
| 422 | bool set_disable_merge_attr, u8 set_mask, u8 clear_mask) { | ||
| 423 | ASSERT(m_original_permission == KMemoryPermission::None); | ||
| 424 | ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None); | ||
| 425 | |||
| 426 | m_memory_state = s; | ||
| 427 | m_permission = p; | ||
| 428 | m_attribute = static_cast<KMemoryAttribute>( | ||
| 429 | a | (m_attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared))); | ||
| 430 | |||
| 431 | if (set_disable_merge_attr && set_mask != 0) { | ||
| 432 | m_disable_merge_attribute = m_disable_merge_attribute | | ||
| 433 | static_cast<KMemoryBlockDisableMergeAttribute>(set_mask); | ||
| 434 | } | ||
| 435 | if (clear_mask != 0) { | ||
| 436 | m_disable_merge_attribute = m_disable_merge_attribute & | ||
| 437 | static_cast<KMemoryBlockDisableMergeAttribute>(~clear_mask); | ||
| 310 | } | 438 | } |
| 311 | } | 439 | } |
| 312 | 440 | ||
| 313 | private: | 441 | constexpr void Split(KMemoryBlock* block, VAddr addr) { |
| 314 | constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const { | 442 | ASSERT(this->GetAddress() < addr); |
| 315 | constexpr KMemoryAttribute AttributeIgnoreMask{KMemoryAttribute::DontCareMask | | 443 | ASSERT(this->Contains(addr)); |
| 316 | KMemoryAttribute::IpcLocked | | 444 | ASSERT(Common::IsAligned(addr, PageSize)); |
| 317 | KMemoryAttribute::DeviceShared}; | 445 | |
| 318 | return state == s && perm == p && | 446 | block->m_address = m_address; |
| 319 | (attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask); | 447 | block->m_num_pages = (addr - this->GetAddress()) / PageSize; |
| 448 | block->m_memory_state = m_memory_state; | ||
| 449 | block->m_ipc_lock_count = m_ipc_lock_count; | ||
| 450 | block->m_device_use_count = m_device_use_count; | ||
| 451 | block->m_permission = m_permission; | ||
| 452 | block->m_original_permission = m_original_permission; | ||
| 453 | block->m_attribute = m_attribute; | ||
| 454 | block->m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( | ||
| 455 | m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft); | ||
| 456 | block->m_ipc_disable_merge_count = m_ipc_disable_merge_count; | ||
| 457 | block->m_device_disable_merge_left_count = m_device_disable_merge_left_count; | ||
| 458 | block->m_device_disable_merge_right_count = 0; | ||
| 459 | |||
| 460 | m_address = addr; | ||
| 461 | m_num_pages -= block->m_num_pages; | ||
| 462 | |||
| 463 | m_ipc_disable_merge_count = 0; | ||
| 464 | m_device_disable_merge_left_count = 0; | ||
| 465 | m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( | ||
| 466 | m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight); | ||
| 320 | } | 467 | } |
| 321 | 468 | ||
| 322 | constexpr bool HasSameProperties(const KMemoryBlock& rhs) const { | 469 | constexpr void UpdateDeviceDisableMergeStateForShareLeft( |
| 323 | return state == rhs.state && perm == rhs.perm && original_perm == rhs.original_perm && | 470 | [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { |
| 324 | attribute == rhs.attribute && ipc_lock_count == rhs.ipc_lock_count && | 471 | if (left) { |
| 325 | device_use_count == rhs.device_use_count; | 472 | m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( |
| 473 | m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceLeft); | ||
| 474 | const u16 new_device_disable_merge_left_count = ++m_device_disable_merge_left_count; | ||
| 475 | ASSERT(new_device_disable_merge_left_count > 0); | ||
| 476 | } | ||
| 326 | } | 477 | } |
| 327 | 478 | ||
| 328 | constexpr bool Contains(VAddr start) const { | 479 | constexpr void UpdateDeviceDisableMergeStateForShareRight( |
| 329 | return GetAddress() <= start && start <= GetEndAddress(); | 480 | [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) { |
| 481 | if (right) { | ||
| 482 | m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( | ||
| 483 | m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceRight); | ||
| 484 | const u16 new_device_disable_merge_right_count = ++m_device_disable_merge_right_count; | ||
| 485 | ASSERT(new_device_disable_merge_right_count > 0); | ||
| 486 | } | ||
| 487 | } | ||
| 488 | |||
| 489 | constexpr void UpdateDeviceDisableMergeStateForShare(KMemoryPermission new_perm, bool left, | ||
| 490 | bool right) { | ||
| 491 | this->UpdateDeviceDisableMergeStateForShareLeft(new_perm, left, right); | ||
| 492 | this->UpdateDeviceDisableMergeStateForShareRight(new_perm, left, right); | ||
| 330 | } | 493 | } |
| 331 | 494 | ||
| 332 | constexpr void Add(std::size_t count) { | 495 | constexpr void ShareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left, |
| 333 | ASSERT(count > 0); | 496 | bool right) { |
| 334 | ASSERT(GetAddress() + count * PageSize - 1 < GetEndAddress() + count * PageSize - 1); | 497 | // We must either be shared or have a zero lock count. |
| 498 | ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared || | ||
| 499 | m_device_use_count == 0); | ||
| 335 | 500 | ||
| 336 | num_pages += count; | 501 | // Share. |
| 502 | const u16 new_count = ++m_device_use_count; | ||
| 503 | ASSERT(new_count > 0); | ||
| 504 | |||
| 505 | m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute::DeviceShared); | ||
| 506 | |||
| 507 | this->UpdateDeviceDisableMergeStateForShare(new_perm, left, right); | ||
| 337 | } | 508 | } |
| 338 | 509 | ||
| 339 | constexpr void Update(KMemoryState new_state, KMemoryPermission new_perm, | 510 | constexpr void UpdateDeviceDisableMergeStateForUnshareLeft( |
| 340 | KMemoryAttribute new_attribute) { | 511 | [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { |
| 341 | ASSERT(original_perm == KMemoryPermission::None); | ||
| 342 | ASSERT((attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None); | ||
| 343 | 512 | ||
| 344 | state = new_state; | 513 | if (left) { |
| 345 | perm = new_perm; | 514 | if (!m_device_disable_merge_left_count) { |
| 515 | return; | ||
| 516 | } | ||
| 517 | --m_device_disable_merge_left_count; | ||
| 518 | } | ||
| 346 | 519 | ||
| 347 | attribute = static_cast<KMemoryAttribute>( | 520 | m_device_disable_merge_left_count = |
| 348 | new_attribute | | 521 | std::min(m_device_disable_merge_left_count, m_device_use_count); |
| 349 | (attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared))); | 522 | |
| 523 | if (m_device_disable_merge_left_count == 0) { | ||
| 524 | m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( | ||
| 525 | m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceLeft); | ||
| 526 | } | ||
| 350 | } | 527 | } |
| 351 | 528 | ||
| 352 | constexpr KMemoryBlock Split(VAddr split_addr) { | 529 | constexpr void UpdateDeviceDisableMergeStateForUnshareRight( |
| 353 | ASSERT(GetAddress() < split_addr); | 530 | [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) { |
| 354 | ASSERT(Contains(split_addr)); | 531 | if (right) { |
| 355 | ASSERT(Common::IsAligned(split_addr, PageSize)); | 532 | const u16 old_device_disable_merge_right_count = m_device_disable_merge_right_count--; |
| 533 | ASSERT(old_device_disable_merge_right_count > 0); | ||
| 534 | if (old_device_disable_merge_right_count == 1) { | ||
| 535 | m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( | ||
| 536 | m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceRight); | ||
| 537 | } | ||
| 538 | } | ||
| 539 | } | ||
| 356 | 540 | ||
| 357 | KMemoryBlock block; | 541 | constexpr void UpdateDeviceDisableMergeStateForUnshare(KMemoryPermission new_perm, bool left, |
| 358 | block.addr = addr; | 542 | bool right) { |
| 359 | block.num_pages = (split_addr - GetAddress()) / PageSize; | 543 | this->UpdateDeviceDisableMergeStateForUnshareLeft(new_perm, left, right); |
| 360 | block.state = state; | 544 | this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right); |
| 361 | block.ipc_lock_count = ipc_lock_count; | 545 | } |
| 362 | block.device_use_count = device_use_count; | ||
| 363 | block.perm = perm; | ||
| 364 | block.original_perm = original_perm; | ||
| 365 | block.attribute = attribute; | ||
| 366 | 546 | ||
| 367 | addr = split_addr; | 547 | constexpr void UnshareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left, |
| 368 | num_pages -= block.num_pages; | 548 | bool right) { |
| 549 | // We must be shared. | ||
| 550 | ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared); | ||
| 551 | |||
| 552 | // Unshare. | ||
| 553 | const u16 old_count = m_device_use_count--; | ||
| 554 | ASSERT(old_count > 0); | ||
| 555 | |||
| 556 | if (old_count == 1) { | ||
| 557 | m_attribute = | ||
| 558 | static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::DeviceShared); | ||
| 559 | } | ||
| 560 | |||
| 561 | this->UpdateDeviceDisableMergeStateForUnshare(new_perm, left, right); | ||
| 562 | } | ||
| 563 | |||
| 564 | constexpr void UnshareToDeviceRight([[maybe_unused]] KMemoryPermission new_perm, bool left, | ||
| 565 | bool right) { | ||
| 566 | |||
| 567 | // We must be shared. | ||
| 568 | ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared); | ||
| 569 | |||
| 570 | // Unshare. | ||
| 571 | const u16 old_count = m_device_use_count--; | ||
| 572 | ASSERT(old_count > 0); | ||
| 573 | |||
| 574 | if (old_count == 1) { | ||
| 575 | m_attribute = | ||
| 576 | static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::DeviceShared); | ||
| 577 | } | ||
| 578 | |||
| 579 | this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right); | ||
| 580 | } | ||
| 581 | |||
| 582 | constexpr void LockForIpc(KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { | ||
| 583 | // We must either be locked or have a zero lock count. | ||
| 584 | ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked || | ||
| 585 | m_ipc_lock_count == 0); | ||
| 586 | |||
| 587 | // Lock. | ||
| 588 | const u16 new_lock_count = ++m_ipc_lock_count; | ||
| 589 | ASSERT(new_lock_count > 0); | ||
| 590 | |||
| 591 | // If this is our first lock, update our permissions. | ||
| 592 | if (new_lock_count == 1) { | ||
| 593 | ASSERT(m_original_permission == KMemoryPermission::None); | ||
| 594 | ASSERT((m_permission | new_perm | KMemoryPermission::NotMapped) == | ||
| 595 | (m_permission | KMemoryPermission::NotMapped)); | ||
| 596 | ASSERT((m_permission & KMemoryPermission::UserExecute) != | ||
| 597 | KMemoryPermission::UserExecute || | ||
| 598 | (new_perm == KMemoryPermission::UserRead)); | ||
| 599 | m_original_permission = m_permission; | ||
| 600 | m_permission = static_cast<KMemoryPermission>( | ||
| 601 | (new_perm & KMemoryPermission::IpcLockChangeMask) | | ||
| 602 | (m_original_permission & ~KMemoryPermission::IpcLockChangeMask)); | ||
| 603 | } | ||
| 604 | m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute::IpcLocked); | ||
| 605 | |||
| 606 | if (left) { | ||
| 607 | m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( | ||
| 608 | m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::IpcLeft); | ||
| 609 | const u16 new_ipc_disable_merge_count = ++m_ipc_disable_merge_count; | ||
| 610 | ASSERT(new_ipc_disable_merge_count > 0); | ||
| 611 | } | ||
| 612 | } | ||
| 613 | |||
| 614 | constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left, | ||
| 615 | [[maybe_unused]] bool right) { | ||
| 616 | // We must be locked. | ||
| 617 | ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked); | ||
| 618 | |||
| 619 | // Unlock. | ||
| 620 | const u16 old_lock_count = m_ipc_lock_count--; | ||
| 621 | ASSERT(old_lock_count > 0); | ||
| 622 | |||
| 623 | // If this is our last unlock, update our permissions. | ||
| 624 | if (old_lock_count == 1) { | ||
| 625 | ASSERT(m_original_permission != KMemoryPermission::None); | ||
| 626 | m_permission = m_original_permission; | ||
| 627 | m_original_permission = KMemoryPermission::None; | ||
| 628 | m_attribute = static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::IpcLocked); | ||
| 629 | } | ||
| 630 | |||
| 631 | if (left) { | ||
| 632 | const u16 old_ipc_disable_merge_count = m_ipc_disable_merge_count--; | ||
| 633 | ASSERT(old_ipc_disable_merge_count > 0); | ||
| 634 | if (old_ipc_disable_merge_count == 1) { | ||
| 635 | m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( | ||
| 636 | m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::IpcLeft); | ||
| 637 | } | ||
| 638 | } | ||
| 639 | } | ||
| 369 | 640 | ||
| 370 | return block; | 641 | constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const { |
| 642 | return m_disable_merge_attribute; | ||
| 371 | } | 643 | } |
| 372 | }; | 644 | }; |
| 373 | static_assert(std::is_trivially_destructible<KMemoryBlock>::value); | 645 | static_assert(std::is_trivially_destructible<KMemoryBlock>::value); |
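The HasProperties() comparison above relies on an OR-with-ignore-mask trick: OR-ing both sides with the ignored bits forces those bits to compare equal, so only the caller-relevant attribute bits participate in the test. A self-contained sketch of the idiom with an illustrative enum (not the kernel's actual types):

    #include <cstdint>

    enum class Attr : std::uint8_t {
        None = 0,
        Locked = 1 << 0,
        IpcLocked = 1 << 1,
        DeviceShared = 1 << 2,
        Uncached = 1 << 3,
    };
    constexpr Attr operator|(Attr a, Attr b) {
        return static_cast<Attr>(static_cast<std::uint8_t>(a) |
                                 static_cast<std::uint8_t>(b));
    }

    constexpr bool AttrMatchesIgnoring(Attr have, Attr want, Attr ignore) {
        return (have | ignore) == (want | ignore);
    }

    // A block that is Uncached and also IpcLocked still matches a request for
    // plain Uncached, because IpcLocked is part of the ignore mask:
    static_assert(AttrMatchesIgnoring(Attr::Uncached | Attr::IpcLocked, Attr::Uncached,
                                      Attr::IpcLocked | Attr::DeviceShared));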
diff --git a/src/core/hle/kernel/k_memory_block_manager.cpp b/src/core/hle/kernel/k_memory_block_manager.cpp index 3ddb9984f..cf4c1e371 100644 --- a/src/core/hle/kernel/k_memory_block_manager.cpp +++ b/src/core/hle/kernel/k_memory_block_manager.cpp | |||
| @@ -2,221 +2,336 @@ | |||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | 3 | ||
| 4 | #include "core/hle/kernel/k_memory_block_manager.h" | 4 | #include "core/hle/kernel/k_memory_block_manager.h" |
| 5 | #include "core/hle/kernel/memory_types.h" | ||
| 6 | 5 | ||
| 7 | namespace Kernel { | 6 | namespace Kernel { |
| 8 | 7 | ||
| 9 | KMemoryBlockManager::KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_) | 8 | KMemoryBlockManager::KMemoryBlockManager() = default; |
| 10 | : start_addr{start_addr_}, end_addr{end_addr_} { | ||
| 11 | const u64 num_pages{(end_addr - start_addr) / PageSize}; | ||
| 12 | memory_block_tree.emplace_back(start_addr, num_pages, KMemoryState::Free, | ||
| 13 | KMemoryPermission::None, KMemoryAttribute::None); | ||
| 14 | } | ||
| 15 | 9 | ||
| 16 | KMemoryBlockManager::iterator KMemoryBlockManager::FindIterator(VAddr addr) { | 10 | Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager) { |
| 17 | auto node{memory_block_tree.begin()}; | 11 | // Allocate a block to encapsulate the address space, insert it into the tree. |
| 18 | while (node != end()) { | 12 | KMemoryBlock* start_block = slab_manager->Allocate(); |
| 19 | const VAddr node_end_addr{node->GetNumPages() * PageSize + node->GetAddress()}; | 13 | R_UNLESS(start_block != nullptr, ResultOutOfResource); |
| 20 | if (node->GetAddress() <= addr && node_end_addr - 1 >= addr) { | 14 | |
| 21 | return node; | 15 | // Set our start and end. |
| 22 | } | 16 | m_start_address = st; |
| 23 | node = std::next(node); | 17 | m_end_address = nd; |
| 24 | } | 18 | ASSERT(Common::IsAligned(m_start_address, PageSize)); |
| 25 | return end(); | 19 | ASSERT(Common::IsAligned(m_end_address, PageSize)); |
| 20 | |||
| 21 | // Initialize and insert the block. | ||
| 22 | start_block->Initialize(m_start_address, (m_end_address - m_start_address) / PageSize, | ||
| 23 | KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None); | ||
| 24 | m_memory_block_tree.insert(*start_block); | ||
| 25 | |||
| 26 | R_SUCCEED(); | ||
| 26 | } | 27 | } |
| 27 | 28 | ||
| 28 | VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_num_pages, | 29 | void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager* slab_manager, |
| 29 | std::size_t num_pages, std::size_t align, | 30 | HostUnmapCallback&& host_unmap_callback) { |
| 30 | std::size_t offset, std::size_t guard_pages) { | 31 | // Erase every block until we have none left. |
| 31 | if (num_pages == 0) { | 32 | auto it = m_memory_block_tree.begin(); |
| 32 | return {}; | 33 | while (it != m_memory_block_tree.end()) { |
| 34 | KMemoryBlock* block = std::addressof(*it); | ||
| 35 | it = m_memory_block_tree.erase(it); | ||
| 36 | host_unmap_callback(block->GetAddress(), block->GetSize()); | ||
| 37 | slab_manager->Free(block); | ||
| 33 | } | 38 | } |
| 34 | 39 | ||
| 35 | const VAddr region_end{region_start + region_num_pages * PageSize}; | 40 | ASSERT(m_memory_block_tree.empty()); |
| 36 | const VAddr region_last{region_end - 1}; | 41 | } |
| 37 | for (auto it{FindIterator(region_start)}; it != memory_block_tree.cend(); it++) { | ||
| 38 | const auto info{it->GetMemoryInfo()}; | ||
| 39 | if (region_last < info.GetAddress()) { | ||
| 40 | break; | ||
| 41 | } | ||
| 42 | 42 | ||
| 43 | if (info.state != KMemoryState::Free) { | 43 | VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pages, |
| 44 | continue; | 44 | size_t num_pages, size_t alignment, size_t offset, |
| 45 | } | 45 | size_t guard_pages) const { |
| 46 | if (num_pages > 0) { | ||
| 47 | const VAddr region_end = region_start + region_num_pages * PageSize; | ||
| 48 | const VAddr region_last = region_end - 1; | ||
| 49 | for (const_iterator it = this->FindIterator(region_start); it != m_memory_block_tree.cend(); | ||
| 50 | it++) { | ||
| 51 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 52 | if (region_last < info.GetAddress()) { | ||
| 53 | break; | ||
| 54 | } | ||
| 55 | if (info.m_state != KMemoryState::Free) { | ||
| 56 | continue; | ||
| 57 | } | ||
| 46 | 58 | ||
| 47 | VAddr area{(info.GetAddress() <= region_start) ? region_start : info.GetAddress()}; | 59 | VAddr area = (info.GetAddress() <= region_start) ? region_start : info.GetAddress(); |
| 48 | area += guard_pages * PageSize; | 60 | area += guard_pages * PageSize; |
| 49 | 61 | ||
| 50 | const VAddr offset_area{Common::AlignDown(area, align) + offset}; | 62 | const VAddr offset_area = Common::AlignDown(area, alignment) + offset; |
| 51 | area = (area <= offset_area) ? offset_area : offset_area + align; | 63 | area = (area <= offset_area) ? offset_area : offset_area + alignment; |
| 52 | 64 | ||
| 53 | const VAddr area_end{area + num_pages * PageSize + guard_pages * PageSize}; | 65 | const VAddr area_end = area + num_pages * PageSize + guard_pages * PageSize; |
| 54 | const VAddr area_last{area_end - 1}; | 66 | const VAddr area_last = area_end - 1; |
| 55 | 67 | ||
| 56 | if (info.GetAddress() <= area && area < area_last && area_last <= region_last && | 68 | if (info.GetAddress() <= area && area < area_last && area_last <= region_last && |
| 57 | area_last <= info.GetLastAddress()) { | 69 | area_last <= info.GetLastAddress()) { |
| 58 | return area; | 70 | return area; |
| 71 | } | ||
| 59 | } | 72 | } |
| 60 | } | 73 | } |
| 61 | 74 | ||
| 62 | return {}; | 75 | return {}; |
| 63 | } | 76 | } |
| 64 | 77 | ||
| 65 | void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state, | 78 | void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, |
| 66 | KMemoryPermission prev_perm, KMemoryAttribute prev_attribute, | 79 | VAddr address, size_t num_pages) { |
| 67 | KMemoryState state, KMemoryPermission perm, | 80 | // Find the iterator now that we've updated. |
| 68 | KMemoryAttribute attribute) { | 81 | iterator it = this->FindIterator(address); |
| 69 | const VAddr update_end_addr{addr + num_pages * PageSize}; | 82 | if (address != m_start_address) { |
| 70 | iterator node{memory_block_tree.begin()}; | 83 | it--; |
| 84 | } | ||
| 71 | 85 | ||
| 72 | prev_attribute |= KMemoryAttribute::IpcAndDeviceMapped; | 86 | // Coalesce blocks that we can. |
| 87 | while (true) { | ||
| 88 | iterator prev = it++; | ||
| 89 | if (it == m_memory_block_tree.end()) { | ||
| 90 | break; | ||
| 91 | } | ||
| 73 | 92 | ||
| 74 | while (node != memory_block_tree.end()) { | 93 | if (prev->CanMergeWith(*it)) { |
| 75 | KMemoryBlock* block{&(*node)}; | 94 | KMemoryBlock* block = std::addressof(*it); |
| 76 | iterator next_node{std::next(node)}; | 95 | m_memory_block_tree.erase(it); |
| 77 | const VAddr cur_addr{block->GetAddress()}; | 96 | prev->Add(*block); |
| 78 | const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr}; | 97 | allocator->Free(block); |
| 98 | it = prev; | ||
| 99 | } | ||
| 79 | 100 | ||
| 80 | if (addr < cur_end_addr && cur_addr < update_end_addr) { | 101 | if (address + num_pages * PageSize < it->GetMemoryInfo().GetEndAddress()) { |
| 81 | if (!block->HasProperties(prev_state, prev_perm, prev_attribute)) { | 102 | break; |
| 82 | node = next_node; | 103 | } |
| 83 | continue; | 104 | } |
| 84 | } | 105 | } |
| 85 | 106 | ||
| 86 | iterator new_node{node}; | 107 | void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, |
| 87 | if (addr > cur_addr) { | 108 | size_t num_pages, KMemoryState state, KMemoryPermission perm, |
| 88 | memory_block_tree.insert(node, block->Split(addr)); | 109 | KMemoryAttribute attr, |
| 110 | KMemoryBlockDisableMergeAttribute set_disable_attr, | ||
| 111 | KMemoryBlockDisableMergeAttribute clear_disable_attr) { | ||
| 112 | // Ensure for auditing that we never end up with an invalid tree. | ||
| 113 | KScopedMemoryBlockManagerAuditor auditor(this); | ||
| 114 | ASSERT(Common::IsAligned(address, PageSize)); | ||
| 115 | ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) == | ||
| 116 | KMemoryAttribute::None); | ||
| 117 | |||
| 118 | VAddr cur_address = address; | ||
| 119 | size_t remaining_pages = num_pages; | ||
| 120 | iterator it = this->FindIterator(address); | ||
| 121 | |||
| 122 | while (remaining_pages > 0) { | ||
| 123 | const size_t remaining_size = remaining_pages * PageSize; | ||
| 124 | KMemoryInfo cur_info = it->GetMemoryInfo(); | ||
| 125 | if (it->HasProperties(state, perm, attr)) { | ||
| 126 | // If we already have the right properties, just advance. | ||
| 127 | if (cur_address + remaining_size < cur_info.GetEndAddress()) { | ||
| 128 | remaining_pages = 0; | ||
| 129 | cur_address += remaining_size; | ||
| 130 | } else { | ||
| 131 | remaining_pages = | ||
| 132 | (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize; | ||
| 133 | cur_address = cur_info.GetEndAddress(); | ||
| 89 | } | 134 | } |
| 135 | } else { | ||
| 136 | // If we need to, create a new block before and insert it. | ||
| 137 | if (cur_info.GetAddress() != cur_address) { | ||
| 138 | KMemoryBlock* new_block = allocator->Allocate(); | ||
| 139 | |||
| 140 | it->Split(new_block, cur_address); | ||
| 141 | it = m_memory_block_tree.insert(*new_block); | ||
| 142 | it++; | ||
| 90 | 143 | ||
| 91 | if (update_end_addr < cur_end_addr) { | 144 | cur_info = it->GetMemoryInfo(); |
| 92 | new_node = memory_block_tree.insert(node, block->Split(update_end_addr)); | 145 | cur_address = cur_info.GetAddress(); |
| 93 | } | 146 | } |
| 94 | 147 | ||
| 95 | new_node->Update(state, perm, attribute); | 148 | // If we need to, create a new block after and insert it. |
| 149 | if (cur_info.GetSize() > remaining_size) { | ||
| 150 | KMemoryBlock* new_block = allocator->Allocate(); | ||
| 96 | 151 | ||
| 97 | MergeAdjacent(new_node, next_node); | 152 | it->Split(new_block, cur_address + remaining_size); |
| 98 | } | 153 | it = m_memory_block_tree.insert(*new_block); |
| 99 | 154 | ||
| 100 | if (cur_end_addr - 1 >= update_end_addr - 1) { | 155 | cur_info = it->GetMemoryInfo(); |
| 101 | break; | 156 | } |
| 102 | } | ||
| 103 | 157 | ||
| 104 | node = next_node; | 158 | // Update block state. |
| 159 | it->Update(state, perm, attr, cur_address == address, static_cast<u8>(set_disable_attr), | ||
| 160 | static_cast<u8>(clear_disable_attr)); | ||
| 161 | cur_address += cur_info.GetSize(); | ||
| 162 | remaining_pages -= cur_info.GetNumPages(); | ||
| 163 | } | ||
| 164 | it++; | ||
| 105 | } | 165 | } |
| 166 | |||
| 167 | this->CoalesceForUpdate(allocator, address, num_pages); | ||
| 106 | } | 168 | } |
| 107 | 169 | ||
| 108 | void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState state, | 170 | void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, |
| 109 | KMemoryPermission perm, KMemoryAttribute attribute) { | 171 | VAddr address, size_t num_pages, KMemoryState test_state, |
| 110 | const VAddr update_end_addr{addr + num_pages * PageSize}; | 172 | KMemoryPermission test_perm, KMemoryAttribute test_attr, |
| 111 | iterator node{memory_block_tree.begin()}; | 173 | KMemoryState state, KMemoryPermission perm, |
| 174 | KMemoryAttribute attr) { | ||
| 175 | // Ensure for auditing that we never end up with an invalid tree. | ||
| 176 | KScopedMemoryBlockManagerAuditor auditor(this); | ||
| 177 | ASSERT(Common::IsAligned(address, PageSize)); | ||
| 178 | ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) == | ||
| 179 | KMemoryAttribute::None); | ||
| 180 | |||
| 181 | VAddr cur_address = address; | ||
| 182 | size_t remaining_pages = num_pages; | ||
| 183 | iterator it = this->FindIterator(address); | ||
| 184 | |||
| 185 | while (remaining_pages > 0) { | ||
| 186 | const size_t remaining_size = remaining_pages * PageSize; | ||
| 187 | KMemoryInfo cur_info = it->GetMemoryInfo(); | ||
| 188 | if (it->HasProperties(test_state, test_perm, test_attr) && | ||
| 189 | !it->HasProperties(state, perm, attr)) { | ||
| 190 | // If we need to, create a new block before and insert it. | ||
| 191 | if (cur_info.GetAddress() != cur_address) { | ||
| 192 | KMemoryBlock* new_block = allocator->Allocate(); | ||
| 193 | |||
| 194 | it->Split(new_block, cur_address); | ||
| 195 | it = m_memory_block_tree.insert(*new_block); | ||
| 196 | it++; | ||
| 197 | |||
| 198 | cur_info = it->GetMemoryInfo(); | ||
| 199 | cur_address = cur_info.GetAddress(); | ||
| 200 | } | ||
| 112 | 201 | ||
| 113 | while (node != memory_block_tree.end()) { | 202 | // If we need to, create a new block after and insert it. |
| 114 | KMemoryBlock* block{&(*node)}; | 203 | if (cur_info.GetSize() > remaining_size) { |
| 115 | iterator next_node{std::next(node)}; | 204 | KMemoryBlock* new_block = allocator->Allocate(); |
| 116 | const VAddr cur_addr{block->GetAddress()}; | ||
| 117 | const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr}; | ||
| 118 | 205 | ||
| 119 | if (addr < cur_end_addr && cur_addr < update_end_addr) { | 206 | it->Split(new_block, cur_address + remaining_size); |
| 120 | iterator new_node{node}; | 207 | it = m_memory_block_tree.insert(*new_block); |
| 121 | 208 | ||
| 122 | if (addr > cur_addr) { | 209 | cur_info = it->GetMemoryInfo(); |
| 123 | memory_block_tree.insert(node, block->Split(addr)); | ||
| 124 | } | 210 | } |
| 125 | 211 | ||
| 126 | if (update_end_addr < cur_end_addr) { | 212 | // Update block state. |
| 127 | new_node = memory_block_tree.insert(node, block->Split(update_end_addr)); | 213 | it->Update(state, perm, attr, false, 0, 0); |
| 214 | cur_address += cur_info.GetSize(); | ||
| 215 | remaining_pages -= cur_info.GetNumPages(); | ||
| 216 | } else { | ||
| 217 | // If we already have the right properties, just advance. | ||
| 218 | if (cur_address + remaining_size < cur_info.GetEndAddress()) { | ||
| 219 | remaining_pages = 0; | ||
| 220 | cur_address += remaining_size; | ||
| 221 | } else { | ||
| 222 | remaining_pages = | ||
| 223 | (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize; | ||
| 224 | cur_address = cur_info.GetEndAddress(); | ||
| 128 | } | 225 | } |
| 129 | |||
| 130 | new_node->Update(state, perm, attribute); | ||
| 131 | |||
| 132 | MergeAdjacent(new_node, next_node); | ||
| 133 | } | ||
| 134 | |||
| 135 | if (cur_end_addr - 1 >= update_end_addr - 1) { | ||
| 136 | break; | ||
| 137 | } | 226 | } |
| 138 | 227 | it++; | |
| 139 | node = next_node; | ||
| 140 | } | 228 | } |
| 229 | |||
| 230 | this->CoalesceForUpdate(allocator, address, num_pages); | ||
| 141 | } | 231 | } |
| 142 | 232 | ||
| 143 | void KMemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func, | 233 | void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, |
| 234 | size_t num_pages, MemoryBlockLockFunction lock_func, | ||
| 144 | KMemoryPermission perm) { | 235 | KMemoryPermission perm) { |
| 145 | const VAddr update_end_addr{addr + num_pages * PageSize}; | 236 | // Ensure for auditing that we never end up with an invalid tree. |
| 146 | iterator node{memory_block_tree.begin()}; | 237 | KScopedMemoryBlockManagerAuditor auditor(this); |
| 238 | ASSERT(Common::IsAligned(address, PageSize)); | ||
| 147 | 239 | ||
| 148 | while (node != memory_block_tree.end()) { | 240 | VAddr cur_address = address; |
| 149 | KMemoryBlock* block{&(*node)}; | 241 | size_t remaining_pages = num_pages; |
| 150 | iterator next_node{std::next(node)}; | 242 | iterator it = this->FindIterator(address); |
| 151 | const VAddr cur_addr{block->GetAddress()}; | ||
| 152 | const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr}; | ||
| 153 | 243 | ||
| 154 | if (addr < cur_end_addr && cur_addr < update_end_addr) { | 244 | const VAddr end_address = address + (num_pages * PageSize); |
| 155 | iterator new_node{node}; | ||
| 156 | 245 | ||
| 157 | if (addr > cur_addr) { | 246 | while (remaining_pages > 0) { |
| 158 | memory_block_tree.insert(node, block->Split(addr)); | 247 | const size_t remaining_size = remaining_pages * PageSize; |
| 159 | } | 248 | KMemoryInfo cur_info = it->GetMemoryInfo(); |
| 160 | 249 | ||
| 161 | if (update_end_addr < cur_end_addr) { | 250 | // If we need to, create a new block before and insert it. |
| 162 | new_node = memory_block_tree.insert(node, block->Split(update_end_addr)); | 251 | if (cur_info.m_address != cur_address) { |
| 163 | } | 252 | KMemoryBlock* new_block = allocator->Allocate(); |
| 164 | 253 | ||
| 165 | lock_func(new_node, perm); | 254 | it->Split(new_block, cur_address); |
| 255 | it = m_memory_block_tree.insert(*new_block); | ||
| 256 | it++; | ||
| 166 | 257 | ||
| 167 | MergeAdjacent(new_node, next_node); | 258 | cur_info = it->GetMemoryInfo(); |
| 259 | cur_address = cur_info.GetAddress(); | ||
| 168 | } | 260 | } |
| 169 | 261 | ||
| 170 | if (cur_end_addr - 1 >= update_end_addr - 1) { | 262 | if (cur_info.GetSize() > remaining_size) { |
| 171 | break; | 263 | // If we need to, create a new block after and insert it. |
| 264 | KMemoryBlock* new_block = allocator->Allocate(); | ||
| 265 | |||
| 266 | it->Split(new_block, cur_address + remaining_size); | ||
| 267 | it = m_memory_block_tree.insert(*new_block); | ||
| 268 | |||
| 269 | cur_info = it->GetMemoryInfo(); | ||
| 172 | } | 270 | } |
| 173 | 271 | ||
| 174 | node = next_node; | 272 | // Call the locked update function. |
| 273 | (std::addressof(*it)->*lock_func)(perm, cur_info.GetAddress() == address, | ||
| 274 | cur_info.GetEndAddress() == end_address); | ||
| 275 | cur_address += cur_info.GetSize(); | ||
| 276 | remaining_pages -= cur_info.GetNumPages(); | ||
| 277 | it++; | ||
| 175 | } | 278 | } |
| 176 | } | ||
| 177 | 279 | ||
| 178 | void KMemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) { | 280 | this->CoalesceForUpdate(allocator, address, num_pages); |
| 179 | const_iterator it{FindIterator(start)}; | ||
| 180 | KMemoryInfo info{}; | ||
| 181 | do { | ||
| 182 | info = it->GetMemoryInfo(); | ||
| 183 | func(info); | ||
| 184 | it = std::next(it); | ||
| 185 | } while (info.addr + info.size - 1 < end - 1 && it != cend()); | ||
| 186 | } | 281 | } |
| 187 | 282 | ||
| 188 | void KMemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) { | 283 | // Debug. |
| 189 | KMemoryBlock* block{&(*it)}; | 284 | bool KMemoryBlockManager::CheckState() const { |
| 190 | 285 | // Loop over every block, ensuring that we are sorted and coalesced. | |
| 191 | auto EraseIt = [&](const iterator it_to_erase) { | 286 | auto it = m_memory_block_tree.cbegin(); |
| 192 | if (next_it == it_to_erase) { | 287 | auto prev = it++; |
| 193 | next_it = std::next(next_it); | 288 | while (it != m_memory_block_tree.cend()) { |
| 289 | const KMemoryInfo prev_info = prev->GetMemoryInfo(); | ||
| 290 | const KMemoryInfo cur_info = it->GetMemoryInfo(); | ||
| 291 | |||
| 292 | // Sequential blocks which can be merged should be merged. | ||
| 293 | if (prev->CanMergeWith(*it)) { | ||
| 294 | return false; | ||
| 194 | } | 295 | } |
| 195 | memory_block_tree.erase(it_to_erase); | ||
| 196 | }; | ||
| 197 | 296 | ||
| 198 | if (it != memory_block_tree.begin()) { | 297 | // Sequential blocks should be sequential. |
| 199 | KMemoryBlock* prev{&(*std::prev(it))}; | 298 | if (prev_info.GetEndAddress() != cur_info.GetAddress()) { |
| 200 | 299 | return false; | |
| 201 | if (block->HasSameProperties(*prev)) { | 300 | } |
| 202 | const iterator prev_it{std::prev(it)}; | ||
| 203 | 301 | ||
| 204 | prev->Add(block->GetNumPages()); | 302 | // If the block is ipc locked, it must have a count. |
| 205 | EraseIt(it); | 303 | if ((cur_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None && |
| 304 | cur_info.m_ipc_lock_count == 0) { | ||
| 305 | return false; | ||
| 306 | } | ||
| 206 | 307 | ||
| 207 | it = prev_it; | 308 | // If the block is device shared, it must have a count. |
| 208 | block = prev; | 309 | if ((cur_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None && |
| 310 | cur_info.m_device_use_count == 0) { | ||
| 311 | return false; | ||
| 209 | } | 312 | } |
| 313 | |||
| 314 | // Advance the iterator. | ||
| 315 | prev = it++; | ||
| 210 | } | 316 | } |
| 211 | 317 | ||
| 212 | if (it != cend()) { | 318 | // Our loop will miss checking the last block, potentially, so check it. |
| 213 | const KMemoryBlock* const next{&(*std::next(it))}; | 319 | if (prev != m_memory_block_tree.cend()) { |
| 320 | const KMemoryInfo prev_info = prev->GetMemoryInfo(); | ||
| 321 | // If the block is ipc locked, it must have a count. | ||
| 322 | if ((prev_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None && | ||
| 323 | prev_info.m_ipc_lock_count == 0) { | ||
| 324 | return false; | ||
| 325 | } | ||
| 214 | 326 | ||
| 215 | if (block->HasSameProperties(*next)) { | 327 | // If the block is device shared, it must have a count. |
| 216 | block->Add(next->GetNumPages()); | 328 | if ((prev_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None && |
| 217 | EraseIt(std::next(it)); | 329 | prev_info.m_device_use_count == 0) { |
| 330 | return false; | ||
| 218 | } | 331 | } |
| 219 | } | 332 | } |
| 333 | |||
| 334 | return true; | ||
| 220 | } | 335 | } |
| 221 | 336 | ||
| 222 | } // namespace Kernel | 337 | } // namespace Kernel |
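The loop above is the core of the rewritten UpdateLock: split the block containing the start of the range, split again past its end, then apply the member-function update to every whole block in between. A minimal, self-contained sketch of that split-then-update walk, using std::map and plain structs in place of yuzu's intrusive tree and KMemoryBlock (all names below are illustrative, not the emulator's):

```cpp
#include <cstdint>
#include <cstdio>
#include <iterator>
#include <map>

constexpr std::uint64_t PageSize = 0x1000;

struct Block {
    std::uint64_t num_pages;
    bool locked;
};

// Blocks keyed by base address; adjacent entries are contiguous, like the
// manager's tree of KMemoryBlocks.
using BlockTree = std::map<std::uint64_t, Block>;

// Split the block containing addr so a boundary falls exactly on addr.
// Assumes addr is inside an existing block or already on a boundary.
void SplitAt(BlockTree& tree, std::uint64_t addr) {
    auto it = std::prev(tree.upper_bound(addr));
    if (it->first == addr) {
        return; // Already on a boundary.
    }
    Block right = it->second;
    const std::uint64_t left_pages = (addr - it->first) / PageSize;
    right.num_pages -= left_pages;
    it->second.num_pages = left_pages;
    tree.emplace(addr, right);
}

// Split at both range ends, then update every block in between.
void UpdateLock(BlockTree& tree, std::uint64_t address, std::uint64_t num_pages, bool locked) {
    const std::uint64_t end = address + num_pages * PageSize;
    SplitAt(tree, address);
    SplitAt(tree, end);
    for (auto it = tree.find(address); it != tree.end() && it->first < end; ++it) {
        it->second.locked = locked;
    }
}

int main() {
    BlockTree tree{{0x0, {16, false}}}; // One 16-page block at address 0.
    UpdateLock(tree, 0x2000, 4, true);  // Lock pages [0x2000, 0x6000).
    for (const auto& [base, block] : tree) {
        std::printf("0x%llx: %llu pages, locked=%d\n", static_cast<unsigned long long>(base),
                    static_cast<unsigned long long>(block.num_pages),
                    static_cast<int>(block.locked));
    }
    return 0;
}
```

The real manager then runs CoalesceForUpdate, which re-merges neighbors with identical properties; CheckState above is exactly the invariant that no two mergeable blocks remain adjacent afterwards.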
diff --git a/src/core/hle/kernel/k_memory_block_manager.h b/src/core/hle/kernel/k_memory_block_manager.h index e14741b89..9b5873883 100644 --- a/src/core/hle/kernel/k_memory_block_manager.h +++ b/src/core/hle/kernel/k_memory_block_manager.h | |||
| @@ -4,63 +4,154 @@ | |||
| 4 | #pragma once | 4 | #pragma once |
| 5 | 5 | ||
| 6 | #include <functional> | 6 | #include <functional> |
| 7 | #include <list> | ||
| 8 | 7 | ||
| 8 | #include "common/common_funcs.h" | ||
| 9 | #include "common/common_types.h" | 9 | #include "common/common_types.h" |
| 10 | #include "core/hle/kernel/k_dynamic_resource_manager.h" | ||
| 10 | #include "core/hle/kernel/k_memory_block.h" | 11 | #include "core/hle/kernel/k_memory_block.h" |
| 11 | 12 | ||
| 12 | namespace Kernel { | 13 | namespace Kernel { |
| 13 | 14 | ||
| 15 | class KMemoryBlockManagerUpdateAllocator { | ||
| 16 | public: | ||
| 17 | static constexpr size_t MaxBlocks = 2; | ||
| 18 | |||
| 19 | private: | ||
| 20 | KMemoryBlock* m_blocks[MaxBlocks]; | ||
| 21 | size_t m_index; | ||
| 22 | KMemoryBlockSlabManager* m_slab_manager; | ||
| 23 | |||
| 24 | private: | ||
| 25 | Result Initialize(size_t num_blocks) { | ||
| 26 | // Check num blocks. | ||
| 27 | ASSERT(num_blocks <= MaxBlocks); | ||
| 28 | |||
| 29 | // Set index. | ||
| 30 | m_index = MaxBlocks - num_blocks; | ||
| 31 | |||
| 32 | // Allocate the blocks. | ||
| 33 | for (size_t i = 0; i < num_blocks && i < MaxBlocks; ++i) { | ||
| 34 | m_blocks[m_index + i] = m_slab_manager->Allocate(); | ||
| 35 | R_UNLESS(m_blocks[m_index + i] != nullptr, ResultOutOfResource); | ||
| 36 | } | ||
| 37 | |||
| 38 | R_SUCCEED(); | ||
| 39 | } | ||
| 40 | |||
| 41 | public: | ||
| 42 | KMemoryBlockManagerUpdateAllocator(Result* out_result, KMemoryBlockSlabManager* sm, | ||
| 43 | size_t num_blocks = MaxBlocks) | ||
| 44 | : m_blocks(), m_index(MaxBlocks), m_slab_manager(sm) { | ||
| 45 | *out_result = this->Initialize(num_blocks); | ||
| 46 | } | ||
| 47 | |||
| 48 | ~KMemoryBlockManagerUpdateAllocator() { | ||
| 49 | for (const auto& block : m_blocks) { | ||
| 50 | if (block != nullptr) { | ||
| 51 | m_slab_manager->Free(block); | ||
| 52 | } | ||
| 53 | } | ||
| 54 | } | ||
| 55 | |||
| 56 | KMemoryBlock* Allocate() { | ||
| 57 | ASSERT(m_index < MaxBlocks); | ||
| 58 | ASSERT(m_blocks[m_index] != nullptr); | ||
| 59 | KMemoryBlock* block = nullptr; | ||
| 60 | std::swap(block, m_blocks[m_index++]); | ||
| 61 | return block; | ||
| 62 | } | ||
| 63 | |||
| 64 | void Free(KMemoryBlock* block) { | ||
| 65 | ASSERT(m_index <= MaxBlocks); | ||
| 66 | ASSERT(block != nullptr); | ||
| 67 | if (m_index == 0) { | ||
| 68 | m_slab_manager->Free(block); | ||
| 69 | } else { | ||
| 70 | m_blocks[--m_index] = block; | ||
| 71 | } | ||
| 72 | } | ||
| 73 | }; | ||
| 74 | |||
| 14 | class KMemoryBlockManager final { | 75 | class KMemoryBlockManager final { |
| 15 | public: | 76 | public: |
| 16 | using MemoryBlockTree = std::list<KMemoryBlock>; | 77 | using MemoryBlockTree = |
| 78 | Common::IntrusiveRedBlackTreeBaseTraits<KMemoryBlock>::TreeType<KMemoryBlock>; | ||
| 79 | using MemoryBlockLockFunction = void (KMemoryBlock::*)(KMemoryPermission new_perm, bool left, | ||
| 80 | bool right); | ||
| 17 | using iterator = MemoryBlockTree::iterator; | 81 | using iterator = MemoryBlockTree::iterator; |
| 18 | using const_iterator = MemoryBlockTree::const_iterator; | 82 | using const_iterator = MemoryBlockTree::const_iterator; |
| 19 | 83 | ||
| 20 | public: | 84 | public: |
| 21 | KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_); | 85 | KMemoryBlockManager(); |
| 86 | |||
| 87 | using HostUnmapCallback = std::function<void(VAddr, u64)>; | ||
| 88 | |||
| 89 | Result Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager); | ||
| 90 | void Finalize(KMemoryBlockSlabManager* slab_manager, HostUnmapCallback&& host_unmap_callback); | ||
| 22 | 91 | ||
| 23 | iterator end() { | 92 | iterator end() { |
| 24 | return memory_block_tree.end(); | 93 | return m_memory_block_tree.end(); |
| 25 | } | 94 | } |
| 26 | const_iterator end() const { | 95 | const_iterator end() const { |
| 27 | return memory_block_tree.end(); | 96 | return m_memory_block_tree.end(); |
| 28 | } | 97 | } |
| 29 | const_iterator cend() const { | 98 | const_iterator cend() const { |
| 30 | return memory_block_tree.cend(); | 99 | return m_memory_block_tree.cend(); |
| 31 | } | 100 | } |
| 32 | 101 | ||
| 33 | iterator FindIterator(VAddr addr); | 102 | VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages, |
| 103 | size_t alignment, size_t offset, size_t guard_pages) const; | ||
| 34 | 104 | ||
| 35 | VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages, | 105 | void Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages, |
| 36 | std::size_t align, std::size_t offset, std::size_t guard_pages); | 106 | KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr, |
| 107 | KMemoryBlockDisableMergeAttribute set_disable_attr, | ||
| 108 | KMemoryBlockDisableMergeAttribute clear_disable_attr); | ||
| 109 | void UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages, | ||
| 110 | MemoryBlockLockFunction lock_func, KMemoryPermission perm); | ||
| 37 | 111 | ||
| 38 | void Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state, | 112 | void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, |
| 39 | KMemoryPermission prev_perm, KMemoryAttribute prev_attribute, KMemoryState state, | 113 | size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm, |
| 40 | KMemoryPermission perm, KMemoryAttribute attribute); | 114 | KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm, |
| 115 | KMemoryAttribute attr); | ||
| 41 | 116 | ||
| 42 | void Update(VAddr addr, std::size_t num_pages, KMemoryState state, | 117 | iterator FindIterator(VAddr address) const { |
| 43 | KMemoryPermission perm = KMemoryPermission::None, | 118 | return m_memory_block_tree.find(KMemoryBlock( |
| 44 | KMemoryAttribute attribute = KMemoryAttribute::None); | 119 | address, 1, KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None)); |
| 45 | 120 | } | |
| 46 | using LockFunc = std::function<void(iterator, KMemoryPermission)>; | ||
| 47 | void UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func, | ||
| 48 | KMemoryPermission perm); | ||
| 49 | 121 | ||
| 50 | using IterateFunc = std::function<void(const KMemoryInfo&)>; | 122 | const KMemoryBlock* FindBlock(VAddr address) const { |
| 51 | void IterateForRange(VAddr start, VAddr end, IterateFunc&& func); | 123 | if (const_iterator it = this->FindIterator(address); it != m_memory_block_tree.end()) { |
| 124 | return std::addressof(*it); | ||
| 125 | } | ||
| 52 | 126 | ||
| 53 | KMemoryBlock& FindBlock(VAddr addr) { | 127 | return nullptr; |
| 54 | return *FindIterator(addr); | ||
| 55 | } | 128 | } |
| 56 | 129 | ||
| 130 | // Debug. | ||
| 131 | bool CheckState() const; | ||
| 132 | |||
| 57 | private: | 133 | private: |
| 58 | void MergeAdjacent(iterator it, iterator& next_it); | 134 | void CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, |
| 135 | size_t num_pages); | ||
| 59 | 136 | ||
| 60 | [[maybe_unused]] const VAddr start_addr; | 137 | MemoryBlockTree m_memory_block_tree; |
| 61 | [[maybe_unused]] const VAddr end_addr; | 138 | VAddr m_start_address{}; |
| 139 | VAddr m_end_address{}; | ||
| 140 | }; | ||
| 62 | 141 | ||
| 63 | MemoryBlockTree memory_block_tree; | 142 | class KScopedMemoryBlockManagerAuditor { |
| 143 | public: | ||
| 144 | explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager* m) : m_manager(m) { | ||
| 145 | ASSERT(m_manager->CheckState()); | ||
| 146 | } | ||
| 147 | explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager& m) | ||
| 148 | : KScopedMemoryBlockManagerAuditor(std::addressof(m)) {} | ||
| 149 | ~KScopedMemoryBlockManagerAuditor() { | ||
| 150 | ASSERT(m_manager->CheckState()); | ||
| 151 | } | ||
| 152 | |||
| 153 | private: | ||
| 154 | KMemoryBlockManager* m_manager; | ||
| 64 | }; | 155 | }; |
| 65 | 156 | ||
| 66 | } // namespace Kernel | 157 | } // namespace Kernel |
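The point of KMemoryBlockManagerUpdateAllocator is that a single update needs at most two fresh blocks (one split at each end of the range), so it reserves them before any tree mutation begins and the mutation itself can no longer fail part-way. A hedged sketch of that reserve-up-front pattern; SlabPool and Node are stand-ins, not yuzu's types:

```cpp
#include <array>
#include <cassert>
#include <cstddef>
#include <new>
#include <utility>

struct Node {};

// Stand-in for KMemoryBlockSlabManager; may run out of nodes.
struct SlabPool {
    Node* Allocate() { return new (std::nothrow) Node(); }
    void Free(Node* n) { delete n; }
};

class UpdateAllocator {
public:
    static constexpr std::size_t MaxNodes = 2; // One split per range end.

    // Reserve everything up front; failure is visible before mutation begins.
    UpdateAllocator(SlabPool& pool, bool& out_ok, std::size_t count = MaxNodes) : m_pool(pool) {
        assert(count <= MaxNodes);
        m_index = MaxNodes - count;
        for (std::size_t i = 0; i < count; ++i) {
            m_nodes[m_index + i] = m_pool.Allocate();
            if (m_nodes[m_index + i] == nullptr) {
                out_ok = false;
                return;
            }
        }
        out_ok = true;
    }

    // Unused reservations are returned when the update scope ends.
    ~UpdateAllocator() {
        for (Node* n : m_nodes) {
            if (n != nullptr) {
                m_pool.Free(n);
            }
        }
    }

    // Hand out a reserved node; by construction this cannot fail mid-update.
    Node* Allocate() {
        assert(m_index < MaxNodes && m_nodes[m_index] != nullptr);
        return std::exchange(m_nodes[m_index++], nullptr);
    }

private:
    SlabPool& m_pool;
    std::array<Node*, MaxNodes> m_nodes{};
    std::size_t m_index{MaxNodes};
};
```

KScopedMemoryBlockManagerAuditor applies the same RAII discipline to verification: CheckState runs on entry to and exit from any scope that mutates the tree.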
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp index 5b0a9963a..646711505 100644 --- a/src/core/hle/kernel/k_memory_manager.cpp +++ b/src/core/hle/kernel/k_memory_manager.cpp | |||
| @@ -331,7 +331,7 @@ Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pag | |||
| 331 | 331 | ||
| 332 | // Set all the allocated memory. | 332 | // Set all the allocated memory. |
| 333 | for (const auto& block : out->Nodes()) { | 333 | for (const auto& block : out->Nodes()) { |
| 334 | std::memset(system.DeviceMemory().GetPointer(block.GetAddress()), fill_pattern, | 334 | std::memset(system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern, |
| 335 | block.GetSize()); | 335 | block.GetSize()); |
| 336 | } | 336 | } |
| 337 | 337 | ||
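The GetPointer change here (and in k_page_buffer.cpp below) moves the cast into the accessor, so each call site names the pointee type once instead of wrapping a raw pointer in reinterpret_cast. A sketch of the shape of such a templated accessor, with a hypothetical DeviceMemory stand-in rather than yuzu's real class:

```cpp
#include <cstdint>

class DeviceMemory {
public:
    // Callers write GetPointer<KPageBuffer>(paddr) instead of
    // reinterpret_cast<KPageBuffer*>(GetPointer(paddr)).
    template <typename T>
    T* GetPointer(std::uint64_t phys_addr) {
        return reinterpret_cast<T*>(m_backing + (phys_addr - m_base_phys_addr));
    }

private:
    std::uint8_t* m_backing{};
    std::uint64_t m_base_phys_addr{};
};
```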
diff --git a/src/core/hle/kernel/k_page_buffer.cpp b/src/core/hle/kernel/k_page_buffer.cpp index 1a0bf4439..0c16dded4 100644 --- a/src/core/hle/kernel/k_page_buffer.cpp +++ b/src/core/hle/kernel/k_page_buffer.cpp | |||
| @@ -12,7 +12,7 @@ namespace Kernel { | |||
| 12 | 12 | ||
| 13 | KPageBuffer* KPageBuffer::FromPhysicalAddress(Core::System& system, PAddr phys_addr) { | 13 | KPageBuffer* KPageBuffer::FromPhysicalAddress(Core::System& system, PAddr phys_addr) { |
| 14 | ASSERT(Common::IsAligned(phys_addr, PageSize)); | 14 | ASSERT(Common::IsAligned(phys_addr, PageSize)); |
| 15 | return reinterpret_cast<KPageBuffer*>(system.DeviceMemory().GetPointer(phys_addr)); | 15 | return system.DeviceMemory().GetPointer<KPageBuffer>(phys_addr); |
| 16 | } | 16 | } |
| 17 | 17 | ||
| 18 | } // namespace Kernel | 18 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/k_page_buffer.h b/src/core/hle/kernel/k_page_buffer.h index 7e50dc1d1..aef06e213 100644 --- a/src/core/hle/kernel/k_page_buffer.h +++ b/src/core/hle/kernel/k_page_buffer.h | |||
| @@ -13,6 +13,7 @@ namespace Kernel { | |||
| 13 | 13 | ||
| 14 | class KPageBuffer final : public KSlabAllocated<KPageBuffer> { | 14 | class KPageBuffer final : public KSlabAllocated<KPageBuffer> { |
| 15 | public: | 15 | public: |
| 16 | explicit KPageBuffer(KernelCore&) {} | ||
| 16 | KPageBuffer() = default; | 17 | KPageBuffer() = default; |
| 17 | 18 | ||
| 18 | static KPageBuffer* FromPhysicalAddress(Core::System& system, PAddr phys_addr); | 19 | static KPageBuffer* FromPhysicalAddress(Core::System& system, PAddr phys_addr); |
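The added constructor plausibly exists so that generic slab-initialization code, which constructs every element with a KernelCore& argument, can also construct KPageBuffer; the buffer simply ignores the reference. A sketch of that assumption (ConstructSlabElement is hypothetical, not a yuzu function):

```cpp
#include <new>

class KernelCore;

// Generic slab initializer: constructs every element as T(kernel), so each
// slab-allocated type must accept (and may ignore) a KernelCore&.
template <typename T>
T* ConstructSlabElement(void* storage, KernelCore& kernel) {
    return new (storage) T(kernel);
}
```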
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp index d975de844..307e491cb 100644 --- a/src/core/hle/kernel/k_page_table.cpp +++ b/src/core/hle/kernel/k_page_table.cpp | |||
| @@ -25,7 +25,7 @@ namespace { | |||
| 25 | 25 | ||
| 26 | using namespace Common::Literals; | 26 | using namespace Common::Literals; |
| 27 | 27 | ||
| 28 | constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) { | 28 | constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) { |
| 29 | switch (as_type) { | 29 | switch (as_type) { |
| 30 | case FileSys::ProgramAddressSpaceType::Is32Bit: | 30 | case FileSys::ProgramAddressSpaceType::Is32Bit: |
| 31 | case FileSys::ProgramAddressSpaceType::Is32BitNoMap: | 31 | case FileSys::ProgramAddressSpaceType::Is32BitNoMap: |
| @@ -43,27 +43,29 @@ constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceT | |||
| 43 | } // namespace | 43 | } // namespace |
| 44 | 44 | ||
| 45 | KPageTable::KPageTable(Core::System& system_) | 45 | KPageTable::KPageTable(Core::System& system_) |
| 46 | : general_lock{system_.Kernel()}, map_physical_memory_lock{system_.Kernel()}, system{system_} {} | 46 | : m_general_lock{system_.Kernel()}, |
| 47 | m_map_physical_memory_lock{system_.Kernel()}, m_system{system_} {} | ||
| 47 | 48 | ||
| 48 | KPageTable::~KPageTable() = default; | 49 | KPageTable::~KPageTable() = default; |
| 49 | 50 | ||
| 50 | Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, | 51 | Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, |
| 51 | VAddr code_addr, std::size_t code_size, | 52 | VAddr code_addr, size_t code_size, |
| 53 | KMemoryBlockSlabManager* mem_block_slab_manager, | ||
| 52 | KMemoryManager::Pool pool) { | 54 | KMemoryManager::Pool pool) { |
| 53 | 55 | ||
| 54 | const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) { | 56 | const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) { |
| 55 | return KAddressSpaceInfo::GetAddressSpaceStart(address_space_width, type); | 57 | return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type); |
| 56 | }; | 58 | }; |
| 57 | const auto GetSpaceSize = [this](KAddressSpaceInfo::Type type) { | 59 | const auto GetSpaceSize = [this](KAddressSpaceInfo::Type type) { |
| 58 | return KAddressSpaceInfo::GetAddressSpaceSize(address_space_width, type); | 60 | return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type); |
| 59 | }; | 61 | }; |
| 60 | 62 | ||
| 61 | // Set our width and heap/alias sizes | 63 | // Set our width and heap/alias sizes |
| 62 | address_space_width = GetAddressSpaceWidthFromType(as_type); | 64 | m_address_space_width = GetAddressSpaceWidthFromType(as_type); |
| 63 | const VAddr start = 0; | 65 | const VAddr start = 0; |
| 64 | const VAddr end{1ULL << address_space_width}; | 66 | const VAddr end{1ULL << m_address_space_width}; |
| 65 | std::size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)}; | 67 | size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)}; |
| 66 | std::size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)}; | 68 | size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)}; |
| 67 | 69 | ||
| 68 | ASSERT(code_addr < code_addr + code_size); | 70 | ASSERT(code_addr < code_addr + code_size); |
| 69 | ASSERT(code_addr + code_size - 1 <= end - 1); | 71 | ASSERT(code_addr + code_size - 1 <= end - 1); |
| @@ -75,66 +77,65 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type | |||
| 75 | } | 77 | } |
| 76 | 78 | ||
| 77 | // Set code regions and determine remaining | 79 | // Set code regions and determine remaining |
| 78 | constexpr std::size_t RegionAlignment{2_MiB}; | 80 | constexpr size_t RegionAlignment{2_MiB}; |
| 79 | VAddr process_code_start{}; | 81 | VAddr process_code_start{}; |
| 80 | VAddr process_code_end{}; | 82 | VAddr process_code_end{}; |
| 81 | std::size_t stack_region_size{}; | 83 | size_t stack_region_size{}; |
| 82 | std::size_t kernel_map_region_size{}; | 84 | size_t kernel_map_region_size{}; |
| 83 | 85 | ||
| 84 | if (address_space_width == 39) { | 86 | if (m_address_space_width == 39) { |
| 85 | alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias); | 87 | alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias); |
| 86 | heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap); | 88 | heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap); |
| 87 | stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack); | 89 | stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack); |
| 88 | kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); | 90 | kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); |
| 89 | code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit); | 91 | m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit); |
| 90 | code_region_end = code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit); | 92 | m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit); |
| 91 | alias_code_region_start = code_region_start; | 93 | m_alias_code_region_start = m_code_region_start; |
| 92 | alias_code_region_end = code_region_end; | 94 | m_alias_code_region_end = m_code_region_end; |
| 93 | process_code_start = Common::AlignDown(code_addr, RegionAlignment); | 95 | process_code_start = Common::AlignDown(code_addr, RegionAlignment); |
| 94 | process_code_end = Common::AlignUp(code_addr + code_size, RegionAlignment); | 96 | process_code_end = Common::AlignUp(code_addr + code_size, RegionAlignment); |
| 95 | } else { | 97 | } else { |
| 96 | stack_region_size = 0; | 98 | stack_region_size = 0; |
| 97 | kernel_map_region_size = 0; | 99 | kernel_map_region_size = 0; |
| 98 | code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall); | 100 | m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall); |
| 99 | code_region_end = code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); | 101 | m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); |
| 100 | stack_region_start = code_region_start; | 102 | m_stack_region_start = m_code_region_start; |
| 101 | alias_code_region_start = code_region_start; | 103 | m_alias_code_region_start = m_code_region_start; |
| 102 | alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) + | 104 | m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) + |
| 103 | GetSpaceSize(KAddressSpaceInfo::Type::MapLarge); | 105 | GetSpaceSize(KAddressSpaceInfo::Type::MapLarge); |
| 104 | stack_region_end = code_region_end; | 106 | m_stack_region_end = m_code_region_end; |
| 105 | kernel_map_region_start = code_region_start; | 107 | m_kernel_map_region_start = m_code_region_start; |
| 106 | kernel_map_region_end = code_region_end; | 108 | m_kernel_map_region_end = m_code_region_end; |
| 107 | process_code_start = code_region_start; | 109 | process_code_start = m_code_region_start; |
| 108 | process_code_end = code_region_end; | 110 | process_code_end = m_code_region_end; |
| 109 | } | 111 | } |
| 110 | 112 | ||
| 111 | // Set other basic fields | 113 | // Set other basic fields |
| 112 | is_aslr_enabled = enable_aslr; | 114 | m_enable_aslr = enable_aslr; |
| 113 | address_space_start = start; | 115 | m_enable_device_address_space_merge = false; |
| 114 | address_space_end = end; | 116 | m_address_space_start = start; |
| 115 | is_kernel = false; | 117 | m_address_space_end = end; |
| 118 | m_is_kernel = false; | ||
| 119 | m_memory_block_slab_manager = mem_block_slab_manager; | ||
| 116 | 120 | ||
| 117 | // Determine the region we can place our undetermineds in | 121 | // Determine the region we can place our undetermineds in |
| 118 | VAddr alloc_start{}; | 122 | VAddr alloc_start{}; |
| 119 | std::size_t alloc_size{}; | 123 | size_t alloc_size{}; |
| 120 | if ((process_code_start - code_region_start) >= (end - process_code_end)) { | 124 | if ((process_code_start - m_code_region_start) >= (end - process_code_end)) { |
| 121 | alloc_start = code_region_start; | 125 | alloc_start = m_code_region_start; |
| 122 | alloc_size = process_code_start - code_region_start; | 126 | alloc_size = process_code_start - m_code_region_start; |
| 123 | } else { | 127 | } else { |
| 124 | alloc_start = process_code_end; | 128 | alloc_start = process_code_end; |
| 125 | alloc_size = end - process_code_end; | 129 | alloc_size = end - process_code_end; |
| 126 | } | 130 | } |
| 127 | const std::size_t needed_size{ | 131 | const size_t needed_size = |
| 128 | (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)}; | 132 | (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size); |
| 129 | if (alloc_size < needed_size) { | 133 | R_UNLESS(alloc_size >= needed_size, ResultOutOfMemory); |
| 130 | ASSERT(false); | ||
| 131 | return ResultOutOfMemory; | ||
| 132 | } | ||
| 133 | 134 | ||
| 134 | const std::size_t remaining_size{alloc_size - needed_size}; | 135 | const size_t remaining_size{alloc_size - needed_size}; |
| 135 | 136 | ||
| 136 | // Determine random placements for each region | 137 | // Determine random placements for each region |
| 137 | std::size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{}; | 138 | size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{}; |
| 138 | if (enable_aslr) { | 139 | if (enable_aslr) { |
| 139 | alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * | 140 | alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * |
| 140 | RegionAlignment; | 141 | RegionAlignment; |
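Each *_rnd value above is a random multiple of RegionAlignment drawn from the leftover space, so every region lands 2 MiB-aligned. A sketch of that computation, with std::mt19937_64 standing in for KSystemControl::GenerateRandomRange:

```cpp
#include <cstdint>
#include <random>

constexpr std::uint64_t RegionAlignment = 2ULL * 1024 * 1024; // 2 MiB

// Pick an aligned offset in [0, remaining_size], mirroring
// GenerateRandomRange(0, remaining_size / RegionAlignment) * RegionAlignment.
std::uint64_t PickRegionOffset(std::uint64_t remaining_size, std::mt19937_64& rng) {
    std::uniform_int_distribution<std::uint64_t> dist(0, remaining_size / RegionAlignment);
    return dist(rng) * RegionAlignment;
}
```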
| @@ -147,117 +148,130 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type | |||
| 147 | } | 148 | } |
| 148 | 149 | ||
| 149 | // Setup heap and alias regions | 150 | // Setup heap and alias regions |
| 150 | alias_region_start = alloc_start + alias_rnd; | 151 | m_alias_region_start = alloc_start + alias_rnd; |
| 151 | alias_region_end = alias_region_start + alias_region_size; | 152 | m_alias_region_end = m_alias_region_start + alias_region_size; |
| 152 | heap_region_start = alloc_start + heap_rnd; | 153 | m_heap_region_start = alloc_start + heap_rnd; |
| 153 | heap_region_end = heap_region_start + heap_region_size; | 154 | m_heap_region_end = m_heap_region_start + heap_region_size; |
| 154 | 155 | ||
| 155 | if (alias_rnd <= heap_rnd) { | 156 | if (alias_rnd <= heap_rnd) { |
| 156 | heap_region_start += alias_region_size; | 157 | m_heap_region_start += alias_region_size; |
| 157 | heap_region_end += alias_region_size; | 158 | m_heap_region_end += alias_region_size; |
| 158 | } else { | 159 | } else { |
| 159 | alias_region_start += heap_region_size; | 160 | m_alias_region_start += heap_region_size; |
| 160 | alias_region_end += heap_region_size; | 161 | m_alias_region_end += heap_region_size; |
| 161 | } | 162 | } |
| 162 | 163 | ||
| 163 | // Setup stack region | 164 | // Setup stack region |
| 164 | if (stack_region_size) { | 165 | if (stack_region_size) { |
| 165 | stack_region_start = alloc_start + stack_rnd; | 166 | m_stack_region_start = alloc_start + stack_rnd; |
| 166 | stack_region_end = stack_region_start + stack_region_size; | 167 | m_stack_region_end = m_stack_region_start + stack_region_size; |
| 167 | 168 | ||
| 168 | if (alias_rnd < stack_rnd) { | 169 | if (alias_rnd < stack_rnd) { |
| 169 | stack_region_start += alias_region_size; | 170 | m_stack_region_start += alias_region_size; |
| 170 | stack_region_end += alias_region_size; | 171 | m_stack_region_end += alias_region_size; |
| 171 | } else { | 172 | } else { |
| 172 | alias_region_start += stack_region_size; | 173 | m_alias_region_start += stack_region_size; |
| 173 | alias_region_end += stack_region_size; | 174 | m_alias_region_end += stack_region_size; |
| 174 | } | 175 | } |
| 175 | 176 | ||
| 176 | if (heap_rnd < stack_rnd) { | 177 | if (heap_rnd < stack_rnd) { |
| 177 | stack_region_start += heap_region_size; | 178 | m_stack_region_start += heap_region_size; |
| 178 | stack_region_end += heap_region_size; | 179 | m_stack_region_end += heap_region_size; |
| 179 | } else { | 180 | } else { |
| 180 | heap_region_start += stack_region_size; | 181 | m_heap_region_start += stack_region_size; |
| 181 | heap_region_end += stack_region_size; | 182 | m_heap_region_end += stack_region_size; |
| 182 | } | 183 | } |
| 183 | } | 184 | } |
| 184 | 185 | ||
| 185 | // Setup kernel map region | 186 | // Setup kernel map region |
| 186 | if (kernel_map_region_size) { | 187 | if (kernel_map_region_size) { |
| 187 | kernel_map_region_start = alloc_start + kmap_rnd; | 188 | m_kernel_map_region_start = alloc_start + kmap_rnd; |
| 188 | kernel_map_region_end = kernel_map_region_start + kernel_map_region_size; | 189 | m_kernel_map_region_end = m_kernel_map_region_start + kernel_map_region_size; |
| 189 | 190 | ||
| 190 | if (alias_rnd < kmap_rnd) { | 191 | if (alias_rnd < kmap_rnd) { |
| 191 | kernel_map_region_start += alias_region_size; | 192 | m_kernel_map_region_start += alias_region_size; |
| 192 | kernel_map_region_end += alias_region_size; | 193 | m_kernel_map_region_end += alias_region_size; |
| 193 | } else { | 194 | } else { |
| 194 | alias_region_start += kernel_map_region_size; | 195 | m_alias_region_start += kernel_map_region_size; |
| 195 | alias_region_end += kernel_map_region_size; | 196 | m_alias_region_end += kernel_map_region_size; |
| 196 | } | 197 | } |
| 197 | 198 | ||
| 198 | if (heap_rnd < kmap_rnd) { | 199 | if (heap_rnd < kmap_rnd) { |
| 199 | kernel_map_region_start += heap_region_size; | 200 | m_kernel_map_region_start += heap_region_size; |
| 200 | kernel_map_region_end += heap_region_size; | 201 | m_kernel_map_region_end += heap_region_size; |
| 201 | } else { | 202 | } else { |
| 202 | heap_region_start += kernel_map_region_size; | 203 | m_heap_region_start += kernel_map_region_size; |
| 203 | heap_region_end += kernel_map_region_size; | 204 | m_heap_region_end += kernel_map_region_size; |
| 204 | } | 205 | } |
| 205 | 206 | ||
| 206 | if (stack_region_size) { | 207 | if (stack_region_size) { |
| 207 | if (stack_rnd < kmap_rnd) { | 208 | if (stack_rnd < kmap_rnd) { |
| 208 | kernel_map_region_start += stack_region_size; | 209 | m_kernel_map_region_start += stack_region_size; |
| 209 | kernel_map_region_end += stack_region_size; | 210 | m_kernel_map_region_end += stack_region_size; |
| 210 | } else { | 211 | } else { |
| 211 | stack_region_start += kernel_map_region_size; | 212 | m_stack_region_start += kernel_map_region_size; |
| 212 | stack_region_end += kernel_map_region_size; | 213 | m_stack_region_end += kernel_map_region_size; |
| 213 | } | 214 | } |
| 214 | } | 215 | } |
| 215 | } | 216 | } |
| 216 | 217 | ||
| 217 | // Set heap members | 218 | // Set heap members |
| 218 | current_heap_end = heap_region_start; | 219 | m_current_heap_end = m_heap_region_start; |
| 219 | max_heap_size = 0; | 220 | m_max_heap_size = 0; |
| 220 | max_physical_memory_size = 0; | 221 | m_max_physical_memory_size = 0; |
| 221 | 222 | ||
| 222 | // Ensure that our regions are inside our address space | 223 | // Ensure that our regions are inside our address space |
| 223 | auto IsInAddressSpace = [&](VAddr addr) { | 224 | auto IsInAddressSpace = [&](VAddr addr) { |
| 224 | return address_space_start <= addr && addr <= address_space_end; | 225 | return m_address_space_start <= addr && addr <= m_address_space_end; |
| 225 | }; | 226 | }; |
| 226 | ASSERT(IsInAddressSpace(alias_region_start)); | 227 | ASSERT(IsInAddressSpace(m_alias_region_start)); |
| 227 | ASSERT(IsInAddressSpace(alias_region_end)); | 228 | ASSERT(IsInAddressSpace(m_alias_region_end)); |
| 228 | ASSERT(IsInAddressSpace(heap_region_start)); | 229 | ASSERT(IsInAddressSpace(m_heap_region_start)); |
| 229 | ASSERT(IsInAddressSpace(heap_region_end)); | 230 | ASSERT(IsInAddressSpace(m_heap_region_end)); |
| 230 | ASSERT(IsInAddressSpace(stack_region_start)); | 231 | ASSERT(IsInAddressSpace(m_stack_region_start)); |
| 231 | ASSERT(IsInAddressSpace(stack_region_end)); | 232 | ASSERT(IsInAddressSpace(m_stack_region_end)); |
| 232 | ASSERT(IsInAddressSpace(kernel_map_region_start)); | 233 | ASSERT(IsInAddressSpace(m_kernel_map_region_start)); |
| 233 | ASSERT(IsInAddressSpace(kernel_map_region_end)); | 234 | ASSERT(IsInAddressSpace(m_kernel_map_region_end)); |
| 234 | 235 | ||
| 235 | // Ensure that we selected regions that don't overlap | 236 | // Ensure that we selected regions that don't overlap |
| 236 | const VAddr alias_start{alias_region_start}; | 237 | const VAddr alias_start{m_alias_region_start}; |
| 237 | const VAddr alias_last{alias_region_end - 1}; | 238 | const VAddr alias_last{m_alias_region_end - 1}; |
| 238 | const VAddr heap_start{heap_region_start}; | 239 | const VAddr heap_start{m_heap_region_start}; |
| 239 | const VAddr heap_last{heap_region_end - 1}; | 240 | const VAddr heap_last{m_heap_region_end - 1}; |
| 240 | const VAddr stack_start{stack_region_start}; | 241 | const VAddr stack_start{m_stack_region_start}; |
| 241 | const VAddr stack_last{stack_region_end - 1}; | 242 | const VAddr stack_last{m_stack_region_end - 1}; |
| 242 | const VAddr kmap_start{kernel_map_region_start}; | 243 | const VAddr kmap_start{m_kernel_map_region_start}; |
| 243 | const VAddr kmap_last{kernel_map_region_end - 1}; | 244 | const VAddr kmap_last{m_kernel_map_region_end - 1}; |
| 244 | ASSERT(alias_last < heap_start || heap_last < alias_start); | 245 | ASSERT(alias_last < heap_start || heap_last < alias_start); |
| 245 | ASSERT(alias_last < stack_start || stack_last < alias_start); | 246 | ASSERT(alias_last < stack_start || stack_last < alias_start); |
| 246 | ASSERT(alias_last < kmap_start || kmap_last < alias_start); | 247 | ASSERT(alias_last < kmap_start || kmap_last < alias_start); |
| 247 | ASSERT(heap_last < stack_start || stack_last < heap_start); | 248 | ASSERT(heap_last < stack_start || stack_last < heap_start); |
| 248 | ASSERT(heap_last < kmap_start || kmap_last < heap_start); | 249 | ASSERT(heap_last < kmap_start || kmap_last < heap_start); |
| 249 | 250 | ||
| 250 | current_heap_end = heap_region_start; | 251 | m_current_heap_end = m_heap_region_start; |
| 251 | max_heap_size = 0; | 252 | m_max_heap_size = 0; |
| 252 | mapped_physical_memory_size = 0; | 253 | m_mapped_physical_memory_size = 0; |
| 253 | memory_pool = pool; | 254 | m_memory_pool = pool; |
| 255 | |||
| 256 | m_page_table_impl = std::make_unique<Common::PageTable>(); | ||
| 257 | m_page_table_impl->Resize(m_address_space_width, PageBits); | ||
| 258 | |||
| 259 | // Initialize our memory block manager. | ||
| 260 | R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, | ||
| 261 | m_memory_block_slab_manager)); | ||
| 262 | } | ||
| 254 | 263 | ||
| 255 | page_table_impl.Resize(address_space_width, PageBits); | 264 | void KPageTable::Finalize() { |
| 265 | // Finalize memory blocks. | ||
| 266 | m_memory_block_manager.Finalize(m_memory_block_slab_manager, [&](VAddr addr, u64 size) { | ||
| 267 | m_system.Memory().UnmapRegion(*m_page_table_impl, addr, size); | ||
| 268 | }); | ||
| 256 | 269 | ||
| 257 | return InitializeMemoryLayout(start, end); | 270 | // Close the backing page table, as the destructor is not called for guest objects. |
| 271 | m_page_table_impl.reset(); | ||
| 258 | } | 272 | } |
| 259 | 273 | ||
| 260 | Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryState state, | 274 | Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState state, |
| 261 | KMemoryPermission perm) { | 275 | KMemoryPermission perm) { |
| 262 | const u64 size{num_pages * PageSize}; | 276 | const u64 size{num_pages * PageSize}; |
| 263 | 277 | ||
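The non-overlap asserts in this hunk all use the same closed-interval test on last = end - 1 values, which stays correct even when a region ends at the very top of the address space. A compile-time check of the predicate:

```cpp
#include <cstdint>

// Two closed ranges [a, a_last] and [b, b_last] are disjoint iff one ends
// before the other begins.
constexpr bool Disjoint(std::uint64_t a, std::uint64_t a_last, std::uint64_t b,
                        std::uint64_t b_last) {
    return a_last < b || b_last < a;
}

static_assert(Disjoint(0x1000, 0x1fff, 0x2000, 0x2fff));  // Adjacent, no overlap.
static_assert(!Disjoint(0x1000, 0x2fff, 0x2000, 0x3fff)); // Overlapping.
```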
| @@ -265,52 +279,76 @@ Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryStat | |||
| 265 | R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory); | 279 | R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory); |
| 266 | 280 | ||
| 267 | // Lock the table. | 281 | // Lock the table. |
| 268 | KScopedLightLock lk(general_lock); | 282 | KScopedLightLock lk(m_general_lock); |
| 269 | 283 | ||
| 270 | // Verify that the destination memory is unmapped. | 284 | // Verify that the destination memory is unmapped. |
| 271 | R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free, | 285 | R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free, |
| 272 | KMemoryPermission::None, KMemoryPermission::None, | 286 | KMemoryPermission::None, KMemoryPermission::None, |
| 273 | KMemoryAttribute::None, KMemoryAttribute::None)); | 287 | KMemoryAttribute::None, KMemoryAttribute::None)); |
| 288 | |||
| 289 | // Create an update allocator. | ||
| 290 | Result allocator_result{ResultSuccess}; | ||
| 291 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 292 | m_memory_block_slab_manager); | ||
| 293 | |||
| 294 | // Allocate and open. | ||
| 274 | KPageGroup pg; | 295 | KPageGroup pg; |
| 275 | R_TRY(system.Kernel().MemoryManager().AllocateAndOpen( | 296 | R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen( |
| 276 | &pg, num_pages, | 297 | &pg, num_pages, |
| 277 | KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, allocation_option))); | 298 | KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocation_option))); |
| 278 | 299 | ||
| 279 | R_TRY(Operate(addr, num_pages, pg, OperationType::MapGroup)); | 300 | R_TRY(Operate(addr, num_pages, pg, OperationType::MapGroup)); |
| 280 | 301 | ||
| 281 | block_manager->Update(addr, num_pages, state, perm); | 302 | // Update the blocks. |
| 303 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, | ||
| 304 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||
| 305 | KMemoryBlockDisableMergeAttribute::None); | ||
| 282 | 306 | ||
| 283 | return ResultSuccess; | 307 | R_SUCCEED(); |
| 284 | } | 308 | } |
| 285 | 309 | ||
| 286 | Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) { | 310 | Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size) { |
| 287 | // Validate the mapping request. | 311 | // Validate the mapping request. |
| 288 | R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), | 312 | R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), |
| 289 | ResultInvalidMemoryRegion); | 313 | ResultInvalidMemoryRegion); |
| 290 | 314 | ||
| 291 | // Lock the table. | 315 | // Lock the table. |
| 292 | KScopedLightLock lk(general_lock); | 316 | KScopedLightLock lk(m_general_lock); |
| 293 | 317 | ||
| 294 | // Verify that the source memory is normal heap. | 318 | // Verify that the source memory is normal heap. |
| 295 | KMemoryState src_state{}; | 319 | KMemoryState src_state{}; |
| 296 | KMemoryPermission src_perm{}; | 320 | KMemoryPermission src_perm{}; |
| 297 | std::size_t num_src_allocator_blocks{}; | 321 | size_t num_src_allocator_blocks{}; |
| 298 | R_TRY(this->CheckMemoryState(&src_state, &src_perm, nullptr, &num_src_allocator_blocks, | 322 | R_TRY(this->CheckMemoryState(&src_state, &src_perm, nullptr, &num_src_allocator_blocks, |
| 299 | src_address, size, KMemoryState::All, KMemoryState::Normal, | 323 | src_address, size, KMemoryState::All, KMemoryState::Normal, |
| 300 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, | 324 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, |
| 301 | KMemoryAttribute::All, KMemoryAttribute::None)); | 325 | KMemoryAttribute::All, KMemoryAttribute::None)); |
| 302 | 326 | ||
| 303 | // Verify that the destination memory is unmapped. | 327 | // Verify that the destination memory is unmapped. |
| 304 | std::size_t num_dst_allocator_blocks{}; | 328 | size_t num_dst_allocator_blocks{}; |
| 305 | R_TRY(this->CheckMemoryState(&num_dst_allocator_blocks, dst_address, size, KMemoryState::All, | 329 | R_TRY(this->CheckMemoryState(&num_dst_allocator_blocks, dst_address, size, KMemoryState::All, |
| 306 | KMemoryState::Free, KMemoryPermission::None, | 330 | KMemoryState::Free, KMemoryPermission::None, |
| 307 | KMemoryPermission::None, KMemoryAttribute::None, | 331 | KMemoryPermission::None, KMemoryAttribute::None, |
| 308 | KMemoryAttribute::None)); | 332 | KMemoryAttribute::None)); |
| 309 | 333 | ||
| 334 | // Create an update allocator for the source. | ||
| 335 | Result src_allocator_result{ResultSuccess}; | ||
| 336 | KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), | ||
| 337 | m_memory_block_slab_manager, | ||
| 338 | num_src_allocator_blocks); | ||
| 339 | R_TRY(src_allocator_result); | ||
| 340 | |||
| 341 | // Create an update allocator for the destination. | ||
| 342 | Result dst_allocator_result{ResultSuccess}; | ||
| 343 | KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), | ||
| 344 | m_memory_block_slab_manager, | ||
| 345 | num_dst_allocator_blocks); | ||
| 346 | R_TRY(dst_allocator_result); | ||
| 347 | |||
| 310 | // Map the code memory. | 348 | // Map the code memory. |
| 311 | { | 349 | { |
| 312 | // Determine the number of pages being operated on. | 350 | // Determine the number of pages being operated on. |
| 313 | const std::size_t num_pages = size / PageSize; | 351 | const size_t num_pages = size / PageSize; |
| 314 | 352 | ||
| 315 | // Create page groups for the memory being mapped. | 353 | // Create page groups for the memory being mapped. |
| 316 | KPageGroup pg; | 354 | KPageGroup pg; |
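MapProcessCode now ends in R_SUCCEED(), and the new checks lean on R_UNLESS and R_TRY. A simplified model of what these macros expand to, assuming a Result type where zero means success; yuzu's real macros carry more bookkeeping, so this is only the control-flow shape:

```cpp
struct Result {
    int value;
    constexpr bool IsError() const { return value != 0; }
};

#define R_SUCCEED() return Result{0}
#define R_RETURN(expr) return (expr)
#define R_UNLESS(cond, bad_result) \
    do { \
        if (!(cond)) { \
            return (bad_result); \
        } \
    } while (0)
#define R_TRY(expr) \
    do { \
        if (const Result r_try_tmp = (expr); r_try_tmp.IsError()) { \
            return r_try_tmp; \
        } \
    } while (0)
```

Because every macro is an early return, the update allocators above are declared before any mutation: if reservation fails, the function exits with nothing changed.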
| @@ -335,33 +373,37 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size | |||
| 335 | unprot_guard.Cancel(); | 373 | unprot_guard.Cancel(); |
| 336 | 374 | ||
| 337 | // Apply the memory block updates. | 375 | // Apply the memory block updates. |
| 338 | block_manager->Update(src_address, num_pages, src_state, new_perm, | 376 | m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, |
| 339 | KMemoryAttribute::Locked); | 377 | src_state, new_perm, KMemoryAttribute::Locked, |
| 340 | block_manager->Update(dst_address, num_pages, KMemoryState::AliasCode, new_perm, | 378 | KMemoryBlockDisableMergeAttribute::Locked, |
| 341 | KMemoryAttribute::None); | 379 | KMemoryBlockDisableMergeAttribute::None); |
| 380 | m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, | ||
| 381 | KMemoryState::AliasCode, new_perm, KMemoryAttribute::None, | ||
| 382 | KMemoryBlockDisableMergeAttribute::Normal, | ||
| 383 | KMemoryBlockDisableMergeAttribute::None); | ||
| 342 | } | 384 | } |
| 343 | 385 | ||
| 344 | return ResultSuccess; | 386 | R_SUCCEED(); |
| 345 | } | 387 | } |
| 346 | 388 | ||
| 347 | Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size, | 389 | Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size, |
| 348 | ICacheInvalidationStrategy icache_invalidation_strategy) { | 390 | ICacheInvalidationStrategy icache_invalidation_strategy) { |
| 349 | // Validate the mapping request. | 391 | // Validate the mapping request. |
| 350 | R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), | 392 | R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), |
| 351 | ResultInvalidMemoryRegion); | 393 | ResultInvalidMemoryRegion); |
| 352 | 394 | ||
| 353 | // Lock the table. | 395 | // Lock the table. |
| 354 | KScopedLightLock lk(general_lock); | 396 | KScopedLightLock lk(m_general_lock); |
| 355 | 397 | ||
| 356 | // Verify that the source memory is locked normal heap. | 398 | // Verify that the source memory is locked normal heap. |
| 357 | std::size_t num_src_allocator_blocks{}; | 399 | size_t num_src_allocator_blocks{}; |
| 358 | R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size, | 400 | R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size, |
| 359 | KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None, | 401 | KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None, |
| 360 | KMemoryPermission::None, KMemoryAttribute::All, | 402 | KMemoryPermission::None, KMemoryAttribute::All, |
| 361 | KMemoryAttribute::Locked)); | 403 | KMemoryAttribute::Locked)); |
| 362 | 404 | ||
| 363 | // Verify that the destination memory is aliasable code. | 405 | // Verify that the destination memory is aliasable code. |
| 364 | std::size_t num_dst_allocator_blocks{}; | 406 | size_t num_dst_allocator_blocks{}; |
| 365 | R_TRY(this->CheckMemoryStateContiguous( | 407 | R_TRY(this->CheckMemoryStateContiguous( |
| 366 | std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias, | 408 | std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias, |
| 367 | KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None, | 409 | KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None, |
| @@ -370,7 +412,7 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si | |||
| 370 | // Determine whether any pages being unmapped are code. | 412 | // Determine whether any pages being unmapped are code. |
| 371 | bool any_code_pages = false; | 413 | bool any_code_pages = false; |
| 372 | { | 414 | { |
| 373 | KMemoryBlockManager::const_iterator it = block_manager->FindIterator(dst_address); | 415 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address); |
| 374 | while (true) { | 416 | while (true) { |
| 375 | // Get the memory info. | 417 | // Get the memory info. |
| 376 | const KMemoryInfo info = it->GetMemoryInfo(); | 418 | const KMemoryInfo info = it->GetMemoryInfo(); |
| @@ -396,9 +438,9 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si | |||
| 396 | SCOPE_EXIT({ | 438 | SCOPE_EXIT({ |
| 397 | if (reprotected_pages && any_code_pages) { | 439 | if (reprotected_pages && any_code_pages) { |
| 398 | if (icache_invalidation_strategy == ICacheInvalidationStrategy::InvalidateRange) { | 440 | if (icache_invalidation_strategy == ICacheInvalidationStrategy::InvalidateRange) { |
| 399 | system.InvalidateCpuInstructionCacheRange(dst_address, size); | 441 | m_system.InvalidateCpuInstructionCacheRange(dst_address, size); |
| 400 | } else { | 442 | } else { |
| 401 | system.InvalidateCpuInstructionCaches(); | 443 | m_system.InvalidateCpuInstructionCaches(); |
| 402 | } | 444 | } |
| 403 | } | 445 | } |
| 404 | }); | 446 | }); |
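SCOPE_EXIT defers the instruction-cache invalidation to whichever path leaves the function once reprotected_pages is set. A minimal stand-in for the idiom; yuzu ships its own macro in common, so this is illustrative only:

```cpp
#include <utility>

template <typename F>
class ScopeExit {
public:
    explicit ScopeExit(F&& f) : m_func(std::forward<F>(f)) {}
    ~ScopeExit() {
        m_func(); // Runs on every path out of the enclosing scope.
    }
    ScopeExit(const ScopeExit&) = delete;
    ScopeExit& operator=(const ScopeExit&) = delete;

private:
    F m_func;
};

// Usage: ScopeExit guard{[&] { invalidate_icache(); }};
```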
| @@ -406,7 +448,21 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si | |||
| 406 | // Unmap. | 448 | // Unmap. |
| 407 | { | 449 | { |
| 408 | // Determine the number of pages being operated on. | 450 | // Determine the number of pages being operated on. |
| 409 | const std::size_t num_pages = size / PageSize; | 451 | const size_t num_pages = size / PageSize; |
| 452 | |||
| 453 | // Create an update allocator for the source. | ||
| 454 | Result src_allocator_result{ResultSuccess}; | ||
| 455 | KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), | ||
| 456 | m_memory_block_slab_manager, | ||
| 457 | num_src_allocator_blocks); | ||
| 458 | R_TRY(src_allocator_result); | ||
| 459 | |||
| 460 | // Create an update allocator for the destination. | ||
| 461 | Result dst_allocator_result{ResultSuccess}; | ||
| 462 | KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), | ||
| 463 | m_memory_block_slab_manager, | ||
| 464 | num_dst_allocator_blocks); | ||
| 465 | R_TRY(dst_allocator_result); | ||
| 410 | 466 | ||
| 411 | // Unmap the aliased copy of the pages. | 467 | // Unmap the aliased copy of the pages. |
| 412 | R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap)); | 468 | R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap)); |
| @@ -416,73 +472,34 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si | |||
| 416 | OperationType::ChangePermissions)); | 472 | OperationType::ChangePermissions)); |
| 417 | 473 | ||
| 418 | // Apply the memory block updates. | 474 | // Apply the memory block updates. |
| 419 | block_manager->Update(dst_address, num_pages, KMemoryState::None); | 475 | m_memory_block_manager.Update( |
| 420 | block_manager->Update(src_address, num_pages, KMemoryState::Normal, | 476 | std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None, |
| 421 | KMemoryPermission::UserReadWrite); | 477 | KMemoryPermission::None, KMemoryAttribute::None, |
| 478 | KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal); | ||
| 479 | m_memory_block_manager.Update( | ||
| 480 | std::addressof(src_allocator), src_address, num_pages, KMemoryState::Normal, | ||
| 481 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, | ||
| 482 | KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked); | ||
| 422 | 483 | ||
| 423 | // Note that we reprotected pages. | 484 | // Note that we reprotected pages. |
| 424 | reprotected_pages = true; | 485 | reprotected_pages = true; |
| 425 | } | 486 | } |
| 426 | 487 | ||
| 427 | return ResultSuccess; | 488 | R_SUCCEED(); |
| 428 | } | 489 | } |
| 429 | 490 | ||
| 430 | VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages, | 491 | VAddr KPageTable::FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages, |
| 431 | std::size_t num_pages, std::size_t alignment, std::size_t offset, | 492 | size_t alignment, size_t offset, size_t guard_pages) { |
| 432 | std::size_t guard_pages) { | ||
| 433 | VAddr address = 0; | 493 | VAddr address = 0; |
| 434 | 494 | ||
| 435 | if (num_pages <= region_num_pages) { | 495 | if (num_pages <= region_num_pages) { |
| 436 | if (this->IsAslrEnabled()) { | 496 | if (this->IsAslrEnabled()) { |
| 437 | // Try to directly find a free area up to 8 times. | 497 | UNIMPLEMENTED(); |
| 438 | for (std::size_t i = 0; i < 8; i++) { | ||
| 439 | const std::size_t random_offset = | ||
| 440 | KSystemControl::GenerateRandomRange( | ||
| 441 | 0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) * | ||
| 442 | alignment; | ||
| 443 | const VAddr candidate = | ||
| 444 | Common::AlignDown((region_start + random_offset), alignment) + offset; | ||
| 445 | |||
| 446 | KMemoryInfo info = this->QueryInfoImpl(candidate); | ||
| 447 | |||
| 448 | if (info.state != KMemoryState::Free) { | ||
| 449 | continue; | ||
| 450 | } | ||
| 451 | if (region_start > candidate) { | ||
| 452 | continue; | ||
| 453 | } | ||
| 454 | if (info.GetAddress() + guard_pages * PageSize > candidate) { | ||
| 455 | continue; | ||
| 456 | } | ||
| 457 | |||
| 458 | const VAddr candidate_end = candidate + (num_pages + guard_pages) * PageSize - 1; | ||
| 459 | if (candidate_end > info.GetLastAddress()) { | ||
| 460 | continue; | ||
| 461 | } | ||
| 462 | if (candidate_end > region_start + region_num_pages * PageSize - 1) { | ||
| 463 | continue; | ||
| 464 | } | ||
| 465 | |||
| 466 | address = candidate; | ||
| 467 | break; | ||
| 468 | } | ||
| 469 | // Fall back to finding the first free area with a random offset. | ||
| 470 | if (address == 0) { | ||
| 471 | // NOTE: Nintendo does not account for guard pages here. | ||
| 472 | // This may theoretically cause an offset to be chosen that cannot be mapped. We | ||
| 473 | // will account for guard pages. | ||
| 474 | const std::size_t offset_pages = KSystemControl::GenerateRandomRange( | ||
| 475 | 0, region_num_pages - num_pages - guard_pages); | ||
| 476 | address = block_manager->FindFreeArea(region_start + offset_pages * PageSize, | ||
| 477 | region_num_pages - offset_pages, num_pages, | ||
| 478 | alignment, offset, guard_pages); | ||
| 479 | } | ||
| 480 | } | 498 | } |
| 481 | |||
| 482 | // Find the first free area. | 499 | // Find the first free area. |
| 483 | if (address == 0) { | 500 | if (address == 0) { |
| 484 | address = block_manager->FindFreeArea(region_start, region_num_pages, num_pages, | 501 | address = m_memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages, |
| 485 | alignment, offset, guard_pages); | 502 | alignment, offset, guard_pages); |
| 486 | } | 503 | } |
| 487 | } | 504 | } |
| 488 | 505 | ||
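With the randomized candidate search reduced to UNIMPLEMENTED for ASLR, the remaining path is a first-fit scan over the block tree. A sketch of such a scan over a hypothetical flat region list, honoring alignment and guard pages; the real FindFreeArea walks the intrusive tree instead:

```cpp
#include <cstdint>
#include <vector>

constexpr std::uint64_t PageSize = 0x1000;

struct Region {
    std::uint64_t address;
    std::uint64_t num_pages;
    bool free;
};

// First-fit: return the first aligned address with num_pages of room and
// guard_pages of slack on both sides, or 0 if nothing fits.
std::uint64_t FindFreeArea(const std::vector<Region>& regions, std::uint64_t num_pages,
                           std::uint64_t alignment, std::uint64_t guard_pages) {
    for (const Region& r : regions) {
        if (!r.free) {
            continue;
        }
        // Skip the leading guard pages, then align the candidate up.
        const std::uint64_t base = r.address + guard_pages * PageSize;
        const std::uint64_t candidate = (base + alignment - 1) / alignment * alignment;
        const std::uint64_t region_end = r.address + r.num_pages * PageSize;
        if (candidate + (num_pages + guard_pages) * PageSize <= region_end) {
            return candidate;
        }
    }
    return 0;
}
```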
| @@ -500,7 +517,8 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) { | |||
| 500 | // Begin traversal. | 517 | // Begin traversal. |
| 501 | Common::PageTable::TraversalContext context; | 518 | Common::PageTable::TraversalContext context; |
| 502 | Common::PageTable::TraversalEntry next_entry; | 519 | Common::PageTable::TraversalEntry next_entry; |
| 503 | R_UNLESS(page_table_impl.BeginTraversal(next_entry, context, addr), ResultInvalidCurrentMemory); | 520 | R_UNLESS(m_page_table_impl->BeginTraversal(next_entry, context, addr), |
| 521 | ResultInvalidCurrentMemory); | ||
| 504 | 522 | ||
| 505 | // Prepare tracking variables. | 523 | // Prepare tracking variables. |
| 506 | PAddr cur_addr = next_entry.phys_addr; | 524 | PAddr cur_addr = next_entry.phys_addr; |
| @@ -508,9 +526,9 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) { | |||
| 508 | size_t tot_size = cur_size; | 526 | size_t tot_size = cur_size; |
| 509 | 527 | ||
| 510 | // Iterate, adding to group as we go. | 528 | // Iterate, adding to group as we go. |
| 511 | const auto& memory_layout = system.Kernel().MemoryLayout(); | 529 | const auto& memory_layout = m_system.Kernel().MemoryLayout(); |
| 512 | while (tot_size < size) { | 530 | while (tot_size < size) { |
| 513 | R_UNLESS(page_table_impl.ContinueTraversal(next_entry, context), | 531 | R_UNLESS(m_page_table_impl->ContinueTraversal(next_entry, context), |
| 514 | ResultInvalidCurrentMemory); | 532 | ResultInvalidCurrentMemory); |
| 515 | 533 | ||
| 516 | if (next_entry.phys_addr != (cur_addr + cur_size)) { | 534 | if (next_entry.phys_addr != (cur_addr + cur_size)) { |
| @@ -538,7 +556,7 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) { | |||
| 538 | R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory); | 556 | R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory); |
| 539 | R_TRY(pg.AddBlock(cur_addr, cur_pages)); | 557 | R_TRY(pg.AddBlock(cur_addr, cur_pages)); |
| 540 | 558 | ||
| 541 | return ResultSuccess; | 559 | R_SUCCEED(); |
| 542 | } | 560 | } |
| 543 | 561 | ||
| 544 | bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) { | 562 | bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) { |
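MakePageGroup turns a page-table traversal into runs of physically contiguous pages: extend the current block while addresses stay adjacent, start a new one when they break. The same coalescing logic over a plain list of per-page physical addresses (illustrative types, not KPageGroup):

```cpp
#include <cstdint>
#include <utility>
#include <vector>

constexpr std::uint64_t PageSize = 0x1000;

// Emit (phys_addr, num_pages) blocks, merging physically contiguous pages.
std::vector<std::pair<std::uint64_t, std::uint64_t>> Coalesce(
    const std::vector<std::uint64_t>& phys_pages) {
    std::vector<std::pair<std::uint64_t, std::uint64_t>> blocks;
    for (const std::uint64_t pa : phys_pages) {
        if (!blocks.empty() && blocks.back().first + blocks.back().second * PageSize == pa) {
            ++blocks.back().second; // Extends the current contiguous run.
        } else {
            blocks.emplace_back(pa, 1); // Contiguity broke; start a new block.
        }
    }
    return blocks;
}
```

IsValidPageGroup then replays the same traversal and compares the expected runs against the group, block by block.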
| @@ -546,7 +564,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu | |||
| 546 | 564 | ||
| 547 | const size_t size = num_pages * PageSize; | 565 | const size_t size = num_pages * PageSize; |
| 548 | const auto& pg = pg_ll.Nodes(); | 566 | const auto& pg = pg_ll.Nodes(); |
| 549 | const auto& memory_layout = system.Kernel().MemoryLayout(); | 567 | const auto& memory_layout = m_system.Kernel().MemoryLayout(); |
| 550 | 568 | ||
| 551 | // Empty groups are necessarily invalid. | 569 | // Empty groups are necessarily invalid. |
| 552 | if (pg.empty()) { | 570 | if (pg.empty()) { |
| @@ -573,7 +591,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu | |||
| 573 | // Begin traversal. | 591 | // Begin traversal. |
| 574 | Common::PageTable::TraversalContext context; | 592 | Common::PageTable::TraversalContext context; |
| 575 | Common::PageTable::TraversalEntry next_entry; | 593 | Common::PageTable::TraversalEntry next_entry; |
| 576 | if (!page_table_impl.BeginTraversal(next_entry, context, addr)) { | 594 | if (!m_page_table_impl->BeginTraversal(next_entry, context, addr)) { |
| 577 | return false; | 595 | return false; |
| 578 | } | 596 | } |
| 579 | 597 | ||
| @@ -584,7 +602,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu | |||
| 584 | 602 | ||
| 585 | // Iterate, comparing expected to actual. | 603 | // Iterate, comparing expected to actual. |
| 586 | while (tot_size < size) { | 604 | while (tot_size < size) { |
| 587 | if (!page_table_impl.ContinueTraversal(next_entry, context)) { | 605 | if (!m_page_table_impl->ContinueTraversal(next_entry, context)) { |
| 588 | return false; | 606 | return false; |
| 589 | } | 607 | } |
| 590 | 608 | ||
| @@ -630,11 +648,11 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu | |||
| 630 | return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize); | 648 | return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize); |
| 631 | } | 649 | } |
| 632 | 650 | ||
| 633 | Result KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table, | 651 | Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table, |
| 634 | VAddr src_addr) { | 652 | VAddr src_addr) { |
| 635 | KScopedLightLock lk(general_lock); | 653 | KScopedLightLock lk(m_general_lock); |
| 636 | 654 | ||
| 637 | const std::size_t num_pages{size / PageSize}; | 655 | const size_t num_pages{size / PageSize}; |
| 638 | 656 | ||
| 639 | // Check that the memory is mapped in the destination process. | 657 | // Check that the memory is mapped in the destination process. |
| 640 | size_t num_allocator_blocks; | 658 | size_t num_allocator_blocks; |
| @@ -649,43 +667,51 @@ Result KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTab | |||
| 649 | KMemoryPermission::None, KMemoryAttribute::All, | 667 | KMemoryPermission::None, KMemoryAttribute::All, |
| 650 | KMemoryAttribute::None)); | 668 | KMemoryAttribute::None)); |
| 651 | 669 | ||
| 670 | // Create an update allocator. | ||
| 671 | Result allocator_result{ResultSuccess}; | ||
| 672 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 673 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 674 | R_TRY(allocator_result); | ||
| 675 | |||
| 652 | CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap)); | 676 | CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap)); |
| 653 | 677 | ||
| 654 | // Apply the memory block update. | 678 | // Apply the memory block update. |
| 655 | block_manager->Update(dst_addr, num_pages, KMemoryState::Free, KMemoryPermission::None, | 679 | m_memory_block_manager.Update(std::addressof(allocator), dst_addr, num_pages, |
| 656 | KMemoryAttribute::None); | 680 | KMemoryState::Free, KMemoryPermission::None, |
| 681 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 682 | KMemoryBlockDisableMergeAttribute::Normal); | ||
| 657 | 683 | ||
| 658 | system.InvalidateCpuInstructionCaches(); | 684 | m_system.InvalidateCpuInstructionCaches(); |
| 659 | 685 | ||
| 660 | return ResultSuccess; | 686 | R_SUCCEED(); |
| 661 | } | 687 | } |
| 662 | 688 | ||
| 663 | Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { | 689 | Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { |
| 664 | // Lock the physical memory lock. | 690 | // Lock the physical memory lock. |
| 665 | KScopedLightLock map_phys_mem_lk(map_physical_memory_lock); | 691 | KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); |
| 666 | 692 | ||
| 667 | // Calculate the last address for convenience. | 693 | // Calculate the last address for convenience. |
| 668 | const VAddr last_address = address + size - 1; | 694 | const VAddr last_address = address + size - 1; |
| 669 | 695 | ||
| 670 | // Define iteration variables. | 696 | // Define iteration variables. |
| 671 | VAddr cur_address; | 697 | VAddr cur_address; |
| 672 | std::size_t mapped_size; | 698 | size_t mapped_size; |
| 673 | 699 | ||
| 674 | // The entire mapping process can be retried. | 700 | // The entire mapping process can be retried. |
| 675 | while (true) { | 701 | while (true) { |
| 676 | // Check if the memory is already mapped. | 702 | // Check if the memory is already mapped. |
| 677 | { | 703 | { |
| 678 | // Lock the table. | 704 | // Lock the table. |
| 679 | KScopedLightLock lk(general_lock); | 705 | KScopedLightLock lk(m_general_lock); |
| 680 | 706 | ||
| 681 | // Iterate over the memory. | 707 | // Iterate over the memory. |
| 682 | cur_address = address; | 708 | cur_address = address; |
| 683 | mapped_size = 0; | 709 | mapped_size = 0; |
| 684 | 710 | ||
| 685 | auto it = block_manager->FindIterator(cur_address); | 711 | auto it = m_memory_block_manager.FindIterator(cur_address); |
| 686 | while (true) { | 712 | while (true) { |
| 687 | // Check that the iterator is valid. | 713 | // Check that the iterator is valid. |
| 688 | ASSERT(it != block_manager->end()); | 714 | ASSERT(it != m_memory_block_manager.end()); |
| 689 | 715 | ||
| 690 | // Get the memory info. | 716 | // Get the memory info. |
| 691 | const KMemoryInfo info = it->GetMemoryInfo(); | 717 | const KMemoryInfo info = it->GetMemoryInfo(); |
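The KMemoryBlockManagerUpdateAllocator that this commit threads through every operation exists so a block-list update cannot fail halfway: the constructor reserves every node the update might need and reports failure through an out Result, which callers immediately R_TRY before touching the table. A toy analogue under assumed semantics (the node type, MaxBlocks template parameter, and the bool-based error reporting are invented for illustration; this is not yuzu's implementation):

    #include <array>
    #include <cstddef>

    // Toy pre-reserving allocator: every possible failure happens in the
    // constructor, so the block-list rewrite that follows is infallible.
    template <typename Node, std::size_t MaxBlocks>
    class UpdateAllocator {
    public:
        UpdateAllocator(bool* out_ok, std::size_t num_blocks) {
            // Mirrors "Result allocator_result; ...; R_TRY(allocator_result);"
            *out_ok = num_blocks <= MaxBlocks;
            m_reserved = *out_ok ? num_blocks : 0;
        }

        // Hands out one pre-reserved node; never allocates at update time.
        Node* Allocate() {
            return m_used < m_reserved ? &m_storage[m_used++] : nullptr;
        }

    private:
        std::array<Node, MaxBlocks> m_storage{};
        std::size_t m_reserved = 0;
        std::size_t m_used = 0;
    };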
| @@ -716,20 +742,20 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { | |||
| 716 | { | 742 | { |
| 717 | // Reserve the memory from the process resource limit. | 743 | // Reserve the memory from the process resource limit. |
| 718 | KScopedResourceReservation memory_reservation( | 744 | KScopedResourceReservation memory_reservation( |
| 719 | system.Kernel().CurrentProcess()->GetResourceLimit(), | 745 | m_system.Kernel().CurrentProcess()->GetResourceLimit(), |
| 720 | LimitableResource::PhysicalMemory, size - mapped_size); | 746 | LimitableResource::PhysicalMemory, size - mapped_size); |
| 721 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | 747 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); |
| 722 | 748 | ||
| 723 | // Allocate pages for the new memory. | 749 | // Allocate pages for the new memory. |
| 724 | KPageGroup pg; | 750 | KPageGroup pg; |
| 725 | R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess( | 751 | R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess( |
| 726 | &pg, (size - mapped_size) / PageSize, | 752 | &pg, (size - mapped_size) / PageSize, |
| 727 | KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0)); | 753 | KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0)); |
| 728 | 754 | ||
| 729 | // Map the memory. | 755 | // Map the memory. |
| 730 | { | 756 | { |
| 731 | // Lock the table. | 757 | // Lock the table. |
| 732 | KScopedLightLock lk(general_lock); | 758 | KScopedLightLock lk(m_general_lock); |
| 733 | 759 | ||
| 734 | size_t num_allocator_blocks = 0; | 760 | size_t num_allocator_blocks = 0; |
| 735 | 761 | ||
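KScopedResourceReservation, used above for the physical-memory charge, is a commit-or-release guard: the reservation is taken against the process resource limit up front, and it is automatically returned unless Commit() is reached on the success path (as it is further down, once the mapping succeeds). A minimal sketch of the idiom, with an invented ResourceLimit stand-in:

    #include <cstddef>

    // Invented stand-in for the process resource limit.
    struct ResourceLimit {
        std::size_t used = 0;
        std::size_t cap = 0;
        bool Reserve(std::size_t n) {
            if (used + n > cap) {
                return false;
            }
            used += n;
            return true;
        }
        void Release(std::size_t n) { used -= n; }
    };

    class ScopedResourceReservation {
    public:
        ScopedResourceReservation(ResourceLimit* limit, std::size_t n)
            : m_limit(limit), m_amount(n), m_succeeded(limit->Reserve(n)) {}
        ~ScopedResourceReservation() {
            // Only a successful, uncommitted reservation is rolled back.
            if (m_succeeded && !m_committed) {
                m_limit->Release(m_amount);
            }
        }
        bool Succeeded() const { return m_succeeded; }
        void Commit() { m_committed = true; }

    private:
        ResourceLimit* m_limit;
        std::size_t m_amount;
        bool m_succeeded;
        bool m_committed = false;
    };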
| @@ -739,10 +765,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { | |||
| 739 | size_t checked_mapped_size = 0; | 765 | size_t checked_mapped_size = 0; |
| 740 | cur_address = address; | 766 | cur_address = address; |
| 741 | 767 | ||
| 742 | auto it = block_manager->FindIterator(cur_address); | 768 | auto it = m_memory_block_manager.FindIterator(cur_address); |
| 743 | while (true) { | 769 | while (true) { |
| 744 | // Check that the iterator is valid. | 770 | // Check that the iterator is valid. |
| 745 | ASSERT(it != block_manager->end()); | 771 | ASSERT(it != m_memory_block_manager.end()); |
| 746 | 772 | ||
| 747 | // Get the memory info. | 773 | // Get the memory info. |
| 748 | const KMemoryInfo info = it->GetMemoryInfo(); | 774 | const KMemoryInfo info = it->GetMemoryInfo(); |
| @@ -782,6 +808,14 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { | |||
| 782 | } | 808 | } |
| 783 | } | 809 | } |
| 784 | 810 | ||
| 811 | // Create an update allocator. | ||
| 812 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); | ||
| 813 | Result allocator_result{ResultSuccess}; | ||
| 814 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 815 | m_memory_block_slab_manager, | ||
| 816 | num_allocator_blocks); | ||
| 817 | R_TRY(allocator_result); | ||
| 818 | |||
| 785 | // Reset the current tracking address, and make sure we clean up on failure. | 819 | // Reset the current tracking address, and make sure we clean up on failure. |
| 786 | cur_address = address; | 820 | cur_address = address; |
| 787 | auto unmap_guard = detail::ScopeExit([&] { | 821 | auto unmap_guard = detail::ScopeExit([&] { |
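The unmap_guard introduced above (detail::ScopeExit plus Cancel() on the success path) is this file's idiom for "undo the partial work unless we got all the way through". A small generic version using only the standard library:

    #include <utility>

    // Runs the stored callable at scope exit unless Cancel() was called.
    template <typename F>
    class ScopeExit {
    public:
        explicit ScopeExit(F&& f) : m_f(std::forward<F>(f)) {}
        ~ScopeExit() {
            if (m_active) {
                m_f();
            }
        }
        void Cancel() { m_active = false; }

        ScopeExit(const ScopeExit&) = delete;
        ScopeExit& operator=(const ScopeExit&) = delete;

    private:
        F m_f;
        bool m_active = true;
    };

    // Usage shape, mirroring the hunk above:
    //   auto unmap_guard = ScopeExit([&] { /* undo partial mappings */ });
    //   ... fallible mapping work ...
    //   unmap_guard.Cancel(); // reached only on full success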
| @@ -791,10 +825,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { | |||
| 791 | // Iterate, unmapping the pages. | 825 | // Iterate, unmapping the pages. |
| 792 | cur_address = address; | 826 | cur_address = address; |
| 793 | 827 | ||
| 794 | auto it = block_manager->FindIterator(cur_address); | 828 | auto it = m_memory_block_manager.FindIterator(cur_address); |
| 795 | while (true) { | 829 | while (true) { |
| 796 | // Check that the iterator is valid. | 830 | // Check that the iterator is valid. |
| 797 | ASSERT(it != block_manager->end()); | 831 | ASSERT(it != m_memory_block_manager.end()); |
| 798 | 832 | ||
| 799 | // Get the memory info. | 833 | // Get the memory info. |
| 800 | const KMemoryInfo info = it->GetMemoryInfo(); | 834 | const KMemoryInfo info = it->GetMemoryInfo(); |
| @@ -830,10 +864,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { | |||
| 830 | PAddr pg_phys_addr = pg_it->GetAddress(); | 864 | PAddr pg_phys_addr = pg_it->GetAddress(); |
| 831 | size_t pg_pages = pg_it->GetNumPages(); | 865 | size_t pg_pages = pg_it->GetNumPages(); |
| 832 | 866 | ||
| 833 | auto it = block_manager->FindIterator(cur_address); | 867 | auto it = m_memory_block_manager.FindIterator(cur_address); |
| 834 | while (true) { | 868 | while (true) { |
| 835 | // Check that the iterator is valid. | 869 | // Check that the iterator is valid. |
| 836 | ASSERT(it != block_manager->end()); | 870 | ASSERT(it != m_memory_block_manager.end()); |
| 837 | 871 | ||
| 838 | // Get the memory info. | 872 | // Get the memory info. |
| 839 | const KMemoryInfo info = it->GetMemoryInfo(); | 873 | const KMemoryInfo info = it->GetMemoryInfo(); |
| @@ -886,37 +920,37 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { | |||
| 886 | memory_reservation.Commit(); | 920 | memory_reservation.Commit(); |
| 887 | 921 | ||
| 888 | // Increase our tracked mapped size. | 922 | // Increase our tracked mapped size. |
| 889 | mapped_physical_memory_size += (size - mapped_size); | 923 | m_mapped_physical_memory_size += (size - mapped_size); |
| 890 | 924 | ||
| 891 | // Update the relevant memory blocks. | 925 | // Update the relevant memory blocks. |
| 892 | block_manager->Update(address, size / PageSize, KMemoryState::Free, | 926 | m_memory_block_manager.UpdateIfMatch( |
| 893 | KMemoryPermission::None, KMemoryAttribute::None, | 927 | std::addressof(allocator), address, size / PageSize, KMemoryState::Free, |
| 894 | KMemoryState::Normal, KMemoryPermission::UserReadWrite, | 928 | KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal, |
| 895 | KMemoryAttribute::None); | 929 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None); |
| 896 | 930 | ||
| 897 | // Cancel our guard. | 931 | // Cancel our guard. |
| 898 | unmap_guard.Cancel(); | 932 | unmap_guard.Cancel(); |
| 899 | 933 | ||
| 900 | return ResultSuccess; | 934 | R_SUCCEED(); |
| 901 | } | 935 | } |
| 902 | } | 936 | } |
| 903 | } | 937 | } |
| 904 | } | 938 | } |
| 905 | 939 | ||
| 906 | Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { | 940 | Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { |
| 907 | // Lock the physical memory lock. | 941 | // Lock the physical memory lock. |
| 908 | KScopedLightLock map_phys_mem_lk(map_physical_memory_lock); | 942 | KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); |
| 909 | 943 | ||
| 910 | // Lock the table. | 944 | // Lock the table. |
| 911 | KScopedLightLock lk(general_lock); | 945 | KScopedLightLock lk(m_general_lock); |
| 912 | 946 | ||
| 913 | // Calculate the last address for convenience. | 947 | // Calculate the last address for convenience. |
| 914 | const VAddr last_address = address + size - 1; | 948 | const VAddr last_address = address + size - 1; |
| 915 | 949 | ||
| 916 | // Define iteration variables. | 950 | // Define iteration variables. |
| 917 | VAddr cur_address = 0; | 951 | VAddr cur_address = 0; |
| 918 | std::size_t mapped_size = 0; | 952 | size_t mapped_size = 0; |
| 919 | std::size_t num_allocator_blocks = 0; | 953 | size_t num_allocator_blocks = 0; |
| 920 | 954 | ||
| 921 | // Check if the memory is mapped. | 955 | // Check if the memory is mapped. |
| 922 | { | 956 | { |
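Alongside the m_-prefix renames, this commit converts return ResultSuccess; to R_SUCCEED() and CASCADE_CODE/early returns to R_TRY/R_RETURN/R_UNLESS. Roughly the control flow those macros expand to, written as stand-ins (yuzu's real definitions live in its result headers and may differ in detail):

    #include <cstdint>

    struct Result {
        std::uint32_t raw = 0;
        constexpr bool IsError() const { return raw != 0; }
    };
    inline constexpr Result ResultSuccess{};

    // Return success from a Result-returning function.
    #define R_SUCCEED() return ResultSuccess
    // Return an arbitrary result.
    #define R_RETURN(res) return (res)
    // Propagate failure from a fallible expression.
    #define R_TRY(expr)                                               \
        do {                                                          \
            if (const Result r_try_rc = (expr); r_try_rc.IsError()) { \
                return r_try_rc;                                      \
            }                                                         \
        } while (0)
    // Fail with `res` unless the condition holds.
    #define R_UNLESS(cond, res) \
        do {                    \
            if (!(cond)) {      \
                return (res);   \
            }                   \
        } while (0)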
| @@ -924,10 +958,10 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { | |||
| 924 | cur_address = address; | 958 | cur_address = address; |
| 925 | mapped_size = 0; | 959 | mapped_size = 0; |
| 926 | 960 | ||
| 927 | auto it = block_manager->FindIterator(cur_address); | 961 | auto it = m_memory_block_manager.FindIterator(cur_address); |
| 928 | while (true) { | 962 | while (true) { |
| 929 | // Check that the iterator is valid. | 963 | // Check that the iterator is valid. |
| 930 | ASSERT(it != block_manager->end()); | 964 | ASSERT(it != m_memory_block_manager.end()); |
| 931 | 965 | ||
| 932 | // Get the memory info. | 966 | // Get the memory info. |
| 933 | const KMemoryInfo info = it->GetMemoryInfo(); | 967 | const KMemoryInfo info = it->GetMemoryInfo(); |
| @@ -1022,6 +1056,13 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { | |||
| 1022 | } | 1056 | } |
| 1023 | ASSERT(pg.GetNumPages() == mapped_size / PageSize); | 1057 | ASSERT(pg.GetNumPages() == mapped_size / PageSize); |
| 1024 | 1058 | ||
| 1059 | // Create an update allocator. | ||
| 1060 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); | ||
| 1061 | Result allocator_result{ResultSuccess}; | ||
| 1062 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1063 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 1064 | R_TRY(allocator_result); | ||
| 1065 | |||
| 1025 | // Reset the current tracking address, and make sure we clean up on failure. | 1066 | // Reset the current tracking address, and make sure we clean up on failure. |
| 1026 | cur_address = address; | 1067 | cur_address = address; |
| 1027 | auto remap_guard = detail::ScopeExit([&] { | 1068 | auto remap_guard = detail::ScopeExit([&] { |
| @@ -1030,7 +1071,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { | |||
| 1030 | cur_address = address; | 1071 | cur_address = address; |
| 1031 | 1072 | ||
| 1032 | // Iterate over the memory we unmapped. | 1073 | // Iterate over the memory we unmapped. |
| 1033 | auto it = block_manager->FindIterator(cur_address); | 1074 | auto it = m_memory_block_manager.FindIterator(cur_address); |
| 1034 | auto pg_it = pg.Nodes().begin(); | 1075 | auto pg_it = pg.Nodes().begin(); |
| 1035 | PAddr pg_phys_addr = pg_it->GetAddress(); | 1076 | PAddr pg_phys_addr = pg_it->GetAddress(); |
| 1036 | size_t pg_pages = pg_it->GetNumPages(); | 1077 | size_t pg_pages = pg_it->GetNumPages(); |
| @@ -1085,10 +1126,10 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { | |||
| 1085 | }); | 1126 | }); |
| 1086 | 1127 | ||
| 1087 | // Iterate over the memory, unmapping as we go. | 1128 | // Iterate over the memory, unmapping as we go. |
| 1088 | auto it = block_manager->FindIterator(cur_address); | 1129 | auto it = m_memory_block_manager.FindIterator(cur_address); |
| 1089 | while (true) { | 1130 | while (true) { |
| 1090 | // Check that the iterator is valid. | 1131 | // Check that the iterator is valid. |
| 1091 | ASSERT(it != block_manager->end()); | 1132 | ASSERT(it != m_memory_block_manager.end()); |
| 1092 | 1133 | ||
| 1093 | // Get the memory info. | 1134 | // Get the memory info. |
| 1094 | const KMemoryInfo info = it->GetMemoryInfo(); | 1135 | const KMemoryInfo info = it->GetMemoryInfo(); |
| @@ -1115,104 +1156,159 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { | |||
| 1115 | } | 1156 | } |
| 1116 | 1157 | ||
| 1117 | // Release the memory resource. | 1158 | // Release the memory resource. |
| 1118 | mapped_physical_memory_size -= mapped_size; | 1159 | m_mapped_physical_memory_size -= mapped_size; |
| 1119 | auto process{system.Kernel().CurrentProcess()}; | 1160 | auto process{m_system.Kernel().CurrentProcess()}; |
| 1120 | process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size); | 1161 | process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size); |
| 1121 | 1162 | ||
| 1122 | // Update memory blocks. | 1163 | // Update memory blocks. |
| 1123 | block_manager->Update(address, size / PageSize, KMemoryState::Free, KMemoryPermission::None, | 1164 | m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, |
| 1124 | KMemoryAttribute::None); | 1165 | KMemoryState::Free, KMemoryPermission::None, |
| 1166 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 1167 | KMemoryBlockDisableMergeAttribute::None); | ||
| 1125 | 1168 | ||
| 1126 | // TODO(bunnei): This is a workaround until the next set of changes, where we add reference | 1169 | // TODO(bunnei): This is a workaround until the next set of changes, where we add reference |
| 1127 | // counting for mapped pages. Until then, we must manually close the reference to the page | 1170 | // counting for mapped pages. Until then, we must manually close the reference to the page |
| 1128 | // group. | 1171 | // group. |
| 1129 | system.Kernel().MemoryManager().Close(pg); | 1172 | m_system.Kernel().MemoryManager().Close(pg); |
| 1130 | 1173 | ||
| 1131 | // We succeeded. | 1174 | // We succeeded. |
| 1132 | remap_guard.Cancel(); | 1175 | remap_guard.Cancel(); |
| 1133 | 1176 | ||
| 1134 | return ResultSuccess; | 1177 | R_SUCCEED(); |
| 1135 | } | 1178 | } |
| 1136 | 1179 | ||
| 1137 | Result KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) { | 1180 | Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size) { |
| 1138 | KScopedLightLock lk(general_lock); | 1181 | // Lock the table. |
| 1139 | 1182 | KScopedLightLock lk(m_general_lock); | |
| 1140 | KMemoryState src_state{}; | 1183 | |
| 1141 | CASCADE_CODE(CheckMemoryState( | 1184 | // Validate that the source address's state is valid. |
| 1142 | &src_state, nullptr, nullptr, nullptr, src_addr, size, KMemoryState::FlagCanAlias, | 1185 | KMemoryState src_state; |
| 1143 | KMemoryState::FlagCanAlias, KMemoryPermission::All, KMemoryPermission::UserReadWrite, | 1186 | size_t num_src_allocator_blocks; |
| 1144 | KMemoryAttribute::Mask, KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped)); | 1187 | R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr, |
| 1188 | std::addressof(num_src_allocator_blocks), src_address, size, | ||
| 1189 | KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias, | ||
| 1190 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, | ||
| 1191 | KMemoryAttribute::All, KMemoryAttribute::None)); | ||
| 1145 | 1192 | ||
| 1146 | if (IsRegionMapped(dst_addr, size)) { | 1193 | // Validate that the dst address's state is valid. |
| 1147 | return ResultInvalidCurrentMemory; | 1194 | size_t num_dst_allocator_blocks; |
| 1148 | } | 1195 | R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size, |
| 1196 | KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, | ||
| 1197 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 1198 | KMemoryAttribute::None)); | ||
| 1149 | 1199 | ||
| 1200 | // Create an update allocator for the source. | ||
| 1201 | Result src_allocator_result{ResultSuccess}; | ||
| 1202 | KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), | ||
| 1203 | m_memory_block_slab_manager, | ||
| 1204 | num_src_allocator_blocks); | ||
| 1205 | R_TRY(src_allocator_result); | ||
| 1206 | |||
| 1207 | // Create an update allocator for the destination. | ||
| 1208 | Result dst_allocator_result{ResultSuccess}; | ||
| 1209 | KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), | ||
| 1210 | m_memory_block_slab_manager, | ||
| 1211 | num_dst_allocator_blocks); | ||
| 1212 | R_TRY(dst_allocator_result); | ||
| 1213 | |||
| 1214 | // Map the memory. | ||
| 1150 | KPageGroup page_linked_list; | 1215 | KPageGroup page_linked_list; |
| 1151 | const std::size_t num_pages{size / PageSize}; | 1216 | const size_t num_pages{size / PageSize}; |
| 1152 | 1217 | const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>( | |
| 1153 | AddRegionToPages(src_addr, num_pages, page_linked_list); | 1218 | KMemoryPermission::KernelRead | KMemoryPermission::NotMapped); |
| 1219 | const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked; | ||
| 1154 | 1220 | ||
| 1221 | AddRegionToPages(src_address, num_pages, page_linked_list); | ||
| 1155 | { | 1222 | { |
| 1223 | // Reprotect the source as kernel-read/not mapped. | ||
| 1156 | auto block_guard = detail::ScopeExit([&] { | 1224 | auto block_guard = detail::ScopeExit([&] { |
| 1157 | Operate(src_addr, num_pages, KMemoryPermission::UserReadWrite, | 1225 | Operate(src_address, num_pages, KMemoryPermission::UserReadWrite, |
| 1158 | OperationType::ChangePermissions); | 1226 | OperationType::ChangePermissions); |
| 1159 | }); | 1227 | }); |
| 1160 | 1228 | R_TRY(Operate(src_address, num_pages, new_src_perm, OperationType::ChangePermissions)); | |
| 1161 | CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::None, | 1229 | R_TRY(MapPages(dst_address, page_linked_list, KMemoryPermission::UserReadWrite)); |
| 1162 | OperationType::ChangePermissions)); | ||
| 1163 | CASCADE_CODE(MapPages(dst_addr, page_linked_list, KMemoryPermission::UserReadWrite)); | ||
| 1164 | 1230 | ||
| 1165 | block_guard.Cancel(); | 1231 | block_guard.Cancel(); |
| 1166 | } | 1232 | } |
| 1167 | 1233 | ||
| 1168 | block_manager->Update(src_addr, num_pages, src_state, KMemoryPermission::None, | 1234 | // Apply the memory block updates. |
| 1169 | KMemoryAttribute::Locked); | 1235 | m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, |
| 1170 | block_manager->Update(dst_addr, num_pages, KMemoryState::Stack, | 1236 | new_src_perm, new_src_attr, |
| 1171 | KMemoryPermission::UserReadWrite); | 1237 | KMemoryBlockDisableMergeAttribute::Locked, |
| 1172 | 1238 | KMemoryBlockDisableMergeAttribute::None); | |
| 1173 | return ResultSuccess; | 1239 | m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, |
| 1240 | KMemoryState::Stack, KMemoryPermission::UserReadWrite, | ||
| 1241 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||
| 1242 | KMemoryBlockDisableMergeAttribute::None); | ||
| 1243 | |||
| 1244 | R_SUCCEED(); | ||
| 1174 | } | 1245 | } |
| 1175 | 1246 | ||
| 1176 | Result KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) { | 1247 | Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size) { |
| 1177 | KScopedLightLock lk(general_lock); | 1248 | // Lock the table. |
| 1249 | KScopedLightLock lk(m_general_lock); | ||
| 1250 | |||
| 1251 | // Validate that the source address's state is valid. | ||
| 1252 | KMemoryState src_state; | ||
| 1253 | size_t num_src_allocator_blocks; | ||
| 1254 | R_TRY(this->CheckMemoryState( | ||
| 1255 | std::addressof(src_state), nullptr, nullptr, std::addressof(num_src_allocator_blocks), | ||
| 1256 | src_address, size, KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias, | ||
| 1257 | KMemoryPermission::All, KMemoryPermission::NotMapped | KMemoryPermission::KernelRead, | ||
| 1258 | KMemoryAttribute::All, KMemoryAttribute::Locked)); | ||
| 1259 | |||
| 1260 | // Validate that the dst address's state is valid. | ||
| 1261 | KMemoryPermission dst_perm; | ||
| 1262 | size_t num_dst_allocator_blocks; | ||
| 1263 | R_TRY(this->CheckMemoryState( | ||
| 1264 | nullptr, std::addressof(dst_perm), nullptr, std::addressof(num_dst_allocator_blocks), | ||
| 1265 | dst_address, size, KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None, | ||
| 1266 | KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None)); | ||
| 1178 | 1267 | ||
| 1179 | KMemoryState src_state{}; | 1268 | // Create an update allocator for the source. |
| 1180 | CASCADE_CODE(CheckMemoryState( | 1269 | Result src_allocator_result{ResultSuccess}; |
| 1181 | &src_state, nullptr, nullptr, nullptr, src_addr, size, KMemoryState::FlagCanAlias, | 1270 | KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), |
| 1182 | KMemoryState::FlagCanAlias, KMemoryPermission::All, KMemoryPermission::None, | 1271 | m_memory_block_slab_manager, |
| 1183 | KMemoryAttribute::Mask, KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped)); | 1272 | num_src_allocator_blocks); |
| 1273 | R_TRY(src_allocator_result); | ||
| 1184 | 1274 | ||
| 1185 | KMemoryPermission dst_perm{}; | 1275 | // Create an update allocator for the destination. |
| 1186 | CASCADE_CODE(CheckMemoryState(nullptr, &dst_perm, nullptr, nullptr, dst_addr, size, | 1276 | Result dst_allocator_result{ResultSuccess}; |
| 1187 | KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None, | 1277 | KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), |
| 1188 | KMemoryPermission::None, KMemoryAttribute::Mask, | 1278 | m_memory_block_slab_manager, |
| 1189 | KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped)); | 1279 | num_dst_allocator_blocks); |
| 1280 | R_TRY(dst_allocator_result); | ||
| 1190 | 1281 | ||
| 1191 | KPageGroup src_pages; | 1282 | KPageGroup src_pages; |
| 1192 | KPageGroup dst_pages; | 1283 | KPageGroup dst_pages; |
| 1193 | const std::size_t num_pages{size / PageSize}; | 1284 | const size_t num_pages{size / PageSize}; |
| 1194 | 1285 | ||
| 1195 | AddRegionToPages(src_addr, num_pages, src_pages); | 1286 | AddRegionToPages(src_address, num_pages, src_pages); |
| 1196 | AddRegionToPages(dst_addr, num_pages, dst_pages); | 1287 | AddRegionToPages(dst_address, num_pages, dst_pages); |
| 1197 | 1288 | ||
| 1198 | if (!dst_pages.IsEqual(src_pages)) { | 1289 | R_UNLESS(dst_pages.IsEqual(src_pages), ResultInvalidMemoryRegion); |
| 1199 | return ResultInvalidMemoryRegion; | ||
| 1200 | } | ||
| 1201 | 1290 | ||
| 1202 | { | 1291 | { |
| 1203 | auto block_guard = detail::ScopeExit([&] { MapPages(dst_addr, dst_pages, dst_perm); }); | 1292 | auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); }); |
| 1204 | 1293 | ||
| 1205 | CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap)); | 1294 | R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap)); |
| 1206 | CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::UserReadWrite, | 1295 | R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite, |
| 1207 | OperationType::ChangePermissions)); | 1296 | OperationType::ChangePermissions)); |
| 1208 | 1297 | ||
| 1209 | block_guard.Cancel(); | 1298 | block_guard.Cancel(); |
| 1210 | } | 1299 | } |
| 1211 | 1300 | ||
| 1212 | block_manager->Update(src_addr, num_pages, src_state, KMemoryPermission::UserReadWrite); | 1301 | // Apply the memory block updates. |
| 1213 | block_manager->Update(dst_addr, num_pages, KMemoryState::Free); | 1302 | m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, |
| 1214 | 1303 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, | |
| 1215 | return ResultSuccess; | 1304 | KMemoryBlockDisableMergeAttribute::None, |
| 1305 | KMemoryBlockDisableMergeAttribute::Locked); | ||
| 1306 | m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, | ||
| 1307 | KMemoryState::None, KMemoryPermission::None, | ||
| 1308 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 1309 | KMemoryBlockDisableMergeAttribute::Normal); | ||
| 1310 | |||
| 1311 | R_SUCCEED(); | ||
| 1216 | } | 1312 | } |
| 1217 | 1313 | ||
| 1218 | Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list, | 1314 | Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list, |
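The rewritten MapMemory/UnmapMemory pair is symmetric by construction: every permission, attribute, and disable-merge bit MapMemory stamps onto the source and destination blocks is exactly what UnmapMemory requires in its CheckMemoryState calls and then clears. A hand-written summary of those paired updates, encoded as data (this table is mine, abbreviated from the hunks above, not code from the commit):

    // Summary of the paired block updates above (abbreviated names).
    struct BlockUpdate {
        const char* range;
        const char* on_map_memory;
        const char* on_unmap_memory;
    };
    constexpr BlockUpdate kStackMapTransitions[]{
        {"source",
         "perm = NotMapped|KernelRead, attr = Locked, disable-merge += Locked",
         "perm -> UserReadWrite, attr -> None, disable-merge -= Locked"},
        {"destination",
         "state = Stack, perm = UserReadWrite, disable-merge += Normal",
         "state -> None (freed), perm -> None, disable-merge -= Normal"},
    };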
| @@ -1225,48 +1321,54 @@ Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list, | |||
| 1225 | if (const auto result{ | 1321 | if (const auto result{ |
| 1226 | Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())}; | 1322 | Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())}; |
| 1227 | result.IsError()) { | 1323 | result.IsError()) { |
| 1228 | const std::size_t num_pages{(addr - cur_addr) / PageSize}; | 1324 | const size_t num_pages{(addr - cur_addr) / PageSize}; |
| 1229 | 1325 | ||
| 1230 | ASSERT(Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap) | 1326 | ASSERT(Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap) |
| 1231 | .IsSuccess()); | 1327 | .IsSuccess()); |
| 1232 | 1328 | ||
| 1233 | return result; | 1329 | R_RETURN(result); |
| 1234 | } | 1330 | } |
| 1235 | 1331 | ||
| 1236 | cur_addr += node.GetNumPages() * PageSize; | 1332 | cur_addr += node.GetNumPages() * PageSize; |
| 1237 | } | 1333 | } |
| 1238 | 1334 | ||
| 1239 | return ResultSuccess; | 1335 | R_SUCCEED(); |
| 1240 | } | 1336 | } |
| 1241 | 1337 | ||
| 1242 | Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state, | 1338 | Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state, |
| 1243 | KMemoryPermission perm) { | 1339 | KMemoryPermission perm) { |
| 1244 | // Check that the map is in range. | 1340 | // Check that the map is in range. |
| 1245 | const std::size_t num_pages{page_linked_list.GetNumPages()}; | 1341 | const size_t num_pages{page_linked_list.GetNumPages()}; |
| 1246 | const std::size_t size{num_pages * PageSize}; | 1342 | const size_t size{num_pages * PageSize}; |
| 1247 | R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory); | 1343 | R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory); |
| 1248 | 1344 | ||
| 1249 | // Lock the table. | 1345 | // Lock the table. |
| 1250 | KScopedLightLock lk(general_lock); | 1346 | KScopedLightLock lk(m_general_lock); |
| 1251 | 1347 | ||
| 1252 | // Check the memory state. | 1348 | // Check the memory state. |
| 1253 | R_TRY(this->CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free, | 1349 | R_TRY(this->CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free, |
| 1254 | KMemoryPermission::None, KMemoryPermission::None, | 1350 | KMemoryPermission::None, KMemoryPermission::None, |
| 1255 | KMemoryAttribute::None, KMemoryAttribute::None)); | 1351 | KMemoryAttribute::None, KMemoryAttribute::None)); |
| 1256 | 1352 | ||
| 1353 | // Create an update allocator. | ||
| 1354 | Result allocator_result{ResultSuccess}; | ||
| 1355 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1356 | m_memory_block_slab_manager); | ||
| 1357 | |||
| 1257 | // Map the pages. | 1358 | // Map the pages. |
| 1258 | R_TRY(MapPages(address, page_linked_list, perm)); | 1359 | R_TRY(MapPages(address, page_linked_list, perm)); |
| 1259 | 1360 | ||
| 1260 | // Update the blocks. | 1361 | // Update the blocks. |
| 1261 | block_manager->Update(address, num_pages, state, perm); | 1362 | m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm, |
| 1363 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||
| 1364 | KMemoryBlockDisableMergeAttribute::None); | ||
| 1262 | 1365 | ||
| 1263 | return ResultSuccess; | 1366 | R_SUCCEED(); |
| 1264 | } | 1367 | } |
| 1265 | 1368 | ||
| 1266 | Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, | 1369 | Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr, |
| 1267 | PAddr phys_addr, bool is_pa_valid, VAddr region_start, | 1370 | bool is_pa_valid, VAddr region_start, size_t region_num_pages, |
| 1268 | std::size_t region_num_pages, KMemoryState state, | 1371 | KMemoryState state, KMemoryPermission perm) { |
| 1269 | KMemoryPermission perm) { | ||
| 1270 | ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize); | 1372 | ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize); |
| 1271 | 1373 | ||
| 1272 | // Ensure this is a valid map request. | 1374 | // Ensure this is a valid map request. |
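The first MapPages overload above maps a page group node by node and, on the first failure, unmaps everything it already mapped before propagating the error with R_RETURN. The shape of that rollback, with invented stand-in types (Map/Unmap below are trivial placeholders for Operate, not yuzu's functions):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    using VAddr = std::uint64_t;
    constexpr std::size_t PageSize = 0x1000;

    struct Node {
        std::uint64_t phys_addr;
        std::size_t num_pages;
    };

    bool Map(VAddr, const Node&) { return true; } // stand-in for Operate(..., Map)
    void Unmap(VAddr, std::size_t) {}             // stand-in for Operate(..., Unmap)

    bool MapGroup(VAddr addr, const std::vector<Node>& nodes) {
        VAddr cur_addr = addr;
        for (const auto& node : nodes) {
            if (!Map(cur_addr, node)) {
                // Roll back every page mapped before the failing node.
                Unmap(addr, (cur_addr - addr) / PageSize);
                return false;
            }
            cur_addr += node.num_pages * PageSize;
        }
        return true;
    }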
| @@ -1275,7 +1377,7 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t | |||
| 1275 | R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory); | 1377 | R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory); |
| 1276 | 1378 | ||
| 1277 | // Lock the table. | 1379 | // Lock the table. |
| 1278 | KScopedLightLock lk(general_lock); | 1380 | KScopedLightLock lk(m_general_lock); |
| 1279 | 1381 | ||
| 1280 | // Find a random address to map at. | 1382 | // Find a random address to map at. |
| 1281 | VAddr addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, | 1383 | VAddr addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, |
| @@ -1288,6 +1390,11 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t | |||
| 1288 | KMemoryAttribute::None, KMemoryAttribute::None) | 1390 | KMemoryAttribute::None, KMemoryAttribute::None) |
| 1289 | .IsSuccess()); | 1391 | .IsSuccess()); |
| 1290 | 1392 | ||
| 1393 | // Create an update allocator. | ||
| 1394 | Result allocator_result{ResultSuccess}; | ||
| 1395 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1396 | m_memory_block_slab_manager); | ||
| 1397 | |||
| 1291 | // Perform mapping operation. | 1398 | // Perform mapping operation. |
| 1292 | if (is_pa_valid) { | 1399 | if (is_pa_valid) { |
| 1293 | R_TRY(this->Operate(addr, num_pages, perm, OperationType::Map, phys_addr)); | 1400 | R_TRY(this->Operate(addr, num_pages, perm, OperationType::Map, phys_addr)); |
| @@ -1296,11 +1403,13 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t | |||
| 1296 | } | 1403 | } |
| 1297 | 1404 | ||
| 1298 | // Update the blocks. | 1405 | // Update the blocks. |
| 1299 | block_manager->Update(addr, num_pages, state, perm); | 1406 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, |
| 1407 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||
| 1408 | KMemoryBlockDisableMergeAttribute::None); | ||
| 1300 | 1409 | ||
| 1301 | // We successfully mapped the pages. | 1410 | // We successfully mapped the pages. |
| 1302 | *out_addr = addr; | 1411 | *out_addr = addr; |
| 1303 | return ResultSuccess; | 1412 | R_SUCCEED(); |
| 1304 | } | 1413 | } |
| 1305 | 1414 | ||
| 1306 | Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) { | 1415 | Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) { |
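The address-choosing MapPages overload above relies on FindFreeArea to pick a mapping address inside the region (the real version randomizes the placement; this sketch just takes the first aligned gap). A hypothetical, standard-library-only illustration, assuming a sorted map of non-overlapping mappings and a power-of-two alignment:

    #include <cstdint>
    #include <map>

    using VAddr = std::uint64_t;

    // mappings: start -> size in bytes, sorted and non-overlapping.
    VAddr FindFreeArea(const std::map<VAddr, std::uint64_t>& mappings,
                       VAddr region_start, std::uint64_t region_size,
                       std::uint64_t needed_bytes, std::uint64_t alignment) {
        const auto align_up = [&](VAddr v) {
            return (v + alignment - 1) & ~(alignment - 1);
        };
        VAddr candidate = align_up(region_start);
        for (const auto& [start, size] : mappings) {
            if (start + size <= candidate) {
                continue; // mapping lies entirely below the candidate
            }
            if (start >= candidate + needed_bytes) {
                break; // the gap before this mapping is large enough
            }
            candidate = align_up(start + size); // skip past this mapping
        }
        const bool fits = candidate + needed_bytes <= region_start + region_size;
        return fits ? candidate : 0; // 0 stands in for "no free area found"
    }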
| @@ -1312,60 +1421,80 @@ Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) { | |||
| 1312 | if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None, | 1421 | if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None, |
| 1313 | OperationType::Unmap)}; | 1422 | OperationType::Unmap)}; |
| 1314 | result.IsError()) { | 1423 | result.IsError()) { |
| 1315 | return result; | 1424 | R_RETURN(result); |
| 1316 | } | 1425 | } |
| 1317 | 1426 | ||
| 1318 | cur_addr += node.GetNumPages() * PageSize; | 1427 | cur_addr += node.GetNumPages() * PageSize; |
| 1319 | } | 1428 | } |
| 1320 | 1429 | ||
| 1321 | return ResultSuccess; | 1430 | R_SUCCEED(); |
| 1322 | } | 1431 | } |
| 1323 | 1432 | ||
| 1324 | Result KPageTable::UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state) { | 1433 | Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state) { |
| 1325 | // Check that the unmap is in range. | 1434 | // Check that the unmap is in range. |
| 1326 | const std::size_t num_pages{page_linked_list.GetNumPages()}; | 1435 | const size_t num_pages{page_linked_list.GetNumPages()}; |
| 1327 | const std::size_t size{num_pages * PageSize}; | 1436 | const size_t size{num_pages * PageSize}; |
| 1328 | R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); | 1437 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); |
| 1329 | 1438 | ||
| 1330 | // Lock the table. | 1439 | // Lock the table. |
| 1331 | KScopedLightLock lk(general_lock); | 1440 | KScopedLightLock lk(m_general_lock); |
| 1332 | 1441 | ||
| 1333 | // Check the memory state. | 1442 | // Check the memory state. |
| 1334 | R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, state, KMemoryPermission::None, | 1443 | size_t num_allocator_blocks; |
| 1444 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, | ||
| 1445 | KMemoryState::All, state, KMemoryPermission::None, | ||
| 1335 | KMemoryPermission::None, KMemoryAttribute::All, | 1446 | KMemoryPermission::None, KMemoryAttribute::All, |
| 1336 | KMemoryAttribute::None)); | 1447 | KMemoryAttribute::None)); |
| 1337 | 1448 | ||
| 1449 | // Create an update allocator. | ||
| 1450 | Result allocator_result{ResultSuccess}; | ||
| 1451 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1452 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 1453 | R_TRY(allocator_result); | ||
| 1454 | |||
| 1338 | // Perform the unmap. | 1455 | // Perform the unmap. |
| 1339 | R_TRY(UnmapPages(addr, page_linked_list)); | 1456 | R_TRY(UnmapPages(address, page_linked_list)); |
| 1340 | 1457 | ||
| 1341 | // Update the blocks. | 1458 | // Update the blocks. |
| 1342 | block_manager->Update(addr, num_pages, state, KMemoryPermission::None); | 1459 | m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, |
| 1460 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 1461 | KMemoryBlockDisableMergeAttribute::None, | ||
| 1462 | KMemoryBlockDisableMergeAttribute::Normal); | ||
| 1343 | 1463 | ||
| 1344 | return ResultSuccess; | 1464 | R_SUCCEED(); |
| 1345 | } | 1465 | } |
| 1346 | 1466 | ||
| 1347 | Result KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state) { | 1467 | Result KPageTable::UnmapPages(VAddr address, size_t num_pages, KMemoryState state) { |
| 1348 | // Check that the unmap is in range. | 1468 | // Check that the unmap is in range. |
| 1349 | const std::size_t size = num_pages * PageSize; | 1469 | const size_t size = num_pages * PageSize; |
| 1350 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | 1470 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); |
| 1351 | 1471 | ||
| 1352 | // Lock the table. | 1472 | // Lock the table. |
| 1353 | KScopedLightLock lk(general_lock); | 1473 | KScopedLightLock lk(m_general_lock); |
| 1354 | 1474 | ||
| 1355 | // Check the memory state. | 1475 | // Check the memory state. |
| 1356 | std::size_t num_allocator_blocks{}; | 1476 | size_t num_allocator_blocks{}; |
| 1357 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, | 1477 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, |
| 1358 | KMemoryState::All, state, KMemoryPermission::None, | 1478 | KMemoryState::All, state, KMemoryPermission::None, |
| 1359 | KMemoryPermission::None, KMemoryAttribute::All, | 1479 | KMemoryPermission::None, KMemoryAttribute::All, |
| 1360 | KMemoryAttribute::None)); | 1480 | KMemoryAttribute::None)); |
| 1361 | 1481 | ||
| 1482 | // Create an update allocator. | ||
| 1483 | Result allocator_result{ResultSuccess}; | ||
| 1484 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1485 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 1486 | R_TRY(allocator_result); | ||
| 1487 | |||
| 1362 | // Perform the unmap. | 1488 | // Perform the unmap. |
| 1363 | R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap)); | 1489 | R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap)); |
| 1364 | 1490 | ||
| 1365 | // Update the blocks. | 1491 | // Update the blocks. |
| 1366 | block_manager->Update(address, num_pages, KMemoryState::Free, KMemoryPermission::None); | 1492 | m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, |
| 1493 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 1494 | KMemoryBlockDisableMergeAttribute::None, | ||
| 1495 | KMemoryBlockDisableMergeAttribute::Normal); | ||
| 1367 | 1496 | ||
| 1368 | return ResultSuccess; | 1497 | R_SUCCEED(); |
| 1369 | } | 1498 | } |
| 1370 | 1499 | ||
| 1371 | Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages, | 1500 | Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages, |
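Note a behavioral fix hidden in the group-based UnmapPages overload above: the old code re-stamped the unmapped range with the caller-supplied state, while the new code marks it KMemoryState::Free (clearing the Normal disable-merge bit), matching the contiguous UnmapPages overload that follows. Both overloads also now count allocator blocks via CheckMemoryState before constructing the update allocator, in the same pattern as the rest of the commit.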
| @@ -1380,7 +1509,7 @@ Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t n | |||
| 1380 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | 1509 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); |
| 1381 | 1510 | ||
| 1382 | // Lock the table. | 1511 | // Lock the table. |
| 1383 | KScopedLightLock lk(general_lock); | 1512 | KScopedLightLock lk(m_general_lock); |
| 1384 | 1513 | ||
| 1385 | // Check if state allows us to create the group. | 1514 | // Check if state allows us to create the group. |
| 1386 | R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted, | 1515 | R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted, |
| @@ -1390,15 +1519,15 @@ Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t n | |||
| 1390 | // Create a new page group for the region. | 1519 | // Create a new page group for the region. |
| 1391 | R_TRY(this->MakePageGroup(*out, address, num_pages)); | 1520 | R_TRY(this->MakePageGroup(*out, address, num_pages)); |
| 1392 | 1521 | ||
| 1393 | return ResultSuccess; | 1522 | R_SUCCEED(); |
| 1394 | } | 1523 | } |
| 1395 | 1524 | ||
| 1396 | Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size, | 1525 | Result KPageTable::SetProcessMemoryPermission(VAddr addr, size_t size, |
| 1397 | Svc::MemoryPermission svc_perm) { | 1526 | Svc::MemoryPermission svc_perm) { |
| 1398 | const size_t num_pages = size / PageSize; | 1527 | const size_t num_pages = size / PageSize; |
| 1399 | 1528 | ||
| 1400 | // Lock the table. | 1529 | // Lock the table. |
| 1401 | KScopedLightLock lk(general_lock); | 1530 | KScopedLightLock lk(m_general_lock); |
| 1402 | 1531 | ||
| 1403 | // Verify we can change the memory permission. | 1532 | // Verify we can change the memory permission. |
| 1404 | KMemoryState old_state; | 1533 | KMemoryState old_state; |
| @@ -1435,105 +1564,101 @@ Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size, | |||
| 1435 | // Succeed if there's nothing to do. | 1564 | // Succeed if there's nothing to do. |
| 1436 | R_SUCCEED_IF(old_perm == new_perm && old_state == new_state); | 1565 | R_SUCCEED_IF(old_perm == new_perm && old_state == new_state); |
| 1437 | 1566 | ||
| 1567 | // Create an update allocator. | ||
| 1568 | Result allocator_result{ResultSuccess}; | ||
| 1569 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1570 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 1571 | R_TRY(allocator_result); | ||
| 1572 | |||
| 1438 | // Perform mapping operation. | 1573 | // Perform mapping operation. |
| 1439 | const auto operation = | 1574 | const auto operation = |
| 1440 | was_x ? OperationType::ChangePermissionsAndRefresh : OperationType::ChangePermissions; | 1575 | was_x ? OperationType::ChangePermissionsAndRefresh : OperationType::ChangePermissions; |
| 1441 | R_TRY(Operate(addr, num_pages, new_perm, operation)); | 1576 | R_TRY(Operate(addr, num_pages, new_perm, operation)); |
| 1442 | 1577 | ||
| 1443 | // Update the blocks. | 1578 | // Update the blocks. |
| 1444 | block_manager->Update(addr, num_pages, new_state, new_perm, KMemoryAttribute::None); | 1579 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm, |
| 1580 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 1581 | KMemoryBlockDisableMergeAttribute::None); | ||
| 1445 | 1582 | ||
| 1446 | // Ensure cache coherency, if we're setting pages as executable. | 1583 | // Ensure cache coherency, if we're setting pages as executable. |
| 1447 | if (is_x) { | 1584 | if (is_x) { |
| 1448 | system.InvalidateCpuInstructionCacheRange(addr, size); | 1585 | m_system.InvalidateCpuInstructionCacheRange(addr, size); |
| 1449 | } | 1586 | } |
| 1450 | 1587 | ||
| 1451 | return ResultSuccess; | 1588 | R_SUCCEED(); |
| 1452 | } | 1589 | } |
| 1453 | 1590 | ||
| 1454 | KMemoryInfo KPageTable::QueryInfoImpl(VAddr addr) { | 1591 | KMemoryInfo KPageTable::QueryInfoImpl(VAddr addr) { |
| 1455 | KScopedLightLock lk(general_lock); | 1592 | KScopedLightLock lk(m_general_lock); |
| 1456 | 1593 | ||
| 1457 | return block_manager->FindBlock(addr).GetMemoryInfo(); | 1594 | return m_memory_block_manager.FindBlock(addr)->GetMemoryInfo(); |
| 1458 | } | 1595 | } |
| 1459 | 1596 | ||
| 1460 | KMemoryInfo KPageTable::QueryInfo(VAddr addr) { | 1597 | KMemoryInfo KPageTable::QueryInfo(VAddr addr) { |
| 1461 | if (!Contains(addr, 1)) { | 1598 | if (!Contains(addr, 1)) { |
| 1462 | return {address_space_end, 0 - address_space_end, KMemoryState::Inaccessible, | 1599 | return { |
| 1463 | KMemoryPermission::None, KMemoryAttribute::None, KMemoryPermission::None}; | 1600 | .m_address = m_address_space_end, |
| 1601 | .m_size = 0 - m_address_space_end, | ||
| 1602 | .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible), | ||
| 1603 | .m_device_disable_merge_left_count = 0, | ||
| 1604 | .m_device_disable_merge_right_count = 0, | ||
| 1605 | .m_ipc_lock_count = 0, | ||
| 1606 | .m_device_use_count = 0, | ||
| 1607 | .m_ipc_disable_merge_count = 0, | ||
| 1608 | .m_permission = KMemoryPermission::None, | ||
| 1609 | .m_attribute = KMemoryAttribute::None, | ||
| 1610 | .m_original_permission = KMemoryPermission::None, | ||
| 1611 | .m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None, | ||
| 1612 | }; | ||
| 1464 | } | 1613 | } |
| 1465 | 1614 | ||
| 1466 | return QueryInfoImpl(addr); | 1615 | return QueryInfoImpl(addr); |
| 1467 | } | 1616 | } |
| 1468 | 1617 | ||
| 1469 | Result KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm) { | 1618 | Result KPageTable::SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm) { |
| 1470 | KScopedLightLock lk(general_lock); | ||
| 1471 | |||
| 1472 | KMemoryState state{}; | ||
| 1473 | KMemoryAttribute attribute{}; | ||
| 1474 | |||
| 1475 | R_TRY(CheckMemoryState(&state, nullptr, &attribute, nullptr, addr, size, | ||
| 1476 | KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted, | ||
| 1477 | KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted, | ||
| 1478 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, | ||
| 1479 | KMemoryAttribute::Mask, KMemoryAttribute::None, | ||
| 1480 | KMemoryAttribute::IpcAndDeviceMapped)); | ||
| 1481 | |||
| 1482 | block_manager->Update(addr, size / PageSize, state, perm, attribute | KMemoryAttribute::Locked); | ||
| 1483 | |||
| 1484 | return ResultSuccess; | ||
| 1485 | } | ||
| 1486 | |||
| 1487 | Result KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) { | ||
| 1488 | KScopedLightLock lk(general_lock); | ||
| 1489 | |||
| 1490 | KMemoryState state{}; | ||
| 1491 | |||
| 1492 | R_TRY(CheckMemoryState(&state, nullptr, nullptr, nullptr, addr, size, | ||
| 1493 | KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted, | ||
| 1494 | KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted, | ||
| 1495 | KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::Mask, | ||
| 1496 | KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped)); | ||
| 1497 | |||
| 1498 | block_manager->Update(addr, size / PageSize, state, KMemoryPermission::UserReadWrite); | ||
| 1499 | return ResultSuccess; | ||
| 1500 | } | ||
| 1501 | |||
| 1502 | Result KPageTable::SetMemoryPermission(VAddr addr, std::size_t size, | ||
| 1503 | Svc::MemoryPermission svc_perm) { | ||
| 1504 | const size_t num_pages = size / PageSize; | 1619 | const size_t num_pages = size / PageSize; |
| 1505 | 1620 | ||
| 1506 | // Lock the table. | 1621 | // Lock the table. |
| 1507 | KScopedLightLock lk(general_lock); | 1622 | KScopedLightLock lk(m_general_lock); |
| 1508 | 1623 | ||
| 1509 | // Verify we can change the memory permission. | 1624 | // Verify we can change the memory permission. |
| 1510 | KMemoryState old_state; | 1625 | KMemoryState old_state; |
| 1511 | KMemoryPermission old_perm; | 1626 | KMemoryPermission old_perm; |
| 1512 | R_TRY(this->CheckMemoryState( | 1627 | size_t num_allocator_blocks; |
| 1513 | std::addressof(old_state), std::addressof(old_perm), nullptr, nullptr, addr, size, | 1628 | R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr, |
| 1514 | KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect, KMemoryPermission::None, | 1629 | std::addressof(num_allocator_blocks), addr, size, |
| 1515 | KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None)); | 1630 | KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect, |
| 1631 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 1632 | KMemoryAttribute::All, KMemoryAttribute::None)); | ||
| 1516 | 1633 | ||
| 1517 | // Determine new perm. | 1634 | // Determine new perm. |
| 1518 | const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm); | 1635 | const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm); |
| 1519 | R_SUCCEED_IF(old_perm == new_perm); | 1636 | R_SUCCEED_IF(old_perm == new_perm); |
| 1520 | 1637 | ||
| 1638 | // Create an update allocator. | ||
| 1639 | Result allocator_result{ResultSuccess}; | ||
| 1640 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1641 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 1642 | R_TRY(allocator_result); | ||
| 1643 | |||
| 1521 | // Perform mapping operation. | 1644 | // Perform mapping operation. |
| 1522 | R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions)); | 1645 | R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions)); |
| 1523 | 1646 | ||
| 1524 | // Update the blocks. | 1647 | // Update the blocks. |
| 1525 | block_manager->Update(addr, num_pages, old_state, new_perm, KMemoryAttribute::None); | 1648 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, |
| 1649 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 1650 | KMemoryBlockDisableMergeAttribute::None); | ||
| 1526 | 1651 | ||
| 1527 | return ResultSuccess; | 1652 | R_SUCCEED(); |
| 1528 | } | 1653 | } |
| 1529 | 1654 | ||
| 1530 | Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr) { | 1655 | Result KPageTable::SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr) { |
| 1531 | const size_t num_pages = size / PageSize; | 1656 | const size_t num_pages = size / PageSize; |
| 1532 | ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) == | 1657 | ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) == |
| 1533 | KMemoryAttribute::SetMask); | 1658 | KMemoryAttribute::SetMask); |
| 1534 | 1659 | ||
| 1535 | // Lock the table. | 1660 | // Lock the table. |
| 1536 | KScopedLightLock lk(general_lock); | 1661 | KScopedLightLock lk(m_general_lock); |
| 1537 | 1662 | ||
| 1538 | // Verify we can change the memory attribute. | 1663 | // Verify we can change the memory attribute. |
| 1539 | KMemoryState old_state; | 1664 | KMemoryState old_state; |
| @@ -1548,6 +1673,12 @@ Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u3 | |||
| 1548 | KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None, | 1673 | KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None, |
| 1549 | AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask)); | 1674 | AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask)); |
| 1550 | 1675 | ||
| 1676 | // Create an update allocator. | ||
| 1677 | Result allocator_result{ResultSuccess}; | ||
| 1678 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1679 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 1680 | R_TRY(allocator_result); | ||
| 1681 | |||
| 1551 | // Determine the new attribute. | 1682 | // Determine the new attribute. |
| 1552 | const KMemoryAttribute new_attr = | 1683 | const KMemoryAttribute new_attr = |
| 1553 | static_cast<KMemoryAttribute>(((old_attr & static_cast<KMemoryAttribute>(~mask)) | | 1684 | static_cast<KMemoryAttribute>(((old_attr & static_cast<KMemoryAttribute>(~mask)) | |
| @@ -1557,123 +1688,142 @@ Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u3 | |||
| 1557 | this->Operate(addr, num_pages, old_perm, OperationType::ChangePermissionsAndRefresh); | 1688 | this->Operate(addr, num_pages, old_perm, OperationType::ChangePermissionsAndRefresh); |
| 1558 | 1689 | ||
| 1559 | // Update the blocks. | 1690 | // Update the blocks. |
| 1560 | block_manager->Update(addr, num_pages, old_state, old_perm, new_attr); | 1691 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, old_perm, |
| 1692 | new_attr, KMemoryBlockDisableMergeAttribute::None, | ||
| 1693 | KMemoryBlockDisableMergeAttribute::None); | ||
| 1561 | 1694 | ||
| 1562 | return ResultSuccess; | 1695 | R_SUCCEED(); |
| 1563 | } | 1696 | } |
| 1564 | 1697 | ||
| 1565 | Result KPageTable::SetMaxHeapSize(std::size_t size) { | 1698 | Result KPageTable::SetMaxHeapSize(size_t size) { |
| 1566 | // Lock the table. | 1699 | // Lock the table. |
| 1567 | KScopedLightLock lk(general_lock); | 1700 | KScopedLightLock lk(m_general_lock); |
| 1568 | 1701 | ||
| 1569 | // Only process page tables are allowed to set heap size. | 1702 | // Only process page tables are allowed to set heap size. |
| 1570 | ASSERT(!this->IsKernel()); | 1703 | ASSERT(!this->IsKernel()); |
| 1571 | 1704 | ||
| 1572 | max_heap_size = size; | 1705 | m_max_heap_size = size; |
| 1573 | 1706 | ||
| 1574 | return ResultSuccess; | 1707 | R_SUCCEED(); |
| 1575 | } | 1708 | } |
| 1576 | 1709 | ||
| 1577 | Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) { | 1710 | Result KPageTable::SetHeapSize(VAddr* out, size_t size) { |
| 1578 | // Lock the physical memory mutex. | 1711 | // Lock the physical memory mutex. |
| 1579 | KScopedLightLock map_phys_mem_lk(map_physical_memory_lock); | 1712 | KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); |
| 1580 | 1713 | ||
| 1581 | // Try to perform a reduction in heap, instead of an extension. | 1714 | // Try to perform a reduction in heap, instead of an extension. |
| 1582 | VAddr cur_address{}; | 1715 | VAddr cur_address{}; |
| 1583 | std::size_t allocation_size{}; | 1716 | size_t allocation_size{}; |
| 1584 | { | 1717 | { |
| 1585 | // Lock the table. | 1718 | // Lock the table. |
| 1586 | KScopedLightLock lk(general_lock); | 1719 | KScopedLightLock lk(m_general_lock); |
| 1587 | 1720 | ||
| 1588 | // Validate that setting heap size is possible at all. | 1721 | // Validate that setting heap size is possible at all. |
| 1589 | R_UNLESS(!is_kernel, ResultOutOfMemory); | 1722 | R_UNLESS(!m_is_kernel, ResultOutOfMemory); |
| 1590 | R_UNLESS(size <= static_cast<std::size_t>(heap_region_end - heap_region_start), | 1723 | R_UNLESS(size <= static_cast<size_t>(m_heap_region_end - m_heap_region_start), |
| 1591 | ResultOutOfMemory); | 1724 | ResultOutOfMemory); |
| 1592 | R_UNLESS(size <= max_heap_size, ResultOutOfMemory); | 1725 | R_UNLESS(size <= m_max_heap_size, ResultOutOfMemory); |
| 1593 | 1726 | ||
| 1594 | if (size < GetHeapSize()) { | 1727 | if (size < GetHeapSize()) { |
| 1595 | // The size being requested is less than the current size, so we need to free the end of | 1728 | // The size being requested is less than the current size, so we need to free the end of |
| 1596 | // the heap. | 1729 | // the heap. |
| 1597 | 1730 | ||
| 1598 | // Validate memory state. | 1731 | // Validate memory state. |
| 1599 | std::size_t num_allocator_blocks; | 1732 | size_t num_allocator_blocks; |
| 1600 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), | 1733 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), |
| 1601 | heap_region_start + size, GetHeapSize() - size, | 1734 | m_heap_region_start + size, GetHeapSize() - size, |
| 1602 | KMemoryState::All, KMemoryState::Normal, | 1735 | KMemoryState::All, KMemoryState::Normal, |
| 1603 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, | 1736 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, |
| 1604 | KMemoryAttribute::All, KMemoryAttribute::None)); | 1737 | KMemoryAttribute::All, KMemoryAttribute::None)); |
| 1605 | 1738 | ||
| 1739 | // Create an update allocator. | ||
| 1740 | Result allocator_result{ResultSuccess}; | ||
| 1741 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1742 | m_memory_block_slab_manager, | ||
| 1743 | num_allocator_blocks); | ||
| 1744 | R_TRY(allocator_result); | ||
| 1745 | |||
| 1606 | // Unmap the end of the heap. | 1746 | // Unmap the end of the heap. |
| 1607 | const auto num_pages = (GetHeapSize() - size) / PageSize; | 1747 | const auto num_pages = (GetHeapSize() - size) / PageSize; |
| 1608 | R_TRY(Operate(heap_region_start + size, num_pages, KMemoryPermission::None, | 1748 | R_TRY(Operate(m_heap_region_start + size, num_pages, KMemoryPermission::None, |
| 1609 | OperationType::Unmap)); | 1749 | OperationType::Unmap)); |
| 1610 | 1750 | ||
| 1611 | // Release the memory from the resource limit. | 1751 | // Release the memory from the resource limit. |
| 1612 | system.Kernel().CurrentProcess()->GetResourceLimit()->Release( | 1752 | m_system.Kernel().CurrentProcess()->GetResourceLimit()->Release( |
| 1613 | LimitableResource::PhysicalMemory, num_pages * PageSize); | 1753 | LimitableResource::PhysicalMemory, num_pages * PageSize); |
| 1614 | 1754 | ||
| 1615 | // Apply the memory block update. | 1755 | // Apply the memory block update. |
| 1616 | block_manager->Update(heap_region_start + size, num_pages, KMemoryState::Free, | 1756 | m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size, |
| 1617 | KMemoryPermission::None, KMemoryAttribute::None); | 1757 | num_pages, KMemoryState::Free, KMemoryPermission::None, |
| 1758 | KMemoryAttribute::None, | ||
| 1759 | KMemoryBlockDisableMergeAttribute::None, | ||
| 1760 | size == 0 ? KMemoryBlockDisableMergeAttribute::Normal | ||
| 1761 | : KMemoryBlockDisableMergeAttribute::None); | ||
| 1618 | 1762 | ||
| 1619 | // Update the current heap end. | 1763 | // Update the current heap end. |
| 1620 | current_heap_end = heap_region_start + size; | 1764 | m_current_heap_end = m_heap_region_start + size; |
| 1621 | 1765 | ||
| 1622 | // Set the output. | 1766 | // Set the output. |
| 1623 | *out = heap_region_start; | 1767 | *out = m_heap_region_start; |
| 1624 | return ResultSuccess; | 1768 | R_SUCCEED(); |
| 1625 | } else if (size == GetHeapSize()) { | 1769 | } else if (size == GetHeapSize()) { |
| 1626 | // The size requested is exactly the current size. | 1770 | // The size requested is exactly the current size. |
| 1627 | *out = heap_region_start; | 1771 | *out = m_heap_region_start; |
| 1628 | return ResultSuccess; | 1772 | R_SUCCEED(); |
| 1629 | } else { | 1773 | } else { |
| 1630 | // We have to allocate memory. Determine how much to allocate and where while the table | 1774 | // We have to allocate memory. Determine how much to allocate and where while the table |
| 1631 | // is locked. | 1775 | // is locked. |
| 1632 | cur_address = current_heap_end; | 1776 | cur_address = m_current_heap_end; |
| 1633 | allocation_size = size - GetHeapSize(); | 1777 | allocation_size = size - GetHeapSize(); |
| 1634 | } | 1778 | } |
| 1635 | } | 1779 | } |
| 1636 | 1780 | ||
| 1637 | // Reserve memory for the heap extension. | 1781 | // Reserve memory for the heap extension. |
| 1638 | KScopedResourceReservation memory_reservation( | 1782 | KScopedResourceReservation memory_reservation( |
| 1639 | system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory, | 1783 | m_system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory, |
| 1640 | allocation_size); | 1784 | allocation_size); |
| 1641 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | 1785 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); |
| 1642 | 1786 | ||
| 1643 | // Allocate pages for the heap extension. | 1787 | // Allocate pages for the heap extension. |
| 1644 | KPageGroup pg; | 1788 | KPageGroup pg; |
| 1645 | R_TRY(system.Kernel().MemoryManager().AllocateAndOpen( | 1789 | R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen( |
| 1646 | &pg, allocation_size / PageSize, | 1790 | &pg, allocation_size / PageSize, |
| 1647 | KMemoryManager::EncodeOption(memory_pool, allocation_option))); | 1791 | KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option))); |
| 1648 | 1792 | ||
| 1649 | // Clear all the newly allocated pages. | 1793 | // Clear all the newly allocated pages. |
| 1650 | for (const auto& it : pg.Nodes()) { | 1794 | for (const auto& it : pg.Nodes()) { |
| 1651 | std::memset(system.DeviceMemory().GetPointer(it.GetAddress()), heap_fill_value, | 1795 | std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value, |
| 1652 | it.GetSize()); | 1796 | it.GetSize()); |
| 1653 | } | 1797 | } |
| 1654 | 1798 | ||
| 1655 | // Map the pages. | 1799 | // Map the pages. |
| 1656 | { | 1800 | { |
| 1657 | // Lock the table. | 1801 | // Lock the table. |
| 1658 | KScopedLightLock lk(general_lock); | 1802 | KScopedLightLock lk(m_general_lock); |
| 1659 | 1803 | ||
| 1660 | // Ensure that the heap hasn't changed since we began executing. | 1804 | // Ensure that the heap hasn't changed since we began executing. |
| 1661 | ASSERT(cur_address == current_heap_end); | 1805 | ASSERT(cur_address == m_current_heap_end); |
| 1662 | 1806 | ||
| 1663 | // Check the memory state. | 1807 | // Check the memory state. |
| 1664 | std::size_t num_allocator_blocks{}; | 1808 | size_t num_allocator_blocks{}; |
| 1665 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), current_heap_end, | 1809 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), m_current_heap_end, |
| 1666 | allocation_size, KMemoryState::All, KMemoryState::Free, | 1810 | allocation_size, KMemoryState::All, KMemoryState::Free, |
| 1667 | KMemoryPermission::None, KMemoryPermission::None, | 1811 | KMemoryPermission::None, KMemoryPermission::None, |
| 1668 | KMemoryAttribute::None, KMemoryAttribute::None)); | 1812 | KMemoryAttribute::None, KMemoryAttribute::None)); |
| 1669 | 1813 | ||
| 1814 | // Create an update allocator. | ||
| 1815 | Result allocator_result{ResultSuccess}; | ||
| 1816 | KMemoryBlockManagerUpdateAllocator allocator( | ||
| 1817 | std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks); | ||
| 1818 | R_TRY(allocator_result); | ||
| 1819 | |||
| 1670 | // Map the pages. | 1820 | // Map the pages. |
| 1671 | const auto num_pages = allocation_size / PageSize; | 1821 | const auto num_pages = allocation_size / PageSize; |
| 1672 | R_TRY(Operate(current_heap_end, num_pages, pg, OperationType::MapGroup)); | 1822 | R_TRY(Operate(m_current_heap_end, num_pages, pg, OperationType::MapGroup)); |
| 1673 | 1823 | ||
| 1674 | // Clear all the newly allocated pages. | 1824 | // Clear all the newly allocated pages. |
| 1675 | for (std::size_t cur_page = 0; cur_page < num_pages; ++cur_page) { | 1825 | for (size_t cur_page = 0; cur_page < num_pages; ++cur_page) { |
| 1676 | std::memset(system.Memory().GetPointer(current_heap_end + (cur_page * PageSize)), 0, | 1826 | std::memset(m_system.Memory().GetPointer(m_current_heap_end + (cur_page * PageSize)), 0, |
| 1677 | PageSize); | 1827 | PageSize); |
| 1678 | } | 1828 | } |
| 1679 | 1829 | ||
| @@ -1681,133 +1831,172 @@ Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) { | |||
| 1681 | memory_reservation.Commit(); | 1831 | memory_reservation.Commit(); |
| 1682 | 1832 | ||
| 1683 | // Apply the memory block update. | 1833 | // Apply the memory block update. |
| 1684 | block_manager->Update(current_heap_end, num_pages, KMemoryState::Normal, | 1834 | m_memory_block_manager.Update( |
| 1685 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None); | 1835 | std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState::Normal, |
| 1836 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, | ||
| 1837 | m_heap_region_start == m_current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal | ||
| 1838 | : KMemoryBlockDisableMergeAttribute::None, | ||
| 1839 | KMemoryBlockDisableMergeAttribute::None); | ||
| 1686 | 1840 | ||
| 1687 | // Update the current heap end. | 1841 | // Update the current heap end. |
| 1688 | current_heap_end = heap_region_start + size; | 1842 | m_current_heap_end = m_heap_region_start + size; |
| 1689 | 1843 | ||
| 1690 | // Set the output. | 1844 | // Set the output. |
| 1691 | *out = heap_region_start; | 1845 | *out = m_heap_region_start; |
| 1692 | return ResultSuccess; | 1846 | R_SUCCEED(); |
| 1693 | } | 1847 | } |
| 1694 | } | 1848 | } |
| 1695 | 1849 | ||
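Every mutating path in this refactor follows the same three-step shape: CheckMemoryState sizes the worst case as num_allocator_blocks, a KMemoryBlockManagerUpdateAllocator pre-reserves that many nodes from the process's KMemoryBlockSlabManager, and only then is m_memory_block_manager.Update applied. A minimal compilable sketch of that flow, using simplified stand-in types rather than the real kernel classes:

```cpp
#include <cstddef>
#include <cstdio>

// Stand-ins for the kernel types; the real KMemoryBlockManagerUpdateAllocator
// and KMemoryBlockSlabManager carry far more state than this.
struct Result {
    bool failed{};
    bool IsError() const { return failed; }
};
constexpr Result ResultSuccess{false};
constexpr Result ResultOutOfResource{true};

struct SlabManager {
    std::size_t free_nodes{};
};

// Mirrors the constructor contract used at every call site in the diff:
// failure is reported through an out-Result so the caller can R_TRY it.
class UpdateAllocator {
public:
    UpdateAllocator(Result* out_result, SlabManager* slab, std::size_t num_blocks) {
        if (slab->free_nodes < num_blocks) {
            *out_result = ResultOutOfResource;
            return;
        }
        slab->free_nodes -= num_blocks;  // reserve worst-case block splits up front
        *out_result = ResultSuccess;
    }
};

Result ShrinkHeapBlocks(SlabManager* slab, std::size_t num_allocator_blocks) {
    // 1. (CheckMemoryState already computed num_allocator_blocks.)
    // 2. Create the update allocator; bail out before mutating anything.
    Result allocator_result{ResultSuccess};
    UpdateAllocator allocator(&allocator_result, slab, num_allocator_blocks);
    if (allocator_result.IsError()) {
        return allocator_result;
    }
    (void)allocator;  // would be handed to m_memory_block_manager.Update(...)
    // 3. Apply the block update; it can no longer fail for lack of nodes.
    return ResultSuccess;
}

int main() {
    SlabManager slab{1};
    std::printf("%s\n", ShrinkHeapBlocks(&slab, 2).IsError() ? "error" : "ok");
}
```

The payoff is that block-node exhaustion is detected before any page-table state has been touched, so no rollback path is needed.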
| 1696 | ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align, | 1850 | ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_t align, |
| 1697 | bool is_map_only, VAddr region_start, | 1851 | bool is_map_only, VAddr region_start, |
| 1698 | std::size_t region_num_pages, KMemoryState state, | 1852 | size_t region_num_pages, KMemoryState state, |
| 1699 | KMemoryPermission perm, PAddr map_addr) { | 1853 | KMemoryPermission perm, PAddr map_addr) { |
| 1700 | KScopedLightLock lk(general_lock); | 1854 | KScopedLightLock lk(m_general_lock); |
| 1701 | |||
| 1702 | if (!CanContain(region_start, region_num_pages * PageSize, state)) { | ||
| 1703 | return ResultInvalidCurrentMemory; | ||
| 1704 | } | ||
| 1705 | |||
| 1706 | if (region_num_pages <= needed_num_pages) { | ||
| 1707 | return ResultOutOfMemory; | ||
| 1708 | } | ||
| 1709 | 1855 | ||
| 1856 | R_UNLESS(CanContain(region_start, region_num_pages * PageSize, state), | ||
| 1857 | ResultInvalidCurrentMemory); | ||
| 1858 | R_UNLESS(region_num_pages > needed_num_pages, ResultOutOfMemory); | ||
| 1710 | const VAddr addr{ | 1859 | const VAddr addr{ |
| 1711 | AllocateVirtualMemory(region_start, region_num_pages, needed_num_pages, align)}; | 1860 | AllocateVirtualMemory(region_start, region_num_pages, needed_num_pages, align)}; |
| 1712 | if (!addr) { | 1861 | R_UNLESS(addr, ResultOutOfMemory); |
| 1713 | return ResultOutOfMemory; | 1862 | |
| 1714 | } | 1863 | // Create an update allocator. |
| 1864 | Result allocator_result{ResultSuccess}; | ||
| 1865 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1866 | m_memory_block_slab_manager); | ||
| 1715 | 1867 | ||
| 1716 | if (is_map_only) { | 1868 | if (is_map_only) { |
| 1717 | R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)); | 1869 | R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)); |
| 1718 | } else { | 1870 | } else { |
| 1719 | KPageGroup page_group; | 1871 | KPageGroup page_group; |
| 1720 | R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess( | 1872 | R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess( |
| 1721 | &page_group, needed_num_pages, | 1873 | &page_group, needed_num_pages, |
| 1722 | KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0)); | 1874 | KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0)); |
| 1723 | R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)); | 1875 | R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)); |
| 1724 | } | 1876 | } |
| 1725 | 1877 | ||
| 1726 | block_manager->Update(addr, needed_num_pages, state, perm); | 1878 | // Update the blocks. |
| 1879 | m_memory_block_manager.Update(std::addressof(allocator), addr, needed_num_pages, state, perm, | ||
| 1880 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||
| 1881 | KMemoryBlockDisableMergeAttribute::None); | ||
| 1727 | 1882 | ||
| 1728 | return addr; | 1883 | return addr; |
| 1729 | } | 1884 | } |
| 1730 | 1885 | ||
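The conversion from hand-written `if (...) { return ...; }` checks to R_UNLESS, and from plain returns to R_SUCCEED/R_RETURN, is mechanical. If the macro family is unfamiliar, the sketch below shows approximate stand-in definitions and the resulting call shape; the real macros in src/core/hle/result.h are more elaborate, so treat these as illustrative only:

```cpp
#include <cstdio>

struct Result {
    int code{};
    bool IsError() const { return code != 0; }
};
constexpr Result ResultSuccess{0};
constexpr Result ResultOutOfMemory{1};

// Approximate expansions -- a sketch, not the real definitions in result.h.
#define R_SUCCEED() return ResultSuccess
#define R_RETURN(res_expr) return (res_expr)
#define R_UNLESS(expr, res) \
    do {                    \
        if (!(expr)) {      \
            return (res);   \
        }                   \
    } while (0)
#define R_TRY(res_expr)                                                 \
    do {                                                                \
        if (const auto tmp_result = (res_expr); tmp_result.IsError()) { \
            return tmp_result;                                          \
        }                                                               \
    } while (0)

Result Allocate(bool can_allocate) {
    // Same shape as the converted AllocateAndMapMemory prologue.
    R_UNLESS(can_allocate, ResultOutOfMemory);
    R_SUCCEED();
}

Result Caller() {
    R_TRY(Allocate(true));  // propagates a failure, falls through on success
    R_SUCCEED();
}

int main() {
    std::printf("%d\n", Caller().code);  // 0
}
```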
| 1731 | Result KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) { | 1886 | Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm, |
| 1732 | KScopedLightLock lk(general_lock); | 1887 | bool is_aligned) { |
| 1733 | 1888 | // Lightly validate the range before doing anything else. | |
| 1734 | KMemoryPermission perm{}; | 1889 | const size_t num_pages = size / PageSize; |
| 1735 | if (const Result result{CheckMemoryState( | 1890 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); |
| 1736 | nullptr, &perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute, | ||
| 1737 | KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None, | ||
| 1738 | KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None, | ||
| 1739 | KMemoryAttribute::DeviceSharedAndUncached)}; | ||
| 1740 | result.IsError()) { | ||
| 1741 | return result; | ||
| 1742 | } | ||
| 1743 | 1891 | ||
| 1744 | block_manager->UpdateLock( | 1892 | // Lock the table. |
| 1745 | addr, size / PageSize, | 1893 | KScopedLightLock lk(m_general_lock); |
| 1746 | [](KMemoryBlockManager::iterator block, KMemoryPermission permission) { | ||
| 1747 | block->ShareToDevice(permission); | ||
| 1748 | }, | ||
| 1749 | perm); | ||
| 1750 | 1894 | ||
| 1751 | return ResultSuccess; | 1895 | // Check the memory state. |
| 1896 | const auto test_state = | ||
| 1897 | (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap); | ||
| 1898 | size_t num_allocator_blocks; | ||
| 1899 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, test_state, | ||
| 1900 | test_state, perm, perm, | ||
| 1901 | KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked, | ||
| 1902 | KMemoryAttribute::None, KMemoryAttribute::DeviceShared)); | ||
| 1903 | |||
| 1904 | // Create an update allocator. | ||
| 1905 | Result allocator_result{ResultSuccess}; | ||
| 1906 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1907 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 1908 | R_TRY(allocator_result); | ||
| 1909 | |||
| 1910 | // Update the memory blocks. | ||
| 1911 | m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, | ||
| 1912 | &KMemoryBlock::ShareToDevice, KMemoryPermission::None); | ||
| 1913 | |||
| 1914 | R_SUCCEED(); | ||
| 1752 | } | 1915 | } |
| 1753 | 1916 | ||
| 1754 | Result KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) { | 1917 | Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) { |
| 1755 | KScopedLightLock lk(general_lock); | 1918 | // Lightly validate the range before doing anything else. |
| 1756 | 1919 | const size_t num_pages = size / PageSize; | |
| 1757 | KMemoryPermission perm{}; | 1920 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); |
| 1758 | if (const Result result{CheckMemoryState( | ||
| 1759 | nullptr, &perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute, | ||
| 1760 | KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None, | ||
| 1761 | KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None, | ||
| 1762 | KMemoryAttribute::DeviceSharedAndUncached)}; | ||
| 1763 | result.IsError()) { | ||
| 1764 | return result; | ||
| 1765 | } | ||
| 1766 | 1921 | ||
| 1767 | block_manager->UpdateLock( | 1922 | // Lock the table. |
| 1768 | addr, size / PageSize, | 1923 | KScopedLightLock lk(m_general_lock); |
| 1769 | [](KMemoryBlockManager::iterator block, KMemoryPermission permission) { | ||
| 1770 | block->UnshareToDevice(permission); | ||
| 1771 | }, | ||
| 1772 | perm); | ||
| 1773 | 1924 | ||
| 1774 | return ResultSuccess; | 1925 | // Check the memory state. |
| 1926 | size_t num_allocator_blocks; | ||
| 1927 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 1928 | std::addressof(num_allocator_blocks), address, size, | ||
| 1929 | KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap, | ||
| 1930 | KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap, | ||
| 1931 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 1932 | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); | ||
| 1933 | |||
| 1934 | // Create an update allocator. | ||
| 1935 | Result allocator_result{ResultSuccess}; | ||
| 1936 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1937 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 1938 | R_TRY(allocator_result); | ||
| 1939 | |||
| 1940 | // Update the memory blocks. | ||
| 1941 | const KMemoryBlockManager::MemoryBlockLockFunction lock_func = | ||
| 1942 | m_enable_device_address_space_merge | ||
| 1943 | ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare | ||
| 1944 | : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight; | ||
| 1945 | m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func, | ||
| 1946 | KMemoryPermission::None); | ||
| 1947 | |||
| 1948 | R_SUCCEED(); | ||
| 1775 | } | 1949 | } |
| 1776 | 1950 | ||
| 1777 | Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size) { | 1951 | Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, size_t size) { |
| 1778 | return this->LockMemoryAndOpen( | 1952 | // Lightly validate the range before doing anything else. |
| 1953 | const size_t num_pages = size / PageSize; | ||
| 1954 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 1955 | |||
| 1956 | // Lock the table. | ||
| 1957 | KScopedLightLock lk(m_general_lock); | ||
| 1958 | |||
| 1959 | // Check the memory state. | ||
| 1960 | size_t num_allocator_blocks; | ||
| 1961 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 1962 | std::addressof(num_allocator_blocks), address, size, KMemoryState::FlagCanDeviceMap, | ||
| 1963 | KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None, | ||
| 1964 | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); | ||
| 1965 | |||
| 1966 | // Create an update allocator. | ||
| 1967 | Result allocator_result{ResultSuccess}; | ||
| 1968 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1969 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 1970 | R_TRY(allocator_result); | ||
| 1971 | |||
| 1972 | // Update the memory blocks. | ||
| 1973 | m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, | ||
| 1974 | &KMemoryBlock::UnshareToDevice, KMemoryPermission::None); | ||
| 1975 | |||
| 1976 | R_SUCCEED(); | ||
| 1977 | } | ||
| 1978 | |||
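Where the old LockForDeviceAddressSpace wrapped block->ShareToDevice(permission) in a lambda, the new code hands KMemoryBlockManager::UpdateLock a pointer-to-member directly: &KMemoryBlock::ShareToDevice, &KMemoryBlock::UnshareToDevice, or one of the UpdateDeviceDisableMergeStateForShare* variants selected by m_enable_device_address_space_merge. A toy model of that call shape, with stand-in types:

```cpp
#include <cstddef>
#include <cstdio>

enum class Permission { None };

// Toy block; the real KMemoryBlock tracks device-share counts and the
// disable-merge attributes the new UpdateDeviceDisableMergeState* variants
// manipulate.
struct Block {
    int device_shares{};
    void ShareToDevice(Permission) { ++device_shares; }
    void UnshareToDevice(Permission) { --device_shares; }
};

// Same shape as KMemoryBlockManager::MemoryBlockLockFunction: a
// pointer-to-member instead of a lambda, so each call site is a one-liner
// and the iteration logic stays in one place.
using MemoryBlockLockFunction = void (Block::*)(Permission);

void UpdateLock(Block* blocks, std::size_t count, MemoryBlockLockFunction fn,
                Permission perm) {
    for (std::size_t i = 0; i < count; ++i) {
        (blocks[i].*fn)(perm);  // invoke the member pointer on each block
    }
}

int main() {
    Block blocks[4]{};
    UpdateLock(blocks, 4, &Block::ShareToDevice, Permission::None);
    UpdateLock(blocks, 4, &Block::UnshareToDevice, Permission::None);
    std::printf("%d\n", blocks[0].device_shares);  // 0
}
```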
| 1979 | Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size) { | ||
| 1980 | R_RETURN(this->LockMemoryAndOpen( | ||
| 1779 | out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, | 1981 | out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, |
| 1780 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All, | 1982 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All, |
| 1781 | KMemoryAttribute::None, | 1983 | KMemoryAttribute::None, |
| 1782 | static_cast<KMemoryPermission>(KMemoryPermission::NotMapped | | 1984 | static_cast<KMemoryPermission>(KMemoryPermission::NotMapped | |
| 1783 | KMemoryPermission::KernelReadWrite), | 1985 | KMemoryPermission::KernelReadWrite), |
| 1784 | KMemoryAttribute::Locked); | 1986 | KMemoryAttribute::Locked)); |
| 1785 | } | 1987 | } |
| 1786 | 1988 | ||
| 1787 | Result KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg) { | 1989 | Result KPageTable::UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg) { |
| 1788 | return this->UnlockMemory( | 1990 | R_RETURN(this->UnlockMemory( |
| 1789 | addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, | 1991 | addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, |
| 1790 | KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All, | 1992 | KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All, |
| 1791 | KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg); | 1993 | KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg)); |
| 1792 | } | ||
| 1793 | |||
| 1794 | Result KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) { | ||
| 1795 | block_manager = std::make_unique<KMemoryBlockManager>(start, end); | ||
| 1796 | |||
| 1797 | return ResultSuccess; | ||
| 1798 | } | ||
| 1799 | |||
| 1800 | bool KPageTable::IsRegionMapped(VAddr address, u64 size) { | ||
| 1801 | return CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free, | ||
| 1802 | KMemoryPermission::All, KMemoryPermission::None, KMemoryAttribute::Mask, | ||
| 1803 | KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped) | ||
| 1804 | .IsError(); | ||
| 1805 | } | 1994 | } |
| 1806 | 1995 | ||
| 1807 | bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const { | 1996 | bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const { |
| 1808 | auto start_ptr = system.Memory().GetPointer(addr); | 1997 | auto start_ptr = m_system.DeviceMemory().GetPointer<u8>(addr); |
| 1809 | for (u64 offset{}; offset < size; offset += PageSize) { | 1998 | for (u64 offset{}; offset < size; offset += PageSize) { |
| 1810 | if (start_ptr != system.Memory().GetPointer(addr + offset)) { | 1999 | if (start_ptr != m_system.DeviceMemory().GetPointer<u8>(addr + offset)) { |
| 1811 | return false; | 2000 | return false; |
| 1812 | } | 2001 | } |
| 1813 | start_ptr += PageSize; | 2002 | start_ptr += PageSize; |
| @@ -1815,8 +2004,7 @@ bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const { | |||
| 1815 | return true; | 2004 | return true; |
| 1816 | } | 2005 | } |
| 1817 | 2006 | ||
| 1818 | void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages, | 2007 | void KPageTable::AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list) { |
| 1819 | KPageGroup& page_linked_list) { | ||
| 1820 | VAddr addr{start}; | 2008 | VAddr addr{start}; |
| 1821 | while (addr < start + (num_pages * PageSize)) { | 2009 | while (addr < start + (num_pages * PageSize)) { |
| 1822 | const PAddr paddr{GetPhysicalAddr(addr)}; | 2010 | const PAddr paddr{GetPhysicalAddr(addr)}; |
| @@ -1826,16 +2014,16 @@ void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages, | |||
| 1826 | } | 2014 | } |
| 1827 | } | 2015 | } |
| 1828 | 2016 | ||
| 1829 | VAddr KPageTable::AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, | 2017 | VAddr KPageTable::AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages, |
| 1830 | u64 needed_num_pages, std::size_t align) { | 2018 | size_t align) { |
| 1831 | if (is_aslr_enabled) { | 2019 | if (m_enable_aslr) { |
| 1832 | UNIMPLEMENTED(); | 2020 | UNIMPLEMENTED(); |
| 1833 | } | 2021 | } |
| 1834 | return block_manager->FindFreeArea(start, region_num_pages, needed_num_pages, align, 0, | 2022 | return m_memory_block_manager.FindFreeArea(start, region_num_pages, needed_num_pages, align, 0, |
| 1835 | IsKernel() ? 1 : 4); | 2023 | IsKernel() ? 1 : 4); |
| 1836 | } | 2024 | } |
| 1837 | 2025 | ||
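AllocateVirtualMemory passes IsKernel() ? 1 : 4 as the final guard-pages argument of FindFreeArea. As intuition for what that argument costs a search, here is a hypothetical first-fit scan over a free list; the real FindFreeArea walks the memory-block tree and also honors alignment and offset, so this only models the guard-page accounting:

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

constexpr std::uint64_t PageSize = 0x1000;

struct FreeRange {
    std::uint64_t addr;
    std::uint64_t num_pages;
};

// Hypothetical first-fit scan: a candidate range must hold the allocation
// plus `guard_pages` of untouchable space on each side.
std::uint64_t FindFreeArea(const std::vector<FreeRange>& free_list,
                           std::uint64_t needed_pages, std::uint64_t guard_pages) {
    for (const auto& range : free_list) {
        if (range.num_pages >= needed_pages + 2 * guard_pages) {
            return range.addr + guard_pages * PageSize;  // skip the leading guard
        }
    }
    return 0;  // no fit
}

int main() {
    const std::vector<FreeRange> free_list{{0x100000, 8}};
    // User tables ask for 4 guard pages (1 + 2*4 > 8 -> no fit);
    // kernel tables ask for 1 (1 + 2*1 <= 8 -> fits).
    std::printf("user:   %#llx\n",
                static_cast<unsigned long long>(FindFreeArea(free_list, 1, 4)));
    std::printf("kernel: %#llx\n",
                static_cast<unsigned long long>(FindFreeArea(free_list, 1, 1)));
}
```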
| 1838 | Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group, | 2026 | Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group, |
| 1839 | OperationType operation) { | 2027 | OperationType operation) { |
| 1840 | ASSERT(this->IsLockedByCurrentThread()); | 2028 | ASSERT(this->IsLockedByCurrentThread()); |
| 1841 | 2029 | ||
| @@ -1844,11 +2032,11 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group, | |||
| 1844 | ASSERT(num_pages == page_group.GetNumPages()); | 2032 | ASSERT(num_pages == page_group.GetNumPages()); |
| 1845 | 2033 | ||
| 1846 | for (const auto& node : page_group.Nodes()) { | 2034 | for (const auto& node : page_group.Nodes()) { |
| 1847 | const std::size_t size{node.GetNumPages() * PageSize}; | 2035 | const size_t size{node.GetNumPages() * PageSize}; |
| 1848 | 2036 | ||
| 1849 | switch (operation) { | 2037 | switch (operation) { |
| 1850 | case OperationType::MapGroup: | 2038 | case OperationType::MapGroup: |
| 1851 | system.Memory().MapMemoryRegion(page_table_impl, addr, size, node.GetAddress()); | 2039 | m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress()); |
| 1852 | break; | 2040 | break; |
| 1853 | default: | 2041 | default: |
| 1854 | ASSERT(false); | 2042 | ASSERT(false); |
| @@ -1857,10 +2045,10 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup& | |||
| 1857 | addr += size; | 2045 | addr += size; |
| 1858 | } | 2046 | } |
| 1859 | 2047 | ||
| 1860 | return ResultSuccess; | 2048 | R_SUCCEED(); |
| 1861 | } | 2049 | } |
| 1862 | 2050 | ||
| 1863 | Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm, | 2051 | Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, |
| 1864 | OperationType operation, PAddr map_addr) { | 2052 | OperationType operation, PAddr map_addr) { |
| 1865 | ASSERT(this->IsLockedByCurrentThread()); | 2053 | ASSERT(this->IsLockedByCurrentThread()); |
| 1866 | 2054 | ||
| @@ -1870,12 +2058,12 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm, | |||
| 1870 | 2058 | ||
| 1871 | switch (operation) { | 2059 | switch (operation) { |
| 1872 | case OperationType::Unmap: | 2060 | case OperationType::Unmap: |
| 1873 | system.Memory().UnmapRegion(page_table_impl, addr, num_pages * PageSize); | 2061 | m_system.Memory().UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize); |
| 1874 | break; | 2062 | break; |
| 1875 | case OperationType::Map: { | 2063 | case OperationType::Map: { |
| 1876 | ASSERT(map_addr); | 2064 | ASSERT(map_addr); |
| 1877 | ASSERT(Common::IsAligned(map_addr, PageSize)); | 2065 | ASSERT(Common::IsAligned(map_addr, PageSize)); |
| 1878 | system.Memory().MapMemoryRegion(page_table_impl, addr, num_pages * PageSize, map_addr); | 2066 | m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr); |
| 1879 | break; | 2067 | break; |
| 1880 | } | 2068 | } |
| 1881 | case OperationType::ChangePermissions: | 2069 | case OperationType::ChangePermissions: |
| @@ -1884,25 +2072,25 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm, | |||
| 1884 | default: | 2072 | default: |
| 1885 | ASSERT(false); | 2073 | ASSERT(false); |
| 1886 | } | 2074 | } |
| 1887 | return ResultSuccess; | 2075 | R_SUCCEED(); |
| 1888 | } | 2076 | } |
| 1889 | 2077 | ||
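Both Operate overloads are the single funnel through which mapping changes reach the emulated software page table; the switch dispatches to Memory::MapMemoryRegion and Memory::UnmapRegion on *m_page_table_impl. A toy dispatcher of the same shape (a hash map stands in for Common::PageTable):

```cpp
#include <cstdint>
#include <cstdio>
#include <unordered_map>

constexpr std::uint64_t PageSize = 0x1000;

enum class OperationType { Map, Unmap, ChangePermissions };

// Stand-in for Common::PageTable: virtual page index -> physical address.
using PageTableImpl = std::unordered_map<std::uint64_t, std::uint64_t>;

void Operate(PageTableImpl& impl, std::uint64_t addr, std::uint64_t num_pages,
             OperationType op, std::uint64_t map_addr = 0) {
    for (std::uint64_t i = 0; i < num_pages; ++i) {
        const std::uint64_t page = addr / PageSize + i;
        switch (op) {
        case OperationType::Map:
            impl[page] = map_addr + i * PageSize;  // cf. Memory::MapMemoryRegion
            break;
        case OperationType::Unmap:
            impl.erase(page);  // cf. Memory::UnmapRegion
            break;
        case OperationType::ChangePermissions:
            // Permissions live in the block manager, not in this toy table.
            break;
        }
    }
}

int main() {
    PageTableImpl impl;
    Operate(impl, 0x10000, 4, OperationType::Map, 0x80000);
    Operate(impl, 0x10000, 4, OperationType::Unmap);
    std::printf("%zu\n", impl.size());  // 0
}
```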
| 1890 | VAddr KPageTable::GetRegionAddress(KMemoryState state) const { | 2078 | VAddr KPageTable::GetRegionAddress(KMemoryState state) const { |
| 1891 | switch (state) { | 2079 | switch (state) { |
| 1892 | case KMemoryState::Free: | 2080 | case KMemoryState::Free: |
| 1893 | case KMemoryState::Kernel: | 2081 | case KMemoryState::Kernel: |
| 1894 | return address_space_start; | 2082 | return m_address_space_start; |
| 1895 | case KMemoryState::Normal: | 2083 | case KMemoryState::Normal: |
| 1896 | return heap_region_start; | 2084 | return m_heap_region_start; |
| 1897 | case KMemoryState::Ipc: | 2085 | case KMemoryState::Ipc: |
| 1898 | case KMemoryState::NonSecureIpc: | 2086 | case KMemoryState::NonSecureIpc: |
| 1899 | case KMemoryState::NonDeviceIpc: | 2087 | case KMemoryState::NonDeviceIpc: |
| 1900 | return alias_region_start; | 2088 | return m_alias_region_start; |
| 1901 | case KMemoryState::Stack: | 2089 | case KMemoryState::Stack: |
| 1902 | return stack_region_start; | 2090 | return m_stack_region_start; |
| 1903 | case KMemoryState::Static: | 2091 | case KMemoryState::Static: |
| 1904 | case KMemoryState::ThreadLocal: | 2092 | case KMemoryState::ThreadLocal: |
| 1905 | return kernel_map_region_start; | 2093 | return m_kernel_map_region_start; |
| 1906 | case KMemoryState::Io: | 2094 | case KMemoryState::Io: |
| 1907 | case KMemoryState::Shared: | 2095 | case KMemoryState::Shared: |
| 1908 | case KMemoryState::AliasCode: | 2096 | case KMemoryState::AliasCode: |
| @@ -1913,31 +2101,31 @@ VAddr KPageTable::GetRegionAddress(KMemoryState state) const { | |||
| 1913 | case KMemoryState::GeneratedCode: | 2101 | case KMemoryState::GeneratedCode: |
| 1914 | case KMemoryState::CodeOut: | 2102 | case KMemoryState::CodeOut: |
| 1915 | case KMemoryState::Coverage: | 2103 | case KMemoryState::Coverage: |
| 1916 | return alias_code_region_start; | 2104 | return m_alias_code_region_start; |
| 1917 | case KMemoryState::Code: | 2105 | case KMemoryState::Code: |
| 1918 | case KMemoryState::CodeData: | 2106 | case KMemoryState::CodeData: |
| 1919 | return code_region_start; | 2107 | return m_code_region_start; |
| 1920 | default: | 2108 | default: |
| 1921 | UNREACHABLE(); | 2109 | UNREACHABLE(); |
| 1922 | } | 2110 | } |
| 1923 | } | 2111 | } |
| 1924 | 2112 | ||
| 1925 | std::size_t KPageTable::GetRegionSize(KMemoryState state) const { | 2113 | size_t KPageTable::GetRegionSize(KMemoryState state) const { |
| 1926 | switch (state) { | 2114 | switch (state) { |
| 1927 | case KMemoryState::Free: | 2115 | case KMemoryState::Free: |
| 1928 | case KMemoryState::Kernel: | 2116 | case KMemoryState::Kernel: |
| 1929 | return address_space_end - address_space_start; | 2117 | return m_address_space_end - m_address_space_start; |
| 1930 | case KMemoryState::Normal: | 2118 | case KMemoryState::Normal: |
| 1931 | return heap_region_end - heap_region_start; | 2119 | return m_heap_region_end - m_heap_region_start; |
| 1932 | case KMemoryState::Ipc: | 2120 | case KMemoryState::Ipc: |
| 1933 | case KMemoryState::NonSecureIpc: | 2121 | case KMemoryState::NonSecureIpc: |
| 1934 | case KMemoryState::NonDeviceIpc: | 2122 | case KMemoryState::NonDeviceIpc: |
| 1935 | return alias_region_end - alias_region_start; | 2123 | return m_alias_region_end - m_alias_region_start; |
| 1936 | case KMemoryState::Stack: | 2124 | case KMemoryState::Stack: |
| 1937 | return stack_region_end - stack_region_start; | 2125 | return m_stack_region_end - m_stack_region_start; |
| 1938 | case KMemoryState::Static: | 2126 | case KMemoryState::Static: |
| 1939 | case KMemoryState::ThreadLocal: | 2127 | case KMemoryState::ThreadLocal: |
| 1940 | return kernel_map_region_end - kernel_map_region_start; | 2128 | return m_kernel_map_region_end - m_kernel_map_region_start; |
| 1941 | case KMemoryState::Io: | 2129 | case KMemoryState::Io: |
| 1942 | case KMemoryState::Shared: | 2130 | case KMemoryState::Shared: |
| 1943 | case KMemoryState::AliasCode: | 2131 | case KMemoryState::AliasCode: |
| @@ -1948,16 +2136,16 @@ std::size_t KPageTable::GetRegionSize(KMemoryState state) const { | |||
| 1948 | case KMemoryState::GeneratedCode: | 2136 | case KMemoryState::GeneratedCode: |
| 1949 | case KMemoryState::CodeOut: | 2137 | case KMemoryState::CodeOut: |
| 1950 | case KMemoryState::Coverage: | 2138 | case KMemoryState::Coverage: |
| 1951 | return alias_code_region_end - alias_code_region_start; | 2139 | return m_alias_code_region_end - m_alias_code_region_start; |
| 1952 | case KMemoryState::Code: | 2140 | case KMemoryState::Code: |
| 1953 | case KMemoryState::CodeData: | 2141 | case KMemoryState::CodeData: |
| 1954 | return code_region_end - code_region_start; | 2142 | return m_code_region_end - m_code_region_start; |
| 1955 | default: | 2143 | default: |
| 1956 | UNREACHABLE(); | 2144 | UNREACHABLE(); |
| 1957 | } | 2145 | } |
| 1958 | } | 2146 | } |
| 1959 | 2147 | ||
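The is_in_heap / is_in_alias expressions in the CanContain body below are the standard half-open interval-overlap test, with an extra clause so that an empty region never counts as overlapping. Worked in isolation:

```cpp
#include <cstdint>
#include <cstdio>

// True when [addr, end) overlaps the non-empty region [rs, re).
bool Overlaps(std::uint64_t addr, std::uint64_t end, std::uint64_t rs,
              std::uint64_t re) {
    return !(end <= rs || re <= addr || rs == re);
}

int main() {
    std::printf("%d\n", Overlaps(0x1000, 0x3000, 0x2000, 0x4000));  // 1: overlap
    std::printf("%d\n", Overlaps(0x1000, 0x2000, 0x2000, 0x4000));  // 0: touching, half-open
    std::printf("%d\n", Overlaps(0x1000, 0x3000, 0x2000, 0x2000));  // 0: empty region
}
```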
| 1960 | bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) const { | 2148 | bool KPageTable::CanContain(VAddr addr, size_t size, KMemoryState state) const { |
| 1961 | const VAddr end = addr + size; | 2149 | const VAddr end = addr + size; |
| 1962 | const VAddr last = end - 1; | 2150 | const VAddr last = end - 1; |
| 1963 | 2151 | ||
| @@ -1966,10 +2154,10 @@ bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) const { | |||
| 1966 | 2154 | ||
| 1967 | const bool is_in_region = | 2155 | const bool is_in_region = |
| 1968 | region_start <= addr && addr < end && last <= region_start + region_size - 1; | 2156 | region_start <= addr && addr < end && last <= region_start + region_size - 1; |
| 1969 | const bool is_in_heap = !(end <= heap_region_start || heap_region_end <= addr || | 2157 | const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr || |
| 1970 | heap_region_start == heap_region_end); | 2158 | m_heap_region_start == m_heap_region_end); |
| 1971 | const bool is_in_alias = !(end <= alias_region_start || alias_region_end <= addr || | 2159 | const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr || |
| 1972 | alias_region_start == alias_region_end); | 2160 | m_alias_region_start == m_alias_region_end); |
| 1973 | switch (state) { | 2161 | switch (state) { |
| 1974 | case KMemoryState::Free: | 2162 | case KMemoryState::Free: |
| 1975 | case KMemoryState::Kernel: | 2163 | case KMemoryState::Kernel: |
| @@ -2008,23 +2196,23 @@ Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, | |||
| 2008 | KMemoryPermission perm, KMemoryAttribute attr_mask, | 2196 | KMemoryPermission perm, KMemoryAttribute attr_mask, |
| 2009 | KMemoryAttribute attr) const { | 2197 | KMemoryAttribute attr) const { |
| 2010 | // Validate the states match expectation. | 2198 | // Validate the states match expectation. |
| 2011 | R_UNLESS((info.state & state_mask) == state, ResultInvalidCurrentMemory); | 2199 | R_UNLESS((info.m_state & state_mask) == state, ResultInvalidCurrentMemory); |
| 2012 | R_UNLESS((info.perm & perm_mask) == perm, ResultInvalidCurrentMemory); | 2200 | R_UNLESS((info.m_permission & perm_mask) == perm, ResultInvalidCurrentMemory); |
| 2013 | R_UNLESS((info.attribute & attr_mask) == attr, ResultInvalidCurrentMemory); | 2201 | R_UNLESS((info.m_attribute & attr_mask) == attr, ResultInvalidCurrentMemory); |
| 2014 | 2202 | ||
| 2015 | return ResultSuccess; | 2203 | R_SUCCEED(); |
| 2016 | } | 2204 | } |
| 2017 | 2205 | ||
| 2018 | Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, | 2206 | Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size, |
| 2019 | std::size_t size, KMemoryState state_mask, | 2207 | KMemoryState state_mask, KMemoryState state, |
| 2020 | KMemoryState state, KMemoryPermission perm_mask, | 2208 | KMemoryPermission perm_mask, KMemoryPermission perm, |
| 2021 | KMemoryPermission perm, KMemoryAttribute attr_mask, | 2209 | KMemoryAttribute attr_mask, |
| 2022 | KMemoryAttribute attr) const { | 2210 | KMemoryAttribute attr) const { |
| 2023 | ASSERT(this->IsLockedByCurrentThread()); | 2211 | ASSERT(this->IsLockedByCurrentThread()); |
| 2024 | 2212 | ||
| 2025 | // Get information about the first block. | 2213 | // Get information about the first block. |
| 2026 | const VAddr last_addr = addr + size - 1; | 2214 | const VAddr last_addr = addr + size - 1; |
| 2027 | KMemoryBlockManager::const_iterator it = block_manager->FindIterator(addr); | 2215 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr); |
| 2028 | KMemoryInfo info = it->GetMemoryInfo(); | 2216 | KMemoryInfo info = it->GetMemoryInfo(); |
| 2029 | 2217 | ||
| 2030 | // If the start address isn't aligned, we need a block. | 2218 | // If the start address isn't aligned, we need a block. |
| @@ -2042,7 +2230,7 @@ Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, | |||
| 2042 | 2230 | ||
| 2043 | // Advance our iterator. | 2231 | // Advance our iterator. |
| 2044 | it++; | 2232 | it++; |
| 2045 | ASSERT(it != block_manager->cend()); | 2233 | ASSERT(it != m_memory_block_manager.cend()); |
| 2046 | info = it->GetMemoryInfo(); | 2234 | info = it->GetMemoryInfo(); |
| 2047 | } | 2235 | } |
| 2048 | 2236 | ||
| @@ -2054,12 +2242,12 @@ Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, | |||
| 2054 | *out_blocks_needed = blocks_for_start_align + blocks_for_end_align; | 2242 | *out_blocks_needed = blocks_for_start_align + blocks_for_end_align; |
| 2055 | } | 2243 | } |
| 2056 | 2244 | ||
| 2057 | return ResultSuccess; | 2245 | R_SUCCEED(); |
| 2058 | } | 2246 | } |
| 2059 | 2247 | ||
| 2060 | Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, | 2248 | Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, |
| 2061 | KMemoryAttribute* out_attr, std::size_t* out_blocks_needed, | 2249 | KMemoryAttribute* out_attr, size_t* out_blocks_needed, |
| 2062 | VAddr addr, std::size_t size, KMemoryState state_mask, | 2250 | VAddr addr, size_t size, KMemoryState state_mask, |
| 2063 | KMemoryState state, KMemoryPermission perm_mask, | 2251 | KMemoryState state, KMemoryPermission perm_mask, |
| 2064 | KMemoryPermission perm, KMemoryAttribute attr_mask, | 2252 | KMemoryPermission perm, KMemoryAttribute attr_mask, |
| 2065 | KMemoryAttribute attr, KMemoryAttribute ignore_attr) const { | 2253 | KMemoryAttribute attr, KMemoryAttribute ignore_attr) const { |
| @@ -2067,7 +2255,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, | |||
| 2067 | 2255 | ||
| 2068 | // Get information about the first block. | 2256 | // Get information about the first block. |
| 2069 | const VAddr last_addr = addr + size - 1; | 2257 | const VAddr last_addr = addr + size - 1; |
| 2070 | KMemoryBlockManager::const_iterator it = block_manager->FindIterator(addr); | 2258 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr); |
| 2071 | KMemoryInfo info = it->GetMemoryInfo(); | 2259 | KMemoryInfo info = it->GetMemoryInfo(); |
| 2072 | 2260 | ||
| 2073 | // If the start address isn't aligned, we need a block. | 2261 | // If the start address isn't aligned, we need a block. |
| @@ -2075,14 +2263,14 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, | |||
| 2075 | (Common::AlignDown(addr, PageSize) != info.GetAddress()) ? 1 : 0; | 2263 | (Common::AlignDown(addr, PageSize) != info.GetAddress()) ? 1 : 0; |
| 2076 | 2264 | ||
| 2077 | // Validate all blocks in the range have correct state. | 2265 | // Validate all blocks in the range have correct state. |
| 2078 | const KMemoryState first_state = info.state; | 2266 | const KMemoryState first_state = info.m_state; |
| 2079 | const KMemoryPermission first_perm = info.perm; | 2267 | const KMemoryPermission first_perm = info.m_permission; |
| 2080 | const KMemoryAttribute first_attr = info.attribute; | 2268 | const KMemoryAttribute first_attr = info.m_attribute; |
| 2081 | while (true) { | 2269 | while (true) { |
| 2082 | // Validate the current block. | 2270 | // Validate the current block. |
| 2083 | R_UNLESS(info.state == first_state, ResultInvalidCurrentMemory); | 2271 | R_UNLESS(info.m_state == first_state, ResultInvalidCurrentMemory); |
| 2084 | R_UNLESS(info.perm == first_perm, ResultInvalidCurrentMemory); | 2272 | R_UNLESS(info.m_permission == first_perm, ResultInvalidCurrentMemory); |
| 2085 | R_UNLESS((info.attribute | ignore_attr) == (first_attr | ignore_attr), | 2273 | R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr), |
| 2086 | ResultInvalidCurrentMemory); | 2274 | ResultInvalidCurrentMemory); |
| 2087 | 2275 | ||
| 2088 | // Validate against the provided masks. | 2276 | // Validate against the provided masks. |
| @@ -2095,7 +2283,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, | |||
| 2095 | 2283 | ||
| 2096 | // Advance our iterator. | 2284 | // Advance our iterator. |
| 2097 | it++; | 2285 | it++; |
| 2098 | ASSERT(it != block_manager->cend()); | 2286 | ASSERT(it != m_memory_block_manager.cend()); |
| 2099 | info = it->GetMemoryInfo(); | 2287 | info = it->GetMemoryInfo(); |
| 2100 | } | 2288 | } |
| 2101 | 2289 | ||
| @@ -2116,7 +2304,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, | |||
| 2116 | if (out_blocks_needed != nullptr) { | 2304 | if (out_blocks_needed != nullptr) { |
| 2117 | *out_blocks_needed = blocks_for_start_align + blocks_for_end_align; | 2305 | *out_blocks_needed = blocks_for_start_align + blocks_for_end_align; |
| 2118 | } | 2306 | } |
| 2119 | return ResultSuccess; | 2307 | R_SUCCEED(); |
| 2120 | } | 2308 | } |
| 2121 | 2309 | ||
| 2122 | Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size, | 2310 | Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size, |
| @@ -2134,7 +2322,7 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr | |||
| 2134 | R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); | 2322 | R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); |
| 2135 | 2323 | ||
| 2136 | // Lock the table. | 2324 | // Lock the table. |
| 2137 | KScopedLightLock lk(general_lock); | 2325 | KScopedLightLock lk(m_general_lock); |
| 2138 | 2326 | ||
| 2139 | // Check that the output page group is empty, if it exists. | 2327 | // Check that the output page group is empty, if it exists. |
| 2140 | if (out_pg) { | 2328 | if (out_pg) { |
| @@ -2162,6 +2350,12 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size, | |||
| 2162 | R_TRY(this->MakePageGroup(*out_pg, addr, num_pages)); | 2350 | R_TRY(this->MakePageGroup(*out_pg, addr, num_pages)); |
| 2163 | } | 2351 | } |
| 2164 | 2352 | ||
| 2353 | // Create an update allocator. | ||
| 2354 | Result allocator_result{ResultSuccess}; | ||
| 2355 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2356 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2357 | R_TRY(allocator_result); | ||
| 2358 | |||
| 2165 | // Decide on new perm and attr. | 2359 | // Decide on new perm and attr. |
| 2166 | new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm; | 2360 | new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm; |
| 2167 | KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr | lock_attr); | 2361 | KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr | lock_attr); |
| @@ -2172,9 +2366,11 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size, | |||
| 2172 | } | 2366 | } |
| 2173 | 2367 | ||
| 2174 | // Apply the memory block updates. | 2368 | // Apply the memory block updates. |
| 2175 | block_manager->Update(addr, num_pages, old_state, new_perm, new_attr); | 2369 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, |
| 2370 | new_attr, KMemoryBlockDisableMergeAttribute::Locked, | ||
| 2371 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2176 | 2372 | ||
| 2177 | return ResultSuccess; | 2373 | R_SUCCEED(); |
| 2178 | } | 2374 | } |
| 2179 | 2375 | ||
| 2180 | Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, | 2376 | Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, |
| @@ -2191,7 +2387,7 @@ Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, | |||
| 2191 | R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); | 2387 | R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); |
| 2192 | 2388 | ||
| 2193 | // Lock the table. | 2389 | // Lock the table. |
| 2194 | KScopedLightLock lk(general_lock); | 2390 | KScopedLightLock lk(m_general_lock); |
| 2195 | 2391 | ||
| 2196 | // Check the state. | 2392 | // Check the state. |
| 2197 | KMemoryState old_state{}; | 2393 | KMemoryState old_state{}; |
| @@ -2213,15 +2409,23 @@ Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, | |||
| 2213 | new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm; | 2409 | new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm; |
| 2214 | KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr & ~lock_attr); | 2410 | KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr & ~lock_attr); |
| 2215 | 2411 | ||
| 2412 | // Create an update allocator. | ||
| 2413 | Result allocator_result{ResultSuccess}; | ||
| 2414 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2415 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2416 | R_TRY(allocator_result); | ||
| 2417 | |||
| 2216 | // Update permission, if we need to. | 2418 | // Update permission, if we need to. |
| 2217 | if (new_perm != old_perm) { | 2419 | if (new_perm != old_perm) { |
| 2218 | R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions)); | 2420 | R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions)); |
| 2219 | } | 2421 | } |
| 2220 | 2422 | ||
| 2221 | // Apply the memory block updates. | 2423 | // Apply the memory block updates. |
| 2222 | block_manager->Update(addr, num_pages, old_state, new_perm, new_attr); | 2424 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, |
| 2425 | new_attr, KMemoryBlockDisableMergeAttribute::None, | ||
| 2426 | KMemoryBlockDisableMergeAttribute::Locked); | ||
| 2223 | 2427 | ||
| 2224 | return ResultSuccess; | 2428 | R_SUCCEED(); |
| 2225 | } | 2429 | } |
| 2226 | 2430 | ||
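Finally, note the symmetry of the lock/unlock pair: LockMemoryAndOpen ORs lock_attr into the attribute word (old_attr | lock_attr) and tags the blocks with DisableMergeAttribute::Locked, while UnlockMemory masks it back out (old_attr & ~lock_attr) and clears the same tag. In miniature:

```cpp
#include <cstdint>
#include <cstdio>

// Stand-in attribute bits; the real KMemoryAttribute has more flags.
enum Attr : std::uint32_t {
    None = 0,
    Locked = 1u << 0,
    DeviceShared = 1u << 1,
};

int main() {
    std::uint32_t attr = DeviceShared;
    attr |= Locked;   // LockMemoryAndOpen: set the lock attribute
    attr &= ~Locked;  // UnlockMemory: clear it, leaving other bits intact
    std::printf("%#x\n", static_cast<unsigned>(attr));  // 0x2 == DeviceShared
}
```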
| 2227 | } // namespace Kernel | 2431 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h index 25774f232..c6aeacd96 100644 --- a/src/core/hle/kernel/k_page_table.h +++ b/src/core/hle/kernel/k_page_table.h | |||
| @@ -9,8 +9,10 @@ | |||
| 9 | #include "common/common_types.h" | 9 | #include "common/common_types.h" |
| 10 | #include "common/page_table.h" | 10 | #include "common/page_table.h" |
| 11 | #include "core/file_sys/program_metadata.h" | 11 | #include "core/file_sys/program_metadata.h" |
| 12 | #include "core/hle/kernel/k_dynamic_resource_manager.h" | ||
| 12 | #include "core/hle/kernel/k_light_lock.h" | 13 | #include "core/hle/kernel/k_light_lock.h" |
| 13 | #include "core/hle/kernel/k_memory_block.h" | 14 | #include "core/hle/kernel/k_memory_block.h" |
| 15 | #include "core/hle/kernel/k_memory_block_manager.h" | ||
| 14 | #include "core/hle/kernel/k_memory_layout.h" | 16 | #include "core/hle/kernel/k_memory_layout.h" |
| 15 | #include "core/hle/kernel/k_memory_manager.h" | 17 | #include "core/hle/kernel/k_memory_manager.h" |
| 16 | #include "core/hle/result.h" | 18 | #include "core/hle/result.h" |
| @@ -34,58 +36,66 @@ public: | |||
| 34 | ~KPageTable(); | 36 | ~KPageTable(); |
| 35 | 37 | ||
| 36 | Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, | 38 | Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, |
| 37 | VAddr code_addr, std::size_t code_size, KMemoryManager::Pool pool); | 39 | VAddr code_addr, size_t code_size, |
| 38 | Result MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state, | 40 | KMemoryBlockSlabManager* mem_block_slab_manager, |
| 41 | KMemoryManager::Pool pool); | ||
| 42 | |||
| 43 | void Finalize(); | ||
| 44 | |||
| 45 | Result MapProcessCode(VAddr addr, size_t pages_count, KMemoryState state, | ||
| 39 | KMemoryPermission perm); | 46 | KMemoryPermission perm); |
| 40 | Result MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size); | 47 | Result MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size); |
| 41 | Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size, | 48 | Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size, |
| 42 | ICacheInvalidationStrategy icache_invalidation_strategy); | 49 | ICacheInvalidationStrategy icache_invalidation_strategy); |
| 43 | Result UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table, | 50 | Result UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table, |
| 44 | VAddr src_addr); | 51 | VAddr src_addr); |
| 45 | Result MapPhysicalMemory(VAddr addr, std::size_t size); | 52 | Result MapPhysicalMemory(VAddr addr, size_t size); |
| 46 | Result UnmapPhysicalMemory(VAddr addr, std::size_t size); | 53 | Result UnmapPhysicalMemory(VAddr addr, size_t size); |
| 47 | Result MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); | 54 | Result MapMemory(VAddr dst_addr, VAddr src_addr, size_t size); |
| 48 | Result UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); | 55 | Result UnmapMemory(VAddr dst_addr, VAddr src_addr, size_t size); |
| 49 | Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state, | 56 | Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state, |
| 50 | KMemoryPermission perm); | 57 | KMemoryPermission perm); |
| 51 | Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr, | 58 | Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr, |
| 52 | KMemoryState state, KMemoryPermission perm) { | 59 | KMemoryState state, KMemoryPermission perm) { |
| 53 | return this->MapPages(out_addr, num_pages, alignment, phys_addr, true, | 60 | R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, |
| 54 | this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize, | 61 | this->GetRegionAddress(state), |
| 55 | state, perm); | 62 | this->GetRegionSize(state) / PageSize, state, perm)); |
| 56 | } | 63 | } |
| 57 | Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state); | 64 | Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state); |
| 58 | Result UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state); | 65 | Result UnmapPages(VAddr address, size_t num_pages, KMemoryState state); |
| 59 | Result SetProcessMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission svc_perm); | 66 | Result SetProcessMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm); |
| 60 | KMemoryInfo QueryInfo(VAddr addr); | 67 | KMemoryInfo QueryInfo(VAddr addr); |
| 61 | Result ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm); | 68 | Result SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission perm); |
| 62 | Result ResetTransferMemory(VAddr addr, std::size_t size); | 69 | Result SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr); |
| 63 | Result SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission perm); | 70 | Result SetMaxHeapSize(size_t size); |
| 64 | Result SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr); | 71 | Result SetHeapSize(VAddr* out, size_t size); |
| 65 | Result SetMaxHeapSize(std::size_t size); | 72 | ResultVal<VAddr> AllocateAndMapMemory(size_t needed_num_pages, size_t align, bool is_map_only, |
| 66 | Result SetHeapSize(VAddr* out, std::size_t size); | 73 | VAddr region_start, size_t region_num_pages, |
| 67 | ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align, | 74 | KMemoryState state, KMemoryPermission perm, |
| 68 | bool is_map_only, VAddr region_start, | 75 | PAddr map_addr = 0); |
| 69 | std::size_t region_num_pages, KMemoryState state, | 76 | |
| 70 | KMemoryPermission perm, PAddr map_addr = 0); | 77 | Result LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm, |
| 71 | Result LockForDeviceAddressSpace(VAddr addr, std::size_t size); | 78 | bool is_aligned); |
| 72 | Result UnlockForDeviceAddressSpace(VAddr addr, std::size_t size); | 79 | Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size); |
| 73 | Result LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size); | 80 | |
| 74 | Result UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg); | 81 | Result UnlockForDeviceAddressSpace(VAddr addr, size_t size); |
| 82 | |||
| 83 | Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size); | ||
| 84 | Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg); | ||
| 75 | Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages, | 85 | Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages, |
| 76 | KMemoryState state_mask, KMemoryState state, | 86 | KMemoryState state_mask, KMemoryState state, |
| 77 | KMemoryPermission perm_mask, KMemoryPermission perm, | 87 | KMemoryPermission perm_mask, KMemoryPermission perm, |
| 78 | KMemoryAttribute attr_mask, KMemoryAttribute attr); | 88 | KMemoryAttribute attr_mask, KMemoryAttribute attr); |
| 79 | 89 | ||
| 80 | Common::PageTable& PageTableImpl() { | 90 | Common::PageTable& PageTableImpl() { |
| 81 | return page_table_impl; | 91 | return *m_page_table_impl; |
| 82 | } | 92 | } |
| 83 | 93 | ||
| 84 | const Common::PageTable& PageTableImpl() const { | 94 | const Common::PageTable& PageTableImpl() const { |
| 85 | return page_table_impl; | 95 | return *m_page_table_impl; |
| 86 | } | 96 | } |
| 87 | 97 | ||
| 88 | bool CanContain(VAddr addr, std::size_t size, KMemoryState state) const; | 98 | bool CanContain(VAddr addr, size_t size, KMemoryState state) const; |
| 89 | 99 | ||
| 90 | private: | 100 | private: |
| 91 | enum class OperationType : u32 { | 101 | enum class OperationType : u32 { |
| @@ -96,67 +106,65 @@ private: | |||
| 96 | ChangePermissionsAndRefresh, | 106 | ChangePermissionsAndRefresh, |
| 97 | }; | 107 | }; |
| 98 | 108 | ||
| 99 | static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = KMemoryAttribute::DontCareMask | | 109 | static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = |
| 100 | KMemoryAttribute::IpcLocked | | 110 | KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared; |
| 101 | KMemoryAttribute::DeviceShared; | ||
| 102 | 111 | ||
| 103 | Result InitializeMemoryLayout(VAddr start, VAddr end); | ||
| 104 | Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm); | 112 | Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm); |
| 105 | Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr, | 113 | Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr, |
| 106 | bool is_pa_valid, VAddr region_start, std::size_t region_num_pages, | 114 | bool is_pa_valid, VAddr region_start, size_t region_num_pages, |
| 107 | KMemoryState state, KMemoryPermission perm); | 115 | KMemoryState state, KMemoryPermission perm); |
| 108 | Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list); | 116 | Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list); |
| 109 | bool IsRegionMapped(VAddr address, u64 size); | ||
| 110 | bool IsRegionContiguous(VAddr addr, u64 size) const; | 117 | bool IsRegionContiguous(VAddr addr, u64 size) const; |
| 111 | void AddRegionToPages(VAddr start, std::size_t num_pages, KPageGroup& page_linked_list); | 118 | void AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list); |
| 112 | KMemoryInfo QueryInfoImpl(VAddr addr); | 119 | KMemoryInfo QueryInfoImpl(VAddr addr); |
| 113 | VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages, | 120 | VAddr AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages, |
| 114 | std::size_t align); | 121 | size_t align); |
| 115 | Result Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group, | 122 | Result Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group, |
| 116 | OperationType operation); | 123 | OperationType operation); |
| 117 | Result Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm, | 124 | Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation, |
| 118 | OperationType operation, PAddr map_addr = 0); | 125 | PAddr map_addr = 0); |
| 119 | VAddr GetRegionAddress(KMemoryState state) const; | 126 | VAddr GetRegionAddress(KMemoryState state) const; |
| 120 | std::size_t GetRegionSize(KMemoryState state) const; | 127 | size_t GetRegionSize(KMemoryState state) const; |
| 121 | 128 | ||
| 122 | VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages, | 129 | VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages, |
| 123 | std::size_t alignment, std::size_t offset, std::size_t guard_pages); | 130 | size_t alignment, size_t offset, size_t guard_pages); |
| 124 | 131 | ||
| 125 | Result CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, std::size_t size, | 132 | Result CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size, |
| 126 | KMemoryState state_mask, KMemoryState state, | 133 | KMemoryState state_mask, KMemoryState state, |
| 127 | KMemoryPermission perm_mask, KMemoryPermission perm, | 134 | KMemoryPermission perm_mask, KMemoryPermission perm, |
| 128 | KMemoryAttribute attr_mask, KMemoryAttribute attr) const; | 135 | KMemoryAttribute attr_mask, KMemoryAttribute attr) const; |
| 129 | Result CheckMemoryStateContiguous(VAddr addr, std::size_t size, KMemoryState state_mask, | 136 | Result CheckMemoryStateContiguous(VAddr addr, size_t size, KMemoryState state_mask, |
| 130 | KMemoryState state, KMemoryPermission perm_mask, | 137 | KMemoryState state, KMemoryPermission perm_mask, |
| 131 | KMemoryPermission perm, KMemoryAttribute attr_mask, | 138 | KMemoryPermission perm, KMemoryAttribute attr_mask, |
| 132 | KMemoryAttribute attr) const { | 139 | KMemoryAttribute attr) const { |
| 133 | return this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask, | 140 | R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask, |
| 134 | perm, attr_mask, attr); | 141 | perm, attr_mask, attr)); |
| 135 | } | 142 | } |
| 136 | 143 | ||
| 137 | Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state, | 144 | Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state, |
| 138 | KMemoryPermission perm_mask, KMemoryPermission perm, | 145 | KMemoryPermission perm_mask, KMemoryPermission perm, |
| 139 | KMemoryAttribute attr_mask, KMemoryAttribute attr) const; | 146 | KMemoryAttribute attr_mask, KMemoryAttribute attr) const; |
| 140 | Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, | 147 | Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, |
| 141 | KMemoryAttribute* out_attr, std::size_t* out_blocks_needed, VAddr addr, | 148 | KMemoryAttribute* out_attr, size_t* out_blocks_needed, VAddr addr, |
| 142 | std::size_t size, KMemoryState state_mask, KMemoryState state, | 149 | size_t size, KMemoryState state_mask, KMemoryState state, |
| 143 | KMemoryPermission perm_mask, KMemoryPermission perm, | 150 | KMemoryPermission perm_mask, KMemoryPermission perm, |
| 144 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | 151 | KMemoryAttribute attr_mask, KMemoryAttribute attr, |
| 145 | KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const; | 152 | KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const; |
| 146 | Result CheckMemoryState(std::size_t* out_blocks_needed, VAddr addr, std::size_t size, | 153 | Result CheckMemoryState(size_t* out_blocks_needed, VAddr addr, size_t size, |
| 147 | KMemoryState state_mask, KMemoryState state, | 154 | KMemoryState state_mask, KMemoryState state, |
| 148 | KMemoryPermission perm_mask, KMemoryPermission perm, | 155 | KMemoryPermission perm_mask, KMemoryPermission perm, |
| 149 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | 156 | KMemoryAttribute attr_mask, KMemoryAttribute attr, |
| 150 | KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { | 157 | KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { |
| 151 | return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size, | 158 | R_RETURN(CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size, |
| 152 | state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr); | 159 | state_mask, state, perm_mask, perm, attr_mask, attr, |
| 160 | ignore_attr)); | ||
| 153 | } | 161 | } |
| 154 | Result CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask, | 162 | Result CheckMemoryState(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state, |
| 155 | KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, | 163 | KMemoryPermission perm_mask, KMemoryPermission perm, |
| 156 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | 164 | KMemoryAttribute attr_mask, KMemoryAttribute attr, |
| 157 | KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { | 165 | KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { |
| 158 | return this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm, | 166 | R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm, |
| 159 | attr_mask, attr, ignore_attr); | 167 | attr_mask, attr, ignore_attr)); |
| 160 | } | 168 | } |
| 161 | 169 | ||
| 162 | Result LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size, | 170 | Result LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size, |
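Throughout this hunk (and the rest of the change set) bare `return` statements in `Result`-returning functions are converted to the `R_RETURN`/`R_SUCCEED` macro family, alongside the `R_UNLESS` and `R_SUCCEED_IF` uses that appear further down. As a rough, illustrative sketch of the shapes such macros can take (the real definitions live in yuzu's result header and may carry extra bookkeeping):

    // Illustrative sketch only: plausible shapes for the result macros used in
    // this change. The actual yuzu definitions may differ.
    #define R_SUCCEED() return ResultSuccess

    #define R_RETURN(res_expr) return (res_expr)

    #define R_SUCCEED_IF(expr)                                                     \
        do {                                                                       \
            if (expr) {                                                            \
                R_SUCCEED();                                                       \
            }                                                                      \
        } while (false)

    #define R_UNLESS(expr, res)                                                    \
        do {                                                                       \
            if (!(expr)) {                                                         \
                R_RETURN(res);                                                     \
            }                                                                      \
        } while (false)

Funneling every exit through one macro gives the kernel code a single choke point for result handling, which is why the mechanical `return` to `R_RETURN` conversion is worth the churn.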
| @@ -174,13 +182,13 @@ private: | |||
| 174 | bool IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages); | 182 | bool IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages); |
| 175 | 183 | ||
| 176 | bool IsLockedByCurrentThread() const { | 184 | bool IsLockedByCurrentThread() const { |
| 177 | return general_lock.IsLockedByCurrentThread(); | 185 | return m_general_lock.IsLockedByCurrentThread(); |
| 178 | } | 186 | } |
| 179 | 187 | ||
| 180 | bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) { | 188 | bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) { |
| 181 | ASSERT(this->IsLockedByCurrentThread()); | 189 | ASSERT(this->IsLockedByCurrentThread()); |
| 182 | 190 | ||
| 183 | return layout.IsHeapPhysicalAddress(cached_physical_heap_region, phys_addr); | 191 | return layout.IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr); |
| 184 | } | 192 | } |
| 185 | 193 | ||
| 186 | bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const { | 194 | bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const { |
| @@ -191,95 +199,93 @@ private: | |||
| 191 | return *out != 0; | 199 | return *out != 0; |
| 192 | } | 200 | } |
| 193 | 201 | ||
| 194 | mutable KLightLock general_lock; | 202 | mutable KLightLock m_general_lock; |
| 195 | mutable KLightLock map_physical_memory_lock; | 203 | mutable KLightLock m_map_physical_memory_lock; |
| 196 | |||
| 197 | std::unique_ptr<KMemoryBlockManager> block_manager; | ||
| 198 | 204 | ||
| 199 | public: | 205 | public: |
| 200 | constexpr VAddr GetAddressSpaceStart() const { | 206 | constexpr VAddr GetAddressSpaceStart() const { |
| 201 | return address_space_start; | 207 | return m_address_space_start; |
| 202 | } | 208 | } |
| 203 | constexpr VAddr GetAddressSpaceEnd() const { | 209 | constexpr VAddr GetAddressSpaceEnd() const { |
| 204 | return address_space_end; | 210 | return m_address_space_end; |
| 205 | } | 211 | } |
| 206 | constexpr std::size_t GetAddressSpaceSize() const { | 212 | constexpr size_t GetAddressSpaceSize() const { |
| 207 | return address_space_end - address_space_start; | 213 | return m_address_space_end - m_address_space_start; |
| 208 | } | 214 | } |
| 209 | constexpr VAddr GetHeapRegionStart() const { | 215 | constexpr VAddr GetHeapRegionStart() const { |
| 210 | return heap_region_start; | 216 | return m_heap_region_start; |
| 211 | } | 217 | } |
| 212 | constexpr VAddr GetHeapRegionEnd() const { | 218 | constexpr VAddr GetHeapRegionEnd() const { |
| 213 | return heap_region_end; | 219 | return m_heap_region_end; |
| 214 | } | 220 | } |
| 215 | constexpr std::size_t GetHeapRegionSize() const { | 221 | constexpr size_t GetHeapRegionSize() const { |
| 216 | return heap_region_end - heap_region_start; | 222 | return m_heap_region_end - m_heap_region_start; |
| 217 | } | 223 | } |
| 218 | constexpr VAddr GetAliasRegionStart() const { | 224 | constexpr VAddr GetAliasRegionStart() const { |
| 219 | return alias_region_start; | 225 | return m_alias_region_start; |
| 220 | } | 226 | } |
| 221 | constexpr VAddr GetAliasRegionEnd() const { | 227 | constexpr VAddr GetAliasRegionEnd() const { |
| 222 | return alias_region_end; | 228 | return m_alias_region_end; |
| 223 | } | 229 | } |
| 224 | constexpr std::size_t GetAliasRegionSize() const { | 230 | constexpr size_t GetAliasRegionSize() const { |
| 225 | return alias_region_end - alias_region_start; | 231 | return m_alias_region_end - m_alias_region_start; |
| 226 | } | 232 | } |
| 227 | constexpr VAddr GetStackRegionStart() const { | 233 | constexpr VAddr GetStackRegionStart() const { |
| 228 | return stack_region_start; | 234 | return m_stack_region_start; |
| 229 | } | 235 | } |
| 230 | constexpr VAddr GetStackRegionEnd() const { | 236 | constexpr VAddr GetStackRegionEnd() const { |
| 231 | return stack_region_end; | 237 | return m_stack_region_end; |
| 232 | } | 238 | } |
| 233 | constexpr std::size_t GetStackRegionSize() const { | 239 | constexpr size_t GetStackRegionSize() const { |
| 234 | return stack_region_end - stack_region_start; | 240 | return m_stack_region_end - m_stack_region_start; |
| 235 | } | 241 | } |
| 236 | constexpr VAddr GetKernelMapRegionStart() const { | 242 | constexpr VAddr GetKernelMapRegionStart() const { |
| 237 | return kernel_map_region_start; | 243 | return m_kernel_map_region_start; |
| 238 | } | 244 | } |
| 239 | constexpr VAddr GetKernelMapRegionEnd() const { | 245 | constexpr VAddr GetKernelMapRegionEnd() const { |
| 240 | return kernel_map_region_end; | 246 | return m_kernel_map_region_end; |
| 241 | } | 247 | } |
| 242 | constexpr VAddr GetCodeRegionStart() const { | 248 | constexpr VAddr GetCodeRegionStart() const { |
| 243 | return code_region_start; | 249 | return m_code_region_start; |
| 244 | } | 250 | } |
| 245 | constexpr VAddr GetCodeRegionEnd() const { | 251 | constexpr VAddr GetCodeRegionEnd() const { |
| 246 | return code_region_end; | 252 | return m_code_region_end; |
| 247 | } | 253 | } |
| 248 | constexpr VAddr GetAliasCodeRegionStart() const { | 254 | constexpr VAddr GetAliasCodeRegionStart() const { |
| 249 | return alias_code_region_start; | 255 | return m_alias_code_region_start; |
| 250 | } | 256 | } |
| 251 | constexpr VAddr GetAliasCodeRegionSize() const { | 257 | constexpr VAddr GetAliasCodeRegionSize() const { |
| 252 | return alias_code_region_end - alias_code_region_start; | 258 | return m_alias_code_region_end - m_alias_code_region_start; |
| 253 | } | 259 | } |
| 254 | std::size_t GetNormalMemorySize() { | 260 | size_t GetNormalMemorySize() { |
| 255 | KScopedLightLock lk(general_lock); | 261 | KScopedLightLock lk(m_general_lock); |
| 256 | return GetHeapSize() + mapped_physical_memory_size; | 262 | return GetHeapSize() + m_mapped_physical_memory_size; |
| 257 | } | 263 | } |
| 258 | constexpr std::size_t GetAddressSpaceWidth() const { | 264 | constexpr size_t GetAddressSpaceWidth() const { |
| 259 | return address_space_width; | 265 | return m_address_space_width; |
| 260 | } | 266 | } |
| 261 | constexpr std::size_t GetHeapSize() const { | 267 | constexpr size_t GetHeapSize() const { |
| 262 | return current_heap_end - heap_region_start; | 268 | return m_current_heap_end - m_heap_region_start; |
| 263 | } | 269 | } |
| 264 | constexpr bool IsInsideAddressSpace(VAddr address, std::size_t size) const { | 270 | constexpr bool IsInsideAddressSpace(VAddr address, size_t size) const { |
| 265 | return address_space_start <= address && address + size - 1 <= address_space_end - 1; | 271 | return m_address_space_start <= address && address + size - 1 <= m_address_space_end - 1; |
| 266 | } | 272 | } |
| 267 | constexpr bool IsOutsideAliasRegion(VAddr address, std::size_t size) const { | 273 | constexpr bool IsOutsideAliasRegion(VAddr address, size_t size) const { |
| 268 | return alias_region_start > address || address + size - 1 > alias_region_end - 1; | 274 | return m_alias_region_start > address || address + size - 1 > m_alias_region_end - 1; |
| 269 | } | 275 | } |
| 270 | constexpr bool IsOutsideStackRegion(VAddr address, std::size_t size) const { | 276 | constexpr bool IsOutsideStackRegion(VAddr address, size_t size) const { |
| 271 | return stack_region_start > address || address + size - 1 > stack_region_end - 1; | 277 | return m_stack_region_start > address || address + size - 1 > m_stack_region_end - 1; |
| 272 | } | 278 | } |
| 273 | constexpr bool IsInvalidRegion(VAddr address, std::size_t size) const { | 279 | constexpr bool IsInvalidRegion(VAddr address, size_t size) const { |
| 274 | return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1; | 280 | return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1; |
| 275 | } | 281 | } |
| 276 | constexpr bool IsInsideHeapRegion(VAddr address, std::size_t size) const { | 282 | constexpr bool IsInsideHeapRegion(VAddr address, size_t size) const { |
| 277 | return address + size > heap_region_start && heap_region_end > address; | 283 | return address + size > m_heap_region_start && m_heap_region_end > address; |
| 278 | } | 284 | } |
| 279 | constexpr bool IsInsideAliasRegion(VAddr address, std::size_t size) const { | 285 | constexpr bool IsInsideAliasRegion(VAddr address, size_t size) const { |
| 280 | return address + size > alias_region_start && alias_region_end > address; | 286 | return address + size > m_alias_region_start && m_alias_region_end > address; |
| 281 | } | 287 | } |
| 282 | constexpr bool IsOutsideASLRRegion(VAddr address, std::size_t size) const { | 288 | constexpr bool IsOutsideASLRRegion(VAddr address, size_t size) const { |
| 283 | if (IsInvalidRegion(address, size)) { | 289 | if (IsInvalidRegion(address, size)) { |
| 284 | return true; | 290 | return true; |
| 285 | } | 291 | } |
| @@ -291,73 +297,78 @@ public: | |||
| 291 | } | 297 | } |
| 292 | return {}; | 298 | return {}; |
| 293 | } | 299 | } |
| 294 | constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const { | 300 | constexpr bool IsInsideASLRRegion(VAddr address, size_t size) const { |
| 295 | return !IsOutsideASLRRegion(address, size); | 301 | return !IsOutsideASLRRegion(address, size); |
| 296 | } | 302 | } |
| 297 | constexpr std::size_t GetNumGuardPages() const { | 303 | constexpr size_t GetNumGuardPages() const { |
| 298 | return IsKernel() ? 1 : 4; | 304 | return IsKernel() ? 1 : 4; |
| 299 | } | 305 | } |
| 300 | PAddr GetPhysicalAddr(VAddr addr) const { | 306 | PAddr GetPhysicalAddr(VAddr addr) const { |
| 301 | const auto backing_addr = page_table_impl.backing_addr[addr >> PageBits]; | 307 | const auto backing_addr = m_page_table_impl->backing_addr[addr >> PageBits]; |
| 302 | ASSERT(backing_addr); | 308 | ASSERT(backing_addr); |
| 303 | return backing_addr + addr; | 309 | return backing_addr + addr; |
| 304 | } | 310 | } |
| 305 | constexpr bool Contains(VAddr addr) const { | 311 | constexpr bool Contains(VAddr addr) const { |
| 306 | return address_space_start <= addr && addr <= address_space_end - 1; | 312 | return m_address_space_start <= addr && addr <= m_address_space_end - 1; |
| 307 | } | 313 | } |
| 308 | constexpr bool Contains(VAddr addr, std::size_t size) const { | 314 | constexpr bool Contains(VAddr addr, size_t size) const { |
| 309 | return address_space_start <= addr && addr < addr + size && | 315 | return m_address_space_start <= addr && addr < addr + size && |
| 310 | addr + size - 1 <= address_space_end - 1; | 316 | addr + size - 1 <= m_address_space_end - 1; |
| 311 | } | 317 | } |
| 312 | 318 | ||
| 313 | private: | 319 | private: |
| 314 | constexpr bool IsKernel() const { | 320 | constexpr bool IsKernel() const { |
| 315 | return is_kernel; | 321 | return m_is_kernel; |
| 316 | } | 322 | } |
| 317 | constexpr bool IsAslrEnabled() const { | 323 | constexpr bool IsAslrEnabled() const { |
| 318 | return is_aslr_enabled; | 324 | return m_enable_aslr; |
| 319 | } | 325 | } |
| 320 | 326 | ||
| 321 | constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const { | 327 | constexpr bool ContainsPages(VAddr addr, size_t num_pages) const { |
| 322 | return (address_space_start <= addr) && | 328 | return (m_address_space_start <= addr) && |
| 323 | (num_pages <= (address_space_end - address_space_start) / PageSize) && | 329 | (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) && |
| 324 | (addr + num_pages * PageSize - 1 <= address_space_end - 1); | 330 | (addr + num_pages * PageSize - 1 <= m_address_space_end - 1); |
| 325 | } | 331 | } |
| 326 | 332 | ||
| 327 | private: | 333 | private: |
| 328 | VAddr address_space_start{}; | 334 | VAddr m_address_space_start{}; |
| 329 | VAddr address_space_end{}; | 335 | VAddr m_address_space_end{}; |
| 330 | VAddr heap_region_start{}; | 336 | VAddr m_heap_region_start{}; |
| 331 | VAddr heap_region_end{}; | 337 | VAddr m_heap_region_end{}; |
| 332 | VAddr current_heap_end{}; | 338 | VAddr m_current_heap_end{}; |
| 333 | VAddr alias_region_start{}; | 339 | VAddr m_alias_region_start{}; |
| 334 | VAddr alias_region_end{}; | 340 | VAddr m_alias_region_end{}; |
| 335 | VAddr stack_region_start{}; | 341 | VAddr m_stack_region_start{}; |
| 336 | VAddr stack_region_end{}; | 342 | VAddr m_stack_region_end{}; |
| 337 | VAddr kernel_map_region_start{}; | 343 | VAddr m_kernel_map_region_start{}; |
| 338 | VAddr kernel_map_region_end{}; | 344 | VAddr m_kernel_map_region_end{}; |
| 339 | VAddr code_region_start{}; | 345 | VAddr m_code_region_start{}; |
| 340 | VAddr code_region_end{}; | 346 | VAddr m_code_region_end{}; |
| 341 | VAddr alias_code_region_start{}; | 347 | VAddr m_alias_code_region_start{}; |
| 342 | VAddr alias_code_region_end{}; | 348 | VAddr m_alias_code_region_end{}; |
| 343 | 349 | ||
| 344 | std::size_t mapped_physical_memory_size{}; | 350 | size_t m_mapped_physical_memory_size{}; |
| 345 | std::size_t max_heap_size{}; | 351 | size_t m_max_heap_size{}; |
| 346 | std::size_t max_physical_memory_size{}; | 352 | size_t m_max_physical_memory_size{}; |
| 347 | std::size_t address_space_width{}; | 353 | size_t m_address_space_width{}; |
| 348 | 354 | ||
| 349 | bool is_kernel{}; | 355 | KMemoryBlockManager m_memory_block_manager; |
| 350 | bool is_aslr_enabled{}; | 356 | |
| 351 | 357 | bool m_is_kernel{}; | |
| 352 | u32 heap_fill_value{}; | 358 | bool m_enable_aslr{}; |
| 353 | const KMemoryRegion* cached_physical_heap_region{}; | 359 | bool m_enable_device_address_space_merge{}; |
| 354 | 360 | ||
| 355 | KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application}; | 361 | KMemoryBlockSlabManager* m_memory_block_slab_manager{}; |
| 356 | KMemoryManager::Direction allocation_option{KMemoryManager::Direction::FromFront}; | 362 | |
| 357 | 363 | u32 m_heap_fill_value{}; | |
| 358 | Common::PageTable page_table_impl; | 364 | const KMemoryRegion* m_cached_physical_heap_region{}; |
| 359 | 365 | ||
| 360 | Core::System& system; | 366 | KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application}; |
| 367 | KMemoryManager::Direction m_allocation_option{KMemoryManager::Direction::FromFront}; | ||
| 368 | |||
| 369 | std::unique_ptr<Common::PageTable> m_page_table_impl; | ||
| 370 | |||
| 371 | Core::System& m_system; | ||
| 361 | }; | 372 | }; |
| 362 | 373 | ||
| 363 | } // namespace Kernel | 374 | } // namespace Kernel |
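One detail worth calling out in the bounds helpers above (`IsInsideAddressSpace`, `Contains`, `ContainsPages`): they compare last bytes (`addr + size - 1 <= end - 1`) rather than one-past-the-end values, so a range ending exactly at the top of the 64-bit space cannot wrap to zero and slip past the check. A standalone illustration with hypothetical values (not yuzu code):

    #include <cstdint>
    #include <cstdio>

    using VAddr = std::uint64_t;

    // Naive form: "addr + size" wraps to 0 for a range that ends exactly at
    // 2^64, so the comparison against `end` passes spuriously.
    constexpr bool InsideNaive(VAddr start, VAddr end, VAddr addr, std::uint64_t size) {
        return start <= addr && addr + size <= end;
    }

    // Last-byte form, as used by the page-table helpers above: compare the
    // last byte of the range against the last byte of the region.
    constexpr bool InsideSafe(VAddr start, VAddr end, VAddr addr, std::uint64_t size) {
        return start <= addr && addr + size - 1 <= end - 1;
    }

    int main() {
        const VAddr start = 0x0000000000001000; // hypothetical region start
        const VAddr end   = 0x0000008000000000; // one past the end (39-bit space)

        // A 4 KiB range ending exactly at 2^64: far outside the region.
        const VAddr addr = 0xFFFFFFFFFFFFF000;
        const std::uint64_t size = 0x1000;

        std::printf("naive: %d\n", InsideNaive(start, end, addr, size)); // 1 (false positive)
        std::printf("safe:  %d\n", InsideSafe(start, end, addr, size));  // 0 (correct)
    }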
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp index d3e99665f..8c3495e5a 100644 --- a/src/core/hle/kernel/k_process.cpp +++ b/src/core/hle/kernel/k_process.cpp | |||
| @@ -72,7 +72,8 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string | |||
| 72 | 72 | ||
| 73 | process->name = std::move(process_name); | 73 | process->name = std::move(process_name); |
| 74 | process->resource_limit = res_limit; | 74 | process->resource_limit = res_limit; |
| 75 | process->status = ProcessStatus::Created; | 75 | process->system_resource_address = 0; |
| 76 | process->state = State::Created; | ||
| 76 | process->program_id = 0; | 77 | process->program_id = 0; |
| 77 | process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID() | 78 | process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID() |
| 78 | : kernel.CreateNewUserProcessID(); | 79 | : kernel.CreateNewUserProcessID(); |
| @@ -92,11 +93,12 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string | |||
| 92 | process->exception_thread = nullptr; | 93 | process->exception_thread = nullptr; |
| 93 | process->is_suspended = false; | 94 | process->is_suspended = false; |
| 94 | process->schedule_count = 0; | 95 | process->schedule_count = 0; |
| 96 | process->is_handle_table_initialized = false; | ||
| 95 | 97 | ||
| 96 | // Open a reference to the resource limit. | 98 | // Open a reference to the resource limit. |
| 97 | process->resource_limit->Open(); | 99 | process->resource_limit->Open(); |
| 98 | 100 | ||
| 99 | return ResultSuccess; | 101 | R_SUCCEED(); |
| 100 | } | 102 | } |
| 101 | 103 | ||
| 102 | void KProcess::DoWorkerTaskImpl() { | 104 | void KProcess::DoWorkerTaskImpl() { |
| @@ -121,9 +123,9 @@ void KProcess::DecrementRunningThreadCount() { | |||
| 121 | } | 123 | } |
| 122 | } | 124 | } |
| 123 | 125 | ||
| 124 | u64 KProcess::GetTotalPhysicalMemoryAvailable() const { | 126 | u64 KProcess::GetTotalPhysicalMemoryAvailable() { |
| 125 | const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) + | 127 | const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) + |
| 126 | page_table->GetNormalMemorySize() + GetSystemResourceSize() + image_size + | 128 | page_table.GetNormalMemorySize() + GetSystemResourceSize() + image_size + |
| 127 | main_thread_stack_size}; | 129 | main_thread_stack_size}; |
| 128 | if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application); | 130 | if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application); |
| 129 | capacity != pool_size) { | 131 | capacity != pool_size) { |
| @@ -135,16 +137,16 @@ u64 KProcess::GetTotalPhysicalMemoryAvailable() const { | |||
| 135 | return memory_usage_capacity; | 137 | return memory_usage_capacity; |
| 136 | } | 138 | } |
| 137 | 139 | ||
| 138 | u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const { | 140 | u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() { |
| 139 | return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize(); | 141 | return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize(); |
| 140 | } | 142 | } |
| 141 | 143 | ||
| 142 | u64 KProcess::GetTotalPhysicalMemoryUsed() const { | 144 | u64 KProcess::GetTotalPhysicalMemoryUsed() { |
| 143 | return image_size + main_thread_stack_size + page_table->GetNormalMemorySize() + | 145 | return image_size + main_thread_stack_size + page_table.GetNormalMemorySize() + |
| 144 | GetSystemResourceSize(); | 146 | GetSystemResourceSize(); |
| 145 | } | 147 | } |
| 146 | 148 | ||
| 147 | u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() const { | 149 | u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() { |
| 148 | return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage(); | 150 | return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage(); |
| 149 | } | 151 | } |
| 150 | 152 | ||
| @@ -244,7 +246,7 @@ Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr ad | |||
| 244 | shmem->Open(); | 246 | shmem->Open(); |
| 245 | shemen_info->Open(); | 247 | shemen_info->Open(); |
| 246 | 248 | ||
| 247 | return ResultSuccess; | 249 | R_SUCCEED(); |
| 248 | } | 250 | } |
| 249 | 251 | ||
| 250 | void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address, | 252 | void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address, |
| @@ -289,12 +291,12 @@ Result KProcess::Reset() { | |||
| 289 | KScopedSchedulerLock sl{kernel}; | 291 | KScopedSchedulerLock sl{kernel}; |
| 290 | 292 | ||
| 291 | // Validate that we're in a state that we can reset. | 293 | // Validate that we're in a state that we can reset. |
| 292 | R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState); | 294 | R_UNLESS(state != State::Terminated, ResultInvalidState); |
| 293 | R_UNLESS(is_signaled, ResultInvalidState); | 295 | R_UNLESS(is_signaled, ResultInvalidState); |
| 294 | 296 | ||
| 295 | // Clear signaled. | 297 | // Clear signaled. |
| 296 | is_signaled = false; | 298 | is_signaled = false; |
| 297 | return ResultSuccess; | 299 | R_SUCCEED(); |
| 298 | } | 300 | } |
| 299 | 301 | ||
| 300 | Result KProcess::SetActivity(ProcessActivity activity) { | 302 | Result KProcess::SetActivity(ProcessActivity activity) { |
| @@ -304,15 +306,13 @@ Result KProcess::SetActivity(ProcessActivity activity) { | |||
| 304 | KScopedSchedulerLock sl{kernel}; | 306 | KScopedSchedulerLock sl{kernel}; |
| 305 | 307 | ||
| 306 | // Validate our state. | 308 | // Validate our state. |
| 307 | R_UNLESS(status != ProcessStatus::Exiting, ResultInvalidState); | 309 | R_UNLESS(state != State::Terminating, ResultInvalidState); |
| 308 | R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState); | 310 | R_UNLESS(state != State::Terminated, ResultInvalidState); |
| 309 | 311 | ||
| 310 | // Either pause or resume. | 312 | // Either pause or resume. |
| 311 | if (activity == ProcessActivity::Paused) { | 313 | if (activity == ProcessActivity::Paused) { |
| 312 | // Verify that we're not suspended. | 314 | // Verify that we're not suspended. |
| 313 | if (is_suspended) { | 315 | R_UNLESS(!is_suspended, ResultInvalidState); |
| 314 | return ResultInvalidState; | ||
| 315 | } | ||
| 316 | 316 | ||
| 317 | // Suspend all threads. | 317 | // Suspend all threads. |
| 318 | for (auto* thread : GetThreadList()) { | 318 | for (auto* thread : GetThreadList()) { |
| @@ -325,9 +325,7 @@ Result KProcess::SetActivity(ProcessActivity activity) { | |||
| 325 | ASSERT(activity == ProcessActivity::Runnable); | 325 | ASSERT(activity == ProcessActivity::Runnable); |
| 326 | 326 | ||
| 327 | // Verify that we're suspended. | 327 | // Verify that we're suspended. |
| 328 | if (!is_suspended) { | 328 | R_UNLESS(is_suspended, ResultInvalidState); |
| 329 | return ResultInvalidState; | ||
| 330 | } | ||
| 331 | 329 | ||
| 332 | // Resume all threads. | 330 | // Resume all threads. |
| 333 | for (auto* thread : GetThreadList()) { | 331 | for (auto* thread : GetThreadList()) { |
| @@ -338,7 +336,7 @@ Result KProcess::SetActivity(ProcessActivity activity) { | |||
| 338 | SetSuspended(false); | 336 | SetSuspended(false); |
| 339 | } | 337 | } |
| 340 | 338 | ||
| 341 | return ResultSuccess; | 339 | R_SUCCEED(); |
| 342 | } | 340 | } |
| 343 | 341 | ||
| 344 | Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) { | 342 | Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) { |
| @@ -348,35 +346,38 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: | |||
| 348 | system_resource_size = metadata.GetSystemResourceSize(); | 346 | system_resource_size = metadata.GetSystemResourceSize(); |
| 349 | image_size = code_size; | 347 | image_size = code_size; |
| 350 | 348 | ||
| 349 | // We currently do not support process-specific system resources | ||
| 350 | UNIMPLEMENTED_IF(system_resource_size != 0); | ||
| 351 | |||
| 351 | KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory, | 352 | KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory, |
| 352 | code_size + system_resource_size); | 353 | code_size + system_resource_size); |
| 353 | if (!memory_reservation.Succeeded()) { | 354 | if (!memory_reservation.Succeeded()) { |
| 354 | LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes", | 355 | LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes", |
| 355 | code_size + system_resource_size); | 356 | code_size + system_resource_size); |
| 356 | return ResultLimitReached; | 357 | R_RETURN(ResultLimitReached); |
| 357 | } | 358 | } |
| 358 | // Initialize process address space | 359 | // Initialize process address space |
| 359 | if (const Result result{page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false, | 360 | if (const Result result{page_table.InitializeForProcess( |
| 360 | 0x8000000, code_size, | 361 | metadata.GetAddressSpaceType(), false, 0x8000000, code_size, |
| 361 | KMemoryManager::Pool::Application)}; | 362 | &kernel.GetApplicationMemoryBlockManager(), KMemoryManager::Pool::Application)}; |
| 362 | result.IsError()) { | 363 | result.IsError()) { |
| 363 | return result; | 364 | R_RETURN(result); |
| 364 | } | 365 | } |
| 365 | 366 | ||
| 366 | // Map process code region | 367 | // Map process code region |
| 367 | if (const Result result{page_table->MapProcessCode(page_table->GetCodeRegionStart(), | 368 | if (const Result result{page_table.MapProcessCode(page_table.GetCodeRegionStart(), |
| 368 | code_size / PageSize, KMemoryState::Code, | 369 | code_size / PageSize, KMemoryState::Code, |
| 369 | KMemoryPermission::None)}; | 370 | KMemoryPermission::None)}; |
| 370 | result.IsError()) { | 371 | result.IsError()) { |
| 371 | return result; | 372 | R_RETURN(result); |
| 372 | } | 373 | } |
| 373 | 374 | ||
| 374 | // Initialize process capabilities | 375 | // Initialize process capabilities |
| 375 | const auto& caps{metadata.GetKernelCapabilities()}; | 376 | const auto& caps{metadata.GetKernelCapabilities()}; |
| 376 | if (const Result result{ | 377 | if (const Result result{ |
| 377 | capabilities.InitializeForUserProcess(caps.data(), caps.size(), *page_table)}; | 378 | capabilities.InitializeForUserProcess(caps.data(), caps.size(), page_table)}; |
| 378 | result.IsError()) { | 379 | result.IsError()) { |
| 379 | return result; | 380 | R_RETURN(result); |
| 380 | } | 381 | } |
| 381 | 382 | ||
| 382 | // Set memory usage capacity | 383 | // Set memory usage capacity |
| @@ -384,12 +385,12 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: | |||
| 384 | case FileSys::ProgramAddressSpaceType::Is32Bit: | 385 | case FileSys::ProgramAddressSpaceType::Is32Bit: |
| 385 | case FileSys::ProgramAddressSpaceType::Is36Bit: | 386 | case FileSys::ProgramAddressSpaceType::Is36Bit: |
| 386 | case FileSys::ProgramAddressSpaceType::Is39Bit: | 387 | case FileSys::ProgramAddressSpaceType::Is39Bit: |
| 387 | memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart(); | 388 | memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart(); |
| 388 | break; | 389 | break; |
| 389 | 390 | ||
| 390 | case FileSys::ProgramAddressSpaceType::Is32BitNoMap: | 391 | case FileSys::ProgramAddressSpaceType::Is32BitNoMap: |
| 391 | memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart() + | 392 | memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart() + |
| 392 | page_table->GetAliasRegionEnd() - page_table->GetAliasRegionStart(); | 393 | page_table.GetAliasRegionEnd() - page_table.GetAliasRegionStart(); |
| 393 | break; | 394 | break; |
| 394 | 395 | ||
| 395 | default: | 396 | default: |
| @@ -397,10 +398,10 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: | |||
| 397 | } | 398 | } |
| 398 | 399 | ||
| 399 | // Create TLS region | 400 | // Create TLS region |
| 400 | R_TRY(this->CreateThreadLocalRegion(std::addressof(tls_region_address))); | 401 | R_TRY(this->CreateThreadLocalRegion(std::addressof(plr_address))); |
| 401 | memory_reservation.Commit(); | 402 | memory_reservation.Commit(); |
| 402 | 403 | ||
| 403 | return handle_table.Initialize(capabilities.GetHandleTableSize()); | 404 | R_RETURN(handle_table.Initialize(capabilities.GetHandleTableSize())); |
| 404 | } | 405 | } |
| 405 | 406 | ||
| 406 | void KProcess::Run(s32 main_thread_priority, u64 stack_size) { | 407 | void KProcess::Run(s32 main_thread_priority, u64 stack_size) { |
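`LoadFromMetadata` above leans on `KScopedResourceReservation`: memory is reserved before the fallible initialization steps, every early error return releases it automatically, and only the final `memory_reservation.Commit()` makes it permanent. A minimal sketch of that idiom under stand-in types (names here are hypothetical, not yuzu's actual class):

    #include <cstdint>

    // Minimal stand-in for the resource-limit object (hypothetical).
    struct ResourceLimit {
        std::uint64_t free = 0;
        bool Reserve(std::uint64_t n) {
            if (n > free) {
                return false;
            }
            free -= n;
            return true;
        }
        void Release(std::uint64_t n) {
            free += n;
        }
    };

    // Sketch of the scoped-reservation idiom: reserve up front, release
    // automatically on any early-error path, keep only after Commit().
    class ScopedReservation {
    public:
        ScopedReservation(ResourceLimit* limit, std::uint64_t amount)
            : m_limit(limit), m_amount(amount), m_succeeded(limit->Reserve(amount)) {}

        ~ScopedReservation() {
            if (m_succeeded && !m_committed) {
                m_limit->Release(m_amount); // error path: undo the reservation
            }
        }

        bool Succeeded() const {
            return m_succeeded;
        }
        void Commit() {
            m_committed = true; // success path: the reservation becomes permanent
        }

    private:
        ResourceLimit* m_limit;
        std::uint64_t m_amount;
        bool m_succeeded;
        bool m_committed = false;
    };

    int main() {
        ResourceLimit limit{0x10000};
        ScopedReservation reservation(&limit, 0x8000);
        if (!reservation.Succeeded()) {
            return 1; // mirrors the ResultLimitReached path above
        }
        // ... fallible initialization steps would run here ...
        reservation.Commit(); // all steps succeeded; keep the memory reserved
    }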
| @@ -409,15 +410,15 @@ void KProcess::Run(s32 main_thread_priority, u64 stack_size) { | |||
| 409 | resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size); | 410 | resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size); |
| 410 | 411 | ||
| 411 | const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)}; | 412 | const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)}; |
| 412 | ASSERT(!page_table->SetMaxHeapSize(heap_capacity).IsError()); | 413 | ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError()); |
| 413 | 414 | ||
| 414 | ChangeStatus(ProcessStatus::Running); | 415 | ChangeState(State::Running); |
| 415 | 416 | ||
| 416 | SetupMainThread(kernel.System(), *this, main_thread_priority, main_thread_stack_top); | 417 | SetupMainThread(kernel.System(), *this, main_thread_priority, main_thread_stack_top); |
| 417 | } | 418 | } |
| 418 | 419 | ||
| 419 | void KProcess::PrepareForTermination() { | 420 | void KProcess::PrepareForTermination() { |
| 420 | ChangeStatus(ProcessStatus::Exiting); | 421 | ChangeState(State::Terminating); |
| 421 | 422 | ||
| 422 | const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) { | 423 | const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) { |
| 423 | for (auto* thread : in_thread_list) { | 424 | for (auto* thread : in_thread_list) { |
| @@ -437,15 +438,15 @@ void KProcess::PrepareForTermination() { | |||
| 437 | 438 | ||
| 438 | stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList()); | 439 | stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList()); |
| 439 | 440 | ||
| 440 | this->DeleteThreadLocalRegion(tls_region_address); | 441 | this->DeleteThreadLocalRegion(plr_address); |
| 441 | tls_region_address = 0; | 442 | plr_address = 0; |
| 442 | 443 | ||
| 443 | if (resource_limit) { | 444 | if (resource_limit) { |
| 444 | resource_limit->Release(LimitableResource::PhysicalMemory, | 445 | resource_limit->Release(LimitableResource::PhysicalMemory, |
| 445 | main_thread_stack_size + image_size); | 446 | main_thread_stack_size + image_size); |
| 446 | } | 447 | } |
| 447 | 448 | ||
| 448 | ChangeStatus(ProcessStatus::Exited); | 449 | ChangeState(State::Terminated); |
| 449 | } | 450 | } |
| 450 | 451 | ||
| 451 | void KProcess::Finalize() { | 452 | void KProcess::Finalize() { |
| @@ -474,7 +475,7 @@ void KProcess::Finalize() { | |||
| 474 | } | 475 | } |
| 475 | 476 | ||
| 476 | // Finalize the page table. | 477 | // Finalize the page table. |
| 477 | page_table.reset(); | 478 | page_table.Finalize(); |
| 478 | 479 | ||
| 479 | // Perform inherited finalization. | 480 | // Perform inherited finalization. |
| 480 | KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize(); | 481 | KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize(); |
| @@ -499,7 +500,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) { | |||
| 499 | } | 500 | } |
| 500 | 501 | ||
| 501 | *out = tlr; | 502 | *out = tlr; |
| 502 | return ResultSuccess; | 503 | R_SUCCEED(); |
| 503 | } | 504 | } |
| 504 | } | 505 | } |
| 505 | 506 | ||
| @@ -528,7 +529,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) { | |||
| 528 | // We succeeded! | 529 | // We succeeded! |
| 529 | tlp_guard.Cancel(); | 530 | tlp_guard.Cancel(); |
| 530 | *out = tlr; | 531 | *out = tlr; |
| 531 | return ResultSuccess; | 532 | R_SUCCEED(); |
| 532 | } | 533 | } |
| 533 | 534 | ||
| 534 | Result KProcess::DeleteThreadLocalRegion(VAddr addr) { | 535 | Result KProcess::DeleteThreadLocalRegion(VAddr addr) { |
| @@ -576,7 +577,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) { | |||
| 576 | KThreadLocalPage::Free(kernel, page_to_free); | 577 | KThreadLocalPage::Free(kernel, page_to_free); |
| 577 | } | 578 | } |
| 578 | 579 | ||
| 579 | return ResultSuccess; | 580 | R_SUCCEED(); |
| 580 | } | 581 | } |
| 581 | 582 | ||
| 582 | bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size, | 583 | bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size, |
| @@ -628,7 +629,7 @@ bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size, | |||
| 628 | void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) { | 629 | void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) { |
| 629 | const auto ReprotectSegment = [&](const CodeSet::Segment& segment, | 630 | const auto ReprotectSegment = [&](const CodeSet::Segment& segment, |
| 630 | Svc::MemoryPermission permission) { | 631 | Svc::MemoryPermission permission) { |
| 631 | page_table->SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission); | 632 | page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission); |
| 632 | }; | 633 | }; |
| 633 | 634 | ||
| 634 | kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(), | 635 | kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(), |
| @@ -645,19 +646,18 @@ bool KProcess::IsSignaled() const { | |||
| 645 | } | 646 | } |
| 646 | 647 | ||
| 647 | KProcess::KProcess(KernelCore& kernel_) | 648 | KProcess::KProcess(KernelCore& kernel_) |
| 648 | : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{std::make_unique<KPageTable>( | 649 | : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{kernel_.System()}, |
| 649 | kernel_.System())}, | ||
| 650 | handle_table{kernel_}, address_arbiter{kernel_.System()}, condition_var{kernel_.System()}, | 650 | handle_table{kernel_}, address_arbiter{kernel_.System()}, condition_var{kernel_.System()}, |
| 651 | state_lock{kernel_}, list_lock{kernel_} {} | 651 | state_lock{kernel_}, list_lock{kernel_} {} |
| 652 | 652 | ||
| 653 | KProcess::~KProcess() = default; | 653 | KProcess::~KProcess() = default; |
| 654 | 654 | ||
| 655 | void KProcess::ChangeStatus(ProcessStatus new_status) { | 655 | void KProcess::ChangeState(State new_state) { |
| 656 | if (status == new_status) { | 656 | if (state == new_state) { |
| 657 | return; | 657 | return; |
| 658 | } | 658 | } |
| 659 | 659 | ||
| 660 | status = new_status; | 660 | state = new_state; |
| 661 | is_signaled = true; | 661 | is_signaled = true; |
| 662 | NotifyAvailable(); | 662 | NotifyAvailable(); |
| 663 | } | 663 | } |
| @@ -668,17 +668,17 @@ Result KProcess::AllocateMainThreadStack(std::size_t stack_size) { | |||
| 668 | // The kernel always ensures that the given stack size is page aligned. | 668 | // The kernel always ensures that the given stack size is page aligned. |
| 669 | main_thread_stack_size = Common::AlignUp(stack_size, PageSize); | 669 | main_thread_stack_size = Common::AlignUp(stack_size, PageSize); |
| 670 | 670 | ||
| 671 | const VAddr start{page_table->GetStackRegionStart()}; | 671 | const VAddr start{page_table.GetStackRegionStart()}; |
| 672 | const std::size_t size{page_table->GetStackRegionEnd() - start}; | 672 | const std::size_t size{page_table.GetStackRegionEnd() - start}; |
| 673 | 673 | ||
| 674 | CASCADE_RESULT(main_thread_stack_top, | 674 | CASCADE_RESULT(main_thread_stack_top, |
| 675 | page_table->AllocateAndMapMemory( | 675 | page_table.AllocateAndMapMemory( |
| 676 | main_thread_stack_size / PageSize, PageSize, false, start, size / PageSize, | 676 | main_thread_stack_size / PageSize, PageSize, false, start, size / PageSize, |
| 677 | KMemoryState::Stack, KMemoryPermission::UserReadWrite)); | 677 | KMemoryState::Stack, KMemoryPermission::UserReadWrite)); |
| 678 | 678 | ||
| 679 | main_thread_stack_top += main_thread_stack_size; | 679 | main_thread_stack_top += main_thread_stack_size; |
| 680 | 680 | ||
| 681 | return ResultSuccess; | 681 | R_SUCCEED(); |
| 682 | } | 682 | } |
| 683 | 683 | ||
| 684 | } // namespace Kernel | 684 | } // namespace Kernel |
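`ChangeState` (formerly `ChangeStatus`) is the process's signaling hook: any real transition flips `is_signaled` and notifies waiters, which is how a wait on the process handle observes termination. A condensed, illustrative sketch of the pattern (simplified stand-ins, not the actual `KSynchronizationObject` plumbing):

    #include <cstdio>

    enum class State { Created, Running, Terminating, Terminated };

    // Condensed sketch of the signal-on-transition pattern (stand-in types).
    class Process {
    public:
        void ChangeState(State new_state) {
            if (m_state == new_state) {
                return; // no transition, no signal
            }
            m_state = new_state;
            m_is_signaled = true;
            NotifyAvailable(); // would wake threads waiting on the process handle
        }

        bool IsSignaled() const {
            return m_is_signaled;
        }

    private:
        void NotifyAvailable() {
            std::printf("process signaled\n");
        }

        State m_state = State::Created;
        bool m_is_signaled = false;
    };

    int main() {
        Process p;
        p.ChangeState(State::Running);    // transition: signals
        p.ChangeState(State::Running);    // same state: silently ignored
        p.ChangeState(State::Terminated); // transition: signals again
    }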
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h index d56d73bab..2e0cc3d0b 100644 --- a/src/core/hle/kernel/k_process.h +++ b/src/core/hle/kernel/k_process.h | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include "core/hle/kernel/k_auto_object.h" | 13 | #include "core/hle/kernel/k_auto_object.h" |
| 14 | #include "core/hle/kernel/k_condition_variable.h" | 14 | #include "core/hle/kernel/k_condition_variable.h" |
| 15 | #include "core/hle/kernel/k_handle_table.h" | 15 | #include "core/hle/kernel/k_handle_table.h" |
| 16 | #include "core/hle/kernel/k_page_table.h" | ||
| 16 | #include "core/hle/kernel/k_synchronization_object.h" | 17 | #include "core/hle/kernel/k_synchronization_object.h" |
| 17 | #include "core/hle/kernel/k_thread_local_page.h" | 18 | #include "core/hle/kernel/k_thread_local_page.h" |
| 18 | #include "core/hle/kernel/k_worker_task.h" | 19 | #include "core/hle/kernel/k_worker_task.h" |
| @@ -31,7 +32,6 @@ class ProgramMetadata; | |||
| 31 | namespace Kernel { | 32 | namespace Kernel { |
| 32 | 33 | ||
| 33 | class KernelCore; | 34 | class KernelCore; |
| 34 | class KPageTable; | ||
| 35 | class KResourceLimit; | 35 | class KResourceLimit; |
| 36 | class KThread; | 36 | class KThread; |
| 37 | class KSharedMemoryInfo; | 37 | class KSharedMemoryInfo; |
| @@ -45,24 +45,6 @@ enum class MemoryRegion : u16 { | |||
| 45 | BASE = 3, | 45 | BASE = 3, |
| 46 | }; | 46 | }; |
| 47 | 47 | ||
| 48 | /** | ||
| 49 | * Indicates the status of a Process instance. | ||
| 50 | * | ||
| 51 | * @note These match the values as used by kernel, | ||
| 52 | * so new entries should only be added if RE | ||
| 53 | * shows that a new value has been introduced. | ||
| 54 | */ | ||
| 55 | enum class ProcessStatus { | ||
| 56 | Created, | ||
| 57 | CreatedWithDebuggerAttached, | ||
| 58 | Running, | ||
| 59 | WaitingForDebuggerToAttach, | ||
| 60 | DebuggerAttached, | ||
| 61 | Exiting, | ||
| 62 | Exited, | ||
| 63 | DebugBreak, | ||
| 64 | }; | ||
| 65 | |||
| 66 | enum class ProcessActivity : u32 { | 48 | enum class ProcessActivity : u32 { |
| 67 | Runnable, | 49 | Runnable, |
| 68 | Paused, | 50 | Paused, |
| @@ -89,6 +71,17 @@ public: | |||
| 89 | explicit KProcess(KernelCore& kernel_); | 71 | explicit KProcess(KernelCore& kernel_); |
| 90 | ~KProcess() override; | 72 | ~KProcess() override; |
| 91 | 73 | ||
| 74 | enum class State { | ||
| 75 | Created = static_cast<u32>(Svc::ProcessState::Created), | ||
| 76 | CreatedAttached = static_cast<u32>(Svc::ProcessState::CreatedAttached), | ||
| 77 | Running = static_cast<u32>(Svc::ProcessState::Running), | ||
| 78 | Crashed = static_cast<u32>(Svc::ProcessState::Crashed), | ||
| 79 | RunningAttached = static_cast<u32>(Svc::ProcessState::RunningAttached), | ||
| 80 | Terminating = static_cast<u32>(Svc::ProcessState::Terminating), | ||
| 81 | Terminated = static_cast<u32>(Svc::ProcessState::Terminated), | ||
| 82 | DebugBreak = static_cast<u32>(Svc::ProcessState::DebugBreak), | ||
| 83 | }; | ||
| 84 | |||
| 92 | enum : u64 { | 85 | enum : u64 { |
| 93 | /// Lowest allowed process ID for a kernel initial process. | 86 | /// Lowest allowed process ID for a kernel initial process. |
| 94 | InitialKIPIDMin = 1, | 87 | InitialKIPIDMin = 1, |
| @@ -114,12 +107,12 @@ public: | |||
| 114 | 107 | ||
| 115 | /// Gets a reference to the process' page table. | 108 | /// Gets a reference to the process' page table. |
| 116 | KPageTable& PageTable() { | 109 | KPageTable& PageTable() { |
| 117 | return *page_table; | 110 | return page_table; |
| 118 | } | 111 | } |
| 119 | 112 | ||
| 120 | /// Gets a const reference to the process' page table. | 113 | /// Gets a const reference to the process' page table. |
| 121 | const KPageTable& PageTable() const { | 114 | const KPageTable& PageTable() const { |
| 122 | return *page_table; | 115 | return page_table; |
| 123 | } | 116 | } |
| 124 | 117 | ||
| 125 | /// Gets a reference to the process' handle table. | 118 | /// Gets a reference to the process' handle table. |
| @@ -145,26 +138,25 @@ public: | |||
| 145 | } | 138 | } |
| 146 | 139 | ||
| 147 | Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) { | 140 | Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) { |
| 148 | return condition_var.Wait(address, cv_key, tag, ns); | 141 | R_RETURN(condition_var.Wait(address, cv_key, tag, ns)); |
| 149 | } | 142 | } |
| 150 | 143 | ||
| 151 | Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) { | 144 | Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) { |
| 152 | return address_arbiter.SignalToAddress(address, signal_type, value, count); | 145 | R_RETURN(address_arbiter.SignalToAddress(address, signal_type, value, count)); |
| 153 | } | 146 | } |
| 154 | 147 | ||
| 155 | Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value, | 148 | Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value, |
| 156 | s64 timeout) { | 149 | s64 timeout) { |
| 157 | return address_arbiter.WaitForAddress(address, arb_type, value, timeout); | 150 | R_RETURN(address_arbiter.WaitForAddress(address, arb_type, value, timeout)); |
| 158 | } | 151 | } |
| 159 | 152 | ||
| 160 | /// Gets the address to the process' dedicated TLS region. | 153 | VAddr GetProcessLocalRegionAddress() const { |
| 161 | VAddr GetTLSRegionAddress() const { | 154 | return plr_address; |
| 162 | return tls_region_address; | ||
| 163 | } | 155 | } |
| 164 | 156 | ||
| 165 | /// Gets the current status of the process | 157 | /// Gets the current status of the process |
| 166 | ProcessStatus GetStatus() const { | 158 | State GetState() const { |
| 167 | return status; | 159 | return state; |
| 168 | } | 160 | } |
| 169 | 161 | ||
| 170 | /// Gets the unique ID that identifies this particular process. | 162 | /// Gets the unique ID that identifies this particular process. |
| @@ -286,18 +278,18 @@ public: | |||
| 286 | } | 278 | } |
| 287 | 279 | ||
| 288 | /// Retrieves the total physical memory available to this process in bytes. | 280 | /// Retrieves the total physical memory available to this process in bytes. |
| 289 | u64 GetTotalPhysicalMemoryAvailable() const; | 281 | u64 GetTotalPhysicalMemoryAvailable(); |
| 290 | 282 | ||
| 291 | /// Retrieves the total physical memory available to this process in bytes, | 283 | /// Retrieves the total physical memory available to this process in bytes, |
| 292 | /// without the size of the personal system resource heap added to it. | 284 | /// without the size of the personal system resource heap added to it. |
| 293 | u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource() const; | 285 | u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource(); |
| 294 | 286 | ||
| 295 | /// Retrieves the total physical memory used by this process in bytes. | 287 | /// Retrieves the total physical memory used by this process in bytes. |
| 296 | u64 GetTotalPhysicalMemoryUsed() const; | 288 | u64 GetTotalPhysicalMemoryUsed(); |
| 297 | 289 | ||
| 298 | /// Retrieves the total physical memory used by this process in bytes, | 290 | /// Retrieves the total physical memory used by this process in bytes, |
| 299 | /// without the size of the personal system resource heap added to it. | 291 | /// without the size of the personal system resource heap added to it. |
| 300 | u64 GetTotalPhysicalMemoryUsedWithoutSystemResource() const; | 292 | u64 GetTotalPhysicalMemoryUsedWithoutSystemResource(); |
| 301 | 293 | ||
| 302 | /// Gets the list of all threads created with this process as their owner. | 294 | /// Gets the list of all threads created with this process as their owner. |
| 303 | std::list<KThread*>& GetThreadList() { | 295 | std::list<KThread*>& GetThreadList() { |
| @@ -415,19 +407,24 @@ private: | |||
| 415 | pinned_threads[core_id] = nullptr; | 407 | pinned_threads[core_id] = nullptr; |
| 416 | } | 408 | } |
| 417 | 409 | ||
| 418 | /// Changes the process status. If the status is different | 410 | void FinalizeHandleTable() { |
| 419 | /// from the current process status, then this will trigger | 411 | // Finalize the table. |
| 420 | /// a process signal. | 412 | handle_table.Finalize(); |
| 421 | void ChangeStatus(ProcessStatus new_status); | 413 | |
| 414 | // Note that the table is finalized. | ||
| 415 | is_handle_table_initialized = false; | ||
| 416 | } | ||
| 417 | |||
| 418 | void ChangeState(State new_state); | ||
| 422 | 419 | ||
| 423 | /// Allocates the main thread stack for the process, given the stack size in bytes. | 420 | /// Allocates the main thread stack for the process, given the stack size in bytes. |
| 424 | Result AllocateMainThreadStack(std::size_t stack_size); | 421 | Result AllocateMainThreadStack(std::size_t stack_size); |
| 425 | 422 | ||
| 426 | /// Memory manager for this process | 423 | /// Memory manager for this process |
| 427 | std::unique_ptr<KPageTable> page_table; | 424 | KPageTable page_table; |
| 428 | 425 | ||
| 429 | /// Current status of the process | 426 | /// Current status of the process |
| 430 | ProcessStatus status{}; | 427 | State state{}; |
| 431 | 428 | ||
| 432 | /// The ID of this process | 429 | /// The ID of this process |
| 433 | u64 process_id = 0; | 430 | u64 process_id = 0; |
| @@ -443,6 +440,8 @@ private: | |||
| 443 | /// Resource limit descriptor for this process | 440 | /// Resource limit descriptor for this process |
| 444 | KResourceLimit* resource_limit{}; | 441 | KResourceLimit* resource_limit{}; |
| 445 | 442 | ||
| 443 | VAddr system_resource_address{}; | ||
| 444 | |||
| 446 | /// The ideal CPU core for this process, threads are scheduled on this core by default. | 445 | /// The ideal CPU core for this process, threads are scheduled on this core by default. |
| 447 | u8 ideal_core = 0; | 446 | u8 ideal_core = 0; |
| 448 | 447 | ||
| @@ -469,7 +468,7 @@ private: | |||
| 469 | KConditionVariable condition_var; | 468 | KConditionVariable condition_var; |
| 470 | 469 | ||
| 471 | /// Address indicating the location of the process' dedicated TLS region. | 470 | /// Address indicating the location of the process' dedicated TLS region. |
| 472 | VAddr tls_region_address = 0; | 471 | VAddr plr_address = 0; |
| 473 | 472 | ||
| 474 | /// Random values for svcGetInfo RandomEntropy | 473 | /// Random values for svcGetInfo RandomEntropy |
| 475 | std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{}; | 474 | std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{}; |
| @@ -495,8 +494,12 @@ private: | |||
| 495 | /// Schedule count of this process | 494 | /// Schedule count of this process |
| 496 | s64 schedule_count{}; | 495 | s64 schedule_count{}; |
| 497 | 496 | ||
| 497 | size_t memory_release_hint{}; | ||
| 498 | |||
| 498 | bool is_signaled{}; | 499 | bool is_signaled{}; |
| 499 | bool is_suspended{}; | 500 | bool is_suspended{}; |
| 501 | bool is_immortal{}; | ||
| 502 | bool is_handle_table_initialized{}; | ||
| 500 | bool is_initialized{}; | 503 | bool is_initialized{}; |
| 501 | 504 | ||
| 502 | std::atomic<u16> num_running_threads{}; | 505 | std::atomic<u16> num_running_threads{}; |
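The new `KProcess::State` pins each enumerator to the corresponding `Svc::ProcessState` value, so no translation table is needed when the state is reported across the SVC boundary. A self-contained sketch of the technique (stand-in enums; the values are illustrative):

    #include <cstdint>

    // Stand-in for the SVC-facing enum; the ordering follows the diff above,
    // but the type itself is illustrative.
    enum class ProcessState : std::uint32_t {
        Created,
        CreatedAttached,
        Running,
        Crashed,
        RunningAttached,
        Terminating,
        Terminated,
        DebugBreak,
    };

    // Internal state pinned to the ABI values, as KProcess::State now does.
    enum class State : std::uint32_t {
        Created = static_cast<std::uint32_t>(ProcessState::Created),
        Running = static_cast<std::uint32_t>(ProcessState::Running),
        Terminating = static_cast<std::uint32_t>(ProcessState::Terminating),
        Terminated = static_cast<std::uint32_t>(ProcessState::Terminated),
    };

    // Reporting the state across the SVC boundary is then a plain cast.
    std::uint32_t StateForGuest(State s) {
        return static_cast<std::uint32_t>(s);
    }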
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp index 4252c9adb..faf03fcc8 100644 --- a/src/core/hle/kernel/k_server_session.cpp +++ b/src/core/hle/kernel/k_server_session.cpp | |||
| @@ -22,15 +22,12 @@ | |||
| 22 | #include "core/hle/kernel/k_thread.h" | 22 | #include "core/hle/kernel/k_thread.h" |
| 23 | #include "core/hle/kernel/k_thread_queue.h" | 23 | #include "core/hle/kernel/k_thread_queue.h" |
| 24 | #include "core/hle/kernel/kernel.h" | 24 | #include "core/hle/kernel/kernel.h" |
| 25 | #include "core/hle/kernel/service_thread.h" | ||
| 26 | #include "core/memory.h" | 25 | #include "core/memory.h" |
| 27 | 26 | ||
| 28 | namespace Kernel { | 27 | namespace Kernel { |
| 29 | 28 | ||
| 30 | using ThreadQueueImplForKServerSessionRequest = KThreadQueue; | 29 | using ThreadQueueImplForKServerSessionRequest = KThreadQueue; |
| 31 | 30 | ||
| 32 | static constexpr u32 MessageBufferSize = 0x100; | ||
| 33 | |||
| 34 | KServerSession::KServerSession(KernelCore& kernel_) | 31 | KServerSession::KServerSession(KernelCore& kernel_) |
| 35 | : KSynchronizationObject{kernel_}, m_lock{kernel_} {} | 32 | : KSynchronizationObject{kernel_}, m_lock{kernel_} {} |
| 36 | 33 | ||
| @@ -73,59 +70,7 @@ bool KServerSession::IsSignaled() const { | |||
| 73 | } | 70 | } |
| 74 | 71 | ||
| 75 | // Otherwise, we're signaled if we have a request and aren't handling one. | 72 | // Otherwise, we're signaled if we have a request and aren't handling one. |
| 76 | return !m_thread_request_list.empty() && m_current_thread_request == nullptr; | 73 | return !m_request_list.empty() && m_current_request == nullptr; |
| 77 | } | ||
| 78 | |||
| 79 | void KServerSession::AppendDomainHandler(SessionRequestHandlerPtr handler) { | ||
| 80 | manager->AppendDomainHandler(std::move(handler)); | ||
| 81 | } | ||
| 82 | |||
| 83 | std::size_t KServerSession::NumDomainRequestHandlers() const { | ||
| 84 | return manager->DomainHandlerCount(); | ||
| 85 | } | ||
| 86 | |||
| 87 | Result KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) { | ||
| 88 | if (!context.HasDomainMessageHeader()) { | ||
| 89 | return ResultSuccess; | ||
| 90 | } | ||
| 91 | |||
| 92 | // Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs | ||
| 93 | context.SetSessionRequestManager(manager); | ||
| 94 | |||
| 95 | // If there is a DomainMessageHeader, then this is CommandType "Request" | ||
| 96 | const auto& domain_message_header = context.GetDomainMessageHeader(); | ||
| 97 | const u32 object_id{domain_message_header.object_id}; | ||
| 98 | switch (domain_message_header.command) { | ||
| 99 | case IPC::DomainMessageHeader::CommandType::SendMessage: | ||
| 100 | if (object_id > manager->DomainHandlerCount()) { | ||
| 101 | LOG_CRITICAL(IPC, | ||
| 102 | "object_id {} is too big! This probably means a recent service call " | ||
| 103 | "to {} needed to return a new interface!", | ||
| 104 | object_id, name); | ||
| 105 | ASSERT(false); | ||
| 106 | return ResultSuccess; // Ignore error if asserts are off | ||
| 107 | } | ||
| 108 | if (auto strong_ptr = manager->DomainHandler(object_id - 1).lock()) { | ||
| 109 | return strong_ptr->HandleSyncRequest(*this, context); | ||
| 110 | } else { | ||
| 111 | ASSERT(false); | ||
| 112 | return ResultSuccess; | ||
| 113 | } | ||
| 114 | |||
| 115 | case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: { | ||
| 116 | LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id); | ||
| 117 | |||
| 118 | manager->CloseDomainHandler(object_id - 1); | ||
| 119 | |||
| 120 | IPC::ResponseBuilder rb{context, 2}; | ||
| 121 | rb.Push(ResultSuccess); | ||
| 122 | return ResultSuccess; | ||
| 123 | } | ||
| 124 | } | ||
| 125 | |||
| 126 | LOG_CRITICAL(IPC, "Unknown domain command={}", domain_message_header.command.Value()); | ||
| 127 | ASSERT(false); | ||
| 128 | return ResultSuccess; | ||
| 129 | } | 74 | } |
| 130 | 75 | ||
| 131 | Result KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) { | 76 | Result KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) { |
| @@ -134,43 +79,11 @@ Result KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& m | |||
| 134 | 79 | ||
| 135 | context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf); | 80 | context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf); |
| 136 | 81 | ||
| 137 | // Ensure we have a session request handler | 82 | return manager->QueueSyncRequest(parent, std::move(context)); |
| 138 | if (manager->HasSessionRequestHandler(*context)) { | ||
| 139 | if (auto strong_ptr = manager->GetServiceThread().lock()) { | ||
| 140 | strong_ptr->QueueSyncRequest(*parent, std::move(context)); | ||
| 141 | } else { | ||
| 142 | ASSERT_MSG(false, "strong_ptr is nullptr!"); | ||
| 143 | } | ||
| 144 | } else { | ||
| 145 | ASSERT_MSG(false, "handler is invalid!"); | ||
| 146 | } | ||
| 147 | |||
| 148 | return ResultSuccess; | ||
| 149 | } | 83 | } |
| 150 | 84 | ||
| 151 | Result KServerSession::CompleteSyncRequest(HLERequestContext& context) { | 85 | Result KServerSession::CompleteSyncRequest(HLERequestContext& context) { |
| 152 | Result result = ResultSuccess; | 86 | Result result = manager->CompleteSyncRequest(this, context); |
| 153 | |||
| 154 | // If the session has been converted to a domain, handle the domain request | ||
| 155 | if (manager->HasSessionRequestHandler(context)) { | ||
| 156 | if (IsDomain() && context.HasDomainMessageHeader()) { | ||
| 157 | result = HandleDomainSyncRequest(context); | ||
| 158 | // If there is no domain header, the regular session handler is used | ||
| 159 | } else if (manager->HasSessionHandler()) { | ||
| 160 | // If this ServerSession has an associated HLE handler, forward the request to it. | ||
| 161 | result = manager->SessionHandler().HandleSyncRequest(*this, context); | ||
| 162 | } | ||
| 163 | } else { | ||
| 164 | ASSERT_MSG(false, "Session handler is invalid, stubbing response!"); | ||
| 165 | IPC::ResponseBuilder rb(context, 2); | ||
| 166 | rb.Push(ResultSuccess); | ||
| 167 | } | ||
| 168 | |||
| 169 | if (convert_to_domain) { | ||
| 170 | ASSERT_MSG(!IsDomain(), "ServerSession is already a domain instance."); | ||
| 171 | manager->ConvertToDomain(); | ||
| 172 | convert_to_domain = false; | ||
| 173 | } | ||
| 174 | 87 | ||
| 175 | // The calling thread is waiting for this request to complete, so wake it up. | 88 | // The calling thread is waiting for this request to complete, so wake it up. |
| 176 | context.GetThread().EndWait(result); | 89 | context.GetThread().EndWait(result); |
| @@ -178,7 +91,7 @@ Result KServerSession::CompleteSyncRequest(HLERequestContext& context) { | |||
| 178 | return result; | 91 | return result; |
| 179 | } | 92 | } |
| 180 | 93 | ||
| 181 | Result KServerSession::OnRequest() { | 94 | Result KServerSession::OnRequest(KSessionRequest* request) { |
| 182 | // Create the wait queue. | 95 | // Create the wait queue. |
| 183 | ThreadQueueImplForKServerSessionRequest wait_queue{kernel}; | 96 | ThreadQueueImplForKServerSessionRequest wait_queue{kernel}; |
| 184 | 97 | ||
| @@ -198,14 +111,13 @@ Result KServerSession::OnRequest() { | |||
| 198 | this->QueueSyncRequest(GetCurrentThreadPointer(kernel), memory); | 111 | this->QueueSyncRequest(GetCurrentThreadPointer(kernel), memory); |
| 199 | } else { | 112 | } else { |
| 200 | // Non-HLE request. | 113 | // Non-HLE request. |
| 201 | auto* thread{GetCurrentThreadPointer(kernel)}; | ||
| 202 | 114 | ||
| 203 | // Get whether we're empty. | 115 | // Get whether we're empty. |
| 204 | const bool was_empty = m_thread_request_list.empty(); | 116 | const bool was_empty = m_request_list.empty(); |
| 205 | 117 | ||
| 206 | // Add the thread to the list. | 118 | // Add the request to the list. |
| 207 | thread->Open(); | 119 | request->Open(); |
| 208 | m_thread_request_list.push_back(thread); | 120 | m_request_list.push_back(*request); |
| 209 | 121 | ||
| 210 | // If we were empty, signal. | 122 | // If we were empty, signal. |
| 211 | if (was_empty) { | 123 | if (was_empty) { |
| @@ -213,6 +125,9 @@ Result KServerSession::OnRequest() { | |||
| 213 | } | 125 | } |
| 214 | } | 126 | } |
| 215 | 127 | ||
| 128 | // If we have a request event, this is asynchronous, and we don't need to wait. | ||
| 129 | R_SUCCEED_IF(request->GetEvent() != nullptr); | ||
| 130 | |||
| 216 | // This is a synchronous request, so we should wait for our request to complete. | 131 | // This is a synchronous request, so we should wait for our request to complete. |
| 217 | GetCurrentThread(kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC); | 132 | GetCurrentThread(kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC); |
| 218 | GetCurrentThread(kernel).BeginWait(&wait_queue); | 133 | GetCurrentThread(kernel).BeginWait(&wait_queue); |
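The queueing logic above turns on a single condition: a request that carries a KEvent is asynchronous, so the client returns as soon as the request is on the list, while an event-less request puts the calling thread to sleep until SendReply ends its wait. A minimal sketch of that control flow, using stand-in types rather than the kernel's real classes:

    #include <deque>

    // Stand-ins for KEvent / KSessionRequest / KServerSession; illustrative only.
    struct Event {};
    struct Request {
        Event* event = nullptr; // non-null marks the request as asynchronous
    };

    struct Session {
        std::deque<Request*> request_list;

        // Mirrors the shape of KServerSession::OnRequest.
        void OnRequest(Request* request) {
            const bool was_empty = request_list.empty();
            request_list.push_back(request);
            if (was_empty) {
                // Signal the session so a waiting server thread wakes up.
            }
            if (request->event != nullptr) {
                return; // asynchronous: completion is reported by Signal() later
            }
            // Synchronous: block the calling thread here until the reply,
            // which is what BeginWait(&wait_queue) does in the real code.
        }
    };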
| @@ -223,32 +138,32 @@ Result KServerSession::OnRequest() { | |||
| 223 | 138 | ||
| 224 | Result KServerSession::SendReply() { | 139 | Result KServerSession::SendReply() { |
| 225 | // Lock the session. | 140 | // Lock the session. |
| 226 | KScopedLightLock lk(m_lock); | 141 | KScopedLightLock lk{m_lock}; |
| 227 | 142 | ||
| 228 | // Get the request. | 143 | // Get the request. |
| 229 | KThread* client_thread; | 144 | KSessionRequest* request; |
| 230 | { | 145 | { |
| 231 | KScopedSchedulerLock sl{kernel}; | 146 | KScopedSchedulerLock sl{kernel}; |
| 232 | 147 | ||
| 233 | // Get the current request. | 148 | // Get the current request. |
| 234 | client_thread = m_current_thread_request; | 149 | request = m_current_request; |
| 235 | R_UNLESS(client_thread != nullptr, ResultInvalidState); | 150 | R_UNLESS(request != nullptr, ResultInvalidState); |
| 236 | 151 | ||
| 237 | // Clear the current request, since we're processing it. | 152 | // Clear the current request, since we're processing it. |
| 238 | m_current_thread_request = nullptr; | 153 | m_current_request = nullptr; |
| 239 | if (!m_thread_request_list.empty()) { | 154 | if (!m_request_list.empty()) { |
| 240 | this->NotifyAvailable(); | 155 | this->NotifyAvailable(); |
| 241 | } | 156 | } |
| 242 | } | 157 | } |
| 243 | 158 | ||
| 244 | // Close reference to the request once we're done processing it. | 159 | // Close reference to the request once we're done processing it. |
| 245 | SCOPE_EXIT({ client_thread->Close(); }); | 160 | SCOPE_EXIT({ request->Close(); }); |
| 246 | 161 | ||
| 247 | // Extract relevant information from the request. | 162 | // Extract relevant information from the request. |
| 248 | // const uintptr_t client_message = request->GetAddress(); | 163 | const uintptr_t client_message = request->GetAddress(); |
| 249 | // const size_t client_buffer_size = request->GetSize(); | 164 | const size_t client_buffer_size = request->GetSize(); |
| 250 | // KThread *client_thread = request->GetThread(); | 165 | KThread* client_thread = request->GetThread(); |
| 251 | // KEvent *event = request->GetEvent(); | 166 | KEvent* event = request->GetEvent(); |
| 252 | 167 | ||
| 253 | // Check whether we're closed. | 168 | // Check whether we're closed. |
| 254 | const bool closed = (client_thread == nullptr || parent->IsClientClosed()); | 169 | const bool closed = (client_thread == nullptr || parent->IsClientClosed()); |
| @@ -261,8 +176,8 @@ Result KServerSession::SendReply() { | |||
| 261 | UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess()); | 176 | UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess()); |
| 262 | 177 | ||
| 263 | auto* src_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress()); | 178 | auto* src_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress()); |
| 264 | auto* dst_msg_buffer = memory.GetPointer(client_thread->GetTLSAddress()); | 179 | auto* dst_msg_buffer = memory.GetPointer(client_message); |
| 265 | std::memcpy(dst_msg_buffer, src_msg_buffer, MessageBufferSize); | 180 | std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size); |
| 266 | } else { | 181 | } else { |
| 267 | result = ResultSessionClosed; | 182 | result = ResultSessionClosed; |
| 268 | } | 183 | } |
| @@ -278,11 +193,30 @@ Result KServerSession::SendReply() { | |||
| 278 | 193 | ||
| 279 | // If there's a client thread, update it. | 194 | // If there's a client thread, update it. |
| 280 | if (client_thread != nullptr) { | 195 | if (client_thread != nullptr) { |
| 281 | // End the client thread's wait. | 196 | if (event != nullptr) { |
| 282 | KScopedSchedulerLock sl{kernel}; | 197 | // // Get the client process/page table. |
| 198 | // KProcess *client_process = client_thread->GetOwnerProcess(); | ||
| 199 | // KPageTable *client_page_table = &client_process->PageTable(); | ||
| 200 | |||
| 201 | // // If we need to, reply with an async error. | ||
| 202 | // if (R_FAILED(client_result)) { | ||
| 203 | // ReplyAsyncError(client_process, client_message, client_buffer_size, | ||
| 204 | // client_result); | ||
| 205 | // } | ||
| 206 | |||
| 207 | // // Unlock the client buffer. | ||
| 208 | // // NOTE: Nintendo does not check the result of this. | ||
| 209 | // client_page_table->UnlockForIpcUserBuffer(client_message, client_buffer_size); | ||
| 210 | |||
| 211 | // Signal the event. | ||
| 212 | event->Signal(); | ||
| 213 | } else { | ||
| 214 | // End the client thread's wait. | ||
| 215 | KScopedSchedulerLock sl{kernel}; | ||
| 283 | 216 | ||
| 284 | if (!client_thread->IsTerminationRequested()) { | 217 | if (!client_thread->IsTerminationRequested()) { |
| 285 | client_thread->EndWait(client_result); | 218 | client_thread->EndWait(client_result); |
| 219 | } | ||
| 286 | } | 220 | } |
| 287 | } | 221 | } |
| 288 | 222 | ||
| @@ -291,10 +225,10 @@ Result KServerSession::SendReply() { | |||
| 291 | 225 | ||
| 292 | Result KServerSession::ReceiveRequest() { | 226 | Result KServerSession::ReceiveRequest() { |
| 293 | // Lock the session. | 227 | // Lock the session. |
| 294 | KScopedLightLock lk(m_lock); | 228 | KScopedLightLock lk{m_lock}; |
| 295 | 229 | ||
| 296 | // Get the request and client thread. | 230 | // Get the request and client thread. |
| 297 | // KSessionRequest *request; | 231 | KSessionRequest* request; |
| 298 | KThread* client_thread; | 232 | KThread* client_thread; |
| 299 | 233 | ||
| 300 | { | 234 | { |
| @@ -304,35 +238,41 @@ Result KServerSession::ReceiveRequest() { | |||
| 304 | R_UNLESS(!parent->IsClientClosed(), ResultSessionClosed); | 238 | R_UNLESS(!parent->IsClientClosed(), ResultSessionClosed); |
| 305 | 239 | ||
| 306 | // Ensure we aren't already servicing a request. | 240 | // Ensure we aren't already servicing a request. |
| 307 | R_UNLESS(m_current_thread_request == nullptr, ResultNotFound); | 241 | R_UNLESS(m_current_request == nullptr, ResultNotFound); |
| 308 | 242 | ||
| 309 | // Ensure we have a request to service. | 243 | // Ensure we have a request to service. |
| 310 | R_UNLESS(!m_thread_request_list.empty(), ResultNotFound); | 244 | R_UNLESS(!m_request_list.empty(), ResultNotFound); |
| 311 | 245 | ||
| 312 | // Pop the first request from the list. | 246 | // Pop the first request from the list. |
| 313 | client_thread = m_thread_request_list.front(); | 247 | request = &m_request_list.front(); |
| 314 | m_thread_request_list.pop_front(); | 248 | m_request_list.pop_front(); |
| 315 | 249 | ||
| 316 | // Get the thread for the request. | 250 | // Get the thread for the request. |
| 251 | client_thread = request->GetThread(); | ||
| 317 | R_UNLESS(client_thread != nullptr, ResultSessionClosed); | 252 | R_UNLESS(client_thread != nullptr, ResultSessionClosed); |
| 318 | 253 | ||
| 319 | // Open the client thread. | 254 | // Open the client thread. |
| 320 | client_thread->Open(); | 255 | client_thread->Open(); |
| 321 | } | 256 | } |
| 322 | 257 | ||
| 323 | // SCOPE_EXIT({ client_thread->Close(); }); | 258 | SCOPE_EXIT({ client_thread->Close(); }); |
| 324 | 259 | ||
| 325 | // Set the request as our current. | 260 | // Set the request as our current. |
| 326 | m_current_thread_request = client_thread; | 261 | m_current_request = request; |
| 262 | |||
| 263 | // Get the client address. | ||
| 264 | uintptr_t client_message = request->GetAddress(); | ||
| 265 | size_t client_buffer_size = request->GetSize(); | ||
| 266 | // bool recv_list_broken = false; | ||
| 327 | 267 | ||
| 328 | // Receive the message. | 268 | // Receive the message. |
| 329 | Core::Memory::Memory& memory{kernel.System().Memory()}; | 269 | Core::Memory::Memory& memory{kernel.System().Memory()}; |
| 330 | KThread* server_thread{GetCurrentThreadPointer(kernel)}; | 270 | KThread* server_thread{GetCurrentThreadPointer(kernel)}; |
| 331 | UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess()); | 271 | UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess()); |
| 332 | 272 | ||
| 333 | auto* src_msg_buffer = memory.GetPointer(client_thread->GetTLSAddress()); | 273 | auto* src_msg_buffer = memory.GetPointer(client_message); |
| 334 | auto* dst_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress()); | 274 | auto* dst_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress()); |
| 335 | std::memcpy(dst_msg_buffer, src_msg_buffer, MessageBufferSize); | 275 | std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size); |
| 336 | 276 | ||
| 337 | // We succeeded. | 277 | // We succeeded. |
| 338 | return ResultSuccess; | 278 | return ResultSuccess; |
| @@ -344,35 +284,34 @@ void KServerSession::CleanupRequests() { | |||
| 344 | // Clean up any pending requests. | 284 | // Clean up any pending requests. |
| 345 | while (true) { | 285 | while (true) { |
| 346 | // Get the next request. | 286 | // Get the next request. |
| 347 | // KSessionRequest *request = nullptr; | 287 | KSessionRequest* request = nullptr; |
| 348 | KThread* client_thread = nullptr; | ||
| 349 | { | 288 | { |
| 350 | KScopedSchedulerLock sl{kernel}; | 289 | KScopedSchedulerLock sl{kernel}; |
| 351 | 290 | ||
| 352 | if (m_current_thread_request) { | 291 | if (m_current_request) { |
| 353 | // Choose the current request if we have one. | 292 | // Choose the current request if we have one. |
| 354 | client_thread = m_current_thread_request; | 293 | request = m_current_request; |
| 355 | m_current_thread_request = nullptr; | 294 | m_current_request = nullptr; |
| 356 | } else if (!m_thread_request_list.empty()) { | 295 | } else if (!m_request_list.empty()) { |
| 357 | // Pop the request from the front of the list. | 296 | // Pop the request from the front of the list. |
| 358 | client_thread = m_thread_request_list.front(); | 297 | request = &m_request_list.front(); |
| 359 | m_thread_request_list.pop_front(); | 298 | m_request_list.pop_front(); |
| 360 | } | 299 | } |
| 361 | } | 300 | } |
| 362 | 301 | ||
| 363 | // If there's no request, we're done. | 302 | // If there's no request, we're done. |
| 364 | if (client_thread == nullptr) { | 303 | if (request == nullptr) { |
| 365 | break; | 304 | break; |
| 366 | } | 305 | } |
| 367 | 306 | ||
| 368 | // Close a reference to the request once it's cleaned up. | 307 | // Close a reference to the request once it's cleaned up. |
| 369 | SCOPE_EXIT({ client_thread->Close(); }); | 308 | SCOPE_EXIT({ request->Close(); }); |
| 370 | 309 | ||
| 371 | // Extract relevant information from the request. | 310 | // Extract relevant information from the request. |
| 372 | // const uintptr_t client_message = request->GetAddress(); | 311 | // const uintptr_t client_message = request->GetAddress(); |
| 373 | // const size_t client_buffer_size = request->GetSize(); | 312 | // const size_t client_buffer_size = request->GetSize(); |
| 374 | // KThread *client_thread = request->GetThread(); | 313 | KThread* client_thread = request->GetThread(); |
| 375 | // KEvent *event = request->GetEvent(); | 314 | KEvent* event = request->GetEvent(); |
| 376 | 315 | ||
| 377 | // KProcess *server_process = request->GetServerProcess(); | 316 | // KProcess *server_process = request->GetServerProcess(); |
| 378 | // KProcess *client_process = (client_thread != nullptr) ? | 317 | // KProcess *client_process = (client_thread != nullptr) ? |
| @@ -385,11 +324,24 @@ void KServerSession::CleanupRequests() { | |||
| 385 | 324 | ||
| 386 | // If there's a client thread, update it. | 325 | // If there's a client thread, update it. |
| 387 | if (client_thread != nullptr) { | 326 | if (client_thread != nullptr) { |
| 388 | // End the client thread's wait. | 327 | if (event != nullptr) { |
| 389 | KScopedSchedulerLock sl{kernel}; | 328 | // // We need to reply async. |
| 390 | 329 | // ReplyAsyncError(client_process, client_message, client_buffer_size, | |
| 391 | if (!client_thread->IsTerminationRequested()) { | 330 | // (R_SUCCEEDED(result) ? ResultSessionClosed : result)); |
| 392 | client_thread->EndWait(ResultSessionClosed); | 331 | |
| 332 | // // Unlock the client buffer. | ||
| 333 | // // NOTE: Nintendo does not check the result of this. | ||
| 334 | // client_page_table->UnlockForIpcUserBuffer(client_message, client_buffer_size); | ||
| 335 | |||
| 336 | // Signal the event. | ||
| 337 | event->Signal(); | ||
| 338 | } else { | ||
| 339 | // End the client thread's wait. | ||
| 340 | KScopedSchedulerLock sl{kernel}; | ||
| 341 | |||
| 342 | if (!client_thread->IsTerminationRequested()) { | ||
| 343 | client_thread->EndWait(ResultSessionClosed); | ||
| 344 | } | ||
| 393 | } | 345 | } |
| 394 | } | 346 | } |
| 395 | } | 347 | } |
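Taken together, the three entry points now describe one lifecycle for a KSessionRequest: OnRequest queues it, ReceiveRequest promotes it to m_current_request and copies the client's message into the server's buffer, and SendReply copies the reply back and completes the request, either by ending the client thread's wait or by signalling its event. A compressed simulation of that round trip, with byte arrays standing in for the two message buffers (all names here are illustrative):

    #include <cstdio>
    #include <cstring>

    int main() {
        unsigned char client_buffer[32]{};
        unsigned char server_buffer[32]{};

        // OnRequest: the client writes its message, then queues the request.
        std::memcpy(client_buffer, "ping", 5);

        // ReceiveRequest: copy client buffer -> server buffer.
        std::memcpy(server_buffer, client_buffer, sizeof(server_buffer));

        // The service handler runs and writes its reply in place.
        std::memcpy(server_buffer, "pong", 5);

        // SendReply: copy server buffer -> client buffer, then wake the
        // client (EndWait) or signal its event for an async request.
        std::memcpy(client_buffer, server_buffer, sizeof(client_buffer));

        std::printf("%s\n", reinterpret_cast<const char*>(client_buffer));
        return 0;
    }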
diff --git a/src/core/hle/kernel/k_server_session.h b/src/core/hle/kernel/k_server_session.h index 748d52826..32135473b 100644 --- a/src/core/hle/kernel/k_server_session.h +++ b/src/core/hle/kernel/k_server_session.h | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | 12 | ||
| 13 | #include "core/hle/kernel/hle_ipc.h" | 13 | #include "core/hle/kernel/hle_ipc.h" |
| 14 | #include "core/hle/kernel/k_light_lock.h" | 14 | #include "core/hle/kernel/k_light_lock.h" |
| 15 | #include "core/hle/kernel/k_session_request.h" | ||
| 15 | #include "core/hle/kernel/k_synchronization_object.h" | 16 | #include "core/hle/kernel/k_synchronization_object.h" |
| 16 | #include "core/hle/result.h" | 17 | #include "core/hle/result.h" |
| 17 | 18 | ||
| @@ -57,44 +58,15 @@ public: | |||
| 57 | } | 58 | } |
| 58 | 59 | ||
| 59 | bool IsSignaled() const override; | 60 | bool IsSignaled() const override; |
| 60 | |||
| 61 | void OnClientClosed(); | 61 | void OnClientClosed(); |
| 62 | 62 | ||
| 63 | void ClientConnected(SessionRequestHandlerPtr handler) { | ||
| 64 | if (manager) { | ||
| 65 | manager->SetSessionHandler(std::move(handler)); | ||
| 66 | } | ||
| 67 | } | ||
| 68 | |||
| 69 | void ClientDisconnected() { | ||
| 70 | manager = nullptr; | ||
| 71 | } | ||
| 72 | |||
| 73 | /// Adds a new domain request handler to the collection of request handlers within | ||
| 74 | /// this ServerSession instance. | ||
| 75 | void AppendDomainHandler(SessionRequestHandlerPtr handler); | ||
| 76 | |||
| 77 | /// Retrieves the total number of domain request handlers that have been | ||
| 78 | /// appended to this ServerSession instance. | ||
| 79 | std::size_t NumDomainRequestHandlers() const; | ||
| 80 | |||
| 81 | /// Returns true if the session has been converted to a domain, otherwise False | ||
| 82 | bool IsDomain() const { | ||
| 83 | return manager && manager->IsDomain(); | ||
| 84 | } | ||
| 85 | |||
| 86 | /// Converts the session to a domain at the end of the current command | ||
| 87 | void ConvertToDomain() { | ||
| 88 | convert_to_domain = true; | ||
| 89 | } | ||
| 90 | |||
| 91 | /// Gets the session request manager, which forwards requests to the underlying service | 63 | /// Gets the session request manager, which forwards requests to the underlying service |
| 92 | std::shared_ptr<SessionRequestManager>& GetSessionRequestManager() { | 64 | std::shared_ptr<SessionRequestManager>& GetSessionRequestManager() { |
| 93 | return manager; | 65 | return manager; |
| 94 | } | 66 | } |
| 95 | 67 | ||
| 96 | /// TODO: flesh these out to match the real kernel | 68 | /// TODO: flesh these out to match the real kernel |
| 97 | Result OnRequest(); | 69 | Result OnRequest(KSessionRequest* request); |
| 98 | Result SendReply(); | 70 | Result SendReply(); |
| 99 | Result ReceiveRequest(); | 71 | Result ReceiveRequest(); |
| 100 | 72 | ||
| @@ -108,10 +80,6 @@ private: | |||
| 108 | /// Completes a sync request from the emulated application. | 80 | /// Completes a sync request from the emulated application. |
| 109 | Result CompleteSyncRequest(HLERequestContext& context); | 81 | Result CompleteSyncRequest(HLERequestContext& context); |
| 110 | 82 | ||
| 111 | /// Handles a SyncRequest to a domain, forwarding the request to the proper object or closing an | ||
| 112 | /// object handle. | ||
| 113 | Result HandleDomainSyncRequest(Kernel::HLERequestContext& context); | ||
| 114 | |||
| 115 | /// This session's HLE request handlers; if nullptr, this is not an HLE server | 83 | /// This session's HLE request handlers; if nullptr, this is not an HLE server |
| 116 | std::shared_ptr<SessionRequestManager> manager; | 84 | std::shared_ptr<SessionRequestManager> manager; |
| 117 | 85 | ||
| @@ -122,9 +90,8 @@ private: | |||
| 122 | KSession* parent{}; | 90 | KSession* parent{}; |
| 123 | 91 | ||
| 124 | /// List of threads which are pending a reply. | 92 | /// List of requests which are pending a reply. |
| 125 | /// FIXME: KSessionRequest | 93 | boost::intrusive::list<KSessionRequest> m_request_list; |
| 126 | std::list<KThread*> m_thread_request_list; | 94 | KSessionRequest* m_current_request; |
| 127 | KThread* m_current_thread_request{}; | ||
| 128 | 95 | ||
| 129 | KLightLock m_lock; | 96 | KLightLock m_lock; |
| 130 | }; | 97 | }; |
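m_request_list is a boost::intrusive::list, so linking a KSessionRequest into the session allocates nothing: the prev/next pointers live inside the object, in the list_base_hook that KSessionRequest derives from. That is also why the surrounding code manages lifetime with explicit Open()/Close() calls; the container never owns its elements. A self-contained illustration of those semantics:

    #include <boost/intrusive/list.hpp>
    #include <cassert>

    struct Item : boost::intrusive::list_base_hook<> {
        int value = 0;
    };

    int main() {
        Item a, b;
        a.value = 1;
        b.value = 2;

        // push_back links through the hook embedded in Item itself,
        // so it never allocates and never copies the element.
        boost::intrusive::list<Item> list;
        list.push_back(a);
        list.push_back(b);

        assert(&list.front() == &a); // the list holds the objects themselves
        list.pop_front();            // unlinks a; a itself is untouched
        list.clear();                // items must be unlinked before they die
        return 0;
    }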
diff --git a/src/core/hle/kernel/k_session_request.cpp b/src/core/hle/kernel/k_session_request.cpp new file mode 100644 index 000000000..520da6aa7 --- /dev/null +++ b/src/core/hle/kernel/k_session_request.cpp | |||
| @@ -0,0 +1,61 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #include "core/hle/kernel/k_page_buffer.h" | ||
| 5 | #include "core/hle/kernel/k_session_request.h" | ||
| 6 | |||
| 7 | namespace Kernel { | ||
| 8 | |||
| 9 | Result KSessionRequest::SessionMappings::PushMap(VAddr client, VAddr server, size_t size, | ||
| 10 | KMemoryState state, size_t index) { | ||
| 11 | // At most 15 buffers of each type (4-bit descriptor counts). | ||
| 12 | ASSERT(index < ((1ul << 4) - 1) * 3); | ||
| 13 | |||
| 14 | // Get the mapping. | ||
| 15 | Mapping* mapping; | ||
| 16 | if (index < NumStaticMappings) { | ||
| 17 | mapping = &m_static_mappings[index]; | ||
| 18 | } else { | ||
| 19 | // Allocate a page for the extra mappings. | ||
| 20 | if (m_mappings == nullptr) { | ||
| 21 | KPageBuffer* page_buffer = KPageBuffer::Allocate(kernel); | ||
| 22 | R_UNLESS(page_buffer != nullptr, ResultOutOfMemory); | ||
| 23 | |||
| 24 | m_mappings = reinterpret_cast<Mapping*>(page_buffer); | ||
| 25 | } | ||
| 26 | |||
| 27 | mapping = &m_mappings[index - NumStaticMappings]; | ||
| 28 | } | ||
| 29 | |||
| 30 | // Set the mapping. | ||
| 31 | mapping->Set(client, server, size, state); | ||
| 32 | |||
| 33 | return ResultSuccess; | ||
| 34 | } | ||
| 35 | |||
| 36 | Result KSessionRequest::SessionMappings::PushSend(VAddr client, VAddr server, size_t size, | ||
| 37 | KMemoryState state) { | ||
| 38 | ASSERT(m_num_recv == 0); | ||
| 39 | ASSERT(m_num_exch == 0); | ||
| 40 | return this->PushMap(client, server, size, state, m_num_send++); | ||
| 41 | } | ||
| 42 | |||
| 43 | Result KSessionRequest::SessionMappings::PushReceive(VAddr client, VAddr server, size_t size, | ||
| 44 | KMemoryState state) { | ||
| 45 | ASSERT(m_num_exch == 0); | ||
| 46 | return this->PushMap(client, server, size, state, m_num_send + m_num_recv++); | ||
| 47 | } | ||
| 48 | |||
| 49 | Result KSessionRequest::SessionMappings::PushExchange(VAddr client, VAddr server, size_t size, | ||
| 50 | KMemoryState state) { | ||
| 51 | return this->PushMap(client, server, size, state, m_num_send + m_num_recv + m_num_exch++); | ||
| 52 | } | ||
| 53 | |||
| 54 | void KSessionRequest::SessionMappings::Finalize() { | ||
| 55 | if (m_mappings) { | ||
| 56 | KPageBuffer::Free(kernel, reinterpret_cast<KPageBuffer*>(m_mappings)); | ||
| 57 | m_mappings = nullptr; | ||
| 58 | } | ||
| 59 | } | ||
| 60 | |||
| 61 | } // namespace Kernel | ||
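PushSend, PushReceive, and PushExchange share one contiguous index space: send mappings occupy the lowest slots, receives follow, exchanges come last, which is why the ASSERTs above reject pushing a send once a receive has been recorded. Slots 0-7 land in the static array and anything beyond spills into the single page allocated on demand. A small worked check of that indexing (the constant mirrors the header; the helper is illustrative):

    #include <cstddef>

    constexpr std::size_t NumStaticMappings = 8;

    struct Counts {
        std::size_t send, recv, exch;
    };

    // Slot index for the i-th mapping of a given kind, as PushMap sees it.
    constexpr std::size_t SlotFor(Counts c, char kind, std::size_t i) {
        return kind == 's' ? i
             : kind == 'r' ? c.send + i
             :               c.send + c.recv + i;
    }

    int main() {
        constexpr Counts c{6, 3, 1};
        static_assert(SlotFor(c, 's', 5) == 5); // in m_static_mappings
        static_assert(SlotFor(c, 'r', 2) == 8); // spills: m_mappings[0]
        static_assert(SlotFor(c, 'x', 0) == 9); // m_mappings[1]
        return 0;
    }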
diff --git a/src/core/hle/kernel/k_session_request.h b/src/core/hle/kernel/k_session_request.h new file mode 100644 index 000000000..fcf521597 --- /dev/null +++ b/src/core/hle/kernel/k_session_request.h | |||
| @@ -0,0 +1,307 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include "core/hle/kernel/k_auto_object.h" | ||
| 7 | #include "core/hle/kernel/k_event.h" | ||
| 8 | #include "core/hle/kernel/k_memory_block.h" | ||
| 9 | #include "core/hle/kernel/k_process.h" | ||
| 10 | #include "core/hle/kernel/k_thread.h" | ||
| 11 | #include "core/hle/kernel/slab_helpers.h" | ||
| 12 | |||
| 13 | namespace Kernel { | ||
| 14 | |||
| 15 | class KSessionRequest final : public KSlabAllocated<KSessionRequest>, | ||
| 16 | public KAutoObject, | ||
| 17 | public boost::intrusive::list_base_hook<> { | ||
| 18 | KERNEL_AUTOOBJECT_TRAITS(KSessionRequest, KAutoObject); | ||
| 19 | |||
| 20 | public: | ||
| 21 | class SessionMappings { | ||
| 22 | private: | ||
| 23 | static constexpr size_t NumStaticMappings = 8; | ||
| 24 | |||
| 25 | class Mapping { | ||
| 26 | public: | ||
| 27 | constexpr void Set(VAddr c, VAddr s, size_t sz, KMemoryState st) { | ||
| 28 | m_client_address = c; | ||
| 29 | m_server_address = s; | ||
| 30 | m_size = sz; | ||
| 31 | m_state = st; | ||
| 32 | } | ||
| 33 | |||
| 34 | constexpr VAddr GetClientAddress() const { | ||
| 35 | return m_client_address; | ||
| 36 | } | ||
| 37 | constexpr VAddr GetServerAddress() const { | ||
| 38 | return m_server_address; | ||
| 39 | } | ||
| 40 | constexpr size_t GetSize() const { | ||
| 41 | return m_size; | ||
| 42 | } | ||
| 43 | constexpr KMemoryState GetMemoryState() const { | ||
| 44 | return m_state; | ||
| 45 | } | ||
| 46 | |||
| 47 | private: | ||
| 48 | VAddr m_client_address; | ||
| 49 | VAddr m_server_address; | ||
| 50 | size_t m_size; | ||
| 51 | KMemoryState m_state; | ||
| 52 | }; | ||
| 53 | |||
| 54 | public: | ||
| 55 | explicit SessionMappings(KernelCore& kernel_) | ||
| 56 | : kernel(kernel_), m_mappings(nullptr), m_num_send(), m_num_recv(), m_num_exch() {} | ||
| 57 | |||
| 58 | void Initialize() {} | ||
| 59 | void Finalize(); | ||
| 60 | |||
| 61 | size_t GetSendCount() const { | ||
| 62 | return m_num_send; | ||
| 63 | } | ||
| 64 | size_t GetReceiveCount() const { | ||
| 65 | return m_num_recv; | ||
| 66 | } | ||
| 67 | size_t GetExchangeCount() const { | ||
| 68 | return m_num_exch; | ||
| 69 | } | ||
| 70 | |||
| 71 | Result PushSend(VAddr client, VAddr server, size_t size, KMemoryState state); | ||
| 72 | Result PushReceive(VAddr client, VAddr server, size_t size, KMemoryState state); | ||
| 73 | Result PushExchange(VAddr client, VAddr server, size_t size, KMemoryState state); | ||
| 74 | |||
| 75 | VAddr GetSendClientAddress(size_t i) const { | ||
| 76 | return GetSendMapping(i).GetClientAddress(); | ||
| 77 | } | ||
| 78 | VAddr GetSendServerAddress(size_t i) const { | ||
| 79 | return GetSendMapping(i).GetServerAddress(); | ||
| 80 | } | ||
| 81 | size_t GetSendSize(size_t i) const { | ||
| 82 | return GetSendMapping(i).GetSize(); | ||
| 83 | } | ||
| 84 | KMemoryState GetSendMemoryState(size_t i) const { | ||
| 85 | return GetSendMapping(i).GetMemoryState(); | ||
| 86 | } | ||
| 87 | |||
| 88 | VAddr GetReceiveClientAddress(size_t i) const { | ||
| 89 | return GetReceiveMapping(i).GetClientAddress(); | ||
| 90 | } | ||
| 91 | VAddr GetReceiveServerAddress(size_t i) const { | ||
| 92 | return GetReceiveMapping(i).GetServerAddress(); | ||
| 93 | } | ||
| 94 | size_t GetReceiveSize(size_t i) const { | ||
| 95 | return GetReceiveMapping(i).GetSize(); | ||
| 96 | } | ||
| 97 | KMemoryState GetReceiveMemoryState(size_t i) const { | ||
| 98 | return GetReceiveMapping(i).GetMemoryState(); | ||
| 99 | } | ||
| 100 | |||
| 101 | VAddr GetExchangeClientAddress(size_t i) const { | ||
| 102 | return GetExchangeMapping(i).GetClientAddress(); | ||
| 103 | } | ||
| 104 | VAddr GetExchangeServerAddress(size_t i) const { | ||
| 105 | return GetExchangeMapping(i).GetServerAddress(); | ||
| 106 | } | ||
| 107 | size_t GetExchangeSize(size_t i) const { | ||
| 108 | return GetExchangeMapping(i).GetSize(); | ||
| 109 | } | ||
| 110 | KMemoryState GetExchangeMemoryState(size_t i) const { | ||
| 111 | return GetExchangeMapping(i).GetMemoryState(); | ||
| 112 | } | ||
| 113 | |||
| 114 | private: | ||
| 115 | Result PushMap(VAddr client, VAddr server, size_t size, KMemoryState state, size_t index); | ||
| 116 | |||
| 117 | const Mapping& GetSendMapping(size_t i) const { | ||
| 118 | ASSERT(i < m_num_send); | ||
| 119 | |||
| 120 | const size_t index = i; | ||
| 121 | if (index < NumStaticMappings) { | ||
| 122 | return m_static_mappings[index]; | ||
| 123 | } else { | ||
| 124 | return m_mappings[index - NumStaticMappings]; | ||
| 125 | } | ||
| 126 | } | ||
| 127 | |||
| 128 | const Mapping& GetReceiveMapping(size_t i) const { | ||
| 129 | ASSERT(i < m_num_recv); | ||
| 130 | |||
| 131 | const size_t index = m_num_send + i; | ||
| 132 | if (index < NumStaticMappings) { | ||
| 133 | return m_static_mappings[index]; | ||
| 134 | } else { | ||
| 135 | return m_mappings[index - NumStaticMappings]; | ||
| 136 | } | ||
| 137 | } | ||
| 138 | |||
| 139 | const Mapping& GetExchangeMapping(size_t i) const { | ||
| 140 | ASSERT(i < m_num_exch); | ||
| 141 | |||
| 142 | const size_t index = m_num_send + m_num_recv + i; | ||
| 143 | if (index < NumStaticMappings) { | ||
| 144 | return m_static_mappings[index]; | ||
| 145 | } else { | ||
| 146 | return m_mappings[index - NumStaticMappings]; | ||
| 147 | } | ||
| 148 | } | ||
| 149 | |||
| 150 | private: | ||
| 151 | KernelCore& kernel; | ||
| 152 | Mapping m_static_mappings[NumStaticMappings]; | ||
| 153 | Mapping* m_mappings; | ||
| 154 | u8 m_num_send; | ||
| 155 | u8 m_num_recv; | ||
| 156 | u8 m_num_exch; | ||
| 157 | }; | ||
| 158 | |||
| 159 | public: | ||
| 160 | explicit KSessionRequest(KernelCore& kernel_) | ||
| 161 | : KAutoObject(kernel_), m_mappings(kernel_), m_thread(nullptr), m_server(nullptr), | ||
| 162 | m_event(nullptr) {} | ||
| 163 | |||
| 164 | static KSessionRequest* Create(KernelCore& kernel) { | ||
| 165 | KSessionRequest* req = KSessionRequest::Allocate(kernel); | ||
| 166 | if (req != nullptr) [[likely]] { | ||
| 167 | KAutoObject::Create(req); | ||
| 168 | } | ||
| 169 | return req; | ||
| 170 | } | ||
| 171 | |||
| 172 | void Destroy() override { | ||
| 173 | this->Finalize(); | ||
| 174 | KSessionRequest::Free(kernel, this); | ||
| 175 | } | ||
| 176 | |||
| 177 | void Initialize(KEvent* event, uintptr_t address, size_t size) { | ||
| 178 | m_mappings.Initialize(); | ||
| 179 | |||
| 180 | m_thread = GetCurrentThreadPointer(kernel); | ||
| 181 | m_event = event; | ||
| 182 | m_address = address; | ||
| 183 | m_size = size; | ||
| 184 | |||
| 185 | m_thread->Open(); | ||
| 186 | if (m_event != nullptr) { | ||
| 187 | m_event->Open(); | ||
| 188 | } | ||
| 189 | } | ||
| 190 | |||
| 191 | static void PostDestroy(uintptr_t arg) {} | ||
| 192 | |||
| 193 | KThread* GetThread() const { | ||
| 194 | return m_thread; | ||
| 195 | } | ||
| 196 | KEvent* GetEvent() const { | ||
| 197 | return m_event; | ||
| 198 | } | ||
| 199 | uintptr_t GetAddress() const { | ||
| 200 | return m_address; | ||
| 201 | } | ||
| 202 | size_t GetSize() const { | ||
| 203 | return m_size; | ||
| 204 | } | ||
| 205 | KProcess* GetServerProcess() const { | ||
| 206 | return m_server; | ||
| 207 | } | ||
| 208 | |||
| 209 | void SetServerProcess(KProcess* process) { | ||
| 210 | m_server = process; | ||
| 211 | m_server->Open(); | ||
| 212 | } | ||
| 213 | |||
| 214 | void ClearThread() { | ||
| 215 | m_thread = nullptr; | ||
| 216 | } | ||
| 217 | void ClearEvent() { | ||
| 218 | m_event = nullptr; | ||
| 219 | } | ||
| 220 | |||
| 221 | size_t GetSendCount() const { | ||
| 222 | return m_mappings.GetSendCount(); | ||
| 223 | } | ||
| 224 | size_t GetReceiveCount() const { | ||
| 225 | return m_mappings.GetReceiveCount(); | ||
| 226 | } | ||
| 227 | size_t GetExchangeCount() const { | ||
| 228 | return m_mappings.GetExchangeCount(); | ||
| 229 | } | ||
| 230 | |||
| 231 | Result PushSend(VAddr client, VAddr server, size_t size, KMemoryState state) { | ||
| 232 | return m_mappings.PushSend(client, server, size, state); | ||
| 233 | } | ||
| 234 | |||
| 235 | Result PushReceive(VAddr client, VAddr server, size_t size, KMemoryState state) { | ||
| 236 | return m_mappings.PushReceive(client, server, size, state); | ||
| 237 | } | ||
| 238 | |||
| 239 | Result PushExchange(VAddr client, VAddr server, size_t size, KMemoryState state) { | ||
| 240 | return m_mappings.PushExchange(client, server, size, state); | ||
| 241 | } | ||
| 242 | |||
| 243 | VAddr GetSendClientAddress(size_t i) const { | ||
| 244 | return m_mappings.GetSendClientAddress(i); | ||
| 245 | } | ||
| 246 | VAddr GetSendServerAddress(size_t i) const { | ||
| 247 | return m_mappings.GetSendServerAddress(i); | ||
| 248 | } | ||
| 249 | size_t GetSendSize(size_t i) const { | ||
| 250 | return m_mappings.GetSendSize(i); | ||
| 251 | } | ||
| 252 | KMemoryState GetSendMemoryState(size_t i) const { | ||
| 253 | return m_mappings.GetSendMemoryState(i); | ||
| 254 | } | ||
| 255 | |||
| 256 | VAddr GetReceiveClientAddress(size_t i) const { | ||
| 257 | return m_mappings.GetReceiveClientAddress(i); | ||
| 258 | } | ||
| 259 | VAddr GetReceiveServerAddress(size_t i) const { | ||
| 260 | return m_mappings.GetReceiveServerAddress(i); | ||
| 261 | } | ||
| 262 | size_t GetReceiveSize(size_t i) const { | ||
| 263 | return m_mappings.GetReceiveSize(i); | ||
| 264 | } | ||
| 265 | KMemoryState GetReceiveMemoryState(size_t i) const { | ||
| 266 | return m_mappings.GetReceiveMemoryState(i); | ||
| 267 | } | ||
| 268 | |||
| 269 | VAddr GetExchangeClientAddress(size_t i) const { | ||
| 270 | return m_mappings.GetExchangeClientAddress(i); | ||
| 271 | } | ||
| 272 | VAddr GetExchangeServerAddress(size_t i) const { | ||
| 273 | return m_mappings.GetExchangeServerAddress(i); | ||
| 274 | } | ||
| 275 | size_t GetExchangeSize(size_t i) const { | ||
| 276 | return m_mappings.GetExchangeSize(i); | ||
| 277 | } | ||
| 278 | KMemoryState GetExchangeMemoryState(size_t i) const { | ||
| 279 | return m_mappings.GetExchangeMemoryState(i); | ||
| 280 | } | ||
| 281 | |||
| 282 | private: | ||
| 283 | // NOTE: This is public and virtual in Nintendo's kernel. | ||
| 284 | void Finalize() { | ||
| 285 | m_mappings.Finalize(); | ||
| 286 | |||
| 287 | if (m_thread) { | ||
| 288 | m_thread->Close(); | ||
| 289 | } | ||
| 290 | if (m_event) { | ||
| 291 | m_event->Close(); | ||
| 292 | } | ||
| 293 | if (m_server) { | ||
| 294 | m_server->Close(); | ||
| 295 | } | ||
| 296 | } | ||
| 297 | |||
| 298 | private: | ||
| 299 | SessionMappings m_mappings; | ||
| 300 | KThread* m_thread; | ||
| 301 | KProcess* m_server; | ||
| 302 | KEvent* m_event; | ||
| 303 | uintptr_t m_address; | ||
| 304 | size_t m_size; | ||
| 305 | }; | ||
| 306 | |||
| 307 | } // namespace Kernel | ||
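The public surface above implies the client-side sequence: allocate a request from the slab, Initialize it against the current thread (optionally with an event for asynchronous IPC), then hand it to the server session. A hedged sketch of that sequence, assuming the kernel headers are in scope; SendSyncRequestSketch is a placeholder name, not the real SVC implementation:

    // Illustrative only: mirrors the intended call order, not actual svc code.
    Result SendSyncRequestSketch(KernelCore& kernel, KServerSession* session,
                                 KEvent* event, uintptr_t message, size_t size) {
        // Allocate a request object from the KSessionRequest slab.
        KSessionRequest* request = KSessionRequest::Create(kernel);
        R_UNLESS(request != nullptr, ResultOutOfResource);

        // Drop our local reference when done; the queue takes its own via Open().
        SCOPE_EXIT({ request->Close(); });

        // Bind the current thread, the optional async event, and the message.
        request->Initialize(event, message, size);

        // Queue it; this returns immediately when an event is attached.
        R_RETURN(session->OnRequest(request));
    }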
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp index 8ff1545b6..a039cc591 100644 --- a/src/core/hle/kernel/k_shared_memory.cpp +++ b/src/core/hle/kernel/k_shared_memory.cpp | |||
| @@ -50,7 +50,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o | |||
| 50 | is_initialized = true; | 50 | is_initialized = true; |
| 51 | 51 | ||
| 52 | // Clear all pages in the memory. | 52 | // Clear all pages in the memory. |
| 53 | std::memset(device_memory_.GetPointer(physical_address_), 0, size_); | 53 | std::memset(device_memory_.GetPointer<void>(physical_address_), 0, size_); |
| 54 | 54 | ||
| 55 | return ResultSuccess; | 55 | return ResultSuccess; |
| 56 | } | 56 | } |
diff --git a/src/core/hle/kernel/k_shared_memory.h b/src/core/hle/kernel/k_shared_memory.h index 34cb98456..5620c3660 100644 --- a/src/core/hle/kernel/k_shared_memory.h +++ b/src/core/hle/kernel/k_shared_memory.h | |||
| @@ -54,7 +54,7 @@ public: | |||
| 54 | * @return A pointer to the shared memory block from the specified offset | 54 | * @return A pointer to the shared memory block from the specified offset |
| 55 | */ | 55 | */ |
| 56 | u8* GetPointer(std::size_t offset = 0) { | 56 | u8* GetPointer(std::size_t offset = 0) { |
| 57 | return device_memory->GetPointer(physical_address + offset); | 57 | return device_memory->GetPointer<u8>(physical_address + offset); |
| 58 | } | 58 | } |
| 59 | 59 | ||
| 60 | /** | 60 | /** |
| @@ -63,7 +63,7 @@ public: | |||
| 63 | * @return A pointer to the shared memory block from the specified offset | 63 | * @return A pointer to the shared memory block from the specified offset |
| 64 | */ | 64 | */ |
| 65 | const u8* GetPointer(std::size_t offset = 0) const { | 65 | const u8* GetPointer(std::size_t offset = 0) const { |
| 66 | return device_memory->GetPointer(physical_address + offset); | 66 | return device_memory->GetPointer<u8>(physical_address + offset); |
| 67 | } | 67 | } |
| 68 | 68 | ||
| 69 | void Finalize() override; | 69 | void Finalize() override; |
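The call sites above switch to an explicit template parameter, so each access names the type it expects instead of always receiving u8*. A minimal sketch of the accessor shape those calls assume; yuzu's real Core::DeviceMemory differs in detail:

    #include <cstdint>

    class DeviceMemorySketch {
    public:
        // Translate a physical address into a typed host pointer.
        template <typename T>
        T* GetPointer(std::uint64_t physical_address) {
            return reinterpret_cast<T*>(backing + (physical_address - base_address));
        }

    private:
        std::uint64_t base_address = 0;
        std::uint8_t* backing = nullptr;
    };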
diff --git a/src/core/hle/kernel/k_shared_memory_info.h b/src/core/hle/kernel/k_shared_memory_info.h index e43db8515..2bb6b6d08 100644 --- a/src/core/hle/kernel/k_shared_memory_info.h +++ b/src/core/hle/kernel/k_shared_memory_info.h | |||
| @@ -15,7 +15,8 @@ class KSharedMemoryInfo final : public KSlabAllocated<KSharedMemoryInfo>, | |||
| 15 | public boost::intrusive::list_base_hook<> { | 15 | public boost::intrusive::list_base_hook<> { |
| 16 | 16 | ||
| 17 | public: | 17 | public: |
| 18 | explicit KSharedMemoryInfo() = default; | 18 | explicit KSharedMemoryInfo(KernelCore&) {} |
| 19 | KSharedMemoryInfo() = default; | ||
| 19 | 20 | ||
| 20 | constexpr void Initialize(KSharedMemory* shmem) { | 21 | constexpr void Initialize(KSharedMemory* shmem) { |
| 21 | shared_memory = shmem; | 22 | shared_memory = shmem; |
diff --git a/src/core/hle/kernel/k_slab_heap.h b/src/core/hle/kernel/k_slab_heap.h index 2b303537e..a8c77a7d4 100644 --- a/src/core/hle/kernel/k_slab_heap.h +++ b/src/core/hle/kernel/k_slab_heap.h | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | #include "common/assert.h" | 8 | #include "common/assert.h" |
| 9 | #include "common/common_funcs.h" | 9 | #include "common/common_funcs.h" |
| 10 | #include "common/common_types.h" | 10 | #include "common/common_types.h" |
| 11 | #include "common/spin_lock.h" | ||
| 11 | 12 | ||
| 12 | namespace Kernel { | 13 | namespace Kernel { |
| 13 | 14 | ||
| @@ -36,28 +37,34 @@ public: | |||
| 36 | } | 37 | } |
| 37 | 38 | ||
| 38 | void* Allocate() { | 39 | void* Allocate() { |
| 39 | Node* ret = m_head.load(); | 40 | // KScopedInterruptDisable di; |
| 40 | 41 | ||
| 41 | do { | 42 | m_lock.lock(); |
| 42 | if (ret == nullptr) { | 43 | |
| 43 | break; | 44 | Node* ret = m_head; |
| 44 | } | 45 | if (ret != nullptr) [[likely]] { |
| 45 | } while (!m_head.compare_exchange_weak(ret, ret->next)); | 46 | m_head = ret->next; |
| 47 | } | ||
| 46 | 48 | ||
| 49 | m_lock.unlock(); | ||
| 47 | return ret; | 50 | return ret; |
| 48 | } | 51 | } |
| 49 | 52 | ||
| 50 | void Free(void* obj) { | 53 | void Free(void* obj) { |
| 54 | // KScopedInterruptDisable di; | ||
| 55 | |||
| 56 | m_lock.lock(); | ||
| 57 | |||
| 51 | Node* node = static_cast<Node*>(obj); | 58 | Node* node = static_cast<Node*>(obj); |
| 59 | node->next = m_head; | ||
| 60 | m_head = node; | ||
| 52 | 61 | ||
| 53 | Node* cur_head = m_head.load(); | 62 | m_lock.unlock(); |
| 54 | do { | ||
| 55 | node->next = cur_head; | ||
| 56 | } while (!m_head.compare_exchange_weak(cur_head, node)); | ||
| 57 | } | 63 | } |
| 58 | 64 | ||
| 59 | private: | 65 | private: |
| 60 | std::atomic<Node*> m_head{}; | 66 | std::atomic<Node*> m_head{}; |
| 67 | Common::SpinLock m_lock; | ||
| 61 | }; | 68 | }; |
| 62 | 69 | ||
| 63 | } // namespace impl | 70 | } // namespace impl |
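The free list drops its lock-free pop for a spinlocked one. The lock-free version is exposed to the classic ABA hazard: a thread can read head == A, fetch A->next == B, lose the CPU while other threads pop A and B and push A back, and then have its compare-exchange succeed even though B is no longer a valid node. Holding a lock across the whole pop or push closes that window. A condensed equivalent of the new scheme, with std::mutex standing in for Common::SpinLock:

    #include <mutex>

    struct Node {
        Node* next;
    };

    class LockedFreeList {
    public:
        void* Allocate() {
            std::scoped_lock lk{m_lock};
            Node* ret = m_head;      // head and head->next are read together,
            if (ret != nullptr) {    // so no other thread can retire 'next'
                m_head = ret->next;  // between the two reads
            }
            return ret;
        }

        void Free(void* obj) {
            std::scoped_lock lk{m_lock};
            Node* node = static_cast<Node*>(obj);
            node->next = m_head;
            m_head = node;
        }

    private:
        Node* m_head = nullptr;
        std::mutex m_lock; // stand-in for Common::SpinLock
    };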
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index 174afc80d..b7bfcdce3 100644 --- a/src/core/hle/kernel/k_thread.cpp +++ b/src/core/hle/kernel/k_thread.cpp | |||
| @@ -30,6 +30,7 @@ | |||
| 30 | #include "core/hle/kernel/k_worker_task_manager.h" | 30 | #include "core/hle/kernel/k_worker_task_manager.h" |
| 31 | #include "core/hle/kernel/kernel.h" | 31 | #include "core/hle/kernel/kernel.h" |
| 32 | #include "core/hle/kernel/svc_results.h" | 32 | #include "core/hle/kernel/svc_results.h" |
| 33 | #include "core/hle/kernel/svc_types.h" | ||
| 33 | #include "core/hle/result.h" | 34 | #include "core/hle/result.h" |
| 34 | #include "core/memory.h" | 35 | #include "core/memory.h" |
| 35 | 36 | ||
| @@ -38,6 +39,9 @@ | |||
| 38 | #endif | 39 | #endif |
| 39 | 40 | ||
| 40 | namespace { | 41 | namespace { |
| 42 | |||
| 43 | constexpr inline s32 TerminatingThreadPriority = Kernel::Svc::SystemThreadPriorityHighest - 1; | ||
| 44 | |||
| 41 | static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top, | 45 | static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top, |
| 42 | u32 entry_point, u32 arg) { | 46 | u32 entry_point, u32 arg) { |
| 43 | context = {}; | 47 | context = {}; |
| @@ -241,7 +245,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack | |||
| 241 | } | 245 | } |
| 242 | } | 246 | } |
| 243 | 247 | ||
| 244 | return ResultSuccess; | 248 | R_SUCCEED(); |
| 245 | } | 249 | } |
| 246 | 250 | ||
| 247 | Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg, | 251 | Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg, |
| @@ -254,7 +258,7 @@ Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_ | |||
| 254 | thread->host_context = std::make_shared<Common::Fiber>(std::move(init_func)); | 258 | thread->host_context = std::make_shared<Common::Fiber>(std::move(init_func)); |
| 255 | thread->is_single_core = !Settings::values.use_multi_core.GetValue(); | 259 | thread->is_single_core = !Settings::values.use_multi_core.GetValue(); |
| 256 | 260 | ||
| 257 | return ResultSuccess; | 261 | R_SUCCEED(); |
| 258 | } | 262 | } |
| 259 | 263 | ||
| 260 | Result KThread::InitializeDummyThread(KThread* thread) { | 264 | Result KThread::InitializeDummyThread(KThread* thread) { |
| @@ -264,31 +268,32 @@ Result KThread::InitializeDummyThread(KThread* thread) { | |||
| 264 | // Initialize emulation parameters. | 268 | // Initialize emulation parameters. |
| 265 | thread->stack_parameters.disable_count = 0; | 269 | thread->stack_parameters.disable_count = 0; |
| 266 | 270 | ||
| 267 | return ResultSuccess; | 271 | R_SUCCEED(); |
| 268 | } | 272 | } |
| 269 | 273 | ||
| 270 | Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core) { | 274 | Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core) { |
| 271 | return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main, | 275 | R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, |
| 272 | system.GetCpuManager().GetGuestActivateFunc()); | 276 | ThreadType::Main, system.GetCpuManager().GetGuestActivateFunc())); |
| 273 | } | 277 | } |
| 274 | 278 | ||
| 275 | Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) { | 279 | Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) { |
| 276 | return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main, | 280 | R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, |
| 277 | system.GetCpuManager().GetIdleThreadStartFunc()); | 281 | ThreadType::Main, system.GetCpuManager().GetIdleThreadStartFunc())); |
| 278 | } | 282 | } |
| 279 | 283 | ||
| 280 | Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread, | 284 | Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread, |
| 281 | KThreadFunction func, uintptr_t arg, s32 virt_core) { | 285 | KThreadFunction func, uintptr_t arg, s32 virt_core) { |
| 282 | return InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr, ThreadType::HighPriority, | 286 | R_RETURN(InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr, |
| 283 | system.GetCpuManager().GetShutdownThreadStartFunc()); | 287 | ThreadType::HighPriority, |
| 288 | system.GetCpuManager().GetShutdownThreadStartFunc())); | ||
| 284 | } | 289 | } |
| 285 | 290 | ||
| 286 | Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func, | 291 | Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func, |
| 287 | uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core, | 292 | uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core, |
| 288 | KProcess* owner) { | 293 | KProcess* owner) { |
| 289 | system.Kernel().GlobalSchedulerContext().AddThread(thread); | 294 | system.Kernel().GlobalSchedulerContext().AddThread(thread); |
| 290 | return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner, | 295 | R_RETURN(InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner, |
| 291 | ThreadType::User, system.GetCpuManager().GetGuestThreadFunc()); | 296 | ThreadType::User, system.GetCpuManager().GetGuestThreadFunc())); |
| 292 | } | 297 | } |
| 293 | 298 | ||
| 294 | void KThread::PostDestroy(uintptr_t arg) { | 299 | void KThread::PostDestroy(uintptr_t arg) { |
| @@ -538,7 +543,7 @@ Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) { | |||
| 538 | *out_ideal_core = virtual_ideal_core_id; | 543 | *out_ideal_core = virtual_ideal_core_id; |
| 539 | *out_affinity_mask = virtual_affinity_mask; | 544 | *out_affinity_mask = virtual_affinity_mask; |
| 540 | 545 | ||
| 541 | return ResultSuccess; | 546 | R_SUCCEED(); |
| 542 | } | 547 | } |
| 543 | 548 | ||
| 544 | Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) { | 549 | Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) { |
| @@ -554,7 +559,7 @@ Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) | |||
| 554 | *out_affinity_mask = original_physical_affinity_mask.GetAffinityMask(); | 559 | *out_affinity_mask = original_physical_affinity_mask.GetAffinityMask(); |
| 555 | } | 560 | } |
| 556 | 561 | ||
| 557 | return ResultSuccess; | 562 | R_SUCCEED(); |
| 558 | } | 563 | } |
| 559 | 564 | ||
| 560 | Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) { | 565 | Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) { |
| @@ -666,7 +671,7 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) { | |||
| 666 | } while (retry_update); | 671 | } while (retry_update); |
| 667 | } | 672 | } |
| 668 | 673 | ||
| 669 | return ResultSuccess; | 674 | R_SUCCEED(); |
| 670 | } | 675 | } |
| 671 | 676 | ||
| 672 | void KThread::SetBasePriority(s32 value) { | 677 | void KThread::SetBasePriority(s32 value) { |
| @@ -839,7 +844,7 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) { | |||
| 839 | } while (thread_is_current); | 844 | } while (thread_is_current); |
| 840 | } | 845 | } |
| 841 | 846 | ||
| 842 | return ResultSuccess; | 847 | R_SUCCEED(); |
| 843 | } | 848 | } |
| 844 | 849 | ||
| 845 | Result KThread::GetThreadContext3(std::vector<u8>& out) { | 850 | Result KThread::GetThreadContext3(std::vector<u8>& out) { |
| @@ -874,7 +879,7 @@ Result KThread::GetThreadContext3(std::vector<u8>& out) { | |||
| 874 | } | 879 | } |
| 875 | } | 880 | } |
| 876 | 881 | ||
| 877 | return ResultSuccess; | 882 | R_SUCCEED(); |
| 878 | } | 883 | } |
| 879 | 884 | ||
| 880 | void KThread::AddWaiterImpl(KThread* thread) { | 885 | void KThread::AddWaiterImpl(KThread* thread) { |
| @@ -1038,7 +1043,7 @@ Result KThread::Run() { | |||
| 1038 | // Set our state and finish. | 1043 | // Set our state and finish. |
| 1039 | SetState(ThreadState::Runnable); | 1044 | SetState(ThreadState::Runnable); |
| 1040 | 1045 | ||
| 1041 | return ResultSuccess; | 1046 | R_SUCCEED(); |
| 1042 | } | 1047 | } |
| 1043 | } | 1048 | } |
| 1044 | 1049 | ||
| @@ -1073,6 +1078,78 @@ void KThread::Exit() { | |||
| 1073 | UNREACHABLE_MSG("KThread::Exit() would return"); | 1078 | UNREACHABLE_MSG("KThread::Exit() would return"); |
| 1074 | } | 1079 | } |
| 1075 | 1080 | ||
| 1081 | Result KThread::Terminate() { | ||
| 1082 | ASSERT(this != GetCurrentThreadPointer(kernel)); | ||
| 1083 | |||
| 1084 | // Request the thread terminate if it hasn't already. | ||
| 1085 | if (const auto new_state = this->RequestTerminate(); new_state != ThreadState::Terminated) { | ||
| 1086 | // If the thread isn't terminated, wait for it to terminate. | ||
| 1087 | s32 index; | ||
| 1088 | KSynchronizationObject* objects[] = {this}; | ||
| 1089 | R_TRY(KSynchronizationObject::Wait(kernel, std::addressof(index), objects, 1, | ||
| 1090 | Svc::WaitInfinite)); | ||
| 1091 | } | ||
| 1092 | |||
| 1093 | R_SUCCEED(); | ||
| 1094 | } | ||
| 1095 | |||
| 1096 | ThreadState KThread::RequestTerminate() { | ||
| 1097 | ASSERT(this != GetCurrentThreadPointer(kernel)); | ||
| 1098 | |||
| 1099 | KScopedSchedulerLock sl{kernel}; | ||
| 1100 | |||
| 1101 | // Determine if this is the first termination request. | ||
| 1102 | const bool first_request = [&]() -> bool { | ||
| 1103 | // Perform an atomic compare-and-swap from false to true. | ||
| 1104 | bool expected = false; | ||
| 1105 | return termination_requested.compare_exchange_strong(expected, true); | ||
| 1106 | }(); | ||
| 1107 | |||
| 1108 | // If this is the first request, start termination procedure. | ||
| 1109 | if (first_request) { | ||
| 1110 | // If the thread is in initialized state, just change state to terminated. | ||
| 1111 | if (this->GetState() == ThreadState::Initialized) { | ||
| 1112 | thread_state = ThreadState::Terminated; | ||
| 1113 | return ThreadState::Terminated; | ||
| 1114 | } | ||
| 1115 | |||
| 1116 | // Register the terminating dpc. | ||
| 1117 | this->RegisterDpc(DpcFlag::Terminating); | ||
| 1118 | |||
| 1119 | // If the thread is pinned, unpin it. | ||
| 1120 | if (this->GetStackParameters().is_pinned) { | ||
| 1121 | this->GetOwnerProcess()->UnpinThread(this); | ||
| 1122 | } | ||
| 1123 | |||
| 1124 | // If the thread is suspended, continue it. | ||
| 1125 | if (this->IsSuspended()) { | ||
| 1126 | suspend_allowed_flags = 0; | ||
| 1127 | this->UpdateState(); | ||
| 1128 | } | ||
| 1129 | |||
| 1130 | // Change the thread's priority to be higher than any system thread's. | ||
| 1131 | if (this->GetBasePriority() >= Svc::SystemThreadPriorityHighest) { | ||
| 1132 | this->SetBasePriority(TerminatingThreadPriority); | ||
| 1133 | } | ||
| 1134 | |||
| 1135 | // If the thread is runnable, send a termination interrupt to other cores. | ||
| 1136 | if (this->GetState() == ThreadState::Runnable) { | ||
| 1137 | if (const u64 core_mask = | ||
| 1138 | physical_affinity_mask.GetAffinityMask() & ~(1ULL << GetCurrentCoreId(kernel)); | ||
| 1139 | core_mask != 0) { | ||
| 1140 | Kernel::KInterruptManager::SendInterProcessorInterrupt(kernel, core_mask); | ||
| 1141 | } | ||
| 1142 | } | ||
| 1143 | |||
| 1144 | // Wake up the thread. | ||
| 1145 | if (this->GetState() == ThreadState::Waiting) { | ||
| 1146 | wait_queue->CancelWait(this, ResultTerminationRequested, true); | ||
| 1147 | } | ||
| 1148 | } | ||
| 1149 | |||
| 1150 | return this->GetState(); | ||
| 1151 | } | ||
| 1152 | |||
| 1076 | Result KThread::Sleep(s64 timeout) { | 1153 | Result KThread::Sleep(s64 timeout) { |
| 1077 | ASSERT(!kernel.GlobalSchedulerContext().IsLocked()); | 1154 | ASSERT(!kernel.GlobalSchedulerContext().IsLocked()); |
| 1078 | ASSERT(this == GetCurrentThreadPointer(kernel)); | 1155 | ASSERT(this == GetCurrentThreadPointer(kernel)); |
| @@ -1086,7 +1163,7 @@ Result KThread::Sleep(s64 timeout) { | |||
| 1086 | // Check if the thread should terminate. | 1163 | // Check if the thread should terminate. |
| 1087 | if (this->IsTerminationRequested()) { | 1164 | if (this->IsTerminationRequested()) { |
| 1088 | slp.CancelSleep(); | 1165 | slp.CancelSleep(); |
| 1089 | return ResultTerminationRequested; | 1166 | R_THROW(ResultTerminationRequested); |
| 1090 | } | 1167 | } |
| 1091 | 1168 | ||
| 1092 | // Wait for the sleep to end. | 1169 | // Wait for the sleep to end. |
| @@ -1094,7 +1171,7 @@ Result KThread::Sleep(s64 timeout) { | |||
| 1094 | SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep); | 1171 | SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep); |
| 1095 | } | 1172 | } |
| 1096 | 1173 | ||
| 1097 | return ResultSuccess; | 1174 | R_SUCCEED(); |
| 1098 | } | 1175 | } |
| 1099 | 1176 | ||
| 1100 | void KThread::IfDummyThreadTryWait() { | 1177 | void KThread::IfDummyThreadTryWait() { |
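In RequestTerminate, the first_request lambda is a plain atomic compare-and-swap: exactly one caller transitions termination_requested from false to true and performs the teardown (DPC registration, unpinning, priority boost, IPI, wait cancellation); every later caller only observes the thread's current state. The CAS semantics in isolation:

    #include <atomic>
    #include <cassert>

    int main() {
        std::atomic<bool> termination_requested{false};

        // First requester: the false -> true exchange succeeds.
        bool expected = false;
        assert(termination_requested.compare_exchange_strong(expected, true));

        // Any later requester fails the exchange, and 'expected' is
        // rewritten to the current value, so only one caller ever runs
        // the termination procedure.
        expected = false;
        assert(!termination_requested.compare_exchange_strong(expected, true));
        assert(expected == true);
        return 0;
    }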
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h index 9ee20208e..e2a27d603 100644 --- a/src/core/hle/kernel/k_thread.h +++ b/src/core/hle/kernel/k_thread.h | |||
| @@ -180,6 +180,10 @@ public: | |||
| 180 | 180 | ||
| 181 | void Exit(); | 181 | void Exit(); |
| 182 | 182 | ||
| 183 | Result Terminate(); | ||
| 184 | |||
| 185 | ThreadState RequestTerminate(); | ||
| 186 | |||
| 183 | [[nodiscard]] u32 GetSuspendFlags() const { | 187 | [[nodiscard]] u32 GetSuspendFlags() const { |
| 184 | return suspend_allowed_flags & suspend_request_flags; | 188 | return suspend_allowed_flags & suspend_request_flags; |
| 185 | } | 189 | } |
diff --git a/src/core/hle/kernel/k_thread_local_page.h b/src/core/hle/kernel/k_thread_local_page.h index 0a7f22680..5d466ace7 100644 --- a/src/core/hle/kernel/k_thread_local_page.h +++ b/src/core/hle/kernel/k_thread_local_page.h | |||
| @@ -26,7 +26,7 @@ public: | |||
| 26 | static_assert(RegionsPerPage > 0); | 26 | static_assert(RegionsPerPage > 0); |
| 27 | 27 | ||
| 28 | public: | 28 | public: |
| 29 | constexpr explicit KThreadLocalPage(VAddr addr = {}) : m_virt_addr(addr) { | 29 | constexpr explicit KThreadLocalPage(KernelCore&, VAddr addr = {}) : m_virt_addr(addr) { |
| 30 | m_is_region_free.fill(true); | 30 | m_is_region_free.fill(true); |
| 31 | } | 31 | } |
| 32 | 32 | ||
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 9251f29ad..eed2dc9f3 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #include "core/hardware_properties.h" | 24 | #include "core/hardware_properties.h" |
| 25 | #include "core/hle/kernel/init/init_slab_setup.h" | 25 | #include "core/hle/kernel/init/init_slab_setup.h" |
| 26 | #include "core/hle/kernel/k_client_port.h" | 26 | #include "core/hle/kernel/k_client_port.h" |
| 27 | #include "core/hle/kernel/k_dynamic_resource_manager.h" | ||
| 27 | #include "core/hle/kernel/k_handle_table.h" | 28 | #include "core/hle/kernel/k_handle_table.h" |
| 28 | #include "core/hle/kernel/k_memory_layout.h" | 29 | #include "core/hle/kernel/k_memory_layout.h" |
| 29 | #include "core/hle/kernel/k_memory_manager.h" | 30 | #include "core/hle/kernel/k_memory_manager.h" |
| @@ -73,8 +74,16 @@ struct KernelCore::Impl { | |||
| 73 | InitializeMemoryLayout(); | 74 | InitializeMemoryLayout(); |
| 74 | Init::InitializeKPageBufferSlabHeap(system); | 75 | Init::InitializeKPageBufferSlabHeap(system); |
| 75 | InitializeShutdownThreads(); | 76 | InitializeShutdownThreads(); |
| 76 | InitializePreemption(kernel); | ||
| 77 | InitializePhysicalCores(); | 77 | InitializePhysicalCores(); |
| 78 | InitializePreemption(kernel); | ||
| 79 | |||
| 80 | // Initialize the Dynamic Slab Heaps. | ||
| 81 | { | ||
| 82 | const auto& pt_heap_region = memory_layout->GetPageTableHeapRegion(); | ||
| 83 | ASSERT(pt_heap_region.GetEndAddress() != 0); | ||
| 84 | |||
| 85 | InitializeResourceManagers(pt_heap_region.GetAddress(), pt_heap_region.GetSize()); | ||
| 86 | } | ||
| 78 | 87 | ||
| 79 | RegisterHostThread(); | 88 | RegisterHostThread(); |
| 80 | } | 89 | } |
| @@ -86,6 +95,15 @@ struct KernelCore::Impl { | |||
| 86 | } | 95 | } |
| 87 | } | 96 | } |
| 88 | 97 | ||
| 98 | void CloseCurrentProcess() { | ||
| 99 | (*current_process).Finalize(); | ||
| 100 | // current_process->Close(); | ||
| 101 | // TODO: The current process should be destroyed based on accurate ref counting after | ||
| 102 | // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak. | ||
| 103 | (*current_process).Destroy(); | ||
| 104 | current_process = nullptr; | ||
| 105 | } | ||
| 106 | |||
| 89 | void Shutdown() { | 107 | void Shutdown() { |
| 90 | is_shutting_down.store(true, std::memory_order_relaxed); | 108 | is_shutting_down.store(true, std::memory_order_relaxed); |
| 91 | SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); }); | 109 | SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); }); |
| @@ -99,10 +117,6 @@ struct KernelCore::Impl { | |||
| 99 | next_user_process_id = KProcess::ProcessIDMin; | 117 | next_user_process_id = KProcess::ProcessIDMin; |
| 100 | next_thread_id = 1; | 118 | next_thread_id = 1; |
| 101 | 119 | ||
| 102 | for (auto& core : cores) { | ||
| 103 | core = nullptr; | ||
| 104 | } | ||
| 105 | |||
| 106 | global_handle_table->Finalize(); | 120 | global_handle_table->Finalize(); |
| 107 | global_handle_table.reset(); | 121 | global_handle_table.reset(); |
| 108 | 122 | ||
| @@ -152,15 +166,7 @@ struct KernelCore::Impl { | |||
| 152 | } | 166 | } |
| 153 | } | 167 | } |
| 154 | 168 | ||
| 155 | // Shutdown all processes. | 169 | CloseCurrentProcess(); |
| 156 | if (current_process) { | ||
| 157 | (*current_process).Finalize(); | ||
| 158 | // current_process->Close(); | ||
| 159 | // TODO: The current process should be destroyed based on accurate ref counting after | ||
| 160 | // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak. | ||
| 161 | (*current_process).Destroy(); | ||
| 162 | current_process = nullptr; | ||
| 163 | } | ||
| 164 | 170 | ||
| 165 | // Track kernel objects that were not freed on shutdown | 171 | // Track kernel objects that were not freed on shutdown |
| 166 | { | 172 | { |
| @@ -257,6 +263,18 @@ struct KernelCore::Impl { | |||
| 257 | system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event); | 263 | system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event); |
| 258 | } | 264 | } |
| 259 | 265 | ||
| 266 | void InitializeResourceManagers(VAddr address, size_t size) { | ||
| 267 | dynamic_page_manager = std::make_unique<KDynamicPageManager>(); | ||
| 268 | memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>(); | ||
| 269 | app_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>(); | ||
| 270 | |||
| 271 | dynamic_page_manager->Initialize(address, size); | ||
| 272 | static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000; | ||
| 273 | memory_block_heap->Initialize(dynamic_page_manager.get(), | ||
| 274 | ApplicationMemoryBlockSlabHeapSize); | ||
| 275 | app_memory_block_manager->Initialize(nullptr, memory_block_heap.get()); | ||
| 276 | } | ||
| 277 | |||
| 260 | void InitializeShutdownThreads() { | 278 | void InitializeShutdownThreads() { |
| 261 | for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { | 279 | for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { |
| 262 | shutdown_threads[core_id] = KThread::Create(system.Kernel()); | 280 | shutdown_threads[core_id] = KThread::Create(system.Kernel()); |
| @@ -344,11 +362,6 @@ struct KernelCore::Impl { | |||
| 344 | static inline thread_local KThread* current_thread{nullptr}; | 362 | static inline thread_local KThread* current_thread{nullptr}; |
| 345 | 363 | ||
| 346 | KThread* GetCurrentEmuThread() { | 364 | KThread* GetCurrentEmuThread() { |
| 347 | // If we are shutting down the kernel, none of this is relevant anymore. | ||
| 348 | if (IsShuttingDown()) { | ||
| 349 | return {}; | ||
| 350 | } | ||
| 351 | |||
| 352 | const auto thread_id = GetCurrentHostThreadID(); | 365 | const auto thread_id = GetCurrentHostThreadID(); |
| 353 | if (thread_id >= Core::Hardware::NUM_CPU_CORES) { | 366 | if (thread_id >= Core::Hardware::NUM_CPU_CORES) { |
| 354 | return GetHostDummyThread(); | 367 | return GetHostDummyThread(); |
| @@ -770,6 +783,11 @@ struct KernelCore::Impl { | |||
| 770 | // Kernel memory management | 783 | // Kernel memory management |
| 771 | std::unique_ptr<KMemoryManager> memory_manager; | 784 | std::unique_ptr<KMemoryManager> memory_manager; |
| 772 | 785 | ||
| 786 | // Dynamic slab managers | ||
| 787 | std::unique_ptr<KDynamicPageManager> dynamic_page_manager; | ||
| 788 | std::unique_ptr<KMemoryBlockSlabHeap> memory_block_heap; | ||
| 789 | std::unique_ptr<KMemoryBlockSlabManager> app_memory_block_manager; | ||
| 790 | |||
| 773 | // Shared memory for services | 791 | // Shared memory for services |
| 774 | Kernel::KSharedMemory* hid_shared_mem{}; | 792 | Kernel::KSharedMemory* hid_shared_mem{}; |
| 775 | Kernel::KSharedMemory* font_shared_mem{}; | 793 | Kernel::KSharedMemory* font_shared_mem{}; |
| @@ -853,6 +871,10 @@ const KProcess* KernelCore::CurrentProcess() const { | |||
| 853 | return impl->current_process; | 871 | return impl->current_process; |
| 854 | } | 872 | } |
| 855 | 873 | ||
| 874 | void KernelCore::CloseCurrentProcess() { | ||
| 875 | impl->CloseCurrentProcess(); | ||
| 876 | } | ||
| 877 | |||
| 856 | const std::vector<KProcess*>& KernelCore::GetProcessList() const { | 878 | const std::vector<KProcess*>& KernelCore::GetProcessList() const { |
| 857 | return impl->process_list; | 879 | return impl->process_list; |
| 858 | } | 880 | } |
| @@ -1041,6 +1063,14 @@ const KMemoryManager& KernelCore::MemoryManager() const { | |||
| 1041 | return *impl->memory_manager; | 1063 | return *impl->memory_manager; |
| 1042 | } | 1064 | } |
| 1043 | 1065 | ||
| 1066 | KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() { | ||
| 1067 | return *impl->app_memory_block_manager; | ||
| 1068 | } | ||
| 1069 | |||
| 1070 | const KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() const { | ||
| 1071 | return *impl->app_memory_block_manager; | ||
| 1072 | } | ||
| 1073 | |||
| 1044 | Kernel::KSharedMemory& KernelCore::GetHidSharedMem() { | 1074 | Kernel::KSharedMemory& KernelCore::GetHidSharedMem() { |
| 1045 | return *impl->hid_shared_mem; | 1075 | return *impl->hid_shared_mem; |
| 1046 | } | 1076 | } |
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index 0847cbcbf..266be2bc4 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h | |||
| @@ -37,6 +37,7 @@ class KClientSession; | |||
| 37 | class KEvent; | 37 | class KEvent; |
| 38 | class KHandleTable; | 38 | class KHandleTable; |
| 39 | class KLinkedListNode; | 39 | class KLinkedListNode; |
| 40 | class KMemoryBlockSlabManager; | ||
| 40 | class KMemoryLayout; | 41 | class KMemoryLayout; |
| 41 | class KMemoryManager; | 42 | class KMemoryManager; |
| 42 | class KPageBuffer; | 43 | class KPageBuffer; |
| @@ -46,6 +47,7 @@ class KResourceLimit; | |||
| 46 | class KScheduler; | 47 | class KScheduler; |
| 47 | class KServerSession; | 48 | class KServerSession; |
| 48 | class KSession; | 49 | class KSession; |
| 50 | class KSessionRequest; | ||
| 49 | class KSharedMemory; | 51 | class KSharedMemory; |
| 50 | class KSharedMemoryInfo; | 52 | class KSharedMemoryInfo; |
| 51 | class KThread; | 53 | class KThread; |
| @@ -130,6 +132,9 @@ public: | |||
| 130 | /// Retrieves a const pointer to the current process. | 132 | /// Retrieves a const pointer to the current process. |
| 131 | const KProcess* CurrentProcess() const; | 133 | const KProcess* CurrentProcess() const; |
| 132 | 134 | ||
| 135 | /// Closes the current process. | ||
| 136 | void CloseCurrentProcess(); | ||
| 137 | |||
| 133 | /// Retrieves the list of processes. | 138 | /// Retrieves the list of processes. |
| 134 | const std::vector<KProcess*>& GetProcessList() const; | 139 | const std::vector<KProcess*>& GetProcessList() const; |
| 135 | 140 | ||
| @@ -238,6 +243,12 @@ public: | |||
| 238 | /// Gets the virtual memory manager for the kernel. | 243 | /// Gets the virtual memory manager for the kernel. |
| 239 | const KMemoryManager& MemoryManager() const; | 244 | const KMemoryManager& MemoryManager() const; |
| 240 | 245 | ||
| 246 | /// Gets the application memory block manager for the kernel. | ||
| 247 | KMemoryBlockSlabManager& GetApplicationMemoryBlockManager(); | ||
| 248 | |||
| 249 | /// Gets the application memory block manager for the kernel. | ||
| 250 | const KMemoryBlockSlabManager& GetApplicationMemoryBlockManager() const; | ||
| 251 | |||
| 241 | /// Gets the shared memory object for HID services. | 252 | /// Gets the shared memory object for HID services. |
| 242 | Kernel::KSharedMemory& GetHidSharedMem(); | 253 | Kernel::KSharedMemory& GetHidSharedMem(); |
| 243 | 254 | ||
| @@ -350,6 +361,8 @@ public: | |||
| 350 | return slab_heap_container->page_buffer; | 361 | return slab_heap_container->page_buffer; |
| 351 | } else if constexpr (std::is_same_v<T, KThreadLocalPage>) { | 362 | } else if constexpr (std::is_same_v<T, KThreadLocalPage>) { |
| 352 | return slab_heap_container->thread_local_page; | 363 | return slab_heap_container->thread_local_page; |
| 364 | } else if constexpr (std::is_same_v<T, KSessionRequest>) { | ||
| 365 | return slab_heap_container->session_request; | ||
| 353 | } | 366 | } |
| 354 | } | 367 | } |
| 355 | 368 | ||
| @@ -412,6 +425,7 @@ private: | |||
| 412 | KSlabHeap<KCodeMemory> code_memory; | 425 | KSlabHeap<KCodeMemory> code_memory; |
| 413 | KSlabHeap<KPageBuffer> page_buffer; | 426 | KSlabHeap<KPageBuffer> page_buffer; |
| 414 | KSlabHeap<KThreadLocalPage> thread_local_page; | 427 | KSlabHeap<KThreadLocalPage> thread_local_page; |
| 428 | KSlabHeap<KSessionRequest> session_request; | ||
| 415 | }; | 429 | }; |
| 416 | 430 | ||
| 417 | std::unique_ptr<SlabHeapContainer> slab_heap_container; | 431 | std::unique_ptr<SlabHeapContainer> slab_heap_container; |
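The kernel.h hunk extends a compile-time type-to-heap dispatch: adding a slab-allocated type means both a new SlabHeapContainer member and a new if-constexpr branch in SlabHeap&lt;T&gt;(), as done here for KSessionRequest. A compilable sketch of the pattern with stand-in types (Session, Thread, SessionRequest below are illustrative):

```cpp
#include <type_traits>

template <typename T>
struct Slab {}; // per-type slab heap, reduced to nothing for the sketch

struct Session {};
struct Thread {};
struct SessionRequest {}; // stands in for the newly slab-allocated KSessionRequest

struct SlabHeapContainer {
    Slab<Session> session;
    Slab<Thread> thread;
    Slab<SessionRequest> session_request; // the member this commit adds
};

template <typename T>
Slab<T>& GetSlabHeap(SlabHeapContainer& c) {
    if constexpr (std::is_same_v<T, Session>) {
        return c.session;
    } else if constexpr (std::is_same_v<T, Thread>) {
        return c.thread;
    } else {
        // Guard the final branch so an unlisted T is a compile-time error.
        static_assert(std::is_same_v<T, SessionRequest>, "type has no slab heap");
        return c.session_request;
    }
}

int main() {
    SlabHeapContainer container;
    Slab<SessionRequest>& heap = GetSlabHeap<SessionRequest>(container);
    (void)heap;
}
```

yuzu's version simply falls off the end of the if-constexpr chain for an unlisted type; the static_assert variant above is a design choice of the sketch, not what the diff does.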
diff --git a/src/core/hle/kernel/slab_helpers.h b/src/core/hle/kernel/slab_helpers.h index 299a981a8..06b51e919 100644 --- a/src/core/hle/kernel/slab_helpers.h +++ b/src/core/hle/kernel/slab_helpers.h | |||
| @@ -24,7 +24,7 @@ public: | |||
| 24 | } | 24 | } |
| 25 | 25 | ||
| 26 | static Derived* Allocate(KernelCore& kernel) { | 26 | static Derived* Allocate(KernelCore& kernel) { |
| 27 | return kernel.SlabHeap<Derived>().Allocate(); | 27 | return kernel.SlabHeap<Derived>().Allocate(kernel); |
| 28 | } | 28 | } |
| 29 | 29 | ||
| 30 | static void Free(KernelCore& kernel, Derived* obj) { | 30 | static void Free(KernelCore& kernel, Derived* obj) { |
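The only change here is the call shape: the CRTP Allocate() helper now passes the kernel reference down into KSlabHeap::Allocate(). The diff does not show what the heap does with it; giving the heap access to kernel-owned resources (for instance a dynamic fallback when the static pool is exhausted) is an assumption, not something the hunk confirms. A stand-alone sketch of the forwarding, with simplified stand-ins:

```cpp
#include <cstdio>

class Kernel;

template <typename T>
class SlabHeap {
public:
    T* Allocate(Kernel& kernel) {
        (void)kernel; // the real heap can now reach kernel-owned state (assumed rationale)
        return new T{};
    }
};

class Kernel {
public:
    template <typename T>
    SlabHeap<T>& GetSlabHeap() {
        static SlabHeap<T> heap; // one heap per type, for the sketch
        return heap;
    }
};

template <typename Derived>
struct SlabAllocated {
    static Derived* Allocate(Kernel& kernel) {
        // New call shape: the kernel reference is forwarded into the heap.
        return kernel.GetSlabHeap<Derived>().Allocate(kernel);
    }
};

struct SessionRequest : SlabAllocated<SessionRequest> {};

int main() {
    Kernel kernel;
    delete SessionRequest::Allocate(kernel);
    std::printf("ok\n");
}
```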
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 1d145ea91..b07ae3f02 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp | |||
| @@ -933,7 +933,7 @@ static Result GetInfo(Core::System& system, u64* result, u64 info_id, Handle han | |||
| 933 | return ResultSuccess; | 933 | return ResultSuccess; |
| 934 | 934 | ||
| 935 | case GetInfoType::UserExceptionContextAddr: | 935 | case GetInfoType::UserExceptionContextAddr: |
| 936 | *result = process->GetTLSRegionAddress(); | 936 | *result = process->GetProcessLocalRegionAddress(); |
| 937 | return ResultSuccess; | 937 | return ResultSuccess; |
| 938 | 938 | ||
| 939 | case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource: | 939 | case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource: |
| @@ -1888,7 +1888,7 @@ static void ExitProcess(Core::System& system) { | |||
| 1888 | auto* current_process = system.Kernel().CurrentProcess(); | 1888 | auto* current_process = system.Kernel().CurrentProcess(); |
| 1889 | 1889 | ||
| 1890 | LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID()); | 1890 | LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID()); |
| 1891 | ASSERT_MSG(current_process->GetStatus() == ProcessStatus::Running, | 1891 | ASSERT_MSG(current_process->GetState() == KProcess::State::Running, |
| 1892 | "Process has already exited"); | 1892 | "Process has already exited"); |
| 1893 | 1893 | ||
| 1894 | system.Exit(); | 1894 | system.Exit(); |
| @@ -2557,7 +2557,7 @@ static Result GetProcessInfo(Core::System& system, u64* out, Handle process_hand | |||
| 2557 | return ResultInvalidEnumValue; | 2557 | return ResultInvalidEnumValue; |
| 2558 | } | 2558 | } |
| 2559 | 2559 | ||
| 2560 | *out = static_cast<u64>(process->GetStatus()); | 2560 | *out = static_cast<u64>(process->GetState()); |
| 2561 | return ResultSuccess; | 2561 | return ResultSuccess; |
| 2562 | } | 2562 | } |
| 2563 | 2563 | ||
diff --git a/src/core/hle/kernel/svc_common.h b/src/core/hle/kernel/svc_common.h index 95750c3eb..85506710e 100644 --- a/src/core/hle/kernel/svc_common.h +++ b/src/core/hle/kernel/svc_common.h | |||
| @@ -14,8 +14,11 @@ namespace Kernel::Svc { | |||
| 14 | 14 | ||
| 15 | using namespace Common::Literals; | 15 | using namespace Common::Literals; |
| 16 | 16 | ||
| 17 | constexpr s32 ArgumentHandleCountMax = 0x40; | 17 | constexpr inline s32 ArgumentHandleCountMax = 0x40; |
| 18 | constexpr u32 HandleWaitMask{1u << 30}; | 18 | |
| 19 | constexpr inline u32 HandleWaitMask = 1u << 30; | ||
| 20 | |||
| 21 | constexpr inline s64 WaitInfinite = -1; | ||
| 19 | 22 | ||
| 20 | constexpr inline std::size_t HeapSizeAlignment = 2_MiB; | 23 | constexpr inline std::size_t HeapSizeAlignment = 2_MiB; |
| 21 | 24 | ||
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h index 79e15183a..abb9847fe 100644 --- a/src/core/hle/kernel/svc_types.h +++ b/src/core/hle/kernel/svc_types.h | |||
| @@ -95,6 +95,19 @@ constexpr inline s32 IdealCoreNoUpdate = -3; | |||
| 95 | constexpr inline s32 LowestThreadPriority = 63; | 95 | constexpr inline s32 LowestThreadPriority = 63; |
| 96 | constexpr inline s32 HighestThreadPriority = 0; | 96 | constexpr inline s32 HighestThreadPriority = 0; |
| 97 | 97 | ||
| 98 | constexpr inline s32 SystemThreadPriorityHighest = 16; | ||
| 99 | |||
| 100 | enum class ProcessState : u32 { | ||
| 101 | Created = 0, | ||
| 102 | CreatedAttached = 1, | ||
| 103 | Running = 2, | ||
| 104 | Crashed = 3, | ||
| 105 | RunningAttached = 4, | ||
| 106 | Terminating = 5, | ||
| 107 | Terminated = 6, | ||
| 108 | DebugBreak = 7, | ||
| 109 | }; | ||
| 110 | |||
| 98 | constexpr inline size_t ThreadLocalRegionSize = 0x200; | 111 | constexpr inline size_t ThreadLocalRegionSize = 0x200; |
| 99 | 112 | ||
| 100 | } // namespace Kernel::Svc | 113 | } // namespace Kernel::Svc |
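GetProcessInfo in the svc.cpp hunk above reports the process state as a plain u64, which is the surface this SVC-facing enum pins down. A small model of that cast; the assumption that the kernel-side KProcess::State uses the same numeric values is mine, the diff only shows the cast itself:

```cpp
#include <cstdint>

enum class ProcessState : std::uint32_t {
    Created = 0,
    CreatedAttached = 1,
    Running = 2,
    Crashed = 3,
    RunningAttached = 4,
    Terminating = 5,
    Terminated = 6,
    DebugBreak = 7,
};

// Mirrors `*out = static_cast<u64>(process->GetState());` from the svc.cpp hunk.
constexpr std::uint64_t ReportState(ProcessState state) {
    return static_cast<std::uint64_t>(state);
}

static_assert(ReportState(ProcessState::Running) == 2);

int main() {}
```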
diff --git a/src/core/hle/result.h b/src/core/hle/result.h index d67e68bae..ef4b2d417 100644 --- a/src/core/hle/result.h +++ b/src/core/hle/result.h | |||
| @@ -135,6 +135,14 @@ union Result { | |||
| 135 | [[nodiscard]] constexpr bool IsFailure() const { | 135 | [[nodiscard]] constexpr bool IsFailure() const { |
| 136 | return !IsSuccess(); | 136 | return !IsSuccess(); |
| 137 | } | 137 | } |
| 138 | |||
| 139 | [[nodiscard]] constexpr u32 GetInnerValue() const { | ||
| 140 | return static_cast<u32>(module.Value()) | (description << module.bits); | ||
| 141 | } | ||
| 142 | |||
| 143 | [[nodiscard]] constexpr bool Includes(Result result) const { | ||
| 144 | return GetInnerValue() == result.GetInnerValue(); | ||
| 145 | } | ||
| 138 | }; | 146 | }; |
| 139 | static_assert(std::is_trivial_v<Result>); | 147 | static_assert(std::is_trivial_v<Result>); |
| 140 | 148 | ||
| @@ -462,9 +470,6 @@ constexpr inline Result __TmpCurrentResultReference = ResultSuccess; | |||
| 462 | #define R_UNLESS(expr, res) \ | 470 | #define R_UNLESS(expr, res) \ |
| 463 | { \ | 471 | { \ |
| 464 | if (!(expr)) { \ | 472 | if (!(expr)) { \ |
| 465 | if (res.IsError()) { \ | ||
| 466 | LOG_ERROR(Kernel, "Failed with result: {}", res.raw); \ | ||
| 467 | } \ | ||
| 468 | R_THROW(res); \ | 473 | R_THROW(res); \ |
| 469 | } \ | 474 | } \ |
| 470 | } | 475 | } |
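Two semantics land in result.h: Includes() compares only the module and description fields, ignoring any other raw bits, and R_UNLESS stops logging, presumably because it guards expected failures and the unconditional LOG_ERROR was noise. A self-contained model of the comparison; the 9-bit module / 13-bit description split matches the BitField layout result.h already uses:

```cpp
#include <cstdint>

struct Result {
    std::uint32_t raw;

    constexpr std::uint32_t Module() const { return raw & 0x1ff; }              // bits 0-8
    constexpr std::uint32_t Description() const { return (raw >> 9) & 0x1fff; } // bits 9-21

    // The "inner value" is module plus description, reassembled.
    constexpr std::uint32_t GetInnerValue() const {
        return Module() | (Description() << 9);
    }
    constexpr bool Includes(Result other) const {
        return GetInnerValue() == other.GetInnerValue();
    }
};

// Same module and description, differing only above bit 21: still "included".
static_assert(Result{0x8000'4201}.Includes(Result{0x0000'4201}));
static_assert(!Result{0x0000'4202}.Includes(Result{0x0000'4201}));

int main() {}
```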
diff --git a/src/core/hle/service/audio/audctl.cpp b/src/core/hle/service/audio/audctl.cpp index 4a2ae5f88..5abf22ba4 100644 --- a/src/core/hle/service/audio/audctl.cpp +++ b/src/core/hle/service/audio/audctl.cpp | |||
| @@ -45,9 +45,25 @@ AudCtl::AudCtl(Core::System& system_) : ServiceFramework{system_, "audctl"} { | |||
| 45 | {32, nullptr, "GetActiveOutputTarget"}, | 45 | {32, nullptr, "GetActiveOutputTarget"}, |
| 46 | {33, nullptr, "GetTargetDeviceInfo"}, | 46 | {33, nullptr, "GetTargetDeviceInfo"}, |
| 47 | {34, nullptr, "AcquireTargetNotification"}, | 47 | {34, nullptr, "AcquireTargetNotification"}, |
| 48 | {35, nullptr, "SetHearingProtectionSafeguardTimerRemainingTimeForDebug"}, | ||
| 49 | {36, nullptr, "GetHearingProtectionSafeguardTimerRemainingTimeForDebug"}, | ||
| 50 | {37, nullptr, "SetHearingProtectionSafeguardEnabled"}, | ||
| 51 | {38, nullptr, "IsHearingProtectionSafeguardEnabled"}, | ||
| 52 | {39, nullptr, "IsHearingProtectionSafeguardMonitoringOutputForDebug"}, | ||
| 53 | {40, nullptr, "GetSystemInformationForDebug"}, | ||
| 54 | {41, nullptr, "SetVolumeButtonLongPressTime"}, | ||
| 55 | {42, nullptr, "SetNativeVolumeForDebug"}, | ||
| 48 | {10000, nullptr, "NotifyAudioOutputTargetForPlayReport"}, | 56 | {10000, nullptr, "NotifyAudioOutputTargetForPlayReport"}, |
| 49 | {10001, nullptr, "NotifyAudioOutputChannelCountForPlayReport"}, | 57 | {10001, nullptr, "NotifyAudioOutputChannelCountForPlayReport"}, |
| 50 | {10002, nullptr, "NotifyUnsupportedUsbOutputDeviceAttachedForPlayReport"}, | 58 | {10002, nullptr, "NotifyUnsupportedUsbOutputDeviceAttachedForPlayReport"}, |
| 59 | {10100, nullptr, "GetAudioVolumeDataForPlayReport"}, | ||
| 60 | {10101, nullptr, "BindAudioVolumeUpdateEventForPlayReport"}, | ||
| 61 | {10102, nullptr, "BindAudioOutputTargetUpdateEventForPlayReport"}, | ||
| 62 | {10103, nullptr, "GetAudioOutputTargetForPlayReport"}, | ||
| 63 | {10104, nullptr, "GetAudioOutputChannelCountForPlayReport"}, | ||
| 64 | {10105, nullptr, "BindAudioOutputChannelCountUpdateEventForPlayReport"}, | ||
| 65 | {10106, nullptr, "GetDefaultAudioOutputTargetForPlayReport"}, | ||
| 66 | {50000, nullptr, "SetAnalogInputBoostGainForPrototyping"}, | ||
| 51 | }; | 67 | }; |
| 52 | // clang-format on | 68 | // clang-format on |
| 53 | 69 | ||
diff --git a/src/core/hle/service/audio/audren_u.cpp b/src/core/hle/service/audio/audren_u.cpp index 60c30cd5b..13423dca6 100644 --- a/src/core/hle/service/audio/audren_u.cpp +++ b/src/core/hle/service/audio/audren_u.cpp | |||
| @@ -52,6 +52,8 @@ public: | |||
| 52 | {9, &IAudioRenderer::GetRenderingTimeLimit, "GetRenderingTimeLimit"}, | 52 | {9, &IAudioRenderer::GetRenderingTimeLimit, "GetRenderingTimeLimit"}, |
| 53 | {10, &IAudioRenderer::RequestUpdate, "RequestUpdateAuto"}, | 53 | {10, &IAudioRenderer::RequestUpdate, "RequestUpdateAuto"}, |
| 54 | {11, nullptr, "ExecuteAudioRendererRendering"}, | 54 | {11, nullptr, "ExecuteAudioRendererRendering"}, |
| 55 | {12, &IAudioRenderer::SetVoiceDropParameter, "SetVoiceDropParameter"}, | ||
| 56 | {13, &IAudioRenderer::GetVoiceDropParameter, "GetVoiceDropParameter"}, | ||
| 55 | }; | 57 | }; |
| 56 | // clang-format on | 58 | // clang-format on |
| 57 | RegisterHandlers(functions); | 59 | RegisterHandlers(functions); |
| @@ -205,6 +207,30 @@ private: | |||
| 205 | LOG_DEBUG(Service_Audio, "called"); | 207 | LOG_DEBUG(Service_Audio, "called"); |
| 206 | } | 208 | } |
| 207 | 209 | ||
| 210 | void SetVoiceDropParameter(Kernel::HLERequestContext& ctx) { | ||
| 211 | LOG_DEBUG(Service_Audio, "called"); | ||
| 212 | |||
| 213 | IPC::RequestParser rp{ctx}; | ||
| 214 | auto voice_drop_param{rp.Pop<f32>()}; | ||
| 215 | |||
| 216 | auto& system_ = impl->GetSystem(); | ||
| 217 | system_.SetVoiceDropParameter(voice_drop_param); | ||
| 218 | |||
| 219 | IPC::ResponseBuilder rb{ctx, 2}; | ||
| 220 | rb.Push(ResultSuccess); | ||
| 221 | } | ||
| 222 | |||
| 223 | void GetVoiceDropParameter(Kernel::HLERequestContext& ctx) { | ||
| 224 | LOG_DEBUG(Service_Audio, "called"); | ||
| 225 | |||
| 226 | auto& system_ = impl->GetSystem(); | ||
| 227 | auto voice_drop_param{system_.GetVoiceDropParameter()}; | ||
| 228 | |||
| 229 | IPC::ResponseBuilder rb{ctx, 3}; | ||
| 230 | rb.Push(ResultSuccess); | ||
| 231 | rb.Push(voice_drop_param); | ||
| 232 | } | ||
| 233 | |||
| 208 | KernelHelpers::ServiceContext service_context; | 234 | KernelHelpers::ServiceContext service_context; |
| 209 | Kernel::KEvent* rendered_event; | 235 | Kernel::KEvent* rendered_event; |
| 210 | Manager& manager; | 236 | Manager& manager; |
diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp index 46bad7871..79375bd2f 100644 --- a/src/core/hle/service/hid/hid.cpp +++ b/src/core/hle/service/hid/hid.cpp | |||
| @@ -2118,7 +2118,7 @@ void Hid::WritePalmaWaveEntry(Kernel::HLERequestContext& ctx) { | |||
| 2118 | ASSERT_MSG(t_mem->GetSize() == 0x3000, "t_mem has incorrect size"); | 2118 | ASSERT_MSG(t_mem->GetSize() == 0x3000, "t_mem has incorrect size"); |
| 2119 | 2119 | ||
| 2120 | LOG_WARNING(Service_HID, | 2120 | LOG_WARNING(Service_HID, |
| 2121 | "(STUBBED) called, connection_handle={}, wave_set={}, unkown={}, " | 2121 | "(STUBBED) called, connection_handle={}, wave_set={}, unknown={}, " |
| 2122 | "t_mem_handle=0x{:08X}, t_mem_size={}, size={}", | 2122 | "t_mem_handle=0x{:08X}, t_mem_size={}, size={}", |
| 2123 | connection_handle.npad_id, wave_set, unknown, t_mem_handle, t_mem_size, size); | 2123 | connection_handle.npad_id, wave_set, unknown, t_mem_handle, t_mem_size, size); |
| 2124 | 2124 | ||
diff --git a/src/core/hle/service/hid/irsensor/pointing_processor.h b/src/core/hle/service/hid/irsensor/pointing_processor.h index cf4930794..d63423aff 100644 --- a/src/core/hle/service/hid/irsensor/pointing_processor.h +++ b/src/core/hle/service/hid/irsensor/pointing_processor.h | |||
| @@ -37,10 +37,10 @@ private: | |||
| 37 | u8 pointing_status; | 37 | u8 pointing_status; |
| 38 | INSERT_PADDING_BYTES(3); | 38 | INSERT_PADDING_BYTES(3); |
| 39 | u32 unknown; | 39 | u32 unknown; |
| 40 | float unkown_float1; | 40 | float unknown_float1; |
| 41 | float position_x; | 41 | float position_x; |
| 42 | float position_y; | 42 | float position_y; |
| 43 | float unkown_float2; | 43 | float unknown_float2; |
| 44 | Core::IrSensor::IrsRect window_of_interest; | 44 | Core::IrSensor::IrsRect window_of_interest; |
| 45 | }; | 45 | }; |
| 46 | static_assert(sizeof(PointingProcessorMarkerData) == 0x20, | 46 | static_assert(sizeof(PointingProcessorMarkerData) == 0x20, |
diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp index becd6d1b9..652441bc2 100644 --- a/src/core/hle/service/ldr/ldr.cpp +++ b/src/core/hle/service/ldr/ldr.cpp | |||
| @@ -290,7 +290,7 @@ public: | |||
| 290 | const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize}; | 290 | const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize}; |
| 291 | const auto start_info{page_table.QueryInfo(start - 1)}; | 291 | const auto start_info{page_table.QueryInfo(start - 1)}; |
| 292 | 292 | ||
| 293 | if (start_info.state != Kernel::KMemoryState::Free) { | 293 | if (start_info.GetState() != Kernel::KMemoryState::Free) { |
| 294 | return {}; | 294 | return {}; |
| 295 | } | 295 | } |
| 296 | 296 | ||
| @@ -300,7 +300,7 @@ public: | |||
| 300 | 300 | ||
| 301 | const auto end_info{page_table.QueryInfo(start + size)}; | 301 | const auto end_info{page_table.QueryInfo(start + size)}; |
| 302 | 302 | ||
| 303 | if (end_info.state != Kernel::KMemoryState::Free) { | 303 | if (end_info.GetState() != Kernel::KMemoryState::Free) { |
| 304 | return {}; | 304 | return {}; |
| 305 | } | 305 | } |
| 306 | 306 | ||
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp index 6411dbf43..b635e6ed1 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp | |||
| @@ -311,7 +311,8 @@ NvResult nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& out | |||
| 311 | handle->address + | 311 | handle->address + |
| 312 | (static_cast<u64>(entry.handle_offset_big_pages) << vm.big_page_size_bits))}; | 312 | (static_cast<u64>(entry.handle_offset_big_pages) << vm.big_page_size_bits))}; |
| 313 | 313 | ||
| 314 | gmmu->Map(virtual_address, cpu_address, size, use_big_pages); | 314 | gmmu->Map(virtual_address, cpu_address, size, static_cast<Tegra::PTEKind>(entry.kind), |
| 315 | use_big_pages); | ||
| 315 | } | 316 | } |
| 316 | } | 317 | } |
| 317 | 318 | ||
| @@ -350,7 +351,8 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8 | |||
| 350 | u64 gpu_address{static_cast<u64>(params.offset + params.buffer_offset)}; | 351 | u64 gpu_address{static_cast<u64>(params.offset + params.buffer_offset)}; |
| 351 | VAddr cpu_address{mapping->ptr + params.buffer_offset}; | 352 | VAddr cpu_address{mapping->ptr + params.buffer_offset}; |
| 352 | 353 | ||
| 353 | gmmu->Map(gpu_address, cpu_address, params.mapping_size, mapping->big_page); | 354 | gmmu->Map(gpu_address, cpu_address, params.mapping_size, |
| 355 | static_cast<Tegra::PTEKind>(params.kind), mapping->big_page); | ||
| 354 | 356 | ||
| 355 | return NvResult::Success; | 357 | return NvResult::Success; |
| 356 | } catch (const std::out_of_range&) { | 358 | } catch (const std::out_of_range&) { |
| @@ -389,7 +391,8 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8 | |||
| 389 | } | 391 | } |
| 390 | 392 | ||
| 391 | const bool use_big_pages = alloc->second.big_pages && big_page; | 393 | const bool use_big_pages = alloc->second.big_pages && big_page; |
| 392 | gmmu->Map(params.offset, cpu_address, size, use_big_pages); | 394 | gmmu->Map(params.offset, cpu_address, size, static_cast<Tegra::PTEKind>(params.kind), |
| 395 | use_big_pages); | ||
| 393 | 396 | ||
| 394 | auto mapping{std::make_shared<Mapping>(cpu_address, params.offset, size, true, | 397 | auto mapping{std::make_shared<Mapping>(cpu_address, params.offset, size, true, |
| 395 | use_big_pages, alloc->second.sparse)}; | 398 | use_big_pages, alloc->second.sparse)}; |
| @@ -409,7 +412,8 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8 | |||
| 409 | return NvResult::InsufficientMemory; | 412 | return NvResult::InsufficientMemory; |
| 410 | } | 413 | } |
| 411 | 414 | ||
| 412 | gmmu->Map(params.offset, cpu_address, Common::AlignUp(size, page_size), big_page); | 415 | gmmu->Map(params.offset, cpu_address, Common::AlignUp(size, page_size), |
| 416 | static_cast<Tegra::PTEKind>(params.kind), big_page); | ||
| 413 | 417 | ||
| 414 | auto mapping{ | 418 | auto mapping{ |
| 415 | std::make_shared<Mapping>(cpu_address, params.offset, size, false, big_page, false)}; | 419 | std::make_shared<Mapping>(cpu_address, params.offset, size, false, big_page, false)}; |
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp index ddf273b5e..b60679021 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.cpp +++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp | |||
| @@ -128,7 +128,8 @@ NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output) | |||
| 128 | } | 128 | } |
| 129 | ASSERT(system.CurrentProcess() | 129 | ASSERT(system.CurrentProcess() |
| 130 | ->PageTable() | 130 | ->PageTable() |
| 131 | .LockForDeviceAddressSpace(handle_description->address, handle_description->size) | 131 | .LockForMapDeviceAddressSpace(handle_description->address, handle_description->size, |
| 132 | Kernel::KMemoryPermission::None, true) | ||
| 132 | .IsSuccess()); | 133 | .IsSuccess()); |
| 133 | std::memcpy(output.data(), ¶ms, sizeof(params)); | 134 | std::memcpy(output.data(), ¶ms, sizeof(params)); |
| 134 | return result; | 135 | return result; |
diff --git a/src/core/hle/service/sm/sm_controller.cpp b/src/core/hle/service/sm/sm_controller.cpp index 2a4bd64ab..273f79568 100644 --- a/src/core/hle/service/sm/sm_controller.cpp +++ b/src/core/hle/service/sm/sm_controller.cpp | |||
| @@ -15,9 +15,10 @@ | |||
| 15 | namespace Service::SM { | 15 | namespace Service::SM { |
| 16 | 16 | ||
| 17 | void Controller::ConvertCurrentObjectToDomain(Kernel::HLERequestContext& ctx) { | 17 | void Controller::ConvertCurrentObjectToDomain(Kernel::HLERequestContext& ctx) { |
| 18 | ASSERT_MSG(!ctx.Session()->IsDomain(), "Session is already a domain"); | 18 | ASSERT_MSG(!ctx.Session()->GetSessionRequestManager()->IsDomain(), |
| 19 | "Session is already a domain"); | ||
| 19 | LOG_DEBUG(Service, "called, server_session={}", ctx.Session()->GetId()); | 20 | LOG_DEBUG(Service, "called, server_session={}", ctx.Session()->GetId()); |
| 20 | ctx.Session()->ConvertToDomain(); | 21 | ctx.Session()->GetSessionRequestManager()->ConvertToDomainOnRequestEnd(); |
| 21 | 22 | ||
| 22 | IPC::ResponseBuilder rb{ctx, 3}; | 23 | IPC::ResponseBuilder rb{ctx, 3}; |
| 23 | rb.Push(ResultSuccess); | 24 | rb.Push(ResultSuccess); |
diff --git a/src/core/hle/service/vi/vi_results.h b/src/core/hle/service/vi/vi_results.h index a46c247d2..22bac799f 100644 --- a/src/core/hle/service/vi/vi_results.h +++ b/src/core/hle/service/vi/vi_results.h | |||
| @@ -1,6 +1,8 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | 3 | ||
| 4 | #pragma once | ||
| 5 | |||
| 4 | #include "core/hle/result.h" | 6 | #include "core/hle/result.h" |
| 5 | 7 | ||
| 6 | namespace Service::VI { | 8 | namespace Service::VI { |
diff --git a/src/core/memory.cpp b/src/core/memory.cpp index 2ac792566..9637cb5b1 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp | |||
| @@ -65,7 +65,7 @@ struct Memory::Impl { | |||
| 65 | return {}; | 65 | return {}; |
| 66 | } | 66 | } |
| 67 | 67 | ||
| 68 | return system.DeviceMemory().GetPointer(paddr) + vaddr; | 68 | return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr; |
| 69 | } | 69 | } |
| 70 | 70 | ||
| 71 | [[nodiscard]] u8* GetPointerFromDebugMemory(VAddr vaddr) const { | 71 | [[nodiscard]] u8* GetPointerFromDebugMemory(VAddr vaddr) const { |
| @@ -75,7 +75,7 @@ struct Memory::Impl { | |||
| 75 | return {}; | 75 | return {}; |
| 76 | } | 76 | } |
| 77 | 77 | ||
| 78 | return system.DeviceMemory().GetPointer(paddr) + vaddr; | 78 | return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr; |
| 79 | } | 79 | } |
| 80 | 80 | ||
| 81 | u8 Read8(const VAddr addr) { | 81 | u8 Read8(const VAddr addr) { |
| @@ -499,7 +499,7 @@ struct Memory::Impl { | |||
| 499 | } else { | 499 | } else { |
| 500 | while (base != end) { | 500 | while (base != end) { |
| 501 | page_table.pointers[base].Store( | 501 | page_table.pointers[base].Store( |
| 502 | system.DeviceMemory().GetPointer(target) - (base << YUZU_PAGEBITS), type); | 502 | system.DeviceMemory().GetPointer<u8>(target) - (base << YUZU_PAGEBITS), type); |
| 503 | page_table.backing_addr[base] = target - (base << YUZU_PAGEBITS); | 503 | page_table.backing_addr[base] = target - (base << YUZU_PAGEBITS); |
| 504 | 504 | ||
| 505 | ASSERT_MSG(page_table.pointers[base].Pointer(), | 505 | ASSERT_MSG(page_table.pointers[base].Pointer(), |
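All three call sites switch to an explicit GetPointer&lt;u8&gt;(paddr), which suggests DeviceMemory::GetPointer became a template over the pointee type so callers can do byte arithmetic without casts. A stand-in sketch of such an accessor; the flat backing buffer and the class shape are assumptions, only the template call shape comes from the diff:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

class DeviceMemory {
public:
    explicit DeviceMemory(std::size_t size) : backing(size) {}

    // Returns the backing host pointer reinterpreted as T*.
    template <typename T>
    T* GetPointer(std::uint64_t paddr) {
        return reinterpret_cast<T*>(backing.data() + paddr);
    }

private:
    std::vector<std::uint8_t> backing;
};

int main() {
    DeviceMemory device_memory(0x1000);
    // Mirrors the diff's call shape: byte arithmetic on an explicit u8 view.
    std::uint8_t* host = device_memory.GetPointer<std::uint8_t>(0x100) + 0x10;
    *host = 0x42;
    return *host == 0x42 ? 0 : 1;
}
```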
diff --git a/src/tests/core/core_timing.cpp b/src/tests/core/core_timing.cpp index 7c432a63c..284b2ae66 100644 --- a/src/tests/core/core_timing.cpp +++ b/src/tests/core/core_timing.cpp | |||
| @@ -40,9 +40,6 @@ struct ScopeInit final { | |||
| 40 | core_timing.SetMulticore(true); | 40 | core_timing.SetMulticore(true); |
| 41 | core_timing.Initialize([]() {}); | 41 | core_timing.Initialize([]() {}); |
| 42 | } | 42 | } |
| 43 | ~ScopeInit() { | ||
| 44 | core_timing.Shutdown(); | ||
| 45 | } | ||
| 46 | 43 | ||
| 47 | Core::Timing::CoreTiming core_timing; | 44 | Core::Timing::CoreTiming core_timing; |
| 48 | }; | 45 | }; |
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt index 40e6d1ec4..cb8b46edf 100644 --- a/src/video_core/CMakeLists.txt +++ b/src/video_core/CMakeLists.txt | |||
| @@ -82,6 +82,7 @@ add_library(video_core STATIC | |||
| 82 | gpu_thread.h | 82 | gpu_thread.h |
| 83 | memory_manager.cpp | 83 | memory_manager.cpp |
| 84 | memory_manager.h | 84 | memory_manager.h |
| 85 | pte_kind.h | ||
| 85 | query_cache.h | 86 | query_cache.h |
| 86 | rasterizer_accelerated.cpp | 87 | rasterizer_accelerated.cpp |
| 87 | rasterizer_accelerated.h | 88 | rasterizer_accelerated.h |
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp index 3909d36c1..4eb7a100d 100644 --- a/src/video_core/engines/maxwell_dma.cpp +++ b/src/video_core/engines/maxwell_dma.cpp | |||
| @@ -56,66 +56,85 @@ void MaxwellDMA::Launch() { | |||
| 56 | ASSERT(launch.interrupt_type == LaunchDMA::InterruptType::NONE); | 56 | ASSERT(launch.interrupt_type == LaunchDMA::InterruptType::NONE); |
| 57 | ASSERT(launch.data_transfer_type == LaunchDMA::DataTransferType::NON_PIPELINED); | 57 | ASSERT(launch.data_transfer_type == LaunchDMA::DataTransferType::NON_PIPELINED); |
| 58 | 58 | ||
| 59 | const bool is_src_pitch = launch.src_memory_layout == LaunchDMA::MemoryLayout::PITCH; | 59 | if (launch.multi_line_enable) { |
| 60 | const bool is_dst_pitch = launch.dst_memory_layout == LaunchDMA::MemoryLayout::PITCH; | 60 | const bool is_src_pitch = launch.src_memory_layout == LaunchDMA::MemoryLayout::PITCH; |
| 61 | 61 | const bool is_dst_pitch = launch.dst_memory_layout == LaunchDMA::MemoryLayout::PITCH; | |
| 62 | if (!is_src_pitch && !is_dst_pitch) { | 62 | |
| 63 | // If both the source and the destination are in block layout, assert. | 63 | if (!is_src_pitch && !is_dst_pitch) { |
| 64 | UNIMPLEMENTED_MSG("Tiled->Tiled DMA transfers are not yet implemented"); | 64 | // If both the source and the destination are in block layout, assert. |
| 65 | return; | 65 | UNIMPLEMENTED_MSG("Tiled->Tiled DMA transfers are not yet implemented"); |
| 66 | } | 66 | return; |
| 67 | } | ||
| 67 | 68 | ||
| 68 | if (is_src_pitch && is_dst_pitch) { | 69 | if (is_src_pitch && is_dst_pitch) { |
| 69 | CopyPitchToPitch(); | 70 | for (u32 line = 0; line < regs.line_count; ++line) { |
| 71 | const GPUVAddr source_line = | ||
| 72 | regs.offset_in + static_cast<size_t>(line) * regs.pitch_in; | ||
| 73 | const GPUVAddr dest_line = | ||
| 74 | regs.offset_out + static_cast<size_t>(line) * regs.pitch_out; | ||
| 75 | memory_manager.CopyBlock(dest_line, source_line, regs.line_length_in); | ||
| 76 | } | ||
| 77 | } else { | ||
| 78 | if (!is_src_pitch && is_dst_pitch) { | ||
| 79 | CopyBlockLinearToPitch(); | ||
| 80 | } else { | ||
| 81 | CopyPitchToBlockLinear(); | ||
| 82 | } | ||
| 83 | } | ||
| 70 | } else { | 84 | } else { |
| 71 | ASSERT(launch.multi_line_enable == 1); | 85 | // TODO: allow multisized components. |
| 72 | 86 | auto& accelerate = rasterizer->AccessAccelerateDMA(); | |
| 73 | if (!is_src_pitch && is_dst_pitch) { | 87 | const bool is_const_a_dst = regs.remap_const.dst_x == RemapConst::Swizzle::CONST_A; |
| 74 | CopyBlockLinearToPitch(); | 88 | if (regs.launch_dma.remap_enable != 0 && is_const_a_dst) { |
| 89 | ASSERT(regs.remap_const.component_size_minus_one == 3); | ||
| 90 | accelerate.BufferClear(regs.offset_out, regs.line_length_in, regs.remap_consta_value); | ||
| 91 | std::vector<u32> tmp_buffer(regs.line_length_in, regs.remap_consta_value); | ||
| 92 | memory_manager.WriteBlockUnsafe(regs.offset_out, | ||
| 93 | reinterpret_cast<u8*>(tmp_buffer.data()), | ||
| 94 | regs.line_length_in * sizeof(u32)); | ||
| 75 | } else { | 95 | } else { |
| 76 | CopyPitchToBlockLinear(); | 96 | auto convert_linear_2_blocklinear_addr = [](u64 address) { |
| 97 | return (address & ~0x1f0ULL) | ((address & 0x40) >> 2) | ((address & 0x10) << 1) | | ||
| 98 | ((address & 0x180) >> 1) | ((address & 0x20) << 3); | ||
| 99 | }; | ||
| 100 | auto src_kind = memory_manager.GetPageKind(regs.offset_in); | ||
| 101 | auto dst_kind = memory_manager.GetPageKind(regs.offset_out); | ||
| 102 | const bool is_src_pitch = IsPitchKind(static_cast<PTEKind>(src_kind)); | ||
| 103 | const bool is_dst_pitch = IsPitchKind(static_cast<PTEKind>(dst_kind)); | ||
| 104 | if (!is_src_pitch && is_dst_pitch) { | ||
| 105 | std::vector<u8> tmp_buffer(regs.line_length_in); | ||
| 106 | std::vector<u8> dst_buffer(regs.line_length_in); | ||
| 107 | memory_manager.ReadBlockUnsafe(regs.offset_in, tmp_buffer.data(), | ||
| 108 | regs.line_length_in); | ||
| 109 | for (u32 offset = 0; offset < regs.line_length_in; ++offset) { | ||
| 110 | dst_buffer[offset] = | ||
| 111 | tmp_buffer[convert_linear_2_blocklinear_addr(regs.offset_in + offset) - | ||
| 112 | regs.offset_in]; | ||
| 113 | } | ||
| 114 | memory_manager.WriteBlock(regs.offset_out, dst_buffer.data(), regs.line_length_in); | ||
| 115 | } else if (is_src_pitch && !is_dst_pitch) { | ||
| 116 | std::vector<u8> tmp_buffer(regs.line_length_in); | ||
| 117 | std::vector<u8> dst_buffer(regs.line_length_in); | ||
| 118 | memory_manager.ReadBlockUnsafe(regs.offset_in, tmp_buffer.data(), | ||
| 119 | regs.line_length_in); | ||
| 120 | for (u32 offset = 0; offset < regs.line_length_in; ++offset) { | ||
| 121 | dst_buffer[convert_linear_2_blocklinear_addr(regs.offset_out + offset) - | ||
| 122 | regs.offset_out] = tmp_buffer[offset]; | ||
| 123 | } | ||
| 124 | memory_manager.WriteBlock(regs.offset_out, dst_buffer.data(), regs.line_length_in); | ||
| 125 | } else { | ||
| 126 | if (!accelerate.BufferCopy(regs.offset_in, regs.offset_out, regs.line_length_in)) { | ||
| 127 | std::vector<u8> tmp_buffer(regs.line_length_in); | ||
| 128 | memory_manager.ReadBlockUnsafe(regs.offset_in, tmp_buffer.data(), | ||
| 129 | regs.line_length_in); | ||
| 130 | memory_manager.WriteBlock(regs.offset_out, tmp_buffer.data(), | ||
| 131 | regs.line_length_in); | ||
| 132 | } | ||
| 133 | } | ||
| 77 | } | 134 | } |
| 78 | } | 135 | } |
| 79 | ReleaseSemaphore(); | ||
| 80 | } | ||
| 81 | 136 | ||
| 82 | void MaxwellDMA::CopyPitchToPitch() { | 137 | ReleaseSemaphore(); |
| 83 | // When `multi_line_enable` bit is enabled we copy a 2D image of dimensions | ||
| 84 | // (line_length_in, line_count). | ||
| 85 | // Otherwise the copy is performed as if we were copying a 1D buffer of length line_length_in. | ||
| 86 | const bool remap_enabled = regs.launch_dma.remap_enable != 0; | ||
| 87 | if (regs.launch_dma.multi_line_enable) { | ||
| 88 | UNIMPLEMENTED_IF(remap_enabled); | ||
| 89 | |||
| 90 | // Perform a line-by-line copy. | ||
| 91 | // We're going to take a subrect of size (line_length_in, line_count) from the source | ||
| 92 | // rectangle. There is no need to manually flush/invalidate the regions because CopyBlock | ||
| 93 | // does that for us. | ||
| 94 | for (u32 line = 0; line < regs.line_count; ++line) { | ||
| 95 | const GPUVAddr source_line = regs.offset_in + static_cast<size_t>(line) * regs.pitch_in; | ||
| 96 | const GPUVAddr dest_line = regs.offset_out + static_cast<size_t>(line) * regs.pitch_out; | ||
| 97 | memory_manager.CopyBlock(dest_line, source_line, regs.line_length_in); | ||
| 98 | } | ||
| 99 | return; | ||
| 100 | } | ||
| 101 | // TODO: allow multisized components. | ||
| 102 | auto& accelerate = rasterizer->AccessAccelerateDMA(); | ||
| 103 | const bool is_const_a_dst = regs.remap_const.dst_x == RemapConst::Swizzle::CONST_A; | ||
| 104 | const bool is_buffer_clear = remap_enabled && is_const_a_dst; | ||
| 105 | if (is_buffer_clear) { | ||
| 106 | ASSERT(regs.remap_const.component_size_minus_one == 3); | ||
| 107 | accelerate.BufferClear(regs.offset_out, regs.line_length_in, regs.remap_consta_value); | ||
| 108 | std::vector<u32> tmp_buffer(regs.line_length_in, regs.remap_consta_value); | ||
| 109 | memory_manager.WriteBlockUnsafe(regs.offset_out, reinterpret_cast<u8*>(tmp_buffer.data()), | ||
| 110 | regs.line_length_in * sizeof(u32)); | ||
| 111 | return; | ||
| 112 | } | ||
| 113 | UNIMPLEMENTED_IF(remap_enabled); | ||
| 114 | if (!accelerate.BufferCopy(regs.offset_in, regs.offset_out, regs.line_length_in)) { | ||
| 115 | std::vector<u8> tmp_buffer(regs.line_length_in); | ||
| 116 | memory_manager.ReadBlockUnsafe(regs.offset_in, tmp_buffer.data(), regs.line_length_in); | ||
| 117 | memory_manager.WriteBlock(regs.offset_out, tmp_buffer.data(), regs.line_length_in); | ||
| 118 | } | ||
| 119 | } | 138 | } |
| 120 | 139 | ||
| 121 | void MaxwellDMA::CopyBlockLinearToPitch() { | 140 | void MaxwellDMA::CopyBlockLinearToPitch() { |
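The rewritten Launch() folds the old CopyPitchToPitch() into a single-line path that now consults the page kind of both addresses and, when exactly one side is block linear, relocates bytes with a fixed bit permutation (the convert_linear_2_blocklinear_addr lambda, which shuffles bits 4 through 8 within a 512-byte tile). The stand-alone demo below applies the exact mapping from the diff and checks that scattering a buffer through it and gathering it back is lossless, i.e. that the mapping is a permutation:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Bit shuffle copied verbatim from the diff: permutes bits 4..8, leaving all
// other bits untouched.
constexpr std::uint64_t LinearToBlockLinear(std::uint64_t address) {
    return (address & ~0x1f0ULL) | ((address & 0x40) >> 2) | ((address & 0x10) << 1) |
           ((address & 0x180) >> 1) | ((address & 0x20) << 3);
}

int main() {
    constexpr std::size_t size = 0x200; // one 512-byte tile covers bits 0..8
    std::vector<std::uint8_t> linear(size), swizzled(size), back(size);
    for (std::size_t i = 0; i < size; ++i) {
        linear[i] = static_cast<std::uint8_t>(i * 37);
    }
    // Scatter (pitch -> block linear), as the destination-side loop does.
    for (std::size_t i = 0; i < size; ++i) {
        swizzled[LinearToBlockLinear(i)] = linear[i];
    }
    // Gather (block linear -> pitch), as the source-side loop does.
    for (std::size_t i = 0; i < size; ++i) {
        back[i] = swizzled[LinearToBlockLinear(i)];
    }
    std::printf("round trip %s\n", back == linear ? "ok" : "mismatch");
}
```

Because the diff uses the same function in both directions, gathering on the source side and scattering on the destination side, a lossless round trip is exactly the property the two copy paths rely on.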
diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h index bc48320ce..953e34adc 100644 --- a/src/video_core/engines/maxwell_dma.h +++ b/src/video_core/engines/maxwell_dma.h | |||
| @@ -219,8 +219,6 @@ private: | |||
| 219 | /// registers. | 219 | /// registers. |
| 220 | void Launch(); | 220 | void Launch(); |
| 221 | 221 | ||
| 222 | void CopyPitchToPitch(); | ||
| 223 | |||
| 224 | void CopyBlockLinearToPitch(); | 222 | void CopyBlockLinearToPitch(); |
| 225 | 223 | ||
| 226 | void CopyPitchToBlockLinear(); | 224 | void CopyPitchToBlockLinear(); |
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index cca401c74..d07b21bd6 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp | |||
| @@ -41,7 +41,11 @@ MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64 | |||
| 41 | big_entries.resize(big_page_table_size / 32, 0); | 41 | big_entries.resize(big_page_table_size / 32, 0); |
| 42 | big_page_table_cpu.resize(big_page_table_size); | 42 | big_page_table_cpu.resize(big_page_table_size); |
| 43 | big_page_continous.resize(big_page_table_size / continous_bits, 0); | 43 | big_page_continous.resize(big_page_table_size / continous_bits, 0); |
| 44 | std::array<PTEKind, 32> kind_valus; | ||
| 45 | kind_valus.fill(PTEKind::INVALID); | ||
| 46 | big_kinds.resize(big_page_table_size / 32, kind_valus); | ||
| 44 | entries.resize(page_table_size / 32, 0); | 47 | entries.resize(page_table_size / 32, 0); |
| 48 | kinds.resize(big_page_table_size / 32, kind_valus); | ||
| 45 | } | 49 | } |
| 46 | 50 | ||
| 47 | MemoryManager::~MemoryManager() = default; | 51 | MemoryManager::~MemoryManager() = default; |
| @@ -78,6 +82,41 @@ void MemoryManager::SetEntry(size_t position, MemoryManager::EntryType entry) { | |||
| 78 | } | 82 | } |
| 79 | } | 83 | } |
| 80 | 84 | ||
| 85 | PTEKind MemoryManager::GetPageKind(GPUVAddr gpu_addr) const { | ||
| 86 | auto entry = GetEntry<true>(gpu_addr); | ||
| 87 | if (entry == EntryType::Mapped || entry == EntryType::Reserved) [[likely]] { | ||
| 88 | return GetKind<true>(gpu_addr); | ||
| 89 | } else { | ||
| 90 | return GetKind<false>(gpu_addr); | ||
| 91 | } | ||
| 92 | } | ||
| 93 | |||
| 94 | template <bool is_big_page> | ||
| 95 | PTEKind MemoryManager::GetKind(size_t position) const { | ||
| 96 | if constexpr (is_big_page) { | ||
| 97 | position = position >> big_page_bits; | ||
| 98 | const size_t sub_index = position % 32; | ||
| 99 | return big_kinds[position / 32][sub_index]; | ||
| 100 | } else { | ||
| 101 | position = position >> page_bits; | ||
| 102 | const size_t sub_index = position % 32; | ||
| 103 | return kinds[position / 32][sub_index]; | ||
| 104 | } | ||
| 105 | } | ||
| 106 | |||
| 107 | template <bool is_big_page> | ||
| 108 | void MemoryManager::SetKind(size_t position, PTEKind kind) { | ||
| 109 | if constexpr (is_big_page) { | ||
| 110 | position = position >> big_page_bits; | ||
| 111 | const size_t sub_index = position % 32; | ||
| 112 | big_kinds[position / 32][sub_index] = kind; | ||
| 113 | } else { | ||
| 114 | position = position >> page_bits; | ||
| 115 | const size_t sub_index = position % 32; | ||
| 116 | kinds[position / 32][sub_index] = kind; | ||
| 117 | } | ||
| 118 | } | ||
| 119 | |||
| 81 | inline bool MemoryManager::IsBigPageContinous(size_t big_page_index) const { | 120 | inline bool MemoryManager::IsBigPageContinous(size_t big_page_index) const { |
| 82 | const u64 entry_mask = big_page_continous[big_page_index / continous_bits]; | 121 | const u64 entry_mask = big_page_continous[big_page_index / continous_bits]; |
| 83 | const size_t sub_index = big_page_index % continous_bits; | 122 | const size_t sub_index = big_page_index % continous_bits; |
| @@ -92,8 +131,8 @@ inline void MemoryManager::SetBigPageContinous(size_t big_page_index, bool value | |||
| 92 | } | 131 | } |
| 93 | 132 | ||
| 94 | template <MemoryManager::EntryType entry_type> | 133 | template <MemoryManager::EntryType entry_type> |
| 95 | GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, | 134 | GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size, |
| 96 | size_t size) { | 135 | PTEKind kind) { |
| 97 | u64 remaining_size{size}; | 136 | u64 remaining_size{size}; |
| 98 | if constexpr (entry_type == EntryType::Mapped) { | 137 | if constexpr (entry_type == EntryType::Mapped) { |
| 99 | page_table.ReserveRange(gpu_addr, size); | 138 | page_table.ReserveRange(gpu_addr, size); |
| @@ -102,6 +141,7 @@ GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cp | |||
| 102 | const GPUVAddr current_gpu_addr = gpu_addr + offset; | 141 | const GPUVAddr current_gpu_addr = gpu_addr + offset; |
| 103 | [[maybe_unused]] const auto current_entry_type = GetEntry<false>(current_gpu_addr); | 142 | [[maybe_unused]] const auto current_entry_type = GetEntry<false>(current_gpu_addr); |
| 104 | SetEntry<false>(current_gpu_addr, entry_type); | 143 | SetEntry<false>(current_gpu_addr, entry_type); |
| 144 | SetKind<false>(current_gpu_addr, kind); | ||
| 105 | if (current_entry_type != entry_type) { | 145 | if (current_entry_type != entry_type) { |
| 106 | rasterizer->ModifyGPUMemory(unique_identifier, gpu_addr, page_size); | 146 | rasterizer->ModifyGPUMemory(unique_identifier, gpu_addr, page_size); |
| 107 | } | 147 | } |
| @@ -118,12 +158,13 @@ GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cp | |||
| 118 | 158 | ||
| 119 | template <MemoryManager::EntryType entry_type> | 159 | template <MemoryManager::EntryType entry_type> |
| 120 | GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, | 160 | GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, |
| 121 | size_t size) { | 161 | size_t size, PTEKind kind) { |
| 122 | u64 remaining_size{size}; | 162 | u64 remaining_size{size}; |
| 123 | for (u64 offset{}; offset < size; offset += big_page_size) { | 163 | for (u64 offset{}; offset < size; offset += big_page_size) { |
| 124 | const GPUVAddr current_gpu_addr = gpu_addr + offset; | 164 | const GPUVAddr current_gpu_addr = gpu_addr + offset; |
| 125 | [[maybe_unused]] const auto current_entry_type = GetEntry<true>(current_gpu_addr); | 165 | [[maybe_unused]] const auto current_entry_type = GetEntry<true>(current_gpu_addr); |
| 126 | SetEntry<true>(current_gpu_addr, entry_type); | 166 | SetEntry<true>(current_gpu_addr, entry_type); |
| 167 | SetKind<true>(current_gpu_addr, kind); | ||
| 127 | if (current_entry_type != entry_type) { | 168 | if (current_entry_type != entry_type) { |
| 128 | rasterizer->ModifyGPUMemory(unique_identifier, gpu_addr, big_page_size); | 169 | rasterizer->ModifyGPUMemory(unique_identifier, gpu_addr, big_page_size); |
| 129 | } | 170 | } |
| @@ -159,19 +200,19 @@ void MemoryManager::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) | |||
| 159 | rasterizer = rasterizer_; | 200 | rasterizer = rasterizer_; |
| 160 | } | 201 | } |
| 161 | 202 | ||
| 162 | GPUVAddr MemoryManager::Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size, | 203 | GPUVAddr MemoryManager::Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size, PTEKind kind, |
| 163 | bool is_big_pages) { | 204 | bool is_big_pages) { |
| 164 | if (is_big_pages) [[likely]] { | 205 | if (is_big_pages) [[likely]] { |
| 165 | return BigPageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size); | 206 | return BigPageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size, kind); |
| 166 | } | 207 | } |
| 167 | return PageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size); | 208 | return PageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size, kind); |
| 168 | } | 209 | } |
| 169 | 210 | ||
| 170 | GPUVAddr MemoryManager::MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages) { | 211 | GPUVAddr MemoryManager::MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages) { |
| 171 | if (is_big_pages) [[likely]] { | 212 | if (is_big_pages) [[likely]] { |
| 172 | return BigPageTableOp<EntryType::Reserved>(gpu_addr, 0, size); | 213 | return BigPageTableOp<EntryType::Reserved>(gpu_addr, 0, size, PTEKind::INVALID); |
| 173 | } | 214 | } |
| 174 | return PageTableOp<EntryType::Reserved>(gpu_addr, 0, size); | 215 | return PageTableOp<EntryType::Reserved>(gpu_addr, 0, size, PTEKind::INVALID); |
| 175 | } | 216 | } |
| 176 | 217 | ||
| 177 | void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) { | 218 | void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) { |
| @@ -188,8 +229,8 @@ void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) { | |||
| 188 | rasterizer->UnmapMemory(*cpu_addr, map_size); | 229 | rasterizer->UnmapMemory(*cpu_addr, map_size); |
| 189 | } | 230 | } |
| 190 | 231 | ||
| 191 | BigPageTableOp<EntryType::Free>(gpu_addr, 0, size); | 232 | BigPageTableOp<EntryType::Free>(gpu_addr, 0, size, PTEKind::INVALID); |
| 192 | PageTableOp<EntryType::Free>(gpu_addr, 0, size); | 233 | PageTableOp<EntryType::Free>(gpu_addr, 0, size, PTEKind::INVALID); |
| 193 | } | 234 | } |
| 194 | 235 | ||
| 195 | std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const { | 236 | std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const { |
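GetKind/SetKind store one PTEKind per (big) page in 32-wide chunks, indexed as page = address >> page_bits, then chunk = page / 32 and slot = page % 32, with INVALID as the fill value for unmapped pages. A runnable reduction of that bookkeeping; KindTable is an illustrative name and PTEKind is cut down to the two values the demo needs:

```cpp
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

enum class PTEKind : std::uint8_t { INVALID = 0xff, PITCH = 0x00 };

class KindTable {
public:
    KindTable(std::size_t num_pages, std::size_t page_bits_) : page_bits(page_bits_) {
        std::array<PTEKind, 32> fill;
        fill.fill(PTEKind::INVALID); // unmapped pages report INVALID
        chunks.resize((num_pages + 31) / 32, fill);
    }
    void Set(std::uint64_t gpu_addr, PTEKind kind) {
        const std::size_t page = gpu_addr >> page_bits;
        chunks[page / 32][page % 32] = kind;
    }
    PTEKind Get(std::uint64_t gpu_addr) const {
        const std::size_t page = gpu_addr >> page_bits;
        return chunks[page / 32][page % 32];
    }
private:
    std::vector<std::array<PTEKind, 32>> chunks;
    std::size_t page_bits;
};

int main() {
    KindTable table(1024, 16);          // 1024 big pages of 64 KiB
    table.Set(0x30000, PTEKind::PITCH); // lands in page 3
    std::printf("kind=%u\n", static_cast<unsigned>(table.Get(0x30000)));
}
```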
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h index f992e29f3..ab4bc9ec6 100644 --- a/src/video_core/memory_manager.h +++ b/src/video_core/memory_manager.h | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #include "common/common_types.h" | 11 | #include "common/common_types.h" |
| 12 | #include "common/multi_level_page_table.h" | 12 | #include "common/multi_level_page_table.h" |
| 13 | #include "common/virtual_buffer.h" | 13 | #include "common/virtual_buffer.h" |
| 14 | #include "video_core/pte_kind.h" | ||
| 14 | 15 | ||
| 15 | namespace VideoCore { | 16 | namespace VideoCore { |
| 16 | class RasterizerInterface; | 17 | class RasterizerInterface; |
| @@ -98,7 +99,8 @@ public: | |||
| 98 | std::vector<std::pair<GPUVAddr, std::size_t>> GetSubmappedRange(GPUVAddr gpu_addr, | 99 | std::vector<std::pair<GPUVAddr, std::size_t>> GetSubmappedRange(GPUVAddr gpu_addr, |
| 99 | std::size_t size) const; | 100 | std::size_t size) const; |
| 100 | 101 | ||
| 101 | GPUVAddr Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size, bool is_big_pages = true); | 102 | GPUVAddr Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size, |
| 103 | PTEKind kind = PTEKind::INVALID, bool is_big_pages = true); | ||
| 102 | GPUVAddr MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages = true); | 104 | GPUVAddr MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages = true); |
| 103 | void Unmap(GPUVAddr gpu_addr, std::size_t size); | 105 | void Unmap(GPUVAddr gpu_addr, std::size_t size); |
| 104 | 106 | ||
| @@ -114,6 +116,8 @@ public: | |||
| 114 | return gpu_addr < address_space_size; | 116 | return gpu_addr < address_space_size; |
| 115 | } | 117 | } |
| 116 | 118 | ||
| 119 | PTEKind GetPageKind(GPUVAddr gpu_addr) const; | ||
| 120 | |||
| 117 | private: | 121 | private: |
| 118 | template <bool is_big_pages, typename FuncMapped, typename FuncReserved, typename FuncUnmapped> | 122 | template <bool is_big_pages, typename FuncMapped, typename FuncReserved, typename FuncUnmapped> |
| 119 | inline void MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size, FuncMapped&& func_mapped, | 123 | inline void MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size, FuncMapped&& func_mapped, |
| @@ -166,10 +170,12 @@ private: | |||
| 166 | std::vector<u64> big_entries; | 170 | std::vector<u64> big_entries; |
| 167 | 171 | ||
| 168 | template <EntryType entry_type> | 172 | template <EntryType entry_type> |
| 169 | GPUVAddr PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size); | 173 | GPUVAddr PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size, |
| 174 | PTEKind kind); | ||
| 170 | 175 | ||
| 171 | template <EntryType entry_type> | 176 | template <EntryType entry_type> |
| 172 | GPUVAddr BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size); | 177 | GPUVAddr BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size, |
| 178 | PTEKind kind); | ||
| 173 | 179 | ||
| 174 | template <bool is_big_page> | 180 | template <bool is_big_page> |
| 175 | inline EntryType GetEntry(size_t position) const; | 181 | inline EntryType GetEntry(size_t position) const; |
| @@ -177,6 +183,15 @@ private: | |||
| 177 | template <bool is_big_page> | 183 | template <bool is_big_page> |
| 178 | inline void SetEntry(size_t position, EntryType entry); | 184 | inline void SetEntry(size_t position, EntryType entry); |
| 179 | 185 | ||
| 186 | std::vector<std::array<PTEKind, 32>> kinds; | ||
| 187 | std::vector<std::array<PTEKind, 32>> big_kinds; | ||
| 188 | |||
| 189 | template <bool is_big_page> | ||
| 190 | inline PTEKind GetKind(size_t position) const; | ||
| 191 | |||
| 192 | template <bool is_big_page> | ||
| 193 | inline void SetKind(size_t position, PTEKind kind); | ||
| 194 | |||
| 180 | Common::MultiLevelPageTable<u32> page_table; | 195 | Common::MultiLevelPageTable<u32> page_table; |
| 181 | Common::VirtualBuffer<u32> big_page_table_cpu; | 196 | Common::VirtualBuffer<u32> big_page_table_cpu; |
| 182 | 197 | ||
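Because the new kind parameter is inserted before is_big_pages, every caller that used to pass the bool positionally had to be touched, which is exactly what the nvhost_as_gpu hunks above show; callers that do not care get PTEKind::INVALID by default. A compilable stub of the new signature (the body is a no-op placeholder):

```cpp
#include <cstddef>
#include <cstdint>

enum class PTEKind : std::uint8_t { INVALID = 0xff, PITCH = 0x00 };

struct MemoryManager {
    std::uint64_t Map(std::uint64_t gpu_addr, std::uint64_t cpu_addr, std::size_t size,
                      PTEKind kind = PTEKind::INVALID, bool is_big_pages = true) {
        (void)cpu_addr; (void)size; (void)kind; (void)is_big_pages;
        return gpu_addr;
    }
};

int main() {
    MemoryManager gmmu;
    gmmu.Map(0x10000, 0x20000, 0x1000);                        // defaults: INVALID, big pages
    gmmu.Map(0x30000, 0x40000, 0x1000, PTEKind::PITCH, false); // explicit kind, small pages
}
```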
diff --git a/src/video_core/pte_kind.h b/src/video_core/pte_kind.h new file mode 100644 index 000000000..591d7214b --- /dev/null +++ b/src/video_core/pte_kind.h | |||
| @@ -0,0 +1,264 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include "common/common_types.h" | ||
| 7 | |||
| 8 | namespace Tegra { | ||
| 9 | |||
| 10 | // https://github.com/NVIDIA/open-gpu-doc/blob/master/manuals/volta/gv100/dev_mmu.ref.txt | ||
| 11 | enum class PTEKind : u8 { | ||
| 12 | INVALID = 0xff, | ||
| 13 | PITCH = 0x00, | ||
| 14 | Z16 = 0x01, | ||
| 15 | Z16_2C = 0x02, | ||
| 16 | Z16_MS2_2C = 0x03, | ||
| 17 | Z16_MS4_2C = 0x04, | ||
| 18 | Z16_MS8_2C = 0x05, | ||
| 19 | Z16_MS16_2C = 0x06, | ||
| 20 | Z16_2Z = 0x07, | ||
| 21 | Z16_MS2_2Z = 0x08, | ||
| 22 | Z16_MS4_2Z = 0x09, | ||
| 23 | Z16_MS8_2Z = 0x0a, | ||
| 24 | Z16_MS16_2Z = 0x0b, | ||
| 25 | Z16_2CZ = 0x36, | ||
| 26 | Z16_MS2_2CZ = 0x37, | ||
| 27 | Z16_MS4_2CZ = 0x38, | ||
| 28 | Z16_MS8_2CZ = 0x39, | ||
| 29 | Z16_MS16_2CZ = 0x5f, | ||
| 30 | Z16_4CZ = 0x0c, | ||
| 31 | Z16_MS2_4CZ = 0x0d, | ||
| 32 | Z16_MS4_4CZ = 0x0e, | ||
| 33 | Z16_MS8_4CZ = 0x0f, | ||
| 34 | Z16_MS16_4CZ = 0x10, | ||
| 35 | S8Z24 = 0x11, | ||
| 36 | S8Z24_1Z = 0x12, | ||
| 37 | S8Z24_MS2_1Z = 0x13, | ||
| 38 | S8Z24_MS4_1Z = 0x14, | ||
| 39 | S8Z24_MS8_1Z = 0x15, | ||
| 40 | S8Z24_MS16_1Z = 0x16, | ||
| 41 | S8Z24_2CZ = 0x17, | ||
| 42 | S8Z24_MS2_2CZ = 0x18, | ||
| 43 | S8Z24_MS4_2CZ = 0x19, | ||
| 44 | S8Z24_MS8_2CZ = 0x1a, | ||
| 45 | S8Z24_MS16_2CZ = 0x1b, | ||
| 46 | S8Z24_2CS = 0x1c, | ||
| 47 | S8Z24_MS2_2CS = 0x1d, | ||
| 48 | S8Z24_MS4_2CS = 0x1e, | ||
| 49 | S8Z24_MS8_2CS = 0x1f, | ||
| 50 | S8Z24_MS16_2CS = 0x20, | ||
| 51 | S8Z24_4CSZV = 0x21, | ||
| 52 | S8Z24_MS2_4CSZV = 0x22, | ||
| 53 | S8Z24_MS4_4CSZV = 0x23, | ||
| 54 | S8Z24_MS8_4CSZV = 0x24, | ||
| 55 | S8Z24_MS16_4CSZV = 0x25, | ||
| 56 | V8Z24_MS4_VC12 = 0x26, | ||
| 57 | V8Z24_MS4_VC4 = 0x27, | ||
| 58 | V8Z24_MS8_VC8 = 0x28, | ||
| 59 | V8Z24_MS8_VC24 = 0x29, | ||
| 60 | V8Z24_MS4_VC12_1ZV = 0x2e, | ||
| 61 | V8Z24_MS4_VC4_1ZV = 0x2f, | ||
| 62 | V8Z24_MS8_VC8_1ZV = 0x30, | ||
| 63 | V8Z24_MS8_VC24_1ZV = 0x31, | ||
| 64 | V8Z24_MS4_VC12_2CS = 0x32, | ||
| 65 | V8Z24_MS4_VC4_2CS = 0x33, | ||
| 66 | V8Z24_MS8_VC8_2CS = 0x34, | ||
| 67 | V8Z24_MS8_VC24_2CS = 0x35, | ||
| 68 | V8Z24_MS4_VC12_2CZV = 0x3a, | ||
| 69 | V8Z24_MS4_VC4_2CZV = 0x3b, | ||
| 70 | V8Z24_MS8_VC8_2CZV = 0x3c, | ||
| 71 | V8Z24_MS8_VC24_2CZV = 0x3d, | ||
| 72 | V8Z24_MS4_VC12_2ZV = 0x3e, | ||
| 73 | V8Z24_MS4_VC4_2ZV = 0x3f, | ||
| 74 | V8Z24_MS8_VC8_2ZV = 0x40, | ||
| 75 | V8Z24_MS8_VC24_2ZV = 0x41, | ||
| 76 | V8Z24_MS4_VC12_4CSZV = 0x42, | ||
| 77 | V8Z24_MS4_VC4_4CSZV = 0x43, | ||
| 78 | V8Z24_MS8_VC8_4CSZV = 0x44, | ||
| 79 | V8Z24_MS8_VC24_4CSZV = 0x45, | ||
| 80 | Z24S8 = 0x46, | ||
| 81 | Z24S8_1Z = 0x47, | ||
| 82 | Z24S8_MS2_1Z = 0x48, | ||
| 83 | Z24S8_MS4_1Z = 0x49, | ||
| 84 | Z24S8_MS8_1Z = 0x4a, | ||
| 85 | Z24S8_MS16_1Z = 0x4b, | ||
| 86 | Z24S8_2CS = 0x4c, | ||
| 87 | Z24S8_MS2_2CS = 0x4d, | ||
| 88 | Z24S8_MS4_2CS = 0x4e, | ||
| 89 | Z24S8_MS8_2CS = 0x4f, | ||
| 90 | Z24S8_MS16_2CS = 0x50, | ||
| 91 | Z24S8_2CZ = 0x51, | ||
| 92 | Z24S8_MS2_2CZ = 0x52, | ||
| 93 | Z24S8_MS4_2CZ = 0x53, | ||
| 94 | Z24S8_MS8_2CZ = 0x54, | ||
| 95 | Z24S8_MS16_2CZ = 0x55, | ||
| 96 | Z24S8_4CSZV = 0x56, | ||
| 97 | Z24S8_MS2_4CSZV = 0x57, | ||
| 98 | Z24S8_MS4_4CSZV = 0x58, | ||
| 99 | Z24S8_MS8_4CSZV = 0x59, | ||
| 100 | Z24S8_MS16_4CSZV = 0x5a, | ||
| 101 | Z24V8_MS4_VC12 = 0x5b, | ||
| 102 | Z24V8_MS4_VC4 = 0x5c, | ||
| 103 | Z24V8_MS8_VC8 = 0x5d, | ||
| 104 | Z24V8_MS8_VC24 = 0x5e, | ||
| 105 | YUV_B8C1_2Y = 0x60, | ||
| 106 | YUV_B8C2_2Y = 0x61, | ||
| 107 | YUV_B10C1_2Y = 0x62, | ||
| 108 | YUV_B10C2_2Y = 0x6b, | ||
| 109 | YUV_B12C1_2Y = 0x6c, | ||
| 110 | YUV_B12C2_2Y = 0x6d, | ||
| 111 | Z24V8_MS4_VC12_1ZV = 0x63, | ||
| 112 | Z24V8_MS4_VC4_1ZV = 0x64, | ||
| 113 | Z24V8_MS8_VC8_1ZV = 0x65, | ||
| 114 | Z24V8_MS8_VC24_1ZV = 0x66, | ||
| 115 | Z24V8_MS4_VC12_2CS = 0x67, | ||
| 116 | Z24V8_MS4_VC4_2CS = 0x68, | ||
| 117 | Z24V8_MS8_VC8_2CS = 0x69, | ||
| 118 | Z24V8_MS8_VC24_2CS = 0x6a, | ||
| 119 | Z24V8_MS4_VC12_2CZV = 0x6f, | ||
| 120 | Z24V8_MS4_VC4_2CZV = 0x70, | ||
| 121 | Z24V8_MS8_VC8_2CZV = 0x71, | ||
| 122 | Z24V8_MS8_VC24_2CZV = 0x72, | ||
| 123 | Z24V8_MS4_VC12_2ZV = 0x73, | ||
| 124 | Z24V8_MS4_VC4_2ZV = 0x74, | ||
| 125 | Z24V8_MS8_VC8_2ZV = 0x75, | ||
| 126 | Z24V8_MS8_VC24_2ZV = 0x76, | ||
| 127 | Z24V8_MS4_VC12_4CSZV = 0x77, | ||
| 128 | Z24V8_MS4_VC4_4CSZV = 0x78, | ||
| 129 | Z24V8_MS8_VC8_4CSZV = 0x79, | ||
| 130 | Z24V8_MS8_VC24_4CSZV = 0x7a, | ||
| 131 | ZF32 = 0x7b, | ||
| 132 | ZF32_1Z = 0x7c, | ||
| 133 | ZF32_MS2_1Z = 0x7d, | ||
| 134 | ZF32_MS4_1Z = 0x7e, | ||
| 135 | ZF32_MS8_1Z = 0x7f, | ||
| 136 | ZF32_MS16_1Z = 0x80, | ||
| 137 | ZF32_2CS = 0x81, | ||
| 138 | ZF32_MS2_2CS = 0x82, | ||
| 139 | ZF32_MS4_2CS = 0x83, | ||
| 140 | ZF32_MS8_2CS = 0x84, | ||
| 141 | ZF32_MS16_2CS = 0x85, | ||
| 142 | ZF32_2CZ = 0x86, | ||
| 143 | ZF32_MS2_2CZ = 0x87, | ||
| 144 | ZF32_MS4_2CZ = 0x88, | ||
| 145 | ZF32_MS8_2CZ = 0x89, | ||
| 146 | ZF32_MS16_2CZ = 0x8a, | ||
| 147 | X8Z24_X16V8S8_MS4_VC12 = 0x8b, | ||
| 148 | X8Z24_X16V8S8_MS4_VC4 = 0x8c, | ||
| 149 | X8Z24_X16V8S8_MS8_VC8 = 0x8d, | ||
| 150 | X8Z24_X16V8S8_MS8_VC24 = 0x8e, | ||
| 151 | X8Z24_X16V8S8_MS4_VC12_1CS = 0x8f, | ||
| 152 | X8Z24_X16V8S8_MS4_VC4_1CS = 0x90, | ||
| 153 | X8Z24_X16V8S8_MS8_VC8_1CS = 0x91, | ||
| 154 | X8Z24_X16V8S8_MS8_VC24_1CS = 0x92, | ||
| 155 | X8Z24_X16V8S8_MS4_VC12_1ZV = 0x97, | ||
| 156 | X8Z24_X16V8S8_MS4_VC4_1ZV = 0x98, | ||
| 157 | X8Z24_X16V8S8_MS8_VC8_1ZV = 0x99, | ||
| 158 | X8Z24_X16V8S8_MS8_VC24_1ZV = 0x9a, | ||
| 159 | X8Z24_X16V8S8_MS4_VC12_1CZV = 0x9b, | ||
| 160 | X8Z24_X16V8S8_MS4_VC4_1CZV = 0x9c, | ||
| 161 | X8Z24_X16V8S8_MS8_VC8_1CZV = 0x9d, | ||
| 162 | X8Z24_X16V8S8_MS8_VC24_1CZV = 0x9e, | ||
| 163 | X8Z24_X16V8S8_MS4_VC12_2CS = 0x9f, | ||
| 164 | X8Z24_X16V8S8_MS4_VC4_2CS = 0xa0, | ||
| 165 | X8Z24_X16V8S8_MS8_VC8_2CS = 0xa1, | ||
| 166 | X8Z24_X16V8S8_MS8_VC24_2CS = 0xa2, | ||
| 167 | X8Z24_X16V8S8_MS4_VC12_2CSZV = 0xa3, | ||
| 168 | X8Z24_X16V8S8_MS4_VC4_2CSZV = 0xa4, | ||
| 169 | X8Z24_X16V8S8_MS8_VC8_2CSZV = 0xa5, | ||
| 170 | X8Z24_X16V8S8_MS8_VC24_2CSZV = 0xa6, | ||
| 171 | ZF32_X16V8S8_MS4_VC12 = 0xa7, | ||
| 172 | ZF32_X16V8S8_MS4_VC4 = 0xa8, | ||
| 173 | ZF32_X16V8S8_MS8_VC8 = 0xa9, | ||
| 174 | ZF32_X16V8S8_MS8_VC24 = 0xaa, | ||
| 175 | ZF32_X16V8S8_MS4_VC12_1CS = 0xab, | ||
| 176 | ZF32_X16V8S8_MS4_VC4_1CS = 0xac, | ||
| 177 | ZF32_X16V8S8_MS8_VC8_1CS = 0xad, | ||
| 178 | ZF32_X16V8S8_MS8_VC24_1CS = 0xae, | ||
| 179 | ZF32_X16V8S8_MS4_VC12_1ZV = 0xb3, | ||
| 180 | ZF32_X16V8S8_MS4_VC4_1ZV = 0xb4, | ||
| 181 | ZF32_X16V8S8_MS8_VC8_1ZV = 0xb5, | ||
| 182 | ZF32_X16V8S8_MS8_VC24_1ZV = 0xb6, | ||
| 183 | ZF32_X16V8S8_MS4_VC12_1CZV = 0xb7, | ||
| 184 | ZF32_X16V8S8_MS4_VC4_1CZV = 0xb8, | ||
| 185 | ZF32_X16V8S8_MS8_VC8_1CZV = 0xb9, | ||
| 186 | ZF32_X16V8S8_MS8_VC24_1CZV = 0xba, | ||
| 187 | ZF32_X16V8S8_MS4_VC12_2CS = 0xbb, | ||
| 188 | ZF32_X16V8S8_MS4_VC4_2CS = 0xbc, | ||
| 189 | ZF32_X16V8S8_MS8_VC8_2CS = 0xbd, | ||
| 190 | ZF32_X16V8S8_MS8_VC24_2CS = 0xbe, | ||
| 191 | ZF32_X16V8S8_MS4_VC12_2CSZV = 0xbf, | ||
| 192 | ZF32_X16V8S8_MS4_VC4_2CSZV = 0xc0, | ||
| 193 | ZF32_X16V8S8_MS8_VC8_2CSZV = 0xc1, | ||
| 194 | ZF32_X16V8S8_MS8_VC24_2CSZV = 0xc2, | ||
| 195 | ZF32_X24S8 = 0xc3, | ||
| 196 | ZF32_X24S8_1CS = 0xc4, | ||
| 197 | ZF32_X24S8_MS2_1CS = 0xc5, | ||
| 198 | ZF32_X24S8_MS4_1CS = 0xc6, | ||
| 199 | ZF32_X24S8_MS8_1CS = 0xc7, | ||
| 200 | ZF32_X24S8_MS16_1CS = 0xc8, | ||
| 201 | ZF32_X24S8_2CSZV = 0xce, | ||
| 202 | ZF32_X24S8_MS2_2CSZV = 0xcf, | ||
| 203 | ZF32_X24S8_MS4_2CSZV = 0xd0, | ||
| 204 | ZF32_X24S8_MS8_2CSZV = 0xd1, | ||
| 205 | ZF32_X24S8_MS16_2CSZV = 0xd2, | ||
| 206 | ZF32_X24S8_2CS = 0xd3, | ||
| 207 | ZF32_X24S8_MS2_2CS = 0xd4, | ||
| 208 | ZF32_X24S8_MS4_2CS = 0xd5, | ||
| 209 | ZF32_X24S8_MS8_2CS = 0xd6, | ||
| 210 | ZF32_X24S8_MS16_2CS = 0xd7, | ||
| 211 | S8 = 0x2a, | ||
| 212 | S8_2S = 0x2b, | ||
| 213 | GENERIC_16BX2 = 0xfe, | ||
| 214 | C32_2C = 0xd8, | ||
| 215 | C32_2CBR = 0xd9, | ||
| 216 | C32_2CBA = 0xda, | ||
| 217 | C32_2CRA = 0xdb, | ||
| 218 | C32_2BRA = 0xdc, | ||
| 219 | C32_MS2_2C = 0xdd, | ||
| 220 | C32_MS2_2CBR = 0xde, | ||
| 221 | C32_MS2_4CBRA = 0xcc, | ||
| 222 | C32_MS4_2C = 0xdf, | ||
| 223 | C32_MS4_2CBR = 0xe0, | ||
| 224 | C32_MS4_2CBA = 0xe1, | ||
| 225 | C32_MS4_2CRA = 0xe2, | ||
| 226 | C32_MS4_2BRA = 0xe3, | ||
| 227 | C32_MS4_4CBRA = 0x2c, | ||
| 228 | C32_MS8_MS16_2C = 0xe4, | ||
| 229 | C32_MS8_MS16_2CRA = 0xe5, | ||
| 230 | C64_2C = 0xe6, | ||
| 231 | C64_2CBR = 0xe7, | ||
| 232 | C64_2CBA = 0xe8, | ||
| 233 | C64_2CRA = 0xe9, | ||
| 234 | C64_2BRA = 0xea, | ||
| 235 | C64_MS2_2C = 0xeb, | ||
| 236 | C64_MS2_2CBR = 0xec, | ||
| 237 | C64_MS2_4CBRA = 0xcd, | ||
| 238 | C64_MS4_2C = 0xed, | ||
| 239 | C64_MS4_2CBR = 0xee, | ||
| 240 | C64_MS4_2CBA = 0xef, | ||
| 241 | C64_MS4_2CRA = 0xf0, | ||
| 242 | C64_MS4_2BRA = 0xf1, | ||
| 243 | C64_MS4_4CBRA = 0x2d, | ||
| 244 | C64_MS8_MS16_2C = 0xf2, | ||
| 245 | C64_MS8_MS16_2CRA = 0xf3, | ||
| 246 | C128_2C = 0xf4, | ||
| 247 | C128_2CR = 0xf5, | ||
| 248 | C128_MS2_2C = 0xf6, | ||
| 249 | C128_MS2_2CR = 0xf7, | ||
| 250 | C128_MS4_2C = 0xf8, | ||
| 251 | C128_MS4_2CR = 0xf9, | ||
| 252 | C128_MS8_MS16_2C = 0xfa, | ||
| 253 | C128_MS8_MS16_2CR = 0xfb, | ||
| 254 | X8C24 = 0xfc, | ||
| 255 | PITCH_NO_SWIZZLE = 0xfd, | ||
| 256 | SMSKED_MESSAGE = 0xca, | ||
| 257 | SMHOST_MESSAGE = 0xcb, | ||
| 258 | }; | ||
| 259 | |||
| 260 | constexpr bool IsPitchKind(PTEKind kind) { | ||
| 261 | return kind == PTEKind::PITCH || kind == PTEKind::PITCH_NO_SWIZZLE; | ||
| 262 | } | ||
| 263 | |||
| 264 | } // namespace Tegra | ||
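
The new header ends with IsPitchKind, a constexpr predicate that lets callers branch on linear (pitch) versus block-linear layouts without memorizing raw kind values. A minimal self-contained sketch of the helper in use follows; the enum is reduced to three entries, and PITCH = 0x00 is an assumption, since its definition falls outside this hunk:

    // Reduced sketch of Tegra::PTEKind and IsPitchKind; not the full enum above.
    #include <cstdio>

    namespace Tegra {
    enum class PTEKind : unsigned char {
        PITCH = 0x00,            // assumed value; defined earlier in the real header
        PITCH_NO_SWIZZLE = 0xfd, // from the hunk above
        GENERIC_16BX2 = 0xfe,    // from the hunk above
    };

    constexpr bool IsPitchKind(PTEKind kind) {
        return kind == PTEKind::PITCH || kind == PTEKind::PITCH_NO_SWIZZLE;
    }
    } // namespace Tegra

    int main() {
        // Pitch kinds address memory row by row; everything else is swizzled.
        std::printf("%d\n", Tegra::IsPitchKind(Tegra::PTEKind::PITCH_NO_SWIZZLE)); // prints 1
        std::printf("%d\n", Tegra::IsPitchKind(Tegra::PTEKind::GENERIC_16BX2));    // prints 0
        return 0;
    }
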
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp index 7cb02631c..4b15c0f85 100644 --- a/src/video_core/renderer_vulkan/vk_query_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp | |||
| @@ -59,10 +59,11 @@ void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) { | |||
| 59 | std::find_if(pools.begin(), pools.end(), [query_pool = query.first](vk::QueryPool& pool) { | 59 | std::find_if(pools.begin(), pools.end(), [query_pool = query.first](vk::QueryPool& pool) { |
| 60 | return query_pool == *pool; | 60 | return query_pool == *pool; |
| 61 | }); | 61 | }); |
| 62 | ASSERT(it != std::end(pools)); | ||
| 63 | 62 | ||
| 64 | const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it); | 63 | if (it != std::end(pools)) { |
| 65 | usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false; | 64 | const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it); |
| 65 | usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false; | ||
| 66 | } | ||
| 66 | } | 67 | } |
| 67 | 68 | ||
| 68 | QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_, | 69 | QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_, |
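
The Reserve() change above replaces a hard ASSERT with a conditional, so returning a query whose pool is no longer tracked becomes a no-op instead of an abort. A reduced sketch of the same defensive shape, with plain ints standing in for the Vulkan handle types (the names and the GROW_STEP value are illustrative, not yuzu's):

    // Sketch: clear the usage bit only when the pool is still tracked.
    #include <algorithm>
    #include <cstddef>
    #include <utility>
    #include <vector>

    constexpr std::ptrdiff_t GROW_STEP = 512; // assumed growth step

    void Reserve(std::vector<int>& pools, std::vector<bool>& usage,
                 std::pair<int, unsigned> query) {
        const auto it =
            std::find_if(pools.begin(), pools.end(),
                         [handle = query.first](int pool) { return pool == handle; });
        // Previously ASSERT(it != pools.end()); a stale handle now falls through.
        if (it != pools.end()) {
            const std::ptrdiff_t pool_index = std::distance(pools.begin(), it);
            usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false;
        }
    }
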
diff --git a/src/video_core/texture_cache/descriptor_table.h b/src/video_core/texture_cache/descriptor_table.h index b18e3838f..ee4240288 100644 --- a/src/video_core/texture_cache/descriptor_table.h +++ b/src/video_core/texture_cache/descriptor_table.h | |||
| @@ -18,7 +18,7 @@ class DescriptorTable { | |||
| 18 | public: | 18 | public: |
| 19 | explicit DescriptorTable(Tegra::MemoryManager& gpu_memory_) : gpu_memory{gpu_memory_} {} | 19 | explicit DescriptorTable(Tegra::MemoryManager& gpu_memory_) : gpu_memory{gpu_memory_} {} |
| 20 | 20 | ||
| 21 | [[nodiscard]] bool Synchornize(GPUVAddr gpu_addr, u32 limit) { | 21 | [[nodiscard]] bool Synchronize(GPUVAddr gpu_addr, u32 limit) { |
| 22 | [[likely]] if (current_gpu_addr == gpu_addr && current_limit == limit) { | 22 | [[likely]] if (current_gpu_addr == gpu_addr && current_limit == limit) { |
| 23 | return false; | 23 | return false; |
| 24 | } | 24 | } |
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h index 413baf730..0e0fd410f 100644 --- a/src/video_core/texture_cache/texture_cache.h +++ b/src/video_core/texture_cache/texture_cache.h | |||
| @@ -193,11 +193,11 @@ void TextureCache<P>::SynchronizeGraphicsDescriptors() { | |||
| 193 | const bool linked_tsc = maxwell3d->regs.sampler_binding == SamplerBinding::ViaHeaderBinding; | 193 | const bool linked_tsc = maxwell3d->regs.sampler_binding == SamplerBinding::ViaHeaderBinding; |
| 194 | const u32 tic_limit = maxwell3d->regs.tex_header.limit; | 194 | const u32 tic_limit = maxwell3d->regs.tex_header.limit; |
| 195 | const u32 tsc_limit = linked_tsc ? tic_limit : maxwell3d->regs.tex_sampler.limit; | 195 | const u32 tsc_limit = linked_tsc ? tic_limit : maxwell3d->regs.tex_sampler.limit; |
| 196 | if (channel_state->graphics_sampler_table.Synchornize(maxwell3d->regs.tex_sampler.Address(), | 196 | if (channel_state->graphics_sampler_table.Synchronize(maxwell3d->regs.tex_sampler.Address(), |
| 197 | tsc_limit)) { | 197 | tsc_limit)) { |
| 198 | channel_state->graphics_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID); | 198 | channel_state->graphics_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID); |
| 199 | } | 199 | } |
| 200 | if (channel_state->graphics_image_table.Synchornize(maxwell3d->regs.tex_header.Address(), | 200 | if (channel_state->graphics_image_table.Synchronize(maxwell3d->regs.tex_header.Address(), |
| 201 | tic_limit)) { | 201 | tic_limit)) { |
| 202 | channel_state->graphics_image_view_ids.resize(tic_limit + 1, CORRUPT_ID); | 202 | channel_state->graphics_image_view_ids.resize(tic_limit + 1, CORRUPT_ID); |
| 203 | } | 203 | } |
| @@ -209,10 +209,10 @@ void TextureCache<P>::SynchronizeComputeDescriptors() { | |||
| 209 | const u32 tic_limit = kepler_compute->regs.tic.limit; | 209 | const u32 tic_limit = kepler_compute->regs.tic.limit; |
| 210 | const u32 tsc_limit = linked_tsc ? tic_limit : kepler_compute->regs.tsc.limit; | 210 | const u32 tsc_limit = linked_tsc ? tic_limit : kepler_compute->regs.tsc.limit; |
| 211 | const GPUVAddr tsc_gpu_addr = kepler_compute->regs.tsc.Address(); | 211 | const GPUVAddr tsc_gpu_addr = kepler_compute->regs.tsc.Address(); |
| 212 | if (channel_state->compute_sampler_table.Synchornize(tsc_gpu_addr, tsc_limit)) { | 212 | if (channel_state->compute_sampler_table.Synchronize(tsc_gpu_addr, tsc_limit)) { |
| 213 | channel_state->compute_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID); | 213 | channel_state->compute_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID); |
| 214 | } | 214 | } |
| 215 | if (channel_state->compute_image_table.Synchornize(kepler_compute->regs.tic.Address(), | 215 | if (channel_state->compute_image_table.Synchronize(kepler_compute->regs.tic.Address(), |
| 216 | tic_limit)) { | 216 | tic_limit)) { |
| 217 | channel_state->compute_image_view_ids.resize(tic_limit + 1, CORRUPT_ID); | 217 | channel_state->compute_image_view_ids.resize(tic_limit + 1, CORRUPT_ID); |
| 218 | } | 218 | } |
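
The two texture_cache.h hunks update every call site of the renamed DescriptorTable::Synchronize. Because the method is [[nodiscard]], each caller must consume the returned bool, regrowing its cached ID array only when the table actually moved. A condensed sketch of that contract with stand-in types (CORRUPT_ID's value is assumed, and the real table tracks more than an address/limit pair):

    #include <cstdint>
    #include <vector>

    using GPUVAddr = std::uint64_t;
    constexpr std::uint32_t CORRUPT_ID = 0xFFFFFFFF; // assumed sentinel, not yuzu's literal

    struct DescriptorTable {
        // Returns true when the table changed and dependent caches are stale.
        [[nodiscard]] bool Synchronize(GPUVAddr gpu_addr, std::uint32_t limit) {
            if (current_gpu_addr == gpu_addr && current_limit == limit) {
                return false;
            }
            current_gpu_addr = gpu_addr;
            current_limit = limit;
            return true;
        }
        GPUVAddr current_gpu_addr = 0;
        std::uint32_t current_limit = 0;
    };

    void SynchronizeSamplerIds(DescriptorTable& table, std::vector<std::uint32_t>& ids,
                               GPUVAddr tsc_addr, std::uint32_t tsc_limit) {
        // Mirrors the call sites above: on a real change, grow the ID cache,
        // filling the newly added slots with the corrupt sentinel.
        if (table.Synchronize(tsc_addr, tsc_limit)) {
            ids.resize(tsc_limit + 1, CORRUPT_ID);
        }
    }
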
diff --git a/src/yuzu/applets/qt_controller.ui b/src/yuzu/applets/qt_controller.ui index c8cb6bcf3..f5eccba70 100644 --- a/src/yuzu/applets/qt_controller.ui +++ b/src/yuzu/applets/qt_controller.ui | |||
| @@ -2300,7 +2300,7 @@ | |||
| 2300 | <item> | 2300 | <item> |
| 2301 | <widget class="QRadioButton" name="radioUndocked"> | 2301 | <widget class="QRadioButton" name="radioUndocked"> |
| 2302 | <property name="text"> | 2302 | <property name="text"> |
| 2303 | <string>Undocked</string> | 2303 | <string>Handheld</string> |
| 2304 | </property> | 2304 | </property> |
| 2305 | </widget> | 2305 | </widget> |
| 2306 | </item> | 2306 | </item> |
diff --git a/src/yuzu/bootmanager.cpp b/src/yuzu/bootmanager.cpp index 24251247d..6acfb7b06 100644 --- a/src/yuzu/bootmanager.cpp +++ b/src/yuzu/bootmanager.cpp | |||
| @@ -120,8 +120,8 @@ void EmuThread::run() { | |||
| 120 | } | 120 | } |
| 121 | } | 121 | } |
| 122 | 122 | ||
| 123 | // Shutdown the core emulation | 123 | // Shutdown the main emulated process |
| 124 | system.Shutdown(); | 124 | system.ShutdownMainProcess(); |
| 125 | 125 | ||
| 126 | #if MICROPROFILE_ENABLED | 126 | #if MICROPROFILE_ENABLED |
| 127 | MicroProfileOnThreadExit(); | 127 | MicroProfileOnThreadExit(); |
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp index f45a25410..7b16d7f7e 100644 --- a/src/yuzu/main.cpp +++ b/src/yuzu/main.cpp | |||
| @@ -294,6 +294,7 @@ GMainWindow::GMainWindow(std::unique_ptr<Config> config_, bool has_broken_vulkan | |||
| 294 | #ifdef __linux__ | 294 | #ifdef __linux__ |
| 295 | SetupSigInterrupts(); | 295 | SetupSigInterrupts(); |
| 296 | #endif | 296 | #endif |
| 297 | system->Initialize(); | ||
| 297 | 298 | ||
| 298 | Common::Log::Initialize(); | 299 | Common::Log::Initialize(); |
| 299 | LoadTranslation(); | 300 | LoadTranslation(); |
| @@ -1895,6 +1896,8 @@ void GMainWindow::OnGameListOpenFolder(u64 program_id, GameListOpenTarget target | |||
| 1895 | case GameListOpenTarget::SaveData: { | 1896 | case GameListOpenTarget::SaveData: { |
| 1896 | open_target = tr("Save Data"); | 1897 | open_target = tr("Save Data"); |
| 1897 | const auto nand_dir = Common::FS::GetYuzuPath(Common::FS::YuzuPath::NANDDir); | 1898 | const auto nand_dir = Common::FS::GetYuzuPath(Common::FS::YuzuPath::NANDDir); |
| 1899 | auto vfs_nand_dir = | ||
| 1900 | vfs->OpenDirectory(Common::FS::PathToUTF8String(nand_dir), FileSys::Mode::Read); | ||
| 1898 | 1901 | ||
| 1899 | if (has_user_save) { | 1902 | if (has_user_save) { |
| 1900 | // User save data | 1903 | // User save data |
| @@ -1921,15 +1924,15 @@ void GMainWindow::OnGameListOpenFolder(u64 program_id, GameListOpenTarget target | |||
| 1921 | ASSERT(user_id); | 1924 | ASSERT(user_id); |
| 1922 | 1925 | ||
| 1923 | const auto user_save_data_path = FileSys::SaveDataFactory::GetFullPath( | 1926 | const auto user_save_data_path = FileSys::SaveDataFactory::GetFullPath( |
| 1924 | *system, FileSys::SaveDataSpaceId::NandUser, FileSys::SaveDataType::SaveData, | 1927 | *system, vfs_nand_dir, FileSys::SaveDataSpaceId::NandUser, |
| 1925 | program_id, user_id->AsU128(), 0); | 1928 | FileSys::SaveDataType::SaveData, program_id, user_id->AsU128(), 0); |
| 1926 | 1929 | ||
| 1927 | path = Common::FS::ConcatPathSafe(nand_dir, user_save_data_path); | 1930 | path = Common::FS::ConcatPathSafe(nand_dir, user_save_data_path); |
| 1928 | } else { | 1931 | } else { |
| 1929 | // Device save data | 1932 | // Device save data |
| 1930 | const auto device_save_data_path = FileSys::SaveDataFactory::GetFullPath( | 1933 | const auto device_save_data_path = FileSys::SaveDataFactory::GetFullPath( |
| 1931 | *system, FileSys::SaveDataSpaceId::NandUser, FileSys::SaveDataType::SaveData, | 1934 | *system, vfs_nand_dir, FileSys::SaveDataSpaceId::NandUser, |
| 1932 | program_id, {}, 0); | 1935 | FileSys::SaveDataType::SaveData, program_id, {}, 0); |
| 1933 | 1936 | ||
| 1934 | path = Common::FS::ConcatPathSafe(nand_dir, device_save_data_path); | 1937 | path = Common::FS::ConcatPathSafe(nand_dir, device_save_data_path); |
| 1935 | } | 1938 | } |
| @@ -3280,7 +3283,7 @@ void GMainWindow::LoadAmiibo(const QString& filename) { | |||
| 3280 | QMessageBox::warning(this, title, tr("The current game is not looking for amiibos")); | 3283 | QMessageBox::warning(this, title, tr("The current game is not looking for amiibos")); |
| 3281 | break; | 3284 | break; |
| 3282 | case InputCommon::VirtualAmiibo::Info::Unknown: | 3285 | case InputCommon::VirtualAmiibo::Info::Unknown: |
| 3283 | QMessageBox::warning(this, title, tr("An unkown error occured")); | 3286 | QMessageBox::warning(this, title, tr("An unknown error occurred")); |
| 3284 | break; | 3287 | break; |
| 3285 | default: | 3288 | default: |
| 3286 | break; | 3289 | break; |
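
Two related changes land in main.cpp: Core::System now requires an explicit Initialize() call after construction, and SaveDataFactory::GetFullPath gains a directory argument so the caller opens the NAND directory once and passes it to both the user and device save-path lookups, rather than the factory reaching into the filesystem itself. A schematic of that dependency-injection shape with stand-in types (illustrative only; the real signature takes several more arguments, as the hunk shows, and the title ID below is hypothetical):

    #include <cstdint>
    #include <memory>
    #include <string>

    struct VfsDirectory {};                        // stand-in for yuzu's FileSys directory type
    using VirtualDir = std::shared_ptr<VfsDirectory>;

    // The opened directory travels as a parameter; the helper no longer opens it.
    std::string GetFullPath(VirtualDir nand_dir, std::uint64_t program_id) {
        (void)nand_dir;    // the real factory inspects this to pick the save layout
        (void)program_id;
        return {};         // placeholder; real code builds the save-data path here
    }

    int main() {
        VirtualDir vfs_nand_dir = std::make_shared<VfsDirectory>(); // opened once by the caller
        const auto user_path = GetFullPath(vfs_nand_dir, 0x0100000000010000);   // hypothetical ID
        const auto device_path = GetFullPath(vfs_nand_dir, 0x0100000000010000); // same handle reused
        (void)user_path;
        (void)device_path;
    }
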
diff --git a/src/yuzu_cmd/yuzu.cpp b/src/yuzu_cmd/yuzu.cpp index 3a0f33cba..e16f79eb4 100644 --- a/src/yuzu_cmd/yuzu.cpp +++ b/src/yuzu_cmd/yuzu.cpp | |||
| @@ -302,6 +302,8 @@ int main(int argc, char** argv) { | |||
| 302 | } | 302 | } |
| 303 | 303 | ||
| 304 | Core::System system{}; | 304 | Core::System system{}; |
| 305 | system.Initialize(); | ||
| 306 | |||
| 305 | InputCommon::InputSubsystem input_subsystem{}; | 307 | InputCommon::InputSubsystem input_subsystem{}; |
| 306 | 308 | ||
| 307 | // Apply the command line arguments | 309 | // Apply the command line arguments |
| @@ -392,7 +394,7 @@ int main(int argc, char** argv) { | |||
| 392 | } | 394 | } |
| 393 | system.DetachDebugger(); | 395 | system.DetachDebugger(); |
| 394 | void(system.Pause()); | 396 | void(system.Pause()); |
| 395 | system.Shutdown(); | 397 | system.ShutdownMainProcess(); |
| 396 | 398 | ||
| 397 | detached_tasks.WaitForAllTasks(); | 399 | detached_tasks.WaitForAllTasks(); |
| 398 | return 0; | 400 | return 0; |
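
Both frontends now follow the same revised lifecycle: construct Core::System cheaply, call Initialize() before first use, and end emulation with ShutdownMainProcess(), which replaces the old Shutdown() call. A stand-in sketch of that ordering; the class body is illustrative, and only the call sequence is taken from the hunks:

    class System {
    public:
        void Initialize() { initialized = true; }       // heavy setup deferred out of the constructor
        void Run() { running = initialized; }
        bool Pause() { running = false; return true; }
        void ShutdownMainProcess() { running = false; } // tears down the emulated process only
    private:
        bool initialized = false;
        bool running = false;
    };

    int main() {
        System system{};
        system.Initialize();          // must precede any use, per both frontends
        system.Run();
        void(system.Pause());         // matches yuzu_cmd's discarded-return idiom
        system.ShutdownMainProcess(); // replaces system.Shutdown()
    }
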