Diffstat (limited to 'src/core')
23 files changed, 410 insertions, 250 deletions
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 0252c8c31..5afdeb5ff 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -226,6 +226,7 @@ add_library(core STATIC
     hle/kernel/k_page_buffer.h
     hle/kernel/k_page_heap.cpp
     hle/kernel/k_page_heap.h
+    hle/kernel/k_page_group.cpp
     hle/kernel/k_page_group.h
     hle/kernel/k_page_table.cpp
     hle/kernel/k_page_table.h
diff --git a/src/core/arm/arm_interface.cpp b/src/core/arm/arm_interface.cpp
index 2df7b0ee8..8aa7b9641 100644
--- a/src/core/arm/arm_interface.cpp
+++ b/src/core/arm/arm_interface.cpp
@@ -1,14 +1,12 @@
 // SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later
 
-#ifndef _MSC_VER
-#include <cxxabi.h>
-#endif
-
 #include <map>
 #include <optional>
+
 #include "common/bit_field.h"
 #include "common/common_types.h"
+#include "common/demangle.h"
 #include "common/logging/log.h"
 #include "core/arm/arm_interface.h"
 #include "core/arm/symbols.h"
@@ -71,20 +69,8 @@ void ARM_Interface::SymbolicateBacktrace(Core::System& system, std::vector<Backt
         const auto symbol_set = symbols.find(entry.module);
         if (symbol_set != symbols.end()) {
             const auto symbol = Symbols::GetSymbolName(symbol_set->second, entry.offset);
-            if (symbol.has_value()) {
-#ifdef _MSC_VER
-                // TODO(DarkLordZach): Add demangling of symbol names.
-                entry.name = *symbol;
-#else
-                int status{-1};
-                char* demangled{abi::__cxa_demangle(symbol->c_str(), nullptr, nullptr, &status)};
-                if (status == 0 && demangled != nullptr) {
-                    entry.name = demangled;
-                    std::free(demangled);
-                } else {
-                    entry.name = *symbol;
-                }
-#endif
+            if (symbol) {
+                entry.name = Common::DemangleSymbol(*symbol);
             }
         }
     }
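Note: the inline, MSVC-guarded abi::__cxa_demangle logic above is replaced by a single call to Common::DemangleSymbol from the new common/demangle.h header, which is not part of this diff. A minimal sketch of what such a wrapper can look like on Itanium-ABI platforms (hypothetical helper; the real yuzu implementation may differ):

```cpp
// Hypothetical sketch of a DemangleSymbol-style wrapper around the
// Itanium ABI demangler; not the actual common/demangle.h code.
#include <cstdlib>
#include <string>
#include <cxxabi.h>

std::string DemangleSymbol(const std::string& mangled) {
    int status = -1;
    char* demangled = abi::__cxa_demangle(mangled.c_str(), nullptr, nullptr, &status);
    if (status != 0 || demangled == nullptr) {
        return mangled; // fall back to the raw symbol on failure
    }
    std::string result{demangled};
    std::free(demangled);
    return result;
}
```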
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index 947747d36..2a7570073 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -229,7 +229,11 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
     config.enable_cycle_counting = true;
 
     // Code cache size
+#ifdef ARCHITECTURE_arm64
+    config.code_cache_size = 128_MiB;
+#else
     config.code_cache_size = 512_MiB;
+#endif
 
     // Allow memory fault handling to work
     if (system.DebuggerEnabled()) {
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index 3df943df7..7229fdc2a 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -288,7 +288,11 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
     config.enable_cycle_counting = true;
 
     // Code cache size
+#ifdef ARCHITECTURE_arm64
+    config.code_cache_size = 128_MiB;
+#else
     config.code_cache_size = 512_MiB;
+#endif
 
     // Allow memory fault handling to work
     if (system.DebuggerEnabled()) {
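Note: both Dynarmic backends now cap the JIT code cache at 128_MiB on arm64 hosts, keeping 512_MiB elsewhere. The _MiB suffix is a user-defined literal; a self-contained sketch of the idea (yuzu's own definition lives in common code, not in this diff):

```cpp
#include <cstddef>

// Sketch of a binary-size user-defined literal like the 128_MiB and
// 512_MiB values used above (assumed to mirror yuzu's own literal).
constexpr std::size_t operator""_MiB(unsigned long long value) {
    return static_cast<std::size_t>(value) * 1024 * 1024;
}

static_assert(128_MiB == 134'217'728);
static_assert(512_MiB == 4 * 128_MiB);
```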
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp
index 0e7b5f943..6bac6722f 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -142,16 +142,24 @@ void CoreTiming::ScheduleLoopingEvent(std::chrono::nanoseconds start_time,
 }
 
 void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type,
-                                 std::uintptr_t user_data) {
-    std::scoped_lock scope{basic_lock};
-    const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
-        return e.type.lock().get() == event_type.get() && e.user_data == user_data;
-    });
-
-    // Removing random items breaks the invariant so we have to re-establish it.
-    if (itr != event_queue.end()) {
-        event_queue.erase(itr, event_queue.end());
-        std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>());
+                                 std::uintptr_t user_data, bool wait) {
+    {
+        std::scoped_lock lk{basic_lock};
+        const auto itr =
+            std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
+                return e.type.lock().get() == event_type.get() && e.user_data == user_data;
+            });
+
+        // Removing random items breaks the invariant so we have to re-establish it.
+        if (itr != event_queue.end()) {
+            event_queue.erase(itr, event_queue.end());
+            std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>());
+        }
+    }
+
+    // Force any in-progress events to finish
+    if (wait) {
+        std::scoped_lock lk{advance_lock};
     }
 }
 
@@ -190,20 +198,6 @@ u64 CoreTiming::GetClockTicks() const {
     return CpuCyclesToClockCycles(ticks);
 }
 
-void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
-    std::scoped_lock lock{basic_lock};
-
-    const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
-        return e.type.lock().get() == event_type.get();
-    });
-
-    // Removing random items breaks the invariant so we have to re-establish it.
-    if (itr != event_queue.end()) {
-        event_queue.erase(itr, event_queue.end());
-        std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>());
-    }
-}
-
 std::optional<s64> CoreTiming::Advance() {
     std::scoped_lock lock{advance_lock, basic_lock};
     global_timer = GetGlobalTimeNs().count();
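Note: UnscheduleEvent now scopes basic_lock to the queue edit only, then optionally takes advance_lock. Because Advance() holds advance_lock while it dispatches callbacks, briefly acquiring it acts as a barrier: once it is obtained, no event callback is still running. The event queue is a std::vector maintained as a min-heap, so removing arbitrary elements requires rebuilding the heap; a standalone model of that pattern:

```cpp
#include <algorithm>
#include <functional>
#include <vector>

// Standalone model of the remove-then-re-heapify pattern above: a vector
// kept as a min-heap (std::greater) loses the heap invariant when
// arbitrary elements are erased, so std::make_heap rebuilds it.
void RemoveMatching(std::vector<int>& heap, int victim) {
    const auto it = std::remove_if(heap.begin(), heap.end(),
                                   [victim](int v) { return v == victim; });
    if (it != heap.end()) {
        heap.erase(it, heap.end());
        std::make_heap(heap.begin(), heap.end(), std::greater<>());
    }
}
```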
diff --git a/src/core/core_timing.h b/src/core/core_timing.h
index b5925193c..da366637b 100644
--- a/src/core/core_timing.h
+++ b/src/core/core_timing.h
@@ -98,10 +98,13 @@ public:
                               const std::shared_ptr<EventType>& event_type,
                               std::uintptr_t user_data = 0, bool absolute_time = false);
 
-    void UnscheduleEvent(const std::shared_ptr<EventType>& event_type, std::uintptr_t user_data);
+    void UnscheduleEvent(const std::shared_ptr<EventType>& event_type, std::uintptr_t user_data,
+                         bool wait = true);
 
-    /// We only permit one event of each type in the queue at a time.
-    void RemoveEvent(const std::shared_ptr<EventType>& event_type);
+    void UnscheduleEventWithoutWait(const std::shared_ptr<EventType>& event_type,
+                                    std::uintptr_t user_data) {
+        UnscheduleEvent(event_type, user_data, false);
+    }
 
     void AddTicks(u64 ticks_to_add);
 
diff --git a/src/core/debugger/gdbstub.cpp b/src/core/debugger/gdbstub.cpp
index a64a9ac64..9c02b7b31 100644
--- a/src/core/debugger/gdbstub.cpp
+++ b/src/core/debugger/gdbstub.cpp
@@ -11,6 +11,7 @@
 #include "common/hex_util.h"
 #include "common/logging/log.h"
 #include "common/scope_exit.h"
+#include "common/settings.h"
 #include "core/arm/arm_interface.h"
 #include "core/core.h"
 #include "core/debugger/gdbstub.h"
@@ -731,7 +732,25 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
     auto* process = system.CurrentProcess();
     auto& page_table = process->PageTable();
 
-    if (command_str == "get info") {
+    const char* commands = "Commands:\n"
+                           " get fastmem\n"
+                           " get info\n"
+                           " get mappings\n";
+
+    if (command_str == "get fastmem") {
+        if (Settings::IsFastmemEnabled()) {
+            const auto& impl = page_table.PageTableImpl();
+            const auto region = reinterpret_cast<uintptr_t>(impl.fastmem_arena);
+            const auto region_bits = impl.current_address_space_width_in_bits;
+            const auto region_size = 1ULL << region_bits;
+
+            reply = fmt::format("Region bits: {}\n"
+                                "Host address: {:#x} - {:#x}\n",
+                                region_bits, region, region + region_size - 1);
+        } else {
+            reply = "Fastmem is not enabled.\n";
+        }
+    } else if (command_str == "get info") {
         Loader::AppLoader::Modules modules;
         system.GetAppLoader().ReadNSOModules(modules);
 
@@ -787,9 +806,10 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
             cur_addr = next_address;
         }
     } else if (command_str == "help") {
-        reply = "Commands:\n get info\n get mappings\n";
+        reply = commands;
     } else {
-        reply = "Unknown command.\nCommands:\n get info\n get mappings\n";
+        reply = "Unknown command.\n";
+        reply += commands;
     }
 
     std::span<const u8> reply_span{reinterpret_cast<u8*>(&reply.front()), reply.size()};
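Note: GDB clients reach Rcmd handlers through the monitor command, so the new query is issued as monitor get fastmem. An illustrative session (the width and addresses below are made-up values, not real output):

```
(gdb) monitor get fastmem
Region bits: 39
Host address: 0x7f4000000000 - 0x7f7fffffffff
```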
diff --git a/src/core/hid/emulated_controller.cpp b/src/core/hid/emulated_controller.cpp
index 71364c323..a959c9db9 100644
--- a/src/core/hid/emulated_controller.cpp
+++ b/src/core/hid/emulated_controller.cpp
@@ -10,6 +10,7 @@
 
 namespace Core::HID {
 constexpr s32 HID_JOYSTICK_MAX = 0x7fff;
+constexpr s32 HID_JOYSTICK_MIN = 0x7ffe;
 constexpr s32 HID_TRIGGER_MAX = 0x7fff;
 // Use a common UUID for TAS and Virtual Gamepad
 constexpr Common::UUID TAS_UUID =
@@ -798,9 +799,16 @@ void EmulatedController::SetStick(const Common::Input::CallbackStatus& callback,
         return;
     }
 
+    const auto FloatToShort = [](float a) {
+        if (a > 0) {
+            return static_cast<s32>(a * HID_JOYSTICK_MAX);
+        }
+        return static_cast<s32>(a * HID_JOYSTICK_MIN);
+    };
+
     const AnalogStickState stick{
-        .x = static_cast<s32>(controller.stick_values[index].x.value * HID_JOYSTICK_MAX),
-        .y = static_cast<s32>(controller.stick_values[index].y.value * HID_JOYSTICK_MAX),
+        .x = FloatToShort(controller.stick_values[index].x.value),
+        .y = FloatToShort(controller.stick_values[index].y.value),
     };
 
     switch (index) {
@@ -1434,16 +1442,6 @@ AnalogSticks EmulatedController::GetSticks() const {
         return {};
     }
 
-    // Some drivers like stick from buttons need constant refreshing
-    for (auto& device : stick_devices) {
-        if (!device) {
-            continue;
-        }
-        lock.unlock();
-        device->SoftUpdate();
-        lock.lock();
-    }
-
     return controller.analog_stick_state;
 }
 
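Note: the new FloatToShort lambda scales positive deflection by HID_JOYSTICK_MAX (0x7fff) and negative deflection by HID_JOYSTICK_MIN (0x7ffe), so the two extremes map to 32767 and -32766. A self-contained check of that arithmetic:

```cpp
#include <cstdint>

// Mirror of the FloatToShort lambda above, extracted so the endpoints
// can be checked at compile time.
constexpr std::int32_t HID_JOYSTICK_MAX = 0x7fff;
constexpr std::int32_t HID_JOYSTICK_MIN = 0x7ffe;

constexpr std::int32_t FloatToShort(float a) {
    return a > 0 ? static_cast<std::int32_t>(a * HID_JOYSTICK_MAX)
                 : static_cast<std::int32_t>(a * HID_JOYSTICK_MIN);
}

static_assert(FloatToShort(1.0f) == 32767);   // full positive deflection
static_assert(FloatToShort(-1.0f) == -32766); // full negative deflection
static_assert(FloatToShort(0.0f) == 0);
```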
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index 4b1c134d4..d9da1e600 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -27,13 +27,13 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
     auto& page_table = m_owner->PageTable();
 
     // Construct the page group.
-    m_page_group = {};
+    m_page_group.emplace(kernel, page_table.GetBlockInfoManager());
 
     // Lock the memory.
-    R_TRY(page_table.LockForCodeMemory(&m_page_group, addr, size))
+    R_TRY(page_table.LockForCodeMemory(std::addressof(*m_page_group), addr, size))
 
     // Clear the memory.
-    for (const auto& block : m_page_group.Nodes()) {
+    for (const auto& block : *m_page_group) {
         std::memset(device_memory.GetPointer<void>(block.GetAddress()), 0xFF, block.GetSize());
     }
 
@@ -51,12 +51,13 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
 void KCodeMemory::Finalize() {
     // Unlock.
     if (!m_is_mapped && !m_is_owner_mapped) {
-        const size_t size = m_page_group.GetNumPages() * PageSize;
-        m_owner->PageTable().UnlockForCodeMemory(m_address, size, m_page_group);
+        const size_t size = m_page_group->GetNumPages() * PageSize;
+        m_owner->PageTable().UnlockForCodeMemory(m_address, size, *m_page_group);
     }
 
     // Close the page group.
-    m_page_group = {};
+    m_page_group->Close();
+    m_page_group->Finalize();
 
     // Close our reference to our owner.
     m_owner->Close();
@@ -64,7 +65,7 @@ void KCodeMemory::Finalize() {
 
 Result KCodeMemory::Map(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
     // Lock ourselves.
     KScopedLightLock lk(m_lock);
@@ -74,7 +75,7 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
 
     // Map the memory.
     R_TRY(kernel.CurrentProcess()->PageTable().MapPages(
-        address, m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));
+        address, *m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));
 
     // Mark ourselves as mapped.
     m_is_mapped = true;
@@ -84,13 +85,13 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
 
 Result KCodeMemory::Unmap(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
     // Lock ourselves.
     KScopedLightLock lk(m_lock);
 
     // Unmap the memory.
-    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, m_page_group,
+    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, *m_page_group,
                                                           KMemoryState::CodeOut));
 
     // Mark ourselves as unmapped.
@@ -101,7 +102,7 @@ Result KCodeMemory::Unmap(VAddr address, size_t size) {
 
 Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
     // Lock ourselves.
     KScopedLightLock lk(m_lock);
@@ -125,7 +126,7 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission
 
     // Map the memory.
     R_TRY(
-        m_owner->PageTable().MapPages(address, m_page_group, KMemoryState::GeneratedCode, k_perm));
+        m_owner->PageTable().MapPages(address, *m_page_group, KMemoryState::GeneratedCode, k_perm));
 
     // Mark ourselves as mapped.
     m_is_owner_mapped = true;
@@ -135,13 +136,13 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission
 
 Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
     // Lock ourselves.
     KScopedLightLock lk(m_lock);
 
     // Unmap the memory.
-    R_TRY(m_owner->PageTable().UnmapPages(address, m_page_group, KMemoryState::GeneratedCode));
+    R_TRY(m_owner->PageTable().UnmapPages(address, *m_page_group, KMemoryState::GeneratedCode));
 
     // Mark ourselves as unmapped.
     m_is_owner_mapped = false;
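Note: m_page_group becomes std::optional<KPageGroup> because the reworked KPageGroup needs constructor arguments (a KernelCore& and a KBlockInfoManager*) that only exist once Initialize() runs. A minimal model of this deferred-construction pattern, using a hypothetical type:

```cpp
#include <optional>

// Hypothetical stand-in for a type that, like the new KPageGroup, can
// only be constructed with arguments.
struct Resource {
    explicit Resource(int id) : m_id{id} {}
    int m_id;
};

struct Holder {
    // Default state is empty; no Resource has been built yet.
    std::optional<Resource> m_res{};

    void Initialize(int id) {
        m_res.emplace(id); // construct in place once arguments exist
    }

    int Id() const {
        return m_res->m_id; // valid only after Initialize()
    }
};
```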
diff --git a/src/core/hle/kernel/k_code_memory.h b/src/core/hle/kernel/k_code_memory.h
index 2e7e1436a..5b260b385 100644
--- a/src/core/hle/kernel/k_code_memory.h
+++ b/src/core/hle/kernel/k_code_memory.h
@@ -3,6 +3,8 @@
 
 #pragma once
 
+#include <optional>
+
 #include "common/common_types.h"
 #include "core/device_memory.h"
 #include "core/hle/kernel/k_auto_object.h"
@@ -49,11 +51,11 @@ public:
         return m_address;
     }
     size_t GetSize() const {
-        return m_is_initialized ? m_page_group.GetNumPages() * PageSize : 0;
+        return m_is_initialized ? m_page_group->GetNumPages() * PageSize : 0;
    }
 
 private:
-    KPageGroup m_page_group{};
+    std::optional<KPageGroup> m_page_group{};
     KProcess* m_owner{};
     VAddr m_address{};
     KLightLock m_lock;
diff --git a/src/core/hle/kernel/k_hardware_timer.cpp b/src/core/hle/kernel/k_hardware_timer.cpp
index 6bba79ea0..4dcd53821 100644
--- a/src/core/hle/kernel/k_hardware_timer.cpp
+++ b/src/core/hle/kernel/k_hardware_timer.cpp
@@ -18,7 +18,8 @@ void KHardwareTimer::Initialize() {
 }
 
 void KHardwareTimer::Finalize() {
-    this->DisableInterrupt();
+    m_kernel.System().CoreTiming().UnscheduleEvent(m_event_type, reinterpret_cast<uintptr_t>(this));
+    m_wakeup_time = std::numeric_limits<s64>::max();
     m_event_type.reset();
 }
 
@@ -59,7 +60,8 @@ void KHardwareTimer::EnableInterrupt(s64 wakeup_time) {
 }
 
 void KHardwareTimer::DisableInterrupt() {
-    m_kernel.System().CoreTiming().UnscheduleEvent(m_event_type, reinterpret_cast<uintptr_t>(this));
+    m_kernel.System().CoreTiming().UnscheduleEventWithoutWait(m_event_type,
+                                                              reinterpret_cast<uintptr_t>(this));
     m_wakeup_time = std::numeric_limits<s64>::max();
 }
 
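Note: Finalize() keeps the waiting unschedule so the timer callback has fully drained before m_event_type is reset, while DisableInterrupt() switches to the non-waiting variant because it can run on the callback path itself, where waiting on advance_lock would self-deadlock. A toy model of that distinction (not the yuzu API):

```cpp
#include <mutex>

// Toy model: the dispatcher holds advance_lock while callbacks run, so
// the waiting variant doubles as a "no callback in flight" barrier.
// Calling the waiting variant from inside a callback would block on a
// lock its own caller already holds.
struct Dispatcher {
    std::mutex advance_lock;

    void UnscheduleEvent(bool wait) {
        // ... remove the event from the queue under a separate lock ...
        if (wait) {
            std::scoped_lock lk{advance_lock}; // wait out in-flight events
        }
    }

    void UnscheduleEventWithoutWait() {
        UnscheduleEvent(false);
    }
};
```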
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index bd33571da..cd6ea388e 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -223,7 +223,7 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,
 
     // Ensure that we don't leave anything un-freed.
     ON_RESULT_FAILURE {
-        for (const auto& it : out->Nodes()) {
+        for (const auto& it : *out) {
             auto& manager = this->GetManager(it.GetAddress());
             const size_t node_num_pages = std::min<u64>(
                 it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize);
@@ -285,7 +285,7 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
         m_has_optimized_process[static_cast<size_t>(pool)], true));
 
     // Open the first reference to the pages.
-    for (const auto& block : out->Nodes()) {
+    for (const auto& block : *out) {
         PAddr cur_address = block.GetAddress();
         size_t remaining_pages = block.GetNumPages();
         while (remaining_pages > 0) {
@@ -335,7 +335,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
     // Perform optimized memory tracking, if we should.
     if (optimized) {
         // Iterate over the allocated blocks.
-        for (const auto& block : out->Nodes()) {
+        for (const auto& block : *out) {
             // Get the block extents.
             const PAddr block_address = block.GetAddress();
             const size_t block_pages = block.GetNumPages();
@@ -391,7 +391,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
         }
     } else {
         // Set all the allocated memory.
-        for (const auto& block : out->Nodes()) {
+        for (const auto& block : *out) {
             std::memset(m_system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
                         block.GetSize());
         }
diff --git a/src/core/hle/kernel/k_page_group.cpp b/src/core/hle/kernel/k_page_group.cpp
new file mode 100644
index 000000000..d8c644a33
--- /dev/null
+++ b/src/core/hle/kernel/k_page_group.cpp
@@ -0,0 +1,121 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/k_dynamic_resource_manager.h"
+#include "core/hle/kernel/k_memory_manager.h"
+#include "core/hle/kernel/k_page_group.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel {
+
+void KPageGroup::Finalize() {
+    KBlockInfo* cur = m_first_block;
+    while (cur != nullptr) {
+        KBlockInfo* next = cur->GetNext();
+        m_manager->Free(cur);
+        cur = next;
+    }
+
+    m_first_block = nullptr;
+    m_last_block = nullptr;
+}
+
+void KPageGroup::CloseAndReset() {
+    auto& mm = m_kernel.MemoryManager();
+
+    KBlockInfo* cur = m_first_block;
+    while (cur != nullptr) {
+        KBlockInfo* next = cur->GetNext();
+        mm.Close(cur->GetAddress(), cur->GetNumPages());
+        m_manager->Free(cur);
+        cur = next;
+    }
+
+    m_first_block = nullptr;
+    m_last_block = nullptr;
+}
+
+size_t KPageGroup::GetNumPages() const {
+    size_t num_pages = 0;
+
+    for (const auto& it : *this) {
+        num_pages += it.GetNumPages();
+    }
+
+    return num_pages;
+}
+
+Result KPageGroup::AddBlock(KPhysicalAddress addr, size_t num_pages) {
+    // Succeed immediately if we're adding no pages.
+    R_SUCCEED_IF(num_pages == 0);
+
+    // Check for overflow.
+    ASSERT(addr < addr + num_pages * PageSize);
+
+    // Try to just append to the last block.
+    if (m_last_block != nullptr) {
+        R_SUCCEED_IF(m_last_block->TryConcatenate(addr, num_pages));
+    }
+
+    // Allocate a new block.
+    KBlockInfo* new_block = m_manager->Allocate();
+    R_UNLESS(new_block != nullptr, ResultOutOfResource);
+
+    // Initialize the block.
+    new_block->Initialize(addr, num_pages);
+
+    // Add the block to our list.
+    if (m_last_block != nullptr) {
+        m_last_block->SetNext(new_block);
+    } else {
+        m_first_block = new_block;
+    }
+    m_last_block = new_block;
+
+    R_SUCCEED();
+}
+
+void KPageGroup::Open() const {
+    auto& mm = m_kernel.MemoryManager();
+
+    for (const auto& it : *this) {
+        mm.Open(it.GetAddress(), it.GetNumPages());
+    }
+}
+
+void KPageGroup::OpenFirst() const {
+    auto& mm = m_kernel.MemoryManager();
+
+    for (const auto& it : *this) {
+        mm.OpenFirst(it.GetAddress(), it.GetNumPages());
+    }
+}
+
+void KPageGroup::Close() const {
+    auto& mm = m_kernel.MemoryManager();
+
+    for (const auto& it : *this) {
+        mm.Close(it.GetAddress(), it.GetNumPages());
+    }
+}
+
+bool KPageGroup::IsEquivalentTo(const KPageGroup& rhs) const {
+    auto lit = this->begin();
+    auto rit = rhs.begin();
+    auto lend = this->end();
+    auto rend = rhs.end();
+
+    while (lit != lend && rit != rend) {
+        if (*lit != *rit) {
+            return false;
+        }
+
+        ++lit;
+        ++rit;
+    }
+
+    return lit == lend && rit == rend;
+}
+
+} // namespace Kernel
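Note: the rewritten KPageGroup stores blocks as an intrusive singly-linked list of slab-allocated KBlockInfo nodes rather than a std::list<Node>, and AddBlock first tries TryConcatenate so a physically contiguous append extends the last block instead of allocating a new node. A simplified standalone model of that coalescing append:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr std::size_t PageSize = 0x1000;

// Simplified stand-in for KBlockInfo: a base address plus a page count.
struct Block {
    std::uint64_t addr;
    std::size_t num_pages;
};

// Model of KPageGroup::AddBlock's coalescing: extend the last block when
// the new range starts exactly at its end, otherwise append a new block.
void AddBlock(std::vector<Block>& blocks, std::uint64_t addr, std::size_t num_pages) {
    if (num_pages == 0) {
        return;
    }
    if (!blocks.empty()) {
        Block& last = blocks.back();
        if (addr != 0 && addr == last.addr + last.num_pages * PageSize) {
            last.num_pages += num_pages; // concatenate, no new node needed
            return;
        }
    }
    blocks.push_back({addr, num_pages});
}
```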
diff --git a/src/core/hle/kernel/k_page_group.h b/src/core/hle/kernel/k_page_group.h
index 316f172f2..c07f17663 100644
--- a/src/core/hle/kernel/k_page_group.h
+++ b/src/core/hle/kernel/k_page_group.h
@@ -1,4 +1,4 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later
 
 #pragma once
@@ -13,24 +13,23 @@
 
 namespace Kernel {
 
+class KBlockInfoManager;
+class KernelCore;
 class KPageGroup;
 
 class KBlockInfo {
-private:
-    friend class KPageGroup;
-
 public:
-    constexpr KBlockInfo() = default;
+    constexpr explicit KBlockInfo() : m_next(nullptr) {}
 
-    constexpr void Initialize(PAddr addr, size_t np) {
+    constexpr void Initialize(KPhysicalAddress addr, size_t np) {
         ASSERT(Common::IsAligned(addr, PageSize));
         ASSERT(static_cast<u32>(np) == np);
 
-        m_page_index = static_cast<u32>(addr) / PageSize;
+        m_page_index = static_cast<u32>(addr / PageSize);
         m_num_pages = static_cast<u32>(np);
     }
 
-    constexpr PAddr GetAddress() const {
+    constexpr KPhysicalAddress GetAddress() const {
         return m_page_index * PageSize;
     }
     constexpr size_t GetNumPages() const {
@@ -39,10 +38,10 @@ public:
     constexpr size_t GetSize() const {
         return this->GetNumPages() * PageSize;
     }
-    constexpr PAddr GetEndAddress() const {
+    constexpr KPhysicalAddress GetEndAddress() const {
         return (m_page_index + m_num_pages) * PageSize;
     }
-    constexpr PAddr GetLastAddress() const {
+    constexpr KPhysicalAddress GetLastAddress() const {
         return this->GetEndAddress() - 1;
     }
 
@@ -62,8 +61,8 @@ public:
         return !(*this == rhs);
     }
 
-    constexpr bool IsStrictlyBefore(PAddr addr) const {
-        const PAddr end = this->GetEndAddress();
+    constexpr bool IsStrictlyBefore(KPhysicalAddress addr) const {
+        const KPhysicalAddress end = this->GetEndAddress();
 
         if (m_page_index != 0 && end == 0) {
             return false;
@@ -72,11 +71,11 @@ public:
         return end < addr;
     }
 
-    constexpr bool operator<(PAddr addr) const {
+    constexpr bool operator<(KPhysicalAddress addr) const {
         return this->IsStrictlyBefore(addr);
     }
 
-    constexpr bool TryConcatenate(PAddr addr, size_t np) {
+    constexpr bool TryConcatenate(KPhysicalAddress addr, size_t np) {
         if (addr != 0 && addr == this->GetEndAddress()) {
             m_num_pages += static_cast<u32>(np);
             return true;
@@ -90,96 +89,118 @@ private:
     }
 
 private:
+    friend class KPageGroup;
+
     KBlockInfo* m_next{};
     u32 m_page_index{};
     u32 m_num_pages{};
 };
 static_assert(sizeof(KBlockInfo) <= 0x10);
 
-class KPageGroup final {
+class KPageGroup {
 public:
-    class Node final {
+    class Iterator {
     public:
-        constexpr Node(u64 addr_, std::size_t num_pages_) : addr{addr_}, num_pages{num_pages_} {}
+        using iterator_category = std::forward_iterator_tag;
+        using value_type = const KBlockInfo;
+        using difference_type = std::ptrdiff_t;
+        using pointer = value_type*;
+        using reference = value_type&;
+
+        constexpr explicit Iterator(pointer n) : m_node(n) {}
+
+        constexpr bool operator==(const Iterator& rhs) const {
+            return m_node == rhs.m_node;
+        }
+        constexpr bool operator!=(const Iterator& rhs) const {
+            return !(*this == rhs);
+        }
 
-        constexpr u64 GetAddress() const {
-            return addr;
+        constexpr pointer operator->() const {
+            return m_node;
+        }
+        constexpr reference operator*() const {
+            return *m_node;
         }
 
-        constexpr std::size_t GetNumPages() const {
-            return num_pages;
+        constexpr Iterator& operator++() {
+            m_node = m_node->GetNext();
+            return *this;
        }
 
-        constexpr std::size_t GetSize() const {
-            return GetNumPages() * PageSize;
+        constexpr Iterator operator++(int) {
+            const Iterator it{*this};
+            ++(*this);
+            return it;
         }
 
     private:
-        u64 addr{};
-        std::size_t num_pages{};
+        pointer m_node{};
    };
 
-public:
-    KPageGroup() = default;
-    KPageGroup(u64 address, u64 num_pages) {
-        ASSERT(AddBlock(address, num_pages).IsSuccess());
+    explicit KPageGroup(KernelCore& kernel, KBlockInfoManager* m)
+        : m_kernel{kernel}, m_manager{m} {}
+    ~KPageGroup() {
+        this->Finalize();
     }
 
-    constexpr std::list<Node>& Nodes() {
-        return nodes;
-    }
+    void CloseAndReset();
+    void Finalize();
 
-    constexpr const std::list<Node>& Nodes() const {
-        return nodes;
+    Iterator begin() const {
+        return Iterator{m_first_block};
+    }
+    Iterator end() const {
+        return Iterator{nullptr};
+    }
+    bool empty() const {
+        return m_first_block == nullptr;
     }
 
-    std::size_t GetNumPages() const {
-        std::size_t num_pages = 0;
-        for (const Node& node : nodes) {
-            num_pages += node.GetNumPages();
-        }
-        return num_pages;
-    }
-
-    bool IsEqual(KPageGroup& other) const {
-        auto this_node = nodes.begin();
-        auto other_node = other.nodes.begin();
-        while (this_node != nodes.end() && other_node != other.nodes.end()) {
-            if (this_node->GetAddress() != other_node->GetAddress() ||
-                this_node->GetNumPages() != other_node->GetNumPages()) {
-                return false;
-            }
-            this_node = std::next(this_node);
-            other_node = std::next(other_node);
-        }
+    Result AddBlock(KPhysicalAddress addr, size_t num_pages);
+    void Open() const;
+    void OpenFirst() const;
+    void Close() const;
+
+    size_t GetNumPages() const;
+
+    bool IsEquivalentTo(const KPageGroup& rhs) const;
+
+    bool operator==(const KPageGroup& rhs) const {
+        return this->IsEquivalentTo(rhs);
+    }
 
-        return this_node == nodes.end() && other_node == other.nodes.end();
+    bool operator!=(const KPageGroup& rhs) const {
+        return !(*this == rhs);
     }
 
-    Result AddBlock(u64 address, u64 num_pages) {
-        if (!num_pages) {
-            return ResultSuccess;
+private:
+    KernelCore& m_kernel;
+    KBlockInfo* m_first_block{};
+    KBlockInfo* m_last_block{};
+    KBlockInfoManager* m_manager{};
+};
+
+class KScopedPageGroup {
+public:
+    explicit KScopedPageGroup(const KPageGroup* gp) : m_pg(gp) {
+        if (m_pg) {
+            m_pg->Open();
         }
-        if (!nodes.empty()) {
-            const auto node = nodes.back();
-            if (node.GetAddress() + node.GetNumPages() * PageSize == address) {
-                address = node.GetAddress();
-                num_pages += node.GetNumPages();
-                nodes.pop_back();
-            }
+    }
+    explicit KScopedPageGroup(const KPageGroup& gp) : KScopedPageGroup(std::addressof(gp)) {}
+    ~KScopedPageGroup() {
+        if (m_pg) {
+            m_pg->Close();
         }
-        nodes.push_back({address, num_pages});
-        return ResultSuccess;
     }
 
-    bool Empty() const {
-        return nodes.empty();
+    void CancelClose() {
+        m_pg = nullptr;
    }
 
-    void Finalize() {}
-
 private:
-    std::list<Node> nodes;
+    const KPageGroup* m_pg{};
 };
 
 } // namespace Kernel
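Note: the new KScopedPageGroup is an RAII guard over the group's reference counts: Open() on construction, Close() on destruction unless CancelClose() disarms it. An assumed usage sketch against the declarations above (not a call site from this commit):

```cpp
// Assumed usage: open the group's references for the duration of a
// mapping attempt; on failure the destructor closes them, and on success
// CancelClose() keeps them open because the mapping now owns them.
Result MapWithGuard(Kernel::KPageTable& page_table, VAddr address,
                    const Kernel::KPageGroup& pg) {
    Kernel::KScopedPageGroup spg(pg); // Open() every block in the group

    R_TRY(page_table.MapPages(address, pg, Kernel::KMemoryState::CodeOut,
                              Kernel::KMemoryPermission::UserReadWrite));

    spg.CancelClose(); // success: the mapping keeps the references
    R_SUCCEED();
}
```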
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 612fc76fa..9c7ac22dc 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -100,7 +100,7 @@ constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType a
 
 KPageTable::KPageTable(Core::System& system_)
     : m_general_lock{system_.Kernel()},
-      m_map_physical_memory_lock{system_.Kernel()}, m_system{system_} {}
+      m_map_physical_memory_lock{system_.Kernel()}, m_system{system_}, m_kernel{system_.Kernel()} {}
 
 KPageTable::~KPageTable() = default;
 
@@ -373,7 +373,7 @@ Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState sta
         m_memory_block_slab_manager);
 
     // Allocate and open.
-    KPageGroup pg;
+    KPageGroup pg{m_kernel, m_block_info_manager};
     R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
         &pg, num_pages,
         KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocation_option)));
@@ -432,7 +432,7 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si
     const size_t num_pages = size / PageSize;
 
     // Create page groups for the memory being mapped.
-    KPageGroup pg;
+    KPageGroup pg{m_kernel, m_block_info_manager};
     AddRegionToPages(src_address, num_pages, pg);
 
     // Reprotect the source as kernel-read/not mapped.
@@ -593,7 +593,7 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     const size_t size = num_pages * PageSize;
 
     // We're making a new group, not adding to an existing one.
-    R_UNLESS(pg.Empty(), ResultInvalidCurrentMemory);
+    R_UNLESS(pg.empty(), ResultInvalidCurrentMemory);
 
     // Begin traversal.
     Common::PageTable::TraversalContext context;
@@ -640,11 +640,10 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     R_SUCCEED();
 }
 
-bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) {
+bool KPageTable::IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages) {
     ASSERT(this->IsLockedByCurrentThread());
 
     const size_t size = num_pages * PageSize;
-    const auto& pg = pg_ll.Nodes();
     const auto& memory_layout = m_system.Kernel().MemoryLayout();
 
     // Empty groups are necessarily invalid.
@@ -942,9 +941,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
 
     ON_RESULT_FAILURE {
         if (cur_mapped_addr != dst_addr) {
-            // HACK: Manually close the pages.
-            HACK_ClosePages(dst_addr, (cur_mapped_addr - dst_addr) / PageSize);
-
             ASSERT(Operate(dst_addr, (cur_mapped_addr - dst_addr) / PageSize,
                            KMemoryPermission::None, OperationType::Unmap)
                        .IsSuccess());
@@ -1020,9 +1016,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
         // Map the page.
         R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, start_partial_page));
 
-        // HACK: Manually open the pages.
-        HACK_OpenPages(start_partial_page, 1);
-
         // Update tracking extents.
         cur_mapped_addr += PageSize;
         cur_block_addr += PageSize;
@@ -1051,9 +1044,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
         R_TRY(Operate(cur_mapped_addr, cur_block_size / PageSize, test_perm, OperationType::Map,
                       cur_block_addr));
 
-        // HACK: Manually open the pages.
-        HACK_OpenPages(cur_block_addr, cur_block_size / PageSize);
-
         // Update tracking extents.
         cur_mapped_addr += cur_block_size;
         cur_block_addr = next_entry.phys_addr;
@@ -1073,9 +1063,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
         R_TRY(Operate(cur_mapped_addr, last_block_size / PageSize, test_perm, OperationType::Map,
                       cur_block_addr));
 
-        // HACK: Manually open the pages.
-        HACK_OpenPages(cur_block_addr, last_block_size / PageSize);
-
         // Update tracking extents.
         cur_mapped_addr += last_block_size;
         cur_block_addr += last_block_size;
@@ -1107,9 +1094,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
 
         // Map the page.
         R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, end_partial_page));
-
-        // HACK: Manually open the pages.
-        HACK_OpenPages(end_partial_page, 1);
     }
 
     // Update memory blocks to reflect our changes
@@ -1211,9 +1195,6 @@ Result KPageTable::CleanupForIpcServer(VAddr address, size_t size, KMemoryState
     const size_t aligned_size = aligned_end - aligned_start;
     const size_t aligned_num_pages = aligned_size / PageSize;
 
-    // HACK: Manually close the pages.
-    HACK_ClosePages(aligned_start, aligned_num_pages);
-
     // Unmap the pages.
     R_TRY(Operate(aligned_start, aligned_num_pages, KMemoryPermission::None, OperationType::Unmap));
 
@@ -1501,17 +1482,6 @@ void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLi
     }
 }
 
-void KPageTable::HACK_OpenPages(PAddr phys_addr, size_t num_pages) {
-    m_system.Kernel().MemoryManager().OpenFirst(phys_addr, num_pages);
-}
-
-void KPageTable::HACK_ClosePages(VAddr virt_addr, size_t num_pages) {
-    for (size_t index = 0; index < num_pages; ++index) {
-        const auto paddr = GetPhysicalAddr(virt_addr + (index * PageSize));
-        m_system.Kernel().MemoryManager().Close(paddr, 1);
-    }
-}
-
 Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
     // Lock the physical memory lock.
     KScopedLightLock phys_lk(m_map_physical_memory_lock);
@@ -1572,7 +1542,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
     R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
 
     // Allocate pages for the new memory.
-    KPageGroup pg;
+    KPageGroup pg{m_kernel, m_block_info_manager};
     R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
         &pg, (size - mapped_size) / PageSize, m_allocate_option, 0, 0));
 
@@ -1650,7 +1620,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
     KScopedPageTableUpdater updater(this);
 
     // Prepare to iterate over the memory.
-    auto pg_it = pg.Nodes().begin();
+    auto pg_it = pg.begin();
     PAddr pg_phys_addr = pg_it->GetAddress();
     size_t pg_pages = pg_it->GetNumPages();
 
@@ -1680,9 +1650,6 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                                               last_unmap_address + 1 - cur_address) /
                              PageSize;
 
-                    // HACK: Manually close the pages.
-                    HACK_ClosePages(cur_address, cur_pages);
-
                     // Unmap.
                     ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None,
                                    OperationType::Unmap)
@@ -1703,7 +1670,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                 // Release any remaining unmapped memory.
                 m_system.Kernel().MemoryManager().OpenFirst(pg_phys_addr, pg_pages);
                 m_system.Kernel().MemoryManager().Close(pg_phys_addr, pg_pages);
-                for (++pg_it; pg_it != pg.Nodes().end(); ++pg_it) {
+                for (++pg_it; pg_it != pg.end(); ++pg_it) {
                     m_system.Kernel().MemoryManager().OpenFirst(pg_it->GetAddress(),
                                                                 pg_it->GetNumPages());
                     m_system.Kernel().MemoryManager().Close(pg_it->GetAddress(),
@@ -1731,7 +1698,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
             // Check if we're at the end of the physical block.
             if (pg_pages == 0) {
                 // Ensure there are more pages to map.
-                ASSERT(pg_it != pg.Nodes().end());
+                ASSERT(pg_it != pg.end());
 
                 // Advance our physical block.
                 ++pg_it;
@@ -1742,10 +1709,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
             // Map whatever we can.
             const size_t cur_pages = std::min(pg_pages, map_pages);
             R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite,
-                          OperationType::Map, pg_phys_addr));
-
-            // HACK: Manually open the pages.
-            HACK_OpenPages(pg_phys_addr, cur_pages);
+                          OperationType::MapFirst, pg_phys_addr));
 
             // Advance.
             cur_address += cur_pages * PageSize;
@@ -1888,9 +1852,6 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
                                           last_address + 1 - cur_address) /
                              PageSize;
 
-            // HACK: Manually close the pages.
-            HACK_ClosePages(cur_address, cur_pages);
-
             // Unmap.
             ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap)
                        .IsSuccess());
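Note: with page groups now carrying real reference counts, the HACK_OpenPages/HACK_ClosePages bookkeeping is removed: closes happen through the group or memory manager, and fresh physical mappings use the new OperationType::MapFirst, which opens the first reference as part of the map operation. A toy model (not the yuzu API) of the first-open versus re-open distinction:

```cpp
#include <cassert>

// Toy refcount model: a freshly allocated page starts at zero references
// and must take its first reference via OpenFirst(); already-live pages
// are shared with Open(), and Close() drops one reference.
struct PageRef {
    int count{0};

    void OpenFirst() {
        assert(count == 0); // only valid for a brand-new allocation
        count = 1;
    }
    void Open() {
        assert(count > 0); // only valid for an already-live page
        ++count;
    }
    void Close() {
        assert(count > 0);
        --count;
    }
};
```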
| @@ -1955,7 +1916,7 @@ Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size) | |||
| 1955 | R_TRY(dst_allocator_result); | 1916 | R_TRY(dst_allocator_result); |
| 1956 | 1917 | ||
| 1957 | // Map the memory. | 1918 | // Map the memory. |
| 1958 | KPageGroup page_linked_list; | 1919 | KPageGroup page_linked_list{m_kernel, m_block_info_manager}; |
| 1959 | const size_t num_pages{size / PageSize}; | 1920 | const size_t num_pages{size / PageSize}; |
| 1960 | const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>( | 1921 | const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>( |
| 1961 | KMemoryPermission::KernelRead | KMemoryPermission::NotMapped); | 1922 | KMemoryPermission::KernelRead | KMemoryPermission::NotMapped); |
| @@ -2022,14 +1983,14 @@ Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size | |||
| 2022 | num_dst_allocator_blocks); | 1983 | num_dst_allocator_blocks); |
| 2023 | R_TRY(dst_allocator_result); | 1984 | R_TRY(dst_allocator_result); |
| 2024 | 1985 | ||
| 2025 | KPageGroup src_pages; | 1986 | KPageGroup src_pages{m_kernel, m_block_info_manager}; |
| 2026 | KPageGroup dst_pages; | 1987 | KPageGroup dst_pages{m_kernel, m_block_info_manager}; |
| 2027 | const size_t num_pages{size / PageSize}; | 1988 | const size_t num_pages{size / PageSize}; |
| 2028 | 1989 | ||
| 2029 | AddRegionToPages(src_address, num_pages, src_pages); | 1990 | AddRegionToPages(src_address, num_pages, src_pages); |
| 2030 | AddRegionToPages(dst_address, num_pages, dst_pages); | 1991 | AddRegionToPages(dst_address, num_pages, dst_pages); |
| 2031 | 1992 | ||
| 2032 | R_UNLESS(dst_pages.IsEqual(src_pages), ResultInvalidMemoryRegion); | 1993 | R_UNLESS(dst_pages.IsEquivalentTo(src_pages), ResultInvalidMemoryRegion); |
| 2033 | 1994 | ||
| 2034 | { | 1995 | { |
| 2035 | auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); }); | 1996 | auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); }); |
| @@ -2060,7 +2021,7 @@ Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list, | |||
| 2060 | 2021 | ||
| 2061 | VAddr cur_addr{addr}; | 2022 | VAddr cur_addr{addr}; |
| 2062 | 2023 | ||
| 2063 | for (const auto& node : page_linked_list.Nodes()) { | 2024 | for (const auto& node : page_linked_list) { |
| 2064 | if (const auto result{ | 2025 | if (const auto result{ |
| 2065 | Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())}; | 2026 | Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())}; |
| 2066 | result.IsError()) { | 2027 | result.IsError()) { |
| @@ -2160,7 +2121,7 @@ Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) { | |||
| 2160 | 2121 | ||
| 2161 | VAddr cur_addr{addr}; | 2122 | VAddr cur_addr{addr}; |
| 2162 | 2123 | ||
| 2163 | for (const auto& node : page_linked_list.Nodes()) { | 2124 | for (const auto& node : page_linked_list) { |
| 2164 | if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None, | 2125 | if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None, |
| 2165 | OperationType::Unmap)}; | 2126 | OperationType::Unmap)}; |
| 2166 | result.IsError()) { | 2127 | result.IsError()) { |
| @@ -2527,13 +2488,13 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) { | |||
| 2527 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | 2488 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); |
| 2528 | 2489 | ||
| 2529 | // Allocate pages for the heap extension. | 2490 | // Allocate pages for the heap extension. |
| 2530 | KPageGroup pg; | 2491 | KPageGroup pg{m_kernel, m_block_info_manager}; |
| 2531 | R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen( | 2492 | R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen( |
| 2532 | &pg, allocation_size / PageSize, | 2493 | &pg, allocation_size / PageSize, |
| 2533 | KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option))); | 2494 | KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option))); |
| 2534 | 2495 | ||
| 2535 | // Clear all the newly allocated pages. | 2496 | // Clear all the newly allocated pages. |
| 2536 | for (const auto& it : pg.Nodes()) { | 2497 | for (const auto& it : pg) { |
| 2537 | std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value, | 2498 | std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value, |
| 2538 | it.GetSize()); | 2499 | it.GetSize()); |
| 2539 | } | 2500 | } |
| @@ -2610,11 +2571,23 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_ | |||
| 2610 | if (is_map_only) { | 2571 | if (is_map_only) { |
| 2611 | R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)); | 2572 | R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)); |
| 2612 | } else { | 2573 | } else { |
| 2613 | KPageGroup page_group; | 2574 | // Create a page group to hold the pages we allocate. |
| 2614 | R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess( | 2575 | KPageGroup pg{m_kernel, m_block_info_manager}; |
| 2615 | &page_group, needed_num_pages, | 2576 | |
| 2616 | KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0)); | 2577 | R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen( |
| 2617 | R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)); | 2578 | &pg, needed_num_pages, |
| 2579 | KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option))); | ||
| 2580 | |||
| 2581 | // Ensure that the page group is closed when we're done working with it. | ||
| 2582 | SCOPE_EXIT({ pg.Close(); }); | ||
| 2583 | |||
| 2584 | // Clear all pages. | ||
| 2585 | for (const auto& it : pg) { | ||
| 2586 | std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), | ||
| 2587 | m_heap_fill_value, it.GetSize()); | ||
| 2588 | } | ||
| 2589 | |||
| 2590 | R_TRY(Operate(addr, needed_num_pages, pg, OperationType::MapGroup)); | ||
| 2618 | } | 2591 | } |
| 2619 | 2592 | ||
| 2620 | // Update the blocks. | 2593 | // Update the blocks. |
| @@ -2795,19 +2768,28 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_ | |||
| 2795 | ASSERT(num_pages > 0); | 2768 | ASSERT(num_pages > 0); |
| 2796 | ASSERT(num_pages == page_group.GetNumPages()); | 2769 | ASSERT(num_pages == page_group.GetNumPages()); |
| 2797 | 2770 | ||
| 2798 | for (const auto& node : page_group.Nodes()) { | 2771 | switch (operation) { |
| 2799 | const size_t size{node.GetNumPages() * PageSize}; | 2772 | case OperationType::MapGroup: { |
| 2773 | // We want to maintain a new reference to every page in the group. | ||
| 2774 | KScopedPageGroup spg(page_group); | ||
| 2775 | |||
| 2776 | for (const auto& node : page_group) { | ||
| 2777 | const size_t size{node.GetNumPages() * PageSize}; | ||
| 2800 | 2778 | ||
| 2801 | switch (operation) { | 2779 | // Map the pages. |
| 2802 | case OperationType::MapGroup: | ||
| 2803 | m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress()); | 2780 | m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress()); |
| 2804 | break; | 2781 | |
| 2805 | default: | 2782 | addr += size; |
| 2806 | ASSERT(false); | ||
| 2807 | break; | ||
| 2808 | } | 2783 | } |
| 2809 | 2784 | ||
| 2810 | addr += size; | 2785 | // We succeeded! We want to persist the reference to the pages. |
| 2786 | spg.CancelClose(); | ||
| 2787 | |||
| 2788 | break; | ||
| 2789 | } | ||
| 2790 | default: | ||
| 2791 | ASSERT(false); | ||
| 2792 | break; | ||
| 2811 | } | 2793 | } |
| 2812 | 2794 | ||
| 2813 | R_SUCCEED(); | 2795 | R_SUCCEED(); |
| @@ -2822,13 +2804,29 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, | |||
| 2822 | ASSERT(ContainsPages(addr, num_pages)); | 2804 | ASSERT(ContainsPages(addr, num_pages)); |
| 2823 | 2805 | ||
| 2824 | switch (operation) { | 2806 | switch (operation) { |
| 2825 | case OperationType::Unmap: | 2807 | case OperationType::Unmap: { |
| 2808 | // Ensure that any pages we track close on exit. | ||
| 2809 | KPageGroup pages_to_close{m_kernel, this->GetBlockInfoManager()}; | ||
| 2810 | SCOPE_EXIT({ pages_to_close.CloseAndReset(); }); | ||
| 2811 | |||
| 2812 | this->AddRegionToPages(addr, num_pages, pages_to_close); | ||
| 2826 | m_system.Memory().UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize); | 2813 | m_system.Memory().UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize); |
| 2827 | break; | 2814 | break; |
| 2815 | } | ||
| 2816 | case OperationType::MapFirst: | ||
| 2828 | case OperationType::Map: { | 2817 | case OperationType::Map: { |
| 2829 | ASSERT(map_addr); | 2818 | ASSERT(map_addr); |
| 2830 | ASSERT(Common::IsAligned(map_addr, PageSize)); | 2819 | ASSERT(Common::IsAligned(map_addr, PageSize)); |
| 2831 | m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr); | 2820 | m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr); |
| 2821 | |||
| 2822 | // Open references to pages, if we should. | ||
| 2823 | if (IsHeapPhysicalAddress(m_kernel.MemoryLayout(), map_addr)) { | ||
| 2824 | if (operation == OperationType::MapFirst) { | ||
| 2825 | m_kernel.MemoryManager().OpenFirst(map_addr, num_pages); | ||
| 2826 | } else { | ||
| 2827 | m_kernel.MemoryManager().Open(map_addr, num_pages); | ||
| 2828 | } | ||
| 2829 | } | ||
| 2832 | break; | 2830 | break; |
| 2833 | } | 2831 | } |
| 2834 | case OperationType::Separate: { | 2832 | case OperationType::Separate: { |
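The rewritten Operate() paths above replace the manual HACK_OpenPages/HACK_ClosePages calls with automatic reference management: the MapGroup case holds a scoped reference that survives only if mapping succeeds, and the Map/MapFirst case opens references itself when the target is heap-backed. Below is a minimal sketch of the scoped-close idiom that KScopedPageGroup supplies, using a hypothetical PageGroup stand-in rather than the real Kernel::KPageGroup:

    // Hypothetical stand-in for Kernel::KPageGroup; only the
    // reference-counting surface matters for this sketch.
    struct PageGroup {
        int refcount{0};
        void Open() { ++refcount; }   // models opening every block in the group
        void Close() { --refcount; }  // models closing every block in the group
    };

    // Sketch of the KScopedPageGroup idiom used by the MapGroup path:
    // take a reference up front, drop it automatically on any early
    // error return, and keep it only once the caller commits the mapping.
    class ScopedPageGroup {
    public:
        explicit ScopedPageGroup(PageGroup& pg) : m_pg{&pg} {
            m_pg->Open();
        }
        ~ScopedPageGroup() {
            if (m_pg != nullptr) {
                m_pg->Close();
            }
        }
        void CancelClose() {
            m_pg = nullptr;
        }

    private:
        PageGroup* m_pg;
    };

In Operate(), the guard is constructed before the per-node MapMemoryRegion loop; an error unwinds through the destructor and releases the references, while the success path reaches spg.CancelClose() and the mapping keeps them.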
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h index f1ca785d7..0a454b05b 100644 --- a/src/core/hle/kernel/k_page_table.h +++ b/src/core/hle/kernel/k_page_table.h | |||
| @@ -107,6 +107,10 @@ public: | |||
| 107 | return *m_page_table_impl; | 107 | return *m_page_table_impl; |
| 108 | } | 108 | } |
| 109 | 109 | ||
| 110 | KBlockInfoManager* GetBlockInfoManager() { | ||
| 111 | return m_block_info_manager; | ||
| 112 | } | ||
| 113 | |||
| 110 | bool CanContain(VAddr addr, size_t size, KMemoryState state) const; | 114 | bool CanContain(VAddr addr, size_t size, KMemoryState state) const; |
| 111 | 115 | ||
| 112 | protected: | 116 | protected: |
| @@ -261,10 +265,6 @@ private: | |||
| 261 | void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address, | 265 | void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address, |
| 262 | size_t size, KMemoryPermission prot_perm); | 266 | size_t size, KMemoryPermission prot_perm); |
| 263 | 267 | ||
| 264 | // HACK: These will be removed once we automatically manage page reference counts. | ||
| 265 | void HACK_OpenPages(PAddr phys_addr, size_t num_pages); | ||
| 266 | void HACK_ClosePages(VAddr virt_addr, size_t num_pages); | ||
| 267 | |||
| 268 | mutable KLightLock m_general_lock; | 268 | mutable KLightLock m_general_lock; |
| 269 | mutable KLightLock m_map_physical_memory_lock; | 269 | mutable KLightLock m_map_physical_memory_lock; |
| 270 | 270 | ||
| @@ -488,6 +488,7 @@ private: | |||
| 488 | std::unique_ptr<Common::PageTable> m_page_table_impl; | 488 | std::unique_ptr<Common::PageTable> m_page_table_impl; |
| 489 | 489 | ||
| 490 | Core::System& m_system; | 490 | Core::System& m_system; |
| 491 | KernelCore& m_kernel; | ||
| 491 | }; | 492 | }; |
| 492 | 493 | ||
| 493 | } // namespace Kernel | 494 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp index 0aa68103c..3cf2b5d91 100644 --- a/src/core/hle/kernel/k_shared_memory.cpp +++ b/src/core/hle/kernel/k_shared_memory.cpp | |||
| @@ -13,10 +13,7 @@ | |||
| 13 | namespace Kernel { | 13 | namespace Kernel { |
| 14 | 14 | ||
| 15 | KSharedMemory::KSharedMemory(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {} | 15 | KSharedMemory::KSharedMemory(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {} |
| 16 | 16 | KSharedMemory::~KSharedMemory() = default; | |
| 17 | KSharedMemory::~KSharedMemory() { | ||
| 18 | kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemoryMax, size); | ||
| 19 | } | ||
| 20 | 17 | ||
| 21 | Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_, | 18 | Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_, |
| 22 | Svc::MemoryPermission owner_permission_, | 19 | Svc::MemoryPermission owner_permission_, |
| @@ -49,7 +46,8 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o | |||
| 49 | R_UNLESS(physical_address != 0, ResultOutOfMemory); | 46 | R_UNLESS(physical_address != 0, ResultOutOfMemory); |
| 50 | 47 | ||
| 51 | //! Insert the result into our page group. | 48 | //! Insert the result into our page group. |
| 52 | page_group.emplace(physical_address, num_pages); | 49 | page_group.emplace(kernel, &kernel.GetSystemSystemResource().GetBlockInfoManager()); |
| 50 | page_group->AddBlock(physical_address, num_pages); | ||
| 53 | 51 | ||
| 54 | // Commit our reservation. | 52 | // Commit our reservation. |
| 55 | memory_reservation.Commit(); | 53 | memory_reservation.Commit(); |
| @@ -62,7 +60,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o | |||
| 62 | is_initialized = true; | 60 | is_initialized = true; |
| 63 | 61 | ||
| 64 | // Clear all pages in the memory. | 62 | // Clear all pages in the memory. |
| 65 | for (const auto& block : page_group->Nodes()) { | 63 | for (const auto& block : *page_group) { |
| 66 | std::memset(device_memory_.GetPointer<void>(block.GetAddress()), 0, block.GetSize()); | 64 | std::memset(device_memory_.GetPointer<void>(block.GetAddress()), 0, block.GetSize()); |
| 67 | } | 65 | } |
| 68 | 66 | ||
| @@ -71,13 +69,8 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o | |||
| 71 | 69 | ||
| 72 | void KSharedMemory::Finalize() { | 70 | void KSharedMemory::Finalize() { |
| 73 | // Close and finalize the page group. | 71 | // Close and finalize the page group. |
| 74 | // page_group->Close(); | 72 | page_group->Close(); |
| 75 | // page_group->Finalize(); | 73 | page_group->Finalize(); |
| 76 | |||
| 77 | //! HACK: Manually close. | ||
| 78 | for (const auto& block : page_group->Nodes()) { | ||
| 79 | kernel.MemoryManager().Close(block.GetAddress(), block.GetNumPages()); | ||
| 80 | } | ||
| 81 | 74 | ||
| 82 | // Release the memory reservation. | 75 | // Release the memory reservation. |
| 83 | resource_limit->Release(LimitableResource::PhysicalMemoryMax, size); | 76 | resource_limit->Release(LimitableResource::PhysicalMemoryMax, size); |
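KPageGroup now takes a KernelCore reference and a KBlockInfoManager at construction, so KSharedMemory can no longer emplace a group directly from an (address, page count) pair: it constructs the group first, then describes the memory with AddBlock(). A toy model of that two-step pattern, with simplified stand-in types in place of the real kernel classes:

    #include <cstddef>
    #include <cstdint>
    #include <optional>
    #include <vector>

    struct KernelCore {};
    struct KBlockInfoManager {};

    class PageGroup {
    public:
        PageGroup(KernelCore& kernel, KBlockInfoManager* manager)
            : m_kernel{kernel}, m_manager{manager} {}

        // Blocks are appended after construction...
        void AddBlock(std::uint64_t address, std::size_t num_pages) {
            m_blocks.push_back({address, num_pages});
        }

    private:
        struct Block {
            std::uint64_t address;
            std::size_t num_pages;
        };
        KernelCore& m_kernel;
        KBlockInfoManager* m_manager;
        std::vector<Block> m_blocks;
    };

    // ...so deferred construction goes through std::optional::emplace,
    // mirroring the two-step call in KSharedMemory::Initialize().
    std::optional<PageGroup> page_group;

    void Initialize(KernelCore& kernel, KBlockInfoManager* bim,
                    std::uint64_t physical_address, std::size_t num_pages) {
        page_group.emplace(kernel, bim);
        page_group->AddBlock(physical_address, num_pages);
    }

Teardown becomes symmetric as well: Finalize() now calls Close() and Finalize() on the group instead of manually closing each block against the memory manager.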
diff --git a/src/core/hle/kernel/memory_types.h b/src/core/hle/kernel/memory_types.h index 3975507bd..92b8b37ac 100644 --- a/src/core/hle/kernel/memory_types.h +++ b/src/core/hle/kernel/memory_types.h | |||
| @@ -14,4 +14,7 @@ constexpr std::size_t PageSize{1 << PageBits}; | |||
| 14 | 14 | ||
| 15 | using Page = std::array<u8, PageSize>; | 15 | using Page = std::array<u8, PageSize>; |
| 16 | 16 | ||
| 17 | using KPhysicalAddress = PAddr; | ||
| 18 | using KProcessAddress = VAddr; | ||
| 19 | |||
| 17 | } // namespace Kernel | 20 | } // namespace Kernel |
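The two aliases mirror upstream kernel naming, letting HLE code distinguish process-virtual from physical addresses by type name while both remain the same underlying integers. A one-line illustration, assuming PAddr and VAddr are 64-bit integers as defined in common_types:

    #include <cstdint>

    using PAddr = std::uint64_t;  // assumption: matches common/common_types.h
    using VAddr = std::uint64_t;

    using KPhysicalAddress = PAddr;
    using KProcessAddress = VAddr;

    // Signatures can state intent without any behavioural change:
    void MapPage(KProcessAddress dst, KPhysicalAddress src);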
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 788ee2160..aca442196 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp | |||
| @@ -1485,7 +1485,7 @@ static Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle p | |||
| 1485 | ResultInvalidMemoryRegion); | 1485 | ResultInvalidMemoryRegion); |
| 1486 | 1486 | ||
| 1487 | // Create a new page group. | 1487 | // Create a new page group. |
| 1488 | KPageGroup pg; | 1488 | KPageGroup pg{system.Kernel(), dst_pt.GetBlockInfoManager()}; |
| 1489 | R_TRY(src_pt.MakeAndOpenPageGroup( | 1489 | R_TRY(src_pt.MakeAndOpenPageGroup( |
| 1490 | std::addressof(pg), src_address, size / PageSize, KMemoryState::FlagCanMapProcess, | 1490 | std::addressof(pg), src_address, size / PageSize, KMemoryState::FlagCanMapProcess, |
| 1491 | KMemoryState::FlagCanMapProcess, KMemoryPermission::None, KMemoryPermission::None, | 1491 | KMemoryState::FlagCanMapProcess, KMemoryPermission::None, KMemoryPermission::None, |
diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp index d1cbadde4..f4416f5b2 100644 --- a/src/core/hle/service/nvflinger/nvflinger.cpp +++ b/src/core/hle/service/nvflinger/nvflinger.cpp | |||
| @@ -312,8 +312,6 @@ void NVFlinger::Compose() { | |||
| 312 | } | 312 | } |
| 313 | 313 | ||
| 314 | s64 NVFlinger::GetNextTicks() const { | 314 | s64 NVFlinger::GetNextTicks() const { |
| 315 | static constexpr s64 max_hertz = 120LL; | ||
| 316 | |||
| 317 | const auto& settings = Settings::values; | 315 | const auto& settings = Settings::values; |
| 318 | auto speed_scale = 1.f; | 316 | auto speed_scale = 1.f; |
| 319 | if (settings.use_multi_core.GetValue()) { | 317 | if (settings.use_multi_core.GetValue()) { |
| @@ -327,9 +325,11 @@ s64 NVFlinger::GetNextTicks() const { | |||
| 327 | } | 325 | } |
| 328 | } | 326 | } |
| 329 | 327 | ||
| 330 | const auto next_ticks = ((1000000000 * (1LL << swap_interval)) / max_hertz); | 328 | // As an extension, treat a nonpositive swap interval as a framerate multiplier. |
| 329 | const f32 effective_fps = swap_interval <= 0 ? 120.f * static_cast<f32>(1 - swap_interval) | ||
| 330 | : 60.f / static_cast<f32>(swap_interval); | ||
| 331 | 331 | ||
| 332 | return static_cast<s64>(speed_scale * static_cast<float>(next_ticks)); | 332 | return static_cast<s64>(speed_scale * (1000000000.f / effective_fps)); |
| 333 | } | 333 | } |
| 334 | 334 | ||
| 335 | } // namespace Service::NVFlinger | 335 | } // namespace Service::NVFlinger |
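The old computation shifted a fixed 120 Hz base by swap_interval, treating the interval as a power-of-two divisor; the new one derives an effective framerate directly and, per the comment, reinterprets a nonpositive interval as a 120 Hz framerate multiplier. A self-contained sketch of the new math, with the emulation speed scale omitted and approximate outputs noted:

    #include <cstdint>

    // swap_interval > 0: classic 60 / interval pacing (60, 30, 20 FPS...).
    // swap_interval <= 0: extension, 120 Hz scaled by (1 - interval).
    std::int64_t NextTicks(std::int32_t swap_interval) {
        const float effective_fps =
            swap_interval <= 0 ? 120.f * static_cast<float>(1 - swap_interval)
                               : 60.f / static_cast<float>(swap_interval);
        return static_cast<std::int64_t>(1000000000.f / effective_fps);
    }

    // NextTicks(1)  -> ~16.7 ms in ns  (60 FPS)
    // NextTicks(2)  -> ~33.3 ms in ns  (30 FPS)
    // NextTicks(0)  ->  ~8.3 ms in ns  (120 FPS)
    // NextTicks(-1) ->  ~4.2 ms in ns  (240 FPS)

This is also why swap_interval changes from u32 to s32 in the header below: negative intervals are now meaningful rather than wrapping.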
diff --git a/src/core/hle/service/nvflinger/nvflinger.h b/src/core/hle/service/nvflinger/nvflinger.h index 9b22397db..3828cf272 100644 --- a/src/core/hle/service/nvflinger/nvflinger.h +++ b/src/core/hle/service/nvflinger/nvflinger.h | |||
| @@ -133,7 +133,7 @@ private: | |||
| 133 | /// layers. | 133 | /// layers. |
| 134 | u32 next_buffer_queue_id = 1; | 134 | u32 next_buffer_queue_id = 1; |
| 135 | 135 | ||
| 136 | u32 swap_interval = 1; | 136 | s32 swap_interval = 1; |
| 137 | 137 | ||
| 138 | /// Event that handles screen composition. | 138 | /// Event that handles screen composition. |
| 139 | std::shared_ptr<Core::Timing::EventType> multi_composition_event; | 139 | std::shared_ptr<Core::Timing::EventType> multi_composition_event; |
diff --git a/src/core/internal_network/network.cpp b/src/core/internal_network/network.cpp index 447fbffaa..282ea1ff9 100644 --- a/src/core/internal_network/network.cpp +++ b/src/core/internal_network/network.cpp | |||
| @@ -117,6 +117,8 @@ Errno TranslateNativeError(int e) { | |||
| 117 | return Errno::NETUNREACH; | 117 | return Errno::NETUNREACH; |
| 118 | case WSAEMSGSIZE: | 118 | case WSAEMSGSIZE: |
| 119 | return Errno::MSGSIZE; | 119 | return Errno::MSGSIZE; |
| 120 | case WSAETIMEDOUT: | ||
| 121 | return Errno::TIMEDOUT; | ||
| 120 | default: | 122 | default: |
| 121 | UNIMPLEMENTED_MSG("Unimplemented errno={}", e); | 123 | UNIMPLEMENTED_MSG("Unimplemented errno={}", e); |
| 122 | return Errno::OTHER; | 124 | return Errno::OTHER; |
| @@ -211,6 +213,8 @@ Errno TranslateNativeError(int e) { | |||
| 211 | return Errno::NETUNREACH; | 213 | return Errno::NETUNREACH; |
| 212 | case EMSGSIZE: | 214 | case EMSGSIZE: |
| 213 | return Errno::MSGSIZE; | 215 | return Errno::MSGSIZE; |
| 216 | case ETIMEDOUT: | ||
| 217 | return Errno::TIMEDOUT; | ||
| 214 | default: | 218 | default: |
| 215 | UNIMPLEMENTED_MSG("Unimplemented errno={}", e); | 219 | UNIMPLEMENTED_MSG("Unimplemented errno={}", e); |
| 216 | return Errno::OTHER; | 220 | return Errno::OTHER; |
| @@ -226,7 +230,7 @@ Errno GetAndLogLastError() { | |||
| 226 | int e = errno; | 230 | int e = errno; |
| 227 | #endif | 231 | #endif |
| 228 | const Errno err = TranslateNativeError(e); | 232 | const Errno err = TranslateNativeError(e); |
| 229 | if (err == Errno::AGAIN) { | 233 | if (err == Errno::AGAIN || err == Errno::TIMEDOUT) { |
| 230 | return err; | 234 | return err; |
| 231 | } | 235 | } |
| 232 | LOG_ERROR(Network, "Socket operation error: {}", Common::NativeErrorToString(e)); | 236 | LOG_ERROR(Network, "Socket operation error: {}", Common::NativeErrorToString(e)); |
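With the added cases, a socket timeout (for example from SO_RCVTIMEO) now translates to Errno::TIMEDOUT on both the Winsock and POSIX paths, and GetAndLogLastError() classifies it alongside AGAIN as an expected outcome that is returned without an error log. A reduced sketch of that classification, using a trimmed Errno:

    #include <cerrno>

    enum class Errno { AGAIN, TIMEDOUT, OTHER };

    // Minimal model of the POSIX arm of TranslateNativeError(); the
    // Winsock arm maps WSAETIMEDOUT the same way.
    Errno Translate(int e) {
        switch (e) {
        case EAGAIN:
            return Errno::AGAIN;
        case ETIMEDOUT:
            return Errno::TIMEDOUT;
        default:
            return Errno::OTHER;
        }
    }

    // Mirror of the GetAndLogLastError() change: timeouts are expected
    // and recoverable, so they no longer reach LOG_ERROR.
    bool IsExpected(Errno err) {
        return err == Errno::AGAIN || err == Errno::TIMEDOUT;
    }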
diff --git a/src/core/memory.cpp b/src/core/memory.cpp index 26be74df4..4e605fae4 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp | |||
| @@ -383,6 +383,10 @@ struct Memory::Impl { | |||
| 383 | return; | 383 | return; |
| 384 | } | 384 | } |
| 385 | 385 | ||
| 386 | if (Settings::IsFastmemEnabled()) { | ||
| 387 | system.DeviceMemory().buffer.Protect(vaddr, size, !debug, !debug); | ||
| 388 | } | ||
| 389 | |||
| 386 | // Iterate over a contiguous CPU address space, marking/unmarking the region. | 390 | // Iterate over a contiguous CPU address space, marking/unmarking the region. |
| 387 | // The region is at a granularity of CPU pages. | 391 | // The region is at a granularity of CPU pages. |
| 388 | 392 | ||
| @@ -436,7 +440,7 @@ struct Memory::Impl { | |||
| 436 | } | 440 | } |
| 437 | 441 | ||
| 438 | if (Settings::IsFastmemEnabled()) { | 442 | if (Settings::IsFastmemEnabled()) { |
| 439 | const bool is_read_enable = Settings::IsGPULevelHigh() || !cached; | 443 | const bool is_read_enable = !Settings::IsGPULevelExtreme() || !cached; |
| 440 | system.DeviceMemory().buffer.Protect(vaddr, size, is_read_enable, !cached); | 444 | system.DeviceMemory().buffer.Protect(vaddr, size, is_read_enable, !cached); |
| 441 | } | 445 | } |
| 442 | 446 | ||
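Two fastmem behaviour changes are visible in this file: regions marked for debug now also drop their host-buffer protection so that every access faults into the slow path, and GPU-cached regions lose read access only at Extreme GPU accuracy (previously reads were kept only at High accuracy or above). A condensed sketch of the two protection updates, with a hypothetical HostBuffer standing in for the device memory's fastmem buffer:

    #include <cstdint>

    // Hypothetical stand-in; the real call in the diff is
    // system.DeviceMemory().buffer.Protect(vaddr, size, read, write).
    struct HostBuffer {
        void Protect(std::uint64_t vaddr, std::uint64_t size, bool allow_read,
                     bool allow_write) {
            // mprotect/VirtualProtect on the backing mapping goes here.
        }
    };

    // Marking or unmarking a debug region: both permissions follow !debug.
    void MarkDebug(HostBuffer& buffer, std::uint64_t vaddr, std::uint64_t size,
                   bool debug) {
        buffer.Protect(vaddr, size, !debug, !debug);
    }

    // Marking or unmarking a GPU-cached region: writes always trap while
    // cached; reads keep working unless GPU accuracy is Extreme.
    void MarkCached(HostBuffer& buffer, std::uint64_t vaddr, std::uint64_t size,
                    bool cached, bool gpu_level_extreme) {
        const bool allow_read = !gpu_level_extreme || !cached;
        buffer.Protect(vaddr, size, allow_read, !cached);
    }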