Diffstat (limited to 'src'): 22 files changed, 463 insertions(+), 238 deletions(-)
diff --git a/src/core/debugger/gdbstub.cpp b/src/core/debugger/gdbstub.cpp
index 82964f0a1..2076aa8a2 100644
--- a/src/core/debugger/gdbstub.cpp
+++ b/src/core/debugger/gdbstub.cpp
@@ -822,11 +822,13 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
            const char i = True(mem_info.attribute & MemoryAttribute::IpcLocked) ? 'I' : '-';
            const char d = True(mem_info.attribute & MemoryAttribute::DeviceShared) ? 'D' : '-';
            const char u = True(mem_info.attribute & MemoryAttribute::Uncached) ? 'U' : '-';
+            const char p =
+                True(mem_info.attribute & MemoryAttribute::PermissionLocked) ? 'P' : '-';
 
-            reply +=
-                fmt::format(" {:#012x} - {:#012x} {} {} {}{}{}{} [{}, {}]\n",
-                            mem_info.base_address, mem_info.base_address + mem_info.size - 1,
-                            perm, state, l, i, d, u, mem_info.ipc_count, mem_info.device_count);
+            reply += fmt::format(" {:#012x} - {:#012x} {} {} {}{}{}{}{} [{}, {}]\n",
+                                 mem_info.base_address,
+                                 mem_info.base_address + mem_info.size - 1, perm, state, l, i,
+                                 d, u, p, mem_info.ipc_count, mem_info.device_count);
        }
 
        const uintptr_t next_address = mem_info.base_address + mem_info.size;
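
Note: the hunk above extends the per-mapping attribute columns printed by the gdbstub "monitor" (Rcmd) memory-map dump with a 'P' flag for the new PermissionLocked attribute. A minimal standalone sketch of the flag-column idiom, with illustrative names rather than yuzu's actual types:

    #include <cstdint>
    #include <string>

    enum class MemAttr : std::uint8_t {
        IpcLocked = 1 << 0,
        DeviceShared = 1 << 1,
        Uncached = 1 << 2,
        PermissionLocked = 1 << 3,
    };

    // One character per attribute bit; '-' when the bit is clear.
    std::string AttributeColumns(std::uint8_t attr) {
        std::string s;
        s += (attr & static_cast<std::uint8_t>(MemAttr::IpcLocked)) ? 'I' : '-';
        s += (attr & static_cast<std::uint8_t>(MemAttr::DeviceShared)) ? 'D' : '-';
        s += (attr & static_cast<std::uint8_t>(MemAttr::Uncached)) ? 'U' : '-';
        s += (attr & static_cast<std::uint8_t>(MemAttr::PermissionLocked)) ? 'P' : '-';
        return s; // e.g. 0b1001 -> "I--P"
    }
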
diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp
index 1f2db673c..a0e20bbbb 100644
--- a/src/core/hle/kernel/init/init_slab_setup.cpp
+++ b/src/core/hle/kernel/init/init_slab_setup.cpp
@@ -106,7 +106,7 @@ static_assert(KernelPageBufferAdditionalSize ==
 /// memory.
 static KPhysicalAddress TranslateSlabAddrToPhysical(KMemoryLayout& memory_layout,
                                                     KVirtualAddress slab_addr) {
-    slab_addr -= GetInteger(memory_layout.GetSlabRegionAddress());
+    slab_addr -= memory_layout.GetSlabRegion().GetAddress();
     return GetInteger(slab_addr) + Core::DramMemoryMap::SlabHeapBase;
 }
 
@@ -196,7 +196,12 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
    auto& kernel = system.Kernel();
 
    // Get the start of the slab region, since that's where we'll be working.
-    KVirtualAddress address = memory_layout.GetSlabRegionAddress();
+    const KMemoryRegion& slab_region = memory_layout.GetSlabRegion();
+    KVirtualAddress address = slab_region.GetAddress();
+
+    // Clear the slab region.
+    // TODO: implement access to kernel VAs.
+    // std::memset(device_ptr, 0, slab_region.GetSize());
 
    // Initialize slab type array to be in sorted order.
    std::array<KSlabType, KSlabType_Count> slab_types;
diff --git a/src/core/hle/kernel/initial_process.h b/src/core/hle/kernel/initial_process.h
index 82195f4f7..2c95269fc 100644
--- a/src/core/hle/kernel/initial_process.h
+++ b/src/core/hle/kernel/initial_process.h
@@ -19,4 +19,8 @@ static inline KPhysicalAddress GetInitialProcessBinaryPhysicalAddress() {
                          MainMemoryAddress);
 }
 
+static inline size_t GetInitialProcessBinarySize() {
+    return InitialProcessBinarySizeMax;
+}
+
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h
index 41a29da24..ef3f61321 100644
--- a/src/core/hle/kernel/k_memory_block.h
+++ b/src/core/hle/kernel/k_memory_block.h
@@ -36,6 +36,7 @@ enum class KMemoryState : u32 {
    FlagCanChangeAttribute = (1 << 24),
    FlagCanCodeMemory = (1 << 25),
    FlagLinearMapped = (1 << 26),
+    FlagCanPermissionLock = (1 << 27),
 
    FlagsData = FlagCanReprotect | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc |
                FlagMapped | FlagCanAlias | FlagCanTransfer | FlagCanQueryPhysical |
@@ -50,12 +51,16 @@ enum class KMemoryState : u32 {
                FlagLinearMapped,
 
    Free = static_cast<u32>(Svc::MemoryState::Free),
-    Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped | FlagCanDeviceMap |
-         FlagCanAlignedDeviceMap,
+
+    IoMemory = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped | FlagCanDeviceMap |
+               FlagCanAlignedDeviceMap,
+    IoRegister =
+        static_cast<u32>(Svc::MemoryState::Io) | FlagCanDeviceMap | FlagCanAlignedDeviceMap,
+
    Static = static_cast<u32>(Svc::MemoryState::Static) | FlagMapped | FlagCanQueryPhysical,
    Code = static_cast<u32>(Svc::MemoryState::Code) | FlagsCode | FlagCanMapProcess,
    CodeData = static_cast<u32>(Svc::MemoryState::CodeData) | FlagsData | FlagCanMapProcess |
-               FlagCanCodeMemory,
+               FlagCanCodeMemory | FlagCanPermissionLock,
    Normal = static_cast<u32>(Svc::MemoryState::Normal) | FlagsData | FlagCanCodeMemory,
    Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted |
             FlagLinearMapped,
@@ -65,7 +70,8 @@ enum class KMemoryState : u32 {
    AliasCode = static_cast<u32>(Svc::MemoryState::AliasCode) | FlagsCode | FlagCanMapProcess |
                FlagCanCodeAlias,
    AliasCodeData = static_cast<u32>(Svc::MemoryState::AliasCodeData) | FlagsData |
-                    FlagCanMapProcess | FlagCanCodeAlias | FlagCanCodeMemory,
+                    FlagCanMapProcess | FlagCanCodeAlias | FlagCanCodeMemory |
+                    FlagCanPermissionLock,
 
    Ipc = static_cast<u32>(Svc::MemoryState::Ipc) | FlagsMisc | FlagCanAlignedDeviceMap |
          FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
@@ -73,7 +79,7 @@ enum class KMemoryState : u32 {
    Stack = static_cast<u32>(Svc::MemoryState::Stack) | FlagsMisc | FlagCanAlignedDeviceMap |
            FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
 
-    ThreadLocal = static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagLinearMapped,
+    ThreadLocal = static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagLinearMapped,
 
    Transfered = static_cast<u32>(Svc::MemoryState::Transfered) | FlagsMisc |
                 FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc |
@@ -94,7 +100,7 @@ enum class KMemoryState : u32 {
    NonDeviceIpc =
        static_cast<u32>(Svc::MemoryState::NonDeviceIpc) | FlagsMisc | FlagCanUseNonDeviceIpc,
 
-    Kernel = static_cast<u32>(Svc::MemoryState::Kernel) | FlagMapped,
+    Kernel = static_cast<u32>(Svc::MemoryState::Kernel),
 
    GeneratedCode = static_cast<u32>(Svc::MemoryState::GeneratedCode) | FlagMapped |
                    FlagReferenceCounted | FlagCanDebug | FlagLinearMapped,
@@ -105,34 +111,36 @@ enum class KMemoryState : u32 {
 
    Insecure = static_cast<u32>(Svc::MemoryState::Insecure) | FlagMapped | FlagReferenceCounted |
               FlagLinearMapped | FlagCanChangeAttribute | FlagCanDeviceMap |
-               FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
+               FlagCanAlignedDeviceMap | FlagCanQueryPhysical | FlagCanUseNonSecureIpc |
+               FlagCanUseNonDeviceIpc,
 };
 DECLARE_ENUM_FLAG_OPERATORS(KMemoryState);
 
 static_assert(static_cast<u32>(KMemoryState::Free) == 0x00000000);
-static_assert(static_cast<u32>(KMemoryState::Io) == 0x00182001);
+static_assert(static_cast<u32>(KMemoryState::IoMemory) == 0x00182001);
+static_assert(static_cast<u32>(KMemoryState::IoRegister) == 0x00180001);
 static_assert(static_cast<u32>(KMemoryState::Static) == 0x00042002);
 static_assert(static_cast<u32>(KMemoryState::Code) == 0x04DC7E03);
-static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x07FEBD04);
+static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x0FFEBD04);
 static_assert(static_cast<u32>(KMemoryState::Normal) == 0x077EBD05);
 static_assert(static_cast<u32>(KMemoryState::Shared) == 0x04402006);
 
 static_assert(static_cast<u32>(KMemoryState::AliasCode) == 0x04DD7E08);
-static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x07FFBD09);
+static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x0FFFBD09);
 static_assert(static_cast<u32>(KMemoryState::Ipc) == 0x045C3C0A);
 static_assert(static_cast<u32>(KMemoryState::Stack) == 0x045C3C0B);
-static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0400200C);
+static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0400000C);
 static_assert(static_cast<u32>(KMemoryState::Transfered) == 0x055C3C0D);
 static_assert(static_cast<u32>(KMemoryState::SharedTransfered) == 0x045C380E);
 static_assert(static_cast<u32>(KMemoryState::SharedCode) == 0x0440380F);
 static_assert(static_cast<u32>(KMemoryState::Inaccessible) == 0x00000010);
 static_assert(static_cast<u32>(KMemoryState::NonSecureIpc) == 0x045C3811);
 static_assert(static_cast<u32>(KMemoryState::NonDeviceIpc) == 0x044C2812);
-static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00002013);
+static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00000013);
 static_assert(static_cast<u32>(KMemoryState::GeneratedCode) == 0x04402214);
 static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x04402015);
 static_assert(static_cast<u32>(KMemoryState::Coverage) == 0x00002016);
-static_assert(static_cast<u32>(KMemoryState::Insecure) == 0x05583817);
+static_assert(static_cast<u32>(KMemoryState::Insecure) == 0x055C3817);
 
 enum class KMemoryPermission : u8 {
    None = 0,
@@ -182,8 +190,9 @@ enum class KMemoryAttribute : u8 {
    IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked),
    DeviceShared = static_cast<u8>(Svc::MemoryAttribute::DeviceShared),
    Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached),
+    PermissionLocked = static_cast<u8>(Svc::MemoryAttribute::PermissionLocked),
 
-    SetMask = Uncached,
+    SetMask = Uncached | PermissionLocked,
 };
 DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute);
 
@@ -261,6 +270,10 @@ struct KMemoryInfo {
        return m_state;
    }
 
+    constexpr Svc::MemoryState GetSvcState() const {
+        return static_cast<Svc::MemoryState>(m_state & KMemoryState::Mask);
+    }
+
    constexpr KMemoryPermission GetPermission() const {
        return m_permission;
    }
@@ -326,6 +339,10 @@ public:
        return this->GetEndAddress() - 1;
    }
 
+    constexpr KMemoryState GetState() const {
+        return m_memory_state;
+    }
+
    constexpr u16 GetIpcLockCount() const {
        return m_ipc_lock_count;
    }
@@ -443,6 +460,13 @@ public:
        }
    }
 
+    constexpr void UpdateAttribute(KMemoryAttribute mask, KMemoryAttribute attr) {
+        ASSERT(False(mask & KMemoryAttribute::IpcLocked));
+        ASSERT(False(mask & KMemoryAttribute::DeviceShared));
+
+        m_attribute = (m_attribute & ~mask) | attr;
+    }
+
    constexpr void Split(KMemoryBlock* block, KProcessAddress addr) {
        ASSERT(this->GetAddress() < addr);
        ASSERT(this->Contains(addr));
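
Note: the new KMemoryBlock::UpdateAttribute above is a masked read-modify-write: only the attribute bits selected by mask change, and the caller is expected to pass attr already restricted to mask. A standalone sketch of that identity, with illustrative names:

    #include <cstdint>

    // Replace only the bits of old_attr selected by mask with those of attr;
    // every bit outside the mask survives unchanged.
    constexpr std::uint8_t MaskedUpdate(std::uint8_t old_attr, std::uint8_t mask,
                                        std::uint8_t attr) {
        return static_cast<std::uint8_t>((old_attr & ~mask) | (attr & mask));
    }

    // Bits inside the mask (0b0110) are rewritten; bit 3 outside it is preserved.
    static_assert(MaskedUpdate(0b1010, 0b0110, 0b0100) == 0b1100);
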
diff --git a/src/core/hle/kernel/k_memory_block_manager.cpp b/src/core/hle/kernel/k_memory_block_manager.cpp
index ab75f550e..58a1e7216 100644
--- a/src/core/hle/kernel/k_memory_block_manager.cpp
+++ b/src/core/hle/kernel/k_memory_block_manager.cpp
@@ -160,8 +160,8 @@ void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator,
        }
 
        // Update block state.
-        it->Update(state, perm, attr, cur_address == address, static_cast<u8>(set_disable_attr),
-                   static_cast<u8>(clear_disable_attr));
+        it->Update(state, perm, attr, it->GetAddress() == address,
+                   static_cast<u8>(set_disable_attr), static_cast<u8>(clear_disable_attr));
        cur_address += cur_info.GetSize();
        remaining_pages -= cur_info.GetNumPages();
    }
@@ -175,7 +175,9 @@ void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allo
                                        KProcessAddress address, size_t num_pages,
                                        KMemoryState test_state, KMemoryPermission test_perm,
                                        KMemoryAttribute test_attr, KMemoryState state,
-                                        KMemoryPermission perm, KMemoryAttribute attr) {
+                                        KMemoryPermission perm, KMemoryAttribute attr,
+                                        KMemoryBlockDisableMergeAttribute set_disable_attr,
+                                        KMemoryBlockDisableMergeAttribute clear_disable_attr) {
    // Ensure for auditing that we never end up with an invalid tree.
    KScopedMemoryBlockManagerAuditor auditor(this);
    ASSERT(Common::IsAligned(GetInteger(address), PageSize));
@@ -214,7 +216,8 @@ void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allo
            }
 
            // Update block state.
-            it->Update(state, perm, attr, false, 0, 0);
+            it->Update(state, perm, attr, false, static_cast<u8>(set_disable_attr),
+                       static_cast<u8>(clear_disable_attr));
            cur_address += cur_info.GetSize();
            remaining_pages -= cur_info.GetNumPages();
        } else {
@@ -284,6 +287,65 @@ void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocat
    this->CoalesceForUpdate(allocator, address, num_pages);
 }
 
+void KMemoryBlockManager::UpdateAttribute(KMemoryBlockManagerUpdateAllocator* allocator,
+                                          KProcessAddress address, size_t num_pages,
+                                          KMemoryAttribute mask, KMemoryAttribute attr) {
+    // Ensure for auditing that we never end up with an invalid tree.
+    KScopedMemoryBlockManagerAuditor auditor(this);
+    ASSERT(Common::IsAligned(GetInteger(address), PageSize));
+
+    KProcessAddress cur_address = address;
+    size_t remaining_pages = num_pages;
+    iterator it = this->FindIterator(address);
+
+    while (remaining_pages > 0) {
+        const size_t remaining_size = remaining_pages * PageSize;
+        KMemoryInfo cur_info = it->GetMemoryInfo();
+
+        if ((it->GetAttribute() & mask) != attr) {
+            // If we need to, create a new block before and insert it.
+            if (cur_info.GetAddress() != GetInteger(cur_address)) {
+                KMemoryBlock* new_block = allocator->Allocate();
+
+                it->Split(new_block, cur_address);
+                it = m_memory_block_tree.insert(*new_block);
+                it++;
+
+                cur_info = it->GetMemoryInfo();
+                cur_address = cur_info.GetAddress();
+            }
+
+            // If we need to, create a new block after and insert it.
+            if (cur_info.GetSize() > remaining_size) {
+                KMemoryBlock* new_block = allocator->Allocate();
+
+                it->Split(new_block, cur_address + remaining_size);
+                it = m_memory_block_tree.insert(*new_block);
+
+                cur_info = it->GetMemoryInfo();
+            }
+
+            // Update block state.
+            it->UpdateAttribute(mask, attr);
+            cur_address += cur_info.GetSize();
+            remaining_pages -= cur_info.GetNumPages();
+        } else {
+            // If we already have the right attributes, just advance.
+            if (cur_address + remaining_size < cur_info.GetEndAddress()) {
+                remaining_pages = 0;
+                cur_address += remaining_size;
+            } else {
+                remaining_pages =
+                    (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
+                cur_address = cur_info.GetEndAddress();
+            }
+        }
+        it++;
+    }
+
+    this->CoalesceForUpdate(allocator, address, num_pages);
+}
+
 // Debug.
 bool KMemoryBlockManager::CheckState() const {
    // Loop over every block, ensuring that we are sorted and coalesced.
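
Note: UpdateAttribute walks the block tree with the same split-then-update pattern as Update and UpdateLock: a block that straddles the start or end of the target range is first split at the boundary, so the attribute change applies exactly to [address, address + num_pages * PageSize), and CoalesceForUpdate re-merges equal neighbours afterwards. A standalone sketch of just the two boundary tests, with illustrative types:

    #include <cstddef>
    #include <cstdint>

    constexpr std::size_t PageSize = 0x1000;

    struct Block {
        std::uint64_t address; // block start
        std::size_t size;      // block size in bytes
    };

    // Update range starts inside the block -> split off the front part.
    constexpr bool NeedsFrontSplit(const Block& b, std::uint64_t update_start) {
        return b.address != update_start;
    }

    // Block extends past the update range -> split off the tail part.
    constexpr bool NeedsBackSplit(const Block& b, std::size_t update_pages) {
        return b.size > update_pages * PageSize;
    }

    static_assert(NeedsFrontSplit({0x1000, 0x3000}, 0x2000));
    static_assert(NeedsBackSplit({0x2000, 0x3000}, 2));
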
diff --git a/src/core/hle/kernel/k_memory_block_manager.h b/src/core/hle/kernel/k_memory_block_manager.h
index 96496e990..cb7b6f430 100644
--- a/src/core/hle/kernel/k_memory_block_manager.h
+++ b/src/core/hle/kernel/k_memory_block_manager.h
@@ -115,7 +115,11 @@ public:
    void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
                       size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm,
                       KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm,
-                       KMemoryAttribute attr);
+                       KMemoryAttribute attr, KMemoryBlockDisableMergeAttribute set_disable_attr,
+                       KMemoryBlockDisableMergeAttribute clear_disable_attr);
+
+    void UpdateAttribute(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
+                         size_t num_pages, KMemoryAttribute mask, KMemoryAttribute attr);
 
    iterator FindIterator(KProcessAddress address) const {
        return m_memory_block_tree.find(KMemoryBlock(
diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h
index 54a71df56..c8122644f 100644
--- a/src/core/hle/kernel/k_memory_layout.h
+++ b/src/core/hle/kernel/k_memory_layout.h
@@ -137,11 +137,9 @@ public:
        return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscExceptionStack);
    }
 
-    KVirtualAddress GetSlabRegionAddress() const {
-        return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab))
-            .GetAddress();
+    const KMemoryRegion& GetSlabRegion() const {
+        return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab));
    }
-
    const KMemoryRegion& GetDeviceRegion(KMemoryRegionType type) const {
        return Dereference(GetPhysicalMemoryRegionTree().FindFirstDerived(type));
    }
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index 74d8169e0..637558e10 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -119,7 +119,8 @@ void KMemoryManager::Initialize(KVirtualAddress management_region, size_t manage
    // Free each region to its corresponding heap.
    size_t reserved_sizes[MaxManagerCount] = {};
    const KPhysicalAddress ini_start = GetInitialProcessBinaryPhysicalAddress();
-    const KPhysicalAddress ini_end = ini_start + InitialProcessBinarySizeMax;
+    const size_t ini_size = GetInitialProcessBinarySize();
+    const KPhysicalAddress ini_end = ini_start + ini_size;
    const KPhysicalAddress ini_last = ini_end - 1;
    for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
        if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
@@ -137,13 +138,13 @@ void KMemoryManager::Initialize(KVirtualAddress management_region, size_t manage
                }
 
                // Open/reserve the ini memory.
-                manager.OpenFirst(ini_start, InitialProcessBinarySizeMax / PageSize);
-                reserved_sizes[it.GetAttributes()] += InitialProcessBinarySizeMax;
+                manager.OpenFirst(ini_start, ini_size / PageSize);
+                reserved_sizes[it.GetAttributes()] += ini_size;
 
                // Free memory after the ini to the heap.
                if (ini_last != cur_last) {
                    ASSERT(cur_end != 0);
-                    manager.Free(ini_end, cur_end - ini_end);
+                    manager.Free(ini_end, (cur_end - ini_end) / PageSize);
                }
            } else {
                // Ensure there's no partial overlap with the ini image.
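
Note: the Free() change in the second hunk reads as a unit fix: like OpenFirst() just above it (which already divides by PageSize), Free() here apparently takes a page count, while the old call passed the raw byte distance cur_end - ini_end. A trivial standalone check of that conversion:

    #include <cstddef>

    constexpr std::size_t PageSize = 0x1000; // 4 KiB, matching the emulated kernel

    // Byte span -> page count; the span is assumed page-aligned.
    constexpr std::size_t BytesToPages(std::size_t bytes) {
        return bytes / PageSize;
    }

    static_assert(BytesToPages(0x6000 - 0x3000) == 3);
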
diff --git a/src/core/hle/kernel/k_memory_region_type.h b/src/core/hle/kernel/k_memory_region_type.h
index e5630c1ac..bcbf450f0 100644
--- a/src/core/hle/kernel/k_memory_region_type.h
+++ b/src/core/hle/kernel/k_memory_region_type.h
@@ -190,9 +190,15 @@ static_assert(KMemoryRegionType_DramKernelInitPt.GetValue() ==
 constexpr inline auto KMemoryRegionType_DramKernelSecureAppletMemory =
    KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 0).SetAttribute(
        KMemoryRegionAttr_LinearMapped);
+constexpr inline const auto KMemoryRegionType_DramKernelSecureUnknown =
+    KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 1).SetAttribute(
+        KMemoryRegionAttr_LinearMapped);
 static_assert(KMemoryRegionType_DramKernelSecureAppletMemory.GetValue() ==
              (0x18E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap |
               KMemoryRegionAttr_LinearMapped));
+static_assert(KMemoryRegionType_DramKernelSecureUnknown.GetValue() ==
+              (0x28E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap |
+               KMemoryRegionAttr_LinearMapped));
 
 constexpr inline auto KMemoryRegionType_DramReservedEarly =
    KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap);
@@ -217,16 +223,18 @@ constexpr inline auto KMemoryRegionType_DramPoolPartition =
 static_assert(KMemoryRegionType_DramPoolPartition.GetValue() ==
              (0x26 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
 
-constexpr inline auto KMemoryRegionType_DramPoolManagement =
-    KMemoryRegionType_DramPoolPartition.DeriveTransition(0, 2).DeriveTransition().SetAttribute(
+// UNUSED: .Derive(4, 1);
+// UNUSED: .Derive(4, 2);
+constexpr inline const auto KMemoryRegionType_DramPoolManagement =
+    KMemoryRegionType_DramPoolPartition.Derive(4, 0).SetAttribute(
        KMemoryRegionAttr_CarveoutProtected);
-constexpr inline auto KMemoryRegionType_DramUserPool =
-    KMemoryRegionType_DramPoolPartition.DeriveTransition(1, 2).DeriveTransition();
+constexpr inline const auto KMemoryRegionType_DramUserPool =
+    KMemoryRegionType_DramPoolPartition.Derive(4, 3);
 static_assert(KMemoryRegionType_DramPoolManagement.GetValue() ==
-              (0x166 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
+              (0xE6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
               KMemoryRegionAttr_CarveoutProtected));
 static_assert(KMemoryRegionType_DramUserPool.GetValue() ==
-              (0x1A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
+              (0x266 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
 
 constexpr inline auto KMemoryRegionType_DramApplicationPool =
    KMemoryRegionType_DramUserPool.Derive(4, 0);
@@ -237,60 +245,63 @@ constexpr inline auto KMemoryRegionType_DramSystemNonSecurePool =
 constexpr inline auto KMemoryRegionType_DramSystemPool =
    KMemoryRegionType_DramUserPool.Derive(4, 3).SetAttribute(KMemoryRegionAttr_CarveoutProtected);
 static_assert(KMemoryRegionType_DramApplicationPool.GetValue() ==
-              (0x7A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
+              (0xE66 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
 static_assert(KMemoryRegionType_DramAppletPool.GetValue() ==
-              (0xBA6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
+              (0x1666 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
 static_assert(KMemoryRegionType_DramSystemNonSecurePool.GetValue() ==
-              (0xDA6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
+              (0x1A66 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
 static_assert(KMemoryRegionType_DramSystemPool.GetValue() ==
-              (0x13A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
+              (0x2666 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
               KMemoryRegionAttr_CarveoutProtected));
 
 constexpr inline auto KMemoryRegionType_VirtualDramHeapBase =
-    KMemoryRegionType_Dram.DeriveSparse(1, 3, 0);
+    KMemoryRegionType_Dram.DeriveSparse(1, 4, 0);
 constexpr inline auto KMemoryRegionType_VirtualDramKernelPtHeap =
-    KMemoryRegionType_Dram.DeriveSparse(1, 3, 1);
+    KMemoryRegionType_Dram.DeriveSparse(1, 4, 1);
 constexpr inline auto KMemoryRegionType_VirtualDramKernelTraceBuffer =
-    KMemoryRegionType_Dram.DeriveSparse(1, 3, 2);
+    KMemoryRegionType_Dram.DeriveSparse(1, 4, 2);
 static_assert(KMemoryRegionType_VirtualDramHeapBase.GetValue() == 0x1A);
 static_assert(KMemoryRegionType_VirtualDramKernelPtHeap.GetValue() == 0x2A);
 static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A);
 
-// UNUSED: .DeriveSparse(2, 2, 0);
-constexpr inline auto KMemoryRegionType_VirtualDramUnknownDebug =
-    KMemoryRegionType_Dram.DeriveSparse(2, 2, 1);
-static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52));
-
-constexpr inline auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory =
-    KMemoryRegionType_Dram.DeriveSparse(3, 1, 0);
-static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x62));
-
-constexpr inline auto KMemoryRegionType_VirtualDramKernelInitPt =
-    KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0);
-constexpr inline auto KMemoryRegionType_VirtualDramPoolManagement =
-    KMemoryRegionType_VirtualDramHeapBase.Derive(3, 1);
-constexpr inline auto KMemoryRegionType_VirtualDramUserPool =
-    KMemoryRegionType_VirtualDramHeapBase.Derive(3, 2);
-static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x19A);
-static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x29A);
-static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x31A);
+// UNUSED: .Derive(4, 2);
+constexpr inline const auto KMemoryRegionType_VirtualDramUnknownDebug =
+    KMemoryRegionType_Dram.Advance(2).Derive(4, 0);
+constexpr inline const auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory =
+    KMemoryRegionType_Dram.Advance(2).Derive(4, 1);
+constexpr inline const auto KMemoryRegionType_VirtualDramKernelSecureUnknown =
+    KMemoryRegionType_Dram.Advance(2).Derive(4, 3);
+static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x32));
+static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x52));
+static_assert(KMemoryRegionType_VirtualDramKernelSecureUnknown.GetValue() == (0x92));
+
+// UNUSED: .Derive(4, 3);
+constexpr inline const auto KMemoryRegionType_VirtualDramKernelInitPt =
+    KMemoryRegionType_VirtualDramHeapBase.Derive(4, 0);
+constexpr inline const auto KMemoryRegionType_VirtualDramPoolManagement =
+    KMemoryRegionType_VirtualDramHeapBase.Derive(4, 1);
+constexpr inline const auto KMemoryRegionType_VirtualDramUserPool =
+    KMemoryRegionType_VirtualDramHeapBase.Derive(4, 2);
+static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x31A);
+static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x51A);
+static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x61A);
 
 // NOTE: For unknown reason, the pools are derived out-of-order here.
 // It's worth eventually trying to understand why Nintendo made this choice.
 // UNUSED: .Derive(6, 0);
 // UNUSED: .Derive(6, 1);
-constexpr inline auto KMemoryRegionType_VirtualDramAppletPool =
-    KMemoryRegionType_VirtualDramUserPool.Derive(6, 2);
-constexpr inline auto KMemoryRegionType_VirtualDramApplicationPool =
-    KMemoryRegionType_VirtualDramUserPool.Derive(6, 3);
-constexpr inline auto KMemoryRegionType_VirtualDramSystemNonSecurePool =
-    KMemoryRegionType_VirtualDramUserPool.Derive(6, 4);
-constexpr inline auto KMemoryRegionType_VirtualDramSystemPool =
-    KMemoryRegionType_VirtualDramUserPool.Derive(6, 5);
-static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x1B1A);
-static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x271A);
-static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x2B1A);
-static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x331A);
+constexpr inline const auto KMemoryRegionType_VirtualDramApplicationPool =
+    KMemoryRegionType_VirtualDramUserPool.Derive(4, 0);
+constexpr inline const auto KMemoryRegionType_VirtualDramAppletPool =
+    KMemoryRegionType_VirtualDramUserPool.Derive(4, 1);
+constexpr inline const auto KMemoryRegionType_VirtualDramSystemNonSecurePool =
+    KMemoryRegionType_VirtualDramUserPool.Derive(4, 2);
+constexpr inline const auto KMemoryRegionType_VirtualDramSystemPool =
+    KMemoryRegionType_VirtualDramUserPool.Derive(4, 3);
+static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x361A);
+static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x561A);
+static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x661A);
+static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x961A);
 
 constexpr inline auto KMemoryRegionType_ArchDeviceBase =
    KMemoryRegionType_Kernel.DeriveTransition(0, 1).SetSparseOnly();
@@ -354,12 +365,14 @@ constexpr inline auto KMemoryRegionType_KernelTemp =
 static_assert(KMemoryRegionType_KernelTemp.GetValue() == 0x31);
 
 constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) {
-    if (KMemoryRegionType_KernelTraceBuffer.IsAncestorOf(type_id)) {
-        return KMemoryRegionType_VirtualDramKernelTraceBuffer;
-    } else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
+    if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
        return KMemoryRegionType_VirtualDramKernelPtHeap;
    } else if (KMemoryRegionType_DramKernelSecureAppletMemory.IsAncestorOf(type_id)) {
        return KMemoryRegionType_VirtualDramKernelSecureAppletMemory;
+    } else if (KMemoryRegionType_DramKernelSecureUnknown.IsAncestorOf(type_id)) {
+        return KMemoryRegionType_VirtualDramKernelSecureUnknown;
+    } else if (KMemoryRegionType_KernelTraceBuffer.IsAncestorOf(type_id)) {
+        return KMemoryRegionType_VirtualDramKernelTraceBuffer;
    } else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) {
        return KMemoryRegionType_VirtualDramUnknownDebug;
    } else {
diff --git a/src/core/hle/kernel/k_page_group.h b/src/core/hle/kernel/k_page_group.h
index b32909f05..de9d63a8d 100644
--- a/src/core/hle/kernel/k_page_group.h
+++ b/src/core/hle/kernel/k_page_group.h
@@ -183,12 +183,17 @@ private:
 
 class KScopedPageGroup {
 public:
-    explicit KScopedPageGroup(const KPageGroup* gp) : m_pg(gp) {
+    explicit KScopedPageGroup(const KPageGroup* gp, bool not_first = true) : m_pg(gp) {
        if (m_pg) {
-            m_pg->Open();
+            if (not_first) {
+                m_pg->Open();
+            } else {
+                m_pg->OpenFirst();
+            }
        }
    }
-    explicit KScopedPageGroup(const KPageGroup& gp) : KScopedPageGroup(std::addressof(gp)) {}
+    explicit KScopedPageGroup(const KPageGroup& gp, bool not_first = true)
+        : KScopedPageGroup(std::addressof(gp), not_first) {}
    ~KScopedPageGroup() {
        if (m_pg) {
            m_pg->Close();
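
Note: KScopedPageGroup is an RAII guard over the page group's per-page reference counts; the new not_first flag lets the mapping path take the initial reference (OpenFirst) instead of an additional one (Open), while the destructor still releases via Close() on every exit path. A standalone sketch of the same pairing, with illustrative types:

    #include <memory>

    struct PageGroup {
        void Open() const {}      // take an additional reference
        void OpenFirst() const {} // take the very first reference
        void Close() const {}     // drop one reference
    };

    class ScopedPageGroup {
    public:
        explicit ScopedPageGroup(const PageGroup* pg, bool not_first = true) : m_pg(pg) {
            if (m_pg) {
                not_first ? m_pg->Open() : m_pg->OpenFirst();
            }
        }
        explicit ScopedPageGroup(const PageGroup& pg, bool not_first = true)
            : ScopedPageGroup(std::addressof(pg), not_first) {}
        ScopedPageGroup(const ScopedPageGroup&) = delete;
        ScopedPageGroup& operator=(const ScopedPageGroup&) = delete;
        ~ScopedPageGroup() {
            if (m_pg) {
                m_pg->Close(); // released even on early return
            }
        }

    private:
        const PageGroup* m_pg;
    };
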
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 0b0cef984..217ccbae3 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -505,7 +505,7 @@ Result KPageTable::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress
    R_TRY(this->CheckMemoryStateContiguous(
        std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias,
        KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
-        KMemoryAttribute::All, KMemoryAttribute::None));
+        KMemoryAttribute::All & ~KMemoryAttribute::PermissionLocked, KMemoryAttribute::None));
 
    // Determine whether any pages being unmapped are code.
    bool any_code_pages = false;
@@ -1724,29 +1724,43 @@ Result KPageTable::MapPhysicalMemory(KProcessAddress address, size_t size) {
                PageSize;
 
            // While we have pages to map, map them.
-            while (map_pages > 0) {
-                // Check if we're at the end of the physical block.
-                if (pg_pages == 0) {
-                    // Ensure there are more pages to map.
-                    ASSERT(pg_it != pg.end());
-
-                    // Advance our physical block.
-                    ++pg_it;
-                    pg_phys_addr = pg_it->GetAddress();
-                    pg_pages = pg_it->GetNumPages();
-                }
-
-                // Map whatever we can.
-                const size_t cur_pages = std::min(pg_pages, map_pages);
-                R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite,
-                              OperationType::MapFirst, pg_phys_addr));
-
-                // Advance.
-                cur_address += cur_pages * PageSize;
-                map_pages -= cur_pages;
-
-                pg_phys_addr += cur_pages * PageSize;
-                pg_pages -= cur_pages;
-            }
+            {
+                // Create a page group for the current mapping range.
+                KPageGroup cur_pg(m_kernel, m_block_info_manager);
+                {
+                    ON_RESULT_FAILURE_2 {
+                        cur_pg.OpenFirst();
+                        cur_pg.Close();
+                    };
+
+                    size_t remain_pages = map_pages;
+                    while (remain_pages > 0) {
+                        // Check if we're at the end of the physical block.
+                        if (pg_pages == 0) {
+                            // Ensure there are more pages to map.
+                            ASSERT(pg_it != pg.end());
+
+                            // Advance our physical block.
+                            ++pg_it;
+                            pg_phys_addr = pg_it->GetAddress();
+                            pg_pages = pg_it->GetNumPages();
+                        }
+
+                        // Add whatever we can to the current block.
+                        const size_t cur_pages = std::min(pg_pages, remain_pages);
+                        R_TRY(cur_pg.AddBlock(pg_phys_addr +
+                                                  ((pg_pages - cur_pages) * PageSize),
+                                              cur_pages));
+
+                        // Advance.
+                        remain_pages -= cur_pages;
+                        pg_pages -= cur_pages;
+                    }
+                }
+
+                // Map the pages.
+                R_TRY(this->Operate(cur_address, map_pages, cur_pg,
+                                    OperationType::MapFirstGroup));
+            }
        }
 
@@ -1770,7 +1784,11 @@ Result KPageTable::MapPhysicalMemory(KProcessAddress address, size_t size) {
        m_memory_block_manager.UpdateIfMatch(
            std::addressof(allocator), address, size / PageSize, KMemoryState::Free,
            KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal,
-            KMemoryPermission::UserReadWrite, KMemoryAttribute::None);
+            KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+            address == this->GetAliasRegionStart()
+                ? KMemoryBlockDisableMergeAttribute::Normal
+                : KMemoryBlockDisableMergeAttribute::None,
+            KMemoryBlockDisableMergeAttribute::None);
 
        R_SUCCEED();
    }
@@ -1868,6 +1886,13 @@ Result KPageTable::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
 
        // Iterate over the memory, unmapping as we go.
        auto it = m_memory_block_manager.FindIterator(cur_address);
+
+        const auto clear_merge_attr =
+            (it->GetState() == KMemoryState::Normal &&
+             it->GetAddress() == this->GetAliasRegionStart() && it->GetAddress() == address)
+                ? KMemoryBlockDisableMergeAttribute::Normal
+                : KMemoryBlockDisableMergeAttribute::None;
+
        while (true) {
            // Check that the iterator is valid.
            ASSERT(it != m_memory_block_manager.end());
@@ -1905,7 +1930,7 @@ Result KPageTable::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
        m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize,
                                      KMemoryState::Free, KMemoryPermission::None,
                                      KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
-                                      KMemoryBlockDisableMergeAttribute::None);
+                                      clear_merge_attr);
 
        // We succeeded.
        R_SUCCEED();
@@ -2379,8 +2404,7 @@ Result KPageTable::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
    KScopedPageTableUpdater updater(this);
 
    // Perform mapping operation.
-    const KPageProperties properties = {perm, state == KMemoryState::Io, false,
-                                        DisableMergeAttribute::DisableHead};
+    const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
    R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
 
    // Update the blocks.
@@ -2422,8 +2446,7 @@ Result KPageTable::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMem
    KScopedPageTableUpdater updater(this);
 
    // Perform mapping operation.
-    const KPageProperties properties = {perm, state == KMemoryState::Io, false,
-                                        DisableMergeAttribute::DisableHead};
+    const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
    R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
 
    // Update the blocks.
@@ -2652,11 +2675,18 @@ Result KPageTable::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mas
    size_t num_allocator_blocks;
    constexpr auto AttributeTestMask =
        ~(KMemoryAttribute::SetMask | KMemoryAttribute::DeviceShared);
-    R_TRY(this->CheckMemoryState(
-        std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr),
-        std::addressof(num_allocator_blocks), addr, size, KMemoryState::FlagCanChangeAttribute,
-        KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
-        AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
+    const KMemoryState state_test_mask =
+        static_cast<KMemoryState>(((mask & static_cast<u32>(KMemoryAttribute::Uncached))
+                                       ? static_cast<u32>(KMemoryState::FlagCanChangeAttribute)
+                                       : 0) |
+                                  ((mask & static_cast<u32>(KMemoryAttribute::PermissionLocked))
+                                       ? static_cast<u32>(KMemoryState::FlagCanPermissionLock)
+                                       : 0));
+    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+                                 std::addressof(old_attr), std::addressof(num_allocator_blocks),
+                                 addr, size, state_test_mask, state_test_mask,
+                                 KMemoryPermission::None, KMemoryPermission::None,
+                                 AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
 
    // Create an update allocator.
    Result allocator_result{ResultSuccess};
@@ -2664,18 +2694,17 @@ Result KPageTable::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mas
                                                 m_memory_block_slab_manager, num_allocator_blocks);
    R_TRY(allocator_result);
 
-    // Determine the new attribute.
-    const KMemoryAttribute new_attr =
-        static_cast<KMemoryAttribute>(((old_attr & static_cast<KMemoryAttribute>(~mask)) |
-                                       static_cast<KMemoryAttribute>(attr & mask)));
-
-    // Perform operation.
-    this->Operate(addr, num_pages, old_perm, OperationType::ChangePermissionsAndRefresh);
+    // If we need to, perform a change attribute operation.
+    if (True(KMemoryAttribute::Uncached & static_cast<KMemoryAttribute>(mask))) {
+        // Perform operation.
+        R_TRY(this->Operate(addr, num_pages, old_perm,
+                            OperationType::ChangePermissionsAndRefreshAndFlush, 0));
+    }
 
    // Update the blocks.
-    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, old_perm,
-                                  new_attr, KMemoryBlockDisableMergeAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::None);
+    m_memory_block_manager.UpdateAttribute(std::addressof(allocator), addr, num_pages,
+                                           static_cast<KMemoryAttribute>(mask),
+                                           static_cast<KMemoryAttribute>(attr));
 
    R_SUCCEED();
 }
@@ -2863,7 +2892,8 @@ Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress
                                                 &KMemoryBlock::ShareToDevice, KMemoryPermission::None);
 
    // Set whether the locked memory was io.
-    *out_is_io = old_state == KMemoryState::Io;
+    *out_is_io =
+        static_cast<Svc::MemoryState>(old_state & KMemoryState::Mask) == Svc::MemoryState::Io;
 
    R_SUCCEED();
 }
@@ -3021,9 +3051,10 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, const KPageGr
    ASSERT(num_pages == page_group.GetNumPages());
 
    switch (operation) {
-    case OperationType::MapGroup: {
+    case OperationType::MapGroup:
+    case OperationType::MapFirstGroup: {
        // We want to maintain a new reference to every page in the group.
-        KScopedPageGroup spg(page_group);
+        KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup);
 
        for (const auto& node : page_group) {
            const size_t size{node.GetNumPages() * PageSize};
@@ -3065,7 +3096,6 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermis
        m_memory->UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize);
        break;
    }
-    case OperationType::MapFirst:
    case OperationType::Map: {
        ASSERT(map_addr);
        ASSERT(Common::IsAligned(GetInteger(map_addr), PageSize));
@@ -3073,11 +3103,7 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermis
 
        // Open references to pages, if we should.
        if (IsHeapPhysicalAddress(m_kernel.MemoryLayout(), map_addr)) {
-            if (operation == OperationType::MapFirst) {
-                m_kernel.MemoryManager().OpenFirst(map_addr, num_pages);
-            } else {
-                m_kernel.MemoryManager().Open(map_addr, num_pages);
-            }
+            m_kernel.MemoryManager().Open(map_addr, num_pages);
        }
        break;
    }
@@ -3087,6 +3113,7 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermis
    }
    case OperationType::ChangePermissions:
    case OperationType::ChangePermissionsAndRefresh:
+    case OperationType::ChangePermissionsAndRefreshAndFlush:
        break;
    default:
        ASSERT(false);
@@ -3106,79 +3133,79 @@ void KPageTable::FinalizeUpdate(PageLinkedList* page_list) {
    }
 }
 
-KProcessAddress KPageTable::GetRegionAddress(KMemoryState state) const {
+KProcessAddress KPageTable::GetRegionAddress(Svc::MemoryState state) const {
    switch (state) {
-    case KMemoryState::Free:
-    case KMemoryState::Kernel:
+    case Svc::MemoryState::Free:
+    case Svc::MemoryState::Kernel:
        return m_address_space_start;
-    case KMemoryState::Normal:
+    case Svc::MemoryState::Normal:
        return m_heap_region_start;
-    case KMemoryState::Ipc:
-    case KMemoryState::NonSecureIpc:
-    case KMemoryState::NonDeviceIpc:
+    case Svc::MemoryState::Ipc:
+    case Svc::MemoryState::NonSecureIpc:
+    case Svc::MemoryState::NonDeviceIpc:
        return m_alias_region_start;
-    case KMemoryState::Stack:
+    case Svc::MemoryState::Stack:
        return m_stack_region_start;
-    case KMemoryState::Static:
-    case KMemoryState::ThreadLocal:
+    case Svc::MemoryState::Static:
+    case Svc::MemoryState::ThreadLocal:
        return m_kernel_map_region_start;
-    case KMemoryState::Io:
-    case KMemoryState::Shared:
-    case KMemoryState::AliasCode:
-    case KMemoryState::AliasCodeData:
-    case KMemoryState::Transfered:
-    case KMemoryState::SharedTransfered:
-    case KMemoryState::SharedCode:
-    case KMemoryState::GeneratedCode:
-    case KMemoryState::CodeOut:
+    case Svc::MemoryState::Io:
+    case Svc::MemoryState::Shared:
+    case Svc::MemoryState::AliasCode:
+    case Svc::MemoryState::AliasCodeData:
+    case Svc::MemoryState::Transfered:
+    case Svc::MemoryState::SharedTransfered:
+    case Svc::MemoryState::SharedCode:
+    case Svc::MemoryState::GeneratedCode:
+    case Svc::MemoryState::CodeOut:
| 3134 | case KMemoryState::Coverage: | 3161 | case Svc::MemoryState::Coverage: |
| 3135 | case KMemoryState::Insecure: | 3162 | case Svc::MemoryState::Insecure: |
| 3136 | return m_alias_code_region_start; | 3163 | return m_alias_code_region_start; |
| 3137 | case KMemoryState::Code: | 3164 | case Svc::MemoryState::Code: |
| 3138 | case KMemoryState::CodeData: | 3165 | case Svc::MemoryState::CodeData: |
| 3139 | return m_code_region_start; | 3166 | return m_code_region_start; |
| 3140 | default: | 3167 | default: |
| 3141 | UNREACHABLE(); | 3168 | UNREACHABLE(); |
| 3142 | } | 3169 | } |
| 3143 | } | 3170 | } |
| 3144 | 3171 | ||
| 3145 | size_t KPageTable::GetRegionSize(KMemoryState state) const { | 3172 | size_t KPageTable::GetRegionSize(Svc::MemoryState state) const { |
| 3146 | switch (state) { | 3173 | switch (state) { |
| 3147 | case KMemoryState::Free: | 3174 | case Svc::MemoryState::Free: |
| 3148 | case KMemoryState::Kernel: | 3175 | case Svc::MemoryState::Kernel: |
| 3149 | return m_address_space_end - m_address_space_start; | 3176 | return m_address_space_end - m_address_space_start; |
| 3150 | case KMemoryState::Normal: | 3177 | case Svc::MemoryState::Normal: |
| 3151 | return m_heap_region_end - m_heap_region_start; | 3178 | return m_heap_region_end - m_heap_region_start; |
| 3152 | case KMemoryState::Ipc: | 3179 | case Svc::MemoryState::Ipc: |
| 3153 | case KMemoryState::NonSecureIpc: | 3180 | case Svc::MemoryState::NonSecureIpc: |
| 3154 | case KMemoryState::NonDeviceIpc: | 3181 | case Svc::MemoryState::NonDeviceIpc: |
| 3155 | return m_alias_region_end - m_alias_region_start; | 3182 | return m_alias_region_end - m_alias_region_start; |
| 3156 | case KMemoryState::Stack: | 3183 | case Svc::MemoryState::Stack: |
| 3157 | return m_stack_region_end - m_stack_region_start; | 3184 | return m_stack_region_end - m_stack_region_start; |
| 3158 | case KMemoryState::Static: | 3185 | case Svc::MemoryState::Static: |
| 3159 | case KMemoryState::ThreadLocal: | 3186 | case Svc::MemoryState::ThreadLocal: |
| 3160 | return m_kernel_map_region_end - m_kernel_map_region_start; | 3187 | return m_kernel_map_region_end - m_kernel_map_region_start; |
| 3161 | case KMemoryState::Io: | 3188 | case Svc::MemoryState::Io: |
| 3162 | case KMemoryState::Shared: | 3189 | case Svc::MemoryState::Shared: |
| 3163 | case KMemoryState::AliasCode: | 3190 | case Svc::MemoryState::AliasCode: |
| 3164 | case KMemoryState::AliasCodeData: | 3191 | case Svc::MemoryState::AliasCodeData: |
| 3165 | case KMemoryState::Transfered: | 3192 | case Svc::MemoryState::Transfered: |
| 3166 | case KMemoryState::SharedTransfered: | 3193 | case Svc::MemoryState::SharedTransfered: |
| 3167 | case KMemoryState::SharedCode: | 3194 | case Svc::MemoryState::SharedCode: |
| 3168 | case KMemoryState::GeneratedCode: | 3195 | case Svc::MemoryState::GeneratedCode: |
| 3169 | case KMemoryState::CodeOut: | 3196 | case Svc::MemoryState::CodeOut: |
| 3170 | case KMemoryState::Coverage: | 3197 | case Svc::MemoryState::Coverage: |
| 3171 | case KMemoryState::Insecure: | 3198 | case Svc::MemoryState::Insecure: |
| 3172 | return m_alias_code_region_end - m_alias_code_region_start; | 3199 | return m_alias_code_region_end - m_alias_code_region_start; |
| 3173 | case KMemoryState::Code: | 3200 | case Svc::MemoryState::Code: |
| 3174 | case KMemoryState::CodeData: | 3201 | case Svc::MemoryState::CodeData: |
| 3175 | return m_code_region_end - m_code_region_start; | 3202 | return m_code_region_end - m_code_region_start; |
| 3176 | default: | 3203 | default: |
| 3177 | UNREACHABLE(); | 3204 | UNREACHABLE(); |
| 3178 | } | 3205 | } |
| 3179 | } | 3206 | } |
| 3180 | 3207 | ||
| 3181 | bool KPageTable::CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { | 3208 | bool KPageTable::CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const { |
| 3182 | const KProcessAddress end = addr + size; | 3209 | const KProcessAddress end = addr + size; |
| 3183 | const KProcessAddress last = end - 1; | 3210 | const KProcessAddress last = end - 1; |
| 3184 | 3211 | ||
| @@ -3192,32 +3219,32 @@ bool KPageTable::CanContain(KProcessAddress addr, size_t size, KMemoryState stat | |||
| 3192 | const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr || | 3219 | const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr || |
| 3193 | m_alias_region_start == m_alias_region_end); | 3220 | m_alias_region_start == m_alias_region_end); |
| 3194 | switch (state) { | 3221 | switch (state) { |
| 3195 | case KMemoryState::Free: | 3222 | case Svc::MemoryState::Free: |
| 3196 | case KMemoryState::Kernel: | 3223 | case Svc::MemoryState::Kernel: |
| 3197 | return is_in_region; | 3224 | return is_in_region; |
| 3198 | case KMemoryState::Io: | 3225 | case Svc::MemoryState::Io: |
| 3199 | case KMemoryState::Static: | 3226 | case Svc::MemoryState::Static: |
| 3200 | case KMemoryState::Code: | 3227 | case Svc::MemoryState::Code: |
| 3201 | case KMemoryState::CodeData: | 3228 | case Svc::MemoryState::CodeData: |
| 3202 | case KMemoryState::Shared: | 3229 | case Svc::MemoryState::Shared: |
| 3203 | case KMemoryState::AliasCode: | 3230 | case Svc::MemoryState::AliasCode: |
| 3204 | case KMemoryState::AliasCodeData: | 3231 | case Svc::MemoryState::AliasCodeData: |
| 3205 | case KMemoryState::Stack: | 3232 | case Svc::MemoryState::Stack: |
| 3206 | case KMemoryState::ThreadLocal: | 3233 | case Svc::MemoryState::ThreadLocal: |
| 3207 | case KMemoryState::Transfered: | 3234 | case Svc::MemoryState::Transfered: |
| 3208 | case KMemoryState::SharedTransfered: | 3235 | case Svc::MemoryState::SharedTransfered: |
| 3209 | case KMemoryState::SharedCode: | 3236 | case Svc::MemoryState::SharedCode: |
| 3210 | case KMemoryState::GeneratedCode: | 3237 | case Svc::MemoryState::GeneratedCode: |
| 3211 | case KMemoryState::CodeOut: | 3238 | case Svc::MemoryState::CodeOut: |
| 3212 | case KMemoryState::Coverage: | 3239 | case Svc::MemoryState::Coverage: |
| 3213 | case KMemoryState::Insecure: | 3240 | case Svc::MemoryState::Insecure: |
| 3214 | return is_in_region && !is_in_heap && !is_in_alias; | 3241 | return is_in_region && !is_in_heap && !is_in_alias; |
| 3215 | case KMemoryState::Normal: | 3242 | case Svc::MemoryState::Normal: |
| 3216 | ASSERT(is_in_heap); | 3243 | ASSERT(is_in_heap); |
| 3217 | return is_in_region && !is_in_alias; | 3244 | return is_in_region && !is_in_alias; |
| 3218 | case KMemoryState::Ipc: | 3245 | case Svc::MemoryState::Ipc: |
| 3219 | case KMemoryState::NonSecureIpc: | 3246 | case Svc::MemoryState::NonSecureIpc: |
| 3220 | case KMemoryState::NonDeviceIpc: | 3247 | case Svc::MemoryState::NonDeviceIpc: |
| 3221 | ASSERT(is_in_alias); | 3248 | ASSERT(is_in_alias); |
| 3222 | return is_in_region && !is_in_heap; | 3249 | return is_in_region && !is_in_heap; |
| 3223 | default: | 3250 | default: |
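CanContain's heap and alias tests above use the standard half-open interval overlap check, written in negated form so an empty region can never "contain" anything. A worked example over plain integer addresses:

    #include <cassert>
    #include <cstdint>

    // Two half-open ranges [addr, end) and [r_start, r_end) overlap unless one
    // ends before the other begins; an empty region (r_start == r_end) never
    // overlaps. This mirrors the is_in_heap / is_in_alias expressions above.
    constexpr bool Overlaps(std::uint64_t addr, std::uint64_t end,
                            std::uint64_t r_start, std::uint64_t r_end) {
        return !(end <= r_start || r_end <= addr || r_start == r_end);
    }

    int main() {
        // Range [0x2000, 0x3000) against a heap at [0x2800, 0x4000): overlap.
        assert(Overlaps(0x2000, 0x3000, 0x2800, 0x4000));
        // Same range against an empty alias region: no overlap, by construction.
        assert(!Overlaps(0x2000, 0x3000, 0x5000, 0x5000));
        return 0;
    }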
| @@ -3281,21 +3308,16 @@ Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, KProces | |||
| 3281 | 3308 | ||
| 3282 | Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, | 3309 | Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, |
| 3283 | KMemoryAttribute* out_attr, size_t* out_blocks_needed, | 3310 | KMemoryAttribute* out_attr, size_t* out_blocks_needed, |
| 3284 | KProcessAddress addr, size_t size, KMemoryState state_mask, | 3311 | KMemoryBlockManager::const_iterator it, |
| 3312 | KProcessAddress last_addr, KMemoryState state_mask, | ||
| 3285 | KMemoryState state, KMemoryPermission perm_mask, | 3313 | KMemoryState state, KMemoryPermission perm_mask, |
| 3286 | KMemoryPermission perm, KMemoryAttribute attr_mask, | 3314 | KMemoryPermission perm, KMemoryAttribute attr_mask, |
| 3287 | KMemoryAttribute attr, KMemoryAttribute ignore_attr) const { | 3315 | KMemoryAttribute attr, KMemoryAttribute ignore_attr) const { |
| 3288 | ASSERT(this->IsLockedByCurrentThread()); | 3316 | ASSERT(this->IsLockedByCurrentThread()); |
| 3289 | 3317 | ||
| 3290 | // Get information about the first block. | 3318 | // Get information about the first block. |
| 3291 | const KProcessAddress last_addr = addr + size - 1; | ||
| 3292 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr); | ||
| 3293 | KMemoryInfo info = it->GetMemoryInfo(); | 3319 | KMemoryInfo info = it->GetMemoryInfo(); |
| 3294 | 3320 | ||
| 3295 | // If the start address isn't aligned, we need a block. | ||
| 3296 | const size_t blocks_for_start_align = | ||
| 3297 | (Common::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0; | ||
| 3298 | |||
| 3299 | // Validate all blocks in the range have correct state. | 3321 | // Validate all blocks in the range have correct state. |
| 3300 | const KMemoryState first_state = info.m_state; | 3322 | const KMemoryState first_state = info.m_state; |
| 3301 | const KMemoryPermission first_perm = info.m_permission; | 3323 | const KMemoryPermission first_perm = info.m_permission; |
| @@ -3321,10 +3343,6 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* | |||
| 3321 | info = it->GetMemoryInfo(); | 3343 | info = it->GetMemoryInfo(); |
| 3322 | } | 3344 | } |
| 3323 | 3345 | ||
| 3324 | // If the end address isn't aligned, we need a block. | ||
| 3325 | const size_t blocks_for_end_align = | ||
| 3326 | (Common::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0; | ||
| 3327 | |||
| 3328 | // Write output state. | 3346 | // Write output state. |
| 3329 | if (out_state != nullptr) { | 3347 | if (out_state != nullptr) { |
| 3330 | *out_state = first_state; | 3348 | *out_state = first_state; |
| @@ -3335,9 +3353,39 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* | |||
| 3335 | if (out_attr != nullptr) { | 3353 | if (out_attr != nullptr) { |
| 3336 | *out_attr = static_cast<KMemoryAttribute>(first_attr & ~ignore_attr); | 3354 | *out_attr = static_cast<KMemoryAttribute>(first_attr & ~ignore_attr); |
| 3337 | } | 3355 | } |
| 3356 | |||
| 3357 | // If the end address isn't aligned, we need a block. | ||
| 3338 | if (out_blocks_needed != nullptr) { | 3358 | if (out_blocks_needed != nullptr) { |
| 3339 | *out_blocks_needed = blocks_for_start_align + blocks_for_end_align; | 3359 | const size_t blocks_for_end_align = |
| 3360 | (Common::AlignDown(GetInteger(last_addr), PageSize) + PageSize != info.GetEndAddress()) | ||
| 3361 | ? 1 | ||
| 3362 | : 0; | ||
| 3363 | *out_blocks_needed = blocks_for_end_align; | ||
| 3364 | } | ||
| 3365 | |||
| 3366 | R_SUCCEED(); | ||
| 3367 | } | ||
| 3368 | |||
| 3369 | Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, | ||
| 3370 | KMemoryAttribute* out_attr, size_t* out_blocks_needed, | ||
| 3371 | KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 3372 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 3373 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 3374 | KMemoryAttribute attr, KMemoryAttribute ignore_attr) const { | ||
| 3375 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 3376 | |||
| 3377 | // Check memory state. | ||
| 3378 | const KProcessAddress last_addr = addr + size - 1; | ||
| 3379 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr); | ||
| 3380 | R_TRY(this->CheckMemoryState(out_state, out_perm, out_attr, out_blocks_needed, it, last_addr, | ||
| 3381 | state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr)); | ||
| 3382 | |||
| 3383 | // If the start address isn't aligned, we need a block. | ||
| 3384 | if (out_blocks_needed != nullptr && | ||
| 3385 | Common::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) { | ||
| 3386 | ++(*out_blocks_needed); | ||
| 3340 | } | 3387 | } |
| 3388 | |||
| 3341 | R_SUCCEED(); | 3389 | R_SUCCEED(); |
| 3342 | } | 3390 | } |
| 3343 | 3391 | ||
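The refactor above splits CheckMemoryState into an iterator-driven core and a thin address-based wrapper: the core counts an extra block when the range's end splits its final block, and the wrapper adds one more when the range's start splits its first block. A runnable toy of just that alignment accounting (state and permission checks omitted; names invented):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    using Address = std::uint64_t;
    constexpr Address PageSize = 0x1000;

    constexpr Address AlignDown(Address v, Address a) {
        return v & ~(a - 1);
    }

    // Core: one extra block if the range's last page does not reach the block end.
    std::size_t BlocksForEndAlign(Address last_addr, Address block_end) {
        return (AlignDown(last_addr, PageSize) + PageSize != block_end) ? 1 : 0;
    }

    // Wrapper: one more block if the range's first page does not start the block.
    std::size_t BlocksNeeded(Address addr, std::size_t size, Address block_start,
                             Address block_end) {
        const Address last_addr = addr + size - 1;
        std::size_t blocks = BlocksForEndAlign(last_addr, block_end);
        if (AlignDown(addr, PageSize) != block_start) {
            ++blocks;
        }
        return blocks;
    }

    int main() {
        // A range strictly inside one block [0x10000, 0x20000) splits it twice.
        assert(BlocksNeeded(0x12000, 0x3000, 0x10000, 0x20000) == 2);
        // A range covering the whole block needs no extra blocks.
        assert(BlocksNeeded(0x10000, 0x10000, 0x10000, 0x20000) == 0);
        return 0;
    }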
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h index 7da675f27..3d64b6fb0 100644 --- a/src/core/hle/kernel/k_page_table.h +++ b/src/core/hle/kernel/k_page_table.h | |||
| @@ -126,8 +126,6 @@ public: | |||
| 126 | return m_block_info_manager; | 126 | return m_block_info_manager; |
| 127 | } | 127 | } |
| 128 | 128 | ||
| 129 | bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const; | ||
| 130 | |||
| 131 | Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, | 129 | Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, |
| 132 | KPhysicalAddress phys_addr, KProcessAddress region_start, | 130 | KPhysicalAddress phys_addr, KProcessAddress region_start, |
| 133 | size_t region_num_pages, KMemoryState state, KMemoryPermission perm) { | 131 | size_t region_num_pages, KMemoryState state, KMemoryPermission perm) { |
| @@ -162,6 +160,21 @@ public: | |||
| 162 | void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size, | 160 | void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size, |
| 163 | const KPageGroup& pg); | 161 | const KPageGroup& pg); |
| 164 | 162 | ||
| 163 | KProcessAddress GetRegionAddress(Svc::MemoryState state) const; | ||
| 164 | size_t GetRegionSize(Svc::MemoryState state) const; | ||
| 165 | bool CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const; | ||
| 166 | |||
| 167 | KProcessAddress GetRegionAddress(KMemoryState state) const { | ||
| 168 | return this->GetRegionAddress(static_cast<Svc::MemoryState>(state & KMemoryState::Mask)); | ||
| 169 | } | ||
| 170 | size_t GetRegionSize(KMemoryState state) const { | ||
| 171 | return this->GetRegionSize(static_cast<Svc::MemoryState>(state & KMemoryState::Mask)); | ||
| 172 | } | ||
| 173 | bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { | ||
| 174 | return this->CanContain(addr, size, | ||
| 175 | static_cast<Svc::MemoryState>(state & KMemoryState::Mask)); | ||
| 176 | } | ||
| 177 | |||
| 165 | protected: | 178 | protected: |
| 166 | struct PageLinkedList { | 179 | struct PageLinkedList { |
| 167 | private: | 180 | private: |
| @@ -204,12 +217,13 @@ protected: | |||
| 204 | private: | 217 | private: |
| 205 | enum class OperationType : u32 { | 218 | enum class OperationType : u32 { |
| 206 | Map = 0, | 219 | Map = 0, |
| 207 | MapFirst = 1, | 220 | MapGroup = 1, |
| 208 | MapGroup = 2, | 221 | MapFirstGroup = 2, |
| 209 | Unmap = 3, | 222 | Unmap = 3, |
| 210 | ChangePermissions = 4, | 223 | ChangePermissions = 4, |
| 211 | ChangePermissionsAndRefresh = 5, | 224 | ChangePermissionsAndRefresh = 5, |
| 212 | Separate = 6, | 225 | ChangePermissionsAndRefreshAndFlush = 6, |
| 226 | Separate = 7, | ||
| 213 | }; | 227 | }; |
| 214 | 228 | ||
| 215 | static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = | 229 | static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = |
| @@ -228,8 +242,6 @@ private: | |||
| 228 | Result Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm, | 242 | Result Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm, |
| 229 | OperationType operation, KPhysicalAddress map_addr = 0); | 243 | OperationType operation, KPhysicalAddress map_addr = 0); |
| 230 | void FinalizeUpdate(PageLinkedList* page_list); | 244 | void FinalizeUpdate(PageLinkedList* page_list); |
| 231 | KProcessAddress GetRegionAddress(KMemoryState state) const; | ||
| 232 | size_t GetRegionSize(KMemoryState state) const; | ||
| 233 | 245 | ||
| 234 | KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages, | 246 | KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages, |
| 235 | size_t num_pages, size_t alignment, size_t offset, | 247 | size_t num_pages, size_t alignment, size_t offset, |
| @@ -252,6 +264,13 @@ private: | |||
| 252 | KMemoryAttribute attr_mask, KMemoryAttribute attr) const; | 264 | KMemoryAttribute attr_mask, KMemoryAttribute attr) const; |
| 253 | Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, | 265 | Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, |
| 254 | KMemoryAttribute* out_attr, size_t* out_blocks_needed, | 266 | KMemoryAttribute* out_attr, size_t* out_blocks_needed, |
| 267 | KMemoryBlockManager::const_iterator it, KProcessAddress last_addr, | ||
| 268 | KMemoryState state_mask, KMemoryState state, | ||
| 269 | KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 270 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | ||
| 271 | KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const; | ||
| 272 | Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, | ||
| 273 | KMemoryAttribute* out_attr, size_t* out_blocks_needed, | ||
| 255 | KProcessAddress addr, size_t size, KMemoryState state_mask, | 274 | KProcessAddress addr, size_t size, KMemoryState state_mask, |
| 256 | KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, | 275 | KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, |
| 257 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | 276 | KMemoryAttribute attr_mask, KMemoryAttribute attr, |
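The header keeps the old KMemoryState-taking signatures as inline shims over the new Svc::MemoryState overloads. This works because KMemoryState packs the Svc-visible state identifier in its low bits (KMemoryState::Mask) and kernel-internal flags above them, so masking collapses every flagged variant onto one switch case. A self-contained illustration (enumerator values invented for the example, not the real layout):

    #include <cassert>
    #include <cstdint>

    enum class SvcMemoryState : std::uint32_t { Free = 0x0, Io = 0x1, Normal = 0x5 };

    // Invented layout: low byte = Svc-visible state, high bits = internal flags.
    enum class KMemoryState : std::uint32_t {
        Mask = 0xff,
        Io = 0x1 | (1u << 8),
        Normal = 0x5 | (1u << 8) | (1u << 9),
    };

    constexpr SvcMemoryState ToSvcState(KMemoryState s) {
        return static_cast<SvcMemoryState>(static_cast<std::uint32_t>(s) &
                                           static_cast<std::uint32_t>(KMemoryState::Mask));
    }

    int main() {
        // Flagged kernel states collapse to their Svc identifiers after masking.
        assert(ToSvcState(KMemoryState::Io) == SvcMemoryState::Io);
        assert(ToSvcState(KMemoryState::Normal) == SvcMemoryState::Normal);
        return 0;
    }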
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp index 4a099286b..7fa34d693 100644 --- a/src/core/hle/kernel/k_process.cpp +++ b/src/core/hle/kernel/k_process.cpp | |||
| @@ -149,7 +149,7 @@ u64 KProcess::GetTotalPhysicalMemoryUsed() { | |||
| 149 | } | 149 | } |
| 150 | 150 | ||
| 151 | u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() { | 151 | u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() { |
| 152 | return this->GetTotalPhysicalMemoryUsed() - this->GetSystemResourceUsage(); | 152 | return this->GetTotalPhysicalMemoryUsed() - this->GetSystemResourceSize(); |
| 153 | } | 153 | } |
| 154 | 154 | ||
| 155 | bool KProcess::ReleaseUserException(KThread* thread) { | 155 | bool KProcess::ReleaseUserException(KThread* thread) { |
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index cb025c3d6..24433d32b 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp | |||
| @@ -623,14 +623,33 @@ struct KernelCore::Impl { | |||
| 623 | ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert( | 623 | ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert( |
| 624 | GetInteger(slab_start_phys_addr), slab_region_size, KMemoryRegionType_DramKernelSlab)); | 624 | GetInteger(slab_start_phys_addr), slab_region_size, KMemoryRegionType_DramKernelSlab)); |
| 625 | 625 | ||
| 626 | // Insert a physical region for the secure applet memory. | ||
| 627 | const auto secure_applet_end_phys_addr = | ||
| 628 | slab_end_phys_addr + KSystemControl::SecureAppletMemorySize; | ||
| 629 | if constexpr (KSystemControl::SecureAppletMemorySize > 0) { | ||
| 630 | ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert( | ||
| 631 | GetInteger(slab_end_phys_addr), KSystemControl::SecureAppletMemorySize, | ||
| 632 | KMemoryRegionType_DramKernelSecureAppletMemory)); | ||
| 633 | } | ||
| 634 | |||
| 635 | // Insert a physical region for the unknown debug2 region. | ||
| 636 | constexpr size_t SecureUnknownRegionSize = 0; | ||
| 637 | const size_t secure_unknown_size = SecureUnknownRegionSize; | ||
| 638 | const auto secure_unknown_end_phys_addr = secure_applet_end_phys_addr + secure_unknown_size; | ||
| 639 | if constexpr (SecureUnknownRegionSize > 0) { | ||
| 640 | ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert( | ||
| 641 | GetInteger(secure_applet_end_phys_addr), secure_unknown_size, | ||
| 642 | KMemoryRegionType_DramKernelSecureUnknown)); | ||
| 643 | } | ||
| 644 | |||
| 626 | // Determine size available for kernel page table heaps, requiring > 8 MB. | 645 | // Determine size available for kernel page table heaps, requiring > 8 MB. |
| 627 | const KPhysicalAddress resource_end_phys_addr = slab_start_phys_addr + resource_region_size; | 646 | const KPhysicalAddress resource_end_phys_addr = slab_start_phys_addr + resource_region_size; |
| 628 | const size_t page_table_heap_size = resource_end_phys_addr - slab_end_phys_addr; | 647 | const size_t page_table_heap_size = resource_end_phys_addr - secure_unknown_end_phys_addr; |
| 629 | ASSERT(page_table_heap_size / 4_MiB > 2); | 648 | ASSERT(page_table_heap_size / 4_MiB > 2); |
| 630 | 649 | ||
| 631 | // Insert a physical region for the kernel page table heap region | 650 | // Insert a physical region for the kernel page table heap region |
| 632 | ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert( | 651 | ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert( |
| 633 | GetInteger(slab_end_phys_addr), page_table_heap_size, | 652 | GetInteger(secure_unknown_end_phys_addr), page_table_heap_size, |
| 634 | KMemoryRegionType_DramKernelPtHeap)); | 653 | KMemoryRegionType_DramKernelPtHeap)); |
| 635 | 654 | ||
| 636 | // All DRAM regions that we haven't tagged by this point will be mapped under the linear | 655 | // All DRAM regions that we haven't tagged by this point will be mapped under the linear |
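The kernel.cpp hunk threads the new secure-applet region (and a currently zero-sized "unknown" placeholder) between the slab heap and the page-table heap, so each region's start is the previous region's end. A small sketch of that cursor-style carving, with invented sizes standing in for the KSystemControl constants:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Carve consecutive physical regions from a running cursor: zero-sized
    // regions are skipped without disturbing the cursor, mirroring the
    // `if constexpr (... > 0)` guards in the hunk above.
    struct Region {
        std::uint64_t start;
        std::size_t size;
    };

    int main() {
        constexpr std::size_t SlabHeapSize = 0xa21000;            // invented
        constexpr std::size_t SecureAppletMemorySize = 0x400000;  // invented
        constexpr std::size_t SecureUnknownRegionSize = 0;        // zero-sized today

        std::vector<Region> regions;
        std::uint64_t cursor = 0x8000'0000; // invented slab base
        auto carve = [&](std::size_t size) {
            if (size > 0) {
                regions.push_back({cursor, size});
            }
            cursor += size; // advance even for zero-sized regions
        };

        carve(SlabHeapSize);
        carve(SecureAppletMemorySize);
        carve(SecureUnknownRegionSize);

        // The page-table heap begins exactly where the last region ended.
        assert(cursor == 0x8000'0000 + SlabHeapSize + SecureAppletMemorySize);
        assert(regions.size() == 2);
        return 0;
    }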
diff --git a/src/core/hle/kernel/svc/svc_memory.cpp b/src/core/hle/kernel/svc/svc_memory.cpp index 2cab74127..97f1210de 100644 --- a/src/core/hle/kernel/svc/svc_memory.cpp +++ b/src/core/hle/kernel/svc/svc_memory.cpp | |||
| @@ -76,7 +76,7 @@ Result MapUnmapMemorySanityChecks(const KPageTable& manager, u64 dst_addr, u64 s | |||
| 76 | } // namespace | 76 | } // namespace |
| 77 | 77 | ||
| 78 | Result SetMemoryPermission(Core::System& system, u64 address, u64 size, MemoryPermission perm) { | 78 | Result SetMemoryPermission(Core::System& system, u64 address, u64 size, MemoryPermission perm) { |
| 79 | LOG_DEBUG(Kernel_SVC, "called, address=0x{:016X}, size=0x{:X}, perm=0x{:08X", address, size, | 79 | LOG_DEBUG(Kernel_SVC, "called, address=0x{:016X}, size=0x{:X}, perm=0x{:08X}", address, size, |
| 80 | perm); | 80 | perm); |
| 81 | 81 | ||
| 82 | // Validate address / size. | 82 | // Validate address / size. |
| @@ -108,10 +108,16 @@ Result SetMemoryAttribute(Core::System& system, u64 address, u64 size, u32 mask, | |||
| 108 | R_UNLESS((address < address + size), ResultInvalidCurrentMemory); | 108 | R_UNLESS((address < address + size), ResultInvalidCurrentMemory); |
| 109 | 109 | ||
| 110 | // Validate the attribute and mask. | 110 | // Validate the attribute and mask. |
| 111 | constexpr u32 SupportedMask = static_cast<u32>(MemoryAttribute::Uncached); | 111 | constexpr u32 SupportedMask = |
| 112 | static_cast<u32>(MemoryAttribute::Uncached | MemoryAttribute::PermissionLocked); | ||
| 112 | R_UNLESS((mask | attr) == mask, ResultInvalidCombination); | 113 | R_UNLESS((mask | attr) == mask, ResultInvalidCombination); |
| 113 | R_UNLESS((mask | attr | SupportedMask) == SupportedMask, ResultInvalidCombination); | 114 | R_UNLESS((mask | attr | SupportedMask) == SupportedMask, ResultInvalidCombination); |
| 114 | 115 | ||
| 116 | // Check that permission locked is either being set or not masked. | ||
| 117 | R_UNLESS((static_cast<Svc::MemoryAttribute>(mask) & Svc::MemoryAttribute::PermissionLocked) == | ||
| 118 | (static_cast<Svc::MemoryAttribute>(attr) & Svc::MemoryAttribute::PermissionLocked), | ||
| 119 | ResultInvalidCombination); | ||
| 120 | |||
| 115 | // Validate that the region is in range for the current process. | 121 | // Validate that the region is in range for the current process. |
| 116 | auto& page_table{GetCurrentProcess(system.Kernel()).GetPageTable()}; | 122 | auto& page_table{GetCurrentProcess(system.Kernel()).GetPageTable()}; |
| 117 | R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory); | 123 | R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory); |
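The new R_UNLESS line makes PermissionLocked one-way from userland: if the mask selects the bit, the attribute must also set it, so a caller can apply the lock but never clear it through SetMemoryAttribute. A standalone check of the rule's truth table (flag value taken from the svc_types.h hunk below):

    #include <cstdint>

    constexpr std::uint32_t PermissionLocked = 1u << 4; // from Svc::MemoryAttribute

    // Valid iff the PermissionLocked bit of `mask` and `attr` agree: either the
    // call does not touch the bit, or it sets it. Clearing (mask set, attr clear)
    // is rejected with ResultInvalidCombination.
    constexpr bool IsValidPermissionLockedUse(std::uint32_t mask, std::uint32_t attr) {
        return (mask & PermissionLocked) == (attr & PermissionLocked);
    }

    int main() {
        static_assert(IsValidPermissionLockedUse(0, 0));                               // untouched
        static_assert(IsValidPermissionLockedUse(PermissionLocked, PermissionLocked)); // set
        static_assert(!IsValidPermissionLockedUse(PermissionLocked, 0));               // clear: rejected
        return 0;
    }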
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h index 7f380ca4f..251e6013c 100644 --- a/src/core/hle/kernel/svc_types.h +++ b/src/core/hle/kernel/svc_types.h | |||
| @@ -46,6 +46,7 @@ enum class MemoryAttribute : u32 { | |||
| 46 | IpcLocked = (1 << 1), | 46 | IpcLocked = (1 << 1), |
| 47 | DeviceShared = (1 << 2), | 47 | DeviceShared = (1 << 2), |
| 48 | Uncached = (1 << 3), | 48 | Uncached = (1 << 3), |
| 49 | PermissionLocked = (1 << 4), | ||
| 49 | }; | 50 | }; |
| 50 | DECLARE_ENUM_FLAG_OPERATORS(MemoryAttribute); | 51 | DECLARE_ENUM_FLAG_OPERATORS(MemoryAttribute); |
| 51 | 52 | ||
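Since MemoryAttribute is a scoped enum, bit tests on the new PermissionLocked flag only work through the operator overloads that DECLARE_ENUM_FLAG_OPERATORS generates. A minimal hand-rolled equivalent for two of those operators, to show in spirit what the macro expands to (the real macro covers the full operator set):

    #include <cstdint>

    enum class MemoryAttribute : std::uint32_t {
        Locked = (1 << 0),
        Uncached = (1 << 3),
        PermissionLocked = (1 << 4),
    };

    // Scoped enums have no implicit integer conversions, so bitwise use needs
    // explicit overloads like these.
    constexpr MemoryAttribute operator|(MemoryAttribute a, MemoryAttribute b) {
        return static_cast<MemoryAttribute>(static_cast<std::uint32_t>(a) |
                                            static_cast<std::uint32_t>(b));
    }
    constexpr MemoryAttribute operator&(MemoryAttribute a, MemoryAttribute b) {
        return static_cast<MemoryAttribute>(static_cast<std::uint32_t>(a) &
                                            static_cast<std::uint32_t>(b));
    }
    constexpr bool True(MemoryAttribute a) {
        return static_cast<std::uint32_t>(a) != 0;
    }

    int main() {
        constexpr auto attr = MemoryAttribute::Uncached | MemoryAttribute::PermissionLocked;
        static_assert(True(attr & MemoryAttribute::PermissionLocked));
        static_assert(!True(attr & MemoryAttribute::Locked));
        return 0;
    }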
diff --git a/src/core/hle/service/mii/types/core_data.cpp b/src/core/hle/service/mii/types/core_data.cpp index 970c748ca..ba1da76ba 100644 --- a/src/core/hle/service/mii/types/core_data.cpp +++ b/src/core/hle/service/mii/types/core_data.cpp | |||
| @@ -41,6 +41,7 @@ void CoreData::BuildRandom(Age age, Gender gender, Race race) { | |||
| 41 | } | 41 | } |
| 42 | } | 42 | } |
| 43 | 43 | ||
| 44 | SetDefault(); | ||
| 44 | SetGender(gender); | 45 | SetGender(gender); |
| 45 | SetFavoriteColor(MiiUtil::GetRandomValue(FavoriteColor::Max)); | 46 | SetFavoriteColor(MiiUtil::GetRandomValue(FavoriteColor::Max)); |
| 46 | SetRegionMove(0); | 47 | SetRegionMove(0); |
diff --git a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp index 469a53244..2e29bc848 100644 --- a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp +++ b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp | |||
| @@ -46,7 +46,7 @@ Result AllocateIoForProcessAddressSpace(Common::ProcessAddress* out_map_address, | |||
| 46 | // Get bounds of where mapping is possible. | 46 | // Get bounds of where mapping is possible. |
| 47 | const VAddr alias_code_begin = GetInteger(page_table.GetAliasCodeRegionStart()); | 47 | const VAddr alias_code_begin = GetInteger(page_table.GetAliasCodeRegionStart()); |
| 48 | const VAddr alias_code_size = page_table.GetAliasCodeRegionSize() / YUZU_PAGESIZE; | 48 | const VAddr alias_code_size = page_table.GetAliasCodeRegionSize() / YUZU_PAGESIZE; |
| 49 | const auto state = Kernel::KMemoryState::Io; | 49 | const auto state = Kernel::KMemoryState::IoMemory; |
| 50 | const auto perm = Kernel::KMemoryPermission::UserReadWrite; | 50 | const auto perm = Kernel::KMemoryPermission::UserReadWrite; |
| 51 | std::mt19937_64 rng{process->GetRandomEntropy(0)}; | 51 | std::mt19937_64 rng{process->GetRandomEntropy(0)}; |
| 52 | 52 | ||
diff --git a/src/video_core/fence_manager.h b/src/video_core/fence_manager.h index 805a89900..c0e6471fe 100644 --- a/src/video_core/fence_manager.h +++ b/src/video_core/fence_manager.h | |||
| @@ -86,7 +86,10 @@ public: | |||
| 86 | uncommitted_operations.emplace_back(std::move(func)); | 86 | uncommitted_operations.emplace_back(std::move(func)); |
| 87 | } | 87 | } |
| 88 | pending_operations.emplace_back(std::move(uncommitted_operations)); | 88 | pending_operations.emplace_back(std::move(uncommitted_operations)); |
| 89 | QueueFence(new_fence); | 89 | { |
| 90 | std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; | ||
| 91 | QueueFence(new_fence); | ||
| 92 | } | ||
| 90 | if (!delay_fence) { | 93 | if (!delay_fence) { |
| 91 | func(); | 94 | func(); |
| 92 | } | 95 | } |
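Wrapping QueueFence in a single std::scoped_lock over both cache mutexes matters for more than scope hygiene: constructed with multiple mutexes, scoped_lock acquires them with a deadlock-avoidance algorithm (equivalent to std::lock) rather than a fixed order, so another thread taking the pair in the opposite textual order cannot deadlock against this one. A runnable toy of the pattern:

    #include <mutex>
    #include <thread>

    std::recursive_mutex buffer_mutex;
    std::recursive_mutex texture_mutex;

    void worker_a() {
        // Both mutexes are acquired atomically with deadlock avoidance...
        std::scoped_lock lock{buffer_mutex, texture_mutex};
    }

    void worker_b() {
        // ...so the opposite ordering here is still safe.
        std::scoped_lock lock{texture_mutex, buffer_mutex};
    }

    int main() {
        std::thread a(worker_a);
        std::thread b(worker_b);
        a.join();
        b.join();
        return 0;
    }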
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp index c4c30d807..7e7a80740 100644 --- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp +++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp | |||
| @@ -132,12 +132,16 @@ void RendererVulkan::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) { | |||
| 132 | const bool use_accelerated = | 132 | const bool use_accelerated = |
| 133 | rasterizer.AccelerateDisplay(*framebuffer, framebuffer_addr, framebuffer->stride); | 133 | rasterizer.AccelerateDisplay(*framebuffer, framebuffer_addr, framebuffer->stride); |
| 134 | const bool is_srgb = use_accelerated && screen_info.is_srgb; | 134 | const bool is_srgb = use_accelerated && screen_info.is_srgb; |
| 135 | RenderScreenshot(*framebuffer, use_accelerated); | ||
| 136 | 135 | ||
| 137 | Frame* frame = present_manager.GetRenderFrame(); | 136 | { |
| 138 | blit_screen.DrawToSwapchain(frame, *framebuffer, use_accelerated, is_srgb); | 137 | std::scoped_lock lock{rasterizer.LockCaches()}; |
| 139 | scheduler.Flush(*frame->render_ready); | 138 | RenderScreenshot(*framebuffer, use_accelerated); |
| 140 | present_manager.Present(frame); | 139 | |
| 140 | Frame* frame = present_manager.GetRenderFrame(); | ||
| 141 | blit_screen.DrawToSwapchain(frame, *framebuffer, use_accelerated, is_srgb); | ||
| 142 | scheduler.Flush(*frame->render_ready); | ||
| 143 | present_manager.Present(frame); | ||
| 144 | } | ||
| 141 | 145 | ||
| 142 | gpu.RendererFrameEndNotify(); | 146 | gpu.RendererFrameEndNotify(); |
| 143 | rasterizer.TickFrame(); | 147 | rasterizer.TickFrame(); |
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index 61d03daae..465eac37e 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp | |||
| @@ -198,7 +198,7 @@ void RasterizerVulkan::PrepareDraw(bool is_indexed, Func&& draw_func) { | |||
| 198 | if (!pipeline) { | 198 | if (!pipeline) { |
| 199 | return; | 199 | return; |
| 200 | } | 200 | } |
| 201 | std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; | 201 | std::scoped_lock lock{LockCaches()}; |
| 202 | // update engine as channel may be different. | 202 | // update engine as channel may be different. |
| 203 | pipeline->SetEngine(maxwell3d, gpu_memory); | 203 | pipeline->SetEngine(maxwell3d, gpu_memory); |
| 204 | pipeline->Configure(is_indexed); | 204 | pipeline->Configure(is_indexed); |
| @@ -708,6 +708,7 @@ void RasterizerVulkan::TiledCacheBarrier() { | |||
| 708 | } | 708 | } |
| 709 | 709 | ||
| 710 | void RasterizerVulkan::FlushCommands() { | 710 | void RasterizerVulkan::FlushCommands() { |
| 711 | std::scoped_lock lock{LockCaches()}; | ||
| 711 | if (draw_counter == 0) { | 712 | if (draw_counter == 0) { |
| 712 | return; | 713 | return; |
| 713 | } | 714 | } |
| @@ -805,6 +806,7 @@ void RasterizerVulkan::FlushWork() { | |||
| 805 | if ((++draw_counter & 7) != 7) { | 806 | if ((++draw_counter & 7) != 7) { |
| 806 | return; | 807 | return; |
| 807 | } | 808 | } |
| 809 | std::scoped_lock lock{LockCaches()}; | ||
| 808 | if (draw_counter < DRAWS_TO_DISPATCH) { | 810 | if (draw_counter < DRAWS_TO_DISPATCH) { |
| 809 | // Send recorded tasks to the worker thread | 811 | // Send recorded tasks to the worker thread |
| 810 | scheduler.DispatchWork(); | 812 | scheduler.DispatchWork(); |
| @@ -1499,7 +1501,7 @@ void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs) | |||
| 1499 | void RasterizerVulkan::InitializeChannel(Tegra::Control::ChannelState& channel) { | 1501 | void RasterizerVulkan::InitializeChannel(Tegra::Control::ChannelState& channel) { |
| 1500 | CreateChannel(channel); | 1502 | CreateChannel(channel); |
| 1501 | { | 1503 | { |
| 1502 | std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; | 1504 | std::scoped_lock lock{LockCaches()}; |
| 1503 | texture_cache.CreateChannel(channel); | 1505 | texture_cache.CreateChannel(channel); |
| 1504 | buffer_cache.CreateChannel(channel); | 1506 | buffer_cache.CreateChannel(channel); |
| 1505 | } | 1507 | } |
| @@ -1512,7 +1514,7 @@ void RasterizerVulkan::BindChannel(Tegra::Control::ChannelState& channel) { | |||
| 1512 | const s32 channel_id = channel.bind_id; | 1514 | const s32 channel_id = channel.bind_id; |
| 1513 | BindToChannel(channel_id); | 1515 | BindToChannel(channel_id); |
| 1514 | { | 1516 | { |
| 1515 | std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; | 1517 | std::scoped_lock lock{LockCaches()}; |
| 1516 | texture_cache.BindToChannel(channel_id); | 1518 | texture_cache.BindToChannel(channel_id); |
| 1517 | buffer_cache.BindToChannel(channel_id); | 1519 | buffer_cache.BindToChannel(channel_id); |
| 1518 | } | 1520 | } |
| @@ -1525,7 +1527,7 @@ void RasterizerVulkan::BindChannel(Tegra::Control::ChannelState& channel) { | |||
| 1525 | void RasterizerVulkan::ReleaseChannel(s32 channel_id) { | 1527 | void RasterizerVulkan::ReleaseChannel(s32 channel_id) { |
| 1526 | EraseChannel(channel_id); | 1528 | EraseChannel(channel_id); |
| 1527 | { | 1529 | { |
| 1528 | std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; | 1530 | std::scoped_lock lock{LockCaches()}; |
| 1529 | texture_cache.EraseChannel(channel_id); | 1531 | texture_cache.EraseChannel(channel_id); |
| 1530 | buffer_cache.EraseChannel(channel_id); | 1532 | buffer_cache.EraseChannel(channel_id); |
| 1531 | } | 1533 | } |
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h index ad069556c..ce3dfbaab 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.h +++ b/src/video_core/renderer_vulkan/vk_rasterizer.h | |||
| @@ -133,6 +133,10 @@ public: | |||
| 133 | 133 | ||
| 134 | void ReleaseChannel(s32 channel_id) override; | 134 | void ReleaseChannel(s32 channel_id) override; |
| 135 | 135 | ||
| 136 | std::scoped_lock<std::recursive_mutex, std::recursive_mutex> LockCaches() { | ||
| 137 | return std::scoped_lock{buffer_cache.mutex, texture_cache.mutex}; | ||
| 138 | } | ||
| 139 | |||
| 136 | private: | 140 | private: |
| 137 | static constexpr size_t MAX_TEXTURES = 192; | 141 | static constexpr size_t MAX_TEXTURES = 192; |
| 138 | static constexpr size_t MAX_IMAGES = 48; | 142 | static constexpr size_t MAX_IMAGES = 48; |
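LockCaches returns the std::scoped_lock by value, which is only legal because scoped_lock is neither copyable nor movable: C++17 guaranteed copy elision constructs it directly in the caller's frame. The recursive_mutex members are what let call paths that already hold the caches (as several of the vk_rasterizer.cpp hunks above do) re-enter through another LockCaches call. A minimal sketch of the pattern, with class and member names invented:

    #include <mutex>

    // Accessor pattern from vk_rasterizer.h: a helper locks two recursive
    // mutexes and hands the guard to the caller. Valid in C++17 because the
    // scoped_lock is constructed directly in the caller (guaranteed elision).
    class CachePair {
    public:
        std::scoped_lock<std::recursive_mutex, std::recursive_mutex> Lock() {
            return std::scoped_lock{m_buffer_mutex, m_texture_mutex};
        }

        void Flush() {
            auto lock = Lock(); // outer acquisition
            Reconfigure();      // nested call below re-locks safely
        }

    private:
        void Reconfigure() {
            auto lock = Lock(); // same thread, second acquisition: OK, recursive
        }

        std::recursive_mutex m_buffer_mutex;
        std::recursive_mutex m_texture_mutex;
    };

    int main() {
        CachePair caches;
        caches.Flush();
        return 0;
    }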