| field | value |
|---|---|
| author | 2022-09-09 21:12:37 -0700 |
| committer | 2022-10-18 19:13:34 -0700 |
| commit | 2bb41cffca7e5ec6383a59c513ef9d7e2def5f51 |
| tree | e76369cb0d11749ce2ab586dba38c745d1e8a14a /src |
| parent | core: hle: kernel: k_thread: Implement thread termination DPC. |
core: hle: kernel: k_memory_block_manager: Update.
Diffstat (limited to 'src')
| mode | file | changes |
|---|---|---|
| -rw-r--r-- | src/core/hle/kernel/k_memory_block_manager.cpp | 409 |
| -rw-r--r-- | src/core/hle/kernel/k_memory_block_manager.h | 145 |
2 files changed, 380 insertions, 174 deletions
diff --git a/src/core/hle/kernel/k_memory_block_manager.cpp b/src/core/hle/kernel/k_memory_block_manager.cpp
index 3ddb9984f..c908af75a 100644
--- a/src/core/hle/kernel/k_memory_block_manager.cpp
+++ b/src/core/hle/kernel/k_memory_block_manager.cpp
@@ -2,221 +2,336 @@
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | 3 | ||
| 4 | #include "core/hle/kernel/k_memory_block_manager.h" | 4 | #include "core/hle/kernel/k_memory_block_manager.h" |
| 5 | #include "core/hle/kernel/memory_types.h" | ||
| 6 | 5 | ||
| 7 | namespace Kernel { | 6 | namespace Kernel { |
| 8 | 7 | ||
| 9 | KMemoryBlockManager::KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_) | 8 | KMemoryBlockManager::KMemoryBlockManager() = default; |
| 10 | : start_addr{start_addr_}, end_addr{end_addr_} { | ||
| 11 | const u64 num_pages{(end_addr - start_addr) / PageSize}; | ||
| 12 | memory_block_tree.emplace_back(start_addr, num_pages, KMemoryState::Free, | ||
| 13 | KMemoryPermission::None, KMemoryAttribute::None); | ||
| 14 | } | ||
| 15 | 9 | ||
| 16 | KMemoryBlockManager::iterator KMemoryBlockManager::FindIterator(VAddr addr) { | 10 | Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager) { |
| 17 | auto node{memory_block_tree.begin()}; | 11 | // Allocate a block to encapsulate the address space, insert it into the tree. |
| 18 | while (node != end()) { | 12 | KMemoryBlock* start_block = slab_manager->Allocate(); |
| 19 | const VAddr node_end_addr{node->GetNumPages() * PageSize + node->GetAddress()}; | 13 | R_UNLESS(start_block != nullptr, ResultOutOfResource); |
| 20 | if (node->GetAddress() <= addr && node_end_addr - 1 >= addr) { | 14 | |
| 21 | return node; | 15 | // Set our start and end. |
| 22 | } | 16 | m_start_address = st; |
| 23 | node = std::next(node); | 17 | m_end_address = nd; |
| 24 | } | 18 | ASSERT(Common::IsAligned(m_start_address, PageSize)); |
| 25 | return end(); | 19 | ASSERT(Common::IsAligned(m_end_address, PageSize)); |
| 20 | |||
| 21 | // Initialize and insert the block. | ||
| 22 | start_block->Initialize(m_start_address, (m_end_address - m_start_address) / PageSize, | ||
| 23 | KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None); | ||
| 24 | m_memory_block_tree.insert(*start_block); | ||
| 25 | |||
| 26 | return ResultSuccess; | ||
| 26 | } | 27 | } |
| 27 | 28 | ||
| 28 | VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_num_pages, | 29 | void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager* slab_manager, |
| 29 | std::size_t num_pages, std::size_t align, | 30 | HostUnmapCallback&& host_unmap_callback) { |
| 30 | std::size_t offset, std::size_t guard_pages) { | 31 | // Erase every block until we have none left. |
| 31 | if (num_pages == 0) { | 32 | auto it = m_memory_block_tree.begin(); |
| 32 | return {}; | 33 | while (it != m_memory_block_tree.end()) { |
| 34 | KMemoryBlock* block = std::addressof(*it); | ||
| 35 | it = m_memory_block_tree.erase(it); | ||
| 36 | slab_manager->Free(block); | ||
| 37 | host_unmap_callback(block->GetAddress(), block->GetSize()); | ||
| 33 | } | 38 | } |
| 34 | 39 | ||
| 35 | const VAddr region_end{region_start + region_num_pages * PageSize}; | 40 | ASSERT(m_memory_block_tree.empty()); |
| 36 | const VAddr region_last{region_end - 1}; | 41 | } |
| 37 | for (auto it{FindIterator(region_start)}; it != memory_block_tree.cend(); it++) { | ||
| 38 | const auto info{it->GetMemoryInfo()}; | ||
| 39 | if (region_last < info.GetAddress()) { | ||
| 40 | break; | ||
| 41 | } | ||
| 42 | 42 | ||
| 43 | if (info.state != KMemoryState::Free) { | 43 | VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pages, |
| 44 | continue; | 44 | size_t num_pages, size_t alignment, size_t offset, |
| 45 | } | 45 | size_t guard_pages) const { |
| 46 | if (num_pages > 0) { | ||
| 47 | const VAddr region_end = region_start + region_num_pages * PageSize; | ||
| 48 | const VAddr region_last = region_end - 1; | ||
| 49 | for (const_iterator it = this->FindIterator(region_start); it != m_memory_block_tree.cend(); | ||
| 50 | it++) { | ||
| 51 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 52 | if (region_last < info.GetAddress()) { | ||
| 53 | break; | ||
| 54 | } | ||
| 55 | if (info.m_state != KMemoryState::Free) { | ||
| 56 | continue; | ||
| 57 | } | ||
| 46 | 58 | ||
| 47 | VAddr area{(info.GetAddress() <= region_start) ? region_start : info.GetAddress()}; | 59 | VAddr area = (info.GetAddress() <= region_start) ? region_start : info.GetAddress(); |
| 48 | area += guard_pages * PageSize; | 60 | area += guard_pages * PageSize; |
| 49 | 61 | ||
| 50 | const VAddr offset_area{Common::AlignDown(area, align) + offset}; | 62 | const VAddr offset_area = Common::AlignDown(area, alignment) + offset; |
| 51 | area = (area <= offset_area) ? offset_area : offset_area + align; | 63 | area = (area <= offset_area) ? offset_area : offset_area + alignment; |
| 52 | 64 | ||
| 53 | const VAddr area_end{area + num_pages * PageSize + guard_pages * PageSize}; | 65 | const VAddr area_end = area + num_pages * PageSize + guard_pages * PageSize; |
| 54 | const VAddr area_last{area_end - 1}; | 66 | const VAddr area_last = area_end - 1; |
| 55 | 67 | ||
| 56 | if (info.GetAddress() <= area && area < area_last && area_last <= region_last && | 68 | if (info.GetAddress() <= area && area < area_last && area_last <= region_last && |
| 57 | area_last <= info.GetLastAddress()) { | 69 | area_last <= info.GetLastAddress()) { |
| 58 | return area; | 70 | return area; |
| 71 | } | ||
| 59 | } | 72 | } |
| 60 | } | 73 | } |
| 61 | 74 | ||
| 62 | return {}; | 75 | return {}; |
| 63 | } | 76 | } |
| 64 | 77 | ||
| 65 | void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state, | 78 | void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, |
| 66 | KMemoryPermission prev_perm, KMemoryAttribute prev_attribute, | 79 | VAddr address, size_t num_pages) { |
| 67 | KMemoryState state, KMemoryPermission perm, | 80 | // Find the iterator now that we've updated. |
| 68 | KMemoryAttribute attribute) { | 81 | iterator it = this->FindIterator(address); |
| 69 | const VAddr update_end_addr{addr + num_pages * PageSize}; | 82 | if (address != m_start_address) { |
| 70 | iterator node{memory_block_tree.begin()}; | 83 | it--; |
| 84 | } | ||
| 71 | 85 | ||
| 72 | prev_attribute |= KMemoryAttribute::IpcAndDeviceMapped; | 86 | // Coalesce blocks that we can. |
| 87 | while (true) { | ||
| 88 | iterator prev = it++; | ||
| 89 | if (it == m_memory_block_tree.end()) { | ||
| 90 | break; | ||
| 91 | } | ||
| 73 | 92 | ||
| 74 | while (node != memory_block_tree.end()) { | 93 | if (prev->CanMergeWith(*it)) { |
| 75 | KMemoryBlock* block{&(*node)}; | 94 | KMemoryBlock* block = std::addressof(*it); |
| 76 | iterator next_node{std::next(node)}; | 95 | m_memory_block_tree.erase(it); |
| 77 | const VAddr cur_addr{block->GetAddress()}; | 96 | prev->Add(*block); |
| 78 | const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr}; | 97 | allocator->Free(block); |
| 98 | it = prev; | ||
| 99 | } | ||
| 79 | 100 | ||
| 80 | if (addr < cur_end_addr && cur_addr < update_end_addr) { | 101 | if (address + num_pages * PageSize < it->GetMemoryInfo().GetEndAddress()) { |
| 81 | if (!block->HasProperties(prev_state, prev_perm, prev_attribute)) { | 102 | break; |
| 82 | node = next_node; | 103 | } |
| 83 | continue; | 104 | } |
| 84 | } | 105 | } |
| 85 | 106 | ||
| 86 | iterator new_node{node}; | 107 | void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, |
| 87 | if (addr > cur_addr) { | 108 | size_t num_pages, KMemoryState state, KMemoryPermission perm, |
| 88 | memory_block_tree.insert(node, block->Split(addr)); | 109 | KMemoryAttribute attr, |
| 110 | KMemoryBlockDisableMergeAttribute set_disable_attr, | ||
| 111 | KMemoryBlockDisableMergeAttribute clear_disable_attr) { | ||
| 112 | // Ensure for auditing that we never end up with an invalid tree. | ||
| 113 | KScopedMemoryBlockManagerAuditor auditor(this); | ||
| 114 | ASSERT(Common::IsAligned(address, PageSize)); | ||
| 115 | ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) == | ||
| 116 | KMemoryAttribute::None); | ||
| 117 | |||
| 118 | VAddr cur_address = address; | ||
| 119 | size_t remaining_pages = num_pages; | ||
| 120 | iterator it = this->FindIterator(address); | ||
| 121 | |||
| 122 | while (remaining_pages > 0) { | ||
| 123 | const size_t remaining_size = remaining_pages * PageSize; | ||
| 124 | KMemoryInfo cur_info = it->GetMemoryInfo(); | ||
| 125 | if (it->HasProperties(state, perm, attr)) { | ||
| 126 | // If we already have the right properties, just advance. | ||
| 127 | if (cur_address + remaining_size < cur_info.GetEndAddress()) { | ||
| 128 | remaining_pages = 0; | ||
| 129 | cur_address += remaining_size; | ||
| 130 | } else { | ||
| 131 | remaining_pages = | ||
| 132 | (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize; | ||
| 133 | cur_address = cur_info.GetEndAddress(); | ||
| 89 | } | 134 | } |
| 135 | } else { | ||
| 136 | // If we need to, create a new block before and insert it. | ||
| 137 | if (cur_info.GetAddress() != cur_address) { | ||
| 138 | KMemoryBlock* new_block = allocator->Allocate(); | ||
| 139 | |||
| 140 | it->Split(new_block, cur_address); | ||
| 141 | it = m_memory_block_tree.insert(*new_block); | ||
| 142 | it++; | ||
| 90 | 143 | ||
| 91 | if (update_end_addr < cur_end_addr) { | 144 | cur_info = it->GetMemoryInfo(); |
| 92 | new_node = memory_block_tree.insert(node, block->Split(update_end_addr)); | 145 | cur_address = cur_info.GetAddress(); |
| 93 | } | 146 | } |
| 94 | 147 | ||
| 95 | new_node->Update(state, perm, attribute); | 148 | // If we need to, create a new block after and insert it. |
| 149 | if (cur_info.GetSize() > remaining_size) { | ||
| 150 | KMemoryBlock* new_block = allocator->Allocate(); | ||
| 96 | 151 | ||
| 97 | MergeAdjacent(new_node, next_node); | 152 | it->Split(new_block, cur_address + remaining_size); |
| 98 | } | 153 | it = m_memory_block_tree.insert(*new_block); |
| 99 | 154 | ||
| 100 | if (cur_end_addr - 1 >= update_end_addr - 1) { | 155 | cur_info = it->GetMemoryInfo(); |
| 101 | break; | 156 | } |
| 102 | } | ||
| 103 | 157 | ||
| 104 | node = next_node; | 158 | // Update block state. |
| 159 | it->Update(state, perm, attr, cur_address == address, static_cast<u8>(set_disable_attr), | ||
| 160 | static_cast<u8>(clear_disable_attr)); | ||
| 161 | cur_address += cur_info.GetSize(); | ||
| 162 | remaining_pages -= cur_info.GetNumPages(); | ||
| 163 | } | ||
| 164 | it++; | ||
| 105 | } | 165 | } |
| 166 | |||
| 167 | this->CoalesceForUpdate(allocator, address, num_pages); | ||
| 106 | } | 168 | } |
| 107 | 169 | ||
| 108 | void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState state, | 170 | void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, |
| 109 | KMemoryPermission perm, KMemoryAttribute attribute) { | 171 | VAddr address, size_t num_pages, KMemoryState test_state, |
| 110 | const VAddr update_end_addr{addr + num_pages * PageSize}; | 172 | KMemoryPermission test_perm, KMemoryAttribute test_attr, |
| 111 | iterator node{memory_block_tree.begin()}; | 173 | KMemoryState state, KMemoryPermission perm, |
| 174 | KMemoryAttribute attr) { | ||
| 175 | // Ensure for auditing that we never end up with an invalid tree. | ||
| 176 | KScopedMemoryBlockManagerAuditor auditor(this); | ||
| 177 | ASSERT(Common::IsAligned(address, PageSize)); | ||
| 178 | ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) == | ||
| 179 | KMemoryAttribute::None); | ||
| 180 | |||
| 181 | VAddr cur_address = address; | ||
| 182 | size_t remaining_pages = num_pages; | ||
| 183 | iterator it = this->FindIterator(address); | ||
| 184 | |||
| 185 | while (remaining_pages > 0) { | ||
| 186 | const size_t remaining_size = remaining_pages * PageSize; | ||
| 187 | KMemoryInfo cur_info = it->GetMemoryInfo(); | ||
| 188 | if (it->HasProperties(test_state, test_perm, test_attr) && | ||
| 189 | !it->HasProperties(state, perm, attr)) { | ||
| 190 | // If we need to, create a new block before and insert it. | ||
| 191 | if (cur_info.GetAddress() != cur_address) { | ||
| 192 | KMemoryBlock* new_block = allocator->Allocate(); | ||
| 193 | |||
| 194 | it->Split(new_block, cur_address); | ||
| 195 | it = m_memory_block_tree.insert(*new_block); | ||
| 196 | it++; | ||
| 197 | |||
| 198 | cur_info = it->GetMemoryInfo(); | ||
| 199 | cur_address = cur_info.GetAddress(); | ||
| 200 | } | ||
| 112 | 201 | ||
| 113 | while (node != memory_block_tree.end()) { | 202 | // If we need to, create a new block after and insert it. |
| 114 | KMemoryBlock* block{&(*node)}; | 203 | if (cur_info.GetSize() > remaining_size) { |
| 115 | iterator next_node{std::next(node)}; | 204 | KMemoryBlock* new_block = allocator->Allocate(); |
| 116 | const VAddr cur_addr{block->GetAddress()}; | ||
| 117 | const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr}; | ||
| 118 | 205 | ||
| 119 | if (addr < cur_end_addr && cur_addr < update_end_addr) { | 206 | it->Split(new_block, cur_address + remaining_size); |
| 120 | iterator new_node{node}; | 207 | it = m_memory_block_tree.insert(*new_block); |
| 121 | 208 | ||
| 122 | if (addr > cur_addr) { | 209 | cur_info = it->GetMemoryInfo(); |
| 123 | memory_block_tree.insert(node, block->Split(addr)); | ||
| 124 | } | 210 | } |
| 125 | 211 | ||
| 126 | if (update_end_addr < cur_end_addr) { | 212 | // Update block state. |
| 127 | new_node = memory_block_tree.insert(node, block->Split(update_end_addr)); | 213 | it->Update(state, perm, attr, false, 0, 0); |
| 214 | cur_address += cur_info.GetSize(); | ||
| 215 | remaining_pages -= cur_info.GetNumPages(); | ||
| 216 | } else { | ||
| 217 | // If we already have the right properties, just advance. | ||
| 218 | if (cur_address + remaining_size < cur_info.GetEndAddress()) { | ||
| 219 | remaining_pages = 0; | ||
| 220 | cur_address += remaining_size; | ||
| 221 | } else { | ||
| 222 | remaining_pages = | ||
| 223 | (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize; | ||
| 224 | cur_address = cur_info.GetEndAddress(); | ||
| 128 | } | 225 | } |
| 129 | |||
| 130 | new_node->Update(state, perm, attribute); | ||
| 131 | |||
| 132 | MergeAdjacent(new_node, next_node); | ||
| 133 | } | ||
| 134 | |||
| 135 | if (cur_end_addr - 1 >= update_end_addr - 1) { | ||
| 136 | break; | ||
| 137 | } | 226 | } |
| 138 | 227 | it++; | |
| 139 | node = next_node; | ||
| 140 | } | 228 | } |
| 229 | |||
| 230 | this->CoalesceForUpdate(allocator, address, num_pages); | ||
| 141 | } | 231 | } |
| 142 | 232 | ||
| 143 | void KMemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func, | 233 | void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, |
| 234 | size_t num_pages, MemoryBlockLockFunction lock_func, | ||
| 144 | KMemoryPermission perm) { | 235 | KMemoryPermission perm) { |
| 145 | const VAddr update_end_addr{addr + num_pages * PageSize}; | 236 | // Ensure for auditing that we never end up with an invalid tree. |
| 146 | iterator node{memory_block_tree.begin()}; | 237 | KScopedMemoryBlockManagerAuditor auditor(this); |
| 238 | ASSERT(Common::IsAligned(address, PageSize)); | ||
| 147 | 239 | ||
| 148 | while (node != memory_block_tree.end()) { | 240 | VAddr cur_address = address; |
| 149 | KMemoryBlock* block{&(*node)}; | 241 | size_t remaining_pages = num_pages; |
| 150 | iterator next_node{std::next(node)}; | 242 | iterator it = this->FindIterator(address); |
| 151 | const VAddr cur_addr{block->GetAddress()}; | ||
| 152 | const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr}; | ||
| 153 | 243 | ||
| 154 | if (addr < cur_end_addr && cur_addr < update_end_addr) { | 244 | const VAddr end_address = address + (num_pages * PageSize); |
| 155 | iterator new_node{node}; | ||
| 156 | 245 | ||
| 157 | if (addr > cur_addr) { | 246 | while (remaining_pages > 0) { |
| 158 | memory_block_tree.insert(node, block->Split(addr)); | 247 | const size_t remaining_size = remaining_pages * PageSize; |
| 159 | } | 248 | KMemoryInfo cur_info = it->GetMemoryInfo(); |
| 160 | 249 | ||
| 161 | if (update_end_addr < cur_end_addr) { | 250 | // If we need to, create a new block before and insert it. |
| 162 | new_node = memory_block_tree.insert(node, block->Split(update_end_addr)); | 251 | if (cur_info.m_address != cur_address) { |
| 163 | } | 252 | KMemoryBlock* new_block = allocator->Allocate(); |
| 164 | 253 | ||
| 165 | lock_func(new_node, perm); | 254 | it->Split(new_block, cur_address); |
| 255 | it = m_memory_block_tree.insert(*new_block); | ||
| 256 | it++; | ||
| 166 | 257 | ||
| 167 | MergeAdjacent(new_node, next_node); | 258 | cur_info = it->GetMemoryInfo(); |
| 259 | cur_address = cur_info.GetAddress(); | ||
| 168 | } | 260 | } |
| 169 | 261 | ||
| 170 | if (cur_end_addr - 1 >= update_end_addr - 1) { | 262 | if (cur_info.GetSize() > remaining_size) { |
| 171 | break; | 263 | // If we need to, create a new block after and insert it. |
| 264 | KMemoryBlock* new_block = allocator->Allocate(); | ||
| 265 | |||
| 266 | it->Split(new_block, cur_address + remaining_size); | ||
| 267 | it = m_memory_block_tree.insert(*new_block); | ||
| 268 | |||
| 269 | cur_info = it->GetMemoryInfo(); | ||
| 172 | } | 270 | } |
| 173 | 271 | ||
| 174 | node = next_node; | 272 | // Call the locked update function. |
| 273 | (std::addressof(*it)->*lock_func)(perm, cur_info.GetAddress() == address, | ||
| 274 | cur_info.GetEndAddress() == end_address); | ||
| 275 | cur_address += cur_info.GetSize(); | ||
| 276 | remaining_pages -= cur_info.GetNumPages(); | ||
| 277 | it++; | ||
| 175 | } | 278 | } |
| 176 | } | ||
| 177 | 279 | ||
| 178 | void KMemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) { | 280 | this->CoalesceForUpdate(allocator, address, num_pages); |
| 179 | const_iterator it{FindIterator(start)}; | ||
| 180 | KMemoryInfo info{}; | ||
| 181 | do { | ||
| 182 | info = it->GetMemoryInfo(); | ||
| 183 | func(info); | ||
| 184 | it = std::next(it); | ||
| 185 | } while (info.addr + info.size - 1 < end - 1 && it != cend()); | ||
| 186 | } | 281 | } |
| 187 | 282 | ||
| 188 | void KMemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) { | 283 | // Debug. |
| 189 | KMemoryBlock* block{&(*it)}; | 284 | bool KMemoryBlockManager::CheckState() const { |
| 190 | 285 | // Loop over every block, ensuring that we are sorted and coalesced. | |
| 191 | auto EraseIt = [&](const iterator it_to_erase) { | 286 | auto it = m_memory_block_tree.cbegin(); |
| 192 | if (next_it == it_to_erase) { | 287 | auto prev = it++; |
| 193 | next_it = std::next(next_it); | 288 | while (it != m_memory_block_tree.cend()) { |
| 289 | const KMemoryInfo prev_info = prev->GetMemoryInfo(); | ||
| 290 | const KMemoryInfo cur_info = it->GetMemoryInfo(); | ||
| 291 | |||
| 292 | // Sequential blocks which can be merged should be merged. | ||
| 293 | if (prev->CanMergeWith(*it)) { | ||
| 294 | return false; | ||
| 194 | } | 295 | } |
| 195 | memory_block_tree.erase(it_to_erase); | ||
| 196 | }; | ||
| 197 | 296 | ||
| 198 | if (it != memory_block_tree.begin()) { | 297 | // Sequential blocks should be sequential. |
| 199 | KMemoryBlock* prev{&(*std::prev(it))}; | 298 | if (prev_info.GetEndAddress() != cur_info.GetAddress()) { |
| 200 | 299 | return false; | |
| 201 | if (block->HasSameProperties(*prev)) { | 300 | } |
| 202 | const iterator prev_it{std::prev(it)}; | ||
| 203 | 301 | ||
| 204 | prev->Add(block->GetNumPages()); | 302 | // If the block is ipc locked, it must have a count. |
| 205 | EraseIt(it); | 303 | if ((cur_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None && |
| 304 | cur_info.m_ipc_lock_count == 0) { | ||
| 305 | return false; | ||
| 306 | } | ||
| 206 | 307 | ||
| 207 | it = prev_it; | 308 | // If the block is device shared, it must have a count. |
| 208 | block = prev; | 309 | if ((cur_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None && |
| 310 | cur_info.m_device_use_count == 0) { | ||
| 311 | return false; | ||
| 209 | } | 312 | } |
| 313 | |||
| 314 | // Advance the iterator. | ||
| 315 | prev = it++; | ||
| 210 | } | 316 | } |
| 211 | 317 | ||
| 212 | if (it != cend()) { | 318 | // Our loop will miss checking the last block, potentially, so check it. |
| 213 | const KMemoryBlock* const next{&(*std::next(it))}; | 319 | if (prev != m_memory_block_tree.cend()) { |
| 320 | const KMemoryInfo prev_info = prev->GetMemoryInfo(); | ||
| 321 | // If the block is ipc locked, it must have a count. | ||
| 322 | if ((prev_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None && | ||
| 323 | prev_info.m_ipc_lock_count == 0) { | ||
| 324 | return false; | ||
| 325 | } | ||
| 214 | 326 | ||
| 215 | if (block->HasSameProperties(*next)) { | 327 | // If the block is device shared, it must have a count. |
| 216 | block->Add(next->GetNumPages()); | 328 | if ((prev_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None && |
| 217 | EraseIt(std::next(it)); | 329 | prev_info.m_device_use_count == 0) { |
| 330 | return false; | ||
| 218 | } | 331 | } |
| 219 | } | 332 | } |
| 333 | |||
| 334 | return true; | ||
| 220 | } | 335 | } |
| 221 | 336 | ||
| 222 | } // namespace Kernel | 337 | } // namespace Kernel |
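Editor's note: the candidate-address computation in the new `FindFreeArea` above is easy to misread. It aligns the candidate down to the requested alignment, re-applies the page offset, and bumps the result up by one alignment unit if that landed below the original candidate. The following standalone sketch reproduces just that arithmetic with hypothetical numbers; the `AlignDown` stand-in (power-of-two alignments only) and the constants are assumptions for illustration, not the yuzu `Common` helpers.

```cpp
#include <cstdint>
#include <cstdio>

using VAddr = std::uint64_t;

// Trivial stand-in for Common::AlignDown (assumes power-of-two alignment).
constexpr VAddr AlignDown(VAddr value, std::uint64_t align) {
    return value & ~(align - 1);
}

int main() {
    constexpr std::uint64_t PageSize = 0x1000;

    // Hypothetical inputs: a candidate address inside a free block, searched
    // with 16-page alignment and a 1-page offset, as FindFreeArea computes it.
    const VAddr area = 0x1234000;                 // candidate after guard pages
    const std::uint64_t alignment = 16 * PageSize;
    const std::uint64_t offset = 1 * PageSize;

    const VAddr offset_area = AlignDown(area, alignment) + offset;
    // If aligning down moved us before 'area', bump up by one alignment unit.
    const VAddr result = (area <= offset_area) ? offset_area : offset_area + alignment;

    std::printf("area=%#llx aligned_candidate=%#llx\n",
                static_cast<unsigned long long>(area),
                static_cast<unsigned long long>(result));
    return 0;
}
```

With these numbers the aligned candidate comes out as 0x1241000: the first address at or above `area` that satisfies both the alignment and the page offset.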
diff --git a/src/core/hle/kernel/k_memory_block_manager.h b/src/core/hle/kernel/k_memory_block_manager.h
index e14741b89..b4ee4e319 100644
--- a/src/core/hle/kernel/k_memory_block_manager.h
+++ b/src/core/hle/kernel/k_memory_block_manager.h
@@ -4,63 +4,154 @@
| 4 | #pragma once | 4 | #pragma once |
| 5 | 5 | ||
| 6 | #include <functional> | 6 | #include <functional> |
| 7 | #include <list> | ||
| 8 | 7 | ||
| 8 | #include "common/common_funcs.h" | ||
| 9 | #include "common/common_types.h" | 9 | #include "common/common_types.h" |
| 10 | #include "core/hle/kernel/k_dynamic_resource_manager.h" | ||
| 10 | #include "core/hle/kernel/k_memory_block.h" | 11 | #include "core/hle/kernel/k_memory_block.h" |
| 11 | 12 | ||
| 12 | namespace Kernel { | 13 | namespace Kernel { |
| 13 | 14 | ||
| 15 | class KMemoryBlockManagerUpdateAllocator { | ||
| 16 | public: | ||
| 17 | static constexpr size_t MaxBlocks = 2; | ||
| 18 | |||
| 19 | private: | ||
| 20 | KMemoryBlock* m_blocks[MaxBlocks]; | ||
| 21 | size_t m_index; | ||
| 22 | KMemoryBlockSlabManager* m_slab_manager; | ||
| 23 | |||
| 24 | private: | ||
| 25 | Result Initialize(size_t num_blocks) { | ||
| 26 | // Check num blocks. | ||
| 27 | ASSERT(num_blocks <= MaxBlocks); | ||
| 28 | |||
| 29 | // Set index. | ||
| 30 | m_index = MaxBlocks - num_blocks; | ||
| 31 | |||
| 32 | // Allocate the blocks. | ||
| 33 | for (size_t i = 0; i < num_blocks && i < MaxBlocks; ++i) { | ||
| 34 | m_blocks[m_index + i] = m_slab_manager->Allocate(); | ||
| 35 | R_UNLESS(m_blocks[m_index + i] != nullptr, ResultOutOfResource); | ||
| 36 | } | ||
| 37 | |||
| 38 | return ResultSuccess; | ||
| 39 | } | ||
| 40 | |||
| 41 | public: | ||
| 42 | KMemoryBlockManagerUpdateAllocator(Result* out_result, KMemoryBlockSlabManager* sm, | ||
| 43 | size_t num_blocks = MaxBlocks) | ||
| 44 | : m_blocks(), m_index(MaxBlocks), m_slab_manager(sm) { | ||
| 45 | *out_result = this->Initialize(num_blocks); | ||
| 46 | } | ||
| 47 | |||
| 48 | ~KMemoryBlockManagerUpdateAllocator() { | ||
| 49 | for (const auto& block : m_blocks) { | ||
| 50 | if (block != nullptr) { | ||
| 51 | m_slab_manager->Free(block); | ||
| 52 | } | ||
| 53 | } | ||
| 54 | } | ||
| 55 | |||
| 56 | KMemoryBlock* Allocate() { | ||
| 57 | ASSERT(m_index < MaxBlocks); | ||
| 58 | ASSERT(m_blocks[m_index] != nullptr); | ||
| 59 | KMemoryBlock* block = nullptr; | ||
| 60 | std::swap(block, m_blocks[m_index++]); | ||
| 61 | return block; | ||
| 62 | } | ||
| 63 | |||
| 64 | void Free(KMemoryBlock* block) { | ||
| 65 | ASSERT(m_index <= MaxBlocks); | ||
| 66 | ASSERT(block != nullptr); | ||
| 67 | if (m_index == 0) { | ||
| 68 | m_slab_manager->Free(block); | ||
| 69 | } else { | ||
| 70 | m_blocks[--m_index] = block; | ||
| 71 | } | ||
| 72 | } | ||
| 73 | }; | ||
| 74 | |||
| 14 | class KMemoryBlockManager final { | 75 | class KMemoryBlockManager final { |
| 15 | public: | 76 | public: |
| 16 | using MemoryBlockTree = std::list<KMemoryBlock>; | 77 | using MemoryBlockTree = |
| 78 | Common::IntrusiveRedBlackTreeBaseTraits<KMemoryBlock>::TreeType<KMemoryBlock>; | ||
| 79 | using MemoryBlockLockFunction = void (KMemoryBlock::*)(KMemoryPermission new_perm, bool left, | ||
| 80 | bool right); | ||
| 17 | using iterator = MemoryBlockTree::iterator; | 81 | using iterator = MemoryBlockTree::iterator; |
| 18 | using const_iterator = MemoryBlockTree::const_iterator; | 82 | using const_iterator = MemoryBlockTree::const_iterator; |
| 19 | 83 | ||
| 20 | public: | 84 | public: |
| 21 | KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_); | 85 | KMemoryBlockManager(); |
| 86 | |||
| 87 | using HostUnmapCallback = std::function<void(VAddr, u64)>; | ||
| 88 | |||
| 89 | Result Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager); | ||
| 90 | void Finalize(KMemoryBlockSlabManager* slab_manager, HostUnmapCallback&& host_unmap_callback); | ||
| 22 | 91 | ||
| 23 | iterator end() { | 92 | iterator end() { |
| 24 | return memory_block_tree.end(); | 93 | return m_memory_block_tree.end(); |
| 25 | } | 94 | } |
| 26 | const_iterator end() const { | 95 | const_iterator end() const { |
| 27 | return memory_block_tree.end(); | 96 | return m_memory_block_tree.end(); |
| 28 | } | 97 | } |
| 29 | const_iterator cend() const { | 98 | const_iterator cend() const { |
| 30 | return memory_block_tree.cend(); | 99 | return m_memory_block_tree.cend(); |
| 31 | } | 100 | } |
| 32 | 101 | ||
| 33 | iterator FindIterator(VAddr addr); | 102 | VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages, |
| 103 | size_t alignment, size_t offset, size_t guard_pages) const; | ||
| 34 | 104 | ||
| 35 | VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages, | 105 | void Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages, |
| 36 | std::size_t align, std::size_t offset, std::size_t guard_pages); | 106 | KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr, |
| 107 | KMemoryBlockDisableMergeAttribute set_disable_attr, | ||
| 108 | KMemoryBlockDisableMergeAttribute clear_disable_attr); | ||
| 109 | void UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages, | ||
| 110 | MemoryBlockLockFunction lock_func, KMemoryPermission perm); | ||
| 37 | 111 | ||
| 38 | void Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state, | 112 | void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, |
| 39 | KMemoryPermission prev_perm, KMemoryAttribute prev_attribute, KMemoryState state, | 113 | size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm, |
| 40 | KMemoryPermission perm, KMemoryAttribute attribute); | 114 | KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm, |
| 115 | KMemoryAttribute attr); | ||
| 41 | 116 | ||
| 42 | void Update(VAddr addr, std::size_t num_pages, KMemoryState state, | 117 | iterator FindIterator(VAddr address) const { |
| 43 | KMemoryPermission perm = KMemoryPermission::None, | 118 | return m_memory_block_tree.find(KMemoryBlock( |
| 44 | KMemoryAttribute attribute = KMemoryAttribute::None); | 119 | address, 1, KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None)); |
| 45 | 120 | } | |
| 46 | using LockFunc = std::function<void(iterator, KMemoryPermission)>; | ||
| 47 | void UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func, | ||
| 48 | KMemoryPermission perm); | ||
| 49 | 121 | ||
| 50 | using IterateFunc = std::function<void(const KMemoryInfo&)>; | 122 | const KMemoryBlock* FindBlock(VAddr address) const { |
| 51 | void IterateForRange(VAddr start, VAddr end, IterateFunc&& func); | 123 | if (const_iterator it = this->FindIterator(address); it != m_memory_block_tree.end()) { |
| 124 | return std::addressof(*it); | ||
| 125 | } | ||
| 52 | 126 | ||
| 53 | KMemoryBlock& FindBlock(VAddr addr) { | 127 | return nullptr; |
| 54 | return *FindIterator(addr); | ||
| 55 | } | 128 | } |
| 56 | 129 | ||
| 130 | // Debug. | ||
| 131 | bool CheckState() const; | ||
| 132 | |||
| 57 | private: | 133 | private: |
| 58 | void MergeAdjacent(iterator it, iterator& next_it); | 134 | void CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, |
| 135 | size_t num_pages); | ||
| 59 | 136 | ||
| 60 | [[maybe_unused]] const VAddr start_addr; | 137 | MemoryBlockTree m_memory_block_tree; |
| 61 | [[maybe_unused]] const VAddr end_addr; | 138 | VAddr m_start_address{}; |
| 139 | VAddr m_end_address{}; | ||
| 140 | }; | ||
| 62 | 141 | ||
| 63 | MemoryBlockTree memory_block_tree; | 142 | class KScopedMemoryBlockManagerAuditor { |
| 143 | public: | ||
| 144 | explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager* m) : m_manager(m) { | ||
| 145 | ASSERT(m_manager->CheckState()); | ||
| 146 | } | ||
| 147 | explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager& m) | ||
| 148 | : KScopedMemoryBlockManagerAuditor(std::addressof(m)) {} | ||
| 149 | ~KScopedMemoryBlockManagerAuditor() { | ||
| 150 | ASSERT(m_manager->CheckState()); | ||
| 151 | } | ||
| 152 | |||
| 153 | private: | ||
| 154 | KMemoryBlockManager* m_manager; | ||
| 64 | }; | 155 | }; |
| 65 | 156 | ||
| 66 | } // namespace Kernel | 157 | } // namespace Kernel |
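Editor's note: the new `Update`/`CoalesceForUpdate` pair implements a classic interval-map pattern: split the blocks covering the target range, rewrite their state, then merge adjacent blocks whose properties became identical (`CheckState` asserts exactly this invariant). The toy program below is a sketch of that pattern only, using `std::map` and a string state instead of yuzu's intrusive red-black tree, `KMemoryBlock`, and slab-allocated nodes; every name in it is an assumption for illustration, not code from this commit.

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <iterator>
#include <map>
#include <string>

using Addr = std::uint64_t;
constexpr Addr PageSize = 0x1000;

struct Region {
    Addr end;          // one past the last byte of the region
    std::string state; // stand-in for KMemoryState/permission/attribute
};

// key = start address; each entry describes [start, Region::end).
using RegionMap = std::map<Addr, Region>;

// Carve [address, address + num_pages * PageSize) out of the containing
// region and give it a new state (mirrors the split step of Update).
void Update(RegionMap& map, Addr address, std::size_t num_pages, const std::string& state) {
    const Addr end = address + num_pages * PageSize;
    auto it = std::prev(map.upper_bound(address)); // region containing 'address'
    const Region old = it->second;

    if (it->first < address) {
        it->second.end = address;              // left remainder keeps old state
    } else {
        map.erase(it);
    }
    map[address] = Region{end, state};         // updated middle piece
    if (end < old.end) {
        map[end] = Region{old.end, old.state}; // right remainder keeps old state
    }
}

// Merge adjacent regions with identical state
// (mirrors CoalesceForUpdate / the invariant checked by CheckState).
void Coalesce(RegionMap& map) {
    for (auto it = map.begin(); it != map.end();) {
        auto next = std::next(it);
        if (next != map.end() && it->second.state == next->second.state) {
            it->second.end = next->second.end;
            map.erase(next);
        } else {
            ++it;
        }
    }
}

int main() {
    RegionMap map;
    map[0x0000000] = Region{0x1000000, "Free"}; // one free block spanning the space

    Update(map, 0x200000, 16, "Normal"); // splits Free into Free | Normal | Free
    Update(map, 0x210000, 16, "Normal"); // adjacent range, same state
    Coalesce(map);                       // merges the two Normal pieces

    for (const auto& [start, r] : map) {
        std::printf("[%#9llx, %#9llx) %s\n", static_cast<unsigned long long>(start),
                    static_cast<unsigned long long>(r.end), r.state.c_str());
    }
    return 0;
}
```

Running it prints three regions: the two adjacent "Normal" updates coalesce into a single [0x200000, 0x220000) entry, which is the same end state the real manager reaches, except that it pre-allocates its worst-case two blocks through `KMemoryBlockManagerUpdateAllocator` so the split step can never fail part-way through an update.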