| author | 2022-10-01 14:08:47 -0700 |
|---|---|
| committer | 2022-10-18 19:13:35 -0700 |
| commit | ff26190d422599ded0166cba686e7456c59163a5 (patch) |
| tree | 5f5e7e11d67f4eb2b8a14608974a2bd86c3934f3 /src |
| parent | video_core: renderer_vulkan: vk_query_cache: Avoid shutdown crash in QueryPoo... (diff) |
| download | yuzu-ff26190d422599ded0166cba686e7456c59163a5.tar.gz yuzu-ff26190d422599ded0166cba686e7456c59163a5.tar.xz yuzu-ff26190d422599ded0166cba686e7456c59163a5.zip |
core: hle: kernel: k_page_table: Impl. LockForUn/MapDeviceAddressSpace, cleanup.
Diffstat (limited to 'src')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | src/core/hle/kernel/k_page_table.cpp | 881 |
| -rw-r--r-- | src/core/hle/kernel/k_page_table.h | 285 |
| -rw-r--r-- | src/core/hle/service/nvdrv/devices/nvmap.cpp | 3 |
3 files changed, 624 insertions, 545 deletions
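The commit title abbreviates its subject: it implements `KPageTable::LockForMapDeviceAddressSpace` and `KPageTable::LockForUnmapDeviceAddressSpace`, which pin a range of process memory while a device mapping (e.g. one created through `nvmap`) refers to it. As orientation for the diff below, here is a minimal sketch of the shape such a lock takes, assuming the conventions visible in the patch (`m_`-prefixed members, `R_TRY`, `KScopedLightLock`); `UpdateLock` and `ShareToDevice` are assumed names for illustration, not code quoted from the commit:

```cpp
// Sketch only: plausible shape of the map-side device lock, not the
// verbatim implementation added by this commit.
Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size,
                                                KMemoryPermission perm) {
    // Take the table lock while we inspect and update the block list.
    KScopedLightLock lk(m_general_lock);

    // The range must be device-mappable and not otherwise locked.
    size_t num_allocator_blocks{};
    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
                                 KMemoryState::FlagCanDeviceMap, KMemoryState::FlagCanDeviceMap,
                                 perm, perm, KMemoryAttribute::Locked, KMemoryAttribute::None));

    // Create an update allocator, as every block update in this file does.
    Result allocator_result{ResultSuccess};
    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                 m_memory_block_slab_manager,
                                                 num_allocator_blocks);
    R_TRY(allocator_result);

    // Mark the range as shared with the device so unmap paths refuse to touch it.
    // (UpdateLock/ShareToDevice are assumed helper names.)
    m_memory_block_manager.UpdateLock(std::addressof(allocator), address, size / PageSize,
                                      &KMemoryBlock::ShareToDevice, KMemoryPermission::None);

    return ResultSuccess;
}
```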
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 2cf46af0a..fcffc0b88 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -25,7 +25,7 @@ namespace {
| 25 | 25 | ||
| 26 | using namespace Common::Literals; | 26 | using namespace Common::Literals; |
| 27 | 27 | ||
| 28 | constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) { | 28 | constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) { |
| 29 | switch (as_type) { | 29 | switch (as_type) { |
| 30 | case FileSys::ProgramAddressSpaceType::Is32Bit: | 30 | case FileSys::ProgramAddressSpaceType::Is32Bit: |
| 31 | case FileSys::ProgramAddressSpaceType::Is32BitNoMap: | 31 | case FileSys::ProgramAddressSpaceType::Is32BitNoMap: |
@@ -43,28 +43,29 @@ constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceT
| 43 | } // namespace | 43 | } // namespace |
| 44 | 44 | ||
| 45 | KPageTable::KPageTable(Core::System& system_) | 45 | KPageTable::KPageTable(Core::System& system_) |
| 46 | : general_lock{system_.Kernel()}, map_physical_memory_lock{system_.Kernel()}, system{system_} {} | 46 | : m_general_lock{system_.Kernel()}, |
| | | 47 | m_map_physical_memory_lock{system_.Kernel()}, m_system{system_} {} |
| 47 | 48 | ||
| 48 | KPageTable::~KPageTable() = default; | 49 | KPageTable::~KPageTable() = default; |
| 49 | 50 | ||
| 50 | Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, | 51 | Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, |
| 51 | VAddr code_addr, std::size_t code_size, | 52 | VAddr code_addr, size_t code_size, |
| 52 | KMemoryBlockSlabManager* mem_block_slab_manager, | 53 | KMemoryBlockSlabManager* mem_block_slab_manager, |
| 53 | KMemoryManager::Pool pool) { | 54 | KMemoryManager::Pool pool) { |
| 54 | 55 | ||
| 55 | const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) { | 56 | const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) { |
| 56 | return KAddressSpaceInfo::GetAddressSpaceStart(address_space_width, type); | 57 | return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type); |
| 57 | }; | 58 | }; |
| 58 | const auto GetSpaceSize = [this](KAddressSpaceInfo::Type type) { | 59 | const auto GetSpaceSize = [this](KAddressSpaceInfo::Type type) { |
| 59 | return KAddressSpaceInfo::GetAddressSpaceSize(address_space_width, type); | 60 | return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type); |
| 60 | }; | 61 | }; |
| 61 | 62 | ||
| 62 | // Set our width and heap/alias sizes | 63 | // Set our width and heap/alias sizes |
| 63 | address_space_width = GetAddressSpaceWidthFromType(as_type); | 64 | m_address_space_width = GetAddressSpaceWidthFromType(as_type); |
| 64 | const VAddr start = 0; | 65 | const VAddr start = 0; |
| 65 | const VAddr end{1ULL << address_space_width}; | 66 | const VAddr end{1ULL << m_address_space_width}; |
| 66 | std::size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)}; | 67 | size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)}; |
| 67 | std::size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)}; | 68 | size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)}; |
| 68 | 69 | ||
| 69 | ASSERT(code_addr < code_addr + code_size); | 70 | ASSERT(code_addr < code_addr + code_size); |
| 70 | ASSERT(code_addr + code_size - 1 <= end - 1); | 71 | ASSERT(code_addr + code_size - 1 <= end - 1); |
@@ -76,67 +77,68 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
| 76 | } | 77 | } |
| 77 | 78 | ||
| 78 | // Set code regions and determine remaining | 79 | // Set code regions and determine remaining |
| 79 | constexpr std::size_t RegionAlignment{2_MiB}; | 80 | constexpr size_t RegionAlignment{2_MiB}; |
| 80 | VAddr process_code_start{}; | 81 | VAddr process_code_start{}; |
| 81 | VAddr process_code_end{}; | 82 | VAddr process_code_end{}; |
| 82 | std::size_t stack_region_size{}; | 83 | size_t stack_region_size{}; |
| 83 | std::size_t kernel_map_region_size{}; | 84 | size_t kernel_map_region_size{}; |
| 84 | 85 | ||
| 85 | if (address_space_width == 39) { | 86 | if (m_address_space_width == 39) { |
| 86 | alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias); | 87 | alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias); |
| 87 | heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap); | 88 | heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap); |
| 88 | stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack); | 89 | stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack); |
| 89 | kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); | 90 | kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); |
| 90 | code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit); | 91 | m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit); |
| 91 | code_region_end = code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit); | 92 | m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit); |
| 92 | alias_code_region_start = code_region_start; | 93 | m_alias_code_region_start = m_code_region_start; |
| 93 | alias_code_region_end = code_region_end; | 94 | m_alias_code_region_end = m_code_region_end; |
| 94 | process_code_start = Common::AlignDown(code_addr, RegionAlignment); | 95 | process_code_start = Common::AlignDown(code_addr, RegionAlignment); |
| 95 | process_code_end = Common::AlignUp(code_addr + code_size, RegionAlignment); | 96 | process_code_end = Common::AlignUp(code_addr + code_size, RegionAlignment); |
| 96 | } else { | 97 | } else { |
| 97 | stack_region_size = 0; | 98 | stack_region_size = 0; |
| 98 | kernel_map_region_size = 0; | 99 | kernel_map_region_size = 0; |
| 99 | code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall); | 100 | m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall); |
| 100 | code_region_end = code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); | 101 | m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); |
| 101 | stack_region_start = code_region_start; | 102 | m_stack_region_start = m_code_region_start; |
| 102 | alias_code_region_start = code_region_start; | 103 | m_alias_code_region_start = m_code_region_start; |
| 103 | alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) + | 104 | m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) + |
| 104 | GetSpaceSize(KAddressSpaceInfo::Type::MapLarge); | 105 | GetSpaceSize(KAddressSpaceInfo::Type::MapLarge); |
| 105 | stack_region_end = code_region_end; | 106 | m_stack_region_end = m_code_region_end; |
| 106 | kernel_map_region_start = code_region_start; | 107 | m_kernel_map_region_start = m_code_region_start; |
| 107 | kernel_map_region_end = code_region_end; | 108 | m_kernel_map_region_end = m_code_region_end; |
| 108 | process_code_start = code_region_start; | 109 | process_code_start = m_code_region_start; |
| 109 | process_code_end = code_region_end; | 110 | process_code_end = m_code_region_end; |
| 110 | } | 111 | } |
| 111 | 112 | ||
| 112 | // Set other basic fields | 113 | // Set other basic fields |
| 113 | is_aslr_enabled = enable_aslr; | 114 | m_enable_aslr = enable_aslr; |
| 114 | address_space_start = start; | 115 | m_enable_device_address_space_merge = false; |
| 115 | address_space_end = end; | 116 | m_address_space_start = start; |
| 116 | is_kernel = false; | 117 | m_address_space_end = end; |
| 117 | memory_block_slab_manager = mem_block_slab_manager; | 118 | m_is_kernel = false; |
| | | 119 | m_memory_block_slab_manager = mem_block_slab_manager; |
| 118 | 120 | ||
| 119 | // Determine the region we can place our undetermined regions in | 121 | // Determine the region we can place our undetermined regions in |
| 120 | VAddr alloc_start{}; | 122 | VAddr alloc_start{}; |
| 121 | std::size_t alloc_size{}; | 123 | size_t alloc_size{}; |
| 122 | if ((process_code_start - code_region_start) >= (end - process_code_end)) { | 124 | if ((process_code_start - m_code_region_start) >= (end - process_code_end)) { |
| 123 | alloc_start = code_region_start; | 125 | alloc_start = m_code_region_start; |
| 124 | alloc_size = process_code_start - code_region_start; | 126 | alloc_size = process_code_start - m_code_region_start; |
| 125 | } else { | 127 | } else { |
| 126 | alloc_start = process_code_end; | 128 | alloc_start = process_code_end; |
| 127 | alloc_size = end - process_code_end; | 129 | alloc_size = end - process_code_end; |
| 128 | } | 130 | } |
| 129 | const std::size_t needed_size{ | 131 | const size_t needed_size{ |
| 130 | (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)}; | 132 | (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)}; |
| 131 | if (alloc_size < needed_size) { | 133 | if (alloc_size < needed_size) { |
| 132 | ASSERT(false); | 134 | ASSERT(false); |
| 133 | return ResultOutOfMemory; | 135 | return ResultOutOfMemory; |
| 134 | } | 136 | } |
| 135 | 137 | ||
| 136 | const std::size_t remaining_size{alloc_size - needed_size}; | 138 | const size_t remaining_size{alloc_size - needed_size}; |
| 137 | 139 | ||
| 138 | // Determine random placements for each region | 140 | // Determine random placements for each region |
| 139 | std::size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{}; | 141 | size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{}; |
| 140 | if (enable_aslr) { | 142 | if (enable_aslr) { |
| 141 | alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * | 143 | alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * |
| 142 | RegionAlignment; | 144 | RegionAlignment; |
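The randomization above boils down to one formula: each region draws a random offset into the leftover space, quantized to the 2 MiB `RegionAlignment`. A self-contained illustration (with `std::mt19937_64` standing in for `KSystemControl::GenerateRandomRange`, which is not reproduced here):

```cpp
#include <cstddef>
#include <random>

constexpr std::size_t RegionAlignment = 2ULL * 1024 * 1024; // 2 MiB, as in the patch

// Pick an aligned offset in [0, remaining_size]; this plays the role of
// KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * RegionAlignment.
std::size_t PickAslrOffset(std::size_t remaining_size) {
    static std::mt19937_64 rng{std::random_device{}()};
    std::uniform_int_distribution<std::size_t> dist(0, remaining_size / RegionAlignment);
    return dist(rng) * RegionAlignment;
}
```

The next hunk then de-overlaps the regions: for each pair, the region that drew the larger offset is shifted up by the other region's size, which is why every comparison (`alias_rnd <= heap_rnd` and so on) adds one region's size to the other's start and end.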
@@ -149,124 +151,124 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
| 149 | } | 151 | } |
| 150 | 152 | ||
| 151 | // Setup heap and alias regions | 153 | // Setup heap and alias regions |
| 152 | alias_region_start = alloc_start + alias_rnd; | 154 | m_alias_region_start = alloc_start + alias_rnd; |
| 153 | alias_region_end = alias_region_start + alias_region_size; | 155 | m_alias_region_end = m_alias_region_start + alias_region_size; |
| 154 | heap_region_start = alloc_start + heap_rnd; | 156 | m_heap_region_start = alloc_start + heap_rnd; |
| 155 | heap_region_end = heap_region_start + heap_region_size; | 157 | m_heap_region_end = m_heap_region_start + heap_region_size; |
| 156 | 158 | ||
| 157 | if (alias_rnd <= heap_rnd) { | 159 | if (alias_rnd <= heap_rnd) { |
| 158 | heap_region_start += alias_region_size; | 160 | m_heap_region_start += alias_region_size; |
| 159 | heap_region_end += alias_region_size; | 161 | m_heap_region_end += alias_region_size; |
| 160 | } else { | 162 | } else { |
| 161 | alias_region_start += heap_region_size; | 163 | m_alias_region_start += heap_region_size; |
| 162 | alias_region_end += heap_region_size; | 164 | m_alias_region_end += heap_region_size; |
| 163 | } | 165 | } |
| 164 | 166 | ||
| 165 | // Setup stack region | 167 | // Setup stack region |
| 166 | if (stack_region_size) { | 168 | if (stack_region_size) { |
| 167 | stack_region_start = alloc_start + stack_rnd; | 169 | m_stack_region_start = alloc_start + stack_rnd; |
| 168 | stack_region_end = stack_region_start + stack_region_size; | 170 | m_stack_region_end = m_stack_region_start + stack_region_size; |
| 169 | 171 | ||
| 170 | if (alias_rnd < stack_rnd) { | 172 | if (alias_rnd < stack_rnd) { |
| 171 | stack_region_start += alias_region_size; | 173 | m_stack_region_start += alias_region_size; |
| 172 | stack_region_end += alias_region_size; | 174 | m_stack_region_end += alias_region_size; |
| 173 | } else { | 175 | } else { |
| 174 | alias_region_start += stack_region_size; | 176 | m_alias_region_start += stack_region_size; |
| 175 | alias_region_end += stack_region_size; | 177 | m_alias_region_end += stack_region_size; |
| 176 | } | 178 | } |
| 177 | 179 | ||
| 178 | if (heap_rnd < stack_rnd) { | 180 | if (heap_rnd < stack_rnd) { |
| 179 | stack_region_start += heap_region_size; | 181 | m_stack_region_start += heap_region_size; |
| 180 | stack_region_end += heap_region_size; | 182 | m_stack_region_end += heap_region_size; |
| 181 | } else { | 183 | } else { |
| 182 | heap_region_start += stack_region_size; | 184 | m_heap_region_start += stack_region_size; |
| 183 | heap_region_end += stack_region_size; | 185 | m_heap_region_end += stack_region_size; |
| 184 | } | 186 | } |
| 185 | } | 187 | } |
| 186 | 188 | ||
| 187 | // Setup kernel map region | 189 | // Setup kernel map region |
| 188 | if (kernel_map_region_size) { | 190 | if (kernel_map_region_size) { |
| 189 | kernel_map_region_start = alloc_start + kmap_rnd; | 191 | m_kernel_map_region_start = alloc_start + kmap_rnd; |
| 190 | kernel_map_region_end = kernel_map_region_start + kernel_map_region_size; | 192 | m_kernel_map_region_end = m_kernel_map_region_start + kernel_map_region_size; |
| 191 | 193 | ||
| 192 | if (alias_rnd < kmap_rnd) { | 194 | if (alias_rnd < kmap_rnd) { |
| 193 | kernel_map_region_start += alias_region_size; | 195 | m_kernel_map_region_start += alias_region_size; |
| 194 | kernel_map_region_end += alias_region_size; | 196 | m_kernel_map_region_end += alias_region_size; |
| 195 | } else { | 197 | } else { |
| 196 | alias_region_start += kernel_map_region_size; | 198 | m_alias_region_start += kernel_map_region_size; |
| 197 | alias_region_end += kernel_map_region_size; | 199 | m_alias_region_end += kernel_map_region_size; |
| 198 | } | 200 | } |
| 199 | 201 | ||
| 200 | if (heap_rnd < kmap_rnd) { | 202 | if (heap_rnd < kmap_rnd) { |
| 201 | kernel_map_region_start += heap_region_size; | 203 | m_kernel_map_region_start += heap_region_size; |
| 202 | kernel_map_region_end += heap_region_size; | 204 | m_kernel_map_region_end += heap_region_size; |
| 203 | } else { | 205 | } else { |
| 204 | heap_region_start += kernel_map_region_size; | 206 | m_heap_region_start += kernel_map_region_size; |
| 205 | heap_region_end += kernel_map_region_size; | 207 | m_heap_region_end += kernel_map_region_size; |
| 206 | } | 208 | } |
| 207 | 209 | ||
| 208 | if (stack_region_size) { | 210 | if (stack_region_size) { |
| 209 | if (stack_rnd < kmap_rnd) { | 211 | if (stack_rnd < kmap_rnd) { |
| 210 | kernel_map_region_start += stack_region_size; | 212 | m_kernel_map_region_start += stack_region_size; |
| 211 | kernel_map_region_end += stack_region_size; | 213 | m_kernel_map_region_end += stack_region_size; |
| 212 | } else { | 214 | } else { |
| 213 | stack_region_start += kernel_map_region_size; | 215 | m_stack_region_start += kernel_map_region_size; |
| 214 | stack_region_end += kernel_map_region_size; | 216 | m_stack_region_end += kernel_map_region_size; |
| 215 | } | 217 | } |
| 216 | } | 218 | } |
| 217 | } | 219 | } |
| 218 | 220 | ||
| 219 | // Set heap members | 221 | // Set heap members |
| 220 | current_heap_end = heap_region_start; | 222 | m_current_heap_end = m_heap_region_start; |
| 221 | max_heap_size = 0; | 223 | m_max_heap_size = 0; |
| 222 | max_physical_memory_size = 0; | 224 | m_max_physical_memory_size = 0; |
| 223 | 225 | ||
| 224 | // Ensure that our regions are inside our address space | 226 | // Ensure that our regions are inside our address space |
| 225 | auto IsInAddressSpace = [&](VAddr addr) { | 227 | auto IsInAddressSpace = [&](VAddr addr) { |
| 226 | return address_space_start <= addr && addr <= address_space_end; | 228 | return m_address_space_start <= addr && addr <= m_address_space_end; |
| 227 | }; | 229 | }; |
| 228 | ASSERT(IsInAddressSpace(alias_region_start)); | 230 | ASSERT(IsInAddressSpace(m_alias_region_start)); |
| 229 | ASSERT(IsInAddressSpace(alias_region_end)); | 231 | ASSERT(IsInAddressSpace(m_alias_region_end)); |
| 230 | ASSERT(IsInAddressSpace(heap_region_start)); | 232 | ASSERT(IsInAddressSpace(m_heap_region_start)); |
| 231 | ASSERT(IsInAddressSpace(heap_region_end)); | 233 | ASSERT(IsInAddressSpace(m_heap_region_end)); |
| 232 | ASSERT(IsInAddressSpace(stack_region_start)); | 234 | ASSERT(IsInAddressSpace(m_stack_region_start)); |
| 233 | ASSERT(IsInAddressSpace(stack_region_end)); | 235 | ASSERT(IsInAddressSpace(m_stack_region_end)); |
| 234 | ASSERT(IsInAddressSpace(kernel_map_region_start)); | 236 | ASSERT(IsInAddressSpace(m_kernel_map_region_start)); |
| 235 | ASSERT(IsInAddressSpace(kernel_map_region_end)); | 237 | ASSERT(IsInAddressSpace(m_kernel_map_region_end)); |
| 236 | 238 | ||
| 237 | // Ensure that we selected regions that don't overlap | 239 | // Ensure that we selected regions that don't overlap |
| 238 | const VAddr alias_start{alias_region_start}; | 240 | const VAddr alias_start{m_alias_region_start}; |
| 239 | const VAddr alias_last{alias_region_end - 1}; | 241 | const VAddr alias_last{m_alias_region_end - 1}; |
| 240 | const VAddr heap_start{heap_region_start}; | 242 | const VAddr heap_start{m_heap_region_start}; |
| 241 | const VAddr heap_last{heap_region_end - 1}; | 243 | const VAddr heap_last{m_heap_region_end - 1}; |
| 242 | const VAddr stack_start{stack_region_start}; | 244 | const VAddr stack_start{m_stack_region_start}; |
| 243 | const VAddr stack_last{stack_region_end - 1}; | 245 | const VAddr stack_last{m_stack_region_end - 1}; |
| 244 | const VAddr kmap_start{kernel_map_region_start}; | 246 | const VAddr kmap_start{m_kernel_map_region_start}; |
| 245 | const VAddr kmap_last{kernel_map_region_end - 1}; | 247 | const VAddr kmap_last{m_kernel_map_region_end - 1}; |
| 246 | ASSERT(alias_last < heap_start || heap_last < alias_start); | 248 | ASSERT(alias_last < heap_start || heap_last < alias_start); |
| 247 | ASSERT(alias_last < stack_start || stack_last < alias_start); | 249 | ASSERT(alias_last < stack_start || stack_last < alias_start); |
| 248 | ASSERT(alias_last < kmap_start || kmap_last < alias_start); | 250 | ASSERT(alias_last < kmap_start || kmap_last < alias_start); |
| 249 | ASSERT(heap_last < stack_start || stack_last < heap_start); | 251 | ASSERT(heap_last < stack_start || stack_last < heap_start); |
| 250 | ASSERT(heap_last < kmap_start || kmap_last < heap_start); | 252 | ASSERT(heap_last < kmap_start || kmap_last < heap_start); |
| 251 | 253 | ||
| 252 | current_heap_end = heap_region_start; | 254 | m_current_heap_end = m_heap_region_start; |
| 253 | max_heap_size = 0; | 255 | m_max_heap_size = 0; |
| 254 | mapped_physical_memory_size = 0; | 256 | m_mapped_physical_memory_size = 0; |
| 255 | memory_pool = pool; | 257 | m_memory_pool = pool; |
| 256 | 258 | ||
| 257 | page_table_impl.Resize(address_space_width, PageBits); | 259 | m_page_table_impl.Resize(m_address_space_width, PageBits); |
| 258 | 260 | ||
| 259 | return memory_block_manager.Initialize(address_space_start, address_space_end, | 261 | return m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, |
| 260 | memory_block_slab_manager); | 262 | m_memory_block_slab_manager); |
| 261 | } | 263 | } |
| 262 | 264 | ||
| 263 | void KPageTable::Finalize() { | 265 | void KPageTable::Finalize() { |
| 264 | memory_block_manager.Finalize(memory_block_slab_manager, [&](VAddr addr, u64 size) { | 266 | m_memory_block_manager.Finalize(m_memory_block_slab_manager, [&](VAddr addr, u64 size) { |
| 265 | system.Memory().UnmapRegion(page_table_impl, addr, size); | 267 | m_system.Memory().UnmapRegion(m_page_table_impl, addr, size); |
| 266 | }); | 268 | }); |
| 267 | } | 269 | } |
| 268 | 270 | ||
| 269 | Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryState state, | 271 | Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState state, |
| 270 | KMemoryPermission perm) { | 272 | KMemoryPermission perm) { |
| 271 | const u64 size{num_pages * PageSize}; | 273 | const u64 size{num_pages * PageSize}; |
| 272 | 274 | ||
@@ -274,7 +276,7 @@ Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryStat
| 274 | R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory); | 276 | R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory); |
| 275 | 277 | ||
| 276 | // Lock the table. | 278 | // Lock the table. |
| 277 | KScopedLightLock lk(general_lock); | 279 | KScopedLightLock lk(m_general_lock); |
| 278 | 280 | ||
| 279 | // Verify that the destination memory is unmapped. | 281 | // Verify that the destination memory is unmapped. |
| 280 | R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free, | 282 | R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free, |
@@ -284,43 +286,43 @@ Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryStat
| 284 | // Create an update allocator. | 286 | // Create an update allocator. |
| 285 | Result allocator_result{ResultSuccess}; | 287 | Result allocator_result{ResultSuccess}; |
| 286 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 288 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 287 | memory_block_slab_manager); | 289 | m_memory_block_slab_manager); |
| 288 | 290 | ||
| 289 | // Allocate and open. | 291 | // Allocate and open. |
| 290 | KPageGroup pg; | 292 | KPageGroup pg; |
| 291 | R_TRY(system.Kernel().MemoryManager().AllocateAndOpen( | 293 | R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen( |
| 292 | &pg, num_pages, | 294 | &pg, num_pages, |
| 293 | KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, allocation_option))); | 295 | KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocation_option))); |
| 294 | 296 | ||
| 295 | R_TRY(Operate(addr, num_pages, pg, OperationType::MapGroup)); | 297 | R_TRY(Operate(addr, num_pages, pg, OperationType::MapGroup)); |
| 296 | 298 | ||
| 297 | // Update the blocks. | 299 | // Update the blocks. |
| 298 | memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, | 300 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, |
| 299 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | 301 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, |
| 300 | KMemoryBlockDisableMergeAttribute::None); | 302 | KMemoryBlockDisableMergeAttribute::None); |
| 301 | 303 | ||
| 302 | return ResultSuccess; | 304 | return ResultSuccess; |
| 303 | } | 305 | } |
| 304 | 306 | ||
| 305 | Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) { | 307 | Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size) { |
| 306 | // Validate the mapping request. | 308 | // Validate the mapping request. |
| 307 | R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), | 309 | R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), |
| 308 | ResultInvalidMemoryRegion); | 310 | ResultInvalidMemoryRegion); |
| 309 | 311 | ||
| 310 | // Lock the table. | 312 | // Lock the table. |
| 311 | KScopedLightLock lk(general_lock); | 313 | KScopedLightLock lk(m_general_lock); |
| 312 | 314 | ||
| 313 | // Verify that the source memory is normal heap. | 315 | // Verify that the source memory is normal heap. |
| 314 | KMemoryState src_state{}; | 316 | KMemoryState src_state{}; |
| 315 | KMemoryPermission src_perm{}; | 317 | KMemoryPermission src_perm{}; |
| 316 | std::size_t num_src_allocator_blocks{}; | 318 | size_t num_src_allocator_blocks{}; |
| 317 | R_TRY(this->CheckMemoryState(&src_state, &src_perm, nullptr, &num_src_allocator_blocks, | 319 | R_TRY(this->CheckMemoryState(&src_state, &src_perm, nullptr, &num_src_allocator_blocks, |
| 318 | src_address, size, KMemoryState::All, KMemoryState::Normal, | 320 | src_address, size, KMemoryState::All, KMemoryState::Normal, |
| 319 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, | 321 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, |
| 320 | KMemoryAttribute::All, KMemoryAttribute::None)); | 322 | KMemoryAttribute::All, KMemoryAttribute::None)); |
| 321 | 323 | ||
| 322 | // Verify that the destination memory is unmapped. | 324 | // Verify that the destination memory is unmapped. |
| 323 | std::size_t num_dst_allocator_blocks{}; | 325 | size_t num_dst_allocator_blocks{}; |
| 324 | R_TRY(this->CheckMemoryState(&num_dst_allocator_blocks, dst_address, size, KMemoryState::All, | 326 | R_TRY(this->CheckMemoryState(&num_dst_allocator_blocks, dst_address, size, KMemoryState::All, |
| 325 | KMemoryState::Free, KMemoryPermission::None, | 327 | KMemoryState::Free, KMemoryPermission::None, |
| 326 | KMemoryPermission::None, KMemoryAttribute::None, | 328 | KMemoryPermission::None, KMemoryAttribute::None, |
@@ -328,20 +330,22 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size
| 328 | 330 | ||
| 329 | // Create an update allocator for the source. | 331 | // Create an update allocator for the source. |
| 330 | Result src_allocator_result{ResultSuccess}; | 332 | Result src_allocator_result{ResultSuccess}; |
| 331 | KMemoryBlockManagerUpdateAllocator src_allocator( | 333 | KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), |
| 332 | std::addressof(src_allocator_result), memory_block_slab_manager, num_src_allocator_blocks); | 334 | m_memory_block_slab_manager, |
| | | 335 | num_src_allocator_blocks); |
| 333 | R_TRY(src_allocator_result); | 336 | R_TRY(src_allocator_result); |
| 334 | 337 | ||
| 335 | // Create an update allocator for the destination. | 338 | // Create an update allocator for the destination. |
| 336 | Result dst_allocator_result{ResultSuccess}; | 339 | Result dst_allocator_result{ResultSuccess}; |
| 337 | KMemoryBlockManagerUpdateAllocator dst_allocator( | 340 | KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), |
| 338 | std::addressof(dst_allocator_result), memory_block_slab_manager, num_dst_allocator_blocks); | 341 | m_memory_block_slab_manager, |
| | | 342 | num_dst_allocator_blocks); |
| 339 | R_TRY(dst_allocator_result); | 343 | R_TRY(dst_allocator_result); |
| 340 | 344 | ||
| 341 | // Map the code memory. | 345 | // Map the code memory. |
| 342 | { | 346 | { |
| 343 | // Determine the number of pages being operated on. | 347 | // Determine the number of pages being operated on. |
| 344 | const std::size_t num_pages = size / PageSize; | 348 | const size_t num_pages = size / PageSize; |
| 345 | 349 | ||
| 346 | // Create page groups for the memory being mapped. | 350 | // Create page groups for the memory being mapped. |
| 347 | KPageGroup pg; | 351 | KPageGroup pg; |
@@ -366,37 +370,37 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size
| 366 | unprot_guard.Cancel(); | 370 | unprot_guard.Cancel(); |
| 367 | 371 | ||
| 368 | // Apply the memory block updates. | 372 | // Apply the memory block updates. |
| 369 | memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, | 373 | m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, |
| 370 | src_state, new_perm, KMemoryAttribute::Locked, | 374 | src_state, new_perm, KMemoryAttribute::Locked, |
| 371 | KMemoryBlockDisableMergeAttribute::Locked, | 375 | KMemoryBlockDisableMergeAttribute::Locked, |
| 372 | KMemoryBlockDisableMergeAttribute::None); | 376 | KMemoryBlockDisableMergeAttribute::None); |
| 373 | memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, | 377 | m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, |
| 374 | KMemoryState::AliasCode, new_perm, KMemoryAttribute::None, | 378 | KMemoryState::AliasCode, new_perm, KMemoryAttribute::None, |
| 375 | KMemoryBlockDisableMergeAttribute::Normal, | 379 | KMemoryBlockDisableMergeAttribute::Normal, |
| 376 | KMemoryBlockDisableMergeAttribute::None); | 380 | KMemoryBlockDisableMergeAttribute::None); |
| 377 | } | 381 | } |
| 378 | 382 | ||
| 379 | return ResultSuccess; | 383 | return ResultSuccess; |
| 380 | } | 384 | } |
| 381 | 385 | ||
| 382 | Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size, | 386 | Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size, |
| 383 | ICacheInvalidationStrategy icache_invalidation_strategy) { | 387 | ICacheInvalidationStrategy icache_invalidation_strategy) { |
| 384 | // Validate the mapping request. | 388 | // Validate the mapping request. |
| 385 | R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), | 389 | R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), |
| 386 | ResultInvalidMemoryRegion); | 390 | ResultInvalidMemoryRegion); |
| 387 | 391 | ||
| 388 | // Lock the table. | 392 | // Lock the table. |
| 389 | KScopedLightLock lk(general_lock); | 393 | KScopedLightLock lk(m_general_lock); |
| 390 | 394 | ||
| 391 | // Verify that the source memory is locked normal heap. | 395 | // Verify that the source memory is locked normal heap. |
| 392 | std::size_t num_src_allocator_blocks{}; | 396 | size_t num_src_allocator_blocks{}; |
| 393 | R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size, | 397 | R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size, |
| 394 | KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None, | 398 | KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None, |
| 395 | KMemoryPermission::None, KMemoryAttribute::All, | 399 | KMemoryPermission::None, KMemoryAttribute::All, |
| 396 | KMemoryAttribute::Locked)); | 400 | KMemoryAttribute::Locked)); |
| 397 | 401 | ||
| 398 | // Verify that the destination memory is aliasable code. | 402 | // Verify that the destination memory is aliasable code. |
| 399 | std::size_t num_dst_allocator_blocks{}; | 403 | size_t num_dst_allocator_blocks{}; |
| 400 | R_TRY(this->CheckMemoryStateContiguous( | 404 | R_TRY(this->CheckMemoryStateContiguous( |
| 401 | std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias, | 405 | std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias, |
| 402 | KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None, | 406 | KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None, |
@@ -405,7 +409,7 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si
| 405 | // Determine whether any pages being unmapped are code. | 409 | // Determine whether any pages being unmapped are code. |
| 406 | bool any_code_pages = false; | 410 | bool any_code_pages = false; |
| 407 | { | 411 | { |
| 408 | KMemoryBlockManager::const_iterator it = memory_block_manager.FindIterator(dst_address); | 412 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address); |
| 409 | while (true) { | 413 | while (true) { |
| 410 | // Get the memory info. | 414 | // Get the memory info. |
| 411 | const KMemoryInfo info = it->GetMemoryInfo(); | 415 | const KMemoryInfo info = it->GetMemoryInfo(); |
@@ -431,9 +435,9 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si
| 431 | SCOPE_EXIT({ | 435 | SCOPE_EXIT({ |
| 432 | if (reprotected_pages && any_code_pages) { | 436 | if (reprotected_pages && any_code_pages) { |
| 433 | if (icache_invalidation_strategy == ICacheInvalidationStrategy::InvalidateRange) { | 437 | if (icache_invalidation_strategy == ICacheInvalidationStrategy::InvalidateRange) { |
| 434 | system.InvalidateCpuInstructionCacheRange(dst_address, size); | 438 | m_system.InvalidateCpuInstructionCacheRange(dst_address, size); |
| 435 | } else { | 439 | } else { |
| 436 | system.InvalidateCpuInstructionCaches(); | 440 | m_system.InvalidateCpuInstructionCaches(); |
| 437 | } | 441 | } |
| 438 | } | 442 | } |
| 439 | }); | 443 | }); |
@@ -441,19 +445,19 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si
| 441 | // Unmap. | 445 | // Unmap. |
| 442 | { | 446 | { |
| 443 | // Determine the number of pages being operated on. | 447 | // Determine the number of pages being operated on. |
| 444 | const std::size_t num_pages = size / PageSize; | 448 | const size_t num_pages = size / PageSize; |
| 445 | 449 | ||
| 446 | // Create an update allocator for the source. | 450 | // Create an update allocator for the source. |
| 447 | Result src_allocator_result{ResultSuccess}; | 451 | Result src_allocator_result{ResultSuccess}; |
| 448 | KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), | 452 | KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), |
| 449 | memory_block_slab_manager, | 453 | m_memory_block_slab_manager, |
| 450 | num_src_allocator_blocks); | 454 | num_src_allocator_blocks); |
| 451 | R_TRY(src_allocator_result); | 455 | R_TRY(src_allocator_result); |
| 452 | 456 | ||
| 453 | // Create an update allocator for the destination. | 457 | // Create an update allocator for the destination. |
| 454 | Result dst_allocator_result{ResultSuccess}; | 458 | Result dst_allocator_result{ResultSuccess}; |
| 455 | KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), | 459 | KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), |
| 456 | memory_block_slab_manager, | 460 | m_memory_block_slab_manager, |
| 457 | num_dst_allocator_blocks); | 461 | num_dst_allocator_blocks); |
| 458 | R_TRY(dst_allocator_result); | 462 | R_TRY(dst_allocator_result); |
| 459 | 463 | ||
@@ -465,14 +469,14 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si
| 465 | OperationType::ChangePermissions)); | 469 | OperationType::ChangePermissions)); |
| 466 | 470 | ||
| 467 | // Apply the memory block updates. | 471 | // Apply the memory block updates. |
| 468 | memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, | 472 | m_memory_block_manager.Update( |
| 469 | KMemoryState::None, KMemoryPermission::None, | 473 | std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None, |
| 470 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | 474 | KMemoryPermission::None, KMemoryAttribute::None, |
| 471 | KMemoryBlockDisableMergeAttribute::Normal); | 475 | KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal); |
| 472 | memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, | 476 | m_memory_block_manager.Update( |
| 473 | KMemoryState::Normal, KMemoryPermission::UserReadWrite, | 477 | std::addressof(src_allocator), src_address, num_pages, KMemoryState::Normal, |
| 474 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | 478 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, |
| 475 | KMemoryBlockDisableMergeAttribute::Locked); | 479 | KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked); |
| 476 | 480 | ||
| 477 | // Note that we reprotected pages. | 481 | // Note that we reprotected pages. |
| 478 | reprotected_pages = true; | 482 | reprotected_pages = true; |
@@ -481,9 +485,8 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si
| 481 | return ResultSuccess; | 485 | return ResultSuccess; |
| 482 | } | 486 | } |
| 483 | 487 | ||
| 484 | VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages, | 488 | VAddr KPageTable::FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages, |
| 485 | std::size_t num_pages, std::size_t alignment, std::size_t offset, | 489 | size_t alignment, size_t offset, size_t guard_pages) { |
| 486 | std::size_t guard_pages) { | ||
| 487 | VAddr address = 0; | 490 | VAddr address = 0; |
| 488 | 491 | ||
| 489 | if (num_pages <= region_num_pages) { | 492 | if (num_pages <= region_num_pages) { |
@@ -492,8 +495,8 @@ VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
| 492 | } | 495 | } |
| 493 | // Find the first free area. | 496 | // Find the first free area. |
| 494 | if (address == 0) { | 497 | if (address == 0) { |
| 495 | address = memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages, | 498 | address = m_memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages, |
| 496 | alignment, offset, guard_pages); | 499 | alignment, offset, guard_pages); |
| 497 | } | 500 | } |
| 498 | } | 501 | } |
| 499 | 502 | ||
@@ -511,7 +514,8 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
| 511 | // Begin traversal. | 514 | // Begin traversal. |
| 512 | Common::PageTable::TraversalContext context; | 515 | Common::PageTable::TraversalContext context; |
| 513 | Common::PageTable::TraversalEntry next_entry; | 516 | Common::PageTable::TraversalEntry next_entry; |
| 514 | R_UNLESS(page_table_impl.BeginTraversal(next_entry, context, addr), ResultInvalidCurrentMemory); | 517 | R_UNLESS(m_page_table_impl.BeginTraversal(next_entry, context, addr), |
| | | 518 | ResultInvalidCurrentMemory); |
| 515 | 519 | ||
| 516 | // Prepare tracking variables. | 520 | // Prepare tracking variables. |
| 517 | PAddr cur_addr = next_entry.phys_addr; | 521 | PAddr cur_addr = next_entry.phys_addr; |
@@ -519,9 +523,9 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
| 519 | size_t tot_size = cur_size; | 523 | size_t tot_size = cur_size; |
| 520 | 524 | ||
| 521 | // Iterate, adding to group as we go. | 525 | // Iterate, adding to group as we go. |
| 522 | const auto& memory_layout = system.Kernel().MemoryLayout(); | 526 | const auto& memory_layout = m_system.Kernel().MemoryLayout(); |
| 523 | while (tot_size < size) { | 527 | while (tot_size < size) { |
| 524 | R_UNLESS(page_table_impl.ContinueTraversal(next_entry, context), | 528 | R_UNLESS(m_page_table_impl.ContinueTraversal(next_entry, context), |
| 525 | ResultInvalidCurrentMemory); | 529 | ResultInvalidCurrentMemory); |
| 526 | 530 | ||
| 527 | if (next_entry.phys_addr != (cur_addr + cur_size)) { | 531 | if (next_entry.phys_addr != (cur_addr + cur_size)) { |
@@ -557,7 +561,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu
| 557 | 561 | ||
| 558 | const size_t size = num_pages * PageSize; | 562 | const size_t size = num_pages * PageSize; |
| 559 | const auto& pg = pg_ll.Nodes(); | 563 | const auto& pg = pg_ll.Nodes(); |
| 560 | const auto& memory_layout = system.Kernel().MemoryLayout(); | 564 | const auto& memory_layout = m_system.Kernel().MemoryLayout(); |
| 561 | 565 | ||
| 562 | // Empty groups are necessarily invalid. | 566 | // Empty groups are necessarily invalid. |
| 563 | if (pg.empty()) { | 567 | if (pg.empty()) { |
@@ -584,7 +588,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu
| 584 | // Begin traversal. | 588 | // Begin traversal. |
| 585 | Common::PageTable::TraversalContext context; | 589 | Common::PageTable::TraversalContext context; |
| 586 | Common::PageTable::TraversalEntry next_entry; | 590 | Common::PageTable::TraversalEntry next_entry; |
| 587 | if (!page_table_impl.BeginTraversal(next_entry, context, addr)) { | 591 | if (!m_page_table_impl.BeginTraversal(next_entry, context, addr)) { |
| 588 | return false; | 592 | return false; |
| 589 | } | 593 | } |
| 590 | 594 | ||
@@ -595,7 +599,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu
| 595 | 599 | ||
| 596 | // Iterate, comparing expected to actual. | 600 | // Iterate, comparing expected to actual. |
| 597 | while (tot_size < size) { | 601 | while (tot_size < size) { |
| 598 | if (!page_table_impl.ContinueTraversal(next_entry, context)) { | 602 | if (!m_page_table_impl.ContinueTraversal(next_entry, context)) { |
| 599 | return false; | 603 | return false; |
| 600 | } | 604 | } |
| 601 | 605 | ||
@@ -641,11 +645,11 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu
| 641 | return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize); | 645 | return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize); |
| 642 | } | 646 | } |
| 643 | 647 | ||
| 644 | Result KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table, | 648 | Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table, |
| 645 | VAddr src_addr) { | 649 | VAddr src_addr) { |
| 646 | KScopedLightLock lk(general_lock); | 650 | KScopedLightLock lk(m_general_lock); |
| 647 | 651 | ||
| 648 | const std::size_t num_pages{size / PageSize}; | 652 | const size_t num_pages{size / PageSize}; |
| 649 | 653 | ||
| 650 | // Check that the memory is mapped in the destination process. | 654 | // Check that the memory is mapped in the destination process. |
| 651 | size_t num_allocator_blocks; | 655 | size_t num_allocator_blocks; |
@@ -663,48 +667,48 @@ Result KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTab
| 663 | // Create an update allocator. | 667 | // Create an update allocator. |
| 664 | Result allocator_result{ResultSuccess}; | 668 | Result allocator_result{ResultSuccess}; |
| 665 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 669 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 666 | memory_block_slab_manager, num_allocator_blocks); | 670 | m_memory_block_slab_manager, num_allocator_blocks); |
| 667 | R_TRY(allocator_result); | 671 | R_TRY(allocator_result); |
| 668 | 672 | ||
| 669 | CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap)); | 673 | CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap)); |
| 670 | 674 | ||
| 671 | // Apply the memory block update. | 675 | // Apply the memory block update. |
| 672 | memory_block_manager.Update(std::addressof(allocator), dst_addr, num_pages, KMemoryState::Free, | 676 | m_memory_block_manager.Update(std::addressof(allocator), dst_addr, num_pages, |
| 673 | KMemoryPermission::None, KMemoryAttribute::None, | 677 | KMemoryState::Free, KMemoryPermission::None, |
| 674 | KMemoryBlockDisableMergeAttribute::None, | 678 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, |
| 675 | KMemoryBlockDisableMergeAttribute::Normal); | 679 | KMemoryBlockDisableMergeAttribute::Normal); |
| 676 | 680 | ||
| 677 | system.InvalidateCpuInstructionCaches(); | 681 | m_system.InvalidateCpuInstructionCaches(); |
| 678 | 682 | ||
| 679 | return ResultSuccess; | 683 | return ResultSuccess; |
| 680 | } | 684 | } |
| 681 | 685 | ||
| 682 | Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { | 686 | Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { |
| 683 | // Lock the physical memory lock. | 687 | // Lock the physical memory lock. |
| 684 | KScopedLightLock map_phys_mem_lk(map_physical_memory_lock); | 688 | KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); |
| 685 | 689 | ||
| 686 | // Calculate the last address for convenience. | 690 | // Calculate the last address for convenience. |
| 687 | const VAddr last_address = address + size - 1; | 691 | const VAddr last_address = address + size - 1; |
| 688 | 692 | ||
| 689 | // Define iteration variables. | 693 | // Define iteration variables. |
| 690 | VAddr cur_address; | 694 | VAddr cur_address; |
| 691 | std::size_t mapped_size; | 695 | size_t mapped_size; |
| 692 | 696 | ||
| 693 | // The entire mapping process can be retried. | 697 | // The entire mapping process can be retried. |
| 694 | while (true) { | 698 | while (true) { |
| 695 | // Check if the memory is already mapped. | 699 | // Check if the memory is already mapped. |
| 696 | { | 700 | { |
| 697 | // Lock the table. | 701 | // Lock the table. |
| 698 | KScopedLightLock lk(general_lock); | 702 | KScopedLightLock lk(m_general_lock); |
| 699 | 703 | ||
| 700 | // Iterate over the memory. | 704 | // Iterate over the memory. |
| 701 | cur_address = address; | 705 | cur_address = address; |
| 702 | mapped_size = 0; | 706 | mapped_size = 0; |
| 703 | 707 | ||
| 704 | auto it = memory_block_manager.FindIterator(cur_address); | 708 | auto it = m_memory_block_manager.FindIterator(cur_address); |
| 705 | while (true) { | 709 | while (true) { |
| 706 | // Check that the iterator is valid. | 710 | // Check that the iterator is valid. |
| 707 | ASSERT(it != memory_block_manager.end()); | 711 | ASSERT(it != m_memory_block_manager.end()); |
| 708 | 712 | ||
| 709 | // Get the memory info. | 713 | // Get the memory info. |
| 710 | const KMemoryInfo info = it->GetMemoryInfo(); | 714 | const KMemoryInfo info = it->GetMemoryInfo(); |
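`MapPhysicalMemory` uses a check/allocate/re-check structure: it measures what is already mapped under the table lock, drops the lock to allocate (allocation must not hold the light lock), then re-validates under the lock; the outer `while (true)` retries from the top if another thread changed the layout in between. A self-contained sketch of that pattern, with `std::mutex` standing in for `KScopedLightLock`:

```cpp
#include <cstddef>
#include <mutex>

// Toy stand-in for the page table's bookkeeping.
struct Table {
    std::mutex lock;
    std::size_t mapped = 0; // bytes already mapped in the target range
};

bool MapRetrying(Table& t, std::size_t size) {
    while (true) {
        std::size_t seen;
        {
            std::scoped_lock lk(t.lock); // first pass: measure under the lock
            seen = t.mapped;
            if (seen >= size) {
                return true; // everything is already mapped
            }
        }

        // Allocate backing pages here, without the lock held (elided).

        std::scoped_lock lk(t.lock); // second pass: re-validate under the lock
        if (t.mapped != seen) {
            continue; // someone else mapped meanwhile: release and retry
        }
        t.mapped = size; // commit: map pages and update the block list
        return true;
    }
}
```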
@@ -735,20 +739,20 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
| 735 | { | 739 | { |
| 736 | // Reserve the memory from the process resource limit. | 740 | // Reserve the memory from the process resource limit. |
| 737 | KScopedResourceReservation memory_reservation( | 741 | KScopedResourceReservation memory_reservation( |
| 738 | system.Kernel().CurrentProcess()->GetResourceLimit(), | 742 | m_system.Kernel().CurrentProcess()->GetResourceLimit(), |
| 739 | LimitableResource::PhysicalMemory, size - mapped_size); | 743 | LimitableResource::PhysicalMemory, size - mapped_size); |
| 740 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | 744 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); |
| 741 | 745 | ||
| 742 | // Allocate pages for the new memory. | 746 | // Allocate pages for the new memory. |
| 743 | KPageGroup pg; | 747 | KPageGroup pg; |
| 744 | R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess( | 748 | R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess( |
| 745 | &pg, (size - mapped_size) / PageSize, | 749 | &pg, (size - mapped_size) / PageSize, |
| 746 | KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0)); | 750 | KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0)); |
| 747 | 751 | ||
| 748 | // Map the memory. | 752 | // Map the memory. |
| 749 | { | 753 | { |
| 750 | // Lock the table. | 754 | // Lock the table. |
| 751 | KScopedLightLock lk(general_lock); | 755 | KScopedLightLock lk(m_general_lock); |
| 752 | 756 | ||
| 753 | size_t num_allocator_blocks = 0; | 757 | size_t num_allocator_blocks = 0; |
| 754 | 758 | ||
@@ -758,10 +762,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
| 758 | size_t checked_mapped_size = 0; | 762 | size_t checked_mapped_size = 0; |
| 759 | cur_address = address; | 763 | cur_address = address; |
| 760 | 764 | ||
| 761 | auto it = memory_block_manager.FindIterator(cur_address); | 765 | auto it = m_memory_block_manager.FindIterator(cur_address); |
| 762 | while (true) { | 766 | while (true) { |
| 763 | // Check that the iterator is valid. | 767 | // Check that the iterator is valid. |
| 764 | ASSERT(it != memory_block_manager.end()); | 768 | ASSERT(it != m_memory_block_manager.end()); |
| 765 | 769 | ||
| 766 | // Get the memory info. | 770 | // Get the memory info. |
| 767 | const KMemoryInfo info = it->GetMemoryInfo(); | 771 | const KMemoryInfo info = it->GetMemoryInfo(); |
@@ -805,7 +809,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
| 805 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); | 809 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); |
| 806 | Result allocator_result{ResultSuccess}; | 810 | Result allocator_result{ResultSuccess}; |
| 807 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 811 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 808 | memory_block_slab_manager, | 812 | m_memory_block_slab_manager, |
| 809 | num_allocator_blocks); | 813 | num_allocator_blocks); |
| 810 | R_TRY(allocator_result); | 814 | R_TRY(allocator_result); |
| 811 | 815 | ||
@@ -818,10 +822,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
| 818 | // Iterate, unmapping the pages. | 822 | // Iterate, unmapping the pages. |
| 819 | cur_address = address; | 823 | cur_address = address; |
| 820 | 824 | ||
| 821 | auto it = memory_block_manager.FindIterator(cur_address); | 825 | auto it = m_memory_block_manager.FindIterator(cur_address); |
| 822 | while (true) { | 826 | while (true) { |
| 823 | // Check that the iterator is valid. | 827 | // Check that the iterator is valid. |
| 824 | ASSERT(it != memory_block_manager.end()); | 828 | ASSERT(it != m_memory_block_manager.end()); |
| 825 | 829 | ||
| 826 | // Get the memory info. | 830 | // Get the memory info. |
| 827 | const KMemoryInfo info = it->GetMemoryInfo(); | 831 | const KMemoryInfo info = it->GetMemoryInfo(); |
@@ -857,10 +861,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
| 857 | PAddr pg_phys_addr = pg_it->GetAddress(); | 861 | PAddr pg_phys_addr = pg_it->GetAddress(); |
| 858 | size_t pg_pages = pg_it->GetNumPages(); | 862 | size_t pg_pages = pg_it->GetNumPages(); |
| 859 | 863 | ||
| 860 | auto it = memory_block_manager.FindIterator(cur_address); | 864 | auto it = m_memory_block_manager.FindIterator(cur_address); |
| 861 | while (true) { | 865 | while (true) { |
| 862 | // Check that the iterator is valid. | 866 | // Check that the iterator is valid. |
| 863 | ASSERT(it != memory_block_manager.end()); | 867 | ASSERT(it != m_memory_block_manager.end()); |
| 864 | 868 | ||
| 865 | // Get the memory info. | 869 | // Get the memory info. |
| 866 | const KMemoryInfo info = it->GetMemoryInfo(); | 870 | const KMemoryInfo info = it->GetMemoryInfo(); |
@@ -913,10 +917,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
| 913 | memory_reservation.Commit(); | 917 | memory_reservation.Commit(); |
| 914 | 918 | ||
| 915 | // Increase our tracked mapped size. | 919 | // Increase our tracked mapped size. |
| 916 | mapped_physical_memory_size += (size - mapped_size); | 920 | m_mapped_physical_memory_size += (size - mapped_size); |
| 917 | 921 | ||
| 918 | // Update the relevant memory blocks. | 922 | // Update the relevant memory blocks. |
| 919 | memory_block_manager.UpdateIfMatch( | 923 | m_memory_block_manager.UpdateIfMatch( |
| 920 | std::addressof(allocator), address, size / PageSize, KMemoryState::Free, | 924 | std::addressof(allocator), address, size / PageSize, KMemoryState::Free, |
| 921 | KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal, | 925 | KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal, |
| 922 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None); | 926 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None); |
@@ -930,20 +934,20 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
| 930 | } | 934 | } |
| 931 | } | 935 | } |
| 932 | 936 | ||
| 933 | Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { | 937 | Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { |
| 934 | // Lock the physical memory lock. | 938 | // Lock the physical memory lock. |
| 935 | KScopedLightLock map_phys_mem_lk(map_physical_memory_lock); | 939 | KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); |
| 936 | 940 | ||
| 937 | // Lock the table. | 941 | // Lock the table. |
| 938 | KScopedLightLock lk(general_lock); | 942 | KScopedLightLock lk(m_general_lock); |
| 939 | 943 | ||
| 940 | // Calculate the last address for convenience. | 944 | // Calculate the last address for convenience. |
| 941 | const VAddr last_address = address + size - 1; | 945 | const VAddr last_address = address + size - 1; |
| 942 | 946 | ||
| 943 | // Define iteration variables. | 947 | // Define iteration variables. |
| 944 | VAddr cur_address = 0; | 948 | VAddr cur_address = 0; |
| 945 | std::size_t mapped_size = 0; | 949 | size_t mapped_size = 0; |
| 946 | std::size_t num_allocator_blocks = 0; | 950 | size_t num_allocator_blocks = 0; |
| 947 | 951 | ||
| 948 | // Check if the memory is mapped. | 952 | // Check if the memory is mapped. |
| 949 | { | 953 | { |
@@ -951,10 +955,10 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
| 951 | cur_address = address; | 955 | cur_address = address; |
| 952 | mapped_size = 0; | 956 | mapped_size = 0; |
| 953 | 957 | ||
| 954 | auto it = memory_block_manager.FindIterator(cur_address); | 958 | auto it = m_memory_block_manager.FindIterator(cur_address); |
| 955 | while (true) { | 959 | while (true) { |
| 956 | // Check that the iterator is valid. | 960 | // Check that the iterator is valid. |
| 957 | ASSERT(it != memory_block_manager.end()); | 961 | ASSERT(it != m_memory_block_manager.end()); |
| 958 | 962 | ||
| 959 | // Get the memory info. | 963 | // Get the memory info. |
| 960 | const KMemoryInfo info = it->GetMemoryInfo(); | 964 | const KMemoryInfo info = it->GetMemoryInfo(); |
@@ -1053,7 +1057,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
| 1053 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); | 1057 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); |
| 1054 | Result allocator_result{ResultSuccess}; | 1058 | Result allocator_result{ResultSuccess}; |
| 1055 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 1059 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 1056 | memory_block_slab_manager, num_allocator_blocks); | 1060 | m_memory_block_slab_manager, num_allocator_blocks); |
| 1057 | R_TRY(allocator_result); | 1061 | R_TRY(allocator_result); |
| 1058 | 1062 | ||
| 1059 | // Reset the current tracking address, and make sure we clean up on failure. | 1063 | // Reset the current tracking address, and make sure we clean up on failure. |
@@ -1064,7 +1068,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
| 1064 | cur_address = address; | 1068 | cur_address = address; |
| 1065 | 1069 | ||
| 1066 | // Iterate over the memory we unmapped. | 1070 | // Iterate over the memory we unmapped. |
| 1067 | auto it = memory_block_manager.FindIterator(cur_address); | 1071 | auto it = m_memory_block_manager.FindIterator(cur_address); |
| 1068 | auto pg_it = pg.Nodes().begin(); | 1072 | auto pg_it = pg.Nodes().begin(); |
| 1069 | PAddr pg_phys_addr = pg_it->GetAddress(); | 1073 | PAddr pg_phys_addr = pg_it->GetAddress(); |
| 1070 | size_t pg_pages = pg_it->GetNumPages(); | 1074 | size_t pg_pages = pg_it->GetNumPages(); |
@@ -1119,10 +1123,10 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
| 1119 | }); | 1123 | }); |
| 1120 | 1124 | ||
| 1121 | // Iterate over the memory, unmapping as we go. | 1125 | // Iterate over the memory, unmapping as we go. |
| 1122 | auto it = memory_block_manager.FindIterator(cur_address); | 1126 | auto it = m_memory_block_manager.FindIterator(cur_address); |
| 1123 | while (true) { | 1127 | while (true) { |
| 1124 | // Check that the iterator is valid. | 1128 | // Check that the iterator is valid. |
| 1125 | ASSERT(it != memory_block_manager.end()); | 1129 | ASSERT(it != m_memory_block_manager.end()); |
| 1126 | 1130 | ||
| 1127 | // Get the memory info. | 1131 | // Get the memory info. |
| 1128 | const KMemoryInfo info = it->GetMemoryInfo(); | 1132 | const KMemoryInfo info = it->GetMemoryInfo(); |
| @@ -1149,20 +1153,20 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { | |||
| 1149 | } | 1153 | } |
| 1150 | 1154 | ||
| 1151 | // Release the memory resource. | 1155 | // Release the memory resource. |
| 1152 | mapped_physical_memory_size -= mapped_size; | 1156 | m_mapped_physical_memory_size -= mapped_size; |
| 1153 | auto process{system.Kernel().CurrentProcess()}; | 1157 | auto process{m_system.Kernel().CurrentProcess()}; |
| 1154 | process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size); | 1158 | process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size); |
| 1155 | 1159 | ||
| 1156 | // Update memory blocks. | 1160 | // Update memory blocks. |
| 1157 | memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, | 1161 | m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, |
| 1158 | KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None, | 1162 | KMemoryState::Free, KMemoryPermission::None, |
| 1159 | KMemoryBlockDisableMergeAttribute::None, | 1163 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, |
| 1160 | KMemoryBlockDisableMergeAttribute::None); | 1164 | KMemoryBlockDisableMergeAttribute::None); |
| 1161 | 1165 | ||
| 1162 | // TODO(bunnei): This is a workaround until the next set of changes, where we add reference | 1166 | // TODO(bunnei): This is a workaround until the next set of changes, where we add reference |
| 1163 | // counting for mapped pages. Until then, we must manually close the reference to the page | 1167 | // counting for mapped pages. Until then, we must manually close the reference to the page |
| 1164 | // group. | 1168 | // group. |
| 1165 | system.Kernel().MemoryManager().Close(pg); | 1169 | m_system.Kernel().MemoryManager().Close(pg); |
| 1166 | 1170 | ||
| 1167 | // We succeeded. | 1171 | // We succeeded. |
| 1168 | remap_guard.Cancel(); | 1172 | remap_guard.Cancel(); |
| @@ -1170,9 +1174,9 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { | |||
| 1170 | return ResultSuccess; | 1174 | return ResultSuccess; |
| 1171 | } | 1175 | } |
| 1172 | 1176 | ||
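UnmapPhysicalMemory first walks the block list to measure how much of [address, address + size) is actually mapped, clipping blocks that straddle either end, and only then commits to the unmap and releases that many bytes back to the resource limit. A minimal standalone model of that measuring pass, with a std::map standing in for m_memory_block_manager and a hypothetical Block type (neither is a yuzu type):

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <map>

// Hypothetical stand-in for a memory block: [base, base + size) plus a mapped flag.
struct Block {
    std::uint64_t size;
    bool mapped;
};

// Sum the mapped bytes inside [address, address + size), clipping blocks at both ends.
std::uint64_t MeasureMapped(const std::map<std::uint64_t, Block>& blocks,
                            std::uint64_t address, std::uint64_t size) {
    const std::uint64_t last = address + size;
    std::uint64_t mapped = 0;
    // Start at the last block whose base is <= address.
    auto it = blocks.upper_bound(address);
    if (it != blocks.begin()) {
        --it;
    }
    for (; it != blocks.end() && it->first < last; ++it) {
        if (!it->second.mapped) {
            continue;
        }
        const std::uint64_t lo = std::max(address, it->first);
        const std::uint64_t hi = std::min(last, it->first + it->second.size);
        if (lo < hi) {
            mapped += hi - lo;
        }
    }
    return mapped;
}

int main() {
    std::map<std::uint64_t, Block> blocks{
        {0x0000, {0x4000, true}}, {0x4000, {0x2000, false}}, {0x6000, {0x2000, true}}};
    std::cout << std::hex << MeasureMapped(blocks, 0x2000, 0x5000) << '\n'; // 0x3000
}
```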
| 1173 | Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, std::size_t size) { | 1177 | Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size) { |
| 1174 | // Lock the table. | 1178 | // Lock the table. |
| 1175 | KScopedLightLock lk(general_lock); | 1179 | KScopedLightLock lk(m_general_lock); |
| 1176 | 1180 | ||
| 1177 | // Validate that the source address's state is valid. | 1181 | // Validate that the source address's state is valid. |
| 1178 | KMemoryState src_state; | 1182 | KMemoryState src_state; |
| @@ -1192,19 +1196,21 @@ Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, std::size_t s | |||
| 1192 | 1196 | ||
| 1193 | // Create an update allocator for the source. | 1197 | // Create an update allocator for the source. |
| 1194 | Result src_allocator_result{ResultSuccess}; | 1198 | Result src_allocator_result{ResultSuccess}; |
| 1195 | KMemoryBlockManagerUpdateAllocator src_allocator( | 1199 | KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), |
| 1196 | std::addressof(src_allocator_result), memory_block_slab_manager, num_src_allocator_blocks); | 1200 | m_memory_block_slab_manager, |
| 1201 | num_src_allocator_blocks); | ||
| 1197 | R_TRY(src_allocator_result); | 1202 | R_TRY(src_allocator_result); |
| 1198 | 1203 | ||
| 1199 | // Create an update allocator for the destination. | 1204 | // Create an update allocator for the destination. |
| 1200 | Result dst_allocator_result{ResultSuccess}; | 1205 | Result dst_allocator_result{ResultSuccess}; |
| 1201 | KMemoryBlockManagerUpdateAllocator dst_allocator( | 1206 | KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), |
| 1202 | std::addressof(dst_allocator_result), memory_block_slab_manager, num_dst_allocator_blocks); | 1207 | m_memory_block_slab_manager, |
| 1208 | num_dst_allocator_blocks); | ||
| 1203 | R_TRY(dst_allocator_result); | 1209 | R_TRY(dst_allocator_result); |
| 1204 | 1210 | ||
| 1205 | // Map the memory. | 1211 | // Map the memory. |
| 1206 | KPageGroup page_linked_list; | 1212 | KPageGroup page_linked_list; |
| 1207 | const std::size_t num_pages{size / PageSize}; | 1213 | const size_t num_pages{size / PageSize}; |
| 1208 | const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>( | 1214 | const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>( |
| 1209 | KMemoryPermission::KernelRead | KMemoryPermission::NotMapped); | 1215 | KMemoryPermission::KernelRead | KMemoryPermission::NotMapped); |
| 1210 | const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked; | 1216 | const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked; |
| @@ -1223,21 +1229,21 @@ Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, std::size_t s | |||
| 1223 | } | 1229 | } |
| 1224 | 1230 | ||
| 1225 | // Apply the memory block updates. | 1231 | // Apply the memory block updates. |
| 1226 | memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, | 1232 | m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, |
| 1227 | new_src_perm, new_src_attr, | 1233 | new_src_perm, new_src_attr, |
| 1228 | KMemoryBlockDisableMergeAttribute::Locked, | 1234 | KMemoryBlockDisableMergeAttribute::Locked, |
| 1229 | KMemoryBlockDisableMergeAttribute::None); | 1235 | KMemoryBlockDisableMergeAttribute::None); |
| 1230 | memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, | 1236 | m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, |
| 1231 | KMemoryState::Stack, KMemoryPermission::UserReadWrite, | 1237 | KMemoryState::Stack, KMemoryPermission::UserReadWrite, |
| 1232 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | 1238 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, |
| 1233 | KMemoryBlockDisableMergeAttribute::None); | 1239 | KMemoryBlockDisableMergeAttribute::None); |
| 1234 | 1240 | ||
| 1235 | return ResultSuccess; | 1241 | return ResultSuccess; |
| 1236 | } | 1242 | } |
| 1237 | 1243 | ||
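Note the paired block updates MapMemory performs: the source range keeps its physical pages but drops to kernel-read/not-mapped with the Locked attribute, while the destination alias becomes plain Stack memory with user read/write. A compile-time sketch of that transition pair, using hypothetical flag enums rather than the real KMemoryPermission/KMemoryAttribute:

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical flag sets; the real permission/attribute enums carry many more bits.
enum class Perm : std::uint8_t {
    None = 0,
    KernelRead = 1 << 0,
    NotMapped = 1 << 1,
    UserRead = 1 << 2,
    UserWrite = 1 << 3,
};
enum class Attr : std::uint8_t { None = 0, Locked = 1 << 0 };

constexpr Perm operator|(Perm a, Perm b) {
    return static_cast<Perm>(static_cast<std::uint8_t>(a) | static_cast<std::uint8_t>(b));
}

struct BlockUpdate {
    Perm perm;
    Attr attr;
};

// The source stays resident but becomes inaccessible to its owner and locked against
// further state changes; the destination alias gets ordinary read/write permissions.
constexpr BlockUpdate SourceUpdate() { return {Perm::KernelRead | Perm::NotMapped, Attr::Locked}; }
constexpr BlockUpdate DestUpdate() { return {Perm::UserRead | Perm::UserWrite, Attr::None}; }

int main() {
    static_assert(static_cast<std::uint8_t>(SourceUpdate().perm) == 0b11, "kernel-read | not-mapped");
    static_assert(SourceUpdate().attr == Attr::Locked, "source must be locked");
    std::puts("source: kernel-read + not-mapped + locked; destination: user r/w");
}
```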
| 1238 | Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, std::size_t size) { | 1244 | Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size) { |
| 1239 | // Lock the table. | 1245 | // Lock the table. |
| 1240 | KScopedLightLock lk(general_lock); | 1246 | KScopedLightLock lk(m_general_lock); |
| 1241 | 1247 | ||
| 1242 | // Validate that the source address's state is valid. | 1248 | // Validate that the source address's state is valid. |
| 1243 | KMemoryState src_state; | 1249 | KMemoryState src_state; |
| @@ -1258,19 +1264,21 @@ Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, std::size_t | |||
| 1258 | 1264 | ||
| 1259 | // Create an update allocator for the source. | 1265 | // Create an update allocator for the source. |
| 1260 | Result src_allocator_result{ResultSuccess}; | 1266 | Result src_allocator_result{ResultSuccess}; |
| 1261 | KMemoryBlockManagerUpdateAllocator src_allocator( | 1267 | KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), |
| 1262 | std::addressof(src_allocator_result), memory_block_slab_manager, num_src_allocator_blocks); | 1268 | m_memory_block_slab_manager, |
| 1269 | num_src_allocator_blocks); | ||
| 1263 | R_TRY(src_allocator_result); | 1270 | R_TRY(src_allocator_result); |
| 1264 | 1271 | ||
| 1265 | // Create an update allocator for the destination. | 1272 | // Create an update allocator for the destination. |
| 1266 | Result dst_allocator_result{ResultSuccess}; | 1273 | Result dst_allocator_result{ResultSuccess}; |
| 1267 | KMemoryBlockManagerUpdateAllocator dst_allocator( | 1274 | KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), |
| 1268 | std::addressof(dst_allocator_result), memory_block_slab_manager, num_dst_allocator_blocks); | 1275 | m_memory_block_slab_manager, |
| 1276 | num_dst_allocator_blocks); | ||
| 1269 | R_TRY(dst_allocator_result); | 1277 | R_TRY(dst_allocator_result); |
| 1270 | 1278 | ||
| 1271 | KPageGroup src_pages; | 1279 | KPageGroup src_pages; |
| 1272 | KPageGroup dst_pages; | 1280 | KPageGroup dst_pages; |
| 1273 | const std::size_t num_pages{size / PageSize}; | 1281 | const size_t num_pages{size / PageSize}; |
| 1274 | 1282 | ||
| 1275 | AddRegionToPages(src_address, num_pages, src_pages); | 1283 | AddRegionToPages(src_address, num_pages, src_pages); |
| 1276 | AddRegionToPages(dst_address, num_pages, dst_pages); | 1284 | AddRegionToPages(dst_address, num_pages, dst_pages); |
| @@ -1290,14 +1298,14 @@ Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, std::size_t | |||
| 1290 | } | 1298 | } |
| 1291 | 1299 | ||
| 1292 | // Apply the memory block updates. | 1300 | // Apply the memory block updates. |
| 1293 | memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, | 1301 | m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, |
| 1294 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, | 1302 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, |
| 1295 | KMemoryBlockDisableMergeAttribute::None, | 1303 | KMemoryBlockDisableMergeAttribute::None, |
| 1296 | KMemoryBlockDisableMergeAttribute::Locked); | 1304 | KMemoryBlockDisableMergeAttribute::Locked); |
| 1297 | memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, | 1305 | m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, |
| 1298 | KMemoryState::None, KMemoryPermission::None, KMemoryAttribute::None, | 1306 | KMemoryState::None, KMemoryPermission::None, |
| 1299 | KMemoryBlockDisableMergeAttribute::None, | 1307 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, |
| 1300 | KMemoryBlockDisableMergeAttribute::Normal); | 1308 | KMemoryBlockDisableMergeAttribute::Normal); |
| 1301 | 1309 | ||
| 1302 | return ResultSuccess; | 1310 | return ResultSuccess; |
| 1303 | } | 1311 | } |
| @@ -1312,7 +1320,7 @@ Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list, | |||
| 1312 | if (const auto result{ | 1320 | if (const auto result{ |
| 1313 | Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())}; | 1321 | Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())}; |
| 1314 | result.IsError()) { | 1322 | result.IsError()) { |
| 1315 | const std::size_t num_pages{(addr - cur_addr) / PageSize}; | 1323 | const size_t num_pages{(addr - cur_addr) / PageSize}; |
| 1316 | 1324 | ||
| 1317 | ASSERT(Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap) | 1325 | ASSERT(Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap) |
| 1318 | .IsSuccess()); | 1326 | .IsSuccess()); |
| @@ -1329,12 +1337,12 @@ Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list, | |||
| 1329 | Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state, | 1337 | Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state, |
| 1330 | KMemoryPermission perm) { | 1338 | KMemoryPermission perm) { |
| 1331 | // Check that the map is in range. | 1339 | // Check that the map is in range. |
| 1332 | const std::size_t num_pages{page_linked_list.GetNumPages()}; | 1340 | const size_t num_pages{page_linked_list.GetNumPages()}; |
| 1333 | const std::size_t size{num_pages * PageSize}; | 1341 | const size_t size{num_pages * PageSize}; |
| 1334 | R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory); | 1342 | R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory); |
| 1335 | 1343 | ||
| 1336 | // Lock the table. | 1344 | // Lock the table. |
| 1337 | KScopedLightLock lk(general_lock); | 1345 | KScopedLightLock lk(m_general_lock); |
| 1338 | 1346 | ||
| 1339 | // Check the memory state. | 1347 | // Check the memory state. |
| 1340 | R_TRY(this->CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free, | 1348 | R_TRY(this->CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free, |
| @@ -1344,23 +1352,22 @@ Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemory | |||
| 1344 | // Create an update allocator. | 1352 | // Create an update allocator. |
| 1345 | Result allocator_result{ResultSuccess}; | 1353 | Result allocator_result{ResultSuccess}; |
| 1346 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 1354 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 1347 | memory_block_slab_manager); | 1355 | m_memory_block_slab_manager); |
| 1348 | 1356 | ||
| 1349 | // Map the pages. | 1357 | // Map the pages. |
| 1350 | R_TRY(MapPages(address, page_linked_list, perm)); | 1358 | R_TRY(MapPages(address, page_linked_list, perm)); |
| 1351 | 1359 | ||
| 1352 | // Update the blocks. | 1360 | // Update the blocks. |
| 1353 | memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm, | 1361 | m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm, |
| 1354 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | 1362 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, |
| 1355 | KMemoryBlockDisableMergeAttribute::None); | 1363 | KMemoryBlockDisableMergeAttribute::None); |
| 1356 | 1364 | ||
| 1357 | return ResultSuccess; | 1365 | return ResultSuccess; |
| 1358 | } | 1366 | } |
| 1359 | 1367 | ||
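The internal MapPages overload above also shows the rollback idiom: page-group nodes are mapped one at a time, and on the first failure every page between the start address and the failure point is unmapped before the error propagates. The same try-then-roll-back shape as a standalone sketch; Map's failure injection via fail_at is a simulation knob, not a real API:

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

constexpr std::uint64_t kPageSize = 0x1000;

struct Node {
    std::uint64_t num_pages;
};

// Simulated low-level map; `fail_at` injects a failure on that node index so the
// rollback path can be exercised (it stands in for a real mapping error).
bool Map(std::size_t i, std::size_t fail_at) {
    return i != fail_at;
}
void Unmap(std::uint64_t addr, std::uint64_t pages) {
    std::cout << "rollback: unmap " << std::dec << pages << " pages at 0x" << std::hex << addr
              << '\n';
}

bool MapGroup(std::uint64_t base, const std::vector<Node>& nodes, std::size_t fail_at) {
    std::uint64_t cur = base;
    for (std::size_t i = 0; i < nodes.size(); ++i) {
        if (!Map(i, fail_at)) {
            // Everything in [base, cur) was mapped by earlier iterations, so unmap
            // exactly that many pages before propagating the error.
            Unmap(base, (cur - base) / kPageSize);
            return false;
        }
        cur += nodes[i].num_pages * kPageSize;
    }
    return true;
}

int main() {
    MapGroup(0x10000, {{2}, {3}, {1}}, 2); // third node fails -> the first 5 pages roll back
}
```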
| 1360 | Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, | 1368 | Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr, |
| 1361 | PAddr phys_addr, bool is_pa_valid, VAddr region_start, | 1369 | bool is_pa_valid, VAddr region_start, size_t region_num_pages, |
| 1362 | std::size_t region_num_pages, KMemoryState state, | 1370 | KMemoryState state, KMemoryPermission perm) { |
| 1363 | KMemoryPermission perm) { | ||
| 1364 | ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize); | 1371 | ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize); |
| 1365 | 1372 | ||
| 1366 | // Ensure this is a valid map request. | 1373 | // Ensure this is a valid map request. |
| @@ -1369,7 +1376,7 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t | |||
| 1369 | R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory); | 1376 | R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory); |
| 1370 | 1377 | ||
| 1371 | // Lock the table. | 1378 | // Lock the table. |
| 1372 | KScopedLightLock lk(general_lock); | 1379 | KScopedLightLock lk(m_general_lock); |
| 1373 | 1380 | ||
| 1374 | // Find a random address to map at. | 1381 | // Find a random address to map at. |
| 1375 | VAddr addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, | 1382 | VAddr addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, |
| @@ -1385,7 +1392,7 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t | |||
| 1385 | // Create an update allocator. | 1392 | // Create an update allocator. |
| 1386 | Result allocator_result{ResultSuccess}; | 1393 | Result allocator_result{ResultSuccess}; |
| 1387 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 1394 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 1388 | memory_block_slab_manager); | 1395 | m_memory_block_slab_manager); |
| 1389 | 1396 | ||
| 1390 | // Perform mapping operation. | 1397 | // Perform mapping operation. |
| 1391 | if (is_pa_valid) { | 1398 | if (is_pa_valid) { |
| @@ -1395,9 +1402,9 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t | |||
| 1395 | } | 1402 | } |
| 1396 | 1403 | ||
| 1397 | // Update the blocks. | 1404 | // Update the blocks. |
| 1398 | memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, | 1405 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, |
| 1399 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | 1406 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, |
| 1400 | KMemoryBlockDisableMergeAttribute::None); | 1407 | KMemoryBlockDisableMergeAttribute::None); |
| 1401 | 1408 | ||
| 1402 | // We successfully mapped the pages. | 1409 | // We successfully mapped the pages. |
| 1403 | *out_addr = addr; | 1410 | *out_addr = addr; |
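FindFreeArea here must produce an address that lies inside the candidate region, meets the alignment, and leaves room for num_pages; with ASLR enabled it also randomizes the choice. A first-fit-only version over a sorted free list (FreeRange is a hypothetical stand-in, and the guard-page and randomization logic of the real search is omitted):

```cpp
#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

constexpr std::uint64_t kPageSize = 0x1000;

struct FreeRange {
    std::uint64_t base;
    std::uint64_t num_pages;
};

// First-fit search: return the lowest aligned address inside some free range that
// can hold `num_pages`.
std::optional<std::uint64_t> FindFreeArea(const std::vector<FreeRange>& free_list,
                                          std::uint64_t num_pages, std::uint64_t alignment) {
    for (const auto& r : free_list) {
        const std::uint64_t aligned = (r.base + alignment - 1) & ~(alignment - 1);
        const std::uint64_t end = r.base + r.num_pages * kPageSize;
        if (aligned + num_pages * kPageSize <= end) {
            return aligned;
        }
    }
    return std::nullopt;
}

int main() {
    std::vector<FreeRange> free_list{{0x1800'0000, 4}, {0x2000'3000, 64}};
    if (auto addr = FindFreeArea(free_list, 16, 0x10000)) {
        std::cout << std::hex << *addr << '\n'; // 0x20010000: the first range is too small
    }
}
```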
| @@ -1424,12 +1431,12 @@ Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) { | |||
| 1424 | 1431 | ||
| 1425 | Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state) { | 1432 | Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state) { |
| 1426 | // Check that the unmap is in range. | 1433 | // Check that the unmap is in range. |
| 1427 | const std::size_t num_pages{page_linked_list.GetNumPages()}; | 1434 | const size_t num_pages{page_linked_list.GetNumPages()}; |
| 1428 | const std::size_t size{num_pages * PageSize}; | 1435 | const size_t size{num_pages * PageSize}; |
| 1429 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | 1436 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); |
| 1430 | 1437 | ||
| 1431 | // Lock the table. | 1438 | // Lock the table. |
| 1432 | KScopedLightLock lk(general_lock); | 1439 | KScopedLightLock lk(m_general_lock); |
| 1433 | 1440 | ||
| 1434 | // Check the memory state. | 1441 | // Check the memory state. |
| 1435 | size_t num_allocator_blocks; | 1442 | size_t num_allocator_blocks; |
| @@ -1441,31 +1448,31 @@ Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemo | |||
| 1441 | // Create an update allocator. | 1448 | // Create an update allocator. |
| 1442 | Result allocator_result{ResultSuccess}; | 1449 | Result allocator_result{ResultSuccess}; |
| 1443 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 1450 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 1444 | memory_block_slab_manager, num_allocator_blocks); | 1451 | m_memory_block_slab_manager, num_allocator_blocks); |
| 1445 | R_TRY(allocator_result); | 1452 | R_TRY(allocator_result); |
| 1446 | 1453 | ||
| 1447 | // Perform the unmap. | 1454 | // Perform the unmap. |
| 1448 | R_TRY(UnmapPages(address, page_linked_list)); | 1455 | R_TRY(UnmapPages(address, page_linked_list)); |
| 1449 | 1456 | ||
| 1450 | // Update the blocks. | 1457 | // Update the blocks. |
| 1451 | memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, | 1458 | m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, |
| 1452 | KMemoryPermission::None, KMemoryAttribute::None, | 1459 | KMemoryPermission::None, KMemoryAttribute::None, |
| 1453 | KMemoryBlockDisableMergeAttribute::None, | 1460 | KMemoryBlockDisableMergeAttribute::None, |
| 1454 | KMemoryBlockDisableMergeAttribute::Normal); | 1461 | KMemoryBlockDisableMergeAttribute::Normal); |
| 1455 | 1462 | ||
| 1456 | return ResultSuccess; | 1463 | return ResultSuccess; |
| 1457 | } | 1464 | } |
| 1458 | 1465 | ||
| 1459 | Result KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state) { | 1466 | Result KPageTable::UnmapPages(VAddr address, size_t num_pages, KMemoryState state) { |
| 1460 | // Check that the unmap is in range. | 1467 | // Check that the unmap is in range. |
| 1461 | const std::size_t size = num_pages * PageSize; | 1468 | const size_t size = num_pages * PageSize; |
| 1462 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | 1469 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); |
| 1463 | 1470 | ||
| 1464 | // Lock the table. | 1471 | // Lock the table. |
| 1465 | KScopedLightLock lk(general_lock); | 1472 | KScopedLightLock lk(m_general_lock); |
| 1466 | 1473 | ||
| 1467 | // Check the memory state. | 1474 | // Check the memory state. |
| 1468 | std::size_t num_allocator_blocks{}; | 1475 | size_t num_allocator_blocks{}; |
| 1469 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, | 1476 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, |
| 1470 | KMemoryState::All, state, KMemoryPermission::None, | 1477 | KMemoryState::All, state, KMemoryPermission::None, |
| 1471 | KMemoryPermission::None, KMemoryAttribute::All, | 1478 | KMemoryPermission::None, KMemoryAttribute::All, |
| @@ -1474,17 +1481,17 @@ Result KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState | |||
| 1474 | // Create an update allocator. | 1481 | // Create an update allocator. |
| 1475 | Result allocator_result{ResultSuccess}; | 1482 | Result allocator_result{ResultSuccess}; |
| 1476 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 1483 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 1477 | memory_block_slab_manager, num_allocator_blocks); | 1484 | m_memory_block_slab_manager, num_allocator_blocks); |
| 1478 | R_TRY(allocator_result); | 1485 | R_TRY(allocator_result); |
| 1479 | 1486 | ||
| 1480 | // Perform the unmap. | 1487 | // Perform the unmap. |
| 1481 | R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap)); | 1488 | R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap)); |
| 1482 | 1489 | ||
| 1483 | // Update the blocks. | 1490 | // Update the blocks. |
| 1484 | memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, | 1491 | m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, |
| 1485 | KMemoryPermission::None, KMemoryAttribute::None, | 1492 | KMemoryPermission::None, KMemoryAttribute::None, |
| 1486 | KMemoryBlockDisableMergeAttribute::None, | 1493 | KMemoryBlockDisableMergeAttribute::None, |
| 1487 | KMemoryBlockDisableMergeAttribute::Normal); | 1494 | KMemoryBlockDisableMergeAttribute::Normal); |
| 1488 | 1495 | ||
| 1489 | return ResultSuccess; | 1496 | return ResultSuccess; |
| 1490 | } | 1497 | } |
| @@ -1501,7 +1508,7 @@ Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t n | |||
| 1501 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | 1508 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); |
| 1502 | 1509 | ||
| 1503 | // Lock the table. | 1510 | // Lock the table. |
| 1504 | KScopedLightLock lk(general_lock); | 1511 | KScopedLightLock lk(m_general_lock); |
| 1505 | 1512 | ||
| 1506 | // Check if state allows us to create the group. | 1513 | // Check if state allows us to create the group. |
| 1507 | R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted, | 1514 | R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted, |
| @@ -1514,12 +1521,12 @@ Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t n | |||
| 1514 | return ResultSuccess; | 1521 | return ResultSuccess; |
| 1515 | } | 1522 | } |
| 1516 | 1523 | ||
| 1517 | Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size, | 1524 | Result KPageTable::SetProcessMemoryPermission(VAddr addr, size_t size, |
| 1518 | Svc::MemoryPermission svc_perm) { | 1525 | Svc::MemoryPermission svc_perm) { |
| 1519 | const size_t num_pages = size / PageSize; | 1526 | const size_t num_pages = size / PageSize; |
| 1520 | 1527 | ||
| 1521 | // Lock the table. | 1528 | // Lock the table. |
| 1522 | KScopedLightLock lk(general_lock); | 1529 | KScopedLightLock lk(m_general_lock); |
| 1523 | 1530 | ||
| 1524 | // Verify we can change the memory permission. | 1531 | // Verify we can change the memory permission. |
| 1525 | KMemoryState old_state; | 1532 | KMemoryState old_state; |
| @@ -1559,7 +1566,7 @@ Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size, | |||
| 1559 | // Create an update allocator. | 1566 | // Create an update allocator. |
| 1560 | Result allocator_result{ResultSuccess}; | 1567 | Result allocator_result{ResultSuccess}; |
| 1561 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 1568 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 1562 | memory_block_slab_manager, num_allocator_blocks); | 1569 | m_memory_block_slab_manager, num_allocator_blocks); |
| 1563 | R_TRY(allocator_result); | 1570 | R_TRY(allocator_result); |
| 1564 | 1571 | ||
| 1565 | // Perform mapping operation. | 1572 | // Perform mapping operation. |
| @@ -1568,29 +1575,29 @@ Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size, | |||
| 1568 | R_TRY(Operate(addr, num_pages, new_perm, operation)); | 1575 | R_TRY(Operate(addr, num_pages, new_perm, operation)); |
| 1569 | 1576 | ||
| 1570 | // Update the blocks. | 1577 | // Update the blocks. |
| 1571 | memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm, | 1578 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm, |
| 1572 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | 1579 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, |
| 1573 | KMemoryBlockDisableMergeAttribute::None); | 1580 | KMemoryBlockDisableMergeAttribute::None); |
| 1574 | 1581 | ||
| 1575 | // Ensure cache coherency, if we're setting pages as executable. | 1582 | // Ensure cache coherency, if we're setting pages as executable. |
| 1576 | if (is_x) { | 1583 | if (is_x) { |
| 1577 | system.InvalidateCpuInstructionCacheRange(addr, size); | 1584 | m_system.InvalidateCpuInstructionCacheRange(addr, size); |
| 1578 | } | 1585 | } |
| 1579 | 1586 | ||
| 1580 | return ResultSuccess; | 1587 | return ResultSuccess; |
| 1581 | } | 1588 | } |
| 1582 | 1589 | ||
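The is_x branch at the end matters: after making pages executable, stale instruction-cache lines could still hold the old bytes, so the range is explicitly invalidated. The control flow in isolation, with placeholder functions standing in for Operate and m_system.InvalidateCpuInstructionCacheRange:

```cpp
#include <cstdint>
#include <iostream>

enum class Perm : unsigned { None = 0, R = 1 << 0, W = 1 << 1, X = 1 << 2 };

constexpr bool HasExecute(Perm p) {
    return (static_cast<unsigned>(p) & static_cast<unsigned>(Perm::X)) != 0;
}

// Placeholders for the page-table reprotect and the icache maintenance call.
void ChangePermissions(std::uint64_t addr, std::uint64_t size, Perm) {
    std::cout << "reprotect 0x" << std::hex << addr << " size 0x" << size << '\n';
}
void InvalidateICacheRange(std::uint64_t addr, std::uint64_t size) {
    std::cout << "icache invalidate 0x" << std::hex << addr << " size 0x" << size << '\n';
}

void SetPermission(std::uint64_t addr, std::uint64_t size, Perm new_perm) {
    ChangePermissions(addr, size, new_perm);
    // Data-only permission changes are covered by normal dcache coherency; only
    // newly-executable ranges need their instruction cache dropped.
    if (HasExecute(new_perm)) {
        InvalidateICacheRange(addr, size);
    }
}

int main() {
    SetPermission(0x800'4000, 0x2000, Perm::X); // prints both lines
}
```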
| 1583 | KMemoryInfo KPageTable::QueryInfoImpl(VAddr addr) { | 1590 | KMemoryInfo KPageTable::QueryInfoImpl(VAddr addr) { |
| 1584 | KScopedLightLock lk(general_lock); | 1591 | KScopedLightLock lk(m_general_lock); |
| 1585 | 1592 | ||
| 1586 | return memory_block_manager.FindBlock(addr)->GetMemoryInfo(); | 1593 | return m_memory_block_manager.FindBlock(addr)->GetMemoryInfo(); |
| 1587 | } | 1594 | } |
| 1588 | 1595 | ||
| 1589 | KMemoryInfo KPageTable::QueryInfo(VAddr addr) { | 1596 | KMemoryInfo KPageTable::QueryInfo(VAddr addr) { |
| 1590 | if (!Contains(addr, 1)) { | 1597 | if (!Contains(addr, 1)) { |
| 1591 | return { | 1598 | return { |
| 1592 | .m_address = address_space_end, | 1599 | .m_address = m_address_space_end, |
| 1593 | .m_size = 0 - address_space_end, | 1600 | .m_size = 0 - m_address_space_end, |
| 1594 | .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible), | 1601 | .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible), |
| 1595 | .m_device_disable_merge_left_count = 0, | 1602 | .m_device_disable_merge_left_count = 0, |
| 1596 | .m_device_disable_merge_right_count = 0, | 1603 | .m_device_disable_merge_right_count = 0, |
| @@ -1607,12 +1614,11 @@ KMemoryInfo KPageTable::QueryInfo(VAddr addr) { | |||
| 1607 | return QueryInfoImpl(addr); | 1614 | return QueryInfoImpl(addr); |
| 1608 | } | 1615 | } |
| 1609 | 1616 | ||
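In the out-of-range branch of QueryInfo, `.m_size = 0 - m_address_space_end` is deliberate unsigned arithmetic: it yields 2^64 minus the address-space end, so the synthetic Inaccessible block spans from the end of the managed space to the top of the 64-bit range. A short demonstration of the wraparound:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    const std::uint64_t address_space_end = 0x0000'8000'0000'0000; // example 47-bit space
    const std::uint64_t size = 0 - address_space_end;              // == 2^64 - end (mod 2^64)
    // The synthetic block [end, end + size) wraps exactly back to address 0,
    // i.e. it covers every address the table does not manage.
    std::printf("%016llx\n", static_cast<unsigned long long>(address_space_end + size)); // 0
}
```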
| 1610 | Result KPageTable::SetMemoryPermission(VAddr addr, std::size_t size, | 1617 | Result KPageTable::SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm) { |
| 1611 | Svc::MemoryPermission svc_perm) { | ||
| 1612 | const size_t num_pages = size / PageSize; | 1618 | const size_t num_pages = size / PageSize; |
| 1613 | 1619 | ||
| 1614 | // Lock the table. | 1620 | // Lock the table. |
| 1615 | KScopedLightLock lk(general_lock); | 1621 | KScopedLightLock lk(m_general_lock); |
| 1616 | 1622 | ||
| 1617 | // Verify we can change the memory permission. | 1623 | // Verify we can change the memory permission. |
| 1618 | KMemoryState old_state; | 1624 | KMemoryState old_state; |
| @@ -1631,27 +1637,27 @@ Result KPageTable::SetMemoryPermission(VAddr addr, std::size_t size, | |||
| 1631 | // Create an update allocator. | 1637 | // Create an update allocator. |
| 1632 | Result allocator_result{ResultSuccess}; | 1638 | Result allocator_result{ResultSuccess}; |
| 1633 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 1639 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 1634 | memory_block_slab_manager, num_allocator_blocks); | 1640 | m_memory_block_slab_manager, num_allocator_blocks); |
| 1635 | R_TRY(allocator_result); | 1641 | R_TRY(allocator_result); |
| 1636 | 1642 | ||
| 1637 | // Perform mapping operation. | 1643 | // Perform mapping operation. |
| 1638 | R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions)); | 1644 | R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions)); |
| 1639 | 1645 | ||
| 1640 | // Update the blocks. | 1646 | // Update the blocks. |
| 1641 | memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, | 1647 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, |
| 1642 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | 1648 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, |
| 1643 | KMemoryBlockDisableMergeAttribute::None); | 1649 | KMemoryBlockDisableMergeAttribute::None); |
| 1644 | 1650 | ||
| 1645 | return ResultSuccess; | 1651 | return ResultSuccess; |
| 1646 | } | 1652 | } |
| 1647 | 1653 | ||
| 1648 | Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr) { | 1654 | Result KPageTable::SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr) { |
| 1649 | const size_t num_pages = size / PageSize; | 1655 | const size_t num_pages = size / PageSize; |
| 1650 | ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) == | 1656 | ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) == |
| 1651 | KMemoryAttribute::SetMask); | 1657 | KMemoryAttribute::SetMask); |
| 1652 | 1658 | ||
| 1653 | // Lock the table. | 1659 | // Lock the table. |
| 1654 | KScopedLightLock lk(general_lock); | 1660 | KScopedLightLock lk(m_general_lock); |
| 1655 | 1661 | ||
| 1656 | // Verify we can change the memory attribute. | 1662 | // Verify we can change the memory attribute. |
| 1657 | KMemoryState old_state; | 1663 | KMemoryState old_state; |
| @@ -1669,7 +1675,7 @@ Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u3 | |||
| 1669 | // Create an update allocator. | 1675 | // Create an update allocator. |
| 1670 | Result allocator_result{ResultSuccess}; | 1676 | Result allocator_result{ResultSuccess}; |
| 1671 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 1677 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 1672 | memory_block_slab_manager, num_allocator_blocks); | 1678 | m_memory_block_slab_manager, num_allocator_blocks); |
| 1673 | R_TRY(allocator_result); | 1679 | R_TRY(allocator_result); |
| 1674 | 1680 | ||
| 1675 | // Determine the new attribute. | 1681 | // Determine the new attribute. |
| @@ -1681,124 +1687,125 @@ Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u3 | |||
| 1681 | this->Operate(addr, num_pages, old_perm, OperationType::ChangePermissionsAndRefresh); | 1687 | this->Operate(addr, num_pages, old_perm, OperationType::ChangePermissionsAndRefresh); |
| 1682 | 1688 | ||
| 1683 | // Update the blocks. | 1689 | // Update the blocks. |
| 1684 | memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, old_perm, | 1690 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, old_perm, |
| 1685 | new_attr, KMemoryBlockDisableMergeAttribute::None, | 1691 | new_attr, KMemoryBlockDisableMergeAttribute::None, |
| 1686 | KMemoryBlockDisableMergeAttribute::None); | 1692 | KMemoryBlockDisableMergeAttribute::None); |
| 1687 | 1693 | ||
| 1688 | return ResultSuccess; | 1694 | return ResultSuccess; |
| 1689 | } | 1695 | } |
| 1690 | 1696 | ||
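The elided "Determine the new attribute" step is, in all likelihood, the standard masked read-modify-write: bits selected by mask come from attr, all other bits keep their old value. A sketch of that formula with hypothetical attribute bits (the concrete positions are illustrative, not KMemoryAttribute's):

```cpp
#include <cstdint>
#include <cstdio>

// Masked update: keep old bits outside `mask`, take new bits inside it.
constexpr std::uint32_t UpdateAttribute(std::uint32_t old_attr, std::uint32_t mask,
                                        std::uint32_t attr) {
    return (old_attr & ~mask) | (attr & mask);
}

int main() {
    constexpr std::uint32_t kUncached = 1u << 3; // hypothetical bit position
    constexpr std::uint32_t kLocked = 1u << 0;   // hypothetical bit position
    // Set Uncached without disturbing an existing Locked bit.
    static_assert(UpdateAttribute(kLocked, kUncached, kUncached) == (kLocked | kUncached),
                  "masked bits change, unmasked bits survive");
    std::puts("attribute updated");
}
```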
| 1691 | Result KPageTable::SetMaxHeapSize(std::size_t size) { | 1697 | Result KPageTable::SetMaxHeapSize(size_t size) { |
| 1692 | // Lock the table. | 1698 | // Lock the table. |
| 1693 | KScopedLightLock lk(general_lock); | 1699 | KScopedLightLock lk(m_general_lock); |
| 1694 | 1700 | ||
| 1695 | // Only process page tables are allowed to set heap size. | 1701 | // Only process page tables are allowed to set heap size. |
| 1696 | ASSERT(!this->IsKernel()); | 1702 | ASSERT(!this->IsKernel()); |
| 1697 | 1703 | ||
| 1698 | max_heap_size = size; | 1704 | m_max_heap_size = size; |
| 1699 | 1705 | ||
| 1700 | return ResultSuccess; | 1706 | return ResultSuccess; |
| 1701 | } | 1707 | } |
| 1702 | 1708 | ||
| 1703 | Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) { | 1709 | Result KPageTable::SetHeapSize(VAddr* out, size_t size) { |
| 1704 | // Lock the physical memory mutex. | 1710 | // Lock the physical memory mutex. |
| 1705 | KScopedLightLock map_phys_mem_lk(map_physical_memory_lock); | 1711 | KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); |
| 1706 | 1712 | ||
| 1707 | // Try to perform a reduction in heap, instead of an extension. | 1713 | // Try to perform a reduction in heap, instead of an extension. |
| 1708 | VAddr cur_address{}; | 1714 | VAddr cur_address{}; |
| 1709 | std::size_t allocation_size{}; | 1715 | size_t allocation_size{}; |
| 1710 | { | 1716 | { |
| 1711 | // Lock the table. | 1717 | // Lock the table. |
| 1712 | KScopedLightLock lk(general_lock); | 1718 | KScopedLightLock lk(m_general_lock); |
| 1713 | 1719 | ||
| 1714 | // Validate that setting heap size is possible at all. | 1720 | // Validate that setting heap size is possible at all. |
| 1715 | R_UNLESS(!is_kernel, ResultOutOfMemory); | 1721 | R_UNLESS(!m_is_kernel, ResultOutOfMemory); |
| 1716 | R_UNLESS(size <= static_cast<std::size_t>(heap_region_end - heap_region_start), | 1722 | R_UNLESS(size <= static_cast<size_t>(m_heap_region_end - m_heap_region_start), |
| 1717 | ResultOutOfMemory); | 1723 | ResultOutOfMemory); |
| 1718 | R_UNLESS(size <= max_heap_size, ResultOutOfMemory); | 1724 | R_UNLESS(size <= m_max_heap_size, ResultOutOfMemory); |
| 1719 | 1725 | ||
| 1720 | if (size < GetHeapSize()) { | 1726 | if (size < GetHeapSize()) { |
| 1721 | // The size being requested is less than the current size, so we need to free the end of | 1727 | // The size being requested is less than the current size, so we need to free the end of |
| 1722 | // the heap. | 1728 | // the heap. |
| 1723 | 1729 | ||
| 1724 | // Validate memory state. | 1730 | // Validate memory state. |
| 1725 | std::size_t num_allocator_blocks; | 1731 | size_t num_allocator_blocks; |
| 1726 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), | 1732 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), |
| 1727 | heap_region_start + size, GetHeapSize() - size, | 1733 | m_heap_region_start + size, GetHeapSize() - size, |
| 1728 | KMemoryState::All, KMemoryState::Normal, | 1734 | KMemoryState::All, KMemoryState::Normal, |
| 1729 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, | 1735 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, |
| 1730 | KMemoryAttribute::All, KMemoryAttribute::None)); | 1736 | KMemoryAttribute::All, KMemoryAttribute::None)); |
| 1731 | 1737 | ||
| 1732 | // Create an update allocator. | 1738 | // Create an update allocator. |
| 1733 | Result allocator_result{ResultSuccess}; | 1739 | Result allocator_result{ResultSuccess}; |
| 1734 | KMemoryBlockManagerUpdateAllocator allocator( | 1740 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 1735 | std::addressof(allocator_result), memory_block_slab_manager, num_allocator_blocks); | 1741 | m_memory_block_slab_manager, |
| 1742 | num_allocator_blocks); | ||
| 1736 | R_TRY(allocator_result); | 1743 | R_TRY(allocator_result); |
| 1737 | 1744 | ||
| 1738 | // Unmap the end of the heap. | 1745 | // Unmap the end of the heap. |
| 1739 | const auto num_pages = (GetHeapSize() - size) / PageSize; | 1746 | const auto num_pages = (GetHeapSize() - size) / PageSize; |
| 1740 | R_TRY(Operate(heap_region_start + size, num_pages, KMemoryPermission::None, | 1747 | R_TRY(Operate(m_heap_region_start + size, num_pages, KMemoryPermission::None, |
| 1741 | OperationType::Unmap)); | 1748 | OperationType::Unmap)); |
| 1742 | 1749 | ||
| 1743 | // Release the memory from the resource limit. | 1750 | // Release the memory from the resource limit. |
| 1744 | system.Kernel().CurrentProcess()->GetResourceLimit()->Release( | 1751 | m_system.Kernel().CurrentProcess()->GetResourceLimit()->Release( |
| 1745 | LimitableResource::PhysicalMemory, num_pages * PageSize); | 1752 | LimitableResource::PhysicalMemory, num_pages * PageSize); |
| 1746 | 1753 | ||
| 1747 | // Apply the memory block update. | 1754 | // Apply the memory block update. |
| 1748 | memory_block_manager.Update(std::addressof(allocator), heap_region_start + size, | 1755 | m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size, |
| 1749 | num_pages, KMemoryState::Free, KMemoryPermission::None, | 1756 | num_pages, KMemoryState::Free, KMemoryPermission::None, |
| 1750 | KMemoryAttribute::None, | 1757 | KMemoryAttribute::None, |
| 1751 | KMemoryBlockDisableMergeAttribute::None, | 1758 | KMemoryBlockDisableMergeAttribute::None, |
| 1752 | size == 0 ? KMemoryBlockDisableMergeAttribute::Normal | 1759 | size == 0 ? KMemoryBlockDisableMergeAttribute::Normal |
| 1753 | : KMemoryBlockDisableMergeAttribute::None); | 1760 | : KMemoryBlockDisableMergeAttribute::None); |
| 1754 | 1761 | ||
| 1755 | // Update the current heap end. | 1762 | // Update the current heap end. |
| 1756 | current_heap_end = heap_region_start + size; | 1763 | m_current_heap_end = m_heap_region_start + size; |
| 1757 | 1764 | ||
| 1758 | // Set the output. | 1765 | // Set the output. |
| 1759 | *out = heap_region_start; | 1766 | *out = m_heap_region_start; |
| 1760 | return ResultSuccess; | 1767 | return ResultSuccess; |
| 1761 | } else if (size == GetHeapSize()) { | 1768 | } else if (size == GetHeapSize()) { |
| 1762 | // The size requested is exactly the current size. | 1769 | // The size requested is exactly the current size. |
| 1763 | *out = heap_region_start; | 1770 | *out = m_heap_region_start; |
| 1764 | return ResultSuccess; | 1771 | return ResultSuccess; |
| 1765 | } else { | 1772 | } else { |
| 1766 | // We have to allocate memory. Determine how much to allocate and where while the table | 1773 | // We have to allocate memory. Determine how much to allocate and where while the table |
| 1767 | // is locked. | 1774 | // is locked. |
| 1768 | cur_address = current_heap_end; | 1775 | cur_address = m_current_heap_end; |
| 1769 | allocation_size = size - GetHeapSize(); | 1776 | allocation_size = size - GetHeapSize(); |
| 1770 | } | 1777 | } |
| 1771 | } | 1778 | } |
| 1772 | 1779 | ||
| 1773 | // Reserve memory for the heap extension. | 1780 | // Reserve memory for the heap extension. |
| 1774 | KScopedResourceReservation memory_reservation( | 1781 | KScopedResourceReservation memory_reservation( |
| 1775 | system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory, | 1782 | m_system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory, |
| 1776 | allocation_size); | 1783 | allocation_size); |
| 1777 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | 1784 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); |
| 1778 | 1785 | ||
| 1779 | // Allocate pages for the heap extension. | 1786 | // Allocate pages for the heap extension. |
| 1780 | KPageGroup pg; | 1787 | KPageGroup pg; |
| 1781 | R_TRY(system.Kernel().MemoryManager().AllocateAndOpen( | 1788 | R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen( |
| 1782 | &pg, allocation_size / PageSize, | 1789 | &pg, allocation_size / PageSize, |
| 1783 | KMemoryManager::EncodeOption(memory_pool, allocation_option))); | 1790 | KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option))); |
| 1784 | 1791 | ||
| 1785 | // Clear all the newly allocated pages. | 1792 | // Clear all the newly allocated pages. |
| 1786 | for (const auto& it : pg.Nodes()) { | 1793 | for (const auto& it : pg.Nodes()) { |
| 1787 | std::memset(system.DeviceMemory().GetPointer<void>(it.GetAddress()), heap_fill_value, | 1794 | std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value, |
| 1788 | it.GetSize()); | 1795 | it.GetSize()); |
| 1789 | } | 1796 | } |
| 1790 | 1797 | ||
| 1791 | // Map the pages. | 1798 | // Map the pages. |
| 1792 | { | 1799 | { |
| 1793 | // Lock the table. | 1800 | // Lock the table. |
| 1794 | KScopedLightLock lk(general_lock); | 1801 | KScopedLightLock lk(m_general_lock); |
| 1795 | 1802 | ||
| 1796 | // Ensure that the heap hasn't changed since we began executing. | 1803 | // Ensure that the heap hasn't changed since we began executing. |
| 1797 | ASSERT(cur_address == current_heap_end); | 1804 | ASSERT(cur_address == m_current_heap_end); |
| 1798 | 1805 | ||
| 1799 | // Check the memory state. | 1806 | // Check the memory state. |
| 1800 | std::size_t num_allocator_blocks{}; | 1807 | size_t num_allocator_blocks{}; |
| 1801 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), current_heap_end, | 1808 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), m_current_heap_end, |
| 1802 | allocation_size, KMemoryState::All, KMemoryState::Free, | 1809 | allocation_size, KMemoryState::All, KMemoryState::Free, |
| 1803 | KMemoryPermission::None, KMemoryPermission::None, | 1810 | KMemoryPermission::None, KMemoryPermission::None, |
| 1804 | KMemoryAttribute::None, KMemoryAttribute::None)); | 1811 | KMemoryAttribute::None, KMemoryAttribute::None)); |
| @@ -1806,16 +1813,16 @@ Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) { | |||
| 1806 | // Create an update allocator. | 1813 | // Create an update allocator. |
| 1807 | Result allocator_result{ResultSuccess}; | 1814 | Result allocator_result{ResultSuccess}; |
| 1808 | KMemoryBlockManagerUpdateAllocator allocator( | 1815 | KMemoryBlockManagerUpdateAllocator allocator( |
| 1809 | std::addressof(allocator_result), memory_block_slab_manager, num_allocator_blocks); | 1816 | std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks); |
| 1810 | R_TRY(allocator_result); | 1817 | R_TRY(allocator_result); |
| 1811 | 1818 | ||
| 1812 | // Map the pages. | 1819 | // Map the pages. |
| 1813 | const auto num_pages = allocation_size / PageSize; | 1820 | const auto num_pages = allocation_size / PageSize; |
| 1814 | R_TRY(Operate(current_heap_end, num_pages, pg, OperationType::MapGroup)); | 1821 | R_TRY(Operate(m_current_heap_end, num_pages, pg, OperationType::MapGroup)); |
| 1815 | 1822 | ||
| 1816 | // Clear all the newly allocated pages. | 1823 | // Clear all the newly allocated pages. |
| 1817 | for (std::size_t cur_page = 0; cur_page < num_pages; ++cur_page) { | 1824 | for (size_t cur_page = 0; cur_page < num_pages; ++cur_page) { |
| 1818 | std::memset(system.Memory().GetPointer(current_heap_end + (cur_page * PageSize)), 0, | 1825 | std::memset(m_system.Memory().GetPointer(m_current_heap_end + (cur_page * PageSize)), 0, |
| 1819 | PageSize); | 1826 | PageSize); |
| 1820 | } | 1827 | } |
| 1821 | 1828 | ||
| @@ -1823,27 +1830,27 @@ Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) { | |||
| 1823 | memory_reservation.Commit(); | 1830 | memory_reservation.Commit(); |
| 1824 | 1831 | ||
| 1825 | // Apply the memory block update. | 1832 | // Apply the memory block update. |
| 1826 | memory_block_manager.Update( | 1833 | m_memory_block_manager.Update( |
| 1827 | std::addressof(allocator), current_heap_end, num_pages, KMemoryState::Normal, | 1834 | std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState::Normal, |
| 1828 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, | 1835 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, |
| 1829 | heap_region_start == current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal | 1836 | m_heap_region_start == m_current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal |
| 1830 | : KMemoryBlockDisableMergeAttribute::None, | 1837 | : KMemoryBlockDisableMergeAttribute::None, |
| 1831 | KMemoryBlockDisableMergeAttribute::None); | 1838 | KMemoryBlockDisableMergeAttribute::None); |
| 1832 | 1839 | ||
| 1833 | // Update the current heap end. | 1840 | // Update the current heap end. |
| 1834 | current_heap_end = heap_region_start + size; | 1841 | m_current_heap_end = m_heap_region_start + size; |
| 1835 | 1842 | ||
| 1836 | // Set the output. | 1843 | // Set the output. |
| 1837 | *out = heap_region_start; | 1844 | *out = m_heap_region_start; |
| 1838 | return ResultSuccess; | 1845 | return ResultSuccess; |
| 1839 | } | 1846 | } |
| 1840 | } | 1847 | } |
| 1841 | 1848 | ||
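SetHeapSize is the most delicate function in this hunk: it decides shrink/no-op/grow under m_general_lock, drops that lock while reserving and allocating the extension, then re-locks and asserts the heap end has not moved, which is only sound because m_map_physical_memory_lock serializes callers for the whole call. The three-way shape modeled with plain integers and a std::mutex (a sketch of the locking discipline, not yuzu's API):

```cpp
#include <cassert>
#include <cstdint>
#include <iostream>
#include <mutex>

struct Heap {
    std::uint64_t region_start = 0x10'0000'0000;
    std::uint64_t current_end = 0x10'0000'0000; // empty heap to begin with
    std::mutex table_lock;                      // stands in for m_general_lock

    std::uint64_t Size() const { return current_end - region_start; }

    // Callers are assumed to already hold a separate heap-size lock, mirroring
    // m_map_physical_memory_lock; that is what makes the re-check below sound.
    std::uint64_t SetSize(std::uint64_t size) {
        std::uint64_t grow_from = 0;
        std::uint64_t grow_by = 0;
        {
            std::scoped_lock lk{table_lock};
            if (size < Size()) {
                current_end = region_start + size; // shrink: unmap the tail
                return region_start;
            } else if (size == Size()) {
                return region_start; // exact match: nothing to do
            }
            grow_from = current_end; // grow: remember where the heap ended
            grow_by = size - Size();
        }
        // ... reserve and allocate `grow_by` bytes here, without holding the lock ...
        {
            std::scoped_lock lk{table_lock};
            assert(grow_from == current_end); // heap must not have moved meanwhile
            current_end = grow_from + grow_by;
        }
        return region_start;
    }
};

int main() {
    Heap h;
    std::cout << "heap base: 0x" << std::hex << h.SetSize(0x20000) << '\n';
}
```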
| 1842 | ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align, | 1849 | ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_t align, |
| 1843 | bool is_map_only, VAddr region_start, | 1850 | bool is_map_only, VAddr region_start, |
| 1844 | std::size_t region_num_pages, KMemoryState state, | 1851 | size_t region_num_pages, KMemoryState state, |
| 1845 | KMemoryPermission perm, PAddr map_addr) { | 1852 | KMemoryPermission perm, PAddr map_addr) { |
| 1846 | KScopedLightLock lk(general_lock); | 1853 | KScopedLightLock lk(m_general_lock); |
| 1847 | 1854 | ||
| 1848 | if (!CanContain(region_start, region_num_pages * PageSize, state)) { | 1855 | if (!CanContain(region_start, region_num_pages * PageSize, state)) { |
| 1849 | return ResultInvalidCurrentMemory; | 1856 | return ResultInvalidCurrentMemory; |
| @@ -1862,33 +1869,98 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages, | |||
| 1862 | // Create an update allocator. | 1869 | // Create an update allocator. |
| 1863 | Result allocator_result{ResultSuccess}; | 1870 | Result allocator_result{ResultSuccess}; |
| 1864 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 1871 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 1865 | memory_block_slab_manager); | 1872 | m_memory_block_slab_manager); |
| 1866 | 1873 | ||
| 1867 | if (is_map_only) { | 1874 | if (is_map_only) { |
| 1868 | R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)); | 1875 | R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)); |
| 1869 | } else { | 1876 | } else { |
| 1870 | KPageGroup page_group; | 1877 | KPageGroup page_group; |
| 1871 | R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess( | 1878 | R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess( |
| 1872 | &page_group, needed_num_pages, | 1879 | &page_group, needed_num_pages, |
| 1873 | KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0)); | 1880 | KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0)); |
| 1874 | R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)); | 1881 | R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)); |
| 1875 | } | 1882 | } |
| 1876 | 1883 | ||
| 1877 | // Update the blocks. | 1884 | // Update the blocks. |
| 1878 | memory_block_manager.Update(std::addressof(allocator), addr, needed_num_pages, state, perm, | 1885 | m_memory_block_manager.Update(std::addressof(allocator), addr, needed_num_pages, state, perm, |
| 1879 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | 1886 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, |
| 1880 | KMemoryBlockDisableMergeAttribute::None); | 1887 | KMemoryBlockDisableMergeAttribute::None); |
| 1881 | 1888 | ||
| 1882 | return addr; | 1889 | return addr; |
| 1883 | } | 1890 | } |
| 1884 | 1891 | ||
| 1885 | Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, std::size_t size) { | 1892 | Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm, |
| 1893 | bool is_aligned) { | ||
| 1894 | // Lightly validate the range before doing anything else. | ||
| 1895 | const size_t num_pages = size / PageSize; | ||
| 1896 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 1897 | |||
| 1898 | // Lock the table. | ||
| 1899 | KScopedLightLock lk(m_general_lock); | ||
| 1900 | |||
| 1901 | // Check the memory state. | ||
| 1902 | const auto test_state = | ||
| 1903 | (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap); | ||
| 1904 | size_t num_allocator_blocks; | ||
| 1905 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, test_state, | ||
| 1906 | test_state, perm, perm, | ||
| 1907 | KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked, | ||
| 1908 | KMemoryAttribute::None, KMemoryAttribute::DeviceShared)); | ||
| 1909 | |||
| 1910 | // Create an update allocator. | ||
| 1911 | Result allocator_result{ResultSuccess}; | ||
| 1912 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1913 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 1914 | R_TRY(allocator_result); | ||
| 1915 | |||
| 1916 | // Update the memory blocks. | ||
| 1917 | m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, | ||
| 1918 | &KMemoryBlock::ShareToDevice, KMemoryPermission::None); | ||
| 1919 | |||
| 1920 | return ResultSuccess; | ||
| 1921 | } | ||
| 1922 | |||
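The core primitive behind the new lock functions is ShareToDevice: each memory block is expected to keep a device-use counter, with the DeviceShared attribute set on the first share and cleared only when the final unshare drops the counter to zero. A minimal model of that counter/attribute pairing, using a hypothetical Block in place of KMemoryBlock:

```cpp
#include <cassert>
#include <cstdint>

// Hypothetical stand-in for the per-block device sharing state.
struct Block {
    std::uint16_t device_use_count = 0;
    bool device_shared = false; // models KMemoryAttribute::DeviceShared

    void ShareToDevice() {
        // First sharer sets the attribute; later sharers just bump the count.
        ++device_use_count;
        device_shared = true;
    }
    void UnshareToDevice() {
        assert(device_use_count > 0);
        if (--device_use_count == 0) {
            device_shared = false; // last unshare clears the attribute
        }
    }
};

int main() {
    Block b;
    b.ShareToDevice();
    b.ShareToDevice(); // e.g. two nvmap handles pinning the same pages
    b.UnshareToDevice();
    assert(b.device_shared);  // still pinned by one device mapping
    b.UnshareToDevice();
    assert(!b.device_shared); // fully released
}
```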
| 1923 | Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) { | ||
| 1924 | // Lightly validate the range before doing anything else. | ||
| 1925 | const size_t num_pages = size / PageSize; | ||
| 1926 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 1927 | |||
| 1928 | // Lock the table. | ||
| 1929 | KScopedLightLock lk(m_general_lock); | ||
| 1930 | |||
| 1931 | // Check the memory state. | ||
| 1932 | size_t num_allocator_blocks; | ||
| 1933 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 1934 | std::addressof(num_allocator_blocks), address, size, | ||
| 1935 | KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap, | ||
| 1936 | KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap, | ||
| 1937 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 1938 | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); | ||
| 1939 | |||
| 1940 | // Create an update allocator. | ||
| 1941 | Result allocator_result{ResultSuccess}; | ||
| 1942 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1943 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 1944 | R_TRY(allocator_result); | ||
| 1945 | |||
| 1946 | // Update the memory blocks. | ||
| 1947 | const KMemoryBlockManager::MemoryBlockLockFunction lock_func = | ||
| 1948 | m_enable_device_address_space_merge | ||
| 1949 | ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare | ||
| 1950 | : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight; | ||
| 1951 | m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func, | ||
| 1952 | KMemoryPermission::None); | ||
| 1953 | |||
| 1954 | return ResultSuccess; | ||
| 1955 | } | ||
| 1956 | |||
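The lock_func selection above works because KMemoryBlockManager::UpdateLock accepts the per-block update step as a pointer to member function, so the merge-aware and right-only variants can be chosen with one conditional and dispatched uniformly. The C++ mechanics in isolation (Block and its methods are illustrative):

```cpp
#include <iostream>

struct Block {
    void UpdateForShare() { std::cout << "merge-aware update\n"; }
    void UpdateForShareRight() { std::cout << "right-only update\n"; }
};

// Same shape as the manager's MemoryBlockLockFunction: a pointer to a member
// function of Block (the real signature also takes a KMemoryPermission).
using LockFunction = void (Block::*)();

void UpdateLock(Block& block, LockFunction func) {
    (block.*func)(); // dispatch through the member pointer
}

int main() {
    Block b;
    const bool enable_merge = false; // stands in for m_enable_device_address_space_merge
    const LockFunction lock_func =
        enable_merge ? &Block::UpdateForShare : &Block::UpdateForShareRight;
    UpdateLock(b, lock_func); // prints "right-only update"
}
```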
| 1957 | Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, size_t size) { | ||
| 1886 | // Lightly validate the range before doing anything else. | 1958 | // Lightly validate the range before doing anything else. |
| 1887 | const size_t num_pages = size / PageSize; | 1959 | const size_t num_pages = size / PageSize; |
| 1888 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | 1960 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); |
| 1889 | 1961 | ||
| 1890 | // Lock the table. | 1962 | // Lock the table. |
| 1891 | KScopedLightLock lk(general_lock); | 1963 | KScopedLightLock lk(m_general_lock); |
| 1892 | 1964 | ||
| 1893 | // Check the memory state. | 1965 | // Check the memory state. |
| 1894 | size_t num_allocator_blocks; | 1966 | size_t num_allocator_blocks; |
| @@ -1900,17 +1972,17 @@ Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, std::size_t size) | |||
| 1900 | // Create an update allocator. | 1972 | // Create an update allocator. |
| 1901 | Result allocator_result{ResultSuccess}; | 1973 | Result allocator_result{ResultSuccess}; |
| 1902 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 1974 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 1903 | memory_block_slab_manager, num_allocator_blocks); | 1975 | m_memory_block_slab_manager, num_allocator_blocks); |
| 1904 | R_TRY(allocator_result); | 1976 | R_TRY(allocator_result); |
| 1905 | 1977 | ||
| 1906 | // Update the memory blocks. | 1978 | // Update the memory blocks. |
| 1907 | memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, | 1979 | m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, |
| 1908 | &KMemoryBlock::UnshareToDevice, KMemoryPermission::None); | 1980 | &KMemoryBlock::UnshareToDevice, KMemoryPermission::None); |
| 1909 | 1981 | ||
| 1910 | return ResultSuccess; | 1982 | return ResultSuccess; |
| 1911 | } | 1983 | } |
| 1912 | 1984 | ||
| 1913 | Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size) { | 1985 | Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size) { |
| 1914 | return this->LockMemoryAndOpen( | 1986 | return this->LockMemoryAndOpen( |
| 1915 | out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, | 1987 | out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, |
| 1916 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All, | 1988 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All, |
| @@ -1920,7 +1992,7 @@ Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t si | |||
| 1920 | KMemoryAttribute::Locked); | 1992 | KMemoryAttribute::Locked); |
| 1921 | } | 1993 | } |
| 1922 | 1994 | ||
| 1923 | Result KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg) { | 1995 | Result KPageTable::UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg) { |
| 1924 | return this->UnlockMemory( | 1996 | return this->UnlockMemory( |
| 1925 | addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, | 1997 | addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, |
| 1926 | KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All, | 1998 | KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All, |
| @@ -1928,9 +2000,9 @@ Result KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size, const KPage | |||
| 1928 | } | 2000 | } |
| 1929 | 2001 | ||
| 1930 | bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const { | 2002 | bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const { |
| 1931 | auto start_ptr = system.DeviceMemory().GetPointer<u8>(addr); | 2003 | auto start_ptr = m_system.DeviceMemory().GetPointer<u8>(addr); |
| 1932 | for (u64 offset{}; offset < size; offset += PageSize) { | 2004 | for (u64 offset{}; offset < size; offset += PageSize) { |
| 1933 | if (start_ptr != system.DeviceMemory().GetPointer<u8>(addr + offset)) { | 2005 | if (start_ptr != m_system.DeviceMemory().GetPointer<u8>(addr + offset)) { |
| 1934 | return false; | 2006 | return false; |
| 1935 | } | 2007 | } |
| 1936 | start_ptr += PageSize; | 2008 | start_ptr += PageSize; |
| @@ -1938,8 +2010,7 @@ bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const { | |||
| 1938 | return true; | 2010 | return true; |
| 1939 | } | 2011 | } |
| 1940 | 2012 | ||
| 1941 | void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages, | 2013 | void KPageTable::AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list) { |
| 1942 | KPageGroup& page_linked_list) { | ||
| 1943 | VAddr addr{start}; | 2014 | VAddr addr{start}; |
| 1944 | while (addr < start + (num_pages * PageSize)) { | 2015 | while (addr < start + (num_pages * PageSize)) { |
| 1945 | const PAddr paddr{GetPhysicalAddr(addr)}; | 2016 | const PAddr paddr{GetPhysicalAddr(addr)}; |
| @@ -1949,16 +2020,16 @@ void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages, | |||
| 1949 | } | 2020 | } |
| 1950 | } | 2021 | } |
| 1951 | 2022 | ||
| 1952 | VAddr KPageTable::AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, | 2023 | VAddr KPageTable::AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages, |
| 1953 | u64 needed_num_pages, std::size_t align) { | 2024 | size_t align) { |
| 1954 | if (is_aslr_enabled) { | 2025 | if (m_enable_aslr) { |
| 1955 | UNIMPLEMENTED(); | 2026 | UNIMPLEMENTED(); |
| 1956 | } | 2027 | } |
| 1957 | return memory_block_manager.FindFreeArea(start, region_num_pages, needed_num_pages, align, 0, | 2028 | return m_memory_block_manager.FindFreeArea(start, region_num_pages, needed_num_pages, align, 0, |
| 1958 | IsKernel() ? 1 : 4); | 2029 | IsKernel() ? 1 : 4); |
| 1959 | } | 2030 | } |
| 1960 | 2031 | ||
| 1961 | Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group, | 2032 | Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group, |
| 1962 | OperationType operation) { | 2033 | OperationType operation) { |
| 1963 | ASSERT(this->IsLockedByCurrentThread()); | 2034 | ASSERT(this->IsLockedByCurrentThread()); |
| 1964 | 2035 | ||
| @@ -1967,11 +2038,11 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup& | |||
| 1967 | ASSERT(num_pages == page_group.GetNumPages()); | 2038 | ASSERT(num_pages == page_group.GetNumPages()); |
| 1968 | 2039 | ||
| 1969 | for (const auto& node : page_group.Nodes()) { | 2040 | for (const auto& node : page_group.Nodes()) { |
| 1970 | const std::size_t size{node.GetNumPages() * PageSize}; | 2041 | const size_t size{node.GetNumPages() * PageSize}; |
| 1971 | 2042 | ||
| 1972 | switch (operation) { | 2043 | switch (operation) { |
| 1973 | case OperationType::MapGroup: | 2044 | case OperationType::MapGroup: |
| 1974 | system.Memory().MapMemoryRegion(page_table_impl, addr, size, node.GetAddress()); | 2045 | m_system.Memory().MapMemoryRegion(m_page_table_impl, addr, size, node.GetAddress()); |
| 1975 | break; | 2046 | break; |
| 1976 | default: | 2047 | default: |
| 1977 | ASSERT(false); | 2048 | ASSERT(false); |
| @@ -1983,7 +2054,7 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup& | |||
| 1983 | return ResultSuccess; | 2054 | return ResultSuccess; |
| 1984 | } | 2055 | } |
| 1985 | 2056 | ||
| 1986 | Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm, | 2057 | Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, |
| 1987 | OperationType operation, PAddr map_addr) { | 2058 | OperationType operation, PAddr map_addr) { |
| 1988 | ASSERT(this->IsLockedByCurrentThread()); | 2059 | ASSERT(this->IsLockedByCurrentThread()); |
| 1989 | 2060 | ||
| @@ -1993,12 +2064,12 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission | |||
| 1993 | 2064 | ||
| 1994 | switch (operation) { | 2065 | switch (operation) { |
| 1995 | case OperationType::Unmap: | 2066 | case OperationType::Unmap: |
| 1996 | system.Memory().UnmapRegion(page_table_impl, addr, num_pages * PageSize); | 2067 | m_system.Memory().UnmapRegion(m_page_table_impl, addr, num_pages * PageSize); |
| 1997 | break; | 2068 | break; |
| 1998 | case OperationType::Map: { | 2069 | case OperationType::Map: { |
| 1999 | ASSERT(map_addr); | 2070 | ASSERT(map_addr); |
| 2000 | ASSERT(Common::IsAligned(map_addr, PageSize)); | 2071 | ASSERT(Common::IsAligned(map_addr, PageSize)); |
| 2001 | system.Memory().MapMemoryRegion(page_table_impl, addr, num_pages * PageSize, map_addr); | 2072 | m_system.Memory().MapMemoryRegion(m_page_table_impl, addr, num_pages * PageSize, map_addr); |
| 2002 | break; | 2073 | break; |
| 2003 | } | 2074 | } |
| 2004 | case OperationType::ChangePermissions: | 2075 | case OperationType::ChangePermissions: |
| @@ -2014,18 +2085,18 @@ VAddr KPageTable::GetRegionAddress(KMemoryState state) const { | |||
| 2014 | switch (state) { | 2085 | switch (state) { |
| 2015 | case KMemoryState::Free: | 2086 | case KMemoryState::Free: |
| 2016 | case KMemoryState::Kernel: | 2087 | case KMemoryState::Kernel: |
| 2017 | return address_space_start; | 2088 | return m_address_space_start; |
| 2018 | case KMemoryState::Normal: | 2089 | case KMemoryState::Normal: |
| 2019 | return heap_region_start; | 2090 | return m_heap_region_start; |
| 2020 | case KMemoryState::Ipc: | 2091 | case KMemoryState::Ipc: |
| 2021 | case KMemoryState::NonSecureIpc: | 2092 | case KMemoryState::NonSecureIpc: |
| 2022 | case KMemoryState::NonDeviceIpc: | 2093 | case KMemoryState::NonDeviceIpc: |
| 2023 | return alias_region_start; | 2094 | return m_alias_region_start; |
| 2024 | case KMemoryState::Stack: | 2095 | case KMemoryState::Stack: |
| 2025 | return stack_region_start; | 2096 | return m_stack_region_start; |
| 2026 | case KMemoryState::Static: | 2097 | case KMemoryState::Static: |
| 2027 | case KMemoryState::ThreadLocal: | 2098 | case KMemoryState::ThreadLocal: |
| 2028 | return kernel_map_region_start; | 2099 | return m_kernel_map_region_start; |
| 2029 | case KMemoryState::Io: | 2100 | case KMemoryState::Io: |
| 2030 | case KMemoryState::Shared: | 2101 | case KMemoryState::Shared: |
| 2031 | case KMemoryState::AliasCode: | 2102 | case KMemoryState::AliasCode: |
| @@ -2036,31 +2107,31 @@ VAddr KPageTable::GetRegionAddress(KMemoryState state) const { | |||
| 2036 | case KMemoryState::GeneratedCode: | 2107 | case KMemoryState::GeneratedCode: |
| 2037 | case KMemoryState::CodeOut: | 2108 | case KMemoryState::CodeOut: |
| 2038 | case KMemoryState::Coverage: | 2109 | case KMemoryState::Coverage: |
| 2039 | return alias_code_region_start; | 2110 | return m_alias_code_region_start; |
| 2040 | case KMemoryState::Code: | 2111 | case KMemoryState::Code: |
| 2041 | case KMemoryState::CodeData: | 2112 | case KMemoryState::CodeData: |
| 2042 | return code_region_start; | 2113 | return m_code_region_start; |
| 2043 | default: | 2114 | default: |
| 2044 | UNREACHABLE(); | 2115 | UNREACHABLE(); |
| 2045 | } | 2116 | } |
| 2046 | } | 2117 | } |
| 2047 | 2118 | ||
| 2048 | std::size_t KPageTable::GetRegionSize(KMemoryState state) const { | 2119 | size_t KPageTable::GetRegionSize(KMemoryState state) const { |
| 2049 | switch (state) { | 2120 | switch (state) { |
| 2050 | case KMemoryState::Free: | 2121 | case KMemoryState::Free: |
| 2051 | case KMemoryState::Kernel: | 2122 | case KMemoryState::Kernel: |
| 2052 | return address_space_end - address_space_start; | 2123 | return m_address_space_end - m_address_space_start; |
| 2053 | case KMemoryState::Normal: | 2124 | case KMemoryState::Normal: |
| 2054 | return heap_region_end - heap_region_start; | 2125 | return m_heap_region_end - m_heap_region_start; |
| 2055 | case KMemoryState::Ipc: | 2126 | case KMemoryState::Ipc: |
| 2056 | case KMemoryState::NonSecureIpc: | 2127 | case KMemoryState::NonSecureIpc: |
| 2057 | case KMemoryState::NonDeviceIpc: | 2128 | case KMemoryState::NonDeviceIpc: |
| 2058 | return alias_region_end - alias_region_start; | 2129 | return m_alias_region_end - m_alias_region_start; |
| 2059 | case KMemoryState::Stack: | 2130 | case KMemoryState::Stack: |
| 2060 | return stack_region_end - stack_region_start; | 2131 | return m_stack_region_end - m_stack_region_start; |
| 2061 | case KMemoryState::Static: | 2132 | case KMemoryState::Static: |
| 2062 | case KMemoryState::ThreadLocal: | 2133 | case KMemoryState::ThreadLocal: |
| 2063 | return kernel_map_region_end - kernel_map_region_start; | 2134 | return m_kernel_map_region_end - m_kernel_map_region_start; |
| 2064 | case KMemoryState::Io: | 2135 | case KMemoryState::Io: |
| 2065 | case KMemoryState::Shared: | 2136 | case KMemoryState::Shared: |
| 2066 | case KMemoryState::AliasCode: | 2137 | case KMemoryState::AliasCode: |
| @@ -2071,16 +2142,16 @@ std::size_t KPageTable::GetRegionSize(KMemoryState state) const { | |||
| 2071 | case KMemoryState::GeneratedCode: | 2142 | case KMemoryState::GeneratedCode: |
| 2072 | case KMemoryState::CodeOut: | 2143 | case KMemoryState::CodeOut: |
| 2073 | case KMemoryState::Coverage: | 2144 | case KMemoryState::Coverage: |
| 2074 | return alias_code_region_end - alias_code_region_start; | 2145 | return m_alias_code_region_end - m_alias_code_region_start; |
| 2075 | case KMemoryState::Code: | 2146 | case KMemoryState::Code: |
| 2076 | case KMemoryState::CodeData: | 2147 | case KMemoryState::CodeData: |
| 2077 | return code_region_end - code_region_start; | 2148 | return m_code_region_end - m_code_region_start; |
| 2078 | default: | 2149 | default: |
| 2079 | UNREACHABLE(); | 2150 | UNREACHABLE(); |
| 2080 | } | 2151 | } |
| 2081 | } | 2152 | } |
| 2082 | 2153 | ||
| 2083 | bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) const { | 2154 | bool KPageTable::CanContain(VAddr addr, size_t size, KMemoryState state) const { |
| 2084 | const VAddr end = addr + size; | 2155 | const VAddr end = addr + size; |
| 2085 | const VAddr last = end - 1; | 2156 | const VAddr last = end - 1; |
| 2086 | 2157 | ||
| @@ -2089,10 +2160,10 @@ bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) co | |||
| 2089 | 2160 | ||
| 2090 | const bool is_in_region = | 2161 | const bool is_in_region = |
| 2091 | region_start <= addr && addr < end && last <= region_start + region_size - 1; | 2162 | region_start <= addr && addr < end && last <= region_start + region_size - 1; |
| 2092 | const bool is_in_heap = !(end <= heap_region_start || heap_region_end <= addr || | 2163 | const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr || |
| 2093 | heap_region_start == heap_region_end); | 2164 | m_heap_region_start == m_heap_region_end); |
| 2094 | const bool is_in_alias = !(end <= alias_region_start || alias_region_end <= addr || | 2165 | const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr || |
| 2095 | alias_region_start == alias_region_end); | 2166 | m_alias_region_start == m_alias_region_end); |
| 2096 | switch (state) { | 2167 | switch (state) { |
| 2097 | case KMemoryState::Free: | 2168 | case KMemoryState::Free: |
| 2098 | case KMemoryState::Kernel: | 2169 | case KMemoryState::Kernel: |
| @@ -2138,16 +2209,16 @@ Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_ | |||
| 2138 | return ResultSuccess; | 2209 | return ResultSuccess; |
| 2139 | } | 2210 | } |
| 2140 | 2211 | ||
| 2141 | Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, | 2212 | Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size, |
| 2142 | std::size_t size, KMemoryState state_mask, | 2213 | KMemoryState state_mask, KMemoryState state, |
| 2143 | KMemoryState state, KMemoryPermission perm_mask, | 2214 | KMemoryPermission perm_mask, KMemoryPermission perm, |
| 2144 | KMemoryPermission perm, KMemoryAttribute attr_mask, | 2215 | KMemoryAttribute attr_mask, |
| 2145 | KMemoryAttribute attr) const { | 2216 | KMemoryAttribute attr) const { |
| 2146 | ASSERT(this->IsLockedByCurrentThread()); | 2217 | ASSERT(this->IsLockedByCurrentThread()); |
| 2147 | 2218 | ||
| 2148 | // Get information about the first block. | 2219 | // Get information about the first block. |
| 2149 | const VAddr last_addr = addr + size - 1; | 2220 | const VAddr last_addr = addr + size - 1; |
| 2150 | KMemoryBlockManager::const_iterator it = memory_block_manager.FindIterator(addr); | 2221 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr); |
| 2151 | KMemoryInfo info = it->GetMemoryInfo(); | 2222 | KMemoryInfo info = it->GetMemoryInfo(); |
| 2152 | 2223 | ||
| 2153 | // If the start address isn't aligned, we need a block. | 2224 | // If the start address isn't aligned, we need a block. |
| @@ -2165,7 +2236,7 @@ Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VA | |||
| 2165 | 2236 | ||
| 2166 | // Advance our iterator. | 2237 | // Advance our iterator. |
| 2167 | it++; | 2238 | it++; |
| 2168 | ASSERT(it != memory_block_manager.cend()); | 2239 | ASSERT(it != m_memory_block_manager.cend()); |
| 2169 | info = it->GetMemoryInfo(); | 2240 | info = it->GetMemoryInfo(); |
| 2170 | } | 2241 | } |
| 2171 | 2242 | ||
| @@ -2181,8 +2252,8 @@ Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VA | |||
| 2181 | } | 2252 | } |
| 2182 | 2253 | ||
| 2183 | Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, | 2254 | Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, |
| 2184 | KMemoryAttribute* out_attr, std::size_t* out_blocks_needed, | 2255 | KMemoryAttribute* out_attr, size_t* out_blocks_needed, |
| 2185 | VAddr addr, std::size_t size, KMemoryState state_mask, | 2256 | VAddr addr, size_t size, KMemoryState state_mask, |
| 2186 | KMemoryState state, KMemoryPermission perm_mask, | 2257 | KMemoryState state, KMemoryPermission perm_mask, |
| 2187 | KMemoryPermission perm, KMemoryAttribute attr_mask, | 2258 | KMemoryPermission perm, KMemoryAttribute attr_mask, |
| 2188 | KMemoryAttribute attr, KMemoryAttribute ignore_attr) const { | 2259 | KMemoryAttribute attr, KMemoryAttribute ignore_attr) const { |
| @@ -2190,7 +2261,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* | |||
| 2190 | 2261 | ||
| 2191 | // Get information about the first block. | 2262 | // Get information about the first block. |
| 2192 | const VAddr last_addr = addr + size - 1; | 2263 | const VAddr last_addr = addr + size - 1; |
| 2193 | KMemoryBlockManager::const_iterator it = memory_block_manager.FindIterator(addr); | 2264 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr); |
| 2194 | KMemoryInfo info = it->GetMemoryInfo(); | 2265 | KMemoryInfo info = it->GetMemoryInfo(); |
| 2195 | 2266 | ||
| 2196 | // If the start address isn't aligned, we need a block. | 2267 | // If the start address isn't aligned, we need a block. |
| @@ -2218,7 +2289,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* | |||
| 2218 | 2289 | ||
| 2219 | // Advance our iterator. | 2290 | // Advance our iterator. |
| 2220 | it++; | 2291 | it++; |
| 2221 | ASSERT(it != memory_block_manager.cend()); | 2292 | ASSERT(it != m_memory_block_manager.cend()); |
| 2222 | info = it->GetMemoryInfo(); | 2293 | info = it->GetMemoryInfo(); |
| 2223 | } | 2294 | } |
| 2224 | 2295 | ||
| @@ -2257,7 +2328,7 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr | |||
| 2257 | R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); | 2328 | R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); |
| 2258 | 2329 | ||
| 2259 | // Lock the table. | 2330 | // Lock the table. |
| 2260 | KScopedLightLock lk(general_lock); | 2331 | KScopedLightLock lk(m_general_lock); |
| 2261 | 2332 | ||
| 2262 | // Check that the output page group is empty, if it exists. | 2333 | // Check that the output page group is empty, if it exists. |
| 2263 | if (out_pg) { | 2334 | if (out_pg) { |
| @@ -2288,7 +2359,7 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr | |||
| 2288 | // Create an update allocator. | 2359 | // Create an update allocator. |
| 2289 | Result allocator_result{ResultSuccess}; | 2360 | Result allocator_result{ResultSuccess}; |
| 2290 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 2361 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 2291 | memory_block_slab_manager, num_allocator_blocks); | 2362 | m_memory_block_slab_manager, num_allocator_blocks); |
| 2292 | R_TRY(allocator_result); | 2363 | R_TRY(allocator_result); |
| 2293 | 2364 | ||
| 2294 | // Decide on new perm and attr. | 2365 | // Decide on new perm and attr. |
| @@ -2301,9 +2372,9 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr | |||
| 2301 | } | 2372 | } |
| 2302 | 2373 | ||
| 2303 | // Apply the memory block updates. | 2374 | // Apply the memory block updates. |
| 2304 | memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, | 2375 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, |
| 2305 | new_attr, KMemoryBlockDisableMergeAttribute::Locked, | 2376 | new_attr, KMemoryBlockDisableMergeAttribute::Locked, |
| 2306 | KMemoryBlockDisableMergeAttribute::None); | 2377 | KMemoryBlockDisableMergeAttribute::None); |
| 2307 | 2378 | ||
| 2308 | return ResultSuccess; | 2379 | return ResultSuccess; |
| 2309 | } | 2380 | } |
| @@ -2322,7 +2393,7 @@ Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask | |||
| 2322 | R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); | 2393 | R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); |
| 2323 | 2394 | ||
| 2324 | // Lock the table. | 2395 | // Lock the table. |
| 2325 | KScopedLightLock lk(general_lock); | 2396 | KScopedLightLock lk(m_general_lock); |
| 2326 | 2397 | ||
| 2327 | // Check the state. | 2398 | // Check the state. |
| 2328 | KMemoryState old_state{}; | 2399 | KMemoryState old_state{}; |
| @@ -2347,7 +2418,7 @@ Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask | |||
| 2347 | // Create an update allocator. | 2418 | // Create an update allocator. |
| 2348 | Result allocator_result{ResultSuccess}; | 2419 | Result allocator_result{ResultSuccess}; |
| 2349 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 2420 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 2350 | memory_block_slab_manager, num_allocator_blocks); | 2421 | m_memory_block_slab_manager, num_allocator_blocks); |
| 2351 | R_TRY(allocator_result); | 2422 | R_TRY(allocator_result); |
| 2352 | 2423 | ||
| 2353 | // Update permission, if we need to. | 2424 | // Update permission, if we need to. |
| @@ -2356,9 +2427,9 @@ Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask | |||
| 2356 | } | 2427 | } |
| 2357 | 2428 | ||
| 2358 | // Apply the memory block updates. | 2429 | // Apply the memory block updates. |
| 2359 | memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, | 2430 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, |
| 2360 | new_attr, KMemoryBlockDisableMergeAttribute::None, | 2431 | new_attr, KMemoryBlockDisableMergeAttribute::None, |
| 2361 | KMemoryBlockDisableMergeAttribute::Locked); | 2432 | KMemoryBlockDisableMergeAttribute::Locked); |
| 2362 | 2433 | ||
| 2363 | return ResultSuccess; | 2434 | return ResultSuccess; |
| 2364 | } | 2435 | } |
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h index fa11a0fe3..225854319 100644 --- a/src/core/hle/kernel/k_page_table.h +++ b/src/core/hle/kernel/k_page_table.h | |||
| @@ -36,60 +36,66 @@ public: | |||
| 36 | ~KPageTable(); | 36 | ~KPageTable(); |
| 37 | 37 | ||
| 38 | Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, | 38 | Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, |
| 39 | VAddr code_addr, std::size_t code_size, | 39 | VAddr code_addr, size_t code_size, |
| 40 | KMemoryBlockSlabManager* mem_block_slab_manager, | 40 | KMemoryBlockSlabManager* mem_block_slab_manager, |
| 41 | KMemoryManager::Pool pool); | 41 | KMemoryManager::Pool pool); |
| 42 | 42 | ||
| 43 | void Finalize(); | 43 | void Finalize(); |
| 44 | 44 | ||
| 45 | Result MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state, | 45 | Result MapProcessCode(VAddr addr, size_t pages_count, KMemoryState state, |
| 46 | KMemoryPermission perm); | 46 | KMemoryPermission perm); |
| 47 | Result MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size); | 47 | Result MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size); |
| 48 | Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size, | 48 | Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size, |
| 49 | ICacheInvalidationStrategy icache_invalidation_strategy); | 49 | ICacheInvalidationStrategy icache_invalidation_strategy); |
| 50 | Result UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table, | 50 | Result UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table, |
| 51 | VAddr src_addr); | 51 | VAddr src_addr); |
| 52 | Result MapPhysicalMemory(VAddr addr, std::size_t size); | 52 | Result MapPhysicalMemory(VAddr addr, size_t size); |
| 53 | Result UnmapPhysicalMemory(VAddr addr, std::size_t size); | 53 | Result UnmapPhysicalMemory(VAddr addr, size_t size); |
| 54 | Result MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); | 54 | Result MapMemory(VAddr dst_addr, VAddr src_addr, size_t size); |
| 55 | Result UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); | 55 | Result UnmapMemory(VAddr dst_addr, VAddr src_addr, size_t size); |
| 56 | Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state, | 56 | Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state, |
| 57 | KMemoryPermission perm); | 57 | KMemoryPermission perm); |
| 58 | Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr, | 58 | Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr, |
| 59 | KMemoryState state, KMemoryPermission perm) { | 59 | KMemoryState state, KMemoryPermission perm) { |
| 60 | return this->MapPages(out_addr, num_pages, alignment, phys_addr, true, | 60 | return this->MapPages(out_addr, num_pages, alignment, phys_addr, true, |
| 61 | this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize, | 61 | this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize, |
| 62 | state, perm); | 62 | state, perm); |
| 63 | } | 63 | } |
| 64 | Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state); | 64 | Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state); |
| 65 | Result UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state); | 65 | Result UnmapPages(VAddr address, size_t num_pages, KMemoryState state); |
| 66 | Result SetProcessMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission svc_perm); | 66 | Result SetProcessMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm); |
| 67 | KMemoryInfo QueryInfo(VAddr addr); | 67 | KMemoryInfo QueryInfo(VAddr addr); |
| 68 | Result SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission perm); | 68 | Result SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission perm); |
| 69 | Result SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr); | 69 | Result SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr); |
| 70 | Result SetMaxHeapSize(std::size_t size); | 70 | Result SetMaxHeapSize(size_t size); |
| 71 | Result SetHeapSize(VAddr* out, std::size_t size); | 71 | Result SetHeapSize(VAddr* out, size_t size); |
| 72 | ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align, | 72 | ResultVal<VAddr> AllocateAndMapMemory(size_t needed_num_pages, size_t align, bool is_map_only, |
| 73 | bool is_map_only, VAddr region_start, | 73 | VAddr region_start, size_t region_num_pages, |
| 74 | std::size_t region_num_pages, KMemoryState state, | 74 | KMemoryState state, KMemoryPermission perm, |
| 75 | KMemoryPermission perm, PAddr map_addr = 0); | 75 | PAddr map_addr = 0); |
| 76 | Result UnlockForDeviceAddressSpace(VAddr addr, std::size_t size); | 76 | |
| 77 | Result LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size); | 77 | Result LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm, |
| 78 | Result UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg); | 78 | bool is_aligned); |
| 79 | Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size); | ||
| 80 | |||
| 81 | Result UnlockForDeviceAddressSpace(VAddr addr, size_t size); | ||
| 82 | |||
| 83 | Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size); | ||
| 84 | Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg); | ||
| 79 | Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages, | 85 | Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages, |
| 80 | KMemoryState state_mask, KMemoryState state, | 86 | KMemoryState state_mask, KMemoryState state, |
| 81 | KMemoryPermission perm_mask, KMemoryPermission perm, | 87 | KMemoryPermission perm_mask, KMemoryPermission perm, |
| 82 | KMemoryAttribute attr_mask, KMemoryAttribute attr); | 88 | KMemoryAttribute attr_mask, KMemoryAttribute attr); |
| 83 | 89 | ||
| 84 | Common::PageTable& PageTableImpl() { | 90 | Common::PageTable& PageTableImpl() { |
| 85 | return page_table_impl; | 91 | return m_page_table_impl; |
| 86 | } | 92 | } |
| 87 | 93 | ||
| 88 | const Common::PageTable& PageTableImpl() const { | 94 | const Common::PageTable& PageTableImpl() const { |
| 89 | return page_table_impl; | 95 | return m_page_table_impl; |
| 90 | } | 96 | } |
| 91 | 97 | ||
| 92 | bool CanContain(VAddr addr, std::size_t size, KMemoryState state) const; | 98 | bool CanContain(VAddr addr, size_t size, KMemoryState state) const; |
| 93 | 99 | ||
| 94 | private: | 100 | private: |
| 95 | enum class OperationType : u32 { | 101 | enum class OperationType : u32 { |
| @@ -104,30 +110,30 @@ private: | |||
| 104 | KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared; | 110 | KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared; |
| 105 | 111 | ||
| 106 | Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm); | 112 | Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm); |
| 107 | Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr, | 113 | Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr, |
| 108 | bool is_pa_valid, VAddr region_start, std::size_t region_num_pages, | 114 | bool is_pa_valid, VAddr region_start, size_t region_num_pages, |
| 109 | KMemoryState state, KMemoryPermission perm); | 115 | KMemoryState state, KMemoryPermission perm); |
| 110 | Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list); | 116 | Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list); |
| 111 | bool IsRegionContiguous(VAddr addr, u64 size) const; | 117 | bool IsRegionContiguous(VAddr addr, u64 size) const; |
| 112 | void AddRegionToPages(VAddr start, std::size_t num_pages, KPageGroup& page_linked_list); | 118 | void AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list); |
| 113 | KMemoryInfo QueryInfoImpl(VAddr addr); | 119 | KMemoryInfo QueryInfoImpl(VAddr addr); |
| 114 | VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages, | 120 | VAddr AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages, |
| 115 | std::size_t align); | 121 | size_t align); |
| 116 | Result Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group, | 122 | Result Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group, |
| 117 | OperationType operation); | 123 | OperationType operation); |
| 118 | Result Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm, | 124 | Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation, |
| 119 | OperationType operation, PAddr map_addr = 0); | 125 | PAddr map_addr = 0); |
| 120 | VAddr GetRegionAddress(KMemoryState state) const; | 126 | VAddr GetRegionAddress(KMemoryState state) const; |
| 121 | std::size_t GetRegionSize(KMemoryState state) const; | 127 | size_t GetRegionSize(KMemoryState state) const; |
| 122 | 128 | ||
| 123 | VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages, | 129 | VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages, |
| 124 | std::size_t alignment, std::size_t offset, std::size_t guard_pages); | 130 | size_t alignment, size_t offset, size_t guard_pages); |
| 125 | 131 | ||
| 126 | Result CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, std::size_t size, | 132 | Result CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size, |
| 127 | KMemoryState state_mask, KMemoryState state, | 133 | KMemoryState state_mask, KMemoryState state, |
| 128 | KMemoryPermission perm_mask, KMemoryPermission perm, | 134 | KMemoryPermission perm_mask, KMemoryPermission perm, |
| 129 | KMemoryAttribute attr_mask, KMemoryAttribute attr) const; | 135 | KMemoryAttribute attr_mask, KMemoryAttribute attr) const; |
| 130 | Result CheckMemoryStateContiguous(VAddr addr, std::size_t size, KMemoryState state_mask, | 136 | Result CheckMemoryStateContiguous(VAddr addr, size_t size, KMemoryState state_mask, |
| 131 | KMemoryState state, KMemoryPermission perm_mask, | 137 | KMemoryState state, KMemoryPermission perm_mask, |
| 132 | KMemoryPermission perm, KMemoryAttribute attr_mask, | 138 | KMemoryPermission perm, KMemoryAttribute attr_mask, |
| 133 | KMemoryAttribute attr) const { | 139 | KMemoryAttribute attr) const { |
| @@ -139,12 +145,12 @@ private: | |||
| 139 | KMemoryPermission perm_mask, KMemoryPermission perm, | 145 | KMemoryPermission perm_mask, KMemoryPermission perm, |
| 140 | KMemoryAttribute attr_mask, KMemoryAttribute attr) const; | 146 | KMemoryAttribute attr_mask, KMemoryAttribute attr) const; |
| 141 | Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, | 147 | Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, |
| 142 | KMemoryAttribute* out_attr, std::size_t* out_blocks_needed, VAddr addr, | 148 | KMemoryAttribute* out_attr, size_t* out_blocks_needed, VAddr addr, |
| 143 | std::size_t size, KMemoryState state_mask, KMemoryState state, | 149 | size_t size, KMemoryState state_mask, KMemoryState state, |
| 144 | KMemoryPermission perm_mask, KMemoryPermission perm, | 150 | KMemoryPermission perm_mask, KMemoryPermission perm, |
| 145 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | 151 | KMemoryAttribute attr_mask, KMemoryAttribute attr, |
| 146 | KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const; | 152 | KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const; |
| 147 | Result CheckMemoryState(std::size_t* out_blocks_needed, VAddr addr, std::size_t size, | 153 | Result CheckMemoryState(size_t* out_blocks_needed, VAddr addr, size_t size, |
| 148 | KMemoryState state_mask, KMemoryState state, | 154 | KMemoryState state_mask, KMemoryState state, |
| 149 | KMemoryPermission perm_mask, KMemoryPermission perm, | 155 | KMemoryPermission perm_mask, KMemoryPermission perm, |
| 150 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | 156 | KMemoryAttribute attr_mask, KMemoryAttribute attr, |
| @@ -152,8 +158,8 @@ private: | |||
| 152 | return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size, | 158 | return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size, |
| 153 | state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr); | 159 | state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr); |
| 154 | } | 160 | } |
| 155 | Result CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask, | 161 | Result CheckMemoryState(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state, |
| 156 | KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, | 162 | KMemoryPermission perm_mask, KMemoryPermission perm, |
| 157 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | 163 | KMemoryAttribute attr_mask, KMemoryAttribute attr, |
| 158 | KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { | 164 | KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { |
| 159 | return this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm, | 165 | return this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm, |
| @@ -175,13 +181,13 @@ private: | |||
| 175 | bool IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages); | 181 | bool IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages); |
| 176 | 182 | ||
| 177 | bool IsLockedByCurrentThread() const { | 183 | bool IsLockedByCurrentThread() const { |
| 178 | return general_lock.IsLockedByCurrentThread(); | 184 | return m_general_lock.IsLockedByCurrentThread(); |
| 179 | } | 185 | } |
| 180 | 186 | ||
| 181 | bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) { | 187 | bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) { |
| 182 | ASSERT(this->IsLockedByCurrentThread()); | 188 | ASSERT(this->IsLockedByCurrentThread()); |
| 183 | 189 | ||
| 184 | return layout.IsHeapPhysicalAddress(cached_physical_heap_region, phys_addr); | 190 | return layout.IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr); |
| 185 | } | 191 | } |
| 186 | 192 | ||
| 187 | bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const { | 193 | bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const { |
| @@ -192,93 +198,93 @@ private: | |||
| 192 | return *out != 0; | 198 | return *out != 0; |
| 193 | } | 199 | } |
| 194 | 200 | ||
| 195 | mutable KLightLock general_lock; | 201 | mutable KLightLock m_general_lock; |
| 196 | mutable KLightLock map_physical_memory_lock; | 202 | mutable KLightLock m_map_physical_memory_lock; |
| 197 | 203 | ||
| 198 | public: | 204 | public: |
| 199 | constexpr VAddr GetAddressSpaceStart() const { | 205 | constexpr VAddr GetAddressSpaceStart() const { |
| 200 | return address_space_start; | 206 | return m_address_space_start; |
| 201 | } | 207 | } |
| 202 | constexpr VAddr GetAddressSpaceEnd() const { | 208 | constexpr VAddr GetAddressSpaceEnd() const { |
| 203 | return address_space_end; | 209 | return m_address_space_end; |
| 204 | } | 210 | } |
| 205 | constexpr std::size_t GetAddressSpaceSize() const { | 211 | constexpr size_t GetAddressSpaceSize() const { |
| 206 | return address_space_end - address_space_start; | 212 | return m_address_space_end - m_address_space_start; |
| 207 | } | 213 | } |
| 208 | constexpr VAddr GetHeapRegionStart() const { | 214 | constexpr VAddr GetHeapRegionStart() const { |
| 209 | return heap_region_start; | 215 | return m_heap_region_start; |
| 210 | } | 216 | } |
| 211 | constexpr VAddr GetHeapRegionEnd() const { | 217 | constexpr VAddr GetHeapRegionEnd() const { |
| 212 | return heap_region_end; | 218 | return m_heap_region_end; |
| 213 | } | 219 | } |
| 214 | constexpr std::size_t GetHeapRegionSize() const { | 220 | constexpr size_t GetHeapRegionSize() const { |
| 215 | return heap_region_end - heap_region_start; | 221 | return m_heap_region_end - m_heap_region_start; |
| 216 | } | 222 | } |
| 217 | constexpr VAddr GetAliasRegionStart() const { | 223 | constexpr VAddr GetAliasRegionStart() const { |
| 218 | return alias_region_start; | 224 | return m_alias_region_start; |
| 219 | } | 225 | } |
| 220 | constexpr VAddr GetAliasRegionEnd() const { | 226 | constexpr VAddr GetAliasRegionEnd() const { |
| 221 | return alias_region_end; | 227 | return m_alias_region_end; |
| 222 | } | 228 | } |
| 223 | constexpr std::size_t GetAliasRegionSize() const { | 229 | constexpr size_t GetAliasRegionSize() const { |
| 224 | return alias_region_end - alias_region_start; | 230 | return m_alias_region_end - m_alias_region_start; |
| 225 | } | 231 | } |
| 226 | constexpr VAddr GetStackRegionStart() const { | 232 | constexpr VAddr GetStackRegionStart() const { |
| 227 | return stack_region_start; | 233 | return m_stack_region_start; |
| 228 | } | 234 | } |
| 229 | constexpr VAddr GetStackRegionEnd() const { | 235 | constexpr VAddr GetStackRegionEnd() const { |
| 230 | return stack_region_end; | 236 | return m_stack_region_end; |
| 231 | } | 237 | } |
| 232 | constexpr std::size_t GetStackRegionSize() const { | 238 | constexpr size_t GetStackRegionSize() const { |
| 233 | return stack_region_end - stack_region_start; | 239 | return m_stack_region_end - m_stack_region_start; |
| 234 | } | 240 | } |
| 235 | constexpr VAddr GetKernelMapRegionStart() const { | 241 | constexpr VAddr GetKernelMapRegionStart() const { |
| 236 | return kernel_map_region_start; | 242 | return m_kernel_map_region_start; |
| 237 | } | 243 | } |
| 238 | constexpr VAddr GetKernelMapRegionEnd() const { | 244 | constexpr VAddr GetKernelMapRegionEnd() const { |
| 239 | return kernel_map_region_end; | 245 | return m_kernel_map_region_end; |
| 240 | } | 246 | } |
| 241 | constexpr VAddr GetCodeRegionStart() const { | 247 | constexpr VAddr GetCodeRegionStart() const { |
| 242 | return code_region_start; | 248 | return m_code_region_start; |
| 243 | } | 249 | } |
| 244 | constexpr VAddr GetCodeRegionEnd() const { | 250 | constexpr VAddr GetCodeRegionEnd() const { |
| 245 | return code_region_end; | 251 | return m_code_region_end; |
| 246 | } | 252 | } |
| 247 | constexpr VAddr GetAliasCodeRegionStart() const { | 253 | constexpr VAddr GetAliasCodeRegionStart() const { |
| 248 | return alias_code_region_start; | 254 | return m_alias_code_region_start; |
| 249 | } | 255 | } |
| 250 | constexpr VAddr GetAliasCodeRegionSize() const { | 256 | constexpr VAddr GetAliasCodeRegionSize() const { |
| 251 | return alias_code_region_end - alias_code_region_start; | 257 | return m_alias_code_region_end - m_alias_code_region_start; |
| 252 | } | 258 | } |
| 253 | std::size_t GetNormalMemorySize() { | 259 | size_t GetNormalMemorySize() { |
| 254 | KScopedLightLock lk(general_lock); | 260 | KScopedLightLock lk(m_general_lock); |
| 255 | return GetHeapSize() + mapped_physical_memory_size; | 261 | return GetHeapSize() + m_mapped_physical_memory_size; |
| 256 | } | 262 | } |
| 257 | constexpr std::size_t GetAddressSpaceWidth() const { | 263 | constexpr size_t GetAddressSpaceWidth() const { |
| 258 | return address_space_width; | 264 | return m_address_space_width; |
| 259 | } | 265 | } |
| 260 | constexpr std::size_t GetHeapSize() const { | 266 | constexpr size_t GetHeapSize() const { |
| 261 | return current_heap_end - heap_region_start; | 267 | return m_current_heap_end - m_heap_region_start; |
| 262 | } | 268 | } |
| 263 | constexpr bool IsInsideAddressSpace(VAddr address, std::size_t size) const { | 269 | constexpr bool IsInsideAddressSpace(VAddr address, size_t size) const { |
| 264 | return address_space_start <= address && address + size - 1 <= address_space_end - 1; | 270 | return m_address_space_start <= address && address + size - 1 <= m_address_space_end - 1; |
| 265 | } | 271 | } |
| 266 | constexpr bool IsOutsideAliasRegion(VAddr address, std::size_t size) const { | 272 | constexpr bool IsOutsideAliasRegion(VAddr address, size_t size) const { |
| 267 | return alias_region_start > address || address + size - 1 > alias_region_end - 1; | 273 | return m_alias_region_start > address || address + size - 1 > m_alias_region_end - 1; |
| 268 | } | 274 | } |
| 269 | constexpr bool IsOutsideStackRegion(VAddr address, std::size_t size) const { | 275 | constexpr bool IsOutsideStackRegion(VAddr address, size_t size) const { |
| 270 | return stack_region_start > address || address + size - 1 > stack_region_end - 1; | 276 | return m_stack_region_start > address || address + size - 1 > m_stack_region_end - 1; |
| 271 | } | 277 | } |
| 272 | constexpr bool IsInvalidRegion(VAddr address, std::size_t size) const { | 278 | constexpr bool IsInvalidRegion(VAddr address, size_t size) const { |
| 273 | return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1; | 279 | return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1; |
| 274 | } | 280 | } |
| 275 | constexpr bool IsInsideHeapRegion(VAddr address, std::size_t size) const { | 281 | constexpr bool IsInsideHeapRegion(VAddr address, size_t size) const { |
| 276 | return address + size > heap_region_start && heap_region_end > address; | 282 | return address + size > m_heap_region_start && m_heap_region_end > address; |
| 277 | } | 283 | } |
| 278 | constexpr bool IsInsideAliasRegion(VAddr address, std::size_t size) const { | 284 | constexpr bool IsInsideAliasRegion(VAddr address, size_t size) const { |
| 279 | return address + size > alias_region_start && alias_region_end > address; | 285 | return address + size > m_alias_region_start && m_alias_region_end > address; |
| 280 | } | 286 | } |
| 281 | constexpr bool IsOutsideASLRRegion(VAddr address, std::size_t size) const { | 287 | constexpr bool IsOutsideASLRRegion(VAddr address, size_t size) const { |
| 282 | if (IsInvalidRegion(address, size)) { | 288 | if (IsInvalidRegion(address, size)) { |
| 283 | return true; | 289 | return true; |
| 284 | } | 290 | } |
| @@ -290,77 +296,78 @@ public: | |||
| 290 | } | 296 | } |
| 291 | return {}; | 297 | return {}; |
| 292 | } | 298 | } |
| 293 | constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const { | 299 | constexpr bool IsInsideASLRRegion(VAddr address, size_t size) const { |
| 294 | return !IsOutsideASLRRegion(address, size); | 300 | return !IsOutsideASLRRegion(address, size); |
| 295 | } | 301 | } |
| 296 | constexpr std::size_t GetNumGuardPages() const { | 302 | constexpr size_t GetNumGuardPages() const { |
| 297 | return IsKernel() ? 1 : 4; | 303 | return IsKernel() ? 1 : 4; |
| 298 | } | 304 | } |
| 299 | PAddr GetPhysicalAddr(VAddr addr) const { | 305 | PAddr GetPhysicalAddr(VAddr addr) const { |
| 300 | const auto backing_addr = page_table_impl.backing_addr[addr >> PageBits]; | 306 | const auto backing_addr = m_page_table_impl.backing_addr[addr >> PageBits]; |
| 301 | ASSERT(backing_addr); | 307 | ASSERT(backing_addr); |
| 302 | return backing_addr + addr; | 308 | return backing_addr + addr; |
| 303 | } | 309 | } |
| 304 | constexpr bool Contains(VAddr addr) const { | 310 | constexpr bool Contains(VAddr addr) const { |
| 305 | return address_space_start <= addr && addr <= address_space_end - 1; | 311 | return m_address_space_start <= addr && addr <= m_address_space_end - 1; |
| 306 | } | 312 | } |
| 307 | constexpr bool Contains(VAddr addr, std::size_t size) const { | 313 | constexpr bool Contains(VAddr addr, size_t size) const { |
| 308 | return address_space_start <= addr && addr < addr + size && | 314 | return m_address_space_start <= addr && addr < addr + size && |
| 309 | addr + size - 1 <= address_space_end - 1; | 315 | addr + size - 1 <= m_address_space_end - 1; |
| 310 | } | 316 | } |
| 311 | 317 | ||
| 312 | private: | 318 | private: |
| 313 | constexpr bool IsKernel() const { | 319 | constexpr bool IsKernel() const { |
| 314 | return is_kernel; | 320 | return m_is_kernel; |
| 315 | } | 321 | } |
| 316 | constexpr bool IsAslrEnabled() const { | 322 | constexpr bool IsAslrEnabled() const { |
| 317 | return is_aslr_enabled; | 323 | return m_enable_aslr; |
| 318 | } | 324 | } |
| 319 | 325 | ||
| 320 | constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const { | 326 | constexpr bool ContainsPages(VAddr addr, size_t num_pages) const { |
| 321 | return (address_space_start <= addr) && | 327 | return (m_address_space_start <= addr) && |
| 322 | (num_pages <= (address_space_end - address_space_start) / PageSize) && | 328 | (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) && |
| 323 | (addr + num_pages * PageSize - 1 <= address_space_end - 1); | 329 | (addr + num_pages * PageSize - 1 <= m_address_space_end - 1); |
| 324 | } | 330 | } |
| 325 | 331 | ||
| 326 | private: | 332 | private: |
| 327 | VAddr address_space_start{}; | 333 | VAddr m_address_space_start{}; |
| 328 | VAddr address_space_end{}; | 334 | VAddr m_address_space_end{}; |
| 329 | VAddr heap_region_start{}; | 335 | VAddr m_heap_region_start{}; |
| 330 | VAddr heap_region_end{}; | 336 | VAddr m_heap_region_end{}; |
| 331 | VAddr current_heap_end{}; | 337 | VAddr m_current_heap_end{}; |
| 332 | VAddr alias_region_start{}; | 338 | VAddr m_alias_region_start{}; |
| 333 | VAddr alias_region_end{}; | 339 | VAddr m_alias_region_end{}; |
| 334 | VAddr stack_region_start{}; | 340 | VAddr m_stack_region_start{}; |
| 335 | VAddr stack_region_end{}; | 341 | VAddr m_stack_region_end{}; |
| 336 | VAddr kernel_map_region_start{}; | 342 | VAddr m_kernel_map_region_start{}; |
| 337 | VAddr kernel_map_region_end{}; | 343 | VAddr m_kernel_map_region_end{}; |
| 338 | VAddr code_region_start{}; | 344 | VAddr m_code_region_start{}; |
| 339 | VAddr code_region_end{}; | 345 | VAddr m_code_region_end{}; |
| 340 | VAddr alias_code_region_start{}; | 346 | VAddr m_alias_code_region_start{}; |
| 341 | VAddr alias_code_region_end{}; | 347 | VAddr m_alias_code_region_end{}; |
| 342 | 348 | ||
| 343 | std::size_t mapped_physical_memory_size{}; | 349 | size_t m_mapped_physical_memory_size{}; |
| 344 | std::size_t max_heap_size{}; | 350 | size_t m_max_heap_size{}; |
| 345 | std::size_t max_physical_memory_size{}; | 351 | size_t m_max_physical_memory_size{}; |
| 346 | std::size_t address_space_width{}; | 352 | size_t m_address_space_width{}; |
| 347 | 353 | ||
| 348 | KMemoryBlockManager memory_block_manager; | 354 | KMemoryBlockManager m_memory_block_manager; |
| 349 | 355 | ||
| 350 | bool is_kernel{}; | 356 | bool m_is_kernel{}; |
| 351 | bool is_aslr_enabled{}; | 357 | bool m_enable_aslr{}; |
| 352 | 358 | bool m_enable_device_address_space_merge{}; | |
| 353 | KMemoryBlockSlabManager* memory_block_slab_manager{}; | 359 | |
| 354 | 360 | KMemoryBlockSlabManager* m_memory_block_slab_manager{}; | |
| 355 | u32 heap_fill_value{}; | 361 | |
| 356 | const KMemoryRegion* cached_physical_heap_region{}; | 362 | u32 m_heap_fill_value{}; |
| 357 | 363 | const KMemoryRegion* m_cached_physical_heap_region{}; | |
| 358 | KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application}; | 364 | |
| 359 | KMemoryManager::Direction allocation_option{KMemoryManager::Direction::FromFront}; | 365 | KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application}; |
| 360 | 366 | KMemoryManager::Direction m_allocation_option{KMemoryManager::Direction::FromFront}; | |
| 361 | Common::PageTable page_table_impl; | 367 | |
| 362 | 368 | Common::PageTable m_page_table_impl; | |
| 363 | Core::System& system; | 369 | |
| 370 | Core::System& m_system; | ||
| 364 | }; | 371 | }; |
| 365 | 372 | ||
| 366 | } // namespace Kernel | 373 | } // namespace Kernel |
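Annotation (not part of the commit): the header now replaces the single device-address-space lock with map- and unmap-specific entry points while keeping UnlockForDeviceAddressSpace as the common release. A hedged sketch of how a driver-side mapping might bracket the new pair; the two wrapper functions and their arguments are assumptions, only the three KPageTable calls are declared above:

    #include "core/hle/kernel/k_page_table.h"

    Result MapIntoDeviceAs(Kernel::KPageTable& page_table, VAddr address, size_t size) {
        // Marks the range as shared with the device before the device mapping
        // exists; perm/is_aligned mirror the nvmap call site below.
        R_TRY(page_table.LockForMapDeviceAddressSpace(
            address, size, Kernel::KMemoryPermission::None, /*is_aligned=*/true));
        // ... program the device address space here ...
        return ResultSuccess;
    }

    Result UnmapFromDeviceAs(Kernel::KPageTable& page_table, VAddr address, size_t size) {
        // Re-checks the range before the device mapping is torn down.
        R_TRY(page_table.LockForUnmapDeviceAddressSpace(address, size));
        // ... remove the device mapping, then drop the shared state
        // (the UnshareToDevice block update visible in the .cpp hunks).
        return page_table.UnlockForDeviceAddressSpace(address, size);
    }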
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp index ddf273b5e..b60679021 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.cpp +++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp | |||
| @@ -128,7 +128,8 @@ NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output) | |||
| 128 | } | 128 | } |
| 129 | ASSERT(system.CurrentProcess() | 129 | ASSERT(system.CurrentProcess() |
| 130 | ->PageTable() | 130 | ->PageTable() |
| 131 | .LockForDeviceAddressSpace(handle_description->address, handle_description->size) | 131 | .LockForMapDeviceAddressSpace(handle_description->address, handle_description->size, |
| 132 | Kernel::KMemoryPermission::None, true) | ||
| 132 | .IsSuccess()); | 133 | .IsSuccess()); |
| 133 | std::memcpy(output.data(), ¶ms, sizeof(params)); | 134 | std::memcpy(output.data(), ¶ms, sizeof(params)); |
| 134 | return result; | 135 | return result; |
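Annotation (not part of the commit): nvmap's IocAlloc is the only call site updated here; it now pins the handle's backing range through the map-specific lock with KMemoryPermission::None and is_aligned = true. Release paths can keep using UnlockForDeviceAddressSpace, which the header retains; a hypothetical counterpart for a handle being freed (handle_description and the surrounding service plumbing are assumed from the hunk above, and this snippet is illustrative rather than part of the diff):

    // Hypothetical free-path sketch, not present in this commit.
    ASSERT(system.CurrentProcess()
               ->PageTable()
               .UnlockForDeviceAddressSpace(handle_description->address,
                                            handle_description->size)
               .IsSuccess());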