| field | value |
|---|---|
| author | 2018-11-13 18:52:18 -0800 |
| committer | 2018-11-13 18:52:18 -0800 |
| commit | 70f189d7af3eea0123963f781e6a579c8f4725bd (patch) |
| tree | 82e03c9e4df7286c1944f40d4abe2f3a0d876426 /src/core |
| parent | Merge pull request #1682 from lioncash/audio (diff) |
| parent | vm_manager: Unstub GetTotalHeapUsage() (diff) |
Merge pull request #1680 from lioncash/mem
kernel/process: Migrate heap-related memory management out of the process class and into the vm manager
Diffstat (limited to 'src/core')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/core/hle/kernel/process.cpp | 76 |
| -rw-r--r-- | src/core/hle/kernel/process.h | 11 |
| -rw-r--r-- | src/core/hle/kernel/vm_manager.cpp | 82 |
| -rw-r--r-- | src/core/hle/kernel/vm_manager.h | 15 |
4 files changed, 98 insertions, 86 deletions
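
The net effect, condensed from the hunks below: the heap bookkeeping (`heap_memory`, `heap_start`, `heap_end`, `heap_used`) moves from `Process` into `VMManager`, the `Process` entry points become one-line forwards, and `VMManager::GetTotalHeapUsage()` can be unstubbed because the counter now lives next to the mappings it describes:

```cpp
// Condensed view of the resulting layering (taken from the diffs below, not new code).
ResultVal<VAddr> Process::HeapAllocate(VAddr target, u64 size, VMAPermission perms) {
    return vm_manager.HeapAllocate(target, size, perms); // all heap state lives in VMManager now
}

ResultCode Process::HeapFree(VAddr target, u32 size) {
    return vm_manager.HeapFree(target, size);
}

ResultCode Process::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
    return vm_manager.MirrorMemory(dst_addr, src_addr, size);
}

u64 VMManager::GetTotalHeapUsage() const {
    return heap_used; // previously a stub that logged a warning and returned 0
}
```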
```diff
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index 420218d59..e78e3a950 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -5,11 +5,9 @@
 #include <algorithm>
 #include <memory>
 #include "common/assert.h"
-#include "common/common_funcs.h"
 #include "common/logging/log.h"
 #include "core/core.h"
 #include "core/file_sys/program_metadata.h"
-#include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/resource_limit.h"
@@ -241,83 +239,15 @@ void Process::LoadModule(CodeSet module_, VAddr base_addr) {
 }
 
 ResultVal<VAddr> Process::HeapAllocate(VAddr target, u64 size, VMAPermission perms) {
-    if (target < vm_manager.GetHeapRegionBaseAddress() ||
-        target + size > vm_manager.GetHeapRegionEndAddress() || target + size < target) {
-        return ERR_INVALID_ADDRESS;
-    }
-
-    if (heap_memory == nullptr) {
-        // Initialize heap
-        heap_memory = std::make_shared<std::vector<u8>>();
-        heap_start = heap_end = target;
-    } else {
-        vm_manager.UnmapRange(heap_start, heap_end - heap_start);
-    }
-
-    // If necessary, expand backing vector to cover new heap extents.
-    if (target < heap_start) {
-        heap_memory->insert(begin(*heap_memory), heap_start - target, 0);
-        heap_start = target;
-        vm_manager.RefreshMemoryBlockMappings(heap_memory.get());
-    }
-    if (target + size > heap_end) {
-        heap_memory->insert(end(*heap_memory), (target + size) - heap_end, 0);
-        heap_end = target + size;
-        vm_manager.RefreshMemoryBlockMappings(heap_memory.get());
-    }
-    ASSERT(heap_end - heap_start == heap_memory->size());
-
-    CASCADE_RESULT(auto vma, vm_manager.MapMemoryBlock(target, heap_memory, target - heap_start,
-                                                       size, MemoryState::Heap));
-    vm_manager.Reprotect(vma, perms);
-
-    heap_used = size;
-
-    return MakeResult<VAddr>(heap_end - size);
+    return vm_manager.HeapAllocate(target, size, perms);
 }
 
 ResultCode Process::HeapFree(VAddr target, u32 size) {
-    if (target < vm_manager.GetHeapRegionBaseAddress() ||
-        target + size > vm_manager.GetHeapRegionEndAddress() || target + size < target) {
-        return ERR_INVALID_ADDRESS;
-    }
-
-    if (size == 0) {
-        return RESULT_SUCCESS;
-    }
-
-    ResultCode result = vm_manager.UnmapRange(target, size);
-    if (result.IsError())
-        return result;
-
-    heap_used -= size;
-
-    return RESULT_SUCCESS;
+    return vm_manager.HeapFree(target, size);
 }
 
 ResultCode Process::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
-    auto vma = vm_manager.FindVMA(src_addr);
-
-    ASSERT_MSG(vma != vm_manager.vma_map.end(), "Invalid memory address");
-    ASSERT_MSG(vma->second.backing_block, "Backing block doesn't exist for address");
-
-    // The returned VMA might be a bigger one encompassing the desired address.
-    auto vma_offset = src_addr - vma->first;
-    ASSERT_MSG(vma_offset + size <= vma->second.size,
-               "Shared memory exceeds bounds of mapped block");
-
-    const std::shared_ptr<std::vector<u8>>& backing_block = vma->second.backing_block;
-    std::size_t backing_block_offset = vma->second.offset + vma_offset;
-
-    CASCADE_RESULT(auto new_vma,
-                   vm_manager.MapMemoryBlock(dst_addr, backing_block, backing_block_offset, size,
-                                             MemoryState::Mapped));
-    // Protect mirror with permissions from old region
-    vm_manager.Reprotect(new_vma, vma->second.permissions);
-    // Remove permissions from old region
-    vm_manager.Reprotect(vma, VMAPermission::None);
-
-    return RESULT_SUCCESS;
+    return vm_manager.MirrorMemory(dst_addr, src_addr, size);
 }
 
 ResultCode Process::UnmapMemory(VAddr dst_addr, VAddr /*src_addr*/, u64 size) {
```
```diff
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index 8d2616c79..f79f6d7a5 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -292,17 +292,6 @@ private:
     u32 allowed_thread_priority_mask = 0xFFFFFFFF;
     u32 is_virtual_address_memory_enabled = 0;
 
-    // Memory used to back the allocations in the regular heap. A single vector is used to cover
-    // the entire virtual address space extents that bound the allocations, including any holes.
-    // This makes deallocation and reallocation of holes fast and keeps process memory contiguous
-    // in the emulator address space, allowing Memory::GetPointer to be reasonably safe.
-    std::shared_ptr<std::vector<u8>> heap_memory;
-
-    // The left/right bounds of the address space covered by heap_memory.
-    VAddr heap_start = 0;
-    VAddr heap_end = 0;
-    u64 heap_used = 0;
-
     /// The Thread Local Storage area is allocated as processes create threads,
     /// each TLS area is 0x200 bytes, so one page (0x1000) is split up in 8 parts, and each part
     /// holds the TLS for a specific thread. This vector contains which parts are in use for each
```
```diff
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index 1a92c8f70..ec7fd6150 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -243,6 +243,85 @@ ResultCode VMManager::ReprotectRange(VAddr target, u64 size, VMAPermission new_perms) {
     return RESULT_SUCCESS;
 }
 
+ResultVal<VAddr> VMManager::HeapAllocate(VAddr target, u64 size, VMAPermission perms) {
+    if (target < GetHeapRegionBaseAddress() || target + size > GetHeapRegionEndAddress() ||
+        target + size < target) {
+        return ERR_INVALID_ADDRESS;
+    }
+
+    if (heap_memory == nullptr) {
+        // Initialize heap
+        heap_memory = std::make_shared<std::vector<u8>>();
+        heap_start = heap_end = target;
+    } else {
+        UnmapRange(heap_start, heap_end - heap_start);
+    }
+
+    // If necessary, expand backing vector to cover new heap extents.
+    if (target < heap_start) {
+        heap_memory->insert(begin(*heap_memory), heap_start - target, 0);
+        heap_start = target;
+        RefreshMemoryBlockMappings(heap_memory.get());
+    }
+    if (target + size > heap_end) {
+        heap_memory->insert(end(*heap_memory), (target + size) - heap_end, 0);
+        heap_end = target + size;
+        RefreshMemoryBlockMappings(heap_memory.get());
+    }
+    ASSERT(heap_end - heap_start == heap_memory->size());
+
+    CASCADE_RESULT(auto vma, MapMemoryBlock(target, heap_memory, target - heap_start, size,
+                                            MemoryState::Heap));
+    Reprotect(vma, perms);
+
+    heap_used = size;
+
+    return MakeResult<VAddr>(heap_end - size);
+}
+
+ResultCode VMManager::HeapFree(VAddr target, u64 size) {
+    if (target < GetHeapRegionBaseAddress() || target + size > GetHeapRegionEndAddress() ||
+        target + size < target) {
+        return ERR_INVALID_ADDRESS;
+    }
+
+    if (size == 0) {
+        return RESULT_SUCCESS;
+    }
+
+    const ResultCode result = UnmapRange(target, size);
+    if (result.IsError()) {
+        return result;
+    }
+
+    heap_used -= size;
+    return RESULT_SUCCESS;
+}
+
+ResultCode VMManager::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
+    const auto vma = FindVMA(src_addr);
+
+    ASSERT_MSG(vma != vma_map.end(), "Invalid memory address");
+    ASSERT_MSG(vma->second.backing_block, "Backing block doesn't exist for address");
+
+    // The returned VMA might be a bigger one encompassing the desired address.
+    const auto vma_offset = src_addr - vma->first;
+    ASSERT_MSG(vma_offset + size <= vma->second.size,
+               "Shared memory exceeds bounds of mapped block");
+
+    const std::shared_ptr<std::vector<u8>>& backing_block = vma->second.backing_block;
+    const std::size_t backing_block_offset = vma->second.offset + vma_offset;
+
+    CASCADE_RESULT(auto new_vma, MapMemoryBlock(dst_addr, backing_block, backing_block_offset, size,
+                                                MemoryState::Mapped));
+    // Protect mirror with permissions from old region
+    Reprotect(new_vma, vma->second.permissions);
+    // Remove permissions from old region
+    Reprotect(vma, VMAPermission::None);
+
+    return RESULT_SUCCESS;
+}
+
 void VMManager::RefreshMemoryBlockMappings(const std::vector<u8>* block) {
     // If this ever proves to have a noticeable performance impact, allow users of the function to
     // specify a specific range of addresses to limit the scan to.
@@ -495,8 +574,7 @@ u64 VMManager::GetTotalMemoryUsage() const {
 }
 
 u64 VMManager::GetTotalHeapUsage() const {
-    LOG_WARNING(Kernel, "(STUBBED) called");
-    return 0x0;
+    return heap_used;
 }
 
 VAddr VMManager::GetAddressSpaceBaseAddress() const {
```
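
For orientation, a hypothetical call sequence against the relocated API. This is not taken from the codebase: the `GrowAndReleaseHeap` helper and the 2 MiB size are made up for the example, and it assumes the `VMManager` instance already has its address-space regions initialized so `GetHeapRegionBaseAddress()` is meaningful; `ResultVal::Failed()`/`Code()` are used as elsewhere in the kernel code.

```cpp
#include "core/hle/kernel/vm_manager.h"

namespace Kernel {

// Hypothetical helper, illustration only: map the heap at its base, read back the
// usage counter that GetTotalHeapUsage() now reports, then unmap the same range.
static ResultCode GrowAndReleaseHeap(VMManager& vm_manager) {
    const VAddr heap_base = vm_manager.GetHeapRegionBaseAddress();
    constexpr u64 heap_size = 0x200000; // 2 MiB, arbitrary for the example

    // Bounds checks and backing-vector growth happen inside VMManager now.
    const ResultVal<VAddr> addr =
        vm_manager.HeapAllocate(heap_base, heap_size, VMAPermission::ReadWrite);
    if (addr.Failed()) {
        return addr.Code();
    }

    // heap_used was set by HeapAllocate, so this reflects the mapping above.
    const u64 in_use = vm_manager.GetTotalHeapUsage(); // == heap_size here

    // HeapFree performs the same heap-region bounds checks before unmapping.
    return vm_manager.HeapFree(heap_base, in_use);
}

} // namespace Kernel
```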
```diff
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index 2447cbb8f..248cc46dc 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -186,6 +186,11 @@ public:
     /// Changes the permissions of a range of addresses, splitting VMAs as necessary.
     ResultCode ReprotectRange(VAddr target, u64 size, VMAPermission new_perms);
 
+    ResultVal<VAddr> HeapAllocate(VAddr target, u64 size, VMAPermission perms);
+    ResultCode HeapFree(VAddr target, u64 size);
+
+    ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size);
+
     /**
      * Scans all VMAs and updates the page table range of any that use the given vector as backing
      * memory. This should be called after any operation that causes reallocation of the vector.
@@ -343,5 +348,15 @@
 
     VAddr tls_io_region_base = 0;
     VAddr tls_io_region_end = 0;
+
+    // Memory used to back the allocations in the regular heap. A single vector is used to cover
+    // the entire virtual address space extents that bound the allocations, including any holes.
+    // This makes deallocation and reallocation of holes fast and keeps process memory contiguous
+    // in the emulator address space, allowing Memory::GetPointer to be reasonably safe.
+    std::shared_ptr<std::vector<u8>> heap_memory;
+    // The left/right bounds of the address space covered by heap_memory.
+    VAddr heap_start = 0;
+    VAddr heap_end = 0;
+    u64 heap_used = 0;
 };
 } // namespace Kernel
```
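
The comment block that moved into `VMManager` above describes the backing strategy: one contiguous vector spans everything between `heap_start` and `heap_end`, holes included, so re-mapping a hole never allocates per-hole storage. A standalone toy sketch of that extent-growing bookkeeping (plain standard library, no yuzu types, illustrative only):

```cpp
#include <cstdint>
#include <vector>

// Toy model of the heap_memory/heap_start/heap_end bookkeeping described above.
// One vector backs every address in [start, end), including unmapped holes.
struct ToyHeapBacking {
    std::vector<std::uint8_t> backing;
    std::uint64_t start = 0;
    std::uint64_t end = 0;

    // Grow the vector so that [target, target + size) is covered, zero-filling new space.
    void Cover(std::uint64_t target, std::uint64_t size) {
        if (backing.empty()) {
            start = end = target;
        }
        if (target < start) {
            backing.insert(backing.begin(), start - target, 0); // extend downwards
            start = target;
        }
        if (target + size > end) {
            backing.insert(backing.end(), (target + size) - end, 0); // extend upwards
            end = target + size;
        }
    }

    // Offset of a covered guest address within the backing vector. Growing downwards
    // shifts existing offsets and may reallocate the vector, which is why the real code
    // calls RefreshMemoryBlockMappings after each growth.
    std::uint64_t OffsetOf(std::uint64_t addr) const {
        return addr - start;
    }
};
```

The real `VMManager::HeapAllocate` layers the heap-region bounds checks, the `MemoryState::Heap` mapping, and the permission reprotect on top of exactly this growth step.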