Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/physical_memory.h |  5
-rw-r--r--  src/core/hle/kernel/vm_manager.cpp    | 37
2 files changed, 16 insertions, 26 deletions
diff --git a/src/core/hle/kernel/physical_memory.h b/src/core/hle/kernel/physical_memory.h
index 090565310..b689e8e8b 100644
--- a/src/core/hle/kernel/physical_memory.h
+++ b/src/core/hle/kernel/physical_memory.h
@@ -14,6 +14,9 @@ namespace Kernel {
 // - Second to ensure all host backing memory used is aligned to 256 bytes due
 // to strict alignment restrictions on GPU memory.
 
-using PhysicalMemory = std::vector<u8, Common::AlignmentAllocator<u8, 256>>;
+using PhysicalMemoryVector = std::vector<u8, Common::AlignmentAllocator<u8, 256>>;
+class PhysicalMemory final : public PhysicalMemoryVector {
+    using PhysicalMemoryVector::PhysicalMemoryVector;
+};
 
 } // namespace Kernel
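
Note: the hunk above replaces the plain alias with a thin class that inherits the vector's constructors. A likely motivation (not stated in the diff itself) is that a distinct class, unlike a type alias, can be singled out by overload resolution; compare the MapMemoryRegion call at the end of this diff, which now receives the block itself rather than a raw pointer. A minimal, self-contained sketch of the pattern follows; ByteVector, MapRegion, and the sizes are placeholders, not yuzu code.

#include <cstdint>
#include <vector>

using u8 = std::uint8_t;

// Plain alias: interchangeable with any other std::vector<u8>, so a function
// cannot overload on "physical memory" specifically.
using ByteVector = std::vector<u8>;

// Thin wrapper: inherits every vector constructor and member, but is a
// distinct type that overload resolution can target.
class PhysicalMemory final : public ByteVector {
    using ByteVector::ByteVector;
};

void MapRegion(const PhysicalMemory&) { /* kernel-owned backing memory */ }
void MapRegion(const ByteVector&) { /* any other byte buffer */ }

int main() {
    PhysicalMemory block(64); // inherited vector constructor, 64 zero bytes
    ByteVector bytes(64);
    MapRegion(block); // exact match beats derived-to-base: PhysicalMemory overload
    MapRegion(bytes); // ByteVector overload
}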
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index a9a20ef76..0b3500fce 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -3,6 +3,7 @@
 // Refer to the license.txt file included.
 
 #include <algorithm>
+#include <cstring>
 #include <iterator>
 #include <utility>
 #include "common/alignment.h"
@@ -269,18 +270,9 @@ ResultVal<VAddr> VMManager::SetHeapSize(u64 size) {
     // If necessary, expand backing vector to cover new heap extents in
     // the case of allocating. Otherwise, shrink the backing memory,
     // if a smaller heap has been requested.
-    const u64 old_heap_size = GetCurrentHeapSize();
-    if (size > old_heap_size) {
-        const u64 alloc_size = size - old_heap_size;
-
-        heap_memory->insert(heap_memory->end(), alloc_size, 0);
-        RefreshMemoryBlockMappings(heap_memory.get());
-    } else if (size < old_heap_size) {
-        heap_memory->resize(size);
-        heap_memory->shrink_to_fit();
-
-        RefreshMemoryBlockMappings(heap_memory.get());
-    }
+    heap_memory->resize(size);
+    heap_memory->shrink_to_fit();
+    RefreshMemoryBlockMappings(heap_memory.get());
 
     heap_end = heap_region_base + size;
     ASSERT(GetCurrentHeapSize() == heap_memory->size());
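
Note: the single resize path above relies on std::vector semantics: growing with resize() value-initializes (zero-fills) the new tail, matching the old insert(end(), alloc_size, 0) branch, while shrinking drops the tail and shrink_to_fit() asks the allocator to release spare capacity. One observable difference is that shrink_to_fit() and RefreshMemoryBlockMappings() now run on every call, even when the size is unchanged. A tiny stand-alone illustration with a plain std::vector<u8> (not the aligned PhysicalMemory type):

#include <cassert>
#include <cstdint>
#include <vector>

int main() {
    std::vector<std::uint8_t> heap(16, 0xFF);

    heap.resize(32);          // grow: new bytes are value-initialized to 0
    assert(heap[15] == 0xFF && heap[16] == 0 && heap[31] == 0);

    heap.resize(8);           // shrink: tail is dropped
    heap.shrink_to_fit();     // non-binding request to free spare capacity
    assert(heap.size() == 8 && heap[7] == 0xFF);
}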
@@ -752,24 +744,20 @@ void VMManager::MergeAdjacentVMA(VirtualMemoryArea& left, const VirtualMemoryAre
     // Always merge allocated memory blocks, even when they don't share the same backing block.
     if (left.type == VMAType::AllocatedMemoryBlock &&
         (left.backing_block != right.backing_block || left.offset + left.size != right.offset)) {
-        const auto right_begin = right.backing_block->begin() + right.offset;
-        const auto right_end = right_begin + right.size;
 
         // Check if we can save work.
         if (left.offset == 0 && left.size == left.backing_block->size()) {
             // Fast case: left is an entire backing block.
-            left.backing_block->insert(left.backing_block->end(), right_begin, right_end);
+            left.backing_block->resize(left.size + right.size);
+            std::memcpy(left.backing_block->data() + left.size,
+                        right.backing_block->data() + right.offset, right.size);
         } else {
             // Slow case: make a new memory block for left and right.
-            const auto left_begin = left.backing_block->begin() + left.offset;
-            const auto left_end = left_begin + left.size;
-            const auto left_size = static_cast<std::size_t>(std::distance(left_begin, left_end));
-            const auto right_size = static_cast<std::size_t>(std::distance(right_begin, right_end));
-
             auto new_memory = std::make_shared<PhysicalMemory>();
-            new_memory->reserve(left_size + right_size);
-            new_memory->insert(new_memory->end(), left_begin, left_end);
-            new_memory->insert(new_memory->end(), right_begin, right_end);
+            new_memory->resize(left.size + right.size);
+            std::memcpy(new_memory->data(), left.backing_block->data() + left.offset, left.size);
+            std::memcpy(new_memory->data() + left.size, right.backing_block->data() + right.offset,
+                        right.size);
 
             left.backing_block = std::move(new_memory);
             left.offset = 0;
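
Note: both branches above now concatenate byte ranges with resize() followed by std::memcpy() into the freshly added tail, instead of iterator-based insert(); the sizes and offsets are already known, so no iterator arithmetic is needed (hence the new <cstring> include). A hypothetical helper showing the same pattern in isolation; AppendBytes is illustrative only, not yuzu code, and assumes the two buffers do not overlap:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Append `size` bytes of `src`, starting at `offset`, onto the end of `dst`.
// resize() preserves the existing contents of dst; memcpy fills the new tail.
void AppendBytes(std::vector<std::uint8_t>& dst, const std::vector<std::uint8_t>& src,
                 std::size_t offset, std::size_t size) {
    const std::size_t old_size = dst.size();
    dst.resize(old_size + size);
    std::memcpy(dst.data() + old_size, src.data() + offset, size);
}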
@@ -792,8 +780,7 @@ void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
         memory.UnmapRegion(page_table, vma.base, vma.size);
         break;
     case VMAType::AllocatedMemoryBlock:
-        memory.MapMemoryRegion(page_table, vma.base, vma.size,
-                               vma.backing_block->data() + vma.offset);
+        memory.MapMemoryRegion(page_table, vma.base, vma.size, *vma.backing_block, vma.offset);
         break;
     case VMAType::BackingMemory:
         memory.MapMemoryRegion(page_table, vma.base, vma.size, vma.backing_memory);
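
Note: the last hunk hands MapMemoryRegion the backing block and an offset instead of a precomputed raw pointer, which keeps the data() + offset arithmetic inside the memory subsystem and lets the parameter type document that the target is kernel-owned physical memory. The real Memory API is not shown in this diff; the overload pair below is a hypothetical sketch of the shape such a change implies (the page_table parameter is omitted for brevity).

#include <cstddef>
#include <cstdint>
#include <vector>

using u8 = std::uint8_t;
using VAddr = std::uint64_t;
using PhysicalMemory = std::vector<u8>; // stand-in for the aligned wrapper type

struct Memory {
    // Raw-pointer form: the caller performs the offset arithmetic.
    void MapMemoryRegion(VAddr base, std::uint64_t size, u8* target) {
        // ... install the host pointer into the page table (omitted) ...
        (void)base; (void)size; (void)target;
    }

    // Container form: callers pass the block plus an offset, as in the hunk above.
    void MapMemoryRegion(VAddr base, std::uint64_t size, PhysicalMemory& block,
                         std::size_t offset) {
        MapMemoryRegion(base, size, block.data() + offset);
    }
};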