author     2015-08-22 14:01:57 -0700
committer  2015-08-22 14:01:57 -0700
commit     3efb205a68d38fe377b2c27349d91ec4c6a2d390 (patch)
tree       4326d8394e66fec3831ce6b9851da689e5aa78e8 /src/core/hle/kernel
parent     Merge pull request #1056 from lioncash/emitter (diff)
parent     Kernel: Remove unused legacy heap MapBlock_* functions (diff)
Merge pull request #1025 from yuriks/heap-management
Kernel: Correct(er) handling of Heap and Linear Heap allocations
Diffstat (limited to 'src/core/hle/kernel')

 src/core/hle/kernel/kernel.cpp         |  19
 src/core/hle/kernel/memory.cpp         | 136
 src/core/hle/kernel/memory.h           |  35
 src/core/hle/kernel/process.cpp        | 152
 src/core/hle/kernel/process.h          |  39
 src/core/hle/kernel/resource_limit.cpp |   1
 src/core/hle/kernel/thread.cpp         |   4
 src/core/hle/kernel/vm_manager.cpp     | 118
 src/core/hle/kernel/vm_manager.h       |  38

9 files changed, 497 insertions(+), 45 deletions(-)
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 5711c0405..7a401a965 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -7,11 +7,14 @@
| 7 | #include "common/assert.h" | 7 | #include "common/assert.h" |
| 8 | #include "common/logging/log.h" | 8 | #include "common/logging/log.h" |
| 9 | 9 | ||
| 10 | #include "core/hle/config_mem.h" | ||
| 10 | #include "core/hle/kernel/kernel.h" | 11 | #include "core/hle/kernel/kernel.h" |
| 11 | #include "core/hle/kernel/resource_limit.h" | 12 | #include "core/hle/kernel/memory.h" |
| 12 | #include "core/hle/kernel/process.h" | 13 | #include "core/hle/kernel/process.h" |
| 14 | #include "core/hle/kernel/resource_limit.h" | ||
| 13 | #include "core/hle/kernel/thread.h" | 15 | #include "core/hle/kernel/thread.h" |
| 14 | #include "core/hle/kernel/timer.h" | 16 | #include "core/hle/kernel/timer.h" |
| 17 | #include "core/hle/shared_page.h" | ||
| 15 | 18 | ||
| 16 | namespace Kernel { | 19 | namespace Kernel { |
| 17 | 20 | ||
@@ -119,6 +122,13 @@ void HandleTable::Clear() {
| 119 | 122 | ||
| 120 | /// Initialize the kernel | 123 | /// Initialize the kernel |
| 121 | void Init() { | 124 | void Init() { |
| 125 | ConfigMem::Init(); | ||
| 126 | SharedPage::Init(); | ||
| 127 | |||
| 128 | // TODO(yuriks): The memory type parameter needs to be determined by the ExHeader field instead | ||
| 129 | // For now it defaults to the one with a largest allocation to the app | ||
| 130 | Kernel::MemoryInit(2); // Allocates 96MB to the application | ||
| 131 | |||
| 122 | Kernel::ResourceLimitsInit(); | 132 | Kernel::ResourceLimitsInit(); |
| 123 | Kernel::ThreadingInit(); | 133 | Kernel::ThreadingInit(); |
| 124 | Kernel::TimersInit(); | 134 | Kernel::TimersInit(); |
@@ -131,11 +141,14 @@ void Init() {
| 131 | 141 | ||
| 132 | /// Shutdown the kernel | 142 | /// Shutdown the kernel |
| 133 | void Shutdown() { | 143 | void Shutdown() { |
| 144 | g_handle_table.Clear(); // Free all kernel objects | ||
| 145 | |||
| 134 | Kernel::ThreadingShutdown(); | 146 | Kernel::ThreadingShutdown(); |
| 147 | g_current_process = nullptr; | ||
| 148 | |||
| 135 | Kernel::TimersShutdown(); | 149 | Kernel::TimersShutdown(); |
| 136 | Kernel::ResourceLimitsShutdown(); | 150 | Kernel::ResourceLimitsShutdown(); |
| 137 | g_handle_table.Clear(); // Free all kernel objects | 151 | Kernel::MemoryShutdown(); |
| 138 | g_current_process = nullptr; | ||
| 139 | } | 152 | } |
| 140 | 153 | ||
| 141 | } // namespace | 154 | } // namespace |
diff --git a/src/core/hle/kernel/memory.cpp b/src/core/hle/kernel/memory.cpp
new file mode 100644
index 000000000..e4fc5f3c4
--- /dev/null
+++ b/src/core/hle/kernel/memory.cpp
@@ -0,0 +1,136 @@
| 1 | // Copyright 2014 Citra Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include <map> | ||
| 6 | #include <memory> | ||
| 7 | #include <utility> | ||
| 8 | #include <vector> | ||
| 9 | |||
| 10 | #include "common/common_types.h" | ||
| 11 | #include "common/logging/log.h" | ||
| 12 | |||
| 13 | #include "core/hle/config_mem.h" | ||
| 14 | #include "core/hle/kernel/memory.h" | ||
| 15 | #include "core/hle/kernel/vm_manager.h" | ||
| 16 | #include "core/hle/result.h" | ||
| 17 | #include "core/hle/shared_page.h" | ||
| 18 | #include "core/memory.h" | ||
| 19 | #include "core/memory_setup.h" | ||
| 20 | |||
| 21 | //////////////////////////////////////////////////////////////////////////////////////////////////// | ||
| 22 | |||
| 23 | namespace Kernel { | ||
| 24 | |||
| 25 | static MemoryRegionInfo memory_regions[3]; | ||
| 26 | |||
| 27 | /// Size of the APPLICATION, SYSTEM and BASE memory regions (respectively) for each system | ||
| 28 | /// memory configuration type. | ||
| 29 | static const u32 memory_region_sizes[8][3] = { | ||
| 30 | // Old 3DS layouts | ||
| 31 | {0x04000000, 0x02C00000, 0x01400000}, // 0 | ||
| 32 | { /* This appears to be unused. */ }, // 1 | ||
| 33 | {0x06000000, 0x00C00000, 0x01400000}, // 2 | ||
| 34 | {0x05000000, 0x01C00000, 0x01400000}, // 3 | ||
| 35 | {0x04800000, 0x02400000, 0x01400000}, // 4 | ||
| 36 | {0x02000000, 0x04C00000, 0x01400000}, // 5 | ||
| 37 | |||
| 38 | // New 3DS layouts | ||
| 39 | {0x07C00000, 0x06400000, 0x02000000}, // 6 | ||
| 40 | {0x0B200000, 0x02E00000, 0x02000000}, // 7 | ||
| 41 | }; | ||
| 42 | |||
| 43 | void MemoryInit(u32 mem_type) { | ||
| 44 | // TODO(yuriks): On the n3DS, all o3DS configurations (<=5) are forced to 6 instead. | ||
| 45 | ASSERT_MSG(mem_type <= 5, "New 3DS memory configuration aren't supported yet!"); | ||
| 46 | ASSERT(mem_type != 1); | ||
| 47 | |||
| 48 | // The kernel allocation regions (APPLICATION, SYSTEM and BASE) are laid out in sequence, with | ||
| 49 | // the sizes specified in the memory_region_sizes table. | ||
| 50 | VAddr base = 0; | ||
| 51 | for (int i = 0; i < 3; ++i) { | ||
| 52 | memory_regions[i].base = base; | ||
| 53 | memory_regions[i].size = memory_region_sizes[mem_type][i]; | ||
| 54 | memory_regions[i].linear_heap_memory = std::make_shared<std::vector<u8>>(); | ||
| 55 | |||
| 56 | base += memory_regions[i].size; | ||
| 57 | } | ||
| 58 | |||
| 59 | // We must've allocated the entire FCRAM by the end | ||
| 60 | ASSERT(base == Memory::FCRAM_SIZE); | ||
| 61 | |||
| 62 | using ConfigMem::config_mem; | ||
| 63 | config_mem.app_mem_type = mem_type; | ||
| 64 | // app_mem_malloc does not always match the configured size for memory_region[0]: in case the | ||
| 65 | // n3DS type override is in effect it reports the size the game expects, not the real one. | ||
| 66 | config_mem.app_mem_alloc = memory_region_sizes[mem_type][0]; | ||
| 67 | config_mem.sys_mem_alloc = memory_regions[1].size; | ||
| 68 | config_mem.base_mem_alloc = memory_regions[2].size; | ||
| 69 | } | ||
| 70 | |||
| 71 | void MemoryShutdown() { | ||
| 72 | for (auto& region : memory_regions) { | ||
| 73 | region.base = 0; | ||
| 74 | region.size = 0; | ||
| 75 | region.linear_heap_memory = nullptr; | ||
| 76 | } | ||
| 77 | } | ||
| 78 | |||
| 79 | MemoryRegionInfo* GetMemoryRegion(MemoryRegion region) { | ||
| 80 | switch (region) { | ||
| 81 | case MemoryRegion::APPLICATION: | ||
| 82 | return &memory_regions[0]; | ||
| 83 | case MemoryRegion::SYSTEM: | ||
| 84 | return &memory_regions[1]; | ||
| 85 | case MemoryRegion::BASE: | ||
| 86 | return &memory_regions[2]; | ||
| 87 | default: | ||
| 88 | UNREACHABLE(); | ||
| 89 | } | ||
| 90 | } | ||
| 91 | |||
| 92 | } | ||
| 93 | |||
| 94 | namespace Memory { | ||
| 95 | |||
| 96 | namespace { | ||
| 97 | |||
| 98 | struct MemoryArea { | ||
| 99 | u32 base; | ||
| 100 | u32 size; | ||
| 101 | const char* name; | ||
| 102 | }; | ||
| 103 | |||
| 104 | // We don't declare the IO regions in here since its handled by other means. | ||
| 105 | static MemoryArea memory_areas[] = { | ||
| 106 | {SHARED_MEMORY_VADDR, SHARED_MEMORY_SIZE, "Shared Memory"}, // Shared memory | ||
| 107 | {VRAM_VADDR, VRAM_SIZE, "VRAM"}, // Video memory (VRAM) | ||
| 108 | {DSP_RAM_VADDR, DSP_RAM_SIZE, "DSP RAM"}, // DSP memory | ||
| 109 | {TLS_AREA_VADDR, TLS_AREA_SIZE, "TLS Area"}, // TLS memory | ||
| 110 | }; | ||
| 111 | |||
| 112 | } | ||
| 113 | |||
| 114 | void Init() { | ||
| 115 | InitMemoryMap(); | ||
| 116 | LOG_DEBUG(HW_Memory, "initialized OK"); | ||
| 117 | } | ||
| 118 | |||
| 119 | void InitLegacyAddressSpace(Kernel::VMManager& address_space) { | ||
| 120 | using namespace Kernel; | ||
| 121 | |||
| 122 | for (MemoryArea& area : memory_areas) { | ||
| 123 | auto block = std::make_shared<std::vector<u8>>(area.size); | ||
| 124 | address_space.MapMemoryBlock(area.base, std::move(block), 0, area.size, MemoryState::Private).Unwrap(); | ||
| 125 | } | ||
| 126 | |||
| 127 | auto cfg_mem_vma = address_space.MapBackingMemory(CONFIG_MEMORY_VADDR, | ||
| 128 | (u8*)&ConfigMem::config_mem, CONFIG_MEMORY_SIZE, MemoryState::Shared).MoveFrom(); | ||
| 129 | address_space.Reprotect(cfg_mem_vma, VMAPermission::Read); | ||
| 130 | |||
| 131 | auto shared_page_vma = address_space.MapBackingMemory(SHARED_PAGE_VADDR, | ||
| 132 | (u8*)&SharedPage::shared_page, SHARED_PAGE_SIZE, MemoryState::Shared).MoveFrom(); | ||
| 133 | address_space.Reprotect(shared_page_vma, VMAPermission::Read); | ||
| 134 | } | ||
| 135 | |||
| 136 | } // namespace | ||
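
The region layout computed by MemoryInit above is easy to check by hand. The following is a standalone sketch, not Citra code (main(), the labels and the printing are illustrative only): it reproduces the base/size computation for memory configuration type 2, the default picked in kernel.cpp in this commit, and verifies that the three regions exactly cover the 0x08000000-byte FCRAM, mirroring the ASSERT in MemoryInit.

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
    // o3DS FCRAM size, as asserted against in MemoryInit (0x08000000 = 128 MiB).
    const uint32_t fcram_size = 0x08000000;

    // APPLICATION, SYSTEM and BASE sizes for memory configuration type 2,
    // copied from the memory_region_sizes table above.
    const uint32_t region_sizes[3] = {0x06000000, 0x00C00000, 0x01400000};
    const char* names[3] = {"APPLICATION", "SYSTEM", "BASE"};

    uint32_t base = 0; // offset from the start of FCRAM, not a virtual address
    for (int i = 0; i < 3; ++i) {
        std::printf("%-11s base=0x%08X size=0x%08X\n", names[i],
                    static_cast<unsigned>(base), static_cast<unsigned>(region_sizes[i]));
        base += region_sizes[i];
    }

    // The regions are laid out back-to-back and must cover all of FCRAM.
    assert(base == fcram_size);
    return 0;
}
```
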
diff --git a/src/core/hle/kernel/memory.h b/src/core/hle/kernel/memory.h
new file mode 100644
index 000000000..36690b091
--- /dev/null
+++ b/src/core/hle/kernel/memory.h
@@ -0,0 +1,35 @@
| 1 | // Copyright 2014 Citra Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include <memory> | ||
| 8 | |||
| 9 | #include "common/common_types.h" | ||
| 10 | |||
| 11 | #include "core/hle/kernel/process.h" | ||
| 12 | |||
| 13 | namespace Kernel { | ||
| 14 | |||
| 15 | class VMManager; | ||
| 16 | |||
| 17 | struct MemoryRegionInfo { | ||
| 18 | u32 base; // Not an address, but offset from start of FCRAM | ||
| 19 | u32 size; | ||
| 20 | |||
| 21 | std::shared_ptr<std::vector<u8>> linear_heap_memory; | ||
| 22 | }; | ||
| 23 | |||
| 24 | void MemoryInit(u32 mem_type); | ||
| 25 | void MemoryShutdown(); | ||
| 26 | MemoryRegionInfo* GetMemoryRegion(MemoryRegion region); | ||
| 27 | |||
| 28 | } | ||
| 29 | |||
| 30 | namespace Memory { | ||
| 31 | |||
| 32 | void Init(); | ||
| 33 | void InitLegacyAddressSpace(Kernel::VMManager& address_space); | ||
| 34 | |||
| 35 | } // namespace | ||
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index a7892c652..124047a53 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -7,11 +7,11 @@
| 7 | #include "common/logging/log.h" | 7 | #include "common/logging/log.h" |
| 8 | #include "common/make_unique.h" | 8 | #include "common/make_unique.h" |
| 9 | 9 | ||
| 10 | #include "core/hle/kernel/memory.h" | ||
| 10 | #include "core/hle/kernel/process.h" | 11 | #include "core/hle/kernel/process.h" |
| 11 | #include "core/hle/kernel/resource_limit.h" | 12 | #include "core/hle/kernel/resource_limit.h" |
| 12 | #include "core/hle/kernel/thread.h" | 13 | #include "core/hle/kernel/thread.h" |
| 13 | #include "core/hle/kernel/vm_manager.h" | 14 | #include "core/hle/kernel/vm_manager.h" |
| 14 | #include "core/mem_map.h" | ||
| 15 | #include "core/memory.h" | 15 | #include "core/memory.h" |
| 16 | 16 | ||
| 17 | namespace Kernel { | 17 | namespace Kernel { |
@@ -36,8 +36,7 @@ SharedPtr<Process> Process::Create(SharedPtr<CodeSet> code_set) {
| 36 | process->codeset = std::move(code_set); | 36 | process->codeset = std::move(code_set); |
| 37 | process->flags.raw = 0; | 37 | process->flags.raw = 0; |
| 38 | process->flags.memory_region = MemoryRegion::APPLICATION; | 38 | process->flags.memory_region = MemoryRegion::APPLICATION; |
| 39 | process->address_space = Common::make_unique<VMManager>(); | 39 | Memory::InitLegacyAddressSpace(process->vm_manager); |
| 40 | Memory::InitLegacyAddressSpace(*process->address_space); | ||
| 41 | 40 | ||
| 42 | return process; | 41 | return process; |
| 43 | } | 42 | } |
@@ -93,9 +92,11 @@ void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) {
| 93 | mapping.unk_flag = false; | 92 | mapping.unk_flag = false; |
| 94 | } else if ((type & 0xFE0) == 0xFC0) { // 0x01FF | 93 | } else if ((type & 0xFE0) == 0xFC0) { // 0x01FF |
| 95 | // Kernel version | 94 | // Kernel version |
| 96 | int minor = descriptor & 0xFF; | 95 | kernel_version = descriptor & 0xFFFF; |
| 97 | int major = (descriptor >> 8) & 0xFF; | 96 | |
| 98 | LOG_INFO(Loader, "ExHeader kernel version ignored: %d.%d", major, minor); | 97 | int minor = kernel_version & 0xFF; |
| 98 | int major = (kernel_version >> 8) & 0xFF; | ||
| 99 | LOG_INFO(Loader, "ExHeader kernel version: %d.%d", major, minor); | ||
| 99 | } else { | 100 | } else { |
| 100 | LOG_ERROR(Loader, "Unhandled kernel caps descriptor: 0x%08X", descriptor); | 101 | LOG_ERROR(Loader, "Unhandled kernel caps descriptor: 0x%08X", descriptor); |
| 101 | } | 102 | } |
@@ -103,20 +104,153 @@ void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) {
| 103 | } | 104 | } |
| 104 | 105 | ||
| 105 | void Process::Run(s32 main_thread_priority, u32 stack_size) { | 106 | void Process::Run(s32 main_thread_priority, u32 stack_size) { |
| 107 | memory_region = GetMemoryRegion(flags.memory_region); | ||
| 108 | |||
| 106 | auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions, MemoryState memory_state) { | 109 | auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions, MemoryState memory_state) { |
| 107 | auto vma = address_space->MapMemoryBlock(segment.addr, codeset->memory, | 110 | auto vma = vm_manager.MapMemoryBlock(segment.addr, codeset->memory, |
| 108 | segment.offset, segment.size, memory_state).Unwrap(); | 111 | segment.offset, segment.size, memory_state).Unwrap(); |
| 109 | address_space->Reprotect(vma, permissions); | 112 | vm_manager.Reprotect(vma, permissions); |
| 113 | misc_memory_used += segment.size; | ||
| 110 | }; | 114 | }; |
| 111 | 115 | ||
| 116 | // Map CodeSet segments | ||
| 112 | MapSegment(codeset->code, VMAPermission::ReadExecute, MemoryState::Code); | 117 | MapSegment(codeset->code, VMAPermission::ReadExecute, MemoryState::Code); |
| 113 | MapSegment(codeset->rodata, VMAPermission::Read, MemoryState::Code); | 118 | MapSegment(codeset->rodata, VMAPermission::Read, MemoryState::Code); |
| 114 | MapSegment(codeset->data, VMAPermission::ReadWrite, MemoryState::Private); | 119 | MapSegment(codeset->data, VMAPermission::ReadWrite, MemoryState::Private); |
| 115 | 120 | ||
| 116 | address_space->LogLayout(); | 121 | // Allocate and map stack |
| 122 | vm_manager.MapMemoryBlock(Memory::HEAP_VADDR_END - stack_size, | ||
| 123 | std::make_shared<std::vector<u8>>(stack_size, 0), 0, stack_size, MemoryState::Locked | ||
| 124 | ).Unwrap(); | ||
| 125 | misc_memory_used += stack_size; | ||
| 126 | |||
| 127 | vm_manager.LogLayout(Log::Level::Debug); | ||
| 117 | Kernel::SetupMainThread(codeset->entrypoint, main_thread_priority); | 128 | Kernel::SetupMainThread(codeset->entrypoint, main_thread_priority); |
| 118 | } | 129 | } |
| 119 | 130 | ||
| 131 | VAddr Process::GetLinearHeapBase() const { | ||
| 132 | return (kernel_version < 0x22C ? Memory::LINEAR_HEAP_VADDR : Memory::NEW_LINEAR_HEAP_VADDR) | ||
| 133 | + memory_region->base; | ||
| 134 | } | ||
| 135 | |||
| 136 | VAddr Process::GetLinearHeapLimit() const { | ||
| 137 | return GetLinearHeapBase() + memory_region->size; | ||
| 138 | } | ||
| 139 | |||
| 140 | ResultVal<VAddr> Process::HeapAllocate(VAddr target, u32 size, VMAPermission perms) { | ||
| 141 | if (target < Memory::HEAP_VADDR || target + size > Memory::HEAP_VADDR_END || target + size < target) { | ||
| 142 | return ERR_INVALID_ADDRESS; | ||
| 143 | } | ||
| 144 | |||
| 145 | if (heap_memory == nullptr) { | ||
| 146 | // Initialize heap | ||
| 147 | heap_memory = std::make_shared<std::vector<u8>>(); | ||
| 148 | heap_start = heap_end = target; | ||
| 149 | } | ||
| 150 | |||
| 151 | // If necessary, expand backing vector to cover new heap extents. | ||
| 152 | if (target < heap_start) { | ||
| 153 | heap_memory->insert(begin(*heap_memory), heap_start - target, 0); | ||
| 154 | heap_start = target; | ||
| 155 | vm_manager.RefreshMemoryBlockMappings(heap_memory.get()); | ||
| 156 | } | ||
| 157 | if (target + size > heap_end) { | ||
| 158 | heap_memory->insert(end(*heap_memory), (target + size) - heap_end, 0); | ||
| 159 | heap_end = target + size; | ||
| 160 | vm_manager.RefreshMemoryBlockMappings(heap_memory.get()); | ||
| 161 | } | ||
| 162 | ASSERT(heap_end - heap_start == heap_memory->size()); | ||
| 163 | |||
| 164 | CASCADE_RESULT(auto vma, vm_manager.MapMemoryBlock(target, heap_memory, target - heap_start, size, MemoryState::Private)); | ||
| 165 | vm_manager.Reprotect(vma, perms); | ||
| 166 | |||
| 167 | heap_used += size; | ||
| 168 | |||
| 169 | return MakeResult<VAddr>(heap_end - size); | ||
| 170 | } | ||
| 171 | |||
| 172 | ResultCode Process::HeapFree(VAddr target, u32 size) { | ||
| 173 | if (target < Memory::HEAP_VADDR || target + size > Memory::HEAP_VADDR_END || target + size < target) { | ||
| 174 | return ERR_INVALID_ADDRESS; | ||
| 175 | } | ||
| 176 | |||
| 177 | ResultCode result = vm_manager.UnmapRange(target, size); | ||
| 178 | if (result.IsError()) return result; | ||
| 179 | |||
| 180 | heap_used -= size; | ||
| 181 | |||
| 182 | return RESULT_SUCCESS; | ||
| 183 | } | ||
| 184 | |||
| 185 | ResultVal<VAddr> Process::LinearAllocate(VAddr target, u32 size, VMAPermission perms) { | ||
| 186 | auto& linheap_memory = memory_region->linear_heap_memory; | ||
| 187 | |||
| 188 | VAddr heap_end = GetLinearHeapBase() + (u32)linheap_memory->size(); | ||
| 189 | // Games and homebrew only ever seem to pass 0 here (which lets the kernel decide the address), | ||
| 190 | // but explicit addresses are also accepted and respected. | ||
| 191 | if (target == 0) { | ||
| 192 | target = heap_end; | ||
| 193 | } | ||
| 194 | |||
| 195 | if (target < GetLinearHeapBase() || target + size > GetLinearHeapLimit() || | ||
| 196 | target > heap_end || target + size < target) { | ||
| 197 | |||
| 198 | return ERR_INVALID_ADDRESS; | ||
| 199 | } | ||
| 200 | |||
| 201 | // Expansion of the linear heap is only allowed if you do an allocation immediately at its | ||
| 202 | // end. It's possible to free gaps in the middle of the heap and then reallocate them later, | ||
| 203 | // but expansions are only allowed at the end. | ||
| 204 | if (target == heap_end) { | ||
| 205 | linheap_memory->insert(linheap_memory->end(), size, 0); | ||
| 206 | vm_manager.RefreshMemoryBlockMappings(linheap_memory.get()); | ||
| 207 | } | ||
| 208 | |||
| 209 | // TODO(yuriks): As is, this lets processes map memory allocated by other processes from the | ||
| 210 | // same region. It is unknown if or how the 3DS kernel checks against this. | ||
| 211 | size_t offset = target - GetLinearHeapBase(); | ||
| 212 | CASCADE_RESULT(auto vma, vm_manager.MapMemoryBlock(target, linheap_memory, offset, size, MemoryState::Continuous)); | ||
| 213 | vm_manager.Reprotect(vma, perms); | ||
| 214 | |||
| 215 | linear_heap_used += size; | ||
| 216 | |||
| 217 | return MakeResult<VAddr>(target); | ||
| 218 | } | ||
| 219 | |||
| 220 | ResultCode Process::LinearFree(VAddr target, u32 size) { | ||
| 221 | auto& linheap_memory = memory_region->linear_heap_memory; | ||
| 222 | |||
| 223 | if (target < GetLinearHeapBase() || target + size > GetLinearHeapLimit() || | ||
| 224 | target + size < target) { | ||
| 225 | |||
| 226 | return ERR_INVALID_ADDRESS; | ||
| 227 | } | ||
| 228 | |||
| 229 | VAddr heap_end = GetLinearHeapBase() + (u32)linheap_memory->size(); | ||
| 230 | if (target + size > heap_end) { | ||
| 231 | return ERR_INVALID_ADDRESS_STATE; | ||
| 232 | } | ||
| 233 | |||
| 234 | ResultCode result = vm_manager.UnmapRange(target, size); | ||
| 235 | if (result.IsError()) return result; | ||
| 236 | |||
| 237 | linear_heap_used -= size; | ||
| 238 | |||
| 239 | if (target + size == heap_end) { | ||
| 240 | // End of linear heap has been freed, so check what's the last allocated block in it and | ||
| 241 | // reduce the size. | ||
| 242 | auto vma = vm_manager.FindVMA(target); | ||
| 243 | ASSERT(vma != vm_manager.vma_map.end()); | ||
| 244 | ASSERT(vma->second.type == VMAType::Free); | ||
| 245 | VAddr new_end = vma->second.base; | ||
| 246 | if (new_end >= GetLinearHeapBase()) { | ||
| 247 | linheap_memory->resize(new_end - GetLinearHeapBase()); | ||
| 248 | } | ||
| 249 | } | ||
| 250 | |||
| 251 | return RESULT_SUCCESS; | ||
| 252 | } | ||
| 253 | |||
| 120 | Kernel::Process::Process() {} | 254 | Kernel::Process::Process() {} |
| 121 | Kernel::Process::~Process() {} | 255 | Kernel::Process::~Process() {} |
| 122 | 256 | ||
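
Process::HeapAllocate above backs the whole heap with a single std::vector<u8> that is grown at either edge whenever an allocation falls outside the current [heap_start, heap_end) extents. Below is a minimal standalone model of just that growth rule; the Heap struct, addresses and main() are illustrative and not Citra's API, and the real code additionally maps the block through the VMManager and tracks usage counters.

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

struct Heap {
    std::vector<uint8_t> memory;  // single backing vector for the whole heap
    uint32_t start = 0, end = 0;  // virtual extents currently covered by `memory`

    void Allocate(uint32_t target, uint32_t size) {
        if (memory.empty())
            start = end = target;  // first allocation seeds the extents
        if (target < start) {      // grow downwards to reach `target`
            memory.insert(memory.begin(), start - target, 0);
            start = target;
        }
        if (target + size > end) { // grow upwards to cover `target + size`
            memory.insert(memory.end(), (target + size) - end, 0);
            end = target + size;
        }
        assert(end - start == memory.size());
    }
};

int main() {
    Heap heap;
    heap.Allocate(0x08000000, 0x1000);
    heap.Allocate(0x08004000, 0x2000);    // leaves a hole, but the vector stays contiguous
    assert(heap.memory.size() == 0x6000); // covers 0x08000000..0x08006000
    return 0;
}
```
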
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index 83d3aceae..60e17f251 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -15,6 +15,7 @@
| 15 | #include "common/common_types.h" | 15 | #include "common/common_types.h" |
| 16 | 16 | ||
| 17 | #include "core/hle/kernel/kernel.h" | 17 | #include "core/hle/kernel/kernel.h" |
| 18 | #include "core/hle/kernel/vm_manager.h" | ||
| 18 | 19 | ||
| 19 | namespace Kernel { | 20 | namespace Kernel { |
| 20 | 21 | ||
@@ -48,7 +49,7 @@ union ProcessFlags {
| 48 | }; | 49 | }; |
| 49 | 50 | ||
| 50 | class ResourceLimit; | 51 | class ResourceLimit; |
| 51 | class VMManager; | 52 | struct MemoryRegionInfo; |
| 52 | 53 | ||
| 53 | struct CodeSet final : public Object { | 54 | struct CodeSet final : public Object { |
| 54 | static SharedPtr<CodeSet> Create(std::string name, u64 program_id); | 55 | static SharedPtr<CodeSet> Create(std::string name, u64 program_id); |
@@ -104,14 +105,12 @@ public:
| 104 | /// processes access to specific I/O regions and device memory. | 105 | /// processes access to specific I/O regions and device memory. |
| 105 | boost::container::static_vector<AddressMapping, 8> address_mappings; | 106 | boost::container::static_vector<AddressMapping, 8> address_mappings; |
| 106 | ProcessFlags flags; | 107 | ProcessFlags flags; |
| 108 | /// Kernel compatibility version for this process | ||
| 109 | u16 kernel_version = 0; | ||
| 107 | 110 | ||
| 108 | /// The id of this process | 111 | /// The id of this process |
| 109 | u32 process_id = next_process_id++; | 112 | u32 process_id = next_process_id++; |
| 110 | 113 | ||
| 111 | /// Bitmask of the used TLS slots | ||
| 112 | std::bitset<300> used_tls_slots; | ||
| 113 | std::unique_ptr<VMManager> address_space; | ||
| 114 | |||
| 115 | /** | 114 | /** |
| 116 | * Parses a list of kernel capability descriptors (as found in the ExHeader) and applies them | 115 | * Parses a list of kernel capability descriptors (as found in the ExHeader) and applies them |
| 117 | * to this process. | 116 | * to this process. |
@@ -123,6 +122,36 @@ public:
| 123 | */ | 122 | */ |
| 124 | void Run(s32 main_thread_priority, u32 stack_size); | 123 | void Run(s32 main_thread_priority, u32 stack_size); |
| 125 | 124 | ||
| 125 | |||
| 126 | /////////////////////////////////////////////////////////////////////////////////////////////// | ||
| 127 | // Memory Management | ||
| 128 | |||
| 129 | VMManager vm_manager; | ||
| 130 | |||
| 131 | // Memory used to back the allocations in the regular heap. A single vector is used to cover | ||
| 132 | // the entire virtual address space extents that bound the allocations, including any holes. | ||
| 133 | // This makes deallocation and reallocation of holes fast and keeps process memory contiguous | ||
| 134 | // in the emulator address space, allowing Memory::GetPointer to be reasonably safe. | ||
| 135 | std::shared_ptr<std::vector<u8>> heap_memory; | ||
| 136 | // The left/right bounds of the address space covered by heap_memory. | ||
| 137 | VAddr heap_start = 0, heap_end = 0; | ||
| 138 | |||
| 139 | u32 heap_used = 0, linear_heap_used = 0, misc_memory_used = 0; | ||
| 140 | |||
| 141 | MemoryRegionInfo* memory_region = nullptr; | ||
| 142 | |||
| 143 | /// Bitmask of the used TLS slots | ||
| 144 | std::bitset<300> used_tls_slots; | ||
| 145 | |||
| 146 | VAddr GetLinearHeapBase() const; | ||
| 147 | VAddr GetLinearHeapLimit() const; | ||
| 148 | |||
| 149 | ResultVal<VAddr> HeapAllocate(VAddr target, u32 size, VMAPermission perms); | ||
| 150 | ResultCode HeapFree(VAddr target, u32 size); | ||
| 151 | |||
| 152 | ResultVal<VAddr> LinearAllocate(VAddr target, u32 size, VMAPermission perms); | ||
| 153 | ResultCode LinearFree(VAddr target, u32 size); | ||
| 154 | |||
| 126 | private: | 155 | private: |
| 127 | Process(); | 156 | Process(); |
| 128 | ~Process() override; | 157 | ~Process() override; |
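
LinearAllocate and LinearFree, declared above and implemented in process.cpp earlier in this diff, only let the shared linear heap backing vector grow when an allocation lands exactly at its current end; re-mapping a previously freed hole needs no growth. A standalone sketch of that rule follows, with illustrative addresses; the real code also validates the region bounds, maps the block through the VMManager and applies permissions.

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

int main() {
    const uint32_t linear_base = 0x14000000; // illustrative linear heap base address
    std::vector<uint8_t> linheap;            // backing memory shared by the whole region

    auto linear_alloc = [&](uint32_t target, uint32_t size) -> bool {
        uint32_t heap_end = linear_base + static_cast<uint32_t>(linheap.size());
        if (target == 0)
            target = heap_end;               // 0 lets the "kernel" pick the address
        if (target > heap_end)
            return false;                    // allocations past the current end are rejected
        if (target == heap_end)
            linheap.insert(linheap.end(), size, 0); // growth only happens at the end
        return true;
    };

    assert(linear_alloc(0, 0x1000));                     // heap grows to 0x1000 bytes
    assert(linear_alloc(linear_base + 0x1000, 0x1000));  // exactly at the end: grows again
    assert(!linear_alloc(linear_base + 0x4000, 0x1000)); // gap beyond the end: rejected
    assert(linheap.size() == 0x2000);
    return 0;
}
```
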
diff --git a/src/core/hle/kernel/resource_limit.cpp b/src/core/hle/kernel/resource_limit.cpp
index 94b3e3298..67dde08c2 100644
--- a/src/core/hle/kernel/resource_limit.cpp
+++ b/src/core/hle/kernel/resource_limit.cpp
@@ -6,7 +6,6 @@
| 6 | 6 | ||
| 7 | #include "common/logging/log.h" | 7 | #include "common/logging/log.h" |
| 8 | 8 | ||
| 9 | #include "core/mem_map.h" | ||
| 10 | #include "core/hle/kernel/resource_limit.h" | 9 | #include "core/hle/kernel/resource_limit.h" |
| 11 | 10 | ||
| 12 | namespace Kernel { | 11 | namespace Kernel { |
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 29ea6d531..c10126513 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -117,6 +117,7 @@ void Thread::Stop() {
| 117 | wait_objects.clear(); | 117 | wait_objects.clear(); |
| 118 | 118 | ||
| 119 | Kernel::g_current_process->used_tls_slots[tls_index] = false; | 119 | Kernel::g_current_process->used_tls_slots[tls_index] = false; |
| 120 | g_current_process->misc_memory_used -= Memory::TLS_ENTRY_SIZE; | ||
| 120 | 121 | ||
| 121 | HLE::Reschedule(__func__); | 122 | HLE::Reschedule(__func__); |
| 122 | } | 123 | } |
@@ -414,6 +415,7 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point,
| 414 | } | 415 | } |
| 415 | 416 | ||
| 416 | ASSERT_MSG(thread->tls_index != -1, "Out of TLS space"); | 417 | ASSERT_MSG(thread->tls_index != -1, "Out of TLS space"); |
| 418 | g_current_process->misc_memory_used += Memory::TLS_ENTRY_SIZE; | ||
| 417 | 419 | ||
| 418 | // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used | 420 | // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used |
| 419 | // to initialize the context | 421 | // to initialize the context |
@@ -504,7 +506,7 @@ void Thread::SetWaitSynchronizationOutput(s32 output) {
| 504 | } | 506 | } |
| 505 | 507 | ||
| 506 | VAddr Thread::GetTLSAddress() const { | 508 | VAddr Thread::GetTLSAddress() const { |
| 507 | return Memory::TLS_AREA_VADDR + tls_index * 0x200; | 509 | return Memory::TLS_AREA_VADDR + tls_index * Memory::TLS_ENTRY_SIZE; |
| 508 | } | 510 | } |
| 509 | 511 | ||
| 510 | //////////////////////////////////////////////////////////////////////////////////////////////////// | 512 | //////////////////////////////////////////////////////////////////////////////////////////////////// |
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index 205cc7b53..2610acf76 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -11,6 +11,15 @@
| 11 | 11 | ||
| 12 | namespace Kernel { | 12 | namespace Kernel { |
| 13 | 13 | ||
| 14 | static const char* GetMemoryStateName(MemoryState state) { | ||
| 15 | static const char* names[] = { | ||
| 16 | "Free", "Reserved", "IO", "Static", "Code", "Private", "Shared", "Continuous", "Aliased", | ||
| 17 | "Alias", "AliasCode", "Locked", | ||
| 18 | }; | ||
| 19 | |||
| 20 | return names[(int)state]; | ||
| 21 | } | ||
| 22 | |||
| 14 | bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const { | 23 | bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const { |
| 15 | ASSERT(base + size == next.base); | 24 | ASSERT(base + size == next.base); |
| 16 | if (permissions != next.permissions || | 25 | if (permissions != next.permissions || |
@@ -51,11 +60,15 @@ void VMManager::Reset() {
| 51 | } | 60 | } |
| 52 | 61 | ||
| 53 | VMManager::VMAHandle VMManager::FindVMA(VAddr target) const { | 62 | VMManager::VMAHandle VMManager::FindVMA(VAddr target) const { |
| 54 | return std::prev(vma_map.upper_bound(target)); | 63 | if (target >= MAX_ADDRESS) { |
| 64 | return vma_map.end(); | ||
| 65 | } else { | ||
| 66 | return std::prev(vma_map.upper_bound(target)); | ||
| 67 | } | ||
| 55 | } | 68 | } |
| 56 | 69 | ||
| 57 | ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target, | 70 | ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target, |
| 58 | std::shared_ptr<std::vector<u8>> block, u32 offset, u32 size, MemoryState state) { | 71 | std::shared_ptr<std::vector<u8>> block, size_t offset, u32 size, MemoryState state) { |
| 59 | ASSERT(block != nullptr); | 72 | ASSERT(block != nullptr); |
| 60 | ASSERT(offset + size <= block->size()); | 73 | ASSERT(offset + size <= block->size()); |
| 61 | 74 | ||
@@ -106,10 +119,8 @@ ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u3
| 106 | return MakeResult<VMAHandle>(MergeAdjacent(vma_handle)); | 119 | return MakeResult<VMAHandle>(MergeAdjacent(vma_handle)); |
| 107 | } | 120 | } |
| 108 | 121 | ||
| 109 | void VMManager::Unmap(VMAHandle vma_handle) { | 122 | VMManager::VMAIter VMManager::Unmap(VMAIter vma_handle) { |
| 110 | VMAIter iter = StripIterConstness(vma_handle); | 123 | VirtualMemoryArea& vma = vma_handle->second; |
| 111 | |||
| 112 | VirtualMemoryArea& vma = iter->second; | ||
| 113 | vma.type = VMAType::Free; | 124 | vma.type = VMAType::Free; |
| 114 | vma.permissions = VMAPermission::None; | 125 | vma.permissions = VMAPermission::None; |
| 115 | vma.meminfo_state = MemoryState::Free; | 126 | vma.meminfo_state = MemoryState::Free; |
@@ -121,26 +132,67 @@ void VMManager::Unmap(VMAHandle vma_handle) {
| 121 | 132 | ||
| 122 | UpdatePageTableForVMA(vma); | 133 | UpdatePageTableForVMA(vma); |
| 123 | 134 | ||
| 124 | MergeAdjacent(iter); | 135 | return MergeAdjacent(vma_handle); |
| 136 | } | ||
| 137 | |||
| 138 | ResultCode VMManager::UnmapRange(VAddr target, u32 size) { | ||
| 139 | CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size)); | ||
| 140 | VAddr target_end = target + size; | ||
| 141 | |||
| 142 | VMAIter end = vma_map.end(); | ||
| 143 | // The comparison against the end of the range must be done using addresses since VMAs can be | ||
| 144 | // merged during this process, causing invalidation of the iterators. | ||
| 145 | while (vma != end && vma->second.base < target_end) { | ||
| 146 | vma = std::next(Unmap(vma)); | ||
| 147 | } | ||
| 148 | |||
| 149 | ASSERT(FindVMA(target)->second.size >= size); | ||
| 150 | return RESULT_SUCCESS; | ||
| 125 | } | 151 | } |
| 126 | 152 | ||
| 127 | void VMManager::Reprotect(VMAHandle vma_handle, VMAPermission new_perms) { | 153 | VMManager::VMAHandle VMManager::Reprotect(VMAHandle vma_handle, VMAPermission new_perms) { |
| 128 | VMAIter iter = StripIterConstness(vma_handle); | 154 | VMAIter iter = StripIterConstness(vma_handle); |
| 129 | 155 | ||
| 130 | VirtualMemoryArea& vma = iter->second; | 156 | VirtualMemoryArea& vma = iter->second; |
| 131 | vma.permissions = new_perms; | 157 | vma.permissions = new_perms; |
| 132 | UpdatePageTableForVMA(vma); | 158 | UpdatePageTableForVMA(vma); |
| 133 | 159 | ||
| 134 | MergeAdjacent(iter); | 160 | return MergeAdjacent(iter); |
| 161 | } | ||
| 162 | |||
| 163 | ResultCode VMManager::ReprotectRange(VAddr target, u32 size, VMAPermission new_perms) { | ||
| 164 | CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size)); | ||
| 165 | VAddr target_end = target + size; | ||
| 166 | |||
| 167 | VMAIter end = vma_map.end(); | ||
| 168 | // The comparison against the end of the range must be done using addresses since VMAs can be | ||
| 169 | // merged during this process, causing invalidation of the iterators. | ||
| 170 | while (vma != end && vma->second.base < target_end) { | ||
| 171 | vma = std::next(StripIterConstness(Reprotect(vma, new_perms))); | ||
| 172 | } | ||
| 173 | |||
| 174 | return RESULT_SUCCESS; | ||
| 135 | } | 175 | } |
| 136 | 176 | ||
| 137 | void VMManager::LogLayout() const { | 177 | void VMManager::RefreshMemoryBlockMappings(const std::vector<u8>* block) { |
| 178 | // If this ever proves to have a noticeable performance impact, allow users of the function to | ||
| 179 | // specify a specific range of addresses to limit the scan to. | ||
| 138 | for (const auto& p : vma_map) { | 180 | for (const auto& p : vma_map) { |
| 139 | const VirtualMemoryArea& vma = p.second; | 181 | const VirtualMemoryArea& vma = p.second; |
| 140 | LOG_DEBUG(Kernel, "%08X - %08X size: %8X %c%c%c", vma.base, vma.base + vma.size, vma.size, | 182 | if (block == vma.backing_block.get()) { |
| 183 | UpdatePageTableForVMA(vma); | ||
| 184 | } | ||
| 185 | } | ||
| 186 | } | ||
| 187 | |||
| 188 | void VMManager::LogLayout(Log::Level log_level) const { | ||
| 189 | for (const auto& p : vma_map) { | ||
| 190 | const VirtualMemoryArea& vma = p.second; | ||
| 191 | LOG_GENERIC(Log::Class::Kernel, log_level, "%08X - %08X size: %8X %c%c%c %s", | ||
| 192 | vma.base, vma.base + vma.size, vma.size, | ||
| 141 | (u8)vma.permissions & (u8)VMAPermission::Read ? 'R' : '-', | 193 | (u8)vma.permissions & (u8)VMAPermission::Read ? 'R' : '-', |
| 142 | (u8)vma.permissions & (u8)VMAPermission::Write ? 'W' : '-', | 194 | (u8)vma.permissions & (u8)VMAPermission::Write ? 'W' : '-', |
| 143 | (u8)vma.permissions & (u8)VMAPermission::Execute ? 'X' : '-'); | 195 | (u8)vma.permissions & (u8)VMAPermission::Execute ? 'X' : '-', GetMemoryStateName(vma.meminfo_state)); |
| 144 | } | 196 | } |
| 145 | } | 197 | } |
| 146 | 198 | ||
@@ -151,21 +203,19 @@ VMManager::VMAIter VMManager::StripIterConstness(const VMAHandle & iter) {
| 151 | } | 203 | } |
| 152 | 204 | ||
| 153 | ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u32 size) { | 205 | ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u32 size) { |
| 154 | ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: %8X", size); | 206 | ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: 0x%8X", size); |
| 155 | ASSERT_MSG((base & Memory::PAGE_MASK) == 0, "non-page aligned base: %08X", base); | 207 | ASSERT_MSG((base & Memory::PAGE_MASK) == 0, "non-page aligned base: 0x%08X", base); |
| 156 | 208 | ||
| 157 | VMAIter vma_handle = StripIterConstness(FindVMA(base)); | 209 | VMAIter vma_handle = StripIterConstness(FindVMA(base)); |
| 158 | if (vma_handle == vma_map.end()) { | 210 | if (vma_handle == vma_map.end()) { |
| 159 | // Target address is outside the range managed by the kernel | 211 | // Target address is outside the range managed by the kernel |
| 160 | return ResultCode(ErrorDescription::InvalidAddress, ErrorModule::OS, | 212 | return ERR_INVALID_ADDRESS; |
| 161 | ErrorSummary::InvalidArgument, ErrorLevel::Usage); // 0xE0E01BF5 | ||
| 162 | } | 213 | } |
| 163 | 214 | ||
| 164 | VirtualMemoryArea& vma = vma_handle->second; | 215 | VirtualMemoryArea& vma = vma_handle->second; |
| 165 | if (vma.type != VMAType::Free) { | 216 | if (vma.type != VMAType::Free) { |
| 166 | // Region is already allocated | 217 | // Region is already allocated |
| 167 | return ResultCode(ErrorDescription::InvalidAddress, ErrorModule::OS, | 218 | return ERR_INVALID_ADDRESS_STATE; |
| 168 | ErrorSummary::InvalidState, ErrorLevel::Usage); // 0xE0A01BF5 | ||
| 169 | } | 219 | } |
| 170 | 220 | ||
| 171 | u32 start_in_vma = base - vma.base; | 221 | u32 start_in_vma = base - vma.base; |
@@ -173,8 +223,7 @@ ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u32 size) {
| 173 | 223 | ||
| 174 | if (end_in_vma > vma.size) { | 224 | if (end_in_vma > vma.size) { |
| 175 | // Requested allocation doesn't fit inside VMA | 225 | // Requested allocation doesn't fit inside VMA |
| 176 | return ResultCode(ErrorDescription::InvalidAddress, ErrorModule::OS, | 226 | return ERR_INVALID_ADDRESS_STATE; |
| 177 | ErrorSummary::InvalidState, ErrorLevel::Usage); // 0xE0A01BF5 | ||
| 178 | } | 227 | } |
| 179 | 228 | ||
| 180 | if (end_in_vma != vma.size) { | 229 | if (end_in_vma != vma.size) { |
@@ -189,6 +238,35 @@ ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u32 size) {
| 189 | return MakeResult<VMAIter>(vma_handle); | 238 | return MakeResult<VMAIter>(vma_handle); |
| 190 | } | 239 | } |
| 191 | 240 | ||
| 241 | ResultVal<VMManager::VMAIter> VMManager::CarveVMARange(VAddr target, u32 size) { | ||
| 242 | ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: 0x%8X", size); | ||
| 243 | ASSERT_MSG((target & Memory::PAGE_MASK) == 0, "non-page aligned base: 0x%08X", target); | ||
| 244 | |||
| 245 | VAddr target_end = target + size; | ||
| 246 | ASSERT(target_end >= target); | ||
| 247 | ASSERT(target_end <= MAX_ADDRESS); | ||
| 248 | ASSERT(size > 0); | ||
| 249 | |||
| 250 | VMAIter begin_vma = StripIterConstness(FindVMA(target)); | ||
| 251 | VMAIter i_end = vma_map.lower_bound(target_end); | ||
| 252 | for (auto i = begin_vma; i != i_end; ++i) { | ||
| 253 | if (i->second.type == VMAType::Free) { | ||
| 254 | return ERR_INVALID_ADDRESS_STATE; | ||
| 255 | } | ||
| 256 | } | ||
| 257 | |||
| 258 | if (target != begin_vma->second.base) { | ||
| 259 | begin_vma = SplitVMA(begin_vma, target - begin_vma->second.base); | ||
| 260 | } | ||
| 261 | |||
| 262 | VMAIter end_vma = StripIterConstness(FindVMA(target_end)); | ||
| 263 | if (end_vma != vma_map.end() && target_end != end_vma->second.base) { | ||
| 264 | end_vma = SplitVMA(end_vma, target_end - end_vma->second.base); | ||
| 265 | } | ||
| 266 | |||
| 267 | return MakeResult<VMAIter>(begin_vma); | ||
| 268 | } | ||
| 269 | |||
| 192 | VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u32 offset_in_vma) { | 270 | VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u32 offset_in_vma) { |
| 193 | VirtualMemoryArea& old_vma = vma_handle->second; | 271 | VirtualMemoryArea& old_vma = vma_handle->second; |
| 194 | VirtualMemoryArea new_vma = old_vma; // Make a copy of the VMA | 272 | VirtualMemoryArea new_vma = old_vma; // Make a copy of the VMA |
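
The vma_map that FindVMA and CarveVMARange operate on is keyed by each area's base address, and a lookup takes the predecessor of upper_bound(target). A tiny standalone model of that lookup follows; the Area struct, the address-space limit and the example mappings are illustrative, not the actual Citra types.

```cpp
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>

struct Area {
    uint32_t base;
    uint32_t size;
    bool free;
};

int main() {
    const uint32_t max_address = 0x40000000; // illustrative address-space limit

    // After Reset() the map would hold a single free Area spanning everything;
    // here it has already been split by one 0x1000-byte mapping at 0x10000000.
    std::map<uint32_t, Area> vma_map;
    vma_map[0x00000000] = {0x00000000, 0x10000000, true};
    vma_map[0x10000000] = {0x10000000, 0x00001000, false};
    vma_map[0x10001000] = {0x10001000, max_address - 0x10001000, true};

    auto find_vma = [&](uint32_t target) {
        assert(target < max_address);
        // upper_bound() returns the first area starting *after* target, so its
        // predecessor is the area that contains target.
        return std::prev(vma_map.upper_bound(target));
    };

    assert(!find_vma(0x10000800)->second.free); // inside the mapped area
    assert(find_vma(0x0FFFFFFF)->second.free);  // just below it, still free
    assert(find_vma(0x10001000)->second.free);  // first byte after the mapping
    return 0;
}
```
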
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index b3795a94a..4e95f1f0c 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -14,6 +14,14 @@
| 14 | 14 | ||
| 15 | namespace Kernel { | 15 | namespace Kernel { |
| 16 | 16 | ||
| 17 | const ResultCode ERR_INVALID_ADDRESS{ // 0xE0E01BF5 | ||
| 18 | ErrorDescription::InvalidAddress, ErrorModule::OS, | ||
| 19 | ErrorSummary::InvalidArgument, ErrorLevel::Usage}; | ||
| 20 | |||
| 21 | const ResultCode ERR_INVALID_ADDRESS_STATE{ // 0xE0A01BF5 | ||
| 22 | ErrorDescription::InvalidAddress, ErrorModule::OS, | ||
| 23 | ErrorSummary::InvalidState, ErrorLevel::Usage}; | ||
| 24 | |||
| 17 | enum class VMAType : u8 { | 25 | enum class VMAType : u8 { |
| 18 | /// VMA represents an unmapped region of the address space. | 26 | /// VMA represents an unmapped region of the address space. |
| 19 | Free, | 27 | Free, |
@@ -75,7 +83,7 @@ struct VirtualMemoryArea {
| 75 | /// Memory block backing this VMA. | 83 | /// Memory block backing this VMA. |
| 76 | std::shared_ptr<std::vector<u8>> backing_block = nullptr; | 84 | std::shared_ptr<std::vector<u8>> backing_block = nullptr; |
| 77 | /// Offset into the backing_memory the mapping starts from. | 85 | /// Offset into the backing_memory the mapping starts from. |
| 78 | u32 offset = 0; | 86 | size_t offset = 0; |
| 79 | 87 | ||
| 80 | // Settings for type = BackingMemory | 88 | // Settings for type = BackingMemory |
| 81 | /// Pointer backing this VMA. It will not be destroyed or freed when the VMA is removed. | 89 | /// Pointer backing this VMA. It will not be destroyed or freed when the VMA is removed. |
@@ -141,7 +149,7 @@ public:
| 141 | * @param state MemoryState tag to attach to the VMA. | 149 | * @param state MemoryState tag to attach to the VMA. |
| 142 | */ | 150 | */ |
| 143 | ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<std::vector<u8>> block, | 151 | ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<std::vector<u8>> block, |
| 144 | u32 offset, u32 size, MemoryState state); | 152 | size_t offset, u32 size, MemoryState state); |
| 145 | 153 | ||
| 146 | /** | 154 | /** |
| 147 | * Maps an unmanaged host memory pointer at a given address. | 155 | * Maps an unmanaged host memory pointer at a given address. |
@@ -163,14 +171,23 @@ public:
| 163 | */ | 171 | */ |
| 164 | ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u32 size, MemoryState state); | 172 | ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u32 size, MemoryState state); |
| 165 | 173 | ||
| 166 | /// Unmaps the given VMA. | 174 | /// Unmaps a range of addresses, splitting VMAs as necessary. |
| 167 | void Unmap(VMAHandle vma); | 175 | ResultCode UnmapRange(VAddr target, u32 size); |
| 168 | 176 | ||
| 169 | /// Changes the permissions of the given VMA. | 177 | /// Changes the permissions of the given VMA. |
| 170 | void Reprotect(VMAHandle vma, VMAPermission new_perms); | 178 | VMAHandle Reprotect(VMAHandle vma, VMAPermission new_perms); |
| 179 | |||
| 180 | /// Changes the permissions of a range of addresses, splitting VMAs as necessary. | ||
| 181 | ResultCode ReprotectRange(VAddr target, u32 size, VMAPermission new_perms); | ||
| 182 | |||
| 183 | /** | ||
| 184 | * Scans all VMAs and updates the page table range of any that use the given vector as backing | ||
| 185 | * memory. This should be called after any operation that causes reallocation of the vector. | ||
| 186 | */ | ||
| 187 | void RefreshMemoryBlockMappings(const std::vector<u8>* block); | ||
| 171 | 188 | ||
| 172 | /// Dumps the address space layout to the log, for debugging | 189 | /// Dumps the address space layout to the log, for debugging |
| 173 | void LogLayout() const; | 190 | void LogLayout(Log::Level log_level) const; |
| 174 | 191 | ||
| 175 | private: | 192 | private: |
| 176 | using VMAIter = decltype(vma_map)::iterator; | 193 | using VMAIter = decltype(vma_map)::iterator; |
@@ -178,6 +195,9 @@ private:
| 178 | /// Converts a VMAHandle to a mutable VMAIter. | 195 | /// Converts a VMAHandle to a mutable VMAIter. |
| 179 | VMAIter StripIterConstness(const VMAHandle& iter); | 196 | VMAIter StripIterConstness(const VMAHandle& iter); |
| 180 | 197 | ||
| 198 | /// Unmaps the given VMA. | ||
| 199 | VMAIter Unmap(VMAIter vma); | ||
| 200 | |||
| 181 | /** | 201 | /** |
| 182 | * Carves a VMA of a specific size at the specified address by splitting Free VMAs while doing | 202 | * Carves a VMA of a specific size at the specified address by splitting Free VMAs while doing |
| 183 | * the appropriate error checking. | 203 | * the appropriate error checking. |
@@ -185,6 +205,12 @@ private:
| 185 | ResultVal<VMAIter> CarveVMA(VAddr base, u32 size); | 205 | ResultVal<VMAIter> CarveVMA(VAddr base, u32 size); |
| 186 | 206 | ||
| 187 | /** | 207 | /** |
| 208 | * Splits the edges of the given range of non-Free VMAs so that there is a VMA split at each | ||
| 209 | * end of the range. | ||
| 210 | */ | ||
| 211 | ResultVal<VMAIter> CarveVMARange(VAddr base, u32 size); | ||
| 212 | |||
| 213 | /** | ||
| 188 | * Splits a VMA in two, at the specified offset. | 214 | * Splits a VMA in two, at the specified offset. |
| 189 | * @returns the right side of the split, with the original iterator becoming the left side. | 215 | * @returns the right side of the split, with the original iterator becoming the left side. |
| 190 | */ | 216 | */ |