Diffstat (limited to 'src')
-rw-r--r--  src/core/CMakeLists.txt                          2
-rw-r--r--  src/core/hle/kernel/memory/memory_manager.cpp    177
-rw-r--r--  src/core/hle/kernel/memory/memory_manager.h      97
3 files changed, 276 insertions(+), 0 deletions(-)
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 9fc5bd84b..ff38c6cc2 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -159,6 +159,8 @@ add_library(core STATIC
     hle/kernel/memory/memory_block.h
     hle/kernel/memory/memory_block_manager.cpp
     hle/kernel/memory/memory_block_manager.h
+    hle/kernel/memory/memory_manager.cpp
+    hle/kernel/memory/memory_manager.h
     hle/kernel/memory/memory_types.h
     hle/kernel/memory/page_linked_list.h
     hle/kernel/memory/page_heap.cpp
diff --git a/src/core/hle/kernel/memory/memory_manager.cpp b/src/core/hle/kernel/memory/memory_manager.cpp
new file mode 100644
index 000000000..9c1bb981b
--- /dev/null
+++ b/src/core/hle/kernel/memory/memory_manager.cpp
@@ -0,0 +1,177 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>

#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "common/scope_exit.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/memory/memory_manager.h"
#include "core/hle/kernel/memory/page_linked_list.h"

namespace Kernel::Memory {

std::size_t MemoryManager::Impl::Initialize(Pool new_pool, u64 start_address, u64 end_address) {
    const std::size_t size{end_address - start_address};

    // Calculate metadata sizes
    const std::size_t ref_count_size{(size / PageSize) * sizeof(u16)};
    const std::size_t optimize_map_size{(Common::AlignUp((size / PageSize), 64) / 64) *
                                        sizeof(u64)};
    const std::size_t manager_size{Common::AlignUp(optimize_map_size + ref_count_size, PageSize)};
    const std::size_t page_heap_size{PageHeap::CalculateMetadataOverheadSize(size)};
    const std::size_t total_metadata_size{manager_size + page_heap_size};
    ASSERT(manager_size <= total_metadata_size);
    ASSERT(Common::IsAligned(total_metadata_size, PageSize));
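
    // Worked example (assuming the usual 4 KiB PageSize): for a 64 MiB region,
    // size / PageSize = 16384 pages, so ref_count_size = 16384 * 2 = 32768 bytes and
    // optimize_map_size = (16384 / 64) * 8 = 2048 bytes. manager_size rounds their
    // sum (34816 bytes) up to the next page boundary: 36864 bytes, i.e. 9 pages.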

    // Set up the region
    pool = new_pool;

    // Initialize the manager's KPageHeap
    heap.Initialize(start_address, size, page_heap_size);

    // Free the memory to the heap
    heap.Free(start_address, size / PageSize);

    // Update the heap's used size
    heap.UpdateUsedSize();

    return total_metadata_size;
}

void MemoryManager::InitializeManager(Pool pool, u64 start_address, u64 end_address) {
    ASSERT(pool < Pool::Count);
    managers[static_cast<std::size_t>(pool)].Initialize(pool, start_address, end_address);
}

VAddr MemoryManager::AllocateContinuous(std::size_t num_pages, std::size_t align_pages, Pool pool,
                                        Direction dir) {
    // Early return if we're allocating no pages
    if (num_pages == 0) {
        return {};
    }

    // Lock the pool that we're allocating from
    const std::size_t pool_index{static_cast<std::size_t>(pool)};
    std::lock_guard lock{pool_locks[pool_index]};

    // Choose a heap block size based on our page count and alignment request
    const s32 heap_index{PageHeap::GetAlignedBlockIndex(num_pages, align_pages)};
    if (heap_index < 0) {
        // No block size can satisfy the request
        return {};
    }

    // Pick the manager to allocate from
    // TODO (bunnei): Support multiple managers
    Impl& chosen_manager{managers[pool_index]};
    VAddr allocated_block{chosen_manager.AllocateBlock(heap_index)};

    // If we failed to allocate, quit now
    if (!allocated_block) {
        return {};
    }

    // If we allocated more than we need, free some
    const std::size_t allocated_pages{PageHeap::GetBlockNumPages(heap_index)};
    if (allocated_pages > num_pages) {
        chosen_manager.Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
    }
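
    // Example of the trim above (block sizes are illustrative): a request for 3
    // pages may be served from a 4-page block; the unused trailing page is
    // returned to the heap so only num_pages remain allocated.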

    return allocated_block;
}

ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
                                   Direction dir) {
    ASSERT(page_list.GetNumPages() == 0);

    // Early return if we're allocating no pages
    if (num_pages == 0) {
        return RESULT_SUCCESS;
    }

    // Lock the pool that we're allocating from
    const std::size_t pool_index{static_cast<std::size_t>(pool)};
    std::lock_guard lock{pool_locks[pool_index]};

    // Choose a heap block size based on our page count request
    const s32 heap_index{PageHeap::GetBlockIndex(num_pages)};
    if (heap_index < 0) {
        return ERR_OUT_OF_MEMORY;
    }

    // TODO (bunnei): Support multiple managers
    Impl& chosen_manager{managers[pool_index]};

    // Ensure that we don't leave anything unfreed
    auto group_guard = detail::ScopeExit([&] {
        for (const auto& it : page_list.Nodes()) {
            const std::size_t node_num_pages{std::min(
                it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)};
            chosen_manager.Free(it.GetAddress(), node_num_pages);
        }
    });
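
    // Note: if we return early with an error below, the guard above walks the
    // partially-built page list and returns every block to the heap, clamping
    // each node's page count to this manager's address range.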

    // Keep allocating until we've allocated all our pages
    for (s32 index{heap_index}; index >= 0 && num_pages > 0; index--) {
        const std::size_t pages_per_alloc{PageHeap::GetBlockNumPages(index)};

        while (num_pages >= pages_per_alloc) {
            // Allocate a block
            VAddr allocated_block{chosen_manager.AllocateBlock(index)};
            if (allocated_block == 0) {
                break;
            }

            // Safely add it to our group
            {
                auto block_guard = detail::ScopeExit(
                    [&] { chosen_manager.Free(allocated_block, pages_per_alloc); });

                if (const ResultCode result{page_list.AddBlock(allocated_block, pages_per_alloc)};
                    result.IsError()) {
                    return result;
                }

                block_guard.Cancel();
            }

            num_pages -= pages_per_alloc;
        }
    }
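
    // Illustration of the greedy loop above (hypothetical block sizes): with
    // blocks of 16, 4, and 1 pages, a request for 21 pages is satisfied as
    // 16 + 4 + 1, walking from the largest index that fits down to the smallest.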

    // Only succeed if we allocated as many pages as we wanted
    // (num_pages is unsigned, so only an exact count of zero means success)
    if (num_pages != 0) {
        return ERR_OUT_OF_MEMORY;
    }

    // We succeeded!
    group_guard.Cancel();
    return RESULT_SUCCESS;
}

ResultCode MemoryManager::Free(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
                               Direction dir) {
    // Early return if we're freeing no pages
    if (num_pages == 0) {
        return RESULT_SUCCESS;
    }

    // Lock the pool that we're freeing from
    const std::size_t pool_index{static_cast<std::size_t>(pool)};
    std::lock_guard lock{pool_locks[pool_index]};

    // TODO (bunnei): Support multiple managers
    Impl& chosen_manager{managers[pool_index]};

    // Free all of the pages, clamping each node to the manager's address range
    for (const auto& it : page_list.Nodes()) {
        const std::size_t node_num_pages{std::min(
            it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)};
        chosen_manager.Free(it.GetAddress(), node_num_pages);
    }

    return RESULT_SUCCESS;
}

} // namespace Kernel::Memory
diff --git a/src/core/hle/kernel/memory/memory_manager.h b/src/core/hle/kernel/memory/memory_manager.h
new file mode 100644
index 000000000..b078d7a5e
--- /dev/null
+++ b/src/core/hle/kernel/memory/memory_manager.h
@@ -0,0 +1,97 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <mutex>

#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/kernel/memory/page_heap.h"
#include "core/hle/result.h"

namespace Kernel::Memory {

class PageLinkedList;

class MemoryManager final : NonCopyable {
public:
    enum class Pool : u32 {
        Application = 0,
        Applet = 1,
        System = 2,
        SystemNonSecure = 3,

        Count,

        Shift = 4,
        Mask = (0xF << Shift),
    };

    enum class Direction : u32 {
        FromFront = 0,
        FromBack = 1,

        Shift = 0,
        Mask = (0xF << Shift),
    };
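
    // Note: the Shift/Mask pairs suggest Pool (bits 4-7) and Direction (bits 0-3)
    // are intended to be packed into a single option word, e.g. (hypothetically):
    //   u32 option = (static_cast<u32>(pool) << static_cast<u32>(Pool::Shift)) |
    //                (static_cast<u32>(dir) << static_cast<u32>(Direction::Shift));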

    MemoryManager() = default;

    constexpr std::size_t GetSize(Pool pool) const {
        return managers[static_cast<std::size_t>(pool)].GetSize();
    }

    void InitializeManager(Pool pool, u64 start_address, u64 end_address);
    VAddr AllocateContinuous(std::size_t num_pages, std::size_t align_pages, Pool pool,
                             Direction dir = Direction::FromFront);
    ResultCode Allocate(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
                        Direction dir = Direction::FromFront);
    ResultCode Free(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
                    Direction dir = Direction::FromFront);

    static constexpr std::size_t MaxManagerCount = 10;

private:
    class Impl final : NonCopyable {
    private:
        using RefCount = u16;

        PageHeap heap;
        Pool pool{};

    public:
        Impl() = default;

        std::size_t Initialize(Pool new_pool, u64 start_address, u64 end_address);

        VAddr AllocateBlock(s32 index) {
            return heap.AllocateBlock(index);
        }

        void Free(VAddr addr, std::size_t num_pages) {
            heap.Free(addr, num_pages);
        }

        constexpr std::size_t GetSize() const {
            return heap.GetSize();
        }

        constexpr VAddr GetAddress() const {
            return heap.GetAddress();
        }

        constexpr VAddr GetEndAddress() const {
            return heap.GetEndAddress();
        }
    };

private:
    std::array<std::mutex, static_cast<std::size_t>(Pool::Count)> pool_locks;
    std::array<Impl, MaxManagerCount> managers;
};

} // namespace Kernel::Memory