Diffstat (limited to 'src')
-rw-r--r-- src/core/hle/kernel/init/init_slab_setup.cpp |  10 +-
-rw-r--r-- src/core/hle/kernel/k_slab_heap.h            | 154 +--
-rw-r--r-- src/core/memory.cpp                          |  21 --
-rw-r--r-- src/core/memory.h                            |   9 --
4 files changed, 20 insertions(+), 174 deletions(-)
diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp
index 69ae405e6..10edede17 100644
--- a/src/core/hle/kernel/init/init_slab_setup.cpp
+++ b/src/core/hle/kernel/init/init_slab_setup.cpp
@@ -70,14 +70,22 @@ constexpr size_t SlabCountExtraKThread = 160;
 template <typename T>
 VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAddr address,
                          size_t num_objects) {
+    // TODO(bunnei): This is just a placeholder. We should initialize the appropriate KSlabHeap for
+    // kernel object type T with the backing kernel memory pointer once we emulate kernel memory.
+
     const size_t size = Common::AlignUp(sizeof(T) * num_objects, alignof(void*));
     VAddr start = Common::AlignUp(address, alignof(T));
 
+    // This is intentionally empty. Once KSlabHeap is fully implemented, we can replace this with
+    // the pointer to emulated memory to pass along. Until then, KSlabHeap will just allocate/free
+    // host memory.
+    void* backing_kernel_memory{};
+
     if (size > 0) {
         const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1);
         ASSERT(region != nullptr);
         ASSERT(region->IsDerivedFrom(KMemoryRegionType_KernelSlab));
-        T::InitializeSlabHeap(system.Kernel(), system.Memory().GetKernelBuffer(start, size), size);
+        T::InitializeSlabHeap(system.Kernel(), backing_kernel_memory, size);
     }
 
     return start + size;
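For context, this initializer packs successive slab heaps into one virtual range: each call aligns the cursor for its type, initializes the heap, and returns the address where the next heap begins. A minimal sketch of a call chain, assuming a hypothetical cursor variable and object counts (not copied from this file):

// Hypothetical driver: heaps are carved back-to-back out of the kernel
// slab region, each call advancing the cursor past its own slab.
VAddr cursor = slab_region_start; // assumed start of the slab region
cursor = InitializeSlabHeap<KThread>(system, memory_layout, cursor, thread_count);
cursor = InitializeSlabHeap<KEvent>(system, memory_layout, cursor, event_count);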
diff --git a/src/core/hle/kernel/k_slab_heap.h b/src/core/hle/kernel/k_slab_heap.h
index 5ce9a1d7c..81d472a3e 100644
--- a/src/core/hle/kernel/k_slab_heap.h
+++ b/src/core/hle/kernel/k_slab_heap.h
@@ -4,165 +4,33 @@
 
 #pragma once
 
-#include <atomic>
-
-#include "common/assert.h"
-#include "common/common_types.h"
-
 namespace Kernel {
 
-namespace impl {
-
-class KSlabHeapImpl final : NonCopyable {
-public:
-    struct Node {
-        Node* next{};
-    };
-
-    constexpr KSlabHeapImpl() = default;
-
-    void Initialize(std::size_t size) {
-        ASSERT(head == nullptr);
-        obj_size = size;
-    }
-
-    constexpr std::size_t GetObjectSize() const {
-        return obj_size;
-    }
-
-    Node* GetHead() const {
-        return head;
-    }
-
-    void* Allocate() {
-        Node* ret = head.load();
-
-        do {
-            if (ret == nullptr) {
-                break;
-            }
-        } while (!head.compare_exchange_weak(ret, ret->next));
-
-        return ret;
-    }
-
-    void Free(void* obj) {
-        Node* node = static_cast<Node*>(obj);
-
-        Node* cur_head = head.load();
-        do {
-            node->next = cur_head;
-        } while (!head.compare_exchange_weak(cur_head, node));
-    }
-
-private:
-    std::atomic<Node*> head{};
-    std::size_t obj_size{};
-};
-
-} // namespace impl
-
-class KSlabHeapBase : NonCopyable {
-public:
-    constexpr KSlabHeapBase() = default;
-
-    constexpr bool Contains(uintptr_t addr) const {
-        return start <= addr && addr < end;
-    }
-
-    constexpr std::size_t GetSlabHeapSize() const {
-        return (end - start) / GetObjectSize();
-    }
-
-    constexpr std::size_t GetObjectSize() const {
-        return impl.GetObjectSize();
-    }
+class KernelCore;
 
-    constexpr uintptr_t GetSlabHeapAddress() const {
-        return start;
-    }
-
-    std::size_t GetObjectIndexImpl(const void* obj) const {
-        return (reinterpret_cast<uintptr_t>(obj) - start) / GetObjectSize();
-    }
-
-    std::size_t GetPeakIndex() const {
-        return GetObjectIndexImpl(reinterpret_cast<const void*>(peak));
-    }
-
-    void* AllocateImpl() {
-        return impl.Allocate();
-    }
-
-    void FreeImpl(void* obj) {
-        // Don't allow freeing an object that wasn't allocated from this heap
-        ASSERT(Contains(reinterpret_cast<uintptr_t>(obj)));
-
-        impl.Free(obj);
-    }
-
-    void InitializeImpl(std::size_t obj_size, void* memory, std::size_t memory_size) {
-        // Ensure we don't initialize a slab using null memory
-        ASSERT(memory != nullptr);
-
-        // Initialize the base allocator
-        impl.Initialize(obj_size);
-
-        // Set our tracking variables
-        const std::size_t num_obj = (memory_size / obj_size);
-        start = reinterpret_cast<uintptr_t>(memory);
-        end = start + num_obj * obj_size;
-        peak = start;
-
-        // Free the objects
-        u8* cur = reinterpret_cast<u8*>(end);
-
-        for (std::size_t i{}; i < num_obj; i++) {
-            cur -= obj_size;
-            impl.Free(cur);
-        }
-    }
-
-private:
-    using Impl = impl::KSlabHeapImpl;
-
-    Impl impl;
-    uintptr_t peak{};
-    uintptr_t start{};
-    uintptr_t end{};
-};
+/// This is a placeholder class to manage slab heaps for kernel objects. For now, we just allocate
+/// these with new/delete, but this can be re-implemented later to allocate these in emulated
+/// memory.
 
 template <typename T>
-class KSlabHeap final : public KSlabHeapBase {
+class KSlabHeap final : NonCopyable {
 public:
-    constexpr KSlabHeap() : KSlabHeapBase() {}
+    KSlabHeap() = default;
 
-    void Initialize(void* memory, std::size_t memory_size) {
-        InitializeImpl(sizeof(T), memory, memory_size);
+    void Initialize([[maybe_unused]] void* memory, [[maybe_unused]] std::size_t memory_size) {
+        // Placeholder that should initialize the backing slab heap implementation.
     }
 
     T* Allocate() {
-        T* obj = static_cast<T*>(AllocateImpl());
-        if (obj != nullptr) {
-            new (obj) T();
-        }
-        return obj;
+        return new T();
     }
 
     T* AllocateWithKernel(KernelCore& kernel) {
-        T* obj = static_cast<T*>(AllocateImpl());
-        if (obj != nullptr) {
-            new (obj) T(kernel);
-        }
-        return obj;
+        return new T(kernel);
    }
 
     void Free(T* obj) {
-        FreeImpl(obj);
-    }
-
-    constexpr std::size_t GetObjectIndex(const T* obj) const {
-        return GetObjectIndexImpl(obj);
+        delete obj;
     }
 };
 
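With this change the heap degenerates to plain new/delete while keeping the slab-style interface, so call sites need no changes when a real backing implementation returns. A minimal usage sketch, assuming a hypothetical kernel object type KSomeObject:

KSlabHeap<KSomeObject> heap;        // KSomeObject is a stand-in type
heap.Initialize(nullptr, 0);        // both arguments are ignored for now
KSomeObject* obj = heap.Allocate(); // currently equivalent to new KSomeObject()
heap.Free(obj);                     // currently equivalent to delete obj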
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index b4c56e1c1..bf2ef7816 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -82,22 +82,6 @@ struct Memory::Impl {
         return nullptr;
     }
 
-    u8* GetKernelBuffer(VAddr start_vaddr, size_t size) {
-        // TODO(bunnei): This is just a workaround until we have kernel memory layout mapped &
-        // managed. Until then, we use this to allocate and access kernel memory regions.
-
-        auto search = kernel_memory_regions.find(start_vaddr);
-        if (search != kernel_memory_regions.end()) {
-            return search->second.get();
-        }
-
-        std::unique_ptr<u8[]> new_memory_region{new u8[size]};
-        u8* raw_ptr = new_memory_region.get();
-        kernel_memory_regions[start_vaddr] = std::move(new_memory_region);
-
-        return raw_ptr;
-    }
-
     u8 Read8(const VAddr addr) {
         return Read<u8>(addr);
     }
@@ -727,7 +711,6 @@ struct Memory::Impl {
     }
 
     Common::PageTable* current_page_table = nullptr;
-    std::unordered_map<VAddr, std::unique_ptr<u8[]>> kernel_memory_regions;
     Core::System& system;
 };
 
@@ -765,10 +748,6 @@ u8* Memory::GetPointer(VAddr vaddr) {
     return impl->GetPointer(vaddr);
 }
 
-u8* Memory::GetKernelBuffer(VAddr start_vaddr, size_t size) {
-    return impl->GetKernelBuffer(start_vaddr, size);
-}
-
 const u8* Memory::GetPointer(VAddr vaddr) const {
     return impl->GetPointer(vaddr);
 }
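The removed GetKernelBuffer was a find-or-allocate cache: look up a host buffer keyed by the guest virtual address of the region, lazily allocating one on a miss. A self-contained sketch of that idiom, with illustrative names rather than yuzu API:

#include <cstddef>
#include <cstdint>
#include <memory>
#include <unordered_map>

using VAddr = std::uint64_t;

// Maps a region's start address to its lazily allocated host backing buffer.
std::unordered_map<VAddr, std::unique_ptr<std::uint8_t[]>> regions;

std::uint8_t* FindOrAllocate(VAddr start, std::size_t size) {
    if (const auto it = regions.find(start); it != regions.end()) {
        return it->second.get(); // region already exists, reuse it
    }
    auto buffer = std::make_unique<std::uint8_t[]>(size); // zero-initialized
    std::uint8_t* raw = buffer.get();
    regions.emplace(start, std::move(buffer));
    return raw;
}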
diff --git a/src/core/memory.h b/src/core/memory.h
index 345fd870d..c91eeced9 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -121,15 +121,6 @@ public:
      */
     u8* GetPointer(VAddr vaddr);
 
-    /**
-     * Gets a pointer to the start of a kernel heap allocated memory region. Will allocate one if it
-     * does not already exist.
-     *
-     * @param start_vaddr Start virtual address for the memory region.
-     * @param size Size of the memory region.
-     */
-    u8* GetKernelBuffer(VAddr start_vaddr, size_t size);
-
     template <typename T>
     T* GetPointer(VAddr vaddr) {
         return reinterpret_cast<T*>(GetPointer(vaddr));
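The surviving template overload simply reinterprets the byte pointer returned by the non-template GetPointer, so a null result for an unmapped address propagates to the caller. A hedged call-site sketch (the address is illustrative):

// Hypothetical call site: read a 32-bit word through the typed helper.
// GetPointer returns nullptr for unmapped addresses, so check before use.
if (u32* word = system.Memory().GetPointer<u32>(0x10000000)) {
    const u32 value = *word;
}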