Diffstat:
 src/core/CMakeLists.txt                  |   1
 src/core/hle/kernel/k_code_memory.cpp    |  29
 src/core/hle/kernel/k_code_memory.h      |   6
 src/core/hle/kernel/k_memory_manager.cpp |   8
 src/core/hle/kernel/k_page_group.cpp     | 121
 src/core/hle/kernel/k_page_group.h       | 162
 src/core/hle/kernel/k_page_table.cpp     |  39
 src/core/hle/kernel/k_page_table.h       |   5
 src/core/hle/kernel/k_shared_memory.cpp  |  19
 src/core/hle/kernel/memory_types.h       |   3
 src/core/hle/kernel/svc.cpp              |   2
 11 files changed, 270 insertions(+), 125 deletions(-)
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 0252c8c31..5afdeb5ff 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -226,6 +226,7 @@ add_library(core STATIC
     hle/kernel/k_page_buffer.h
     hle/kernel/k_page_heap.cpp
     hle/kernel/k_page_heap.h
+    hle/kernel/k_page_group.cpp
     hle/kernel/k_page_group.h
     hle/kernel/k_page_table.cpp
     hle/kernel/k_page_table.h
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index 4b1c134d4..d9da1e600 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -27,13 +27,13 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
     auto& page_table = m_owner->PageTable();
 
     // Construct the page group.
-    m_page_group = {};
+    m_page_group.emplace(kernel, page_table.GetBlockInfoManager());
 
     // Lock the memory.
-    R_TRY(page_table.LockForCodeMemory(&m_page_group, addr, size))
+    R_TRY(page_table.LockForCodeMemory(std::addressof(*m_page_group), addr, size))
 
     // Clear the memory.
-    for (const auto& block : m_page_group.Nodes()) {
+    for (const auto& block : *m_page_group) {
         std::memset(device_memory.GetPointer<void>(block.GetAddress()), 0xFF, block.GetSize());
     }
 
@@ -51,12 +51,13 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
 void KCodeMemory::Finalize() {
     // Unlock.
     if (!m_is_mapped && !m_is_owner_mapped) {
-        const size_t size = m_page_group.GetNumPages() * PageSize;
-        m_owner->PageTable().UnlockForCodeMemory(m_address, size, m_page_group);
+        const size_t size = m_page_group->GetNumPages() * PageSize;
+        m_owner->PageTable().UnlockForCodeMemory(m_address, size, *m_page_group);
     }
 
     // Close the page group.
-    m_page_group = {};
+    m_page_group->Close();
+    m_page_group->Finalize();
 
     // Close our reference to our owner.
     m_owner->Close();
@@ -64,7 +65,7 @@ void KCodeMemory::Finalize() {
 
 Result KCodeMemory::Map(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
     // Lock ourselves.
     KScopedLightLock lk(m_lock);
@@ -74,7 +75,7 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
 
     // Map the memory.
     R_TRY(kernel.CurrentProcess()->PageTable().MapPages(
-        address, m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));
+        address, *m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));
 
     // Mark ourselves as mapped.
     m_is_mapped = true;
@@ -84,13 +85,13 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
 
 Result KCodeMemory::Unmap(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
     // Lock ourselves.
     KScopedLightLock lk(m_lock);
 
     // Unmap the memory.
-    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, m_page_group,
+    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, *m_page_group,
                                                           KMemoryState::CodeOut));
 
     // Mark ourselves as unmapped.
@@ -101,7 +102,7 @@ Result KCodeMemory::Unmap(VAddr address, size_t size) {
 
 Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
     // Lock ourselves.
     KScopedLightLock lk(m_lock);
@@ -125,7 +126,7 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission
 
     // Map the memory.
     R_TRY(
-        m_owner->PageTable().MapPages(address, m_page_group, KMemoryState::GeneratedCode, k_perm));
+        m_owner->PageTable().MapPages(address, *m_page_group, KMemoryState::GeneratedCode, k_perm));
 
     // Mark ourselves as mapped.
     m_is_owner_mapped = true;
@@ -135,13 +136,13 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission
 
 Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
     // Lock ourselves.
     KScopedLightLock lk(m_lock);
 
     // Unmap the memory.
-    R_TRY(m_owner->PageTable().UnmapPages(address, m_page_group, KMemoryState::GeneratedCode));
+    R_TRY(m_owner->PageTable().UnmapPages(address, *m_page_group, KMemoryState::GeneratedCode));
 
     // Mark ourselves as unmapped.
     m_is_owner_mapped = false;
diff --git a/src/core/hle/kernel/k_code_memory.h b/src/core/hle/kernel/k_code_memory.h
index 2e7e1436a..5b260b385 100644
--- a/src/core/hle/kernel/k_code_memory.h
+++ b/src/core/hle/kernel/k_code_memory.h
@@ -3,6 +3,8 @@
 
 #pragma once
 
+#include <optional>
+
 #include "common/common_types.h"
 #include "core/device_memory.h"
 #include "core/hle/kernel/k_auto_object.h"
@@ -49,11 +51,11 @@ public:
         return m_address;
     }
     size_t GetSize() const {
-        return m_is_initialized ? m_page_group.GetNumPages() * PageSize : 0;
+        return m_is_initialized ? m_page_group->GetNumPages() * PageSize : 0;
     }
 
 private:
-    KPageGroup m_page_group{};
+    std::optional<KPageGroup> m_page_group{};
     KProcess* m_owner{};
     VAddr m_address{};
     KLightLock m_lock;
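
Since KPageGroup now takes a kernel reference and a KBlockInfoManager at construction (and is no longer default-constructible), KCodeMemory wraps it in std::optional and emplaces it during initialization. A condensed, editorial sketch of that pattern, simplified from the code above:

    // Member stays empty until Initialize() has the dependencies in hand.
    std::optional<KPageGroup> m_page_group{};

    // Later, inside Initialize():
    m_page_group.emplace(kernel, page_table.GetBlockInfoManager());
    const size_t pages = m_page_group->GetNumPages(); // members reached through the optional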
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index bd33571da..cd6ea388e 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -223,7 +223,7 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,
 
     // Ensure that we don't leave anything un-freed.
     ON_RESULT_FAILURE {
-        for (const auto& it : out->Nodes()) {
+        for (const auto& it : *out) {
             auto& manager = this->GetManager(it.GetAddress());
             const size_t node_num_pages = std::min<u64>(
                 it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize);
@@ -285,7 +285,7 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
                                       m_has_optimized_process[static_cast<size_t>(pool)], true));
 
     // Open the first reference to the pages.
-    for (const auto& block : out->Nodes()) {
+    for (const auto& block : *out) {
         PAddr cur_address = block.GetAddress();
         size_t remaining_pages = block.GetNumPages();
         while (remaining_pages > 0) {
@@ -335,7 +335,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
     // Perform optimized memory tracking, if we should.
     if (optimized) {
         // Iterate over the allocated blocks.
-        for (const auto& block : out->Nodes()) {
+        for (const auto& block : *out) {
             // Get the block extents.
             const PAddr block_address = block.GetAddress();
             const size_t block_pages = block.GetNumPages();
@@ -391,7 +391,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
         }
     } else {
         // Set all the allocated memory.
-        for (const auto& block : out->Nodes()) {
+        for (const auto& block : *out) {
             std::memset(m_system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
                         block.GetSize());
         }
diff --git a/src/core/hle/kernel/k_page_group.cpp b/src/core/hle/kernel/k_page_group.cpp
new file mode 100644
index 000000000..d8c644a33
--- /dev/null
+++ b/src/core/hle/kernel/k_page_group.cpp
@@ -0,0 +1,121 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/k_dynamic_resource_manager.h"
+#include "core/hle/kernel/k_memory_manager.h"
+#include "core/hle/kernel/k_page_group.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel {
+
+void KPageGroup::Finalize() {
+    KBlockInfo* cur = m_first_block;
+    while (cur != nullptr) {
+        KBlockInfo* next = cur->GetNext();
+        m_manager->Free(cur);
+        cur = next;
+    }
+
+    m_first_block = nullptr;
+    m_last_block = nullptr;
+}
+
+void KPageGroup::CloseAndReset() {
+    auto& mm = m_kernel.MemoryManager();
+
+    KBlockInfo* cur = m_first_block;
+    while (cur != nullptr) {
+        KBlockInfo* next = cur->GetNext();
+        mm.Close(cur->GetAddress(), cur->GetNumPages());
+        m_manager->Free(cur);
+        cur = next;
+    }
+
+    m_first_block = nullptr;
+    m_last_block = nullptr;
+}
+
+size_t KPageGroup::GetNumPages() const {
+    size_t num_pages = 0;
+
+    for (const auto& it : *this) {
+        num_pages += it.GetNumPages();
+    }
+
+    return num_pages;
+}
+
+Result KPageGroup::AddBlock(KPhysicalAddress addr, size_t num_pages) {
+    // Succeed immediately if we're adding no pages.
+    R_SUCCEED_IF(num_pages == 0);
+
+    // Check for overflow.
+    ASSERT(addr < addr + num_pages * PageSize);
+
+    // Try to just append to the last block.
+    if (m_last_block != nullptr) {
+        R_SUCCEED_IF(m_last_block->TryConcatenate(addr, num_pages));
+    }
+
+    // Allocate a new block.
+    KBlockInfo* new_block = m_manager->Allocate();
+    R_UNLESS(new_block != nullptr, ResultOutOfResource);
+
+    // Initialize the block.
+    new_block->Initialize(addr, num_pages);
+
+    // Add the block to our list.
+    if (m_last_block != nullptr) {
+        m_last_block->SetNext(new_block);
+    } else {
+        m_first_block = new_block;
+    }
+    m_last_block = new_block;
+
+    R_SUCCEED();
+}
+
+void KPageGroup::Open() const {
+    auto& mm = m_kernel.MemoryManager();
+
+    for (const auto& it : *this) {
+        mm.Open(it.GetAddress(), it.GetNumPages());
+    }
+}
+
+void KPageGroup::OpenFirst() const {
+    auto& mm = m_kernel.MemoryManager();
+
+    for (const auto& it : *this) {
+        mm.OpenFirst(it.GetAddress(), it.GetNumPages());
+    }
+}
+
+void KPageGroup::Close() const {
+    auto& mm = m_kernel.MemoryManager();
+
+    for (const auto& it : *this) {
+        mm.Close(it.GetAddress(), it.GetNumPages());
+    }
+}
+
+bool KPageGroup::IsEquivalentTo(const KPageGroup& rhs) const {
+    auto lit = this->begin();
+    auto rit = rhs.begin();
+    auto lend = this->end();
+    auto rend = rhs.end();
+
+    while (lit != lend && rit != rend) {
+        if (*lit != *rit) {
+            return false;
+        }
+
+        ++lit;
+        ++rit;
+    }
+
+    return lit == lend && rit == rend;
+}
+
+} // namespace Kernel
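
A usage sketch (illustrative only, not part of this diff; the kernel reference, manager pointer, and addresses are hypothetical) of the new linked-list API defined above:

    // KPageGroup now chains slab-allocated KBlockInfo nodes instead of holding
    // a std::list<Node>; adjacent runs are merged via TryConcatenate().
    KPageGroup pg{kernel, block_info_manager};
    ASSERT(pg.AddBlock(0x100000, 4).IsSuccess()); // 4 pages at a hypothetical PAddr
    ASSERT(pg.AddBlock(0x104000, 2).IsSuccess()); // contiguous: concatenated, no new node
    for (const auto& block : pg) {
        // One KBlockInfo per physically-contiguous run (here: a single 6-page block).
    }
    // ~KPageGroup() calls Finalize(), returning the nodes to the KBlockInfoManager.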
diff --git a/src/core/hle/kernel/k_page_group.h b/src/core/hle/kernel/k_page_group.h
index 316f172f2..b0b243e7d 100644
--- a/src/core/hle/kernel/k_page_group.h
+++ b/src/core/hle/kernel/k_page_group.h
@@ -1,4 +1,4 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later
 
 #pragma once
@@ -13,24 +13,22 @@
 
 namespace Kernel {
 
+class KBlockInfoManager;
 class KPageGroup;
 
 class KBlockInfo {
-private:
-    friend class KPageGroup;
-
 public:
-    constexpr KBlockInfo() = default;
+    constexpr explicit KBlockInfo() : m_next(nullptr) {}
 
-    constexpr void Initialize(PAddr addr, size_t np) {
+    constexpr void Initialize(KPhysicalAddress addr, size_t np) {
         ASSERT(Common::IsAligned(addr, PageSize));
         ASSERT(static_cast<u32>(np) == np);
 
-        m_page_index = static_cast<u32>(addr) / PageSize;
+        m_page_index = static_cast<u32>(addr / PageSize);
         m_num_pages = static_cast<u32>(np);
     }
 
-    constexpr PAddr GetAddress() const {
+    constexpr KPhysicalAddress GetAddress() const {
         return m_page_index * PageSize;
     }
     constexpr size_t GetNumPages() const {
@@ -39,10 +37,10 @@ public:
     constexpr size_t GetSize() const {
         return this->GetNumPages() * PageSize;
     }
-    constexpr PAddr GetEndAddress() const {
+    constexpr KPhysicalAddress GetEndAddress() const {
         return (m_page_index + m_num_pages) * PageSize;
     }
-    constexpr PAddr GetLastAddress() const {
+    constexpr KPhysicalAddress GetLastAddress() const {
         return this->GetEndAddress() - 1;
     }
 
@@ -62,8 +60,8 @@ public:
         return !(*this == rhs);
     }
 
-    constexpr bool IsStrictlyBefore(PAddr addr) const {
-        const PAddr end = this->GetEndAddress();
+    constexpr bool IsStrictlyBefore(KPhysicalAddress addr) const {
+        const KPhysicalAddress end = this->GetEndAddress();
 
         if (m_page_index != 0 && end == 0) {
             return false;
@@ -72,11 +70,11 @@ public:
         return end < addr;
     }
 
-    constexpr bool operator<(PAddr addr) const {
+    constexpr bool operator<(KPhysicalAddress addr) const {
         return this->IsStrictlyBefore(addr);
     }
 
-    constexpr bool TryConcatenate(PAddr addr, size_t np) {
+    constexpr bool TryConcatenate(KPhysicalAddress addr, size_t np) {
         if (addr != 0 && addr == this->GetEndAddress()) {
             m_num_pages += static_cast<u32>(np);
             return true;
@@ -90,96 +88,118 @@ private:
     }
 
 private:
+    friend class KPageGroup;
+
     KBlockInfo* m_next{};
     u32 m_page_index{};
     u32 m_num_pages{};
 };
 static_assert(sizeof(KBlockInfo) <= 0x10);
 
-class KPageGroup final {
+class KPageGroup {
 public:
-    class Node final {
+    class Iterator {
     public:
-        constexpr Node(u64 addr_, std::size_t num_pages_) : addr{addr_}, num_pages{num_pages_} {}
+        using iterator_category = std::forward_iterator_tag;
+        using value_type = const KBlockInfo;
+        using difference_type = std::ptrdiff_t;
+        using pointer = value_type*;
+        using reference = value_type&;
+
+        constexpr explicit Iterator(pointer n) : m_node(n) {}
+
+        constexpr bool operator==(const Iterator& rhs) const {
+            return m_node == rhs.m_node;
+        }
+        constexpr bool operator!=(const Iterator& rhs) const {
+            return !(*this == rhs);
+        }
 
-        constexpr u64 GetAddress() const {
-            return addr;
+        constexpr pointer operator->() const {
+            return m_node;
+        }
+        constexpr reference operator*() const {
+            return *m_node;
         }
 
-        constexpr std::size_t GetNumPages() const {
-            return num_pages;
+        constexpr Iterator& operator++() {
+            m_node = m_node->GetNext();
+            return *this;
         }
 
-        constexpr std::size_t GetSize() const {
-            return GetNumPages() * PageSize;
+        constexpr Iterator operator++(int) {
+            const Iterator it{*this};
+            ++(*this);
+            return it;
         }
 
     private:
-        u64 addr{};
-        std::size_t num_pages{};
+        pointer m_node{};
     };
 
-public:
-    KPageGroup() = default;
-    KPageGroup(u64 address, u64 num_pages) {
-        ASSERT(AddBlock(address, num_pages).IsSuccess());
+    explicit KPageGroup(KernelCore& kernel, KBlockInfoManager* m)
+        : m_kernel{kernel}, m_manager{m} {}
+    ~KPageGroup() {
+        this->Finalize();
     }
 
-    constexpr std::list<Node>& Nodes() {
-        return nodes;
-    }
+    void CloseAndReset();
+    void Finalize();
 
-    constexpr const std::list<Node>& Nodes() const {
-        return nodes;
+    Iterator begin() const {
+        return Iterator{m_first_block};
+    }
+    Iterator end() const {
+        return Iterator{nullptr};
+    }
+    bool empty() const {
+        return m_first_block == nullptr;
     }
 
-    std::size_t GetNumPages() const {
-        std::size_t num_pages = 0;
-        for (const Node& node : nodes) {
-            num_pages += node.GetNumPages();
-        }
-        return num_pages;
-    }
-
-    bool IsEqual(KPageGroup& other) const {
-        auto this_node = nodes.begin();
-        auto other_node = other.nodes.begin();
-        while (this_node != nodes.end() && other_node != other.nodes.end()) {
-            if (this_node->GetAddress() != other_node->GetAddress() ||
-                this_node->GetNumPages() != other_node->GetNumPages()) {
-                return false;
-            }
-            this_node = std::next(this_node);
-            other_node = std::next(other_node);
-        }
+    Result AddBlock(KPhysicalAddress addr, size_t num_pages);
+    void Open() const;
+    void OpenFirst() const;
+    void Close() const;
+
+    size_t GetNumPages() const;
+
+    bool IsEquivalentTo(const KPageGroup& rhs) const;
+
+    bool operator==(const KPageGroup& rhs) const {
+        return this->IsEquivalentTo(rhs);
+    }
 
-        return this_node == nodes.end() && other_node == other.nodes.end();
+    bool operator!=(const KPageGroup& rhs) const {
+        return !(*this == rhs);
     }
 
-    Result AddBlock(u64 address, u64 num_pages) {
-        if (!num_pages) {
-            return ResultSuccess;
+private:
+    KernelCore& m_kernel;
+    KBlockInfo* m_first_block{};
+    KBlockInfo* m_last_block{};
+    KBlockInfoManager* m_manager{};
+};
+
+class KScopedPageGroup {
+public:
+    explicit KScopedPageGroup(const KPageGroup* gp) : m_pg(gp) {
+        if (m_pg) {
+            m_pg->Open();
         }
-        if (!nodes.empty()) {
-            const auto node = nodes.back();
-            if (node.GetAddress() + node.GetNumPages() * PageSize == address) {
-                address = node.GetAddress();
-                num_pages += node.GetNumPages();
-                nodes.pop_back();
-            }
+    }
+    explicit KScopedPageGroup(const KPageGroup& gp) : KScopedPageGroup(std::addressof(gp)) {}
+    ~KScopedPageGroup() {
+        if (m_pg) {
+            m_pg->Close();
         }
-        nodes.push_back({address, num_pages});
-        return ResultSuccess;
     }
 
-    bool Empty() const {
-        return nodes.empty();
+    void CancelClose() {
+        m_pg = nullptr;
     }
 
-    void Finalize() {}
-
 private:
-    std::list<Node> nodes;
+    const KPageGroup* m_pg{};
 };
 
 } // namespace Kernel
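
The KScopedPageGroup helper introduced above pairs Open() with an automatic Close(), so early error returns cannot leak page references; CancelClose() hands ownership off once an operation succeeds. A hedged sketch of the intended pattern (the surrounding function and the state/permission values are invented for illustration):

    Result MapIntoTable(KPageTable& pt, VAddr addr, const KPageGroup& pg) {
        // Open a reference to every block for the duration of the attempt.
        KScopedPageGroup spg(pg);

        // On failure, R_TRY returns early and ~KScopedPageGroup() closes the references.
        R_TRY(pt.MapPages(addr, pg, KMemoryState::Normal, KMemoryPermission::UserReadWrite));

        // Success: the mapping now owns the references, so skip the scoped Close().
        spg.CancelClose();
        R_SUCCEED();
    }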
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 612fc76fa..83131774c 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -100,7 +100,7 @@ constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType a
 
 KPageTable::KPageTable(Core::System& system_)
     : m_general_lock{system_.Kernel()},
-      m_map_physical_memory_lock{system_.Kernel()}, m_system{system_} {}
+      m_map_physical_memory_lock{system_.Kernel()}, m_system{system_}, m_kernel{system_.Kernel()} {}
 
 KPageTable::~KPageTable() = default;
 
@@ -373,7 +373,7 @@ Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState sta
                                       m_memory_block_slab_manager);
 
     // Allocate and open.
-    KPageGroup pg;
+    KPageGroup pg{m_kernel, m_block_info_manager};
     R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
         &pg, num_pages,
         KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocation_option)));
@@ -432,7 +432,7 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si
     const size_t num_pages = size / PageSize;
 
     // Create page groups for the memory being mapped.
-    KPageGroup pg;
+    KPageGroup pg{m_kernel, m_block_info_manager};
     AddRegionToPages(src_address, num_pages, pg);
 
     // Reprotect the source as kernel-read/not mapped.
@@ -593,7 +593,7 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     const size_t size = num_pages * PageSize;
 
     // We're making a new group, not adding to an existing one.
-    R_UNLESS(pg.Empty(), ResultInvalidCurrentMemory);
+    R_UNLESS(pg.empty(), ResultInvalidCurrentMemory);
 
     // Begin traversal.
     Common::PageTable::TraversalContext context;
@@ -640,11 +640,10 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     R_SUCCEED();
 }
 
-bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) {
+bool KPageTable::IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages) {
     ASSERT(this->IsLockedByCurrentThread());
 
     const size_t size = num_pages * PageSize;
-    const auto& pg = pg_ll.Nodes();
     const auto& memory_layout = m_system.Kernel().MemoryLayout();
 
     // Empty groups are necessarily invalid.
@@ -1572,7 +1571,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
     R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
 
     // Allocate pages for the new memory.
-    KPageGroup pg;
+    KPageGroup pg{m_kernel, m_block_info_manager};
     R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
         &pg, (size - mapped_size) / PageSize, m_allocate_option, 0, 0));
 
@@ -1650,7 +1649,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
     KScopedPageTableUpdater updater(this);
 
     // Prepare to iterate over the memory.
-    auto pg_it = pg.Nodes().begin();
+    auto pg_it = pg.begin();
     PAddr pg_phys_addr = pg_it->GetAddress();
     size_t pg_pages = pg_it->GetNumPages();
 
@@ -1703,7 +1702,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
             // Release any remaining unmapped memory.
             m_system.Kernel().MemoryManager().OpenFirst(pg_phys_addr, pg_pages);
             m_system.Kernel().MemoryManager().Close(pg_phys_addr, pg_pages);
-            for (++pg_it; pg_it != pg.Nodes().end(); ++pg_it) {
+            for (++pg_it; pg_it != pg.end(); ++pg_it) {
                 m_system.Kernel().MemoryManager().OpenFirst(pg_it->GetAddress(),
                                                             pg_it->GetNumPages());
                 m_system.Kernel().MemoryManager().Close(pg_it->GetAddress(),
@@ -1731,7 +1730,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
         // Check if we're at the end of the physical block.
         if (pg_pages == 0) {
             // Ensure there are more pages to map.
-            ASSERT(pg_it != pg.Nodes().end());
+            ASSERT(pg_it != pg.end());
 
             // Advance our physical block.
             ++pg_it;
@@ -1955,7 +1954,7 @@ Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size)
     R_TRY(dst_allocator_result);
 
     // Map the memory.
-    KPageGroup page_linked_list;
+    KPageGroup page_linked_list{m_kernel, m_block_info_manager};
     const size_t num_pages{size / PageSize};
     const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
         KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
@@ -2022,14 +2021,14 @@ Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size
                                              num_dst_allocator_blocks);
     R_TRY(dst_allocator_result);
 
-    KPageGroup src_pages;
-    KPageGroup dst_pages;
+    KPageGroup src_pages{m_kernel, m_block_info_manager};
+    KPageGroup dst_pages{m_kernel, m_block_info_manager};
     const size_t num_pages{size / PageSize};
 
     AddRegionToPages(src_address, num_pages, src_pages);
     AddRegionToPages(dst_address, num_pages, dst_pages);
 
-    R_UNLESS(dst_pages.IsEqual(src_pages), ResultInvalidMemoryRegion);
+    R_UNLESS(dst_pages.IsEquivalentTo(src_pages), ResultInvalidMemoryRegion);
 
     {
         auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); });
@@ -2060,7 +2059,7 @@ Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
 
     VAddr cur_addr{addr};
 
-    for (const auto& node : page_linked_list.Nodes()) {
+    for (const auto& node : page_linked_list) {
         if (const auto result{
                 Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())};
             result.IsError()) {
@@ -2160,7 +2159,7 @@ Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
 
     VAddr cur_addr{addr};
 
-    for (const auto& node : page_linked_list.Nodes()) {
+    for (const auto& node : page_linked_list) {
         if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None,
                                       OperationType::Unmap)};
             result.IsError()) {
@@ -2527,13 +2526,13 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
     R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
 
     // Allocate pages for the heap extension.
-    KPageGroup pg;
+    KPageGroup pg{m_kernel, m_block_info_manager};
     R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
         &pg, allocation_size / PageSize,
         KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
 
     // Clear all the newly allocated pages.
-    for (const auto& it : pg.Nodes()) {
+    for (const auto& it : pg) {
         std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
                     it.GetSize());
     }
@@ -2610,7 +2609,7 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_
     if (is_map_only) {
         R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
     } else {
-        KPageGroup page_group;
+        KPageGroup page_group{m_kernel, m_block_info_manager};
         R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
             &page_group, needed_num_pages,
             KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0));
@@ -2795,7 +2794,7 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_
     ASSERT(num_pages > 0);
     ASSERT(num_pages == page_group.GetNumPages());
 
-    for (const auto& node : page_group.Nodes()) {
+    for (const auto& node : page_group) {
         const size_t size{node.GetNumPages() * PageSize};
 
         switch (operation) {
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index f1ca785d7..5df5ba1a9 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -107,6 +107,10 @@ public:
         return *m_page_table_impl;
     }
 
+    KBlockInfoManager* GetBlockInfoManager() {
+        return m_block_info_manager;
+    }
+
     bool CanContain(VAddr addr, size_t size, KMemoryState state) const;
 
 protected:
@@ -488,6 +492,7 @@ private:
     std::unique_ptr<Common::PageTable> m_page_table_impl;
 
     Core::System& m_system;
+    KernelCore& m_kernel;
 };
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp
index 0aa68103c..3cf2b5d91 100644
--- a/src/core/hle/kernel/k_shared_memory.cpp
+++ b/src/core/hle/kernel/k_shared_memory.cpp
@@ -13,10 +13,7 @@
 namespace Kernel {
 
 KSharedMemory::KSharedMemory(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
-
-KSharedMemory::~KSharedMemory() {
-    kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemoryMax, size);
-}
+KSharedMemory::~KSharedMemory() = default;
 
 Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
                                  Svc::MemoryPermission owner_permission_,
@@ -49,7 +46,8 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
     R_UNLESS(physical_address != 0, ResultOutOfMemory);
 
     //! Insert the result into our page group.
-    page_group.emplace(physical_address, num_pages);
+    page_group.emplace(kernel, &kernel.GetSystemSystemResource().GetBlockInfoManager());
+    page_group->AddBlock(physical_address, num_pages);
 
     // Commit our reservation.
     memory_reservation.Commit();
@@ -62,7 +60,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
     is_initialized = true;
 
     // Clear all pages in the memory.
-    for (const auto& block : page_group->Nodes()) {
+    for (const auto& block : *page_group) {
         std::memset(device_memory_.GetPointer<void>(block.GetAddress()), 0, block.GetSize());
     }
 
@@ -71,13 +69,8 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
 
 void KSharedMemory::Finalize() {
     // Close and finalize the page group.
-    // page_group->Close();
-    // page_group->Finalize();
-
-    //! HACK: Manually close.
-    for (const auto& block : page_group->Nodes()) {
-        kernel.MemoryManager().Close(block.GetAddress(), block.GetNumPages());
-    }
+    page_group->Close();
+    page_group->Finalize();
 
     // Release the memory reservation.
     resource_limit->Release(LimitableResource::PhysicalMemoryMax, size);
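
For reference, the Close()/Finalize() pair restored here matches the semantics of the methods this commit defines in k_page_group.cpp; an editorial sketch:

    page_group->Close();    // drop the reference taken on each block's pages
    page_group->Finalize(); // return the KBlockInfo nodes to the KBlockInfoManager
    // Equivalent single-pass form also added by this commit:
    page_group->CloseAndReset();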
diff --git a/src/core/hle/kernel/memory_types.h b/src/core/hle/kernel/memory_types.h
index 3975507bd..92b8b37ac 100644
--- a/src/core/hle/kernel/memory_types.h
+++ b/src/core/hle/kernel/memory_types.h
@@ -14,4 +14,7 @@ constexpr std::size_t PageSize{1 << PageBits};
 
 using Page = std::array<u8, PageSize>;
 
+using KPhysicalAddress = PAddr;
+using KProcessAddress = VAddr;
+
 } // namespace Kernel
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 788ee2160..aca442196 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -1485,7 +1485,7 @@ static Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle p
              ResultInvalidMemoryRegion);
 
     // Create a new page group.
-    KPageGroup pg;
+    KPageGroup pg{system.Kernel(), dst_pt.GetBlockInfoManager()};
     R_TRY(src_pt.MakeAndOpenPageGroup(
         std::addressof(pg), src_address, size / PageSize, KMemoryState::FlagCanMapProcess,
         KMemoryState::FlagCanMapProcess, KMemoryPermission::None, KMemoryPermission::None,