Diffstat:
-rw-r--r--  src/core/CMakeLists.txt                   |   1
-rw-r--r--  src/core/hle/kernel/k_code_memory.cpp     |  29
-rw-r--r--  src/core/hle/kernel/k_code_memory.h       |   6
-rw-r--r--  src/core/hle/kernel/k_memory_manager.cpp  |   8
-rw-r--r--  src/core/hle/kernel/k_page_group.cpp      | 121
-rw-r--r--  src/core/hle/kernel/k_page_group.h        | 163
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp      | 142
-rw-r--r--  src/core/hle/kernel/k_page_table.h        |   9
-rw-r--r--  src/core/hle/kernel/k_shared_memory.cpp   |  19
-rw-r--r--  src/core/hle/kernel/memory_types.h        |   3
-rw-r--r--  src/core/hle/kernel/svc.cpp               |   2
11 files changed, 181 insertions, 322 deletions
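
Note: this patch reverts the intrusive-list KPageGroup rework. KPageGroup goes
back to being a plain std::list of (address, page count) nodes, KBlockInfo
returns to PAddr-based accessors, and page reference counting moves out of
KPageTable::Operate into explicit HACK_OpenPages/HACK_ClosePages calls at each
map and unmap site. A rough sketch of the call-site pattern this implies
(CountPages is a hypothetical helper; Nodes() and GetNumPages() are from the
k_page_group.h hunk below):

    // Hypothetical helper illustrating iteration after this revert.
    size_t CountPages(const Kernel::KPageGroup& pg) {
        size_t total = 0;
        // Before: for (const auto& block : pg) { ... }  (intrusive iterators)
        // After: iteration goes through the explicit Nodes() accessor.
        for (const auto& block : pg.Nodes()) {
            total += block.GetNumPages();
        }
        return total;
    }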
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 5afdeb5ff..0252c8c31 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -226,7 +226,6 @@ add_library(core STATIC
     hle/kernel/k_page_buffer.h
     hle/kernel/k_page_heap.cpp
     hle/kernel/k_page_heap.h
-    hle/kernel/k_page_group.cpp
     hle/kernel/k_page_group.h
     hle/kernel/k_page_table.cpp
     hle/kernel/k_page_table.h
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index d9da1e600..4b1c134d4 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -27,13 +27,13 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
     auto& page_table = m_owner->PageTable();
 
     // Construct the page group.
-    m_page_group.emplace(kernel, page_table.GetBlockInfoManager());
+    m_page_group = {};
 
     // Lock the memory.
-    R_TRY(page_table.LockForCodeMemory(std::addressof(*m_page_group), addr, size))
+    R_TRY(page_table.LockForCodeMemory(&m_page_group, addr, size))
 
     // Clear the memory.
-    for (const auto& block : *m_page_group) {
+    for (const auto& block : m_page_group.Nodes()) {
         std::memset(device_memory.GetPointer<void>(block.GetAddress()), 0xFF, block.GetSize());
     }
 
@@ -51,13 +51,12 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
 void KCodeMemory::Finalize() {
     // Unlock.
     if (!m_is_mapped && !m_is_owner_mapped) {
-        const size_t size = m_page_group->GetNumPages() * PageSize;
-        m_owner->PageTable().UnlockForCodeMemory(m_address, size, *m_page_group);
+        const size_t size = m_page_group.GetNumPages() * PageSize;
+        m_owner->PageTable().UnlockForCodeMemory(m_address, size, m_page_group);
     }
 
     // Close the page group.
-    m_page_group->Close();
-    m_page_group->Finalize();
+    m_page_group = {};
 
     // Close our reference to our owner.
     m_owner->Close();
@@ -65,7 +64,7 @@ void KCodeMemory::Finalize() {
 
 Result KCodeMemory::Map(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
     // Lock ourselves.
     KScopedLightLock lk(m_lock);
@@ -75,7 +74,7 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
 
     // Map the memory.
     R_TRY(kernel.CurrentProcess()->PageTable().MapPages(
-        address, *m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));
+        address, m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));
 
     // Mark ourselves as mapped.
     m_is_mapped = true;
@@ -85,13 +84,13 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
 
 Result KCodeMemory::Unmap(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
     // Lock ourselves.
     KScopedLightLock lk(m_lock);
 
     // Unmap the memory.
-    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, *m_page_group,
+    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, m_page_group,
                                                           KMemoryState::CodeOut));
 
     // Mark ourselves as unmapped.
@@ -102,7 +101,7 @@ Result KCodeMemory::Unmap(VAddr address, size_t size) {
 
 Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
     // Validate the size.
-    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
     // Lock ourselves.
     KScopedLightLock lk(m_lock);
@@ -126,7 +125,7 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission
 
     // Map the memory.
     R_TRY(
-        m_owner->PageTable().MapPages(address, *m_page_group, KMemoryState::GeneratedCode, k_perm));
+        m_owner->PageTable().MapPages(address, m_page_group, KMemoryState::GeneratedCode, k_perm));
 
     // Mark ourselves as mapped.
     m_is_owner_mapped = true;
@@ -136,13 +135,13 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission
 
 Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
     // Lock ourselves.
     KScopedLightLock lk(m_lock);
 
     // Unmap the memory.
-    R_TRY(m_owner->PageTable().UnmapPages(address, *m_page_group, KMemoryState::GeneratedCode));
+    R_TRY(m_owner->PageTable().UnmapPages(address, m_page_group, KMemoryState::GeneratedCode));
 
     // Mark ourselves as unmapped.
     m_is_owner_mapped = false;
diff --git a/src/core/hle/kernel/k_code_memory.h b/src/core/hle/kernel/k_code_memory.h
index 5b260b385..2e7e1436a 100644
--- a/src/core/hle/kernel/k_code_memory.h
+++ b/src/core/hle/kernel/k_code_memory.h
@@ -3,8 +3,6 @@
 
 #pragma once
 
-#include <optional>
-
 #include "common/common_types.h"
 #include "core/device_memory.h"
 #include "core/hle/kernel/k_auto_object.h"
@@ -51,11 +49,11 @@ public:
         return m_address;
     }
     size_t GetSize() const {
-        return m_is_initialized ? m_page_group->GetNumPages() * PageSize : 0;
+        return m_is_initialized ? m_page_group.GetNumPages() * PageSize : 0;
     }
 
 private:
-    std::optional<KPageGroup> m_page_group{};
+    KPageGroup m_page_group{};
     KProcess* m_owner{};
     VAddr m_address{};
     KLightLock m_lock;
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index cd6ea388e..bd33571da 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -223,7 +223,7 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,
 
     // Ensure that we don't leave anything un-freed.
     ON_RESULT_FAILURE {
-        for (const auto& it : *out) {
+        for (const auto& it : out->Nodes()) {
             auto& manager = this->GetManager(it.GetAddress());
             const size_t node_num_pages = std::min<u64>(
                 it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize);
@@ -285,7 +285,7 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
         m_has_optimized_process[static_cast<size_t>(pool)], true));
 
     // Open the first reference to the pages.
-    for (const auto& block : *out) {
+    for (const auto& block : out->Nodes()) {
         PAddr cur_address = block.GetAddress();
         size_t remaining_pages = block.GetNumPages();
         while (remaining_pages > 0) {
@@ -335,7 +335,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
     // Perform optimized memory tracking, if we should.
     if (optimized) {
         // Iterate over the allocated blocks.
-        for (const auto& block : *out) {
+        for (const auto& block : out->Nodes()) {
             // Get the block extents.
             const PAddr block_address = block.GetAddress();
             const size_t block_pages = block.GetNumPages();
@@ -391,7 +391,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
         }
     } else {
         // Set all the allocated memory.
-        for (const auto& block : *out) {
+        for (const auto& block : out->Nodes()) {
             std::memset(m_system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
                         block.GetSize());
         }
diff --git a/src/core/hle/kernel/k_page_group.cpp b/src/core/hle/kernel/k_page_group.cpp
deleted file mode 100644
index d8c644a33..000000000
--- a/src/core/hle/kernel/k_page_group.cpp
+++ /dev/null
@@ -1,121 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include "core/hle/kernel/k_dynamic_resource_manager.h"
-#include "core/hle/kernel/k_memory_manager.h"
-#include "core/hle/kernel/k_page_group.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/svc_results.h"
-
-namespace Kernel {
-
-void KPageGroup::Finalize() {
-    KBlockInfo* cur = m_first_block;
-    while (cur != nullptr) {
-        KBlockInfo* next = cur->GetNext();
-        m_manager->Free(cur);
-        cur = next;
-    }
-
-    m_first_block = nullptr;
-    m_last_block = nullptr;
-}
-
-void KPageGroup::CloseAndReset() {
-    auto& mm = m_kernel.MemoryManager();
-
-    KBlockInfo* cur = m_first_block;
-    while (cur != nullptr) {
-        KBlockInfo* next = cur->GetNext();
-        mm.Close(cur->GetAddress(), cur->GetNumPages());
-        m_manager->Free(cur);
-        cur = next;
-    }
-
-    m_first_block = nullptr;
-    m_last_block = nullptr;
-}
-
-size_t KPageGroup::GetNumPages() const {
-    size_t num_pages = 0;
-
-    for (const auto& it : *this) {
-        num_pages += it.GetNumPages();
-    }
-
-    return num_pages;
-}
-
-Result KPageGroup::AddBlock(KPhysicalAddress addr, size_t num_pages) {
-    // Succeed immediately if we're adding no pages.
-    R_SUCCEED_IF(num_pages == 0);
-
-    // Check for overflow.
-    ASSERT(addr < addr + num_pages * PageSize);
-
-    // Try to just append to the last block.
-    if (m_last_block != nullptr) {
-        R_SUCCEED_IF(m_last_block->TryConcatenate(addr, num_pages));
-    }
-
-    // Allocate a new block.
-    KBlockInfo* new_block = m_manager->Allocate();
-    R_UNLESS(new_block != nullptr, ResultOutOfResource);
-
-    // Initialize the block.
-    new_block->Initialize(addr, num_pages);
-
-    // Add the block to our list.
-    if (m_last_block != nullptr) {
-        m_last_block->SetNext(new_block);
-    } else {
-        m_first_block = new_block;
-    }
-    m_last_block = new_block;
-
-    R_SUCCEED();
-}
-
-void KPageGroup::Open() const {
-    auto& mm = m_kernel.MemoryManager();
-
-    for (const auto& it : *this) {
-        mm.Open(it.GetAddress(), it.GetNumPages());
-    }
-}
-
-void KPageGroup::OpenFirst() const {
-    auto& mm = m_kernel.MemoryManager();
-
-    for (const auto& it : *this) {
-        mm.OpenFirst(it.GetAddress(), it.GetNumPages());
-    }
-}
-
-void KPageGroup::Close() const {
-    auto& mm = m_kernel.MemoryManager();
-
-    for (const auto& it : *this) {
-        mm.Close(it.GetAddress(), it.GetNumPages());
-    }
-}
-
-bool KPageGroup::IsEquivalentTo(const KPageGroup& rhs) const {
-    auto lit = this->begin();
-    auto rit = rhs.begin();
-    auto lend = this->end();
-    auto rend = rhs.end();
-
-    while (lit != lend && rit != rend) {
-        if (*lit != *rit) {
-            return false;
-        }
-
-        ++lit;
-        ++rit;
-    }
-
-    return lit == lend && rit == rend;
-}
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_group.h b/src/core/hle/kernel/k_page_group.h
index c07f17663..316f172f2 100644
--- a/src/core/hle/kernel/k_page_group.h
+++ b/src/core/hle/kernel/k_page_group.h
@@ -1,4 +1,4 @@
-// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later
 
 #pragma once
@@ -13,23 +13,24 @@
 
 namespace Kernel {
 
-class KBlockInfoManager;
-class KernelCore;
 class KPageGroup;
 
 class KBlockInfo {
+private:
+    friend class KPageGroup;
+
 public:
-    constexpr explicit KBlockInfo() : m_next(nullptr) {}
+    constexpr KBlockInfo() = default;
 
-    constexpr void Initialize(KPhysicalAddress addr, size_t np) {
+    constexpr void Initialize(PAddr addr, size_t np) {
         ASSERT(Common::IsAligned(addr, PageSize));
         ASSERT(static_cast<u32>(np) == np);
 
-        m_page_index = static_cast<u32>(addr / PageSize);
+        m_page_index = static_cast<u32>(addr) / PageSize;
         m_num_pages = static_cast<u32>(np);
     }
 
-    constexpr KPhysicalAddress GetAddress() const {
+    constexpr PAddr GetAddress() const {
         return m_page_index * PageSize;
     }
     constexpr size_t GetNumPages() const {
@@ -38,10 +39,10 @@ public:
     constexpr size_t GetSize() const {
         return this->GetNumPages() * PageSize;
     }
-    constexpr KPhysicalAddress GetEndAddress() const {
+    constexpr PAddr GetEndAddress() const {
         return (m_page_index + m_num_pages) * PageSize;
     }
-    constexpr KPhysicalAddress GetLastAddress() const {
+    constexpr PAddr GetLastAddress() const {
         return this->GetEndAddress() - 1;
     }
 
@@ -61,8 +62,8 @@ public:
         return !(*this == rhs);
     }
 
-    constexpr bool IsStrictlyBefore(KPhysicalAddress addr) const {
-        const KPhysicalAddress end = this->GetEndAddress();
+    constexpr bool IsStrictlyBefore(PAddr addr) const {
+        const PAddr end = this->GetEndAddress();
 
         if (m_page_index != 0 && end == 0) {
             return false;
@@ -71,11 +72,11 @@ public:
         return end < addr;
     }
 
-    constexpr bool operator<(KPhysicalAddress addr) const {
+    constexpr bool operator<(PAddr addr) const {
         return this->IsStrictlyBefore(addr);
     }
 
-    constexpr bool TryConcatenate(KPhysicalAddress addr, size_t np) {
+    constexpr bool TryConcatenate(PAddr addr, size_t np) {
         if (addr != 0 && addr == this->GetEndAddress()) {
             m_num_pages += static_cast<u32>(np);
             return true;
@@ -89,118 +90,96 @@ private:
     }
 
 private:
-    friend class KPageGroup;
-
     KBlockInfo* m_next{};
     u32 m_page_index{};
     u32 m_num_pages{};
 };
 static_assert(sizeof(KBlockInfo) <= 0x10);
 
-class KPageGroup {
+class KPageGroup final {
 public:
-    class Iterator {
-    public:
-        using iterator_category = std::forward_iterator_tag;
-        using value_type = const KBlockInfo;
-        using difference_type = std::ptrdiff_t;
-        using pointer = value_type*;
-        using reference = value_type&;
-
-        constexpr explicit Iterator(pointer n) : m_node(n) {}
-
-        constexpr bool operator==(const Iterator& rhs) const {
-            return m_node == rhs.m_node;
-        }
-        constexpr bool operator!=(const Iterator& rhs) const {
-            return !(*this == rhs);
-        }
-
-        constexpr pointer operator->() const {
-            return m_node;
-        }
-        constexpr reference operator*() const {
-            return *m_node;
-        }
-
-        constexpr Iterator& operator++() {
-            m_node = m_node->GetNext();
-            return *this;
-        }
-
-        constexpr Iterator operator++(int) {
-            const Iterator it{*this};
-            ++(*this);
-            return it;
-        }
-
-    private:
-        pointer m_node{};
-    };
-
-    explicit KPageGroup(KernelCore& kernel, KBlockInfoManager* m)
-        : m_kernel{kernel}, m_manager{m} {}
-    ~KPageGroup() {
-        this->Finalize();
-    }
-
-    void CloseAndReset();
-    void Finalize();
-
-    Iterator begin() const {
-        return Iterator{m_first_block};
-    }
-    Iterator end() const {
-        return Iterator{nullptr};
-    }
-    bool empty() const {
-        return m_first_block == nullptr;
-    }
-
-    Result AddBlock(KPhysicalAddress addr, size_t num_pages);
-    void Open() const;
-    void OpenFirst() const;
-    void Close() const;
-
-    size_t GetNumPages() const;
-
-    bool IsEquivalentTo(const KPageGroup& rhs) const;
-
-    bool operator==(const KPageGroup& rhs) const {
-        return this->IsEquivalentTo(rhs);
-    }
-
-    bool operator!=(const KPageGroup& rhs) const {
-        return !(*this == rhs);
-    }
-
-private:
-    KernelCore& m_kernel;
-    KBlockInfo* m_first_block{};
-    KBlockInfo* m_last_block{};
-    KBlockInfoManager* m_manager{};
-};
-
-class KScopedPageGroup {
-public:
-    explicit KScopedPageGroup(const KPageGroup* gp) : m_pg(gp) {
-        if (m_pg) {
-            m_pg->Open();
-        }
-    }
-    explicit KScopedPageGroup(const KPageGroup& gp) : KScopedPageGroup(std::addressof(gp)) {}
-    ~KScopedPageGroup() {
-        if (m_pg) {
-            m_pg->Close();
-        }
-    }
-
-    void CancelClose() {
-        m_pg = nullptr;
-    }
-
-private:
-    const KPageGroup* m_pg{};
-};
+    class Node final {
+    public:
+        constexpr Node(u64 addr_, std::size_t num_pages_) : addr{addr_}, num_pages{num_pages_} {}
+
+        constexpr u64 GetAddress() const {
+            return addr;
+        }
+
+        constexpr std::size_t GetNumPages() const {
+            return num_pages;
+        }
+
+        constexpr std::size_t GetSize() const {
+            return GetNumPages() * PageSize;
+        }
+
+    private:
+        u64 addr{};
+        std::size_t num_pages{};
+    };
+
+public:
+    KPageGroup() = default;
+    KPageGroup(u64 address, u64 num_pages) {
+        ASSERT(AddBlock(address, num_pages).IsSuccess());
+    }
+
+    constexpr std::list<Node>& Nodes() {
+        return nodes;
+    }
+
+    constexpr const std::list<Node>& Nodes() const {
+        return nodes;
+    }
+
+    std::size_t GetNumPages() const {
+        std::size_t num_pages = 0;
+        for (const Node& node : nodes) {
+            num_pages += node.GetNumPages();
+        }
+        return num_pages;
+    }
+
+    bool IsEqual(KPageGroup& other) const {
+        auto this_node = nodes.begin();
+        auto other_node = other.nodes.begin();
+        while (this_node != nodes.end() && other_node != other.nodes.end()) {
+            if (this_node->GetAddress() != other_node->GetAddress() ||
+                this_node->GetNumPages() != other_node->GetNumPages()) {
+                return false;
+            }
+            this_node = std::next(this_node);
+            other_node = std::next(other_node);
+        }
+
+        return this_node == nodes.end() && other_node == other.nodes.end();
+    }
+
+    Result AddBlock(u64 address, u64 num_pages) {
+        if (!num_pages) {
+            return ResultSuccess;
+        }
+        if (!nodes.empty()) {
+            const auto node = nodes.back();
+            if (node.GetAddress() + node.GetNumPages() * PageSize == address) {
+                address = node.GetAddress();
+                num_pages += node.GetNumPages();
+                nodes.pop_back();
+            }
+        }
+        nodes.push_back({address, num_pages});
+        return ResultSuccess;
+    }
+
+    bool Empty() const {
+        return nodes.empty();
+    }
+
+    void Finalize() {}
+
+private:
+    std::list<Node> nodes;
+};
 
 } // namespace Kernel
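
Note: the reverted AddBlock above coalesces adjacent ranges: when the incoming
block starts exactly at the end address of the last stored node, the two are
merged into a single node. A standalone sketch of just that logic, using
stand-in types (the PageSize value and node shape mirror the hunk above;
everything else is illustrative):

    #include <cassert>
    #include <cstdint>
    #include <list>

    constexpr std::size_t PageSize = 0x1000;

    struct Node {
        std::uint64_t addr;
        std::uint64_t num_pages;
    };

    struct PageGroup {
        std::list<Node> nodes;

        void AddBlock(std::uint64_t address, std::uint64_t num_pages) {
            if (!num_pages) {
                return;
            }
            if (!nodes.empty()) {
                // Merge when the new range begins where the last node ends.
                const auto node = nodes.back();
                if (node.addr + node.num_pages * PageSize == address) {
                    address = node.addr;
                    num_pages += node.num_pages;
                    nodes.pop_back();
                }
            }
            nodes.push_back({address, num_pages});
        }
    };

    int main() {
        PageGroup pg;
        pg.AddBlock(0x10000, 4); // pages [0x10000, 0x14000)
        pg.AddBlock(0x14000, 2); // contiguous: merges into the first node
        pg.AddBlock(0x20000, 1); // gap: becomes a second node
        assert(pg.nodes.size() == 2);
        assert(pg.nodes.front().num_pages == 6);
    }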
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 9c7ac22dc..612fc76fa 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -100,7 +100,7 @@ constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType a
 
 KPageTable::KPageTable(Core::System& system_)
     : m_general_lock{system_.Kernel()},
-      m_map_physical_memory_lock{system_.Kernel()}, m_system{system_}, m_kernel{system_.Kernel()} {}
+      m_map_physical_memory_lock{system_.Kernel()}, m_system{system_} {}
 
 KPageTable::~KPageTable() = default;
 
@@ -373,7 +373,7 @@ Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState sta
                                              m_memory_block_slab_manager);
 
     // Allocate and open.
-    KPageGroup pg{m_kernel, m_block_info_manager};
+    KPageGroup pg;
     R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
         &pg, num_pages,
         KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocation_option)));
@@ -432,7 +432,7 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si
     const size_t num_pages = size / PageSize;
 
     // Create page groups for the memory being mapped.
-    KPageGroup pg{m_kernel, m_block_info_manager};
+    KPageGroup pg;
     AddRegionToPages(src_address, num_pages, pg);
 
     // Reprotect the source as kernel-read/not mapped.
@@ -593,7 +593,7 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     const size_t size = num_pages * PageSize;
 
     // We're making a new group, not adding to an existing one.
-    R_UNLESS(pg.empty(), ResultInvalidCurrentMemory);
+    R_UNLESS(pg.Empty(), ResultInvalidCurrentMemory);
 
     // Begin traversal.
     Common::PageTable::TraversalContext context;
@@ -640,10 +640,11 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     R_SUCCEED();
 }
 
-bool KPageTable::IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages) {
+bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) {
     ASSERT(this->IsLockedByCurrentThread());
 
     const size_t size = num_pages * PageSize;
+    const auto& pg = pg_ll.Nodes();
     const auto& memory_layout = m_system.Kernel().MemoryLayout();
 
     // Empty groups are necessarily invalid.
@@ -941,6 +942,9 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
 
     ON_RESULT_FAILURE {
         if (cur_mapped_addr != dst_addr) {
+            // HACK: Manually close the pages.
+            HACK_ClosePages(dst_addr, (cur_mapped_addr - dst_addr) / PageSize);
+
             ASSERT(Operate(dst_addr, (cur_mapped_addr - dst_addr) / PageSize,
                            KMemoryPermission::None, OperationType::Unmap)
                        .IsSuccess());
@@ -1016,6 +1020,9 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
         // Map the page.
         R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, start_partial_page));
 
+        // HACK: Manually open the pages.
+        HACK_OpenPages(start_partial_page, 1);
+
         // Update tracking extents.
         cur_mapped_addr += PageSize;
         cur_block_addr += PageSize;
@@ -1044,6 +1051,9 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
         R_TRY(Operate(cur_mapped_addr, cur_block_size / PageSize, test_perm, OperationType::Map,
                       cur_block_addr));
 
+        // HACK: Manually open the pages.
+        HACK_OpenPages(cur_block_addr, cur_block_size / PageSize);
+
         // Update tracking extents.
         cur_mapped_addr += cur_block_size;
         cur_block_addr = next_entry.phys_addr;
@@ -1063,6 +1073,9 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
         R_TRY(Operate(cur_mapped_addr, last_block_size / PageSize, test_perm, OperationType::Map,
                       cur_block_addr));
 
+        // HACK: Manually open the pages.
+        HACK_OpenPages(cur_block_addr, last_block_size / PageSize);
+
         // Update tracking extents.
         cur_mapped_addr += last_block_size;
         cur_block_addr += last_block_size;
@@ -1094,6 +1107,9 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
 
         // Map the page.
         R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, end_partial_page));
+
+        // HACK: Manually open the pages.
+        HACK_OpenPages(end_partial_page, 1);
     }
 
     // Update memory blocks to reflect our changes
@@ -1195,6 +1211,9 @@ Result KPageTable::CleanupForIpcServer(VAddr address, size_t size, KMemoryState
     const size_t aligned_size = aligned_end - aligned_start;
     const size_t aligned_num_pages = aligned_size / PageSize;
 
+    // HACK: Manually close the pages.
+    HACK_ClosePages(aligned_start, aligned_num_pages);
+
     // Unmap the pages.
     R_TRY(Operate(aligned_start, aligned_num_pages, KMemoryPermission::None, OperationType::Unmap));
 
@@ -1482,6 +1501,17 @@ void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLi
     }
 }
 
+void KPageTable::HACK_OpenPages(PAddr phys_addr, size_t num_pages) {
+    m_system.Kernel().MemoryManager().OpenFirst(phys_addr, num_pages);
+}
+
+void KPageTable::HACK_ClosePages(VAddr virt_addr, size_t num_pages) {
+    for (size_t index = 0; index < num_pages; ++index) {
+        const auto paddr = GetPhysicalAddr(virt_addr + (index * PageSize));
+        m_system.Kernel().MemoryManager().Close(paddr, 1);
+    }
+}
+
 Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
     // Lock the physical memory lock.
     KScopedLightLock phys_lk(m_map_physical_memory_lock);
@@ -1542,7 +1572,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
     R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
 
     // Allocate pages for the new memory.
-    KPageGroup pg{m_kernel, m_block_info_manager};
+    KPageGroup pg;
     R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
         &pg, (size - mapped_size) / PageSize, m_allocate_option, 0, 0));
 
@@ -1620,7 +1650,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
     KScopedPageTableUpdater updater(this);
 
     // Prepare to iterate over the memory.
-    auto pg_it = pg.begin();
+    auto pg_it = pg.Nodes().begin();
     PAddr pg_phys_addr = pg_it->GetAddress();
     size_t pg_pages = pg_it->GetNumPages();
 
@@ -1650,6 +1680,9 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                                                    last_unmap_address + 1 - cur_address) /
                                               PageSize;
 
+                    // HACK: Manually close the pages.
+                    HACK_ClosePages(cur_address, cur_pages);
+
                     // Unmap.
                     ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None,
                                    OperationType::Unmap)
@@ -1670,7 +1703,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
             // Release any remaining unmapped memory.
             m_system.Kernel().MemoryManager().OpenFirst(pg_phys_addr, pg_pages);
             m_system.Kernel().MemoryManager().Close(pg_phys_addr, pg_pages);
-            for (++pg_it; pg_it != pg.end(); ++pg_it) {
+            for (++pg_it; pg_it != pg.Nodes().end(); ++pg_it) {
                 m_system.Kernel().MemoryManager().OpenFirst(pg_it->GetAddress(),
                                                             pg_it->GetNumPages());
                 m_system.Kernel().MemoryManager().Close(pg_it->GetAddress(),
@@ -1698,7 +1731,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                 // Check if we're at the end of the physical block.
                 if (pg_pages == 0) {
                     // Ensure there are more pages to map.
-                    ASSERT(pg_it != pg.end());
+                    ASSERT(pg_it != pg.Nodes().end());
 
                     // Advance our physical block.
                     ++pg_it;
@@ -1709,7 +1742,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                 // Map whatever we can.
                 const size_t cur_pages = std::min(pg_pages, map_pages);
                 R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite,
-                              OperationType::MapFirst, pg_phys_addr));
+                              OperationType::Map, pg_phys_addr));
+
+                // HACK: Manually open the pages.
+                HACK_OpenPages(pg_phys_addr, cur_pages);
 
                 // Advance.
                 cur_address += cur_pages * PageSize;
@@ -1852,6 +1888,9 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
                                        last_address + 1 - cur_address) /
                               PageSize;
 
+            // HACK: Manually close the pages.
+            HACK_ClosePages(cur_address, cur_pages);
+
             // Unmap.
             ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap)
                        .IsSuccess());
@@ -1916,7 +1955,7 @@ Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size)
     R_TRY(dst_allocator_result);
 
     // Map the memory.
-    KPageGroup page_linked_list{m_kernel, m_block_info_manager};
+    KPageGroup page_linked_list;
     const size_t num_pages{size / PageSize};
     const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
         KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
@@ -1983,14 +2022,14 @@ Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size
                                              num_dst_allocator_blocks);
     R_TRY(dst_allocator_result);
 
-    KPageGroup src_pages{m_kernel, m_block_info_manager};
-    KPageGroup dst_pages{m_kernel, m_block_info_manager};
+    KPageGroup src_pages;
+    KPageGroup dst_pages;
     const size_t num_pages{size / PageSize};
 
     AddRegionToPages(src_address, num_pages, src_pages);
     AddRegionToPages(dst_address, num_pages, dst_pages);
 
-    R_UNLESS(dst_pages.IsEquivalentTo(src_pages), ResultInvalidMemoryRegion);
+    R_UNLESS(dst_pages.IsEqual(src_pages), ResultInvalidMemoryRegion);
 
     {
         auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); });
@@ -2021,7 +2060,7 @@ Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
 
     VAddr cur_addr{addr};
 
-    for (const auto& node : page_linked_list) {
+    for (const auto& node : page_linked_list.Nodes()) {
         if (const auto result{
                 Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())};
             result.IsError()) {
@@ -2121,7 +2160,7 @@ Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
 
     VAddr cur_addr{addr};
 
-    for (const auto& node : page_linked_list) {
+    for (const auto& node : page_linked_list.Nodes()) {
         if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None,
                                       OperationType::Unmap)};
             result.IsError()) {
@@ -2488,13 +2527,13 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
     R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
 
     // Allocate pages for the heap extension.
-    KPageGroup pg{m_kernel, m_block_info_manager};
+    KPageGroup pg;
     R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
         &pg, allocation_size / PageSize,
         KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
 
     // Clear all the newly allocated pages.
-    for (const auto& it : pg) {
+    for (const auto& it : pg.Nodes()) {
         std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
                     it.GetSize());
     }
@@ -2571,23 +2610,11 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_
     if (is_map_only) {
         R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
     } else {
-        // Create a page group tohold the pages we allocate.
-        KPageGroup pg{m_kernel, m_block_info_manager};
-
-        R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
-            &pg, needed_num_pages,
-            KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
-
-        // Ensure that the page group is closed when we're done working with it.
-        SCOPE_EXIT({ pg.Close(); });
-
-        // Clear all pages.
-        for (const auto& it : pg) {
-            std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()),
-                        m_heap_fill_value, it.GetSize());
-        }
-
-        R_TRY(Operate(addr, needed_num_pages, pg, OperationType::MapGroup));
+        KPageGroup page_group;
+        R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
+            &page_group, needed_num_pages,
+            KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0));
+        R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup));
     }
 
     // Update the blocks.
@@ -2768,28 +2795,19 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_
     ASSERT(num_pages > 0);
     ASSERT(num_pages == page_group.GetNumPages());
 
-    switch (operation) {
-    case OperationType::MapGroup: {
-        // We want to maintain a new reference to every page in the group.
-        KScopedPageGroup spg(page_group);
-
-        for (const auto& node : page_group) {
-            const size_t size{node.GetNumPages() * PageSize};
-
-            // Map the pages.
-            m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress());
-
-            addr += size;
-        }
-
-        // We succeeded! We want to persist the reference to the pages.
-        spg.CancelClose();
-
-        break;
-    }
-    default:
-        ASSERT(false);
-        break;
-    }
+    for (const auto& node : page_group.Nodes()) {
+        const size_t size{node.GetNumPages() * PageSize};
+
+        switch (operation) {
+        case OperationType::MapGroup:
+            m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress());
+            break;
+        default:
+            ASSERT(false);
+            break;
+        }
+
+        addr += size;
+    }
 
     R_SUCCEED();
@@ -2804,29 +2822,13 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm,
     ASSERT(ContainsPages(addr, num_pages));
 
     switch (operation) {
-    case OperationType::Unmap: {
-        // Ensure that any pages we track close on exit.
-        KPageGroup pages_to_close{m_kernel, this->GetBlockInfoManager()};
-        SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
-
-        this->AddRegionToPages(addr, num_pages, pages_to_close);
+    case OperationType::Unmap:
         m_system.Memory().UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize);
         break;
-    }
-    case OperationType::MapFirst:
     case OperationType::Map: {
         ASSERT(map_addr);
         ASSERT(Common::IsAligned(map_addr, PageSize));
         m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr);
-
-        // Open references to pages, if we should.
-        if (IsHeapPhysicalAddress(m_kernel.MemoryLayout(), map_addr)) {
-            if (operation == OperationType::MapFirst) {
-                m_kernel.MemoryManager().OpenFirst(map_addr, num_pages);
-            } else {
-                m_kernel.MemoryManager().Open(map_addr, num_pages);
-            }
-        }
         break;
     }
     case OperationType::Separate: {
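
Note: in the hunks above, HACK_OpenPages takes a physical address while
HACK_ClosePages takes a virtual address and translates each page through
GetPhysicalAddr. The translation only works while the mapping still exists,
which is why every HACK_ClosePages call is placed before the corresponding
Unmap operation. A toy sketch of that ordering constraint (all types here are
illustrative, not the codebase's):

    #include <cstddef>
    #include <cstdint>
    #include <map>

    using VAddr = std::uint64_t;
    using PAddr = std::uint64_t;
    constexpr std::size_t PageSize = 0x1000;

    struct ToyPageTable {
        std::map<VAddr, PAddr> mapping; // per-page VA -> PA

        PAddr GetPhysicalAddr(VAddr va) const {
            return mapping.at(va & ~(PageSize - 1));
        }

        void ClosePages(VAddr va, std::size_t num_pages) {
            for (std::size_t i = 0; i < num_pages; ++i) {
                // Translate each page, then drop its reference (elided).
                [[maybe_unused]] const PAddr pa =
                    GetPhysicalAddr(va + i * PageSize);
            }
        }

        void Unmap(VAddr va, std::size_t num_pages) {
            for (std::size_t i = 0; i < num_pages; ++i) {
                mapping.erase(va + i * PageSize);
            }
        }
    };

    int main() {
        ToyPageTable pt;
        pt.mapping[0x1000] = 0x80000;
        pt.ClosePages(0x1000, 1); // must run first: needs the translation
        pt.Unmap(0x1000, 1);      // afterwards GetPhysicalAddr would throw
    }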
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 0a454b05b..f1ca785d7 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -107,10 +107,6 @@ public:
         return *m_page_table_impl;
     }
 
-    KBlockInfoManager* GetBlockInfoManager() {
-        return m_block_info_manager;
-    }
-
     bool CanContain(VAddr addr, size_t size, KMemoryState state) const;
 
 protected:
@@ -265,6 +261,10 @@ private:
     void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address,
                                                  size_t size, KMemoryPermission prot_perm);
 
+    // HACK: These will be removed once we automatically manage page reference counts.
+    void HACK_OpenPages(PAddr phys_addr, size_t num_pages);
+    void HACK_ClosePages(VAddr virt_addr, size_t num_pages);
+
     mutable KLightLock m_general_lock;
     mutable KLightLock m_map_physical_memory_lock;
 
@@ -488,7 +488,6 @@ private:
     std::unique_ptr<Common::PageTable> m_page_table_impl;
 
     Core::System& m_system;
-    KernelCore& m_kernel;
 };
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp
index 3cf2b5d91..0aa68103c 100644
--- a/src/core/hle/kernel/k_shared_memory.cpp
+++ b/src/core/hle/kernel/k_shared_memory.cpp
@@ -13,7 +13,10 @@
 namespace Kernel {
 
 KSharedMemory::KSharedMemory(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
-KSharedMemory::~KSharedMemory() = default;
+
+KSharedMemory::~KSharedMemory() {
+    kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemoryMax, size);
+}
 
 Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
                                  Svc::MemoryPermission owner_permission_,
@@ -46,8 +49,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
     R_UNLESS(physical_address != 0, ResultOutOfMemory);
 
     //! Insert the result into our page group.
-    page_group.emplace(kernel, &kernel.GetSystemSystemResource().GetBlockInfoManager());
-    page_group->AddBlock(physical_address, num_pages);
+    page_group.emplace(physical_address, num_pages);
 
     // Commit our reservation.
     memory_reservation.Commit();
@@ -60,7 +62,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
     is_initialized = true;
 
     // Clear all pages in the memory.
-    for (const auto& block : *page_group) {
+    for (const auto& block : page_group->Nodes()) {
         std::memset(device_memory_.GetPointer<void>(block.GetAddress()), 0, block.GetSize());
     }
 
@@ -69,8 +71,13 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
 
 void KSharedMemory::Finalize() {
     // Close and finalize the page group.
-    page_group->Close();
-    page_group->Finalize();
+    // page_group->Close();
+    // page_group->Finalize();
+
+    //! HACK: Manually close.
+    for (const auto& block : page_group->Nodes()) {
+        kernel.MemoryManager().Close(block.GetAddress(), block.GetNumPages());
+    }
 
     // Release the memory reservation.
     resource_limit->Release(LimitableResource::PhysicalMemoryMax, size);
diff --git a/src/core/hle/kernel/memory_types.h b/src/core/hle/kernel/memory_types.h
index 92b8b37ac..3975507bd 100644
--- a/src/core/hle/kernel/memory_types.h
+++ b/src/core/hle/kernel/memory_types.h
@@ -14,7 +14,4 @@ constexpr std::size_t PageSize{1 << PageBits};
 
 using Page = std::array<u8, PageSize>;
 
-using KPhysicalAddress = PAddr;
-using KProcessAddress = VAddr;
-
 } // namespace Kernel
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index aca442196..788ee2160 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -1485,7 +1485,7 @@ static Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle p
                  ResultInvalidMemoryRegion);
 
     // Create a new page group.
-    KPageGroup pg{system.Kernel(), dst_pt.GetBlockInfoManager()};
+    KPageGroup pg;
     R_TRY(src_pt.MakeAndOpenPageGroup(
         std::addressof(pg), src_address, size / PageSize, KMemoryState::FlagCanMapProcess,
         KMemoryState::FlagCanMapProcess, KMemoryPermission::None, KMemoryPermission::None,