Diffstat:
 src/core/CMakeLists.txt                  |   1
 src/core/hle/kernel/k_code_memory.cpp    |  29
 src/core/hle/kernel/k_code_memory.h      |   6
 src/core/hle/kernel/k_memory_manager.cpp |   8
 src/core/hle/kernel/k_page_group.cpp     | 121
 src/core/hle/kernel/k_page_group.h       | 163
 src/core/hle/kernel/k_page_table.cpp     | 142
 src/core/hle/kernel/k_page_table.h       |   9
 src/core/hle/kernel/k_shared_memory.cpp  |  19
 src/core/hle/kernel/memory_types.h       |   3
 src/core/hle/kernel/svc.cpp              |   2
 11 files changed, 322 insertions(+), 181 deletions(-)
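
In summary, this change replaces KPageGroup's std::list<Node> storage with an intrusive singly linked list of KBlockInfo nodes allocated from a KBlockInfoManager, and folds page reference counting into KPageTable::Operate, removing the HACK_OpenPages/HACK_ClosePages workarounds. Callers now iterate a group directly rather than through Nodes(). A minimal sketch of the new iteration API (the helper function is hypothetical, not part of the diff):

    // Hypothetical helper: sums the byte size of a page group using the new
    // KPageGroup::Iterator, which walks the intrusive KBlockInfo list.
    size_t CountGroupBytes(const Kernel::KPageGroup& pg) {
        size_t total = 0;
        for (const auto& block : pg) { // begin()/end() replace Nodes()
            total += block.GetSize();  // GetNumPages() * PageSize
        }
        return total;
    }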
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 0252c8c31..5afdeb5ff 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -226,6 +226,7 @@ add_library(core STATIC
     hle/kernel/k_page_buffer.h
     hle/kernel/k_page_heap.cpp
     hle/kernel/k_page_heap.h
+    hle/kernel/k_page_group.cpp
     hle/kernel/k_page_group.h
     hle/kernel/k_page_table.cpp
     hle/kernel/k_page_table.h
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index 4b1c134d4..d9da1e600 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -27,13 +27,13 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
     auto& page_table = m_owner->PageTable();

     // Construct the page group.
-    m_page_group = {};
+    m_page_group.emplace(kernel, page_table.GetBlockInfoManager());

     // Lock the memory.
-    R_TRY(page_table.LockForCodeMemory(&m_page_group, addr, size))
+    R_TRY(page_table.LockForCodeMemory(std::addressof(*m_page_group), addr, size))

     // Clear the memory.
-    for (const auto& block : m_page_group.Nodes()) {
+    for (const auto& block : *m_page_group) {
         std::memset(device_memory.GetPointer<void>(block.GetAddress()), 0xFF, block.GetSize());
     }

@@ -51,12 +51,13 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
 void KCodeMemory::Finalize() {
     // Unlock.
     if (!m_is_mapped && !m_is_owner_mapped) {
-        const size_t size = m_page_group.GetNumPages() * PageSize;
-        m_owner->PageTable().UnlockForCodeMemory(m_address, size, m_page_group);
+        const size_t size = m_page_group->GetNumPages() * PageSize;
+        m_owner->PageTable().UnlockForCodeMemory(m_address, size, *m_page_group);
     }

     // Close the page group.
-    m_page_group = {};
+    m_page_group->Close();
+    m_page_group->Finalize();

     // Close our reference to our owner.
     m_owner->Close();
@@ -64,7 +65,7 @@ void KCodeMemory::Finalize() {

 Result KCodeMemory::Map(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

     // Lock ourselves.
     KScopedLightLock lk(m_lock);
@@ -74,7 +75,7 @@ Result KCodeMemory::Map(VAddr address, size_t size) {

     // Map the memory.
     R_TRY(kernel.CurrentProcess()->PageTable().MapPages(
-        address, m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));
+        address, *m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));

     // Mark ourselves as mapped.
     m_is_mapped = true;
@@ -84,13 +85,13 @@ Result KCodeMemory::Map(VAddr address, size_t size) {

 Result KCodeMemory::Unmap(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

     // Lock ourselves.
     KScopedLightLock lk(m_lock);

     // Unmap the memory.
-    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, m_page_group,
+    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, *m_page_group,
                                                           KMemoryState::CodeOut));

     // Mark ourselves as unmapped.
@@ -101,7 +102,7 @@ Result KCodeMemory::Unmap(VAddr address, size_t size) {

 Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

     // Lock ourselves.
     KScopedLightLock lk(m_lock);
@@ -125,7 +126,7 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission

     // Map the memory.
     R_TRY(
-        m_owner->PageTable().MapPages(address, m_page_group, KMemoryState::GeneratedCode, k_perm));
+        m_owner->PageTable().MapPages(address, *m_page_group, KMemoryState::GeneratedCode, k_perm));

     // Mark ourselves as mapped.
     m_is_owner_mapped = true;
@@ -135,13 +136,13 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission

 Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

     // Lock ourselves.
     KScopedLightLock lk(m_lock);

     // Unmap the memory.
-    R_TRY(m_owner->PageTable().UnmapPages(address, m_page_group, KMemoryState::GeneratedCode));
+    R_TRY(m_owner->PageTable().UnmapPages(address, *m_page_group, KMemoryState::GeneratedCode));

     // Mark ourselves as unmapped.
     m_is_owner_mapped = false;
diff --git a/src/core/hle/kernel/k_code_memory.h b/src/core/hle/kernel/k_code_memory.h
index 2e7e1436a..5b260b385 100644
--- a/src/core/hle/kernel/k_code_memory.h
+++ b/src/core/hle/kernel/k_code_memory.h
@@ -3,6 +3,8 @@

 #pragma once

+#include <optional>
+
 #include "common/common_types.h"
 #include "core/device_memory.h"
 #include "core/hle/kernel/k_auto_object.h"
@@ -49,11 +51,11 @@ public:
         return m_address;
     }
     size_t GetSize() const {
-        return m_is_initialized ? m_page_group.GetNumPages() * PageSize : 0;
+        return m_is_initialized ? m_page_group->GetNumPages() * PageSize : 0;
     }

 private:
-    KPageGroup m_page_group{};
+    std::optional<KPageGroup> m_page_group{};
     KProcess* m_owner{};
     VAddr m_address{};
     KLightLock m_lock;
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index bd33571da..cd6ea388e 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -223,7 +223,7 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,

     // Ensure that we don't leave anything un-freed.
     ON_RESULT_FAILURE {
-        for (const auto& it : out->Nodes()) {
+        for (const auto& it : *out) {
             auto& manager = this->GetManager(it.GetAddress());
             const size_t node_num_pages = std::min<u64>(
                 it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize);
@@ -285,7 +285,7 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
         m_has_optimized_process[static_cast<size_t>(pool)], true));

     // Open the first reference to the pages.
-    for (const auto& block : out->Nodes()) {
+    for (const auto& block : *out) {
         PAddr cur_address = block.GetAddress();
         size_t remaining_pages = block.GetNumPages();
         while (remaining_pages > 0) {
@@ -335,7 +335,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
     // Perform optimized memory tracking, if we should.
     if (optimized) {
         // Iterate over the allocated blocks.
-        for (const auto& block : out->Nodes()) {
+        for (const auto& block : *out) {
             // Get the block extents.
             const PAddr block_address = block.GetAddress();
             const size_t block_pages = block.GetNumPages();
@@ -391,7 +391,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
         }
     } else {
         // Set all the allocated memory.
-        for (const auto& block : out->Nodes()) {
+        for (const auto& block : *out) {
             std::memset(m_system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
                         block.GetSize());
         }
diff --git a/src/core/hle/kernel/k_page_group.cpp b/src/core/hle/kernel/k_page_group.cpp
new file mode 100644
index 000000000..d8c644a33
--- /dev/null
+++ b/src/core/hle/kernel/k_page_group.cpp
@@ -0,0 +1,121 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/k_dynamic_resource_manager.h"
+#include "core/hle/kernel/k_memory_manager.h"
+#include "core/hle/kernel/k_page_group.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel {
+
+void KPageGroup::Finalize() {
+    KBlockInfo* cur = m_first_block;
+    while (cur != nullptr) {
+        KBlockInfo* next = cur->GetNext();
+        m_manager->Free(cur);
+        cur = next;
+    }
+
+    m_first_block = nullptr;
+    m_last_block = nullptr;
+}
+
+void KPageGroup::CloseAndReset() {
+    auto& mm = m_kernel.MemoryManager();
+
+    KBlockInfo* cur = m_first_block;
+    while (cur != nullptr) {
+        KBlockInfo* next = cur->GetNext();
+        mm.Close(cur->GetAddress(), cur->GetNumPages());
+        m_manager->Free(cur);
+        cur = next;
+    }
+
+    m_first_block = nullptr;
+    m_last_block = nullptr;
+}
+
+size_t KPageGroup::GetNumPages() const {
+    size_t num_pages = 0;
+
+    for (const auto& it : *this) {
+        num_pages += it.GetNumPages();
+    }
+
+    return num_pages;
+}
+
+Result KPageGroup::AddBlock(KPhysicalAddress addr, size_t num_pages) {
+    // Succeed immediately if we're adding no pages.
+    R_SUCCEED_IF(num_pages == 0);
+
+    // Check for overflow.
+    ASSERT(addr < addr + num_pages * PageSize);
+
+    // Try to just append to the last block.
+    if (m_last_block != nullptr) {
+        R_SUCCEED_IF(m_last_block->TryConcatenate(addr, num_pages));
+    }
+
+    // Allocate a new block.
+    KBlockInfo* new_block = m_manager->Allocate();
+    R_UNLESS(new_block != nullptr, ResultOutOfResource);
+
+    // Initialize the block.
+    new_block->Initialize(addr, num_pages);
+
+    // Add the block to our list.
+    if (m_last_block != nullptr) {
+        m_last_block->SetNext(new_block);
+    } else {
+        m_first_block = new_block;
+    }
+    m_last_block = new_block;
+
+    R_SUCCEED();
+}
+
+void KPageGroup::Open() const {
+    auto& mm = m_kernel.MemoryManager();
+
+    for (const auto& it : *this) {
+        mm.Open(it.GetAddress(), it.GetNumPages());
+    }
+}
+
+void KPageGroup::OpenFirst() const {
+    auto& mm = m_kernel.MemoryManager();
+
+    for (const auto& it : *this) {
+        mm.OpenFirst(it.GetAddress(), it.GetNumPages());
+    }
+}
+
+void KPageGroup::Close() const {
+    auto& mm = m_kernel.MemoryManager();
+
+    for (const auto& it : *this) {
+        mm.Close(it.GetAddress(), it.GetNumPages());
+    }
+}
+
+bool KPageGroup::IsEquivalentTo(const KPageGroup& rhs) const {
+    auto lit = this->begin();
+    auto rit = rhs.begin();
+    auto lend = this->end();
+    auto rend = rhs.end();
+
+    while (lit != lend && rit != rend) {
+        if (*lit != *rit) {
+            return false;
+        }
+
+        ++lit;
+        ++rit;
+    }
+
+    return lit == lend && rit == rend;
+}
+
+} // namespace Kernel
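
A note on AddBlock above: because it first attempts TryConcatenate on the last block, a range that is physically contiguous with the previous one extends that block instead of allocating a new KBlockInfo. A sketch of the resulting behavior, assuming a valid KernelCore& kernel and KBlockInfoManager* manager and hypothetical addresses (PageSize is 0x1000):

    Kernel::KPageGroup pg{kernel, manager};
    ASSERT(pg.AddBlock(0x10000, 2).IsSuccess()); // one block: 2 pages at 0x10000
    ASSERT(pg.AddBlock(0x12000, 1).IsSuccess()); // contiguous, merged via TryConcatenate
    ASSERT(pg.GetNumPages() == 3);               // still a single KBlockInfo, now 3 pages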
diff --git a/src/core/hle/kernel/k_page_group.h b/src/core/hle/kernel/k_page_group.h
index 316f172f2..c07f17663 100644
--- a/src/core/hle/kernel/k_page_group.h
+++ b/src/core/hle/kernel/k_page_group.h
@@ -1,4 +1,4 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later

 #pragma once
@@ -13,24 +13,23 @@

 namespace Kernel {

+class KBlockInfoManager;
+class KernelCore;
 class KPageGroup;

 class KBlockInfo {
-private:
-    friend class KPageGroup;
-
 public:
-    constexpr KBlockInfo() = default;
+    constexpr explicit KBlockInfo() : m_next(nullptr) {}

-    constexpr void Initialize(PAddr addr, size_t np) {
+    constexpr void Initialize(KPhysicalAddress addr, size_t np) {
         ASSERT(Common::IsAligned(addr, PageSize));
         ASSERT(static_cast<u32>(np) == np);

-        m_page_index = static_cast<u32>(addr) / PageSize;
+        m_page_index = static_cast<u32>(addr / PageSize);
         m_num_pages = static_cast<u32>(np);
     }

-    constexpr PAddr GetAddress() const {
+    constexpr KPhysicalAddress GetAddress() const {
         return m_page_index * PageSize;
     }
     constexpr size_t GetNumPages() const {
@@ -39,10 +38,10 @@ public:
     constexpr size_t GetSize() const {
         return this->GetNumPages() * PageSize;
     }
-    constexpr PAddr GetEndAddress() const {
+    constexpr KPhysicalAddress GetEndAddress() const {
         return (m_page_index + m_num_pages) * PageSize;
     }
-    constexpr PAddr GetLastAddress() const {
+    constexpr KPhysicalAddress GetLastAddress() const {
         return this->GetEndAddress() - 1;
     }

@@ -62,8 +61,8 @@ public:
         return !(*this == rhs);
     }

-    constexpr bool IsStrictlyBefore(PAddr addr) const {
-        const PAddr end = this->GetEndAddress();
+    constexpr bool IsStrictlyBefore(KPhysicalAddress addr) const {
+        const KPhysicalAddress end = this->GetEndAddress();

         if (m_page_index != 0 && end == 0) {
             return false;
@@ -72,11 +71,11 @@ public:
         return end < addr;
     }

-    constexpr bool operator<(PAddr addr) const {
+    constexpr bool operator<(KPhysicalAddress addr) const {
         return this->IsStrictlyBefore(addr);
     }

-    constexpr bool TryConcatenate(PAddr addr, size_t np) {
+    constexpr bool TryConcatenate(KPhysicalAddress addr, size_t np) {
         if (addr != 0 && addr == this->GetEndAddress()) {
             m_num_pages += static_cast<u32>(np);
             return true;
@@ -90,96 +89,118 @@ private:
     }

 private:
+    friend class KPageGroup;
+
     KBlockInfo* m_next{};
     u32 m_page_index{};
     u32 m_num_pages{};
 };
 static_assert(sizeof(KBlockInfo) <= 0x10);

-class KPageGroup final {
+class KPageGroup {
 public:
-    class Node final {
+    class Iterator {
     public:
-        constexpr Node(u64 addr_, std::size_t num_pages_) : addr{addr_}, num_pages{num_pages_} {}
+        using iterator_category = std::forward_iterator_tag;
+        using value_type = const KBlockInfo;
+        using difference_type = std::ptrdiff_t;
+        using pointer = value_type*;
+        using reference = value_type&;
+
+        constexpr explicit Iterator(pointer n) : m_node(n) {}
+
+        constexpr bool operator==(const Iterator& rhs) const {
+            return m_node == rhs.m_node;
+        }
+        constexpr bool operator!=(const Iterator& rhs) const {
+            return !(*this == rhs);
+        }

-        constexpr u64 GetAddress() const {
-            return addr;
+        constexpr pointer operator->() const {
+            return m_node;
+        }
+        constexpr reference operator*() const {
+            return *m_node;
         }

-        constexpr std::size_t GetNumPages() const {
-            return num_pages;
+        constexpr Iterator& operator++() {
+            m_node = m_node->GetNext();
+            return *this;
         }

-        constexpr std::size_t GetSize() const {
-            return GetNumPages() * PageSize;
+        constexpr Iterator operator++(int) {
+            const Iterator it{*this};
+            ++(*this);
+            return it;
         }

     private:
-        u64 addr{};
-        std::size_t num_pages{};
+        pointer m_node{};
     };

-public:
-    KPageGroup() = default;
-    KPageGroup(u64 address, u64 num_pages) {
-        ASSERT(AddBlock(address, num_pages).IsSuccess());
+    explicit KPageGroup(KernelCore& kernel, KBlockInfoManager* m)
+        : m_kernel{kernel}, m_manager{m} {}
+    ~KPageGroup() {
+        this->Finalize();
     }

-    constexpr std::list<Node>& Nodes() {
-        return nodes;
-    }
+    void CloseAndReset();
+    void Finalize();

-    constexpr const std::list<Node>& Nodes() const {
-        return nodes;
+    Iterator begin() const {
+        return Iterator{m_first_block};
+    }
+    Iterator end() const {
+        return Iterator{nullptr};
+    }
+    bool empty() const {
+        return m_first_block == nullptr;
     }

-    std::size_t GetNumPages() const {
-        std::size_t num_pages = 0;
-        for (const Node& node : nodes) {
-            num_pages += node.GetNumPages();
-        }
-        return num_pages;
-    }
-
-    bool IsEqual(KPageGroup& other) const {
-        auto this_node = nodes.begin();
-        auto other_node = other.nodes.begin();
-        while (this_node != nodes.end() && other_node != other.nodes.end()) {
-            if (this_node->GetAddress() != other_node->GetAddress() ||
-                this_node->GetNumPages() != other_node->GetNumPages()) {
-                return false;
-            }
-            this_node = std::next(this_node);
-            other_node = std::next(other_node);
-        }
+    Result AddBlock(KPhysicalAddress addr, size_t num_pages);
+    void Open() const;
+    void OpenFirst() const;
+    void Close() const;
+
+    size_t GetNumPages() const;
+
+    bool IsEquivalentTo(const KPageGroup& rhs) const;
+
+    bool operator==(const KPageGroup& rhs) const {
+        return this->IsEquivalentTo(rhs);
+    }

-        return this_node == nodes.end() && other_node == other.nodes.end();
+    bool operator!=(const KPageGroup& rhs) const {
+        return !(*this == rhs);
     }

-    Result AddBlock(u64 address, u64 num_pages) {
-        if (!num_pages) {
-            return ResultSuccess;
-        }
-        if (!nodes.empty()) {
-            const auto node = nodes.back();
-            if (node.GetAddress() + node.GetNumPages() * PageSize == address) {
-                address = node.GetAddress();
-                num_pages += node.GetNumPages();
-                nodes.pop_back();
-            }
-        }
-        nodes.push_back({address, num_pages});
-        return ResultSuccess;
-    }
-
-    bool Empty() const {
-        return nodes.empty();
-    }
-
-    void Finalize() {}
-
 private:
-    std::list<Node> nodes;
+    KernelCore& m_kernel;
+    KBlockInfo* m_first_block{};
+    KBlockInfo* m_last_block{};
+    KBlockInfoManager* m_manager{};
+};
+
+class KScopedPageGroup {
+public:
+    explicit KScopedPageGroup(const KPageGroup* gp) : m_pg(gp) {
+        if (m_pg) {
+            m_pg->Open();
+        }
+    }
+    explicit KScopedPageGroup(const KPageGroup& gp) : KScopedPageGroup(std::addressof(gp)) {}
+    ~KScopedPageGroup() {
+        if (m_pg) {
+            m_pg->Close();
+        }
+    }
+
+    void CancelClose() {
+        m_pg = nullptr;
+    }
+
+private:
+    const KPageGroup* m_pg{};
 };

 } // namespace Kernel
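
The KScopedPageGroup helper added above is what lets the MapGroup path in KPageTable::Operate (further down in this diff) keep reference counts balanced: it opens every block's pages on construction and closes them again on destruction unless CancelClose() is called once the operation has succeeded. A minimal usage sketch, with a hypothetical function and mapping step:

    // Hypothetical sketch of the RAII pattern; mirrors OperationType::MapGroup below.
    Result MapAndKeepReferences(const Kernel::KPageGroup& pg) {
        Kernel::KScopedPageGroup spg(pg); // Open() a reference to every page
        R_TRY(DoTheMapping(pg));          // hypothetical step; early failure closes pages
        spg.CancelClose();                // success: persist the references
        R_SUCCEED();
    }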
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 612fc76fa..9c7ac22dc 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -100,7 +100,7 @@ constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType a

 KPageTable::KPageTable(Core::System& system_)
     : m_general_lock{system_.Kernel()},
-      m_map_physical_memory_lock{system_.Kernel()}, m_system{system_} {}
+      m_map_physical_memory_lock{system_.Kernel()}, m_system{system_}, m_kernel{system_.Kernel()} {}

 KPageTable::~KPageTable() = default;

@@ -373,7 +373,7 @@ Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState sta
                                              m_memory_block_slab_manager);

     // Allocate and open.
-    KPageGroup pg;
+    KPageGroup pg{m_kernel, m_block_info_manager};
     R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
         &pg, num_pages,
         KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocation_option)));
@@ -432,7 +432,7 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si
     const size_t num_pages = size / PageSize;

     // Create page groups for the memory being mapped.
-    KPageGroup pg;
+    KPageGroup pg{m_kernel, m_block_info_manager};
     AddRegionToPages(src_address, num_pages, pg);

     // Reprotect the source as kernel-read/not mapped.
@@ -593,7 +593,7 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     const size_t size = num_pages * PageSize;

     // We're making a new group, not adding to an existing one.
-    R_UNLESS(pg.Empty(), ResultInvalidCurrentMemory);
+    R_UNLESS(pg.empty(), ResultInvalidCurrentMemory);

     // Begin traversal.
     Common::PageTable::TraversalContext context;
@@ -640,11 +640,10 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     R_SUCCEED();
 }

-bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) {
+bool KPageTable::IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages) {
     ASSERT(this->IsLockedByCurrentThread());

     const size_t size = num_pages * PageSize;
-    const auto& pg = pg_ll.Nodes();
     const auto& memory_layout = m_system.Kernel().MemoryLayout();

     // Empty groups are necessarily invalid.
@@ -942,9 +941,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add

     ON_RESULT_FAILURE {
         if (cur_mapped_addr != dst_addr) {
-            // HACK: Manually close the pages.
-            HACK_ClosePages(dst_addr, (cur_mapped_addr - dst_addr) / PageSize);
-
             ASSERT(Operate(dst_addr, (cur_mapped_addr - dst_addr) / PageSize,
                            KMemoryPermission::None, OperationType::Unmap)
                        .IsSuccess());
@@ -1020,9 +1016,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
         // Map the page.
         R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, start_partial_page));

-        // HACK: Manually open the pages.
-        HACK_OpenPages(start_partial_page, 1);
-
         // Update tracking extents.
         cur_mapped_addr += PageSize;
         cur_block_addr += PageSize;
@@ -1051,9 +1044,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
             R_TRY(Operate(cur_mapped_addr, cur_block_size / PageSize, test_perm, OperationType::Map,
                           cur_block_addr));

-            // HACK: Manually open the pages.
-            HACK_OpenPages(cur_block_addr, cur_block_size / PageSize);
-
             // Update tracking extents.
             cur_mapped_addr += cur_block_size;
             cur_block_addr = next_entry.phys_addr;
@@ -1073,9 +1063,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
         R_TRY(Operate(cur_mapped_addr, last_block_size / PageSize, test_perm, OperationType::Map,
                       cur_block_addr));

-        // HACK: Manually open the pages.
-        HACK_OpenPages(cur_block_addr, last_block_size / PageSize);
-
         // Update tracking extents.
         cur_mapped_addr += last_block_size;
         cur_block_addr += last_block_size;
@@ -1107,9 +1094,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add

         // Map the page.
         R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, end_partial_page));
-
-        // HACK: Manually open the pages.
-        HACK_OpenPages(end_partial_page, 1);
     }

     // Update memory blocks to reflect our changes
@@ -1211,9 +1195,6 @@ Result KPageTable::CleanupForIpcServer(VAddr address, size_t size, KMemoryState
     const size_t aligned_size = aligned_end - aligned_start;
     const size_t aligned_num_pages = aligned_size / PageSize;

-    // HACK: Manually close the pages.
-    HACK_ClosePages(aligned_start, aligned_num_pages);
-
     // Unmap the pages.
     R_TRY(Operate(aligned_start, aligned_num_pages, KMemoryPermission::None, OperationType::Unmap));

@@ -1501,17 +1482,6 @@ void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLi
     }
 }

-void KPageTable::HACK_OpenPages(PAddr phys_addr, size_t num_pages) {
-    m_system.Kernel().MemoryManager().OpenFirst(phys_addr, num_pages);
-}
-
-void KPageTable::HACK_ClosePages(VAddr virt_addr, size_t num_pages) {
-    for (size_t index = 0; index < num_pages; ++index) {
-        const auto paddr = GetPhysicalAddr(virt_addr + (index * PageSize));
-        m_system.Kernel().MemoryManager().Close(paddr, 1);
-    }
-}
-
 Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
     // Lock the physical memory lock.
     KScopedLightLock phys_lk(m_map_physical_memory_lock);
@@ -1572,7 +1542,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
     R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);

     // Allocate pages for the new memory.
-    KPageGroup pg;
+    KPageGroup pg{m_kernel, m_block_info_manager};
     R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
         &pg, (size - mapped_size) / PageSize, m_allocate_option, 0, 0));

@@ -1650,7 +1620,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
     KScopedPageTableUpdater updater(this);

     // Prepare to iterate over the memory.
-    auto pg_it = pg.Nodes().begin();
+    auto pg_it = pg.begin();
     PAddr pg_phys_addr = pg_it->GetAddress();
     size_t pg_pages = pg_it->GetNumPages();

@@ -1680,9 +1650,6 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                                                        last_unmap_address + 1 - cur_address) /
                                               PageSize;

-                    // HACK: Manually close the pages.
-                    HACK_ClosePages(cur_address, cur_pages);
-
                     // Unmap.
                     ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None,
                                    OperationType::Unmap)
@@ -1703,7 +1670,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
             // Release any remaining unmapped memory.
             m_system.Kernel().MemoryManager().OpenFirst(pg_phys_addr, pg_pages);
             m_system.Kernel().MemoryManager().Close(pg_phys_addr, pg_pages);
-            for (++pg_it; pg_it != pg.Nodes().end(); ++pg_it) {
+            for (++pg_it; pg_it != pg.end(); ++pg_it) {
                 m_system.Kernel().MemoryManager().OpenFirst(pg_it->GetAddress(),
                                                             pg_it->GetNumPages());
                 m_system.Kernel().MemoryManager().Close(pg_it->GetAddress(),
@@ -1731,7 +1698,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                 // Check if we're at the end of the physical block.
                 if (pg_pages == 0) {
                     // Ensure there are more pages to map.
-                    ASSERT(pg_it != pg.Nodes().end());
+                    ASSERT(pg_it != pg.end());

                     // Advance our physical block.
                     ++pg_it;
@@ -1742,10 +1709,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                 // Map whatever we can.
                 const size_t cur_pages = std::min(pg_pages, map_pages);
                 R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite,
-                              OperationType::Map, pg_phys_addr));
-
-                // HACK: Manually open the pages.
-                HACK_OpenPages(pg_phys_addr, cur_pages);
+                              OperationType::MapFirst, pg_phys_addr));

                 // Advance.
                 cur_address += cur_pages * PageSize;
@@ -1888,9 +1852,6 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
                                                last_address + 1 - cur_address) /
                                       PageSize;

-                // HACK: Manually close the pages.
-                HACK_ClosePages(cur_address, cur_pages);
-
                 // Unmap.
                 ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap)
                            .IsSuccess());
@@ -1955,7 +1916,7 @@ Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size)
     R_TRY(dst_allocator_result);

     // Map the memory.
-    KPageGroup page_linked_list;
+    KPageGroup page_linked_list{m_kernel, m_block_info_manager};
     const size_t num_pages{size / PageSize};
     const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
         KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
@@ -2022,14 +1983,14 @@ Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size
                                              num_dst_allocator_blocks);
     R_TRY(dst_allocator_result);

-    KPageGroup src_pages;
-    KPageGroup dst_pages;
+    KPageGroup src_pages{m_kernel, m_block_info_manager};
+    KPageGroup dst_pages{m_kernel, m_block_info_manager};
     const size_t num_pages{size / PageSize};

     AddRegionToPages(src_address, num_pages, src_pages);
     AddRegionToPages(dst_address, num_pages, dst_pages);

-    R_UNLESS(dst_pages.IsEqual(src_pages), ResultInvalidMemoryRegion);
+    R_UNLESS(dst_pages.IsEquivalentTo(src_pages), ResultInvalidMemoryRegion);

     {
         auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); });
@@ -2060,7 +2021,7 @@ Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,

     VAddr cur_addr{addr};

-    for (const auto& node : page_linked_list.Nodes()) {
+    for (const auto& node : page_linked_list) {
         if (const auto result{
                 Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())};
             result.IsError()) {
@@ -2160,7 +2121,7 @@ Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {

     VAddr cur_addr{addr};

-    for (const auto& node : page_linked_list.Nodes()) {
+    for (const auto& node : page_linked_list) {
         if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None,
                                       OperationType::Unmap)};
             result.IsError()) {
@@ -2527,13 +2488,13 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
         R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);

         // Allocate pages for the heap extension.
-        KPageGroup pg;
+        KPageGroup pg{m_kernel, m_block_info_manager};
         R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
             &pg, allocation_size / PageSize,
             KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));

         // Clear all the newly allocated pages.
-        for (const auto& it : pg.Nodes()) {
+        for (const auto& it : pg) {
             std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
                         it.GetSize());
         }
@@ -2610,11 +2571,23 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_
     if (is_map_only) {
         R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
     } else {
-        KPageGroup page_group;
-        R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
-            &page_group, needed_num_pages,
-            KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0));
-        R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup));
+        // Create a page group to hold the pages we allocate.
+        KPageGroup pg{m_kernel, m_block_info_manager};
+
+        R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
+            &pg, needed_num_pages,
+            KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
+
+        // Ensure that the page group is closed when we're done working with it.
+        SCOPE_EXIT({ pg.Close(); });
+
+        // Clear all pages.
+        for (const auto& it : pg) {
+            std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()),
+                        m_heap_fill_value, it.GetSize());
+        }
+
+        R_TRY(Operate(addr, needed_num_pages, pg, OperationType::MapGroup));
     }

     // Update the blocks.
@@ -2795,19 +2768,28 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_
     ASSERT(num_pages > 0);
     ASSERT(num_pages == page_group.GetNumPages());

-    for (const auto& node : page_group.Nodes()) {
-        const size_t size{node.GetNumPages() * PageSize};
+    switch (operation) {
+    case OperationType::MapGroup: {
+        // We want to maintain a new reference to every page in the group.
+        KScopedPageGroup spg(page_group);
+
+        for (const auto& node : page_group) {
+            const size_t size{node.GetNumPages() * PageSize};

-        switch (operation) {
-        case OperationType::MapGroup:
-            m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress());
-            break;
-        default:
-            ASSERT(false);
-            break;
-        }
+            // Map the pages.
+            m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress());
+
+            addr += size;
+        }

-        addr += size;
-    }
+        // We succeeded! We want to persist the reference to the pages.
+        spg.CancelClose();
+
+        break;
+    }
+    default:
+        ASSERT(false);
+        break;
+    }

     R_SUCCEED();
@@ -2822,13 +2804,29 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm,
     ASSERT(ContainsPages(addr, num_pages));

     switch (operation) {
-    case OperationType::Unmap:
+    case OperationType::Unmap: {
+        // Ensure that any pages we track close on exit.
+        KPageGroup pages_to_close{m_kernel, this->GetBlockInfoManager()};
+        SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
+
+        this->AddRegionToPages(addr, num_pages, pages_to_close);
         m_system.Memory().UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize);
         break;
+    }
+    case OperationType::MapFirst:
     case OperationType::Map: {
         ASSERT(map_addr);
         ASSERT(Common::IsAligned(map_addr, PageSize));
         m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr);
+
+        // Open references to pages, if we should.
+        if (IsHeapPhysicalAddress(m_kernel.MemoryLayout(), map_addr)) {
+            if (operation == OperationType::MapFirst) {
+                m_kernel.MemoryManager().OpenFirst(map_addr, num_pages);
+            } else {
+                m_kernel.MemoryManager().Open(map_addr, num_pages);
+            }
+        }
         break;
     }
     case OperationType::Separate: {
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index f1ca785d7..0a454b05b 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -107,6 +107,10 @@ public:
         return *m_page_table_impl;
     }

+    KBlockInfoManager* GetBlockInfoManager() {
+        return m_block_info_manager;
+    }
+
     bool CanContain(VAddr addr, size_t size, KMemoryState state) const;

 protected:
@@ -261,10 +265,6 @@ private:
     void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address,
                                                  size_t size, KMemoryPermission prot_perm);

-    // HACK: These will be removed once we automatically manage page reference counts.
-    void HACK_OpenPages(PAddr phys_addr, size_t num_pages);
-    void HACK_ClosePages(VAddr virt_addr, size_t num_pages);
-
     mutable KLightLock m_general_lock;
     mutable KLightLock m_map_physical_memory_lock;

@@ -488,6 +488,7 @@ private:
     std::unique_ptr<Common::PageTable> m_page_table_impl;

     Core::System& m_system;
+    KernelCore& m_kernel;
 };

 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp
index 0aa68103c..3cf2b5d91 100644
--- a/src/core/hle/kernel/k_shared_memory.cpp
+++ b/src/core/hle/kernel/k_shared_memory.cpp
@@ -13,10 +13,7 @@
 namespace Kernel {

 KSharedMemory::KSharedMemory(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
-
-KSharedMemory::~KSharedMemory() {
-    kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemoryMax, size);
-}
+KSharedMemory::~KSharedMemory() = default;

 Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
                                  Svc::MemoryPermission owner_permission_,
@@ -49,7 +46,8 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
     R_UNLESS(physical_address != 0, ResultOutOfMemory);

     //! Insert the result into our page group.
-    page_group.emplace(physical_address, num_pages);
+    page_group.emplace(kernel, &kernel.GetSystemSystemResource().GetBlockInfoManager());
+    page_group->AddBlock(physical_address, num_pages);

     // Commit our reservation.
     memory_reservation.Commit();
@@ -62,7 +60,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
     is_initialized = true;

     // Clear all pages in the memory.
-    for (const auto& block : page_group->Nodes()) {
+    for (const auto& block : *page_group) {
         std::memset(device_memory_.GetPointer<void>(block.GetAddress()), 0, block.GetSize());
     }

@@ -71,13 +69,8 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o

 void KSharedMemory::Finalize() {
     // Close and finalize the page group.
-    // page_group->Close();
-    // page_group->Finalize();
-
-    //! HACK: Manually close.
-    for (const auto& block : page_group->Nodes()) {
-        kernel.MemoryManager().Close(block.GetAddress(), block.GetNumPages());
-    }
+    page_group->Close();
+    page_group->Finalize();

     // Release the memory reservation.
     resource_limit->Release(LimitableResource::PhysicalMemoryMax, size);
diff --git a/src/core/hle/kernel/memory_types.h b/src/core/hle/kernel/memory_types.h
index 3975507bd..92b8b37ac 100644
--- a/src/core/hle/kernel/memory_types.h
+++ b/src/core/hle/kernel/memory_types.h
@@ -14,4 +14,7 @@ constexpr std::size_t PageSize{1 << PageBits};

 using Page = std::array<u8, PageSize>;

+using KPhysicalAddress = PAddr;
+using KProcessAddress = VAddr;
+
 } // namespace Kernel
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 788ee2160..aca442196 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -1485,7 +1485,7 @@ static Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle p
                  ResultInvalidMemoryRegion);

     // Create a new page group.
-    KPageGroup pg;
+    KPageGroup pg{system.Kernel(), dst_pt.GetBlockInfoManager()};
     R_TRY(src_pt.MakeAndOpenPageGroup(
         std::addressof(pg), src_address, size / PageSize, KMemoryState::FlagCanMapProcess,
         KMemoryState::FlagCanMapProcess, KMemoryPermission::None, KMemoryPermission::None,