-rw-r--r--  src/common/CMakeLists.txt | 1
-rw-r--r--  src/common/typed_address.h | 320
-rw-r--r--  src/core/CMakeLists.txt | 1
-rw-r--r--  src/core/arm/arm_interface.cpp | 10
-rw-r--r--  src/core/arm/arm_interface.h | 8
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_32.cpp | 6
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_32.h | 6
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_64.cpp | 8
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_64.h | 6
-rw-r--r--  src/core/core.cpp | 4
-rw-r--r--  src/core/core.h | 4
-rw-r--r--  src/core/debugger/gdbstub.cpp | 36
-rw-r--r--  src/core/device_memory.h | 14
-rw-r--r--  src/core/hle/kernel/board/nintendo/nx/k_memory_layout.cpp | 12
-rw-r--r--  src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h | 4
-rw-r--r--  src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp | 2
-rw-r--r--  src/core/hle/kernel/board/nintendo/nx/k_system_control.h | 4
-rw-r--r--  src/core/hle/kernel/code_set.h | 6
-rw-r--r--  src/core/hle/kernel/init/init_slab_setup.cpp | 21
-rw-r--r--  src/core/hle/kernel/initial_process.h | 2
-rw-r--r--  src/core/hle/kernel/k_address_arbiter.cpp | 32
-rw-r--r--  src/core/hle/kernel/k_address_arbiter.h | 14
-rw-r--r--  src/core/hle/kernel/k_client_session.cpp | 3
-rw-r--r--  src/core/hle/kernel/k_code_memory.cpp | 11
-rw-r--r--  src/core/hle/kernel/k_code_memory.h | 16
-rw-r--r--  src/core/hle/kernel/k_condition_variable.cpp | 22
-rw-r--r--  src/core/hle/kernel/k_condition_variable.h | 8
-rw-r--r--  src/core/hle/kernel/k_device_address_space.cpp | 8
-rw-r--r--  src/core/hle/kernel/k_device_address_space.h | 13
-rw-r--r--  src/core/hle/kernel/k_dynamic_page_manager.h | 19
-rw-r--r--  src/core/hle/kernel/k_dynamic_slab_heap.h | 6
-rw-r--r--  src/core/hle/kernel/k_memory_block.h | 22
-rw-r--r--  src/core/hle/kernel/k_memory_block_manager.cpp | 68
-rw-r--r--  src/core/hle/kernel/k_memory_block_manager.h | 32
-rw-r--r--  src/core/hle/kernel/k_memory_layout.cpp | 13
-rw-r--r--  src/core/hle/kernel/k_memory_layout.h | 78
-rw-r--r--  src/core/hle/kernel/k_memory_manager.cpp | 51
-rw-r--r--  src/core/hle/kernel/k_memory_manager.h | 48
-rw-r--r--  src/core/hle/kernel/k_memory_region.h | 8
-rw-r--r--  src/core/hle/kernel/k_page_buffer.cpp | 4
-rw-r--r--  src/core/hle/kernel/k_page_buffer.h | 2
-rw-r--r--  src/core/hle/kernel/k_page_group.h | 2
-rw-r--r--  src/core/hle/kernel/k_page_heap.cpp | 40
-rw-r--r--  src/core/hle/kernel/k_page_heap.h | 48
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp | 392
-rw-r--r--  src/core/hle/kernel/k_page_table.h | 241
-rw-r--r--  src/core/hle/kernel/k_page_table_manager.h | 14
-rw-r--r--  src/core/hle/kernel/k_page_table_slab_heap.h | 12
-rw-r--r--  src/core/hle/kernel/k_process.cpp | 35
-rw-r--r--  src/core/hle/kernel/k_process.h | 43
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp | 2
-rw-r--r--  src/core/hle/kernel/k_session_request.cpp | 15
-rw-r--r--  src/core/hle/kernel/k_session_request.h | 55
-rw-r--r--  src/core/hle/kernel/k_shared_memory.cpp | 5
-rw-r--r--  src/core/hle/kernel/k_shared_memory.h | 8
-rw-r--r--  src/core/hle/kernel/k_system_resource.h | 2
-rw-r--r--  src/core/hle/kernel/k_thread.cpp | 28
-rw-r--r--  src/core/hle/kernel/k_thread.h | 58
-rw-r--r--  src/core/hle/kernel/k_thread_local_page.cpp | 6
-rw-r--r--  src/core/hle/kernel/k_thread_local_page.h | 25
-rw-r--r--  src/core/hle/kernel/k_transfer_memory.cpp | 2
-rw-r--r--  src/core/hle/kernel/k_transfer_memory.h | 6
-rw-r--r--  src/core/hle/kernel/k_typed_address.h | 12
-rw-r--r--  src/core/hle/kernel/kernel.cpp | 108
-rw-r--r--  src/core/hle/kernel/kernel.h | 3
-rw-r--r--  src/core/hle/kernel/memory_types.h | 4
-rw-r--r--  src/core/hle/kernel/svc/svc_address_arbiter.cpp | 8
-rw-r--r--  src/core/hle/kernel/svc/svc_code_memory.cpp | 4
-rw-r--r--  src/core/hle/kernel/svc/svc_condition_variable.cpp | 4
-rw-r--r--  src/core/hle/kernel/svc/svc_debug_string.cpp | 2
-rw-r--r--  src/core/hle/kernel/svc/svc_exception.cpp | 2
-rw-r--r--  src/core/hle/kernel/svc/svc_info.cpp | 10
-rw-r--r--  src/core/hle/kernel/svc/svc_lock.cpp | 4
-rw-r--r--  src/core/hle/kernel/svc/svc_memory.cpp | 13
-rw-r--r--  src/core/hle/kernel/svc/svc_physical_memory.cpp | 6
-rw-r--r--  src/core/hle/kernel/svc/svc_port.cpp | 2
-rw-r--r--  src/core/hle/kernel/svc/svc_process.cpp | 2
-rw-r--r--  src/core/hle/kernel/svc/svc_process_memory.cpp | 12
-rw-r--r--  src/core/hle/kernel/svc/svc_query_memory.cpp | 2
-rw-r--r--  src/core/hle/kernel/svc/svc_shared_memory.cpp | 4
-rw-r--r--  src/core/hle/kernel/svc/svc_synchronization.cpp | 2
-rw-r--r--  src/core/hle/kernel/svc/svc_thread.cpp | 8
-rw-r--r--  src/core/hle/kernel/svc/svc_transfer_memory.cpp | 2
-rw-r--r--  src/core/hle/kernel/svc_types.h | 4
-rw-r--r--  src/core/hle/service/hid/controllers/console_sixaxis.cpp | 2
-rw-r--r--  src/core/hle/service/hid/controllers/console_sixaxis.h | 6
-rw-r--r--  src/core/hle/service/hid/controllers/palma.cpp | 2
-rw-r--r--  src/core/hle/service/hid/controllers/palma.h | 6
-rw-r--r--  src/core/hle/service/hid/hidbus/hidbus_base.cpp | 2
-rw-r--r--  src/core/hle/service/hid/hidbus/hidbus_base.h | 6
-rw-r--r--  src/core/hle/service/hid/irsensor/image_transfer_processor.cpp | 2
-rw-r--r--  src/core/hle/service/hid/irsensor/image_transfer_processor.h | 6
-rw-r--r--  src/core/hle/service/jit/jit.cpp | 6
-rw-r--r--  src/core/hle/service/ldr/ldr.cpp | 5
-rw-r--r--  src/core/loader/deconstructed_rom_directory.cpp | 2
-rw-r--r--  src/core/loader/kip.cpp | 2
-rw-r--r--  src/core/loader/nso.cpp | 2
-rw-r--r--  src/core/memory.cpp | 319
-rw-r--r--  src/core/memory.h | 88
-rw-r--r--  src/core/memory/cheat_engine.cpp | 6
-rw-r--r--  src/core/reporter.cpp | 4
101 files changed, 1574 insertions(+), 1102 deletions(-)
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index 61ab68864..90805babe 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -132,6 +132,7 @@ add_library(common STATIC
     time_zone.h
     tiny_mt.h
     tree.h
+    typed_address.h
     uint128.h
     unique_function.h
     uuid.cpp
diff --git a/src/common/typed_address.h b/src/common/typed_address.h
new file mode 100644
index 000000000..cf7bbeae1
--- /dev/null
+++ b/src/common/typed_address.h
@@ -0,0 +1,320 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <compare>
+#include <type_traits>
+#include <fmt/format.h>
+
+#include "common/common_types.h"
+
+namespace Common {
+
+template <bool Virtual, typename T>
+class TypedAddress {
+public:
+    // Constructors.
+    constexpr inline TypedAddress() : m_address(0) {}
+    constexpr inline TypedAddress(uint64_t a) : m_address(a) {}
+
+    template <typename U>
+    constexpr inline explicit TypedAddress(const U* ptr)
+        : m_address(reinterpret_cast<uint64_t>(ptr)) {}
+
+    // Copy constructor.
+    constexpr inline TypedAddress(const TypedAddress& rhs) = default;
+
+    // Assignment operator.
+    constexpr inline TypedAddress& operator=(const TypedAddress& rhs) = default;
+
+    // Arithmetic operators.
+    template <typename I>
+    constexpr inline TypedAddress operator+(I rhs) const {
+        static_assert(std::is_integral_v<I>);
+        return m_address + rhs;
+    }
+
+    constexpr inline TypedAddress operator+(TypedAddress rhs) const {
+        return m_address + rhs.m_address;
+    }
+
+    constexpr inline TypedAddress operator++() {
+        return ++m_address;
+    }
+
+    constexpr inline TypedAddress operator++(int) {
+        return m_address++;
+    }
+
+    template <typename I>
+    constexpr inline TypedAddress operator-(I rhs) const {
+        static_assert(std::is_integral_v<I>);
+        return m_address - rhs;
+    }
+
+    constexpr inline ptrdiff_t operator-(TypedAddress rhs) const {
+        return m_address - rhs.m_address;
+    }
+
+    constexpr inline TypedAddress operator--() {
+        return --m_address;
+    }
+
+    constexpr inline TypedAddress operator--(int) {
+        return m_address--;
+    }
+
+    template <typename I>
+    constexpr inline TypedAddress operator+=(I rhs) {
+        static_assert(std::is_integral_v<I>);
+        m_address += rhs;
+        return *this;
+    }
+
+    template <typename I>
+    constexpr inline TypedAddress operator-=(I rhs) {
+        static_assert(std::is_integral_v<I>);
+        m_address -= rhs;
+        return *this;
+    }
+
+    // Logical operators.
+    constexpr inline uint64_t operator&(uint64_t mask) const {
+        return m_address & mask;
+    }
+
+    constexpr inline uint64_t operator|(uint64_t mask) const {
+        return m_address | mask;
+    }
+
+    template <typename I>
+    constexpr inline TypedAddress operator|=(I rhs) {
+        static_assert(std::is_integral_v<I>);
+        m_address |= rhs;
+        return *this;
+    }
+
+    constexpr inline uint64_t operator<<(int shift) const {
+        return m_address << shift;
+    }
+
+    constexpr inline uint64_t operator>>(int shift) const {
+        return m_address >> shift;
+    }
+
+    template <typename U>
+    constexpr inline size_t operator/(U size) const {
+        return m_address / size;
+    }
+
+    constexpr explicit operator bool() const {
+        return m_address != 0;
+    }
+
+    // constexpr inline uint64_t operator%(U align) const { return m_address % align; }
+
+    // Comparison operators.
+    constexpr bool operator==(const TypedAddress&) const = default;
+    constexpr bool operator!=(const TypedAddress&) const = default;
+    constexpr auto operator<=>(const TypedAddress&) const = default;
+
+    // For convenience, also define comparison operators versus uint64_t.
+    constexpr inline bool operator==(uint64_t rhs) const {
+        return m_address == rhs;
+    }
+
+    constexpr inline bool operator!=(uint64_t rhs) const {
+        return m_address != rhs;
+    }
+
+    // Allow getting the address explicitly, for use in accessors.
+    constexpr inline uint64_t GetValue() const {
+        return m_address;
+    }
+
+private:
+    uint64_t m_address{};
+};
+
+struct PhysicalAddressTag {};
+struct VirtualAddressTag {};
+struct ProcessAddressTag {};
+
+using PhysicalAddress = TypedAddress<false, PhysicalAddressTag>;
+using VirtualAddress = TypedAddress<true, VirtualAddressTag>;
+using ProcessAddress = TypedAddress<true, ProcessAddressTag>;
+
+// Define accessors.
+template <typename T>
+concept IsTypedAddress = std::same_as<T, PhysicalAddress> || std::same_as<T, VirtualAddress> ||
+                         std::same_as<T, ProcessAddress>;
+
+template <typename T>
+constexpr inline T Null = [] {
+    if constexpr (std::is_same<T, uint64_t>::value) {
+        return 0;
+    } else {
+        static_assert(std::is_same<T, PhysicalAddress>::value ||
+                      std::is_same<T, VirtualAddress>::value ||
+                      std::is_same<T, ProcessAddress>::value);
+        return T(0);
+    }
+}();
+
+// Basic type validations.
+static_assert(sizeof(PhysicalAddress) == sizeof(uint64_t));
+static_assert(sizeof(VirtualAddress) == sizeof(uint64_t));
+static_assert(sizeof(ProcessAddress) == sizeof(uint64_t));
+
+static_assert(std::is_trivially_copyable_v<PhysicalAddress>);
+static_assert(std::is_trivially_copyable_v<VirtualAddress>);
+static_assert(std::is_trivially_copyable_v<ProcessAddress>);
+
+static_assert(std::is_trivially_copy_constructible_v<PhysicalAddress>);
+static_assert(std::is_trivially_copy_constructible_v<VirtualAddress>);
+static_assert(std::is_trivially_copy_constructible_v<ProcessAddress>);
+
+static_assert(std::is_trivially_move_constructible_v<PhysicalAddress>);
+static_assert(std::is_trivially_move_constructible_v<VirtualAddress>);
+static_assert(std::is_trivially_move_constructible_v<ProcessAddress>);
+
+static_assert(std::is_trivially_copy_assignable_v<PhysicalAddress>);
+static_assert(std::is_trivially_copy_assignable_v<VirtualAddress>);
+static_assert(std::is_trivially_copy_assignable_v<ProcessAddress>);
+
+static_assert(std::is_trivially_move_assignable_v<PhysicalAddress>);
+static_assert(std::is_trivially_move_assignable_v<VirtualAddress>);
+static_assert(std::is_trivially_move_assignable_v<ProcessAddress>);
+
+static_assert(std::is_trivially_destructible_v<PhysicalAddress>);
+static_assert(std::is_trivially_destructible_v<VirtualAddress>);
+static_assert(std::is_trivially_destructible_v<ProcessAddress>);
+
+static_assert(Null<uint64_t> == 0);
+static_assert(Null<PhysicalAddress> == Null<uint64_t>);
+static_assert(Null<VirtualAddress> == Null<uint64_t>);
+static_assert(Null<ProcessAddress> == Null<uint64_t>);
+
+// Constructor/assignment validations.
+static_assert([] {
+    const PhysicalAddress a(5);
+    PhysicalAddress b(a);
+    return b;
+}() == PhysicalAddress(5));
+static_assert([] {
+    const PhysicalAddress a(5);
+    PhysicalAddress b(10);
+    b = a;
+    return b;
+}() == PhysicalAddress(5));
+
+// Arithmetic validations.
+static_assert(PhysicalAddress(10) + 5 == PhysicalAddress(15));
+static_assert(PhysicalAddress(10) - 5 == PhysicalAddress(5));
+static_assert([] {
+    PhysicalAddress v(10);
+    v += 5;
+    return v;
+}() == PhysicalAddress(15));
+static_assert([] {
+    PhysicalAddress v(10);
+    v -= 5;
+    return v;
+}() == PhysicalAddress(5));
+static_assert(PhysicalAddress(10)++ == PhysicalAddress(10));
+static_assert(++PhysicalAddress(10) == PhysicalAddress(11));
+static_assert(PhysicalAddress(10)-- == PhysicalAddress(10));
+static_assert(--PhysicalAddress(10) == PhysicalAddress(9));
+
+// Logical validations.
+static_assert((PhysicalAddress(0b11111111) >> 1) == 0b01111111);
+static_assert((PhysicalAddress(0b10101010) >> 1) == 0b01010101);
+static_assert((PhysicalAddress(0b11111111) << 1) == 0b111111110);
+static_assert((PhysicalAddress(0b01010101) << 1) == 0b10101010);
+static_assert((PhysicalAddress(0b11111111) & 0b01010101) == 0b01010101);
+static_assert((PhysicalAddress(0b11111111) & 0b10101010) == 0b10101010);
+static_assert((PhysicalAddress(0b01010101) & 0b10101010) == 0b00000000);
+static_assert((PhysicalAddress(0b00000000) | 0b01010101) == 0b01010101);
+static_assert((PhysicalAddress(0b11111111) | 0b01010101) == 0b11111111);
+static_assert((PhysicalAddress(0b10101010) | 0b01010101) == 0b11111111);
+
+// Comparisons.
+static_assert(PhysicalAddress(0) == PhysicalAddress(0));
+static_assert(PhysicalAddress(0) != PhysicalAddress(1));
+static_assert(PhysicalAddress(0) < PhysicalAddress(1));
+static_assert(PhysicalAddress(0) <= PhysicalAddress(1));
+static_assert(PhysicalAddress(1) > PhysicalAddress(0));
+static_assert(PhysicalAddress(1) >= PhysicalAddress(0));
+
+static_assert(!(PhysicalAddress(0) == PhysicalAddress(1)));
+static_assert(!(PhysicalAddress(0) != PhysicalAddress(0)));
+static_assert(!(PhysicalAddress(1) < PhysicalAddress(0)));
+static_assert(!(PhysicalAddress(1) <= PhysicalAddress(0)));
+static_assert(!(PhysicalAddress(0) > PhysicalAddress(1)));
+static_assert(!(PhysicalAddress(0) >= PhysicalAddress(1)));
+
+} // namespace Common
+
+template <bool Virtual, typename T>
+constexpr inline uint64_t GetInteger(Common::TypedAddress<Virtual, T> address) {
+    return address.GetValue();
+}
+
+template <>
+struct fmt::formatter<Common::PhysicalAddress> {
+    constexpr auto parse(fmt::format_parse_context& ctx) {
+        return ctx.begin();
+    }
+    template <typename FormatContext>
+    auto format(const Common::PhysicalAddress& addr, FormatContext& ctx) {
+        return fmt::format_to(ctx.out(), "{:#x}", static_cast<u64>(addr.GetValue()));
+    }
+};
+
+template <>
+struct fmt::formatter<Common::ProcessAddress> {
+    constexpr auto parse(fmt::format_parse_context& ctx) {
+        return ctx.begin();
+    }
+    template <typename FormatContext>
+    auto format(const Common::ProcessAddress& addr, FormatContext& ctx) {
+        return fmt::format_to(ctx.out(), "{:#x}", static_cast<u64>(addr.GetValue()));
+    }
+};
+
+template <>
+struct fmt::formatter<Common::VirtualAddress> {
+    constexpr auto parse(fmt::format_parse_context& ctx) {
+        return ctx.begin();
+    }
+    template <typename FormatContext>
+    auto format(const Common::VirtualAddress& addr, FormatContext& ctx) {
+        return fmt::format_to(ctx.out(), "{:#x}", static_cast<u64>(addr.GetValue()));
+    }
+};
+
+namespace std {
+
+template <>
+struct hash<Common::PhysicalAddress> {
+    size_t operator()(const Common::PhysicalAddress& k) const noexcept {
+        return k.GetValue();
+    }
+};
+
+template <>
+struct hash<Common::ProcessAddress> {
+    size_t operator()(const Common::ProcessAddress& k) const noexcept {
+        return k.GetValue();
+    }
+};
+
+template <>
+struct hash<Common::VirtualAddress> {
+    size_t operator()(const Common::VirtualAddress& k) const noexcept {
+        return k.GetValue();
+    }
+};
+
+} // namespace std
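
For reference, a minimal usage sketch of the strong address types this new header introduces (the function and the concrete values below are hypothetical; TypedAddress, GetInteger, and the fmt formatters are the ones defined above):

    #include <fmt/format.h>
    #include "common/typed_address.h"

    // Hypothetical caller. Mixing address spaces no longer compiles, because each
    // alias instantiates TypedAddress with a distinct tag type.
    void Example() {
        Common::ProcessAddress va{0x8000000};     // wraps a uint64_t
        Common::PhysicalAddress pa{0x80000000};

        va += 0x10;                               // arithmetic stays in the typed domain
        const uint64_t raw = GetInteger(va);      // explicit escape hatch to a raw integer
        // const auto bad = va - pa;              // error: different tag types

        fmt::print("va={} raw={:#x}\n", va, raw); // uses the formatter defined above
    }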
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 378e6c023..4e677f287 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -278,6 +278,7 @@ add_library(core STATIC
     hle/kernel/k_trace.h
     hle/kernel/k_transfer_memory.cpp
     hle/kernel/k_transfer_memory.h
+    hle/kernel/k_typed_address.h
     hle/kernel/k_worker_task.h
     hle/kernel/k_worker_task_manager.cpp
     hle/kernel/k_worker_task_manager.h
diff --git a/src/core/arm/arm_interface.cpp b/src/core/arm/arm_interface.cpp
index 4a331d4c1..be3f55cd2 100644
--- a/src/core/arm/arm_interface.cpp
+++ b/src/core/arm/arm_interface.cpp
@@ -168,21 +168,21 @@ void ARM_Interface::LoadWatchpointArray(const WatchpointArray& wp) {
 }
 
 const Kernel::DebugWatchpoint* ARM_Interface::MatchingWatchpoint(
-    VAddr addr, u64 size, Kernel::DebugWatchpointType access_type) const {
+    u64 addr, u64 size, Kernel::DebugWatchpointType access_type) const {
     if (!watchpoints) {
         return nullptr;
     }
 
-    const VAddr start_address{addr};
-    const VAddr end_address{addr + size};
+    const u64 start_address{addr};
+    const u64 end_address{addr + size};
 
     for (size_t i = 0; i < Core::Hardware::NUM_WATCHPOINTS; i++) {
         const auto& watch{(*watchpoints)[i]};
 
-        if (end_address <= watch.start_address) {
+        if (end_address <= GetInteger(watch.start_address)) {
             continue;
         }
-        if (start_address >= watch.end_address) {
+        if (start_address >= GetInteger(watch.end_address)) {
             continue;
         }
         if ((access_type & watch.type) == Kernel::DebugWatchpointType::None) {
diff --git a/src/core/arm/arm_interface.h b/src/core/arm/arm_interface.h
index c40771c97..8e40702cc 100644
--- a/src/core/arm/arm_interface.h
+++ b/src/core/arm/arm_interface.h
@@ -78,7 +78,7 @@ public:
      * @param addr Start address of the cache range to clear
      * @param size Size of the cache range to clear, starting at addr
      */
-    virtual void InvalidateCacheRange(VAddr addr, std::size_t size) = 0;
+    virtual void InvalidateCacheRange(u64 addr, std::size_t size) = 0;
 
     /**
      * Notifies CPU emulation that the current page table has changed.
@@ -149,9 +149,9 @@ public:
      */
     virtual void SetPSTATE(u32 pstate) = 0;
 
-    virtual VAddr GetTlsAddress() const = 0;
+    virtual u64 GetTlsAddress() const = 0;
 
-    virtual void SetTlsAddress(VAddr address) = 0;
+    virtual void SetTlsAddress(u64 address) = 0;
 
     /**
      * Gets the value within the TPIDR_EL0 (read/write software thread ID) register.
@@ -214,7 +214,7 @@ protected:
 
     static void SymbolicateBacktrace(Core::System& system, std::vector<BacktraceEntry>& out);
     const Kernel::DebugWatchpoint* MatchingWatchpoint(
-        VAddr addr, u64 size, Kernel::DebugWatchpointType access_type) const;
+        u64 addr, u64 size, Kernel::DebugWatchpointType access_type) const;
 
     virtual Dynarmic::HaltReason RunJit() = 0;
     virtual Dynarmic::HaltReason StepJit() = 0;
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index 2a7570073..aa92d3fc3 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -155,7 +155,7 @@ public:
         return std::max<s64>(parent.system.CoreTiming().GetDowncount(), 0);
     }
 
-    bool CheckMemoryAccess(VAddr addr, u64 size, Kernel::DebugWatchpointType type) {
+    bool CheckMemoryAccess(u64 addr, u64 size, Kernel::DebugWatchpointType type) {
         if (!check_memory_access) {
             return true;
         }
@@ -397,7 +397,7 @@ u64 ARM_Dynarmic_32::GetTlsAddress() const {
     return cp15->uro;
 }
 
-void ARM_Dynarmic_32::SetTlsAddress(VAddr address) {
+void ARM_Dynarmic_32::SetTlsAddress(u64 address) {
     cp15->uro = static_cast<u32>(address);
 }
 
@@ -439,7 +439,7 @@ void ARM_Dynarmic_32::ClearInstructionCache() {
     jit.load()->ClearCache();
 }
 
-void ARM_Dynarmic_32::InvalidateCacheRange(VAddr addr, std::size_t size) {
+void ARM_Dynarmic_32::InvalidateCacheRange(u64 addr, std::size_t size) {
     jit.load()->InvalidateCacheRange(static_cast<u32>(addr), size);
 }
 
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.h b/src/core/arm/dynarmic/arm_dynarmic_32.h
index d24ba2289..bce695daf 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.h
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.h
@@ -41,8 +41,8 @@ public:
     void SetVectorReg(int index, u128 value) override;
     u32 GetPSTATE() const override;
     void SetPSTATE(u32 pstate) override;
-    VAddr GetTlsAddress() const override;
-    void SetTlsAddress(VAddr address) override;
+    u64 GetTlsAddress() const override;
+    void SetTlsAddress(u64 address) override;
     void SetTPIDR_EL0(u64 value) override;
     u64 GetTPIDR_EL0() const override;
 
@@ -60,7 +60,7 @@ public:
     void ClearExclusiveState() override;
 
     void ClearInstructionCache() override;
-    void InvalidateCacheRange(VAddr addr, std::size_t size) override;
+    void InvalidateCacheRange(u64 addr, std::size_t size) override;
     void PageTableChanged(Common::PageTable& new_page_table,
                           std::size_t new_address_space_size_in_bits) override;
 
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index 7229fdc2a..67073c84d 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -117,7 +117,7 @@ public:
     }
 
     void InstructionCacheOperationRaised(Dynarmic::A64::InstructionCacheOperation op,
-                                         VAddr value) override {
+                                         u64 value) override {
         switch (op) {
         case Dynarmic::A64::InstructionCacheOperation::InvalidateByVAToPoU: {
             static constexpr u64 ICACHE_LINE_SIZE = 64;
@@ -199,7 +199,7 @@ public:
         return parent.system.CoreTiming().GetClockTicks();
     }
 
-    bool CheckMemoryAccess(VAddr addr, u64 size, Kernel::DebugWatchpointType type) {
+    bool CheckMemoryAccess(u64 addr, u64 size, Kernel::DebugWatchpointType type) {
         if (!check_memory_access) {
             return true;
         }
@@ -452,7 +452,7 @@ u64 ARM_Dynarmic_64::GetTlsAddress() const {
     return cb->tpidrro_el0;
 }
 
-void ARM_Dynarmic_64::SetTlsAddress(VAddr address) {
+void ARM_Dynarmic_64::SetTlsAddress(u64 address) {
     cb->tpidrro_el0 = address;
 }
 
@@ -500,7 +500,7 @@ void ARM_Dynarmic_64::ClearInstructionCache() {
     jit.load()->ClearCache();
 }
 
-void ARM_Dynarmic_64::InvalidateCacheRange(VAddr addr, std::size_t size) {
+void ARM_Dynarmic_64::InvalidateCacheRange(u64 addr, std::size_t size) {
     jit.load()->InvalidateCacheRange(addr, size);
 }
 
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.h b/src/core/arm/dynarmic/arm_dynarmic_64.h
index ed1a5eb96..e83599e82 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.h
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.h
@@ -38,8 +38,8 @@ public:
     void SetVectorReg(int index, u128 value) override;
     u32 GetPSTATE() const override;
     void SetPSTATE(u32 pstate) override;
-    VAddr GetTlsAddress() const override;
-    void SetTlsAddress(VAddr address) override;
+    u64 GetTlsAddress() const override;
+    void SetTlsAddress(u64 address) override;
     void SetTPIDR_EL0(u64 value) override;
     u64 GetTPIDR_EL0() const override;
 
@@ -53,7 +53,7 @@ public:
     void ClearExclusiveState() override;
 
     void ClearInstructionCache() override;
-    void InvalidateCacheRange(VAddr addr, std::size_t size) override;
+    void InvalidateCacheRange(u64 addr, std::size_t size) override;
     void PageTableChanged(Common::PageTable& new_page_table,
                           std::size_t new_address_space_size_in_bits) override;
 
diff --git a/src/core/core.cpp b/src/core/core.cpp
index d2b597068..f6273ac39 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -564,7 +564,7 @@ void System::InvalidateCpuInstructionCaches() {
     impl->kernel.InvalidateAllInstructionCaches();
 }
 
-void System::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) {
+void System::InvalidateCpuInstructionCacheRange(u64 addr, std::size_t size) {
     impl->kernel.InvalidateCpuInstructionCacheRange(addr, size);
 }
 
@@ -794,7 +794,7 @@ FileSys::VirtualFilesystem System::GetFilesystem() const {
 }
 
 void System::RegisterCheatList(const std::vector<Memory::CheatEntry>& list,
-                               const std::array<u8, 32>& build_id, VAddr main_region_begin,
+                               const std::array<u8, 32>& build_id, u64 main_region_begin,
                                u64 main_region_size) {
     impl->cheat_engine = std::make_unique<Memory::CheatEngine>(*this, list, build_id);
     impl->cheat_engine->SetMainMemoryParameters(main_region_begin, main_region_size);
diff --git a/src/core/core.h b/src/core/core.h
index 5843696d4..7032240be 100644
--- a/src/core/core.h
+++ b/src/core/core.h
@@ -172,7 +172,7 @@ public:
      */
     void InvalidateCpuInstructionCaches();
 
-    void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);
+    void InvalidateCpuInstructionCacheRange(u64 addr, std::size_t size);
 
     /// Shutdown the main emulated process.
     void ShutdownMainProcess();
@@ -353,7 +353,7 @@ public:
     [[nodiscard]] FileSys::VirtualFilesystem GetFilesystem() const;
 
     void RegisterCheatList(const std::vector<Memory::CheatEntry>& list,
-                           const std::array<u8, 0x20>& build_id, VAddr main_region_begin,
+                           const std::array<u8, 0x20>& build_id, u64 main_region_begin,
                            u64 main_region_size);
 
     void SetAppletFrontendSet(Service::AM::Applets::AppletFrontendSet&& set);
diff --git a/src/core/debugger/gdbstub.cpp b/src/core/debugger/gdbstub.cpp
index b2fe6bd7d..5cfb66b93 100644
--- a/src/core/debugger/gdbstub.cpp
+++ b/src/core/debugger/gdbstub.cpp
@@ -118,14 +118,14 @@ void GDBStub::Watchpoint(Kernel::KThread* thread, const Kernel::DebugWatchpoint&
 
     switch (watch.type) {
     case Kernel::DebugWatchpointType::Read:
-        SendReply(fmt::format("{}rwatch:{:x};", status, watch.start_address));
+        SendReply(fmt::format("{}rwatch:{:x};", status, GetInteger(watch.start_address)));
         break;
     case Kernel::DebugWatchpointType::Write:
-        SendReply(fmt::format("{}watch:{:x};", status, watch.start_address));
+        SendReply(fmt::format("{}watch:{:x};", status, GetInteger(watch.start_address)));
         break;
     case Kernel::DebugWatchpointType::ReadOrWrite:
     default:
-        SendReply(fmt::format("{}awatch:{:x};", status, watch.start_address));
+        SendReply(fmt::format("{}awatch:{:x};", status, GetInteger(watch.start_address)));
         break;
     }
 }
@@ -554,8 +554,9 @@ void GDBStub::HandleQuery(std::string_view command) {
         if (main != modules.end()) {
             SendReply(fmt::format("TextSeg={:x}", main->first));
         } else {
-            SendReply(fmt::format("TextSeg={:x}",
-                                  system.ApplicationProcess()->PageTable().GetCodeRegionStart()));
+            SendReply(fmt::format(
+                "TextSeg={:x}",
+                GetInteger(system.ApplicationProcess()->PageTable().GetCodeRegionStart())));
         }
     } else if (command.starts_with("Xfer:libraries:read::")) {
         Loader::AppLoader::Modules modules;
@@ -757,17 +758,20 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
         reply = fmt::format("Process:     {:#x} ({})\n"
                             "Program Id:  {:#018x}\n",
                             process->GetProcessId(), process->GetName(), process->GetProgramId());
-        reply +=
-            fmt::format("Layout:\n"
-                        "  Alias: {:#012x} - {:#012x}\n"
-                        "  Heap: {:#012x} - {:#012x}\n"
-                        "  Aslr: {:#012x} - {:#012x}\n"
-                        "  Stack: {:#012x} - {:#012x}\n"
-                        "Modules:\n",
-                        page_table.GetAliasRegionStart(), page_table.GetAliasRegionEnd(),
-                        page_table.GetHeapRegionStart(), page_table.GetHeapRegionEnd(),
-                        page_table.GetAliasCodeRegionStart(), page_table.GetAliasCodeRegionEnd(),
-                        page_table.GetStackRegionStart(), page_table.GetStackRegionEnd());
+        reply += fmt::format("Layout:\n"
+                             "  Alias: {:#012x} - {:#012x}\n"
+                             "  Heap: {:#012x} - {:#012x}\n"
+                             "  Aslr: {:#012x} - {:#012x}\n"
+                             "  Stack: {:#012x} - {:#012x}\n"
+                             "Modules:\n",
+                             GetInteger(page_table.GetAliasRegionStart()),
+                             GetInteger(page_table.GetAliasRegionEnd()),
+                             GetInteger(page_table.GetHeapRegionStart()),
+                             GetInteger(page_table.GetHeapRegionEnd()),
+                             GetInteger(page_table.GetAliasCodeRegionStart()),
+                             GetInteger(page_table.GetAliasCodeRegionEnd()),
+                             GetInteger(page_table.GetStackRegionStart()),
+                             GetInteger(page_table.GetStackRegionEnd()));
 
         for (const auto& [vaddr, name] : modules) {
             reply += fmt::format("  {:#012x} - {:#012x} {}\n", vaddr,
diff --git a/src/core/device_memory.h b/src/core/device_memory.h
index 90510733c..13388b73e 100644
--- a/src/core/device_memory.h
+++ b/src/core/device_memory.h
@@ -3,8 +3,8 @@
 
 #pragma once
 
-#include "common/common_types.h"
 #include "common/host_memory.h"
+#include "common/typed_address.h"
 
 namespace Core {
 
@@ -25,20 +25,22 @@ public:
     DeviceMemory(const DeviceMemory&) = delete;
 
     template <typename T>
-    PAddr GetPhysicalAddr(const T* ptr) const {
+    Common::PhysicalAddress GetPhysicalAddr(const T* ptr) const {
         return (reinterpret_cast<uintptr_t>(ptr) -
                 reinterpret_cast<uintptr_t>(buffer.BackingBasePointer())) +
                DramMemoryMap::Base;
     }
 
     template <typename T>
-    T* GetPointer(PAddr addr) {
-        return reinterpret_cast<T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base));
+    T* GetPointer(Common::PhysicalAddress addr) {
+        return reinterpret_cast<T*>(buffer.BackingBasePointer() +
+                                    (GetInteger(addr) - DramMemoryMap::Base));
     }
 
     template <typename T>
-    const T* GetPointer(PAddr addr) const {
-        return reinterpret_cast<T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base));
+    const T* GetPointer(Common::PhysicalAddress addr) const {
+        return reinterpret_cast<T*>(buffer.BackingBasePointer() +
+                                    (GetInteger(addr) - DramMemoryMap::Base));
     }
 
     Common::HostMemory buffer;
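
The two accessors above are a fixed-offset translation between the host backing allocation and the emulated DRAM window. A standalone model of the arithmetic, for orientation only (the names and the 0x80000000 base are illustrative stand-ins for DramMemoryMap::Base and Common::HostMemory):

    #include <cstdint>

    constexpr uint64_t DramBase = 0x80000000; // stand-in for DramMemoryMap::Base

    // Host pointer -> emulated physical address.
    uint64_t GetPhysicalAddr(const uint8_t* backing, const uint8_t* ptr) {
        return static_cast<uint64_t>(ptr - backing) + DramBase;
    }

    // Emulated physical address -> host pointer (inverse of the above).
    uint8_t* GetPointer(uint8_t* backing, uint64_t phys) {
        return backing + (phys - DramBase);
    }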
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.cpp b/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.cpp
index 098ba6eac..24eb3f886 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.cpp
+++ b/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.cpp
@@ -76,22 +76,24 @@ void SetupDevicePhysicalMemoryRegions(KMemoryLayout& memory_layout) {
 
 void SetupDramPhysicalMemoryRegions(KMemoryLayout& memory_layout) {
     const size_t intended_memory_size = KSystemControl::Init::GetIntendedMemorySize();
-    const PAddr physical_memory_base_address =
+    const KPhysicalAddress physical_memory_base_address =
         KSystemControl::Init::GetKernelPhysicalBaseAddress(DramPhysicalAddress);
 
     // Insert blocks into the tree.
     ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
-        physical_memory_base_address, intended_memory_size, KMemoryRegionType_Dram));
+        GetInteger(physical_memory_base_address), intended_memory_size, KMemoryRegionType_Dram));
     ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
-        physical_memory_base_address, ReservedEarlyDramSize, KMemoryRegionType_DramReservedEarly));
+        GetInteger(physical_memory_base_address), ReservedEarlyDramSize,
+        KMemoryRegionType_DramReservedEarly));
 
     // Insert the KTrace block at the end of Dram, if KTrace is enabled.
     static_assert(!IsKTraceEnabled || KTraceBufferSize > 0);
     if constexpr (IsKTraceEnabled) {
-        const PAddr ktrace_buffer_phys_addr =
+        const KPhysicalAddress ktrace_buffer_phys_addr =
             physical_memory_base_address + intended_memory_size - KTraceBufferSize;
         ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
-            ktrace_buffer_phys_addr, KTraceBufferSize, KMemoryRegionType_KernelTraceBuffer));
+            GetInteger(ktrace_buffer_phys_addr), KTraceBufferSize,
+            KMemoryRegionType_KernelTraceBuffer));
     }
 }
 
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h b/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h
index d02ee61c3..f8fee4f5b 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h
+++ b/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h
@@ -3,10 +3,10 @@
 
 #pragma once
 
-#include "common/common_types.h"
+#include "core/hle/kernel/k_typed_address.h"
 
 namespace Kernel {
 
-constexpr inline PAddr MainMemoryAddress = 0x80000000;
+constexpr inline KPhysicalAddress MainMemoryAddress = 0x80000000;
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
index 5b8a248c8..42d1fcc28 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
@@ -61,7 +61,7 @@ size_t KSystemControl::Init::GetIntendedMemorySize() {
     }
 }
 
-PAddr KSystemControl::Init::GetKernelPhysicalBaseAddress(u64 base_address) {
+KPhysicalAddress KSystemControl::Init::GetKernelPhysicalBaseAddress(KPhysicalAddress base_address) {
     const size_t real_dram_size = KSystemControl::Init::GetRealMemorySize();
     const size_t intended_dram_size = KSystemControl::Init::GetIntendedMemorySize();
     if (intended_dram_size * 2 < real_dram_size) {
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
index 4b717d091..b477e8193 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
@@ -3,7 +3,7 @@
 
 #pragma once
 
-#include "common/common_types.h"
+#include "core/hle/kernel/k_typed_address.h"
 
 namespace Kernel::Board::Nintendo::Nx {
 
@@ -18,7 +18,7 @@ public:
     // Initialization.
     static std::size_t GetRealMemorySize();
     static std::size_t GetIntendedMemorySize();
-    static PAddr GetKernelPhysicalBaseAddress(u64 base_address);
+    static KPhysicalAddress GetKernelPhysicalBaseAddress(KPhysicalAddress base_address);
     static bool ShouldIncreaseThreadResourceLimit();
     static std::size_t GetApplicationPoolSize();
     static std::size_t GetAppletPoolSize();
diff --git a/src/core/hle/kernel/code_set.h b/src/core/hle/kernel/code_set.h
index 5220dbcb6..af1af2b78 100644
--- a/src/core/hle/kernel/code_set.h
+++ b/src/core/hle/kernel/code_set.h
@@ -5,7 +5,7 @@
 
 #include <cstddef>
 
-#include "common/common_types.h"
+#include "core/hle/kernel/k_typed_address.h"
 #include "core/hle/kernel/physical_memory.h"
 
 namespace Kernel {
@@ -36,7 +36,7 @@ struct CodeSet final {
     std::size_t offset = 0;
 
     /// The address to map this segment to.
-    VAddr addr = 0;
+    KProcessAddress addr = 0;
 
     /// The size of this segment in bytes.
     u32 size = 0;
@@ -82,7 +82,7 @@ struct CodeSet final {
     std::array<Segment, 3> segments;
 
     /// The entry point address for this code set.
-    VAddr entrypoint = 0;
+    KProcessAddress entrypoint = 0;
 };
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp
index 5e4090e2b..1f2db673c 100644
--- a/src/core/hle/kernel/init/init_slab_setup.cpp
+++ b/src/core/hle/kernel/init/init_slab_setup.cpp
@@ -4,7 +4,6 @@
4#include "common/alignment.h" 4#include "common/alignment.h"
5#include "common/assert.h" 5#include "common/assert.h"
6#include "common/common_funcs.h" 6#include "common/common_funcs.h"
7#include "common/common_types.h"
8#include "core/core.h" 7#include "core/core.h"
9#include "core/device_memory.h" 8#include "core/device_memory.h"
10#include "core/hardware_properties.h" 9#include "core/hardware_properties.h"
@@ -30,6 +29,7 @@
30#include "core/hle/kernel/k_thread.h" 29#include "core/hle/kernel/k_thread.h"
31#include "core/hle/kernel/k_thread_local_page.h" 30#include "core/hle/kernel/k_thread_local_page.h"
32#include "core/hle/kernel/k_transfer_memory.h" 31#include "core/hle/kernel/k_transfer_memory.h"
32#include "core/hle/kernel/k_typed_address.h"
33 33
34namespace Kernel::Init { 34namespace Kernel::Init {
35 35
@@ -104,17 +104,18 @@ static_assert(KernelPageBufferAdditionalSize ==
 
 /// Helper function to translate from the slab virtual address to the reserved location in physical
 /// memory.
-static PAddr TranslateSlabAddrToPhysical(KMemoryLayout& memory_layout, VAddr slab_addr) {
-    slab_addr -= memory_layout.GetSlabRegionAddress();
-    return slab_addr + Core::DramMemoryMap::SlabHeapBase;
+static KPhysicalAddress TranslateSlabAddrToPhysical(KMemoryLayout& memory_layout,
+                                                    KVirtualAddress slab_addr) {
+    slab_addr -= GetInteger(memory_layout.GetSlabRegionAddress());
+    return GetInteger(slab_addr) + Core::DramMemoryMap::SlabHeapBase;
 }
 
 template <typename T>
-VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAddr address,
-                         size_t num_objects) {
+KVirtualAddress InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout,
+                                   KVirtualAddress address, size_t num_objects) {
 
     const size_t size = Common::AlignUp(sizeof(T) * num_objects, alignof(void*));
-    VAddr start = Common::AlignUp(address, alignof(T));
+    KVirtualAddress start = Common::AlignUp(GetInteger(address), alignof(T));
 
     // This should use the virtual memory address passed in, but currently, we do not setup the
     // kernel virtual memory layout. Instead, we simply map these at a region of physical memory
@@ -195,7 +196,7 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
     auto& kernel = system.Kernel();
 
     // Get the start of the slab region, since that's where we'll be working.
-    VAddr address = memory_layout.GetSlabRegionAddress();
+    KVirtualAddress address = memory_layout.GetSlabRegionAddress();
 
     // Initialize slab type array to be in sorted order.
     std::array<KSlabType, KSlabType_Count> slab_types;
@@ -228,7 +229,7 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
     }
 
     // Track the gaps, so that we can free them to the unused slab tree.
-    VAddr gap_start = address;
+    KVirtualAddress gap_start = address;
     size_t gap_size = 0;
 
     for (size_t i = 0; i < slab_gaps.size(); i++) {
@@ -280,7 +281,7 @@ void KPageBufferSlabHeap::Initialize(Core::System& system) {
     // Allocate memory for the slab.
     constexpr auto AllocateOption = KMemoryManager::EncodeOption(
         KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront);
-    const PAddr slab_address =
+    const KPhysicalAddress slab_address =
         kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption);
     ASSERT(slab_address != 0);
 
diff --git a/src/core/hle/kernel/initial_process.h b/src/core/hle/kernel/initial_process.h
index af0fb23b6..82195f4f7 100644
--- a/src/core/hle/kernel/initial_process.h
+++ b/src/core/hle/kernel/initial_process.h
@@ -14,7 +14,7 @@ using namespace Common::Literals;
 
 constexpr std::size_t InitialProcessBinarySizeMax = 12_MiB;
 
-static inline PAddr GetInitialProcessBinaryPhysicalAddress() {
+static inline KPhysicalAddress GetInitialProcessBinaryPhysicalAddress() {
     return Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetKernelPhysicalBaseAddress(
         MainMemoryAddress);
 }
diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
index 30a4e6edb..274928dcf 100644
--- a/src/core/hle/kernel/k_address_arbiter.cpp
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
@@ -8,6 +8,7 @@
8#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" 8#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
9#include "core/hle/kernel/k_thread.h" 9#include "core/hle/kernel/k_thread.h"
10#include "core/hle/kernel/k_thread_queue.h" 10#include "core/hle/kernel/k_thread_queue.h"
11#include "core/hle/kernel/k_typed_address.h"
11#include "core/hle/kernel/kernel.h" 12#include "core/hle/kernel/kernel.h"
12#include "core/hle/kernel/svc_results.h" 13#include "core/hle/kernel/svc_results.h"
13#include "core/memory.h" 14#include "core/memory.h"
@@ -20,12 +21,12 @@ KAddressArbiter::~KAddressArbiter() = default;
 
 namespace {
 
-bool ReadFromUser(Core::System& system, s32* out, VAddr address) {
-    *out = system.Memory().Read32(address);
+bool ReadFromUser(Core::System& system, s32* out, KProcessAddress address) {
+    *out = system.Memory().Read32(GetInteger(address));
     return true;
 }
 
-bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
+bool DecrementIfLessThan(Core::System& system, s32* out, KProcessAddress address, s32 value) {
     auto& monitor = system.Monitor();
     const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
 
@@ -35,7 +36,8 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu
     // TODO(bunnei): We should call CanAccessAtomic(..) here.
 
     // Load the value from the address.
-    const s32 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
+    const s32 current_value =
+        static_cast<s32>(monitor.ExclusiveRead32(current_core, GetInteger(address)));
 
     // Compare it to the desired one.
     if (current_value < value) {
41 if (current_value < value) { 43 if (current_value < value) {
@@ -43,7 +45,8 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu
         const s32 decrement_value = current_value - 1;
 
         // Decrement and try to store.
-        if (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(decrement_value))) {
+        if (!monitor.ExclusiveWrite32(current_core, GetInteger(address),
+                                      static_cast<u32>(decrement_value))) {
             // If we failed to store, try again.
             DecrementIfLessThan(system, out, address, value);
         }
@@ -57,7 +60,8 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu
     return true;
 }
 
-bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
+bool UpdateIfEqual(Core::System& system, s32* out, KProcessAddress address, s32 value,
+                   s32 new_value) {
     auto& monitor = system.Monitor();
     const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
 
@@ -67,14 +71,16 @@ bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32
     // TODO(bunnei): We should call CanAccessAtomic(..) here.
 
     // Load the value from the address.
-    const s32 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
+    const s32 current_value =
+        static_cast<s32>(monitor.ExclusiveRead32(current_core, GetInteger(address)));
 
     // Compare it to the desired one.
     if (current_value == value) {
         // If equal, we want to try to write the new value.
 
         // Try to store.
-        if (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(new_value))) {
+        if (!monitor.ExclusiveWrite32(current_core, GetInteger(address),
+                                      static_cast<u32>(new_value))) {
             // If we failed to store, try again.
             UpdateIfEqual(system, out, address, value, new_value);
         }
@@ -110,7 +116,7 @@ private:
 
 } // namespace
 
-Result KAddressArbiter::Signal(VAddr addr, s32 count) {
+Result KAddressArbiter::Signal(uint64_t addr, s32 count) {
     // Perform signaling.
     s32 num_waiters{};
     {
@@ -133,7 +139,7 @@ Result KAddressArbiter::Signal(VAddr addr, s32 count) {
     R_SUCCEED();
 }
 
-Result KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) {
+Result KAddressArbiter::SignalAndIncrementIfEqual(uint64_t addr, s32 value, s32 count) {
     // Perform signaling.
     s32 num_waiters{};
     {
@@ -162,7 +168,7 @@ Result KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 cou
     R_SUCCEED();
 }
 
-Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) {
+Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(uint64_t addr, s32 value, s32 count) {
     // Perform signaling.
     s32 num_waiters{};
     {
@@ -225,7 +231,7 @@ Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 val
     R_SUCCEED();
 }
 
-Result KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
+Result KAddressArbiter::WaitIfLessThan(uint64_t addr, s32 value, bool decrement, s64 timeout) {
     // Prepare to wait.
     KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
     KHardwareTimer* timer{};
@@ -280,7 +286,7 @@ Result KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s6
     return cur_thread->GetWaitResult();
 }
 
-Result KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
+Result KAddressArbiter::WaitIfEqual(uint64_t addr, s32 value, s64 timeout) {
     // Prepare to wait.
     KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
     KHardwareTimer* timer{};
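
The DecrementIfLessThan/UpdateIfEqual changes above keep the existing exclusive-monitor retry pattern (ExclusiveRead32, then ExclusiveWrite32, retrying when the store fails) and only reroute the address through GetInteger(). For orientation, the same read-check-conditionally-store shape expressed as a plain std::atomic compare-exchange loop (illustrative only; the kernel version instead returns whether the access succeeded and reports the observed value through *out):

    #include <atomic>
    #include <cstdint>

    // Decrement 'word' if it is less than 'value'; returns true if decremented.
    bool DecrementIfLessThan(std::atomic<int32_t>& word, int32_t value) {
        int32_t current = word.load(std::memory_order_relaxed);
        while (current < value) {
            // On failure, compare_exchange_weak reloads 'current' and we retry,
            // mirroring the "if the exclusive store fails, try again" recursion above.
            if (word.compare_exchange_weak(current, current - 1)) {
                return true;
            }
        }
        return false;
    }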
diff --git a/src/core/hle/kernel/k_address_arbiter.h b/src/core/hle/kernel/k_address_arbiter.h
index 9a8c1ae94..3b70e1ab2 100644
--- a/src/core/hle/kernel/k_address_arbiter.h
+++ b/src/core/hle/kernel/k_address_arbiter.h
@@ -25,7 +25,7 @@ public:
     explicit KAddressArbiter(Core::System& system);
     ~KAddressArbiter();
 
-    Result SignalToAddress(VAddr addr, Svc::SignalType type, s32 value, s32 count) {
+    Result SignalToAddress(uint64_t addr, Svc::SignalType type, s32 value, s32 count) {
         switch (type) {
         case Svc::SignalType::Signal:
             R_RETURN(this->Signal(addr, count));
@@ -38,7 +38,7 @@ public:
         }
     }
 
-    Result WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value, s64 timeout) {
+    Result WaitForAddress(uint64_t addr, Svc::ArbitrationType type, s32 value, s64 timeout) {
         switch (type) {
         case Svc::ArbitrationType::WaitIfLessThan:
             R_RETURN(WaitIfLessThan(addr, value, false, timeout));
@@ -52,11 +52,11 @@ public:
     }
 
 private:
-    Result Signal(VAddr addr, s32 count);
-    Result SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
-    Result SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
-    Result WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
-    Result WaitIfEqual(VAddr addr, s32 value, s64 timeout);
+    Result Signal(uint64_t addr, s32 count);
+    Result SignalAndIncrementIfEqual(uint64_t addr, s32 value, s32 count);
+    Result SignalAndModifyByWaitingCountIfEqual(uint64_t addr, s32 value, s32 count);
+    Result WaitIfLessThan(uint64_t addr, s32 value, bool decrement, s64 timeout);
+    Result WaitIfEqual(uint64_t addr, s32 value, s64 timeout);
 
 private:
     ThreadTree m_tree;
diff --git a/src/core/hle/kernel/k_client_session.cpp b/src/core/hle/kernel/k_client_session.cpp
index d998b2be2..72b66270d 100644
--- a/src/core/hle/kernel/k_client_session.cpp
+++ b/src/core/hle/kernel/k_client_session.cpp
@@ -29,7 +29,8 @@ Result KClientSession::SendSyncRequest() {
     SCOPE_EXIT({ request->Close(); });
 
     // Initialize the request.
-    request->Initialize(nullptr, GetCurrentThread(m_kernel).GetTlsAddress(), MessageBufferSize);
+    request->Initialize(nullptr, GetInteger(GetCurrentThread(m_kernel).GetTlsAddress()),
+                        MessageBufferSize);
 
     // Send the request.
     R_RETURN(m_parent->GetServerSession().OnRequest(request));
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index 89df6b5d8..3583bee44 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -19,7 +19,8 @@ namespace Kernel {
 KCodeMemory::KCodeMemory(KernelCore& kernel)
     : KAutoObjectWithSlabHeapAndContainer{kernel}, m_lock(kernel) {}
 
-Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, size_t size) {
+Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, KProcessAddress addr,
+                               size_t size) {
     // Set members.
     m_owner = GetCurrentProcessPointer(m_kernel);
 
@@ -63,7 +64,7 @@ void KCodeMemory::Finalize() {
     m_owner->Close();
 }
 
-Result KCodeMemory::Map(VAddr address, size_t size) {
+Result KCodeMemory::Map(KProcessAddress address, size_t size) {
     // Validate the size.
     R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
@@ -83,7 +84,7 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
     R_SUCCEED();
 }
 
-Result KCodeMemory::Unmap(VAddr address, size_t size) {
+Result KCodeMemory::Unmap(KProcessAddress address, size_t size) {
     // Validate the size.
     R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
@@ -100,7 +101,7 @@ Result KCodeMemory::Unmap(VAddr address, size_t size) {
     R_SUCCEED();
 }
 
-Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
+Result KCodeMemory::MapToOwner(KProcessAddress address, size_t size, Svc::MemoryPermission perm) {
     // Validate the size.
     R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
@@ -134,7 +135,7 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission
     R_SUCCEED();
 }
 
-Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
+Result KCodeMemory::UnmapFromOwner(KProcessAddress address, size_t size) {
     // Validate the size.
     R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
diff --git a/src/core/hle/kernel/k_code_memory.h b/src/core/hle/kernel/k_code_memory.h
index 23cbb283b..26fe6b3dc 100644
--- a/src/core/hle/kernel/k_code_memory.h
+++ b/src/core/hle/kernel/k_code_memory.h
@@ -5,12 +5,12 @@
 
 #include <optional>
 
-#include "common/common_types.h"
 #include "core/device_memory.h"
 #include "core/hle/kernel/k_auto_object.h"
 #include "core/hle/kernel/k_light_lock.h"
 #include "core/hle/kernel/k_page_group.h"
 #include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_typed_address.h"
 #include "core/hle/kernel/slab_helpers.h"
 #include "core/hle/kernel/svc_types.h"
 #include "core/hle/result.h"
@@ -31,13 +31,13 @@ class KCodeMemory final
 public:
     explicit KCodeMemory(KernelCore& kernel);
 
-    Result Initialize(Core::DeviceMemory& device_memory, VAddr address, size_t size);
+    Result Initialize(Core::DeviceMemory& device_memory, KProcessAddress address, size_t size);
     void Finalize() override;
 
-    Result Map(VAddr address, size_t size);
-    Result Unmap(VAddr address, size_t size);
-    Result MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm);
-    Result UnmapFromOwner(VAddr address, size_t size);
+    Result Map(KProcessAddress address, size_t size);
+    Result Unmap(KProcessAddress address, size_t size);
+    Result MapToOwner(KProcessAddress address, size_t size, Svc::MemoryPermission perm);
+    Result UnmapFromOwner(KProcessAddress address, size_t size);
 
     bool IsInitialized() const override {
         return m_is_initialized;
@@ -47,7 +47,7 @@ public:
     KProcess* GetOwner() const override {
         return m_owner;
     }
-    VAddr GetSourceAddress() const {
+    KProcessAddress GetSourceAddress() const {
         return m_address;
     }
     size_t GetSize() const {
@@ -57,7 +57,7 @@ public:
 private:
     std::optional<KPageGroup> m_page_group{};
     KProcess* m_owner{};
-    VAddr m_address{};
+    KProcessAddress m_address{};
     KLightLock m_lock;
     bool m_is_initialized{};
    bool m_is_owner_mapped{};
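Every Map/Unmap variant above guards with the same predicate: the request must cover exactly the page group's pages. A self-contained sketch of that check, assuming `Common::DivideUp` is plain ceiling division:

```cpp
#include <cassert>
#include <cstddef>

constexpr std::size_t PageSize = 0x1000; // 4 KiB, as on Horizon

// Matches the semantics assumed for Common::DivideUp: ceiling division.
constexpr std::size_t DivideUp(std::size_t value, std::size_t divisor) {
    return (value + divisor - 1) / divisor;
}

// A mapping is only valid if it spans exactly the page group's pages.
bool IsValidMappingSize(std::size_t num_pages_in_group, std::size_t requested_size) {
    return num_pages_in_group == DivideUp(requested_size, PageSize);
}

int main() {
    assert(IsValidMappingSize(2, 0x1234));  // 0x1234 bytes round up to 2 pages
    assert(!IsValidMappingSize(1, 0x1001)); // one byte over a page needs 2 pages
}
```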
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index 58b8609d8..c6634313f 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -18,23 +18,23 @@ namespace Kernel {
 
 namespace {
 
-bool ReadFromUser(Core::System& system, u32* out, VAddr address) {
-    *out = system.Memory().Read32(address);
+bool ReadFromUser(Core::System& system, u32* out, KProcessAddress address) {
+    *out = system.Memory().Read32(GetInteger(address));
     return true;
 }
 
-bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
-    system.Memory().Write32(address, *p);
+bool WriteToUser(Core::System& system, KProcessAddress address, const u32* p) {
+    system.Memory().Write32(GetInteger(address), *p);
     return true;
 }
 
-bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
+bool UpdateLockAtomic(Core::System& system, u32* out, KProcessAddress address, u32 if_zero,
                       u32 new_orr_mask) {
     auto& monitor = system.Monitor();
     const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
 
     // Load the value from the address.
-    const auto expected = monitor.ExclusiveRead32(current_core, address);
+    const auto expected = monitor.ExclusiveRead32(current_core, GetInteger(address));
 
     // Orr in the new mask.
     u32 value = expected | new_orr_mask;
@@ -45,7 +45,7 @@ bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero
     }
 
     // Try to store.
-    if (!monitor.ExclusiveWrite32(current_core, address, value)) {
+    if (!monitor.ExclusiveWrite32(current_core, GetInteger(address), value)) {
         // If we failed to store, try again.
         return UpdateLockAtomic(system, out, address, if_zero, new_orr_mask);
     }
@@ -102,7 +102,7 @@ KConditionVariable::KConditionVariable(Core::System& system)
 
 KConditionVariable::~KConditionVariable() = default;
 
-Result KConditionVariable::SignalToAddress(VAddr addr) {
+Result KConditionVariable::SignalToAddress(KProcessAddress addr) {
     KThread* owner_thread = GetCurrentThreadPointer(m_kernel);
 
     // Signal the address.
@@ -143,7 +143,7 @@ Result KConditionVariable::SignalToAddress(VAddr addr) {
     }
 }
 
-Result KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
+Result KConditionVariable::WaitForAddress(Handle handle, KProcessAddress addr, u32 value) {
     KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
     ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(m_kernel);
 
@@ -191,7 +191,7 @@ void KConditionVariable::SignalImpl(KThread* thread) {
     ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
 
     // Update the tag.
-    VAddr address = thread->GetAddressKey();
+    KProcessAddress address = thread->GetAddressKey();
     u32 own_tag = thread->GetAddressKeyValue();
 
     u32 prev_tag{};
@@ -262,7 +262,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
     }
 }
 
-Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
+Result KConditionVariable::Wait(KProcessAddress addr, u64 key, u32 value, s64 timeout) {
     // Prepare to wait.
     KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
     KHardwareTimer* timer{};
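`UpdateLockAtomic` is the classic load-exclusive/store-exclusive retry loop, here with `GetInteger` applied at the monitor boundary. A self-contained analogy using `std::atomic` in place of the ARM exclusive monitor, assuming the branch elided from the hunk selects `if_zero` when the old value was zero:

```cpp
#include <atomic>
#include <cstdint>

bool UpdateLockAtomic(std::atomic<uint32_t>& word, uint32_t* out, uint32_t if_zero,
                      uint32_t new_orr_mask) {
    uint32_t expected = word.load();
    for (;;) {
        // Orr in the new mask; a zero word takes the if_zero value instead
        // (assumption labeled in the lead-in above).
        const uint32_t value = (expected == 0) ? if_zero : (expected | new_orr_mask);
        // compare_exchange plays the role of ExclusiveWrite32: on contention,
        // reload and retry, as the kernel's recursive call does.
        if (word.compare_exchange_weak(expected, value)) {
            *out = expected;
            return true;
        }
    }
}

int main() {
    std::atomic<uint32_t> word{0};
    uint32_t prev{};
    UpdateLockAtomic(word, &prev, /*if_zero=*/0x1234, /*new_orr_mask=*/0x4000'0000);
    // prev == 0, word == 0x1234
}
```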
diff --git a/src/core/hle/kernel/k_condition_variable.h b/src/core/hle/kernel/k_condition_variable.h
index fbd2c1fc0..8c2f3ae51 100644
--- a/src/core/hle/kernel/k_condition_variable.h
+++ b/src/core/hle/kernel/k_condition_variable.h
@@ -4,10 +4,10 @@
 #pragma once
 
 #include "common/assert.h"
-#include "common/common_types.h"
 
 #include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/k_typed_address.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/result.h"
 
@@ -25,12 +25,12 @@ public:
     ~KConditionVariable();
 
     // Arbitration
-    Result SignalToAddress(VAddr addr);
-    Result WaitForAddress(Handle handle, VAddr addr, u32 value);
+    Result SignalToAddress(KProcessAddress addr);
+    Result WaitForAddress(Handle handle, KProcessAddress addr, u32 value);
 
     // Condition variable
     void Signal(u64 cv_key, s32 count);
-    Result Wait(VAddr addr, u64 key, u32 value, s64 timeout);
+    Result Wait(KProcessAddress addr, u64 key, u32 value, s64 timeout);
 
 private:
     void SignalImpl(KThread* thread);
diff --git a/src/core/hle/kernel/k_device_address_space.cpp b/src/core/hle/kernel/k_device_address_space.cpp
index a2fc4fe1f..f48896715 100644
--- a/src/core/hle/kernel/k_device_address_space.cpp
+++ b/src/core/hle/kernel/k_device_address_space.cpp
@@ -54,8 +54,8 @@ Result KDeviceAddressSpace::Detach(Svc::DeviceName device_name) {
     R_SUCCEED();
 }
 
-Result KDeviceAddressSpace::Map(KPageTable* page_table, VAddr process_address, size_t size,
-                                u64 device_address, u32 option, bool is_aligned) {
+Result KDeviceAddressSpace::Map(KPageTable* page_table, KProcessAddress process_address,
+                                size_t size, u64 device_address, u32 option, bool is_aligned) {
     // Check that the address falls within the space.
     R_UNLESS((m_space_address <= device_address &&
               device_address + size - 1 <= m_space_address + m_space_size - 1),
@@ -113,8 +113,8 @@ Result KDeviceAddressSpace::Map(KPageTable* page_table, VAddr process_address, s
     R_SUCCEED();
 }
 
-Result KDeviceAddressSpace::Unmap(KPageTable* page_table, VAddr process_address, size_t size,
-                                  u64 device_address) {
+Result KDeviceAddressSpace::Unmap(KPageTable* page_table, KProcessAddress process_address,
+                                  size_t size, u64 device_address) {
     // Check that the address falls within the space.
     R_UNLESS((m_space_address <= device_address &&
               device_address + size - 1 <= m_space_address + m_space_size - 1),
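The containment test preserved above compares inclusive last addresses rather than exclusive ends; phrased this way, a space that extends to the very top of the address range cannot overflow the right-hand side of the comparison. A self-contained sketch:

```cpp
#include <cassert>
#include <cstdint>

// Inclusive-bound containment check, mirroring the R_UNLESS condition above:
// [address, address + size - 1] must lie within
// [space_address, space_address + space_size - 1].
bool ContainsRange(uint64_t space_address, uint64_t space_size, uint64_t address,
                   uint64_t size) {
    return space_address <= address && address + size - 1 <= space_address + space_size - 1;
}

int main() {
    assert(ContainsRange(0x1000, 0x1000, 0x1800, 0x100)); // fits inside
    assert(!ContainsRange(0x1000, 0x1000, 0x1F00, 0x200)); // spills past the end
}
```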
diff --git a/src/core/hle/kernel/k_device_address_space.h b/src/core/hle/kernel/k_device_address_space.h
index b4a014c38..18556e3cc 100644
--- a/src/core/hle/kernel/k_device_address_space.h
+++ b/src/core/hle/kernel/k_device_address_space.h
@@ -5,8 +5,8 @@
 
 #include <string>
 
-#include "common/common_types.h"
 #include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/k_typed_address.h"
 #include "core/hle/kernel/slab_helpers.h"
 #include "core/hle/result.h"
 
@@ -31,23 +31,24 @@ public:
     Result Attach(Svc::DeviceName device_name);
     Result Detach(Svc::DeviceName device_name);
 
-    Result MapByForce(KPageTable* page_table, VAddr process_address, size_t size,
+    Result MapByForce(KPageTable* page_table, KProcessAddress process_address, size_t size,
                       u64 device_address, u32 option) {
         R_RETURN(this->Map(page_table, process_address, size, device_address, option, false));
     }
 
-    Result MapAligned(KPageTable* page_table, VAddr process_address, size_t size,
+    Result MapAligned(KPageTable* page_table, KProcessAddress process_address, size_t size,
                       u64 device_address, u32 option) {
         R_RETURN(this->Map(page_table, process_address, size, device_address, option, true));
     }
 
-    Result Unmap(KPageTable* page_table, VAddr process_address, size_t size, u64 device_address);
+    Result Unmap(KPageTable* page_table, KProcessAddress process_address, size_t size,
+                 u64 device_address);
 
     static void Initialize();
 
 private:
-    Result Map(KPageTable* page_table, VAddr process_address, size_t size, u64 device_address,
-               u32 option, bool is_aligned);
+    Result Map(KPageTable* page_table, KProcessAddress process_address, size_t size,
+               u64 device_address, u32 option, bool is_aligned);
 
 private:
     KLightLock m_lock;
diff --git a/src/core/hle/kernel/k_dynamic_page_manager.h b/src/core/hle/kernel/k_dynamic_page_manager.h
index ac80d60a1..ad11e84b7 100644
--- a/src/core/hle/kernel/k_dynamic_page_manager.h
+++ b/src/core/hle/kernel/k_dynamic_page_manager.h
@@ -6,9 +6,9 @@
 #include <vector>
 
 #include "common/alignment.h"
-#include "common/common_types.h"
 #include "core/hle/kernel/k_page_bitmap.h"
 #include "core/hle/kernel/k_spin_lock.h"
+#include "core/hle/kernel/k_typed_address.h"
 #include "core/hle/kernel/memory_types.h"
 #include "core/hle/kernel/svc_results.h"
 
@@ -26,23 +26,23 @@ public:
     KDynamicPageManager() = default;
 
     template <typename T>
-    T* GetPointer(VAddr addr) {
+    T* GetPointer(KVirtualAddress addr) {
         return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address));
     }
 
     template <typename T>
-    const T* GetPointer(VAddr addr) const {
+    const T* GetPointer(KVirtualAddress addr) const {
         return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address));
     }
 
-    Result Initialize(VAddr memory, size_t size, size_t align) {
+    Result Initialize(KVirtualAddress memory, size_t size, size_t align) {
         // We need to have positive size.
         R_UNLESS(size > 0, ResultOutOfMemory);
         m_backing_memory.resize(size);
 
         // Set addresses.
         m_address = memory;
-        m_aligned_address = Common::AlignDown(memory, align);
+        m_aligned_address = Common::AlignDown(GetInteger(memory), align);
 
         // Calculate extents.
         const size_t managed_size = m_address + size - m_aligned_address;
@@ -79,7 +79,7 @@ public:
         R_SUCCEED();
     }
 
-    VAddr GetAddress() const {
+    KVirtualAddress GetAddress() const {
         return m_address;
     }
     size_t GetSize() const {
@@ -145,7 +145,8 @@ public:
         KScopedSpinLock lk(m_lock);
 
         // Set the bit for the free page.
-        size_t offset = (reinterpret_cast<uintptr_t>(pb) - m_aligned_address) / sizeof(PageBuffer);
+        size_t offset =
+            (reinterpret_cast<uint64_t>(pb) - GetInteger(m_aligned_address)) / sizeof(PageBuffer);
         m_page_bitmap.SetBit(offset);
 
         // Decrement our used count.
@@ -158,8 +159,8 @@ private:
     size_t m_used{};
     size_t m_peak{};
     size_t m_count{};
-    VAddr m_address{};
-    VAddr m_aligned_address{};
+    KVirtualAddress m_address{};
+    KVirtualAddress m_aligned_address{};
     size_t m_size{};
 
     // TODO(bunnei): Back by host memory until we emulate kernel virtual address space.
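`Free` recovers a page's bitmap index from pointer arithmetic against the aligned base, which is why `m_aligned_address` now needs an explicit `GetInteger` before the subtraction. A self-contained sketch of the same computation:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr std::size_t PageBufferSize = 0x1000; // stand-in for sizeof(PageBuffer)

// Index of a page buffer = byte distance from the aligned base, divided by
// the size of one page buffer (mirrors the offset computation in Free()).
std::size_t PageIndexOf(uint64_t aligned_base, const void* page_buffer) {
    return (reinterpret_cast<uint64_t>(page_buffer) - aligned_base) / PageBufferSize;
}

int main() {
    alignas(PageBufferSize) static unsigned char pool[4 * PageBufferSize];
    const auto base = reinterpret_cast<uint64_t>(pool);
    assert(PageIndexOf(base, pool + 2 * PageBufferSize) == 2);
}
```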
diff --git a/src/core/hle/kernel/k_dynamic_slab_heap.h b/src/core/hle/kernel/k_dynamic_slab_heap.h
index 3a0ddd050..76ed4cac1 100644
--- a/src/core/hle/kernel/k_dynamic_slab_heap.h
+++ b/src/core/hle/kernel/k_dynamic_slab_heap.h
@@ -19,7 +19,7 @@ class KDynamicSlabHeap : protected impl::KSlabHeapImpl {
 public:
     constexpr KDynamicSlabHeap() = default;
 
-    constexpr VAddr GetAddress() const {
+    constexpr KVirtualAddress GetAddress() const {
         return m_address;
     }
     constexpr size_t GetSize() const {
@@ -35,7 +35,7 @@ public:
         return m_count.load();
     }
 
-    constexpr bool IsInRange(VAddr addr) const {
+    constexpr bool IsInRange(KVirtualAddress addr) const {
         return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1;
     }
 
@@ -115,7 +115,7 @@ private:
     std::atomic<size_t> m_used{};
     std::atomic<size_t> m_peak{};
     std::atomic<size_t> m_count{};
-    VAddr m_address{};
+    KVirtualAddress m_address{};
     size_t m_size{};
 };
 
diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h
index e01929da6..41a29da24 100644
--- a/src/core/hle/kernel/k_memory_block.h
+++ b/src/core/hle/kernel/k_memory_block.h
@@ -5,8 +5,8 @@
 
 #include "common/alignment.h"
 #include "common/assert.h"
-#include "common/common_types.h"
 #include "common/intrusive_red_black_tree.h"
+#include "core/hle/kernel/k_typed_address.h"
 #include "core/hle/kernel/memory_types.h"
 #include "core/hle/kernel/svc_types.h"
 
@@ -282,7 +282,7 @@ class KMemoryBlock : public Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock>
 private:
     u16 m_device_disable_merge_left_count{};
     u16 m_device_disable_merge_right_count{};
-    VAddr m_address{};
+    KProcessAddress m_address{};
     size_t m_num_pages{};
     KMemoryState m_memory_state{KMemoryState::None};
     u16 m_ipc_lock_count{};
@@ -306,7 +306,7 @@ public:
     }
 
 public:
-    constexpr VAddr GetAddress() const {
+    constexpr KProcessAddress GetAddress() const {
         return m_address;
     }
 
@@ -318,11 +318,11 @@ public:
         return this->GetNumPages() * PageSize;
     }
 
-    constexpr VAddr GetEndAddress() const {
+    constexpr KProcessAddress GetEndAddress() const {
         return this->GetAddress() + this->GetSize();
     }
 
-    constexpr VAddr GetLastAddress() const {
+    constexpr KProcessAddress GetLastAddress() const {
         return this->GetEndAddress() - 1;
     }
 
@@ -348,7 +348,7 @@ public:
 
     constexpr KMemoryInfo GetMemoryInfo() const {
         return {
-            .m_address = this->GetAddress(),
+            .m_address = GetInteger(this->GetAddress()),
             .m_size = this->GetSize(),
             .m_state = m_memory_state,
             .m_device_disable_merge_left_count = m_device_disable_merge_left_count,
@@ -366,12 +366,12 @@ public:
 public:
     explicit KMemoryBlock() = default;
 
-    constexpr KMemoryBlock(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
+    constexpr KMemoryBlock(KProcessAddress addr, size_t np, KMemoryState ms, KMemoryPermission p,
                            KMemoryAttribute attr)
         : Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock>(), m_address(addr), m_num_pages(np),
           m_memory_state(ms), m_permission(p), m_attribute(attr) {}
 
-    constexpr void Initialize(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
+    constexpr void Initialize(KProcessAddress addr, size_t np, KMemoryState ms, KMemoryPermission p,
                               KMemoryAttribute attr) {
         m_device_disable_merge_left_count = 0;
         m_device_disable_merge_right_count = 0;
@@ -408,7 +408,7 @@ public:
             KMemoryBlockDisableMergeAttribute::None;
     }
 
-    constexpr bool Contains(VAddr addr) const {
+    constexpr bool Contains(KProcessAddress addr) const {
         return this->GetAddress() <= addr && addr <= this->GetEndAddress();
     }
 
@@ -443,10 +443,10 @@ public:
         }
     }
 
-    constexpr void Split(KMemoryBlock* block, VAddr addr) {
+    constexpr void Split(KMemoryBlock* block, KProcessAddress addr) {
         ASSERT(this->GetAddress() < addr);
         ASSERT(this->Contains(addr));
-        ASSERT(Common::IsAligned(addr, PageSize));
+        ASSERT(Common::IsAligned(GetInteger(addr), PageSize));
 
         block->m_address = m_address;
         block->m_num_pages = (addr - this->GetAddress()) / PageSize;
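`Split` carves one block into two at a page-aligned cut point; the assertions above pin the preconditions. A self-contained sketch of the bookkeeping, with field names abbreviated from the real KMemoryBlock:

```cpp
#include <cassert>
#include <cstdint>

constexpr uint64_t PageSize = 0x1000;

struct Block {
    uint64_t address{};
    uint64_t num_pages{};
};

// Split 'cur' at 'addr'; 'front' receives the pages in [cur.address, addr)
// and 'cur' keeps the remainder starting at 'addr'.
void Split(Block& cur, Block& front, uint64_t addr) {
    assert(cur.address < addr && addr % PageSize == 0);
    front.address = cur.address;
    front.num_pages = (addr - cur.address) / PageSize;
    cur.num_pages -= front.num_pages;
    cur.address = addr;
}

int main() {
    Block b{0x10000, 8}, front{};
    Split(b, front, 0x12000);
    assert(front.num_pages == 2 && b.address == 0x12000 && b.num_pages == 6);
}
```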
diff --git a/src/core/hle/kernel/k_memory_block_manager.cpp b/src/core/hle/kernel/k_memory_block_manager.cpp
index cf4c1e371..ab75f550e 100644
--- a/src/core/hle/kernel/k_memory_block_manager.cpp
+++ b/src/core/hle/kernel/k_memory_block_manager.cpp
@@ -7,7 +7,8 @@ namespace Kernel {
 
 KMemoryBlockManager::KMemoryBlockManager() = default;
 
-Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager) {
+Result KMemoryBlockManager::Initialize(KProcessAddress st, KProcessAddress nd,
+                                       KMemoryBlockSlabManager* slab_manager) {
     // Allocate a block to encapsulate the address space, insert it into the tree.
     KMemoryBlock* start_block = slab_manager->Allocate();
     R_UNLESS(start_block != nullptr, ResultOutOfResource);
@@ -15,8 +16,8 @@ Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManag
     // Set our start and end.
     m_start_address = st;
     m_end_address = nd;
-    ASSERT(Common::IsAligned(m_start_address, PageSize));
-    ASSERT(Common::IsAligned(m_end_address, PageSize));
+    ASSERT(Common::IsAligned(GetInteger(m_start_address), PageSize));
+    ASSERT(Common::IsAligned(GetInteger(m_end_address), PageSize));
 
     // Initialize and insert the block.
     start_block->Initialize(m_start_address, (m_end_address - m_start_address) / PageSize,
@@ -40,12 +41,13 @@ void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager* slab_manager,
     ASSERT(m_memory_block_tree.empty());
 }
 
-VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pages,
-                                        size_t num_pages, size_t alignment, size_t offset,
-                                        size_t guard_pages) const {
+KProcessAddress KMemoryBlockManager::FindFreeArea(KProcessAddress region_start,
+                                                  size_t region_num_pages, size_t num_pages,
+                                                  size_t alignment, size_t offset,
+                                                  size_t guard_pages) const {
     if (num_pages > 0) {
-        const VAddr region_end = region_start + region_num_pages * PageSize;
-        const VAddr region_last = region_end - 1;
+        const KProcessAddress region_end = region_start + region_num_pages * PageSize;
+        const KProcessAddress region_last = region_end - 1;
         for (const_iterator it = this->FindIterator(region_start); it != m_memory_block_tree.cend();
              it++) {
             const KMemoryInfo info = it->GetMemoryInfo();
@@ -56,17 +58,19 @@ VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pa
                 continue;
             }
 
-            VAddr area = (info.GetAddress() <= region_start) ? region_start : info.GetAddress();
+            KProcessAddress area =
+                (info.GetAddress() <= GetInteger(region_start)) ? region_start : info.GetAddress();
             area += guard_pages * PageSize;
 
-            const VAddr offset_area = Common::AlignDown(area, alignment) + offset;
+            const KProcessAddress offset_area =
+                Common::AlignDown(GetInteger(area), alignment) + offset;
             area = (area <= offset_area) ? offset_area : offset_area + alignment;
 
-            const VAddr area_end = area + num_pages * PageSize + guard_pages * PageSize;
-            const VAddr area_last = area_end - 1;
+            const KProcessAddress area_end = area + num_pages * PageSize + guard_pages * PageSize;
+            const KProcessAddress area_last = area_end - 1;
 
-            if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
-                area_last <= info.GetLastAddress()) {
+            if (info.GetAddress() <= GetInteger(area) && area < area_last &&
+                area_last <= region_last && area_last <= info.GetLastAddress()) {
                 return area;
             }
         }
@@ -76,7 +80,7 @@ VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pa
 }
 
 void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator,
-                                            VAddr address, size_t num_pages) {
+                                            KProcessAddress address, size_t num_pages) {
     // Find the iterator now that we've updated.
     iterator it = this->FindIterator(address);
     if (address != m_start_address) {
@@ -104,18 +108,18 @@ void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator*
     }
 }
 
-void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
-                                 size_t num_pages, KMemoryState state, KMemoryPermission perm,
-                                 KMemoryAttribute attr,
+void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator,
+                                 KProcessAddress address, size_t num_pages, KMemoryState state,
+                                 KMemoryPermission perm, KMemoryAttribute attr,
                                  KMemoryBlockDisableMergeAttribute set_disable_attr,
                                  KMemoryBlockDisableMergeAttribute clear_disable_attr) {
     // Ensure for auditing that we never end up with an invalid tree.
     KScopedMemoryBlockManagerAuditor auditor(this);
-    ASSERT(Common::IsAligned(address, PageSize));
+    ASSERT(Common::IsAligned(GetInteger(address), PageSize));
     ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
            KMemoryAttribute::None);
 
-    VAddr cur_address = address;
+    KProcessAddress cur_address = address;
     size_t remaining_pages = num_pages;
     iterator it = this->FindIterator(address);
 
@@ -168,17 +172,17 @@ void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator,
 }
 
 void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator,
-                                        VAddr address, size_t num_pages, KMemoryState test_state,
-                                        KMemoryPermission test_perm, KMemoryAttribute test_attr,
-                                        KMemoryState state, KMemoryPermission perm,
-                                        KMemoryAttribute attr) {
+                                        KProcessAddress address, size_t num_pages,
+                                        KMemoryState test_state, KMemoryPermission test_perm,
+                                        KMemoryAttribute test_attr, KMemoryState state,
+                                        KMemoryPermission perm, KMemoryAttribute attr) {
     // Ensure for auditing that we never end up with an invalid tree.
     KScopedMemoryBlockManagerAuditor auditor(this);
-    ASSERT(Common::IsAligned(address, PageSize));
+    ASSERT(Common::IsAligned(GetInteger(address), PageSize));
     ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
            KMemoryAttribute::None);
 
-    VAddr cur_address = address;
+    KProcessAddress cur_address = address;
     size_t remaining_pages = num_pages;
     iterator it = this->FindIterator(address);
 
@@ -230,18 +234,18 @@ void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllo
     this->CoalesceForUpdate(allocator, address, num_pages);
 }
 
-void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
-                                     size_t num_pages, MemoryBlockLockFunction lock_func,
-                                     KMemoryPermission perm) {
+void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator,
+                                     KProcessAddress address, size_t num_pages,
+                                     MemoryBlockLockFunction lock_func, KMemoryPermission perm) {
     // Ensure for auditing that we never end up with an invalid tree.
     KScopedMemoryBlockManagerAuditor auditor(this);
-    ASSERT(Common::IsAligned(address, PageSize));
+    ASSERT(Common::IsAligned(GetInteger(address), PageSize));
 
-    VAddr cur_address = address;
+    KProcessAddress cur_address = address;
     size_t remaining_pages = num_pages;
     iterator it = this->FindIterator(address);
 
-    const VAddr end_address = address + (num_pages * PageSize);
+    const KProcessAddress end_address = address + (num_pages * PageSize);
 
     while (remaining_pages > 0) {
         const size_t remaining_size = remaining_pages * PageSize;
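The trickiest arithmetic in `FindFreeArea` is the align-then-offset step: the candidate is aligned down, the requested page offset is added, and if the result fell behind the original candidate it is bumped forward one alignment unit. A self-contained sketch with a worked example:

```cpp
#include <cassert>
#include <cstdint>

constexpr uint64_t AlignDown(uint64_t value, uint64_t align) {
    return value - (value % align);
}

// Mirrors the candidate-address computation in FindFreeArea above.
uint64_t AlignCandidate(uint64_t area, uint64_t alignment, uint64_t offset) {
    const uint64_t offset_area = AlignDown(area, alignment) + offset;
    return (area <= offset_area) ? offset_area : offset_area + alignment;
}

int main() {
    // A candidate at 0x13000 with 0x10000 alignment and 0x2000 offset:
    // 0x10000 + 0x2000 = 0x12000 lies behind it, so bump one unit to 0x22000.
    assert(AlignCandidate(0x13000, 0x10000, 0x2000) == 0x22000);
    // A candidate already at or before the offset slot snaps directly onto it.
    assert(AlignCandidate(0x11000, 0x10000, 0x2000) == 0x12000);
}
```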
diff --git a/src/core/hle/kernel/k_memory_block_manager.h b/src/core/hle/kernel/k_memory_block_manager.h
index d382722a6..7c0bd16f0 100644
--- a/src/core/hle/kernel/k_memory_block_manager.h
+++ b/src/core/hle/kernel/k_memory_block_manager.h
@@ -7,9 +7,9 @@
 #include <functional>
 
 #include "common/common_funcs.h"
-#include "common/common_types.h"
 #include "core/hle/kernel/k_dynamic_resource_manager.h"
 #include "core/hle/kernel/k_memory_block.h"
+#include "core/hle/kernel/k_typed_address.h"
 
 namespace Kernel {
 
@@ -85,9 +85,10 @@ public:
 public:
     KMemoryBlockManager();
 
-    using HostUnmapCallback = std::function<void(VAddr, u64)>;
+    using HostUnmapCallback = std::function<void(Common::ProcessAddress, u64)>;
 
-    Result Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager);
+    Result Initialize(KProcessAddress st, KProcessAddress nd,
+                      KMemoryBlockSlabManager* slab_manager);
     void Finalize(KMemoryBlockSlabManager* slab_manager, HostUnmapCallback&& host_unmap_callback);
 
     iterator end() {
@@ -100,27 +101,28 @@ public:
         return m_memory_block_tree.cend();
     }
 
-    VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
-                       size_t alignment, size_t offset, size_t guard_pages) const;
+    KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
+                                 size_t num_pages, size_t alignment, size_t offset,
+                                 size_t guard_pages) const;
 
-    void Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
-                KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr,
+    void Update(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
+                size_t num_pages, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr,
                 KMemoryBlockDisableMergeAttribute set_disable_attr,
                 KMemoryBlockDisableMergeAttribute clear_disable_attr);
-    void UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
-                    MemoryBlockLockFunction lock_func, KMemoryPermission perm);
+    void UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
+                    size_t num_pages, MemoryBlockLockFunction lock_func, KMemoryPermission perm);
 
-    void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+    void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
                        size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm,
                        KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm,
                        KMemoryAttribute attr);
 
-    iterator FindIterator(VAddr address) const {
+    iterator FindIterator(KProcessAddress address) const {
         return m_memory_block_tree.find(KMemoryBlock(
             address, 1, KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None));
     }
 
-    const KMemoryBlock* FindBlock(VAddr address) const {
+    const KMemoryBlock* FindBlock(KProcessAddress address) const {
         if (const_iterator it = this->FindIterator(address); it != m_memory_block_tree.end()) {
             return std::addressof(*it);
         }
@@ -132,12 +134,12 @@ public:
     bool CheckState() const;
 
 private:
-    void CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+    void CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
                            size_t num_pages);
 
     MemoryBlockTree m_memory_block_tree;
-    VAddr m_start_address{};
-    VAddr m_end_address{};
+    KProcessAddress m_start_address{};
+    KProcessAddress m_end_address{};
 };
 
 class KScopedMemoryBlockManagerAuditor {
diff --git a/src/core/hle/kernel/k_memory_layout.cpp b/src/core/hle/kernel/k_memory_layout.cpp
index 9ff751119..af40092c0 100644
--- a/src/core/hle/kernel/k_memory_layout.cpp
+++ b/src/core/hle/kernel/k_memory_layout.cpp
@@ -85,7 +85,8 @@ bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_at
     return true;
 }
 
-VAddr KMemoryRegionTree::GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id) {
+KVirtualAddress KMemoryRegionTree::GetRandomAlignedRegion(size_t size, size_t alignment,
+                                                          u32 type_id) {
     // We want to find the total extents of the type id.
     const auto extents = this->GetDerivedRegionExtents(static_cast<KMemoryRegionType>(type_id));
 
@@ -130,11 +131,13 @@ KMemoryLayout::KMemoryLayout()
       m_virtual_linear_tree{m_memory_region_allocator}, m_physical_linear_tree{
                                                             m_memory_region_allocator} {}
 
-void KMemoryLayout::InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start,
-                                                      VAddr linear_virtual_start) {
+void KMemoryLayout::InitializeLinearMemoryRegionTrees(KPhysicalAddress aligned_linear_phys_start,
+                                                      KVirtualAddress linear_virtual_start) {
     // Set static differences.
-    m_linear_phys_to_virt_diff = linear_virtual_start - aligned_linear_phys_start;
-    m_linear_virt_to_phys_diff = aligned_linear_phys_start - linear_virtual_start;
+    m_linear_phys_to_virt_diff =
+        GetInteger(linear_virtual_start) - GetInteger(aligned_linear_phys_start);
+    m_linear_virt_to_phys_diff =
+        GetInteger(aligned_linear_phys_start) - GetInteger(linear_virtual_start);
 
     // Initialize linear trees.
     for (auto& region : GetPhysicalMemoryRegionTree()) {
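The two differences stored above make linear-map translation a single addition in each direction. They are computed on raw integers because subtracting a physical address from a virtual one is exactly the cross-type operation the typed wrappers exist to forbid. A self-contained sketch:

```cpp
#include <cassert>
#include <cstdint>

struct LinearMap {
    uint64_t phys_to_virt_diff{};
    uint64_t virt_to_phys_diff{};

    void Initialize(uint64_t aligned_linear_phys_start, uint64_t linear_virtual_start) {
        // Unsigned wrap-around makes the two differences exact inverses.
        phys_to_virt_diff = linear_virtual_start - aligned_linear_phys_start;
        virt_to_phys_diff = aligned_linear_phys_start - linear_virtual_start;
    }
    uint64_t ToVirtual(uint64_t phys) const {
        return phys + phys_to_virt_diff;
    }
    uint64_t ToPhysical(uint64_t virt) const {
        return virt + virt_to_phys_diff;
    }
};

int main() {
    LinearMap map;
    map.Initialize(/*phys=*/0x80000000, /*virt=*/0xFFFFFF8000000000);
    assert(map.ToVirtual(0x80001000) == 0xFFFFFF8000001000);
    assert(map.ToPhysical(map.ToVirtual(0x80001000)) == 0x80001000);
}
```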
diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h
index 551b7a0e4..54a71df56 100644
--- a/src/core/hle/kernel/k_memory_layout.h
+++ b/src/core/hle/kernel/k_memory_layout.h
@@ -10,6 +10,7 @@
10#include "core/device_memory.h" 10#include "core/device_memory.h"
11#include "core/hle/kernel/k_memory_region.h" 11#include "core/hle/kernel/k_memory_region.h"
12#include "core/hle/kernel/k_memory_region_type.h" 12#include "core/hle/kernel/k_memory_region_type.h"
13#include "core/hle/kernel/k_typed_address.h"
13#include "core/hle/kernel/memory_types.h" 14#include "core/hle/kernel/memory_types.h"
14 15
15namespace Kernel { 16namespace Kernel {
@@ -69,10 +70,11 @@ constexpr std::size_t KernelResourceSize = KernelPageTableHeapSize + KernelIniti
69 70
70//! NB: Use KThread::GetAddressKeyIsKernel(). 71//! NB: Use KThread::GetAddressKeyIsKernel().
71//! See explanation for deviation of GetAddressKey. 72//! See explanation for deviation of GetAddressKey.
72bool IsKernelAddressKey(VAddr key) = delete; 73bool IsKernelAddressKey(KProcessAddress key) = delete;
73 74
74constexpr bool IsKernelAddress(VAddr address) { 75constexpr bool IsKernelAddress(KProcessAddress address) {
75 return KernelVirtualAddressSpaceBase <= address && address < KernelVirtualAddressSpaceEnd; 76 return KernelVirtualAddressSpaceBase <= GetInteger(address) &&
77 address < KernelVirtualAddressSpaceEnd;
76} 78}
77 79
78class KMemoryLayout final { 80class KMemoryLayout final {
@@ -104,38 +106,38 @@ public:
         return m_physical_linear_tree;
     }
 
-    VAddr GetLinearVirtualAddress(PAddr address) const {
-        return address + m_linear_phys_to_virt_diff;
+    KVirtualAddress GetLinearVirtualAddress(KPhysicalAddress address) const {
+        return GetInteger(address) + m_linear_phys_to_virt_diff;
     }
-    PAddr GetLinearPhysicalAddress(VAddr address) const {
-        return address + m_linear_virt_to_phys_diff;
+    KPhysicalAddress GetLinearPhysicalAddress(KVirtualAddress address) const {
+        return GetInteger(address) + m_linear_virt_to_phys_diff;
     }
 
-    const KMemoryRegion* FindVirtual(VAddr address) const {
+    const KMemoryRegion* FindVirtual(KVirtualAddress address) const {
         return Find(address, GetVirtualMemoryRegionTree());
     }
-    const KMemoryRegion* FindPhysical(PAddr address) const {
+    const KMemoryRegion* FindPhysical(KPhysicalAddress address) const {
         return Find(address, GetPhysicalMemoryRegionTree());
     }
 
-    const KMemoryRegion* FindVirtualLinear(VAddr address) const {
+    const KMemoryRegion* FindVirtualLinear(KVirtualAddress address) const {
         return Find(address, GetVirtualLinearMemoryRegionTree());
     }
-    const KMemoryRegion* FindPhysicalLinear(PAddr address) const {
+    const KMemoryRegion* FindPhysicalLinear(KPhysicalAddress address) const {
         return Find(address, GetPhysicalLinearMemoryRegionTree());
     }
 
-    VAddr GetMainStackTopAddress(s32 core_id) const {
+    KVirtualAddress GetMainStackTopAddress(s32 core_id) const {
         return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscMainStack);
     }
-    VAddr GetIdleStackTopAddress(s32 core_id) const {
+    KVirtualAddress GetIdleStackTopAddress(s32 core_id) const {
         return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscIdleStack);
     }
-    VAddr GetExceptionStackTopAddress(s32 core_id) const {
+    KVirtualAddress GetExceptionStackTopAddress(s32 core_id) const {
         return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscExceptionStack);
     }
 
-    VAddr GetSlabRegionAddress() const {
+    KVirtualAddress GetSlabRegionAddress() const {
         return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab))
             .GetAddress();
     }
@@ -143,10 +145,10 @@ public:
     const KMemoryRegion& GetDeviceRegion(KMemoryRegionType type) const {
         return Dereference(GetPhysicalMemoryRegionTree().FindFirstDerived(type));
     }
-    PAddr GetDevicePhysicalAddress(KMemoryRegionType type) const {
+    KPhysicalAddress GetDevicePhysicalAddress(KMemoryRegionType type) const {
         return GetDeviceRegion(type).GetAddress();
     }
-    VAddr GetDeviceVirtualAddress(KMemoryRegionType type) const {
+    KVirtualAddress GetDeviceVirtualAddress(KMemoryRegionType type) const {
         return GetDeviceRegion(type).GetPairAddress();
     }
 
@@ -175,11 +177,11 @@ public:
                                    KMemoryRegionType_VirtualDramKernelSecureAppletMemory));
     }
 
-    const KMemoryRegion& GetVirtualLinearRegion(VAddr address) const {
+    const KMemoryRegion& GetVirtualLinearRegion(KVirtualAddress address) const {
         return Dereference(FindVirtualLinear(address));
     }
 
-    const KMemoryRegion& GetPhysicalLinearRegion(PAddr address) const {
+    const KMemoryRegion& GetPhysicalLinearRegion(KPhysicalAddress address) const {
         return Dereference(FindPhysicalLinear(address));
     }
 
@@ -193,29 +195,32 @@ public:
         return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DTB);
     }
 
-    bool IsHeapPhysicalAddress(const KMemoryRegion*& region, PAddr address) const {
+    bool IsHeapPhysicalAddress(const KMemoryRegion*& region, KPhysicalAddress address) const {
         return IsTypedAddress(region, address, GetPhysicalLinearMemoryRegionTree(),
                               KMemoryRegionType_DramUserPool);
     }
-    bool IsHeapVirtualAddress(const KMemoryRegion*& region, VAddr address) const {
+    bool IsHeapVirtualAddress(const KMemoryRegion*& region, KVirtualAddress address) const {
         return IsTypedAddress(region, address, GetVirtualLinearMemoryRegionTree(),
                               KMemoryRegionType_VirtualDramUserPool);
     }
 
-    bool IsHeapPhysicalAddress(const KMemoryRegion*& region, PAddr address, size_t size) const {
+    bool IsHeapPhysicalAddress(const KMemoryRegion*& region, KPhysicalAddress address,
+                               size_t size) const {
         return IsTypedAddress(region, address, size, GetPhysicalLinearMemoryRegionTree(),
                               KMemoryRegionType_DramUserPool);
     }
-    bool IsHeapVirtualAddress(const KMemoryRegion*& region, VAddr address, size_t size) const {
+    bool IsHeapVirtualAddress(const KMemoryRegion*& region, KVirtualAddress address,
+                              size_t size) const {
         return IsTypedAddress(region, address, size, GetVirtualLinearMemoryRegionTree(),
                               KMemoryRegionType_VirtualDramUserPool);
     }
 
-    bool IsLinearMappedPhysicalAddress(const KMemoryRegion*& region, PAddr address) const {
+    bool IsLinearMappedPhysicalAddress(const KMemoryRegion*& region,
+                                       KPhysicalAddress address) const {
         return IsTypedAddress(region, address, GetPhysicalLinearMemoryRegionTree(),
                               static_cast<KMemoryRegionType>(KMemoryRegionAttr_LinearMapped));
     }
-    bool IsLinearMappedPhysicalAddress(const KMemoryRegion*& region, PAddr address,
+    bool IsLinearMappedPhysicalAddress(const KMemoryRegion*& region, KPhysicalAddress address,
                                        size_t size) const {
         return IsTypedAddress(region, address, size, GetPhysicalLinearMemoryRegionTree(),
                               static_cast<KMemoryRegionType>(KMemoryRegionAttr_LinearMapped));
@@ -234,8 +239,8 @@ public:
         return std::make_pair(total_size, kernel_size);
     }
 
-    void InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start,
-                                           VAddr linear_virtual_start);
+    void InitializeLinearMemoryRegionTrees(KPhysicalAddress aligned_linear_phys_start,
+                                           KVirtualAddress linear_virtual_start);
     static size_t GetResourceRegionSizeForInit(bool use_extra_resource);
 
     auto GetKernelRegionExtents() const {
@@ -261,8 +266,8 @@ public:
 
     auto GetLinearRegionVirtualExtents() const {
         const auto physical = GetLinearRegionPhysicalExtents();
-        return KMemoryRegion(GetLinearVirtualAddress(physical.GetAddress()),
-                             GetLinearVirtualAddress(physical.GetLastAddress()), 0,
+        return KMemoryRegion(GetInteger(GetLinearVirtualAddress(physical.GetAddress())),
+                             GetInteger(GetLinearVirtualAddress(physical.GetLastAddress())), 0,
                              KMemoryRegionType_None);
     }
 
@@ -334,12 +339,12 @@ private:
     static bool IsTypedAddress(const KMemoryRegion*& region, AddressType address,
                                const KMemoryRegionTree& tree, KMemoryRegionType type) {
         // Check if the cached region already contains the address.
-        if (region != nullptr && region->Contains(address)) {
+        if (region != nullptr && region->Contains(GetInteger(address))) {
            return true;
         }
 
         // Find the containing region, and update the cache.
-        if (const KMemoryRegion* found = tree.Find(address);
+        if (const KMemoryRegion* found = tree.Find(GetInteger(address));
             found != nullptr && found->IsDerivedFrom(type)) {
             region = found;
             return true;
@@ -352,11 +357,12 @@ private:
     static bool IsTypedAddress(const KMemoryRegion*& region, AddressType address, size_t size,
                                const KMemoryRegionTree& tree, KMemoryRegionType type) {
         // Get the end of the checked region.
-        const u64 last_address = address + size - 1;
+        const u64 last_address = GetInteger(address) + size - 1;
 
         // Walk the tree to verify the region is correct.
-        const KMemoryRegion* cur =
-            (region != nullptr && region->Contains(address)) ? region : tree.Find(address);
+        const KMemoryRegion* cur = (region != nullptr && region->Contains(GetInteger(address)))
                                       ? region
                                       : tree.Find(GetInteger(address));
        while (cur != nullptr && cur->IsDerivedFrom(type)) {
             if (last_address <= cur->GetLastAddress()) {
                 region = cur;
@@ -370,7 +376,7 @@ private:
 
     template <typename AddressType>
     static const KMemoryRegion* Find(AddressType address, const KMemoryRegionTree& tree) {
-        return tree.Find(address);
+        return tree.Find(GetInteger(address));
     }
 
     static KMemoryRegion& Dereference(KMemoryRegion* region) {
@@ -383,7 +389,7 @@ private:
         return *region;
     }
 
-    VAddr GetStackTopAddress(s32 core_id, KMemoryRegionType type) const {
+    KVirtualAddress GetStackTopAddress(s32 core_id, KMemoryRegionType type) const {
         const auto& region = Dereference(
             GetVirtualMemoryRegionTree().FindByTypeAndAttribute(type, static_cast<u32>(core_id)));
         ASSERT(region.GetEndAddress() != 0);
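`IsTypedAddress` threads a caller-owned `region` pointer through as a one-entry cache: a hit skips the tree walk, a miss refills it. A self-contained sketch of the pattern, with `std::map` standing in for `KMemoryRegionTree`:

```cpp
#include <cassert>
#include <cstdint>
#include <map>

struct Region {
    uint64_t first{}, last{}; // inclusive bounds
    bool Contains(uint64_t addr) const {
        return first <= addr && addr <= last;
    }
};

bool IsInRegion(const Region*& cached, uint64_t address,
                const std::map<uint64_t, Region>& tree) {
    // Fast path: the cached region from the previous query still matches.
    if (cached != nullptr && cached->Contains(address)) {
        return true;
    }
    // Slow path: find the last region starting at or below the address.
    auto it = tree.upper_bound(address);
    if (it == tree.begin()) {
        return false;
    }
    --it;
    if (it->second.Contains(address)) {
        cached = &it->second; // refill the cache for the next query
        return true;
    }
    return false;
}

int main() {
    std::map<uint64_t, Region> tree{{0x1000, {0x1000, 0x1FFF}}, {0x4000, {0x4000, 0x7FFF}}};
    const Region* cache = nullptr;
    assert(IsInRegion(cache, 0x4100, tree));
    assert(IsInRegion(cache, 0x4200, tree)); // served from the cache
    assert(!IsInRegion(cache, 0x3000, tree));
}
```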
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index cd6ea388e..74d8169e0 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -5,7 +5,6 @@
 
 #include "common/alignment.h"
 #include "common/assert.h"
-#include "common/common_types.h"
 #include "common/scope_exit.h"
 #include "core/core.h"
 #include "core/device_memory.h"
@@ -44,10 +43,10 @@ KMemoryManager::KMemoryManager(Core::System& system)
           KLightLock{system.Kernel()},
       } {}
 
-void KMemoryManager::Initialize(VAddr management_region, size_t management_region_size) {
+void KMemoryManager::Initialize(KVirtualAddress management_region, size_t management_region_size) {
 
     // Clear the management region to zero.
-    const VAddr management_region_end = management_region + management_region_size;
+    const KVirtualAddress management_region_end = management_region + management_region_size;
     // std::memset(GetVoidPointer(management_region), 0, management_region_size);
 
     // Reset our manager count.
@@ -56,7 +55,7 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
     // Traverse the virtual memory layout tree, initializing each manager as appropriate.
     while (m_num_managers != MaxManagerCount) {
         // Locate the region that should initialize the current manager.
-        PAddr region_address = 0;
+        KPhysicalAddress region_address = 0;
         size_t region_size = 0;
         Pool region_pool = Pool::Count;
         for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
@@ -70,8 +69,8 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
                 continue;
             }
 
-            const PAddr cur_start = it.GetAddress();
-            const PAddr cur_end = it.GetEndAddress();
+            const KPhysicalAddress cur_start = it.GetAddress();
+            const KPhysicalAddress cur_end = it.GetEndAddress();
 
             // Validate the region.
             ASSERT(cur_end != 0);
@@ -119,17 +118,17 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
 
     // Free each region to its corresponding heap.
     size_t reserved_sizes[MaxManagerCount] = {};
-    const PAddr ini_start = GetInitialProcessBinaryPhysicalAddress();
-    const PAddr ini_end = ini_start + InitialProcessBinarySizeMax;
-    const PAddr ini_last = ini_end - 1;
+    const KPhysicalAddress ini_start = GetInitialProcessBinaryPhysicalAddress();
+    const KPhysicalAddress ini_end = ini_start + InitialProcessBinarySizeMax;
+    const KPhysicalAddress ini_last = ini_end - 1;
     for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
         if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
             // Get the manager for the region.
             auto& manager = m_managers[it.GetAttributes()];
 
-            const PAddr cur_start = it.GetAddress();
-            const PAddr cur_last = it.GetLastAddress();
-            const PAddr cur_end = it.GetEndAddress();
+            const KPhysicalAddress cur_start = it.GetAddress();
+            const KPhysicalAddress cur_last = it.GetLastAddress();
+            const KPhysicalAddress cur_end = it.GetEndAddress();
 
             if (cur_start <= ini_start && ini_last <= cur_last) {
                 // Free memory before the ini to the heap.
@@ -175,7 +174,8 @@ void KMemoryManager::FinalizeOptimizedMemory(u64 process_id, Pool pool) {
     UNREACHABLE();
 }
 
-PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) {
+KPhysicalAddress KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages,
+                                                           u32 option) {
     // Early return if we're allocating no pages.
180 if (num_pages == 0) { 180 if (num_pages == 0) {
181 return 0; 181 return 0;
@@ -190,7 +190,7 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
190 190
191 // Loop, trying to iterate from each block. 191 // Loop, trying to iterate from each block.
192 Impl* chosen_manager = nullptr; 192 Impl* chosen_manager = nullptr;
193 PAddr allocated_block = 0; 193 KPhysicalAddress allocated_block = 0;
194 for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr; 194 for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr;
195 chosen_manager = this->GetNextManager(chosen_manager, dir)) { 195 chosen_manager = this->GetNextManager(chosen_manager, dir)) {
196 allocated_block = chosen_manager->AllocateAligned(heap_index, num_pages, align_pages); 196 allocated_block = chosen_manager->AllocateAligned(heap_index, num_pages, align_pages);
@@ -239,7 +239,7 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,
239 cur_manager = this->GetNextManager(cur_manager, dir)) { 239 cur_manager = this->GetNextManager(cur_manager, dir)) {
240 while (num_pages >= pages_per_alloc) { 240 while (num_pages >= pages_per_alloc) {
241 // Allocate a block. 241 // Allocate a block.
242 PAddr allocated_block = cur_manager->AllocateBlock(index, random); 242 KPhysicalAddress allocated_block = cur_manager->AllocateBlock(index, random);
243 if (allocated_block == 0) { 243 if (allocated_block == 0) {
244 break; 244 break;
245 } 245 }
@@ -286,7 +286,7 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
286 286
287 // Open the first reference to the pages. 287 // Open the first reference to the pages.
288 for (const auto& block : *out) { 288 for (const auto& block : *out) {
289 PAddr cur_address = block.GetAddress(); 289 KPhysicalAddress cur_address = block.GetAddress();
290 size_t remaining_pages = block.GetNumPages(); 290 size_t remaining_pages = block.GetNumPages();
291 while (remaining_pages > 0) { 291 while (remaining_pages > 0) {
292 // Get the manager for the current address. 292 // Get the manager for the current address.
@@ -337,7 +337,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
337 // Iterate over the allocated blocks. 337 // Iterate over the allocated blocks.
338 for (const auto& block : *out) { 338 for (const auto& block : *out) {
339 // Get the block extents. 339 // Get the block extents.
340 const PAddr block_address = block.GetAddress(); 340 const KPhysicalAddress block_address = block.GetAddress();
341 const size_t block_pages = block.GetNumPages(); 341 const size_t block_pages = block.GetNumPages();
342 342
343 // If it has no pages, we don't need to do anything. 343 // If it has no pages, we don't need to do anything.
@@ -348,7 +348,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
348 // Fill all the pages that we need to fill. 348 // Fill all the pages that we need to fill.
349 bool any_new = false; 349 bool any_new = false;
350 { 350 {
351 PAddr cur_address = block_address; 351 KPhysicalAddress cur_address = block_address;
352 size_t remaining_pages = block_pages; 352 size_t remaining_pages = block_pages;
353 while (remaining_pages > 0) { 353 while (remaining_pages > 0) {
354 // Get the manager for the current address. 354 // Get the manager for the current address.
@@ -369,7 +369,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
369 // If there are new pages, update tracking for the allocation. 369 // If there are new pages, update tracking for the allocation.
370 if (any_new) { 370 if (any_new) {
371 // Update tracking for the allocation. 371 // Update tracking for the allocation.
372 PAddr cur_address = block_address; 372 KPhysicalAddress cur_address = block_address;
373 size_t remaining_pages = block_pages; 373 size_t remaining_pages = block_pages;
374 while (remaining_pages > 0) { 374 while (remaining_pages > 0) {
375 // Get the manager for the current address. 375 // Get the manager for the current address.
@@ -400,8 +400,9 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
400 R_SUCCEED(); 400 R_SUCCEED();
401} 401}
402 402
403size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr management, 403size_t KMemoryManager::Impl::Initialize(KPhysicalAddress address, size_t size,
404 VAddr management_end, Pool p) { 404 KVirtualAddress management, KVirtualAddress management_end,
405 Pool p) {
405 // Calculate management sizes. 406 // Calculate management sizes.
406 const size_t ref_count_size = (size / PageSize) * sizeof(u16); 407 const size_t ref_count_size = (size / PageSize) * sizeof(u16);
407 const size_t optimize_map_size = CalculateOptimizedProcessOverheadSize(size); 408 const size_t optimize_map_size = CalculateOptimizedProcessOverheadSize(size);
@@ -417,7 +418,7 @@ size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr manage
417 m_management_region = management; 418 m_management_region = management;
418 m_page_reference_counts.resize( 419 m_page_reference_counts.resize(
419 Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize() / PageSize); 420 Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize() / PageSize);
420 ASSERT(Common::IsAligned(m_management_region, PageSize)); 421 ASSERT(Common::IsAligned(GetInteger(m_management_region), PageSize));
421 422
422 // Initialize the manager's KPageHeap. 423 // Initialize the manager's KPageHeap.
423 m_heap.Initialize(address, size, management + manager_size, page_heap_size); 424 m_heap.Initialize(address, size, management + manager_size, page_heap_size);
@@ -425,15 +426,15 @@ size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr manage
425 return total_management_size; 426 return total_management_size;
426} 427}
427 428
428void KMemoryManager::Impl::TrackUnoptimizedAllocation(PAddr block, size_t num_pages) { 429void KMemoryManager::Impl::TrackUnoptimizedAllocation(KPhysicalAddress block, size_t num_pages) {
429 UNREACHABLE(); 430 UNREACHABLE();
430} 431}
431 432
432void KMemoryManager::Impl::TrackOptimizedAllocation(PAddr block, size_t num_pages) { 433void KMemoryManager::Impl::TrackOptimizedAllocation(KPhysicalAddress block, size_t num_pages) {
433 UNREACHABLE(); 434 UNREACHABLE();
434} 435}
435 436
436bool KMemoryManager::Impl::ProcessOptimizedAllocation(PAddr block, size_t num_pages, 437bool KMemoryManager::Impl::ProcessOptimizedAllocation(KPhysicalAddress block, size_t num_pages,
437 u8 fill_pattern) { 438 u8 fill_pattern) {
438 UNREACHABLE(); 439 UNREACHABLE();
439} 440}
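
Impl::Initialize above sizes its management region from the managed heap: a 16-bit reference count per page, per the (size / PageSize) * sizeof(u16) term, plus an optimized-process map. A sketch of that arithmetic; only the refcount term comes straight from the diff, while the one-bit-per-page map and the final page rounding are assumptions.

#include <cstdint>

constexpr std::uint64_t PageSize = 0x1000;

constexpr std::uint64_t AlignUpPage(std::uint64_t v) {
    return (v + PageSize - 1) & ~(PageSize - 1);
}

constexpr std::uint64_t ManagementOverhead(std::uint64_t region_size) {
    const std::uint64_t num_pages = region_size / PageSize;
    const std::uint64_t ref_count_size = num_pages * sizeof(std::uint16_t);
    const std::uint64_t optimize_map_size =
        ((num_pages + 63) / 64) * sizeof(std::uint64_t); // assumed layout
    return AlignUpPage(ref_count_size + optimize_map_size);
}

// Managing 4 GiB: 2 MiB of refcounts + 128 KiB of bitmap = 0x220000 bytes.
static_assert(ManagementOverhead(1ULL << 32) == 0x220000);
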
diff --git a/src/core/hle/kernel/k_memory_manager.h b/src/core/hle/kernel/k_memory_manager.h
index 401d4e644..7e4b41319 100644
--- a/src/core/hle/kernel/k_memory_manager.h
+++ b/src/core/hle/kernel/k_memory_manager.h
@@ -7,10 +7,10 @@
7#include <tuple> 7#include <tuple>
8 8
9#include "common/common_funcs.h" 9#include "common/common_funcs.h"
10#include "common/common_types.h"
11#include "core/hle/kernel/k_light_lock.h" 10#include "core/hle/kernel/k_light_lock.h"
12#include "core/hle/kernel/k_memory_layout.h" 11#include "core/hle/kernel/k_memory_layout.h"
13#include "core/hle/kernel/k_page_heap.h" 12#include "core/hle/kernel/k_page_heap.h"
13#include "core/hle/kernel/k_typed_address.h"
14#include "core/hle/result.h" 14#include "core/hle/result.h"
15 15
16namespace Core { 16namespace Core {
@@ -50,21 +50,21 @@ public:
50 50
51 explicit KMemoryManager(Core::System& system); 51 explicit KMemoryManager(Core::System& system);
52 52
53 void Initialize(VAddr management_region, size_t management_region_size); 53 void Initialize(KVirtualAddress management_region, size_t management_region_size);
54 54
55 Result InitializeOptimizedMemory(u64 process_id, Pool pool); 55 Result InitializeOptimizedMemory(u64 process_id, Pool pool);
56 void FinalizeOptimizedMemory(u64 process_id, Pool pool); 56 void FinalizeOptimizedMemory(u64 process_id, Pool pool);
57 57
58 PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option); 58 KPhysicalAddress AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
59 Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option); 59 Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option);
60 Result AllocateForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id, 60 Result AllocateForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id,
61 u8 fill_pattern); 61 u8 fill_pattern);
62 62
63 Pool GetPool(PAddr address) const { 63 Pool GetPool(KPhysicalAddress address) const {
64 return this->GetManager(address).GetPool(); 64 return this->GetManager(address).GetPool();
65 } 65 }
66 66
67 void Open(PAddr address, size_t num_pages) { 67 void Open(KPhysicalAddress address, size_t num_pages) {
68 // Repeatedly open references until we've done so for all pages. 68 // Repeatedly open references until we've done so for all pages.
69 while (num_pages) { 69 while (num_pages) {
70 auto& manager = this->GetManager(address); 70 auto& manager = this->GetManager(address);
@@ -80,7 +80,7 @@ public:
80 } 80 }
81 } 81 }
82 82
83 void OpenFirst(PAddr address, size_t num_pages) { 83 void OpenFirst(KPhysicalAddress address, size_t num_pages) {
84 // Repeatedly open references until we've done so for all pages. 84 // Repeatedly open references until we've done so for all pages.
85 while (num_pages) { 85 while (num_pages) {
86 auto& manager = this->GetManager(address); 86 auto& manager = this->GetManager(address);
@@ -96,7 +96,7 @@ public:
96 } 96 }
97 } 97 }
98 98
99 void Close(PAddr address, size_t num_pages) { 99 void Close(KPhysicalAddress address, size_t num_pages) {
100 // Repeatedly close references until we've done so for all pages. 100 // Repeatedly close references until we've done so for all pages.
101 while (num_pages) { 101 while (num_pages) {
102 auto& manager = this->GetManager(address); 102 auto& manager = this->GetManager(address);
@@ -199,16 +199,16 @@ private:
199 public: 199 public:
200 Impl() = default; 200 Impl() = default;
201 201
202 size_t Initialize(PAddr address, size_t size, VAddr management, VAddr management_end, 202 size_t Initialize(KPhysicalAddress address, size_t size, KVirtualAddress management,
203 Pool p); 203 KVirtualAddress management_end, Pool p);
204 204
205 PAddr AllocateBlock(s32 index, bool random) { 205 KPhysicalAddress AllocateBlock(s32 index, bool random) {
206 return m_heap.AllocateBlock(index, random); 206 return m_heap.AllocateBlock(index, random);
207 } 207 }
208 PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) { 208 KPhysicalAddress AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
209 return m_heap.AllocateAligned(index, num_pages, align_pages); 209 return m_heap.AllocateAligned(index, num_pages, align_pages);
210 } 210 }
211 void Free(PAddr addr, size_t num_pages) { 211 void Free(KPhysicalAddress addr, size_t num_pages) {
212 m_heap.Free(addr, num_pages); 212 m_heap.Free(addr, num_pages);
213 } 213 }
214 214
@@ -220,10 +220,10 @@ private:
220 UNIMPLEMENTED(); 220 UNIMPLEMENTED();
221 } 221 }
222 222
223 void TrackUnoptimizedAllocation(PAddr block, size_t num_pages); 223 void TrackUnoptimizedAllocation(KPhysicalAddress block, size_t num_pages);
224 void TrackOptimizedAllocation(PAddr block, size_t num_pages); 224 void TrackOptimizedAllocation(KPhysicalAddress block, size_t num_pages);
225 225
226 bool ProcessOptimizedAllocation(PAddr block, size_t num_pages, u8 fill_pattern); 226 bool ProcessOptimizedAllocation(KPhysicalAddress block, size_t num_pages, u8 fill_pattern);
227 227
228 constexpr Pool GetPool() const { 228 constexpr Pool GetPool() const {
229 return m_pool; 229 return m_pool;
@@ -231,7 +231,7 @@ private:
231 constexpr size_t GetSize() const { 231 constexpr size_t GetSize() const {
232 return m_heap.GetSize(); 232 return m_heap.GetSize();
233 } 233 }
234 constexpr PAddr GetEndAddress() const { 234 constexpr KPhysicalAddress GetEndAddress() const {
235 return m_heap.GetEndAddress(); 235 return m_heap.GetEndAddress();
236 } 236 }
237 237
@@ -243,10 +243,10 @@ private:
243 UNIMPLEMENTED(); 243 UNIMPLEMENTED();
244 } 244 }
245 245
246 constexpr size_t GetPageOffset(PAddr address) const { 246 constexpr size_t GetPageOffset(KPhysicalAddress address) const {
247 return m_heap.GetPageOffset(address); 247 return m_heap.GetPageOffset(address);
248 } 248 }
249 constexpr size_t GetPageOffsetToEnd(PAddr address) const { 249 constexpr size_t GetPageOffsetToEnd(KPhysicalAddress address) const {
250 return m_heap.GetPageOffsetToEnd(address); 250 return m_heap.GetPageOffsetToEnd(address);
251 } 251 }
252 252
@@ -263,7 +263,7 @@ private:
263 return m_prev; 263 return m_prev;
264 } 264 }
265 265
266 void OpenFirst(PAddr address, size_t num_pages) { 266 void OpenFirst(KPhysicalAddress address, size_t num_pages) {
267 size_t index = this->GetPageOffset(address); 267 size_t index = this->GetPageOffset(address);
268 const size_t end = index + num_pages; 268 const size_t end = index + num_pages;
269 while (index < end) { 269 while (index < end) {
@@ -274,7 +274,7 @@ private:
274 } 274 }
275 } 275 }
276 276
277 void Open(PAddr address, size_t num_pages) { 277 void Open(KPhysicalAddress address, size_t num_pages) {
278 size_t index = this->GetPageOffset(address); 278 size_t index = this->GetPageOffset(address);
279 const size_t end = index + num_pages; 279 const size_t end = index + num_pages;
280 while (index < end) { 280 while (index < end) {
@@ -285,7 +285,7 @@ private:
285 } 285 }
286 } 286 }
287 287
288 void Close(PAddr address, size_t num_pages) { 288 void Close(KPhysicalAddress address, size_t num_pages) {
289 size_t index = this->GetPageOffset(address); 289 size_t index = this->GetPageOffset(address);
290 const size_t end = index + num_pages; 290 const size_t end = index + num_pages;
291 291
@@ -323,18 +323,18 @@ private:
323 323
324 KPageHeap m_heap; 324 KPageHeap m_heap;
325 std::vector<RefCount> m_page_reference_counts; 325 std::vector<RefCount> m_page_reference_counts;
326 VAddr m_management_region{}; 326 KVirtualAddress m_management_region{};
327 Pool m_pool{}; 327 Pool m_pool{};
328 Impl* m_next{}; 328 Impl* m_next{};
329 Impl* m_prev{}; 329 Impl* m_prev{};
330 }; 330 };
331 331
332private: 332private:
333 Impl& GetManager(PAddr address) { 333 Impl& GetManager(KPhysicalAddress address) {
334 return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()]; 334 return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
335 } 335 }
336 336
337 const Impl& GetManager(PAddr address) const { 337 const Impl& GetManager(KPhysicalAddress address) const {
338 return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()]; 338 return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
339 } 339 }
340 340
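
Open, OpenFirst, and Close above all share one loop shape: find the manager that owns the current physical address, hand it at most GetPageOffsetToEnd(address) pages, then advance into the next manager's range. A self-contained toy version of that loop; the two fixed pools and the linear lookup stand in for the real attribute-indexed m_managers table.

#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr std::uint64_t PageSize = 0x1000;

// Toy manager covering [start, end) with one refcount per page.
struct Manager {
    std::uint64_t start;
    std::uint64_t end;
    std::vector<std::uint16_t> refs;

    std::size_t PagesToEnd(std::uint64_t a) const { return (end - a) / PageSize; }
    void Open(std::uint64_t a, std::size_t n) {
        for (std::size_t i = (a - start) / PageSize; n--; ++i) {
            ++refs[i];
        }
    }
};

std::array<Manager, 2> g_managers{
    Manager{0x0000'0000, 0x1000'0000, std::vector<std::uint16_t>(0x10000)},
    Manager{0x1000'0000, 0x2000'0000, std::vector<std::uint16_t>(0x10000)},
};

Manager& GetManagerFor(std::uint64_t a) {
    for (auto& m : g_managers) {
        if (m.start <= a && a < m.end) {
            return m;
        }
    }
    assert(false && "address outside every pool");
    return g_managers[0];
}

void OpenRange(std::uint64_t address, std::size_t num_pages) {
    while (num_pages > 0) {
        Manager& m = GetManagerFor(address);
        // Never hand a manager more pages than it owns past `address`.
        const std::size_t cur = std::min(m.PagesToEnd(address), num_pages);
        m.Open(address, cur);
        num_pages -= cur;
        address += cur * PageSize;
    }
}
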
diff --git a/src/core/hle/kernel/k_memory_region.h b/src/core/hle/kernel/k_memory_region.h
index cfe86fb82..e3044f022 100644
--- a/src/core/hle/kernel/k_memory_region.h
+++ b/src/core/hle/kernel/k_memory_region.h
@@ -5,9 +5,9 @@
5 5
6#include "common/assert.h" 6#include "common/assert.h"
7#include "common/common_funcs.h" 7#include "common/common_funcs.h"
8#include "common/common_types.h"
9#include "common/intrusive_red_black_tree.h" 8#include "common/intrusive_red_black_tree.h"
10#include "core/hle/kernel/k_memory_region_type.h" 9#include "core/hle/kernel/k_memory_region_type.h"
10#include "core/hle/kernel/k_typed_address.h"
11 11
12namespace Kernel { 12namespace Kernel {
13 13
@@ -243,10 +243,10 @@ public:
243 void InsertDirectly(u64 address, u64 last_address, u32 attr = 0, u32 type_id = 0); 243 void InsertDirectly(u64 address, u64 last_address, u32 attr = 0, u32 type_id = 0);
244 bool Insert(u64 address, size_t size, u32 type_id, u32 new_attr = 0, u32 old_attr = 0); 244 bool Insert(u64 address, size_t size, u32 type_id, u32 new_attr = 0, u32 old_attr = 0);
245 245
246 VAddr GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id); 246 KVirtualAddress GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id);
247 247
248 VAddr GetRandomAlignedRegionWithGuard(size_t size, size_t alignment, u32 type_id, 248 KVirtualAddress GetRandomAlignedRegionWithGuard(size_t size, size_t alignment, u32 type_id,
249 size_t guard_size) { 249 size_t guard_size) {
250 return this->GetRandomAlignedRegion(size + 2 * guard_size, alignment, type_id) + guard_size; 250 return this->GetRandomAlignedRegion(size + 2 * guard_size, alignment, type_id) + guard_size;
251 } 251 }
252 252
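
GetRandomAlignedRegionWithGuard above is compact enough to miss: it reserves size + 2 * guard_size bytes so the randomly chosen placement has unmapped guard gaps on both sides, then returns the interior start. Sketched out:

#include <cstdint>

//   outer_start                                   outer_start + size + 2*guard
//   |--- guard ---|------------ size ------------|--- guard ---|
//                  ^ returned mapping address
//
// Reserving size + 2 * guard_size keeps the mapping's neighbors at least
// guard_size away on either side, whatever random slot was picked.
constexpr std::uint64_t InteriorStart(std::uint64_t outer_start, std::uint64_t guard_size) {
    return outer_start + guard_size;
}

static_assert(InteriorStart(0x8000'0000, 0x1000) == 0x8000'1000);
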
diff --git a/src/core/hle/kernel/k_page_buffer.cpp b/src/core/hle/kernel/k_page_buffer.cpp
index 0c16dded4..e9830e6d9 100644
--- a/src/core/hle/kernel/k_page_buffer.cpp
+++ b/src/core/hle/kernel/k_page_buffer.cpp
@@ -10,8 +10,8 @@
10 10
11namespace Kernel { 11namespace Kernel {
12 12
13KPageBuffer* KPageBuffer::FromPhysicalAddress(Core::System& system, PAddr phys_addr) { 13KPageBuffer* KPageBuffer::FromPhysicalAddress(Core::System& system, KPhysicalAddress phys_addr) {
14 ASSERT(Common::IsAligned(phys_addr, PageSize)); 14 ASSERT(Common::IsAligned(GetInteger(phys_addr), PageSize));
15 return system.DeviceMemory().GetPointer<KPageBuffer>(phys_addr); 15 return system.DeviceMemory().GetPointer<KPageBuffer>(phys_addr);
16} 16}
17 17
diff --git a/src/core/hle/kernel/k_page_buffer.h b/src/core/hle/kernel/k_page_buffer.h
index b7a3ccb4a..f6a7f1e39 100644
--- a/src/core/hle/kernel/k_page_buffer.h
+++ b/src/core/hle/kernel/k_page_buffer.h
@@ -26,7 +26,7 @@ public:
26 explicit KPageBuffer(KernelCore&) {} 26 explicit KPageBuffer(KernelCore&) {}
27 KPageBuffer() = default; 27 KPageBuffer() = default;
28 28
29 static KPageBuffer* FromPhysicalAddress(Core::System& system, PAddr phys_addr); 29 static KPageBuffer* FromPhysicalAddress(Core::System& system, KPhysicalAddress phys_addr);
30 30
31private: 31private:
32 alignas(PageSize) std::array<u8, PageSize> m_buffer{}; 32 alignas(PageSize) std::array<u8, PageSize> m_buffer{};
diff --git a/src/core/hle/kernel/k_page_group.h b/src/core/hle/kernel/k_page_group.h
index c07f17663..b32909f05 100644
--- a/src/core/hle/kernel/k_page_group.h
+++ b/src/core/hle/kernel/k_page_group.h
@@ -22,7 +22,7 @@ public:
22 constexpr explicit KBlockInfo() : m_next(nullptr) {} 22 constexpr explicit KBlockInfo() : m_next(nullptr) {}
23 23
24 constexpr void Initialize(KPhysicalAddress addr, size_t np) { 24 constexpr void Initialize(KPhysicalAddress addr, size_t np) {
25 ASSERT(Common::IsAligned(addr, PageSize)); 25 ASSERT(Common::IsAligned(GetInteger(addr), PageSize));
26 ASSERT(static_cast<u32>(np) == np); 26 ASSERT(static_cast<u32>(np) == np);
27 27
28 m_page_index = static_cast<u32>(addr / PageSize); 28 m_page_index = static_cast<u32>(addr / PageSize);
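
KBlockInfo::Initialize above stores a 32-bit page index and count instead of a 64-bit address and byte size, and the two asserts are what make that packing lossless. A sketch of the same packing, with field names of my own choosing:

#include <cassert>
#include <cstdint>

constexpr std::uint64_t PageSize = 0x1000;

// With 4 KiB pages a 32-bit page index still spans 16 TiB of physical space,
// so this pair is half the size of an address/byte-size pair.
struct PackedBlock {
    std::uint32_t page_index;
    std::uint32_t num_pages;

    void Initialize(std::uint64_t addr, std::uint64_t np) {
        assert(addr % PageSize == 0);                 // page-aligned address
        assert(static_cast<std::uint32_t>(np) == np); // count must fit in 32 bits
        page_index = static_cast<std::uint32_t>(addr / PageSize);
        num_pages = static_cast<std::uint32_t>(np);
    }

    std::uint64_t Address() const { return std::uint64_t{page_index} * PageSize; }
};
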
diff --git a/src/core/hle/kernel/k_page_heap.cpp b/src/core/hle/kernel/k_page_heap.cpp
index 7b02c7d8b..95762b5a2 100644
--- a/src/core/hle/kernel/k_page_heap.cpp
+++ b/src/core/hle/kernel/k_page_heap.cpp
@@ -6,14 +6,14 @@
6 6
7namespace Kernel { 7namespace Kernel {
8 8
9void KPageHeap::Initialize(PAddr address, size_t size, VAddr management_address, 9void KPageHeap::Initialize(KPhysicalAddress address, size_t size,
10 size_t management_size, const size_t* block_shifts, 10 KVirtualAddress management_address, size_t management_size,
11 size_t num_block_shifts) { 11 const size_t* block_shifts, size_t num_block_shifts) {
12 // Check our assumptions. 12 // Check our assumptions.
13 ASSERT(Common::IsAligned(address, PageSize)); 13 ASSERT(Common::IsAligned(GetInteger(address), PageSize));
14 ASSERT(Common::IsAligned(size, PageSize)); 14 ASSERT(Common::IsAligned(size, PageSize));
15 ASSERT(0 < num_block_shifts && num_block_shifts <= NumMemoryBlockPageShifts); 15 ASSERT(0 < num_block_shifts && num_block_shifts <= NumMemoryBlockPageShifts);
16 const VAddr management_end = management_address + management_size; 16 const KVirtualAddress management_end = management_address + management_size;
17 17
18 // Set our members. 18 // Set our members.
19 m_heap_address = address; 19 m_heap_address = address;
@@ -31,7 +31,7 @@ void KPageHeap::Initialize(PAddr address, size_t size, VAddr management_address,
31 } 31 }
32 32
33 // Ensure we didn't overextend our bounds. 33 // Ensure we didn't overextend our bounds.
34 ASSERT(VAddr(cur_bitmap_storage) <= management_end); 34 ASSERT(KVirtualAddress(cur_bitmap_storage) <= management_end);
35} 35}
36 36
37size_t KPageHeap::GetNumFreePages() const { 37size_t KPageHeap::GetNumFreePages() const {
@@ -44,11 +44,11 @@ size_t KPageHeap::GetNumFreePages() const {
44 return num_free; 44 return num_free;
45} 45}
46 46
47PAddr KPageHeap::AllocateByLinearSearch(s32 index) { 47KPhysicalAddress KPageHeap::AllocateByLinearSearch(s32 index) {
48 const size_t needed_size = m_blocks[index].GetSize(); 48 const size_t needed_size = m_blocks[index].GetSize();
49 49
50 for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) { 50 for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) {
51 if (const PAddr addr = m_blocks[i].PopBlock(false); addr != 0) { 51 if (const KPhysicalAddress addr = m_blocks[i].PopBlock(false); addr != 0) {
52 if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) { 52 if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) {
53 this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize); 53 this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
54 } 54 }
@@ -59,7 +59,7 @@ PAddr KPageHeap::AllocateByLinearSearch(s32 index) {
59 return 0; 59 return 0;
60} 60}
61 61
62PAddr KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_pages) { 62KPhysicalAddress KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_pages) {
63 // Get the size and required alignment. 63 // Get the size and required alignment.
64 const size_t needed_size = num_pages * PageSize; 64 const size_t needed_size = num_pages * PageSize;
65 const size_t align_size = align_pages * PageSize; 65 const size_t align_size = align_pages * PageSize;
@@ -110,7 +110,7 @@ PAddr KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_page
110 } 110 }
111 111
112 // Pop a block from the index we selected. 112 // Pop a block from the index we selected.
113 if (PAddr addr = m_blocks[index].PopBlock(true); addr != 0) { 113 if (KPhysicalAddress addr = m_blocks[index].PopBlock(true); addr != 0) {
114 // Determine how much size we have left over. 114 // Determine how much size we have left over.
115 if (const size_t leftover_size = m_blocks[index].GetSize() - needed_size; 115 if (const size_t leftover_size = m_blocks[index].GetSize() - needed_size;
116 leftover_size > 0) { 116 leftover_size > 0) {
@@ -141,13 +141,13 @@ PAddr KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_page
141 return 0; 141 return 0;
142} 142}
143 143
144void KPageHeap::FreeBlock(PAddr block, s32 index) { 144void KPageHeap::FreeBlock(KPhysicalAddress block, s32 index) {
145 do { 145 do {
146 block = m_blocks[index++].PushBlock(block); 146 block = m_blocks[index++].PushBlock(block);
147 } while (block != 0); 147 } while (block != 0);
148} 148}
149 149
150void KPageHeap::Free(PAddr addr, size_t num_pages) { 150void KPageHeap::Free(KPhysicalAddress addr, size_t num_pages) {
151 // Freeing no pages is a no-op. 151 // Freeing no pages is a no-op.
152 if (num_pages == 0) { 152 if (num_pages == 0) {
153 return; 153 return;
@@ -155,16 +155,16 @@ void KPageHeap::Free(PAddr addr, size_t num_pages) {
155 155
156 // Find the largest block size that we can free, and free as many as possible. 156 // Find the largest block size that we can free, and free as many as possible.
157 s32 big_index = static_cast<s32>(m_num_blocks) - 1; 157 s32 big_index = static_cast<s32>(m_num_blocks) - 1;
158 const PAddr start = addr; 158 const KPhysicalAddress start = addr;
159 const PAddr end = addr + num_pages * PageSize; 159 const KPhysicalAddress end = addr + num_pages * PageSize;
160 PAddr before_start = start; 160 KPhysicalAddress before_start = start;
161 PAddr before_end = start; 161 KPhysicalAddress before_end = start;
162 PAddr after_start = end; 162 KPhysicalAddress after_start = end;
163 PAddr after_end = end; 163 KPhysicalAddress after_end = end;
164 while (big_index >= 0) { 164 while (big_index >= 0) {
165 const size_t block_size = m_blocks[big_index].GetSize(); 165 const size_t block_size = m_blocks[big_index].GetSize();
166 const PAddr big_start = Common::AlignUp(start, block_size); 166 const KPhysicalAddress big_start = Common::AlignUp(GetInteger(start), block_size);
167 const PAddr big_end = Common::AlignDown(end, block_size); 167 const KPhysicalAddress big_end = Common::AlignDown(GetInteger(end), block_size);
168 if (big_start < big_end) { 168 if (big_start < big_end) {
169 // Free as many big blocks as we can. 169 // Free as many big blocks as we can.
170 for (auto block = big_start; block < big_end; block += block_size) { 170 for (auto block = big_start; block < big_end; block += block_size) {
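
The Free path above carves a freed range into an aligned middle, freed as whole big blocks, plus prefix/suffix remainders retried at the next smaller block size; the new GetInteger calls feed exactly that alignment step. The middle-extraction math in isolation:

#include <cstdint>

constexpr std::uint64_t AlignUp(std::uint64_t v, std::uint64_t a) {
    return (v + a - 1) & ~(a - 1);
}
constexpr std::uint64_t AlignDown(std::uint64_t v, std::uint64_t a) {
    return v & ~(a - 1);
}

// Interior of [start, end) freeable as whole blocks of block_size; it may
// come out empty, and the leftovers retry at a smaller block size.
struct Split {
    std::uint64_t big_start;
    std::uint64_t big_end;
};

constexpr Split SplitForBlockSize(std::uint64_t start, std::uint64_t end,
                                  std::uint64_t block_size) {
    return {AlignUp(start, block_size), AlignDown(end, block_size)};
}

// Freeing [0x3000, 0x2D000) with 64 KiB blocks frees [0x10000, 0x20000) big,
// leaving [0x3000, 0x10000) and [0x20000, 0x2D000) for smaller sizes.
static_assert(SplitForBlockSize(0x3000, 0x2D000, 0x10000).big_start == 0x10000);
static_assert(SplitForBlockSize(0x3000, 0x2D000, 0x10000).big_end == 0x20000);
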
diff --git a/src/core/hle/kernel/k_page_heap.h b/src/core/hle/kernel/k_page_heap.h
index 9021edcf7..c55225bac 100644
--- a/src/core/hle/kernel/k_page_heap.h
+++ b/src/core/hle/kernel/k_page_heap.h
@@ -8,8 +8,8 @@
8 8
9#include "common/alignment.h" 9#include "common/alignment.h"
10#include "common/common_funcs.h" 10#include "common/common_funcs.h"
11#include "common/common_types.h"
12#include "core/hle/kernel/k_page_bitmap.h" 11#include "core/hle/kernel/k_page_bitmap.h"
12#include "core/hle/kernel/k_typed_address.h"
13#include "core/hle/kernel/memory_types.h" 13#include "core/hle/kernel/memory_types.h"
14 14
15namespace Kernel { 15namespace Kernel {
@@ -18,24 +18,24 @@ class KPageHeap {
18public: 18public:
19 KPageHeap() = default; 19 KPageHeap() = default;
20 20
21 constexpr PAddr GetAddress() const { 21 constexpr KPhysicalAddress GetAddress() const {
22 return m_heap_address; 22 return m_heap_address;
23 } 23 }
24 constexpr size_t GetSize() const { 24 constexpr size_t GetSize() const {
25 return m_heap_size; 25 return m_heap_size;
26 } 26 }
27 constexpr PAddr GetEndAddress() const { 27 constexpr KPhysicalAddress GetEndAddress() const {
28 return this->GetAddress() + this->GetSize(); 28 return this->GetAddress() + this->GetSize();
29 } 29 }
30 constexpr size_t GetPageOffset(PAddr block) const { 30 constexpr size_t GetPageOffset(KPhysicalAddress block) const {
31 return (block - this->GetAddress()) / PageSize; 31 return (block - this->GetAddress()) / PageSize;
32 } 32 }
33 constexpr size_t GetPageOffsetToEnd(PAddr block) const { 33 constexpr size_t GetPageOffsetToEnd(KPhysicalAddress block) const {
34 return (this->GetEndAddress() - block) / PageSize; 34 return (this->GetEndAddress() - block) / PageSize;
35 } 35 }
36 36
37 void Initialize(PAddr heap_address, size_t heap_size, VAddr management_address, 37 void Initialize(KPhysicalAddress heap_address, size_t heap_size,
38 size_t management_size) { 38 KVirtualAddress management_address, size_t management_size) {
39 return this->Initialize(heap_address, heap_size, management_address, management_size, 39 return this->Initialize(heap_address, heap_size, management_address, management_size,
40 MemoryBlockPageShifts.data(), NumMemoryBlockPageShifts); 40 MemoryBlockPageShifts.data(), NumMemoryBlockPageShifts);
41 } 41 }
@@ -53,7 +53,7 @@ public:
53 m_initial_used_size = m_heap_size - free_size - reserved_size; 53 m_initial_used_size = m_heap_size - free_size - reserved_size;
54 } 54 }
55 55
56 PAddr AllocateBlock(s32 index, bool random) { 56 KPhysicalAddress AllocateBlock(s32 index, bool random) {
57 if (random) { 57 if (random) {
58 const size_t block_pages = m_blocks[index].GetNumPages(); 58 const size_t block_pages = m_blocks[index].GetNumPages();
59 return this->AllocateByRandom(index, block_pages, block_pages); 59 return this->AllocateByRandom(index, block_pages, block_pages);
@@ -62,12 +62,12 @@ public:
62 } 62 }
63 } 63 }
64 64
65 PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) { 65 KPhysicalAddress AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
66 // TODO: linear search support? 66 // TODO: linear search support?
67 return this->AllocateByRandom(index, num_pages, align_pages); 67 return this->AllocateByRandom(index, num_pages, align_pages);
68 } 68 }
69 69
70 void Free(PAddr addr, size_t num_pages); 70 void Free(KPhysicalAddress addr, size_t num_pages);
71 71
72 static size_t CalculateManagementOverheadSize(size_t region_size) { 72 static size_t CalculateManagementOverheadSize(size_t region_size) {
73 return CalculateManagementOverheadSize(region_size, MemoryBlockPageShifts.data(), 73 return CalculateManagementOverheadSize(region_size, MemoryBlockPageShifts.data(),
@@ -125,24 +125,25 @@ private:
125 return this->GetNumFreeBlocks() * this->GetNumPages(); 125 return this->GetNumFreeBlocks() * this->GetNumPages();
126 } 126 }
127 127
128 u64* Initialize(PAddr addr, size_t size, size_t bs, size_t nbs, u64* bit_storage) { 128 u64* Initialize(KPhysicalAddress addr, size_t size, size_t bs, size_t nbs,
129 u64* bit_storage) {
129 // Set shifts. 130 // Set shifts.
130 m_block_shift = bs; 131 m_block_shift = bs;
131 m_next_block_shift = nbs; 132 m_next_block_shift = nbs;
132 133
133 // Align up the address. 134 // Align up the address.
134 PAddr end = addr + size; 135 KPhysicalAddress end = addr + size;
135 const size_t align = (m_next_block_shift != 0) ? (u64(1) << m_next_block_shift) 136 const size_t align = (m_next_block_shift != 0) ? (u64(1) << m_next_block_shift)
136 : (u64(1) << m_block_shift); 137 : (u64(1) << m_block_shift);
137 addr = Common::AlignDown(addr, align); 138 addr = Common::AlignDown(GetInteger(addr), align);
138 end = Common::AlignUp(end, align); 139 end = Common::AlignUp(GetInteger(end), align);
139 140
140 m_heap_address = addr; 141 m_heap_address = addr;
141 m_end_offset = (end - addr) / (u64(1) << m_block_shift); 142 m_end_offset = (end - addr) / (u64(1) << m_block_shift);
142 return m_bitmap.Initialize(bit_storage, m_end_offset); 143 return m_bitmap.Initialize(bit_storage, m_end_offset);
143 } 144 }
144 145
145 PAddr PushBlock(PAddr address) { 146 KPhysicalAddress PushBlock(KPhysicalAddress address) {
146 // Set the bit for the free block. 147 // Set the bit for the free block.
147 size_t offset = (address - m_heap_address) >> this->GetShift(); 148 size_t offset = (address - m_heap_address) >> this->GetShift();
148 m_bitmap.SetBit(offset); 149 m_bitmap.SetBit(offset);
@@ -161,7 +162,7 @@ private:
161 return {}; 162 return {};
162 } 163 }
163 164
164 PAddr PopBlock(bool random) { 165 KPhysicalAddress PopBlock(bool random) {
165 // Find a free block. 166 // Find a free block.
166 s64 soffset = m_bitmap.FindFreeBlock(random); 167 s64 soffset = m_bitmap.FindFreeBlock(random);
167 if (soffset < 0) { 168 if (soffset < 0) {
@@ -187,18 +188,19 @@ private:
187 188
188 private: 189 private:
189 KPageBitmap m_bitmap; 190 KPageBitmap m_bitmap;
190 PAddr m_heap_address{}; 191 KPhysicalAddress m_heap_address{};
191 uintptr_t m_end_offset{}; 192 uintptr_t m_end_offset{};
192 size_t m_block_shift{}; 193 size_t m_block_shift{};
193 size_t m_next_block_shift{}; 194 size_t m_next_block_shift{};
194 }; 195 };
195 196
196private: 197private:
197 void Initialize(PAddr heap_address, size_t heap_size, VAddr management_address, 198 void Initialize(KPhysicalAddress heap_address, size_t heap_size,
198 size_t management_size, const size_t* block_shifts, size_t num_block_shifts); 199 KVirtualAddress management_address, size_t management_size,
200 const size_t* block_shifts, size_t num_block_shifts);
199 size_t GetNumFreePages() const; 201 size_t GetNumFreePages() const;
200 202
201 void FreeBlock(PAddr block, s32 index); 203 void FreeBlock(KPhysicalAddress block, s32 index);
202 204
203 static constexpr size_t NumMemoryBlockPageShifts{7}; 205 static constexpr size_t NumMemoryBlockPageShifts{7};
204 static constexpr std::array<size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{ 206 static constexpr std::array<size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{
@@ -206,14 +208,14 @@ private:
206 }; 208 };
207 209
208private: 210private:
209 PAddr AllocateByLinearSearch(s32 index); 211 KPhysicalAddress AllocateByLinearSearch(s32 index);
210 PAddr AllocateByRandom(s32 index, size_t num_pages, size_t align_pages); 212 KPhysicalAddress AllocateByRandom(s32 index, size_t num_pages, size_t align_pages);
211 213
212 static size_t CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts, 214 static size_t CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts,
213 size_t num_block_shifts); 215 size_t num_block_shifts);
214 216
215private: 217private:
216 PAddr m_heap_address{}; 218 KPhysicalAddress m_heap_address{};
217 size_t m_heap_size{}; 219 size_t m_heap_size{};
218 size_t m_initial_used_size{}; 220 size_t m_initial_used_size{};
219 size_t m_num_blocks{}; 221 size_t m_num_blocks{};
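
Each Block above tracks free blocks as one bit per block: PushBlock maps an address to (address - m_heap_address) >> shift and sets the bit, PopBlock finds a set bit and maps it back. A stand-in sketch, with a plain std::vector<bool> in place of KPageBitmap (which also supports randomized lookup):

#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

class FreeBlockBitmap {
public:
    FreeBlockBitmap(std::uint64_t heap_address, std::size_t block_shift, std::size_t num_blocks)
        : m_heap(heap_address), m_shift(block_shift), m_bits(num_blocks, false) {}

    void Push(std::uint64_t address) {
        m_bits[(address - m_heap) >> m_shift] = true; // mark the block free
    }

    std::optional<std::uint64_t> Pop() {
        for (std::size_t i = 0; i < m_bits.size(); ++i) {
            if (m_bits[i]) {
                m_bits[i] = false;
                return m_heap + (static_cast<std::uint64_t>(i) << m_shift);
            }
        }
        return std::nullopt; // no free block at this size
    }

private:
    std::uint64_t m_heap;
    std::size_t m_shift;
    std::vector<bool> m_bits;
};
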
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 2e13d5d0d..cb39387ea 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -106,7 +106,7 @@ KPageTable::~KPageTable() = default;
106 106
107Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, 107Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
108 bool enable_das_merge, bool from_back, 108 bool enable_das_merge, bool from_back,
109 KMemoryManager::Pool pool, VAddr code_addr, 109 KMemoryManager::Pool pool, KProcessAddress code_addr,
110 size_t code_size, KSystemResource* system_resource, 110 size_t code_size, KSystemResource* system_resource,
111 KResourceLimit* resource_limit) { 111 KResourceLimit* resource_limit) {
112 112
@@ -119,8 +119,8 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
119 119
120 // Set our width and heap/alias sizes 120 // Set our width and heap/alias sizes
121 m_address_space_width = GetAddressSpaceWidthFromType(as_type); 121 m_address_space_width = GetAddressSpaceWidthFromType(as_type);
122 const VAddr start = 0; 122 const KProcessAddress start = 0;
123 const VAddr end{1ULL << m_address_space_width}; 123 const KProcessAddress end{1ULL << m_address_space_width};
124 size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)}; 124 size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)};
125 size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)}; 125 size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)};
126 126
@@ -135,8 +135,8 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
135 135
136 // Set code regions and determine remaining 136 // Set code regions and determine remaining
137 constexpr size_t RegionAlignment{2_MiB}; 137 constexpr size_t RegionAlignment{2_MiB};
138 VAddr process_code_start{}; 138 KProcessAddress process_code_start{};
139 VAddr process_code_end{}; 139 KProcessAddress process_code_end{};
140 size_t stack_region_size{}; 140 size_t stack_region_size{};
141 size_t kernel_map_region_size{}; 141 size_t kernel_map_region_size{};
142 142
@@ -149,8 +149,8 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
149 m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit); 149 m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit);
150 m_alias_code_region_start = m_code_region_start; 150 m_alias_code_region_start = m_code_region_start;
151 m_alias_code_region_end = m_code_region_end; 151 m_alias_code_region_end = m_code_region_end;
152 process_code_start = Common::AlignDown(code_addr, RegionAlignment); 152 process_code_start = Common::AlignDown(GetInteger(code_addr), RegionAlignment);
153 process_code_end = Common::AlignUp(code_addr + code_size, RegionAlignment); 153 process_code_end = Common::AlignUp(GetInteger(code_addr) + code_size, RegionAlignment);
154 } else { 154 } else {
155 stack_region_size = 0; 155 stack_region_size = 0;
156 kernel_map_region_size = 0; 156 kernel_map_region_size = 0;
@@ -178,7 +178,7 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
178 m_resource_limit = resource_limit; 178 m_resource_limit = resource_limit;
179 179
180 // Determine the region we can place our undetermineds in 180 // Determine the region we can place our undetermineds in
181 VAddr alloc_start{}; 181 KProcessAddress alloc_start{};
182 size_t alloc_size{}; 182 size_t alloc_size{};
183 if ((process_code_start - m_code_region_start) >= (end - process_code_end)) { 183 if ((process_code_start - m_code_region_start) >= (end - process_code_end)) {
184 alloc_start = m_code_region_start; 184 alloc_start = m_code_region_start;
@@ -292,7 +292,7 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
292 : KMemoryManager::Direction::FromFront); 292 : KMemoryManager::Direction::FromFront);
293 293
294 // Ensure that our regions are inside our address space 294 // Ensure that our regions are inside our address space

295 auto IsInAddressSpace = [&](VAddr addr) { 295 auto IsInAddressSpace = [&](KProcessAddress addr) {
296 return m_address_space_start <= addr && addr <= m_address_space_end; 296 return m_address_space_start <= addr && addr <= m_address_space_end;
297 }; 297 };
298 ASSERT(IsInAddressSpace(m_alias_region_start)); 298 ASSERT(IsInAddressSpace(m_alias_region_start));
@@ -305,14 +305,14 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
305 ASSERT(IsInAddressSpace(m_kernel_map_region_end)); 305 ASSERT(IsInAddressSpace(m_kernel_map_region_end));
306 306
307 // Ensure that we selected regions that don't overlap 307 // Ensure that we selected regions that don't overlap
308 const VAddr alias_start{m_alias_region_start}; 308 const KProcessAddress alias_start{m_alias_region_start};
309 const VAddr alias_last{m_alias_region_end - 1}; 309 const KProcessAddress alias_last{m_alias_region_end - 1};
310 const VAddr heap_start{m_heap_region_start}; 310 const KProcessAddress heap_start{m_heap_region_start};
311 const VAddr heap_last{m_heap_region_end - 1}; 311 const KProcessAddress heap_last{m_heap_region_end - 1};
312 const VAddr stack_start{m_stack_region_start}; 312 const KProcessAddress stack_start{m_stack_region_start};
313 const VAddr stack_last{m_stack_region_end - 1}; 313 const KProcessAddress stack_last{m_stack_region_end - 1};
314 const VAddr kmap_start{m_kernel_map_region_start}; 314 const KProcessAddress kmap_start{m_kernel_map_region_start};
315 const VAddr kmap_last{m_kernel_map_region_end - 1}; 315 const KProcessAddress kmap_last{m_kernel_map_region_end - 1};
316 ASSERT(alias_last < heap_start || heap_last < alias_start); 316 ASSERT(alias_last < heap_start || heap_last < alias_start);
317 ASSERT(alias_last < stack_start || stack_last < alias_start); 317 ASSERT(alias_last < stack_start || stack_last < alias_start);
318 ASSERT(alias_last < kmap_start || kmap_last < alias_start); 318 ASSERT(alias_last < kmap_start || kmap_last < alias_start);
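
The assertion block just above checks pairwise disjointness with inclusive last addresses (end - 1), sidestepping the wrap-around hazard of comparing one-past-the-end values at the top of the address space. The predicate in isolation:

#include <cstdint>

struct Region {
    std::uint64_t start;
    std::uint64_t last; // inclusive, i.e. end - 1
};

constexpr bool Disjoint(Region a, Region b) {
    return a.last < b.start || b.last < a.start;
}

static_assert(Disjoint({0x1000, 0x1FFF}, {0x2000, 0x2FFF}));  // adjacent is fine
static_assert(!Disjoint({0x1000, 0x2000}, {0x2000, 0x2FFF})); // one shared page
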
@@ -334,9 +334,10 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
334 334
335void KPageTable::Finalize() { 335void KPageTable::Finalize() {
336 // Finalize memory blocks. 336 // Finalize memory blocks.
337 m_memory_block_manager.Finalize(m_memory_block_slab_manager, [&](VAddr addr, u64 size) { 337 m_memory_block_manager.Finalize(
338 m_system.Memory().UnmapRegion(*m_page_table_impl, addr, size); 338 m_memory_block_slab_manager, [&](KProcessAddress addr, u64 size) {
339 }); 339 m_system.Memory().UnmapRegion(*m_page_table_impl, addr, size);
340 });
340 341
341 // Release any insecure mapped memory. 342 // Release any insecure mapped memory.
342 if (m_mapped_insecure_memory) { 343 if (m_mapped_insecure_memory) {
@@ -352,7 +353,7 @@ void KPageTable::Finalize() {
352 m_page_table_impl.reset(); 353 m_page_table_impl.reset();
353} 354}
354 355
355Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState state, 356Result KPageTable::MapProcessCode(KProcessAddress addr, size_t num_pages, KMemoryState state,
356 KMemoryPermission perm) { 357 KMemoryPermission perm) {
357 const u64 size{num_pages * PageSize}; 358 const u64 size{num_pages * PageSize};
358 359
@@ -388,7 +389,8 @@ Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState sta
388 R_SUCCEED(); 389 R_SUCCEED();
389} 390}
390 391
391Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size) { 392Result KPageTable::MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address,
393 size_t size) {
392 // Validate the mapping request. 394 // Validate the mapping request.
393 R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), 395 R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
394 ResultInvalidMemoryRegion); 396 ResultInvalidMemoryRegion);
@@ -473,7 +475,8 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si
473 R_SUCCEED(); 475 R_SUCCEED();
474} 476}
475 477
476Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size, 478Result KPageTable::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address,
479 size_t size,
477 ICacheInvalidationStrategy icache_invalidation_strategy) { 480 ICacheInvalidationStrategy icache_invalidation_strategy) {
478 // Validate the mapping request. 481 // Validate the mapping request.
479 R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), 482 R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
@@ -525,7 +528,7 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t
525 SCOPE_EXIT({ 528 SCOPE_EXIT({
526 if (reprotected_pages && any_code_pages) { 529 if (reprotected_pages && any_code_pages) {
527 if (icache_invalidation_strategy == ICacheInvalidationStrategy::InvalidateRange) { 530 if (icache_invalidation_strategy == ICacheInvalidationStrategy::InvalidateRange) {
528 m_system.InvalidateCpuInstructionCacheRange(dst_address, size); 531 m_system.InvalidateCpuInstructionCacheRange(GetInteger(dst_address), size);
529 } else { 532 } else {
530 m_system.InvalidateCpuInstructionCaches(); 533 m_system.InvalidateCpuInstructionCaches();
531 } 534 }
@@ -575,9 +578,10 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t
575 R_SUCCEED(); 578 R_SUCCEED();
576} 579}
577 580
578VAddr KPageTable::FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages, 581KProcessAddress KPageTable::FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
579 size_t alignment, size_t offset, size_t guard_pages) { 582 size_t num_pages, size_t alignment, size_t offset,
580 VAddr address = 0; 583 size_t guard_pages) {
584 KProcessAddress address = 0;
581 585
582 if (num_pages <= region_num_pages) { 586 if (num_pages <= region_num_pages) {
583 if (this->IsAslrEnabled()) { 587 if (this->IsAslrEnabled()) {
@@ -593,7 +597,7 @@ VAddr KPageTable::FindFreeArea(VAddr region_start, size_t region_num_pages, size
593 return address; 597 return address;
594} 598}
595 599
596Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) { 600Result KPageTable::MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages) {
597 ASSERT(this->IsLockedByCurrentThread()); 601 ASSERT(this->IsLockedByCurrentThread());
598 602
599 const size_t size = num_pages * PageSize; 603 const size_t size = num_pages * PageSize;
@@ -604,11 +608,11 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
604 // Begin traversal. 608 // Begin traversal.
605 Common::PageTable::TraversalContext context; 609 Common::PageTable::TraversalContext context;
606 Common::PageTable::TraversalEntry next_entry; 610 Common::PageTable::TraversalEntry next_entry;
607 R_UNLESS(m_page_table_impl->BeginTraversal(next_entry, context, addr), 611 R_UNLESS(m_page_table_impl->BeginTraversal(next_entry, context, GetInteger(addr)),
608 ResultInvalidCurrentMemory); 612 ResultInvalidCurrentMemory);
609 613
610 // Prepare tracking variables. 614 // Prepare tracking variables.
611 PAddr cur_addr = next_entry.phys_addr; 615 KPhysicalAddress cur_addr = next_entry.phys_addr;
612 size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1)); 616 size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
613 size_t tot_size = cur_size; 617 size_t tot_size = cur_size;
614 618
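
Both MakePageGroup and IsValidPageGroup seed their traversal the same way: the bytes remaining in the first physical block are block_size - (addr & (block_size - 1)), which handles a start address landing mid-block. Worked through, assuming a power-of-two block size:

#include <cstdint>

// Bytes from `addr` to the end of the naturally-aligned block containing it.
constexpr std::uint64_t BytesToBlockEnd(std::uint64_t addr, std::uint64_t block_size) {
    return block_size - (addr & (block_size - 1));
}

// A 2 MiB block containing 0x2001'3000 has 0x1E'D000 bytes left after it.
static_assert(BytesToBlockEnd(0x2001'3000, 0x20'0000) == 0x1E'D000);
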
@@ -646,7 +650,7 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
646 R_SUCCEED(); 650 R_SUCCEED();
647} 651}
648 652
649bool KPageTable::IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages) { 653bool KPageTable::IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages) {
650 ASSERT(this->IsLockedByCurrentThread()); 654 ASSERT(this->IsLockedByCurrentThread());
651 655
652 const size_t size = num_pages * PageSize; 656 const size_t size = num_pages * PageSize;
@@ -659,7 +663,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_p
659 663
660 // We're going to validate that the group we'd expect is the group we see. 664 // We're going to validate that the group we'd expect is the group we see.
661 auto cur_it = pg.begin(); 665 auto cur_it = pg.begin();
662 PAddr cur_block_address = cur_it->GetAddress(); 666 KPhysicalAddress cur_block_address = cur_it->GetAddress();
663 size_t cur_block_pages = cur_it->GetNumPages(); 667 size_t cur_block_pages = cur_it->GetNumPages();
664 668
665 auto UpdateCurrentIterator = [&]() { 669 auto UpdateCurrentIterator = [&]() {
@@ -677,12 +681,12 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_p
677 // Begin traversal. 681 // Begin traversal.
678 Common::PageTable::TraversalContext context; 682 Common::PageTable::TraversalContext context;
679 Common::PageTable::TraversalEntry next_entry; 683 Common::PageTable::TraversalEntry next_entry;
680 if (!m_page_table_impl->BeginTraversal(next_entry, context, addr)) { 684 if (!m_page_table_impl->BeginTraversal(next_entry, context, GetInteger(addr))) {
681 return false; 685 return false;
682 } 686 }
683 687
684 // Prepare tracking variables. 688 // Prepare tracking variables.
685 PAddr cur_addr = next_entry.phys_addr; 689 KPhysicalAddress cur_addr = next_entry.phys_addr;
686 size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1)); 690 size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
687 size_t tot_size = cur_size; 691 size_t tot_size = cur_size;
688 692
@@ -734,8 +738,8 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_p
734 return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize); 738 return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
735} 739}
736 740
737Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table, 741Result KPageTable::UnmapProcessMemory(KProcessAddress dst_addr, size_t size,
738 VAddr src_addr) { 742 KPageTable& src_page_table, KProcessAddress src_addr) {
739 // Acquire the table locks. 743 // Acquire the table locks.
740 KScopedLightLockPair lk(src_page_table.m_general_lock, m_general_lock); 744 KScopedLightLockPair lk(src_page_table.m_general_lock, m_general_lock);
741 745
@@ -774,8 +778,8 @@ Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& s
774} 778}
775 779
776Result KPageTable::SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed, 780Result KPageTable::SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
777 VAddr address, size_t size, KMemoryPermission test_perm, 781 KProcessAddress address, size_t size,
778 KMemoryState dst_state) { 782 KMemoryPermission test_perm, KMemoryState dst_state) {
779 // Validate pre-conditions. 783 // Validate pre-conditions.
780 ASSERT(this->IsLockedByCurrentThread()); 784 ASSERT(this->IsLockedByCurrentThread());
781 ASSERT(test_perm == KMemoryPermission::UserReadWrite || 785 ASSERT(test_perm == KMemoryPermission::UserReadWrite ||
@@ -790,10 +794,10 @@ Result KPageTable::SetupForIpcClient(PageLinkedList* page_list, size_t* out_bloc
790 : KMemoryPermission::UserRead; 794 : KMemoryPermission::UserRead;
791 795
792 // Get aligned extents. 796 // Get aligned extents.
793 const VAddr aligned_src_start = Common::AlignDown((address), PageSize); 797 const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(address), PageSize);
794 const VAddr aligned_src_end = Common::AlignUp((address) + size, PageSize); 798 const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(address) + size, PageSize);
795 const VAddr mapping_src_start = Common::AlignUp((address), PageSize); 799 const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(address), PageSize);
796 const VAddr mapping_src_end = Common::AlignDown((address) + size, PageSize); 800 const KProcessAddress mapping_src_end = Common::AlignDown(GetInteger(address) + size, PageSize);
797 801
798 const auto aligned_src_last = (aligned_src_end)-1; 802 const auto aligned_src_last = (aligned_src_end)-1;
799 const auto mapping_src_last = (mapping_src_end)-1; 803 const auto mapping_src_last = (mapping_src_end)-1;
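
SetupForIpcClient derives two nested ranges from the unaligned source span: the aligned extents (AlignDown of the start, AlignUp of the end) cover every page the span touches, while the mapping extents (AlignUp of the start, AlignDown of the end) are only the pages wholly inside it; the partial head and tail pages are copied later rather than remapped. The four bounds as a sketch:

#include <cstdint>

constexpr std::uint64_t PageSize = 0x1000;
constexpr std::uint64_t AlignUp(std::uint64_t v) { return (v + PageSize - 1) & ~(PageSize - 1); }
constexpr std::uint64_t AlignDown(std::uint64_t v) { return v & ~(PageSize - 1); }

struct IpcExtents {
    std::uint64_t aligned_start, aligned_end;  // superset: every page touched
    std::uint64_t mapping_start, mapping_end;  // subset: pages wholly inside
};

constexpr IpcExtents Compute(std::uint64_t address, std::uint64_t size) {
    return {AlignDown(address), AlignUp(address + size),
            AlignUp(address), AlignDown(address + size)};
}

// A span [0x1800, 0x4800) touches four pages but only owns two outright.
static_assert(Compute(0x1800, 0x3000).aligned_start == 0x1000);
static_assert(Compute(0x1800, 0x3000).mapping_end == 0x4000);
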
@@ -840,14 +844,15 @@ Result KPageTable::SetupForIpcClient(PageLinkedList* page_list, size_t* out_bloc
840 test_attr_mask, KMemoryAttribute::None)); 844 test_attr_mask, KMemoryAttribute::None));
841 845
842 if (mapping_src_start < mapping_src_end && (mapping_src_start) < info.GetEndAddress() && 846 if (mapping_src_start < mapping_src_end && (mapping_src_start) < info.GetEndAddress() &&
843 info.GetAddress() < (mapping_src_end)) { 847 info.GetAddress() < GetInteger(mapping_src_end)) {
844 const auto cur_start = 848 const auto cur_start = info.GetAddress() >= GetInteger(mapping_src_start)
845 info.GetAddress() >= (mapping_src_start) ? info.GetAddress() : (mapping_src_start); 849 ? info.GetAddress()
850 : (mapping_src_start);
846 const auto cur_end = mapping_src_last >= info.GetLastAddress() ? info.GetEndAddress() 851 const auto cur_end = mapping_src_last >= info.GetLastAddress() ? info.GetEndAddress()
847 : (mapping_src_end); 852 : (mapping_src_end);
848 const size_t cur_size = cur_end - cur_start; 853 const size_t cur_size = cur_end - cur_start;
849 854
850 if (info.GetAddress() < (mapping_src_start)) { 855 if (info.GetAddress() < GetInteger(mapping_src_start)) {
851 ++blocks_needed; 856 ++blocks_needed;
852 } 857 }
853 if (mapping_src_last < info.GetLastAddress()) { 858 if (mapping_src_last < info.GetLastAddress()) {
@@ -882,30 +887,32 @@ Result KPageTable::SetupForIpcClient(PageLinkedList* page_list, size_t* out_bloc
882 R_SUCCEED(); 887 R_SUCCEED();
883} 888}
884 889
885Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_addr, 890Result KPageTable::SetupForIpcServer(KProcessAddress* out_addr, size_t size,
886 KMemoryPermission test_perm, KMemoryState dst_state, 891 KProcessAddress src_addr, KMemoryPermission test_perm,
887 KPageTable& src_page_table, bool send) { 892 KMemoryState dst_state, KPageTable& src_page_table,
893 bool send) {
888 ASSERT(this->IsLockedByCurrentThread()); 894 ASSERT(this->IsLockedByCurrentThread());
889 ASSERT(src_page_table.IsLockedByCurrentThread()); 895 ASSERT(src_page_table.IsLockedByCurrentThread());
890 896
891 // Check that we can theoretically map. 897 // Check that we can theoretically map.
892 const VAddr region_start = m_alias_region_start; 898 const KProcessAddress region_start = m_alias_region_start;
893 const size_t region_size = m_alias_region_end - m_alias_region_start; 899 const size_t region_size = m_alias_region_end - m_alias_region_start;
894 R_UNLESS(size < region_size, ResultOutOfAddressSpace); 900 R_UNLESS(size < region_size, ResultOutOfAddressSpace);
895 901
896 // Get aligned source extents. 902 // Get aligned source extents.
897 const VAddr src_start = src_addr; 903 const KProcessAddress src_start = src_addr;
898 const VAddr src_end = src_addr + size; 904 const KProcessAddress src_end = src_addr + size;
899 const VAddr aligned_src_start = Common::AlignDown((src_start), PageSize); 905 const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(src_start), PageSize);
900 const VAddr aligned_src_end = Common::AlignUp((src_start) + size, PageSize); 906 const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(src_start) + size, PageSize);
901 const VAddr mapping_src_start = Common::AlignUp((src_start), PageSize); 907 const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(src_start), PageSize);
902 const VAddr mapping_src_end = Common::AlignDown((src_start) + size, PageSize); 908 const KProcessAddress mapping_src_end =
909 Common::AlignDown(GetInteger(src_start) + size, PageSize);
903 const size_t aligned_src_size = aligned_src_end - aligned_src_start; 910 const size_t aligned_src_size = aligned_src_end - aligned_src_start;
904 const size_t mapping_src_size = 911 const size_t mapping_src_size =
905 (mapping_src_start < mapping_src_end) ? (mapping_src_end - mapping_src_start) : 0; 912 (mapping_src_start < mapping_src_end) ? (mapping_src_end - mapping_src_start) : 0;
906 913
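The four extents computed above split an unaligned buffer into the pages it merely touches and the pages it fully covers. A worked example with assumed values (PageSize = 0x1000, as in the kernel):

    #include <cstdint>

    constexpr uint64_t PageSize = 0x1000;
    constexpr uint64_t AlignDown(uint64_t v, uint64_t a) { return v & ~(a - 1); }
    constexpr uint64_t AlignUp(uint64_t v, uint64_t a) { return AlignDown(v + a - 1, a); }

    // Hypothetical unaligned IPC buffer: [0x10030, 0x12FC0).
    constexpr uint64_t src = 0x10030, size = 0x2F90;

    // Every page the buffer touches, including the two partial edge pages:
    static_assert(AlignDown(src, PageSize) == 0x10000);        // aligned_src_start
    static_assert(AlignUp(src + size, PageSize) == 0x13000);   // aligned_src_end

    // Only the pages the buffer fully covers; these map directly, while the
    // head and tail pages take the partial-page copy path further down:
    static_assert(AlignUp(src, PageSize) == 0x11000);          // mapping_src_start
    static_assert(AlignDown(src + size, PageSize) == 0x12000); // mapping_src_end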
907 // Select a random address to map at. 914 // Select a random address to map at.
908 VAddr dst_addr = 915 KProcessAddress dst_addr =
909 this->FindFreeArea(region_start, region_size / PageSize, aligned_src_size / PageSize, 916 this->FindFreeArea(region_start, region_size / PageSize, aligned_src_size / PageSize,
910 PageSize, 0, this->GetNumGuardPages()); 917 PageSize, 0, this->GetNumGuardPages());
911 918
@@ -930,9 +937,9 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
930 R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); 937 R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
931 938
932 // Ensure that we manage page references correctly. 939 // Ensure that we manage page references correctly.
933 PAddr start_partial_page = 0; 940 KPhysicalAddress start_partial_page = 0;
934 PAddr end_partial_page = 0; 941 KPhysicalAddress end_partial_page = 0;
935 VAddr cur_mapped_addr = dst_addr; 942 KProcessAddress cur_mapped_addr = dst_addr;
936 943
937 // If the partial pages are mapped, an extra reference will have been opened. Otherwise, they'll 944 // If the partial pages are mapped, an extra reference will have been opened. Otherwise, they'll
938 // be freed on scope exit. 945 // be freed on scope exit.
@@ -977,11 +984,12 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
977 // Begin traversal. 984 // Begin traversal.
978 Common::PageTable::TraversalContext context; 985 Common::PageTable::TraversalContext context;
979 Common::PageTable::TraversalEntry next_entry; 986 Common::PageTable::TraversalEntry next_entry;
980 bool traverse_valid = src_impl.BeginTraversal(next_entry, context, aligned_src_start); 987 bool traverse_valid =
988 src_impl.BeginTraversal(next_entry, context, GetInteger(aligned_src_start));
981 ASSERT(traverse_valid); 989 ASSERT(traverse_valid);
982 990
983 // Prepare tracking variables. 991 // Prepare tracking variables.
984 PAddr cur_block_addr = next_entry.phys_addr; 992 KPhysicalAddress cur_block_addr = next_entry.phys_addr;
985 size_t cur_block_size = 993 size_t cur_block_size =
986 next_entry.block_size - ((cur_block_addr) & (next_entry.block_size - 1)); 994 next_entry.block_size - ((cur_block_addr) & (next_entry.block_size - 1));
987 size_t tot_block_size = cur_block_size; 995 size_t tot_block_size = cur_block_size;
@@ -989,7 +997,7 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
989 // Map the start page, if we have one. 997 // Map the start page, if we have one.
990 if (start_partial_page != 0) { 998 if (start_partial_page != 0) {
991 // Ensure the page holds correct data. 999 // Ensure the page holds correct data.
992 const VAddr start_partial_virt = 1000 const KVirtualAddress start_partial_virt =
993 GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), start_partial_page); 1001 GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), start_partial_page);
994 if (send) { 1002 if (send) {
995 const size_t partial_offset = src_start - aligned_src_start; 1003 const size_t partial_offset = src_start - aligned_src_start;
@@ -1002,21 +1010,23 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
1002 clear_size = 0; 1010 clear_size = 0;
1003 } 1011 }
1004 1012
1005 std::memset(m_system.Memory().GetPointer<void>(start_partial_virt), fill_val, 1013 std::memset(m_system.Memory().GetPointer<void>(GetInteger(start_partial_virt)),
1006 partial_offset); 1014 fill_val, partial_offset);
1007 std::memcpy( 1015 std::memcpy(
1008 m_system.Memory().GetPointer<void>(start_partial_virt + partial_offset), 1016 m_system.Memory().GetPointer<void>(GetInteger(start_partial_virt) + partial_offset),
1009 m_system.Memory().GetPointer<void>( 1017 m_system.Memory().GetPointer<void>(
1010 GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), cur_block_addr) + 1018 GetInteger(
1019 GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), cur_block_addr)) +
1011 partial_offset), 1020 partial_offset),
1012 copy_size); 1021 copy_size);
1013 if (clear_size > 0) { 1022 if (clear_size > 0) {
1014 std::memset(m_system.Memory().GetPointer<void>(start_partial_virt + partial_offset + 1023 std::memset(m_system.Memory().GetPointer<void>(GetInteger(start_partial_virt) +
1015 copy_size), 1024 partial_offset + copy_size),
1016 fill_val, clear_size); 1025 fill_val, clear_size);
1017 } 1026 }
1018 } else { 1027 } else {
1019 std::memset(m_system.Memory().GetPointer<void>(start_partial_virt), fill_val, PageSize); 1028 std::memset(m_system.Memory().GetPointer<void>(GetInteger(start_partial_virt)),
1029 fill_val, PageSize);
1020 } 1030 }
1021 1031
1022 // Map the page. 1032 // Map the page.
@@ -1061,7 +1071,8 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
1061 } 1071 }
1062 1072
1063 // Handle the last direct-mapped page. 1073 // Handle the last direct-mapped page.
1064 if (const VAddr mapped_block_end = aligned_src_start + tot_block_size - cur_block_size; 1074 if (const KProcessAddress mapped_block_end =
1075 aligned_src_start + tot_block_size - cur_block_size;
1065 mapped_block_end < mapping_src_end) { 1076 mapped_block_end < mapping_src_end) {
1066 const size_t last_block_size = mapping_src_end - mapped_block_end; 1077 const size_t last_block_size = mapping_src_end - mapped_block_end;
1067 1078
@@ -1084,18 +1095,20 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
1084 // Map the end page, if we have one. 1095 // Map the end page, if we have one.
1085 if (end_partial_page != 0) { 1096 if (end_partial_page != 0) {
1086 // Ensure the page holds correct data. 1097 // Ensure the page holds correct data.
1087 const VAddr end_partial_virt = 1098 const KVirtualAddress end_partial_virt =
1088 GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), end_partial_page); 1099 GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), end_partial_page);
1089 if (send) { 1100 if (send) {
1090 const size_t copy_size = src_end - mapping_src_end; 1101 const size_t copy_size = src_end - mapping_src_end;
1091 std::memcpy(m_system.Memory().GetPointer<void>(end_partial_virt), 1102 std::memcpy(m_system.Memory().GetPointer<void>(GetInteger(end_partial_virt)),
1092 m_system.Memory().GetPointer<void>(GetHeapVirtualAddress( 1103 m_system.Memory().GetPointer<void>(GetInteger(GetHeapVirtualAddress(
1093 m_system.Kernel().MemoryLayout(), cur_block_addr)), 1104 m_system.Kernel().MemoryLayout(), cur_block_addr))),
1094 copy_size); 1105 copy_size);
1095 std::memset(m_system.Memory().GetPointer<void>(end_partial_virt + copy_size), fill_val, 1106 std::memset(
1096 PageSize - copy_size); 1107 m_system.Memory().GetPointer<void>(GetInteger(end_partial_virt) + copy_size),
1108 fill_val, PageSize - copy_size);
1097 } else { 1109 } else {
1098 std::memset(m_system.Memory().GetPointer<void>(end_partial_virt), fill_val, PageSize); 1110 std::memset(m_system.Memory().GetPointer<void>(GetInteger(end_partial_virt)), fill_val,
1111 PageSize);
1099 } 1112 }
1100 1113
1101 // Map the page. 1114 // Map the page.
@@ -1116,7 +1129,7 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
1116 R_SUCCEED(); 1129 R_SUCCEED();
1117} 1130}
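Both partial-page blocks above build the same layout: filler bytes, then the client's bytes, then filler again. A self-contained sketch of the head-page construction (an illustrative helper, not the kernel's code):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Build a head partial page as [fill | user bytes | fill], assuming
    // partial_offset + copy_size <= page_size.
    void FillHeadPage(uint8_t* dst_page, const uint8_t* src, size_t partial_offset,
                      size_t copy_size, size_t page_size, uint8_t fill_val) {
        std::memset(dst_page, fill_val, partial_offset);        // bytes before the buffer
        std::memcpy(dst_page + partial_offset, src, copy_size); // the buffer's own bytes
        std::memset(dst_page + partial_offset + copy_size,      // bytes after the buffer
                    fill_val, page_size - partial_offset - copy_size);
    }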
1118 1131
1119Result KPageTable::SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr, 1132Result KPageTable::SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
1120 KPageTable& src_page_table, KMemoryPermission test_perm, 1133 KPageTable& src_page_table, KMemoryPermission test_perm,
1121 KMemoryState dst_state, bool send) { 1134 KMemoryState dst_state, bool send) {
1122 // For convenience, alias this. 1135 // For convenience, alias this.
@@ -1142,8 +1155,8 @@ Result KPageTable::SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr,
1142 R_TRY(allocator_result); 1155 R_TRY(allocator_result);
1143 1156
1144 // Get the mapped extents. 1157 // Get the mapped extents.
1145 const VAddr src_map_start = Common::AlignUp((src_addr), PageSize); 1158 const KProcessAddress src_map_start = Common::AlignUp(GetInteger(src_addr), PageSize);
1146 const VAddr src_map_end = Common::AlignDown((src_addr) + size, PageSize); 1159 const KProcessAddress src_map_end = Common::AlignDown(GetInteger(src_addr) + size, PageSize);
1147 const size_t src_map_size = src_map_end - src_map_start; 1160 const size_t src_map_size = src_map_end - src_map_start;
1148 1161
1149 // Ensure that we clean up appropriately if we fail after this. 1162 // Ensure that we clean up appropriately if we fail after this.
@@ -1172,7 +1185,8 @@ Result KPageTable::SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr,
1172 R_SUCCEED(); 1185 R_SUCCEED();
1173} 1186}
1174 1187
1175Result KPageTable::CleanupForIpcServer(VAddr address, size_t size, KMemoryState dst_state) { 1188Result KPageTable::CleanupForIpcServer(KProcessAddress address, size_t size,
1189 KMemoryState dst_state) {
1176 // Validate the address. 1190 // Validate the address.
1177 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); 1191 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
1178 1192
@@ -1196,8 +1210,8 @@ Result KPageTable::CleanupForIpcServer(VAddr address, size_t size, KMemoryState
1196 KScopedPageTableUpdater updater(this); 1210 KScopedPageTableUpdater updater(this);
1197 1211
1198 // Get aligned extents. 1212 // Get aligned extents.
1199 const VAddr aligned_start = Common::AlignDown((address), PageSize); 1213 const KProcessAddress aligned_start = Common::AlignDown(GetInteger(address), PageSize);
1200 const VAddr aligned_end = Common::AlignUp((address) + size, PageSize); 1214 const KProcessAddress aligned_end = Common::AlignUp(GetInteger(address) + size, PageSize);
1201 const size_t aligned_size = aligned_end - aligned_start; 1215 const size_t aligned_size = aligned_end - aligned_start;
1202 const size_t aligned_num_pages = aligned_size / PageSize; 1216 const size_t aligned_num_pages = aligned_size / PageSize;
1203 1217
@@ -1211,22 +1225,23 @@ Result KPageTable::CleanupForIpcServer(VAddr address, size_t size, KMemoryState
1211 KMemoryBlockDisableMergeAttribute::Normal); 1225 KMemoryBlockDisableMergeAttribute::Normal);
1212 1226
1213 // Release from the resource limit as relevant. 1227 // Release from the resource limit as relevant.
1214 const VAddr mapping_start = Common::AlignUp((address), PageSize); 1228 const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize);
1215 const VAddr mapping_end = Common::AlignDown((address) + size, PageSize); 1229 const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize);
1216 const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0; 1230 const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0;
1217 m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, aligned_size - mapping_size); 1231 m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, aligned_size - mapping_size);
1218 1232
1219 R_SUCCEED(); 1233 R_SUCCEED();
1220} 1234}
1221 1235
1222Result KPageTable::CleanupForIpcClient(VAddr address, size_t size, KMemoryState dst_state) { 1236Result KPageTable::CleanupForIpcClient(KProcessAddress address, size_t size,
1237 KMemoryState dst_state) {
1223 // Validate the address. 1238 // Validate the address.
1224 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); 1239 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
1225 1240
1226 // Get aligned source extents. 1241 // Get aligned source extents.
1227 const VAddr mapping_start = Common::AlignUp((address), PageSize); 1242 const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize);
1228 const VAddr mapping_end = Common::AlignDown((address) + size, PageSize); 1243 const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize);
1229 const VAddr mapping_last = mapping_end - 1; 1244 const KProcessAddress mapping_last = mapping_end - 1;
1230 const size_t mapping_size = (mapping_start < mapping_end) ? (mapping_end - mapping_start) : 0; 1245 const size_t mapping_size = (mapping_start < mapping_end) ? (mapping_end - mapping_start) : 0;
1231 1246
1232 // If nothing was mapped, we're actually done immediately. 1247 // If nothing was mapped, we're actually done immediately.
@@ -1279,7 +1294,7 @@ Result KPageTable::CleanupForIpcClient(VAddr address, size_t size, KMemoryState
1279 KMemoryInfo cur_info = start_it->GetMemoryInfo(); 1294 KMemoryInfo cur_info = start_it->GetMemoryInfo();
1280 1295
1281 // Create tracking variables. 1296 // Create tracking variables.
1282 VAddr cur_address = cur_info.GetAddress(); 1297 KProcessAddress cur_address = cur_info.GetAddress();
1283 size_t cur_size = cur_info.GetSize(); 1298 size_t cur_size = cur_info.GetSize();
1284 bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission(); 1299 bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
1285 bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1; 1300 bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
@@ -1352,7 +1367,7 @@ Result KPageTable::CleanupForIpcClient(VAddr address, size_t size, KMemoryState
1352 .IsSuccess()); 1367 .IsSuccess());
1353 1368
1354 // Create tracking variables. 1369 // Create tracking variables.
1355 VAddr cur_address = cur_info.GetAddress(); 1370 KProcessAddress cur_address = cur_info.GetAddress();
1356 size_t cur_size = cur_info.GetSize(); 1371 size_t cur_size = cur_info.GetSize();
1357 bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission(); 1372 bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
1358 bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1; 1373 bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
@@ -1439,16 +1454,16 @@ Result KPageTable::CleanupForIpcClient(VAddr address, size_t size, KMemoryState
1439} 1454}
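Both passes above compute the same cur_needs_set_perm flag: the permission rewrite happens only when the block's permission diverged from its original and exactly one IPC lock remains. Condensed (field names assumed, sketching only the decision):

    #include <cstdint>

    struct BlockState {
        uint16_t ipc_lock_count;
        uint8_t perm;
        uint8_t original_perm;
    };

    // Restore the original permission only when dropping the last IPC lock on
    // a block whose permission was changed for the transfer.
    constexpr bool NeedsPermissionRestore(const BlockState& b) {
        const bool perm_eq = b.perm == b.original_perm;
        return !perm_eq && b.ipc_lock_count == 1;
    }

    static_assert(NeedsPermissionRestore({.ipc_lock_count = 1, .perm = 1, .original_perm = 3}));
    static_assert(!NeedsPermissionRestore({.ipc_lock_count = 2, .perm = 1, .original_perm = 3}));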
1440 1455
1441void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLinkedList* page_list, 1456void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLinkedList* page_list,
1442 VAddr address, size_t size, 1457 KProcessAddress address, size_t size,
1443 KMemoryPermission prot_perm) { 1458 KMemoryPermission prot_perm) {
1444 ASSERT(this->IsLockedByCurrentThread()); 1459 ASSERT(this->IsLockedByCurrentThread());
1445 ASSERT(Common::IsAligned(address, PageSize)); 1460 ASSERT(Common::IsAligned(GetInteger(address), PageSize));
1446 ASSERT(Common::IsAligned(size, PageSize)); 1461 ASSERT(Common::IsAligned(size, PageSize));
1447 1462
1448 // Get the mapped extents. 1463 // Get the mapped extents.
1449 const VAddr src_map_start = address; 1464 const KProcessAddress src_map_start = address;
1450 const VAddr src_map_end = address + size; 1465 const KProcessAddress src_map_end = address + size;
1451 const VAddr src_map_last = src_map_end - 1; 1466 const KProcessAddress src_map_last = src_map_end - 1;
1452 1467
1453 // This function is only invoked when there's something to do. 1468 // This function is only invoked when there's something to do.
1454 ASSERT(src_map_end > src_map_start); 1469 ASSERT(src_map_end > src_map_start);
@@ -1458,8 +1473,9 @@ void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLi
1458 while (true) { 1473 while (true) {
1459 const KMemoryInfo info = it->GetMemoryInfo(); 1474 const KMemoryInfo info = it->GetMemoryInfo();
1460 1475
1461 const auto cur_start = 1476 const auto cur_start = info.GetAddress() >= GetInteger(src_map_start)
1462 info.GetAddress() >= src_map_start ? info.GetAddress() : src_map_start; 1477 ? info.GetAddress()
1478 : GetInteger(src_map_start);
1463 const auto cur_end = 1479 const auto cur_end =
1464 src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress(); 1480 src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress();
1465 1481
@@ -1469,7 +1485,7 @@ void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLi
1469 (info.GetIpcLockCount() != 0 && 1485 (info.GetIpcLockCount() != 0 &&
1470 (info.GetOriginalPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm)) { 1486 (info.GetOriginalPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm)) {
1471 // Check if we actually need to fix the protections on the block. 1487 // Check if we actually need to fix the protections on the block.
1472 if (cur_end == src_map_end || info.GetAddress() <= src_map_start || 1488 if (cur_end == src_map_end || info.GetAddress() <= GetInteger(src_map_start) ||
1473 (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) { 1489 (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) {
1474 ASSERT(Operate(cur_start, (cur_end - cur_start) / PageSize, info.GetPermission(), 1490 ASSERT(Operate(cur_start, (cur_end - cur_start) / PageSize, info.GetPermission(),
1475 OperationType::ChangePermissions) 1491 OperationType::ChangePermissions)
@@ -1488,15 +1504,15 @@ void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLi
1488 } 1504 }
1489} 1505}
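The cur_start/cur_end pair above is this file's recurring clamp of a request window against a single block's extent; the >=/<= comparisons are equivalent to a max/min. As a sketch:

    #include <algorithm>
    #include <cstdint>

    struct Range {
        uint64_t start;
        uint64_t end; // half-open: [start, end)
    };

    // Overlap of the request window with one block's extent.
    constexpr Range ClampToBlock(Range request, Range block) {
        return {std::max(request.start, block.start), std::min(request.end, block.end)};
    }

    static_assert(ClampToBlock({0x1000, 0x5000}, {0x2000, 0x8000}).start == 0x2000);
    static_assert(ClampToBlock({0x1000, 0x5000}, {0x2000, 0x8000}).end == 0x5000);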
1490 1506
1491Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { 1507Result KPageTable::MapPhysicalMemory(KProcessAddress address, size_t size) {
1492 // Lock the physical memory lock. 1508 // Lock the physical memory lock.
1493 KScopedLightLock phys_lk(m_map_physical_memory_lock); 1509 KScopedLightLock phys_lk(m_map_physical_memory_lock);
1494 1510
1495 // Calculate the last address for convenience. 1511 // Calculate the last address for convenience.
1496 const VAddr last_address = address + size - 1; 1512 const KProcessAddress last_address = address + size - 1;
1497 1513
1498 // Define iteration variables. 1514 // Define iteration variables.
1499 VAddr cur_address; 1515 KProcessAddress cur_address;
1500 size_t mapped_size; 1516 size_t mapped_size;
1501 1517
1502 // The entire mapping process can be retried. 1518 // The entire mapping process can be retried.
@@ -1528,7 +1544,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
1528 1544
1529 // Track the memory if it's mapped. 1545 // Track the memory if it's mapped.
1530 if (info.GetState() != KMemoryState::Free) { 1546 if (info.GetState() != KMemoryState::Free) {
1531 mapped_size += VAddr(info.GetEndAddress()) - cur_address; 1547 mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address;
1532 } 1548 }
1533 1549
1534 // Advance. 1550 // Advance.
@@ -1581,7 +1597,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
1581 1597
1582 const bool is_free = info.GetState() == KMemoryState::Free; 1598 const bool is_free = info.GetState() == KMemoryState::Free;
1583 if (is_free) { 1599 if (is_free) {
1584 if (info.GetAddress() < address) { 1600 if (info.GetAddress() < GetInteger(address)) {
1585 ++num_allocator_blocks; 1601 ++num_allocator_blocks;
1586 } 1602 }
1587 if (last_address < info.GetLastAddress()) { 1603 if (last_address < info.GetLastAddress()) {
@@ -1599,7 +1615,8 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
1599 1615
1600 // Track the memory if it's mapped. 1616 // Track the memory if it's mapped.
1601 if (!is_free) { 1617 if (!is_free) {
1602 checked_mapped_size += VAddr(info.GetEndAddress()) - cur_address; 1618 checked_mapped_size +=
1619 KProcessAddress(info.GetEndAddress()) - cur_address;
1603 } 1620 }
1604 1621
1605 // Advance. 1622 // Advance.
@@ -1627,7 +1644,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
1627 1644
1628 // Prepare to iterate over the memory. 1645 // Prepare to iterate over the memory.
1629 auto pg_it = pg.begin(); 1646 auto pg_it = pg.begin();
1630 PAddr pg_phys_addr = pg_it->GetAddress(); 1647 KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
1631 size_t pg_pages = pg_it->GetNumPages(); 1648 size_t pg_pages = pg_it->GetNumPages();
1632 1649
1633 // Reset the current tracking address, and make sure we clean up on failure. 1650 // Reset the current tracking address, and make sure we clean up on failure.
@@ -1635,7 +1652,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
1635 cur_address = address; 1652 cur_address = address;
1636 ON_RESULT_FAILURE { 1653 ON_RESULT_FAILURE {
1637 if (cur_address > address) { 1654 if (cur_address > address) {
1638 const VAddr last_unmap_address = cur_address - 1; 1655 const KProcessAddress last_unmap_address = cur_address - 1;
1639 1656
1640 // Iterate, unmapping the pages. 1657 // Iterate, unmapping the pages.
1641 cur_address = address; 1658 cur_address = address;
@@ -1652,7 +1669,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
1652 if (info.GetState() == KMemoryState::Free) { 1669 if (info.GetState() == KMemoryState::Free) {
1653 // Determine the range to unmap. 1670 // Determine the range to unmap.
1654 const size_t cur_pages = 1671 const size_t cur_pages =
1655 std::min(VAddr(info.GetEndAddress()) - cur_address, 1672 std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
1656 last_unmap_address + 1 - cur_address) / 1673 last_unmap_address + 1 - cur_address) /
1657 PageSize; 1674 PageSize;
1658 1675
@@ -1695,9 +1712,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
1695 // If it's unmapped, we need to map it. 1712 // If it's unmapped, we need to map it.
1696 if (info.GetState() == KMemoryState::Free) { 1713 if (info.GetState() == KMemoryState::Free) {
1697 // Determine the range to map. 1714 // Determine the range to map.
1698 size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address, 1715 size_t map_pages =
1699 last_address + 1 - cur_address) / 1716 std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
1700 PageSize; 1717 last_address + 1 - cur_address) /
1718 PageSize;
1701 1719
1702 // While we have pages to map, map them. 1720 // While we have pages to map, map them.
1703 while (map_pages > 0) { 1721 while (map_pages > 0) {
@@ -1754,7 +1772,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
1754 } 1772 }
1755} 1773}
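MapPhysicalMemory leans on ON_RESULT_FAILURE to unmap whatever was mapped before a failure, walking from address up to the last successfully mapped page. A minimal scope guard in the same spirit (a sketch; the kernel's own macro, not this class, is what the code above uses):

    #include <utility>

    template <typename F>
    class FailureGuard {
    public:
        explicit FailureGuard(F f) : m_f(std::move(f)) {}
        ~FailureGuard() {
            if (m_armed) {
                m_f(); // roll back the partial work
            }
        }
        void Cancel() { m_armed = false; } // call once the operation succeeded

    private:
        F m_f;
        bool m_armed{true};
    };

    // Usage sketch, with UnmapRange standing in for the unmap loop above:
    //   FailureGuard guard{[&] { UnmapRange(address, cur_address); }};
    //   ... map pages, advancing cur_address ...
    //   guard.Cancel();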
1756 1774
1757Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { 1775Result KPageTable::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
1758 // Lock the physical memory lock. 1776 // Lock the physical memory lock.
1759 KScopedLightLock phys_lk(m_map_physical_memory_lock); 1777 KScopedLightLock phys_lk(m_map_physical_memory_lock);
1760 1778
@@ -1762,13 +1780,13 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
1762 KScopedLightLock lk(m_general_lock); 1780 KScopedLightLock lk(m_general_lock);
1763 1781
1764 // Calculate the last address for convenience. 1782 // Calculate the last address for convenience.
1765 const VAddr last_address = address + size - 1; 1783 const KProcessAddress last_address = address + size - 1;
1766 1784
1767 // Define iteration variables. 1785 // Define iteration variables.
1768 VAddr map_start_address = 0; 1786 KProcessAddress map_start_address = 0;
1769 VAddr map_last_address = 0; 1787 KProcessAddress map_last_address = 0;
1770 1788
1771 VAddr cur_address; 1789 KProcessAddress cur_address;
1772 size_t mapped_size; 1790 size_t mapped_size;
1773 size_t num_allocator_blocks = 0; 1791 size_t num_allocator_blocks = 0;
1774 1792
@@ -1801,7 +1819,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
1801 map_last_address = 1819 map_last_address =
1802 (last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address; 1820 (last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address;
1803 1821
1804 if (info.GetAddress() < address) { 1822 if (info.GetAddress() < GetInteger(address)) {
1805 ++num_allocator_blocks; 1823 ++num_allocator_blocks;
1806 } 1824 }
1807 if (last_address < info.GetLastAddress()) { 1825 if (last_address < info.GetLastAddress()) {
@@ -1854,7 +1872,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
1854 // If the memory state is normal, we need to unmap it. 1872 // If the memory state is normal, we need to unmap it.
1855 if (info.GetState() == KMemoryState::Normal) { 1873 if (info.GetState() == KMemoryState::Normal) {
1856 // Determine the range to unmap. 1874 // Determine the range to unmap.
1857 const size_t cur_pages = std::min(VAddr(info.GetEndAddress()) - cur_address, 1875 const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
1858 last_address + 1 - cur_address) / 1876 last_address + 1 - cur_address) /
1859 PageSize; 1877 PageSize;
1860 1878
@@ -2144,13 +2162,14 @@ void KPageTable::RemapPageGroup(PageLinkedList* page_list, KProcessAddress addre
2144 const KMemoryInfo info = it->GetMemoryInfo(); 2162 const KMemoryInfo info = it->GetMemoryInfo();
2145 2163
2146 // Determine the range to map. 2164 // Determine the range to map.
2147 KProcessAddress map_address = std::max<VAddr>(info.GetAddress(), start_address); 2165 KProcessAddress map_address = std::max<KProcessAddress>(info.GetAddress(), start_address);
2148 const KProcessAddress map_end_address = std::min<VAddr>(info.GetEndAddress(), end_address); 2166 const KProcessAddress map_end_address =
2167 std::min<KProcessAddress>(info.GetEndAddress(), end_address);
2149 ASSERT(map_end_address != map_address); 2168 ASSERT(map_end_address != map_address);
2150 2169
2151 // Determine if we should disable head merge. 2170 // Determine if we should disable head merge.
2152 const bool disable_head_merge = 2171 const bool disable_head_merge =
2153 info.GetAddress() >= start_address && 2172 info.GetAddress() >= GetInteger(start_address) &&
2154 True(info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Normal); 2173 True(info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Normal);
2155 const KPageProperties map_properties = { 2174 const KPageProperties map_properties = {
2156 info.GetPermission(), false, false, 2175 info.GetPermission(), false, false,
@@ -2214,7 +2233,7 @@ Result KPageTable::MapPages(KProcessAddress* out_addr, size_t num_pages, size_t
2214 KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 2233 KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment,
2215 0, this->GetNumGuardPages()); 2234 0, this->GetNumGuardPages());
2216 R_UNLESS(addr != 0, ResultOutOfMemory); 2235 R_UNLESS(addr != 0, ResultOutOfMemory);
2217 ASSERT(Common::IsAligned(addr, alignment)); 2236 ASSERT(Common::IsAligned(GetInteger(addr), alignment));
2218 ASSERT(this->CanContain(addr, num_pages * PageSize, state)); 2237 ASSERT(this->CanContain(addr, num_pages * PageSize, state));
2219 ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free, 2238 ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
2220 KMemoryPermission::None, KMemoryPermission::None, 2239 KMemoryPermission::None, KMemoryPermission::None,
@@ -2455,7 +2474,7 @@ Result KPageTable::UnmapPageGroup(KProcessAddress address, const KPageGroup& pg,
2455 R_SUCCEED(); 2474 R_SUCCEED();
2456} 2475}
2457 2476
2458Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages, 2477Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
2459 KMemoryState state_mask, KMemoryState state, 2478 KMemoryState state_mask, KMemoryState state,
2460 KMemoryPermission perm_mask, KMemoryPermission perm, 2479 KMemoryPermission perm_mask, KMemoryPermission perm,
2461 KMemoryAttribute attr_mask, KMemoryAttribute attr) { 2480 KMemoryAttribute attr_mask, KMemoryAttribute attr) {
@@ -2480,7 +2499,7 @@ Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t n
2480 R_SUCCEED(); 2499 R_SUCCEED();
2481} 2500}
2482 2501
2483Result KPageTable::SetProcessMemoryPermission(VAddr addr, size_t size, 2502Result KPageTable::SetProcessMemoryPermission(KProcessAddress addr, size_t size,
2484 Svc::MemoryPermission svc_perm) { 2503 Svc::MemoryPermission svc_perm) {
2485 const size_t num_pages = size / PageSize; 2504 const size_t num_pages = size / PageSize;
2486 2505
@@ -2541,23 +2560,23 @@ Result KPageTable::SetProcessMemoryPermission(VAddr addr, size_t size,
2541 2560
2542 // Ensure cache coherency, if we're setting pages as executable. 2561 // Ensure cache coherency, if we're setting pages as executable.
2543 if (is_x) { 2562 if (is_x) {
2544 m_system.InvalidateCpuInstructionCacheRange(addr, size); 2563 m_system.InvalidateCpuInstructionCacheRange(GetInteger(addr), size);
2545 } 2564 }
2546 2565
2547 R_SUCCEED(); 2566 R_SUCCEED();
2548} 2567}
2549 2568
2550KMemoryInfo KPageTable::QueryInfoImpl(VAddr addr) { 2569KMemoryInfo KPageTable::QueryInfoImpl(KProcessAddress addr) {
2551 KScopedLightLock lk(m_general_lock); 2570 KScopedLightLock lk(m_general_lock);
2552 2571
2553 return m_memory_block_manager.FindBlock(addr)->GetMemoryInfo(); 2572 return m_memory_block_manager.FindBlock(addr)->GetMemoryInfo();
2554} 2573}
2555 2574
2556KMemoryInfo KPageTable::QueryInfo(VAddr addr) { 2575KMemoryInfo KPageTable::QueryInfo(KProcessAddress addr) {
2557 if (!Contains(addr, 1)) { 2576 if (!Contains(addr, 1)) {
2558 return { 2577 return {
2559 .m_address = m_address_space_end, 2578 .m_address = GetInteger(m_address_space_end),
2560 .m_size = 0 - m_address_space_end, 2579 .m_size = 0 - GetInteger(m_address_space_end),
2561 .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible), 2580 .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible),
2562 .m_device_disable_merge_left_count = 0, 2581 .m_device_disable_merge_left_count = 0,
2563 .m_device_disable_merge_right_count = 0, 2582 .m_device_disable_merge_right_count = 0,
@@ -2574,7 +2593,8 @@ KMemoryInfo KPageTable::QueryInfo(VAddr addr) {
2574 return QueryInfoImpl(addr); 2593 return QueryInfoImpl(addr);
2575} 2594}
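For addresses outside the table, QueryInfo synthesizes an Inaccessible block whose size uses deliberate unsigned wraparound (0 - GetInteger(m_address_space_end)), so the block spans from the end of the address space to the top of the 64-bit range. With an assumed 39-bit space:

    #include <cstdint>

    constexpr uint64_t address_space_end = uint64_t{1} << 39; // assumed size
    constexpr uint64_t sentinel_size = 0 - address_space_end; // wraps mod 2^64

    // The synthesized block ends exactly at 2^64 (i.e., wraps back to zero).
    static_assert(address_space_end + sentinel_size == 0);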
2576 2595
2577Result KPageTable::SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm) { 2596Result KPageTable::SetMemoryPermission(KProcessAddress addr, size_t size,
2597 Svc::MemoryPermission svc_perm) {
2578 const size_t num_pages = size / PageSize; 2598 const size_t num_pages = size / PageSize;
2579 2599
2580 // Lock the table. 2600 // Lock the table.
@@ -2611,7 +2631,7 @@ Result KPageTable::SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermi
2611 R_SUCCEED(); 2631 R_SUCCEED();
2612} 2632}
2613 2633
2614Result KPageTable::SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr) { 2634Result KPageTable::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr) {
2615 const size_t num_pages = size / PageSize; 2635 const size_t num_pages = size / PageSize;
2616 ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) == 2636 ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) ==
2617 KMemoryAttribute::SetMask); 2637 KMemoryAttribute::SetMask);
@@ -2666,12 +2686,12 @@ Result KPageTable::SetMaxHeapSize(size_t size) {
2666 R_SUCCEED(); 2686 R_SUCCEED();
2667} 2687}
2668 2688
2669Result KPageTable::SetHeapSize(VAddr* out, size_t size) { 2689Result KPageTable::SetHeapSize(u64* out, size_t size) {
2670 // Lock the physical memory mutex. 2690 // Lock the physical memory mutex.
2671 KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); 2691 KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
2672 2692
2673 // Try to perform a reduction in heap, instead of an extension. 2693 // Try to perform a reduction in heap, instead of an extension.
2674 VAddr cur_address{}; 2694 KProcessAddress cur_address{};
2675 size_t allocation_size{}; 2695 size_t allocation_size{};
2676 { 2696 {
2677 // Lock the table. 2697 // Lock the table.
@@ -2722,11 +2742,11 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
2722 m_current_heap_end = m_heap_region_start + size; 2742 m_current_heap_end = m_heap_region_start + size;
2723 2743
2724 // Set the output. 2744 // Set the output.
2725 *out = m_heap_region_start; 2745 *out = GetInteger(m_heap_region_start);
2726 R_SUCCEED(); 2746 R_SUCCEED();
2727 } else if (size == GetHeapSize()) { 2747 } else if (size == GetHeapSize()) {
2728 // The size requested is exactly the current size. 2748 // The size requested is exactly the current size.
2729 *out = m_heap_region_start; 2749 *out = GetInteger(m_heap_region_start);
2730 R_SUCCEED(); 2750 R_SUCCEED();
2731 } else { 2751 } else {
2732 // We have to allocate memory. Determine how much to allocate and where while the table 2752 // We have to allocate memory. Determine how much to allocate and where while the table
@@ -2799,14 +2819,14 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
2799 m_current_heap_end = m_heap_region_start + size; 2819 m_current_heap_end = m_heap_region_start + size;
2800 2820
2801 // Set the output. 2821 // Set the output.
2802 *out = m_heap_region_start; 2822 *out = GetInteger(m_heap_region_start);
2803 R_SUCCEED(); 2823 R_SUCCEED();
2804 } 2824 }
2805} 2825}
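SetHeapSize distinguishes shrink, no-change, and grow, and in every branch the output is now the raw integer of m_heap_region_start rather than a typed address, since the value heads straight back across the SVC boundary. The control flow, condensed (illustrative only):

    #include <cstddef>

    enum class HeapResize { Shrink, NoChange, Grow };

    // Classify the request the same way the branches above do.
    constexpr HeapResize ClassifyHeapResize(std::size_t current, std::size_t requested) {
        if (requested < current) {
            return HeapResize::Shrink; // unmap the tail, then report the start
        }
        if (requested == current) {
            return HeapResize::NoChange; // report the start address only
        }
        return HeapResize::Grow; // allocate and map new pages, then report the start
    }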
2806 2826
2807Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size, 2827Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address,
2808 KMemoryPermission perm, bool is_aligned, 2828 size_t size, KMemoryPermission perm,
2809 bool check_heap) { 2829 bool is_aligned, bool check_heap) {
2810 // Lightly validate the range before doing anything else. 2830 // Lightly validate the range before doing anything else.
2811 const size_t num_pages = size / PageSize; 2831 const size_t num_pages = size / PageSize;
2812 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); 2832 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
@@ -2842,7 +2862,8 @@ Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address,
2842 R_SUCCEED(); 2862 R_SUCCEED();
2843} 2863}
2844 2864
2845Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bool check_heap) { 2865Result KPageTable::LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size,
2866 bool check_heap) {
2846 // Lightly validate the range before doing anything else. 2867 // Lightly validate the range before doing anything else.
2847 const size_t num_pages = size / PageSize; 2868 const size_t num_pages = size / PageSize;
2848 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); 2869 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
@@ -2876,7 +2897,7 @@ Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bo
2876 R_SUCCEED(); 2897 R_SUCCEED();
2877} 2898}
2878 2899
2879Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, size_t size) { 2900Result KPageTable::UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
2880 // Lightly validate the range before doing anything else. 2901 // Lightly validate the range before doing anything else.
2881 const size_t num_pages = size / PageSize; 2902 const size_t num_pages = size / PageSize;
2882 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); 2903 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
@@ -2904,7 +2925,8 @@ Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, size_t size) {
2904 R_SUCCEED(); 2925 R_SUCCEED();
2905} 2926}
2906 2927
2907Result KPageTable::LockForIpcUserBuffer(PAddr* out, VAddr address, size_t size) { 2928Result KPageTable::LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address,
2929 size_t size) {
2908 R_RETURN(this->LockMemoryAndOpen( 2930 R_RETURN(this->LockMemoryAndOpen(
2909 nullptr, out, address, size, KMemoryState::FlagCanIpcUserBuffer, 2931 nullptr, out, address, size, KMemoryState::FlagCanIpcUserBuffer,
2910 KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::All, 2932 KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::All,
@@ -2913,7 +2935,7 @@ Result KPageTable::LockForIpcUserBuffer(PAddr* out, VAddr address, size_t size)
2913 KMemoryAttribute::Locked)); 2935 KMemoryAttribute::Locked));
2914} 2936}
2915 2937
2916Result KPageTable::UnlockForIpcUserBuffer(VAddr address, size_t size) { 2938Result KPageTable::UnlockForIpcUserBuffer(KProcessAddress address, size_t size) {
2917 R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanIpcUserBuffer, 2939 R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanIpcUserBuffer,
2918 KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::None, 2940 KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::None,
2919 KMemoryPermission::None, KMemoryAttribute::All, 2941 KMemoryPermission::None, KMemoryAttribute::All,
@@ -2921,7 +2943,7 @@ Result KPageTable::UnlockForIpcUserBuffer(VAddr address, size_t size) {
2921 KMemoryAttribute::Locked, nullptr)); 2943 KMemoryAttribute::Locked, nullptr));
2922} 2944}
2923 2945
2924Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size) { 2946Result KPageTable::LockForCodeMemory(KPageGroup* out, KProcessAddress addr, size_t size) {
2925 R_RETURN(this->LockMemoryAndOpen( 2947 R_RETURN(this->LockMemoryAndOpen(
2926 out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, 2948 out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
2927 KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All, 2949 KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
@@ -2929,17 +2951,17 @@ Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size) {
2929 KMemoryAttribute::Locked)); 2951 KMemoryAttribute::Locked));
2930} 2952}
2931 2953
2932Result KPageTable::UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg) { 2954Result KPageTable::UnlockForCodeMemory(KProcessAddress addr, size_t size, const KPageGroup& pg) {
2933 R_RETURN(this->UnlockMemory( 2955 R_RETURN(this->UnlockMemory(
2934 addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, 2956 addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
2935 KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All, 2957 KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
2936 KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg)); 2958 KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg));
2937} 2959}
2938 2960
2939bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const { 2961bool KPageTable::IsRegionContiguous(KProcessAddress addr, u64 size) const {
2940 auto start_ptr = m_system.DeviceMemory().GetPointer<u8>(addr); 2962 auto start_ptr = m_system.DeviceMemory().GetPointer<u8>(GetInteger(addr));
2941 for (u64 offset{}; offset < size; offset += PageSize) { 2963 for (u64 offset{}; offset < size; offset += PageSize) {
2942 if (start_ptr != m_system.DeviceMemory().GetPointer<u8>(addr + offset)) { 2964 if (start_ptr != m_system.DeviceMemory().GetPointer<u8>(GetInteger(addr) + offset)) {
2943 return false; 2965 return false;
2944 } 2966 }
2945 start_ptr += PageSize; 2967 start_ptr += PageSize;
@@ -2947,18 +2969,19 @@ bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const {
2947 return true; 2969 return true;
2948} 2970}
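IsRegionContiguous checks that the host pointer advances by exactly PageSize per guest page. The same walk, sketched against a hypothetical translate callable standing in for DeviceMemory().GetPointer<u8>:

    #include <cstdint>

    constexpr uint64_t PageSize = 0x1000;

    template <typename Translate>
    bool IsContiguous(uint64_t addr, uint64_t size, Translate&& to_host) {
        uint8_t* expected = to_host(addr);
        for (uint64_t offset = 0; offset < size; offset += PageSize) {
            if (to_host(addr + offset) != expected) {
                return false; // this guest page lives elsewhere in host memory
            }
            expected += PageSize;
        }
        return true;
    }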
2949 2971
2950void KPageTable::AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list) { 2972void KPageTable::AddRegionToPages(KProcessAddress start, size_t num_pages,
2951 VAddr addr{start}; 2973 KPageGroup& page_linked_list) {
2974 KProcessAddress addr{start};
2952 while (addr < start + (num_pages * PageSize)) { 2975 while (addr < start + (num_pages * PageSize)) {
2953 const PAddr paddr{GetPhysicalAddr(addr)}; 2976 const KPhysicalAddress paddr{GetPhysicalAddr(addr)};
2954 ASSERT(paddr != 0); 2977 ASSERT(paddr != 0);
2955 page_linked_list.AddBlock(paddr, 1); 2978 page_linked_list.AddBlock(paddr, 1);
2956 addr += PageSize; 2979 addr += PageSize;
2957 } 2980 }
2958} 2981}
2959 2982
2960VAddr KPageTable::AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages, 2983KProcessAddress KPageTable::AllocateVirtualMemory(KProcessAddress start, size_t region_num_pages,
2961 size_t align) { 2984 u64 needed_num_pages, size_t align) {
2962 if (m_enable_aslr) { 2985 if (m_enable_aslr) {
2963 UNIMPLEMENTED(); 2986 UNIMPLEMENTED();
2964 } 2987 }
@@ -2966,11 +2989,11 @@ VAddr KPageTable::AllocateVirtualMemory(VAddr start, size_t region_num_pages, u6
2966 IsKernel() ? 1 : 4); 2989 IsKernel() ? 1 : 4);
2967} 2990}
2968 2991
2969Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group, 2992Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, const KPageGroup& page_group,
2970 OperationType operation) { 2993 OperationType operation) {
2971 ASSERT(this->IsLockedByCurrentThread()); 2994 ASSERT(this->IsLockedByCurrentThread());
2972 2995
2973 ASSERT(Common::IsAligned(addr, PageSize)); 2996 ASSERT(Common::IsAligned(GetInteger(addr), PageSize));
2974 ASSERT(num_pages > 0); 2997 ASSERT(num_pages > 0);
2975 ASSERT(num_pages == page_group.GetNumPages()); 2998 ASSERT(num_pages == page_group.GetNumPages());
2976 2999
@@ -3001,12 +3024,12 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_
3001 R_SUCCEED(); 3024 R_SUCCEED();
3002} 3025}
3003 3026
3004Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, 3027Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm,
3005 OperationType operation, PAddr map_addr) { 3028 OperationType operation, KPhysicalAddress map_addr) {
3006 ASSERT(this->IsLockedByCurrentThread()); 3029 ASSERT(this->IsLockedByCurrentThread());
3007 3030
3008 ASSERT(num_pages > 0); 3031 ASSERT(num_pages > 0);
3009 ASSERT(Common::IsAligned(addr, PageSize)); 3032 ASSERT(Common::IsAligned(GetInteger(addr), PageSize));
3010 ASSERT(ContainsPages(addr, num_pages)); 3033 ASSERT(ContainsPages(addr, num_pages));
3011 3034
3012 switch (operation) { 3035 switch (operation) {
@@ -3022,7 +3045,7 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm,
3022 case OperationType::MapFirst: 3045 case OperationType::MapFirst:
3023 case OperationType::Map: { 3046 case OperationType::Map: {
3024 ASSERT(map_addr); 3047 ASSERT(map_addr);
3025 ASSERT(Common::IsAligned(map_addr, PageSize)); 3048 ASSERT(Common::IsAligned(GetInteger(map_addr), PageSize));
3026 m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr); 3049 m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr);
3027 3050
3028 // Open references to pages, if we should. 3051 // Open references to pages, if we should.
@@ -3060,7 +3083,7 @@ void KPageTable::FinalizeUpdate(PageLinkedList* page_list) {
3060 } 3083 }
3061} 3084}
3062 3085
3063VAddr KPageTable::GetRegionAddress(KMemoryState state) const { 3086KProcessAddress KPageTable::GetRegionAddress(KMemoryState state) const {
3064 switch (state) { 3087 switch (state) {
3065 case KMemoryState::Free: 3088 case KMemoryState::Free:
3066 case KMemoryState::Kernel: 3089 case KMemoryState::Kernel:
@@ -3132,11 +3155,11 @@ size_t KPageTable::GetRegionSize(KMemoryState state) const {
3132 } 3155 }
3133} 3156}
3134 3157
3135bool KPageTable::CanContain(VAddr addr, size_t size, KMemoryState state) const { 3158bool KPageTable::CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
3136 const VAddr end = addr + size; 3159 const KProcessAddress end = addr + size;
3137 const VAddr last = end - 1; 3160 const KProcessAddress last = end - 1;
3138 3161
3139 const VAddr region_start = this->GetRegionAddress(state); 3162 const KProcessAddress region_start = this->GetRegionAddress(state);
3140 const size_t region_size = this->GetRegionSize(state); 3163 const size_t region_size = this->GetRegionSize(state);
3141 3164
3142 const bool is_in_region = 3165 const bool is_in_region =
@@ -3191,21 +3214,21 @@ Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_
3191 R_SUCCEED(); 3214 R_SUCCEED();
3192} 3215}
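All of the CheckMemoryState* overloads in this file validate each block with the same masked comparison over state, permission, and attributes; requiring a flag means passing it as both mask and expected value. In sketch form (the bit position is an assumption for illustration):

    #include <cstdint>

    constexpr bool MatchesMasked(uint32_t value, uint32_t mask, uint32_t expected) {
        return (value & mask) == expected;
    }

    constexpr uint32_t FlagCanIpcUserBuffer = 1u << 15; // assumed bit

    // Other bits may vary freely; only the masked bits are constrained.
    static_assert(MatchesMasked(FlagCanIpcUserBuffer | 0x3, FlagCanIpcUserBuffer, FlagCanIpcUserBuffer));
    static_assert(!MatchesMasked(0x3, FlagCanIpcUserBuffer, FlagCanIpcUserBuffer));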
3193 3216
3194Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size, 3217Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr,
3195 KMemoryState state_mask, KMemoryState state, 3218 size_t size, KMemoryState state_mask,
3196 KMemoryPermission perm_mask, KMemoryPermission perm, 3219 KMemoryState state, KMemoryPermission perm_mask,
3197 KMemoryAttribute attr_mask, 3220 KMemoryPermission perm, KMemoryAttribute attr_mask,
3198 KMemoryAttribute attr) const { 3221 KMemoryAttribute attr) const {
3199 ASSERT(this->IsLockedByCurrentThread()); 3222 ASSERT(this->IsLockedByCurrentThread());
3200 3223
3201 // Get information about the first block. 3224 // Get information about the first block.
3202 const VAddr last_addr = addr + size - 1; 3225 const KProcessAddress last_addr = addr + size - 1;
3203 KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr); 3226 KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
3204 KMemoryInfo info = it->GetMemoryInfo(); 3227 KMemoryInfo info = it->GetMemoryInfo();
3205 3228
3206 // If the start address isn't aligned, we need a block. 3229 // If the start address isn't aligned, we need a block.
3207 const size_t blocks_for_start_align = 3230 const size_t blocks_for_start_align =
3208 (Common::AlignDown(addr, PageSize) != info.GetAddress()) ? 1 : 0; 3231 (Common::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0;
3209 3232
3210 while (true) { 3233 while (true) {
3211 // Validate against the provided masks. 3234 // Validate against the provided masks.
@@ -3224,7 +3247,7 @@ Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr a
3224 3247
3225 // If the end address isn't aligned, we need a block. 3248 // If the end address isn't aligned, we need a block.
3226 const size_t blocks_for_end_align = 3249 const size_t blocks_for_end_align =
3227 (Common::AlignUp(addr + size, PageSize) != info.GetEndAddress()) ? 1 : 0; 3250 (Common::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
3228 3251
3229 if (out_blocks_needed != nullptr) { 3252 if (out_blocks_needed != nullptr) {
3230 *out_blocks_needed = blocks_for_start_align + blocks_for_end_align; 3253 *out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
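blocks_for_start_align and blocks_for_end_align count the splits an unaligned operation forces on the existing block layout: each edge that lands mid-block costs one extra block. A worked example with assumed numbers:

    #include <cstddef>
    #include <cstdint>

    constexpr uint64_t PageSize = 0x1000;
    constexpr uint64_t AlignDown(uint64_t v) { return v & ~(PageSize - 1); }
    constexpr uint64_t AlignUp(uint64_t v) { return AlignDown(v + PageSize - 1); }

    // One existing block covers [0x10000, 0x20000); the operation targets
    // [0x12800, 0x15800), so both edges land mid-block.
    constexpr uint64_t block_start = 0x10000, block_end = 0x20000;
    constexpr uint64_t addr = 0x12800, size = 0x3000;

    constexpr size_t blocks_for_start_align = (AlignDown(addr) != block_start) ? 1 : 0;
    constexpr size_t blocks_for_end_align = (AlignUp(addr + size) != block_end) ? 1 : 0;
    static_assert(blocks_for_start_align + blocks_for_end_align == 2);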
@@ -3235,20 +3258,20 @@ Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr a
3235 3258
3236Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, 3259Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
3237 KMemoryAttribute* out_attr, size_t* out_blocks_needed, 3260 KMemoryAttribute* out_attr, size_t* out_blocks_needed,
3238 VAddr addr, size_t size, KMemoryState state_mask, 3261 KProcessAddress addr, size_t size, KMemoryState state_mask,
3239 KMemoryState state, KMemoryPermission perm_mask, 3262 KMemoryState state, KMemoryPermission perm_mask,
3240 KMemoryPermission perm, KMemoryAttribute attr_mask, 3263 KMemoryPermission perm, KMemoryAttribute attr_mask,
3241 KMemoryAttribute attr, KMemoryAttribute ignore_attr) const { 3264 KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
3242 ASSERT(this->IsLockedByCurrentThread()); 3265 ASSERT(this->IsLockedByCurrentThread());
3243 3266
3244 // Get information about the first block. 3267 // Get information about the first block.
3245 const VAddr last_addr = addr + size - 1; 3268 const KProcessAddress last_addr = addr + size - 1;
3246 KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr); 3269 KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
3247 KMemoryInfo info = it->GetMemoryInfo(); 3270 KMemoryInfo info = it->GetMemoryInfo();
3248 3271
3249 // If the start address isn't aligned, we need a block. 3272 // If the start address isn't aligned, we need a block.
3250 const size_t blocks_for_start_align = 3273 const size_t blocks_for_start_align =
3251 (Common::AlignDown(addr, PageSize) != info.GetAddress()) ? 1 : 0; 3274 (Common::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0;
3252 3275
3253 // Validate all blocks in the range have correct state. 3276 // Validate all blocks in the range have correct state.
3254 const KMemoryState first_state = info.m_state; 3277 const KMemoryState first_state = info.m_state;
@@ -3277,7 +3300,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
3277 3300
3278 // If the end address isn't aligned, we need a block. 3301 // If the end address isn't aligned, we need a block.
3279 const size_t blocks_for_end_align = 3302 const size_t blocks_for_end_align =
3280 (Common::AlignUp(addr + size, PageSize) != info.GetEndAddress()) ? 1 : 0; 3303 (Common::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
3281 3304
3282 // Write output state. 3305 // Write output state.
3283 if (out_state != nullptr) { 3306 if (out_state != nullptr) {
@@ -3295,11 +3318,12 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
3295 R_SUCCEED(); 3318 R_SUCCEED();
3296} 3319}
3297 3320
3298Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size, 3321Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_paddr,
3299 KMemoryState state_mask, KMemoryState state, 3322 KProcessAddress addr, size_t size, KMemoryState state_mask,
3300 KMemoryPermission perm_mask, KMemoryPermission perm, 3323 KMemoryState state, KMemoryPermission perm_mask,
3301 KMemoryAttribute attr_mask, KMemoryAttribute attr, 3324 KMemoryPermission perm, KMemoryAttribute attr_mask,
3302 KMemoryPermission new_perm, KMemoryAttribute lock_attr) { 3325 KMemoryAttribute attr, KMemoryPermission new_perm,
3326 KMemoryAttribute lock_attr) {
3303 // Validate basic preconditions. 3327 // Validate basic preconditions.
3304 ASSERT((lock_attr & attr) == KMemoryAttribute::None); 3328 ASSERT((lock_attr & attr) == KMemoryAttribute::None);
3305 ASSERT((lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) == 3329 ASSERT((lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
@@ -3329,8 +3353,8 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr
3329 attr_mask, attr)); 3353 attr_mask, attr));
3330 3354
3331 // Get the physical address, if we're supposed to. 3355 // Get the physical address, if we're supposed to.
3332 if (out_paddr != nullptr) { 3356 if (out_paddr != nullptr) {
3333 ASSERT(this->GetPhysicalAddressLocked(out_paddr, addr)); 3357 ASSERT(this->GetPhysicalAddressLocked(out_paddr, addr));
3334 } 3358 }
3335 3359
3336 // Make the page group, if we're supposed to. 3360 // Make the page group, if we're supposed to.
@@ -3361,7 +3385,7 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr
3361 R_SUCCEED(); 3385 R_SUCCEED();
3362} 3386}
3363 3387
3364Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, 3388Result KPageTable::UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask,
3365 KMemoryState state, KMemoryPermission perm_mask, 3389 KMemoryState state, KMemoryPermission perm_mask,
3366 KMemoryPermission perm, KMemoryAttribute attr_mask, 3390 KMemoryPermission perm, KMemoryAttribute attr_mask,
3367 KMemoryAttribute attr, KMemoryPermission new_perm, 3391 KMemoryAttribute attr, KMemoryPermission new_perm,
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 5c5356338..1917b2a98 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -6,7 +6,6 @@
6#include <memory> 6#include <memory>
7 7
8#include "common/common_funcs.h" 8#include "common/common_funcs.h"
9#include "common/common_types.h"
10#include "common/page_table.h" 9#include "common/page_table.h"
11#include "core/file_sys/program_metadata.h" 10#include "core/file_sys/program_metadata.h"
12#include "core/hle/kernel/k_dynamic_resource_manager.h" 11#include "core/hle/kernel/k_dynamic_resource_manager.h"
@@ -15,6 +14,7 @@
15#include "core/hle/kernel/k_memory_block_manager.h" 14#include "core/hle/kernel/k_memory_block_manager.h"
16#include "core/hle/kernel/k_memory_layout.h" 15#include "core/hle/kernel/k_memory_layout.h"
17#include "core/hle/kernel/k_memory_manager.h" 16#include "core/hle/kernel/k_memory_manager.h"
17#include "core/hle/kernel/k_typed_address.h"
18#include "core/hle/result.h" 18#include "core/hle/result.h"
19#include "core/memory.h" 19#include "core/memory.h"
20 20
@@ -65,45 +65,47 @@ public:
65 65
     Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
                                 bool enable_das_merge, bool from_back, KMemoryManager::Pool pool,
-                                VAddr code_addr, size_t code_size, KSystemResource* system_resource,
-                                KResourceLimit* resource_limit);
+                                KProcessAddress code_addr, size_t code_size,
+                                KSystemResource* system_resource, KResourceLimit* resource_limit);
 
     void Finalize();
 
-    Result MapProcessCode(VAddr addr, size_t pages_count, KMemoryState state,
+    Result MapProcessCode(KProcessAddress addr, size_t pages_count, KMemoryState state,
                           KMemoryPermission perm);
-    Result MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size);
-    Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size,
+    Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
+    Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
                            ICacheInvalidationStrategy icache_invalidation_strategy);
-    Result UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table,
-                              VAddr src_addr);
-    Result MapPhysicalMemory(VAddr addr, size_t size);
-    Result UnmapPhysicalMemory(VAddr addr, size_t size);
-    Result MapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
-    Result UnmapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
-    Result SetProcessMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm);
-    KMemoryInfo QueryInfo(VAddr addr);
-    Result SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission perm);
-    Result SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr);
+    Result UnmapProcessMemory(KProcessAddress dst_addr, size_t size, KPageTable& src_page_table,
+                              KProcessAddress src_addr);
+    Result MapPhysicalMemory(KProcessAddress addr, size_t size);
+    Result UnmapPhysicalMemory(KProcessAddress addr, size_t size);
+    Result MapMemory(KProcessAddress dst_addr, KProcessAddress src_addr, size_t size);
+    Result UnmapMemory(KProcessAddress dst_addr, KProcessAddress src_addr, size_t size);
+    Result SetProcessMemoryPermission(KProcessAddress addr, size_t size,
+                                      Svc::MemoryPermission svc_perm);
+    KMemoryInfo QueryInfo(KProcessAddress addr);
+    Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm);
+    Result SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr);
     Result SetMaxHeapSize(size_t size);
-    Result SetHeapSize(VAddr* out, size_t size);
-    Result LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size,
+    Result SetHeapSize(u64* out, size_t size);
+    Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size,
                                         KMemoryPermission perm, bool is_aligned, bool check_heap);
-    Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bool check_heap);
+    Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap);
 
-    Result UnlockForDeviceAddressSpace(VAddr addr, size_t size);
+    Result UnlockForDeviceAddressSpace(KProcessAddress addr, size_t size);
 
-    Result LockForIpcUserBuffer(PAddr* out, VAddr address, size_t size);
-    Result UnlockForIpcUserBuffer(VAddr address, size_t size);
+    Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size);
+    Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size);
 
-    Result SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr, KPageTable& src_page_table,
-                       KMemoryPermission test_perm, KMemoryState dst_state, bool send);
-    Result CleanupForIpcServer(VAddr address, size_t size, KMemoryState dst_state);
-    Result CleanupForIpcClient(VAddr address, size_t size, KMemoryState dst_state);
+    Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
+                       KPageTable& src_page_table, KMemoryPermission test_perm,
+                       KMemoryState dst_state, bool send);
+    Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state);
+    Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state);
 
-    Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size);
-    Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg);
-    Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
+    Result LockForCodeMemory(KPageGroup* out, KProcessAddress addr, size_t size);
+    Result UnlockForCodeMemory(KProcessAddress addr, size_t size, const KPageGroup& pg);
+    Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
                                 KMemoryState state_mask, KMemoryState state,
                                 KMemoryPermission perm_mask, KMemoryPermission perm,
                                 KMemoryAttribute attr_mask, KMemoryAttribute attr);
@@ -120,7 +122,7 @@ public:
         return m_block_info_manager;
     }
 
-    bool CanContain(VAddr addr, size_t size, KMemoryState state) const;
+    bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const;
 
     Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
                     KPhysicalAddress phys_addr, KProcessAddress region_start,
@@ -173,8 +175,8 @@ protected:
         m_root = n;
     }
 
-    void Push(Core::Memory::Memory& memory, VAddr addr) {
-        this->Push(memory.GetPointer<Node>(addr));
+    void Push(Core::Memory::Memory& memory, KVirtualAddress addr) {
+        this->Push(memory.GetPointer<Node>(GetInteger(addr)));
     }
 
     Node* Peek() const {
@@ -212,27 +214,28 @@ private:
     Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
                     KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start,
                     size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
-    bool IsRegionContiguous(VAddr addr, u64 size) const;
-    void AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list);
-    KMemoryInfo QueryInfoImpl(VAddr addr);
-    VAddr AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages,
-                                size_t align);
-    Result Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group,
+    bool IsRegionContiguous(KProcessAddress addr, u64 size) const;
+    void AddRegionToPages(KProcessAddress start, size_t num_pages, KPageGroup& page_linked_list);
+    KMemoryInfo QueryInfoImpl(KProcessAddress addr);
+    KProcessAddress AllocateVirtualMemory(KProcessAddress start, size_t region_num_pages,
+                                          u64 needed_num_pages, size_t align);
+    Result Operate(KProcessAddress addr, size_t num_pages, const KPageGroup& page_group,
                    OperationType operation);
-    Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation,
-                   PAddr map_addr = 0);
+    Result Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm,
+                   OperationType operation, KPhysicalAddress map_addr = 0);
     void FinalizeUpdate(PageLinkedList* page_list);
-    VAddr GetRegionAddress(KMemoryState state) const;
+    KProcessAddress GetRegionAddress(KMemoryState state) const;
     size_t GetRegionSize(KMemoryState state) const;
 
-    VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
-                       size_t alignment, size_t offset, size_t guard_pages);
+    KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
+                                 size_t num_pages, size_t alignment, size_t offset,
+                                 size_t guard_pages);
 
-    Result CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size,
+    Result CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
                                       KMemoryState state_mask, KMemoryState state,
                                       KMemoryPermission perm_mask, KMemoryPermission perm,
                                       KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
-    Result CheckMemoryStateContiguous(VAddr addr, size_t size, KMemoryState state_mask,
+    Result CheckMemoryStateContiguous(KProcessAddress addr, size_t size, KMemoryState state_mask,
                                       KMemoryState state, KMemoryPermission perm_mask,
                                       KMemoryPermission perm, KMemoryAttribute attr_mask,
                                       KMemoryAttribute attr) const {
@@ -244,12 +247,12 @@ private:
                             KMemoryPermission perm_mask, KMemoryPermission perm,
                             KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
     Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
-                            KMemoryAttribute* out_attr, size_t* out_blocks_needed, VAddr addr,
-                            size_t size, KMemoryState state_mask, KMemoryState state,
-                            KMemoryPermission perm_mask, KMemoryPermission perm,
+                            KMemoryAttribute* out_attr, size_t* out_blocks_needed,
+                            KProcessAddress addr, size_t size, KMemoryState state_mask,
+                            KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
                             KMemoryAttribute attr_mask, KMemoryAttribute attr,
                             KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
-    Result CheckMemoryState(size_t* out_blocks_needed, VAddr addr, size_t size,
+    Result CheckMemoryState(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
                             KMemoryState state_mask, KMemoryState state,
                             KMemoryPermission perm_mask, KMemoryPermission perm,
                             KMemoryAttribute attr_mask, KMemoryAttribute attr,
@@ -258,39 +261,40 @@ private:
                                         state_mask, state, perm_mask, perm, attr_mask, attr,
                                         ignore_attr));
     }
-    Result CheckMemoryState(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
-                            KMemoryPermission perm_mask, KMemoryPermission perm,
+    Result CheckMemoryState(KProcessAddress addr, size_t size, KMemoryState state_mask,
+                            KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
                             KMemoryAttribute attr_mask, KMemoryAttribute attr,
                             KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
         R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
                                         attr_mask, attr, ignore_attr));
     }
 
-    Result LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
-                             KMemoryState state_mask, KMemoryState state,
-                             KMemoryPermission perm_mask, KMemoryPermission perm,
-                             KMemoryAttribute attr_mask, KMemoryAttribute attr,
-                             KMemoryPermission new_perm, KMemoryAttribute lock_attr);
-    Result UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
-                        KMemoryPermission perm_mask, KMemoryPermission perm,
+    Result LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_KPhysicalAddress,
+                             KProcessAddress addr, size_t size, KMemoryState state_mask,
+                             KMemoryState state, KMemoryPermission perm_mask,
+                             KMemoryPermission perm, KMemoryAttribute attr_mask,
+                             KMemoryAttribute attr, KMemoryPermission new_perm,
+                             KMemoryAttribute lock_attr);
+    Result UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask,
+                        KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
                         KMemoryAttribute attr_mask, KMemoryAttribute attr,
                         KMemoryPermission new_perm, KMemoryAttribute lock_attr,
                         const KPageGroup* pg);
 
-    Result MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages);
-    bool IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages);
+    Result MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages);
+    bool IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages);
 
     bool IsLockedByCurrentThread() const {
         return m_general_lock.IsLockedByCurrentThread();
     }
 
-    bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) {
+    bool IsHeapPhysicalAddress(const KMemoryLayout& layout, KPhysicalAddress phys_addr) {
         ASSERT(this->IsLockedByCurrentThread());
 
         return layout.IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
     }
 
-    bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const {
+    bool GetPhysicalAddressLocked(KPhysicalAddress* out, KProcessAddress virt_addr) const {
         ASSERT(this->IsLockedByCurrentThread());
 
         *out = GetPhysicalAddr(virt_addr);
@@ -298,12 +302,13 @@ private:
         return *out != 0;
     }
 
-    Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed, VAddr address,
-                             size_t size, KMemoryPermission test_perm, KMemoryState dst_state);
-    Result SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_addr,
+    Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
+                             KProcessAddress address, size_t size, KMemoryPermission test_perm,
+                             KMemoryState dst_state);
+    Result SetupForIpcServer(KProcessAddress* out_addr, size_t size, KProcessAddress src_addr,
                              KMemoryPermission test_perm, KMemoryState dst_state,
                              KPageTable& src_page_table, bool send);
-    void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address,
+    void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, KProcessAddress address,
                                                  size_t size, KMemoryPermission prot_perm);
 
     Result AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
@@ -315,61 +320,61 @@ private:
     mutable KLightLock m_map_physical_memory_lock;
 
 public:
-    constexpr VAddr GetAddressSpaceStart() const {
+    constexpr KProcessAddress GetAddressSpaceStart() const {
         return m_address_space_start;
     }
-    constexpr VAddr GetAddressSpaceEnd() const {
+    constexpr KProcessAddress GetAddressSpaceEnd() const {
         return m_address_space_end;
     }
     constexpr size_t GetAddressSpaceSize() const {
         return m_address_space_end - m_address_space_start;
     }
-    constexpr VAddr GetHeapRegionStart() const {
+    constexpr KProcessAddress GetHeapRegionStart() const {
         return m_heap_region_start;
     }
-    constexpr VAddr GetHeapRegionEnd() const {
+    constexpr KProcessAddress GetHeapRegionEnd() const {
         return m_heap_region_end;
     }
     constexpr size_t GetHeapRegionSize() const {
         return m_heap_region_end - m_heap_region_start;
     }
-    constexpr VAddr GetAliasRegionStart() const {
+    constexpr KProcessAddress GetAliasRegionStart() const {
         return m_alias_region_start;
     }
-    constexpr VAddr GetAliasRegionEnd() const {
+    constexpr KProcessAddress GetAliasRegionEnd() const {
         return m_alias_region_end;
     }
     constexpr size_t GetAliasRegionSize() const {
         return m_alias_region_end - m_alias_region_start;
     }
-    constexpr VAddr GetStackRegionStart() const {
+    constexpr KProcessAddress GetStackRegionStart() const {
         return m_stack_region_start;
     }
-    constexpr VAddr GetStackRegionEnd() const {
+    constexpr KProcessAddress GetStackRegionEnd() const {
         return m_stack_region_end;
     }
     constexpr size_t GetStackRegionSize() const {
         return m_stack_region_end - m_stack_region_start;
     }
-    constexpr VAddr GetKernelMapRegionStart() const {
+    constexpr KProcessAddress GetKernelMapRegionStart() const {
         return m_kernel_map_region_start;
     }
-    constexpr VAddr GetKernelMapRegionEnd() const {
+    constexpr KProcessAddress GetKernelMapRegionEnd() const {
         return m_kernel_map_region_end;
     }
-    constexpr VAddr GetCodeRegionStart() const {
+    constexpr KProcessAddress GetCodeRegionStart() const {
         return m_code_region_start;
     }
-    constexpr VAddr GetCodeRegionEnd() const {
+    constexpr KProcessAddress GetCodeRegionEnd() const {
         return m_code_region_end;
     }
-    constexpr VAddr GetAliasCodeRegionStart() const {
+    constexpr KProcessAddress GetAliasCodeRegionStart() const {
         return m_alias_code_region_start;
     }
-    constexpr VAddr GetAliasCodeRegionEnd() const {
+    constexpr KProcessAddress GetAliasCodeRegionEnd() const {
         return m_alias_code_region_end;
     }
-    constexpr VAddr GetAliasCodeRegionSize() const {
+    constexpr size_t GetAliasCodeRegionSize() const {
         return m_alias_code_region_end - m_alias_code_region_start;
     }
     size_t GetNormalMemorySize() {
@@ -382,25 +387,25 @@ public:
     constexpr size_t GetHeapSize() const {
         return m_current_heap_end - m_heap_region_start;
     }
-    constexpr bool IsInsideAddressSpace(VAddr address, size_t size) const {
+    constexpr bool IsInsideAddressSpace(KProcessAddress address, size_t size) const {
         return m_address_space_start <= address && address + size - 1 <= m_address_space_end - 1;
     }
-    constexpr bool IsOutsideAliasRegion(VAddr address, size_t size) const {
+    constexpr bool IsOutsideAliasRegion(KProcessAddress address, size_t size) const {
         return m_alias_region_start > address || address + size - 1 > m_alias_region_end - 1;
     }
-    constexpr bool IsOutsideStackRegion(VAddr address, size_t size) const {
+    constexpr bool IsOutsideStackRegion(KProcessAddress address, size_t size) const {
         return m_stack_region_start > address || address + size - 1 > m_stack_region_end - 1;
     }
-    constexpr bool IsInvalidRegion(VAddr address, size_t size) const {
+    constexpr bool IsInvalidRegion(KProcessAddress address, size_t size) const {
         return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1;
     }
-    constexpr bool IsInsideHeapRegion(VAddr address, size_t size) const {
+    constexpr bool IsInsideHeapRegion(KProcessAddress address, size_t size) const {
         return address + size > m_heap_region_start && m_heap_region_end > address;
     }
-    constexpr bool IsInsideAliasRegion(VAddr address, size_t size) const {
+    constexpr bool IsInsideAliasRegion(KProcessAddress address, size_t size) const {
         return address + size > m_alias_region_start && m_alias_region_end > address;
     }
-    constexpr bool IsOutsideASLRRegion(VAddr address, size_t size) const {
+    constexpr bool IsOutsideASLRRegion(KProcessAddress address, size_t size) const {
         if (IsInvalidRegion(address, size)) {
             return true;
         }
@@ -412,47 +417,53 @@ public:
         }
         return {};
     }
-    constexpr bool IsInsideASLRRegion(VAddr address, size_t size) const {
+    constexpr bool IsInsideASLRRegion(KProcessAddress address, size_t size) const {
         return !IsOutsideASLRRegion(address, size);
     }
     constexpr size_t GetNumGuardPages() const {
         return IsKernel() ? 1 : 4;
     }
-    PAddr GetPhysicalAddr(VAddr addr) const {
+    KPhysicalAddress GetPhysicalAddr(KProcessAddress addr) const {
         const auto backing_addr = m_page_table_impl->backing_addr[addr >> PageBits];
         ASSERT(backing_addr);
-        return backing_addr + addr;
+        return backing_addr + GetInteger(addr);
     }
-    constexpr bool Contains(VAddr addr) const {
+    constexpr bool Contains(KProcessAddress addr) const {
         return m_address_space_start <= addr && addr <= m_address_space_end - 1;
     }
-    constexpr bool Contains(VAddr addr, size_t size) const {
+    constexpr bool Contains(KProcessAddress addr, size_t size) const {
         return m_address_space_start <= addr && addr < addr + size &&
                addr + size - 1 <= m_address_space_end - 1;
     }
 
 public:
-    static VAddr GetLinearMappedVirtualAddress(const KMemoryLayout& layout, PAddr addr) {
+    static KVirtualAddress GetLinearMappedVirtualAddress(const KMemoryLayout& layout,
+                                                         KPhysicalAddress addr) {
         return layout.GetLinearVirtualAddress(addr);
     }
 
-    static PAddr GetLinearMappedPhysicalAddress(const KMemoryLayout& layout, VAddr addr) {
+    static KPhysicalAddress GetLinearMappedPhysicalAddress(const KMemoryLayout& layout,
+                                                           KVirtualAddress addr) {
         return layout.GetLinearPhysicalAddress(addr);
     }
 
-    static VAddr GetHeapVirtualAddress(const KMemoryLayout& layout, PAddr addr) {
+    static KVirtualAddress GetHeapVirtualAddress(const KMemoryLayout& layout,
+                                                 KPhysicalAddress addr) {
         return GetLinearMappedVirtualAddress(layout, addr);
     }
 
-    static PAddr GetHeapPhysicalAddress(const KMemoryLayout& layout, VAddr addr) {
+    static KPhysicalAddress GetHeapPhysicalAddress(const KMemoryLayout& layout,
+                                                   KVirtualAddress addr) {
         return GetLinearMappedPhysicalAddress(layout, addr);
     }
 
-    static VAddr GetPageTableVirtualAddress(const KMemoryLayout& layout, PAddr addr) {
+    static KVirtualAddress GetPageTableVirtualAddress(const KMemoryLayout& layout,
+                                                      KPhysicalAddress addr) {
         return GetLinearMappedVirtualAddress(layout, addr);
     }
 
-    static PAddr GetPageTablePhysicalAddress(const KMemoryLayout& layout, VAddr addr) {
+    static KPhysicalAddress GetPageTablePhysicalAddress(const KMemoryLayout& layout,
+                                                        KVirtualAddress addr) {
         return GetLinearMappedPhysicalAddress(layout, addr);
     }
 
@@ -464,7 +475,7 @@ private:
         return m_enable_aslr;
     }
 
-    constexpr bool ContainsPages(VAddr addr, size_t num_pages) const {
+    constexpr bool ContainsPages(KProcessAddress addr, size_t num_pages) const {
         return (m_address_space_start <= addr) &&
                (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) &&
                (addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
@@ -489,21 +500,21 @@ private:
     };
 
 private:
-    VAddr m_address_space_start{};
-    VAddr m_address_space_end{};
-    VAddr m_heap_region_start{};
-    VAddr m_heap_region_end{};
-    VAddr m_current_heap_end{};
-    VAddr m_alias_region_start{};
-    VAddr m_alias_region_end{};
-    VAddr m_stack_region_start{};
-    VAddr m_stack_region_end{};
-    VAddr m_kernel_map_region_start{};
-    VAddr m_kernel_map_region_end{};
-    VAddr m_code_region_start{};
-    VAddr m_code_region_end{};
-    VAddr m_alias_code_region_start{};
-    VAddr m_alias_code_region_end{};
+    KProcessAddress m_address_space_start{};
+    KProcessAddress m_address_space_end{};
+    KProcessAddress m_heap_region_start{};
+    KProcessAddress m_heap_region_end{};
+    KProcessAddress m_current_heap_end{};
+    KProcessAddress m_alias_region_start{};
+    KProcessAddress m_alias_region_end{};
+    KProcessAddress m_stack_region_start{};
+    KProcessAddress m_stack_region_end{};
+    KProcessAddress m_kernel_map_region_start{};
+    KProcessAddress m_kernel_map_region_end{};
+    KProcessAddress m_code_region_start{};
+    KProcessAddress m_code_region_end{};
+    KProcessAddress m_alias_code_region_start{};
+    KProcessAddress m_alias_code_region_end{};
 
     size_t m_max_heap_size{};
     size_t m_mapped_physical_memory_size{};
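
The k_page_table.h hunks above are almost entirely a mechanical VAddr/PAddr-to-KProcessAddress/KVirtualAddress/KPhysicalAddress substitution. The payoff is that the new types are distinct, non-convertible wrappers: mixing address spaces stops compiling, while same-type differences still collapse to plain integers, which is why GetAddressSpaceSize() and the corrected GetAliasCodeRegionSize() return size_t. The following is a minimal sketch of that kind of tagged wrapper, assuming hypothetical names rather than the wrapper this commit actually adds:

// A compile-time sketch of a tagged address type. All names here are
// hypothetical stand-ins, not the project's actual implementation.
#include <compare>
#include <cstdint>

template <typename Tag>
class TaggedAddress {
public:
    constexpr TaggedAddress() = default;
    constexpr TaggedAddress(std::uint64_t address) : m_address(address) {}

    // Same-type arithmetic: adding an offset keeps the type, while the
    // difference of two addresses is a plain integer (a size).
    constexpr TaggedAddress operator+(std::uint64_t offset) const {
        return TaggedAddress(m_address + offset);
    }
    constexpr std::uint64_t operator-(TaggedAddress rhs) const {
        return m_address - rhs.m_address;
    }
    constexpr auto operator<=>(const TaggedAddress&) const = default;

    // Explicit escape hatch for APIs that genuinely take a raw integer.
    constexpr std::uint64_t GetInteger() const {
        return m_address;
    }

private:
    std::uint64_t m_address{};
};

struct ProcessTag {};
struct PhysicalTag {};
using ProcessAddress = TaggedAddress<ProcessTag>;   // cf. KProcessAddress
using PhysicalAddress = TaggedAddress<PhysicalTag>; // cf. KPhysicalAddress

// Free-function form matching the GetInteger(addr) calls in the hunks above.
template <typename Tag>
constexpr std::uint64_t GetInteger(TaggedAddress<Tag> addr) {
    return addr.GetInteger();
}

Under such a scheme, passing a PhysicalAddress where a ProcessAddress is expected is a compile error, `end - start` is still an integer, and GetInteger() is the one visible seam wherever the diff crosses into integer-typed APIs (e.g. memory.GetPointer<Node>(GetInteger(addr)) above).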
diff --git a/src/core/hle/kernel/k_page_table_manager.h b/src/core/hle/kernel/k_page_table_manager.h
index 91a45cde3..4b0e034d0 100644
--- a/src/core/hle/kernel/k_page_table_manager.h
+++ b/src/core/hle/kernel/k_page_table_manager.h
@@ -5,9 +5,9 @@
 
 #include <atomic>
 
-#include "common/common_types.h"
 #include "core/hle/kernel/k_dynamic_resource_manager.h"
 #include "core/hle/kernel/k_page_table_slab_heap.h"
+#include "core/hle/kernel/k_typed_address.h"
 
 namespace Kernel {
 
@@ -26,23 +26,23 @@ public:
         BaseHeap::Initialize(page_allocator, pt_heap);
     }
 
-    VAddr Allocate() {
-        return VAddr(BaseHeap::Allocate());
+    KVirtualAddress Allocate() {
+        return KVirtualAddress(BaseHeap::Allocate());
     }
 
-    RefCount GetRefCount(VAddr addr) const {
+    RefCount GetRefCount(KVirtualAddress addr) const {
         return m_pt_heap->GetRefCount(addr);
     }
 
-    void Open(VAddr addr, int count) {
+    void Open(KVirtualAddress addr, int count) {
         return m_pt_heap->Open(addr, count);
     }
 
-    bool Close(VAddr addr, int count) {
+    bool Close(KVirtualAddress addr, int count) {
        return m_pt_heap->Close(addr, count);
     }
 
-    bool IsInPageTableHeap(VAddr addr) const {
+    bool IsInPageTableHeap(KVirtualAddress addr) const {
         return m_pt_heap->IsInRange(addr);
     }
 
diff --git a/src/core/hle/kernel/k_page_table_slab_heap.h b/src/core/hle/kernel/k_page_table_slab_heap.h
index 9a8d77316..7da0ea669 100644
--- a/src/core/hle/kernel/k_page_table_slab_heap.h
+++ b/src/core/hle/kernel/k_page_table_slab_heap.h
@@ -6,8 +6,8 @@
 #include <array>
 #include <vector>
 
-#include "common/common_types.h"
 #include "core/hle/kernel/k_dynamic_slab_heap.h"
+#include "core/hle/kernel/k_typed_address.h"
 #include "core/hle/kernel/slab_helpers.h"
 
 namespace Kernel {
@@ -45,12 +45,12 @@ public:
         this->Initialize(rc);
     }
 
-    RefCount GetRefCount(VAddr addr) {
+    RefCount GetRefCount(KVirtualAddress addr) {
         ASSERT(this->IsInRange(addr));
         return *this->GetRefCountPointer(addr);
     }
 
-    void Open(VAddr addr, int count) {
+    void Open(KVirtualAddress addr, int count) {
         ASSERT(this->IsInRange(addr));
 
         *this->GetRefCountPointer(addr) += static_cast<RefCount>(count);
@@ -58,7 +58,7 @@ public:
         ASSERT(this->GetRefCount(addr) > 0);
     }
 
-    bool Close(VAddr addr, int count) {
+    bool Close(KVirtualAddress addr, int count) {
         ASSERT(this->IsInRange(addr));
         ASSERT(this->GetRefCount(addr) >= count);
 
@@ -66,7 +66,7 @@ public:
         return this->GetRefCount(addr) == 0;
     }
 
-    bool IsInPageTableHeap(VAddr addr) const {
+    bool IsInPageTableHeap(KVirtualAddress addr) const {
         return this->IsInRange(addr);
     }
 
@@ -81,7 +81,7 @@ private:
         }
     }
 
-    RefCount* GetRefCountPointer(VAddr addr) {
+    RefCount* GetRefCountPointer(KVirtualAddress addr) {
         return m_ref_counts.data() + ((addr - this->GetAddress()) / PageSize);
     }
 
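
The slab-heap hunks above change only the address type; the bookkeeping is untouched: one RefCount per page, located by dividing the offset from the heap base by PageSize. A simplified, self-contained sketch of that indexing follows — the type aliases and class name are stand-ins, not the kernel's:

// Simplified sketch of per-page refcount indexing, mirroring the
// GetRefCountPointer arithmetic in the diff above.
#include <cstddef>
#include <cstdint>
#include <vector>

using KVirtualAddress = std::uint64_t; // stand-in for the typed wrapper
constexpr std::size_t PageSize = 0x1000;

class PageRefCounts {
public:
    PageRefCounts(KVirtualAddress base, std::size_t num_pages)
        : m_base(base), m_ref_counts(num_pages, 0) {}

    // Open/Close adjust the count for the page containing `addr`.
    void Open(KVirtualAddress addr, int count) {
        *RefCountPointer(addr) += static_cast<std::uint16_t>(count);
    }
    bool Close(KVirtualAddress addr, int count) {
        *RefCountPointer(addr) -= static_cast<std::uint16_t>(count);
        return *RefCountPointer(addr) == 0; // page is free again at zero
    }

private:
    std::uint16_t* RefCountPointer(KVirtualAddress addr) {
        // Same arithmetic as the diff: (offset from heap base) / PageSize.
        return m_ref_counts.data() + ((addr - m_base) / PageSize);
    }

    KVirtualAddress m_base;
    std::vector<std::uint16_t> m_ref_counts;
};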
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 9d18f4049..53f8139f3 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -36,8 +36,9 @@ namespace {
  * @param owner_process The parent process for the main thread
  * @param priority The priority to give the main thread
  */
-void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority, VAddr stack_top) {
-    const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart();
+void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority,
+                     KProcessAddress stack_top) {
+    const KProcessAddress entry_point = owner_process.PageTable().GetCodeRegionStart();
     ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::ThreadCountMax, 1));
 
     KThread* thread = KThread::Create(system.Kernel());
@@ -219,7 +220,7 @@ void KProcess::UnpinThread(KThread* thread) {
     KScheduler::SetSchedulerUpdateNeeded(m_kernel);
 }
 
-Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
+Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] KProcessAddress address,
                                  [[maybe_unused]] size_t size) {
     // Lock ourselves, to prevent concurrent access.
     KScopedLightLock lk(m_state_lock);
@@ -248,7 +249,7 @@ Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr ad
     R_SUCCEED();
 }
 
-void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
+void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] KProcessAddress address,
                                   [[maybe_unused]] size_t size) {
     // Lock ourselves, to prevent concurrent access.
     KScopedLightLock lk(m_state_lock);
@@ -399,8 +400,8 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
 
     case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
         m_memory_usage_capacity =
-            m_page_table.GetHeapRegionEnd() - m_page_table.GetHeapRegionStart() +
-            m_page_table.GetAliasRegionEnd() - m_page_table.GetAliasRegionStart();
+            (m_page_table.GetHeapRegionEnd() - m_page_table.GetHeapRegionStart()) +
+            (m_page_table.GetAliasRegionEnd() - m_page_table.GetAliasRegionStart());
         break;
 
     default:
@@ -492,9 +493,9 @@ void KProcess::Finalize() {
     KSynchronizationObject::Finalize();
 }
 
-Result KProcess::CreateThreadLocalRegion(VAddr* out) {
+Result KProcess::CreateThreadLocalRegion(KProcessAddress* out) {
     KThreadLocalPage* tlp = nullptr;
-    VAddr tlr = 0;
+    KProcessAddress tlr = 0;
 
     // See if we can get a region from a partially used TLP.
     {
@@ -543,7 +544,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
     R_SUCCEED();
 }
 
-Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
+Result KProcess::DeleteThreadLocalRegion(KProcessAddress addr) {
     KThreadLocalPage* page_to_free = nullptr;
 
     // Release the region.
@@ -551,10 +552,10 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
         KScopedSchedulerLock sl{m_kernel};
 
         // Try to find the page in the partially used list.
-        auto it = m_partially_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize));
+        auto it = m_partially_used_tlp_tree.find_key(Common::AlignDown(GetInteger(addr), PageSize));
         if (it == m_partially_used_tlp_tree.end()) {
             // If we don't find it, it has to be in the fully used list.
-            it = m_fully_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize));
+            it = m_fully_used_tlp_tree.find_key(Common::AlignDown(GetInteger(addr), PageSize));
             R_UNLESS(it != m_fully_used_tlp_tree.end(), ResultInvalidAddress);
 
             // Release the region.
@@ -591,7 +592,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
     R_SUCCEED();
 }
 
-bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
+bool KProcess::InsertWatchpoint(Core::System& system, KProcessAddress addr, u64 size,
                                 DebugWatchpointType type) {
     const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) {
         return wp.type == DebugWatchpointType::None;
@@ -605,7 +606,8 @@ bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
     watch->end_address = addr + size;
     watch->type = type;
 
-    for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) {
+    for (KProcessAddress page = Common::AlignDown(GetInteger(addr), PageSize); page < addr + size;
+         page += PageSize) {
         m_debug_page_refcounts[page]++;
         system.Memory().MarkRegionDebug(page, PageSize, true);
     }
@@ -613,7 +615,7 @@ bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
     return true;
 }
 
-bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
+bool KProcess::RemoveWatchpoint(Core::System& system, KProcessAddress addr, u64 size,
                                 DebugWatchpointType type) {
     const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) {
         return wp.start_address == addr && wp.end_address == addr + size && wp.type == type;
@@ -627,7 +629,8 @@ bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
     watch->end_address = 0;
     watch->type = DebugWatchpointType::None;
 
-    for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) {
+    for (KProcessAddress page = Common::AlignDown(GetInteger(addr), PageSize); page < addr + size;
+         page += PageSize) {
         m_debug_page_refcounts[page]--;
         if (!m_debug_page_refcounts[page]) {
             system.Memory().MarkRegionDebug(page, PageSize, false);
@@ -637,7 +640,7 @@ bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
     return true;
 }
 
-void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
+void KProcess::LoadModule(CodeSet code_set, KProcessAddress base_addr) {
     const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
                                       Svc::MemoryPermission permission) {
         m_page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
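
The watchpoint hunks above show the one recurring non-mechanical edit in this file: Common::AlignDown() operates on integers, so the typed address is unwrapped once via GetInteger() and the loop then advances a typed address page by page. A standalone sketch of the pattern, with AlignDown and the address type reduced to stand-ins:

// Sketch of the page-walk pattern from InsertWatchpoint/RemoveWatchpoint:
// align the start down to a page boundary, then visit every touched page.
#include <cstdint>
#include <map>

using KProcessAddress = std::uint64_t; // stand-in for the typed wrapper
constexpr std::uint64_t PageSize = 0x1000;

constexpr std::uint64_t GetInteger(KProcessAddress addr) {
    return addr;
}
constexpr std::uint64_t AlignDown(std::uint64_t value, std::uint64_t align) {
    return value & ~(align - 1); // align must be a power of two
}

void ForEachTouchedPage(KProcessAddress addr, std::uint64_t size,
                        std::map<KProcessAddress, std::uint64_t>& refcounts) {
    for (KProcessAddress page = AlignDown(GetInteger(addr), PageSize);
         page < addr + size; page += PageSize) {
        ++refcounts[page]; // mirrors m_debug_page_refcounts[page]++
    }
}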
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index 7b7a971b8..04b6bbb86 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -8,7 +8,6 @@
 #include <list>
 #include <map>
 #include <string>
-#include "common/common_types.h"
 #include "core/hle/kernel/k_address_arbiter.h"
 #include "core/hle/kernel/k_auto_object.h"
 #include "core/hle/kernel/k_condition_variable.h"
@@ -16,6 +15,7 @@
 #include "core/hle/kernel/k_page_table.h"
 #include "core/hle/kernel/k_synchronization_object.h"
 #include "core/hle/kernel/k_thread_local_page.h"
+#include "core/hle/kernel/k_typed_address.h"
 #include "core/hle/kernel/k_worker_task.h"
 #include "core/hle/kernel/process_capability.h"
 #include "core/hle/kernel/slab_helpers.h"
@@ -59,8 +59,8 @@ enum class DebugWatchpointType : u8 {
 DECLARE_ENUM_FLAG_OPERATORS(DebugWatchpointType);
 
 struct DebugWatchpoint {
-    VAddr start_address;
-    VAddr end_address;
+    KProcessAddress start_address;
+    KProcessAddress end_address;
     DebugWatchpointType type;
 };
 
@@ -135,11 +135,11 @@ public:
         return m_handle_table;
     }
 
-    Result SignalToAddress(VAddr address) {
+    Result SignalToAddress(KProcessAddress address) {
         return m_condition_var.SignalToAddress(address);
     }
 
-    Result WaitForAddress(Handle handle, VAddr address, u32 tag) {
+    Result WaitForAddress(Handle handle, KProcessAddress address, u32 tag) {
         return m_condition_var.WaitForAddress(handle, address, tag);
     }
 
@@ -147,20 +147,21 @@ public:
         return m_condition_var.Signal(cv_key, count);
     }
 
-    Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
+    Result WaitConditionVariable(KProcessAddress address, u64 cv_key, u32 tag, s64 ns) {
         R_RETURN(m_condition_var.Wait(address, cv_key, tag, ns));
     }
 
-    Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) {
+    Result SignalAddressArbiter(uint64_t address, Svc::SignalType signal_type, s32 value,
+                                s32 count) {
         R_RETURN(m_address_arbiter.SignalToAddress(address, signal_type, value, count));
     }
 
-    Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
+    Result WaitAddressArbiter(uint64_t address, Svc::ArbitrationType arb_type, s32 value,
                               s64 timeout) {
         R_RETURN(m_address_arbiter.WaitForAddress(address, arb_type, value, timeout));
     }
 
-    VAddr GetProcessLocalRegionAddress() const {
+    KProcessAddress GetProcessLocalRegionAddress() const {
         return m_plr_address;
     }
 
@@ -352,7 +353,7 @@ public:
      */
     void PrepareForTermination();
 
-    void LoadModule(CodeSet code_set, VAddr base_addr);
+    void LoadModule(CodeSet code_set, KProcessAddress base_addr);
 
     bool IsInitialized() const override {
         return m_is_initialized;
@@ -380,26 +381,28 @@ public:
         return m_state_lock;
     }
 
-    Result AddSharedMemory(KSharedMemory* shmem, VAddr address, size_t size);
-    void RemoveSharedMemory(KSharedMemory* shmem, VAddr address, size_t size);
+    Result AddSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);
+    void RemoveSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);
 
     ///////////////////////////////////////////////////////////////////////////////////////////////
     // Thread-local storage management
 
     // Marks the next available region as used and returns the address of the slot.
-    [[nodiscard]] Result CreateThreadLocalRegion(VAddr* out);
+    [[nodiscard]] Result CreateThreadLocalRegion(KProcessAddress* out);
 
     // Frees a used TLS slot identified by the given address
-    Result DeleteThreadLocalRegion(VAddr addr);
+    Result DeleteThreadLocalRegion(KProcessAddress addr);
 
     ///////////////////////////////////////////////////////////////////////////////////////////////
     // Debug watchpoint management
 
     // Attempts to insert a watchpoint into a free slot. Returns false if none are available.
-    bool InsertWatchpoint(Core::System& system, VAddr addr, u64 size, DebugWatchpointType type);
+    bool InsertWatchpoint(Core::System& system, KProcessAddress addr, u64 size,
+                          DebugWatchpointType type);
 
     // Attempts to remove the watchpoint specified by the given parameters.
-    bool RemoveWatchpoint(Core::System& system, VAddr addr, u64 size, DebugWatchpointType type);
+    bool RemoveWatchpoint(Core::System& system, KProcessAddress addr, u64 size,
+                          DebugWatchpointType type);
 
     const std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>& GetWatchpoints() const {
         return m_watchpoints;
@@ -457,7 +460,7 @@ private:
     /// Resource limit descriptor for this process
     KResourceLimit* m_resource_limit{};
 
-    VAddr m_system_resource_address{};
+    KVirtualAddress m_system_resource_address{};
 
     /// The ideal CPU core for this process, threads are scheduled on this core by default.
     u8 m_ideal_core = 0;
@@ -485,7 +488,7 @@ private:
     KConditionVariable m_condition_var;
 
     /// Address indicating the location of the process' dedicated TLS region.
-    VAddr m_plr_address = 0;
+    KProcessAddress m_plr_address = 0;
 
     /// Random values for svcGetInfo RandomEntropy
     std::array<u64, RANDOM_ENTROPY_SIZE> m_random_entropy{};
@@ -497,7 +500,7 @@ private:
     std::list<KSharedMemoryInfo*> m_shared_memory_list;
 
     /// Address of the top of the main thread's stack
-    VAddr m_main_thread_stack_top{};
+    KProcessAddress m_main_thread_stack_top{};
 
     /// Size of the main thread's stack
     std::size_t m_main_thread_stack_size{};
@@ -527,7 +530,7 @@ private:
     std::array<u64, Core::Hardware::NUM_CPU_CORES> m_running_thread_idle_counts{};
     std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_pinned_threads{};
     std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS> m_watchpoints{};
-    std::map<VAddr, u64> m_debug_page_refcounts;
+    std::map<KProcessAddress, u64> m_debug_page_refcounts;
 
     KThread* m_exception_thread{};
 
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index ecadf2916..faa12b4f0 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -511,7 +511,7 @@ void KScheduler::Reload(KThread* thread) {
     auto& cpu_core = m_kernel.System().ArmInterface(m_core_id);
     cpu_core.LoadContext(thread->GetContext32());
     cpu_core.LoadContext(thread->GetContext64());
-    cpu_core.SetTlsAddress(thread->GetTlsAddress());
+    cpu_core.SetTlsAddress(GetInteger(thread->GetTlsAddress()));
     cpu_core.SetTPIDR_EL0(thread->GetTpidrEl0());
     cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints());
     cpu_core.ClearExclusiveState();
diff --git a/src/core/hle/kernel/k_session_request.cpp b/src/core/hle/kernel/k_session_request.cpp
index a329e5690..9a69b4ffc 100644
--- a/src/core/hle/kernel/k_session_request.cpp
+++ b/src/core/hle/kernel/k_session_request.cpp
@@ -6,8 +6,8 @@
 
 namespace Kernel {
 
-Result KSessionRequest::SessionMappings::PushMap(VAddr client, VAddr server, size_t size,
-                                                 KMemoryState state, size_t index) {
+Result KSessionRequest::SessionMappings::PushMap(KProcessAddress client, KProcessAddress server,
+                                                 size_t size, KMemoryState state, size_t index) {
     // At most 15 buffers of each type (4-bit descriptor counts).
     ASSERT(index < ((1ul << 4) - 1) * 3);
 
@@ -33,20 +33,21 @@ Result KSessionRequest::SessionMappings::PushMap(VAddr client, VAddr server, siz
     R_SUCCEED();
 }
 
-Result KSessionRequest::SessionMappings::PushSend(VAddr client, VAddr server, size_t size,
-                                                  KMemoryState state) {
+Result KSessionRequest::SessionMappings::PushSend(KProcessAddress client, KProcessAddress server,
+                                                  size_t size, KMemoryState state) {
     ASSERT(m_num_recv == 0);
     ASSERT(m_num_exch == 0);
     R_RETURN(this->PushMap(client, server, size, state, m_num_send++));
 }
 
-Result KSessionRequest::SessionMappings::PushReceive(VAddr client, VAddr server, size_t size,
-                                                     KMemoryState state) {
+Result KSessionRequest::SessionMappings::PushReceive(KProcessAddress client, KProcessAddress server,
+                                                     size_t size, KMemoryState state) {
     ASSERT(m_num_exch == 0);
     R_RETURN(this->PushMap(client, server, size, state, m_num_send + m_num_recv++));
 }
 
-Result KSessionRequest::SessionMappings::PushExchange(VAddr client, VAddr server, size_t size,
+Result KSessionRequest::SessionMappings::PushExchange(KProcessAddress client,
+                                                      KProcessAddress server, size_t size,
                                                       KMemoryState state) {
     R_RETURN(this->PushMap(client, server, size, state, m_num_send + m_num_recv + m_num_exch++));
 }
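
The assertion in PushMap above encodes a protocol limit rather than an implementation detail: each of the three buffer kinds (send, receive, exchange) is counted in a 4-bit descriptor field, so at most 15 buffers per kind and 45 mappings overall. A small sketch of that bound, assuming only what the comment in the diff states:

// The 4-bit descriptor count allows values 0..15, so 15 buffers per kind
// and 3 kinds give 45 possible mappings; any mapping index must stay below that.
#include <cassert>
#include <cstddef>

constexpr std::size_t MaxPerKind = (1ul << 4) - 1;         // 15, from the 4-bit field
constexpr std::size_t NumKinds = 3;                        // send, receive, exchange
constexpr std::size_t MaxMappings = MaxPerKind * NumKinds; // 45

static_assert(MaxMappings == 45);

void PushMappingIndexCheck(std::size_t index) {
    // Equivalent to the ASSERT in KSessionRequest::SessionMappings::PushMap.
    assert(index < MaxMappings);
}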
diff --git a/src/core/hle/kernel/k_session_request.h b/src/core/hle/kernel/k_session_request.h
index 5685048ba..b5f04907b 100644
--- a/src/core/hle/kernel/k_session_request.h
+++ b/src/core/hle/kernel/k_session_request.h
@@ -26,17 +26,17 @@ public:
 
         class Mapping {
         public:
-            constexpr void Set(VAddr c, VAddr s, size_t sz, KMemoryState st) {
+            constexpr void Set(KProcessAddress c, KProcessAddress s, size_t sz, KMemoryState st) {
                 m_client_address = c;
                 m_server_address = s;
                 m_size = sz;
                 m_state = st;
             }
 
-            constexpr VAddr GetClientAddress() const {
+            constexpr KProcessAddress GetClientAddress() const {
                 return m_client_address;
             }
-            constexpr VAddr GetServerAddress() const {
+            constexpr KProcessAddress GetServerAddress() const {
                 return m_server_address;
             }
             constexpr size_t GetSize() const {
@@ -47,8 +47,8 @@ public:
             }
 
         private:
-            VAddr m_client_address{};
-            VAddr m_server_address{};
+            KProcessAddress m_client_address{};
+            KProcessAddress m_server_address{};
             size_t m_size{};
             KMemoryState m_state{};
         };
@@ -69,14 +69,17 @@ public:
             return m_num_exch;
         }
 
-        Result PushSend(VAddr client, VAddr server, size_t size, KMemoryState state);
-        Result PushReceive(VAddr client, VAddr server, size_t size, KMemoryState state);
-        Result PushExchange(VAddr client, VAddr server, size_t size, KMemoryState state);
+        Result PushSend(KProcessAddress client, KProcessAddress server, size_t size,
+                        KMemoryState state);
+        Result PushReceive(KProcessAddress client, KProcessAddress server, size_t size,
+                           KMemoryState state);
+        Result PushExchange(KProcessAddress client, KProcessAddress server, size_t size,
+                            KMemoryState state);
 
-        VAddr GetSendClientAddress(size_t i) const {
+        KProcessAddress GetSendClientAddress(size_t i) const {
             return GetSendMapping(i).GetClientAddress();
         }
-        VAddr GetSendServerAddress(size_t i) const {
+        KProcessAddress GetSendServerAddress(size_t i) const {
            return GetSendMapping(i).GetServerAddress();
         }
         size_t GetSendSize(size_t i) const {
@@ -86,10 +89,10 @@ public:
             return GetSendMapping(i).GetMemoryState();
         }
 
-        VAddr GetReceiveClientAddress(size_t i) const {
+        KProcessAddress GetReceiveClientAddress(size_t i) const {
             return GetReceiveMapping(i).GetClientAddress();
         }
-        VAddr GetReceiveServerAddress(size_t i) const {
+        KProcessAddress GetReceiveServerAddress(size_t i) const {
             return GetReceiveMapping(i).GetServerAddress();
         }
         size_t GetReceiveSize(size_t i) const {
@@ -99,10 +102,10 @@ public:
             return GetReceiveMapping(i).GetMemoryState();
         }
 
-        VAddr GetExchangeClientAddress(size_t i) const {
+        KProcessAddress GetExchangeClientAddress(size_t i) const {
             return GetExchangeMapping(i).GetClientAddress();
         }
-        VAddr GetExchangeServerAddress(size_t i) const {
+        KProcessAddress GetExchangeServerAddress(size_t i) const {
             return GetExchangeMapping(i).GetServerAddress();
         }
         size_t GetExchangeSize(size_t i) const {
@@ -113,7 +116,8 @@ public:
         }
 
     private:
-        Result PushMap(VAddr client, VAddr server, size_t size, KMemoryState state, size_t index);
+        Result PushMap(KProcessAddress client, KProcessAddress server, size_t size,
+                       KMemoryState state, size_t index);
 
         const Mapping& GetSendMapping(size_t i) const {
             ASSERT(i < m_num_send);
@@ -227,22 +231,25 @@ public:
         return m_mappings.GetExchangeCount();
     }
 
-    Result PushSend(VAddr client, VAddr server, size_t size, KMemoryState state) {
+    Result PushSend(KProcessAddress client, KProcessAddress server, size_t size,
+                    KMemoryState state) {
         return m_mappings.PushSend(client, server, size, state);
     }
 
-    Result PushReceive(VAddr client, VAddr server, size_t size, KMemoryState state) {
+    Result PushReceive(KProcessAddress client, KProcessAddress server, size_t size,
+                       KMemoryState state) {
         return m_mappings.PushReceive(client, server, size, state);
     }
 
-    Result PushExchange(VAddr client, VAddr server, size_t size, KMemoryState state) {
+    Result PushExchange(KProcessAddress client, KProcessAddress server, size_t size,
+                        KMemoryState state) {
         return m_mappings.PushExchange(client, server, size, state);
     }
 
-    VAddr GetSendClientAddress(size_t i) const {
+    KProcessAddress GetSendClientAddress(size_t i) const {
         return m_mappings.GetSendClientAddress(i);
     }
-    VAddr GetSendServerAddress(size_t i) const {
+    KProcessAddress GetSendServerAddress(size_t i) const {
         return m_mappings.GetSendServerAddress(i);
     }
     size_t GetSendSize(size_t i) const {
@@ -252,10 +259,10 @@ public:
         return m_mappings.GetSendMemoryState(i);
     }
 
-    VAddr GetReceiveClientAddress(size_t i) const {
+    KProcessAddress GetReceiveClientAddress(size_t i) const {
         return m_mappings.GetReceiveClientAddress(i);
     }
-    VAddr GetReceiveServerAddress(size_t i) const {
+    KProcessAddress GetReceiveServerAddress(size_t i) const {
         return m_mappings.GetReceiveServerAddress(i);
     }
     size_t GetReceiveSize(size_t i) const {
@@ -265,10 +272,10 @@ public:
265 return m_mappings.GetReceiveMemoryState(i); 272 return m_mappings.GetReceiveMemoryState(i);
266 } 273 }
267 274
268 VAddr GetExchangeClientAddress(size_t i) const { 275 KProcessAddress GetExchangeClientAddress(size_t i) const {
269 return m_mappings.GetExchangeClientAddress(i); 276 return m_mappings.GetExchangeClientAddress(i);
270 } 277 }
271 VAddr GetExchangeServerAddress(size_t i) const { 278 KProcessAddress GetExchangeServerAddress(size_t i) const {
272 return m_mappings.GetExchangeServerAddress(i); 279 return m_mappings.GetExchangeServerAddress(i);
273 } 280 }
274 size_t GetExchangeSize(size_t i) const { 281 size_t GetExchangeSize(size_t i) const {
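The session-request accessors above now traffic in KProcessAddress rather than raw VAddr integers. As a rough illustration of the shape of this bookkeeping, here is a minimal, hypothetical sketch of the push/get pattern: bool stands in for the kernel's Result type, and a plain u64 alias stands in for the tagged address type sketched after the k_typed_address.h hunk further down.

// Illustrative sketch only; simplified from the accessor pattern above,
// not the real KSessionRequest::Mappings.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

using KProcessAddress = std::uint64_t; // stand-in for the tagged type

struct Mapping {
    KProcessAddress client;
    KProcessAddress server;
    std::size_t size;
};

class Mappings {
public:
    bool PushSend(KProcessAddress client, KProcessAddress server, std::size_t size) {
        if (m_num_send >= MaxMappings) {
            return false; // the real code returns a Result here
        }
        m_mappings[m_num_send++] = Mapping{client, server, size};
        return true;
    }
    KProcessAddress GetSendClientAddress(std::size_t i) const {
        return GetSendMapping(i).client;
    }
    std::size_t GetSendSize(std::size_t i) const {
        return GetSendMapping(i).size;
    }

private:
    const Mapping& GetSendMapping(std::size_t i) const {
        assert(i < m_num_send); // same bounds ASSERT the kernel code uses
        return m_mappings[i];
    }
    static constexpr std::size_t MaxMappings = 8;
    Mapping m_mappings[MaxMappings]{};
    std::size_t m_num_send{};
};

int main() {
    Mappings m;
    m.PushSend(0x10000, 0x20000, 0x1000);
    std::printf("client=0x%llx size=0x%zx\n",
                (unsigned long long)m.GetSendClientAddress(0), m.GetSendSize(0));
}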
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp
index 954e5befe..efb5699de 100644
--- a/src/core/hle/kernel/k_shared_memory.cpp
+++ b/src/core/hle/kernel/k_shared_memory.cpp
@@ -76,7 +76,7 @@ void KSharedMemory::Finalize() {
76 m_resource_limit->Close(); 76 m_resource_limit->Close();
77} 77}
78 78
79Result KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t map_size, 79Result KSharedMemory::Map(KProcess& target_process, KProcessAddress address, std::size_t map_size,
80 Svc::MemoryPermission map_perm) { 80 Svc::MemoryPermission map_perm) {
81 // Validate the size. 81 // Validate the size.
82 R_UNLESS(m_size == map_size, ResultInvalidSize); 82 R_UNLESS(m_size == map_size, ResultInvalidSize);
@@ -94,7 +94,8 @@ Result KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t m
94 ConvertToKMemoryPermission(map_perm))); 94 ConvertToKMemoryPermission(map_perm)));
95} 95}
96 96
97Result KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size) { 97Result KSharedMemory::Unmap(KProcess& target_process, KProcessAddress address,
98 std::size_t unmap_size) {
98 // Validate the size. 99 // Validate the size.
99 R_UNLESS(m_size == unmap_size, ResultInvalidSize); 100 R_UNLESS(m_size == unmap_size, ResultInvalidSize);
100 101
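Map and Unmap keep their existing size validation; only the address parameter changed type. Below is a toy version of the R_UNLESS early-return style used above, with a plain integer standing in for the kernel's Result type; the error value is made up.

// Toy sketch of the R_UNLESS validation style in KSharedMemory::Map/Unmap.
#include <cstddef>
#include <cstdint>
#include <cstdio>

using Result = std::uint32_t;
constexpr Result ResultSuccess = 0;
constexpr Result ResultInvalidSize = 0xCA01; // hypothetical value

#define R_UNLESS(expr, res)                                                    \
    do {                                                                       \
        if (!(expr)) {                                                         \
            return (res);                                                      \
        }                                                                      \
    } while (0)

Result Map(std::uint64_t address, std::size_t map_size, std::size_t shmem_size) {
    // Validate the size, as KSharedMemory::Map does before mapping.
    R_UNLESS(shmem_size == map_size, ResultInvalidSize);
    std::printf("mapping 0x%llx\n", static_cast<unsigned long long>(address));
    return ResultSuccess;
}

int main() {
    return Map(0x10000, 0x1000, 0x1000) == ResultSuccess ? 0 : 1;
}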
diff --git a/src/core/hle/kernel/k_shared_memory.h b/src/core/hle/kernel/k_shared_memory.h
index b4c4125bb..54b23d7ac 100644
--- a/src/core/hle/kernel/k_shared_memory.h
+++ b/src/core/hle/kernel/k_shared_memory.h
@@ -6,11 +6,11 @@
6#include <optional> 6#include <optional>
7#include <string> 7#include <string>
8 8
9#include "common/common_types.h"
10#include "core/device_memory.h" 9#include "core/device_memory.h"
11#include "core/hle/kernel/k_memory_block.h" 10#include "core/hle/kernel/k_memory_block.h"
12#include "core/hle/kernel/k_page_group.h" 11#include "core/hle/kernel/k_page_group.h"
13#include "core/hle/kernel/k_process.h" 12#include "core/hle/kernel/k_process.h"
13#include "core/hle/kernel/k_typed_address.h"
14#include "core/hle/kernel/slab_helpers.h" 14#include "core/hle/kernel/slab_helpers.h"
15#include "core/hle/result.h" 15#include "core/hle/result.h"
16 16
@@ -37,7 +37,7 @@ public:
37 * @param map_size Size of the shared memory block to map 37 * @param map_size Size of the shared memory block to map
38 * @param permissions Memory block map permissions (specified by SVC field) 38 * @param permissions Memory block map permissions (specified by SVC field)
39 */ 39 */
40 Result Map(KProcess& target_process, VAddr address, std::size_t map_size, 40 Result Map(KProcess& target_process, KProcessAddress address, std::size_t map_size,
41 Svc::MemoryPermission permissions); 41 Svc::MemoryPermission permissions);
42 42
43 /** 43 /**
@@ -46,7 +46,7 @@ public:
46 * @param address Address in system memory to unmap shared memory block 46 * @param address Address in system memory to unmap shared memory block
47 * @param unmap_size Size of the shared memory block to unmap 47 * @param unmap_size Size of the shared memory block to unmap
48 */ 48 */
49 Result Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size); 49 Result Unmap(KProcess& target_process, KProcessAddress address, std::size_t unmap_size);
50 50
51 /** 51 /**
52 * Gets a pointer to the shared memory block 52 * Gets a pointer to the shared memory block
@@ -79,7 +79,7 @@ private:
79 std::optional<KPageGroup> m_page_group{}; 79 std::optional<KPageGroup> m_page_group{};
80 Svc::MemoryPermission m_owner_permission{}; 80 Svc::MemoryPermission m_owner_permission{};
81 Svc::MemoryPermission m_user_permission{}; 81 Svc::MemoryPermission m_user_permission{};
82 PAddr m_physical_address{}; 82 KPhysicalAddress m_physical_address{};
83 std::size_t m_size{}; 83 std::size_t m_size{};
84 KResourceLimit* m_resource_limit{}; 84 KResourceLimit* m_resource_limit{};
85 bool m_is_initialized{}; 85 bool m_is_initialized{};
diff --git a/src/core/hle/kernel/k_system_resource.h b/src/core/hle/kernel/k_system_resource.h
index d36aaa9bd..6ea482185 100644
--- a/src/core/hle/kernel/k_system_resource.h
+++ b/src/core/hle/kernel/k_system_resource.h
@@ -130,7 +130,7 @@ private:
130 KBlockInfoSlabHeap m_block_info_heap; 130 KBlockInfoSlabHeap m_block_info_heap;
131 KPageTableSlabHeap m_page_table_heap; 131 KPageTableSlabHeap m_page_table_heap;
132 KResourceLimit* m_resource_limit{}; 132 KResourceLimit* m_resource_limit{};
133 VAddr m_resource_address{}; 133 KVirtualAddress m_resource_address{};
134 size_t m_resource_size{}; 134 size_t m_resource_size{};
135}; 135};
136 136
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index c0e3ecb45..9d101c640 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -48,8 +48,8 @@ static void ResetThreadContext32(Kernel::KThread::ThreadContext32& context, u32
48 context.fpscr = 0; 48 context.fpscr = 0;
49} 49}
50 50
51static void ResetThreadContext64(Kernel::KThread::ThreadContext64& context, VAddr stack_top, 51static void ResetThreadContext64(Kernel::KThread::ThreadContext64& context, u64 stack_top,
52 VAddr entry_point, u64 arg) { 52 u64 entry_point, u64 arg) {
53 context = {}; 53 context = {};
54 context.cpu_registers[0] = arg; 54 context.cpu_registers[0] = arg;
55 context.cpu_registers[18] = Kernel::KSystemControl::GenerateRandomU64() | 1; 55 context.cpu_registers[18] = Kernel::KSystemControl::GenerateRandomU64() | 1;
@@ -100,8 +100,8 @@ KThread::KThread(KernelCore& kernel)
100 : KAutoObjectWithSlabHeapAndContainer{kernel}, m_activity_pause_lock{kernel} {} 100 : KAutoObjectWithSlabHeapAndContainer{kernel}, m_activity_pause_lock{kernel} {}
101KThread::~KThread() = default; 101KThread::~KThread() = default;
102 102
103Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio, 103Result KThread::Initialize(KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top,
104 s32 virt_core, KProcess* owner, ThreadType type) { 104 s32 prio, s32 virt_core, KProcess* owner, ThreadType type) {
105 // Assert parameters are valid. 105 // Assert parameters are valid.
106 ASSERT((type == ThreadType::Main) || (type == ThreadType::Dummy) || 106 ASSERT((type == ThreadType::Main) || (type == ThreadType::Dummy) ||
107 (Svc::HighestThreadPriority <= prio && prio <= Svc::LowestThreadPriority)); 107 (Svc::HighestThreadPriority <= prio && prio <= Svc::LowestThreadPriority));
@@ -221,9 +221,9 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
221 } 221 }
222 222
223 // Initialize thread context. 223 // Initialize thread context.
224 ResetThreadContext64(m_thread_context_64, user_stack_top, func, arg); 224 ResetThreadContext64(m_thread_context_64, GetInteger(user_stack_top), GetInteger(func), arg);
225 ResetThreadContext32(m_thread_context_32, static_cast<u32>(user_stack_top), 225 ResetThreadContext32(m_thread_context_32, static_cast<u32>(GetInteger(user_stack_top)),
226 static_cast<u32>(func), static_cast<u32>(arg)); 226 static_cast<u32>(GetInteger(func)), static_cast<u32>(arg));
227 227
228 // Setup the stack parameters. 228 // Setup the stack parameters.
229 StackParameters& sp = this->GetStackParameters(); 229 StackParameters& sp = this->GetStackParameters();
@@ -249,8 +249,9 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
249} 249}
250 250
251Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg, 251Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
252 VAddr user_stack_top, s32 prio, s32 core, KProcess* owner, 252 KProcessAddress user_stack_top, s32 prio, s32 core,
253 ThreadType type, std::function<void()>&& init_func) { 253 KProcess* owner, ThreadType type,
254 std::function<void()>&& init_func) {
254 // Initialize the thread. 255 // Initialize the thread.
255 R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type)); 256 R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
256 257
@@ -288,8 +289,8 @@ Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thre
288} 289}
289 290
290Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func, 291Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func,
291 uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core, 292 uintptr_t arg, KProcessAddress user_stack_top, s32 prio,
292 KProcess* owner) { 293 s32 virt_core, KProcess* owner) {
293 system.Kernel().GlobalSchedulerContext().AddThread(thread); 294 system.Kernel().GlobalSchedulerContext().AddThread(thread);
294 R_RETURN(InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner, 295 R_RETURN(InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
295 ThreadType::User, system.GetCpuManager().GetGuestThreadFunc())); 296 ThreadType::User, system.GetCpuManager().GetGuestThreadFunc()));
@@ -951,7 +952,7 @@ void KThread::AddHeldLock(LockWithPriorityInheritanceInfo* lock_info) {
951 m_held_lock_info_list.push_front(*lock_info); 952 m_held_lock_info_list.push_front(*lock_info);
952} 953}
953 954
954KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(VAddr address_key, 955KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(KProcessAddress address_key,
955 bool is_kernel_address_key) { 956 bool is_kernel_address_key) {
956 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); 957 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
957 958
@@ -1087,7 +1088,8 @@ void KThread::RemoveWaiter(KThread* thread) {
1087 } 1088 }
1088} 1089}
1089 1090
1090KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_kernel_address_key_) { 1091KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, KProcessAddress key,
1092 bool is_kernel_address_key_) {
1091 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); 1093 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
1092 1094
1093 // Get the relevant lock info. 1095 // Get the relevant lock info.
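ResetThreadContext64 now takes plain u64 values because it fills guest CPU registers, which carry no address-space type; callers unwrap typed addresses with GetInteger and truncate explicitly for the 32-bit context. A self-contained sketch of that boundary, with simplified context structs:

// Sketch of the unwrap-at-the-register-boundary pattern above. GetInteger
// and the context layouts here are simplified stand-ins for illustration.
#include <cstdint>
#include <cstdio>

struct KProcessAddress { std::uint64_t value; };
constexpr std::uint64_t GetInteger(KProcessAddress a) { return a.value; }

struct ThreadContext64 { std::uint64_t sp; std::uint64_t pc; };
struct ThreadContext32 { std::uint32_t sp; std::uint32_t pc; };

// Guest registers are raw integers, so the typed address stops here.
void ResetThreadContext64(ThreadContext64& ctx, std::uint64_t stack_top, std::uint64_t entry) {
    ctx = {stack_top, entry};
}
void ResetThreadContext32(ThreadContext32& ctx, std::uint32_t stack_top, std::uint32_t entry) {
    ctx = {stack_top, entry};
}

int main() {
    KProcessAddress stack{0x0000'0012'3456'7000};
    KProcessAddress entry{0x0000'0000'0040'1000};
    ThreadContext64 c64{};
    ThreadContext32 c32{};
    ResetThreadContext64(c64, GetInteger(stack), GetInteger(entry));
    // Truncation to the AArch32 view is explicit, as in KThread::Initialize.
    ResetThreadContext32(c32, static_cast<std::uint32_t>(GetInteger(stack)),
                         static_cast<std::uint32_t>(GetInteger(entry)));
    std::printf("sp64=0x%llx sp32=0x%x\n", (unsigned long long)c64.sp, c32.sp);
}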
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index 53fa64369..0fa9672bf 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -14,7 +14,6 @@
14 14
15#include <boost/intrusive/list.hpp> 15#include <boost/intrusive/list.hpp>
16 16
17#include "common/common_types.h"
18#include "common/intrusive_red_black_tree.h" 17#include "common/intrusive_red_black_tree.h"
19#include "common/spin_lock.h" 18#include "common/spin_lock.h"
20#include "core/arm/arm_interface.h" 19#include "core/arm/arm_interface.h"
@@ -23,6 +22,7 @@
23#include "core/hle/kernel/k_spin_lock.h" 22#include "core/hle/kernel/k_spin_lock.h"
24#include "core/hle/kernel/k_synchronization_object.h" 23#include "core/hle/kernel/k_synchronization_object.h"
25#include "core/hle/kernel/k_timer_task.h" 24#include "core/hle/kernel/k_timer_task.h"
25#include "core/hle/kernel/k_typed_address.h"
26#include "core/hle/kernel/k_worker_task.h" 26#include "core/hle/kernel/k_worker_task.h"
27#include "core/hle/kernel/slab_helpers.h" 27#include "core/hle/kernel/slab_helpers.h"
28#include "core/hle/kernel/svc_common.h" 28#include "core/hle/kernel/svc_common.h"
@@ -46,7 +46,7 @@ class KProcess;
46class KScheduler; 46class KScheduler;
47class KThreadQueue; 47class KThreadQueue;
48 48
49using KThreadFunction = VAddr; 49using KThreadFunction = KProcessAddress;
50 50
51enum class ThreadType : u32 { 51enum class ThreadType : u32 {
52 Main = 0, 52 Main = 0,
@@ -230,9 +230,9 @@ public:
230 230
231 /* 231 /*
232 * Returns the Thread Local Storage address of the current thread 232 * Returns the Thread Local Storage address of the current thread
233 * @returns VAddr of the thread's TLS 233 * @returns Address of the thread's TLS
234 */ 234 */
235 VAddr GetTlsAddress() const { 235 KProcessAddress GetTlsAddress() const {
236 return m_tls_address; 236 return m_tls_address;
237 } 237 }
238 238
@@ -419,8 +419,8 @@ public:
419 KThreadFunction func, uintptr_t arg, s32 virt_core); 419 KThreadFunction func, uintptr_t arg, s32 virt_core);
420 420
421 static Result InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func, 421 static Result InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func,
422 uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core, 422 uintptr_t arg, KProcessAddress user_stack_top, s32 prio,
423 KProcess* owner); 423 s32 virt_core, KProcess* owner);
424 424
425 static Result InitializeServiceThread(Core::System& system, KThread* thread, 425 static Result InitializeServiceThread(Core::System& system, KThread* thread,
426 std::function<void()>&& thread_func, s32 prio, 426 std::function<void()>&& thread_func, s32 prio,
@@ -565,15 +565,15 @@ public:
565 565
566 Result GetThreadContext3(std::vector<u8>& out); 566 Result GetThreadContext3(std::vector<u8>& out);
567 567
568 KThread* RemoveUserWaiterByKey(bool* out_has_waiters, VAddr key) { 568 KThread* RemoveUserWaiterByKey(bool* out_has_waiters, KProcessAddress key) {
569 return this->RemoveWaiterByKey(out_has_waiters, key, false); 569 return this->RemoveWaiterByKey(out_has_waiters, key, false);
570 } 570 }
571 571
572 KThread* RemoveKernelWaiterByKey(bool* out_has_waiters, VAddr key) { 572 KThread* RemoveKernelWaiterByKey(bool* out_has_waiters, KProcessAddress key) {
573 return this->RemoveWaiterByKey(out_has_waiters, key, true); 573 return this->RemoveWaiterByKey(out_has_waiters, key, true);
574 } 574 }
575 575
576 VAddr GetAddressKey() const { 576 KProcessAddress GetAddressKey() const {
577 return m_address_key; 577 return m_address_key;
578 } 578 }
579 579
@@ -591,14 +591,14 @@ public:
591 // to cope with arbitrary host pointers making their way 591 // to cope with arbitrary host pointers making their way
592 // into things. 592 // into things.
593 593
594 void SetUserAddressKey(VAddr key, u32 val) { 594 void SetUserAddressKey(KProcessAddress key, u32 val) {
595 ASSERT(m_waiting_lock_info == nullptr); 595 ASSERT(m_waiting_lock_info == nullptr);
596 m_address_key = key; 596 m_address_key = key;
597 m_address_key_value = val; 597 m_address_key_value = val;
598 m_is_kernel_address_key = false; 598 m_is_kernel_address_key = false;
599 } 599 }
600 600
601 void SetKernelAddressKey(VAddr key) { 601 void SetKernelAddressKey(KProcessAddress key) {
602 ASSERT(m_waiting_lock_info == nullptr); 602 ASSERT(m_waiting_lock_info == nullptr);
603 m_address_key = key; 603 m_address_key = key;
604 m_is_kernel_address_key = true; 604 m_is_kernel_address_key = true;
@@ -637,12 +637,13 @@ public:
637 return m_argument; 637 return m_argument;
638 } 638 }
639 639
640 VAddr GetUserStackTop() const { 640 KProcessAddress GetUserStackTop() const {
641 return m_stack_top; 641 return m_stack_top;
642 } 642 }
643 643
644private: 644private:
645 KThread* RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_kernel_address_key); 645 KThread* RemoveWaiterByKey(bool* out_has_waiters, KProcessAddress key,
646 bool is_kernel_address_key);
646 647
647 static constexpr size_t PriorityInheritanceCountMax = 10; 648 static constexpr size_t PriorityInheritanceCountMax = 10;
648 union SyncObjectBuffer { 649 union SyncObjectBuffer {
@@ -695,12 +696,13 @@ private:
695 696
696 void IncreaseBasePriority(s32 priority); 697 void IncreaseBasePriority(s32 priority);
697 698
698 Result Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio, 699 Result Initialize(KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top, s32 prio,
699 s32 virt_core, KProcess* owner, ThreadType type); 700 s32 virt_core, KProcess* owner, ThreadType type);
700 701
701 static Result InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg, 702 static Result InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
702 VAddr user_stack_top, s32 prio, s32 core, KProcess* owner, 703 KProcessAddress user_stack_top, s32 prio, s32 core,
703 ThreadType type, std::function<void()>&& init_func); 704 KProcess* owner, ThreadType type,
705 std::function<void()>&& init_func);
704 706
705 // For core KThread implementation 707 // For core KThread implementation
706 ThreadContext32 m_thread_context_32{}; 708 ThreadContext32 m_thread_context_32{};
@@ -749,7 +751,8 @@ public:
749 public: 751 public:
750 explicit LockWithPriorityInheritanceInfo(KernelCore&) {} 752 explicit LockWithPriorityInheritanceInfo(KernelCore&) {}
751 753
752 static LockWithPriorityInheritanceInfo* Create(KernelCore& kernel, VAddr address_key, 754 static LockWithPriorityInheritanceInfo* Create(KernelCore& kernel,
755 KProcessAddress address_key,
753 bool is_kernel_address_key) { 756 bool is_kernel_address_key) {
754 // Create a new lock info. 757 // Create a new lock info.
755 auto* new_lock = LockWithPriorityInheritanceInfo::Allocate(kernel); 758 auto* new_lock = LockWithPriorityInheritanceInfo::Allocate(kernel);
@@ -797,7 +800,7 @@ public:
797 return m_tree; 800 return m_tree;
798 } 801 }
799 802
800 VAddr GetAddressKey() const { 803 KProcessAddress GetAddressKey() const {
801 return m_address_key; 804 return m_address_key;
802 } 805 }
803 bool GetIsKernelAddressKey() const { 806 bool GetIsKernelAddressKey() const {
@@ -812,7 +815,7 @@ public:
812 815
813 private: 816 private:
814 LockWithPriorityInheritanceThreadTree m_tree{}; 817 LockWithPriorityInheritanceThreadTree m_tree{};
815 VAddr m_address_key{}; 818 KProcessAddress m_address_key{};
816 KThread* m_owner{}; 819 KThread* m_owner{};
817 u32 m_waiter_count{}; 820 u32 m_waiter_count{};
818 bool m_is_kernel_address_key{}; 821 bool m_is_kernel_address_key{};
@@ -827,7 +830,8 @@ public:
827 } 830 }
828 831
829 void AddHeldLock(LockWithPriorityInheritanceInfo* lock_info); 832 void AddHeldLock(LockWithPriorityInheritanceInfo* lock_info);
830 LockWithPriorityInheritanceInfo* FindHeldLock(VAddr address_key, bool is_kernel_address_key); 833 LockWithPriorityInheritanceInfo* FindHeldLock(KProcessAddress address_key,
834 bool is_kernel_address_key);
831 835
832private: 836private:
833 using LockWithPriorityInheritanceInfoList = 837 using LockWithPriorityInheritanceInfoList =
@@ -839,11 +843,11 @@ private:
839 KAffinityMask m_physical_affinity_mask{}; 843 KAffinityMask m_physical_affinity_mask{};
840 u64 m_thread_id{}; 844 u64 m_thread_id{};
841 std::atomic<s64> m_cpu_time{}; 845 std::atomic<s64> m_cpu_time{};
842 VAddr m_address_key{}; 846 KProcessAddress m_address_key{};
843 KProcess* m_parent{}; 847 KProcess* m_parent{};
844 VAddr m_kernel_stack_top{}; 848 KVirtualAddress m_kernel_stack_top{};
845 u32* m_light_ipc_data{}; 849 u32* m_light_ipc_data{};
846 VAddr m_tls_address{}; 850 KProcessAddress m_tls_address{};
847 KLightLock m_activity_pause_lock; 851 KLightLock m_activity_pause_lock;
848 s64 m_schedule_count{}; 852 s64 m_schedule_count{};
849 s64 m_last_scheduled_tick{}; 853 s64 m_last_scheduled_tick{};
@@ -887,16 +891,16 @@ private:
887 891
888 // For debugging 892 // For debugging
889 std::vector<KSynchronizationObject*> m_wait_objects_for_debugging{}; 893 std::vector<KSynchronizationObject*> m_wait_objects_for_debugging{};
890 VAddr m_mutex_wait_address_for_debugging{}; 894 KProcessAddress m_mutex_wait_address_for_debugging{};
891 ThreadWaitReasonForDebugging m_wait_reason_for_debugging{}; 895 ThreadWaitReasonForDebugging m_wait_reason_for_debugging{};
892 uintptr_t m_argument{}; 896 uintptr_t m_argument{};
893 VAddr m_stack_top{}; 897 KProcessAddress m_stack_top{};
894 898
895public: 899public:
896 using ConditionVariableThreadTreeType = ConditionVariableThreadTree; 900 using ConditionVariableThreadTreeType = ConditionVariableThreadTree;
897 901
898 void SetConditionVariable(ConditionVariableThreadTree* tree, VAddr address, u64 cv_key, 902 void SetConditionVariable(ConditionVariableThreadTree* tree, KProcessAddress address,
899 u32 value) { 903 u64 cv_key, u32 value) {
900 ASSERT(m_waiting_lock_info == nullptr); 904 ASSERT(m_waiting_lock_info == nullptr);
901 m_condvar_tree = tree; 905 m_condvar_tree = tree;
902 m_condvar_key = cv_key; 906 m_condvar_key = cv_key;
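The thread stores a single m_address_key together with m_is_kernel_address_key, and the RemoveUserWaiterByKey/RemoveKernelWaiterByKey helpers above just forward the flag. A small, hypothetical sketch of that (address, is-kernel) pairing:

// Sketch of the (address, is_kernel) key pairing used by the waiter lookups
// above; simplified types, not the real KThread internals.
#include <cstdint>
#include <cstdio>

using KProcessAddress = std::uint64_t; // stand-in for the tagged type

struct AddressKey {
    KProcessAddress address{};
    bool is_kernel{};
    friend bool operator==(const AddressKey&, const AddressKey&) = default;
};

struct Waiter { AddressKey key; };

// Matches only when both the address and the kernel/user flag agree,
// mirroring how RemoveWaiterByKey distinguishes the two key kinds.
bool Matches(const Waiter& w, KProcessAddress addr, bool is_kernel) {
    return w.key == AddressKey{addr, is_kernel};
}

int main() {
    Waiter w{{0x4000, /*is_kernel=*/false}};
    std::printf("%d %d\n", Matches(w, 0x4000, false), Matches(w, 0x4000, true));
}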
diff --git a/src/core/hle/kernel/k_thread_local_page.cpp b/src/core/hle/kernel/k_thread_local_page.cpp
index c2af6898a..b4a1e3cdb 100644
--- a/src/core/hle/kernel/k_thread_local_page.cpp
+++ b/src/core/hle/kernel/k_thread_local_page.cpp
@@ -37,7 +37,7 @@ Result KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) {
37 37
38Result KThreadLocalPage::Finalize() { 38Result KThreadLocalPage::Finalize() {
39 // Get the physical address of the page. 39 // Get the physical address of the page.
40 const PAddr phys_addr = m_owner->PageTable().GetPhysicalAddr(m_virt_addr); 40 const KPhysicalAddress phys_addr = m_owner->PageTable().GetPhysicalAddr(m_virt_addr);
41 ASSERT(phys_addr); 41 ASSERT(phys_addr);
42 42
43 // Unmap the page. 43 // Unmap the page.
@@ -49,7 +49,7 @@ Result KThreadLocalPage::Finalize() {
49 return ResultSuccess; 49 return ResultSuccess;
50} 50}
51 51
52VAddr KThreadLocalPage::Reserve() { 52KProcessAddress KThreadLocalPage::Reserve() {
53 for (size_t i = 0; i < m_is_region_free.size(); i++) { 53 for (size_t i = 0; i < m_is_region_free.size(); i++) {
54 if (m_is_region_free[i]) { 54 if (m_is_region_free[i]) {
55 m_is_region_free[i] = false; 55 m_is_region_free[i] = false;
@@ -60,7 +60,7 @@ VAddr KThreadLocalPage::Reserve() {
60 return 0; 60 return 0;
61} 61}
62 62
63void KThreadLocalPage::Release(VAddr addr) { 63void KThreadLocalPage::Release(KProcessAddress addr) {
64 m_is_region_free[this->GetRegionIndex(addr)] = true; 64 m_is_region_free[this->GetRegionIndex(addr)] = true;
65} 65}
66 66
diff --git a/src/core/hle/kernel/k_thread_local_page.h b/src/core/hle/kernel/k_thread_local_page.h
index 71254eb55..813f32a7e 100644
--- a/src/core/hle/kernel/k_thread_local_page.h
+++ b/src/core/hle/kernel/k_thread_local_page.h
@@ -27,19 +27,20 @@ public:
27 static_assert(RegionsPerPage > 0); 27 static_assert(RegionsPerPage > 0);
28 28
29public: 29public:
30 constexpr explicit KThreadLocalPage(KernelCore&, VAddr addr = {}) : m_virt_addr(addr) { 30 constexpr explicit KThreadLocalPage(KernelCore&, KProcessAddress addr = {})
31 : m_virt_addr(addr) {
31 m_is_region_free.fill(true); 32 m_is_region_free.fill(true);
32 } 33 }
33 34
34 constexpr VAddr GetAddress() const { 35 constexpr KProcessAddress GetAddress() const {
35 return m_virt_addr; 36 return m_virt_addr;
36 } 37 }
37 38
38 Result Initialize(KernelCore& kernel, KProcess* process); 39 Result Initialize(KernelCore& kernel, KProcess* process);
39 Result Finalize(); 40 Result Finalize();
40 41
41 VAddr Reserve(); 42 KProcessAddress Reserve();
42 void Release(VAddr addr); 43 void Release(KProcessAddress addr);
43 44
44 bool IsAllUsed() const { 45 bool IsAllUsed() const {
45 return std::ranges::all_of(m_is_region_free.begin(), m_is_region_free.end(), 46 return std::ranges::all_of(m_is_region_free.begin(), m_is_region_free.end(),
@@ -60,7 +61,7 @@ public:
60 } 61 }
61 62
62public: 63public:
63 using RedBlackKeyType = VAddr; 64 using RedBlackKeyType = KProcessAddress;
64 65
65 static constexpr RedBlackKeyType GetRedBlackKey(const RedBlackKeyType& v) { 66 static constexpr RedBlackKeyType GetRedBlackKey(const RedBlackKeyType& v) {
66 return v; 67 return v;
@@ -72,8 +73,8 @@ public:
72 template <typename T> 73 template <typename T>
73 requires(std::same_as<T, KThreadLocalPage> || std::same_as<T, RedBlackKeyType>) 74 requires(std::same_as<T, KThreadLocalPage> || std::same_as<T, RedBlackKeyType>)
74 static constexpr int Compare(const T& lhs, const KThreadLocalPage& rhs) { 75 static constexpr int Compare(const T& lhs, const KThreadLocalPage& rhs) {
75 const VAddr lval = GetRedBlackKey(lhs); 76 const KProcessAddress lval = GetRedBlackKey(lhs);
76 const VAddr rval = GetRedBlackKey(rhs); 77 const KProcessAddress rval = GetRedBlackKey(rhs);
77 78
78 if (lval < rval) { 79 if (lval < rval) {
79 return -1; 80 return -1;
@@ -85,22 +86,22 @@ public:
85 } 86 }
86 87
87private: 88private:
88 constexpr VAddr GetRegionAddress(size_t i) const { 89 constexpr KProcessAddress GetRegionAddress(size_t i) const {
89 return this->GetAddress() + i * Svc::ThreadLocalRegionSize; 90 return this->GetAddress() + i * Svc::ThreadLocalRegionSize;
90 } 91 }
91 92
92 constexpr bool Contains(VAddr addr) const { 93 constexpr bool Contains(KProcessAddress addr) const {
93 return this->GetAddress() <= addr && addr < this->GetAddress() + PageSize; 94 return this->GetAddress() <= addr && addr < this->GetAddress() + PageSize;
94 } 95 }
95 96
96 constexpr size_t GetRegionIndex(VAddr addr) const { 97 constexpr size_t GetRegionIndex(KProcessAddress addr) const {
97 ASSERT(Common::IsAligned(addr, Svc::ThreadLocalRegionSize)); 98 ASSERT(Common::IsAligned(GetInteger(addr), Svc::ThreadLocalRegionSize));
98 ASSERT(this->Contains(addr)); 99 ASSERT(this->Contains(addr));
99 return (addr - this->GetAddress()) / Svc::ThreadLocalRegionSize; 100 return (addr - this->GetAddress()) / Svc::ThreadLocalRegionSize;
100 } 101 }
101 102
102private: 103private:
103 VAddr m_virt_addr{}; 104 KProcessAddress m_virt_addr{};
104 KProcess* m_owner{}; 105 KProcess* m_owner{};
105 KernelCore* m_kernel{}; 106 KernelCore* m_kernel{};
106 std::array<bool, RegionsPerPage> m_is_region_free{}; 107 std::array<bool, RegionsPerPage> m_is_region_free{};
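GetRegionIndex divides the offset from the page base by Svc::ThreadLocalRegionSize; with typed addresses, typed-minus-typed yields a plain byte count, while the alignment ASSERT has to unwrap with GetInteger first. A runnable sketch of the same arithmetic; the region size constant here is an assumption for the demo.

// Sketch of the TLS slot arithmetic in KThreadLocalPage, using simplified
// stand-ins (the real code uses KProcessAddress and Svc::ThreadLocalRegionSize).
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr std::size_t PageSize = 0x1000;
constexpr std::size_t ThreadLocalRegionSize = 0x200; // assumed for the demo

struct KProcessAddress { std::uint64_t value; };
constexpr std::uint64_t GetInteger(KProcessAddress a) { return a.value; }
// Typed minus typed yields a plain byte offset, as in the kernel code.
constexpr std::uint64_t operator-(KProcessAddress a, KProcessAddress b) {
    return a.value - b.value;
}

std::size_t GetRegionIndex(KProcessAddress base, KProcessAddress addr) {
    // Alignment checks work on the raw integer, hence GetInteger here.
    assert(GetInteger(addr) % ThreadLocalRegionSize == 0);
    assert(addr - base < PageSize); // i.e. Contains(addr)
    return static_cast<std::size_t>((addr - base) / ThreadLocalRegionSize);
}

int main() {
    KProcessAddress page{0x7000};
    std::printf("slot=%zu\n", GetRegionIndex(page, KProcessAddress{0x7400}));
}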
diff --git a/src/core/hle/kernel/k_transfer_memory.cpp b/src/core/hle/kernel/k_transfer_memory.cpp
index 471349282..13d34125c 100644
--- a/src/core/hle/kernel/k_transfer_memory.cpp
+++ b/src/core/hle/kernel/k_transfer_memory.cpp
@@ -13,7 +13,7 @@ KTransferMemory::KTransferMemory(KernelCore& kernel)
13 13
14KTransferMemory::~KTransferMemory() = default; 14KTransferMemory::~KTransferMemory() = default;
15 15
16Result KTransferMemory::Initialize(VAddr address, std::size_t size, 16Result KTransferMemory::Initialize(KProcessAddress address, std::size_t size,
17 Svc::MemoryPermission owner_perm) { 17 Svc::MemoryPermission owner_perm) {
18 // Set members. 18 // Set members.
19 m_owner = GetCurrentProcessPointer(m_kernel); 19 m_owner = GetCurrentProcessPointer(m_kernel);
diff --git a/src/core/hle/kernel/k_transfer_memory.h b/src/core/hle/kernel/k_transfer_memory.h
index 3d4d795a5..54f97ccb4 100644
--- a/src/core/hle/kernel/k_transfer_memory.h
+++ b/src/core/hle/kernel/k_transfer_memory.h
@@ -26,7 +26,7 @@ public:
26 explicit KTransferMemory(KernelCore& kernel); 26 explicit KTransferMemory(KernelCore& kernel);
27 ~KTransferMemory() override; 27 ~KTransferMemory() override;
28 28
29 Result Initialize(VAddr address, std::size_t size, Svc::MemoryPermission owner_perm); 29 Result Initialize(KProcessAddress address, std::size_t size, Svc::MemoryPermission owner_perm);
30 30
31 void Finalize() override; 31 void Finalize() override;
32 32
@@ -44,7 +44,7 @@ public:
44 return m_owner; 44 return m_owner;
45 } 45 }
46 46
47 VAddr GetSourceAddress() const { 47 KProcessAddress GetSourceAddress() const {
48 return m_address; 48 return m_address;
49 } 49 }
50 50
@@ -54,7 +54,7 @@ public:
54 54
55private: 55private:
56 KProcess* m_owner{}; 56 KProcess* m_owner{};
57 VAddr m_address{}; 57 KProcessAddress m_address{};
58 Svc::MemoryPermission m_owner_perm{}; 58 Svc::MemoryPermission m_owner_perm{};
59 size_t m_size{}; 59 size_t m_size{};
60 bool m_is_initialized{}; 60 bool m_is_initialized{};
diff --git a/src/core/hle/kernel/k_typed_address.h b/src/core/hle/kernel/k_typed_address.h
new file mode 100644
index 000000000..d57535ba0
--- /dev/null
+++ b/src/core/hle/kernel/k_typed_address.h
@@ -0,0 +1,12 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "common/typed_address.h"
5
6namespace Kernel {
7
8using KPhysicalAddress = Common::PhysicalAddress;
9using KVirtualAddress = Common::VirtualAddress;
10using KProcessAddress = Common::ProcessAddress;
11
12} // namespace Kernel
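The new header is only three aliases over Common::TypedAddress instantiations; common/typed_address.h itself is added elsewhere in this commit and is not shown here. The following is a hypothetical minimal reconstruction, inferred from the call sites visible in this diff (implicit construction from integers, explicit GetInteger unwrap, ordering, address plus offset, typed difference, truthiness), not the real implementation.

// Hypothetical minimal reconstruction of the tagged-address idea behind
// common/typed_address.h; a sketch inferred from usage in this diff only.
#include <compare>
#include <cstdint>

template <typename Tag>
class TypedAddress {
public:
    // Implicit from integers: "KPhysicalAddress cur_phys_addr = 0;" compiles.
    constexpr TypedAddress(std::uint64_t value = 0) : m_value(value) {}

    // The only way back to a raw integer is the named, explicit escape hatch.
    friend constexpr std::uint64_t GetInteger(TypedAddress a) { return a.m_value; }

    // Ordering/equality, used by e.g. the red-black tree Compare above.
    friend constexpr auto operator<=>(TypedAddress, TypedAddress) = default;

    // Address +/- byte offset stays typed; typed - typed is a byte count.
    friend constexpr TypedAddress operator+(TypedAddress a, std::uint64_t off) {
        return TypedAddress(a.m_value + off);
    }
    friend constexpr std::uint64_t operator-(TypedAddress a, TypedAddress b) {
        return a.m_value - b.m_value;
    }

    // "ASSERT(phys_addr);" implies a truthiness test.
    constexpr explicit operator bool() const { return m_value != 0; }

private:
    std::uint64_t m_value;
};

// Distinct tags make the three kernel aliases mutually incompatible.
using KPhysicalAddress = TypedAddress<struct PhysicalTag>;
using KVirtualAddress = TypedAddress<struct VirtualTag>;
using KProcessAddress = TypedAddress<struct ProcessTag>;

static_assert(GetInteger(KProcessAddress{0x1000} + 0x200) == 0x1200);
static_assert(KVirtualAddress{0x3000} - KVirtualAddress{0x1000} == 0x2000);

int main() {}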
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 98ecaf12f..29809b2c5 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -271,9 +271,9 @@ struct KernelCore::Impl {
271 system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event); 271 system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event);
272 } 272 }
273 273
274 void InitializeResourceManagers(KernelCore& kernel, VAddr address, size_t size) { 274 void InitializeResourceManagers(KernelCore& kernel, KVirtualAddress address, size_t size) {
275 // Ensure that the buffer is suitable for our use. 275 // Ensure that the buffer is suitable for our use.
276 ASSERT(Common::IsAligned(address, PageSize)); 276 ASSERT(Common::IsAligned(GetInteger(address), PageSize));
277 ASSERT(Common::IsAligned(size, PageSize)); 277 ASSERT(Common::IsAligned(size, PageSize));
278 278
279 // Ensure that we have space for our reference counts. 279 // Ensure that we have space for our reference counts.
@@ -462,29 +462,30 @@ struct KernelCore::Impl {
462 KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceSize - 1); 462 KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceSize - 1);
463 463
464 // Save start and end for ease of use. 464 // Save start and end for ease of use.
465 const VAddr code_start_virt_addr = KernelVirtualAddressCodeBase; 465 constexpr KVirtualAddress code_start_virt_addr = KernelVirtualAddressCodeBase;
466 const VAddr code_end_virt_addr = KernelVirtualAddressCodeEnd; 466 constexpr KVirtualAddress code_end_virt_addr = KernelVirtualAddressCodeEnd;
467 467
468 // Setup the containing kernel region. 468 // Setup the containing kernel region.
469 constexpr size_t KernelRegionSize = 1_GiB; 469 constexpr size_t KernelRegionSize = 1_GiB;
470 constexpr size_t KernelRegionAlign = 1_GiB; 470 constexpr size_t KernelRegionAlign = 1_GiB;
471 constexpr VAddr kernel_region_start = 471 constexpr KVirtualAddress kernel_region_start =
472 Common::AlignDown(code_start_virt_addr, KernelRegionAlign); 472 Common::AlignDown(GetInteger(code_start_virt_addr), KernelRegionAlign);
473 size_t kernel_region_size = KernelRegionSize; 473 size_t kernel_region_size = KernelRegionSize;
474 if (!(kernel_region_start + KernelRegionSize - 1 <= KernelVirtualAddressSpaceLast)) { 474 if (!(kernel_region_start + KernelRegionSize - 1 <= KernelVirtualAddressSpaceLast)) {
475 kernel_region_size = KernelVirtualAddressSpaceEnd - kernel_region_start; 475 kernel_region_size = KernelVirtualAddressSpaceEnd - GetInteger(kernel_region_start);
476 } 476 }
477 ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( 477 ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
478 kernel_region_start, kernel_region_size, KMemoryRegionType_Kernel)); 478 GetInteger(kernel_region_start), kernel_region_size, KMemoryRegionType_Kernel));
479 479
480 // Setup the code region. 480 // Setup the code region.
481 constexpr size_t CodeRegionAlign = PageSize; 481 constexpr size_t CodeRegionAlign = PageSize;
482 constexpr VAddr code_region_start = 482 constexpr KVirtualAddress code_region_start =
483 Common::AlignDown(code_start_virt_addr, CodeRegionAlign); 483 Common::AlignDown(GetInteger(code_start_virt_addr), CodeRegionAlign);
484 constexpr VAddr code_region_end = Common::AlignUp(code_end_virt_addr, CodeRegionAlign); 484 constexpr KVirtualAddress code_region_end =
485 Common::AlignUp(GetInteger(code_end_virt_addr), CodeRegionAlign);
485 constexpr size_t code_region_size = code_region_end - code_region_start; 486 constexpr size_t code_region_size = code_region_end - code_region_start;
486 ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( 487 ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
487 code_region_start, code_region_size, KMemoryRegionType_KernelCode)); 488 GetInteger(code_region_start), code_region_size, KMemoryRegionType_KernelCode));
488 489
489 // Setup board-specific device physical regions. 490 // Setup board-specific device physical regions.
490 Init::SetupDevicePhysicalMemoryRegions(*memory_layout); 491 Init::SetupDevicePhysicalMemoryRegions(*memory_layout);
@@ -520,11 +521,11 @@ struct KernelCore::Impl {
520 ASSERT(misc_region_size > 0); 521 ASSERT(misc_region_size > 0);
521 522
522 // Setup the misc region. 523 // Setup the misc region.
523 const VAddr misc_region_start = 524 const KVirtualAddress misc_region_start =
524 memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion( 525 memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
525 misc_region_size, MiscRegionAlign, KMemoryRegionType_Kernel); 526 misc_region_size, MiscRegionAlign, KMemoryRegionType_Kernel);
526 ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( 527 ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
527 misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc)); 528 GetInteger(misc_region_start), misc_region_size, KMemoryRegionType_KernelMisc));
528 529
529 // Determine if we'll use extra thread resources. 530 // Determine if we'll use extra thread resources.
530 const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit(); 531 const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit();
@@ -532,11 +533,11 @@ struct KernelCore::Impl {
532 // Setup the stack region. 533 // Setup the stack region.
533 constexpr size_t StackRegionSize = 14_MiB; 534 constexpr size_t StackRegionSize = 14_MiB;
534 constexpr size_t StackRegionAlign = KernelAslrAlignment; 535 constexpr size_t StackRegionAlign = KernelAslrAlignment;
535 const VAddr stack_region_start = 536 const KVirtualAddress stack_region_start =
536 memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion( 537 memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
537 StackRegionSize, StackRegionAlign, KMemoryRegionType_Kernel); 538 StackRegionSize, StackRegionAlign, KMemoryRegionType_Kernel);
538 ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( 539 ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
539 stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack)); 540 GetInteger(stack_region_start), StackRegionSize, KMemoryRegionType_KernelStack));
540 541
541 // Determine the size of the resource region. 542 // Determine the size of the resource region.
542 const size_t resource_region_size = 543 const size_t resource_region_size =
@@ -548,29 +549,29 @@ struct KernelCore::Impl {
548 ASSERT(slab_region_size <= resource_region_size); 549 ASSERT(slab_region_size <= resource_region_size);
549 550
550 // Setup the slab region. 551 // Setup the slab region.
551 const PAddr code_start_phys_addr = KernelPhysicalAddressCodeBase; 552 const KPhysicalAddress code_start_phys_addr = KernelPhysicalAddressCodeBase;
552 const PAddr code_end_phys_addr = code_start_phys_addr + code_region_size; 553 const KPhysicalAddress code_end_phys_addr = code_start_phys_addr + code_region_size;
553 const PAddr slab_start_phys_addr = code_end_phys_addr; 554 const KPhysicalAddress slab_start_phys_addr = code_end_phys_addr;
554 const PAddr slab_end_phys_addr = slab_start_phys_addr + slab_region_size; 555 const KPhysicalAddress slab_end_phys_addr = slab_start_phys_addr + slab_region_size;
555 constexpr size_t SlabRegionAlign = KernelAslrAlignment; 556 constexpr size_t SlabRegionAlign = KernelAslrAlignment;
556 const size_t slab_region_needed_size = 557 const size_t slab_region_needed_size =
557 Common::AlignUp(code_end_phys_addr + slab_region_size, SlabRegionAlign) - 558 Common::AlignUp(GetInteger(code_end_phys_addr) + slab_region_size, SlabRegionAlign) -
558 Common::AlignDown(code_end_phys_addr, SlabRegionAlign); 559 Common::AlignDown(GetInteger(code_end_phys_addr), SlabRegionAlign);
559 const VAddr slab_region_start = 560 const KVirtualAddress slab_region_start =
560 memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion( 561 memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
561 slab_region_needed_size, SlabRegionAlign, KMemoryRegionType_Kernel) + 562 slab_region_needed_size, SlabRegionAlign, KMemoryRegionType_Kernel) +
562 (code_end_phys_addr % SlabRegionAlign); 563 (GetInteger(code_end_phys_addr) % SlabRegionAlign);
563 ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( 564 ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
564 slab_region_start, slab_region_size, KMemoryRegionType_KernelSlab)); 565 GetInteger(slab_region_start), slab_region_size, KMemoryRegionType_KernelSlab));
565 566
566 // Setup the temp region. 567 // Setup the temp region.
567 constexpr size_t TempRegionSize = 128_MiB; 568 constexpr size_t TempRegionSize = 128_MiB;
568 constexpr size_t TempRegionAlign = KernelAslrAlignment; 569 constexpr size_t TempRegionAlign = KernelAslrAlignment;
569 const VAddr temp_region_start = 570 const KVirtualAddress temp_region_start =
570 memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion( 571 memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
571 TempRegionSize, TempRegionAlign, KMemoryRegionType_Kernel); 572 TempRegionSize, TempRegionAlign, KMemoryRegionType_Kernel);
572 ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(temp_region_start, TempRegionSize, 573 ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
573 KMemoryRegionType_KernelTemp)); 574 GetInteger(temp_region_start), TempRegionSize, KMemoryRegionType_KernelTemp));
574 575
575 // Automatically map in devices that have auto-map attributes. 576 // Automatically map in devices that have auto-map attributes.
576 for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) { 577 for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
@@ -596,35 +597,37 @@ struct KernelCore::Impl {
596 region.SetTypeAttribute(KMemoryRegionAttr_DidKernelMap); 597 region.SetTypeAttribute(KMemoryRegionAttr_DidKernelMap);
597 598
598 // Create a virtual pair region and insert it into the tree. 599 // Create a virtual pair region and insert it into the tree.
599 const PAddr map_phys_addr = Common::AlignDown(region.GetAddress(), PageSize); 600 const KPhysicalAddress map_phys_addr = Common::AlignDown(region.GetAddress(), PageSize);
600 const size_t map_size = 601 const size_t map_size =
601 Common::AlignUp(region.GetEndAddress(), PageSize) - map_phys_addr; 602 Common::AlignUp(region.GetEndAddress(), PageSize) - GetInteger(map_phys_addr);
602 const VAddr map_virt_addr = 603 const KVirtualAddress map_virt_addr =
603 memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard( 604 memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
604 map_size, PageSize, KMemoryRegionType_KernelMisc, PageSize); 605 map_size, PageSize, KMemoryRegionType_KernelMisc, PageSize);
605 ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( 606 ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
606 map_virt_addr, map_size, KMemoryRegionType_KernelMiscMappedDevice)); 607 GetInteger(map_virt_addr), map_size, KMemoryRegionType_KernelMiscMappedDevice));
607 region.SetPairAddress(map_virt_addr + region.GetAddress() - map_phys_addr); 608 region.SetPairAddress(GetInteger(map_virt_addr) + region.GetAddress() -
609 GetInteger(map_phys_addr));
608 } 610 }
609 611
610 Init::SetupDramPhysicalMemoryRegions(*memory_layout); 612 Init::SetupDramPhysicalMemoryRegions(*memory_layout);
611 613
612 // Insert a physical region for the kernel code region. 614 // Insert a physical region for the kernel code region.
613 ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert( 615 ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
614 code_start_phys_addr, code_region_size, KMemoryRegionType_DramKernelCode)); 616 GetInteger(code_start_phys_addr), code_region_size, KMemoryRegionType_DramKernelCode));
615 617
616 // Insert a physical region for the kernel slab region. 618 // Insert a physical region for the kernel slab region.
617 ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert( 619 ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
618 slab_start_phys_addr, slab_region_size, KMemoryRegionType_DramKernelSlab)); 620 GetInteger(slab_start_phys_addr), slab_region_size, KMemoryRegionType_DramKernelSlab));
619 621
620 // Determine size available for kernel page table heaps, requiring > 8 MB. 622 // Determine size available for kernel page table heaps, requiring > 8 MB.
621 const PAddr resource_end_phys_addr = slab_start_phys_addr + resource_region_size; 623 const KPhysicalAddress resource_end_phys_addr = slab_start_phys_addr + resource_region_size;
622 const size_t page_table_heap_size = resource_end_phys_addr - slab_end_phys_addr; 624 const size_t page_table_heap_size = resource_end_phys_addr - slab_end_phys_addr;
623 ASSERT(page_table_heap_size / 4_MiB > 2); 625 ASSERT(page_table_heap_size / 4_MiB > 2);
624 626
625 // Insert a physical region for the kernel page table heap region 627 // Insert a physical region for the kernel page table heap region
626 ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert( 628 ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
627 slab_end_phys_addr, page_table_heap_size, KMemoryRegionType_DramKernelPtHeap)); 629 GetInteger(slab_end_phys_addr), page_table_heap_size,
630 KMemoryRegionType_DramKernelPtHeap));
628 631
629 // All DRAM regions that we haven't tagged by this point will be mapped under the linear 632 // All DRAM regions that we haven't tagged by this point will be mapped under the linear
630 // mapping. Tag them. 633 // mapping. Tag them.
@@ -646,20 +649,21 @@ struct KernelCore::Impl {
646 649
647 // Setup the linear mapping region. 650 // Setup the linear mapping region.
648 constexpr size_t LinearRegionAlign = 1_GiB; 651 constexpr size_t LinearRegionAlign = 1_GiB;
649 const PAddr aligned_linear_phys_start = 652 const KPhysicalAddress aligned_linear_phys_start =
650 Common::AlignDown(linear_extents.GetAddress(), LinearRegionAlign); 653 Common::AlignDown(linear_extents.GetAddress(), LinearRegionAlign);
651 const size_t linear_region_size = 654 const size_t linear_region_size =
652 Common::AlignUp(linear_extents.GetEndAddress(), LinearRegionAlign) - 655 Common::AlignUp(linear_extents.GetEndAddress(), LinearRegionAlign) -
653 aligned_linear_phys_start; 656 GetInteger(aligned_linear_phys_start);
654 const VAddr linear_region_start = 657 const KVirtualAddress linear_region_start =
655 memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard( 658 memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
656 linear_region_size, LinearRegionAlign, KMemoryRegionType_None, LinearRegionAlign); 659 linear_region_size, LinearRegionAlign, KMemoryRegionType_None, LinearRegionAlign);
657 660
658 const u64 linear_region_phys_to_virt_diff = linear_region_start - aligned_linear_phys_start; 661 const u64 linear_region_phys_to_virt_diff =
662 GetInteger(linear_region_start) - GetInteger(aligned_linear_phys_start);
659 663
660 // Map and create regions for all the linearly-mapped data. 664 // Map and create regions for all the linearly-mapped data.
661 { 665 {
662 PAddr cur_phys_addr = 0; 666 KPhysicalAddress cur_phys_addr = 0;
663 u64 cur_size = 0; 667 u64 cur_size = 0;
664 for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) { 668 for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
665 if (!region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) { 669 if (!region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) {
@@ -678,15 +682,16 @@ struct KernelCore::Impl {
678 cur_size = region.GetSize(); 682 cur_size = region.GetSize();
679 } 683 }
680 684
681 const VAddr region_virt_addr = 685 const KVirtualAddress region_virt_addr =
682 region.GetAddress() + linear_region_phys_to_virt_diff; 686 region.GetAddress() + linear_region_phys_to_virt_diff;
683 ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( 687 ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
684 region_virt_addr, region.GetSize(), 688 GetInteger(region_virt_addr), region.GetSize(),
685 GetTypeForVirtualLinearMapping(region.GetType()))); 689 GetTypeForVirtualLinearMapping(region.GetType())));
686 region.SetPairAddress(region_virt_addr); 690 region.SetPairAddress(GetInteger(region_virt_addr));
687 691
688 KMemoryRegion* virt_region = 692 KMemoryRegion* virt_region =
689 memory_layout->GetVirtualMemoryRegionTree().FindModifiable(region_virt_addr); 693 memory_layout->GetVirtualMemoryRegionTree().FindModifiable(
694 GetInteger(region_virt_addr));
690 ASSERT(virt_region != nullptr); 695 ASSERT(virt_region != nullptr);
691 virt_region->SetPairAddress(region.GetAddress()); 696 virt_region->SetPairAddress(region.GetAddress());
692 } 697 }
@@ -694,10 +699,11 @@ struct KernelCore::Impl {
694 699
695 // Insert regions for the initial page table region. 700 // Insert regions for the initial page table region.
696 ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert( 701 ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
697 resource_end_phys_addr, KernelPageTableHeapSize, KMemoryRegionType_DramKernelInitPt)); 702 GetInteger(resource_end_phys_addr), KernelPageTableHeapSize,
703 KMemoryRegionType_DramKernelInitPt));
698 ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( 704 ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
699 resource_end_phys_addr + linear_region_phys_to_virt_diff, KernelPageTableHeapSize, 705 GetInteger(resource_end_phys_addr) + linear_region_phys_to_virt_diff,
700 KMemoryRegionType_VirtualDramKernelInitPt)); 706 KernelPageTableHeapSize, KMemoryRegionType_VirtualDramKernelInitPt));
701 707
702 // All linear-mapped DRAM regions that we haven't tagged by this point will be allocated to 708 // All linear-mapped DRAM regions that we haven't tagged by this point will be allocated to
703 // some pool partition. Tag them. 709 // some pool partition. Tag them.
@@ -969,12 +975,12 @@ void KernelCore::InvalidateAllInstructionCaches() {
969 } 975 }
970} 976}
971 977
972void KernelCore::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) { 978void KernelCore::InvalidateCpuInstructionCacheRange(KProcessAddress addr, std::size_t size) {
973 for (auto& physical_core : impl->cores) { 979 for (auto& physical_core : impl->cores) {
974 if (!physical_core->IsInitialized()) { 980 if (!physical_core->IsInitialized()) {
975 continue; 981 continue;
976 } 982 }
977 physical_core->ArmInterface().InvalidateCacheRange(addr, size); 983 physical_core->ArmInterface().InvalidateCacheRange(GetInteger(addr), size);
978 } 984 }
979} 985}
980 986
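Common::AlignDown/AlignUp operate on plain integers, which is why the kernel.cpp hunks above unwrap with GetInteger before aligning and compute linear_region_phys_to_virt_diff as raw u64 arithmetic. A compact sketch of that align-then-rewrap pattern, using a stripped-down tagged type:

// Sketch of the align-then-rewrap pattern from the kernel.cpp hunks above.
// AlignDown here is a simplified stand-in for Common::AlignDown.
#include <cstdint>
#include <cstdio>

template <typename Tag>
struct Addr {
    std::uint64_t v;
    friend constexpr std::uint64_t GetInteger(Addr a) { return a.v; }
};
using KPhysicalAddress = Addr<struct P>;
using KVirtualAddress = Addr<struct V>;

constexpr std::uint64_t AlignDown(std::uint64_t value, std::uint64_t align) {
    return value & ~(align - 1); // power-of-two alignment, like Common::AlignDown
}

int main() {
    constexpr std::uint64_t LinearRegionAlign = 1ULL << 30; // 1_GiB
    KPhysicalAddress linear_phys{0x8123'4000};
    KVirtualAddress linear_virt{0xFFFF'8000'0000'0000};

    // Alignment helpers take integers, hence the explicit GetInteger unwrap.
    const KPhysicalAddress aligned_phys{AlignDown(GetInteger(linear_phys), LinearRegionAlign)};

    // linear_region_phys_to_virt_diff is computed on raw integers in the diff.
    const std::uint64_t diff = GetInteger(linear_virt) - GetInteger(aligned_phys);

    // A linear-mapped physical region pairs with phys + diff on the virtual side.
    const KVirtualAddress pair{GetInteger(linear_phys) + diff};
    std::printf("pair=0x%llx\n", (unsigned long long)GetInteger(pair));
}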
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 183a4d227..d5b08eeb5 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -14,6 +14,7 @@
14#include "core/hardware_properties.h" 14#include "core/hardware_properties.h"
15#include "core/hle/kernel/k_auto_object.h" 15#include "core/hle/kernel/k_auto_object.h"
16#include "core/hle/kernel/k_slab_heap.h" 16#include "core/hle/kernel/k_slab_heap.h"
17#include "core/hle/kernel/k_typed_address.h"
17#include "core/hle/kernel/svc_common.h" 18#include "core/hle/kernel/svc_common.h"
18 19
19namespace Core { 20namespace Core {
@@ -185,7 +186,7 @@ public:
185 186
186 void InvalidateAllInstructionCaches(); 187 void InvalidateAllInstructionCaches();
187 188
188 void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size); 189 void InvalidateCpuInstructionCacheRange(KProcessAddress addr, std::size_t size);
189 190
190 /// Registers all kernel objects with the global emulation state, this is purely for tracking 191 /// Registers all kernel objects with the global emulation state, this is purely for tracking
191 /// leaks after emulation has been shutdown. 192 /// leaks after emulation has been shutdown.
diff --git a/src/core/hle/kernel/memory_types.h b/src/core/hle/kernel/memory_types.h
index 92b8b37ac..18de675cc 100644
--- a/src/core/hle/kernel/memory_types.h
+++ b/src/core/hle/kernel/memory_types.h
@@ -6,6 +6,7 @@
6#include <array> 6#include <array>
7 7
8#include "common/common_types.h" 8#include "common/common_types.h"
9#include "core/hle/kernel/k_typed_address.h"
9 10
10namespace Kernel { 11namespace Kernel {
11 12
@@ -14,7 +15,4 @@ constexpr std::size_t PageSize{1 << PageBits};
14 15
15using Page = std::array<u8, PageSize>; 16using Page = std::array<u8, PageSize>;
16 17
17using KPhysicalAddress = PAddr;
18using KProcessAddress = VAddr;
19
20} // namespace Kernel 18} // namespace Kernel
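memory_types.h previously defined KPhysicalAddress and KProcessAddress as plain aliases of PAddr and VAddr, which are themselves integer aliases, so every address space collapsed to the same u64 and mix-ups compiled silently. A short demonstration of what the weak aliases could not catch and the tagged replacements do, with toy types for illustration:

// Demonstration of the motivation for this commit: plain typedefs are
// interchangeable, tagged wrappers are not. Toy types, not the real headers.
#include <cstdint>

// The old scheme: all aliases collapse to the same integer type.
using VAddr = std::uint64_t;
using PAddr = std::uint64_t;
using WeakProcessAddress = VAddr;
using WeakPhysicalAddress = PAddr;

void FlushByPhysical(WeakPhysicalAddress) {}

// The new scheme: distinct tag types per address space.
template <typename Tag>
struct Tagged { std::uint64_t v; };
using KProcessAddress = Tagged<struct ProcessTag>;
using KPhysicalAddress = Tagged<struct PhysicalTag>;

void FlushByPhysicalTagged(KPhysicalAddress) {}

int main() {
    WeakProcessAddress weak{0x1000};
    FlushByPhysical(weak); // compiles: nothing distinguishes the address spaces

    KProcessAddress strong{0x1000};
    // FlushByPhysicalTagged(strong); // error: no conversion between tags
    FlushByPhysicalTagged(KPhysicalAddress{0x2000});
}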
diff --git a/src/core/hle/kernel/svc/svc_address_arbiter.cpp b/src/core/hle/kernel/svc/svc_address_arbiter.cpp
index 22071731b..04cc5ea64 100644
--- a/src/core/hle/kernel/svc/svc_address_arbiter.cpp
+++ b/src/core/hle/kernel/svc/svc_address_arbiter.cpp
@@ -37,7 +37,7 @@ constexpr bool IsValidArbitrationType(Svc::ArbitrationType type) {
37} // namespace 37} // namespace
38 38
39// Wait for an address (via Address Arbiter) 39// Wait for an address (via Address Arbiter)
40Result WaitForAddress(Core::System& system, VAddr address, ArbitrationType arb_type, s32 value, 40Result WaitForAddress(Core::System& system, u64 address, ArbitrationType arb_type, s32 value,
41 s64 timeout_ns) { 41 s64 timeout_ns) {
42 LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, arb_type=0x{:X}, value=0x{:X}, timeout_ns={}", 42 LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, arb_type=0x{:X}, value=0x{:X}, timeout_ns={}",
43 address, arb_type, value, timeout_ns); 43 address, arb_type, value, timeout_ns);
@@ -68,7 +68,7 @@ Result WaitForAddress(Core::System& system, VAddr address, ArbitrationType arb_t
68} 68}
69 69
70// Signals to an address (via Address Arbiter) 70// Signals to an address (via Address Arbiter)
71Result SignalToAddress(Core::System& system, VAddr address, SignalType signal_type, s32 value, 71Result SignalToAddress(Core::System& system, u64 address, SignalType signal_type, s32 value,
72 s32 count) { 72 s32 count) {
73 LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, signal_type=0x{:X}, value=0x{:X}, count=0x{:X}", 73 LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, signal_type=0x{:X}, value=0x{:X}, count=0x{:X}",
74 address, signal_type, value, count); 74 address, signal_type, value, count);
@@ -82,12 +82,12 @@ Result SignalToAddress(Core::System& system, VAddr address, SignalType signal_ty
82 .SignalAddressArbiter(address, signal_type, value, count)); 82 .SignalAddressArbiter(address, signal_type, value, count));
83} 83}
84 84
85Result WaitForAddress64(Core::System& system, VAddr address, ArbitrationType arb_type, s32 value, 85Result WaitForAddress64(Core::System& system, u64 address, ArbitrationType arb_type, s32 value,
86 s64 timeout_ns) { 86 s64 timeout_ns) {
87 R_RETURN(WaitForAddress(system, address, arb_type, value, timeout_ns)); 87 R_RETURN(WaitForAddress(system, address, arb_type, value, timeout_ns));
88} 88}
89 89
90Result SignalToAddress64(Core::System& system, VAddr address, SignalType signal_type, s32 value, 90Result SignalToAddress64(Core::System& system, u64 address, SignalType signal_type, s32 value,
91 s32 count) { 91 s32 count) {
92 R_RETURN(SignalToAddress(system, address, signal_type, value, count)); 92 R_RETURN(SignalToAddress(system, address, signal_type, value, count));
93} 93}
diff --git a/src/core/hle/kernel/svc/svc_code_memory.cpp b/src/core/hle/kernel/svc/svc_code_memory.cpp
index 43feab986..687baff82 100644
--- a/src/core/hle/kernel/svc/svc_code_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_code_memory.cpp
@@ -29,7 +29,7 @@ constexpr bool IsValidUnmapFromOwnerCodeMemoryPermission(MemoryPermission perm)
29 29
30} // namespace 30} // namespace
31 31
32Result CreateCodeMemory(Core::System& system, Handle* out, VAddr address, uint64_t size) { 32Result CreateCodeMemory(Core::System& system, Handle* out, u64 address, uint64_t size) {
33 LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, size=0x{:X}", address, size); 33 LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, size=0x{:X}", address, size);
34 34
35 // Get kernel instance. 35 // Get kernel instance.
@@ -64,7 +64,7 @@ Result CreateCodeMemory(Core::System& system, Handle* out, VAddr address, uint64
64} 64}
65 65
66Result ControlCodeMemory(Core::System& system, Handle code_memory_handle, 66Result ControlCodeMemory(Core::System& system, Handle code_memory_handle,
67 CodeMemoryOperation operation, VAddr address, uint64_t size, 67 CodeMemoryOperation operation, u64 address, uint64_t size,
68 MemoryPermission perm) { 68 MemoryPermission perm) {
69 69
70 LOG_TRACE(Kernel_SVC, 70 LOG_TRACE(Kernel_SVC,
diff --git a/src/core/hle/kernel/svc/svc_condition_variable.cpp b/src/core/hle/kernel/svc/svc_condition_variable.cpp
index 648ed23d0..ca120d67e 100644
--- a/src/core/hle/kernel/svc/svc_condition_variable.cpp
+++ b/src/core/hle/kernel/svc/svc_condition_variable.cpp
@@ -11,7 +11,7 @@
11namespace Kernel::Svc { 11namespace Kernel::Svc {
12 12
13/// Wait process wide key atomic 13/// Wait process wide key atomic
14Result WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_key, u32 tag, 14Result WaitProcessWideKeyAtomic(Core::System& system, u64 address, u64 cv_key, u32 tag,
15 s64 timeout_ns) { 15 s64 timeout_ns) {
16 LOG_TRACE(Kernel_SVC, "called address={:X}, cv_key={:X}, tag=0x{:08X}, timeout_ns={}", address, 16 LOG_TRACE(Kernel_SVC, "called address={:X}, cv_key={:X}, tag=0x{:08X}, timeout_ns={}", address,
17 cv_key, tag, timeout_ns); 17 cv_key, tag, timeout_ns);
@@ -43,7 +43,7 @@ Result WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_ke
43} 43}
44 44
45/// Signal process wide key 45/// Signal process wide key
46void SignalProcessWideKey(Core::System& system, VAddr cv_key, s32 count) { 46void SignalProcessWideKey(Core::System& system, u64 cv_key, s32 count) {
47 LOG_TRACE(Kernel_SVC, "called, cv_key=0x{:X}, count=0x{:08X}", cv_key, count); 47 LOG_TRACE(Kernel_SVC, "called, cv_key=0x{:X}, count=0x{:08X}", cv_key, count);
48 48
49 // Signal the condition variable. 49 // Signal the condition variable.
diff --git a/src/core/hle/kernel/svc/svc_debug_string.cpp b/src/core/hle/kernel/svc/svc_debug_string.cpp
index d4bf062d1..8771d2b01 100644
--- a/src/core/hle/kernel/svc/svc_debug_string.cpp
+++ b/src/core/hle/kernel/svc/svc_debug_string.cpp
@@ -8,7 +8,7 @@
8namespace Kernel::Svc { 8namespace Kernel::Svc {
9 9
10/// Used to output a message on a debug hardware unit - does nothing on a retail unit 10/// Used to output a message on a debug hardware unit - does nothing on a retail unit
11Result OutputDebugString(Core::System& system, VAddr address, u64 len) { 11Result OutputDebugString(Core::System& system, u64 address, u64 len) {
12 R_SUCCEED_IF(len == 0); 12 R_SUCCEED_IF(len == 0);
13 13
14 std::string str(len, '\0'); 14 std::string str(len, '\0');
diff --git a/src/core/hle/kernel/svc/svc_exception.cpp b/src/core/hle/kernel/svc/svc_exception.cpp
index c2782908d..4ab5f471f 100644
--- a/src/core/hle/kernel/svc/svc_exception.cpp
+++ b/src/core/hle/kernel/svc/svc_exception.cpp
@@ -20,7 +20,7 @@ void Break(Core::System& system, BreakReason reason, u64 info1, u64 info2) {
20 bool has_dumped_buffer{}; 20 bool has_dumped_buffer{};
21 std::vector<u8> debug_buffer; 21 std::vector<u8> debug_buffer;
22 22
23 const auto handle_debug_buffer = [&](VAddr addr, u64 sz) { 23 const auto handle_debug_buffer = [&](u64 addr, u64 sz) {
24 if (sz == 0 || addr == 0 || has_dumped_buffer) { 24 if (sz == 0 || addr == 0 || has_dumped_buffer) {
25 return; 25 return;
26 } 26 }
diff --git a/src/core/hle/kernel/svc/svc_info.cpp b/src/core/hle/kernel/svc/svc_info.cpp
index 04b6d6964..2b2c878b5 100644
--- a/src/core/hle/kernel/svc/svc_info.cpp
+++ b/src/core/hle/kernel/svc/svc_info.cpp
@@ -54,7 +54,7 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
         R_SUCCEED();
 
     case InfoType::AliasRegionAddress:
-        *result = process->PageTable().GetAliasRegionStart();
+        *result = GetInteger(process->PageTable().GetAliasRegionStart());
         R_SUCCEED();
 
     case InfoType::AliasRegionSize:
@@ -62,7 +62,7 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
         R_SUCCEED();
 
     case InfoType::HeapRegionAddress:
-        *result = process->PageTable().GetHeapRegionStart();
+        *result = GetInteger(process->PageTable().GetHeapRegionStart());
         R_SUCCEED();
 
     case InfoType::HeapRegionSize:
@@ -70,7 +70,7 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
         R_SUCCEED();
 
     case InfoType::AslrRegionAddress:
-        *result = process->PageTable().GetAliasCodeRegionStart();
+        *result = GetInteger(process->PageTable().GetAliasCodeRegionStart());
         R_SUCCEED();
 
     case InfoType::AslrRegionSize:
@@ -78,7 +78,7 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
         R_SUCCEED();
 
     case InfoType::StackRegionAddress:
-        *result = process->PageTable().GetStackRegionStart();
+        *result = GetInteger(process->PageTable().GetStackRegionStart());
         R_SUCCEED();
 
     case InfoType::StackRegionSize:
@@ -107,7 +107,7 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
         R_SUCCEED();
 
     case InfoType::UserExceptionContextAddress:
-        *result = process->GetProcessLocalRegionAddress();
+        *result = GetInteger(process->GetProcessLocalRegionAddress());
         R_SUCCEED();
 
     case InfoType::TotalNonSystemMemorySize:
diff --git a/src/core/hle/kernel/svc/svc_lock.cpp b/src/core/hle/kernel/svc/svc_lock.cpp
index 3681279d6..1d7bc4246 100644
--- a/src/core/hle/kernel/svc/svc_lock.cpp
+++ b/src/core/hle/kernel/svc/svc_lock.cpp
@@ -9,7 +9,7 @@
 namespace Kernel::Svc {
 
 /// Attempts to locks a mutex
-Result ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address, u32 tag) {
+Result ArbitrateLock(Core::System& system, Handle thread_handle, u64 address, u32 tag) {
     LOG_TRACE(Kernel_SVC, "called thread_handle=0x{:08X}, address=0x{:X}, tag=0x{:08X}",
               thread_handle, address, tag);
 
@@ -21,7 +21,7 @@ Result ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address,
 }
 
 /// Unlock a mutex
-Result ArbitrateUnlock(Core::System& system, VAddr address) {
+Result ArbitrateUnlock(Core::System& system, u64 address) {
     LOG_TRACE(Kernel_SVC, "called address=0x{:X}", address);
 
     // Validate the input address.
diff --git a/src/core/hle/kernel/svc/svc_memory.cpp b/src/core/hle/kernel/svc/svc_memory.cpp
index 4db25a3b7..5dcb7f045 100644
--- a/src/core/hle/kernel/svc/svc_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_memory.cpp
@@ -22,15 +22,14 @@ constexpr bool IsValidSetMemoryPermission(MemoryPermission perm) {
 // Checks if address + size is greater than the given address
 // This can return false if the size causes an overflow of a 64-bit type
 // or if the given size is zero.
-constexpr bool IsValidAddressRange(VAddr address, u64 size) {
+constexpr bool IsValidAddressRange(u64 address, u64 size) {
     return address + size > address;
 }
 
 // Helper function that performs the common sanity checks for svcMapMemory
 // and svcUnmapMemory. This is doable, as both functions perform their sanitizing
 // in the same order.
-Result MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, VAddr src_addr,
-                                  u64 size) {
+Result MapUnmapMemorySanityChecks(const KPageTable& manager, u64 dst_addr, u64 src_addr, u64 size) {
     if (!Common::Is4KBAligned(dst_addr)) {
         LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr);
         R_THROW(ResultInvalidAddress);
@@ -99,7 +98,7 @@ Result MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, VAd
 
 } // namespace
 
-Result SetMemoryPermission(Core::System& system, VAddr address, u64 size, MemoryPermission perm) {
+Result SetMemoryPermission(Core::System& system, u64 address, u64 size, MemoryPermission perm) {
     LOG_DEBUG(Kernel_SVC, "called, address=0x{:016X}, size=0x{:X}, perm=0x{:08X", address, size,
               perm);
 
@@ -120,7 +119,7 @@ Result SetMemoryPermission(Core::System& system, VAddr address, u64 size, Memory
     R_RETURN(page_table.SetMemoryPermission(address, size, perm));
 }
 
-Result SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask, u32 attr) {
+Result SetMemoryAttribute(Core::System& system, u64 address, u64 size, u32 mask, u32 attr) {
     LOG_DEBUG(Kernel_SVC,
               "called, address=0x{:016X}, size=0x{:X}, mask=0x{:08X}, attribute=0x{:08X}", address,
               size, mask, attr);
@@ -145,7 +144,7 @@ Result SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mas
 }
 
 /// Maps a memory range into a different range.
-Result MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
+Result MapMemory(Core::System& system, u64 dst_addr, u64 src_addr, u64 size) {
     LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
               src_addr, size);
 
@@ -160,7 +159,7 @@ Result MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size)
 }
 
 /// Unmaps a region that was previously mapped with svcMapMemory
-Result UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
+Result UnmapMemory(Core::System& system, u64 dst_addr, u64 src_addr, u64 size) {
     LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
               src_addr, size);
 
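The IsValidAddressRange helper above leans on well-defined unsigned wraparound: if address + size overflows a u64, or size is zero, the sum is not strictly greater than address and the range is rejected. The three cases, checked at compile time:

    #include <cstdint>

    constexpr bool IsValidAddressRange(std::uint64_t address, std::uint64_t size) {
        return address + size > address;
    }

    static_assert(IsValidAddressRange(0x1000, 0x2000));              // ordinary range
    static_assert(!IsValidAddressRange(0x1000, 0));                  // zero size rejected
    static_assert(!IsValidAddressRange(0xFFFFFFFFFFFFF000, 0x2000)); // wraps past 2^64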
diff --git a/src/core/hle/kernel/svc/svc_physical_memory.cpp b/src/core/hle/kernel/svc/svc_physical_memory.cpp
index 63196e1ed..c2fbfb59a 100644
--- a/src/core/hle/kernel/svc/svc_physical_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_physical_memory.cpp
@@ -8,7 +8,7 @@
 namespace Kernel::Svc {
 
 /// Set the process heap to a given Size. It can both extend and shrink the heap.
-Result SetHeapSize(Core::System& system, VAddr* out_address, u64 size) {
+Result SetHeapSize(Core::System& system, u64* out_address, u64 size) {
     LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", size);
 
     // Validate size.
@@ -20,7 +20,7 @@ Result SetHeapSize(Core::System& system, VAddr* out_address, u64 size) {
 }
 
 /// Maps memory at a desired address
-Result MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
+Result MapPhysicalMemory(Core::System& system, u64 addr, u64 size) {
     LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
 
     if (!Common::Is4KBAligned(addr)) {
@@ -69,7 +69,7 @@ Result MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
 }
 
 /// Unmaps memory previously mapped via MapPhysicalMemory
-Result UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
+Result UnmapPhysicalMemory(Core::System& system, u64 addr, u64 size) {
     LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
 
     if (!Common::Is4KBAligned(addr)) {
diff --git a/src/core/hle/kernel/svc/svc_port.cpp b/src/core/hle/kernel/svc/svc_port.cpp
index 0b5556bc4..c6eb70422 100644
--- a/src/core/hle/kernel/svc/svc_port.cpp
+++ b/src/core/hle/kernel/svc/svc_port.cpp
@@ -12,7 +12,7 @@
 
 namespace Kernel::Svc {
 
-Result ConnectToNamedPort(Core::System& system, Handle* out, VAddr user_name) {
+Result ConnectToNamedPort(Core::System& system, Handle* out, u64 user_name) {
     // Copy the provided name from user memory to kernel memory.
     auto string_name = system.Memory().ReadCString(user_name, KObjectName::NameLengthMax);
 
diff --git a/src/core/hle/kernel/svc/svc_process.cpp b/src/core/hle/kernel/svc/svc_process.cpp
index b538c37e7..3c3579947 100644
--- a/src/core/hle/kernel/svc/svc_process.cpp
+++ b/src/core/hle/kernel/svc/svc_process.cpp
@@ -50,7 +50,7 @@ Result GetProcessId(Core::System& system, u64* out_process_id, Handle handle) {
     R_SUCCEED();
 }
 
-Result GetProcessList(Core::System& system, s32* out_num_processes, VAddr out_process_ids,
+Result GetProcessList(Core::System& system, s32* out_num_processes, u64 out_process_ids,
                       int32_t out_process_ids_size) {
     LOG_DEBUG(Kernel_SVC, "called. out_process_ids=0x{:016X}, out_process_ids_size={}",
               out_process_ids, out_process_ids_size);
diff --git a/src/core/hle/kernel/svc/svc_process_memory.cpp b/src/core/hle/kernel/svc/svc_process_memory.cpp
index f9210ca1e..aee0f2f36 100644
--- a/src/core/hle/kernel/svc/svc_process_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_process_memory.cpp
@@ -8,7 +8,7 @@
 namespace Kernel::Svc {
 namespace {
 
-constexpr bool IsValidAddressRange(VAddr address, u64 size) {
+constexpr bool IsValidAddressRange(u64 address, u64 size) {
     return address + size > address;
 }
 
@@ -26,7 +26,7 @@ constexpr bool IsValidProcessMemoryPermission(Svc::MemoryPermission perm) {
 
 } // namespace
 
-Result SetProcessMemoryPermission(Core::System& system, Handle process_handle, VAddr address,
+Result SetProcessMemoryPermission(Core::System& system, Handle process_handle, u64 address,
                                   u64 size, Svc::MemoryPermission perm) {
     LOG_TRACE(Kernel_SVC,
               "called, process_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
@@ -56,8 +56,8 @@ Result SetProcessMemoryPermission(Core::System& system, Handle process_handle, V
     R_RETURN(page_table.SetProcessMemoryPermission(address, size, perm));
 }
 
-Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
-                        VAddr src_address, u64 size) {
+Result MapProcessMemory(Core::System& system, u64 dst_address, Handle process_handle,
+                        u64 src_address, u64 size) {
     LOG_TRACE(Kernel_SVC,
               "called, dst_address=0x{:X}, process_handle=0x{:X}, src_address=0x{:X}, size=0x{:X}",
               dst_address, process_handle, src_address, size);
@@ -97,8 +97,8 @@ Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle process_
                                    KMemoryPermission::UserReadWrite));
 }
 
-Result UnmapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
-                          VAddr src_address, u64 size) {
+Result UnmapProcessMemory(Core::System& system, u64 dst_address, Handle process_handle,
+                          u64 src_address, u64 size) {
     LOG_TRACE(Kernel_SVC,
               "called, dst_address=0x{:X}, process_handle=0x{:X}, src_address=0x{:X}, size=0x{:X}",
               dst_address, process_handle, src_address, size);
diff --git a/src/core/hle/kernel/svc/svc_query_memory.cpp b/src/core/hle/kernel/svc/svc_query_memory.cpp
index 457ebf950..5db5611f0 100644
--- a/src/core/hle/kernel/svc/svc_query_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_query_memory.cpp
@@ -8,7 +8,7 @@
 namespace Kernel::Svc {
 
 Result QueryMemory(Core::System& system, uint64_t out_memory_info, PageInfo* out_page_info,
-                   VAddr query_address) {
+                   u64 query_address) {
     LOG_TRACE(Kernel_SVC,
               "called, out_memory_info=0x{:016X}, "
               "query_address=0x{:016X}",
diff --git a/src/core/hle/kernel/svc/svc_shared_memory.cpp b/src/core/hle/kernel/svc/svc_shared_memory.cpp
index 40d878f17..a698596aa 100644
--- a/src/core/hle/kernel/svc/svc_shared_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_shared_memory.cpp
@@ -26,7 +26,7 @@ constexpr bool IsValidSharedMemoryPermission(MemoryPermission perm) {
 
 } // namespace
 
-Result MapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address, u64 size,
+Result MapSharedMemory(Core::System& system, Handle shmem_handle, u64 address, u64 size,
                        Svc::MemoryPermission map_perm) {
     LOG_TRACE(Kernel_SVC,
               "called, shared_memory_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
@@ -64,7 +64,7 @@ Result MapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address,
     R_RETURN(shmem->Map(process, address, size, map_perm));
 }
 
-Result UnmapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address, u64 size) {
+Result UnmapSharedMemory(Core::System& system, Handle shmem_handle, u64 address, u64 size) {
     // Validate the address/size.
     R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
     R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
diff --git a/src/core/hle/kernel/svc/svc_synchronization.cpp b/src/core/hle/kernel/svc/svc_synchronization.cpp
index 660b45c23..e490a13ae 100644
--- a/src/core/hle/kernel/svc/svc_synchronization.cpp
+++ b/src/core/hle/kernel/svc/svc_synchronization.cpp
@@ -80,7 +80,7 @@ static Result WaitSynchronization(Core::System& system, int32_t* out_index, cons
 }
 
 /// Wait for the given handles to synchronize, timeout after the specified nanoseconds
-Result WaitSynchronization(Core::System& system, int32_t* out_index, VAddr user_handles,
+Result WaitSynchronization(Core::System& system, int32_t* out_index, u64 user_handles,
                            int32_t num_handles, int64_t timeout_ns) {
     LOG_TRACE(Kernel_SVC, "called user_handles={:#x}, num_handles={}, timeout_ns={}", user_handles,
               num_handles, timeout_ns);
diff --git a/src/core/hle/kernel/svc/svc_thread.cpp b/src/core/hle/kernel/svc/svc_thread.cpp
index 50991fb62..0be4858a2 100644
--- a/src/core/hle/kernel/svc/svc_thread.cpp
+++ b/src/core/hle/kernel/svc/svc_thread.cpp
@@ -19,8 +19,8 @@ constexpr bool IsValidVirtualCoreId(int32_t core_id) {
 } // Anonymous namespace
 
 /// Creates a new thread
-Result CreateThread(Core::System& system, Handle* out_handle, VAddr entry_point, u64 arg,
-                    VAddr stack_bottom, s32 priority, s32 core_id) {
+Result CreateThread(Core::System& system, Handle* out_handle, u64 entry_point, u64 arg,
+                    u64 stack_bottom, s32 priority, s32 core_id) {
     LOG_DEBUG(Kernel_SVC,
               "called entry_point=0x{:08X}, arg=0x{:08X}, stack_bottom=0x{:08X}, "
               "priority=0x{:08X}, core_id=0x{:08X}",
@@ -129,7 +129,7 @@ void SleepThread(Core::System& system, s64 nanoseconds) {
 }
 
 /// Gets the thread context
-Result GetThreadContext3(Core::System& system, VAddr out_context, Handle thread_handle) {
+Result GetThreadContext3(Core::System& system, u64 out_context, Handle thread_handle) {
     LOG_DEBUG(Kernel_SVC, "called, out_context=0x{:08X}, thread_handle=0x{:X}", out_context,
               thread_handle);
 
@@ -217,7 +217,7 @@ Result SetThreadPriority(Core::System& system, Handle thread_handle, s32 priorit
     R_SUCCEED();
 }
 
-Result GetThreadList(Core::System& system, s32* out_num_threads, VAddr out_thread_ids,
+Result GetThreadList(Core::System& system, s32* out_num_threads, u64 out_thread_ids,
                      s32 out_thread_ids_size, Handle debug_handle) {
     // TODO: Handle this case when debug events are supported.
     UNIMPLEMENTED_IF(debug_handle != InvalidHandle);
diff --git a/src/core/hle/kernel/svc/svc_transfer_memory.cpp b/src/core/hle/kernel/svc/svc_transfer_memory.cpp
index 394f06728..82d469a37 100644
--- a/src/core/hle/kernel/svc/svc_transfer_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_transfer_memory.cpp
@@ -25,7 +25,7 @@ constexpr bool IsValidTransferMemoryPermission(MemoryPermission perm) {
 } // Anonymous namespace
 
 /// Creates a TransferMemory object
-Result CreateTransferMemory(Core::System& system, Handle* out, VAddr address, u64 size,
+Result CreateTransferMemory(Core::System& system, Handle* out, u64 address, u64 size,
                             MemoryPermission map_perm) {
     auto& kernel = system.Kernel();
 
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h
index 39355d9c4..7f380ca4f 100644
--- a/src/core/hle/kernel/svc_types.h
+++ b/src/core/hle/kernel/svc_types.h
@@ -253,7 +253,7 @@ struct LastThreadContext {
 };
 
 struct PhysicalMemoryInfo {
-    PAddr physical_address;
+    u64 physical_address;
     u64 virtual_address;
     u64 size;
 };
@@ -359,7 +359,7 @@ struct LastThreadContext {
 };
 
 struct PhysicalMemoryInfo {
-    PAddr physical_address;
+    u64 physical_address;
     u32 virtual_address;
     u32 size;
 };
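svc_types.h declares PhysicalMemoryInfo twice, once with 64-bit and once with 32-bit virtual_address/size fields (presumably the 64-bit and 32-bit ABI views of the same structure), and both lose the PAddr alias in favor of a plain u64. Their expected layouts can be pinned with static_asserts; a sketch with invented names, assuming default alignment:

    #include <cstdint>

    using u32 = std::uint32_t;
    using u64 = std::uint64_t;

    struct PhysicalMemoryInfo64 { // the u64/u64/u64 variant above
        u64 physical_address;
        u64 virtual_address;
        u64 size;
    };
    static_assert(sizeof(PhysicalMemoryInfo64) == 24);

    struct PhysicalMemoryInfo32 { // the u64/u32/u32 variant above
        u64 physical_address;
        u32 virtual_address;
        u32 size;
    };
    static_assert(sizeof(PhysicalMemoryInfo32) == 16);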
diff --git a/src/core/hle/service/hid/controllers/console_sixaxis.cpp b/src/core/hle/service/hid/controllers/console_sixaxis.cpp
index 478d38590..37f2e4405 100644
--- a/src/core/hle/service/hid/controllers/console_sixaxis.cpp
+++ b/src/core/hle/service/hid/controllers/console_sixaxis.cpp
@@ -63,7 +63,7 @@ void Controller_ConsoleSixAxis::OnUpdate(const Core::Timing::CoreTiming& core_ti
     system.Memory().WriteBlock(transfer_memory, &seven_sixaxis_lifo, sizeof(seven_sixaxis_lifo));
 }
 
-void Controller_ConsoleSixAxis::SetTransferMemoryAddress(VAddr t_mem) {
+void Controller_ConsoleSixAxis::SetTransferMemoryAddress(Common::ProcessAddress t_mem) {
     transfer_memory = t_mem;
 }
 
diff --git a/src/core/hle/service/hid/controllers/console_sixaxis.h b/src/core/hle/service/hid/controllers/console_sixaxis.h
index 8d3e4081b..7015d924c 100644
--- a/src/core/hle/service/hid/controllers/console_sixaxis.h
+++ b/src/core/hle/service/hid/controllers/console_sixaxis.h
@@ -5,8 +5,8 @@
 
 #include <array>
 
-#include "common/common_types.h"
 #include "common/quaternion.h"
+#include "common/typed_address.h"
 #include "core/hle/service/hid/controllers/controller_base.h"
 #include "core/hle/service/hid/ring_lifo.h"
 
@@ -34,7 +34,7 @@ public:
     void OnUpdate(const Core::Timing::CoreTiming& core_timing) override;
 
     // Called on InitializeSevenSixAxisSensor
-    void SetTransferMemoryAddress(VAddr t_mem);
+    void SetTransferMemoryAddress(Common::ProcessAddress t_mem);
 
     // Called on ResetSevenSixAxisSensorTimestamp
     void ResetTimestamp();
@@ -66,7 +66,7 @@ private:
     static_assert(sizeof(seven_sixaxis_lifo) == 0xA70, "SevenSixAxisState is an invalid size");
 
     SevenSixAxisState next_seven_sixaxis_state{};
-    VAddr transfer_memory{};
+    Common::ProcessAddress transfer_memory{};
     ConsoleSharedMemory* shared_memory = nullptr;
     Core::HID::EmulatedConsole* console = nullptr;
 
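The header-side migration is mechanical: swap the common_types.h include for typed_address.h and retype the member. One property worth preserving is that the braced initializer keeps its old meaning, i.e. Common::ProcessAddress transfer_memory{} should still denote the null address exactly as VAddr transfer_memory{} did. Under a stand-in wrapper:

    #include <cstdint>

    // Sketch: the braced-init typed member must stay equivalent to VAddr{} == 0.
    struct ProcessAddress {
        constexpr ProcessAddress(std::uint64_t v = 0) : value(v) {}
        std::uint64_t value;
    };
    constexpr std::uint64_t GetInteger(ProcessAddress addr) {
        return addr.value;
    }

    static_assert(GetInteger(ProcessAddress{}) == 0); // transfer_memory{} is still null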
diff --git a/src/core/hle/service/hid/controllers/palma.cpp b/src/core/hle/service/hid/controllers/palma.cpp
index bce51285c..14c67e454 100644
--- a/src/core/hle/service/hid/controllers/palma.cpp
+++ b/src/core/hle/service/hid/controllers/palma.cpp
@@ -152,7 +152,7 @@ Result Controller_Palma::WritePalmaRgbLedPatternEntry(const PalmaConnectionHandl
 }
 
 Result Controller_Palma::WritePalmaWaveEntry(const PalmaConnectionHandle& handle, PalmaWaveSet wave,
-                                             VAddr t_mem, u64 size) {
+                                             Common::ProcessAddress t_mem, u64 size) {
     if (handle.npad_id != active_handle.npad_id) {
         return InvalidPalmaHandle;
     }
diff --git a/src/core/hle/service/hid/controllers/palma.h b/src/core/hle/service/hid/controllers/palma.h
index cf62f0dbc..a0491a819 100644
--- a/src/core/hle/service/hid/controllers/palma.h
+++ b/src/core/hle/service/hid/controllers/palma.h
@@ -5,7 +5,7 @@
 
 #include <array>
 #include "common/common_funcs.h"
-#include "common/common_types.h"
+#include "common/typed_address.h"
 #include "core/hle/service/hid/controllers/controller_base.h"
 #include "core/hle/service/hid/errors.h"
 
@@ -125,8 +125,8 @@ public:
     Result ReadPalmaUniqueCode(const PalmaConnectionHandle& handle);
     Result SetPalmaUniqueCodeInvalid(const PalmaConnectionHandle& handle);
     Result WritePalmaRgbLedPatternEntry(const PalmaConnectionHandle& handle, u64 unknown);
-    Result WritePalmaWaveEntry(const PalmaConnectionHandle& handle, PalmaWaveSet wave, VAddr t_mem,
-                               u64 size);
+    Result WritePalmaWaveEntry(const PalmaConnectionHandle& handle, PalmaWaveSet wave,
+                               Common::ProcessAddress t_mem, u64 size);
     Result SetPalmaDataBaseIdentificationVersion(const PalmaConnectionHandle& handle,
                                                  s32 database_id_version_);
     Result GetPalmaDataBaseIdentificationVersion(const PalmaConnectionHandle& handle);
diff --git a/src/core/hle/service/hid/hidbus/hidbus_base.cpp b/src/core/hle/service/hid/hidbus/hidbus_base.cpp
index dfd23ec04..ee522c36e 100644
--- a/src/core/hle/service/hid/hidbus/hidbus_base.cpp
+++ b/src/core/hle/service/hid/hidbus/hidbus_base.cpp
@@ -59,7 +59,7 @@ void HidbusBase::DisablePollingMode() {
     polling_mode_enabled = false;
 }
 
-void HidbusBase::SetTransferMemoryAddress(VAddr t_mem) {
+void HidbusBase::SetTransferMemoryAddress(Common::ProcessAddress t_mem) {
     transfer_memory = t_mem;
 }
 
diff --git a/src/core/hle/service/hid/hidbus/hidbus_base.h b/src/core/hle/service/hid/hidbus/hidbus_base.h
index 26313264d..ec41684e1 100644
--- a/src/core/hle/service/hid/hidbus/hidbus_base.h
+++ b/src/core/hle/service/hid/hidbus/hidbus_base.h
@@ -5,7 +5,7 @@
 
 #include <array>
 #include <span>
-#include "common/common_types.h"
+#include "common/typed_address.h"
 #include "core/hle/result.h"
 
 namespace Core {
@@ -138,7 +138,7 @@ public:
     void DisablePollingMode();
 
     // Called on EnableJoyPollingReceiveMode
-    void SetTransferMemoryAddress(VAddr t_mem);
+    void SetTransferMemoryAddress(Common::ProcessAddress t_mem);
 
     Kernel::KReadableEvent& GetSendCommandAsycEvent() const;
 
@@ -174,7 +174,7 @@ protected:
     JoyEnableSixAxisDataAccessor enable_sixaxis_data{};
     ButtonOnlyPollingDataAccessor button_only_data{};
 
-    VAddr transfer_memory{};
+    Common::ProcessAddress transfer_memory{};
 
     Core::System& system;
     Kernel::KEvent* send_command_async_event;
diff --git a/src/core/hle/service/hid/irsensor/image_transfer_processor.cpp b/src/core/hle/service/hid/irsensor/image_transfer_processor.cpp
index a268750cd..ca5d067e8 100644
--- a/src/core/hle/service/hid/irsensor/image_transfer_processor.cpp
+++ b/src/core/hle/service/hid/irsensor/image_transfer_processor.cpp
@@ -140,7 +140,7 @@ void ImageTransferProcessor::SetConfig(
     npad_device->SetCameraFormat(current_config.origin_format);
 }
 
-void ImageTransferProcessor::SetTransferMemoryAddress(VAddr t_mem) {
+void ImageTransferProcessor::SetTransferMemoryAddress(Common::ProcessAddress t_mem) {
     transfer_memory = t_mem;
 }
 
diff --git a/src/core/hle/service/hid/irsensor/image_transfer_processor.h b/src/core/hle/service/hid/irsensor/image_transfer_processor.h
index 7cfe04c8c..7f42d8453 100644
--- a/src/core/hle/service/hid/irsensor/image_transfer_processor.h
+++ b/src/core/hle/service/hid/irsensor/image_transfer_processor.h
@@ -3,7 +3,7 @@
 
 #pragma once
 
-#include "common/common_types.h"
+#include "common/typed_address.h"
 #include "core/hid/irs_types.h"
 #include "core/hle/service/hid/irsensor/processor_base.h"
 
@@ -37,7 +37,7 @@ public:
     void SetConfig(Core::IrSensor::PackedImageTransferProcessorExConfig config);
 
     // Transfer memory where the image data will be stored
-    void SetTransferMemoryAddress(VAddr t_mem);
+    void SetTransferMemoryAddress(Common::ProcessAddress t_mem);
 
     Core::IrSensor::ImageTransferProcessorState GetState(std::vector<u8>& data) const;
 
@@ -72,6 +72,6 @@ private:
     int callback_key{};
 
     Core::System& system;
-    VAddr transfer_memory{};
+    Common::ProcessAddress transfer_memory{};
 };
 } // namespace Service::IRS
diff --git a/src/core/hle/service/jit/jit.cpp b/src/core/hle/service/jit/jit.cpp
index 46bcfd695..607f27b21 100644
--- a/src/core/hle/service/jit/jit.cpp
+++ b/src/core/hle/service/jit/jit.cpp
@@ -195,7 +195,7 @@ public:
195 } 195 }
196 196
197 // Set up the configuration with the required TransferMemory address 197 // Set up the configuration with the required TransferMemory address
198 configuration.transfer_memory.offset = tmem->GetSourceAddress(); 198 configuration.transfer_memory.offset = GetInteger(tmem->GetSourceAddress());
199 configuration.transfer_memory.size = tmem_size; 199 configuration.transfer_memory.size = tmem_size;
200 200
201 // Gather up all the callbacks from the loaded plugin 201 // Gather up all the callbacks from the loaded plugin
@@ -383,12 +383,12 @@ public:
383 } 383 }
384 384
385 const CodeRange user_rx{ 385 const CodeRange user_rx{
386 .offset = rx_mem->GetSourceAddress(), 386 .offset = GetInteger(rx_mem->GetSourceAddress()),
387 .size = parameters.rx_size, 387 .size = parameters.rx_size,
388 }; 388 };
389 389
390 const CodeRange user_ro{ 390 const CodeRange user_ro{
391 .offset = ro_mem->GetSourceAddress(), 391 .offset = GetInteger(ro_mem->GetSourceAddress()),
392 .size = parameters.ro_size, 392 .size = parameters.ro_size,
393 }; 393 };
394 394
diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp
index 6de96ed5b..437dc2ea5 100644
--- a/src/core/hle/service/ldr/ldr.cpp
+++ b/src/core/hle/service/ldr/ldr.cpp
@@ -337,7 +337,7 @@ public:
337 337
338 bool succeeded = false; 338 bool succeeded = false;
339 const auto map_region_end = 339 const auto map_region_end =
340 page_table.GetAliasCodeRegionStart() + page_table.GetAliasCodeRegionSize(); 340 GetInteger(page_table.GetAliasCodeRegionStart()) + page_table.GetAliasCodeRegionSize();
341 while (current_map_addr < map_region_end) { 341 while (current_map_addr < map_region_end) {
342 if (is_region_available(current_map_addr)) { 342 if (is_region_available(current_map_addr)) {
343 succeeded = true; 343 succeeded = true;
@@ -642,7 +642,8 @@ public:
642 LOG_WARNING(Service_LDR, "(STUBBED) called"); 642 LOG_WARNING(Service_LDR, "(STUBBED) called");
643 643
644 initialized = true; 644 initialized = true;
645 current_map_addr = system.ApplicationProcess()->PageTable().GetAliasCodeRegionStart(); 645 current_map_addr =
646 GetInteger(system.ApplicationProcess()->PageTable().GetAliasCodeRegionStart());
646 647
647 IPC::ResponseBuilder rb{ctx, 2}; 648 IPC::ResponseBuilder rb{ctx, 2};
648 rb.Push(ResultSuccess); 649 rb.Push(ResultSuccess);
diff --git a/src/core/loader/deconstructed_rom_directory.cpp b/src/core/loader/deconstructed_rom_directory.cpp
index 192571d35..3be9b71cf 100644
--- a/src/core/loader/deconstructed_rom_directory.cpp
+++ b/src/core/loader/deconstructed_rom_directory.cpp
@@ -153,7 +153,7 @@ AppLoader_DeconstructedRomDirectory::LoadResult AppLoader_DeconstructedRomDirect
 
     // Load NSO modules
     modules.clear();
-    const VAddr base_address{process.PageTable().GetCodeRegionStart()};
+    const VAddr base_address{GetInteger(process.PageTable().GetCodeRegionStart())};
     VAddr next_load_addr{base_address};
     const FileSys::PatchManager pm{metadata.GetTitleID(), system.GetFileSystemController(),
                                    system.GetContentProvider()};
diff --git a/src/core/loader/kip.cpp b/src/core/loader/kip.cpp
index d8a1bf82a..709e2564f 100644
--- a/src/core/loader/kip.cpp
+++ b/src/core/loader/kip.cpp
@@ -96,7 +96,7 @@ AppLoader::LoadResult AppLoader_KIP::Load(Kernel::KProcess& process,
     }
 
     codeset.memory = std::move(program_image);
-    const VAddr base_address = process.PageTable().GetCodeRegionStart();
+    const VAddr base_address = GetInteger(process.PageTable().GetCodeRegionStart());
     process.LoadModule(std::move(codeset), base_address);
 
     LOG_DEBUG(Loader, "loaded module {} @ 0x{:X}", kip->GetName(), base_address);
diff --git a/src/core/loader/nso.cpp b/src/core/loader/nso.cpp
index a5c384fb5..79639f5e4 100644
--- a/src/core/loader/nso.cpp
+++ b/src/core/loader/nso.cpp
@@ -167,7 +167,7 @@ AppLoader_NSO::LoadResult AppLoader_NSO::Load(Kernel::KProcess& process, Core::S
     modules.clear();
 
     // Load module
-    const VAddr base_address = process.PageTable().GetCodeRegionStart();
+    const VAddr base_address = GetInteger(process.PageTable().GetCodeRegionStart());
     if (!LoadModule(process, system, *file, base_address, true, true)) {
         return {ResultStatus::ErrorLoadingNSO, {}};
     }
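All three loaders follow the same recipe: GetCodeRegionStart() now returns a typed address, and each loader unwraps it once into the VAddr local it already had, leaving the rest of the load path untouched. Roughly:

    #include <cstdint>

    using VAddr = std::uint64_t; // the alias the loaders keep using internally

    struct ProcessAddress { // stand-in for Common::ProcessAddress
        std::uint64_t value;
    };
    constexpr std::uint64_t GetInteger(ProcessAddress addr) {
        return addr.value;
    }

    void LoadModulesAt(ProcessAddress code_region_start) {
        // Unwrap once at the seam; everything downstream still thinks in VAddr.
        const VAddr base_address = GetInteger(code_region_start);
        VAddr next_load_addr = base_address;
        (void)next_load_addr; // per-module loading elided
    }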
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 4397fcfb1..95e070825 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -35,31 +35,35 @@ struct Memory::Impl {
         system.ArmInterface(core_id).PageTableChanged(*current_page_table, address_space_width);
     }
 
-    void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
+    void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+                         Common::PhysicalAddress target) {
         ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
-        ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", base);
-        ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}", target);
+        ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
+        ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}",
+                   GetInteger(target));
         MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, target,
                  Common::PageType::Memory);
 
         if (Settings::IsFastmemEnabled()) {
-            system.DeviceMemory().buffer.Map(base, target - DramMemoryMap::Base, size);
+            system.DeviceMemory().buffer.Map(GetInteger(base),
+                                             GetInteger(target) - DramMemoryMap::Base, size);
         }
     }
 
-    void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
+    void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) {
         ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
-        ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", base);
+        ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
         MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0,
                  Common::PageType::Unmapped);
 
         if (Settings::IsFastmemEnabled()) {
-            system.DeviceMemory().buffer.Unmap(base, size);
+            system.DeviceMemory().buffer.Unmap(GetInteger(base), size);
         }
     }
 
-    [[nodiscard]] u8* GetPointerFromRasterizerCachedMemory(VAddr vaddr) const {
-        const PAddr paddr{current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};
+    [[nodiscard]] u8* GetPointerFromRasterizerCachedMemory(u64 vaddr) const {
+        const Common::PhysicalAddress paddr{
+            current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};
 
         if (!paddr) {
             return {};
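In MapMemoryRegion's fastmem path, the backing-buffer offset is the physical target expressed relative to the start of emulated DRAM, which is why both base and target are unwrapped before the call. A worked example of the arithmetic, with DramMemoryMap::Base assumed to be 0x80000000 for illustration:

    #include <cstdint>

    // Assumed value for illustration; the real constant lives in DramMemoryMap.
    constexpr std::uint64_t DramBase = 0x80000000;

    // buffer.Map(vaddr, backing_offset, size): the backing offset is the
    // physical address expressed relative to the start of emulated DRAM.
    constexpr std::uint64_t BackingOffset(std::uint64_t physical_target) {
        return physical_target - DramBase;
    }

    static_assert(BackingOffset(0x80000000) == 0);       // first DRAM page -> offset 0
    static_assert(BackingOffset(0x80042000) == 0x42000); // linear thereafter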
@@ -68,8 +72,9 @@ struct Memory::Impl {
         return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
     }
 
-    [[nodiscard]] u8* GetPointerFromDebugMemory(VAddr vaddr) const {
-        const PAddr paddr{current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};
+    [[nodiscard]] u8* GetPointerFromDebugMemory(u64 vaddr) const {
+        const Common::PhysicalAddress paddr{
+            current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};
 
         if (paddr == 0) {
             return {};
@@ -78,11 +83,11 @@ struct Memory::Impl {
         return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
     }
 
-    u8 Read8(const VAddr addr) {
+    u8 Read8(const Common::ProcessAddress addr) {
         return Read<u8>(addr);
     }
 
-    u16 Read16(const VAddr addr) {
+    u16 Read16(const Common::ProcessAddress addr) {
         if ((addr & 1) == 0) {
             return Read<u16_le>(addr);
         } else {
@@ -92,7 +97,7 @@ struct Memory::Impl {
         }
     }
 
-    u32 Read32(const VAddr addr) {
+    u32 Read32(const Common::ProcessAddress addr) {
         if ((addr & 3) == 0) {
             return Read<u32_le>(addr);
         } else {
@@ -102,7 +107,7 @@ struct Memory::Impl {
         }
     }
 
-    u64 Read64(const VAddr addr) {
+    u64 Read64(const Common::ProcessAddress addr) {
         if ((addr & 7) == 0) {
             return Read<u64_le>(addr);
         } else {
@@ -112,11 +117,11 @@ struct Memory::Impl {
         }
     }
 
-    void Write8(const VAddr addr, const u8 data) {
+    void Write8(const Common::ProcessAddress addr, const u8 data) {
         Write<u8>(addr, data);
     }
 
-    void Write16(const VAddr addr, const u16 data) {
+    void Write16(const Common::ProcessAddress addr, const u16 data) {
         if ((addr & 1) == 0) {
             Write<u16_le>(addr, data);
         } else {
@@ -125,7 +130,7 @@ struct Memory::Impl {
         }
     }
 
-    void Write32(const VAddr addr, const u32 data) {
+    void Write32(const Common::ProcessAddress addr, const u32 data) {
         if ((addr & 3) == 0) {
             Write<u32_le>(addr, data);
         } else {
@@ -134,7 +139,7 @@ struct Memory::Impl {
         }
     }
 
-    void Write64(const VAddr addr, const u64 data) {
+    void Write64(const Common::ProcessAddress addr, const u64 data) {
         if ((addr & 7) == 0) {
             Write<u64_le>(addr, data);
         } else {
@@ -143,23 +148,23 @@ struct Memory::Impl {
         }
     }
 
-    bool WriteExclusive8(const VAddr addr, const u8 data, const u8 expected) {
+    bool WriteExclusive8(const Common::ProcessAddress addr, const u8 data, const u8 expected) {
         return WriteExclusive<u8>(addr, data, expected);
     }
 
-    bool WriteExclusive16(const VAddr addr, const u16 data, const u16 expected) {
+    bool WriteExclusive16(const Common::ProcessAddress addr, const u16 data, const u16 expected) {
         return WriteExclusive<u16_le>(addr, data, expected);
     }
 
-    bool WriteExclusive32(const VAddr addr, const u32 data, const u32 expected) {
+    bool WriteExclusive32(const Common::ProcessAddress addr, const u32 data, const u32 expected) {
         return WriteExclusive<u32_le>(addr, data, expected);
     }
 
-    bool WriteExclusive64(const VAddr addr, const u64 data, const u64 expected) {
+    bool WriteExclusive64(const Common::ProcessAddress addr, const u64 data, const u64 expected) {
         return WriteExclusive<u64_le>(addr, data, expected);
     }
 
-    std::string ReadCString(VAddr vaddr, std::size_t max_length) {
+    std::string ReadCString(Common::ProcessAddress vaddr, std::size_t max_length) {
         std::string string;
         string.reserve(max_length);
         for (std::size_t i = 0; i < max_length; ++i) {
@@ -174,8 +179,9 @@ struct Memory::Impl {
         return string;
     }
 
-    void WalkBlock(const Kernel::KProcess& process, const VAddr addr, const std::size_t size,
-                   auto on_unmapped, auto on_memory, auto on_rasterizer, auto increment) {
+    void WalkBlock(const Kernel::KProcess& process, const Common::ProcessAddress addr,
+                   const std::size_t size, auto on_unmapped, auto on_memory, auto on_rasterizer,
+                   auto increment) {
         const auto& page_table = process.PageTable().PageTableImpl();
         std::size_t remaining_size = size;
         std::size_t page_index = addr >> YUZU_PAGEBITS;
@@ -185,7 +191,7 @@ struct Memory::Impl {
             const std::size_t copy_amount =
                 std::min(static_cast<std::size_t>(YUZU_PAGESIZE) - page_offset, remaining_size);
             const auto current_vaddr =
-                static_cast<VAddr>((page_index << YUZU_PAGEBITS) + page_offset);
+                static_cast<u64>((page_index << YUZU_PAGEBITS) + page_offset);
 
             const auto [pointer, type] = page_table.pointers[page_index].PointerType();
             switch (type) {
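WalkBlock advances one page at a time and rebuilds current_vaddr from the page index and intra-page offset on each iteration. Assuming the usual 4 KiB pages (YUZU_PAGEBITS = 12), the split and recombination are exact:

    #include <cstdint>

    constexpr unsigned PageBits = 12;                    // assuming 4 KiB pages
    constexpr std::uint64_t PageSize = 1ULL << PageBits; // 0x1000

    constexpr std::uint64_t CurrentVaddr(std::uint64_t page_index, std::uint64_t page_offset) {
        return (page_index << PageBits) + page_offset;
    }

    // 0x12345678 splits into page 0x12345 and offset 0x678, and recombines exactly:
    static_assert(CurrentVaddr(0x12345678 >> PageBits, 0x12345678 % PageSize) == 0x12345678);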
@@ -220,24 +226,24 @@ struct Memory::Impl {
     }
 
     template <bool UNSAFE>
-    void ReadBlockImpl(const Kernel::KProcess& process, const VAddr src_addr, void* dest_buffer,
-                       const std::size_t size) {
+    void ReadBlockImpl(const Kernel::KProcess& process, const Common::ProcessAddress src_addr,
+                       void* dest_buffer, const std::size_t size) {
         WalkBlock(
             process, src_addr, size,
             [src_addr, size, &dest_buffer](const std::size_t copy_amount,
-                                           const VAddr current_vaddr) {
+                                           const Common::ProcessAddress current_vaddr) {
                 LOG_ERROR(HW_Memory,
                           "Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
-                          current_vaddr, src_addr, size);
+                          GetInteger(current_vaddr), GetInteger(src_addr), size);
                 std::memset(dest_buffer, 0, copy_amount);
             },
             [&](const std::size_t copy_amount, const u8* const src_ptr) {
                 std::memcpy(dest_buffer, src_ptr, copy_amount);
             },
-            [&](const VAddr current_vaddr, const std::size_t copy_amount,
+            [&](const Common::ProcessAddress current_vaddr, const std::size_t copy_amount,
                 const u8* const host_ptr) {
                 if constexpr (!UNSAFE) {
-                    system.GPU().FlushRegion(current_vaddr, copy_amount);
+                    system.GPU().FlushRegion(GetInteger(current_vaddr), copy_amount);
                 }
                 std::memcpy(dest_buffer, host_ptr, copy_amount);
             },
@@ -246,30 +252,34 @@ struct Memory::Impl {
             });
     }
 
-    void ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
+    void ReadBlock(const Common::ProcessAddress src_addr, void* dest_buffer,
+                   const std::size_t size) {
         ReadBlockImpl<false>(*system.ApplicationProcess(), src_addr, dest_buffer, size);
     }
 
-    void ReadBlockUnsafe(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
+    void ReadBlockUnsafe(const Common::ProcessAddress src_addr, void* dest_buffer,
+                         const std::size_t size) {
         ReadBlockImpl<true>(*system.ApplicationProcess(), src_addr, dest_buffer, size);
     }
 
     template <bool UNSAFE>
-    void WriteBlockImpl(const Kernel::KProcess& process, const VAddr dest_addr,
+    void WriteBlockImpl(const Kernel::KProcess& process, const Common::ProcessAddress dest_addr,
                         const void* src_buffer, const std::size_t size) {
         WalkBlock(
             process, dest_addr, size,
-            [dest_addr, size](const std::size_t copy_amount, const VAddr current_vaddr) {
+            [dest_addr, size](const std::size_t copy_amount,
+                              const Common::ProcessAddress current_vaddr) {
                 LOG_ERROR(HW_Memory,
                           "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
-                          current_vaddr, dest_addr, size);
+                          GetInteger(current_vaddr), GetInteger(dest_addr), size);
             },
             [&](const std::size_t copy_amount, u8* const dest_ptr) {
                 std::memcpy(dest_ptr, src_buffer, copy_amount);
             },
-            [&](const VAddr current_vaddr, const std::size_t copy_amount, u8* const host_ptr) {
+            [&](const Common::ProcessAddress current_vaddr, const std::size_t copy_amount,
+                u8* const host_ptr) {
                 if constexpr (!UNSAFE) {
-                    system.GPU().InvalidateRegion(current_vaddr, copy_amount);
+                    system.GPU().InvalidateRegion(GetInteger(current_vaddr), copy_amount);
                 }
                 std::memcpy(host_ptr, src_buffer, copy_amount);
             },
@@ -278,71 +288,77 @@ struct Memory::Impl {
             });
     }
 
-    void WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
+    void WriteBlock(const Common::ProcessAddress dest_addr, const void* src_buffer,
+                    const std::size_t size) {
         WriteBlockImpl<false>(*system.ApplicationProcess(), dest_addr, src_buffer, size);
     }
 
-    void WriteBlockUnsafe(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
+    void WriteBlockUnsafe(const Common::ProcessAddress dest_addr, const void* src_buffer,
+                          const std::size_t size) {
         WriteBlockImpl<true>(*system.ApplicationProcess(), dest_addr, src_buffer, size);
     }
 
-    void ZeroBlock(const Kernel::KProcess& process, const VAddr dest_addr, const std::size_t size) {
+    void ZeroBlock(const Kernel::KProcess& process, const Common::ProcessAddress dest_addr,
+                   const std::size_t size) {
         WalkBlock(
             process, dest_addr, size,
-            [dest_addr, size](const std::size_t copy_amount, const VAddr current_vaddr) {
+            [dest_addr, size](const std::size_t copy_amount,
+                              const Common::ProcessAddress current_vaddr) {
                 LOG_ERROR(HW_Memory,
                           "Unmapped ZeroBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
-                          current_vaddr, dest_addr, size);
+                          GetInteger(current_vaddr), GetInteger(dest_addr), size);
             },
             [](const std::size_t copy_amount, u8* const dest_ptr) {
                 std::memset(dest_ptr, 0, copy_amount);
             },
-            [&](const VAddr current_vaddr, const std::size_t copy_amount, u8* const host_ptr) {
-                system.GPU().InvalidateRegion(current_vaddr, copy_amount);
+            [&](const Common::ProcessAddress current_vaddr, const std::size_t copy_amount,
+                u8* const host_ptr) {
+                system.GPU().InvalidateRegion(GetInteger(current_vaddr), copy_amount);
                 std::memset(host_ptr, 0, copy_amount);
             },
             [](const std::size_t copy_amount) {});
     }
 
-    void CopyBlock(const Kernel::KProcess& process, VAddr dest_addr, VAddr src_addr,
-                   const std::size_t size) {
+    void CopyBlock(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
+                   Common::ProcessAddress src_addr, const std::size_t size) {
         WalkBlock(
             process, dest_addr, size,
-            [&](const std::size_t copy_amount, const VAddr current_vaddr) {
+            [&](const std::size_t copy_amount, const Common::ProcessAddress current_vaddr) {
                 LOG_ERROR(HW_Memory,
                           "Unmapped CopyBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
-                          current_vaddr, src_addr, size);
+                          GetInteger(current_vaddr), GetInteger(src_addr), size);
                 ZeroBlock(process, dest_addr, copy_amount);
             },
             [&](const std::size_t copy_amount, const u8* const src_ptr) {
                 WriteBlockImpl<false>(process, dest_addr, src_ptr, copy_amount);
             },
-            [&](const VAddr current_vaddr, const std::size_t copy_amount, u8* const host_ptr) {
-                system.GPU().FlushRegion(current_vaddr, copy_amount);
+            [&](const Common::ProcessAddress current_vaddr, const std::size_t copy_amount,
+                u8* const host_ptr) {
+                system.GPU().FlushRegion(GetInteger(current_vaddr), copy_amount);
                 WriteBlockImpl<false>(process, dest_addr, host_ptr, copy_amount);
             },
             [&](const std::size_t copy_amount) {
-                dest_addr += static_cast<VAddr>(copy_amount);
-                src_addr += static_cast<VAddr>(copy_amount);
+                dest_addr += copy_amount;
+                src_addr += copy_amount;
             });
     }
 
     template <typename Callback>
-    Result PerformCacheOperation(const Kernel::KProcess& process, VAddr dest_addr, std::size_t size,
-                                 Callback&& cb) {
+    Result PerformCacheOperation(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
+                                 std::size_t size, Callback&& cb) {
         class InvalidMemoryException : public std::exception {};
 
         try {
             WalkBlock(
                 process, dest_addr, size,
-                [&](const std::size_t block_size, const VAddr current_vaddr) {
-                    LOG_ERROR(HW_Memory, "Unmapped cache maintenance @ {:#018X}", current_vaddr);
+                [&](const std::size_t block_size, const Common::ProcessAddress current_vaddr) {
+                    LOG_ERROR(HW_Memory, "Unmapped cache maintenance @ {:#018X}",
+                              GetInteger(current_vaddr));
                     throw InvalidMemoryException();
                 },
                 [&](const std::size_t block_size, u8* const host_ptr) {},
-                [&](const VAddr current_vaddr, const std::size_t block_size, u8* const host_ptr) {
-                    cb(current_vaddr, block_size);
-                },
+                [&](const Common::ProcessAddress current_vaddr, const std::size_t block_size,
+                    u8* const host_ptr) { cb(current_vaddr, block_size); },
                 [](const std::size_t block_size) {});
         } catch (InvalidMemoryException&) {
             return Kernel::ResultInvalidCurrentMemory;
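PerformCacheOperation reuses WalkBlock for cache maintenance and aborts the walk by throwing from the unmapped-page callback, then translates the exception into ResultInvalidCurrentMemory. The control flow, reduced to a toy that always hits an unmapped page:

    #include <cstddef>
    #include <cstdint>
    #include <exception>
    #include <functional>

    enum class Result { Success, InvalidCurrentMemory };

    // Toy WalkBlock: pretends the very first page of the range is unmapped.
    inline void Walk(std::uint64_t addr, std::size_t size,
                     const std::function<void(std::uint64_t)>& on_unmapped) {
        (void)size;
        on_unmapped(addr);
    }

    Result PerformCacheOperation(std::uint64_t addr, std::size_t size) {
        class InvalidMemoryException : public std::exception {};
        try {
            // Throwing from the callback is the only way to stop the walk early.
            Walk(addr, size, [](std::uint64_t) { throw InvalidMemoryException(); });
        } catch (InvalidMemoryException&) {
            return Result::InvalidCurrentMemory;
        }
        return Result::Success;
    }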
@@ -351,34 +367,40 @@ struct Memory::Impl {
         return ResultSuccess;
     }
 
-    Result InvalidateDataCache(const Kernel::KProcess& process, VAddr dest_addr, std::size_t size) {
-        auto on_rasterizer = [&](const VAddr current_vaddr, const std::size_t block_size) {
+    Result InvalidateDataCache(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
+                               std::size_t size) {
+        auto on_rasterizer = [&](const Common::ProcessAddress current_vaddr,
+                                 const std::size_t block_size) {
             // dc ivac: Invalidate to point of coherency
             // GPU flush -> CPU invalidate
-            system.GPU().FlushRegion(current_vaddr, block_size);
+            system.GPU().FlushRegion(GetInteger(current_vaddr), block_size);
         };
         return PerformCacheOperation(process, dest_addr, size, on_rasterizer);
     }
 
-    Result StoreDataCache(const Kernel::KProcess& process, VAddr dest_addr, std::size_t size) {
-        auto on_rasterizer = [&](const VAddr current_vaddr, const std::size_t block_size) {
+    Result StoreDataCache(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
+                          std::size_t size) {
+        auto on_rasterizer = [&](const Common::ProcessAddress current_vaddr,
+                                 const std::size_t block_size) {
             // dc cvac: Store to point of coherency
             // CPU flush -> GPU invalidate
-            system.GPU().InvalidateRegion(current_vaddr, block_size);
+            system.GPU().InvalidateRegion(GetInteger(current_vaddr), block_size);
         };
         return PerformCacheOperation(process, dest_addr, size, on_rasterizer);
     }
 
-    Result FlushDataCache(const Kernel::KProcess& process, VAddr dest_addr, std::size_t size) {
-        auto on_rasterizer = [&](const VAddr current_vaddr, const std::size_t block_size) {
+    Result FlushDataCache(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
+                          std::size_t size) {
+        auto on_rasterizer = [&](const Common::ProcessAddress current_vaddr,
+                                 const std::size_t block_size) {
             // dc civac: Store to point of coherency, and invalidate from cache
             // CPU flush -> GPU invalidate
-            system.GPU().InvalidateRegion(current_vaddr, block_size);
+            system.GPU().InvalidateRegion(GetInteger(current_vaddr), block_size);
         };
         return PerformCacheOperation(process, dest_addr, size, on_rasterizer);
     }
 
-    void MarkRegionDebug(VAddr vaddr, u64 size, bool debug) {
+    void MarkRegionDebug(u64 vaddr, u64 size, bool debug) {
         if (vaddr == 0) {
             return;
         }
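Note: the three cache SVCs above differ only in the rasterizer callback handed to PerformCacheOperation. A guest-side invalidate needs GPU output flushed back to CPU memory first, while a guest-side clean means the GPU's cached copy is now stale. A minimal self-contained sketch of that mapping (the enum and the two Gpu* functions are illustrative stand-ins, not part of this patch):

    #include <cstdint>
    #include <cstdio>

    // Illustrative stand-ins for the emulator's rasterizer interface.
    void GpuFlushRegion(std::uint64_t addr, std::uint64_t size) {
        std::printf("GPU flush      0x%016llx (+0x%llx)\n",
                    static_cast<unsigned long long>(addr), static_cast<unsigned long long>(size));
    }
    void GpuInvalidateRegion(std::uint64_t addr, std::uint64_t size) {
        std::printf("GPU invalidate 0x%016llx (+0x%llx)\n",
                    static_cast<unsigned long long>(addr), static_cast<unsigned long long>(size));
    }

    enum class CacheOp {
        InvalidateToPoc,      // dc ivac
        CleanToPoc,           // dc cvac
        CleanInvalidateToPoc, // dc civac
    };

    // Mirrors the on_rasterizer callbacks in the hunk above.
    void OnGuestCacheOp(CacheOp op, std::uint64_t addr, std::uint64_t size) {
        switch (op) {
        case CacheOp::InvalidateToPoc:
            GpuFlushRegion(addr, size); // GPU flush -> CPU invalidate
            break;
        case CacheOp::CleanToPoc:
        case CacheOp::CleanInvalidateToPoc:
            GpuInvalidateRegion(addr, size); // CPU flush -> GPU invalidate
            break;
        }
    }

    int main() {
        OnGuestCacheOp(CacheOp::CleanToPoc, 0x0000000008000000ULL, 0x1000);
    }

The dc civac case reuses the clean path because, from the emulator's point of view, the GPU-visible effect of clean and clean+invalidate is the same.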
@@ -434,7 +456,7 @@ struct Memory::Impl {
         }
     }
 
-    void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
+    void RasterizerMarkRegionCached(u64 vaddr, u64 size, bool cached) {
         if (vaddr == 0) {
             return;
         }
@@ -514,10 +536,12 @@ struct Memory::Impl {
      * @param target The target address to begin mapping from.
      * @param type The page type to map the memory as.
      */
-    void MapPages(Common::PageTable& page_table, VAddr base, u64 size, PAddr target,
-                  Common::PageType type) {
-        LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", target, base * YUZU_PAGESIZE,
-                  (base + size) * YUZU_PAGESIZE);
+    void MapPages(Common::PageTable& page_table, Common::ProcessAddress base_address, u64 size,
+                  Common::PhysicalAddress target, Common::PageType type) {
+        auto base = GetInteger(base_address);
+
+        LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", GetInteger(target),
+                  base * YUZU_PAGESIZE, (base + size) * YUZU_PAGESIZE);
 
         // During boot, current_page_table might not be set yet, in which case we need not flush
         if (system.IsPoweredOn()) {
@@ -530,7 +554,7 @@ struct Memory::Impl {
             }
         }
 
-        const VAddr end = base + size;
+        const Common::ProcessAddress end = base + size;
         ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}",
                    base + page_table.pointers.size());
 
@@ -548,7 +572,7 @@ struct Memory::Impl {
         while (base != end) {
             page_table.pointers[base].Store(
                 system.DeviceMemory().GetPointer<u8>(target) - (base << YUZU_PAGEBITS), type);
-            page_table.backing_addr[base] = target - (base << YUZU_PAGEBITS);
+            page_table.backing_addr[base] = GetInteger(target) - (base << YUZU_PAGEBITS);
 
             ASSERT_MSG(page_table.pointers[base].Pointer(),
                        "memory mapping base yield a nullptr within the table");
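Note: the per-page pointer stored by this loop is deliberately biased: it is the host pointer for the page minus the page's own guest byte offset, so a reader recovers the host address with a single add (entry + vaddr). A compilable sketch of the arithmetic, assuming 4 KiB pages (kPageBits and the helper are illustrative; YUZU_PAGEBITS plays the same role above):

    #include <cstdint>

    constexpr unsigned kPageBits = 12; // assumed; stands in for YUZU_PAGEBITS (4 KiB pages)

    // Bias the host pointer for page `page_index` so that `entry + vaddr`
    // yields the host address for any vaddr inside that page. Like the
    // emulator, this relies on flat pointer arithmetic outside the array,
    // which is fine on the hosts it targets.
    std::uint8_t* MakeBiasedEntry(std::uint8_t* page_host_base, std::uint64_t page_index) {
        return page_host_base - (page_index << kPageBits);
    }

    int main() {
        static std::uint8_t backing[1 << kPageBits]{};
        const std::uint64_t page_index = 3; // guest page 3 -> guest vaddrs 0x3000..0x3fff
        std::uint8_t* entry = MakeBiasedEntry(backing, page_index);
        const std::uint64_t vaddr = (page_index << kPageBits) + 0x10;
        // One add recovers the host pointer: backing - 0x3000 + 0x3010 == backing + 0x10.
        return (entry + vaddr == backing + 0x10) ? 0 : 1;
    }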
@@ -559,9 +583,9 @@ struct Memory::Impl {
         }
     }
 
-    [[nodiscard]] u8* GetPointerImpl(VAddr vaddr, auto on_unmapped, auto on_rasterizer) const {
+    [[nodiscard]] u8* GetPointerImpl(u64 vaddr, auto on_unmapped, auto on_rasterizer) const {
         // AARCH64 masks the upper 16 bit of all memory accesses
-        vaddr &= 0xffffffffffffULL;
+        vaddr = vaddr & 0xffffffffffffULL;
 
         if (vaddr >= 1uLL << current_page_table->GetAddressSpaceBits()) {
             on_unmapped();
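Note: GetPointerImpl now takes a plain u64 on purpose; by the time an address reaches the page walk it is just a number. The first checks canonicalize it the way AArch64 hardware does, roughly as in this sketch (the 39-bit space in the example is an assumption; the real value comes from GetAddressSpaceBits()):

    #include <cstdint>
    #include <cstdio>

    // Only the low 48 bits of an AArch64 access are significant.
    constexpr std::uint64_t kVaMask = 0xffffffffffffULL;

    bool IsInAddressSpace(std::uint64_t vaddr, unsigned address_space_bits) {
        vaddr &= kVaMask; // drop the tag/sign-extension bits, as the hardware does
        return vaddr < (std::uint64_t{1} << address_space_bits);
    }

    int main() {
        // A "tagged" pointer still resolves inside a 39-bit address space...
        std::printf("%d\n", IsInAddressSpace(0xffff000008000000ULL, 39)); // 1
        // ...but an address beyond the space is reported unmapped.
        std::printf("%d\n", IsInAddressSpace(0x0000800000000000ULL, 39)); // 0
    }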
@@ -593,15 +617,18 @@ struct Memory::Impl {
             return nullptr;
     }
 
-    [[nodiscard]] u8* GetPointer(const VAddr vaddr) const {
+    [[nodiscard]] u8* GetPointer(const Common::ProcessAddress vaddr) const {
         return GetPointerImpl(
-            vaddr, [vaddr]() { LOG_ERROR(HW_Memory, "Unmapped GetPointer @ 0x{:016X}", vaddr); },
+            GetInteger(vaddr),
+            [vaddr]() {
+                LOG_ERROR(HW_Memory, "Unmapped GetPointer @ 0x{:016X}", GetInteger(vaddr));
+            },
             []() {});
     }
 
-    [[nodiscard]] u8* GetPointerSilent(const VAddr vaddr) const {
+    [[nodiscard]] u8* GetPointerSilent(const Common::ProcessAddress vaddr) const {
         return GetPointerImpl(
-            vaddr, []() {}, []() {});
+            GetInteger(vaddr), []() {}, []() {});
     }
 
     /**
@@ -616,14 +643,15 @@ struct Memory::Impl {
      * @returns The instance of T read from the specified virtual address.
      */
     template <typename T>
-    T Read(VAddr vaddr) {
+    T Read(Common::ProcessAddress vaddr) {
         T result = 0;
         const u8* const ptr = GetPointerImpl(
-            vaddr,
+            GetInteger(vaddr),
             [vaddr]() {
-                LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:016X}", sizeof(T) * 8, vaddr);
+                LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:016X}", sizeof(T) * 8,
+                          GetInteger(vaddr));
             },
-            [&]() { system.GPU().FlushRegion(vaddr, sizeof(T)); });
+            [&]() { system.GPU().FlushRegion(GetInteger(vaddr), sizeof(T)); });
         if (ptr) {
             std::memcpy(&result, ptr, sizeof(T));
         }
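Note: Read never dereferences the resolved pointer as a T; it memcpys into a zero-initialized result, which is the portable way to type-pun and also why an unmapped read cleanly yields 0. The same pattern, standalone (buffer contents illustrative):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Copy sizeof(T) bytes out of host memory without alignment or
    // strict-aliasing violations; an absent mapping (nullptr) reads as zero.
    template <typename T>
    T ReadTyped(const std::uint8_t* host_ptr) {
        T result{};
        if (host_ptr != nullptr) {
            std::memcpy(&result, host_ptr, sizeof(T));
        }
        return result;
    }

    int main() {
        const std::uint8_t bytes[8] = {0x78, 0x56, 0x34, 0x12, 0, 0, 0, 0};
        // 0x12345678 on little-endian hosts.
        std::printf("0x%08x\n", static_cast<unsigned>(ReadTyped<std::uint32_t>(bytes)));
        // 0x00000000 for an unmapped address.
        std::printf("0x%08x\n", static_cast<unsigned>(ReadTyped<std::uint32_t>(nullptr)));
    }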
@@ -640,28 +668,28 @@ struct Memory::Impl {
      * is undefined.
      */
     template <typename T>
-    void Write(VAddr vaddr, const T data) {
+    void Write(Common::ProcessAddress vaddr, const T data) {
         u8* const ptr = GetPointerImpl(
-            vaddr,
+            GetInteger(vaddr),
             [vaddr, data]() {
                 LOG_ERROR(HW_Memory, "Unmapped Write{} @ 0x{:016X} = 0x{:016X}", sizeof(T) * 8,
-                          vaddr, static_cast<u64>(data));
+                          GetInteger(vaddr), static_cast<u64>(data));
             },
-            [&]() { system.GPU().InvalidateRegion(vaddr, sizeof(T)); });
+            [&]() { system.GPU().InvalidateRegion(GetInteger(vaddr), sizeof(T)); });
         if (ptr) {
             std::memcpy(ptr, &data, sizeof(T));
         }
     }
 
     template <typename T>
-    bool WriteExclusive(VAddr vaddr, const T data, const T expected) {
+    bool WriteExclusive(Common::ProcessAddress vaddr, const T data, const T expected) {
         u8* const ptr = GetPointerImpl(
-            vaddr,
+            GetInteger(vaddr),
             [vaddr, data]() {
                 LOG_ERROR(HW_Memory, "Unmapped WriteExclusive{} @ 0x{:016X} = 0x{:016X}",
-                          sizeof(T) * 8, vaddr, static_cast<u64>(data));
+                          sizeof(T) * 8, GetInteger(vaddr), static_cast<u64>(data));
             },
-            [&]() { system.GPU().InvalidateRegion(vaddr, sizeof(T)); });
+            [&]() { system.GPU().InvalidateRegion(GetInteger(vaddr), sizeof(T)); });
         if (ptr) {
             const auto volatile_pointer = reinterpret_cast<volatile T*>(ptr);
             return Common::AtomicCompareAndSwap(volatile_pointer, data, expected);
@@ -669,14 +697,14 @@ struct Memory::Impl {
         return true;
     }
 
-    bool WriteExclusive128(VAddr vaddr, const u128 data, const u128 expected) {
+    bool WriteExclusive128(Common::ProcessAddress vaddr, const u128 data, const u128 expected) {
         u8* const ptr = GetPointerImpl(
-            vaddr,
+            GetInteger(vaddr),
             [vaddr, data]() {
                 LOG_ERROR(HW_Memory, "Unmapped WriteExclusive128 @ 0x{:016X} = 0x{:016X}{:016X}",
-                          vaddr, static_cast<u64>(data[1]), static_cast<u64>(data[0]));
+                          GetInteger(vaddr), static_cast<u64>(data[1]), static_cast<u64>(data[0]));
             },
-            [&]() { system.GPU().InvalidateRegion(vaddr, sizeof(u128)); });
+            [&]() { system.GPU().InvalidateRegion(GetInteger(vaddr), sizeof(u128)); });
         if (ptr) {
             const auto volatile_pointer = reinterpret_cast<volatile u64*>(ptr);
             return Common::AtomicCompareAndSwap(volatile_pointer, data, expected);
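Note: WriteExclusive backs the guest's exclusive-store path: the write commits only if the destination still holds the expected value, and the boolean result lets the caller retry. The contract reduced to standard C++ (a sketch of the semantics, not yuzu's Common::AtomicCompareAndSwap itself):

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    // Succeeds (and stores `data`) only if `word` still holds `expected`.
    template <typename T>
    bool WriteExclusiveSketch(std::atomic<T>& word, T data, T expected) {
        return word.compare_exchange_strong(expected, data);
    }

    int main() {
        std::atomic<std::uint32_t> word{42};
        std::printf("%d\n", WriteExclusiveSketch<std::uint32_t>(word, 7, 42)); // 1: 42 -> 7
        std::printf("%d\n", WriteExclusiveSketch<std::uint32_t>(word, 9, 42)); // 0: word is now 7
    }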
@@ -702,15 +730,16 @@ void Memory::SetCurrentPageTable(Kernel::KProcess& process, u32 core_id) {
     impl->SetCurrentPageTable(process, core_id);
 }
 
-void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
+void Memory::MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+                             Common::PhysicalAddress target) {
     impl->MapMemoryRegion(page_table, base, size, target);
 }
 
-void Memory::UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
+void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) {
     impl->UnmapRegion(page_table, base, size);
 }
 
-bool Memory::IsValidVirtualAddress(const VAddr vaddr) const {
+bool Memory::IsValidVirtualAddress(const Common::ProcessAddress vaddr) const {
     const Kernel::KProcess& process = *system.ApplicationProcess();
     const auto& page_table = process.PageTable().PageTableImpl();
     const size_t page = vaddr >> YUZU_PAGEBITS;
@@ -722,9 +751,9 @@ bool Memory::IsValidVirtualAddress(const VAddr vaddr) const {
            type == Common::PageType::DebugMemory;
 }
 
-bool Memory::IsValidVirtualAddressRange(VAddr base, u64 size) const {
-    VAddr end = base + size;
-    VAddr page = Common::AlignDown(base, YUZU_PAGESIZE);
+bool Memory::IsValidVirtualAddressRange(Common::ProcessAddress base, u64 size) const {
+    Common::ProcessAddress end = base + size;
+    Common::ProcessAddress page = Common::AlignDown(GetInteger(base), YUZU_PAGESIZE);
 
     for (; page < end; page += YUZU_PAGESIZE) {
         if (!IsValidVirtualAddress(page)) {
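Note: the range check accepts an unaligned base: it aligns down to the first page boundary, then tests one page at a time until it passes end. The same walk, self-contained (4 KiB pages and the validity predicate are assumptions):

    #include <cstdint>
    #include <cstdio>

    constexpr std::uint64_t kPageSize = 0x1000; // assumed 4 KiB, like YUZU_PAGESIZE here

    template <typename IsValidPage>
    bool IsValidRange(std::uint64_t base, std::uint64_t size, IsValidPage&& is_valid) {
        const std::uint64_t end = base + size;
        // Align down so a range starting mid-page still checks that page.
        for (std::uint64_t page = base & ~(kPageSize - 1); page < end; page += kPageSize) {
            if (!is_valid(page)) {
                return false;
            }
        }
        return true;
    }

    int main() {
        // Pretend only the first 16 MiB are mapped.
        auto mapped = [](std::uint64_t page) { return page < 16 * 1024 * 1024; };
        std::printf("%d\n", IsValidRange(0x0000fff0, 0x20, mapped));   // 1: two valid pages
        std::printf("%d\n", IsValidRange(0x00fff000, 0x2000, mapped)); // 0: runs past the mapping
    }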
@@ -735,131 +764,135 @@ bool Memory::IsValidVirtualAddressRange(VAddr base, u64 size) const {
     return true;
 }
 
-u8* Memory::GetPointer(VAddr vaddr) {
+u8* Memory::GetPointer(Common::ProcessAddress vaddr) {
     return impl->GetPointer(vaddr);
 }
 
-u8* Memory::GetPointerSilent(VAddr vaddr) {
+u8* Memory::GetPointerSilent(Common::ProcessAddress vaddr) {
     return impl->GetPointerSilent(vaddr);
 }
 
-const u8* Memory::GetPointer(VAddr vaddr) const {
+const u8* Memory::GetPointer(Common::ProcessAddress vaddr) const {
     return impl->GetPointer(vaddr);
 }
 
-u8 Memory::Read8(const VAddr addr) {
+u8 Memory::Read8(const Common::ProcessAddress addr) {
     return impl->Read8(addr);
 }
 
-u16 Memory::Read16(const VAddr addr) {
+u16 Memory::Read16(const Common::ProcessAddress addr) {
     return impl->Read16(addr);
 }
 
-u32 Memory::Read32(const VAddr addr) {
+u32 Memory::Read32(const Common::ProcessAddress addr) {
     return impl->Read32(addr);
 }
 
-u64 Memory::Read64(const VAddr addr) {
+u64 Memory::Read64(const Common::ProcessAddress addr) {
     return impl->Read64(addr);
 }
 
-void Memory::Write8(VAddr addr, u8 data) {
+void Memory::Write8(Common::ProcessAddress addr, u8 data) {
     impl->Write8(addr, data);
 }
 
-void Memory::Write16(VAddr addr, u16 data) {
+void Memory::Write16(Common::ProcessAddress addr, u16 data) {
     impl->Write16(addr, data);
 }
 
-void Memory::Write32(VAddr addr, u32 data) {
+void Memory::Write32(Common::ProcessAddress addr, u32 data) {
     impl->Write32(addr, data);
 }
 
-void Memory::Write64(VAddr addr, u64 data) {
+void Memory::Write64(Common::ProcessAddress addr, u64 data) {
     impl->Write64(addr, data);
 }
 
-bool Memory::WriteExclusive8(VAddr addr, u8 data, u8 expected) {
+bool Memory::WriteExclusive8(Common::ProcessAddress addr, u8 data, u8 expected) {
     return impl->WriteExclusive8(addr, data, expected);
 }
 
-bool Memory::WriteExclusive16(VAddr addr, u16 data, u16 expected) {
+bool Memory::WriteExclusive16(Common::ProcessAddress addr, u16 data, u16 expected) {
     return impl->WriteExclusive16(addr, data, expected);
 }
 
-bool Memory::WriteExclusive32(VAddr addr, u32 data, u32 expected) {
+bool Memory::WriteExclusive32(Common::ProcessAddress addr, u32 data, u32 expected) {
     return impl->WriteExclusive32(addr, data, expected);
 }
 
-bool Memory::WriteExclusive64(VAddr addr, u64 data, u64 expected) {
+bool Memory::WriteExclusive64(Common::ProcessAddress addr, u64 data, u64 expected) {
     return impl->WriteExclusive64(addr, data, expected);
 }
 
-bool Memory::WriteExclusive128(VAddr addr, u128 data, u128 expected) {
+bool Memory::WriteExclusive128(Common::ProcessAddress addr, u128 data, u128 expected) {
     return impl->WriteExclusive128(addr, data, expected);
 }
 
-std::string Memory::ReadCString(VAddr vaddr, std::size_t max_length) {
+std::string Memory::ReadCString(Common::ProcessAddress vaddr, std::size_t max_length) {
     return impl->ReadCString(vaddr, max_length);
 }
 
-void Memory::ReadBlock(const Kernel::KProcess& process, const VAddr src_addr, void* dest_buffer,
-                       const std::size_t size) {
+void Memory::ReadBlock(const Kernel::KProcess& process, const Common::ProcessAddress src_addr,
+                       void* dest_buffer, const std::size_t size) {
     impl->ReadBlockImpl<false>(process, src_addr, dest_buffer, size);
 }
 
-void Memory::ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
+void Memory::ReadBlock(const Common::ProcessAddress src_addr, void* dest_buffer,
+                       const std::size_t size) {
     impl->ReadBlock(src_addr, dest_buffer, size);
 }
 
-void Memory::ReadBlockUnsafe(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
+void Memory::ReadBlockUnsafe(const Common::ProcessAddress src_addr, void* dest_buffer,
+                             const std::size_t size) {
     impl->ReadBlockUnsafe(src_addr, dest_buffer, size);
 }
 
-void Memory::WriteBlock(const Kernel::KProcess& process, VAddr dest_addr, const void* src_buffer,
-                        std::size_t size) {
+void Memory::WriteBlock(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
+                        const void* src_buffer, std::size_t size) {
     impl->WriteBlockImpl<false>(process, dest_addr, src_buffer, size);
 }
 
-void Memory::WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
+void Memory::WriteBlock(const Common::ProcessAddress dest_addr, const void* src_buffer,
+                        const std::size_t size) {
     impl->WriteBlock(dest_addr, src_buffer, size);
 }
 
-void Memory::WriteBlockUnsafe(const VAddr dest_addr, const void* src_buffer,
-                              const std::size_t size) {
+void Memory::WriteBlockUnsafe(const Common::ProcessAddress dest_addr, const void* src_buffer,
+                              const std::size_t size) {
     impl->WriteBlockUnsafe(dest_addr, src_buffer, size);
 }
 
-void Memory::CopyBlock(const Kernel::KProcess& process, VAddr dest_addr, VAddr src_addr,
-                       const std::size_t size) {
+void Memory::CopyBlock(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
+                       Common::ProcessAddress src_addr, const std::size_t size) {
     impl->CopyBlock(process, dest_addr, src_addr, size);
 }
 
-void Memory::ZeroBlock(const Kernel::KProcess& process, VAddr dest_addr, const std::size_t size) {
+void Memory::ZeroBlock(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
+                       const std::size_t size) {
     impl->ZeroBlock(process, dest_addr, size);
 }
 
-Result Memory::InvalidateDataCache(const Kernel::KProcess& process, VAddr dest_addr,
-                                   const std::size_t size) {
+Result Memory::InvalidateDataCache(const Kernel::KProcess& process,
+                                   Common::ProcessAddress dest_addr, const std::size_t size) {
     return impl->InvalidateDataCache(process, dest_addr, size);
 }
 
-Result Memory::StoreDataCache(const Kernel::KProcess& process, VAddr dest_addr,
-                              const std::size_t size) {
+Result Memory::StoreDataCache(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
+                              const std::size_t size) {
     return impl->StoreDataCache(process, dest_addr, size);
 }
 
-Result Memory::FlushDataCache(const Kernel::KProcess& process, VAddr dest_addr,
-                              const std::size_t size) {
+Result Memory::FlushDataCache(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
+                              const std::size_t size) {
     return impl->FlushDataCache(process, dest_addr, size);
 }
 
-void Memory::RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
-    impl->RasterizerMarkRegionCached(vaddr, size, cached);
+void Memory::RasterizerMarkRegionCached(Common::ProcessAddress vaddr, u64 size, bool cached) {
+    impl->RasterizerMarkRegionCached(GetInteger(vaddr), size, cached);
 }
 
-void Memory::MarkRegionDebug(VAddr vaddr, u64 size, bool debug) {
-    impl->MarkRegionDebug(vaddr, size, debug);
+void Memory::MarkRegionDebug(Common::ProcessAddress vaddr, u64 size, bool debug) {
+    impl->MarkRegionDebug(GetInteger(vaddr), size, debug);
 }
 
 } // namespace Core::Memory
diff --git a/src/core/memory.h b/src/core/memory.h
index 31fe699d8..ed4e87739 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -6,7 +6,7 @@
 #include <cstddef>
 #include <memory>
 #include <string>
-#include "common/common_types.h"
+#include "common/typed_address.h"
 #include "core/hle/result.h"
 
 namespace Common {
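Note: this include swap is the heart of the patch. common/typed_address.h supplies distinct wrapper types, Common::ProcessAddress and Common::PhysicalAddress, over what used to be the bare u64 aliases VAddr and PAddr, so mixing address spaces becomes a compile error and GetInteger() marks every deliberate return to plain arithmetic. A greatly simplified, self-contained sketch of the idea (the real header is richer, e.g. it also defines the arithmetic and comparison operators used throughout the hunks above; everything below is illustrative):

    #include <cstdint>

    // One wrapper template, distinguished only by a tag type.
    template <typename Tag>
    class TypedAddress {
    public:
        constexpr TypedAddress() = default;
        constexpr explicit TypedAddress(std::uint64_t value) : m_value{value} {}
        constexpr std::uint64_t Value() const { return m_value; }

    private:
        std::uint64_t m_value{};
    };

    // The single sanctioned escape hatch back to a plain integer.
    template <typename Tag>
    constexpr std::uint64_t GetInteger(TypedAddress<Tag> addr) {
        return addr.Value();
    }

    using ProcessAddress = TypedAddress<struct ProcessTag>;   // stands in for Common::ProcessAddress
    using PhysicalAddress = TypedAddress<struct PhysicalTag>; // stands in for Common::PhysicalAddress

    int main() {
        ProcessAddress va{0x8000000};
        // PhysicalAddress pa = va; // error: no conversion between the two address spaces
        return GetInteger(va) == 0x8000000 ? 0 : 1;
    }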
@@ -33,7 +33,7 @@ constexpr u64 YUZU_PAGESIZE = 1ULL << YUZU_PAGEBITS;
 constexpr u64 YUZU_PAGEMASK = YUZU_PAGESIZE - 1;
 
 /// Virtual user-space memory regions
-enum : VAddr {
+enum : u64 {
     /// TLS (Thread-Local Storage) related.
     TLS_ENTRY_SIZE = 0x200,
 
@@ -74,7 +74,8 @@ public:
      * @param target Buffer with the memory backing the mapping. Must be of length at least
      *               `size`.
      */
-    void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target);
+    void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+                         Common::PhysicalAddress target);
 
     /**
      * Unmaps a region of the emulated process address space.
@@ -83,7 +84,7 @@ public:
      * @param base The address to begin unmapping at.
      * @param size The amount of bytes to unmap.
      */
-    void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size);
+    void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size);
 
     /**
      * Checks whether or not the supplied address is a valid virtual
@@ -93,7 +94,7 @@ public:
      *
      * @returns True if the given virtual address is valid, false otherwise.
      */
-    [[nodiscard]] bool IsValidVirtualAddress(VAddr vaddr) const;
+    [[nodiscard]] bool IsValidVirtualAddress(Common::ProcessAddress vaddr) const;
 
     /**
      * Checks whether or not the supplied range of addresses are all valid
@@ -104,7 +105,7 @@ public:
      *
      * @returns True if all bytes in the given range are valid, false otherwise.
      */
-    [[nodiscard]] bool IsValidVirtualAddressRange(VAddr base, u64 size) const;
+    [[nodiscard]] bool IsValidVirtualAddressRange(Common::ProcessAddress base, u64 size) const;
 
     /**
      * Gets a pointer to the given address.
@@ -114,11 +115,11 @@ public:
      * @returns The pointer to the given address, if the address is valid.
      *          If the address is not valid, nullptr will be returned.
      */
-    u8* GetPointer(VAddr vaddr);
-    u8* GetPointerSilent(VAddr vaddr);
+    u8* GetPointer(Common::ProcessAddress vaddr);
+    u8* GetPointerSilent(Common::ProcessAddress vaddr);
 
     template <typename T>
-    T* GetPointer(VAddr vaddr) {
+    T* GetPointer(Common::ProcessAddress vaddr) {
         return reinterpret_cast<T*>(GetPointer(vaddr));
     }
 
@@ -130,10 +131,10 @@ public:
      * @returns The pointer to the given address, if the address is valid.
      *          If the address is not valid, nullptr will be returned.
      */
-    [[nodiscard]] const u8* GetPointer(VAddr vaddr) const;
+    [[nodiscard]] const u8* GetPointer(Common::ProcessAddress vaddr) const;
 
     template <typename T>
-    const T* GetPointer(VAddr vaddr) const {
+    const T* GetPointer(Common::ProcessAddress vaddr) const {
         return reinterpret_cast<T*>(GetPointer(vaddr));
     }
 
@@ -145,7 +146,7 @@ public:
      *
      * @returns the read 8-bit unsigned value.
      */
-    u8 Read8(VAddr addr);
+    u8 Read8(Common::ProcessAddress addr);
 
     /**
      * Reads a 16-bit unsigned value from the current process' address space
@@ -155,7 +156,7 @@ public:
      *
      * @returns the read 16-bit unsigned value.
      */
-    u16 Read16(VAddr addr);
+    u16 Read16(Common::ProcessAddress addr);
 
     /**
      * Reads a 32-bit unsigned value from the current process' address space
@@ -165,7 +166,7 @@ public:
      *
      * @returns the read 32-bit unsigned value.
      */
-    u32 Read32(VAddr addr);
+    u32 Read32(Common::ProcessAddress addr);
 
     /**
      * Reads a 64-bit unsigned value from the current process' address space
@@ -175,7 +176,7 @@ public:
      *
      * @returns the read 64-bit value.
      */
-    u64 Read64(VAddr addr);
+    u64 Read64(Common::ProcessAddress addr);
 
     /**
      * Writes an 8-bit unsigned integer to the given virtual address in
@@ -186,7 +187,7 @@ public:
      *
      * @post The memory at the given virtual address contains the specified data value.
      */
-    void Write8(VAddr addr, u8 data);
+    void Write8(Common::ProcessAddress addr, u8 data);
 
     /**
      * Writes a 16-bit unsigned integer to the given virtual address in
@@ -197,7 +198,7 @@ public:
      *
      * @post The memory range [addr, sizeof(data)) contains the given data value.
      */
-    void Write16(VAddr addr, u16 data);
+    void Write16(Common::ProcessAddress addr, u16 data);
 
     /**
      * Writes a 32-bit unsigned integer to the given virtual address in
@@ -208,7 +209,7 @@ public:
      *
      * @post The memory range [addr, sizeof(data)) contains the given data value.
      */
-    void Write32(VAddr addr, u32 data);
+    void Write32(Common::ProcessAddress addr, u32 data);
 
     /**
      * Writes a 64-bit unsigned integer to the given virtual address in
@@ -219,7 +220,7 @@ public:
      *
      * @post The memory range [addr, sizeof(data)) contains the given data value.
      */
-    void Write64(VAddr addr, u64 data);
+    void Write64(Common::ProcessAddress addr, u64 data);
 
     /**
      * Writes a 8-bit unsigned integer to the given virtual address in
@@ -232,7 +233,7 @@ public:
      *
      * @post The memory range [addr, sizeof(data)) contains the given data value.
      */
-    bool WriteExclusive8(VAddr addr, u8 data, u8 expected);
+    bool WriteExclusive8(Common::ProcessAddress addr, u8 data, u8 expected);
 
     /**
      * Writes a 16-bit unsigned integer to the given virtual address in
@@ -245,7 +246,7 @@ public:
      *
      * @post The memory range [addr, sizeof(data)) contains the given data value.
      */
-    bool WriteExclusive16(VAddr addr, u16 data, u16 expected);
+    bool WriteExclusive16(Common::ProcessAddress addr, u16 data, u16 expected);
 
     /**
      * Writes a 32-bit unsigned integer to the given virtual address in
@@ -258,7 +259,7 @@ public:
      *
      * @post The memory range [addr, sizeof(data)) contains the given data value.
      */
-    bool WriteExclusive32(VAddr addr, u32 data, u32 expected);
+    bool WriteExclusive32(Common::ProcessAddress addr, u32 data, u32 expected);
 
     /**
      * Writes a 64-bit unsigned integer to the given virtual address in
@@ -271,7 +272,7 @@ public:
      *
      * @post The memory range [addr, sizeof(data)) contains the given data value.
      */
-    bool WriteExclusive64(VAddr addr, u64 data, u64 expected);
+    bool WriteExclusive64(Common::ProcessAddress addr, u64 data, u64 expected);
 
     /**
      * Writes a 128-bit unsigned integer to the given virtual address in
@@ -284,7 +285,7 @@ public:
      *
      * @post The memory range [addr, sizeof(data)) contains the given data value.
      */
-    bool WriteExclusive128(VAddr addr, u128 data, u128 expected);
+    bool WriteExclusive128(Common::ProcessAddress addr, u128 data, u128 expected);
 
     /**
      * Reads a null-terminated string from the given virtual address.
@@ -301,7 +302,7 @@ public:
      *
      * @returns The read string.
      */
-    std::string ReadCString(VAddr vaddr, std::size_t max_length);
+    std::string ReadCString(Common::ProcessAddress vaddr, std::size_t max_length);
 
     /**
      * Reads a contiguous block of bytes from a specified process' address space.
@@ -320,8 +321,8 @@ public:
      * @post The range [dest_buffer, size) contains the read bytes from the
      *       process' address space.
      */
-    void ReadBlock(const Kernel::KProcess& process, VAddr src_addr, void* dest_buffer,
-                   std::size_t size);
+    void ReadBlock(const Kernel::KProcess& process, Common::ProcessAddress src_addr,
+                   void* dest_buffer, std::size_t size);
 
     /**
      * Reads a contiguous block of bytes from the current process' address space.
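Note: all of these block APIs sit on top of the page walk seen earlier in memory.cpp (WalkBlock): a request is split at page boundaries and each chunk is dispatched by its page type. A self-contained sketch of just the chunking arithmetic (4 KiB pages assumed; the callback shape is illustrative):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    constexpr std::uint64_t kPageSize = 0x1000; // assumed 4 KiB

    // Split [addr, addr + size) at page boundaries; the first and last
    // chunks may be partial pages.
    template <typename OnChunk>
    void WalkChunks(std::uint64_t addr, std::uint64_t size, OnChunk&& on_chunk) {
        while (size > 0) {
            const std::uint64_t page_offset = addr & (kPageSize - 1);
            const std::uint64_t chunk = std::min(size, kPageSize - page_offset);
            on_chunk(addr, chunk);
            addr += chunk;
            size -= chunk;
        }
    }

    int main() {
        // A 0x1800-byte read starting mid-page becomes three chunks:
        // 0x100 to finish the first page, a full 0x1000 page, then 0x700.
        WalkChunks(0x8000f00, 0x1800, [](std::uint64_t a, std::uint64_t n) {
            std::printf("chunk @ 0x%08llx size 0x%llx\n", static_cast<unsigned long long>(a),
                        static_cast<unsigned long long>(n));
        });
    }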
@@ -339,7 +340,7 @@ public:
      * @post The range [dest_buffer, size) contains the read bytes from the
      *       current process' address space.
      */
-    void ReadBlock(VAddr src_addr, void* dest_buffer, std::size_t size);
+    void ReadBlock(Common::ProcessAddress src_addr, void* dest_buffer, std::size_t size);
 
     /**
      * Reads a contiguous block of bytes from the current process' address space.
@@ -358,7 +359,7 @@ public:
      * @post The range [dest_buffer, size) contains the read bytes from the
      *       current process' address space.
      */
-    void ReadBlockUnsafe(VAddr src_addr, void* dest_buffer, std::size_t size);
+    void ReadBlockUnsafe(Common::ProcessAddress src_addr, void* dest_buffer, std::size_t size);
 
     /**
      * Writes a range of bytes into a given process' address space at the specified
@@ -380,8 +381,8 @@ public:
      *       and will mark that region as invalidated to caches that the active
      *       graphics backend may be maintaining over the course of execution.
      */
-    void WriteBlock(const Kernel::KProcess& process, VAddr dest_addr, const void* src_buffer,
-                    std::size_t size);
+    void WriteBlock(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
+                    const void* src_buffer, std::size_t size);
 
     /**
      * Writes a range of bytes into the current process' address space at the specified
@@ -402,7 +403,7 @@ public:
      *       and will mark that region as invalidated to caches that the active
      *       graphics backend may be maintaining over the course of execution.
      */
-    void WriteBlock(VAddr dest_addr, const void* src_buffer, std::size_t size);
+    void WriteBlock(Common::ProcessAddress dest_addr, const void* src_buffer, std::size_t size);
 
     /**
      * Writes a range of bytes into the current process' address space at the specified
@@ -420,7 +421,8 @@ public:
      *       will be ignored and an error will be logged.
      *
      */
-    void WriteBlockUnsafe(VAddr dest_addr, const void* src_buffer, std::size_t size);
+    void WriteBlockUnsafe(Common::ProcessAddress dest_addr, const void* src_buffer,
+                          std::size_t size);
 
     /**
      * Copies data within a process' address space to another location within the
@@ -434,8 +436,8 @@ public:
      * @post The range [dest_addr, size) within the process' address space contains the
      *       same data within the range [src_addr, size).
      */
-    void CopyBlock(const Kernel::KProcess& process, VAddr dest_addr, VAddr src_addr,
-                   std::size_t size);
+    void CopyBlock(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
+                   Common::ProcessAddress src_addr, std::size_t size);
 
     /**
      * Zeros a range of bytes within the current process' address space at the specified
@@ -448,7 +450,8 @@ public:
      * @post The range [dest_addr, size) within the process' address space contains the
      *       value 0.
      */
-    void ZeroBlock(const Kernel::KProcess& process, VAddr dest_addr, std::size_t size);
+    void ZeroBlock(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
+                   std::size_t size);
 
     /**
      * Invalidates a range of bytes within the current process' address space at the specified
@@ -459,7 +462,8 @@ public:
      * @param size The size of the range to invalidate, in bytes.
      *
      */
-    Result InvalidateDataCache(const Kernel::KProcess& process, VAddr dest_addr, std::size_t size);
+    Result InvalidateDataCache(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
+                               std::size_t size);
 
     /**
      * Stores a range of bytes within the current process' address space at the specified
@@ -470,7 +474,8 @@ public:
      * @param size The size of the range to store, in bytes.
      *
      */
-    Result StoreDataCache(const Kernel::KProcess& process, VAddr dest_addr, std::size_t size);
+    Result StoreDataCache(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
+                          std::size_t size);
 
     /**
      * Flushes a range of bytes within the current process' address space at the specified
@@ -481,7 +486,8 @@ public:
      * @param size The size of the range to flush, in bytes.
      *
      */
-    Result FlushDataCache(const Kernel::KProcess& process, VAddr dest_addr, std::size_t size);
+    Result FlushDataCache(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
+                          std::size_t size);
 
     /**
      * Marks each page within the specified address range as cached or uncached.
@@ -491,7 +497,7 @@ public:
      * @param cached Whether or not any pages within the address range should be
      *               marked as cached or uncached.
      */
-    void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached);
+    void RasterizerMarkRegionCached(Common::ProcessAddress vaddr, u64 size, bool cached);
 
     /**
      * Marks each page within the specified address range as debug or non-debug.
@@ -502,7 +508,7 @@ public:
      * @param debug Whether or not any pages within the address range should be
      *              marked as debug or non-debug.
      */
-    void MarkRegionDebug(VAddr vaddr, u64 size, bool debug);
+    void MarkRegionDebug(Common::ProcessAddress vaddr, u64 size, bool debug);
 
 private:
     Core::System& system;
diff --git a/src/core/memory/cheat_engine.cpp b/src/core/memory/cheat_engine.cpp
index de729955f..d1284a3a7 100644
--- a/src/core/memory/cheat_engine.cpp
+++ b/src/core/memory/cheat_engine.cpp
@@ -201,17 +201,17 @@ void CheatEngine::Initialize() {
 
     const auto& page_table = system.ApplicationProcess()->PageTable();
     metadata.heap_extents = {
-        .base = page_table.GetHeapRegionStart(),
+        .base = GetInteger(page_table.GetHeapRegionStart()),
         .size = page_table.GetHeapRegionSize(),
     };
 
     metadata.address_space_extents = {
-        .base = page_table.GetAddressSpaceStart(),
+        .base = GetInteger(page_table.GetAddressSpaceStart()),
         .size = page_table.GetAddressSpaceSize(),
     };
 
     metadata.alias_extents = {
-        .base = page_table.GetAliasCodeRegionStart(),
+        .base = GetInteger(page_table.GetAliasCodeRegionStart()),
         .size = page_table.GetAliasCodeRegionSize(),
     };
 
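Note: this hunk shows the pattern at a serialization boundary: the cheat metadata keeps plain integer extents, so each typed region start is unwrapped with GetInteger() exactly once, where it leaves the page-table API. A sketch of that shape (the struct, the ProcessAddress stand-in, and MakeExtents are all illustrative):

    #include <cstdint>

    // Stand-in for the plain-integer extents the cheat VM consumes.
    struct MemoryRegionExtents {
        std::uint64_t base{};
        std::uint64_t size{};
    };

    // Stand-ins for a typed region start and its unwrap, as in the hunk above.
    struct ProcessAddress {
        std::uint64_t value{};
    };
    constexpr std::uint64_t GetInteger(ProcessAddress addr) {
        return addr.value;
    }

    MemoryRegionExtents MakeExtents(ProcessAddress start, std::uint64_t size) {
        // The typed address is converted once, at the boundary, never implicitly.
        return {.base = GetInteger(start), .size = size};
    }

    int main() {
        const auto extents = MakeExtents(ProcessAddress{0x8000000}, 0x2000000);
        return extents.base == 0x8000000 ? 0 : 1;
    }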
diff --git a/src/core/reporter.cpp b/src/core/reporter.cpp
index 004f2e57a..146c3f21e 100644
--- a/src/core/reporter.cpp
+++ b/src/core/reporter.cpp
@@ -117,8 +117,8 @@ json GetProcessorStateDataAuto(Core::System& system) {
     arm.SaveContext(context);
 
     return GetProcessorStateData(process->Is64BitProcess() ? "AArch64" : "AArch32",
-                                 process->PageTable().GetCodeRegionStart(), context.sp, context.pc,
-                                 context.pstate, context.cpu_registers);
+                                 GetInteger(process->PageTable().GetCodeRegionStart()), context.sp,
+                                 context.pc, context.pstate, context.cpu_registers);
 }
 
 json GetBacktraceData(Core::System& system) {