author    Fernando S 2023-12-24 16:23:14 +0100
committer GitHub 2023-12-24 16:23:14 +0100
commit    05e3db3ac9edbff0e4885ef8b42d3a2427c9f027 (patch)
tree      2f959b67638ab1134cfca19ac1f041552a68c335 /src/core/hle/kernel
parent    Merge pull request #12412 from ameerj/gl-query-prims (diff)
parent    kernel: fix resource limit imbalance (diff)
Merge pull request #12394 from liamwhite/per-process-memory
general: properly support multiple memory instances
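
Note: the heart of this change is ownership. Conceptually, each KProcess now carries its own memory instance and exclusive monitor instead of borrowing the application-wide ones from Core::System. A minimal sketch of that shape, using stand-in types rather than the real yuzu classes:

```cpp
#include <memory>

// Sketch only: Memory and ExclusiveMonitor are stand-ins for
// Core::Memory::Memory and Core::ExclusiveMonitor.
struct Memory {};
struct ExclusiveMonitor {};

class Process {
public:
    Memory& GetMemory() { return m_memory; }
    ExclusiveMonitor& GetExclusiveMonitor() { return *m_monitor; }

private:
    // Previously shared system-wide via Core::System; now every process
    // carries its own instances, so page tables and exclusive reservations
    // no longer leak across processes.
    Memory m_memory{};
    std::unique_ptr<ExclusiveMonitor> m_monitor = std::make_unique<ExclusiveMonitor>();
};
```

This is why the diffs below thread KernelCore& and GetCurrentProcess(...) through code that previously reached for Core::System.
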
Diffstat (limited to 'src/core/hle/kernel')
 src/core/hle/kernel/k_address_arbiter.cpp    |   19
 src/core/hle/kernel/k_client_port.cpp        |    5
 src/core/hle/kernel/k_condition_variable.cpp |    8
 src/core/hle/kernel/k_handle_table.h         |    8
 src/core/hle/kernel/k_process.cpp            |   50
 src/core/hle/kernel/k_process.h              |   17
 src/core/hle/kernel/k_server_session.cpp     | 1425
 src/core/hle/kernel/k_server_session.h       |   15
 src/core/hle/kernel/k_session.cpp            |    3
 src/core/hle/kernel/k_thread.cpp             |    3
 src/core/hle/kernel/k_thread.h               |    6
 src/core/hle/kernel/kernel.cpp               |   34
 src/core/hle/kernel/kernel.h                 |    7
 src/core/hle/kernel/message_buffer.h         |   20
 src/core/hle/kernel/svc/svc_info.cpp         |    1
 src/core/hle/kernel/svc/svc_ipc.cpp          |    6
 src/core/hle/kernel/svc_results.h            |    2
 17 files changed, 1289 insertions(+), 340 deletions(-)
diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
index 78d43d729..48889253d 100644
--- a/src/core/hle/kernel/k_address_arbiter.cpp
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
@@ -4,6 +4,7 @@
 #include "core/arm/exclusive_monitor.h"
 #include "core/core.h"
 #include "core/hle/kernel/k_address_arbiter.h"
+#include "core/hle/kernel/k_process.h"
 #include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
 #include "core/hle/kernel/k_thread.h"
@@ -26,9 +27,9 @@ bool ReadFromUser(KernelCore& kernel, s32* out, KProcessAddress address) {
     return true;
 }
 
-bool DecrementIfLessThan(Core::System& system, s32* out, KProcessAddress address, s32 value) {
-    auto& monitor = system.Monitor();
-    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
+bool DecrementIfLessThan(KernelCore& kernel, s32* out, KProcessAddress address, s32 value) {
+    auto& monitor = GetCurrentProcess(kernel).GetExclusiveMonitor();
+    const auto current_core = kernel.CurrentPhysicalCoreIndex();
 
     // NOTE: If scheduler lock is not held here, interrupt disable is required.
     // KScopedInterruptDisable di;
@@ -66,10 +67,10 @@ bool DecrementIfLessThan(Core::System& system, s32* out, KProcessAddress address
     return true;
 }
 
-bool UpdateIfEqual(Core::System& system, s32* out, KProcessAddress address, s32 value,
+bool UpdateIfEqual(KernelCore& kernel, s32* out, KProcessAddress address, s32 value,
                    s32 new_value) {
-    auto& monitor = system.Monitor();
-    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
+    auto& monitor = GetCurrentProcess(kernel).GetExclusiveMonitor();
+    const auto current_core = kernel.CurrentPhysicalCoreIndex();
 
     // NOTE: If scheduler lock is not held here, interrupt disable is required.
     // KScopedInterruptDisable di;
@@ -159,7 +160,7 @@ Result KAddressArbiter::SignalAndIncrementIfEqual(uint64_t addr, s32 value, s32
 
     // Check the userspace value.
     s32 user_value{};
-    R_UNLESS(UpdateIfEqual(m_system, std::addressof(user_value), addr, value, value + 1),
+    R_UNLESS(UpdateIfEqual(m_kernel, std::addressof(user_value), addr, value, value + 1),
             ResultInvalidCurrentMemory);
    R_UNLESS(user_value == value, ResultInvalidState);
 
@@ -219,7 +220,7 @@ Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(uint64_t addr, s32
     s32 user_value{};
     bool succeeded{};
     if (value != new_value) {
-        succeeded = UpdateIfEqual(m_system, std::addressof(user_value), addr, value, new_value);
+        succeeded = UpdateIfEqual(m_kernel, std::addressof(user_value), addr, value, new_value);
     } else {
         succeeded = ReadFromUser(m_kernel, std::addressof(user_value), addr);
     }
@@ -262,7 +263,7 @@ Result KAddressArbiter::WaitIfLessThan(uint64_t addr, s32 value, bool decrement,
     s32 user_value{};
     bool succeeded{};
     if (decrement) {
-        succeeded = DecrementIfLessThan(m_system, std::addressof(user_value), addr, value);
+        succeeded = DecrementIfLessThan(m_kernel, std::addressof(user_value), addr, value);
     } else {
        succeeded = ReadFromUser(m_kernel, std::addressof(user_value), addr);
     }
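
The hunks above swap Core::System& for KernelCore& and source the monitor from the current process. For intuition only, here is a sketch of the conditional-decrement loop these helpers implement, written against std::atomic rather than the per-process exclusive monitor (the monitor's origin, not the loop, is what this commit changes):

```cpp
#include <atomic>
#include <cstdint>

// Sketch only: mirrors the semantics of DecrementIfLessThan using a C++
// compare-exchange loop. The real helper performs an exclusive load/store
// through the owning process's exclusive monitor on the current core, and
// returns false only on a failed user-memory access, which this stand-in
// cannot model.
bool DecrementIfLessThan(std::atomic<int32_t>& word, int32_t* out, int32_t value) {
    int32_t current = word.load(std::memory_order_acquire);
    while (current < value) {
        // Try to publish current - 1; on failure, 'current' is reloaded
        // and the bound is re-checked before retrying.
        if (word.compare_exchange_weak(current, current - 1,
                                       std::memory_order_acq_rel)) {
            break;
        }
    }
    // Report the value that was observed (pre-decrement on success).
    *out = current;
    return true;
}
```
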
diff --git a/src/core/hle/kernel/k_client_port.cpp b/src/core/hle/kernel/k_client_port.cpp
index 11b1b977e..68cea978a 100644
--- a/src/core/hle/kernel/k_client_port.cpp
+++ b/src/core/hle/kernel/k_client_port.cpp
@@ -58,9 +58,8 @@ Result KClientPort::CreateSession(KClientSession** out) {
     KSession* session{};
 
     // Reserve a new session from the resource limit.
-    //! FIXME: we are reserving this from the wrong resource limit!
-    KScopedResourceReservation session_reservation(
-        m_kernel.ApplicationProcess()->GetResourceLimit(), LimitableResource::SessionCountMax);
+    KScopedResourceReservation session_reservation(GetCurrentProcessPointer(m_kernel),
+                                                   LimitableResource::SessionCountMax);
     R_UNLESS(session_reservation.Succeeded(), ResultLimitReached);
 
     // Allocate a session normally.
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index 7633a51fb..94ea3527a 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -28,10 +28,10 @@ bool WriteToUser(KernelCore& kernel, KProcessAddress address, const u32* p) {
     return true;
 }
 
-bool UpdateLockAtomic(Core::System& system, u32* out, KProcessAddress address, u32 if_zero,
+bool UpdateLockAtomic(KernelCore& kernel, u32* out, KProcessAddress address, u32 if_zero,
                       u32 new_orr_mask) {
-    auto& monitor = system.Monitor();
-    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
+    auto& monitor = GetCurrentProcess(kernel).GetExclusiveMonitor();
+    const auto current_core = kernel.CurrentPhysicalCoreIndex();
 
     u32 expected{};
 
@@ -208,7 +208,7 @@ void KConditionVariable::SignalImpl(KThread* thread) {
         // TODO(bunnei): We should call CanAccessAtomic(..) here.
         can_access = true;
         if (can_access) [[likely]] {
-            UpdateLockAtomic(m_system, std::addressof(prev_tag), address, own_tag,
+            UpdateLockAtomic(m_kernel, std::addressof(prev_tag), address, own_tag,
                              Svc::HandleWaitMask);
         }
     }
diff --git a/src/core/hle/kernel/k_handle_table.h b/src/core/hle/kernel/k_handle_table.h
index d7660630c..4e6dcd66b 100644
--- a/src/core/hle/kernel/k_handle_table.h
+++ b/src/core/hle/kernel/k_handle_table.h
@@ -30,7 +30,7 @@ public:
 public:
     explicit KHandleTable(KernelCore& kernel) : m_kernel(kernel) {}
 
-    Result Initialize(s32 size) {
+    Result Initialize(KProcess* owner, s32 size) {
         // Check that the table size is valid.
         R_UNLESS(size <= static_cast<s32>(MaxTableSize), ResultOutOfMemory);
 
@@ -44,6 +44,7 @@ public:
         m_next_linear_id = MinLinearId;
         m_count = 0;
         m_free_head_index = -1;
+        m_owner = owner;
 
         // Free all entries.
         for (s32 i = 0; i < static_cast<s32>(m_table_size); ++i) {
@@ -90,8 +91,8 @@ public:
         // Handle pseudo-handles.
         if constexpr (std::derived_from<KProcess, T>) {
             if (handle == Svc::PseudoHandle::CurrentProcess) {
-                //! FIXME: this is the wrong process!
-                auto* const cur_process = m_kernel.ApplicationProcess();
+                // TODO: this should be the current process
+                auto* const cur_process = m_owner;
                 ASSERT(cur_process != nullptr);
                 return cur_process;
             }
@@ -301,6 +302,7 @@ private:
 
 private:
     KernelCore& m_kernel;
+    KProcess* m_owner{};
     std::array<EntryInfo, MaxTableSize> m_entry_infos{};
     std::array<KAutoObject*, MaxTableSize> m_objects{};
     mutable KSpinLock m_lock;
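
With the owner recorded at Initialize time, the Svc::PseudoHandle::CurrentProcess lookup resolves against the table's owning process instead of the global application process. A simplified, hypothetical sketch of that resolution (HandleTable and Process here are stand-ins; 0xFFFF8001 is the Horizon CurrentProcess pseudo-handle value):

```cpp
#include <cassert>
#include <cstdint>

using Handle = uint32_t;
constexpr Handle CurrentProcessPseudoHandle = 0xFFFF8001; // Svc::PseudoHandle::CurrentProcess

struct Process {};

class HandleTable {
public:
    void Initialize(Process* owner) { m_owner = owner; }

    Process* GetProcess(Handle handle) const {
        if (handle == CurrentProcessPseudoHandle) {
            // Resolve against the owning process, not a global singleton.
            assert(m_owner != nullptr);
            return m_owner;
        }
        return LookupObject(handle);
    }

private:
    Process* LookupObject(Handle) const { return nullptr; } // real table lookup elided
    Process* m_owner{};
};
```
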
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 3a2635e1f..d6869c228 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -306,12 +306,16 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params, const KPa
             False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge);
         R_TRY(m_page_table.Initialize(as_type, enable_aslr, enable_das_merge, !enable_aslr, pool,
                                       params.code_address, params.code_num_pages * PageSize,
-                                      m_system_resource, res_limit, this->GetMemory(), 0));
+                                      m_system_resource, res_limit, m_memory, 0));
     }
     ON_RESULT_FAILURE_2 {
         m_page_table.Finalize();
     };
 
+    // Ensure our memory is initialized.
+    m_memory.SetCurrentPageTable(*this);
+    m_memory.SetGPUDirtyManagers(m_dirty_memory_managers);
+
     // Ensure we can insert the code region.
     R_UNLESS(m_page_table.CanContain(params.code_address, params.code_num_pages * PageSize,
                                      KMemoryState::Code),
@@ -399,12 +403,16 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params,
             False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge);
         R_TRY(m_page_table.Initialize(as_type, enable_aslr, enable_das_merge, !enable_aslr, pool,
                                       params.code_address, code_size, m_system_resource, res_limit,
-                                      this->GetMemory(), aslr_space_start));
+                                      m_memory, aslr_space_start));
     }
     ON_RESULT_FAILURE_2 {
         m_page_table.Finalize();
     };
 
+    // Ensure our memory is initialized.
+    m_memory.SetCurrentPageTable(*this);
+    m_memory.SetGPUDirtyManagers(m_dirty_memory_managers);
+
     // Ensure we can insert the code region.
     R_UNLESS(m_page_table.CanContain(params.code_address, code_size, KMemoryState::Code),
              ResultInvalidMemoryRegion);
@@ -1094,8 +1102,7 @@ void KProcess::UnpinThread(KThread* thread) {
 
 Result KProcess::GetThreadList(s32* out_num_threads, KProcessAddress out_thread_ids,
                                s32 max_out_count) {
-    // TODO: use current memory reference
-    auto& memory = m_kernel.System().ApplicationMemory();
+    auto& memory = this->GetMemory();
 
     // Lock the list.
     KScopedLightLock lk(m_list_lock);
@@ -1128,14 +1135,15 @@ void KProcess::Switch(KProcess* cur_process, KProcess* next_process) {}
 KProcess::KProcess(KernelCore& kernel)
     : KAutoObjectWithSlabHeapAndContainer(kernel), m_page_table{kernel}, m_state_lock{kernel},
       m_list_lock{kernel}, m_cond_var{kernel.System()}, m_address_arbiter{kernel.System()},
-      m_handle_table{kernel} {}
+      m_handle_table{kernel}, m_dirty_memory_managers{},
+      m_exclusive_monitor{}, m_memory{kernel.System()} {}
 KProcess::~KProcess() = default;
 
 Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size,
                                   KProcessAddress aslr_space_start, bool is_hbl) {
     // Create a resource limit for the process.
-    const auto physical_memory_size =
-        m_kernel.MemoryManager().GetSize(Kernel::KMemoryManager::Pool::Application);
+    const auto pool = static_cast<KMemoryManager::Pool>(metadata.GetPoolPartition());
+    const auto physical_memory_size = m_kernel.MemoryManager().GetSize(pool);
     auto* res_limit =
         Kernel::CreateResourceLimitForProcess(m_kernel.System(), physical_memory_size);
 
@@ -1146,8 +1154,10 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
     Svc::CreateProcessFlag flag{};
     u64 code_address{};
 
-    // We are an application.
-    flag |= Svc::CreateProcessFlag::IsApplication;
+    // Determine if we are an application.
+    if (pool == KMemoryManager::Pool::Application) {
+        flag |= Svc::CreateProcessFlag::IsApplication;
+    }
 
     // If we are 64-bit, create as such.
     if (metadata.Is64BitProgram()) {
@@ -1196,8 +1206,8 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
     std::memcpy(params.name.data(), name.data(), sizeof(params.name));
 
     // Initialize for application process.
-    R_TRY(this->Initialize(params, metadata.GetKernelCapabilities(), res_limit,
-                           KMemoryManager::Pool::Application, aslr_space_start));
+    R_TRY(this->Initialize(params, metadata.GetKernelCapabilities(), res_limit, pool,
+                           aslr_space_start));
 
     // Assign remaining properties.
     m_is_hbl = is_hbl;
@@ -1223,7 +1233,7 @@ void KProcess::LoadModule(CodeSet code_set, KProcessAddress base_addr) {
     ReprotectSegment(code_set.DataSegment(), Svc::MemoryPermission::ReadWrite);
 
 #ifdef HAS_NCE
-    if (Settings::IsNceEnabled()) {
+    if (this->IsApplication() && Settings::IsNceEnabled()) {
         auto& buffer = m_kernel.System().DeviceMemory().buffer;
         const auto& code = code_set.CodeSegment();
         const auto& patch = code_set.PatchSegment();
@@ -1235,10 +1245,11 @@ void KProcess::LoadModule(CodeSet code_set, KProcessAddress base_addr) {
 }
 
 void KProcess::InitializeInterfaces() {
-    this->GetMemory().SetCurrentPageTable(*this);
+    m_exclusive_monitor =
+        Core::MakeExclusiveMonitor(this->GetMemory(), Core::Hardware::NUM_CPU_CORES);
 
 #ifdef HAS_NCE
-    if (this->Is64Bit() && Settings::IsNceEnabled()) {
+    if (this->IsApplication() && Settings::IsNceEnabled()) {
         for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
             m_arm_interfaces[i] = std::make_unique<Core::ArmNce>(m_kernel.System(), true, i);
         }
@@ -1248,13 +1259,13 @@ void KProcess::InitializeInterfaces() {
         for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
             m_arm_interfaces[i] = std::make_unique<Core::ArmDynarmic64>(
                 m_kernel.System(), m_kernel.IsMulticore(), this,
-                static_cast<Core::DynarmicExclusiveMonitor&>(m_kernel.GetExclusiveMonitor()), i);
+                static_cast<Core::DynarmicExclusiveMonitor&>(*m_exclusive_monitor), i);
         }
     } else {
         for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
             m_arm_interfaces[i] = std::make_unique<Core::ArmDynarmic32>(
                 m_kernel.System(), m_kernel.IsMulticore(), this,
-                static_cast<Core::DynarmicExclusiveMonitor&>(m_kernel.GetExclusiveMonitor()), i);
+                static_cast<Core::DynarmicExclusiveMonitor&>(*m_exclusive_monitor), i);
         }
     }
 }
@@ -1305,9 +1316,10 @@ bool KProcess::RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointT
     return true;
 }
 
-Core::Memory::Memory& KProcess::GetMemory() const {
-    // TODO: per-process memory
-    return m_kernel.System().ApplicationMemory();
+void KProcess::GatherGPUDirtyMemory(std::function<void(VAddr, size_t)>& callback) {
+    for (auto& manager : m_dirty_memory_managers) {
+        manager.Gather(callback);
+    }
 }
 
 } // namespace Kernel
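
KProcess now owns one GPU dirty-memory manager per CPU core and fans a caller-supplied callback out to each of them. A minimal sketch of that pattern, with DirtyMemoryManager standing in for Core::GPUDirtyMemoryManager:

```cpp
#include <array>
#include <cstddef>
#include <functional>

using VAddr = unsigned long long;
constexpr std::size_t NumCores = 4; // stand-in for Core::Hardware::NUM_CPU_CORES

struct DirtyMemoryManager {
    void Gather(std::function<void(VAddr, std::size_t)>& callback) {
        // The real manager walks its dirty bitmap and invokes the callback
        // once per contiguous dirty range; elided in this sketch.
    }
};

class Process {
public:
    void GatherGPUDirtyMemory(std::function<void(VAddr, std::size_t)>& callback) {
        // Fan the callback out to the per-core managers this process owns.
        for (auto& manager : m_dirty_memory_managers) {
            manager.Gather(callback);
        }
    }

private:
    std::array<DirtyMemoryManager, NumCores> m_dirty_memory_managers{};
};
```
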
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index 4b114e39b..b5c6867a1 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -7,6 +7,7 @@
 
 #include "core/arm/arm_interface.h"
 #include "core/file_sys/program_metadata.h"
+#include "core/gpu_dirty_memory_manager.h"
 #include "core/hle/kernel/code_set.h"
 #include "core/hle/kernel/k_address_arbiter.h"
 #include "core/hle/kernel/k_capabilities.h"
@@ -17,6 +18,7 @@
 #include "core/hle/kernel/k_system_resource.h"
 #include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/k_thread_local_page.h"
+#include "core/memory.h"
 
 namespace Kernel {
 
@@ -126,6 +128,9 @@ private:
 #ifdef HAS_NCE
     std::unordered_map<u64, u64> m_post_handlers{};
 #endif
+    std::array<Core::GPUDirtyMemoryManager, Core::Hardware::NUM_CPU_CORES> m_dirty_memory_managers;
+    std::unique_ptr<Core::ExclusiveMonitor> m_exclusive_monitor;
+    Core::Memory::Memory m_memory;
 
 private:
     Result StartTermination();
@@ -502,7 +507,15 @@ public:
 
     void InitializeInterfaces();
 
-    Core::Memory::Memory& GetMemory() const;
+    Core::Memory::Memory& GetMemory() {
+        return m_memory;
+    }
+
+    void GatherGPUDirtyMemory(std::function<void(VAddr, size_t)>& callback);
+
+    Core::ExclusiveMonitor& GetExclusiveMonitor() const {
+        return *m_exclusive_monitor;
+    }
 
 public:
     // Overridden parent functions.
@@ -539,7 +552,7 @@ private:
 
     Result InitializeHandleTable(s32 size) {
         // Try to initialize the handle table.
-        R_TRY(m_handle_table.Initialize(size));
+        R_TRY(m_handle_table.Initialize(this, size));
 
         // We succeeded, so note that we did.
         m_is_handle_table_initialized = true;
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index e33a88e24..f6ca3dc48 100644
--- a/src/core/hle/kernel/k_server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
@@ -8,6 +8,7 @@
 #include "common/common_types.h"
 #include "common/logging/log.h"
 #include "common/scope_exit.h"
+#include "common/scratch_buffer.h"
 #include "core/core.h"
 #include "core/core_timing.h"
 #include "core/hle/kernel/k_client_port.h"
@@ -29,12 +30,138 @@ namespace Kernel {
 
 namespace {
 
+constexpr inline size_t PointerTransferBufferAlignment = 0x10;
+constexpr inline size_t ReceiveListDataSize =
+    MessageBuffer::MessageHeader::ReceiveListCountType_CountMax *
+    MessageBuffer::ReceiveListEntry::GetDataSize() / sizeof(u32);
+
+using ThreadQueueImplForKServerSessionRequest = KThreadQueue;
+
+class ReceiveList {
+public:
+    static constexpr int GetEntryCount(const MessageBuffer::MessageHeader& header) {
+        const auto count = header.GetReceiveListCount();
+        switch (count) {
+        case MessageBuffer::MessageHeader::ReceiveListCountType_None:
+            return 0;
+        case MessageBuffer::MessageHeader::ReceiveListCountType_ToMessageBuffer:
+            return 0;
+        case MessageBuffer::MessageHeader::ReceiveListCountType_ToSingleBuffer:
+            return 1;
+        default:
+            return count - MessageBuffer::MessageHeader::ReceiveListCountType_CountOffset;
+        }
+    }
+
+    explicit ReceiveList(const u32* dst_msg, uint64_t dst_address,
+                         KProcessPageTable& dst_page_table,
+                         const MessageBuffer::MessageHeader& dst_header,
+                         const MessageBuffer::SpecialHeader& dst_special_header, size_t msg_size,
+                         size_t out_offset, s32 dst_recv_list_idx, bool is_tls) {
+        m_recv_list_count = dst_header.GetReceiveListCount();
+        m_msg_buffer_end = dst_address + sizeof(u32) * out_offset;
+        m_msg_buffer_space_end = dst_address + msg_size;
+
+        // NOTE: Nintendo calculates the receive list index here using the special header.
+        // We pre-calculate it in the caller, and pass it as a parameter.
+        (void)dst_special_header;
+
+        const u32* recv_list = dst_msg + dst_recv_list_idx;
+        const auto entry_count = GetEntryCount(dst_header);
+
+        if (is_tls) {
+            // Messages from TLS to TLS are contained within one page.
+            std::memcpy(m_data.data(), recv_list,
+                        entry_count * MessageBuffer::ReceiveListEntry::GetDataSize());
+        } else {
+            // If any buffer is not from TLS, perform a normal read instead.
+            uint64_t cur_addr = dst_address + dst_recv_list_idx * sizeof(u32);
+            dst_page_table.GetMemory().ReadBlock(
+                cur_addr, m_data.data(),
+                entry_count * MessageBuffer::ReceiveListEntry::GetDataSize());
+        }
+    }
+
+    bool IsIndex() const {
+        return m_recv_list_count >
+               static_cast<s32>(MessageBuffer::MessageHeader::ReceiveListCountType_CountOffset);
+    }
+
+    bool IsToMessageBuffer() const {
+        return m_recv_list_count ==
+               MessageBuffer::MessageHeader::ReceiveListCountType_ToMessageBuffer;
+    }
+
+    void GetBuffer(uint64_t& out, size_t size, int& key) const {
+        switch (m_recv_list_count) {
+        case MessageBuffer::MessageHeader::ReceiveListCountType_None: {
+            out = 0;
+            break;
+        }
+        case MessageBuffer::MessageHeader::ReceiveListCountType_ToMessageBuffer: {
+            const uint64_t buf =
+                Common::AlignUp(m_msg_buffer_end + key, PointerTransferBufferAlignment);
+
+            if ((buf < buf + size) && (buf + size <= m_msg_buffer_space_end)) {
+                out = buf;
+                key = static_cast<int>(buf + size - m_msg_buffer_end);
+            } else {
+                out = 0;
+            }
+            break;
+        }
+        case MessageBuffer::MessageHeader::ReceiveListCountType_ToSingleBuffer: {
+            const MessageBuffer::ReceiveListEntry entry(m_data[0], m_data[1]);
+            const uint64_t buf =
+                Common::AlignUp(entry.GetAddress() + key, PointerTransferBufferAlignment);
+
+            const uint64_t entry_addr = entry.GetAddress();
+            const size_t entry_size = entry.GetSize();
+
+            if ((buf < buf + size) && (entry_addr < entry_addr + entry_size) &&
+                (buf + size <= entry_addr + entry_size)) {
+                out = buf;
+                key = static_cast<int>(buf + size - entry_addr);
+            } else {
+                out = 0;
+            }
+            break;
+        }
+        default: {
+            if (key < m_recv_list_count -
+                          static_cast<s32>(
+                              MessageBuffer::MessageHeader::ReceiveListCountType_CountOffset)) {
+                const MessageBuffer::ReceiveListEntry entry(m_data[2 * key + 0],
+                                                            m_data[2 * key + 1]);
+
+                const uintptr_t entry_addr = entry.GetAddress();
+                const size_t entry_size = entry.GetSize();
+
+                if ((entry_addr < entry_addr + entry_size) && (entry_size >= size)) {
+                    out = entry_addr;
+                }
+            } else {
+                out = 0;
+            }
+            break;
+        }
+        }
+    }
+
+private:
+    std::array<u32, ReceiveListDataSize> m_data;
+    s32 m_recv_list_count;
+    uint64_t m_msg_buffer_end;
+    uint64_t m_msg_buffer_space_end;
+};
+
 template <bool MoveHandleAllowed>
-Result ProcessMessageSpecialData(KProcess& dst_process, KProcess& src_process, KThread& src_thread,
-                                 MessageBuffer& dst_msg, const MessageBuffer& src_msg,
-                                 MessageBuffer::SpecialHeader& src_special_header) {
+Result ProcessMessageSpecialData(s32& offset, KProcess& dst_process, KProcess& src_process,
+                                 KThread& src_thread, const MessageBuffer& dst_msg,
+                                 const MessageBuffer& src_msg,
+                                 const MessageBuffer::SpecialHeader& src_special_header) {
     // Copy the special header to the destination.
-    s32 offset = dst_msg.Set(src_special_header);
+    offset = dst_msg.Set(src_special_header);
 
     // Copy the process ID.
     if (src_special_header.GetHasProcessId()) {
@@ -110,6 +237,102 @@ Result ProcessMessageSpecialData(KProcess& dst_process, KProcess& src_process, K
     R_RETURN(result);
 }
 
+Result ProcessReceiveMessagePointerDescriptors(int& offset, int& pointer_key,
+                                               KProcessPageTable& dst_page_table,
+                                               KProcessPageTable& src_page_table,
+                                               const MessageBuffer& dst_msg,
+                                               const MessageBuffer& src_msg,
+                                               const ReceiveList& dst_recv_list, bool dst_user) {
+    // Get the offset at the start of processing.
+    const int cur_offset = offset;
+
+    // Get the pointer desc.
+    MessageBuffer::PointerDescriptor src_desc(src_msg, cur_offset);
+    offset += static_cast<int>(MessageBuffer::PointerDescriptor::GetDataSize() / sizeof(u32));
+
+    // Extract address/size.
+    const uint64_t src_pointer = src_desc.GetAddress();
+    const size_t recv_size = src_desc.GetSize();
+    uint64_t recv_pointer = 0;
+
+    // Process the buffer, if it has a size.
+    if (recv_size > 0) {
+        // If using indexing, set index.
+        if (dst_recv_list.IsIndex()) {
+            pointer_key = src_desc.GetIndex();
+        }
+
+        // Get the buffer.
+        dst_recv_list.GetBuffer(recv_pointer, recv_size, pointer_key);
+        R_UNLESS(recv_pointer != 0, ResultOutOfResource);
+
+        // Perform the pointer data copy.
+        if (dst_user) {
+            R_TRY(src_page_table.CopyMemoryFromHeapToHeapWithoutCheckDestination(
+                dst_page_table, recv_pointer, recv_size, KMemoryState::FlagReferenceCounted,
+                KMemoryState::FlagReferenceCounted,
+                KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite,
+                KMemoryAttribute::Uncached | KMemoryAttribute::Locked, KMemoryAttribute::Locked,
+                src_pointer, KMemoryState::FlagLinearMapped, KMemoryState::FlagLinearMapped,
+                KMemoryPermission::UserRead, KMemoryAttribute::Uncached, KMemoryAttribute::None));
+        } else {
+            R_TRY(src_page_table.CopyMemoryFromLinearToUser(
+                recv_pointer, recv_size, src_pointer, KMemoryState::FlagLinearMapped,
+                KMemoryState::FlagLinearMapped, KMemoryPermission::UserRead,
+                KMemoryAttribute::Uncached, KMemoryAttribute::None));
+        }
+    }
+
+    // Set the output descriptor.
+    dst_msg.Set(cur_offset, MessageBuffer::PointerDescriptor(reinterpret_cast<void*>(recv_pointer),
+                                                             recv_size, src_desc.GetIndex()));
+
+    R_SUCCEED();
+}
+
+constexpr Result GetMapAliasMemoryState(KMemoryState& out,
+                                        MessageBuffer::MapAliasDescriptor::Attribute attr) {
+    switch (attr) {
+    case MessageBuffer::MapAliasDescriptor::Attribute::Ipc:
+        out = KMemoryState::Ipc;
+        break;
+    case MessageBuffer::MapAliasDescriptor::Attribute::NonSecureIpc:
+        out = KMemoryState::NonSecureIpc;
+        break;
+    case MessageBuffer::MapAliasDescriptor::Attribute::NonDeviceIpc:
+        out = KMemoryState::NonDeviceIpc;
+        break;
+    default:
+        R_THROW(ResultInvalidCombination);
+    }
+
+    R_SUCCEED();
+}
+
+constexpr Result GetMapAliasTestStateAndAttributeMask(KMemoryState& out_state,
+                                                      KMemoryAttribute& out_attr_mask,
+                                                      KMemoryState state) {
+    switch (state) {
+    case KMemoryState::Ipc:
+        out_state = KMemoryState::FlagCanUseIpc;
+        out_attr_mask =
+            KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked;
+        break;
+    case KMemoryState::NonSecureIpc:
+        out_state = KMemoryState::FlagCanUseNonSecureIpc;
+        out_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
+        break;
+    case KMemoryState::NonDeviceIpc:
+        out_state = KMemoryState::FlagCanUseNonDeviceIpc;
+        out_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
+        break;
+    default:
+        R_THROW(ResultInvalidCombination);
+    }
+
+    R_SUCCEED();
+}
+
 void CleanupSpecialData(KProcess& dst_process, u32* dst_msg_ptr, size_t dst_buffer_size) {
     // Parse the message.
     const MessageBuffer dst_msg(dst_msg_ptr, dst_buffer_size);
@@ -144,166 +367,856 @@ void CleanupSpecialData(KProcess& dst_process, u32* dst_msg_ptr, size_t dst_buff
     }
 }
 
-} // namespace
+Result CleanupServerHandles(KernelCore& kernel, uint64_t message, size_t buffer_size,
+                            KPhysicalAddress message_paddr) {
+    // Server is assumed to be current thread.
+    KThread& thread = GetCurrentThread(kernel);
 
-using ThreadQueueImplForKServerSessionRequest = KThreadQueue;
+    // Get the linear message pointer.
+    u32* msg_ptr;
+    if (message) {
+        msg_ptr = kernel.System().DeviceMemory().GetPointer<u32>(message_paddr);
+    } else {
+        msg_ptr = GetCurrentMemory(kernel).GetPointer<u32>(thread.GetTlsAddress());
+        buffer_size = MessageBufferSize;
+        message = GetInteger(thread.GetTlsAddress());
+    }
 
-KServerSession::KServerSession(KernelCore& kernel)
-    : KSynchronizationObject{kernel}, m_lock{m_kernel} {}
+    // Parse the message.
+    const MessageBuffer msg(msg_ptr, buffer_size);
+    const MessageBuffer::MessageHeader header(msg);
+    const MessageBuffer::SpecialHeader special_header(msg, header);
 
-KServerSession::~KServerSession() = default;
+    // Check that the size is big enough.
+    R_UNLESS(MessageBuffer::GetMessageBufferSize(header, special_header) <= buffer_size,
+             ResultInvalidCombination);
+
+    // If there's a special header, there may be move handles we need to close.
+    if (header.GetHasSpecialHeader()) {
+        // Determine the offset to the start of handles.
+        auto offset = msg.GetSpecialDataIndex(header, special_header);
+        if (special_header.GetHasProcessId()) {
+            offset += static_cast<int>(sizeof(u64) / sizeof(u32));
+        }
+        if (auto copy_count = special_header.GetCopyHandleCount(); copy_count > 0) {
+            offset += static_cast<int>((sizeof(Svc::Handle) * copy_count) / sizeof(u32));
+        }
 
-void KServerSession::Destroy() {
-    m_parent->OnServerClosed();
+        // Get the handle table.
+        auto& handle_table = thread.GetOwnerProcess()->GetHandleTable();
 
-    this->CleanupRequests();
+        // Close the handles.
+        for (auto i = 0; i < special_header.GetMoveHandleCount(); ++i) {
+            handle_table.Remove(msg.GetHandle(offset));
+            offset += static_cast<int>(sizeof(Svc::Handle) / sizeof(u32));
+        }
+    }
 
-    m_parent->Close();
+    R_SUCCEED();
 }
 
-void KServerSession::OnClientClosed() {
-    KScopedLightLock lk{m_lock};
+Result CleanupServerMap(KSessionRequest* request, KProcess* server_process) {
+    // If there's no server process, there's nothing to clean up.
+    R_SUCCEED_IF(server_process == nullptr);
 
-    // Handle any pending requests.
-    KSessionRequest* prev_request = nullptr;
-    while (true) {
-        // Declare variables for processing the request.
-        KSessionRequest* request = nullptr;
-        KEvent* event = nullptr;
-        KThread* thread = nullptr;
-        bool cur_request = false;
-        bool terminate = false;
+    // Get the page table.
+    auto& server_page_table = server_process->GetPageTable();
 
-        // Get the next request.
-        {
-            KScopedSchedulerLock sl{m_kernel};
+    // Cleanup Send mappings.
+    for (size_t i = 0; i < request->GetSendCount(); ++i) {
+        R_TRY(server_page_table.CleanupForIpcServer(request->GetSendServerAddress(i),
+                                                    request->GetSendSize(i),
+                                                    request->GetSendMemoryState(i)));
+    }
 
-            if (m_current_request != nullptr && m_current_request != prev_request) {
-                // Set the request, open a reference as we process it.
-                request = m_current_request;
-                request->Open();
-                cur_request = true;
+    // Cleanup Receive mappings.
+    for (size_t i = 0; i < request->GetReceiveCount(); ++i) {
+        R_TRY(server_page_table.CleanupForIpcServer(request->GetReceiveServerAddress(i),
+                                                    request->GetReceiveSize(i),
+                                                    request->GetReceiveMemoryState(i)));
+    }
 
-                // Get thread and event for the request.
-                thread = request->GetThread();
-                event = request->GetEvent();
+    // Cleanup Exchange mappings.
+    for (size_t i = 0; i < request->GetExchangeCount(); ++i) {
+        R_TRY(server_page_table.CleanupForIpcServer(request->GetExchangeServerAddress(i),
+                                                    request->GetExchangeSize(i),
+                                                    request->GetExchangeMemoryState(i)));
+    }
 
-                // If the thread is terminating, handle that.
-                if (thread->IsTerminationRequested()) {
-                    request->ClearThread();
-                    request->ClearEvent();
-                    terminate = true;
-                }
+    R_SUCCEED();
+}
 
-                prev_request = request;
-            } else if (!m_request_list.empty()) {
-                // Pop the request from the front of the list.
-                request = std::addressof(m_request_list.front());
-                m_request_list.pop_front();
+Result CleanupClientMap(KSessionRequest* request, KProcessPageTable* client_page_table) {
+    // If there's no client page table, there's nothing to clean up.
+    R_SUCCEED_IF(client_page_table == nullptr);
 
-                // Get thread and event for the request.
-                thread = request->GetThread();
-                event = request->GetEvent();
-            }
+    // Cleanup Send mappings.
+    for (size_t i = 0; i < request->GetSendCount(); ++i) {
+        R_TRY(client_page_table->CleanupForIpcClient(request->GetSendClientAddress(i),
+                                                     request->GetSendSize(i),
+                                                     request->GetSendMemoryState(i)));
+    }
+
+    // Cleanup Receive mappings.
+    for (size_t i = 0; i < request->GetReceiveCount(); ++i) {
+        R_TRY(client_page_table->CleanupForIpcClient(request->GetReceiveClientAddress(i),
+                                                     request->GetReceiveSize(i),
+                                                     request->GetReceiveMemoryState(i)));
+    }
+
+    // Cleanup Exchange mappings.
+    for (size_t i = 0; i < request->GetExchangeCount(); ++i) {
+        R_TRY(client_page_table->CleanupForIpcClient(request->GetExchangeClientAddress(i),
+                                                     request->GetExchangeSize(i),
+                                                     request->GetExchangeMemoryState(i)));
+    }
+
+    R_SUCCEED();
+}
+
+Result CleanupMap(KSessionRequest* request, KProcess* server_process,
+                  KProcessPageTable* client_page_table) {
+    // Cleanup the server map.
+    R_TRY(CleanupServerMap(request, server_process));
+
+    // Cleanup the client map.
+    R_TRY(CleanupClientMap(request, client_page_table));
+
+    R_SUCCEED();
+}
+
+Result ProcessReceiveMessageMapAliasDescriptors(int& offset, KProcessPageTable& dst_page_table,
+                                                KProcessPageTable& src_page_table,
+                                                const MessageBuffer& dst_msg,
+                                                const MessageBuffer& src_msg,
+                                                KSessionRequest* request, KMemoryPermission perm,
+                                                bool send) {
+    // Get the offset at the start of processing.
+    const int cur_offset = offset;
+
+    // Get the map alias descriptor.
+    MessageBuffer::MapAliasDescriptor src_desc(src_msg, cur_offset);
+    offset += static_cast<int>(MessageBuffer::MapAliasDescriptor::GetDataSize() / sizeof(u32));
+
+    // Extract address/size.
+    const KProcessAddress src_address = src_desc.GetAddress();
+    const size_t size = src_desc.GetSize();
+    KProcessAddress dst_address = 0;
+
+    // Determine the result memory state.
+    KMemoryState dst_state;
+    R_TRY(GetMapAliasMemoryState(dst_state, src_desc.GetAttribute()));
+
+    // Process the buffer, if it has a size.
+    if (size > 0) {
+        // Set up the source pages for ipc.
+        R_TRY(dst_page_table.SetupForIpc(std::addressof(dst_address), size, src_address,
+                                         src_page_table, perm, dst_state, send));
+
+        // Ensure that we clean up on failure.
+        ON_RESULT_FAILURE {
+            dst_page_table.CleanupForIpcServer(dst_address, size, dst_state);
+            src_page_table.CleanupForIpcClient(src_address, size, dst_state);
+        };
+
+        // Push the appropriate mapping.
+        if (perm == KMemoryPermission::UserRead) {
+            R_TRY(request->PushSend(src_address, dst_address, size, dst_state));
+        } else if (send) {
+            R_TRY(request->PushExchange(src_address, dst_address, size, dst_state));
+        } else {
+            R_TRY(request->PushReceive(src_address, dst_address, size, dst_state));
         }
+    }
 
-        // If there are no requests, we're done.
-        if (request == nullptr) {
-            break;
+    // Set the output descriptor.
+    dst_msg.Set(cur_offset,
+                MessageBuffer::MapAliasDescriptor(reinterpret_cast<void*>(GetInteger(dst_address)),
+                                                  size, src_desc.GetAttribute()));
+
+    R_SUCCEED();
+}
+
+Result ReceiveMessage(KernelCore& kernel, bool& recv_list_broken, uint64_t dst_message_buffer,
+                      size_t dst_buffer_size, KPhysicalAddress dst_message_paddr,
+                      KThread& src_thread, uint64_t src_message_buffer, size_t src_buffer_size,
+                      KServerSession* session, KSessionRequest* request) {
+    // Prepare variables for receive.
+    KThread& dst_thread = GetCurrentThread(kernel);
+    KProcess& dst_process = *(dst_thread.GetOwnerProcess());
+    KProcess& src_process = *(src_thread.GetOwnerProcess());
+    auto& dst_page_table = dst_process.GetPageTable();
+    auto& src_page_table = src_process.GetPageTable();
+
+    // NOTE: Session is used only for debugging, and so may go unused.
+    (void)session;
+
+    // The receive list is initially not broken.
+    recv_list_broken = false;
+
+    // Set the server process for the request.
+    request->SetServerProcess(std::addressof(dst_process));
+
+    // Determine the message buffers.
+    u32 *dst_msg_ptr, *src_msg_ptr;
+    bool dst_user, src_user;
+
+    if (dst_message_buffer) {
+        dst_msg_ptr = kernel.System().DeviceMemory().GetPointer<u32>(dst_message_paddr);
+        dst_user = true;
+    } else {
+        dst_msg_ptr = dst_page_table.GetMemory().GetPointer<u32>(dst_thread.GetTlsAddress());
+        dst_buffer_size = MessageBufferSize;
+        dst_message_buffer = GetInteger(dst_thread.GetTlsAddress());
+        dst_user = false;
+    }
+
+    if (src_message_buffer) {
+        // NOTE: Nintendo does not check the result of this GetPhysicalAddress call.
+        src_msg_ptr = src_page_table.GetMemory().GetPointer<u32>(src_message_buffer);
+        src_user = true;
+    } else {
+        src_msg_ptr = src_page_table.GetMemory().GetPointer<u32>(src_thread.GetTlsAddress());
+        src_buffer_size = MessageBufferSize;
+        src_message_buffer = GetInteger(src_thread.GetTlsAddress());
+        src_user = false;
+    }
+
+    // Parse the headers.
+    const MessageBuffer dst_msg(dst_msg_ptr, dst_buffer_size);
+    const MessageBuffer src_msg(src_msg_ptr, src_buffer_size);
+    const MessageBuffer::MessageHeader dst_header(dst_msg);
+    const MessageBuffer::MessageHeader src_header(src_msg);
+    const MessageBuffer::SpecialHeader dst_special_header(dst_msg, dst_header);
+    const MessageBuffer::SpecialHeader src_special_header(src_msg, src_header);
+
+    // Get the end of the source message.
+    const size_t src_end_offset =
+        MessageBuffer::GetRawDataIndex(src_header, src_special_header) + src_header.GetRawCount();
+
+    // Ensure that the headers fit.
+    R_UNLESS(MessageBuffer::GetMessageBufferSize(dst_header, dst_special_header) <= dst_buffer_size,
+             ResultInvalidCombination);
+    R_UNLESS(MessageBuffer::GetMessageBufferSize(src_header, src_special_header) <= src_buffer_size,
+             ResultInvalidCombination);
+
+    // Ensure the receive list offset is after the end of raw data.
+    if (dst_header.GetReceiveListOffset()) {
+        R_UNLESS(dst_header.GetReceiveListOffset() >=
+                     MessageBuffer::GetRawDataIndex(dst_header, dst_special_header) +
+                         dst_header.GetRawCount(),
+                 ResultInvalidCombination);
+    }
+
+    // Ensure that the destination buffer is big enough to receive the source.
+    R_UNLESS(dst_buffer_size >= src_end_offset * sizeof(u32), ResultMessageTooLarge);
+
+    // Get the receive list.
+    const s32 dst_recv_list_idx =
+        MessageBuffer::GetReceiveListIndex(dst_header, dst_special_header);
+    ReceiveList dst_recv_list(dst_msg_ptr, dst_message_buffer, dst_page_table, dst_header,
+                              dst_special_header, dst_buffer_size, src_end_offset,
+                              dst_recv_list_idx, !dst_user);
+
+    // Ensure that the source special header isn't invalid.
+    const bool src_has_special_header = src_header.GetHasSpecialHeader();
+    if (src_has_special_header) {
+        // Sending move handles from client -> server is not allowed.
+        R_UNLESS(src_special_header.GetMoveHandleCount() == 0, ResultInvalidCombination);
+    }
+
+    // Prepare for further processing.
+    int pointer_key = 0;
+    int offset = dst_msg.Set(src_header);
+
+    // Set up a guard to make sure that we end up in a clean state on error.
+    ON_RESULT_FAILURE {
+        // Cleanup mappings.
+        CleanupMap(request, std::addressof(dst_process), std::addressof(src_page_table));
+
+        // Cleanup special data.
+        if (src_header.GetHasSpecialHeader()) {
+            CleanupSpecialData(dst_process, dst_msg_ptr, dst_buffer_size);
         }
 
-        // All requests must have threads.
-        ASSERT(thread != nullptr);
+        // Cleanup the header if the receive list isn't broken.
+        if (!recv_list_broken) {
+            dst_msg.Set(dst_header);
+            if (dst_header.GetHasSpecialHeader()) {
+                dst_msg.Set(dst_special_header);
+            }
+        }
+    };
+
+    // Process any special data.
+    if (src_header.GetHasSpecialHeader()) {
+        // After we process, make sure we track whether the receive list is broken.
+        SCOPE_EXIT({
+            if (offset > dst_recv_list_idx) {
+                recv_list_broken = true;
+            }
+        });
 
-        // Ensure that we close the request when done.
-        SCOPE_EXIT({ request->Close(); });
+        // Process special data.
+        R_TRY(ProcessMessageSpecialData<false>(offset, dst_process, src_process, src_thread,
+                                               dst_msg, src_msg, src_special_header));
+    }
 
-        // If we're terminating, close a reference to the thread and event.
-        if (terminate) {
-            thread->Close();
-            if (event != nullptr) {
-                event->Close();
+    // Process any pointer buffers.
+    for (auto i = 0; i < src_header.GetPointerCount(); ++i) {
+        // After we process, make sure we track whether the receive list is broken.
+        SCOPE_EXIT({
+            if (offset > dst_recv_list_idx) {
+                recv_list_broken = true;
+            }
+        });
+
+        R_TRY(ProcessReceiveMessagePointerDescriptors(
+            offset, pointer_key, dst_page_table, src_page_table, dst_msg, src_msg, dst_recv_list,
+            dst_user && dst_header.GetReceiveListCount() ==
+                            MessageBuffer::MessageHeader::ReceiveListCountType_ToMessageBuffer));
+    }
+
+    // Process any map alias buffers.
+    for (auto i = 0; i < src_header.GetMapAliasCount(); ++i) {
+        // After we process, make sure we track whether the receive list is broken.
+        SCOPE_EXIT({
+            if (offset > dst_recv_list_idx) {
+                recv_list_broken = true;
+            }
+        });
+
+        // We process in order send, recv, exch. Buffers after send (recv/exch) are ReadWrite.
+        const KMemoryPermission perm = (i >= src_header.GetSendCount())
+                                           ? KMemoryPermission::UserReadWrite
+                                           : KMemoryPermission::UserRead;
+
+        // Buffer is send if it is send or exch.
+        const bool send = (i < src_header.GetSendCount()) ||
+                          (i >= src_header.GetSendCount() + src_header.GetReceiveCount());
+
+        R_TRY(ProcessReceiveMessageMapAliasDescriptors(offset, dst_page_table, src_page_table,
+                                                       dst_msg, src_msg, request, perm, send));
+    }
+
+    // Process any raw data.
+    if (const auto raw_count = src_header.GetRawCount(); raw_count != 0) {
+        // After we process, make sure we track whether the receive list is broken.
+        SCOPE_EXIT({
+            if (offset + raw_count > dst_recv_list_idx) {
+                recv_list_broken = true;
             }
+        });
+
+        // Get the offset and size.
+        const size_t offset_words = offset * sizeof(u32);
+        const size_t raw_size = raw_count * sizeof(u32);
+
+        if (!dst_user && !src_user) {
+            // Fast case is TLS -> TLS, do raw memcpy if we can.
+            std::memcpy(dst_msg_ptr + offset, src_msg_ptr + offset, raw_size);
+        } else if (dst_user) {
+            // Determine how much fast size we can copy.
+            const size_t max_fast_size = std::min<size_t>(offset_words + raw_size, PageSize);
+            const size_t fast_size = max_fast_size - offset_words;
+
+            // Determine source state; if user buffer, we require heap, and otherwise only linear
+            // mapped (to enable tls use).
+            const auto src_state =
+                src_user ? KMemoryState::FlagReferenceCounted : KMemoryState::FlagLinearMapped;
+
+            // Determine the source permission. User buffer should be unmapped + read, TLS should be
+            // user readable.
+            const KMemoryPermission src_perm = static_cast<KMemoryPermission>(
+                src_user ? KMemoryPermission::NotMapped | KMemoryPermission::KernelRead
+                         : KMemoryPermission::UserRead);
+
+            // Perform the fast part of the copy.
+            R_TRY(src_page_table.CopyMemoryFromLinearToKernel(
+                dst_msg_ptr + offset, fast_size, src_message_buffer + offset_words, src_state,
+                src_state, src_perm, KMemoryAttribute::Uncached, KMemoryAttribute::None));
+
+            // If the fast part of the copy didn't get everything, perform the slow part of the
+            // copy.
+            if (fast_size < raw_size) {
+                R_TRY(src_page_table.CopyMemoryFromHeapToHeap(
+                    dst_page_table, dst_message_buffer + max_fast_size, raw_size - fast_size,
+                    KMemoryState::FlagReferenceCounted, KMemoryState::FlagReferenceCounted,
+                    KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite,
+                    KMemoryAttribute::Uncached | KMemoryAttribute::Locked, KMemoryAttribute::Locked,
+                    src_message_buffer + max_fast_size, src_state, src_state, src_perm,
+                    KMemoryAttribute::Uncached, KMemoryAttribute::None));
+            }
+        } else /* if (src_user) */ {
+            // The source is a user buffer, so it should be unmapped + readable.
+            constexpr KMemoryPermission SourcePermission = static_cast<KMemoryPermission>(
+                KMemoryPermission::NotMapped | KMemoryPermission::KernelRead);
+
+            // Copy the memory.
+            R_TRY(src_page_table.CopyMemoryFromLinearToUser(
+                dst_message_buffer + offset_words, raw_size, src_message_buffer + offset_words,
+                KMemoryState::FlagReferenceCounted, KMemoryState::FlagReferenceCounted,
+                SourcePermission, KMemoryAttribute::Uncached, KMemoryAttribute::None));
         }
     }
 
-    // If we need to, reply.
-    if (event != nullptr && !cur_request) {
-        // There must be no mappings.
-        ASSERT(request->GetSendCount() == 0);
-        ASSERT(request->GetReceiveCount() == 0);
-        ASSERT(request->GetExchangeCount() == 0);
+    // We succeeded!
+    R_SUCCEED();
+}
 
-        // // Get the process and page table.
-        // KProcess *client_process = thread->GetOwnerProcess();
-        // auto& client_pt = client_process->GetPageTable();
+Result ProcessSendMessageReceiveMapping(KProcessPageTable& src_page_table,
+                                        KProcessPageTable& dst_page_table,
+                                        KProcessAddress client_address,
+                                        KProcessAddress server_address, size_t size,
+                                        KMemoryState src_state) {
+    // If the size is zero, there's nothing to process.
+    R_SUCCEED_IF(size == 0);
+
+    // Get the memory state and attribute mask to test.
+    KMemoryState test_state;
+    KMemoryAttribute test_attr_mask;
+    R_TRY(GetMapAliasTestStateAndAttributeMask(test_state, test_attr_mask, src_state));
+
+    // Determine buffer extents.
+    KProcessAddress aligned_dst_start = Common::AlignDown(GetInteger(client_address), PageSize);
+    KProcessAddress aligned_dst_end = Common::AlignUp(GetInteger(client_address) + size, PageSize);
+    KProcessAddress mapping_dst_start = Common::AlignUp(GetInteger(client_address), PageSize);
+    KProcessAddress mapping_dst_end =
+        Common::AlignDown(GetInteger(client_address) + size, PageSize);
+
+    KProcessAddress mapping_src_end =
+        Common::AlignDown(GetInteger(server_address) + size, PageSize);
+
+    // If the start of the buffer is unaligned, handle that.
+    if (aligned_dst_start != mapping_dst_start) {
+        ASSERT(client_address < mapping_dst_start);
+        const size_t copy_size = std::min<size_t>(size, mapping_dst_start - client_address);
+        R_TRY(dst_page_table.CopyMemoryFromUserToLinear(
+            client_address, copy_size, test_state, test_state, KMemoryPermission::UserReadWrite,
+            test_attr_mask, KMemoryAttribute::None, server_address));
+    }
 
-        // // Reply to the request.
-        // ReplyAsyncError(client_process, request->GetAddress(), request->GetSize(),
-        //                 ResultSessionClosed);
+    // If the end of the buffer is unaligned, handle that.
+    if (mapping_dst_end < aligned_dst_end &&
+        (aligned_dst_start == mapping_dst_start || aligned_dst_start < mapping_dst_end)) {
+        const size_t copy_size = client_address + size - mapping_dst_end;
+        R_TRY(dst_page_table.CopyMemoryFromUserToLinear(
+            mapping_dst_end, copy_size, test_state, test_state, KMemoryPermission::UserReadWrite,
+            test_attr_mask, KMemoryAttribute::None, mapping_src_end));
+    }
 
-        // // Unlock the buffer.
-        // // NOTE: Nintendo does not check the result of this.
-        // client_pt.UnlockForIpcUserBuffer(request->GetAddress(), request->GetSize());
+    R_SUCCEED();
+}
 
-        // Signal the event.
-        event->Signal();
+Result ProcessSendMessagePointerDescriptors(int& offset, int& pointer_key,
+                                            KProcessPageTable& src_page_table,
+                                            KProcessPageTable& dst_page_table,
+                                            const MessageBuffer& dst_msg,
+                                            const MessageBuffer& src_msg,
+                                            const ReceiveList& dst_recv_list, bool dst_user) {
+    // Get the offset at the start of processing.
+    const int cur_offset = offset;
+
+    // Get the pointer desc.
+    MessageBuffer::PointerDescriptor src_desc(src_msg, cur_offset);
+    offset += static_cast<int>(MessageBuffer::PointerDescriptor::GetDataSize() / sizeof(u32));
+
+    // Extract address/size.
+    const uint64_t src_pointer = src_desc.GetAddress();
+    const size_t recv_size = src_desc.GetSize();
+    uint64_t recv_pointer = 0;
+
+    // Process the buffer, if it has a size.
+    if (recv_size > 0) {
+        // If using indexing, set index.
+        if (dst_recv_list.IsIndex()) {
+            pointer_key = src_desc.GetIndex();
         }
+
+        // Get the buffer.
+        dst_recv_list.GetBuffer(recv_pointer, recv_size, pointer_key);
+        R_UNLESS(recv_pointer != 0, ResultOutOfResource);
+
+        // Perform the pointer data copy.
+        const bool dst_heap = dst_user && dst_recv_list.IsToMessageBuffer();
+        const auto dst_state =
+            dst_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::FlagLinearMapped;
+        const KMemoryPermission dst_perm =
+            dst_heap ? KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite
+                     : KMemoryPermission::UserReadWrite;
+        R_TRY(dst_page_table.CopyMemoryFromUserToLinear(
+            recv_pointer, recv_size, dst_state, dst_state, dst_perm, KMemoryAttribute::Uncached,
+            KMemoryAttribute::None, src_pointer));
     }
 
-    // Notify.
-    this->NotifyAvailable(ResultSessionClosed);
+    // Set the output descriptor.
+    dst_msg.Set(cur_offset, MessageBuffer::PointerDescriptor(reinterpret_cast<void*>(recv_pointer),
+                                                             recv_size, src_desc.GetIndex()));
+
+    R_SUCCEED();
 }
 
-bool KServerSession::IsSignaled() const {
-    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
+Result SendMessage(KernelCore& kernel, uint64_t src_message_buffer, size_t src_buffer_size,
+                   KPhysicalAddress src_message_paddr, KThread& dst_thread,
+                   uint64_t dst_message_buffer, size_t dst_buffer_size, KServerSession* session,
+                   KSessionRequest* request) {
+    // Prepare variables for send.
+    KThread& src_thread = GetCurrentThread(kernel);
+    KProcess& dst_process = *(dst_thread.GetOwnerProcess());
+    KProcess& src_process = *(src_thread.GetOwnerProcess());
+    auto& dst_page_table = dst_process.GetPageTable();
+    auto& src_page_table = src_process.GetPageTable();
+
+    // NOTE: Session is used only for debugging, and so may go unused.
+    (void)session;
+
+    // Determine the message buffers.
+    u32 *dst_msg_ptr, *src_msg_ptr;
+    bool dst_user, src_user;
+
+    if (dst_message_buffer) {
+        // NOTE: Nintendo does not check the result of this GetPhysicalAddress call.
+        dst_msg_ptr = dst_page_table.GetMemory().GetPointer<u32>(dst_message_buffer);
+        dst_user = true;
+    } else {
+        dst_msg_ptr = dst_page_table.GetMemory().GetPointer<u32>(dst_thread.GetTlsAddress());
+        dst_buffer_size = MessageBufferSize;
+        dst_message_buffer = GetInteger(dst_thread.GetTlsAddress());
+        dst_user = false;
+    }
 
-    // If the client is closed, we're always signaled.
-    if (m_parent->IsClientClosed()) {
-        return true;
+    if (src_message_buffer) {
+        src_msg_ptr = src_page_table.GetMemory().GetPointer<u32>(src_message_buffer);
+        src_user = true;
+    } else {
+        src_msg_ptr = src_page_table.GetMemory().GetPointer<u32>(src_thread.GetTlsAddress());
+        src_buffer_size = MessageBufferSize;
+        src_message_buffer = GetInteger(src_thread.GetTlsAddress());
+        src_user = false;
     }
 
-    // Otherwise, we're signaled if we have a request and aren't handling one.
-    return !m_request_list.empty() && m_current_request == nullptr;
+    // Parse the headers.
+    const MessageBuffer dst_msg(dst_msg_ptr, dst_buffer_size);
+    const MessageBuffer src_msg(src_msg_ptr, src_buffer_size);
+    const MessageBuffer::MessageHeader dst_header(dst_msg);
+    const MessageBuffer::MessageHeader src_header(src_msg);
+    const MessageBuffer::SpecialHeader dst_special_header(dst_msg, dst_header);
+    const MessageBuffer::SpecialHeader src_special_header(src_msg, src_header);
+
+    // Get the end of the source message.
+    const size_t src_end_offset =
+        MessageBuffer::GetRawDataIndex(src_header, src_special_header) + src_header.GetRawCount();
+
+    // Declare variables for processing.
+    int offset = 0;
+    int pointer_key = 0;
+    bool processed_special_data = false;
+
+    // Send the message.
+    {
+        // Make sure that we end up in a clean state on error.
+        ON_RESULT_FAILURE {
+            // Cleanup special data.
+            if (processed_special_data) {
+                if (src_header.GetHasSpecialHeader()) {
+                    CleanupSpecialData(dst_process, dst_msg_ptr, dst_buffer_size);
+                }
+            } else {
+                CleanupServerHandles(kernel, src_user ? src_message_buffer : 0, src_buffer_size,
+                                     src_message_paddr);
+            }
+
+            // Cleanup mappings.
+            CleanupMap(request, std::addressof(src_process), std::addressof(dst_page_table));
+        };
+
+        // Ensure that the headers fit.
+        R_UNLESS(MessageBuffer::GetMessageBufferSize(src_header, src_special_header) <=
+                     src_buffer_size,
+                 ResultInvalidCombination);
+        R_UNLESS(MessageBuffer::GetMessageBufferSize(dst_header, dst_special_header) <=
+                     dst_buffer_size,
+                 ResultInvalidCombination);
+
+        // Ensure the receive list offset is after the end of raw data.
+        if (dst_header.GetReceiveListOffset()) {
+            R_UNLESS(dst_header.GetReceiveListOffset() >=
+                         MessageBuffer::GetRawDataIndex(dst_header, dst_special_header) +
+                             dst_header.GetRawCount(),
+                     ResultInvalidCombination);
+        }
+
+        // Ensure that the destination buffer is big enough to receive the source.
+        R_UNLESS(dst_buffer_size >= src_end_offset * sizeof(u32), ResultMessageTooLarge);
+
+        // Replies must have no buffers.
+        R_UNLESS(src_header.GetSendCount() == 0, ResultInvalidCombination);
+        R_UNLESS(src_header.GetReceiveCount() == 0, ResultInvalidCombination);
+        R_UNLESS(src_header.GetExchangeCount() == 0, ResultInvalidCombination);
+
+        // Get the receive list.
+        const s32 dst_recv_list_idx =
+            MessageBuffer::GetReceiveListIndex(dst_header, dst_special_header);
+        ReceiveList dst_recv_list(dst_msg_ptr, dst_message_buffer, dst_page_table, dst_header,
+                                  dst_special_header, dst_buffer_size, src_end_offset,
+                                  dst_recv_list_idx, !dst_user);
+
+        // Handle any receive buffers.
+        for (size_t i = 0; i < request->GetReceiveCount(); ++i) {
+            R_TRY(ProcessSendMessageReceiveMapping(
+                src_page_table, dst_page_table, request->GetReceiveClientAddress(i),
+                request->GetReceiveServerAddress(i), request->GetReceiveSize(i),
+                request->GetReceiveMemoryState(i)));
+        }
+
+        // Handle any exchange buffers.
+        for (size_t i = 0; i < request->GetExchangeCount(); ++i) {
+            R_TRY(ProcessSendMessageReceiveMapping(
+                src_page_table, dst_page_table, request->GetExchangeClientAddress(i),
+                request->GetExchangeServerAddress(i), request->GetExchangeSize(i),
977 request->GetExchangeMemoryState(i)));
978 }
979
980 // Set the header.
981 offset = dst_msg.Set(src_header);
982
983 // Process any special data.
984 ASSERT(GetCurrentThreadPointer(kernel) == std::addressof(src_thread));
985 processed_special_data = true;
986 if (src_header.GetHasSpecialHeader()) {
987 R_TRY(ProcessMessageSpecialData<true>(offset, dst_process, src_process, src_thread,
988 dst_msg, src_msg, src_special_header));
989 }
990
991 // Process any pointer buffers.
992 for (auto i = 0; i < src_header.GetPointerCount(); ++i) {
993 R_TRY(ProcessSendMessagePointerDescriptors(
994 offset, pointer_key, src_page_table, dst_page_table, dst_msg, src_msg,
995 dst_recv_list,
996 dst_user &&
997 dst_header.GetReceiveListCount() ==
998 MessageBuffer::MessageHeader::ReceiveListCountType_ToMessageBuffer));
999 }
1000
1001 // Clear any map alias buffers.
1002 for (auto i = 0; i < src_header.GetMapAliasCount(); ++i) {
1003 offset = dst_msg.Set(offset, MessageBuffer::MapAliasDescriptor());
1004 }
1005
1006 // Process any raw data.
1007 if (const auto raw_count = src_header.GetRawCount(); raw_count != 0) {
1008 // Get the offset and size.
1009 const size_t offset_words = offset * sizeof(u32);
1010 const size_t raw_size = raw_count * sizeof(u32);
1011
1012 if (!dst_user && !src_user) {
1013 // Fast case is TLS -> TLS, do raw memcpy if we can.
1014 std::memcpy(dst_msg_ptr + offset, src_msg_ptr + offset, raw_size);
1015 } else if (src_user) {
1016 // Determine how much fast size we can copy.
1017 const size_t max_fast_size = std::min<size_t>(offset_words + raw_size, PageSize);
1018 const size_t fast_size = max_fast_size - offset_words;
1019
1020 // Determine dst state; if user buffer, we require heap, and otherwise only linear
1021 // mapped (to enable tls use).
1022 const auto dst_state =
1023 dst_user ? KMemoryState::FlagReferenceCounted : KMemoryState::FlagLinearMapped;
1024
1025 // Determine the dst permission. User buffer should be unmapped + read, TLS should
1026 // be user readable.
1027 const KMemoryPermission dst_perm =
1028 dst_user ? KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite
1029 : KMemoryPermission::UserReadWrite;
1030
1031 // Perform the fast part of the copy.
1032 R_TRY(dst_page_table.CopyMemoryFromKernelToLinear(
1033 dst_message_buffer + offset_words, fast_size, dst_state, dst_state, dst_perm,
1034 KMemoryAttribute::Uncached, KMemoryAttribute::None, src_msg_ptr + offset));
1035
1036 // If the fast part of the copy didn't get everything, perform the slow part of the
1037 // copy.
1038 if (fast_size < raw_size) {
1039 R_TRY(dst_page_table.CopyMemoryFromHeapToHeap(
1040 dst_page_table, dst_message_buffer + max_fast_size, raw_size - fast_size,
1041 dst_state, dst_state, dst_perm, KMemoryAttribute::Uncached,
1042 KMemoryAttribute::None, src_message_buffer + max_fast_size,
1043 KMemoryState::FlagReferenceCounted, KMemoryState::FlagReferenceCounted,
1044 KMemoryPermission::NotMapped | KMemoryPermission::KernelRead,
1045 KMemoryAttribute::Uncached | KMemoryAttribute::Locked,
1046 KMemoryAttribute::Locked));
1047 }
1048 } else /* if (dst_user) */ {
1049 // The destination is a user buffer, so it should be unmapped + readable.
1050 constexpr KMemoryPermission DestinationPermission =
1051 KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite;
1052
1053 // Copy the memory.
1054 R_TRY(dst_page_table.CopyMemoryFromUserToLinear(
1055 dst_message_buffer + offset_words, raw_size, KMemoryState::FlagReferenceCounted,
1056 KMemoryState::FlagReferenceCounted, DestinationPermission,
1057 KMemoryAttribute::Uncached, KMemoryAttribute::None,
1058 src_message_buffer + offset_words));
1059 }
1060 }
1061 }
1062
1063 // Perform (and validate) any remaining cleanup.
1064 R_RETURN(CleanupMap(request, std::addressof(src_process), std::addressof(dst_page_table)));
267} 1065}
268 1066
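The raw-data branch above splits the copy at the first page boundary: the part of the message still inside its first page can be copied through the already-resident kernel pointer, and only the remainder takes the slower page-table path. A small self-contained sketch of just that arithmetic (the sizes are made up):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
        constexpr std::size_t PageSize = 0x1000;
        const std::size_t offset_words = 0x180; // offset * sizeof(u32)
        const std::size_t raw_size = 0x2000;    // raw_count * sizeof(u32)

        // Fast region: whatever of the raw data still fits in the first page.
        const std::size_t max_fast_size = std::min(offset_words + raw_size, PageSize);
        const std::size_t fast_size = max_fast_size - offset_words;

        // Prints fast=3712 slow=4480 for these inputs.
        std::printf("fast=%zu slow=%zu\n", fast_size, raw_size - fast_size);
        return 0;
    }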
269Result KServerSession::OnRequest(KSessionRequest* request) { 1067void ReplyAsyncError(KProcess* to_process, uint64_t to_msg_buf, size_t to_msg_buf_size,
270 // Create the wait queue. 1068 Result result) {
271 ThreadQueueImplForKServerSessionRequest wait_queue{m_kernel}; 1069 // Convert the address to a linear pointer.
1070 u32* to_msg = to_process->GetMemory().GetPointer<u32>(to_msg_buf);
1071
1072 // Set the error.
1073 MessageBuffer msg(to_msg, to_msg_buf_size);
1074 msg.SetAsyncResult(result);
1075}
1076
1077} // namespace
1078
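ReplyAsyncError writes the failure straight into the client's message buffer, so an async client observes the result when its event fires. A rough model with a hypothetical buffer layout (the real encoding is encapsulated by MessageBuffer::SetAsyncResult; the word positions below are illustrative only):

    #include <cstdint>
    #include <cstdio>

    struct Result {
        std::uint32_t value;
    };

    // Hypothetical layout: two header words followed by raw data. The point is
    // only that the error lands in the client's buffer before its event signals.
    void SetAsyncResult(std::uint32_t* msg, Result r) {
        msg[0] = 0;       // null header word (illustrative)
        msg[1] = 0;       // second header word (illustrative)
        msg[2] = r.value; // result as the first raw-data word
    }

    int main() {
        std::uint32_t buffer[4]{};
        SetAsyncResult(buffer, Result{0xCA01}); // illustrative result code
        std::printf("raw[0]=%#x\n", buffer[2]);
        return 0;
    }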
1079KServerSession::KServerSession(KernelCore& kernel)
1080 : KSynchronizationObject{kernel}, m_lock{m_kernel} {}
1081
1082KServerSession::~KServerSession() = default;
1083
1084void KServerSession::Destroy() {
1085 m_parent->OnServerClosed();
1086
1087 this->CleanupRequests();
1088
1089 m_parent->Close();
1090}
1091
1092Result KServerSession::ReceiveRequest(uintptr_t server_message, uintptr_t server_buffer_size,
1093 KPhysicalAddress server_message_paddr,
1094 std::shared_ptr<Service::HLERequestContext>* out_context,
1095 std::weak_ptr<Service::SessionRequestManager> manager) {
1096 // Lock the session.
1097 KScopedLightLock lk{m_lock};
1098
1099 // Get the request and client thread.
1100 KSessionRequest* request;
1101 KThread* client_thread;
272 1102
273 { 1103 {
274 // Lock the scheduler.
275 KScopedSchedulerLock sl{m_kernel}; 1104 KScopedSchedulerLock sl{m_kernel};
276 1105
277 // Ensure that we can handle new requests. 1106 // Ensure that we can service the request.
278 R_UNLESS(!m_parent->IsServerClosed(), ResultSessionClosed); 1107 R_UNLESS(!m_parent->IsClientClosed(), ResultSessionClosed);
279 1108
280 // Check that we're not terminating. 1109 // Ensure we aren't already servicing a request.
281 R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(), ResultTerminationRequested); 1110 R_UNLESS(m_current_request == nullptr, ResultNotFound);
282 1111
283 // Get whether we're empty. 1112 // Ensure we have a request to service.
284 const bool was_empty = m_request_list.empty(); 1113 R_UNLESS(!m_request_list.empty(), ResultNotFound);
285 1114
286 // Add the request to the list. 1115 // Pop the first request from the list.
287 request->Open(); 1116 request = std::addressof(m_request_list.front());
288 m_request_list.push_back(*request); 1117 m_request_list.pop_front();
289 1118
290 // If we were empty, signal. 1119 // Get the thread for the request.
291 if (was_empty) { 1120 client_thread = request->GetThread();
292 this->NotifyAvailable(); 1121 R_UNLESS(client_thread != nullptr, ResultSessionClosed);
1122
1123 // Open the client thread.
1124 client_thread->Open();
1125 }
1126
1127 SCOPE_EXIT({ client_thread->Close(); });
1128
1129 // Set the request as our current.
1130 m_current_request = request;
1131
1132 // Get the client address.
1133 uint64_t client_message = request->GetAddress();
1134 size_t client_buffer_size = request->GetSize();
1135 bool recv_list_broken = false;
1136
1137 // Receive the message.
1138 Result result = ResultSuccess;
1139
1140 if (out_context != nullptr) {
1141 // HLE request.
1142 if (!client_message) {
1143 client_message = GetInteger(client_thread->GetTlsAddress());
293 } 1144 }
1145 Core::Memory::Memory& memory{client_thread->GetOwnerProcess()->GetMemory()};
1146 u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(client_message))};
1147 *out_context =
1148 std::make_shared<Service::HLERequestContext>(m_kernel, memory, this, client_thread);
1149 (*out_context)->SetSessionRequestManager(manager);
1150 (*out_context)
1151 ->PopulateFromIncomingCommandBuffer(*client_thread->GetOwnerProcess(), cmd_buf);
1152 // We succeeded.
1153 R_SUCCEED();
1154 } else {
1155 result = ReceiveMessage(m_kernel, recv_list_broken, server_message, server_buffer_size,
1156 server_message_paddr, *client_thread, client_message,
1157 client_buffer_size, this, request);
1158 }
294 1159
295 // If we have a request event, this is asynchronous, and we don't need to wait. 1160 // Handle cleanup on receive failure.
296 R_SUCCEED_IF(request->GetEvent() != nullptr); 1161 if (R_FAILED(result)) {
1162 // Cache the result to return it to the client.
1163 const Result result_for_client = result;
297 1164
298 // This is a synchronous request, so we should wait for our request to complete. 1165 // Clear the current request.
299 GetCurrentThread(m_kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC); 1166 {
300 GetCurrentThread(m_kernel).BeginWait(std::addressof(wait_queue)); 1167 KScopedSchedulerLock sl(m_kernel);
1168 ASSERT(m_current_request == request);
1169 m_current_request = nullptr;
1170 if (!m_request_list.empty()) {
1171 this->NotifyAvailable();
1172 }
1173 }
1174
1175 // Reply to the client.
1176 {
1177 // After we reply, close our reference to the request.
1178 SCOPE_EXIT({ request->Close(); });
1179
1180 // Get the event to check whether the request is async.
1181 if (KEvent* event = request->GetEvent(); event != nullptr) {
1182 // The client sent an async request.
1183 KProcess* client = client_thread->GetOwnerProcess();
1184 auto& client_pt = client->GetPageTable();
1185
1186 // Send the async result.
1187 if (R_FAILED(result_for_client)) {
1188 ReplyAsyncError(client, client_message, client_buffer_size, result_for_client);
1189 }
1190
1191 // Unlock the client buffer.
1192 // NOTE: Nintendo does not check the result of this.
1193 client_pt.UnlockForIpcUserBuffer(client_message, client_buffer_size);
1194
1195 // Signal the event.
1196 event->Signal();
1197 } else {
1198 // End the client thread's wait.
1199 KScopedSchedulerLock sl(m_kernel);
1200
1201 if (!client_thread->IsTerminationRequested()) {
1202 client_thread->EndWait(result_for_client);
1203 }
1204 }
1205 }
1206
1207 // Set the server result.
1208 if (recv_list_broken) {
1209 result = ResultReceiveListBroken;
1210 } else {
1211 result = ResultNotFound;
1212 }
301 } 1213 }
302 1214
303 return GetCurrentThread(m_kernel).GetWaitResult(); 1215 R_RETURN(result);
304} 1216}
305 1217
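Worth noting in the failure path above: a request that carries an event is asynchronous, so the error is delivered through the buffer plus a signal, while a synchronous client is simply woken with the result. A compact sketch of that decision, with stand-in Event/Thread types:

    #include <cstdio>
    #include <optional>

    struct Event {
        void Signal() { std::printf("event signaled\n"); }
    };
    struct Thread {
        void EndWait(int r) { std::printf("client woken, result=%d\n", r); }
    };

    void NotifyClient(std::optional<Event>& event, Thread& client, int result) {
        if (event) {
            // Async request: the error is written back to the client buffer
            // (ReplyAsyncError in the real code), then the event fires.
            event->Signal();
        } else {
            // Sync request: just end the client's wait with the failure code.
            client.EndWait(result);
        }
    }

    int main() {
        Thread t;
        std::optional<Event> none;
        NotifyClient(none, t, -1);
        std::optional<Event> ev{Event{}};
        NotifyClient(ev, t, -1);
        return 0;
    }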
306Result KServerSession::SendReply(bool is_hle) { 1218Result KServerSession::SendReply(uintptr_t server_message, uintptr_t server_buffer_size,
1219 KPhysicalAddress server_message_paddr, bool is_hle) {
307 // Lock the session. 1220 // Lock the session.
308 KScopedLightLock lk{m_lock}; 1221 KScopedLightLock lk{m_lock};
309 1222
@@ -327,7 +1240,7 @@ Result KServerSession::SendReply(bool is_hle) {
327 SCOPE_EXIT({ request->Close(); }); 1240 SCOPE_EXIT({ request->Close(); });
328 1241
329 // Extract relevant information from the request. 1242 // Extract relevant information from the request.
330 const uintptr_t client_message = request->GetAddress(); 1243 const uint64_t client_message = request->GetAddress();
331 const size_t client_buffer_size = request->GetSize(); 1244 const size_t client_buffer_size = request->GetSize();
332 KThread* client_thread = request->GetThread(); 1245 KThread* client_thread = request->GetThread();
333 KEvent* event = request->GetEvent(); 1246 KEvent* event = request->GetEvent();
@@ -342,31 +1255,28 @@ Result KServerSession::SendReply(bool is_hle) {
342 // HLE servers write directly to a pointer to the thread command buffer. Therefore 1255 // HLE servers write directly to a pointer to the thread command buffer. Therefore
343 // the reply has already been written in this case. 1256 // the reply has already been written in this case.
344 } else { 1257 } else {
345 Core::Memory::Memory& memory{client_thread->GetOwnerProcess()->GetMemory()}; 1258 result = SendMessage(m_kernel, server_message, server_buffer_size, server_message_paddr,
346 KThread* server_thread = GetCurrentThreadPointer(m_kernel); 1259 *client_thread, client_message, client_buffer_size, this, request);
347 KProcess& src_process = *client_thread->GetOwnerProcess(); 1260 }
348 KProcess& dst_process = *server_thread->GetOwnerProcess(); 1261 } else if (!is_hle) {
349 UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess()); 1262 // Otherwise, we'll need to do some cleanup.
350 1263 KProcess* server_process = request->GetServerProcess();
351 auto* src_msg_buffer = memory.GetPointer<u32>(server_thread->GetTlsAddress()); 1264 KProcess* client_process =
352 auto* dst_msg_buffer = memory.GetPointer<u32>(client_message); 1265 (client_thread != nullptr) ? client_thread->GetOwnerProcess() : nullptr;
353 std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size); 1266 KProcessPageTable* client_page_table =
354 1267 (client_process != nullptr) ? std::addressof(client_process->GetPageTable()) : nullptr;
355 // Translate special header ad-hoc. 1268
356 MessageBuffer src_msg(src_msg_buffer, client_buffer_size); 1269 // Cleanup server handles.
357 MessageBuffer::MessageHeader src_header(src_msg); 1270 result = CleanupServerHandles(m_kernel, server_message, server_buffer_size,
358 MessageBuffer::SpecialHeader src_special_header(src_msg, src_header); 1271 server_message_paddr);
359 if (src_header.GetHasSpecialHeader()) { 1272
360 MessageBuffer dst_msg(dst_msg_buffer, client_buffer_size); 1273 // Cleanup mappings.
361 result = ProcessMessageSpecialData<true>(dst_process, src_process, *server_thread, 1274 Result cleanup_map_result = CleanupMap(request, server_process, client_page_table);
362 dst_msg, src_msg, src_special_header); 1275
363 if (R_FAILED(result)) { 1276 // If we successfully cleaned up handles, use the map cleanup result as our result.
364 CleanupSpecialData(dst_process, dst_msg_buffer, client_buffer_size); 1277 if (R_SUCCEEDED(result)) {
365 } 1278 result = cleanup_map_result;
366 }
367 } 1279 }
368 } else {
369 result = ResultSessionClosed;
370 } 1280 }
371 1281
372 // Select a result for the client. 1282 // Select a result for the client.
@@ -381,19 +1291,18 @@ Result KServerSession::SendReply(bool is_hle) {
381 // If there's a client thread, update it. 1291 // If there's a client thread, update it.
382 if (client_thread != nullptr) { 1292 if (client_thread != nullptr) {
383 if (event != nullptr) { 1293 if (event != nullptr) {
384 // // Get the client process/page table. 1294 // Get the client process/page table.
385 // KProcess *client_process = client_thread->GetOwnerProcess(); 1295 KProcess* client_process = client_thread->GetOwnerProcess();
386 // KProcessPageTable *client_page_table = std::addressof(client_process->PageTable()); 1296 KProcessPageTable* client_page_table = std::addressof(client_process->GetPageTable());
387 1297
388 // // If we need to, reply with an async error. 1298 // If we need to, reply with an async error.
389 // if (R_FAILED(client_result)) { 1299 if (R_FAILED(client_result)) {
390 // ReplyAsyncError(client_process, client_message, client_buffer_size, 1300 ReplyAsyncError(client_process, client_message, client_buffer_size, client_result);
391 // client_result); 1301 }
392 // }
393 1302
394 // // Unlock the client buffer. 1303 // Unlock the client buffer.
395 // // NOTE: Nintendo does not check the result of this. 1304 // NOTE: Nintendo does not check the result of this.
396 // client_page_table->UnlockForIpcUserBuffer(client_message, client_buffer_size); 1305 client_page_table->UnlockForIpcUserBuffer(client_message, client_buffer_size);
397 1306
398 // Signal the event. 1307 // Signal the event.
399 event->Signal(); 1308 event->Signal();
@@ -410,91 +1319,53 @@ Result KServerSession::SendReply(bool is_hle) {
410 R_RETURN(result); 1319 R_RETURN(result);
411} 1320}
412 1321
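The non-HLE cleanup branch combines two results: handle cleanup always runs first, and the map-cleanup result is only adopted when handle cleanup succeeded. Sketch of the pattern (Result here is a plain int stand-in):

    #include <cstdio>

    using Result = int; // stand-in: 0 == success
    constexpr Result ResultSuccess = 0;

    Result CleanupServerHandles() { return ResultSuccess; }
    Result CleanupMap() { return 42; } // pretend map cleanup failed

    int main() {
        Result result = CleanupServerHandles();
        const Result cleanup_map_result = CleanupMap();
        if (result == ResultSuccess) {
            result = cleanup_map_result; // only adopted if handles cleaned up
        }
        std::printf("final result=%d\n", result);
        return 0;
    }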
413Result KServerSession::ReceiveRequest(std::shared_ptr<Service::HLERequestContext>* out_context, 1322Result KServerSession::OnRequest(KSessionRequest* request) {
414 std::weak_ptr<Service::SessionRequestManager> manager) { 1323 // Create the wait queue.
415 // Lock the session. 1324 ThreadQueueImplForKServerSessionRequest wait_queue{m_kernel};
416 KScopedLightLock lk{m_lock};
417
418 // Get the request and client thread.
419 KSessionRequest* request;
420 KThread* client_thread;
421 1325
422 { 1326 {
1327 // Lock the scheduler.
423 KScopedSchedulerLock sl{m_kernel}; 1328 KScopedSchedulerLock sl{m_kernel};
424 1329
425 // Ensure that we can service the request. 1330 // Ensure that we can handle new requests.
426 R_UNLESS(!m_parent->IsClientClosed(), ResultSessionClosed); 1331 R_UNLESS(!m_parent->IsServerClosed(), ResultSessionClosed);
427
428 // Ensure we aren't already servicing a request.
429 R_UNLESS(m_current_request == nullptr, ResultNotFound);
430 1332
431 // Ensure we have a request to service. 1333 // Check that we're not terminating.
432 R_UNLESS(!m_request_list.empty(), ResultNotFound); 1334 R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(), ResultTerminationRequested);
433 1335
434 // Pop the first request from the list. 1336 // Get whether we're empty.
435 request = std::addressof(m_request_list.front()); 1337 const bool was_empty = m_request_list.empty();
436 m_request_list.pop_front();
437 1338
438 // Get the thread for the request. 1339 // Add the request to the list.
439 client_thread = request->GetThread(); 1340 request->Open();
440 R_UNLESS(client_thread != nullptr, ResultSessionClosed); 1341 m_request_list.push_back(*request);
441 1342
442 // Open the client thread. 1343 // If we were empty, signal.
443 client_thread->Open(); 1344 if (was_empty) {
444 } 1345 this->NotifyAvailable();
1346 }
445 1347
446 SCOPE_EXIT({ client_thread->Close(); }); 1348 // If we have a request event, this is asynchronous, and we don't need to wait.
1349 R_SUCCEED_IF(request->GetEvent() != nullptr);
447 1350
448 // Set the request as our current. 1351 // This is a synchronous request, so we should wait for our request to complete.
449 m_current_request = request; 1352 GetCurrentThread(m_kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
1353 GetCurrentThread(m_kernel).BeginWait(std::addressof(wait_queue));
1354 }
450 1355
451 // Get the client address. 1356 return GetCurrentThread(m_kernel).GetWaitResult();
452 uintptr_t client_message = request->GetAddress(); 1357}
453 size_t client_buffer_size = request->GetSize();
454 // bool recv_list_broken = false;
455 1358
456 if (!client_message) { 1359bool KServerSession::IsSignaled() const {
457 client_message = GetInteger(client_thread->GetTlsAddress()); 1360 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
458 client_buffer_size = MessageBufferSize;
459 }
460 1361
461 // Receive the message. 1362 // If the client is closed, we're always signaled.
462 Core::Memory::Memory& memory{client_thread->GetOwnerProcess()->GetMemory()}; 1363 if (m_parent->IsClientClosed()) {
463 if (out_context != nullptr) { 1364 return true;
464 // HLE request.
465 u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(client_message))};
466 *out_context =
467 std::make_shared<Service::HLERequestContext>(m_kernel, memory, this, client_thread);
468 (*out_context)->SetSessionRequestManager(manager);
469 (*out_context)
470 ->PopulateFromIncomingCommandBuffer(*client_thread->GetOwnerProcess(), cmd_buf);
471 } else {
472 KThread* server_thread = GetCurrentThreadPointer(m_kernel);
473 KProcess& src_process = *client_thread->GetOwnerProcess();
474 KProcess& dst_process = *server_thread->GetOwnerProcess();
475 UNIMPLEMENTED_IF(client_thread->GetOwnerProcess() != server_thread->GetOwnerProcess());
476
477 auto* src_msg_buffer = memory.GetPointer<u32>(client_message);
478 auto* dst_msg_buffer = memory.GetPointer<u32>(server_thread->GetTlsAddress());
479 std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size);
480
481 // Translate special header ad-hoc.
482 // TODO: fix this mess
483 MessageBuffer src_msg(src_msg_buffer, client_buffer_size);
484 MessageBuffer::MessageHeader src_header(src_msg);
485 MessageBuffer::SpecialHeader src_special_header(src_msg, src_header);
486 if (src_header.GetHasSpecialHeader()) {
487 MessageBuffer dst_msg(dst_msg_buffer, client_buffer_size);
488 Result res = ProcessMessageSpecialData<false>(dst_process, src_process, *client_thread,
489 dst_msg, src_msg, src_special_header);
490 if (R_FAILED(res)) {
491 CleanupSpecialData(dst_process, dst_msg_buffer, client_buffer_size);
492 }
493 }
494 } 1365 }
495 1366
496 // We succeeded. 1367 // Otherwise, we're signaled if we have a request and aren't handling one.
497 R_SUCCEED(); 1368 return !m_request_list.empty() && m_current_request == nullptr;
498} 1369}
499 1370
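IsSignaled reduces to a small predicate: signaled when the client endpoint is closed, or when a request is queued and none is currently being serviced. The same logic as a standalone function over plain bools:

    #include <cstdio>

    bool IsSignaled(bool client_closed, bool request_list_empty, bool servicing) {
        if (client_closed) {
            return true; // a closed client always signals
        }
        return !request_list_empty && !servicing;
    }

    int main() {
        std::printf("%d %d %d\n",
                    IsSignaled(true, true, false),   // 1: client closed
                    IsSignaled(false, false, false), // 1: pending, idle
                    IsSignaled(false, false, true)); // 0: already servicing
        return 0;
    }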
500void KServerSession::CleanupRequests() { 1371void KServerSession::CleanupRequests() {
@@ -527,31 +1398,30 @@ void KServerSession::CleanupRequests() {
527 SCOPE_EXIT({ request->Close(); }); 1398 SCOPE_EXIT({ request->Close(); });
528 1399
529 // Extract relevant information from the request. 1400 // Extract relevant information from the request.
530 // const uintptr_t client_message = request->GetAddress(); 1401 const uint64_t client_message = request->GetAddress();
531 // const size_t client_buffer_size = request->GetSize(); 1402 const size_t client_buffer_size = request->GetSize();
532 KThread* client_thread = request->GetThread(); 1403 KThread* client_thread = request->GetThread();
533 KEvent* event = request->GetEvent(); 1404 KEvent* event = request->GetEvent();
534 1405
535 // KProcess *server_process = request->GetServerProcess(); 1406 KProcess* server_process = request->GetServerProcess();
536 // KProcess *client_process = (client_thread != nullptr) ? 1407 KProcess* client_process =
537 // client_thread->GetOwnerProcess() : nullptr; 1408 (client_thread != nullptr) ? client_thread->GetOwnerProcess() : nullptr;
538 // KProcessPageTable *client_page_table = (client_process != nullptr) ? 1409 KProcessPageTable* client_page_table =
539 // std::addressof(client_process->GetPageTable()) 1410 (client_process != nullptr) ? std::addressof(client_process->GetPageTable()) : nullptr;
540 // : nullptr;
541 1411
542 // Cleanup the mappings. 1412 // Cleanup the mappings.
543 // Result result = CleanupMap(request, server_process, client_page_table); 1413 Result result = CleanupMap(request, server_process, client_page_table);
544 1414
545 // If there's a client thread, update it. 1415 // If there's a client thread, update it.
546 if (client_thread != nullptr) { 1416 if (client_thread != nullptr) {
547 if (event != nullptr) { 1417 if (event != nullptr) {
548 // // We need to reply async. 1418 // We need to reply async.
549 // ReplyAsyncError(client_process, client_message, client_buffer_size, 1419 ReplyAsyncError(client_process, client_message, client_buffer_size,
550 // (R_SUCCEEDED(result) ? ResultSessionClosed : result)); 1420 (R_SUCCEEDED(result) ? ResultSessionClosed : result));
551 1421
552 // // Unlock the client buffer. 1422 // Unlock the client buffer.
553 // NOTE: Nintendo does not check the result of this. 1423 // NOTE: Nintendo does not check the result of this.
554 // client_page_table->UnlockForIpcUserBuffer(client_message, client_buffer_size); 1424 client_page_table->UnlockForIpcUserBuffer(client_message, client_buffer_size);
555 1425
556 // Signal the event. 1426 // Signal the event.
557 event->Signal(); 1427 event->Signal();
@@ -567,4 +1437,97 @@ void KServerSession::CleanupRequests() {
567 } 1437 }
568} 1438}
569 1439
1440void KServerSession::OnClientClosed() {
1441 KScopedLightLock lk{m_lock};
1442
1443 // Handle any pending requests.
1444 KSessionRequest* prev_request = nullptr;
1445 while (true) {
1446 // Declare variables for processing the request.
1447 KSessionRequest* request = nullptr;
1448 KEvent* event = nullptr;
1449 KThread* thread = nullptr;
1450 bool cur_request = false;
1451 bool terminate = false;
1452
1453 // Get the next request.
1454 {
1455 KScopedSchedulerLock sl{m_kernel};
1456
1457 if (m_current_request != nullptr && m_current_request != prev_request) {
1458 // Set the request, open a reference as we process it.
1459 request = m_current_request;
1460 request->Open();
1461 cur_request = true;
1462
1463 // Get thread and event for the request.
1464 thread = request->GetThread();
1465 event = request->GetEvent();
1466
1467 // If the thread is terminating, handle that.
1468 if (thread->IsTerminationRequested()) {
1469 request->ClearThread();
1470 request->ClearEvent();
1471 terminate = true;
1472 }
1473
1474 prev_request = request;
1475 } else if (!m_request_list.empty()) {
1476 // Pop the request from the front of the list.
1477 request = std::addressof(m_request_list.front());
1478 m_request_list.pop_front();
1479
1480 // Get thread and event for the request.
1481 thread = request->GetThread();
1482 event = request->GetEvent();
1483 }
1484 }
1485
1486 // If there are no requests, we're done.
1487 if (request == nullptr) {
1488 break;
1489 }
1490
1491 // All requests must have threads.
1492 ASSERT(thread != nullptr);
1493
1494 // Ensure that we close the request when done.
1495 SCOPE_EXIT({ request->Close(); });
1496
1497 // If we're terminating, close a reference to the thread and event.
1498 if (terminate) {
1499 thread->Close();
1500 if (event != nullptr) {
1501 event->Close();
1502 }
1503 }
1504
1505 // If we need to, reply.
1506 if (event != nullptr && !cur_request) {
1507 // There must be no mappings.
1508 ASSERT(request->GetSendCount() == 0);
1509 ASSERT(request->GetReceiveCount() == 0);
1510 ASSERT(request->GetExchangeCount() == 0);
1511
1512 // Get the process and page table.
1513 KProcess* client_process = thread->GetOwnerProcess();
1514 auto& client_pt = client_process->GetPageTable();
1515
1516 // Reply to the request.
1517 ReplyAsyncError(client_process, request->GetAddress(), request->GetSize(),
1518 ResultSessionClosed);
1519
1520 // Unlock the buffer.
1521 // NOTE: Nintendo does not check the result of this.
1522 client_pt.UnlockForIpcUserBuffer(request->GetAddress(), request->GetSize());
1523
1524 // Signal the event.
1525 event->Signal();
1526 }
1527 }
1528
1529 // Notify.
1530 this->NotifyAvailable(ResultSessionClosed);
1531}
1532
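The drain loop in OnClientClosed visits the in-flight request at most once (guarded by prev_request) and then pops queued requests until the list is empty, taking the scheduler lock for each step. A simplified model of the loop shape (standard containers stand in for the kernel's intrusive list):

    #include <cstdio>
    #include <deque>
    #include <optional>

    struct Request { int id; };

    int main() {
        std::optional<Request> current_request{Request{0}};
        std::deque<Request> request_list{{1}, {2}};

        const Request* prev_request = nullptr;
        while (true) {
            std::optional<Request> request;
            // (the real code holds the scheduler lock around this block)
            if (current_request && &*current_request != prev_request) {
                request = *current_request;     // visit the in-flight request once
                prev_request = &*current_request;
            } else if (!request_list.empty()) {
                request = request_list.front(); // pop the next queued request
                request_list.pop_front();
            }
            if (!request) {
                break;                          // nothing left to clean up
            }
            std::printf("cleaning up request %d\n", request->id);
        }
        return 0;
    }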
570} // namespace Kernel 1533} // namespace Kernel
diff --git a/src/core/hle/kernel/k_server_session.h b/src/core/hle/kernel/k_server_session.h
index 403891919..2876c231b 100644
--- a/src/core/hle/kernel/k_server_session.h
+++ b/src/core/hle/kernel/k_server_session.h
@@ -49,14 +49,21 @@ public:
49 bool IsSignaled() const override; 49 bool IsSignaled() const override;
50 void OnClientClosed(); 50 void OnClientClosed();
51 51
52 /// TODO: flesh these out to match the real kernel
53 Result OnRequest(KSessionRequest* request); 52 Result OnRequest(KSessionRequest* request);
54 Result SendReply(bool is_hle = false); 53 Result SendReply(uintptr_t server_message, uintptr_t server_buffer_size,
55 Result ReceiveRequest(std::shared_ptr<Service::HLERequestContext>* out_context = nullptr, 54 KPhysicalAddress server_message_paddr, bool is_hle = false);
55 Result ReceiveRequest(uintptr_t server_message, uintptr_t server_buffer_size,
56 KPhysicalAddress server_message_paddr,
57 std::shared_ptr<Service::HLERequestContext>* out_context = nullptr,
56 std::weak_ptr<Service::SessionRequestManager> manager = {}); 58 std::weak_ptr<Service::SessionRequestManager> manager = {});
57 59
58 Result SendReplyHLE() { 60 Result SendReplyHLE() {
59 return SendReply(true); 61 R_RETURN(this->SendReply(0, 0, 0, true));
62 }
63
64 Result ReceiveRequestHLE(std::shared_ptr<Service::HLERequestContext>* out_context,
65 std::weak_ptr<Service::SessionRequestManager> manager) {
66 R_RETURN(this->ReceiveRequest(0, 0, 0, out_context, manager));
60 } 67 }
61 68
62private: 69private:
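The HLE wrappers rely on a convention visible in ReceiveRequest/SendMessage above: a message address of 0 means "use the thread's TLS buffer", so the HLE entry points just forward zeros. Sketch with simplified stand-in signatures:

    #include <cstdint>
    #include <cstdio>

    using Result = int;
    constexpr Result ResultSuccess = 0;

    // Stand-in for the native path: message == 0 selects the TLS buffer.
    Result SendReply(std::uintptr_t message, std::uintptr_t buffer_size,
                     std::uint64_t paddr, bool is_hle) {
        std::printf("message=%llu size=%llu paddr=%llu hle=%d\n",
                    static_cast<unsigned long long>(message),
                    static_cast<unsigned long long>(buffer_size),
                    static_cast<unsigned long long>(paddr), is_hle);
        return ResultSuccess;
    }

    Result SendReplyHLE() {
        return SendReply(0, 0, 0, true); // zeros select the TLS message buffer
    }

    int main() {
        return SendReplyHLE();
    }
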
diff --git a/src/core/hle/kernel/k_session.cpp b/src/core/hle/kernel/k_session.cpp
index 44d7a8f02..4a1f6027e 100644
--- a/src/core/hle/kernel/k_session.cpp
+++ b/src/core/hle/kernel/k_session.cpp
@@ -33,8 +33,7 @@ void KSession::Initialize(KClientPort* client_port, uintptr_t name) {
33 m_name = name; 33 m_name = name;
34 34
35 // Set our owner process. 35 // Set our owner process.
36 //! FIXME: this is the wrong process! 36 m_process = GetCurrentProcessPointer(m_kernel);
37 m_process = m_kernel.ApplicationProcess();
38 m_process->Open(); 37 m_process->Open();
39 38
40 // Set our port. 39 // Set our port.
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 7d9a6e9cf..24394d222 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -1422,8 +1422,7 @@ s32 GetCurrentCoreId(KernelCore& kernel) {
1422} 1422}
1423 1423
1424Core::Memory::Memory& GetCurrentMemory(KernelCore& kernel) { 1424Core::Memory::Memory& GetCurrentMemory(KernelCore& kernel) {
1425 // TODO: per-process memory 1425 return GetCurrentProcess(kernel).GetMemory();
1426 return kernel.System().ApplicationMemory();
1427} 1426}
1428 1427
1429KScopedDisableDispatch::~KScopedDisableDispatch() { 1428KScopedDisableDispatch::~KScopedDisableDispatch() {
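GetCurrentMemory now resolves through the current process rather than a single application-wide instance. A toy illustration of the accessor change (Process/Memory are stand-ins, not the emulator's classes):

    #include <cstdio>

    struct Memory { int id; };

    struct Process {
        Memory memory;
        Memory& GetMemory() { return memory; }
    };

    Process* g_current_process = nullptr;

    Memory& GetCurrentMemory() {
        // Before: one shared application-memory instance for everything.
        // After: the memory belonging to whichever process is current.
        return g_current_process->GetMemory();
    }

    int main() {
        Process p{{7}};
        g_current_process = &p;
        std::printf("memory id=%d\n", GetCurrentMemory().id);
        return 0;
    }
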
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index e9925d231..f13e232b2 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -314,11 +314,7 @@ public:
314 m_current_core_id = core; 314 m_current_core_id = core;
315 } 315 }
316 316
317 KProcess* GetOwnerProcess() { 317 KProcess* GetOwnerProcess() const {
318 return m_parent;
319 }
320
321 const KProcess* GetOwnerProcess() const {
322 return m_parent; 318 return m_parent;
323 } 319 }
324 320
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index e479dacde..c14d2d2f3 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -68,8 +68,6 @@ struct KernelCore::Impl {
68 68
69 global_object_list_container = std::make_unique<KAutoObjectWithListContainer>(kernel); 69 global_object_list_container = std::make_unique<KAutoObjectWithListContainer>(kernel);
70 global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel); 70 global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
71 global_handle_table = std::make_unique<Kernel::KHandleTable>(kernel);
72 global_handle_table->Initialize(KHandleTable::MaxTableSize);
73 71
74 is_phantom_mode_for_singlecore = false; 72 is_phantom_mode_for_singlecore = false;
75 73
@@ -121,13 +119,8 @@ struct KernelCore::Impl {
121 next_user_process_id = KProcess::ProcessIdMin; 119 next_user_process_id = KProcess::ProcessIdMin;
122 next_thread_id = 1; 120 next_thread_id = 1;
123 121
124 global_handle_table->Finalize();
125 global_handle_table.reset();
126
127 preemption_event = nullptr; 122 preemption_event = nullptr;
128 123
129 exclusive_monitor.reset();
130
131 // Cleanup persistent kernel objects 124 // Cleanup persistent kernel objects
132 auto CleanupObject = [](KAutoObject* obj) { 125 auto CleanupObject = [](KAutoObject* obj) {
133 if (obj) { 126 if (obj) {
@@ -191,8 +184,6 @@ struct KernelCore::Impl {
191 } 184 }
192 185
193 void InitializePhysicalCores() { 186 void InitializePhysicalCores() {
194 exclusive_monitor =
195 Core::MakeExclusiveMonitor(system.ApplicationMemory(), Core::Hardware::NUM_CPU_CORES);
196 for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { 187 for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
197 const s32 core{static_cast<s32>(i)}; 188 const s32 core{static_cast<s32>(i)};
198 189
@@ -791,10 +782,6 @@ struct KernelCore::Impl {
791 782
792 std::shared_ptr<Core::Timing::EventType> preemption_event; 783 std::shared_ptr<Core::Timing::EventType> preemption_event;
793 784
794 // This is the kernel's handle table or supervisor handle table which
795 // stores all the objects in place.
796 std::unique_ptr<KHandleTable> global_handle_table;
797
798 std::unique_ptr<KAutoObjectWithListContainer> global_object_list_container; 785 std::unique_ptr<KAutoObjectWithListContainer> global_object_list_container;
799 786
800 std::unique_ptr<KObjectNameGlobalData> object_name_global_data; 787 std::unique_ptr<KObjectNameGlobalData> object_name_global_data;
@@ -805,7 +792,6 @@ struct KernelCore::Impl {
805 std::mutex server_lock; 792 std::mutex server_lock;
806 std::vector<std::unique_ptr<Service::ServerManager>> server_managers; 793 std::vector<std::unique_ptr<Service::ServerManager>> server_managers;
807 794
808 std::unique_ptr<Core::ExclusiveMonitor> exclusive_monitor;
809 std::array<std::unique_ptr<Kernel::PhysicalCore>, Core::Hardware::NUM_CPU_CORES> cores; 795 std::array<std::unique_ptr<Kernel::PhysicalCore>, Core::Hardware::NUM_CPU_CORES> cores;
810 796
811 // Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others 797
@@ -882,10 +868,6 @@ KResourceLimit* KernelCore::GetSystemResourceLimit() {
882 return impl->system_resource_limit; 868 return impl->system_resource_limit;
883} 869}
884 870
885KScopedAutoObject<KThread> KernelCore::RetrieveThreadFromGlobalHandleTable(Handle handle) const {
886 return impl->global_handle_table->GetObject<KThread>(handle);
887}
888
889void KernelCore::AppendNewProcess(KProcess* process) { 871void KernelCore::AppendNewProcess(KProcess* process) {
890 impl->process_list.push_back(process); 872 impl->process_list.push_back(process);
891} 873}
@@ -959,14 +941,6 @@ Kernel::KHardwareTimer& KernelCore::HardwareTimer() {
959 return *impl->hardware_timer; 941 return *impl->hardware_timer;
960} 942}
961 943
962Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() {
963 return *impl->exclusive_monitor;
964}
965
966const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const {
967 return *impl->exclusive_monitor;
968}
969
970KAutoObjectWithListContainer& KernelCore::ObjectListContainer() { 944KAutoObjectWithListContainer& KernelCore::ObjectListContainer() {
971 return *impl->global_object_list_container; 945 return *impl->global_object_list_container;
972} 946}
@@ -1030,14 +1004,6 @@ u64 KernelCore::CreateNewUserProcessID() {
1030 return impl->next_user_process_id++; 1004 return impl->next_user_process_id++;
1031} 1005}
1032 1006
1033KHandleTable& KernelCore::GlobalHandleTable() {
1034 return *impl->global_handle_table;
1035}
1036
1037const KHandleTable& KernelCore::GlobalHandleTable() const {
1038 return *impl->global_handle_table;
1039}
1040
1041void KernelCore::RegisterCoreThread(std::size_t core_id) { 1007void KernelCore::RegisterCoreThread(std::size_t core_id) {
1042 impl->RegisterCoreThread(core_id); 1008 impl->RegisterCoreThread(core_id);
1043} 1009}
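The deletions in this file follow the PR's theme: kernel-global state (the global handle table, the shared exclusive monitor) is dropped in favor of per-process ownership. A minimal sketch of what per-process monitor ownership looks like, with illustrative types only:

    #include <cstdio>
    #include <memory>

    struct Memory {};
    struct ExclusiveMonitor {
        explicit ExclusiveMonitor(Memory&) {}
    };

    struct Process {
        Memory memory;
        // Each process constructs its own monitor over its own memory,
        // instead of all processes sharing one kernel-wide instance.
        std::unique_ptr<ExclusiveMonitor> monitor =
            std::make_unique<ExclusiveMonitor>(memory);
        ExclusiveMonitor& GetExclusiveMonitor() { return *monitor; }
    };

    int main() {
        Process a, b;
        std::printf("distinct monitors: %d\n",
                    &a.GetExclusiveMonitor() != &b.GetExclusiveMonitor());
        return 0;
    }
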
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 78c88902c..5d4102145 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -116,9 +116,6 @@ public:
116 /// Retrieves a shared pointer to the system resource limit instance. 116 /// Retrieves a shared pointer to the system resource limit instance.
117 KResourceLimit* GetSystemResourceLimit(); 117 KResourceLimit* GetSystemResourceLimit();
118 118
119 /// Retrieves a shared pointer to a Thread instance within the thread wakeup handle table.
120 KScopedAutoObject<KThread> RetrieveThreadFromGlobalHandleTable(Handle handle) const;
121
122 /// Adds the given shared pointer to an internal list of active processes. 119 /// Adds the given shared pointer to an internal list of active processes.
123 void AppendNewProcess(KProcess* process); 120 void AppendNewProcess(KProcess* process);
124 121
@@ -170,10 +167,6 @@ public:
170 /// Stops execution of 'id' core, in order to reschedule a new thread. 167 /// Stops execution of 'id' core, in order to reschedule a new thread.
171 void PrepareReschedule(std::size_t id); 168 void PrepareReschedule(std::size_t id);
172 169
173 Core::ExclusiveMonitor& GetExclusiveMonitor();
174
175 const Core::ExclusiveMonitor& GetExclusiveMonitor() const;
176
177 KAutoObjectWithListContainer& ObjectListContainer(); 170 KAutoObjectWithListContainer& ObjectListContainer();
178 171
179 const KAutoObjectWithListContainer& ObjectListContainer() const; 172 const KAutoObjectWithListContainer& ObjectListContainer() const;
diff --git a/src/core/hle/kernel/message_buffer.h b/src/core/hle/kernel/message_buffer.h
index 75b275310..d528a9bb3 100644
--- a/src/core/hle/kernel/message_buffer.h
+++ b/src/core/hle/kernel/message_buffer.h
@@ -18,13 +18,13 @@ public:
18 static constexpr inline u64 NullTag = 0; 18 static constexpr inline u64 NullTag = 0;
19 19
20 public: 20 public:
21 enum class ReceiveListCountType : u32 { 21 enum ReceiveListCountType : u32 {
22 None = 0, 22 ReceiveListCountType_None = 0,
23 ToMessageBuffer = 1, 23 ReceiveListCountType_ToMessageBuffer = 1,
24 ToSingleBuffer = 2, 24 ReceiveListCountType_ToSingleBuffer = 2,
25 25
26 CountOffset = 2, 26 ReceiveListCountType_CountOffset = 2,
27 CountMax = 13, 27 ReceiveListCountType_CountMax = 13,
28 }; 28 };
29 29
30 private: 30 private:
@@ -591,16 +591,16 @@ public:
591 // Add the size of the receive list. 591 // Add the size of the receive list.
592 const auto count = hdr.GetReceiveListCount(); 592 const auto count = hdr.GetReceiveListCount();
593 switch (count) { 593 switch (count) {
594 case MessageHeader::ReceiveListCountType::None: 594 case MessageHeader::ReceiveListCountType_None:
595 break; 595 break;
596 case MessageHeader::ReceiveListCountType::ToMessageBuffer: 596 case MessageHeader::ReceiveListCountType_ToMessageBuffer:
597 break; 597 break;
598 case MessageHeader::ReceiveListCountType::ToSingleBuffer: 598 case MessageHeader::ReceiveListCountType_ToSingleBuffer:
599 msg_size += ReceiveListEntry::GetDataSize(); 599 msg_size += ReceiveListEntry::GetDataSize();
600 break; 600 break;
601 default: 601 default:
602 msg_size += (static_cast<s32>(count) - 602 msg_size += (static_cast<s32>(count) -
603 static_cast<s32>(MessageHeader::ReceiveListCountType::CountOffset)) * 603 static_cast<s32>(MessageHeader::ReceiveListCountType_CountOffset)) *
604 ReceiveListEntry::GetDataSize(); 604 ReceiveListEntry::GetDataSize();
605 break; 605 break;
606 } 606 }
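Presumably the switch from `enum class` to a plain enum with prefixed enumerators is for implicit integer conversion: the new SendMessage path compares GetReceiveListCount() against ReceiveListCountType_ToMessageBuffer directly, which scoped enumerators would reject without a cast. A sketch of that difference:

    #include <cstdio>

    enum ReceiveListCountType : unsigned {
        ReceiveListCountType_None = 0,
        ReceiveListCountType_ToMessageBuffer = 1,
        ReceiveListCountType_ToSingleBuffer = 2,
        ReceiveListCountType_CountOffset = 2,
        ReceiveListCountType_CountMax = 13,
    };

    // Stand-in for MessageHeader::GetReceiveListCount().
    unsigned GetReceiveListCount() {
        return 1;
    }

    int main() {
        // Unscoped enumerators convert implicitly, so no cast is needed here;
        // with `enum class` this comparison would not compile without one.
        if (GetReceiveListCount() == ReceiveListCountType_ToMessageBuffer) {
            std::printf("receive list targets the message buffer\n");
        }
        return 0;
    }
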
diff --git a/src/core/hle/kernel/svc/svc_info.cpp b/src/core/hle/kernel/svc/svc_info.cpp
index ada998772..231e4d0e1 100644
--- a/src/core/hle/kernel/svc/svc_info.cpp
+++ b/src/core/hle/kernel/svc/svc_info.cpp
@@ -118,7 +118,6 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
118 R_SUCCEED(); 118 R_SUCCEED();
119 119
120 case InfoType::IsApplication: 120 case InfoType::IsApplication:
121 LOG_WARNING(Kernel_SVC, "(STUBBED) Assuming process is application");
122 *result = process->IsApplication(); 121 *result = process->IsApplication();
123 R_SUCCEED(); 122 R_SUCCEED();
124 123
diff --git a/src/core/hle/kernel/svc/svc_ipc.cpp b/src/core/hle/kernel/svc/svc_ipc.cpp
index 47a3e7bb0..85cc4f561 100644
--- a/src/core/hle/kernel/svc/svc_ipc.cpp
+++ b/src/core/hle/kernel/svc/svc_ipc.cpp
@@ -48,8 +48,7 @@ Result ReplyAndReceiveImpl(KernelCore& kernel, int32_t* out_index, uintptr_t mes
48 }; 48 };
49 49
50 // Send the reply. 50 // Send the reply.
51 R_TRY(session->SendReply()); 51 R_TRY(session->SendReply(message, buffer_size, message_paddr));
52 // R_TRY(session->SendReply(message, buffer_size, message_paddr));
53 } 52 }
54 53
55 // Receive a message. 54 // Receive a message.
@@ -85,8 +84,7 @@ Result ReplyAndReceiveImpl(KernelCore& kernel, int32_t* out_index, uintptr_t mes
85 if (R_SUCCEEDED(result)) { 84 if (R_SUCCEEDED(result)) {
86 KServerSession* session = objs[index]->DynamicCast<KServerSession*>(); 85 KServerSession* session = objs[index]->DynamicCast<KServerSession*>();
87 if (session != nullptr) { 86 if (session != nullptr) {
88 // result = session->ReceiveRequest(message, buffer_size, message_paddr); 87 result = session->ReceiveRequest(message, buffer_size, message_paddr);
89 result = session->ReceiveRequest();
90 if (ResultNotFound == result) { 88 if (ResultNotFound == result) {
91 continue; 89 continue;
92 } 90 }
diff --git a/src/core/hle/kernel/svc_results.h b/src/core/hle/kernel/svc_results.h
index e1ad78607..38e71d516 100644
--- a/src/core/hle/kernel/svc_results.h
+++ b/src/core/hle/kernel/svc_results.h
@@ -38,7 +38,9 @@ constexpr Result ResultInvalidState{ErrorModule::Kernel, 125};
38constexpr Result ResultReservedUsed{ErrorModule::Kernel, 126}; 38constexpr Result ResultReservedUsed{ErrorModule::Kernel, 126};
39constexpr Result ResultPortClosed{ErrorModule::Kernel, 131}; 39constexpr Result ResultPortClosed{ErrorModule::Kernel, 131};
40constexpr Result ResultLimitReached{ErrorModule::Kernel, 132}; 40constexpr Result ResultLimitReached{ErrorModule::Kernel, 132};
41constexpr Result ResultReceiveListBroken{ErrorModule::Kernel, 258};
41constexpr Result ResultOutOfAddressSpace{ErrorModule::Kernel, 259}; 42constexpr Result ResultOutOfAddressSpace{ErrorModule::Kernel, 259};
43constexpr Result ResultMessageTooLarge{ErrorModule::Kernel, 260};
42constexpr Result ResultInvalidId{ErrorModule::Kernel, 519}; 44constexpr Result ResultInvalidId{ErrorModule::Kernel, 519};
43 45
44} // namespace Kernel 46} // namespace Kernel