diff options
| author | 2023-12-17 19:54:29 -0500 | |
|---|---|---|
| committer | 2023-12-22 21:52:49 -0500 | |
| commit | 35501ba41cefb6f103a96f032c22c14f4fce1e96 (patch) | |
| tree | 854d33905bb44193b5f171e1f77565add1884694 | |
| parent | kernel: instantiate memory separately for each guest process (diff) | |
| download | yuzu-35501ba41cefb6f103a96f032c22c14f4fce1e96.tar.gz yuzu-35501ba41cefb6f103a96f032c22c14f4fce1e96.tar.xz yuzu-35501ba41cefb6f103a96f032c22c14f4fce1e96.zip | |
k_server_session: process for guest servers
| -rw-r--r-- | src/core/hle/kernel/k_server_session.cpp | 1340 | ||||
| -rw-r--r-- | src/core/hle/kernel/k_server_session.h | 15 | ||||
| -rw-r--r-- | src/core/hle/kernel/message_buffer.h | 20 | ||||
| -rw-r--r-- | src/core/hle/kernel/svc/svc_ipc.cpp | 6 | ||||
| -rw-r--r-- | src/core/hle/kernel/svc_results.h | 2 | ||||
| -rw-r--r-- | src/core/hle/service/server_manager.cpp | 2 |
6 files changed, 1135 insertions, 250 deletions
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp index e33a88e24..db3a07f91 100644 --- a/src/core/hle/kernel/k_server_session.cpp +++ b/src/core/hle/kernel/k_server_session.cpp | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | #include "common/common_types.h" | 8 | #include "common/common_types.h" |
| 9 | #include "common/logging/log.h" | 9 | #include "common/logging/log.h" |
| 10 | #include "common/scope_exit.h" | 10 | #include "common/scope_exit.h" |
| 11 | #include "common/scratch_buffer.h" | ||
| 11 | #include "core/core.h" | 12 | #include "core/core.h" |
| 12 | #include "core/core_timing.h" | 13 | #include "core/core_timing.h" |
| 13 | #include "core/hle/kernel/k_client_port.h" | 14 | #include "core/hle/kernel/k_client_port.h" |
| @@ -29,12 +30,140 @@ namespace Kernel { | |||
| 29 | 30 | ||
| 30 | namespace { | 31 | namespace { |
| 31 | 32 | ||
| 33 | constexpr inline size_t PointerTransferBufferAlignment = 0x10; | ||
| 34 | constexpr inline size_t ReceiveListDataSize = | ||
| 35 | MessageBuffer::MessageHeader::ReceiveListCountType_CountMax * | ||
| 36 | MessageBuffer::ReceiveListEntry::GetDataSize() / sizeof(u32); | ||
| 37 | |||
| 38 | using ThreadQueueImplForKServerSessionRequest = KThreadQueue; | ||
| 39 | |||
| 40 | static thread_local Common::ScratchBuffer<u8> temp_buffer; | ||
| 41 | |||
| 42 | class ReceiveList { | ||
| 43 | public: | ||
| 44 | static constexpr int GetEntryCount(const MessageBuffer::MessageHeader& header) { | ||
| 45 | const auto count = header.GetReceiveListCount(); | ||
| 46 | switch (count) { | ||
| 47 | case MessageBuffer::MessageHeader::ReceiveListCountType_None: | ||
| 48 | return 0; | ||
| 49 | case MessageBuffer::MessageHeader::ReceiveListCountType_ToMessageBuffer: | ||
| 50 | return 0; | ||
| 51 | case MessageBuffer::MessageHeader::ReceiveListCountType_ToSingleBuffer: | ||
| 52 | return 1; | ||
| 53 | default: | ||
| 54 | return count - MessageBuffer::MessageHeader::ReceiveListCountType_CountOffset; | ||
| 55 | } | ||
| 56 | } | ||
| 57 | |||
| 58 | explicit ReceiveList(const u32* dst_msg, uint64_t dst_address, | ||
| 59 | KProcessPageTable& dst_page_table, | ||
| 60 | const MessageBuffer::MessageHeader& dst_header, | ||
| 61 | const MessageBuffer::SpecialHeader& dst_special_header, size_t msg_size, | ||
| 62 | size_t out_offset, s32 dst_recv_list_idx, bool is_tls) { | ||
| 63 | m_recv_list_count = dst_header.GetReceiveListCount(); | ||
| 64 | m_msg_buffer_end = dst_address + sizeof(u32) * out_offset; | ||
| 65 | m_msg_buffer_space_end = dst_address + msg_size; | ||
| 66 | |||
| 67 | // NOTE: Nintendo calculates the receive list index here using the special header. | ||
| 68 | // We pre-calculate it in the caller, and pass it as a parameter. | ||
| 69 | (void)dst_special_header; | ||
| 70 | |||
| 71 | const u32* recv_list = dst_msg + dst_recv_list_idx; | ||
| 72 | const auto entry_count = GetEntryCount(dst_header); | ||
| 73 | |||
| 74 | if (is_tls) { | ||
| 75 | // Messages from TLS to TLS are contained within one page. | ||
| 76 | std::memcpy(m_data.data(), recv_list, | ||
| 77 | entry_count * MessageBuffer::ReceiveListEntry::GetDataSize()); | ||
| 78 | } else { | ||
| 79 | // If any buffer is not from TLS, perform a normal read instead. | ||
| 80 | uint64_t cur_addr = dst_address + dst_recv_list_idx * sizeof(u32); | ||
| 81 | dst_page_table.GetMemory().ReadBlock( | ||
| 82 | cur_addr, m_data.data(), | ||
| 83 | entry_count * MessageBuffer::ReceiveListEntry::GetDataSize()); | ||
| 84 | } | ||
| 85 | } | ||
| 86 | |||
| 87 | bool IsIndex() const { | ||
| 88 | return m_recv_list_count > | ||
| 89 | static_cast<s32>(MessageBuffer::MessageHeader::ReceiveListCountType_CountOffset); | ||
| 90 | } | ||
| 91 | |||
| 92 | bool IsToMessageBuffer() const { | ||
| 93 | return m_recv_list_count == | ||
| 94 | MessageBuffer::MessageHeader::ReceiveListCountType_ToMessageBuffer; | ||
| 95 | } | ||
| 96 | |||
| 97 | void GetBuffer(uint64_t& out, size_t size, int& key) const { | ||
| 98 | switch (m_recv_list_count) { | ||
| 99 | case MessageBuffer::MessageHeader::ReceiveListCountType_None: { | ||
| 100 | out = 0; | ||
| 101 | break; | ||
| 102 | } | ||
| 103 | case MessageBuffer::MessageHeader::ReceiveListCountType_ToMessageBuffer: { | ||
| 104 | const uint64_t buf = | ||
| 105 | Common::AlignUp(m_msg_buffer_end + key, PointerTransferBufferAlignment); | ||
| 106 | |||
| 107 | if ((buf < buf + size) && (buf + size <= m_msg_buffer_space_end)) { | ||
| 108 | out = buf; | ||
| 109 | key = static_cast<int>(buf + size - m_msg_buffer_end); | ||
| 110 | } else { | ||
| 111 | out = 0; | ||
| 112 | } | ||
| 113 | break; | ||
| 114 | } | ||
| 115 | case MessageBuffer::MessageHeader::ReceiveListCountType_ToSingleBuffer: { | ||
| 116 | const MessageBuffer::ReceiveListEntry entry(m_data[0], m_data[1]); | ||
| 117 | const uint64_t buf = | ||
| 118 | Common::AlignUp(entry.GetAddress() + key, PointerTransferBufferAlignment); | ||
| 119 | |||
| 120 | const uint64_t entry_addr = entry.GetAddress(); | ||
| 121 | const size_t entry_size = entry.GetSize(); | ||
| 122 | |||
| 123 | if ((buf < buf + size) && (entry_addr < entry_addr + entry_size) && | ||
| 124 | (buf + size <= entry_addr + entry_size)) { | ||
| 125 | out = buf; | ||
| 126 | key = static_cast<int>(buf + size - entry_addr); | ||
| 127 | } else { | ||
| 128 | out = 0; | ||
| 129 | } | ||
| 130 | break; | ||
| 131 | } | ||
| 132 | default: { | ||
| 133 | if (key < m_recv_list_count - | ||
| 134 | static_cast<s32>( | ||
| 135 | MessageBuffer::MessageHeader::ReceiveListCountType_CountOffset)) { | ||
| 136 | const MessageBuffer::ReceiveListEntry entry(m_data[2 * key + 0], | ||
| 137 | m_data[2 * key + 1]); | ||
| 138 | |||
| 139 | const uintptr_t entry_addr = entry.GetAddress(); | ||
| 140 | const size_t entry_size = entry.GetSize(); | ||
| 141 | |||
| 142 | if ((entry_addr < entry_addr + entry_size) && (entry_size >= size)) { | ||
| 143 | out = entry_addr; | ||
| 144 | } | ||
| 145 | } else { | ||
| 146 | out = 0; | ||
| 147 | } | ||
| 148 | break; | ||
| 149 | } | ||
| 150 | } | ||
| 151 | } | ||
| 152 | |||
| 153 | private: | ||
| 154 | std::array<u32, ReceiveListDataSize> m_data; | ||
| 155 | s32 m_recv_list_count; | ||
| 156 | uint64_t m_msg_buffer_end; | ||
| 157 | uint64_t m_msg_buffer_space_end; | ||
| 158 | }; | ||
| 159 | |||
| 32 | template <bool MoveHandleAllowed> | 160 | template <bool MoveHandleAllowed> |
| 33 | Result ProcessMessageSpecialData(KProcess& dst_process, KProcess& src_process, KThread& src_thread, | 161 | Result ProcessMessageSpecialData(s32& offset, KProcess& dst_process, KProcess& src_process, |
| 34 | MessageBuffer& dst_msg, const MessageBuffer& src_msg, | 162 | KThread& src_thread, const MessageBuffer& dst_msg, |
| 35 | MessageBuffer::SpecialHeader& src_special_header) { | 163 | const MessageBuffer& src_msg, |
| 164 | const MessageBuffer::SpecialHeader& src_special_header) { | ||
| 36 | // Copy the special header to the destination. | 165 | // Copy the special header to the destination. |
| 37 | s32 offset = dst_msg.Set(src_special_header); | 166 | offset = dst_msg.Set(src_special_header); |
| 38 | 167 | ||
| 39 | // Copy the process ID. | 168 | // Copy the process ID. |
| 40 | if (src_special_header.GetHasProcessId()) { | 169 | if (src_special_header.GetHasProcessId()) { |
| @@ -110,6 +239,93 @@ Result ProcessMessageSpecialData(KProcess& dst_process, KProcess& src_process, K | |||
| 110 | R_RETURN(result); | 239 | R_RETURN(result); |
| 111 | } | 240 | } |
| 112 | 241 | ||
| 242 | Result ProcessReceiveMessagePointerDescriptors(int& offset, int& pointer_key, | ||
| 243 | KProcessPageTable& dst_page_table, | ||
| 244 | KProcessPageTable& src_page_table, | ||
| 245 | const MessageBuffer& dst_msg, | ||
| 246 | const MessageBuffer& src_msg, | ||
| 247 | const ReceiveList& dst_recv_list, bool dst_user) { | ||
| 248 | // Get the offset at the start of processing. | ||
| 249 | const int cur_offset = offset; | ||
| 250 | |||
| 251 | // Get the pointer desc. | ||
| 252 | MessageBuffer::PointerDescriptor src_desc(src_msg, cur_offset); | ||
| 253 | offset += static_cast<int>(MessageBuffer::PointerDescriptor::GetDataSize() / sizeof(u32)); | ||
| 254 | |||
| 255 | // Extract address/size. | ||
| 256 | const uint64_t src_pointer = src_desc.GetAddress(); | ||
| 257 | const size_t recv_size = src_desc.GetSize(); | ||
| 258 | uint64_t recv_pointer = 0; | ||
| 259 | |||
| 260 | // Process the buffer, if it has a size. | ||
| 261 | if (recv_size > 0) { | ||
| 262 | // If using indexing, set index. | ||
| 263 | if (dst_recv_list.IsIndex()) { | ||
| 264 | pointer_key = src_desc.GetIndex(); | ||
| 265 | } | ||
| 266 | |||
| 267 | // Get the buffer. | ||
| 268 | dst_recv_list.GetBuffer(recv_pointer, recv_size, pointer_key); | ||
| 269 | R_UNLESS(recv_pointer != 0, ResultOutOfResource); | ||
| 270 | |||
| 271 | // Perform the pointer data copy. | ||
| 272 | // TODO: KProcessPageTable::CopyMemoryFromHeapToHeapWithoutCheckDestination | ||
| 273 | // TODO: KProcessPageTable::CopyMemoryFromLinearToUser | ||
| 274 | |||
| 275 | temp_buffer.resize_destructive(recv_size); | ||
| 276 | src_page_table.GetMemory().ReadBlock(src_pointer, temp_buffer.data(), recv_size); | ||
| 277 | dst_page_table.GetMemory().WriteBlock(recv_pointer, temp_buffer.data(), recv_size); | ||
| 278 | } | ||
| 279 | |||
| 280 | // Set the output descriptor. | ||
| 281 | dst_msg.Set(cur_offset, MessageBuffer::PointerDescriptor(reinterpret_cast<void*>(recv_pointer), | ||
| 282 | recv_size, src_desc.GetIndex())); | ||
| 283 | |||
| 284 | R_SUCCEED(); | ||
| 285 | } | ||
| 286 | |||
| 287 | constexpr Result GetMapAliasMemoryState(KMemoryState& out, | ||
| 288 | MessageBuffer::MapAliasDescriptor::Attribute attr) { | ||
| 289 | switch (attr) { | ||
| 290 | case MessageBuffer::MapAliasDescriptor::Attribute::Ipc: | ||
| 291 | out = KMemoryState::Ipc; | ||
| 292 | break; | ||
| 293 | case MessageBuffer::MapAliasDescriptor::Attribute::NonSecureIpc: | ||
| 294 | out = KMemoryState::NonSecureIpc; | ||
| 295 | break; | ||
| 296 | case MessageBuffer::MapAliasDescriptor::Attribute::NonDeviceIpc: | ||
| 297 | out = KMemoryState::NonDeviceIpc; | ||
| 298 | break; | ||
| 299 | default: | ||
| 300 | R_THROW(ResultInvalidCombination); | ||
| 301 | } | ||
| 302 | |||
| 303 | R_SUCCEED(); | ||
| 304 | } | ||
| 305 | |||
| 306 | constexpr Result GetMapAliasTestStateAndAttributeMask(u32& out_state, u32& out_attr_mask, | ||
| 307 | KMemoryState state) { | ||
| 308 | switch (state) { | ||
| 309 | case KMemoryState::Ipc: | ||
| 310 | out_state = static_cast<u32>(KMemoryState::FlagCanUseIpc); | ||
| 311 | out_attr_mask = static_cast<u32>(KMemoryAttribute::Uncached | | ||
| 312 | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked); | ||
| 313 | break; | ||
| 314 | case KMemoryState::NonSecureIpc: | ||
| 315 | out_state = static_cast<u32>(KMemoryState::FlagCanUseNonSecureIpc); | ||
| 316 | out_attr_mask = static_cast<u32>(KMemoryAttribute::Uncached | KMemoryAttribute::Locked); | ||
| 317 | break; | ||
| 318 | case KMemoryState::NonDeviceIpc: | ||
| 319 | out_state = static_cast<u32>(KMemoryState::FlagCanUseNonDeviceIpc); | ||
| 320 | out_attr_mask = static_cast<u32>(KMemoryAttribute::Uncached | KMemoryAttribute::Locked); | ||
| 321 | break; | ||
| 322 | default: | ||
| 323 | R_THROW(ResultInvalidCombination); | ||
| 324 | } | ||
| 325 | |||
| 326 | R_SUCCEED(); | ||
| 327 | } | ||
| 328 | |||
| 113 | void CleanupSpecialData(KProcess& dst_process, u32* dst_msg_ptr, size_t dst_buffer_size) { | 329 | void CleanupSpecialData(KProcess& dst_process, u32* dst_msg_ptr, size_t dst_buffer_size) { |
| 114 | // Parse the message. | 330 | // Parse the message. |
| 115 | const MessageBuffer dst_msg(dst_msg_ptr, dst_buffer_size); | 331 | const MessageBuffer dst_msg(dst_msg_ptr, dst_buffer_size); |
| @@ -144,166 +360,778 @@ void CleanupSpecialData(KProcess& dst_process, u32* dst_msg_ptr, size_t dst_buff | |||
| 144 | } | 360 | } |
| 145 | } | 361 | } |
| 146 | 362 | ||
| 147 | } // namespace | 363 | Result CleanupServerHandles(KernelCore& kernel, uint64_t message, size_t buffer_size, |
| 364 | KPhysicalAddress message_paddr) { | ||
| 365 | // Server is assumed to be current thread. | ||
| 366 | KThread& thread = GetCurrentThread(kernel); | ||
| 148 | 367 | ||
| 149 | using ThreadQueueImplForKServerSessionRequest = KThreadQueue; | 368 | // Get the linear message pointer. |
| 369 | u32* msg_ptr; | ||
| 370 | if (message) { | ||
| 371 | msg_ptr = kernel.System().DeviceMemory().GetPointer<u32>(message_paddr); | ||
| 372 | } else { | ||
| 373 | msg_ptr = GetCurrentMemory(kernel).GetPointer<u32>(thread.GetTlsAddress()); | ||
| 374 | buffer_size = MessageBufferSize; | ||
| 375 | message = GetInteger(thread.GetTlsAddress()); | ||
| 376 | } | ||
| 150 | 377 | ||
| 151 | KServerSession::KServerSession(KernelCore& kernel) | 378 | // Parse the message. |
| 152 | : KSynchronizationObject{kernel}, m_lock{m_kernel} {} | 379 | const MessageBuffer msg(msg_ptr, buffer_size); |
| 380 | const MessageBuffer::MessageHeader header(msg); | ||
| 381 | const MessageBuffer::SpecialHeader special_header(msg, header); | ||
| 153 | 382 | ||
| 154 | KServerSession::~KServerSession() = default; | 383 | // Check that the size is big enough. |
| 384 | R_UNLESS(MessageBuffer::GetMessageBufferSize(header, special_header) <= buffer_size, | ||
| 385 | ResultInvalidCombination); | ||
| 386 | |||
| 387 | // If there's a special header, there may be move handles we need to close. | ||
| 388 | if (header.GetHasSpecialHeader()) { | ||
| 389 | // Determine the offset to the start of handles. | ||
| 390 | auto offset = msg.GetSpecialDataIndex(header, special_header); | ||
| 391 | if (special_header.GetHasProcessId()) { | ||
| 392 | offset += static_cast<int>(sizeof(u64) / sizeof(u32)); | ||
| 393 | } | ||
| 394 | if (auto copy_count = special_header.GetCopyHandleCount(); copy_count > 0) { | ||
| 395 | offset += static_cast<int>((sizeof(Svc::Handle) * copy_count) / sizeof(u32)); | ||
| 396 | } | ||
| 155 | 397 | ||
| 156 | void KServerSession::Destroy() { | 398 | // Get the handle table. |
| 157 | m_parent->OnServerClosed(); | 399 | auto& handle_table = thread.GetOwnerProcess()->GetHandleTable(); |
| 158 | 400 | ||
| 159 | this->CleanupRequests(); | 401 | // Close the handles. |
| 402 | for (auto i = 0; i < special_header.GetMoveHandleCount(); ++i) { | ||
| 403 | handle_table.Remove(msg.GetHandle(offset)); | ||
| 404 | offset += static_cast<int>(sizeof(Svc::Handle) / sizeof(u32)); | ||
| 405 | } | ||
| 406 | } | ||
| 160 | 407 | ||
| 161 | m_parent->Close(); | 408 | R_SUCCEED(); |
| 162 | } | 409 | } |
| 163 | 410 | ||
| 164 | void KServerSession::OnClientClosed() { | 411 | Result CleanupServerMap(KSessionRequest* request, KProcess* server_process) { |
| 165 | KScopedLightLock lk{m_lock}; | 412 | // If there's no server process, there's nothing to clean up. |
| 413 | R_SUCCEED_IF(server_process == nullptr); | ||
| 166 | 414 | ||
| 167 | // Handle any pending requests. | 415 | // Get the page table. |
| 168 | KSessionRequest* prev_request = nullptr; | 416 | auto& server_page_table = server_process->GetPageTable(); |
| 169 | while (true) { | ||
| 170 | // Declare variables for processing the request. | ||
| 171 | KSessionRequest* request = nullptr; | ||
| 172 | KEvent* event = nullptr; | ||
| 173 | KThread* thread = nullptr; | ||
| 174 | bool cur_request = false; | ||
| 175 | bool terminate = false; | ||
| 176 | 417 | ||
| 177 | // Get the next request. | 418 | // Cleanup Send mappings. |
| 178 | { | 419 | for (size_t i = 0; i < request->GetSendCount(); ++i) { |
| 179 | KScopedSchedulerLock sl{m_kernel}; | 420 | R_TRY(server_page_table.CleanupForIpcServer(request->GetSendServerAddress(i), |
| 421 | request->GetSendSize(i), | ||
| 422 | request->GetSendMemoryState(i))); | ||
| 423 | } | ||
| 180 | 424 | ||
| 181 | if (m_current_request != nullptr && m_current_request != prev_request) { | 425 | // Cleanup Receive mappings. |
| 182 | // Set the request, open a reference as we process it. | 426 | for (size_t i = 0; i < request->GetReceiveCount(); ++i) { |
| 183 | request = m_current_request; | 427 | R_TRY(server_page_table.CleanupForIpcServer(request->GetReceiveServerAddress(i), |
| 184 | request->Open(); | 428 | request->GetReceiveSize(i), |
| 185 | cur_request = true; | 429 | request->GetReceiveMemoryState(i))); |
| 430 | } | ||
| 186 | 431 | ||
| 187 | // Get thread and event for the request. | 432 | // Cleanup Exchange mappings. |
| 188 | thread = request->GetThread(); | 433 | for (size_t i = 0; i < request->GetExchangeCount(); ++i) { |
| 189 | event = request->GetEvent(); | 434 | R_TRY(server_page_table.CleanupForIpcServer(request->GetExchangeServerAddress(i), |
| 435 | request->GetExchangeSize(i), | ||
| 436 | request->GetExchangeMemoryState(i))); | ||
| 437 | } | ||
| 190 | 438 | ||
| 191 | // If the thread is terminating, handle that. | 439 | R_SUCCEED(); |
| 192 | if (thread->IsTerminationRequested()) { | 440 | } |
| 193 | request->ClearThread(); | ||
| 194 | request->ClearEvent(); | ||
| 195 | terminate = true; | ||
| 196 | } | ||
| 197 | 441 | ||
| 198 | prev_request = request; | 442 | Result CleanupClientMap(KSessionRequest* request, KProcessPageTable* client_page_table) { |
| 199 | } else if (!m_request_list.empty()) { | 443 | // If there's no client page table, there's nothing to clean up. |
| 200 | // Pop the request from the front of the list. | 444 | R_SUCCEED_IF(client_page_table == nullptr); |
| 201 | request = std::addressof(m_request_list.front()); | ||
| 202 | m_request_list.pop_front(); | ||
| 203 | 445 | ||
| 204 | // Get thread and event for the request. | 446 | // Cleanup Send mappings. |
| 205 | thread = request->GetThread(); | 447 | for (size_t i = 0; i < request->GetSendCount(); ++i) { |
| 206 | event = request->GetEvent(); | 448 | R_TRY(client_page_table->CleanupForIpcClient(request->GetSendClientAddress(i), |
| 207 | } | 449 | request->GetSendSize(i), |
| 450 | request->GetSendMemoryState(i))); | ||
| 451 | } | ||
| 452 | |||
| 453 | // Cleanup Receive mappings. | ||
| 454 | for (size_t i = 0; i < request->GetReceiveCount(); ++i) { | ||
| 455 | R_TRY(client_page_table->CleanupForIpcClient(request->GetReceiveClientAddress(i), | ||
| 456 | request->GetReceiveSize(i), | ||
| 457 | request->GetReceiveMemoryState(i))); | ||
| 458 | } | ||
| 459 | |||
| 460 | // Cleanup Exchange mappings. | ||
| 461 | for (size_t i = 0; i < request->GetExchangeCount(); ++i) { | ||
| 462 | R_TRY(client_page_table->CleanupForIpcClient(request->GetExchangeClientAddress(i), | ||
| 463 | request->GetExchangeSize(i), | ||
| 464 | request->GetExchangeMemoryState(i))); | ||
| 465 | } | ||
| 466 | |||
| 467 | R_SUCCEED(); | ||
| 468 | } | ||
| 469 | |||
| 470 | Result CleanupMap(KSessionRequest* request, KProcess* server_process, | ||
| 471 | KProcessPageTable* client_page_table) { | ||
| 472 | // Cleanup the server map. | ||
| 473 | R_TRY(CleanupServerMap(request, server_process)); | ||
| 474 | |||
| 475 | // Cleanup the client map. | ||
| 476 | R_TRY(CleanupClientMap(request, client_page_table)); | ||
| 477 | |||
| 478 | R_SUCCEED(); | ||
| 479 | } | ||
| 480 | |||
| 481 | Result ProcessReceiveMessageMapAliasDescriptors(int& offset, KProcessPageTable& dst_page_table, | ||
| 482 | KProcessPageTable& src_page_table, | ||
| 483 | const MessageBuffer& dst_msg, | ||
| 484 | const MessageBuffer& src_msg, | ||
| 485 | KSessionRequest* request, KMemoryPermission perm, | ||
| 486 | bool send) { | ||
| 487 | // Get the offset at the start of processing. | ||
| 488 | const int cur_offset = offset; | ||
| 489 | |||
| 490 | // Get the map alias descriptor. | ||
| 491 | MessageBuffer::MapAliasDescriptor src_desc(src_msg, cur_offset); | ||
| 492 | offset += static_cast<int>(MessageBuffer::MapAliasDescriptor::GetDataSize() / sizeof(u32)); | ||
| 493 | |||
| 494 | // Extract address/size. | ||
| 495 | const KProcessAddress src_address = src_desc.GetAddress(); | ||
| 496 | const size_t size = src_desc.GetSize(); | ||
| 497 | KProcessAddress dst_address = 0; | ||
| 498 | |||
| 499 | // Determine the result memory state. | ||
| 500 | KMemoryState dst_state; | ||
| 501 | R_TRY(GetMapAliasMemoryState(dst_state, src_desc.GetAttribute())); | ||
| 502 | |||
| 503 | // Process the buffer, if it has a size. | ||
| 504 | if (size > 0) { | ||
| 505 | // Set up the source pages for ipc. | ||
| 506 | R_TRY(dst_page_table.SetupForIpc(std::addressof(dst_address), size, src_address, | ||
| 507 | src_page_table, perm, dst_state, send)); | ||
| 508 | |||
| 509 | // Ensure that we clean up on failure. | ||
| 510 | ON_RESULT_FAILURE { | ||
| 511 | dst_page_table.CleanupForIpcServer(dst_address, size, dst_state); | ||
| 512 | src_page_table.CleanupForIpcClient(src_address, size, dst_state); | ||
| 513 | }; | ||
| 514 | |||
| 515 | // Push the appropriate mapping. | ||
| 516 | if (perm == KMemoryPermission::UserRead) { | ||
| 517 | R_TRY(request->PushSend(src_address, dst_address, size, dst_state)); | ||
| 518 | } else if (send) { | ||
| 519 | R_TRY(request->PushExchange(src_address, dst_address, size, dst_state)); | ||
| 520 | } else { | ||
| 521 | R_TRY(request->PushReceive(src_address, dst_address, size, dst_state)); | ||
| 208 | } | 522 | } |
| 523 | } | ||
| 209 | 524 | ||
| 210 | // If there are no requests, we're done. | 525 | // Set the output descriptor. |
| 211 | if (request == nullptr) { | 526 | dst_msg.Set(cur_offset, |
| 212 | break; | 527 | MessageBuffer::MapAliasDescriptor(reinterpret_cast<void*>(GetInteger(dst_address)), |
| 528 | size, src_desc.GetAttribute())); | ||
| 529 | |||
| 530 | R_SUCCEED(); | ||
| 531 | } | ||
| 532 | |||
| 533 | Result ReceiveMessage(KernelCore& kernel, bool& recv_list_broken, uint64_t dst_message_buffer, | ||
| 534 | size_t dst_buffer_size, KPhysicalAddress dst_message_paddr, | ||
| 535 | KThread& src_thread, uint64_t src_message_buffer, size_t src_buffer_size, | ||
| 536 | KServerSession* session, KSessionRequest* request) { | ||
| 537 | // Prepare variables for receive. | ||
| 538 | KThread& dst_thread = GetCurrentThread(kernel); | ||
| 539 | KProcess& dst_process = *(dst_thread.GetOwnerProcess()); | ||
| 540 | KProcess& src_process = *(src_thread.GetOwnerProcess()); | ||
| 541 | auto& dst_page_table = dst_process.GetPageTable(); | ||
| 542 | auto& src_page_table = src_process.GetPageTable(); | ||
| 543 | |||
| 544 | // NOTE: Session is used only for debugging, and so may go unused. | ||
| 545 | (void)session; | ||
| 546 | |||
| 547 | // The receive list is initially not broken. | ||
| 548 | recv_list_broken = false; | ||
| 549 | |||
| 550 | // Set the server process for the request. | ||
| 551 | request->SetServerProcess(std::addressof(dst_process)); | ||
| 552 | |||
| 553 | // Determine the message buffers. | ||
| 554 | u32 *dst_msg_ptr, *src_msg_ptr; | ||
| 555 | bool dst_user, src_user; | ||
| 556 | |||
| 557 | if (dst_message_buffer) { | ||
| 558 | dst_msg_ptr = kernel.System().DeviceMemory().GetPointer<u32>(dst_message_paddr); | ||
| 559 | dst_user = true; | ||
| 560 | } else { | ||
| 561 | dst_msg_ptr = dst_page_table.GetMemory().GetPointer<u32>(dst_thread.GetTlsAddress()); | ||
| 562 | dst_buffer_size = MessageBufferSize; | ||
| 563 | dst_message_buffer = GetInteger(dst_thread.GetTlsAddress()); | ||
| 564 | dst_user = false; | ||
| 565 | } | ||
| 566 | |||
| 567 | if (src_message_buffer) { | ||
| 568 | // NOTE: Nintendo does not check the result of this GetPhysicalAddress call. | ||
| 569 | src_msg_ptr = src_page_table.GetMemory().GetPointer<u32>(src_message_buffer); | ||
| 570 | src_user = true; | ||
| 571 | } else { | ||
| 572 | src_msg_ptr = src_page_table.GetMemory().GetPointer<u32>(src_thread.GetTlsAddress()); | ||
| 573 | src_buffer_size = MessageBufferSize; | ||
| 574 | src_message_buffer = GetInteger(src_thread.GetTlsAddress()); | ||
| 575 | src_user = false; | ||
| 576 | } | ||
| 577 | |||
| 578 | // Parse the headers. | ||
| 579 | const MessageBuffer dst_msg(dst_msg_ptr, dst_buffer_size); | ||
| 580 | const MessageBuffer src_msg(src_msg_ptr, src_buffer_size); | ||
| 581 | const MessageBuffer::MessageHeader dst_header(dst_msg); | ||
| 582 | const MessageBuffer::MessageHeader src_header(src_msg); | ||
| 583 | const MessageBuffer::SpecialHeader dst_special_header(dst_msg, dst_header); | ||
| 584 | const MessageBuffer::SpecialHeader src_special_header(src_msg, src_header); | ||
| 585 | |||
| 586 | // Get the end of the source message. | ||
| 587 | const size_t src_end_offset = | ||
| 588 | MessageBuffer::GetRawDataIndex(src_header, src_special_header) + src_header.GetRawCount(); | ||
| 589 | |||
| 590 | // Ensure that the headers fit. | ||
| 591 | R_UNLESS(MessageBuffer::GetMessageBufferSize(dst_header, dst_special_header) <= dst_buffer_size, | ||
| 592 | ResultInvalidCombination); | ||
| 593 | R_UNLESS(MessageBuffer::GetMessageBufferSize(src_header, src_special_header) <= src_buffer_size, | ||
| 594 | ResultInvalidCombination); | ||
| 595 | |||
| 596 | // Ensure the receive list offset is after the end of raw data. | ||
| 597 | if (dst_header.GetReceiveListOffset()) { | ||
| 598 | R_UNLESS(dst_header.GetReceiveListOffset() >= | ||
| 599 | MessageBuffer::GetRawDataIndex(dst_header, dst_special_header) + | ||
| 600 | dst_header.GetRawCount(), | ||
| 601 | ResultInvalidCombination); | ||
| 602 | } | ||
| 603 | |||
| 604 | // Ensure that the destination buffer is big enough to receive the source. | ||
| 605 | R_UNLESS(dst_buffer_size >= src_end_offset * sizeof(u32), ResultMessageTooLarge); | ||
| 606 | |||
| 607 | // Get the receive list. | ||
| 608 | const s32 dst_recv_list_idx = | ||
| 609 | MessageBuffer::GetReceiveListIndex(dst_header, dst_special_header); | ||
| 610 | ReceiveList dst_recv_list(dst_msg_ptr, dst_message_buffer, dst_page_table, dst_header, | ||
| 611 | dst_special_header, dst_buffer_size, src_end_offset, | ||
| 612 | dst_recv_list_idx, !dst_user); | ||
| 613 | |||
| 614 | // Ensure that the source special header isn't invalid. | ||
| 615 | const bool src_has_special_header = src_header.GetHasSpecialHeader(); | ||
| 616 | if (src_has_special_header) { | ||
| 617 | // Sending move handles from client -> server is not allowed. | ||
| 618 | R_UNLESS(src_special_header.GetMoveHandleCount() == 0, ResultInvalidCombination); | ||
| 619 | } | ||
| 620 | |||
| 621 | // Prepare for further processing. | ||
| 622 | int pointer_key = 0; | ||
| 623 | int offset = dst_msg.Set(src_header); | ||
| 624 | |||
| 625 | // Set up a guard to make sure that we end up in a clean state on error. | ||
| 626 | ON_RESULT_FAILURE { | ||
| 627 | // Cleanup mappings. | ||
| 628 | CleanupMap(request, std::addressof(dst_process), std::addressof(src_page_table)); | ||
| 629 | |||
| 630 | // Cleanup special data. | ||
| 631 | if (src_header.GetHasSpecialHeader()) { | ||
| 632 | CleanupSpecialData(dst_process, dst_msg_ptr, dst_buffer_size); | ||
| 213 | } | 633 | } |
| 214 | 634 | ||
| 215 | // All requests must have threads. | 635 | // Cleanup the header if the receive list isn't broken. |
| 216 | ASSERT(thread != nullptr); | 636 | if (!recv_list_broken) { |
| 637 | dst_msg.Set(dst_header); | ||
| 638 | if (dst_header.GetHasSpecialHeader()) { | ||
| 639 | dst_msg.Set(dst_special_header); | ||
| 640 | } | ||
| 641 | } | ||
| 642 | }; | ||
| 643 | |||
| 644 | // Process any special data. | ||
| 645 | if (src_header.GetHasSpecialHeader()) { | ||
| 646 | // After we process, make sure we track whether the receive list is broken. | ||
| 647 | SCOPE_EXIT({ | ||
| 648 | if (offset > dst_recv_list_idx) { | ||
| 649 | recv_list_broken = true; | ||
| 650 | } | ||
| 651 | }); | ||
| 217 | 652 | ||
| 218 | // Ensure that we close the request when done. | 653 | // Process special data. |
| 219 | SCOPE_EXIT({ request->Close(); }); | 654 | R_TRY(ProcessMessageSpecialData<false>(offset, dst_process, src_process, src_thread, |
| 655 | dst_msg, src_msg, src_special_header)); | ||
| 656 | } | ||
| 220 | 657 | ||
| 221 | // If we're terminating, close a reference to the thread and event. | 658 | // Process any pointer buffers. |
| 222 | if (terminate) { | 659 | for (auto i = 0; i < src_header.GetPointerCount(); ++i) { |
| 223 | thread->Close(); | 660 | // After we process, make sure we track whether the receive list is broken. |
| 224 | if (event != nullptr) { | 661 | SCOPE_EXIT({ |
| 225 | event->Close(); | 662 | if (offset > dst_recv_list_idx) { |
| 663 | recv_list_broken = true; | ||
| 664 | } | ||
| 665 | }); | ||
| 666 | |||
| 667 | R_TRY(ProcessReceiveMessagePointerDescriptors( | ||
| 668 | offset, pointer_key, dst_page_table, src_page_table, dst_msg, src_msg, dst_recv_list, | ||
| 669 | dst_user && dst_header.GetReceiveListCount() == | ||
| 670 | MessageBuffer::MessageHeader::ReceiveListCountType_ToMessageBuffer)); | ||
| 671 | } | ||
| 672 | |||
| 673 | // Process any map alias buffers. | ||
| 674 | for (auto i = 0; i < src_header.GetMapAliasCount(); ++i) { | ||
| 675 | // After we process, make sure we track whether the receive list is broken. | ||
| 676 | SCOPE_EXIT({ | ||
| 677 | if (offset > dst_recv_list_idx) { | ||
| 678 | recv_list_broken = true; | ||
| 679 | } | ||
| 680 | }); | ||
| 681 | |||
| 682 | // We process in order send, recv, exch. Buffers after send (recv/exch) are ReadWrite. | ||
| 683 | const KMemoryPermission perm = (i >= src_header.GetSendCount()) | ||
| 684 | ? KMemoryPermission::UserReadWrite | ||
| 685 | : KMemoryPermission::UserRead; | ||
| 686 | |||
| 687 | // Buffer is send if it is send or exch. | ||
| 688 | const bool send = (i < src_header.GetSendCount()) || | ||
| 689 | (i >= src_header.GetSendCount() + src_header.GetReceiveCount()); | ||
| 690 | |||
| 691 | R_TRY(ProcessReceiveMessageMapAliasDescriptors(offset, dst_page_table, src_page_table, | ||
| 692 | dst_msg, src_msg, request, perm, send)); | ||
| 693 | } | ||
| 694 | |||
| 695 | // Process any raw data. | ||
| 696 | if (const auto raw_count = src_header.GetRawCount(); raw_count != 0) { | ||
| 697 | // After we process, make sure we track whether the receive list is broken. | ||
| 698 | SCOPE_EXIT({ | ||
| 699 | if (offset + raw_count > dst_recv_list_idx) { | ||
| 700 | recv_list_broken = true; | ||
| 226 | } | 701 | } |
| 702 | }); | ||
| 703 | |||
| 704 | // Get the offset and size. | ||
| 705 | const size_t offset_words = offset * sizeof(u32); | ||
| 706 | const size_t raw_size = raw_count * sizeof(u32); | ||
| 707 | |||
| 708 | if (!dst_user && !src_user) { | ||
| 709 | // Fast case is TLS -> TLS, do raw memcpy if we can. | ||
| 710 | std::memcpy(dst_msg_ptr + offset, src_msg_ptr + offset, raw_size); | ||
| 711 | } else { | ||
| 712 | // Copy the memory. | ||
| 713 | temp_buffer.resize_destructive(raw_size); | ||
| 714 | src_page_table.GetMemory().ReadBlock(src_message_buffer + offset_words, | ||
| 715 | temp_buffer.data(), raw_size); | ||
| 716 | dst_page_table.GetMemory().WriteBlock(dst_message_buffer + offset_words, | ||
| 717 | temp_buffer.data(), raw_size); | ||
| 227 | } | 718 | } |
| 719 | } | ||
| 228 | 720 | ||
| 229 | // If we need to, reply. | 721 | // We succeeded! |
| 230 | if (event != nullptr && !cur_request) { | 722 | R_SUCCEED(); |
| 231 | // There must be no mappings. | 723 | } |
| 232 | ASSERT(request->GetSendCount() == 0); | ||
| 233 | ASSERT(request->GetReceiveCount() == 0); | ||
| 234 | ASSERT(request->GetExchangeCount() == 0); | ||
| 235 | 724 | ||
| 236 | // // Get the process and page table. | 725 | Result ProcessSendMessageReceiveMapping(KProcessPageTable& src_page_table, |
| 237 | // KProcess *client_process = thread->GetOwnerProcess(); | 726 | KProcessPageTable& dst_page_table, |
| 238 | // auto& client_pt = client_process->GetPageTable(); | 727 | KProcessAddress client_address, |
| 728 | KProcessAddress server_address, size_t size, | ||
| 729 | KMemoryState src_state) { | ||
| 730 | // If the size is zero, there's nothing to process. | ||
| 731 | R_SUCCEED_IF(size == 0); | ||
| 732 | |||
| 733 | // Get the memory state and attribute mask to test. | ||
| 734 | u32 test_state; | ||
| 735 | u32 test_attr_mask; | ||
| 736 | R_TRY(GetMapAliasTestStateAndAttributeMask(test_state, test_attr_mask, src_state)); | ||
| 737 | |||
| 738 | // Determine buffer extents. | ||
| 739 | KProcessAddress aligned_dst_start = Common::AlignDown(GetInteger(client_address), PageSize); | ||
| 740 | KProcessAddress aligned_dst_end = Common::AlignUp(GetInteger(client_address) + size, PageSize); | ||
| 741 | KProcessAddress mapping_dst_start = Common::AlignUp(GetInteger(client_address), PageSize); | ||
| 742 | KProcessAddress mapping_dst_end = | ||
| 743 | Common::AlignDown(GetInteger(client_address) + size, PageSize); | ||
| 744 | |||
| 745 | KProcessAddress mapping_src_end = | ||
| 746 | Common::AlignDown(GetInteger(server_address) + size, PageSize); | ||
| 747 | |||
| 748 | // If the start of the buffer is unaligned, handle that. | ||
| 749 | if (aligned_dst_start != mapping_dst_start) { | ||
| 750 | ASSERT(client_address < mapping_dst_start); | ||
| 751 | const size_t copy_size = std::min<size_t>(size, mapping_dst_start - client_address); | ||
| 752 | temp_buffer.resize_destructive(copy_size); | ||
| 753 | src_page_table.GetMemory().ReadBlock(client_address, temp_buffer.data(), copy_size); | ||
| 754 | dst_page_table.GetMemory().WriteBlock(server_address, temp_buffer.data(), copy_size); | ||
| 755 | } | ||
| 239 | 756 | ||
| 240 | // // Reply to the request. | 757 | // If the end of the buffer is unaligned, handle that. |
| 241 | // ReplyAsyncError(client_process, request->GetAddress(), request->GetSize(), | 758 | if (mapping_dst_end < aligned_dst_end && |
| 242 | // ResultSessionClosed); | 759 | (aligned_dst_start == mapping_dst_start || aligned_dst_start < mapping_dst_end)) { |
| 760 | const size_t copy_size = client_address + size - mapping_dst_end; | ||
| 761 | temp_buffer.resize_destructive(copy_size); | ||
| 762 | src_page_table.GetMemory().ReadBlock(mapping_src_end, temp_buffer.data(), copy_size); | ||
| 763 | dst_page_table.GetMemory().WriteBlock(mapping_dst_end, temp_buffer.data(), copy_size); | ||
| 764 | } | ||
| 243 | 765 | ||
| 244 | // // Unlock the buffer. | 766 | R_SUCCEED(); |
| 245 | // // NOTE: Nintendo does not check the result of this. | 767 | } |
| 246 | // client_pt.UnlockForIpcUserBuffer(request->GetAddress(), request->GetSize()); | ||
| 247 | 768 | ||
| 248 | // Signal the event. | 769 | Result ProcessSendMessagePointerDescriptors(int& offset, int& pointer_key, |
| 249 | event->Signal(); | 770 | KProcessPageTable& src_page_table, |
| 771 | KProcessPageTable& dst_page_table, | ||
| 772 | const MessageBuffer& dst_msg, | ||
| 773 | const MessageBuffer& src_msg, | ||
| 774 | const ReceiveList& dst_recv_list, bool dst_user) { | ||
| 775 | // Get the offset at the start of processing. | ||
| 776 | const int cur_offset = offset; | ||
| 777 | |||
| 778 | // Get the pointer desc. | ||
| 779 | MessageBuffer::PointerDescriptor src_desc(src_msg, cur_offset); | ||
| 780 | offset += static_cast<int>(MessageBuffer::PointerDescriptor::GetDataSize() / sizeof(u32)); | ||
| 781 | |||
| 782 | // Extract address/size. | ||
| 783 | const uint64_t src_pointer = src_desc.GetAddress(); | ||
| 784 | const size_t recv_size = src_desc.GetSize(); | ||
| 785 | uint64_t recv_pointer = 0; | ||
| 786 | |||
| 787 | // Process the buffer, if it has a size. | ||
| 788 | if (recv_size > 0) { | ||
| 789 | // If using indexing, set index. | ||
| 790 | if (dst_recv_list.IsIndex()) { | ||
| 791 | pointer_key = src_desc.GetIndex(); | ||
| 250 | } | 792 | } |
| 793 | |||
| 794 | // Get the buffer. | ||
| 795 | dst_recv_list.GetBuffer(recv_pointer, recv_size, pointer_key); | ||
| 796 | R_UNLESS(recv_pointer != 0, ResultOutOfResource); | ||
| 797 | |||
| 798 | // Perform the pointer data copy. | ||
| 799 | temp_buffer.resize_destructive(recv_size); | ||
| 800 | src_page_table.GetMemory().ReadBlock(src_pointer, temp_buffer.data(), recv_size); | ||
| 801 | dst_page_table.GetMemory().WriteBlock(recv_pointer, temp_buffer.data(), recv_size); | ||
| 251 | } | 802 | } |
| 252 | 803 | ||
| 253 | // Notify. | 804 | // Set the output descriptor. |
| 254 | this->NotifyAvailable(ResultSessionClosed); | 805 | dst_msg.Set(cur_offset, MessageBuffer::PointerDescriptor(reinterpret_cast<void*>(recv_pointer), |
| 806 | recv_size, src_desc.GetIndex())); | ||
| 807 | |||
| 808 | R_SUCCEED(); | ||
| 255 | } | 809 | } |
| 256 | 810 | ||
| 257 | bool KServerSession::IsSignaled() const { | 811 | Result SendMessage(KernelCore& kernel, uint64_t src_message_buffer, size_t src_buffer_size, |
| 258 | ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); | 812 | KPhysicalAddress src_message_paddr, KThread& dst_thread, |
| 813 | uint64_t dst_message_buffer, size_t dst_buffer_size, KServerSession* session, | ||
| 814 | KSessionRequest* request) { | ||
| 815 | // Prepare variables for send. | ||
| 816 | KThread& src_thread = GetCurrentThread(kernel); | ||
| 817 | KProcess& dst_process = *(dst_thread.GetOwnerProcess()); | ||
| 818 | KProcess& src_process = *(src_thread.GetOwnerProcess()); | ||
| 819 | auto& dst_page_table = dst_process.GetPageTable(); | ||
| 820 | auto& src_page_table = src_process.GetPageTable(); | ||
| 821 | |||
| 822 | // NOTE: Session is used only for debugging, and so may go unused. | ||
| 823 | (void)session; | ||
| 824 | |||
| 825 | // Determine the message buffers. | ||
| 826 | u32 *dst_msg_ptr, *src_msg_ptr; | ||
| 827 | bool dst_user, src_user; | ||
| 828 | |||
| 829 | if (dst_message_buffer) { | ||
| 830 | // NOTE: Nintendo does not check the result of this GetPhysicalAddress call. | ||
| 831 | dst_msg_ptr = dst_page_table.GetMemory().GetPointer<u32>(dst_message_buffer); | ||
| 832 | dst_user = true; | ||
| 833 | } else { | ||
| 834 | dst_msg_ptr = dst_page_table.GetMemory().GetPointer<u32>(dst_thread.GetTlsAddress()); | ||
| 835 | dst_buffer_size = MessageBufferSize; | ||
| 836 | dst_message_buffer = GetInteger(dst_thread.GetTlsAddress()); | ||
| 837 | dst_user = false; | ||
| 838 | } | ||
| 259 | 839 | ||
| 260 | // If the client is closed, we're always signaled. | 840 | if (src_message_buffer) { |
| 261 | if (m_parent->IsClientClosed()) { | 841 | src_msg_ptr = src_page_table.GetMemory().GetPointer<u32>(src_message_buffer); |
| 262 | return true; | 842 | src_user = true; |
| 843 | } else { | ||
| 844 | src_msg_ptr = src_page_table.GetMemory().GetPointer<u32>(src_thread.GetTlsAddress()); | ||
| 845 | src_buffer_size = MessageBufferSize; | ||
| 846 | src_message_buffer = GetInteger(src_thread.GetTlsAddress()); | ||
| 847 | src_user = false; | ||
| 263 | } | 848 | } |
| 264 | 849 | ||
| 265 | // Otherwise, we're signaled if we have a request and aren't handling one. | 850 | // Parse the headers. |
| 266 | return !m_request_list.empty() && m_current_request == nullptr; | 851 | const MessageBuffer dst_msg(dst_msg_ptr, dst_buffer_size); |
| 852 | const MessageBuffer src_msg(src_msg_ptr, src_buffer_size); | ||
| 853 | const MessageBuffer::MessageHeader dst_header(dst_msg); | ||
| 854 | const MessageBuffer::MessageHeader src_header(src_msg); | ||
| 855 | const MessageBuffer::SpecialHeader dst_special_header(dst_msg, dst_header); | ||
| 856 | const MessageBuffer::SpecialHeader src_special_header(src_msg, src_header); | ||
| 857 | |||
| 858 | // Get the end of the source message. | ||
| 859 | const size_t src_end_offset = | ||
| 860 | MessageBuffer::GetRawDataIndex(src_header, src_special_header) + src_header.GetRawCount(); | ||
| 861 | |||
| 862 | // Declare variables for processing. | ||
| 863 | int offset = 0; | ||
| 864 | int pointer_key = 0; | ||
| 865 | bool processed_special_data = false; | ||
| 866 | |||
| 867 | // Send the message. | ||
| 868 | { | ||
| 869 | // Make sure that we end up in a clean state on error. | ||
| 870 | ON_RESULT_FAILURE { | ||
| 871 | // Cleanup special data. | ||
| 872 | if (processed_special_data) { | ||
| 873 | if (src_header.GetHasSpecialHeader()) { | ||
| 874 | CleanupSpecialData(dst_process, dst_msg_ptr, dst_buffer_size); | ||
| 875 | } | ||
| 876 | } else { | ||
| 877 | CleanupServerHandles(kernel, src_user ? src_message_buffer : 0, src_buffer_size, | ||
| 878 | src_message_paddr); | ||
| 879 | } | ||
| 880 | |||
| 881 | // Cleanup mappings. | ||
| 882 | CleanupMap(request, std::addressof(src_process), std::addressof(dst_page_table)); | ||
| 883 | }; | ||
| 884 | |||
| 885 | // Ensure that the headers fit. | ||
| 886 | R_UNLESS(MessageBuffer::GetMessageBufferSize(src_header, src_special_header) <= | ||
| 887 | src_buffer_size, | ||
| 888 | ResultInvalidCombination); | ||
| 889 | R_UNLESS(MessageBuffer::GetMessageBufferSize(dst_header, dst_special_header) <= | ||
| 890 | dst_buffer_size, | ||
| 891 | ResultInvalidCombination); | ||
| 892 | |||
| 893 | // Ensure the receive list offset is after the end of raw data. | ||
| 894 | if (dst_header.GetReceiveListOffset()) { | ||
| 895 | R_UNLESS(dst_header.GetReceiveListOffset() >= | ||
| 896 | MessageBuffer::GetRawDataIndex(dst_header, dst_special_header) + | ||
| 897 | dst_header.GetRawCount(), | ||
| 898 | ResultInvalidCombination); | ||
| 899 | } | ||
| 900 | |||
| 901 | // Ensure that the destination buffer is big enough to receive the source. | ||
| 902 | R_UNLESS(dst_buffer_size >= src_end_offset * sizeof(u32), ResultMessageTooLarge); | ||
| 903 | |||
| 904 | // Replies must have no buffers. | ||
| 905 | R_UNLESS(src_header.GetSendCount() == 0, ResultInvalidCombination); | ||
| 906 | R_UNLESS(src_header.GetReceiveCount() == 0, ResultInvalidCombination); | ||
| 907 | R_UNLESS(src_header.GetExchangeCount() == 0, ResultInvalidCombination); | ||
| 908 | |||
| 909 | // Get the receive list. | ||
| 910 | const s32 dst_recv_list_idx = | ||
| 911 | MessageBuffer::GetReceiveListIndex(dst_header, dst_special_header); | ||
| 912 | ReceiveList dst_recv_list(dst_msg_ptr, dst_message_buffer, dst_page_table, dst_header, | ||
| 913 | dst_special_header, dst_buffer_size, src_end_offset, | ||
| 914 | dst_recv_list_idx, !dst_user); | ||
| 915 | |||
| 916 | // Handle any receive buffers. | ||
| 917 | for (size_t i = 0; i < request->GetReceiveCount(); ++i) { | ||
| 918 | R_TRY(ProcessSendMessageReceiveMapping( | ||
| 919 | src_page_table, dst_page_table, request->GetReceiveClientAddress(i), | ||
| 920 | request->GetReceiveServerAddress(i), request->GetReceiveSize(i), | ||
| 921 | request->GetReceiveMemoryState(i))); | ||
| 922 | } | ||
| 923 | |||
| 924 | // Handle any exchange buffers. | ||
| 925 | for (size_t i = 0; i < request->GetExchangeCount(); ++i) { | ||
| 926 | R_TRY(ProcessSendMessageReceiveMapping( | ||
| 927 | src_page_table, dst_page_table, request->GetExchangeClientAddress(i), | ||
| 928 | request->GetExchangeServerAddress(i), request->GetExchangeSize(i), | ||
| 929 | request->GetExchangeMemoryState(i))); | ||
| 930 | } | ||
| 931 | |||
| 932 | // Set the header. | ||
| 933 | offset = dst_msg.Set(src_header); | ||
| 934 | |||
| 935 | // Process any special data. | ||
| 936 | ASSERT(GetCurrentThreadPointer(kernel) == std::addressof(src_thread)); | ||
| 937 | processed_special_data = true; | ||
| 938 | if (src_header.GetHasSpecialHeader()) { | ||
| 939 | R_TRY(ProcessMessageSpecialData<true>(offset, dst_process, src_process, src_thread, | ||
| 940 | dst_msg, src_msg, src_special_header)); | ||
| 941 | } | ||
| 942 | |||
| 943 | // Process any pointer buffers. | ||
| 944 | for (auto i = 0; i < src_header.GetPointerCount(); ++i) { | ||
| 945 | R_TRY(ProcessSendMessagePointerDescriptors( | ||
| 946 | offset, pointer_key, src_page_table, dst_page_table, dst_msg, src_msg, | ||
| 947 | dst_recv_list, | ||
| 948 | dst_user && | ||
| 949 | dst_header.GetReceiveListCount() == | ||
| 950 | MessageBuffer::MessageHeader::ReceiveListCountType_ToMessageBuffer)); | ||
| 951 | } | ||
| 952 | |||
| 953 | // Clear any map alias buffers. | ||
| 954 | for (auto i = 0; i < src_header.GetMapAliasCount(); ++i) { | ||
| 955 | offset = dst_msg.Set(offset, MessageBuffer::MapAliasDescriptor()); | ||
| 956 | } | ||
| 957 | |||
| 958 | // Process any raw data. | ||
| 959 | if (const auto raw_count = src_header.GetRawCount(); raw_count != 0) { | ||
| 960 | // Get the offset and size. | ||
| 961 | const size_t offset_words = offset * sizeof(u32); | ||
| 962 | const size_t raw_size = raw_count * sizeof(u32); | ||
| 963 | |||
| 964 | if (!dst_user && !src_user) { | ||
| 965 | // Fast case is TLS -> TLS, do raw memcpy if we can. | ||
| 966 | std::memcpy(dst_msg_ptr + offset, src_msg_ptr + offset, raw_size); | ||
| 967 | } else { | ||
| 968 | // Copy the memory. | ||
| 969 | temp_buffer.resize_destructive(raw_size); | ||
| 970 | src_page_table.GetMemory().ReadBlock(src_message_buffer + offset_words, | ||
| 971 | temp_buffer.data(), raw_size); | ||
| 972 | dst_page_table.GetMemory().WriteBlock(dst_message_buffer + offset_words, | ||
| 973 | temp_buffer.data(), raw_size); | ||
| 974 | } | ||
| 975 | } | ||
| 976 | } | ||
| 977 | |||
| 978 | // Perform (and validate) any remaining cleanup. | ||
| 979 | R_RETURN(CleanupMap(request, std::addressof(src_process), std::addressof(dst_page_table))); | ||
| 267 | } | 980 | } |
| 268 | 981 | ||
| 269 | Result KServerSession::OnRequest(KSessionRequest* request) { | 982 | void ReplyAsyncError(KProcess* to_process, uint64_t to_msg_buf, size_t to_msg_buf_size, |
| 270 | // Create the wait queue. | 983 | Result result) { |
| 271 | ThreadQueueImplForKServerSessionRequest wait_queue{m_kernel}; | 984 | // Convert the address to a linear pointer. |
| 985 | u32* to_msg = to_process->GetMemory().GetPointer<u32>(to_msg_buf); | ||
| 986 | |||
| 987 | // Set the error. | ||
| 988 | MessageBuffer msg(to_msg, to_msg_buf_size); | ||
| 989 | msg.SetAsyncResult(result); | ||
| 990 | } | ||
| 991 | |||
| 992 | } // namespace | ||
| 993 | |||
| 994 | KServerSession::KServerSession(KernelCore& kernel) | ||
| 995 | : KSynchronizationObject{kernel}, m_lock{m_kernel} {} | ||
| 996 | |||
| 997 | KServerSession::~KServerSession() = default; | ||
| 998 | |||
| 999 | void KServerSession::Destroy() { | ||
| 1000 | m_parent->OnServerClosed(); | ||
| 1001 | |||
| 1002 | this->CleanupRequests(); | ||
| 1003 | |||
| 1004 | m_parent->Close(); | ||
| 1005 | } | ||
| 1006 | |||
| 1007 | Result KServerSession::ReceiveRequest(uintptr_t server_message, uintptr_t server_buffer_size, | ||
| 1008 | KPhysicalAddress server_message_paddr, | ||
| 1009 | std::shared_ptr<Service::HLERequestContext>* out_context, | ||
| 1010 | std::weak_ptr<Service::SessionRequestManager> manager) { | ||
| 1011 | // Lock the session. | ||
| 1012 | KScopedLightLock lk{m_lock}; | ||
| 1013 | |||
| 1014 | // Get the request and client thread. | ||
| 1015 | KSessionRequest* request; | ||
| 1016 | KThread* client_thread; | ||
| 272 | 1017 | ||
| 273 | { | 1018 | { |
| 274 | // Lock the scheduler. | ||
| 275 | KScopedSchedulerLock sl{m_kernel}; | 1019 | KScopedSchedulerLock sl{m_kernel}; |
| 276 | 1020 | ||
| 277 | // Ensure that we can handle new requests. | 1021 | // Ensure that we can service the request. |
| 278 | R_UNLESS(!m_parent->IsServerClosed(), ResultSessionClosed); | 1022 | R_UNLESS(!m_parent->IsClientClosed(), ResultSessionClosed); |
| 279 | 1023 | ||
| 280 | // Check that we're not terminating. | 1024 | // Ensure we aren't already servicing a request. |
| 281 | R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(), ResultTerminationRequested); | 1025 | R_UNLESS(m_current_request == nullptr, ResultNotFound); |
| 282 | 1026 | ||
| 283 | // Get whether we're empty. | 1027 | // Ensure we have a request to service. |
| 284 | const bool was_empty = m_request_list.empty(); | 1028 | R_UNLESS(!m_request_list.empty(), ResultNotFound); |
| 285 | 1029 | ||
| 286 | // Add the request to the list. | 1030 | // Pop the first request from the list. |
| 287 | request->Open(); | 1031 | request = std::addressof(m_request_list.front()); |
| 288 | m_request_list.push_back(*request); | 1032 | m_request_list.pop_front(); |
| 289 | 1033 | ||
| 290 | // If we were empty, signal. | 1034 | // Get the thread for the request. |
| 291 | if (was_empty) { | 1035 | client_thread = request->GetThread(); |
| 292 | this->NotifyAvailable(); | 1036 | R_UNLESS(client_thread != nullptr, ResultSessionClosed); |
| 1037 | |||
| 1038 | // Open the client thread. | ||
| 1039 | client_thread->Open(); | ||
| 1040 | } | ||
| 1041 | |||
| 1042 | SCOPE_EXIT({ client_thread->Close(); }); | ||
| 1043 | |||
| 1044 | // Set the request as our current. | ||
| 1045 | m_current_request = request; | ||
| 1046 | |||
| 1047 | // Get the client address. | ||
| 1048 | uint64_t client_message = request->GetAddress(); | ||
| 1049 | size_t client_buffer_size = request->GetSize(); | ||
| 1050 | bool recv_list_broken = false; | ||
| 1051 | |||
| 1052 | // Receive the message. | ||
| 1053 | Result result = ResultSuccess; | ||
| 1054 | |||
| 1055 | if (out_context != nullptr) { | ||
| 1056 | // HLE request. | ||
| 1057 | if (!client_message) { | ||
| 1058 | client_message = GetInteger(client_thread->GetTlsAddress()); | ||
| 293 | } | 1059 | } |
| 1060 | Core::Memory::Memory& memory{client_thread->GetOwnerProcess()->GetMemory()}; | ||
| 1061 | u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(client_message))}; | ||
| 1062 | *out_context = | ||
| 1063 | std::make_shared<Service::HLERequestContext>(m_kernel, memory, this, client_thread); | ||
| 1064 | (*out_context)->SetSessionRequestManager(manager); | ||
| 1065 | (*out_context) | ||
| 1066 | ->PopulateFromIncomingCommandBuffer(*client_thread->GetOwnerProcess(), cmd_buf); | ||
| 1067 | // We succeeded. | ||
| 1068 | R_SUCCEED(); | ||
| 1069 | } else { | ||
| 1070 | result = ReceiveMessage(m_kernel, recv_list_broken, server_message, server_buffer_size, | ||
| 1071 | server_message_paddr, *client_thread, client_message, | ||
| 1072 | client_buffer_size, this, request); | ||
| 1073 | } | ||
| 294 | 1074 | ||
| 295 | // If we have a request event, this is asynchronous, and we don't need to wait. | 1075 | // Handle cleanup on receive failure. |
| 296 | R_SUCCEED_IF(request->GetEvent() != nullptr); | 1076 | if (R_FAILED(result)) { |
| 1077 | // Cache the result to return it to the client. | ||
| 1078 | const Result result_for_client = result; | ||
| 297 | 1079 | ||
| 298 | // This is a synchronous request, so we should wait for our request to complete. | 1080 | // Clear the current request. |
| 299 | GetCurrentThread(m_kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC); | 1081 | { |
| 300 | GetCurrentThread(m_kernel).BeginWait(std::addressof(wait_queue)); | 1082 | KScopedSchedulerLock sl(m_kernel); |
| 1083 | ASSERT(m_current_request == request); | ||
| 1084 | m_current_request = nullptr; | ||
| 1085 | if (!m_request_list.empty()) { | ||
| 1086 | this->NotifyAvailable(); | ||
| 1087 | } | ||
| 1088 | } | ||
| 1089 | |||
| 1090 | // Reply to the client. | ||
| 1091 | { | ||
| 1092 | // After we reply, close our reference to the request. | ||
| 1093 | SCOPE_EXIT({ request->Close(); }); | ||
| 1094 | |||
| 1095 | // Get the event to check whether the request is async. | ||
| 1096 | if (KEvent* event = request->GetEvent(); event != nullptr) { | ||
| 1097 | // The client sent an async request. | ||
| 1098 | KProcess* client = client_thread->GetOwnerProcess(); | ||
| 1099 | auto& client_pt = client->GetPageTable(); | ||
| 1100 | |||
| 1101 | // Send the async result. | ||
| 1102 | if (R_FAILED(result_for_client)) { | ||
| 1103 | ReplyAsyncError(client, client_message, client_buffer_size, result_for_client); | ||
| 1104 | } | ||
| 1105 | |||
| 1106 | // Unlock the client buffer. | ||
| 1107 | // NOTE: Nintendo does not check the result of this. | ||
| 1108 | client_pt.UnlockForIpcUserBuffer(client_message, client_buffer_size); | ||
| 1109 | |||
| 1110 | // Signal the event. | ||
| 1111 | event->Signal(); | ||
| 1112 | } else { | ||
| 1113 | // End the client thread's wait. | ||
| 1114 | KScopedSchedulerLock sl(m_kernel); | ||
| 1115 | |||
| 1116 | if (!client_thread->IsTerminationRequested()) { | ||
| 1117 | client_thread->EndWait(result_for_client); | ||
| 1118 | } | ||
| 1119 | } | ||
| 1120 | } | ||
| 1121 | |||
| 1122 | // Set the server result. | ||
| 1123 | if (recv_list_broken) { | ||
| 1124 | result = ResultReceiveListBroken; | ||
| 1125 | } else { | ||
| 1126 | result = ResultNotFound; | ||
| 1127 | } | ||
| 301 | } | 1128 | } |
| 302 | 1129 | ||
| 303 | return GetCurrentThread(m_kernel).GetWaitResult(); | 1130 | R_RETURN(result); |
| 304 | } | 1131 | } |
| 305 | 1132 | ||
| 306 | Result KServerSession::SendReply(bool is_hle) { | 1133 | Result KServerSession::SendReply(uintptr_t server_message, uintptr_t server_buffer_size, |
| 1134 | KPhysicalAddress server_message_paddr, bool is_hle) { | ||
| 307 | // Lock the session. | 1135 | // Lock the session. |
| 308 | KScopedLightLock lk{m_lock}; | 1136 | KScopedLightLock lk{m_lock}; |
| 309 | 1137 | ||
| @@ -327,7 +1155,7 @@ Result KServerSession::SendReply(bool is_hle) { | |||
| 327 | SCOPE_EXIT({ request->Close(); }); | 1155 | SCOPE_EXIT({ request->Close(); }); |
| 328 | 1156 | ||
| 329 | // Extract relevant information from the request. | 1157 | // Extract relevant information from the request. |
| 330 | const uintptr_t client_message = request->GetAddress(); | 1158 | const uint64_t client_message = request->GetAddress(); |
| 331 | const size_t client_buffer_size = request->GetSize(); | 1159 | const size_t client_buffer_size = request->GetSize(); |
| 332 | KThread* client_thread = request->GetThread(); | 1160 | KThread* client_thread = request->GetThread(); |
| 333 | KEvent* event = request->GetEvent(); | 1161 | KEvent* event = request->GetEvent(); |
| @@ -342,31 +1170,28 @@ Result KServerSession::SendReply(bool is_hle) { | |||
| 342 | // HLE servers write directly to a pointer to the thread command buffer. Therefore | 1170 | // HLE servers write directly to a pointer to the thread command buffer. Therefore |
| 343 | // the reply has already been written in this case. | 1171 | // the reply has already been written in this case. |
| 344 | } else { | 1172 | } else { |
| 345 | Core::Memory::Memory& memory{client_thread->GetOwnerProcess()->GetMemory()}; | 1173 | result = SendMessage(m_kernel, server_message, server_buffer_size, server_message_paddr, |
| 346 | KThread* server_thread = GetCurrentThreadPointer(m_kernel); | 1174 | *client_thread, client_message, client_buffer_size, this, request); |
| 347 | KProcess& src_process = *client_thread->GetOwnerProcess(); | 1175 | } |
| 348 | KProcess& dst_process = *server_thread->GetOwnerProcess(); | 1176 | } else if (!is_hle) { |
| 349 | UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess()); | 1177 | // Otherwise, we'll need to do some cleanup. |
| 350 | 1178 | KProcess* server_process = request->GetServerProcess(); | |
| 351 | auto* src_msg_buffer = memory.GetPointer<u32>(server_thread->GetTlsAddress()); | 1179 | KProcess* client_process = |
| 352 | auto* dst_msg_buffer = memory.GetPointer<u32>(client_message); | 1180 | (client_thread != nullptr) ? client_thread->GetOwnerProcess() : nullptr; |
| 353 | std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size); | 1181 | KProcessPageTable* client_page_table = |
| 354 | 1182 | (client_process != nullptr) ? std::addressof(client_process->GetPageTable()) : nullptr; | |
| 355 | // Translate special header ad-hoc. | 1183 | |
| 356 | MessageBuffer src_msg(src_msg_buffer, client_buffer_size); | 1184 | // Cleanup server handles. |
| 357 | MessageBuffer::MessageHeader src_header(src_msg); | 1185 | result = CleanupServerHandles(m_kernel, server_message, server_buffer_size, |
| 358 | MessageBuffer::SpecialHeader src_special_header(src_msg, src_header); | 1186 | server_message_paddr); |
| 359 | if (src_header.GetHasSpecialHeader()) { | 1187 | |
| 360 | MessageBuffer dst_msg(dst_msg_buffer, client_buffer_size); | 1188 | // Cleanup mappings. |
| 361 | result = ProcessMessageSpecialData<true>(dst_process, src_process, *server_thread, | 1189 | Result cleanup_map_result = CleanupMap(request, server_process, client_page_table); |
| 362 | dst_msg, src_msg, src_special_header); | 1190 | |
| 363 | if (R_FAILED(result)) { | 1191 | // If we successfully cleaned up handles, use the map cleanup result as our result. |
| 364 | CleanupSpecialData(dst_process, dst_msg_buffer, client_buffer_size); | 1192 | if (R_SUCCEEDED(result)) { |
| 365 | } | 1193 | result = cleanup_map_result; |
| 366 | } | ||
| 367 | } | 1194 | } |
| 368 | } else { | ||
| 369 | result = ResultSessionClosed; | ||
| 370 | } | 1195 | } |
| 371 | 1196 | ||
| 372 | // Select a result for the client. | 1197 | // Select a result for the client. |
| @@ -381,19 +1206,18 @@ Result KServerSession::SendReply(bool is_hle) { | |||
| 381 | // If there's a client thread, update it. | 1206 | // If there's a client thread, update it. |
| 382 | if (client_thread != nullptr) { | 1207 | if (client_thread != nullptr) { |
| 383 | if (event != nullptr) { | 1208 | if (event != nullptr) { |
| 384 | // // Get the client process/page table. | 1209 | // Get the client process/page table. |
| 385 | // KProcess *client_process = client_thread->GetOwnerProcess(); | 1210 | KProcess* client_process = client_thread->GetOwnerProcess(); |
| 386 | // KProcessPageTable *client_page_table = std::addressof(client_process->PageTable()); | 1211 | KProcessPageTable* client_page_table = std::addressof(client_process->GetPageTable()); |
| 387 | 1212 | ||
| 388 | // // If we need to, reply with an async error. | 1213 | // If we need to, reply with an async error. |
| 389 | // if (R_FAILED(client_result)) { | 1214 | if (R_FAILED(client_result)) { |
| 390 | // ReplyAsyncError(client_process, client_message, client_buffer_size, | 1215 | ReplyAsyncError(client_process, client_message, client_buffer_size, client_result); |
| 391 | // client_result); | 1216 | } |
| 392 | // } | ||
| 393 | 1217 | ||
| 394 | // // Unlock the client buffer. | 1218 | // Unlock the client buffer. |
| 395 | // // NOTE: Nintendo does not check the result of this. | 1219 | // NOTE: Nintendo does not check the result of this. |
| 396 | // client_page_table->UnlockForIpcUserBuffer(client_message, client_buffer_size); | 1220 | client_page_table->UnlockForIpcUserBuffer(client_message, client_buffer_size); |
| 397 | 1221 | ||
| 398 | // Signal the event. | 1222 | // Signal the event. |
| 399 | event->Signal(); | 1223 | event->Signal(); |
| @@ -410,91 +1234,53 @@ Result KServerSession::SendReply(bool is_hle) { | |||
| 410 | R_RETURN(result); | 1234 | R_RETURN(result); |
| 411 | } | 1235 | } |
| 412 | 1236 | ||
| 413 | Result KServerSession::ReceiveRequest(std::shared_ptr<Service::HLERequestContext>* out_context, | 1237 | Result KServerSession::OnRequest(KSessionRequest* request) { |
| 414 | std::weak_ptr<Service::SessionRequestManager> manager) { | 1238 | // Create the wait queue. |
| 415 | // Lock the session. | 1239 | ThreadQueueImplForKServerSessionRequest wait_queue{m_kernel}; |
| 416 | KScopedLightLock lk{m_lock}; | ||
| 417 | |||
| 418 | // Get the request and client thread. | ||
| 419 | KSessionRequest* request; | ||
| 420 | KThread* client_thread; | ||
| 421 | 1240 | ||
| 422 | { | 1241 | { |
| 1242 | // Lock the scheduler. | ||
| 423 | KScopedSchedulerLock sl{m_kernel}; | 1243 | KScopedSchedulerLock sl{m_kernel}; |
| 424 | 1244 | ||
| 425 | // Ensure that we can service the request. | 1245 | // Ensure that we can handle new requests. |
| 426 | R_UNLESS(!m_parent->IsClientClosed(), ResultSessionClosed); | 1246 | R_UNLESS(!m_parent->IsServerClosed(), ResultSessionClosed); |
| 427 | |||
| 428 | // Ensure we aren't already servicing a request. | ||
| 429 | R_UNLESS(m_current_request == nullptr, ResultNotFound); | ||
| 430 | 1247 | ||
| 431 | // Ensure we have a request to service. | 1248 | // Check that we're not terminating. |
| 432 | R_UNLESS(!m_request_list.empty(), ResultNotFound); | 1249 | R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(), ResultTerminationRequested); |
| 433 | 1250 | ||
| 434 | // Pop the first request from the list. | 1251 | // Get whether we're empty. |
| 435 | request = std::addressof(m_request_list.front()); | 1252 | const bool was_empty = m_request_list.empty(); |
| 436 | m_request_list.pop_front(); | ||
| 437 | 1253 | ||
| 438 | // Get the thread for the request. | 1254 | // Add the request to the list. |
| 439 | client_thread = request->GetThread(); | 1255 | request->Open(); |
| 440 | R_UNLESS(client_thread != nullptr, ResultSessionClosed); | 1256 | m_request_list.push_back(*request); |
| 441 | 1257 | ||
| 442 | // Open the client thread. | 1258 | // If we were empty, signal. |
| 443 | client_thread->Open(); | 1259 | if (was_empty) { |
| 444 | } | 1260 | this->NotifyAvailable(); |
| 1261 | } | ||
| 445 | 1262 | ||
| 446 | SCOPE_EXIT({ client_thread->Close(); }); | 1263 | // If we have a request event, this is asynchronous, and we don't need to wait. |
| 1264 | R_SUCCEED_IF(request->GetEvent() != nullptr); | ||
| 447 | 1265 | ||
| 448 | // Set the request as our current. | 1266 | // This is a synchronous request, so we should wait for our request to complete. |
| 449 | m_current_request = request; | 1267 | GetCurrentThread(m_kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC); |
| 1268 | GetCurrentThread(m_kernel).BeginWait(std::addressof(wait_queue)); | ||
| 1269 | } | ||
| 450 | 1270 | ||
| 451 | // Get the client address. | 1271 | return GetCurrentThread(m_kernel).GetWaitResult(); |
| 452 | uintptr_t client_message = request->GetAddress(); | 1272 | } |
| 453 | size_t client_buffer_size = request->GetSize(); | ||
| 454 | // bool recv_list_broken = false; | ||
| 455 | 1273 | ||
| 456 | if (!client_message) { | 1274 | bool KServerSession::IsSignaled() const { |
| 457 | client_message = GetInteger(client_thread->GetTlsAddress()); | 1275 | ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); |
| 458 | client_buffer_size = MessageBufferSize; | ||
| 459 | } | ||
| 460 | 1276 | ||
| 461 | // Receive the message. | 1277 | // If the client is closed, we're always signaled. |
| 462 | Core::Memory::Memory& memory{client_thread->GetOwnerProcess()->GetMemory()}; | 1278 | if (m_parent->IsClientClosed()) { |
| 463 | if (out_context != nullptr) { | 1279 | return true; |
| 464 | // HLE request. | ||
| 465 | u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(client_message))}; | ||
| 466 | *out_context = | ||
| 467 | std::make_shared<Service::HLERequestContext>(m_kernel, memory, this, client_thread); | ||
| 468 | (*out_context)->SetSessionRequestManager(manager); | ||
| 469 | (*out_context) | ||
| 470 | ->PopulateFromIncomingCommandBuffer(*client_thread->GetOwnerProcess(), cmd_buf); | ||
| 471 | } else { | ||
| 472 | KThread* server_thread = GetCurrentThreadPointer(m_kernel); | ||
| 473 | KProcess& src_process = *client_thread->GetOwnerProcess(); | ||
| 474 | KProcess& dst_process = *server_thread->GetOwnerProcess(); | ||
| 475 | UNIMPLEMENTED_IF(client_thread->GetOwnerProcess() != server_thread->GetOwnerProcess()); | ||
| 476 | |||
| 477 | auto* src_msg_buffer = memory.GetPointer<u32>(client_message); | ||
| 478 | auto* dst_msg_buffer = memory.GetPointer<u32>(server_thread->GetTlsAddress()); | ||
| 479 | std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size); | ||
| 480 | |||
| 481 | // Translate special header ad-hoc. | ||
| 482 | // TODO: fix this mess | ||
| 483 | MessageBuffer src_msg(src_msg_buffer, client_buffer_size); | ||
| 484 | MessageBuffer::MessageHeader src_header(src_msg); | ||
| 485 | MessageBuffer::SpecialHeader src_special_header(src_msg, src_header); | ||
| 486 | if (src_header.GetHasSpecialHeader()) { | ||
| 487 | MessageBuffer dst_msg(dst_msg_buffer, client_buffer_size); | ||
| 488 | Result res = ProcessMessageSpecialData<false>(dst_process, src_process, *client_thread, | ||
| 489 | dst_msg, src_msg, src_special_header); | ||
| 490 | if (R_FAILED(res)) { | ||
| 491 | CleanupSpecialData(dst_process, dst_msg_buffer, client_buffer_size); | ||
| 492 | } | ||
| 493 | } | ||
| 494 | } | 1280 | } |
| 495 | 1281 | ||
| 496 | // We succeeded. | 1282 | // Otherwise, we're signaled if we have a request and aren't handling one. |
| 497 | R_SUCCEED(); | 1283 | return !m_request_list.empty() && m_current_request == nullptr; |
| 498 | } | 1284 | } |
| 499 | 1285 | ||
| 500 | void KServerSession::CleanupRequests() { | 1286 | void KServerSession::CleanupRequests() { |
| @@ -527,31 +1313,30 @@ void KServerSession::CleanupRequests() { | |||
| 527 | SCOPE_EXIT({ request->Close(); }); | 1313 | SCOPE_EXIT({ request->Close(); }); |
| 528 | 1314 | ||
| 529 | // Extract relevant information from the request. | 1315 | // Extract relevant information from the request. |
| 530 | // const uintptr_t client_message = request->GetAddress(); | 1316 | const uint64_t client_message = request->GetAddress(); |
| 531 | // const size_t client_buffer_size = request->GetSize(); | 1317 | const size_t client_buffer_size = request->GetSize(); |
| 532 | KThread* client_thread = request->GetThread(); | 1318 | KThread* client_thread = request->GetThread(); |
| 533 | KEvent* event = request->GetEvent(); | 1319 | KEvent* event = request->GetEvent(); |
| 534 | 1320 | ||
| 535 | // KProcess *server_process = request->GetServerProcess(); | 1321 | KProcess* server_process = request->GetServerProcess(); |
| 536 | // KProcess *client_process = (client_thread != nullptr) ? | 1322 | KProcess* client_process = |
| 537 | // client_thread->GetOwnerProcess() : nullptr; | 1323 | (client_thread != nullptr) ? client_thread->GetOwnerProcess() : nullptr; |
| 538 | // KProcessPageTable *client_page_table = (client_process != nullptr) ? | 1324 | KProcessPageTable* client_page_table = |
| 539 | // std::addressof(client_process->GetPageTable()) | 1325 | (client_process != nullptr) ? std::addressof(client_process->GetPageTable()) : nullptr; |
| 540 | // : nullptr; | ||
| 541 | 1326 | ||
| 542 | // Cleanup the mappings. | 1327 | // Cleanup the mappings. |
| 543 | // Result result = CleanupMap(request, server_process, client_page_table); | 1328 | Result result = CleanupMap(request, server_process, client_page_table); |
| 544 | 1329 | ||
| 545 | // If there's a client thread, update it. | 1330 | // If there's a client thread, update it. |
| 546 | if (client_thread != nullptr) { | 1331 | if (client_thread != nullptr) { |
| 547 | if (event != nullptr) { | 1332 | if (event != nullptr) { |
| 548 | // // We need to reply async. | 1333 | // We need to reply async. |
| 549 | // ReplyAsyncError(client_process, client_message, client_buffer_size, | 1334 | ReplyAsyncError(client_process, client_message, client_buffer_size, |
| 550 | // (R_SUCCEEDED(result) ? ResultSessionClosed : result)); | 1335 | (R_SUCCEEDED(result) ? ResultSessionClosed : result)); |
| 551 | 1336 | ||
| 552 | // // Unlock the client buffer. | 1337 | // Unlock the client buffer. |
| 553 | // NOTE: Nintendo does not check the result of this. | 1338 | // NOTE: Nintendo does not check the result of this. |
| 554 | // client_page_table->UnlockForIpcUserBuffer(client_message, client_buffer_size); | 1339 | client_page_table->UnlockForIpcUserBuffer(client_message, client_buffer_size); |
| 555 | 1340 | ||
| 556 | // Signal the event. | 1341 | // Signal the event. |
| 557 | event->Signal(); | 1342 | event->Signal(); |
| @@ -567,4 +1352,97 @@ void KServerSession::CleanupRequests() { | |||
| 567 | } | 1352 | } |
| 568 | } | 1353 | } |
| 569 | 1354 | ||
| 1355 | void KServerSession::OnClientClosed() { | ||
| 1356 | KScopedLightLock lk{m_lock}; | ||
| 1357 | |||
| 1358 | // Handle any pending requests. | ||
| 1359 | KSessionRequest* prev_request = nullptr; | ||
| 1360 | while (true) { | ||
| 1361 | // Declare variables for processing the request. | ||
| 1362 | KSessionRequest* request = nullptr; | ||
| 1363 | KEvent* event = nullptr; | ||
| 1364 | KThread* thread = nullptr; | ||
| 1365 | bool cur_request = false; | ||
| 1366 | bool terminate = false; | ||
| 1367 | |||
| 1368 | // Get the next request. | ||
| 1369 | { | ||
| 1370 | KScopedSchedulerLock sl{m_kernel}; | ||
| 1371 | |||
| 1372 | if (m_current_request != nullptr && m_current_request != prev_request) { | ||
| 1373 | // Set the request, open a reference as we process it. | ||
| 1374 | request = m_current_request; | ||
| 1375 | request->Open(); | ||
| 1376 | cur_request = true; | ||
| 1377 | |||
| 1378 | // Get thread and event for the request. | ||
| 1379 | thread = request->GetThread(); | ||
| 1380 | event = request->GetEvent(); | ||
| 1381 | |||
| 1382 | // If the thread is terminating, handle that. | ||
| 1383 | if (thread->IsTerminationRequested()) { | ||
| 1384 | request->ClearThread(); | ||
| 1385 | request->ClearEvent(); | ||
| 1386 | terminate = true; | ||
| 1387 | } | ||
| 1388 | |||
| 1389 | prev_request = request; | ||
| 1390 | } else if (!m_request_list.empty()) { | ||
| 1391 | // Pop the request from the front of the list. | ||
| 1392 | request = std::addressof(m_request_list.front()); | ||
| 1393 | m_request_list.pop_front(); | ||
| 1394 | |||
| 1395 | // Get thread and event for the request. | ||
| 1396 | thread = request->GetThread(); | ||
| 1397 | event = request->GetEvent(); | ||
| 1398 | } | ||
| 1399 | } | ||
| 1400 | |||
| 1401 | // If there are no requests, we're done. | ||
| 1402 | if (request == nullptr) { | ||
| 1403 | break; | ||
| 1404 | } | ||
| 1405 | |||
| 1406 | // All requests must have threads. | ||
| 1407 | ASSERT(thread != nullptr); | ||
| 1408 | |||
| 1409 | // Ensure that we close the request when done. | ||
| 1410 | SCOPE_EXIT({ request->Close(); }); | ||
| 1411 | |||
| 1412 | // If we're terminating, close a reference to the thread and event. | ||
| 1413 | if (terminate) { | ||
| 1414 | thread->Close(); | ||
| 1415 | if (event != nullptr) { | ||
| 1416 | event->Close(); | ||
| 1417 | } | ||
| 1418 | } | ||
| 1419 | |||
| 1420 | // If we need to, reply. | ||
| 1421 | if (event != nullptr && !cur_request) { | ||
| 1422 | // There must be no mappings. | ||
| 1423 | ASSERT(request->GetSendCount() == 0); | ||
| 1424 | ASSERT(request->GetReceiveCount() == 0); | ||
| 1425 | ASSERT(request->GetExchangeCount() == 0); | ||
| 1426 | |||
| 1427 | // Get the process and page table. | ||
| 1428 | KProcess* client_process = thread->GetOwnerProcess(); | ||
| 1429 | auto& client_pt = client_process->GetPageTable(); | ||
| 1430 | |||
| 1431 | // Reply to the request. | ||
| 1432 | ReplyAsyncError(client_process, request->GetAddress(), request->GetSize(), | ||
| 1433 | ResultSessionClosed); | ||
| 1434 | |||
| 1435 | // Unlock the buffer. | ||
| 1436 | // NOTE: Nintendo does not check the result of this. | ||
| 1437 | client_pt.UnlockForIpcUserBuffer(request->GetAddress(), request->GetSize()); | ||
| 1438 | |||
| 1439 | // Signal the event. | ||
| 1440 | event->Signal(); | ||
| 1441 | } | ||
| 1442 | } | ||
| 1443 | |||
| 1444 | // Notify. | ||
| 1445 | this->NotifyAvailable(ResultSessionClosed); | ||
| 1446 | } | ||
| 1447 | |||
| 570 | } // namespace Kernel | 1448 | } // namespace Kernel |
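Taken together, the k_server_session.cpp changes replace the old same-process memcpy between TLS buffers with the kernel-accurate reply path: while the client side is still open, the reply is marshalled with SendMessage; once it has gone away, the server's special-data handles and any Send/Receive/Exchange mappings are cleaned up before the client is unblocked. A condensed, illustrative sketch of that flow using the helper names from the diff (the HLE short-circuit, where the reply is already written into the command buffer, is omitted, and `client_closed` is a placeholder for the session-state check done under the scheduler lock):

    Result result = ResultSuccess;
    if (!client_closed) {
        // Marshal the server's message into the client's buffer.
        result = SendMessage(m_kernel, server_message, server_buffer_size, server_message_paddr,
                             *client_thread, client_message, client_buffer_size, this, request);
    } else {
        // The client went away: release handles the server received, then undo IPC mappings.
        KProcess* server_process = request->GetServerProcess();
        KProcess* client_process =
            (client_thread != nullptr) ? client_thread->GetOwnerProcess() : nullptr;
        KProcessPageTable* client_page_table =
            (client_process != nullptr) ? std::addressof(client_process->GetPageTable()) : nullptr;
        result = CleanupServerHandles(m_kernel, server_message, server_buffer_size,
                                      server_message_paddr);
        const Result cleanup_map_result = CleanupMap(request, server_process, client_page_table);
        if (R_SUCCEEDED(result)) {
            result = cleanup_map_result;
        }
    }

Either way, the request is then closed, a client-facing result is selected, and any pending event is signalled after ReplyAsyncError and UnlockForIpcUserBuffer, as shown in the hunks above.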
diff --git a/src/core/hle/kernel/k_server_session.h b/src/core/hle/kernel/k_server_session.h index 403891919..2876c231b 100644 --- a/src/core/hle/kernel/k_server_session.h +++ b/src/core/hle/kernel/k_server_session.h | |||
| @@ -49,14 +49,21 @@ public: | |||
| 49 | bool IsSignaled() const override; | 49 | bool IsSignaled() const override; |
| 50 | void OnClientClosed(); | 50 | void OnClientClosed(); |
| 51 | 51 | ||
| 52 | /// TODO: flesh these out to match the real kernel | ||
| 53 | Result OnRequest(KSessionRequest* request); | 52 | Result OnRequest(KSessionRequest* request); |
| 54 | Result SendReply(bool is_hle = false); | 53 | Result SendReply(uintptr_t server_message, uintptr_t server_buffer_size, |
| 55 | Result ReceiveRequest(std::shared_ptr<Service::HLERequestContext>* out_context = nullptr, | 54 | KPhysicalAddress server_message_paddr, bool is_hle = false); |
| 55 | Result ReceiveRequest(uintptr_t server_message, uintptr_t server_buffer_size, | ||
| 56 | KPhysicalAddress server_message_paddr, | ||
| 57 | std::shared_ptr<Service::HLERequestContext>* out_context = nullptr, | ||
| 56 | std::weak_ptr<Service::SessionRequestManager> manager = {}); | 58 | std::weak_ptr<Service::SessionRequestManager> manager = {}); |
| 57 | 59 | ||
| 58 | Result SendReplyHLE() { | 60 | Result SendReplyHLE() { |
| 59 | return SendReply(true); | 61 | R_RETURN(this->SendReply(0, 0, 0, true)); |
| 62 | } | ||
| 63 | |||
| 64 | Result ReceiveRequestHLE(std::shared_ptr<Service::HLERequestContext>* out_context, | ||
| 65 | std::weak_ptr<Service::SessionRequestManager> manager) { | ||
| 66 | R_RETURN(this->ReceiveRequest(0, 0, 0, out_context, manager)); | ||
| 60 | } | 67 | } |
| 61 | 68 | ||
| 62 | private: | 69 | private: |
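The new signatures carry the guest server's raw message description (virtual address, size in bytes, and physical address), while the HLE wrappers pass zeroes so emulated services keep the direct command-buffer path. Condensed from this commit's other hunks, the two kinds of caller line up as follows (`message`, `buffer_size`, and `message_paddr` are the SVC arguments shown in svc_ipc.cpp below; `context` and `manager` belong to the HLE caller):

    // Guest (LLE) caller: forward the user-supplied buffer description.
    R_TRY(session->SendReply(message, buffer_size, message_paddr));
    Result rc = session->ReceiveRequest(message, buffer_size, message_paddr);

    // HLE caller: zeroes select the command-buffer path.
    R_TRY(session->SendReplyHLE());                                    // SendReply(0, 0, 0, true)
    rc = session->ReceiveRequestHLE(std::addressof(context), manager); // ReceiveRequest(0, 0, 0, ...)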
diff --git a/src/core/hle/kernel/message_buffer.h b/src/core/hle/kernel/message_buffer.h index 75b275310..d528a9bb3 100644 --- a/src/core/hle/kernel/message_buffer.h +++ b/src/core/hle/kernel/message_buffer.h | |||
| @@ -18,13 +18,13 @@ public: | |||
| 18 | static constexpr inline u64 NullTag = 0; | 18 | static constexpr inline u64 NullTag = 0; |
| 19 | 19 | ||
| 20 | public: | 20 | public: |
| 21 | enum class ReceiveListCountType : u32 { | 21 | enum ReceiveListCountType : u32 { |
| 22 | None = 0, | 22 | ReceiveListCountType_None = 0, |
| 23 | ToMessageBuffer = 1, | 23 | ReceiveListCountType_ToMessageBuffer = 1, |
| 24 | ToSingleBuffer = 2, | 24 | ReceiveListCountType_ToSingleBuffer = 2, |
| 25 | 25 | ||
| 26 | CountOffset = 2, | 26 | ReceiveListCountType_CountOffset = 2, |
| 27 | CountMax = 13, | 27 | ReceiveListCountType_CountMax = 13, |
| 28 | }; | 28 | }; |
| 29 | 29 | ||
| 30 | private: | 30 | private: |
| @@ -591,16 +591,16 @@ public: | |||
| 591 | // Add the size of the receive list. | 591 | // Add the size of the receive list. |
| 592 | const auto count = hdr.GetReceiveListCount(); | 592 | const auto count = hdr.GetReceiveListCount(); |
| 593 | switch (count) { | 593 | switch (count) { |
| 594 | case MessageHeader::ReceiveListCountType::None: | 594 | case MessageHeader::ReceiveListCountType_None: |
| 595 | break; | 595 | break; |
| 596 | case MessageHeader::ReceiveListCountType::ToMessageBuffer: | 596 | case MessageHeader::ReceiveListCountType_ToMessageBuffer: |
| 597 | break; | 597 | break; |
| 598 | case MessageHeader::ReceiveListCountType::ToSingleBuffer: | 598 | case MessageHeader::ReceiveListCountType_ToSingleBuffer: |
| 599 | msg_size += ReceiveListEntry::GetDataSize(); | 599 | msg_size += ReceiveListEntry::GetDataSize(); |
| 600 | break; | 600 | break; |
| 601 | default: | 601 | default: |
| 602 | msg_size += (static_cast<s32>(count) - | 602 | msg_size += (static_cast<s32>(count) - |
| 603 | static_cast<s32>(MessageHeader::ReceiveListCountType::CountOffset)) * | 603 | static_cast<s32>(MessageHeader::ReceiveListCountType_CountOffset)) * |
| 604 | ReceiveListEntry::GetDataSize(); | 604 | ReceiveListEntry::GetDataSize(); |
| 605 | break; | 605 | break; |
| 606 | } | 606 | } |
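ReceiveListCountType becomes an unscoped enum with prefixed enumerators. Because unscoped enumerators convert implicitly to integers, count values above the named constants (the header field stores the entry count plus ReceiveListCountType_CountOffset) can be compared and offset directly, which the receive-list handling relies on. A minimal illustration, assuming a MessageBuffer::MessageHeader named `hdr` is in scope:

    using Header = MessageBuffer::MessageHeader;
    const auto count = hdr.GetReceiveListCount();
    size_t extra_bytes = 0;
    if (count == Header::ReceiveListCountType_ToSingleBuffer) {
        // A single dedicated receive buffer contributes one list entry.
        extra_bytes = MessageBuffer::ReceiveListEntry::GetDataSize();
    } else if (count > Header::ReceiveListCountType_CountOffset) {
        // Arithmetic on the unscoped enum needs no static_cast here.
        extra_bytes = (count - Header::ReceiveListCountType_CountOffset) *
                      MessageBuffer::ReceiveListEntry::GetDataSize();
    }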
diff --git a/src/core/hle/kernel/svc/svc_ipc.cpp b/src/core/hle/kernel/svc/svc_ipc.cpp index 47a3e7bb0..85cc4f561 100644 --- a/src/core/hle/kernel/svc/svc_ipc.cpp +++ b/src/core/hle/kernel/svc/svc_ipc.cpp | |||
| @@ -48,8 +48,7 @@ Result ReplyAndReceiveImpl(KernelCore& kernel, int32_t* out_index, uintptr_t mes | |||
| 48 | }; | 48 | }; |
| 49 | 49 | ||
| 50 | // Send the reply. | 50 | // Send the reply. |
| 51 | R_TRY(session->SendReply()); | 51 | R_TRY(session->SendReply(message, buffer_size, message_paddr)); |
| 52 | // R_TRY(session->SendReply(message, buffer_size, message_paddr)); | ||
| 53 | } | 52 | } |
| 54 | 53 | ||
| 55 | // Receive a message. | 54 | // Receive a message. |
| @@ -85,8 +84,7 @@ Result ReplyAndReceiveImpl(KernelCore& kernel, int32_t* out_index, uintptr_t mes | |||
| 85 | if (R_SUCCEEDED(result)) { | 84 | if (R_SUCCEEDED(result)) { |
| 86 | KServerSession* session = objs[index]->DynamicCast<KServerSession*>(); | 85 | KServerSession* session = objs[index]->DynamicCast<KServerSession*>(); |
| 87 | if (session != nullptr) { | 86 | if (session != nullptr) { |
| 88 | // result = session->ReceiveRequest(message, buffer_size, message_paddr); | 87 | result = session->ReceiveRequest(message, buffer_size, message_paddr); |
| 89 | result = session->ReceiveRequest(); | ||
| 90 | if (ResultNotFound == result) { | 88 | if (ResultNotFound == result) { |
| 91 | continue; | 89 | continue; |
| 92 | } | 90 | } |
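ReplyAndReceiveImpl now forwards the caller's message address, size, and physical address to the session on both the reply and the receive side. The retry on ResultNotFound is the interesting part: ReceiveRequest reports it when the signaled session has no request left to pop (or is already servicing one), so the wait loop simply goes back to sleep. A condensed, illustrative sketch of the receive side (the real function also handles timeouts and cancellation):

    while (true) {
        s32 index = -1;
        Result result = /* wait for one of objs[] to be signaled, storing which one in index */;
        if (R_SUCCEEDED(result)) {
            if (auto* session = objs[index]->DynamicCast<KServerSession*>(); session != nullptr) {
                result = session->ReceiveRequest(message, buffer_size, message_paddr);
                if (ResultNotFound == result) {
                    continue; // Lost the race for this request; wait again.
                }
            }
        }
        *out_index = index;
        R_RETURN(result);
    }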
diff --git a/src/core/hle/kernel/svc_results.h b/src/core/hle/kernel/svc_results.h index e1ad78607..38e71d516 100644 --- a/src/core/hle/kernel/svc_results.h +++ b/src/core/hle/kernel/svc_results.h | |||
| @@ -38,7 +38,9 @@ constexpr Result ResultInvalidState{ErrorModule::Kernel, 125}; | |||
| 38 | constexpr Result ResultReservedUsed{ErrorModule::Kernel, 126}; | 38 | constexpr Result ResultReservedUsed{ErrorModule::Kernel, 126}; |
| 39 | constexpr Result ResultPortClosed{ErrorModule::Kernel, 131}; | 39 | constexpr Result ResultPortClosed{ErrorModule::Kernel, 131}; |
| 40 | constexpr Result ResultLimitReached{ErrorModule::Kernel, 132}; | 40 | constexpr Result ResultLimitReached{ErrorModule::Kernel, 132}; |
| 41 | constexpr Result ResultReceiveListBroken{ErrorModule::Kernel, 258}; | ||
| 41 | constexpr Result ResultOutOfAddressSpace{ErrorModule::Kernel, 259}; | 42 | constexpr Result ResultOutOfAddressSpace{ErrorModule::Kernel, 259}; |
| 43 | constexpr Result ResultMessageTooLarge{ErrorModule::Kernel, 260}; | ||
| 42 | constexpr Result ResultInvalidId{ErrorModule::Kernel, 519}; | 44 | constexpr Result ResultInvalidId{ErrorModule::Kernel, 519}; |
| 43 | 45 | ||
| 44 | } // namespace Kernel | 46 | } // namespace Kernel |
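Two result codes used by the new transfer path are introduced: ResultReceiveListBroken (kernel error 258) and ResultMessageTooLarge (kernel error 260). As their names suggest, the first covers a malformed or overflowing receive list in the destination message and the second a message that does not fit in the receive buffer. In this codebase's R_UNLESS style, a check site would look roughly like the following (msg_size, buffer_size, and recv_list_is_valid are placeholder names, not the exact identifiers used in the new code):

    // Hypothetical check sites, shown only to illustrate how such results are raised.
    R_UNLESS(msg_size <= buffer_size, ResultMessageTooLarge);
    R_UNLESS(recv_list_is_valid, ResultReceiveListBroken);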
diff --git a/src/core/hle/service/server_manager.cpp b/src/core/hle/service/server_manager.cpp index 6808247a9..ec599dac2 100644 --- a/src/core/hle/service/server_manager.cpp +++ b/src/core/hle/service/server_manager.cpp | |||
| @@ -372,7 +372,7 @@ Result ServerManager::OnSessionEvent(Kernel::KServerSession* session, | |||
| 372 | 372 | ||
| 373 | // Try to receive a message. | 373 | // Try to receive a message. |
| 374 | std::shared_ptr<HLERequestContext> context; | 374 | std::shared_ptr<HLERequestContext> context; |
| 375 | rc = session->ReceiveRequest(&context, manager); | 375 | rc = session->ReceiveRequestHLE(&context, manager); |
| 376 | 376 | ||
| 377 | // If the session has been closed, we're done. | 377 | // If the session has been closed, we're done. |
| 378 | if (rc == Kernel::ResultSessionClosed) { | 378 | if (rc == Kernel::ResultSessionClosed) { |
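For the HLE server manager the change is mechanical: the receive call is renamed to ReceiveRequestHLE, and the surrounding loop visible in the hunk is unchanged. Condensed, the round trip for an emulated service looks like this (the dispatch step stands in for the manager's actual request handling, and the reply side goes back through the SendReplyHLE() wrapper added in the header):

    std::shared_ptr<HLERequestContext> context;
    Result rc = session->ReceiveRequestHLE(&context, manager);
    if (R_SUCCEEDED(rc)) {
        // ... dispatch the request to the HLE service implementation ...
        rc = session->SendReplyHLE();
    }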