summaryrefslogtreecommitdiff
path: root/src/core/hle/kernel
diff options
context:
space:
mode:
authorGravatar bunnei2021-05-07 23:30:17 -0700
committerGravatar GitHub2021-05-07 23:30:17 -0700
commitfaa067f175cbf5e916ed75776817f0046e6731c4 (patch)
tree8ab02a72a6e4d6578848c8da2c02af02684aeec7 /src/core/hle/kernel
parentMerge pull request #6287 from lioncash/ldr-copy (diff)
parenthle: kernel: KPageTable: CanContain should not be constexpr. (diff)
downloadyuzu-faa067f175cbf5e916ed75776817f0046e6731c4.tar.gz
yuzu-faa067f175cbf5e916ed75776817f0046e6731c4.tar.xz
yuzu-faa067f175cbf5e916ed75776817f0046e6731c4.zip
Merge pull request #6266 from bunnei/kautoobject-refactor
Kernel Rework: Migrate kernel objects to KAutoObject
Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--src/core/hle/kernel/client_port.cpp47
-rw-r--r--src/core/hle/kernel/client_port.h63
-rw-r--r--src/core/hle/kernel/client_session.cpp53
-rw-r--r--src/core/hle/kernel/client_session.h68
-rw-r--r--src/core/hle/kernel/global_scheduler_context.cpp6
-rw-r--r--src/core/hle/kernel/global_scheduler_context.h8
-rw-r--r--src/core/hle/kernel/handle_table.cpp131
-rw-r--r--src/core/hle/kernel/handle_table.h144
-rw-r--r--src/core/hle/kernel/hle_ipc.cpp38
-rw-r--r--src/core/hle/kernel/hle_ipc.h60
-rw-r--r--src/core/hle/kernel/init/init_slab_setup.cpp192
-rw-r--r--src/core/hle/kernel/init/init_slab_setup.h43
-rw-r--r--src/core/hle/kernel/k_auto_object.cpp14
-rw-r--r--src/core/hle/kernel/k_auto_object.h306
-rw-r--r--src/core/hle/kernel/k_auto_object_container.cpp28
-rw-r--r--src/core/hle/kernel/k_auto_object_container.h70
-rw-r--r--src/core/hle/kernel/k_class_token.cpp133
-rw-r--r--src/core/hle/kernel/k_class_token.h131
-rw-r--r--src/core/hle/kernel/k_client_port.cpp125
-rw-r--r--src/core/hle/kernel/k_client_port.h61
-rw-r--r--src/core/hle/kernel/k_client_session.cpp31
-rw-r--r--src/core/hle/kernel/k_client_session.h61
-rw-r--r--src/core/hle/kernel/k_condition_variable.cpp37
-rw-r--r--src/core/hle/kernel/k_event.cpp45
-rw-r--r--src/core/hle/kernel/k_event.h45
-rw-r--r--src/core/hle/kernel/k_handle_table.cpp135
-rw-r--r--src/core/hle/kernel/k_handle_table.h310
-rw-r--r--src/core/hle/kernel/k_linked_list.h238
-rw-r--r--src/core/hle/kernel/k_memory_block.h4
-rw-r--r--src/core/hle/kernel/k_page_table.cpp49
-rw-r--r--src/core/hle/kernel/k_page_table.h9
-rw-r--r--src/core/hle/kernel/k_port.cpp68
-rw-r--r--src/core/hle/kernel/k_port.h69
-rw-r--r--src/core/hle/kernel/k_process.cpp (renamed from src/core/hle/kernel/process.cpp)145
-rw-r--r--src/core/hle/kernel/k_process.h (renamed from src/core/hle/kernel/process.h)63
-rw-r--r--src/core/hle/kernel/k_readable_event.cpp15
-rw-r--r--src/core/hle/kernel/k_readable_event.h29
-rw-r--r--src/core/hle/kernel/k_resource_limit.cpp14
-rw-r--r--src/core/hle/kernel/k_resource_limit.h29
-rw-r--r--src/core/hle/kernel/k_scheduler.cpp33
-rw-r--r--src/core/hle/kernel/k_scheduler.h8
-rw-r--r--src/core/hle/kernel/k_scoped_resource_reservation.h14
-rw-r--r--src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h2
-rw-r--r--src/core/hle/kernel/k_server_port.cpp104
-rw-r--r--src/core/hle/kernel/k_server_port.h80
-rw-r--r--src/core/hle/kernel/k_server_session.cpp (renamed from src/core/hle/kernel/server_session.cpp)82
-rw-r--r--src/core/hle/kernel/k_server_session.h (renamed from src/core/hle/kernel/server_session.h)82
-rw-r--r--src/core/hle/kernel/k_session.cpp85
-rw-r--r--src/core/hle/kernel/k_session.h96
-rw-r--r--src/core/hle/kernel/k_shared_memory.cpp84
-rw-r--r--src/core/hle/kernel/k_shared_memory.h68
-rw-r--r--src/core/hle/kernel/k_slab_heap.h9
-rw-r--r--src/core/hle/kernel/k_synchronization_object.cpp10
-rw-r--r--src/core/hle/kernel/k_synchronization_object.h21
-rw-r--r--src/core/hle/kernel/k_thread.cpp115
-rw-r--r--src/core/hle/kernel/k_thread.h127
-rw-r--r--src/core/hle/kernel/k_transfer_memory.cpp45
-rw-r--r--src/core/hle/kernel/k_transfer_memory.h66
-rw-r--r--src/core/hle/kernel/k_writable_event.cpp18
-rw-r--r--src/core/hle/kernel/k_writable_event.h24
-rw-r--r--src/core/hle/kernel/kernel.cpp237
-rw-r--r--src/core/hle/kernel/kernel.h117
-rw-r--r--src/core/hle/kernel/object.cpp42
-rw-r--r--src/core/hle/kernel/object.h96
-rw-r--r--src/core/hle/kernel/process_capability.cpp18
-rw-r--r--src/core/hle/kernel/server_port.cpp54
-rw-r--r--src/core/hle/kernel/server_port.h98
-rw-r--r--src/core/hle/kernel/service_thread.cpp29
-rw-r--r--src/core/hle/kernel/service_thread.h4
-rw-r--r--src/core/hle/kernel/session.cpp41
-rw-r--r--src/core/hle/kernel/session.h64
-rw-r--r--src/core/hle/kernel/slab_helpers.h148
-rw-r--r--src/core/hle/kernel/svc.cpp995
-rw-r--r--src/core/hle/kernel/svc_common.h15
-rw-r--r--src/core/hle/kernel/svc_results.h18
-rw-r--r--src/core/hle/kernel/svc_wrap.h56
-rw-r--r--src/core/hle/kernel/time_manager.cpp17
-rw-r--r--src/core/hle/kernel/time_manager.h2
78 files changed, 4134 insertions, 2207 deletions
diff --git a/src/core/hle/kernel/client_port.cpp b/src/core/hle/kernel/client_port.cpp
deleted file mode 100644
index 0b6957e31..000000000
--- a/src/core/hle/kernel/client_port.cpp
+++ /dev/null
@@ -1,47 +0,0 @@
1// Copyright 2016 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "core/hle/kernel/client_port.h"
6#include "core/hle/kernel/client_session.h"
7#include "core/hle/kernel/hle_ipc.h"
8#include "core/hle/kernel/object.h"
9#include "core/hle/kernel/server_port.h"
10#include "core/hle/kernel/session.h"
11#include "core/hle/kernel/svc_results.h"
12
13namespace Kernel {
14
15ClientPort::ClientPort(KernelCore& kernel) : Object{kernel} {}
16ClientPort::~ClientPort() = default;
17
18std::shared_ptr<ServerPort> ClientPort::GetServerPort() const {
19 return server_port;
20}
21
22ResultVal<std::shared_ptr<ClientSession>> ClientPort::Connect() {
23 if (active_sessions >= max_sessions) {
24 return ResultMaxConnectionsReached;
25 }
26 active_sessions++;
27
28 auto [client, server] = Kernel::Session::Create(kernel, name);
29
30 if (server_port->HasHLEHandler()) {
31 server_port->GetHLEHandler()->ClientConnected(std::move(server));
32 } else {
33 server_port->AppendPendingSession(std::move(server));
34 }
35
36 return MakeResult(std::move(client));
37}
38
39void ClientPort::ConnectionClosed() {
40 if (active_sessions == 0) {
41 return;
42 }
43
44 --active_sessions;
45}
46
47} // namespace Kernel
diff --git a/src/core/hle/kernel/client_port.h b/src/core/hle/kernel/client_port.h
deleted file mode 100644
index 77559ebf9..000000000
--- a/src/core/hle/kernel/client_port.h
+++ /dev/null
@@ -1,63 +0,0 @@
1// Copyright 2016 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8#include <string>
9
10#include "common/common_types.h"
11#include "core/hle/kernel/object.h"
12#include "core/hle/result.h"
13
14namespace Kernel {
15
16class ClientSession;
17class KernelCore;
18class ServerPort;
19
20class ClientPort final : public Object {
21public:
22 explicit ClientPort(KernelCore& kernel);
23 ~ClientPort() override;
24
25 friend class ServerPort;
26 std::string GetTypeName() const override {
27 return "ClientPort";
28 }
29 std::string GetName() const override {
30 return name;
31 }
32
33 static constexpr HandleType HANDLE_TYPE = HandleType::ClientPort;
34 HandleType GetHandleType() const override {
35 return HANDLE_TYPE;
36 }
37
38 std::shared_ptr<ServerPort> GetServerPort() const;
39
40 /**
41 * Creates a new Session pair, adds the created ServerSession to the associated ServerPort's
42 * list of pending sessions, and signals the ServerPort, causing any threads
43 * waiting on it to awake.
44 * @returns ClientSession The client endpoint of the created Session pair, or error code.
45 */
46 ResultVal<std::shared_ptr<ClientSession>> Connect();
47
48 /**
49 * Signifies that a previously active connection has been closed,
50 * decreasing the total number of active connections to this port.
51 */
52 void ConnectionClosed();
53
54 void Finalize() override {}
55
56private:
57 std::shared_ptr<ServerPort> server_port; ///< ServerPort associated with this client port.
58 u32 max_sessions = 0; ///< Maximum number of simultaneous sessions the port can have
59 u32 active_sessions = 0; ///< Number of currently open sessions to this port
60 std::string name; ///< Name of client port (optional)
61};
62
63} // namespace Kernel
diff --git a/src/core/hle/kernel/client_session.cpp b/src/core/hle/kernel/client_session.cpp
deleted file mode 100644
index e230f365a..000000000
--- a/src/core/hle/kernel/client_session.cpp
+++ /dev/null
@@ -1,53 +0,0 @@
1// Copyright 2019 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "core/hle/kernel/client_session.h"
6#include "core/hle/kernel/hle_ipc.h"
7#include "core/hle/kernel/k_thread.h"
8#include "core/hle/kernel/server_session.h"
9#include "core/hle/kernel/session.h"
10#include "core/hle/kernel/svc_results.h"
11#include "core/hle/result.h"
12
13namespace Kernel {
14
15ClientSession::ClientSession(KernelCore& kernel) : KSynchronizationObject{kernel} {}
16
17ClientSession::~ClientSession() {
18 // This destructor will be called automatically when the last ClientSession handle is closed by
19 // the emulated application.
20 if (parent->Server()) {
21 parent->Server()->ClientDisconnected();
22 }
23}
24
25bool ClientSession::IsSignaled() const {
26 UNIMPLEMENTED();
27 return true;
28}
29
30ResultVal<std::shared_ptr<ClientSession>> ClientSession::Create(KernelCore& kernel,
31 std::shared_ptr<Session> parent,
32 std::string name) {
33 std::shared_ptr<ClientSession> client_session{std::make_shared<ClientSession>(kernel)};
34
35 client_session->name = std::move(name);
36 client_session->parent = std::move(parent);
37
38 return MakeResult(std::move(client_session));
39}
40
41ResultCode ClientSession::SendSyncRequest(std::shared_ptr<KThread> thread,
42 Core::Memory::Memory& memory,
43 Core::Timing::CoreTiming& core_timing) {
44 // Keep ServerSession alive until we're done working with it.
45 if (!parent->Server()) {
46 return ResultSessionClosedByRemote;
47 }
48
49 // Signal the server session that new data is available
50 return parent->Server()->HandleSyncRequest(std::move(thread), memory, core_timing);
51}
52
53} // namespace Kernel
diff --git a/src/core/hle/kernel/client_session.h b/src/core/hle/kernel/client_session.h
deleted file mode 100644
index 85aafeaf4..000000000
--- a/src/core/hle/kernel/client_session.h
+++ /dev/null
@@ -1,68 +0,0 @@
1// Copyright 2019 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8#include <string>
9
10#include "core/hle/kernel/k_synchronization_object.h"
11#include "core/hle/result.h"
12
13union ResultCode;
14
15namespace Core::Memory {
16class Memory;
17}
18
19namespace Core::Timing {
20class CoreTiming;
21}
22
23namespace Kernel {
24
25class KernelCore;
26class Session;
27class KThread;
28
29class ClientSession final : public KSynchronizationObject {
30public:
31 explicit ClientSession(KernelCore& kernel);
32 ~ClientSession() override;
33
34 friend class Session;
35
36 std::string GetTypeName() const override {
37 return "ClientSession";
38 }
39
40 std::string GetName() const override {
41 return name;
42 }
43
44 static constexpr HandleType HANDLE_TYPE = HandleType::ClientSession;
45 HandleType GetHandleType() const override {
46 return HANDLE_TYPE;
47 }
48
49 ResultCode SendSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory,
50 Core::Timing::CoreTiming& core_timing);
51
52 bool IsSignaled() const override;
53
54 void Finalize() override {}
55
56private:
57 static ResultVal<std::shared_ptr<ClientSession>> Create(KernelCore& kernel,
58 std::shared_ptr<Session> parent,
59 std::string name = "Unknown");
60
61 /// The parent session, which links to the server endpoint.
62 std::shared_ptr<Session> parent;
63
64 /// Name of the client session (optional)
65 std::string name;
66};
67
68} // namespace Kernel
diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp
index c6838649f..7c87cbada 100644
--- a/src/core/hle/kernel/global_scheduler_context.cpp
+++ b/src/core/hle/kernel/global_scheduler_context.cpp
@@ -17,12 +17,12 @@ GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel)
17 17
18GlobalSchedulerContext::~GlobalSchedulerContext() = default; 18GlobalSchedulerContext::~GlobalSchedulerContext() = default;
19 19
20void GlobalSchedulerContext::AddThread(std::shared_ptr<KThread> thread) { 20void GlobalSchedulerContext::AddThread(KThread* thread) {
21 std::scoped_lock lock{global_list_guard}; 21 std::scoped_lock lock{global_list_guard};
22 thread_list.push_back(std::move(thread)); 22 thread_list.push_back(thread);
23} 23}
24 24
25void GlobalSchedulerContext::RemoveThread(std::shared_ptr<KThread> thread) { 25void GlobalSchedulerContext::RemoveThread(KThread* thread) {
26 std::scoped_lock lock{global_list_guard}; 26 std::scoped_lock lock{global_list_guard};
27 thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), 27 thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
28 thread_list.end()); 28 thread_list.end());
diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h
index 11592843e..ba8b67fd1 100644
--- a/src/core/hle/kernel/global_scheduler_context.h
+++ b/src/core/hle/kernel/global_scheduler_context.h
@@ -38,13 +38,13 @@ public:
38 ~GlobalSchedulerContext(); 38 ~GlobalSchedulerContext();
39 39
40 /// Adds a new thread to the scheduler 40 /// Adds a new thread to the scheduler
41 void AddThread(std::shared_ptr<KThread> thread); 41 void AddThread(KThread* thread);
42 42
43 /// Removes a thread from the scheduler 43 /// Removes a thread from the scheduler
44 void RemoveThread(std::shared_ptr<KThread> thread); 44 void RemoveThread(KThread* thread);
45 45
46 /// Returns a list of all threads managed by the scheduler 46 /// Returns a list of all threads managed by the scheduler
47 [[nodiscard]] const std::vector<std::shared_ptr<KThread>>& GetThreadList() const { 47 [[nodiscard]] const std::vector<KThread*>& GetThreadList() const {
48 return thread_list; 48 return thread_list;
49 } 49 }
50 50
@@ -79,7 +79,7 @@ private:
79 LockType scheduler_lock; 79 LockType scheduler_lock;
80 80
81 /// Lists all thread ids that aren't deleted/etc. 81 /// Lists all thread ids that aren't deleted/etc.
82 std::vector<std::shared_ptr<KThread>> thread_list; 82 std::vector<KThread*> thread_list;
83 Common::SpinLock global_list_guard{}; 83 Common::SpinLock global_list_guard{};
84}; 84};
85 85
diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp
deleted file mode 100644
index f96d34078..000000000
--- a/src/core/hle/kernel/handle_table.cpp
+++ /dev/null
@@ -1,131 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <utility>
6#include "common/assert.h"
7#include "common/logging/log.h"
8#include "core/core.h"
9#include "core/hle/kernel/handle_table.h"
10#include "core/hle/kernel/k_scheduler.h"
11#include "core/hle/kernel/k_thread.h"
12#include "core/hle/kernel/kernel.h"
13#include "core/hle/kernel/process.h"
14#include "core/hle/kernel/svc_results.h"
15
16namespace Kernel {
17namespace {
18constexpr u16 GetSlot(Handle handle) {
19 return static_cast<u16>(handle >> 15);
20}
21
22constexpr u16 GetGeneration(Handle handle) {
23 return static_cast<u16>(handle & 0x7FFF);
24}
25} // Anonymous namespace
26
27HandleTable::HandleTable(KernelCore& kernel) : kernel{kernel} {
28 Clear();
29}
30
31HandleTable::~HandleTable() = default;
32
33ResultCode HandleTable::SetSize(s32 handle_table_size) {
34 if (static_cast<u32>(handle_table_size) > MAX_COUNT) {
35 LOG_ERROR(Kernel, "Handle table size {} is greater than {}", handle_table_size, MAX_COUNT);
36 return ResultOutOfMemory;
37 }
38
39 // Values less than or equal to zero indicate to use the maximum allowable
40 // size for the handle table in the actual kernel, so we ignore the given
41 // value in that case, since we assume this by default unless this function
42 // is called.
43 if (handle_table_size > 0) {
44 table_size = static_cast<u16>(handle_table_size);
45 }
46
47 return RESULT_SUCCESS;
48}
49
50ResultVal<Handle> HandleTable::Create(std::shared_ptr<Object> obj) {
51 DEBUG_ASSERT(obj != nullptr);
52
53 const u16 slot = next_free_slot;
54 if (slot >= table_size) {
55 LOG_ERROR(Kernel, "Unable to allocate Handle, too many slots in use.");
56 return ResultHandleTableFull;
57 }
58 next_free_slot = generations[slot];
59
60 const u16 generation = next_generation++;
61
62 // Overflow count so it fits in the 15 bits dedicated to the generation in the handle.
63 // Horizon OS uses zero to represent an invalid handle, so skip to 1.
64 if (next_generation >= (1 << 15)) {
65 next_generation = 1;
66 }
67
68 generations[slot] = generation;
69 objects[slot] = std::move(obj);
70
71 Handle handle = generation | (slot << 15);
72 return MakeResult<Handle>(handle);
73}
74
75ResultVal<Handle> HandleTable::Duplicate(Handle handle) {
76 std::shared_ptr<Object> object = GetGeneric(handle);
77 if (object == nullptr) {
78 LOG_ERROR(Kernel, "Tried to duplicate invalid handle: {:08X}", handle);
79 return ResultInvalidHandle;
80 }
81 return Create(std::move(object));
82}
83
84ResultCode HandleTable::Close(Handle handle) {
85 if (!IsValid(handle)) {
86 LOG_ERROR(Kernel, "Handle is not valid! handle={:08X}", handle);
87 return ResultInvalidHandle;
88 }
89
90 const u16 slot = GetSlot(handle);
91
92 if (objects[slot].use_count() == 1) {
93 objects[slot]->Finalize();
94 }
95
96 objects[slot] = nullptr;
97
98 generations[slot] = next_free_slot;
99 next_free_slot = slot;
100 return RESULT_SUCCESS;
101}
102
103bool HandleTable::IsValid(Handle handle) const {
104 const std::size_t slot = GetSlot(handle);
105 const u16 generation = GetGeneration(handle);
106
107 return slot < table_size && objects[slot] != nullptr && generations[slot] == generation;
108}
109
110std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const {
111 if (handle == CurrentThread) {
112 return SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
113 } else if (handle == CurrentProcess) {
114 return SharedFrom(kernel.CurrentProcess());
115 }
116
117 if (!IsValid(handle)) {
118 return nullptr;
119 }
120 return objects[GetSlot(handle)];
121}
122
123void HandleTable::Clear() {
124 for (u16 i = 0; i < table_size; ++i) {
125 generations[i] = static_cast<u16>(i + 1);
126 objects[i] = nullptr;
127 }
128 next_free_slot = 0;
129}
130
131} // namespace Kernel
diff --git a/src/core/hle/kernel/handle_table.h b/src/core/hle/kernel/handle_table.h
deleted file mode 100644
index c9dab8cdd..000000000
--- a/src/core/hle/kernel/handle_table.h
+++ /dev/null
@@ -1,144 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8#include <cstddef>
9#include <memory>
10
11#include "common/common_types.h"
12#include "core/hle/kernel/object.h"
13#include "core/hle/result.h"
14
15namespace Kernel {
16
17class KernelCore;
18
19enum KernelHandle : Handle {
20 InvalidHandle = 0,
21 CurrentThread = 0xFFFF8000,
22 CurrentProcess = 0xFFFF8001,
23};
24
25/**
26 * This class allows the creation of Handles, which are references to objects that can be tested
27 * for validity and looked up. Here they are used to pass references to kernel objects to/from the
28 * emulated process. it has been designed so that it follows the same handle format and has
29 * approximately the same restrictions as the handle manager in the CTR-OS.
30 *
31 * Handles contain two sub-fields: a slot index (bits 31:15) and a generation value (bits 14:0).
32 * The slot index is used to index into the arrays in this class to access the data corresponding
33 * to the Handle.
34 *
35 * To prevent accidental use of a freed Handle whose slot has already been reused, a global counter
36 * is kept and incremented every time a Handle is created. This is the Handle's "generation". The
37 * value of the counter is stored into the Handle as well as in the handle table (in the
38 * "generations" array). When looking up a handle, the Handle's generation must match with the
39 * value stored on the class, otherwise the Handle is considered invalid.
40 *
41 * To find free slots when allocating a Handle without needing to scan the entire object array, the
42 * generations field of unallocated slots is re-purposed as a linked list of indices to free slots.
43 * When a Handle is created, an index is popped off the list and used for the new Handle. When it
44 * is destroyed, it is again pushed onto the list to be re-used by the next allocation. It is
45 * likely that this allocation strategy differs from the one used in CTR-OS, but this hasn't been
46 * verified and isn't likely to cause any problems.
47 */
48class HandleTable final : NonCopyable {
49public:
50 /// This is the maximum limit of handles allowed per process in Horizon
51 static constexpr std::size_t MAX_COUNT = 1024;
52
53 explicit HandleTable(KernelCore& kernel);
54 ~HandleTable();
55
56 /**
57 * Sets the number of handles that may be in use at one time
58 * for this handle table.
59 *
60 * @param handle_table_size The desired size to limit the handle table to.
61 *
62 * @returns an error code indicating if initialization was successful.
63 * If initialization was not successful, then ERR_OUT_OF_MEMORY
64 * will be returned.
65 *
66 * @pre handle_table_size must be within the range [0, 1024]
67 */
68 ResultCode SetSize(s32 handle_table_size);
69
70 /**
71 * Allocates a handle for the given object.
72 * @return The created Handle or one of the following errors:
73 * - `ERR_HANDLE_TABLE_FULL`: the maximum number of handles has been exceeded.
74 */
75 ResultVal<Handle> Create(std::shared_ptr<Object> obj);
76
77 /**
78 * Returns a new handle that points to the same object as the passed in handle.
79 * @return The duplicated Handle or one of the following errors:
80 * - `ERR_INVALID_HANDLE`: an invalid handle was passed in.
81 * - Any errors returned by `Create()`.
82 */
83 ResultVal<Handle> Duplicate(Handle handle);
84
85 /**
86 * Closes a handle, removing it from the table and decreasing the object's ref-count.
87 * @return `RESULT_SUCCESS` or one of the following errors:
88 * - `ERR_INVALID_HANDLE`: an invalid handle was passed in.
89 */
90 ResultCode Close(Handle handle);
91
92 /// Checks if a handle is valid and points to an existing object.
93 bool IsValid(Handle handle) const;
94
95 /**
96 * Looks up a handle.
97 * @return Pointer to the looked-up object, or `nullptr` if the handle is not valid.
98 */
99 std::shared_ptr<Object> GetGeneric(Handle handle) const;
100
101 /**
102 * Looks up a handle while verifying its type.
103 * @return Pointer to the looked-up object, or `nullptr` if the handle is not valid or its
104 * type differs from the requested one.
105 */
106 template <class T>
107 std::shared_ptr<T> Get(Handle handle) const {
108 return DynamicObjectCast<T>(GetGeneric(handle));
109 }
110
111 /// Closes all handles held in this table.
112 void Clear();
113
114private:
115 /// Stores the Object referenced by the handle or null if the slot is empty.
116 std::array<std::shared_ptr<Object>, MAX_COUNT> objects;
117
118 /**
119 * The value of `next_generation` when the handle was created, used to check for validity. For
120 * empty slots, contains the index of the next free slot in the list.
121 */
122 std::array<u16, MAX_COUNT> generations;
123
124 /**
125 * The limited size of the handle table. This can be specified by process
126 * capabilities in order to restrict the overall number of handles that
127 * can be created in a process instance
128 */
129 u16 table_size = static_cast<u16>(MAX_COUNT);
130
131 /**
132 * Global counter of the number of created handles. Stored in `generations` when a handle is
133 * created, and wraps around to 1 when it hits 0x8000.
134 */
135 u16 next_generation = 1;
136
137 /// Head of the free slots linked list.
138 u16 next_free_slot = 0;
139
140 /// Underlying kernel instance that this handle table operates under.
141 KernelCore& kernel;
142};
143
144} // namespace Kernel
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index 2b363b1d9..b505d20a6 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -14,17 +14,16 @@
14#include "common/common_types.h" 14#include "common/common_types.h"
15#include "common/logging/log.h" 15#include "common/logging/log.h"
16#include "core/hle/ipc_helpers.h" 16#include "core/hle/ipc_helpers.h"
17#include "core/hle/kernel/handle_table.h"
18#include "core/hle/kernel/hle_ipc.h" 17#include "core/hle/kernel/hle_ipc.h"
18#include "core/hle/kernel/k_handle_table.h"
19#include "core/hle/kernel/k_process.h"
19#include "core/hle/kernel/k_readable_event.h" 20#include "core/hle/kernel/k_readable_event.h"
20#include "core/hle/kernel/k_scheduler.h" 21#include "core/hle/kernel/k_scheduler.h"
21#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" 22#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
23#include "core/hle/kernel/k_server_session.h"
22#include "core/hle/kernel/k_thread.h" 24#include "core/hle/kernel/k_thread.h"
23#include "core/hle/kernel/k_writable_event.h" 25#include "core/hle/kernel/k_writable_event.h"
24#include "core/hle/kernel/kernel.h" 26#include "core/hle/kernel/kernel.h"
25#include "core/hle/kernel/object.h"
26#include "core/hle/kernel/process.h"
27#include "core/hle/kernel/server_session.h"
28#include "core/hle/kernel/svc_results.h" 27#include "core/hle/kernel/svc_results.h"
29#include "core/hle/kernel/time_manager.h" 28#include "core/hle/kernel/time_manager.h"
30#include "core/memory.h" 29#include "core/memory.h"
@@ -35,28 +34,23 @@ SessionRequestHandler::SessionRequestHandler() = default;
35 34
36SessionRequestHandler::~SessionRequestHandler() = default; 35SessionRequestHandler::~SessionRequestHandler() = default;
37 36
38void SessionRequestHandler::ClientConnected(std::shared_ptr<ServerSession> server_session) { 37void SessionRequestHandler::ClientConnected(KServerSession* session) {
39 server_session->SetHleHandler(shared_from_this()); 38 session->SetHleHandler(shared_from_this());
40 connected_sessions.push_back(std::move(server_session));
41} 39}
42 40
43void SessionRequestHandler::ClientDisconnected( 41void SessionRequestHandler::ClientDisconnected(KServerSession* session) {
44 const std::shared_ptr<ServerSession>& server_session) { 42 session->SetHleHandler(nullptr);
45 server_session->SetHleHandler(nullptr);
46 boost::range::remove_erase(connected_sessions, server_session);
47} 43}
48 44
49HLERequestContext::HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory, 45HLERequestContext::HLERequestContext(KernelCore& kernel_, Core::Memory::Memory& memory_,
50 std::shared_ptr<ServerSession> server_session, 46 KServerSession* server_session_, KThread* thread_)
51 std::shared_ptr<KThread> thread) 47 : server_session(server_session_), thread(thread_), kernel{kernel_}, memory{memory_} {
52 : server_session(std::move(server_session)),
53 thread(std::move(thread)), kernel{kernel}, memory{memory} {
54 cmd_buf[0] = 0; 48 cmd_buf[0] = 0;
55} 49}
56 50
57HLERequestContext::~HLERequestContext() = default; 51HLERequestContext::~HLERequestContext() = default;
58 52
59void HLERequestContext::ParseCommandBuffer(const HandleTable& handle_table, u32_le* src_cmdbuf, 53void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32_le* src_cmdbuf,
60 bool incoming) { 54 bool incoming) {
61 IPC::RequestParser rp(src_cmdbuf); 55 IPC::RequestParser rp(src_cmdbuf);
62 command_header = rp.PopRaw<IPC::CommandHeader>(); 56 command_header = rp.PopRaw<IPC::CommandHeader>();
@@ -77,12 +71,12 @@ void HLERequestContext::ParseCommandBuffer(const HandleTable& handle_table, u32_
77 for (u32 handle = 0; handle < handle_descriptor_header->num_handles_to_copy; ++handle) { 71 for (u32 handle = 0; handle < handle_descriptor_header->num_handles_to_copy; ++handle) {
78 const u32 copy_handle{rp.Pop<Handle>()}; 72 const u32 copy_handle{rp.Pop<Handle>()};
79 copy_handles.push_back(copy_handle); 73 copy_handles.push_back(copy_handle);
80 copy_objects.push_back(handle_table.GetGeneric(copy_handle)); 74 copy_objects.push_back(handle_table.GetObject(copy_handle).GetPointerUnsafe());
81 } 75 }
82 for (u32 handle = 0; handle < handle_descriptor_header->num_handles_to_move; ++handle) { 76 for (u32 handle = 0; handle < handle_descriptor_header->num_handles_to_move; ++handle) {
83 const u32 move_handle{rp.Pop<Handle>()}; 77 const u32 move_handle{rp.Pop<Handle>()};
84 move_handles.push_back(move_handle); 78 move_handles.push_back(move_handle);
85 move_objects.push_back(handle_table.GetGeneric(move_handle)); 79 move_objects.push_back(handle_table.GetObject(move_handle).GetPointerUnsafe());
86 } 80 }
87 } else { 81 } else {
88 // For responses we just ignore the handles, they're empty and will be populated when 82 // For responses we just ignore the handles, they're empty and will be populated when
@@ -169,7 +163,7 @@ void HLERequestContext::ParseCommandBuffer(const HandleTable& handle_table, u32_
169 rp.Skip(1, false); // The command is actually an u64, but we don't use the high part. 163 rp.Skip(1, false); // The command is actually an u64, but we don't use the high part.
170} 164}
171 165
172ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const HandleTable& handle_table, 166ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table,
173 u32_le* src_cmdbuf) { 167 u32_le* src_cmdbuf) {
174 ParseCommandBuffer(handle_table, src_cmdbuf, true); 168 ParseCommandBuffer(handle_table, src_cmdbuf, true);
175 if (command_header->type == IPC::CommandType::Close) { 169 if (command_header->type == IPC::CommandType::Close) {
@@ -223,12 +217,12 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(KThread& thread) {
223 // for specific values in each of these descriptors. 217 // for specific values in each of these descriptors.
224 for (auto& object : copy_objects) { 218 for (auto& object : copy_objects) {
225 ASSERT(object != nullptr); 219 ASSERT(object != nullptr);
226 dst_cmdbuf[current_offset++] = handle_table.Create(object).Unwrap(); 220 R_TRY(handle_table.Add(&dst_cmdbuf[current_offset++], object));
227 } 221 }
228 222
229 for (auto& object : move_objects) { 223 for (auto& object : move_objects) {
230 ASSERT(object != nullptr); 224 ASSERT(object != nullptr);
231 dst_cmdbuf[current_offset++] = handle_table.Create(object).Unwrap(); 225 R_TRY(handle_table.Add(&dst_cmdbuf[current_offset++], object));
232 } 226 }
233 } 227 }
234 228
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index 6fba42615..fa031c121 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
@@ -16,7 +16,8 @@
16#include "common/concepts.h" 16#include "common/concepts.h"
17#include "common/swap.h" 17#include "common/swap.h"
18#include "core/hle/ipc.h" 18#include "core/hle/ipc.h"
19#include "core/hle/kernel/object.h" 19#include "core/hle/kernel/k_auto_object.h"
20#include "core/hle/kernel/svc_common.h"
20 21
21union ResultCode; 22union ResultCode;
22 23
@@ -35,13 +36,14 @@ class ServiceFrameworkBase;
35namespace Kernel { 36namespace Kernel {
36 37
37class Domain; 38class Domain;
38class HandleTable;
39class HLERequestContext; 39class HLERequestContext;
40class KernelCore; 40class KernelCore;
41class Process; 41class KHandleTable;
42class ServerSession; 42class KProcess;
43class KServerSession;
43class KThread; 44class KThread;
44class KReadableEvent; 45class KReadableEvent;
46class KSession;
45class KWritableEvent; 47class KWritableEvent;
46 48
47enum class ThreadWakeupReason; 49enum class ThreadWakeupReason;
@@ -71,20 +73,14 @@ public:
71 * associated ServerSession alive for the duration of the connection. 73 * associated ServerSession alive for the duration of the connection.
72 * @param server_session Owning pointer to the ServerSession associated with the connection. 74 * @param server_session Owning pointer to the ServerSession associated with the connection.
73 */ 75 */
74 void ClientConnected(std::shared_ptr<ServerSession> server_session); 76 void ClientConnected(KServerSession* session);
75 77
76 /** 78 /**
77 * Signals that a client has just disconnected from this HLE handler and releases the 79 * Signals that a client has just disconnected from this HLE handler and releases the
78 * associated ServerSession. 80 * associated ServerSession.
79 * @param server_session ServerSession associated with the connection. 81 * @param server_session ServerSession associated with the connection.
80 */ 82 */
81 void ClientDisconnected(const std::shared_ptr<ServerSession>& server_session); 83 void ClientDisconnected(KServerSession* session);
82
83protected:
84 /// List of sessions that are connected to this handler.
85 /// A ServerSession whose server endpoint is an HLE implementation is kept alive by this list
86 /// for the duration of the connection.
87 std::vector<std::shared_ptr<ServerSession>> connected_sessions;
88}; 84};
89 85
90/** 86/**
@@ -109,8 +105,7 @@ protected:
109class HLERequestContext { 105class HLERequestContext {
110public: 106public:
111 explicit HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory, 107 explicit HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory,
112 std::shared_ptr<ServerSession> session, 108 KServerSession* session, KThread* thread);
113 std::shared_ptr<KThread> thread);
114 ~HLERequestContext(); 109 ~HLERequestContext();
115 110
116 /// Returns a pointer to the IPC command buffer for this request. 111 /// Returns a pointer to the IPC command buffer for this request.
@@ -122,12 +117,12 @@ public:
122 * Returns the session through which this request was made. This can be used as a map key to 117 * Returns the session through which this request was made. This can be used as a map key to
123 * access per-client data on services. 118 * access per-client data on services.
124 */ 119 */
125 const std::shared_ptr<Kernel::ServerSession>& Session() const { 120 Kernel::KServerSession* Session() {
126 return server_session; 121 return server_session;
127 } 122 }
128 123
129 /// Populates this context with data from the requesting process/thread. 124 /// Populates this context with data from the requesting process/thread.
130 ResultCode PopulateFromIncomingCommandBuffer(const HandleTable& handle_table, 125 ResultCode PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table,
131 u32_le* src_cmdbuf); 126 u32_le* src_cmdbuf);
132 127
133 /// Writes data from this context back to the requesting process/thread. 128 /// Writes data from this context back to the requesting process/thread.
@@ -218,22 +213,12 @@ public:
218 return move_handles.at(index); 213 return move_handles.at(index);
219 } 214 }
220 215
221 template <typename T> 216 void AddMoveObject(KAutoObject* object) {
222 std::shared_ptr<T> GetCopyObject(std::size_t index) { 217 move_objects.emplace_back(object);
223 return DynamicObjectCast<T>(copy_objects.at(index));
224 }
225
226 template <typename T>
227 std::shared_ptr<T> GetMoveObject(std::size_t index) {
228 return DynamicObjectCast<T>(move_objects.at(index));
229 } 218 }
230 219
231 void AddMoveObject(std::shared_ptr<Object> object) { 220 void AddCopyObject(KAutoObject* object) {
232 move_objects.emplace_back(std::move(object)); 221 copy_objects.emplace_back(object);
233 }
234
235 void AddCopyObject(std::shared_ptr<Object> object) {
236 copy_objects.emplace_back(std::move(object));
237 } 222 }
238 223
239 void AddDomainObject(std::shared_ptr<SessionRequestHandler> object) { 224 void AddDomainObject(std::shared_ptr<SessionRequestHandler> object) {
@@ -276,10 +261,6 @@ public:
276 return *thread; 261 return *thread;
277 } 262 }
278 263
279 const KThread& GetThread() const {
280 return *thread;
281 }
282
283 bool IsThreadWaiting() const { 264 bool IsThreadWaiting() const {
284 return is_thread_waiting; 265 return is_thread_waiting;
285 } 266 }
@@ -287,16 +268,17 @@ public:
287private: 268private:
288 friend class IPC::ResponseBuilder; 269 friend class IPC::ResponseBuilder;
289 270
290 void ParseCommandBuffer(const HandleTable& handle_table, u32_le* src_cmdbuf, bool incoming); 271 void ParseCommandBuffer(const KHandleTable& handle_table, u32_le* src_cmdbuf, bool incoming);
291 272
292 std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf; 273 std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf;
293 std::shared_ptr<Kernel::ServerSession> server_session; 274 Kernel::KServerSession* server_session{};
294 std::shared_ptr<KThread> thread; 275 KThread* thread;
276
295 // TODO(yuriks): Check common usage of this and optimize size accordingly 277 // TODO(yuriks): Check common usage of this and optimize size accordingly
296 boost::container::small_vector<Handle, 8> move_handles; 278 boost::container::small_vector<Handle, 8> move_handles;
297 boost::container::small_vector<Handle, 8> copy_handles; 279 boost::container::small_vector<Handle, 8> copy_handles;
298 boost::container::small_vector<std::shared_ptr<Object>, 8> move_objects; 280 boost::container::small_vector<KAutoObject*, 8> move_objects;
299 boost::container::small_vector<std::shared_ptr<Object>, 8> copy_objects; 281 boost::container::small_vector<KAutoObject*, 8> copy_objects;
300 boost::container::small_vector<std::shared_ptr<SessionRequestHandler>, 8> domain_objects; 282 boost::container::small_vector<std::shared_ptr<SessionRequestHandler>, 8> domain_objects;
301 283
302 std::optional<IPC::CommandHeader> command_header; 284 std::optional<IPC::CommandHeader> command_header;
diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp
new file mode 100644
index 000000000..69ae405e6
--- /dev/null
+++ b/src/core/hle/kernel/init/init_slab_setup.cpp
@@ -0,0 +1,192 @@
1// Copyright 2021 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/alignment.h"
6#include "common/assert.h"
7#include "common/common_funcs.h"
8#include "common/common_types.h"
9#include "core/core.h"
10#include "core/hardware_properties.h"
11#include "core/hle/kernel/init/init_slab_setup.h"
12#include "core/hle/kernel/k_event.h"
13#include "core/hle/kernel/k_memory_layout.h"
14#include "core/hle/kernel/k_memory_manager.h"
15#include "core/hle/kernel/k_port.h"
16#include "core/hle/kernel/k_process.h"
17#include "core/hle/kernel/k_resource_limit.h"
18#include "core/hle/kernel/k_session.h"
19#include "core/hle/kernel/k_shared_memory.h"
20#include "core/hle/kernel/k_system_control.h"
21#include "core/hle/kernel/k_thread.h"
22#include "core/hle/kernel/k_transfer_memory.h"
23#include "core/hle/kernel/memory_types.h"
24#include "core/memory.h"
25
26namespace Kernel::Init {
27
28#define SLAB_COUNT(CLASS) kernel.SlabResourceCounts().num_##CLASS
29
30#define FOREACH_SLAB_TYPE(HANDLER, ...) \
31 HANDLER(KProcess, (SLAB_COUNT(KProcess)), ##__VA_ARGS__) \
32 HANDLER(KThread, (SLAB_COUNT(KThread)), ##__VA_ARGS__) \
33 HANDLER(KEvent, (SLAB_COUNT(KEvent)), ##__VA_ARGS__) \
34 HANDLER(KPort, (SLAB_COUNT(KPort)), ##__VA_ARGS__) \
35 HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ##__VA_ARGS__) \
36 HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__) \
37 HANDLER(KSession, (SLAB_COUNT(KSession)), ##__VA_ARGS__) \
38 HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__)
39
40namespace {
41
42#define DEFINE_SLAB_TYPE_ENUM_MEMBER(NAME, COUNT, ...) KSlabType_##NAME,
43
44enum KSlabType : u32 {
45 FOREACH_SLAB_TYPE(DEFINE_SLAB_TYPE_ENUM_MEMBER) KSlabType_Count,
46};
47
48#undef DEFINE_SLAB_TYPE_ENUM_MEMBER
49
50// Constexpr counts.
51constexpr size_t SlabCountKProcess = 80;
52constexpr size_t SlabCountKThread = 800;
53constexpr size_t SlabCountKEvent = 700;
54constexpr size_t SlabCountKInterruptEvent = 100;
55constexpr size_t SlabCountKPort = 256 + 0x20; // Extra 0x20 ports over Nintendo for homebrew.
56constexpr size_t SlabCountKSharedMemory = 80;
57constexpr size_t SlabCountKTransferMemory = 200;
58constexpr size_t SlabCountKCodeMemory = 10;
59constexpr size_t SlabCountKDeviceAddressSpace = 300;
60constexpr size_t SlabCountKSession = 933;
61constexpr size_t SlabCountKLightSession = 100;
62constexpr size_t SlabCountKObjectName = 7;
63constexpr size_t SlabCountKResourceLimit = 5;
64constexpr size_t SlabCountKDebug = Core::Hardware::NUM_CPU_CORES;
65constexpr size_t SlabCountKAlpha = 1;
66constexpr size_t SlabCountKBeta = 6;
67
68constexpr size_t SlabCountExtraKThread = 160;
69
70template <typename T>
71VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAddr address,
72 size_t num_objects) {
73 const size_t size = Common::AlignUp(sizeof(T) * num_objects, alignof(void*));
74 VAddr start = Common::AlignUp(address, alignof(T));
75
76 if (size > 0) {
77 const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1);
78 ASSERT(region != nullptr);
79 ASSERT(region->IsDerivedFrom(KMemoryRegionType_KernelSlab));
80 T::InitializeSlabHeap(system.Kernel(), system.Memory().GetKernelBuffer(start, size), size);
81 }
82
83 return start + size;
84}
85
86} // namespace
87
88KSlabResourceCounts KSlabResourceCounts::CreateDefault() {
89 return {
90 .num_KProcess = SlabCountKProcess,
91 .num_KThread = SlabCountKThread,
92 .num_KEvent = SlabCountKEvent,
93 .num_KInterruptEvent = SlabCountKInterruptEvent,
94 .num_KPort = SlabCountKPort,
95 .num_KSharedMemory = SlabCountKSharedMemory,
96 .num_KTransferMemory = SlabCountKTransferMemory,
97 .num_KCodeMemory = SlabCountKCodeMemory,
98 .num_KDeviceAddressSpace = SlabCountKDeviceAddressSpace,
99 .num_KSession = SlabCountKSession,
100 .num_KLightSession = SlabCountKLightSession,
101 .num_KObjectName = SlabCountKObjectName,
102 .num_KResourceLimit = SlabCountKResourceLimit,
103 .num_KDebug = SlabCountKDebug,
104 .num_KAlpha = SlabCountKAlpha,
105 .num_KBeta = SlabCountKBeta,
106 };
107}
108
109void InitializeSlabResourceCounts(KernelCore& kernel) {
110 kernel.SlabResourceCounts() = KSlabResourceCounts::CreateDefault();
111 if (KSystemControl::Init::ShouldIncreaseThreadResourceLimit()) {
112 kernel.SlabResourceCounts().num_KThread += SlabCountExtraKThread;
113 }
114}
115
116size_t CalculateTotalSlabHeapSize(const KernelCore& kernel) {
117 size_t size = 0;
118
119#define ADD_SLAB_SIZE(NAME, COUNT, ...) \
120 { \
121 size += alignof(NAME); \
122 size += Common::AlignUp(sizeof(NAME) * (COUNT), alignof(void*)); \
123 };
124
125 // Add the size required for each slab.
126 FOREACH_SLAB_TYPE(ADD_SLAB_SIZE)
127
128#undef ADD_SLAB_SIZE
129
130 // Add the reserved size.
131 size += KernelSlabHeapGapsSize;
132
133 return size;
134}
135
136void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
137 auto& kernel = system.Kernel();
138
139 // Get the start of the slab region, since that's where we'll be working.
140 VAddr address = memory_layout.GetSlabRegionAddress();
141
142 // Initialize slab type array to be in sorted order.
143 std::array<KSlabType, KSlabType_Count> slab_types;
144 for (size_t i = 0; i < slab_types.size(); i++) {
145 slab_types[i] = static_cast<KSlabType>(i);
146 }
147
148 // N shuffles the slab type array with the following simple algorithm.
149 for (size_t i = 0; i < slab_types.size(); i++) {
150 const size_t rnd = KSystemControl::GenerateRandomRange(0, slab_types.size() - 1);
151 std::swap(slab_types[i], slab_types[rnd]);
152 }
153
154 // Create an array to represent the gaps between the slabs.
155 const size_t total_gap_size = KernelSlabHeapGapsSize;
156 std::array<size_t, slab_types.size()> slab_gaps;
157 for (size_t i = 0; i < slab_gaps.size(); i++) {
158 // Note: This is an off-by-one error from Nintendo's intention, because GenerateRandomRange
159 // is inclusive. However, Nintendo also has the off-by-one error, and it's "harmless", so we
160 // will include it ourselves.
161 slab_gaps[i] = KSystemControl::GenerateRandomRange(0, total_gap_size);
162 }
163
164 // Sort the array, so that we can treat differences between values as offsets to the starts of
165 // slabs.
166 for (size_t i = 1; i < slab_gaps.size(); i++) {
167 for (size_t j = i; j > 0 && slab_gaps[j - 1] > slab_gaps[j]; j--) {
168 std::swap(slab_gaps[j], slab_gaps[j - 1]);
169 }
170 }
171
172 for (size_t i = 0; i < slab_types.size(); i++) {
173 // Add the random gap to the address.
174 address += (i == 0) ? slab_gaps[0] : slab_gaps[i] - slab_gaps[i - 1];
175
176#define INITIALIZE_SLAB_HEAP(NAME, COUNT, ...) \
177 case KSlabType_##NAME: \
178 address = InitializeSlabHeap<NAME>(system, memory_layout, address, COUNT); \
179 break;
180
181 // Initialize the slabheap.
182 switch (slab_types[i]) {
183 // For each of the slab types, we want to initialize that heap.
184 FOREACH_SLAB_TYPE(INITIALIZE_SLAB_HEAP)
185 // If we somehow get an invalid type, abort.
186 default:
187 UNREACHABLE();
188 }
189 }
190}
191
192} // namespace Kernel::Init
diff --git a/src/core/hle/kernel/init/init_slab_setup.h b/src/core/hle/kernel/init/init_slab_setup.h
new file mode 100644
index 000000000..a8f7e0918
--- /dev/null
+++ b/src/core/hle/kernel/init/init_slab_setup.h
@@ -0,0 +1,43 @@
1// Copyright 2021 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7namespace Core {
8class System;
9} // namespace Core
10
11namespace Kernel {
12class KernelCore;
13class KMemoryLayout;
14} // namespace Kernel
15
16namespace Kernel::Init {
17
// Per-type object counts for the kernel's slab heaps.
// NOTE: member order is significant — CreateDefault() initializes this
// aggregate with designated initializers in declaration order.
struct KSlabResourceCounts {
    // Returns the baseline counts (before system-control adjustments).
    static KSlabResourceCounts CreateDefault();

    size_t num_KProcess;
    size_t num_KThread;
    size_t num_KEvent;
    size_t num_KInterruptEvent;
    size_t num_KPort;
    size_t num_KSharedMemory;
    size_t num_KTransferMemory;
    size_t num_KCodeMemory;
    size_t num_KDeviceAddressSpace;
    size_t num_KSession;
    size_t num_KLightSession;
    size_t num_KObjectName;
    size_t num_KResourceLimit;
    size_t num_KDebug;
    size_t num_KAlpha;
    size_t num_KBeta;
};
38
39void InitializeSlabResourceCounts(KernelCore& kernel);
40size_t CalculateTotalSlabHeapSize(const KernelCore& kernel);
41void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout);
42
43} // namespace Kernel::Init
diff --git a/src/core/hle/kernel/k_auto_object.cpp b/src/core/hle/kernel/k_auto_object.cpp
new file mode 100644
index 000000000..dbe237f09
--- /dev/null
+++ b/src/core/hle/kernel/k_auto_object.cpp
@@ -0,0 +1,14 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "core/hle/kernel/k_auto_object.h"
6
7namespace Kernel {
8
9KAutoObject* KAutoObject::Create(KAutoObject* obj) {
10 obj->m_ref_count = 1;
11 return obj;
12}
13
14} // namespace Kernel
diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h
new file mode 100644
index 000000000..765e46670
--- /dev/null
+++ b/src/core/hle/kernel/k_auto_object.h
@@ -0,0 +1,306 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <atomic>
8#include <string>
9
10#include "common/assert.h"
11#include "common/common_funcs.h"
12#include "common/common_types.h"
13#include "common/intrusive_red_black_tree.h"
14#include "core/hle/kernel/k_class_token.h"
15
16namespace Kernel {
17
18class KernelCore;
19class KProcess;
20
21#define KERNEL_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS) \
22 YUZU_NON_COPYABLE(CLASS); \
23 YUZU_NON_MOVEABLE(CLASS); \
24 \
25private: \
26 friend class ::Kernel::KClassTokenGenerator; \
27 static constexpr inline auto ObjectType = ::Kernel::KClassTokenGenerator::ObjectType::CLASS; \
28 static constexpr inline const char* const TypeName = #CLASS; \
29 static constexpr inline ClassTokenType ClassToken() { \
30 return ::Kernel::ClassToken<CLASS>; \
31 } \
32 \
33public: \
34 using BaseClass = BASE_CLASS; \
35 static constexpr TypeObj GetStaticTypeObj() { \
36 constexpr ClassTokenType Token = ClassToken(); \
37 return TypeObj(TypeName, Token); \
38 } \
39 static constexpr const char* GetStaticTypeName() { \
40 return TypeName; \
41 } \
42 virtual TypeObj GetTypeObj() const { \
43 return GetStaticTypeObj(); \
44 } \
45 virtual const char* GetTypeName() const { \
46 return GetStaticTypeName(); \
47 } \
48 \
49private: \
50 constexpr bool operator!=(const TypeObj& rhs)
51
52class KAutoObject {
53protected:
54 class TypeObj {
55 public:
56 constexpr explicit TypeObj(const char* n, ClassTokenType tok)
57 : m_name(n), m_class_token(tok) {}
58
59 constexpr const char* GetName() const {
60 return m_name;
61 }
62 constexpr ClassTokenType GetClassToken() const {
63 return m_class_token;
64 }
65
66 constexpr bool operator==(const TypeObj& rhs) const {
67 return this->GetClassToken() == rhs.GetClassToken();
68 }
69
70 constexpr bool operator!=(const TypeObj& rhs) const {
71 return this->GetClassToken() != rhs.GetClassToken();
72 }
73
74 constexpr bool IsDerivedFrom(const TypeObj& rhs) const {
75 return (this->GetClassToken() | rhs.GetClassToken()) == this->GetClassToken();
76 }
77
78 private:
79 const char* m_name;
80 ClassTokenType m_class_token;
81 };
82
83private:
84 KERNEL_AUTOOBJECT_TRAITS(KAutoObject, KAutoObject);
85
86public:
87 explicit KAutoObject(KernelCore& kernel_) : kernel(kernel_) {}
88 virtual ~KAutoObject() = default;
89
90 static KAutoObject* Create(KAutoObject* ptr);
91
92 // Destroy is responsible for destroying the auto object's resources when ref_count hits zero.
93 virtual void Destroy() {
94 UNIMPLEMENTED();
95 }
96
97 // Finalize is responsible for cleaning up resource, but does not destroy the object.
98 virtual void Finalize() {}
99
100 virtual KProcess* GetOwner() const {
101 return nullptr;
102 }
103
104 u32 GetReferenceCount() const {
105 return m_ref_count.load();
106 }
107
108 bool IsDerivedFrom(const TypeObj& rhs) const {
109 return this->GetTypeObj().IsDerivedFrom(rhs);
110 }
111
112 bool IsDerivedFrom(const KAutoObject& rhs) const {
113 return this->IsDerivedFrom(rhs.GetTypeObj());
114 }
115
116 template <typename Derived>
117 Derived DynamicCast() {
118 static_assert(std::is_pointer_v<Derived>);
119 using DerivedType = std::remove_pointer_t<Derived>;
120
121 if (this->IsDerivedFrom(DerivedType::GetStaticTypeObj())) {
122 return static_cast<Derived>(this);
123 } else {
124 return nullptr;
125 }
126 }
127
128 template <typename Derived>
129 const Derived DynamicCast() const {
130 static_assert(std::is_pointer_v<Derived>);
131 using DerivedType = std::remove_pointer_t<Derived>;
132
133 if (this->IsDerivedFrom(DerivedType::GetStaticTypeObj())) {
134 return static_cast<Derived>(this);
135 } else {
136 return nullptr;
137 }
138 }
139
140 bool Open() {
141 // Atomically increment the reference count, only if it's positive.
142 u32 cur_ref_count = m_ref_count.load(std::memory_order_acquire);
143 do {
144 if (cur_ref_count == 0) {
145 return false;
146 }
147 ASSERT(cur_ref_count < cur_ref_count + 1);
148 } while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count + 1,
149 std::memory_order_relaxed));
150
151 return true;
152 }
153
154 void Close() {
155 // Atomically decrement the reference count, not allowing it to become negative.
156 u32 cur_ref_count = m_ref_count.load(std::memory_order_acquire);
157 do {
158 ASSERT(cur_ref_count > 0);
159 } while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count - 1,
160 std::memory_order_relaxed));
161
162 // If ref count hits zero, destroy the object.
163 if (cur_ref_count - 1 == 0) {
164 this->Destroy();
165 }
166 }
167
168protected:
169 KernelCore& kernel;
170 std::string name;
171
172private:
173 std::atomic<u32> m_ref_count{};
174};
175
176class KAutoObjectWithListContainer;
177
178class KAutoObjectWithList : public KAutoObject {
179public:
180 explicit KAutoObjectWithList(KernelCore& kernel_) : KAutoObject(kernel_), kernel(kernel_) {}
181
182 static int Compare(const KAutoObjectWithList& lhs, const KAutoObjectWithList& rhs) {
183 const u64 lid = lhs.GetId();
184 const u64 rid = rhs.GetId();
185
186 if (lid < rid) {
187 return -1;
188 } else if (lid > rid) {
189 return 1;
190 } else {
191 return 0;
192 }
193 }
194
195public:
196 virtual u64 GetId() const {
197 return reinterpret_cast<u64>(this);
198 }
199
200 virtual const std::string& GetName() const {
201 return name;
202 }
203
204private:
205 friend class KAutoObjectWithListContainer;
206
207private:
208 Common::IntrusiveRedBlackTreeNode list_node;
209
210protected:
211 KernelCore& kernel;
212};
213
214template <typename T>
215class KScopedAutoObject {
216 YUZU_NON_COPYABLE(KScopedAutoObject);
217
218public:
219 constexpr KScopedAutoObject() = default;
220
221 constexpr KScopedAutoObject(T* o) : m_obj(o) {
222 if (m_obj != nullptr) {
223 m_obj->Open();
224 }
225 }
226
227 ~KScopedAutoObject() {
228 if (m_obj != nullptr) {
229 m_obj->Close();
230 }
231 m_obj = nullptr;
232 }
233
234 template <typename U>
235 requires(std::derived_from<T, U> ||
236 std::derived_from<U, T>) constexpr KScopedAutoObject(KScopedAutoObject<U>&& rhs) {
237 if constexpr (std::derived_from<U, T>) {
238 // Upcast.
239 m_obj = rhs.m_obj;
240 rhs.m_obj = nullptr;
241 } else {
242 // Downcast.
243 T* derived = nullptr;
244 if (rhs.m_obj != nullptr) {
245 derived = rhs.m_obj->template DynamicCast<T*>();
246 if (derived == nullptr) {
247 rhs.m_obj->Close();
248 }
249 }
250
251 m_obj = derived;
252 rhs.m_obj = nullptr;
253 }
254 }
255
256 constexpr KScopedAutoObject<T>& operator=(KScopedAutoObject<T>&& rhs) {
257 rhs.Swap(*this);
258 return *this;
259 }
260
261 constexpr T* operator->() {
262 return m_obj;
263 }
264 constexpr T& operator*() {
265 return *m_obj;
266 }
267
268 constexpr void Reset(T* o) {
269 KScopedAutoObject(o).Swap(*this);
270 }
271
272 constexpr T* GetPointerUnsafe() {
273 return m_obj;
274 }
275
276 constexpr T* GetPointerUnsafe() const {
277 return m_obj;
278 }
279
280 constexpr T* ReleasePointerUnsafe() {
281 T* ret = m_obj;
282 m_obj = nullptr;
283 return ret;
284 }
285
286 constexpr bool IsNull() const {
287 return m_obj == nullptr;
288 }
289 constexpr bool IsNotNull() const {
290 return m_obj != nullptr;
291 }
292
293private:
294 template <typename U>
295 friend class KScopedAutoObject;
296
297private:
298 T* m_obj{};
299
300private:
301 constexpr void Swap(KScopedAutoObject& rhs) noexcept {
302 std::swap(m_obj, rhs.m_obj);
303 }
304};
305
306} // namespace Kernel
diff --git a/src/core/hle/kernel/k_auto_object_container.cpp b/src/core/hle/kernel/k_auto_object_container.cpp
new file mode 100644
index 000000000..fc0c28874
--- /dev/null
+++ b/src/core/hle/kernel/k_auto_object_container.cpp
@@ -0,0 +1,28 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "core/hle/kernel/k_auto_object_container.h"
6
7namespace Kernel {
8
9void KAutoObjectWithListContainer::Register(KAutoObjectWithList* obj) {
10 KScopedLightLock lk(m_lock);
11
12 m_object_list.insert(*obj);
13}
14
15void KAutoObjectWithListContainer::Unregister(KAutoObjectWithList* obj) {
16 KScopedLightLock lk(m_lock);
17
18 m_object_list.erase(m_object_list.iterator_to(*obj));
19}
20
21size_t KAutoObjectWithListContainer::GetOwnedCount(KProcess* owner) {
22 KScopedLightLock lk(m_lock);
23
24 return std::count_if(m_object_list.begin(), m_object_list.end(),
25 [&](const auto& obj) { return obj.GetOwner() == owner; });
26}
27
28} // namespace Kernel
diff --git a/src/core/hle/kernel/k_auto_object_container.h b/src/core/hle/kernel/k_auto_object_container.h
new file mode 100644
index 000000000..ff40cf5a7
--- /dev/null
+++ b/src/core/hle/kernel/k_auto_object_container.h
@@ -0,0 +1,70 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <atomic>
8
9#include "common/assert.h"
10#include "common/common_funcs.h"
11#include "common/common_types.h"
12#include "common/intrusive_red_black_tree.h"
13#include "core/hle/kernel/k_auto_object.h"
14#include "core/hle/kernel/k_light_lock.h"
15
16namespace Kernel {
17
18class KernelCore;
19class KProcess;
20
21class KAutoObjectWithListContainer {
22 YUZU_NON_COPYABLE(KAutoObjectWithListContainer);
23 YUZU_NON_MOVEABLE(KAutoObjectWithListContainer);
24
25public:
26 using ListType = Common::IntrusiveRedBlackTreeMemberTraits<
27 &KAutoObjectWithList::list_node>::TreeType<KAutoObjectWithList>;
28
29public:
30 class ListAccessor : public KScopedLightLock {
31 public:
32 explicit ListAccessor(KAutoObjectWithListContainer* container)
33 : KScopedLightLock(container->m_lock), m_list(container->m_object_list) {}
34 explicit ListAccessor(KAutoObjectWithListContainer& container)
35 : KScopedLightLock(container.m_lock), m_list(container.m_object_list) {}
36
37 typename ListType::iterator begin() const {
38 return m_list.begin();
39 }
40
41 typename ListType::iterator end() const {
42 return m_list.end();
43 }
44
45 typename ListType::iterator find(typename ListType::const_reference ref) const {
46 return m_list.find(ref);
47 }
48
49 private:
50 ListType& m_list;
51 };
52
53 friend class ListAccessor;
54
55public:
56 KAutoObjectWithListContainer(KernelCore& kernel) : m_lock(kernel), m_object_list() {}
57
58 void Initialize() {}
59 void Finalize() {}
60
61 void Register(KAutoObjectWithList* obj);
62 void Unregister(KAutoObjectWithList* obj);
63 size_t GetOwnedCount(KProcess* owner);
64
65private:
66 KLightLock m_lock;
67 ListType m_object_list;
68};
69
70} // namespace Kernel
diff --git a/src/core/hle/kernel/k_class_token.cpp b/src/core/hle/kernel/k_class_token.cpp
new file mode 100644
index 000000000..beb8a2a05
--- /dev/null
+++ b/src/core/hle/kernel/k_class_token.cpp
@@ -0,0 +1,133 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "core/hle/kernel/k_auto_object.h"
6#include "core/hle/kernel/k_class_token.h"
7#include "core/hle/kernel/k_client_port.h"
8#include "core/hle/kernel/k_client_session.h"
9#include "core/hle/kernel/k_event.h"
10#include "core/hle/kernel/k_port.h"
11#include "core/hle/kernel/k_process.h"
12#include "core/hle/kernel/k_readable_event.h"
13#include "core/hle/kernel/k_resource_limit.h"
14#include "core/hle/kernel/k_server_port.h"
15#include "core/hle/kernel/k_server_session.h"
16#include "core/hle/kernel/k_session.h"
17#include "core/hle/kernel/k_shared_memory.h"
18#include "core/hle/kernel/k_synchronization_object.h"
19#include "core/hle/kernel/k_thread.h"
20#include "core/hle/kernel/k_transfer_memory.h"
21#include "core/hle/kernel/k_writable_event.h"
22
23namespace Kernel {
24
25// Ensure that we generate correct class tokens for all types.
26
27// Ensure that the absolute token values are correct.
28static_assert(ClassToken<KAutoObject> == 0b00000000'00000000);
29static_assert(ClassToken<KSynchronizationObject> == 0b00000000'00000001);
30static_assert(ClassToken<KReadableEvent> == 0b00000000'00000011);
31// static_assert(ClassToken<KInterruptEvent> == 0b00000111'00000011);
32// static_assert(ClassToken<KDebug> == 0b00001011'00000001);
33static_assert(ClassToken<KThread> == 0b00010011'00000001);
34static_assert(ClassToken<KServerPort> == 0b00100011'00000001);
35static_assert(ClassToken<KServerSession> == 0b01000011'00000001);
36static_assert(ClassToken<KClientPort> == 0b10000011'00000001);
37static_assert(ClassToken<KClientSession> == 0b00001101'00000000);
38static_assert(ClassToken<KProcess> == 0b00010101'00000001);
39static_assert(ClassToken<KResourceLimit> == 0b00100101'00000000);
40// static_assert(ClassToken<KLightSession> == 0b01000101'00000000);
41static_assert(ClassToken<KPort> == 0b10000101'00000000);
42static_assert(ClassToken<KSession> == 0b00011001'00000000);
43static_assert(ClassToken<KSharedMemory> == 0b00101001'00000000);
44static_assert(ClassToken<KEvent> == 0b01001001'00000000);
45static_assert(ClassToken<KWritableEvent> == 0b10001001'00000000);
46// static_assert(ClassToken<KLightClientSession> == 0b00110001'00000000);
47// static_assert(ClassToken<KLightServerSession> == 0b01010001'00000000);
48static_assert(ClassToken<KTransferMemory> == 0b10010001'00000000);
49// static_assert(ClassToken<KDeviceAddressSpace> == 0b01100001'00000000);
50// static_assert(ClassToken<KSessionRequest> == 0b10100001'00000000);
51// static_assert(ClassToken<KCodeMemory> == 0b11000001'00000000);
52
53// Ensure that the token hierarchy is correct.
54
55// Base classes
56static_assert(ClassToken<KAutoObject> == (0b00000000));
57static_assert(ClassToken<KSynchronizationObject> == (0b00000001 | ClassToken<KAutoObject>));
58static_assert(ClassToken<KReadableEvent> == (0b00000010 | ClassToken<KSynchronizationObject>));
59
60// Final classes
61// static_assert(ClassToken<KInterruptEvent> == ((0b00000111 << 8) | ClassToken<KReadableEvent>));
62// static_assert(ClassToken<KDebug> == ((0b00001011 << 8) | ClassToken<KSynchronizationObject>));
63static_assert(ClassToken<KThread> == ((0b00010011 << 8) | ClassToken<KSynchronizationObject>));
64static_assert(ClassToken<KServerPort> == ((0b00100011 << 8) | ClassToken<KSynchronizationObject>));
65static_assert(ClassToken<KServerSession> ==
66 ((0b01000011 << 8) | ClassToken<KSynchronizationObject>));
67static_assert(ClassToken<KClientPort> == ((0b10000011 << 8) | ClassToken<KSynchronizationObject>));
68static_assert(ClassToken<KClientSession> == ((0b00001101 << 8) | ClassToken<KAutoObject>));
69static_assert(ClassToken<KProcess> == ((0b00010101 << 8) | ClassToken<KSynchronizationObject>));
70static_assert(ClassToken<KResourceLimit> == ((0b00100101 << 8) | ClassToken<KAutoObject>));
71// static_assert(ClassToken<KLightSession> == ((0b01000101 << 8) | ClassToken<KAutoObject>));
72static_assert(ClassToken<KPort> == ((0b10000101 << 8) | ClassToken<KAutoObject>));
73static_assert(ClassToken<KSession> == ((0b00011001 << 8) | ClassToken<KAutoObject>));
74static_assert(ClassToken<KSharedMemory> == ((0b00101001 << 8) | ClassToken<KAutoObject>));
75static_assert(ClassToken<KEvent> == ((0b01001001 << 8) | ClassToken<KAutoObject>));
76static_assert(ClassToken<KWritableEvent> == ((0b10001001 << 8) | ClassToken<KAutoObject>));
77// static_assert(ClassToken<KLightClientSession> == ((0b00110001 << 8) | ClassToken<KAutoObject>));
78// static_assert(ClassToken<KLightServerSession> == ((0b01010001 << 8) | ClassToken<KAutoObject>));
79static_assert(ClassToken<KTransferMemory> == ((0b10010001 << 8) | ClassToken<KAutoObject>));
80// static_assert(ClassToken<KDeviceAddressSpace> == ((0b01100001 << 8) | ClassToken<KAutoObject>));
81// static_assert(ClassToken<KSessionRequest> == ((0b10100001 << 8) | ClassToken<KAutoObject>));
82// static_assert(ClassToken<KCodeMemory> == ((0b11000001 << 8) | ClassToken<KAutoObject>));
83
84// Ensure that the token hierarchy reflects the class hierarchy.
85
86// Base classes.
87static_assert(!std::is_final<KSynchronizationObject>::value &&
88 std::is_base_of<KAutoObject, KSynchronizationObject>::value);
89static_assert(!std::is_final<KReadableEvent>::value &&
90 std::is_base_of<KSynchronizationObject, KReadableEvent>::value);
91
92// Final classes
93// static_assert(std::is_final<KInterruptEvent>::value &&
94// std::is_base_of<KReadableEvent, KInterruptEvent>::value);
95// static_assert(std::is_final<KDebug>::value &&
96// std::is_base_of<KSynchronizationObject, KDebug>::value);
97static_assert(std::is_final<KThread>::value &&
98 std::is_base_of<KSynchronizationObject, KThread>::value);
99static_assert(std::is_final<KServerPort>::value &&
100 std::is_base_of<KSynchronizationObject, KServerPort>::value);
101static_assert(std::is_final<KServerSession>::value &&
102 std::is_base_of<KSynchronizationObject, KServerSession>::value);
103static_assert(std::is_final<KClientPort>::value &&
104 std::is_base_of<KSynchronizationObject, KClientPort>::value);
105static_assert(std::is_final<KClientSession>::value &&
106 std::is_base_of<KAutoObject, KClientSession>::value);
107static_assert(std::is_final<KProcess>::value &&
108 std::is_base_of<KSynchronizationObject, KProcess>::value);
109static_assert(std::is_final<KResourceLimit>::value &&
110 std::is_base_of<KAutoObject, KResourceLimit>::value);
111// static_assert(std::is_final<KLightSession>::value &&
112// std::is_base_of<KAutoObject, KLightSession>::value);
113static_assert(std::is_final<KPort>::value && std::is_base_of<KAutoObject, KPort>::value);
114static_assert(std::is_final<KSession>::value && std::is_base_of<KAutoObject, KSession>::value);
115static_assert(std::is_final<KSharedMemory>::value &&
116 std::is_base_of<KAutoObject, KSharedMemory>::value);
117static_assert(std::is_final<KEvent>::value && std::is_base_of<KAutoObject, KEvent>::value);
118static_assert(std::is_final<KWritableEvent>::value &&
119 std::is_base_of<KAutoObject, KWritableEvent>::value);
120// static_assert(std::is_final<KLightClientSession>::value &&
121// std::is_base_of<KAutoObject, KLightClientSession>::value);
122// static_assert(std::is_final<KLightServerSession>::value &&
123// std::is_base_of<KAutoObject, KLightServerSession>::value);
124static_assert(std::is_final<KTransferMemory>::value &&
125 std::is_base_of<KAutoObject, KTransferMemory>::value);
126// static_assert(std::is_final<KDeviceAddressSpace>::value &&
127// std::is_base_of<KAutoObject, KDeviceAddressSpace>::value);
128// static_assert(std::is_final<KSessionRequest>::value &&
129// std::is_base_of<KAutoObject, KSessionRequest>::value);
130// static_assert(std::is_final<KCodeMemory>::value &&
131// std::is_base_of<KAutoObject, KCodeMemory>::value);
132
133} // namespace Kernel
diff --git a/src/core/hle/kernel/k_class_token.h b/src/core/hle/kernel/k_class_token.h
new file mode 100644
index 000000000..c28db49ec
--- /dev/null
+++ b/src/core/hle/kernel/k_class_token.h
@@ -0,0 +1,131 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <atomic>
8
9#include "common/assert.h"
10#include "common/bit_util.h"
11#include "common/common_types.h"
12
13namespace Kernel {
14
15class KAutoObject;
16
17class KClassTokenGenerator {
18public:
19 using TokenBaseType = u16;
20
21public:
22 static constexpr size_t BaseClassBits = 8;
23 static constexpr size_t FinalClassBits = (sizeof(TokenBaseType) * CHAR_BIT) - BaseClassBits;
24 // One bit per base class.
25 static constexpr size_t NumBaseClasses = BaseClassBits;
26 // Final classes are permutations of three bits.
27 static constexpr size_t NumFinalClasses = [] {
28 TokenBaseType index = 0;
29 for (size_t i = 0; i < FinalClassBits; i++) {
30 for (size_t j = i + 1; j < FinalClassBits; j++) {
31 for (size_t k = j + 1; k < FinalClassBits; k++) {
32 index++;
33 }
34 }
35 }
36 return index;
37 }();
38
39private:
40 template <TokenBaseType Index>
41 static constexpr inline TokenBaseType BaseClassToken = 1U << Index;
42
43 template <TokenBaseType Index>
44 static constexpr inline TokenBaseType FinalClassToken = [] {
45 TokenBaseType index = 0;
46 for (size_t i = 0; i < FinalClassBits; i++) {
47 for (size_t j = i + 1; j < FinalClassBits; j++) {
48 for (size_t k = j + 1; k < FinalClassBits; k++) {
49 if ((index++) == Index) {
50 return static_cast<TokenBaseType>(((1ULL << i) | (1ULL << j) | (1ULL << k))
51 << BaseClassBits);
52 }
53 }
54 }
55 }
56 }();
57
58 template <typename T>
59 static constexpr inline TokenBaseType GetClassToken() {
60 static_assert(std::is_base_of<KAutoObject, T>::value);
61 if constexpr (std::is_same<T, KAutoObject>::value) {
62 static_assert(T::ObjectType == ObjectType::KAutoObject);
63 return 0;
64 } else if constexpr (!std::is_final<T>::value) {
65 static_assert(ObjectType::BaseClassesStart <= T::ObjectType &&
66 T::ObjectType < ObjectType::BaseClassesEnd);
67 constexpr auto ClassIndex = static_cast<TokenBaseType>(T::ObjectType) -
68 static_cast<TokenBaseType>(ObjectType::BaseClassesStart);
69 return BaseClassToken<ClassIndex> | GetClassToken<typename T::BaseClass>();
70 } else if constexpr (ObjectType::FinalClassesStart <= T::ObjectType &&
71 T::ObjectType < ObjectType::FinalClassesEnd) {
72 constexpr auto ClassIndex = static_cast<TokenBaseType>(T::ObjectType) -
73 static_cast<TokenBaseType>(ObjectType::FinalClassesStart);
74 return FinalClassToken<ClassIndex> | GetClassToken<typename T::BaseClass>();
75 } else {
76 static_assert(!std::is_same<T, T>::value, "GetClassToken: Invalid Type");
77 }
78 };
79
80public:
81 enum class ObjectType {
82 KAutoObject,
83
84 BaseClassesStart,
85
86 KSynchronizationObject = BaseClassesStart,
87 KReadableEvent,
88
89 BaseClassesEnd,
90
91 FinalClassesStart = BaseClassesEnd,
92
93 KInterruptEvent = FinalClassesStart,
94 KDebug,
95 KThread,
96 KServerPort,
97 KServerSession,
98 KClientPort,
99 KClientSession,
100 KProcess,
101 KResourceLimit,
102 KLightSession,
103 KPort,
104 KSession,
105 KSharedMemory,
106 KEvent,
107 KWritableEvent,
108 KLightClientSession,
109 KLightServerSession,
110 KTransferMemory,
111 KDeviceAddressSpace,
112 KSessionRequest,
113 KCodeMemory,
114
115 // NOTE: True order for these has not been determined yet.
116 KAlpha,
117 KBeta,
118
119 FinalClassesEnd = FinalClassesStart + NumFinalClasses,
120 };
121
122 template <typename T>
123 static constexpr inline TokenBaseType ClassToken = GetClassToken<T>();
124};
125
126using ClassTokenType = KClassTokenGenerator::TokenBaseType;
127
128template <typename T>
129static constexpr inline ClassTokenType ClassToken = KClassTokenGenerator::ClassToken<T>;
130
131} // namespace Kernel
diff --git a/src/core/hle/kernel/k_client_port.cpp b/src/core/hle/kernel/k_client_port.cpp
new file mode 100644
index 000000000..b6f1d713f
--- /dev/null
+++ b/src/core/hle/kernel/k_client_port.cpp
@@ -0,0 +1,125 @@
1// Copyright 2021 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/scope_exit.h"
6#include "core/hle/kernel/hle_ipc.h"
7#include "core/hle/kernel/k_client_port.h"
8#include "core/hle/kernel/k_port.h"
9#include "core/hle/kernel/k_scheduler.h"
10#include "core/hle/kernel/k_scoped_resource_reservation.h"
11#include "core/hle/kernel/k_session.h"
12#include "core/hle/kernel/svc_results.h"
13
14namespace Kernel {
15
16KClientPort::KClientPort(KernelCore& kernel) : KSynchronizationObject{kernel} {}
17KClientPort::~KClientPort() = default;
18
19void KClientPort::Initialize(KPort* parent_, s32 max_sessions_, std::string&& name_) {
20 // Set member variables.
21 num_sessions = 0;
22 peak_sessions = 0;
23 parent = parent_;
24 max_sessions = max_sessions_;
25 name = std::move(name_);
26}
27
28void KClientPort::OnSessionFinalized() {
29 KScopedSchedulerLock sl{kernel};
30
31 const auto prev = num_sessions--;
32 if (prev == max_sessions) {
33 this->NotifyAvailable();
34 }
35}
36
37void KClientPort::OnServerClosed() {}
38
39bool KClientPort::IsLight() const {
40 return this->GetParent()->IsLight();
41}
42
43bool KClientPort::IsServerClosed() const {
44 return this->GetParent()->IsServerClosed();
45}
46
47void KClientPort::Destroy() {
48 // Note with our parent that we're closed.
49 parent->OnClientClosed();
50
51 // Close our reference to our parent.
52 parent->Close();
53}
54
55bool KClientPort::IsSignaled() const {
56 return num_sessions < max_sessions;
57}
58
59ResultCode KClientPort::CreateSession(KClientSession** out) {
60 // Reserve a new session from the resource limit.
61 KScopedResourceReservation session_reservation(kernel.CurrentProcess()->GetResourceLimit(),
62 LimitableResource::Sessions);
63 R_UNLESS(session_reservation.Succeeded(), ResultLimitReached);
64
65 // Update the session counts.
66 {
67 // Atomically increment the number of sessions.
68 s32 new_sessions;
69 {
70 const auto max = max_sessions;
71 auto cur_sessions = num_sessions.load(std::memory_order_acquire);
72 do {
73 R_UNLESS(cur_sessions < max, ResultOutOfSessions);
74 new_sessions = cur_sessions + 1;
75 } while (!num_sessions.compare_exchange_weak(cur_sessions, new_sessions,
76 std::memory_order_relaxed));
77 }
78
79 // Atomically update the peak session tracking.
80 {
81 auto peak = peak_sessions.load(std::memory_order_acquire);
82 do {
83 if (peak >= new_sessions) {
84 break;
85 }
86 } while (!peak_sessions.compare_exchange_weak(peak, new_sessions,
87 std::memory_order_relaxed));
88 }
89 }
90
91 // Create a new session.
92 KSession* session = KSession::Create(kernel);
93 if (session == nullptr) {
94 /* Decrement the session count. */
95 const auto prev = num_sessions--;
96 if (prev == max_sessions) {
97 this->NotifyAvailable();
98 }
99
100 return ResultOutOfResource;
101 }
102
103 // Initialize the session.
104 session->Initialize(this, parent->GetName());
105
106 // Commit the session reservation.
107 session_reservation.Commit();
108
109 // Register the session.
110 KSession::Register(kernel, session);
111 auto session_guard = SCOPE_GUARD({
112 session->GetClientSession().Close();
113 session->GetServerSession().Close();
114 });
115
116 // Enqueue the session with our parent.
117 R_TRY(parent->EnqueueSession(std::addressof(session->GetServerSession())));
118
119 // We succeeded, so set the output.
120 session_guard.Cancel();
121 *out = std::addressof(session->GetClientSession());
122 return RESULT_SUCCESS;
123}
124
125} // namespace Kernel
diff --git a/src/core/hle/kernel/k_client_port.h b/src/core/hle/kernel/k_client_port.h
new file mode 100644
index 000000000..ec1d7e12e
--- /dev/null
+++ b/src/core/hle/kernel/k_client_port.h
@@ -0,0 +1,61 @@
1// Copyright 2016 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8#include <string>
9
10#include "common/common_types.h"
11#include "core/hle/kernel/k_synchronization_object.h"
12#include "core/hle/result.h"
13
14namespace Kernel {
15
16class KClientSession;
17class KernelCore;
18class KPort;
19
20class KClientPort final : public KSynchronizationObject {
21 KERNEL_AUTOOBJECT_TRAITS(KClientPort, KSynchronizationObject);
22
23public:
24 explicit KClientPort(KernelCore& kernel);
25 virtual ~KClientPort() override;
26
27 void Initialize(KPort* parent_, s32 max_sessions_, std::string&& name_);
28 void OnSessionFinalized();
29 void OnServerClosed();
30
31 const KPort* GetParent() const {
32 return parent;
33 }
34
35 s32 GetNumSessions() const {
36 return num_sessions;
37 }
38 s32 GetPeakSessions() const {
39 return peak_sessions;
40 }
41 s32 GetMaxSessions() const {
42 return max_sessions;
43 }
44
45 bool IsLight() const;
46 bool IsServerClosed() const;
47
48 // Overridden virtual functions.
49 virtual void Destroy() override;
50 virtual bool IsSignaled() const override;
51
52 ResultCode CreateSession(KClientSession** out);
53
54private:
55 std::atomic<s32> num_sessions{};
56 std::atomic<s32> peak_sessions{};
57 s32 max_sessions{};
58 KPort* parent{};
59};
60
61} // namespace Kernel
diff --git a/src/core/hle/kernel/k_client_session.cpp b/src/core/hle/kernel/k_client_session.cpp
new file mode 100644
index 000000000..0618dc246
--- /dev/null
+++ b/src/core/hle/kernel/k_client_session.cpp
@@ -0,0 +1,31 @@
1// Copyright 2021 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "core/hle/kernel/hle_ipc.h"
6#include "core/hle/kernel/k_client_session.h"
7#include "core/hle/kernel/k_server_session.h"
8#include "core/hle/kernel/k_session.h"
9#include "core/hle/kernel/k_thread.h"
10#include "core/hle/kernel/svc_results.h"
11#include "core/hle/result.h"
12
13namespace Kernel {
14
15KClientSession::KClientSession(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {}
16KClientSession::~KClientSession() = default;
17
18void KClientSession::Destroy() {
19 parent->OnClientClosed();
20 parent->Close();
21}
22
23void KClientSession::OnServerClosed() {}
24
25ResultCode KClientSession::SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
26 Core::Timing::CoreTiming& core_timing) {
27 // Signal the server session that new data is available
28 return parent->GetServerSession().HandleSyncRequest(thread, memory, core_timing);
29}
30
31} // namespace Kernel
diff --git a/src/core/hle/kernel/k_client_session.h b/src/core/hle/kernel/k_client_session.h
new file mode 100644
index 000000000..6476a588b
--- /dev/null
+++ b/src/core/hle/kernel/k_client_session.h
@@ -0,0 +1,61 @@
1// Copyright 2021 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8#include <string>
9
10#include "core/hle/kernel/k_auto_object.h"
11#include "core/hle/kernel/k_synchronization_object.h"
12#include "core/hle/kernel/slab_helpers.h"
13#include "core/hle/result.h"
14
15union ResultCode;
16
17namespace Core::Memory {
18class Memory;
19}
20
21namespace Core::Timing {
22class CoreTiming;
23}
24
25namespace Kernel {
26
27class KernelCore;
28class KSession;
29class KThread;
30
31class KClientSession final
32 : public KAutoObjectWithSlabHeapAndContainer<KClientSession, KAutoObjectWithList> {
33 KERNEL_AUTOOBJECT_TRAITS(KClientSession, KAutoObject);
34
35public:
36 explicit KClientSession(KernelCore& kernel);
37 virtual ~KClientSession();
38
39 void Initialize(KSession* parent_, std::string&& name_) {
40 // Set member variables.
41 parent = parent_;
42 name = std::move(name_);
43 }
44
45 virtual void Destroy() override;
46 static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
47
48 KSession* GetParent() const {
49 return parent;
50 }
51
52 ResultCode SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
53 Core::Timing::CoreTiming& core_timing);
54
55 void OnServerClosed();
56
57private:
58 KSession* parent{};
59};
60
61} // namespace Kernel
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index 170d8fa0d..f51cf3e7b 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -7,12 +7,13 @@
7#include "core/arm/exclusive_monitor.h" 7#include "core/arm/exclusive_monitor.h"
8#include "core/core.h" 8#include "core/core.h"
9#include "core/hle/kernel/k_condition_variable.h" 9#include "core/hle/kernel/k_condition_variable.h"
10#include "core/hle/kernel/k_linked_list.h"
11#include "core/hle/kernel/k_process.h"
10#include "core/hle/kernel/k_scheduler.h" 12#include "core/hle/kernel/k_scheduler.h"
11#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" 13#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
12#include "core/hle/kernel/k_synchronization_object.h" 14#include "core/hle/kernel/k_synchronization_object.h"
13#include "core/hle/kernel/k_thread.h" 15#include "core/hle/kernel/k_thread.h"
14#include "core/hle/kernel/kernel.h" 16#include "core/hle/kernel/kernel.h"
15#include "core/hle/kernel/process.h"
16#include "core/hle/kernel/svc_common.h" 17#include "core/hle/kernel/svc_common.h"
17#include "core/hle/kernel/svc_results.h" 18#include "core/hle/kernel/svc_results.h"
18#include "core/memory.h" 19#include "core/memory.h"
@@ -107,8 +108,8 @@ ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 val
107 108
108 // Wait for the address. 109 // Wait for the address.
109 { 110 {
110 std::shared_ptr<KThread> owner_thread; 111 KScopedAutoObject<KThread> owner_thread;
111 ASSERT(!owner_thread); 112 ASSERT(owner_thread.IsNull());
112 { 113 {
113 KScopedSchedulerLock sl(kernel); 114 KScopedSchedulerLock sl(kernel);
114 cur_thread->SetSyncedObject(nullptr, RESULT_SUCCESS); 115 cur_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
@@ -126,8 +127,10 @@ ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 val
126 R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), RESULT_SUCCESS); 127 R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), RESULT_SUCCESS);
127 128
128 // Get the lock owner thread. 129 // Get the lock owner thread.
129 owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<KThread>(handle); 130 owner_thread =
130 R_UNLESS(owner_thread, ResultInvalidHandle); 131 kernel.CurrentProcess()->GetHandleTable().GetObjectWithoutPseudoHandle<KThread>(
132 handle);
133 R_UNLESS(owner_thread.IsNotNull(), ResultInvalidHandle);
131 134
132 // Update the lock. 135 // Update the lock.
133 cur_thread->SetAddressKey(addr, value); 136 cur_thread->SetAddressKey(addr, value);
@@ -137,7 +140,7 @@ ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 val
137 cur_thread->SetMutexWaitAddressForDebugging(addr); 140 cur_thread->SetMutexWaitAddressForDebugging(addr);
138 } 141 }
139 } 142 }
140 ASSERT(owner_thread); 143 ASSERT(owner_thread.IsNotNull());
141 } 144 }
142 145
143 // Remove the thread as a waiter from the lock owner. 146 // Remove the thread as a waiter from the lock owner.
@@ -176,19 +179,22 @@ KThread* KConditionVariable::SignalImpl(KThread* thread) {
176 179
177 KThread* thread_to_close = nullptr; 180 KThread* thread_to_close = nullptr;
178 if (can_access) { 181 if (can_access) {
179 if (prev_tag == InvalidHandle) { 182 if (prev_tag == Svc::InvalidHandle) {
180 // If nobody held the lock previously, we're all good. 183 // If nobody held the lock previously, we're all good.
181 thread->SetSyncedObject(nullptr, RESULT_SUCCESS); 184 thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
182 thread->Wakeup(); 185 thread->Wakeup();
183 } else { 186 } else {
184 // Get the previous owner. 187 // Get the previous owner.
185 auto owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<KThread>( 188 KThread* owner_thread = kernel.CurrentProcess()
186 prev_tag & ~Svc::HandleWaitMask); 189 ->GetHandleTable()
190 .GetObjectWithoutPseudoHandle<KThread>(
191 static_cast<Handle>(prev_tag & ~Svc::HandleWaitMask))
192 .ReleasePointerUnsafe();
187 193
188 if (owner_thread) { 194 if (owner_thread) {
189 // Add the thread as a waiter on the owner. 195 // Add the thread as a waiter on the owner.
190 owner_thread->AddWaiter(thread); 196 owner_thread->AddWaiter(thread);
191 thread_to_close = owner_thread.get(); 197 thread_to_close = owner_thread;
192 } else { 198 } else {
193 // The lock was tagged with a thread that doesn't exist. 199 // The lock was tagged with a thread that doesn't exist.
194 thread->SetSyncedObject(nullptr, ResultInvalidState); 200 thread->SetSyncedObject(nullptr, ResultInvalidState);
@@ -208,9 +214,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
208 // Prepare for signaling. 214 // Prepare for signaling.
209 constexpr int MaxThreads = 16; 215 constexpr int MaxThreads = 16;
210 216
211 // TODO(bunnei): This should just be Thread once we implement KAutoObject instead of using 217 KLinkedList<KThread> thread_list{kernel};
212 // std::shared_ptr.
213 std::vector<std::shared_ptr<KThread>> thread_list;
214 std::array<KThread*, MaxThreads> thread_array; 218 std::array<KThread*, MaxThreads> thread_array;
215 s32 num_to_close{}; 219 s32 num_to_close{};
216 220
@@ -228,7 +232,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
228 if (num_to_close < MaxThreads) { 232 if (num_to_close < MaxThreads) {
229 thread_array[num_to_close++] = thread; 233 thread_array[num_to_close++] = thread;
230 } else { 234 } else {
231 thread_list.push_back(SharedFrom(thread)); 235 thread_list.push_back(*thread);
232 } 236 }
233 } 237 }
234 238
@@ -250,8 +254,9 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
250 } 254 }
251 255
252 // Close threads in the list. 256 // Close threads in the list.
253 for (auto it = thread_list.begin(); it != thread_list.end(); it = thread_list.erase(it)) { 257 for (auto it = thread_list.begin(); it != thread_list.end();
254 (*it)->Close(); 258 it = thread_list.erase(kernel, it)) {
259 (*it).Close();
255 } 260 }
256} 261}
257 262
diff --git a/src/core/hle/kernel/k_event.cpp b/src/core/hle/kernel/k_event.cpp
index bb2fa4ad5..986355b78 100644
--- a/src/core/hle/kernel/k_event.cpp
+++ b/src/core/hle/kernel/k_event.cpp
@@ -3,30 +3,53 @@
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include "core/hle/kernel/k_event.h" 5#include "core/hle/kernel/k_event.h"
6#include "core/hle/kernel/k_readable_event.h" 6#include "core/hle/kernel/k_process.h"
7#include "core/hle/kernel/k_writable_event.h" 7#include "core/hle/kernel/k_resource_limit.h"
8 8
9namespace Kernel { 9namespace Kernel {
10 10
11KEvent::KEvent(KernelCore& kernel, std::string&& name) : Object{kernel, std::move(name)} {} 11KEvent::KEvent(KernelCore& kernel)
12 : KAutoObjectWithSlabHeapAndContainer{kernel}, readable_event{kernel}, writable_event{kernel} {}
12 13
13KEvent::~KEvent() = default; 14KEvent::~KEvent() = default;
14 15
15std::shared_ptr<KEvent> KEvent::Create(KernelCore& kernel, std::string&& name) { 16void KEvent::Initialize(std::string&& name_) {
16 return std::make_shared<KEvent>(kernel, std::move(name)); 17 // Increment reference count.
17} 18 // Because reference count is one on creation, this will result
19 // in a reference count of two. Thus, when both readable and
20 // writable events are closed this object will be destroyed.
21 Open();
18 22
19void KEvent::Initialize() {
20 // Create our sub events. 23 // Create our sub events.
21 readable_event = std::make_shared<KReadableEvent>(kernel, GetName() + ":Readable"); 24 KAutoObject::Create(std::addressof(readable_event));
22 writable_event = std::make_shared<KWritableEvent>(kernel, GetName() + ":Writable"); 25 KAutoObject::Create(std::addressof(writable_event));
23 26
24 // Initialize our sub sessions. 27 // Initialize our sub sessions.
25 readable_event->Initialize(this); 28 readable_event.Initialize(this, name_ + ":Readable");
26 writable_event->Initialize(this); 29 writable_event.Initialize(this, name_ + ":Writable");
30
31 // Set our owner process.
32 owner = kernel.CurrentProcess();
33 if (owner) {
34 owner->Open();
35 }
27 36
28 // Mark initialized. 37 // Mark initialized.
38 name = std::move(name_);
29 initialized = true; 39 initialized = true;
30} 40}
31 41
42void KEvent::Finalize() {
43 KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObjectWithList>::Finalize();
44}
45
46void KEvent::PostDestroy(uintptr_t arg) {
47 // Release the event count resource the owner process holds.
48 KProcess* owner = reinterpret_cast<KProcess*>(arg);
49 if (owner) {
50 owner->GetResourceLimit()->Release(LimitableResource::Events, 1);
51 owner->Close();
52 }
53}
54
32} // namespace Kernel 55} // namespace Kernel
diff --git a/src/core/hle/kernel/k_event.h b/src/core/hle/kernel/k_event.h
index 2fb887129..4ca869930 100644
--- a/src/core/hle/kernel/k_event.h
+++ b/src/core/hle/kernel/k_event.h
@@ -4,53 +4,54 @@
4 4
5#pragma once 5#pragma once
6 6
7#include "core/hle/kernel/object.h" 7#include "core/hle/kernel/k_readable_event.h"
8#include "core/hle/kernel/k_writable_event.h"
9#include "core/hle/kernel/slab_helpers.h"
8 10
9namespace Kernel { 11namespace Kernel {
10 12
11class KernelCore; 13class KernelCore;
12class KReadableEvent; 14class KReadableEvent;
13class KWritableEvent; 15class KWritableEvent;
16class KProcess;
14 17
15class KEvent final : public Object { 18class KEvent final : public KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObjectWithList> {
16public: 19 KERNEL_AUTOOBJECT_TRAITS(KEvent, KAutoObject);
17 explicit KEvent(KernelCore& kernel, std::string&& name);
18 ~KEvent() override;
19 20
20 static std::shared_ptr<KEvent> Create(KernelCore& kernel, std::string&& name); 21public:
22 explicit KEvent(KernelCore& kernel);
23 virtual ~KEvent();
21 24
22 void Initialize(); 25 void Initialize(std::string&& name);
23 26
24 void Finalize() override {} 27 virtual void Finalize() override;
25 28
26 std::string GetTypeName() const override { 29 virtual bool IsInitialized() const override {
27 return "KEvent"; 30 return initialized;
28 } 31 }
29 32
30 static constexpr HandleType HANDLE_TYPE = HandleType::Event; 33 virtual uintptr_t GetPostDestroyArgument() const override {
31 HandleType GetHandleType() const override { 34 return reinterpret_cast<uintptr_t>(owner);
32 return HANDLE_TYPE;
33 } 35 }
34 36
35 std::shared_ptr<KReadableEvent>& GetReadableEvent() { 37 static void PostDestroy(uintptr_t arg);
36 return readable_event;
37 }
38 38
39 std::shared_ptr<KWritableEvent>& GetWritableEvent() { 39 virtual KProcess* GetOwner() const override {
40 return writable_event; 40 return owner;
41 } 41 }
42 42
43 const std::shared_ptr<KReadableEvent>& GetReadableEvent() const { 43 KReadableEvent& GetReadableEvent() {
44 return readable_event; 44 return readable_event;
45 } 45 }
46 46
47 const std::shared_ptr<KWritableEvent>& GetWritableEvent() const { 47 KWritableEvent& GetWritableEvent() {
48 return writable_event; 48 return writable_event;
49 } 49 }
50 50
51private: 51private:
52 std::shared_ptr<KReadableEvent> readable_event; 52 KReadableEvent readable_event;
53 std::shared_ptr<KWritableEvent> writable_event; 53 KWritableEvent writable_event;
54 KProcess* owner{};
54 bool initialized{}; 55 bool initialized{};
55}; 56};
56 57
diff --git a/src/core/hle/kernel/k_handle_table.cpp b/src/core/hle/kernel/k_handle_table.cpp
new file mode 100644
index 000000000..0378447f6
--- /dev/null
+++ b/src/core/hle/kernel/k_handle_table.cpp
@@ -0,0 +1,135 @@
1// Copyright 2021 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "core/hle/kernel/k_handle_table.h"
6
7namespace Kernel {
8
9KHandleTable::KHandleTable(KernelCore& kernel_) : kernel{kernel_} {}
10KHandleTable ::~KHandleTable() = default;
11
12ResultCode KHandleTable::Finalize() {
13 // Get the table and clear our record of it.
14 u16 saved_table_size = 0;
15 {
16 KScopedSpinLock lk(m_lock);
17
18 std::swap(m_table_size, saved_table_size);
19 }
20
21 // Close and free all entries.
22 for (size_t i = 0; i < saved_table_size; i++) {
23 if (KAutoObject* obj = m_objects[i]; obj != nullptr) {
24 obj->Close();
25 }
26 }
27
28 return RESULT_SUCCESS;
29}
30
31bool KHandleTable::Remove(Handle handle) {
32 // Don't allow removal of a pseudo-handle.
33 if (Svc::IsPseudoHandle(handle)) {
34 return false;
35 }
36
37 // Handles must not have reserved bits set.
38 const auto handle_pack = HandlePack(handle);
39 if (handle_pack.reserved != 0) {
40 return false;
41 }
42
43 // Find the object and free the entry.
44 KAutoObject* obj = nullptr;
45 {
46 KScopedSpinLock lk(m_lock);
47
48 if (this->IsValidHandle(handle)) {
49 const auto index = handle_pack.index;
50
51 obj = m_objects[index];
52 this->FreeEntry(index);
53 } else {
54 return false;
55 }
56 }
57
58 // Close the object.
59 obj->Close();
60 return true;
61}
62
63ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
64 KScopedSpinLock lk(m_lock);
65
66 // Never exceed our capacity.
67 R_UNLESS(m_count < m_table_size, ResultOutOfHandles);
68
69 // Allocate entry, set output handle.
70 {
71 const auto linear_id = this->AllocateLinearId();
72 const auto index = this->AllocateEntry();
73
74 m_entry_infos[index].info = {.linear_id = linear_id, .type = type};
75 m_objects[index] = obj;
76
77 obj->Open();
78
79 *out_handle = EncodeHandle(static_cast<u16>(index), linear_id);
80 }
81
82 return RESULT_SUCCESS;
83}
84
85ResultCode KHandleTable::Reserve(Handle* out_handle) {
86 KScopedSpinLock lk(m_lock);
87
88 // Never exceed our capacity.
89 R_UNLESS(m_count < m_table_size, ResultOutOfHandles);
90
91 *out_handle = EncodeHandle(static_cast<u16>(this->AllocateEntry()), this->AllocateLinearId());
92 return RESULT_SUCCESS;
93}
94
95void KHandleTable::Unreserve(Handle handle) {
96 KScopedSpinLock lk(m_lock);
97
98 // Unpack the handle.
99 const auto handle_pack = HandlePack(handle);
100 const auto index = handle_pack.index;
101 const auto linear_id = handle_pack.linear_id;
102 const auto reserved = handle_pack.reserved;
103 ASSERT(reserved == 0);
104 ASSERT(linear_id != 0);
105
106 if (index < m_table_size) {
107 // NOTE: This code does not check the linear id.
108 ASSERT(m_objects[index] == nullptr);
109 this->FreeEntry(index);
110 }
111}
112
113void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) {
114 KScopedSpinLock lk(m_lock);
115
116 // Unpack the handle.
117 const auto handle_pack = HandlePack(handle);
118 const auto index = handle_pack.index;
119 const auto linear_id = handle_pack.linear_id;
120 const auto reserved = handle_pack.reserved;
121 ASSERT(reserved == 0);
122 ASSERT(linear_id != 0);
123
124 if (index < m_table_size) {
125 // Set the entry.
126 ASSERT(m_objects[index] == nullptr);
127
128 m_entry_infos[index].info = {.linear_id = static_cast<u16>(linear_id), .type = type};
129 m_objects[index] = obj;
130
131 obj->Open();
132 }
133}
134
135} // namespace Kernel
diff --git a/src/core/hle/kernel/k_handle_table.h b/src/core/hle/kernel/k_handle_table.h
new file mode 100644
index 000000000..ba9dd061d
--- /dev/null
+++ b/src/core/hle/kernel/k_handle_table.h
@@ -0,0 +1,310 @@
1// Copyright 2021 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8
9#include "common/assert.h"
10#include "common/bit_field.h"
11#include "common/bit_util.h"
12#include "common/common_types.h"
13#include "core/hle/kernel/k_auto_object.h"
14#include "core/hle/kernel/k_spin_lock.h"
15#include "core/hle/kernel/k_thread.h"
16#include "core/hle/kernel/kernel.h"
17#include "core/hle/kernel/svc_common.h"
18#include "core/hle/kernel/svc_results.h"
19#include "core/hle/result.h"
20
21namespace Kernel {
22
23class KernelCore;
24
25class KHandleTable {
26 YUZU_NON_COPYABLE(KHandleTable);
27 YUZU_NON_MOVEABLE(KHandleTable);
28
29public:
30 static constexpr size_t MaxTableSize = 1024;
31
32public:
33 explicit KHandleTable(KernelCore& kernel_);
34 ~KHandleTable();
35
36 ResultCode Initialize(s32 size) {
37 R_UNLESS(size <= static_cast<s32>(MaxTableSize), ResultOutOfMemory);
38
39 // Initialize all fields.
40 m_max_count = 0;
41 m_table_size = static_cast<u16>((size <= 0) ? MaxTableSize : size);
42 m_next_linear_id = MinLinearId;
43 m_count = 0;
44 m_free_head_index = -1;
45
46 // Free all entries.
47 for (s32 i = 0; i < static_cast<s32>(m_table_size); ++i) {
48 m_objects[i] = nullptr;
49 m_entry_infos[i].next_free_index = i - 1;
50 m_free_head_index = i;
51 }
52
53 return RESULT_SUCCESS;
54 }
55
56 size_t GetTableSize() const {
57 return m_table_size;
58 }
59 size_t GetCount() const {
60 return m_count;
61 }
62 size_t GetMaxCount() const {
63 return m_max_count;
64 }
65
66 ResultCode Finalize();
67 bool Remove(Handle handle);
68
69 template <typename T = KAutoObject>
70 KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {
71 // Lock and look up in table.
72 KScopedSpinLock lk(m_lock);
73
74 if constexpr (std::is_same_v<T, KAutoObject>) {
75 return this->GetObjectImpl(handle);
76 } else {
77 if (auto* obj = this->GetObjectImpl(handle); obj != nullptr) {
78 return obj->DynamicCast<T*>();
79 } else {
80 return nullptr;
81 }
82 }
83 }
84
85 template <typename T = KAutoObject>
86 KScopedAutoObject<T> GetObject(Handle handle) const {
87 // Handle pseudo-handles.
88 if constexpr (std::derived_from<KProcess, T>) {
89 if (handle == Svc::PseudoHandle::CurrentProcess) {
90 auto* const cur_process = kernel.CurrentProcess();
91 ASSERT(cur_process != nullptr);
92 return cur_process;
93 }
94 } else if constexpr (std::derived_from<KThread, T>) {
95 if (handle == Svc::PseudoHandle::CurrentThread) {
96 auto* const cur_thread = GetCurrentThreadPointer(kernel);
97 ASSERT(cur_thread != nullptr);
98 return cur_thread;
99 }
100 }
101
102 return this->template GetObjectWithoutPseudoHandle<T>(handle);
103 }
104
105 ResultCode Reserve(Handle* out_handle);
106 void Unreserve(Handle handle);
107
108 template <typename T>
109 ResultCode Add(Handle* out_handle, T* obj) {
110 static_assert(std::is_base_of_v<KAutoObject, T>);
111 return this->Add(out_handle, obj, obj->GetTypeObj().GetClassToken());
112 }
113
114 template <typename T>
115 void Register(Handle handle, T* obj) {
116 static_assert(std::is_base_of_v<KAutoObject, T>);
117 return this->Register(handle, obj, obj->GetTypeObj().GetClassToken());
118 }
119
120 template <typename T>
121 bool GetMultipleObjects(T** out, const Handle* handles, size_t num_handles) const {
122 // Try to convert and open all the handles.
123 size_t num_opened;
124 {
125 // Lock the table.
126 KScopedSpinLock lk(m_lock);
127 for (num_opened = 0; num_opened < num_handles; num_opened++) {
128 // Get the current handle.
129 const auto cur_handle = handles[num_opened];
130
131 // Get the object for the current handle.
132 KAutoObject* cur_object = this->GetObjectImpl(cur_handle);
133 if (cur_object == nullptr) {
134 break;
135 }
136
137 // Cast the current object to the desired type.
138 T* cur_t = cur_object->DynamicCast<T*>();
139 if (cur_t == nullptr) {
140 break;
141 }
142
143 // Open a reference to the current object.
144 cur_t->Open();
145 out[num_opened] = cur_t;
146 }
147 }
148
149 // If we converted every object, succeed.
150 if (num_opened == num_handles) {
151 return true;
152 }
153
154 // If we didn't convert entry object, close the ones we opened.
155 for (size_t i = 0; i < num_opened; i++) {
156 out[i]->Close();
157 }
158
159 return false;
160 }
161
162private:
163 ResultCode Add(Handle* out_handle, KAutoObject* obj, u16 type);
164 void Register(Handle handle, KAutoObject* obj, u16 type);
165
166 s32 AllocateEntry() {
167 ASSERT(m_count < m_table_size);
168
169 const auto index = m_free_head_index;
170
171 m_free_head_index = m_entry_infos[index].GetNextFreeIndex();
172
173 m_max_count = std::max(m_max_count, ++m_count);
174
175 return index;
176 }
177
178 void FreeEntry(s32 index) {
179 ASSERT(m_count > 0);
180
181 m_objects[index] = nullptr;
182 m_entry_infos[index].next_free_index = m_free_head_index;
183
184 m_free_head_index = index;
185
186 --m_count;
187 }
188
189 u16 AllocateLinearId() {
190 const u16 id = m_next_linear_id++;
191 if (m_next_linear_id > MaxLinearId) {
192 m_next_linear_id = MinLinearId;
193 }
194 return id;
195 }
196
197 bool IsValidHandle(Handle handle) const {
198 // Unpack the handle.
199 const auto handle_pack = HandlePack(handle);
200 const auto raw_value = handle_pack.raw;
201 const auto index = handle_pack.index;
202 const auto linear_id = handle_pack.linear_id;
203 const auto reserved = handle_pack.reserved;
204 ASSERT(reserved == 0);
205
206 // Validate our indexing information.
207 if (raw_value == 0) {
208 return false;
209 }
210 if (linear_id == 0) {
211 return false;
212 }
213 if (index >= m_table_size) {
214 return false;
215 }
216
217 // Check that there's an object, and our serial id is correct.
218 if (m_objects[index] == nullptr) {
219 return false;
220 }
221 if (m_entry_infos[index].GetLinearId() != linear_id) {
222 return false;
223 }
224
225 return true;
226 }
227
228 KAutoObject* GetObjectImpl(Handle handle) const {
229 // Handles must not have reserved bits set.
230 const auto handle_pack = HandlePack(handle);
231 if (handle_pack.reserved != 0) {
232 return nullptr;
233 }
234
235 if (this->IsValidHandle(handle)) {
236 return m_objects[handle_pack.index];
237 } else {
238 return nullptr;
239 }
240 }
241
242 KAutoObject* GetObjectByIndexImpl(Handle* out_handle, size_t index) const {
243
244 // Index must be in bounds.
245 if (index >= m_table_size) {
246 return nullptr;
247 }
248
249 // Ensure entry has an object.
250 if (KAutoObject* obj = m_objects[index]; obj != nullptr) {
251 *out_handle = EncodeHandle(static_cast<u16>(index), m_entry_infos[index].GetLinearId());
252 return obj;
253 } else {
254 return nullptr;
255 }
256 }
257
258private:
259 union HandlePack {
260 HandlePack() = default;
261 HandlePack(Handle handle) : raw{static_cast<u32>(handle)} {}
262
263 u32 raw;
264 BitField<0, 15, u32> index;
265 BitField<15, 15, u32> linear_id;
266 BitField<30, 2, u32> reserved;
267 };
268
269 static constexpr u16 MinLinearId = 1;
270 static constexpr u16 MaxLinearId = 0x7FFF;
271
272 static constexpr Handle EncodeHandle(u16 index, u16 linear_id) {
273 HandlePack handle{};
274 handle.index.Assign(index);
275 handle.linear_id.Assign(linear_id);
276 handle.reserved.Assign(0);
277 return handle.raw;
278 }
279
280 union EntryInfo {
281 struct {
282 u16 linear_id;
283 u16 type;
284 } info;
285 s32 next_free_index;
286
287 constexpr u16 GetLinearId() const {
288 return info.linear_id;
289 }
290 constexpr u16 GetType() const {
291 return info.type;
292 }
293 constexpr s32 GetNextFreeIndex() const {
294 return next_free_index;
295 }
296 };
297
298private:
299 std::array<EntryInfo, MaxTableSize> m_entry_infos{};
300 std::array<KAutoObject*, MaxTableSize> m_objects{};
301 s32 m_free_head_index{-1};
302 u16 m_table_size{};
303 u16 m_max_count{};
304 u16 m_next_linear_id{MinLinearId};
305 u16 m_count{};
306 mutable KSpinLock m_lock;
307 KernelCore& kernel;
308};
309
310} // namespace Kernel
diff --git a/src/core/hle/kernel/k_linked_list.h b/src/core/hle/kernel/k_linked_list.h
new file mode 100644
index 000000000..500f44685
--- /dev/null
+++ b/src/core/hle/kernel/k_linked_list.h
@@ -0,0 +1,238 @@
1// Copyright 2021 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <boost/intrusive/list.hpp>
8
9#include "common/assert.h"
10#include "core/hle/kernel/slab_helpers.h"
11
12namespace Kernel {
13
14class KernelCore;
15
// Node linking an arbitrary item into a KLinkedList. Nodes are allocated
// from a kernel slab (via KSlabAllocated) and store only a type-erased,
// non-owning pointer to the item they link.
class KLinkedListNode : public boost::intrusive::list_base_hook<>,
                        public KSlabAllocated<KLinkedListNode> {

public:
    KLinkedListNode() = default;

    // Stores the (non-owned) item pointer; called once after slab allocation.
    void Initialize(void* it) {
        m_item = it;
    }

    // Returns the stored item pointer (null if never initialized).
    void* GetItem() const {
        return m_item;
    }

private:
    void* m_item = nullptr;
};
33
34template <typename T>
35class KLinkedList : private boost::intrusive::list<KLinkedListNode> {
36private:
37 using BaseList = boost::intrusive::list<KLinkedListNode>;
38
39public:
40 template <bool Const>
41 class Iterator;
42
43 using value_type = T;
44 using size_type = size_t;
45 using difference_type = ptrdiff_t;
46 using pointer = value_type*;
47 using const_pointer = const value_type*;
48 using reference = value_type&;
49 using const_reference = const value_type&;
50 using iterator = Iterator<false>;
51 using const_iterator = Iterator<true>;
52 using reverse_iterator = std::reverse_iterator<iterator>;
53 using const_reverse_iterator = std::reverse_iterator<const_iterator>;
54
55 template <bool Const>
56 class Iterator {
57 private:
58 using BaseIterator = BaseList::iterator;
59 friend class KLinkedList;
60
61 public:
62 using iterator_category = std::bidirectional_iterator_tag;
63 using value_type = typename KLinkedList::value_type;
64 using difference_type = typename KLinkedList::difference_type;
65 using pointer = std::conditional_t<Const, KLinkedList::const_pointer, KLinkedList::pointer>;
66 using reference =
67 std::conditional_t<Const, KLinkedList::const_reference, KLinkedList::reference>;
68
69 public:
70 explicit Iterator(BaseIterator it) : m_base_it(it) {}
71
72 pointer GetItem() const {
73 return static_cast<pointer>(m_base_it->GetItem());
74 }
75
76 bool operator==(const Iterator& rhs) const {
77 return m_base_it == rhs.m_base_it;
78 }
79
80 bool operator!=(const Iterator& rhs) const {
81 return !(*this == rhs);
82 }
83
84 pointer operator->() const {
85 return this->GetItem();
86 }
87
88 reference operator*() const {
89 return *this->GetItem();
90 }
91
92 Iterator& operator++() {
93 ++m_base_it;
94 return *this;
95 }
96
97 Iterator& operator--() {
98 --m_base_it;
99 return *this;
100 }
101
102 Iterator operator++(int) {
103 const Iterator it{*this};
104 ++(*this);
105 return it;
106 }
107
108 Iterator operator--(int) {
109 const Iterator it{*this};
110 --(*this);
111 return it;
112 }
113
114 operator Iterator<true>() const {
115 return Iterator<true>(m_base_it);
116 }
117
118 private:
119 BaseIterator m_base_it;
120 };
121
122public:
123 constexpr KLinkedList(KernelCore& kernel_) : BaseList(), kernel{kernel_} {}
124
125 ~KLinkedList() {
126 // Erase all elements.
127 for (auto it = this->begin(); it != this->end(); it = this->erase(kernel, it)) {
128 }
129
130 // Ensure we succeeded.
131 ASSERT(this->empty());
132 }
133
134 // Iterator accessors.
135 iterator begin() {
136 return iterator(BaseList::begin());
137 }
138
139 const_iterator begin() const {
140 return const_iterator(BaseList::begin());
141 }
142
143 iterator end() {
144 return iterator(BaseList::end());
145 }
146
147 const_iterator end() const {
148 return const_iterator(BaseList::end());
149 }
150
151 const_iterator cbegin() const {
152 return this->begin();
153 }
154
155 const_iterator cend() const {
156 return this->end();
157 }
158
159 reverse_iterator rbegin() {
160 return reverse_iterator(this->end());
161 }
162
163 const_reverse_iterator rbegin() const {
164 return const_reverse_iterator(this->end());
165 }
166
167 reverse_iterator rend() {
168 return reverse_iterator(this->begin());
169 }
170
171 const_reverse_iterator rend() const {
172 return const_reverse_iterator(this->begin());
173 }
174
175 const_reverse_iterator crbegin() const {
176 return this->rbegin();
177 }
178
179 const_reverse_iterator crend() const {
180 return this->rend();
181 }
182
183 // Content management.
184 using BaseList::empty;
185 using BaseList::size;
186
187 reference back() {
188 return *(--this->end());
189 }
190
191 const_reference back() const {
192 return *(--this->end());
193 }
194
195 reference front() {
196 return *this->begin();
197 }
198
199 const_reference front() const {
200 return *this->begin();
201 }
202
203 iterator insert(const_iterator pos, reference ref) {
204 KLinkedListNode* node = KLinkedListNode::Allocate(kernel);
205 ASSERT(node != nullptr);
206 node->Initialize(std::addressof(ref));
207 return iterator(BaseList::insert(pos.m_base_it, *node));
208 }
209
210 void push_back(reference ref) {
211 this->insert(this->end(), ref);
212 }
213
214 void push_front(reference ref) {
215 this->insert(this->begin(), ref);
216 }
217
218 void pop_back() {
219 this->erase(--this->end());
220 }
221
222 void pop_front() {
223 this->erase(this->begin());
224 }
225
226 iterator erase(KernelCore& kernel, const iterator pos) {
227 KLinkedListNode* freed_node = std::addressof(*pos.m_base_it);
228 iterator ret = iterator(BaseList::erase(pos.m_base_it));
229 KLinkedListNode::Free(kernel, freed_node);
230
231 return ret;
232 }
233
234private:
235 KernelCore& kernel;
236};
237
238} // namespace Kernel
diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h
index c5b9c5e85..a7fdb5fb8 100644
--- a/src/core/hle/kernel/k_memory_block.h
+++ b/src/core/hle/kernel/k_memory_block.h
@@ -134,6 +134,10 @@ enum class KMemoryPermission : u8 {
134}; 134};
135DECLARE_ENUM_FLAG_OPERATORS(KMemoryPermission); 135DECLARE_ENUM_FLAG_OPERATORS(KMemoryPermission);
136 136
// Converts an SVC-facing memory permission to the kernel-internal type.
// Assumes the two enums share identical underlying values (it is a plain
// cast) — TODO confirm against Svc::MemoryPermission's definition.
constexpr KMemoryPermission ConvertToKMemoryPermission(Svc::MemoryPermission perm) {
    return static_cast<KMemoryPermission>(perm);
}
140
137enum class KMemoryAttribute : u8 { 141enum class KMemoryAttribute : u8 {
138 None = 0x00, 142 None = 0x00,
139 Mask = 0x7F, 143 Mask = 0x7F,
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index d09d5ce48..d4ce98ee3 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -11,11 +11,11 @@
11#include "core/hle/kernel/k_memory_block_manager.h" 11#include "core/hle/kernel/k_memory_block_manager.h"
12#include "core/hle/kernel/k_page_linked_list.h" 12#include "core/hle/kernel/k_page_linked_list.h"
13#include "core/hle/kernel/k_page_table.h" 13#include "core/hle/kernel/k_page_table.h"
14#include "core/hle/kernel/k_process.h"
14#include "core/hle/kernel/k_resource_limit.h" 15#include "core/hle/kernel/k_resource_limit.h"
15#include "core/hle/kernel/k_scoped_resource_reservation.h" 16#include "core/hle/kernel/k_scoped_resource_reservation.h"
16#include "core/hle/kernel/k_system_control.h" 17#include "core/hle/kernel/k_system_control.h"
17#include "core/hle/kernel/kernel.h" 18#include "core/hle/kernel/kernel.h"
18#include "core/hle/kernel/process.h"
19#include "core/hle/kernel/svc_results.h" 19#include "core/hle/kernel/svc_results.h"
20#include "core/memory.h" 20#include "core/memory.h"
21 21
@@ -420,7 +420,7 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
420 remaining_size); 420 remaining_size);
421 if (!memory_reservation.Succeeded()) { 421 if (!memory_reservation.Succeeded()) {
422 LOG_ERROR(Kernel, "Could not reserve remaining {:X} bytes", remaining_size); 422 LOG_ERROR(Kernel, "Could not reserve remaining {:X} bytes", remaining_size);
423 return ResultResourceLimitedExceeded; 423 return ResultLimitReached;
424 } 424 }
425 425
426 KPageLinkedList page_linked_list; 426 KPageLinkedList page_linked_list;
@@ -578,7 +578,7 @@ ResultCode KPageTable::Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size) {
578 AddRegionToPages(dst_addr, num_pages, dst_pages); 578 AddRegionToPages(dst_addr, num_pages, dst_pages);
579 579
580 if (!dst_pages.IsEqual(src_pages)) { 580 if (!dst_pages.IsEqual(src_pages)) {
581 return ResultInvalidMemoryRange; 581 return ResultInvalidMemoryRegion;
582 } 582 }
583 583
584 { 584 {
@@ -641,6 +641,45 @@ ResultCode KPageTable::MapPages(VAddr addr, KPageLinkedList& page_linked_list, K
641 return RESULT_SUCCESS; 641 return RESULT_SUCCESS;
642} 642}
643 643
644ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list) {
645 VAddr cur_addr{addr};
646
647 for (const auto& node : page_linked_list.Nodes()) {
648 const std::size_t num_pages{(addr - cur_addr) / PageSize};
649 if (const auto result{
650 Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap)};
651 result.IsError()) {
652 return result;
653 }
654
655 cur_addr += node.GetNumPages() * PageSize;
656 }
657
658 return RESULT_SUCCESS;
659}
660
// Unmaps the pages described by `page_linked_list` from `addr`, validating
// that the whole range belongs to the region for `state`, then updates the
// block manager so the range reads back as free (KMemoryState via Update).
ResultCode KPageTable::UnmapPages(VAddr addr, KPageLinkedList& page_linked_list,
                                  KMemoryState state) {
    std::lock_guard lock{page_table_lock};

    const std::size_t num_pages{page_linked_list.GetNumPages()};
    const std::size_t size{num_pages * PageSize};

    // The target range must lie entirely within the region that `state`
    // addresses belong to.
    if (!CanContain(addr, size, state)) {
        return ResultInvalidCurrentMemory;
    }

    // NOTE(review): rejecting when IsRegionMapped(...) is true looks inverted
    // for an unmap path — one would expect the region to *be* mapped here.
    // Confirm IsRegionMapped's semantics (its body is outside this view).
    if (IsRegionMapped(addr, num_pages * PageSize)) {
        return ResultInvalidCurrentMemory;
    }

    // Perform the actual unmap, propagating any error.
    CASCADE_CODE(UnmapPages(addr, page_linked_list));

    // Record the new state for the range.
    block_manager->Update(addr, num_pages, state, KMemoryPermission::None);

    return RESULT_SUCCESS;
}
682
644ResultCode KPageTable::SetCodeMemoryPermission(VAddr addr, std::size_t size, 683ResultCode KPageTable::SetCodeMemoryPermission(VAddr addr, std::size_t size,
645 KMemoryPermission perm) { 684 KMemoryPermission perm) {
646 685
@@ -790,7 +829,7 @@ ResultVal<VAddr> KPageTable::SetHeapSize(std::size_t size) {
790 829
791 if (!memory_reservation.Succeeded()) { 830 if (!memory_reservation.Succeeded()) {
792 LOG_ERROR(Kernel, "Could not reserve heap extension of size {:X} bytes", delta); 831 LOG_ERROR(Kernel, "Could not reserve heap extension of size {:X} bytes", delta);
793 return ResultResourceLimitedExceeded; 832 return ResultLimitReached;
794 } 833 }
795 834
796 KPageLinkedList page_linked_list; 835 KPageLinkedList page_linked_list;
@@ -1067,7 +1106,7 @@ constexpr std::size_t KPageTable::GetRegionSize(KMemoryState state) const {
1067 } 1106 }
1068} 1107}
1069 1108
1070constexpr bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) const { 1109bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) const {
1071 const VAddr end{addr + size}; 1110 const VAddr end{addr + size};
1072 const VAddr last{end - 1}; 1111 const VAddr last{end - 1};
1073 const VAddr region_start{GetRegionAddress(state)}; 1112 const VAddr region_start{GetRegionAddress(state)};
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 49b824379..8c2cc03eb 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -40,6 +40,7 @@ public:
40 ResultCode Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size); 40 ResultCode Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size);
41 ResultCode MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state, 41 ResultCode MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state,
42 KMemoryPermission perm); 42 KMemoryPermission perm);
43 ResultCode UnmapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state);
43 ResultCode SetCodeMemoryPermission(VAddr addr, std::size_t size, KMemoryPermission perm); 44 ResultCode SetCodeMemoryPermission(VAddr addr, std::size_t size, KMemoryPermission perm);
44 KMemoryInfo QueryInfo(VAddr addr); 45 KMemoryInfo QueryInfo(VAddr addr);
45 ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm); 46 ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm);
@@ -63,6 +64,8 @@ public:
63 return page_table_impl; 64 return page_table_impl;
64 } 65 }
65 66
67 bool CanContain(VAddr addr, std::size_t size, KMemoryState state) const;
68
66private: 69private:
67 enum class OperationType : u32 { 70 enum class OperationType : u32 {
68 Map, 71 Map,
@@ -79,6 +82,7 @@ private:
79 ResultCode InitializeMemoryLayout(VAddr start, VAddr end); 82 ResultCode InitializeMemoryLayout(VAddr start, VAddr end);
80 ResultCode MapPages(VAddr addr, const KPageLinkedList& page_linked_list, 83 ResultCode MapPages(VAddr addr, const KPageLinkedList& page_linked_list,
81 KMemoryPermission perm); 84 KMemoryPermission perm);
85 ResultCode UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list);
82 void MapPhysicalMemory(KPageLinkedList& page_linked_list, VAddr start, VAddr end); 86 void MapPhysicalMemory(KPageLinkedList& page_linked_list, VAddr start, VAddr end);
83 bool IsRegionMapped(VAddr address, u64 size); 87 bool IsRegionMapped(VAddr address, u64 size);
84 bool IsRegionContiguous(VAddr addr, u64 size) const; 88 bool IsRegionContiguous(VAddr addr, u64 size) const;
@@ -92,7 +96,6 @@ private:
92 OperationType operation, PAddr map_addr = 0); 96 OperationType operation, PAddr map_addr = 0);
93 constexpr VAddr GetRegionAddress(KMemoryState state) const; 97 constexpr VAddr GetRegionAddress(KMemoryState state) const;
94 constexpr std::size_t GetRegionSize(KMemoryState state) const; 98 constexpr std::size_t GetRegionSize(KMemoryState state) const;
95 constexpr bool CanContain(VAddr addr, std::size_t size, KMemoryState state) const;
96 99
97 constexpr ResultCode CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, 100 constexpr ResultCode CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
98 KMemoryState state, KMemoryPermission perm_mask, 101 KMemoryState state, KMemoryPermission perm_mask,
@@ -216,8 +219,6 @@ public:
216 constexpr PAddr GetPhysicalAddr(VAddr addr) { 219 constexpr PAddr GetPhysicalAddr(VAddr addr) {
217 return page_table_impl.backing_addr[addr >> PageBits] + addr; 220 return page_table_impl.backing_addr[addr >> PageBits] + addr;
218 } 221 }
219
220private:
221 constexpr bool Contains(VAddr addr) const { 222 constexpr bool Contains(VAddr addr) const {
222 return address_space_start <= addr && addr <= address_space_end - 1; 223 return address_space_start <= addr && addr <= address_space_end - 1;
223 } 224 }
@@ -225,6 +226,8 @@ private:
225 return address_space_start <= addr && addr < addr + size && 226 return address_space_start <= addr && addr < addr + size &&
226 addr + size - 1 <= address_space_end - 1; 227 addr + size - 1 <= address_space_end - 1;
227 } 228 }
229
230private:
228 constexpr bool IsKernel() const { 231 constexpr bool IsKernel() const {
229 return is_kernel; 232 return is_kernel;
230 } 233 }
diff --git a/src/core/hle/kernel/k_port.cpp b/src/core/hle/kernel/k_port.cpp
new file mode 100644
index 000000000..734aa2a8c
--- /dev/null
+++ b/src/core/hle/kernel/k_port.cpp
@@ -0,0 +1,68 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "core/hle/kernel/hle_ipc.h"
6#include "core/hle/kernel/k_port.h"
7#include "core/hle/kernel/k_scheduler.h"
8#include "core/hle/kernel/svc_results.h"
9
10namespace Kernel {
11
// Constructs the port and its embedded server/client endpoints against the
// same kernel instance; real setup happens later in Initialize().
KPort::KPort(KernelCore& kernel)
    : KAutoObjectWithSlabHeapAndContainer{kernel}, server{kernel}, client{kernel} {}

KPort::~KPort() = default;
16
// Second-phase initialization: references the port, brings the embedded
// server/client endpoints to life, and moves the state machine to Normal.
// max_sessions_ is forwarded to the client endpoint — presumably a cap on
// concurrent sessions; confirm against KClientPort::Initialize.
void KPort::Initialize(s32 max_sessions_, bool is_light_, const std::string& name_) {
    // Open a new reference count to the initialized port.
    Open();

    // Create and initialize our server/client pair.
    KAutoObject::Create(std::addressof(server));
    KAutoObject::Create(std::addressof(client));
    server.Initialize(this, name_ + ":Server");
    client.Initialize(this, max_sessions_, name_ + ":Client");

    // Set our member variables.
    is_light = is_light_;
    name = name_;
    state = State::Normal;
}
32
33void KPort::OnClientClosed() {
34 KScopedSchedulerLock sl{kernel};
35
36 if (state == State::Normal) {
37 state = State::ClientClosed;
38 }
39}
40
41void KPort::OnServerClosed() {
42 KScopedSchedulerLock sl{kernel};
43
44 if (state == State::Normal) {
45 state = State::ServerClosed;
46 }
47}
48
49bool KPort::IsServerClosed() const {
50 KScopedSchedulerLock sl{kernel};
51 return state == State::ServerClosed;
52}
53
54ResultCode KPort::EnqueueSession(KServerSession* session) {
55 KScopedSchedulerLock sl{kernel};
56
57 R_UNLESS(state == State::Normal, ResultPortClosed);
58
59 if (server.HasHLEHandler()) {
60 server.GetHLEHandler()->ClientConnected(session);
61 } else {
62 server.EnqueueSession(session);
63 }
64
65 return RESULT_SUCCESS;
66}
67
68} // namespace Kernel
diff --git a/src/core/hle/kernel/k_port.h b/src/core/hle/kernel/k_port.h
new file mode 100644
index 000000000..f1b2838d8
--- /dev/null
+++ b/src/core/hle/kernel/k_port.h
@@ -0,0 +1,69 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8#include <string>
9
10#include "common/common_types.h"
11#include "core/hle/kernel/k_client_port.h"
12#include "core/hle/kernel/k_server_port.h"
13#include "core/hle/kernel/slab_helpers.h"
14#include "core/hle/result.h"
15
16namespace Kernel {
17
18class KServerSession;
19
// Kernel IPC port: a server/client endpoint pair sharing one lifetime state
// machine. Slab-allocated and registered in the auto-object container.
class KPort final : public KAutoObjectWithSlabHeapAndContainer<KPort, KAutoObjectWithList> {
    KERNEL_AUTOOBJECT_TRAITS(KPort, KAutoObject);

public:
    explicit KPort(KernelCore& kernel);
    virtual ~KPort();

    // No extra cleanup is needed when the slab object is destroyed.
    static void PostDestroy([[maybe_unused]] uintptr_t arg) {}

    // Second-phase initialization; see k_port.cpp for the sequence.
    void Initialize(s32 max_sessions_, bool is_light_, const std::string& name_);
    // State transitions driven by either endpoint closing.
    void OnClientClosed();
    void OnServerClosed();

    // Whether this is a "light" port — presumably mirrors the hardware
    // kernel's light-session concept; confirm at call sites.
    bool IsLight() const {
        return is_light;
    }

    bool IsServerClosed() const;

    // Queues a new session on the server side (or hands it to an installed
    // HLE handler); fails once either side has closed.
    ResultCode EnqueueSession(KServerSession* session);

    // Endpoint accessors (const and mutable).
    KClientPort& GetClientPort() {
        return client;
    }
    KServerPort& GetServerPort() {
        return server;
    }
    const KClientPort& GetClientPort() const {
        return client;
    }
    const KServerPort& GetServerPort() const {
        return server;
    }

private:
    // Lifetime state shared by both endpoints.
    enum class State : u8 {
        Invalid = 0,
        Normal = 1,
        ClientClosed = 2,
        ServerClosed = 3,
    };

private:
    KServerPort server;
    KClientPort client;
    State state{State::Invalid};
    bool is_light{};
};
68
69} // namespace Kernel
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/k_process.cpp
index e35deb8e2..174318180 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -17,13 +17,14 @@
17#include "core/hle/kernel/code_set.h" 17#include "core/hle/kernel/code_set.h"
18#include "core/hle/kernel/k_memory_block_manager.h" 18#include "core/hle/kernel/k_memory_block_manager.h"
19#include "core/hle/kernel/k_page_table.h" 19#include "core/hle/kernel/k_page_table.h"
20#include "core/hle/kernel/k_process.h"
20#include "core/hle/kernel/k_resource_limit.h" 21#include "core/hle/kernel/k_resource_limit.h"
21#include "core/hle/kernel/k_scheduler.h" 22#include "core/hle/kernel/k_scheduler.h"
22#include "core/hle/kernel/k_scoped_resource_reservation.h" 23#include "core/hle/kernel/k_scoped_resource_reservation.h"
24#include "core/hle/kernel/k_shared_memory.h"
23#include "core/hle/kernel/k_slab_heap.h" 25#include "core/hle/kernel/k_slab_heap.h"
24#include "core/hle/kernel/k_thread.h" 26#include "core/hle/kernel/k_thread.h"
25#include "core/hle/kernel/kernel.h" 27#include "core/hle/kernel/kernel.h"
26#include "core/hle/kernel/process.h"
27#include "core/hle/kernel/svc_results.h" 28#include "core/hle/kernel/svc_results.h"
28#include "core/hle/lock.h" 29#include "core/hle/lock.h"
29#include "core/memory.h" 30#include "core/memory.h"
@@ -37,17 +38,20 @@ namespace {
37 * @param owner_process The parent process for the main thread 38 * @param owner_process The parent process for the main thread
38 * @param priority The priority to give the main thread 39 * @param priority The priority to give the main thread
39 */ 40 */
40void SetupMainThread(Core::System& system, Process& owner_process, u32 priority, VAddr stack_top) { 41void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority, VAddr stack_top) {
41 const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart(); 42 const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart();
42 ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::Threads, 1)); 43 ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::Threads, 1));
43 auto thread_res =
44 KThread::CreateUserThread(system, ThreadType::User, "main", entry_point, priority, 0,
45 owner_process.GetIdealCoreId(), stack_top, &owner_process);
46 44
47 std::shared_ptr<KThread> thread = std::move(thread_res).Unwrap(); 45 KThread* thread = KThread::Create(system.Kernel());
46 ASSERT(KThread::InitializeUserThread(system, thread, entry_point, 0, stack_top, priority,
47 owner_process.GetIdealCoreId(), &owner_process)
48 .IsSuccess());
48 49
49 // Register 1 must be a handle to the main thread 50 // Register 1 must be a handle to the main thread
50 const Handle thread_handle = owner_process.GetHandleTable().Create(thread).Unwrap(); 51 Handle thread_handle{};
52 owner_process.GetHandleTable().Add(&thread_handle, thread);
53
54 thread->SetName("main");
51 thread->GetContext32().cpu_registers[0] = 0; 55 thread->GetContext32().cpu_registers[0] = 0;
52 thread->GetContext64().cpu_registers[0] = 0; 56 thread->GetContext64().cpu_registers[0] = 0;
53 thread->GetContext32().cpu_registers[1] = thread_handle; 57 thread->GetContext32().cpu_registers[1] = thread_handle;
@@ -114,10 +118,10 @@ private:
114 std::bitset<num_slot_entries> is_slot_used; 118 std::bitset<num_slot_entries> is_slot_used;
115}; 119};
116 120
117std::shared_ptr<Process> Process::Create(Core::System& system, std::string name, ProcessType type) { 121ResultCode KProcess::Initialize(KProcess* process, Core::System& system, std::string name,
122 ProcessType type) {
118 auto& kernel = system.Kernel(); 123 auto& kernel = system.Kernel();
119 124
120 std::shared_ptr<Process> process = std::make_shared<Process>(system);
121 process->name = std::move(name); 125 process->name = std::move(name);
122 126
123 process->resource_limit = kernel.GetSystemResourceLimit(); 127 process->resource_limit = kernel.GetSystemResourceLimit();
@@ -126,6 +130,7 @@ std::shared_ptr<Process> Process::Create(Core::System& system, std::string name,
126 process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID() 130 process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
127 : kernel.CreateNewUserProcessID(); 131 : kernel.CreateNewUserProcessID();
128 process->capabilities.InitializeForMetadatalessProcess(); 132 process->capabilities.InitializeForMetadatalessProcess();
133 process->is_initialized = true;
129 134
130 std::mt19937 rng(Settings::values.rng_seed.GetValue().value_or(std::time(nullptr))); 135 std::mt19937 rng(Settings::values.rng_seed.GetValue().value_or(std::time(nullptr)));
131 std::uniform_int_distribution<u64> distribution; 136 std::uniform_int_distribution<u64> distribution;
@@ -133,14 +138,18 @@ std::shared_ptr<Process> Process::Create(Core::System& system, std::string name,
133 [&] { return distribution(rng); }); 138 [&] { return distribution(rng); });
134 139
135 kernel.AppendNewProcess(process); 140 kernel.AppendNewProcess(process);
136 return process; 141
142 // Open a reference to the resource limit.
143 process->resource_limit->Open();
144
145 return RESULT_SUCCESS;
137} 146}
138 147
139std::shared_ptr<KResourceLimit> Process::GetResourceLimit() const { 148KResourceLimit* KProcess::GetResourceLimit() const {
140 return resource_limit; 149 return resource_limit;
141} 150}
142 151
143void Process::IncrementThreadCount() { 152void KProcess::IncrementThreadCount() {
144 ASSERT(num_threads >= 0); 153 ASSERT(num_threads >= 0);
145 num_created_threads++; 154 num_created_threads++;
146 155
@@ -149,7 +158,7 @@ void Process::IncrementThreadCount() {
149 } 158 }
150} 159}
151 160
152void Process::DecrementThreadCount() { 161void KProcess::DecrementThreadCount() {
153 ASSERT(num_threads > 0); 162 ASSERT(num_threads > 0);
154 163
155 if (const auto count = --num_threads; count == 0) { 164 if (const auto count = --num_threads; count == 0) {
@@ -157,31 +166,34 @@ void Process::DecrementThreadCount() {
157 } 166 }
158} 167}
159 168
160u64 Process::GetTotalPhysicalMemoryAvailable() const { 169u64 KProcess::GetTotalPhysicalMemoryAvailable() const {
161 const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) + 170 const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) +
162 page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size + 171 page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size +
163 main_thread_stack_size}; 172 main_thread_stack_size};
164 ASSERT(capacity == kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application)); 173 if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application);
174 capacity != pool_size) {
175 LOG_WARNING(Kernel, "capacity {} != application pool size {}", capacity, pool_size);
176 }
165 if (capacity < memory_usage_capacity) { 177 if (capacity < memory_usage_capacity) {
166 return capacity; 178 return capacity;
167 } 179 }
168 return memory_usage_capacity; 180 return memory_usage_capacity;
169} 181}
170 182
171u64 Process::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const { 183u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const {
172 return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize(); 184 return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize();
173} 185}
174 186
175u64 Process::GetTotalPhysicalMemoryUsed() const { 187u64 KProcess::GetTotalPhysicalMemoryUsed() const {
176 return image_size + main_thread_stack_size + page_table->GetTotalHeapSize() + 188 return image_size + main_thread_stack_size + page_table->GetTotalHeapSize() +
177 GetSystemResourceSize(); 189 GetSystemResourceSize();
178} 190}
179 191
180u64 Process::GetTotalPhysicalMemoryUsedWithoutSystemResource() const { 192u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() const {
181 return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage(); 193 return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage();
182} 194}
183 195
184bool Process::ReleaseUserException(KThread* thread) { 196bool KProcess::ReleaseUserException(KThread* thread) {
185 KScopedSchedulerLock sl{kernel}; 197 KScopedSchedulerLock sl{kernel};
186 198
187 if (exception_thread == thread) { 199 if (exception_thread == thread) {
@@ -206,7 +218,7 @@ bool Process::ReleaseUserException(KThread* thread) {
206 } 218 }
207} 219}
208 220
209void Process::PinCurrentThread() { 221void KProcess::PinCurrentThread() {
210 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 222 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
211 223
212 // Get the current thread. 224 // Get the current thread.
@@ -221,7 +233,7 @@ void Process::PinCurrentThread() {
221 KScheduler::SetSchedulerUpdateNeeded(kernel); 233 KScheduler::SetSchedulerUpdateNeeded(kernel);
222} 234}
223 235
224void Process::UnpinCurrentThread() { 236void KProcess::UnpinCurrentThread() {
225 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 237 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
226 238
227 // Get the current thread. 239 // Get the current thread.
@@ -236,15 +248,39 @@ void Process::UnpinCurrentThread() {
236 KScheduler::SetSchedulerUpdateNeeded(kernel); 248 KScheduler::SetSchedulerUpdateNeeded(kernel);
237} 249}
238 250
239void Process::RegisterThread(const KThread* thread) { 251ResultCode KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
252 [[maybe_unused]] size_t size) {
253 // Lock ourselves, to prevent concurrent access.
254 KScopedLightLock lk(state_lock);
255
256 // TODO(bunnei): Manage KSharedMemoryInfo list here.
257
258 // Open a reference to the shared memory.
259 shmem->Open();
260
261 return RESULT_SUCCESS;
262}
263
// Balances AddSharedMemory: drops the reference that was opened there.
// address/size are currently unused because the per-process shared-memory
// info list is not yet tracked (see TODO below).
void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
                                  [[maybe_unused]] size_t size) {
    // Lock ourselves, to prevent concurrent access.
    KScopedLightLock lk(state_lock);

    // TODO(bunnei): Manage KSharedMemoryInfo list here.

    // Close a reference to the shared memory.
    shmem->Close();
}
274
275void KProcess::RegisterThread(const KThread* thread) {
240 thread_list.push_back(thread); 276 thread_list.push_back(thread);
241} 277}
242 278
243void Process::UnregisterThread(const KThread* thread) { 279void KProcess::UnregisterThread(const KThread* thread) {
244 thread_list.remove(thread); 280 thread_list.remove(thread);
245} 281}
246 282
247ResultCode Process::Reset() { 283ResultCode KProcess::Reset() {
248 // Lock the process and the scheduler. 284 // Lock the process and the scheduler.
249 KScopedLightLock lk(state_lock); 285 KScopedLightLock lk(state_lock);
250 KScopedSchedulerLock sl{kernel}; 286 KScopedSchedulerLock sl{kernel};
@@ -258,8 +294,8 @@ ResultCode Process::Reset() {
258 return RESULT_SUCCESS; 294 return RESULT_SUCCESS;
259} 295}
260 296
261ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, 297ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
262 std::size_t code_size) { 298 std::size_t code_size) {
263 program_id = metadata.GetTitleID(); 299 program_id = metadata.GetTitleID();
264 ideal_core = metadata.GetMainThreadCore(); 300 ideal_core = metadata.GetMainThreadCore();
265 is_64bit_process = metadata.Is64BitProgram(); 301 is_64bit_process = metadata.Is64BitProgram();
@@ -271,7 +307,7 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
271 if (!memory_reservation.Succeeded()) { 307 if (!memory_reservation.Succeeded()) {
272 LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes", 308 LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes",
273 code_size + system_resource_size); 309 code_size + system_resource_size);
274 return ResultResourceLimitedExceeded; 310 return ResultLimitReached;
275 } 311 }
276 // Initialize proces address space 312 // Initialize proces address space
277 if (const ResultCode result{ 313 if (const ResultCode result{
@@ -318,10 +354,10 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
318 tls_region_address = CreateTLSRegion(); 354 tls_region_address = CreateTLSRegion();
319 memory_reservation.Commit(); 355 memory_reservation.Commit();
320 356
321 return handle_table.SetSize(capabilities.GetHandleTableSize()); 357 return handle_table.Initialize(capabilities.GetHandleTableSize());
322} 358}
323 359
324void Process::Run(s32 main_thread_priority, u64 stack_size) { 360void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
325 AllocateMainThreadStack(stack_size); 361 AllocateMainThreadStack(stack_size);
326 resource_limit->Reserve(LimitableResource::Threads, 1); 362 resource_limit->Reserve(LimitableResource::Threads, 1);
327 resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size); 363 resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size);
@@ -331,18 +367,18 @@ void Process::Run(s32 main_thread_priority, u64 stack_size) {
331 367
332 ChangeStatus(ProcessStatus::Running); 368 ChangeStatus(ProcessStatus::Running);
333 369
334 SetupMainThread(system, *this, main_thread_priority, main_thread_stack_top); 370 SetupMainThread(kernel.System(), *this, main_thread_priority, main_thread_stack_top);
335} 371}
336 372
337void Process::PrepareForTermination() { 373void KProcess::PrepareForTermination() {
338 ChangeStatus(ProcessStatus::Exiting); 374 ChangeStatus(ProcessStatus::Exiting);
339 375
340 const auto stop_threads = [this](const std::vector<std::shared_ptr<KThread>>& thread_list) { 376 const auto stop_threads = [this](const std::vector<KThread*>& thread_list) {
341 for (auto& thread : thread_list) { 377 for (auto& thread : thread_list) {
342 if (thread->GetOwnerProcess() != this) 378 if (thread->GetOwnerProcess() != this)
343 continue; 379 continue;
344 380
345 if (thread.get() == kernel.CurrentScheduler()->GetCurrentThread()) 381 if (thread == kernel.CurrentScheduler()->GetCurrentThread())
346 continue; 382 continue;
347 383
348 // TODO(Subv): When are the other running/ready threads terminated? 384 // TODO(Subv): When are the other running/ready threads terminated?
@@ -353,7 +389,7 @@ void Process::PrepareForTermination() {
353 } 389 }
354 }; 390 };
355 391
356 stop_threads(system.GlobalSchedulerContext().GetThreadList()); 392 stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList());
357 393
358 FreeTLSRegion(tls_region_address); 394 FreeTLSRegion(tls_region_address);
359 tls_region_address = 0; 395 tls_region_address = 0;
@@ -366,6 +402,16 @@ void Process::PrepareForTermination() {
366 ChangeStatus(ProcessStatus::Exited); 402 ChangeStatus(ProcessStatus::Exited);
367} 403}
368 404
405void KProcess::Finalize() {
406 // Release memory to the resource limit.
407 if (resource_limit != nullptr) {
408 resource_limit->Close();
409 }
410
411 // Perform inherited finalization.
412 KAutoObjectWithSlabHeapAndContainer<KProcess, KSynchronizationObject>::Finalize();
413}
414
369/** 415/**
370 * Attempts to find a TLS page that contains a free slot for 416 * Attempts to find a TLS page that contains a free slot for
371 * use by a thread. 417 * use by a thread.
@@ -379,8 +425,8 @@ static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) {
379 [](const auto& page) { return page.HasAvailableSlots(); }); 425 [](const auto& page) { return page.HasAvailableSlots(); });
380} 426}
381 427
382VAddr Process::CreateTLSRegion() { 428VAddr KProcess::CreateTLSRegion() {
383 KScopedSchedulerLock lock(system.Kernel()); 429 KScopedSchedulerLock lock(kernel);
384 if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)}; 430 if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)};
385 tls_page_iter != tls_pages.cend()) { 431 tls_page_iter != tls_pages.cend()) {
386 return *tls_page_iter->ReserveSlot(); 432 return *tls_page_iter->ReserveSlot();
@@ -391,7 +437,7 @@ VAddr Process::CreateTLSRegion() {
391 437
392 const VAddr start{page_table->GetKernelMapRegionStart()}; 438 const VAddr start{page_table->GetKernelMapRegionStart()};
393 const VAddr size{page_table->GetKernelMapRegionEnd() - start}; 439 const VAddr size{page_table->GetKernelMapRegionEnd() - start};
394 const PAddr tls_map_addr{system.DeviceMemory().GetPhysicalAddr(tls_page_ptr)}; 440 const PAddr tls_map_addr{kernel.System().DeviceMemory().GetPhysicalAddr(tls_page_ptr)};
395 const VAddr tls_page_addr{page_table 441 const VAddr tls_page_addr{page_table
396 ->AllocateAndMapMemory(1, PageSize, true, start, size / PageSize, 442 ->AllocateAndMapMemory(1, PageSize, true, start, size / PageSize,
397 KMemoryState::ThreadLocal, 443 KMemoryState::ThreadLocal,
@@ -410,8 +456,8 @@ VAddr Process::CreateTLSRegion() {
410 return *reserve_result; 456 return *reserve_result;
411} 457}
412 458
413void Process::FreeTLSRegion(VAddr tls_address) { 459void KProcess::FreeTLSRegion(VAddr tls_address) {
414 KScopedSchedulerLock lock(system.Kernel()); 460 KScopedSchedulerLock lock(kernel);
415 const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE); 461 const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE);
416 auto iter = 462 auto iter =
417 std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) { 463 std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) {
@@ -425,33 +471,34 @@ void Process::FreeTLSRegion(VAddr tls_address) {
425 iter->ReleaseSlot(tls_address); 471 iter->ReleaseSlot(tls_address);
426} 472}
427 473
428void Process::LoadModule(CodeSet code_set, VAddr base_addr) { 474void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
429 std::lock_guard lock{HLE::g_hle_lock}; 475 std::lock_guard lock{HLE::g_hle_lock};
430 const auto ReprotectSegment = [&](const CodeSet::Segment& segment, 476 const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
431 KMemoryPermission permission) { 477 KMemoryPermission permission) {
432 page_table->SetCodeMemoryPermission(segment.addr + base_addr, segment.size, permission); 478 page_table->SetCodeMemoryPermission(segment.addr + base_addr, segment.size, permission);
433 }; 479 };
434 480
435 system.Memory().WriteBlock(*this, base_addr, code_set.memory.data(), code_set.memory.size()); 481 kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(),
482 code_set.memory.size());
436 483
437 ReprotectSegment(code_set.CodeSegment(), KMemoryPermission::ReadAndExecute); 484 ReprotectSegment(code_set.CodeSegment(), KMemoryPermission::ReadAndExecute);
438 ReprotectSegment(code_set.RODataSegment(), KMemoryPermission::Read); 485 ReprotectSegment(code_set.RODataSegment(), KMemoryPermission::Read);
439 ReprotectSegment(code_set.DataSegment(), KMemoryPermission::ReadAndWrite); 486 ReprotectSegment(code_set.DataSegment(), KMemoryPermission::ReadAndWrite);
440} 487}
441 488
442bool Process::IsSignaled() const { 489bool KProcess::IsSignaled() const {
443 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 490 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
444 return is_signaled; 491 return is_signaled;
445} 492}
446 493
447Process::Process(Core::System& system) 494KProcess::KProcess(KernelCore& kernel)
448 : KSynchronizationObject{system.Kernel()}, page_table{std::make_unique<KPageTable>(system)}, 495 : KAutoObjectWithSlabHeapAndContainer{kernel},
449 handle_table{system.Kernel()}, address_arbiter{system}, condition_var{system}, 496 page_table{std::make_unique<KPageTable>(kernel.System())}, handle_table{kernel},
450 state_lock{system.Kernel()}, system{system} {} 497 address_arbiter{kernel.System()}, condition_var{kernel.System()}, state_lock{kernel} {}
451 498
452Process::~Process() = default; 499KProcess::~KProcess() = default;
453 500
454void Process::ChangeStatus(ProcessStatus new_status) { 501void KProcess::ChangeStatus(ProcessStatus new_status) {
455 if (status == new_status) { 502 if (status == new_status) {
456 return; 503 return;
457 } 504 }
@@ -461,7 +508,7 @@ void Process::ChangeStatus(ProcessStatus new_status) {
461 NotifyAvailable(); 508 NotifyAvailable();
462} 509}
463 510
464ResultCode Process::AllocateMainThreadStack(std::size_t stack_size) { 511ResultCode KProcess::AllocateMainThreadStack(std::size_t stack_size) {
465 ASSERT(stack_size); 512 ASSERT(stack_size);
466 513
467 // The kernel always ensures that the given stack size is page aligned. 514 // The kernel always ensures that the given stack size is page aligned.
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/k_process.h
index 45eefb90e..62ab26b05 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -11,11 +11,13 @@
11#include <unordered_map> 11#include <unordered_map>
12#include <vector> 12#include <vector>
13#include "common/common_types.h" 13#include "common/common_types.h"
14#include "core/hle/kernel/handle_table.h"
15#include "core/hle/kernel/k_address_arbiter.h" 14#include "core/hle/kernel/k_address_arbiter.h"
15#include "core/hle/kernel/k_auto_object.h"
16#include "core/hle/kernel/k_condition_variable.h" 16#include "core/hle/kernel/k_condition_variable.h"
17#include "core/hle/kernel/k_handle_table.h"
17#include "core/hle/kernel/k_synchronization_object.h" 18#include "core/hle/kernel/k_synchronization_object.h"
18#include "core/hle/kernel/process_capability.h" 19#include "core/hle/kernel/process_capability.h"
20#include "core/hle/kernel/slab_helpers.h"
19#include "core/hle/result.h" 21#include "core/hle/result.h"
20 22
21namespace Core { 23namespace Core {
@@ -60,10 +62,13 @@ enum class ProcessStatus {
60 DebugBreak, 62 DebugBreak,
61}; 63};
62 64
63class Process final : public KSynchronizationObject { 65class KProcess final
66 : public KAutoObjectWithSlabHeapAndContainer<KProcess, KSynchronizationObject> {
67 KERNEL_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject);
68
64public: 69public:
65 explicit Process(Core::System& system); 70 explicit KProcess(KernelCore& kernel);
66 ~Process() override; 71 ~KProcess() override;
67 72
68 enum : u64 { 73 enum : u64 {
69 /// Lowest allowed process ID for a kernel initial process. 74 /// Lowest allowed process ID for a kernel initial process.
@@ -85,20 +90,8 @@ public:
85 90
86 static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4; 91 static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4;
87 92
88 static std::shared_ptr<Process> Create(Core::System& system, std::string name, 93 static ResultCode Initialize(KProcess* process, Core::System& system, std::string name,
89 ProcessType type); 94 ProcessType type);
90
91 std::string GetTypeName() const override {
92 return "Process";
93 }
94 std::string GetName() const override {
95 return name;
96 }
97
98 static constexpr HandleType HANDLE_TYPE = HandleType::Process;
99 HandleType GetHandleType() const override {
100 return HANDLE_TYPE;
101 }
102 95
103 /// Gets a reference to the process' page table. 96 /// Gets a reference to the process' page table.
104 KPageTable& PageTable() { 97 KPageTable& PageTable() {
@@ -111,12 +104,12 @@ public:
111 } 104 }
112 105
113 /// Gets a reference to the process' handle table. 106 /// Gets a reference to the process' handle table.
114 HandleTable& GetHandleTable() { 107 KHandleTable& GetHandleTable() {
115 return handle_table; 108 return handle_table;
116 } 109 }
117 110
118 /// Gets a const reference to the process' handle table. 111 /// Gets a const reference to the process' handle table.
119 const HandleTable& GetHandleTable() const { 112 const KHandleTable& GetHandleTable() const {
120 return handle_table; 113 return handle_table;
121 } 114 }
122 115
@@ -167,7 +160,7 @@ public:
167 } 160 }
168 161
169 /// Gets the resource limit descriptor for this process 162 /// Gets the resource limit descriptor for this process
170 std::shared_ptr<KResourceLimit> GetResourceLimit() const; 163 KResourceLimit* GetResourceLimit() const;
171 164
172 /// Gets the ideal CPU core ID for this process 165 /// Gets the ideal CPU core ID for this process
173 u8 GetIdealCoreId() const { 166 u8 GetIdealCoreId() const {
@@ -338,9 +331,19 @@ public:
338 331
339 void LoadModule(CodeSet code_set, VAddr base_addr); 332 void LoadModule(CodeSet code_set, VAddr base_addr);
340 333
341 bool IsSignaled() const override; 334 virtual bool IsInitialized() const override {
335 return is_initialized;
336 }
337
338 static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
339
340 virtual void Finalize();
341
342 virtual u64 GetId() const override final {
343 return GetProcessID();
344 }
342 345
343 void Finalize() override {} 346 virtual bool IsSignaled() const override;
344 347
345 void PinCurrentThread(); 348 void PinCurrentThread();
346 void UnpinCurrentThread(); 349 void UnpinCurrentThread();
@@ -349,6 +352,9 @@ public:
349 return state_lock; 352 return state_lock;
350 } 353 }
351 354
355 ResultCode AddSharedMemory(KSharedMemory* shmem, VAddr address, size_t size);
356 void RemoveSharedMemory(KSharedMemory* shmem, VAddr address, size_t size);
357
352 /////////////////////////////////////////////////////////////////////////////////////////////// 358 ///////////////////////////////////////////////////////////////////////////////////////////////
353 // Thread-local storage management 359 // Thread-local storage management
354 360
@@ -399,7 +405,7 @@ private:
399 u32 system_resource_size = 0; 405 u32 system_resource_size = 0;
400 406
401 /// Resource limit descriptor for this process 407 /// Resource limit descriptor for this process
402 std::shared_ptr<KResourceLimit> resource_limit; 408 KResourceLimit* resource_limit{};
403 409
404 /// The ideal CPU core for this process, threads are scheduled on this core by default. 410 /// The ideal CPU core for this process, threads are scheduled on this core by default.
405 u8 ideal_core = 0; 411 u8 ideal_core = 0;
@@ -423,7 +429,7 @@ private:
423 u64 total_process_running_time_ticks = 0; 429 u64 total_process_running_time_ticks = 0;
424 430
425 /// Per-process handle table for storing created object handles in. 431 /// Per-process handle table for storing created object handles in.
426 HandleTable handle_table; 432 KHandleTable handle_table;
427 433
428 /// Per-process address arbiter. 434 /// Per-process address arbiter.
429 KAddressArbiter address_arbiter; 435 KAddressArbiter address_arbiter;
@@ -454,14 +460,12 @@ private:
454 /// Process total image size 460 /// Process total image size
455 std::size_t image_size{}; 461 std::size_t image_size{};
456 462
457 /// Name of this process
458 std::string name;
459
460 /// Schedule count of this process 463 /// Schedule count of this process
461 s64 schedule_count{}; 464 s64 schedule_count{};
462 465
463 bool is_signaled{}; 466 bool is_signaled{};
464 bool is_suspended{}; 467 bool is_suspended{};
468 bool is_initialized{};
465 469
466 std::atomic<s32> num_created_threads{}; 470 std::atomic<s32> num_created_threads{};
467 std::atomic<u16> num_threads{}; 471 std::atomic<u16> num_threads{};
@@ -474,9 +478,6 @@ private:
474 KThread* exception_thread{}; 478 KThread* exception_thread{};
475 479
476 KLightLock state_lock; 480 KLightLock state_lock;
477
478 /// System context
479 Core::System& system;
480}; 481};
481 482
482} // namespace Kernel 483} // namespace Kernel
diff --git a/src/core/hle/kernel/k_readable_event.cpp b/src/core/hle/kernel/k_readable_event.cpp
index 4b4d34857..8fef4bb00 100644
--- a/src/core/hle/kernel/k_readable_event.cpp
+++ b/src/core/hle/kernel/k_readable_event.cpp
@@ -2,21 +2,18 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <algorithm>
6#include "common/assert.h" 5#include "common/assert.h"
7#include "common/common_funcs.h" 6#include "core/hle/kernel/k_event.h"
8#include "common/logging/log.h"
9#include "core/hle/kernel/k_readable_event.h" 7#include "core/hle/kernel/k_readable_event.h"
10#include "core/hle/kernel/k_scheduler.h" 8#include "core/hle/kernel/k_scheduler.h"
11#include "core/hle/kernel/k_thread.h" 9#include "core/hle/kernel/k_thread.h"
12#include "core/hle/kernel/kernel.h" 10#include "core/hle/kernel/kernel.h"
13#include "core/hle/kernel/object.h"
14#include "core/hle/kernel/svc_results.h" 11#include "core/hle/kernel/svc_results.h"
15 12
16namespace Kernel { 13namespace Kernel {
17 14
18KReadableEvent::KReadableEvent(KernelCore& kernel, std::string&& name) 15KReadableEvent::KReadableEvent(KernelCore& kernel) : KSynchronizationObject{kernel} {}
19 : KSynchronizationObject{kernel, std::move(name)} {} 16
20KReadableEvent::~KReadableEvent() = default; 17KReadableEvent::~KReadableEvent() = default;
21 18
22bool KReadableEvent::IsSignaled() const { 19bool KReadableEvent::IsSignaled() const {
@@ -25,6 +22,12 @@ bool KReadableEvent::IsSignaled() const {
25 return is_signaled; 22 return is_signaled;
26} 23}
27 24
25void KReadableEvent::Destroy() {
26 if (parent) {
27 parent->Close();
28 }
29}
30
28ResultCode KReadableEvent::Signal() { 31ResultCode KReadableEvent::Signal() {
29 KScopedSchedulerLock lk{kernel}; 32 KScopedSchedulerLock lk{kernel};
30 33
diff --git a/src/core/hle/kernel/k_readable_event.h b/src/core/hle/kernel/k_readable_event.h
index e6f0fd900..1783ef0b8 100644
--- a/src/core/hle/kernel/k_readable_event.h
+++ b/src/core/hle/kernel/k_readable_event.h
@@ -4,8 +4,9 @@
4 4
5#pragma once 5#pragma once
6 6
7#include "core/hle/kernel/k_auto_object.h"
7#include "core/hle/kernel/k_synchronization_object.h" 8#include "core/hle/kernel/k_synchronization_object.h"
8#include "core/hle/kernel/object.h" 9#include "core/hle/kernel/slab_helpers.h"
9#include "core/hle/result.h" 10#include "core/hle/result.h"
10 11
11namespace Kernel { 12namespace Kernel {
@@ -13,31 +14,25 @@ namespace Kernel {
13class KernelCore; 14class KernelCore;
14class KEvent; 15class KEvent;
15 16
16class KReadableEvent final : public KSynchronizationObject { 17class KReadableEvent : public KSynchronizationObject {
18 KERNEL_AUTOOBJECT_TRAITS(KReadableEvent, KSynchronizationObject);
19
17public: 20public:
18 explicit KReadableEvent(KernelCore& kernel, std::string&& name); 21 explicit KReadableEvent(KernelCore& kernel);
19 ~KReadableEvent() override; 22 ~KReadableEvent() override;
20 23
21 std::string GetTypeName() const override { 24 void Initialize(KEvent* parent_, std::string&& name_) {
22 return "KReadableEvent"; 25 is_signaled = false;
23 } 26 parent = parent_;
24 27 name = std::move(name_);
25 static constexpr HandleType HANDLE_TYPE = HandleType::ReadableEvent;
26 HandleType GetHandleType() const override {
27 return HANDLE_TYPE;
28 } 28 }
29 29
30 KEvent* GetParent() const { 30 KEvent* GetParent() const {
31 return parent; 31 return parent;
32 } 32 }
33 33
34 void Initialize(KEvent* parent_) { 34 virtual bool IsSignaled() const override;
35 is_signaled = false; 35 virtual void Destroy() override;
36 parent = parent_;
37 }
38
39 bool IsSignaled() const override;
40 void Finalize() override {}
41 36
42 ResultCode Signal(); 37 ResultCode Signal();
43 ResultCode Clear(); 38 ResultCode Clear();
diff --git a/src/core/hle/kernel/k_resource_limit.cpp b/src/core/hle/kernel/k_resource_limit.cpp
index d05b34ea3..ad5095bfd 100644
--- a/src/core/hle/kernel/k_resource_limit.cpp
+++ b/src/core/hle/kernel/k_resource_limit.cpp
@@ -10,10 +10,16 @@
10namespace Kernel { 10namespace Kernel {
11constexpr s64 DefaultTimeout = 10000000000; // 10 seconds 11constexpr s64 DefaultTimeout = 10000000000; // 10 seconds
12 12
13KResourceLimit::KResourceLimit(KernelCore& kernel, const Core::Timing::CoreTiming& core_timing_) 13KResourceLimit::KResourceLimit(KernelCore& kernel)
14 : Object{kernel}, lock{kernel}, cond_var{kernel}, core_timing(core_timing_) {} 14 : KAutoObjectWithSlabHeapAndContainer{kernel}, lock{kernel}, cond_var{kernel} {}
15KResourceLimit::~KResourceLimit() = default; 15KResourceLimit::~KResourceLimit() = default;
16 16
17void KResourceLimit::Initialize(const Core::Timing::CoreTiming* core_timing_) {
18 core_timing = core_timing_;
19}
20
21void KResourceLimit::Finalize() {}
22
17s64 KResourceLimit::GetLimitValue(LimitableResource which) const { 23s64 KResourceLimit::GetLimitValue(LimitableResource which) const {
18 const auto index = static_cast<std::size_t>(which); 24 const auto index = static_cast<std::size_t>(which);
19 s64 value{}; 25 s64 value{};
@@ -78,7 +84,7 @@ ResultCode KResourceLimit::SetLimitValue(LimitableResource which, s64 value) {
78} 84}
79 85
80bool KResourceLimit::Reserve(LimitableResource which, s64 value) { 86bool KResourceLimit::Reserve(LimitableResource which, s64 value) {
81 return Reserve(which, value, core_timing.GetGlobalTimeNs().count() + DefaultTimeout); 87 return Reserve(which, value, core_timing->GetGlobalTimeNs().count() + DefaultTimeout);
82} 88}
83 89
84bool KResourceLimit::Reserve(LimitableResource which, s64 value, s64 timeout) { 90bool KResourceLimit::Reserve(LimitableResource which, s64 value, s64 timeout) {
@@ -109,7 +115,7 @@ bool KResourceLimit::Reserve(LimitableResource which, s64 value, s64 timeout) {
109 } 115 }
110 116
111 if (current_hints[index] + value <= limit_values[index] && 117 if (current_hints[index] + value <= limit_values[index] &&
112 (timeout < 0 || core_timing.GetGlobalTimeNs().count() < timeout)) { 118 (timeout < 0 || core_timing->GetGlobalTimeNs().count() < timeout)) {
113 waiter_count++; 119 waiter_count++;
114 cond_var.Wait(&lock, timeout); 120 cond_var.Wait(&lock, timeout);
115 waiter_count--; 121 waiter_count--;
diff --git a/src/core/hle/kernel/k_resource_limit.h b/src/core/hle/kernel/k_resource_limit.h
index 4542317d0..66ebf32df 100644
--- a/src/core/hle/kernel/k_resource_limit.h
+++ b/src/core/hle/kernel/k_resource_limit.h
@@ -8,7 +8,6 @@
8#include "common/common_types.h" 8#include "common/common_types.h"
9#include "core/hle/kernel/k_light_condition_variable.h" 9#include "core/hle/kernel/k_light_condition_variable.h"
10#include "core/hle/kernel/k_light_lock.h" 10#include "core/hle/kernel/k_light_lock.h"
11#include "core/hle/kernel/object.h"
12 11
13union ResultCode; 12union ResultCode;
14 13
@@ -32,10 +31,16 @@ constexpr bool IsValidResourceType(LimitableResource type) {
32 return type < LimitableResource::Count; 31 return type < LimitableResource::Count;
33} 32}
34 33
35class KResourceLimit final : public Object { 34class KResourceLimit final
35 : public KAutoObjectWithSlabHeapAndContainer<KResourceLimit, KAutoObjectWithList> {
36 KERNEL_AUTOOBJECT_TRAITS(KResourceLimit, KAutoObject);
37
36public: 38public:
37 explicit KResourceLimit(KernelCore& kernel, const Core::Timing::CoreTiming& core_timing_); 39 explicit KResourceLimit(KernelCore& kernel);
38 ~KResourceLimit(); 40 virtual ~KResourceLimit();
41
42 void Initialize(const Core::Timing::CoreTiming* core_timing_);
43 virtual void Finalize() override;
39 44
40 s64 GetLimitValue(LimitableResource which) const; 45 s64 GetLimitValue(LimitableResource which) const;
41 s64 GetCurrentValue(LimitableResource which) const; 46 s64 GetCurrentValue(LimitableResource which) const;
@@ -49,19 +54,7 @@ public:
49 void Release(LimitableResource which, s64 value); 54 void Release(LimitableResource which, s64 value);
50 void Release(LimitableResource which, s64 value, s64 hint); 55 void Release(LimitableResource which, s64 value, s64 hint);
51 56
52 std::string GetTypeName() const override { 57 static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
53 return "KResourceLimit";
54 }
55 std::string GetName() const override {
56 return GetTypeName();
57 }
58
59 static constexpr HandleType HANDLE_TYPE = HandleType::ResourceLimit;
60 HandleType GetHandleType() const override {
61 return HANDLE_TYPE;
62 }
63
64 virtual void Finalize() override {}
65 58
66private: 59private:
67 using ResourceArray = std::array<s64, static_cast<std::size_t>(LimitableResource::Count)>; 60 using ResourceArray = std::array<s64, static_cast<std::size_t>(LimitableResource::Count)>;
@@ -72,6 +65,6 @@ private:
72 mutable KLightLock lock; 65 mutable KLightLock lock;
73 s32 waiter_count{}; 66 s32 waiter_count{};
74 KLightConditionVariable cond_var; 67 KLightConditionVariable cond_var;
75 const Core::Timing::CoreTiming& core_timing; 68 const Core::Timing::CoreTiming* core_timing{};
76}; 69};
77} // namespace Kernel 70} // namespace Kernel
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index d1df97305..0115fe6d1 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -15,12 +15,12 @@
15#include "core/core.h" 15#include "core/core.h"
16#include "core/core_timing.h" 16#include "core/core_timing.h"
17#include "core/cpu_manager.h" 17#include "core/cpu_manager.h"
18#include "core/hle/kernel/k_process.h"
18#include "core/hle/kernel/k_scheduler.h" 19#include "core/hle/kernel/k_scheduler.h"
19#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" 20#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
20#include "core/hle/kernel/k_thread.h" 21#include "core/hle/kernel/k_thread.h"
21#include "core/hle/kernel/kernel.h" 22#include "core/hle/kernel/kernel.h"
22#include "core/hle/kernel/physical_core.h" 23#include "core/hle/kernel/physical_core.h"
23#include "core/hle/kernel/process.h"
24#include "core/hle/kernel/time_manager.h" 24#include "core/hle/kernel/time_manager.h"
25 25
26namespace Kernel { 26namespace Kernel {
@@ -71,7 +71,7 @@ u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
71 } 71 }
72 if (state.should_count_idle) { 72 if (state.should_count_idle) {
73 if (highest_thread != nullptr) { 73 if (highest_thread != nullptr) {
74 if (Process* process = highest_thread->GetOwnerProcess(); process != nullptr) { 74 if (KProcess* process = highest_thread->GetOwnerProcess(); process != nullptr) {
75 process->SetRunningThread(core_id, highest_thread, state.idle_count); 75 process->SetRunningThread(core_id, highest_thread, state.idle_count);
76 } 76 }
77 } else { 77 } else {
@@ -104,7 +104,7 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
104 if (top_thread != nullptr) { 104 if (top_thread != nullptr) {
105 // If the thread has no waiters, we need to check if the process has a thread pinned. 105 // If the thread has no waiters, we need to check if the process has a thread pinned.
106 if (top_thread->GetNumKernelWaiters() == 0) { 106 if (top_thread->GetNumKernelWaiters() == 0) {
107 if (Process* parent = top_thread->GetOwnerProcess(); parent != nullptr) { 107 if (KProcess* parent = top_thread->GetOwnerProcess(); parent != nullptr) {
108 if (KThread* pinned = parent->GetPinnedThread(static_cast<s32>(core_id)); 108 if (KThread* pinned = parent->GetPinnedThread(static_cast<s32>(core_id));
109 pinned != nullptr && pinned != top_thread) { 109 pinned != nullptr && pinned != top_thread) {
110 // We prefer our parent's pinned thread if possible. However, we also don't 110 // We prefer our parent's pinned thread if possible. However, we also don't
@@ -411,7 +411,7 @@ void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) {
411 411
412 // Get the current thread and process. 412 // Get the current thread and process.
413 KThread& cur_thread = Kernel::GetCurrentThread(kernel); 413 KThread& cur_thread = Kernel::GetCurrentThread(kernel);
414 Process& cur_process = *kernel.CurrentProcess(); 414 KProcess& cur_process = *kernel.CurrentProcess();
415 415
416 // If the thread's yield count matches, there's nothing for us to do. 416 // If the thread's yield count matches, there's nothing for us to do.
417 if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { 417 if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
@@ -450,7 +450,7 @@ void KScheduler::YieldWithCoreMigration(KernelCore& kernel) {
450 450
451 // Get the current thread and process. 451 // Get the current thread and process.
452 KThread& cur_thread = Kernel::GetCurrentThread(kernel); 452 KThread& cur_thread = Kernel::GetCurrentThread(kernel);
453 Process& cur_process = *kernel.CurrentProcess(); 453 KProcess& cur_process = *kernel.CurrentProcess();
454 454
455 // If the thread's yield count matches, there's nothing for us to do. 455 // If the thread's yield count matches, there's nothing for us to do.
456 if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { 456 if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
@@ -538,7 +538,7 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) {
538 538
539 // Get the current thread and process. 539 // Get the current thread and process.
540 KThread& cur_thread = Kernel::GetCurrentThread(kernel); 540 KThread& cur_thread = Kernel::GetCurrentThread(kernel);
541 Process& cur_process = *kernel.CurrentProcess(); 541 KProcess& cur_process = *kernel.CurrentProcess();
542 542
543 // If the thread's yield count matches, there's nothing for us to do. 543 // If the thread's yield count matches, there's nothing for us to do.
544 if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { 544 if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
@@ -617,7 +617,12 @@ KScheduler::KScheduler(Core::System& system, s32 core_id) : system(system), core
617 state.highest_priority_thread = nullptr; 617 state.highest_priority_thread = nullptr;
618} 618}
619 619
620KScheduler::~KScheduler() = default; 620KScheduler::~KScheduler() {
621 if (idle_thread) {
622 idle_thread->Close();
623 idle_thread = nullptr;
624 }
625}
621 626
622KThread* KScheduler::GetCurrentThread() const { 627KThread* KScheduler::GetCurrentThread() const {
623 if (auto result = current_thread.load(); result) { 628 if (auto result = current_thread.load(); result) {
@@ -719,7 +724,7 @@ void KScheduler::ScheduleImpl() {
719 724
720 current_thread.store(next_thread); 725 current_thread.store(next_thread);
721 726
722 Process* const previous_process = system.Kernel().CurrentProcess(); 727 KProcess* const previous_process = system.Kernel().CurrentProcess();
723 728
724 UpdateLastContextSwitchTime(previous_thread, previous_process); 729 UpdateLastContextSwitchTime(previous_thread, previous_process);
725 730
@@ -775,7 +780,7 @@ void KScheduler::SwitchToCurrent() {
775 } 780 }
776} 781}
777 782
778void KScheduler::UpdateLastContextSwitchTime(KThread* thread, Process* process) { 783void KScheduler::UpdateLastContextSwitchTime(KThread* thread, KProcess* process) {
779 const u64 prev_switch_ticks = last_context_switch_time; 784 const u64 prev_switch_ticks = last_context_switch_time;
780 const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks(); 785 const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
781 const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks; 786 const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
@@ -792,13 +797,9 @@ void KScheduler::UpdateLastContextSwitchTime(KThread* thread, Process* process)
792} 797}
793 798
794void KScheduler::Initialize() { 799void KScheduler::Initialize() {
795 std::string name = "Idle Thread Id:" + std::to_string(core_id); 800 idle_thread = KThread::Create(system.Kernel());
796 std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc(); 801 ASSERT(KThread::InitializeIdleThread(system, idle_thread, core_id).IsSuccess());
797 void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater(); 802 idle_thread->SetName(fmt::format("IdleThread:{}", core_id));
798 auto thread_res = KThread::CreateThread(
799 system, ThreadType::Main, name, 0, KThread::IdleThreadPriority, 0,
800 static_cast<u32>(core_id), 0, nullptr, std::move(init_func), init_func_parameter);
801 idle_thread = thread_res.Unwrap().get();
802} 803}
803 804
804KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel) 805KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel)
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 8e32865aa..b789a64a4 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -24,7 +24,7 @@ class System;
24namespace Kernel { 24namespace Kernel {
25 25
26class KernelCore; 26class KernelCore;
27class Process; 27class KProcess;
28class SchedulerLock; 28class SchedulerLock;
29class KThread; 29class KThread;
30 30
@@ -165,7 +165,7 @@ private:
165 * most recent tick count retrieved. No special arithmetic is 165 * most recent tick count retrieved. No special arithmetic is
166 * applied to it. 166 * applied to it.
167 */ 167 */
168 void UpdateLastContextSwitchTime(KThread* thread, Process* process); 168 void UpdateLastContextSwitchTime(KThread* thread, KProcess* process);
169 169
170 static void OnSwitch(void* this_scheduler); 170 static void OnSwitch(void* this_scheduler);
171 void SwitchToCurrent(); 171 void SwitchToCurrent();
@@ -173,12 +173,12 @@ private:
173 KThread* prev_thread{}; 173 KThread* prev_thread{};
174 std::atomic<KThread*> current_thread{}; 174 std::atomic<KThread*> current_thread{};
175 175
176 KThread* idle_thread; 176 KThread* idle_thread{};
177 177
178 std::shared_ptr<Common::Fiber> switch_fiber{}; 178 std::shared_ptr<Common::Fiber> switch_fiber{};
179 179
180 struct SchedulingState { 180 struct SchedulingState {
181 std::atomic<bool> needs_scheduling; 181 std::atomic<bool> needs_scheduling{};
182 bool interrupt_task_thread_runnable{}; 182 bool interrupt_task_thread_runnable{};
183 bool should_count_idle{}; 183 bool should_count_idle{};
184 u64 idle_count{}; 184 u64 idle_count{};
diff --git a/src/core/hle/kernel/k_scoped_resource_reservation.h b/src/core/hle/kernel/k_scoped_resource_reservation.h
index c5deca00b..07272075d 100644
--- a/src/core/hle/kernel/k_scoped_resource_reservation.h
+++ b/src/core/hle/kernel/k_scoped_resource_reservation.h
@@ -8,15 +8,14 @@
8#pragma once 8#pragma once
9 9
10#include "common/common_types.h" 10#include "common/common_types.h"
11#include "core/hle/kernel/k_process.h"
11#include "core/hle/kernel/k_resource_limit.h" 12#include "core/hle/kernel/k_resource_limit.h"
12#include "core/hle/kernel/process.h"
13 13
14namespace Kernel { 14namespace Kernel {
15 15
16class KScopedResourceReservation { 16class KScopedResourceReservation {
17public: 17public:
18 explicit KScopedResourceReservation(std::shared_ptr<KResourceLimit> l, LimitableResource r, 18 explicit KScopedResourceReservation(KResourceLimit* l, LimitableResource r, s64 v, s64 timeout)
19 s64 v, s64 timeout)
20 : resource_limit(std::move(l)), value(v), resource(r) { 19 : resource_limit(std::move(l)), value(v), resource(r) {
21 if (resource_limit && value) { 20 if (resource_limit && value) {
22 success = resource_limit->Reserve(resource, value, timeout); 21 success = resource_limit->Reserve(resource, value, timeout);
@@ -25,8 +24,7 @@ public:
25 } 24 }
26 } 25 }
27 26
28 explicit KScopedResourceReservation(std::shared_ptr<KResourceLimit> l, LimitableResource r, 27 explicit KScopedResourceReservation(KResourceLimit* l, LimitableResource r, s64 v = 1)
29 s64 v = 1)
30 : resource_limit(std::move(l)), value(v), resource(r) { 28 : resource_limit(std::move(l)), value(v), resource(r) {
31 if (resource_limit && value) { 29 if (resource_limit && value) {
32 success = resource_limit->Reserve(resource, value); 30 success = resource_limit->Reserve(resource, value);
@@ -35,10 +33,10 @@ public:
35 } 33 }
36 } 34 }
37 35
38 explicit KScopedResourceReservation(const Process* p, LimitableResource r, s64 v, s64 t) 36 explicit KScopedResourceReservation(const KProcess* p, LimitableResource r, s64 v, s64 t)
39 : KScopedResourceReservation(p->GetResourceLimit(), r, v, t) {} 37 : KScopedResourceReservation(p->GetResourceLimit(), r, v, t) {}
40 38
41 explicit KScopedResourceReservation(const Process* p, LimitableResource r, s64 v = 1) 39 explicit KScopedResourceReservation(const KProcess* p, LimitableResource r, s64 v = 1)
42 : KScopedResourceReservation(p->GetResourceLimit(), r, v) {} 40 : KScopedResourceReservation(p->GetResourceLimit(), r, v) {}
43 41
44 ~KScopedResourceReservation() noexcept { 42 ~KScopedResourceReservation() noexcept {
@@ -58,7 +56,7 @@ public:
58 } 56 }
59 57
60private: 58private:
61 std::shared_ptr<KResourceLimit> resource_limit; 59 KResourceLimit* resource_limit{};
62 s64 value; 60 s64 value;
63 LimitableResource resource; 61 LimitableResource resource;
64 bool success; 62 bool success;
diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
index ebecf0c77..b5d405744 100644
--- a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
+++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
@@ -8,7 +8,7 @@
8#pragma once 8#pragma once
9 9
10#include "common/common_types.h" 10#include "common/common_types.h"
11#include "core/hle/kernel/handle_table.h" 11#include "core/hle/kernel/k_handle_table.h"
12#include "core/hle/kernel/k_thread.h" 12#include "core/hle/kernel/k_thread.h"
13#include "core/hle/kernel/kernel.h" 13#include "core/hle/kernel/kernel.h"
14#include "core/hle/kernel/time_manager.h" 14#include "core/hle/kernel/time_manager.h"
diff --git a/src/core/hle/kernel/k_server_port.cpp b/src/core/hle/kernel/k_server_port.cpp
new file mode 100644
index 000000000..5e44c48e2
--- /dev/null
+++ b/src/core/hle/kernel/k_server_port.cpp
@@ -0,0 +1,104 @@
1// Copyright 2021 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <tuple>
6#include "common/assert.h"
7#include "core/hle/kernel/k_client_port.h"
8#include "core/hle/kernel/k_port.h"
9#include "core/hle/kernel/k_scheduler.h"
10#include "core/hle/kernel/k_server_port.h"
11#include "core/hle/kernel/k_server_session.h"
12#include "core/hle/kernel/k_thread.h"
13#include "core/hle/kernel/svc_results.h"
14
15namespace Kernel {
16
17KServerPort::KServerPort(KernelCore& kernel) : KSynchronizationObject{kernel} {}
18KServerPort::~KServerPort() = default;
19
20void KServerPort::Initialize(KPort* parent_, std::string&& name_) {
21 // Set member variables.
22 parent = parent_;
23 name = std::move(name_);
24}
25
26bool KServerPort::IsLight() const {
27 return this->GetParent()->IsLight();
28}
29
30void KServerPort::CleanupSessions() {
31 // Ensure our preconditions are met.
32 if (this->IsLight()) {
33 UNIMPLEMENTED();
34 }
35
36 // Cleanup the session list.
37 while (true) {
38 // Get the last session in the list
39 KServerSession* session = nullptr;
40 {
41 KScopedSchedulerLock sl{kernel};
42 if (!session_list.empty()) {
43 session = std::addressof(session_list.front());
44 session_list.pop_front();
45 }
46 }
47
48 // Close the session.
49 if (session != nullptr) {
50 session->Close();
51 } else {
52 break;
53 }
54 }
55}
56
57void KServerPort::Destroy() {
58 // Note with our parent that we're closed.
59 parent->OnServerClosed();
60
61 // Perform necessary cleanup of our session lists.
62 this->CleanupSessions();
63
64 // Close our reference to our parent.
65 parent->Close();
66}
67
68bool KServerPort::IsSignaled() const {
69 if (this->IsLight()) {
70 UNIMPLEMENTED();
71 return false;
72 } else {
73 return !session_list.empty();
74 }
75}
76
77void KServerPort::EnqueueSession(KServerSession* session) {
78 ASSERT(!this->IsLight());
79
80 KScopedSchedulerLock sl{kernel};
81
82 // Add the session to our queue.
83 session_list.push_back(*session);
84 if (session_list.size() == 1) {
85 this->NotifyAvailable();
86 }
87}
88
89KServerSession* KServerPort::AcceptSession() {
90 ASSERT(!this->IsLight());
91
92 KScopedSchedulerLock sl{kernel};
93
94 // Return the first session in the list.
95 if (session_list.empty()) {
96 return nullptr;
97 }
98
99 KServerSession* session = std::addressof(session_list.front());
100 session_list.pop_front();
101 return session;
102}
103
104} // namespace Kernel
diff --git a/src/core/hle/kernel/k_server_port.h b/src/core/hle/kernel/k_server_port.h
new file mode 100644
index 000000000..558c8ed4d
--- /dev/null
+++ b/src/core/hle/kernel/k_server_port.h
@@ -0,0 +1,80 @@
1// Copyright 2021 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8#include <string>
9#include <utility>
10#include <vector>
11
12#include <boost/intrusive/list.hpp>
13
14#include "common/common_types.h"
15#include "core/hle/kernel/k_server_session.h"
16#include "core/hle/kernel/k_synchronization_object.h"
17#include "core/hle/result.h"
18
19namespace Kernel {
20
21class KernelCore;
22class KPort;
23class SessionRequestHandler;
24
25class KServerPort final : public KSynchronizationObject {
26 KERNEL_AUTOOBJECT_TRAITS(KServerPort, KSynchronizationObject);
27
28private:
29 using SessionList = boost::intrusive::list<KServerSession>;
30
31public:
32 explicit KServerPort(KernelCore& kernel);
33 virtual ~KServerPort() override;
34
35 using HLEHandler = std::shared_ptr<SessionRequestHandler>;
36
37 void Initialize(KPort* parent_, std::string&& name_);
38
39 /// Whether or not this server port has an HLE handler available.
40 bool HasHLEHandler() const {
41 return hle_handler != nullptr;
42 }
43
44 /// Gets the HLE handler for this port.
45 HLEHandler GetHLEHandler() const {
46 return hle_handler;
47 }
48
49 /**
50 * Sets the HLE handler template for the port. ServerSessions crated by connecting to this port
51 * will inherit a reference to this handler.
52 */
53 void SetHleHandler(HLEHandler hle_handler_) {
54 hle_handler = std::move(hle_handler_);
55 }
56
57 void EnqueueSession(KServerSession* pending_session);
58
59 KServerSession* AcceptSession();
60
61 const KPort* GetParent() const {
62 return parent;
63 }
64
65 bool IsLight() const;
66
67 // Overridden virtual functions.
68 virtual void Destroy() override;
69 virtual bool IsSignaled() const override;
70
71private:
72 void CleanupSessions();
73
74private:
75 SessionList session_list;
76 HLEHandler hle_handler;
77 KPort* parent{};
78};
79
80} // namespace Kernel
diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index 790dbb998..c8acaa453 100644
--- a/src/core/hle/kernel/server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
@@ -10,49 +10,39 @@
10#include "common/logging/log.h" 10#include "common/logging/log.h"
11#include "core/core_timing.h" 11#include "core/core_timing.h"
12#include "core/hle/ipc_helpers.h" 12#include "core/hle/ipc_helpers.h"
13#include "core/hle/kernel/client_port.h"
14#include "core/hle/kernel/client_session.h"
15#include "core/hle/kernel/handle_table.h"
16#include "core/hle/kernel/hle_ipc.h" 13#include "core/hle/kernel/hle_ipc.h"
14#include "core/hle/kernel/k_client_port.h"
15#include "core/hle/kernel/k_handle_table.h"
16#include "core/hle/kernel/k_process.h"
17#include "core/hle/kernel/k_scheduler.h" 17#include "core/hle/kernel/k_scheduler.h"
18#include "core/hle/kernel/k_server_session.h"
19#include "core/hle/kernel/k_session.h"
18#include "core/hle/kernel/k_thread.h" 20#include "core/hle/kernel/k_thread.h"
19#include "core/hle/kernel/kernel.h" 21#include "core/hle/kernel/kernel.h"
20#include "core/hle/kernel/process.h"
21#include "core/hle/kernel/server_session.h"
22#include "core/hle/kernel/session.h"
23#include "core/memory.h" 22#include "core/memory.h"
24 23
25namespace Kernel { 24namespace Kernel {
26 25
27ServerSession::ServerSession(KernelCore& kernel) : KSynchronizationObject{kernel} {} 26KServerSession::KServerSession(KernelCore& kernel) : KSynchronizationObject{kernel} {}
28 27
29ServerSession::~ServerSession() { 28KServerSession::~KServerSession() {
30 kernel.ReleaseServiceThread(service_thread); 29 kernel.ReleaseServiceThread(service_thread);
31} 30}
32 31
33ResultVal<std::shared_ptr<ServerSession>> ServerSession::Create(KernelCore& kernel, 32void KServerSession::Initialize(KSession* parent_, std::string&& name_) {
34 std::shared_ptr<Session> parent, 33 // Set member variables.
35 std::string name) { 34 parent = parent_;
36 std::shared_ptr<ServerSession> session{std::make_shared<ServerSession>(kernel)}; 35 name = std::move(name_);
37 36 service_thread = kernel.CreateServiceThread(name);
38 session->name = std::move(name);
39 session->parent = std::move(parent);
40 session->service_thread = kernel.CreateServiceThread(session->name);
41
42 return MakeResult(std::move(session));
43} 37}
44 38
45bool ServerSession::IsSignaled() const { 39void KServerSession::Destroy() {
46 // Closed sessions should never wait, an error will be returned from svcReplyAndReceive. 40 parent->OnServerClosed();
47 if (!parent->Client()) {
48 return true;
49 }
50 41
51 // Wait if we have no pending requests, or if we're currently handling a request. 42 parent->Close();
52 return !pending_requesting_threads.empty() && currently_handling == nullptr;
53} 43}
54 44
55void ServerSession::ClientDisconnected() { 45void KServerSession::OnClientClosed() {
56 // We keep a shared pointer to the hle handler to keep it alive throughout 46 // We keep a shared pointer to the hle handler to keep it alive throughout
57 // the call to ClientDisconnected, as ClientDisconnected invalidates the 47 // the call to ClientDisconnected, as ClientDisconnected invalidates the
58 // hle_handler member itself during the course of the function executing. 48 // hle_handler member itself during the course of the function executing.
@@ -60,24 +50,31 @@ void ServerSession::ClientDisconnected() {
60 if (handler) { 50 if (handler) {
61 // Note that after this returns, this server session's hle_handler is 51 // Note that after this returns, this server session's hle_handler is
62 // invalidated (set to null). 52 // invalidated (set to null).
63 handler->ClientDisconnected(SharedFrom(this)); 53 handler->ClientDisconnected(this);
54 }
55}
56
57bool KServerSession::IsSignaled() const {
58 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
59
60 // If the client is closed, we're always signaled.
61 if (parent->IsClientClosed()) {
62 return true;
64 } 63 }
65 64
66 // Clean up the list of client threads with pending requests, they are unneeded now that the 65 // Otherwise, we're signaled if we have a request and aren't handling one.
67 // client endpoint is closed. 66 return false;
68 pending_requesting_threads.clear();
69 currently_handling = nullptr;
70} 67}
71 68
72void ServerSession::AppendDomainRequestHandler(std::shared_ptr<SessionRequestHandler> handler) { 69void KServerSession::AppendDomainRequestHandler(std::shared_ptr<SessionRequestHandler> handler) {
73 domain_request_handlers.push_back(std::move(handler)); 70 domain_request_handlers.push_back(std::move(handler));
74} 71}
75 72
76std::size_t ServerSession::NumDomainRequestHandlers() const { 73std::size_t KServerSession::NumDomainRequestHandlers() const {
77 return domain_request_handlers.size(); 74 return domain_request_handlers.size();
78} 75}
79 76
80ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) { 77ResultCode KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) {
81 if (!context.HasDomainMessageHeader()) { 78 if (!context.HasDomainMessageHeader()) {
82 return RESULT_SUCCESS; 79 return RESULT_SUCCESS;
83 } 80 }
@@ -116,23 +113,21 @@ ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& con
116 return RESULT_SUCCESS; 113 return RESULT_SUCCESS;
117} 114}
118 115
119ResultCode ServerSession::QueueSyncRequest(std::shared_ptr<KThread> thread, 116ResultCode KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) {
120 Core::Memory::Memory& memory) {
121 u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))}; 117 u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))};
122 auto context = 118 auto context = std::make_shared<HLERequestContext>(kernel, memory, this, thread);
123 std::make_shared<HLERequestContext>(kernel, memory, SharedFrom(this), std::move(thread));
124 119
125 context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf); 120 context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf);
126 121
127 if (auto strong_ptr = service_thread.lock()) { 122 if (auto strong_ptr = service_thread.lock()) {
128 strong_ptr->QueueSyncRequest(*this, std::move(context)); 123 strong_ptr->QueueSyncRequest(*parent, std::move(context));
129 return RESULT_SUCCESS; 124 return RESULT_SUCCESS;
130 } 125 }
131 126
132 return RESULT_SUCCESS; 127 return RESULT_SUCCESS;
133} 128}
134 129
135ResultCode ServerSession::CompleteSyncRequest(HLERequestContext& context) { 130ResultCode KServerSession::CompleteSyncRequest(HLERequestContext& context) {
136 ResultCode result = RESULT_SUCCESS; 131 ResultCode result = RESULT_SUCCESS;
137 // If the session has been converted to a domain, handle the domain request 132 // If the session has been converted to a domain, handle the domain request
138 if (IsDomain() && context.HasDomainMessageHeader()) { 133 if (IsDomain() && context.HasDomainMessageHeader()) {
@@ -161,10 +156,9 @@ ResultCode ServerSession::CompleteSyncRequest(HLERequestContext& context) {
161 return result; 156 return result;
162} 157}
163 158
164ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<KThread> thread, 159ResultCode KServerSession::HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
165 Core::Memory::Memory& memory, 160 Core::Timing::CoreTiming& core_timing) {
166 Core::Timing::CoreTiming& core_timing) { 161 return QueueSyncRequest(thread, memory);
167 return QueueSyncRequest(std::move(thread), memory);
168} 162}
169 163
170} // namespace Kernel 164} // namespace Kernel
diff --git a/src/core/hle/kernel/server_session.h b/src/core/hle/kernel/k_server_session.h
index c42d5ee59..77095bb85 100644
--- a/src/core/hle/kernel/server_session.h
+++ b/src/core/hle/kernel/k_server_session.h
@@ -9,6 +9,8 @@
9#include <utility> 9#include <utility>
10#include <vector> 10#include <vector>
11 11
12#include <boost/intrusive/list.hpp>
13
12#include "common/threadsafe_queue.h" 14#include "common/threadsafe_queue.h"
13#include "core/hle/kernel/k_synchronization_object.h" 15#include "core/hle/kernel/k_synchronization_object.h"
14#include "core/hle/kernel/service_thread.h" 16#include "core/hle/kernel/service_thread.h"
@@ -27,55 +29,35 @@ namespace Kernel {
27 29
28class HLERequestContext; 30class HLERequestContext;
29class KernelCore; 31class KernelCore;
30class Session; 32class KSession;
31class SessionRequestHandler; 33class SessionRequestHandler;
32class KThread; 34class KThread;
33 35
34/** 36class KServerSession final : public KSynchronizationObject,
35 * Kernel object representing the server endpoint of an IPC session. Sessions are the basic CTR-OS 37 public boost::intrusive::list_base_hook<> {
36 * primitive for communication between different processes, and are used to implement service calls 38 KERNEL_AUTOOBJECT_TRAITS(KServerSession, KSynchronizationObject);
37 * to the various system services. 39
38 *
39 * To make a service call, the client must write the command header and parameters to the buffer
40 * located at offset 0x80 of the TLS (Thread-Local Storage) area, then execute a SendSyncRequest
41 * SVC call with its ClientSession handle. The kernel will read the command header, using it to
42 * marshall the parameters to the process at the server endpoint of the session.
43 * After the server replies to the request, the response is marshalled back to the caller's
44 * TLS buffer and control is transferred back to it.
45 */
46class ServerSession final : public KSynchronizationObject {
47 friend class ServiceThread; 40 friend class ServiceThread;
48 41
49public: 42public:
50 explicit ServerSession(KernelCore& kernel); 43 explicit KServerSession(KernelCore& kernel);
51 ~ServerSession() override; 44 virtual ~KServerSession() override;
52 45
53 friend class Session; 46 virtual void Destroy() override;
54 47
55 static ResultVal<std::shared_ptr<ServerSession>> Create(KernelCore& kernel, 48 void Initialize(KSession* parent_, std::string&& name_);
56 std::shared_ptr<Session> parent,
57 std::string name = "Unknown");
58 49
59 std::string GetTypeName() const override { 50 KSession* GetParent() {
60 return "ServerSession"; 51 return parent;
61 } 52 }
62 53
63 std::string GetName() const override { 54 const KSession* GetParent() const {
64 return name; 55 return parent;
65 } 56 }
66 57
67 static constexpr HandleType HANDLE_TYPE = HandleType::ServerSession; 58 virtual bool IsSignaled() const override;
68 HandleType GetHandleType() const override {
69 return HANDLE_TYPE;
70 }
71 59
72 Session* GetParent() { 60 void OnClientClosed();
73 return parent.get();
74 }
75
76 const Session* GetParent() const {
77 return parent.get();
78 }
79 61
80 /** 62 /**
81 * Sets the HLE handler for the session. This handler will be called to service IPC requests 63 * Sets the HLE handler for the session. This handler will be called to service IPC requests
@@ -95,12 +77,9 @@ public:
95 * 77 *
96 * @returns ResultCode from the operation. 78 * @returns ResultCode from the operation.
97 */ 79 */
98 ResultCode HandleSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory, 80 ResultCode HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
99 Core::Timing::CoreTiming& core_timing); 81 Core::Timing::CoreTiming& core_timing);
100 82
101 /// Called when a client disconnection occurs.
102 void ClientDisconnected();
103
104 /// Adds a new domain request handler to the collection of request handlers within 83 /// Adds a new domain request handler to the collection of request handlers within
105 /// this ServerSession instance. 84 /// this ServerSession instance.
106 void AppendDomainRequestHandler(std::shared_ptr<SessionRequestHandler> handler); 85 void AppendDomainRequestHandler(std::shared_ptr<SessionRequestHandler> handler);
@@ -124,13 +103,9 @@ public:
124 convert_to_domain = true; 103 convert_to_domain = true;
125 } 104 }
126 105
127 bool IsSignaled() const override;
128
129 void Finalize() override {}
130
131private: 106private:
132 /// Queues a sync request from the emulated application. 107 /// Queues a sync request from the emulated application.
133 ResultCode QueueSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory); 108 ResultCode QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory);
134 109
135 /// Completes a sync request from the emulated application. 110 /// Completes a sync request from the emulated application.
136 ResultCode CompleteSyncRequest(HLERequestContext& context); 111 ResultCode CompleteSyncRequest(HLERequestContext& context);
@@ -139,33 +114,20 @@ private:
139 /// object handle. 114 /// object handle.
140 ResultCode HandleDomainSyncRequest(Kernel::HLERequestContext& context); 115 ResultCode HandleDomainSyncRequest(Kernel::HLERequestContext& context);
141 116
142 /// The parent session, which links to the client endpoint.
143 std::shared_ptr<Session> parent;
144
145 /// This session's HLE request handler (applicable when not a domain) 117 /// This session's HLE request handler (applicable when not a domain)
146 std::shared_ptr<SessionRequestHandler> hle_handler; 118 std::shared_ptr<SessionRequestHandler> hle_handler;
147 119
148 /// This is the list of domain request handlers (after conversion to a domain) 120 /// This is the list of domain request handlers (after conversion to a domain)
149 std::vector<std::shared_ptr<SessionRequestHandler>> domain_request_handlers; 121 std::vector<std::shared_ptr<SessionRequestHandler>> domain_request_handlers;
150 122
151 /// List of threads that are pending a response after a sync request. This list is processed in
152 /// a LIFO manner, thus, the last request will be dispatched first.
153 /// TODO(Subv): Verify if this is indeed processed in LIFO using a hardware test.
154 std::vector<std::shared_ptr<KThread>> pending_requesting_threads;
155
156 /// Thread whose request is currently being handled. A request is considered "handled" when a
157 /// response is sent via svcReplyAndReceive.
158 /// TODO(Subv): Find a better name for this.
159 std::shared_ptr<KThread> currently_handling;
160
161 /// When set to True, converts the session to a domain at the end of the command 123 /// When set to True, converts the session to a domain at the end of the command
162 bool convert_to_domain{}; 124 bool convert_to_domain{};
163 125
164 /// The name of this session (optional)
165 std::string name;
166
167 /// Thread to dispatch service requests 126 /// Thread to dispatch service requests
168 std::weak_ptr<ServiceThread> service_thread; 127 std::weak_ptr<ServiceThread> service_thread;
128
129 /// KSession that owns this KServerSession
130 KSession* parent{};
169}; 131};
170 132
171} // namespace Kernel 133} // namespace Kernel
diff --git a/src/core/hle/kernel/k_session.cpp b/src/core/hle/kernel/k_session.cpp
new file mode 100644
index 000000000..7b0bc177d
--- /dev/null
+++ b/src/core/hle/kernel/k_session.cpp
@@ -0,0 +1,85 @@
1// Copyright 2021 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/assert.h"
6#include "core/hle/kernel/k_client_port.h"
7#include "core/hle/kernel/k_client_session.h"
8#include "core/hle/kernel/k_scoped_resource_reservation.h"
9#include "core/hle/kernel/k_server_session.h"
10#include "core/hle/kernel/k_session.h"
11
12namespace Kernel {
13
14KSession::KSession(KernelCore& kernel)
15 : KAutoObjectWithSlabHeapAndContainer{kernel}, server{kernel}, client{kernel} {}
16KSession::~KSession() = default;
17
18void KSession::Initialize(KClientPort* port_, const std::string& name_) {
19 // Increment reference count.
20 // Because reference count is one on creation, this will result
21 // in a reference count of two. Thus, when both server and client are closed
22 // this object will be destroyed.
23 Open();
24
25 // Create our sub sessions.
26 KAutoObject::Create(std::addressof(server));
27 KAutoObject::Create(std::addressof(client));
28
29 // Initialize our sub sessions.
30 server.Initialize(this, name_ + ":Server");
31 client.Initialize(this, name_ + ":Client");
32
33 // Set state and name.
34 SetState(State::Normal);
35 name = name_;
36
37 // Set our owner process.
38 process = kernel.CurrentProcess();
39 process->Open();
40
41 // Set our port.
42 port = port_;
43 if (port != nullptr) {
44 port->Open();
45 }
46
47 // Mark initialized.
48 initialized = true;
49}
50
51void KSession::Finalize() {
52 if (port == nullptr) {
53 return;
54 }
55
56 port->OnSessionFinalized();
57 port->Close();
58}
59
60void KSession::OnServerClosed() {
61 if (GetState() != State::Normal) {
62 return;
63 }
64
65 SetState(State::ServerClosed);
66 client.OnServerClosed();
67}
68
69void KSession::OnClientClosed() {
70 if (GetState() != State::Normal) {
71 return;
72 }
73
74 SetState(State::ClientClosed);
75 server.OnClientClosed();
76}
77
78void KSession::PostDestroy(uintptr_t arg) {
79 // Release the session count resource the owner process holds.
80 KProcess* owner = reinterpret_cast<KProcess*>(arg);
81 owner->GetResourceLimit()->Release(LimitableResource::Sessions, 1);
82 owner->Close();
83}
84
85} // namespace Kernel
diff --git a/src/core/hle/kernel/k_session.h b/src/core/hle/kernel/k_session.h
new file mode 100644
index 000000000..4321b7885
--- /dev/null
+++ b/src/core/hle/kernel/k_session.h
@@ -0,0 +1,96 @@
1// Copyright 2021 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <atomic>
8#include <string>
9
10#include "core/hle/kernel/k_client_session.h"
11#include "core/hle/kernel/k_server_session.h"
12#include "core/hle/kernel/slab_helpers.h"
13
14namespace Kernel {
15
16class KSession final : public KAutoObjectWithSlabHeapAndContainer<KSession, KAutoObjectWithList> {
17 KERNEL_AUTOOBJECT_TRAITS(KSession, KAutoObject);
18
19public:
20 explicit KSession(KernelCore& kernel);
21 virtual ~KSession() override;
22
23 void Initialize(KClientPort* port_, const std::string& name_);
24
25 virtual void Finalize() override;
26
27 virtual bool IsInitialized() const override {
28 return initialized;
29 }
30
31 virtual uintptr_t GetPostDestroyArgument() const override {
32 return reinterpret_cast<uintptr_t>(process);
33 }
34
35 static void PostDestroy(uintptr_t arg);
36
37 void OnServerClosed();
38
39 void OnClientClosed();
40
41 bool IsServerClosed() const {
42 return this->GetState() != State::Normal;
43 }
44
45 bool IsClientClosed() const {
46 return this->GetState() != State::Normal;
47 }
48
49 KClientSession& GetClientSession() {
50 return client;
51 }
52
53 KServerSession& GetServerSession() {
54 return server;
55 }
56
57 const KClientSession& GetClientSession() const {
58 return client;
59 }
60
61 const KServerSession& GetServerSession() const {
62 return server;
63 }
64
65 const KClientPort* GetParent() const {
66 return port;
67 }
68
69private:
70 enum class State : u8 {
71 Invalid = 0,
72 Normal = 1,
73 ClientClosed = 2,
74 ServerClosed = 3,
75 };
76
77private:
78 void SetState(State state) {
79 atomic_state = static_cast<u8>(state);
80 }
81
82 State GetState() const {
83 return static_cast<State>(atomic_state.load(std::memory_order_relaxed));
84 }
85
86private:
87 KServerSession server;
88 KClientSession client;
89 std::atomic<std::underlying_type_t<State>> atomic_state{
90 static_cast<std::underlying_type_t<State>>(State::Invalid)};
91 KClientPort* port{};
92 KProcess* process{};
93 bool initialized{};
94};
95
96} // namespace Kernel
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp
index 9b14f42b5..1da57a4c3 100644
--- a/src/core/hle/kernel/k_shared_memory.cpp
+++ b/src/core/hle/kernel/k_shared_memory.cpp
@@ -8,50 +8,74 @@
8#include "core/hle/kernel/k_scoped_resource_reservation.h" 8#include "core/hle/kernel/k_scoped_resource_reservation.h"
9#include "core/hle/kernel/k_shared_memory.h" 9#include "core/hle/kernel/k_shared_memory.h"
10#include "core/hle/kernel/kernel.h" 10#include "core/hle/kernel/kernel.h"
11#include "core/hle/kernel/svc_results.h"
11 12
12namespace Kernel { 13namespace Kernel {
13 14
14KSharedMemory::KSharedMemory(KernelCore& kernel, Core::DeviceMemory& device_memory) 15KSharedMemory::KSharedMemory(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {}
15 : Object{kernel}, device_memory{device_memory} {}
16 16
17KSharedMemory::~KSharedMemory() { 17KSharedMemory::~KSharedMemory() {
18 kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemory, size); 18 kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemory, size);
19} 19}
20 20
21std::shared_ptr<KSharedMemory> KSharedMemory::Create( 21ResultCode KSharedMemory::Initialize(KernelCore& kernel_, Core::DeviceMemory& device_memory_,
22 KernelCore& kernel, Core::DeviceMemory& device_memory, Process* owner_process, 22 KProcess* owner_process_, KPageLinkedList&& page_list_,
23 KPageLinkedList&& page_list, KMemoryPermission owner_permission, 23 Svc::MemoryPermission owner_permission_,
24 KMemoryPermission user_permission, PAddr physical_address, std::size_t size, std::string name) { 24 Svc::MemoryPermission user_permission_,
25 PAddr physical_address_, std::size_t size_,
26 std::string name_) {
27 // Set members.
28 owner_process = owner_process_;
29 device_memory = &device_memory_;
30 page_list = std::move(page_list_);
31 owner_permission = owner_permission_;
32 user_permission = user_permission_;
33 physical_address = physical_address_;
34 size = size_;
35 name = name_;
25 36
26 const auto resource_limit = kernel.GetSystemResourceLimit(); 37 // Get the resource limit.
27 KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory, 38 KResourceLimit* reslimit = kernel.GetSystemResourceLimit();
28 size);
29 ASSERT(memory_reservation.Succeeded());
30 39
31 std::shared_ptr<KSharedMemory> shared_memory{ 40 // Reserve memory for ourselves.
32 std::make_shared<KSharedMemory>(kernel, device_memory)}; 41 KScopedResourceReservation memory_reservation(reslimit, LimitableResource::PhysicalMemory,
33 42 size_);
34 shared_memory->owner_process = owner_process; 43 R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
35 shared_memory->page_list = std::move(page_list);
36 shared_memory->owner_permission = owner_permission;
37 shared_memory->user_permission = user_permission;
38 shared_memory->physical_address = physical_address;
39 shared_memory->size = size;
40 shared_memory->name = name;
41 44
45 // Commit our reservation.
42 memory_reservation.Commit(); 46 memory_reservation.Commit();
43 return shared_memory; 47
48 // Set our resource limit.
49 resource_limit = reslimit;
50 resource_limit->Open();
51
52 // Mark initialized.
53 is_initialized = true;
54
55 // Clear all pages in the memory.
56 std::memset(device_memory_.GetPointer(physical_address_), 0, size_);
57
58 return RESULT_SUCCESS;
59}
60
61void KSharedMemory::Finalize() {
62 // Release the memory reservation.
63 resource_limit->Release(LimitableResource::PhysicalMemory, size);
64 resource_limit->Close();
65
66 // Perform inherited finalization.
67 KAutoObjectWithSlabHeapAndContainer<KSharedMemory, KAutoObjectWithList>::Finalize();
44} 68}
45 69
46ResultCode KSharedMemory::Map(Process& target_process, VAddr address, std::size_t size, 70ResultCode KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t size,
47 KMemoryPermission permissions) { 71 Svc::MemoryPermission permissions) {
48 const u64 page_count{(size + PageSize - 1) / PageSize}; 72 const u64 page_count{(size + PageSize - 1) / PageSize};
49 73
50 if (page_list.GetNumPages() != page_count) { 74 if (page_list.GetNumPages() != page_count) {
51 UNIMPLEMENTED_MSG("Page count does not match"); 75 UNIMPLEMENTED_MSG("Page count does not match");
52 } 76 }
53 77
54 const KMemoryPermission expected = 78 const Svc::MemoryPermission expected =
55 &target_process == owner_process ? owner_permission : user_permission; 79 &target_process == owner_process ? owner_permission : user_permission;
56 80
57 if (permissions != expected) { 81 if (permissions != expected) {
@@ -59,7 +83,17 @@ ResultCode KSharedMemory::Map(Process& target_process, VAddr address, std::size_
59 } 83 }
60 84
61 return target_process.PageTable().MapPages(address, page_list, KMemoryState::Shared, 85 return target_process.PageTable().MapPages(address, page_list, KMemoryState::Shared,
62 permissions); 86 ConvertToKMemoryPermission(permissions));
87}
88
89ResultCode KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t size) {
90 const u64 page_count{(size + PageSize - 1) / PageSize};
91
92 if (page_list.GetNumPages() != page_count) {
93 UNIMPLEMENTED_MSG("Page count does not match");
94 }
95
96 return target_process.PageTable().UnmapPages(address, page_list, KMemoryState::Shared);
63} 97}
64 98
65} // namespace Kernel 99} // namespace Kernel
diff --git a/src/core/hle/kernel/k_shared_memory.h b/src/core/hle/kernel/k_shared_memory.h
index 016e34be5..28939c93c 100644
--- a/src/core/hle/kernel/k_shared_memory.h
+++ b/src/core/hle/kernel/k_shared_memory.h
@@ -11,37 +11,27 @@
11#include "core/device_memory.h" 11#include "core/device_memory.h"
12#include "core/hle/kernel/k_memory_block.h" 12#include "core/hle/kernel/k_memory_block.h"
13#include "core/hle/kernel/k_page_linked_list.h" 13#include "core/hle/kernel/k_page_linked_list.h"
14#include "core/hle/kernel/object.h" 14#include "core/hle/kernel/k_process.h"
15#include "core/hle/kernel/process.h" 15#include "core/hle/kernel/slab_helpers.h"
16#include "core/hle/result.h" 16#include "core/hle/result.h"
17 17
18namespace Kernel { 18namespace Kernel {
19 19
20class KernelCore; 20class KernelCore;
21 21
22class KSharedMemory final : public Object { 22class KSharedMemory final
23 : public KAutoObjectWithSlabHeapAndContainer<KSharedMemory, KAutoObjectWithList> {
24 KERNEL_AUTOOBJECT_TRAITS(KSharedMemory, KAutoObject);
25
23public: 26public:
24 explicit KSharedMemory(KernelCore& kernel, Core::DeviceMemory& device_memory); 27 explicit KSharedMemory(KernelCore& kernel);
25 ~KSharedMemory() override; 28 ~KSharedMemory() override;
26 29
27 static std::shared_ptr<KSharedMemory> Create( 30 ResultCode Initialize(KernelCore& kernel_, Core::DeviceMemory& device_memory_,
28 KernelCore& kernel, Core::DeviceMemory& device_memory, Process* owner_process, 31 KProcess* owner_process_, KPageLinkedList&& page_list_,
29 KPageLinkedList&& page_list, KMemoryPermission owner_permission, 32 Svc::MemoryPermission owner_permission_,
30 KMemoryPermission user_permission, PAddr physical_address, std::size_t size, 33 Svc::MemoryPermission user_permission_, PAddr physical_address_,
31 std::string name); 34 std::size_t size_, std::string name_);
32
33 std::string GetTypeName() const override {
34 return "SharedMemory";
35 }
36
37 std::string GetName() const override {
38 return name;
39 }
40
41 static constexpr HandleType HANDLE_TYPE = HandleType::SharedMemory;
42 HandleType GetHandleType() const override {
43 return HANDLE_TYPE;
44 }
45 35
46 /** 36 /**
47 * Maps a shared memory block to an address in the target process' address space 37 * Maps a shared memory block to an address in the target process' address space
@@ -50,8 +40,16 @@ public:
50 * @param size Size of the shared memory block to map 40 * @param size Size of the shared memory block to map
51 * @param permissions Memory block map permissions (specified by SVC field) 41 * @param permissions Memory block map permissions (specified by SVC field)
52 */ 42 */
53 ResultCode Map(Process& target_process, VAddr address, std::size_t size, 43 ResultCode Map(KProcess& target_process, VAddr address, std::size_t size,
54 KMemoryPermission permissions); 44 Svc::MemoryPermission permissions);
45
46 /**
47 * Unmaps a shared memory block from an address in the target process' address space
48 * @param target_process Process on which to unmap the memory block
49 * @param address Address in system memory to unmap shared memory block
50 * @param size Size of the shared memory block to unmap
51 */
52 ResultCode Unmap(KProcess& target_process, VAddr address, std::size_t size);
55 53
56 /** 54 /**
57 * Gets a pointer to the shared memory block 55 * Gets a pointer to the shared memory block
@@ -59,7 +57,7 @@ public:
59 * @return A pointer to the shared memory block from the specified offset 57 * @return A pointer to the shared memory block from the specified offset
60 */ 58 */
61 u8* GetPointer(std::size_t offset = 0) { 59 u8* GetPointer(std::size_t offset = 0) {
62 return device_memory.GetPointer(physical_address + offset); 60 return device_memory->GetPointer(physical_address + offset);
63 } 61 }
64 62
65 /** 63 /**
@@ -68,20 +66,26 @@ public:
68 * @return A pointer to the shared memory block from the specified offset 66 * @return A pointer to the shared memory block from the specified offset
69 */ 67 */
70 const u8* GetPointer(std::size_t offset = 0) const { 68 const u8* GetPointer(std::size_t offset = 0) const {
71 return device_memory.GetPointer(physical_address + offset); 69 return device_memory->GetPointer(physical_address + offset);
72 } 70 }
73 71
74 void Finalize() override {} 72 virtual void Finalize() override;
73
74 virtual bool IsInitialized() const override {
75 return is_initialized;
76 }
77 static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
75 78
76private: 79private:
77 Core::DeviceMemory& device_memory; 80 Core::DeviceMemory* device_memory;
78 Process* owner_process{}; 81 KProcess* owner_process{};
79 KPageLinkedList page_list; 82 KPageLinkedList page_list;
80 KMemoryPermission owner_permission{}; 83 Svc::MemoryPermission owner_permission{};
81 KMemoryPermission user_permission{}; 84 Svc::MemoryPermission user_permission{};
82 PAddr physical_address{}; 85 PAddr physical_address{};
83 std::size_t size{}; 86 std::size_t size{};
84 std::string name; 87 KResourceLimit* resource_limit{};
88 bool is_initialized{};
85}; 89};
86 90
87} // namespace Kernel 91} // namespace Kernel
diff --git a/src/core/hle/kernel/k_slab_heap.h b/src/core/hle/kernel/k_slab_heap.h
index aa4471d2f..5ce9a1d7c 100644
--- a/src/core/hle/kernel/k_slab_heap.h
+++ b/src/core/hle/kernel/k_slab_heap.h
@@ -97,6 +97,7 @@ public:
97 void FreeImpl(void* obj) { 97 void FreeImpl(void* obj) {
98 // Don't allow freeing an object that wasn't allocated from this heap 98 // Don't allow freeing an object that wasn't allocated from this heap
99 ASSERT(Contains(reinterpret_cast<uintptr_t>(obj))); 99 ASSERT(Contains(reinterpret_cast<uintptr_t>(obj)));
100
100 impl.Free(obj); 101 impl.Free(obj);
101 } 102 }
102 103
@@ -148,6 +149,14 @@ public:
148 return obj; 149 return obj;
149 } 150 }
150 151
152 T* AllocateWithKernel(KernelCore& kernel) {
153 T* obj = static_cast<T*>(AllocateImpl());
154 if (obj != nullptr) {
155 new (obj) T(kernel);
156 }
157 return obj;
158 }
159
151 void Free(T* obj) { 160 void Free(T* obj) {
152 FreeImpl(obj); 161 FreeImpl(obj);
153 } 162 }
diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp
index 82f72a0fe..460b8a714 100644
--- a/src/core/hle/kernel/k_synchronization_object.cpp
+++ b/src/core/hle/kernel/k_synchronization_object.cpp
@@ -13,6 +13,11 @@
13 13
14namespace Kernel { 14namespace Kernel {
15 15
16void KSynchronizationObject::Finalize() {
17 this->OnFinalizeSynchronizationObject();
18 KAutoObject::Finalize();
19}
20
16ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index, 21ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
17 KSynchronizationObject** objects, const s32 num_objects, 22 KSynchronizationObject** objects, const s32 num_objects,
18 s64 timeout) { 23 s64 timeout) {
@@ -130,10 +135,7 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
130 return wait_result; 135 return wait_result;
131} 136}
132 137
133KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : Object{kernel} {} 138KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : KAutoObjectWithList{kernel} {}
134
135KSynchronizationObject::KSynchronizationObject(KernelCore& kernel, std::string&& name)
136 : Object{kernel, std::move(name)} {}
137 139
138KSynchronizationObject::~KSynchronizationObject() = default; 140KSynchronizationObject::~KSynchronizationObject() = default;
139 141
diff --git a/src/core/hle/kernel/k_synchronization_object.h b/src/core/hle/kernel/k_synchronization_object.h
index 5803718fd..a41dd1220 100644
--- a/src/core/hle/kernel/k_synchronization_object.h
+++ b/src/core/hle/kernel/k_synchronization_object.h
@@ -6,7 +6,7 @@
6 6
7#include <vector> 7#include <vector>
8 8
9#include "core/hle/kernel/object.h" 9#include "core/hle/kernel/k_auto_object.h"
10#include "core/hle/result.h" 10#include "core/hle/result.h"
11 11
12namespace Kernel { 12namespace Kernel {
@@ -16,7 +16,9 @@ class Synchronization;
16class KThread; 16class KThread;
17 17
18/// Class that represents a Kernel object that a thread can be waiting on 18/// Class that represents a Kernel object that a thread can be waiting on
19class KSynchronizationObject : public Object { 19class KSynchronizationObject : public KAutoObjectWithList {
20 KERNEL_AUTOOBJECT_TRAITS(KSynchronizationObject, KAutoObject);
21
20public: 22public:
21 struct ThreadListNode { 23 struct ThreadListNode {
22 ThreadListNode* next{}; 24 ThreadListNode* next{};
@@ -27,15 +29,18 @@ public:
27 KSynchronizationObject** objects, const s32 num_objects, 29 KSynchronizationObject** objects, const s32 num_objects,
28 s64 timeout); 30 s64 timeout);
29 31
32 virtual void Finalize() override;
33
30 [[nodiscard]] virtual bool IsSignaled() const = 0; 34 [[nodiscard]] virtual bool IsSignaled() const = 0;
31 35
32 [[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const; 36 [[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const;
33 37
34protected: 38protected:
35 explicit KSynchronizationObject(KernelCore& kernel); 39 explicit KSynchronizationObject(KernelCore& kernel);
36 explicit KSynchronizationObject(KernelCore& kernel, std::string&& name);
37 virtual ~KSynchronizationObject(); 40 virtual ~KSynchronizationObject();
38 41
42 virtual void OnFinalizeSynchronizationObject() {}
43
39 void NotifyAvailable(ResultCode result); 44 void NotifyAvailable(ResultCode result);
40 void NotifyAvailable() { 45 void NotifyAvailable() {
41 return this->NotifyAvailable(RESULT_SUCCESS); 46 return this->NotifyAvailable(RESULT_SUCCESS);
@@ -46,14 +51,4 @@ private:
46 ThreadListNode* thread_list_tail{}; 51 ThreadListNode* thread_list_tail{};
47}; 52};
48 53
49// Specialization of DynamicObjectCast for KSynchronizationObjects
50template <>
51inline std::shared_ptr<KSynchronizationObject> DynamicObjectCast<KSynchronizationObject>(
52 std::shared_ptr<Object> object) {
53 if (object != nullptr && object->IsWaitable()) {
54 return std::static_pointer_cast<KSynchronizationObject>(object);
55 }
56 return nullptr;
57}
58
59} // namespace Kernel 54} // namespace Kernel
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index e0f53287c..ef6dfeeca 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -18,17 +18,16 @@
18#include "core/core.h" 18#include "core/core.h"
19#include "core/cpu_manager.h" 19#include "core/cpu_manager.h"
20#include "core/hardware_properties.h" 20#include "core/hardware_properties.h"
21#include "core/hle/kernel/handle_table.h"
22#include "core/hle/kernel/k_condition_variable.h" 21#include "core/hle/kernel/k_condition_variable.h"
22#include "core/hle/kernel/k_handle_table.h"
23#include "core/hle/kernel/k_memory_layout.h" 23#include "core/hle/kernel/k_memory_layout.h"
24#include "core/hle/kernel/k_process.h"
24#include "core/hle/kernel/k_resource_limit.h" 25#include "core/hle/kernel/k_resource_limit.h"
25#include "core/hle/kernel/k_scheduler.h" 26#include "core/hle/kernel/k_scheduler.h"
26#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" 27#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
27#include "core/hle/kernel/k_thread.h" 28#include "core/hle/kernel/k_thread.h"
28#include "core/hle/kernel/k_thread_queue.h" 29#include "core/hle/kernel/k_thread_queue.h"
29#include "core/hle/kernel/kernel.h" 30#include "core/hle/kernel/kernel.h"
30#include "core/hle/kernel/object.h"
31#include "core/hle/kernel/process.h"
32#include "core/hle/kernel/svc_results.h" 31#include "core/hle/kernel/svc_results.h"
33#include "core/hle/kernel/time_manager.h" 32#include "core/hle/kernel/time_manager.h"
34#include "core/hle/result.h" 33#include "core/hle/result.h"
@@ -62,11 +61,11 @@ static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context,
62namespace Kernel { 61namespace Kernel {
63 62
64KThread::KThread(KernelCore& kernel) 63KThread::KThread(KernelCore& kernel)
65 : KSynchronizationObject{kernel}, activity_pause_lock{kernel} {} 64 : KAutoObjectWithSlabHeapAndContainer{kernel}, activity_pause_lock{kernel} {}
66KThread::~KThread() = default; 65KThread::~KThread() = default;
67 66
68ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio, 67ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
69 s32 virt_core, Process* owner, ThreadType type) { 68 s32 virt_core, KProcess* owner, ThreadType type) {
70 // Assert parameters are valid. 69 // Assert parameters are valid.
71 ASSERT((type == ThreadType::Main) || 70 ASSERT((type == ThreadType::Main) ||
72 (Svc::HighestThreadPriority <= prio && prio <= Svc::LowestThreadPriority)); 71 (Svc::HighestThreadPriority <= prio && prio <= Svc::LowestThreadPriority));
@@ -177,6 +176,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
177 // Set parent, if relevant. 176 // Set parent, if relevant.
178 if (owner != nullptr) { 177 if (owner != nullptr) {
179 parent = owner; 178 parent = owner;
179 parent->Open();
180 parent->IncrementThreadCount(); 180 parent->IncrementThreadCount();
181 } 181 }
182 182
@@ -209,14 +209,56 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
209} 209}
210 210
211ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg, 211ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
212 VAddr user_stack_top, s32 prio, s32 core, Process* owner, 212 VAddr user_stack_top, s32 prio, s32 core, KProcess* owner,
213 ThreadType type) { 213 ThreadType type, std::function<void(void*)>&& init_func,
214 void* init_func_parameter) {
214 // Initialize the thread. 215 // Initialize the thread.
215 R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type)); 216 R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
216 217
218 // Initialize host context.
219 thread->host_context =
220 std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter);
221
217 return RESULT_SUCCESS; 222 return RESULT_SUCCESS;
218} 223}
219 224
225ResultCode KThread::InitializeDummyThread(KThread* thread) {
226 return thread->Initialize({}, {}, {}, DefaultThreadPriority, 3, {}, ThreadType::Main);
227}
228
229ResultCode KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
230 return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
231 Core::CpuManager::GetIdleThreadStartFunc(),
232 system.GetCpuManager().GetStartFuncParamater());
233}
234
235ResultCode KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
236 KThreadFunction func, uintptr_t arg,
237 s32 virt_core) {
238 return InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr, ThreadType::HighPriority,
239 Core::CpuManager::GetSuspendThreadStartFunc(),
240 system.GetCpuManager().GetStartFuncParamater());
241}
242
243ResultCode KThread::InitializeUserThread(Core::System& system, KThread* thread,
244 KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
245 s32 prio, s32 virt_core, KProcess* owner) {
246 system.Kernel().GlobalSchedulerContext().AddThread(thread);
247 return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
248 ThreadType::User, Core::CpuManager::GetGuestThreadStartFunc(),
249 system.GetCpuManager().GetStartFuncParamater());
250}
251
252void KThread::PostDestroy(uintptr_t arg) {
253 KProcess* owner = reinterpret_cast<KProcess*>(arg & ~1ULL);
254 const bool resource_limit_release_hint = (arg & 1);
255 const s64 hint_value = (resource_limit_release_hint ? 0 : 1);
256 if (owner != nullptr) {
257 owner->GetResourceLimit()->Release(LimitableResource::Threads, 1, hint_value);
258 owner->Close();
259 }
260}
261
220void KThread::Finalize() { 262void KThread::Finalize() {
221 // If the thread has an owner process, unregister it. 263 // If the thread has an owner process, unregister it.
222 if (parent != nullptr) { 264 if (parent != nullptr) {
@@ -246,8 +288,10 @@ void KThread::Finalize() {
246 // Decrement the parent process's thread count. 288 // Decrement the parent process's thread count.
247 if (parent != nullptr) { 289 if (parent != nullptr) {
248 parent->DecrementThreadCount(); 290 parent->DecrementThreadCount();
249 parent->GetResourceLimit()->Release(LimitableResource::Threads, 1);
250 } 291 }
292
293 // Perform inherited finalization.
294 KAutoObjectWithSlabHeapAndContainer<KThread, KSynchronizationObject>::Finalize();
251} 295}
252 296
253bool KThread::IsSignaled() const { 297bool KThread::IsSignaled() const {
@@ -294,6 +338,9 @@ void KThread::StartTermination() {
294 338
295 // Register terminated dpc flag. 339 // Register terminated dpc flag.
296 RegisterDpc(DpcFlag::Terminated); 340 RegisterDpc(DpcFlag::Terminated);
341
342 // Close the thread.
343 this->Close();
297} 344}
298 345
299void KThread::Pin() { 346void KThread::Pin() {
@@ -932,7 +979,7 @@ void KThread::Exit() {
932 979
933 // Release the thread resource hint from parent. 980 // Release the thread resource hint from parent.
934 if (parent != nullptr) { 981 if (parent != nullptr) {
935 // TODO(bunnei): Hint that the resource is about to be released. 982 parent->GetResourceLimit()->Release(Kernel::LimitableResource::Threads, 0, 1);
936 resource_limit_release_hint = true; 983 resource_limit_release_hint = true;
937 } 984 }
938 985
@@ -995,56 +1042,6 @@ std::shared_ptr<Common::Fiber>& KThread::GetHostContext() {
995 return host_context; 1042 return host_context;
996} 1043}
997 1044
998ResultVal<std::shared_ptr<KThread>> KThread::CreateThread(Core::System& system,
999 ThreadType type_flags, std::string name,
1000 VAddr entry_point, u32 priority, u64 arg,
1001 s32 processor_id, VAddr stack_top,
1002 Process* owner_process) {
1003 auto& kernel = system.Kernel();
1004
1005 std::shared_ptr<KThread> thread = std::make_shared<KThread>(kernel);
1006
1007 if (const auto result =
1008 thread->InitializeThread(thread.get(), entry_point, arg, stack_top, priority,
1009 processor_id, owner_process, type_flags);
1010 result.IsError()) {
1011 return result;
1012 }
1013
1014 thread->name = name;
1015
1016 auto& scheduler = kernel.GlobalSchedulerContext();
1017 scheduler.AddThread(thread);
1018
1019 return MakeResult<std::shared_ptr<KThread>>(std::move(thread));
1020}
1021
1022ResultVal<std::shared_ptr<KThread>> KThread::CreateThread(
1023 Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point, u32 priority,
1024 u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process,
1025 std::function<void(void*)>&& thread_start_func, void* thread_start_parameter) {
1026 auto thread_result = CreateThread(system, type_flags, name, entry_point, priority, arg,
1027 processor_id, stack_top, owner_process);
1028
1029 if (thread_result.Succeeded()) {
1030 (*thread_result)->host_context =
1031 std::make_shared<Common::Fiber>(std::move(thread_start_func), thread_start_parameter);
1032 }
1033
1034 return thread_result;
1035}
1036
1037ResultVal<std::shared_ptr<KThread>> KThread::CreateUserThread(
1038 Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point, u32 priority,
1039 u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process) {
1040 std::function<void(void*)> init_func = Core::CpuManager::GetGuestThreadStartFunc();
1041
1042 void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
1043
1044 return CreateThread(system, type_flags, name, entry_point, priority, arg, processor_id,
1045 stack_top, owner_process, std::move(init_func), init_func_parameter);
1046}
1047
1048KThread* GetCurrentThreadPointer(KernelCore& kernel) { 1045KThread* GetCurrentThreadPointer(KernelCore& kernel) {
1049 return kernel.GetCurrentEmuThread(); 1046 return kernel.GetCurrentEmuThread();
1050} 1047}
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index b442dfe57..4145ef56c 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -19,7 +19,7 @@
19#include "core/hle/kernel/k_light_lock.h" 19#include "core/hle/kernel/k_light_lock.h"
20#include "core/hle/kernel/k_spin_lock.h" 20#include "core/hle/kernel/k_spin_lock.h"
21#include "core/hle/kernel/k_synchronization_object.h" 21#include "core/hle/kernel/k_synchronization_object.h"
22#include "core/hle/kernel/object.h" 22#include "core/hle/kernel/slab_helpers.h"
23#include "core/hle/kernel/svc_common.h" 23#include "core/hle/kernel/svc_common.h"
24#include "core/hle/kernel/svc_types.h" 24#include "core/hle/kernel/svc_types.h"
25#include "core/hle/result.h" 25#include "core/hle/result.h"
@@ -37,7 +37,7 @@ namespace Kernel {
37 37
38class GlobalSchedulerContext; 38class GlobalSchedulerContext;
39class KernelCore; 39class KernelCore;
40class Process; 40class KProcess;
41class KScheduler; 41class KScheduler;
42class KThreadQueue; 42class KThreadQueue;
43 43
@@ -99,9 +99,13 @@ enum class ThreadWaitReasonForDebugging : u32 {
99[[nodiscard]] KThread& GetCurrentThread(KernelCore& kernel); 99[[nodiscard]] KThread& GetCurrentThread(KernelCore& kernel);
100[[nodiscard]] s32 GetCurrentCoreId(KernelCore& kernel); 100[[nodiscard]] s32 GetCurrentCoreId(KernelCore& kernel);
101 101
102class KThread final : public KSynchronizationObject, public boost::intrusive::list_base_hook<> { 102class KThread final : public KAutoObjectWithSlabHeapAndContainer<KThread, KSynchronizationObject>,
103 public boost::intrusive::list_base_hook<> {
104 KERNEL_AUTOOBJECT_TRAITS(KThread, KSynchronizationObject);
105
106private:
103 friend class KScheduler; 107 friend class KScheduler;
104 friend class Process; 108 friend class KProcess;
105 109
106public: 110public:
107 static constexpr s32 DefaultThreadPriority = 44; 111 static constexpr s32 DefaultThreadPriority = 44;
@@ -115,74 +119,10 @@ public:
115 using ThreadContext64 = Core::ARM_Interface::ThreadContext64; 119 using ThreadContext64 = Core::ARM_Interface::ThreadContext64;
116 using WaiterList = boost::intrusive::list<KThread>; 120 using WaiterList = boost::intrusive::list<KThread>;
117 121
118 /**
119 * Creates and returns a new thread.
120 * @param system The instance of the whole system
121 * @param name The friendly name desired for the thread
122 * @param entry_point The address at which the thread should start execution
123 * @param priority The thread's priority
124 * @param arg User data to pass to the thread
125 * @param processor_id The ID(s) of the processors on which the thread is desired to be run
126 * @param stack_top The address of the thread's stack top
127 * @param owner_process The parent process for the thread, if null, it's a kernel thread
128 * @return A shared pointer to the newly created thread
129 */
130 [[nodiscard]] static ResultVal<std::shared_ptr<KThread>> CreateThread(
131 Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point,
132 u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process);
133
134 /**
135 * Creates and returns a new thread, with a specified entry point.
136 * @param system The instance of the whole system
137 * @param name The friendly name desired for the thread
138 * @param entry_point The address at which the thread should start execution
139 * @param priority The thread's priority
140 * @param arg User data to pass to the thread
141 * @param processor_id The ID(s) of the processors on which the thread is desired to be run
142 * @param stack_top The address of the thread's stack top
143 * @param owner_process The parent process for the thread, if null, it's a kernel thread
144 * @param thread_start_func The function where the host context will start.
145 * @param thread_start_parameter The parameter which will passed to host context on init
146 * @return A shared pointer to the newly created thread
147 */
148 [[nodiscard]] static ResultVal<std::shared_ptr<KThread>> CreateThread(
149 Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point,
150 u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process,
151 std::function<void(void*)>&& thread_start_func, void* thread_start_parameter);
152
153 /**
154 * Creates and returns a new thread for the emulated "user" process.
155 * @param system The instance of the whole system
156 * @param name The friendly name desired for the thread
157 * @param entry_point The address at which the thread should start execution
158 * @param priority The thread's priority
159 * @param arg User data to pass to the thread
160 * @param processor_id The ID(s) of the processors on which the thread is desired to be run
161 * @param stack_top The address of the thread's stack top
162 * @param owner_process The parent process for the thread, if null, it's a kernel thread
163 * @return A shared pointer to the newly created thread
164 */
165 [[nodiscard]] static ResultVal<std::shared_ptr<KThread>> CreateUserThread(
166 Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point,
167 u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process);
168
169 [[nodiscard]] std::string GetName() const override {
170 return name;
171 }
172
173 void SetName(std::string new_name) { 122 void SetName(std::string new_name) {
174 name = std::move(new_name); 123 name = std::move(new_name);
175 } 124 }
176 125
177 [[nodiscard]] std::string GetTypeName() const override {
178 return "Thread";
179 }
180
181 static constexpr HandleType HANDLE_TYPE = HandleType::Thread;
182 [[nodiscard]] HandleType GetHandleType() const override {
183 return HANDLE_TYPE;
184 }
185
186 /** 126 /**
187 * Gets the thread's current priority 127 * Gets the thread's current priority
188 * @return The current thread's priority 128 * @return The current thread's priority
@@ -257,10 +197,6 @@ public:
257 197
258 void Suspend(); 198 void Suspend();
259 199
260 void Finalize() override;
261
262 bool IsSignaled() const override;
263
264 void SetSyncedObject(KSynchronizationObject* obj, ResultCode wait_res) { 200 void SetSyncedObject(KSynchronizationObject* obj, ResultCode wait_res) {
265 synced_object = obj; 201 synced_object = obj;
266 wait_result = wait_res; 202 wait_result = wait_res;
@@ -354,11 +290,11 @@ public:
354 current_core_id = core; 290 current_core_id = core;
355 } 291 }
356 292
357 [[nodiscard]] Process* GetOwnerProcess() { 293 [[nodiscard]] KProcess* GetOwnerProcess() {
358 return parent; 294 return parent;
359 } 295 }
360 296
361 [[nodiscard]] const Process* GetOwnerProcess() const { 297 [[nodiscard]] const KProcess* GetOwnerProcess() const {
362 return parent; 298 return parent;
363 } 299 }
364 300
@@ -422,6 +358,40 @@ public:
422 return termination_requested || GetRawState() == ThreadState::Terminated; 358 return termination_requested || GetRawState() == ThreadState::Terminated;
423 } 359 }
424 360
361 [[nodiscard]] virtual u64 GetId() const override final {
362 return this->GetThreadID();
363 }
364
365 [[nodiscard]] virtual bool IsInitialized() const override {
366 return initialized;
367 }
368
369 [[nodiscard]] virtual uintptr_t GetPostDestroyArgument() const override {
370 return reinterpret_cast<uintptr_t>(parent) | (resource_limit_release_hint ? 1 : 0);
371 }
372
373 virtual void Finalize() override;
374
375 [[nodiscard]] virtual bool IsSignaled() const override;
376
377 static void PostDestroy(uintptr_t arg);
378
379 [[nodiscard]] static ResultCode InitializeDummyThread(KThread* thread);
380
381 [[nodiscard]] static ResultCode InitializeIdleThread(Core::System& system, KThread* thread,
382 s32 virt_core);
383
384 [[nodiscard]] static ResultCode InitializeHighPriorityThread(Core::System& system,
385 KThread* thread,
386 KThreadFunction func,
387 uintptr_t arg, s32 virt_core);
388
389 [[nodiscard]] static ResultCode InitializeUserThread(Core::System& system, KThread* thread,
390 KThreadFunction func, uintptr_t arg,
391 VAddr user_stack_top, s32 prio,
392 s32 virt_core, KProcess* owner);
393
394public:
425 struct StackParameters { 395 struct StackParameters {
426 u8 svc_permission[0x10]; 396 u8 svc_permission[0x10];
427 std::atomic<u8> dpc_flags; 397 std::atomic<u8> dpc_flags;
@@ -671,11 +641,13 @@ private:
671 void StartTermination(); 641 void StartTermination();
672 642
673 [[nodiscard]] ResultCode Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, 643 [[nodiscard]] ResultCode Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
674 s32 prio, s32 virt_core, Process* owner, ThreadType type); 644 s32 prio, s32 virt_core, KProcess* owner, ThreadType type);
675 645
676 [[nodiscard]] static ResultCode InitializeThread(KThread* thread, KThreadFunction func, 646 [[nodiscard]] static ResultCode InitializeThread(KThread* thread, KThreadFunction func,
677 uintptr_t arg, VAddr user_stack_top, s32 prio, 647 uintptr_t arg, VAddr user_stack_top, s32 prio,
678 s32 core, Process* owner, ThreadType type); 648 s32 core, KProcess* owner, ThreadType type,
649 std::function<void(void*)>&& init_func,
650 void* init_func_parameter);
679 651
680 static void RestorePriority(KernelCore& kernel, KThread* thread); 652 static void RestorePriority(KernelCore& kernel, KThread* thread);
681 653
@@ -697,7 +669,7 @@ private:
697 std::atomic<s64> cpu_time{}; 669 std::atomic<s64> cpu_time{};
698 KSynchronizationObject* synced_object{}; 670 KSynchronizationObject* synced_object{};
699 VAddr address_key{}; 671 VAddr address_key{};
700 Process* parent{}; 672 KProcess* parent{};
701 VAddr kernel_stack_top{}; 673 VAddr kernel_stack_top{};
702 u32* light_ipc_data{}; 674 u32* light_ipc_data{};
703 VAddr tls_address{}; 675 VAddr tls_address{};
@@ -742,7 +714,6 @@ private:
742 VAddr mutex_wait_address_for_debugging{}; 714 VAddr mutex_wait_address_for_debugging{};
743 ThreadWaitReasonForDebugging wait_reason_for_debugging{}; 715 ThreadWaitReasonForDebugging wait_reason_for_debugging{};
744 ThreadType thread_type_for_debugging{}; 716 ThreadType thread_type_for_debugging{};
745 std::string name;
746 717
747public: 718public:
748 using ConditionVariableThreadTreeType = ConditionVariableThreadTree; 719 using ConditionVariableThreadTreeType = ConditionVariableThreadTree;
diff --git a/src/core/hle/kernel/k_transfer_memory.cpp b/src/core/hle/kernel/k_transfer_memory.cpp
new file mode 100644
index 000000000..201617d32
--- /dev/null
+++ b/src/core/hle/kernel/k_transfer_memory.cpp
@@ -0,0 +1,45 @@
1// Copyright 2021 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "core/hle/kernel/k_process.h"
6#include "core/hle/kernel/k_resource_limit.h"
7#include "core/hle/kernel/k_transfer_memory.h"
8#include "core/hle/kernel/kernel.h"
9
10namespace Kernel {
11
12KTransferMemory::KTransferMemory(KernelCore& kernel)
13 : KAutoObjectWithSlabHeapAndContainer{kernel} {}
14
15KTransferMemory::~KTransferMemory() = default;
16
17ResultCode KTransferMemory::Initialize(VAddr address_, std::size_t size_,
18 Svc::MemoryPermission owner_perm_) {
19 // Set members.
20 owner = kernel.CurrentProcess();
21
22 // TODO(bunnei): Lock for transfer memory
23
24 // Set remaining tracking members.
25 owner->Open();
26 owner_perm = owner_perm_;
27 address = address_;
28 size = size_;
29 is_initialized = true;
30
31 return RESULT_SUCCESS;
32}
33
34void KTransferMemory::Finalize() {
35 // Perform inherited finalization.
36 KAutoObjectWithSlabHeapAndContainer<KTransferMemory, KAutoObjectWithList>::Finalize();
37}
38
39void KTransferMemory::PostDestroy(uintptr_t arg) {
40 KProcess* owner = reinterpret_cast<KProcess*>(arg);
41 owner->GetResourceLimit()->Release(LimitableResource::TransferMemory, 1);
42 owner->Close();
43}
44
45} // namespace Kernel
diff --git a/src/core/hle/kernel/k_transfer_memory.h b/src/core/hle/kernel/k_transfer_memory.h
new file mode 100644
index 000000000..f56398b9c
--- /dev/null
+++ b/src/core/hle/kernel/k_transfer_memory.h
@@ -0,0 +1,66 @@
1// Copyright 2021 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8
9#include "core/hle/kernel/slab_helpers.h"
10#include "core/hle/kernel/svc_types.h"
11#include "core/hle/result.h"
12
13union ResultCode;
14
15namespace Core::Memory {
16class Memory;
17}
18
19namespace Kernel {
20
21class KernelCore;
22class KProcess;
23
24class KTransferMemory final
25 : public KAutoObjectWithSlabHeapAndContainer<KTransferMemory, KAutoObjectWithList> {
26 KERNEL_AUTOOBJECT_TRAITS(KTransferMemory, KAutoObject);
27
28public:
29 explicit KTransferMemory(KernelCore& kernel);
30 virtual ~KTransferMemory() override;
31
32 ResultCode Initialize(VAddr address_, std::size_t size_, Svc::MemoryPermission owner_perm_);
33
34 virtual void Finalize() override;
35
36 virtual bool IsInitialized() const override {
37 return is_initialized;
38 }
39
40 virtual uintptr_t GetPostDestroyArgument() const override {
41 return reinterpret_cast<uintptr_t>(owner);
42 }
43
44 static void PostDestroy(uintptr_t arg);
45
46 KProcess* GetOwner() const {
47 return owner;
48 }
49
50 VAddr GetSourceAddress() const {
51 return address;
52 }
53
54 size_t GetSize() const {
55 return is_initialized ? size * PageSize : 0;
56 }
57
58private:
59 KProcess* owner{};
60 VAddr address{};
61 Svc::MemoryPermission owner_perm{};
62 size_t size{};
63 bool is_initialized{};
64};
65
66} // namespace Kernel
diff --git a/src/core/hle/kernel/k_writable_event.cpp b/src/core/hle/kernel/k_writable_event.cpp
index 25c52edb2..a430e0661 100644
--- a/src/core/hle/kernel/k_writable_event.cpp
+++ b/src/core/hle/kernel/k_writable_event.cpp
@@ -8,20 +8,28 @@
8 8
9namespace Kernel { 9namespace Kernel {
10 10
11KWritableEvent::KWritableEvent(KernelCore& kernel, std::string&& name) 11KWritableEvent::KWritableEvent(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {}
12 : Object{kernel, std::move(name)} {} 12
13KWritableEvent::~KWritableEvent() = default; 13KWritableEvent::~KWritableEvent() = default;
14 14
15void KWritableEvent::Initialize(KEvent* parent_) { 15void KWritableEvent::Initialize(KEvent* parent_, std::string&& name_) {
16 parent = parent_; 16 parent = parent_;
17 name = std::move(name_);
18 parent->GetReadableEvent().Open();
17} 19}
18 20
19ResultCode KWritableEvent::Signal() { 21ResultCode KWritableEvent::Signal() {
20 return parent->GetReadableEvent()->Signal(); 22 return parent->GetReadableEvent().Signal();
21} 23}
22 24
23ResultCode KWritableEvent::Clear() { 25ResultCode KWritableEvent::Clear() {
24 return parent->GetReadableEvent()->Clear(); 26 return parent->GetReadableEvent().Clear();
27}
28
29void KWritableEvent::Destroy() {
30 // Close our references.
31 parent->GetReadableEvent().Close();
32 parent->Close();
25} 33}
26 34
27} // namespace Kernel 35} // namespace Kernel
diff --git a/src/core/hle/kernel/k_writable_event.h b/src/core/hle/kernel/k_writable_event.h
index 518f5448d..154d2382c 100644
--- a/src/core/hle/kernel/k_writable_event.h
+++ b/src/core/hle/kernel/k_writable_event.h
@@ -4,7 +4,8 @@
4 4
5#pragma once 5#pragma once
6 6
7#include "core/hle/kernel/object.h" 7#include "core/hle/kernel/k_auto_object.h"
8#include "core/hle/kernel/slab_helpers.h"
8#include "core/hle/result.h" 9#include "core/hle/result.h"
9 10
10namespace Kernel { 11namespace Kernel {
@@ -12,24 +13,19 @@ namespace Kernel {
12class KernelCore; 13class KernelCore;
13class KEvent; 14class KEvent;
14 15
15class KWritableEvent final : public Object { 16class KWritableEvent final
17 : public KAutoObjectWithSlabHeapAndContainer<KWritableEvent, KAutoObjectWithList> {
18 KERNEL_AUTOOBJECT_TRAITS(KWritableEvent, KAutoObject);
19
16public: 20public:
17 explicit KWritableEvent(KernelCore& kernel, std::string&& name); 21 explicit KWritableEvent(KernelCore& kernel);
18 ~KWritableEvent() override; 22 ~KWritableEvent() override;
19 23
20 std::string GetTypeName() const override { 24 virtual void Destroy() override;
21 return "KWritableEvent";
22 }
23
24 static constexpr HandleType HANDLE_TYPE = HandleType::WritableEvent;
25 HandleType GetHandleType() const override {
26 return HANDLE_TYPE;
27 }
28
29 void Initialize(KEvent* parent_);
30 25
31 void Finalize() override {} 26 static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
32 27
28 void Initialize(KEvent* parent_, std::string&& name_);
33 ResultCode Signal(); 29 ResultCode Signal();
34 ResultCode Clear(); 30 ResultCode Clear();
35 31
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 5c4f45ab4..32bbf2d9b 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -26,10 +26,12 @@
26#include "core/cpu_manager.h" 26#include "core/cpu_manager.h"
27#include "core/device_memory.h" 27#include "core/device_memory.h"
28#include "core/hardware_properties.h" 28#include "core/hardware_properties.h"
29#include "core/hle/kernel/client_port.h" 29#include "core/hle/kernel/init/init_slab_setup.h"
30#include "core/hle/kernel/handle_table.h" 30#include "core/hle/kernel/k_client_port.h"
31#include "core/hle/kernel/k_handle_table.h"
31#include "core/hle/kernel/k_memory_layout.h" 32#include "core/hle/kernel/k_memory_layout.h"
32#include "core/hle/kernel/k_memory_manager.h" 33#include "core/hle/kernel/k_memory_manager.h"
34#include "core/hle/kernel/k_process.h"
33#include "core/hle/kernel/k_resource_limit.h" 35#include "core/hle/kernel/k_resource_limit.h"
34#include "core/hle/kernel/k_scheduler.h" 36#include "core/hle/kernel/k_scheduler.h"
35#include "core/hle/kernel/k_shared_memory.h" 37#include "core/hle/kernel/k_shared_memory.h"
@@ -37,7 +39,6 @@
37#include "core/hle/kernel/k_thread.h" 39#include "core/hle/kernel/k_thread.h"
38#include "core/hle/kernel/kernel.h" 40#include "core/hle/kernel/kernel.h"
39#include "core/hle/kernel/physical_core.h" 41#include "core/hle/kernel/physical_core.h"
40#include "core/hle/kernel/process.h"
41#include "core/hle/kernel/service_thread.h" 42#include "core/hle/kernel/service_thread.h"
42#include "core/hle/kernel/svc_results.h" 43#include "core/hle/kernel/svc_results.h"
43#include "core/hle/kernel/time_manager.h" 44#include "core/hle/kernel/time_manager.h"
@@ -51,7 +52,7 @@ namespace Kernel {
51 52
52struct KernelCore::Impl { 53struct KernelCore::Impl {
53 explicit Impl(Core::System& system, KernelCore& kernel) 54 explicit Impl(Core::System& system, KernelCore& kernel)
54 : time_manager{system}, global_handle_table{kernel}, system{system} {} 55 : time_manager{system}, object_list_container{kernel}, system{system} {}
55 56
56 void SetMulticore(bool is_multicore) { 57 void SetMulticore(bool is_multicore) {
57 this->is_multicore = is_multicore; 58 this->is_multicore = is_multicore;
@@ -59,8 +60,7 @@ struct KernelCore::Impl {
59 60
60 void Initialize(KernelCore& kernel) { 61 void Initialize(KernelCore& kernel) {
61 global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel); 62 global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
62 63 global_handle_table = std::make_unique<Kernel::KHandleTable>(kernel);
63 RegisterHostThread();
64 64
65 service_thread_manager = 65 service_thread_manager =
66 std::make_unique<Common::ThreadWorker>(1, "yuzu:ServiceThreadManager"); 66 std::make_unique<Common::ThreadWorker>(1, "yuzu:ServiceThreadManager");
@@ -69,14 +69,20 @@ struct KernelCore::Impl {
69 InitializePhysicalCores(); 69 InitializePhysicalCores();
70 70
71 // Derive the initial memory layout from the emulated board 71 // Derive the initial memory layout from the emulated board
72 Init::InitializeSlabResourceCounts(kernel);
72 KMemoryLayout memory_layout; 73 KMemoryLayout memory_layout;
73 DeriveInitialMemoryLayout(memory_layout); 74 DeriveInitialMemoryLayout(memory_layout);
74 InitializeMemoryLayout(memory_layout); 75 Init::InitializeSlabHeaps(system, memory_layout);
76
77 // Initialize kernel memory and resources.
75 InitializeSystemResourceLimit(kernel, system.CoreTiming(), memory_layout); 78 InitializeSystemResourceLimit(kernel, system.CoreTiming(), memory_layout);
76 InitializeSlabHeaps(); 79 InitializeMemoryLayout(memory_layout);
80 InitializePageSlab();
77 InitializeSchedulers(); 81 InitializeSchedulers();
78 InitializeSuspendThreads(); 82 InitializeSuspendThreads();
79 InitializePreemption(kernel); 83 InitializePreemption(kernel);
84
85 RegisterHostThread();
80 } 86 }
81 87
82 void InitializeCores() { 88 void InitializeCores() {
@@ -93,34 +99,49 @@ struct KernelCore::Impl {
93 service_threads.clear(); 99 service_threads.clear();
94 100
95 next_object_id = 0; 101 next_object_id = 0;
96 next_kernel_process_id = Process::InitialKIPIDMin; 102 next_kernel_process_id = KProcess::InitialKIPIDMin;
97 next_user_process_id = Process::ProcessIDMin; 103 next_user_process_id = KProcess::ProcessIDMin;
98 next_thread_id = 1; 104 next_thread_id = 1;
99 105
100 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { 106 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
101 if (suspend_threads[i]) { 107 if (suspend_threads[core_id]) {
102 suspend_threads[i].reset(); 108 suspend_threads[core_id]->Close();
109 suspend_threads[core_id] = nullptr;
103 } 110 }
111
112 schedulers[core_id].reset();
104 } 113 }
105 114
106 cores.clear(); 115 cores.clear();
107 116
108 current_process = nullptr; 117 if (current_process) {
118 current_process->Close();
119 current_process = nullptr;
120 }
109 121
110 global_handle_table.Clear(); 122 global_handle_table.reset();
111 123
112 preemption_event = nullptr; 124 preemption_event = nullptr;
113 125
126 for (auto& iter : named_ports) {
127 iter.second->Close();
128 }
114 named_ports.clear(); 129 named_ports.clear();
115 130
116 exclusive_monitor.reset(); 131 exclusive_monitor.reset();
117 132
118 hid_shared_mem = nullptr; 133 // Cleanup persistent kernel objects
119 font_shared_mem = nullptr; 134 auto CleanupObject = [](KAutoObject* obj) {
120 irs_shared_mem = nullptr; 135 if (obj) {
121 time_shared_mem = nullptr; 136 obj->Close();
122 137 obj = nullptr;
123 system_resource_limit = nullptr; 138 }
139 };
140 CleanupObject(hid_shared_mem);
141 CleanupObject(font_shared_mem);
142 CleanupObject(irs_shared_mem);
143 CleanupObject(time_shared_mem);
144 CleanupObject(system_resource_limit);
124 145
125 // Next host thead ID to use, 0-3 IDs represent core threads, >3 represent others 146 // Next host thead ID to use, 0-3 IDs represent core threads, >3 represent others
126 next_host_thread_id = Core::Hardware::NUM_CPU_CORES; 147 next_host_thread_id = Core::Hardware::NUM_CPU_CORES;
@@ -145,7 +166,9 @@ struct KernelCore::Impl {
145 void InitializeSystemResourceLimit(KernelCore& kernel, 166 void InitializeSystemResourceLimit(KernelCore& kernel,
146 const Core::Timing::CoreTiming& core_timing, 167 const Core::Timing::CoreTiming& core_timing,
147 const KMemoryLayout& memory_layout) { 168 const KMemoryLayout& memory_layout) {
148 system_resource_limit = std::make_shared<KResourceLimit>(kernel, core_timing); 169 system_resource_limit = KResourceLimit::Create(system.Kernel());
170 system_resource_limit->Initialize(&core_timing);
171
149 const auto [total_size, kernel_size] = memory_layout.GetTotalAndKernelMemorySizes(); 172 const auto [total_size, kernel_size] = memory_layout.GetTotalAndKernelMemorySizes();
150 173
151 // If setting the default system values fails, then something seriously wrong has occurred. 174 // If setting the default system values fails, then something seriously wrong has occurred.
@@ -189,19 +212,16 @@ struct KernelCore::Impl {
189 } 212 }
190 213
191 void InitializeSuspendThreads() { 214 void InitializeSuspendThreads() {
192 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { 215 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
193 std::string name = "Suspend Thread Id:" + std::to_string(i); 216 suspend_threads[core_id] = KThread::Create(system.Kernel());
194 std::function<void(void*)> init_func = Core::CpuManager::GetSuspendThreadStartFunc(); 217 ASSERT(KThread::InitializeHighPriorityThread(system, suspend_threads[core_id], {}, {},
195 void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater(); 218 core_id)
196 auto thread_res = KThread::CreateThread( 219 .IsSuccess());
197 system, ThreadType::HighPriority, std::move(name), 0, 0, 0, static_cast<u32>(i), 0, 220 suspend_threads[core_id]->SetName(fmt::format("SuspendThread:{}", core_id));
198 nullptr, std::move(init_func), init_func_parameter);
199
200 suspend_threads[i] = std::move(thread_res).Unwrap();
201 } 221 }
202 } 222 }
203 223
204 void MakeCurrentProcess(Process* process) { 224 void MakeCurrentProcess(KProcess* process) {
205 current_process = process; 225 current_process = process;
206 if (process == nullptr) { 226 if (process == nullptr) {
207 return; 227 return;
@@ -232,11 +252,15 @@ struct KernelCore::Impl {
232 252
233 // Gets the dummy KThread for the caller, allocating a new one if this is the first time 253 // Gets the dummy KThread for the caller, allocating a new one if this is the first time
234 KThread* GetHostDummyThread() { 254 KThread* GetHostDummyThread() {
235 const thread_local auto thread = 255 auto make_thread = [this]() {
236 KThread::CreateThread( 256 std::unique_ptr<KThread> thread = std::make_unique<KThread>(system.Kernel());
237 system, ThreadType::Main, fmt::format("DummyThread:{}", GetHostThreadId()), 0, 257 KAutoObject::Create(thread.get());
238 KThread::DefaultThreadPriority, 0, static_cast<u32>(3), 0, nullptr) 258 ASSERT(KThread::InitializeDummyThread(thread.get()).IsSuccess());
239 .Unwrap(); 259 thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId()));
260 return std::move(thread);
261 };
262
263 thread_local auto thread = make_thread();
240 return thread.get(); 264 return thread.get();
241 } 265 }
242 266
@@ -371,7 +395,8 @@ struct KernelCore::Impl {
371 const size_t resource_region_size = memory_layout.GetResourceRegionSizeForInit(); 395 const size_t resource_region_size = memory_layout.GetResourceRegionSizeForInit();
372 396
373 // Determine the size of the slab region. 397 // Determine the size of the slab region.
374 const size_t slab_region_size = Common::AlignUp(KernelSlabHeapSize, PageSize); 398 const size_t slab_region_size =
399 Common::AlignUp(Init::CalculateTotalSlabHeapSize(system.Kernel()), PageSize);
375 ASSERT(slab_region_size <= resource_region_size); 400 ASSERT(slab_region_size <= resource_region_size);
376 401
377 // Setup the slab region. 402 // Setup the slab region.
@@ -569,25 +594,30 @@ struct KernelCore::Impl {
569 const PAddr irs_phys_addr{system_pool.GetAddress() + hid_size + font_size}; 594 const PAddr irs_phys_addr{system_pool.GetAddress() + hid_size + font_size};
570 const PAddr time_phys_addr{system_pool.GetAddress() + hid_size + font_size + irs_size}; 595 const PAddr time_phys_addr{system_pool.GetAddress() + hid_size + font_size + irs_size};
571 596
572 hid_shared_mem = Kernel::KSharedMemory::Create( 597 hid_shared_mem = KSharedMemory::Create(system.Kernel());
573 system.Kernel(), system.DeviceMemory(), nullptr, {hid_phys_addr, hid_size / PageSize}, 598 font_shared_mem = KSharedMemory::Create(system.Kernel());
574 KMemoryPermission::None, KMemoryPermission::Read, hid_phys_addr, hid_size, 599 irs_shared_mem = KSharedMemory::Create(system.Kernel());
575 "HID:SharedMemory"); 600 time_shared_mem = KSharedMemory::Create(system.Kernel());
576 font_shared_mem = Kernel::KSharedMemory::Create( 601
577 system.Kernel(), system.DeviceMemory(), nullptr, {font_phys_addr, font_size / PageSize}, 602 hid_shared_mem->Initialize(system.Kernel(), system.DeviceMemory(), nullptr,
578 KMemoryPermission::None, KMemoryPermission::Read, font_phys_addr, font_size, 603 {hid_phys_addr, hid_size / PageSize},
579 "Font:SharedMemory"); 604 Svc::MemoryPermission::None, Svc::MemoryPermission::Read,
580 irs_shared_mem = Kernel::KSharedMemory::Create( 605 hid_phys_addr, hid_size, "HID:SharedMemory");
581 system.Kernel(), system.DeviceMemory(), nullptr, {irs_phys_addr, irs_size / PageSize}, 606 font_shared_mem->Initialize(system.Kernel(), system.DeviceMemory(), nullptr,
582 KMemoryPermission::None, KMemoryPermission::Read, irs_phys_addr, irs_size, 607 {font_phys_addr, font_size / PageSize},
583 "IRS:SharedMemory"); 608 Svc::MemoryPermission::None, Svc::MemoryPermission::Read,
584 time_shared_mem = Kernel::KSharedMemory::Create( 609 font_phys_addr, font_size, "Font:SharedMemory");
585 system.Kernel(), system.DeviceMemory(), nullptr, {time_phys_addr, time_size / PageSize}, 610 irs_shared_mem->Initialize(system.Kernel(), system.DeviceMemory(), nullptr,
586 KMemoryPermission::None, KMemoryPermission::Read, time_phys_addr, time_size, 611 {irs_phys_addr, irs_size / PageSize},
587 "Time:SharedMemory"); 612 Svc::MemoryPermission::None, Svc::MemoryPermission::Read,
613 irs_phys_addr, irs_size, "IRS:SharedMemory");
614 time_shared_mem->Initialize(system.Kernel(), system.DeviceMemory(), nullptr,
615 {time_phys_addr, time_size / PageSize},
616 Svc::MemoryPermission::None, Svc::MemoryPermission::Read,
617 time_phys_addr, time_size, "Time:SharedMemory");
588 } 618 }
589 619
590 void InitializeSlabHeaps() { 620 void InitializePageSlab() {
591 // Allocate slab heaps 621 // Allocate slab heaps
592 user_slab_heap_pages = std::make_unique<KSlabHeap<Page>>(); 622 user_slab_heap_pages = std::make_unique<KSlabHeap<Page>>();
593 623
@@ -596,30 +626,33 @@ struct KernelCore::Impl {
596 // Reserve slab heaps 626 // Reserve slab heaps
597 ASSERT( 627 ASSERT(
598 system_resource_limit->Reserve(LimitableResource::PhysicalMemory, user_slab_heap_size)); 628 system_resource_limit->Reserve(LimitableResource::PhysicalMemory, user_slab_heap_size));
599 // Initialize slab heaps 629 // Initialize slab heap
600 user_slab_heap_pages->Initialize( 630 user_slab_heap_pages->Initialize(
601 system.DeviceMemory().GetPointer(Core::DramMemoryMap::SlabHeapBase), 631 system.DeviceMemory().GetPointer(Core::DramMemoryMap::SlabHeapBase),
602 user_slab_heap_size); 632 user_slab_heap_size);
603 } 633 }
604 634
605 std::atomic<u32> next_object_id{0}; 635 std::atomic<u32> next_object_id{0};
606 std::atomic<u64> next_kernel_process_id{Process::InitialKIPIDMin}; 636 std::atomic<u64> next_kernel_process_id{KProcess::InitialKIPIDMin};
607 std::atomic<u64> next_user_process_id{Process::ProcessIDMin}; 637 std::atomic<u64> next_user_process_id{KProcess::ProcessIDMin};
608 std::atomic<u64> next_thread_id{1}; 638 std::atomic<u64> next_thread_id{1};
609 639
610 // Lists all processes that exist in the current session. 640 // Lists all processes that exist in the current session.
611 std::vector<std::shared_ptr<Process>> process_list; 641 std::vector<KProcess*> process_list;
612 Process* current_process = nullptr; 642 KProcess* current_process{};
613 std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context; 643 std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context;
614 Kernel::TimeManager time_manager; 644 Kernel::TimeManager time_manager;
615 645
616 std::shared_ptr<KResourceLimit> system_resource_limit; 646 Init::KSlabResourceCounts slab_resource_counts{};
647 KResourceLimit* system_resource_limit{};
617 648
618 std::shared_ptr<Core::Timing::EventType> preemption_event; 649 std::shared_ptr<Core::Timing::EventType> preemption_event;
619 650
620 // This is the kernel's handle table or supervisor handle table which 651 // This is the kernel's handle table or supervisor handle table which
621 // stores all the objects in place. 652 // stores all the objects in place.
622 HandleTable global_handle_table; 653 std::unique_ptr<KHandleTable> global_handle_table;
654
655 KAutoObjectWithListContainer object_list_container;
623 656
624 /// Map of named ports managed by the kernel, which can be retrieved using 657 /// Map of named ports managed by the kernel, which can be retrieved using
625 /// the ConnectToPort SVC. 658 /// the ConnectToPort SVC.
@@ -636,10 +669,10 @@ struct KernelCore::Impl {
636 std::unique_ptr<KSlabHeap<Page>> user_slab_heap_pages; 669 std::unique_ptr<KSlabHeap<Page>> user_slab_heap_pages;
637 670
638 // Shared memory for services 671 // Shared memory for services
639 std::shared_ptr<Kernel::KSharedMemory> hid_shared_mem; 672 Kernel::KSharedMemory* hid_shared_mem{};
640 std::shared_ptr<Kernel::KSharedMemory> font_shared_mem; 673 Kernel::KSharedMemory* font_shared_mem{};
641 std::shared_ptr<Kernel::KSharedMemory> irs_shared_mem; 674 Kernel::KSharedMemory* irs_shared_mem{};
642 std::shared_ptr<Kernel::KSharedMemory> time_shared_mem; 675 Kernel::KSharedMemory* time_shared_mem{};
643 676
644 // Threads used for services 677 // Threads used for services
645 std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads; 678 std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads;
@@ -648,7 +681,7 @@ struct KernelCore::Impl {
648 // the release of itself 681 // the release of itself
649 std::unique_ptr<Common::ThreadWorker> service_thread_manager; 682 std::unique_ptr<Common::ThreadWorker> service_thread_manager;
650 683
651 std::array<std::shared_ptr<KThread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{}; 684 std::array<KThread*, Core::Hardware::NUM_CPU_CORES> suspend_threads;
652 std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{}; 685 std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
653 std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{}; 686 std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
654 687
@@ -663,15 +696,14 @@ struct KernelCore::Impl {
663}; 696};
664 697
665KernelCore::KernelCore(Core::System& system) : impl{std::make_unique<Impl>(system, *this)} {} 698KernelCore::KernelCore(Core::System& system) : impl{std::make_unique<Impl>(system, *this)} {}
666KernelCore::~KernelCore() { 699KernelCore::~KernelCore() = default;
667 Shutdown();
668}
669 700
670void KernelCore::SetMulticore(bool is_multicore) { 701void KernelCore::SetMulticore(bool is_multicore) {
671 impl->SetMulticore(is_multicore); 702 impl->SetMulticore(is_multicore);
672} 703}
673 704
674void KernelCore::Initialize() { 705void KernelCore::Initialize() {
706 slab_heap_container = std::make_unique<SlabHeapContainer>();
675 impl->Initialize(*this); 707 impl->Initialize(*this);
676} 708}
677 709
@@ -683,31 +715,35 @@ void KernelCore::Shutdown() {
683 impl->Shutdown(); 715 impl->Shutdown();
684} 716}
685 717
686std::shared_ptr<KResourceLimit> KernelCore::GetSystemResourceLimit() const { 718const KResourceLimit* KernelCore::GetSystemResourceLimit() const {
687 return impl->system_resource_limit; 719 return impl->system_resource_limit;
688} 720}
689 721
690std::shared_ptr<KThread> KernelCore::RetrieveThreadFromGlobalHandleTable(Handle handle) const { 722KResourceLimit* KernelCore::GetSystemResourceLimit() {
691 return impl->global_handle_table.Get<KThread>(handle); 723 return impl->system_resource_limit;
724}
725
726KScopedAutoObject<KThread> KernelCore::RetrieveThreadFromGlobalHandleTable(Handle handle) const {
727 return impl->global_handle_table->GetObject<KThread>(handle);
692} 728}
693 729
694void KernelCore::AppendNewProcess(std::shared_ptr<Process> process) { 730void KernelCore::AppendNewProcess(KProcess* process) {
695 impl->process_list.push_back(std::move(process)); 731 impl->process_list.push_back(process);
696} 732}
697 733
698void KernelCore::MakeCurrentProcess(Process* process) { 734void KernelCore::MakeCurrentProcess(KProcess* process) {
699 impl->MakeCurrentProcess(process); 735 impl->MakeCurrentProcess(process);
700} 736}
701 737
702Process* KernelCore::CurrentProcess() { 738KProcess* KernelCore::CurrentProcess() {
703 return impl->current_process; 739 return impl->current_process;
704} 740}
705 741
706const Process* KernelCore::CurrentProcess() const { 742const KProcess* KernelCore::CurrentProcess() const {
707 return impl->current_process; 743 return impl->current_process;
708} 744}
709 745
710const std::vector<std::shared_ptr<Process>>& KernelCore::GetProcessList() const { 746const std::vector<KProcess*>& KernelCore::GetProcessList() const {
711 return impl->process_list; 747 return impl->process_list;
712} 748}
713 749
@@ -781,6 +817,14 @@ const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const {
781 return *impl->exclusive_monitor; 817 return *impl->exclusive_monitor;
782} 818}
783 819
820KAutoObjectWithListContainer& KernelCore::ObjectListContainer() {
821 return impl->object_list_container;
822}
823
824const KAutoObjectWithListContainer& KernelCore::ObjectListContainer() const {
825 return impl->object_list_container;
826}
827
784void KernelCore::InvalidateAllInstructionCaches() { 828void KernelCore::InvalidateAllInstructionCaches() {
785 for (auto& physical_core : impl->cores) { 829 for (auto& physical_core : impl->cores) {
786 physical_core.ArmInterface().ClearInstructionCache(); 830 physical_core.ArmInterface().ClearInstructionCache();
@@ -800,8 +844,9 @@ void KernelCore::PrepareReschedule(std::size_t id) {
800 // TODO: Reimplement, this 844 // TODO: Reimplement, this
801} 845}
802 846
803void KernelCore::AddNamedPort(std::string name, std::shared_ptr<ClientPort> port) { 847void KernelCore::AddNamedPort(std::string name, KClientPort* port) {
804 impl->named_ports.emplace(std::move(name), std::move(port)); 848 port->Open();
849 impl->named_ports.emplace(std::move(name), port);
805} 850}
806 851
807KernelCore::NamedPortTable::iterator KernelCore::FindNamedPort(const std::string& name) { 852KernelCore::NamedPortTable::iterator KernelCore::FindNamedPort(const std::string& name) {
@@ -833,12 +878,12 @@ u64 KernelCore::CreateNewUserProcessID() {
833 return impl->next_user_process_id++; 878 return impl->next_user_process_id++;
834} 879}
835 880
836Kernel::HandleTable& KernelCore::GlobalHandleTable() { 881KHandleTable& KernelCore::GlobalHandleTable() {
837 return impl->global_handle_table; 882 return *impl->global_handle_table;
838} 883}
839 884
840const Kernel::HandleTable& KernelCore::GlobalHandleTable() const { 885const KHandleTable& KernelCore::GlobalHandleTable() const {
841 return impl->global_handle_table; 886 return *impl->global_handle_table;
842} 887}
843 888
844void KernelCore::RegisterCoreThread(std::size_t core_id) { 889void KernelCore::RegisterCoreThread(std::size_t core_id) {
@@ -910,9 +955,9 @@ void KernelCore::Suspend(bool in_suspention) {
910 { 955 {
911 KScopedSchedulerLock lock(*this); 956 KScopedSchedulerLock lock(*this);
912 const auto state = should_suspend ? ThreadState::Runnable : ThreadState::Waiting; 957 const auto state = should_suspend ? ThreadState::Runnable : ThreadState::Waiting;
913 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { 958 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
914 impl->suspend_threads[i]->SetState(state); 959 impl->suspend_threads[core_id]->SetState(state);
915 impl->suspend_threads[i]->SetWaitReasonForDebugging( 960 impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
916 ThreadWaitReasonForDebugging::Suspended); 961 ThreadWaitReasonForDebugging::Suspended);
917 } 962 }
918 } 963 }
@@ -952,6 +997,14 @@ void KernelCore::ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> servi
952 }); 997 });
953} 998}
954 999
1000Init::KSlabResourceCounts& KernelCore::SlabResourceCounts() {
1001 return impl->slab_resource_counts;
1002}
1003
1004const Init::KSlabResourceCounts& KernelCore::SlabResourceCounts() const {
1005 return impl->slab_resource_counts;
1006}
1007
955bool KernelCore::IsPhantomModeForSingleCore() const { 1008bool KernelCore::IsPhantomModeForSingleCore() const {
956 return impl->IsPhantomModeForSingleCore(); 1009 return impl->IsPhantomModeForSingleCore();
957} 1010}
@@ -960,4 +1013,12 @@ void KernelCore::SetIsPhantomModeForSingleCore(bool value) {
960 impl->SetIsPhantomModeForSingleCore(value); 1013 impl->SetIsPhantomModeForSingleCore(value);
961} 1014}
962 1015
1016Core::System& KernelCore::System() {
1017 return impl->system;
1018}
1019
1020const Core::System& KernelCore::System() const {
1021 return impl->system;
1022}
1023
963} // namespace Kernel 1024} // namespace Kernel
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index a500e63bc..51aaccbc7 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -11,8 +11,10 @@
11#include <vector> 11#include <vector>
12#include "core/arm/cpu_interrupt_handler.h" 12#include "core/arm/cpu_interrupt_handler.h"
13#include "core/hardware_properties.h" 13#include "core/hardware_properties.h"
14#include "core/hle/kernel/k_auto_object.h"
15#include "core/hle/kernel/k_slab_heap.h"
14#include "core/hle/kernel/memory_types.h" 16#include "core/hle/kernel/memory_types.h"
15#include "core/hle/kernel/object.h" 17#include "core/hle/kernel/svc_common.h"
16 18
17namespace Core { 19namespace Core {
18class CPUInterruptHandler; 20class CPUInterruptHandler;
@@ -27,20 +29,32 @@ struct EventType;
27 29
28namespace Kernel { 30namespace Kernel {
29 31
30class ClientPort; 32class KClientPort;
31class GlobalSchedulerContext; 33class GlobalSchedulerContext;
32class HandleTable; 34class KAutoObjectWithListContainer;
35class KClientSession;
36class KEvent;
37class KHandleTable;
38class KLinkedListNode;
33class KMemoryManager; 39class KMemoryManager;
40class KPort;
41class KProcess;
34class KResourceLimit; 42class KResourceLimit;
35class KScheduler; 43class KScheduler;
44class KSession;
36class KSharedMemory; 45class KSharedMemory;
37class KThread; 46class KThread;
47class KTransferMemory;
48class KWritableEvent;
38class PhysicalCore; 49class PhysicalCore;
39class Process;
40class ServiceThread; 50class ServiceThread;
41class Synchronization; 51class Synchronization;
42class TimeManager; 52class TimeManager;
43 53
54namespace Init {
55struct KSlabResourceCounts;
56}
57
44template <typename T> 58template <typename T>
45class KSlabHeap; 59class KSlabHeap;
46 60
@@ -51,7 +65,7 @@ constexpr EmuThreadHandle EmuThreadHandleReserved{1ULL << 63};
51/// Represents a single instance of the kernel. 65/// Represents a single instance of the kernel.
52class KernelCore { 66class KernelCore {
53private: 67private:
54 using NamedPortTable = std::unordered_map<std::string, std::shared_ptr<ClientPort>>; 68 using NamedPortTable = std::unordered_map<std::string, KClientPort*>;
55 69
56public: 70public:
57 /// Constructs an instance of the kernel using the given System 71 /// Constructs an instance of the kernel using the given System
@@ -83,25 +97,28 @@ public:
83 void Shutdown(); 97 void Shutdown();
84 98
85 /// Retrieves a shared pointer to the system resource limit instance. 99 /// Retrieves a shared pointer to the system resource limit instance.
86 std::shared_ptr<KResourceLimit> GetSystemResourceLimit() const; 100 const KResourceLimit* GetSystemResourceLimit() const;
101
102 /// Retrieves a shared pointer to the system resource limit instance.
103 KResourceLimit* GetSystemResourceLimit();
87 104
88 /// Retrieves a shared pointer to a Thread instance within the thread wakeup handle table. 105 /// Retrieves a shared pointer to a Thread instance within the thread wakeup handle table.
89 std::shared_ptr<KThread> RetrieveThreadFromGlobalHandleTable(Handle handle) const; 106 KScopedAutoObject<KThread> RetrieveThreadFromGlobalHandleTable(Handle handle) const;
90 107
91 /// Adds the given shared pointer to an internal list of active processes. 108 /// Adds the given shared pointer to an internal list of active processes.
92 void AppendNewProcess(std::shared_ptr<Process> process); 109 void AppendNewProcess(KProcess* process);
93 110
94 /// Makes the given process the new current process. 111 /// Makes the given process the new current process.
95 void MakeCurrentProcess(Process* process); 112 void MakeCurrentProcess(KProcess* process);
96 113
97 /// Retrieves a pointer to the current process. 114 /// Retrieves a pointer to the current process.
98 Process* CurrentProcess(); 115 KProcess* CurrentProcess();
99 116
100 /// Retrieves a const pointer to the current process. 117 /// Retrieves a const pointer to the current process.
101 const Process* CurrentProcess() const; 118 const KProcess* CurrentProcess() const;
102 119
103 /// Retrieves the list of processes. 120 /// Retrieves the list of processes.
104 const std::vector<std::shared_ptr<Process>>& GetProcessList() const; 121 const std::vector<KProcess*>& GetProcessList() const;
105 122
106 /// Gets the sole instance of the global scheduler 123 /// Gets the sole instance of the global scheduler
107 Kernel::GlobalSchedulerContext& GlobalSchedulerContext(); 124 Kernel::GlobalSchedulerContext& GlobalSchedulerContext();
@@ -143,6 +160,10 @@ public:
143 160
144 const Core::ExclusiveMonitor& GetExclusiveMonitor() const; 161 const Core::ExclusiveMonitor& GetExclusiveMonitor() const;
145 162
163 KAutoObjectWithListContainer& ObjectListContainer();
164
165 const KAutoObjectWithListContainer& ObjectListContainer() const;
166
146 std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts(); 167 std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts();
147 168
148 const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts() const; 169 const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts() const;
@@ -152,7 +173,7 @@ public:
152 void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size); 173 void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);
153 174
154 /// Adds a port to the named port table 175 /// Adds a port to the named port table
155 void AddNamedPort(std::string name, std::shared_ptr<ClientPort> port); 176 void AddNamedPort(std::string name, KClientPort* port);
156 177
157 /// Finds a port within the named port table with the given name. 178 /// Finds a port within the named port table with the given name.
158 NamedPortTable::iterator FindNamedPort(const std::string& name); 179 NamedPortTable::iterator FindNamedPort(const std::string& name);
@@ -225,9 +246,10 @@ public:
225 246
226 /** 247 /**
227 * Creates an HLE service thread, which are used to execute service routines asynchronously. 248 * Creates an HLE service thread, which are used to execute service routines asynchronously.
228 * While these are allocated per ServerSession, these need to be owned and managed outside of 249 * While these are allocated per ServerSession, these need to be owned and managed outside
229 * ServerSession to avoid a circular dependency. 250 * of ServerSession to avoid a circular dependency.
230 * @param name String name for the ServerSession creating this thread, used for debug purposes. 251 * @param name String name for the ServerSession creating this thread, used for debug
252 * purposes.
231 * @returns The a weak pointer newly created service thread. 253 * @returns The a weak pointer newly created service thread.
232 */ 254 */
233 std::weak_ptr<Kernel::ServiceThread> CreateServiceThread(const std::string& name); 255 std::weak_ptr<Kernel::ServiceThread> CreateServiceThread(const std::string& name);
@@ -243,9 +265,45 @@ public:
243 bool IsPhantomModeForSingleCore() const; 265 bool IsPhantomModeForSingleCore() const;
244 void SetIsPhantomModeForSingleCore(bool value); 266 void SetIsPhantomModeForSingleCore(bool value);
245 267
268 Core::System& System();
269 const Core::System& System() const;
270
271 /// Gets the slab heap for the specified kernel object type.
272 template <typename T>
273 KSlabHeap<T>& SlabHeap() {
274 if constexpr (std::is_same_v<T, KClientSession>) {
275 return slab_heap_container->client_session;
276 } else if constexpr (std::is_same_v<T, KEvent>) {
277 return slab_heap_container->event;
278 } else if constexpr (std::is_same_v<T, KLinkedListNode>) {
279 return slab_heap_container->linked_list_node;
280 } else if constexpr (std::is_same_v<T, KPort>) {
281 return slab_heap_container->port;
282 } else if constexpr (std::is_same_v<T, KProcess>) {
283 return slab_heap_container->process;
284 } else if constexpr (std::is_same_v<T, KResourceLimit>) {
285 return slab_heap_container->resource_limit;
286 } else if constexpr (std::is_same_v<T, KSession>) {
287 return slab_heap_container->session;
288 } else if constexpr (std::is_same_v<T, KSharedMemory>) {
289 return slab_heap_container->shared_memory;
290 } else if constexpr (std::is_same_v<T, KThread>) {
291 return slab_heap_container->thread;
292 } else if constexpr (std::is_same_v<T, KTransferMemory>) {
293 return slab_heap_container->transfer_memory;
294 } else if constexpr (std::is_same_v<T, KWritableEvent>) {
295 return slab_heap_container->writeable_event;
296 }
297 }
298
299 /// Gets the current slab resource counts.
300 Init::KSlabResourceCounts& SlabResourceCounts();
301
302 /// Gets the current slab resource counts.
303 const Init::KSlabResourceCounts& SlabResourceCounts() const;
304
246private: 305private:
247 friend class Object; 306 friend class KProcess;
248 friend class Process;
249 friend class KThread; 307 friend class KThread;
250 308
251 /// Creates a new object ID, incrementing the internal object ID counter. 309 /// Creates a new object ID, incrementing the internal object ID counter.
@@ -261,14 +319,33 @@ private:
261 u64 CreateNewThreadID(); 319 u64 CreateNewThreadID();
262 320
263 /// Provides a reference to the global handle table. 321 /// Provides a reference to the global handle table.
264 Kernel::HandleTable& GlobalHandleTable(); 322 KHandleTable& GlobalHandleTable();
265 323
266 /// Provides a const reference to the global handle table. 324 /// Provides a const reference to the global handle table.
267 const Kernel::HandleTable& GlobalHandleTable() const; 325 const KHandleTable& GlobalHandleTable() const;
268 326
269 struct Impl; 327 struct Impl;
270 std::unique_ptr<Impl> impl; 328 std::unique_ptr<Impl> impl;
329
271 bool exception_exited{}; 330 bool exception_exited{};
331
332private:
333 /// Helper to encapsulate all slab heaps in a single heap allocated container
334 struct SlabHeapContainer {
335 KSlabHeap<KClientSession> client_session;
336 KSlabHeap<KEvent> event;
337 KSlabHeap<KLinkedListNode> linked_list_node;
338 KSlabHeap<KPort> port;
339 KSlabHeap<KProcess> process;
340 KSlabHeap<KResourceLimit> resource_limit;
341 KSlabHeap<KSession> session;
342 KSlabHeap<KSharedMemory> shared_memory;
343 KSlabHeap<KThread> thread;
344 KSlabHeap<KTransferMemory> transfer_memory;
345 KSlabHeap<KWritableEvent> writeable_event;
346 };
347
348 std::unique_ptr<SlabHeapContainer> slab_heap_container;
272}; 349};
273 350
274} // namespace Kernel 351} // namespace Kernel
diff --git a/src/core/hle/kernel/object.cpp b/src/core/hle/kernel/object.cpp
deleted file mode 100644
index d7f40c403..000000000
--- a/src/core/hle/kernel/object.cpp
+++ /dev/null
@@ -1,42 +0,0 @@
1// Copyright 2018 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/assert.h"
6#include "core/hle/kernel/kernel.h"
7#include "core/hle/kernel/object.h"
8
9namespace Kernel {
10
11Object::Object(KernelCore& kernel_)
12 : kernel{kernel_}, object_id{kernel_.CreateNewObjectID()}, name{"[UNKNOWN KERNEL OBJECT]"} {}
13Object::Object(KernelCore& kernel_, std::string&& name_)
14 : kernel{kernel_}, object_id{kernel_.CreateNewObjectID()}, name{std::move(name_)} {}
15Object::~Object() = default;
16
17bool Object::IsWaitable() const {
18 switch (GetHandleType()) {
19 case HandleType::ReadableEvent:
20 case HandleType::Thread:
21 case HandleType::Process:
22 case HandleType::ServerPort:
23 case HandleType::ServerSession:
24 return true;
25
26 case HandleType::Unknown:
27 case HandleType::Event:
28 case HandleType::WritableEvent:
29 case HandleType::SharedMemory:
30 case HandleType::TransferMemory:
31 case HandleType::ResourceLimit:
32 case HandleType::ClientPort:
33 case HandleType::ClientSession:
34 case HandleType::Session:
35 return false;
36 }
37
38 UNREACHABLE();
39 return false;
40}
41
42} // namespace Kernel
diff --git a/src/core/hle/kernel/object.h b/src/core/hle/kernel/object.h
deleted file mode 100644
index 501e58b33..000000000
--- a/src/core/hle/kernel/object.h
+++ /dev/null
@@ -1,96 +0,0 @@
1// Copyright 2018 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <atomic>
8#include <memory>
9#include <string>
10
11#include "common/common_types.h"
12
13namespace Kernel {
14
15class KernelCore;
16
17using Handle = u32;
18
19enum class HandleType : u32 {
20 Unknown,
21 Event,
22 WritableEvent,
23 ReadableEvent,
24 SharedMemory,
25 TransferMemory,
26 Thread,
27 Process,
28 ResourceLimit,
29 ClientPort,
30 ServerPort,
31 ClientSession,
32 ServerSession,
33 Session,
34};
35
36class Object : NonCopyable, public std::enable_shared_from_this<Object> {
37public:
38 explicit Object(KernelCore& kernel_);
39 explicit Object(KernelCore& kernel_, std::string&& name_);
40 virtual ~Object();
41
42 /// Returns a unique identifier for the object. For debugging purposes only.
43 u32 GetObjectId() const {
44 return object_id.load(std::memory_order_relaxed);
45 }
46
47 virtual std::string GetTypeName() const {
48 return "[BAD KERNEL OBJECT TYPE]";
49 }
50 virtual std::string GetName() const {
51 return name;
52 }
53 virtual HandleType GetHandleType() const = 0;
54
55 void Close() {
56 // TODO(bunnei): This is a placeholder to decrement the reference count, which we will use
57 // when we implement KAutoObject instead of using shared_ptr.
58 }
59
60 /**
61 * Check if a thread can wait on the object
62 * @return True if a thread can wait on the object, otherwise false
63 */
64 bool IsWaitable() const;
65
66 virtual void Finalize() = 0;
67
68protected:
69 /// The kernel instance this object was created under.
70 KernelCore& kernel;
71
72private:
73 std::atomic<u32> object_id{0};
74 std::string name;
75};
76
77template <typename T>
78std::shared_ptr<T> SharedFrom(T* raw) {
79 if (raw == nullptr)
80 return nullptr;
81 return std::static_pointer_cast<T>(raw->shared_from_this());
82}
83
84/**
85 * Attempts to downcast the given Object pointer to a pointer to T.
86 * @return Derived pointer to the object, or `nullptr` if `object` isn't of type T.
87 */
88template <typename T>
89inline std::shared_ptr<T> DynamicObjectCast(std::shared_ptr<Object> object) {
90 if (object != nullptr && object->GetHandleType() == T::HANDLE_TYPE) {
91 return std::static_pointer_cast<T>(object);
92 }
93 return nullptr;
94}
95
96} // namespace Kernel
diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp
index 1006ee50c..fcb8b1ea5 100644
--- a/src/core/hle/kernel/process_capability.cpp
+++ b/src/core/hle/kernel/process_capability.cpp
@@ -6,7 +6,7 @@
6 6
7#include "common/bit_util.h" 7#include "common/bit_util.h"
8#include "common/logging/log.h" 8#include "common/logging/log.h"
9#include "core/hle/kernel/handle_table.h" 9#include "core/hle/kernel/k_handle_table.h"
10#include "core/hle/kernel/k_page_table.h" 10#include "core/hle/kernel/k_page_table.h"
11#include "core/hle/kernel/process_capability.h" 11#include "core/hle/kernel/process_capability.h"
12#include "core/hle/kernel/svc_results.h" 12#include "core/hle/kernel/svc_results.h"
@@ -99,7 +99,7 @@ void ProcessCapabilities::InitializeForMetadatalessProcess() {
99 interrupt_capabilities.set(); 99 interrupt_capabilities.set();
100 100
101 // Allow using the maximum possible amount of handles 101 // Allow using the maximum possible amount of handles
102 handle_table_size = static_cast<s32>(HandleTable::MAX_COUNT); 102 handle_table_size = static_cast<s32>(KHandleTable::MaxTableSize);
103 103
104 // Allow all debugging capabilities. 104 // Allow all debugging capabilities.
105 is_debuggable = true; 105 is_debuggable = true;
@@ -159,7 +159,7 @@ ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& s
159 const auto type = GetCapabilityType(flag); 159 const auto type = GetCapabilityType(flag);
160 160
161 if (type == CapabilityType::Unset) { 161 if (type == CapabilityType::Unset) {
162 return ResultInvalidCapabilityDescriptor; 162 return ResultInvalidArgument;
163 } 163 }
164 164
165 // Bail early on ignorable entries, as one would expect, 165 // Bail early on ignorable entries, as one would expect,
@@ -202,7 +202,7 @@ ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& s
202 } 202 }
203 203
204 LOG_ERROR(Kernel, "Invalid capability type! type={}", type); 204 LOG_ERROR(Kernel, "Invalid capability type! type={}", type);
205 return ResultInvalidCapabilityDescriptor; 205 return ResultInvalidArgument;
206} 206}
207 207
208void ProcessCapabilities::Clear() { 208void ProcessCapabilities::Clear() {
@@ -225,7 +225,7 @@ ResultCode ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
225 if (priority_mask != 0 || core_mask != 0) { 225 if (priority_mask != 0 || core_mask != 0) {
226 LOG_ERROR(Kernel, "Core or priority mask are not zero! priority_mask={}, core_mask={}", 226 LOG_ERROR(Kernel, "Core or priority mask are not zero! priority_mask={}, core_mask={}",
227 priority_mask, core_mask); 227 priority_mask, core_mask);
228 return ResultInvalidCapabilityDescriptor; 228 return ResultInvalidArgument;
229 } 229 }
230 230
231 const u32 core_num_min = (flags >> 16) & 0xFF; 231 const u32 core_num_min = (flags >> 16) & 0xFF;
@@ -329,7 +329,7 @@ ResultCode ProcessCapabilities::HandleProgramTypeFlags(u32 flags) {
329 const u32 reserved = flags >> 17; 329 const u32 reserved = flags >> 17;
330 if (reserved != 0) { 330 if (reserved != 0) {
331 LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved); 331 LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
332 return ResultReservedValue; 332 return ResultReservedUsed;
333 } 333 }
334 334
335 program_type = static_cast<ProgramType>((flags >> 14) & 0b111); 335 program_type = static_cast<ProgramType>((flags >> 14) & 0b111);
@@ -349,7 +349,7 @@ ResultCode ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
349 LOG_ERROR(Kernel, 349 LOG_ERROR(Kernel,
350 "Kernel version is non zero or flags are too small! major_version={}, flags={}", 350 "Kernel version is non zero or flags are too small! major_version={}, flags={}",
351 major_version, flags); 351 major_version, flags);
352 return ResultInvalidCapabilityDescriptor; 352 return ResultInvalidArgument;
353 } 353 }
354 354
355 kernel_version = flags; 355 kernel_version = flags;
@@ -360,7 +360,7 @@ ResultCode ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
360 const u32 reserved = flags >> 26; 360 const u32 reserved = flags >> 26;
361 if (reserved != 0) { 361 if (reserved != 0) {
362 LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved); 362 LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
363 return ResultReservedValue; 363 return ResultReservedUsed;
364 } 364 }
365 365
366 handle_table_size = static_cast<s32>((flags >> 16) & 0x3FF); 366 handle_table_size = static_cast<s32>((flags >> 16) & 0x3FF);
@@ -371,7 +371,7 @@ ResultCode ProcessCapabilities::HandleDebugFlags(u32 flags) {
371 const u32 reserved = flags >> 19; 371 const u32 reserved = flags >> 19;
372 if (reserved != 0) { 372 if (reserved != 0) {
373 LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved); 373 LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
374 return ResultReservedValue; 374 return ResultReservedUsed;
375 } 375 }
376 376
377 is_debuggable = (flags & 0x20000) != 0; 377 is_debuggable = (flags & 0x20000) != 0;
diff --git a/src/core/hle/kernel/server_port.cpp b/src/core/hle/kernel/server_port.cpp
deleted file mode 100644
index 5d17346ad..000000000
--- a/src/core/hle/kernel/server_port.cpp
+++ /dev/null
@@ -1,54 +0,0 @@
1// Copyright 2016 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <tuple>
6#include "common/assert.h"
7#include "core/hle/kernel/client_port.h"
8#include "core/hle/kernel/k_thread.h"
9#include "core/hle/kernel/object.h"
10#include "core/hle/kernel/server_port.h"
11#include "core/hle/kernel/server_session.h"
12#include "core/hle/kernel/svc_results.h"
13
14namespace Kernel {
15
16ServerPort::ServerPort(KernelCore& kernel) : KSynchronizationObject{kernel} {}
17ServerPort::~ServerPort() = default;
18
19ResultVal<std::shared_ptr<ServerSession>> ServerPort::Accept() {
20 if (pending_sessions.empty()) {
21 return ResultNotFound;
22 }
23
24 auto session = std::move(pending_sessions.back());
25 pending_sessions.pop_back();
26 return MakeResult(std::move(session));
27}
28
29void ServerPort::AppendPendingSession(std::shared_ptr<ServerSession> pending_session) {
30 pending_sessions.push_back(std::move(pending_session));
31 if (pending_sessions.size() == 1) {
32 NotifyAvailable();
33 }
34}
35
36bool ServerPort::IsSignaled() const {
37 return !pending_sessions.empty();
38}
39
40ServerPort::PortPair ServerPort::CreatePortPair(KernelCore& kernel, u32 max_sessions,
41 std::string name) {
42 std::shared_ptr<ServerPort> server_port = std::make_shared<ServerPort>(kernel);
43 std::shared_ptr<ClientPort> client_port = std::make_shared<ClientPort>(kernel);
44
45 server_port->name = name + "_Server";
46 client_port->name = name + "_Client";
47 client_port->server_port = server_port;
48 client_port->max_sessions = max_sessions;
49 client_port->active_sessions = 0;
50
51 return std::make_pair(std::move(server_port), std::move(client_port));
52}
53
54} // namespace Kernel
diff --git a/src/core/hle/kernel/server_port.h b/src/core/hle/kernel/server_port.h
deleted file mode 100644
index 29b4f2509..000000000
--- a/src/core/hle/kernel/server_port.h
+++ /dev/null
@@ -1,98 +0,0 @@
1// Copyright 2016 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8#include <string>
9#include <utility>
10#include <vector>
11#include "common/common_types.h"
12#include "core/hle/kernel/k_synchronization_object.h"
13#include "core/hle/kernel/object.h"
14#include "core/hle/result.h"
15
16namespace Kernel {
17
18class ClientPort;
19class KernelCore;
20class ServerSession;
21class SessionRequestHandler;
22
23class ServerPort final : public KSynchronizationObject {
24public:
25 explicit ServerPort(KernelCore& kernel);
26 ~ServerPort() override;
27
28 using HLEHandler = std::shared_ptr<SessionRequestHandler>;
29 using PortPair = std::pair<std::shared_ptr<ServerPort>, std::shared_ptr<ClientPort>>;
30
31 /**
32 * Creates a pair of ServerPort and an associated ClientPort.
33 *
34 * @param kernel The kernel instance to create the port pair under.
35 * @param max_sessions Maximum number of sessions to the port
36 * @param name Optional name of the ports
37 * @return The created port tuple
38 */
39 static PortPair CreatePortPair(KernelCore& kernel, u32 max_sessions,
40 std::string name = "UnknownPort");
41
42 std::string GetTypeName() const override {
43 return "ServerPort";
44 }
45 std::string GetName() const override {
46 return name;
47 }
48
49 static constexpr HandleType HANDLE_TYPE = HandleType::ServerPort;
50 HandleType GetHandleType() const override {
51 return HANDLE_TYPE;
52 }
53
54 /**
55 * Accepts a pending incoming connection on this port. If there are no pending sessions, will
56 * return ERR_NO_PENDING_SESSIONS.
57 */
58 ResultVal<std::shared_ptr<ServerSession>> Accept();
59
60 /// Whether or not this server port has an HLE handler available.
61 bool HasHLEHandler() const {
62 return hle_handler != nullptr;
63 }
64
65 /// Gets the HLE handler for this port.
66 HLEHandler GetHLEHandler() const {
67 return hle_handler;
68 }
69
70 /**
71 * Sets the HLE handler template for the port. ServerSessions crated by connecting to this port
72 * will inherit a reference to this handler.
73 */
74 void SetHleHandler(HLEHandler hle_handler_) {
75 hle_handler = std::move(hle_handler_);
76 }
77
78 /// Appends a ServerSession to the collection of ServerSessions
79 /// waiting to be accepted by this port.
80 void AppendPendingSession(std::shared_ptr<ServerSession> pending_session);
81
82 bool IsSignaled() const override;
83
84 void Finalize() override {}
85
86private:
87 /// ServerSessions waiting to be accepted by the port
88 std::vector<std::shared_ptr<ServerSession>> pending_sessions;
89
90 /// This session's HLE request handler template (optional)
91 /// ServerSessions created from this port inherit a reference to this handler.
92 HLEHandler hle_handler;
93
94 /// Name of the port (optional)
95 std::string name;
96};
97
98} // namespace Kernel
diff --git a/src/core/hle/kernel/service_thread.cpp b/src/core/hle/kernel/service_thread.cpp
index ee46f3e21..04be8a502 100644
--- a/src/core/hle/kernel/service_thread.cpp
+++ b/src/core/hle/kernel/service_thread.cpp
@@ -13,8 +13,8 @@
13#include "common/scope_exit.h" 13#include "common/scope_exit.h"
14#include "common/thread.h" 14#include "common/thread.h"
15#include "core/core.h" 15#include "core/core.h"
16#include "core/hle/kernel/k_session.h"
16#include "core/hle/kernel/kernel.h" 17#include "core/hle/kernel/kernel.h"
17#include "core/hle/kernel/server_session.h"
18#include "core/hle/kernel/service_thread.h" 18#include "core/hle/kernel/service_thread.h"
19#include "core/hle/lock.h" 19#include "core/hle/lock.h"
20#include "video_core/renderer_base.h" 20#include "video_core/renderer_base.h"
@@ -26,7 +26,7 @@ public:
26 explicit Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name); 26 explicit Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name);
27 ~Impl(); 27 ~Impl();
28 28
29 void QueueSyncRequest(ServerSession& session, std::shared_ptr<HLERequestContext>&& context); 29 void QueueSyncRequest(KSession& session, std::shared_ptr<HLERequestContext>&& context);
30 30
31private: 31private:
32 std::vector<std::thread> threads; 32 std::vector<std::thread> threads;
@@ -69,18 +69,27 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std
69 }); 69 });
70} 70}
71 71
72void ServiceThread::Impl::QueueSyncRequest(ServerSession& session, 72void ServiceThread::Impl::QueueSyncRequest(KSession& session,
73 std::shared_ptr<HLERequestContext>&& context) { 73 std::shared_ptr<HLERequestContext>&& context) {
74 { 74 {
75 std::unique_lock lock{queue_mutex}; 75 std::unique_lock lock{queue_mutex};
76 76
77 // ServerSession owns the service thread, so we cannot caption a strong pointer here in the 77 // Open a reference to the session to ensure it is not closes while the service request
78 // event that the ServerSession is terminated. 78 // completes asynchronously.
79 std::weak_ptr<ServerSession> weak_ptr{SharedFrom(&session)}; 79 session.Open();
80 requests.emplace([weak_ptr, context{std::move(context)}]() { 80
81 if (auto strong_ptr = weak_ptr.lock()) { 81 requests.emplace([session_ptr{&session}, context{std::move(context)}]() {
82 strong_ptr->CompleteSyncRequest(*context); 82 // Close the reference.
83 SCOPE_EXIT({ session_ptr->Close(); });
84
85 // If the session has been closed, we are done.
86 if (session_ptr->IsServerClosed()) {
87 return;
83 } 88 }
89
90 // Complete the service request.
91 KScopedAutoObject server_session{&session_ptr->GetServerSession()};
92 server_session->CompleteSyncRequest(*context);
84 }); 93 });
85 } 94 }
86 condition.notify_one(); 95 condition.notify_one();
@@ -102,7 +111,7 @@ ServiceThread::ServiceThread(KernelCore& kernel, std::size_t num_threads, const
102 111
103ServiceThread::~ServiceThread() = default; 112ServiceThread::~ServiceThread() = default;
104 113
105void ServiceThread::QueueSyncRequest(ServerSession& session, 114void ServiceThread::QueueSyncRequest(KSession& session,
106 std::shared_ptr<HLERequestContext>&& context) { 115 std::shared_ptr<HLERequestContext>&& context) {
107 impl->QueueSyncRequest(session, std::move(context)); 116 impl->QueueSyncRequest(session, std::move(context));
108} 117}
diff --git a/src/core/hle/kernel/service_thread.h b/src/core/hle/kernel/service_thread.h
index 025ab8fb5..6a7fd7c56 100644
--- a/src/core/hle/kernel/service_thread.h
+++ b/src/core/hle/kernel/service_thread.h
@@ -11,14 +11,14 @@ namespace Kernel {
11 11
12class HLERequestContext; 12class HLERequestContext;
13class KernelCore; 13class KernelCore;
14class ServerSession; 14class KSession;
15 15
16class ServiceThread final { 16class ServiceThread final {
17public: 17public:
18 explicit ServiceThread(KernelCore& kernel, std::size_t num_threads, const std::string& name); 18 explicit ServiceThread(KernelCore& kernel, std::size_t num_threads, const std::string& name);
19 ~ServiceThread(); 19 ~ServiceThread();
20 20
21 void QueueSyncRequest(ServerSession& session, std::shared_ptr<HLERequestContext>&& context); 21 void QueueSyncRequest(KSession& session, std::shared_ptr<HLERequestContext>&& context);
22 22
23private: 23private:
24 class Impl; 24 class Impl;
diff --git a/src/core/hle/kernel/session.cpp b/src/core/hle/kernel/session.cpp
deleted file mode 100644
index 8830d4e91..000000000
--- a/src/core/hle/kernel/session.cpp
+++ /dev/null
@@ -1,41 +0,0 @@
1// Copyright 2019 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/assert.h"
6#include "core/hle/kernel/client_session.h"
7#include "core/hle/kernel/k_scoped_resource_reservation.h"
8#include "core/hle/kernel/server_session.h"
9#include "core/hle/kernel/session.h"
10
11namespace Kernel {
12
13Session::Session(KernelCore& kernel) : KSynchronizationObject{kernel} {}
14Session::~Session() {
15 // Release reserved resource when the Session pair was created.
16 kernel.GetSystemResourceLimit()->Release(LimitableResource::Sessions, 1);
17}
18
19Session::SessionPair Session::Create(KernelCore& kernel, std::string name) {
20 // Reserve a new session from the resource limit.
21 KScopedResourceReservation session_reservation(kernel.GetSystemResourceLimit(),
22 LimitableResource::Sessions);
23 ASSERT(session_reservation.Succeeded());
24 auto session{std::make_shared<Session>(kernel)};
25 auto client_session{Kernel::ClientSession::Create(kernel, session, name + "_Client").Unwrap()};
26 auto server_session{Kernel::ServerSession::Create(kernel, session, name + "_Server").Unwrap()};
27
28 session->name = std::move(name);
29 session->client = client_session;
30 session->server = server_session;
31
32 session_reservation.Commit();
33 return std::make_pair(std::move(client_session), std::move(server_session));
34}
35
36bool Session::IsSignaled() const {
37 UNIMPLEMENTED();
38 return true;
39}
40
41} // namespace Kernel
diff --git a/src/core/hle/kernel/session.h b/src/core/hle/kernel/session.h
deleted file mode 100644
index fa3c5651a..000000000
--- a/src/core/hle/kernel/session.h
+++ /dev/null
@@ -1,64 +0,0 @@
1// Copyright 2019 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8#include <string>
9#include <utility>
10
11#include "core/hle/kernel/k_synchronization_object.h"
12
13namespace Kernel {
14
15class ClientSession;
16class ServerSession;
17
18/**
19 * Parent structure to link the client and server endpoints of a session with their associated
20 * client port.
21 */
22class Session final : public KSynchronizationObject {
23public:
24 explicit Session(KernelCore& kernel);
25 ~Session() override;
26
27 using SessionPair = std::pair<std::shared_ptr<ClientSession>, std::shared_ptr<ServerSession>>;
28
29 static SessionPair Create(KernelCore& kernel, std::string name = "Unknown");
30
31 std::string GetName() const override {
32 return name;
33 }
34
35 static constexpr HandleType HANDLE_TYPE = HandleType::Session;
36 HandleType GetHandleType() const override {
37 return HANDLE_TYPE;
38 }
39
40 bool IsSignaled() const override;
41
42 void Finalize() override {}
43
44 std::shared_ptr<ClientSession> Client() {
45 if (auto result{client.lock()}) {
46 return result;
47 }
48 return {};
49 }
50
51 std::shared_ptr<ServerSession> Server() {
52 if (auto result{server.lock()}) {
53 return result;
54 }
55 return {};
56 }
57
58private:
59 std::string name;
60 std::weak_ptr<ClientSession> client;
61 std::weak_ptr<ServerSession> server;
62};
63
64} // namespace Kernel
diff --git a/src/core/hle/kernel/slab_helpers.h b/src/core/hle/kernel/slab_helpers.h
new file mode 100644
index 000000000..0c5995db0
--- /dev/null
+++ b/src/core/hle/kernel/slab_helpers.h
@@ -0,0 +1,148 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <atomic>
8
9#include "common/assert.h"
10#include "common/common_funcs.h"
11#include "common/common_types.h"
12#include "common/intrusive_red_black_tree.h"
13#include "core/hle/kernel/k_auto_object.h"
14#include "core/hle/kernel/k_auto_object_container.h"
15#include "core/hle/kernel/k_light_lock.h"
16#include "core/hle/kernel/k_slab_heap.h"
17#include "core/hle/kernel/kernel.h"
18
19namespace Kernel {
20
21template <class Derived>
22class KSlabAllocated {
23public:
24 constexpr KSlabAllocated() = default;
25
26 size_t GetSlabIndex(KernelCore& kernel) const {
27 return kernel.SlabHeap<Derived>().GetIndex(static_cast<const Derived*>(this));
28 }
29
30public:
31 static void InitializeSlabHeap(KernelCore& kernel, void* memory, size_t memory_size) {
32 kernel.SlabHeap<Derived>().Initialize(memory, memory_size);
33 }
34
35 static Derived* Allocate(KernelCore& kernel) {
36 return kernel.SlabHeap<Derived>().Allocate();
37 }
38
39 static void Free(KernelCore& kernel, Derived* obj) {
40 kernel.SlabHeap<Derived>().Free(obj);
41 }
42
43 static size_t GetObjectSize(KernelCore& kernel) {
44 return kernel.SlabHeap<Derived>().GetObjectSize();
45 }
46
47 static size_t GetSlabHeapSize(KernelCore& kernel) {
48 return kernel.SlabHeap<Derived>().GetSlabHeapSize();
49 }
50
51 static size_t GetPeakIndex(KernelCore& kernel) {
52 return kernel.SlabHeap<Derived>().GetPeakIndex();
53 }
54
55 static uintptr_t GetSlabHeapAddress(KernelCore& kernel) {
56 return kernel.SlabHeap<Derived>().GetSlabHeapAddress();
57 }
58
59 static size_t GetNumRemaining(KernelCore& kernel) {
60 return kernel.SlabHeap<Derived>().GetNumRemaining();
61 }
62};
63
64template <typename Derived, typename Base>
65class KAutoObjectWithSlabHeapAndContainer : public Base {
66 static_assert(std::is_base_of<KAutoObjectWithList, Base>::value);
67
68private:
69 static Derived* Allocate(KernelCore& kernel) {
70 return kernel.SlabHeap<Derived>().AllocateWithKernel(kernel);
71 }
72
73 static void Free(KernelCore& kernel, Derived* obj) {
74 kernel.SlabHeap<Derived>().Free(obj);
75 }
76
77public:
78 KAutoObjectWithSlabHeapAndContainer(KernelCore& kernel_) : Base(kernel_), kernel(kernel_) {}
79 virtual ~KAutoObjectWithSlabHeapAndContainer() {}
80
81 virtual void Destroy() override {
82 const bool is_initialized = this->IsInitialized();
83 uintptr_t arg = 0;
84 if (is_initialized) {
85 kernel.ObjectListContainer().Unregister(this);
86 arg = this->GetPostDestroyArgument();
87 this->Finalize();
88 }
89 Free(kernel, static_cast<Derived*>(this));
90 if (is_initialized) {
91 Derived::PostDestroy(arg);
92 }
93 }
94
95 virtual bool IsInitialized() const {
96 return true;
97 }
98 virtual uintptr_t GetPostDestroyArgument() const {
99 return 0;
100 }
101
102 size_t GetSlabIndex() const {
103 return SlabHeap<Derived>(kernel).GetObjectIndex(static_cast<const Derived*>(this));
104 }
105
106public:
107 static void InitializeSlabHeap(KernelCore& kernel, void* memory, size_t memory_size) {
108 kernel.SlabHeap<Derived>().Initialize(memory, memory_size);
109 kernel.ObjectListContainer().Initialize();
110 }
111
112 static Derived* Create(KernelCore& kernel) {
113 Derived* obj = Allocate(kernel);
114 if (obj != nullptr) {
115 KAutoObject::Create(obj);
116 }
117 return obj;
118 }
119
120 static void Register(KernelCore& kernel, Derived* obj) {
121 return kernel.ObjectListContainer().Register(obj);
122 }
123
124 static size_t GetObjectSize(KernelCore& kernel) {
125 return kernel.SlabHeap<Derived>().GetObjectSize();
126 }
127
128 static size_t GetSlabHeapSize(KernelCore& kernel) {
129 return kernel.SlabHeap<Derived>().GetSlabHeapSize();
130 }
131
132 static size_t GetPeakIndex(KernelCore& kernel) {
133 return kernel.SlabHeap<Derived>().GetPeakIndex();
134 }
135
136 static uintptr_t GetSlabHeapAddress(KernelCore& kernel) {
137 return kernel.SlabHeap<Derived>().GetSlabHeapAddress();
138 }
139
140 static size_t GetNumRemaining(KernelCore& kernel) {
141 return kernel.SlabHeap<Derived>().GetNumRemaining();
142 }
143
144protected:
145 KernelCore& kernel;
146};
147
148} // namespace Kernel
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index bebb86154..52011be9c 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -21,15 +21,16 @@
21#include "core/core_timing.h" 21#include "core/core_timing.h"
22#include "core/core_timing_util.h" 22#include "core/core_timing_util.h"
23#include "core/cpu_manager.h" 23#include "core/cpu_manager.h"
24#include "core/hle/kernel/client_port.h"
25#include "core/hle/kernel/client_session.h"
26#include "core/hle/kernel/handle_table.h"
27#include "core/hle/kernel/k_address_arbiter.h" 24#include "core/hle/kernel/k_address_arbiter.h"
25#include "core/hle/kernel/k_client_port.h"
26#include "core/hle/kernel/k_client_session.h"
28#include "core/hle/kernel/k_condition_variable.h" 27#include "core/hle/kernel/k_condition_variable.h"
29#include "core/hle/kernel/k_event.h" 28#include "core/hle/kernel/k_event.h"
29#include "core/hle/kernel/k_handle_table.h"
30#include "core/hle/kernel/k_memory_block.h" 30#include "core/hle/kernel/k_memory_block.h"
31#include "core/hle/kernel/k_memory_layout.h" 31#include "core/hle/kernel/k_memory_layout.h"
32#include "core/hle/kernel/k_page_table.h" 32#include "core/hle/kernel/k_page_table.h"
33#include "core/hle/kernel/k_process.h"
33#include "core/hle/kernel/k_readable_event.h" 34#include "core/hle/kernel/k_readable_event.h"
34#include "core/hle/kernel/k_resource_limit.h" 35#include "core/hle/kernel/k_resource_limit.h"
35#include "core/hle/kernel/k_scheduler.h" 36#include "core/hle/kernel/k_scheduler.h"
@@ -38,16 +39,15 @@
38#include "core/hle/kernel/k_shared_memory.h" 39#include "core/hle/kernel/k_shared_memory.h"
39#include "core/hle/kernel/k_synchronization_object.h" 40#include "core/hle/kernel/k_synchronization_object.h"
40#include "core/hle/kernel/k_thread.h" 41#include "core/hle/kernel/k_thread.h"
42#include "core/hle/kernel/k_transfer_memory.h"
41#include "core/hle/kernel/k_writable_event.h" 43#include "core/hle/kernel/k_writable_event.h"
42#include "core/hle/kernel/kernel.h" 44#include "core/hle/kernel/kernel.h"
43#include "core/hle/kernel/physical_core.h" 45#include "core/hle/kernel/physical_core.h"
44#include "core/hle/kernel/process.h"
45#include "core/hle/kernel/svc.h" 46#include "core/hle/kernel/svc.h"
46#include "core/hle/kernel/svc_results.h" 47#include "core/hle/kernel/svc_results.h"
47#include "core/hle/kernel/svc_types.h" 48#include "core/hle/kernel/svc_types.h"
48#include "core/hle/kernel/svc_wrap.h" 49#include "core/hle/kernel/svc_wrap.h"
49#include "core/hle/kernel/time_manager.h" 50#include "core/hle/kernel/time_manager.h"
50#include "core/hle/kernel/transfer_memory.h"
51#include "core/hle/lock.h" 51#include "core/hle/lock.h"
52#include "core/hle/result.h" 52#include "core/hle/result.h"
53#include "core/hle/service/service.h" 53#include "core/hle/service/service.h"
@@ -113,7 +113,7 @@ ResultCode MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr,
113 LOG_ERROR(Kernel_SVC, 113 LOG_ERROR(Kernel_SVC,
114 "Destination is not within the stack region, addr=0x{:016X}, size=0x{:016X}", 114 "Destination is not within the stack region, addr=0x{:016X}, size=0x{:016X}",
115 dst_addr, size); 115 dst_addr, size);
116 return ResultInvalidMemoryRange; 116 return ResultInvalidMemoryRegion;
117 } 117 }
118 118
119 if (manager.IsInsideHeapRegion(dst_addr, size)) { 119 if (manager.IsInsideHeapRegion(dst_addr, size)) {
@@ -121,7 +121,7 @@ ResultCode MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr,
121 "Destination does not fit within the heap region, addr=0x{:016X}, " 121 "Destination does not fit within the heap region, addr=0x{:016X}, "
122 "size=0x{:016X}", 122 "size=0x{:016X}",
123 dst_addr, size); 123 dst_addr, size);
124 return ResultInvalidMemoryRange; 124 return ResultInvalidMemoryRegion;
125 } 125 }
126 126
127 if (manager.IsInsideAliasRegion(dst_addr, size)) { 127 if (manager.IsInsideAliasRegion(dst_addr, size)) {
@@ -129,7 +129,7 @@ ResultCode MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr,
129 "Destination does not fit within the map region, addr=0x{:016X}, " 129 "Destination does not fit within the map region, addr=0x{:016X}, "
130 "size=0x{:016X}", 130 "size=0x{:016X}",
131 dst_addr, size); 131 dst_addr, size);
132 return ResultInvalidMemoryRange; 132 return ResultInvalidMemoryRegion;
133 } 133 }
134 134
135 return RESULT_SUCCESS; 135 return RESULT_SUCCESS;
@@ -141,38 +141,6 @@ enum class ResourceLimitValueType {
141 PeakValue, 141 PeakValue,
142}; 142};
143 143
144ResultVal<s64> RetrieveResourceLimitValue(Core::System& system, Handle resource_limit,
145 u32 resource_type, ResourceLimitValueType value_type) {
146 std::lock_guard lock{HLE::g_hle_lock};
147 const auto type = static_cast<LimitableResource>(resource_type);
148 if (!IsValidResourceType(type)) {
149 LOG_ERROR(Kernel_SVC, "Invalid resource limit type: '{}'", resource_type);
150 return ResultInvalidEnumValue;
151 }
152
153 const auto* const current_process = system.Kernel().CurrentProcess();
154 ASSERT(current_process != nullptr);
155
156 const auto resource_limit_object =
157 current_process->GetHandleTable().Get<KResourceLimit>(resource_limit);
158 if (!resource_limit_object) {
159 LOG_ERROR(Kernel_SVC, "Handle to non-existent resource limit instance used. Handle={:08X}",
160 resource_limit);
161 return ResultInvalidHandle;
162 }
163
164 switch (value_type) {
165 case ResourceLimitValueType::CurrentValue:
166 return MakeResult(resource_limit_object->GetCurrentValue(type));
167 case ResourceLimitValueType::LimitValue:
168 return MakeResult(resource_limit_object->GetLimitValue(type));
169 case ResourceLimitValueType::PeakValue:
170 return MakeResult(resource_limit_object->GetPeakValue(type));
171 default:
172 LOG_ERROR(Kernel_SVC, "Invalid resource value_type: '{}'", value_type);
173 return ResultInvalidEnumValue;
174 }
175}
176} // Anonymous namespace 144} // Anonymous namespace
177 145
178/// Set the process heap to a given Size. It can both extend and shrink the heap. 146/// Set the process heap to a given Size. It can both extend and shrink the heap.
@@ -291,11 +259,8 @@ static ResultCode UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr
291} 259}
292 260
293/// Connect to an OS service given the port name, returns the handle to the port to out 261/// Connect to an OS service given the port name, returns the handle to the port to out
294static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle, 262static ResultCode ConnectToNamedPort(Core::System& system, Handle* out, VAddr port_name_address) {
295 VAddr port_name_address) {
296 std::lock_guard lock{HLE::g_hle_lock};
297 auto& memory = system.Memory(); 263 auto& memory = system.Memory();
298
299 if (!memory.IsValidVirtualAddress(port_name_address)) { 264 if (!memory.IsValidVirtualAddress(port_name_address)) {
300 LOG_ERROR(Kernel_SVC, 265 LOG_ERROR(Kernel_SVC,
301 "Port Name Address is not a valid virtual address, port_name_address=0x{:016X}", 266 "Port Name Address is not a valid virtual address, port_name_address=0x{:016X}",
@@ -314,21 +279,33 @@ static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle,
314 279
315 LOG_TRACE(Kernel_SVC, "called port_name={}", port_name); 280 LOG_TRACE(Kernel_SVC, "called port_name={}", port_name);
316 281
282 // Get the current handle table.
317 auto& kernel = system.Kernel(); 283 auto& kernel = system.Kernel();
284 auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
285
286 // Find the client port.
318 const auto it = kernel.FindNamedPort(port_name); 287 const auto it = kernel.FindNamedPort(port_name);
319 if (!kernel.IsValidNamedPort(it)) { 288 if (!kernel.IsValidNamedPort(it)) {
320 LOG_WARNING(Kernel_SVC, "tried to connect to unknown port: {}", port_name); 289 LOG_WARNING(Kernel_SVC, "tried to connect to unknown port: {}", port_name);
321 return ResultNotFound; 290 return ResultNotFound;
322 } 291 }
292 auto port = it->second;
323 293
324 auto client_port = it->second; 294 // Reserve a handle for the port.
295 // NOTE: Nintendo really does write directly to the output handle here.
296 R_TRY(handle_table.Reserve(out));
297 auto handle_guard = SCOPE_GUARD({ handle_table.Unreserve(*out); });
325 298
326 std::shared_ptr<ClientSession> client_session; 299 // Create a session.
327 CASCADE_RESULT(client_session, client_port->Connect()); 300 KClientSession* session{};
301 R_TRY(port->CreateSession(std::addressof(session)));
328 302
329 // Return the client session 303 // Register the session in the table, close the extra reference.
330 auto& handle_table = kernel.CurrentProcess()->GetHandleTable(); 304 handle_table.Register(*out, session);
331 CASCADE_RESULT(*out_handle, handle_table.Create(client_session)); 305 session->Close();
306
307 // We succeeded.
308 handle_guard.Cancel();
332 return RESULT_SUCCESS; 309 return RESULT_SUCCESS;
333} 310}
334 311
@@ -340,14 +317,12 @@ static ResultCode ConnectToNamedPort32(Core::System& system, Handle* out_handle,
340 317
341/// Makes a blocking IPC call to an OS service. 318/// Makes a blocking IPC call to an OS service.
342static ResultCode SendSyncRequest(Core::System& system, Handle handle) { 319static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
320
343 auto& kernel = system.Kernel(); 321 auto& kernel = system.Kernel();
344 const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
345 std::shared_ptr<ClientSession> session = handle_table.Get<ClientSession>(handle);
346 if (!session) {
347 LOG_ERROR(Kernel_SVC, "called with invalid handle=0x{:08X}", handle);
348 return ResultInvalidHandle;
349 }
350 322
323 KScopedAutoObject session =
324 kernel.CurrentProcess()->GetHandleTable().GetObject<KClientSession>(handle);
325 R_UNLESS(session.IsNotNull(), ResultInvalidHandle);
351 LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName()); 326 LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
352 327
353 auto thread = kernel.CurrentScheduler()->GetCurrentThread(); 328 auto thread = kernel.CurrentScheduler()->GetCurrentThread();
@@ -355,7 +330,7 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
355 KScopedSchedulerLock lock(kernel); 330 KScopedSchedulerLock lock(kernel);
356 thread->SetState(ThreadState::Waiting); 331 thread->SetState(ThreadState::Waiting);
357 thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC); 332 thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
358 session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming()); 333 session->SendSyncRequest(thread, system.Memory(), system.CoreTiming());
359 } 334 }
360 335
361 KSynchronizationObject* dummy{}; 336 KSynchronizationObject* dummy{};
@@ -368,18 +343,13 @@ static ResultCode SendSyncRequest32(Core::System& system, Handle handle) {
368 343
369/// Get the ID for the specified thread. 344/// Get the ID for the specified thread.
370static ResultCode GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle) { 345static ResultCode GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle) {
371 LOG_TRACE(Kernel_SVC, "called thread=0x{:08X}", thread_handle);
372
373 // Get the thread from its handle. 346 // Get the thread from its handle.
374 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 347 KScopedAutoObject thread =
375 const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle); 348 system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
376 if (!thread) { 349 R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
377 LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", thread_handle);
378 return ResultInvalidHandle;
379 }
380 350
381 // Get the thread's id. 351 // Get the thread's id.
382 *out_thread_id = thread->GetThreadID(); 352 *out_thread_id = thread->GetId();
383 return RESULT_SUCCESS; 353 return RESULT_SUCCESS;
384} 354}
385 355
@@ -395,110 +365,101 @@ static ResultCode GetThreadId32(Core::System& system, u32* out_thread_id_low,
395} 365}
396 366
397/// Gets the ID of the specified process or a specified thread's owning process. 367/// Gets the ID of the specified process or a specified thread's owning process.
398static ResultCode GetProcessId(Core::System& system, u64* process_id, Handle handle) { 368static ResultCode GetProcessId(Core::System& system, u64* out_process_id, Handle handle) {
399 LOG_DEBUG(Kernel_SVC, "called handle=0x{:08X}", handle); 369 LOG_DEBUG(Kernel_SVC, "called handle=0x{:08X}", handle);
400 370
401 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 371 // Get the object from the handle table.
402 const std::shared_ptr<Process> process = handle_table.Get<Process>(handle); 372 KScopedAutoObject obj =
403 if (process) { 373 system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KAutoObject>(
404 *process_id = process->GetProcessID(); 374 static_cast<Handle>(handle));
405 return RESULT_SUCCESS; 375 R_UNLESS(obj.IsNotNull(), ResultInvalidHandle);
376
377 // Get the process from the object.
378 KProcess* process = nullptr;
379 if (KProcess* p = obj->DynamicCast<KProcess*>(); p != nullptr) {
380 // The object is a process, so we can use it directly.
381 process = p;
382 } else if (KThread* t = obj->DynamicCast<KThread*>(); t != nullptr) {
383 // The object is a thread, so we want to use its parent.
384 process = reinterpret_cast<KThread*>(obj.GetPointerUnsafe())->GetOwnerProcess();
385 } else {
386 // TODO(bunnei): This should also handle debug objects before returning.
387 UNIMPLEMENTED_MSG("Debug objects not implemented");
406 } 388 }
407 389
408 const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(handle); 390 // Make sure the target process exists.
409 if (thread) { 391 R_UNLESS(process != nullptr, ResultInvalidHandle);
410 const Process* const owner_process = thread->GetOwnerProcess();
411 if (!owner_process) {
412 LOG_ERROR(Kernel_SVC, "Non-existent owning process encountered.");
413 return ResultInvalidHandle;
414 }
415
416 *process_id = owner_process->GetProcessID();
417 return RESULT_SUCCESS;
418 }
419 392
420 // NOTE: This should also handle debug objects before returning. 393 // Get the process id.
394 *out_process_id = process->GetId();
421 395
422 LOG_ERROR(Kernel_SVC, "Handle does not exist, handle=0x{:08X}", handle);
423 return ResultInvalidHandle; 396 return ResultInvalidHandle;
424} 397}
425 398
426static ResultCode GetProcessId32(Core::System& system, u32* process_id_low, u32* process_id_high, 399static ResultCode GetProcessId32(Core::System& system, u32* out_process_id_low,
427 Handle handle) { 400 u32* out_process_id_high, Handle handle) {
428 u64 process_id{}; 401 u64 out_process_id{};
429 const auto result = GetProcessId(system, &process_id, handle); 402 const auto result = GetProcessId(system, &out_process_id, handle);
430 *process_id_low = static_cast<u32>(process_id); 403 *out_process_id_low = static_cast<u32>(out_process_id);
431 *process_id_high = static_cast<u32>(process_id >> 32); 404 *out_process_id_high = static_cast<u32>(out_process_id >> 32);
432 return result; 405 return result;
433} 406}
434 407
435/// Wait for the given handles to synchronize, timeout after the specified nanoseconds 408/// Wait for the given handles to synchronize, timeout after the specified nanoseconds
436static ResultCode WaitSynchronization(Core::System& system, s32* index, VAddr handles_address, 409static ResultCode WaitSynchronization(Core::System& system, s32* index, VAddr handles_address,
437 u64 handle_count, s64 nano_seconds) { 410 u64 num_handles, s64 nano_seconds) {
438 LOG_TRACE(Kernel_SVC, "called handles_address=0x{:X}, handle_count={}, nano_seconds={}", 411 LOG_TRACE(Kernel_SVC, "called handles_address=0x{:X}, num_handles={}, nano_seconds={}",
439 handles_address, handle_count, nano_seconds); 412 handles_address, num_handles, nano_seconds);
440 413
441 auto& memory = system.Memory(); 414 // Ensure number of handles is valid.
442 if (!memory.IsValidVirtualAddress(handles_address)) { 415 R_UNLESS(0 <= num_handles && num_handles <= ArgumentHandleCountMax, ResultOutOfRange);
443 LOG_ERROR(Kernel_SVC,
444 "Handle address is not a valid virtual address, handle_address=0x{:016X}",
445 handles_address);
446 return ResultInvalidPointer;
447 }
448
449 static constexpr u64 MaxHandles = 0x40;
450
451 if (handle_count > MaxHandles) {
452 LOG_ERROR(Kernel_SVC, "Handle count specified is too large, expected {} but got {}",
453 MaxHandles, handle_count);
454 return ResultOutOfRange;
455 }
456 416
457 auto& kernel = system.Kernel(); 417 auto& kernel = system.Kernel();
458 std::vector<KSynchronizationObject*> objects(handle_count); 418 std::vector<KSynchronizationObject*> objs(num_handles);
459 const auto& handle_table = kernel.CurrentProcess()->GetHandleTable(); 419 const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
420 Handle* handles = system.Memory().GetPointer<Handle>(handles_address);
460 421
461 for (u64 i = 0; i < handle_count; ++i) { 422 // Copy user handles.
462 const Handle handle = memory.Read32(handles_address + i * sizeof(Handle)); 423 if (num_handles > 0) {
463 const auto object = handle_table.Get<KSynchronizationObject>(handle); 424 // Convert the handles to objects.
425 R_UNLESS(handle_table.GetMultipleObjects<KSynchronizationObject>(objs.data(), handles,
426 num_handles),
427 ResultInvalidHandle);
428 }
464 429
465 if (object == nullptr) { 430 // Ensure handles are closed when we're done.
466 LOG_ERROR(Kernel_SVC, "Object is a nullptr"); 431 SCOPE_EXIT({
467 return ResultInvalidHandle; 432 for (u64 i = 0; i < num_handles; ++i) {
433 objs[i]->Close();
468 } 434 }
435 });
469 436
470 objects[i] = object.get(); 437 return KSynchronizationObject::Wait(kernel, index, objs.data(), static_cast<s32>(objs.size()),
471 } 438 nano_seconds);
472 return KSynchronizationObject::Wait(kernel, index, objects.data(),
473 static_cast<s32>(objects.size()), nano_seconds);
474} 439}
475 440
476static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address, 441static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address,
477 s32 handle_count, u32 timeout_high, s32* index) { 442 s32 num_handles, u32 timeout_high, s32* index) {
478 const s64 nano_seconds{(static_cast<s64>(timeout_high) << 32) | static_cast<s64>(timeout_low)}; 443 const s64 nano_seconds{(static_cast<s64>(timeout_high) << 32) | static_cast<s64>(timeout_low)};
479 return WaitSynchronization(system, index, handles_address, handle_count, nano_seconds); 444 return WaitSynchronization(system, index, handles_address, num_handles, nano_seconds);
480} 445}
481 446
482/// Resumes a thread waiting on WaitSynchronization 447/// Resumes a thread waiting on WaitSynchronization
483static ResultCode CancelSynchronization(Core::System& system, Handle thread_handle) { 448static ResultCode CancelSynchronization(Core::System& system, Handle handle) {
484 LOG_TRACE(Kernel_SVC, "called thread=0x{:X}", thread_handle); 449 LOG_TRACE(Kernel_SVC, "called handle=0x{:X}", handle);
485 450
486 // Get the thread from its handle. 451 // Get the thread from its handle.
487 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 452 KScopedAutoObject thread =
488 std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle); 453 system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(
489 454 static_cast<Handle>(handle));
490 if (!thread) {
491 LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", thread_handle);
492 return ResultInvalidHandle;
493 }
494 455
495 // Cancel the thread's wait. 456 // Cancel the thread's wait.
496 thread->WaitCancel(); 457 thread->WaitCancel();
497 return RESULT_SUCCESS; 458 return RESULT_SUCCESS;
498} 459}
499 460
500static ResultCode CancelSynchronization32(Core::System& system, Handle thread_handle) { 461static ResultCode CancelSynchronization32(Core::System& system, Handle handle) {
501 return CancelSynchronization(system, thread_handle); 462 return CancelSynchronization(system, handle);
502} 463}
503 464
504/// Attempts to locks a mutex 465/// Attempts to locks a mutex
@@ -678,7 +639,7 @@ static void OutputDebugString(Core::System& system, VAddr address, u64 len) {
678} 639}
679 640
680/// Gets system/memory information for the current process 641/// Gets system/memory information for the current process
681static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 handle, 642static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle handle,
682 u64 info_sub_id) { 643 u64 info_sub_id) {
683 std::lock_guard lock{HLE::g_hle_lock}; 644 std::lock_guard lock{HLE::g_hle_lock};
684 LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}", info_id, 645 LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}", info_id,
@@ -744,10 +705,9 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
744 return ResultInvalidEnumValue; 705 return ResultInvalidEnumValue;
745 } 706 }
746 707
747 const auto& current_process_handle_table = 708 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
748 system.Kernel().CurrentProcess()->GetHandleTable(); 709 KScopedAutoObject process = handle_table.GetObject<KProcess>(handle);
749 const auto process = current_process_handle_table.Get<Process>(static_cast<Handle>(handle)); 710 if (process.IsNull()) {
750 if (!process) {
751 LOG_ERROR(Kernel_SVC, "Process is not valid! info_id={}, info_sub_id={}, handle={:08X}", 711 LOG_ERROR(Kernel_SVC, "Process is not valid! info_id={}, info_sub_id={}, handle={:08X}",
752 info_id, info_sub_id, handle); 712 info_id, info_sub_id, handle);
753 return ResultInvalidHandle; 713 return ResultInvalidHandle;
@@ -851,21 +811,19 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
851 return ResultInvalidCombination; 811 return ResultInvalidCombination;
852 } 812 }
853 813
854 Process* const current_process = system.Kernel().CurrentProcess(); 814 KProcess* const current_process = system.Kernel().CurrentProcess();
855 HandleTable& handle_table = current_process->GetHandleTable(); 815 KHandleTable& handle_table = current_process->GetHandleTable();
856 const auto resource_limit = current_process->GetResourceLimit(); 816 const auto resource_limit = current_process->GetResourceLimit();
857 if (!resource_limit) { 817 if (!resource_limit) {
858 *result = KernelHandle::InvalidHandle; 818 *result = Svc::InvalidHandle;
859 // Yes, the kernel considers this a successful operation. 819 // Yes, the kernel considers this a successful operation.
860 return RESULT_SUCCESS; 820 return RESULT_SUCCESS;
861 } 821 }
862 822
863 const auto table_result = handle_table.Create(resource_limit); 823 Handle handle{};
864 if (table_result.Failed()) { 824 R_TRY(handle_table.Add(&handle, resource_limit));
865 return table_result.Code();
866 }
867 825
868 *result = *table_result; 826 *result = handle;
869 return RESULT_SUCCESS; 827 return RESULT_SUCCESS;
870 } 828 }
871 829
@@ -876,9 +834,9 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
876 return ResultInvalidHandle; 834 return ResultInvalidHandle;
877 } 835 }
878 836
879 if (info_sub_id >= Process::RANDOM_ENTROPY_SIZE) { 837 if (info_sub_id >= KProcess::RANDOM_ENTROPY_SIZE) {
880 LOG_ERROR(Kernel_SVC, "Entropy size is out of range, expected {} but got {}", 838 LOG_ERROR(Kernel_SVC, "Entropy size is out of range, expected {} but got {}",
881 Process::RANDOM_ENTROPY_SIZE, info_sub_id); 839 KProcess::RANDOM_ENTROPY_SIZE, info_sub_id);
882 return ResultInvalidCombination; 840 return ResultInvalidCombination;
883 } 841 }
884 842
@@ -899,9 +857,10 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
899 return ResultInvalidCombination; 857 return ResultInvalidCombination;
900 } 858 }
901 859
902 const auto thread = system.Kernel().CurrentProcess()->GetHandleTable().Get<KThread>( 860 KScopedAutoObject thread =
903 static_cast<Handle>(handle)); 861 system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(
904 if (!thread) { 862 static_cast<Handle>(handle));
863 if (thread.IsNull()) {
905 LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", 864 LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}",
906 static_cast<Handle>(handle)); 865 static_cast<Handle>(handle));
907 return ResultInvalidHandle; 866 return ResultInvalidHandle;
@@ -910,7 +869,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
910 const auto& core_timing = system.CoreTiming(); 869 const auto& core_timing = system.CoreTiming();
911 const auto& scheduler = *system.Kernel().CurrentScheduler(); 870 const auto& scheduler = *system.Kernel().CurrentScheduler();
912 const auto* const current_thread = scheduler.GetCurrentThread(); 871 const auto* const current_thread = scheduler.GetCurrentThread();
913 const bool same_thread = current_thread == thread.get(); 872 const bool same_thread = current_thread == thread.GetPointerUnsafe();
914 873
915 const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTicks(); 874 const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTicks();
916 u64 out_ticks = 0; 875 u64 out_ticks = 0;
@@ -966,10 +925,10 @@ static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size)
966 925
967 if (!(addr < addr + size)) { 926 if (!(addr < addr + size)) {
968 LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address"); 927 LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address");
969 return ResultInvalidMemoryRange; 928 return ResultInvalidMemoryRegion;
970 } 929 }
971 930
972 Process* const current_process{system.Kernel().CurrentProcess()}; 931 KProcess* const current_process{system.Kernel().CurrentProcess()};
973 auto& page_table{current_process->PageTable()}; 932 auto& page_table{current_process->PageTable()};
974 933
975 if (current_process->GetSystemResourceSize() == 0) { 934 if (current_process->GetSystemResourceSize() == 0) {
@@ -981,14 +940,14 @@ static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size)
981 LOG_ERROR(Kernel_SVC, 940 LOG_ERROR(Kernel_SVC,
982 "Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr, 941 "Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr,
983 size); 942 size);
984 return ResultInvalidMemoryRange; 943 return ResultInvalidMemoryRegion;
985 } 944 }
986 945
987 if (page_table.IsOutsideAliasRegion(addr, size)) { 946 if (page_table.IsOutsideAliasRegion(addr, size)) {
988 LOG_ERROR(Kernel_SVC, 947 LOG_ERROR(Kernel_SVC,
989 "Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr, 948 "Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr,
990 size); 949 size);
991 return ResultInvalidMemoryRange; 950 return ResultInvalidMemoryRegion;
992 } 951 }
993 952
994 return page_table.MapPhysicalMemory(addr, size); 953 return page_table.MapPhysicalMemory(addr, size);
@@ -1020,10 +979,10 @@ static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size
1020 979
1021 if (!(addr < addr + size)) { 980 if (!(addr < addr + size)) {
1022 LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address"); 981 LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address");
1023 return ResultInvalidMemoryRange; 982 return ResultInvalidMemoryRegion;
1024 } 983 }
1025 984
1026 Process* const current_process{system.Kernel().CurrentProcess()}; 985 KProcess* const current_process{system.Kernel().CurrentProcess()};
1027 auto& page_table{current_process->PageTable()}; 986 auto& page_table{current_process->PageTable()};
1028 987
1029 if (current_process->GetSystemResourceSize() == 0) { 988 if (current_process->GetSystemResourceSize() == 0) {
@@ -1035,14 +994,14 @@ static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size
1035 LOG_ERROR(Kernel_SVC, 994 LOG_ERROR(Kernel_SVC,
1036 "Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr, 995 "Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr,
1037 size); 996 size);
1038 return ResultInvalidMemoryRange; 997 return ResultInvalidMemoryRegion;
1039 } 998 }
1040 999
1041 if (page_table.IsOutsideAliasRegion(addr, size)) { 1000 if (page_table.IsOutsideAliasRegion(addr, size)) {
1042 LOG_ERROR(Kernel_SVC, 1001 LOG_ERROR(Kernel_SVC,
1043 "Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr, 1002 "Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr,
1044 size); 1003 size);
1045 return ResultInvalidMemoryRange; 1004 return ResultInvalidMemoryRegion;
1046 } 1005 }
1047 1006
1048 return page_table.UnmapPhysicalMemory(addr, size); 1007 return page_table.UnmapPhysicalMemory(addr, size);
@@ -1062,37 +1021,19 @@ static ResultCode SetThreadActivity(Core::System& system, Handle thread_handle,
1062 constexpr auto IsValidThreadActivity = [](ThreadActivity activity) { 1021 constexpr auto IsValidThreadActivity = [](ThreadActivity activity) {
1063 return activity == ThreadActivity::Runnable || activity == ThreadActivity::Paused; 1022 return activity == ThreadActivity::Runnable || activity == ThreadActivity::Paused;
1064 }; 1023 };
1065 if (!IsValidThreadActivity(thread_activity)) { 1024 R_UNLESS(IsValidThreadActivity(thread_activity), ResultInvalidEnumValue);
1066 LOG_ERROR(Kernel_SVC, "Invalid thread activity value provided (activity={})",
1067 thread_activity);
1068 return ResultInvalidEnumValue;
1069 }
1070 1025
1071 // Get the thread from its handle. 1026 // Get the thread from its handle.
1072 auto& kernel = system.Kernel(); 1027 KScopedAutoObject thread =
1073 const auto& handle_table = kernel.CurrentProcess()->GetHandleTable(); 1028 system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
1074 const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle); 1029 R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
1075 if (!thread) {
1076 LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", thread_handle);
1077 return ResultInvalidHandle;
1078 }
1079 1030
1080 // Check that the activity is being set on a non-current thread for the current process. 1031 // Check that the activity is being set on a non-current thread for the current process.
1081 if (thread->GetOwnerProcess() != kernel.CurrentProcess()) { 1032 R_UNLESS(thread->GetOwnerProcess() == system.Kernel().CurrentProcess(), ResultInvalidHandle);
1082 LOG_ERROR(Kernel_SVC, "Invalid owning process for the created thread."); 1033 R_UNLESS(thread.GetPointerUnsafe() != GetCurrentThreadPointer(system.Kernel()), ResultBusy);
1083 return ResultInvalidHandle;
1084 }
1085 if (thread.get() == GetCurrentThreadPointer(kernel)) {
1086 LOG_ERROR(Kernel_SVC, "Thread is busy");
1087 return ResultBusy;
1088 }
1089 1034
1090 // Set the activity. 1035 // Set the activity.
1091 const auto set_result = thread->SetActivity(thread_activity); 1036 R_TRY(thread->SetActivity(thread_activity));
1092 if (set_result.IsError()) {
1093 LOG_ERROR(Kernel_SVC, "Failed to set thread activity.");
1094 return set_result;
1095 }
1096 1037
1097 return RESULT_SUCCESS; 1038 return RESULT_SUCCESS;
1098} 1039}
@@ -1107,36 +1048,55 @@ static ResultCode GetThreadContext(Core::System& system, VAddr out_context, Hand
1107 LOG_DEBUG(Kernel_SVC, "called, out_context=0x{:08X}, thread_handle=0x{:X}", out_context, 1048 LOG_DEBUG(Kernel_SVC, "called, out_context=0x{:08X}, thread_handle=0x{:X}", out_context,
1108 thread_handle); 1049 thread_handle);
1109 1050
1051 auto& kernel = system.Kernel();
1052
1110 // Get the thread from its handle. 1053 // Get the thread from its handle.
1111 const auto* current_process = system.Kernel().CurrentProcess(); 1054 KScopedAutoObject thread =
1112 const std::shared_ptr<KThread> thread = 1055 kernel.CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
1113 current_process->GetHandleTable().Get<KThread>(thread_handle); 1056 R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
1114 if (!thread) {
1115 LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={})", thread_handle);
1116 return ResultInvalidHandle;
1117 }
1118 1057
1119 // Require the handle be to a non-current thread in the current process. 1058 // Require the handle be to a non-current thread in the current process.
1120 if (thread->GetOwnerProcess() != current_process) { 1059 const auto* current_process = kernel.CurrentProcess();
1121 LOG_ERROR(Kernel_SVC, "Thread owning process is not the current process."); 1060 R_UNLESS(current_process == thread->GetOwnerProcess(), ResultInvalidId);
1122 return ResultInvalidHandle;
1123 }
1124 if (thread.get() == system.Kernel().CurrentScheduler()->GetCurrentThread()) {
1125 LOG_ERROR(Kernel_SVC, "Current thread is busy.");
1126 return ResultBusy;
1127 }
1128 1061
1129 // Get the thread context. 1062 // Verify that the thread isn't terminated.
1130 std::vector<u8> context; 1063 R_UNLESS(thread->GetState() != ThreadState::Terminated, ResultTerminationRequested);
1131 const auto context_result = thread->GetThreadContext3(context); 1064
1132 if (context_result.IsError()) { 1065 /// Check that the thread is not the current one.
1133 LOG_ERROR(Kernel_SVC, "Unable to successfully retrieve thread context (result: {})", 1066 /// NOTE: Nintendo does not check this, and thus the following loop will deadlock.
1134 context_result.raw); 1067 R_UNLESS(thread.GetPointerUnsafe() != GetCurrentThreadPointer(kernel), ResultInvalidId);
1135 return context_result; 1068
1136 } 1069 // Try to get the thread context until the thread isn't current on any core.
1070 while (true) {
1071 KScopedSchedulerLock sl{kernel};
1137 1072
1138 // Copy the thread context to user space. 1073 // TODO(bunnei): Enforce that thread is suspended for debug here.
1139 system.Memory().WriteBlock(out_context, context.data(), context.size()); 1074
1075 // If the thread's raw state isn't runnable, check if it's current on some core.
1076 if (thread->GetRawState() != ThreadState::Runnable) {
1077 bool current = false;
1078 for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
1079 if (thread.GetPointerUnsafe() == kernel.Scheduler(i).GetCurrentThread()) {
1080 current = true;
1081 }
1082 break;
1083 }
1084
1085 // If the thread is current, retry until it isn't.
1086 if (current) {
1087 continue;
1088 }
1089 }
1090
1091 // Get the thread context.
1092 std::vector<u8> context;
1093 R_TRY(thread->GetThreadContext3(context));
1094
1095 // Copy the thread context to user space.
1096 system.Memory().WriteBlock(out_context, context.data(), context.size());
1097
1098 return RESULT_SUCCESS;
1099 }
1140 1100
1141 return RESULT_SUCCESS; 1101 return RESULT_SUCCESS;
1142} 1102}
@@ -1150,12 +1110,9 @@ static ResultCode GetThreadPriority(Core::System& system, u32* out_priority, Han
1150 LOG_TRACE(Kernel_SVC, "called"); 1110 LOG_TRACE(Kernel_SVC, "called");
1151 1111
1152 // Get the thread from its handle. 1112 // Get the thread from its handle.
1153 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 1113 KScopedAutoObject thread =
1154 const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(handle); 1114 system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(handle);
1155 if (!thread) { 1115 R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
1156 LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", handle);
1157 return ResultInvalidHandle;
1158 }
1159 1116
1160 // Get the thread's priority. 1117 // Get the thread's priority.
1161 *out_priority = thread->GetPriority(); 1118 *out_priority = thread->GetPriority();
@@ -1167,30 +1124,26 @@ static ResultCode GetThreadPriority32(Core::System& system, u32* out_priority, H
1167} 1124}
1168 1125
1169/// Sets the priority for the specified thread 1126/// Sets the priority for the specified thread
1170static ResultCode SetThreadPriority(Core::System& system, Handle handle, u32 priority) { 1127static ResultCode SetThreadPriority(Core::System& system, Handle thread_handle, u32 priority) {
1171 LOG_TRACE(Kernel_SVC, "called"); 1128 // Get the current process.
1129 KProcess& process = *system.Kernel().CurrentProcess();
1172 1130
1173 // Validate the priority. 1131 // Validate the priority.
1174 if (HighestThreadPriority > priority || priority > LowestThreadPriority) { 1132 R_UNLESS(HighestThreadPriority <= priority && priority <= LowestThreadPriority,
1175 LOG_ERROR(Kernel_SVC, "Invalid thread priority specified (priority={})", priority); 1133 ResultInvalidPriority);
1176 return ResultInvalidPriority; 1134 R_UNLESS(process.CheckThreadPriority(priority), ResultInvalidPriority);
1177 }
1178 1135
1179 // Get the thread from its handle. 1136 // Get the thread from its handle.
1180 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 1137 KScopedAutoObject thread = process.GetHandleTable().GetObject<KThread>(thread_handle);
1181 const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(handle); 1138 R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
1182 if (!thread) {
1183 LOG_ERROR(Kernel_SVC, "Invalid handle provided (handle={:08X})", handle);
1184 return ResultInvalidHandle;
1185 }
1186 1139
1187 // Set the thread priority. 1140 // Set the thread priority.
1188 thread->SetBasePriority(priority); 1141 thread->SetBasePriority(priority);
1189 return RESULT_SUCCESS; 1142 return RESULT_SUCCESS;
1190} 1143}
1191 1144
1192static ResultCode SetThreadPriority32(Core::System& system, Handle handle, u32 priority) { 1145static ResultCode SetThreadPriority32(Core::System& system, Handle thread_handle, u32 priority) {
1193 return SetThreadPriority(system, handle, priority); 1146 return SetThreadPriority(system, thread_handle, priority);
1194} 1147}
1195 1148
1196/// Get which CPU core is executing the current thread 1149/// Get which CPU core is executing the current thread
@@ -1203,82 +1156,97 @@ static u32 GetCurrentProcessorNumber32(Core::System& system) {
1203 return GetCurrentProcessorNumber(system); 1156 return GetCurrentProcessorNumber(system);
1204} 1157}
1205 1158
1206static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_handle, VAddr addr, 1159constexpr bool IsValidSharedMemoryPermission(Svc::MemoryPermission perm) {
1207 u64 size, u32 permissions) { 1160 switch (perm) {
1208 std::lock_guard lock{HLE::g_hle_lock}; 1161 case Svc::MemoryPermission::Read:
1162 case Svc::MemoryPermission::ReadWrite:
1163 return true;
1164 default:
1165 return false;
1166 }
1167}
1168
1169constexpr bool IsValidRemoteSharedMemoryPermission(Svc::MemoryPermission perm) {
1170 return IsValidSharedMemoryPermission(perm) || perm == Svc::MemoryPermission::DontCare;
1171}
1172
1173static ResultCode MapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address,
1174 u64 size, Svc::MemoryPermission map_perm) {
1209 LOG_TRACE(Kernel_SVC, 1175 LOG_TRACE(Kernel_SVC,
1210 "called, shared_memory_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}", 1176 "called, shared_memory_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
1211 shared_memory_handle, addr, size, permissions); 1177 shmem_handle, address, size, map_perm);
1212 1178
1213 if (!Common::Is4KBAligned(addr)) { 1179 // Validate the address/size.
1214 LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, addr=0x{:016X}", addr); 1180 R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
1215 return ResultInvalidAddress; 1181 R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
1216 } 1182 R_UNLESS(size > 0, ResultInvalidSize);
1183 R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
1217 1184
1218 if (size == 0) { 1185 // Validate the permission.
1219 LOG_ERROR(Kernel_SVC, "Size is 0"); 1186 R_UNLESS(IsValidSharedMemoryPermission(map_perm), ResultInvalidNewMemoryPermission);
1220 return ResultInvalidSize;
1221 }
1222 1187
1223 if (!Common::Is4KBAligned(size)) { 1188 // Get the current process.
1224 LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, size=0x{:016X}", size); 1189 auto& process = *system.Kernel().CurrentProcess();
1225 return ResultInvalidSize; 1190 auto& page_table = process.PageTable();
1226 }
1227 1191
1228 if (!IsValidAddressRange(addr, size)) { 1192 // Get the shared memory.
1229 LOG_ERROR(Kernel_SVC, "Region is not a valid address range, addr=0x{:016X}, size=0x{:016X}", 1193 KScopedAutoObject shmem = process.GetHandleTable().GetObject<KSharedMemory>(shmem_handle);
1230 addr, size); 1194 R_UNLESS(shmem.IsNotNull(), ResultInvalidHandle);
1231 return ResultInvalidCurrentMemory;
1232 }
1233 1195
1234 const auto permission_type = static_cast<MemoryPermission>(permissions); 1196 // Verify that the mapping is in range.
1235 if ((permission_type | MemoryPermission::Write) != MemoryPermission::ReadWrite) { 1197 R_UNLESS(page_table.CanContain(address, size, KMemoryState::Shared), ResultInvalidMemoryRegion);
1236 LOG_ERROR(Kernel_SVC, "Expected Read or ReadWrite permission but got permissions=0x{:08X}",
1237 permissions);
1238 return ResultInvalidMemoryPermissions;
1239 }
1240 1198
1241 auto* const current_process{system.Kernel().CurrentProcess()}; 1199 // Add the shared memory to the process.
1242 auto& page_table{current_process->PageTable()}; 1200 R_TRY(process.AddSharedMemory(shmem.GetPointerUnsafe(), address, size));
1243 1201
1244 if (page_table.IsInvalidRegion(addr, size)) { 1202 // Ensure that we clean up the shared memory if we fail to map it.
1245 LOG_ERROR(Kernel_SVC, 1203 auto guard =
1246 "Addr does not fit within the valid region, addr=0x{:016X}, " 1204 SCOPE_GUARD({ process.RemoveSharedMemory(shmem.GetPointerUnsafe(), address, size); });
1247 "size=0x{:016X}",
1248 addr, size);
1249 return ResultInvalidMemoryRange;
1250 }
1251 1205
1252 if (page_table.IsInsideHeapRegion(addr, size)) { 1206 // Map the shared memory.
1253 LOG_ERROR(Kernel_SVC, 1207 R_TRY(shmem->Map(process, address, size, map_perm));
1254 "Addr does not fit within the heap region, addr=0x{:016X}, "
1255 "size=0x{:016X}",
1256 addr, size);
1257 return ResultInvalidMemoryRange;
1258 }
1259 1208
1260 if (page_table.IsInsideAliasRegion(addr, size)) { 1209 // We succeeded.
1261 LOG_ERROR(Kernel_SVC, 1210 guard.Cancel();
1262 "Address does not fit within the map region, addr=0x{:016X}, " 1211 return RESULT_SUCCESS;
1263 "size=0x{:016X}", 1212}
1264 addr, size);
1265 return ResultInvalidMemoryRange;
1266 }
1267 1213
1268 auto shared_memory{current_process->GetHandleTable().Get<KSharedMemory>(shared_memory_handle)}; 1214static ResultCode MapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address,
1269 if (!shared_memory) { 1215 u32 size, Svc::MemoryPermission map_perm) {
1270 LOG_ERROR(Kernel_SVC, "Shared memory does not exist, shared_memory_handle=0x{:08X}", 1216 return MapSharedMemory(system, shmem_handle, address, size, map_perm);
1271 shared_memory_handle); 1217}
1272 return ResultInvalidHandle; 1218
1273 } 1219static ResultCode UnmapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address,
1220 u64 size) {
1221 // Validate the address/size.
1222 R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
1223 R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
1224 R_UNLESS(size > 0, ResultInvalidSize);
1225 R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
1226
1227 // Get the current process.
1228 auto& process = *system.Kernel().CurrentProcess();
1229 auto& page_table = process.PageTable();
1230
1231 // Get the shared memory.
1232 KScopedAutoObject shmem = process.GetHandleTable().GetObject<KSharedMemory>(shmem_handle);
1233 R_UNLESS(shmem.IsNotNull(), ResultInvalidHandle);
1274 1234
1275 return shared_memory->Map(*current_process, addr, size, 1235 // Verify that the mapping is in range.
1276 static_cast<KMemoryPermission>(permission_type)); 1236 R_UNLESS(page_table.CanContain(address, size, KMemoryState::Shared), ResultInvalidMemoryRegion);
1237
1238 // Unmap the shared memory.
1239 R_TRY(shmem->Unmap(process, address, size));
1240
1241 // Remove the shared memory from the process.
1242 process.RemoveSharedMemory(shmem.GetPointerUnsafe(), address, size);
1243
1244 return RESULT_SUCCESS;
1277} 1245}
1278 1246
1279static ResultCode MapSharedMemory32(Core::System& system, Handle shared_memory_handle, u32 addr, 1247static ResultCode UnmapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address,
1280 u32 size, u32 permissions) { 1248 u32 size) {
1281 return MapSharedMemory(system, shared_memory_handle, addr, size, permissions); 1249 return UnmapSharedMemory(system, shmem_handle, address, size);
1282} 1250}
1283 1251
1284static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address, 1252static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address,
@@ -1287,8 +1255,8 @@ static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_add
1287 std::lock_guard lock{HLE::g_hle_lock}; 1255 std::lock_guard lock{HLE::g_hle_lock};
1288 LOG_TRACE(Kernel_SVC, "called process=0x{:08X} address={:X}", process_handle, address); 1256 LOG_TRACE(Kernel_SVC, "called process=0x{:08X} address={:X}", process_handle, address);
1289 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 1257 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
1290 std::shared_ptr<Process> process = handle_table.Get<Process>(process_handle); 1258 KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle);
1291 if (!process) { 1259 if (process.IsNull()) {
1292 LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}", 1260 LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}",
1293 process_handle); 1261 process_handle);
1294 return ResultInvalidHandle; 1262 return ResultInvalidHandle;
@@ -1369,8 +1337,8 @@ static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_hand
1369 } 1337 }
1370 1338
1371 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 1339 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
1372 auto process = handle_table.Get<Process>(process_handle); 1340 KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle);
1373 if (!process) { 1341 if (process.IsNull()) {
1374 LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).", 1342 LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
1375 process_handle); 1343 process_handle);
1376 return ResultInvalidHandle; 1344 return ResultInvalidHandle;
@@ -1390,7 +1358,7 @@ static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_hand
1390 "Destination address range is not within the ASLR region (dst_address=0x{:016X}, " 1358 "Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
1391 "size=0x{:016X}).", 1359 "size=0x{:016X}).",
1392 dst_address, size); 1360 dst_address, size);
1393 return ResultInvalidMemoryRange; 1361 return ResultInvalidMemoryRegion;
1394 } 1362 }
1395 1363
1396 return page_table.MapProcessCodeMemory(dst_address, src_address, size); 1364 return page_table.MapProcessCodeMemory(dst_address, src_address, size);
@@ -1437,8 +1405,8 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
1437 } 1405 }
1438 1406
1439 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 1407 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
1440 auto process = handle_table.Get<Process>(process_handle); 1408 KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle);
1441 if (!process) { 1409 if (process.IsNull()) {
1442 LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).", 1410 LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
1443 process_handle); 1411 process_handle);
1444 return ResultInvalidHandle; 1412 return ResultInvalidHandle;
@@ -1458,7 +1426,7 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
1458 "Destination address range is not within the ASLR region (dst_address=0x{:016X}, " 1426 "Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
1459 "size=0x{:016X}).", 1427 "size=0x{:016X}).",
1460 dst_address, size); 1428 dst_address, size);
1461 return ResultInvalidMemoryRange; 1429 return ResultInvalidMemoryRegion;
1462 } 1430 }
1463 1431
1464 return page_table.UnmapProcessCodeMemory(dst_address, src_address, size); 1432 return page_table.UnmapProcessCodeMemory(dst_address, src_address, size);
@@ -1483,7 +1451,7 @@ static void ExitProcess32(Core::System& system) {
1483 ExitProcess(system); 1451 ExitProcess(system);
1484} 1452}
1485 1453
1486static constexpr bool IsValidCoreId(int32_t core_id) { 1454static constexpr bool IsValidVirtualCoreId(int32_t core_id) {
1487 return (0 <= core_id && core_id < static_cast<int32_t>(Core::Hardware::NUM_CPU_CORES)); 1455 return (0 <= core_id && core_id < static_cast<int32_t>(Core::Hardware::NUM_CPU_CORES));
1488} 1456}
1489 1457
@@ -1503,7 +1471,7 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e
1503 } 1471 }
1504 1472
1505 // Validate arguments. 1473 // Validate arguments.
1506 if (!IsValidCoreId(core_id)) { 1474 if (!IsValidVirtualCoreId(core_id)) {
1507 LOG_ERROR(Kernel_SVC, "Invalid Core ID specified (id={})", core_id); 1475 LOG_ERROR(Kernel_SVC, "Invalid Core ID specified (id={})", core_id);
1508 return ResultInvalidCoreId; 1476 return ResultInvalidCoreId;
1509 } 1477 }
@@ -1521,35 +1489,42 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e
1521 return ResultInvalidPriority; 1489 return ResultInvalidPriority;
1522 } 1490 }
1523 1491
1492 // Reserve a new thread from the process resource limit (waiting up to 100ms).
1524 KScopedResourceReservation thread_reservation( 1493 KScopedResourceReservation thread_reservation(
1525 kernel.CurrentProcess(), LimitableResource::Threads, 1, 1494 kernel.CurrentProcess(), LimitableResource::Threads, 1,
1526 system.CoreTiming().GetGlobalTimeNs().count() + 100000000); 1495 system.CoreTiming().GetGlobalTimeNs().count() + 100000000);
1527 if (!thread_reservation.Succeeded()) { 1496 if (!thread_reservation.Succeeded()) {
1528 LOG_ERROR(Kernel_SVC, "Could not reserve a new thread"); 1497 LOG_ERROR(Kernel_SVC, "Could not reserve a new thread");
1529 return ResultResourceLimitedExceeded; 1498 return ResultLimitReached;
1530 } 1499 }
1531 1500
1532 std::shared_ptr<KThread> thread; 1501 // Create the thread.
1533 { 1502 KThread* thread = KThread::Create(kernel);
1534 KScopedLightLock lk{process.GetStateLock()}; 1503 if (!thread) {
1535 CASCADE_RESULT(thread, 1504 LOG_ERROR(Kernel_SVC, "Unable to create new threads. Thread creation limit reached.");
1536 KThread::CreateUserThread(system, ThreadType::User, "", entry_point, 1505 return ResultOutOfResource;
1537 priority, arg, core_id, stack_bottom, &process));
1538 } 1506 }
1507 SCOPE_EXIT({ thread->Close(); });
1539 1508
1540 const auto new_thread_handle = process.GetHandleTable().Create(thread); 1509 // Initialize the thread.
1541 if (new_thread_handle.Failed()) { 1510 {
1542 LOG_ERROR(Kernel_SVC, "Failed to create handle with error=0x{:X}", 1511 KScopedLightLock lk{process.GetStateLock()};
1543 new_thread_handle.Code().raw); 1512 R_TRY(KThread::InitializeUserThread(system, thread, entry_point, arg, stack_bottom,
1544 return new_thread_handle.Code(); 1513 priority, core_id, &process));
1545 } 1514 }
1546 *out_handle = *new_thread_handle;
1547 1515
1548 // Set the thread name for debugging purposes. 1516 // Set the thread name for debugging purposes.
1549 thread->SetName( 1517 thread->SetName(fmt::format("thread[entry_point={:X}, handle={:X}]", entry_point, *out_handle));
1550 fmt::format("thread[entry_point={:X}, handle={:X}]", entry_point, *new_thread_handle)); 1518
1519 // Commit the thread reservation.
1551 thread_reservation.Commit(); 1520 thread_reservation.Commit();
1552 1521
1522 // Register the new thread.
1523 KThread::Register(kernel, thread);
1524
1525 // Add the thread to the handle table.
1526 R_TRY(process.GetHandleTable().Add(out_handle, thread));
1527
1553 return RESULT_SUCCESS; 1528 return RESULT_SUCCESS;
1554} 1529}
1555 1530
@@ -1563,21 +1538,15 @@ static ResultCode StartThread(Core::System& system, Handle thread_handle) {
1563 LOG_DEBUG(Kernel_SVC, "called thread=0x{:08X}", thread_handle); 1538 LOG_DEBUG(Kernel_SVC, "called thread=0x{:08X}", thread_handle);
1564 1539
1565 // Get the thread from its handle. 1540 // Get the thread from its handle.
1566 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 1541 KScopedAutoObject thread =
1567 const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle); 1542 system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
1568 if (!thread) { 1543 R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
1569 LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", thread_handle);
1570 return ResultInvalidHandle;
1571 }
1572 1544
1573 // Try to start the thread. 1545 // Try to start the thread.
1574 const auto run_result = thread->Run(); 1546 R_TRY(thread->Run());
1575 if (run_result.IsError()) { 1547
1576 LOG_ERROR(Kernel_SVC, 1548 // If we succeeded, persist a reference to the thread.
1577 "Unable to successfuly start thread (thread handle={:08X}, result={})", 1549 thread->Open();
1578 thread_handle, run_result.raw);
1579 return run_result;
1580 }
1581 1550
1582 return RESULT_SUCCESS; 1551 return RESULT_SUCCESS;
1583} 1552}
@@ -1591,7 +1560,7 @@ static void ExitThread(Core::System& system) {
1591 LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC()); 1560 LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC());
1592 1561
1593 auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread(); 1562 auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
1594 system.GlobalSchedulerContext().RemoveThread(SharedFrom(current_thread)); 1563 system.GlobalSchedulerContext().RemoveThread(current_thread);
1595 current_thread->Exit(); 1564 current_thread->Exit();
1596} 1565}
1597 1566
@@ -1824,8 +1793,11 @@ static void GetSystemTick32(Core::System& system, u32* time_low, u32* time_high)
1824static ResultCode CloseHandle(Core::System& system, Handle handle) { 1793static ResultCode CloseHandle(Core::System& system, Handle handle) {
1825 LOG_TRACE(Kernel_SVC, "Closing handle 0x{:08X}", handle); 1794 LOG_TRACE(Kernel_SVC, "Closing handle 0x{:08X}", handle);
1826 1795
1827 auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 1796 // Remove the handle.
1828 return handle_table.Close(handle); 1797 R_UNLESS(system.Kernel().CurrentProcess()->GetHandleTable().Remove(handle),
1798 ResultInvalidHandle);
1799
1800 return RESULT_SUCCESS;
1829} 1801}
1830 1802
1831static ResultCode CloseHandle32(Core::System& system, Handle handle) { 1803static ResultCode CloseHandle32(Core::System& system, Handle handle) {
@@ -1841,16 +1813,16 @@ static ResultCode ResetSignal(Core::System& system, Handle handle) {
1841 1813
1842 // Try to reset as readable event. 1814 // Try to reset as readable event.
1843 { 1815 {
1844 auto readable_event = handle_table.Get<KReadableEvent>(handle); 1816 KScopedAutoObject readable_event = handle_table.GetObject<KReadableEvent>(handle);
1845 if (readable_event) { 1817 if (readable_event.IsNotNull()) {
1846 return readable_event->Reset(); 1818 return readable_event->Reset();
1847 } 1819 }
1848 } 1820 }
1849 1821
1850 // Try to reset as process. 1822 // Try to reset as process.
1851 { 1823 {
1852 auto process = handle_table.Get<Process>(handle); 1824 KScopedAutoObject process = handle_table.GetObject<KProcess>(handle);
1853 if (process) { 1825 if (process.IsNotNull()) {
1854 return process->Reset(); 1826 return process->Reset();
1855 } 1827 }
1856 } 1828 }
@@ -1864,65 +1836,68 @@ static ResultCode ResetSignal32(Core::System& system, Handle handle) {
1864 return ResetSignal(system, handle); 1836 return ResetSignal(system, handle);
1865} 1837}
1866 1838
1867/// Creates a TransferMemory object 1839static constexpr bool IsValidTransferMemoryPermission(MemoryPermission perm) {
1868static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAddr addr, u64 size, 1840 switch (perm) {
1869 u32 permissions) { 1841 case MemoryPermission::None:
1870 std::lock_guard lock{HLE::g_hle_lock}; 1842 case MemoryPermission::Read:
1871 LOG_DEBUG(Kernel_SVC, "called addr=0x{:X}, size=0x{:X}, perms=0x{:08X}", addr, size, 1843 case MemoryPermission::ReadWrite:
1872 permissions); 1844 return true;
1873 1845 default:
1874 if (!Common::Is4KBAligned(addr)) { 1846 return false;
1875 LOG_ERROR(Kernel_SVC, "Address ({:016X}) is not page aligned!", addr);
1876 return ResultInvalidAddress;
1877 } 1847 }
1848}
1878 1849
1879 if (!Common::Is4KBAligned(size) || size == 0) { 1850/// Creates a TransferMemory object
1880 LOG_ERROR(Kernel_SVC, "Size ({:016X}) is not page aligned or equal to zero!", size); 1851static ResultCode CreateTransferMemory(Core::System& system, Handle* out, VAddr address, u64 size,
1881 return ResultInvalidAddress; 1852 MemoryPermission map_perm) {
1882 } 1853 auto& kernel = system.Kernel();
1883 1854
1884 if (!IsValidAddressRange(addr, size)) { 1855 // Validate the size.
1885 LOG_ERROR(Kernel_SVC, "Address and size cause overflow! (address={:016X}, size={:016X})", 1856 R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
1886 addr, size); 1857 R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
1887 return ResultInvalidCurrentMemory; 1858 R_UNLESS(size > 0, ResultInvalidSize);
1888 } 1859 R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
1889 1860
1890 const auto perms{static_cast<MemoryPermission>(permissions)}; 1861 // Validate the permissions.
1891 if (perms > MemoryPermission::ReadWrite || perms == MemoryPermission::Write) { 1862 R_UNLESS(IsValidTransferMemoryPermission(map_perm), ResultInvalidNewMemoryPermission);
1892 LOG_ERROR(Kernel_SVC, "Invalid memory permissions for transfer memory! (perms={:08X})", 1863
1893 permissions); 1864 // Get the current process and handle table.
1894 return ResultInvalidMemoryPermissions; 1865 auto& process = *kernel.CurrentProcess();
1895 } 1866 auto& handle_table = process.GetHandleTable();
1896 1867
1897 auto& kernel = system.Kernel();
1898 // Reserve a new transfer memory from the process resource limit. 1868 // Reserve a new transfer memory from the process resource limit.
1899 KScopedResourceReservation trmem_reservation(kernel.CurrentProcess(), 1869 KScopedResourceReservation trmem_reservation(kernel.CurrentProcess(),
1900 LimitableResource::TransferMemory); 1870 LimitableResource::TransferMemory);
1901 if (!trmem_reservation.Succeeded()) { 1871 R_UNLESS(trmem_reservation.Succeeded(), ResultLimitReached);
1902 LOG_ERROR(Kernel_SVC, "Could not reserve a new transfer memory");
1903 return ResultResourceLimitedExceeded;
1904 }
1905 auto transfer_mem_handle = TransferMemory::Create(kernel, system.Memory(), addr, size,
1906 static_cast<KMemoryPermission>(perms));
1907 1872
1908 if (const auto reserve_result{transfer_mem_handle->Reserve()}; reserve_result.IsError()) { 1873 // Create the transfer memory.
1909 return reserve_result; 1874 KTransferMemory* trmem = KTransferMemory::Create(kernel);
1910 } 1875 R_UNLESS(trmem != nullptr, ResultOutOfResource);
1911 1876
1912 auto& handle_table = kernel.CurrentProcess()->GetHandleTable(); 1877 // Ensure the only reference is in the handle table when we're done.
1913 const auto result{handle_table.Create(std::move(transfer_mem_handle))}; 1878 SCOPE_EXIT({ trmem->Close(); });
1914 if (result.Failed()) { 1879
1915 return result.Code(); 1880 // Ensure that the region is in range.
1916 } 1881 R_UNLESS(process.PageTable().Contains(address, size), ResultInvalidCurrentMemory);
1882
1883 // Initialize the transfer memory.
1884 R_TRY(trmem->Initialize(address, size, map_perm));
1885
1886 // Commit the reservation.
1917 trmem_reservation.Commit(); 1887 trmem_reservation.Commit();
1918 1888
1919 *handle = *result; 1889 // Register the transfer memory.
1890 KTransferMemory::Register(kernel, trmem);
1891
1892 // Add the transfer memory to the handle table.
1893 R_TRY(handle_table.Add(out, trmem));
1894
1920 return RESULT_SUCCESS; 1895 return RESULT_SUCCESS;
1921} 1896}
1922 1897
1923static ResultCode CreateTransferMemory32(Core::System& system, Handle* handle, u32 addr, u32 size, 1898static ResultCode CreateTransferMemory32(Core::System& system, Handle* out, u32 address, u32 size,
1924 u32 permissions) { 1899 MemoryPermission map_perm) {
1925 return CreateTransferMemory(system, handle, addr, size, permissions); 1900 return CreateTransferMemory(system, out, address, size, map_perm);
1926} 1901}
1927 1902
1928static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, s32* out_core_id, 1903static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, s32* out_core_id,
@@ -1930,19 +1905,12 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle,
1930 LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle); 1905 LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);
1931 1906
1932 // Get the thread from its handle. 1907 // Get the thread from its handle.
1933 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 1908 KScopedAutoObject thread =
1934 const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle); 1909 system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
1935 if (!thread) { 1910 R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
1936 LOG_ERROR(Kernel_SVC, "Invalid thread handle specified (handle={:08X})", thread_handle);
1937 return ResultInvalidHandle;
1938 }
1939 1911
1940 // Get the core mask. 1912 // Get the core mask.
1941 const auto result = thread->GetCoreMask(out_core_id, out_affinity_mask); 1913 R_TRY(thread->GetCoreMask(out_core_id, out_affinity_mask));
1942 if (result.IsError()) {
1943 LOG_ERROR(Kernel_SVC, "Unable to successfully retrieve core mask (result={})", result.raw);
1944 return result;
1945 }
1946 1914
1947 return RESULT_SUCCESS; 1915 return RESULT_SUCCESS;
1948} 1916}
@@ -1958,58 +1926,33 @@ static ResultCode GetThreadCoreMask32(Core::System& system, Handle thread_handle
1958 1926
1959static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id, 1927static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id,
1960 u64 affinity_mask) { 1928 u64 affinity_mask) {
1961 LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, core_id=0x{:X}, affinity_mask=0x{:016X}",
1962 thread_handle, core_id, affinity_mask);
1963
1964 const auto& current_process = *system.Kernel().CurrentProcess();
1965
1966 // Determine the core id/affinity mask. 1929 // Determine the core id/affinity mask.
1967 if (core_id == Svc::IdealCoreUseProcessValue) { 1930 if (core_id == IdealCoreUseProcessValue) {
1968 core_id = current_process.GetIdealCoreId(); 1931 core_id = system.Kernel().CurrentProcess()->GetIdealCoreId();
1969 affinity_mask = (1ULL << core_id); 1932 affinity_mask = (1ULL << core_id);
1970 } else { 1933 } else {
1971 // Validate the affinity mask. 1934 // Validate the affinity mask.
1972 const u64 process_core_mask = current_process.GetCoreMask(); 1935 const u64 process_core_mask = system.Kernel().CurrentProcess()->GetCoreMask();
1973 if ((affinity_mask | process_core_mask) != process_core_mask) { 1936 R_UNLESS((affinity_mask | process_core_mask) == process_core_mask, ResultInvalidCoreId);
1974 LOG_ERROR(Kernel_SVC, 1937 R_UNLESS(affinity_mask != 0, ResultInvalidCombination);
1975 "Affinity mask does match the process core mask (affinity mask={:016X}, core "
1976 "mask={:016X})",
1977 affinity_mask, process_core_mask);
1978 return ResultInvalidCoreId;
1979 }
1980 if (affinity_mask == 0) {
1981 LOG_ERROR(Kernel_SVC, "Affinity mask is zero.");
1982 return ResultInvalidCombination;
1983 }
1984 1938
1985 // Validate the core id. 1939 // Validate the core id.
1986 if (IsValidCoreId(core_id)) { 1940 if (IsValidVirtualCoreId(core_id)) {
1987 if (((1ULL << core_id) & affinity_mask) == 0) { 1941 R_UNLESS(((1ULL << core_id) & affinity_mask) != 0, ResultInvalidCombination);
1988 LOG_ERROR(Kernel_SVC, "Invalid core ID (ID={})", core_id);
1989 return ResultInvalidCombination;
1990 }
1991 } else { 1942 } else {
1992 if (core_id != IdealCoreNoUpdate && core_id != IdealCoreDontCare) { 1943 R_UNLESS(core_id == IdealCoreNoUpdate || core_id == IdealCoreDontCare,
1993 LOG_ERROR(Kernel_SVC, "Invalid core ID (ID={})", core_id); 1944 ResultInvalidCoreId);
1994 return ResultInvalidCoreId;
1995 }
1996 } 1945 }
1997 } 1946 }
1998 1947
1999 // Get the thread from its handle. 1948 // Get the thread from its handle.
2000 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 1949 KScopedAutoObject thread =
2001 const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle); 1950 system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
2002 if (!thread) { 1951 R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
2003 LOG_ERROR(Kernel_SVC, "Invalid thread handle (handle={:08X})", thread_handle);
2004 return ResultInvalidHandle;
2005 }
2006 1952
2007 // Set the core mask. 1953 // Set the core mask.
2008 const auto set_result = thread->SetCoreMask(core_id, affinity_mask); 1954 R_TRY(thread->SetCoreMask(core_id, affinity_mask));
2009 if (set_result.IsError()) { 1955
2010 LOG_ERROR(Kernel_SVC, "Unable to successfully set core mask (result={})", set_result.raw);
2011 return set_result;
2012 }
2013 return RESULT_SUCCESS; 1956 return RESULT_SUCCESS;
2014} 1957}
2015 1958
@@ -2022,27 +1965,12 @@ static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle
2022static ResultCode SignalEvent(Core::System& system, Handle event_handle) { 1965static ResultCode SignalEvent(Core::System& system, Handle event_handle) {
2023 LOG_DEBUG(Kernel_SVC, "called, event_handle=0x{:08X}", event_handle); 1966 LOG_DEBUG(Kernel_SVC, "called, event_handle=0x{:08X}", event_handle);
2024 1967
2025 auto& kernel = system.Kernel();
2026 // Get the current handle table. 1968 // Get the current handle table.
2027 const HandleTable& handle_table = kernel.CurrentProcess()->GetHandleTable(); 1969 const KHandleTable& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
2028
2029 // Reserve a new event from the process resource limit.
2030 KScopedResourceReservation event_reservation(kernel.CurrentProcess(),
2031 LimitableResource::Events);
2032 if (!event_reservation.Succeeded()) {
2033 LOG_ERROR(Kernel, "Could not reserve a new event");
2034 return ResultResourceLimitedExceeded;
2035 }
2036 1970
2037 // Get the writable event. 1971 // Get the writable event.
2038 auto writable_event = handle_table.Get<KWritableEvent>(event_handle); 1972 KScopedAutoObject writable_event = handle_table.GetObject<KWritableEvent>(event_handle);
2039 if (!writable_event) { 1973 R_UNLESS(writable_event.IsNotNull(), ResultInvalidHandle);
2040 LOG_ERROR(Kernel_SVC, "Invalid event handle provided (handle={:08X})", event_handle);
2041 return ResultInvalidHandle;
2042 }
2043
2044 // Commit the successfuly reservation.
2045 event_reservation.Commit();
2046 1974
2047 return writable_event->Signal(); 1975 return writable_event->Signal();
2048} 1976}
@@ -2059,16 +1987,16 @@ static ResultCode ClearEvent(Core::System& system, Handle event_handle) {
2059 1987
2060 // Try to clear the writable event. 1988 // Try to clear the writable event.
2061 { 1989 {
2062 auto writable_event = handle_table.Get<KWritableEvent>(event_handle); 1990 KScopedAutoObject writable_event = handle_table.GetObject<KWritableEvent>(event_handle);
2063 if (writable_event) { 1991 if (writable_event.IsNotNull()) {
2064 return writable_event->Clear(); 1992 return writable_event->Clear();
2065 } 1993 }
2066 } 1994 }
2067 1995
2068 // Try to clear the readable event. 1996 // Try to clear the readable event.
2069 { 1997 {
2070 auto readable_event = handle_table.Get<KReadableEvent>(event_handle); 1998 KScopedAutoObject readable_event = handle_table.GetObject<KReadableEvent>(event_handle);
2071 if (readable_event) { 1999 if (readable_event.IsNotNull()) {
2072 return readable_event->Clear(); 2000 return readable_event->Clear();
2073 } 2001 }
2074 } 2002 }
@@ -2087,34 +2015,40 @@ static ResultCode CreateEvent(Core::System& system, Handle* out_write, Handle* o
2087 2015
2088 // Get the kernel reference and handle table. 2016 // Get the kernel reference and handle table.
2089 auto& kernel = system.Kernel(); 2017 auto& kernel = system.Kernel();
2090 HandleTable& handle_table = kernel.CurrentProcess()->GetHandleTable(); 2018 auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
2019
2020 // Reserve a new event from the process resource limit
2021 KScopedResourceReservation event_reservation(kernel.CurrentProcess(),
2022 LimitableResource::Events);
2023 R_UNLESS(event_reservation.Succeeded(), ResultLimitReached);
2091 2024
2092 // Create a new event. 2025 // Create a new event.
2093 const auto event = KEvent::Create(kernel, "CreateEvent"); 2026 KEvent* event = KEvent::Create(kernel);
2094 if (!event) { 2027 R_UNLESS(event != nullptr, ResultOutOfResource);
2095 LOG_ERROR(Kernel_SVC, "Unable to create new events. Event creation limit reached.");
2096 return ResultOutOfResource;
2097 }
2098 2028
2099 // Initialize the event. 2029 // Initialize the event.
2100 event->Initialize(); 2030 event->Initialize("CreateEvent");
2031
2032 // Commit the thread reservation.
2033 event_reservation.Commit();
2034
2035 // Ensure that we clean up the event (and its only references are handle table) on function end.
2036 SCOPE_EXIT({
2037 event->GetWritableEvent().Close();
2038 event->GetReadableEvent().Close();
2039 });
2040
2041 // Register the event.
2042 KEvent::Register(kernel, event);
2101 2043
2102 // Add the writable event to the handle table. 2044 // Add the writable event to the handle table.
2103 const auto write_create_result = handle_table.Create(event->GetWritableEvent()); 2045 R_TRY(handle_table.Add(out_write, std::addressof(event->GetWritableEvent())));
2104 if (write_create_result.Failed()) {
2105 return write_create_result.Code();
2106 }
2107 *out_write = *write_create_result;
2108 2046
2109 // Add the writable event to the handle table. 2047 // Add the writable event to the handle table.
2110 auto handle_guard = SCOPE_GUARD({ handle_table.Close(*write_create_result); }); 2048 auto handle_guard = SCOPE_GUARD({ handle_table.Remove(*out_write); });
2111 2049
2112 // Add the readable event to the handle table. 2050 // Add the readable event to the handle table.
2113 const auto read_create_result = handle_table.Create(event->GetReadableEvent()); 2051 R_TRY(handle_table.Add(out_read, std::addressof(event->GetReadableEvent())));
2114 if (read_create_result.Failed()) {
2115 return read_create_result.Code();
2116 }
2117 *out_read = *read_create_result;
2118 2052
2119 // We succeeded. 2053 // We succeeded.
2120 handle_guard.Cancel(); 2054 handle_guard.Cancel();
@@ -2134,8 +2068,8 @@ static ResultCode GetProcessInfo(Core::System& system, u64* out, Handle process_
2134 }; 2068 };
2135 2069
2136 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 2070 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
2137 const auto process = handle_table.Get<Process>(process_handle); 2071 KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle);
2138 if (!process) { 2072 if (process.IsNull()) {
2139 LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}", 2073 LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}",
2140 process_handle); 2074 process_handle);
2141 return ResultInvalidHandle; 2075 return ResultInvalidHandle;
@@ -2152,83 +2086,86 @@ static ResultCode GetProcessInfo(Core::System& system, u64* out, Handle process_
2152} 2086}
2153 2087
2154static ResultCode CreateResourceLimit(Core::System& system, Handle* out_handle) { 2088static ResultCode CreateResourceLimit(Core::System& system, Handle* out_handle) {
2155 std::lock_guard lock{HLE::g_hle_lock};
2156 LOG_DEBUG(Kernel_SVC, "called"); 2089 LOG_DEBUG(Kernel_SVC, "called");
2157 2090
2091 // Create a new resource limit.
2158 auto& kernel = system.Kernel(); 2092 auto& kernel = system.Kernel();
2159 auto resource_limit = std::make_shared<KResourceLimit>(kernel, system.CoreTiming()); 2093 KResourceLimit* resource_limit = KResourceLimit::Create(kernel);
2094 R_UNLESS(resource_limit != nullptr, ResultOutOfResource);
2160 2095
2161 auto* const current_process = kernel.CurrentProcess(); 2096 // Ensure we don't leak a reference to the limit.
2162 ASSERT(current_process != nullptr); 2097 SCOPE_EXIT({ resource_limit->Close(); });
2163 2098
2164 const auto handle = current_process->GetHandleTable().Create(std::move(resource_limit)); 2099 // Initialize the resource limit.
2165 if (handle.Failed()) { 2100 resource_limit->Initialize(&system.CoreTiming());
2166 return handle.Code(); 2101
2167 } 2102 // Register the limit.
2103 KResourceLimit::Register(kernel, resource_limit);
2104
2105 // Add the limit to the handle table.
2106 R_TRY(kernel.CurrentProcess()->GetHandleTable().Add(out_handle, resource_limit));
2168 2107
2169 *out_handle = *handle;
2170 return RESULT_SUCCESS; 2108 return RESULT_SUCCESS;
2171} 2109}
2172 2110
2173static ResultCode GetResourceLimitLimitValue(Core::System& system, u64* out_value, 2111static ResultCode GetResourceLimitLimitValue(Core::System& system, u64* out_limit_value,
2174 Handle resource_limit, u32 resource_type) { 2112 Handle resource_limit_handle,
2175 LOG_DEBUG(Kernel_SVC, "called. Handle={:08X}, Resource type={}", resource_limit, resource_type); 2113 LimitableResource which) {
2114 LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}", resource_limit_handle,
2115 which);
2176 2116
2177 const auto limit_value = RetrieveResourceLimitValue(system, resource_limit, resource_type, 2117 // Validate the resource.
2178 ResourceLimitValueType::LimitValue); 2118 R_UNLESS(IsValidResourceType(which), ResultInvalidEnumValue);
2179 if (limit_value.Failed()) { 2119
2180 return limit_value.Code(); 2120 // Get the resource limit.
2181 } 2121 auto& kernel = system.Kernel();
2122 KScopedAutoObject resource_limit =
2123 kernel.CurrentProcess()->GetHandleTable().GetObject<KResourceLimit>(resource_limit_handle);
2124 R_UNLESS(resource_limit.IsNotNull(), ResultInvalidHandle);
2125
2126 // Get the limit value.
2127 *out_limit_value = resource_limit->GetLimitValue(which);
2182 2128
2183 *out_value = static_cast<u64>(*limit_value);
2184 return RESULT_SUCCESS; 2129 return RESULT_SUCCESS;
2185} 2130}
2186 2131
2187static ResultCode GetResourceLimitCurrentValue(Core::System& system, u64* out_value, 2132static ResultCode GetResourceLimitCurrentValue(Core::System& system, u64* out_current_value,
2188 Handle resource_limit, u32 resource_type) { 2133 Handle resource_limit_handle,
2189 LOG_DEBUG(Kernel_SVC, "called. Handle={:08X}, Resource type={}", resource_limit, resource_type); 2134 LimitableResource which) {
2135 LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}", resource_limit_handle,
2136 which);
2190 2137
2191 const auto current_value = RetrieveResourceLimitValue(system, resource_limit, resource_type, 2138 // Validate the resource.
2192 ResourceLimitValueType::CurrentValue); 2139 R_UNLESS(IsValidResourceType(which), ResultInvalidEnumValue);
2193 if (current_value.Failed()) { 2140
2194 return current_value.Code(); 2141 // Get the resource limit.
2195 } 2142 auto& kernel = system.Kernel();
2143 KScopedAutoObject resource_limit =
2144 kernel.CurrentProcess()->GetHandleTable().GetObject<KResourceLimit>(resource_limit_handle);
2145 R_UNLESS(resource_limit.IsNotNull(), ResultInvalidHandle);
2146
2147 // Get the current value.
2148 *out_current_value = resource_limit->GetCurrentValue(which);
2196 2149
2197 *out_value = static_cast<u64>(*current_value);
2198 return RESULT_SUCCESS; 2150 return RESULT_SUCCESS;
2199} 2151}
2200 2152
2201static ResultCode SetResourceLimitLimitValue(Core::System& system, Handle resource_limit, 2153static ResultCode SetResourceLimitLimitValue(Core::System& system, Handle resource_limit_handle,
2202 u32 resource_type, u64 value) { 2154 LimitableResource which, u64 limit_value) {
2203 LOG_DEBUG(Kernel_SVC, "called. Handle={:08X}, Resource type={}, Value={}", resource_limit, 2155 LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}, limit_value={}",
2204 resource_type, value); 2156 resource_limit_handle, which, limit_value);
2205 2157
2206 const auto type = static_cast<LimitableResource>(resource_type); 2158 // Validate the resource.
2207 if (!IsValidResourceType(type)) { 2159 R_UNLESS(IsValidResourceType(which), ResultInvalidEnumValue);
2208 LOG_ERROR(Kernel_SVC, "Invalid resource limit type: '{}'", resource_type);
2209 return ResultInvalidEnumValue;
2210 }
2211
2212 auto* const current_process = system.Kernel().CurrentProcess();
2213 ASSERT(current_process != nullptr);
2214 2160
2215 auto resource_limit_object = 2161 // Get the resource limit.
2216 current_process->GetHandleTable().Get<KResourceLimit>(resource_limit); 2162 auto& kernel = system.Kernel();
2217 if (!resource_limit_object) { 2163 KScopedAutoObject resource_limit =
2218 LOG_ERROR(Kernel_SVC, "Handle to non-existent resource limit instance used. Handle={:08X}", 2164 kernel.CurrentProcess()->GetHandleTable().GetObject<KResourceLimit>(resource_limit_handle);
2219 resource_limit); 2165 R_UNLESS(resource_limit.IsNotNull(), ResultInvalidHandle);
2220 return ResultInvalidHandle;
2221 }
2222 2166
2223 const auto set_result = resource_limit_object->SetLimitValue(type, static_cast<s64>(value)); 2167 // Set the limit value.
2224 if (set_result.IsError()) { 2168 R_TRY(resource_limit->SetLimitValue(which, limit_value));
2225 LOG_ERROR(Kernel_SVC,
2226 "Attempted to lower resource limit ({}) for category '{}' below its current "
2227 "value ({})",
2228 resource_limit_object->GetLimitValue(type), resource_type,
2229 resource_limit_object->GetCurrentValue(type));
2230 return set_result;
2231 }
2232 2169
2233 return RESULT_SUCCESS; 2170 return RESULT_SUCCESS;
2234} 2171}
@@ -2351,7 +2288,7 @@ static const FunctionDef SVC_Table_32[] = {
2351 {0x11, SvcWrap32<SignalEvent32>, "SignalEvent32"}, 2288 {0x11, SvcWrap32<SignalEvent32>, "SignalEvent32"},
2352 {0x12, SvcWrap32<ClearEvent32>, "ClearEvent32"}, 2289 {0x12, SvcWrap32<ClearEvent32>, "ClearEvent32"},
2353 {0x13, SvcWrap32<MapSharedMemory32>, "MapSharedMemory32"}, 2290 {0x13, SvcWrap32<MapSharedMemory32>, "MapSharedMemory32"},
2354 {0x14, nullptr, "UnmapSharedMemory32"}, 2291 {0x14, SvcWrap32<UnmapSharedMemory32>, "UnmapSharedMemory32"},
2355 {0x15, SvcWrap32<CreateTransferMemory32>, "CreateTransferMemory32"}, 2292 {0x15, SvcWrap32<CreateTransferMemory32>, "CreateTransferMemory32"},
2356 {0x16, SvcWrap32<CloseHandle32>, "CloseHandle32"}, 2293 {0x16, SvcWrap32<CloseHandle32>, "CloseHandle32"},
2357 {0x17, SvcWrap32<ResetSignal32>, "ResetSignal32"}, 2294 {0x17, SvcWrap32<ResetSignal32>, "ResetSignal32"},
@@ -2546,7 +2483,7 @@ static const FunctionDef SVC_Table_64[] = {
2546 {0x11, SvcWrap64<SignalEvent>, "SignalEvent"}, 2483 {0x11, SvcWrap64<SignalEvent>, "SignalEvent"},
2547 {0x12, SvcWrap64<ClearEvent>, "ClearEvent"}, 2484 {0x12, SvcWrap64<ClearEvent>, "ClearEvent"},
2548 {0x13, SvcWrap64<MapSharedMemory>, "MapSharedMemory"}, 2485 {0x13, SvcWrap64<MapSharedMemory>, "MapSharedMemory"},
2549 {0x14, nullptr, "UnmapSharedMemory"}, 2486 {0x14, SvcWrap64<UnmapSharedMemory>, "UnmapSharedMemory"},
2550 {0x15, SvcWrap64<CreateTransferMemory>, "CreateTransferMemory"}, 2487 {0x15, SvcWrap64<CreateTransferMemory>, "CreateTransferMemory"},
2551 {0x16, SvcWrap64<CloseHandle>, "CloseHandle"}, 2488 {0x16, SvcWrap64<CloseHandle>, "CloseHandle"},
2552 {0x17, SvcWrap64<ResetSignal>, "ResetSignal"}, 2489 {0x17, SvcWrap64<ResetSignal>, "ResetSignal"},
diff --git a/src/core/hle/kernel/svc_common.h b/src/core/hle/kernel/svc_common.h
index 4af049551..60ea2c405 100644
--- a/src/core/hle/kernel/svc_common.h
+++ b/src/core/hle/kernel/svc_common.h
@@ -6,9 +6,24 @@
6 6
7#include "common/common_types.h" 7#include "common/common_types.h"
8 8
9namespace Kernel {
10using Handle = u32;
11}
12
9namespace Kernel::Svc { 13namespace Kernel::Svc {
10 14
11constexpr s32 ArgumentHandleCountMax = 0x40; 15constexpr s32 ArgumentHandleCountMax = 0x40;
12constexpr u32 HandleWaitMask{1u << 30}; 16constexpr u32 HandleWaitMask{1u << 30};
13 17
18constexpr inline Handle InvalidHandle = Handle(0);
19
20enum PseudoHandle : Handle {
21 CurrentThread = 0xFFFF8000,
22 CurrentProcess = 0xFFFF8001,
23};
24
25constexpr bool IsPseudoHandle(Handle handle) {
26 return handle == PseudoHandle::CurrentProcess || handle == PseudoHandle::CurrentThread;
27}
28
14} // namespace Kernel::Svc 29} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc_results.h b/src/core/hle/kernel/svc_results.h
index a26d9f2c9..53a940723 100644
--- a/src/core/hle/kernel/svc_results.h
+++ b/src/core/hle/kernel/svc_results.h
@@ -10,18 +10,18 @@ namespace Kernel {
10 10
11// Confirmed Switch kernel error codes 11// Confirmed Switch kernel error codes
12 12
13constexpr ResultCode ResultMaxConnectionsReached{ErrorModule::Kernel, 7}; 13constexpr ResultCode ResultOutOfSessions{ErrorModule::Kernel, 7};
14constexpr ResultCode ResultInvalidCapabilityDescriptor{ErrorModule::Kernel, 14}; 14constexpr ResultCode ResultInvalidArgument{ErrorModule::Kernel, 14};
15constexpr ResultCode ResultNoSynchronizationObject{ErrorModule::Kernel, 57}; 15constexpr ResultCode ResultNoSynchronizationObject{ErrorModule::Kernel, 57};
16constexpr ResultCode ResultTerminationRequested{ErrorModule::Kernel, 59}; 16constexpr ResultCode ResultTerminationRequested{ErrorModule::Kernel, 59};
17constexpr ResultCode ResultInvalidSize{ErrorModule::Kernel, 101}; 17constexpr ResultCode ResultInvalidSize{ErrorModule::Kernel, 101};
18constexpr ResultCode ResultInvalidAddress{ErrorModule::Kernel, 102}; 18constexpr ResultCode ResultInvalidAddress{ErrorModule::Kernel, 102};
19constexpr ResultCode ResultOutOfResource{ErrorModule::Kernel, 103}; 19constexpr ResultCode ResultOutOfResource{ErrorModule::Kernel, 103};
20constexpr ResultCode ResultOutOfMemory{ErrorModule::Kernel, 104}; 20constexpr ResultCode ResultOutOfMemory{ErrorModule::Kernel, 104};
21constexpr ResultCode ResultHandleTableFull{ErrorModule::Kernel, 105}; 21constexpr ResultCode ResultOutOfHandles{ErrorModule::Kernel, 105};
22constexpr ResultCode ResultInvalidCurrentMemory{ErrorModule::Kernel, 106}; 22constexpr ResultCode ResultInvalidCurrentMemory{ErrorModule::Kernel, 106};
23constexpr ResultCode ResultInvalidMemoryPermissions{ErrorModule::Kernel, 108}; 23constexpr ResultCode ResultInvalidNewMemoryPermission{ErrorModule::Kernel, 108};
24constexpr ResultCode ResultInvalidMemoryRange{ErrorModule::Kernel, 110}; 24constexpr ResultCode ResultInvalidMemoryRegion{ErrorModule::Kernel, 110};
25constexpr ResultCode ResultInvalidPriority{ErrorModule::Kernel, 112}; 25constexpr ResultCode ResultInvalidPriority{ErrorModule::Kernel, 112};
26constexpr ResultCode ResultInvalidCoreId{ErrorModule::Kernel, 113}; 26constexpr ResultCode ResultInvalidCoreId{ErrorModule::Kernel, 113};
27constexpr ResultCode ResultInvalidHandle{ErrorModule::Kernel, 114}; 27constexpr ResultCode ResultInvalidHandle{ErrorModule::Kernel, 114};
@@ -33,9 +33,11 @@ constexpr ResultCode ResultOutOfRange{ErrorModule::Kernel, 119};
33constexpr ResultCode ResultInvalidEnumValue{ErrorModule::Kernel, 120}; 33constexpr ResultCode ResultInvalidEnumValue{ErrorModule::Kernel, 120};
34constexpr ResultCode ResultNotFound{ErrorModule::Kernel, 121}; 34constexpr ResultCode ResultNotFound{ErrorModule::Kernel, 121};
35constexpr ResultCode ResultBusy{ErrorModule::Kernel, 122}; 35constexpr ResultCode ResultBusy{ErrorModule::Kernel, 122};
36constexpr ResultCode ResultSessionClosedByRemote{ErrorModule::Kernel, 123}; 36constexpr ResultCode ResultSessionClosed{ErrorModule::Kernel, 123};
37constexpr ResultCode ResultInvalidState{ErrorModule::Kernel, 125}; 37constexpr ResultCode ResultInvalidState{ErrorModule::Kernel, 125};
38constexpr ResultCode ResultReservedValue{ErrorModule::Kernel, 126}; 38constexpr ResultCode ResultReservedUsed{ErrorModule::Kernel, 126};
39constexpr ResultCode ResultResourceLimitedExceeded{ErrorModule::Kernel, 132}; 39constexpr ResultCode ResultPortClosed{ErrorModule::Kernel, 131};
40constexpr ResultCode ResultLimitReached{ErrorModule::Kernel, 132};
41constexpr ResultCode ResultInvalidId{ErrorModule::Kernel, 519};
40 42
41} // namespace Kernel 43} // namespace Kernel
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index 96afd544b..913b16494 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -154,15 +154,28 @@ void SvcWrap64(Core::System& system) {
154 FuncReturn(system, retval); 154 FuncReturn(system, retval);
155} 155}
156 156
157// Used by GetResourceLimitLimitValue.
158template <ResultCode func(Core::System&, u64*, Handle, LimitableResource)>
159void SvcWrap64(Core::System& system) {
160 u64 param_1 = 0;
161 const u32 retval = func(system, &param_1, static_cast<Handle>(Param(system, 1)),
162 static_cast<LimitableResource>(Param(system, 2)))
163 .raw;
164
165 system.CurrentArmInterface().SetReg(1, param_1);
166 FuncReturn(system, retval);
167}
168
157template <ResultCode func(Core::System&, u32, u64)> 169template <ResultCode func(Core::System&, u32, u64)>
158void SvcWrap64(Core::System& system) { 170void SvcWrap64(Core::System& system) {
159 FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1)).raw); 171 FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1)).raw);
160} 172}
161 173
162template <ResultCode func(Core::System&, u32, u32, u64)> 174// Used by SetResourceLimitLimitValue
175template <ResultCode func(Core::System&, Handle, LimitableResource, u64)>
163void SvcWrap64(Core::System& system) { 176void SvcWrap64(Core::System& system) {
164 FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), 177 FuncReturn(system, func(system, static_cast<Handle>(Param(system, 0)),
165 static_cast<u32>(Param(system, 1)), Param(system, 2)) 178 static_cast<LimitableResource>(Param(system, 1)), Param(system, 2))
166 .raw); 179 .raw);
167} 180}
168 181
@@ -219,10 +232,11 @@ void SvcWrap64(Core::System& system) {
219 func(system, Param(system, 0), Param(system, 1), static_cast<u32>(Param(system, 2))).raw); 232 func(system, Param(system, 0), Param(system, 1), static_cast<u32>(Param(system, 2))).raw);
220} 233}
221 234
222template <ResultCode func(Core::System&, u32, u64, u64, u32)> 235// Used by MapSharedMemory
236template <ResultCode func(Core::System&, Handle, u64, u64, Svc::MemoryPermission)>
223void SvcWrap64(Core::System& system) { 237void SvcWrap64(Core::System& system) {
224 FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1), 238 FuncReturn(system, func(system, static_cast<Handle>(Param(system, 0)), Param(system, 1),
225 Param(system, 2), static_cast<u32>(Param(system, 3))) 239 Param(system, 2), static_cast<Svc::MemoryPermission>(Param(system, 3)))
226 .raw); 240 .raw);
227} 241}
228 242
@@ -252,11 +266,13 @@ void SvcWrap64(Core::System& system) {
252 .raw); 266 .raw);
253} 267}
254 268
255template <ResultCode func(Core::System&, u64*, u64, u64, u64)> 269// Used by GetInfo
270template <ResultCode func(Core::System&, u64*, u64, Handle, u64)>
256void SvcWrap64(Core::System& system) { 271void SvcWrap64(Core::System& system) {
257 u64 param_1 = 0; 272 u64 param_1 = 0;
258 const u32 retval = 273 const u32 retval = func(system, &param_1, Param(system, 1),
259 func(system, &param_1, Param(system, 1), Param(system, 2), Param(system, 3)).raw; 274 static_cast<Handle>(Param(system, 2)), Param(system, 3))
275 .raw;
260 276
261 system.CurrentArmInterface().SetReg(1, param_1); 277 system.CurrentArmInterface().SetReg(1, param_1);
262 FuncReturn(system, retval); 278 FuncReturn(system, retval);
@@ -273,11 +289,12 @@ void SvcWrap64(Core::System& system) {
273 FuncReturn(system, retval); 289 FuncReturn(system, retval);
274} 290}
275 291
276template <ResultCode func(Core::System&, u32*, u64, u64, u32)> 292// Used by CreateTransferMemory
293template <ResultCode func(Core::System&, Handle*, u64, u64, Svc::MemoryPermission)>
277void SvcWrap64(Core::System& system) { 294void SvcWrap64(Core::System& system) {
278 u32 param_1 = 0; 295 u32 param_1 = 0;
279 const u32 retval = func(system, &param_1, Param(system, 1), Param(system, 2), 296 const u32 retval = func(system, &param_1, Param(system, 1), Param(system, 2),
280 static_cast<u32>(Param(system, 3))) 297 static_cast<Svc::MemoryPermission>(Param(system, 3)))
281 .raw; 298 .raw;
282 299
283 system.CurrentArmInterface().SetReg(1, param_1); 300 system.CurrentArmInterface().SetReg(1, param_1);
@@ -537,6 +554,16 @@ void SvcWrap32(Core::System& system) {
537 FuncReturn(system, retval); 554 FuncReturn(system, retval);
538} 555}
539 556
557// Used by MapSharedMemory32
558template <ResultCode func(Core::System&, Handle, u32, u32, Svc::MemoryPermission)>
559void SvcWrap32(Core::System& system) {
560 const u32 retval = func(system, static_cast<Handle>(Param(system, 0)),
561 static_cast<u32>(Param(system, 1)), static_cast<u32>(Param(system, 2)),
562 static_cast<Svc::MemoryPermission>(Param(system, 3)))
563 .raw;
564 FuncReturn(system, retval);
565}
566
540// Used by SetThreadCoreMask32 567// Used by SetThreadCoreMask32
541template <ResultCode func(Core::System&, Handle, s32, u32, u32)> 568template <ResultCode func(Core::System&, Handle, s32, u32, u32)>
542void SvcWrap32(Core::System& system) { 569void SvcWrap32(Core::System& system) {
@@ -586,11 +613,12 @@ void SvcWrap32(Core::System& system) {
586} 613}
587 614
588// Used by CreateTransferMemory32 615// Used by CreateTransferMemory32
589template <ResultCode func(Core::System&, Handle*, u32, u32, u32)> 616template <ResultCode func(Core::System&, Handle*, u32, u32, Svc::MemoryPermission)>
590void SvcWrap32(Core::System& system) { 617void SvcWrap32(Core::System& system) {
591 Handle handle = 0; 618 Handle handle = 0;
592 const u32 retval = 619 const u32 retval = func(system, &handle, Param32(system, 1), Param32(system, 2),
593 func(system, &handle, Param32(system, 1), Param32(system, 2), Param32(system, 3)).raw; 620 static_cast<Svc::MemoryPermission>(Param32(system, 3)))
621 .raw;
594 system.CurrentArmInterface().SetReg(1, handle); 622 system.CurrentArmInterface().SetReg(1, handle);
595 FuncReturn(system, retval); 623 FuncReturn(system, retval);
596} 624}
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp
index fd0630019..ae9b4be2f 100644
--- a/src/core/hle/kernel/time_manager.cpp
+++ b/src/core/hle/kernel/time_manager.cpp
@@ -6,7 +6,6 @@
6#include "core/core.h" 6#include "core/core.h"
7#include "core/core_timing.h" 7#include "core/core_timing.h"
8#include "core/core_timing_util.h" 8#include "core/core_timing_util.h"
9#include "core/hle/kernel/handle_table.h"
10#include "core/hle/kernel/k_scheduler.h" 9#include "core/hle/kernel/k_scheduler.h"
11#include "core/hle/kernel/k_thread.h" 10#include "core/hle/kernel/k_thread.h"
12#include "core/hle/kernel/kernel.h" 11#include "core/hle/kernel/kernel.h"
@@ -15,16 +14,12 @@
15namespace Kernel { 14namespace Kernel {
16 15
17TimeManager::TimeManager(Core::System& system_) : system{system_} { 16TimeManager::TimeManager(Core::System& system_) : system{system_} {
18 time_manager_event_type = Core::Timing::CreateEvent( 17 time_manager_event_type =
19 "Kernel::TimeManagerCallback", 18 Core::Timing::CreateEvent("Kernel::TimeManagerCallback",
20 [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) { 19 [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
21 std::shared_ptr<KThread> thread; 20 KThread* thread = reinterpret_cast<KThread*>(thread_handle);
22 { 21 thread->Wakeup();
23 std::lock_guard lock{mutex}; 22 });
24 thread = SharedFrom<KThread>(reinterpret_cast<KThread*>(thread_handle));
25 }
26 thread->Wakeup();
27 });
28} 23}
29 24
30void TimeManager::ScheduleTimeEvent(KThread* thread, s64 nanoseconds) { 25void TimeManager::ScheduleTimeEvent(KThread* thread, s64 nanoseconds) {
diff --git a/src/core/hle/kernel/time_manager.h b/src/core/hle/kernel/time_manager.h
index 0d7f05f30..2d175a9c4 100644
--- a/src/core/hle/kernel/time_manager.h
+++ b/src/core/hle/kernel/time_manager.h
@@ -8,8 +8,6 @@
8#include <mutex> 8#include <mutex>
9#include <unordered_map> 9#include <unordered_map>
10 10
11#include "core/hle/kernel/object.h"
12
13namespace Core { 11namespace Core {
14class System; 12class System;
15} // namespace Core 13} // namespace Core