Diffstat (limited to 'src/core')
-rw-r--r--  src/core/core_cpu.cpp                   |  6
-rw-r--r--  src/core/frontend/emu_window.cpp        |  6
-rw-r--r--  src/core/hle/kernel/code_set.h          |  3
-rw-r--r--  src/core/hle/kernel/kernel.cpp          |  8
-rw-r--r--  src/core/hle/kernel/kernel.h            |  3
-rw-r--r--  src/core/hle/kernel/process.cpp         | 34
-rw-r--r--  src/core/hle/kernel/process.h           | 30
-rw-r--r--  src/core/hle/kernel/readable_event.cpp  |  2
-rw-r--r--  src/core/hle/kernel/readable_event.h    |  2
-rw-r--r--  src/core/hle/kernel/resource_limit.cpp  |  7
-rw-r--r--  src/core/hle/kernel/resource_limit.h    | 11
-rw-r--r--  src/core/hle/kernel/scheduler.cpp       | 18
-rw-r--r--  src/core/hle/kernel/scheduler.h         |  2
-rw-r--r--  src/core/hle/kernel/server_port.cpp     |  2
-rw-r--r--  src/core/hle/kernel/server_port.h       |  2
-rw-r--r--  src/core/hle/kernel/server_session.cpp  |  2
-rw-r--r--  src/core/hle/kernel/server_session.h    |  2
-rw-r--r--  src/core/hle/kernel/shared_memory.cpp   | 11
-rw-r--r--  src/core/hle/kernel/shared_memory.h     | 10
-rw-r--r--  src/core/hle/kernel/svc.cpp             | 93
-rw-r--r--  src/core/hle/kernel/svc_wrap.h          |  8
-rw-r--r--  src/core/hle/kernel/thread.cpp          | 20
-rw-r--r--  src/core/hle/kernel/thread.h            |  6
-rw-r--r--  src/core/hle/kernel/wait_object.h       |  2
-rw-r--r--  src/core/hle/service/fatal/fatal.cpp    | 89
-rw-r--r--  src/core/hle/service/nfc/nfc.cpp        |  2
-rw-r--r--  src/core/hle/service/nfp/nfp.cpp        |  2
-rw-r--r--  src/core/loader/elf.cpp                 |  2
-rw-r--r--  src/core/loader/nro.cpp                 |  2
-rw-r--r--  src/core/loader/nso.cpp                 |  2
-rw-r--r--  src/core/perf_stats.cpp                 | 10
-rw-r--r--  src/core/settings.cpp                   |  1
-rw-r--r--  src/core/settings.h                     |  1
33 files changed, 279 insertions, 122 deletions
diff --git a/src/core/core_cpu.cpp b/src/core/core_cpu.cpp
index 1eefed6d0..e75741db0 100644
--- a/src/core/core_cpu.cpp
+++ b/src/core/core_cpu.cpp
@@ -22,7 +22,7 @@
 namespace Core {
 
 void CpuBarrier::NotifyEnd() {
-    std::unique_lock<std::mutex> lock(mutex);
+    std::unique_lock lock{mutex};
     end = true;
     condition.notify_all();
 }
@@ -34,7 +34,7 @@ bool CpuBarrier::Rendezvous() {
     }
 
     if (!end) {
-        std::unique_lock<std::mutex> lock(mutex);
+        std::unique_lock lock{mutex};
 
         --cores_waiting;
         if (!cores_waiting) {
@@ -131,7 +131,7 @@ void Cpu::Reschedule() {
 
     reschedule_pending = false;
     // Lock the global kernel mutex when we manipulate the HLE state
-    std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
+    std::lock_guard lock{HLE::g_hle_lock};
     scheduler->Reschedule();
 }
 
diff --git a/src/core/frontend/emu_window.cpp b/src/core/frontend/emu_window.cpp
index e29afd630..1320bbe77 100644
--- a/src/core/frontend/emu_window.cpp
+++ b/src/core/frontend/emu_window.cpp
@@ -30,7 +30,7 @@ private:
     explicit Device(std::weak_ptr<TouchState>&& touch_state) : touch_state(touch_state) {}
     std::tuple<float, float, bool> GetStatus() const override {
         if (auto state = touch_state.lock()) {
-            std::lock_guard<std::mutex> guard(state->mutex);
+            std::lock_guard guard{state->mutex};
             return std::make_tuple(state->touch_x, state->touch_y, state->touch_pressed);
         }
         return std::make_tuple(0.0f, 0.0f, false);
@@ -81,7 +81,7 @@ void EmuWindow::TouchPressed(unsigned framebuffer_x, unsigned framebuffer_y) {
     if (!IsWithinTouchscreen(framebuffer_layout, framebuffer_x, framebuffer_y))
         return;
 
-    std::lock_guard<std::mutex> guard(touch_state->mutex);
+    std::lock_guard guard{touch_state->mutex};
     touch_state->touch_x = static_cast<float>(framebuffer_x - framebuffer_layout.screen.left) /
                            (framebuffer_layout.screen.right - framebuffer_layout.screen.left);
     touch_state->touch_y = static_cast<float>(framebuffer_y - framebuffer_layout.screen.top) /
@@ -91,7 +91,7 @@ void EmuWindow::TouchPressed(unsigned framebuffer_x, unsigned framebuffer_y) {
 }
 
 void EmuWindow::TouchReleased() {
-    std::lock_guard<std::mutex> guard(touch_state->mutex);
+    std::lock_guard guard{touch_state->mutex};
     touch_state->touch_pressed = false;
     touch_state->touch_x = 0;
     touch_state->touch_y = 0;
diff --git a/src/core/hle/kernel/code_set.h b/src/core/hle/kernel/code_set.h
index 834fd23d2..879957dcb 100644
--- a/src/core/hle/kernel/code_set.h
+++ b/src/core/hle/kernel/code_set.h
@@ -5,7 +5,6 @@
 #pragma once
 
 #include <cstddef>
-#include <memory>
 #include <vector>
 
 #include "common/common_types.h"
@@ -78,7 +77,7 @@ struct CodeSet final {
     }
 
     /// The overall data that backs this code set.
-    std::shared_ptr<std::vector<u8>> memory;
+    std::vector<u8> memory;
 
     /// The segments that comprise this code set.
     std::array<Segment, 3> segments;
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 3b73be67b..3f14bfa86 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -34,7 +34,7 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_
     const auto& system = Core::System::GetInstance();
 
     // Lock the global kernel mutex when we enter the kernel HLE.
-    std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
+    std::lock_guard lock{HLE::g_hle_lock};
 
     SharedPtr<Thread> thread =
         system.Kernel().RetrieveThreadFromWakeupCallbackHandleTable(proper_handle);
@@ -115,7 +115,7 @@ struct KernelCore::Impl {
 
     // Creates the default system resource limit
     void InitializeSystemResourceLimit(KernelCore& kernel) {
-        system_resource_limit = ResourceLimit::Create(kernel, "System");
+        system_resource_limit = ResourceLimit::Create(kernel);
 
         // If setting the default system values fails, then something seriously wrong has occurred.
         ASSERT(system_resource_limit->SetLimitValue(ResourceType::PhysicalMemory, 0x200000000)
@@ -191,6 +191,10 @@ const Process* KernelCore::CurrentProcess() const {
     return impl->current_process;
 }
 
+const std::vector<SharedPtr<Process>>& KernelCore::GetProcessList() const {
+    return impl->process_list;
+}
+
 void KernelCore::AddNamedPort(std::string name, SharedPtr<ClientPort> port) {
     impl->named_ports.emplace(std::move(name), std::move(port));
 }
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 03ea5b659..6b8738599 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -72,6 +72,9 @@ public:
     /// Retrieves a const pointer to the current process.
     const Process* CurrentProcess() const;
 
+    /// Retrieves the list of processes.
+    const std::vector<SharedPtr<Process>>& GetProcessList() const;
+
     /// Adds a port to the named port table
     void AddNamedPort(std::string name, SharedPtr<ClientPort> port);
 
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index 0d782e4ba..041267318 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -5,6 +5,7 @@
 #include <algorithm>
 #include <memory>
 #include <random>
+#include "common/alignment.h"
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "core/core.h"
@@ -75,6 +76,18 @@ SharedPtr<ResourceLimit> Process::GetResourceLimit() const {
     return resource_limit;
 }
 
+u64 Process::GetTotalPhysicalMemoryUsed() const {
+    return vm_manager.GetCurrentHeapSize() + main_thread_stack_size + code_memory_size;
+}
+
+void Process::RegisterThread(const Thread* thread) {
+    thread_list.push_back(thread);
+}
+
+void Process::UnregisterThread(const Thread* thread) {
+    thread_list.remove(thread);
+}
+
 ResultCode Process::ClearSignalState() {
     if (status == ProcessStatus::Exited) {
         LOG_ERROR(Kernel, "called on a terminated process instance.");
@@ -107,14 +120,17 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata) {
     return handle_table.SetSize(capabilities.GetHandleTableSize());
 }
 
-void Process::Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size) {
+void Process::Run(VAddr entry_point, s32 main_thread_priority, u64 stack_size) {
+    // The kernel always ensures that the given stack size is page aligned.
+    main_thread_stack_size = Common::AlignUp(stack_size, Memory::PAGE_SIZE);
+
     // Allocate and map the main thread stack
     // TODO(bunnei): This is heap area that should be allocated by the kernel and not mapped as part
     // of the user address space.
+    const VAddr mapping_address = vm_manager.GetTLSIORegionEndAddress() - main_thread_stack_size;
     vm_manager
-        .MapMemoryBlock(vm_manager.GetTLSIORegionEndAddress() - stack_size,
-                        std::make_shared<std::vector<u8>>(stack_size, 0), 0, stack_size,
-                        MemoryState::Stack)
+        .MapMemoryBlock(mapping_address, std::make_shared<std::vector<u8>>(main_thread_stack_size),
+                        0, main_thread_stack_size, MemoryState::Stack)
         .Unwrap();
 
     vm_manager.LogLayout();
@@ -210,11 +226,13 @@ void Process::FreeTLSSlot(VAddr tls_address) {
 }
 
 void Process::LoadModule(CodeSet module_, VAddr base_addr) {
+    const auto memory = std::make_shared<std::vector<u8>>(std::move(module_.memory));
+
     const auto MapSegment = [&](const CodeSet::Segment& segment, VMAPermission permissions,
                                 MemoryState memory_state) {
         const auto vma = vm_manager
-                             .MapMemoryBlock(segment.addr + base_addr, module_.memory,
-                                             segment.offset, segment.size, memory_state)
+                             .MapMemoryBlock(segment.addr + base_addr, memory, segment.offset,
+                                             segment.size, memory_state)
                              .Unwrap();
         vm_manager.Reprotect(vma, permissions);
     };
@@ -224,6 +242,8 @@ void Process::LoadModule(CodeSet module_, VAddr base_addr) {
     MapSegment(module_.RODataSegment(), VMAPermission::Read, MemoryState::CodeData);
     MapSegment(module_.DataSegment(), VMAPermission::ReadWrite, MemoryState::CodeData);
 
+    code_memory_size += module_.memory.size();
+
     // Clear instruction cache in CPU JIT
     system.InvalidateCpuInstructionCaches();
 }
@@ -237,7 +257,7 @@ void Process::Acquire(Thread* thread) {
     ASSERT_MSG(!ShouldWait(thread), "Object unavailable!");
 }
 
-bool Process::ShouldWait(Thread* thread) const {
+bool Process::ShouldWait(const Thread* thread) const {
     return !is_signaled;
 }
 
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index a0217d3d8..f060f2a3b 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -7,6 +7,7 @@
 #include <array>
 #include <bitset>
 #include <cstddef>
+#include <list>
 #include <string>
 #include <vector>
 #include <boost/container/static_vector.hpp>
@@ -186,6 +187,22 @@ public:
         return random_entropy.at(index);
     }
 
+    /// Retrieves the total physical memory used by this process in bytes.
+    u64 GetTotalPhysicalMemoryUsed() const;
+
+    /// Gets the list of all threads created with this process as their owner.
+    const std::list<const Thread*>& GetThreadList() const {
+        return thread_list;
+    }
+
+    /// Registers a thread as being created under this process,
+    /// adding it to this process' thread list.
+    void RegisterThread(const Thread* thread);
+
+    /// Unregisters a thread from this process, removing it
+    /// from this process' thread list.
+    void UnregisterThread(const Thread* thread);
+
     /// Clears the signaled state of the process if and only if it's signaled.
     ///
     /// @pre The process must not be already terminated. If this is called on a
@@ -210,7 +227,7 @@ public:
     /**
     * Applies address space changes and launches the process main thread.
     */
-    void Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size);
+    void Run(VAddr entry_point, s32 main_thread_priority, u64 stack_size);
 
     /**
     * Prepares a process for termination by stopping all of its threads
@@ -234,7 +251,7 @@ private:
     ~Process() override;
 
     /// Checks if the specified thread should wait until this process is available.
-    bool ShouldWait(Thread* thread) const override;
+    bool ShouldWait(const Thread* thread) const override;
 
     /// Acquires/locks this process for the specified thread if it's available.
     void Acquire(Thread* thread) override;
@@ -247,6 +264,12 @@ private:
     /// Memory manager for this process.
     Kernel::VMManager vm_manager;
 
+    /// Size of the main thread's stack in bytes.
+    u64 main_thread_stack_size = 0;
+
+    /// Size of the loaded code memory in bytes.
+    u64 code_memory_size = 0;
+
     /// Current status of the process
     ProcessStatus status;
 
@@ -299,6 +322,9 @@ private:
     /// Random values for svcGetInfo RandomEntropy
     std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy;
 
+    /// List of threads that are running with this process as their owner.
+    std::list<const Thread*> thread_list;
+
     /// System context
     Core::System& system;
 
diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp
index 0e5083f70..c2b798a4e 100644
--- a/src/core/hle/kernel/readable_event.cpp
+++ b/src/core/hle/kernel/readable_event.cpp
@@ -14,7 +14,7 @@ namespace Kernel {
 ReadableEvent::ReadableEvent(KernelCore& kernel) : WaitObject{kernel} {}
 ReadableEvent::~ReadableEvent() = default;
 
-bool ReadableEvent::ShouldWait(Thread* thread) const {
+bool ReadableEvent::ShouldWait(const Thread* thread) const {
     return !signaled;
 }
 
diff --git a/src/core/hle/kernel/readable_event.h b/src/core/hle/kernel/readable_event.h
index 77a9c362c..2eb9dcbb7 100644
--- a/src/core/hle/kernel/readable_event.h
+++ b/src/core/hle/kernel/readable_event.h
@@ -36,7 +36,7 @@ public:
         return HANDLE_TYPE;
     }
 
-    bool ShouldWait(Thread* thread) const override;
+    bool ShouldWait(const Thread* thread) const override;
     void Acquire(Thread* thread) override;
 
     /// Unconditionally clears the readable event's state.
diff --git a/src/core/hle/kernel/resource_limit.cpp b/src/core/hle/kernel/resource_limit.cpp
index 2f9695005..173f69915 100644
--- a/src/core/hle/kernel/resource_limit.cpp
+++ b/src/core/hle/kernel/resource_limit.cpp
@@ -16,11 +16,8 @@ constexpr std::size_t ResourceTypeToIndex(ResourceType type) {
 ResourceLimit::ResourceLimit(KernelCore& kernel) : Object{kernel} {}
 ResourceLimit::~ResourceLimit() = default;
 
-SharedPtr<ResourceLimit> ResourceLimit::Create(KernelCore& kernel, std::string name) {
-    SharedPtr<ResourceLimit> resource_limit(new ResourceLimit(kernel));
-
-    resource_limit->name = std::move(name);
-    return resource_limit;
+SharedPtr<ResourceLimit> ResourceLimit::Create(KernelCore& kernel) {
+    return new ResourceLimit(kernel);
 }
 
 s64 ResourceLimit::GetCurrentResourceValue(ResourceType resource) const {
diff --git a/src/core/hle/kernel/resource_limit.h b/src/core/hle/kernel/resource_limit.h
index 59dc11c22..70e09858a 100644
--- a/src/core/hle/kernel/resource_limit.h
+++ b/src/core/hle/kernel/resource_limit.h
@@ -31,16 +31,14 @@ constexpr bool IsValidResourceType(ResourceType type) {
 
 class ResourceLimit final : public Object {
 public:
-    /**
-     * Creates a resource limit object.
-     */
-    static SharedPtr<ResourceLimit> Create(KernelCore& kernel, std::string name = "Unknown");
+    /// Creates a resource limit object.
+    static SharedPtr<ResourceLimit> Create(KernelCore& kernel);
 
     std::string GetTypeName() const override {
         return "ResourceLimit";
     }
     std::string GetName() const override {
-        return name;
+        return GetTypeName();
     }
 
     static const HandleType HANDLE_TYPE = HandleType::ResourceLimit;
@@ -95,9 +93,6 @@ private:
     ResourceArray limits{};
     /// Current resource limit values.
     ResourceArray values{};
-
-    /// Name of resource limit object.
-    std::string name;
 };
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 6d0f13ecf..ac501bf7f 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -29,7 +29,7 @@ Scheduler::~Scheduler() {
 }
 
 bool Scheduler::HaveReadyThreads() const {
-    std::lock_guard<std::mutex> lock(scheduler_mutex);
+    std::lock_guard lock{scheduler_mutex};
     return !ready_queue.empty();
 }
 
@@ -132,7 +132,7 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
 }
 
 void Scheduler::Reschedule() {
-    std::lock_guard<std::mutex> lock(scheduler_mutex);
+    std::lock_guard lock{scheduler_mutex};
 
     Thread* cur = GetCurrentThread();
     Thread* next = PopNextReadyThread();
@@ -148,35 +148,35 @@ void Scheduler::Reschedule() {
     SwitchContext(next);
 }
 
-void Scheduler::AddThread(SharedPtr<Thread> thread, u32 priority) {
-    std::lock_guard<std::mutex> lock(scheduler_mutex);
+void Scheduler::AddThread(SharedPtr<Thread> thread) {
+    std::lock_guard lock{scheduler_mutex};
 
     thread_list.push_back(std::move(thread));
 }
 
 void Scheduler::RemoveThread(Thread* thread) {
-    std::lock_guard<std::mutex> lock(scheduler_mutex);
+    std::lock_guard lock{scheduler_mutex};
 
     thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
                       thread_list.end());
 }
 
 void Scheduler::ScheduleThread(Thread* thread, u32 priority) {
-    std::lock_guard<std::mutex> lock(scheduler_mutex);
+    std::lock_guard lock{scheduler_mutex};
 
     ASSERT(thread->GetStatus() == ThreadStatus::Ready);
     ready_queue.add(thread, priority);
 }
 
 void Scheduler::UnscheduleThread(Thread* thread, u32 priority) {
-    std::lock_guard<std::mutex> lock(scheduler_mutex);
+    std::lock_guard lock{scheduler_mutex};
 
     ASSERT(thread->GetStatus() == ThreadStatus::Ready);
     ready_queue.remove(thread, priority);
 }
 
 void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
-    std::lock_guard<std::mutex> lock(scheduler_mutex);
+    std::lock_guard lock{scheduler_mutex};
     if (thread->GetPriority() == priority) {
         return;
     }
@@ -187,7 +187,7 @@ void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
 }
 
 Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const {
-    std::lock_guard<std::mutex> lock(scheduler_mutex);
+    std::lock_guard lock{scheduler_mutex};
 
     const u32 mask = 1U << core;
     for (auto* thread : ready_queue) {
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index 44baeb713..b29bf7be8 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -38,7 +38,7 @@ public:
     u64 GetLastContextSwitchTicks() const;
 
     /// Adds a new thread to the scheduler
-    void AddThread(SharedPtr<Thread> thread, u32 priority);
+    void AddThread(SharedPtr<Thread> thread);
 
     /// Removes a thread from the scheduler
     void RemoveThread(Thread* thread);
diff --git a/src/core/hle/kernel/server_port.cpp b/src/core/hle/kernel/server_port.cpp
index 0e1515c89..708fdf9e1 100644
--- a/src/core/hle/kernel/server_port.cpp
+++ b/src/core/hle/kernel/server_port.cpp
@@ -30,7 +30,7 @@ void ServerPort::AppendPendingSession(SharedPtr<ServerSession> pending_session)
     pending_sessions.push_back(std::move(pending_session));
 }
 
-bool ServerPort::ShouldWait(Thread* thread) const {
+bool ServerPort::ShouldWait(const Thread* thread) const {
     // If there are no pending sessions, we wait until a new one is added.
     return pending_sessions.empty();
 }
diff --git a/src/core/hle/kernel/server_port.h b/src/core/hle/kernel/server_port.h
index 9bc667cf2..76293cb8b 100644
--- a/src/core/hle/kernel/server_port.h
+++ b/src/core/hle/kernel/server_port.h
@@ -75,7 +75,7 @@ public:
     /// waiting to be accepted by this port.
     void AppendPendingSession(SharedPtr<ServerSession> pending_session);
 
-    bool ShouldWait(Thread* thread) const override;
+    bool ShouldWait(const Thread* thread) const override;
     void Acquire(Thread* thread) override;
 
 private:
diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp
index 4d8a337a7..40cec143e 100644
--- a/src/core/hle/kernel/server_session.cpp
+++ b/src/core/hle/kernel/server_session.cpp
@@ -46,7 +46,7 @@ ResultVal<SharedPtr<ServerSession>> ServerSession::Create(KernelCore& kernel, st
     return MakeResult(std::move(server_session));
 }
 
-bool ServerSession::ShouldWait(Thread* thread) const {
+bool ServerSession::ShouldWait(const Thread* thread) const {
     // Closed sessions should never wait, an error will be returned from svcReplyAndReceive.
     if (parent->client == nullptr)
         return false;
diff --git a/src/core/hle/kernel/server_session.h b/src/core/hle/kernel/server_session.h
index aea4ccfeb..79b84bade 100644
--- a/src/core/hle/kernel/server_session.h
+++ b/src/core/hle/kernel/server_session.h
@@ -82,7 +82,7 @@ public:
     */
     ResultCode HandleSyncRequest(SharedPtr<Thread> thread);
 
-    bool ShouldWait(Thread* thread) const override;
+    bool ShouldWait(const Thread* thread) const override;
 
     void Acquire(Thread* thread) override;
 
diff --git a/src/core/hle/kernel/shared_memory.cpp b/src/core/hle/kernel/shared_memory.cpp
index 62861da36..f15c5ee36 100644
--- a/src/core/hle/kernel/shared_memory.cpp
+++ b/src/core/hle/kernel/shared_memory.cpp
@@ -9,7 +9,6 @@
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/shared_memory.h"
-#include "core/memory.h"
 
 namespace Kernel {
 
@@ -119,7 +118,15 @@ ResultCode SharedMemory::Map(Process& target_process, VAddr address, MemoryPermi
                                                      ConvertPermissions(permissions));
 }
 
-ResultCode SharedMemory::Unmap(Process& target_process, VAddr address) {
+ResultCode SharedMemory::Unmap(Process& target_process, VAddr address, u64 unmap_size) {
+    if (unmap_size != size) {
+        LOG_ERROR(Kernel,
+                  "Invalid size passed to Unmap. Size must be equal to the size of the "
+                  "memory managed. Shared memory size=0x{:016X}, Unmap size=0x{:016X}",
+                  size, unmap_size);
+        return ERR_INVALID_SIZE;
+    }
+
     // TODO(Subv): Verify what happens if the application tries to unmap an address that is not
     // mapped to a SharedMemory.
     return target_process.VMManager().UnmapRange(address, size);
diff --git a/src/core/hle/kernel/shared_memory.h b/src/core/hle/kernel/shared_memory.h
index dab2a6bea..37e18c443 100644
--- a/src/core/hle/kernel/shared_memory.h
+++ b/src/core/hle/kernel/shared_memory.h
@@ -104,11 +104,17 @@ public:
 
     /**
      * Unmaps a shared memory block from the specified address in system memory
+     *
      * @param target_process Process from which to unmap the memory block.
-     * @param address Address in system memory where the shared memory block is mapped
+     * @param address Address in system memory where the shared memory block is mapped.
+     * @param unmap_size The amount of bytes to unmap from this shared memory instance.
+     *
      * @return Result code of the unmap operation
+     *
+     * @pre The given size to unmap must be the same size as the amount of memory managed by
+     *      the SharedMemory instance itself, otherwise ERR_INVALID_SIZE will be returned.
      */
-    ResultCode Unmap(Process& target_process, VAddr address);
+    ResultCode Unmap(Process& target_process, VAddr address, u64 unmap_size);
 
     /**
      * Gets a pointer to the shared memory block
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 11796e5e5..ab10db3df 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -709,7 +709,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
         HeapRegionBaseAddr = 4,
         HeapRegionSize = 5,
         TotalMemoryUsage = 6,
-        TotalHeapUsage = 7,
+        TotalPhysicalMemoryUsed = 7,
         IsCurrentProcessBeingDebugged = 8,
         RegisterResourceLimit = 9,
         IdleTickCount = 10,
@@ -745,7 +745,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
     case GetInfoType::NewMapRegionBaseAddr:
     case GetInfoType::NewMapRegionSize:
     case GetInfoType::TotalMemoryUsage:
-    case GetInfoType::TotalHeapUsage:
+    case GetInfoType::TotalPhysicalMemoryUsed:
     case GetInfoType::IsVirtualAddressMemoryEnabled:
     case GetInfoType::PersonalMmHeapUsage:
     case GetInfoType::TitleId:
@@ -805,8 +805,8 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
         *result = process->VMManager().GetTotalMemoryUsage();
         return RESULT_SUCCESS;
 
-    case GetInfoType::TotalHeapUsage:
-        *result = process->VMManager().GetCurrentHeapSize();
+    case GetInfoType::TotalPhysicalMemoryUsed:
+        *result = process->GetTotalPhysicalMemoryUsed();
         return RESULT_SUCCESS;
 
     case GetInfoType::IsVirtualAddressMemoryEnabled:
@@ -1140,7 +1140,7 @@ static ResultCode UnmapSharedMemory(Handle shared_memory_handle, VAddr addr, u64
         return ERR_INVALID_MEMORY_RANGE;
     }
 
-    return shared_memory->Unmap(*current_process, addr);
+    return shared_memory->Unmap(*current_process, addr, size);
 }
 
 static ResultCode QueryProcessMemory(VAddr memory_info_address, VAddr page_info_address,
@@ -1983,6 +1983,83 @@ static ResultCode SetResourceLimitLimitValue(Handle resource_limit, u32 resource
     return RESULT_SUCCESS;
 }
 
+static ResultCode GetProcessList(u32* out_num_processes, VAddr out_process_ids,
+                                 u32 out_process_ids_size) {
+    LOG_DEBUG(Kernel_SVC, "called. out_process_ids=0x{:016X}, out_process_ids_size={}",
+              out_process_ids, out_process_ids_size);
+
+    // If the supplied size is negative or greater than INT32_MAX / sizeof(u64), bail.
+    if ((out_process_ids_size & 0xF0000000) != 0) {
+        LOG_ERROR(Kernel_SVC,
+                  "Supplied size outside [0, 0x0FFFFFFF] range. out_process_ids_size={}",
+                  out_process_ids_size);
+        return ERR_OUT_OF_RANGE;
+    }
+
+    const auto& kernel = Core::System::GetInstance().Kernel();
+    const auto& vm_manager = kernel.CurrentProcess()->VMManager();
+    const auto total_copy_size = out_process_ids_size * sizeof(u64);
+
+    if (out_process_ids_size > 0 &&
+        !vm_manager.IsWithinAddressSpace(out_process_ids, total_copy_size)) {
+        LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
+                  out_process_ids, out_process_ids + total_copy_size);
+        return ERR_INVALID_ADDRESS_STATE;
+    }
+
+    const auto& process_list = kernel.GetProcessList();
+    const auto num_processes = process_list.size();
+    const auto copy_amount = std::min(std::size_t{out_process_ids_size}, num_processes);
+
+    for (std::size_t i = 0; i < copy_amount; ++i) {
+        Memory::Write64(out_process_ids, process_list[i]->GetProcessID());
+        out_process_ids += sizeof(u64);
+    }
+
+    *out_num_processes = static_cast<u32>(num_processes);
+    return RESULT_SUCCESS;
+}
+
+ResultCode GetThreadList(u32* out_num_threads, VAddr out_thread_ids, u32 out_thread_ids_size,
+                         Handle debug_handle) {
+    // TODO: Handle this case when debug events are supported.
+    UNIMPLEMENTED_IF(debug_handle != InvalidHandle);
+
+    LOG_DEBUG(Kernel_SVC, "called. out_thread_ids=0x{:016X}, out_thread_ids_size={}",
+              out_thread_ids, out_thread_ids_size);
+
+    // If the size is negative or larger than INT32_MAX / sizeof(u64)
+    if ((out_thread_ids_size & 0xF0000000) != 0) {
+        LOG_ERROR(Kernel_SVC, "Supplied size outside [0, 0x0FFFFFFF] range. size={}",
+                  out_thread_ids_size);
+        return ERR_OUT_OF_RANGE;
+    }
+
+    const auto* const current_process = Core::System::GetInstance().Kernel().CurrentProcess();
+    const auto& vm_manager = current_process->VMManager();
+    const auto total_copy_size = out_thread_ids_size * sizeof(u64);
+
+    if (out_thread_ids_size > 0 &&
+        !vm_manager.IsWithinAddressSpace(out_thread_ids, total_copy_size)) {
+        LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
+                  out_thread_ids, out_thread_ids + total_copy_size);
+        return ERR_INVALID_ADDRESS_STATE;
+    }
+
+    const auto& thread_list = current_process->GetThreadList();
+    const auto num_threads = thread_list.size();
+    const auto copy_amount = std::min(std::size_t{out_thread_ids_size}, num_threads);
+
+    auto list_iter = thread_list.cbegin();
+    for (std::size_t i = 0; i < copy_amount; ++i, ++list_iter) {
+        Memory::Write64(out_thread_ids, (*list_iter)->GetThreadID());
+        out_thread_ids += sizeof(u64);
+    }
+
+    *out_num_threads = static_cast<u32>(num_threads);
+    return RESULT_SUCCESS;
+}
+
 namespace {
 struct FunctionDef {
     using Func = void();
@@ -2095,8 +2172,8 @@ static const FunctionDef SVC_Table[] = {
     {0x62, nullptr, "TerminateDebugProcess"},
     {0x63, nullptr, "GetDebugEvent"},
     {0x64, nullptr, "ContinueDebugEvent"},
-    {0x65, nullptr, "GetProcessList"},
-    {0x66, nullptr, "GetThreadList"},
+    {0x65, SvcWrap<GetProcessList>, "GetProcessList"},
+    {0x66, SvcWrap<GetThreadList>, "GetThreadList"},
     {0x67, nullptr, "GetDebugThreadContext"},
     {0x68, nullptr, "SetDebugThreadContext"},
     {0x69, nullptr, "QueryDebugProcessMemory"},
@@ -2138,7 +2215,7 @@ void CallSVC(u32 immediate) {
     MICROPROFILE_SCOPE(Kernel_SVC);
 
     // Lock the global kernel mutex when we enter the kernel HLE.
-    std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
+    std::lock_guard lock{HLE::g_hle_lock};
 
     const FunctionDef* info = GetSVCInfo(immediate);
     if (info) {
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index 2a2c2c5ea..b3733680f 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -78,6 +78,14 @@ void SvcWrap() {
     FuncReturn(retval);
 }
 
+template <ResultCode func(u32*, u64, u32)>
+void SvcWrap() {
+    u32 param_1 = 0;
+    const u32 retval = func(&param_1, Param(1), static_cast<u32>(Param(2))).raw;
+    Core::CurrentArmInterface().SetReg(1, param_1);
+    FuncReturn(retval);
+}
+
 template <ResultCode func(u64*, u32)>
 void SvcWrap() {
     u64 param_1 = 0;
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index e5853c46f..1b891f632 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -28,7 +28,7 @@
 
 namespace Kernel {
 
-bool Thread::ShouldWait(Thread* thread) const {
+bool Thread::ShouldWait(const Thread* thread) const {
     return status != ThreadStatus::Dead;
 }
 
@@ -62,6 +62,8 @@ void Thread::Stop() {
     }
     wait_objects.clear();
 
+    owner_process->UnregisterThread(this);
+
     // Mark the TLS slot in the thread's page as free.
     owner_process->FreeTLSSlot(tls_address);
 }
@@ -199,9 +201,11 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
     thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap();
     thread->owner_process = &owner_process;
     thread->scheduler = &system.Scheduler(processor_id);
-    thread->scheduler->AddThread(thread, priority);
+    thread->scheduler->AddThread(thread);
     thread->tls_address = thread->owner_process->MarkNextAvailableTLSSlotAsUsed(*thread);
 
+    thread->owner_process->RegisterThread(thread.get());
+
     // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
     // to initialize the context
     ResetThreadContext(thread->context, stack_top, entry_point, arg);
@@ -229,16 +233,16 @@ void Thread::SetWaitSynchronizationOutput(s32 output) {
     context.cpu_registers[1] = output;
 }
 
-s32 Thread::GetWaitObjectIndex(WaitObject* object) const {
+s32 Thread::GetWaitObjectIndex(const WaitObject* object) const {
     ASSERT_MSG(!wait_objects.empty(), "Thread is not waiting for anything");
-    auto match = std::find(wait_objects.rbegin(), wait_objects.rend(), object);
+    const auto match = std::find(wait_objects.rbegin(), wait_objects.rend(), object);
     return static_cast<s32>(std::distance(match, wait_objects.rend()) - 1);
 }
 
 VAddr Thread::GetCommandBufferAddress() const {
     // Offset from the start of TLS at which the IPC command buffer begins.
-    static constexpr int CommandHeaderOffset = 0x80;
-    return GetTLSAddress() + CommandHeaderOffset;
+    constexpr u64 command_header_offset = 0x80;
+    return GetTLSAddress() + command_header_offset;
 }
 
 void Thread::SetStatus(ThreadStatus new_status) {
@@ -352,7 +356,7 @@ void Thread::ChangeScheduler() {
     if (*new_processor_id != processor_id) {
         // Remove thread from previous core's scheduler
         scheduler->RemoveThread(this);
-        next_scheduler.AddThread(this, current_priority);
+        next_scheduler.AddThread(this);
     }
 
     processor_id = *new_processor_id;
@@ -367,7 +371,7 @@ void Thread::ChangeScheduler() {
     system.CpuCore(processor_id).PrepareReschedule();
 }
 
-bool Thread::AllWaitObjectsReady() {
+bool Thread::AllWaitObjectsReady() const {
     return std::none_of(
         wait_objects.begin(), wait_objects.end(),
         [this](const SharedPtr<WaitObject>& object) { return object->ShouldWait(this); });
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index 9c684758c..73e5d1bb4 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -111,7 +111,7 @@ public:
         return HANDLE_TYPE;
     }
 
-    bool ShouldWait(Thread* thread) const override;
+    bool ShouldWait(const Thread* thread) const override;
     void Acquire(Thread* thread) override;
 
     /**
@@ -205,7 +205,7 @@ public:
      * object in the list.
      * @param object Object to query the index of.
      */
-    s32 GetWaitObjectIndex(WaitObject* object) const;
+    s32 GetWaitObjectIndex(const WaitObject* object) const;
 
     /**
      * Stops a thread, invalidating it from further use
@@ -299,7 +299,7 @@ public:
     }
 
     /// Determines whether all the objects this thread is waiting on are ready.
-    bool AllWaitObjectsReady();
+    bool AllWaitObjectsReady() const;
 
     const MutexWaitingThreads& GetMutexWaitingThreads() const {
         return wait_mutex_threads;
diff --git a/src/core/hle/kernel/wait_object.h b/src/core/hle/kernel/wait_object.h
index 5987fb971..04464a51a 100644
--- a/src/core/hle/kernel/wait_object.h
+++ b/src/core/hle/kernel/wait_object.h
@@ -24,7 +24,7 @@ public:
      * @param thread The thread about which we're deciding.
      * @return True if the current thread should wait due to this object being unavailable
      */
-    virtual bool ShouldWait(Thread* thread) const = 0;
+    virtual bool ShouldWait(const Thread* thread) const = 0;
 
     /// Acquire/lock the object for the specified thread if it is available
     virtual void Acquire(Thread* thread) = 0;
diff --git a/src/core/hle/service/fatal/fatal.cpp b/src/core/hle/service/fatal/fatal.cpp
index 770590d0b..2c229bcad 100644
--- a/src/core/hle/service/fatal/fatal.cpp
+++ b/src/core/hle/service/fatal/fatal.cpp
@@ -25,21 +25,34 @@ Module::Interface::Interface(std::shared_ptr<Module> module, const char* name)
 Module::Interface::~Interface() = default;
 
 struct FatalInfo {
-    std::array<u64_le, 31> registers{}; // TODO(ogniK): See if this actually is registers or
-                                        // not(find a game which has non zero valeus)
-    u64_le unk0{};
-    u64_le unk1{};
-    u64_le unk2{};
-    u64_le unk3{};
-    u64_le unk4{};
-    u64_le unk5{};
-    u64_le unk6{};
+    enum class Architecture : s32 {
+        AArch64,
+        AArch32,
+    };
+
+    const char* ArchAsString() const {
+        return arch == Architecture::AArch64 ? "AArch64" : "AArch32";
+    }
+
+    std::array<u64_le, 31> registers{};
+    u64_le sp{};
+    u64_le pc{};
+    u64_le pstate{};
+    u64_le afsr0{};
+    u64_le afsr1{};
+    u64_le esr{};
+    u64_le far{};
 
     std::array<u64_le, 32> backtrace{};
-    u64_le unk7{};
-    u64_le unk8{};
+    u64_le program_entry_point{};
+
+    // Bit flags that indicate which registers have been set with values
+    // for this context. The service itself uses these to determine which
+    // registers to specifically print out.
+    u64_le set_flags{};
+
     u32_le backtrace_size{};
-    u32_le unk9{};
+    Architecture arch{};
     u32_le unk10{}; // TODO(ogniK): Is this even used or is it just padding?
 };
 static_assert(sizeof(FatalInfo) == 0x250, "FatalInfo is an invalid size");
@@ -52,36 +65,36 @@ enum class FatalType : u32 {
 
 static void GenerateErrorReport(ResultCode error_code, const FatalInfo& info) {
     const auto title_id = Core::CurrentProcess()->GetTitleID();
-    std::string crash_report =
-        fmt::format("Yuzu {}-{} crash report\n"
-                    "Title ID: {:016x}\n"
-                    "Result: 0x{:X} ({:04}-{:04d})\n"
-                    "\n",
-                    Common::g_scm_branch, Common::g_scm_desc, title_id, error_code.raw,
-                    2000 + static_cast<u32>(error_code.module.Value()),
-                    static_cast<u32>(error_code.description.Value()), info.unk8, info.unk7);
+    std::string crash_report = fmt::format(
+        "Yuzu {}-{} crash report\n"
+        "Title ID: {:016x}\n"
+        "Result: 0x{:X} ({:04}-{:04d})\n"
+        "Set flags: 0x{:16X}\n"
+        "Program entry point: 0x{:16X}\n"
+        "\n",
+        Common::g_scm_branch, Common::g_scm_desc, title_id, error_code.raw,
+        2000 + static_cast<u32>(error_code.module.Value()),
+        static_cast<u32>(error_code.description.Value()), info.set_flags, info.program_entry_point);
     if (info.backtrace_size != 0x0) {
         crash_report += "Registers:\n";
-        // TODO(ogniK): This is just a guess, find a game which actually has non zero values
         for (size_t i = 0; i < info.registers.size(); i++) {
             crash_report +=
                 fmt::format("    X[{:02d}]: {:016x}\n", i, info.registers[i]);
         }
-        crash_report += fmt::format("    Unknown 0: {:016x}\n", info.unk0);
-        crash_report += fmt::format("    Unknown 1: {:016x}\n", info.unk1);
-        crash_report += fmt::format("    Unknown 2: {:016x}\n", info.unk2);
-        crash_report += fmt::format("    Unknown 3: {:016x}\n", info.unk3);
-        crash_report += fmt::format("    Unknown 4: {:016x}\n", info.unk4);
-        crash_report += fmt::format("    Unknown 5: {:016x}\n", info.unk5);
-        crash_report += fmt::format("    Unknown 6: {:016x}\n", info.unk6);
+        crash_report += fmt::format("    SP: {:016x}\n", info.sp);
+        crash_report += fmt::format("    PC: {:016x}\n", info.pc);
+        crash_report += fmt::format("    PSTATE: {:016x}\n", info.pstate);
+        crash_report += fmt::format("    AFSR0: {:016x}\n", info.afsr0);
+        crash_report += fmt::format("    AFSR1: {:016x}\n", info.afsr1);
+        crash_report += fmt::format("    ESR: {:016x}\n", info.esr);
+        crash_report += fmt::format("    FAR: {:016x}\n", info.far);
         crash_report += "\nBacktrace:\n";
         for (size_t i = 0; i < info.backtrace_size; i++) {
             crash_report +=
                 fmt::format("    Backtrace[{:02d}]: {:016x}\n", i, info.backtrace[i]);
         }
-        crash_report += fmt::format("\nUnknown 7: 0x{:016x}\n", info.unk7);
-        crash_report += fmt::format("Unknown 8: 0x{:016x}\n", info.unk8);
-        crash_report += fmt::format("Unknown 9: 0x{:016x}\n", info.unk9);
+
+        crash_report += fmt::format("Architecture: {}\n", info.ArchAsString());
         crash_report += fmt::format("Unknown 10: 0x{:016x}\n", info.unk10);
     }
 
@@ -125,13 +138,13 @@ static void ThrowFatalError(ResultCode error_code, FatalType fatal_type, const F
     case FatalType::ErrorReport:
         GenerateErrorReport(error_code, info);
         break;
-    };
+    }
 }
 
 void Module::Interface::ThrowFatal(Kernel::HLERequestContext& ctx) {
     LOG_ERROR(Service_Fatal, "called");
     IPC::RequestParser rp{ctx};
-    auto error_code = rp.Pop<ResultCode>();
+    const auto error_code = rp.Pop<ResultCode>();
 
     ThrowFatalError(error_code, FatalType::ErrorScreen, {});
     IPC::ResponseBuilder rb{ctx, 2};
@@ -141,8 +154,8 @@ void Module::Interface::ThrowFatal(Kernel::HLERequestContext& ctx) {
 void Module::Interface::ThrowFatalWithPolicy(Kernel::HLERequestContext& ctx) {
     LOG_ERROR(Service_Fatal, "called");
     IPC::RequestParser rp(ctx);
-    auto error_code = rp.Pop<ResultCode>();
-    auto fatal_type = rp.PopEnum<FatalType>();
+    const auto error_code = rp.Pop<ResultCode>();
+    const auto fatal_type = rp.PopEnum<FatalType>();
 
     ThrowFatalError(error_code, fatal_type, {}); // No info is passed with ThrowFatalWithPolicy
     IPC::ResponseBuilder rb{ctx, 2};
@@ -152,9 +165,9 @@ void Module::Interface::ThrowFatalWithPolicy(Kernel::HLERequestContext& ctx) {
 void Module::Interface::ThrowFatalWithCpuContext(Kernel::HLERequestContext& ctx) {
     LOG_ERROR(Service_Fatal, "called");
     IPC::RequestParser rp(ctx);
-    auto error_code = rp.Pop<ResultCode>();
-    auto fatal_type = rp.PopEnum<FatalType>();
-    auto fatal_info = ctx.ReadBuffer();
+    const auto error_code = rp.Pop<ResultCode>();
+    const auto fatal_type = rp.PopEnum<FatalType>();
+    const auto fatal_info = ctx.ReadBuffer();
     FatalInfo info{};
 
     ASSERT_MSG(fatal_info.size() == sizeof(FatalInfo), "Invalid fatal info buffer size!");
diff --git a/src/core/hle/service/nfc/nfc.cpp b/src/core/hle/service/nfc/nfc.cpp
index 5c62d42ba..ca88bf97f 100644
--- a/src/core/hle/service/nfc/nfc.cpp
+++ b/src/core/hle/service/nfc/nfc.cpp
@@ -150,7 +150,7 @@ private:
 
         IPC::ResponseBuilder rb{ctx, 3};
         rb.Push(RESULT_SUCCESS);
-        rb.PushRaw<u8>(Settings::values.enable_nfc);
+        rb.PushRaw<u8>(true);
     }
 
     void GetStateOld(Kernel::HLERequestContext& ctx) {
diff --git a/src/core/hle/service/nfp/nfp.cpp b/src/core/hle/service/nfp/nfp.cpp
index 1c4482e47..c6babdd4d 100644
--- a/src/core/hle/service/nfp/nfp.cpp
+++ b/src/core/hle/service/nfp/nfp.cpp
@@ -335,7 +335,7 @@ void Module::Interface::CreateUserInterface(Kernel::HLERequestContext& ctx) {
 }
 
 bool Module::Interface::LoadAmiibo(const std::vector<u8>& buffer) {
-    std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
+    std::lock_guard lock{HLE::g_hle_lock};
     if (buffer.size() < sizeof(AmiiboFile)) {
         return false;
     }
diff --git a/src/core/loader/elf.cpp b/src/core/loader/elf.cpp
index 8b1920f22..46ac372f6 100644
--- a/src/core/loader/elf.cpp
+++ b/src/core/loader/elf.cpp
@@ -341,7 +341,7 @@ Kernel::CodeSet ElfReader::LoadInto(VAddr vaddr) {
     }
 
     codeset.entrypoint = base_addr + header->e_entry;
-    codeset.memory = std::make_shared<std::vector<u8>>(std::move(program_image));
+    codeset.memory = std::move(program_image);
 
     LOG_DEBUG(Loader, "Done loading.");
 
diff --git a/src/core/loader/nro.cpp b/src/core/loader/nro.cpp
index 5de02a94b..31e4a0c84 100644
--- a/src/core/loader/nro.cpp
+++ b/src/core/loader/nro.cpp
@@ -187,7 +187,7 @@ static bool LoadNroImpl(Kernel::Process& process, const std::vector<u8>& data,
     program_image.resize(static_cast<u32>(program_image.size()) + bss_size);
 
     // Load codeset for current process
-    codeset.memory = std::make_shared<std::vector<u8>>(std::move(program_image));
+    codeset.memory = std::move(program_image);
     process.LoadModule(std::move(codeset), load_base);
 
     // Register module with GDBStub
diff --git a/src/core/loader/nso.cpp b/src/core/loader/nso.cpp
index 714d85a59..babc7e646 100644
--- a/src/core/loader/nso.cpp
+++ b/src/core/loader/nso.cpp
@@ -161,7 +161,7 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process,
     }
 
     // Load codeset for current process
-    codeset.memory = std::make_shared<std::vector<u8>>(std::move(program_image));
+    codeset.memory = std::move(program_image);
     process.LoadModule(std::move(codeset), load_base);
 
     // Register module with GDBStub
diff --git a/src/core/perf_stats.cpp b/src/core/perf_stats.cpp
index c716a462b..4afd6c8a3 100644
--- a/src/core/perf_stats.cpp
+++ b/src/core/perf_stats.cpp
@@ -18,13 +18,13 @@ using std::chrono::microseconds;
 namespace Core {
 
 void PerfStats::BeginSystemFrame() {
-    std::lock_guard<std::mutex> lock(object_mutex);
+    std::lock_guard lock{object_mutex};
 
     frame_begin = Clock::now();
 }
 
 void PerfStats::EndSystemFrame() {
-    std::lock_guard<std::mutex> lock(object_mutex);
+    std::lock_guard lock{object_mutex};
 
     auto frame_end = Clock::now();
     accumulated_frametime += frame_end - frame_begin;
@@ -35,13 +35,13 @@ void PerfStats::EndSystemFrame() {
 }
 
 void PerfStats::EndGameFrame() {
-    std::lock_guard<std::mutex> lock(object_mutex);
+    std::lock_guard lock{object_mutex};
 
     game_frames += 1;
 }
 
 PerfStatsResults PerfStats::GetAndResetStats(microseconds current_system_time_us) {
-    std::lock_guard<std::mutex> lock(object_mutex);
+    std::lock_guard lock{object_mutex};
 
     const auto now = Clock::now();
     // Walltime elapsed since stats were reset
@@ -67,7 +67,7 @@ PerfStatsResults PerfStats::GetAndResetStats(microseconds current_system_time_us
 }
 
 double PerfStats::GetLastFrameTimeScale() {
-    std::lock_guard<std::mutex> lock(object_mutex);
+    std::lock_guard lock{object_mutex};
 
     constexpr double FRAME_LENGTH = 1.0 / 60;
     return duration_cast<DoubleSecs>(previous_frame_length).count() / FRAME_LENGTH;
diff --git a/src/core/settings.cpp b/src/core/settings.cpp
index 6dd3139cc..6d32ebea3 100644
--- a/src/core/settings.cpp
+++ b/src/core/settings.cpp
@@ -82,7 +82,6 @@ void LogSetting(const std::string& name, const T& value) {
 void LogSettings() {
     LOG_INFO(Config, "yuzu Configuration:");
     LogSetting("System_UseDockedMode", Settings::values.use_docked_mode);
-    LogSetting("System_EnableNfc", Settings::values.enable_nfc);
     LogSetting("System_RngSeed", Settings::values.rng_seed.value_or(0));
     LogSetting("System_CurrentUser", Settings::values.current_user);
     LogSetting("System_LanguageIndex", Settings::values.language_index);
diff --git a/src/core/settings.h b/src/core/settings.h
index cdfb2f742..d543eb32f 100644
--- a/src/core/settings.h
+++ b/src/core/settings.h
@@ -349,7 +349,6 @@ struct TouchscreenInput {
 struct Values {
     // System
     bool use_docked_mode;
-    bool enable_nfc;
     std::optional<u32> rng_seed;
     // Measured in seconds since epoch
     std::optional<std::chrono::seconds> custom_rtc;