path: root/src/core
Diffstat (limited to 'src/core')
-rw-r--r--  src/core/CMakeLists.txt | 16
-rw-r--r--  src/core/arm/arm_interface.cpp | 57
-rw-r--r--  src/core/arm/arm_interface.h | 20
-rw-r--r--  src/core/arm/cpu_interrupt_handler.cpp | 27
-rw-r--r--  src/core/arm/cpu_interrupt_handler.h | 39
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_32.cpp | 103
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_32.h | 12
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_64.cpp | 110
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_64.h | 26
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_cp15.cpp | 81
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_cp15.h | 126
-rw-r--r--  src/core/arm/dynarmic/arm_exclusive_monitor.cpp | 76
-rw-r--r--  src/core/arm/dynarmic/arm_exclusive_monitor.h | 48
-rw-r--r--  src/core/arm/exclusive_monitor.cpp | 2
-rw-r--r--  src/core/arm/exclusive_monitor.h | 6
-rw-r--r--  src/core/arm/unicorn/arm_unicorn.cpp | 19
-rw-r--r--  src/core/arm/unicorn/arm_unicorn.h | 5
-rw-r--r--  src/core/core.cpp | 130
-rw-r--r--  src/core/core.h | 48
-rw-r--r--  src/core/core_manager.cpp | 67
-rw-r--r--  src/core/core_manager.h | 63
-rw-r--r--  src/core/core_timing.cpp | 254
-rw-r--r--  src/core/core_timing.h | 123
-rw-r--r--  src/core/core_timing_util.cpp | 44
-rw-r--r--  src/core/core_timing_util.h | 18
-rw-r--r--  src/core/cpu_manager.cpp | 368
-rw-r--r--  src/core/cpu_manager.h | 80
-rw-r--r--  src/core/crypto/key_manager.cpp | 12
-rw-r--r--  src/core/crypto/key_manager.h | 13
-rw-r--r--  src/core/file_sys/bis_factory.cpp | 2
-rw-r--r--  src/core/file_sys/card_image.cpp | 4
-rw-r--r--  src/core/file_sys/card_image.h | 2
-rw-r--r--  src/core/file_sys/content_archive.cpp | 5
-rw-r--r--  src/core/file_sys/content_archive.h | 5
-rw-r--r--  src/core/file_sys/registered_cache.cpp | 6
-rw-r--r--  src/core/file_sys/registered_cache.h | 2
-rw-r--r--  src/core/file_sys/submission_package.cpp | 4
-rw-r--r--  src/core/file_sys/submission_package.h | 2
-rw-r--r--  src/core/file_sys/system_archive/mii_model.cpp | 2
-rw-r--r--  src/core/file_sys/system_archive/shared_font.cpp | 2
-rw-r--r--  src/core/file_sys/xts_archive.h | 2
-rw-r--r--  src/core/frontend/framebuffer_layout.cpp | 2
-rw-r--r--  src/core/gdbstub/gdbstub.cpp | 1
-rw-r--r--  src/core/hardware_properties.h | 4
-rw-r--r--  src/core/hle/kernel/address_arbiter.cpp | 212
-rw-r--r--  src/core/hle/kernel/address_arbiter.h | 3
-rw-r--r--  src/core/hle/kernel/client_port.cpp | 2
-rw-r--r--  src/core/hle/kernel/errors.h | 1
-rw-r--r--  src/core/hle/kernel/hle_ipc.cpp | 87
-rw-r--r--  src/core/hle/kernel/kernel.cpp | 255
-rw-r--r--  src/core/hle/kernel/kernel.h | 39
-rw-r--r--  src/core/hle/kernel/memory/memory_manager.cpp | 5
-rw-r--r--  src/core/hle/kernel/mutex.cpp | 118
-rw-r--r--  src/core/hle/kernel/mutex.h | 4
-rw-r--r--  src/core/hle/kernel/physical_core.cpp | 52
-rw-r--r--  src/core/hle/kernel/physical_core.h | 44
-rw-r--r--  src/core/hle/kernel/process.cpp | 31
-rw-r--r--  src/core/hle/kernel/readable_event.cpp | 5
-rw-r--r--  src/core/hle/kernel/resource_limit.cpp | 6
-rw-r--r--  src/core/hle/kernel/scheduler.cpp | 571
-rw-r--r--  src/core/hle/kernel/scheduler.h | 123
-rw-r--r--  src/core/hle/kernel/server_session.cpp | 16
-rw-r--r--  src/core/hle/kernel/svc.cpp | 464
-rw-r--r--  src/core/hle/kernel/svc_wrap.h | 137
-rw-r--r--  src/core/hle/kernel/synchronization.cpp | 137
-rw-r--r--  src/core/hle/kernel/synchronization_object.cpp | 64
-rw-r--r--  src/core/hle/kernel/synchronization_object.h | 18
-rw-r--r--  src/core/hle/kernel/thread.cpp | 424
-rw-r--r--  src/core/hle/kernel/thread.h | 277
-rw-r--r--  src/core/hle/kernel/time_manager.cpp | 23
-rw-r--r--  src/core/hle/kernel/time_manager.h | 4
-rw-r--r--  src/core/hle/service/acc/acc.cpp | 350
-rw-r--r--  src/core/hle/service/acc/acc.h | 1
-rw-r--r--  src/core/hle/service/acc/acc_aa.cpp | 4
-rw-r--r--  src/core/hle/service/acc/acc_su.cpp | 34
-rw-r--r--  src/core/hle/service/acc/acc_u0.cpp | 18
-rw-r--r--  src/core/hle/service/acc/acc_u1.cpp | 29
-rw-r--r--  src/core/hle/service/am/am.cpp | 80
-rw-r--r--  src/core/hle/service/am/am.h | 3
-rw-r--r--  src/core/hle/service/am/applets/software_keyboard.cpp | 6
-rw-r--r--  src/core/hle/service/am/spsm.cpp | 16
-rw-r--r--  src/core/hle/service/aoc/aoc_u.cpp | 1
-rw-r--r--  src/core/hle/service/bcat/bcat.cpp | 2
-rw-r--r--  src/core/hle/service/bcat/module.cpp | 3
-rw-r--r--  src/core/hle/service/bpc/bpc.cpp | 20
-rw-r--r--  src/core/hle/service/btdrv/btdrv.cpp | 167
-rw-r--r--  src/core/hle/service/btm/btm.cpp | 147
-rw-r--r--  src/core/hle/service/caps/caps.cpp | 2
-rw-r--r--  src/core/hle/service/caps/caps.h | 76
-rw-r--r--  src/core/hle/service/caps/caps_a.cpp | 2
-rw-r--r--  src/core/hle/service/caps/caps_a.h | 2
-rw-r--r--  src/core/hle/service/caps/caps_c.cpp | 2
-rw-r--r--  src/core/hle/service/caps/caps_c.h | 2
-rw-r--r--  src/core/hle/service/caps/caps_sc.cpp | 2
-rw-r--r--  src/core/hle/service/caps/caps_sc.h | 2
-rw-r--r--  src/core/hle/service/caps/caps_ss.cpp | 2
-rw-r--r--  src/core/hle/service/caps/caps_ss.h | 2
-rw-r--r--  src/core/hle/service/caps/caps_su.cpp | 2
-rw-r--r--  src/core/hle/service/caps/caps_su.h | 2
-rw-r--r--  src/core/hle/service/caps/caps_u.cpp | 26
-rw-r--r--  src/core/hle/service/caps/caps_u.h | 2
-rw-r--r--  src/core/hle/service/es/es.cpp | 49
-rw-r--r--  src/core/hle/service/eupld/eupld.cpp | 1
-rw-r--r--  src/core/hle/service/friend/friend.cpp | 6
-rw-r--r--  src/core/hle/service/grc/grc.cpp | 3
-rw-r--r--  src/core/hle/service/hid/controllers/debug_pad.cpp | 2
-rw-r--r--  src/core/hle/service/hid/controllers/gesture.cpp | 2
-rw-r--r--  src/core/hle/service/hid/controllers/keyboard.cpp | 2
-rw-r--r--  src/core/hle/service/hid/controllers/mouse.cpp | 2
-rw-r--r--  src/core/hle/service/hid/controllers/npad.cpp | 10
-rw-r--r--  src/core/hle/service/hid/controllers/npad.h | 10
-rw-r--r--  src/core/hle/service/hid/controllers/stubbed.cpp | 2
-rw-r--r--  src/core/hle/service/hid/controllers/touchscreen.cpp | 4
-rw-r--r--  src/core/hle/service/hid/controllers/xpad.cpp | 2
-rw-r--r--  src/core/hle/service/hid/hid.cpp | 164
-rw-r--r--  src/core/hle/service/hid/hid.h | 16
-rw-r--r--  src/core/hle/service/hid/irs.cpp | 2
-rw-r--r--  src/core/hle/service/lbl/lbl.cpp | 1
-rw-r--r--  src/core/hle/service/ldn/ldn.cpp | 1
-rw-r--r--  src/core/hle/service/ldr/ldr.cpp | 105
-rw-r--r--  src/core/hle/service/lm/manager.cpp | 3
-rw-r--r--  src/core/hle/service/mig/mig.cpp | 6
-rw-r--r--  src/core/hle/service/mm/mm_u.cpp | 32
-rw-r--r--  src/core/hle/service/ncm/ncm.cpp | 20
-rw-r--r--  src/core/hle/service/nfc/nfc.cpp | 6
-rw-r--r--  src/core/hle/service/ns/ns.cpp | 3
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp | 28
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h | 18
-rw-r--r--  src/core/hle/service/nvflinger/nvflinger.cpp | 64
-rw-r--r--  src/core/hle/service/nvflinger/nvflinger.h | 21
-rw-r--r--  src/core/hle/service/prepo/prepo.cpp | 7
-rw-r--r--  src/core/hle/service/set/set.cpp | 78
-rw-r--r--  src/core/hle/service/set/set.h | 2
-rw-r--r--  src/core/hle/service/sm/sm.cpp | 2
-rw-r--r--  src/core/hle/service/spl/module.cpp | 2
-rw-r--r--  src/core/hle/service/time/standard_steady_clock_core.cpp | 5
-rw-r--r--  src/core/hle/service/time/tick_based_steady_clock_core.cpp | 5
-rw-r--r--  src/core/hle/service/time/time.cpp | 5
-rw-r--r--  src/core/hle/service/time/time_sharedmemory.cpp | 3
-rw-r--r--  src/core/hle/service/vi/vi.cpp | 22
-rw-r--r--  src/core/memory.cpp | 116
-rw-r--r--  src/core/memory.h | 67
-rw-r--r--  src/core/memory/cheat_engine.cpp | 8
-rw-r--r--  src/core/perf_stats.cpp | 5
-rw-r--r--  src/core/settings.cpp | 89
-rw-r--r--  src/core/settings.h | 126
-rw-r--r--  src/core/telemetry_session.cpp | 27
-rw-r--r--  src/core/tools/freezer.cpp | 8
148 files changed, 5002 insertions, 2327 deletions
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 47418006b..d1f173f42 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -7,6 +7,16 @@ endif()
 add_library(core STATIC
     arm/arm_interface.h
     arm/arm_interface.cpp
+    arm/cpu_interrupt_handler.cpp
+    arm/cpu_interrupt_handler.h
+    arm/dynarmic/arm_dynarmic_32.cpp
+    arm/dynarmic/arm_dynarmic_32.h
+    arm/dynarmic/arm_dynarmic_64.cpp
+    arm/dynarmic/arm_dynarmic_64.h
+    arm/dynarmic/arm_dynarmic_cp15.cpp
+    arm/dynarmic/arm_dynarmic_cp15.h
+    arm/dynarmic/arm_exclusive_monitor.cpp
+    arm/dynarmic/arm_exclusive_monitor.h
     arm/exclusive_monitor.cpp
     arm/exclusive_monitor.h
     arm/unicorn/arm_unicorn.cpp
@@ -15,8 +25,6 @@ add_library(core STATIC
     constants.h
     core.cpp
     core.h
-    core_manager.cpp
-    core_manager.h
     core_timing.cpp
     core_timing.h
     core_timing_util.cpp
@@ -606,11 +614,11 @@ endif()
 create_target_directory_groups(core)
 
 target_link_libraries(core PUBLIC common PRIVATE audio_core video_core)
-target_link_libraries(core PUBLIC Boost::boost PRIVATE fmt::fmt nlohmann_json::nlohmann_json mbedtls Opus::Opus unicorn)
+target_link_libraries(core PUBLIC Boost::boost PRIVATE fmt::fmt nlohmann_json::nlohmann_json mbedtls opus unicorn zip)
 
 if (YUZU_ENABLE_BOXCAT)
     target_compile_definitions(core PRIVATE -DYUZU_ENABLE_BOXCAT)
-    target_link_libraries(core PRIVATE httplib nlohmann_json::nlohmann_json zip)
+    target_link_libraries(core PRIVATE httplib nlohmann_json::nlohmann_json)
 endif()
 
 if (ENABLE_WEB_SERVICE)
diff --git a/src/core/arm/arm_interface.cpp b/src/core/arm/arm_interface.cpp
index d079a1bc8..d2295ed90 100644
--- a/src/core/arm/arm_interface.cpp
+++ b/src/core/arm/arm_interface.cpp
@@ -139,6 +139,63 @@ std::optional<std::string> GetSymbolName(const Symbols& symbols, VAddr func_addr
 
 constexpr u64 SEGMENT_BASE = 0x7100000000ull;
 
+std::vector<ARM_Interface::BacktraceEntry> ARM_Interface::GetBacktraceFromContext(
+    System& system, const ThreadContext64& ctx) {
+    std::vector<BacktraceEntry> out;
+    auto& memory = system.Memory();
+
+    auto fp = ctx.cpu_registers[29];
+    auto lr = ctx.cpu_registers[30];
+    while (true) {
+        out.push_back({"", 0, lr, 0});
+        if (!fp) {
+            break;
+        }
+        lr = memory.Read64(fp + 8) - 4;
+        fp = memory.Read64(fp);
+    }
+
+    std::map<VAddr, std::string> modules;
+    auto& loader{system.GetAppLoader()};
+    if (loader.ReadNSOModules(modules) != Loader::ResultStatus::Success) {
+        return {};
+    }
+
+    std::map<std::string, Symbols> symbols;
+    for (const auto& module : modules) {
+        symbols.insert_or_assign(module.second, GetSymbols(module.first, memory));
+    }
+
+    for (auto& entry : out) {
+        VAddr base = 0;
+        for (auto iter = modules.rbegin(); iter != modules.rend(); ++iter) {
+            const auto& module{*iter};
+            if (entry.original_address >= module.first) {
+                entry.module = module.second;
+                base = module.first;
+                break;
+            }
+        }
+
+        entry.offset = entry.original_address - base;
+        entry.address = SEGMENT_BASE + entry.offset;
+
+        if (entry.module.empty())
+            entry.module = "unknown";
+
+        const auto symbol_set = symbols.find(entry.module);
+        if (symbol_set != symbols.end()) {
+            const auto symbol = GetSymbolName(symbol_set->second, entry.offset);
+            if (symbol.has_value()) {
+                // TODO(DarkLordZach): Add demangling of symbol names.
+                entry.name = *symbol;
+            }
+        }
+    }
+
+    return out;
+}
+
 std::vector<ARM_Interface::BacktraceEntry> ARM_Interface::GetBacktrace() const {
     std::vector<BacktraceEntry> out;
     auto& memory = system.Memory();
diff --git a/src/core/arm/arm_interface.h b/src/core/arm/arm_interface.h
index cb2e640e2..1f24051e4 100644
--- a/src/core/arm/arm_interface.h
+++ b/src/core/arm/arm_interface.h
@@ -7,6 +7,7 @@
 #include <array>
 #include <vector>
 #include "common/common_types.h"
+#include "core/hardware_properties.h"
 
 namespace Common {
 struct PageTable;
@@ -18,25 +19,29 @@ enum class VMAPermission : u8;
 
 namespace Core {
 class System;
+class CPUInterruptHandler;
+
+using CPUInterrupts = std::array<CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>;
 
 /// Generic ARMv8 CPU interface
 class ARM_Interface : NonCopyable {
 public:
-    explicit ARM_Interface(System& system_) : system{system_} {}
+    explicit ARM_Interface(System& system_, CPUInterrupts& interrupt_handlers, bool uses_wall_clock)
+        : system{system_}, interrupt_handlers{interrupt_handlers}, uses_wall_clock{
+                                                                       uses_wall_clock} {}
     virtual ~ARM_Interface() = default;
 
     struct ThreadContext32 {
         std::array<u32, 16> cpu_registers{};
+        std::array<u32, 64> extension_registers{};
         u32 cpsr{};
-        std::array<u8, 4> padding{};
-        std::array<u64, 32> fprs{};
         u32 fpscr{};
         u32 fpexc{};
         u32 tpidr{};
     };
     // Internally within the kernel, it expects the AArch32 version of the
     // thread context to be 344 bytes in size.
-    static_assert(sizeof(ThreadContext32) == 0x158);
+    static_assert(sizeof(ThreadContext32) == 0x150);
 
     struct ThreadContext64 {
         std::array<u64, 31> cpu_registers{};
@@ -143,6 +148,8 @@ public:
      */
     virtual void SetTPIDR_EL0(u64 value) = 0;
 
+    virtual void ChangeProcessorID(std::size_t new_core_id) = 0;
+
     virtual void SaveContext(ThreadContext32& ctx) = 0;
     virtual void SaveContext(ThreadContext64& ctx) = 0;
     virtual void LoadContext(const ThreadContext32& ctx) = 0;
@@ -162,6 +169,9 @@ public:
         std::string name;
     };
 
+    static std::vector<BacktraceEntry> GetBacktraceFromContext(System& system,
+                                                               const ThreadContext64& ctx);
+
     std::vector<BacktraceEntry> GetBacktrace() const;
 
     /// fp (= r29) points to the last frame record.
@@ -175,6 +185,8 @@ public:
 protected:
     /// System context that this ARM interface is running under.
     System& system;
+    CPUInterrupts& interrupt_handlers;
+    bool uses_wall_clock;
 };
 
 } // namespace Core
diff --git a/src/core/arm/cpu_interrupt_handler.cpp b/src/core/arm/cpu_interrupt_handler.cpp
new file mode 100644
index 000000000..df0350881
--- /dev/null
+++ b/src/core/arm/cpu_interrupt_handler.cpp
@@ -0,0 +1,27 @@
+// Copyright 2020 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/thread.h"
+#include "core/arm/cpu_interrupt_handler.h"
+
+namespace Core {
+
+CPUInterruptHandler::CPUInterruptHandler() : is_interrupted{} {
+    interrupt_event = std::make_unique<Common::Event>();
+}
+
+CPUInterruptHandler::~CPUInterruptHandler() = default;
+
+void CPUInterruptHandler::SetInterrupt(bool is_interrupted_) {
+    if (is_interrupted_) {
+        interrupt_event->Set();
+    }
+    this->is_interrupted = is_interrupted_;
+}
+
+void CPUInterruptHandler::AwaitInterrupt() {
+    interrupt_event->Wait();
+}
+
+} // namespace Core
diff --git a/src/core/arm/cpu_interrupt_handler.h b/src/core/arm/cpu_interrupt_handler.h
new file mode 100644
index 000000000..3d062d326
--- /dev/null
+++ b/src/core/arm/cpu_interrupt_handler.h
@@ -0,0 +1,39 @@
+// Copyright 2020 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <memory>
+
+namespace Common {
+class Event;
+}
+
+namespace Core {
+
+class CPUInterruptHandler {
+public:
+    CPUInterruptHandler();
+    ~CPUInterruptHandler();
+
+    CPUInterruptHandler(const CPUInterruptHandler&) = delete;
+    CPUInterruptHandler& operator=(const CPUInterruptHandler&) = delete;
+
+    CPUInterruptHandler(CPUInterruptHandler&&) = default;
+    CPUInterruptHandler& operator=(CPUInterruptHandler&&) = default;
+
+    bool IsInterrupted() const {
+        return is_interrupted;
+    }
+
+    void SetInterrupt(bool is_interrupted);
+
+    void AwaitInterrupt();
+
+private:
+    bool is_interrupted{};
+    std::unique_ptr<Common::Event> interrupt_event;
+};
+
+} // namespace Core
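
A minimal usage sketch (not part of the patch; the function names around the handler are assumptions) of how this new per-core interrupt handler is presumably driven: the emulated-core thread blocks in AwaitInterrupt() until another thread raises the interrupt with SetInterrupt(true), then clears it before resuming guest code.

// Hypothetical illustration only; CoreThread/WakeCore are not names from this commit.
Core::CPUInterruptHandler handler;

void CoreThread() {
    handler.AwaitInterrupt();            // sleep on the internal Common::Event
    if (handler.IsInterrupted()) {
        handler.SetInterrupt(false);     // acknowledge before re-entering the JIT
    }
}

void WakeCore() {
    handler.SetInterrupt(true);          // signals the event and marks the core interrupted
}
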
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index 9bc86e3b9..0d4ab95b7 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -7,15 +7,17 @@
 #include <dynarmic/A32/a32.h>
 #include <dynarmic/A32/config.h>
 #include <dynarmic/A32/context.h>
-#include "common/microprofile.h"
+#include "common/logging/log.h"
+#include "common/page_table.h"
+#include "core/arm/cpu_interrupt_handler.h"
 #include "core/arm/dynarmic/arm_dynarmic_32.h"
-#include "core/arm/dynarmic/arm_dynarmic_64.h"
 #include "core/arm/dynarmic/arm_dynarmic_cp15.h"
+#include "core/arm/dynarmic/arm_exclusive_monitor.h"
 #include "core/core.h"
-#include "core/core_manager.h"
 #include "core/core_timing.h"
 #include "core/hle/kernel/svc.h"
 #include "core/memory.h"
+#include "core/settings.h"
 
 namespace Core {
 
@@ -49,8 +51,22 @@ public:
         parent.system.Memory().Write64(vaddr, value);
     }
 
+    bool MemoryWriteExclusive8(u32 vaddr, u8 value, u8 expected) override {
+        return parent.system.Memory().WriteExclusive8(vaddr, value, expected);
+    }
+    bool MemoryWriteExclusive16(u32 vaddr, u16 value, u16 expected) override {
+        return parent.system.Memory().WriteExclusive16(vaddr, value, expected);
+    }
+    bool MemoryWriteExclusive32(u32 vaddr, u32 value, u32 expected) override {
+        return parent.system.Memory().WriteExclusive32(vaddr, value, expected);
+    }
+    bool MemoryWriteExclusive64(u32 vaddr, u64 value, u64 expected) override {
+        return parent.system.Memory().WriteExclusive64(vaddr, value, expected);
+    }
+
     void InterpreterFallback(u32 pc, std::size_t num_instructions) override {
-        UNIMPLEMENTED();
+        UNIMPLEMENTED_MSG("This should never happen, pc = {:08X}, code = {:08X}", pc,
+                          MemoryReadCode(pc));
     }
 
     void ExceptionRaised(u32 pc, Dynarmic::A32::Exception exception) override {
@@ -61,7 +77,7 @@ public:
         case Dynarmic::A32::Exception::Breakpoint:
             break;
         }
-        LOG_CRITICAL(HW_GPU, "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X})",
+        LOG_CRITICAL(Core_ARM, "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X})",
                      static_cast<std::size_t>(exception), pc, MemoryReadCode(pc));
         UNIMPLEMENTED();
     }
@@ -71,26 +87,36 @@ public:
     }
 
     void AddTicks(u64 ticks) override {
+        if (parent.uses_wall_clock) {
+            return;
+        }
         // Divide the number of ticks by the amount of CPU cores. TODO(Subv): This yields only a
         // rough approximation of the amount of executed ticks in the system, it may be thrown off
         // if not all cores are doing a similar amount of work. Instead of doing this, we should
         // device a way so that timing is consistent across all cores without increasing the ticks 4
         // times.
-        u64 amortized_ticks = (ticks - num_interpreted_instructions) / Core::NUM_CPU_CORES;
+        u64 amortized_ticks =
+            (ticks - num_interpreted_instructions) / Core::Hardware::NUM_CPU_CORES;
         // Always execute at least one tick.
         amortized_ticks = std::max<u64>(amortized_ticks, 1);
 
         parent.system.CoreTiming().AddTicks(amortized_ticks);
         num_interpreted_instructions = 0;
     }
+
     u64 GetTicksRemaining() override {
-        return std::max(parent.system.CoreTiming().GetDowncount(), {});
+        if (parent.uses_wall_clock) {
+            if (!parent.interrupt_handlers[parent.core_index].IsInterrupted()) {
+                return minimum_run_cycles;
+            }
+            return 0U;
+        }
+        return std::max<s64>(parent.system.CoreTiming().GetDowncount(), 0);
     }
 
     ARM_Dynarmic_32& parent;
     std::size_t num_interpreted_instructions{};
-    u64 tpidrro_el0{};
-    u64 tpidr_el0{};
+    static constexpr u64 minimum_run_cycles = 1000U;
 };
 
 std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable& page_table,
@@ -99,26 +125,46 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable&
     config.callbacks = cb.get();
     // TODO(bunnei): Implement page table for 32-bit
     // config.page_table = &page_table.pointers;
-    config.coprocessors[15] = std::make_shared<DynarmicCP15>((u32*)&CP15_regs[0]);
+    config.coprocessors[15] = cp15;
     config.define_unpredictable_behaviour = true;
+    static constexpr std::size_t PAGE_BITS = 12;
+    static constexpr std::size_t NUM_PAGE_TABLE_ENTRIES = 1 << (32 - PAGE_BITS);
+    config.page_table = reinterpret_cast<std::array<std::uint8_t*, NUM_PAGE_TABLE_ENTRIES>*>(
+        page_table.pointers.data());
+    config.absolute_offset_page_table = true;
+    config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128;
+    config.only_detect_misalignment_via_page_table_on_page_boundary = true;
+
+    // Multi-process state
+    config.processor_id = core_index;
+    config.global_monitor = &exclusive_monitor.monitor;
+
+    // Timing
+    config.wall_clock_cntpct = uses_wall_clock;
+
+    // Optimizations
+    if (Settings::values.disable_cpu_opt) {
+        config.enable_optimizations = false;
+        config.enable_fast_dispatch = false;
+    }
+
     return std::make_unique<Dynarmic::A32::Jit>(config);
 }
 
-MICROPROFILE_DEFINE(ARM_Jit_Dynarmic_32, "ARM JIT", "Dynarmic", MP_RGB(255, 64, 64));
-
 void ARM_Dynarmic_32::Run() {
-    MICROPROFILE_SCOPE(ARM_Jit_Dynarmic_32);
     jit->Run();
 }
 
 void ARM_Dynarmic_32::Step() {
-    cb->InterpreterFallback(jit->Regs()[15], 1);
+    jit->Step();
 }
 
-ARM_Dynarmic_32::ARM_Dynarmic_32(System& system, ExclusiveMonitor& exclusive_monitor,
+ARM_Dynarmic_32::ARM_Dynarmic_32(System& system, CPUInterrupts& interrupt_handlers,
+                                 bool uses_wall_clock, ExclusiveMonitor& exclusive_monitor,
                                  std::size_t core_index)
-    : ARM_Interface{system},
-      cb(std::make_unique<DynarmicCallbacks32>(*this)), core_index{core_index},
+    : ARM_Interface{system, interrupt_handlers, uses_wall_clock},
+      cb(std::make_unique<DynarmicCallbacks32>(*this)),
+      cp15(std::make_shared<DynarmicCP15>(*this)), core_index{core_index},
      exclusive_monitor{dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} {}
 
 ARM_Dynarmic_32::~ARM_Dynarmic_32() = default;
@@ -154,32 +200,40 @@ void ARM_Dynarmic_32::SetPSTATE(u32 cpsr) {
 }
 
 u64 ARM_Dynarmic_32::GetTlsAddress() const {
-    return CP15_regs[static_cast<std::size_t>(CP15Register::CP15_THREAD_URO)];
+    return cp15->uro;
 }
 
 void ARM_Dynarmic_32::SetTlsAddress(VAddr address) {
-    CP15_regs[static_cast<std::size_t>(CP15Register::CP15_THREAD_URO)] = static_cast<u32>(address);
+    cp15->uro = static_cast<u32>(address);
 }
 
 u64 ARM_Dynarmic_32::GetTPIDR_EL0() const {
-    return cb->tpidr_el0;
+    return cp15->uprw;
 }
 
 void ARM_Dynarmic_32::SetTPIDR_EL0(u64 value) {
-    cb->tpidr_el0 = value;
+    cp15->uprw = static_cast<u32>(value);
+}
+
+void ARM_Dynarmic_32::ChangeProcessorID(std::size_t new_core_id) {
+    jit->ChangeProcessorID(new_core_id);
 }
 
 void ARM_Dynarmic_32::SaveContext(ThreadContext32& ctx) {
     Dynarmic::A32::Context context;
     jit->SaveContext(context);
     ctx.cpu_registers = context.Regs();
+    ctx.extension_registers = context.ExtRegs();
     ctx.cpsr = context.Cpsr();
+    ctx.fpscr = context.Fpscr();
 }
 
 void ARM_Dynarmic_32::LoadContext(const ThreadContext32& ctx) {
     Dynarmic::A32::Context context;
     context.Regs() = ctx.cpu_registers;
+    context.ExtRegs() = ctx.extension_registers;
     context.SetCpsr(ctx.cpsr);
+    context.SetFpscr(ctx.fpscr);
     jit->LoadContext(context);
 }
 
@@ -188,10 +242,15 @@ void ARM_Dynarmic_32::PrepareReschedule() {
 }
 
 void ARM_Dynarmic_32::ClearInstructionCache() {
+    if (!jit) {
+        return;
+    }
     jit->ClearCache();
 }
 
-void ARM_Dynarmic_32::ClearExclusiveState() {}
+void ARM_Dynarmic_32::ClearExclusiveState() {
+    jit->ClearExclusiveState();
+}
 
 void ARM_Dynarmic_32::PageTableChanged(Common::PageTable& page_table,
                                        std::size_t new_address_space_size_in_bits) {
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.h b/src/core/arm/dynarmic/arm_dynarmic_32.h
index 8ba9cea8f..2bab31b92 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.h
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.h
@@ -9,7 +9,7 @@
 
 #include <dynarmic/A32/a32.h>
 #include <dynarmic/A64/a64.h>
-#include <dynarmic/A64/exclusive_monitor.h>
+#include <dynarmic/exclusive_monitor.h>
 #include "common/common_types.h"
 #include "common/hash.h"
 #include "core/arm/arm_interface.h"
@@ -21,13 +21,16 @@ class Memory;
 
 namespace Core {
 
+class CPUInterruptHandler;
 class DynarmicCallbacks32;
+class DynarmicCP15;
 class DynarmicExclusiveMonitor;
 class System;
 
 class ARM_Dynarmic_32 final : public ARM_Interface {
 public:
-    ARM_Dynarmic_32(System& system, ExclusiveMonitor& exclusive_monitor, std::size_t core_index);
+    ARM_Dynarmic_32(System& system, CPUInterrupts& interrupt_handlers, bool uses_wall_clock,
+                    ExclusiveMonitor& exclusive_monitor, std::size_t core_index);
     ~ARM_Dynarmic_32() override;
 
     void SetPC(u64 pc) override;
@@ -44,6 +47,7 @@ public:
     void SetTlsAddress(VAddr address) override;
     void SetTPIDR_EL0(u64 value) override;
     u64 GetTPIDR_EL0() const override;
+    void ChangeProcessorID(std::size_t new_core_id) override;
 
     void SaveContext(ThreadContext32& ctx) override;
     void SaveContext(ThreadContext64& ctx) override {}
@@ -66,12 +70,14 @@ private:
         std::unordered_map<JitCacheKey, std::shared_ptr<Dynarmic::A32::Jit>, Common::PairHash>;
 
     friend class DynarmicCallbacks32;
+    friend class DynarmicCP15;
+
     std::unique_ptr<DynarmicCallbacks32> cb;
     JitCacheType jit_cache;
     std::shared_ptr<Dynarmic::A32::Jit> jit;
+    std::shared_ptr<DynarmicCP15> cp15;
     std::size_t core_index;
     DynarmicExclusiveMonitor& exclusive_monitor;
-    std::array<u32, 84> CP15_regs{};
 };
 
 } // namespace Core
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index 337b97be9..790981034 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -7,11 +7,11 @@
 #include <dynarmic/A64/a64.h>
 #include <dynarmic/A64/config.h>
 #include "common/logging/log.h"
-#include "common/microprofile.h"
 #include "common/page_table.h"
+#include "core/arm/cpu_interrupt_handler.h"
 #include "core/arm/dynarmic/arm_dynarmic_64.h"
+#include "core/arm/dynarmic/arm_exclusive_monitor.h"
 #include "core/core.h"
-#include "core/core_manager.h"
 #include "core/core_timing.h"
 #include "core/core_timing_util.h"
 #include "core/gdbstub/gdbstub.h"
@@ -65,6 +65,22 @@ public:
         memory.Write64(vaddr + 8, value[1]);
     }
 
+    bool MemoryWriteExclusive8(u64 vaddr, std::uint8_t value, std::uint8_t expected) override {
+        return parent.system.Memory().WriteExclusive8(vaddr, value, expected);
+    }
+    bool MemoryWriteExclusive16(u64 vaddr, std::uint16_t value, std::uint16_t expected) override {
+        return parent.system.Memory().WriteExclusive16(vaddr, value, expected);
+    }
+    bool MemoryWriteExclusive32(u64 vaddr, std::uint32_t value, std::uint32_t expected) override {
+        return parent.system.Memory().WriteExclusive32(vaddr, value, expected);
+    }
+    bool MemoryWriteExclusive64(u64 vaddr, std::uint64_t value, std::uint64_t expected) override {
+        return parent.system.Memory().WriteExclusive64(vaddr, value, expected);
+    }
+    bool MemoryWriteExclusive128(u64 vaddr, Vector value, Vector expected) override {
+        return parent.system.Memory().WriteExclusive128(vaddr, value, expected);
+    }
+
     void InterpreterFallback(u64 pc, std::size_t num_instructions) override {
         LOG_INFO(Core_ARM, "Unicorn fallback @ 0x{:X} for {} instructions (instr = {:08X})", pc,
                  num_instructions, MemoryReadCode(pc));
@@ -98,8 +114,8 @@ public:
             }
             [[fallthrough]];
         default:
-            ASSERT_MSG(false, "ExceptionRaised(exception = {}, pc = {:X})",
-                       static_cast<std::size_t>(exception), pc);
+            ASSERT_MSG(false, "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X})",
+                       static_cast<std::size_t>(exception), pc, MemoryReadCode(pc));
         }
     }
 
@@ -108,29 +124,42 @@ public:
     }
 
     void AddTicks(u64 ticks) override {
+        if (parent.uses_wall_clock) {
+            return;
+        }
         // Divide the number of ticks by the amount of CPU cores. TODO(Subv): This yields only a
         // rough approximation of the amount of executed ticks in the system, it may be thrown off
         // if not all cores are doing a similar amount of work. Instead of doing this, we should
         // device a way so that timing is consistent across all cores without increasing the ticks 4
         // times.
-        u64 amortized_ticks = (ticks - num_interpreted_instructions) / Core::NUM_CPU_CORES;
+        u64 amortized_ticks =
+            (ticks - num_interpreted_instructions) / Core::Hardware::NUM_CPU_CORES;
         // Always execute at least one tick.
         amortized_ticks = std::max<u64>(amortized_ticks, 1);
 
         parent.system.CoreTiming().AddTicks(amortized_ticks);
         num_interpreted_instructions = 0;
     }
+
     u64 GetTicksRemaining() override {
-        return std::max(parent.system.CoreTiming().GetDowncount(), s64{0});
+        if (parent.uses_wall_clock) {
+            if (!parent.interrupt_handlers[parent.core_index].IsInterrupted()) {
+                return minimum_run_cycles;
+            }
+            return 0U;
+        }
+        return std::max<s64>(parent.system.CoreTiming().GetDowncount(), 0);
     }
+
     u64 GetCNTPCT() override {
-        return Timing::CpuCyclesToClockCycles(parent.system.CoreTiming().GetTicks());
+        return parent.system.CoreTiming().GetClockTicks();
     }
 
     ARM_Dynarmic_64& parent;
     std::size_t num_interpreted_instructions = 0;
     u64 tpidrro_el0 = 0;
     u64 tpidr_el0 = 0;
+    static constexpr u64 minimum_run_cycles = 1000U;
 };
 
 std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable& page_table,
@@ -168,14 +197,13 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable&
         config.enable_fast_dispatch = false;
     }
 
+    // Timing
+    config.wall_clock_cntpct = uses_wall_clock;
+
     return std::make_shared<Dynarmic::A64::Jit>(config);
 }
 
-MICROPROFILE_DEFINE(ARM_Jit_Dynarmic_64, "ARM JIT", "Dynarmic", MP_RGB(255, 64, 64));
-
 void ARM_Dynarmic_64::Run() {
-    MICROPROFILE_SCOPE(ARM_Jit_Dynarmic_64);
-
     jit->Run();
 }
 
@@ -183,11 +211,16 @@ void ARM_Dynarmic_64::Step() {
     cb->InterpreterFallback(jit->GetPC(), 1);
 }
 
-ARM_Dynarmic_64::ARM_Dynarmic_64(System& system, ExclusiveMonitor& exclusive_monitor,
+ARM_Dynarmic_64::ARM_Dynarmic_64(System& system, CPUInterrupts& interrupt_handlers,
+                                 bool uses_wall_clock, ExclusiveMonitor& exclusive_monitor,
                                  std::size_t core_index)
-    : ARM_Interface{system}, cb(std::make_unique<DynarmicCallbacks64>(*this)),
-      inner_unicorn{system, ARM_Unicorn::Arch::AArch64}, core_index{core_index},
-      exclusive_monitor{dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} {}
+    : ARM_Interface{system, interrupt_handlers, uses_wall_clock},
+      cb(std::make_unique<DynarmicCallbacks64>(*this)), inner_unicorn{system, interrupt_handlers,
+                                                                      uses_wall_clock,
+                                                                      ARM_Unicorn::Arch::AArch64,
+                                                                      core_index},
+      core_index{core_index}, exclusive_monitor{
+                                  dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} {}
 
 ARM_Dynarmic_64::~ARM_Dynarmic_64() = default;
 
@@ -239,6 +272,10 @@ void ARM_Dynarmic_64::SetTPIDR_EL0(u64 value) {
     cb->tpidr_el0 = value;
 }
 
+void ARM_Dynarmic_64::ChangeProcessorID(std::size_t new_core_id) {
+    jit->ChangeProcessorID(new_core_id);
+}
+
 void ARM_Dynarmic_64::SaveContext(ThreadContext64& ctx) {
     ctx.cpu_registers = jit->GetRegisters();
     ctx.sp = jit->GetSP();
@@ -266,6 +303,9 @@ void ARM_Dynarmic_64::PrepareReschedule() {
 }
 
 void ARM_Dynarmic_64::ClearInstructionCache() {
+    if (!jit) {
+        return;
+    }
     jit->ClearCache();
 }
 
@@ -285,44 +325,4 @@ void ARM_Dynarmic_64::PageTableChanged(Common::PageTable& page_table,
     jit_cache.emplace(key, jit);
 }
 
-DynarmicExclusiveMonitor::DynarmicExclusiveMonitor(Memory::Memory& memory, std::size_t core_count)
-    : monitor(core_count), memory{memory} {}
-
-DynarmicExclusiveMonitor::~DynarmicExclusiveMonitor() = default;
-
-void DynarmicExclusiveMonitor::SetExclusive(std::size_t core_index, VAddr addr) {
-    // Size doesn't actually matter.
-    monitor.Mark(core_index, addr, 16);
-}
-
-void DynarmicExclusiveMonitor::ClearExclusive() {
-    monitor.Clear();
-}
-
-bool DynarmicExclusiveMonitor::ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) {
-    return monitor.DoExclusiveOperation(core_index, vaddr, 1, [&] { memory.Write8(vaddr, value); });
-}
-
-bool DynarmicExclusiveMonitor::ExclusiveWrite16(std::size_t core_index, VAddr vaddr, u16 value) {
-    return monitor.DoExclusiveOperation(core_index, vaddr, 2,
-                                        [&] { memory.Write16(vaddr, value); });
-}
-
-bool DynarmicExclusiveMonitor::ExclusiveWrite32(std::size_t core_index, VAddr vaddr, u32 value) {
-    return monitor.DoExclusiveOperation(core_index, vaddr, 4,
-                                        [&] { memory.Write32(vaddr, value); });
-}
-
-bool DynarmicExclusiveMonitor::ExclusiveWrite64(std::size_t core_index, VAddr vaddr, u64 value) {
-    return monitor.DoExclusiveOperation(core_index, vaddr, 8,
-                                        [&] { memory.Write64(vaddr, value); });
-}
-
-bool DynarmicExclusiveMonitor::ExclusiveWrite128(std::size_t core_index, VAddr vaddr, u128 value) {
-    return monitor.DoExclusiveOperation(core_index, vaddr, 16, [&] {
-        memory.Write64(vaddr + 0, value[0]);
-        memory.Write64(vaddr + 8, value[1]);
-    });
-}
-
 } // namespace Core
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.h b/src/core/arm/dynarmic/arm_dynarmic_64.h
index 647cecaf0..403c55961 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.h
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.h
@@ -8,7 +8,6 @@
 #include <unordered_map>
 
 #include <dynarmic/A64/a64.h>
-#include <dynarmic/A64/exclusive_monitor.h>
 #include "common/common_types.h"
 #include "common/hash.h"
 #include "core/arm/arm_interface.h"
@@ -22,12 +21,14 @@ class Memory;
 namespace Core {
 
 class DynarmicCallbacks64;
+class CPUInterruptHandler;
 class DynarmicExclusiveMonitor;
 class System;
 
 class ARM_Dynarmic_64 final : public ARM_Interface {
 public:
-    ARM_Dynarmic_64(System& system, ExclusiveMonitor& exclusive_monitor, std::size_t core_index);
+    ARM_Dynarmic_64(System& system, CPUInterrupts& interrupt_handlers, bool uses_wall_clock,
+                    ExclusiveMonitor& exclusive_monitor, std::size_t core_index);
     ~ARM_Dynarmic_64() override;
 
     void SetPC(u64 pc) override;
@@ -44,6 +45,7 @@ public:
     void SetTlsAddress(VAddr address) override;
     void SetTPIDR_EL0(u64 value) override;
     u64 GetTPIDR_EL0() const override;
+    void ChangeProcessorID(std::size_t new_core_id) override;
 
     void SaveContext(ThreadContext32& ctx) override {}
     void SaveContext(ThreadContext64& ctx) override;
@@ -75,24 +77,4 @@ private:
     DynarmicExclusiveMonitor& exclusive_monitor;
 };
 
-class DynarmicExclusiveMonitor final : public ExclusiveMonitor {
-public:
-    explicit DynarmicExclusiveMonitor(Memory::Memory& memory, std::size_t core_count);
-    ~DynarmicExclusiveMonitor() override;
-
-    void SetExclusive(std::size_t core_index, VAddr addr) override;
-    void ClearExclusive() override;
-
-    bool ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) override;
-    bool ExclusiveWrite16(std::size_t core_index, VAddr vaddr, u16 value) override;
-    bool ExclusiveWrite32(std::size_t core_index, VAddr vaddr, u32 value) override;
-    bool ExclusiveWrite64(std::size_t core_index, VAddr vaddr, u64 value) override;
-    bool ExclusiveWrite128(std::size_t core_index, VAddr vaddr, u128 value) override;
-
-private:
-    friend class ARM_Dynarmic_64;
-    Dynarmic::A64::ExclusiveMonitor monitor;
-    Core::Memory::Memory& memory;
-};
-
 } // namespace Core
diff --git a/src/core/arm/dynarmic/arm_dynarmic_cp15.cpp b/src/core/arm/dynarmic/arm_dynarmic_cp15.cpp
index 3fdcdebde..54556e0f9 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_cp15.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_cp15.cpp
@@ -2,79 +2,132 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
+#include <fmt/format.h>
+#include "common/logging/log.h"
+#include "core/arm/dynarmic/arm_dynarmic_32.h"
 #include "core/arm/dynarmic/arm_dynarmic_cp15.h"
+#include "core/core.h"
+#include "core/core_timing.h"
+#include "core/core_timing_util.h"
 
 using Callback = Dynarmic::A32::Coprocessor::Callback;
 using CallbackOrAccessOneWord = Dynarmic::A32::Coprocessor::CallbackOrAccessOneWord;
 using CallbackOrAccessTwoWords = Dynarmic::A32::Coprocessor::CallbackOrAccessTwoWords;
 
+template <>
+struct fmt::formatter<Dynarmic::A32::CoprocReg> {
+    constexpr auto parse(format_parse_context& ctx) {
+        return ctx.begin();
+    }
+    template <typename FormatContext>
+    auto format(const Dynarmic::A32::CoprocReg& reg, FormatContext& ctx) {
+        return format_to(ctx.out(), "cp{}", static_cast<size_t>(reg));
+    }
+};
+
+namespace Core {
+
+static u32 dummy_value;
+
 std::optional<Callback> DynarmicCP15::CompileInternalOperation(bool two, unsigned opc1,
                                                                CoprocReg CRd, CoprocReg CRn,
                                                                CoprocReg CRm, unsigned opc2) {
+    LOG_CRITICAL(Core_ARM, "CP15: cdp{} p15, {}, {}, {}, {}, {}", two ? "2" : "", opc1, CRd, CRn,
+                 CRm, opc2);
     return {};
 }
 
 CallbackOrAccessOneWord DynarmicCP15::CompileSendOneWord(bool two, unsigned opc1, CoprocReg CRn,
                                                          CoprocReg CRm, unsigned opc2) {
-    // TODO(merry): Privileged CP15 registers
-
     if (!two && CRn == CoprocReg::C7 && opc1 == 0 && CRm == CoprocReg::C5 && opc2 == 4) {
+        // CP15_FLUSH_PREFETCH_BUFFER
         // This is a dummy write, we ignore the value written here.
-        return &CP15[static_cast<std::size_t>(CP15Register::CP15_FLUSH_PREFETCH_BUFFER)];
+        return &dummy_value;
     }
 
     if (!two && CRn == CoprocReg::C7 && opc1 == 0 && CRm == CoprocReg::C10) {
         switch (opc2) {
         case 4:
+            // CP15_DATA_SYNC_BARRIER
             // This is a dummy write, we ignore the value written here.
-            return &CP15[static_cast<std::size_t>(CP15Register::CP15_DATA_SYNC_BARRIER)];
+            return &dummy_value;
         case 5:
+            // CP15_DATA_MEMORY_BARRIER
             // This is a dummy write, we ignore the value written here.
-            return &CP15[static_cast<std::size_t>(CP15Register::CP15_DATA_MEMORY_BARRIER)];
-        default:
-            return {};
         }
     }
 
     if (!two && CRn == CoprocReg::C13 && opc1 == 0 && CRm == CoprocReg::C0 && opc2 == 2) {
-        return &CP15[static_cast<std::size_t>(CP15Register::CP15_THREAD_UPRW)];
+        // CP15_THREAD_UPRW
+        return &uprw;
     }
 
+    LOG_CRITICAL(Core_ARM, "CP15: mcr{} p15, {}, <Rt>, {}, {}, {}", two ? "2" : "", opc1, CRn, CRm,
+                 opc2);
     return {};
 }
 
 CallbackOrAccessTwoWords DynarmicCP15::CompileSendTwoWords(bool two, unsigned opc, CoprocReg CRm) {
+    LOG_CRITICAL(Core_ARM, "CP15: mcrr{} p15, {}, <Rt>, <Rt2>, {}", two ? "2" : "", opc, CRm);
     return {};
 }
 
 CallbackOrAccessOneWord DynarmicCP15::CompileGetOneWord(bool two, unsigned opc1, CoprocReg CRn,
                                                         CoprocReg CRm, unsigned opc2) {
-    // TODO(merry): Privileged CP15 registers
-
     if (!two && CRn == CoprocReg::C13 && opc1 == 0 && CRm == CoprocReg::C0) {
         switch (opc2) {
         case 2:
-            return &CP15[static_cast<std::size_t>(CP15Register::CP15_THREAD_UPRW)];
+            // CP15_THREAD_UPRW
+            return &uprw;
         case 3:
-            return &CP15[static_cast<std::size_t>(CP15Register::CP15_THREAD_URO)];
-        default:
-            return {};
+            // CP15_THREAD_URO
+            return &uro;
         }
     }
 
+    LOG_CRITICAL(Core_ARM, "CP15: mrc{} p15, {}, <Rt>, {}, {}, {}", two ? "2" : "", opc1, CRn, CRm,
+                 opc2);
     return {};
 }
 
 CallbackOrAccessTwoWords DynarmicCP15::CompileGetTwoWords(bool two, unsigned opc, CoprocReg CRm) {
+    if (!two && opc == 0 && CRm == CoprocReg::C14) {
+        // CNTPCT
+        const auto callback = static_cast<u64 (*)(Dynarmic::A32::Jit*, void*, u32, u32)>(
+            [](Dynarmic::A32::Jit*, void* arg, u32, u32) -> u64 {
+                ARM_Dynarmic_32& parent = *(ARM_Dynarmic_32*)arg;
+                return parent.system.CoreTiming().GetClockTicks();
+            });
+        return Dynarmic::A32::Coprocessor::Callback{callback, (void*)&parent};
+    }
+
+    LOG_CRITICAL(Core_ARM, "CP15: mrrc{} p15, {}, <Rt>, <Rt2>, {}", two ? "2" : "", opc, CRm);
     return {};
 }
 
 std::optional<Callback> DynarmicCP15::CompileLoadWords(bool two, bool long_transfer, CoprocReg CRd,
                                                        std::optional<u8> option) {
+    if (option) {
+        LOG_CRITICAL(Core_ARM, "CP15: mrrc{}{} p15, {}, [...], {}", two ? "2" : "",
+                     long_transfer ? "l" : "", CRd, *option);
+    } else {
+        LOG_CRITICAL(Core_ARM, "CP15: mrrc{}{} p15, {}, [...]", two ? "2" : "",
+                     long_transfer ? "l" : "", CRd);
+    }
     return {};
 }
 
 std::optional<Callback> DynarmicCP15::CompileStoreWords(bool two, bool long_transfer, CoprocReg CRd,
                                                         std::optional<u8> option) {
+    if (option) {
+        LOG_CRITICAL(Core_ARM, "CP15: mrrc{}{} p15, {}, [...], {}", two ? "2" : "",
+                     long_transfer ? "l" : "", CRd, *option);
+    } else {
+        LOG_CRITICAL(Core_ARM, "CP15: mrrc{}{} p15, {}, [...]", two ? "2" : "",
+                     long_transfer ? "l" : "", CRd);
+    }
     return {};
 }
+
+} // namespace Core
diff --git a/src/core/arm/dynarmic/arm_dynarmic_cp15.h b/src/core/arm/dynarmic/arm_dynarmic_cp15.h
index 07bcde5f9..7356d252e 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_cp15.h
+++ b/src/core/arm/dynarmic/arm_dynarmic_cp15.h
@@ -10,128 +10,15 @@
 #include <dynarmic/A32/coprocessor.h>
 #include "common/common_types.h"
 
-enum class CP15Register {
-    // c0 - Information registers
-    CP15_MAIN_ID,
-    CP15_CACHE_TYPE,
-    CP15_TCM_STATUS,
-    CP15_TLB_TYPE,
-    CP15_CPU_ID,
-    CP15_PROCESSOR_FEATURE_0,
-    CP15_PROCESSOR_FEATURE_1,
-    CP15_DEBUG_FEATURE_0,
-    CP15_AUXILIARY_FEATURE_0,
-    CP15_MEMORY_MODEL_FEATURE_0,
-    CP15_MEMORY_MODEL_FEATURE_1,
-    CP15_MEMORY_MODEL_FEATURE_2,
-    CP15_MEMORY_MODEL_FEATURE_3,
-    CP15_ISA_FEATURE_0,
-    CP15_ISA_FEATURE_1,
-    CP15_ISA_FEATURE_2,
-    CP15_ISA_FEATURE_3,
-    CP15_ISA_FEATURE_4,
+namespace Core {
 
-    // c1 - Control registers
-    CP15_CONTROL,
-    CP15_AUXILIARY_CONTROL,
-    CP15_COPROCESSOR_ACCESS_CONTROL,
-
-    // c2 - Translation table registers
-    CP15_TRANSLATION_BASE_TABLE_0,
-    CP15_TRANSLATION_BASE_TABLE_1,
-    CP15_TRANSLATION_BASE_CONTROL,
-    CP15_DOMAIN_ACCESS_CONTROL,
-    CP15_RESERVED,
-
-    // c5 - Fault status registers
-    CP15_FAULT_STATUS,
-    CP15_INSTR_FAULT_STATUS,
-    CP15_COMBINED_DATA_FSR = CP15_FAULT_STATUS,
-    CP15_INST_FSR,
-
-    // c6 - Fault Address registers
-    CP15_FAULT_ADDRESS,
-    CP15_COMBINED_DATA_FAR = CP15_FAULT_ADDRESS,
-    CP15_WFAR,
-    CP15_IFAR,
-
-    // c7 - Cache operation registers
-    CP15_WAIT_FOR_INTERRUPT,
-    CP15_PHYS_ADDRESS,
-    CP15_INVALIDATE_INSTR_CACHE,
-    CP15_INVALIDATE_INSTR_CACHE_USING_MVA,
-    CP15_INVALIDATE_INSTR_CACHE_USING_INDEX,
-    CP15_FLUSH_PREFETCH_BUFFER,
-    CP15_FLUSH_BRANCH_TARGET_CACHE,
-    CP15_FLUSH_BRANCH_TARGET_CACHE_ENTRY,
-    CP15_INVALIDATE_DATA_CACHE,
-    CP15_INVALIDATE_DATA_CACHE_LINE_USING_MVA,
-    CP15_INVALIDATE_DATA_CACHE_LINE_USING_INDEX,
-    CP15_INVALIDATE_DATA_AND_INSTR_CACHE,
-    CP15_CLEAN_DATA_CACHE,
-    CP15_CLEAN_DATA_CACHE_LINE_USING_MVA,
-    CP15_CLEAN_DATA_CACHE_LINE_USING_INDEX,
-    CP15_DATA_SYNC_BARRIER,
-    CP15_DATA_MEMORY_BARRIER,
-    CP15_CLEAN_AND_INVALIDATE_DATA_CACHE,
-    CP15_CLEAN_AND_INVALIDATE_DATA_CACHE_LINE_USING_MVA,
-    CP15_CLEAN_AND_INVALIDATE_DATA_CACHE_LINE_USING_INDEX,
-
-    // c8 - TLB operations
-    CP15_INVALIDATE_ITLB,
-    CP15_INVALIDATE_ITLB_SINGLE_ENTRY,
-    CP15_INVALIDATE_ITLB_ENTRY_ON_ASID_MATCH,
-    CP15_INVALIDATE_ITLB_ENTRY_ON_MVA,
-    CP15_INVALIDATE_DTLB,
-    CP15_INVALIDATE_DTLB_SINGLE_ENTRY,
-    CP15_INVALIDATE_DTLB_ENTRY_ON_ASID_MATCH,
-    CP15_INVALIDATE_DTLB_ENTRY_ON_MVA,
-    CP15_INVALIDATE_UTLB,
-    CP15_INVALIDATE_UTLB_SINGLE_ENTRY,
-    CP15_INVALIDATE_UTLB_ENTRY_ON_ASID_MATCH,
-    CP15_INVALIDATE_UTLB_ENTRY_ON_MVA,
-
-    // c9 - Data cache lockdown register
-    CP15_DATA_CACHE_LOCKDOWN,
-
-    // c10 - TLB/Memory map registers
-    CP15_TLB_LOCKDOWN,
-    CP15_PRIMARY_REGION_REMAP,
-    CP15_NORMAL_REGION_REMAP,
-
-    // c13 - Thread related registers
-    CP15_PID,
-    CP15_CONTEXT_ID,
-    CP15_THREAD_UPRW, // Thread ID register - User/Privileged Read/Write
-    CP15_THREAD_URO,  // Thread ID register - User Read Only (Privileged R/W)
-    CP15_THREAD_PRW,  // Thread ID register - Privileged R/W only.
-
-    // c15 - Performance and TLB lockdown registers
-    CP15_PERFORMANCE_MONITOR_CONTROL,
-    CP15_CYCLE_COUNTER,
-    CP15_COUNT_0,
-    CP15_COUNT_1,
-    CP15_READ_MAIN_TLB_LOCKDOWN_ENTRY,
-    CP15_WRITE_MAIN_TLB_LOCKDOWN_ENTRY,
-    CP15_MAIN_TLB_LOCKDOWN_VIRT_ADDRESS,
-    CP15_MAIN_TLB_LOCKDOWN_PHYS_ADDRESS,
-    CP15_MAIN_TLB_LOCKDOWN_ATTRIBUTE,
-    CP15_TLB_DEBUG_CONTROL,
-
-    // Skyeye defined
-    CP15_TLB_FAULT_ADDR,
-    CP15_TLB_FAULT_STATUS,
-
-    // Not an actual register.
-    // All registers should be defined above this.
-    CP15_REGISTER_COUNT,
-};
+class ARM_Dynarmic_32;
 
 class DynarmicCP15 final : public Dynarmic::A32::Coprocessor {
 public:
     using CoprocReg = Dynarmic::A32::CoprocReg;
 
-    explicit DynarmicCP15(u32* cp15) : CP15(cp15){};
+    explicit DynarmicCP15(ARM_Dynarmic_32& parent) : parent(parent) {}
 
     std::optional<Callback> CompileInternalOperation(bool two, unsigned opc1, CoprocReg CRd,
                                                      CoprocReg CRn, CoprocReg CRm,
@@ -147,6 +34,9 @@ public:
     std::optional<Callback> CompileStoreWords(bool two, bool long_transfer, CoprocReg CRd,
                                               std::optional<u8> option) override;
 
-private:
-    u32* CP15{};
+    ARM_Dynarmic_32& parent;
+    u32 uprw;
+    u32 uro;
 };
+
+} // namespace Core
diff --git a/src/core/arm/dynarmic/arm_exclusive_monitor.cpp b/src/core/arm/dynarmic/arm_exclusive_monitor.cpp
new file mode 100644
index 000000000..4e209f6a5
--- /dev/null
+++ b/src/core/arm/dynarmic/arm_exclusive_monitor.cpp
@@ -0,0 +1,76 @@
+// Copyright 2018 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <cinttypes>
+#include <memory>
+#include "core/arm/dynarmic/arm_exclusive_monitor.h"
+#include "core/memory.h"
+
+namespace Core {
+
+DynarmicExclusiveMonitor::DynarmicExclusiveMonitor(Memory::Memory& memory, std::size_t core_count)
+    : monitor(core_count), memory{memory} {}
+
+DynarmicExclusiveMonitor::~DynarmicExclusiveMonitor() = default;
+
+u8 DynarmicExclusiveMonitor::ExclusiveRead8(std::size_t core_index, VAddr addr) {
+    return monitor.ReadAndMark<u8>(core_index, addr, [&]() -> u8 { return memory.Read8(addr); });
+}
+
+u16 DynarmicExclusiveMonitor::ExclusiveRead16(std::size_t core_index, VAddr addr) {
+    return monitor.ReadAndMark<u16>(core_index, addr, [&]() -> u16 { return memory.Read16(addr); });
+}
+
+u32 DynarmicExclusiveMonitor::ExclusiveRead32(std::size_t core_index, VAddr addr) {
+    return monitor.ReadAndMark<u32>(core_index, addr, [&]() -> u32 { return memory.Read32(addr); });
+}
+
+u64 DynarmicExclusiveMonitor::ExclusiveRead64(std::size_t core_index, VAddr addr) {
+    return monitor.ReadAndMark<u64>(core_index, addr, [&]() -> u64 { return memory.Read64(addr); });
+}
+
+u128 DynarmicExclusiveMonitor::ExclusiveRead128(std::size_t core_index, VAddr addr) {
+    return monitor.ReadAndMark<u128>(core_index, addr, [&]() -> u128 {
+        u128 result;
+        result[0] = memory.Read64(addr);
+        result[1] = memory.Read64(addr + 8);
+        return result;
+    });
+}
+
+void DynarmicExclusiveMonitor::ClearExclusive() {
+    monitor.Clear();
+}
+
+bool DynarmicExclusiveMonitor::ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) {
+    return monitor.DoExclusiveOperation<u8>(core_index, vaddr, [&](u8 expected) -> bool {
+        return memory.WriteExclusive8(vaddr, value, expected);
+    });
+}
+
+bool DynarmicExclusiveMonitor::ExclusiveWrite16(std::size_t core_index, VAddr vaddr, u16 value) {
+    return monitor.DoExclusiveOperation<u16>(core_index, vaddr, [&](u16 expected) -> bool {
+        return memory.WriteExclusive16(vaddr, value, expected);
+    });
+}
+
+bool DynarmicExclusiveMonitor::ExclusiveWrite32(std::size_t core_index, VAddr vaddr, u32 value) {
+    return monitor.DoExclusiveOperation<u32>(core_index, vaddr, [&](u32 expected) -> bool {
+        return memory.WriteExclusive32(vaddr, value, expected);
+    });
+}
+
+bool DynarmicExclusiveMonitor::ExclusiveWrite64(std::size_t core_index, VAddr vaddr, u64 value) {
+    return monitor.DoExclusiveOperation<u64>(core_index, vaddr, [&](u64 expected) -> bool {
+        return memory.WriteExclusive64(vaddr, value, expected);
+    });
+}
+
+bool DynarmicExclusiveMonitor::ExclusiveWrite128(std::size_t core_index, VAddr vaddr, u128 value) {
+    return monitor.DoExclusiveOperation<u128>(core_index, vaddr, [&](u128 expected) -> bool {
+        return memory.WriteExclusive128(vaddr, value, expected);
+    });
+}
+
+} // namespace Core
diff --git a/src/core/arm/dynarmic/arm_exclusive_monitor.h b/src/core/arm/dynarmic/arm_exclusive_monitor.h
new file mode 100644
index 000000000..964f4a55d
--- /dev/null
+++ b/src/core/arm/dynarmic/arm_exclusive_monitor.h
@@ -0,0 +1,48 @@
1// Copyright 2020 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8#include <unordered_map>
9
10#include <dynarmic/exclusive_monitor.h>
11
12#include "common/common_types.h"
13#include "core/arm/dynarmic/arm_dynarmic_32.h"
14#include "core/arm/dynarmic/arm_dynarmic_64.h"
15#include "core/arm/exclusive_monitor.h"
16
17namespace Core::Memory {
18class Memory;
19}
20
21namespace Core {
22
23class DynarmicExclusiveMonitor final : public ExclusiveMonitor {
24public:
25 explicit DynarmicExclusiveMonitor(Memory::Memory& memory, std::size_t core_count);
26 ~DynarmicExclusiveMonitor() override;
27
28 u8 ExclusiveRead8(std::size_t core_index, VAddr addr) override;
29 u16 ExclusiveRead16(std::size_t core_index, VAddr addr) override;
30 u32 ExclusiveRead32(std::size_t core_index, VAddr addr) override;
31 u64 ExclusiveRead64(std::size_t core_index, VAddr addr) override;
32 u128 ExclusiveRead128(std::size_t core_index, VAddr addr) override;
33 void ClearExclusive() override;
34
35 bool ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) override;
36 bool ExclusiveWrite16(std::size_t core_index, VAddr vaddr, u16 value) override;
37 bool ExclusiveWrite32(std::size_t core_index, VAddr vaddr, u32 value) override;
38 bool ExclusiveWrite64(std::size_t core_index, VAddr vaddr, u64 value) override;
39 bool ExclusiveWrite128(std::size_t core_index, VAddr vaddr, u128 value) override;
40
41private:
42 friend class ARM_Dynarmic_32;
43 friend class ARM_Dynarmic_64;
44 Dynarmic::ExclusiveMonitor monitor;
45 Core::Memory::Memory& memory;
46};
47
48} // namespace Core
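
The new interface pairs ExclusiveReadN, which marks a reservation in Dynarmic's monitor while reading guest memory, with ExclusiveWriteN, which only commits if no other core has disturbed that reservation. A minimal caller-side sketch (not from the commit; EmulatedAtomicIncrement is a hypothetical helper) shows how the pair maps onto a guest LDXR/STXR retry loop:

#include <cstddef>
#include "common/common_types.h"
#include "core/arm/exclusive_monitor.h"

// Retry loop in the style of a guest load-exclusive/store-exclusive pair.
u32 EmulatedAtomicIncrement(Core::ExclusiveMonitor& monitor, std::size_t core_index, VAddr vaddr) {
    u32 value;
    do {
        // LDXR: read the value and mark the address as reserved for this core.
        value = monitor.ExclusiveRead32(core_index, vaddr);
        // STXR: the write only lands if the reservation still holds; otherwise retry.
    } while (!monitor.ExclusiveWrite32(core_index, vaddr, value + 1));
    return value + 1;
}
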
diff --git a/src/core/arm/exclusive_monitor.cpp b/src/core/arm/exclusive_monitor.cpp
index b32401e0b..d8cba369d 100644
--- a/src/core/arm/exclusive_monitor.cpp
+++ b/src/core/arm/exclusive_monitor.cpp
@@ -3,7 +3,7 @@
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#ifdef ARCHITECTURE_x86_64 5#ifdef ARCHITECTURE_x86_64
6#include "core/arm/dynarmic/arm_dynarmic_64.h" 6#include "core/arm/dynarmic/arm_exclusive_monitor.h"
7#endif 7#endif
8#include "core/arm/exclusive_monitor.h" 8#include "core/arm/exclusive_monitor.h"
9#include "core/memory.h" 9#include "core/memory.h"
diff --git a/src/core/arm/exclusive_monitor.h b/src/core/arm/exclusive_monitor.h
index ccd73b80f..62f6e6023 100644
--- a/src/core/arm/exclusive_monitor.h
+++ b/src/core/arm/exclusive_monitor.h
@@ -18,7 +18,11 @@ class ExclusiveMonitor {
18public: 18public:
19 virtual ~ExclusiveMonitor(); 19 virtual ~ExclusiveMonitor();
20 20
21 virtual void SetExclusive(std::size_t core_index, VAddr addr) = 0; 21 virtual u8 ExclusiveRead8(std::size_t core_index, VAddr addr) = 0;
22 virtual u16 ExclusiveRead16(std::size_t core_index, VAddr addr) = 0;
23 virtual u32 ExclusiveRead32(std::size_t core_index, VAddr addr) = 0;
24 virtual u64 ExclusiveRead64(std::size_t core_index, VAddr addr) = 0;
25 virtual u128 ExclusiveRead128(std::size_t core_index, VAddr addr) = 0;
22 virtual void ClearExclusive() = 0; 26 virtual void ClearExclusive() = 0;
23 27
24 virtual bool ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) = 0; 28 virtual bool ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) = 0;
diff --git a/src/core/arm/unicorn/arm_unicorn.cpp b/src/core/arm/unicorn/arm_unicorn.cpp
index e40e9626a..1df3f3ed1 100644
--- a/src/core/arm/unicorn/arm_unicorn.cpp
+++ b/src/core/arm/unicorn/arm_unicorn.cpp
@@ -6,6 +6,7 @@
6#include <unicorn/arm64.h> 6#include <unicorn/arm64.h>
7#include "common/assert.h" 7#include "common/assert.h"
8#include "common/microprofile.h" 8#include "common/microprofile.h"
9#include "core/arm/cpu_interrupt_handler.h"
9#include "core/arm/unicorn/arm_unicorn.h" 10#include "core/arm/unicorn/arm_unicorn.h"
10#include "core/core.h" 11#include "core/core.h"
11#include "core/core_timing.h" 12#include "core/core_timing.h"
@@ -62,7 +63,9 @@ static bool UnmappedMemoryHook(uc_engine* uc, uc_mem_type type, u64 addr, int si
62 return false; 63 return false;
63} 64}
64 65
65ARM_Unicorn::ARM_Unicorn(System& system, Arch architecture) : ARM_Interface{system} { 66ARM_Unicorn::ARM_Unicorn(System& system, CPUInterrupts& interrupt_handlers, bool uses_wall_clock,
67 Arch architecture, std::size_t core_index)
68 : ARM_Interface{system, interrupt_handlers, uses_wall_clock}, core_index{core_index} {
66 const auto arch = architecture == Arch::AArch32 ? UC_ARCH_ARM : UC_ARCH_ARM64; 69 const auto arch = architecture == Arch::AArch32 ? UC_ARCH_ARM : UC_ARCH_ARM64;
67 CHECKED(uc_open(arch, UC_MODE_ARM, &uc)); 70 CHECKED(uc_open(arch, UC_MODE_ARM, &uc));
68 71
@@ -156,12 +159,20 @@ void ARM_Unicorn::SetTPIDR_EL0(u64 value) {
156 CHECKED(uc_reg_write(uc, UC_ARM64_REG_TPIDR_EL0, &value)); 159 CHECKED(uc_reg_write(uc, UC_ARM64_REG_TPIDR_EL0, &value));
157} 160}
158 161
162void ARM_Unicorn::ChangeProcessorID(std::size_t new_core_id) {
163 core_index = new_core_id;
164}
165
159void ARM_Unicorn::Run() { 166void ARM_Unicorn::Run() {
160 if (GDBStub::IsServerEnabled()) { 167 if (GDBStub::IsServerEnabled()) {
161 ExecuteInstructions(std::max(4000000U, 0U)); 168 ExecuteInstructions(std::max(4000000U, 0U));
162 } else { 169 } else {
163 ExecuteInstructions( 170 while (true) {
164 std::max(std::size_t(system.CoreTiming().GetDowncount()), std::size_t{0})); 171 if (interrupt_handlers[core_index].IsInterrupted()) {
172 return;
173 }
174 ExecuteInstructions(10);
175 }
165 } 176 }
166} 177}
167 178
@@ -183,8 +194,6 @@ void ARM_Unicorn::ExecuteInstructions(std::size_t num_instructions) {
183 UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC, page_buffer.data())); 194 UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC, page_buffer.data()));
184 CHECKED(uc_emu_start(uc, GetPC(), 1ULL << 63, 0, num_instructions)); 195 CHECKED(uc_emu_start(uc, GetPC(), 1ULL << 63, 0, num_instructions));
185 CHECKED(uc_mem_unmap(uc, map_addr, page_buffer.size())); 196 CHECKED(uc_mem_unmap(uc, map_addr, page_buffer.size()));
186
187 system.CoreTiming().AddTicks(num_instructions);
188 if (GDBStub::IsServerEnabled()) { 197 if (GDBStub::IsServerEnabled()) {
189 if (last_bkpt_hit && last_bkpt.type == GDBStub::BreakpointType::Execute) { 198 if (last_bkpt_hit && last_bkpt.type == GDBStub::BreakpointType::Execute) {
190 uc_reg_write(uc, UC_ARM64_REG_PC, &last_bkpt.address); 199 uc_reg_write(uc, UC_ARM64_REG_PC, &last_bkpt.address);
diff --git a/src/core/arm/unicorn/arm_unicorn.h b/src/core/arm/unicorn/arm_unicorn.h
index 725c65085..810aff311 100644
--- a/src/core/arm/unicorn/arm_unicorn.h
+++ b/src/core/arm/unicorn/arm_unicorn.h
@@ -20,7 +20,8 @@ public:
20 AArch64, // 64-bit ARM 20 AArch64, // 64-bit ARM
21 }; 21 };
22 22
23 explicit ARM_Unicorn(System& system, Arch architecture); 23 explicit ARM_Unicorn(System& system, CPUInterrupts& interrupt_handlers, bool uses_wall_clock,
24 Arch architecture, std::size_t core_index);
24 ~ARM_Unicorn() override; 25 ~ARM_Unicorn() override;
25 26
26 void SetPC(u64 pc) override; 27 void SetPC(u64 pc) override;
@@ -35,6 +36,7 @@ public:
35 void SetTlsAddress(VAddr address) override; 36 void SetTlsAddress(VAddr address) override;
36 void SetTPIDR_EL0(u64 value) override; 37 void SetTPIDR_EL0(u64 value) override;
37 u64 GetTPIDR_EL0() const override; 38 u64 GetTPIDR_EL0() const override;
39 void ChangeProcessorID(std::size_t new_core_id) override;
38 void PrepareReschedule() override; 40 void PrepareReschedule() override;
39 void ClearExclusiveState() override; 41 void ClearExclusiveState() override;
40 void ExecuteInstructions(std::size_t num_instructions); 42 void ExecuteInstructions(std::size_t num_instructions);
@@ -55,6 +57,7 @@ private:
55 uc_engine* uc{}; 57 uc_engine* uc{};
56 GDBStub::BreakpointAddress last_bkpt{}; 58 GDBStub::BreakpointAddress last_bkpt{};
57 bool last_bkpt_hit = false; 59 bool last_bkpt_hit = false;
60 std::size_t core_index;
58}; 61};
59 62
60} // namespace Core 63} // namespace Core
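
As the Run() hunk above shows, the Unicorn backend now executes in small batches and polls its per-core interrupt handler instead of draining a core-timing downcount. A minimal sketch of that polling pattern, assuming an atomic flag as a stand-in for Core::CPUInterruptHandler (whose real definition lives in core/arm/cpu_interrupt_handler.h):

#include <atomic>
#include <cstddef>

// Stand-in for the per-core interrupt handler; not the real class.
struct InterruptFlag {
    std::atomic<bool> interrupted{false};
    bool IsInterrupted() const { return interrupted.load(std::memory_order_acquire); }
};

// Runs the interpreter in small slices so an interrupt raised by the scheduler
// or another core is observed within a handful of guest instructions.
template <typename ExecuteFn>
void RunUntilInterrupted(const InterruptFlag& flag, ExecuteFn&& execute_instructions) {
    while (!flag.IsInterrupted()) {
        execute_instructions(10); // same small batch size the diff uses
    }
}
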
diff --git a/src/core/core.cpp b/src/core/core.cpp
index f9f8a3000..69a1aa0a5 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -8,10 +8,10 @@
8 8
9#include "common/file_util.h" 9#include "common/file_util.h"
10#include "common/logging/log.h" 10#include "common/logging/log.h"
11#include "common/microprofile.h"
11#include "common/string_util.h" 12#include "common/string_util.h"
12#include "core/arm/exclusive_monitor.h" 13#include "core/arm/exclusive_monitor.h"
13#include "core/core.h" 14#include "core/core.h"
14#include "core/core_manager.h"
15#include "core/core_timing.h" 15#include "core/core_timing.h"
16#include "core/cpu_manager.h" 16#include "core/cpu_manager.h"
17#include "core/device_memory.h" 17#include "core/device_memory.h"
@@ -51,6 +51,11 @@
51#include "video_core/renderer_base.h" 51#include "video_core/renderer_base.h"
52#include "video_core/video_core.h" 52#include "video_core/video_core.h"
53 53
54MICROPROFILE_DEFINE(ARM_Jit_Dynarmic_CPU0, "ARM JIT", "Dynarmic CPU 0", MP_RGB(255, 64, 64));
55MICROPROFILE_DEFINE(ARM_Jit_Dynarmic_CPU1, "ARM JIT", "Dynarmic CPU 1", MP_RGB(255, 64, 64));
56MICROPROFILE_DEFINE(ARM_Jit_Dynarmic_CPU2, "ARM JIT", "Dynarmic CPU 2", MP_RGB(255, 64, 64));
57MICROPROFILE_DEFINE(ARM_Jit_Dynarmic_CPU3, "ARM JIT", "Dynarmic CPU 3", MP_RGB(255, 64, 64));
58
54namespace Core { 59namespace Core {
55 60
56namespace { 61namespace {
@@ -117,23 +122,22 @@ struct System::Impl {
117 : kernel{system}, fs_controller{system}, memory{system}, 122 : kernel{system}, fs_controller{system}, memory{system},
118 cpu_manager{system}, reporter{system}, applet_manager{system} {} 123 cpu_manager{system}, reporter{system}, applet_manager{system} {}
119 124
120 CoreManager& CurrentCoreManager() { 125 ResultStatus Run() {
121 return cpu_manager.GetCurrentCoreManager(); 126 status = ResultStatus::Success;
122 }
123 127
124 Kernel::PhysicalCore& CurrentPhysicalCore() { 128 kernel.Suspend(false);
125 const auto index = cpu_manager.GetActiveCoreIndex(); 129 core_timing.SyncPause(false);
126 return kernel.PhysicalCore(index); 130 cpu_manager.Pause(false);
127 }
128 131
129 Kernel::PhysicalCore& GetPhysicalCore(std::size_t index) { 132 return status;
130 return kernel.PhysicalCore(index);
131 } 133 }
132 134
133 ResultStatus RunLoop(bool tight_loop) { 135 ResultStatus Pause() {
134 status = ResultStatus::Success; 136 status = ResultStatus::Success;
135 137
136 cpu_manager.RunLoop(tight_loop); 138 core_timing.SyncPause(true);
139 kernel.Suspend(true);
140 cpu_manager.Pause(true);
137 141
138 return status; 142 return status;
139 } 143 }
@@ -143,14 +147,22 @@ struct System::Impl {
143 147
144 device_memory = std::make_unique<Core::DeviceMemory>(system); 148 device_memory = std::make_unique<Core::DeviceMemory>(system);
145 149
146 core_timing.Initialize(); 150 is_multicore = Settings::values.use_multi_core.GetValue();
151 is_async_gpu = is_multicore || Settings::values.use_asynchronous_gpu_emulation.GetValue();
152
153 kernel.SetMulticore(is_multicore);
154 cpu_manager.SetMulticore(is_multicore);
155 cpu_manager.SetAsyncGpu(is_async_gpu);
156 core_timing.SetMulticore(is_multicore);
157
158 core_timing.Initialize([&system]() { system.RegisterHostThread(); });
147 kernel.Initialize(); 159 kernel.Initialize();
148 cpu_manager.Initialize(); 160 cpu_manager.Initialize();
149 161
150 const auto current_time = std::chrono::duration_cast<std::chrono::seconds>( 162 const auto current_time = std::chrono::duration_cast<std::chrono::seconds>(
151 std::chrono::system_clock::now().time_since_epoch()); 163 std::chrono::system_clock::now().time_since_epoch());
152 Settings::values.custom_rtc_differential = 164 Settings::values.custom_rtc_differential =
153 Settings::values.custom_rtc.value_or(current_time) - current_time; 165 Settings::values.custom_rtc.GetValue().value_or(current_time) - current_time;
154 166
155 // Create a default fs if one doesn't already exist. 167 // Create a default fs if one doesn't already exist.
156 if (virtual_filesystem == nullptr) 168 if (virtual_filesystem == nullptr)
@@ -180,6 +192,11 @@ struct System::Impl {
180 is_powered_on = true; 192 is_powered_on = true;
181 exit_lock = false; 193 exit_lock = false;
182 194
195 microprofile_dynarmic[0] = MICROPROFILE_TOKEN(ARM_Jit_Dynarmic_CPU0);
196 microprofile_dynarmic[1] = MICROPROFILE_TOKEN(ARM_Jit_Dynarmic_CPU1);
197 microprofile_dynarmic[2] = MICROPROFILE_TOKEN(ARM_Jit_Dynarmic_CPU2);
198 microprofile_dynarmic[3] = MICROPROFILE_TOKEN(ARM_Jit_Dynarmic_CPU3);
199
183 LOG_DEBUG(Core, "Initialized OK"); 200 LOG_DEBUG(Core, "Initialized OK");
184 201
185 return ResultStatus::Success; 202 return ResultStatus::Success;
@@ -277,8 +294,6 @@ struct System::Impl {
277 service_manager.reset(); 294 service_manager.reset();
278 cheat_engine.reset(); 295 cheat_engine.reset();
279 telemetry_session.reset(); 296 telemetry_session.reset();
280 perf_stats.reset();
281 gpu_core.reset();
282 device_memory.reset(); 297 device_memory.reset();
283 298
284 // Close all CPU/threading state 299 // Close all CPU/threading state
@@ -290,6 +305,8 @@ struct System::Impl {
290 305
291 // Close app loader 306 // Close app loader
292 app_loader.reset(); 307 app_loader.reset();
308 gpu_core.reset();
309 perf_stats.reset();
293 310
294 // Clear all applets 311 // Clear all applets
295 applet_manager.ClearAll(); 312 applet_manager.ClearAll();
@@ -382,25 +399,35 @@ struct System::Impl {
382 399
383 std::unique_ptr<Core::PerfStats> perf_stats; 400 std::unique_ptr<Core::PerfStats> perf_stats;
384 Core::FrameLimiter frame_limiter; 401 Core::FrameLimiter frame_limiter;
402
403 bool is_multicore{};
404 bool is_async_gpu{};
405
406 std::array<u64, Core::Hardware::NUM_CPU_CORES> dynarmic_ticks{};
407 std::array<MicroProfileToken, Core::Hardware::NUM_CPU_CORES> microprofile_dynarmic{};
385}; 408};
386 409
387System::System() : impl{std::make_unique<Impl>(*this)} {} 410System::System() : impl{std::make_unique<Impl>(*this)} {}
388System::~System() = default; 411System::~System() = default;
389 412
390CoreManager& System::CurrentCoreManager() { 413CpuManager& System::GetCpuManager() {
391 return impl->CurrentCoreManager(); 414 return impl->cpu_manager;
415}
416
417const CpuManager& System::GetCpuManager() const {
418 return impl->cpu_manager;
392} 419}
393 420
394const CoreManager& System::CurrentCoreManager() const { 421System::ResultStatus System::Run() {
395 return impl->CurrentCoreManager(); 422 return impl->Run();
396} 423}
397 424
398System::ResultStatus System::RunLoop(bool tight_loop) { 425System::ResultStatus System::Pause() {
399 return impl->RunLoop(tight_loop); 426 return impl->Pause();
400} 427}
401 428
402System::ResultStatus System::SingleStep() { 429System::ResultStatus System::SingleStep() {
403 return RunLoop(false); 430 return ResultStatus::Success;
404} 431}
405 432
406void System::InvalidateCpuInstructionCaches() { 433void System::InvalidateCpuInstructionCaches() {
@@ -416,7 +443,7 @@ bool System::IsPoweredOn() const {
416} 443}
417 444
418void System::PrepareReschedule() { 445void System::PrepareReschedule() {
419 impl->CurrentPhysicalCore().Stop(); 446 // Deprecated, does nothing, kept for backward compatibility.
420} 447}
421 448
422void System::PrepareReschedule(const u32 core_index) { 449void System::PrepareReschedule(const u32 core_index) {
@@ -436,31 +463,41 @@ const TelemetrySession& System::TelemetrySession() const {
436} 463}
437 464
438ARM_Interface& System::CurrentArmInterface() { 465ARM_Interface& System::CurrentArmInterface() {
439 return impl->CurrentPhysicalCore().ArmInterface(); 466 return impl->kernel.CurrentScheduler().GetCurrentThread()->ArmInterface();
440} 467}
441 468
442const ARM_Interface& System::CurrentArmInterface() const { 469const ARM_Interface& System::CurrentArmInterface() const {
443 return impl->CurrentPhysicalCore().ArmInterface(); 470 return impl->kernel.CurrentScheduler().GetCurrentThread()->ArmInterface();
444} 471}
445 472
446std::size_t System::CurrentCoreIndex() const { 473std::size_t System::CurrentCoreIndex() const {
447 return impl->cpu_manager.GetActiveCoreIndex(); 474 std::size_t core = impl->kernel.GetCurrentHostThreadID();
475 ASSERT(core < Core::Hardware::NUM_CPU_CORES);
476 return core;
448} 477}
449 478
450Kernel::Scheduler& System::CurrentScheduler() { 479Kernel::Scheduler& System::CurrentScheduler() {
451 return impl->CurrentPhysicalCore().Scheduler(); 480 return impl->kernel.CurrentScheduler();
452} 481}
453 482
454const Kernel::Scheduler& System::CurrentScheduler() const { 483const Kernel::Scheduler& System::CurrentScheduler() const {
455 return impl->CurrentPhysicalCore().Scheduler(); 484 return impl->kernel.CurrentScheduler();
485}
486
487Kernel::PhysicalCore& System::CurrentPhysicalCore() {
488 return impl->kernel.CurrentPhysicalCore();
489}
490
491const Kernel::PhysicalCore& System::CurrentPhysicalCore() const {
492 return impl->kernel.CurrentPhysicalCore();
456} 493}
457 494
458Kernel::Scheduler& System::Scheduler(std::size_t core_index) { 495Kernel::Scheduler& System::Scheduler(std::size_t core_index) {
459 return impl->GetPhysicalCore(core_index).Scheduler(); 496 return impl->kernel.Scheduler(core_index);
460} 497}
461 498
462const Kernel::Scheduler& System::Scheduler(std::size_t core_index) const { 499const Kernel::Scheduler& System::Scheduler(std::size_t core_index) const {
463 return impl->GetPhysicalCore(core_index).Scheduler(); 500 return impl->kernel.Scheduler(core_index);
464} 501}
465 502
466/// Gets the global scheduler 503/// Gets the global scheduler
@@ -490,20 +527,15 @@ const Kernel::Process* System::CurrentProcess() const {
490} 527}
491 528
492ARM_Interface& System::ArmInterface(std::size_t core_index) { 529ARM_Interface& System::ArmInterface(std::size_t core_index) {
493 return impl->GetPhysicalCore(core_index).ArmInterface(); 530 auto* thread = impl->kernel.Scheduler(core_index).GetCurrentThread();
531 ASSERT(thread && !thread->IsHLEThread());
532 return thread->ArmInterface();
494} 533}
495 534
496const ARM_Interface& System::ArmInterface(std::size_t core_index) const { 535const ARM_Interface& System::ArmInterface(std::size_t core_index) const {
497 return impl->GetPhysicalCore(core_index).ArmInterface(); 536 auto* thread = impl->kernel.Scheduler(core_index).GetCurrentThread();
498} 537 ASSERT(thread && !thread->IsHLEThread());
499 538 return thread->ArmInterface();
500CoreManager& System::GetCoreManager(std::size_t core_index) {
501 return impl->cpu_manager.GetCoreManager(core_index);
502}
503
504const CoreManager& System::GetCoreManager(std::size_t core_index) const {
505 ASSERT(core_index < NUM_CPU_CORES);
506 return impl->cpu_manager.GetCoreManager(core_index);
507} 539}
508 540
509ExclusiveMonitor& System::Monitor() { 541ExclusiveMonitor& System::Monitor() {
@@ -722,4 +754,18 @@ void System::RegisterHostThread() {
722 impl->kernel.RegisterHostThread(); 754 impl->kernel.RegisterHostThread();
723} 755}
724 756
757void System::EnterDynarmicProfile() {
758 std::size_t core = impl->kernel.GetCurrentHostThreadID();
759 impl->dynarmic_ticks[core] = MicroProfileEnter(impl->microprofile_dynarmic[core]);
760}
761
762void System::ExitDynarmicProfile() {
763 std::size_t core = impl->kernel.GetCurrentHostThreadID();
764 MicroProfileLeave(impl->microprofile_dynarmic[core], impl->dynarmic_ticks[core]);
765}
766
767bool System::IsMulticore() const {
768 return impl->is_multicore;
769}
770
725} // namespace Core 771} // namespace Core
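
EnterDynarmicProfile/ExitDynarmicProfile bracket JIT execution per host thread, using the host thread ID to pick the right per-core MicroProfile token. The commit calls them directly from the run loops in cpu_manager.cpp; the scope guard below is a hypothetical convenience wrapper, shown only to make the pairing explicit on every exit path:

#include "core/core.h"

// Hypothetical RAII helper (not in the commit) that keeps the profile
// enter/exit calls balanced even on early returns or exceptions.
class DynarmicProfileScope {
public:
    explicit DynarmicProfileScope(Core::System& system) : system{system} {
        system.EnterDynarmicProfile();
    }
    ~DynarmicProfileScope() {
        system.ExitDynarmicProfile();
    }
    DynarmicProfileScope(const DynarmicProfileScope&) = delete;
    DynarmicProfileScope& operator=(const DynarmicProfileScope&) = delete;

private:
    Core::System& system;
};
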
diff --git a/src/core/core.h b/src/core/core.h
index acc53d6a1..5c6cfbffe 100644
--- a/src/core/core.h
+++ b/src/core/core.h
@@ -27,6 +27,7 @@ class VfsFilesystem;
27namespace Kernel { 27namespace Kernel {
28class GlobalScheduler; 28class GlobalScheduler;
29class KernelCore; 29class KernelCore;
30class PhysicalCore;
30class Process; 31class Process;
31class Scheduler; 32class Scheduler;
32} // namespace Kernel 33} // namespace Kernel
@@ -90,7 +91,7 @@ class InterruptManager;
90namespace Core { 91namespace Core {
91 92
92class ARM_Interface; 93class ARM_Interface;
93class CoreManager; 94class CpuManager;
94class DeviceMemory; 95class DeviceMemory;
95class ExclusiveMonitor; 96class ExclusiveMonitor;
96class FrameLimiter; 97class FrameLimiter;
@@ -136,16 +137,16 @@ public:
136 }; 137 };
137 138
138 /** 139 /**
139 * Run the core CPU loop 140 * Run the OS and Application
140 * This function runs the core for the specified number of CPU instructions before trying to 141 * This function will start emulation and run the relevant devices
141 * update hardware. This is much faster than SingleStep (and should be equivalent), as the CPU 142 */
142 * is not required to do a full dispatch with each instruction. NOTE: the number of instructions 143 ResultStatus Run();
143 * requested is not guaranteed to run, as this will be interrupted preemptively if a hardware 144
144 * update is requested (e.g. on a thread switch). 145 /**
145 * @param tight_loop If false, the CPU single-steps. 146 * Pause the OS and Application
146 * @return Result status, indicating whether or not the operation succeeded. 147 * This function will pause emulation and stop the relevant devices
147 */ 148 */
148 ResultStatus RunLoop(bool tight_loop = true); 149 ResultStatus Pause();
149 150
150 /** 151 /**
151 * Step the CPU one instruction 152 * Step the CPU one instruction
@@ -209,17 +210,21 @@ public:
209 /// Gets the scheduler for the CPU core that is currently running 210 /// Gets the scheduler for the CPU core that is currently running
210 const Kernel::Scheduler& CurrentScheduler() const; 211 const Kernel::Scheduler& CurrentScheduler() const;
211 212
213 /// Gets the physical core for the CPU core that is currently running
214 Kernel::PhysicalCore& CurrentPhysicalCore();
215
216 /// Gets the physical core for the CPU core that is currently running
217 const Kernel::PhysicalCore& CurrentPhysicalCore() const;
218
212 /// Gets a reference to an ARM interface for the CPU core with the specified index 219 /// Gets a reference to an ARM interface for the CPU core with the specified index
213 ARM_Interface& ArmInterface(std::size_t core_index); 220 ARM_Interface& ArmInterface(std::size_t core_index);
214 221
215 /// Gets a const reference to an ARM interface from the CPU core with the specified index 222 /// Gets a const reference to an ARM interface from the CPU core with the specified index
216 const ARM_Interface& ArmInterface(std::size_t core_index) const; 223 const ARM_Interface& ArmInterface(std::size_t core_index) const;
217 224
218 /// Gets a CPU interface to the CPU core with the specified index 225 CpuManager& GetCpuManager();
219 CoreManager& GetCoreManager(std::size_t core_index);
220 226
221 /// Gets a CPU interface to the CPU core with the specified index 227 const CpuManager& GetCpuManager() const;
222 const CoreManager& GetCoreManager(std::size_t core_index) const;
223 228
224 /// Gets a reference to the exclusive monitor 229 /// Gets a reference to the exclusive monitor
225 ExclusiveMonitor& Monitor(); 230 ExclusiveMonitor& Monitor();
@@ -370,14 +375,17 @@ public:
370 /// Register a host thread as an auxiliary thread. 375 /// Register a host thread as an auxiliary thread.
371 void RegisterHostThread(); 376 void RegisterHostThread();
372 377
373private: 378 /// Enter Dynarmic Microprofile
374 System(); 379 void EnterDynarmicProfile();
380
381 /// Exit Dynarmic Microprofile
382 void ExitDynarmicProfile();
375 383
376 /// Returns the currently running CPU core 384 /// Tells if system is running on multicore.
377 CoreManager& CurrentCoreManager(); 385 bool IsMulticore() const;
378 386
379 /// Returns the currently running CPU core 387private:
380 const CoreManager& CurrentCoreManager() const; 388 System();
381 389
382 /** 390 /**
383 * Initialize the emulated system. 391 * Initialize the emulated system.
diff --git a/src/core/core_manager.cpp b/src/core/core_manager.cpp
deleted file mode 100644
index b6b797c80..000000000
--- a/src/core/core_manager.cpp
+++ /dev/null
@@ -1,67 +0,0 @@
1// Copyright 2018 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <condition_variable>
6#include <mutex>
7
8#include "common/logging/log.h"
9#include "core/arm/exclusive_monitor.h"
10#include "core/arm/unicorn/arm_unicorn.h"
11#include "core/core.h"
12#include "core/core_manager.h"
13#include "core/core_timing.h"
14#include "core/hle/kernel/kernel.h"
15#include "core/hle/kernel/physical_core.h"
16#include "core/hle/kernel/scheduler.h"
17#include "core/hle/kernel/thread.h"
18#include "core/hle/lock.h"
19#include "core/settings.h"
20
21namespace Core {
22
23CoreManager::CoreManager(System& system, std::size_t core_index)
24 : global_scheduler{system.GlobalScheduler()}, physical_core{system.Kernel().PhysicalCore(
25 core_index)},
26 core_timing{system.CoreTiming()}, core_index{core_index} {}
27
28CoreManager::~CoreManager() = default;
29
30void CoreManager::RunLoop(bool tight_loop) {
31 Reschedule();
32
33 // If we don't have a currently active thread then don't execute instructions,
34 // instead advance to the next event and try to yield to the next thread
35 if (Kernel::GetCurrentThread() == nullptr) {
36 LOG_TRACE(Core, "Core-{} idling", core_index);
37 core_timing.Idle();
38 } else {
39 if (tight_loop) {
40 physical_core.Run();
41 } else {
42 physical_core.Step();
43 }
44 }
45 core_timing.Advance();
46
47 Reschedule();
48}
49
50void CoreManager::SingleStep() {
51 return RunLoop(false);
52}
53
54void CoreManager::PrepareReschedule() {
55 physical_core.Stop();
56}
57
58void CoreManager::Reschedule() {
59 // Lock the global kernel mutex when we manipulate the HLE state
60 std::lock_guard lock(HLE::g_hle_lock);
61
62 global_scheduler.SelectThread(core_index);
63
64 physical_core.Scheduler().TryDoContextSwitch();
65}
66
67} // namespace Core
diff --git a/src/core/core_manager.h b/src/core/core_manager.h
deleted file mode 100644
index d525de00a..000000000
--- a/src/core/core_manager.h
+++ /dev/null
@@ -1,63 +0,0 @@
1// Copyright 2018 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <atomic>
8#include <cstddef>
9#include <memory>
10#include "common/common_types.h"
11
12namespace Kernel {
13class GlobalScheduler;
14class PhysicalCore;
15} // namespace Kernel
16
17namespace Core {
18class System;
19}
20
21namespace Core::Timing {
22class CoreTiming;
23}
24
25namespace Core::Memory {
26class Memory;
27}
28
29namespace Core {
30
31constexpr unsigned NUM_CPU_CORES{4};
32
33class CoreManager {
34public:
35 CoreManager(System& system, std::size_t core_index);
36 ~CoreManager();
37
38 void RunLoop(bool tight_loop = true);
39
40 void SingleStep();
41
42 void PrepareReschedule();
43
44 bool IsMainCore() const {
45 return core_index == 0;
46 }
47
48 std::size_t CoreIndex() const {
49 return core_index;
50 }
51
52private:
53 void Reschedule();
54
55 Kernel::GlobalScheduler& global_scheduler;
56 Kernel::PhysicalCore& physical_core;
57 Timing::CoreTiming& core_timing;
58
59 std::atomic<bool> reschedule_pending = false;
60 std::size_t core_index;
61};
62
63} // namespace Core
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp
index 46d4178c4..a63e60461 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -1,29 +1,27 @@
1// Copyright 2008 Dolphin Emulator Project / 2017 Citra Emulator Project 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2+ 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include "core/core_timing.h"
6
7#include <algorithm> 5#include <algorithm>
8#include <mutex> 6#include <mutex>
9#include <string> 7#include <string>
10#include <tuple> 8#include <tuple>
11 9
12#include "common/assert.h" 10#include "common/assert.h"
13#include "common/thread.h" 11#include "common/microprofile.h"
12#include "core/core_timing.h"
14#include "core/core_timing_util.h" 13#include "core/core_timing_util.h"
15#include "core/hardware_properties.h"
16 14
17namespace Core::Timing { 15namespace Core::Timing {
18 16
19constexpr int MAX_SLICE_LENGTH = 10000; 17constexpr u64 MAX_SLICE_LENGTH = 4000;
20 18
21std::shared_ptr<EventType> CreateEvent(std::string name, TimedCallback&& callback) { 19std::shared_ptr<EventType> CreateEvent(std::string name, TimedCallback&& callback) {
22 return std::make_shared<EventType>(std::move(callback), std::move(name)); 20 return std::make_shared<EventType>(std::move(callback), std::move(name));
23} 21}
24 22
25struct CoreTiming::Event { 23struct CoreTiming::Event {
26 s64 time; 24 u64 time;
27 u64 fifo_order; 25 u64 fifo_order;
28 u64 userdata; 26 u64 userdata;
29 std::weak_ptr<EventType> type; 27 std::weak_ptr<EventType> type;
@@ -39,51 +37,90 @@ struct CoreTiming::Event {
39 } 37 }
40}; 38};
41 39
42CoreTiming::CoreTiming() = default; 40CoreTiming::CoreTiming() {
43CoreTiming::~CoreTiming() = default; 41 clock =
42 Common::CreateBestMatchingClock(Core::Hardware::BASE_CLOCK_RATE, Core::Hardware::CNTFREQ);
43}
44 44
45void CoreTiming::Initialize() { 45CoreTiming::~CoreTiming() = default;
46 downcounts.fill(MAX_SLICE_LENGTH);
47 time_slice.fill(MAX_SLICE_LENGTH);
48 slice_length = MAX_SLICE_LENGTH;
49 global_timer = 0;
50 idled_cycles = 0;
51 current_context = 0;
52 46
53 // The time between CoreTiming being initialized and the first call to Advance() is considered 47void CoreTiming::ThreadEntry(CoreTiming& instance) {
54 // the slice boundary between slice -1 and slice 0. Dispatcher loops must call Advance() before 48 constexpr char name[] = "yuzu:HostTiming";
55 // executing the first cycle of each slice to prepare the slice length and downcount for 49 MicroProfileOnThreadCreate(name);
56 // that slice. 50 Common::SetCurrentThreadName(name);
57 is_global_timer_sane = true; 51 Common::SetCurrentThreadPriority(Common::ThreadPriority::VeryHigh);
52 instance.on_thread_init();
53 instance.ThreadLoop();
54}
58 55
56void CoreTiming::Initialize(std::function<void(void)>&& on_thread_init_) {
57 on_thread_init = std::move(on_thread_init_);
59 event_fifo_id = 0; 58 event_fifo_id = 0;
60 59 shutting_down = false;
60 ticks = 0;
61 const auto empty_timed_callback = [](u64, s64) {}; 61 const auto empty_timed_callback = [](u64, s64) {};
62 ev_lost = CreateEvent("_lost_event", empty_timed_callback); 62 ev_lost = CreateEvent("_lost_event", empty_timed_callback);
63 if (is_multicore) {
64 timer_thread = std::make_unique<std::thread>(ThreadEntry, std::ref(*this));
65 }
63} 66}
64 67
65void CoreTiming::Shutdown() { 68void CoreTiming::Shutdown() {
69 paused = true;
70 shutting_down = true;
71 pause_event.Set();
72 event.Set();
73 if (timer_thread) {
74 timer_thread->join();
75 }
66 ClearPendingEvents(); 76 ClearPendingEvents();
77 timer_thread.reset();
78 has_started = false;
67} 79}
68 80
69void CoreTiming::ScheduleEvent(s64 cycles_into_future, const std::shared_ptr<EventType>& event_type, 81void CoreTiming::Pause(bool is_paused) {
70 u64 userdata) { 82 paused = is_paused;
71 std::lock_guard guard{inner_mutex}; 83 pause_event.Set();
72 const s64 timeout = GetTicks() + cycles_into_future; 84}
73 85
74 // If this event needs to be scheduled before the next advance(), force one early 86void CoreTiming::SyncPause(bool is_paused) {
75 if (!is_global_timer_sane) { 87 if (is_paused == paused && paused_set == paused) {
76 ForceExceptionCheck(cycles_into_future); 88 return;
89 }
90 Pause(is_paused);
91 if (timer_thread) {
92 if (!is_paused) {
93 pause_event.Set();
94 }
95 event.Set();
96 while (paused_set != is_paused)
97 ;
77 } 98 }
99}
78 100
79 event_queue.emplace_back(Event{timeout, event_fifo_id++, userdata, event_type}); 101bool CoreTiming::IsRunning() const {
102 return !paused_set;
103}
80 104
81 std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>()); 105bool CoreTiming::HasPendingEvents() const {
106 return !(wait_set && event_queue.empty());
82} 107}
83 108
84void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u64 userdata) { 109void CoreTiming::ScheduleEvent(s64 ns_into_future, const std::shared_ptr<EventType>& event_type,
85 std::lock_guard guard{inner_mutex}; 110 u64 userdata) {
111 {
112 std::scoped_lock scope{basic_lock};
113 const u64 timeout = static_cast<u64>(GetGlobalTimeNs().count() + ns_into_future);
114
115 event_queue.emplace_back(Event{timeout, event_fifo_id++, userdata, event_type});
86 116
117 std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
118 }
119 event.Set();
120}
121
122void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u64 userdata) {
123 std::scoped_lock scope{basic_lock};
87 const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) { 124 const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
88 return e.type.lock().get() == event_type.get() && e.userdata == userdata; 125 return e.type.lock().get() == event_type.get() && e.userdata == userdata;
89 }); 126 });
@@ -95,21 +132,39 @@ void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u
95 } 132 }
96} 133}
97 134
98u64 CoreTiming::GetTicks() const { 135void CoreTiming::AddTicks(u64 ticks) {
99 u64 ticks = static_cast<u64>(global_timer); 136 this->ticks += ticks;
100 if (!is_global_timer_sane) { 137 downcount -= ticks;
101 ticks += accumulated_ticks; 138}
139
140void CoreTiming::Idle() {
141 if (!event_queue.empty()) {
142 const u64 next_event_time = event_queue.front().time;
143 const u64 next_ticks = nsToCycles(std::chrono::nanoseconds(next_event_time)) + 10U;
144 if (next_ticks > ticks) {
145 ticks = next_ticks;
146 }
147 return;
102 } 148 }
103 return ticks; 149 ticks += 1000U;
104} 150}
105 151
106u64 CoreTiming::GetIdleTicks() const { 152void CoreTiming::ResetTicks() {
107 return static_cast<u64>(idled_cycles); 153 downcount = MAX_SLICE_LENGTH;
108} 154}
109 155
110void CoreTiming::AddTicks(u64 ticks) { 156u64 CoreTiming::GetCPUTicks() const {
111 accumulated_ticks += ticks; 157 if (is_multicore) {
112 downcounts[current_context] -= static_cast<s64>(ticks); 158 return clock->GetCPUCycles();
159 }
160 return ticks;
161}
162
163u64 CoreTiming::GetClockTicks() const {
164 if (is_multicore) {
165 return clock->GetClockCycles();
166 }
167 return CpuCyclesToClockCycles(ticks);
113} 168}
114 169
115void CoreTiming::ClearPendingEvents() { 170void CoreTiming::ClearPendingEvents() {
@@ -117,7 +172,7 @@ void CoreTiming::ClearPendingEvents() {
117} 172}
118 173
119void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) { 174void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
120 std::lock_guard guard{inner_mutex}; 175 std::scoped_lock lock{basic_lock};
121 176
122 const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) { 177 const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
123 return e.type.lock().get() == event_type.get(); 178 return e.type.lock().get() == event_type.get();
@@ -130,97 +185,68 @@ void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
130 } 185 }
131} 186}
132 187
133void CoreTiming::ForceExceptionCheck(s64 cycles) { 188std::optional<s64> CoreTiming::Advance() {
134 cycles = std::max<s64>(0, cycles); 189 std::scoped_lock lock{advance_lock, basic_lock};
135 if (downcounts[current_context] <= cycles) { 190 global_timer = GetGlobalTimeNs().count();
136 return;
137 }
138
139 // downcount is always (much) smaller than MAX_INT so we can safely cast cycles to an int
140 // here. Account for cycles already executed by adjusting the g.slice_length
141 downcounts[current_context] = static_cast<int>(cycles);
142}
143
144std::optional<u64> CoreTiming::NextAvailableCore(const s64 needed_ticks) const {
145 const u64 original_context = current_context;
146 u64 next_context = (original_context + 1) % num_cpu_cores;
147 while (next_context != original_context) {
148 if (time_slice[next_context] >= needed_ticks) {
149 return {next_context};
150 } else if (time_slice[next_context] >= 0) {
151 return std::nullopt;
152 }
153 next_context = (next_context + 1) % num_cpu_cores;
154 }
155 return std::nullopt;
156}
157
158void CoreTiming::Advance() {
159 std::unique_lock<std::mutex> guard(inner_mutex);
160
161 const u64 cycles_executed = accumulated_ticks;
162 time_slice[current_context] = std::max<s64>(0, time_slice[current_context] - accumulated_ticks);
163 global_timer += cycles_executed;
164
165 is_global_timer_sane = true;
166 191
167 while (!event_queue.empty() && event_queue.front().time <= global_timer) { 192 while (!event_queue.empty() && event_queue.front().time <= global_timer) {
168 Event evt = std::move(event_queue.front()); 193 Event evt = std::move(event_queue.front());
169 std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>()); 194 std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>());
170 event_queue.pop_back(); 195 event_queue.pop_back();
171 inner_mutex.unlock(); 196 basic_lock.unlock();
172 197
173 if (auto event_type{evt.type.lock()}) { 198 if (auto event_type{evt.type.lock()}) {
174 event_type->callback(evt.userdata, global_timer - evt.time); 199 event_type->callback(evt.userdata, global_timer - evt.time);
175 } 200 }
176 201
177 inner_mutex.lock(); 202 basic_lock.lock();
203 global_timer = GetGlobalTimeNs().count();
178 } 204 }
179 205
180 is_global_timer_sane = false;
181
182 // Still events left (scheduled in the future)
183 if (!event_queue.empty()) { 206 if (!event_queue.empty()) {
184 const s64 needed_ticks = 207 const s64 next_time = event_queue.front().time - global_timer;
185 std::min<s64>(event_queue.front().time - global_timer, MAX_SLICE_LENGTH); 208 return next_time;
186 const auto next_core = NextAvailableCore(needed_ticks); 209 } else {
187 if (next_core) { 210 return std::nullopt;
188 downcounts[*next_core] = needed_ticks;
189 }
190 } 211 }
191
192 accumulated_ticks = 0;
193
194 downcounts[current_context] = time_slice[current_context];
195} 212}
196 213
197void CoreTiming::ResetRun() { 214void CoreTiming::ThreadLoop() {
198 downcounts.fill(MAX_SLICE_LENGTH); 215 has_started = true;
199 time_slice.fill(MAX_SLICE_LENGTH); 216 while (!shutting_down) {
200 current_context = 0; 217 while (!paused) {
201 // Still events left (scheduled in the future) 218 paused_set = false;
202 if (!event_queue.empty()) { 219 const auto next_time = Advance();
203 const s64 needed_ticks = 220 if (next_time) {
204 std::min<s64>(event_queue.front().time - global_timer, MAX_SLICE_LENGTH); 221 if (*next_time > 0) {
205 downcounts[current_context] = needed_ticks; 222 std::chrono::nanoseconds next_time_ns = std::chrono::nanoseconds(*next_time);
223 event.WaitFor(next_time_ns);
224 }
225 } else {
226 wait_set = true;
227 event.Wait();
228 }
229 wait_set = false;
230 }
231 paused_set = true;
232 clock->Pause(true);
233 pause_event.Wait();
234 clock->Pause(false);
206 } 235 }
207
208 is_global_timer_sane = false;
209 accumulated_ticks = 0;
210} 236}
211 237
212void CoreTiming::Idle() { 238std::chrono::nanoseconds CoreTiming::GetGlobalTimeNs() const {
213 accumulated_ticks += downcounts[current_context]; 239 if (is_multicore) {
214 idled_cycles += downcounts[current_context]; 240 return clock->GetTimeNS();
215 downcounts[current_context] = 0; 241 }
242 return CyclesToNs(ticks);
216} 243}
217 244
218std::chrono::microseconds CoreTiming::GetGlobalTimeUs() const { 245std::chrono::microseconds CoreTiming::GetGlobalTimeUs() const {
219 return std::chrono::microseconds{GetTicks() * 1000000 / Hardware::BASE_CLOCK_RATE}; 246 if (is_multicore) {
220} 247 return clock->GetTimeUS();
221 248 }
222s64 CoreTiming::GetDowncount() const { 249 return CyclesToUs(ticks);
223 return downcounts[current_context];
224} 250}
225 251
226} // namespace Core::Timing 252} // namespace Core::Timing
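
With this rewrite, CoreTiming schedules against a nanosecond wall clock and, in multicore mode, fires callbacks from the dedicated "yuzu:HostTiming" thread rather than from per-slice Advance() calls. A small usage sketch against the API shown above (the surrounding setup such as Initialize and the multicore flag is assumed to have been done elsewhere):

#include <memory>
#include "core/core_timing.h"

// Illustrative only: register an event and schedule it one millisecond out.
void ScheduleExampleEvent(Core::Timing::CoreTiming& core_timing) {
    // The callback receives the userdata passed at schedule time and how late
    // the event actually fired.
    static const auto example_event = Core::Timing::CreateEvent(
        "ExampleEvent", [](u64 userdata, s64 late) {
            // ... react to the timer expiring ...
        });

    constexpr s64 one_millisecond_ns = 1'000'000;
    core_timing.ScheduleEvent(one_millisecond_ns, example_event, /*userdata=*/0);
}
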
diff --git a/src/core/core_timing.h b/src/core/core_timing.h
index d50f4eb8a..72faaab64 100644
--- a/src/core/core_timing.h
+++ b/src/core/core_timing.h
@@ -1,19 +1,25 @@
1// Copyright 2008 Dolphin Emulator Project / 2017 Citra Emulator Project 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2+ 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#pragma once 5#pragma once
6 6
7#include <atomic>
7#include <chrono> 8#include <chrono>
8#include <functional> 9#include <functional>
9#include <memory> 10#include <memory>
10#include <mutex> 11#include <mutex>
11#include <optional> 12#include <optional>
12#include <string> 13#include <string>
14#include <thread>
13#include <vector> 15#include <vector>
14 16
15#include "common/common_types.h" 17#include "common/common_types.h"
18#include "common/spin_lock.h"
19#include "common/thread.h"
16#include "common/threadsafe_queue.h" 20#include "common/threadsafe_queue.h"
21#include "common/wall_clock.h"
22#include "core/hardware_properties.h"
17 23
18namespace Core::Timing { 24namespace Core::Timing {
19 25
@@ -56,16 +62,40 @@ public:
56 62
57 /// CoreTiming begins at the boundary of timing slice -1. An initial call to Advance() is 63 /// CoreTiming begins at the boundary of timing slice -1. An initial call to Advance() is
58 /// required to end slice - 1 and start slice 0 before the first cycle of code is executed. 64 /// required to end slice - 1 and start slice 0 before the first cycle of code is executed.
59 void Initialize(); 65 void Initialize(std::function<void(void)>&& on_thread_init_);
60 66
61 /// Tears down all timing related functionality. 67 /// Tears down all timing related functionality.
62 void Shutdown(); 68 void Shutdown();
63 69
64 /// After the first Advance, the slice lengths and the downcount will be reduced whenever an 70 /// Sets if emulation is multicore or single core, must be set before Initialize
65 /// event is scheduled earlier than the current values. 71 void SetMulticore(bool is_multicore) {
66 /// 72 this->is_multicore = is_multicore;
67 /// Scheduling from a callback will not update the downcount until the Advance() completes. 73 }
68 void ScheduleEvent(s64 cycles_into_future, const std::shared_ptr<EventType>& event_type, 74
75 /// Check if it's using host timing.
76 bool IsHostTiming() const {
77 return is_multicore;
78 }
79
80 /// Pauses/Unpauses the execution of the timer thread.
81 void Pause(bool is_paused);
82
83 /// Pauses/Unpauses the execution of the timer thread and waits until paused.
84 void SyncPause(bool is_paused);
85
86 /// Checks if core timing is running.
87 bool IsRunning() const;
88
89 /// Checks if the timer thread has started.
90 bool HasStarted() const {
91 return has_started;
92 }
93
94 /// Checks if there are any pending time events.
95 bool HasPendingEvents() const;
96
97 /// Schedules an event in core timing
98 void ScheduleEvent(s64 ns_into_future, const std::shared_ptr<EventType>& event_type,
69 u64 userdata = 0); 99 u64 userdata = 0);
70 100
71 void UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u64 userdata); 101 void UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u64 userdata);
@@ -73,41 +103,30 @@ public:
73 /// We only permit one event of each type in the queue at a time. 103 /// We only permit one event of each type in the queue at a time.
74 void RemoveEvent(const std::shared_ptr<EventType>& event_type); 104 void RemoveEvent(const std::shared_ptr<EventType>& event_type);
75 105
76 void ForceExceptionCheck(s64 cycles);
77
78 /// This should only be called from the emu thread, if you are calling it any other thread,
79 /// you are doing something evil
80 u64 GetTicks() const;
81
82 u64 GetIdleTicks() const;
83
84 void AddTicks(u64 ticks); 106 void AddTicks(u64 ticks);
85 107
86 /// Advance must be called at the beginning of dispatcher loops, not the end. Advance() ends 108 void ResetTicks();
87 /// the previous timing slice and begins the next one, you must Advance from the previous
88 /// slice to the current one before executing any cycles. CoreTiming starts in slice -1 so an
89 /// Advance() is required to initialize the slice length before the first cycle of emulated
90 /// instructions is executed.
91 void Advance();
92 109
93 /// Pretend that the main CPU has executed enough cycles to reach the next event.
94 void Idle(); 110 void Idle();
95 111
96 std::chrono::microseconds GetGlobalTimeUs() const; 112 s64 GetDowncount() const {
113 return downcount;
114 }
97 115
98 void ResetRun(); 116 /// Returns current time in emulated CPU cycles
117 u64 GetCPUTicks() const;
99 118
100 s64 GetDowncount() const; 119 /// Returns current time in emulated in Clock cycles
120 u64 GetClockTicks() const;
101 121
102 void SwitchContext(u64 new_context) { 122 /// Returns current time in microseconds.
103 current_context = new_context; 123 std::chrono::microseconds GetGlobalTimeUs() const;
104 }
105 124
106 bool CanCurrentContextRun() const { 125 /// Returns current time in nanoseconds.
107 return time_slice[current_context] > 0; 126 std::chrono::nanoseconds GetGlobalTimeNs() const;
108 }
109 127
110 std::optional<u64> NextAvailableCore(const s64 needed_ticks) const; 128 /// Checks for events manually and returns time in nanoseconds for next event, threadsafe.
129 std::optional<s64> Advance();
111 130
112private: 131private:
113 struct Event; 132 struct Event;
@@ -115,21 +134,14 @@ private:
115 /// Clear all pending events. This should ONLY be done on exit. 134 /// Clear all pending events. This should ONLY be done on exit.
116 void ClearPendingEvents(); 135 void ClearPendingEvents();
117 136
118 static constexpr u64 num_cpu_cores = 4; 137 static void ThreadEntry(CoreTiming& instance);
138 void ThreadLoop();
119 139
120 s64 global_timer = 0; 140 std::unique_ptr<Common::WallClock> clock;
121 s64 idled_cycles = 0;
122 s64 slice_length = 0;
123 u64 accumulated_ticks = 0;
124 std::array<s64, num_cpu_cores> downcounts{};
125 // Slice of time assigned to each core per run.
126 std::array<s64, num_cpu_cores> time_slice{};
127 u64 current_context = 0;
128 141
129 // Are we in a function that has been called from Advance() 142 u64 global_timer = 0;
130 // If events are scheduled from a function that gets called from Advance(), 143
131 // don't change slice_length and downcount. 144 std::chrono::nanoseconds start_point;
132 bool is_global_timer_sane = false;
133 145
134 // The queue is a min-heap using std::make_heap/push_heap/pop_heap. 146 // The queue is a min-heap using std::make_heap/push_heap/pop_heap.
135 // We don't use std::priority_queue because we need to be able to serialize, unserialize and 147 // We don't use std::priority_queue because we need to be able to serialize, unserialize and
@@ -139,8 +151,23 @@ private:
139 u64 event_fifo_id = 0; 151 u64 event_fifo_id = 0;
140 152
141 std::shared_ptr<EventType> ev_lost; 153 std::shared_ptr<EventType> ev_lost;
142 154 Common::Event event{};
143 std::mutex inner_mutex; 155 Common::Event pause_event{};
156 Common::SpinLock basic_lock{};
157 Common::SpinLock advance_lock{};
158 std::unique_ptr<std::thread> timer_thread;
159 std::atomic<bool> paused{};
160 std::atomic<bool> paused_set{};
161 std::atomic<bool> wait_set{};
162 std::atomic<bool> shutting_down{};
163 std::atomic<bool> has_started{};
164 std::function<void(void)> on_thread_init{};
165
166 bool is_multicore{};
167
168 /// Cycle timing
169 u64 ticks{};
170 s64 downcount{};
144}; 171};
145 172
146/// Creates a core timing event with the given name and callback. 173/// Creates a core timing event with the given name and callback.
diff --git a/src/core/core_timing_util.cpp b/src/core/core_timing_util.cpp
index de50d3b14..aefc63663 100644
--- a/src/core/core_timing_util.cpp
+++ b/src/core/core_timing_util.cpp
@@ -38,15 +38,23 @@ s64 usToCycles(std::chrono::microseconds us) {
38} 38}
39 39
40s64 nsToCycles(std::chrono::nanoseconds ns) { 40s64 nsToCycles(std::chrono::nanoseconds ns) {
41 if (static_cast<u64>(ns.count() / 1000000000) > MAX_VALUE_TO_MULTIPLY) { 41 const u128 temporal = Common::Multiply64Into128(ns.count(), Hardware::BASE_CLOCK_RATE);
42 LOG_ERROR(Core_Timing, "Integer overflow, use max value"); 42 return Common::Divide128On32(temporal, static_cast<u32>(1000000000)).first;
43 return std::numeric_limits<s64>::max(); 43}
44 } 44
45 if (static_cast<u64>(ns.count()) > MAX_VALUE_TO_MULTIPLY) { 45u64 msToClockCycles(std::chrono::milliseconds ns) {
46 LOG_DEBUG(Core_Timing, "Time very big, do rounding"); 46 const u128 temp = Common::Multiply64Into128(ns.count(), Hardware::CNTFREQ);
47 return Hardware::BASE_CLOCK_RATE * (ns.count() / 1000000000); 47 return Common::Divide128On32(temp, 1000).first;
48 } 48}
49 return (Hardware::BASE_CLOCK_RATE * ns.count()) / 1000000000; 49
50u64 usToClockCycles(std::chrono::microseconds ns) {
51 const u128 temp = Common::Multiply64Into128(ns.count(), Hardware::CNTFREQ);
52 return Common::Divide128On32(temp, 1000000).first;
53}
54
55u64 nsToClockCycles(std::chrono::nanoseconds ns) {
56 const u128 temp = Common::Multiply64Into128(ns.count(), Hardware::CNTFREQ);
57 return Common::Divide128On32(temp, 1000000000).first;
50} 58}
51 59
52u64 CpuCyclesToClockCycles(u64 ticks) { 60u64 CpuCyclesToClockCycles(u64 ticks) {
@@ -54,4 +62,22 @@ u64 CpuCyclesToClockCycles(u64 ticks) {
54 return Common::Divide128On32(temporal, static_cast<u32>(Hardware::BASE_CLOCK_RATE)).first; 62 return Common::Divide128On32(temporal, static_cast<u32>(Hardware::BASE_CLOCK_RATE)).first;
55} 63}
56 64
65std::chrono::milliseconds CyclesToMs(s64 cycles) {
66 const u128 temporal = Common::Multiply64Into128(cycles, 1000);
67 u64 ms = Common::Divide128On32(temporal, static_cast<u32>(Hardware::BASE_CLOCK_RATE)).first;
68 return std::chrono::milliseconds(ms);
69}
70
71std::chrono::nanoseconds CyclesToNs(s64 cycles) {
72 const u128 temporal = Common::Multiply64Into128(cycles, 1000000000);
73 u64 ns = Common::Divide128On32(temporal, static_cast<u32>(Hardware::BASE_CLOCK_RATE)).first;
74 return std::chrono::nanoseconds(ns);
75}
76
77std::chrono::microseconds CyclesToUs(s64 cycles) {
78 const u128 temporal = Common::Multiply64Into128(cycles, 1000000);
79 u64 us = Common::Divide128On32(temporal, static_cast<u32>(Hardware::BASE_CLOCK_RATE)).first;
80 return std::chrono::microseconds(us);
81}
82
57} // namespace Core::Timing 83} // namespace Core::Timing
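
These conversions now widen to 128 bits before dividing. The motivation: with a base clock on the order of 1 GHz, the old form "cycles * 1'000'000'000 / BASE_CLOCK_RATE" overflows a signed 64-bit intermediate once cycles exceeds roughly 9.2e9 (2^63 / 1e9), i.e. after only a few seconds of emulated time. The sketch below is an equivalent using the GCC/Clang __int128 extension; Multiply64Into128/Divide128On32 are yuzu's portable form of the same widening, and the clock constant here is an assumed value for illustration only:

#include <chrono>
#include <cstdint>

constexpr std::uint64_t kAssumedBaseClockRate = 1'019'215'872; // assumed ~1.02 GHz

// Widen the product to 128 bits so large cycle counts do not overflow.
std::chrono::nanoseconds CyclesToNsWide(std::uint64_t cycles) {
    const auto product = static_cast<unsigned __int128>(cycles) * 1'000'000'000u;
    return std::chrono::nanoseconds{
        static_cast<std::int64_t>(product / kAssumedBaseClockRate)};
}
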
diff --git a/src/core/core_timing_util.h b/src/core/core_timing_util.h
index addc72b19..2ed979e14 100644
--- a/src/core/core_timing_util.h
+++ b/src/core/core_timing_util.h
@@ -13,18 +13,12 @@ namespace Core::Timing {
13s64 msToCycles(std::chrono::milliseconds ms); 13s64 msToCycles(std::chrono::milliseconds ms);
14s64 usToCycles(std::chrono::microseconds us); 14s64 usToCycles(std::chrono::microseconds us);
15s64 nsToCycles(std::chrono::nanoseconds ns); 15s64 nsToCycles(std::chrono::nanoseconds ns);
16 16u64 msToClockCycles(std::chrono::milliseconds ns);
17inline std::chrono::milliseconds CyclesToMs(s64 cycles) { 17u64 usToClockCycles(std::chrono::microseconds ns);
18 return std::chrono::milliseconds(cycles * 1000 / Hardware::BASE_CLOCK_RATE); 18u64 nsToClockCycles(std::chrono::nanoseconds ns);
19} 19std::chrono::milliseconds CyclesToMs(s64 cycles);
20 20std::chrono::nanoseconds CyclesToNs(s64 cycles);
21inline std::chrono::nanoseconds CyclesToNs(s64 cycles) { 21std::chrono::microseconds CyclesToUs(s64 cycles);
22 return std::chrono::nanoseconds(cycles * 1000000000 / Hardware::BASE_CLOCK_RATE);
23}
24
25inline std::chrono::microseconds CyclesToUs(s64 cycles) {
26 return std::chrono::microseconds(cycles * 1000000 / Hardware::BASE_CLOCK_RATE);
27}
28 22
29u64 CpuCyclesToClockCycles(u64 ticks); 23u64 CpuCyclesToClockCycles(u64 ticks);
30 24
diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp
index 70ddbdcca..32afcf3ae 100644
--- a/src/core/cpu_manager.cpp
+++ b/src/core/cpu_manager.cpp
@@ -2,80 +2,372 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include "common/fiber.h"
6#include "common/microprofile.h"
7#include "common/thread.h"
5#include "core/arm/exclusive_monitor.h" 8#include "core/arm/exclusive_monitor.h"
6#include "core/core.h" 9#include "core/core.h"
7#include "core/core_manager.h"
8#include "core/core_timing.h" 10#include "core/core_timing.h"
9#include "core/cpu_manager.h" 11#include "core/cpu_manager.h"
10#include "core/gdbstub/gdbstub.h" 12#include "core/gdbstub/gdbstub.h"
13#include "core/hle/kernel/kernel.h"
14#include "core/hle/kernel/physical_core.h"
15#include "core/hle/kernel/scheduler.h"
16#include "core/hle/kernel/thread.h"
17#include "video_core/gpu.h"
11 18
12namespace Core { 19namespace Core {
13 20
14CpuManager::CpuManager(System& system) : system{system} {} 21CpuManager::CpuManager(System& system) : system{system} {}
15CpuManager::~CpuManager() = default; 22CpuManager::~CpuManager() = default;
16 23
24void CpuManager::ThreadStart(CpuManager& cpu_manager, std::size_t core) {
25 cpu_manager.RunThread(core);
26}
27
17void CpuManager::Initialize() { 28void CpuManager::Initialize() {
18 for (std::size_t index = 0; index < core_managers.size(); ++index) { 29 running_mode = true;
19 core_managers[index] = std::make_unique<CoreManager>(system, index); 30 if (is_multicore) {
31 for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
32 core_data[core].host_thread =
33 std::make_unique<std::thread>(ThreadStart, std::ref(*this), core);
34 }
35 } else {
36 core_data[0].host_thread = std::make_unique<std::thread>(ThreadStart, std::ref(*this), 0);
20 } 37 }
21} 38}
22 39
23void CpuManager::Shutdown() { 40void CpuManager::Shutdown() {
24 for (auto& cpu_core : core_managers) { 41 running_mode = false;
25 cpu_core.reset(); 42 Pause(false);
43 if (is_multicore) {
44 for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
45 core_data[core].host_thread->join();
46 core_data[core].host_thread.reset();
47 }
48 } else {
49 core_data[0].host_thread->join();
50 core_data[0].host_thread.reset();
26 } 51 }
27} 52}
28 53
29CoreManager& CpuManager::GetCoreManager(std::size_t index) { 54std::function<void(void*)> CpuManager::GetGuestThreadStartFunc() {
30 return *core_managers.at(index); 55 return std::function<void(void*)>(GuestThreadFunction);
31} 56}
32 57
33const CoreManager& CpuManager::GetCoreManager(std::size_t index) const { 58std::function<void(void*)> CpuManager::GetIdleThreadStartFunc() {
34 return *core_managers.at(index); 59 return std::function<void(void*)>(IdleThreadFunction);
35} 60}
36 61
37CoreManager& CpuManager::GetCurrentCoreManager() { 62std::function<void(void*)> CpuManager::GetSuspendThreadStartFunc() {
38 // Otherwise, use single-threaded mode active_core variable 63 return std::function<void(void*)>(SuspendThreadFunction);
39 return *core_managers[active_core];
40} 64}
41 65
42const CoreManager& CpuManager::GetCurrentCoreManager() const { 66void CpuManager::GuestThreadFunction(void* cpu_manager_) {
43 // Otherwise, use single-threaded mode active_core variable 67 CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
44 return *core_managers[active_core]; 68 if (cpu_manager->is_multicore) {
69 cpu_manager->MultiCoreRunGuestThread();
70 } else {
71 cpu_manager->SingleCoreRunGuestThread();
72 }
45} 73}
46 74
47void CpuManager::RunLoop(bool tight_loop) { 75void CpuManager::GuestRewindFunction(void* cpu_manager_) {
48 if (GDBStub::IsServerEnabled()) { 76 CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
49 GDBStub::HandlePacket(); 77 if (cpu_manager->is_multicore) {
78 cpu_manager->MultiCoreRunGuestLoop();
79 } else {
80 cpu_manager->SingleCoreRunGuestLoop();
81 }
82}
50 83
51 // If the loop is halted and we want to step, use a tiny (1) number of instructions to 84void CpuManager::IdleThreadFunction(void* cpu_manager_) {
52 // execute. Otherwise, get out of the loop function. 85 CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
53 if (GDBStub::GetCpuHaltFlag()) { 86 if (cpu_manager->is_multicore) {
54 if (GDBStub::GetCpuStepFlag()) { 87 cpu_manager->MultiCoreRunIdleThread();
55 tight_loop = false; 88 } else {
56 } else { 89 cpu_manager->SingleCoreRunIdleThread();
57 return; 90 }
91}
92
93void CpuManager::SuspendThreadFunction(void* cpu_manager_) {
94 CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
95 if (cpu_manager->is_multicore) {
96 cpu_manager->MultiCoreRunSuspendThread();
97 } else {
98 cpu_manager->SingleCoreRunSuspendThread();
99 }
100}
101
102void* CpuManager::GetStartFuncParamater() {
103 return static_cast<void*>(this);
104}
105
106///////////////////////////////////////////////////////////////////////////////
107/// MultiCore ///
108///////////////////////////////////////////////////////////////////////////////
109
110void CpuManager::MultiCoreRunGuestThread() {
111 auto& kernel = system.Kernel();
112 {
113 auto& sched = kernel.CurrentScheduler();
114 sched.OnThreadStart();
115 }
116 MultiCoreRunGuestLoop();
117}
118
119void CpuManager::MultiCoreRunGuestLoop() {
120 auto& kernel = system.Kernel();
121 auto* thread = kernel.CurrentScheduler().GetCurrentThread();
122 while (true) {
123 auto* physical_core = &kernel.CurrentPhysicalCore();
124 auto& arm_interface = thread->ArmInterface();
125 system.EnterDynarmicProfile();
126 while (!physical_core->IsInterrupted()) {
127 arm_interface.Run();
128 physical_core = &kernel.CurrentPhysicalCore();
129 }
130 system.ExitDynarmicProfile();
131 arm_interface.ClearExclusiveState();
132 auto& scheduler = kernel.CurrentScheduler();
133 scheduler.TryDoContextSwitch();
134 }
135}
136
137void CpuManager::MultiCoreRunIdleThread() {
138 auto& kernel = system.Kernel();
139 while (true) {
140 auto& physical_core = kernel.CurrentPhysicalCore();
141 physical_core.Idle();
142 auto& scheduler = kernel.CurrentScheduler();
143 scheduler.TryDoContextSwitch();
144 }
145}
146
147void CpuManager::MultiCoreRunSuspendThread() {
148 auto& kernel = system.Kernel();
149 {
150 auto& sched = kernel.CurrentScheduler();
151 sched.OnThreadStart();
152 }
153 while (true) {
154 auto core = kernel.GetCurrentHostThreadID();
155 auto& scheduler = kernel.CurrentScheduler();
156 Kernel::Thread* current_thread = scheduler.GetCurrentThread();
157 Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[core].host_context);
158 ASSERT(scheduler.ContextSwitchPending());
159 ASSERT(core == kernel.GetCurrentHostThreadID());
160 scheduler.TryDoContextSwitch();
161 }
162}
163
164void CpuManager::MultiCorePause(bool paused) {
165 if (!paused) {
166 bool all_not_barrier = false;
167 while (!all_not_barrier) {
168 all_not_barrier = true;
169 for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
170 all_not_barrier &=
171 !core_data[core].is_running.load() && core_data[core].initialized.load();
172 }
173 }
174 for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
175 core_data[core].enter_barrier->Set();
176 }
177 if (paused_state.load()) {
178 bool all_barrier = false;
179 while (!all_barrier) {
180 all_barrier = true;
181 for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
182 all_barrier &=
183 core_data[core].is_paused.load() && core_data[core].initialized.load();
184 }
185 }
186 for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
187 core_data[core].exit_barrier->Set();
188 }
189 }
190 } else {
191 /// Wait until all cores are paused.
192 bool all_barrier = false;
193 while (!all_barrier) {
194 all_barrier = true;
195 for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
196 all_barrier &=
197 core_data[core].is_paused.load() && core_data[core].initialized.load();
58 } 198 }
59 } 199 }
200 /// Don't release the barrier
60 } 201 }
202 paused_state = paused;
203}
204
205///////////////////////////////////////////////////////////////////////////////
206/// SingleCore ///
207///////////////////////////////////////////////////////////////////////////////
61 208
62 auto& core_timing = system.CoreTiming(); 209void CpuManager::SingleCoreRunGuestThread() {
63 core_timing.ResetRun(); 210 auto& kernel = system.Kernel();
64 bool keep_running{}; 211 {
65 do { 212 auto& sched = kernel.CurrentScheduler();
66 keep_running = false; 213 sched.OnThreadStart();
67 for (active_core = 0; active_core < NUM_CPU_CORES; ++active_core) { 214 }
68 core_timing.SwitchContext(active_core); 215 SingleCoreRunGuestLoop();
69 if (core_timing.CanCurrentContextRun()) { 216}
70 core_managers[active_core]->RunLoop(tight_loop); 217
218void CpuManager::SingleCoreRunGuestLoop() {
219 auto& kernel = system.Kernel();
220 auto* thread = kernel.CurrentScheduler().GetCurrentThread();
221 while (true) {
222 auto* physical_core = &kernel.CurrentPhysicalCore();
223 auto& arm_interface = thread->ArmInterface();
224 system.EnterDynarmicProfile();
225 if (!physical_core->IsInterrupted()) {
226 arm_interface.Run();
227 physical_core = &kernel.CurrentPhysicalCore();
228 }
229 system.ExitDynarmicProfile();
230 thread->SetPhantomMode(true);
231 system.CoreTiming().Advance();
232 thread->SetPhantomMode(false);
233 arm_interface.ClearExclusiveState();
234 PreemptSingleCore();
235 auto& scheduler = kernel.Scheduler(current_core);
236 scheduler.TryDoContextSwitch();
237 }
238}
239
240void CpuManager::SingleCoreRunIdleThread() {
241 auto& kernel = system.Kernel();
242 while (true) {
243 auto& physical_core = kernel.CurrentPhysicalCore();
244 PreemptSingleCore(false);
245 system.CoreTiming().AddTicks(1000U);
246 idle_count++;
247 auto& scheduler = physical_core.Scheduler();
248 scheduler.TryDoContextSwitch();
249 }
250}
251
252void CpuManager::SingleCoreRunSuspendThread() {
253 auto& kernel = system.Kernel();
254 {
255 auto& sched = kernel.CurrentScheduler();
256 sched.OnThreadStart();
257 }
258 while (true) {
259 auto core = kernel.GetCurrentHostThreadID();
260 auto& scheduler = kernel.CurrentScheduler();
261 Kernel::Thread* current_thread = scheduler.GetCurrentThread();
262 Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[0].host_context);
263 ASSERT(scheduler.ContextSwitchPending());
264 ASSERT(core == kernel.GetCurrentHostThreadID());
265 scheduler.TryDoContextSwitch();
266 }
267}
268
269void CpuManager::PreemptSingleCore(bool from_running_environment) {
270 std::size_t old_core = current_core;
271 auto& scheduler = system.Kernel().Scheduler(old_core);
272 Kernel::Thread* current_thread = scheduler.GetCurrentThread();
273 if (idle_count >= 4 || from_running_environment) {
274 if (!from_running_environment) {
275 system.CoreTiming().Idle();
276 idle_count = 0;
277 }
278 current_thread->SetPhantomMode(true);
279 system.CoreTiming().Advance();
280 current_thread->SetPhantomMode(false);
281 }
282 current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES);
283 system.CoreTiming().ResetTicks();
284 scheduler.Unload();
285 auto& next_scheduler = system.Kernel().Scheduler(current_core);
286 Common::Fiber::YieldTo(current_thread->GetHostContext(), next_scheduler.ControlContext());
287 /// May have changed scheduler
288 auto& current_scheduler = system.Kernel().Scheduler(current_core);
289 current_scheduler.Reload();
290 auto* current_thread2 = current_scheduler.GetCurrentThread();
291 if (!current_thread2->IsIdleThread()) {
292 idle_count = 0;
293 }
294}
295
296void CpuManager::SingleCorePause(bool paused) {
297 if (!paused) {
298 bool all_not_barrier = false;
299 while (!all_not_barrier) {
300 all_not_barrier = !core_data[0].is_running.load() && core_data[0].initialized.load();
301 }
302 core_data[0].enter_barrier->Set();
303 if (paused_state.load()) {
304 bool all_barrier = false;
305 while (!all_barrier) {
306 all_barrier = core_data[0].is_paused.load() && core_data[0].initialized.load();
71 } 307 }
72 keep_running |= core_timing.CanCurrentContextRun(); 308 core_data[0].exit_barrier->Set();
73 } 309 }
74 } while (keep_running); 310 } else {
311 /// Wait until the core is paused.
312 bool all_barrier = false;
313 while (!all_barrier) {
314 all_barrier = core_data[0].is_paused.load() && core_data[0].initialized.load();
315 }
316 /// Don't release the barrier
317 }
318 paused_state = paused;
319}
320
321void CpuManager::Pause(bool paused) {
322 if (is_multicore) {
323 MultiCorePause(paused);
324 } else {
325 SingleCorePause(paused);
326 }
327}
75 328
76 if (GDBStub::IsServerEnabled()) { 329void CpuManager::RunThread(std::size_t core) {
77 GDBStub::SetCpuStepFlag(false); 330 /// Initialization
331 system.RegisterCoreThread(core);
332 std::string name;
333 if (is_multicore) {
334 name = "yuzu:CoreCPUThread_" + std::to_string(core);
335 } else {
336 name = "yuzu:CPUThread";
337 }
338 MicroProfileOnThreadCreate(name.c_str());
339 Common::SetCurrentThreadName(name.c_str());
340 Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
341 auto& data = core_data[core];
342 data.enter_barrier = std::make_unique<Common::Event>();
343 data.exit_barrier = std::make_unique<Common::Event>();
344 data.host_context = Common::Fiber::ThreadToFiber();
345 data.is_running = false;
346 data.initialized = true;
347 const bool sc_sync = !is_async_gpu && !is_multicore;
348 bool sc_sync_first_use = sc_sync;
349 /// Running
350 while (running_mode) {
351 data.is_running = false;
352 data.enter_barrier->Wait();
353 if (sc_sync_first_use) {
354 system.GPU().ObtainContext();
355 sc_sync_first_use = false;
356 }
357 auto& scheduler = system.Kernel().CurrentScheduler();
358 Kernel::Thread* current_thread = scheduler.GetCurrentThread();
359 data.is_running = true;
360 Common::Fiber::YieldTo(data.host_context, current_thread->GetHostContext());
361 data.is_running = false;
362 data.is_paused = true;
363 data.exit_barrier->Wait();
364 data.is_paused = false;
78 } 365 }
366 /// Time to cleanup
367 data.host_context->Exit();
368 data.enter_barrier.reset();
369 data.exit_barrier.reset();
370 data.initialized = false;
79} 371}
80 372
81} // namespace Core 373} // namespace Core
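The new guest loops replace the old tick-budgeted RunLoop: each host thread executes the JIT until its physical core is interrupted, clears the exclusive state, and then asks the scheduler for a context switch. A minimal sketch of that control flow, with a std::atomic flag standing in for PhysicalCore::IsInterrupted() and a stub Run() standing in for the dynarmic interface (all names below are illustrative, not the real API):

#include <atomic>
#include <cstdio>

// Stand-in for the physical core + JIT pair.
struct FakeCore {
    std::atomic<bool> interrupted{false};
    void Run() { std::puts("running guest code"); }
};

// Shape of MultiCoreRunGuestLoop: run guest code until the core is interrupted,
// then acknowledge the interrupt and let the scheduler pick the next thread.
void GuestLoop(FakeCore& core, int context_switches) {
    for (int i = 0; i < context_switches; ++i) {
        while (!core.interrupted.load(std::memory_order_acquire)) {
            core.Run();
            // In the emulator an SVC, a timer event or another core raises this
            // flag; the sketch raises it itself so the loop terminates.
            core.interrupted.store(true, std::memory_order_release);
        }
        core.interrupted.store(false, std::memory_order_relaxed);
        std::puts("TryDoContextSwitch()");
    }
}

int main() {
    FakeCore core;
    GuestLoop(core, 2);
}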
diff --git a/src/core/cpu_manager.h b/src/core/cpu_manager.h
index 97554d1bb..35929ed94 100644
--- a/src/core/cpu_manager.h
+++ b/src/core/cpu_manager.h
@@ -5,12 +5,19 @@
5#pragma once 5#pragma once
6 6
7#include <array> 7#include <array>
8#include <atomic>
9#include <functional>
8#include <memory> 10#include <memory>
11#include <thread>
9#include "core/hardware_properties.h" 12#include "core/hardware_properties.h"
10 13
14namespace Common {
15class Event;
16class Fiber;
17} // namespace Common
18
11namespace Core { 19namespace Core {
12 20
13class CoreManager;
14class System; 21class System;
15 22
16class CpuManager { 23class CpuManager {
@@ -24,24 +31,75 @@ public:
24 CpuManager& operator=(const CpuManager&) = delete; 31 CpuManager& operator=(const CpuManager&) = delete;
25 CpuManager& operator=(CpuManager&&) = delete; 32 CpuManager& operator=(CpuManager&&) = delete;
26 33
34 /// Sets if emulation is multicore or single core, must be set before Initialize
35 void SetMulticore(bool is_multicore) {
36 this->is_multicore = is_multicore;
37 }
38
39 /// Sets if emulation is using an asynchronous GPU.
40 void SetAsyncGpu(bool is_async_gpu) {
41 this->is_async_gpu = is_async_gpu;
42 }
43
27 void Initialize(); 44 void Initialize();
28 void Shutdown(); 45 void Shutdown();
29 46
30 CoreManager& GetCoreManager(std::size_t index); 47 void Pause(bool paused);
31 const CoreManager& GetCoreManager(std::size_t index) const;
32 48
33 CoreManager& GetCurrentCoreManager(); 49 std::function<void(void*)> GetGuestThreadStartFunc();
34 const CoreManager& GetCurrentCoreManager() const; 50 std::function<void(void*)> GetIdleThreadStartFunc();
51 std::function<void(void*)> GetSuspendThreadStartFunc();
52 void* GetStartFuncParamater();
35 53
36 std::size_t GetActiveCoreIndex() const { 54 void PreemptSingleCore(bool from_running_environment = true);
37 return active_core;
38 }
39 55
40 void RunLoop(bool tight_loop); 56 std::size_t CurrentCore() const {
57 return current_core.load();
58 }
41 59
42private: 60private:
43 std::array<std::unique_ptr<CoreManager>, Hardware::NUM_CPU_CORES> core_managers; 61 static void GuestThreadFunction(void* cpu_manager);
44 std::size_t active_core{}; ///< Active core, only used in single thread mode 62 static void GuestRewindFunction(void* cpu_manager);
63 static void IdleThreadFunction(void* cpu_manager);
64 static void SuspendThreadFunction(void* cpu_manager);
65
66 void MultiCoreRunGuestThread();
67 void MultiCoreRunGuestLoop();
68 void MultiCoreRunIdleThread();
69 void MultiCoreRunSuspendThread();
70 void MultiCorePause(bool paused);
71
72 void SingleCoreRunGuestThread();
73 void SingleCoreRunGuestLoop();
74 void SingleCoreRunIdleThread();
75 void SingleCoreRunSuspendThread();
76 void SingleCorePause(bool paused);
77
78 static void ThreadStart(CpuManager& cpu_manager, std::size_t core);
79
80 void RunThread(std::size_t core);
81
82 struct CoreData {
83 std::shared_ptr<Common::Fiber> host_context;
84 std::unique_ptr<Common::Event> enter_barrier;
85 std::unique_ptr<Common::Event> exit_barrier;
86 std::atomic<bool> is_running;
87 std::atomic<bool> is_paused;
88 std::atomic<bool> initialized;
89 std::unique_ptr<std::thread> host_thread;
90 };
91
92 std::atomic<bool> running_mode{};
93 std::atomic<bool> paused_state{};
94
95 std::array<CoreData, Core::Hardware::NUM_CPU_CORES> core_data{};
96
97 bool is_async_gpu{};
98 bool is_multicore{};
99 std::atomic<std::size_t> current_core{};
100 std::size_t preemption_count{};
101 std::size_t idle_count{};
102 static constexpr std::size_t max_cycle_runs = 5;
45 103
46 System& system; 104 System& system;
47}; 105};
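Each CoreData entry pairs a host thread with enter/exit events that RunThread uses to park the core between Pause() requests: the thread reports itself as paused, waits on the exit barrier, and only resumes once the pauser releases it. A rough sketch of that handshake for a single core, using a condition variable in place of Common::Event (the Gate type and all names here are illustrative):

#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>

struct Gate { // stand-in for Common::Event
    std::mutex m;
    std::condition_variable cv;
    bool set = false;
    void Set() { std::lock_guard lk(m); set = true; cv.notify_one(); }
    void Wait() { std::unique_lock lk(m); cv.wait(lk, [&] { return set; }); set = false; }
};

std::atomic<bool> running{true}, is_running{false}, is_paused{false};
Gate enter_barrier, exit_barrier;

void HostThread() {
    while (running) {
        is_running = false;
        enter_barrier.Wait();   // released when emulation is (re)started
        is_running = true;
        /* ...yield into the guest fiber here... */
        is_running = false;
        is_paused = true;
        exit_barrier.Wait();    // released once the pauser has observed is_paused
        is_paused = false;
    }
}

int main() {
    std::thread t(HostThread);
    enter_barrier.Set();                          // let the core run once
    while (!is_paused) std::this_thread::yield(); // wait for it to park again
    running = false;
    exit_barrier.Set();                           // release it so it can exit
    t.join();
}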
diff --git a/src/core/crypto/key_manager.cpp b/src/core/crypto/key_manager.cpp
index 8997c7082..f87fe0abc 100644
--- a/src/core/crypto/key_manager.cpp
+++ b/src/core/crypto/key_manager.cpp
@@ -695,8 +695,9 @@ void KeyManager::WriteKeyToFile(KeyCategory category, std::string_view keyname,
695} 695}
696 696
697void KeyManager::SetKey(S128KeyType id, Key128 key, u64 field1, u64 field2) { 697void KeyManager::SetKey(S128KeyType id, Key128 key, u64 field1, u64 field2) {
698 if (s128_keys.find({id, field1, field2}) != s128_keys.end()) 698 if (s128_keys.find({id, field1, field2}) != s128_keys.end() || key == Key128{}) {
699 return; 699 return;
700 }
700 if (id == S128KeyType::Titlekey) { 701 if (id == S128KeyType::Titlekey) {
701 Key128 rights_id; 702 Key128 rights_id;
702 std::memcpy(rights_id.data(), &field2, sizeof(u64)); 703 std::memcpy(rights_id.data(), &field2, sizeof(u64));
@@ -716,8 +717,9 @@ void KeyManager::SetKey(S128KeyType id, Key128 key, u64 field1, u64 field2) {
716 return std::tie(elem.second.type, elem.second.field1, elem.second.field2) == 717 return std::tie(elem.second.type, elem.second.field1, elem.second.field2) ==
717 std::tie(id, field1, field2); 718 std::tie(id, field1, field2);
718 }); 719 });
719 if (iter2 != s128_file_id.end()) 720 if (iter2 != s128_file_id.end()) {
720 WriteKeyToFile(category, iter2->first, key); 721 WriteKeyToFile(category, iter2->first, key);
722 }
721 723
722 // Variable cases 724 // Variable cases
723 if (id == S128KeyType::KeyArea) { 725 if (id == S128KeyType::KeyArea) {
@@ -745,16 +747,18 @@ void KeyManager::SetKey(S128KeyType id, Key128 key, u64 field1, u64 field2) {
745} 747}
746 748
747void KeyManager::SetKey(S256KeyType id, Key256 key, u64 field1, u64 field2) { 749void KeyManager::SetKey(S256KeyType id, Key256 key, u64 field1, u64 field2) {
748 if (s256_keys.find({id, field1, field2}) != s256_keys.end()) 750 if (s256_keys.find({id, field1, field2}) != s256_keys.end() || key == Key256{}) {
749 return; 751 return;
752 }
750 const auto iter = std::find_if( 753 const auto iter = std::find_if(
751 s256_file_id.begin(), s256_file_id.end(), 754 s256_file_id.begin(), s256_file_id.end(),
752 [&id, &field1, &field2](const std::pair<std::string, KeyIndex<S256KeyType>> elem) { 755 [&id, &field1, &field2](const std::pair<std::string, KeyIndex<S256KeyType>> elem) {
753 return std::tie(elem.second.type, elem.second.field1, elem.second.field2) == 756 return std::tie(elem.second.type, elem.second.field1, elem.second.field2) ==
754 std::tie(id, field1, field2); 757 std::tie(id, field1, field2);
755 }); 758 });
756 if (iter != s256_file_id.end()) 759 if (iter != s256_file_id.end()) {
757 WriteKeyToFile(KeyCategory::Standard, iter->first, key); 760 WriteKeyToFile(KeyCategory::Standard, iter->first, key);
761 }
758 s256_keys[{id, field1, field2}] = key; 762 s256_keys[{id, field1, field2}] = key;
759} 763}
760 764
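The SetKey overloads now also return early when the supplied key is all zeroes. Because a value-initialized std::array compares element-by-element, that check is a single comparison against Key128{}. A small self-contained illustration, assuming Key128 is std::array<u8, 16> as in key_manager.h:

#include <array>
#include <cstdint>
#include <iostream>

using Key128 = std::array<std::uint8_t, 16>;

bool ShouldStoreKey(const Key128& key) {
    return key != Key128{}; // reject keys that are entirely zero
}

int main() {
    Key128 zero{};  // every byte is zero
    Key128 real{};
    real[0] = 0xAA;
    std::cout << ShouldStoreKey(zero) << ' ' << ShouldStoreKey(real) << '\n'; // prints: 0 1
}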
diff --git a/src/core/crypto/key_manager.h b/src/core/crypto/key_manager.h
index 7265c4171..9269a73f2 100644
--- a/src/core/crypto/key_manager.h
+++ b/src/core/crypto/key_manager.h
@@ -223,7 +223,16 @@ bool operator<(const KeyIndex<KeyType>& lhs, const KeyIndex<KeyType>& rhs) {
223 223
224class KeyManager { 224class KeyManager {
225public: 225public:
226 KeyManager(); 226 static KeyManager& Instance() {
227 static KeyManager instance;
228 return instance;
229 }
230
231 KeyManager(const KeyManager&) = delete;
232 KeyManager& operator=(const KeyManager&) = delete;
233
234 KeyManager(KeyManager&&) = delete;
235 KeyManager& operator=(KeyManager&&) = delete;
227 236
228 bool HasKey(S128KeyType id, u64 field1 = 0, u64 field2 = 0) const; 237 bool HasKey(S128KeyType id, u64 field1 = 0, u64 field2 = 0) const;
229 bool HasKey(S256KeyType id, u64 field1 = 0, u64 field2 = 0) const; 238 bool HasKey(S256KeyType id, u64 field1 = 0, u64 field2 = 0) const;
@@ -257,6 +266,8 @@ public:
257 bool AddTicketPersonalized(Ticket raw); 266 bool AddTicketPersonalized(Ticket raw);
258 267
259private: 268private:
269 KeyManager();
270
260 std::map<KeyIndex<S128KeyType>, Key128> s128_keys; 271 std::map<KeyIndex<S128KeyType>, Key128> s128_keys;
261 std::map<KeyIndex<S256KeyType>, Key256> s256_keys; 272 std::map<KeyIndex<S256KeyType>, Key256> s256_keys;
262 273
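KeyManager is now reached through Instance() instead of being constructed (and re-reading key files) at every use site; the constructor is private and copies/moves are deleted, so the function-local static is the only instance. This is the classic Meyers singleton, which C++11 guarantees is initialized exactly once even under concurrent first use. A minimal sketch of the same shape with a hypothetical class:

class Config {
public:
    static Config& Instance() {
        static Config instance; // thread-safe lazy initialization
        return instance;
    }

    Config(const Config&) = delete;
    Config& operator=(const Config&) = delete;
    Config(Config&&) = delete;
    Config& operator=(Config&&) = delete;

    int Value() const { return value; }

private:
    Config() = default; // only Instance() may construct it
    int value = 42;
};

// Usage: int v = Config::Instance().Value();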
diff --git a/src/core/file_sys/bis_factory.cpp b/src/core/file_sys/bis_factory.cpp
index 0af44f340..8935a62c3 100644
--- a/src/core/file_sys/bis_factory.cpp
+++ b/src/core/file_sys/bis_factory.cpp
@@ -79,7 +79,7 @@ VirtualDir BISFactory::OpenPartition(BisPartitionId id) const {
79} 79}
80 80
81VirtualFile BISFactory::OpenPartitionStorage(BisPartitionId id) const { 81VirtualFile BISFactory::OpenPartitionStorage(BisPartitionId id) const {
82 Core::Crypto::KeyManager keys; 82 auto& keys = Core::Crypto::KeyManager::Instance();
83 Core::Crypto::PartitionDataManager pdm{ 83 Core::Crypto::PartitionDataManager pdm{
84 Core::System::GetInstance().GetFilesystem()->OpenDirectory( 84 Core::System::GetInstance().GetFilesystem()->OpenDirectory(
85 FileUtil::GetUserPath(FileUtil::UserPath::SysDataDir), Mode::Read)}; 85 FileUtil::GetUserPath(FileUtil::UserPath::SysDataDir), Mode::Read)};
diff --git a/src/core/file_sys/card_image.cpp b/src/core/file_sys/card_image.cpp
index 07d0c8d5d..664a47e7f 100644
--- a/src/core/file_sys/card_image.cpp
+++ b/src/core/file_sys/card_image.cpp
@@ -178,7 +178,7 @@ u32 XCI::GetSystemUpdateVersion() {
178 return 0; 178 return 0;
179 179
180 for (const auto& file : update->GetFiles()) { 180 for (const auto& file : update->GetFiles()) {
181 NCA nca{file, nullptr, 0, keys}; 181 NCA nca{file, nullptr, 0};
182 182
183 if (nca.GetStatus() != Loader::ResultStatus::Success) 183 if (nca.GetStatus() != Loader::ResultStatus::Success)
184 continue; 184 continue;
@@ -286,7 +286,7 @@ Loader::ResultStatus XCI::AddNCAFromPartition(XCIPartition part) {
286 continue; 286 continue;
287 } 287 }
288 288
289 auto nca = std::make_shared<NCA>(file, nullptr, 0, keys); 289 auto nca = std::make_shared<NCA>(file, nullptr, 0);
290 if (nca->IsUpdate()) { 290 if (nca->IsUpdate()) {
291 continue; 291 continue;
292 } 292 }
diff --git a/src/core/file_sys/card_image.h b/src/core/file_sys/card_image.h
index c2ee0ea99..e1b136426 100644
--- a/src/core/file_sys/card_image.h
+++ b/src/core/file_sys/card_image.h
@@ -140,6 +140,6 @@ private:
140 140
141 u64 update_normal_partition_end; 141 u64 update_normal_partition_end;
142 142
143 Core::Crypto::KeyManager keys; 143 Core::Crypto::KeyManager& keys = Core::Crypto::KeyManager::Instance();
144}; 144};
145} // namespace FileSys 145} // namespace FileSys
diff --git a/src/core/file_sys/content_archive.cpp b/src/core/file_sys/content_archive.cpp
index b8bbdd1ef..473245d5a 100644
--- a/src/core/file_sys/content_archive.cpp
+++ b/src/core/file_sys/content_archive.cpp
@@ -118,9 +118,8 @@ static bool IsValidNCA(const NCAHeader& header) {
118 return header.magic == Common::MakeMagic('N', 'C', 'A', '3'); 118 return header.magic == Common::MakeMagic('N', 'C', 'A', '3');
119} 119}
120 120
121NCA::NCA(VirtualFile file_, VirtualFile bktr_base_romfs_, u64 bktr_base_ivfc_offset, 121NCA::NCA(VirtualFile file_, VirtualFile bktr_base_romfs_, u64 bktr_base_ivfc_offset)
122 Core::Crypto::KeyManager keys_) 122 : file(std::move(file_)), bktr_base_romfs(std::move(bktr_base_romfs_)) {
123 : file(std::move(file_)), bktr_base_romfs(std::move(bktr_base_romfs_)), keys(std::move(keys_)) {
124 if (file == nullptr) { 123 if (file == nullptr) {
125 status = Loader::ResultStatus::ErrorNullFile; 124 status = Loader::ResultStatus::ErrorNullFile;
126 return; 125 return;
diff --git a/src/core/file_sys/content_archive.h b/src/core/file_sys/content_archive.h
index e249079b5..d25cbcf91 100644
--- a/src/core/file_sys/content_archive.h
+++ b/src/core/file_sys/content_archive.h
@@ -99,8 +99,7 @@ inline bool IsDirectoryLogoPartition(const VirtualDir& pfs) {
99class NCA : public ReadOnlyVfsDirectory { 99class NCA : public ReadOnlyVfsDirectory {
100public: 100public:
101 explicit NCA(VirtualFile file, VirtualFile bktr_base_romfs = nullptr, 101 explicit NCA(VirtualFile file, VirtualFile bktr_base_romfs = nullptr,
102 u64 bktr_base_ivfc_offset = 0, 102 u64 bktr_base_ivfc_offset = 0);
103 Core::Crypto::KeyManager keys = Core::Crypto::KeyManager());
104 ~NCA() override; 103 ~NCA() override;
105 104
106 Loader::ResultStatus GetStatus() const; 105 Loader::ResultStatus GetStatus() const;
@@ -159,7 +158,7 @@ private:
159 bool encrypted = false; 158 bool encrypted = false;
160 bool is_update = false; 159 bool is_update = false;
161 160
162 Core::Crypto::KeyManager keys; 161 Core::Crypto::KeyManager& keys = Core::Crypto::KeyManager::Instance();
163}; 162};
164 163
165} // namespace FileSys 164} // namespace FileSys
diff --git a/src/core/file_sys/registered_cache.cpp b/src/core/file_sys/registered_cache.cpp
index ba5f76288..27c1b0233 100644
--- a/src/core/file_sys/registered_cache.cpp
+++ b/src/core/file_sys/registered_cache.cpp
@@ -408,7 +408,7 @@ void RegisteredCache::ProcessFiles(const std::vector<NcaID>& ids) {
408 408
409 if (file == nullptr) 409 if (file == nullptr)
410 continue; 410 continue;
411 const auto nca = std::make_shared<NCA>(parser(file, id), nullptr, 0, keys); 411 const auto nca = std::make_shared<NCA>(parser(file, id), nullptr, 0);
412 if (nca->GetStatus() != Loader::ResultStatus::Success || 412 if (nca->GetStatus() != Loader::ResultStatus::Success ||
413 nca->GetType() != NCAContentType::Meta) { 413 nca->GetType() != NCAContentType::Meta) {
414 continue; 414 continue;
@@ -486,7 +486,7 @@ std::unique_ptr<NCA> RegisteredCache::GetEntry(u64 title_id, ContentRecordType t
486 const auto raw = GetEntryRaw(title_id, type); 486 const auto raw = GetEntryRaw(title_id, type);
487 if (raw == nullptr) 487 if (raw == nullptr)
488 return nullptr; 488 return nullptr;
489 return std::make_unique<NCA>(raw, nullptr, 0, keys); 489 return std::make_unique<NCA>(raw, nullptr, 0);
490} 490}
491 491
492template <typename T> 492template <typename T>
@@ -865,7 +865,7 @@ std::unique_ptr<NCA> ManualContentProvider::GetEntry(u64 title_id, ContentRecord
865 const auto res = GetEntryRaw(title_id, type); 865 const auto res = GetEntryRaw(title_id, type);
866 if (res == nullptr) 866 if (res == nullptr)
867 return nullptr; 867 return nullptr;
868 return std::make_unique<NCA>(res, nullptr, 0, keys); 868 return std::make_unique<NCA>(res, nullptr, 0);
869} 869}
870 870
871std::vector<ContentProviderEntry> ManualContentProvider::ListEntriesFilter( 871std::vector<ContentProviderEntry> ManualContentProvider::ListEntriesFilter(
diff --git a/src/core/file_sys/registered_cache.h b/src/core/file_sys/registered_cache.h
index d1eec240e..f339cd17b 100644
--- a/src/core/file_sys/registered_cache.h
+++ b/src/core/file_sys/registered_cache.h
@@ -88,7 +88,7 @@ public:
88 88
89protected: 89protected:
90 // A single instance of KeyManager to be used by GetEntry() 90 // A single instance of KeyManager to be used by GetEntry()
91 Core::Crypto::KeyManager keys; 91 Core::Crypto::KeyManager& keys = Core::Crypto::KeyManager::Instance();
92}; 92};
93 93
94class PlaceholderCache { 94class PlaceholderCache {
diff --git a/src/core/file_sys/submission_package.cpp b/src/core/file_sys/submission_package.cpp
index ef3084681..175a8266a 100644
--- a/src/core/file_sys/submission_package.cpp
+++ b/src/core/file_sys/submission_package.cpp
@@ -21,7 +21,7 @@
21namespace FileSys { 21namespace FileSys {
22namespace { 22namespace {
23void SetTicketKeys(const std::vector<VirtualFile>& files) { 23void SetTicketKeys(const std::vector<VirtualFile>& files) {
24 Core::Crypto::KeyManager keys; 24 auto& keys = Core::Crypto::KeyManager::Instance();
25 25
26 for (const auto& ticket_file : files) { 26 for (const auto& ticket_file : files) {
27 if (ticket_file == nullptr) { 27 if (ticket_file == nullptr) {
@@ -285,7 +285,7 @@ void NSP::ReadNCAs(const std::vector<VirtualFile>& files) {
285 continue; 285 continue;
286 } 286 }
287 287
288 auto next_nca = std::make_shared<NCA>(std::move(next_file), nullptr, 0, keys); 288 auto next_nca = std::make_shared<NCA>(std::move(next_file), nullptr, 0);
289 if (next_nca->GetType() == NCAContentType::Program) { 289 if (next_nca->GetType() == NCAContentType::Program) {
290 program_status[cnmt.GetTitleID()] = next_nca->GetStatus(); 290 program_status[cnmt.GetTitleID()] = next_nca->GetStatus();
291 } 291 }
diff --git a/src/core/file_sys/submission_package.h b/src/core/file_sys/submission_package.h
index ee9b6ce17..cf89de6a9 100644
--- a/src/core/file_sys/submission_package.h
+++ b/src/core/file_sys/submission_package.h
@@ -73,7 +73,7 @@ private:
73 std::map<u64, std::map<std::pair<TitleType, ContentRecordType>, std::shared_ptr<NCA>>> ncas; 73 std::map<u64, std::map<std::pair<TitleType, ContentRecordType>, std::shared_ptr<NCA>>> ncas;
74 std::vector<VirtualFile> ticket_files; 74 std::vector<VirtualFile> ticket_files;
75 75
76 Core::Crypto::KeyManager keys; 76 Core::Crypto::KeyManager& keys = Core::Crypto::KeyManager::Instance();
77 77
78 VirtualFile romfs; 78 VirtualFile romfs;
79 VirtualDir exefs; 79 VirtualDir exefs;
diff --git a/src/core/file_sys/system_archive/mii_model.cpp b/src/core/file_sys/system_archive/mii_model.cpp
index 6a9add87c..61bb67945 100644
--- a/src/core/file_sys/system_archive/mii_model.cpp
+++ b/src/core/file_sys/system_archive/mii_model.cpp
@@ -40,7 +40,7 @@ VirtualDir MiiModel() {
40 out->AddFile(std::make_shared<ArrayVfsFile<MiiModelData::SHAPE_MID.size()>>( 40 out->AddFile(std::make_shared<ArrayVfsFile<MiiModelData::SHAPE_MID.size()>>(
41 MiiModelData::SHAPE_MID, "ShapeMid.dat")); 41 MiiModelData::SHAPE_MID, "ShapeMid.dat"));
42 42
43 return std::move(out); 43 return out;
44} 44}
45 45
46} // namespace FileSys::SystemArchive 46} // namespace FileSys::SystemArchive
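Dropping the std::move from the return statement is deliberate: when a function returns a local variable by name, the compiler already treats it as an rvalue (and can elide the copy entirely when the types match), so the explicit move is redundant at best and warning material in practice. A tiny illustration with hypothetical function names:

#include <memory>
#include <utility>

std::shared_ptr<int> MakeQuiet() {
    auto p = std::make_shared<int>(1);
    return p;            // moved, or elided outright
}

std::shared_ptr<int> MakePessimized() {
    auto p = std::make_shared<int>(2);
    return std::move(p); // same result, but blocks elision and draws
                         // -Wpessimizing-move on recent clang/gcc
}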
diff --git a/src/core/file_sys/system_archive/shared_font.cpp b/src/core/file_sys/system_archive/shared_font.cpp
index 2c05eb42e..c5cdf7d9b 100644
--- a/src/core/file_sys/system_archive/shared_font.cpp
+++ b/src/core/file_sys/system_archive/shared_font.cpp
@@ -23,7 +23,7 @@ VirtualFile PackBFTTF(const std::array<u8, Size>& data, const std::string& name)
23 23
24 std::vector<u8> bfttf(Size + sizeof(u64)); 24 std::vector<u8> bfttf(Size + sizeof(u64));
25 25
26 u64 offset = 0; 26 size_t offset = 0;
27 Service::NS::EncryptSharedFont(vec, bfttf, offset); 27 Service::NS::EncryptSharedFont(vec, bfttf, offset);
28 return std::make_shared<VectorVfsFile>(std::move(bfttf), name); 28 return std::make_shared<VectorVfsFile>(std::move(bfttf), name);
29} 29}
diff --git a/src/core/file_sys/xts_archive.h b/src/core/file_sys/xts_archive.h
index 7704dee90..563531bb6 100644
--- a/src/core/file_sys/xts_archive.h
+++ b/src/core/file_sys/xts_archive.h
@@ -62,6 +62,6 @@ private:
62 62
63 VirtualFile dec_file; 63 VirtualFile dec_file;
64 64
65 Core::Crypto::KeyManager keys; 65 Core::Crypto::KeyManager& keys = Core::Crypto::KeyManager::Instance();
66}; 66};
67} // namespace FileSys 67} // namespace FileSys
diff --git a/src/core/frontend/framebuffer_layout.cpp b/src/core/frontend/framebuffer_layout.cpp
index d0c43447c..c1fbc235b 100644
--- a/src/core/frontend/framebuffer_layout.cpp
+++ b/src/core/frontend/framebuffer_layout.cpp
@@ -29,7 +29,7 @@ FramebufferLayout DefaultFrameLayout(u32 width, u32 height) {
29 29
30 const float window_aspect_ratio = static_cast<float>(height) / width; 30 const float window_aspect_ratio = static_cast<float>(height) / width;
31 const float emulation_aspect_ratio = EmulationAspectRatio( 31 const float emulation_aspect_ratio = EmulationAspectRatio(
32 static_cast<AspectRatio>(Settings::values.aspect_ratio), window_aspect_ratio); 32 static_cast<AspectRatio>(Settings::values.aspect_ratio.GetValue()), window_aspect_ratio);
33 33
34 const Common::Rectangle<u32> screen_window_area{0, 0, width, height}; 34 const Common::Rectangle<u32> screen_window_area{0, 0, width, height};
35 Common::Rectangle<u32> screen = MaxRectangle(screen_window_area, emulation_aspect_ratio); 35 Common::Rectangle<u32> screen = MaxRectangle(screen_window_area, emulation_aspect_ratio);
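aspect_ratio is now read through GetValue(), which suggests the option is wrapped in a type that can expose either a global value or a per-title override rather than being a bare integer. A hypothetical minimal wrapper with that shape, purely for illustration (this is not the real Settings::Setting definition):

template <typename T>
class Setting {
public:
    explicit Setting(T initial) : global_value{initial} {}

    void SetValue(T value) { global_value = value; }
    void SetCustomValue(T value) { custom_value = value; use_custom = true; }

    T GetValue() const { return use_custom ? custom_value : global_value; }

private:
    T global_value;
    T custom_value{};
    bool use_custom = false;
};

// Usage: Setting<int> aspect_ratio{0}; const auto ratio = aspect_ratio.GetValue();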
diff --git a/src/core/gdbstub/gdbstub.cpp b/src/core/gdbstub/gdbstub.cpp
index 70c0f8b80..79f22a403 100644
--- a/src/core/gdbstub/gdbstub.cpp
+++ b/src/core/gdbstub/gdbstub.cpp
@@ -35,7 +35,6 @@
35#include "common/swap.h" 35#include "common/swap.h"
36#include "core/arm/arm_interface.h" 36#include "core/arm/arm_interface.h"
37#include "core/core.h" 37#include "core/core.h"
38#include "core/core_manager.h"
39#include "core/gdbstub/gdbstub.h" 38#include "core/gdbstub/gdbstub.h"
40#include "core/hle/kernel/memory/page_table.h" 39#include "core/hle/kernel/memory/page_table.h"
41#include "core/hle/kernel/process.h" 40#include "core/hle/kernel/process.h"
diff --git a/src/core/hardware_properties.h b/src/core/hardware_properties.h
index b04e046ed..456b41e1b 100644
--- a/src/core/hardware_properties.h
+++ b/src/core/hardware_properties.h
@@ -42,6 +42,10 @@ struct EmuThreadHandle {
42 constexpr u32 invalid_handle = 0xFFFFFFFF; 42 constexpr u32 invalid_handle = 0xFFFFFFFF;
43 return {invalid_handle, invalid_handle}; 43 return {invalid_handle, invalid_handle};
44 } 44 }
45
46 bool IsInvalid() const {
47 return (*this) == InvalidHandle();
48 }
45}; 49};
46 50
47} // namespace Core 51} // namespace Core
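IsInvalid() is a convenience over the existing sentinel: a handle whose fields are all 0xFFFFFFFF marks "no thread", and validity is just a comparison against that constant. A small illustrative version of the pattern (the type and field names below are made up for the sketch):

#include <cstdint>

struct ThreadHandle {
    std::uint32_t host_handle;
    std::uint32_t guest_handle;

    static constexpr ThreadHandle Invalid() {
        return {0xFFFFFFFF, 0xFFFFFFFF};
    }
    constexpr bool operator==(const ThreadHandle& other) const {
        return host_handle == other.host_handle && guest_handle == other.guest_handle;
    }
    constexpr bool IsInvalid() const {
        return *this == Invalid();
    }
};

static_assert(ThreadHandle::Invalid().IsInvalid());
static_assert(!ThreadHandle{0, 0}.IsInvalid());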
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
index 8475b698c..4d2a9b35d 100644
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ b/src/core/hle/kernel/address_arbiter.cpp
@@ -7,11 +7,15 @@
7 7
8#include "common/assert.h" 8#include "common/assert.h"
9#include "common/common_types.h" 9#include "common/common_types.h"
10#include "core/arm/exclusive_monitor.h"
10#include "core/core.h" 11#include "core/core.h"
11#include "core/hle/kernel/address_arbiter.h" 12#include "core/hle/kernel/address_arbiter.h"
12#include "core/hle/kernel/errors.h" 13#include "core/hle/kernel/errors.h"
14#include "core/hle/kernel/handle_table.h"
15#include "core/hle/kernel/kernel.h"
13#include "core/hle/kernel/scheduler.h" 16#include "core/hle/kernel/scheduler.h"
14#include "core/hle/kernel/thread.h" 17#include "core/hle/kernel/thread.h"
18#include "core/hle/kernel/time_manager.h"
15#include "core/hle/result.h" 19#include "core/hle/result.h"
16#include "core/memory.h" 20#include "core/memory.h"
17 21
@@ -20,6 +24,7 @@ namespace Kernel {
20// Wake up num_to_wake (or all) threads in a vector. 24// Wake up num_to_wake (or all) threads in a vector.
21void AddressArbiter::WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads, 25void AddressArbiter::WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads,
22 s32 num_to_wake) { 26 s32 num_to_wake) {
27 auto& time_manager = system.Kernel().TimeManager();
23 // Only process up to 'target' threads, unless 'target' is <= 0, in which case process 28 // Only process up to 'target' threads, unless 'target' is <= 0, in which case process
24 // them all. 29 // them all.
25 std::size_t last = waiting_threads.size(); 30 std::size_t last = waiting_threads.size();
@@ -29,12 +34,10 @@ void AddressArbiter::WakeThreads(const std::vector<std::shared_ptr<Thread>>& wai
29 34
30 // Signal the waiting threads. 35 // Signal the waiting threads.
31 for (std::size_t i = 0; i < last; i++) { 36 for (std::size_t i = 0; i < last; i++) {
32 ASSERT(waiting_threads[i]->GetStatus() == ThreadStatus::WaitArb); 37 waiting_threads[i]->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
33 waiting_threads[i]->SetWaitSynchronizationResult(RESULT_SUCCESS);
34 RemoveThread(waiting_threads[i]); 38 RemoveThread(waiting_threads[i]);
35 waiting_threads[i]->SetArbiterWaitAddress(0); 39 waiting_threads[i]->WaitForArbitration(false);
36 waiting_threads[i]->ResumeFromWait(); 40 waiting_threads[i]->ResumeFromWait();
37 system.PrepareReschedule(waiting_threads[i]->GetProcessorID());
38 } 41 }
39} 42}
40 43
@@ -56,6 +59,7 @@ ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 v
56} 59}
57 60
58ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) { 61ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
62 SchedulerLock lock(system.Kernel());
59 const std::vector<std::shared_ptr<Thread>> waiting_threads = 63 const std::vector<std::shared_ptr<Thread>> waiting_threads =
60 GetThreadsWaitingOnAddress(address); 64 GetThreadsWaitingOnAddress(address);
61 WakeThreads(waiting_threads, num_to_wake); 65 WakeThreads(waiting_threads, num_to_wake);
@@ -64,6 +68,7 @@ ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
64 68
65ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, 69ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value,
66 s32 num_to_wake) { 70 s32 num_to_wake) {
71 SchedulerLock lock(system.Kernel());
67 auto& memory = system.Memory(); 72 auto& memory = system.Memory();
68 73
69 // Ensure that we can write to the address. 74 // Ensure that we can write to the address.
@@ -71,16 +76,24 @@ ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32
71 return ERR_INVALID_ADDRESS_STATE; 76 return ERR_INVALID_ADDRESS_STATE;
72 } 77 }
73 78
74 if (static_cast<s32>(memory.Read32(address)) != value) { 79 const std::size_t current_core = system.CurrentCoreIndex();
75 return ERR_INVALID_STATE; 80 auto& monitor = system.Monitor();
76 } 81 u32 current_value;
82 do {
83 current_value = monitor.ExclusiveRead32(current_core, address);
84
85 if (current_value != value) {
86 return ERR_INVALID_STATE;
87 }
88 current_value++;
89 } while (!monitor.ExclusiveWrite32(current_core, address, current_value));
77 90
78 memory.Write32(address, static_cast<u32>(value + 1));
79 return SignalToAddressOnly(address, num_to_wake); 91 return SignalToAddressOnly(address, num_to_wake);
80} 92}
81 93
82ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value, 94ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
83 s32 num_to_wake) { 95 s32 num_to_wake) {
96 SchedulerLock lock(system.Kernel());
84 auto& memory = system.Memory(); 97 auto& memory = system.Memory();
85 98
86 // Ensure that we can write to the address. 99 // Ensure that we can write to the address.
@@ -92,29 +105,33 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a
92 const std::vector<std::shared_ptr<Thread>> waiting_threads = 105 const std::vector<std::shared_ptr<Thread>> waiting_threads =
93 GetThreadsWaitingOnAddress(address); 106 GetThreadsWaitingOnAddress(address);
94 107
95 // Determine the modified value depending on the waiting count. 108 const std::size_t current_core = system.CurrentCoreIndex();
109 auto& monitor = system.Monitor();
96 s32 updated_value; 110 s32 updated_value;
97 if (num_to_wake <= 0) { 111 do {
98 if (waiting_threads.empty()) { 112 updated_value = monitor.ExclusiveRead32(current_core, address);
99 updated_value = value + 1; 113
100 } else { 114 if (updated_value != value) {
101 updated_value = value - 1; 115 return ERR_INVALID_STATE;
102 } 116 }
103 } else { 117 // Determine the modified value depending on the waiting count.
104 if (waiting_threads.empty()) { 118 if (num_to_wake <= 0) {
105 updated_value = value + 1; 119 if (waiting_threads.empty()) {
106 } else if (waiting_threads.size() <= static_cast<u32>(num_to_wake)) { 120 updated_value = value + 1;
107 updated_value = value - 1; 121 } else {
122 updated_value = value - 1;
123 }
108 } else { 124 } else {
109 updated_value = value; 125 if (waiting_threads.empty()) {
126 updated_value = value + 1;
127 } else if (waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
128 updated_value = value - 1;
129 } else {
130 updated_value = value;
131 }
110 } 132 }
111 } 133 } while (!monitor.ExclusiveWrite32(current_core, address, updated_value));
112 134
113 if (static_cast<s32>(memory.Read32(address)) != value) {
114 return ERR_INVALID_STATE;
115 }
116
117 memory.Write32(address, static_cast<u32>(updated_value));
118 WakeThreads(waiting_threads, num_to_wake); 135 WakeThreads(waiting_threads, num_to_wake);
119 return RESULT_SUCCESS; 136 return RESULT_SUCCESS;
120} 137}
@@ -136,60 +153,127 @@ ResultCode AddressArbiter::WaitForAddress(VAddr address, ArbitrationType type, s
136ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout, 153ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
137 bool should_decrement) { 154 bool should_decrement) {
138 auto& memory = system.Memory(); 155 auto& memory = system.Memory();
156 auto& kernel = system.Kernel();
157 Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
139 158
140 // Ensure that we can read the address. 159 Handle event_handle = InvalidHandle;
141 if (!memory.IsValidVirtualAddress(address)) { 160 {
142 return ERR_INVALID_ADDRESS_STATE; 161 SchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
143 } 162
163 if (current_thread->IsPendingTermination()) {
164 lock.CancelSleep();
165 return ERR_THREAD_TERMINATING;
166 }
167
168 // Ensure that we can read the address.
169 if (!memory.IsValidVirtualAddress(address)) {
170 lock.CancelSleep();
171 return ERR_INVALID_ADDRESS_STATE;
172 }
173
174 s32 current_value = static_cast<s32>(memory.Read32(address));
175 if (current_value >= value) {
176 lock.CancelSleep();
177 return ERR_INVALID_STATE;
178 }
179
180 current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
181
182 s32 decrement_value;
183
184 const std::size_t current_core = system.CurrentCoreIndex();
185 auto& monitor = system.Monitor();
186 do {
187 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
188 if (should_decrement) {
189 decrement_value = current_value - 1;
190 } else {
191 decrement_value = current_value;
192 }
193 } while (
194 !monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(decrement_value)));
195
196 // Short-circuit without rescheduling, if timeout is zero.
197 if (timeout == 0) {
198 lock.CancelSleep();
199 return RESULT_TIMEOUT;
200 }
144 201
145 const s32 cur_value = static_cast<s32>(memory.Read32(address)); 202 current_thread->SetArbiterWaitAddress(address);
146 if (cur_value >= value) { 203 InsertThread(SharedFrom(current_thread));
147 return ERR_INVALID_STATE; 204 current_thread->SetStatus(ThreadStatus::WaitArb);
205 current_thread->WaitForArbitration(true);
148 } 206 }
149 207
150 if (should_decrement) { 208 if (event_handle != InvalidHandle) {
151 memory.Write32(address, static_cast<u32>(cur_value - 1)); 209 auto& time_manager = kernel.TimeManager();
210 time_manager.UnscheduleTimeEvent(event_handle);
152 } 211 }
153 212
154 // Short-circuit without rescheduling, if timeout is zero. 213 {
155 if (timeout == 0) { 214 SchedulerLock lock(kernel);
156 return RESULT_TIMEOUT; 215 if (current_thread->IsWaitingForArbitration()) {
216 RemoveThread(SharedFrom(current_thread));
217 current_thread->WaitForArbitration(false);
218 }
157 } 219 }
158 220
159 return WaitForAddressImpl(address, timeout); 221 return current_thread->GetSignalingResult();
160} 222}
161 223
162ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) { 224ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) {
163 auto& memory = system.Memory(); 225 auto& memory = system.Memory();
226 auto& kernel = system.Kernel();
227 Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
164 228
165 // Ensure that we can read the address. 229 Handle event_handle = InvalidHandle;
166 if (!memory.IsValidVirtualAddress(address)) { 230 {
167 return ERR_INVALID_ADDRESS_STATE; 231 SchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
168 } 232
233 if (current_thread->IsPendingTermination()) {
234 lock.CancelSleep();
235 return ERR_THREAD_TERMINATING;
236 }
237
238 // Ensure that we can read the address.
239 if (!memory.IsValidVirtualAddress(address)) {
240 lock.CancelSleep();
241 return ERR_INVALID_ADDRESS_STATE;
242 }
169 243
170 // Only wait for the address if equal. 244 s32 current_value = static_cast<s32>(memory.Read32(address));
171 if (static_cast<s32>(memory.Read32(address)) != value) { 245 if (current_value != value) {
172 return ERR_INVALID_STATE; 246 lock.CancelSleep();
247 return ERR_INVALID_STATE;
248 }
249
250 // Short-circuit without rescheduling, if timeout is zero.
251 if (timeout == 0) {
252 lock.CancelSleep();
253 return RESULT_TIMEOUT;
254 }
255
256 current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
257 current_thread->SetArbiterWaitAddress(address);
258 InsertThread(SharedFrom(current_thread));
259 current_thread->SetStatus(ThreadStatus::WaitArb);
260 current_thread->WaitForArbitration(true);
173 } 261 }
174 262
175 // Short-circuit without rescheduling if timeout is zero. 263 if (event_handle != InvalidHandle) {
176 if (timeout == 0) { 264 auto& time_manager = kernel.TimeManager();
177 return RESULT_TIMEOUT; 265 time_manager.UnscheduleTimeEvent(event_handle);
178 } 266 }
179 267
180 return WaitForAddressImpl(address, timeout); 268 {
181} 269 SchedulerLock lock(kernel);
270 if (current_thread->IsWaitingForArbitration()) {
271 RemoveThread(SharedFrom(current_thread));
272 current_thread->WaitForArbitration(false);
273 }
274 }
182 275
183ResultCode AddressArbiter::WaitForAddressImpl(VAddr address, s64 timeout) { 276 return current_thread->GetSignalingResult();
184 Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
185 current_thread->SetArbiterWaitAddress(address);
186 InsertThread(SharedFrom(current_thread));
187 current_thread->SetStatus(ThreadStatus::WaitArb);
188 current_thread->InvalidateWakeupCallback();
189 current_thread->WakeAfterDelay(timeout);
190
191 system.PrepareReschedule(current_thread->GetProcessorID());
192 return RESULT_TIMEOUT;
193} 277}
194 278
195void AddressArbiter::HandleWakeupThread(std::shared_ptr<Thread> thread) { 279void AddressArbiter::HandleWakeupThread(std::shared_ptr<Thread> thread) {
@@ -221,9 +305,9 @@ void AddressArbiter::RemoveThread(std::shared_ptr<Thread> thread) {
221 const auto iter = std::find_if(thread_list.cbegin(), thread_list.cend(), 305 const auto iter = std::find_if(thread_list.cbegin(), thread_list.cend(),
222 [&thread](const auto& entry) { return thread == entry; }); 306 [&thread](const auto& entry) { return thread == entry; });
223 307
224 ASSERT(iter != thread_list.cend()); 308 if (iter != thread_list.cend()) {
225 309 thread_list.erase(iter);
226 thread_list.erase(iter); 310 }
227} 311}
228 312
229std::vector<std::shared_ptr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress( 313std::vector<std::shared_ptr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress(
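The arbiter's read-modify-write paths now go through the exclusive monitor: read the word exclusively, compute the update, and retry the whole step if another core wrote to the address in the meantime, instead of using a plain Read32/Write32 pair. The same lock-free pattern can be expressed with a std::atomic compare-exchange loop; this sketch mirrors IncrementAndSignalToAddressIfEqual in miniature (guest memory and the monitor itself are not modelled):

#include <atomic>
#include <cstdint>

// Increment `word` only if it currently equals `expected_value`. Returns false
// when the value does not match, mirroring the ERR_INVALID_STATE early-out.
bool IncrementIfEqual(std::atomic<std::int32_t>& word, std::int32_t expected_value) {
    std::int32_t current = word.load();
    do {
        if (current != expected_value) {
            return false; // another core changed the value; give up
        }
        // If `word` still holds `current`, store current + 1; otherwise `current`
        // is refreshed with the latest value and the loop retries.
    } while (!word.compare_exchange_weak(current, current + 1));
    return true;
}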
diff --git a/src/core/hle/kernel/address_arbiter.h b/src/core/hle/kernel/address_arbiter.h
index f958eee5a..0b05d533c 100644
--- a/src/core/hle/kernel/address_arbiter.h
+++ b/src/core/hle/kernel/address_arbiter.h
@@ -73,9 +73,6 @@ private:
73 /// Waits on an address if the value passed is equal to the argument value. 73 /// Waits on an address if the value passed is equal to the argument value.
74 ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout); 74 ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout);
75 75
76 // Waits on the given address with a timeout in nanoseconds
77 ResultCode WaitForAddressImpl(VAddr address, s64 timeout);
78
79 /// Wake up num_to_wake (or all) threads in a vector. 76 /// Wake up num_to_wake (or all) threads in a vector.
80 void WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads, s32 num_to_wake); 77 void WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads, s32 num_to_wake);
81 78
diff --git a/src/core/hle/kernel/client_port.cpp b/src/core/hle/kernel/client_port.cpp
index 5498fd313..8aff2227a 100644
--- a/src/core/hle/kernel/client_port.cpp
+++ b/src/core/hle/kernel/client_port.cpp
@@ -34,7 +34,7 @@ ResultVal<std::shared_ptr<ClientSession>> ClientPort::Connect() {
34 } 34 }
35 35
36 // Wake the threads waiting on the ServerPort 36 // Wake the threads waiting on the ServerPort
37 server_port->WakeupAllWaitingThreads(); 37 server_port->Signal();
38 38
39 return MakeResult(std::move(client)); 39 return MakeResult(std::move(client));
40} 40}
diff --git a/src/core/hle/kernel/errors.h b/src/core/hle/kernel/errors.h
index 29bfa3621..d4e5d88cf 100644
--- a/src/core/hle/kernel/errors.h
+++ b/src/core/hle/kernel/errors.h
@@ -12,6 +12,7 @@ namespace Kernel {
12 12
13constexpr ResultCode ERR_MAX_CONNECTIONS_REACHED{ErrorModule::Kernel, 7}; 13constexpr ResultCode ERR_MAX_CONNECTIONS_REACHED{ErrorModule::Kernel, 7};
14constexpr ResultCode ERR_INVALID_CAPABILITY_DESCRIPTOR{ErrorModule::Kernel, 14}; 14constexpr ResultCode ERR_INVALID_CAPABILITY_DESCRIPTOR{ErrorModule::Kernel, 14};
15constexpr ResultCode ERR_THREAD_TERMINATING{ErrorModule::Kernel, 59};
15constexpr ResultCode ERR_INVALID_SIZE{ErrorModule::Kernel, 101}; 16constexpr ResultCode ERR_INVALID_SIZE{ErrorModule::Kernel, 101};
16constexpr ResultCode ERR_INVALID_ADDRESS{ErrorModule::Kernel, 102}; 17constexpr ResultCode ERR_INVALID_ADDRESS{ErrorModule::Kernel, 102};
17constexpr ResultCode ERR_OUT_OF_RESOURCES{ErrorModule::Kernel, 103}; 18constexpr ResultCode ERR_OUT_OF_RESOURCES{ErrorModule::Kernel, 103};
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index ba0eac4c2..9277b5d08 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -14,14 +14,17 @@
14#include "common/common_types.h" 14#include "common/common_types.h"
15#include "common/logging/log.h" 15#include "common/logging/log.h"
16#include "core/hle/ipc_helpers.h" 16#include "core/hle/ipc_helpers.h"
17#include "core/hle/kernel/errors.h"
17#include "core/hle/kernel/handle_table.h" 18#include "core/hle/kernel/handle_table.h"
18#include "core/hle/kernel/hle_ipc.h" 19#include "core/hle/kernel/hle_ipc.h"
19#include "core/hle/kernel/kernel.h" 20#include "core/hle/kernel/kernel.h"
20#include "core/hle/kernel/object.h" 21#include "core/hle/kernel/object.h"
21#include "core/hle/kernel/process.h" 22#include "core/hle/kernel/process.h"
22#include "core/hle/kernel/readable_event.h" 23#include "core/hle/kernel/readable_event.h"
24#include "core/hle/kernel/scheduler.h"
23#include "core/hle/kernel/server_session.h" 25#include "core/hle/kernel/server_session.h"
24#include "core/hle/kernel/thread.h" 26#include "core/hle/kernel/thread.h"
27#include "core/hle/kernel/time_manager.h"
25#include "core/hle/kernel/writable_event.h" 28#include "core/hle/kernel/writable_event.h"
26#include "core/memory.h" 29#include "core/memory.h"
27 30
@@ -46,15 +49,6 @@ std::shared_ptr<WritableEvent> HLERequestContext::SleepClientThread(
46 const std::string& reason, u64 timeout, WakeupCallback&& callback, 49 const std::string& reason, u64 timeout, WakeupCallback&& callback,
47 std::shared_ptr<WritableEvent> writable_event) { 50 std::shared_ptr<WritableEvent> writable_event) {
48 // Put the client thread to sleep until the wait event is signaled or the timeout expires. 51 // Put the client thread to sleep until the wait event is signaled or the timeout expires.
49 thread->SetWakeupCallback(
50 [context = *this, callback](ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
51 std::shared_ptr<SynchronizationObject> object,
52 std::size_t index) mutable -> bool {
53 ASSERT(thread->GetStatus() == ThreadStatus::WaitHLEEvent);
54 callback(thread, context, reason);
55 context.WriteToOutgoingCommandBuffer(*thread);
56 return true;
57 });
58 52
59 if (!writable_event) { 53 if (!writable_event) {
60 // Create event if not provided 54 // Create event if not provided
@@ -62,14 +56,26 @@ std::shared_ptr<WritableEvent> HLERequestContext::SleepClientThread(
62 writable_event = pair.writable; 56 writable_event = pair.writable;
63 } 57 }
64 58
65 const auto readable_event{writable_event->GetReadableEvent()}; 59 {
66 writable_event->Clear(); 60 Handle event_handle = InvalidHandle;
67 thread->SetStatus(ThreadStatus::WaitHLEEvent); 61 SchedulerLockAndSleep lock(kernel, event_handle, thread.get(), timeout);
68 thread->SetSynchronizationObjects({readable_event}); 62 thread->SetHLECallback(
69 readable_event->AddWaitingThread(thread); 63 [context = *this, callback](std::shared_ptr<Thread> thread) mutable -> bool {
70 64 ThreadWakeupReason reason = thread->GetSignalingResult() == RESULT_TIMEOUT
71 if (timeout > 0) { 65 ? ThreadWakeupReason::Timeout
72 thread->WakeAfterDelay(timeout); 66 : ThreadWakeupReason::Signal;
67 callback(thread, context, reason);
68 context.WriteToOutgoingCommandBuffer(*thread);
69 return true;
70 });
71 const auto readable_event{writable_event->GetReadableEvent()};
72 writable_event->Clear();
73 thread->SetHLESyncObject(readable_event.get());
74 thread->SetStatus(ThreadStatus::WaitHLEEvent);
75 thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
76 readable_event->AddWaitingThread(thread);
77 lock.Release();
78 thread->SetHLETimeEvent(event_handle);
73 } 79 }
74 80
75 is_thread_waiting = true; 81 is_thread_waiting = true;
@@ -282,18 +288,18 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(Thread& thread) {
282} 288}
283 289
284std::vector<u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const { 290std::vector<u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const {
285 std::vector<u8> buffer; 291 std::vector<u8> buffer{};
286 const bool is_buffer_a{BufferDescriptorA().size() > buffer_index && 292 const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
287 BufferDescriptorA()[buffer_index].Size()}; 293 BufferDescriptorA()[buffer_index].Size()};
288 294
289 if (is_buffer_a) { 295 if (is_buffer_a) {
290 ASSERT_MSG(BufferDescriptorA().size() > buffer_index, 296 ASSERT_OR_EXECUTE_MSG(BufferDescriptorA().size() > buffer_index, { return buffer; },
291 "BufferDescriptorA invalid buffer_index {}", buffer_index); 297 "BufferDescriptorA invalid buffer_index {}", buffer_index);
292 buffer.resize(BufferDescriptorA()[buffer_index].Size()); 298 buffer.resize(BufferDescriptorA()[buffer_index].Size());
293 memory.ReadBlock(BufferDescriptorA()[buffer_index].Address(), buffer.data(), buffer.size()); 299 memory.ReadBlock(BufferDescriptorA()[buffer_index].Address(), buffer.data(), buffer.size());
294 } else { 300 } else {
295 ASSERT_MSG(BufferDescriptorX().size() > buffer_index, 301 ASSERT_OR_EXECUTE_MSG(BufferDescriptorX().size() > buffer_index, { return buffer; },
296 "BufferDescriptorX invalid buffer_index {}", buffer_index); 302 "BufferDescriptorX invalid buffer_index {}", buffer_index);
297 buffer.resize(BufferDescriptorX()[buffer_index].Size()); 303 buffer.resize(BufferDescriptorX()[buffer_index].Size());
298 memory.ReadBlock(BufferDescriptorX()[buffer_index].Address(), buffer.data(), buffer.size()); 304 memory.ReadBlock(BufferDescriptorX()[buffer_index].Address(), buffer.data(), buffer.size());
299 } 305 }
@@ -318,16 +324,16 @@ std::size_t HLERequestContext::WriteBuffer(const void* buffer, std::size_t size,
318 } 324 }
319 325
320 if (is_buffer_b) { 326 if (is_buffer_b) {
321 ASSERT_MSG(BufferDescriptorB().size() > buffer_index, 327 ASSERT_OR_EXECUTE_MSG(BufferDescriptorB().size() > buffer_index &&
322 "BufferDescriptorB invalid buffer_index {}", buffer_index); 328 BufferDescriptorB()[buffer_index].Size() >= size,
323 ASSERT_MSG(BufferDescriptorB()[buffer_index].Size() >= size, 329 { return 0; }, "BufferDescriptorB is invalid, index={}, size={}",
324 "BufferDescriptorB buffer_index {} is not large enough", buffer_index); 330 buffer_index, size);
325 memory.WriteBlock(BufferDescriptorB()[buffer_index].Address(), buffer, size); 331 memory.WriteBlock(BufferDescriptorB()[buffer_index].Address(), buffer, size);
326 } else { 332 } else {
327 ASSERT_MSG(BufferDescriptorC().size() > buffer_index, 333 ASSERT_OR_EXECUTE_MSG(BufferDescriptorC().size() > buffer_index &&
328 "BufferDescriptorC invalid buffer_index {}", buffer_index); 334 BufferDescriptorC()[buffer_index].Size() >= size,
329 ASSERT_MSG(BufferDescriptorC()[buffer_index].Size() >= size, 335 { return 0; }, "BufferDescriptorC is invalid, index={}, size={}",
330 "BufferDescriptorC buffer_index {} is not large enough", buffer_index); 336 buffer_index, size);
331 memory.WriteBlock(BufferDescriptorC()[buffer_index].Address(), buffer, size); 337 memory.WriteBlock(BufferDescriptorC()[buffer_index].Address(), buffer, size);
332 } 338 }
333 339
@@ -338,16 +344,12 @@ std::size_t HLERequestContext::GetReadBufferSize(std::size_t buffer_index) const
338 const bool is_buffer_a{BufferDescriptorA().size() > buffer_index && 344 const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
339 BufferDescriptorA()[buffer_index].Size()}; 345 BufferDescriptorA()[buffer_index].Size()};
340 if (is_buffer_a) { 346 if (is_buffer_a) {
341 ASSERT_MSG(BufferDescriptorA().size() > buffer_index, 347 ASSERT_OR_EXECUTE_MSG(BufferDescriptorA().size() > buffer_index, { return 0; },
342 "BufferDescriptorA invalid buffer_index {}", buffer_index); 348 "BufferDescriptorA invalid buffer_index {}", buffer_index);
343 ASSERT_MSG(BufferDescriptorA()[buffer_index].Size() > 0,
344 "BufferDescriptorA buffer_index {} is empty", buffer_index);
345 return BufferDescriptorA()[buffer_index].Size(); 349 return BufferDescriptorA()[buffer_index].Size();
346 } else { 350 } else {
347 ASSERT_MSG(BufferDescriptorX().size() > buffer_index, 351 ASSERT_OR_EXECUTE_MSG(BufferDescriptorX().size() > buffer_index, { return 0; },
348 "BufferDescriptorX invalid buffer_index {}", buffer_index); 352 "BufferDescriptorX invalid buffer_index {}", buffer_index);
349 ASSERT_MSG(BufferDescriptorX()[buffer_index].Size() > 0,
350 "BufferDescriptorX buffer_index {} is empty", buffer_index);
351 return BufferDescriptorX()[buffer_index].Size(); 353 return BufferDescriptorX()[buffer_index].Size();
352 } 354 }
353} 355}
@@ -356,14 +358,15 @@ std::size_t HLERequestContext::GetWriteBufferSize(std::size_t buffer_index) cons
356 const bool is_buffer_b{BufferDescriptorB().size() > buffer_index && 358 const bool is_buffer_b{BufferDescriptorB().size() > buffer_index &&
357 BufferDescriptorB()[buffer_index].Size()}; 359 BufferDescriptorB()[buffer_index].Size()};
358 if (is_buffer_b) { 360 if (is_buffer_b) {
359 ASSERT_MSG(BufferDescriptorB().size() > buffer_index, 361 ASSERT_OR_EXECUTE_MSG(BufferDescriptorB().size() > buffer_index, { return 0; },
360 "BufferDescriptorB invalid buffer_index {}", buffer_index); 362 "BufferDescriptorB invalid buffer_index {}", buffer_index);
361 return BufferDescriptorB()[buffer_index].Size(); 363 return BufferDescriptorB()[buffer_index].Size();
362 } else { 364 } else {
363 ASSERT_MSG(BufferDescriptorC().size() > buffer_index, 365 ASSERT_OR_EXECUTE_MSG(BufferDescriptorC().size() > buffer_index, { return 0; },
364 "BufferDescriptorC invalid buffer_index {}", buffer_index); 366 "BufferDescriptorC invalid buffer_index {}", buffer_index);
365 return BufferDescriptorC()[buffer_index].Size(); 367 return BufferDescriptorC()[buffer_index].Size();
366 } 368 }
369 return 0;
367} 370}
368 371
369std::string HLERequestContext::Description() const { 372std::string HLERequestContext::Description() const {
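Note on the hunks above: they swap hard ASSERT_MSG aborts for ASSERT_OR_EXECUTE_MSG, so a malformed buffer descriptor now logs and falls back (an empty buffer, or a written size of 0) instead of taking the emulator down. A minimal stand-in for that guarded early-return pattern, assuming a simplified macro rather than the real common/assert.h definition; all names below are illustrative:

#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical stand-in; the real ASSERT_OR_EXECUTE_MSG lives in common/assert.h.
#define ASSERT_OR_EXECUTE_MSG_SKETCH(cond, body, ...) \
    do { \
        if (!(cond)) { \
            std::fprintf(stderr, __VA_ARGS__); \
            body \
        } \
    } while (0)

std::vector<unsigned char> ReadBufferSketch(std::size_t buffer_index, std::size_t descriptor_count) {
    std::vector<unsigned char> buffer;
    // On a bad index, log and return the (still empty) buffer instead of asserting out.
    ASSERT_OR_EXECUTE_MSG_SKETCH(descriptor_count > buffer_index, { return buffer; },
                                 "invalid buffer_index %zu\n", buffer_index);
    buffer.resize(64); // stands in for the descriptor's reported size
    return buffer;
}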
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 7655382fa..1f2af7a1b 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -2,6 +2,7 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <array>
5#include <atomic> 6#include <atomic>
6#include <bitset> 7#include <bitset>
7#include <functional> 8#include <functional>
@@ -13,11 +14,15 @@
13 14
14#include "common/assert.h" 15#include "common/assert.h"
15#include "common/logging/log.h" 16#include "common/logging/log.h"
17#include "common/microprofile.h"
18#include "common/thread.h"
16#include "core/arm/arm_interface.h" 19#include "core/arm/arm_interface.h"
20#include "core/arm/cpu_interrupt_handler.h"
17#include "core/arm/exclusive_monitor.h" 21#include "core/arm/exclusive_monitor.h"
18#include "core/core.h" 22#include "core/core.h"
19#include "core/core_timing.h" 23#include "core/core_timing.h"
20#include "core/core_timing_util.h" 24#include "core/core_timing_util.h"
25#include "core/cpu_manager.h"
21#include "core/device_memory.h" 26#include "core/device_memory.h"
22#include "core/hardware_properties.h" 27#include "core/hardware_properties.h"
23#include "core/hle/kernel/client_port.h" 28#include "core/hle/kernel/client_port.h"
@@ -39,85 +44,28 @@
39#include "core/hle/result.h" 44#include "core/hle/result.h"
40#include "core/memory.h" 45#include "core/memory.h"
41 46
42namespace Kernel { 47MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70));
43
44/**
45 * Callback that will wake up the thread it was scheduled for
46 * @param thread_handle The handle of the thread that's been awoken
47 * @param cycles_late The number of CPU cycles that have passed since the desired wakeup time
48 */
49static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_late) {
50 const auto proper_handle = static_cast<Handle>(thread_handle);
51 const auto& system = Core::System::GetInstance();
52
53 // Lock the global kernel mutex when we enter the kernel HLE.
54 std::lock_guard lock{HLE::g_hle_lock};
55
56 std::shared_ptr<Thread> thread =
57 system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
58 if (thread == nullptr) {
59 LOG_CRITICAL(Kernel, "Callback fired for invalid thread {:08X}", proper_handle);
60 return;
61 }
62
63 bool resume = true;
64
65 if (thread->GetStatus() == ThreadStatus::WaitSynch ||
66 thread->GetStatus() == ThreadStatus::WaitHLEEvent) {
67 // Remove the thread from each of its waiting objects' waitlists
68 for (const auto& object : thread->GetSynchronizationObjects()) {
69 object->RemoveWaitingThread(thread);
70 }
71 thread->ClearSynchronizationObjects();
72
73 // Invoke the wakeup callback before clearing the wait objects
74 if (thread->HasWakeupCallback()) {
75 resume = thread->InvokeWakeupCallback(ThreadWakeupReason::Timeout, thread, nullptr, 0);
76 }
77 } else if (thread->GetStatus() == ThreadStatus::WaitMutex ||
78 thread->GetStatus() == ThreadStatus::WaitCondVar) {
79 thread->SetMutexWaitAddress(0);
80 thread->SetWaitHandle(0);
81 if (thread->GetStatus() == ThreadStatus::WaitCondVar) {
82 thread->GetOwnerProcess()->RemoveConditionVariableThread(thread);
83 thread->SetCondVarWaitAddress(0);
84 }
85
86 auto* const lock_owner = thread->GetLockOwner();
87 // Threads waking up by timeout from WaitProcessWideKey do not perform priority inheritance
88 // and don't have a lock owner unless SignalProcessWideKey was called first and the thread
89 // wasn't awakened due to the mutex already being acquired.
90 if (lock_owner != nullptr) {
91 lock_owner->RemoveMutexWaiter(thread);
92 }
93 }
94 48
95 if (thread->GetStatus() == ThreadStatus::WaitArb) { 49namespace Kernel {
96 auto& address_arbiter = thread->GetOwnerProcess()->GetAddressArbiter();
97 address_arbiter.HandleWakeupThread(thread);
98 }
99
100 if (resume) {
101 if (thread->GetStatus() == ThreadStatus::WaitCondVar ||
102 thread->GetStatus() == ThreadStatus::WaitArb) {
103 thread->SetWaitSynchronizationResult(RESULT_TIMEOUT);
104 }
105 thread->ResumeFromWait();
106 }
107}
108 50
109struct KernelCore::Impl { 51struct KernelCore::Impl {
110 explicit Impl(Core::System& system, KernelCore& kernel) 52 explicit Impl(Core::System& system, KernelCore& kernel)
111 : global_scheduler{kernel}, synchronization{system}, time_manager{system}, system{system} {} 53 : global_scheduler{kernel}, synchronization{system}, time_manager{system}, system{system} {}
112 54
55 void SetMulticore(bool is_multicore) {
56 this->is_multicore = is_multicore;
57 }
58
113 void Initialize(KernelCore& kernel) { 59 void Initialize(KernelCore& kernel) {
114 Shutdown(); 60 Shutdown();
61 RegisterHostThread();
115 62
116 InitializePhysicalCores(); 63 InitializePhysicalCores();
117 InitializeSystemResourceLimit(kernel); 64 InitializeSystemResourceLimit(kernel);
118 InitializeMemoryLayout(); 65 InitializeMemoryLayout();
119 InitializeThreads(); 66 InitializePreemption(kernel);
120 InitializePreemption(); 67 InitializeSchedulers();
68 InitializeSuspendThreads();
121 } 69 }
122 70
123 void Shutdown() { 71 void Shutdown() {
@@ -126,13 +74,26 @@ struct KernelCore::Impl {
126 next_user_process_id = Process::ProcessIDMin; 74 next_user_process_id = Process::ProcessIDMin;
127 next_thread_id = 1; 75 next_thread_id = 1;
128 76
77 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
78 if (suspend_threads[i]) {
79 suspend_threads[i].reset();
80 }
81 }
82
83 for (std::size_t i = 0; i < cores.size(); i++) {
84 cores[i].Shutdown();
85 schedulers[i].reset();
86 }
87 cores.clear();
88
89 registered_core_threads.reset();
90
129 process_list.clear(); 91 process_list.clear();
130 current_process = nullptr; 92 current_process = nullptr;
131 93
132 system_resource_limit = nullptr; 94 system_resource_limit = nullptr;
133 95
134 global_handle_table.Clear(); 96 global_handle_table.Clear();
135 thread_wakeup_event_type = nullptr;
136 preemption_event = nullptr; 97 preemption_event = nullptr;
137 98
138 global_scheduler.Shutdown(); 99 global_scheduler.Shutdown();
@@ -145,13 +106,21 @@ struct KernelCore::Impl {
145 cores.clear(); 106 cores.clear();
146 107
147 exclusive_monitor.reset(); 108 exclusive_monitor.reset();
109 host_thread_ids.clear();
148 } 110 }
149 111
150 void InitializePhysicalCores() { 112 void InitializePhysicalCores() {
151 exclusive_monitor = 113 exclusive_monitor =
152 Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES); 114 Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
153 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { 115 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
154 cores.emplace_back(system, i, *exclusive_monitor); 116 schedulers[i] = std::make_unique<Kernel::Scheduler>(system, i);
117 cores.emplace_back(system, i, *schedulers[i], interrupts[i]);
118 }
119 }
120
121 void InitializeSchedulers() {
122 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
123 cores[i].Scheduler().Initialize();
155 } 124 }
156 } 125 }
157 126
@@ -173,15 +142,13 @@ struct KernelCore::Impl {
173 } 142 }
174 } 143 }
175 144
176 void InitializeThreads() { 145 void InitializePreemption(KernelCore& kernel) {
177 thread_wakeup_event_type = 146 preemption_event = Core::Timing::CreateEvent(
178 Core::Timing::CreateEvent("ThreadWakeupCallback", ThreadWakeupCallback); 147 "PreemptionCallback", [this, &kernel](u64 userdata, s64 cycles_late) {
179 } 148 {
180 149 SchedulerLock lock(kernel);
181 void InitializePreemption() { 150 global_scheduler.PreemptThreads();
182 preemption_event = 151 }
183 Core::Timing::CreateEvent("PreemptionCallback", [this](u64 userdata, s64 cycles_late) {
184 global_scheduler.PreemptThreads();
185 s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10)); 152 s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10));
186 system.CoreTiming().ScheduleEvent(time_interval, preemption_event); 153 system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
187 }); 154 });
@@ -190,6 +157,20 @@ struct KernelCore::Impl {
190 system.CoreTiming().ScheduleEvent(time_interval, preemption_event); 157 system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
191 } 158 }
192 159
160 void InitializeSuspendThreads() {
161 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
162 std::string name = "Suspend Thread Id:" + std::to_string(i);
163 std::function<void(void*)> init_func =
164 system.GetCpuManager().GetSuspendThreadStartFunc();
165 void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
166 ThreadType type =
167 static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_SUSPEND);
168 auto thread_res = Thread::Create(system, type, name, 0, 0, 0, static_cast<u32>(i), 0,
169 nullptr, std::move(init_func), init_func_parameter);
170 suspend_threads[i] = std::move(thread_res).Unwrap();
171 }
172 }
173
193 void MakeCurrentProcess(Process* process) { 174 void MakeCurrentProcess(Process* process) {
194 current_process = process; 175 current_process = process;
195 176
@@ -197,15 +178,17 @@ struct KernelCore::Impl {
197 return; 178 return;
198 } 179 }
199 180
200 for (auto& core : cores) { 181 u32 core_id = GetCurrentHostThreadID();
201 core.SetIs64Bit(process->Is64BitProcess()); 182 if (core_id < Core::Hardware::NUM_CPU_CORES) {
183 system.Memory().SetCurrentPageTable(*process, core_id);
202 } 184 }
203
204 system.Memory().SetCurrentPageTable(*process);
205 } 185 }
206 186
207 void RegisterCoreThread(std::size_t core_id) { 187 void RegisterCoreThread(std::size_t core_id) {
208 std::unique_lock lock{register_thread_mutex}; 188 std::unique_lock lock{register_thread_mutex};
189 if (!is_multicore) {
190 single_core_thread_id = std::this_thread::get_id();
191 }
209 const std::thread::id this_id = std::this_thread::get_id(); 192 const std::thread::id this_id = std::this_thread::get_id();
210 const auto it = host_thread_ids.find(this_id); 193 const auto it = host_thread_ids.find(this_id);
211 ASSERT(core_id < Core::Hardware::NUM_CPU_CORES); 194 ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
@@ -219,12 +202,19 @@ struct KernelCore::Impl {
219 std::unique_lock lock{register_thread_mutex}; 202 std::unique_lock lock{register_thread_mutex};
220 const std::thread::id this_id = std::this_thread::get_id(); 203 const std::thread::id this_id = std::this_thread::get_id();
221 const auto it = host_thread_ids.find(this_id); 204 const auto it = host_thread_ids.find(this_id);
222 ASSERT(it == host_thread_ids.end()); 205 if (it != host_thread_ids.end()) {
206 return;
207 }
223 host_thread_ids[this_id] = registered_thread_ids++; 208 host_thread_ids[this_id] = registered_thread_ids++;
224 } 209 }
225 210
226 u32 GetCurrentHostThreadID() const { 211 u32 GetCurrentHostThreadID() const {
227 const std::thread::id this_id = std::this_thread::get_id(); 212 const std::thread::id this_id = std::this_thread::get_id();
213 if (!is_multicore) {
214 if (single_core_thread_id == this_id) {
215 return static_cast<u32>(system.GetCpuManager().CurrentCore());
216 }
217 }
228 const auto it = host_thread_ids.find(this_id); 218 const auto it = host_thread_ids.find(this_id);
229 if (it == host_thread_ids.end()) { 219 if (it == host_thread_ids.end()) {
230 return Core::INVALID_HOST_THREAD_ID; 220 return Core::INVALID_HOST_THREAD_ID;
@@ -240,7 +230,7 @@ struct KernelCore::Impl {
240 } 230 }
241 const Kernel::Scheduler& sched = cores[result.host_handle].Scheduler(); 231 const Kernel::Scheduler& sched = cores[result.host_handle].Scheduler();
242 const Kernel::Thread* current = sched.GetCurrentThread(); 232 const Kernel::Thread* current = sched.GetCurrentThread();
243 if (current != nullptr) { 233 if (current != nullptr && !current->IsPhantomMode()) {
244 result.guest_handle = current->GetGlobalHandle(); 234 result.guest_handle = current->GetGlobalHandle();
245 } else { 235 } else {
246 result.guest_handle = InvalidHandle; 236 result.guest_handle = InvalidHandle;
@@ -313,7 +303,6 @@ struct KernelCore::Impl {
313 303
314 std::shared_ptr<ResourceLimit> system_resource_limit; 304 std::shared_ptr<ResourceLimit> system_resource_limit;
315 305
316 std::shared_ptr<Core::Timing::EventType> thread_wakeup_event_type;
317 std::shared_ptr<Core::Timing::EventType> preemption_event; 306 std::shared_ptr<Core::Timing::EventType> preemption_event;
318 307
319 // This is the kernel's handle table or supervisor handle table which 308 // This is the kernel's handle table or supervisor handle table which
@@ -343,6 +332,15 @@ struct KernelCore::Impl {
343 std::shared_ptr<Kernel::SharedMemory> irs_shared_mem; 332 std::shared_ptr<Kernel::SharedMemory> irs_shared_mem;
344 std::shared_ptr<Kernel::SharedMemory> time_shared_mem; 333 std::shared_ptr<Kernel::SharedMemory> time_shared_mem;
345 334
335 std::array<std::shared_ptr<Thread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{};
336 std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
337 std::array<std::unique_ptr<Kernel::Scheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
338
339 bool is_multicore{};
340 std::thread::id single_core_thread_id{};
341
342 std::array<u64, Core::Hardware::NUM_CPU_CORES> svc_ticks{};
343
346 // System context 344 // System context
347 Core::System& system; 345 Core::System& system;
348}; 346};
@@ -352,6 +350,10 @@ KernelCore::~KernelCore() {
352 Shutdown(); 350 Shutdown();
353} 351}
354 352
353void KernelCore::SetMulticore(bool is_multicore) {
354 impl->SetMulticore(is_multicore);
355}
356
355void KernelCore::Initialize() { 357void KernelCore::Initialize() {
356 impl->Initialize(*this); 358 impl->Initialize(*this);
357} 359}
@@ -397,11 +399,11 @@ const Kernel::GlobalScheduler& KernelCore::GlobalScheduler() const {
397} 399}
398 400
399Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) { 401Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) {
400 return impl->cores[id].Scheduler(); 402 return *impl->schedulers[id];
401} 403}
402 404
403const Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) const { 405const Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) const {
404 return impl->cores[id].Scheduler(); 406 return *impl->schedulers[id];
405} 407}
406 408
407Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) { 409Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) {
@@ -412,6 +414,39 @@ const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const {
412 return impl->cores[id]; 414 return impl->cores[id];
413} 415}
414 416
417Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
418 u32 core_id = impl->GetCurrentHostThreadID();
419 ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
420 return impl->cores[core_id];
421}
422
423const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
424 u32 core_id = impl->GetCurrentHostThreadID();
425 ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
426 return impl->cores[core_id];
427}
428
429Kernel::Scheduler& KernelCore::CurrentScheduler() {
430 u32 core_id = impl->GetCurrentHostThreadID();
431 ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
432 return *impl->schedulers[core_id];
433}
434
435const Kernel::Scheduler& KernelCore::CurrentScheduler() const {
436 u32 core_id = impl->GetCurrentHostThreadID();
437 ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
438 return *impl->schedulers[core_id];
439}
440
441std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts() {
442 return impl->interrupts;
443}
444
445const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts()
446 const {
447 return impl->interrupts;
448}
449
415Kernel::Synchronization& KernelCore::Synchronization() { 450Kernel::Synchronization& KernelCore::Synchronization() {
416 return impl->synchronization; 451 return impl->synchronization;
417} 452}
@@ -437,15 +472,17 @@ const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const {
437} 472}
438 473
439void KernelCore::InvalidateAllInstructionCaches() { 474void KernelCore::InvalidateAllInstructionCaches() {
440 for (std::size_t i = 0; i < impl->global_scheduler.CpuCoresCount(); i++) { 475 auto& threads = GlobalScheduler().GetThreadList();
441 PhysicalCore(i).ArmInterface().ClearInstructionCache(); 476 for (auto& thread : threads) {
477 if (!thread->IsHLEThread()) {
478 auto& arm_interface = thread->ArmInterface();
479 arm_interface.ClearInstructionCache();
480 }
442 } 481 }
443} 482}
444 483
445void KernelCore::PrepareReschedule(std::size_t id) { 484void KernelCore::PrepareReschedule(std::size_t id) {
446 if (id < impl->global_scheduler.CpuCoresCount()) { 485 // TODO: Reimplement, this
447 impl->cores[id].Stop();
448 }
449} 486}
450 487
451void KernelCore::AddNamedPort(std::string name, std::shared_ptr<ClientPort> port) { 488void KernelCore::AddNamedPort(std::string name, std::shared_ptr<ClientPort> port) {
@@ -481,10 +518,6 @@ u64 KernelCore::CreateNewUserProcessID() {
481 return impl->next_user_process_id++; 518 return impl->next_user_process_id++;
482} 519}
483 520
484const std::shared_ptr<Core::Timing::EventType>& KernelCore::ThreadWakeupCallbackEventType() const {
485 return impl->thread_wakeup_event_type;
486}
487
488Kernel::HandleTable& KernelCore::GlobalHandleTable() { 521Kernel::HandleTable& KernelCore::GlobalHandleTable() {
489 return impl->global_handle_table; 522 return impl->global_handle_table;
490} 523}
@@ -557,4 +590,34 @@ const Kernel::SharedMemory& KernelCore::GetTimeSharedMem() const {
557 return *impl->time_shared_mem; 590 return *impl->time_shared_mem;
558} 591}
559 592
593void KernelCore::Suspend(bool in_suspention) {
594 const bool should_suspend = exception_exited || in_suspention;
595 {
596 SchedulerLock lock(*this);
597 ThreadStatus status = should_suspend ? ThreadStatus::Ready : ThreadStatus::WaitSleep;
598 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
599 impl->suspend_threads[i]->SetStatus(status);
600 }
601 }
602}
603
604bool KernelCore::IsMulticore() const {
605 return impl->is_multicore;
606}
607
608void KernelCore::ExceptionalExit() {
609 exception_exited = true;
610 Suspend(true);
611}
612
613void KernelCore::EnterSVCProfile() {
614 std::size_t core = impl->GetCurrentHostThreadID();
615 impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
616}
617
618void KernelCore::ExitSVCProfile() {
619 std::size_t core = impl->GetCurrentHostThreadID();
620 MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
621}
622
560} // namespace Kernel 623} // namespace Kernel
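Much of the kernel.cpp rework above hinges on mapping the calling host thread back to an emulated core: RegisterCoreThread/RegisterHostThread record std::this_thread::get_id(), and GetCurrentHostThreadID resolves it for CurrentScheduler(), CurrentPhysicalCore() and the SVC profiling hooks. A reduced, self-contained sketch of that registry, illustrative only and not the yuzu implementation:

#include <cstdint>
#include <mutex>
#include <thread>
#include <unordered_map>

constexpr std::uint32_t INVALID_HOST_THREAD_ID = 0xFFFFFFFF;

class HostThreadRegistry {
public:
    void RegisterCoreThread(std::uint32_t core_id) {
        std::scoped_lock lock{mutex};
        ids[std::this_thread::get_id()] = core_id;
    }

    std::uint32_t GetCurrentHostThreadID() const {
        std::scoped_lock lock{mutex};
        const auto it = ids.find(std::this_thread::get_id());
        // Unrelated host threads resolve to an invalid ID, mirroring Core::INVALID_HOST_THREAD_ID.
        return it != ids.end() ? it->second : INVALID_HOST_THREAD_ID;
    }

private:
    mutable std::mutex mutex;
    std::unordered_map<std::thread::id, std::uint32_t> ids;
};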
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 83de1f542..49bd47e89 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -4,15 +4,17 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <array>
7#include <memory> 8#include <memory>
8#include <string> 9#include <string>
9#include <unordered_map> 10#include <unordered_map>
10#include <vector> 11#include <vector>
12#include "core/hardware_properties.h"
11#include "core/hle/kernel/memory/memory_types.h" 13#include "core/hle/kernel/memory/memory_types.h"
12#include "core/hle/kernel/object.h" 14#include "core/hle/kernel/object.h"
13 15
14namespace Core { 16namespace Core {
15struct EmuThreadHandle; 17class CPUInterruptHandler;
16class ExclusiveMonitor; 18class ExclusiveMonitor;
17class System; 19class System;
18} // namespace Core 20} // namespace Core
@@ -65,6 +67,9 @@ public:
65 KernelCore(KernelCore&&) = delete; 67 KernelCore(KernelCore&&) = delete;
66 KernelCore& operator=(KernelCore&&) = delete; 68 KernelCore& operator=(KernelCore&&) = delete;
67 69
 70 /// Sets whether emulation is multicore or single-core; must be called before Initialize
71 void SetMulticore(bool is_multicore);
72
68 /// Resets the kernel to a clean slate for use. 73 /// Resets the kernel to a clean slate for use.
69 void Initialize(); 74 void Initialize();
70 75
@@ -110,6 +115,18 @@ public:
 110 /// Gets an instance of the respective physical CPU core. 115 /// Gets an instance of the respective physical CPU core.
111 const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const; 116 const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const;
112 117
 118 /// Gets the sole instance of the Scheduler for the currently running core.
119 Kernel::Scheduler& CurrentScheduler();
120
 121 /// Gets the sole instance of the Scheduler for the currently running core.
122 const Kernel::Scheduler& CurrentScheduler() const;
123
 124 /// Gets an instance of the current physical CPU core.
125 Kernel::PhysicalCore& CurrentPhysicalCore();
126
 127 /// Gets an instance of the current physical CPU core.
128 const Kernel::PhysicalCore& CurrentPhysicalCore() const;
129
 113 /// Gets an instance of the Synchronization Interface. 130 /// Gets an instance of the Synchronization Interface.
114 Kernel::Synchronization& Synchronization(); 131 Kernel::Synchronization& Synchronization();
115 132
@@ -129,6 +146,10 @@ public:
129 146
130 const Core::ExclusiveMonitor& GetExclusiveMonitor() const; 147 const Core::ExclusiveMonitor& GetExclusiveMonitor() const;
131 148
149 std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts();
150
151 const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts() const;
152
132 void InvalidateAllInstructionCaches(); 153 void InvalidateAllInstructionCaches();
133 154
134 /// Adds a port to the named port table 155 /// Adds a port to the named port table
@@ -191,6 +212,18 @@ public:
191 /// Gets the shared memory object for Time services. 212 /// Gets the shared memory object for Time services.
192 const Kernel::SharedMemory& GetTimeSharedMem() const; 213 const Kernel::SharedMemory& GetTimeSharedMem() const;
193 214
 215 /// Suspends or resumes the OS.
216 void Suspend(bool in_suspention);
217
 218 /// Performs an exceptional exit of the OS.
219 void ExceptionalExit();
220
221 bool IsMulticore() const;
222
223 void EnterSVCProfile();
224
225 void ExitSVCProfile();
226
194private: 227private:
195 friend class Object; 228 friend class Object;
196 friend class Process; 229 friend class Process;
@@ -208,9 +241,6 @@ private:
208 /// Creates a new thread ID, incrementing the internal thread ID counter. 241 /// Creates a new thread ID, incrementing the internal thread ID counter.
209 u64 CreateNewThreadID(); 242 u64 CreateNewThreadID();
210 243
211 /// Retrieves the event type used for thread wakeup callbacks.
212 const std::shared_ptr<Core::Timing::EventType>& ThreadWakeupCallbackEventType() const;
213
214 /// Provides a reference to the global handle table. 244 /// Provides a reference to the global handle table.
215 Kernel::HandleTable& GlobalHandleTable(); 245 Kernel::HandleTable& GlobalHandleTable();
216 246
@@ -219,6 +249,7 @@ private:
219 249
220 struct Impl; 250 struct Impl;
221 std::unique_ptr<Impl> impl; 251 std::unique_ptr<Impl> impl;
252 bool exception_exited{};
222}; 253};
223 254
224} // namespace Kernel 255} // namespace Kernel
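Per the new doc comment, SetMulticore has to run before Initialize, since Initialize now builds the per-core schedulers and suspend threads under that setting. A hypothetical call order; only the two KernelCore calls come from the header above, the surrounding function is illustrative:

#include "core/hle/kernel/kernel.h"

void ConfigureKernel(Kernel::KernelCore& kernel, bool use_multicore) {
    kernel.SetMulticore(use_multicore); // must be set before Initialize()
    kernel.Initialize();                // creates schedulers/suspend threads accordingly
}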
diff --git a/src/core/hle/kernel/memory/memory_manager.cpp b/src/core/hle/kernel/memory/memory_manager.cpp
index 6b432e1b2..acf13585c 100644
--- a/src/core/hle/kernel/memory/memory_manager.cpp
+++ b/src/core/hle/kernel/memory/memory_manager.cpp
@@ -104,7 +104,7 @@ ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pa
104 // Ensure that we don't leave anything un-freed 104 // Ensure that we don't leave anything un-freed
105 auto group_guard = detail::ScopeExit([&] { 105 auto group_guard = detail::ScopeExit([&] {
106 for (const auto& it : page_list.Nodes()) { 106 for (const auto& it : page_list.Nodes()) {
107 const auto min_num_pages{std::min( 107 const auto min_num_pages{std::min<size_t>(
108 it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)}; 108 it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)};
109 chosen_manager.Free(it.GetAddress(), min_num_pages); 109 chosen_manager.Free(it.GetAddress(), min_num_pages);
110 } 110 }
@@ -139,7 +139,6 @@ ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pa
139 } 139 }
140 140
141 // Only succeed if we allocated as many pages as we wanted 141 // Only succeed if we allocated as many pages as we wanted
142 ASSERT(num_pages >= 0);
143 if (num_pages) { 142 if (num_pages) {
144 return ERR_OUT_OF_MEMORY; 143 return ERR_OUT_OF_MEMORY;
145 } 144 }
@@ -165,7 +164,7 @@ ResultCode MemoryManager::Free(PageLinkedList& page_list, std::size_t num_pages,
165 164
166 // Free all of the pages 165 // Free all of the pages
167 for (const auto& it : page_list.Nodes()) { 166 for (const auto& it : page_list.Nodes()) {
168 const auto min_num_pages{std::min( 167 const auto min_num_pages{std::min<size_t>(
169 it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)}; 168 it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)};
170 chosen_manager.Free(it.GetAddress(), min_num_pages); 169 chosen_manager.Free(it.GetAddress(), min_num_pages);
171 } 170 }
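The std::min&lt;size_t&gt; change is about template deduction: the two operands (a page count and an address difference divided by PageSize) are not guaranteed to share a type, and std::min cannot deduce a single T from mixed integer types. Forcing the template argument converts both operands instead. A standalone illustration with made-up names:

#include <algorithm>
#include <cstddef>
#include <cstdint>

std::size_t ClampPages(std::size_t num_pages, std::uint64_t end_address, std::uint64_t address,
                       std::uint64_t page_size) {
    // std::min(num_pages, (end_address - address) / page_size) fails to compile on platforms
    // where std::size_t and std::uint64_t are distinct types; the explicit <std::size_t> does not.
    return std::min<std::size_t>(num_pages, (end_address - address) / page_size);
}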
diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp
index 7869eb32b..8f6c944d1 100644
--- a/src/core/hle/kernel/mutex.cpp
+++ b/src/core/hle/kernel/mutex.cpp
@@ -34,8 +34,6 @@ static std::pair<std::shared_ptr<Thread>, u32> GetHighestPriorityMutexWaitingThr
34 if (thread->GetMutexWaitAddress() != mutex_addr) 34 if (thread->GetMutexWaitAddress() != mutex_addr)
35 continue; 35 continue;
36 36
37 ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
38
39 ++num_waiters; 37 ++num_waiters;
40 if (highest_priority_thread == nullptr || 38 if (highest_priority_thread == nullptr ||
41 thread->GetPriority() < highest_priority_thread->GetPriority()) { 39 thread->GetPriority() < highest_priority_thread->GetPriority()) {
@@ -49,6 +47,7 @@ static std::pair<std::shared_ptr<Thread>, u32> GetHighestPriorityMutexWaitingThr
49/// Update the mutex owner field of all threads waiting on the mutex to point to the new owner. 47/// Update the mutex owner field of all threads waiting on the mutex to point to the new owner.
50static void TransferMutexOwnership(VAddr mutex_addr, std::shared_ptr<Thread> current_thread, 48static void TransferMutexOwnership(VAddr mutex_addr, std::shared_ptr<Thread> current_thread,
51 std::shared_ptr<Thread> new_owner) { 49 std::shared_ptr<Thread> new_owner) {
50 current_thread->RemoveMutexWaiter(new_owner);
52 const auto threads = current_thread->GetMutexWaitingThreads(); 51 const auto threads = current_thread->GetMutexWaitingThreads();
53 for (const auto& thread : threads) { 52 for (const auto& thread : threads) {
54 if (thread->GetMutexWaitAddress() != mutex_addr) 53 if (thread->GetMutexWaitAddress() != mutex_addr)
@@ -72,85 +71,100 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
72 return ERR_INVALID_ADDRESS; 71 return ERR_INVALID_ADDRESS;
73 } 72 }
74 73
75 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 74 auto& kernel = system.Kernel();
76 std::shared_ptr<Thread> current_thread = 75 std::shared_ptr<Thread> current_thread =
77 SharedFrom(system.CurrentScheduler().GetCurrentThread()); 76 SharedFrom(kernel.CurrentScheduler().GetCurrentThread());
78 std::shared_ptr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle); 77 {
79 std::shared_ptr<Thread> requesting_thread = handle_table.Get<Thread>(requesting_thread_handle); 78 SchedulerLock lock(kernel);
79 // The mutex address must be 4-byte aligned
80 if ((address % sizeof(u32)) != 0) {
81 return ERR_INVALID_ADDRESS;
82 }
80 83
81 // TODO(Subv): It is currently unknown if it is possible to lock a mutex in behalf of another 84 const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
82 // thread. 85 std::shared_ptr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle);
83 ASSERT(requesting_thread == current_thread); 86 std::shared_ptr<Thread> requesting_thread =
87 handle_table.Get<Thread>(requesting_thread_handle);
84 88
85 const u32 addr_value = system.Memory().Read32(address); 89 // TODO(Subv): It is currently unknown if it is possible to lock a mutex in behalf of
90 // another thread.
91 ASSERT(requesting_thread == current_thread);
86 92
87 // If the mutex isn't being held, just return success. 93 current_thread->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
88 if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) {
89 return RESULT_SUCCESS;
90 }
91 94
92 if (holding_thread == nullptr) { 95 const u32 addr_value = system.Memory().Read32(address);
93 LOG_ERROR(Kernel, "Holding thread does not exist! thread_handle={:08X}", 96
94 holding_thread_handle); 97 // If the mutex isn't being held, just return success.
95 return ERR_INVALID_HANDLE; 98 if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) {
96 } 99 return RESULT_SUCCESS;
100 }
97 101
98 // Wait until the mutex is released 102 if (holding_thread == nullptr) {
99 current_thread->SetMutexWaitAddress(address); 103 return ERR_INVALID_HANDLE;
100 current_thread->SetWaitHandle(requesting_thread_handle); 104 }
101 105
102 current_thread->SetStatus(ThreadStatus::WaitMutex); 106 // Wait until the mutex is released
103 current_thread->InvalidateWakeupCallback(); 107 current_thread->SetMutexWaitAddress(address);
108 current_thread->SetWaitHandle(requesting_thread_handle);
104 109
105 // Update the lock holder thread's priority to prevent priority inversion. 110 current_thread->SetStatus(ThreadStatus::WaitMutex);
106 holding_thread->AddMutexWaiter(current_thread);
107 111
108 system.PrepareReschedule(); 112 // Update the lock holder thread's priority to prevent priority inversion.
113 holding_thread->AddMutexWaiter(current_thread);
114 }
109 115
110 return RESULT_SUCCESS; 116 {
117 SchedulerLock lock(kernel);
118 auto* owner = current_thread->GetLockOwner();
119 if (owner != nullptr) {
120 owner->RemoveMutexWaiter(current_thread);
121 }
122 }
123 return current_thread->GetSignalingResult();
111} 124}
112 125
113ResultCode Mutex::Release(VAddr address) { 126std::pair<ResultCode, std::shared_ptr<Thread>> Mutex::Unlock(std::shared_ptr<Thread> owner,
127 VAddr address) {
114 // The mutex address must be 4-byte aligned 128 // The mutex address must be 4-byte aligned
115 if ((address % sizeof(u32)) != 0) { 129 if ((address % sizeof(u32)) != 0) {
116 LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address); 130 LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address);
117 return ERR_INVALID_ADDRESS; 131 return {ERR_INVALID_ADDRESS, nullptr};
118 } 132 }
119 133
120 std::shared_ptr<Thread> current_thread = 134 auto [new_owner, num_waiters] = GetHighestPriorityMutexWaitingThread(owner, address);
121 SharedFrom(system.CurrentScheduler().GetCurrentThread()); 135 if (new_owner == nullptr) {
122 auto [thread, num_waiters] = GetHighestPriorityMutexWaitingThread(current_thread, address);
123
124 // There are no more threads waiting for the mutex, release it completely.
125 if (thread == nullptr) {
126 system.Memory().Write32(address, 0); 136 system.Memory().Write32(address, 0);
127 return RESULT_SUCCESS; 137 return {RESULT_SUCCESS, nullptr};
128 } 138 }
129
130 // Transfer the ownership of the mutex from the previous owner to the new one. 139 // Transfer the ownership of the mutex from the previous owner to the new one.
131 TransferMutexOwnership(address, current_thread, thread); 140 TransferMutexOwnership(address, owner, new_owner);
132 141 u32 mutex_value = new_owner->GetWaitHandle();
133 u32 mutex_value = thread->GetWaitHandle();
134
135 if (num_waiters >= 2) { 142 if (num_waiters >= 2) {
136 // Notify the guest that there are still some threads waiting for the mutex 143 // Notify the guest that there are still some threads waiting for the mutex
137 mutex_value |= Mutex::MutexHasWaitersFlag; 144 mutex_value |= Mutex::MutexHasWaitersFlag;
138 } 145 }
146 new_owner->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
147 new_owner->SetLockOwner(nullptr);
148 new_owner->ResumeFromWait();
139 149
140 // Grant the mutex to the next waiting thread and resume it.
141 system.Memory().Write32(address, mutex_value); 150 system.Memory().Write32(address, mutex_value);
151 return {RESULT_SUCCESS, new_owner};
152}
142 153
143 ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex); 154ResultCode Mutex::Release(VAddr address) {
144 thread->ResumeFromWait(); 155 auto& kernel = system.Kernel();
156 SchedulerLock lock(kernel);
145 157
146 thread->SetLockOwner(nullptr); 158 std::shared_ptr<Thread> current_thread =
147 thread->SetCondVarWaitAddress(0); 159 SharedFrom(kernel.CurrentScheduler().GetCurrentThread());
148 thread->SetMutexWaitAddress(0);
149 thread->SetWaitHandle(0);
150 thread->SetWaitSynchronizationResult(RESULT_SUCCESS);
151 160
152 system.PrepareReschedule(); 161 auto [result, new_owner] = Unlock(current_thread, address);
153 162
154 return RESULT_SUCCESS; 163 if (result != RESULT_SUCCESS && new_owner != nullptr) {
164 new_owner->SetSynchronizationResults(nullptr, result);
165 }
166
167 return result;
155} 168}
169
156} // namespace Kernel 170} // namespace Kernel
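For context on the mutex word the rewritten Unlock/Release maintain: the 32-bit value at the mutex address holds the new owner's wait handle, with MutexHasWaitersFlag OR'd in while other threads are still queued, which is what the num_waiters >= 2 branch above encodes. A reduced sketch; the flag value assumes the constant declared in mutex.h, everything else is illustrative:

#include <cstdint>

constexpr std::uint32_t MutexHasWaitersFlag = 0x40000000; // see Mutex::MutexHasWaitersFlag

std::uint32_t EncodeMutexWord(std::uint32_t new_owner_wait_handle, std::uint32_t num_waiters) {
    std::uint32_t value = new_owner_wait_handle;
    if (num_waiters >= 2) {
        value |= MutexHasWaitersFlag; // tell the guest that threads still wait on the mutex
    }
    return value;
}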
diff --git a/src/core/hle/kernel/mutex.h b/src/core/hle/kernel/mutex.h
index b904de2e8..3b81dc3df 100644
--- a/src/core/hle/kernel/mutex.h
+++ b/src/core/hle/kernel/mutex.h
@@ -28,6 +28,10 @@ public:
28 ResultCode TryAcquire(VAddr address, Handle holding_thread_handle, 28 ResultCode TryAcquire(VAddr address, Handle holding_thread_handle,
29 Handle requesting_thread_handle); 29 Handle requesting_thread_handle);
30 30
31 /// Unlocks a mutex for owner at address
32 std::pair<ResultCode, std::shared_ptr<Thread>> Unlock(std::shared_ptr<Thread> owner,
33 VAddr address);
34
31 /// Releases the mutex at the specified address. 35 /// Releases the mutex at the specified address.
32 ResultCode Release(VAddr address); 36 ResultCode Release(VAddr address);
33 37
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index a15011076..c6bbdb080 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -2,12 +2,15 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include "common/assert.h"
5#include "common/logging/log.h" 6#include "common/logging/log.h"
7#include "common/spin_lock.h"
6#include "core/arm/arm_interface.h" 8#include "core/arm/arm_interface.h"
7#ifdef ARCHITECTURE_x86_64 9#ifdef ARCHITECTURE_x86_64
8#include "core/arm/dynarmic/arm_dynarmic_32.h" 10#include "core/arm/dynarmic/arm_dynarmic_32.h"
9#include "core/arm/dynarmic/arm_dynarmic_64.h" 11#include "core/arm/dynarmic/arm_dynarmic_64.h"
10#endif 12#endif
13#include "core/arm/cpu_interrupt_handler.h"
11#include "core/arm/exclusive_monitor.h" 14#include "core/arm/exclusive_monitor.h"
12#include "core/arm/unicorn/arm_unicorn.h" 15#include "core/arm/unicorn/arm_unicorn.h"
13#include "core/core.h" 16#include "core/core.h"
@@ -17,50 +20,37 @@
17 20
18namespace Kernel { 21namespace Kernel {
19 22
20PhysicalCore::PhysicalCore(Core::System& system, std::size_t id, 23PhysicalCore::PhysicalCore(Core::System& system, std::size_t id, Kernel::Scheduler& scheduler,
21 Core::ExclusiveMonitor& exclusive_monitor) 24 Core::CPUInterruptHandler& interrupt_handler)
22 : core_index{id} { 25 : interrupt_handler{interrupt_handler}, core_index{id}, scheduler{scheduler} {
23#ifdef ARCHITECTURE_x86_64
24 arm_interface_32 =
25 std::make_unique<Core::ARM_Dynarmic_32>(system, exclusive_monitor, core_index);
26 arm_interface_64 =
27 std::make_unique<Core::ARM_Dynarmic_64>(system, exclusive_monitor, core_index);
28
29#else
30 using Core::ARM_Unicorn;
31 arm_interface_32 = std::make_unique<ARM_Unicorn>(system, ARM_Unicorn::Arch::AArch32);
32 arm_interface_64 = std::make_unique<ARM_Unicorn>(system, ARM_Unicorn::Arch::AArch64);
33 LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available");
34#endif
35 26
36 scheduler = std::make_unique<Kernel::Scheduler>(system, core_index); 27 guard = std::make_unique<Common::SpinLock>();
37} 28}
38 29
39PhysicalCore::~PhysicalCore() = default; 30PhysicalCore::~PhysicalCore() = default;
40 31
41void PhysicalCore::Run() { 32void PhysicalCore::Idle() {
42 arm_interface->Run(); 33 interrupt_handler.AwaitInterrupt();
43 arm_interface->ClearExclusiveState();
44} 34}
45 35
46void PhysicalCore::Step() { 36void PhysicalCore::Shutdown() {
47 arm_interface->Step(); 37 scheduler.Shutdown();
48} 38}
49 39
50void PhysicalCore::Stop() { 40bool PhysicalCore::IsInterrupted() const {
51 arm_interface->PrepareReschedule(); 41 return interrupt_handler.IsInterrupted();
52} 42}
53 43
54void PhysicalCore::Shutdown() { 44void PhysicalCore::Interrupt() {
55 scheduler->Shutdown(); 45 guard->lock();
46 interrupt_handler.SetInterrupt(true);
47 guard->unlock();
56} 48}
57 49
58void PhysicalCore::SetIs64Bit(bool is_64_bit) { 50void PhysicalCore::ClearInterrupt() {
59 if (is_64_bit) { 51 guard->lock();
60 arm_interface = arm_interface_64.get(); 52 interrupt_handler.SetInterrupt(false);
61 } else { 53 guard->unlock();
62 arm_interface = arm_interface_32.get();
63 }
64} 54}
65 55
66} // namespace Kernel 56} // namespace Kernel
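The physical core now delegates waiting and interruption to the CPUInterruptHandler used above (Idle() awaits an interrupt, Interrupt()/ClearInterrupt() toggle it under a spin lock). A possible shape for that handler, stated as an assumption rather than the actual core/arm/cpu_interrupt_handler.cpp, consistent with the three calls the diff relies on:

#include <atomic>
#include <condition_variable>
#include <mutex>

class InterruptHandlerSketch {
public:
    void SetInterrupt(bool active) {
        {
            std::scoped_lock lock{mutex};
            interrupted = active;
        }
        if (active) {
            cv.notify_all(); // wake a core parked in AwaitInterrupt()
        }
    }

    void AwaitInterrupt() {
        std::unique_lock lock{mutex};
        cv.wait(lock, [this] { return interrupted.load(); });
    }

    bool IsInterrupted() const {
        return interrupted.load();
    }

private:
    std::mutex mutex;
    std::condition_variable cv;
    std::atomic<bool> interrupted{false};
};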
diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h
index 3269166be..d7a7a951c 100644
--- a/src/core/hle/kernel/physical_core.h
+++ b/src/core/hle/kernel/physical_core.h
@@ -7,12 +7,17 @@
7#include <cstddef> 7#include <cstddef>
8#include <memory> 8#include <memory>
9 9
10namespace Common {
11class SpinLock;
12}
13
10namespace Kernel { 14namespace Kernel {
11class Scheduler; 15class Scheduler;
12} // namespace Kernel 16} // namespace Kernel
13 17
14namespace Core { 18namespace Core {
15class ARM_Interface; 19class ARM_Interface;
20class CPUInterruptHandler;
16class ExclusiveMonitor; 21class ExclusiveMonitor;
17class System; 22class System;
18} // namespace Core 23} // namespace Core
@@ -21,7 +26,8 @@ namespace Kernel {
21 26
22class PhysicalCore { 27class PhysicalCore {
23public: 28public:
24 PhysicalCore(Core::System& system, std::size_t id, Core::ExclusiveMonitor& exclusive_monitor); 29 PhysicalCore(Core::System& system, std::size_t id, Kernel::Scheduler& scheduler,
30 Core::CPUInterruptHandler& interrupt_handler);
25 ~PhysicalCore(); 31 ~PhysicalCore();
26 32
27 PhysicalCore(const PhysicalCore&) = delete; 33 PhysicalCore(const PhysicalCore&) = delete;
@@ -30,23 +36,18 @@ public:
30 PhysicalCore(PhysicalCore&&) = default; 36 PhysicalCore(PhysicalCore&&) = default;
31 PhysicalCore& operator=(PhysicalCore&&) = default; 37 PhysicalCore& operator=(PhysicalCore&&) = default;
32 38
33 /// Execute current jit state 39 void Idle();
34 void Run(); 40 /// Interrupt this physical core.
35 /// Execute a single instruction in current jit. 41 void Interrupt();
36 void Step();
37 /// Stop JIT execution/exit
38 void Stop();
39 42
40 // Shutdown this physical core. 43 /// Clear this core's interrupt
41 void Shutdown(); 44 void ClearInterrupt();
42 45
43 Core::ARM_Interface& ArmInterface() { 46 /// Check if this core is interrupted
44 return *arm_interface; 47 bool IsInterrupted() const;
45 }
46 48
47 const Core::ARM_Interface& ArmInterface() const { 49 // Shutdown this physical core.
48 return *arm_interface; 50 void Shutdown();
49 }
50 51
51 bool IsMainCore() const { 52 bool IsMainCore() const {
52 return core_index == 0; 53 return core_index == 0;
@@ -61,21 +62,18 @@ public:
61 } 62 }
62 63
63 Kernel::Scheduler& Scheduler() { 64 Kernel::Scheduler& Scheduler() {
64 return *scheduler; 65 return scheduler;
65 } 66 }
66 67
67 const Kernel::Scheduler& Scheduler() const { 68 const Kernel::Scheduler& Scheduler() const {
68 return *scheduler; 69 return scheduler;
69 } 70 }
70 71
71 void SetIs64Bit(bool is_64_bit);
72
73private: 72private:
73 Core::CPUInterruptHandler& interrupt_handler;
74 std::size_t core_index; 74 std::size_t core_index;
75 std::unique_ptr<Core::ARM_Interface> arm_interface_32; 75 Kernel::Scheduler& scheduler;
76 std::unique_ptr<Core::ARM_Interface> arm_interface_64; 76 std::unique_ptr<Common::SpinLock> guard;
77 std::unique_ptr<Kernel::Scheduler> scheduler;
78 Core::ARM_Interface* arm_interface{};
79}; 77};
80 78
81} // namespace Kernel 79} // namespace Kernel
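One likely reason (an assumption about intent, not stated in the patch) that the guard is held through std::unique_ptr&lt;Common::SpinLock&gt; rather than by value: PhysicalCore keeps its defaulted move operations, which a non-movable lock member would delete. A generic illustration with std::mutex standing in for the spin lock:

#include <memory>
#include <mutex>

struct NotMovableLock {
    std::mutex m; // std::mutex is neither copyable nor movable
};

class MovableCore {
public:
    MovableCore() : guard{std::make_unique<NotMovableLock>()} {}
    MovableCore(MovableCore&&) = default;            // fine: moves the pointer, not the lock
    MovableCore& operator=(MovableCore&&) = default; // fine for the same reason

private:
    std::unique_ptr<NotMovableLock> guard;
};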
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index 36724569f..c6fcb56ad 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -22,6 +22,7 @@
22#include "core/hle/kernel/resource_limit.h" 22#include "core/hle/kernel/resource_limit.h"
23#include "core/hle/kernel/scheduler.h" 23#include "core/hle/kernel/scheduler.h"
24#include "core/hle/kernel/thread.h" 24#include "core/hle/kernel/thread.h"
25#include "core/hle/lock.h"
25#include "core/memory.h" 26#include "core/memory.h"
26#include "core/settings.h" 27#include "core/settings.h"
27 28
@@ -30,14 +31,15 @@ namespace {
30/** 31/**
31 * Sets up the primary application thread 32 * Sets up the primary application thread
32 * 33 *
34 * @param system The system instance to create the main thread under.
33 * @param owner_process The parent process for the main thread 35 * @param owner_process The parent process for the main thread
34 * @param kernel The kernel instance to create the main thread under.
35 * @param priority The priority to give the main thread 36 * @param priority The priority to give the main thread
36 */ 37 */
37void SetupMainThread(Process& owner_process, KernelCore& kernel, u32 priority, VAddr stack_top) { 38void SetupMainThread(Core::System& system, Process& owner_process, u32 priority, VAddr stack_top) {
38 const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart(); 39 const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart();
39 auto thread_res = Thread::Create(kernel, "main", entry_point, priority, 0, 40 ThreadType type = THREADTYPE_USER;
40 owner_process.GetIdealCore(), stack_top, owner_process); 41 auto thread_res = Thread::Create(system, type, "main", entry_point, priority, 0,
42 owner_process.GetIdealCore(), stack_top, &owner_process);
41 43
42 std::shared_ptr<Thread> thread = std::move(thread_res).Unwrap(); 44 std::shared_ptr<Thread> thread = std::move(thread_res).Unwrap();
43 45
@@ -48,8 +50,12 @@ void SetupMainThread(Process& owner_process, KernelCore& kernel, u32 priority, V
48 thread->GetContext32().cpu_registers[1] = thread_handle; 50 thread->GetContext32().cpu_registers[1] = thread_handle;
49 thread->GetContext64().cpu_registers[1] = thread_handle; 51 thread->GetContext64().cpu_registers[1] = thread_handle;
50 52
53 auto& kernel = system.Kernel();
51 // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires 54 // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
52 thread->ResumeFromWait(); 55 {
56 SchedulerLock lock{kernel};
57 thread->SetStatus(ThreadStatus::Ready);
58 }
53} 59}
54} // Anonymous namespace 60} // Anonymous namespace
55 61
@@ -117,7 +123,7 @@ std::shared_ptr<Process> Process::Create(Core::System& system, std::string name,
117 : kernel.CreateNewUserProcessID(); 123 : kernel.CreateNewUserProcessID();
118 process->capabilities.InitializeForMetadatalessProcess(); 124 process->capabilities.InitializeForMetadatalessProcess();
119 125
120 std::mt19937 rng(Settings::values.rng_seed.value_or(0)); 126 std::mt19937 rng(Settings::values.rng_seed.GetValue().value_or(0));
121 std::uniform_int_distribution<u64> distribution; 127 std::uniform_int_distribution<u64> distribution;
122 std::generate(process->random_entropy.begin(), process->random_entropy.end(), 128 std::generate(process->random_entropy.begin(), process->random_entropy.end(),
123 [&] { return distribution(rng); }); 129 [&] { return distribution(rng); });
@@ -132,7 +138,8 @@ std::shared_ptr<ResourceLimit> Process::GetResourceLimit() const {
132 138
133u64 Process::GetTotalPhysicalMemoryAvailable() const { 139u64 Process::GetTotalPhysicalMemoryAvailable() const {
134 const u64 capacity{resource_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory) + 140 const u64 capacity{resource_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory) +
135 page_table->GetTotalHeapSize() + image_size + main_thread_stack_size}; 141 page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size +
142 main_thread_stack_size};
136 143
137 if (capacity < memory_usage_capacity) { 144 if (capacity < memory_usage_capacity) {
138 return capacity; 145 return capacity;
@@ -146,7 +153,8 @@ u64 Process::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const {
146} 153}
147 154
148u64 Process::GetTotalPhysicalMemoryUsed() const { 155u64 Process::GetTotalPhysicalMemoryUsed() const {
149 return image_size + main_thread_stack_size + page_table->GetTotalHeapSize(); 156 return image_size + main_thread_stack_size + page_table->GetTotalHeapSize() +
157 GetSystemResourceSize();
150} 158}
151 159
152u64 Process::GetTotalPhysicalMemoryUsedWithoutSystemResource() const { 160u64 Process::GetTotalPhysicalMemoryUsedWithoutSystemResource() const {
@@ -180,7 +188,6 @@ void Process::RemoveConditionVariableThread(std::shared_ptr<Thread> thread) {
180 } 188 }
181 ++it; 189 ++it;
182 } 190 }
183 UNREACHABLE();
184} 191}
185 192
186std::vector<std::shared_ptr<Thread>> Process::GetConditionVariableThreads( 193std::vector<std::shared_ptr<Thread>> Process::GetConditionVariableThreads(
@@ -205,6 +212,7 @@ void Process::UnregisterThread(const Thread* thread) {
205} 212}
206 213
207ResultCode Process::ClearSignalState() { 214ResultCode Process::ClearSignalState() {
215 SchedulerLock lock(system.Kernel());
208 if (status == ProcessStatus::Exited) { 216 if (status == ProcessStatus::Exited) {
209 LOG_ERROR(Kernel, "called on a terminated process instance."); 217 LOG_ERROR(Kernel, "called on a terminated process instance.");
210 return ERR_INVALID_STATE; 218 return ERR_INVALID_STATE;
@@ -292,7 +300,7 @@ void Process::Run(s32 main_thread_priority, u64 stack_size) {
292 300
293 ChangeStatus(ProcessStatus::Running); 301 ChangeStatus(ProcessStatus::Running);
294 302
295 SetupMainThread(*this, kernel, main_thread_priority, main_thread_stack_top); 303 SetupMainThread(system, *this, main_thread_priority, main_thread_stack_top);
296 resource_limit->Reserve(ResourceType::Threads, 1); 304 resource_limit->Reserve(ResourceType::Threads, 1);
297 resource_limit->Reserve(ResourceType::PhysicalMemory, main_thread_stack_size); 305 resource_limit->Reserve(ResourceType::PhysicalMemory, main_thread_stack_size);
298} 306}
@@ -338,6 +346,7 @@ static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) {
338} 346}
339 347
340VAddr Process::CreateTLSRegion() { 348VAddr Process::CreateTLSRegion() {
349 SchedulerLock lock(system.Kernel());
341 if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)}; 350 if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)};
342 tls_page_iter != tls_pages.cend()) { 351 tls_page_iter != tls_pages.cend()) {
343 return *tls_page_iter->ReserveSlot(); 352 return *tls_page_iter->ReserveSlot();
@@ -368,6 +377,7 @@ VAddr Process::CreateTLSRegion() {
368} 377}
369 378
370void Process::FreeTLSRegion(VAddr tls_address) { 379void Process::FreeTLSRegion(VAddr tls_address) {
380 SchedulerLock lock(system.Kernel());
371 const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE); 381 const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE);
372 auto iter = 382 auto iter =
373 std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) { 383 std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) {
@@ -382,6 +392,7 @@ void Process::FreeTLSRegion(VAddr tls_address) {
382} 392}
383 393
384void Process::LoadModule(CodeSet code_set, VAddr base_addr) { 394void Process::LoadModule(CodeSet code_set, VAddr base_addr) {
395 std::lock_guard lock{HLE::g_hle_lock};
385 const auto ReprotectSegment = [&](const CodeSet::Segment& segment, 396 const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
386 Memory::MemoryPermission permission) { 397 Memory::MemoryPermission permission) {
387 page_table->SetCodeMemoryPermission(segment.addr + base_addr, segment.size, permission); 398 page_table->SetCodeMemoryPermission(segment.addr + base_addr, segment.size, permission);
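The entropy hunk above only changes how the optional seed is read (Settings::values.rng_seed.GetValue().value_or(0)); the generation pattern itself is unchanged. A standalone equivalent, with the entropy width of four u64 words assumed here purely for illustration:

#include <algorithm>
#include <array>
#include <cstdint>
#include <optional>
#include <random>

std::array<std::uint64_t, 4> GenerateEntropy(std::optional<std::uint32_t> rng_seed) {
    std::mt19937 rng(rng_seed.value_or(0)); // deterministic when no seed is configured
    std::uniform_int_distribution<std::uint64_t> distribution;
    std::array<std::uint64_t, 4> entropy{};
    std::generate(entropy.begin(), entropy.end(), [&] { return distribution(rng); });
    return entropy;
}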
diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp
index 00860fcbd..6e286419e 100644
--- a/src/core/hle/kernel/readable_event.cpp
+++ b/src/core/hle/kernel/readable_event.cpp
@@ -6,8 +6,10 @@
6#include "common/assert.h" 6#include "common/assert.h"
7#include "common/logging/log.h" 7#include "common/logging/log.h"
8#include "core/hle/kernel/errors.h" 8#include "core/hle/kernel/errors.h"
9#include "core/hle/kernel/kernel.h"
9#include "core/hle/kernel/object.h" 10#include "core/hle/kernel/object.h"
10#include "core/hle/kernel/readable_event.h" 11#include "core/hle/kernel/readable_event.h"
12#include "core/hle/kernel/scheduler.h"
11#include "core/hle/kernel/thread.h" 13#include "core/hle/kernel/thread.h"
12 14
13namespace Kernel { 15namespace Kernel {
@@ -37,8 +39,9 @@ void ReadableEvent::Clear() {
37} 39}
38 40
39ResultCode ReadableEvent::Reset() { 41ResultCode ReadableEvent::Reset() {
42 SchedulerLock lock(kernel);
40 if (!is_signaled) { 43 if (!is_signaled) {
41 LOG_ERROR(Kernel, "Handle is not signaled! object_id={}, object_type={}, object_name={}", 44 LOG_TRACE(Kernel, "Handle is not signaled! object_id={}, object_type={}, object_name={}",
42 GetObjectId(), GetTypeName(), GetName()); 45 GetObjectId(), GetTypeName(), GetName());
43 return ERR_INVALID_STATE; 46 return ERR_INVALID_STATE;
44 } 47 }
diff --git a/src/core/hle/kernel/resource_limit.cpp b/src/core/hle/kernel/resource_limit.cpp
index d9beaa3a4..212e442f4 100644
--- a/src/core/hle/kernel/resource_limit.cpp
+++ b/src/core/hle/kernel/resource_limit.cpp
@@ -24,13 +24,9 @@ bool ResourceLimit::Reserve(ResourceType resource, s64 amount, u64 timeout) {
24 const std::size_t index{ResourceTypeToIndex(resource)}; 24 const std::size_t index{ResourceTypeToIndex(resource)};
25 25
26 s64 new_value = current[index] + amount; 26 s64 new_value = current[index] + amount;
27 while (new_value > limit[index] && available[index] + amount <= limit[index]) { 27 if (new_value > limit[index] && available[index] + amount <= limit[index]) {
28 // TODO(bunnei): This is wrong for multicore, we should wait the calling thread for timeout 28 // TODO(bunnei): This is wrong for multicore, we should wait the calling thread for timeout
29 new_value = current[index] + amount; 29 new_value = current[index] + amount;
30
31 if (timeout >= 0) {
32 break;
33 }
34 } 30 }
35 31
36 if (new_value <= limit[index]) { 32 if (new_value <= limit[index]) {
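The Reserve change above drops the busy-wait loop in favour of a single re-check, leaving proper blocking to the existing multicore TODO. A reduced model of the resulting logic, which deliberately ignores the separate "available" pool for brevity:

#include <cstdint>

bool TryReserve(std::int64_t& current, std::int64_t limit, std::int64_t amount) {
    const std::int64_t new_value = current + amount;
    if (new_value > limit) {
        return false; // would exceed the limit; the TODO above notes this should eventually wait
    }
    current = new_value;
    return true;
}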
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 1140c72a3..7b929781c 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -6,16 +6,21 @@
6// licensed under GPLv2 or later under exception provided by the author. 6// licensed under GPLv2 or later under exception provided by the author.
7 7
8#include <algorithm> 8#include <algorithm>
9#include <mutex>
9#include <set> 10#include <set>
10#include <unordered_set> 11#include <unordered_set>
11#include <utility> 12#include <utility>
12 13
13#include "common/assert.h" 14#include "common/assert.h"
15#include "common/bit_util.h"
16#include "common/fiber.h"
14#include "common/logging/log.h" 17#include "common/logging/log.h"
15#include "core/arm/arm_interface.h" 18#include "core/arm/arm_interface.h"
16#include "core/core.h" 19#include "core/core.h"
17#include "core/core_timing.h" 20#include "core/core_timing.h"
21#include "core/cpu_manager.h"
18#include "core/hle/kernel/kernel.h" 22#include "core/hle/kernel/kernel.h"
23#include "core/hle/kernel/physical_core.h"
19#include "core/hle/kernel/process.h" 24#include "core/hle/kernel/process.h"
20#include "core/hle/kernel/scheduler.h" 25#include "core/hle/kernel/scheduler.h"
21#include "core/hle/kernel/time_manager.h" 26#include "core/hle/kernel/time_manager.h"
@@ -27,103 +32,148 @@ GlobalScheduler::GlobalScheduler(KernelCore& kernel) : kernel{kernel} {}
27GlobalScheduler::~GlobalScheduler() = default; 32GlobalScheduler::~GlobalScheduler() = default;
28 33
29void GlobalScheduler::AddThread(std::shared_ptr<Thread> thread) { 34void GlobalScheduler::AddThread(std::shared_ptr<Thread> thread) {
35 std::scoped_lock lock{global_list_guard};
30 thread_list.push_back(std::move(thread)); 36 thread_list.push_back(std::move(thread));
31} 37}
32 38
33void GlobalScheduler::RemoveThread(std::shared_ptr<Thread> thread) { 39void GlobalScheduler::RemoveThread(std::shared_ptr<Thread> thread) {
40 std::scoped_lock lock{global_list_guard};
34 thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), 41 thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
35 thread_list.end()); 42 thread_list.end());
36} 43}
37 44
38void GlobalScheduler::UnloadThread(std::size_t core) { 45u32 GlobalScheduler::SelectThreads() {
39 Scheduler& sched = kernel.Scheduler(core); 46 ASSERT(is_locked);
40 sched.UnloadThread();
41}
42
43void GlobalScheduler::SelectThread(std::size_t core) {
44 const auto update_thread = [](Thread* thread, Scheduler& sched) { 47 const auto update_thread = [](Thread* thread, Scheduler& sched) {
45 if (thread != sched.selected_thread.get()) { 48 std::scoped_lock lock{sched.guard};
49 if (thread != sched.selected_thread_set.get()) {
46 if (thread == nullptr) { 50 if (thread == nullptr) {
47 ++sched.idle_selection_count; 51 ++sched.idle_selection_count;
48 } 52 }
49 sched.selected_thread = SharedFrom(thread); 53 sched.selected_thread_set = SharedFrom(thread);
50 } 54 }
51 sched.is_context_switch_pending = sched.selected_thread != sched.current_thread; 55 const bool reschedule_pending =
56 sched.is_context_switch_pending || (sched.selected_thread_set != sched.current_thread);
57 sched.is_context_switch_pending = reschedule_pending;
52 std::atomic_thread_fence(std::memory_order_seq_cst); 58 std::atomic_thread_fence(std::memory_order_seq_cst);
59 return reschedule_pending;
53 }; 60 };
54 Scheduler& sched = kernel.Scheduler(core); 61 if (!is_reselection_pending.load()) {
55 Thread* current_thread = nullptr; 62 return 0;
56 // Step 1: Get top thread in schedule queue.
57 current_thread = scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front();
58 if (current_thread) {
59 update_thread(current_thread, sched);
60 return;
61 } 63 }
62 // Step 2: Try selecting a suggested thread. 64 std::array<Thread*, Core::Hardware::NUM_CPU_CORES> top_threads{};
63 Thread* winner = nullptr; 65
64 std::set<s32> sug_cores; 66 u32 idle_cores{};
65 for (auto thread : suggested_queue[core]) { 67
66 s32 this_core = thread->GetProcessorID(); 68 // Step 1: Get top thread in schedule queue.
67 Thread* thread_on_core = nullptr; 69 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
68 if (this_core >= 0) { 70 Thread* top_thread =
69 thread_on_core = scheduled_queue[this_core].front(); 71 scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front();
70 } 72 if (top_thread != nullptr) {
71 if (this_core < 0 || thread != thread_on_core) { 73 // TODO(Blinkhawk): Implement Thread Pinning
72 winner = thread; 74 } else {
73 break; 75 idle_cores |= (1ul << core);
74 } 76 }
75 sug_cores.insert(this_core); 77 top_threads[core] = top_thread;
76 } 78 }
77 // if we got a suggested thread, select it, else do a second pass. 79
78 if (winner && winner->GetPriority() > 2) { 80 while (idle_cores != 0) {
79 if (winner->IsRunning()) { 81 u32 core_id = Common::CountTrailingZeroes32(idle_cores);
80 UnloadThread(static_cast<u32>(winner->GetProcessorID())); 82
83 if (!suggested_queue[core_id].empty()) {
84 std::array<s32, Core::Hardware::NUM_CPU_CORES> migration_candidates{};
85 std::size_t num_candidates = 0;
86 auto iter = suggested_queue[core_id].begin();
87 Thread* suggested = nullptr;
88 // Step 2: Try selecting a suggested thread.
89 while (iter != suggested_queue[core_id].end()) {
90 suggested = *iter;
91 iter++;
92 s32 suggested_core_id = suggested->GetProcessorID();
93 Thread* top_thread =
94 suggested_core_id >= 0 ? top_threads[suggested_core_id] : nullptr;
95 if (top_thread != suggested) {
96 if (top_thread != nullptr &&
97 top_thread->GetPriority() < THREADPRIO_MAX_CORE_MIGRATION) {
98 suggested = nullptr;
99 break;
100 // A thread with too high a priority is on that core, cancel the migration
101 }
102 TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id), suggested);
103 break;
104 }
105 suggested = nullptr;
106 migration_candidates[num_candidates++] = suggested_core_id;
107 }
108 // Step 3: Select a suggested thread from another core
109 if (suggested == nullptr) {
110 for (std::size_t i = 0; i < num_candidates; i++) {
111 s32 candidate_core = migration_candidates[i];
112 suggested = top_threads[candidate_core];
113 auto it = scheduled_queue[candidate_core].begin();
114 it++;
115 Thread* next = it != scheduled_queue[candidate_core].end() ? *it : nullptr;
116 if (next != nullptr) {
117 TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id),
118 suggested);
119 top_threads[candidate_core] = next;
120 break;
121 } else {
122 suggested = nullptr;
123 }
124 }
125 }
126 top_threads[core_id] = suggested;
81 } 127 }
82 TransferToCore(winner->GetPriority(), static_cast<s32>(core), winner); 128
83 update_thread(winner, sched); 129 idle_cores &= ~(1ul << core_id);
84 return;
85 } 130 }
86 // Step 3: Select a suggested thread from another core 131 u32 cores_needing_context_switch{};
87 for (auto& src_core : sug_cores) { 132 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
88 auto it = scheduled_queue[src_core].begin(); 133 Scheduler& sched = kernel.Scheduler(core);
89 it++; 134 ASSERT(top_threads[core] == nullptr || top_threads[core]->GetProcessorID() == core);
90 if (it != scheduled_queue[src_core].end()) { 135 if (update_thread(top_threads[core], sched)) {
91 Thread* thread_on_core = scheduled_queue[src_core].front(); 136 cores_needing_context_switch |= (1ul << core);
92 Thread* to_change = *it;
93 if (thread_on_core->IsRunning() || to_change->IsRunning()) {
94 UnloadThread(static_cast<u32>(src_core));
95 }
96 TransferToCore(thread_on_core->GetPriority(), static_cast<s32>(core), thread_on_core);
97 current_thread = thread_on_core;
98 break;
99 } 137 }
100 } 138 }
101 update_thread(current_thread, sched); 139 return cores_needing_context_switch;
102} 140}
103 141
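The new SelectThreads() walks the set of idle cores as a bitmask, handling the lowest set bit each iteration and clearing it afterwards. Below is a minimal standalone sketch of that pattern, assuming std::countr_zero from C++20's <bit> in place of Common::CountTrailingZeroes32; the four-core mask value is a made-up example, not data from this change.

#include <bit>
#include <cstdint>
#include <cstdio>

int main() {
    // Hypothetical mask for a 4-core system: cores 0 and 2 currently have no top thread.
    std::uint32_t idle_cores = 0b0101;
    while (idle_cores != 0) {
        // The lowest set bit names the next idle core to service.
        const auto core_id = static_cast<unsigned>(std::countr_zero(idle_cores));
        std::printf("core %u is idle, try to pull a suggested thread onto it\n", core_id);
        // Clear the bit once the core has been handled, mirroring idle_cores &= ~(1ul << core_id).
        idle_cores &= ~(1u << core_id);
    }
    return 0;
}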
104bool GlobalScheduler::YieldThread(Thread* yielding_thread) { 142bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
143 ASSERT(is_locked);
105 // Note: caller should use critical section, etc. 144 // Note: caller should use critical section, etc.
145 if (!yielding_thread->IsRunnable()) {
146 // Normally this case shouldn't happen except for SetThreadActivity.
147 is_reselection_pending.store(true, std::memory_order_release);
148 return false;
149 }
106 const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID()); 150 const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
107 const u32 priority = yielding_thread->GetPriority(); 151 const u32 priority = yielding_thread->GetPriority();
108 152
109 // Yield the thread 153 // Yield the thread
110 const Thread* const winner = scheduled_queue[core_id].front(priority); 154 Reschedule(priority, core_id, yielding_thread);
111 ASSERT_MSG(yielding_thread == winner, "Thread yielding without being in front"); 155 const Thread* const winner = scheduled_queue[core_id].front();
112 scheduled_queue[core_id].yield(priority); 156 if (kernel.GetCurrentHostThreadID() != core_id) {
157 is_reselection_pending.store(true, std::memory_order_release);
158 }
113 159
114 return AskForReselectionOrMarkRedundant(yielding_thread, winner); 160 return AskForReselectionOrMarkRedundant(yielding_thread, winner);
115} 161}
116 162
117bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) { 163bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
164 ASSERT(is_locked);
118 // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section, 165 // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
119 // etc. 166 // etc.
167 if (!yielding_thread->IsRunnable()) {
168 // Normally this case shouldn't happen except for SetThreadActivity.
169 is_reselection_pending.store(true, std::memory_order_release);
170 return false;
171 }
120 const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID()); 172 const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
121 const u32 priority = yielding_thread->GetPriority(); 173 const u32 priority = yielding_thread->GetPriority();
122 174
123 // Yield the thread 175 // Yield the thread
124 ASSERT_MSG(yielding_thread == scheduled_queue[core_id].front(priority), 176 Reschedule(priority, core_id, yielding_thread);
125 "Thread yielding without being in front");
126 scheduled_queue[core_id].yield(priority);
127 177
128 std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads; 178 std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads;
129 for (std::size_t i = 0; i < current_threads.size(); i++) { 179 for (std::size_t i = 0; i < current_threads.size(); i++) {
@@ -153,21 +203,28 @@ bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
153 203
154 if (winner != nullptr) { 204 if (winner != nullptr) {
155 if (winner != yielding_thread) { 205 if (winner != yielding_thread) {
156 if (winner->IsRunning()) {
157 UnloadThread(static_cast<u32>(winner->GetProcessorID()));
158 }
159 TransferToCore(winner->GetPriority(), s32(core_id), winner); 206 TransferToCore(winner->GetPriority(), s32(core_id), winner);
160 } 207 }
161 } else { 208 } else {
162 winner = next_thread; 209 winner = next_thread;
163 } 210 }
164 211
212 if (kernel.GetCurrentHostThreadID() != core_id) {
213 is_reselection_pending.store(true, std::memory_order_release);
214 }
215
165 return AskForReselectionOrMarkRedundant(yielding_thread, winner); 216 return AskForReselectionOrMarkRedundant(yielding_thread, winner);
166} 217}
167 218
168bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) { 219bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
220 ASSERT(is_locked);
169 // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section, 221 // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
170 // etc. 222 // etc.
223 if (!yielding_thread->IsRunnable()) {
224 // Normally this case shouldn't happen except for SetThreadActivity.
225 is_reselection_pending.store(true, std::memory_order_release);
226 return false;
227 }
171 Thread* winner = nullptr; 228 Thread* winner = nullptr;
172 const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID()); 229 const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
173 230
@@ -195,25 +252,31 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
195 } 252 }
196 if (winner != nullptr) { 253 if (winner != nullptr) {
197 if (winner != yielding_thread) { 254 if (winner != yielding_thread) {
198 if (winner->IsRunning()) {
199 UnloadThread(static_cast<u32>(winner->GetProcessorID()));
200 }
201 TransferToCore(winner->GetPriority(), static_cast<s32>(core_id), winner); 255 TransferToCore(winner->GetPriority(), static_cast<s32>(core_id), winner);
202 } 256 }
203 } else { 257 } else {
204 winner = yielding_thread; 258 winner = yielding_thread;
205 } 259 }
260 } else {
261 winner = scheduled_queue[core_id].front();
262 }
263
264 if (kernel.GetCurrentHostThreadID() != core_id) {
265 is_reselection_pending.store(true, std::memory_order_release);
206 } 266 }
207 267
208 return AskForReselectionOrMarkRedundant(yielding_thread, winner); 268 return AskForReselectionOrMarkRedundant(yielding_thread, winner);
209} 269}
210 270
211void GlobalScheduler::PreemptThreads() { 271void GlobalScheduler::PreemptThreads() {
272 ASSERT(is_locked);
212 for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { 273 for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
213 const u32 priority = preemption_priorities[core_id]; 274 const u32 priority = preemption_priorities[core_id];
214 275
215 if (scheduled_queue[core_id].size(priority) > 0) { 276 if (scheduled_queue[core_id].size(priority) > 0) {
216 scheduled_queue[core_id].front(priority)->IncrementYieldCount(); 277 if (scheduled_queue[core_id].size(priority) > 1) {
278 scheduled_queue[core_id].front(priority)->IncrementYieldCount();
279 }
217 scheduled_queue[core_id].yield(priority); 280 scheduled_queue[core_id].yield(priority);
218 if (scheduled_queue[core_id].size(priority) > 1) { 281 if (scheduled_queue[core_id].size(priority) > 1) {
219 scheduled_queue[core_id].front(priority)->IncrementYieldCount(); 282 scheduled_queue[core_id].front(priority)->IncrementYieldCount();
@@ -247,9 +310,6 @@ void GlobalScheduler::PreemptThreads() {
247 } 310 }
248 311
249 if (winner != nullptr) { 312 if (winner != nullptr) {
250 if (winner->IsRunning()) {
251 UnloadThread(static_cast<u32>(winner->GetProcessorID()));
252 }
253 TransferToCore(winner->GetPriority(), s32(core_id), winner); 313 TransferToCore(winner->GetPriority(), s32(core_id), winner);
254 current_thread = 314 current_thread =
255 winner->GetPriority() <= current_thread->GetPriority() ? winner : current_thread; 315 winner->GetPriority() <= current_thread->GetPriority() ? winner : current_thread;
@@ -280,9 +340,6 @@ void GlobalScheduler::PreemptThreads() {
280 } 340 }
281 341
282 if (winner != nullptr) { 342 if (winner != nullptr) {
283 if (winner->IsRunning()) {
284 UnloadThread(static_cast<u32>(winner->GetProcessorID()));
285 }
286 TransferToCore(winner->GetPriority(), s32(core_id), winner); 343 TransferToCore(winner->GetPriority(), s32(core_id), winner);
287 current_thread = winner; 344 current_thread = winner;
288 } 345 }
@@ -292,34 +349,65 @@ void GlobalScheduler::PreemptThreads() {
292 } 349 }
293} 350}
294 351
352void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
353 Core::EmuThreadHandle global_thread) {
354 u32 current_core = global_thread.host_handle;
355 bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
356 (current_core < Core::Hardware::NUM_CPU_CORES);
357 while (cores_pending_reschedule != 0) {
358 u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule);
359 ASSERT(core < Core::Hardware::NUM_CPU_CORES);
360 if (!must_context_switch || core != current_core) {
361 auto& phys_core = kernel.PhysicalCore(core);
362 phys_core.Interrupt();
363 } else {
364 must_context_switch = true;
365 }
366 cores_pending_reschedule &= ~(1ul << core);
367 }
368 if (must_context_switch) {
369 auto& core_scheduler = kernel.CurrentScheduler();
370 kernel.ExitSVCProfile();
371 core_scheduler.TryDoContextSwitch();
372 kernel.EnterSVCProfile();
373 }
374}
375
295void GlobalScheduler::Suggest(u32 priority, std::size_t core, Thread* thread) { 376void GlobalScheduler::Suggest(u32 priority, std::size_t core, Thread* thread) {
377 ASSERT(is_locked);
296 suggested_queue[core].add(thread, priority); 378 suggested_queue[core].add(thread, priority);
297} 379}
298 380
299void GlobalScheduler::Unsuggest(u32 priority, std::size_t core, Thread* thread) { 381void GlobalScheduler::Unsuggest(u32 priority, std::size_t core, Thread* thread) {
382 ASSERT(is_locked);
300 suggested_queue[core].remove(thread, priority); 383 suggested_queue[core].remove(thread, priority);
301} 384}
302 385
303void GlobalScheduler::Schedule(u32 priority, std::size_t core, Thread* thread) { 386void GlobalScheduler::Schedule(u32 priority, std::size_t core, Thread* thread) {
387 ASSERT(is_locked);
304 ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core."); 388 ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
305 scheduled_queue[core].add(thread, priority); 389 scheduled_queue[core].add(thread, priority);
306} 390}
307 391
308void GlobalScheduler::SchedulePrepend(u32 priority, std::size_t core, Thread* thread) { 392void GlobalScheduler::SchedulePrepend(u32 priority, std::size_t core, Thread* thread) {
393 ASSERT(is_locked);
309 ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core."); 394 ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
310 scheduled_queue[core].add(thread, priority, false); 395 scheduled_queue[core].add(thread, priority, false);
311} 396}
312 397
313void GlobalScheduler::Reschedule(u32 priority, std::size_t core, Thread* thread) { 398void GlobalScheduler::Reschedule(u32 priority, std::size_t core, Thread* thread) {
399 ASSERT(is_locked);
314 scheduled_queue[core].remove(thread, priority); 400 scheduled_queue[core].remove(thread, priority);
315 scheduled_queue[core].add(thread, priority); 401 scheduled_queue[core].add(thread, priority);
316} 402}
317 403
318void GlobalScheduler::Unschedule(u32 priority, std::size_t core, Thread* thread) { 404void GlobalScheduler::Unschedule(u32 priority, std::size_t core, Thread* thread) {
405 ASSERT(is_locked);
319 scheduled_queue[core].remove(thread, priority); 406 scheduled_queue[core].remove(thread, priority);
320} 407}
321 408
322void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread* thread) { 409void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread* thread) {
410 ASSERT(is_locked);
323 const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT; 411 const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT;
324 const s32 source_core = thread->GetProcessorID(); 412 const s32 source_core = thread->GetProcessorID();
325 if (source_core == destination_core || !schedulable) { 413 if (source_core == destination_core || !schedulable) {
@@ -349,6 +437,108 @@ bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread,
349 } 437 }
350} 438}
351 439
440void GlobalScheduler::AdjustSchedulingOnStatus(Thread* thread, u32 old_flags) {
441 if (old_flags == thread->scheduling_state) {
442 return;
443 }
444 ASSERT(is_locked);
445
446 if (old_flags == static_cast<u32>(ThreadSchedStatus::Runnable)) {
447 // In this case the thread was running, now it's pausing/exiting
448 if (thread->processor_id >= 0) {
449 Unschedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
450 }
451
452 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
453 if (core != static_cast<u32>(thread->processor_id) &&
454 ((thread->affinity_mask >> core) & 1) != 0) {
455 Unsuggest(thread->current_priority, core, thread);
456 }
457 }
458 } else if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
459 // The thread is now set to running from being stopped
460 if (thread->processor_id >= 0) {
461 Schedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
462 }
463
464 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
465 if (core != static_cast<u32>(thread->processor_id) &&
466 ((thread->affinity_mask >> core) & 1) != 0) {
467 Suggest(thread->current_priority, core, thread);
468 }
469 }
470 }
471
472 SetReselectionPending();
473}
474
475void GlobalScheduler::AdjustSchedulingOnPriority(Thread* thread, u32 old_priority) {
476 if (thread->scheduling_state != static_cast<u32>(ThreadSchedStatus::Runnable)) {
477 return;
478 }
479 ASSERT(is_locked);
480 if (thread->processor_id >= 0) {
481 Unschedule(old_priority, static_cast<u32>(thread->processor_id), thread);
482 }
483
484 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
485 if (core != static_cast<u32>(thread->processor_id) &&
486 ((thread->affinity_mask >> core) & 1) != 0) {
487 Unsuggest(old_priority, core, thread);
488 }
489 }
490
491 if (thread->processor_id >= 0) {
492 if (thread == kernel.CurrentScheduler().GetCurrentThread()) {
493 SchedulePrepend(thread->current_priority, static_cast<u32>(thread->processor_id),
494 thread);
495 } else {
496 Schedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
497 }
498 }
499
500 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
501 if (core != static_cast<u32>(thread->processor_id) &&
502 ((thread->affinity_mask >> core) & 1) != 0) {
503 Suggest(thread->current_priority, core, thread);
504 }
505 }
506 thread->IncrementYieldCount();
507 SetReselectionPending();
508}
509
510void GlobalScheduler::AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinity_mask,
511 s32 old_core) {
512 if (thread->scheduling_state != static_cast<u32>(ThreadSchedStatus::Runnable) ||
513 thread->current_priority >= THREADPRIO_COUNT) {
514 return;
515 }
516 ASSERT(is_locked);
517
518 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
519 if (((old_affinity_mask >> core) & 1) != 0) {
520 if (core == static_cast<u32>(old_core)) {
521 Unschedule(thread->current_priority, core, thread);
522 } else {
523 Unsuggest(thread->current_priority, core, thread);
524 }
525 }
526 }
527
528 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
529 if (((thread->affinity_mask >> core) & 1) != 0) {
530 if (core == static_cast<u32>(thread->processor_id)) {
531 Schedule(thread->current_priority, core, thread);
532 } else {
533 Suggest(thread->current_priority, core, thread);
534 }
535 }
536 }
537
538 thread->IncrementYieldCount();
539 SetReselectionPending();
540}
541
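AdjustSchedulingOnAffinity re-files the thread per core: for every core allowed by the affinity mask it is scheduled on its assigned core and only suggested on the others. A small sketch of that mask walk follows, assuming a toy affinity value and assigned core, with prints standing in for Schedule/Suggest; none of these values come from the change itself.

#include <cstdint>
#include <cstdio>

int main() {
    constexpr std::uint32_t num_cores = 4;
    const std::uint64_t affinity_mask = 0b1011; // hypothetical: thread may run on cores 0, 1 and 3
    const std::int32_t assigned_core = 1;       // hypothetical current processor_id

    for (std::uint32_t core = 0; core < num_cores; ++core) {
        if (((affinity_mask >> core) & 1) == 0) {
            continue; // core not allowed by the affinity mask
        }
        if (core == static_cast<std::uint32_t>(assigned_core)) {
            std::printf("core %u: Schedule (thread's own core)\n", core);
        } else {
            std::printf("core %u: Suggest (eligible to be picked up by an idle core)\n", core);
        }
    }
    return 0;
}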
352void GlobalScheduler::Shutdown() { 542void GlobalScheduler::Shutdown() {
353 for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { 543 for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
354 scheduled_queue[core].clear(); 544 scheduled_queue[core].clear();
@@ -359,10 +549,12 @@ void GlobalScheduler::Shutdown() {
359 549
360void GlobalScheduler::Lock() { 550void GlobalScheduler::Lock() {
361 Core::EmuThreadHandle current_thread = kernel.GetCurrentEmuThreadID(); 551 Core::EmuThreadHandle current_thread = kernel.GetCurrentEmuThreadID();
552 ASSERT(!current_thread.IsInvalid());
362 if (current_thread == current_owner) { 553 if (current_thread == current_owner) {
363 ++scope_lock; 554 ++scope_lock;
364 } else { 555 } else {
365 inner_lock.lock(); 556 inner_lock.lock();
557 is_locked = true;
366 current_owner = current_thread; 558 current_owner = current_thread;
367 ASSERT(current_owner != Core::EmuThreadHandle::InvalidHandle()); 559 ASSERT(current_owner != Core::EmuThreadHandle::InvalidHandle());
368 scope_lock = 1; 560 scope_lock = 1;
@@ -374,17 +566,18 @@ void GlobalScheduler::Unlock() {
374 ASSERT(scope_lock > 0); 566 ASSERT(scope_lock > 0);
375 return; 567 return;
376 } 568 }
377 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { 569 u32 cores_pending_reschedule = SelectThreads();
378 SelectThread(i); 570 Core::EmuThreadHandle leaving_thread = current_owner;
379 }
380 current_owner = Core::EmuThreadHandle::InvalidHandle(); 571 current_owner = Core::EmuThreadHandle::InvalidHandle();
381 scope_lock = 1; 572 scope_lock = 1;
573 is_locked = false;
382 inner_lock.unlock(); 574 inner_lock.unlock();
383 // TODO(Blinkhawk): Setup the interrupts and change context on current core. 575 EnableInterruptAndSchedule(cores_pending_reschedule, leaving_thread);
384} 576}
385 577
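Lock() and Unlock() implement a re-entrant lock keyed to the owning emulated thread: a repeated Lock() by the current owner only bumps scope_lock, and only the outermost Unlock() releases and triggers reselection. The sketch below shows that ownership-counting idea in isolation, assuming host std::thread identities and a std::mutex instead of the kernel's EmuThreadHandle and SpinLock; the class and its names are illustrative only.

#include <atomic>
#include <cassert>
#include <mutex>
#include <thread>

// Illustrative re-entrant lock in the spirit of GlobalScheduler::Lock/Unlock:
// the same owner may lock repeatedly; only the outermost Unlock releases.
class RecursiveOwnerLock {
public:
    void Lock() {
        const auto me = std::this_thread::get_id();
        if (owner.load() == me) {
            ++depth; // nested acquisition by the current owner
            return;
        }
        inner.lock();
        owner.store(me);
        depth = 1;
    }

    void Unlock() {
        assert(owner.load() == std::this_thread::get_id());
        if (--depth > 0) {
            return; // still nested, keep ownership
        }
        owner.store(std::thread::id{}); // no owner
        inner.unlock();
    }

private:
    std::mutex inner;
    std::atomic<std::thread::id> owner{};
    int depth = 0;
};

int main() {
    RecursiveOwnerLock lock;
    lock.Lock();
    lock.Lock();   // nested, does not deadlock
    lock.Unlock(); // still held
    lock.Unlock(); // released here
    return 0;
}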
386Scheduler::Scheduler(Core::System& system, std::size_t core_id) 578Scheduler::Scheduler(Core::System& system, std::size_t core_id) : system(system), core_id(core_id) {
387 : system{system}, core_id{core_id} {} 579 switch_fiber = std::make_shared<Common::Fiber>(std::function<void(void*)>(OnSwitch), this);
580}
388 581
389Scheduler::~Scheduler() = default; 582Scheduler::~Scheduler() = default;
390 583
@@ -393,56 +586,128 @@ bool Scheduler::HaveReadyThreads() const {
393} 586}
394 587
395Thread* Scheduler::GetCurrentThread() const { 588Thread* Scheduler::GetCurrentThread() const {
396 return current_thread.get(); 589 if (current_thread) {
590 return current_thread.get();
591 }
592 return idle_thread.get();
397} 593}
398 594
399Thread* Scheduler::GetSelectedThread() const { 595Thread* Scheduler::GetSelectedThread() const {
400 return selected_thread.get(); 596 return selected_thread.get();
401} 597}
402 598
403void Scheduler::SelectThreads() {
404 system.GlobalScheduler().SelectThread(core_id);
405}
406
407u64 Scheduler::GetLastContextSwitchTicks() const { 599u64 Scheduler::GetLastContextSwitchTicks() const {
408 return last_context_switch_time; 600 return last_context_switch_time;
409} 601}
410 602
411void Scheduler::TryDoContextSwitch() { 603void Scheduler::TryDoContextSwitch() {
604 auto& phys_core = system.Kernel().CurrentPhysicalCore();
605 if (phys_core.IsInterrupted()) {
606 phys_core.ClearInterrupt();
607 }
608 guard.lock();
412 if (is_context_switch_pending) { 609 if (is_context_switch_pending) {
413 SwitchContext(); 610 SwitchContext();
611 } else {
612 guard.unlock();
414 } 613 }
415} 614}
416 615
417void Scheduler::UnloadThread() { 616void Scheduler::OnThreadStart() {
418 Thread* const previous_thread = GetCurrentThread(); 617 SwitchContextStep2();
419 Process* const previous_process = system.Kernel().CurrentProcess(); 618}
420 619
421 UpdateLastContextSwitchTime(previous_thread, previous_process); 620void Scheduler::Unload() {
621 Thread* thread = current_thread.get();
622 if (thread) {
623 thread->SetContinuousOnSVC(false);
624 thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
625 thread->SetIsRunning(false);
626 if (!thread->IsHLEThread() && !thread->HasExited()) {
627 Core::ARM_Interface& cpu_core = thread->ArmInterface();
628 cpu_core.SaveContext(thread->GetContext32());
629 cpu_core.SaveContext(thread->GetContext64());
630 // Save the TPIDR_EL0 system register in case it was modified.
631 thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
632 cpu_core.ClearExclusiveState();
633 }
634 thread->context_guard.unlock();
635 }
636}
422 637
423 // Save context for previous thread 638void Scheduler::Reload() {
424 if (previous_thread) { 639 Thread* thread = current_thread.get();
425 system.ArmInterface(core_id).SaveContext(previous_thread->GetContext32()); 640 if (thread) {
426 system.ArmInterface(core_id).SaveContext(previous_thread->GetContext64()); 641 ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
427 // Save the TPIDR_EL0 system register in case it was modified. 642 "Thread must be runnable.");
428 previous_thread->SetTPIDR_EL0(system.ArmInterface(core_id).GetTPIDR_EL0()); 643
644 // Cancel any outstanding wakeup events for this thread
645 thread->SetIsRunning(true);
646 thread->SetWasRunning(false);
647 thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
429 648
430 if (previous_thread->GetStatus() == ThreadStatus::Running) { 649 auto* const thread_owner_process = thread->GetOwnerProcess();
431 // This is only the case when a reschedule is triggered without the current thread 650 if (thread_owner_process != nullptr) {
432 // yielding execution (i.e. an event triggered, system core time-sliced, etc) 651 system.Kernel().MakeCurrentProcess(thread_owner_process);
433 previous_thread->SetStatus(ThreadStatus::Ready); 652 }
653 if (!thread->IsHLEThread()) {
654 Core::ARM_Interface& cpu_core = thread->ArmInterface();
655 cpu_core.LoadContext(thread->GetContext32());
656 cpu_core.LoadContext(thread->GetContext64());
657 cpu_core.SetTlsAddress(thread->GetTLSAddress());
658 cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
659 cpu_core.ChangeProcessorID(this->core_id);
660 cpu_core.ClearExclusiveState();
434 } 661 }
435 previous_thread->SetIsRunning(false);
436 } 662 }
437 current_thread = nullptr; 663}
664
665void Scheduler::SwitchContextStep2() {
666 Thread* previous_thread = current_thread_prev.get();
667 Thread* new_thread = selected_thread.get();
668
669 // Load context of new thread
670 Process* const previous_process =
671 previous_thread != nullptr ? previous_thread->GetOwnerProcess() : nullptr;
672
673 if (new_thread) {
674 ASSERT_MSG(new_thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
675 "Thread must be runnable.");
676
677 // Cancel any outstanding wakeup events for this thread
678 new_thread->SetIsRunning(true);
679 new_thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
680 new_thread->SetWasRunning(false);
681
682 auto* const thread_owner_process = current_thread->GetOwnerProcess();
683 if (thread_owner_process != nullptr) {
684 system.Kernel().MakeCurrentProcess(thread_owner_process);
685 }
686 if (!new_thread->IsHLEThread()) {
687 Core::ARM_Interface& cpu_core = new_thread->ArmInterface();
688 cpu_core.LoadContext(new_thread->GetContext32());
689 cpu_core.LoadContext(new_thread->GetContext64());
690 cpu_core.SetTlsAddress(new_thread->GetTLSAddress());
691 cpu_core.SetTPIDR_EL0(new_thread->GetTPIDR_EL0());
692 cpu_core.ChangeProcessorID(this->core_id);
693 cpu_core.ClearExclusiveState();
694 }
695 }
696
697 TryDoContextSwitch();
438} 698}
439 699
440void Scheduler::SwitchContext() { 700void Scheduler::SwitchContext() {
441 Thread* const previous_thread = GetCurrentThread(); 701 current_thread_prev = current_thread;
442 Thread* const new_thread = GetSelectedThread(); 702 selected_thread = selected_thread_set;
703 Thread* previous_thread = current_thread_prev.get();
704 Thread* new_thread = selected_thread.get();
705 current_thread = selected_thread;
443 706
444 is_context_switch_pending = false; 707 is_context_switch_pending = false;
708
445 if (new_thread == previous_thread) { 709 if (new_thread == previous_thread) {
710 guard.unlock();
446 return; 711 return;
447 } 712 }
448 713
@@ -452,51 +717,76 @@ void Scheduler::SwitchContext() {
452 717
453 // Save context for previous thread 718 // Save context for previous thread
454 if (previous_thread) { 719 if (previous_thread) {
455 system.ArmInterface(core_id).SaveContext(previous_thread->GetContext32()); 720 if (new_thread != nullptr && new_thread->IsSuspendThread()) {
456 system.ArmInterface(core_id).SaveContext(previous_thread->GetContext64()); 721 previous_thread->SetWasRunning(true);
457 // Save the TPIDR_EL0 system register in case it was modified.
458 previous_thread->SetTPIDR_EL0(system.ArmInterface(core_id).GetTPIDR_EL0());
459
460 if (previous_thread->GetStatus() == ThreadStatus::Running) {
461 // This is only the case when a reschedule is triggered without the current thread
462 // yielding execution (i.e. an event triggered, system core time-sliced, etc)
463 previous_thread->SetStatus(ThreadStatus::Ready);
464 } 722 }
723 previous_thread->SetContinuousOnSVC(false);
724 previous_thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
465 previous_thread->SetIsRunning(false); 725 previous_thread->SetIsRunning(false);
726 if (!previous_thread->IsHLEThread() && !previous_thread->HasExited()) {
727 Core::ARM_Interface& cpu_core = previous_thread->ArmInterface();
728 cpu_core.SaveContext(previous_thread->GetContext32());
729 cpu_core.SaveContext(previous_thread->GetContext64());
730 // Save the TPIDR_EL0 system register in case it was modified.
731 previous_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
732 cpu_core.ClearExclusiveState();
733 }
734 previous_thread->context_guard.unlock();
466 } 735 }
467 736
468 // Load context of new thread 737 std::shared_ptr<Common::Fiber>* old_context;
469 if (new_thread) { 738 if (previous_thread != nullptr) {
470 ASSERT_MSG(new_thread->GetProcessorID() == s32(this->core_id), 739 old_context = &previous_thread->GetHostContext();
471 "Thread must be assigned to this core."); 740 } else {
472 ASSERT_MSG(new_thread->GetStatus() == ThreadStatus::Ready, 741 old_context = &idle_thread->GetHostContext();
473 "Thread must be ready to become running."); 742 }
743 guard.unlock();
474 744
475 // Cancel any outstanding wakeup events for this thread 745 Common::Fiber::YieldTo(*old_context, switch_fiber);
476 new_thread->CancelWakeupTimer(); 746 /// When a thread wakes up, the scheduler may have changed to the one on another core.
477 current_thread = SharedFrom(new_thread); 747 auto& next_scheduler = system.Kernel().CurrentScheduler();
478 new_thread->SetStatus(ThreadStatus::Running); 748 next_scheduler.SwitchContextStep2();
479 new_thread->SetIsRunning(true); 749}
480 750
481 auto* const thread_owner_process = current_thread->GetOwnerProcess(); 751void Scheduler::OnSwitch(void* this_scheduler) {
482 if (previous_process != thread_owner_process) { 752 Scheduler* sched = static_cast<Scheduler*>(this_scheduler);
483 system.Kernel().MakeCurrentProcess(thread_owner_process); 753 sched->SwitchToCurrent();
484 } 754}
485 755
486 system.ArmInterface(core_id).LoadContext(new_thread->GetContext32()); 756void Scheduler::SwitchToCurrent() {
487 system.ArmInterface(core_id).LoadContext(new_thread->GetContext64()); 757 while (true) {
488 system.ArmInterface(core_id).SetTlsAddress(new_thread->GetTLSAddress()); 758 {
489 system.ArmInterface(core_id).SetTPIDR_EL0(new_thread->GetTPIDR_EL0()); 759 std::scoped_lock lock{guard};
490 } else { 760 selected_thread = selected_thread_set;
491 current_thread = nullptr; 761 current_thread = selected_thread;
492 // Note: We do not reset the current process and current page table when idling because 762 is_context_switch_pending = false;
493 // technically we haven't changed processes, our threads are just paused. 763 }
764 while (!is_context_switch_pending) {
765 if (current_thread != nullptr && !current_thread->IsHLEThread()) {
766 current_thread->context_guard.lock();
767 if (!current_thread->IsRunnable()) {
768 current_thread->context_guard.unlock();
769 break;
770 }
771 if (current_thread->GetProcessorID() != core_id) {
772 current_thread->context_guard.unlock();
773 break;
774 }
775 }
776 std::shared_ptr<Common::Fiber>* next_context;
777 if (current_thread != nullptr) {
778 next_context = &current_thread->GetHostContext();
779 } else {
780 next_context = &idle_thread->GetHostContext();
781 }
782 Common::Fiber::YieldTo(switch_fiber, *next_context);
783 }
494 } 784 }
495} 785}
496 786
497void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) { 787void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
498 const u64 prev_switch_ticks = last_context_switch_time; 788 const u64 prev_switch_ticks = last_context_switch_time;
499 const u64 most_recent_switch_ticks = system.CoreTiming().GetTicks(); 789 const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
500 const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks; 790 const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
501 791
502 if (thread != nullptr) { 792 if (thread != nullptr) {
@@ -510,6 +800,16 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
510 last_context_switch_time = most_recent_switch_ticks; 800 last_context_switch_time = most_recent_switch_ticks;
511} 801}
512 802
803void Scheduler::Initialize() {
804 std::string name = "Idle Thread Id:" + std::to_string(core_id);
805 std::function<void(void*)> init_func = system.GetCpuManager().GetIdleThreadStartFunc();
806 void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
807 ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
808 auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
809 nullptr, std::move(init_func), init_func_parameter);
810 idle_thread = std::move(thread_res).Unwrap();
811}
812
513void Scheduler::Shutdown() { 813void Scheduler::Shutdown() {
514 current_thread = nullptr; 814 current_thread = nullptr;
515 selected_thread = nullptr; 815 selected_thread = nullptr;
@@ -538,4 +838,13 @@ SchedulerLockAndSleep::~SchedulerLockAndSleep() {
538 time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds); 838 time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
539} 839}
540 840
841void SchedulerLockAndSleep::Release() {
842 if (sleep_cancelled) {
843 return;
844 }
845 auto& time_manager = kernel.TimeManager();
846 time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
847 sleep_cancelled = true;
848}
849
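The new SchedulerLockAndSleep::Release() arms the timeout event immediately and flips sleep_cancelled so the destructor does not arm it a second time. The following is a minimal sketch of that "arm exactly once" idea, assuming a plain std::function in place of TimeManager::ScheduleTimeEvent; the ScopedTimeout name and its members are invented for illustration.

#include <cstdio>
#include <functional>

// Illustrative helper: the timeout is normally armed on destruction, but Release()
// can arm it early, and the cancelled flag guarantees it is never armed twice.
class ScopedTimeout {
public:
    explicit ScopedTimeout(std::function<void()> arm_timeout) : arm(std::move(arm_timeout)) {}

    ~ScopedTimeout() {
        if (!cancelled) {
            arm();
        }
    }

    void Cancel() {
        cancelled = true; // e.g. the event already fired or the wait was aborted
    }

    void Release() {
        if (cancelled) {
            return;
        }
        arm();            // arm the timeout now instead of at scope exit
        cancelled = true; // make the destructor a no-op
    }

private:
    std::function<void()> arm;
    bool cancelled = false;
};

int main() {
    ScopedTimeout timeout([] { std::printf("timeout armed\n"); });
    timeout.Release(); // prints once; the destructor will not arm it again
    return 0;
}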
541} // namespace Kernel 850} // namespace Kernel
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index 07df33f9c..b3b4b5169 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -11,9 +11,14 @@
11 11
12#include "common/common_types.h" 12#include "common/common_types.h"
13#include "common/multi_level_queue.h" 13#include "common/multi_level_queue.h"
14#include "common/spin_lock.h"
14#include "core/hardware_properties.h" 15#include "core/hardware_properties.h"
15#include "core/hle/kernel/thread.h" 16#include "core/hle/kernel/thread.h"
16 17
18namespace Common {
19class Fiber;
20}
21
17namespace Core { 22namespace Core {
18class ARM_Interface; 23class ARM_Interface;
19class System; 24class System;
@@ -41,41 +46,17 @@ public:
41 return thread_list; 46 return thread_list;
42 } 47 }
43 48
44 /** 49 /// Notify the scheduler a thread's status has changed.
45 * Add a thread to the suggested queue of a cpu core. Suggested threads may be 50 void AdjustSchedulingOnStatus(Thread* thread, u32 old_flags);
46 * picked if no thread is scheduled to run on the core.
47 */
48 void Suggest(u32 priority, std::size_t core, Thread* thread);
49
50 /**
51 * Remove a thread to the suggested queue of a cpu core. Suggested threads may be
52 * picked if no thread is scheduled to run on the core.
53 */
54 void Unsuggest(u32 priority, std::size_t core, Thread* thread);
55
56 /**
57 * Add a thread to the scheduling queue of a cpu core. The thread is added at the
58 * back the queue in its priority level.
59 */
60 void Schedule(u32 priority, std::size_t core, Thread* thread);
61
62 /**
63 * Add a thread to the scheduling queue of a cpu core. The thread is added at the
64 * front the queue in its priority level.
65 */
66 void SchedulePrepend(u32 priority, std::size_t core, Thread* thread);
67 51
68 /// Reschedule an already scheduled thread based on a new priority 52 /// Notify the scheduler a thread's priority has changed.
69 void Reschedule(u32 priority, std::size_t core, Thread* thread); 53 void AdjustSchedulingOnPriority(Thread* thread, u32 old_priority);
70
71 /// Unschedules a thread.
72 void Unschedule(u32 priority, std::size_t core, Thread* thread);
73 54
74 /// Selects a core and forces it to unload its current thread's context 55 /// Notify the scheduler a thread's core and/or affinity mask has changed.
75 void UnloadThread(std::size_t core); 56 void AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinity_mask, s32 old_core);
76 57
77 /** 58 /**
78 * Takes care of selecting the new scheduled thread in three steps: 59 * Takes care of selecting the new scheduled threads in three steps:
79 * 60 *
80 * 1. First a thread is selected from the top of the priority queue. If no thread 61 * 1. First a thread is selected from the top of the priority queue. If no thread
81 * is obtained then we move to step two, else we are done. 62 * is obtained then we move to step two, else we are done.
@@ -85,8 +66,10 @@ public:
85 * 66 *
86 * 3. Third, if no suggested thread is found, we do a second pass and pick a running 67
87 * thread in another core and swap it with its current thread. 68 * thread in another core and swap it with its current thread.
69 *
70 * Returns the set of cores needing rescheduling.
88 */ 71 */
89 void SelectThread(std::size_t core); 72 u32 SelectThreads();
90 73
91 bool HaveReadyThreads(std::size_t core_id) const { 74 bool HaveReadyThreads(std::size_t core_id) const {
92 return !scheduled_queue[core_id].empty(); 75 return !scheduled_queue[core_id].empty();
@@ -149,6 +132,40 @@ private:
149 /// Unlocks the scheduler, reselects threads, interrupts cores for rescheduling 132 /// Unlocks the scheduler, reselects threads, interrupts cores for rescheduling
150 /// and reschedules current core if needed. 133 /// and reschedules current core if needed.
151 void Unlock(); 134 void Unlock();
135
136 void EnableInterruptAndSchedule(u32 cores_pending_reschedule,
137 Core::EmuThreadHandle global_thread);
138
139 /**
140 * Add a thread to the suggested queue of a cpu core. Suggested threads may be
141 * picked if no thread is scheduled to run on the core.
142 */
143 void Suggest(u32 priority, std::size_t core, Thread* thread);
144
145 /**
146 * Remove a thread from the suggested queue of a cpu core. Suggested threads may be
147 * picked if no thread is scheduled to run on the core.
148 */
149 void Unsuggest(u32 priority, std::size_t core, Thread* thread);
150
151 /**
152 * Add a thread to the scheduling queue of a cpu core. The thread is added at the
153 * back of the queue in its priority level.
154 */
155 void Schedule(u32 priority, std::size_t core, Thread* thread);
156
157 /**
158 * Add a thread to the scheduling queue of a cpu core. The thread is added at the
159 * front of the queue in its priority level.
160 */
161 void SchedulePrepend(u32 priority, std::size_t core, Thread* thread);
162
163 /// Reschedule an already scheduled thread based on a new priority
164 void Reschedule(u32 priority, std::size_t core, Thread* thread);
165
166 /// Unschedules a thread.
167 void Unschedule(u32 priority, std::size_t core, Thread* thread);
168
152 /** 169 /**
153 * Transfers a thread into a specific core. If the destination_core is -1 170
154 * it will be unscheduled from its source core and added into its suggested 171
@@ -170,10 +187,13 @@ private:
170 std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62}; 187 std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62};
171 188
172 /// Scheduler lock mechanisms. 189 /// Scheduler lock mechanisms.
173 std::mutex inner_lock{}; // TODO(Blinkhawk): Replace for a SpinLock 190 bool is_locked{};
191 Common::SpinLock inner_lock{};
174 std::atomic<s64> scope_lock{}; 192 std::atomic<s64> scope_lock{};
175 Core::EmuThreadHandle current_owner{Core::EmuThreadHandle::InvalidHandle()}; 193 Core::EmuThreadHandle current_owner{Core::EmuThreadHandle::InvalidHandle()};
176 194
195 Common::SpinLock global_list_guard{};
196
177 /// Lists all thread ids that aren't deleted/etc. 197 /// Lists all thread ids that aren't deleted/etc.
178 std::vector<std::shared_ptr<Thread>> thread_list; 198 std::vector<std::shared_ptr<Thread>> thread_list;
179 KernelCore& kernel; 199 KernelCore& kernel;
@@ -190,11 +210,11 @@ public:
190 /// Reschedules to the next available thread (call after current thread is suspended) 210 /// Reschedules to the next available thread (call after current thread is suspended)
191 void TryDoContextSwitch(); 211 void TryDoContextSwitch();
192 212
193 /// Unloads currently running thread 213 /// The next two are for single-core mode only.
194 void UnloadThread(); 214 /// Unload current thread before preempting core.
195 215 void Unload();
196 /// Select the threads in top of the scheduling multilist. 216 /// Reload current thread after core preemption.
197 void SelectThreads(); 217 void Reload();
198 218
199 /// Gets the current running thread 219 /// Gets the current running thread
200 Thread* GetCurrentThread() const; 220 Thread* GetCurrentThread() const;
@@ -209,15 +229,30 @@ public:
209 return is_context_switch_pending; 229 return is_context_switch_pending;
210 } 230 }
211 231
232 void Initialize();
233
212 /// Shuts down the scheduler. 234
213 void Shutdown(); 235 void Shutdown();
214 236
237 void OnThreadStart();
238
239 std::shared_ptr<Common::Fiber>& ControlContext() {
240 return switch_fiber;
241 }
242
243 const std::shared_ptr<Common::Fiber>& ControlContext() const {
244 return switch_fiber;
245 }
246
215private: 247private:
216 friend class GlobalScheduler; 248 friend class GlobalScheduler;
217 249
218 /// Switches the CPU's active thread context to that of the specified thread 250 /// Switches the CPU's active thread context to that of the specified thread
219 void SwitchContext(); 251 void SwitchContext();
220 252
253 /// When a thread wakes up, it must run this through its new scheduler
254 void SwitchContextStep2();
255
221 /** 256 /**
222 * Called on every context switch to update the internal timestamp 257 * Called on every context switch to update the internal timestamp
223 * This also updates the running time ticks for the given thread and 258 * This also updates the running time ticks for the given thread and
@@ -231,14 +266,24 @@ private:
231 */ 266 */
232 void UpdateLastContextSwitchTime(Thread* thread, Process* process); 267 void UpdateLastContextSwitchTime(Thread* thread, Process* process);
233 268
269 static void OnSwitch(void* this_scheduler);
270 void SwitchToCurrent();
271
234 std::shared_ptr<Thread> current_thread = nullptr; 272 std::shared_ptr<Thread> current_thread = nullptr;
235 std::shared_ptr<Thread> selected_thread = nullptr; 273 std::shared_ptr<Thread> selected_thread = nullptr;
274 std::shared_ptr<Thread> current_thread_prev = nullptr;
275 std::shared_ptr<Thread> selected_thread_set = nullptr;
276 std::shared_ptr<Thread> idle_thread = nullptr;
277
278 std::shared_ptr<Common::Fiber> switch_fiber = nullptr;
236 279
237 Core::System& system; 280 Core::System& system;
238 u64 last_context_switch_time = 0; 281 u64 last_context_switch_time = 0;
239 u64 idle_selection_count = 0; 282 u64 idle_selection_count = 0;
240 const std::size_t core_id; 283 const std::size_t core_id;
241 284
285 Common::SpinLock guard{};
286
242 bool is_context_switch_pending = false; 287 bool is_context_switch_pending = false;
243}; 288};
244 289
@@ -261,6 +306,8 @@ public:
261 sleep_cancelled = true; 306 sleep_cancelled = true;
262 } 307 }
263 308
309 void Release();
310
264private: 311private:
265 Handle& event_handle; 312 Handle& event_handle;
266 Thread* time_task; 313 Thread* time_task;
diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp
index 25438b86b..7b23a6889 100644
--- a/src/core/hle/kernel/server_session.cpp
+++ b/src/core/hle/kernel/server_session.cpp
@@ -17,6 +17,7 @@
17#include "core/hle/kernel/hle_ipc.h" 17#include "core/hle/kernel/hle_ipc.h"
18#include "core/hle/kernel/kernel.h" 18#include "core/hle/kernel/kernel.h"
19#include "core/hle/kernel/process.h" 19#include "core/hle/kernel/process.h"
20#include "core/hle/kernel/scheduler.h"
20#include "core/hle/kernel/server_session.h" 21#include "core/hle/kernel/server_session.h"
21#include "core/hle/kernel/session.h" 22#include "core/hle/kernel/session.h"
22#include "core/hle/kernel/thread.h" 23#include "core/hle/kernel/thread.h"
@@ -168,9 +169,12 @@ ResultCode ServerSession::CompleteSyncRequest() {
168 } 169 }
169 170
170 // Some service requests require the thread to block 171 // Some service requests require the thread to block
171 if (!context.IsThreadWaiting()) { 172 {
172 context.GetThread().ResumeFromWait(); 173 SchedulerLock lock(kernel);
173 context.GetThread().SetWaitSynchronizationResult(result); 174 if (!context.IsThreadWaiting()) {
175 context.GetThread().ResumeFromWait();
176 context.GetThread().SetSynchronizationResults(nullptr, result);
177 }
174 } 178 }
175 179
176 request_queue.Pop(); 180 request_queue.Pop();
@@ -180,8 +184,10 @@ ResultCode ServerSession::CompleteSyncRequest() {
180 184
181ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread, 185ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread,
182 Core::Memory::Memory& memory) { 186 Core::Memory::Memory& memory) {
183 Core::System::GetInstance().CoreTiming().ScheduleEvent(20000, request_event, {}); 187 ResultCode result = QueueSyncRequest(std::move(thread), memory);
184 return QueueSyncRequest(std::move(thread), memory); 188 const u64 delay = kernel.IsMulticore() ? 0U : 20000U;
189 Core::System::GetInstance().CoreTiming().ScheduleEvent(delay, request_event, {});
190 return result;
185} 191}
186 192
187} // namespace Kernel 193} // namespace Kernel
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 4ae4529f5..5db19dcf3 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -10,14 +10,15 @@
10 10
11#include "common/alignment.h" 11#include "common/alignment.h"
12#include "common/assert.h" 12#include "common/assert.h"
13#include "common/fiber.h"
13#include "common/logging/log.h" 14#include "common/logging/log.h"
14#include "common/microprofile.h" 15#include "common/microprofile.h"
15#include "common/string_util.h" 16#include "common/string_util.h"
16#include "core/arm/exclusive_monitor.h" 17#include "core/arm/exclusive_monitor.h"
17#include "core/core.h" 18#include "core/core.h"
18#include "core/core_manager.h"
19#include "core/core_timing.h" 19#include "core/core_timing.h"
20#include "core/core_timing_util.h" 20#include "core/core_timing_util.h"
21#include "core/cpu_manager.h"
21#include "core/hle/kernel/address_arbiter.h" 22#include "core/hle/kernel/address_arbiter.h"
22#include "core/hle/kernel/client_port.h" 23#include "core/hle/kernel/client_port.h"
23#include "core/hle/kernel/client_session.h" 24#include "core/hle/kernel/client_session.h"
@@ -27,6 +28,7 @@
27#include "core/hle/kernel/memory/memory_block.h" 28#include "core/hle/kernel/memory/memory_block.h"
28#include "core/hle/kernel/memory/page_table.h" 29#include "core/hle/kernel/memory/page_table.h"
29#include "core/hle/kernel/mutex.h" 30#include "core/hle/kernel/mutex.h"
31#include "core/hle/kernel/physical_core.h"
30#include "core/hle/kernel/process.h" 32#include "core/hle/kernel/process.h"
31#include "core/hle/kernel/readable_event.h" 33#include "core/hle/kernel/readable_event.h"
32#include "core/hle/kernel/resource_limit.h" 34#include "core/hle/kernel/resource_limit.h"
@@ -37,6 +39,7 @@
37#include "core/hle/kernel/svc_wrap.h" 39#include "core/hle/kernel/svc_wrap.h"
38#include "core/hle/kernel/synchronization.h" 40#include "core/hle/kernel/synchronization.h"
39#include "core/hle/kernel/thread.h" 41#include "core/hle/kernel/thread.h"
42#include "core/hle/kernel/time_manager.h"
40#include "core/hle/kernel/transfer_memory.h" 43#include "core/hle/kernel/transfer_memory.h"
41#include "core/hle/kernel/writable_event.h" 44#include "core/hle/kernel/writable_event.h"
42#include "core/hle/lock.h" 45#include "core/hle/lock.h"
@@ -133,6 +136,7 @@ enum class ResourceLimitValueType {
133 136
134ResultVal<s64> RetrieveResourceLimitValue(Core::System& system, Handle resource_limit, 137ResultVal<s64> RetrieveResourceLimitValue(Core::System& system, Handle resource_limit,
135 u32 resource_type, ResourceLimitValueType value_type) { 138 u32 resource_type, ResourceLimitValueType value_type) {
139 std::lock_guard lock{HLE::g_hle_lock};
136 const auto type = static_cast<ResourceType>(resource_type); 140 const auto type = static_cast<ResourceType>(resource_type);
137 if (!IsValidResourceType(type)) { 141 if (!IsValidResourceType(type)) {
138 LOG_ERROR(Kernel_SVC, "Invalid resource limit type: '{}'", resource_type); 142 LOG_ERROR(Kernel_SVC, "Invalid resource limit type: '{}'", resource_type);
@@ -160,6 +164,7 @@ ResultVal<s64> RetrieveResourceLimitValue(Core::System& system, Handle resource_
160 164
161/// Set the process heap to a given Size. It can both extend and shrink the heap. 165/// Set the process heap to a given Size. It can both extend and shrink the heap.
162static ResultCode SetHeapSize(Core::System& system, VAddr* heap_addr, u64 heap_size) { 166static ResultCode SetHeapSize(Core::System& system, VAddr* heap_addr, u64 heap_size) {
167 std::lock_guard lock{HLE::g_hle_lock};
163 LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", heap_size); 168 LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", heap_size);
164 169
165 // Size must be a multiple of 0x200000 (2MB) and be equal to or less than 8GB. 170 // Size must be a multiple of 0x200000 (2MB) and be equal to or less than 8GB.
@@ -190,6 +195,7 @@ static ResultCode SetHeapSize32(Core::System& system, u32* heap_addr, u32 heap_s
190 195
191static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask, 196static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask,
192 u32 attribute) { 197 u32 attribute) {
198 std::lock_guard lock{HLE::g_hle_lock};
193 LOG_DEBUG(Kernel_SVC, 199 LOG_DEBUG(Kernel_SVC,
194 "called, address=0x{:016X}, size=0x{:X}, mask=0x{:08X}, attribute=0x{:08X}", address, 200 "called, address=0x{:016X}, size=0x{:X}, mask=0x{:08X}, attribute=0x{:08X}", address,
195 size, mask, attribute); 201 size, mask, attribute);
@@ -226,8 +232,15 @@ static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 si
226 static_cast<Memory::MemoryAttribute>(attribute)); 232 static_cast<Memory::MemoryAttribute>(attribute));
227} 233}
228 234
235static ResultCode SetMemoryAttribute32(Core::System& system, u32 address, u32 size, u32 mask,
236 u32 attribute) {
237 return SetMemoryAttribute(system, static_cast<VAddr>(address), static_cast<std::size_t>(size),
238 mask, attribute);
239}
240
229/// Maps a memory range into a different range. 241/// Maps a memory range into a different range.
230static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) { 242static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
243 std::lock_guard lock{HLE::g_hle_lock};
231 LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr, 244 LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
232 src_addr, size); 245 src_addr, size);
233 246
@@ -241,8 +254,14 @@ static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr
241 return page_table.Map(dst_addr, src_addr, size); 254 return page_table.Map(dst_addr, src_addr, size);
242} 255}
243 256
257static ResultCode MapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
258 return MapMemory(system, static_cast<VAddr>(dst_addr), static_cast<VAddr>(src_addr),
259 static_cast<std::size_t>(size));
260}
261
244/// Unmaps a region that was previously mapped with svcMapMemory 262/// Unmaps a region that was previously mapped with svcMapMemory
245static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) { 263static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
264 std::lock_guard lock{HLE::g_hle_lock};
246 LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr, 265 LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
247 src_addr, size); 266 src_addr, size);
248 267
@@ -256,9 +275,15 @@ static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_ad
256 return page_table.Unmap(dst_addr, src_addr, size); 275 return page_table.Unmap(dst_addr, src_addr, size);
257} 276}
258 277
278static ResultCode UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
279 return UnmapMemory(system, static_cast<VAddr>(dst_addr), static_cast<VAddr>(src_addr),
280 static_cast<std::size_t>(size));
281}
282
259/// Connect to an OS service given the port name, returns the handle to the port to out 283/// Connect to an OS service given the port name, returns the handle to the port to out
260static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle, 284static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle,
261 VAddr port_name_address) { 285 VAddr port_name_address) {
286 std::lock_guard lock{HLE::g_hle_lock};
262 auto& memory = system.Memory(); 287 auto& memory = system.Memory();
263 288
264 if (!memory.IsValidVirtualAddress(port_name_address)) { 289 if (!memory.IsValidVirtualAddress(port_name_address)) {
@@ -317,11 +342,30 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
317 LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName()); 342 LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
318 343
319 auto thread = system.CurrentScheduler().GetCurrentThread(); 344 auto thread = system.CurrentScheduler().GetCurrentThread();
320 thread->InvalidateWakeupCallback(); 345 {
321 thread->SetStatus(ThreadStatus::WaitIPC); 346 SchedulerLock lock(system.Kernel());
322 system.PrepareReschedule(thread->GetProcessorID()); 347 thread->InvalidateHLECallback();
348 thread->SetStatus(ThreadStatus::WaitIPC);
349 session->SendSyncRequest(SharedFrom(thread), system.Memory());
350 }
351
352 if (thread->HasHLECallback()) {
353 Handle event_handle = thread->GetHLETimeEvent();
354 if (event_handle != InvalidHandle) {
355 auto& time_manager = system.Kernel().TimeManager();
356 time_manager.UnscheduleTimeEvent(event_handle);
357 }
358
359 {
360 SchedulerLock lock(system.Kernel());
361 auto* sync_object = thread->GetHLESyncObject();
362 sync_object->RemoveWaitingThread(SharedFrom(thread));
363 }
364
365 thread->InvokeHLECallback(SharedFrom(thread));
366 }
323 367
324 return session->SendSyncRequest(SharedFrom(thread), system.Memory()); 368 return thread->GetSignalingResult();
325} 369}
326 370
327static ResultCode SendSyncRequest32(Core::System& system, Handle handle) { 371static ResultCode SendSyncRequest32(Core::System& system, Handle handle) {
@@ -383,6 +427,15 @@ static ResultCode GetProcessId(Core::System& system, u64* process_id, Handle han
383 return ERR_INVALID_HANDLE; 427 return ERR_INVALID_HANDLE;
384} 428}
385 429
430static ResultCode GetProcessId32(Core::System& system, u32* process_id_low, u32* process_id_high,
431 Handle handle) {
432 u64 process_id{};
433 const auto result = GetProcessId(system, &process_id, handle);
434 *process_id_low = static_cast<u32>(process_id);
435 *process_id_high = static_cast<u32>(process_id >> 32);
436 return result;
437}
438
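GetProcessId32 hands the 64-bit process id back to 32-bit callers as two halves, low word first. A tiny standalone sketch of that split (and of reassembling the halves) follows; the sample id value is arbitrary and not taken from the change.

#include <cstdint>
#include <cstdio>

int main() {
    const std::uint64_t process_id = 0x000000AB12345678ULL; // hypothetical id
    const auto low = static_cast<std::uint32_t>(process_id);
    const auto high = static_cast<std::uint32_t>(process_id >> 32);
    std::printf("low=0x%08X high=0x%08X\n", low, high);

    // Reassembling the halves recovers the original 64-bit value.
    const std::uint64_t rebuilt = (static_cast<std::uint64_t>(high) << 32) | low;
    std::printf("rebuilt=0x%016llX\n", static_cast<unsigned long long>(rebuilt));
    return 0;
}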
386/// Wait for the given handles to synchronize, timeout after the specified nanoseconds 439/// Wait for the given handles to synchronize, timeout after the specified nanoseconds
387static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr handles_address, 440static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr handles_address,
388 u64 handle_count, s64 nano_seconds) { 441 u64 handle_count, s64 nano_seconds) {
@@ -447,10 +500,13 @@ static ResultCode CancelSynchronization(Core::System& system, Handle thread_hand
447 } 500 }
448 501
449 thread->CancelWait(); 502 thread->CancelWait();
450 system.PrepareReschedule(thread->GetProcessorID());
451 return RESULT_SUCCESS; 503 return RESULT_SUCCESS;
452} 504}
453 505
506static ResultCode CancelSynchronization32(Core::System& system, Handle thread_handle) {
507 return CancelSynchronization(system, thread_handle);
508}
509
454/// Attempts to locks a mutex, creating it if it does not already exist 510/// Attempts to locks a mutex, creating it if it does not already exist
455static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_handle, 511static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_handle,
456 VAddr mutex_addr, Handle requesting_thread_handle) { 512 VAddr mutex_addr, Handle requesting_thread_handle) {
@@ -475,6 +531,12 @@ static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_hand
475 requesting_thread_handle); 531 requesting_thread_handle);
476} 532}
477 533
534static ResultCode ArbitrateLock32(Core::System& system, Handle holding_thread_handle,
535 u32 mutex_addr, Handle requesting_thread_handle) {
536 return ArbitrateLock(system, holding_thread_handle, static_cast<VAddr>(mutex_addr),
537 requesting_thread_handle);
538}
539
478/// Unlock a mutex 540/// Unlock a mutex
479static ResultCode ArbitrateUnlock(Core::System& system, VAddr mutex_addr) { 541static ResultCode ArbitrateUnlock(Core::System& system, VAddr mutex_addr) {
480 LOG_TRACE(Kernel_SVC, "called mutex_addr=0x{:X}", mutex_addr); 542 LOG_TRACE(Kernel_SVC, "called mutex_addr=0x{:X}", mutex_addr);
@@ -494,6 +556,10 @@ static ResultCode ArbitrateUnlock(Core::System& system, VAddr mutex_addr) {
494 return current_process->GetMutex().Release(mutex_addr); 556 return current_process->GetMutex().Release(mutex_addr);
495} 557}
496 558
559static ResultCode ArbitrateUnlock32(Core::System& system, u32 mutex_addr) {
560 return ArbitrateUnlock(system, static_cast<VAddr>(mutex_addr));
561}
562
497enum class BreakType : u32 { 563enum class BreakType : u32 {
498 Panic = 0, 564 Panic = 0,
499 AssertionFailed = 1, 565 AssertionFailed = 1,
@@ -594,6 +660,7 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
594 info2, has_dumped_buffer ? std::make_optional(debug_buffer) : std::nullopt); 660 info2, has_dumped_buffer ? std::make_optional(debug_buffer) : std::nullopt);
595 661
596 if (!break_reason.signal_debugger) { 662 if (!break_reason.signal_debugger) {
663 SchedulerLock lock(system.Kernel());
597 LOG_CRITICAL( 664 LOG_CRITICAL(
598 Debug_Emulated, 665 Debug_Emulated,
599 "Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}", 666 "Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}",
@@ -605,14 +672,16 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
605 const auto thread_processor_id = current_thread->GetProcessorID(); 672 const auto thread_processor_id = current_thread->GetProcessorID();
606 system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace(); 673 system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace();
607 674
608 system.Kernel().CurrentProcess()->PrepareForTermination();
609
610 // Kill the current thread 675 // Kill the current thread
676 system.Kernel().ExceptionalExit();
611 current_thread->Stop(); 677 current_thread->Stop();
612 system.PrepareReschedule();
613 } 678 }
614} 679}
615 680
681static void Break32(Core::System& system, u32 reason, u32 info1, u32 info2) {
682 Break(system, reason, static_cast<u64>(info1), static_cast<u64>(info2));
683}
684
616/// Used to output a message on a debug hardware unit - does nothing on a retail unit 685/// Used to output a message on a debug hardware unit - does nothing on a retail unit
617static void OutputDebugString([[maybe_unused]] Core::System& system, VAddr address, u64 len) { 686static void OutputDebugString([[maybe_unused]] Core::System& system, VAddr address, u64 len) {
618 if (len == 0) { 687 if (len == 0) {
@@ -627,6 +696,7 @@ static void OutputDebugString([[maybe_unused]] Core::System& system, VAddr addre
627/// Gets system/memory information for the current process 696/// Gets system/memory information for the current process
628static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 handle, 697static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 handle,
629 u64 info_sub_id) { 698 u64 info_sub_id) {
699 std::lock_guard lock{HLE::g_hle_lock};
630 LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}", info_id, 700 LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}", info_id,
631 info_sub_id, handle); 701 info_sub_id, handle);
632 702
@@ -863,9 +933,9 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
863 if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) { 933 if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) {
864 const u64 thread_ticks = current_thread->GetTotalCPUTimeTicks(); 934 const u64 thread_ticks = current_thread->GetTotalCPUTimeTicks();
865 935
866 out_ticks = thread_ticks + (core_timing.GetTicks() - prev_ctx_ticks); 936 out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
867 } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) { 937 } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) {
868 out_ticks = core_timing.GetTicks() - prev_ctx_ticks; 938 out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks;
869 } 939 }
870 940
871 *result = out_ticks; 941 *result = out_ticks;
@@ -892,6 +962,7 @@ static ResultCode GetInfo32(Core::System& system, u32* result_low, u32* result_h
892 962
893/// Maps memory at a desired address 963/// Maps memory at a desired address
894static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) { 964static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
965 std::lock_guard lock{HLE::g_hle_lock};
895 LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size); 966 LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
896 967
897 if (!Common::Is4KBAligned(addr)) { 968 if (!Common::Is4KBAligned(addr)) {
@@ -939,8 +1010,13 @@ static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size)
939 return page_table.MapPhysicalMemory(addr, size); 1010 return page_table.MapPhysicalMemory(addr, size);
940} 1011}
941 1012
1013static ResultCode MapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
1014 return MapPhysicalMemory(system, static_cast<VAddr>(addr), static_cast<std::size_t>(size));
1015}
1016
942/// Unmaps memory previously mapped via MapPhysicalMemory 1017/// Unmaps memory previously mapped via MapPhysicalMemory
943static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) { 1018static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
1019 std::lock_guard lock{HLE::g_hle_lock};
944 LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size); 1020 LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
945 1021
946 if (!Common::Is4KBAligned(addr)) { 1022 if (!Common::Is4KBAligned(addr)) {
@@ -988,6 +1064,10 @@ static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size
988 return page_table.UnmapPhysicalMemory(addr, size); 1064 return page_table.UnmapPhysicalMemory(addr, size);
989} 1065}
990 1066
1067static ResultCode UnmapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
1068 return UnmapPhysicalMemory(system, static_cast<VAddr>(addr), static_cast<std::size_t>(size));
1069}
1070
991/// Sets the thread activity 1071/// Sets the thread activity
992static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 activity) { 1072static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 activity) {
993 LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", handle, activity); 1073 LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", handle, activity);
@@ -1017,10 +1097,11 @@ static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 act
1017 return ERR_BUSY; 1097 return ERR_BUSY;
1018 } 1098 }
1019 1099
1020 thread->SetActivity(static_cast<ThreadActivity>(activity)); 1100 return thread->SetActivity(static_cast<ThreadActivity>(activity));
1101}
1021 1102
1022 system.PrepareReschedule(thread->GetProcessorID()); 1103static ResultCode SetThreadActivity32(Core::System& system, Handle handle, u32 activity) {
1023 return RESULT_SUCCESS; 1104 return SetThreadActivity(system, handle, activity);
1024} 1105}
1025 1106
1026/// Gets the thread context 1107/// Gets the thread context
@@ -1064,6 +1145,10 @@ static ResultCode GetThreadContext(Core::System& system, VAddr thread_context, H
1064 return RESULT_SUCCESS; 1145 return RESULT_SUCCESS;
1065} 1146}
1066 1147
1148static ResultCode GetThreadContext32(Core::System& system, u32 thread_context, Handle handle) {
1149 return GetThreadContext(system, static_cast<VAddr>(thread_context), handle);
1150}
1151
1067/// Gets the priority for the specified thread 1152/// Gets the priority for the specified thread
1068static ResultCode GetThreadPriority(Core::System& system, u32* priority, Handle handle) { 1153static ResultCode GetThreadPriority(Core::System& system, u32* priority, Handle handle) {
1069 LOG_TRACE(Kernel_SVC, "called"); 1154 LOG_TRACE(Kernel_SVC, "called");
@@ -1071,6 +1156,7 @@ static ResultCode GetThreadPriority(Core::System& system, u32* priority, Handle
1071 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 1156 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
1072 const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(handle); 1157 const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(handle);
1073 if (!thread) { 1158 if (!thread) {
1159 *priority = 0;
1074 LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle); 1160 LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle);
1075 return ERR_INVALID_HANDLE; 1161 return ERR_INVALID_HANDLE;
1076 } 1162 }
@@ -1105,18 +1191,26 @@ static ResultCode SetThreadPriority(Core::System& system, Handle handle, u32 pri
1105 1191
1106 thread->SetPriority(priority); 1192 thread->SetPriority(priority);
1107 1193
1108 system.PrepareReschedule(thread->GetProcessorID());
1109 return RESULT_SUCCESS; 1194 return RESULT_SUCCESS;
1110} 1195}
1111 1196
1197static ResultCode SetThreadPriority32(Core::System& system, Handle handle, u32 priority) {
1198 return SetThreadPriority(system, handle, priority);
1199}
1200
1112/// Get which CPU core is executing the current thread 1201/// Get which CPU core is executing the current thread
1113static u32 GetCurrentProcessorNumber(Core::System& system) { 1202static u32 GetCurrentProcessorNumber(Core::System& system) {
1114 LOG_TRACE(Kernel_SVC, "called"); 1203 LOG_TRACE(Kernel_SVC, "called");
1115 return system.CurrentScheduler().GetCurrentThread()->GetProcessorID(); 1204 return static_cast<u32>(system.CurrentPhysicalCore().CoreIndex());
1205}
1206
1207static u32 GetCurrentProcessorNumber32(Core::System& system) {
1208 return GetCurrentProcessorNumber(system);
1116} 1209}
1117 1210
1118static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_handle, VAddr addr, 1211static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_handle, VAddr addr,
1119 u64 size, u32 permissions) { 1212 u64 size, u32 permissions) {
1213 std::lock_guard lock{HLE::g_hle_lock};
1120 LOG_TRACE(Kernel_SVC, 1214 LOG_TRACE(Kernel_SVC,
1121 "called, shared_memory_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}", 1215 "called, shared_memory_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
1122 shared_memory_handle, addr, size, permissions); 1216 shared_memory_handle, addr, size, permissions);
@@ -1187,9 +1281,16 @@ static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_han
1187 return shared_memory->Map(*current_process, addr, size, permission_type); 1281 return shared_memory->Map(*current_process, addr, size, permission_type);
1188} 1282}
1189 1283
1284static ResultCode MapSharedMemory32(Core::System& system, Handle shared_memory_handle, u32 addr,
1285 u32 size, u32 permissions) {
1286 return MapSharedMemory(system, shared_memory_handle, static_cast<VAddr>(addr),
1287 static_cast<std::size_t>(size), permissions);
1288}
1289
1190static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address, 1290static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address,
1191 VAddr page_info_address, Handle process_handle, 1291 VAddr page_info_address, Handle process_handle,
1192 VAddr address) { 1292 VAddr address) {
1293 std::lock_guard lock{HLE::g_hle_lock};
1193 LOG_TRACE(Kernel_SVC, "called process=0x{:08X} address={:X}", process_handle, address); 1294 LOG_TRACE(Kernel_SVC, "called process=0x{:08X} address={:X}", process_handle, address);
1194 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 1295 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
1195 std::shared_ptr<Process> process = handle_table.Get<Process>(process_handle); 1296 std::shared_ptr<Process> process = handle_table.Get<Process>(process_handle);
@@ -1372,6 +1473,7 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
1372/// Exits the current process 1473/// Exits the current process
1373static void ExitProcess(Core::System& system) { 1474static void ExitProcess(Core::System& system) {
1374 auto* current_process = system.Kernel().CurrentProcess(); 1475 auto* current_process = system.Kernel().CurrentProcess();
1476 UNIMPLEMENTED();
1375 1477
1376 LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID()); 1478 LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID());
1377 ASSERT_MSG(current_process->GetStatus() == ProcessStatus::Running, 1479 ASSERT_MSG(current_process->GetStatus() == ProcessStatus::Running,
@@ -1381,8 +1483,10 @@ static void ExitProcess(Core::System& system) {
1381 1483
1382 // Kill the current thread 1484 // Kill the current thread
1383 system.CurrentScheduler().GetCurrentThread()->Stop(); 1485 system.CurrentScheduler().GetCurrentThread()->Stop();
1486}
1384 1487
1385 system.PrepareReschedule(); 1488static void ExitProcess32(Core::System& system) {
1489 ExitProcess(system);
1386} 1490}
1387 1491
1388/// Creates a new thread 1492/// Creates a new thread
@@ -1428,9 +1532,10 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e
1428 1532
1429 ASSERT(kernel.CurrentProcess()->GetResourceLimit()->Reserve(ResourceType::Threads, 1)); 1533 ASSERT(kernel.CurrentProcess()->GetResourceLimit()->Reserve(ResourceType::Threads, 1));
1430 1534
1535 ThreadType type = THREADTYPE_USER;
1431 CASCADE_RESULT(std::shared_ptr<Thread> thread, 1536 CASCADE_RESULT(std::shared_ptr<Thread> thread,
1432 Thread::Create(kernel, "", entry_point, priority, arg, processor_id, stack_top, 1537 Thread::Create(system, type, "", entry_point, priority, arg, processor_id,
1433 *current_process)); 1538 stack_top, current_process));
1434 1539
1435 const auto new_thread_handle = current_process->GetHandleTable().Create(thread); 1540 const auto new_thread_handle = current_process->GetHandleTable().Create(thread);
1436 if (new_thread_handle.Failed()) { 1541 if (new_thread_handle.Failed()) {
@@ -1444,11 +1549,15 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e
1444 thread->SetName( 1549 thread->SetName(
1445 fmt::format("thread[entry_point={:X}, handle={:X}]", entry_point, *new_thread_handle)); 1550 fmt::format("thread[entry_point={:X}, handle={:X}]", entry_point, *new_thread_handle));
1446 1551
1447 system.PrepareReschedule(thread->GetProcessorID());
1448
1449 return RESULT_SUCCESS; 1552 return RESULT_SUCCESS;
1450} 1553}
1451 1554
1555static ResultCode CreateThread32(Core::System& system, Handle* out_handle, u32 priority,
1556 u32 entry_point, u32 arg, u32 stack_top, s32 processor_id) {
1557 return CreateThread(system, out_handle, static_cast<VAddr>(entry_point), static_cast<u64>(arg),
1558 static_cast<VAddr>(stack_top), priority, processor_id);
1559}
1560
1452/// Starts the thread for the provided handle 1561/// Starts the thread for the provided handle
1453static ResultCode StartThread(Core::System& system, Handle thread_handle) { 1562static ResultCode StartThread(Core::System& system, Handle thread_handle) {
1454 LOG_DEBUG(Kernel_SVC, "called thread=0x{:08X}", thread_handle); 1563 LOG_DEBUG(Kernel_SVC, "called thread=0x{:08X}", thread_handle);
@@ -1463,13 +1572,11 @@ static ResultCode StartThread(Core::System& system, Handle thread_handle) {
1463 1572
1464 ASSERT(thread->GetStatus() == ThreadStatus::Dormant); 1573 ASSERT(thread->GetStatus() == ThreadStatus::Dormant);
1465 1574
1466 thread->ResumeFromWait(); 1575 return thread->Start();
1467 1576}
1468 if (thread->GetStatus() == ThreadStatus::Ready) {
1469 system.PrepareReschedule(thread->GetProcessorID());
1470 }
1471 1577
1472 return RESULT_SUCCESS; 1578static ResultCode StartThread32(Core::System& system, Handle thread_handle) {
1579 return StartThread(system, thread_handle);
1473} 1580}
1474 1581
1475/// Called when a thread exits 1582/// Called when a thread exits
@@ -1477,9 +1584,12 @@ static void ExitThread(Core::System& system) {
1477 LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC()); 1584 LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC());
1478 1585
1479 auto* const current_thread = system.CurrentScheduler().GetCurrentThread(); 1586 auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
1480 current_thread->Stop();
1481 system.GlobalScheduler().RemoveThread(SharedFrom(current_thread)); 1587 system.GlobalScheduler().RemoveThread(SharedFrom(current_thread));
1482 system.PrepareReschedule(); 1588 current_thread->Stop();
1589}
1590
1591static void ExitThread32(Core::System& system) {
1592 ExitThread(system);
1483} 1593}
1484 1594
1485/// Sleep the current thread 1595/// Sleep the current thread
@@ -1498,15 +1608,21 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
1498 1608
1499 if (nanoseconds <= 0) { 1609 if (nanoseconds <= 0) {
1500 switch (static_cast<SleepType>(nanoseconds)) { 1610 switch (static_cast<SleepType>(nanoseconds)) {
1501 case SleepType::YieldWithoutLoadBalancing: 1611 case SleepType::YieldWithoutLoadBalancing: {
1502 is_redundant = current_thread->YieldSimple(); 1612 auto pair = current_thread->YieldSimple();
1613 is_redundant = pair.second;
1503 break; 1614 break;
1504 case SleepType::YieldWithLoadBalancing: 1615 }
1505 is_redundant = current_thread->YieldAndBalanceLoad(); 1616 case SleepType::YieldWithLoadBalancing: {
1617 auto pair = current_thread->YieldAndBalanceLoad();
1618 is_redundant = pair.second;
1506 break; 1619 break;
1507 case SleepType::YieldAndWaitForLoadBalancing: 1620 }
1508 is_redundant = current_thread->YieldAndWaitForLoadBalancing(); 1621 case SleepType::YieldAndWaitForLoadBalancing: {
1622 auto pair = current_thread->YieldAndWaitForLoadBalancing();
1623 is_redundant = pair.second;
1509 break; 1624 break;
1625 }
1510 default: 1626 default:
1511 UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds); 1627 UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
1512 } 1628 }
@@ -1514,13 +1630,18 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
1514 current_thread->Sleep(nanoseconds); 1630 current_thread->Sleep(nanoseconds);
1515 } 1631 }
1516 1632
1517 if (is_redundant) { 1633 if (is_redundant && !system.Kernel().IsMulticore()) {
1518 // If it's redundant, the core is pretty much idle. Some games keep idling 1634 system.Kernel().ExitSVCProfile();
1519 // a core while it's doing nothing, we advance timing to avoid costly continuous 1635 system.CoreTiming().AddTicks(1000U);
1520 // calls. 1636 system.GetCpuManager().PreemptSingleCore();
1521 system.CoreTiming().AddTicks(2000); 1637 system.Kernel().EnterSVCProfile();
1522 } 1638 }
1523 system.PrepareReschedule(current_thread->GetProcessorID()); 1639}
1640
1641static void SleepThread32(Core::System& system, u32 nanoseconds_low, u32 nanoseconds_high) {
1642 const s64 nanoseconds = static_cast<s64>(static_cast<u64>(nanoseconds_low) |
1643 (static_cast<u64>(nanoseconds_high) << 32));
1644 SleepThread(system, nanoseconds);
1524} 1645}
1525 1646
1526/// Wait process wide key atomic 1647/// Wait process wide key atomic
@@ -1547,31 +1668,69 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add
1547 } 1668 }
1548 1669
1549 ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4)); 1670 ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));
1550 1671 auto& kernel = system.Kernel();
1672 Handle event_handle;
1673 Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
1551 auto* const current_process = system.Kernel().CurrentProcess(); 1674 auto* const current_process = system.Kernel().CurrentProcess();
1552 const auto& handle_table = current_process->GetHandleTable(); 1675 {
1553 std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle); 1676 SchedulerLockAndSleep lock(kernel, event_handle, current_thread, nano_seconds);
1554 ASSERT(thread); 1677 const auto& handle_table = current_process->GetHandleTable();
1678 std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
1679 ASSERT(thread);
1680
1681 current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
1682
1683 if (thread->IsPendingTermination()) {
1684 lock.CancelSleep();
1685 return ERR_THREAD_TERMINATING;
1686 }
1687
1688 const auto release_result = current_process->GetMutex().Release(mutex_addr);
1689 if (release_result.IsError()) {
1690 lock.CancelSleep();
1691 return release_result;
1692 }
1693
1694 if (nano_seconds == 0) {
1695 lock.CancelSleep();
1696 return RESULT_TIMEOUT;
1697 }
1555 1698
1556 const auto release_result = current_process->GetMutex().Release(mutex_addr); 1699 current_thread->SetCondVarWaitAddress(condition_variable_addr);
1557 if (release_result.IsError()) { 1700 current_thread->SetMutexWaitAddress(mutex_addr);
1558 return release_result; 1701 current_thread->SetWaitHandle(thread_handle);
1702 current_thread->SetStatus(ThreadStatus::WaitCondVar);
1703 current_process->InsertConditionVariableThread(SharedFrom(current_thread));
1559 } 1704 }
1560 1705
1561 Thread* current_thread = system.CurrentScheduler().GetCurrentThread(); 1706 if (event_handle != InvalidHandle) {
1562 current_thread->SetCondVarWaitAddress(condition_variable_addr); 1707 auto& time_manager = kernel.TimeManager();
1563 current_thread->SetMutexWaitAddress(mutex_addr); 1708 time_manager.UnscheduleTimeEvent(event_handle);
1564 current_thread->SetWaitHandle(thread_handle); 1709 }
1565 current_thread->SetStatus(ThreadStatus::WaitCondVar); 1710
1566 current_thread->InvalidateWakeupCallback(); 1711 {
1567 current_process->InsertConditionVariableThread(SharedFrom(current_thread)); 1712 SchedulerLock lock(kernel);
1568 1713
1569 current_thread->WakeAfterDelay(nano_seconds); 1714 auto* owner = current_thread->GetLockOwner();
1715 if (owner != nullptr) {
1716 owner->RemoveMutexWaiter(SharedFrom(current_thread));
1717 }
1570 1718
1719 current_process->RemoveConditionVariableThread(SharedFrom(current_thread));
1720 }
1571 // Note: Deliberately don't attempt to inherit the lock owner's priority. 1721 // Note: Deliberately don't attempt to inherit the lock owner's priority.
1572 1722
1573 system.PrepareReschedule(current_thread->GetProcessorID()); 1723 return current_thread->GetSignalingResult();
1574 return RESULT_SUCCESS; 1724}
1725
1726static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 mutex_addr,
1727 u32 condition_variable_addr, Handle thread_handle,
1728 u32 nanoseconds_low, u32 nanoseconds_high) {
1729 const s64 nanoseconds =
1730 static_cast<s64>(nanoseconds_low | (static_cast<u64>(nanoseconds_high) << 32));
1731 return WaitProcessWideKeyAtomic(system, static_cast<VAddr>(mutex_addr),
1732 static_cast<VAddr>(condition_variable_addr), thread_handle,
1733 nanoseconds);
1575} 1734}
1576 1735
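WaitProcessWideKeyAtomic now leans on SchedulerLockAndSleep: each early-exit path (pending termination, failed mutex release, zero timeout) calls CancelSleep() so no wake-up event is left armed. A toy RAII sketch of that idea follows, with hypothetical names and none of the real kernel types; the real guard also takes the scheduler lock, which is omitted here:

#include <functional>
#include <iostream>

class ScopedTimedWait {
public:
    explicit ScopedTimedWait(std::function<void()> arm_timeout)
        : arm_timeout_{std::move(arm_timeout)} {}

    ~ScopedTimedWait() {
        if (!cancelled_) {
            arm_timeout_(); // the thread really goes to sleep: schedule its wake-up event
        }
    }

    void CancelSleep() { cancelled_ = true; } // early exit: never arm the timeout

private:
    std::function<void()> arm_timeout_;
    bool cancelled_ = false;
};

int main() {
    {
        ScopedTimedWait wait([] { std::cout << "wake-up event scheduled\n"; });
        wait.CancelSleep(); // e.g. the mutex release failed or nano_seconds == 0
    } // nothing is scheduled
    {
        ScopedTimedWait wait([] { std::cout << "wake-up event scheduled\n"; });
    } // no early exit, so the timeout is armed here
    return 0;
}

Tying the arming to scope exit makes it hard for a newly added early return to leak a scheduled timeout.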
1577/// Signal process wide key 1736/// Signal process wide key
@@ -1582,7 +1741,9 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_
1582 ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4)); 1741 ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));
1583 1742
1584 // Retrieve a list of all threads that are waiting for this condition variable. 1743 // Retrieve a list of all threads that are waiting for this condition variable.
1585 auto* const current_process = system.Kernel().CurrentProcess(); 1744 auto& kernel = system.Kernel();
1745 SchedulerLock lock(kernel);
1746 auto* const current_process = kernel.CurrentProcess();
1586 std::vector<std::shared_ptr<Thread>> waiting_threads = 1747 std::vector<std::shared_ptr<Thread>> waiting_threads =
1587 current_process->GetConditionVariableThreads(condition_variable_addr); 1748 current_process->GetConditionVariableThreads(condition_variable_addr);
1588 1749
@@ -1591,7 +1752,7 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_
1591 std::size_t last = waiting_threads.size(); 1752 std::size_t last = waiting_threads.size();
1592 if (target > 0) 1753 if (target > 0)
1593 last = std::min(waiting_threads.size(), static_cast<std::size_t>(target)); 1754 last = std::min(waiting_threads.size(), static_cast<std::size_t>(target));
1594 1755 auto& time_manager = kernel.TimeManager();
1595 for (std::size_t index = 0; index < last; ++index) { 1756 for (std::size_t index = 0; index < last; ++index) {
1596 auto& thread = waiting_threads[index]; 1757 auto& thread = waiting_threads[index];
1597 1758
@@ -1599,7 +1760,6 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_
1599 1760
1600 // liberate Cond Var Thread. 1761 // liberate Cond Var Thread.
1601 current_process->RemoveConditionVariableThread(thread); 1762 current_process->RemoveConditionVariableThread(thread);
1602 thread->SetCondVarWaitAddress(0);
1603 1763
1604 const std::size_t current_core = system.CurrentCoreIndex(); 1764 const std::size_t current_core = system.CurrentCoreIndex();
1605 auto& monitor = system.Monitor(); 1765 auto& monitor = system.Monitor();
@@ -1610,10 +1770,8 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_
1610 u32 update_val = 0; 1770 u32 update_val = 0;
1611 const VAddr mutex_address = thread->GetMutexWaitAddress(); 1771 const VAddr mutex_address = thread->GetMutexWaitAddress();
1612 do { 1772 do {
1613 monitor.SetExclusive(current_core, mutex_address);
1614
1615 // If the mutex is not yet acquired, acquire it. 1773 // If the mutex is not yet acquired, acquire it.
1616 mutex_val = memory.Read32(mutex_address); 1774 mutex_val = monitor.ExclusiveRead32(current_core, mutex_address);
1617 1775
1618 if (mutex_val != 0) { 1776 if (mutex_val != 0) {
1619 update_val = mutex_val | Mutex::MutexHasWaitersFlag; 1777 update_val = mutex_val | Mutex::MutexHasWaitersFlag;
@@ -1621,33 +1779,28 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_
1621 update_val = thread->GetWaitHandle(); 1779 update_val = thread->GetWaitHandle();
1622 } 1780 }
1623 } while (!monitor.ExclusiveWrite32(current_core, mutex_address, update_val)); 1781 } while (!monitor.ExclusiveWrite32(current_core, mutex_address, update_val));
1782 monitor.ClearExclusive();
1624 if (mutex_val == 0) { 1783 if (mutex_val == 0) {
1625 // We were able to acquire the mutex, resume this thread. 1784 // We were able to acquire the mutex, resume this thread.
1626 ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar);
1627 thread->ResumeFromWait();
1628
1629 auto* const lock_owner = thread->GetLockOwner(); 1785 auto* const lock_owner = thread->GetLockOwner();
1630 if (lock_owner != nullptr) { 1786 if (lock_owner != nullptr) {
1631 lock_owner->RemoveMutexWaiter(thread); 1787 lock_owner->RemoveMutexWaiter(thread);
1632 } 1788 }
1633 1789
1634 thread->SetLockOwner(nullptr); 1790 thread->SetLockOwner(nullptr);
1635 thread->SetMutexWaitAddress(0); 1791 thread->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
1636 thread->SetWaitHandle(0); 1792 thread->ResumeFromWait();
1637 thread->SetWaitSynchronizationResult(RESULT_SUCCESS);
1638 system.PrepareReschedule(thread->GetProcessorID());
1639 } else { 1793 } else {
1640 // The mutex is already owned by some other thread, make this thread wait on it. 1794 // The mutex is already owned by some other thread, make this thread wait on it.
1641 const Handle owner_handle = static_cast<Handle>(mutex_val & Mutex::MutexOwnerMask); 1795 const Handle owner_handle = static_cast<Handle>(mutex_val & Mutex::MutexOwnerMask);
1642 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 1796 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
1643 auto owner = handle_table.Get<Thread>(owner_handle); 1797 auto owner = handle_table.Get<Thread>(owner_handle);
1644 ASSERT(owner); 1798 ASSERT(owner);
1645 ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar); 1799 if (thread->GetStatus() == ThreadStatus::WaitCondVar) {
1646 thread->InvalidateWakeupCallback(); 1800 thread->SetStatus(ThreadStatus::WaitMutex);
1647 thread->SetStatus(ThreadStatus::WaitMutex); 1801 }
1648 1802
1649 owner->AddMutexWaiter(thread); 1803 owner->AddMutexWaiter(thread);
1650 system.PrepareReschedule(thread->GetProcessorID());
1651 } 1804 }
1652 } 1805 }
1653} 1806}
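The exclusive-monitor loop above (ExclusiveRead32 followed by ExclusiveWrite32 until it succeeds) behaves like a compare-and-swap on the mutex word: the waiter's handle is stored when the word is zero, otherwise the has-waiters bit is set so the owner knows to wake someone on release. An analogous standalone sketch using std::atomic, with an illustrative flag constant rather than the kernel's actual one:

#include <atomic>
#include <cstdint>
#include <iostream>

constexpr uint32_t kHasWaitersFlag = 0x40000000; // illustrative flag bit

uint32_t AcquireOrMarkWaiting(std::atomic<uint32_t>& mutex_word, uint32_t waiter_handle) {
    uint32_t observed = mutex_word.load();
    uint32_t desired;
    do {
        // Free mutex: the waiter takes ownership. Held mutex: record that waiters exist.
        desired = (observed == 0) ? waiter_handle : (observed | kHasWaitersFlag);
    } while (!mutex_word.compare_exchange_weak(observed, desired));
    return observed; // zero means the caller acquired the mutex
}

int main() {
    std::atomic<uint32_t> word{0};
    std::cout << std::hex << AcquireOrMarkWaiting(word, 0xAB) << ' ' << word.load() << '\n';
    return 0;
}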
@@ -1678,12 +1831,15 @@ static ResultCode WaitForAddress(Core::System& system, VAddr address, u32 type,
1678 auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter(); 1831 auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter();
1679 const ResultCode result = 1832 const ResultCode result =
1680 address_arbiter.WaitForAddress(address, arbitration_type, value, timeout); 1833 address_arbiter.WaitForAddress(address, arbitration_type, value, timeout);
1681 if (result == RESULT_SUCCESS) {
1682 system.PrepareReschedule();
1683 }
1684 return result; 1834 return result;
1685} 1835}
1686 1836
1837static ResultCode WaitForAddress32(Core::System& system, u32 address, u32 type, s32 value,
1838 u32 timeout_low, u32 timeout_high) {
1839 s64 timeout = static_cast<s64>(timeout_low | (static_cast<u64>(timeout_high) << 32));
1840 return WaitForAddress(system, static_cast<VAddr>(address), type, value, timeout);
1841}
1842
1687// Signals to an address (via Address Arbiter) 1843// Signals to an address (via Address Arbiter)
1688static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type, s32 value, 1844static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type, s32 value,
1689 s32 num_to_wake) { 1845 s32 num_to_wake) {
@@ -1707,6 +1863,11 @@ static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type,
1707 return address_arbiter.SignalToAddress(address, signal_type, value, num_to_wake); 1863 return address_arbiter.SignalToAddress(address, signal_type, value, num_to_wake);
1708} 1864}
1709 1865
1866static ResultCode SignalToAddress32(Core::System& system, u32 address, u32 type, s32 value,
1867 s32 num_to_wake) {
1868 return SignalToAddress(system, static_cast<VAddr>(address), type, value, num_to_wake);
1869}
1870
1710static void KernelDebug([[maybe_unused]] Core::System& system, 1871static void KernelDebug([[maybe_unused]] Core::System& system,
1711 [[maybe_unused]] u32 kernel_debug_type, [[maybe_unused]] u64 param1, 1872 [[maybe_unused]] u32 kernel_debug_type, [[maybe_unused]] u64 param1,
1712 [[maybe_unused]] u64 param2, [[maybe_unused]] u64 param3) { 1873 [[maybe_unused]] u64 param2, [[maybe_unused]] u64 param3) {
@@ -1725,14 +1886,21 @@ static u64 GetSystemTick(Core::System& system) {
1725 auto& core_timing = system.CoreTiming(); 1886 auto& core_timing = system.CoreTiming();
1726 1887
1727 // Returns the value of cntpct_el0 (https://switchbrew.org/wiki/SVC#svcGetSystemTick) 1888 // Returns the value of cntpct_el0 (https://switchbrew.org/wiki/SVC#svcGetSystemTick)
1728 const u64 result{Core::Timing::CpuCyclesToClockCycles(system.CoreTiming().GetTicks())}; 1889 const u64 result{system.CoreTiming().GetClockTicks()};
1729 1890
1730 // Advance time to defeat dumb games that busy-wait for the frame to end. 1891 if (!system.Kernel().IsMulticore()) {
1731 core_timing.AddTicks(400); 1892 core_timing.AddTicks(400U);
1893 }
1732 1894
1733 return result; 1895 return result;
1734} 1896}
1735 1897
1898static void GetSystemTick32(Core::System& system, u32* time_low, u32* time_high) {
1899 u64 time = GetSystemTick(system);
1900 *time_low = static_cast<u32>(time);
1901 *time_high = static_cast<u32>(time >> 32);
1902}
1903
1736/// Close a handle 1904/// Close a handle
1737static ResultCode CloseHandle(Core::System& system, Handle handle) { 1905static ResultCode CloseHandle(Core::System& system, Handle handle) {
1738 LOG_TRACE(Kernel_SVC, "Closing handle 0x{:08X}", handle); 1906 LOG_TRACE(Kernel_SVC, "Closing handle 0x{:08X}", handle);
@@ -1765,9 +1933,14 @@ static ResultCode ResetSignal(Core::System& system, Handle handle) {
1765 return ERR_INVALID_HANDLE; 1933 return ERR_INVALID_HANDLE;
1766} 1934}
1767 1935
1936static ResultCode ResetSignal32(Core::System& system, Handle handle) {
1937 return ResetSignal(system, handle);
1938}
1939
1768/// Creates a TransferMemory object 1940/// Creates a TransferMemory object
1769static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAddr addr, u64 size, 1941static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAddr addr, u64 size,
1770 u32 permissions) { 1942 u32 permissions) {
1943 std::lock_guard lock{HLE::g_hle_lock};
1771 LOG_DEBUG(Kernel_SVC, "called addr=0x{:X}, size=0x{:X}, perms=0x{:08X}", addr, size, 1944 LOG_DEBUG(Kernel_SVC, "called addr=0x{:X}, size=0x{:X}, perms=0x{:08X}", addr, size,
1772 permissions); 1945 permissions);
1773 1946
@@ -1812,6 +1985,12 @@ static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAd
1812 return RESULT_SUCCESS; 1985 return RESULT_SUCCESS;
1813} 1986}
1814 1987
1988static ResultCode CreateTransferMemory32(Core::System& system, Handle* handle, u32 addr, u32 size,
1989 u32 permissions) {
1990 return CreateTransferMemory(system, handle, static_cast<VAddr>(addr),
1991 static_cast<std::size_t>(size), permissions);
1992}
1993
1815static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, u32* core, 1994static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, u32* core,
1816 u64* mask) { 1995 u64* mask) {
1817 LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle); 1996 LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);
@@ -1821,6 +2000,8 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle,
1821 if (!thread) { 2000 if (!thread) {
1822 LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}", 2001 LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
1823 thread_handle); 2002 thread_handle);
2003 *core = 0;
2004 *mask = 0;
1824 return ERR_INVALID_HANDLE; 2005 return ERR_INVALID_HANDLE;
1825 } 2006 }
1826 2007
@@ -1830,6 +2011,15 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle,
1830 return RESULT_SUCCESS; 2011 return RESULT_SUCCESS;
1831} 2012}
1832 2013
2014static ResultCode GetThreadCoreMask32(Core::System& system, Handle thread_handle, u32* core,
2015 u32* mask_low, u32* mask_high) {
2016 u64 mask{};
2017 const auto result = GetThreadCoreMask(system, thread_handle, core, &mask);
2018 *mask_high = static_cast<u32>(mask >> 32);
2019 *mask_low = static_cast<u32>(mask);
2020 return result;
2021}
2022
1833static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, u32 core, 2023static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, u32 core,
1834 u64 affinity_mask) { 2024 u64 affinity_mask) {
1835 LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, core=0x{:X}, affinity_mask=0x{:016X}", 2025 LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, core=0x{:X}, affinity_mask=0x{:016X}",
@@ -1861,7 +2051,7 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle,
1861 return ERR_INVALID_COMBINATION; 2051 return ERR_INVALID_COMBINATION;
1862 } 2052 }
1863 2053
1864 if (core < Core::NUM_CPU_CORES) { 2054 if (core < Core::Hardware::NUM_CPU_CORES) {
1865 if ((affinity_mask & (1ULL << core)) == 0) { 2055 if ((affinity_mask & (1ULL << core)) == 0) {
1866 LOG_ERROR(Kernel_SVC, 2056 LOG_ERROR(Kernel_SVC,
1867 "Core is not enabled for the current mask, core={}, mask={:016X}", core, 2057 "Core is not enabled for the current mask, core={}, mask={:016X}", core,
@@ -1883,11 +2073,14 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle,
1883 return ERR_INVALID_HANDLE; 2073 return ERR_INVALID_HANDLE;
1884 } 2074 }
1885 2075
1886 system.PrepareReschedule(thread->GetProcessorID()); 2076 return thread->SetCoreAndAffinityMask(core, affinity_mask);
1887 thread->ChangeCore(core, affinity_mask); 2077}
1888 system.PrepareReschedule(thread->GetProcessorID());
1889 2078
1890 return RESULT_SUCCESS; 2079static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle, u32 core,
2080 u32 affinity_mask_low, u32 affinity_mask_high) {
2081 const u64 affinity_mask =
2082 static_cast<u64>(affinity_mask_low) | (static_cast<u64>(affinity_mask_high) << 32);
2083 return SetThreadCoreMask(system, thread_handle, core, affinity_mask);
1891} 2084}
1892 2085
1893static ResultCode CreateEvent(Core::System& system, Handle* write_handle, Handle* read_handle) { 2086static ResultCode CreateEvent(Core::System& system, Handle* write_handle, Handle* read_handle) {
@@ -1918,6 +2111,10 @@ static ResultCode CreateEvent(Core::System& system, Handle* write_handle, Handle
1918 return RESULT_SUCCESS; 2111 return RESULT_SUCCESS;
1919} 2112}
1920 2113
2114static ResultCode CreateEvent32(Core::System& system, Handle* write_handle, Handle* read_handle) {
2115 return CreateEvent(system, write_handle, read_handle);
2116}
2117
1921static ResultCode ClearEvent(Core::System& system, Handle handle) { 2118static ResultCode ClearEvent(Core::System& system, Handle handle) {
1922 LOG_TRACE(Kernel_SVC, "called, event=0x{:08X}", handle); 2119 LOG_TRACE(Kernel_SVC, "called, event=0x{:08X}", handle);
1923 2120
@@ -1939,6 +2136,10 @@ static ResultCode ClearEvent(Core::System& system, Handle handle) {
1939 return ERR_INVALID_HANDLE; 2136 return ERR_INVALID_HANDLE;
1940} 2137}
1941 2138
2139static ResultCode ClearEvent32(Core::System& system, Handle handle) {
2140 return ClearEvent(system, handle);
2141}
2142
1942static ResultCode SignalEvent(Core::System& system, Handle handle) { 2143static ResultCode SignalEvent(Core::System& system, Handle handle) {
1943 LOG_DEBUG(Kernel_SVC, "called. Handle=0x{:08X}", handle); 2144 LOG_DEBUG(Kernel_SVC, "called. Handle=0x{:08X}", handle);
1944 2145
@@ -1951,10 +2152,13 @@ static ResultCode SignalEvent(Core::System& system, Handle handle) {
1951 } 2152 }
1952 2153
1953 writable_event->Signal(); 2154 writable_event->Signal();
1954 system.PrepareReschedule();
1955 return RESULT_SUCCESS; 2155 return RESULT_SUCCESS;
1956} 2156}
1957 2157
2158static ResultCode SignalEvent32(Core::System& system, Handle handle) {
2159 return SignalEvent(system, handle);
2160}
2161
1958static ResultCode GetProcessInfo(Core::System& system, u64* out, Handle process_handle, u32 type) { 2162static ResultCode GetProcessInfo(Core::System& system, u64* out, Handle process_handle, u32 type) {
1959 LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, type=0x{:X}", process_handle, type); 2163 LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, type=0x{:X}", process_handle, type);
1960 2164
@@ -1982,6 +2186,7 @@ static ResultCode GetProcessInfo(Core::System& system, u64* out, Handle process_
1982} 2186}
1983 2187
1984static ResultCode CreateResourceLimit(Core::System& system, Handle* out_handle) { 2188static ResultCode CreateResourceLimit(Core::System& system, Handle* out_handle) {
2189 std::lock_guard lock{HLE::g_hle_lock};
1985 LOG_DEBUG(Kernel_SVC, "called"); 2190 LOG_DEBUG(Kernel_SVC, "called");
1986 2191
1987 auto& kernel = system.Kernel(); 2192 auto& kernel = system.Kernel();
@@ -2139,6 +2344,15 @@ static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAdd
2139 return RESULT_SUCCESS; 2344 return RESULT_SUCCESS;
2140} 2345}
2141 2346
2347static ResultCode FlushProcessDataCache32(Core::System& system, Handle handle, u32 address,
2348 u32 size) {
2349    // Note(Blinkhawk): For the purposes of emulating the data cache this is mostly a no-op,
2350    // since all emulation runs at the same cache level on the host architecture; the data
2351    // cache does not need flushing.
2352 LOG_DEBUG(Kernel_SVC, "called");
2353 return RESULT_SUCCESS;
2354}
2355
2142namespace { 2356namespace {
2143struct FunctionDef { 2357struct FunctionDef {
2144 using Func = void(Core::System&); 2358 using Func = void(Core::System&);
@@ -2153,57 +2367,57 @@ static const FunctionDef SVC_Table_32[] = {
2153 {0x00, nullptr, "Unknown"}, 2367 {0x00, nullptr, "Unknown"},
2154 {0x01, SvcWrap32<SetHeapSize32>, "SetHeapSize32"}, 2368 {0x01, SvcWrap32<SetHeapSize32>, "SetHeapSize32"},
2155 {0x02, nullptr, "Unknown"}, 2369 {0x02, nullptr, "Unknown"},
2156 {0x03, nullptr, "SetMemoryAttribute32"}, 2370 {0x03, SvcWrap32<SetMemoryAttribute32>, "SetMemoryAttribute32"},
2157 {0x04, nullptr, "MapMemory32"}, 2371 {0x04, SvcWrap32<MapMemory32>, "MapMemory32"},
2158 {0x05, nullptr, "UnmapMemory32"}, 2372 {0x05, SvcWrap32<UnmapMemory32>, "UnmapMemory32"},
2159 {0x06, SvcWrap32<QueryMemory32>, "QueryMemory32"}, 2373 {0x06, SvcWrap32<QueryMemory32>, "QueryMemory32"},
2160 {0x07, nullptr, "ExitProcess32"}, 2374 {0x07, SvcWrap32<ExitProcess32>, "ExitProcess32"},
2161 {0x08, nullptr, "CreateThread32"}, 2375 {0x08, SvcWrap32<CreateThread32>, "CreateThread32"},
2162 {0x09, nullptr, "StartThread32"}, 2376 {0x09, SvcWrap32<StartThread32>, "StartThread32"},
2163 {0x0a, nullptr, "ExitThread32"}, 2377 {0x0a, SvcWrap32<ExitThread32>, "ExitThread32"},
2164 {0x0b, nullptr, "SleepThread32"}, 2378 {0x0b, SvcWrap32<SleepThread32>, "SleepThread32"},
2165 {0x0c, SvcWrap32<GetThreadPriority32>, "GetThreadPriority32"}, 2379 {0x0c, SvcWrap32<GetThreadPriority32>, "GetThreadPriority32"},
2166 {0x0d, nullptr, "SetThreadPriority32"}, 2380 {0x0d, SvcWrap32<SetThreadPriority32>, "SetThreadPriority32"},
2167 {0x0e, nullptr, "GetThreadCoreMask32"}, 2381 {0x0e, SvcWrap32<GetThreadCoreMask32>, "GetThreadCoreMask32"},
2168 {0x0f, nullptr, "SetThreadCoreMask32"}, 2382 {0x0f, SvcWrap32<SetThreadCoreMask32>, "SetThreadCoreMask32"},
2169 {0x10, nullptr, "GetCurrentProcessorNumber32"}, 2383 {0x10, SvcWrap32<GetCurrentProcessorNumber32>, "GetCurrentProcessorNumber32"},
2170 {0x11, nullptr, "SignalEvent32"}, 2384 {0x11, SvcWrap32<SignalEvent32>, "SignalEvent32"},
2171 {0x12, nullptr, "ClearEvent32"}, 2385 {0x12, SvcWrap32<ClearEvent32>, "ClearEvent32"},
2172 {0x13, nullptr, "MapSharedMemory32"}, 2386 {0x13, SvcWrap32<MapSharedMemory32>, "MapSharedMemory32"},
2173 {0x14, nullptr, "UnmapSharedMemory32"}, 2387 {0x14, nullptr, "UnmapSharedMemory32"},
2174 {0x15, nullptr, "CreateTransferMemory32"}, 2388 {0x15, SvcWrap32<CreateTransferMemory32>, "CreateTransferMemory32"},
2175 {0x16, SvcWrap32<CloseHandle32>, "CloseHandle32"}, 2389 {0x16, SvcWrap32<CloseHandle32>, "CloseHandle32"},
2176 {0x17, nullptr, "ResetSignal32"}, 2390 {0x17, SvcWrap32<ResetSignal32>, "ResetSignal32"},
2177 {0x18, SvcWrap32<WaitSynchronization32>, "WaitSynchronization32"}, 2391 {0x18, SvcWrap32<WaitSynchronization32>, "WaitSynchronization32"},
2178 {0x19, nullptr, "CancelSynchronization32"}, 2392 {0x19, SvcWrap32<CancelSynchronization32>, "CancelSynchronization32"},
2179 {0x1a, nullptr, "ArbitrateLock32"}, 2393 {0x1a, SvcWrap32<ArbitrateLock32>, "ArbitrateLock32"},
2180 {0x1b, nullptr, "ArbitrateUnlock32"}, 2394 {0x1b, SvcWrap32<ArbitrateUnlock32>, "ArbitrateUnlock32"},
2181 {0x1c, nullptr, "WaitProcessWideKeyAtomic32"}, 2395 {0x1c, SvcWrap32<WaitProcessWideKeyAtomic32>, "WaitProcessWideKeyAtomic32"},
2182 {0x1d, SvcWrap32<SignalProcessWideKey32>, "SignalProcessWideKey32"}, 2396 {0x1d, SvcWrap32<SignalProcessWideKey32>, "SignalProcessWideKey32"},
2183 {0x1e, nullptr, "GetSystemTick32"}, 2397 {0x1e, SvcWrap32<GetSystemTick32>, "GetSystemTick32"},
2184 {0x1f, SvcWrap32<ConnectToNamedPort32>, "ConnectToNamedPort32"}, 2398 {0x1f, SvcWrap32<ConnectToNamedPort32>, "ConnectToNamedPort32"},
2185 {0x20, nullptr, "Unknown"}, 2399 {0x20, nullptr, "Unknown"},
2186 {0x21, SvcWrap32<SendSyncRequest32>, "SendSyncRequest32"}, 2400 {0x21, SvcWrap32<SendSyncRequest32>, "SendSyncRequest32"},
2187 {0x22, nullptr, "SendSyncRequestWithUserBuffer32"}, 2401 {0x22, nullptr, "SendSyncRequestWithUserBuffer32"},
2188 {0x23, nullptr, "Unknown"}, 2402 {0x23, nullptr, "Unknown"},
2189 {0x24, nullptr, "GetProcessId32"}, 2403 {0x24, SvcWrap32<GetProcessId32>, "GetProcessId32"},
2190 {0x25, SvcWrap32<GetThreadId32>, "GetThreadId32"}, 2404 {0x25, SvcWrap32<GetThreadId32>, "GetThreadId32"},
2191 {0x26, nullptr, "Break32"}, 2405 {0x26, SvcWrap32<Break32>, "Break32"},
2192 {0x27, nullptr, "OutputDebugString32"}, 2406 {0x27, nullptr, "OutputDebugString32"},
2193 {0x28, nullptr, "Unknown"}, 2407 {0x28, nullptr, "Unknown"},
2194 {0x29, SvcWrap32<GetInfo32>, "GetInfo32"}, 2408 {0x29, SvcWrap32<GetInfo32>, "GetInfo32"},
2195 {0x2a, nullptr, "Unknown"}, 2409 {0x2a, nullptr, "Unknown"},
2196 {0x2b, nullptr, "Unknown"}, 2410 {0x2b, nullptr, "Unknown"},
2197 {0x2c, nullptr, "MapPhysicalMemory32"}, 2411 {0x2c, SvcWrap32<MapPhysicalMemory32>, "MapPhysicalMemory32"},
2198 {0x2d, nullptr, "UnmapPhysicalMemory32"}, 2412 {0x2d, SvcWrap32<UnmapPhysicalMemory32>, "UnmapPhysicalMemory32"},
2199 {0x2e, nullptr, "Unknown"}, 2413 {0x2e, nullptr, "Unknown"},
2200 {0x2f, nullptr, "Unknown"}, 2414 {0x2f, nullptr, "Unknown"},
2201 {0x30, nullptr, "Unknown"}, 2415 {0x30, nullptr, "Unknown"},
2202 {0x31, nullptr, "Unknown"}, 2416 {0x31, nullptr, "Unknown"},
2203 {0x32, nullptr, "SetThreadActivity32"}, 2417 {0x32, SvcWrap32<SetThreadActivity32>, "SetThreadActivity32"},
2204 {0x33, nullptr, "GetThreadContext32"}, 2418 {0x33, SvcWrap32<GetThreadContext32>, "GetThreadContext32"},
2205 {0x34, nullptr, "WaitForAddress32"}, 2419 {0x34, SvcWrap32<WaitForAddress32>, "WaitForAddress32"},
2206 {0x35, nullptr, "SignalToAddress32"}, 2420 {0x35, SvcWrap32<SignalToAddress32>, "SignalToAddress32"},
2207 {0x36, nullptr, "Unknown"}, 2421 {0x36, nullptr, "Unknown"},
2208 {0x37, nullptr, "Unknown"}, 2422 {0x37, nullptr, "Unknown"},
2209 {0x38, nullptr, "Unknown"}, 2423 {0x38, nullptr, "Unknown"},
@@ -2219,7 +2433,7 @@ static const FunctionDef SVC_Table_32[] = {
2219 {0x42, nullptr, "Unknown"}, 2433 {0x42, nullptr, "Unknown"},
2220 {0x43, nullptr, "ReplyAndReceive32"}, 2434 {0x43, nullptr, "ReplyAndReceive32"},
2221 {0x44, nullptr, "Unknown"}, 2435 {0x44, nullptr, "Unknown"},
2222 {0x45, nullptr, "CreateEvent32"}, 2436 {0x45, SvcWrap32<CreateEvent32>, "CreateEvent32"},
2223 {0x46, nullptr, "Unknown"}, 2437 {0x46, nullptr, "Unknown"},
2224 {0x47, nullptr, "Unknown"}, 2438 {0x47, nullptr, "Unknown"},
2225 {0x48, nullptr, "Unknown"}, 2439 {0x48, nullptr, "Unknown"},
@@ -2245,7 +2459,7 @@ static const FunctionDef SVC_Table_32[] = {
2245 {0x5c, nullptr, "Unknown"}, 2459 {0x5c, nullptr, "Unknown"},
2246 {0x5d, nullptr, "Unknown"}, 2460 {0x5d, nullptr, "Unknown"},
2247 {0x5e, nullptr, "Unknown"}, 2461 {0x5e, nullptr, "Unknown"},
2248 {0x5F, nullptr, "FlushProcessDataCache32"}, 2462 {0x5F, SvcWrap32<FlushProcessDataCache32>, "FlushProcessDataCache32"},
2249 {0x60, nullptr, "Unknown"}, 2463 {0x60, nullptr, "Unknown"},
2250 {0x61, nullptr, "Unknown"}, 2464 {0x61, nullptr, "Unknown"},
2251 {0x62, nullptr, "Unknown"}, 2465 {0x62, nullptr, "Unknown"},
@@ -2423,13 +2637,10 @@ static const FunctionDef* GetSVCInfo64(u32 func_num) {
2423 return &SVC_Table_64[func_num]; 2637 return &SVC_Table_64[func_num];
2424} 2638}
2425 2639
2426MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70));
2427
2428void Call(Core::System& system, u32 immediate) { 2640void Call(Core::System& system, u32 immediate) {
2429 MICROPROFILE_SCOPE(Kernel_SVC); 2641 system.ExitDynarmicProfile();
2430 2642 auto& kernel = system.Kernel();
2431 // Lock the global kernel mutex when we enter the kernel HLE. 2643 kernel.EnterSVCProfile();
2432 std::lock_guard lock{HLE::g_hle_lock};
2433 2644
2434 const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate) 2645 const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate)
2435 : GetSVCInfo32(immediate); 2646 : GetSVCInfo32(immediate);
@@ -2442,6 +2653,9 @@ void Call(Core::System& system, u32 immediate) {
2442 } else { 2653 } else {
2443 LOG_CRITICAL(Kernel_SVC, "Unknown SVC function 0x{:X}", immediate); 2654 LOG_CRITICAL(Kernel_SVC, "Unknown SVC function 0x{:X}", immediate);
2444 } 2655 }
2656
2657 kernel.ExitSVCProfile();
2658 system.EnterDynarmicProfile();
2445} 2659}
2446 2660
2447} // namespace Kernel::Svc 2661} // namespace Kernel::Svc
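Stripped of the profiling and kernel plumbing, Call() above is a table dispatch: look the SVC immediate up in the 32- or 64-bit table and invoke the registered handler, logging when the slot is still a nullptr. A minimal standalone sketch of that pattern with made-up entries; the real tables are indexed directly by function number, the linear lookup here only keeps the sketch short:

#include <array>
#include <cstdint>
#include <iostream>

struct Context {
    int dummy;
};

struct Entry {
    uint32_t id;
    void (*func)(Context&);
    const char* name;
};

void HandleSleep(Context&) {
    std::cout << "SleepThread handled\n";
}

constexpr std::array<Entry, 2> kTable{{
    {0x00, nullptr, "Unknown"},
    {0x0B, HandleSleep, "SleepThread"},
}};

void Dispatch(Context& ctx, uint32_t immediate) {
    for (const auto& entry : kTable) {
        if (entry.id != immediate) {
            continue;
        }
        if (entry.func != nullptr) {
            entry.func(ctx);
        } else {
            std::cout << "Unimplemented SVC " << entry.name << '\n';
        }
        return;
    }
    std::cout << "Unknown SVC\n";
}

int main() {
    Context ctx{};
    Dispatch(ctx, 0x0B);
    return 0;
}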
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index 7d735e3fa..0b6dd9df0 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -350,13 +350,50 @@ void SvcWrap64(Core::System& system) {
350 func(system, static_cast<u32>(Param(system, 0)), Param(system, 1), Param(system, 2)); 350 func(system, static_cast<u32>(Param(system, 0)), Param(system, 1), Param(system, 2));
351} 351}
352 352
353// Used by QueryMemory32 353// Used by QueryMemory32, ArbitrateLock32
354template <ResultCode func(Core::System&, u32, u32, u32)> 354template <ResultCode func(Core::System&, u32, u32, u32)>
355void SvcWrap32(Core::System& system) { 355void SvcWrap32(Core::System& system) {
356 FuncReturn32(system, 356 FuncReturn32(system,
357 func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2)).raw); 357 func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2)).raw);
358} 358}
359 359
360// Used by Break32
361template <void func(Core::System&, u32, u32, u32)>
362void SvcWrap32(Core::System& system) {
363 func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2));
364}
365
366// Used by ExitProcess32, ExitThread32
367template <void func(Core::System&)>
368void SvcWrap32(Core::System& system) {
369 func(system);
370}
371
372// Used by GetCurrentProcessorNumber32
373template <u32 func(Core::System&)>
374void SvcWrap32(Core::System& system) {
375 FuncReturn32(system, func(system));
376}
377
378// Used by SleepThread32
379template <void func(Core::System&, u32, u32)>
380void SvcWrap32(Core::System& system) {
381 func(system, Param32(system, 0), Param32(system, 1));
382}
383
384// Used by CreateThread32
385template <ResultCode func(Core::System&, Handle*, u32, u32, u32, u32, s32)>
386void SvcWrap32(Core::System& system) {
387 Handle param_1 = 0;
388
389 const u32 retval = func(system, &param_1, Param32(system, 0), Param32(system, 1),
390 Param32(system, 2), Param32(system, 3), Param32(system, 4))
391 .raw;
392
393 system.CurrentArmInterface().SetReg(1, param_1);
394 FuncReturn(system, retval);
395}
396
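Each SvcWrap32 overload adapts a typed handler to the guest register file: inputs are read with Param32, the result code goes back through FuncReturn, and out-parameters are written to successive registers with SetReg. The same idea as a standalone sketch, using a plain array in place of the emulated ARM interface and a placeholder handler; none of these names are from the patch:

#include <array>
#include <cstdint>
#include <iostream>

using Regs = std::array<uint32_t, 8>;

uint32_t GetProcessIdSketch(uint32_t* out_low, uint32_t* out_high, uint32_t handle) {
    const uint64_t pid = 0x1122334455667788ULL + handle; // placeholder value
    *out_low = static_cast<uint32_t>(pid);
    *out_high = static_cast<uint32_t>(pid >> 32);
    return 0; // stands in for RESULT_SUCCESS
}

template <uint32_t func(uint32_t*, uint32_t*, uint32_t)>
void Wrap32(Regs& regs) {
    uint32_t low = 0;
    uint32_t high = 0;
    const uint32_t result = func(&low, &high, regs[1]); // handle passed in r1
    regs[0] = result;                                   // result code returned in r0
    regs[1] = low;                                      // out-params written to r1/r2
    regs[2] = high;
}

int main() {
    Regs regs{};
    regs[1] = 4;
    Wrap32<GetProcessIdSketch>(regs);
    std::cout << std::hex << regs[0] << ' ' << regs[1] << ' ' << regs[2] << '\n';
    return 0;
}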
360// Used by GetInfo32 397// Used by GetInfo32
361template <ResultCode func(Core::System&, u32*, u32*, u32, u32, u32, u32)> 398template <ResultCode func(Core::System&, u32*, u32*, u32, u32, u32, u32)>
362void SvcWrap32(Core::System& system) { 399void SvcWrap32(Core::System& system) {
@@ -393,18 +430,114 @@ void SvcWrap32(Core::System& system) {
393 FuncReturn(system, retval); 430 FuncReturn(system, retval);
394} 431}
395 432
433// Used by GetSystemTick32
434template <void func(Core::System&, u32*, u32*)>
435void SvcWrap32(Core::System& system) {
436 u32 param_1 = 0;
437 u32 param_2 = 0;
438
439 func(system, &param_1, &param_2);
440 system.CurrentArmInterface().SetReg(0, param_1);
441 system.CurrentArmInterface().SetReg(1, param_2);
442}
443
444// Used by CreateEvent32
445template <ResultCode func(Core::System&, Handle*, Handle*)>
446void SvcWrap32(Core::System& system) {
447 Handle param_1 = 0;
448 Handle param_2 = 0;
449
450 const u32 retval = func(system, &param_1, &param_2).raw;
451 system.CurrentArmInterface().SetReg(1, param_1);
452 system.CurrentArmInterface().SetReg(2, param_2);
453 FuncReturn(system, retval);
454}
455
456// Used by GetThreadId32
457template <ResultCode func(Core::System&, Handle, u32*, u32*, u32*)>
458void SvcWrap32(Core::System& system) {
459 u32 param_1 = 0;
460 u32 param_2 = 0;
461 u32 param_3 = 0;
462
463 const u32 retval = func(system, Param32(system, 2), &param_1, &param_2, &param_3).raw;
464 system.CurrentArmInterface().SetReg(1, param_1);
465 system.CurrentArmInterface().SetReg(2, param_2);
466 system.CurrentArmInterface().SetReg(3, param_3);
467 FuncReturn(system, retval);
468}
469
396// Used by SignalProcessWideKey32 470// Used by SignalProcessWideKey32
397template <void func(Core::System&, u32, s32)> 471template <void func(Core::System&, u32, s32)>
398void SvcWrap32(Core::System& system) { 472void SvcWrap32(Core::System& system) {
399 func(system, static_cast<u32>(Param(system, 0)), static_cast<s32>(Param(system, 1))); 473 func(system, static_cast<u32>(Param(system, 0)), static_cast<s32>(Param(system, 1)));
400} 474}
401 475
402// Used by SendSyncRequest32 476// Used by SetThreadPriority32
477template <ResultCode func(Core::System&, Handle, u32)>
478void SvcWrap32(Core::System& system) {
479 const u32 retval =
480 func(system, static_cast<Handle>(Param(system, 0)), static_cast<u32>(Param(system, 1))).raw;
481 FuncReturn(system, retval);
482}
483
484// Used by SetThreadCoreMask32
485template <ResultCode func(Core::System&, Handle, u32, u32, u32)>
486void SvcWrap32(Core::System& system) {
487 const u32 retval =
488 func(system, static_cast<Handle>(Param(system, 0)), static_cast<u32>(Param(system, 1)),
489 static_cast<u32>(Param(system, 2)), static_cast<u32>(Param(system, 3)))
490 .raw;
491 FuncReturn(system, retval);
492}
493
494// Used by WaitProcessWideKeyAtomic32
495template <ResultCode func(Core::System&, u32, u32, Handle, u32, u32)>
496void SvcWrap32(Core::System& system) {
497 const u32 retval =
498 func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1)),
499 static_cast<Handle>(Param(system, 2)), static_cast<u32>(Param(system, 3)),
500 static_cast<u32>(Param(system, 4)))
501 .raw;
502 FuncReturn(system, retval);
503}
504
505// Used by WaitForAddress32
506template <ResultCode func(Core::System&, u32, u32, s32, u32, u32)>
507void SvcWrap32(Core::System& system) {
508 const u32 retval = func(system, static_cast<u32>(Param(system, 0)),
509 static_cast<u32>(Param(system, 1)), static_cast<s32>(Param(system, 2)),
510 static_cast<u32>(Param(system, 3)), static_cast<u32>(Param(system, 4)))
511 .raw;
512 FuncReturn(system, retval);
513}
514
515// Used by SignalToAddress32
516template <ResultCode func(Core::System&, u32, u32, s32, s32)>
517void SvcWrap32(Core::System& system) {
518 const u32 retval =
519 func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1)),
520 static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3)))
521 .raw;
522 FuncReturn(system, retval);
523}
524
525// Used by SendSyncRequest32, ArbitrateUnlock32
403template <ResultCode func(Core::System&, u32)> 526template <ResultCode func(Core::System&, u32)>
404void SvcWrap32(Core::System& system) { 527void SvcWrap32(Core::System& system) {
405 FuncReturn(system, func(system, static_cast<u32>(Param(system, 0))).raw); 528 FuncReturn(system, func(system, static_cast<u32>(Param(system, 0))).raw);
406} 529}
407 530
531// Used by CreateTransferMemory32
532template <ResultCode func(Core::System&, Handle*, u32, u32, u32)>
533void SvcWrap32(Core::System& system) {
534 Handle handle = 0;
535 const u32 retval =
536 func(system, &handle, Param32(system, 1), Param32(system, 2), Param32(system, 3)).raw;
537 system.CurrentArmInterface().SetReg(1, handle);
538 FuncReturn(system, retval);
539}
540
408// Used by WaitSynchronization32 541// Used by WaitSynchronization32
409template <ResultCode func(Core::System&, u32, u32, s32, u32, Handle*)> 542template <ResultCode func(Core::System&, u32, u32, s32, u32, Handle*)>
410void SvcWrap32(Core::System& system) { 543void SvcWrap32(Core::System& system) {
diff --git a/src/core/hle/kernel/synchronization.cpp b/src/core/hle/kernel/synchronization.cpp
index dc37fad1a..851b702a5 100644
--- a/src/core/hle/kernel/synchronization.cpp
+++ b/src/core/hle/kernel/synchronization.cpp
@@ -10,78 +10,107 @@
10#include "core/hle/kernel/synchronization.h" 10#include "core/hle/kernel/synchronization.h"
11#include "core/hle/kernel/synchronization_object.h" 11#include "core/hle/kernel/synchronization_object.h"
12#include "core/hle/kernel/thread.h" 12#include "core/hle/kernel/thread.h"
13#include "core/hle/kernel/time_manager.h"
13 14
14namespace Kernel { 15namespace Kernel {
15 16
16/// Default thread wakeup callback for WaitSynchronization
17static bool DefaultThreadWakeupCallback(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
18 std::shared_ptr<SynchronizationObject> object,
19 std::size_t index) {
20 ASSERT(thread->GetStatus() == ThreadStatus::WaitSynch);
21
22 if (reason == ThreadWakeupReason::Timeout) {
23 thread->SetWaitSynchronizationResult(RESULT_TIMEOUT);
24 return true;
25 }
26
27 ASSERT(reason == ThreadWakeupReason::Signal);
28 thread->SetWaitSynchronizationResult(RESULT_SUCCESS);
29 thread->SetWaitSynchronizationOutput(static_cast<u32>(index));
30 return true;
31}
32
33Synchronization::Synchronization(Core::System& system) : system{system} {} 17Synchronization::Synchronization(Core::System& system) : system{system} {}
34 18
35void Synchronization::SignalObject(SynchronizationObject& obj) const { 19void Synchronization::SignalObject(SynchronizationObject& obj) const {
20 auto& kernel = system.Kernel();
21 SchedulerLock lock(kernel);
22 auto& time_manager = kernel.TimeManager();
36 if (obj.IsSignaled()) { 23 if (obj.IsSignaled()) {
37 obj.WakeupAllWaitingThreads(); 24 for (auto thread : obj.GetWaitingThreads()) {
25 if (thread->GetSchedulingStatus() == ThreadSchedStatus::Paused) {
26 if (thread->GetStatus() != ThreadStatus::WaitHLEEvent) {
27 ASSERT(thread->GetStatus() == ThreadStatus::WaitSynch);
28 ASSERT(thread->IsWaitingSync());
29 }
30 thread->SetSynchronizationResults(&obj, RESULT_SUCCESS);
31 thread->ResumeFromWait();
32 }
33 }
34 obj.ClearWaitingThreads();
38 } 35 }
39} 36}
40 37
41std::pair<ResultCode, Handle> Synchronization::WaitFor( 38std::pair<ResultCode, Handle> Synchronization::WaitFor(
42 std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds) { 39 std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds) {
40 auto& kernel = system.Kernel();
43 auto* const thread = system.CurrentScheduler().GetCurrentThread(); 41 auto* const thread = system.CurrentScheduler().GetCurrentThread();
44 // Find the first object that is acquirable in the provided list of objects 42 Handle event_handle = InvalidHandle;
45 const auto itr = std::find_if(sync_objects.begin(), sync_objects.end(), 43 {
46 [thread](const std::shared_ptr<SynchronizationObject>& object) { 44 SchedulerLockAndSleep lock(kernel, event_handle, thread, nano_seconds);
47 return object->IsSignaled(); 45 const auto itr =
48 }); 46 std::find_if(sync_objects.begin(), sync_objects.end(),
49 47 [thread](const std::shared_ptr<SynchronizationObject>& object) {
50 if (itr != sync_objects.end()) { 48 return object->IsSignaled();
51 // We found a ready object, acquire it and set the result value 49 });
52 SynchronizationObject* object = itr->get(); 50
53 object->Acquire(thread); 51 if (itr != sync_objects.end()) {
54 const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr)); 52 // We found a ready object, acquire it and set the result value
55 return {RESULT_SUCCESS, index}; 53 SynchronizationObject* object = itr->get();
54 object->Acquire(thread);
55 const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr));
56 lock.CancelSleep();
57 return {RESULT_SUCCESS, index};
58 }
59
60 if (nano_seconds == 0) {
61 lock.CancelSleep();
62 return {RESULT_TIMEOUT, InvalidHandle};
63 }
64
65 if (thread->IsPendingTermination()) {
66 lock.CancelSleep();
67 return {ERR_THREAD_TERMINATING, InvalidHandle};
68 }
69
70 if (thread->IsSyncCancelled()) {
71 thread->SetSyncCancelled(false);
72 lock.CancelSleep();
73 return {ERR_SYNCHRONIZATION_CANCELED, InvalidHandle};
74 }
75
76 for (auto& object : sync_objects) {
77 object->AddWaitingThread(SharedFrom(thread));
78 }
79
80 thread->SetSynchronizationObjects(&sync_objects);
81 thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
82 thread->SetStatus(ThreadStatus::WaitSynch);
83 thread->SetWaitingSync(true);
56 } 84 }
85 thread->SetWaitingSync(false);
57 86
58 // No objects were ready to be acquired, prepare to suspend the thread. 87 if (event_handle != InvalidHandle) {
59 88 auto& time_manager = kernel.TimeManager();
60 // If a timeout value of 0 was provided, just return the Timeout error code instead of 89 time_manager.UnscheduleTimeEvent(event_handle);
61 // suspending the thread.
62 if (nano_seconds == 0) {
63 return {RESULT_TIMEOUT, InvalidHandle};
64 } 90 }
65 91
66 if (thread->IsSyncCancelled()) { 92 {
67 thread->SetSyncCancelled(false); 93 SchedulerLock lock(kernel);
68 return {ERR_SYNCHRONIZATION_CANCELED, InvalidHandle}; 94 ResultCode signaling_result = thread->GetSignalingResult();
95 SynchronizationObject* signaling_object = thread->GetSignalingObject();
96 thread->SetSynchronizationObjects(nullptr);
97 auto shared_thread = SharedFrom(thread);
98 for (auto& obj : sync_objects) {
99 obj->RemoveWaitingThread(shared_thread);
100 }
101 if (signaling_object != nullptr) {
102 const auto itr = std::find_if(
103 sync_objects.begin(), sync_objects.end(),
104 [signaling_object](const std::shared_ptr<SynchronizationObject>& object) {
105 return object.get() == signaling_object;
106 });
107 ASSERT(itr != sync_objects.end());
108 signaling_object->Acquire(thread);
109 const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr));
110 return {signaling_result, index};
111 }
112 return {signaling_result, -1};
69 } 113 }
70
71 for (auto& object : sync_objects) {
72 object->AddWaitingThread(SharedFrom(thread));
73 }
74
75 thread->SetSynchronizationObjects(std::move(sync_objects));
76 thread->SetStatus(ThreadStatus::WaitSynch);
77
78 // Create an event to wake the thread up after the specified nanosecond delay has passed
79 thread->WakeAfterDelay(nano_seconds);
80 thread->SetWakeupCallback(DefaultThreadWakeupCallback);
81
82 system.PrepareReschedule(thread->GetProcessorID());
83
84 return {RESULT_TIMEOUT, InvalidHandle};
85} 114}
86 115
87} // namespace Kernel 116} // namespace Kernel
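The rewritten WaitFor leans on SchedulerLockAndSleep: construction takes the scheduler lock and provisionally arms a wake-up for the requested timeout, CancelSleep() suppresses that wake-up when the wait resolves immediately, and any event that was armed is torn down afterwards with UnscheduleTimeEvent(). The helper's implementation is not part of this hunk; the sketch below only illustrates, as an assumption, the RAII shape those calls rely on and is not the actual class.

    // Hedged sketch of the presumed contract of SchedulerLockAndSleep; locking
    // details are elided and the real implementation may differ.
    class SchedulerLockAndSleep {
    public:
        SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* thread,
                              s64 nanoseconds)
            : kernel{kernel}, event_handle{event_handle}, thread{thread},
              nanoseconds{nanoseconds} {
            // Presumably acquires the same global lock as SchedulerLock here.
        }

        ~SchedulerLockAndSleep() {
            if (sleep_cancelled) {
                event_handle = InvalidHandle;
            } else {
                // Presumably arms the wake-up before the lock is released, so the
                // waiter cannot miss its own timeout.
                kernel.TimeManager().ScheduleTimeEvent(event_handle, thread, nanoseconds);
            }
            // Lock release elided.
        }

        void CancelSleep() {
            sleep_cancelled = true;
        }

    private:
        KernelCore& kernel;
        Handle& event_handle;
        Thread* thread;
        s64 nanoseconds;
        bool sleep_cancelled = false;
    };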
diff --git a/src/core/hle/kernel/synchronization_object.cpp b/src/core/hle/kernel/synchronization_object.cpp
index 43f3eef18..ba4d39157 100644
--- a/src/core/hle/kernel/synchronization_object.cpp
+++ b/src/core/hle/kernel/synchronization_object.cpp
@@ -38,68 +38,8 @@ void SynchronizationObject::RemoveWaitingThread(std::shared_ptr<Thread> thread)
38 waiting_threads.erase(itr); 38 waiting_threads.erase(itr);
39} 39}
40 40
41std::shared_ptr<Thread> SynchronizationObject::GetHighestPriorityReadyThread() const { 41void SynchronizationObject::ClearWaitingThreads() {
42 Thread* candidate = nullptr; 42 waiting_threads.clear();
43 u32 candidate_priority = THREADPRIO_LOWEST + 1;
44
45 for (const auto& thread : waiting_threads) {
46 const ThreadStatus thread_status = thread->GetStatus();
47
48 // The list of waiting threads must not contain threads that are not waiting to be awakened.
49 ASSERT_MSG(thread_status == ThreadStatus::WaitSynch ||
50 thread_status == ThreadStatus::WaitHLEEvent,
51 "Inconsistent thread statuses in waiting_threads");
52
53 if (thread->GetPriority() >= candidate_priority)
54 continue;
55
56 if (ShouldWait(thread.get()))
57 continue;
58
59 candidate = thread.get();
60 candidate_priority = thread->GetPriority();
61 }
62
63 return SharedFrom(candidate);
64}
65
66void SynchronizationObject::WakeupWaitingThread(std::shared_ptr<Thread> thread) {
67 ASSERT(!ShouldWait(thread.get()));
68
69 if (!thread) {
70 return;
71 }
72
73 if (thread->IsSleepingOnWait()) {
74 for (const auto& object : thread->GetSynchronizationObjects()) {
75 ASSERT(!object->ShouldWait(thread.get()));
76 object->Acquire(thread.get());
77 }
78 } else {
79 Acquire(thread.get());
80 }
81
82 const std::size_t index = thread->GetSynchronizationObjectIndex(SharedFrom(this));
83
84 thread->ClearSynchronizationObjects();
85
86 thread->CancelWakeupTimer();
87
88 bool resume = true;
89 if (thread->HasWakeupCallback()) {
90 resume = thread->InvokeWakeupCallback(ThreadWakeupReason::Signal, thread, SharedFrom(this),
91 index);
92 }
93 if (resume) {
94 thread->ResumeFromWait();
95 kernel.PrepareReschedule(thread->GetProcessorID());
96 }
97}
98
99void SynchronizationObject::WakeupAllWaitingThreads() {
100 while (auto thread = GetHighestPriorityReadyThread()) {
101 WakeupWaitingThread(thread);
102 }
103} 43}
104 44
105const std::vector<std::shared_ptr<Thread>>& SynchronizationObject::GetWaitingThreads() const { 45const std::vector<std::shared_ptr<Thread>>& SynchronizationObject::GetWaitingThreads() const {
diff --git a/src/core/hle/kernel/synchronization_object.h b/src/core/hle/kernel/synchronization_object.h
index 741c31faf..f89b24204 100644
--- a/src/core/hle/kernel/synchronization_object.h
+++ b/src/core/hle/kernel/synchronization_object.h
@@ -12,6 +12,7 @@
12namespace Kernel { 12namespace Kernel {
13 13
14class KernelCore; 14class KernelCore;
15class Synchronization;
15class Thread; 16class Thread;
16 17
17/// Class that represents a Kernel object that a thread can be waiting on 18/// Class that represents a Kernel object that a thread can be waiting on
@@ -49,24 +50,11 @@ public:
49 */ 50 */
50 void RemoveWaitingThread(std::shared_ptr<Thread> thread); 51 void RemoveWaitingThread(std::shared_ptr<Thread> thread);
51 52
52 /**
53 * Wake up all threads waiting on this object that can be awoken, in priority order,
54 * and set the synchronization result and output of the thread.
55 */
56 void WakeupAllWaitingThreads();
57
58 /**
59 * Wakes up a single thread waiting on this object.
60 * @param thread Thread that is waiting on this object to wakeup.
61 */
62 void WakeupWaitingThread(std::shared_ptr<Thread> thread);
63
64 /// Obtains the highest priority thread that is ready to run from this object's waiting list.
65 std::shared_ptr<Thread> GetHighestPriorityReadyThread() const;
66
67 /// Get a const reference to the waiting threads list for debug use 53 /// Get a const reference to the waiting threads list for debug use
68 const std::vector<std::shared_ptr<Thread>>& GetWaitingThreads() const; 54 const std::vector<std::shared_ptr<Thread>>& GetWaitingThreads() const;
69 55
56 void ClearWaitingThreads();
57
70protected: 58protected:
71 bool is_signaled{}; // Tells if this sync object is signalled; 59 bool is_signaled{}; // Tells if this sync object is signalled;
72 60
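With the wake-up helpers removed, a SynchronizationObject subclass only has to describe its signalled state and how a waiter acquires it; waking waiters is now centralized in Synchronization::SignalObject. The class below is purely illustrative (it does not exist in the tree), and the method set is inferred from the calls made in this diff.

    // Illustrative subclass; the virtual signatures of ShouldWait/Acquire are
    // inferred from their call sites above and may not match exactly.
    class ExampleAutoResetEvent final : public SynchronizationObject {
    public:
        explicit ExampleAutoResetEvent(KernelCore& kernel) : SynchronizationObject{kernel} {}

        bool ShouldWait(const Thread* thread) const override {
            return !is_signaled;
        }

        void Acquire(Thread* thread) override {
            // An auto-reset event clears its state when a waiter consumes it.
            is_signaled = false;
        }

        void Signal() {
            is_signaled = true;
            // The kernel would then run Synchronization::SignalObject(*this) to wake
            // any paused waiters, as shown in synchronization.cpp above.
        }

        // Object boilerplate (GetTypeName, GetHandleType, ...) omitted for brevity.
    };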
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index db7f379ac..2b1092697 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -9,12 +9,21 @@
9 9
10#include "common/assert.h" 10#include "common/assert.h"
11#include "common/common_types.h" 11#include "common/common_types.h"
12#include "common/fiber.h"
12#include "common/logging/log.h" 13#include "common/logging/log.h"
13#include "common/thread_queue_list.h" 14#include "common/thread_queue_list.h"
14#include "core/arm/arm_interface.h" 15#include "core/arm/arm_interface.h"
16#ifdef ARCHITECTURE_x86_64
17#include "core/arm/dynarmic/arm_dynarmic_32.h"
18#include "core/arm/dynarmic/arm_dynarmic_64.h"
19#endif
20#include "core/arm/cpu_interrupt_handler.h"
21#include "core/arm/exclusive_monitor.h"
22#include "core/arm/unicorn/arm_unicorn.h"
15#include "core/core.h" 23#include "core/core.h"
16#include "core/core_timing.h" 24#include "core/core_timing.h"
17#include "core/core_timing_util.h" 25#include "core/core_timing_util.h"
26#include "core/cpu_manager.h"
18#include "core/hardware_properties.h" 27#include "core/hardware_properties.h"
19#include "core/hle/kernel/errors.h" 28#include "core/hle/kernel/errors.h"
20#include "core/hle/kernel/handle_table.h" 29#include "core/hle/kernel/handle_table.h"
@@ -23,6 +32,7 @@
23#include "core/hle/kernel/process.h" 32#include "core/hle/kernel/process.h"
24#include "core/hle/kernel/scheduler.h" 33#include "core/hle/kernel/scheduler.h"
25#include "core/hle/kernel/thread.h" 34#include "core/hle/kernel/thread.h"
35#include "core/hle/kernel/time_manager.h"
26#include "core/hle/result.h" 36#include "core/hle/result.h"
27#include "core/memory.h" 37#include "core/memory.h"
28 38
@@ -44,46 +54,26 @@ Thread::Thread(KernelCore& kernel) : SynchronizationObject{kernel} {}
44Thread::~Thread() = default; 54Thread::~Thread() = default;
45 55
46void Thread::Stop() { 56void Thread::Stop() {
47 // Cancel any outstanding wakeup events for this thread 57 {
48 Core::System::GetInstance().CoreTiming().UnscheduleEvent(kernel.ThreadWakeupCallbackEventType(), 58 SchedulerLock lock(kernel);
49 global_handle); 59 SetStatus(ThreadStatus::Dead);
50 kernel.GlobalHandleTable().Close(global_handle); 60 Signal();
51 global_handle = 0; 61 kernel.GlobalHandleTable().Close(global_handle);
52 SetStatus(ThreadStatus::Dead);
53 Signal();
54
55 // Clean up any dangling references in objects that this thread was waiting for
56 for (auto& wait_object : wait_objects) {
57 wait_object->RemoveWaitingThread(SharedFrom(this));
58 }
59 wait_objects.clear();
60
61 owner_process->UnregisterThread(this);
62
63 // Mark the TLS slot in the thread's page as free.
64 owner_process->FreeTLSRegion(tls_address);
65}
66
67void Thread::WakeAfterDelay(s64 nanoseconds) {
68 // Don't schedule a wakeup if the thread wants to wait forever
69 if (nanoseconds == -1)
70 return;
71 62
72 // This function might be called from any thread so we have to be cautious and use the 63 if (owner_process) {
73 // thread-safe version of ScheduleEvent. 64 owner_process->UnregisterThread(this);
74 const s64 cycles = Core::Timing::nsToCycles(std::chrono::nanoseconds{nanoseconds});
75 Core::System::GetInstance().CoreTiming().ScheduleEvent(
76 cycles, kernel.ThreadWakeupCallbackEventType(), global_handle);
77}
78 65
79void Thread::CancelWakeupTimer() { 66 // Mark the TLS slot in the thread's page as free.
80 Core::System::GetInstance().CoreTiming().UnscheduleEvent(kernel.ThreadWakeupCallbackEventType(), 67 owner_process->FreeTLSRegion(tls_address);
81 global_handle); 68 }
69 arm_interface.reset();
70 has_exited = true;
71 }
72 global_handle = 0;
82} 73}
83 74
84void Thread::ResumeFromWait() { 75void Thread::ResumeFromWait() {
85 ASSERT_MSG(wait_objects.empty(), "Thread is waking up while waiting for objects"); 76 SchedulerLock lock(kernel);
86
87 switch (status) { 77 switch (status) {
88 case ThreadStatus::Paused: 78 case ThreadStatus::Paused:
89 case ThreadStatus::WaitSynch: 79 case ThreadStatus::WaitSynch:
@@ -99,7 +89,7 @@ void Thread::ResumeFromWait() {
99 case ThreadStatus::Ready: 89 case ThreadStatus::Ready:
100 // The thread's wakeup callback must have already been cleared when the thread was first 90 // The thread's wakeup callback must have already been cleared when the thread was first
101 // awoken. 91 // awoken.
102 ASSERT(wakeup_callback == nullptr); 92 ASSERT(hle_callback == nullptr);
103 // If the thread is waiting on multiple wait objects, it might be awoken more than once 93 // If the thread is waiting on multiple wait objects, it might be awoken more than once
104 // before actually resuming. We can ignore subsequent wakeups if the thread status has 94 // before actually resuming. We can ignore subsequent wakeups if the thread status has
105 // already been set to ThreadStatus::Ready. 95 // already been set to ThreadStatus::Ready.
@@ -115,24 +105,31 @@ void Thread::ResumeFromWait() {
115 return; 105 return;
116 } 106 }
117 107
118 wakeup_callback = nullptr; 108 SetStatus(ThreadStatus::Ready);
109}
110
111void Thread::OnWakeUp() {
112 SchedulerLock lock(kernel);
119 113
120 if (activity == ThreadActivity::Paused) { 114 SetStatus(ThreadStatus::Ready);
121 SetStatus(ThreadStatus::Paused); 115}
122 return;
123 }
124 116
117ResultCode Thread::Start() {
118 SchedulerLock lock(kernel);
125 SetStatus(ThreadStatus::Ready); 119 SetStatus(ThreadStatus::Ready);
120 return RESULT_SUCCESS;
126} 121}
127 122
128void Thread::CancelWait() { 123void Thread::CancelWait() {
129 if (GetSchedulingStatus() != ThreadSchedStatus::Paused) { 124 SchedulerLock lock(kernel);
125 if (GetSchedulingStatus() != ThreadSchedStatus::Paused || !is_waiting_on_sync) {
130 is_sync_cancelled = true; 126 is_sync_cancelled = true;
131 return; 127 return;
132 } 128 }
 129 // TODO(Blinkhawk): Implement cancellation of server sessions
133 is_sync_cancelled = false; 130 is_sync_cancelled = false;
134 SetWaitSynchronizationResult(ERR_SYNCHRONIZATION_CANCELED); 131 SetSynchronizationResults(nullptr, ERR_SYNCHRONIZATION_CANCELED);
135 ResumeFromWait(); 132 SetStatus(ThreadStatus::Ready);
136} 133}
137 134
138static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top, 135static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
@@ -153,12 +150,29 @@ static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context,
153 context.fpcr = 0; 150 context.fpcr = 0;
154} 151}
155 152
156ResultVal<std::shared_ptr<Thread>> Thread::Create(KernelCore& kernel, std::string name, 153std::shared_ptr<Common::Fiber>& Thread::GetHostContext() {
157 VAddr entry_point, u32 priority, u64 arg, 154 return host_context;
158 s32 processor_id, VAddr stack_top, 155}
159 Process& owner_process) { 156
157ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadType type_flags,
158 std::string name, VAddr entry_point, u32 priority,
159 u64 arg, s32 processor_id, VAddr stack_top,
160 Process* owner_process) {
161 std::function<void(void*)> init_func = system.GetCpuManager().GetGuestThreadStartFunc();
162 void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
163 return Create(system, type_flags, name, entry_point, priority, arg, processor_id, stack_top,
164 owner_process, std::move(init_func), init_func_parameter);
165}
166
167ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadType type_flags,
168 std::string name, VAddr entry_point, u32 priority,
169 u64 arg, s32 processor_id, VAddr stack_top,
170 Process* owner_process,
171 std::function<void(void*)>&& thread_start_func,
172 void* thread_start_parameter) {
173 auto& kernel = system.Kernel();
 160 // Check if priority is in range. Lowest priority -> highest priority id. 174 // Check if priority is in range. Lowest priority -> highest priority id.
161 if (priority > THREADPRIO_LOWEST) { 175 if (priority > THREADPRIO_LOWEST && ((type_flags & THREADTYPE_IDLE) == 0)) {
162 LOG_ERROR(Kernel_SVC, "Invalid thread priority: {}", priority); 176 LOG_ERROR(Kernel_SVC, "Invalid thread priority: {}", priority);
163 return ERR_INVALID_THREAD_PRIORITY; 177 return ERR_INVALID_THREAD_PRIORITY;
164 } 178 }
@@ -168,11 +182,12 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(KernelCore& kernel, std::strin
168 return ERR_INVALID_PROCESSOR_ID; 182 return ERR_INVALID_PROCESSOR_ID;
169 } 183 }
170 184
171 auto& system = Core::System::GetInstance(); 185 if (owner_process) {
172 if (!system.Memory().IsValidVirtualAddress(owner_process, entry_point)) { 186 if (!system.Memory().IsValidVirtualAddress(*owner_process, entry_point)) {
173 LOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:016X}", name, entry_point); 187 LOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:016X}", name, entry_point);
174 // TODO (bunnei): Find the correct error code to use here 188 // TODO (bunnei): Find the correct error code to use here
175 return RESULT_UNKNOWN; 189 return RESULT_UNKNOWN;
190 }
176 } 191 }
177 192
178 std::shared_ptr<Thread> thread = std::make_shared<Thread>(kernel); 193 std::shared_ptr<Thread> thread = std::make_shared<Thread>(kernel);
@@ -183,51 +198,82 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(KernelCore& kernel, std::strin
183 thread->stack_top = stack_top; 198 thread->stack_top = stack_top;
184 thread->tpidr_el0 = 0; 199 thread->tpidr_el0 = 0;
185 thread->nominal_priority = thread->current_priority = priority; 200 thread->nominal_priority = thread->current_priority = priority;
186 thread->last_running_ticks = system.CoreTiming().GetTicks(); 201 thread->last_running_ticks = 0;
187 thread->processor_id = processor_id; 202 thread->processor_id = processor_id;
188 thread->ideal_core = processor_id; 203 thread->ideal_core = processor_id;
189 thread->affinity_mask = 1ULL << processor_id; 204 thread->affinity_mask = 1ULL << processor_id;
190 thread->wait_objects.clear(); 205 thread->wait_objects = nullptr;
191 thread->mutex_wait_address = 0; 206 thread->mutex_wait_address = 0;
192 thread->condvar_wait_address = 0; 207 thread->condvar_wait_address = 0;
193 thread->wait_handle = 0; 208 thread->wait_handle = 0;
194 thread->name = std::move(name); 209 thread->name = std::move(name);
195 thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap(); 210 thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap();
196 thread->owner_process = &owner_process; 211 thread->owner_process = owner_process;
197 auto& scheduler = kernel.GlobalScheduler(); 212 thread->type = type_flags;
198 scheduler.AddThread(thread); 213 if ((type_flags & THREADTYPE_IDLE) == 0) {
199 thread->tls_address = thread->owner_process->CreateTLSRegion(); 214 auto& scheduler = kernel.GlobalScheduler();
200 215 scheduler.AddThread(thread);
201 thread->owner_process->RegisterThread(thread.get()); 216 }
217 if (owner_process) {
218 thread->tls_address = thread->owner_process->CreateTLSRegion();
219 thread->owner_process->RegisterThread(thread.get());
220 } else {
221 thread->tls_address = 0;
222 }
 223 // TODO(peachum): move to ScheduleThread() when the scheduler is added so the selected core is used
224 // to initialize the context
225 thread->arm_interface.reset();
226 if ((type_flags & THREADTYPE_HLE) == 0) {
227#ifdef ARCHITECTURE_x86_64
228 if (owner_process && !owner_process->Is64BitProcess()) {
229 thread->arm_interface = std::make_unique<Core::ARM_Dynarmic_32>(
230 system, kernel.Interrupts(), kernel.IsMulticore(), kernel.GetExclusiveMonitor(),
231 processor_id);
232 } else {
233 thread->arm_interface = std::make_unique<Core::ARM_Dynarmic_64>(
234 system, kernel.Interrupts(), kernel.IsMulticore(), kernel.GetExclusiveMonitor(),
235 processor_id);
236 }
202 237
203 ResetThreadContext32(thread->context_32, static_cast<u32>(stack_top), 238#else
204 static_cast<u32>(entry_point), static_cast<u32>(arg)); 239 if (owner_process && !owner_process->Is64BitProcess()) {
205 ResetThreadContext64(thread->context_64, stack_top, entry_point, arg); 240 thread->arm_interface = std::make_shared<Core::ARM_Unicorn>(
241 system, kernel.Interrupts(), kernel.IsMulticore(), ARM_Unicorn::Arch::AArch32,
242 processor_id);
243 } else {
244 thread->arm_interface = std::make_shared<Core::ARM_Unicorn>(
245 system, kernel.Interrupts(), kernel.IsMulticore(), ARM_Unicorn::Arch::AArch64,
246 processor_id);
247 }
248 LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available");
249#endif
250 ResetThreadContext32(thread->context_32, static_cast<u32>(stack_top),
251 static_cast<u32>(entry_point), static_cast<u32>(arg));
252 ResetThreadContext64(thread->context_64, stack_top, entry_point, arg);
253 }
254 thread->host_context =
255 std::make_shared<Common::Fiber>(std::move(thread_start_func), thread_start_parameter);
206 256
207 return MakeResult<std::shared_ptr<Thread>>(std::move(thread)); 257 return MakeResult<std::shared_ptr<Thread>>(std::move(thread));
208} 258}
209 259
210void Thread::SetPriority(u32 priority) { 260void Thread::SetPriority(u32 priority) {
261 SchedulerLock lock(kernel);
211 ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST, 262 ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST,
212 "Invalid priority value."); 263 "Invalid priority value.");
213 nominal_priority = priority; 264 nominal_priority = priority;
214 UpdatePriority(); 265 UpdatePriority();
215} 266}
216 267
217void Thread::SetWaitSynchronizationResult(ResultCode result) { 268void Thread::SetSynchronizationResults(SynchronizationObject* object, ResultCode result) {
218 context_32.cpu_registers[0] = result.raw; 269 signaling_object = object;
219 context_64.cpu_registers[0] = result.raw; 270 signaling_result = result;
220}
221
222void Thread::SetWaitSynchronizationOutput(s32 output) {
223 context_32.cpu_registers[1] = output;
224 context_64.cpu_registers[1] = output;
225} 271}
226 272
227s32 Thread::GetSynchronizationObjectIndex(std::shared_ptr<SynchronizationObject> object) const { 273s32 Thread::GetSynchronizationObjectIndex(std::shared_ptr<SynchronizationObject> object) const {
228 ASSERT_MSG(!wait_objects.empty(), "Thread is not waiting for anything"); 274 ASSERT_MSG(!wait_objects->empty(), "Thread is not waiting for anything");
229 const auto match = std::find(wait_objects.rbegin(), wait_objects.rend(), object); 275 const auto match = std::find(wait_objects->rbegin(), wait_objects->rend(), object);
230 return static_cast<s32>(std::distance(match, wait_objects.rend()) - 1); 276 return static_cast<s32>(std::distance(match, wait_objects->rend()) - 1);
231} 277}
232 278
233VAddr Thread::GetCommandBufferAddress() const { 279VAddr Thread::GetCommandBufferAddress() const {
@@ -236,6 +282,14 @@ VAddr Thread::GetCommandBufferAddress() const {
236 return GetTLSAddress() + command_header_offset; 282 return GetTLSAddress() + command_header_offset;
237} 283}
238 284
285Core::ARM_Interface& Thread::ArmInterface() {
286 return *arm_interface;
287}
288
289const Core::ARM_Interface& Thread::ArmInterface() const {
290 return *arm_interface;
291}
292
239void Thread::SetStatus(ThreadStatus new_status) { 293void Thread::SetStatus(ThreadStatus new_status) {
240 if (new_status == status) { 294 if (new_status == status) {
241 return; 295 return;
@@ -257,10 +311,6 @@ void Thread::SetStatus(ThreadStatus new_status) {
257 break; 311 break;
258 } 312 }
259 313
260 if (status == ThreadStatus::Running) {
261 last_running_ticks = Core::System::GetInstance().CoreTiming().GetTicks();
262 }
263
264 status = new_status; 314 status = new_status;
265} 315}
266 316
@@ -341,75 +391,116 @@ void Thread::UpdatePriority() {
341 lock_owner->UpdatePriority(); 391 lock_owner->UpdatePriority();
342} 392}
343 393
344void Thread::ChangeCore(u32 core, u64 mask) {
345 SetCoreAndAffinityMask(core, mask);
346}
347
348bool Thread::AllSynchronizationObjectsReady() const { 394bool Thread::AllSynchronizationObjectsReady() const {
349 return std::none_of(wait_objects.begin(), wait_objects.end(), 395 return std::none_of(wait_objects->begin(), wait_objects->end(),
350 [this](const std::shared_ptr<SynchronizationObject>& object) { 396 [this](const std::shared_ptr<SynchronizationObject>& object) {
351 return object->ShouldWait(this); 397 return object->ShouldWait(this);
352 }); 398 });
353} 399}
354 400
355bool Thread::InvokeWakeupCallback(ThreadWakeupReason reason, std::shared_ptr<Thread> thread, 401bool Thread::InvokeHLECallback(std::shared_ptr<Thread> thread) {
356 std::shared_ptr<SynchronizationObject> object, 402 ASSERT(hle_callback);
357 std::size_t index) { 403 return hle_callback(std::move(thread));
358 ASSERT(wakeup_callback);
359 return wakeup_callback(reason, std::move(thread), std::move(object), index);
360} 404}
361 405
362void Thread::SetActivity(ThreadActivity value) { 406ResultCode Thread::SetActivity(ThreadActivity value) {
363 activity = value; 407 SchedulerLock lock(kernel);
408
409 auto sched_status = GetSchedulingStatus();
410
411 if (sched_status != ThreadSchedStatus::Runnable && sched_status != ThreadSchedStatus::Paused) {
412 return ERR_INVALID_STATE;
413 }
414
415 if (IsPendingTermination()) {
416 return RESULT_SUCCESS;
417 }
364 418
365 if (value == ThreadActivity::Paused) { 419 if (value == ThreadActivity::Paused) {
366 // Set status if not waiting 420 if ((pausing_state & static_cast<u32>(ThreadSchedFlags::ThreadPauseFlag)) != 0) {
367 if (status == ThreadStatus::Ready || status == ThreadStatus::Running) { 421 return ERR_INVALID_STATE;
368 SetStatus(ThreadStatus::Paused); 422 }
369 kernel.PrepareReschedule(processor_id); 423 AddSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag);
424 } else {
425 if ((pausing_state & static_cast<u32>(ThreadSchedFlags::ThreadPauseFlag)) == 0) {
426 return ERR_INVALID_STATE;
370 } 427 }
371 } else if (status == ThreadStatus::Paused) { 428 RemoveSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag);
372 // Ready to reschedule
373 ResumeFromWait();
374 } 429 }
430 return RESULT_SUCCESS;
375} 431}
376 432
377void Thread::Sleep(s64 nanoseconds) { 433ResultCode Thread::Sleep(s64 nanoseconds) {
378 // Sleep current thread and check for next thread to schedule 434 Handle event_handle{};
379 SetStatus(ThreadStatus::WaitSleep); 435 {
436 SchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds);
437 SetStatus(ThreadStatus::WaitSleep);
438 }
380 439
381 // Create an event to wake the thread up after the specified nanosecond delay has passed 440 if (event_handle != InvalidHandle) {
382 WakeAfterDelay(nanoseconds); 441 auto& time_manager = kernel.TimeManager();
442 time_manager.UnscheduleTimeEvent(event_handle);
443 }
444 return RESULT_SUCCESS;
445}
446
447std::pair<ResultCode, bool> Thread::YieldSimple() {
448 bool is_redundant = false;
449 {
450 SchedulerLock lock(kernel);
451 is_redundant = kernel.GlobalScheduler().YieldThread(this);
452 }
453 return {RESULT_SUCCESS, is_redundant};
454}
455
456std::pair<ResultCode, bool> Thread::YieldAndBalanceLoad() {
457 bool is_redundant = false;
458 {
459 SchedulerLock lock(kernel);
460 is_redundant = kernel.GlobalScheduler().YieldThreadAndBalanceLoad(this);
461 }
462 return {RESULT_SUCCESS, is_redundant};
383} 463}
384 464
385bool Thread::YieldSimple() { 465std::pair<ResultCode, bool> Thread::YieldAndWaitForLoadBalancing() {
386 auto& scheduler = kernel.GlobalScheduler(); 466 bool is_redundant = false;
387 return scheduler.YieldThread(this); 467 {
468 SchedulerLock lock(kernel);
469 is_redundant = kernel.GlobalScheduler().YieldThreadAndWaitForLoadBalancing(this);
470 }
471 return {RESULT_SUCCESS, is_redundant};
388} 472}
389 473
390bool Thread::YieldAndBalanceLoad() { 474void Thread::AddSchedulingFlag(ThreadSchedFlags flag) {
391 auto& scheduler = kernel.GlobalScheduler(); 475 const u32 old_state = scheduling_state;
392 return scheduler.YieldThreadAndBalanceLoad(this); 476 pausing_state |= static_cast<u32>(flag);
477 const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
478 scheduling_state = base_scheduling | pausing_state;
479 kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state);
393} 480}
394 481
395bool Thread::YieldAndWaitForLoadBalancing() { 482void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
396 auto& scheduler = kernel.GlobalScheduler(); 483 const u32 old_state = scheduling_state;
397 return scheduler.YieldThreadAndWaitForLoadBalancing(this); 484 pausing_state &= ~static_cast<u32>(flag);
485 const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
486 scheduling_state = base_scheduling | pausing_state;
487 kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state);
398} 488}
399 489
400void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) { 490void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {
401 const u32 old_flags = scheduling_state; 491 const u32 old_state = scheduling_state;
402 scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) | 492 scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) |
403 static_cast<u32>(new_status); 493 static_cast<u32>(new_status);
404 AdjustSchedulingOnStatus(old_flags); 494 kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state);
405} 495}
406 496
407void Thread::SetCurrentPriority(u32 new_priority) { 497void Thread::SetCurrentPriority(u32 new_priority) {
408 const u32 old_priority = std::exchange(current_priority, new_priority); 498 const u32 old_priority = std::exchange(current_priority, new_priority);
409 AdjustSchedulingOnPriority(old_priority); 499 kernel.GlobalScheduler().AdjustSchedulingOnPriority(this, old_priority);
410} 500}
411 501
412ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { 502ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
503 SchedulerLock lock(kernel);
413 const auto HighestSetCore = [](u64 mask, u32 max_cores) { 504 const auto HighestSetCore = [](u64 mask, u32 max_cores) {
414 for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) { 505 for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) {
415 if (((mask >> core) & 1) != 0) { 506 if (((mask >> core) & 1) != 0) {
@@ -443,111 +534,12 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
443 processor_id = ideal_core; 534 processor_id = ideal_core;
444 } 535 }
445 } 536 }
446 AdjustSchedulingOnAffinity(old_affinity_mask, old_core); 537 kernel.GlobalScheduler().AdjustSchedulingOnAffinity(this, old_affinity_mask, old_core);
447 } 538 }
448 } 539 }
449 return RESULT_SUCCESS; 540 return RESULT_SUCCESS;
450} 541}
451 542
452void Thread::AdjustSchedulingOnStatus(u32 old_flags) {
453 if (old_flags == scheduling_state) {
454 return;
455 }
456
457 auto& scheduler = kernel.GlobalScheduler();
458 if (static_cast<ThreadSchedStatus>(old_flags & static_cast<u32>(ThreadSchedMasks::LowMask)) ==
459 ThreadSchedStatus::Runnable) {
 460 // In this case the thread was running; now it's pausing/exiting
461 if (processor_id >= 0) {
462 scheduler.Unschedule(current_priority, static_cast<u32>(processor_id), this);
463 }
464
465 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
466 if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
467 scheduler.Unsuggest(current_priority, core, this);
468 }
469 }
470 } else if (GetSchedulingStatus() == ThreadSchedStatus::Runnable) {
471 // The thread is now set to running from being stopped
472 if (processor_id >= 0) {
473 scheduler.Schedule(current_priority, static_cast<u32>(processor_id), this);
474 }
475
476 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
477 if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
478 scheduler.Suggest(current_priority, core, this);
479 }
480 }
481 }
482
483 scheduler.SetReselectionPending();
484}
485
486void Thread::AdjustSchedulingOnPriority(u32 old_priority) {
487 if (GetSchedulingStatus() != ThreadSchedStatus::Runnable) {
488 return;
489 }
490 auto& scheduler = kernel.GlobalScheduler();
491 if (processor_id >= 0) {
492 scheduler.Unschedule(old_priority, static_cast<u32>(processor_id), this);
493 }
494
495 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
496 if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
497 scheduler.Unsuggest(old_priority, core, this);
498 }
499 }
500
501 // Add thread to the new priority queues.
502 Thread* current_thread = GetCurrentThread();
503
504 if (processor_id >= 0) {
505 if (current_thread == this) {
506 scheduler.SchedulePrepend(current_priority, static_cast<u32>(processor_id), this);
507 } else {
508 scheduler.Schedule(current_priority, static_cast<u32>(processor_id), this);
509 }
510 }
511
512 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
513 if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
514 scheduler.Suggest(current_priority, core, this);
515 }
516 }
517
518 scheduler.SetReselectionPending();
519}
520
521void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) {
522 auto& scheduler = kernel.GlobalScheduler();
523 if (GetSchedulingStatus() != ThreadSchedStatus::Runnable ||
524 current_priority >= THREADPRIO_COUNT) {
525 return;
526 }
527
528 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
529 if (((old_affinity_mask >> core) & 1) != 0) {
530 if (core == static_cast<u32>(old_core)) {
531 scheduler.Unschedule(current_priority, core, this);
532 } else {
533 scheduler.Unsuggest(current_priority, core, this);
534 }
535 }
536 }
537
538 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
539 if (((affinity_mask >> core) & 1) != 0) {
540 if (core == static_cast<u32>(processor_id)) {
541 scheduler.Schedule(current_priority, core, this);
542 } else {
543 scheduler.Suggest(current_priority, core, this);
544 }
545 }
546 }
547
548 scheduler.SetReselectionPending();
549}
550
551//////////////////////////////////////////////////////////////////////////////////////////////////// 543////////////////////////////////////////////////////////////////////////////////////////////////////
552 544
553/** 545/**
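Thread::Create now receives the whole Core::System, a ThreadType, a nullable owner process, and (in the explicit overload) the host start routine, since every thread owns its host fiber and guest threads own their own ARM interface. A hedged example of creating a host-only (HLE) kernel thread under the new signature follows; the thread name, start routine and its parameter are invented for illustration.

    // Illustrative call only; ExampleHostRoutine and example_parameter are hypothetical.
    auto thread_res = Kernel::Thread::Create(
        system,
        static_cast<Kernel::ThreadType>(Kernel::THREADTYPE_KERNEL | Kernel::THREADTYPE_HLE),
        "Example:HostThread", /*entry_point=*/0, Kernel::THREADPRIO_HIGHEST, /*arg=*/0,
        /*processor_id=*/0, /*stack_top=*/0, /*owner_process=*/nullptr,
        ExampleHostRoutine, &example_parameter);
    ASSERT(thread_res.Succeeded());
    // With THREADTYPE_HLE set, no Dynarmic/Unicorn instance is created; the thread only
    // carries the host fiber built from ExampleHostRoutine.
    std::shared_ptr<Kernel::Thread> thread = std::move(thread_res).Unwrap();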
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index 23fdef8a4..c0342c462 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -6,26 +6,47 @@
6 6
7#include <functional> 7#include <functional>
8#include <string> 8#include <string>
9#include <utility>
9#include <vector> 10#include <vector>
10 11
11#include "common/common_types.h" 12#include "common/common_types.h"
13#include "common/spin_lock.h"
12#include "core/arm/arm_interface.h" 14#include "core/arm/arm_interface.h"
13#include "core/hle/kernel/object.h" 15#include "core/hle/kernel/object.h"
14#include "core/hle/kernel/synchronization_object.h" 16#include "core/hle/kernel/synchronization_object.h"
15#include "core/hle/result.h" 17#include "core/hle/result.h"
16 18
19namespace Common {
20class Fiber;
21}
22
23namespace Core {
24class ARM_Interface;
25class System;
26} // namespace Core
27
17namespace Kernel { 28namespace Kernel {
18 29
30class GlobalScheduler;
19class KernelCore; 31class KernelCore;
20class Process; 32class Process;
21class Scheduler; 33class Scheduler;
22 34
23enum ThreadPriority : u32 { 35enum ThreadPriority : u32 {
24 THREADPRIO_HIGHEST = 0, ///< Highest thread priority 36 THREADPRIO_HIGHEST = 0, ///< Highest thread priority
25 THREADPRIO_USERLAND_MAX = 24, ///< Highest thread priority for userland apps 37 THREADPRIO_MAX_CORE_MIGRATION = 2, ///< Highest priority for a core migration
26 THREADPRIO_DEFAULT = 44, ///< Default thread priority for userland apps 38 THREADPRIO_USERLAND_MAX = 24, ///< Highest thread priority for userland apps
27 THREADPRIO_LOWEST = 63, ///< Lowest thread priority 39 THREADPRIO_DEFAULT = 44, ///< Default thread priority for userland apps
28 THREADPRIO_COUNT = 64, ///< Total number of possible thread priorities. 40 THREADPRIO_LOWEST = 63, ///< Lowest thread priority
41 THREADPRIO_COUNT = 64, ///< Total number of possible thread priorities.
42};
43
44enum ThreadType : u32 {
45 THREADTYPE_USER = 0x1,
46 THREADTYPE_KERNEL = 0x2,
47 THREADTYPE_HLE = 0x4,
48 THREADTYPE_IDLE = 0x8,
49 THREADTYPE_SUSPEND = 0x10,
29}; 50};
30 51
31enum ThreadProcessorId : s32 { 52enum ThreadProcessorId : s32 {
@@ -107,26 +128,45 @@ public:
107 128
108 using ThreadSynchronizationObjects = std::vector<std::shared_ptr<SynchronizationObject>>; 129 using ThreadSynchronizationObjects = std::vector<std::shared_ptr<SynchronizationObject>>;
109 130
110 using WakeupCallback = 131 using HLECallback = std::function<bool(std::shared_ptr<Thread> thread)>;
111 std::function<bool(ThreadWakeupReason reason, std::shared_ptr<Thread> thread, 132
112 std::shared_ptr<SynchronizationObject> object, std::size_t index)>; 133 /**
134 * Creates and returns a new thread. The new thread is immediately scheduled
135 * @param system The instance of the whole system
136 * @param name The friendly name desired for the thread
137 * @param entry_point The address at which the thread should start execution
138 * @param priority The thread's priority
139 * @param arg User data to pass to the thread
140 * @param processor_id The ID(s) of the processors on which the thread is desired to be run
141 * @param stack_top The address of the thread's stack top
142 * @param owner_process The parent process for the thread, if null, it's a kernel thread
143 * @return A shared pointer to the newly created thread
144 */
145 static ResultVal<std::shared_ptr<Thread>> Create(Core::System& system, ThreadType type_flags,
146 std::string name, VAddr entry_point,
147 u32 priority, u64 arg, s32 processor_id,
148 VAddr stack_top, Process* owner_process);
113 149
114 /** 150 /**
115 * Creates and returns a new thread. The new thread is immediately scheduled 151 * Creates and returns a new thread. The new thread is immediately scheduled
116 * @param kernel The kernel instance this thread will be created under. 152 * @param system The instance of the whole system
117 * @param name The friendly name desired for the thread 153 * @param name The friendly name desired for the thread
118 * @param entry_point The address at which the thread should start execution 154 * @param entry_point The address at which the thread should start execution
119 * @param priority The thread's priority 155 * @param priority The thread's priority
120 * @param arg User data to pass to the thread 156 * @param arg User data to pass to the thread
121 * @param processor_id The ID(s) of the processors on which the thread is desired to be run 157 * @param processor_id The ID(s) of the processors on which the thread is desired to be run
122 * @param stack_top The address of the thread's stack top 158 * @param stack_top The address of the thread's stack top
123 * @param owner_process The parent process for the thread 159 * @param owner_process The parent process for the thread, if null, it's a kernel thread
160 * @param thread_start_func The function where the host context will start.
 161 * @param thread_start_parameter The parameter which will be passed to the host context on init
124 * @return A shared pointer to the newly created thread 162 * @return A shared pointer to the newly created thread
125 */ 163 */
126 static ResultVal<std::shared_ptr<Thread>> Create(KernelCore& kernel, std::string name, 164 static ResultVal<std::shared_ptr<Thread>> Create(Core::System& system, ThreadType type_flags,
127 VAddr entry_point, u32 priority, u64 arg, 165 std::string name, VAddr entry_point,
128 s32 processor_id, VAddr stack_top, 166 u32 priority, u64 arg, s32 processor_id,
129 Process& owner_process); 167 VAddr stack_top, Process* owner_process,
168 std::function<void(void*)>&& thread_start_func,
169 void* thread_start_parameter);
130 170
131 std::string GetName() const override { 171 std::string GetName() const override {
132 return name; 172 return name;
@@ -181,7 +221,7 @@ public:
181 void UpdatePriority(); 221 void UpdatePriority();
182 222
183 /// Changes the core that the thread is running or scheduled to run on. 223 /// Changes the core that the thread is running or scheduled to run on.
184 void ChangeCore(u32 core, u64 mask); 224 ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask);
185 225
186 /** 226 /**
187 * Gets the thread's thread ID 227 * Gets the thread's thread ID
@@ -194,6 +234,10 @@ public:
194 /// Resumes a thread from waiting 234 /// Resumes a thread from waiting
195 void ResumeFromWait(); 235 void ResumeFromWait();
196 236
237 void OnWakeUp();
238
239 ResultCode Start();
240
197 /// Cancels a waiting operation that this thread may or may not be within. 241 /// Cancels a waiting operation that this thread may or may not be within.
198 /// 242 ///
199 /// When the thread is within a waiting state, this will set the thread's 243 /// When the thread is within a waiting state, this will set the thread's
@@ -202,26 +246,19 @@ public:
202 /// 246 ///
203 void CancelWait(); 247 void CancelWait();
204 248
205 /** 249 void SetSynchronizationResults(SynchronizationObject* object, ResultCode result);
206 * Schedules an event to wake up the specified thread after the specified delay
207 * @param nanoseconds The time this thread will be allowed to sleep for
208 */
209 void WakeAfterDelay(s64 nanoseconds);
210 250
211 /// Cancel any outstanding wakeup events for this thread 251 Core::ARM_Interface& ArmInterface();
212 void CancelWakeupTimer();
213 252
214 /** 253 const Core::ARM_Interface& ArmInterface() const;
215 * Sets the result after the thread awakens (from svcWaitSynchronization)
216 * @param result Value to set to the returned result
217 */
218 void SetWaitSynchronizationResult(ResultCode result);
219 254
220 /** 255 SynchronizationObject* GetSignalingObject() const {
221 * Sets the output parameter value after the thread awakens (from svcWaitSynchronization) 256 return signaling_object;
222 * @param output Value to set to the output parameter 257 }
223 */ 258
224 void SetWaitSynchronizationOutput(s32 output); 259 ResultCode GetSignalingResult() const {
260 return signaling_result;
261 }
225 262
226 /** 263 /**
227 * Retrieves the index that this particular object occupies in the list of objects 264 * Retrieves the index that this particular object occupies in the list of objects
@@ -269,11 +306,6 @@ public:
269 */ 306 */
270 VAddr GetCommandBufferAddress() const; 307 VAddr GetCommandBufferAddress() const;
271 308
272 /// Returns whether this thread is waiting on objects from a WaitSynchronization call.
273 bool IsSleepingOnWait() const {
274 return status == ThreadStatus::WaitSynch;
275 }
276
277 ThreadContext32& GetContext32() { 309 ThreadContext32& GetContext32() {
278 return context_32; 310 return context_32;
279 } 311 }
@@ -290,6 +322,28 @@ public:
290 return context_64; 322 return context_64;
291 } 323 }
292 324
325 bool IsHLEThread() const {
326 return (type & THREADTYPE_HLE) != 0;
327 }
328
329 bool IsSuspendThread() const {
330 return (type & THREADTYPE_SUSPEND) != 0;
331 }
332
333 bool IsIdleThread() const {
334 return (type & THREADTYPE_IDLE) != 0;
335 }
336
337 bool WasRunning() const {
338 return was_running;
339 }
340
341 void SetWasRunning(bool value) {
342 was_running = value;
343 }
344
345 std::shared_ptr<Common::Fiber>& GetHostContext();
346
293 ThreadStatus GetStatus() const { 347 ThreadStatus GetStatus() const {
294 return status; 348 return status;
295 } 349 }
@@ -325,18 +379,18 @@ public:
325 } 379 }
326 380
327 const ThreadSynchronizationObjects& GetSynchronizationObjects() const { 381 const ThreadSynchronizationObjects& GetSynchronizationObjects() const {
328 return wait_objects; 382 return *wait_objects;
329 } 383 }
330 384
331 void SetSynchronizationObjects(ThreadSynchronizationObjects objects) { 385 void SetSynchronizationObjects(ThreadSynchronizationObjects* objects) {
332 wait_objects = std::move(objects); 386 wait_objects = objects;
333 } 387 }
334 388
335 void ClearSynchronizationObjects() { 389 void ClearSynchronizationObjects() {
336 for (const auto& waiting_object : wait_objects) { 390 for (const auto& waiting_object : *wait_objects) {
337 waiting_object->RemoveWaitingThread(SharedFrom(this)); 391 waiting_object->RemoveWaitingThread(SharedFrom(this));
338 } 392 }
339 wait_objects.clear(); 393 wait_objects->clear();
340 } 394 }
341 395
342 /// Determines whether all the objects this thread is waiting on are ready. 396 /// Determines whether all the objects this thread is waiting on are ready.
@@ -386,26 +440,35 @@ public:
386 arb_wait_address = address; 440 arb_wait_address = address;
387 } 441 }
388 442
389 bool HasWakeupCallback() const { 443 bool HasHLECallback() const {
390 return wakeup_callback != nullptr; 444 return hle_callback != nullptr;
391 } 445 }
392 446
393 void SetWakeupCallback(WakeupCallback callback) { 447 void SetHLECallback(HLECallback callback) {
394 wakeup_callback = std::move(callback); 448 hle_callback = std::move(callback);
395 } 449 }
396 450
397 void InvalidateWakeupCallback() { 451 void SetHLETimeEvent(Handle time_event) {
398 SetWakeupCallback(nullptr); 452 hle_time_event = time_event;
399 } 453 }
400 454
401 /** 455 void SetHLESyncObject(SynchronizationObject* object) {
402 * Invokes the thread's wakeup callback. 456 hle_object = object;
403 * 457 }
404 * @pre A valid wakeup callback has been set. Violating this precondition 458
405 * will cause an assertion to trigger. 459 Handle GetHLETimeEvent() const {
406 */ 460 return hle_time_event;
407 bool InvokeWakeupCallback(ThreadWakeupReason reason, std::shared_ptr<Thread> thread, 461 }
408 std::shared_ptr<SynchronizationObject> object, std::size_t index); 462
463 SynchronizationObject* GetHLESyncObject() const {
464 return hle_object;
465 }
466
467 void InvalidateHLECallback() {
468 SetHLECallback(nullptr);
469 }
470
471 bool InvokeHLECallback(std::shared_ptr<Thread> thread);
409 472
410 u32 GetIdealCore() const { 473 u32 GetIdealCore() const {
411 return ideal_core; 474 return ideal_core;
@@ -415,23 +478,19 @@ public:
415 return affinity_mask; 478 return affinity_mask;
416 } 479 }
417 480
418 ThreadActivity GetActivity() const { 481 ResultCode SetActivity(ThreadActivity value);
419 return activity;
420 }
421
422 void SetActivity(ThreadActivity value);
423 482
424 /// Sleeps this thread for the given amount of nanoseconds. 483 /// Sleeps this thread for the given amount of nanoseconds.
425 void Sleep(s64 nanoseconds); 484 ResultCode Sleep(s64 nanoseconds);
426 485
427 /// Yields this thread without rebalancing loads. 486 /// Yields this thread without rebalancing loads.
428 bool YieldSimple(); 487 std::pair<ResultCode, bool> YieldSimple();
429 488
430 /// Yields this thread and does a load rebalancing. 489 /// Yields this thread and does a load rebalancing.
431 bool YieldAndBalanceLoad(); 490 std::pair<ResultCode, bool> YieldAndBalanceLoad();
432 491
433 /// Yields this thread and if the core is left idle, loads are rebalanced 492 /// Yields this thread and if the core is left idle, loads are rebalanced
434 bool YieldAndWaitForLoadBalancing(); 493 std::pair<ResultCode, bool> YieldAndWaitForLoadBalancing();
435 494
436 void IncrementYieldCount() { 495 void IncrementYieldCount() {
437 yield_count++; 496 yield_count++;
@@ -446,6 +505,10 @@ public:
446 static_cast<u32>(ThreadSchedMasks::LowMask)); 505 static_cast<u32>(ThreadSchedMasks::LowMask));
447 } 506 }
448 507
508 bool IsRunnable() const {
509 return scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable);
510 }
511
449 bool IsRunning() const { 512 bool IsRunning() const {
450 return is_running; 513 return is_running;
451 } 514 }
@@ -466,17 +529,67 @@ public:
466 return global_handle; 529 return global_handle;
467 } 530 }
468 531
532 bool IsWaitingForArbitration() const {
533 return waiting_for_arbitration;
534 }
535
536 void WaitForArbitration(bool set) {
537 waiting_for_arbitration = set;
538 }
539
540 bool IsWaitingSync() const {
541 return is_waiting_on_sync;
542 }
543
544 void SetWaitingSync(bool is_waiting) {
545 is_waiting_on_sync = is_waiting;
546 }
547
548 bool IsPendingTermination() const {
549 return will_be_terminated || GetSchedulingStatus() == ThreadSchedStatus::Exited;
550 }
551
552 bool IsPaused() const {
553 return pausing_state != 0;
554 }
555
556 bool IsContinuousOnSVC() const {
557 return is_continuous_on_svc;
558 }
559
560 void SetContinuousOnSVC(bool is_continuous) {
561 is_continuous_on_svc = is_continuous;
562 }
563
564 bool IsPhantomMode() const {
565 return is_phantom_mode;
566 }
567
568 void SetPhantomMode(bool phantom) {
569 is_phantom_mode = phantom;
570 }
571
572 bool HasExited() const {
573 return has_exited;
574 }
575
469private: 576private:
577 friend class GlobalScheduler;
578 friend class Scheduler;
579
470 void SetSchedulingStatus(ThreadSchedStatus new_status); 580 void SetSchedulingStatus(ThreadSchedStatus new_status);
581 void AddSchedulingFlag(ThreadSchedFlags flag);
582 void RemoveSchedulingFlag(ThreadSchedFlags flag);
583
471 void SetCurrentPriority(u32 new_priority); 584 void SetCurrentPriority(u32 new_priority);
472 ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask);
473 585
474 void AdjustSchedulingOnStatus(u32 old_flags);
475 void AdjustSchedulingOnPriority(u32 old_priority);
476 void AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core); 586 void AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core);
477 587
588 Common::SpinLock context_guard{};
478 ThreadContext32 context_32{}; 589 ThreadContext32 context_32{};
479 ThreadContext64 context_64{}; 590 ThreadContext64 context_64{};
591 std::unique_ptr<Core::ARM_Interface> arm_interface{};
592 std::shared_ptr<Common::Fiber> host_context{};
480 593
481 u64 thread_id = 0; 594 u64 thread_id = 0;
482 595
@@ -485,6 +598,8 @@ private:
485 VAddr entry_point = 0; 598 VAddr entry_point = 0;
486 VAddr stack_top = 0; 599 VAddr stack_top = 0;
487 600
601 ThreadType type;
602
488 /// Nominal thread priority, as set by the emulated application. 603 /// Nominal thread priority, as set by the emulated application.
489 /// The nominal priority is the thread priority without priority 604 /// The nominal priority is the thread priority without priority
490 /// inheritance taken into account. 605 /// inheritance taken into account.
@@ -509,7 +624,10 @@ private:
509 624
510 /// Objects that the thread is waiting on, in the same order as they were 625 /// Objects that the thread is waiting on, in the same order as they were
511 /// passed to WaitSynchronization. 626 /// passed to WaitSynchronization.
512 ThreadSynchronizationObjects wait_objects; 627 ThreadSynchronizationObjects* wait_objects;
628
629 SynchronizationObject* signaling_object;
630 ResultCode signaling_result{RESULT_SUCCESS};
513 631
514 /// List of threads that are waiting for a mutex that is held by this thread. 632 /// List of threads that are waiting for a mutex that is held by this thread.
515 MutexWaitingThreads wait_mutex_threads; 633 MutexWaitingThreads wait_mutex_threads;
@@ -526,30 +644,39 @@ private:
526 644
527 /// If waiting for an AddressArbiter, this is the address being waited on. 645 /// If waiting for an AddressArbiter, this is the address being waited on.
528 VAddr arb_wait_address{0}; 646 VAddr arb_wait_address{0};
647 bool waiting_for_arbitration{};
529 648
530 /// Handle used as userdata to reference this object when inserting into the CoreTiming queue. 649 /// Handle used as userdata to reference this object when inserting into the CoreTiming queue.
531 Handle global_handle = 0; 650 Handle global_handle = 0;
532 651
533 /// Callback that will be invoked when the thread is resumed from a waiting state. If the thread 652 /// Callback for HLE Events
534 /// was waiting via WaitSynchronization then the object will be the last object that became 653 HLECallback hle_callback;
535 /// available. In case of a timeout, the object will be nullptr. 654 Handle hle_time_event;
536 WakeupCallback wakeup_callback; 655 SynchronizationObject* hle_object;
537 656
538 Scheduler* scheduler = nullptr; 657 Scheduler* scheduler = nullptr;
539 658
540 u32 ideal_core{0xFFFFFFFF}; 659 u32 ideal_core{0xFFFFFFFF};
541 u64 affinity_mask{0x1}; 660 u64 affinity_mask{0x1};
542 661
543 ThreadActivity activity = ThreadActivity::Normal;
544
545 s32 ideal_core_override = -1; 662 s32 ideal_core_override = -1;
546 u64 affinity_mask_override = 0x1; 663 u64 affinity_mask_override = 0x1;
547 u32 affinity_override_count = 0; 664 u32 affinity_override_count = 0;
548 665
549 u32 scheduling_state = 0; 666 u32 scheduling_state = 0;
667 u32 pausing_state = 0;
550 bool is_running = false; 668 bool is_running = false;
669 bool is_waiting_on_sync = false;
551 bool is_sync_cancelled = false; 670 bool is_sync_cancelled = false;
552 671
672 bool is_continuous_on_svc = false;
673
674 bool will_be_terminated = false;
675 bool is_phantom_mode = false;
676 bool has_exited = false;
677
678 bool was_running = false;
679
553 std::string name; 680 std::string name;
554}; 681};
555 682
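Two caller-facing changes in this header are easy to miss: SetActivity(), Sleep() and the three yield helpers now return a ResultCode (the yields additionally keep the old redundancy flag as the second member of a pair), and pausing is tracked through scheduling flags rather than the removed ThreadActivity member. Below is a hedged sketch of how an SVC handler might consume the new yield shape; the surrounding function is invented for the example.

    // Illustrative only: destructuring the new std::pair return of the yield helpers.
    ResultCode ExampleYieldHandler(Core::System& system) {
        auto* const current = system.CurrentScheduler().GetCurrentThread();
        const auto [result, is_redundant] = current->YieldSimple();
        if (is_redundant) {
            // Hypothetical policy: nothing else was runnable on this core, so the
            // caller may simply return without forcing a context switch.
        }
        return result;
    }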
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp
index 21b290468..941305e8e 100644
--- a/src/core/hle/kernel/time_manager.cpp
+++ b/src/core/hle/kernel/time_manager.cpp
@@ -8,30 +8,37 @@
8#include "core/core_timing_util.h" 8#include "core/core_timing_util.h"
9#include "core/hle/kernel/handle_table.h" 9#include "core/hle/kernel/handle_table.h"
10#include "core/hle/kernel/kernel.h" 10#include "core/hle/kernel/kernel.h"
11#include "core/hle/kernel/scheduler.h"
11#include "core/hle/kernel/thread.h" 12#include "core/hle/kernel/thread.h"
12#include "core/hle/kernel/time_manager.h" 13#include "core/hle/kernel/time_manager.h"
13 14
14namespace Kernel { 15namespace Kernel {
15 16
16TimeManager::TimeManager(Core::System& system) : system{system} { 17TimeManager::TimeManager(Core::System& system_) : system{system_} {
17 time_manager_event_type = Core::Timing::CreateEvent( 18 time_manager_event_type = Core::Timing::CreateEvent(
18 "Kernel::TimeManagerCallback", [this](u64 thread_handle, [[maybe_unused]] s64 cycles_late) { 19 "Kernel::TimeManagerCallback", [this](u64 thread_handle, [[maybe_unused]] s64 cycles_late) {
20 SchedulerLock lock(system.Kernel());
19 Handle proper_handle = static_cast<Handle>(thread_handle); 21 Handle proper_handle = static_cast<Handle>(thread_handle);
22 if (cancelled_events[proper_handle]) {
23 return;
24 }
20 std::shared_ptr<Thread> thread = 25 std::shared_ptr<Thread> thread =
21 this->system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle); 26 this->system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
22 thread->ResumeFromWait(); 27 thread->OnWakeUp();
23 }); 28 });
24} 29}
25 30
26void TimeManager::ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64 nanoseconds) { 31void TimeManager::ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64 nanoseconds) {
32 event_handle = timetask->GetGlobalHandle();
27 if (nanoseconds > 0) { 33 if (nanoseconds > 0) {
28 ASSERT(timetask); 34 ASSERT(timetask);
29 event_handle = timetask->GetGlobalHandle(); 35 ASSERT(timetask->GetStatus() != ThreadStatus::Ready);
30 const s64 cycles = Core::Timing::nsToCycles(std::chrono::nanoseconds{nanoseconds}); 36 ASSERT(timetask->GetStatus() != ThreadStatus::WaitMutex);
31 system.CoreTiming().ScheduleEvent(cycles, time_manager_event_type, event_handle); 37 system.CoreTiming().ScheduleEvent(nanoseconds, time_manager_event_type, event_handle);
32 } else { 38 } else {
33 event_handle = InvalidHandle; 39 event_handle = InvalidHandle;
34 } 40 }
41 cancelled_events[event_handle] = false;
35} 42}
36 43
37void TimeManager::UnscheduleTimeEvent(Handle event_handle) { 44void TimeManager::UnscheduleTimeEvent(Handle event_handle) {
@@ -39,6 +46,12 @@ void TimeManager::UnscheduleTimeEvent(Handle event_handle) {
39 return; 46 return;
40 } 47 }
41 system.CoreTiming().UnscheduleEvent(time_manager_event_type, event_handle); 48 system.CoreTiming().UnscheduleEvent(time_manager_event_type, event_handle);
49 cancelled_events[event_handle] = true;
50}
51
52void TimeManager::CancelTimeEvent(Thread* time_task) {
53 Handle event_handle = time_task->GetGlobalHandle();
54 UnscheduleTimeEvent(event_handle);
42} 55}
43 56
44} // namespace Kernel 57} // namespace Kernel
diff --git a/src/core/hle/kernel/time_manager.h b/src/core/hle/kernel/time_manager.h
index eaec486d1..307a18765 100644
--- a/src/core/hle/kernel/time_manager.h
+++ b/src/core/hle/kernel/time_manager.h
@@ -5,6 +5,7 @@
5#pragma once 5#pragma once
6 6
7#include <memory> 7#include <memory>
8#include <unordered_map>
8 9
9#include "core/hle/kernel/object.h" 10#include "core/hle/kernel/object.h"
10 11
@@ -35,9 +36,12 @@ public:
35 /// Unschedule an existing time event 36 /// Unschedule an existing time event
36 void UnscheduleTimeEvent(Handle event_handle); 37 void UnscheduleTimeEvent(Handle event_handle);
37 38
39 void CancelTimeEvent(Thread* time_task);
40
38private: 41private:
39 Core::System& system; 42 Core::System& system;
40 std::shared_ptr<Core::Timing::EventType> time_manager_event_type; 43 std::shared_ptr<Core::Timing::EventType> time_manager_event_type;
44 std::unordered_map<Handle, bool> cancelled_events;
41}; 45};
42 46
43} // namespace Kernel 47} // namespace Kernel
diff --git a/src/core/hle/service/acc/acc.cpp b/src/core/hle/service/acc/acc.cpp
index 630a8b048..8ac856ec3 100644
--- a/src/core/hle/service/acc/acc.cpp
+++ b/src/core/hle/service/acc/acc.cpp
@@ -44,6 +44,218 @@ static constexpr u32 SanitizeJPEGSize(std::size_t size) {
44 return static_cast<u32>(std::min(size, max_jpeg_image_size)); 44 return static_cast<u32>(std::min(size, max_jpeg_image_size));
45} 45}
46 46
47class IManagerForSystemService final : public ServiceFramework<IManagerForSystemService> {
48public:
49 explicit IManagerForSystemService(Common::UUID user_id)
50 : ServiceFramework("IManagerForSystemService") {
51 // clang-format off
52 static const FunctionInfo functions[] = {
53 {0, nullptr, "CheckAvailability"},
54 {1, nullptr, "GetAccountId"},
55 {2, nullptr, "EnsureIdTokenCacheAsync"},
56 {3, nullptr, "LoadIdTokenCache"},
57 {100, nullptr, "SetSystemProgramIdentification"},
58 {101, nullptr, "RefreshNotificationTokenAsync"}, // 7.0.0+
59 {110, nullptr, "GetServiceEntryRequirementCache"}, // 4.0.0+
60 {111, nullptr, "InvalidateServiceEntryRequirementCache"}, // 4.0.0+
61 {112, nullptr, "InvalidateTokenCache"}, // 4.0.0 - 6.2.0
62 {113, nullptr, "GetServiceEntryRequirementCacheForOnlinePlay"}, // 6.1.0+
63 {120, nullptr, "GetNintendoAccountId"},
64 {121, nullptr, "CalculateNintendoAccountAuthenticationFingerprint"}, // 9.0.0+
65 {130, nullptr, "GetNintendoAccountUserResourceCache"},
66 {131, nullptr, "RefreshNintendoAccountUserResourceCacheAsync"},
67 {132, nullptr, "RefreshNintendoAccountUserResourceCacheAsyncIfSecondsElapsed"},
68 {133, nullptr, "GetNintendoAccountVerificationUrlCache"}, // 9.0.0+
69 {134, nullptr, "RefreshNintendoAccountVerificationUrlCache"}, // 9.0.0+
70 {135, nullptr, "RefreshNintendoAccountVerificationUrlCacheAsyncIfSecondsElapsed"}, // 9.0.0+
71 {140, nullptr, "GetNetworkServiceLicenseCache"}, // 5.0.0+
72 {141, nullptr, "RefreshNetworkServiceLicenseCacheAsync"}, // 5.0.0+
73 {142, nullptr, "RefreshNetworkServiceLicenseCacheAsyncIfSecondsElapsed"}, // 5.0.0+
74 {150, nullptr, "CreateAuthorizationRequest"},
75 };
76 // clang-format on
77
78 RegisterHandlers(functions);
79 }
80};
81
82// 3.0.0+
83class IFloatingRegistrationRequest final : public ServiceFramework<IFloatingRegistrationRequest> {
84public:
85 explicit IFloatingRegistrationRequest(Common::UUID user_id)
86 : ServiceFramework("IFloatingRegistrationRequest") {
87 // clang-format off
88 static const FunctionInfo functions[] = {
89 {0, nullptr, "GetSessionId"},
90 {12, nullptr, "GetAccountId"},
91 {13, nullptr, "GetLinkedNintendoAccountId"},
92 {14, nullptr, "GetNickname"},
93 {15, nullptr, "GetProfileImage"},
94 {21, nullptr, "LoadIdTokenCache"},
95 {100, nullptr, "RegisterUser"}, // [1.0.0-3.0.2] RegisterAsync
96 {101, nullptr, "RegisterUserWithUid"}, // [1.0.0-3.0.2] RegisterWithUidAsync
97 {102, nullptr, "RegisterNetworkServiceAccountAsync"}, // 4.0.0+
98 {103, nullptr, "RegisterNetworkServiceAccountWithUidAsync"}, // 4.0.0+
99 {110, nullptr, "SetSystemProgramIdentification"},
100 {111, nullptr, "EnsureIdTokenCacheAsync"},
101 };
102 // clang-format on
103
104 RegisterHandlers(functions);
105 }
106};
107
108class IAdministrator final : public ServiceFramework<IAdministrator> {
109public:
110 explicit IAdministrator(Common::UUID user_id) : ServiceFramework("IAdministrator") {
111 // clang-format off
112 static const FunctionInfo functions[] = {
113 {0, nullptr, "CheckAvailability"},
114 {1, nullptr, "GetAccountId"},
115 {2, nullptr, "EnsureIdTokenCacheAsync"},
116 {3, nullptr, "LoadIdTokenCache"},
117 {100, nullptr, "SetSystemProgramIdentification"},
118 {101, nullptr, "RefreshNotificationTokenAsync"}, // 7.0.0+
119 {110, nullptr, "GetServiceEntryRequirementCache"}, // 4.0.0+
120 {111, nullptr, "InvalidateServiceEntryRequirementCache"}, // 4.0.0+
121 {112, nullptr, "InvalidateTokenCache"}, // 4.0.0 - 6.2.0
122 {113, nullptr, "GetServiceEntryRequirementCacheForOnlinePlay"}, // 6.1.0+
123 {120, nullptr, "GetNintendoAccountId"},
124 {121, nullptr, "CalculateNintendoAccountAuthenticationFingerprint"}, // 9.0.0+
125 {130, nullptr, "GetNintendoAccountUserResourceCache"},
126 {131, nullptr, "RefreshNintendoAccountUserResourceCacheAsync"},
127 {132, nullptr, "RefreshNintendoAccountUserResourceCacheAsyncIfSecondsElapsed"},
128 {133, nullptr, "GetNintendoAccountVerificationUrlCache"}, // 9.0.0+
129 {134, nullptr, "RefreshNintendoAccountVerificationUrlCacheAsync"}, // 9.0.0+
130 {135, nullptr, "RefreshNintendoAccountVerificationUrlCacheAsyncIfSecondsElapsed"}, // 9.0.0+
131 {140, nullptr, "GetNetworkServiceLicenseCache"}, // 5.0.0+
132 {141, nullptr, "RefreshNetworkServiceLicenseCacheAsync"}, // 5.0.0+
133 {142, nullptr, "RefreshNetworkServiceLicenseCacheAsyncIfSecondsElapsed"}, // 5.0.0+
134 {150, nullptr, "CreateAuthorizationRequest"},
135 {200, nullptr, "IsRegistered"},
136 {201, nullptr, "RegisterAsync"},
137 {202, nullptr, "UnregisterAsync"},
138 {203, nullptr, "DeleteRegistrationInfoLocally"},
139 {220, nullptr, "SynchronizeProfileAsync"},
140 {221, nullptr, "UploadProfileAsync"},
141 {222, nullptr, "SynchronizaProfileAsyncIfSecondsElapsed"},
142 {250, nullptr, "IsLinkedWithNintendoAccount"},
143 {251, nullptr, "CreateProcedureToLinkWithNintendoAccount"},
144 {252, nullptr, "ResumeProcedureToLinkWithNintendoAccount"},
145 {255, nullptr, "CreateProcedureToUpdateLinkageStateOfNintendoAccount"},
146 {256, nullptr, "ResumeProcedureToUpdateLinkageStateOfNintendoAccount"},
147 {260, nullptr, "CreateProcedureToLinkNnidWithNintendoAccount"}, // 3.0.0+
148 {261, nullptr, "ResumeProcedureToLinkNnidWithNintendoAccount"}, // 3.0.0+
149 {280, nullptr, "ProxyProcedureToAcquireApplicationAuthorizationForNintendoAccount"},
150 {290, nullptr, "GetRequestForNintendoAccountUserResourceView"}, // 8.0.0+
151 {300, nullptr, "TryRecoverNintendoAccountUserStateAsync"}, // 6.0.0+
152 {400, nullptr, "IsServiceEntryRequirementCacheRefreshRequiredForOnlinePlay"}, // 6.1.0+
153 {401, nullptr, "RefreshServiceEntryRequirementCacheForOnlinePlayAsync"}, // 6.1.0+
154 {900, nullptr, "GetAuthenticationInfoForWin"}, // 9.0.0+
155 {901, nullptr, "ImportAsyncForWin"}, // 9.0.0+
156 {997, nullptr, "DebugUnlinkNintendoAccountAsync"},
157 {998, nullptr, "DebugSetAvailabilityErrorDetail"},
158 };
159 // clang-format on
160
161 RegisterHandlers(functions);
162 }
163};
164
165class IAuthorizationRequest final : public ServiceFramework<IAuthorizationRequest> {
166public:
167 explicit IAuthorizationRequest(Common::UUID user_id)
168 : ServiceFramework("IAuthorizationRequest") {
169 // clang-format off
170 static const FunctionInfo functions[] = {
171 {0, nullptr, "GetSessionId"},
172 {10, nullptr, "InvokeWithoutInteractionAsync"},
173 {19, nullptr, "IsAuthorized"},
174 {20, nullptr, "GetAuthorizationCode"},
175 {21, nullptr, "GetIdToken"},
176 {22, nullptr, "GetState"},
177 };
178 // clang-format on
179
180 RegisterHandlers(functions);
181 }
182};
183
184class IOAuthProcedure final : public ServiceFramework<IOAuthProcedure> {
185public:
186 explicit IOAuthProcedure(Common::UUID user_id) : ServiceFramework("IOAuthProcedure") {
187 // clang-format off
188 static const FunctionInfo functions[] = {
189 {0, nullptr, "PrepareAsync"},
190 {1, nullptr, "GetRequest"},
191 {2, nullptr, "ApplyResponse"},
192 {3, nullptr, "ApplyResponseAsync"},
193 {10, nullptr, "Suspend"},
194 };
195 // clang-format on
196
197 RegisterHandlers(functions);
198 }
199};
200
201// 3.0.0+
202class IOAuthProcedureForExternalNsa final : public ServiceFramework<IOAuthProcedureForExternalNsa> {
203public:
204 explicit IOAuthProcedureForExternalNsa(Common::UUID user_id)
205 : ServiceFramework("IOAuthProcedureForExternalNsa") {
206 // clang-format off
207 static const FunctionInfo functions[] = {
208 {0, nullptr, "PrepareAsync"},
209 {1, nullptr, "GetRequest"},
210 {2, nullptr, "ApplyResponse"},
211 {3, nullptr, "ApplyResponseAsync"},
212 {10, nullptr, "Suspend"},
213 {100, nullptr, "GetAccountId"},
214 {101, nullptr, "GetLinkedNintendoAccountId"},
215 {102, nullptr, "GetNickname"},
216 {103, nullptr, "GetProfileImage"},
217 };
218 // clang-format on
219
220 RegisterHandlers(functions);
221 }
222};
223
224class IOAuthProcedureForNintendoAccountLinkage final
225 : public ServiceFramework<IOAuthProcedureForNintendoAccountLinkage> {
226public:
227 explicit IOAuthProcedureForNintendoAccountLinkage(Common::UUID user_id)
228 : ServiceFramework("IOAuthProcedureForNintendoAccountLinkage") {
229 // clang-format off
230 static const FunctionInfo functions[] = {
231 {0, nullptr, "PrepareAsync"},
232 {1, nullptr, "GetRequest"},
233 {2, nullptr, "ApplyResponse"},
234 {3, nullptr, "ApplyResponseAsync"},
235 {10, nullptr, "Suspend"},
236 {100, nullptr, "GetRequestWithTheme"},
237 {101, nullptr, "IsNetworkServiceAccountReplaced"},
238 {199, nullptr, "GetUrlForIntroductionOfExtraMembership"}, // 2.0.0 - 5.1.0
239 };
240 // clang-format on
241
242 RegisterHandlers(functions);
243 }
244};
245
246class INotifier final : public ServiceFramework<INotifier> {
247public:
248 explicit INotifier(Common::UUID user_id) : ServiceFramework("INotifier") {
249 // clang-format off
250 static const FunctionInfo functions[] = {
251 {0, nullptr, "GetSystemEvent"},
252 };
253 // clang-format on
254
255 RegisterHandlers(functions);
256 }
257};
258
47class IProfileCommon : public ServiceFramework<IProfileCommon> { 259class IProfileCommon : public ServiceFramework<IProfileCommon> {
48public: 260public:
49 explicit IProfileCommon(const char* name, bool editor_commands, Common::UUID user_id, 261 explicit IProfileCommon(const char* name, bool editor_commands, Common::UUID user_id,
@@ -226,6 +438,54 @@ public:
226 : IProfileCommon("IProfileEditor", true, user_id, profile_manager) {} 438 : IProfileCommon("IProfileEditor", true, user_id, profile_manager) {}
227}; 439};
228 440
441class IAsyncContext final : public ServiceFramework<IAsyncContext> {
442public:
443 explicit IAsyncContext(Common::UUID user_id) : ServiceFramework("IAsyncContext") {
444 // clang-format off
445 static const FunctionInfo functions[] = {
446 {0, nullptr, "GetSystemEvent"},
447 {1, nullptr, "Cancel"},
448 {2, nullptr, "HasDone"},
449 {3, nullptr, "GetResult"},
450 };
451 // clang-format on
452
453 RegisterHandlers(functions);
454 }
455};
456
457class ISessionObject final : public ServiceFramework<ISessionObject> {
458public:
459 explicit ISessionObject(Common::UUID user_id) : ServiceFramework("ISessionObject") {
460 // clang-format off
461 static const FunctionInfo functions[] = {
462 {999, nullptr, "Dummy"},
463 };
464 // clang-format on
465
466 RegisterHandlers(functions);
467 }
468};
469
470class IGuestLoginRequest final : public ServiceFramework<IGuestLoginRequest> {
471public:
472 explicit IGuestLoginRequest(Common::UUID) : ServiceFramework("IGuestLoginRequest") {
473 // clang-format off
474 static const FunctionInfo functions[] = {
475 {0, nullptr, "GetSessionId"},
476 {11, nullptr, "Unknown"}, // 1.0.0 - 2.3.0 (the name is blank on Switchbrew)
477 {12, nullptr, "GetAccountId"},
478 {13, nullptr, "GetLinkedNintendoAccountId"},
479 {14, nullptr, "GetNickname"},
480 {15, nullptr, "GetProfileImage"},
481 {21, nullptr, "LoadIdTokenCache"}, // 3.0.0+
482 };
483 // clang-format on
484
485 RegisterHandlers(functions);
486 }
487};
488
229class IManagerForApplication final : public ServiceFramework<IManagerForApplication> { 489class IManagerForApplication final : public ServiceFramework<IManagerForApplication> {
230public: 490public:
231 explicit IManagerForApplication(Common::UUID user_id) 491 explicit IManagerForApplication(Common::UUID user_id)
@@ -265,6 +525,87 @@ private:
265 Common::UUID user_id; 525 Common::UUID user_id;
266}; 526};
267 527
528// 6.0.0+
529class IAsyncNetworkServiceLicenseKindContext final
530 : public ServiceFramework<IAsyncNetworkServiceLicenseKindContext> {
531public:
532 explicit IAsyncNetworkServiceLicenseKindContext(Common::UUID user_id)
533 : ServiceFramework("IAsyncNetworkServiceLicenseKindContext") {
534 // clang-format off
535 static const FunctionInfo functions[] = {
536 {0, nullptr, "GetSystemEvent"},
537 {1, nullptr, "Cancel"},
538 {2, nullptr, "HasDone"},
539 {3, nullptr, "GetResult"},
540 {4, nullptr, "GetNetworkServiceLicenseKind"},
541 };
542 // clang-format on
543
544 RegisterHandlers(functions);
545 }
546};
547
548// 8.0.0+
549class IOAuthProcedureForUserRegistration final
550 : public ServiceFramework<IOAuthProcedureForUserRegistration> {
551public:
552 explicit IOAuthProcedureForUserRegistration(Common::UUID user_id)
553 : ServiceFramework("IOAuthProcedureForUserRegistration") {
554 // clang-format off
555 static const FunctionInfo functions[] = {
556 {0, nullptr, "PrepareAsync"},
557 {1, nullptr, "GetRequest"},
558 {2, nullptr, "ApplyResponse"},
559 {3, nullptr, "ApplyResponseAsync"},
560 {10, nullptr, "Suspend"},
561 {100, nullptr, "GetAccountId"},
562 {101, nullptr, "GetLinkedNintendoAccountId"},
563 {102, nullptr, "GetNickname"},
564 {103, nullptr, "GetProfileImage"},
565 {110, nullptr, "RegisterUserAsync"},
566 {111, nullptr, "GetUid"},
567 };
568 // clang-format on
569
570 RegisterHandlers(functions);
571 }
572};
573
574class DAUTH_O final : public ServiceFramework<DAUTH_O> {
575public:
576 explicit DAUTH_O(Common::UUID) : ServiceFramework("dauth:o") {
577 // clang-format off
578 static const FunctionInfo functions[] = {
579 {0, nullptr, "EnsureAuthenticationTokenCacheAsync"}, // [5.0.0-5.1.0] GeneratePostData
580 {1, nullptr, "LoadAuthenticationTokenCache"}, // 6.0.0+
581 {2, nullptr, "InvalidateAuthenticationTokenCache"}, // 6.0.0+
582 {10, nullptr, "EnsureEdgeTokenCacheAsync"}, // 6.0.0+
583 {11, nullptr, "LoadEdgeTokenCache"}, // 6.0.0+
584 {12, nullptr, "InvalidateEdgeTokenCache"}, // 6.0.0+
585 };
586 // clang-format on
587
588 RegisterHandlers(functions);
589 }
590};
591
592// 6.0.0+
593class IAsyncResult final : public ServiceFramework<IAsyncResult> {
594public:
595 explicit IAsyncResult(Common::UUID user_id) : ServiceFramework("IAsyncResult") {
596 // clang-format off
597 static const FunctionInfo functions[] = {
598 {0, nullptr, "GetResult"},
599 {1, nullptr, "Cancel"},
600 {2, nullptr, "IsAvailable"},
601 {3, nullptr, "GetSystemEvent"},
602 };
603 // clang-format on
604
605 RegisterHandlers(functions);
606 }
607};
608
268void Module::Interface::GetUserCount(Kernel::HLERequestContext& ctx) { 609void Module::Interface::GetUserCount(Kernel::HLERequestContext& ctx) {
269 LOG_DEBUG(Service_ACC, "called"); 610 LOG_DEBUG(Service_ACC, "called");
270 IPC::ResponseBuilder rb{ctx, 3}; 611 IPC::ResponseBuilder rb{ctx, 3};
@@ -435,6 +776,15 @@ void Module::Interface::ListQualifiedUsers(Kernel::HLERequestContext& ctx) {
435 rb.Push(RESULT_SUCCESS); 776 rb.Push(RESULT_SUCCESS);
436} 777}
437 778
779void Module::Interface::ListOpenContextStoredUsers(Kernel::HLERequestContext& ctx) {
780 LOG_WARNING(Service_ACC, "(STUBBED) called");
781
782 // TODO(ogniK): Handle open contexts
783 ctx.WriteBuffer(profile_manager->GetOpenUsers());
784 IPC::ResponseBuilder rb{ctx, 2};
785 rb.Push(RESULT_SUCCESS);
786}
787
438void Module::Interface::TrySelectUserWithoutInteraction(Kernel::HLERequestContext& ctx) { 788void Module::Interface::TrySelectUserWithoutInteraction(Kernel::HLERequestContext& ctx) {
439 LOG_DEBUG(Service_ACC, "called"); 789 LOG_DEBUG(Service_ACC, "called");
440 // A u8 is passed into this function which we can safely ignore. It's to determine if we have 790 // A u8 is passed into this function which we can safely ignore. It's to determine if we have
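All of the new acc interfaces above follow the same ServiceFramework pattern: a static table maps IPC command IDs to member-function handlers, and nullptr marks commands that are known but still stubbed. The sketch below shows that dispatch idea in isolation; the class, entry names, and Dispatch helper are invented for illustration and are not the real ServiceFramework machinery.

// Stripped-down sketch of a command table with stubbed (nullptr) entries.
#include <cstdint>
#include <iostream>

class AccountServiceSketch {
public:
    void Dispatch(std::uint32_t command_id) {
        for (const auto& entry : functions) {
            if (entry.id != command_id) {
                continue;
            }
            if (entry.handler == nullptr) {
                std::cout << "STUBBED: " << entry.name << '\n';
            } else {
                (this->*entry.handler)();
            }
            return;
        }
        std::cout << "unknown command " << command_id << '\n';
    }

private:
    void GetUserCount() { std::cout << "GetUserCount called\n"; }

    struct FunctionEntry {
        std::uint32_t id;
        void (AccountServiceSketch::*handler)();
        const char* name;
    };

    static constexpr FunctionEntry functions[] = {
        {0, &AccountServiceSketch::GetUserCount, "GetUserCount"},
        {60, nullptr, "ListOpenContextStoredUsers"},
    };
};

int main() {
    AccountServiceSketch acc;
    acc.Dispatch(0);  // implemented handler
    acc.Dispatch(60); // stubbed entry, only logged
}

Filling in the tables with named nullptr entries, as the diff does for IManagerForSystemService, IAdministrator, and the other new classes, documents the full command surface up front so individual handlers can be wired in later without touching the table layout.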
diff --git a/src/core/hle/service/acc/acc.h b/src/core/hle/service/acc/acc.h
index 74ca39d6e..d4c6395c6 100644
--- a/src/core/hle/service/acc/acc.h
+++ b/src/core/hle/service/acc/acc.h
@@ -34,6 +34,7 @@ public:
34 void IsUserAccountSwitchLocked(Kernel::HLERequestContext& ctx); 34 void IsUserAccountSwitchLocked(Kernel::HLERequestContext& ctx);
35 void GetProfileEditor(Kernel::HLERequestContext& ctx); 35 void GetProfileEditor(Kernel::HLERequestContext& ctx);
36 void ListQualifiedUsers(Kernel::HLERequestContext& ctx); 36 void ListQualifiedUsers(Kernel::HLERequestContext& ctx);
37 void ListOpenContextStoredUsers(Kernel::HLERequestContext& ctx);
37 38
38 private: 39 private:
39 ResultCode InitializeApplicationInfoBase(); 40 ResultCode InitializeApplicationInfoBase();
diff --git a/src/core/hle/service/acc/acc_aa.cpp b/src/core/hle/service/acc/acc_aa.cpp
index 3bac6bcd1..51f119b12 100644
--- a/src/core/hle/service/acc/acc_aa.cpp
+++ b/src/core/hle/service/acc/acc_aa.cpp
@@ -13,8 +13,8 @@ ACC_AA::ACC_AA(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> p
13 {0, nullptr, "EnsureCacheAsync"}, 13 {0, nullptr, "EnsureCacheAsync"},
14 {1, nullptr, "LoadCache"}, 14 {1, nullptr, "LoadCache"},
15 {2, nullptr, "GetDeviceAccountId"}, 15 {2, nullptr, "GetDeviceAccountId"},
16 {50, nullptr, "RegisterNotificationTokenAsync"}, 16 {50, nullptr, "RegisterNotificationTokenAsync"}, // 1.0.0 - 6.2.0
17 {51, nullptr, "UnregisterNotificationTokenAsync"}, 17 {51, nullptr, "UnregisterNotificationTokenAsync"}, // 1.0.0 - 6.2.0
18 }; 18 };
19 RegisterHandlers(functions); 19 RegisterHandlers(functions);
20} 20}
diff --git a/src/core/hle/service/acc/acc_su.cpp b/src/core/hle/service/acc/acc_su.cpp
index 2eefc6df5..d2bb8c2c8 100644
--- a/src/core/hle/service/acc/acc_su.cpp
+++ b/src/core/hle/service/acc/acc_su.cpp
@@ -17,28 +17,28 @@ ACC_SU::ACC_SU(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> p
17 {3, &ACC_SU::ListOpenUsers, "ListOpenUsers"}, 17 {3, &ACC_SU::ListOpenUsers, "ListOpenUsers"},
18 {4, &ACC_SU::GetLastOpenedUser, "GetLastOpenedUser"}, 18 {4, &ACC_SU::GetLastOpenedUser, "GetLastOpenedUser"},
19 {5, &ACC_SU::GetProfile, "GetProfile"}, 19 {5, &ACC_SU::GetProfile, "GetProfile"},
20 {6, nullptr, "GetProfileDigest"}, 20 {6, nullptr, "GetProfileDigest"}, // 3.0.0+
21 {50, &ACC_SU::IsUserRegistrationRequestPermitted, "IsUserRegistrationRequestPermitted"}, 21 {50, &ACC_SU::IsUserRegistrationRequestPermitted, "IsUserRegistrationRequestPermitted"},
22 {51, &ACC_SU::TrySelectUserWithoutInteraction, "TrySelectUserWithoutInteraction"}, 22 {51, &ACC_SU::TrySelectUserWithoutInteraction, "TrySelectUserWithoutInteraction"},
23 {60, nullptr, "ListOpenContextStoredUsers"}, 23 {60, &ACC_SU::ListOpenContextStoredUsers, "ListOpenContextStoredUsers"}, // 5.0.0 - 5.1.0
24 {99, nullptr, "DebugActivateOpenContextRetention"}, 24 {99, nullptr, "DebugActivateOpenContextRetention"}, // 6.0.0+
25 {100, nullptr, "GetUserRegistrationNotifier"}, 25 {100, nullptr, "GetUserRegistrationNotifier"},
26 {101, nullptr, "GetUserStateChangeNotifier"}, 26 {101, nullptr, "GetUserStateChangeNotifier"},
27 {102, nullptr, "GetBaasAccountManagerForSystemService"}, 27 {102, nullptr, "GetBaasAccountManagerForSystemService"},
28 {103, nullptr, "GetBaasUserAvailabilityChangeNotifier"}, 28 {103, nullptr, "GetBaasUserAvailabilityChangeNotifier"},
29 {104, nullptr, "GetProfileUpdateNotifier"}, 29 {104, nullptr, "GetProfileUpdateNotifier"},
30 {105, nullptr, "CheckNetworkServiceAvailabilityAsync"}, 30 {105, nullptr, "CheckNetworkServiceAvailabilityAsync"}, // 4.0.0+
31 {106, nullptr, "GetProfileSyncNotifier"}, 31 {106, nullptr, "GetProfileSyncNotifier"}, // 9.0.0+
32 {110, nullptr, "StoreSaveDataThumbnail"}, 32 {110, nullptr, "StoreSaveDataThumbnail"},
33 {111, nullptr, "ClearSaveDataThumbnail"}, 33 {111, nullptr, "ClearSaveDataThumbnail"},
34 {112, nullptr, "LoadSaveDataThumbnail"}, 34 {112, nullptr, "LoadSaveDataThumbnail"},
35 {113, nullptr, "GetSaveDataThumbnailExistence"}, 35 {113, nullptr, "GetSaveDataThumbnailExistence"}, // 5.0.0+
36 {120, nullptr, "ListOpenUsersInApplication"}, 36 {120, nullptr, "ListOpenUsersInApplication"}, // 10.0.0+
37 {130, nullptr, "ActivateOpenContextRetention"}, 37 {130, nullptr, "ActivateOpenContextRetention"}, // 6.0.0+
38 {140, &ACC_SU::ListQualifiedUsers, "ListQualifiedUsers"}, 38 {140, &ACC_SU::ListQualifiedUsers, "ListQualifiedUsers"}, // 6.0.0+
39 {150, nullptr, "AuthenticateApplicationAsync"}, 39 {150, nullptr, "AuthenticateApplicationAsync"}, // 10.0.0+
40 {190, nullptr, "GetUserLastOpenedApplication"}, 40 {190, nullptr, "GetUserLastOpenedApplication"}, // 1.0.0 - 9.2.0
41 {191, nullptr, "ActivateOpenContextHolder"}, 41 {191, nullptr, "ActivateOpenContextHolder"}, // 7.0.0+
42 {200, nullptr, "BeginUserRegistration"}, 42 {200, nullptr, "BeginUserRegistration"},
43 {201, nullptr, "CompleteUserRegistration"}, 43 {201, nullptr, "CompleteUserRegistration"},
44 {202, nullptr, "CancelUserRegistration"}, 44 {202, nullptr, "CancelUserRegistration"},
@@ -46,15 +46,15 @@ ACC_SU::ACC_SU(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> p
46 {204, nullptr, "SetUserPosition"}, 46 {204, nullptr, "SetUserPosition"},
47 {205, &ACC_SU::GetProfileEditor, "GetProfileEditor"}, 47 {205, &ACC_SU::GetProfileEditor, "GetProfileEditor"},
48 {206, nullptr, "CompleteUserRegistrationForcibly"}, 48 {206, nullptr, "CompleteUserRegistrationForcibly"},
49 {210, nullptr, "CreateFloatingRegistrationRequest"}, 49 {210, nullptr, "CreateFloatingRegistrationRequest"}, // 3.0.0+
50 {211, nullptr, "CreateProcedureToRegisterUserWithNintendoAccount"}, 50 {211, nullptr, "CreateProcedureToRegisterUserWithNintendoAccount"}, // 8.0.0+
51 {212, nullptr, "ResumeProcedureToRegisterUserWithNintendoAccount"}, 51 {212, nullptr, "ResumeProcedureToRegisterUserWithNintendoAccount"}, // 8.0.0+
52 {230, nullptr, "AuthenticateServiceAsync"}, 52 {230, nullptr, "AuthenticateServiceAsync"},
53 {250, nullptr, "GetBaasAccountAdministrator"}, 53 {250, nullptr, "GetBaasAccountAdministrator"},
54 {290, nullptr, "ProxyProcedureForGuestLoginWithNintendoAccount"}, 54 {290, nullptr, "ProxyProcedureForGuestLoginWithNintendoAccount"},
55 {291, nullptr, "ProxyProcedureForFloatingRegistrationWithNintendoAccount"}, 55 {291, nullptr, "ProxyProcedureForFloatingRegistrationWithNintendoAccount"}, // 3.0.0+
56 {299, nullptr, "SuspendBackgroundDaemon"}, 56 {299, nullptr, "SuspendBackgroundDaemon"},
57 {997, nullptr, "DebugInvalidateTokenCacheForUser"}, 57 {997, nullptr, "DebugInvalidateTokenCacheForUser"}, // 3.0.0+
58 {998, nullptr, "DebugSetUserStateClose"}, 58 {998, nullptr, "DebugSetUserStateClose"},
59 {999, nullptr, "DebugSetUserStateOpen"}, 59 {999, nullptr, "DebugSetUserStateOpen"},
60 }; 60 };
diff --git a/src/core/hle/service/acc/acc_u0.cpp b/src/core/hle/service/acc/acc_u0.cpp
index fb4e7e772..cb44e06b7 100644
--- a/src/core/hle/service/acc/acc_u0.cpp
+++ b/src/core/hle/service/acc/acc_u0.cpp
@@ -17,23 +17,23 @@ ACC_U0::ACC_U0(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> p
17 {3, &ACC_U0::ListOpenUsers, "ListOpenUsers"}, 17 {3, &ACC_U0::ListOpenUsers, "ListOpenUsers"},
18 {4, &ACC_U0::GetLastOpenedUser, "GetLastOpenedUser"}, 18 {4, &ACC_U0::GetLastOpenedUser, "GetLastOpenedUser"},
19 {5, &ACC_U0::GetProfile, "GetProfile"}, 19 {5, &ACC_U0::GetProfile, "GetProfile"},
20 {6, nullptr, "GetProfileDigest"}, 20 {6, nullptr, "GetProfileDigest"}, // 3.0.0+
21 {50, &ACC_U0::IsUserRegistrationRequestPermitted, "IsUserRegistrationRequestPermitted"}, 21 {50, &ACC_U0::IsUserRegistrationRequestPermitted, "IsUserRegistrationRequestPermitted"},
22 {51, &ACC_U0::TrySelectUserWithoutInteraction, "TrySelectUserWithoutInteraction"}, 22 {51, &ACC_U0::TrySelectUserWithoutInteraction, "TrySelectUserWithoutInteraction"},
23 {60, nullptr, "ListOpenContextStoredUsers"}, 23 {60, &ACC_U0::ListOpenContextStoredUsers, "ListOpenContextStoredUsers"}, // 5.0.0 - 5.1.0
24 {99, nullptr, "DebugActivateOpenContextRetention"}, 24 {99, nullptr, "DebugActivateOpenContextRetention"}, // 6.0.0+
25 {100, &ACC_U0::InitializeApplicationInfo, "InitializeApplicationInfo"}, 25 {100, &ACC_U0::InitializeApplicationInfo, "InitializeApplicationInfo"},
26 {101, &ACC_U0::GetBaasAccountManagerForApplication, "GetBaasAccountManagerForApplication"}, 26 {101, &ACC_U0::GetBaasAccountManagerForApplication, "GetBaasAccountManagerForApplication"},
27 {102, nullptr, "AuthenticateApplicationAsync"}, 27 {102, nullptr, "AuthenticateApplicationAsync"},
28 {103, nullptr, "CheckNetworkServiceAvailabilityAsync"}, 28 {103, nullptr, "CheckNetworkServiceAvailabilityAsync"}, // 4.0.0+
29 {110, nullptr, "StoreSaveDataThumbnail"}, 29 {110, nullptr, "StoreSaveDataThumbnail"},
30 {111, nullptr, "ClearSaveDataThumbnail"}, 30 {111, nullptr, "ClearSaveDataThumbnail"},
31 {120, nullptr, "CreateGuestLoginRequest"}, 31 {120, nullptr, "CreateGuestLoginRequest"},
32 {130, nullptr, "LoadOpenContext"}, 32 {130, nullptr, "LoadOpenContext"}, // 5.0.0+
33 {131, nullptr, "ListOpenContextStoredUsers"}, 33 {131, &ACC_U0::ListOpenContextStoredUsers, "ListOpenContextStoredUsers"}, // 6.0.0+
34 {140, &ACC_U0::InitializeApplicationInfoRestricted, "InitializeApplicationInfoRestricted"}, 34 {140, &ACC_U0::InitializeApplicationInfoRestricted, "InitializeApplicationInfoRestricted"}, // 6.0.0+
35 {141, &ACC_U0::ListQualifiedUsers, "ListQualifiedUsers"}, 35 {141, &ACC_U0::ListQualifiedUsers, "ListQualifiedUsers"}, // 6.0.0+
36 {150, &ACC_U0::IsUserAccountSwitchLocked, "IsUserAccountSwitchLocked"}, 36 {150, &ACC_U0::IsUserAccountSwitchLocked, "IsUserAccountSwitchLocked"}, // 6.0.0+
37 }; 37 };
38 // clang-format on 38 // clang-format on
39 39
diff --git a/src/core/hle/service/acc/acc_u1.cpp b/src/core/hle/service/acc/acc_u1.cpp
index 9f29cdc82..a4aa5316a 100644
--- a/src/core/hle/service/acc/acc_u1.cpp
+++ b/src/core/hle/service/acc/acc_u1.cpp
@@ -17,28 +17,29 @@ ACC_U1::ACC_U1(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> p
17 {3, &ACC_U1::ListOpenUsers, "ListOpenUsers"}, 17 {3, &ACC_U1::ListOpenUsers, "ListOpenUsers"},
18 {4, &ACC_U1::GetLastOpenedUser, "GetLastOpenedUser"}, 18 {4, &ACC_U1::GetLastOpenedUser, "GetLastOpenedUser"},
19 {5, &ACC_U1::GetProfile, "GetProfile"}, 19 {5, &ACC_U1::GetProfile, "GetProfile"},
20 {6, nullptr, "GetProfileDigest"}, 20 {6, nullptr, "GetProfileDigest"}, // 3.0.0+
21 {50, &ACC_U1::IsUserRegistrationRequestPermitted, "IsUserRegistrationRequestPermitted"}, 21 {50, &ACC_U1::IsUserRegistrationRequestPermitted, "IsUserRegistrationRequestPermitted"},
22 {51, &ACC_U1::TrySelectUserWithoutInteraction, "TrySelectUserWithoutInteraction"}, 22 {51, &ACC_U1::TrySelectUserWithoutInteraction, "TrySelectUserWithoutInteraction"},
23 {60, nullptr, "ListOpenContextStoredUsers"}, 23 {60, &ACC_U1::ListOpenContextStoredUsers, "ListOpenContextStoredUsers"}, // 5.0.0 - 5.1.0
24 {99, nullptr, "DebugActivateOpenContextRetention"}, 24 {99, nullptr, "DebugActivateOpenContextRetention"}, // 6.0.0+
25 {100, nullptr, "GetUserRegistrationNotifier"}, 25 {100, nullptr, "GetUserRegistrationNotifier"},
26 {101, nullptr, "GetUserStateChangeNotifier"}, 26 {101, nullptr, "GetUserStateChangeNotifier"},
27 {102, nullptr, "GetBaasAccountManagerForSystemService"}, 27 {102, nullptr, "GetBaasAccountManagerForSystemService"},
28 {103, nullptr, "GetProfileUpdateNotifier"}, 28 {103, nullptr, "GetBaasUserAvailabilityChangeNotifier"},
29 {104, nullptr, "CheckNetworkServiceAvailabilityAsync"}, 29 {104, nullptr, "GetProfileUpdateNotifier"},
30 {105, nullptr, "GetBaasUserAvailabilityChangeNotifier"}, 30 {105, nullptr, "CheckNetworkServiceAvailabilityAsync"}, // 4.0.0+
31 {106, nullptr, "GetProfileSyncNotifier"}, 31 {106, nullptr, "GetProfileSyncNotifier"}, // 9.0.0+
32 {110, nullptr, "StoreSaveDataThumbnail"}, 32 {110, nullptr, "StoreSaveDataThumbnail"},
33 {111, nullptr, "ClearSaveDataThumbnail"}, 33 {111, nullptr, "ClearSaveDataThumbnail"},
34 {112, nullptr, "LoadSaveDataThumbnail"}, 34 {112, nullptr, "LoadSaveDataThumbnail"},
35 {113, nullptr, "GetSaveDataThumbnailExistence"}, 35 {113, nullptr, "GetSaveDataThumbnailExistence"}, // 5.0.0+
36 {130, nullptr, "ActivateOpenContextRetention"}, 36 {120, nullptr, "ListOpenUsersInApplication"}, // 10.0.0+
37 {140, &ACC_U1::ListQualifiedUsers, "ListQualifiedUsers"}, 37 {130, nullptr, "ActivateOpenContextRetention"}, // 6.0.0+
38 {150, nullptr, "AuthenticateApplicationAsync"}, 38 {140, &ACC_U1::ListQualifiedUsers, "ListQualifiedUsers"}, // 6.0.0+
39 {190, nullptr, "GetUserLastOpenedApplication"}, 39 {150, nullptr, "AuthenticateApplicationAsync"}, // 10.0.0+
40 {191, nullptr, "ActivateOpenContextHolder"}, 40 {190, nullptr, "GetUserLastOpenedApplication"}, // 1.0.0 - 9.2.0
41 {997, nullptr, "DebugInvalidateTokenCacheForUser"}, 41 {191, nullptr, "ActivateOpenContextHolder"}, // 7.0.0+
42 {997, nullptr, "DebugInvalidateTokenCacheForUser"}, // 3.0.0+
42 {998, nullptr, "DebugSetUserStateClose"}, 43 {998, nullptr, "DebugSetUserStateClose"},
43 {999, nullptr, "DebugSetUserStateOpen"}, 44 {999, nullptr, "DebugSetUserStateOpen"},
44 }; 45 };
diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp
index 4df74c4f9..256449aa7 100644
--- a/src/core/hle/service/am/am.cpp
+++ b/src/core/hle/service/am/am.cpp
@@ -10,6 +10,7 @@
10#include "core/core.h" 10#include "core/core.h"
11#include "core/file_sys/control_metadata.h" 11#include "core/file_sys/control_metadata.h"
12#include "core/file_sys/patch_manager.h" 12#include "core/file_sys/patch_manager.h"
13#include "core/file_sys/registered_cache.h"
13#include "core/file_sys/savedata_factory.h" 14#include "core/file_sys/savedata_factory.h"
14#include "core/hle/ipc_helpers.h" 15#include "core/hle/ipc_helpers.h"
15#include "core/hle/kernel/kernel.h" 16#include "core/hle/kernel/kernel.h"
@@ -68,6 +69,7 @@ IWindowController::IWindowController(Core::System& system_)
68 static const FunctionInfo functions[] = { 69 static const FunctionInfo functions[] = {
69 {0, nullptr, "CreateWindow"}, 70 {0, nullptr, "CreateWindow"},
70 {1, &IWindowController::GetAppletResourceUserId, "GetAppletResourceUserId"}, 71 {1, &IWindowController::GetAppletResourceUserId, "GetAppletResourceUserId"},
72 {2, nullptr, "GetAppletResourceUserIdOfCallerApplet"},
71 {10, &IWindowController::AcquireForegroundRights, "AcquireForegroundRights"}, 73 {10, &IWindowController::AcquireForegroundRights, "AcquireForegroundRights"},
72 {11, nullptr, "ReleaseForegroundRights"}, 74 {11, nullptr, "ReleaseForegroundRights"},
73 {12, nullptr, "RejectToChangeIntoBackground"}, 75 {12, nullptr, "RejectToChangeIntoBackground"},
@@ -189,8 +191,8 @@ IDisplayController::IDisplayController() : ServiceFramework("IDisplayController"
189 {5, nullptr, "GetLastForegroundCaptureImageEx"}, 191 {5, nullptr, "GetLastForegroundCaptureImageEx"},
190 {6, nullptr, "GetLastApplicationCaptureImageEx"}, 192 {6, nullptr, "GetLastApplicationCaptureImageEx"},
191 {7, nullptr, "GetCallerAppletCaptureImageEx"}, 193 {7, nullptr, "GetCallerAppletCaptureImageEx"},
192 {8, nullptr, "TakeScreenShotOfOwnLayer"}, // 2.0.0+ 194 {8, nullptr, "TakeScreenShotOfOwnLayer"},
193 {9, nullptr, "CopyBetweenCaptureBuffers"}, // 5.0.0+ 195 {9, nullptr, "CopyBetweenCaptureBuffers"},
194 {10, nullptr, "AcquireLastApplicationCaptureBuffer"}, 196 {10, nullptr, "AcquireLastApplicationCaptureBuffer"},
195 {11, nullptr, "ReleaseLastApplicationCaptureBuffer"}, 197 {11, nullptr, "ReleaseLastApplicationCaptureBuffer"},
196 {12, nullptr, "AcquireLastForegroundCaptureBuffer"}, 198 {12, nullptr, "AcquireLastForegroundCaptureBuffer"},
@@ -200,17 +202,14 @@ IDisplayController::IDisplayController() : ServiceFramework("IDisplayController"
200 {16, nullptr, "AcquireLastApplicationCaptureBufferEx"}, 202 {16, nullptr, "AcquireLastApplicationCaptureBufferEx"},
201 {17, nullptr, "AcquireLastForegroundCaptureBufferEx"}, 203 {17, nullptr, "AcquireLastForegroundCaptureBufferEx"},
202 {18, nullptr, "AcquireCallerAppletCaptureBufferEx"}, 204 {18, nullptr, "AcquireCallerAppletCaptureBufferEx"},
203 // 2.0.0+
204 {20, nullptr, "ClearCaptureBuffer"}, 205 {20, nullptr, "ClearCaptureBuffer"},
205 {21, nullptr, "ClearAppletTransitionBuffer"}, 206 {21, nullptr, "ClearAppletTransitionBuffer"},
206 // 4.0.0+
207 {22, nullptr, "AcquireLastApplicationCaptureSharedBuffer"}, 207 {22, nullptr, "AcquireLastApplicationCaptureSharedBuffer"},
208 {23, nullptr, "ReleaseLastApplicationCaptureSharedBuffer"}, 208 {23, nullptr, "ReleaseLastApplicationCaptureSharedBuffer"},
209 {24, nullptr, "AcquireLastForegroundCaptureSharedBuffer"}, 209 {24, nullptr, "AcquireLastForegroundCaptureSharedBuffer"},
210 {25, nullptr, "ReleaseLastForegroundCaptureSharedBuffer"}, 210 {25, nullptr, "ReleaseLastForegroundCaptureSharedBuffer"},
211 {26, nullptr, "AcquireCallerAppletCaptureSharedBuffer"}, 211 {26, nullptr, "AcquireCallerAppletCaptureSharedBuffer"},
212 {27, nullptr, "ReleaseCallerAppletCaptureSharedBuffer"}, 212 {27, nullptr, "ReleaseCallerAppletCaptureSharedBuffer"},
213 // 6.0.0+
214 {28, nullptr, "TakeScreenShotOfOwnLayerEx"}, 213 {28, nullptr, "TakeScreenShotOfOwnLayerEx"},
215 }; 214 };
216 // clang-format on 215 // clang-format on
@@ -225,7 +224,7 @@ IDebugFunctions::IDebugFunctions() : ServiceFramework{"IDebugFunctions"} {
225 static const FunctionInfo functions[] = { 224 static const FunctionInfo functions[] = {
226 {0, nullptr, "NotifyMessageToHomeMenuForDebug"}, 225 {0, nullptr, "NotifyMessageToHomeMenuForDebug"},
227 {1, nullptr, "OpenMainApplication"}, 226 {1, nullptr, "OpenMainApplication"},
228 {10, nullptr, "EmulateButtonEvent"}, 227 {10, nullptr, "PerformSystemButtonPressing"},
229 {20, nullptr, "InvalidateTransitionLayer"}, 228 {20, nullptr, "InvalidateTransitionLayer"},
230 {30, nullptr, "RequestLaunchApplicationWithUserAndArgumentForDebug"}, 229 {30, nullptr, "RequestLaunchApplicationWithUserAndArgumentForDebug"},
231 {40, nullptr, "GetAppletResourceUsageInfo"}, 230 {40, nullptr, "GetAppletResourceUsageInfo"},
@@ -267,13 +266,13 @@ ISelfController::ISelfController(Core::System& system,
267 {16, &ISelfController::SetOutOfFocusSuspendingEnabled, "SetOutOfFocusSuspendingEnabled"}, 266 {16, &ISelfController::SetOutOfFocusSuspendingEnabled, "SetOutOfFocusSuspendingEnabled"},
268 {17, nullptr, "SetControllerFirmwareUpdateSection"}, 267 {17, nullptr, "SetControllerFirmwareUpdateSection"},
269 {18, nullptr, "SetRequiresCaptureButtonShortPressedMessage"}, 268 {18, nullptr, "SetRequiresCaptureButtonShortPressedMessage"},
270 {19, &ISelfController::SetScreenShotImageOrientation, "SetScreenShotImageOrientation"}, 269 {19, &ISelfController::SetAlbumImageOrientation, "SetAlbumImageOrientation"},
271 {20, nullptr, "SetDesirableKeyboardLayout"}, 270 {20, nullptr, "SetDesirableKeyboardLayout"},
272 {40, &ISelfController::CreateManagedDisplayLayer, "CreateManagedDisplayLayer"}, 271 {40, &ISelfController::CreateManagedDisplayLayer, "CreateManagedDisplayLayer"},
273 {41, nullptr, "IsSystemBufferSharingEnabled"}, 272 {41, nullptr, "IsSystemBufferSharingEnabled"},
274 {42, nullptr, "GetSystemSharedLayerHandle"}, 273 {42, nullptr, "GetSystemSharedLayerHandle"},
275 {43, nullptr, "GetSystemSharedBufferHandle"}, 274 {43, nullptr, "GetSystemSharedBufferHandle"},
276 {44, nullptr, "CreateManagedDisplaySeparableLayer"}, 275 {44, &ISelfController::CreateManagedDisplaySeparableLayer, "CreateManagedDisplaySeparableLayer"},
277 {45, nullptr, "SetManagedDisplayLayerSeparationMode"}, 276 {45, nullptr, "SetManagedDisplayLayerSeparationMode"},
278 {50, &ISelfController::SetHandlesRequestToDisplay, "SetHandlesRequestToDisplay"}, 277 {50, &ISelfController::SetHandlesRequestToDisplay, "SetHandlesRequestToDisplay"},
279 {51, nullptr, "ApproveToDisplay"}, 278 {51, nullptr, "ApproveToDisplay"},
@@ -443,7 +442,7 @@ void ISelfController::SetOutOfFocusSuspendingEnabled(Kernel::HLERequestContext&
443 rb.Push(RESULT_SUCCESS); 442 rb.Push(RESULT_SUCCESS);
444} 443}
445 444
446void ISelfController::SetScreenShotImageOrientation(Kernel::HLERequestContext& ctx) { 445void ISelfController::SetAlbumImageOrientation(Kernel::HLERequestContext& ctx) {
447 LOG_WARNING(Service_AM, "(STUBBED) called"); 446 LOG_WARNING(Service_AM, "(STUBBED) called");
448 447
449 IPC::ResponseBuilder rb{ctx, 2}; 448 IPC::ResponseBuilder rb{ctx, 2};
@@ -463,6 +462,24 @@ void ISelfController::CreateManagedDisplayLayer(Kernel::HLERequestContext& ctx)
463 rb.Push(*layer_id); 462 rb.Push(*layer_id);
464} 463}
465 464
465void ISelfController::CreateManagedDisplaySeparableLayer(Kernel::HLERequestContext& ctx) {
466 LOG_WARNING(Service_AM, "(STUBBED) called");
467
468 // TODO(Subv): Find out how AM determines the display to use, for now just
469 // create the layer in the Default display.
470 // This calls nn::vi::CreateRecordingLayer() which creates another layer.
471 // Currently we do not support more than 1 layer per display, output 1 layer id for now.
472 // Outputting 1 layer id instead of the expected 2 has not been observed to cause any adverse
473 // side effects.
474 // TODO: Support multiple layers
475 const auto display_id = nvflinger->OpenDisplay("Default");
476 const auto layer_id = nvflinger->CreateLayer(*display_id);
477
478 IPC::ResponseBuilder rb{ctx, 4};
479 rb.Push(RESULT_SUCCESS);
480 rb.Push(*layer_id);
481}
482
466void ISelfController::SetHandlesRequestToDisplay(Kernel::HLERequestContext& ctx) { 483void ISelfController::SetHandlesRequestToDisplay(Kernel::HLERequestContext& ctx) {
467 LOG_WARNING(Service_AM, "(STUBBED) called"); 484 LOG_WARNING(Service_AM, "(STUBBED) called");
468 485
@@ -607,6 +624,7 @@ ICommonStateGetter::ICommonStateGetter(Core::System& system,
607 {20, nullptr, "PushToGeneralChannel"}, 624 {20, nullptr, "PushToGeneralChannel"},
608 {30, nullptr, "GetHomeButtonReaderLockAccessor"}, 625 {30, nullptr, "GetHomeButtonReaderLockAccessor"},
609 {31, nullptr, "GetReaderLockAccessorEx"}, 626 {31, nullptr, "GetReaderLockAccessorEx"},
627 {32, nullptr, "GetWriterLockAccessorEx"},
610 {40, nullptr, "GetCradleFwVersion"}, 628 {40, nullptr, "GetCradleFwVersion"},
611 {50, &ICommonStateGetter::IsVrModeEnabled, "IsVrModeEnabled"}, 629 {50, &ICommonStateGetter::IsVrModeEnabled, "IsVrModeEnabled"},
612 {51, &ICommonStateGetter::SetVrModeEnabled, "SetVrModeEnabled"}, 630 {51, &ICommonStateGetter::SetVrModeEnabled, "SetVrModeEnabled"},
@@ -731,14 +749,14 @@ void ICommonStateGetter::GetDefaultDisplayResolution(Kernel::HLERequestContext&
731 749
732 if (Settings::values.use_docked_mode) { 750 if (Settings::values.use_docked_mode) {
733 rb.Push(static_cast<u32>(Service::VI::DisplayResolution::DockedWidth) * 751 rb.Push(static_cast<u32>(Service::VI::DisplayResolution::DockedWidth) *
734 static_cast<u32>(Settings::values.resolution_factor)); 752 static_cast<u32>(Settings::values.resolution_factor.GetValue()));
735 rb.Push(static_cast<u32>(Service::VI::DisplayResolution::DockedHeight) * 753 rb.Push(static_cast<u32>(Service::VI::DisplayResolution::DockedHeight) *
736 static_cast<u32>(Settings::values.resolution_factor)); 754 static_cast<u32>(Settings::values.resolution_factor.GetValue()));
737 } else { 755 } else {
738 rb.Push(static_cast<u32>(Service::VI::DisplayResolution::UndockedWidth) * 756 rb.Push(static_cast<u32>(Service::VI::DisplayResolution::UndockedWidth) *
739 static_cast<u32>(Settings::values.resolution_factor)); 757 static_cast<u32>(Settings::values.resolution_factor.GetValue()));
740 rb.Push(static_cast<u32>(Service::VI::DisplayResolution::UndockedHeight) * 758 rb.Push(static_cast<u32>(Service::VI::DisplayResolution::UndockedHeight) *
741 static_cast<u32>(Settings::values.resolution_factor)); 759 static_cast<u32>(Settings::values.resolution_factor.GetValue()));
742 } 760 }
743} 761}
744 762
@@ -842,7 +860,7 @@ public:
842 {110, nullptr, "NeedsToExitProcess"}, 860 {110, nullptr, "NeedsToExitProcess"},
843 {120, nullptr, "GetLibraryAppletInfo"}, 861 {120, nullptr, "GetLibraryAppletInfo"},
844 {150, nullptr, "RequestForAppletToGetForeground"}, 862 {150, nullptr, "RequestForAppletToGetForeground"},
845 {160, nullptr, "GetIndirectLayerConsumerHandle"}, 863 {160, &ILibraryAppletAccessor::GetIndirectLayerConsumerHandle, "GetIndirectLayerConsumerHandle"},
846 }; 864 };
847 // clang-format on 865 // clang-format on
848 866
@@ -961,6 +979,18 @@ private:
961 rb.PushCopyObjects(applet->GetBroker().GetInteractiveDataEvent()); 979 rb.PushCopyObjects(applet->GetBroker().GetInteractiveDataEvent());
962 } 980 }
963 981
982 void GetIndirectLayerConsumerHandle(Kernel::HLERequestContext& ctx) {
983 LOG_WARNING(Service_AM, "(STUBBED) called");
984
985 // We require a non-zero handle to be valid. Using 0xdeadbeef allows us to trace if this is
986 // actually used anywhere
987 constexpr u64 handle = 0xdeadbeef;
988
989 IPC::ResponseBuilder rb{ctx, 4};
990 rb.Push(RESULT_SUCCESS);
991 rb.Push(handle);
992 }
993
964 std::shared_ptr<Applets::Applet> applet; 994 std::shared_ptr<Applets::Applet> applet;
965}; 995};
966 996
@@ -1132,6 +1162,7 @@ IApplicationFunctions::IApplicationFunctions(Core::System& system_)
1132 {24, nullptr, "GetLaunchStorageInfoForDebug"}, 1162 {24, nullptr, "GetLaunchStorageInfoForDebug"},
1133 {25, &IApplicationFunctions::ExtendSaveData, "ExtendSaveData"}, 1163 {25, &IApplicationFunctions::ExtendSaveData, "ExtendSaveData"},
1134 {26, &IApplicationFunctions::GetSaveDataSize, "GetSaveDataSize"}, 1164 {26, &IApplicationFunctions::GetSaveDataSize, "GetSaveDataSize"},
1165 {27, nullptr, "CreateCacheStorage"},
1135 {30, &IApplicationFunctions::BeginBlockingHomeButtonShortAndLongPressed, "BeginBlockingHomeButtonShortAndLongPressed"}, 1166 {30, &IApplicationFunctions::BeginBlockingHomeButtonShortAndLongPressed, "BeginBlockingHomeButtonShortAndLongPressed"},
1136 {31, &IApplicationFunctions::EndBlockingHomeButtonShortAndLongPressed, "EndBlockingHomeButtonShortAndLongPressed"}, 1167 {31, &IApplicationFunctions::EndBlockingHomeButtonShortAndLongPressed, "EndBlockingHomeButtonShortAndLongPressed"},
1137 {32, &IApplicationFunctions::BeginBlockingHomeButton, "BeginBlockingHomeButton"}, 1168 {32, &IApplicationFunctions::BeginBlockingHomeButton, "BeginBlockingHomeButton"},
@@ -1157,6 +1188,8 @@ IApplicationFunctions::IApplicationFunctions(Core::System& system_)
1157 {120, nullptr, "ExecuteProgram"}, 1188 {120, nullptr, "ExecuteProgram"},
1158 {121, nullptr, "ClearUserChannel"}, 1189 {121, nullptr, "ClearUserChannel"},
1159 {122, nullptr, "UnpopToUserChannel"}, 1190 {122, nullptr, "UnpopToUserChannel"},
1191 {123, nullptr, "GetPreviousProgramIndex"},
1192 {124, nullptr, "EnableApplicationAllThreadDumpOnCrash"},
1160 {130, &IApplicationFunctions::GetGpuErrorDetectedSystemEvent, "GetGpuErrorDetectedSystemEvent"}, 1193 {130, &IApplicationFunctions::GetGpuErrorDetectedSystemEvent, "GetGpuErrorDetectedSystemEvent"},
1161 {140, &IApplicationFunctions::GetFriendInvitationStorageChannelEvent, "GetFriendInvitationStorageChannelEvent"}, 1194 {140, &IApplicationFunctions::GetFriendInvitationStorageChannelEvent, "GetFriendInvitationStorageChannelEvent"},
1162 {141, nullptr, "TryPopFromFriendInvitationStorageChannel"}, 1195 {141, nullptr, "TryPopFromFriendInvitationStorageChannel"},
@@ -1339,14 +1372,25 @@ void IApplicationFunctions::GetDisplayVersion(Kernel::HLERequestContext& ctx) {
1339 1372
1340 std::array<u8, 0x10> version_string{}; 1373 std::array<u8, 0x10> version_string{};
1341 1374
1342 FileSys::PatchManager pm{system.CurrentProcess()->GetTitleID()}; 1375 const auto res = [this] {
1343 const auto res = pm.GetControlMetadata(); 1376 const auto title_id = system.CurrentProcess()->GetTitleID();
1377
1378 FileSys::PatchManager pm{title_id};
1379 auto res = pm.GetControlMetadata();
1380 if (res.first != nullptr) {
1381 return res;
1382 }
1383
1384 FileSys::PatchManager pm_update{FileSys::GetUpdateTitleID(title_id)};
1385 return pm_update.GetControlMetadata();
1386 }();
1387
1344 if (res.first != nullptr) { 1388 if (res.first != nullptr) {
1345 const auto& version = res.first->GetVersionString(); 1389 const auto& version = res.first->GetVersionString();
1346 std::copy(version.begin(), version.end(), version_string.begin()); 1390 std::copy(version.begin(), version.end(), version_string.begin());
1347 } else { 1391 } else {
1348 constexpr u128 default_version = {1, 0}; 1392 constexpr char default_version[]{"1.0.0"};
1349 std::memcpy(version_string.data(), default_version.data(), sizeof(u128)); 1393 std::memcpy(version_string.data(), default_version, sizeof(default_version));
1350 } 1394 }
1351 1395
1352 IPC::ResponseBuilder rb{ctx, 6}; 1396 IPC::ResponseBuilder rb{ctx, 6};
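GetDisplayVersion above now tries the base title's control metadata first and, if that is missing, the corresponding update title, before finally falling back to a literal "1.0.0" string. The sketch below reproduces that fallback order with a placeholder metadata lookup and an assumed update-title-ID convention standing in for PatchManager and GetUpdateTitleID; none of these helpers are the real file_sys API.

// Sketch of the version-string fallback order, with placeholder lookups.
#include <algorithm>
#include <array>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <optional>
#include <string>

// Hypothetical lookup: returns a version string if control metadata exists.
std::optional<std::string> LookupControlVersion(std::uint64_t title_id) {
    if (title_id == 0x0100000000010800ULL) { // pretend only the update has metadata
        return "1.2.3";
    }
    return std::nullopt;
}

constexpr std::uint64_t GetUpdateTitleId(std::uint64_t base) {
    return base | 0x800; // assumed convention for the update title ID
}

int main() {
    constexpr std::uint64_t base_title = 0x0100000000010000ULL;

    std::array<char, 0x10> version_string{};

    // Try the base title first, then the update, mirroring the lambda in the diff.
    auto version = LookupControlVersion(base_title);
    if (!version) {
        version = LookupControlVersion(GetUpdateTitleId(base_title));
    }

    if (version) {
        std::memcpy(version_string.data(), version->data(),
                    std::min(version->size(), version_string.size()));
    } else {
        constexpr char default_version[] = "1.0.0";
        std::memcpy(version_string.data(), default_version, sizeof(default_version));
    }

    std::cout << version_string.data() << '\n';
}

Copying a null-terminated "1.0.0" literal, as the new code does, also avoids the old behaviour of writing a raw u128 into a buffer that callers interpret as a string.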
diff --git a/src/core/hle/service/am/am.h b/src/core/hle/service/am/am.h
index 469f7f814..6cfb11b48 100644
--- a/src/core/hle/service/am/am.h
+++ b/src/core/hle/service/am/am.h
@@ -138,8 +138,9 @@ private:
138 void SetFocusHandlingMode(Kernel::HLERequestContext& ctx); 138 void SetFocusHandlingMode(Kernel::HLERequestContext& ctx);
139 void SetRestartMessageEnabled(Kernel::HLERequestContext& ctx); 139 void SetRestartMessageEnabled(Kernel::HLERequestContext& ctx);
140 void SetOutOfFocusSuspendingEnabled(Kernel::HLERequestContext& ctx); 140 void SetOutOfFocusSuspendingEnabled(Kernel::HLERequestContext& ctx);
141 void SetScreenShotImageOrientation(Kernel::HLERequestContext& ctx); 141 void SetAlbumImageOrientation(Kernel::HLERequestContext& ctx);
142 void CreateManagedDisplayLayer(Kernel::HLERequestContext& ctx); 142 void CreateManagedDisplayLayer(Kernel::HLERequestContext& ctx);
143 void CreateManagedDisplaySeparableLayer(Kernel::HLERequestContext& ctx);
143 void SetHandlesRequestToDisplay(Kernel::HLERequestContext& ctx); 144 void SetHandlesRequestToDisplay(Kernel::HLERequestContext& ctx);
144 void SetIdleTimeDetectionExtension(Kernel::HLERequestContext& ctx); 145 void SetIdleTimeDetectionExtension(Kernel::HLERequestContext& ctx);
145 void GetIdleTimeDetectionExtension(Kernel::HLERequestContext& ctx); 146 void GetIdleTimeDetectionExtension(Kernel::HLERequestContext& ctx);
diff --git a/src/core/hle/service/am/applets/software_keyboard.cpp b/src/core/hle/service/am/applets/software_keyboard.cpp
index 54e63c138..fbe3686ae 100644
--- a/src/core/hle/service/am/applets/software_keyboard.cpp
+++ b/src/core/hle/service/am/applets/software_keyboard.cpp
@@ -30,7 +30,7 @@ static Core::Frontend::SoftwareKeyboardParameters ConvertToFrontendParameters(
30 config.sub_text.size()); 30 config.sub_text.size());
31 params.guide_text = Common::UTF16StringFromFixedZeroTerminatedBuffer(config.guide_text.data(), 31 params.guide_text = Common::UTF16StringFromFixedZeroTerminatedBuffer(config.guide_text.data(),
32 config.guide_text.size()); 32 config.guide_text.size());
33 params.initial_text = initial_text; 33 params.initial_text = std::move(initial_text);
34 params.max_length = config.length_limit == 0 ? DEFAULT_MAX_LENGTH : config.length_limit; 34 params.max_length = config.length_limit == 0 ? DEFAULT_MAX_LENGTH : config.length_limit;
35 params.password = static_cast<bool>(config.is_password); 35 params.password = static_cast<bool>(config.is_password);
36 params.cursor_at_beginning = static_cast<bool>(config.initial_cursor_position); 36 params.cursor_at_beginning = static_cast<bool>(config.initial_cursor_position);
@@ -60,7 +60,7 @@ void SoftwareKeyboard::Initialize() {
60 std::memcpy(&config, keyboard_config.data(), sizeof(KeyboardConfig)); 60 std::memcpy(&config, keyboard_config.data(), sizeof(KeyboardConfig));
61 61
62 const auto work_buffer_storage = broker.PopNormalDataToApplet(); 62 const auto work_buffer_storage = broker.PopNormalDataToApplet();
63 ASSERT(work_buffer_storage != nullptr); 63 ASSERT_OR_EXECUTE(work_buffer_storage != nullptr, { return; });
64 const auto& work_buffer = work_buffer_storage->GetData(); 64 const auto& work_buffer = work_buffer_storage->GetData();
65 65
66 if (config.initial_string_size == 0) 66 if (config.initial_string_size == 0)
@@ -109,7 +109,7 @@ void SoftwareKeyboard::Execute() {
109 109
110 const auto parameters = ConvertToFrontendParameters(config, initial_text); 110 const auto parameters = ConvertToFrontendParameters(config, initial_text);
111 111
112 frontend.RequestText([this](std::optional<std::u16string> text) { WriteText(text); }, 112 frontend.RequestText([this](std::optional<std::u16string> text) { WriteText(std::move(text)); },
113 parameters); 113 parameters);
114} 114}
115 115
diff --git a/src/core/hle/service/am/spsm.cpp b/src/core/hle/service/am/spsm.cpp
index 003ee8667..f27729ce7 100644
--- a/src/core/hle/service/am/spsm.cpp
+++ b/src/core/hle/service/am/spsm.cpp
@@ -10,17 +10,17 @@ SPSM::SPSM() : ServiceFramework{"spsm"} {
10 // clang-format off 10 // clang-format off
11 static const FunctionInfo functions[] = { 11 static const FunctionInfo functions[] = {
12 {0, nullptr, "GetState"}, 12 {0, nullptr, "GetState"},
13 {1, nullptr, "SleepSystemAndWaitAwake"}, 13 {1, nullptr, "EnterSleep"},
14 {2, nullptr, "Unknown1"}, 14 {2, nullptr, "GetLastWakeReason"},
15 {3, nullptr, "Unknown2"}, 15 {3, nullptr, "Shutdown"},
16 {4, nullptr, "GetNotificationMessageEventHandle"}, 16 {4, nullptr, "GetNotificationMessageEventHandle"},
17 {5, nullptr, "Unknown3"}, 17 {5, nullptr, "ReceiveNotificationMessage"},
18 {6, nullptr, "Unknown4"}, 18 {6, nullptr, "AnalyzeLogForLastSleepWakeSequence"},
19 {7, nullptr, "Unknown5"}, 19 {7, nullptr, "ResetEventLog"},
20 {8, nullptr, "AnalyzePerformanceLogForLastSleepWakeSequence"}, 20 {8, nullptr, "AnalyzePerformanceLogForLastSleepWakeSequence"},
21 {9, nullptr, "ChangeHomeButtonLongPressingTime"}, 21 {9, nullptr, "ChangeHomeButtonLongPressingTime"},
22 {10, nullptr, "Unknown6"}, 22 {10, nullptr, "PutErrorState"},
23 {11, nullptr, "Unknown7"}, 23 {11, nullptr, "InvalidateCurrentHomeButtonPressing"},
24 }; 24 };
25 // clang-format on 25 // clang-format on
26 26
diff --git a/src/core/hle/service/aoc/aoc_u.cpp b/src/core/hle/service/aoc/aoc_u.cpp
index 4227a4adf..8e79f707b 100644
--- a/src/core/hle/service/aoc/aoc_u.cpp
+++ b/src/core/hle/service/aoc/aoc_u.cpp
@@ -60,6 +60,7 @@ AOC_U::AOC_U(Core::System& system)
60 {6, nullptr, "PrepareAddOnContentByApplicationId"}, 60 {6, nullptr, "PrepareAddOnContentByApplicationId"},
61 {7, &AOC_U::PrepareAddOnContent, "PrepareAddOnContent"}, 61 {7, &AOC_U::PrepareAddOnContent, "PrepareAddOnContent"},
62 {8, &AOC_U::GetAddOnContentListChangedEvent, "GetAddOnContentListChangedEvent"}, 62 {8, &AOC_U::GetAddOnContentListChangedEvent, "GetAddOnContentListChangedEvent"},
63 {9, nullptr, "GetAddOnContentLostErrorCode"},
63 {100, nullptr, "CreateEcPurchasedEventManager"}, 64 {100, nullptr, "CreateEcPurchasedEventManager"},
64 {101, nullptr, "CreatePermanentEcPurchasedEventManager"}, 65 {101, nullptr, "CreatePermanentEcPurchasedEventManager"},
65 }; 66 };
diff --git a/src/core/hle/service/bcat/bcat.cpp b/src/core/hle/service/bcat/bcat.cpp
index 8bb2528c9..b31766212 100644
--- a/src/core/hle/service/bcat/bcat.cpp
+++ b/src/core/hle/service/bcat/bcat.cpp
@@ -14,6 +14,8 @@ BCAT::BCAT(Core::System& system, std::shared_ptr<Module> module,
14 {0, &BCAT::CreateBcatService, "CreateBcatService"}, 14 {0, &BCAT::CreateBcatService, "CreateBcatService"},
15 {1, &BCAT::CreateDeliveryCacheStorageService, "CreateDeliveryCacheStorageService"}, 15 {1, &BCAT::CreateDeliveryCacheStorageService, "CreateDeliveryCacheStorageService"},
16 {2, &BCAT::CreateDeliveryCacheStorageServiceWithApplicationId, "CreateDeliveryCacheStorageServiceWithApplicationId"}, 16 {2, &BCAT::CreateDeliveryCacheStorageServiceWithApplicationId, "CreateDeliveryCacheStorageServiceWithApplicationId"},
17 {3, nullptr, "CreateDeliveryCacheProgressService"},
18 {4, nullptr, "CreateDeliveryCacheProgressServiceWithApplicationId"},
17 }; 19 };
18 // clang-format on 20 // clang-format on
19 RegisterHandlers(functions); 21 RegisterHandlers(functions);
diff --git a/src/core/hle/service/bcat/module.cpp b/src/core/hle/service/bcat/module.cpp
index 34aba7a27..603b64d4f 100644
--- a/src/core/hle/service/bcat/module.cpp
+++ b/src/core/hle/service/bcat/module.cpp
@@ -143,10 +143,13 @@ public:
143 {20401, nullptr, "UnregisterSystemApplicationDeliveryTask"}, 143 {20401, nullptr, "UnregisterSystemApplicationDeliveryTask"},
144 {20410, nullptr, "SetSystemApplicationDeliveryTaskTimer"}, 144 {20410, nullptr, "SetSystemApplicationDeliveryTaskTimer"},
145 {30100, &IBcatService::SetPassphrase, "SetPassphrase"}, 145 {30100, &IBcatService::SetPassphrase, "SetPassphrase"},
146 {30101, nullptr, "Unknown"},
147 {30102, nullptr, "Unknown2"},
146 {30200, nullptr, "RegisterBackgroundDeliveryTask"}, 148 {30200, nullptr, "RegisterBackgroundDeliveryTask"},
147 {30201, nullptr, "UnregisterBackgroundDeliveryTask"}, 149 {30201, nullptr, "UnregisterBackgroundDeliveryTask"},
148 {30202, nullptr, "BlockDeliveryTask"}, 150 {30202, nullptr, "BlockDeliveryTask"},
149 {30203, nullptr, "UnblockDeliveryTask"}, 151 {30203, nullptr, "UnblockDeliveryTask"},
152 {30210, nullptr, "SetDeliveryTaskTimer"},
150 {30300, nullptr, "RegisterSystemApplicationDeliveryTasks"}, 153 {30300, nullptr, "RegisterSystemApplicationDeliveryTasks"},
151 {90100, nullptr, "EnumerateBackgroundDeliveryTask"}, 154 {90100, nullptr, "EnumerateBackgroundDeliveryTask"},
152 {90200, nullptr, "GetDeliveryList"}, 155 {90200, nullptr, "GetDeliveryList"},
diff --git a/src/core/hle/service/bpc/bpc.cpp b/src/core/hle/service/bpc/bpc.cpp
index 1c1ecdb60..fac6b2f9c 100644
--- a/src/core/hle/service/bpc/bpc.cpp
+++ b/src/core/hle/service/bpc/bpc.cpp
@@ -23,9 +23,14 @@ public:
23 {5, nullptr, "GetBoardPowerControlEvent"}, 23 {5, nullptr, "GetBoardPowerControlEvent"},
24 {6, nullptr, "GetSleepButtonState"}, 24 {6, nullptr, "GetSleepButtonState"},
25 {7, nullptr, "GetPowerEvent"}, 25 {7, nullptr, "GetPowerEvent"},
26 {8, nullptr, "Unknown1"}, 26 {8, nullptr, "CreateWakeupTimer"},
27 {9, nullptr, "Unknown2"}, 27 {9, nullptr, "CancelWakeupTimer"},
28 {10, nullptr, "Unknown3"}, 28 {10, nullptr, "EnableWakeupTimerOnDevice"},
29 {11, nullptr, "CreateWakeupTimerEx"},
30 {12, nullptr, "GetLastEnabledWakeupTimerType"},
31 {13, nullptr, "CleanAllWakeupTimers"},
32 {14, nullptr, "Unknown"},
33 {15, nullptr, "Unknown2"},
29 }; 34 };
30 // clang-format on 35 // clang-format on
31 36
@@ -38,10 +43,11 @@ public:
38 explicit BPC_R() : ServiceFramework{"bpc:r"} { 43 explicit BPC_R() : ServiceFramework{"bpc:r"} {
39 // clang-format off 44 // clang-format off
40 static const FunctionInfo functions[] = { 45 static const FunctionInfo functions[] = {
41 {0, nullptr, "GetExternalRtcValue"}, 46 {0, nullptr, "GetRtcTime"},
42 {1, nullptr, "SetExternalRtcValue"}, 47 {1, nullptr, "SetRtcTime"},
43 {2, nullptr, "ReadExternalRtcResetFlag"}, 48 {2, nullptr, "GetRtcResetDetected"},
44 {3, nullptr, "ClearExternalRtcResetFlag"}, 49 {3, nullptr, "ClearRtcResetDetected"},
50 {4, nullptr, "SetUpRtcResetOnShutdown"},
45 }; 51 };
46 // clang-format on 52 // clang-format on
47 53
diff --git a/src/core/hle/service/btdrv/btdrv.cpp b/src/core/hle/service/btdrv/btdrv.cpp
index 40a06c9fd..f311afa2f 100644
--- a/src/core/hle/service/btdrv/btdrv.cpp
+++ b/src/core/hle/service/btdrv/btdrv.cpp
@@ -58,102 +58,103 @@ public:
58 {1, nullptr, "InitializeBluetooth"}, 58 {1, nullptr, "InitializeBluetooth"},
59 {2, nullptr, "EnableBluetooth"}, 59 {2, nullptr, "EnableBluetooth"},
60 {3, nullptr, "DisableBluetooth"}, 60 {3, nullptr, "DisableBluetooth"},
61 {4, nullptr, "CleanupBluetooth"}, 61 {4, nullptr, "FinalizeBluetooth"},
62 {5, nullptr, "GetAdapterProperties"}, 62 {5, nullptr, "GetAdapterProperties"},
63 {6, nullptr, "GetAdapterProperty"}, 63 {6, nullptr, "GetAdapterProperty"},
64 {7, nullptr, "SetAdapterProperty"}, 64 {7, nullptr, "SetAdapterProperty"},
65 {8, nullptr, "StartDiscovery"}, 65 {8, nullptr, "StartInquiry"},
66 {9, nullptr, "CancelDiscovery"}, 66 {9, nullptr, "StopInquiry"},
67 {10, nullptr, "CreateBond"}, 67 {10, nullptr, "CreateBond"},
68 {11, nullptr, "RemoveBond"}, 68 {11, nullptr, "RemoveBond"},
69 {12, nullptr, "CancelBond"}, 69 {12, nullptr, "CancelBond"},
70 {13, nullptr, "PinReply"}, 70 {13, nullptr, "RespondToPinRequest"},
71 {14, nullptr, "SspReply"}, 71 {14, nullptr, "RespondToSspRequest"},
72 {15, nullptr, "GetEventInfo"}, 72 {15, nullptr, "GetEventInfo"},
73 {16, nullptr, "InitializeHid"}, 73 {16, nullptr, "InitializeHid"},
74 {17, nullptr, "HidConnect"}, 74 {17, nullptr, "OpenHidConnection"},
75 {18, nullptr, "HidDisconnect"}, 75 {18, nullptr, "CloseHidConnection"},
76 {19, nullptr, "HidSendData"}, 76 {19, nullptr, "WriteHidData"},
77 {20, nullptr, "HidSendData2"}, 77 {20, nullptr, "WriteHidData2"},
78 {21, nullptr, "HidSetReport"}, 78 {21, nullptr, "SetHidReport"},
79 {22, nullptr, "HidGetReport"}, 79 {22, nullptr, "GetHidReport"},
80 {23, nullptr, "HidWakeController"}, 80 {23, nullptr, "TriggerConnection"},
81 {24, nullptr, "HidAddPairedDevice"}, 81 {24, nullptr, "AddPairedDeviceInfo"},
82 {25, nullptr, "HidGetPairedDevice"}, 82 {25, nullptr, "GetPairedDeviceInfo"},
83 {26, nullptr, "CleanupHid"}, 83 {26, nullptr, "FinalizeHid"},
84 {27, nullptr, "HidGetEventInfo"}, 84 {27, nullptr, "GetHidEventInfo"},
85 {28, nullptr, "ExtSetTsi"}, 85 {28, nullptr, "SetTsi"},
86 {29, nullptr, "ExtSetBurstMode"}, 86 {29, nullptr, "EnableBurstMode"},
87 {30, nullptr, "ExtSetZeroRetran"}, 87 {30, nullptr, "SetZeroRetransmission"},
88 {31, nullptr, "ExtSetMcMode"}, 88 {31, nullptr, "EnableMcMode"},
89 {32, nullptr, "ExtStartLlrMode"}, 89 {32, nullptr, "EnableLlrScan"},
90 {33, nullptr, "ExtExitLlrMode"}, 90 {33, nullptr, "DisableLlrScan"},
91 {34, nullptr, "ExtSetRadio"}, 91 {34, nullptr, "EnableRadio"},
92 {35, nullptr, "ExtSetVisibility"}, 92 {35, nullptr, "SetVisibility"},
93 {36, nullptr, "ExtSetTbfcScan"}, 93 {36, nullptr, "EnableTbfcScan"},
94 {37, nullptr, "RegisterHidReportEvent"}, 94 {37, nullptr, "RegisterHidReportEvent"},
95 {38, nullptr, "HidGetReportEventInfo"}, 95 {38, nullptr, "GetHidReportEventInfo"},
96 {39, nullptr, "GetLatestPlr"}, 96 {39, nullptr, "GetLatestPlr"},
97 {40, nullptr, "ExtGetPendingConnections"}, 97 {40, nullptr, "GetPendingConnections"},
98 {41, nullptr, "GetChannelMap"}, 98 {41, nullptr, "GetChannelMap"},
99 {42, nullptr, "EnableBluetoothBoostSetting"}, 99 {42, nullptr, "EnableTxPowerBoostSetting"},
100 {43, nullptr, "IsBluetoothBoostSettingEnabled"}, 100 {43, nullptr, "IsTxPowerBoostSettingEnabled"},
101 {44, nullptr, "EnableBluetoothAfhSetting"}, 101 {44, nullptr, "EnableAfhSetting"},
102 {45, nullptr, "IsBluetoothAfhSettingEnabled"}, 102 {45, nullptr, "IsAfhSettingEnabled"},
103 {46, nullptr, "InitializeBluetoothLe"}, 103 {46, nullptr, "InitializeBle"},
104 {47, nullptr, "EnableBluetoothLe"}, 104 {47, nullptr, "EnableBle"},
105 {48, nullptr, "DisableBluetoothLe"}, 105 {48, nullptr, "DisableBle"},
106 {49, nullptr, "CleanupBluetoothLe"}, 106 {49, nullptr, "FinalizeBle"},
107 {50, nullptr, "SetLeVisibility"}, 107 {50, nullptr, "SetBleVisibility"},
108 {51, nullptr, "SetLeConnectionParameter"}, 108 {51, nullptr, "SetBleConnectionParameter"},
109 {52, nullptr, "SetLeDefaultConnectionParameter"}, 109 {52, nullptr, "SetBleDefaultConnectionParameter"},
110 {53, nullptr, "SetLeAdvertiseData"}, 110 {53, nullptr, "SetBleAdvertiseData"},
111 {54, nullptr, "SetLeAdvertiseParameter"}, 111 {54, nullptr, "SetBleAdvertiseParameter"},
112 {55, nullptr, "StartLeScan"}, 112 {55, nullptr, "StartBleScan"},
113 {56, nullptr, "StopLeScan"}, 113 {56, nullptr, "StopBleScan"},
114 {57, nullptr, "AddLeScanFilterCondition"}, 114 {57, nullptr, "AddBleScanFilterCondition"},
115 {58, nullptr, "DeleteLeScanFilterCondition"}, 115 {58, nullptr, "DeleteBleScanFilterCondition"},
116 {59, nullptr, "DeleteLeScanFilter"}, 116 {59, nullptr, "DeleteBleScanFilter"},
117 {60, nullptr, "ClearLeScanFilters"}, 117 {60, nullptr, "ClearBleScanFilters"},
118 {61, nullptr, "EnableLeScanFilter"}, 118 {61, nullptr, "EnableBleScanFilter"},
119 {62, nullptr, "RegisterLeClient"}, 119 {62, nullptr, "RegisterGattClient"},
120 {63, nullptr, "UnregisterLeClient"}, 120 {63, nullptr, "UnregisterGattClient"},
121 {64, nullptr, "UnregisterLeClientAll"}, 121 {64, nullptr, "UnregisterAllGattClients"},
122 {65, nullptr, "LeClientConnect"}, 122 {65, nullptr, "ConnectGattServer"},
123 {66, nullptr, "LeClientCancelConnection"}, 123 {66, nullptr, "CancelConnectGattServer"},
124 {67, nullptr, "LeClientDisconnect"}, 124 {67, nullptr, "DisconnectGattServer"},
125 {68, nullptr, "LeClientGetAttributes"}, 125 {68, nullptr, "GetGattAttribute"},
126 {69, nullptr, "LeClientDiscoverService"}, 126 {69, nullptr, "GetGattService"},
127 {70, nullptr, "LeClientConfigureMtu"}, 127 {70, nullptr, "ConfigureAttMtu"},
128 {71, nullptr, "RegisterLeServer"}, 128 {71, nullptr, "RegisterGattServer"},
129 {72, nullptr, "UnregisterLeServer"}, 129 {72, nullptr, "UnregisterGattServer"},
130 {73, nullptr, "LeServerConnect"}, 130 {73, nullptr, "ConnectGattClient"},
131 {74, nullptr, "LeServerDisconnect"}, 131 {74, nullptr, "DisconnectGattClient"},
132 {75, nullptr, "CreateLeService"}, 132 {75, nullptr, "AddGattService"},
133 {76, nullptr, "StartLeService"}, 133 {76, nullptr, "EnableGattService"},
134 {77, nullptr, "AddLeCharacteristic"}, 134 {77, nullptr, "AddGattCharacteristic"},
135 {78, nullptr, "AddLeDescriptor"}, 135 {78, nullptr, "AddGattDescriptor"},
136 {79, nullptr, "GetLeCoreEventInfo"}, 136 {79, nullptr, "GetBleManagedEventInfo"},
137 {80, nullptr, "LeGetFirstCharacteristic"}, 137 {80, nullptr, "GetGattFirstCharacteristic"},
138 {81, nullptr, "LeGetNextCharacteristic"}, 138 {81, nullptr, "GetGattNextCharacteristic"},
139 {82, nullptr, "LeGetFirstDescriptor"}, 139 {82, nullptr, "GetGattFirstDescriptor"},
140 {83, nullptr, "LeGetNextDescriptor"}, 140 {83, nullptr, "GetGattNextDescriptor"},
141 {84, nullptr, "RegisterLeCoreDataPath"}, 141 {84, nullptr, "RegisterGattManagedDataPath"},
142 {85, nullptr, "UnregisterLeCoreDataPath"}, 142 {85, nullptr, "UnregisterGattManagedDataPath"},
143 {86, nullptr, "RegisterLeHidDataPath"}, 143 {86, nullptr, "RegisterGattHidDataPath"},
144 {87, nullptr, "UnregisterLeHidDataPath"}, 144 {87, nullptr, "UnregisterGattHidDataPath"},
145 {88, nullptr, "RegisterLeDataPath"}, 145 {88, nullptr, "RegisterGattDataPath"},
146 {89, nullptr, "UnregisterLeDataPath"}, 146 {89, nullptr, "UnregisterGattDataPath"},
147 {90, nullptr, "LeClientReadCharacteristic"}, 147 {90, nullptr, "ReadGattCharacteristic"},
148 {91, nullptr, "LeClientReadDescriptor"}, 148 {91, nullptr, "ReadGattDescriptor"},
149 {92, nullptr, "LeClientWriteCharacteristic"}, 149 {92, nullptr, "WriteGattCharacteristic"},
150 {93, nullptr, "LeClientWriteDescriptor"}, 150 {93, nullptr, "WriteGattDescriptor"},
151 {94, nullptr, "LeClientRegisterNotification"}, 151 {94, nullptr, "RegisterGattNotification"},
152 {95, nullptr, "LeClientDeregisterNotification"}, 152 {95, nullptr, "UnregisterGattNotification"},
153 {96, nullptr, "GetLeHidEventInfo"}, 153 {96, nullptr, "GetLeHidEventInfo"},
154 {97, nullptr, "RegisterBleHidEvent"}, 154 {97, nullptr, "RegisterBleHidEvent"},
155 {98, nullptr, "SetLeScanParameter"}, 155 {98, nullptr, "SetBleScanParameter"},
156 {256, nullptr, "GetIsManufacturingMode"}, 156 {99, nullptr, "MoveToSecondaryPiconet"},
157 {256, nullptr, "IsManufacturingMode"},
157 {257, nullptr, "EmulateBluetoothCrash"}, 158 {257, nullptr, "EmulateBluetoothCrash"},
158 {258, nullptr, "GetBleChannelMap"}, 159 {258, nullptr, "GetBleChannelMap"},
159 }; 160 };
diff --git a/src/core/hle/service/btm/btm.cpp b/src/core/hle/service/btm/btm.cpp
index 251b3c9df..0d251c6d0 100644
--- a/src/core/hle/service/btm/btm.cpp
+++ b/src/core/hle/service/btm/btm.cpp
@@ -132,66 +132,71 @@ public:
132 explicit BTM() : ServiceFramework{"btm"} { 132 explicit BTM() : ServiceFramework{"btm"} {
133 // clang-format off 133 // clang-format off
134 static const FunctionInfo functions[] = { 134 static const FunctionInfo functions[] = {
135 {0, nullptr, "Unknown1"}, 135 {0, nullptr, "GetState"},
136 {1, nullptr, "Unknown2"}, 136 {1, nullptr, "GetHostDeviceProperty"},
137 {2, nullptr, "RegisterSystemEventForConnectedDeviceCondition"}, 137 {2, nullptr, "AcquireDeviceConditionEvent"},
138 {3, nullptr, "Unknown3"}, 138 {3, nullptr, "GetDeviceCondition"},
139 {4, nullptr, "Unknown4"}, 139 {4, nullptr, "SetBurstMode"},
140 {5, nullptr, "Unknown5"}, 140 {5, nullptr, "SetSlotMode"},
141 {6, nullptr, "Unknown6"}, 141 {6, nullptr, "SetBluetoothMode"},
142 {7, nullptr, "Unknown7"}, 142 {7, nullptr, "SetWlanMode"},
143 {8, nullptr, "RegisterSystemEventForRegisteredDeviceInfo"}, 143 {8, nullptr, "AcquireDeviceInfoEvent"},
144 {9, nullptr, "Unknown8"}, 144 {9, nullptr, "GetDeviceInfo"},
145 {10, nullptr, "Unknown9"}, 145 {10, nullptr, "AddDeviceInfo"},
146 {11, nullptr, "Unknown10"}, 146 {11, nullptr, "RemoveDeviceInfo"},
147 {12, nullptr, "Unknown11"}, 147 {12, nullptr, "IncreaseDeviceInfoOrder"},
148 {13, nullptr, "Unknown12"}, 148 {13, nullptr, "LlrNotify"},
149 {14, nullptr, "EnableRadio"}, 149 {14, nullptr, "EnableRadio"},
150 {15, nullptr, "DisableRadio"}, 150 {15, nullptr, "DisableRadio"},
151 {16, nullptr, "Unknown13"}, 151 {16, nullptr, "HidDisconnect"},
152 {17, nullptr, "Unknown14"}, 152 {17, nullptr, "HidSetRetransmissionMode"},
153 {18, nullptr, "Unknown15"}, 153 {18, nullptr, "AcquireAwakeReqEvent"},
154 {19, nullptr, "Unknown16"}, 154 {19, nullptr, "AcquireLlrStateEvent"},
155 {20, nullptr, "Unknown17"}, 155 {20, nullptr, "IsLlrStarted"},
156 {21, nullptr, "Unknown18"}, 156 {21, nullptr, "EnableSlotSaving"},
157 {22, nullptr, "Unknown19"}, 157 {22, nullptr, "ProtectDeviceInfo"},
158 {23, nullptr, "Unknown20"}, 158 {23, nullptr, "AcquireBleScanEvent"},
159 {24, nullptr, "Unknown21"}, 159 {24, nullptr, "GetBleScanParameterGeneral"},
160 {25, nullptr, "Unknown22"}, 160 {25, nullptr, "GetBleScanParameterSmartDevice"},
161 {26, nullptr, "Unknown23"}, 161 {26, nullptr, "StartBleScanForGeneral"},
162 {27, nullptr, "Unknown24"}, 162 {27, nullptr, "StopBleScanForGeneral"},
163 {28, nullptr, "Unknown25"}, 163 {28, nullptr, "GetBleScanResultsForGeneral"},
164 {29, nullptr, "Unknown26"}, 164 {29, nullptr, "StartBleScanForPairedDevice"},
165 {30, nullptr, "Unknown27"}, 165 {30, nullptr, "StopBleScanForPairedDevice"},
166 {31, nullptr, "Unknown28"}, 166 {31, nullptr, "StartBleScanForSmartDevice"},
167 {32, nullptr, "Unknown29"}, 167 {32, nullptr, "StopBleScanForSmartDevice"},
168 {33, nullptr, "Unknown30"}, 168 {33, nullptr, "GetBleScanResultsForSmartDevice"},
169 {34, nullptr, "Unknown31"}, 169 {34, nullptr, "AcquireBleConnectionEvent"},
170 {35, nullptr, "Unknown32"}, 170 {35, nullptr, "BleConnect"},
171 {36, nullptr, "Unknown33"}, 171 {36, nullptr, "BleOverrideConnection"},
172 {37, nullptr, "Unknown34"}, 172 {37, nullptr, "BleDisconnect"},
173 {38, nullptr, "Unknown35"}, 173 {38, nullptr, "BleGetConnectionState"},
174 {39, nullptr, "Unknown36"}, 174 {39, nullptr, "BleGetGattClientConditionList"},
175 {40, nullptr, "Unknown37"}, 175 {40, nullptr, "AcquireBlePairingEvent"},
176 {41, nullptr, "Unknown38"}, 176 {41, nullptr, "BlePairDevice"},
177 {42, nullptr, "Unknown39"}, 177 {42, nullptr, "BleUnpairDeviceOnBoth"},
178 {43, nullptr, "Unknown40"}, 178 {43, nullptr, "BleUnpairDevice"},
179 {44, nullptr, "Unknown41"}, 179 {44, nullptr, "BleGetPairedAddresses"},
180 {45, nullptr, "Unknown42"}, 180 {45, nullptr, "AcquireBleServiceDiscoveryEvent"},
181 {46, nullptr, "Unknown43"}, 181 {46, nullptr, "GetGattServices"},
182 {47, nullptr, "Unknown44"}, 182 {47, nullptr, "GetGattService"},
183 {48, nullptr, "Unknown45"}, 183 {48, nullptr, "GetGattIncludedServices"},
184 {49, nullptr, "Unknown46"}, 184 {49, nullptr, "GetBelongingService"},
185 {50, nullptr, "Unknown47"}, 185 {50, nullptr, "GetGattCharacteristics"},
186 {51, nullptr, "Unknown48"}, 186 {51, nullptr, "GetGattDescriptors"},
187 {52, nullptr, "Unknown49"}, 187 {52, nullptr, "AcquireBleMtuConfigEvent"},
188 {53, nullptr, "Unknown50"}, 188 {53, nullptr, "ConfigureBleMtu"},
189 {54, nullptr, "Unknown51"}, 189 {54, nullptr, "GetBleMtu"},
190 {55, nullptr, "Unknown52"}, 190 {55, nullptr, "RegisterBleGattDataPath"},
191 {56, nullptr, "Unknown53"}, 191 {56, nullptr, "UnregisterBleGattDataPath"},
192 {57, nullptr, "Unknown54"}, 192 {57, nullptr, "RegisterAppletResourceUserId"},
193 {58, nullptr, "Unknown55"}, 193 {58, nullptr, "UnregisterAppletResourceUserId"},
194 {59, nullptr, "Unknown56"}, 194 {59, nullptr, "SetAppletResourceUserId"},
195 {60, nullptr, "Unknown60"},
196 {61, nullptr, "Unknown61"},
197 {62, nullptr, "Unknown62"},
198 {63, nullptr, "Unknown63"},
199 {64, nullptr, "Unknown64"},
195 }; 200 };
196 // clang-format on 201 // clang-format on
197 202
@@ -204,19 +209,19 @@ public:
204 explicit BTM_DBG() : ServiceFramework{"btm:dbg"} { 209 explicit BTM_DBG() : ServiceFramework{"btm:dbg"} {
205 // clang-format off 210 // clang-format off
206 static const FunctionInfo functions[] = { 211 static const FunctionInfo functions[] = {
207 {0, nullptr, "RegisterSystemEventForDiscovery"}, 212 {0, nullptr, "AcquireDiscoveryEvent"},
208 {1, nullptr, "Unknown1"}, 213 {1, nullptr, "StartDiscovery"},
209 {2, nullptr, "Unknown2"}, 214 {2, nullptr, "CancelDiscovery"},
210 {3, nullptr, "Unknown3"}, 215 {3, nullptr, "GetDeviceProperty"},
211 {4, nullptr, "Unknown4"}, 216 {4, nullptr, "CreateBond"},
212 {5, nullptr, "Unknown5"}, 217 {5, nullptr, "CancelBond"},
213 {6, nullptr, "Unknown6"}, 218 {6, nullptr, "SetTsiMode"},
214 {7, nullptr, "Unknown7"}, 219 {7, nullptr, "GeneralTest"},
215 {8, nullptr, "Unknown8"}, 220 {8, nullptr, "HidConnect"},
216 {9, nullptr, "Unknown9"}, 221 {9, nullptr, "GeneralGet"},
217 {10, nullptr, "Unknown10"}, 222 {10, nullptr, "GetGattClientDisconnectionReason"},
218 {11, nullptr, "Unknown11"}, 223 {11, nullptr, "GetBleConnectionParameter"},
219 {12, nullptr, "Unknown11"}, 224 {12, nullptr, "GetBleConnectionParameterRequest"},
220 }; 225 };
221 // clang-format on 226 // clang-format on
222 227
diff --git a/src/core/hle/service/caps/caps.cpp b/src/core/hle/service/caps/caps.cpp
index 26c8a7081..ba5749b84 100644
--- a/src/core/hle/service/caps/caps.cpp
+++ b/src/core/hle/service/caps/caps.cpp
@@ -1,4 +1,4 @@
1// Copyright 2018 yuzu emulator team 1// Copyright 2018 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
diff --git a/src/core/hle/service/caps/caps.h b/src/core/hle/service/caps/caps.h
index fc70a4c27..b8c67b6e2 100644
--- a/src/core/hle/service/caps/caps.h
+++ b/src/core/hle/service/caps/caps.h
@@ -1,4 +1,4 @@
1// Copyright 2018 yuzu emulator team 1// Copyright 2018 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
@@ -12,73 +12,79 @@ class ServiceManager;
12 12
13namespace Service::Capture { 13namespace Service::Capture {
14 14
15enum AlbumImageOrientation { 15enum class AlbumImageOrientation {
16 Orientation0 = 0, 16 Orientation0 = 0,
17 Orientation1 = 1, 17 Orientation1 = 1,
18 Orientation2 = 2, 18 Orientation2 = 2,
19 Orientation3 = 3, 19 Orientation3 = 3,
20}; 20};
21 21
22enum AlbumReportOption { 22enum class AlbumReportOption {
23 Disable = 0, 23 Disable = 0,
24 Enable = 1, 24 Enable = 1,
25}; 25};
26 26
27enum ContentType : u8 { 27enum class ContentType : u8 {
28 Screenshot = 0, 28 Screenshot = 0,
29 Movie = 1, 29 Movie = 1,
30 ExtraMovie = 3, 30 ExtraMovie = 3,
31}; 31};
32 32
33enum AlbumStorage : u8 { 33enum class AlbumStorage : u8 {
34 NAND = 0, 34 NAND = 0,
35 SD = 1, 35 SD = 1,
36}; 36};
37 37
38struct AlbumFileDateTime { 38struct AlbumFileDateTime {
39 u16 year; 39 s16 year{};
40 u8 month; 40 s8 month{};
41 u8 day; 41 s8 day{};
42 u8 hour; 42 s8 hour{};
43 u8 minute; 43 s8 minute{};
44 u8 second; 44 s8 second{};
45 u8 uid; 45 s8 uid{};
46}; 46};
47static_assert(sizeof(AlbumFileDateTime) == 0x8, "AlbumFileDateTime has incorrect size.");
47 48
48struct AlbumEntry { 49struct AlbumEntry {
49 u64 size; 50 u64 size{};
50 u64 application_id; 51 u64 application_id{};
51 AlbumFileDateTime datetime; 52 AlbumFileDateTime datetime{};
52 AlbumStorage storage; 53 AlbumStorage storage{};
53 ContentType content; 54 ContentType content{};
54 u8 padding[6]; 55 INSERT_PADDING_BYTES(6);
55}; 56};
57static_assert(sizeof(AlbumEntry) == 0x20, "AlbumEntry has incorrect size.");
56 58
57struct AlbumFileEntry { 59struct AlbumFileEntry {
58 u64 size; 60 u64 size{}; // Size of the entry
59 u64 hash; 61 u64 hash{}; // AES256 with hardcoded key over AlbumEntry
60 AlbumFileDateTime datetime; 62 AlbumFileDateTime datetime{};
61 AlbumStorage storage; 63 AlbumStorage storage{};
62 ContentType content; 64 ContentType content{};
63 u8 padding[5]; 65 INSERT_PADDING_BYTES(5);
64 u8 unknown; 66 u8 unknown{1}; // Set to 1 on official SW
65}; 67};
68static_assert(sizeof(AlbumFileEntry) == 0x20, "AlbumFileEntry has incorrect size.");
66 69
67struct ApplicationAlbumEntry { 70struct ApplicationAlbumEntry {
68 u64 size; 71 u64 size{}; // Size of the entry
69 u64 hash; 72 u64 hash{}; // AES256 with hardcoded key over AlbumEntry
70 AlbumFileDateTime datetime; 73 AlbumFileDateTime datetime{};
71 AlbumStorage storage; 74 AlbumStorage storage{};
72 ContentType content; 75 ContentType content{};
73 u8 padding[5]; 76 INSERT_PADDING_BYTES(5);
74 u8 unknown; 77 u8 unknown{1}; // Set to 1 on official SW
75}; 78};
79static_assert(sizeof(ApplicationAlbumEntry) == 0x20, "ApplicationAlbumEntry has incorrect size.");
76 80
77struct ApplicationAlbumFileEntry { 81struct ApplicationAlbumFileEntry {
78 ApplicationAlbumEntry entry; 82 ApplicationAlbumEntry entry{};
79 AlbumFileDateTime datetime; 83 AlbumFileDateTime datetime{};
80 u64 unknown; 84 u64 unknown{};
81}; 85};
86static_assert(sizeof(ApplicationAlbumFileEntry) == 0x30,
87 "ApplicationAlbumFileEntry has incorrect size.");
82 88
83/// Registers all Capture services with the specified service manager. 89/// Registers all Capture services with the specified service manager.
84void InstallInterfaces(SM::ServiceManager& sm); 90void InstallInterfaces(SM::ServiceManager& sm);
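
Note on the caps.h hunk above: the structs that cross the IPC boundary are pinned down by value-initializing every field, spelling padding out with INSERT_PADDING_BYTES, and locking the total size with a static_assert. The sketch below mirrors the AlbumEntry layout from the hunk but substitutes a plain padding array for the project's INSERT_PADDING_BYTES macro, so it is illustrative only, not the real header.

#include <cstdint>

enum class AlbumStorage : std::uint8_t { NAND = 0, SD = 1 };
enum class ContentType : std::uint8_t { Screenshot = 0, Movie = 1, ExtraMovie = 3 };

struct AlbumFileDateTime {
    std::int16_t year{};
    std::int8_t month{};
    std::int8_t day{};
    std::int8_t hour{};
    std::int8_t minute{};
    std::int8_t second{};
    std::int8_t uid{};
};
static_assert(sizeof(AlbumFileDateTime) == 0x8, "AlbumFileDateTime has incorrect size.");

struct AlbumEntry {
    std::uint64_t size{};
    std::uint64_t application_id{};
    AlbumFileDateTime datetime{};
    AlbumStorage storage{};
    ContentType content{};
    std::uint8_t padding[6]{};  // stands in for INSERT_PADDING_BYTES(6)
};
// A layout mistake now fails at compile time instead of silently corrupting
// data copied to and from guest memory.
static_assert(sizeof(AlbumEntry) == 0x20, "AlbumEntry has incorrect size.");
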
diff --git a/src/core/hle/service/caps/caps_a.cpp b/src/core/hle/service/caps/caps_a.cpp
index 88a3fdc05..a0a3b2ae3 100644
--- a/src/core/hle/service/caps/caps_a.cpp
+++ b/src/core/hle/service/caps/caps_a.cpp
@@ -1,4 +1,4 @@
1// Copyright 2020 yuzu emulator team 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
diff --git a/src/core/hle/service/caps/caps_a.h b/src/core/hle/service/caps/caps_a.h
index 8de832491..cb93aad5b 100644
--- a/src/core/hle/service/caps/caps_a.h
+++ b/src/core/hle/service/caps/caps_a.h
@@ -1,4 +1,4 @@
1// Copyright 2020 yuzu emulator team 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
diff --git a/src/core/hle/service/caps/caps_c.cpp b/src/core/hle/service/caps/caps_c.cpp
index ea6452ffa..ab17a187e 100644
--- a/src/core/hle/service/caps/caps_c.cpp
+++ b/src/core/hle/service/caps/caps_c.cpp
@@ -1,4 +1,4 @@
1// Copyright 2020 yuzu emulator team 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
diff --git a/src/core/hle/service/caps/caps_c.h b/src/core/hle/service/caps/caps_c.h
index d07cdb441..a9d028689 100644
--- a/src/core/hle/service/caps/caps_c.h
+++ b/src/core/hle/service/caps/caps_c.h
@@ -1,4 +1,4 @@
1// Copyright 2020 yuzu emulator team 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
diff --git a/src/core/hle/service/caps/caps_sc.cpp b/src/core/hle/service/caps/caps_sc.cpp
index d01a8a58e..822ee96c8 100644
--- a/src/core/hle/service/caps/caps_sc.cpp
+++ b/src/core/hle/service/caps/caps_sc.cpp
@@ -1,4 +1,4 @@
1// Copyright 2020 yuzu emulator team 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
diff --git a/src/core/hle/service/caps/caps_sc.h b/src/core/hle/service/caps/caps_sc.h
index 9ba372f7a..ac3e929ca 100644
--- a/src/core/hle/service/caps/caps_sc.h
+++ b/src/core/hle/service/caps/caps_sc.h
@@ -1,4 +1,4 @@
1// Copyright 2020 yuzu emulator team 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
diff --git a/src/core/hle/service/caps/caps_ss.cpp b/src/core/hle/service/caps/caps_ss.cpp
index eaa3a7494..24dc716e7 100644
--- a/src/core/hle/service/caps/caps_ss.cpp
+++ b/src/core/hle/service/caps/caps_ss.cpp
@@ -1,4 +1,4 @@
1// Copyright 2020 yuzu emulator team 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
diff --git a/src/core/hle/service/caps/caps_ss.h b/src/core/hle/service/caps/caps_ss.h
index e258a6925..450686e4f 100644
--- a/src/core/hle/service/caps/caps_ss.h
+++ b/src/core/hle/service/caps/caps_ss.h
@@ -1,4 +1,4 @@
1// Copyright 2020 yuzu emulator team 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
diff --git a/src/core/hle/service/caps/caps_su.cpp b/src/core/hle/service/caps/caps_su.cpp
index e8b0698e8..fffb2ecf9 100644
--- a/src/core/hle/service/caps/caps_su.cpp
+++ b/src/core/hle/service/caps/caps_su.cpp
@@ -1,4 +1,4 @@
1// Copyright 2020 yuzu emulator team 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
diff --git a/src/core/hle/service/caps/caps_su.h b/src/core/hle/service/caps/caps_su.h
index c494d7c84..62c9603a9 100644
--- a/src/core/hle/service/caps/caps_su.h
+++ b/src/core/hle/service/caps/caps_su.h
@@ -1,4 +1,4 @@
1// Copyright 2020 yuzu emulator team 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
diff --git a/src/core/hle/service/caps/caps_u.cpp b/src/core/hle/service/caps/caps_u.cpp
index 78bab6ed8..f36d8de2d 100644
--- a/src/core/hle/service/caps/caps_u.cpp
+++ b/src/core/hle/service/caps/caps_u.cpp
@@ -1,4 +1,4 @@
1// Copyright 2020 yuzu emulator team 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
@@ -58,19 +58,25 @@ void CAPS_U::GetAlbumContentsFileListForApplication(Kernel::HLERequestContext& c
 58 // u8 ContentType, two s64s, and a u64 AppletResourceUserId. Returns an output u64 for total 58 // u8 ContentType, two s64s, and a u64 AppletResourceUserId. Returns an output u64 for total
 59 // output entries (which is copied to an s32 by official SW). 59 // output entries (which is copied to an s32 by official SW).
60 IPC::RequestParser rp{ctx}; 60 IPC::RequestParser rp{ctx};
61 [[maybe_unused]] const auto application_album_file_entries = rp.PopRaw<std::array<u8, 0x30>>(); 61 const auto pid{rp.Pop<s32>()};
62 const auto pid = rp.Pop<s32>(); 62 const auto content_type{rp.PopEnum<ContentType>()};
63 const auto content_type = rp.PopRaw<ContentType>(); 63 const auto start_posix_time{rp.Pop<s64>()};
64 [[maybe_unused]] const auto start_datetime = rp.PopRaw<AlbumFileDateTime>(); 64 const auto end_posix_time{rp.Pop<s64>()};
65 [[maybe_unused]] const auto end_datetime = rp.PopRaw<AlbumFileDateTime>(); 65 const auto applet_resource_user_id{rp.Pop<u64>()};
66 const auto applet_resource_user_id = rp.Pop<u64>(); 66
67 // TODO: Update this when we implement the album.
 68 // Currently we do not have a method of accessing album entries, so set this to 0 for now.
69 constexpr s32 total_entries{0};
70
67 LOG_WARNING(Service_Capture, 71 LOG_WARNING(Service_Capture,
68 "(STUBBED) called. pid={}, content_type={}, applet_resource_user_id={}", pid, 72 "(STUBBED) called. pid={}, content_type={}, start_posix_time={}, "
69 content_type, applet_resource_user_id); 73 "end_posix_time={}, applet_resource_user_id={}, total_entries={}",
74 pid, content_type, start_posix_time, end_posix_time, applet_resource_user_id,
75 total_entries);
70 76
71 IPC::ResponseBuilder rb{ctx, 3}; 77 IPC::ResponseBuilder rb{ctx, 3};
72 rb.Push(RESULT_SUCCESS); 78 rb.Push(RESULT_SUCCESS);
73 rb.Push<s32>(0); 79 rb.Push(total_entries);
74} 80}
75 81
76} // namespace Service::Capture 82} // namespace Service::Capture
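
The rewritten GetAlbumContentsFileListForApplication above reads its inputs strictly in the order the client wrote them: a pid, a ContentType, two POSIX timestamps, then the AppletResourceUserId. The toy parser below sketches that read-in-order idea and a PopEnum helper that pops the enum's underlying type; it is a simplified stand-in, not the real IPC::RequestParser, which also handles command-buffer layout and alignment.

#include <cstdint>
#include <cstring>
#include <iostream>
#include <type_traits>
#include <vector>

enum class ContentType : std::uint8_t { Screenshot = 0, Movie = 1, ExtraMovie = 3 };

// Toy request parser: values come back in exactly the order they were pushed.
class ToyRequestParser {
public:
    template <typename T>
    void Push(const T& value) {
        static_assert(std::is_trivially_copyable_v<T>, "raw-copied IPC value");
        const auto* bytes = reinterpret_cast<const std::uint8_t*>(&value);
        buffer.insert(buffer.end(), bytes, bytes + sizeof(T));
    }

    template <typename T>
    T Pop() {
        static_assert(std::is_trivially_copyable_v<T>, "raw-copied IPC value");
        T value{};
        std::memcpy(&value, buffer.data() + offset, sizeof(T));
        offset += sizeof(T);
        return value;
    }

    template <typename Enum>
    Enum PopEnum() {
        return static_cast<Enum>(Pop<std::underlying_type_t<Enum>>());
    }

private:
    std::vector<std::uint8_t> buffer;
    std::size_t offset = 0;
};

int main() {
    ToyRequestParser rp;
    rp.Push<std::int32_t>(42);    // pid
    rp.Push<std::uint8_t>(1);     // ContentType::Movie
    rp.Push<std::int64_t>(1000);  // start_posix_time
    rp.Push<std::int64_t>(2000);  // end_posix_time
    rp.Push<std::uint64_t>(7);    // applet_resource_user_id

    const auto pid = rp.Pop<std::int32_t>();
    const auto content_type = rp.PopEnum<ContentType>();
    const auto start_posix_time = rp.Pop<std::int64_t>();
    const auto end_posix_time = rp.Pop<std::int64_t>();
    const auto applet_resource_user_id = rp.Pop<std::uint64_t>();

    std::cout << pid << ' ' << static_cast<int>(content_type) << ' ' << start_posix_time
              << ' ' << end_posix_time << ' ' << applet_resource_user_id << '\n';
    return 0;
}
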
diff --git a/src/core/hle/service/caps/caps_u.h b/src/core/hle/service/caps/caps_u.h
index e6e0716ff..689364de4 100644
--- a/src/core/hle/service/caps/caps_u.h
+++ b/src/core/hle/service/caps/caps_u.h
@@ -1,4 +1,4 @@
1// Copyright 2020 yuzu emulator team 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
diff --git a/src/core/hle/service/es/es.cpp b/src/core/hle/service/es/es.cpp
index f8e9df4b1..a41c73c48 100644
--- a/src/core/hle/service/es/es.cpp
+++ b/src/core/hle/service/es/es.cpp
@@ -27,8 +27,8 @@ public:
27 {8, &ETicket::GetTitleKey, "GetTitleKey"}, 27 {8, &ETicket::GetTitleKey, "GetTitleKey"},
28 {9, &ETicket::CountCommonTicket, "CountCommonTicket"}, 28 {9, &ETicket::CountCommonTicket, "CountCommonTicket"},
29 {10, &ETicket::CountPersonalizedTicket, "CountPersonalizedTicket"}, 29 {10, &ETicket::CountPersonalizedTicket, "CountPersonalizedTicket"},
30 {11, &ETicket::ListCommonTicket, "ListCommonTicket"}, 30 {11, &ETicket::ListCommonTicketRightsIds, "ListCommonTicketRightsIds"},
31 {12, &ETicket::ListPersonalizedTicket, "ListPersonalizedTicket"}, 31 {12, &ETicket::ListPersonalizedTicketRightsIds, "ListPersonalizedTicketRightsIds"},
32 {13, nullptr, "ListMissingPersonalizedTicket"}, 32 {13, nullptr, "ListMissingPersonalizedTicket"},
33 {14, &ETicket::GetCommonTicketSize, "GetCommonTicketSize"}, 33 {14, &ETicket::GetCommonTicketSize, "GetCommonTicketSize"},
34 {15, &ETicket::GetPersonalizedTicketSize, "GetPersonalizedTicketSize"}, 34 {15, &ETicket::GetPersonalizedTicketSize, "GetPersonalizedTicketSize"},
@@ -55,7 +55,46 @@ public:
55 {36, nullptr, "DeleteAllInactiveELicenseRequiredPersonalizedTicket"}, 55 {36, nullptr, "DeleteAllInactiveELicenseRequiredPersonalizedTicket"},
56 {37, nullptr, "OwnTicket2"}, 56 {37, nullptr, "OwnTicket2"},
57 {38, nullptr, "OwnTicket3"}, 57 {38, nullptr, "OwnTicket3"},
58 {501, nullptr, "Unknown501"},
59 {502, nullptr, "Unknown502"},
58 {503, nullptr, "GetTitleKey"}, 60 {503, nullptr, "GetTitleKey"},
61 {504, nullptr, "Unknown504"},
62 {508, nullptr, "Unknown508"},
63 {509, nullptr, "Unknown509"},
64 {510, nullptr, "Unknown510"},
65 {511, nullptr, "Unknown511"},
66 {1001, nullptr, "Unknown1001"},
 67 {1002, nullptr, "Unknown1002"},
68 {1003, nullptr, "Unknown1003"},
69 {1004, nullptr, "Unknown1004"},
70 {1005, nullptr, "Unknown1005"},
71 {1006, nullptr, "Unknown1006"},
72 {1007, nullptr, "Unknown1007"},
73 {1009, nullptr, "Unknown1009"},
74 {1010, nullptr, "Unknown1010"},
75 {1011, nullptr, "Unknown1011"},
76 {1012, nullptr, "Unknown1012"},
77 {1013, nullptr, "Unknown1013"},
78 {1014, nullptr, "Unknown1014"},
79 {1015, nullptr, "Unknown1015"},
80 {1016, nullptr, "Unknown1016"},
81 {1017, nullptr, "Unknown1017"},
82 {1018, nullptr, "Unknown1018"},
83 {1019, nullptr, "Unknown1019"},
84 {1020, nullptr, "Unknown1020"},
85 {1021, nullptr, "Unknown1021"},
86 {1501, nullptr, "Unknown1501"},
87 {1502, nullptr, "Unknown1502"},
88 {1503, nullptr, "Unknown1503"},
89 {1504, nullptr, "Unknown1504"},
90 {1505, nullptr, "Unknown1505"},
91 {2000, nullptr, "Unknown2000"},
92 {2001, nullptr, "Unknown2001"},
93 {2100, nullptr, "Unknown2100"},
94 {2501, nullptr, "Unknown2501"},
95 {2502, nullptr, "Unknown2502"},
96 {3001, nullptr, "Unknown3001"},
97 {3002, nullptr, "Unknown3002"},
59 }; 98 };
60 // clang-format on 99 // clang-format on
61 RegisterHandlers(functions); 100 RegisterHandlers(functions);
@@ -147,7 +186,7 @@ private:
147 rb.Push<u32>(count); 186 rb.Push<u32>(count);
148 } 187 }
149 188
150 void ListCommonTicket(Kernel::HLERequestContext& ctx) { 189 void ListCommonTicketRightsIds(Kernel::HLERequestContext& ctx) {
151 u32 out_entries; 190 u32 out_entries;
152 if (keys.GetCommonTickets().empty()) 191 if (keys.GetCommonTickets().empty())
153 out_entries = 0; 192 out_entries = 0;
@@ -170,7 +209,7 @@ private:
170 rb.Push<u32>(out_entries); 209 rb.Push<u32>(out_entries);
171 } 210 }
172 211
173 void ListPersonalizedTicket(Kernel::HLERequestContext& ctx) { 212 void ListPersonalizedTicketRightsIds(Kernel::HLERequestContext& ctx) {
174 u32 out_entries; 213 u32 out_entries;
175 if (keys.GetPersonalizedTickets().empty()) 214 if (keys.GetPersonalizedTickets().empty())
176 out_entries = 0; 215 out_entries = 0;
@@ -263,7 +302,7 @@ private:
263 rb.Push<u64>(write_size); 302 rb.Push<u64>(write_size);
264 } 303 }
265 304
266 Core::Crypto::KeyManager keys; 305 Core::Crypto::KeyManager& keys = Core::Crypto::KeyManager::Instance();
267}; 306};
268 307
269void InstallInterfaces(SM::ServiceManager& service_manager) { 308void InstallInterfaces(SM::ServiceManager& service_manager) {
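
The final hunk of es.cpp above replaces a per-service KeyManager member with a reference obtained from KeyManager::Instance(), so every service shares one loaded key set instead of each constructing its own. The sketch below shows that accessor pattern under the assumption that Instance() is a function-local static (Meyers) singleton; the class body is simplified and is not the project's actual Core::Crypto::KeyManager.

#include <string>
#include <unordered_map>

// Simplified stand-in for Core::Crypto::KeyManager (illustrative only).
class KeyManager {
public:
    // One shared instance, constructed on first use.
    static KeyManager& Instance() {
        static KeyManager instance;
        return instance;
    }

    KeyManager(const KeyManager&) = delete;
    KeyManager& operator=(const KeyManager&) = delete;

    void SetKey(const std::string& name, const std::string& value) {
        keys[name] = value;
    }

    bool HasKey(const std::string& name) const {
        return keys.find(name) != keys.end();
    }

private:
    KeyManager() = default;  // The real class loads keys from disk here.
    std::unordered_map<std::string, std::string> keys;
};

class ETicket {
public:
    bool HasTitleKey(const std::string& rights_id) const {
        return keys.HasKey(rights_id);
    }

private:
    // A reference, not a copy: every holder sees the same key set.
    KeyManager& keys = KeyManager::Instance();
};
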
diff --git a/src/core/hle/service/eupld/eupld.cpp b/src/core/hle/service/eupld/eupld.cpp
index 2df30acee..0d6d244f4 100644
--- a/src/core/hle/service/eupld/eupld.cpp
+++ b/src/core/hle/service/eupld/eupld.cpp
@@ -19,6 +19,7 @@ public:
19 {1, nullptr, "ImportCrt"}, 19 {1, nullptr, "ImportCrt"},
20 {2, nullptr, "ImportPki"}, 20 {2, nullptr, "ImportPki"},
21 {3, nullptr, "SetAutoUpload"}, 21 {3, nullptr, "SetAutoUpload"},
22 {4, nullptr, "GetAutoUpload"},
22 }; 23 };
23 // clang-format on 24 // clang-format on
24 25
diff --git a/src/core/hle/service/friend/friend.cpp b/src/core/hle/service/friend/friend.cpp
index 68f259b70..b7adaffc7 100644
--- a/src/core/hle/service/friend/friend.cpp
+++ b/src/core/hle/service/friend/friend.cpp
@@ -25,9 +25,13 @@ public:
25 {10101, &IFriendService::GetFriendList, "GetFriendList"}, 25 {10101, &IFriendService::GetFriendList, "GetFriendList"},
26 {10102, nullptr, "UpdateFriendInfo"}, 26 {10102, nullptr, "UpdateFriendInfo"},
27 {10110, nullptr, "GetFriendProfileImage"}, 27 {10110, nullptr, "GetFriendProfileImage"},
28 {10120, nullptr, "Unknown10120"},
29 {10121, nullptr, "Unknown10121"},
28 {10200, nullptr, "SendFriendRequestForApplication"}, 30 {10200, nullptr, "SendFriendRequestForApplication"},
29 {10211, nullptr, "AddFacedFriendRequestForApplication"}, 31 {10211, nullptr, "AddFacedFriendRequestForApplication"},
30 {10400, &IFriendService::GetBlockedUserListIds, "GetBlockedUserListIds"}, 32 {10400, &IFriendService::GetBlockedUserListIds, "GetBlockedUserListIds"},
33 {10420, nullptr, "Unknown10420"},
34 {10421, nullptr, "Unknown10421"},
31 {10500, nullptr, "GetProfileList"}, 35 {10500, nullptr, "GetProfileList"},
32 {10600, nullptr, "DeclareOpenOnlinePlaySession"}, 36 {10600, nullptr, "DeclareOpenOnlinePlaySession"},
33 {10601, &IFriendService::DeclareCloseOnlinePlaySession, "DeclareCloseOnlinePlaySession"}, 37 {10601, &IFriendService::DeclareCloseOnlinePlaySession, "DeclareCloseOnlinePlaySession"},
@@ -97,6 +101,8 @@ public:
97 {30900, nullptr, "SendFriendInvitation"}, 101 {30900, nullptr, "SendFriendInvitation"},
98 {30910, nullptr, "ReadFriendInvitation"}, 102 {30910, nullptr, "ReadFriendInvitation"},
99 {30911, nullptr, "ReadAllFriendInvitations"}, 103 {30911, nullptr, "ReadAllFriendInvitations"},
104 {40100, nullptr, "Unknown40100"},
105 {40400, nullptr, "Unknown40400"},
100 {49900, nullptr, "DeleteNetworkServiceAccountCache"}, 106 {49900, nullptr, "DeleteNetworkServiceAccountCache"},
101 }; 107 };
102 // clang-format on 108 // clang-format on
diff --git a/src/core/hle/service/grc/grc.cpp b/src/core/hle/service/grc/grc.cpp
index 24910ac6c..401e0b208 100644
--- a/src/core/hle/service/grc/grc.cpp
+++ b/src/core/hle/service/grc/grc.cpp
@@ -17,6 +17,9 @@ public:
17 static const FunctionInfo functions[] = { 17 static const FunctionInfo functions[] = {
18 {1, nullptr, "OpenContinuousRecorder"}, 18 {1, nullptr, "OpenContinuousRecorder"},
19 {2, nullptr, "OpenGameMovieTrimmer"}, 19 {2, nullptr, "OpenGameMovieTrimmer"},
20 {3, nullptr, "OpenOffscreenRecorder"},
21 {101, nullptr, "CreateMovieMaker"},
 22 {9903, nullptr, "SetOffscreenRecordingMarker"},
20 }; 23 };
21 // clang-format on 24 // clang-format on
22 25
diff --git a/src/core/hle/service/hid/controllers/debug_pad.cpp b/src/core/hle/service/hid/controllers/debug_pad.cpp
index 1f2131ec8..cb35919e9 100644
--- a/src/core/hle/service/hid/controllers/debug_pad.cpp
+++ b/src/core/hle/service/hid/controllers/debug_pad.cpp
@@ -23,7 +23,7 @@ void Controller_DebugPad::OnRelease() {}
23 23
24void Controller_DebugPad::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data, 24void Controller_DebugPad::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data,
25 std::size_t size) { 25 std::size_t size) {
26 shared_memory.header.timestamp = core_timing.GetTicks(); 26 shared_memory.header.timestamp = core_timing.GetCPUTicks();
27 shared_memory.header.total_entry_count = 17; 27 shared_memory.header.total_entry_count = 17;
28 28
29 if (!IsControllerActivated()) { 29 if (!IsControllerActivated()) {
diff --git a/src/core/hle/service/hid/controllers/gesture.cpp b/src/core/hle/service/hid/controllers/gesture.cpp
index 6e990dd00..b7b7bfeae 100644
--- a/src/core/hle/service/hid/controllers/gesture.cpp
+++ b/src/core/hle/service/hid/controllers/gesture.cpp
@@ -19,7 +19,7 @@ void Controller_Gesture::OnRelease() {}
19 19
20void Controller_Gesture::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data, 20void Controller_Gesture::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data,
21 std::size_t size) { 21 std::size_t size) {
22 shared_memory.header.timestamp = core_timing.GetTicks(); 22 shared_memory.header.timestamp = core_timing.GetCPUTicks();
23 shared_memory.header.total_entry_count = 17; 23 shared_memory.header.total_entry_count = 17;
24 24
25 if (!IsControllerActivated()) { 25 if (!IsControllerActivated()) {
diff --git a/src/core/hle/service/hid/controllers/keyboard.cpp b/src/core/hle/service/hid/controllers/keyboard.cpp
index 9a8d354ba..feae89525 100644
--- a/src/core/hle/service/hid/controllers/keyboard.cpp
+++ b/src/core/hle/service/hid/controllers/keyboard.cpp
@@ -21,7 +21,7 @@ void Controller_Keyboard::OnRelease() {}
21 21
22void Controller_Keyboard::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data, 22void Controller_Keyboard::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data,
23 std::size_t size) { 23 std::size_t size) {
24 shared_memory.header.timestamp = core_timing.GetTicks(); 24 shared_memory.header.timestamp = core_timing.GetCPUTicks();
25 shared_memory.header.total_entry_count = 17; 25 shared_memory.header.total_entry_count = 17;
26 26
27 if (!IsControllerActivated()) { 27 if (!IsControllerActivated()) {
diff --git a/src/core/hle/service/hid/controllers/mouse.cpp b/src/core/hle/service/hid/controllers/mouse.cpp
index 93d88ea50..ac40989c5 100644
--- a/src/core/hle/service/hid/controllers/mouse.cpp
+++ b/src/core/hle/service/hid/controllers/mouse.cpp
@@ -19,7 +19,7 @@ void Controller_Mouse::OnRelease() {}
19 19
20void Controller_Mouse::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data, 20void Controller_Mouse::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data,
21 std::size_t size) { 21 std::size_t size) {
22 shared_memory.header.timestamp = core_timing.GetTicks(); 22 shared_memory.header.timestamp = core_timing.GetCPUTicks();
23 shared_memory.header.total_entry_count = 17; 23 shared_memory.header.total_entry_count = 17;
24 24
25 if (!IsControllerActivated()) { 25 if (!IsControllerActivated()) {
diff --git a/src/core/hle/service/hid/controllers/npad.cpp b/src/core/hle/service/hid/controllers/npad.cpp
index c55d900e2..ef67ad690 100644
--- a/src/core/hle/service/hid/controllers/npad.cpp
+++ b/src/core/hle/service/hid/controllers/npad.cpp
@@ -328,7 +328,7 @@ void Controller_NPad::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8*
328 const auto& last_entry = 328 const auto& last_entry =
329 main_controller->npad[main_controller->common.last_entry_index]; 329 main_controller->npad[main_controller->common.last_entry_index];
330 330
331 main_controller->common.timestamp = core_timing.GetTicks(); 331 main_controller->common.timestamp = core_timing.GetCPUTicks();
332 main_controller->common.last_entry_index = 332 main_controller->common.last_entry_index =
333 (main_controller->common.last_entry_index + 1) % 17; 333 (main_controller->common.last_entry_index + 1) % 17;
334 334
@@ -566,6 +566,14 @@ void Controller_NPad::DisconnectNPad(u32 npad_id) {
566 connected_controllers[NPadIdToIndex(npad_id)].is_connected = false; 566 connected_controllers[NPadIdToIndex(npad_id)].is_connected = false;
567} 567}
568 568
569void Controller_NPad::SetGyroscopeZeroDriftMode(GyroscopeZeroDriftMode drift_mode) {
570 gyroscope_zero_drift_mode = drift_mode;
571}
572
573Controller_NPad::GyroscopeZeroDriftMode Controller_NPad::GetGyroscopeZeroDriftMode() const {
574 return gyroscope_zero_drift_mode;
575}
576
569void Controller_NPad::StartLRAssignmentMode() { 577void Controller_NPad::StartLRAssignmentMode() {
570 // Nothing internally is used for lr assignment mode. Since we have the ability to set the 578 // Nothing internally is used for lr assignment mode. Since we have the ability to set the
571 // controller types from boot, it doesn't really matter about showing a selection screen 579 // controller types from boot, it doesn't really matter about showing a selection screen
diff --git a/src/core/hle/service/hid/controllers/npad.h b/src/core/hle/service/hid/controllers/npad.h
index 931f03430..5d4c58a43 100644
--- a/src/core/hle/service/hid/controllers/npad.h
+++ b/src/core/hle/service/hid/controllers/npad.h
@@ -58,6 +58,12 @@ public:
58 }; 58 };
59 static_assert(sizeof(Vibration) == 0x10, "Vibration is an invalid size"); 59 static_assert(sizeof(Vibration) == 0x10, "Vibration is an invalid size");
60 60
61 enum class GyroscopeZeroDriftMode : u32 {
62 Loose = 0,
63 Standard = 1,
64 Tight = 2,
65 };
66
61 enum class NpadHoldType : u64 { 67 enum class NpadHoldType : u64 {
62 Vertical = 0, 68 Vertical = 0,
63 Horizontal = 1, 69 Horizontal = 1,
@@ -117,6 +123,8 @@ public:
117 123
118 void ConnectNPad(u32 npad_id); 124 void ConnectNPad(u32 npad_id);
119 void DisconnectNPad(u32 npad_id); 125 void DisconnectNPad(u32 npad_id);
126 void SetGyroscopeZeroDriftMode(GyroscopeZeroDriftMode drift_mode);
127 GyroscopeZeroDriftMode GetGyroscopeZeroDriftMode() const;
120 LedPattern GetLedPattern(u32 npad_id); 128 LedPattern GetLedPattern(u32 npad_id);
121 void SetVibrationEnabled(bool can_vibrate); 129 void SetVibrationEnabled(bool can_vibrate);
122 bool IsVibrationEnabled() const; 130 bool IsVibrationEnabled() const;
@@ -324,8 +332,8 @@ private:
324 std::array<Kernel::EventPair, 10> styleset_changed_events; 332 std::array<Kernel::EventPair, 10> styleset_changed_events;
325 Vibration last_processed_vibration{}; 333 Vibration last_processed_vibration{};
326 std::array<ControllerHolder, 10> connected_controllers{}; 334 std::array<ControllerHolder, 10> connected_controllers{};
335 GyroscopeZeroDriftMode gyroscope_zero_drift_mode{GyroscopeZeroDriftMode::Standard};
327 bool can_controllers_vibrate{true}; 336 bool can_controllers_vibrate{true};
328
329 std::array<ControllerPad, 10> npad_pad_states{}; 337 std::array<ControllerPad, 10> npad_pad_states{};
330 bool is_in_lr_assignment_mode{false}; 338 bool is_in_lr_assignment_mode{false};
331 Core::System& system; 339 Core::System& system;
diff --git a/src/core/hle/service/hid/controllers/stubbed.cpp b/src/core/hle/service/hid/controllers/stubbed.cpp
index 9e527d176..e7483bfa2 100644
--- a/src/core/hle/service/hid/controllers/stubbed.cpp
+++ b/src/core/hle/service/hid/controllers/stubbed.cpp
@@ -23,7 +23,7 @@ void Controller_Stubbed::OnUpdate(const Core::Timing::CoreTiming& core_timing, u
23 } 23 }
24 24
25 CommonHeader header{}; 25 CommonHeader header{};
26 header.timestamp = core_timing.GetTicks(); 26 header.timestamp = core_timing.GetCPUTicks();
27 header.total_entry_count = 17; 27 header.total_entry_count = 17;
28 header.entry_count = 0; 28 header.entry_count = 0;
29 header.last_entry_index = 0; 29 header.last_entry_index = 0;
diff --git a/src/core/hle/service/hid/controllers/touchscreen.cpp b/src/core/hle/service/hid/controllers/touchscreen.cpp
index 1c6e55566..e326f8f5c 100644
--- a/src/core/hle/service/hid/controllers/touchscreen.cpp
+++ b/src/core/hle/service/hid/controllers/touchscreen.cpp
@@ -22,7 +22,7 @@ void Controller_Touchscreen::OnRelease() {}
22 22
23void Controller_Touchscreen::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data, 23void Controller_Touchscreen::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data,
24 std::size_t size) { 24 std::size_t size) {
25 shared_memory.header.timestamp = core_timing.GetTicks(); 25 shared_memory.header.timestamp = core_timing.GetCPUTicks();
26 shared_memory.header.total_entry_count = 17; 26 shared_memory.header.total_entry_count = 17;
27 27
28 if (!IsControllerActivated()) { 28 if (!IsControllerActivated()) {
@@ -49,7 +49,7 @@ void Controller_Touchscreen::OnUpdate(const Core::Timing::CoreTiming& core_timin
49 touch_entry.diameter_x = Settings::values.touchscreen.diameter_x; 49 touch_entry.diameter_x = Settings::values.touchscreen.diameter_x;
50 touch_entry.diameter_y = Settings::values.touchscreen.diameter_y; 50 touch_entry.diameter_y = Settings::values.touchscreen.diameter_y;
51 touch_entry.rotation_angle = Settings::values.touchscreen.rotation_angle; 51 touch_entry.rotation_angle = Settings::values.touchscreen.rotation_angle;
52 const u64 tick = core_timing.GetTicks(); 52 const u64 tick = core_timing.GetCPUTicks();
53 touch_entry.delta_time = tick - last_touch; 53 touch_entry.delta_time = tick - last_touch;
54 last_touch = tick; 54 last_touch = tick;
55 touch_entry.finger = Settings::values.touchscreen.finger; 55 touch_entry.finger = Settings::values.touchscreen.finger;
diff --git a/src/core/hle/service/hid/controllers/xpad.cpp b/src/core/hle/service/hid/controllers/xpad.cpp
index 27511b27b..2503ef241 100644
--- a/src/core/hle/service/hid/controllers/xpad.cpp
+++ b/src/core/hle/service/hid/controllers/xpad.cpp
@@ -20,7 +20,7 @@ void Controller_XPad::OnRelease() {}
20void Controller_XPad::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data, 20void Controller_XPad::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data,
21 std::size_t size) { 21 std::size_t size) {
22 for (auto& xpad_entry : shared_memory.shared_memory_entries) { 22 for (auto& xpad_entry : shared_memory.shared_memory_entries) {
23 xpad_entry.header.timestamp = core_timing.GetTicks(); 23 xpad_entry.header.timestamp = core_timing.GetCPUTicks();
24 xpad_entry.header.total_entry_count = 17; 24 xpad_entry.header.total_entry_count = 17;
25 25
26 if (!IsControllerActivated()) { 26 if (!IsControllerActivated()) {
diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp
index c84cb1483..e9020e0dc 100644
--- a/src/core/hle/service/hid/hid.cpp
+++ b/src/core/hle/service/hid/hid.cpp
@@ -39,11 +39,9 @@ namespace Service::HID {
39 39
40// Updating period for each HID device. 40// Updating period for each HID device.
41// TODO(ogniK): Find actual polling rate of hid 41// TODO(ogniK): Find actual polling rate of hid
42constexpr s64 pad_update_ticks = static_cast<s64>(Core::Hardware::BASE_CLOCK_RATE / 66); 42constexpr s64 pad_update_ticks = static_cast<s64>(1000000000 / 66);
43[[maybe_unused]] constexpr s64 accelerometer_update_ticks = 43[[maybe_unused]] constexpr s64 accelerometer_update_ticks = static_cast<s64>(1000000000 / 100);
44 static_cast<s64>(Core::Hardware::BASE_CLOCK_RATE / 100); 44[[maybe_unused]] constexpr s64 gyroscope_update_ticks = static_cast<s64>(1000000000 / 100);
45[[maybe_unused]] constexpr s64 gyroscope_update_ticks =
46 static_cast<s64>(Core::Hardware::BASE_CLOCK_RATE / 100);
47constexpr std::size_t SHARED_MEMORY_SIZE = 0x40000; 45constexpr std::size_t SHARED_MEMORY_SIZE = 0x40000;
48 46
49IAppletResource::IAppletResource(Core::System& system) 47IAppletResource::IAppletResource(Core::System& system)
@@ -78,8 +76,8 @@ IAppletResource::IAppletResource(Core::System& system)
78 76
79 // Register update callbacks 77 // Register update callbacks
80 pad_update_event = 78 pad_update_event =
81 Core::Timing::CreateEvent("HID::UpdatePadCallback", [this](u64 userdata, s64 cycles_late) { 79 Core::Timing::CreateEvent("HID::UpdatePadCallback", [this](u64 userdata, s64 ns_late) {
82 UpdateControllers(userdata, cycles_late); 80 UpdateControllers(userdata, ns_late);
83 }); 81 });
84 82
85 // TODO(shinyquagsire23): Other update callbacks? (accel, gyro?) 83 // TODO(shinyquagsire23): Other update callbacks? (accel, gyro?)
@@ -109,7 +107,7 @@ void IAppletResource::GetSharedMemoryHandle(Kernel::HLERequestContext& ctx) {
109 rb.PushCopyObjects(shared_mem); 107 rb.PushCopyObjects(shared_mem);
110} 108}
111 109
112void IAppletResource::UpdateControllers(u64 userdata, s64 cycles_late) { 110void IAppletResource::UpdateControllers(u64 userdata, s64 ns_late) {
113 auto& core_timing = system.CoreTiming(); 111 auto& core_timing = system.CoreTiming();
114 112
115 const bool should_reload = Settings::values.is_device_reload_pending.exchange(false); 113 const bool should_reload = Settings::values.is_device_reload_pending.exchange(false);
@@ -120,7 +118,7 @@ void IAppletResource::UpdateControllers(u64 userdata, s64 cycles_late) {
120 controller->OnUpdate(core_timing, shared_mem->GetPointer(), SHARED_MEMORY_SIZE); 118 controller->OnUpdate(core_timing, shared_mem->GetPointer(), SHARED_MEMORY_SIZE);
121 } 119 }
122 120
123 core_timing.ScheduleEvent(pad_update_ticks - cycles_late, pad_update_event); 121 core_timing.ScheduleEvent(pad_update_ticks - ns_late, pad_update_event);
124} 122}
125 123
126class IActiveVibrationDeviceList final : public ServiceFramework<IActiveVibrationDeviceList> { 124class IActiveVibrationDeviceList final : public ServiceFramework<IActiveVibrationDeviceList> {
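
The hid.cpp hunks above switch the update period from a count of emulated CPU cycles to nanoseconds (1000000000 / 66, roughly a 15 ms cadence) and rename the callback's lateness argument to ns_late to match, so ScheduleEvent(pad_update_ticks - ns_late, ...) keeps the long-run rate near 66 Hz even when a callback fires late. The sketch below shows that self-rescheduling arithmetic with a toy scheduler; it is only an illustration, not the project's CoreTiming API.

#include <cstdint>
#include <functional>
#include <iostream>

// Period expressed in nanoseconds, matching the new constant in hid.cpp.
constexpr std::int64_t pad_update_period_ns = 1000000000 / 66;  // ~15.15 ms

// Toy stand-in for CoreTiming: holds one pending callback and reports how
// late it actually fired, like the ns_late parameter in the hunk.
struct ToyTiming {
    std::function<void(std::int64_t)> pending;
    std::int64_t pending_delay = 0;

    void ScheduleEvent(std::int64_t delay_ns, std::function<void(std::int64_t)> cb) {
        pending = std::move(cb);
        pending_delay = delay_ns;
    }

    void FireLate(std::int64_t ns_late) {
        auto cb = std::move(pending);
        if (cb) {
            cb(ns_late);
        }
    }
};

int main() {
    ToyTiming timing;

    // Self-rescheduling update, shortened by however late this invocation was,
    // so the average period stays at pad_update_period_ns.
    std::function<void(std::int64_t)> update = [&](std::int64_t ns_late) {
        std::cout << "update fired " << ns_late << " ns late\n";
        timing.ScheduleEvent(pad_update_period_ns - ns_late, update);
    };

    timing.ScheduleEvent(pad_update_period_ns, update);
    timing.FireLate(200000);  // pretend this tick ran 0.2 ms late
    std::cout << "next delay: " << timing.pending_delay << " ns\n";
    return 0;
}
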
@@ -161,7 +159,7 @@ Hid::Hid(Core::System& system) : ServiceFramework("hid"), system(system) {
161 {40, nullptr, "AcquireXpadIdEventHandle"}, 159 {40, nullptr, "AcquireXpadIdEventHandle"},
162 {41, nullptr, "ReleaseXpadIdEventHandle"}, 160 {41, nullptr, "ReleaseXpadIdEventHandle"},
163 {51, &Hid::ActivateXpad, "ActivateXpad"}, 161 {51, &Hid::ActivateXpad, "ActivateXpad"},
164 {55, nullptr, "GetXpadIds"}, 162 {55, &Hid::GetXpadIDs, "GetXpadIds"},
165 {56, nullptr, "ActivateJoyXpad"}, 163 {56, nullptr, "ActivateJoyXpad"},
166 {58, nullptr, "GetJoyXpadLifoHandle"}, 164 {58, nullptr, "GetJoyXpadLifoHandle"},
167 {59, nullptr, "GetJoyXpadIds"}, 165 {59, nullptr, "GetJoyXpadIds"},
@@ -185,8 +183,8 @@ Hid::Hid(Core::System& system) : ServiceFramework("hid"), system(system) {
185 {77, nullptr, "GetAccelerometerPlayMode"}, 183 {77, nullptr, "GetAccelerometerPlayMode"},
186 {78, nullptr, "ResetAccelerometerPlayMode"}, 184 {78, nullptr, "ResetAccelerometerPlayMode"},
187 {79, &Hid::SetGyroscopeZeroDriftMode, "SetGyroscopeZeroDriftMode"}, 185 {79, &Hid::SetGyroscopeZeroDriftMode, "SetGyroscopeZeroDriftMode"},
188 {80, nullptr, "GetGyroscopeZeroDriftMode"}, 186 {80, &Hid::GetGyroscopeZeroDriftMode, "GetGyroscopeZeroDriftMode"},
189 {81, nullptr, "ResetGyroscopeZeroDriftMode"}, 187 {81, &Hid::ResetGyroscopeZeroDriftMode, "ResetGyroscopeZeroDriftMode"},
190 {82, &Hid::IsSixAxisSensorAtRest, "IsSixAxisSensorAtRest"}, 188 {82, &Hid::IsSixAxisSensorAtRest, "IsSixAxisSensorAtRest"},
191 {83, nullptr, "IsFirmwareUpdateAvailableForSixAxisSensor"}, 189 {83, nullptr, "IsFirmwareUpdateAvailableForSixAxisSensor"},
192 {91, &Hid::ActivateGesture, "ActivateGesture"}, 190 {91, &Hid::ActivateGesture, "ActivateGesture"},
@@ -230,15 +228,15 @@ Hid::Hid(Core::System& system) : ServiceFramework("hid"), system(system) {
230 {211, nullptr, "IsVibrationDeviceMounted"}, 228 {211, nullptr, "IsVibrationDeviceMounted"},
231 {300, &Hid::ActivateConsoleSixAxisSensor, "ActivateConsoleSixAxisSensor"}, 229 {300, &Hid::ActivateConsoleSixAxisSensor, "ActivateConsoleSixAxisSensor"},
232 {301, &Hid::StartConsoleSixAxisSensor, "StartConsoleSixAxisSensor"}, 230 {301, &Hid::StartConsoleSixAxisSensor, "StartConsoleSixAxisSensor"},
233 {302, nullptr, "StopConsoleSixAxisSensor"}, 231 {302, &Hid::StopConsoleSixAxisSensor, "StopConsoleSixAxisSensor"},
234 {303, nullptr, "ActivateSevenSixAxisSensor"}, 232 {303, &Hid::ActivateSevenSixAxisSensor, "ActivateSevenSixAxisSensor"},
235 {304, nullptr, "StartSevenSixAxisSensor"}, 233 {304, &Hid::StartSevenSixAxisSensor, "StartSevenSixAxisSensor"},
236 {305, &Hid::StopSevenSixAxisSensor, "StopSevenSixAxisSensor"}, 234 {305, &Hid::StopSevenSixAxisSensor, "StopSevenSixAxisSensor"},
237 {306, &Hid::InitializeSevenSixAxisSensor, "InitializeSevenSixAxisSensor"}, 235 {306, &Hid::InitializeSevenSixAxisSensor, "InitializeSevenSixAxisSensor"},
238 {307, nullptr, "FinalizeSevenSixAxisSensor"}, 236 {307, &Hid::FinalizeSevenSixAxisSensor, "FinalizeSevenSixAxisSensor"},
239 {308, nullptr, "SetSevenSixAxisSensorFusionStrength"}, 237 {308, nullptr, "SetSevenSixAxisSensorFusionStrength"},
240 {309, nullptr, "GetSevenSixAxisSensorFusionStrength"}, 238 {309, nullptr, "GetSevenSixAxisSensorFusionStrength"},
241 {310, nullptr, "ResetSevenSixAxisSensorTimestamp"}, 239 {310, &Hid::ResetSevenSixAxisSensorTimestamp, "ResetSevenSixAxisSensorTimestamp"},
242 {400, nullptr, "IsUsbFullKeyControllerEnabled"}, 240 {400, nullptr, "IsUsbFullKeyControllerEnabled"},
243 {401, nullptr, "EnableUsbFullKeyController"}, 241 {401, nullptr, "EnableUsbFullKeyController"},
244 {402, nullptr, "IsUsbFullKeyControllerConnected"}, 242 {402, nullptr, "IsUsbFullKeyControllerConnected"},
@@ -319,6 +317,17 @@ void Hid::ActivateXpad(Kernel::HLERequestContext& ctx) {
319 rb.Push(RESULT_SUCCESS); 317 rb.Push(RESULT_SUCCESS);
320} 318}
321 319
320void Hid::GetXpadIDs(Kernel::HLERequestContext& ctx) {
321 IPC::RequestParser rp{ctx};
322 const auto applet_resource_user_id{rp.Pop<u64>()};
323
324 LOG_DEBUG(Service_HID, "(STUBBED) called, applet_resource_user_id={}", applet_resource_user_id);
325
326 IPC::ResponseBuilder rb{ctx, 3};
327 rb.Push(RESULT_SUCCESS);
328 rb.Push(0);
329}
330
322void Hid::ActivateDebugPad(Kernel::HLERequestContext& ctx) { 331void Hid::ActivateDebugPad(Kernel::HLERequestContext& ctx) {
323 IPC::RequestParser rp{ctx}; 332 IPC::RequestParser rp{ctx};
324 const auto applet_resource_user_id{rp.Pop<u64>()}; 333 const auto applet_resource_user_id{rp.Pop<u64>()};
@@ -363,6 +372,15 @@ void Hid::ActivateKeyboard(Kernel::HLERequestContext& ctx) {
363 rb.Push(RESULT_SUCCESS); 372 rb.Push(RESULT_SUCCESS);
364} 373}
365 374
375void Hid::SendKeyboardLockKeyEvent(Kernel::HLERequestContext& ctx) {
376 IPC::RequestParser rp{ctx};
377 const auto flags{rp.Pop<u32>()};
378 LOG_WARNING(Service_HID, "(STUBBED) called. flags={}", flags);
379
380 IPC::ResponseBuilder rb{ctx, 2};
381 rb.Push(RESULT_SUCCESS);
382}
383
366void Hid::ActivateGesture(Kernel::HLERequestContext& ctx) { 384void Hid::ActivateGesture(Kernel::HLERequestContext& ctx) {
367 IPC::RequestParser rp{ctx}; 385 IPC::RequestParser rp{ctx};
368 const auto unknown{rp.Pop<u32>()}; 386 const auto unknown{rp.Pop<u32>()};
@@ -402,15 +420,59 @@ void Hid::StartSixAxisSensor(Kernel::HLERequestContext& ctx) {
402 rb.Push(RESULT_SUCCESS); 420 rb.Push(RESULT_SUCCESS);
403} 421}
404 422
423void Hid::StopSixAxisSensor(Kernel::HLERequestContext& ctx) {
424 IPC::RequestParser rp{ctx};
425 const auto handle{rp.Pop<u32>()};
426 const auto applet_resource_user_id{rp.Pop<u64>()};
427
428 LOG_WARNING(Service_HID, "(STUBBED) called, handle={}, applet_resource_user_id={}", handle,
429 applet_resource_user_id);
430
431 IPC::ResponseBuilder rb{ctx, 2};
432 rb.Push(RESULT_SUCCESS);
433}
434
405void Hid::SetGyroscopeZeroDriftMode(Kernel::HLERequestContext& ctx) { 435void Hid::SetGyroscopeZeroDriftMode(Kernel::HLERequestContext& ctx) {
406 IPC::RequestParser rp{ctx}; 436 IPC::RequestParser rp{ctx};
407 const auto handle{rp.Pop<u32>()}; 437 const auto handle{rp.Pop<u32>()};
408 const auto drift_mode{rp.Pop<u32>()}; 438 const auto drift_mode{rp.Pop<u32>()};
409 const auto applet_resource_user_id{rp.Pop<u64>()}; 439 const auto applet_resource_user_id{rp.Pop<u64>()};
410 440
411 LOG_WARNING(Service_HID, 441 applet_resource->GetController<Controller_NPad>(HidController::NPad)
412 "(STUBBED) called, handle={}, drift_mode={}, applet_resource_user_id={}", handle, 442 .SetGyroscopeZeroDriftMode(Controller_NPad::GyroscopeZeroDriftMode{drift_mode});
413 drift_mode, applet_resource_user_id); 443
444 LOG_DEBUG(Service_HID, "called, handle={}, drift_mode={}, applet_resource_user_id={}", handle,
445 drift_mode, applet_resource_user_id);
446
447 IPC::ResponseBuilder rb{ctx, 2};
448 rb.Push(RESULT_SUCCESS);
449}
450
451void Hid::GetGyroscopeZeroDriftMode(Kernel::HLERequestContext& ctx) {
452 IPC::RequestParser rp{ctx};
453 const auto handle{rp.Pop<u32>()};
454 const auto applet_resource_user_id{rp.Pop<u64>()};
455
456 LOG_DEBUG(Service_HID, "called, handle={}, applet_resource_user_id={}", handle,
457 applet_resource_user_id);
458
459 IPC::ResponseBuilder rb{ctx, 3};
460 rb.Push(RESULT_SUCCESS);
461 rb.Push<u32>(
462 static_cast<u32>(applet_resource->GetController<Controller_NPad>(HidController::NPad)
463 .GetGyroscopeZeroDriftMode()));
464}
465
466void Hid::ResetGyroscopeZeroDriftMode(Kernel::HLERequestContext& ctx) {
467 IPC::RequestParser rp{ctx};
468 const auto handle{rp.Pop<u32>()};
469 const auto applet_resource_user_id{rp.Pop<u64>()};
470
471 applet_resource->GetController<Controller_NPad>(HidController::NPad)
472 .SetGyroscopeZeroDriftMode(Controller_NPad::GyroscopeZeroDriftMode::Standard);
473
474 LOG_DEBUG(Service_HID, "called, handle={}, applet_resource_user_id={}", handle,
475 applet_resource_user_id);
414 476
415 IPC::ResponseBuilder rb{ctx, 2}; 477 IPC::ResponseBuilder rb{ctx, 2};
416 rb.Push(RESULT_SUCCESS); 478 rb.Push(RESULT_SUCCESS);
@@ -821,33 +883,35 @@ void Hid::StartConsoleSixAxisSensor(Kernel::HLERequestContext& ctx) {
821 rb.Push(RESULT_SUCCESS); 883 rb.Push(RESULT_SUCCESS);
822} 884}
823 885
824void Hid::StopSixAxisSensor(Kernel::HLERequestContext& ctx) { 886void Hid::StopConsoleSixAxisSensor(Kernel::HLERequestContext& ctx) {
825 IPC::RequestParser rp{ctx}; 887 IPC::RequestParser rp{ctx};
826 const auto handle{rp.Pop<u32>()}; 888 const auto handle{rp.Pop<u32>()};
889 const auto applet_resource_user_id{rp.Pop<u64>()};
827 890
828 LOG_WARNING(Service_HID, "(STUBBED) called, handle={}", handle); 891 LOG_WARNING(Service_HID, "(STUBBED) called, handle={}, applet_resource_user_id={}", handle,
892 applet_resource_user_id);
829 893
830 IPC::ResponseBuilder rb{ctx, 2}; 894 IPC::ResponseBuilder rb{ctx, 2};
831 rb.Push(RESULT_SUCCESS); 895 rb.Push(RESULT_SUCCESS);
832} 896}
833 897
834void Hid::SetIsPalmaAllConnectable(Kernel::HLERequestContext& ctx) { 898void Hid::ActivateSevenSixAxisSensor(Kernel::HLERequestContext& ctx) {
835 IPC::RequestParser rp{ctx}; 899 IPC::RequestParser rp{ctx};
836 const auto applet_resource_user_id{rp.Pop<u64>()}; 900 const auto applet_resource_user_id{rp.Pop<u64>()};
837 const auto unknown{rp.Pop<u32>()};
838 901
839 LOG_WARNING(Service_HID, "(STUBBED) called, applet_resource_user_id={}, unknown={}", 902 LOG_WARNING(Service_HID, "(STUBBED) called, applet_resource_user_id={}",
840 applet_resource_user_id, unknown); 903 applet_resource_user_id);
841 904
842 IPC::ResponseBuilder rb{ctx, 2}; 905 IPC::ResponseBuilder rb{ctx, 2};
843 rb.Push(RESULT_SUCCESS); 906 rb.Push(RESULT_SUCCESS);
844} 907}
845 908
846void Hid::SetPalmaBoostMode(Kernel::HLERequestContext& ctx) { 909void Hid::StartSevenSixAxisSensor(Kernel::HLERequestContext& ctx) {
847 IPC::RequestParser rp{ctx}; 910 IPC::RequestParser rp{ctx};
848 const auto unknown{rp.Pop<u32>()}; 911 const auto applet_resource_user_id{rp.Pop<u64>()};
849 912
850 LOG_WARNING(Service_HID, "(STUBBED) called, unknown={}", unknown); 913 LOG_WARNING(Service_HID, "(STUBBED) called, applet_resource_user_id={}",
914 applet_resource_user_id);
851 915
852 IPC::ResponseBuilder rb{ctx, 2}; 916 IPC::ResponseBuilder rb{ctx, 2};
853 rb.Push(RESULT_SUCCESS); 917 rb.Push(RESULT_SUCCESS);
@@ -871,10 +935,46 @@ void Hid::InitializeSevenSixAxisSensor(Kernel::HLERequestContext& ctx) {
871 rb.Push(RESULT_SUCCESS); 935 rb.Push(RESULT_SUCCESS);
872} 936}
873 937
874void Hid::SendKeyboardLockKeyEvent(Kernel::HLERequestContext& ctx) { 938void Hid::FinalizeSevenSixAxisSensor(Kernel::HLERequestContext& ctx) {
875 IPC::RequestParser rp{ctx}; 939 IPC::RequestParser rp{ctx};
876 const auto flags{rp.Pop<u32>()}; 940 const auto applet_resource_user_id{rp.Pop<u64>()};
877 LOG_WARNING(Service_HID, "(STUBBED) called. flags={}", flags); 941
942 LOG_WARNING(Service_HID, "(STUBBED) called, applet_resource_user_id={}",
943 applet_resource_user_id);
944
945 IPC::ResponseBuilder rb{ctx, 2};
946 rb.Push(RESULT_SUCCESS);
947}
948
949void Hid::ResetSevenSixAxisSensorTimestamp(Kernel::HLERequestContext& ctx) {
950 IPC::RequestParser rp{ctx};
951 const auto applet_resource_user_id{rp.Pop<u64>()};
952
953 LOG_WARNING(Service_HID, "(STUBBED) called, applet_resource_user_id={}",
954 applet_resource_user_id);
955
956 IPC::ResponseBuilder rb{ctx, 2};
957 rb.Push(RESULT_SUCCESS);
958}
959
960void Hid::SetIsPalmaAllConnectable(Kernel::HLERequestContext& ctx) {
961 IPC::RequestParser rp{ctx};
962 const auto applet_resource_user_id{rp.Pop<u64>()};
963 const auto is_palma_all_connectable{rp.Pop<bool>()};
964
965 LOG_WARNING(Service_HID,
966 "(STUBBED) called, applet_resource_user_id={}, is_palma_all_connectable={}",
967 applet_resource_user_id, is_palma_all_connectable);
968
969 IPC::ResponseBuilder rb{ctx, 2};
970 rb.Push(RESULT_SUCCESS);
971}
972
973void Hid::SetPalmaBoostMode(Kernel::HLERequestContext& ctx) {
974 IPC::RequestParser rp{ctx};
975 const auto palma_boost_mode{rp.Pop<bool>()};
976
977 LOG_WARNING(Service_HID, "(STUBBED) called, palma_boost_mode={}", palma_boost_mode);
878 978
879 IPC::ResponseBuilder rb{ctx, 2}; 979 IPC::ResponseBuilder rb{ctx, 2};
880 rb.Push(RESULT_SUCCESS); 980 rb.Push(RESULT_SUCCESS);
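The three new drift-mode handlers above share one shape: pop the sensor handle and applet resource user id, hand the value to the NPad controller, and answer with RESULT_SUCCESS. Below is a minimal standalone sketch of the set/get/reset round trip they delegate to; the stub class and the specific enum values (Loose/Standard/Tight) are illustrative stand-ins rather than yuzu's actual Controller_NPad.

// Standalone sketch (stand-in types, not yuzu's actual classes) of the
// set/get/reset round trip the new drift-mode handlers delegate to.
#include <cstdint>
#include <iostream>

enum class GyroscopeZeroDriftMode : std::uint32_t { Loose = 0, Standard = 1, Tight = 2 };

class NpadControllerStub {
public:
    void SetGyroscopeZeroDriftMode(GyroscopeZeroDriftMode mode) { drift_mode = mode; }
    GyroscopeZeroDriftMode GetGyroscopeZeroDriftMode() const { return drift_mode; }

private:
    GyroscopeZeroDriftMode drift_mode = GyroscopeZeroDriftMode::Standard;
};

int main() {
    NpadControllerStub npad;

    // SetGyroscopeZeroDriftMode: the popped u32 is converted to the enum and stored.
    npad.SetGyroscopeZeroDriftMode(GyroscopeZeroDriftMode{2});

    // GetGyroscopeZeroDriftMode: the stored value is pushed back to the guest as a u32.
    std::cout << static_cast<std::uint32_t>(npad.GetGyroscopeZeroDriftMode()) << '\n'; // 2

    // ResetGyroscopeZeroDriftMode: the mode is forced back to Standard.
    npad.SetGyroscopeZeroDriftMode(GyroscopeZeroDriftMode::Standard);
    std::cout << static_cast<std::uint32_t>(npad.GetGyroscopeZeroDriftMode()) << '\n'; // 1
    return 0;
}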
diff --git a/src/core/hle/service/hid/hid.h b/src/core/hle/service/hid/hid.h
index c8ed4ad8b..6fb048360 100644
--- a/src/core/hle/service/hid/hid.h
+++ b/src/core/hle/service/hid/hid.h
@@ -86,14 +86,19 @@ public:
86private: 86private:
87 void CreateAppletResource(Kernel::HLERequestContext& ctx); 87 void CreateAppletResource(Kernel::HLERequestContext& ctx);
88 void ActivateXpad(Kernel::HLERequestContext& ctx); 88 void ActivateXpad(Kernel::HLERequestContext& ctx);
89 void GetXpadIDs(Kernel::HLERequestContext& ctx);
89 void ActivateDebugPad(Kernel::HLERequestContext& ctx); 90 void ActivateDebugPad(Kernel::HLERequestContext& ctx);
90 void ActivateTouchScreen(Kernel::HLERequestContext& ctx); 91 void ActivateTouchScreen(Kernel::HLERequestContext& ctx);
91 void ActivateMouse(Kernel::HLERequestContext& ctx); 92 void ActivateMouse(Kernel::HLERequestContext& ctx);
92 void ActivateKeyboard(Kernel::HLERequestContext& ctx); 93 void ActivateKeyboard(Kernel::HLERequestContext& ctx);
94 void SendKeyboardLockKeyEvent(Kernel::HLERequestContext& ctx);
93 void ActivateGesture(Kernel::HLERequestContext& ctx); 95 void ActivateGesture(Kernel::HLERequestContext& ctx);
94 void ActivateNpadWithRevision(Kernel::HLERequestContext& ctx); 96 void ActivateNpadWithRevision(Kernel::HLERequestContext& ctx);
95 void StartSixAxisSensor(Kernel::HLERequestContext& ctx); 97 void StartSixAxisSensor(Kernel::HLERequestContext& ctx);
98 void StopSixAxisSensor(Kernel::HLERequestContext& ctx);
96 void SetGyroscopeZeroDriftMode(Kernel::HLERequestContext& ctx); 99 void SetGyroscopeZeroDriftMode(Kernel::HLERequestContext& ctx);
100 void GetGyroscopeZeroDriftMode(Kernel::HLERequestContext& ctx);
101 void ResetGyroscopeZeroDriftMode(Kernel::HLERequestContext& ctx);
97 void IsSixAxisSensorAtRest(Kernel::HLERequestContext& ctx); 102 void IsSixAxisSensorAtRest(Kernel::HLERequestContext& ctx);
98 void SetSupportedNpadStyleSet(Kernel::HLERequestContext& ctx); 103 void SetSupportedNpadStyleSet(Kernel::HLERequestContext& ctx);
99 void GetSupportedNpadStyleSet(Kernel::HLERequestContext& ctx); 104 void GetSupportedNpadStyleSet(Kernel::HLERequestContext& ctx);
@@ -125,12 +130,15 @@ private:
125 void IsVibrationPermitted(Kernel::HLERequestContext& ctx); 130 void IsVibrationPermitted(Kernel::HLERequestContext& ctx);
126 void ActivateConsoleSixAxisSensor(Kernel::HLERequestContext& ctx); 131 void ActivateConsoleSixAxisSensor(Kernel::HLERequestContext& ctx);
127 void StartConsoleSixAxisSensor(Kernel::HLERequestContext& ctx); 132 void StartConsoleSixAxisSensor(Kernel::HLERequestContext& ctx);
128 void StopSixAxisSensor(Kernel::HLERequestContext& ctx); 133 void StopConsoleSixAxisSensor(Kernel::HLERequestContext& ctx);
129 void SetIsPalmaAllConnectable(Kernel::HLERequestContext& ctx); 134 void ActivateSevenSixAxisSensor(Kernel::HLERequestContext& ctx);
130 void SetPalmaBoostMode(Kernel::HLERequestContext& ctx); 135 void StartSevenSixAxisSensor(Kernel::HLERequestContext& ctx);
131 void StopSevenSixAxisSensor(Kernel::HLERequestContext& ctx); 136 void StopSevenSixAxisSensor(Kernel::HLERequestContext& ctx);
132 void InitializeSevenSixAxisSensor(Kernel::HLERequestContext& ctx); 137 void InitializeSevenSixAxisSensor(Kernel::HLERequestContext& ctx);
133 void SendKeyboardLockKeyEvent(Kernel::HLERequestContext& ctx); 138 void FinalizeSevenSixAxisSensor(Kernel::HLERequestContext& ctx);
139 void ResetSevenSixAxisSensorTimestamp(Kernel::HLERequestContext& ctx);
140 void SetIsPalmaAllConnectable(Kernel::HLERequestContext& ctx);
141 void SetPalmaBoostMode(Kernel::HLERequestContext& ctx);
134 142
135 std::shared_ptr<IAppletResource> applet_resource; 143 std::shared_ptr<IAppletResource> applet_resource;
136 Core::System& system; 144 Core::System& system;
diff --git a/src/core/hle/service/hid/irs.cpp b/src/core/hle/service/hid/irs.cpp
index 36ed6f7da..e82fd031b 100644
--- a/src/core/hle/service/hid/irs.cpp
+++ b/src/core/hle/service/hid/irs.cpp
@@ -98,7 +98,7 @@ void IRS::GetImageTransferProcessorState(Kernel::HLERequestContext& ctx) {
98 98
99 IPC::ResponseBuilder rb{ctx, 5}; 99 IPC::ResponseBuilder rb{ctx, 5};
100 rb.Push(RESULT_SUCCESS); 100 rb.Push(RESULT_SUCCESS);
101 rb.PushRaw<u64>(system.CoreTiming().GetTicks()); 101 rb.PushRaw<u64>(system.CoreTiming().GetCPUTicks());
102 rb.PushRaw<u32>(0); 102 rb.PushRaw<u32>(0);
103} 103}
104 104
diff --git a/src/core/hle/service/lbl/lbl.cpp b/src/core/hle/service/lbl/lbl.cpp
index e8f9f2d29..17350b403 100644
--- a/src/core/hle/service/lbl/lbl.cpp
+++ b/src/core/hle/service/lbl/lbl.cpp
@@ -47,6 +47,7 @@ public:
47 {26, &LBL::EnableVrMode, "EnableVrMode"}, 47 {26, &LBL::EnableVrMode, "EnableVrMode"},
48 {27, &LBL::DisableVrMode, "DisableVrMode"}, 48 {27, &LBL::DisableVrMode, "DisableVrMode"},
49 {28, &LBL::IsVrModeEnabled, "IsVrModeEnabled"}, 49 {28, &LBL::IsVrModeEnabled, "IsVrModeEnabled"},
50 {29, nullptr, "IsAutoBrightnessControlSupported"},
50 }; 51 };
51 // clang-format on 52 // clang-format on
52 53
diff --git a/src/core/hle/service/ldn/ldn.cpp b/src/core/hle/service/ldn/ldn.cpp
index 92adde6d4..49972cd69 100644
--- a/src/core/hle/service/ldn/ldn.cpp
+++ b/src/core/hle/service/ldn/ldn.cpp
@@ -69,6 +69,7 @@ public:
69 {101, nullptr, "GetNetworkInfoLatestUpdate"}, 69 {101, nullptr, "GetNetworkInfoLatestUpdate"},
70 {102, nullptr, "Scan"}, 70 {102, nullptr, "Scan"},
71 {103, nullptr, "ScanPrivate"}, 71 {103, nullptr, "ScanPrivate"},
72 {104, nullptr, "SetWirelessControllerRestriction"},
72 {200, nullptr, "OpenAccessPoint"}, 73 {200, nullptr, "OpenAccessPoint"},
73 {201, nullptr, "CloseAccessPoint"}, 74 {201, nullptr, "CloseAccessPoint"},
74 {202, nullptr, "CreateNetwork"}, 75 {202, nullptr, "CreateNetwork"},
diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp
index 6ad3be1b3..64a526b9e 100644
--- a/src/core/hle/service/ldr/ldr.cpp
+++ b/src/core/hle/service/ldr/ldr.cpp
@@ -39,42 +39,61 @@ constexpr ResultCode ERROR_NOT_INITIALIZED{ErrorModule::Loader, 87};
39constexpr std::size_t MAXIMUM_LOADED_RO{0x40}; 39constexpr std::size_t MAXIMUM_LOADED_RO{0x40};
40constexpr std::size_t MAXIMUM_MAP_RETRIES{0x200}; 40constexpr std::size_t MAXIMUM_MAP_RETRIES{0x200};
41 41
42constexpr std::size_t TEXT_INDEX{0};
43constexpr std::size_t RO_INDEX{1};
44constexpr std::size_t DATA_INDEX{2};
45
46struct NRRCertification {
47 u64_le application_id_mask;
48 u64_le application_id_pattern;
49 INSERT_PADDING_BYTES(0x10);
50 std::array<u8, 0x100> public_key; // Also known as modulus
51 std::array<u8, 0x100> signature;
52};
53static_assert(sizeof(NRRCertification) == 0x220, "NRRCertification has invalid size.");
54
42struct NRRHeader { 55struct NRRHeader {
43 u32_le magic; 56 u32_le magic;
44 INSERT_PADDING_BYTES(12); 57 u32_le certification_signature_key_generation; // 9.0.0+
45 u64_le title_id_mask; 58 INSERT_PADDING_WORDS(2);
46 u64_le title_id_pattern; 59 NRRCertification certification;
47 INSERT_PADDING_BYTES(16); 60 std::array<u8, 0x100> signature;
48 std::array<u8, 0x100> modulus; 61 u64_le application_id;
49 std::array<u8, 0x100> signature_1;
50 std::array<u8, 0x100> signature_2;
51 u64_le title_id;
52 u32_le size; 62 u32_le size;
53 INSERT_PADDING_BYTES(4); 63 u8 nrr_kind; // 7.0.0+
64 INSERT_PADDING_BYTES(3);
54 u32_le hash_offset; 65 u32_le hash_offset;
55 u32_le hash_count; 66 u32_le hash_count;
56 INSERT_PADDING_BYTES(8); 67 INSERT_PADDING_WORDS(2);
68};
69static_assert(sizeof(NRRHeader) == 0x350, "NRRHeader has invalid size.");
70
71struct SegmentHeader {
72 u32_le memory_offset;
73 u32_le memory_size;
57}; 74};
58static_assert(sizeof(NRRHeader) == 0x350, "NRRHeader has incorrect size."); 75static_assert(sizeof(SegmentHeader) == 0x8, "SegmentHeader has invalid size.");
59 76
60struct NROHeader { 77struct NROHeader {
78 // Switchbrew calls this "Start" (0x10)
61 INSERT_PADDING_WORDS(1); 79 INSERT_PADDING_WORDS(1);
62 u32_le mod_offset; 80 u32_le mod_offset;
63 INSERT_PADDING_WORDS(2); 81 INSERT_PADDING_WORDS(2);
82
83 // Switchbrew calls this "Header" (0x70)
64 u32_le magic; 84 u32_le magic;
65 u32_le version; 85 u32_le version;
66 u32_le nro_size; 86 u32_le nro_size;
67 u32_le flags; 87 u32_le flags;
68 u32_le text_offset; 88 // .text, .ro, .data
69 u32_le text_size; 89 std::array<SegmentHeader, 3> segment_headers;
70 u32_le ro_offset;
71 u32_le ro_size;
72 u32_le rw_offset;
73 u32_le rw_size;
74 u32_le bss_size; 90 u32_le bss_size;
75 INSERT_PADDING_WORDS(1); 91 INSERT_PADDING_WORDS(1);
76 std::array<u8, 0x20> build_id; 92 std::array<u8, 0x20> build_id;
77 INSERT_PADDING_BYTES(0x20); 93 u32_le dso_handle_offset;
94 INSERT_PADDING_WORDS(1);
95 // .apiInfo, .dynstr, .dynsym
96 std::array<SegmentHeader, 3> segment_headers_2;
78}; 97};
79static_assert(sizeof(NROHeader) == 0x80, "NROHeader has invalid size."); 98static_assert(sizeof(NROHeader) == 0x80, "NROHeader has invalid size.");
80 99
@@ -91,6 +110,7 @@ struct NROInfo {
91 std::size_t data_size{}; 110 std::size_t data_size{};
92 VAddr src_addr{}; 111 VAddr src_addr{};
93}; 112};
113static_assert(sizeof(NROInfo) == 0x60, "NROInfo has invalid size.");
94 114
95class DebugMonitor final : public ServiceFramework<DebugMonitor> { 115class DebugMonitor final : public ServiceFramework<DebugMonitor> {
96public: 116public:
@@ -226,11 +246,11 @@ public:
226 return; 246 return;
227 } 247 }
228 248
229 if (system.CurrentProcess()->GetTitleID() != header.title_id) { 249 if (system.CurrentProcess()->GetTitleID() != header.application_id) {
230 LOG_ERROR(Service_LDR, 250 LOG_ERROR(Service_LDR,
231 "Attempting to load NRR with title ID other than current process. (actual " 251 "Attempting to load NRR with title ID other than current process. (actual "
232 "{:016X})!", 252 "{:016X})!",
233 header.title_id); 253 header.application_id);
234 IPC::ResponseBuilder rb{ctx, 2}; 254 IPC::ResponseBuilder rb{ctx, 2};
235 rb.Push(ERROR_INVALID_NRR); 255 rb.Push(ERROR_INVALID_NRR);
236 return; 256 return;
@@ -348,10 +368,10 @@ public:
348 368
349 ResultCode LoadNro(Kernel::Process* process, const NROHeader& nro_header, VAddr nro_addr, 369 ResultCode LoadNro(Kernel::Process* process, const NROHeader& nro_header, VAddr nro_addr,
350 VAddr start) const { 370 VAddr start) const {
351 const VAddr text_start{start + nro_header.text_offset}; 371 const VAddr text_start{start + nro_header.segment_headers[TEXT_INDEX].memory_offset};
352 const VAddr ro_start{start + nro_header.ro_offset}; 372 const VAddr ro_start{start + nro_header.segment_headers[RO_INDEX].memory_offset};
353 const VAddr data_start{start + nro_header.rw_offset}; 373 const VAddr data_start{start + nro_header.segment_headers[DATA_INDEX].memory_offset};
354 const VAddr bss_start{data_start + nro_header.rw_size}; 374 const VAddr bss_start{data_start + nro_header.segment_headers[DATA_INDEX].memory_size};
355 const VAddr bss_end_addr{ 375 const VAddr bss_end_addr{
356 Common::AlignUp(bss_start + nro_header.bss_size, Kernel::Memory::PageSize)}; 376 Common::AlignUp(bss_start + nro_header.bss_size, Kernel::Memory::PageSize)};
357 377
@@ -360,9 +380,12 @@ public:
360 system.Memory().ReadBlock(src_addr, source_data.data(), source_data.size()); 380 system.Memory().ReadBlock(src_addr, source_data.data(), source_data.size());
361 system.Memory().WriteBlock(dst_addr, source_data.data(), source_data.size()); 381 system.Memory().WriteBlock(dst_addr, source_data.data(), source_data.size());
362 }}; 382 }};
363 CopyCode(nro_addr + nro_header.text_offset, text_start, nro_header.text_size); 383 CopyCode(nro_addr + nro_header.segment_headers[TEXT_INDEX].memory_offset, text_start,
364 CopyCode(nro_addr + nro_header.ro_offset, ro_start, nro_header.ro_size); 384 nro_header.segment_headers[TEXT_INDEX].memory_size);
365 CopyCode(nro_addr + nro_header.rw_offset, data_start, nro_header.rw_size); 385 CopyCode(nro_addr + nro_header.segment_headers[RO_INDEX].memory_offset, ro_start,
386 nro_header.segment_headers[RO_INDEX].memory_size);
387 CopyCode(nro_addr + nro_header.segment_headers[DATA_INDEX].memory_offset, data_start,
388 nro_header.segment_headers[DATA_INDEX].memory_size);
366 389
367 CASCADE_CODE(process->PageTable().SetCodeMemoryPermission( 390 CASCADE_CODE(process->PageTable().SetCodeMemoryPermission(
368 text_start, ro_start - text_start, Kernel::Memory::MemoryPermission::ReadAndExecute)); 391 text_start, ro_start - text_start, Kernel::Memory::MemoryPermission::ReadAndExecute));
@@ -484,9 +507,11 @@ public:
484 } 507 }
485 508
486 // Track the loaded NRO 509 // Track the loaded NRO
487 nro.insert_or_assign(*map_result, NROInfo{hash, *map_result, nro_size, bss_address, 510 nro.insert_or_assign(*map_result,
488 bss_size, header.text_size, header.ro_size, 511 NROInfo{hash, *map_result, nro_size, bss_address, bss_size,
489 header.rw_size, nro_address}); 512 header.segment_headers[TEXT_INDEX].memory_size,
513 header.segment_headers[RO_INDEX].memory_size,
514 header.segment_headers[DATA_INDEX].memory_size, nro_address});
490 515
491 // Invalidate JIT caches for the newly mapped process code 516 // Invalidate JIT caches for the newly mapped process code
492 system.InvalidateCpuInstructionCaches(); 517 system.InvalidateCpuInstructionCaches();
@@ -584,11 +609,21 @@ private:
584 static bool IsValidNRO(const NROHeader& header, u64 nro_size, u64 bss_size) { 609 static bool IsValidNRO(const NROHeader& header, u64 nro_size, u64 bss_size) {
585 return header.magic == Common::MakeMagic('N', 'R', 'O', '0') && 610 return header.magic == Common::MakeMagic('N', 'R', 'O', '0') &&
586 header.nro_size == nro_size && header.bss_size == bss_size && 611 header.nro_size == nro_size && header.bss_size == bss_size &&
587 header.ro_offset == header.text_offset + header.text_size && 612
588 header.rw_offset == header.ro_offset + header.ro_size && 613 header.segment_headers[RO_INDEX].memory_offset ==
589 nro_size == header.rw_offset + header.rw_size && 614 header.segment_headers[TEXT_INDEX].memory_offset +
590 Common::Is4KBAligned(header.text_size) && Common::Is4KBAligned(header.ro_size) && 615 header.segment_headers[TEXT_INDEX].memory_size &&
591 Common::Is4KBAligned(header.rw_size); 616
617 header.segment_headers[DATA_INDEX].memory_offset ==
618 header.segment_headers[RO_INDEX].memory_offset +
619 header.segment_headers[RO_INDEX].memory_size &&
620
621 nro_size == header.segment_headers[DATA_INDEX].memory_offset +
622 header.segment_headers[DATA_INDEX].memory_size &&
623
624 Common::Is4KBAligned(header.segment_headers[TEXT_INDEX].memory_size) &&
625 Common::Is4KBAligned(header.segment_headers[RO_INDEX].memory_size) &&
626 Common::Is4KBAligned(header.segment_headers[DATA_INDEX].memory_size);
592 } 627 }
593 Core::System& system; 628 Core::System& system;
594}; 629};
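With the segment headers collected into an array, the reworked IsValidNRO boils down to three contiguity checks plus page alignment: .ro must begin where .text ends, .data where .ro ends, the image must end with .data, and each segment size must be 4 KiB aligned. A self-contained sketch of just that arithmetic, using the same TEXT_INDEX/RO_INDEX/DATA_INDEX ordering (the magic, nro_size and bss_size comparisons from the real check are left out):

// Self-contained sketch of the segment checks performed by IsValidNRO above.
// SegmentHeader mirrors the struct added in this change; everything else is a stand-in.
#include <array>
#include <cstddef>
#include <cstdint>
#include <iostream>

struct SegmentHeader {
    std::uint32_t memory_offset;
    std::uint32_t memory_size;
};

constexpr std::size_t TEXT_INDEX = 0;
constexpr std::size_t RO_INDEX = 1;
constexpr std::size_t DATA_INDEX = 2;

constexpr bool Is4KBAligned(std::uint64_t value) {
    return (value & 0xFFF) == 0;
}

// The three segments must be contiguous, cover the whole image, and be page-aligned.
bool SegmentsAreValid(const std::array<SegmentHeader, 3>& segments, std::uint64_t nro_size) {
    const auto& text = segments[TEXT_INDEX];
    const auto& ro = segments[RO_INDEX];
    const auto& data = segments[DATA_INDEX];
    return ro.memory_offset == text.memory_offset + text.memory_size &&
           data.memory_offset == ro.memory_offset + ro.memory_size &&
           nro_size == data.memory_offset + data.memory_size &&
           Is4KBAligned(text.memory_size) && Is4KBAligned(ro.memory_size) &&
           Is4KBAligned(data.memory_size);
}

int main() {
    const std::array<SegmentHeader, 3> segments{{{0x0, 0x2000}, {0x2000, 0x1000}, {0x3000, 0x1000}}};
    std::cout << std::boolalpha << SegmentsAreValid(segments, 0x4000) << '\n'; // true
    return 0;
}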
diff --git a/src/core/hle/service/lm/manager.cpp b/src/core/hle/service/lm/manager.cpp
index b67081b86..3ee2374e7 100644
--- a/src/core/hle/service/lm/manager.cpp
+++ b/src/core/hle/service/lm/manager.cpp
@@ -86,7 +86,8 @@ std::string FormatField(Field type, const std::vector<u8>& data) {
86 return Common::StringFromFixedZeroTerminatedBuffer( 86 return Common::StringFromFixedZeroTerminatedBuffer(
87 reinterpret_cast<const char*>(data.data()), data.size()); 87 reinterpret_cast<const char*>(data.data()), data.size());
88 default: 88 default:
89 UNIMPLEMENTED(); 89 UNIMPLEMENTED_MSG("Unimplemented field type={}", type);
90 return "";
90 } 91 }
91} 92}
92 93
diff --git a/src/core/hle/service/mig/mig.cpp b/src/core/hle/service/mig/mig.cpp
index d16367f2c..113a4665c 100644
--- a/src/core/hle/service/mig/mig.cpp
+++ b/src/core/hle/service/mig/mig.cpp
@@ -20,6 +20,12 @@ public:
20 {101, nullptr, "ResumeServer"}, 20 {101, nullptr, "ResumeServer"},
21 {200, nullptr, "CreateClient"}, 21 {200, nullptr, "CreateClient"},
22 {201, nullptr, "ResumeClient"}, 22 {201, nullptr, "ResumeClient"},
23 {1001, nullptr, "Unknown1001"},
24 {1010, nullptr, "Unknown1010"},
25 {1100, nullptr, "Unknown1100"},
26 {1101, nullptr, "Unknown1101"},
27 {1200, nullptr, "Unknown1200"},
28 {1201, nullptr, "Unknown1201"}
23 }; 29 };
24 // clang-format on 30 // clang-format on
25 31
diff --git a/src/core/hle/service/mm/mm_u.cpp b/src/core/hle/service/mm/mm_u.cpp
index def63dc8a..25c24e537 100644
--- a/src/core/hle/service/mm/mm_u.cpp
+++ b/src/core/hle/service/mm/mm_u.cpp
@@ -14,14 +14,14 @@ public:
14 explicit MM_U() : ServiceFramework{"mm:u"} { 14 explicit MM_U() : ServiceFramework{"mm:u"} {
15 // clang-format off 15 // clang-format off
16 static const FunctionInfo functions[] = { 16 static const FunctionInfo functions[] = {
17 {0, &MM_U::Initialize, "Initialize"}, 17 {0, &MM_U::InitializeOld, "InitializeOld"},
18 {1, &MM_U::Finalize, "Finalize"}, 18 {1, &MM_U::FinalizeOld, "FinalizeOld"},
19 {2, &MM_U::SetAndWait, "SetAndWait"}, 19 {2, &MM_U::SetAndWaitOld, "SetAndWaitOld"},
20 {3, &MM_U::Get, "Get"}, 20 {3, &MM_U::GetOld, "GetOld"},
21 {4, &MM_U::InitializeWithId, "InitializeWithId"}, 21 {4, &MM_U::Initialize, "Initialize"},
22 {5, &MM_U::FinalizeWithId, "FinalizeWithId"}, 22 {5, &MM_U::Finalize, "Finalize"},
23 {6, &MM_U::SetAndWaitWithId, "SetAndWaitWithId"}, 23 {6, &MM_U::SetAndWait, "SetAndWait"},
24 {7, &MM_U::GetWithId, "GetWithId"}, 24 {7, &MM_U::Get, "Get"},
25 }; 25 };
26 // clang-format on 26 // clang-format on
27 27
@@ -29,21 +29,21 @@ public:
29 } 29 }
30 30
31private: 31private:
32 void Initialize(Kernel::HLERequestContext& ctx) { 32 void InitializeOld(Kernel::HLERequestContext& ctx) {
33 LOG_WARNING(Service_MM, "(STUBBED) called"); 33 LOG_WARNING(Service_MM, "(STUBBED) called");
34 34
35 IPC::ResponseBuilder rb{ctx, 2}; 35 IPC::ResponseBuilder rb{ctx, 2};
36 rb.Push(RESULT_SUCCESS); 36 rb.Push(RESULT_SUCCESS);
37 } 37 }
38 38
39 void Finalize(Kernel::HLERequestContext& ctx) { 39 void FinalizeOld(Kernel::HLERequestContext& ctx) {
40 LOG_WARNING(Service_MM, "(STUBBED) called"); 40 LOG_WARNING(Service_MM, "(STUBBED) called");
41 41
42 IPC::ResponseBuilder rb{ctx, 2}; 42 IPC::ResponseBuilder rb{ctx, 2};
43 rb.Push(RESULT_SUCCESS); 43 rb.Push(RESULT_SUCCESS);
44 } 44 }
45 45
46 void SetAndWait(Kernel::HLERequestContext& ctx) { 46 void SetAndWaitOld(Kernel::HLERequestContext& ctx) {
47 IPC::RequestParser rp{ctx}; 47 IPC::RequestParser rp{ctx};
48 min = rp.Pop<u32>(); 48 min = rp.Pop<u32>();
49 max = rp.Pop<u32>(); 49 max = rp.Pop<u32>();
@@ -54,7 +54,7 @@ private:
54 rb.Push(RESULT_SUCCESS); 54 rb.Push(RESULT_SUCCESS);
55 } 55 }
56 56
57 void Get(Kernel::HLERequestContext& ctx) { 57 void GetOld(Kernel::HLERequestContext& ctx) {
58 LOG_WARNING(Service_MM, "(STUBBED) called"); 58 LOG_WARNING(Service_MM, "(STUBBED) called");
59 59
60 IPC::ResponseBuilder rb{ctx, 3}; 60 IPC::ResponseBuilder rb{ctx, 3};
@@ -62,7 +62,7 @@ private:
62 rb.Push(current); 62 rb.Push(current);
63 } 63 }
64 64
65 void InitializeWithId(Kernel::HLERequestContext& ctx) { 65 void Initialize(Kernel::HLERequestContext& ctx) {
66 LOG_WARNING(Service_MM, "(STUBBED) called"); 66 LOG_WARNING(Service_MM, "(STUBBED) called");
67 67
68 IPC::ResponseBuilder rb{ctx, 3}; 68 IPC::ResponseBuilder rb{ctx, 3};
@@ -70,14 +70,14 @@ private:
70 rb.Push<u32>(id); // Any non zero value 70 rb.Push<u32>(id); // Any non zero value
71 } 71 }
72 72
73 void FinalizeWithId(Kernel::HLERequestContext& ctx) { 73 void Finalize(Kernel::HLERequestContext& ctx) {
74 LOG_WARNING(Service_MM, "(STUBBED) called"); 74 LOG_WARNING(Service_MM, "(STUBBED) called");
75 75
76 IPC::ResponseBuilder rb{ctx, 2}; 76 IPC::ResponseBuilder rb{ctx, 2};
77 rb.Push(RESULT_SUCCESS); 77 rb.Push(RESULT_SUCCESS);
78 } 78 }
79 79
80 void SetAndWaitWithId(Kernel::HLERequestContext& ctx) { 80 void SetAndWait(Kernel::HLERequestContext& ctx) {
81 IPC::RequestParser rp{ctx}; 81 IPC::RequestParser rp{ctx};
82 u32 input_id = rp.Pop<u32>(); 82 u32 input_id = rp.Pop<u32>();
83 min = rp.Pop<u32>(); 83 min = rp.Pop<u32>();
@@ -90,7 +90,7 @@ private:
90 rb.Push(RESULT_SUCCESS); 90 rb.Push(RESULT_SUCCESS);
91 } 91 }
92 92
93 void GetWithId(Kernel::HLERequestContext& ctx) { 93 void Get(Kernel::HLERequestContext& ctx) {
94 LOG_WARNING(Service_MM, "(STUBBED) called"); 94 LOG_WARNING(Service_MM, "(STUBBED) called");
95 95
96 IPC::ResponseBuilder rb{ctx, 3}; 96 IPC::ResponseBuilder rb{ctx, 3};
diff --git a/src/core/hle/service/ncm/ncm.cpp b/src/core/hle/service/ncm/ncm.cpp
index ec9aae04a..e38dea1f4 100644
--- a/src/core/hle/service/ncm/ncm.cpp
+++ b/src/core/hle/service/ncm/ncm.cpp
@@ -28,16 +28,16 @@ public:
28 {7, nullptr, "ResolveApplicationLegalInformationPath"}, 28 {7, nullptr, "ResolveApplicationLegalInformationPath"},
29 {8, nullptr, "RedirectApplicationLegalInformationPath"}, 29 {8, nullptr, "RedirectApplicationLegalInformationPath"},
30 {9, nullptr, "Refresh"}, 30 {9, nullptr, "Refresh"},
31 {10, nullptr, "RedirectProgramPath2"}, 31 {10, nullptr, "RedirectApplicationProgramPath"},
32 {11, nullptr, "Refresh2"}, 32 {11, nullptr, "ClearApplicationRedirection"},
33 {12, nullptr, "DeleteProgramPath"}, 33 {12, nullptr, "EraseProgramRedirection"},
34 {13, nullptr, "DeleteApplicationControlPath"}, 34 {13, nullptr, "EraseApplicationControlRedirection"},
35 {14, nullptr, "DeleteApplicationHtmlDocumentPath"}, 35 {14, nullptr, "EraseApplicationHtmlDocumentRedirection"},
36 {15, nullptr, "DeleteApplicationLegalInformationPath"}, 36 {15, nullptr, "EraseApplicationLegalInformationRedirection"},
37 {16, nullptr, ""}, 37 {16, nullptr, "ResolveProgramPathForDebug"},
38 {17, nullptr, ""}, 38 {17, nullptr, "RedirectProgramPathForDebug"},
39 {18, nullptr, ""}, 39 {18, nullptr, "RedirectApplicationProgramPathForDebug"},
40 {19, nullptr, ""}, 40 {19, nullptr, "EraseProgramRedirectionForDebug"},
41 }; 41 };
42 // clang-format on 42 // clang-format on
43 43
diff --git a/src/core/hle/service/nfc/nfc.cpp b/src/core/hle/service/nfc/nfc.cpp
index b7b34ce7e..780ea30fe 100644
--- a/src/core/hle/service/nfc/nfc.cpp
+++ b/src/core/hle/service/nfc/nfc.cpp
@@ -198,9 +198,9 @@ public:
198 static const FunctionInfo functions[] = { 198 static const FunctionInfo functions[] = {
199 {0, nullptr, "Initialize"}, 199 {0, nullptr, "Initialize"},
200 {1, nullptr, "Finalize"}, 200 {1, nullptr, "Finalize"},
201 {2, nullptr, "GetState"}, 201 {2, nullptr, "GetStateOld"},
202 {3, nullptr, "IsNfcEnabled"}, 202 {3, nullptr, "IsNfcEnabledOld"},
203 {100, nullptr, "SetNfcEnabled"}, 203 {100, nullptr, "SetNfcEnabledOld"},
204 {400, nullptr, "InitializeSystem"}, 204 {400, nullptr, "InitializeSystem"},
205 {401, nullptr, "FinalizeSystem"}, 205 {401, nullptr, "FinalizeSystem"},
206 {402, nullptr, "GetState"}, 206 {402, nullptr, "GetState"},
diff --git a/src/core/hle/service/ns/ns.cpp b/src/core/hle/service/ns/ns.cpp
index 3e4dd2f7a..886450be2 100644
--- a/src/core/hle/service/ns/ns.cpp
+++ b/src/core/hle/service/ns/ns.cpp
@@ -366,7 +366,8 @@ ResultVal<u8> IApplicationManagerInterface::GetApplicationDesiredLanguage(
366 LOG_DEBUG(Service_NS, "called with supported_languages={:08X}", supported_languages); 366 LOG_DEBUG(Service_NS, "called with supported_languages={:08X}", supported_languages);
367 367
368 // Get language code from settings 368 // Get language code from settings
369 const auto language_code = Set::GetLanguageCodeFromIndex(Settings::values.language_index); 369 const auto language_code =
370 Set::GetLanguageCodeFromIndex(Settings::values.language_index.GetValue());
370 371
371 // Convert to application language, get priority list 372 // Convert to application language, get priority list
372 const auto application_language = ConvertToApplicationLanguage(language_code); 373 const auto application_language = ConvertToApplicationLanguage(language_code);
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
index cc2192e5c..fba89e7a6 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
@@ -25,7 +25,7 @@ u32 nvhost_ctrl_gpu::ioctl(Ioctl command, const std::vector<u8>& input,
25 case IoctlCommand::IocGetCharacteristicsCommand: 25 case IoctlCommand::IocGetCharacteristicsCommand:
26 return GetCharacteristics(input, output, output2, version); 26 return GetCharacteristics(input, output, output2, version);
27 case IoctlCommand::IocGetTPCMasksCommand: 27 case IoctlCommand::IocGetTPCMasksCommand:
28 return GetTPCMasks(input, output); 28 return GetTPCMasks(input, output, output2, version);
29 case IoctlCommand::IocGetActiveSlotMaskCommand: 29 case IoctlCommand::IocGetActiveSlotMaskCommand:
30 return GetActiveSlotMask(input, output); 30 return GetActiveSlotMask(input, output);
31 case IoctlCommand::IocZcullGetCtxSizeCommand: 31 case IoctlCommand::IocZcullGetCtxSizeCommand:
@@ -98,17 +98,22 @@ u32 nvhost_ctrl_gpu::GetCharacteristics(const std::vector<u8>& input, std::vecto
98 return 0; 98 return 0;
99} 99}
100 100
101u32 nvhost_ctrl_gpu::GetTPCMasks(const std::vector<u8>& input, std::vector<u8>& output) { 101u32 nvhost_ctrl_gpu::GetTPCMasks(const std::vector<u8>& input, std::vector<u8>& output,
102 std::vector<u8>& output2, IoctlVersion version) {
102 IoctlGpuGetTpcMasksArgs params{}; 103 IoctlGpuGetTpcMasksArgs params{};
103 std::memcpy(&params, input.data(), input.size()); 104 std::memcpy(&params, input.data(), input.size());
104 LOG_INFO(Service_NVDRV, "called, mask=0x{:X}, mask_buf_addr=0x{:X}", params.mask_buf_size, 105 LOG_DEBUG(Service_NVDRV, "called, mask_buffer_size=0x{:X}", params.mask_buffer_size);
105 params.mask_buf_addr); 106 if (params.mask_buffer_size != 0) {
106 // TODO(ogniK): Confirm value on hardware 107 params.tcp_mask = 3;
107 if (params.mask_buf_size) 108 }
108 params.tpc_mask_size = 4 * 1; // 4 * num_gpc 109
109 else 110 if (version == IoctlVersion::Version3) {
110 params.tpc_mask_size = 0; 111 std::memcpy(output.data(), input.data(), output.size());
111 std::memcpy(output.data(), &params, sizeof(params)); 112 std::memcpy(output2.data(), &params.tcp_mask, output2.size());
113 } else {
114 std::memcpy(output.data(), &params, output.size());
115 }
116
112 return 0; 117 return 0;
113} 118}
114 119
@@ -195,8 +200,7 @@ u32 nvhost_ctrl_gpu::GetGpuTime(const std::vector<u8>& input, std::vector<u8>& o
195 200
196 IoctlGetGpuTime params{}; 201 IoctlGetGpuTime params{};
197 std::memcpy(&params, input.data(), input.size()); 202 std::memcpy(&params, input.data(), input.size());
198 const auto ns = Core::Timing::CyclesToNs(system.CoreTiming().GetTicks()); 203 params.gpu_time = static_cast<u64_le>(system.CoreTiming().GetGlobalTimeNs().count());
199 params.gpu_time = static_cast<u64_le>(ns.count());
200 std::memcpy(output.data(), &params, output.size()); 204 std::memcpy(output.data(), &params, output.size());
201 return 0; 205 return 0;
202} 206}
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
index 07b644ec5..ef60f72ce 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
@@ -92,16 +92,11 @@ private:
92 "IoctlCharacteristics is incorrect size"); 92 "IoctlCharacteristics is incorrect size");
93 93
94 struct IoctlGpuGetTpcMasksArgs { 94 struct IoctlGpuGetTpcMasksArgs {
95 /// [in] TPC mask buffer size reserved by userspace. Should be at least 95 u32_le mask_buffer_size{};
96 /// sizeof(__u32) * fls(gpc_mask) to receive TPC mask for each GPC. 96 INSERT_PADDING_WORDS(1);
97 /// [out] full kernel buffer size 97 u64_le mask_buffer_address{};
98 u32_le mask_buf_size; 98 u32_le tcp_mask{};
99 u32_le reserved; 99 INSERT_PADDING_WORDS(1);
100
101 /// [in] pointer to TPC mask buffer. It will receive one 32-bit TPC mask per GPC or 0 if
102 /// GPC is not enabled or not present. This parameter is ignored if mask_buf_size is 0.
103 u64_le mask_buf_addr;
104 u64_le tpc_mask_size; // Nintendo add this?
105 }; 100 };
106 static_assert(sizeof(IoctlGpuGetTpcMasksArgs) == 24, 101 static_assert(sizeof(IoctlGpuGetTpcMasksArgs) == 24,
107 "IoctlGpuGetTpcMasksArgs is incorrect size"); 102 "IoctlGpuGetTpcMasksArgs is incorrect size");
@@ -166,7 +161,8 @@ private:
166 161
167 u32 GetCharacteristics(const std::vector<u8>& input, std::vector<u8>& output, 162 u32 GetCharacteristics(const std::vector<u8>& input, std::vector<u8>& output,
168 std::vector<u8>& output2, IoctlVersion version); 163 std::vector<u8>& output2, IoctlVersion version);
169 u32 GetTPCMasks(const std::vector<u8>& input, std::vector<u8>& output); 164 u32 GetTPCMasks(const std::vector<u8>& input, std::vector<u8>& output, std::vector<u8>& output2,
165 IoctlVersion version);
170 u32 GetActiveSlotMask(const std::vector<u8>& input, std::vector<u8>& output); 166 u32 GetActiveSlotMask(const std::vector<u8>& input, std::vector<u8>& output);
171 u32 ZCullGetCtxSize(const std::vector<u8>& input, std::vector<u8>& output); 167 u32 ZCullGetCtxSize(const std::vector<u8>& input, std::vector<u8>& output);
172 u32 ZCullGetInfo(const std::vector<u8>& input, std::vector<u8>& output); 168 u32 ZCullGetInfo(const std::vector<u8>& input, std::vector<u8>& output);
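The rewritten GetTPCMasks reads the 24-byte argument block, fills in the mask when the caller supplied a buffer, and then writes it back differently depending on the ioctl version: version 3 echoes the arguments through the first output buffer and returns the mask through the second, while older versions return the whole struct in a single buffer. A compact sketch of that flow with plain byte vectors; the IoctlVersion stand-in below only models the version-3/older split, and the field names (including the tcp_mask spelling) follow the struct as declared in this change.

// Sketch of the version-dependent output handling in GetTPCMasks, using plain
// std::vector buffers in place of the nvdrv ioctl plumbing.
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <vector>

// Stand-in for nvdrv's IoctlVersion; only the distinction used by GetTPCMasks matters here.
enum class IoctlVersion { Older, Version3 };

struct IoctlGpuGetTpcMasksArgs {
    std::uint32_t mask_buffer_size{};
    std::uint32_t padding0{};
    std::uint64_t mask_buffer_address{};
    std::uint32_t tcp_mask{};
    std::uint32_t padding1{};
};
static_assert(sizeof(IoctlGpuGetTpcMasksArgs) == 24, "IoctlGpuGetTpcMasksArgs is incorrect size");

std::uint32_t GetTPCMasks(const std::vector<std::uint8_t>& input, std::vector<std::uint8_t>& output,
                          std::vector<std::uint8_t>& output2, IoctlVersion version) {
    IoctlGpuGetTpcMasksArgs params{};
    std::memcpy(&params, input.data(), std::min(input.size(), sizeof(params)));

    if (params.mask_buffer_size != 0) {
        params.tcp_mask = 3; // constant mask reported by the change above
    }

    if (version == IoctlVersion::Version3) {
        // Version 3 echoes the input args and returns the mask through the second buffer.
        std::memcpy(output.data(), input.data(), output.size());
        std::memcpy(output2.data(), &params.tcp_mask, output2.size());
    } else {
        // Older versions return the whole struct, mask included, in the single output buffer.
        std::memcpy(output.data(), &params, output.size());
    }
    return 0;
}

int main() {
    IoctlGpuGetTpcMasksArgs in{};
    in.mask_buffer_size = 4;
    std::vector<std::uint8_t> input(sizeof(in)), output(sizeof(in)), output2(sizeof(std::uint32_t));
    std::memcpy(input.data(), &in, sizeof(in));
    GetTPCMasks(input, output, output2, IoctlVersion::Version3);
    return 0;
}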
diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp
index 437bc5dee..2f44d3779 100644
--- a/src/core/hle/service/nvflinger/nvflinger.cpp
+++ b/src/core/hle/service/nvflinger/nvflinger.cpp
@@ -9,6 +9,7 @@
9#include "common/logging/log.h" 9#include "common/logging/log.h"
10#include "common/microprofile.h" 10#include "common/microprofile.h"
11#include "common/scope_exit.h" 11#include "common/scope_exit.h"
12#include "common/thread.h"
12#include "core/core.h" 13#include "core/core.h"
13#include "core/core_timing.h" 14#include "core/core_timing.h"
14#include "core/core_timing_util.h" 15#include "core/core_timing_util.h"
@@ -27,8 +28,35 @@
27 28
28namespace Service::NVFlinger { 29namespace Service::NVFlinger {
29 30
30constexpr s64 frame_ticks = static_cast<s64>(Core::Hardware::BASE_CLOCK_RATE / 60); 31constexpr s64 frame_ticks = static_cast<s64>(1000000000 / 60);
31constexpr s64 frame_ticks_30fps = static_cast<s64>(Core::Hardware::BASE_CLOCK_RATE / 30); 32constexpr s64 frame_ticks_30fps = static_cast<s64>(1000000000 / 30);
33
34void NVFlinger::VSyncThread(NVFlinger& nv_flinger) {
35 nv_flinger.SplitVSync();
36}
37
38void NVFlinger::SplitVSync() {
39 system.RegisterHostThread();
40 std::string name = "yuzu:VSyncThread";
41 MicroProfileOnThreadCreate(name.c_str());
42 Common::SetCurrentThreadName(name.c_str());
43 Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
44 s64 delay = 0;
45 while (is_running) {
46 guard->lock();
47 const s64 time_start = system.CoreTiming().GetGlobalTimeNs().count();
48 Compose();
49 const auto ticks = GetNextTicks();
50 const s64 time_end = system.CoreTiming().GetGlobalTimeNs().count();
51 const s64 time_passed = time_end - time_start;
52 const s64 next_time = std::max<s64>(0, ticks - time_passed - delay);
53 guard->unlock();
54 if (next_time > 0) {
55 wait_event->WaitFor(std::chrono::nanoseconds{next_time});
56 }
57 delay = (system.CoreTiming().GetGlobalTimeNs().count() - time_end) - next_time;
58 }
59}
32 60
33NVFlinger::NVFlinger(Core::System& system) : system(system) { 61NVFlinger::NVFlinger(Core::System& system) : system(system) {
34 displays.emplace_back(0, "Default", system); 62 displays.emplace_back(0, "Default", system);
@@ -36,22 +64,36 @@ NVFlinger::NVFlinger(Core::System& system) : system(system) {
36 displays.emplace_back(2, "Edid", system); 64 displays.emplace_back(2, "Edid", system);
37 displays.emplace_back(3, "Internal", system); 65 displays.emplace_back(3, "Internal", system);
38 displays.emplace_back(4, "Null", system); 66 displays.emplace_back(4, "Null", system);
67 guard = std::make_shared<std::mutex>();
39 68
40 // Schedule the screen composition events 69 // Schedule the screen composition events
41 composition_event = 70 composition_event =
42 Core::Timing::CreateEvent("ScreenComposition", [this](u64 userdata, s64 cycles_late) { 71 Core::Timing::CreateEvent("ScreenComposition", [this](u64 userdata, s64 ns_late) {
72 Lock();
43 Compose(); 73 Compose();
44 const auto ticks = 74 const auto ticks = GetNextTicks();
45 Settings::values.force_30fps_mode ? frame_ticks_30fps : GetNextTicks(); 75 this->system.CoreTiming().ScheduleEvent(std::max<s64>(0LL, ticks - ns_late),
46 this->system.CoreTiming().ScheduleEvent(std::max<s64>(0LL, ticks - cycles_late),
47 composition_event); 76 composition_event);
48 }); 77 });
49 78 if (system.IsMulticore()) {
50 system.CoreTiming().ScheduleEvent(frame_ticks, composition_event); 79 is_running = true;
80 wait_event = std::make_unique<Common::Event>();
81 vsync_thread = std::make_unique<std::thread>(VSyncThread, std::ref(*this));
82 } else {
83 system.CoreTiming().ScheduleEvent(frame_ticks, composition_event);
84 }
51} 85}
52 86
53NVFlinger::~NVFlinger() { 87NVFlinger::~NVFlinger() {
54 system.CoreTiming().UnscheduleEvent(composition_event, 0); 88 if (system.IsMulticore()) {
89 is_running = false;
90 wait_event->Set();
91 vsync_thread->join();
92 vsync_thread.reset();
93 wait_event.reset();
94 } else {
95 system.CoreTiming().UnscheduleEvent(composition_event, 0);
96 }
55} 97}
56 98
57void NVFlinger::SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance) { 99void NVFlinger::SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance) {
@@ -199,10 +241,12 @@ void NVFlinger::Compose() {
199 241
200 auto& gpu = system.GPU(); 242 auto& gpu = system.GPU();
201 const auto& multi_fence = buffer->get().multi_fence; 243 const auto& multi_fence = buffer->get().multi_fence;
244 guard->unlock();
202 for (u32 fence_id = 0; fence_id < multi_fence.num_fences; fence_id++) { 245 for (u32 fence_id = 0; fence_id < multi_fence.num_fences; fence_id++) {
203 const auto& fence = multi_fence.fences[fence_id]; 246 const auto& fence = multi_fence.fences[fence_id];
204 gpu.WaitFence(fence.id, fence.value); 247 gpu.WaitFence(fence.id, fence.value);
205 } 248 }
249 guard->lock();
206 250
207 MicroProfileFlip(); 251 MicroProfileFlip();
208 252
@@ -223,7 +267,7 @@ void NVFlinger::Compose() {
223 267
224s64 NVFlinger::GetNextTicks() const { 268s64 NVFlinger::GetNextTicks() const {
225 constexpr s64 max_hertz = 120LL; 269 constexpr s64 max_hertz = 120LL;
226 return (Core::Hardware::BASE_CLOCK_RATE * (1LL << swap_interval)) / max_hertz; 270 return (1000000000 * (1LL << swap_interval)) / max_hertz;
227} 271}
228 272
229} // namespace Service::NVFlinger 273} // namespace Service::NVFlinger
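In multicore mode vsync pacing moves onto a dedicated host thread: each iteration times Compose(), sleeps for whatever is left of the frame budget, and carries the overshoot of that wait into the next frame as a correction. A self-contained sketch of the pacing arithmetic, assuming a fixed 60 Hz budget and using std::this_thread in place of the Common::Event wait:

// Sketch of the frame-pacing loop used by SplitVSync: budget minus work minus the
// previous wait's overshoot decides how long to sleep before the next Compose().
#include <algorithm>
#include <chrono>
#include <cstdint>
#include <thread>

using Clock = std::chrono::steady_clock;

std::int64_t NowNs() {
    return std::chrono::duration_cast<std::chrono::nanoseconds>(
               Clock::now().time_since_epoch()).count();
}

void Compose() {
    // Stand-in for NVFlinger::Compose(); pretend composition costs about 2 ms.
    std::this_thread::sleep_for(std::chrono::milliseconds(2));
}

int main() {
    constexpr std::int64_t frame_budget_ns = 1'000'000'000 / 60;
    std::int64_t delay = 0; // overshoot of the previous wait

    for (int frame = 0; frame < 10; ++frame) {
        const std::int64_t time_start = NowNs();
        Compose();
        const std::int64_t time_end = NowNs();
        const std::int64_t time_passed = time_end - time_start;

        // Sleep only for what is left of the budget, minus last frame's overshoot.
        const std::int64_t next_time =
            std::max<std::int64_t>(0, frame_budget_ns - time_passed - delay);
        if (next_time > 0) {
            std::this_thread::sleep_for(std::chrono::nanoseconds(next_time));
        }

        // How much longer than requested the wait actually took.
        delay = (NowNs() - time_end) - next_time;
    }
    return 0;
}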
diff --git a/src/core/hle/service/nvflinger/nvflinger.h b/src/core/hle/service/nvflinger/nvflinger.h
index 57a21f33b..e4959a9af 100644
--- a/src/core/hle/service/nvflinger/nvflinger.h
+++ b/src/core/hle/service/nvflinger/nvflinger.h
@@ -4,15 +4,22 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <atomic>
7#include <memory> 8#include <memory>
9#include <mutex>
8#include <optional> 10#include <optional>
9#include <string> 11#include <string>
10#include <string_view> 12#include <string_view>
13#include <thread>
11#include <vector> 14#include <vector>
12 15
13#include "common/common_types.h" 16#include "common/common_types.h"
14#include "core/hle/kernel/object.h" 17#include "core/hle/kernel/object.h"
15 18
19namespace Common {
20class Event;
21} // namespace Common
22
16namespace Core::Timing { 23namespace Core::Timing {
17class CoreTiming; 24class CoreTiming;
18struct EventType; 25struct EventType;
@@ -79,6 +86,10 @@ public:
79 86
80 s64 GetNextTicks() const; 87 s64 GetNextTicks() const;
81 88
89 std::unique_lock<std::mutex> Lock() {
90 return std::unique_lock{*guard};
91 }
92
82private: 93private:
83 /// Finds the display identified by the specified ID. 94 /// Finds the display identified by the specified ID.
84 VI::Display* FindDisplay(u64 display_id); 95 VI::Display* FindDisplay(u64 display_id);
@@ -92,6 +103,10 @@ private:
92 /// Finds the layer identified by the specified ID in the desired display. 103 /// Finds the layer identified by the specified ID in the desired display.
93 const VI::Layer* FindLayer(u64 display_id, u64 layer_id) const; 104 const VI::Layer* FindLayer(u64 display_id, u64 layer_id) const;
94 105
106 static void VSyncThread(NVFlinger& nv_flinger);
107
108 void SplitVSync();
109
95 std::shared_ptr<Nvidia::Module> nvdrv; 110 std::shared_ptr<Nvidia::Module> nvdrv;
96 111
97 std::vector<VI::Display> displays; 112 std::vector<VI::Display> displays;
@@ -108,7 +123,13 @@ private:
108 /// Event that handles screen composition. 123 /// Event that handles screen composition.
109 std::shared_ptr<Core::Timing::EventType> composition_event; 124 std::shared_ptr<Core::Timing::EventType> composition_event;
110 125
126 std::shared_ptr<std::mutex> guard;
127
111 Core::System& system; 128 Core::System& system;
129
130 std::unique_ptr<std::thread> vsync_thread;
131 std::unique_ptr<Common::Event> wait_event;
132 std::atomic<bool> is_running{};
112}; 133};
113 134
114} // namespace Service::NVFlinger 135} // namespace Service::NVFlinger
diff --git a/src/core/hle/service/prepo/prepo.cpp b/src/core/hle/service/prepo/prepo.cpp
index 9d36ea0d0..cde3312da 100644
--- a/src/core/hle/service/prepo/prepo.cpp
+++ b/src/core/hle/service/prepo/prepo.cpp
@@ -80,8 +80,13 @@ private:
80 const auto user_id = rp.PopRaw<u128>(); 80 const auto user_id = rp.PopRaw<u128>();
81 const auto process_id = rp.PopRaw<u64>(); 81 const auto process_id = rp.PopRaw<u64>();
82 std::vector<std::vector<u8>> data{ctx.ReadBuffer(0)}; 82 std::vector<std::vector<u8>> data{ctx.ReadBuffer(0)};
83
83 if constexpr (Type == Core::Reporter::PlayReportType::Old2) { 84 if constexpr (Type == Core::Reporter::PlayReportType::Old2) {
84 data.emplace_back(ctx.ReadBuffer(1)); 85 const auto read_buffer_count =
86 ctx.BufferDescriptorX().size() + ctx.BufferDescriptorA().size();
87 if (read_buffer_count > 1) {
88 data.emplace_back(ctx.ReadBuffer(1));
89 }
85 } 90 }
86 91
87 LOG_DEBUG( 92 LOG_DEBUG(
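The Old2 play-report path used to read a second input buffer unconditionally; it now counts the X and A buffer descriptors first and only calls ReadBuffer(1) when a second buffer was actually supplied. A stand-in sketch of that guard follows; the context type and the BufferCount helper are illustrative, not the real HLERequestContext API.

// Sketch of guarding an optional second input buffer, mirroring the descriptor-count
// check added for PlayReportType::Old2 (stand-in context type).
#include <cstddef>
#include <cstdint>
#include <vector>

struct RequestContextStub {
    // Each element stands in for one buffer passed by the guest.
    std::vector<std::vector<std::uint8_t>> buffers;

    std::size_t BufferCount() const { return buffers.size(); }
    std::vector<std::uint8_t> ReadBuffer(std::size_t index) const { return buffers.at(index); }
};

std::vector<std::vector<std::uint8_t>> CollectReportData(const RequestContextStub& ctx) {
    std::vector<std::vector<std::uint8_t>> data{ctx.ReadBuffer(0)};
    // Only touch the second buffer if the descriptors say it exists.
    if (ctx.BufferCount() > 1) {
        data.emplace_back(ctx.ReadBuffer(1));
    }
    return data;
}

int main() {
    const RequestContextStub one_buffer{{{0x01, 0x02}}};
    const RequestContextStub two_buffers{{{0x01}, {0x02, 0x03}}};
    const auto a = CollectReportData(one_buffer);  // one buffer collected
    const auto b = CollectReportData(two_buffers); // both buffers collected
    return (a.size() == 1 && b.size() == 2) ? 0 : 1;
}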
diff --git a/src/core/hle/service/set/set.cpp b/src/core/hle/service/set/set.cpp
index f3b4b286c..34fe2fd82 100644
--- a/src/core/hle/service/set/set.cpp
+++ b/src/core/hle/service/set/set.cpp
@@ -3,6 +3,7 @@
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <algorithm> 5#include <algorithm>
6#include <array>
6#include <chrono> 7#include <chrono>
7#include "common/logging/log.h" 8#include "common/logging/log.h"
8#include "core/hle/ipc_helpers.h" 9#include "core/hle/ipc_helpers.h"
@@ -31,6 +32,44 @@ constexpr std::array<LanguageCode, 17> available_language_codes = {{
31 LanguageCode::ZH_HANT, 32 LanguageCode::ZH_HANT,
32}}; 33}};
33 34
35enum class KeyboardLayout : u64 {
36 Japanese = 0,
37 EnglishUs = 1,
38 EnglishUsInternational = 2,
39 EnglishUk = 3,
40 French = 4,
41 FrenchCa = 5,
42 Spanish = 6,
43 SpanishLatin = 7,
44 German = 8,
45 Italian = 9,
46 Portuguese = 10,
47 Russian = 11,
48 Korean = 12,
49 ChineseSimplified = 13,
50 ChineseTraditional = 14,
51};
52
53constexpr std::array<std::pair<LanguageCode, KeyboardLayout>, 17> language_to_layout{{
54 {LanguageCode::JA, KeyboardLayout::Japanese},
55 {LanguageCode::EN_US, KeyboardLayout::EnglishUs},
56 {LanguageCode::FR, KeyboardLayout::French},
57 {LanguageCode::DE, KeyboardLayout::German},
58 {LanguageCode::IT, KeyboardLayout::Italian},
59 {LanguageCode::ES, KeyboardLayout::Spanish},
60 {LanguageCode::ZH_CN, KeyboardLayout::ChineseSimplified},
61 {LanguageCode::KO, KeyboardLayout::Korean},
62 {LanguageCode::NL, KeyboardLayout::EnglishUsInternational},
63 {LanguageCode::PT, KeyboardLayout::Portuguese},
64 {LanguageCode::RU, KeyboardLayout::Russian},
65 {LanguageCode::ZH_TW, KeyboardLayout::ChineseTraditional},
66 {LanguageCode::EN_GB, KeyboardLayout::EnglishUk},
67 {LanguageCode::FR_CA, KeyboardLayout::FrenchCa},
68 {LanguageCode::ES_419, KeyboardLayout::SpanishLatin},
69 {LanguageCode::ZH_HANS, KeyboardLayout::ChineseSimplified},
70 {LanguageCode::ZH_HANT, KeyboardLayout::ChineseTraditional},
71}};
72
34constexpr std::size_t pre4_0_0_max_entries = 15; 73constexpr std::size_t pre4_0_0_max_entries = 15;
35constexpr std::size_t post4_0_0_max_entries = 17; 74constexpr std::size_t post4_0_0_max_entries = 17;
36 75
@@ -50,6 +89,25 @@ void GetAvailableLanguageCodesImpl(Kernel::HLERequestContext& ctx, std::size_t m
50 ctx.WriteBuffer(available_language_codes.data(), copy_size); 89 ctx.WriteBuffer(available_language_codes.data(), copy_size);
51 PushResponseLanguageCode(ctx, copy_amount); 90 PushResponseLanguageCode(ctx, copy_amount);
52} 91}
92
93void GetKeyCodeMapImpl(Kernel::HLERequestContext& ctx) {
94 const auto language_code = available_language_codes[Settings::values.language_index.GetValue()];
95 const auto key_code =
96 std::find_if(language_to_layout.cbegin(), language_to_layout.cend(),
97 [=](const auto& element) { return element.first == language_code; });
98 KeyboardLayout layout = KeyboardLayout::EnglishUs;
99 if (key_code == language_to_layout.cend()) {
100 LOG_ERROR(Service_SET,
101 "Could not find keyboard layout for language index {}, defaulting to English us",
102 Settings::values.language_index.GetValue());
103 } else {
104 layout = key_code->second;
105 }
106
107 IPC::ResponseBuilder rb{ctx, 2};
108 rb.Push(RESULT_SUCCESS);
109 ctx.WriteBuffer(&layout, sizeof(KeyboardLayout));
110}
53} // Anonymous namespace 111} // Anonymous namespace
54 112
55LanguageCode GetLanguageCodeFromIndex(std::size_t index) { 113LanguageCode GetLanguageCodeFromIndex(std::size_t index) {
@@ -105,11 +163,11 @@ void SET::GetQuestFlag(Kernel::HLERequestContext& ctx) {
105} 163}
106 164
107void SET::GetLanguageCode(Kernel::HLERequestContext& ctx) { 165void SET::GetLanguageCode(Kernel::HLERequestContext& ctx) {
108 LOG_DEBUG(Service_SET, "called {}", Settings::values.language_index); 166 LOG_DEBUG(Service_SET, "called {}", Settings::values.language_index.GetValue());
109 167
110 IPC::ResponseBuilder rb{ctx, 4}; 168 IPC::ResponseBuilder rb{ctx, 4};
111 rb.Push(RESULT_SUCCESS); 169 rb.Push(RESULT_SUCCESS);
112 rb.PushEnum(available_language_codes[Settings::values.language_index]); 170 rb.PushEnum(available_language_codes[Settings::values.language_index.GetValue()]);
113} 171}
114 172
115void SET::GetRegionCode(Kernel::HLERequestContext& ctx) { 173void SET::GetRegionCode(Kernel::HLERequestContext& ctx) {
@@ -117,7 +175,17 @@ void SET::GetRegionCode(Kernel::HLERequestContext& ctx) {
117 175
118 IPC::ResponseBuilder rb{ctx, 3}; 176 IPC::ResponseBuilder rb{ctx, 3};
119 rb.Push(RESULT_SUCCESS); 177 rb.Push(RESULT_SUCCESS);
120 rb.Push(Settings::values.region_index); 178 rb.Push(Settings::values.region_index.GetValue());
179}
180
181void SET::GetKeyCodeMap(Kernel::HLERequestContext& ctx) {
182 LOG_DEBUG(Service_SET, "Called {}", ctx.Description());
183 GetKeyCodeMapImpl(ctx);
184}
185
186void SET::GetKeyCodeMap2(Kernel::HLERequestContext& ctx) {
187 LOG_DEBUG(Service_SET, "Called {}", ctx.Description());
188 GetKeyCodeMapImpl(ctx);
121} 189}
122 190
123SET::SET() : ServiceFramework("set") { 191SET::SET() : ServiceFramework("set") {
@@ -130,9 +198,9 @@ SET::SET() : ServiceFramework("set") {
130 {4, &SET::GetRegionCode, "GetRegionCode"}, 198 {4, &SET::GetRegionCode, "GetRegionCode"},
131 {5, &SET::GetAvailableLanguageCodes2, "GetAvailableLanguageCodes2"}, 199 {5, &SET::GetAvailableLanguageCodes2, "GetAvailableLanguageCodes2"},
132 {6, &SET::GetAvailableLanguageCodeCount2, "GetAvailableLanguageCodeCount2"}, 200 {6, &SET::GetAvailableLanguageCodeCount2, "GetAvailableLanguageCodeCount2"},
133 {7, nullptr, "GetKeyCodeMap"}, 201 {7, &SET::GetKeyCodeMap, "GetKeyCodeMap"},
134 {8, &SET::GetQuestFlag, "GetQuestFlag"}, 202 {8, &SET::GetQuestFlag, "GetQuestFlag"},
135 {9, nullptr, "GetKeyCodeMap2"}, 203 {9, &SET::GetKeyCodeMap2, "GetKeyCodeMap2"},
136 {10, nullptr, "GetFirmwareVersionForDebug"}, 204 {10, nullptr, "GetFirmwareVersionForDebug"},
137 }; 205 };
138 // clang-format on 206 // clang-format on
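GetKeyCodeMap and GetKeyCodeMap2 both resolve the configured language to a keyboard layout by scanning the table added above and falling back to EnglishUs when there is no entry. A self-contained sketch of that lookup; the abbreviated LanguageCode values are stand-ins, since the real enum packs the language strings into u64 constants.

// Sketch of the lookup behind GetKeyCodeMapImpl: walk the language-to-layout
// table and fall back to EnglishUs when nothing matches.
#include <algorithm>
#include <array>
#include <cstdint>
#include <iostream>
#include <utility>

enum class LanguageCode : std::uint64_t { JA, EN_US, FR, DE, EN_GB }; // abbreviated stand-in
enum class KeyboardLayout : std::uint64_t {
    Japanese = 0, EnglishUs = 1, EnglishUk = 3, French = 4, German = 8,
};

constexpr std::array<std::pair<LanguageCode, KeyboardLayout>, 5> language_to_layout{{
    {LanguageCode::JA, KeyboardLayout::Japanese},
    {LanguageCode::EN_US, KeyboardLayout::EnglishUs},
    {LanguageCode::FR, KeyboardLayout::French},
    {LanguageCode::DE, KeyboardLayout::German},
    {LanguageCode::EN_GB, KeyboardLayout::EnglishUk},
}};

KeyboardLayout LayoutForLanguage(LanguageCode language_code) {
    const auto it = std::find_if(language_to_layout.cbegin(), language_to_layout.cend(),
                                 [=](const auto& element) { return element.first == language_code; });
    if (it == language_to_layout.cend()) {
        return KeyboardLayout::EnglishUs; // same fallback the service logs about
    }
    return it->second;
}

int main() {
    std::cout << static_cast<std::uint64_t>(LayoutForLanguage(LanguageCode::DE)) << '\n';    // 8
    std::cout << static_cast<std::uint64_t>(LayoutForLanguage(LanguageCode::EN_GB)) << '\n'; // 3
    return 0;
}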
diff --git a/src/core/hle/service/set/set.h b/src/core/hle/service/set/set.h
index 6084b345d..8ac9c169d 100644
--- a/src/core/hle/service/set/set.h
+++ b/src/core/hle/service/set/set.h
@@ -44,6 +44,8 @@ private:
44 void GetAvailableLanguageCodeCount2(Kernel::HLERequestContext& ctx); 44 void GetAvailableLanguageCodeCount2(Kernel::HLERequestContext& ctx);
45 void GetQuestFlag(Kernel::HLERequestContext& ctx); 45 void GetQuestFlag(Kernel::HLERequestContext& ctx);
46 void GetRegionCode(Kernel::HLERequestContext& ctx); 46 void GetRegionCode(Kernel::HLERequestContext& ctx);
47 void GetKeyCodeMap(Kernel::HLERequestContext& ctx);
48 void GetKeyCodeMap2(Kernel::HLERequestContext& ctx);
47}; 49};
48 50
49} // namespace Service::Set 51} // namespace Service::Set
diff --git a/src/core/hle/service/sm/sm.cpp b/src/core/hle/service/sm/sm.cpp
index 6ada13be4..d872de16c 100644
--- a/src/core/hle/service/sm/sm.cpp
+++ b/src/core/hle/service/sm/sm.cpp
@@ -142,7 +142,7 @@ void SM::GetService(Kernel::HLERequestContext& ctx) {
142 } 142 }
143 143
144 // Wake the threads waiting on the ServerPort 144 // Wake the threads waiting on the ServerPort
145 server_port->WakeupAllWaitingThreads(); 145 server_port->Signal();
146 146
147 LOG_DEBUG(Service_SM, "called service={} -> session={}", name, client->GetObjectId()); 147 LOG_DEBUG(Service_SM, "called service={} -> session={}", name, client->GetObjectId());
148 IPC::ResponseBuilder rb{ctx, 2, 0, 1, IPC::ResponseBuilder::Flags::AlwaysMoveHandles}; 148 IPC::ResponseBuilder rb{ctx, 2, 0, 1, IPC::ResponseBuilder::Flags::AlwaysMoveHandles};
diff --git a/src/core/hle/service/spl/module.cpp b/src/core/hle/service/spl/module.cpp
index e724d4ab8..865ed3b91 100644
--- a/src/core/hle/service/spl/module.cpp
+++ b/src/core/hle/service/spl/module.cpp
@@ -19,7 +19,7 @@ namespace Service::SPL {
19 19
20Module::Interface::Interface(std::shared_ptr<Module> module, const char* name) 20Module::Interface::Interface(std::shared_ptr<Module> module, const char* name)
21 : ServiceFramework(name), module(std::move(module)), 21 : ServiceFramework(name), module(std::move(module)),
22 rng(Settings::values.rng_seed.value_or(std::time(nullptr))) {} 22 rng(Settings::values.rng_seed.GetValue().value_or(std::time(nullptr))) {}
23 23
24Module::Interface::~Interface() = default; 24Module::Interface::~Interface() = default;
25 25
diff --git a/src/core/hle/service/time/standard_steady_clock_core.cpp b/src/core/hle/service/time/standard_steady_clock_core.cpp
index 1575f0b49..59a272f4a 100644
--- a/src/core/hle/service/time/standard_steady_clock_core.cpp
+++ b/src/core/hle/service/time/standard_steady_clock_core.cpp
@@ -11,9 +11,8 @@
11namespace Service::Time::Clock { 11namespace Service::Time::Clock {
12 12
13TimeSpanType StandardSteadyClockCore::GetCurrentRawTimePoint(Core::System& system) { 13TimeSpanType StandardSteadyClockCore::GetCurrentRawTimePoint(Core::System& system) {
14 const TimeSpanType ticks_time_span{TimeSpanType::FromTicks( 14 const TimeSpanType ticks_time_span{
15 Core::Timing::CpuCyclesToClockCycles(system.CoreTiming().GetTicks()), 15 TimeSpanType::FromTicks(system.CoreTiming().GetClockTicks(), Core::Hardware::CNTFREQ)};
16 Core::Hardware::CNTFREQ)};
17 TimeSpanType raw_time_point{setup_value.nanoseconds + ticks_time_span.nanoseconds}; 16 TimeSpanType raw_time_point{setup_value.nanoseconds + ticks_time_span.nanoseconds};
18 17
19 if (raw_time_point.nanoseconds < cached_raw_time_point.nanoseconds) { 18 if (raw_time_point.nanoseconds < cached_raw_time_point.nanoseconds) {
diff --git a/src/core/hle/service/time/tick_based_steady_clock_core.cpp b/src/core/hle/service/time/tick_based_steady_clock_core.cpp
index 44d5bc651..8baaa2a6a 100644
--- a/src/core/hle/service/time/tick_based_steady_clock_core.cpp
+++ b/src/core/hle/service/time/tick_based_steady_clock_core.cpp
@@ -11,9 +11,8 @@
11namespace Service::Time::Clock { 11namespace Service::Time::Clock {
12 12
13SteadyClockTimePoint TickBasedSteadyClockCore::GetTimePoint(Core::System& system) { 13SteadyClockTimePoint TickBasedSteadyClockCore::GetTimePoint(Core::System& system) {
14 const TimeSpanType ticks_time_span{TimeSpanType::FromTicks( 14 const TimeSpanType ticks_time_span{
15 Core::Timing::CpuCyclesToClockCycles(system.CoreTiming().GetTicks()), 15 TimeSpanType::FromTicks(system.CoreTiming().GetClockTicks(), Core::Hardware::CNTFREQ)};
16 Core::Hardware::CNTFREQ)};
17 16
18 return {ticks_time_span.ToSeconds(), GetClockSourceId()}; 17 return {ticks_time_span.ToSeconds(), GetClockSourceId()};
19} 18}
diff --git a/src/core/hle/service/time/time.cpp b/src/core/hle/service/time/time.cpp
index cc1dbd575..13e4b3818 100644
--- a/src/core/hle/service/time/time.cpp
+++ b/src/core/hle/service/time/time.cpp
@@ -241,9 +241,8 @@ void Module::Interface::CalculateMonotonicSystemClockBaseTimePoint(Kernel::HLERe
241 const auto current_time_point{steady_clock_core.GetCurrentTimePoint(system)}; 241 const auto current_time_point{steady_clock_core.GetCurrentTimePoint(system)};
242 242
243 if (current_time_point.clock_source_id == context.steady_time_point.clock_source_id) { 243 if (current_time_point.clock_source_id == context.steady_time_point.clock_source_id) {
244 const auto ticks{Clock::TimeSpanType::FromTicks( 244 const auto ticks{Clock::TimeSpanType::FromTicks(system.CoreTiming().GetClockTicks(),
245 Core::Timing::CpuCyclesToClockCycles(system.CoreTiming().GetTicks()), 245 Core::Hardware::CNTFREQ)};
246 Core::Hardware::CNTFREQ)};
247 const s64 base_time_point{context.offset + current_time_point.time_point - 246 const s64 base_time_point{context.offset + current_time_point.time_point -
248 ticks.ToSeconds()}; 247 ticks.ToSeconds()};
249 IPC::ResponseBuilder rb{ctx, (sizeof(s64) / 4) + 2}; 248 IPC::ResponseBuilder rb{ctx, (sizeof(s64) / 4) + 2};
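The steady-clock paths above now all build their tick-based span the same way: take the emulated counter ticks from CoreTiming and convert them at the counter frequency (Core::Hardware::CNTFREQ, 19.2 MHz on the Switch) instead of converting CPU cycles first. TimeSpanType::FromTicks itself is not shown in this diff, so the following is only a sketch of the ticks-to-nanoseconds arithmetic such a conversion performs, split into whole seconds plus remainder so the intermediate products stay inside 64 bits.

// Sketch of converting emulated counter ticks to nanoseconds at CNTFREQ.
#include <cstdint>
#include <iostream>

constexpr std::uint64_t CNTFREQ = 19'200'000; // Switch counter frequency, ticks per second

constexpr std::int64_t TicksToNanoseconds(std::uint64_t ticks, std::uint64_t frequency) {
    // A naive ticks * 1e9 would overflow 64 bits after roughly 16 minutes of ticks,
    // so convert the whole seconds and the sub-second remainder separately.
    const std::uint64_t whole_seconds = ticks / frequency;
    const std::uint64_t remainder = ticks % frequency;
    return static_cast<std::int64_t>(whole_seconds * 1'000'000'000ULL +
                                     remainder * 1'000'000'000ULL / frequency);
}

int main() {
    // One second's worth of ticks converts to exactly 1e9 ns.
    std::cout << TicksToNanoseconds(CNTFREQ, CNTFREQ) << '\n';     // 1000000000
    // Half a second.
    std::cout << TicksToNanoseconds(CNTFREQ / 2, CNTFREQ) << '\n'; // 500000000
    return 0;
}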
diff --git a/src/core/hle/service/time/time_sharedmemory.cpp b/src/core/hle/service/time/time_sharedmemory.cpp
index 999ec1e51..e0ae9f874 100644
--- a/src/core/hle/service/time/time_sharedmemory.cpp
+++ b/src/core/hle/service/time/time_sharedmemory.cpp
@@ -30,8 +30,7 @@ void SharedMemory::SetupStandardSteadyClock(Core::System& system,
30 const Common::UUID& clock_source_id, 30 const Common::UUID& clock_source_id,
31 Clock::TimeSpanType current_time_point) { 31 Clock::TimeSpanType current_time_point) {
32 const Clock::TimeSpanType ticks_time_span{Clock::TimeSpanType::FromTicks( 32 const Clock::TimeSpanType ticks_time_span{Clock::TimeSpanType::FromTicks(
33 Core::Timing::CpuCyclesToClockCycles(system.CoreTiming().GetTicks()), 33 system.CoreTiming().GetClockTicks(), Core::Hardware::CNTFREQ)};
34 Core::Hardware::CNTFREQ)};
35 const Clock::SteadyClockContext context{ 34 const Clock::SteadyClockContext context{
36 static_cast<u64>(current_time_point.nanoseconds - ticks_time_span.nanoseconds), 35 static_cast<u64>(current_time_point.nanoseconds - ticks_time_span.nanoseconds),
37 clock_source_id}; 36 clock_source_id};
diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp
index 67b45e7c0..ea7b4ae13 100644
--- a/src/core/hle/service/vi/vi.cpp
+++ b/src/core/hle/service/vi/vi.cpp
@@ -511,6 +511,7 @@ private:
511 LOG_DEBUG(Service_VI, "called. id=0x{:08X} transaction={:X}, flags=0x{:08X}", id, 511 LOG_DEBUG(Service_VI, "called. id=0x{:08X} transaction={:X}, flags=0x{:08X}", id,
512 static_cast<u32>(transaction), flags); 512 static_cast<u32>(transaction), flags);
513 513
514 nv_flinger->Lock();
514 auto& buffer_queue = nv_flinger->FindBufferQueue(id); 515 auto& buffer_queue = nv_flinger->FindBufferQueue(id);
515 516
516 switch (transaction) { 517 switch (transaction) {
@@ -518,9 +519,9 @@ private:
518 IGBPConnectRequestParcel request{ctx.ReadBuffer()}; 519 IGBPConnectRequestParcel request{ctx.ReadBuffer()};
519 IGBPConnectResponseParcel response{ 520 IGBPConnectResponseParcel response{
520 static_cast<u32>(static_cast<u32>(DisplayResolution::UndockedWidth) * 521 static_cast<u32>(static_cast<u32>(DisplayResolution::UndockedWidth) *
521 Settings::values.resolution_factor), 522 Settings::values.resolution_factor.GetValue()),
522 static_cast<u32>(static_cast<u32>(DisplayResolution::UndockedHeight) * 523 static_cast<u32>(static_cast<u32>(DisplayResolution::UndockedHeight) *
523 Settings::values.resolution_factor)}; 524 Settings::values.resolution_factor.GetValue())};
524 ctx.WriteBuffer(response.Serialize()); 525 ctx.WriteBuffer(response.Serialize());
525 break; 526 break;
526 } 527 }
@@ -550,6 +551,7 @@ private:
550 [=](std::shared_ptr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx, 551 [=](std::shared_ptr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx,
551 Kernel::ThreadWakeupReason reason) { 552 Kernel::ThreadWakeupReason reason) {
552 // Repeat TransactParcel DequeueBuffer when a buffer is available 553 // Repeat TransactParcel DequeueBuffer when a buffer is available
554 nv_flinger->Lock();
553 auto& buffer_queue = nv_flinger->FindBufferQueue(id); 555 auto& buffer_queue = nv_flinger->FindBufferQueue(id);
554 auto result = buffer_queue.DequeueBuffer(width, height); 556 auto result = buffer_queue.DequeueBuffer(width, height);
555 ASSERT_MSG(result != std::nullopt, "Could not dequeue buffer."); 557 ASSERT_MSG(result != std::nullopt, "Could not dequeue buffer.");
@@ -747,14 +749,14 @@ private:
747 749
748 if (Settings::values.use_docked_mode) { 750 if (Settings::values.use_docked_mode) {
749 rb.Push(static_cast<u32>(Service::VI::DisplayResolution::DockedWidth) * 751 rb.Push(static_cast<u32>(Service::VI::DisplayResolution::DockedWidth) *
750 static_cast<u32>(Settings::values.resolution_factor)); 752 static_cast<u32>(Settings::values.resolution_factor.GetValue()));
751 rb.Push(static_cast<u32>(Service::VI::DisplayResolution::DockedHeight) * 753 rb.Push(static_cast<u32>(Service::VI::DisplayResolution::DockedHeight) *
752 static_cast<u32>(Settings::values.resolution_factor)); 754 static_cast<u32>(Settings::values.resolution_factor.GetValue()));
753 } else { 755 } else {
754 rb.Push(static_cast<u32>(Service::VI::DisplayResolution::UndockedWidth) * 756 rb.Push(static_cast<u32>(Service::VI::DisplayResolution::UndockedWidth) *
755 static_cast<u32>(Settings::values.resolution_factor)); 757 static_cast<u32>(Settings::values.resolution_factor.GetValue()));
756 rb.Push(static_cast<u32>(Service::VI::DisplayResolution::UndockedHeight) * 758 rb.Push(static_cast<u32>(Service::VI::DisplayResolution::UndockedHeight) *
757 static_cast<u32>(Settings::values.resolution_factor)); 759 static_cast<u32>(Settings::values.resolution_factor.GetValue()));
758 } 760 }
759 761
760 rb.PushRaw<float>(60.0f); // This wouldn't seem to be correct for 30 fps games. 762 rb.PushRaw<float>(60.0f); // This wouldn't seem to be correct for 30 fps games.
@@ -1029,9 +1031,9 @@ private:
1029 // between docked and undocked dimensions. We take the liberty of applying 1031 // between docked and undocked dimensions. We take the liberty of applying
1030 // the resolution scaling factor here. 1032 // the resolution scaling factor here.
1031 rb.Push(static_cast<u64>(DisplayResolution::UndockedWidth) * 1033 rb.Push(static_cast<u64>(DisplayResolution::UndockedWidth) *
1032 static_cast<u32>(Settings::values.resolution_factor)); 1034 static_cast<u32>(Settings::values.resolution_factor.GetValue()));
1033 rb.Push(static_cast<u64>(DisplayResolution::UndockedHeight) * 1035 rb.Push(static_cast<u64>(DisplayResolution::UndockedHeight) *
1034 static_cast<u32>(Settings::values.resolution_factor)); 1036 static_cast<u32>(Settings::values.resolution_factor.GetValue()));
1035 } 1037 }
1036 1038
1037 void SetLayerScalingMode(Kernel::HLERequestContext& ctx) { 1039 void SetLayerScalingMode(Kernel::HLERequestContext& ctx) {
@@ -1064,8 +1066,8 @@ private:
1064 LOG_WARNING(Service_VI, "(STUBBED) called"); 1066 LOG_WARNING(Service_VI, "(STUBBED) called");
1065 1067
1066 DisplayInfo display_info; 1068 DisplayInfo display_info;
1067 display_info.width *= static_cast<u64>(Settings::values.resolution_factor); 1069 display_info.width *= static_cast<u64>(Settings::values.resolution_factor.GetValue());
1068 display_info.height *= static_cast<u64>(Settings::values.resolution_factor); 1070 display_info.height *= static_cast<u64>(Settings::values.resolution_factor.GetValue());
1069 ctx.WriteBuffer(&display_info, sizeof(DisplayInfo)); 1071 ctx.WriteBuffer(&display_info, sizeof(DisplayInfo));
1070 IPC::ResponseBuilder rb{ctx, 4}; 1072 IPC::ResponseBuilder rb{ctx, 4};
1071 rb.Push(RESULT_SUCCESS); 1073 rb.Push(RESULT_SUCCESS);
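
Note: the new nv_flinger->Lock() calls serialize buffer-queue lookups now that HLE requests and the nvflinger thread can run concurrently. A self-contained sketch of that pattern, with every name other than FindBufferQueue invented (the matching unlock is not visible in this hunk, so a scoped guard is assumed):

    #include <mutex>
    #include <unordered_map>

    struct BufferQueue {
        // ... producer/consumer slots elided ...
    };

    class Flinger {
    public:
        std::mutex& Mutex() { return guard; }                  // stand-in for nv_flinger->Lock()
        BufferQueue& FindBufferQueue(unsigned id) { return queues[id]; }

    private:
        std::mutex guard;
        std::unordered_map<unsigned, BufferQueue> queues;
    };

    void TransactParcel(Flinger& flinger, unsigned id) {
        std::scoped_lock lock{flinger.Mutex()};                // hold the lock for the whole transaction
        BufferQueue& queue = flinger.FindBufferQueue(id);      // safe: no other thread mutates the table
        (void)queue; // ... parcel handling elided ...
    }
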
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 9d87045a0..2c5588933 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -8,6 +8,7 @@
8#include <utility> 8#include <utility>
9 9
10#include "common/assert.h" 10#include "common/assert.h"
11#include "common/atomic_ops.h"
11#include "common/common_types.h" 12#include "common/common_types.h"
12#include "common/logging/log.h" 13#include "common/logging/log.h"
13#include "common/page_table.h" 14#include "common/page_table.h"
@@ -29,15 +30,12 @@ namespace Core::Memory {
29struct Memory::Impl { 30struct Memory::Impl {
30 explicit Impl(Core::System& system_) : system{system_} {} 31 explicit Impl(Core::System& system_) : system{system_} {}
31 32
32 void SetCurrentPageTable(Kernel::Process& process) { 33 void SetCurrentPageTable(Kernel::Process& process, u32 core_id) {
33 current_page_table = &process.PageTable().PageTableImpl(); 34 current_page_table = &process.PageTable().PageTableImpl();
34 35
35 const std::size_t address_space_width = process.PageTable().GetAddressSpaceWidth(); 36 const std::size_t address_space_width = process.PageTable().GetAddressSpaceWidth();
36 37
37 system.ArmInterface(0).PageTableChanged(*current_page_table, address_space_width); 38 system.ArmInterface(core_id).PageTableChanged(*current_page_table, address_space_width);
38 system.ArmInterface(1).PageTableChanged(*current_page_table, address_space_width);
39 system.ArmInterface(2).PageTableChanged(*current_page_table, address_space_width);
40 system.ArmInterface(3).PageTableChanged(*current_page_table, address_space_width);
41 } 39 }
42 40
43 void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) { 41 void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
@@ -179,6 +177,22 @@ struct Memory::Impl {
179 } 177 }
180 } 178 }
181 179
180 bool WriteExclusive8(const VAddr addr, const u8 data, const u8 expected) {
181 return WriteExclusive<u8>(addr, data, expected);
182 }
183
184 bool WriteExclusive16(const VAddr addr, const u16 data, const u16 expected) {
185 return WriteExclusive<u16_le>(addr, data, expected);
186 }
187
188 bool WriteExclusive32(const VAddr addr, const u32 data, const u32 expected) {
189 return WriteExclusive<u32_le>(addr, data, expected);
190 }
191
192 bool WriteExclusive64(const VAddr addr, const u64 data, const u64 expected) {
193 return WriteExclusive<u64_le>(addr, data, expected);
194 }
195
182 std::string ReadCString(VAddr vaddr, std::size_t max_length) { 196 std::string ReadCString(VAddr vaddr, std::size_t max_length) {
183 std::string string; 197 std::string string;
184 string.reserve(max_length); 198 string.reserve(max_length);
@@ -534,9 +548,9 @@ struct Memory::Impl {
534 // longer exist, and we should just leave the pagetable entry blank. 548 // longer exist, and we should just leave the pagetable entry blank.
535 page_type = Common::PageType::Unmapped; 549 page_type = Common::PageType::Unmapped;
536 } else { 550 } else {
537 page_type = Common::PageType::Memory;
538 current_page_table->pointers[vaddr >> PAGE_BITS] = 551 current_page_table->pointers[vaddr >> PAGE_BITS] =
539 pointer - (vaddr & ~PAGE_MASK); 552 pointer - (vaddr & ~PAGE_MASK);
553 page_type = Common::PageType::Memory;
540 } 554 }
541 break; 555 break;
542 } 556 }
@@ -577,9 +591,12 @@ struct Memory::Impl {
577 base + page_table.pointers.size()); 591 base + page_table.pointers.size());
578 592
579 if (!target) { 593 if (!target) {
594 ASSERT_MSG(type != Common::PageType::Memory,
595 "Mapping memory page without a pointer @ {:016x}", base * PAGE_SIZE);
596
580 while (base != end) { 597 while (base != end) {
581 page_table.pointers[base] = nullptr;
582 page_table.attributes[base] = type; 598 page_table.attributes[base] = type;
599 page_table.pointers[base] = nullptr;
583 page_table.backing_addr[base] = 0; 600 page_table.backing_addr[base] = 0;
584 601
585 base += 1; 602 base += 1;
@@ -682,6 +699,67 @@ struct Memory::Impl {
682 } 699 }
683 } 700 }
684 701
702 template <typename T>
703 bool WriteExclusive(const VAddr vaddr, const T data, const T expected) {
704 u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
705 if (page_pointer != nullptr) {
706 // NOTE: Avoid adding any extra logic to this fast-path block
707 T volatile* pointer = reinterpret_cast<T volatile*>(&page_pointer[vaddr]);
708 return Common::AtomicCompareAndSwap(pointer, data, expected);
709 }
710
711 const Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
712 switch (type) {
713 case Common::PageType::Unmapped:
714 LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8,
715 static_cast<u32>(data), vaddr);
716 return true;
717 case Common::PageType::Memory:
718 ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
719 break;
720 case Common::PageType::RasterizerCachedMemory: {
721 u8* host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)};
722 system.GPU().InvalidateRegion(vaddr, sizeof(T));
723 T volatile* pointer = reinterpret_cast<T volatile*>(&host_ptr);
724 return Common::AtomicCompareAndSwap(pointer, data, expected);
725 break;
726 }
727 default:
728 UNREACHABLE();
729 }
730 return true;
731 }
732
733 bool WriteExclusive128(const VAddr vaddr, const u128 data, const u128 expected) {
734 u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
735 if (page_pointer != nullptr) {
736 // NOTE: Avoid adding any extra logic to this fast-path block
737 u64 volatile* pointer = reinterpret_cast<u64 volatile*>(&page_pointer[vaddr]);
738 return Common::AtomicCompareAndSwap(pointer, data, expected);
739 }
740
741 const Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
742 switch (type) {
743 case Common::PageType::Unmapped:
744 LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}{:016X}", sizeof(data) * 8,
745 static_cast<u64>(data[1]), static_cast<u64>(data[0]), vaddr);
746 return true;
747 case Common::PageType::Memory:
748 ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
749 break;
750 case Common::PageType::RasterizerCachedMemory: {
751 u8* host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)};
752 system.GPU().InvalidateRegion(vaddr, sizeof(u128));
753 u64 volatile* pointer = reinterpret_cast<u64 volatile*>(&host_ptr);
754 return Common::AtomicCompareAndSwap(pointer, data, expected);
755 break;
756 }
757 default:
758 UNREACHABLE();
759 }
760 return true;
761 }
762
685 Common::PageTable* current_page_table = nullptr; 763 Common::PageTable* current_page_table = nullptr;
686 Core::System& system; 764 Core::System& system;
687}; 765};
@@ -689,8 +767,8 @@ struct Memory::Impl {
689Memory::Memory(Core::System& system) : impl{std::make_unique<Impl>(system)} {} 767Memory::Memory(Core::System& system) : impl{std::make_unique<Impl>(system)} {}
690Memory::~Memory() = default; 768Memory::~Memory() = default;
691 769
692void Memory::SetCurrentPageTable(Kernel::Process& process) { 770void Memory::SetCurrentPageTable(Kernel::Process& process, u32 core_id) {
693 impl->SetCurrentPageTable(process); 771 impl->SetCurrentPageTable(process, core_id);
694} 772}
695 773
696void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) { 774void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
@@ -764,6 +842,26 @@ void Memory::Write64(VAddr addr, u64 data) {
764 impl->Write64(addr, data); 842 impl->Write64(addr, data);
765} 843}
766 844
845bool Memory::WriteExclusive8(VAddr addr, u8 data, u8 expected) {
846 return impl->WriteExclusive8(addr, data, expected);
847}
848
849bool Memory::WriteExclusive16(VAddr addr, u16 data, u16 expected) {
850 return impl->WriteExclusive16(addr, data, expected);
851}
852
853bool Memory::WriteExclusive32(VAddr addr, u32 data, u32 expected) {
854 return impl->WriteExclusive32(addr, data, expected);
855}
856
857bool Memory::WriteExclusive64(VAddr addr, u64 data, u64 expected) {
858 return impl->WriteExclusive64(addr, data, expected);
859}
860
861bool Memory::WriteExclusive128(VAddr addr, u128 data, u128 expected) {
862 return impl->WriteExclusive128(addr, data, expected);
863}
864
767std::string Memory::ReadCString(VAddr vaddr, std::size_t max_length) { 865std::string Memory::ReadCString(VAddr vaddr, std::size_t max_length) {
768 return impl->ReadCString(vaddr, max_length); 866 return impl->ReadCString(vaddr, max_length);
769} 867}
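
Note: the WriteExclusive* helpers above resolve the guest's exclusive stores through Common::AtomicCompareAndSwap. A plausible shape for that helper, assuming it returns true only when memory still held `expected` and was atomically replaced with `data` (MSVC builds would use the _InterlockedCompareExchange family instead); this is an illustration, not the project's implementation:

    #include <cstdint>

    template <typename T>
    bool AtomicCompareAndSwap(volatile T* pointer, T data, T expected) {
        // GCC/Clang builtin: stores `data` iff *pointer == expected, and reports whether it did.
        return __sync_bool_compare_and_swap(pointer, expected, data);
    }
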
diff --git a/src/core/memory.h b/src/core/memory.h
index 9292f3b0a..4a1cc63f4 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -64,7 +64,7 @@ public:
64 * 64 *
65 * @param process The process to use the page table of. 65 * @param process The process to use the page table of.
66 */ 66 */
67 void SetCurrentPageTable(Kernel::Process& process); 67 void SetCurrentPageTable(Kernel::Process& process, u32 core_id);
68 68
69 /** 69 /**
70 * Maps an allocated buffer onto a region of the emulated process address space. 70 * Maps an allocated buffer onto a region of the emulated process address space.
@@ -245,6 +245,71 @@ public:
245 void Write64(VAddr addr, u64 data); 245 void Write64(VAddr addr, u64 data);
246 246
247 /** 247 /**
248 * Writes an 8-bit unsigned integer to the given virtual address in
249 * the current process' address space if and only if the address contains
250 * the expected value. This operation is atomic.
251 *
252 * @param addr The virtual address to write the 8-bit unsigned integer to.
253 * @param data The 8-bit unsigned integer to write to the given virtual address.
254 * @param expected The 8-bit unsigned integer to check against the given virtual address.
255 *
256 * @post The memory range [addr, sizeof(data)) contains the given data value.
257 */
258 bool WriteExclusive8(VAddr addr, u8 data, u8 expected);
259
260 /**
261 * Writes a 16-bit unsigned integer to the given virtual address in
262 * the current process' address space if and only if the address contains
263 * the expected value. This operation is atomic.
264 *
265 * @param addr The virtual address to write the 16-bit unsigned integer to.
266 * @param data The 16-bit unsigned integer to write to the given virtual address.
267 * @param expected The 16-bit unsigned integer to check against the given virtual address.
268 *
269 * @post The memory range [addr, sizeof(data)) contains the given data value.
270 */
271 bool WriteExclusive16(VAddr addr, u16 data, u16 expected);
272
273 /**
274 * Writes a 32-bit unsigned integer to the given virtual address in
275 * the current process' address space if and only if the address contains
276 * the expected value. This operation is atomic.
277 *
278 * @param addr The virtual address to write the 32-bit unsigned integer to.
279 * @param data The 32-bit unsigned integer to write to the given virtual address.
280 * @param expected The 32-bit unsigned integer to check against the given virtual address.
281 *
282 * @post The memory range [addr, sizeof(data)) contains the given data value.
283 */
284 bool WriteExclusive32(VAddr addr, u32 data, u32 expected);
285
286 /**
287 * Writes a 64-bit unsigned integer to the given virtual address in
288 * the current process' address space if and only if the address contains
289 * the expected value. This operation is atomic.
290 *
291 * @param addr The virtual address to write the 64-bit unsigned integer to.
292 * @param data The 64-bit unsigned integer to write to the given virtual address.
293 * @param expected The 64-bit unsigned integer to check against the given virtual address.
294 *
295 * @post The memory range [addr, sizeof(data)) contains the given data value.
296 */
297 bool WriteExclusive64(VAddr addr, u64 data, u64 expected);
298
299 /**
300 * Writes a 128-bit unsigned integer to the given virtual address in
301 * the current process' address space if and only if the address contains
302 * the expected value. This operation is atomic.
303 *
304 * @param addr The virtual address to write the 128-bit unsigned integer to.
305 * @param data The 128-bit unsigned integer to write to the given virtual address.
306 * @param expected The 128-bit unsigned integer to check against the given virtual address.
307 *
308 * @post The memory range [addr, sizeof(data)) contains the given data value.
309 */
310 bool WriteExclusive128(VAddr addr, u128 data, u128 expected);
311
312 /**
248 * Reads a null-terminated string from the given virtual address. 313 * Reads a null-terminated string from the given virtual address.
249 * This function will continually read characters until either: 314 * This function will continually read characters until either:
250 * 315 *
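
Note: a hypothetical caller showing how these conditional writes are meant to be used when emulating a load-exclusive/store-exclusive pair; only WriteExclusive32 comes from the declarations above, the wrapper is invented for illustration:

    #include "core/memory.h"   // Core::Memory::Memory, as declared in the hunk above

    bool StoreExclusive32(Core::Memory::Memory& memory, VAddr addr, u32 desired, u32 observed) {
        // Fails (returns false) when another core changed the word since `observed` was read,
        // in which case the guest retries its exclusive load/store loop.
        return memory.WriteExclusive32(addr, desired, observed);
    }
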
diff --git a/src/core/memory/cheat_engine.cpp b/src/core/memory/cheat_engine.cpp
index b139e8465..53d27859b 100644
--- a/src/core/memory/cheat_engine.cpp
+++ b/src/core/memory/cheat_engine.cpp
@@ -20,7 +20,7 @@
20 20
21namespace Core::Memory { 21namespace Core::Memory {
22 22
23constexpr s64 CHEAT_ENGINE_TICKS = static_cast<s64>(Core::Hardware::BASE_CLOCK_RATE / 12); 23constexpr s64 CHEAT_ENGINE_TICKS = static_cast<s64>(1000000000 / 12);
24constexpr u32 KEYPAD_BITMASK = 0x3FFFFFF; 24constexpr u32 KEYPAD_BITMASK = 0x3FFFFFF;
25 25
26StandardVmCallbacks::StandardVmCallbacks(Core::System& system, const CheatProcessMetadata& metadata) 26StandardVmCallbacks::StandardVmCallbacks(Core::System& system, const CheatProcessMetadata& metadata)
@@ -190,7 +190,7 @@ CheatEngine::~CheatEngine() {
190void CheatEngine::Initialize() { 190void CheatEngine::Initialize() {
191 event = Core::Timing::CreateEvent( 191 event = Core::Timing::CreateEvent(
192 "CheatEngine::FrameCallback::" + Common::HexToString(metadata.main_nso_build_id), 192 "CheatEngine::FrameCallback::" + Common::HexToString(metadata.main_nso_build_id),
193 [this](u64 userdata, s64 cycles_late) { FrameCallback(userdata, cycles_late); }); 193 [this](u64 userdata, s64 ns_late) { FrameCallback(userdata, ns_late); });
194 core_timing.ScheduleEvent(CHEAT_ENGINE_TICKS, event); 194 core_timing.ScheduleEvent(CHEAT_ENGINE_TICKS, event);
195 195
196 metadata.process_id = system.CurrentProcess()->GetProcessID(); 196 metadata.process_id = system.CurrentProcess()->GetProcessID();
@@ -217,7 +217,7 @@ void CheatEngine::Reload(std::vector<CheatEntry> cheats) {
217 217
218MICROPROFILE_DEFINE(Cheat_Engine, "Add-Ons", "Cheat Engine", MP_RGB(70, 200, 70)); 218MICROPROFILE_DEFINE(Cheat_Engine, "Add-Ons", "Cheat Engine", MP_RGB(70, 200, 70));
219 219
220void CheatEngine::FrameCallback(u64 userdata, s64 cycles_late) { 220void CheatEngine::FrameCallback(u64 userdata, s64 ns_late) {
221 if (is_pending_reload.exchange(false)) { 221 if (is_pending_reload.exchange(false)) {
222 vm.LoadProgram(cheats); 222 vm.LoadProgram(cheats);
223 } 223 }
@@ -230,7 +230,7 @@ void CheatEngine::FrameCallback(u64 userdata, s64 cycles_late) {
230 230
231 vm.Execute(metadata); 231 vm.Execute(metadata);
232 232
233 core_timing.ScheduleEvent(CHEAT_ENGINE_TICKS - cycles_late, event); 233 core_timing.ScheduleEvent(CHEAT_ENGINE_TICKS - ns_late, event);
234} 234}
235 235
236} // namespace Core::Memory 236} // namespace Core::Memory
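
Note: with core timing now measured in nanoseconds rather than CPU cycles, the callback keeps a fixed cadence by subtracting its lateness from the next delay. A small sketch under that assumption, with the interval taken from the diff and the ScheduleEvent signature inferred from the call sites:

    #include <memory>
    #include "common/common_types.h"
    #include "core/core_timing.h"

    constexpr s64 INTERVAL_NS = 1000000000 / 12;   // ~83.3 ms per cheat VM pass

    void Reschedule(Core::Timing::CoreTiming& core_timing,
                    std::shared_ptr<Core::Timing::EventType> event, s64 ns_late) {
        // Firing ns_late behind schedule shortens the next wait by the same amount,
        // so the average period stays at INTERVAL_NS.
        core_timing.ScheduleEvent(INTERVAL_NS - ns_late, event);
    }
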
diff --git a/src/core/perf_stats.cpp b/src/core/perf_stats.cpp
index f1ae9d4df..29339ead7 100644
--- a/src/core/perf_stats.cpp
+++ b/src/core/perf_stats.cpp
@@ -119,13 +119,14 @@ double PerfStats::GetLastFrameTimeScale() {
119} 119}
120 120
121void FrameLimiter::DoFrameLimiting(microseconds current_system_time_us) { 121void FrameLimiter::DoFrameLimiting(microseconds current_system_time_us) {
122 if (!Settings::values.use_frame_limit) { 122 if (!Settings::values.use_frame_limit.GetValue() ||
123 Settings::values.use_multi_core.GetValue()) {
123 return; 124 return;
124 } 125 }
125 126
126 auto now = Clock::now(); 127 auto now = Clock::now();
127 128
128 const double sleep_scale = Settings::values.frame_limit / 100.0; 129 const double sleep_scale = Settings::values.frame_limit.GetValue() / 100.0;
129 130
130 // Max lag caused by slow frames. Shouldn't be more than the length of a frame at the current 131 // Max lag caused by slow frames. Shouldn't be more than the length of a frame at the current
131 // speed percent or it will clamp too much and prevent this from properly limiting to that 132 // speed percent or it will clamp too much and prevent this from properly limiting to that
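
Note: the limiter now also backs off when multicore is enabled, where emulation is already paced against the host clock. For reference, a worked example of the percentage scale computed above, assuming a 60 Hz target (the target itself is not shown in this hunk): frame_limit = 100 gives sleep_scale = 1.0 and a ~16.67 ms budget per frame; frame_limit = 50 gives sleep_scale = 0.5 and a ~33.33 ms budget, i.e. half speed.

    #include <chrono>

    // Illustrative only; assumes a nonzero percentage and a 60 Hz target.
    std::chrono::microseconds FrameBudget(unsigned frame_limit_percent) {
        const double sleep_scale = frame_limit_percent / 100.0;
        return std::chrono::duration_cast<std::chrono::microseconds>(
            std::chrono::duration<double>{(1.0 / 60.0) / sleep_scale});
    }
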
diff --git a/src/core/settings.cpp b/src/core/settings.cpp
index 4edff9cd8..d3886c4ec 100644
--- a/src/core/settings.cpp
+++ b/src/core/settings.cpp
@@ -62,6 +62,7 @@ const std::array<const char*, NumMouseButtons> mapping = {{
62} 62}
63 63
64Values values = {}; 64Values values = {};
65bool configuring_global = true;
65 66
66std::string GetTimeZoneString() { 67std::string GetTimeZoneString() {
67 static constexpr std::array<const char*, 46> timezones{{ 68 static constexpr std::array<const char*, 46> timezones{{
@@ -73,9 +74,9 @@ std::string GetTimeZoneString() {
73 "UCT", "Universal", "UTC", "W-SU", "WET", "Zulu", 74 "UCT", "Universal", "UTC", "W-SU", "WET", "Zulu",
74 }}; 75 }};
75 76
76 ASSERT(Settings::values.time_zone_index < timezones.size()); 77 ASSERT(Settings::values.time_zone_index.GetValue() < timezones.size());
77 78
78 return timezones[Settings::values.time_zone_index]; 79 return timezones[Settings::values.time_zone_index.GetValue()];
79} 80}
80 81
81void Apply() { 82void Apply() {
@@ -97,25 +98,25 @@ void LogSetting(const std::string& name, const T& value) {
97 98
98void LogSettings() { 99void LogSettings() {
99 LOG_INFO(Config, "yuzu Configuration:"); 100 LOG_INFO(Config, "yuzu Configuration:");
100 LogSetting("System_UseDockedMode", Settings::values.use_docked_mode); 101 LogSetting("Controls_UseDockedMode", Settings::values.use_docked_mode);
101 LogSetting("System_RngSeed", Settings::values.rng_seed.value_or(0)); 102 LogSetting("System_RngSeed", Settings::values.rng_seed.GetValue().value_or(0));
102 LogSetting("System_CurrentUser", Settings::values.current_user); 103 LogSetting("System_CurrentUser", Settings::values.current_user);
103 LogSetting("System_LanguageIndex", Settings::values.language_index); 104 LogSetting("System_LanguageIndex", Settings::values.language_index.GetValue());
104 LogSetting("System_RegionIndex", Settings::values.region_index); 105 LogSetting("System_RegionIndex", Settings::values.region_index.GetValue());
105 LogSetting("System_TimeZoneIndex", Settings::values.time_zone_index); 106 LogSetting("System_TimeZoneIndex", Settings::values.time_zone_index.GetValue());
106 LogSetting("Core_UseMultiCore", Settings::values.use_multi_core); 107 LogSetting("Core_UseMultiCore", Settings::values.use_multi_core.GetValue());
107 LogSetting("Renderer_UseResolutionFactor", Settings::values.resolution_factor); 108 LogSetting("Renderer_UseResolutionFactor", Settings::values.resolution_factor.GetValue());
108 LogSetting("Renderer_UseFrameLimit", Settings::values.use_frame_limit); 109 LogSetting("Renderer_UseFrameLimit", Settings::values.use_frame_limit.GetValue());
109 LogSetting("Renderer_FrameLimit", Settings::values.frame_limit); 110 LogSetting("Renderer_FrameLimit", Settings::values.frame_limit.GetValue());
110 LogSetting("Renderer_UseDiskShaderCache", Settings::values.use_disk_shader_cache); 111 LogSetting("Renderer_UseDiskShaderCache", Settings::values.use_disk_shader_cache.GetValue());
111 LogSetting("Renderer_GPUAccuracyLevel", Settings::values.gpu_accuracy); 112 LogSetting("Renderer_GPUAccuracyLevel", Settings::values.gpu_accuracy.GetValue());
112 LogSetting("Renderer_UseAsynchronousGpuEmulation", 113 LogSetting("Renderer_UseAsynchronousGpuEmulation",
113 Settings::values.use_asynchronous_gpu_emulation); 114 Settings::values.use_asynchronous_gpu_emulation.GetValue());
114 LogSetting("Renderer_UseVsync", Settings::values.use_vsync); 115 LogSetting("Renderer_UseVsync", Settings::values.use_vsync.GetValue());
115 LogSetting("Renderer_UseAssemblyShaders", Settings::values.use_assembly_shaders); 116 LogSetting("Renderer_UseAssemblyShaders", Settings::values.use_assembly_shaders.GetValue());
116 LogSetting("Renderer_AnisotropicFilteringLevel", Settings::values.max_anisotropy); 117 LogSetting("Renderer_AnisotropicFilteringLevel", Settings::values.max_anisotropy.GetValue());
117 LogSetting("Audio_OutputEngine", Settings::values.sink_id); 118 LogSetting("Audio_OutputEngine", Settings::values.sink_id);
118 LogSetting("Audio_EnableAudioStretching", Settings::values.enable_audio_stretching); 119 LogSetting("Audio_EnableAudioStretching", Settings::values.enable_audio_stretching.GetValue());
119 LogSetting("Audio_OutputDevice", Settings::values.audio_device_id); 120 LogSetting("Audio_OutputDevice", Settings::values.audio_device_id);
120 LogSetting("DataStorage_UseVirtualSd", Settings::values.use_virtual_sd); 121 LogSetting("DataStorage_UseVirtualSd", Settings::values.use_virtual_sd);
121 LogSetting("DataStorage_NandDir", FileUtil::GetUserPath(FileUtil::UserPath::NANDDir)); 122 LogSetting("DataStorage_NandDir", FileUtil::GetUserPath(FileUtil::UserPath::NANDDir));
@@ -127,12 +128,60 @@ void LogSettings() {
127 LogSetting("Services_BCATBoxcatLocal", Settings::values.bcat_boxcat_local); 128 LogSetting("Services_BCATBoxcatLocal", Settings::values.bcat_boxcat_local);
128} 129}
129 130
131float Volume() {
132 if (values.audio_muted) {
133 return 0.0f;
134 }
135 return values.volume.GetValue();
136}
137
130bool IsGPULevelExtreme() { 138bool IsGPULevelExtreme() {
131 return values.gpu_accuracy == GPUAccuracy::Extreme; 139 return values.gpu_accuracy.GetValue() == GPUAccuracy::Extreme;
132} 140}
133 141
134bool IsGPULevelHigh() { 142bool IsGPULevelHigh() {
135 return values.gpu_accuracy == GPUAccuracy::Extreme || values.gpu_accuracy == GPUAccuracy::High; 143 return values.gpu_accuracy.GetValue() == GPUAccuracy::Extreme ||
144 values.gpu_accuracy.GetValue() == GPUAccuracy::High;
145}
146
147void RestoreGlobalState() {
148 // If a game is running, DO NOT restore the global settings state
149 if (Core::System::GetInstance().IsPoweredOn()) {
150 return;
151 }
152
153 // Audio
154 values.enable_audio_stretching.SetGlobal(true);
155 values.volume.SetGlobal(true);
156
157 // Core
158 values.use_multi_core.SetGlobal(true);
159
160 // Renderer
161 values.renderer_backend.SetGlobal(true);
162 values.vulkan_device.SetGlobal(true);
163 values.aspect_ratio.SetGlobal(true);
164 values.max_anisotropy.SetGlobal(true);
165 values.use_frame_limit.SetGlobal(true);
166 values.frame_limit.SetGlobal(true);
167 values.use_disk_shader_cache.SetGlobal(true);
168 values.gpu_accuracy.SetGlobal(true);
169 values.use_asynchronous_gpu_emulation.SetGlobal(true);
170 values.use_vsync.SetGlobal(true);
171 values.use_assembly_shaders.SetGlobal(true);
172 values.use_fast_gpu_time.SetGlobal(true);
173 values.force_30fps_mode.SetGlobal(true);
174 values.bg_red.SetGlobal(true);
175 values.bg_green.SetGlobal(true);
176 values.bg_blue.SetGlobal(true);
177
178 // System
179 values.language_index.SetGlobal(true);
180 values.region_index.SetGlobal(true);
181 values.time_zone_index.SetGlobal(true);
182 values.rng_seed.SetGlobal(true);
183 values.custom_rtc.SetGlobal(true);
184 values.sound_index.SetGlobal(true);
136} 185}
137 186
138} // namespace Settings 187} // namespace Settings
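
Note: a hypothetical frontend flow for the per-game override machinery added above; only SetGlobal/SetValue/RestoreGlobalState come from the diff, the wrapper functions are invented for illustration:

    #include "core/settings.h"

    void ApplyPerGameOverride() {
        Settings::values.use_vsync.SetGlobal(false);   // this title overrides the global value
        Settings::values.use_vsync.SetValue(true);     // written to the local (per-game) copy
    }

    void OnEmulationShutdown() {
        // No-op while a game is still running; otherwise every overridden Setting
        // is pointed back at its global copy.
        Settings::RestoreGlobalState();
    }
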
diff --git a/src/core/settings.h b/src/core/settings.h
index 78eb33737..850ca4072 100644
--- a/src/core/settings.h
+++ b/src/core/settings.h
@@ -382,20 +382,85 @@ enum class GPUAccuracy : u32 {
382 Extreme = 2, 382 Extreme = 2,
383}; 383};
384 384
385extern bool configuring_global;
386
387template <typename Type>
388class Setting final {
389public:
390 Setting() = default;
391 explicit Setting(Type val) : global{val} {}
392 ~Setting() = default;
393 void SetGlobal(bool to_global) {
394 use_global = to_global;
395 }
396 bool UsingGlobal() const {
397 return use_global;
398 }
399 Type GetValue(bool need_global = false) const {
400 if (use_global || need_global) {
401 return global;
402 }
403 return local;
404 }
405 void SetValue(const Type& value) {
406 if (use_global) {
407 global = value;
408 } else {
409 local = value;
410 }
411 }
412
413private:
414 bool use_global = true;
415 Type global{};
416 Type local{};
417};
418
385struct Values { 419struct Values {
420 // Audio
421 std::string audio_device_id;
422 std::string sink_id;
423 bool audio_muted;
424 Setting<bool> enable_audio_stretching;
425 Setting<float> volume;
426
427 // Core
428 Setting<bool> use_multi_core;
429
430 // Renderer
431 Setting<RendererBackend> renderer_backend;
432 bool renderer_debug;
433 Setting<int> vulkan_device;
434
435 Setting<u16> resolution_factor = Setting(static_cast<u16>(1));
436 Setting<int> aspect_ratio;
437 Setting<int> max_anisotropy;
438 Setting<bool> use_frame_limit;
439 Setting<u16> frame_limit;
440 Setting<bool> use_disk_shader_cache;
441 Setting<GPUAccuracy> gpu_accuracy;
442 Setting<bool> use_asynchronous_gpu_emulation;
443 Setting<bool> use_vsync;
444 Setting<bool> use_assembly_shaders;
445 Setting<bool> force_30fps_mode;
446 Setting<bool> use_fast_gpu_time;
447
448 Setting<float> bg_red;
449 Setting<float> bg_green;
450 Setting<float> bg_blue;
451
386 // System 452 // System
387 bool use_docked_mode; 453 Setting<std::optional<u32>> rng_seed;
388 std::optional<u32> rng_seed;
389 // Measured in seconds since epoch 454 // Measured in seconds since epoch
390 std::optional<std::chrono::seconds> custom_rtc; 455 Setting<std::optional<std::chrono::seconds>> custom_rtc;
391 // Set on game boot, reset on stop. Seconds difference between current time and `custom_rtc` 456 // Set on game boot, reset on stop. Seconds difference between current time and `custom_rtc`
392 std::chrono::seconds custom_rtc_differential; 457 std::chrono::seconds custom_rtc_differential;
393 458
394 s32 current_user; 459 s32 current_user;
395 s32 language_index; 460 Setting<s32> language_index;
396 s32 region_index; 461 Setting<s32> region_index;
397 s32 time_zone_index; 462 Setting<s32> time_zone_index;
398 s32 sound_index; 463 Setting<s32> sound_index;
399 464
400 // Controls 465 // Controls
401 std::array<PlayerInput, 10> players; 466 std::array<PlayerInput, 10> players;
@@ -419,8 +484,7 @@ struct Values {
419 u16 udp_input_port; 484 u16 udp_input_port;
420 u8 udp_pad_index; 485 u8 udp_pad_index;
421 486
422 // Core 487 bool use_docked_mode;
423 bool use_multi_core;
424 488
425 // Data Storage 489 // Data Storage
426 bool use_virtual_sd; 490 bool use_virtual_sd;
@@ -432,38 +496,6 @@ struct Values {
432 NANDUserSize nand_user_size; 496 NANDUserSize nand_user_size;
433 SDMCSize sdmc_size; 497 SDMCSize sdmc_size;
434 498
435 // Renderer
436 RendererBackend renderer_backend;
437 bool renderer_debug;
438 int vulkan_device;
439
440 float resolution_factor;
441 int aspect_ratio;
442 int max_anisotropy;
443 bool use_frame_limit;
444 u16 frame_limit;
445 bool use_disk_shader_cache;
446 GPUAccuracy gpu_accuracy;
447 bool use_asynchronous_gpu_emulation;
448 bool use_vsync;
449 bool use_assembly_shaders;
450 bool force_30fps_mode;
451 bool use_fast_gpu_time;
452
453 float bg_red;
454 float bg_green;
455 float bg_blue;
456
457 std::string log_filter;
458
459 bool use_dev_keys;
460
461 // Audio
462 std::string sink_id;
463 bool enable_audio_stretching;
464 std::string audio_device_id;
465 float volume;
466
467 // Debugging 499 // Debugging
468 bool record_frame_times; 500 bool record_frame_times;
469 bool use_gdbstub; 501 bool use_gdbstub;
@@ -474,8 +506,13 @@ struct Values {
474 bool reporting_services; 506 bool reporting_services;
475 bool quest_flag; 507 bool quest_flag;
476 bool disable_cpu_opt; 508 bool disable_cpu_opt;
509 bool disable_macro_jit;
477 510
478 // BCAT 511 // Miscellaneous
512 std::string log_filter;
513 bool use_dev_keys;
514
515 // Services
479 std::string bcat_backend; 516 std::string bcat_backend;
480 bool bcat_boxcat_local; 517 bool bcat_boxcat_local;
481 518
@@ -489,6 +526,8 @@ struct Values {
489 std::map<u64, std::vector<std::string>> disabled_addons; 526 std::map<u64, std::vector<std::string>> disabled_addons;
490} extern values; 527} extern values;
491 528
529float Volume();
530
492bool IsGPULevelExtreme(); 531bool IsGPULevelExtreme();
493bool IsGPULevelHigh(); 532bool IsGPULevelHigh();
494 533
@@ -497,4 +536,7 @@ std::string GetTimeZoneString();
497void Apply(); 536void Apply();
498void LogSettings(); 537void LogSettings();
499 538
539// Restore the global state of all applicable settings in the Values struct
540void RestoreGlobalState();
541
500} // namespace Settings 542} // namespace Settings
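
Note: a quick illustration of the Setting<Type> semantics defined above (the values are made up):

    #include "core/settings.h"

    void SettingExample() {
        Settings::Setting<u16> frame_limit{static_cast<u16>(100)};  // global copy = 100

        frame_limit.SetValue(50);        // use_global is still true, so the global copy becomes 50
        frame_limit.SetGlobal(false);    // switch reads/writes to the per-game (local) copy
        frame_limit.SetValue(200);       // only the local copy changes

        const u16 local_value  = frame_limit.GetValue();       // 200
        const u16 global_value = frame_limit.GetValue(true);   // 50; need_global forces the global copy
        (void)local_value;
        (void)global_value;
    }
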
diff --git a/src/core/telemetry_session.cpp b/src/core/telemetry_session.cpp
index c781b3cfc..78915e6db 100644
--- a/src/core/telemetry_session.cpp
+++ b/src/core/telemetry_session.cpp
@@ -189,19 +189,24 @@ void TelemetrySession::AddInitialInfo(Loader::AppLoader& app_loader) {
189 // Log user configuration information 189 // Log user configuration information
190 constexpr auto field_type = Telemetry::FieldType::UserConfig; 190 constexpr auto field_type = Telemetry::FieldType::UserConfig;
191 AddField(field_type, "Audio_SinkId", Settings::values.sink_id); 191 AddField(field_type, "Audio_SinkId", Settings::values.sink_id);
192 AddField(field_type, "Audio_EnableAudioStretching", Settings::values.enable_audio_stretching); 192 AddField(field_type, "Audio_EnableAudioStretching",
193 AddField(field_type, "Core_UseMultiCore", Settings::values.use_multi_core); 193 Settings::values.enable_audio_stretching.GetValue());
194 AddField(field_type, "Renderer_Backend", TranslateRenderer(Settings::values.renderer_backend)); 194 AddField(field_type, "Core_UseMultiCore", Settings::values.use_multi_core.GetValue());
195 AddField(field_type, "Renderer_ResolutionFactor", Settings::values.resolution_factor); 195 AddField(field_type, "Renderer_Backend",
196 AddField(field_type, "Renderer_UseFrameLimit", Settings::values.use_frame_limit); 196 TranslateRenderer(Settings::values.renderer_backend.GetValue()));
197 AddField(field_type, "Renderer_FrameLimit", Settings::values.frame_limit); 197 AddField(field_type, "Renderer_ResolutionFactor",
198 AddField(field_type, "Renderer_UseDiskShaderCache", Settings::values.use_disk_shader_cache); 198 Settings::values.resolution_factor.GetValue());
199 AddField(field_type, "Renderer_UseFrameLimit", Settings::values.use_frame_limit.GetValue());
200 AddField(field_type, "Renderer_FrameLimit", Settings::values.frame_limit.GetValue());
201 AddField(field_type, "Renderer_UseDiskShaderCache",
202 Settings::values.use_disk_shader_cache.GetValue());
199 AddField(field_type, "Renderer_GPUAccuracyLevel", 203 AddField(field_type, "Renderer_GPUAccuracyLevel",
200 TranslateGPUAccuracyLevel(Settings::values.gpu_accuracy)); 204 TranslateGPUAccuracyLevel(Settings::values.gpu_accuracy.GetValue()));
201 AddField(field_type, "Renderer_UseAsynchronousGpuEmulation", 205 AddField(field_type, "Renderer_UseAsynchronousGpuEmulation",
202 Settings::values.use_asynchronous_gpu_emulation); 206 Settings::values.use_asynchronous_gpu_emulation.GetValue());
203 AddField(field_type, "Renderer_UseVsync", Settings::values.use_vsync); 207 AddField(field_type, "Renderer_UseVsync", Settings::values.use_vsync.GetValue());
204 AddField(field_type, "Renderer_UseAssemblyShaders", Settings::values.use_assembly_shaders); 208 AddField(field_type, "Renderer_UseAssemblyShaders",
209 Settings::values.use_assembly_shaders.GetValue());
205 AddField(field_type, "System_UseDockedMode", Settings::values.use_docked_mode); 210 AddField(field_type, "System_UseDockedMode", Settings::values.use_docked_mode);
206} 211}
207 212
diff --git a/src/core/tools/freezer.cpp b/src/core/tools/freezer.cpp
index b2c6c537e..8b0c50d11 100644
--- a/src/core/tools/freezer.cpp
+++ b/src/core/tools/freezer.cpp
@@ -14,7 +14,7 @@
14namespace Tools { 14namespace Tools {
15namespace { 15namespace {
16 16
17constexpr s64 MEMORY_FREEZER_TICKS = static_cast<s64>(Core::Hardware::BASE_CLOCK_RATE / 60); 17constexpr s64 MEMORY_FREEZER_TICKS = static_cast<s64>(1000000000 / 60);
18 18
19u64 MemoryReadWidth(Core::Memory::Memory& memory, u32 width, VAddr addr) { 19u64 MemoryReadWidth(Core::Memory::Memory& memory, u32 width, VAddr addr) {
20 switch (width) { 20 switch (width) {
@@ -57,7 +57,7 @@ Freezer::Freezer(Core::Timing::CoreTiming& core_timing_, Core::Memory::Memory& m
57 : core_timing{core_timing_}, memory{memory_} { 57 : core_timing{core_timing_}, memory{memory_} {
58 event = Core::Timing::CreateEvent( 58 event = Core::Timing::CreateEvent(
59 "MemoryFreezer::FrameCallback", 59 "MemoryFreezer::FrameCallback",
60 [this](u64 userdata, s64 cycles_late) { FrameCallback(userdata, cycles_late); }); 60 [this](u64 userdata, s64 ns_late) { FrameCallback(userdata, ns_late); });
61 core_timing.ScheduleEvent(MEMORY_FREEZER_TICKS, event); 61 core_timing.ScheduleEvent(MEMORY_FREEZER_TICKS, event);
62} 62}
63 63
@@ -158,7 +158,7 @@ std::vector<Freezer::Entry> Freezer::GetEntries() const {
158 return entries; 158 return entries;
159} 159}
160 160
161void Freezer::FrameCallback(u64 userdata, s64 cycles_late) { 161void Freezer::FrameCallback(u64 userdata, s64 ns_late) {
162 if (!IsActive()) { 162 if (!IsActive()) {
163 LOG_DEBUG(Common_Memory, "Memory freezer has been deactivated, ending callback events."); 163 LOG_DEBUG(Common_Memory, "Memory freezer has been deactivated, ending callback events.");
164 return; 164 return;
@@ -173,7 +173,7 @@ void Freezer::FrameCallback(u64 userdata, s64 cycles_late) {
173 MemoryWriteWidth(memory, entry.width, entry.address, entry.value); 173 MemoryWriteWidth(memory, entry.width, entry.address, entry.value);
174 } 174 }
175 175
176 core_timing.ScheduleEvent(MEMORY_FREEZER_TICKS - cycles_late, event); 176 core_timing.ScheduleEvent(MEMORY_FREEZER_TICKS - ns_late, event);
177} 177}
178 178
179void Freezer::FillEntryReads() { 179void Freezer::FillEntryReads() {