Diffstat (limited to 'src/core')
-rw-r--r--  src/core/CMakeLists.txt | 14
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic.cpp | 8
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic.h | 10
-rw-r--r--  src/core/arm/unicorn/arm_unicorn.cpp | 5
-rw-r--r--  src/core/arm/unicorn/arm_unicorn.h | 4
-rw-r--r--  src/core/core.cpp | 11
-rw-r--r--  src/core/core.h | 4
-rw-r--r--  src/core/core_cpu.cpp | 6
-rw-r--r--  src/core/core_timing.cpp | 2
-rw-r--r--  src/core/core_timing.h | 2
-rw-r--r--  src/core/core_timing_util.cpp | 6
-rw-r--r--  src/core/core_timing_util.h | 3
-rw-r--r--  src/core/file_sys/cheat_engine.cpp | 492
-rw-r--r--  src/core/file_sys/cheat_engine.h | 234
-rw-r--r--  src/core/file_sys/content_archive.h | 15
-rw-r--r--  src/core/file_sys/control_metadata.cpp | 6
-rw-r--r--  src/core/file_sys/control_metadata.h | 30
-rw-r--r--  src/core/file_sys/errors.h | 3
-rw-r--r--  src/core/file_sys/fsmitm_romfsbuild.cpp | 4
-rw-r--r--  src/core/file_sys/nca_metadata.cpp | 8
-rw-r--r--  src/core/file_sys/nca_metadata.h | 3
-rw-r--r--  src/core/file_sys/patch_manager.cpp | 79
-rw-r--r--  src/core/file_sys/patch_manager.h | 9
-rw-r--r--  src/core/file_sys/program_metadata.cpp | 27
-rw-r--r--  src/core/file_sys/program_metadata.h | 2
-rw-r--r--  src/core/file_sys/registered_cache.cpp | 2
-rw-r--r--  src/core/file_sys/savedata_factory.cpp | 8
-rw-r--r--  src/core/file_sys/savedata_factory.h | 11
-rw-r--r--  src/core/file_sys/system_archive/system_archive.cpp | 3
-rw-r--r--  src/core/file_sys/system_archive/system_version.cpp | 52
-rw-r--r--  src/core/file_sys/system_archive/system_version.h | 16
-rw-r--r--  src/core/frontend/emu_window.cpp | 6
-rw-r--r--  src/core/gdbstub/gdbstub.cpp | 2
-rw-r--r--  src/core/hle/ipc.h | 44
-rw-r--r--  src/core/hle/ipc_helpers.h | 34
-rw-r--r--  src/core/hle/kernel/address_arbiter.cpp | 6
-rw-r--r--  src/core/hle/kernel/client_port.cpp | 10
-rw-r--r--  src/core/hle/kernel/code_set.cpp | 12
-rw-r--r--  src/core/hle/kernel/code_set.h | 89
-rw-r--r--  src/core/hle/kernel/kernel.cpp | 15
-rw-r--r--  src/core/hle/kernel/kernel.h | 6
-rw-r--r--  src/core/hle/kernel/mutex.cpp | 35
-rw-r--r--  src/core/hle/kernel/mutex.h | 20
-rw-r--r--  src/core/hle/kernel/object.cpp | 2
-rw-r--r--  src/core/hle/kernel/object.h | 2
-rw-r--r--  src/core/hle/kernel/process.cpp | 54
-rw-r--r--  src/core/hle/kernel/process.h | 95
-rw-r--r--  src/core/hle/kernel/readable_event.cpp | 2
-rw-r--r--  src/core/hle/kernel/readable_event.h | 2
-rw-r--r--  src/core/hle/kernel/resource_limit.cpp | 7
-rw-r--r--  src/core/hle/kernel/resource_limit.h | 11
-rw-r--r--  src/core/hle/kernel/scheduler.cpp | 67
-rw-r--r--  src/core/hle/kernel/scheduler.h | 6
-rw-r--r--  src/core/hle/kernel/server_port.cpp | 9
-rw-r--r--  src/core/hle/kernel/server_port.h | 9
-rw-r--r--  src/core/hle/kernel/server_session.cpp | 4
-rw-r--r--  src/core/hle/kernel/server_session.h | 9
-rw-r--r--  src/core/hle/kernel/shared_memory.cpp | 11
-rw-r--r--  src/core/hle/kernel/shared_memory.h | 10
-rw-r--r--  src/core/hle/kernel/svc.cpp | 302
-rw-r--r--  src/core/hle/kernel/svc_wrap.h | 8
-rw-r--r--  src/core/hle/kernel/thread.cpp | 93
-rw-r--r--  src/core/hle/kernel/thread.h | 32
-rw-r--r--  src/core/hle/kernel/transfer_memory.cpp | 81
-rw-r--r--  src/core/hle/kernel/transfer_memory.h | 103
-rw-r--r--  src/core/hle/kernel/vm_manager.cpp | 102
-rw-r--r--  src/core/hle/kernel/vm_manager.h | 69
-rw-r--r--  src/core/hle/kernel/wait_object.h | 2
-rw-r--r--  src/core/hle/result.h | 4
-rw-r--r--  src/core/hle/service/am/am.cpp | 185
-rw-r--r--  src/core/hle/service/am/am.h | 31
-rw-r--r--  src/core/hle/service/audio/audin_u.cpp | 4
-rw-r--r--  src/core/hle/service/audio/audout_u.cpp | 4
-rw-r--r--  src/core/hle/service/audio/audrec_u.cpp | 4
-rw-r--r--  src/core/hle/service/audio/audren_u.cpp | 13
-rw-r--r--  src/core/hle/service/audio/hwopus.cpp | 82
-rw-r--r--  src/core/hle/service/fatal/fatal.cpp | 89
-rw-r--r--  src/core/hle/service/filesystem/filesystem.cpp | 19
-rw-r--r--  src/core/hle/service/filesystem/filesystem.h | 2
-rw-r--r--  src/core/hle/service/filesystem/fsp_srv.cpp | 94
-rw-r--r--  src/core/hle/service/filesystem/fsp_srv.h | 1
-rw-r--r--  src/core/hle/service/hid/controllers/debug_pad.h | 30
-rw-r--r--  src/core/hle/service/hid/controllers/npad.h | 102
-rw-r--r--  src/core/hle/service/hid/controllers/touchscreen.h | 4
-rw-r--r--  src/core/hle/service/hid/hid.cpp | 10
-rw-r--r--  src/core/hle/service/hid/hid.h | 5
-rw-r--r--  src/core/hle/service/ldr/ldr.cpp | 8
-rw-r--r--  src/core/hle/service/lm/lm.cpp | 2
-rw-r--r--  src/core/hle/service/nfc/nfc.cpp | 2
-rw-r--r--  src/core/hle/service/nfp/nfp.cpp | 2
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvdevice.h | 10
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvdisp_disp0.h | 2
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp | 17
-rw-r--r--  src/core/hle/service/nvdrv/interface.h | 2
-rw-r--r--  src/core/hle/service/nvdrv/nvmemp.h | 2
-rw-r--r--  src/core/hle/service/nvflinger/nvflinger.cpp | 4
-rw-r--r--  src/core/hle/service/service.h | 2
-rw-r--r--  src/core/hle/service/set/set_cal.h | 2
-rw-r--r--  src/core/hle/service/set/set_sys.cpp | 79
-rw-r--r--  src/core/hle/service/set/set_sys.h | 2
-rw-r--r--  src/core/hle/service/sockets/sfdnsres.cpp | 12
-rw-r--r--  src/core/hle/service/spl/module.cpp | 4
-rw-r--r--  src/core/hle/service/ssl/ssl.cpp | 10
-rw-r--r--  src/core/hle/service/vi/vi.cpp | 5
-rw-r--r--  src/core/loader/elf.cpp | 3
-rw-r--r--  src/core/loader/linker.cpp | 147
-rw-r--r--  src/core/loader/linker.h | 36
-rw-r--r--  src/core/loader/nro.cpp | 3
-rw-r--r--  src/core/loader/nro.h | 4
-rw-r--r--  src/core/loader/nso.cpp | 117
-rw-r--r--  src/core/loader/nso.h | 43
-rw-r--r--  src/core/loader/xci.h | 2
-rw-r--r--  src/core/memory.cpp | 220
-rw-r--r--  src/core/memory.h | 94
-rw-r--r--  src/core/memory_hook.cpp | 11
-rw-r--r--  src/core/memory_hook.h | 47
-rw-r--r--  src/core/memory_setup.h | 19
-rw-r--r--  src/core/perf_stats.cpp | 10
-rw-r--r--  src/core/settings.cpp | 1
-rw-r--r--  src/core/settings.h | 1
120 files changed, 2714 insertions(+), 1344 deletions(-)
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 8ccb2d5f0..c59107102 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -31,6 +31,8 @@ add_library(core STATIC
31 file_sys/bis_factory.h 31 file_sys/bis_factory.h
32 file_sys/card_image.cpp 32 file_sys/card_image.cpp
33 file_sys/card_image.h 33 file_sys/card_image.h
34 file_sys/cheat_engine.cpp
35 file_sys/cheat_engine.h
34 file_sys/content_archive.cpp 36 file_sys/content_archive.cpp
35 file_sys/content_archive.h 37 file_sys/content_archive.h
36 file_sys/control_metadata.cpp 38 file_sys/control_metadata.cpp
@@ -68,6 +70,8 @@ add_library(core STATIC
68 file_sys/system_archive/ng_word.h 70 file_sys/system_archive/ng_word.h
69 file_sys/system_archive/system_archive.cpp 71 file_sys/system_archive/system_archive.cpp
70 file_sys/system_archive/system_archive.h 72 file_sys/system_archive/system_archive.h
73 file_sys/system_archive/system_version.cpp
74 file_sys/system_archive/system_version.h
71 file_sys/vfs.cpp 75 file_sys/vfs.cpp
72 file_sys/vfs.h 76 file_sys/vfs.h
73 file_sys/vfs_concat.cpp 77 file_sys/vfs_concat.cpp
@@ -107,6 +111,8 @@ add_library(core STATIC
107 hle/kernel/client_port.h 111 hle/kernel/client_port.h
108 hle/kernel/client_session.cpp 112 hle/kernel/client_session.cpp
109 hle/kernel/client_session.h 113 hle/kernel/client_session.h
114 hle/kernel/code_set.cpp
115 hle/kernel/code_set.h
110 hle/kernel/errors.h 116 hle/kernel/errors.h
111 hle/kernel/handle_table.cpp 117 hle/kernel/handle_table.cpp
112 hle/kernel/handle_table.h 118 hle/kernel/handle_table.h
@@ -140,6 +146,8 @@ add_library(core STATIC
140 hle/kernel/svc_wrap.h 146 hle/kernel/svc_wrap.h
141 hle/kernel/thread.cpp 147 hle/kernel/thread.cpp
142 hle/kernel/thread.h 148 hle/kernel/thread.h
149 hle/kernel/transfer_memory.cpp
150 hle/kernel/transfer_memory.h
143 hle/kernel/vm_manager.cpp 151 hle/kernel/vm_manager.cpp
144 hle/kernel/vm_manager.h 152 hle/kernel/vm_manager.h
145 hle/kernel/wait_object.cpp 153 hle/kernel/wait_object.cpp
@@ -419,8 +427,6 @@ add_library(core STATIC
419 loader/deconstructed_rom_directory.h 427 loader/deconstructed_rom_directory.h
420 loader/elf.cpp 428 loader/elf.cpp
421 loader/elf.h 429 loader/elf.h
422 loader/linker.cpp
423 loader/linker.h
424 loader/loader.cpp 430 loader/loader.cpp
425 loader/loader.h 431 loader/loader.h
426 loader/nax.cpp 432 loader/nax.cpp
@@ -437,8 +443,6 @@ add_library(core STATIC
437 loader/xci.h 443 loader/xci.h
438 memory.cpp 444 memory.cpp
439 memory.h 445 memory.h
440 memory_hook.cpp
441 memory_hook.h
442 memory_setup.h 446 memory_setup.h
443 perf_stats.cpp 447 perf_stats.cpp
444 perf_stats.h 448 perf_stats.h
@@ -454,7 +458,7 @@ add_library(core STATIC
454create_target_directory_groups(core) 458create_target_directory_groups(core)
455 459
456target_link_libraries(core PUBLIC common PRIVATE audio_core video_core) 460target_link_libraries(core PUBLIC common PRIVATE audio_core video_core)
457target_link_libraries(core PUBLIC Boost::boost PRIVATE fmt lz4_static mbedtls opus unicorn open_source_archives) 461target_link_libraries(core PUBLIC Boost::boost PRIVATE fmt mbedtls opus unicorn open_source_archives)
458if (ENABLE_WEB_SERVICE) 462if (ENABLE_WEB_SERVICE)
459 target_compile_definitions(core PRIVATE -DENABLE_WEB_SERVICE) 463 target_compile_definitions(core PRIVATE -DENABLE_WEB_SERVICE)
460 target_link_libraries(core PRIVATE web_service) 464 target_link_libraries(core PRIVATE web_service)
diff --git a/src/core/arm/dynarmic/arm_dynarmic.cpp b/src/core/arm/dynarmic/arm_dynarmic.cpp
index 9b7ca4030..49145911b 100644
--- a/src/core/arm/dynarmic/arm_dynarmic.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic.cpp
@@ -12,6 +12,7 @@
12#include "core/core.h" 12#include "core/core.h"
13#include "core/core_cpu.h" 13#include "core/core_cpu.h"
14#include "core/core_timing.h" 14#include "core/core_timing.h"
15#include "core/core_timing_util.h"
15#include "core/gdbstub/gdbstub.h" 16#include "core/gdbstub/gdbstub.h"
16#include "core/hle/kernel/process.h" 17#include "core/hle/kernel/process.h"
17#include "core/hle/kernel/svc.h" 18#include "core/hle/kernel/svc.h"
@@ -25,7 +26,6 @@ using Vector = Dynarmic::A64::Vector;
25class ARM_Dynarmic_Callbacks : public Dynarmic::A64::UserCallbacks { 26class ARM_Dynarmic_Callbacks : public Dynarmic::A64::UserCallbacks {
26public: 27public:
27 explicit ARM_Dynarmic_Callbacks(ARM_Dynarmic& parent) : parent(parent) {} 28 explicit ARM_Dynarmic_Callbacks(ARM_Dynarmic& parent) : parent(parent) {}
28 ~ARM_Dynarmic_Callbacks() = default;
29 29
30 u8 MemoryRead8(u64 vaddr) override { 30 u8 MemoryRead8(u64 vaddr) override {
31 return Memory::Read8(vaddr); 31 return Memory::Read8(vaddr);
@@ -119,7 +119,7 @@ public:
119 return std::max(parent.core_timing.GetDowncount(), 0); 119 return std::max(parent.core_timing.GetDowncount(), 0);
120 } 120 }
121 u64 GetCNTPCT() override { 121 u64 GetCNTPCT() override {
122 return parent.core_timing.GetTicks(); 122 return Timing::CpuCyclesToClockCycles(parent.core_timing.GetTicks());
123 } 123 }
124 124
125 ARM_Dynarmic& parent; 125 ARM_Dynarmic& parent;
@@ -151,7 +151,7 @@ std::unique_ptr<Dynarmic::A64::Jit> ARM_Dynarmic::MakeJit() const {
151 config.tpidr_el0 = &cb->tpidr_el0; 151 config.tpidr_el0 = &cb->tpidr_el0;
152 config.dczid_el0 = 4; 152 config.dczid_el0 = 4;
153 config.ctr_el0 = 0x8444c004; 153 config.ctr_el0 = 0x8444c004;
154 config.cntfrq_el0 = 19200000; // Value from fusee. 154 config.cntfrq_el0 = Timing::CNTFREQ;
155 155
156 // Unpredictable instructions 156 // Unpredictable instructions
157 config.define_unpredictable_behaviour = true; 157 config.define_unpredictable_behaviour = true;
@@ -163,7 +163,6 @@ MICROPROFILE_DEFINE(ARM_Jit_Dynarmic, "ARM JIT", "Dynarmic", MP_RGB(255, 64, 64)
163 163
164void ARM_Dynarmic::Run() { 164void ARM_Dynarmic::Run() {
165 MICROPROFILE_SCOPE(ARM_Jit_Dynarmic); 165 MICROPROFILE_SCOPE(ARM_Jit_Dynarmic);
166 ASSERT(Memory::GetCurrentPageTable() == current_page_table);
167 166
168 jit->Run(); 167 jit->Run();
169} 168}
@@ -278,7 +277,6 @@ void ARM_Dynarmic::ClearExclusiveState() {
278 277
279void ARM_Dynarmic::PageTableChanged() { 278void ARM_Dynarmic::PageTableChanged() {
280 jit = MakeJit(); 279 jit = MakeJit();
281 current_page_table = Memory::GetCurrentPageTable();
282} 280}
283 281
284DynarmicExclusiveMonitor::DynarmicExclusiveMonitor(std::size_t core_count) : monitor(core_count) {} 282DynarmicExclusiveMonitor::DynarmicExclusiveMonitor(std::size_t core_count) : monitor(core_count) {}
diff --git a/src/core/arm/dynarmic/arm_dynarmic.h b/src/core/arm/dynarmic/arm_dynarmic.h
index 6cc458296..d867c2a50 100644
--- a/src/core/arm/dynarmic/arm_dynarmic.h
+++ b/src/core/arm/dynarmic/arm_dynarmic.h
@@ -12,10 +12,6 @@
12#include "core/arm/exclusive_monitor.h" 12#include "core/arm/exclusive_monitor.h"
13#include "core/arm/unicorn/arm_unicorn.h" 13#include "core/arm/unicorn/arm_unicorn.h"
14 14
15namespace Memory {
16struct PageTable;
17}
18
19namespace Core::Timing { 15namespace Core::Timing {
20class CoreTiming; 16class CoreTiming;
21} 17}
@@ -29,7 +25,7 @@ class ARM_Dynarmic final : public ARM_Interface {
29public: 25public:
30 ARM_Dynarmic(Timing::CoreTiming& core_timing, ExclusiveMonitor& exclusive_monitor, 26 ARM_Dynarmic(Timing::CoreTiming& core_timing, ExclusiveMonitor& exclusive_monitor,
31 std::size_t core_index); 27 std::size_t core_index);
32 ~ARM_Dynarmic(); 28 ~ARM_Dynarmic() override;
33 29
34 void MapBackingMemory(VAddr address, std::size_t size, u8* memory, 30 void MapBackingMemory(VAddr address, std::size_t size, u8* memory,
35 Kernel::VMAPermission perms) override; 31 Kernel::VMAPermission perms) override;
@@ -69,14 +65,12 @@ private:
69 std::size_t core_index; 65 std::size_t core_index;
70 Timing::CoreTiming& core_timing; 66 Timing::CoreTiming& core_timing;
71 DynarmicExclusiveMonitor& exclusive_monitor; 67 DynarmicExclusiveMonitor& exclusive_monitor;
72
73 Memory::PageTable* current_page_table = nullptr;
74}; 68};
75 69
76class DynarmicExclusiveMonitor final : public ExclusiveMonitor { 70class DynarmicExclusiveMonitor final : public ExclusiveMonitor {
77public: 71public:
78 explicit DynarmicExclusiveMonitor(std::size_t core_count); 72 explicit DynarmicExclusiveMonitor(std::size_t core_count);
79 ~DynarmicExclusiveMonitor(); 73 ~DynarmicExclusiveMonitor() override;
80 74
81 void SetExclusive(std::size_t core_index, VAddr addr) override; 75 void SetExclusive(std::size_t core_index, VAddr addr) override;
82 void ClearExclusive() override; 76 void ClearExclusive() override;
diff --git a/src/core/arm/unicorn/arm_unicorn.cpp b/src/core/arm/unicorn/arm_unicorn.cpp
index a542a098b..27309280c 100644
--- a/src/core/arm/unicorn/arm_unicorn.cpp
+++ b/src/core/arm/unicorn/arm_unicorn.cpp
@@ -192,12 +192,13 @@ void ARM_Unicorn::ExecuteInstructions(int num_instructions) {
192 CHECKED(uc_emu_start(uc, GetPC(), 1ULL << 63, 0, num_instructions)); 192 CHECKED(uc_emu_start(uc, GetPC(), 1ULL << 63, 0, num_instructions));
193 core_timing.AddTicks(num_instructions); 193 core_timing.AddTicks(num_instructions);
194 if (GDBStub::IsServerEnabled()) { 194 if (GDBStub::IsServerEnabled()) {
195 if (last_bkpt_hit) { 195 if (last_bkpt_hit && last_bkpt.type == GDBStub::BreakpointType::Execute) {
196 uc_reg_write(uc, UC_ARM64_REG_PC, &last_bkpt.address); 196 uc_reg_write(uc, UC_ARM64_REG_PC, &last_bkpt.address);
197 } 197 }
198
198 Kernel::Thread* thread = Kernel::GetCurrentThread(); 199 Kernel::Thread* thread = Kernel::GetCurrentThread();
199 SaveContext(thread->GetContext()); 200 SaveContext(thread->GetContext());
200 if (last_bkpt_hit || GDBStub::GetCpuStepFlag()) { 201 if (last_bkpt_hit || GDBStub::IsMemoryBreak() || GDBStub::GetCpuStepFlag()) {
201 last_bkpt_hit = false; 202 last_bkpt_hit = false;
202 GDBStub::Break(); 203 GDBStub::Break();
203 GDBStub::SendTrap(thread, 5); 204 GDBStub::SendTrap(thread, 5);
diff --git a/src/core/arm/unicorn/arm_unicorn.h b/src/core/arm/unicorn/arm_unicorn.h
index dbd6955ea..1e44f0736 100644
--- a/src/core/arm/unicorn/arm_unicorn.h
+++ b/src/core/arm/unicorn/arm_unicorn.h
@@ -18,7 +18,7 @@ namespace Core {
18class ARM_Unicorn final : public ARM_Interface { 18class ARM_Unicorn final : public ARM_Interface {
19public: 19public:
20 explicit ARM_Unicorn(Timing::CoreTiming& core_timing); 20 explicit ARM_Unicorn(Timing::CoreTiming& core_timing);
21 ~ARM_Unicorn(); 21 ~ARM_Unicorn() override;
22 22
23 void MapBackingMemory(VAddr address, std::size_t size, u8* memory, 23 void MapBackingMemory(VAddr address, std::size_t size, u8* memory,
24 Kernel::VMAPermission perms) override; 24 Kernel::VMAPermission perms) override;
@@ -50,7 +50,7 @@ private:
50 uc_engine* uc{}; 50 uc_engine* uc{};
51 Timing::CoreTiming& core_timing; 51 Timing::CoreTiming& core_timing;
52 GDBStub::BreakpointAddress last_bkpt{}; 52 GDBStub::BreakpointAddress last_bkpt{};
53 bool last_bkpt_hit; 53 bool last_bkpt_hit = false;
54}; 54};
55 55
56} // namespace Core 56} // namespace Core
diff --git a/src/core/core.cpp b/src/core/core.cpp
index 89b3fb418..4fe77c25b 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -32,6 +32,7 @@
32#include "core/perf_stats.h" 32#include "core/perf_stats.h"
33#include "core/settings.h" 33#include "core/settings.h"
34#include "core/telemetry_session.h" 34#include "core/telemetry_session.h"
35#include "file_sys/cheat_engine.h"
35#include "frontend/applets/profile_select.h" 36#include "frontend/applets/profile_select.h"
36#include "frontend/applets/software_keyboard.h" 37#include "frontend/applets/software_keyboard.h"
37#include "frontend/applets/web_browser.h" 38#include "frontend/applets/web_browser.h"
@@ -205,6 +206,7 @@ struct System::Impl {
205 GDBStub::Shutdown(); 206 GDBStub::Shutdown();
206 Service::Shutdown(); 207 Service::Shutdown();
207 service_manager.reset(); 208 service_manager.reset();
209 cheat_engine.reset();
208 telemetry_session.reset(); 210 telemetry_session.reset();
209 gpu_core.reset(); 211 gpu_core.reset();
210 212
@@ -255,6 +257,8 @@ struct System::Impl {
255 CpuCoreManager cpu_core_manager; 257 CpuCoreManager cpu_core_manager;
256 bool is_powered_on = false; 258 bool is_powered_on = false;
257 259
260 std::unique_ptr<FileSys::CheatEngine> cheat_engine;
261
258 /// Frontend applets 262 /// Frontend applets
259 std::unique_ptr<Core::Frontend::ProfileSelectApplet> profile_selector; 263 std::unique_ptr<Core::Frontend::ProfileSelectApplet> profile_selector;
260 std::unique_ptr<Core::Frontend::SoftwareKeyboardApplet> software_keyboard; 264 std::unique_ptr<Core::Frontend::SoftwareKeyboardApplet> software_keyboard;
@@ -453,6 +457,13 @@ Tegra::DebugContext* System::GetGPUDebugContext() const {
453 return impl->debug_context.get(); 457 return impl->debug_context.get();
454} 458}
455 459
460void System::RegisterCheatList(const std::vector<FileSys::CheatList>& list,
461 const std::string& build_id, VAddr code_region_start,
462 VAddr code_region_end) {
463 impl->cheat_engine = std::make_unique<FileSys::CheatEngine>(*this, list, build_id,
464 code_region_start, code_region_end);
465}
466
456void System::SetFilesystem(std::shared_ptr<FileSys::VfsFilesystem> vfs) { 467void System::SetFilesystem(std::shared_ptr<FileSys::VfsFilesystem> vfs) {
457 impl->virtual_filesystem = std::move(vfs); 468 impl->virtual_filesystem = std::move(vfs);
458} 469}
diff --git a/src/core/core.h b/src/core/core.h
index ba76a41d8..4d83b93cc 100644
--- a/src/core/core.h
+++ b/src/core/core.h
@@ -20,6 +20,7 @@ class WebBrowserApplet;
20} // namespace Core::Frontend 20} // namespace Core::Frontend
21 21
22namespace FileSys { 22namespace FileSys {
23class CheatList;
23class VfsFilesystem; 24class VfsFilesystem;
24} // namespace FileSys 25} // namespace FileSys
25 26
@@ -253,6 +254,9 @@ public:
253 254
254 std::shared_ptr<FileSys::VfsFilesystem> GetFilesystem() const; 255 std::shared_ptr<FileSys::VfsFilesystem> GetFilesystem() const;
255 256
257 void RegisterCheatList(const std::vector<FileSys::CheatList>& list, const std::string& build_id,
258 VAddr code_region_start, VAddr code_region_end);
259
256 void SetProfileSelector(std::unique_ptr<Frontend::ProfileSelectApplet> applet); 260 void SetProfileSelector(std::unique_ptr<Frontend::ProfileSelectApplet> applet);
257 261
258 const Frontend::ProfileSelectApplet& GetProfileSelector() const; 262 const Frontend::ProfileSelectApplet& GetProfileSelector() const;
diff --git a/src/core/core_cpu.cpp b/src/core/core_cpu.cpp
index 1eefed6d0..e75741db0 100644
--- a/src/core/core_cpu.cpp
+++ b/src/core/core_cpu.cpp
@@ -22,7 +22,7 @@
22namespace Core { 22namespace Core {
23 23
24void CpuBarrier::NotifyEnd() { 24void CpuBarrier::NotifyEnd() {
25 std::unique_lock<std::mutex> lock(mutex); 25 std::unique_lock lock{mutex};
26 end = true; 26 end = true;
27 condition.notify_all(); 27 condition.notify_all();
28} 28}
@@ -34,7 +34,7 @@ bool CpuBarrier::Rendezvous() {
34 } 34 }
35 35
36 if (!end) { 36 if (!end) {
37 std::unique_lock<std::mutex> lock(mutex); 37 std::unique_lock lock{mutex};
38 38
39 --cores_waiting; 39 --cores_waiting;
40 if (!cores_waiting) { 40 if (!cores_waiting) {
@@ -131,7 +131,7 @@ void Cpu::Reschedule() {
131 131
132 reschedule_pending = false; 132 reschedule_pending = false;
133 // Lock the global kernel mutex when we manipulate the HLE state 133 // Lock the global kernel mutex when we manipulate the HLE state
134 std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); 134 std::lock_guard lock{HLE::g_hle_lock};
135 scheduler->Reschedule(); 135 scheduler->Reschedule();
136} 136}
137 137
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp
index a0dd5db24..41adb2302 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -186,7 +186,7 @@ void CoreTiming::Advance() {
186 Event evt = std::move(event_queue.front()); 186 Event evt = std::move(event_queue.front());
187 std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>()); 187 std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>());
188 event_queue.pop_back(); 188 event_queue.pop_back();
189 evt.type->callback(evt.userdata, static_cast<int>(global_timer - evt.time)); 189 evt.type->callback(evt.userdata, global_timer - evt.time);
190 } 190 }
191 191
192 is_global_timer_sane = false; 192 is_global_timer_sane = false;
diff --git a/src/core/core_timing.h b/src/core/core_timing.h
index 59163bae1..9d2efde37 100644
--- a/src/core/core_timing.h
+++ b/src/core/core_timing.h
@@ -15,7 +15,7 @@
15namespace Core::Timing { 15namespace Core::Timing {
16 16
17/// A callback that may be scheduled for a particular core timing event. 17/// A callback that may be scheduled for a particular core timing event.
18using TimedCallback = std::function<void(u64 userdata, int cycles_late)>; 18using TimedCallback = std::function<void(u64 userdata, s64 cycles_late)>;
19 19
20/// Contains the characteristics of a particular event. 20/// Contains the characteristics of a particular event.
21struct EventType { 21struct EventType {
diff --git a/src/core/core_timing_util.cpp b/src/core/core_timing_util.cpp
index 88ff70233..7942f30d6 100644
--- a/src/core/core_timing_util.cpp
+++ b/src/core/core_timing_util.cpp
@@ -7,6 +7,7 @@
7#include <cinttypes> 7#include <cinttypes>
8#include <limits> 8#include <limits>
9#include "common/logging/log.h" 9#include "common/logging/log.h"
10#include "common/uint128.h"
10 11
11namespace Core::Timing { 12namespace Core::Timing {
12 13
@@ -60,4 +61,9 @@ s64 nsToCycles(u64 ns) {
60 return (BASE_CLOCK_RATE * static_cast<s64>(ns)) / 1000000000; 61 return (BASE_CLOCK_RATE * static_cast<s64>(ns)) / 1000000000;
61} 62}
62 63
64u64 CpuCyclesToClockCycles(u64 ticks) {
65 const u128 temporal = Common::Multiply64Into128(ticks, CNTFREQ);
66 return Common::Divide128On32(temporal, static_cast<u32>(BASE_CLOCK_RATE)).first;
67}
68
63} // namespace Core::Timing 69} // namespace Core::Timing
diff --git a/src/core/core_timing_util.h b/src/core/core_timing_util.h
index 513cfac1b..679aa3123 100644
--- a/src/core/core_timing_util.h
+++ b/src/core/core_timing_util.h
@@ -11,6 +11,7 @@ namespace Core::Timing {
11// The below clock rate is based on Switch's clockspeed being widely known as 1.020GHz 11// The below clock rate is based on Switch's clockspeed being widely known as 1.020GHz
12// The exact value used is of course unverified. 12// The exact value used is of course unverified.
13constexpr u64 BASE_CLOCK_RATE = 1019215872; // Switch clock speed is 1020MHz un/docked 13constexpr u64 BASE_CLOCK_RATE = 1019215872; // Switch clock speed is 1020MHz un/docked
14constexpr u64 CNTFREQ = 19200000; // Value from fusee.
14 15
15inline s64 msToCycles(int ms) { 16inline s64 msToCycles(int ms) {
16 // since ms is int there is no way to overflow 17 // since ms is int there is no way to overflow
@@ -61,4 +62,6 @@ inline u64 cyclesToMs(s64 cycles) {
61 return cycles * 1000 / BASE_CLOCK_RATE; 62 return cycles * 1000 / BASE_CLOCK_RATE;
62} 63}
63 64
65u64 CpuCyclesToClockCycles(u64 ticks);
66
64} // namespace Core::Timing 67} // namespace Core::Timing
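Note: the CpuCyclesToClockCycles() helper added above rescales CPU ticks (BASE_CLOCK_RATE = 1019215872 Hz) to counter cycles (CNTFREQ = 19200000 Hz) through a 128-bit intermediate so the multiplication cannot overflow. A minimal standalone sketch of the same conversion, using the GCC/Clang unsigned __int128 extension in place of Common::Multiply64Into128 and Common::Divide128On32 (illustration only, not the emulator's code):

#include <cstdint>
#include <cstdio>

constexpr uint64_t BASE_CLOCK_RATE = 1019215872; // CPU ticks per second
constexpr uint64_t CNTFREQ = 19200000;           // counter frequency, value from fusee

uint64_t CpuCyclesToClockCycles(uint64_t ticks) {
    // Widen before multiplying: ticks * CNTFREQ can exceed 64 bits.
    const unsigned __int128 wide = static_cast<unsigned __int128>(ticks) * CNTFREQ;
    return static_cast<uint64_t>(wide / BASE_CLOCK_RATE);
}

int main() {
    // One emulated second of CPU ticks maps to one second of counter cycles.
    std::printf("%llu\n", static_cast<unsigned long long>(
                              CpuCyclesToClockCycles(BASE_CLOCK_RATE))); // prints 19200000
}

This is what GetCNTPCT() in arm_dynarmic.cpp now returns, so the guest's CNTPCT counter advances at CNTFREQ rather than at the raw tick rate.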
diff --git a/src/core/file_sys/cheat_engine.cpp b/src/core/file_sys/cheat_engine.cpp
new file mode 100644
index 000000000..b06c2f20a
--- /dev/null
+++ b/src/core/file_sys/cheat_engine.cpp
@@ -0,0 +1,492 @@
1// Copyright 2018 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <locale>
6#include "common/hex_util.h"
7#include "common/microprofile.h"
8#include "common/swap.h"
9#include "core/core.h"
10#include "core/core_timing.h"
11#include "core/core_timing_util.h"
12#include "core/file_sys/cheat_engine.h"
13#include "core/hle/kernel/process.h"
14#include "core/hle/service/hid/controllers/npad.h"
15#include "core/hle/service/hid/hid.h"
16#include "core/hle/service/sm/sm.h"
17
18namespace FileSys {
19
20constexpr s64 CHEAT_ENGINE_TICKS = static_cast<s64>(Core::Timing::BASE_CLOCK_RATE / 60);
21constexpr u32 KEYPAD_BITMASK = 0x3FFFFFF;
22
23u64 Cheat::Address() const {
24 u64 out;
25 std::memcpy(&out, raw.data(), sizeof(u64));
26 return Common::swap64(out) & 0xFFFFFFFFFF;
27}
28
29u64 Cheat::ValueWidth(u64 offset) const {
30 return Value(offset, width);
31}
32
33u64 Cheat::Value(u64 offset, u64 width) const {
34 u64 out;
35 std::memcpy(&out, raw.data() + offset, sizeof(u64));
36 out = Common::swap64(out);
37 if (width == 8)
38 return out;
39 return out & ((1ull << (width * CHAR_BIT)) - 1);
40}
41
42u32 Cheat::KeypadValue() const {
43 u32 out;
44 std::memcpy(&out, raw.data(), sizeof(u32));
45 return Common::swap32(out) & 0x0FFFFFFF;
46}
47
48void CheatList::SetMemoryParameters(VAddr main_begin, VAddr heap_begin, VAddr main_end,
49 VAddr heap_end, MemoryWriter writer, MemoryReader reader) {
50 this->main_region_begin = main_begin;
51 this->main_region_end = main_end;
52 this->heap_region_begin = heap_begin;
53 this->heap_region_end = heap_end;
54 this->writer = writer;
55 this->reader = reader;
56}
57
58MICROPROFILE_DEFINE(Cheat_Engine, "Add-Ons", "Cheat Engine", MP_RGB(70, 200, 70));
59
60void CheatList::Execute() {
61 MICROPROFILE_SCOPE(Cheat_Engine);
62
63 std::fill(scratch.begin(), scratch.end(), 0);
64 in_standard = false;
65 for (std::size_t i = 0; i < master_list.size(); ++i) {
66 LOG_DEBUG(Common_Filesystem, "Executing block #{:08X} ({})", i, master_list[i].first);
67 current_block = i;
68 ExecuteBlock(master_list[i].second);
69 }
70
71 in_standard = true;
72 for (std::size_t i = 0; i < standard_list.size(); ++i) {
73 LOG_DEBUG(Common_Filesystem, "Executing block #{:08X} ({})", i, standard_list[i].first);
74 current_block = i;
75 ExecuteBlock(standard_list[i].second);
76 }
77}
78
79CheatList::CheatList(const Core::System& system_, ProgramSegment master, ProgramSegment standard)
80 : master_list{std::move(master)}, standard_list{std::move(standard)}, system{&system_} {}
81
82bool CheatList::EvaluateConditional(const Cheat& cheat) const {
83 using ComparisonFunction = bool (*)(u64, u64);
84 constexpr std::array<ComparisonFunction, 6> comparison_functions{
85 [](u64 a, u64 b) { return a > b; }, [](u64 a, u64 b) { return a >= b; },
86 [](u64 a, u64 b) { return a < b; }, [](u64 a, u64 b) { return a <= b; },
87 [](u64 a, u64 b) { return a == b; }, [](u64 a, u64 b) { return a != b; },
88 };
89
90 if (cheat.type == CodeType::ConditionalInput) {
91 const auto applet_resource =
92 system->ServiceManager().GetService<Service::HID::Hid>("hid")->GetAppletResource();
93 if (applet_resource == nullptr) {
94 LOG_WARNING(
95 Common_Filesystem,
96 "Attempted to evaluate input conditional, but applet resource is not initialized!");
97 return false;
98 }
99
100 const auto press_state =
101 applet_resource
102 ->GetController<Service::HID::Controller_NPad>(Service::HID::HidController::NPad)
103 .GetAndResetPressState();
104 return ((press_state & cheat.KeypadValue()) & KEYPAD_BITMASK) != 0;
105 }
106
107 ASSERT(cheat.type == CodeType::Conditional);
108
109 const auto offset =
110 cheat.memory_type == MemoryType::MainNSO ? main_region_begin : heap_region_begin;
111 ASSERT(static_cast<u8>(cheat.comparison_op.Value()) < 6);
112 auto* function = comparison_functions[static_cast<u8>(cheat.comparison_op.Value())];
113 const auto addr = cheat.Address() + offset;
114
115 return function(reader(cheat.width, SanitizeAddress(addr)), cheat.ValueWidth(8));
116}
117
118void CheatList::ProcessBlockPairs(const Block& block) {
119 block_pairs.clear();
120
121 u64 scope = 0;
122 std::map<u64, u64> pairs;
123
124 for (std::size_t i = 0; i < block.size(); ++i) {
125 const auto& cheat = block[i];
126
127 switch (cheat.type) {
128 case CodeType::Conditional:
129 case CodeType::ConditionalInput:
130 pairs.insert_or_assign(scope, i);
131 ++scope;
132 break;
133 case CodeType::EndConditional: {
134 --scope;
135 const auto idx = pairs.at(scope);
136 block_pairs.insert_or_assign(idx, i);
137 break;
138 }
139 case CodeType::Loop: {
140 if (cheat.end_of_loop) {
141 --scope;
142 const auto idx = pairs.at(scope);
143 block_pairs.insert_or_assign(idx, i);
144 } else {
145 pairs.insert_or_assign(scope, i);
146 ++scope;
147 }
148 break;
149 }
150 }
151 }
152}
153
154void CheatList::WriteImmediate(const Cheat& cheat) {
155 const auto offset =
156 cheat.memory_type == MemoryType::MainNSO ? main_region_begin : heap_region_begin;
157 const auto& register_3 = scratch.at(cheat.register_3);
158
159 const auto addr = cheat.Address() + offset + register_3;
160 LOG_DEBUG(Common_Filesystem, "writing value={:016X} to addr={:016X}", addr,
161 cheat.Value(8, cheat.width));
162 writer(cheat.width, SanitizeAddress(addr), cheat.ValueWidth(8));
163}
164
165void CheatList::BeginConditional(const Cheat& cheat) {
166 if (EvaluateConditional(cheat)) {
167 return;
168 }
169
170 const auto iter = block_pairs.find(current_index);
171 ASSERT(iter != block_pairs.end());
172 current_index = iter->second - 1;
173}
174
175void CheatList::EndConditional(const Cheat& cheat) {
176 LOG_DEBUG(Common_Filesystem, "Ending conditional block.");
177}
178
179void CheatList::Loop(const Cheat& cheat) {
180 if (cheat.end_of_loop.Value())
181 ASSERT(!cheat.end_of_loop.Value());
182
183 auto& register_3 = scratch.at(cheat.register_3);
184 const auto iter = block_pairs.find(current_index);
185 ASSERT(iter != block_pairs.end());
186 ASSERT(iter->first < iter->second);
187
188 const s32 initial_value = static_cast<s32>(cheat.Value(4, sizeof(s32)));
189 for (s32 i = initial_value; i >= 0; --i) {
190 register_3 = static_cast<u64>(i);
191 for (std::size_t c = iter->first + 1; c < iter->second; ++c) {
192 current_index = c;
193 ExecuteSingleCheat(
194 (in_standard ? standard_list : master_list)[current_block].second[c]);
195 }
196 }
197
198 current_index = iter->second;
199}
200
201void CheatList::LoadImmediate(const Cheat& cheat) {
202 auto& register_3 = scratch.at(cheat.register_3);
203
204 LOG_DEBUG(Common_Filesystem, "setting register={:01X} equal to value={:016X}", cheat.register_3,
205 cheat.Value(4, 8));
206 register_3 = cheat.Value(4, 8);
207}
208
209void CheatList::LoadIndexed(const Cheat& cheat) {
210 const auto offset =
211 cheat.memory_type == MemoryType::MainNSO ? main_region_begin : heap_region_begin;
212 auto& register_3 = scratch.at(cheat.register_3);
213
214 const auto addr = (cheat.load_from_register.Value() ? register_3 : offset) + cheat.Address();
215 LOG_DEBUG(Common_Filesystem, "writing indexed value to register={:01X}, addr={:016X}",
216 cheat.register_3, addr);
217 register_3 = reader(cheat.width, SanitizeAddress(addr));
218}
219
220void CheatList::StoreIndexed(const Cheat& cheat) {
221 const auto& register_3 = scratch.at(cheat.register_3);
222
223 const auto addr =
224 register_3 + (cheat.add_additional_register.Value() ? scratch.at(cheat.register_6) : 0);
225 LOG_DEBUG(Common_Filesystem, "writing value={:016X} to addr={:016X}",
226 cheat.Value(4, cheat.width), addr);
227 writer(cheat.width, SanitizeAddress(addr), cheat.ValueWidth(4));
228}
229
230void CheatList::RegisterArithmetic(const Cheat& cheat) {
231 using ArithmeticFunction = u64 (*)(u64, u64);
232 constexpr std::array<ArithmeticFunction, 5> arithmetic_functions{
233 [](u64 a, u64 b) { return a + b; }, [](u64 a, u64 b) { return a - b; },
234 [](u64 a, u64 b) { return a * b; }, [](u64 a, u64 b) { return a << b; },
235 [](u64 a, u64 b) { return a >> b; },
236 };
237
238 using ArithmeticOverflowCheck = bool (*)(u64, u64);
239 constexpr std::array<ArithmeticOverflowCheck, 5> arithmetic_overflow_checks{
240 [](u64 a, u64 b) { return a > (std::numeric_limits<u64>::max() - b); }, // a + b
241 [](u64 a, u64 b) { return a > (std::numeric_limits<u64>::max() + b); }, // a - b
242 [](u64 a, u64 b) { return a > (std::numeric_limits<u64>::max() / b); }, // a * b
243 [](u64 a, u64 b) { return b >= 64 || (a & ~((1ull << (64 - b)) - 1)) != 0; }, // a << b
244 [](u64 a, u64 b) { return b >= 64 || (a & ((1ull << b) - 1)) != 0; }, // a >> b
245 };
246
247 static_assert(sizeof(arithmetic_functions) == sizeof(arithmetic_overflow_checks),
248 "Missing or have extra arithmetic overflow checks compared to functions!");
249
250 auto& register_3 = scratch.at(cheat.register_3);
251
252 ASSERT(static_cast<u8>(cheat.arithmetic_op.Value()) < 5);
253 auto* function = arithmetic_functions[static_cast<u8>(cheat.arithmetic_op.Value())];
254 auto* overflow_function =
255 arithmetic_overflow_checks[static_cast<u8>(cheat.arithmetic_op.Value())];
256 LOG_DEBUG(Common_Filesystem, "performing arithmetic with register={:01X}, value={:016X}",
257 cheat.register_3, cheat.ValueWidth(4));
258
259 if (overflow_function(register_3, cheat.ValueWidth(4))) {
260 LOG_WARNING(Common_Filesystem,
261 "overflow will occur when performing arithmetic operation={:02X} with operands "
262 "a={:016X}, b={:016X}!",
263 static_cast<u8>(cheat.arithmetic_op.Value()), register_3, cheat.ValueWidth(4));
264 }
265
266 register_3 = function(register_3, cheat.ValueWidth(4));
267}
268
269void CheatList::BeginConditionalInput(const Cheat& cheat) {
270 if (EvaluateConditional(cheat))
271 return;
272
273 const auto iter = block_pairs.find(current_index);
274 ASSERT(iter != block_pairs.end());
275 current_index = iter->second - 1;
276}
277
278VAddr CheatList::SanitizeAddress(VAddr in) const {
279 if ((in < main_region_begin || in >= main_region_end) &&
280 (in < heap_region_begin || in >= heap_region_end)) {
281 LOG_ERROR(Common_Filesystem,
282 "Cheat attempting to access memory at invalid address={:016X}, if this persists, "
283 "the cheat may be incorrect. However, this may be normal early in execution if "
284 "the game has not properly set up yet.",
285 in);
286 return 0; ///< Invalid addresses will hard crash
287 }
288
289 return in;
290}
291
292void CheatList::ExecuteSingleCheat(const Cheat& cheat) {
293 using CheatOperationFunction = void (CheatList::*)(const Cheat&);
294 constexpr std::array<CheatOperationFunction, 9> cheat_operation_functions{
295 &CheatList::WriteImmediate, &CheatList::BeginConditional,
296 &CheatList::EndConditional, &CheatList::Loop,
297 &CheatList::LoadImmediate, &CheatList::LoadIndexed,
298 &CheatList::StoreIndexed, &CheatList::RegisterArithmetic,
299 &CheatList::BeginConditionalInput,
300 };
301
302 const auto index = static_cast<u8>(cheat.type.Value());
303 ASSERT(index < sizeof(cheat_operation_functions));
304 const auto op = cheat_operation_functions[index];
305 (this->*op)(cheat);
306}
307
308void CheatList::ExecuteBlock(const Block& block) {
309 encountered_loops.clear();
310
311 ProcessBlockPairs(block);
312 for (std::size_t i = 0; i < block.size(); ++i) {
313 current_index = i;
314 ExecuteSingleCheat(block[i]);
315 i = current_index;
316 }
317}
318
319CheatParser::~CheatParser() = default;
320
321CheatList CheatParser::MakeCheatList(const Core::System& system, CheatList::ProgramSegment master,
322 CheatList::ProgramSegment standard) const {
323 return {system, std::move(master), std::move(standard)};
324}
325
326TextCheatParser::~TextCheatParser() = default;
327
328CheatList TextCheatParser::Parse(const Core::System& system, const std::vector<u8>& data) const {
329 std::stringstream ss;
330 ss.write(reinterpret_cast<const char*>(data.data()), data.size());
331
332 std::vector<std::string> lines;
333 std::string stream_line;
334 while (std::getline(ss, stream_line)) {
335 // Remove a trailing \r
336 if (!stream_line.empty() && stream_line.back() == '\r')
337 stream_line.pop_back();
338 lines.push_back(std::move(stream_line));
339 }
340
341 CheatList::ProgramSegment master_list;
342 CheatList::ProgramSegment standard_list;
343
344 for (std::size_t i = 0; i < lines.size(); ++i) {
345 auto line = lines[i];
346
347 if (!line.empty() && (line[0] == '[' || line[0] == '{')) {
348 const auto master = line[0] == '{';
349 const auto begin = master ? line.find('{') : line.find('[');
350 const auto end = master ? line.rfind('}') : line.rfind(']');
351
352 ASSERT(begin != std::string::npos && end != std::string::npos);
353
354 const std::string patch_name{line.begin() + begin + 1, line.begin() + end};
355 CheatList::Block block{};
356
357 while (i < lines.size() - 1) {
358 line = lines[++i];
359 if (!line.empty() && (line[0] == '[' || line[0] == '{')) {
360 --i;
361 break;
362 }
363
364 if (line.size() < 8)
365 continue;
366
367 Cheat out{};
368 out.raw = ParseSingleLineCheat(line);
369 block.push_back(out);
370 }
371
372 (master ? master_list : standard_list).emplace_back(patch_name, block);
373 }
374 }
375
376 return MakeCheatList(system, master_list, standard_list);
377}
378
379std::array<u8, 16> TextCheatParser::ParseSingleLineCheat(const std::string& line) const {
380 std::array<u8, 16> out{};
381
382 if (line.size() < 8)
383 return out;
384
385 const auto word1 = Common::HexStringToArray<sizeof(u32)>(std::string_view{line.data(), 8});
386 std::memcpy(out.data(), word1.data(), sizeof(u32));
387
388 if (line.size() < 17 || line[8] != ' ')
389 return out;
390
391 const auto word2 = Common::HexStringToArray<sizeof(u32)>(std::string_view{line.data() + 9, 8});
392 std::memcpy(out.data() + sizeof(u32), word2.data(), sizeof(u32));
393
394 if (line.size() < 26 || line[17] != ' ') {
395 // Perform shifting in case value is truncated early.
396 const auto type = static_cast<CodeType>((out[0] & 0xF0) >> 4);
397 if (type == CodeType::Loop || type == CodeType::LoadImmediate ||
398 type == CodeType::StoreIndexed || type == CodeType::RegisterArithmetic) {
399 std::memcpy(out.data() + 8, out.data() + 4, sizeof(u32));
400 std::memset(out.data() + 4, 0, sizeof(u32));
401 }
402
403 return out;
404 }
405
406 const auto word3 = Common::HexStringToArray<sizeof(u32)>(std::string_view{line.data() + 18, 8});
407 std::memcpy(out.data() + 2 * sizeof(u32), word3.data(), sizeof(u32));
408
409 if (line.size() < 35 || line[26] != ' ') {
410 // Perform shifting in case value is truncated early.
411 const auto type = static_cast<CodeType>((out[0] & 0xF0) >> 4);
412 if (type == CodeType::WriteImmediate || type == CodeType::Conditional) {
413 std::memcpy(out.data() + 12, out.data() + 8, sizeof(u32));
414 std::memset(out.data() + 8, 0, sizeof(u32));
415 }
416
417 return out;
418 }
419
420 const auto word4 = Common::HexStringToArray<sizeof(u32)>(std::string_view{line.data() + 27, 8});
421 std::memcpy(out.data() + 3 * sizeof(u32), word4.data(), sizeof(u32));
422
423 return out;
424}
425
426namespace {
427u64 MemoryReadImpl(u32 width, VAddr addr) {
428 switch (width) {
429 case 1:
430 return Memory::Read8(addr);
431 case 2:
432 return Memory::Read16(addr);
433 case 4:
434 return Memory::Read32(addr);
435 case 8:
436 return Memory::Read64(addr);
437 default:
438 UNREACHABLE();
439 return 0;
440 }
441}
442
443void MemoryWriteImpl(u32 width, VAddr addr, u64 value) {
444 switch (width) {
445 case 1:
446 Memory::Write8(addr, static_cast<u8>(value));
447 break;
448 case 2:
449 Memory::Write16(addr, static_cast<u16>(value));
450 break;
451 case 4:
452 Memory::Write32(addr, static_cast<u32>(value));
453 break;
454 case 8:
455 Memory::Write64(addr, value);
456 break;
457 default:
458 UNREACHABLE();
459 }
460}
461} // Anonymous namespace
462
463CheatEngine::CheatEngine(Core::System& system, std::vector<CheatList> cheats_,
464 const std::string& build_id, VAddr code_region_start,
465 VAddr code_region_end)
466 : cheats{std::move(cheats_)}, core_timing{system.CoreTiming()} {
467 event = core_timing.RegisterEvent(
468 "CheatEngine::FrameCallback::" + build_id,
469 [this](u64 userdata, s64 cycles_late) { FrameCallback(userdata, cycles_late); });
470 core_timing.ScheduleEvent(CHEAT_ENGINE_TICKS, event);
471
472 const auto& vm_manager = system.CurrentProcess()->VMManager();
473 for (auto& list : this->cheats) {
474 list.SetMemoryParameters(code_region_start, vm_manager.GetHeapRegionBaseAddress(),
475 code_region_end, vm_manager.GetHeapRegionEndAddress(),
476 &MemoryWriteImpl, &MemoryReadImpl);
477 }
478}
479
480CheatEngine::~CheatEngine() {
481 core_timing.UnscheduleEvent(event, 0);
482}
483
484void CheatEngine::FrameCallback(u64 userdata, s64 cycles_late) {
485 for (auto& list : cheats) {
486 list.Execute();
487 }
488
489 core_timing.ScheduleEvent(CHEAT_ENGINE_TICKS - cycles_late, event);
490}
491
492} // namespace FileSys
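Note: TextCheatParser::Parse above consumes plain-text cheat files: a line whose first character is '{' opens a master block and one starting with '[' opens a standard block (the text between the delimiters becomes the block name), and each following line contributes one cheat made of up to four space-separated 8-digit hex words. A hypothetical input in that shape (block names, addresses, and values are invented for illustration):

{Master Code}
580F0000 001C8D20

[99 Coins]
04010000 00ABCDEF 0000270F

Lines shorter than eight characters are skipped, and a truncated cheat (fewer than four words) is re-shifted by ParseSingleLineCheat() so the value still lands in the slot its opcode expects.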
diff --git a/src/core/file_sys/cheat_engine.h b/src/core/file_sys/cheat_engine.h
new file mode 100644
index 000000000..ac22a82cb
--- /dev/null
+++ b/src/core/file_sys/cheat_engine.h
@@ -0,0 +1,234 @@
1// Copyright 2018 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <map>
8#include <set>
9#include <vector>
10#include "common/bit_field.h"
11#include "common/common_types.h"
12
13namespace Core {
14class System;
15}
16
17namespace Core::Timing {
18class CoreTiming;
19struct EventType;
20} // namespace Core::Timing
21
22namespace FileSys {
23
24enum class CodeType : u32 {
25 // 0TMR00AA AAAAAAAA YYYYYYYY YYYYYYYY
26 // Writes a T sized value Y to the address A added to the value of register R in memory domain M
27 WriteImmediate = 0,
28
29 // 1TMC00AA AAAAAAAA YYYYYYYY YYYYYYYY
30 // Compares the T sized value Y to the value at address A in memory domain M using the
31 // conditional function C. If success, continues execution. If failure, jumps to the matching
32 // EndConditional statement.
33 Conditional = 1,
34
35 // 20000000
36 // Terminates a Conditional or ConditionalInput block.
37 EndConditional = 2,
38
39 // 300R0000 VVVVVVVV
40 // Starts looping V times, storing the current count in register R.
41 // Loop block is terminated with a matching 310R0000.
42 Loop = 3,
43
44 // 400R0000 VVVVVVVV VVVVVVVV
45 // Sets the value of register R to the value V.
46 LoadImmediate = 4,
47
48 // 5TMRI0AA AAAAAAAA
49 // Sets the value of register R to the value of width T at address A in memory domain M, with
50 // the current value of R added to the address if I == 1.
51 LoadIndexed = 5,
52
53 // 6T0RIFG0 VVVVVVVV VVVVVVVV
54 // Writes the value V of width T to the memory address stored in register R. Adds the value of
55 // register G to the final calculation if F is nonzero. Increments the value of register R by T
56 // after operation if I is nonzero.
57 StoreIndexed = 6,
58
59 // 7T0RA000 VVVVVVVV
60 // Performs the arithmetic operation A on the value in register R and the value V of width T,
61 // storing the result in register R.
62 RegisterArithmetic = 7,
63
64 // 8KKKKKKK
65 // Checks to see if any of the buttons defined by the bitmask K are pressed. If any are,
66 // execution continues. If none are, execution skips to the next EndConditional command.
67 ConditionalInput = 8,
68};
69
70enum class MemoryType : u32 {
71 // Addressed relative to start of main NSO
72 MainNSO = 0,
73
74 // Addressed relative to start of heap
75 Heap = 1,
76};
77
78enum class ArithmeticOp : u32 {
79 Add = 0,
80 Sub = 1,
81 Mult = 2,
82 LShift = 3,
83 RShift = 4,
84};
85
86enum class ComparisonOp : u32 {
87 GreaterThan = 1,
88 GreaterThanEqual = 2,
89 LessThan = 3,
90 LessThanEqual = 4,
91 Equal = 5,
92 Inequal = 6,
93};
94
95union Cheat {
96 std::array<u8, 16> raw;
97
98 BitField<4, 4, CodeType> type;
99 BitField<0, 4, u32> width; // Can be 1, 2, 4, or 8. Measured in bytes.
100 BitField<0, 4, u32> end_of_loop;
101 BitField<12, 4, MemoryType> memory_type;
102 BitField<8, 4, u32> register_3;
103 BitField<8, 4, ComparisonOp> comparison_op;
104 BitField<20, 4, u32> load_from_register;
105 BitField<20, 4, u32> increment_register;
106 BitField<20, 4, ArithmeticOp> arithmetic_op;
107 BitField<16, 4, u32> add_additional_register;
108 BitField<28, 4, u32> register_6;
109
110 u64 Address() const;
111 u64 ValueWidth(u64 offset) const;
112 u64 Value(u64 offset, u64 width) const;
113 u32 KeypadValue() const;
114};
115
116class CheatParser;
117
118// Represents a full collection of cheats for a game. The Execute function should be called every
119// interval that all cheats should be executed. Clients should not directly instantiate this class
120// (hence private constructor), they should instead receive an instance from CheatParser, which
121// guarantees the list is always in an acceptable state.
122class CheatList {
123public:
124 friend class CheatParser;
125
126 using Block = std::vector<Cheat>;
127 using ProgramSegment = std::vector<std::pair<std::string, Block>>;
128
129 // (width in bytes, address, value)
130 using MemoryWriter = void (*)(u32, VAddr, u64);
131 // (width in bytes, address) -> value
132 using MemoryReader = u64 (*)(u32, VAddr);
133
134 void SetMemoryParameters(VAddr main_begin, VAddr heap_begin, VAddr main_end, VAddr heap_end,
135 MemoryWriter writer, MemoryReader reader);
136
137 void Execute();
138
139private:
140 CheatList(const Core::System& system_, ProgramSegment master, ProgramSegment standard);
141
142 void ProcessBlockPairs(const Block& block);
143 void ExecuteSingleCheat(const Cheat& cheat);
144
145 void ExecuteBlock(const Block& block);
146
147 bool EvaluateConditional(const Cheat& cheat) const;
148
149 // Individual cheat operations
150 void WriteImmediate(const Cheat& cheat);
151 void BeginConditional(const Cheat& cheat);
152 void EndConditional(const Cheat& cheat);
153 void Loop(const Cheat& cheat);
154 void LoadImmediate(const Cheat& cheat);
155 void LoadIndexed(const Cheat& cheat);
156 void StoreIndexed(const Cheat& cheat);
157 void RegisterArithmetic(const Cheat& cheat);
158 void BeginConditionalInput(const Cheat& cheat);
159
160 VAddr SanitizeAddress(VAddr in) const;
161
162 // Master Codes are defined as codes that cannot be disabled and are run prior to all
163 // others.
164 ProgramSegment master_list;
165 // All other codes
166 ProgramSegment standard_list;
167
168 bool in_standard = false;
169
170 // 16 (0x0-0xF) scratch registers that can be used by cheats
171 std::array<u64, 16> scratch{};
172
173 MemoryWriter writer = nullptr;
174 MemoryReader reader = nullptr;
175
176 u64 main_region_begin{};
177 u64 heap_region_begin{};
178 u64 main_region_end{};
179 u64 heap_region_end{};
180
181 u64 current_block{};
182 // The current index of the cheat within the current Block
183 u64 current_index{};
184
185 // The 'stack' of the program. When a conditional or loop statement is encountered, its index is
186 // pushed onto this queue. When a end block is encountered, the condition is checked.
187 std::map<u64, u64> block_pairs;
188
189 std::set<u64> encountered_loops;
190
191 const Core::System* system;
192};
193
194// Intermediary class that parses a text file or other disk format for storing cheats into a
195// CheatList object, that can be used for execution.
196class CheatParser {
197public:
198 virtual ~CheatParser();
199
200 virtual CheatList Parse(const Core::System& system, const std::vector<u8>& data) const = 0;
201
202protected:
203 CheatList MakeCheatList(const Core::System& system_, CheatList::ProgramSegment master,
204 CheatList::ProgramSegment standard) const;
205};
206
207// CheatParser implementation that parses text files
208class TextCheatParser final : public CheatParser {
209public:
210 ~TextCheatParser() override;
211
212 CheatList Parse(const Core::System& system, const std::vector<u8>& data) const override;
213
214private:
215 std::array<u8, 16> ParseSingleLineCheat(const std::string& line) const;
216};
217
218// Class that encapsulates a CheatList and manages its interaction with memory and CoreTiming
219class CheatEngine final {
220public:
221 CheatEngine(Core::System& system_, std::vector<CheatList> cheats_, const std::string& build_id,
222 VAddr code_region_start, VAddr code_region_end);
223 ~CheatEngine();
224
225private:
226 void FrameCallback(u64 userdata, s64 cycles_late);
227
228 std::vector<CheatList> cheats;
229
230 Core::Timing::EventType* event;
231 Core::Timing::CoreTiming& core_timing;
232};
233
234} // namespace FileSys
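Note: the first word of every cheat packs its fields as nibbles, as documented in the CodeType comments above ("0TMR00AA ..." and friends). A small hypothetical decode of one WriteImmediate word using plain shifts, independent of the Cheat bitfield union (the constant is made up for illustration):

#include <cstdint>
#include <cstdio>

int main() {
    // Hypothetical first word of a WriteImmediate cheat, read from the text "04010000":
    // 0TMR00AA -> type 0, width T = 4 bytes, memory domain M = MainNSO, register R = 1.
    const uint32_t word = 0x04010000;
    const unsigned type      = (word >> 28) & 0xF; // 0 -> CodeType::WriteImmediate
    const unsigned width     = (word >> 24) & 0xF; // 4 -> 4-byte write
    const unsigned mem_type  = (word >> 20) & 0xF; // 0 -> MemoryType::MainNSO
    const unsigned register3 = (word >> 16) & 0xF; // 1 -> offset register R1
    std::printf("type=%u width=%u mem=%u reg=%u\n", type, width, mem_type, register3);
    // The remaining nibbles and words carry the 40-bit address A and the value Y.
}

CheatEngine then drives the parsed CheatList roughly once per frame via the CHEAT_ENGINE_TICKS core-timing event registered in cheat_engine.cpp.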
diff --git a/src/core/file_sys/content_archive.h b/src/core/file_sys/content_archive.h
index 5d4d05c82..15b9e6624 100644
--- a/src/core/file_sys/content_archive.h
+++ b/src/core/file_sys/content_archive.h
@@ -24,13 +24,26 @@ namespace FileSys {
24 24
25union NCASectionHeader; 25union NCASectionHeader;
26 26
27/// Describes the type of content within an NCA archive.
27enum class NCAContentType : u8 { 28enum class NCAContentType : u8 {
29 /// Executable-related data
28 Program = 0, 30 Program = 0,
31
32 /// Metadata.
29 Meta = 1, 33 Meta = 1,
34
35 /// Access control data.
30 Control = 2, 36 Control = 2,
37
38 /// Information related to the game manual
39 /// e.g. Legal information, etc.
31 Manual = 3, 40 Manual = 3,
41
42 /// System data.
32 Data = 4, 43 Data = 4,
33 Data_Unknown5 = 5, ///< Seems to be used on some system archives 44
45 /// Data that can be accessed by applications.
46 PublicData = 5,
34}; 47};
35 48
36enum class NCASectionCryptoType : u8 { 49enum class NCASectionCryptoType : u8 {
diff --git a/src/core/file_sys/control_metadata.cpp b/src/core/file_sys/control_metadata.cpp
index 83c184750..60ea9ad12 100644
--- a/src/core/file_sys/control_metadata.cpp
+++ b/src/core/file_sys/control_metadata.cpp
@@ -67,7 +67,7 @@ std::string NACP::GetDeveloperName(Language language) const {
67} 67}
68 68
69u64 NACP::GetTitleId() const { 69u64 NACP::GetTitleId() const {
70 return raw.title_id; 70 return raw.save_data_owner_id;
71} 71}
72 72
73u64 NACP::GetDLCBaseTitleId() const { 73u64 NACP::GetDLCBaseTitleId() const {
@@ -80,11 +80,11 @@ std::string NACP::GetVersionString() const {
80} 80}
81 81
82u64 NACP::GetDefaultNormalSaveSize() const { 82u64 NACP::GetDefaultNormalSaveSize() const {
83 return raw.normal_save_data_size; 83 return raw.user_account_save_data_size;
84} 84}
85 85
86u64 NACP::GetDefaultJournalSaveSize() const { 86u64 NACP::GetDefaultJournalSaveSize() const {
87 return raw.journal_sava_data_size; 87 return raw.user_account_save_data_journal_size;
88} 88}
89 89
90std::vector<u8> NACP::GetRawBytes() const { 90std::vector<u8> NACP::GetRawBytes() const {
diff --git a/src/core/file_sys/control_metadata.h b/src/core/file_sys/control_metadata.h
index 7b9cdc910..280710ddf 100644
--- a/src/core/file_sys/control_metadata.h
+++ b/src/core/file_sys/control_metadata.h
@@ -38,23 +38,35 @@ struct RawNACP {
38 u8 video_capture_mode; 38 u8 video_capture_mode;
39 bool data_loss_confirmation; 39 bool data_loss_confirmation;
40 INSERT_PADDING_BYTES(1); 40 INSERT_PADDING_BYTES(1);
41 u64_le title_id; 41 u64_le presence_group_id;
42 std::array<u8, 0x20> rating_age; 42 std::array<u8, 0x20> rating_age;
43 std::array<char, 0x10> version_string; 43 std::array<char, 0x10> version_string;
44 u64_le dlc_base_title_id; 44 u64_le dlc_base_title_id;
45 u64_le title_id_2; 45 u64_le save_data_owner_id;
46 u64_le normal_save_data_size; 46 u64_le user_account_save_data_size;
47 u64_le journal_sava_data_size; 47 u64_le user_account_save_data_journal_size;
48 INSERT_PADDING_BYTES(0x18); 48 u64_le device_save_data_size;
49 u64_le product_code; 49 u64_le device_save_data_journal_size;
50 u64_le bcat_delivery_cache_storage_size;
51 char application_error_code_category[8];
50 std::array<u64_le, 0x8> local_communication; 52 std::array<u64_le, 0x8> local_communication;
51 u8 logo_type; 53 u8 logo_type;
52 u8 logo_handling; 54 u8 logo_handling;
53 bool runtime_add_on_content_install; 55 bool runtime_add_on_content_install;
54 INSERT_PADDING_BYTES(5); 56 INSERT_PADDING_BYTES(5);
55 u64_le title_id_update; 57 u64_le seed_for_pseudo_device_id;
56 std::array<u8, 0x40> bcat_passphrase; 58 std::array<u8, 0x41> bcat_passphrase;
57 INSERT_PADDING_BYTES(0xEC0); 59 INSERT_PADDING_BYTES(7);
60 u64_le user_account_save_data_max_size;
61 u64_le user_account_save_data_max_journal_size;
62 u64_le device_save_data_max_size;
63 u64_le device_save_data_max_journal_size;
64 u64_le temporary_storage_size;
65 u64_le cache_storage_size;
66 u64_le cache_storage_journal_size;
67 u64_le cache_storage_data_and_journal_max_size;
68 u64_le cache_storage_max_index;
69 INSERT_PADDING_BYTES(0xE70);
58}; 70};
59static_assert(sizeof(RawNACP) == 0x4000, "RawNACP has incorrect size."); 71static_assert(sizeof(RawNACP) == 0x4000, "RawNACP has incorrect size.");
60 72
diff --git a/src/core/file_sys/errors.h b/src/core/file_sys/errors.h
index e4a4ee4ab..bb4654366 100644
--- a/src/core/file_sys/errors.h
+++ b/src/core/file_sys/errors.h
@@ -11,6 +11,9 @@ namespace FileSys {
11constexpr ResultCode ERROR_PATH_NOT_FOUND{ErrorModule::FS, 1}; 11constexpr ResultCode ERROR_PATH_NOT_FOUND{ErrorModule::FS, 1};
12constexpr ResultCode ERROR_ENTITY_NOT_FOUND{ErrorModule::FS, 1002}; 12constexpr ResultCode ERROR_ENTITY_NOT_FOUND{ErrorModule::FS, 1002};
13constexpr ResultCode ERROR_SD_CARD_NOT_FOUND{ErrorModule::FS, 2001}; 13constexpr ResultCode ERROR_SD_CARD_NOT_FOUND{ErrorModule::FS, 2001};
14constexpr ResultCode ERROR_OUT_OF_BOUNDS{ErrorModule::FS, 3005};
15constexpr ResultCode ERROR_FAILED_MOUNT_ARCHIVE{ErrorModule::FS, 3223};
16constexpr ResultCode ERROR_INVALID_ARGUMENT{ErrorModule::FS, 6001};
14constexpr ResultCode ERROR_INVALID_OFFSET{ErrorModule::FS, 6061}; 17constexpr ResultCode ERROR_INVALID_OFFSET{ErrorModule::FS, 6061};
15constexpr ResultCode ERROR_INVALID_SIZE{ErrorModule::FS, 6062}; 18constexpr ResultCode ERROR_INVALID_SIZE{ErrorModule::FS, 6062};
16 19
diff --git a/src/core/file_sys/fsmitm_romfsbuild.cpp b/src/core/file_sys/fsmitm_romfsbuild.cpp
index 47b7526c7..d126ae8dd 100644
--- a/src/core/file_sys/fsmitm_romfsbuild.cpp
+++ b/src/core/file_sys/fsmitm_romfsbuild.cpp
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include <cstring> 25#include <cstring>
26#include <string_view>
26#include "common/alignment.h" 27#include "common/alignment.h"
27#include "common/assert.h" 28#include "common/assert.h"
28#include "core/file_sys/fsmitm_romfsbuild.h" 29#include "core/file_sys/fsmitm_romfsbuild.h"
@@ -97,7 +98,8 @@ struct RomFSBuildFileContext {
97 VirtualFile source; 98 VirtualFile source;
98}; 99};
99 100
100static u32 romfs_calc_path_hash(u32 parent, std::string path, u32 start, std::size_t path_len) { 101static u32 romfs_calc_path_hash(u32 parent, std::string_view path, u32 start,
102 std::size_t path_len) {
101 u32 hash = parent ^ 123456789; 103 u32 hash = parent ^ 123456789;
102 for (u32 i = 0; i < path_len; i++) { 104 for (u32 i = 0; i < path_len; i++) {
103 hash = (hash >> 5) | (hash << 27); 105 hash = (hash >> 5) | (hash << 27);
diff --git a/src/core/file_sys/nca_metadata.cpp b/src/core/file_sys/nca_metadata.cpp
index 6f34b7836..93d0df6b9 100644
--- a/src/core/file_sys/nca_metadata.cpp
+++ b/src/core/file_sys/nca_metadata.cpp
@@ -10,14 +10,6 @@
10 10
11namespace FileSys { 11namespace FileSys {
12 12
13bool operator>=(TitleType lhs, TitleType rhs) {
14 return static_cast<std::size_t>(lhs) >= static_cast<std::size_t>(rhs);
15}
16
17bool operator<=(TitleType lhs, TitleType rhs) {
18 return static_cast<std::size_t>(lhs) <= static_cast<std::size_t>(rhs);
19}
20
21CNMT::CNMT(VirtualFile file) { 13CNMT::CNMT(VirtualFile file) {
22 if (file->ReadObject(&header) != sizeof(CNMTHeader)) 14 if (file->ReadObject(&header) != sizeof(CNMTHeader))
23 return; 15 return;
diff --git a/src/core/file_sys/nca_metadata.h b/src/core/file_sys/nca_metadata.h
index a05d155f4..50bf38471 100644
--- a/src/core/file_sys/nca_metadata.h
+++ b/src/core/file_sys/nca_metadata.h
@@ -29,9 +29,6 @@ enum class TitleType : u8 {
29 DeltaTitle = 0x83, 29 DeltaTitle = 0x83,
30}; 30};
31 31
32bool operator>=(TitleType lhs, TitleType rhs);
33bool operator<=(TitleType lhs, TitleType rhs);
34
35enum class ContentRecordType : u8 { 32enum class ContentRecordType : u8 {
36 Meta = 0, 33 Meta = 0,
37 Program = 1, 34 Program = 1,
diff --git a/src/core/file_sys/patch_manager.cpp b/src/core/file_sys/patch_manager.cpp
index 61706966e..e11217708 100644
--- a/src/core/file_sys/patch_manager.cpp
+++ b/src/core/file_sys/patch_manager.cpp
@@ -7,6 +7,7 @@
7#include <cstddef> 7#include <cstddef>
8#include <cstring> 8#include <cstring>
9 9
10#include "common/file_util.h"
10#include "common/hex_util.h" 11#include "common/hex_util.h"
11#include "common/logging/log.h" 12#include "common/logging/log.h"
12#include "core/file_sys/content_archive.h" 13#include "core/file_sys/content_archive.h"
@@ -19,6 +20,7 @@
19#include "core/file_sys/vfs_vector.h" 20#include "core/file_sys/vfs_vector.h"
20#include "core/hle/service/filesystem/filesystem.h" 21#include "core/hle/service/filesystem/filesystem.h"
21#include "core/loader/loader.h" 22#include "core/loader/loader.h"
23#include "core/loader/nso.h"
22#include "core/settings.h" 24#include "core/settings.h"
23 25
24namespace FileSys { 26namespace FileSys {
@@ -31,14 +33,6 @@ constexpr std::array<const char*, 14> EXEFS_FILE_NAMES{
31 "subsdk3", "subsdk4", "subsdk5", "subsdk6", "subsdk7", "subsdk8", "subsdk9", 33 "subsdk3", "subsdk4", "subsdk5", "subsdk6", "subsdk7", "subsdk8", "subsdk9",
32}; 34};
33 35
34struct NSOBuildHeader {
35 u32_le magic;
36 INSERT_PADDING_BYTES(0x3C);
37 std::array<u8, 0x20> build_id;
38 INSERT_PADDING_BYTES(0xA0);
39};
40static_assert(sizeof(NSOBuildHeader) == 0x100, "NSOBuildHeader has incorrect size.");
41
42std::string FormatTitleVersion(u32 version, TitleVersionFormat format) { 36std::string FormatTitleVersion(u32 version, TitleVersionFormat format) {
43 std::array<u8, sizeof(u32)> bytes{}; 37 std::array<u8, sizeof(u32)> bytes{};
44 bytes[0] = version % SINGLE_BYTE_MODULUS; 38 bytes[0] = version % SINGLE_BYTE_MODULUS;
@@ -162,14 +156,16 @@ std::vector<VirtualFile> PatchManager::CollectPatches(const std::vector<VirtualD
162} 156}
163 157
164std::vector<u8> PatchManager::PatchNSO(const std::vector<u8>& nso) const { 158std::vector<u8> PatchManager::PatchNSO(const std::vector<u8>& nso) const {
165 if (nso.size() < 0x100) 159 if (nso.size() < sizeof(Loader::NSOHeader)) {
166 return nso; 160 return nso;
161 }
167 162
168 NSOBuildHeader header; 163 Loader::NSOHeader header;
169 std::memcpy(&header, nso.data(), sizeof(NSOBuildHeader)); 164 std::memcpy(&header, nso.data(), sizeof(header));
170 165
171 if (header.magic != Common::MakeMagic('N', 'S', 'O', '0')) 166 if (header.magic != Common::MakeMagic('N', 'S', 'O', '0')) {
172 return nso; 167 return nso;
168 }
173 169
174 const auto build_id_raw = Common::HexArrayToString(header.build_id); 170 const auto build_id_raw = Common::HexArrayToString(header.build_id);
175 const auto build_id = build_id_raw.substr(0, build_id_raw.find_last_not_of('0') + 1); 171 const auto build_id = build_id_raw.substr(0, build_id_raw.find_last_not_of('0') + 1);
@@ -212,9 +208,11 @@ std::vector<u8> PatchManager::PatchNSO(const std::vector<u8>& nso) const {
212 } 208 }
213 } 209 }
214 210
215 if (out.size() < 0x100) 211 if (out.size() < sizeof(Loader::NSOHeader)) {
216 return nso; 212 return nso;
217 std::memcpy(out.data(), &header, sizeof(NSOBuildHeader)); 213 }
214
215 std::memcpy(out.data(), &header, sizeof(header));
218 return out; 216 return out;
219} 217}
220 218
@@ -232,6 +230,57 @@ bool PatchManager::HasNSOPatch(const std::array<u8, 32>& build_id_) const {
232 return !CollectPatches(patch_dirs, build_id).empty(); 230 return !CollectPatches(patch_dirs, build_id).empty();
233} 231}
234 232
233static std::optional<CheatList> ReadCheatFileFromFolder(const Core::System& system, u64 title_id,
234 const std::array<u8, 0x20>& build_id_,
235 const VirtualDir& base_path, bool upper) {
236 const auto build_id_raw = Common::HexArrayToString(build_id_, upper);
237 const auto build_id = build_id_raw.substr(0, sizeof(u64) * 2);
238 const auto file = base_path->GetFile(fmt::format("{}.txt", build_id));
239
240 if (file == nullptr) {
241 LOG_INFO(Common_Filesystem, "No cheats file found for title_id={:016X}, build_id={}",
242 title_id, build_id);
243 return std::nullopt;
244 }
245
246 std::vector<u8> data(file->GetSize());
247 if (file->Read(data.data(), data.size()) != data.size()) {
248 LOG_INFO(Common_Filesystem, "Failed to read cheats file for title_id={:016X}, build_id={}",
249 title_id, build_id);
250 return std::nullopt;
251 }
252
253 TextCheatParser parser;
254 return parser.Parse(system, data);
255}
256
257std::vector<CheatList> PatchManager::CreateCheatList(const Core::System& system,
258 const std::array<u8, 32>& build_id_) const {
259 const auto load_dir = Service::FileSystem::GetModificationLoadRoot(title_id);
260 auto patch_dirs = load_dir->GetSubdirectories();
261 std::sort(patch_dirs.begin(), patch_dirs.end(),
262 [](const VirtualDir& l, const VirtualDir& r) { return l->GetName() < r->GetName(); });
263
264 std::vector<CheatList> out;
265 out.reserve(patch_dirs.size());
266 for (const auto& subdir : patch_dirs) {
267 auto cheats_dir = subdir->GetSubdirectory("cheats");
268 if (cheats_dir != nullptr) {
269 auto res = ReadCheatFileFromFolder(system, title_id, build_id_, cheats_dir, true);
270 if (res.has_value()) {
271 out.push_back(std::move(*res));
272 continue;
273 }
274
275 res = ReadCheatFileFromFolder(system, title_id, build_id_, cheats_dir, false);
276 if (res.has_value())
277 out.push_back(std::move(*res));
278 }
279 }
280
281 return out;
282}
283
235static void ApplyLayeredFS(VirtualFile& romfs, u64 title_id, ContentRecordType type) { 284static void ApplyLayeredFS(VirtualFile& romfs, u64 title_id, ContentRecordType type) {
236 const auto load_dir = Service::FileSystem::GetModificationLoadRoot(title_id); 285 const auto load_dir = Service::FileSystem::GetModificationLoadRoot(title_id);
237 if ((type != ContentRecordType::Program && type != ContentRecordType::Data) || 286 if ((type != ContentRecordType::Program && type != ContentRecordType::Data) ||
@@ -403,6 +452,8 @@ std::map<std::string, std::string, std::less<>> PatchManager::GetPatchVersionNam
403 } 452 }
404 if (IsDirValidAndNonEmpty(mod->GetSubdirectory("romfs"))) 453 if (IsDirValidAndNonEmpty(mod->GetSubdirectory("romfs")))
405 AppendCommaIfNotEmpty(types, "LayeredFS"); 454 AppendCommaIfNotEmpty(types, "LayeredFS");
455 if (IsDirValidAndNonEmpty(mod->GetSubdirectory("cheats")))
456 AppendCommaIfNotEmpty(types, "Cheats");
406 457
407 if (types.empty()) 458 if (types.empty())
408 continue; 459 continue;
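As a rough illustration of the lookup convention used by ReadCheatFileFromFolder above: the cheat file name is the first eight bytes of the build ID rendered as hex (tried upper case first, then lower case) with a .txt extension. The helper below is a sketch for demonstration only and does not exist in the diff; it reuses Common::HexArrayToString and fmt::format as the patched code does.

std::string CheatFileNameFor(const std::array<u8, 0x20>& build_id, bool upper) {
    // 0x20 bytes -> 64 hex characters; only the first 16 (8 bytes) name the file.
    const auto raw = Common::HexArrayToString(build_id, upper);
    return fmt::format("{}.txt", raw.substr(0, sizeof(u64) * 2));
}
// e.g. a build ID starting with 01 23 45 67 89 AB CD EF ... is looked up as
// <mod_dir>/cheats/0123456789ABCDEF.txt, then <mod_dir>/cheats/0123456789abcdef.txt.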
diff --git a/src/core/file_sys/patch_manager.h b/src/core/file_sys/patch_manager.h
index b8a1652fd..de2672c76 100644
--- a/src/core/file_sys/patch_manager.h
+++ b/src/core/file_sys/patch_manager.h
@@ -8,9 +8,14 @@
8#include <memory> 8#include <memory>
9#include <string> 9#include <string>
10#include "common/common_types.h" 10#include "common/common_types.h"
11#include "core/file_sys/cheat_engine.h"
11#include "core/file_sys/nca_metadata.h" 12#include "core/file_sys/nca_metadata.h"
12#include "core/file_sys/vfs.h" 13#include "core/file_sys/vfs.h"
13 14
15namespace Core {
16class System;
17}
18
14namespace FileSys { 19namespace FileSys {
15 20
16class NCA; 21class NCA;
@@ -45,6 +50,10 @@ public:
45 // Used to prevent expensive copies in NSO loader. 50 // Used to prevent expensive copies in NSO loader.
46 bool HasNSOPatch(const std::array<u8, 0x20>& build_id) const; 51 bool HasNSOPatch(const std::array<u8, 0x20>& build_id) const;
47 52
53    // Creates CheatLists from all mod directories that contain a cheat file matching the given build ID.
54 std::vector<CheatList> CreateCheatList(const Core::System& system,
55 const std::array<u8, 0x20>& build_id) const;
56
48 // Currently tracked RomFS patches: 57 // Currently tracked RomFS patches:
49 // - Game Updates 58 // - Game Updates
50 // - LayeredFS 59 // - LayeredFS
diff --git a/src/core/file_sys/program_metadata.cpp b/src/core/file_sys/program_metadata.cpp
index d3e00437f..d863253f8 100644
--- a/src/core/file_sys/program_metadata.cpp
+++ b/src/core/file_sys/program_metadata.cpp
@@ -3,7 +3,6 @@
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <cstddef> 5#include <cstddef>
6#include <cstring>
7#include <vector> 6#include <vector>
8 7
9#include "common/logging/log.h" 8#include "common/logging/log.h"
@@ -17,28 +16,30 @@ ProgramMetadata::ProgramMetadata() = default;
17ProgramMetadata::~ProgramMetadata() = default; 16ProgramMetadata::~ProgramMetadata() = default;
18 17
19Loader::ResultStatus ProgramMetadata::Load(VirtualFile file) { 18Loader::ResultStatus ProgramMetadata::Load(VirtualFile file) {
20 std::size_t total_size = static_cast<std::size_t>(file->GetSize()); 19 const std::size_t total_size = file->GetSize();
21 if (total_size < sizeof(Header)) 20 if (total_size < sizeof(Header)) {
22 return Loader::ResultStatus::ErrorBadNPDMHeader; 21 return Loader::ResultStatus::ErrorBadNPDMHeader;
22 }
23 23
24 // TODO(DarkLordZach): Use ReadObject when Header/AcidHeader becomes trivially copyable. 24 if (sizeof(Header) != file->ReadObject(&npdm_header)) {
25 std::vector<u8> npdm_header_data = file->ReadBytes(sizeof(Header));
26 if (sizeof(Header) != npdm_header_data.size())
27 return Loader::ResultStatus::ErrorBadNPDMHeader; 25 return Loader::ResultStatus::ErrorBadNPDMHeader;
28 std::memcpy(&npdm_header, npdm_header_data.data(), sizeof(Header)); 26 }
29 27
30 std::vector<u8> acid_header_data = file->ReadBytes(sizeof(AcidHeader), npdm_header.acid_offset); 28 if (sizeof(AcidHeader) != file->ReadObject(&acid_header, npdm_header.acid_offset)) {
31 if (sizeof(AcidHeader) != acid_header_data.size())
32 return Loader::ResultStatus::ErrorBadACIDHeader; 29 return Loader::ResultStatus::ErrorBadACIDHeader;
33 std::memcpy(&acid_header, acid_header_data.data(), sizeof(AcidHeader)); 30 }
34 31
35 if (sizeof(AciHeader) != file->ReadObject(&aci_header, npdm_header.aci_offset)) 32 if (sizeof(AciHeader) != file->ReadObject(&aci_header, npdm_header.aci_offset)) {
36 return Loader::ResultStatus::ErrorBadACIHeader; 33 return Loader::ResultStatus::ErrorBadACIHeader;
34 }
37 35
38 if (sizeof(FileAccessControl) != file->ReadObject(&acid_file_access, acid_header.fac_offset)) 36 if (sizeof(FileAccessControl) != file->ReadObject(&acid_file_access, acid_header.fac_offset)) {
39 return Loader::ResultStatus::ErrorBadFileAccessControl; 37 return Loader::ResultStatus::ErrorBadFileAccessControl;
40 if (sizeof(FileAccessHeader) != file->ReadObject(&aci_file_access, aci_header.fah_offset)) 38 }
39
40 if (sizeof(FileAccessHeader) != file->ReadObject(&aci_file_access, aci_header.fah_offset)) {
41 return Loader::ResultStatus::ErrorBadFileAccessHeader; 41 return Loader::ResultStatus::ErrorBadFileAccessHeader;
42 }
42 43
43 aci_kernel_capabilities.resize(aci_header.kac_size / sizeof(u32)); 44 aci_kernel_capabilities.resize(aci_header.kac_size / sizeof(u32));
44 const u64 read_size = aci_header.kac_size; 45 const u64 read_size = aci_header.kac_size;
diff --git a/src/core/file_sys/program_metadata.h b/src/core/file_sys/program_metadata.h
index 0033ba347..7de5b9cf9 100644
--- a/src/core/file_sys/program_metadata.h
+++ b/src/core/file_sys/program_metadata.h
@@ -58,7 +58,6 @@ public:
58 void Print() const; 58 void Print() const;
59 59
60private: 60private:
61 // TODO(DarkLordZach): BitField is not trivially copyable.
62 struct Header { 61 struct Header {
63 std::array<char, 4> magic; 62 std::array<char, 4> magic;
64 std::array<u8, 8> reserved; 63 std::array<u8, 8> reserved;
@@ -85,7 +84,6 @@ private:
85 84
86 static_assert(sizeof(Header) == 0x80, "NPDM header structure size is wrong"); 85 static_assert(sizeof(Header) == 0x80, "NPDM header structure size is wrong");
87 86
88 // TODO(DarkLordZach): BitField is not trivially copyable.
89 struct AcidHeader { 87 struct AcidHeader {
90 std::array<u8, 0x100> signature; 88 std::array<u8, 0x100> signature;
91 std::array<u8, 0x100> nca_modulus; 89 std::array<u8, 0x100> nca_modulus;
diff --git a/src/core/file_sys/registered_cache.cpp b/src/core/file_sys/registered_cache.cpp
index 128199063..1c6bacace 100644
--- a/src/core/file_sys/registered_cache.cpp
+++ b/src/core/file_sys/registered_cache.cpp
@@ -94,7 +94,7 @@ static ContentRecordType GetCRTypeFromNCAType(NCAContentType type) {
94 case NCAContentType::Control: 94 case NCAContentType::Control:
95 return ContentRecordType::Control; 95 return ContentRecordType::Control;
96 case NCAContentType::Data: 96 case NCAContentType::Data:
97 case NCAContentType::Data_Unknown5: 97 case NCAContentType::PublicData:
98 return ContentRecordType::Data; 98 return ContentRecordType::Data;
99 case NCAContentType::Manual: 99 case NCAContentType::Manual:
100 // TODO(DarkLordZach): Peek at NCA contents to differentiate Manual and Legal. 100 // TODO(DarkLordZach): Peek at NCA contents to differentiate Manual and Legal.
diff --git a/src/core/file_sys/savedata_factory.cpp b/src/core/file_sys/savedata_factory.cpp
index 1913dc956..7974b031d 100644
--- a/src/core/file_sys/savedata_factory.cpp
+++ b/src/core/file_sys/savedata_factory.cpp
@@ -16,8 +16,10 @@ namespace FileSys {
16constexpr char SAVE_DATA_SIZE_FILENAME[] = ".yuzu_save_size"; 16constexpr char SAVE_DATA_SIZE_FILENAME[] = ".yuzu_save_size";
17 17
18std::string SaveDataDescriptor::DebugInfo() const { 18std::string SaveDataDescriptor::DebugInfo() const {
19 return fmt::format("[type={:02X}, title_id={:016X}, user_id={:016X}{:016X}, save_id={:016X}]", 19 return fmt::format("[type={:02X}, title_id={:016X}, user_id={:016X}{:016X}, save_id={:016X}, "
20 static_cast<u8>(type), title_id, user_id[1], user_id[0], save_id); 20 "rank={}, index={}]",
21 static_cast<u8>(type), title_id, user_id[1], user_id[0], save_id,
22 static_cast<u8>(rank), index);
21} 23}
22 24
23SaveDataFactory::SaveDataFactory(VirtualDir save_directory) : dir(std::move(save_directory)) { 25SaveDataFactory::SaveDataFactory(VirtualDir save_directory) : dir(std::move(save_directory)) {
@@ -28,7 +30,7 @@ SaveDataFactory::SaveDataFactory(VirtualDir save_directory) : dir(std::move(save
28 30
29SaveDataFactory::~SaveDataFactory() = default; 31SaveDataFactory::~SaveDataFactory() = default;
30 32
31ResultVal<VirtualDir> SaveDataFactory::Open(SaveDataSpaceId space, SaveDataDescriptor meta) { 33ResultVal<VirtualDir> SaveDataFactory::Open(SaveDataSpaceId space, const SaveDataDescriptor& meta) {
32 if (meta.type == SaveDataType::SystemSaveData || meta.type == SaveDataType::SaveData) { 34 if (meta.type == SaveDataType::SystemSaveData || meta.type == SaveDataType::SaveData) {
33 if (meta.zero_1 != 0) { 35 if (meta.zero_1 != 0) {
34 LOG_WARNING(Service_FS, 36 LOG_WARNING(Service_FS,
diff --git a/src/core/file_sys/savedata_factory.h b/src/core/file_sys/savedata_factory.h
index 3a1caf292..b73654571 100644
--- a/src/core/file_sys/savedata_factory.h
+++ b/src/core/file_sys/savedata_factory.h
@@ -32,12 +32,19 @@ enum class SaveDataType : u8 {
32 CacheStorage = 5, 32 CacheStorage = 5,
33}; 33};
34 34
35enum class SaveDataRank : u8 {
36 Primary,
37 Secondary,
38};
39
35struct SaveDataDescriptor { 40struct SaveDataDescriptor {
36 u64_le title_id; 41 u64_le title_id;
37 u128 user_id; 42 u128 user_id;
38 u64_le save_id; 43 u64_le save_id;
39 SaveDataType type; 44 SaveDataType type;
40 INSERT_PADDING_BYTES(7); 45 SaveDataRank rank;
46 u16_le index;
47 INSERT_PADDING_BYTES(4);
41 u64_le zero_1; 48 u64_le zero_1;
42 u64_le zero_2; 49 u64_le zero_2;
43 u64_le zero_3; 50 u64_le zero_3;
@@ -57,7 +64,7 @@ public:
57 explicit SaveDataFactory(VirtualDir dir); 64 explicit SaveDataFactory(VirtualDir dir);
58 ~SaveDataFactory(); 65 ~SaveDataFactory();
59 66
60 ResultVal<VirtualDir> Open(SaveDataSpaceId space, SaveDataDescriptor meta); 67 ResultVal<VirtualDir> Open(SaveDataSpaceId space, const SaveDataDescriptor& meta);
61 68
62 VirtualDir GetSaveDataSpaceDirectory(SaveDataSpaceId space) const; 69 VirtualDir GetSaveDataSpaceDirectory(SaveDataSpaceId space) const;
63 70
diff --git a/src/core/file_sys/system_archive/system_archive.cpp b/src/core/file_sys/system_archive/system_archive.cpp
index e3e79f40a..c9722ed77 100644
--- a/src/core/file_sys/system_archive/system_archive.cpp
+++ b/src/core/file_sys/system_archive/system_archive.cpp
@@ -6,6 +6,7 @@
6#include "core/file_sys/romfs.h" 6#include "core/file_sys/romfs.h"
7#include "core/file_sys/system_archive/ng_word.h" 7#include "core/file_sys/system_archive/ng_word.h"
8#include "core/file_sys/system_archive/system_archive.h" 8#include "core/file_sys/system_archive/system_archive.h"
9#include "core/file_sys/system_archive/system_version.h"
9 10
10namespace FileSys::SystemArchive { 11namespace FileSys::SystemArchive {
11 12
@@ -30,7 +31,7 @@ constexpr std::array<SystemArchiveDescriptor, SYSTEM_ARCHIVE_COUNT> SYSTEM_ARCHI
30 {0x0100000000000806, "NgWord", &NgWord1}, 31 {0x0100000000000806, "NgWord", &NgWord1},
31 {0x0100000000000807, "SsidList", nullptr}, 32 {0x0100000000000807, "SsidList", nullptr},
32 {0x0100000000000808, "Dictionary", nullptr}, 33 {0x0100000000000808, "Dictionary", nullptr},
33 {0x0100000000000809, "SystemVersion", nullptr}, 34 {0x0100000000000809, "SystemVersion", &SystemVersion},
34 {0x010000000000080A, "AvatarImage", nullptr}, 35 {0x010000000000080A, "AvatarImage", nullptr},
35 {0x010000000000080B, "LocalNews", nullptr}, 36 {0x010000000000080B, "LocalNews", nullptr},
36 {0x010000000000080C, "Eula", nullptr}, 37 {0x010000000000080C, "Eula", nullptr},
diff --git a/src/core/file_sys/system_archive/system_version.cpp b/src/core/file_sys/system_archive/system_version.cpp
new file mode 100644
index 000000000..6e22f97b0
--- /dev/null
+++ b/src/core/file_sys/system_archive/system_version.cpp
@@ -0,0 +1,52 @@
1// Copyright 2019 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "core/file_sys/system_archive/system_version.h"
6#include "core/file_sys/vfs_vector.h"
7
8namespace FileSys::SystemArchive {
9
10namespace SystemVersionData {
11
12// This section should reflect the best system version to describe yuzu's HLE API.
13// TODO(DarkLordZach): Update when HLE gets better.
14
15constexpr u8 VERSION_MAJOR = 5;
16constexpr u8 VERSION_MINOR = 1;
17constexpr u8 VERSION_MICRO = 0;
18
19constexpr u8 REVISION_MAJOR = 3;
20constexpr u8 REVISION_MINOR = 0;
21
22constexpr char PLATFORM_STRING[] = "NX";
23constexpr char VERSION_HASH[] = "23f9df53e25709d756e0c76effcb2473bd3447dd";
24constexpr char DISPLAY_VERSION[] = "5.1.0";
25constexpr char DISPLAY_TITLE[] = "NintendoSDK Firmware for NX 5.1.0-3.0";
26
27} // namespace SystemVersionData
28
29std::string GetLongDisplayVersion() {
30 return SystemVersionData::DISPLAY_TITLE;
31}
32
33VirtualDir SystemVersion() {
34 VirtualFile file = std::make_shared<VectorVfsFile>(std::vector<u8>(0x100), "file");
35 file->WriteObject(SystemVersionData::VERSION_MAJOR, 0);
36 file->WriteObject(SystemVersionData::VERSION_MINOR, 1);
37 file->WriteObject(SystemVersionData::VERSION_MICRO, 2);
38 file->WriteObject(SystemVersionData::REVISION_MAJOR, 4);
39 file->WriteObject(SystemVersionData::REVISION_MINOR, 5);
40 file->WriteArray(SystemVersionData::PLATFORM_STRING,
41 std::min<u64>(sizeof(SystemVersionData::PLATFORM_STRING), 0x20ULL), 0x8);
42 file->WriteArray(SystemVersionData::VERSION_HASH,
43 std::min<u64>(sizeof(SystemVersionData::VERSION_HASH), 0x40ULL), 0x28);
44 file->WriteArray(SystemVersionData::DISPLAY_VERSION,
45 std::min<u64>(sizeof(SystemVersionData::DISPLAY_VERSION), 0x18ULL), 0x68);
46 file->WriteArray(SystemVersionData::DISPLAY_TITLE,
47 std::min<u64>(sizeof(SystemVersionData::DISPLAY_TITLE), 0x80ULL), 0x80);
48 return std::make_shared<VectorVfsDirectory>(std::vector<VirtualFile>{file},
49 std::vector<VirtualDir>{}, "data");
50}
51
52} // namespace FileSys::SystemArchive
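The write offsets above imply the following layout for the 0x100-byte SystemVersion data file. The struct is only a visual aid reconstructed from those offsets; it is not a type introduced by this change.

struct SystemVersionFileLayout {
    u8 version_major;            // 0x00
    u8 version_minor;            // 0x01
    u8 version_micro;            // 0x02
    u8 pad0;                     // 0x03
    u8 revision_major;           // 0x04
    u8 revision_minor;           // 0x05
    u8 pad1[2];                  // 0x06
    char platform_string[0x20];  // 0x08  "NX"
    char version_hash[0x40];     // 0x28
    char display_version[0x18];  // 0x68  "5.1.0"
    char display_title[0x80];    // 0x80  "NintendoSDK Firmware for NX 5.1.0-3.0"
};
static_assert(sizeof(SystemVersionFileLayout) == 0x100,
              "layout sketch should match the 0x100-byte backing file");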
diff --git a/src/core/file_sys/system_archive/system_version.h b/src/core/file_sys/system_archive/system_version.h
new file mode 100644
index 000000000..deed79b26
--- /dev/null
+++ b/src/core/file_sys/system_archive/system_version.h
@@ -0,0 +1,16 @@
1// Copyright 2019 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <string>
8#include "core/file_sys/vfs_types.h"
9
10namespace FileSys::SystemArchive {
11
12std::string GetLongDisplayVersion();
13
14VirtualDir SystemVersion();
15
16} // namespace FileSys::SystemArchive
diff --git a/src/core/frontend/emu_window.cpp b/src/core/frontend/emu_window.cpp
index e29afd630..1320bbe77 100644
--- a/src/core/frontend/emu_window.cpp
+++ b/src/core/frontend/emu_window.cpp
@@ -30,7 +30,7 @@ private:
30 explicit Device(std::weak_ptr<TouchState>&& touch_state) : touch_state(touch_state) {} 30 explicit Device(std::weak_ptr<TouchState>&& touch_state) : touch_state(touch_state) {}
31 std::tuple<float, float, bool> GetStatus() const override { 31 std::tuple<float, float, bool> GetStatus() const override {
32 if (auto state = touch_state.lock()) { 32 if (auto state = touch_state.lock()) {
33 std::lock_guard<std::mutex> guard(state->mutex); 33 std::lock_guard guard{state->mutex};
34 return std::make_tuple(state->touch_x, state->touch_y, state->touch_pressed); 34 return std::make_tuple(state->touch_x, state->touch_y, state->touch_pressed);
35 } 35 }
36 return std::make_tuple(0.0f, 0.0f, false); 36 return std::make_tuple(0.0f, 0.0f, false);
@@ -81,7 +81,7 @@ void EmuWindow::TouchPressed(unsigned framebuffer_x, unsigned framebuffer_y) {
81 if (!IsWithinTouchscreen(framebuffer_layout, framebuffer_x, framebuffer_y)) 81 if (!IsWithinTouchscreen(framebuffer_layout, framebuffer_x, framebuffer_y))
82 return; 82 return;
83 83
84 std::lock_guard<std::mutex> guard(touch_state->mutex); 84 std::lock_guard guard{touch_state->mutex};
85 touch_state->touch_x = static_cast<float>(framebuffer_x - framebuffer_layout.screen.left) / 85 touch_state->touch_x = static_cast<float>(framebuffer_x - framebuffer_layout.screen.left) /
86 (framebuffer_layout.screen.right - framebuffer_layout.screen.left); 86 (framebuffer_layout.screen.right - framebuffer_layout.screen.left);
87 touch_state->touch_y = static_cast<float>(framebuffer_y - framebuffer_layout.screen.top) / 87 touch_state->touch_y = static_cast<float>(framebuffer_y - framebuffer_layout.screen.top) /
@@ -91,7 +91,7 @@ void EmuWindow::TouchPressed(unsigned framebuffer_x, unsigned framebuffer_y) {
91} 91}
92 92
93void EmuWindow::TouchReleased() { 93void EmuWindow::TouchReleased() {
94 std::lock_guard<std::mutex> guard(touch_state->mutex); 94 std::lock_guard guard{touch_state->mutex};
95 touch_state->touch_pressed = false; 95 touch_state->touch_pressed = false;
96 touch_state->touch_x = 0; 96 touch_state->touch_x = 0;
97 touch_state->touch_y = 0; 97 touch_state->touch_y = 0;
diff --git a/src/core/gdbstub/gdbstub.cpp b/src/core/gdbstub/gdbstub.cpp
index dafb32aae..afa812598 100644
--- a/src/core/gdbstub/gdbstub.cpp
+++ b/src/core/gdbstub/gdbstub.cpp
@@ -1030,7 +1030,7 @@ static void Step() {
1030 1030
1031/// Tell the CPU if we hit a memory breakpoint. 1031/// Tell the CPU if we hit a memory breakpoint.
1032bool IsMemoryBreak() { 1032bool IsMemoryBreak() {
1033 if (IsConnected()) { 1033 if (!IsConnected()) {
1034 return false; 1034 return false;
1035 } 1035 }
1036 1036
diff --git a/src/core/hle/ipc.h b/src/core/hle/ipc.h
index 455d1f346..fae54bcc7 100644
--- a/src/core/hle/ipc.h
+++ b/src/core/hle/ipc.h
@@ -39,10 +39,10 @@ struct CommandHeader {
39 union { 39 union {
40 u32_le raw_low; 40 u32_le raw_low;
41 BitField<0, 16, CommandType> type; 41 BitField<0, 16, CommandType> type;
42 BitField<16, 4, u32_le> num_buf_x_descriptors; 42 BitField<16, 4, u32> num_buf_x_descriptors;
43 BitField<20, 4, u32_le> num_buf_a_descriptors; 43 BitField<20, 4, u32> num_buf_a_descriptors;
44 BitField<24, 4, u32_le> num_buf_b_descriptors; 44 BitField<24, 4, u32> num_buf_b_descriptors;
45 BitField<28, 4, u32_le> num_buf_w_descriptors; 45 BitField<28, 4, u32> num_buf_w_descriptors;
46 }; 46 };
47 47
48 enum class BufferDescriptorCFlag : u32 { 48 enum class BufferDescriptorCFlag : u32 {
@@ -53,28 +53,28 @@ struct CommandHeader {
53 53
54 union { 54 union {
55 u32_le raw_high; 55 u32_le raw_high;
56 BitField<0, 10, u32_le> data_size; 56 BitField<0, 10, u32> data_size;
57 BitField<10, 4, BufferDescriptorCFlag> buf_c_descriptor_flags; 57 BitField<10, 4, BufferDescriptorCFlag> buf_c_descriptor_flags;
58 BitField<31, 1, u32_le> enable_handle_descriptor; 58 BitField<31, 1, u32> enable_handle_descriptor;
59 }; 59 };
60}; 60};
61static_assert(sizeof(CommandHeader) == 8, "CommandHeader size is incorrect"); 61static_assert(sizeof(CommandHeader) == 8, "CommandHeader size is incorrect");
62 62
63union HandleDescriptorHeader { 63union HandleDescriptorHeader {
64 u32_le raw_high; 64 u32_le raw_high;
65 BitField<0, 1, u32_le> send_current_pid; 65 BitField<0, 1, u32> send_current_pid;
66 BitField<1, 4, u32_le> num_handles_to_copy; 66 BitField<1, 4, u32> num_handles_to_copy;
67 BitField<5, 4, u32_le> num_handles_to_move; 67 BitField<5, 4, u32> num_handles_to_move;
68}; 68};
69static_assert(sizeof(HandleDescriptorHeader) == 4, "HandleDescriptorHeader size is incorrect"); 69static_assert(sizeof(HandleDescriptorHeader) == 4, "HandleDescriptorHeader size is incorrect");
70 70
71struct BufferDescriptorX { 71struct BufferDescriptorX {
72 union { 72 union {
73 BitField<0, 6, u32_le> counter_bits_0_5; 73 BitField<0, 6, u32> counter_bits_0_5;
74 BitField<6, 3, u32_le> address_bits_36_38; 74 BitField<6, 3, u32> address_bits_36_38;
75 BitField<9, 3, u32_le> counter_bits_9_11; 75 BitField<9, 3, u32> counter_bits_9_11;
76 BitField<12, 4, u32_le> address_bits_32_35; 76 BitField<12, 4, u32> address_bits_32_35;
77 BitField<16, 16, u32_le> size; 77 BitField<16, 16, u32> size;
78 }; 78 };
79 79
80 u32_le address_bits_0_31; 80 u32_le address_bits_0_31;
@@ -103,10 +103,10 @@ struct BufferDescriptorABW {
103 u32_le address_bits_0_31; 103 u32_le address_bits_0_31;
104 104
105 union { 105 union {
106 BitField<0, 2, u32_le> flags; 106 BitField<0, 2, u32> flags;
107 BitField<2, 3, u32_le> address_bits_36_38; 107 BitField<2, 3, u32> address_bits_36_38;
108 BitField<24, 4, u32_le> size_bits_32_35; 108 BitField<24, 4, u32> size_bits_32_35;
109 BitField<28, 4, u32_le> address_bits_32_35; 109 BitField<28, 4, u32> address_bits_32_35;
110 }; 110 };
111 111
112 VAddr Address() const { 112 VAddr Address() const {
@@ -128,8 +128,8 @@ struct BufferDescriptorC {
128 u32_le address_bits_0_31; 128 u32_le address_bits_0_31;
129 129
130 union { 130 union {
131 BitField<0, 16, u32_le> address_bits_32_47; 131 BitField<0, 16, u32> address_bits_32_47;
132 BitField<16, 16, u32_le> size; 132 BitField<16, 16, u32> size;
133 }; 133 };
134 134
135 VAddr Address() const { 135 VAddr Address() const {
@@ -167,8 +167,8 @@ struct DomainMessageHeader {
167 struct { 167 struct {
168 union { 168 union {
169 BitField<0, 8, CommandType> command; 169 BitField<0, 8, CommandType> command;
170 BitField<8, 8, u32_le> input_object_count; 170 BitField<8, 8, u32> input_object_count;
171 BitField<16, 16, u32_le> size; 171 BitField<16, 16, u32> size;
172 }; 172 };
173 u32_le object_id; 173 u32_le object_id;
174 INSERT_PADDING_WORDS(2); 174 INSERT_PADDING_WORDS(2);
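For readers unfamiliar with the descriptor encoding, the split address fields in BufferDescriptorABW are recombined along the lines sketched below. This mirrors the existing Address() helpers already present in the file; the exact shift amounts are an assumption based on the field names.

VAddr Address() const {
    // Low 32 bits come from the dedicated word; bits 32-35 and 36-38 come from the bitfields.
    return static_cast<VAddr>(address_bits_0_31) |
           (static_cast<VAddr>(address_bits_32_35) << 32) |
           (static_cast<VAddr>(address_bits_36_38) << 36);
}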
diff --git a/src/core/hle/ipc_helpers.h b/src/core/hle/ipc_helpers.h
index a1e4be070..ac0e1d796 100644
--- a/src/core/hle/ipc_helpers.h
+++ b/src/core/hle/ipc_helpers.h
@@ -139,10 +139,8 @@ public:
139 context->AddDomainObject(std::move(iface)); 139 context->AddDomainObject(std::move(iface));
140 } else { 140 } else {
141 auto& kernel = Core::System::GetInstance().Kernel(); 141 auto& kernel = Core::System::GetInstance().Kernel();
142 auto sessions = 142 auto [server, client] =
143 Kernel::ServerSession::CreateSessionPair(kernel, iface->GetServiceName()); 143 Kernel::ServerSession::CreateSessionPair(kernel, iface->GetServiceName());
144 auto server = std::get<Kernel::SharedPtr<Kernel::ServerSession>>(sessions);
145 auto client = std::get<Kernel::SharedPtr<Kernel::ClientSession>>(sessions);
146 iface->ClientConnected(server); 144 iface->ClientConnected(server);
147 context->AddMoveObject(std::move(client)); 145 context->AddMoveObject(std::move(client));
148 } 146 }
@@ -275,6 +273,20 @@ inline void ResponseBuilder::Push(u64 value) {
275} 273}
276 274
277template <> 275template <>
276inline void ResponseBuilder::Push(float value) {
277 u32 integral;
278 std::memcpy(&integral, &value, sizeof(u32));
279 Push(integral);
280}
281
282template <>
283inline void ResponseBuilder::Push(double value) {
284 u64 integral;
285 std::memcpy(&integral, &value, sizeof(u64));
286 Push(integral);
287}
288
289template <>
278inline void ResponseBuilder::Push(bool value) { 290inline void ResponseBuilder::Push(bool value) {
279 Push(static_cast<u8>(value)); 291 Push(static_cast<u8>(value));
280} 292}
@@ -416,6 +428,22 @@ inline s64 RequestParser::Pop() {
416} 428}
417 429
418template <> 430template <>
431inline float RequestParser::Pop() {
432 const u32 value = Pop<u32>();
433 float real;
434 std::memcpy(&real, &value, sizeof(real));
435 return real;
436}
437
438template <>
439inline double RequestParser::Pop() {
440 const u64 value = Pop<u64>();
441 float real;
442 std::memcpy(&real, &value, sizeof(real));
443 return real;
444}
445
446template <>
419inline bool RequestParser::Pop() { 447inline bool RequestParser::Pop() {
420 return Pop<u8>() != 0; 448 return Pop<u8>() != 0;
421} 449}
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
index 352190da8..c8842410b 100644
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ b/src/core/hle/kernel/address_arbiter.cpp
@@ -26,7 +26,7 @@ void WakeThreads(const std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_
26 // them all. 26 // them all.
27 std::size_t last = waiting_threads.size(); 27 std::size_t last = waiting_threads.size();
28 if (num_to_wake > 0) { 28 if (num_to_wake > 0) {
29 last = num_to_wake; 29 last = std::min(last, static_cast<std::size_t>(num_to_wake));
30 } 30 }
31 31
32 // Signal the waiting threads. 32 // Signal the waiting threads.
@@ -90,9 +90,9 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a
90 // Determine the modified value depending on the waiting count. 90 // Determine the modified value depending on the waiting count.
91 s32 updated_value; 91 s32 updated_value;
92 if (waiting_threads.empty()) { 92 if (waiting_threads.empty()) {
93 updated_value = value - 1;
94 } else if (num_to_wake <= 0 || waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
95 updated_value = value + 1; 93 updated_value = value + 1;
94 } else if (num_to_wake <= 0 || waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
95 updated_value = value - 1;
96 } else { 96 } else {
97 updated_value = value; 97 updated_value = value;
98 } 98 }
diff --git a/src/core/hle/kernel/client_port.cpp b/src/core/hle/kernel/client_port.cpp
index aa432658e..744b1697d 100644
--- a/src/core/hle/kernel/client_port.cpp
+++ b/src/core/hle/kernel/client_port.cpp
@@ -2,8 +2,6 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <tuple>
6
7#include "core/hle/kernel/client_port.h" 5#include "core/hle/kernel/client_port.h"
8#include "core/hle/kernel/client_session.h" 6#include "core/hle/kernel/client_session.h"
9#include "core/hle/kernel/errors.h" 7#include "core/hle/kernel/errors.h"
@@ -31,18 +29,18 @@ ResultVal<SharedPtr<ClientSession>> ClientPort::Connect() {
31 active_sessions++; 29 active_sessions++;
32 30
33 // Create a new session pair, let the created sessions inherit the parent port's HLE handler. 31 // Create a new session pair, let the created sessions inherit the parent port's HLE handler.
34 auto sessions = ServerSession::CreateSessionPair(kernel, server_port->GetName(), this); 32 auto [server, client] = ServerSession::CreateSessionPair(kernel, server_port->GetName(), this);
35 33
36 if (server_port->HasHLEHandler()) { 34 if (server_port->HasHLEHandler()) {
37 server_port->GetHLEHandler()->ClientConnected(std::get<SharedPtr<ServerSession>>(sessions)); 35 server_port->GetHLEHandler()->ClientConnected(server);
38 } else { 36 } else {
39 server_port->AppendPendingSession(std::get<SharedPtr<ServerSession>>(sessions)); 37 server_port->AppendPendingSession(server);
40 } 38 }
41 39
42 // Wake the threads waiting on the ServerPort 40 // Wake the threads waiting on the ServerPort
43 server_port->WakeupAllWaitingThreads(); 41 server_port->WakeupAllWaitingThreads();
44 42
45 return MakeResult(std::get<SharedPtr<ClientSession>>(sessions)); 43 return MakeResult(client);
46} 44}
47 45
48void ClientPort::ConnectionClosed() { 46void ClientPort::ConnectionClosed() {
diff --git a/src/core/hle/kernel/code_set.cpp b/src/core/hle/kernel/code_set.cpp
new file mode 100644
index 000000000..1f434e9af
--- /dev/null
+++ b/src/core/hle/kernel/code_set.cpp
@@ -0,0 +1,12 @@
1// Copyright 2019 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "core/hle/kernel/code_set.h"
6
7namespace Kernel {
8
9CodeSet::CodeSet() = default;
10CodeSet::~CodeSet() = default;
11
12} // namespace Kernel
diff --git a/src/core/hle/kernel/code_set.h b/src/core/hle/kernel/code_set.h
new file mode 100644
index 000000000..879957dcb
--- /dev/null
+++ b/src/core/hle/kernel/code_set.h
@@ -0,0 +1,89 @@
1// Copyright 2019 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <cstddef>
8#include <vector>
9
10#include "common/common_types.h"
11
12namespace Kernel {
13
14/**
15 * Represents executable data that may be loaded into a kernel process.
16 *
17 * A code set consists of three basic segments:
18 * - A code (AKA text) segment,
19 * - A read-only data segment (rodata)
20 * - A data segment
21 *
22 * The code segment is the portion of the object file that contains
23 * executable instructions.
24 *
25 * The read-only data segment is the portion of the object file that
26 * contains (as one would expect) read-only data, such as fixed constant
27 * values and data structures.
28 *
29 * The data segment is similar to the read-only data segment -- it contains
30 * variables and data structures that have predefined values; however,
31 * entities within this segment can be modified.
32 */
33struct CodeSet final {
34 /// A single segment within a code set.
35 struct Segment final {
36 /// The byte offset that this segment is located at.
37 std::size_t offset = 0;
38
39 /// The address to map this segment to.
40 VAddr addr = 0;
41
42 /// The size of this segment in bytes.
43 u32 size = 0;
44 };
45
46 explicit CodeSet();
47 ~CodeSet();
48
49 CodeSet(const CodeSet&) = delete;
50 CodeSet& operator=(const CodeSet&) = delete;
51
52 CodeSet(CodeSet&&) = default;
53 CodeSet& operator=(CodeSet&&) = default;
54
55 Segment& CodeSegment() {
56 return segments[0];
57 }
58
59 const Segment& CodeSegment() const {
60 return segments[0];
61 }
62
63 Segment& RODataSegment() {
64 return segments[1];
65 }
66
67 const Segment& RODataSegment() const {
68 return segments[1];
69 }
70
71 Segment& DataSegment() {
72 return segments[2];
73 }
74
75 const Segment& DataSegment() const {
76 return segments[2];
77 }
78
79 /// The overall data that backs this code set.
80 std::vector<u8> memory;
81
82 /// The segments that comprise this code set.
83 std::array<Segment, 3> segments;
84
85 /// The entry point address for this code set.
86 VAddr entrypoint = 0;
87};
88
89} // namespace Kernel
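A hypothetical sketch of how a loader might populate the new CodeSet before handing it to Process::LoadModule. The MakeCodeSet helper, the back-to-back segment layout, and the choice of entry point are assumptions for illustration and are not code from this change.

Kernel::CodeSet MakeCodeSet(const std::vector<u8>& text, const std::vector<u8>& ro,
                            const std::vector<u8>& rw) {
    Kernel::CodeSet cs;

    // Segments are laid out back to back inside one backing buffer.
    cs.CodeSegment().offset = 0;
    cs.CodeSegment().addr = 0;
    cs.CodeSegment().size = static_cast<u32>(text.size());

    cs.RODataSegment().offset = text.size();
    cs.RODataSegment().addr = text.size();
    cs.RODataSegment().size = static_cast<u32>(ro.size());

    cs.DataSegment().offset = text.size() + ro.size();
    cs.DataSegment().addr = text.size() + ro.size();
    cs.DataSegment().size = static_cast<u32>(rw.size());

    cs.memory.insert(cs.memory.end(), text.begin(), text.end());
    cs.memory.insert(cs.memory.end(), ro.begin(), ro.end());
    cs.memory.insert(cs.memory.end(), rw.begin(), rw.end());

    // Assume execution starts at the beginning of the code segment.
    cs.entrypoint = cs.CodeSegment().addr;
    return cs; // CodeSet is move-only, so this moves into the caller.
}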
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 4d224d01d..4d58e7c69 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -21,6 +21,7 @@
21#include "core/hle/kernel/thread.h" 21#include "core/hle/kernel/thread.h"
22#include "core/hle/lock.h" 22#include "core/hle/lock.h"
23#include "core/hle/result.h" 23#include "core/hle/result.h"
24#include "core/memory.h"
24 25
25namespace Kernel { 26namespace Kernel {
26 27
@@ -29,12 +30,12 @@ namespace Kernel {
29 * @param thread_handle The handle of the thread that's been awoken 30 * @param thread_handle The handle of the thread that's been awoken
30 * @param cycles_late The number of CPU cycles that have passed since the desired wakeup time 31 * @param cycles_late The number of CPU cycles that have passed since the desired wakeup time
31 */ 32 */
32static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] int cycles_late) { 33static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_late) {
33 const auto proper_handle = static_cast<Handle>(thread_handle); 34 const auto proper_handle = static_cast<Handle>(thread_handle);
34 const auto& system = Core::System::GetInstance(); 35 const auto& system = Core::System::GetInstance();
35 36
36 // Lock the global kernel mutex when we enter the kernel HLE. 37 // Lock the global kernel mutex when we enter the kernel HLE.
37 std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); 38 std::lock_guard lock{HLE::g_hle_lock};
38 39
39 SharedPtr<Thread> thread = 40 SharedPtr<Thread> thread =
40 system.Kernel().RetrieveThreadFromWakeupCallbackHandleTable(proper_handle); 41 system.Kernel().RetrieveThreadFromWakeupCallbackHandleTable(proper_handle);
@@ -62,7 +63,8 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] int cycles_
62 63
63 if (thread->GetMutexWaitAddress() != 0 || thread->GetCondVarWaitAddress() != 0 || 64 if (thread->GetMutexWaitAddress() != 0 || thread->GetCondVarWaitAddress() != 0 ||
64 thread->GetWaitHandle() != 0) { 65 thread->GetWaitHandle() != 0) {
65 ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex); 66 ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex ||
67 thread->GetStatus() == ThreadStatus::WaitCondVar);
66 thread->SetMutexWaitAddress(0); 68 thread->SetMutexWaitAddress(0);
67 thread->SetCondVarWaitAddress(0); 69 thread->SetCondVarWaitAddress(0);
68 thread->SetWaitHandle(0); 70 thread->SetWaitHandle(0);
@@ -114,7 +116,7 @@ struct KernelCore::Impl {
114 116
115 // Creates the default system resource limit 117 // Creates the default system resource limit
116 void InitializeSystemResourceLimit(KernelCore& kernel) { 118 void InitializeSystemResourceLimit(KernelCore& kernel) {
117 system_resource_limit = ResourceLimit::Create(kernel, "System"); 119 system_resource_limit = ResourceLimit::Create(kernel);
118 120
119 // If setting the default system values fails, then something seriously wrong has occurred. 121 // If setting the default system values fails, then something seriously wrong has occurred.
120 ASSERT(system_resource_limit->SetLimitValue(ResourceType::PhysicalMemory, 0x200000000) 122 ASSERT(system_resource_limit->SetLimitValue(ResourceType::PhysicalMemory, 0x200000000)
@@ -180,6 +182,7 @@ void KernelCore::AppendNewProcess(SharedPtr<Process> process) {
180 182
181void KernelCore::MakeCurrentProcess(Process* process) { 183void KernelCore::MakeCurrentProcess(Process* process) {
182 impl->current_process = process; 184 impl->current_process = process;
185 Memory::SetCurrentPageTable(&process->VMManager().page_table);
183} 186}
184 187
185Process* KernelCore::CurrentProcess() { 188Process* KernelCore::CurrentProcess() {
@@ -190,6 +193,10 @@ const Process* KernelCore::CurrentProcess() const {
190 return impl->current_process; 193 return impl->current_process;
191} 194}
192 195
196const std::vector<SharedPtr<Process>>& KernelCore::GetProcessList() const {
197 return impl->process_list;
198}
199
193void KernelCore::AddNamedPort(std::string name, SharedPtr<ClientPort> port) { 200void KernelCore::AddNamedPort(std::string name, SharedPtr<ClientPort> port) {
194 impl->named_ports.emplace(std::move(name), std::move(port)); 201 impl->named_ports.emplace(std::move(name), std::move(port));
195} 202}
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index ff17ff865..6b8738599 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -8,9 +8,6 @@
8#include <unordered_map> 8#include <unordered_map>
9#include "core/hle/kernel/object.h" 9#include "core/hle/kernel/object.h"
10 10
11template <typename T>
12class ResultVal;
13
14namespace Core { 11namespace Core {
15class System; 12class System;
16} 13}
@@ -75,6 +72,9 @@ public:
75 /// Retrieves a const pointer to the current process. 72 /// Retrieves a const pointer to the current process.
76 const Process* CurrentProcess() const; 73 const Process* CurrentProcess() const;
77 74
75 /// Retrieves the list of processes.
76 const std::vector<SharedPtr<Process>>& GetProcessList() const;
77
78 /// Adds a port to the named port table 78 /// Adds a port to the named port table
79 void AddNamedPort(std::string name, SharedPtr<ClientPort> port); 79 void AddNamedPort(std::string name, SharedPtr<ClientPort> port);
80 80
diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp
index 0743670ad..98e87313b 100644
--- a/src/core/hle/kernel/mutex.cpp
+++ b/src/core/hle/kernel/mutex.cpp
@@ -2,7 +2,6 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <map>
6#include <utility> 5#include <utility>
7#include <vector> 6#include <vector>
8 7
@@ -10,8 +9,11 @@
10#include "core/core.h" 9#include "core/core.h"
11#include "core/hle/kernel/errors.h" 10#include "core/hle/kernel/errors.h"
12#include "core/hle/kernel/handle_table.h" 11#include "core/hle/kernel/handle_table.h"
12#include "core/hle/kernel/kernel.h"
13#include "core/hle/kernel/mutex.h" 13#include "core/hle/kernel/mutex.h"
14#include "core/hle/kernel/object.h" 14#include "core/hle/kernel/object.h"
15#include "core/hle/kernel/process.h"
16#include "core/hle/kernel/scheduler.h"
15#include "core/hle/kernel/thread.h" 17#include "core/hle/kernel/thread.h"
16#include "core/hle/result.h" 18#include "core/hle/result.h"
17#include "core/memory.h" 19#include "core/memory.h"
@@ -57,41 +59,47 @@ static void TransferMutexOwnership(VAddr mutex_addr, SharedPtr<Thread> current_t
57 } 59 }
58} 60}
59 61
60ResultCode Mutex::TryAcquire(HandleTable& handle_table, VAddr address, Handle holding_thread_handle, 62Mutex::Mutex(Core::System& system) : system{system} {}
63Mutex::~Mutex() = default;
64
65ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
61 Handle requesting_thread_handle) { 66 Handle requesting_thread_handle) {
62 // The mutex address must be 4-byte aligned 67 // The mutex address must be 4-byte aligned
63 if ((address % sizeof(u32)) != 0) { 68 if ((address % sizeof(u32)) != 0) {
64 return ERR_INVALID_ADDRESS; 69 return ERR_INVALID_ADDRESS;
65 } 70 }
66 71
72 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
73 Thread* const current_thread = system.CurrentScheduler().GetCurrentThread();
67 SharedPtr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle); 74 SharedPtr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle);
68 SharedPtr<Thread> requesting_thread = handle_table.Get<Thread>(requesting_thread_handle); 75 SharedPtr<Thread> requesting_thread = handle_table.Get<Thread>(requesting_thread_handle);
69 76
70 // TODO(Subv): It is currently unknown if it is possible to lock a mutex on behalf of another 77 // TODO(Subv): It is currently unknown if it is possible to lock a mutex on behalf of another
71 // thread. 78 // thread.
72 ASSERT(requesting_thread == GetCurrentThread()); 79 ASSERT(requesting_thread == current_thread);
73 80
74 u32 addr_value = Memory::Read32(address); 81 const u32 addr_value = Memory::Read32(address);
75 82
76 // If the mutex isn't being held, just return success. 83 // If the mutex isn't being held, just return success.
77 if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) { 84 if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) {
78 return RESULT_SUCCESS; 85 return RESULT_SUCCESS;
79 } 86 }
80 87
81 if (holding_thread == nullptr) 88 if (holding_thread == nullptr) {
82 return ERR_INVALID_HANDLE; 89 return ERR_INVALID_HANDLE;
90 }
83 91
84 // Wait until the mutex is released 92 // Wait until the mutex is released
85 GetCurrentThread()->SetMutexWaitAddress(address); 93 current_thread->SetMutexWaitAddress(address);
86 GetCurrentThread()->SetWaitHandle(requesting_thread_handle); 94 current_thread->SetWaitHandle(requesting_thread_handle);
87 95
88 GetCurrentThread()->SetStatus(ThreadStatus::WaitMutex); 96 current_thread->SetStatus(ThreadStatus::WaitMutex);
89 GetCurrentThread()->InvalidateWakeupCallback(); 97 current_thread->InvalidateWakeupCallback();
90 98
91 // Update the lock holder thread's priority to prevent priority inversion. 99 // Update the lock holder thread's priority to prevent priority inversion.
92 holding_thread->AddMutexWaiter(GetCurrentThread()); 100 holding_thread->AddMutexWaiter(current_thread);
93 101
94 Core::System::GetInstance().PrepareReschedule(); 102 system.PrepareReschedule();
95 103
96 return RESULT_SUCCESS; 104 return RESULT_SUCCESS;
97} 105}
@@ -102,7 +110,8 @@ ResultCode Mutex::Release(VAddr address) {
102 return ERR_INVALID_ADDRESS; 110 return ERR_INVALID_ADDRESS;
103 } 111 }
104 112
105 auto [thread, num_waiters] = GetHighestPriorityMutexWaitingThread(GetCurrentThread(), address); 113 auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
114 auto [thread, num_waiters] = GetHighestPriorityMutexWaitingThread(current_thread, address);
106 115
107 // There are no more threads waiting for the mutex, release it completely. 116 // There are no more threads waiting for the mutex, release it completely.
108 if (thread == nullptr) { 117 if (thread == nullptr) {
@@ -111,7 +120,7 @@ ResultCode Mutex::Release(VAddr address) {
111 } 120 }
112 121
113 // Transfer the ownership of the mutex from the previous owner to the new one. 122 // Transfer the ownership of the mutex from the previous owner to the new one.
114 TransferMutexOwnership(address, GetCurrentThread(), thread); 123 TransferMutexOwnership(address, current_thread, thread);
115 124
116 u32 mutex_value = thread->GetWaitHandle(); 125 u32 mutex_value = thread->GetWaitHandle();
117 126
diff --git a/src/core/hle/kernel/mutex.h b/src/core/hle/kernel/mutex.h
index 81e62d497..b904de2e8 100644
--- a/src/core/hle/kernel/mutex.h
+++ b/src/core/hle/kernel/mutex.h
@@ -5,32 +5,34 @@
5#pragma once 5#pragma once
6 6
7#include "common/common_types.h" 7#include "common/common_types.h"
8#include "core/hle/kernel/object.h"
9 8
10union ResultCode; 9union ResultCode;
11 10
12namespace Kernel { 11namespace Core {
12class System;
13}
13 14
14class HandleTable; 15namespace Kernel {
15class Thread;
16 16
17class Mutex final { 17class Mutex final {
18public: 18public:
19 explicit Mutex(Core::System& system);
20 ~Mutex();
21
19 /// Flag that indicates that a mutex still has threads waiting for it. 22 /// Flag that indicates that a mutex still has threads waiting for it.
20 static constexpr u32 MutexHasWaitersFlag = 0x40000000; 23 static constexpr u32 MutexHasWaitersFlag = 0x40000000;
21 /// Mask of the bits in a mutex address value that contain the mutex owner. 24 /// Mask of the bits in a mutex address value that contain the mutex owner.
22 static constexpr u32 MutexOwnerMask = 0xBFFFFFFF; 25 static constexpr u32 MutexOwnerMask = 0xBFFFFFFF;
23 26
24 /// Attempts to acquire a mutex at the specified address. 27 /// Attempts to acquire a mutex at the specified address.
25 static ResultCode TryAcquire(HandleTable& handle_table, VAddr address, 28 ResultCode TryAcquire(VAddr address, Handle holding_thread_handle,
26 Handle holding_thread_handle, Handle requesting_thread_handle); 29 Handle requesting_thread_handle);
27 30
28 /// Releases the mutex at the specified address. 31 /// Releases the mutex at the specified address.
29 static ResultCode Release(VAddr address); 32 ResultCode Release(VAddr address);
30 33
31private: 34private:
32 Mutex() = default; 35 Core::System& system;
33 ~Mutex() = default;
34}; 36};
35 37
36} // namespace Kernel 38} // namespace Kernel
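With the mutex now owned per process instead of being a pair of static functions, call sites (for example the SVC lock/unlock handlers) presumably reach it through the current process along these lines. This is a sketch of the expected call shape under that assumption, not code taken from the diff.

// Hypothetical call site, assuming a Core::System& named system is in scope.
auto* const current_process = system.Kernel().CurrentProcess();
const ResultCode acquire_result = current_process->GetMutex().TryAcquire(
    mutex_address, holding_thread_handle, requesting_thread_handle);
if (acquire_result.IsError()) {
    return acquire_result;
}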
diff --git a/src/core/hle/kernel/object.cpp b/src/core/hle/kernel/object.cpp
index 8870463d0..10431e94c 100644
--- a/src/core/hle/kernel/object.cpp
+++ b/src/core/hle/kernel/object.cpp
@@ -23,7 +23,7 @@ bool Object::IsWaitable() const {
23 case HandleType::Unknown: 23 case HandleType::Unknown:
24 case HandleType::WritableEvent: 24 case HandleType::WritableEvent:
25 case HandleType::SharedMemory: 25 case HandleType::SharedMemory:
26 case HandleType::AddressArbiter: 26 case HandleType::TransferMemory:
27 case HandleType::ResourceLimit: 27 case HandleType::ResourceLimit:
28 case HandleType::ClientPort: 28 case HandleType::ClientPort:
29 case HandleType::ClientSession: 29 case HandleType::ClientSession:
diff --git a/src/core/hle/kernel/object.h b/src/core/hle/kernel/object.h
index 4c2505908..332876c27 100644
--- a/src/core/hle/kernel/object.h
+++ b/src/core/hle/kernel/object.h
@@ -22,9 +22,9 @@ enum class HandleType : u32 {
22 WritableEvent, 22 WritableEvent,
23 ReadableEvent, 23 ReadableEvent,
24 SharedMemory, 24 SharedMemory,
25 TransferMemory,
25 Thread, 26 Thread,
26 Process, 27 Process,
27 AddressArbiter,
28 ResourceLimit, 28 ResourceLimit,
29 ClientPort, 29 ClientPort,
30 ServerPort, 30 ServerPort,
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index 49fced7b1..4e94048da 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -5,10 +5,12 @@
5#include <algorithm> 5#include <algorithm>
6#include <memory> 6#include <memory>
7#include <random> 7#include <random>
8#include "common/alignment.h"
8#include "common/assert.h" 9#include "common/assert.h"
9#include "common/logging/log.h" 10#include "common/logging/log.h"
10#include "core/core.h" 11#include "core/core.h"
11#include "core/file_sys/program_metadata.h" 12#include "core/file_sys/program_metadata.h"
13#include "core/hle/kernel/code_set.h"
12#include "core/hle/kernel/errors.h" 14#include "core/hle/kernel/errors.h"
13#include "core/hle/kernel/kernel.h" 15#include "core/hle/kernel/kernel.h"
14#include "core/hle/kernel/process.h" 16#include "core/hle/kernel/process.h"
@@ -30,9 +32,6 @@ namespace {
30 * @param priority The priority to give the main thread 32 * @param priority The priority to give the main thread
31 */ 33 */
32void SetupMainThread(Process& owner_process, KernelCore& kernel, VAddr entry_point, u32 priority) { 34void SetupMainThread(Process& owner_process, KernelCore& kernel, VAddr entry_point, u32 priority) {
33 // Setup page table so we can write to memory
34 SetCurrentPageTable(&owner_process.VMManager().page_table);
35
36 // Initialize new "main" thread 35 // Initialize new "main" thread
37 const VAddr stack_top = owner_process.VMManager().GetTLSIORegionEndAddress(); 36 const VAddr stack_top = owner_process.VMManager().GetTLSIORegionEndAddress();
38 auto thread_res = Thread::Create(kernel, "main", entry_point, priority, 0, 37 auto thread_res = Thread::Create(kernel, "main", entry_point, priority, 0,
@@ -50,9 +49,6 @@ void SetupMainThread(Process& owner_process, KernelCore& kernel, VAddr entry_poi
50} 49}
51} // Anonymous namespace 50} // Anonymous namespace
52 51
53CodeSet::CodeSet() = default;
54CodeSet::~CodeSet() = default;
55
56SharedPtr<Process> Process::Create(Core::System& system, std::string&& name) { 52SharedPtr<Process> Process::Create(Core::System& system, std::string&& name) {
57 auto& kernel = system.Kernel(); 53 auto& kernel = system.Kernel();
58 54
@@ -77,6 +73,18 @@ SharedPtr<ResourceLimit> Process::GetResourceLimit() const {
77 return resource_limit; 73 return resource_limit;
78} 74}
79 75
76u64 Process::GetTotalPhysicalMemoryUsed() const {
77 return vm_manager.GetCurrentHeapSize() + main_thread_stack_size + code_memory_size;
78}
79
80void Process::RegisterThread(const Thread* thread) {
81 thread_list.push_back(thread);
82}
83
84void Process::UnregisterThread(const Thread* thread) {
85 thread_list.remove(thread);
86}
87
80ResultCode Process::ClearSignalState() { 88ResultCode Process::ClearSignalState() {
81 if (status == ProcessStatus::Exited) { 89 if (status == ProcessStatus::Exited) {
82 LOG_ERROR(Kernel, "called on a terminated process instance."); 90 LOG_ERROR(Kernel, "called on a terminated process instance.");
@@ -98,6 +106,8 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata) {
98 is_64bit_process = metadata.Is64BitProgram(); 106 is_64bit_process = metadata.Is64BitProgram();
99 107
100 vm_manager.Reset(metadata.GetAddressSpaceType()); 108 vm_manager.Reset(metadata.GetAddressSpaceType());
109 // Ensure that the potentially resized page table is seen by CPU backends.
110 Memory::SetCurrentPageTable(&vm_manager.page_table);
101 111
102 const auto& caps = metadata.GetKernelCapabilities(); 112 const auto& caps = metadata.GetKernelCapabilities();
103 const auto capability_init_result = 113 const auto capability_init_result =
@@ -109,14 +119,17 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata) {
109 return handle_table.SetSize(capabilities.GetHandleTableSize()); 119 return handle_table.SetSize(capabilities.GetHandleTableSize());
110} 120}
111 121
112void Process::Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size) { 122void Process::Run(VAddr entry_point, s32 main_thread_priority, u64 stack_size) {
123 // The kernel always ensures that the given stack size is page aligned.
124 main_thread_stack_size = Common::AlignUp(stack_size, Memory::PAGE_SIZE);
125
113 // Allocate and map the main thread stack 126 // Allocate and map the main thread stack
114 // TODO(bunnei): This is heap area that should be allocated by the kernel and not mapped as part 127 // TODO(bunnei): This is heap area that should be allocated by the kernel and not mapped as part
115 // of the user address space. 128 // of the user address space.
129 const VAddr mapping_address = vm_manager.GetTLSIORegionEndAddress() - main_thread_stack_size;
116 vm_manager 130 vm_manager
117 .MapMemoryBlock(vm_manager.GetTLSIORegionEndAddress() - stack_size, 131 .MapMemoryBlock(mapping_address, std::make_shared<std::vector<u8>>(main_thread_stack_size),
118 std::make_shared<std::vector<u8>>(stack_size, 0), 0, stack_size, 132 0, main_thread_stack_size, MemoryState::Stack)
119 MemoryState::Stack)
120 .Unwrap(); 133 .Unwrap();
121 134
122 vm_manager.LogLayout(); 135 vm_manager.LogLayout();
@@ -212,33 +225,38 @@ void Process::FreeTLSSlot(VAddr tls_address) {
212} 225}
213 226
214void Process::LoadModule(CodeSet module_, VAddr base_addr) { 227void Process::LoadModule(CodeSet module_, VAddr base_addr) {
215 const auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions, 228 const auto memory = std::make_shared<std::vector<u8>>(std::move(module_.memory));
229
230 const auto MapSegment = [&](const CodeSet::Segment& segment, VMAPermission permissions,
216 MemoryState memory_state) { 231 MemoryState memory_state) {
217 const auto vma = vm_manager 232 const auto vma = vm_manager
218 .MapMemoryBlock(segment.addr + base_addr, module_.memory, 233 .MapMemoryBlock(segment.addr + base_addr, memory, segment.offset,
219 segment.offset, segment.size, memory_state) 234 segment.size, memory_state)
220 .Unwrap(); 235 .Unwrap();
221 vm_manager.Reprotect(vma, permissions); 236 vm_manager.Reprotect(vma, permissions);
222 }; 237 };
223 238
224 // Map CodeSet segments 239 // Map CodeSet segments
225 MapSegment(module_.CodeSegment(), VMAPermission::ReadExecute, MemoryState::CodeStatic); 240 MapSegment(module_.CodeSegment(), VMAPermission::ReadExecute, MemoryState::Code);
226 MapSegment(module_.RODataSegment(), VMAPermission::Read, MemoryState::CodeMutable); 241 MapSegment(module_.RODataSegment(), VMAPermission::Read, MemoryState::CodeData);
227 MapSegment(module_.DataSegment(), VMAPermission::ReadWrite, MemoryState::CodeMutable); 242 MapSegment(module_.DataSegment(), VMAPermission::ReadWrite, MemoryState::CodeData);
243
244 code_memory_size += module_.memory.size();
228 245
229 // Clear instruction cache in CPU JIT 246 // Clear instruction cache in CPU JIT
230 system.InvalidateCpuInstructionCaches(); 247 system.InvalidateCpuInstructionCaches();
231} 248}
232 249
233Process::Process(Core::System& system) 250Process::Process(Core::System& system)
234 : WaitObject{system.Kernel()}, address_arbiter{system}, system{system} {} 251 : WaitObject{system.Kernel()}, address_arbiter{system}, mutex{system}, system{system} {}
252
235Process::~Process() = default; 253Process::~Process() = default;
236 254
237void Process::Acquire(Thread* thread) { 255void Process::Acquire(Thread* thread) {
238 ASSERT_MSG(!ShouldWait(thread), "Object unavailable!"); 256 ASSERT_MSG(!ShouldWait(thread), "Object unavailable!");
239} 257}
240 258
241bool Process::ShouldWait(Thread* thread) const { 259bool Process::ShouldWait(const Thread* thread) const {
242 return !is_signaled; 260 return !is_signaled;
243} 261}
244 262
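
As a rough illustration of the segment-mapping pattern used by LoadModule above (one shared backing buffer, three segment views mapped with different permissions), here is a self-contained sketch; the types and the Map function are simplified stand-ins rather than the VMManager interface:

    #include <cstdint>
    #include <cstdio>
    #include <memory>
    #include <vector>

    enum class Permission { ReadExecute, Read, ReadWrite };

    struct Segment {
        std::size_t offset = 0;  // offset into the shared backing buffer
        std::uint64_t addr = 0;  // target address, relative to the module base
        std::uint32_t size = 0;
    };

    // Stand-in for mapping a slice of the shared buffer at base + segment.addr.
    void Map(const std::shared_ptr<std::vector<std::uint8_t>>& memory, const Segment& segment,
             std::uint64_t base, Permission perm) {
        std::printf("map offset 0x%zx..0x%zx of the %zu-byte image at 0x%llx (perm=%d)\n",
                    segment.offset, segment.offset + segment.size, memory->size(),
                    static_cast<unsigned long long>(base + segment.addr),
                    static_cast<int>(perm));
    }

    int main() {
        const auto memory = std::make_shared<std::vector<std::uint8_t>>(0x3000);
        const Segment code{0x0000, 0x0000, 0x1000};
        const Segment rodata{0x1000, 0x1000, 0x1000};
        const Segment data{0x2000, 0x2000, 0x1000};
        const std::uint64_t base = 0x800000000;

        Map(memory, code, base, Permission::ReadExecute);
        Map(memory, rodata, base, Permission::Read);
        Map(memory, data, base, Permission::ReadWrite);
    }
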
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index 47ffd4ad3..f060f2a3b 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -7,13 +7,14 @@
7#include <array> 7#include <array>
8#include <bitset> 8#include <bitset>
9#include <cstddef> 9#include <cstddef>
10#include <memory> 10#include <list>
11#include <string> 11#include <string>
12#include <vector> 12#include <vector>
13#include <boost/container/static_vector.hpp> 13#include <boost/container/static_vector.hpp>
14#include "common/common_types.h" 14#include "common/common_types.h"
15#include "core/hle/kernel/address_arbiter.h" 15#include "core/hle/kernel/address_arbiter.h"
16#include "core/hle/kernel/handle_table.h" 16#include "core/hle/kernel/handle_table.h"
17#include "core/hle/kernel/mutex.h"
17#include "core/hle/kernel/process_capability.h" 18#include "core/hle/kernel/process_capability.h"
18#include "core/hle/kernel/vm_manager.h" 19#include "core/hle/kernel/vm_manager.h"
19#include "core/hle/kernel/wait_object.h" 20#include "core/hle/kernel/wait_object.h"
@@ -33,13 +34,7 @@ class KernelCore;
33class ResourceLimit; 34class ResourceLimit;
34class Thread; 35class Thread;
35 36
36struct AddressMapping { 37struct CodeSet;
37 // Address and size must be page-aligned
38 VAddr address;
39 u64 size;
40 bool read_only;
41 bool unk_flag;
42};
43 38
44enum class MemoryRegion : u16 { 39enum class MemoryRegion : u16 {
45 APPLICATION = 1, 40 APPLICATION = 1,
@@ -65,46 +60,6 @@ enum class ProcessStatus {
65 DebugBreak, 60 DebugBreak,
66}; 61};
67 62
68struct CodeSet final {
69 struct Segment {
70 std::size_t offset = 0;
71 VAddr addr = 0;
72 u32 size = 0;
73 };
74
75 explicit CodeSet();
76 ~CodeSet();
77
78 Segment& CodeSegment() {
79 return segments[0];
80 }
81
82 const Segment& CodeSegment() const {
83 return segments[0];
84 }
85
86 Segment& RODataSegment() {
87 return segments[1];
88 }
89
90 const Segment& RODataSegment() const {
91 return segments[1];
92 }
93
94 Segment& DataSegment() {
95 return segments[2];
96 }
97
98 const Segment& DataSegment() const {
99 return segments[2];
100 }
101
102 std::shared_ptr<std::vector<u8>> memory;
103
104 std::array<Segment, 3> segments;
105 VAddr entrypoint = 0;
106};
107
108class Process final : public WaitObject { 63class Process final : public WaitObject {
109public: 64public:
110 enum : u64 { 65 enum : u64 {
@@ -165,6 +120,16 @@ public:
165 return address_arbiter; 120 return address_arbiter;
166 } 121 }
167 122
123 /// Gets a reference to the process' mutex lock.
124 Mutex& GetMutex() {
125 return mutex;
126 }
127
128    /// Gets a const reference to the process' mutex lock.

129 const Mutex& GetMutex() const {
130 return mutex;
131 }
132
168 /// Gets the current status of the process 133 /// Gets the current status of the process
169 ProcessStatus GetStatus() const { 134 ProcessStatus GetStatus() const {
170 return status; 135 return status;
@@ -222,6 +187,22 @@ public:
222 return random_entropy.at(index); 187 return random_entropy.at(index);
223 } 188 }
224 189
190 /// Retrieves the total physical memory used by this process in bytes.
191 u64 GetTotalPhysicalMemoryUsed() const;
192
193 /// Gets the list of all threads created with this process as their owner.
194 const std::list<const Thread*>& GetThreadList() const {
195 return thread_list;
196 }
197
198 /// Registers a thread as being created under this process,
199 /// adding it to this process' thread list.
200 void RegisterThread(const Thread* thread);
201
202 /// Unregisters a thread from this process, removing it
203 /// from this process' thread list.
204 void UnregisterThread(const Thread* thread);
205
225 /// Clears the signaled state of the process if and only if it's signaled. 206 /// Clears the signaled state of the process if and only if it's signaled.
226 /// 207 ///
227 /// @pre The process must not be already terminated. If this is called on a 208 /// @pre The process must not be already terminated. If this is called on a
@@ -246,7 +227,7 @@ public:
246 /** 227 /**
247 * Applies address space changes and launches the process main thread. 228 * Applies address space changes and launches the process main thread.
248 */ 229 */
249 void Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size); 230 void Run(VAddr entry_point, s32 main_thread_priority, u64 stack_size);
250 231
251 /** 232 /**
252 * Prepares a process for termination by stopping all of its threads 233 * Prepares a process for termination by stopping all of its threads
@@ -270,7 +251,7 @@ private:
270 ~Process() override; 251 ~Process() override;
271 252
272 /// Checks if the specified thread should wait until this process is available. 253 /// Checks if the specified thread should wait until this process is available.
273 bool ShouldWait(Thread* thread) const override; 254 bool ShouldWait(const Thread* thread) const override;
274 255
275 /// Acquires/locks this process for the specified thread if it's available. 256 /// Acquires/locks this process for the specified thread if it's available.
276 void Acquire(Thread* thread) override; 257 void Acquire(Thread* thread) override;
@@ -283,6 +264,12 @@ private:
283 /// Memory manager for this process. 264 /// Memory manager for this process.
284 Kernel::VMManager vm_manager; 265 Kernel::VMManager vm_manager;
285 266
267 /// Size of the main thread's stack in bytes.
268 u64 main_thread_stack_size = 0;
269
270 /// Size of the loaded code memory in bytes.
271 u64 code_memory_size = 0;
272
286 /// Current status of the process 273 /// Current status of the process
287 ProcessStatus status; 274 ProcessStatus status;
288 275
@@ -327,9 +314,17 @@ private:
327 /// Per-process address arbiter. 314 /// Per-process address arbiter.
328 AddressArbiter address_arbiter; 315 AddressArbiter address_arbiter;
329 316
317 /// The per-process mutex lock instance used for handling various
318 /// forms of services, such as lock arbitration, and condition
319 /// variable related facilities.
320 Mutex mutex;
321
330 /// Random values for svcGetInfo RandomEntropy 322 /// Random values for svcGetInfo RandomEntropy
331 std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy; 323 std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy;
332 324
325 /// List of threads that are running with this process as their owner.
326 std::list<const Thread*> thread_list;
327
333 /// System context 328 /// System context
334 Core::System& system; 329 Core::System& system;
335 330
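
The new thread-list bookkeeping in Process (RegisterThread/UnregisterThread/GetThreadList) amounts to maintaining a list of non-owning pointers for the lifetime of each thread. A minimal sketch of that pattern, using a placeholder Thread type rather than the kernel's:

    #include <cassert>
    #include <list>

    struct Thread {}; // placeholder for the kernel thread type

    class Process {
    public:
        void RegisterThread(const Thread* thread) {
            thread_list.push_back(thread);
        }
        void UnregisterThread(const Thread* thread) {
            thread_list.remove(thread);
        }
        const std::list<const Thread*>& GetThreadList() const {
            return thread_list;
        }

    private:
        std::list<const Thread*> thread_list; // non-owning; threads unregister in Stop()
    };

    int main() {
        Process process;
        Thread a, b;
        process.RegisterThread(&a);
        process.RegisterThread(&b);
        process.UnregisterThread(&a);
        assert(process.GetThreadList().size() == 1);
    }
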
diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp
index 0e5083f70..c2b798a4e 100644
--- a/src/core/hle/kernel/readable_event.cpp
+++ b/src/core/hle/kernel/readable_event.cpp
@@ -14,7 +14,7 @@ namespace Kernel {
14ReadableEvent::ReadableEvent(KernelCore& kernel) : WaitObject{kernel} {} 14ReadableEvent::ReadableEvent(KernelCore& kernel) : WaitObject{kernel} {}
15ReadableEvent::~ReadableEvent() = default; 15ReadableEvent::~ReadableEvent() = default;
16 16
17bool ReadableEvent::ShouldWait(Thread* thread) const { 17bool ReadableEvent::ShouldWait(const Thread* thread) const {
18 return !signaled; 18 return !signaled;
19} 19}
20 20
diff --git a/src/core/hle/kernel/readable_event.h b/src/core/hle/kernel/readable_event.h
index 77a9c362c..2eb9dcbb7 100644
--- a/src/core/hle/kernel/readable_event.h
+++ b/src/core/hle/kernel/readable_event.h
@@ -36,7 +36,7 @@ public:
36 return HANDLE_TYPE; 36 return HANDLE_TYPE;
37 } 37 }
38 38
39 bool ShouldWait(Thread* thread) const override; 39 bool ShouldWait(const Thread* thread) const override;
40 void Acquire(Thread* thread) override; 40 void Acquire(Thread* thread) override;
41 41
42 /// Unconditionally clears the readable event's state. 42 /// Unconditionally clears the readable event's state.
diff --git a/src/core/hle/kernel/resource_limit.cpp b/src/core/hle/kernel/resource_limit.cpp
index 2f9695005..173f69915 100644
--- a/src/core/hle/kernel/resource_limit.cpp
+++ b/src/core/hle/kernel/resource_limit.cpp
@@ -16,11 +16,8 @@ constexpr std::size_t ResourceTypeToIndex(ResourceType type) {
16ResourceLimit::ResourceLimit(KernelCore& kernel) : Object{kernel} {} 16ResourceLimit::ResourceLimit(KernelCore& kernel) : Object{kernel} {}
17ResourceLimit::~ResourceLimit() = default; 17ResourceLimit::~ResourceLimit() = default;
18 18
19SharedPtr<ResourceLimit> ResourceLimit::Create(KernelCore& kernel, std::string name) { 19SharedPtr<ResourceLimit> ResourceLimit::Create(KernelCore& kernel) {
20 SharedPtr<ResourceLimit> resource_limit(new ResourceLimit(kernel)); 20 return new ResourceLimit(kernel);
21
22 resource_limit->name = std::move(name);
23 return resource_limit;
24} 21}
25 22
26s64 ResourceLimit::GetCurrentResourceValue(ResourceType resource) const { 23s64 ResourceLimit::GetCurrentResourceValue(ResourceType resource) const {
diff --git a/src/core/hle/kernel/resource_limit.h b/src/core/hle/kernel/resource_limit.h
index 59dc11c22..70e09858a 100644
--- a/src/core/hle/kernel/resource_limit.h
+++ b/src/core/hle/kernel/resource_limit.h
@@ -31,16 +31,14 @@ constexpr bool IsValidResourceType(ResourceType type) {
31 31
32class ResourceLimit final : public Object { 32class ResourceLimit final : public Object {
33public: 33public:
34 /** 34 /// Creates a resource limit object.
35 * Creates a resource limit object. 35 static SharedPtr<ResourceLimit> Create(KernelCore& kernel);
36 */
37 static SharedPtr<ResourceLimit> Create(KernelCore& kernel, std::string name = "Unknown");
38 36
39 std::string GetTypeName() const override { 37 std::string GetTypeName() const override {
40 return "ResourceLimit"; 38 return "ResourceLimit";
41 } 39 }
42 std::string GetName() const override { 40 std::string GetName() const override {
43 return name; 41 return GetTypeName();
44 } 42 }
45 43
46 static const HandleType HANDLE_TYPE = HandleType::ResourceLimit; 44 static const HandleType HANDLE_TYPE = HandleType::ResourceLimit;
@@ -95,9 +93,6 @@ private:
95 ResourceArray limits{}; 93 ResourceArray limits{};
96 /// Current resource limit values. 94 /// Current resource limit values.
97 ResourceArray values{}; 95 ResourceArray values{};
98
99 /// Name of resource limit object.
100 std::string name;
101}; 96};
102 97
103} // namespace Kernel 98} // namespace Kernel
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 5fccfd9f4..e8447b69a 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -29,8 +29,8 @@ Scheduler::~Scheduler() {
29} 29}
30 30
31bool Scheduler::HaveReadyThreads() const { 31bool Scheduler::HaveReadyThreads() const {
32 std::lock_guard<std::mutex> lock(scheduler_mutex); 32 std::lock_guard lock{scheduler_mutex};
33 return ready_queue.get_first() != nullptr; 33 return !ready_queue.empty();
34} 34}
35 35
36Thread* Scheduler::GetCurrentThread() const { 36Thread* Scheduler::GetCurrentThread() const {
@@ -46,22 +46,27 @@ Thread* Scheduler::PopNextReadyThread() {
46 Thread* thread = GetCurrentThread(); 46 Thread* thread = GetCurrentThread();
47 47
48 if (thread && thread->GetStatus() == ThreadStatus::Running) { 48 if (thread && thread->GetStatus() == ThreadStatus::Running) {
49 if (ready_queue.empty()) {
50 return thread;
51 }
49 // We have to do better than the current thread. 52 // We have to do better than the current thread.
50 // This call returns null when that's not possible. 53 // This call returns null when that's not possible.
51 next = ready_queue.pop_first_better(thread->GetPriority()); 54 next = ready_queue.front();
52 if (!next) { 55 if (next == nullptr || next->GetPriority() >= thread->GetPriority()) {
53 // Otherwise just keep going with the current thread
54 next = thread; 56 next = thread;
55 } 57 }
56 } else { 58 } else {
57 next = ready_queue.pop_first(); 59 if (ready_queue.empty()) {
60 return nullptr;
61 }
62 next = ready_queue.front();
58 } 63 }
59 64
60 return next; 65 return next;
61} 66}
62 67
63void Scheduler::SwitchContext(Thread* new_thread) { 68void Scheduler::SwitchContext(Thread* new_thread) {
64 Thread* const previous_thread = GetCurrentThread(); 69 Thread* previous_thread = GetCurrentThread();
65 Process* const previous_process = system.Kernel().CurrentProcess(); 70 Process* const previous_process = system.Kernel().CurrentProcess();
66 71
67 UpdateLastContextSwitchTime(previous_thread, previous_process); 72 UpdateLastContextSwitchTime(previous_thread, previous_process);
@@ -75,7 +80,7 @@ void Scheduler::SwitchContext(Thread* new_thread) {
75 if (previous_thread->GetStatus() == ThreadStatus::Running) { 80 if (previous_thread->GetStatus() == ThreadStatus::Running) {
76 // This is only the case when a reschedule is triggered without the current thread 81 // This is only the case when a reschedule is triggered without the current thread
77 // yielding execution (i.e. an event triggered, system core time-sliced, etc) 82 // yielding execution (i.e. an event triggered, system core time-sliced, etc)
78 ready_queue.push_front(previous_thread->GetPriority(), previous_thread); 83 ready_queue.add(previous_thread, previous_thread->GetPriority(), false);
79 previous_thread->SetStatus(ThreadStatus::Ready); 84 previous_thread->SetStatus(ThreadStatus::Ready);
80 } 85 }
81 } 86 }
@@ -90,13 +95,12 @@ void Scheduler::SwitchContext(Thread* new_thread) {
90 95
91 current_thread = new_thread; 96 current_thread = new_thread;
92 97
93 ready_queue.remove(new_thread->GetPriority(), new_thread); 98 ready_queue.remove(new_thread, new_thread->GetPriority());
94 new_thread->SetStatus(ThreadStatus::Running); 99 new_thread->SetStatus(ThreadStatus::Running);
95 100
96 auto* const thread_owner_process = current_thread->GetOwnerProcess(); 101 auto* const thread_owner_process = current_thread->GetOwnerProcess();
97 if (previous_process != thread_owner_process) { 102 if (previous_process != thread_owner_process) {
98 system.Kernel().MakeCurrentProcess(thread_owner_process); 103 system.Kernel().MakeCurrentProcess(thread_owner_process);
99 SetCurrentPageTable(&thread_owner_process->VMManager().page_table);
100 } 104 }
101 105
102 cpu_core.LoadContext(new_thread->GetContext()); 106 cpu_core.LoadContext(new_thread->GetContext());
@@ -127,7 +131,7 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
127} 131}
128 132
129void Scheduler::Reschedule() { 133void Scheduler::Reschedule() {
130 std::lock_guard<std::mutex> lock(scheduler_mutex); 134 std::lock_guard lock{scheduler_mutex};
131 135
132 Thread* cur = GetCurrentThread(); 136 Thread* cur = GetCurrentThread();
133 Thread* next = PopNextReadyThread(); 137 Thread* next = PopNextReadyThread();
@@ -143,51 +147,54 @@ void Scheduler::Reschedule() {
143 SwitchContext(next); 147 SwitchContext(next);
144} 148}
145 149
146void Scheduler::AddThread(SharedPtr<Thread> thread, u32 priority) { 150void Scheduler::AddThread(SharedPtr<Thread> thread) {
147 std::lock_guard<std::mutex> lock(scheduler_mutex); 151 std::lock_guard lock{scheduler_mutex};
148 152
149 thread_list.push_back(std::move(thread)); 153 thread_list.push_back(std::move(thread));
150 ready_queue.prepare(priority);
151} 154}
152 155
153void Scheduler::RemoveThread(Thread* thread) { 156void Scheduler::RemoveThread(Thread* thread) {
154 std::lock_guard<std::mutex> lock(scheduler_mutex); 157 std::lock_guard lock{scheduler_mutex};
155 158
156 thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), 159 thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
157 thread_list.end()); 160 thread_list.end());
158} 161}
159 162
160void Scheduler::ScheduleThread(Thread* thread, u32 priority) { 163void Scheduler::ScheduleThread(Thread* thread, u32 priority) {
161 std::lock_guard<std::mutex> lock(scheduler_mutex); 164 std::lock_guard lock{scheduler_mutex};
162 165
163 ASSERT(thread->GetStatus() == ThreadStatus::Ready); 166 ASSERT(thread->GetStatus() == ThreadStatus::Ready);
164 ready_queue.push_back(priority, thread); 167 ready_queue.add(thread, priority);
165} 168}
166 169
167void Scheduler::UnscheduleThread(Thread* thread, u32 priority) { 170void Scheduler::UnscheduleThread(Thread* thread, u32 priority) {
168 std::lock_guard<std::mutex> lock(scheduler_mutex); 171 std::lock_guard lock{scheduler_mutex};
169 172
170 ASSERT(thread->GetStatus() == ThreadStatus::Ready); 173 ASSERT(thread->GetStatus() == ThreadStatus::Ready);
171 ready_queue.remove(priority, thread); 174 ready_queue.remove(thread, priority);
172} 175}
173 176
174void Scheduler::SetThreadPriority(Thread* thread, u32 priority) { 177void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
175 std::lock_guard<std::mutex> lock(scheduler_mutex); 178 std::lock_guard lock{scheduler_mutex};
179 if (thread->GetPriority() == priority) {
180 return;
181 }
176 182
177 // If thread was ready, adjust queues 183 // If thread was ready, adjust queues
178 if (thread->GetStatus() == ThreadStatus::Ready) 184 if (thread->GetStatus() == ThreadStatus::Ready)
179 ready_queue.move(thread, thread->GetPriority(), priority); 185 ready_queue.adjust(thread, thread->GetPriority(), priority);
180 else
181 ready_queue.prepare(priority);
182} 186}
183 187
184Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const { 188Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const {
185 std::lock_guard<std::mutex> lock(scheduler_mutex); 189 std::lock_guard lock{scheduler_mutex};
186 190
187 const u32 mask = 1U << core; 191 const u32 mask = 1U << core;
188 return ready_queue.get_first_filter([mask, maximum_priority](Thread const* thread) { 192 for (auto* thread : ready_queue) {
189 return (thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority; 193 if ((thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority) {
190 }); 194 return thread;
195 }
196 }
197 return nullptr;
191} 198}
192 199
193void Scheduler::YieldWithoutLoadBalancing(Thread* thread) { 200void Scheduler::YieldWithoutLoadBalancing(Thread* thread) {
@@ -199,8 +206,7 @@ void Scheduler::YieldWithoutLoadBalancing(Thread* thread) {
199 ASSERT(thread->GetPriority() < THREADPRIO_COUNT); 206 ASSERT(thread->GetPriority() < THREADPRIO_COUNT);
200 207
201 // Yield this thread -- sleep for zero time and force reschedule to different thread 208 // Yield this thread -- sleep for zero time and force reschedule to different thread
202 WaitCurrentThread_Sleep(); 209 GetCurrentThread()->Sleep(0);
203 GetCurrentThread()->WakeAfterDelay(0);
204} 210}
205 211
206void Scheduler::YieldWithLoadBalancing(Thread* thread) { 212void Scheduler::YieldWithLoadBalancing(Thread* thread) {
@@ -215,8 +221,7 @@ void Scheduler::YieldWithLoadBalancing(Thread* thread) {
215 ASSERT(priority < THREADPRIO_COUNT); 221 ASSERT(priority < THREADPRIO_COUNT);
216 222
217 // Sleep for zero time to be able to force reschedule to different thread 223 // Sleep for zero time to be able to force reschedule to different thread
218 WaitCurrentThread_Sleep(); 224 GetCurrentThread()->Sleep(0);
219 GetCurrentThread()->WakeAfterDelay(0);
220 225
221 Thread* suggested_thread = nullptr; 226 Thread* suggested_thread = nullptr;
222 227
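
The rewritten PopNextReadyThread above only preempts the running thread when the front of the ready queue has a strictly higher priority (a lower number). A standalone sketch of that selection rule over simple per-priority buckets, standing in for Common::MultiLevelQueue; none of the names here are yuzu's:

    #include <array>
    #include <cassert>
    #include <deque>

    constexpr std::size_t kPriorityLevels = 64; // 0 = highest priority

    struct Thread {
        unsigned priority;
    };

    struct ReadyQueue {
        std::array<std::deque<Thread*>, kPriorityLevels> buckets;

        bool empty() const {
            for (const auto& bucket : buckets) {
                if (!bucket.empty()) {
                    return false;
                }
            }
            return true;
        }

        Thread* front() const {
            for (const auto& bucket : buckets) {
                if (!bucket.empty()) {
                    return bucket.front();
                }
            }
            return nullptr;
        }

        void add(Thread* t) {
            buckets[t->priority].push_back(t);
        }
    };

    // Mirrors the "do better than the current thread, otherwise keep it" rule.
    Thread* PickNext(Thread* running, const ReadyQueue& queue) {
        if (running != nullptr) {
            if (queue.empty()) {
                return running;
            }
            Thread* next = queue.front();
            return (next->priority >= running->priority) ? running : next;
        }
        return queue.empty() ? nullptr : queue.front();
    }

    int main() {
        ReadyQueue queue;
        Thread running{30}, same{30}, better{10};

        queue.add(&same);
        assert(PickNext(&running, queue) == &running); // equal priority keeps the running thread

        queue.add(&better);
        assert(PickNext(&running, queue) == &better);  // strictly higher priority preempts
    }
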
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index 1c5bf57d9..b29bf7be8 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -7,7 +7,7 @@
7#include <mutex> 7#include <mutex>
8#include <vector> 8#include <vector>
9#include "common/common_types.h" 9#include "common/common_types.h"
10#include "common/thread_queue_list.h" 10#include "common/multi_level_queue.h"
11#include "core/hle/kernel/object.h" 11#include "core/hle/kernel/object.h"
12#include "core/hle/kernel/thread.h" 12#include "core/hle/kernel/thread.h"
13 13
@@ -38,7 +38,7 @@ public:
38 u64 GetLastContextSwitchTicks() const; 38 u64 GetLastContextSwitchTicks() const;
39 39
40 /// Adds a new thread to the scheduler 40 /// Adds a new thread to the scheduler
41 void AddThread(SharedPtr<Thread> thread, u32 priority); 41 void AddThread(SharedPtr<Thread> thread);
42 42
43 /// Removes a thread from the scheduler 43 /// Removes a thread from the scheduler
44 void RemoveThread(Thread* thread); 44 void RemoveThread(Thread* thread);
@@ -156,7 +156,7 @@ private:
156 std::vector<SharedPtr<Thread>> thread_list; 156 std::vector<SharedPtr<Thread>> thread_list;
157 157
158 /// Lists only ready thread ids. 158 /// Lists only ready thread ids.
159 Common::ThreadQueueList<Thread*, THREADPRIO_LOWEST + 1> ready_queue; 159 Common::MultiLevelQueue<Thread*, THREADPRIO_LOWEST + 1> ready_queue;
160 160
161 SharedPtr<Thread> current_thread = nullptr; 161 SharedPtr<Thread> current_thread = nullptr;
162 162
diff --git a/src/core/hle/kernel/server_port.cpp b/src/core/hle/kernel/server_port.cpp
index 0e1515c89..02e7c60e6 100644
--- a/src/core/hle/kernel/server_port.cpp
+++ b/src/core/hle/kernel/server_port.cpp
@@ -30,7 +30,7 @@ void ServerPort::AppendPendingSession(SharedPtr<ServerSession> pending_session)
30 pending_sessions.push_back(std::move(pending_session)); 30 pending_sessions.push_back(std::move(pending_session));
31} 31}
32 32
33bool ServerPort::ShouldWait(Thread* thread) const { 33bool ServerPort::ShouldWait(const Thread* thread) const {
34 // If there are no pending sessions, we wait until a new one is added. 34 // If there are no pending sessions, we wait until a new one is added.
35 return pending_sessions.empty(); 35 return pending_sessions.empty();
36} 36}
@@ -39,9 +39,8 @@ void ServerPort::Acquire(Thread* thread) {
39 ASSERT_MSG(!ShouldWait(thread), "object unavailable!"); 39 ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
40} 40}
41 41
42std::tuple<SharedPtr<ServerPort>, SharedPtr<ClientPort>> ServerPort::CreatePortPair( 42ServerPort::PortPair ServerPort::CreatePortPair(KernelCore& kernel, u32 max_sessions,
43 KernelCore& kernel, u32 max_sessions, std::string name) { 43 std::string name) {
44
45 SharedPtr<ServerPort> server_port(new ServerPort(kernel)); 44 SharedPtr<ServerPort> server_port(new ServerPort(kernel));
46 SharedPtr<ClientPort> client_port(new ClientPort(kernel)); 45 SharedPtr<ClientPort> client_port(new ClientPort(kernel));
47 46
@@ -51,7 +50,7 @@ std::tuple<SharedPtr<ServerPort>, SharedPtr<ClientPort>> ServerPort::CreatePortP
51 client_port->max_sessions = max_sessions; 50 client_port->max_sessions = max_sessions;
52 client_port->active_sessions = 0; 51 client_port->active_sessions = 0;
53 52
54 return std::make_tuple(std::move(server_port), std::move(client_port)); 53 return std::make_pair(std::move(server_port), std::move(client_port));
55} 54}
56 55
57} // namespace Kernel 56} // namespace Kernel
diff --git a/src/core/hle/kernel/server_port.h b/src/core/hle/kernel/server_port.h
index 9bc667cf2..fef573b71 100644
--- a/src/core/hle/kernel/server_port.h
+++ b/src/core/hle/kernel/server_port.h
@@ -6,7 +6,7 @@
6 6
7#include <memory> 7#include <memory>
8#include <string> 8#include <string>
9#include <tuple> 9#include <utility>
10#include <vector> 10#include <vector>
11#include "common/common_types.h" 11#include "common/common_types.h"
12#include "core/hle/kernel/object.h" 12#include "core/hle/kernel/object.h"
@@ -23,6 +23,7 @@ class SessionRequestHandler;
23class ServerPort final : public WaitObject { 23class ServerPort final : public WaitObject {
24public: 24public:
25 using HLEHandler = std::shared_ptr<SessionRequestHandler>; 25 using HLEHandler = std::shared_ptr<SessionRequestHandler>;
26 using PortPair = std::pair<SharedPtr<ServerPort>, SharedPtr<ClientPort>>;
26 27
27 /** 28 /**
28 * Creates a pair of ServerPort and an associated ClientPort. 29 * Creates a pair of ServerPort and an associated ClientPort.
@@ -32,8 +33,8 @@ public:
32 * @param name Optional name of the ports 33 * @param name Optional name of the ports
33 * @return The created port tuple 34 * @return The created port tuple
34 */ 35 */
35 static std::tuple<SharedPtr<ServerPort>, SharedPtr<ClientPort>> CreatePortPair( 36 static PortPair CreatePortPair(KernelCore& kernel, u32 max_sessions,
36 KernelCore& kernel, u32 max_sessions, std::string name = "UnknownPort"); 37 std::string name = "UnknownPort");
37 38
38 std::string GetTypeName() const override { 39 std::string GetTypeName() const override {
39 return "ServerPort"; 40 return "ServerPort";
@@ -75,7 +76,7 @@ public:
75 /// waiting to be accepted by this port. 76 /// waiting to be accepted by this port.
76 void AppendPendingSession(SharedPtr<ServerSession> pending_session); 77 void AppendPendingSession(SharedPtr<ServerSession> pending_session);
77 78
78 bool ShouldWait(Thread* thread) const override; 79 bool ShouldWait(const Thread* thread) const override;
79 void Acquire(Thread* thread) override; 80 void Acquire(Thread* thread) override;
80 81
81private: 82private:
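
Since CreatePortPair now returns a std::pair instead of a std::tuple, call sites can bind both handles directly with structured bindings. A small sketch of that call-site shape; the types and the factory function below are illustrative stand-ins, not the kernel's:

    #include <memory>
    #include <string>
    #include <utility>

    struct ServerPort {};
    struct ClientPort {};

    using PortPair = std::pair<std::shared_ptr<ServerPort>, std::shared_ptr<ClientPort>>;

    // Stand-in for ServerPort::CreatePortPair().
    PortPair CreatePortPair([[maybe_unused]] unsigned max_sessions,
                            [[maybe_unused]] std::string name = "UnknownPort") {
        return std::make_pair(std::make_shared<ServerPort>(), std::make_shared<ClientPort>());
    }

    int main() {
        // Structured bindings work for either pair or tuple, but a pair documents
        // that exactly two related objects are produced.
        auto [server, client] = CreatePortPair(32, "sm:");
        return server && client ? 0 : 1;
    }
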
diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp
index 4d8a337a7..a6b2cf06a 100644
--- a/src/core/hle/kernel/server_session.cpp
+++ b/src/core/hle/kernel/server_session.cpp
@@ -46,7 +46,7 @@ ResultVal<SharedPtr<ServerSession>> ServerSession::Create(KernelCore& kernel, st
46 return MakeResult(std::move(server_session)); 46 return MakeResult(std::move(server_session));
47} 47}
48 48
49bool ServerSession::ShouldWait(Thread* thread) const { 49bool ServerSession::ShouldWait(const Thread* thread) const {
50 // Closed sessions should never wait, an error will be returned from svcReplyAndReceive. 50 // Closed sessions should never wait, an error will be returned from svcReplyAndReceive.
51 if (parent->client == nullptr) 51 if (parent->client == nullptr)
52 return false; 52 return false;
@@ -204,6 +204,6 @@ ServerSession::SessionPair ServerSession::CreateSessionPair(KernelCore& kernel,
204 client_session->parent = parent; 204 client_session->parent = parent;
205 server_session->parent = parent; 205 server_session->parent = parent;
206 206
207 return std::make_tuple(std::move(server_session), std::move(client_session)); 207 return std::make_pair(std::move(server_session), std::move(client_session));
208} 208}
209} // namespace Kernel 209} // namespace Kernel
diff --git a/src/core/hle/kernel/server_session.h b/src/core/hle/kernel/server_session.h
index aea4ccfeb..09b835ff8 100644
--- a/src/core/hle/kernel/server_session.h
+++ b/src/core/hle/kernel/server_session.h
@@ -6,6 +6,7 @@
6 6
7#include <memory> 7#include <memory>
8#include <string> 8#include <string>
9#include <utility>
9#include <vector> 10#include <vector>
10 11
11#include "core/hle/kernel/object.h" 12#include "core/hle/kernel/object.h"
@@ -41,6 +42,10 @@ public:
41 return "ServerSession"; 42 return "ServerSession";
42 } 43 }
43 44
45 std::string GetName() const override {
46 return name;
47 }
48
44 static const HandleType HANDLE_TYPE = HandleType::ServerSession; 49 static const HandleType HANDLE_TYPE = HandleType::ServerSession;
45 HandleType GetHandleType() const override { 50 HandleType GetHandleType() const override {
46 return HANDLE_TYPE; 51 return HANDLE_TYPE;
@@ -54,7 +59,7 @@ public:
54 return parent.get(); 59 return parent.get();
55 } 60 }
56 61
57 using SessionPair = std::tuple<SharedPtr<ServerSession>, SharedPtr<ClientSession>>; 62 using SessionPair = std::pair<SharedPtr<ServerSession>, SharedPtr<ClientSession>>;
58 63
59 /** 64 /**
60 * Creates a pair of ServerSession and an associated ClientSession. 65 * Creates a pair of ServerSession and an associated ClientSession.
@@ -82,7 +87,7 @@ public:
82 */ 87 */
83 ResultCode HandleSyncRequest(SharedPtr<Thread> thread); 88 ResultCode HandleSyncRequest(SharedPtr<Thread> thread);
84 89
85 bool ShouldWait(Thread* thread) const override; 90 bool ShouldWait(const Thread* thread) const override;
86 91
87 void Acquire(Thread* thread) override; 92 void Acquire(Thread* thread) override;
88 93
diff --git a/src/core/hle/kernel/shared_memory.cpp b/src/core/hle/kernel/shared_memory.cpp
index 62861da36..f15c5ee36 100644
--- a/src/core/hle/kernel/shared_memory.cpp
+++ b/src/core/hle/kernel/shared_memory.cpp
@@ -9,7 +9,6 @@
9#include "core/hle/kernel/errors.h" 9#include "core/hle/kernel/errors.h"
10#include "core/hle/kernel/kernel.h" 10#include "core/hle/kernel/kernel.h"
11#include "core/hle/kernel/shared_memory.h" 11#include "core/hle/kernel/shared_memory.h"
12#include "core/memory.h"
13 12
14namespace Kernel { 13namespace Kernel {
15 14
@@ -119,7 +118,15 @@ ResultCode SharedMemory::Map(Process& target_process, VAddr address, MemoryPermi
119 ConvertPermissions(permissions)); 118 ConvertPermissions(permissions));
120} 119}
121 120
122ResultCode SharedMemory::Unmap(Process& target_process, VAddr address) { 121ResultCode SharedMemory::Unmap(Process& target_process, VAddr address, u64 unmap_size) {
122 if (unmap_size != size) {
123 LOG_ERROR(Kernel,
124 "Invalid size passed to Unmap. Size must be equal to the size of the "
125 "memory managed. Shared memory size=0x{:016X}, Unmap size=0x{:016X}",
126 size, unmap_size);
127 return ERR_INVALID_SIZE;
128 }
129
123 // TODO(Subv): Verify what happens if the application tries to unmap an address that is not 130 // TODO(Subv): Verify what happens if the application tries to unmap an address that is not
124 // mapped to a SharedMemory. 131 // mapped to a SharedMemory.
125 return target_process.VMManager().UnmapRange(address, size); 132 return target_process.VMManager().UnmapRange(address, size);
diff --git a/src/core/hle/kernel/shared_memory.h b/src/core/hle/kernel/shared_memory.h
index dab2a6bea..37e18c443 100644
--- a/src/core/hle/kernel/shared_memory.h
+++ b/src/core/hle/kernel/shared_memory.h
@@ -104,11 +104,17 @@ public:
104 104
105 /** 105 /**
106 * Unmaps a shared memory block from the specified address in system memory 106 * Unmaps a shared memory block from the specified address in system memory
107 *
107 * @param target_process Process from which to unmap the memory block. 108 * @param target_process Process from which to unmap the memory block.
108 * @param address Address in system memory where the shared memory block is mapped 109 * @param address Address in system memory where the shared memory block is mapped.
110     * @param unmap_size The number of bytes to unmap from this shared memory instance.
111 *
109 * @return Result code of the unmap operation 112 * @return Result code of the unmap operation
113 *
114 * @pre The given size to unmap must be the same size as the amount of memory managed by
115 * the SharedMemory instance itself, otherwise ERR_INVALID_SIZE will be returned.
110 */ 116 */
111 ResultCode Unmap(Process& target_process, VAddr address); 117 ResultCode Unmap(Process& target_process, VAddr address, u64 unmap_size);
112 118
113 /** 119 /**
114 * Gets a pointer to the shared memory block 120 * Gets a pointer to the shared memory block
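
The new Unmap precondition above can be read as: the caller must hand back exactly as many bytes as the block manages, otherwise ERR_INVALID_SIZE is returned. A tiny standalone sketch of that contract check, with placeholder result and block types:

    #include <cassert>
    #include <cstdint>

    enum class Result { Success, InvalidSize };

    struct SharedMemoryBlock {
        std::uint64_t size;

        Result Unmap(std::uint64_t unmap_size) const {
            if (unmap_size != size) {
                return Result::InvalidSize; // mirrors ERR_INVALID_SIZE in the hunk above
            }
            // ... unmap the whole managed range here ...
            return Result::Success;
        }
    };

    int main() {
        const SharedMemoryBlock block{0x2000};
        assert(block.Unmap(0x1000) == Result::InvalidSize);
        assert(block.Unmap(0x2000) == Result::Success);
    }
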
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 77d0e3d96..2fd07ab34 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -32,6 +32,7 @@
32#include "core/hle/kernel/svc.h" 32#include "core/hle/kernel/svc.h"
33#include "core/hle/kernel/svc_wrap.h" 33#include "core/hle/kernel/svc_wrap.h"
34#include "core/hle/kernel/thread.h" 34#include "core/hle/kernel/thread.h"
35#include "core/hle/kernel/transfer_memory.h"
35#include "core/hle/kernel/writable_event.h" 36#include "core/hle/kernel/writable_event.h"
36#include "core/hle/lock.h" 37#include "core/hle/lock.h"
37#include "core/hle/result.h" 38#include "core/hle/result.h"
@@ -174,11 +175,8 @@ static ResultCode SetHeapSize(VAddr* heap_addr, u64 heap_size) {
174 return ERR_INVALID_SIZE; 175 return ERR_INVALID_SIZE;
175 } 176 }
176 177
177 auto& vm_manager = Core::CurrentProcess()->VMManager(); 178 auto& vm_manager = Core::System::GetInstance().Kernel().CurrentProcess()->VMManager();
178 const VAddr heap_base = vm_manager.GetHeapRegionBaseAddress(); 179 const auto alloc_result = vm_manager.SetHeapSize(heap_size);
179 const auto alloc_result =
180 vm_manager.HeapAllocate(heap_base, heap_size, VMAPermission::ReadWrite);
181
182 if (alloc_result.Failed()) { 180 if (alloc_result.Failed()) {
183 return alloc_result.Code(); 181 return alloc_result.Code();
184 } 182 }
@@ -551,9 +549,9 @@ static ResultCode ArbitrateLock(Handle holding_thread_handle, VAddr mutex_addr,
551 return ERR_INVALID_ADDRESS; 549 return ERR_INVALID_ADDRESS;
552 } 550 }
553 551
554 auto& handle_table = Core::CurrentProcess()->GetHandleTable(); 552 auto* const current_process = Core::System::GetInstance().Kernel().CurrentProcess();
555 return Mutex::TryAcquire(handle_table, mutex_addr, holding_thread_handle, 553 return current_process->GetMutex().TryAcquire(mutex_addr, holding_thread_handle,
556 requesting_thread_handle); 554 requesting_thread_handle);
557} 555}
558 556
559/// Unlock a mutex 557/// Unlock a mutex
@@ -571,7 +569,8 @@ static ResultCode ArbitrateUnlock(VAddr mutex_addr) {
571 return ERR_INVALID_ADDRESS; 569 return ERR_INVALID_ADDRESS;
572 } 570 }
573 571
574 return Mutex::Release(mutex_addr); 572 auto* const current_process = Core::System::GetInstance().Kernel().CurrentProcess();
573 return current_process->GetMutex().Release(mutex_addr);
575} 574}
576 575
577enum class BreakType : u32 { 576enum class BreakType : u32 {
@@ -710,7 +709,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
710 HeapRegionBaseAddr = 4, 709 HeapRegionBaseAddr = 4,
711 HeapRegionSize = 5, 710 HeapRegionSize = 5,
712 TotalMemoryUsage = 6, 711 TotalMemoryUsage = 6,
713 TotalHeapUsage = 7, 712 TotalPhysicalMemoryUsed = 7,
714 IsCurrentProcessBeingDebugged = 8, 713 IsCurrentProcessBeingDebugged = 8,
715 RegisterResourceLimit = 9, 714 RegisterResourceLimit = 9,
716 IdleTickCount = 10, 715 IdleTickCount = 10,
@@ -746,7 +745,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
746 case GetInfoType::NewMapRegionBaseAddr: 745 case GetInfoType::NewMapRegionBaseAddr:
747 case GetInfoType::NewMapRegionSize: 746 case GetInfoType::NewMapRegionSize:
748 case GetInfoType::TotalMemoryUsage: 747 case GetInfoType::TotalMemoryUsage:
749 case GetInfoType::TotalHeapUsage: 748 case GetInfoType::TotalPhysicalMemoryUsed:
750 case GetInfoType::IsVirtualAddressMemoryEnabled: 749 case GetInfoType::IsVirtualAddressMemoryEnabled:
751 case GetInfoType::PersonalMmHeapUsage: 750 case GetInfoType::PersonalMmHeapUsage:
752 case GetInfoType::TitleId: 751 case GetInfoType::TitleId:
@@ -806,8 +805,8 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
806 *result = process->VMManager().GetTotalMemoryUsage(); 805 *result = process->VMManager().GetTotalMemoryUsage();
807 return RESULT_SUCCESS; 806 return RESULT_SUCCESS;
808 807
809 case GetInfoType::TotalHeapUsage: 808 case GetInfoType::TotalPhysicalMemoryUsed:
810 *result = process->VMManager().GetTotalHeapUsage(); 809 *result = process->GetTotalPhysicalMemoryUsed();
811 return RESULT_SUCCESS; 810 return RESULT_SUCCESS;
812 811
813 case GetInfoType::IsVirtualAddressMemoryEnabled: 812 case GetInfoType::IsVirtualAddressMemoryEnabled:
@@ -1141,7 +1140,7 @@ static ResultCode UnmapSharedMemory(Handle shared_memory_handle, VAddr addr, u64
1141 return ERR_INVALID_MEMORY_RANGE; 1140 return ERR_INVALID_MEMORY_RANGE;
1142 } 1141 }
1143 1142
1144 return shared_memory->Unmap(*current_process, addr); 1143 return shared_memory->Unmap(*current_process, addr, size);
1145} 1144}
1146 1145
1147static ResultCode QueryProcessMemory(VAddr memory_info_address, VAddr page_info_address, 1146static ResultCode QueryProcessMemory(VAddr memory_info_address, VAddr page_info_address,
@@ -1284,10 +1283,14 @@ static ResultCode StartThread(Handle thread_handle) {
1284 1283
1285/// Called when a thread exits 1284/// Called when a thread exits
1286static void ExitThread() { 1285static void ExitThread() {
1287 LOG_TRACE(Kernel_SVC, "called, pc=0x{:08X}", Core::CurrentArmInterface().GetPC()); 1286 auto& system = Core::System::GetInstance();
1288 1287
1289 ExitCurrentThread(); 1288 LOG_TRACE(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC());
1290 Core::System::GetInstance().PrepareReschedule(); 1289
1290 auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
1291 current_thread->Stop();
1292 system.CurrentScheduler().RemoveThread(current_thread);
1293 system.PrepareReschedule();
1291} 1294}
1292 1295
1293/// Sleep the current thread 1296/// Sleep the current thread
@@ -1300,32 +1303,32 @@ static void SleepThread(s64 nanoseconds) {
1300 YieldAndWaitForLoadBalancing = -2, 1303 YieldAndWaitForLoadBalancing = -2,
1301 }; 1304 };
1302 1305
1306 auto& system = Core::System::GetInstance();
1307 auto& scheduler = system.CurrentScheduler();
1308 auto* const current_thread = scheduler.GetCurrentThread();
1309
1303 if (nanoseconds <= 0) { 1310 if (nanoseconds <= 0) {
1304 auto& scheduler{Core::System::GetInstance().CurrentScheduler()};
1305 switch (static_cast<SleepType>(nanoseconds)) { 1311 switch (static_cast<SleepType>(nanoseconds)) {
1306 case SleepType::YieldWithoutLoadBalancing: 1312 case SleepType::YieldWithoutLoadBalancing:
1307 scheduler.YieldWithoutLoadBalancing(GetCurrentThread()); 1313 scheduler.YieldWithoutLoadBalancing(current_thread);
1308 break; 1314 break;
1309 case SleepType::YieldWithLoadBalancing: 1315 case SleepType::YieldWithLoadBalancing:
1310 scheduler.YieldWithLoadBalancing(GetCurrentThread()); 1316 scheduler.YieldWithLoadBalancing(current_thread);
1311 break; 1317 break;
1312 case SleepType::YieldAndWaitForLoadBalancing: 1318 case SleepType::YieldAndWaitForLoadBalancing:
1313 scheduler.YieldAndWaitForLoadBalancing(GetCurrentThread()); 1319 scheduler.YieldAndWaitForLoadBalancing(current_thread);
1314 break; 1320 break;
1315 default: 1321 default:
1316 UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds); 1322 UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
1317 } 1323 }
1318 } else { 1324 } else {
1319 // Sleep current thread and check for next thread to schedule 1325 current_thread->Sleep(nanoseconds);
1320 WaitCurrentThread_Sleep();
1321
1322 // Create an event to wake the thread up after the specified nanosecond delay has passed
1323 GetCurrentThread()->WakeAfterDelay(nanoseconds);
1324 } 1326 }
1325 1327
1326 // Reschedule all CPU cores 1328 // Reschedule all CPU cores
1327 for (std::size_t i = 0; i < Core::NUM_CPU_CORES; ++i) 1329 for (std::size_t i = 0; i < Core::NUM_CPU_CORES; ++i) {
1328 Core::System::GetInstance().CpuCore(i).PrepareReschedule(); 1330 system.CpuCore(i).PrepareReschedule();
1331 }
1329} 1332}
1330 1333
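
svcSleepThread overloads non-positive durations as yield hints, which the hunk above dispatches on. A minimal sketch of that decoding; the -2 value matches the enum shown above, while 0 and -1 for the other two yield kinds are assumed here, and everything else is illustrative:

    #include <cassert>
    #include <cstdint>

    enum class SleepType : std::int64_t {
        YieldWithoutLoadBalancing = 0,
        YieldWithLoadBalancing = -1,
        YieldAndWaitForLoadBalancing = -2,
    };

    enum class Action { Sleep, YieldSameCore, YieldLoadBalance, YieldWaitLoadBalance, Invalid };

    Action DecodeSleep(std::int64_t nanoseconds) {
        if (nanoseconds > 0) {
            return Action::Sleep;
        }
        switch (static_cast<SleepType>(nanoseconds)) {
        case SleepType::YieldWithoutLoadBalancing:
            return Action::YieldSameCore;
        case SleepType::YieldWithLoadBalancing:
            return Action::YieldLoadBalance;
        case SleepType::YieldAndWaitForLoadBalancing:
            return Action::YieldWaitLoadBalance;
        default:
            return Action::Invalid;
        }
    }

    int main() {
        assert(DecodeSleep(1'000'000) == Action::Sleep);
        assert(DecodeSleep(0) == Action::YieldSameCore);
        assert(DecodeSleep(-2) == Action::YieldWaitLoadBalance);
    }
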
1331/// Wait process wide key atomic 1334/// Wait process wide key atomic
@@ -1336,17 +1339,35 @@ static ResultCode WaitProcessWideKeyAtomic(VAddr mutex_addr, VAddr condition_var
1336 "called mutex_addr={:X}, condition_variable_addr={:X}, thread_handle=0x{:08X}, timeout={}", 1339 "called mutex_addr={:X}, condition_variable_addr={:X}, thread_handle=0x{:08X}, timeout={}",
1337 mutex_addr, condition_variable_addr, thread_handle, nano_seconds); 1340 mutex_addr, condition_variable_addr, thread_handle, nano_seconds);
1338 1341
1339 const auto& handle_table = Core::CurrentProcess()->GetHandleTable(); 1342 if (Memory::IsKernelVirtualAddress(mutex_addr)) {
1343 LOG_ERROR(
1344 Kernel_SVC,
1345 "Given mutex address must not be within the kernel address space. address=0x{:016X}",
1346 mutex_addr);
1347 return ERR_INVALID_ADDRESS_STATE;
1348 }
1349
1350 if (!Common::IsWordAligned(mutex_addr)) {
1351 LOG_ERROR(Kernel_SVC, "Given mutex address must be word-aligned. address=0x{:016X}",
1352 mutex_addr);
1353 return ERR_INVALID_ADDRESS;
1354 }
1355
1356 auto* const current_process = Core::System::GetInstance().Kernel().CurrentProcess();
1357 const auto& handle_table = current_process->GetHandleTable();
1340 SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle); 1358 SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle);
1341 ASSERT(thread); 1359 ASSERT(thread);
1342 1360
1343 CASCADE_CODE(Mutex::Release(mutex_addr)); 1361 const auto release_result = current_process->GetMutex().Release(mutex_addr);
1362 if (release_result.IsError()) {
1363 return release_result;
1364 }
1344 1365
1345 SharedPtr<Thread> current_thread = GetCurrentThread(); 1366 SharedPtr<Thread> current_thread = GetCurrentThread();
1346 current_thread->SetCondVarWaitAddress(condition_variable_addr); 1367 current_thread->SetCondVarWaitAddress(condition_variable_addr);
1347 current_thread->SetMutexWaitAddress(mutex_addr); 1368 current_thread->SetMutexWaitAddress(mutex_addr);
1348 current_thread->SetWaitHandle(thread_handle); 1369 current_thread->SetWaitHandle(thread_handle);
1349 current_thread->SetStatus(ThreadStatus::WaitMutex); 1370 current_thread->SetStatus(ThreadStatus::WaitCondVar);
1350 current_thread->InvalidateWakeupCallback(); 1371 current_thread->InvalidateWakeupCallback();
1351 1372
1352 current_thread->WakeAfterDelay(nano_seconds); 1373 current_thread->WakeAfterDelay(nano_seconds);
@@ -1390,10 +1411,10 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
1390 // them all. 1411 // them all.
1391 std::size_t last = waiting_threads.size(); 1412 std::size_t last = waiting_threads.size();
1392 if (target != -1) 1413 if (target != -1)
1393 last = target; 1414 last = std::min(waiting_threads.size(), static_cast<std::size_t>(target));
1394 1415
1395 // If there are no threads waiting on this condition variable, just exit 1416 // If there are no threads waiting on this condition variable, just exit
1396 if (last > waiting_threads.size()) 1417 if (last == 0)
1397 return RESULT_SUCCESS; 1418 return RESULT_SUCCESS;
1398 1419
1399 for (std::size_t index = 0; index < last; ++index) { 1420 for (std::size_t index = 0; index < last; ++index) {
@@ -1401,6 +1422,9 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
1401 1422
1402 ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr); 1423 ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr);
1403 1424
1425        // Release the woken thread from the condition variable.
1426 thread->SetCondVarWaitAddress(0);
1427
1404 std::size_t current_core = Core::System::GetInstance().CurrentCoreIndex(); 1428 std::size_t current_core = Core::System::GetInstance().CurrentCoreIndex();
1405 1429
1406 auto& monitor = Core::System::GetInstance().Monitor(); 1430 auto& monitor = Core::System::GetInstance().Monitor();
@@ -1419,10 +1443,9 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
1419 } 1443 }
1420 } while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(), 1444 } while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(),
1421 thread->GetWaitHandle())); 1445 thread->GetWaitHandle()));
1422
1423 if (mutex_val == 0) { 1446 if (mutex_val == 0) {
1424 // We were able to acquire the mutex, resume this thread. 1447 // We were able to acquire the mutex, resume this thread.
1425 ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex); 1448 ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar);
1426 thread->ResumeFromWait(); 1449 thread->ResumeFromWait();
1427 1450
1428 auto* const lock_owner = thread->GetLockOwner(); 1451 auto* const lock_owner = thread->GetLockOwner();
@@ -1432,8 +1455,8 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
1432 1455
1433 thread->SetLockOwner(nullptr); 1456 thread->SetLockOwner(nullptr);
1434 thread->SetMutexWaitAddress(0); 1457 thread->SetMutexWaitAddress(0);
1435 thread->SetCondVarWaitAddress(0);
1436 thread->SetWaitHandle(0); 1458 thread->SetWaitHandle(0);
1459 Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
1437 } else { 1460 } else {
1438 // Atomically signal that the mutex now has a waiting thread. 1461 // Atomically signal that the mutex now has a waiting thread.
1439 do { 1462 do {
@@ -1452,12 +1475,11 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
1452 const auto& handle_table = Core::CurrentProcess()->GetHandleTable(); 1475 const auto& handle_table = Core::CurrentProcess()->GetHandleTable();
1453 auto owner = handle_table.Get<Thread>(owner_handle); 1476 auto owner = handle_table.Get<Thread>(owner_handle);
1454 ASSERT(owner); 1477 ASSERT(owner);
1455 ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex); 1478 ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar);
1456 thread->InvalidateWakeupCallback(); 1479 thread->InvalidateWakeupCallback();
1480 thread->SetStatus(ThreadStatus::WaitMutex);
1457 1481
1458 owner->AddMutexWaiter(thread); 1482 owner->AddMutexWaiter(thread);
1459
1460 Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
1461 } 1483 }
1462 } 1484 }
1463 1485
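
The wake path in SignalProcessWideKey above boils down to: try to write the waiter's handle into the free mutex word; if it was free the thread resumes as the new owner, otherwise flag the word as contended and queue the thread on the current owner. A standalone sketch of that hand-off with std::atomic; the flag bit and types are illustrative assumptions, not the kernel's exact encoding:

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    constexpr std::uint32_t kHasWaitersFlag = 0x40000000; // illustrative flag bit

    enum class WakeResult { AcquiredMutex, QueuedOnOwner };

    // Give the freshly woken thread the mutex if it is free, otherwise mark it
    // contended so the owner knows to signal on release.
    WakeResult WakeOneWaiter(std::atomic<std::uint32_t>& mutex_word, std::uint32_t waiter_handle) {
        std::uint32_t expected = 0;
        if (mutex_word.compare_exchange_strong(expected, waiter_handle)) {
            return WakeResult::AcquiredMutex; // thread resumes and owns the mutex
        }
        mutex_word.fetch_or(kHasWaitersFlag);
        return WakeResult::QueuedOnOwner;
    }

    int main() {
        std::atomic<std::uint32_t> mutex_word{0};
        assert(WakeOneWaiter(mutex_word, 0x1234) == WakeResult::AcquiredMutex);
        assert(WakeOneWaiter(mutex_word, 0x5678) == WakeResult::QueuedOnOwner);
        assert((mutex_word.load() & kHasWaitersFlag) != 0);
    }
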
@@ -1577,14 +1599,121 @@ static ResultCode CreateTransferMemory(Handle* handle, VAddr addr, u64 size, u32
1577 } 1599 }
1578 1600
1579 auto& kernel = Core::System::GetInstance().Kernel(); 1601 auto& kernel = Core::System::GetInstance().Kernel();
1580 auto process = kernel.CurrentProcess(); 1602 auto transfer_mem_handle = TransferMemory::Create(kernel, addr, size, perms);
1581 auto& handle_table = process->GetHandleTable();
1582 const auto shared_mem_handle = SharedMemory::Create(kernel, process, size, perms, perms, addr);
1583 1603
1584 CASCADE_RESULT(*handle, handle_table.Create(shared_mem_handle)); 1604 auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
1605 const auto result = handle_table.Create(std::move(transfer_mem_handle));
1606 if (result.Failed()) {
1607 return result.Code();
1608 }
1609
1610 *handle = *result;
1585 return RESULT_SUCCESS; 1611 return RESULT_SUCCESS;
1586} 1612}
1587 1613
1614static ResultCode MapTransferMemory(Handle handle, VAddr address, u64 size, u32 permission_raw) {
1615 LOG_DEBUG(Kernel_SVC,
1616 "called. handle=0x{:08X}, address=0x{:016X}, size=0x{:016X}, permissions=0x{:08X}",
1617 handle, address, size, permission_raw);
1618
1619 if (!Common::Is4KBAligned(address)) {
1620        LOG_ERROR(Kernel_SVC, "Transfer memory addresses must be 4KB aligned (address=0x{:016X}).",
1621 address);
1622 return ERR_INVALID_ADDRESS;
1623 }
1624
1625 if (size == 0 || !Common::Is4KBAligned(size)) {
1626 LOG_ERROR(Kernel_SVC,
1627 "Transfer memory sizes must be 4KB aligned and not be zero (size=0x{:016X}).",
1628 size);
1629 return ERR_INVALID_SIZE;
1630 }
1631
1632 if (!IsValidAddressRange(address, size)) {
1633 LOG_ERROR(Kernel_SVC,
1634 "Given address and size overflows the 64-bit range (address=0x{:016X}, "
1635 "size=0x{:016X}).",
1636 address, size);
1637 return ERR_INVALID_ADDRESS_STATE;
1638 }
1639
1640 const auto permissions = static_cast<MemoryPermission>(permission_raw);
1641 if (permissions != MemoryPermission::None && permissions != MemoryPermission::Read &&
1642 permissions != MemoryPermission::ReadWrite) {
1643 LOG_ERROR(Kernel_SVC, "Invalid transfer memory permissions given (permissions=0x{:08X}).",
1644 permission_raw);
1645 return ERR_INVALID_STATE;
1646 }
1647
1648 const auto& kernel = Core::System::GetInstance().Kernel();
1649 const auto* const current_process = kernel.CurrentProcess();
1650 const auto& handle_table = current_process->GetHandleTable();
1651
1652 auto transfer_memory = handle_table.Get<TransferMemory>(handle);
1653 if (!transfer_memory) {
1654 LOG_ERROR(Kernel_SVC, "Nonexistent transfer memory handle given (handle=0x{:08X}).",
1655 handle);
1656 return ERR_INVALID_HANDLE;
1657 }
1658
1659 if (!current_process->VMManager().IsWithinASLRRegion(address, size)) {
1660 LOG_ERROR(Kernel_SVC,
1661 "Given address and size don't fully fit within the ASLR region "
1662 "(address=0x{:016X}, size=0x{:016X}).",
1663 address, size);
1664 return ERR_INVALID_MEMORY_RANGE;
1665 }
1666
1667 return transfer_memory->MapMemory(address, size, permissions);
1668}
1669
1670static ResultCode UnmapTransferMemory(Handle handle, VAddr address, u64 size) {
1671 LOG_DEBUG(Kernel_SVC, "called. handle=0x{:08X}, address=0x{:016X}, size=0x{:016X}", handle,
1672 address, size);
1673
1674 if (!Common::Is4KBAligned(address)) {
1675        LOG_ERROR(Kernel_SVC, "Transfer memory addresses must be 4KB aligned (address=0x{:016X}).",
1676 address);
1677 return ERR_INVALID_ADDRESS;
1678 }
1679
1680 if (size == 0 || !Common::Is4KBAligned(size)) {
1681 LOG_ERROR(Kernel_SVC,
1682 "Transfer memory sizes must be 4KB aligned and not be zero (size=0x{:016X}).",
1683 size);
1684 return ERR_INVALID_SIZE;
1685 }
1686
1687 if (!IsValidAddressRange(address, size)) {
1688 LOG_ERROR(Kernel_SVC,
1689 "Given address and size overflows the 64-bit range (address=0x{:016X}, "
1690 "size=0x{:016X}).",
1691 address, size);
1692 return ERR_INVALID_ADDRESS_STATE;
1693 }
1694
1695 const auto& kernel = Core::System::GetInstance().Kernel();
1696 const auto* const current_process = kernel.CurrentProcess();
1697 const auto& handle_table = current_process->GetHandleTable();
1698
1699 auto transfer_memory = handle_table.Get<TransferMemory>(handle);
1700 if (!transfer_memory) {
1701 LOG_ERROR(Kernel_SVC, "Nonexistent transfer memory handle given (handle=0x{:08X}).",
1702 handle);
1703 return ERR_INVALID_HANDLE;
1704 }
1705
1706 if (!current_process->VMManager().IsWithinASLRRegion(address, size)) {
1707 LOG_ERROR(Kernel_SVC,
1708 "Given address and size don't fully fit within the ASLR region "
1709 "(address=0x{:016X}, size=0x{:016X}).",
1710 address, size);
1711 return ERR_INVALID_MEMORY_RANGE;
1712 }
1713
1714 return transfer_memory->UnmapMemory(address, size);
1715}
1716
1588static ResultCode GetThreadCoreMask(Handle thread_handle, u32* core, u64* mask) { 1717static ResultCode GetThreadCoreMask(Handle thread_handle, u32* core, u64* mask) {
1589 LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle); 1718 LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);
1590 1719
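
Both transfer-memory SVCs above repeat the same argument validation: 4 KiB alignment of address and size, a non-zero size, and no 64-bit overflow of address + size. A compact sketch of those checks; the helper names are stand-ins for the Common:: utilities used in the diff:

    #include <cassert>
    #include <cstdint>

    constexpr bool Is4KBAligned(std::uint64_t value) {
        return (value & 0xFFF) == 0;
    }

    // True when [address, address + size) does not wrap around the 64-bit space.
    constexpr bool IsValidAddressRange(std::uint64_t address, std::uint64_t size) {
        return address + size > address;
    }

    constexpr bool ValidateTransferMemoryArgs(std::uint64_t address, std::uint64_t size) {
        return Is4KBAligned(address) && size != 0 && Is4KBAligned(size) &&
               IsValidAddressRange(address, size);
    }

    int main() {
        assert(ValidateTransferMemoryArgs(0x10000, 0x2000));
        assert(!ValidateTransferMemoryArgs(0x10001, 0x2000));               // unaligned address
        assert(!ValidateTransferMemoryArgs(0x10000, 0));                    // zero size
        assert(!ValidateTransferMemoryArgs(0xFFFFFFFFFFFFF000ULL, 0x2000)); // wraps around
    }
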
@@ -1868,6 +1997,83 @@ static ResultCode SetResourceLimitLimitValue(Handle resource_limit, u32 resource
1868 return RESULT_SUCCESS; 1997 return RESULT_SUCCESS;
1869} 1998}
1870 1999
2000static ResultCode GetProcessList(u32* out_num_processes, VAddr out_process_ids,
2001 u32 out_process_ids_size) {
2002 LOG_DEBUG(Kernel_SVC, "called. out_process_ids=0x{:016X}, out_process_ids_size={}",
2003 out_process_ids, out_process_ids_size);
2004
2005 // If the supplied size is negative or greater than INT32_MAX / sizeof(u64), bail.
2006 if ((out_process_ids_size & 0xF0000000) != 0) {
2007 LOG_ERROR(Kernel_SVC,
2008 "Supplied size outside [0, 0x0FFFFFFF] range. out_process_ids_size={}",
2009 out_process_ids_size);
2010 return ERR_OUT_OF_RANGE;
2011 }
2012
2013 const auto& kernel = Core::System::GetInstance().Kernel();
2014 const auto& vm_manager = kernel.CurrentProcess()->VMManager();
2015 const auto total_copy_size = out_process_ids_size * sizeof(u64);
2016
2017 if (out_process_ids_size > 0 &&
2018 !vm_manager.IsWithinAddressSpace(out_process_ids, total_copy_size)) {
2019 LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
2020 out_process_ids, out_process_ids + total_copy_size);
2021 return ERR_INVALID_ADDRESS_STATE;
2022 }
2023
2024 const auto& process_list = kernel.GetProcessList();
2025 const auto num_processes = process_list.size();
2026 const auto copy_amount = std::min(std::size_t{out_process_ids_size}, num_processes);
2027
2028 for (std::size_t i = 0; i < copy_amount; ++i) {
2029 Memory::Write64(out_process_ids, process_list[i]->GetProcessID());
2030 out_process_ids += sizeof(u64);
2031 }
2032
2033 *out_num_processes = static_cast<u32>(num_processes);
2034 return RESULT_SUCCESS;
2035}
2036
2037ResultCode GetThreadList(u32* out_num_threads, VAddr out_thread_ids, u32 out_thread_ids_size,
2038 Handle debug_handle) {
2039 // TODO: Handle this case when debug events are supported.
2040 UNIMPLEMENTED_IF(debug_handle != InvalidHandle);
2041
2042 LOG_DEBUG(Kernel_SVC, "called. out_thread_ids=0x{:016X}, out_thread_ids_size={}",
2043 out_thread_ids, out_thread_ids_size);
2044
2045 // If the size is negative or larger than INT32_MAX / sizeof(u64)
2046 if ((out_thread_ids_size & 0xF0000000) != 0) {
2047 LOG_ERROR(Kernel_SVC, "Supplied size outside [0, 0x0FFFFFFF] range. size={}",
2048 out_thread_ids_size);
2049 return ERR_OUT_OF_RANGE;
2050 }
2051
2052 const auto* const current_process = Core::System::GetInstance().Kernel().CurrentProcess();
2053 const auto& vm_manager = current_process->VMManager();
2054 const auto total_copy_size = out_thread_ids_size * sizeof(u64);
2055
2056 if (out_thread_ids_size > 0 &&
2057 !vm_manager.IsWithinAddressSpace(out_thread_ids, total_copy_size)) {
2058 LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
2059 out_thread_ids, out_thread_ids + total_copy_size);
2060 return ERR_INVALID_ADDRESS_STATE;
2061 }
2062
2063 const auto& thread_list = current_process->GetThreadList();
2064 const auto num_threads = thread_list.size();
2065 const auto copy_amount = std::min(std::size_t{out_thread_ids_size}, num_threads);
2066
2067 auto list_iter = thread_list.cbegin();
2068 for (std::size_t i = 0; i < copy_amount; ++i, ++list_iter) {
2069 Memory::Write64(out_thread_ids, (*list_iter)->GetThreadID());
2070 out_thread_ids += sizeof(u64);
2071 }
2072
2073 *out_num_threads = static_cast<u32>(num_threads);
2074 return RESULT_SUCCESS;
2075}
2076
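
Both list SVCs above copy at most as many IDs as the guest buffer can hold while still reporting the full count. A small sketch of that bounded-copy pattern, using an ordinary array as a stand-in for guest memory:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Copies up to out_capacity IDs into out and returns the total number available,
    // mirroring how GetProcessList/GetThreadList fill the guest buffer.
    std::uint32_t CopyIds(const std::vector<std::uint64_t>& ids, std::uint64_t* out,
                          std::size_t out_capacity) {
        const std::size_t copy_amount = std::min(out_capacity, ids.size());
        std::copy_n(ids.begin(), copy_amount, out);
        return static_cast<std::uint32_t>(ids.size());
    }

    int main() {
        const std::vector<std::uint64_t> ids{1, 2, 3, 4, 5};
        std::uint64_t buffer[3] = {};
        const auto total = CopyIds(ids, buffer, 3);
        assert(total == 5);     // caller learns how many exist...
        assert(buffer[2] == 3); // ...but only three fit in the buffer
    }
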
1871namespace { 2077namespace {
1872struct FunctionDef { 2078struct FunctionDef {
1873 using Func = void(); 2079 using Func = void();
@@ -1960,8 +2166,8 @@ static const FunctionDef SVC_Table[] = {
1960 {0x4E, nullptr, "ReadWriteRegister"}, 2166 {0x4E, nullptr, "ReadWriteRegister"},
1961 {0x4F, nullptr, "SetProcessActivity"}, 2167 {0x4F, nullptr, "SetProcessActivity"},
1962 {0x50, SvcWrap<CreateSharedMemory>, "CreateSharedMemory"}, 2168 {0x50, SvcWrap<CreateSharedMemory>, "CreateSharedMemory"},
1963 {0x51, nullptr, "MapTransferMemory"}, 2169 {0x51, SvcWrap<MapTransferMemory>, "MapTransferMemory"},
1964 {0x52, nullptr, "UnmapTransferMemory"}, 2170 {0x52, SvcWrap<UnmapTransferMemory>, "UnmapTransferMemory"},
1965 {0x53, nullptr, "CreateInterruptEvent"}, 2171 {0x53, nullptr, "CreateInterruptEvent"},
1966 {0x54, nullptr, "QueryPhysicalAddress"}, 2172 {0x54, nullptr, "QueryPhysicalAddress"},
1967 {0x55, nullptr, "QueryIoMapping"}, 2173 {0x55, nullptr, "QueryIoMapping"},
@@ -1980,8 +2186,8 @@ static const FunctionDef SVC_Table[] = {
1980 {0x62, nullptr, "TerminateDebugProcess"}, 2186 {0x62, nullptr, "TerminateDebugProcess"},
1981 {0x63, nullptr, "GetDebugEvent"}, 2187 {0x63, nullptr, "GetDebugEvent"},
1982 {0x64, nullptr, "ContinueDebugEvent"}, 2188 {0x64, nullptr, "ContinueDebugEvent"},
1983 {0x65, nullptr, "GetProcessList"}, 2189 {0x65, SvcWrap<GetProcessList>, "GetProcessList"},
1984 {0x66, nullptr, "GetThreadList"}, 2190 {0x66, SvcWrap<GetThreadList>, "GetThreadList"},
1985 {0x67, nullptr, "GetDebugThreadContext"}, 2191 {0x67, nullptr, "GetDebugThreadContext"},
1986 {0x68, nullptr, "SetDebugThreadContext"}, 2192 {0x68, nullptr, "SetDebugThreadContext"},
1987 {0x69, nullptr, "QueryDebugProcessMemory"}, 2193 {0x69, nullptr, "QueryDebugProcessMemory"},
@@ -2023,7 +2229,7 @@ void CallSVC(u32 immediate) {
2023 MICROPROFILE_SCOPE(Kernel_SVC); 2229 MICROPROFILE_SCOPE(Kernel_SVC);
2024 2230
2025 // Lock the global kernel mutex when we enter the kernel HLE. 2231 // Lock the global kernel mutex when we enter the kernel HLE.
2026 std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); 2232 std::lock_guard lock{HLE::g_hle_lock};
2027 2233
2028 const FunctionDef* info = GetSVCInfo(immediate); 2234 const FunctionDef* info = GetSVCInfo(immediate);
2029 if (info) { 2235 if (info) {
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index 2a2c2c5ea..b3733680f 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -78,6 +78,14 @@ void SvcWrap() {
78 FuncReturn(retval); 78 FuncReturn(retval);
79} 79}
80 80
81template <ResultCode func(u32*, u64, u32)>
82void SvcWrap() {
83 u32 param_1 = 0;
84 const u32 retval = func(&param_1, Param(1), static_cast<u32>(Param(2))).raw;
85 Core::CurrentArmInterface().SetReg(1, param_1);
86 FuncReturn(retval);
87}
88
81template <ResultCode func(u64*, u32)> 89template <ResultCode func(u64*, u32)>
82void SvcWrap() { 90void SvcWrap() {
83 u64 param_1 = 0; 91 u64 param_1 = 0;
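
The new SvcWrap specialization follows the existing pattern: read raw guest registers, call the typed handler, then write the out-parameter back to X1 and the result code to X0. A generic standalone sketch of that shape, with register access simulated by a plain array; nothing here is yuzu's actual ABI glue:

    #include <array>
    #include <cassert>
    #include <cstdint>

    // Simulated guest registers X0..X7.
    std::array<std::uint64_t, 8> regs{};

    std::uint64_t Param(int index) {
        return regs[index];
    }
    void SetReg(int index, std::uint64_t value) {
        regs[index] = value;
    }

    // Typed SVC handler: (u32* out, u64 in1, u32 in2) -> result code.
    std::uint32_t ExampleSvc(std::uint32_t* out, std::uint64_t in1, std::uint32_t in2) {
        *out = static_cast<std::uint32_t>(in1 + in2);
        return 0; // success
    }

    // Wrapper in the style of SvcWrap<func>: unpack registers, call, pack results.
    template <std::uint32_t func(std::uint32_t*, std::uint64_t, std::uint32_t)>
    void Wrap() {
        std::uint32_t param_1 = 0;
        const std::uint32_t retval = func(&param_1, Param(1), static_cast<std::uint32_t>(Param(2)));
        SetReg(1, param_1); // out-parameter goes back in X1
        SetReg(0, retval);  // result code goes back in X0
    }

    int main() {
        regs[1] = 40;
        regs[2] = 2;
        Wrap<ExampleSvc>();
        assert(regs[1] == 42);
        assert(regs[0] == 0);
    }
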
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index eb54d6651..1b891f632 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -7,8 +7,6 @@
7#include <optional> 7#include <optional>
8#include <vector> 8#include <vector>
9 9
10#include <boost/range/algorithm_ext/erase.hpp>
11
12#include "common/assert.h" 10#include "common/assert.h"
13#include "common/common_types.h" 11#include "common/common_types.h"
14#include "common/logging/log.h" 12#include "common/logging/log.h"
@@ -30,7 +28,7 @@
30 28
31namespace Kernel { 29namespace Kernel {
32 30
33bool Thread::ShouldWait(Thread* thread) const { 31bool Thread::ShouldWait(const Thread* thread) const {
34 return status != ThreadStatus::Dead; 32 return status != ThreadStatus::Dead;
35} 33}
36 34
@@ -64,21 +62,12 @@ void Thread::Stop() {
64 } 62 }
65 wait_objects.clear(); 63 wait_objects.clear();
66 64
65 owner_process->UnregisterThread(this);
66
67 // Mark the TLS slot in the thread's page as free. 67 // Mark the TLS slot in the thread's page as free.
68 owner_process->FreeTLSSlot(tls_address); 68 owner_process->FreeTLSSlot(tls_address);
69} 69}
70 70
71void WaitCurrentThread_Sleep() {
72 Thread* thread = GetCurrentThread();
73 thread->SetStatus(ThreadStatus::WaitSleep);
74}
75
76void ExitCurrentThread() {
77 Thread* thread = GetCurrentThread();
78 thread->Stop();
79 Core::System::GetInstance().CurrentScheduler().RemoveThread(thread);
80}
81
82void Thread::WakeAfterDelay(s64 nanoseconds) { 71void Thread::WakeAfterDelay(s64 nanoseconds) {
83 // Don't schedule a wakeup if the thread wants to wait forever 72 // Don't schedule a wakeup if the thread wants to wait forever
84 if (nanoseconds == -1) 73 if (nanoseconds == -1)
@@ -118,6 +107,7 @@ void Thread::ResumeFromWait() {
118 case ThreadStatus::WaitSleep: 107 case ThreadStatus::WaitSleep:
119 case ThreadStatus::WaitIPC: 108 case ThreadStatus::WaitIPC:
120 case ThreadStatus::WaitMutex: 109 case ThreadStatus::WaitMutex:
110 case ThreadStatus::WaitCondVar:
121 case ThreadStatus::WaitArb: 111 case ThreadStatus::WaitArb:
122 break; 112 break;
123 113
@@ -211,9 +201,11 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
211 thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap(); 201 thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap();
212 thread->owner_process = &owner_process; 202 thread->owner_process = &owner_process;
213 thread->scheduler = &system.Scheduler(processor_id); 203 thread->scheduler = &system.Scheduler(processor_id);
214 thread->scheduler->AddThread(thread, priority); 204 thread->scheduler->AddThread(thread);
215 thread->tls_address = thread->owner_process->MarkNextAvailableTLSSlotAsUsed(*thread); 205 thread->tls_address = thread->owner_process->MarkNextAvailableTLSSlotAsUsed(*thread);
216 206
207 thread->owner_process->RegisterThread(thread.get());
208
217 // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used 209 // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
218 // to initialize the context 210 // to initialize the context
219 ResetThreadContext(thread->context, stack_top, entry_point, arg); 211 ResetThreadContext(thread->context, stack_top, entry_point, arg);
@@ -241,16 +233,16 @@ void Thread::SetWaitSynchronizationOutput(s32 output) {
241 context.cpu_registers[1] = output; 233 context.cpu_registers[1] = output;
242} 234}
243 235
244s32 Thread::GetWaitObjectIndex(WaitObject* object) const { 236s32 Thread::GetWaitObjectIndex(const WaitObject* object) const {
245 ASSERT_MSG(!wait_objects.empty(), "Thread is not waiting for anything"); 237 ASSERT_MSG(!wait_objects.empty(), "Thread is not waiting for anything");
246 auto match = std::find(wait_objects.rbegin(), wait_objects.rend(), object); 238 const auto match = std::find(wait_objects.rbegin(), wait_objects.rend(), object);
247 return static_cast<s32>(std::distance(match, wait_objects.rend()) - 1); 239 return static_cast<s32>(std::distance(match, wait_objects.rend()) - 1);
248} 240}
249 241
250VAddr Thread::GetCommandBufferAddress() const { 242VAddr Thread::GetCommandBufferAddress() const {
251 // Offset from the start of TLS at which the IPC command buffer begins. 243 // Offset from the start of TLS at which the IPC command buffer begins.
252 static constexpr int CommandHeaderOffset = 0x80; 244 constexpr u64 command_header_offset = 0x80;
253 return GetTLSAddress() + CommandHeaderOffset; 245 return GetTLSAddress() + command_header_offset;
254} 246}
255 247
256void Thread::SetStatus(ThreadStatus new_status) { 248void Thread::SetStatus(ThreadStatus new_status) {
@@ -269,8 +261,8 @@ void Thread::AddMutexWaiter(SharedPtr<Thread> thread) {
269 if (thread->lock_owner == this) { 261 if (thread->lock_owner == this) {
270 // If the thread is already waiting for this thread to release the mutex, ensure that the 262 // If the thread is already waiting for this thread to release the mutex, ensure that the
271 // waiters list is consistent and return without doing anything. 263 // waiters list is consistent and return without doing anything.
272 auto itr = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread); 264 const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
273 ASSERT(itr != wait_mutex_threads.end()); 265 ASSERT(iter != wait_mutex_threads.end());
274 return; 266 return;
275 } 267 }
276 268
@@ -278,11 +270,16 @@ void Thread::AddMutexWaiter(SharedPtr<Thread> thread) {
278 ASSERT(thread->lock_owner == nullptr); 270 ASSERT(thread->lock_owner == nullptr);
279 271
280 // Ensure that the thread is not already in the list of mutex waiters 272 // Ensure that the thread is not already in the list of mutex waiters
281 auto itr = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread); 273 const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
282 ASSERT(itr == wait_mutex_threads.end()); 274 ASSERT(iter == wait_mutex_threads.end());
283 275
276 // Keep the list in an ordered fashion
277 const auto insertion_point = std::find_if(
278 wait_mutex_threads.begin(), wait_mutex_threads.end(),
279 [&thread](const auto& entry) { return entry->GetPriority() > thread->GetPriority(); });
280 wait_mutex_threads.insert(insertion_point, thread);
284 thread->lock_owner = this; 281 thread->lock_owner = this;
285 wait_mutex_threads.emplace_back(std::move(thread)); 282
286 UpdatePriority(); 283 UpdatePriority();
287} 284}
288 285
@@ -290,32 +287,44 @@ void Thread::RemoveMutexWaiter(SharedPtr<Thread> thread) {
290 ASSERT(thread->lock_owner == this); 287 ASSERT(thread->lock_owner == this);
291 288
292 // Ensure that the thread is in the list of mutex waiters 289 // Ensure that the thread is in the list of mutex waiters
293 auto itr = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread); 290 const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
294 ASSERT(itr != wait_mutex_threads.end()); 291 ASSERT(iter != wait_mutex_threads.end());
292
293 wait_mutex_threads.erase(iter);
295 294
296 boost::remove_erase(wait_mutex_threads, thread);
297 thread->lock_owner = nullptr; 295 thread->lock_owner = nullptr;
298 UpdatePriority(); 296 UpdatePriority();
299} 297}
300 298
301void Thread::UpdatePriority() { 299void Thread::UpdatePriority() {
302 // Find the highest priority among all the threads that are waiting for this thread's lock 300 // If any of the threads waiting on the mutex have a higher priority
301 // (taking into account priority inheritance), then this thread inherits
302 // that thread's priority.
303 u32 new_priority = nominal_priority; 303 u32 new_priority = nominal_priority;
304 for (const auto& thread : wait_mutex_threads) { 304 if (!wait_mutex_threads.empty()) {
305 if (thread->nominal_priority < new_priority) 305 if (wait_mutex_threads.front()->current_priority < new_priority) {
306 new_priority = thread->nominal_priority; 306 new_priority = wait_mutex_threads.front()->current_priority;
307 }
307 } 308 }
308 309
309 if (new_priority == current_priority) 310 if (new_priority == current_priority) {
310 return; 311 return;
312 }
311 313
312 scheduler->SetThreadPriority(this, new_priority); 314 scheduler->SetThreadPriority(this, new_priority);
313
314 current_priority = new_priority; 315 current_priority = new_priority;
315 316
317 if (!lock_owner) {
318 return;
319 }
320
321 // Ensure that the thread is within the correct location in the waiting list.
322 auto old_owner = lock_owner;
323 lock_owner->RemoveMutexWaiter(this);
324 old_owner->AddMutexWaiter(this);
325
316 // Recursively update the priority of the thread that depends on the priority of this one. 326 // Recursively update the priority of the thread that depends on the priority of this one.
317 if (lock_owner) 327 lock_owner->UpdatePriority();
318 lock_owner->UpdatePriority();
319} 328}
320 329
321void Thread::ChangeCore(u32 core, u64 mask) { 330void Thread::ChangeCore(u32 core, u64 mask) {
@@ -347,7 +356,7 @@ void Thread::ChangeScheduler() {
347 if (*new_processor_id != processor_id) { 356 if (*new_processor_id != processor_id) {
348 // Remove thread from previous core's scheduler 357 // Remove thread from previous core's scheduler
349 scheduler->RemoveThread(this); 358 scheduler->RemoveThread(this);
350 next_scheduler.AddThread(this, current_priority); 359 next_scheduler.AddThread(this);
351 } 360 }
352 361
353 processor_id = *new_processor_id; 362 processor_id = *new_processor_id;
@@ -362,7 +371,7 @@ void Thread::ChangeScheduler() {
362 system.CpuCore(processor_id).PrepareReschedule(); 371 system.CpuCore(processor_id).PrepareReschedule();
363} 372}
364 373
365bool Thread::AllWaitObjectsReady() { 374bool Thread::AllWaitObjectsReady() const {
366 return std::none_of( 375 return std::none_of(
367 wait_objects.begin(), wait_objects.end(), 376 wait_objects.begin(), wait_objects.end(),
368 [this](const SharedPtr<WaitObject>& object) { return object->ShouldWait(this); }); 377 [this](const SharedPtr<WaitObject>& object) { return object->ShouldWait(this); });
@@ -391,6 +400,14 @@ void Thread::SetActivity(ThreadActivity value) {
391 } 400 }
392} 401}
393 402
403void Thread::Sleep(s64 nanoseconds) {
404 // Sleep current thread and check for next thread to schedule
405 SetStatus(ThreadStatus::WaitSleep);
406
407 // Create an event to wake the thread up after the specified nanosecond delay has passed
408 WakeAfterDelay(nanoseconds);
409}
410
394//////////////////////////////////////////////////////////////////////////////////////////////////// 411////////////////////////////////////////////////////////////////////////////////////////////////////
395 412
396/** 413/**
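Editor's note: the mutex-waiter changes in thread.cpp above drop boost::remove_erase in favour of a plain erase and keep wait_mutex_threads ordered by priority, so UpdatePriority() can inherit the front waiter's current_priority and re-slot itself in its own lock owner's list when its priority changes. A freestanding sketch of that ordered-insertion and priority-inheritance idea, simplified to raw pointers with no scheduler; the names and main() scenario are illustrative:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    struct Thread {
        unsigned nominal_priority;
        unsigned current_priority;
        Thread* lock_owner = nullptr;
        std::vector<Thread*> wait_mutex_threads; // kept ordered, best (lowest number) first

        explicit Thread(unsigned prio) : nominal_priority{prio}, current_priority{prio} {}

        void AddMutexWaiter(Thread* t) {
            // Lower number means higher priority: insert before the first waiter
            // with a larger priority number to keep the list sorted best-first.
            const auto insertion_point = std::find_if(
                wait_mutex_threads.begin(), wait_mutex_threads.end(),
                [t](const Thread* e) { return e->current_priority > t->current_priority; });
            wait_mutex_threads.insert(insertion_point, t);
            t->lock_owner = this;
            UpdatePriority();
        }

        void RemoveMutexWaiter(Thread* t) {
            const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), t);
            assert(iter != wait_mutex_threads.end());
            wait_mutex_threads.erase(iter);
            t->lock_owner = nullptr;
            UpdatePriority();
        }

        void UpdatePriority() {
            // Inherit the best waiter priority, if it beats the nominal one.
            unsigned new_priority = nominal_priority;
            if (!wait_mutex_threads.empty() &&
                wait_mutex_threads.front()->current_priority < new_priority) {
                new_priority = wait_mutex_threads.front()->current_priority;
            }
            if (new_priority == current_priority) {
                return;
            }
            current_priority = new_priority;
            if (!lock_owner) {
                return;
            }
            // Re-insert ourselves in the owner's ordered list; the nested
            // UpdatePriority() calls propagate the change up the chain.
            Thread* old_owner = lock_owner;
            old_owner->RemoveMutexWaiter(this);
            old_owner->AddMutexWaiter(this);
        }
    };

    int main() {
        Thread owner{40}, waiter{10};
        owner.AddMutexWaiter(&waiter);            // owner temporarily runs at priority 10
        assert(owner.current_priority == 10);
        owner.RemoveMutexWaiter(&waiter);
        assert(owner.current_priority == 40);     // back to its nominal priority
    }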
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index c48b21aba..73e5d1bb4 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -51,7 +51,8 @@ enum class ThreadStatus {
51 WaitIPC, ///< Waiting for the reply from an IPC request 51 WaitIPC, ///< Waiting for the reply from an IPC request
52 WaitSynchAny, ///< Waiting due to WaitSynch1 or WaitSynchN with wait_all = false 52 WaitSynchAny, ///< Waiting due to WaitSynch1 or WaitSynchN with wait_all = false
53 WaitSynchAll, ///< Waiting due to WaitSynchronizationN with wait_all = true 53 WaitSynchAll, ///< Waiting due to WaitSynchronizationN with wait_all = true
54 WaitMutex, ///< Waiting due to an ArbitrateLock/WaitProcessWideKey svc 54 WaitMutex, ///< Waiting due to an ArbitrateLock svc
 55 WaitCondVar,   ///< Waiting due to a WaitProcessWideKey svc
55 WaitArb, ///< Waiting due to a SignalToAddress/WaitForAddress svc 56 WaitArb, ///< Waiting due to a SignalToAddress/WaitForAddress svc
56 Dormant, ///< Created but not yet made ready 57 Dormant, ///< Created but not yet made ready
57 Dead ///< Run to completion, or forcefully terminated 58 Dead ///< Run to completion, or forcefully terminated
@@ -110,7 +111,7 @@ public:
110 return HANDLE_TYPE; 111 return HANDLE_TYPE;
111 } 112 }
112 113
113 bool ShouldWait(Thread* thread) const override; 114 bool ShouldWait(const Thread* thread) const override;
114 void Acquire(Thread* thread) override; 115 void Acquire(Thread* thread) override;
115 116
116 /** 117 /**
@@ -204,7 +205,7 @@ public:
204 * object in the list. 205 * object in the list.
205 * @param object Object to query the index of. 206 * @param object Object to query the index of.
206 */ 207 */
207 s32 GetWaitObjectIndex(WaitObject* object) const; 208 s32 GetWaitObjectIndex(const WaitObject* object) const;
208 209
209 /** 210 /**
210 * Stops a thread, invalidating it from further use 211 * Stops a thread, invalidating it from further use
@@ -298,7 +299,7 @@ public:
298 } 299 }
299 300
300 /// Determines whether all the objects this thread is waiting on are ready. 301 /// Determines whether all the objects this thread is waiting on are ready.
301 bool AllWaitObjectsReady(); 302 bool AllWaitObjectsReady() const;
302 303
303 const MutexWaitingThreads& GetMutexWaitingThreads() const { 304 const MutexWaitingThreads& GetMutexWaitingThreads() const {
304 return wait_mutex_threads; 305 return wait_mutex_threads;
@@ -383,6 +384,9 @@ public:
383 384
384 void SetActivity(ThreadActivity value); 385 void SetActivity(ThreadActivity value);
385 386
387 /// Sleeps this thread for the given amount of nanoseconds.
388 void Sleep(s64 nanoseconds);
389
386private: 390private:
387 explicit Thread(KernelCore& kernel); 391 explicit Thread(KernelCore& kernel);
388 ~Thread() override; 392 ~Thread() override;
@@ -398,8 +402,14 @@ private:
398 VAddr entry_point = 0; 402 VAddr entry_point = 0;
399 VAddr stack_top = 0; 403 VAddr stack_top = 0;
400 404
401 u32 nominal_priority = 0; ///< Nominal thread priority, as set by the emulated application 405 /// Nominal thread priority, as set by the emulated application.
402 u32 current_priority = 0; ///< Current thread priority, can be temporarily changed 406 /// The nominal priority is the thread priority without priority
407 /// inheritance taken into account.
408 u32 nominal_priority = 0;
409
410 /// Current thread priority. This may change over the course of the
411 /// thread's lifetime in order to facilitate priority inheritance.
412 u32 current_priority = 0;
403 413
404 u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks. 414 u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks.
405 u64 last_running_ticks = 0; ///< CPU tick when thread was last running 415 u64 last_running_ticks = 0; ///< CPU tick when thread was last running
@@ -460,14 +470,4 @@ private:
460 */ 470 */
461Thread* GetCurrentThread(); 471Thread* GetCurrentThread();
462 472
463/**
464 * Waits the current thread on a sleep
465 */
466void WaitCurrentThread_Sleep();
467
468/**
469 * Stops the current thread and removes it from the thread_list
470 */
471void ExitCurrentThread();
472
473} // namespace Kernel 473} // namespace Kernel
diff --git a/src/core/hle/kernel/transfer_memory.cpp b/src/core/hle/kernel/transfer_memory.cpp
new file mode 100644
index 000000000..26c4e5e67
--- /dev/null
+++ b/src/core/hle/kernel/transfer_memory.cpp
@@ -0,0 +1,81 @@
1// Copyright 2019 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "core/hle/kernel/errors.h"
6#include "core/hle/kernel/kernel.h"
7#include "core/hle/kernel/process.h"
8#include "core/hle/kernel/shared_memory.h"
9#include "core/hle/kernel/transfer_memory.h"
10#include "core/hle/result.h"
11
12namespace Kernel {
13
14TransferMemory::TransferMemory(KernelCore& kernel) : Object{kernel} {}
15TransferMemory::~TransferMemory() = default;
16
17SharedPtr<TransferMemory> TransferMemory::Create(KernelCore& kernel, VAddr base_address, u64 size,
18 MemoryPermission permissions) {
19 SharedPtr<TransferMemory> transfer_memory{new TransferMemory(kernel)};
20
21 transfer_memory->base_address = base_address;
22 transfer_memory->memory_size = size;
23 transfer_memory->owner_permissions = permissions;
24 transfer_memory->owner_process = kernel.CurrentProcess();
25
26 return transfer_memory;
27}
28
29const u8* TransferMemory::GetPointer() const {
30 return backing_block.get()->data();
31}
32
33u64 TransferMemory::GetSize() const {
34 return memory_size;
35}
36
37ResultCode TransferMemory::MapMemory(VAddr address, u64 size, MemoryPermission permissions) {
38 if (memory_size != size) {
39 return ERR_INVALID_SIZE;
40 }
41
42 if (owner_permissions != permissions) {
43 return ERR_INVALID_STATE;
44 }
45
46 if (is_mapped) {
47 return ERR_INVALID_STATE;
48 }
49
50 backing_block = std::make_shared<std::vector<u8>>(size);
51
52 const auto map_state = owner_permissions == MemoryPermission::None
53 ? MemoryState::TransferMemoryIsolated
54 : MemoryState::TransferMemory;
55 auto& vm_manager = owner_process->VMManager();
56 const auto map_result = vm_manager.MapMemoryBlock(address, backing_block, 0, size, map_state);
57 if (map_result.Failed()) {
58 return map_result.Code();
59 }
60
61 is_mapped = true;
62 return RESULT_SUCCESS;
63}
64
65ResultCode TransferMemory::UnmapMemory(VAddr address, u64 size) {
66 if (memory_size != size) {
67 return ERR_INVALID_SIZE;
68 }
69
70 auto& vm_manager = owner_process->VMManager();
71 const auto result = vm_manager.UnmapRange(address, size);
72
73 if (result.IsError()) {
74 return result;
75 }
76
77 is_mapped = false;
78 return RESULT_SUCCESS;
79}
80
81} // namespace Kernel
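Editor's note: the new TransferMemory object allocates its backing block only on a successful MapMemory call and rejects mismatched sizes, mismatched permissions, and double-mapping before touching the owner process's VMManager. A reduced, self-contained sketch of that lifecycle; the enum values and error names below are invented for illustration and do not map one-to-one onto the kernel error codes:

    #include <cstdint>
    #include <memory>
    #include <vector>

    enum class MemoryPermission { None, Read, ReadWrite };
    enum class Result { Success, InvalidSize, InvalidState };

    class TransferMemorySketch {
    public:
        TransferMemorySketch(std::uint64_t size, MemoryPermission perms)
            : memory_size{size}, owner_permissions{perms} {}

        Result Map(std::uint64_t size, MemoryPermission perms) {
            if (size != memory_size) {
                return Result::InvalidSize;   // size must match the creation size
            }
            if (perms != owner_permissions || is_mapped) {
                return Result::InvalidState;  // permissions must match; no double-map
            }
            backing_block = std::make_shared<std::vector<std::uint8_t>>(size);
            is_mapped = true;
            return Result::Success;
        }

        Result Unmap(std::uint64_t size) {
            if (size != memory_size) {
                return Result::InvalidSize;
            }
            is_mapped = false;
            return Result::Success;
        }

    private:
        std::shared_ptr<std::vector<std::uint8_t>> backing_block;
        std::uint64_t memory_size;
        MemoryPermission owner_permissions;
        bool is_mapped = false;
    };

    int main() {
        TransferMemorySketch tm{0x1000, MemoryPermission::ReadWrite};
        const bool first = tm.Map(0x1000, MemoryPermission::ReadWrite) == Result::Success;
        const bool second = tm.Map(0x1000, MemoryPermission::ReadWrite) == Result::Success;
        return (first && !second) ? 0 : 1; // the second map must be rejected
    }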
diff --git a/src/core/hle/kernel/transfer_memory.h b/src/core/hle/kernel/transfer_memory.h
new file mode 100644
index 000000000..a140b1e2b
--- /dev/null
+++ b/src/core/hle/kernel/transfer_memory.h
@@ -0,0 +1,103 @@
1// Copyright 2019 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8#include <vector>
9
10#include "core/hle/kernel/object.h"
11
12union ResultCode;
13
14namespace Kernel {
15
16class KernelCore;
17class Process;
18
19enum class MemoryPermission : u32;
20
21/// Defines the interface for transfer memory objects.
22///
23/// Transfer memory is typically used for the purpose of
24/// transferring memory between separate process instances,
25/// thus the name.
26///
27class TransferMemory final : public Object {
28public:
29 static constexpr HandleType HANDLE_TYPE = HandleType::TransferMemory;
30
31 static SharedPtr<TransferMemory> Create(KernelCore& kernel, VAddr base_address, u64 size,
32 MemoryPermission permissions);
33
34 TransferMemory(const TransferMemory&) = delete;
35 TransferMemory& operator=(const TransferMemory&) = delete;
36
37 TransferMemory(TransferMemory&&) = delete;
38 TransferMemory& operator=(TransferMemory&&) = delete;
39
40 std::string GetTypeName() const override {
41 return "TransferMemory";
42 }
43
44 std::string GetName() const override {
45 return GetTypeName();
46 }
47
48 HandleType GetHandleType() const override {
49 return HANDLE_TYPE;
50 }
51
52 /// Gets a pointer to the backing block of this instance.
53 const u8* GetPointer() const;
54
55 /// Gets the size of the memory backing this instance in bytes.
56 u64 GetSize() const;
57
58 /// Attempts to map transfer memory with the given range and memory permissions.
59 ///
 60 /// @param address The base address to begin mapping memory at.
61 /// @param size The size of the memory to map, in bytes.
62 /// @param permissions The memory permissions to check against when mapping memory.
63 ///
64 /// @pre The given address, size, and memory permissions must all match
65 /// the same values that were given when creating the transfer memory
66 /// instance.
67 ///
68 ResultCode MapMemory(VAddr address, u64 size, MemoryPermission permissions);
69
70 /// Unmaps the transfer memory with the given range
71 ///
72 /// @param address The base address to begin unmapping memory at.
73 /// @param size The size of the memory to unmap, in bytes.
74 ///
75 /// @pre The given address and size must be the same as the ones used
76 /// to create the transfer memory instance.
77 ///
78 ResultCode UnmapMemory(VAddr address, u64 size);
79
80private:
81 explicit TransferMemory(KernelCore& kernel);
82 ~TransferMemory() override;
83
84 /// Memory block backing this instance.
85 std::shared_ptr<std::vector<u8>> backing_block;
86
87 /// The base address for the memory managed by this instance.
88 VAddr base_address = 0;
89
90 /// Size of the memory, in bytes, that this instance manages.
91 u64 memory_size = 0;
92
93 /// The memory permissions that are applied to this instance.
94 MemoryPermission owner_permissions{};
95
96 /// The process that this transfer memory instance was created under.
97 Process* owner_process = nullptr;
98
99 /// Whether or not this transfer memory instance has mapped memory.
100 bool is_mapped = false;
101};
102
103} // namespace Kernel
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index 05c59af34..ec0a480ce 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -7,29 +7,29 @@
7#include <utility> 7#include <utility>
8#include "common/assert.h" 8#include "common/assert.h"
9#include "common/logging/log.h" 9#include "common/logging/log.h"
10#include "common/memory_hook.h"
10#include "core/arm/arm_interface.h" 11#include "core/arm/arm_interface.h"
11#include "core/core.h" 12#include "core/core.h"
12#include "core/file_sys/program_metadata.h" 13#include "core/file_sys/program_metadata.h"
13#include "core/hle/kernel/errors.h" 14#include "core/hle/kernel/errors.h"
14#include "core/hle/kernel/vm_manager.h" 15#include "core/hle/kernel/vm_manager.h"
15#include "core/memory.h" 16#include "core/memory.h"
16#include "core/memory_hook.h"
17#include "core/memory_setup.h" 17#include "core/memory_setup.h"
18 18
19namespace Kernel { 19namespace Kernel {
20namespace { 20namespace {
21const char* GetMemoryStateName(MemoryState state) { 21const char* GetMemoryStateName(MemoryState state) {
22 static constexpr const char* names[] = { 22 static constexpr const char* names[] = {
23 "Unmapped", "Io", 23 "Unmapped", "Io",
24 "Normal", "CodeStatic", 24 "Normal", "Code",
25 "CodeMutable", "Heap", 25 "CodeData", "Heap",
26 "Shared", "Unknown1", 26 "Shared", "Unknown1",
27 "ModuleCodeStatic", "ModuleCodeMutable", 27 "ModuleCode", "ModuleCodeData",
28 "IpcBuffer0", "Stack", 28 "IpcBuffer0", "Stack",
29 "ThreadLocal", "TransferMemoryIsolated", 29 "ThreadLocal", "TransferMemoryIsolated",
30 "TransferMemory", "ProcessMemory", 30 "TransferMemory", "ProcessMemory",
31 "Inaccessible", "IpcBuffer1", 31 "Inaccessible", "IpcBuffer1",
32 "IpcBuffer3", "KernelStack", 32 "IpcBuffer3", "KernelStack",
33 }; 33 };
34 34
35 return names[ToSvcMemoryState(state)]; 35 return names[ToSvcMemoryState(state)];
@@ -177,7 +177,7 @@ ResultVal<VAddr> VMManager::FindFreeRegion(u64 size) const {
177 177
178ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u64 size, 178ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u64 size,
179 MemoryState state, 179 MemoryState state,
180 Memory::MemoryHookPointer mmio_handler) { 180 Common::MemoryHookPointer mmio_handler) {
181 // This is the appropriately sized VMA that will turn into our allocation. 181 // This is the appropriately sized VMA that will turn into our allocation.
182 CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size)); 182 CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
183 VirtualMemoryArea& final_vma = vma_handle->second; 183 VirtualMemoryArea& final_vma = vma_handle->second;
@@ -256,57 +256,50 @@ ResultCode VMManager::ReprotectRange(VAddr target, u64 size, VMAPermission new_p
256 return RESULT_SUCCESS; 256 return RESULT_SUCCESS;
257} 257}
258 258
259ResultVal<VAddr> VMManager::HeapAllocate(VAddr target, u64 size, VMAPermission perms) { 259ResultVal<VAddr> VMManager::SetHeapSize(u64 size) {
260 if (!IsWithinHeapRegion(target, size)) { 260 if (size > GetHeapRegionSize()) {
261 return ERR_INVALID_ADDRESS; 261 return ERR_OUT_OF_MEMORY;
262 }
263
264 // No need to do any additional work if the heap is already the given size.
265 if (size == GetCurrentHeapSize()) {
266 return MakeResult(heap_region_base);
262 } 267 }
263 268
264 if (heap_memory == nullptr) { 269 if (heap_memory == nullptr) {
265 // Initialize heap 270 // Initialize heap
266 heap_memory = std::make_shared<std::vector<u8>>(); 271 heap_memory = std::make_shared<std::vector<u8>>(size);
267 heap_start = heap_end = target; 272 heap_end = heap_region_base + size;
268 } else { 273 } else {
269 UnmapRange(heap_start, heap_end - heap_start); 274 UnmapRange(heap_region_base, GetCurrentHeapSize());
270 }
271
272 // If necessary, expand backing vector to cover new heap extents.
273 if (target < heap_start) {
274 heap_memory->insert(begin(*heap_memory), heap_start - target, 0);
275 heap_start = target;
276 RefreshMemoryBlockMappings(heap_memory.get());
277 }
278 if (target + size > heap_end) {
279 heap_memory->insert(end(*heap_memory), (target + size) - heap_end, 0);
280 heap_end = target + size;
281 RefreshMemoryBlockMappings(heap_memory.get());
282 } 275 }
283 ASSERT(heap_end - heap_start == heap_memory->size());
284 276
285 CASCADE_RESULT(auto vma, MapMemoryBlock(target, heap_memory, target - heap_start, size, 277 // If necessary, expand backing vector to cover new heap extents in
286 MemoryState::Heap)); 278 // the case of allocating. Otherwise, shrink the backing memory,
287 Reprotect(vma, perms); 279 // if a smaller heap has been requested.
280 const u64 old_heap_size = GetCurrentHeapSize();
281 if (size > old_heap_size) {
282 const u64 alloc_size = size - old_heap_size;
288 283
289 heap_used = size; 284 heap_memory->insert(heap_memory->end(), alloc_size, 0);
290 285 RefreshMemoryBlockMappings(heap_memory.get());
291 return MakeResult<VAddr>(heap_end - size); 286 } else if (size < old_heap_size) {
292} 287 heap_memory->resize(size);
288 heap_memory->shrink_to_fit();
293 289
294ResultCode VMManager::HeapFree(VAddr target, u64 size) { 290 RefreshMemoryBlockMappings(heap_memory.get());
295 if (!IsWithinHeapRegion(target, size)) {
296 return ERR_INVALID_ADDRESS;
297 } 291 }
298 292
299 if (size == 0) { 293 heap_end = heap_region_base + size;
300 return RESULT_SUCCESS; 294 ASSERT(GetCurrentHeapSize() == heap_memory->size());
301 }
302 295
303 const ResultCode result = UnmapRange(target, size); 296 const auto mapping_result =
304 if (result.IsError()) { 297 MapMemoryBlock(heap_region_base, heap_memory, 0, size, MemoryState::Heap);
305 return result; 298 if (mapping_result.Failed()) {
299 return mapping_result.Code();
306 } 300 }
307 301
308 heap_used -= size; 302 return MakeResult<VAddr>(heap_region_base);
309 return RESULT_SUCCESS;
310} 303}
311 304
312MemoryInfo VMManager::QueryMemory(VAddr address) const { 305MemoryInfo VMManager::QueryMemory(VAddr address) const {
@@ -598,6 +591,7 @@ void VMManager::InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType ty
598 591
599 heap_region_base = map_region_end; 592 heap_region_base = map_region_end;
600 heap_region_end = heap_region_base + heap_region_size; 593 heap_region_end = heap_region_base + heap_region_size;
594 heap_end = heap_region_base;
601 595
602 new_map_region_base = heap_region_end; 596 new_map_region_base = heap_region_end;
603 new_map_region_end = new_map_region_base + new_map_region_size; 597 new_map_region_end = new_map_region_base + new_map_region_size;
@@ -624,7 +618,7 @@ void VMManager::ClearPageTable() {
624 std::fill(page_table.pointers.begin(), page_table.pointers.end(), nullptr); 618 std::fill(page_table.pointers.begin(), page_table.pointers.end(), nullptr);
625 page_table.special_regions.clear(); 619 page_table.special_regions.clear();
626 std::fill(page_table.attributes.begin(), page_table.attributes.end(), 620 std::fill(page_table.attributes.begin(), page_table.attributes.end(),
627 Memory::PageType::Unmapped); 621 Common::PageType::Unmapped);
628} 622}
629 623
630VMManager::CheckResults VMManager::CheckRangeState(VAddr address, u64 size, MemoryState state_mask, 624VMManager::CheckResults VMManager::CheckRangeState(VAddr address, u64 size, MemoryState state_mask,
@@ -692,10 +686,6 @@ u64 VMManager::GetTotalMemoryUsage() const {
692 return 0xF8000000; 686 return 0xF8000000;
693} 687}
694 688
695u64 VMManager::GetTotalHeapUsage() const {
696 return heap_used;
697}
698
699VAddr VMManager::GetAddressSpaceBaseAddress() const { 689VAddr VMManager::GetAddressSpaceBaseAddress() const {
700 return address_space_base; 690 return address_space_base;
701} 691}
@@ -778,6 +768,10 @@ u64 VMManager::GetHeapRegionSize() const {
778 return heap_region_end - heap_region_base; 768 return heap_region_end - heap_region_base;
779} 769}
780 770
771u64 VMManager::GetCurrentHeapSize() const {
772 return heap_end - heap_region_base;
773}
774
781bool VMManager::IsWithinHeapRegion(VAddr address, u64 size) const { 775bool VMManager::IsWithinHeapRegion(VAddr address, u64 size) const {
782 return IsInsideAddressRange(address, size, GetHeapRegionBaseAddress(), 776 return IsInsideAddressRange(address, size, GetHeapRegionBaseAddress(),
783 GetHeapRegionEndAddress()); 777 GetHeapRegionEndAddress());
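Editor's note: the vm_manager.cpp hunk above replaces HeapAllocate/HeapFree with a single SetHeapSize that anchors the heap at heap_region_base, grows or shrinks one backing vector, and tracks heap_end as base plus the current size, returning ERR_OUT_OF_MEMORY when the request exceeds the heap region. A compact standalone sketch of that bookkeeping; std::optional stands in for ResultVal, and the real code additionally refreshes block mappings and remaps the region, which is omitted here:

    #include <cstdint>
    #include <memory>
    #include <optional>
    #include <vector>

    class HeapSketch {
    public:
        HeapSketch(std::uint64_t region_base, std::uint64_t region_size)
            : heap_region_base{region_base}, heap_region_size{region_size},
              heap_end{region_base} {}

        // Returns the heap base address on success, std::nullopt if the request
        // exceeds the heap region (stand-in for ERR_OUT_OF_MEMORY).
        std::optional<std::uint64_t> SetHeapSize(std::uint64_t size) {
            if (size > heap_region_size) {
                return std::nullopt;
            }
            if (size == CurrentHeapSize()) {
                return heap_region_base;          // nothing to do
            }
            if (!heap_memory) {
                heap_memory = std::make_shared<std::vector<std::uint8_t>>(size);
            } else if (size > CurrentHeapSize()) {
                // Grow: extend the backing vector with zero-filled bytes.
                heap_memory->insert(heap_memory->end(), size - CurrentHeapSize(), 0);
            } else {
                // Shrink: trim the backing vector and release the excess capacity.
                heap_memory->resize(size);
                heap_memory->shrink_to_fit();
            }
            heap_end = heap_region_base + size;   // heap_end is always base + current size
            return heap_region_base;
        }

        std::uint64_t CurrentHeapSize() const { return heap_end - heap_region_base; }

    private:
        std::uint64_t heap_region_base;
        std::uint64_t heap_region_size;
        std::uint64_t heap_end;
        std::shared_ptr<std::vector<std::uint8_t>> heap_memory;
    };

    int main() {
        HeapSketch heap{0x10000000, 0x1000000};
        const bool grew = heap.SetHeapSize(0x2000).has_value() && heap.CurrentHeapSize() == 0x2000;
        const bool shrank = heap.SetHeapSize(0x1000).has_value() && heap.CurrentHeapSize() == 0x1000;
        const bool rejected = !heap.SetHeapSize(0x2000000).has_value();
        return (grew && shrank && rejected) ? 0 : 1;
    }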
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index 88e0b3c02..6f484b7bf 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -9,9 +9,10 @@
9#include <tuple> 9#include <tuple>
10#include <vector> 10#include <vector>
11#include "common/common_types.h" 11#include "common/common_types.h"
12#include "common/memory_hook.h"
13#include "common/page_table.h"
12#include "core/hle/result.h" 14#include "core/hle/result.h"
13#include "core/memory.h" 15#include "core/memory.h"
14#include "core/memory_hook.h"
15 16
16namespace FileSys { 17namespace FileSys {
17enum class ProgramAddressSpaceType : u8; 18enum class ProgramAddressSpaceType : u8;
@@ -164,12 +165,12 @@ enum class MemoryState : u32 {
164 Unmapped = 0x00, 165 Unmapped = 0x00,
165 Io = 0x01 | FlagMapped, 166 Io = 0x01 | FlagMapped,
166 Normal = 0x02 | FlagMapped | FlagQueryPhysicalAddressAllowed, 167 Normal = 0x02 | FlagMapped | FlagQueryPhysicalAddressAllowed,
167 CodeStatic = 0x03 | CodeFlags | FlagMapProcess, 168 Code = 0x03 | CodeFlags | FlagMapProcess,
168 CodeMutable = 0x04 | CodeFlags | FlagMapProcess | FlagCodeMemory, 169 CodeData = 0x04 | DataFlags | FlagMapProcess | FlagCodeMemory,
169 Heap = 0x05 | DataFlags | FlagCodeMemory, 170 Heap = 0x05 | DataFlags | FlagCodeMemory,
170 Shared = 0x06 | FlagMapped | FlagMemoryPoolAllocated, 171 Shared = 0x06 | FlagMapped | FlagMemoryPoolAllocated,
171 ModuleCodeStatic = 0x08 | CodeFlags | FlagModule | FlagMapProcess, 172 ModuleCode = 0x08 | CodeFlags | FlagModule | FlagMapProcess,
172 ModuleCodeMutable = 0x09 | DataFlags | FlagModule | FlagMapProcess | FlagCodeMemory, 173 ModuleCodeData = 0x09 | DataFlags | FlagModule | FlagMapProcess | FlagCodeMemory,
173 174
174 IpcBuffer0 = 0x0A | FlagMapped | FlagQueryPhysicalAddressAllowed | FlagMemoryPoolAllocated | 175 IpcBuffer0 = 0x0A | FlagMapped | FlagQueryPhysicalAddressAllowed | FlagMemoryPoolAllocated |
175 IPCFlags | FlagSharedDevice | FlagSharedDeviceAligned, 176 IPCFlags | FlagSharedDevice | FlagSharedDeviceAligned,
@@ -290,7 +291,7 @@ struct VirtualMemoryArea {
290 // Settings for type = MMIO 291 // Settings for type = MMIO
291 /// Physical address of the register area this VMA maps to. 292 /// Physical address of the register area this VMA maps to.
292 PAddr paddr = 0; 293 PAddr paddr = 0;
293 Memory::MemoryHookPointer mmio_handler = nullptr; 294 Common::MemoryHookPointer mmio_handler = nullptr;
294 295
295 /// Tests if this area can be merged to the right with `next`. 296 /// Tests if this area can be merged to the right with `next`.
296 bool CanBeMergedWith(const VirtualMemoryArea& next) const; 297 bool CanBeMergedWith(const VirtualMemoryArea& next) const;
@@ -368,7 +369,7 @@ public:
368 * @param mmio_handler The handler that will implement read and write for this MMIO region. 369 * @param mmio_handler The handler that will implement read and write for this MMIO region.
369 */ 370 */
370 ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u64 size, MemoryState state, 371 ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u64 size, MemoryState state,
371 Memory::MemoryHookPointer mmio_handler); 372 Common::MemoryHookPointer mmio_handler);
372 373
373 /// Unmaps a range of addresses, splitting VMAs as necessary. 374 /// Unmaps a range of addresses, splitting VMAs as necessary.
374 ResultCode UnmapRange(VAddr target, u64 size); 375 ResultCode UnmapRange(VAddr target, u64 size);
@@ -379,11 +380,41 @@ public:
379 /// Changes the permissions of a range of addresses, splitting VMAs as necessary. 380 /// Changes the permissions of a range of addresses, splitting VMAs as necessary.
380 ResultCode ReprotectRange(VAddr target, u64 size, VMAPermission new_perms); 381 ResultCode ReprotectRange(VAddr target, u64 size, VMAPermission new_perms);
381 382
382 ResultVal<VAddr> HeapAllocate(VAddr target, u64 size, VMAPermission perms);
383 ResultCode HeapFree(VAddr target, u64 size);
384
385 ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state); 383 ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state);
386 384
385 /// Attempts to allocate a heap with the given size.
386 ///
387 /// @param size The size of the heap to allocate in bytes.
388 ///
389 /// @note If a heap is currently allocated, and this is called
390 /// with a size that is equal to the size of the current heap,
391 /// then this function will do nothing and return the current
392 /// heap's starting address, as there's no need to perform
393 /// any additional heap allocation work.
394 ///
395 /// @note If a heap is currently allocated, and this is called
396 /// with a size less than the current heap's size, then
397 /// this function will attempt to shrink the heap.
398 ///
399 /// @note If a heap is currently allocated, and this is called
400 /// with a size larger than the current heap's size, then
401 /// this function will attempt to extend the size of the heap.
402 ///
403 /// @returns A result indicating either success or failure.
404 /// <p>
405 /// If successful, this function will return a result
406 /// containing the starting address to the allocated heap.
407 /// <p>
408 /// If unsuccessful, this function will return a result
409 /// containing an error code.
410 ///
411 /// @pre The given size must lie within the allowable heap
412 /// memory region managed by this VMManager instance.
413 /// Failure to abide by this will result in ERR_OUT_OF_MEMORY
414 /// being returned as the result.
415 ///
416 ResultVal<VAddr> SetHeapSize(u64 size);
417
387 /// Queries the memory manager for information about the given address. 418 /// Queries the memory manager for information about the given address.
388 /// 419 ///
389 /// @param address The address to query the memory manager about for information. 420 /// @param address The address to query the memory manager about for information.
@@ -417,9 +448,6 @@ public:
417 /// Gets the total memory usage, used by svcGetInfo 448 /// Gets the total memory usage, used by svcGetInfo
418 u64 GetTotalMemoryUsage() const; 449 u64 GetTotalMemoryUsage() const;
419 450
420 /// Gets the total heap usage, used by svcGetInfo
421 u64 GetTotalHeapUsage() const;
422
423 /// Gets the address space base address 451 /// Gets the address space base address
424 VAddr GetAddressSpaceBaseAddress() const; 452 VAddr GetAddressSpaceBaseAddress() const;
425 453
@@ -468,6 +496,13 @@ public:
468 /// Gets the total size of the heap region in bytes. 496 /// Gets the total size of the heap region in bytes.
469 u64 GetHeapRegionSize() const; 497 u64 GetHeapRegionSize() const;
470 498
499 /// Gets the total size of the current heap in bytes.
500 ///
501 /// @note This is the current allocated heap size, not the size
502 /// of the region it's allowed to exist within.
503 ///
504 u64 GetCurrentHeapSize() const;
505
471 /// Determines whether or not the specified range is within the heap region. 506 /// Determines whether or not the specified range is within the heap region.
472 bool IsWithinHeapRegion(VAddr address, u64 size) const; 507 bool IsWithinHeapRegion(VAddr address, u64 size) const;
473 508
@@ -509,7 +544,7 @@ public:
509 544
510 /// Each VMManager has its own page table, which is set as the main one when the owning process 545 /// Each VMManager has its own page table, which is set as the main one when the owning process
511 /// is scheduled. 546 /// is scheduled.
512 Memory::PageTable page_table; 547 Common::PageTable page_table{Memory::PAGE_BITS};
513 548
514private: 549private:
515 using VMAIter = VMAMap::iterator; 550 using VMAIter = VMAMap::iterator;
@@ -624,9 +659,9 @@ private:
624 // This makes deallocation and reallocation of holes fast and keeps process memory contiguous 659 // This makes deallocation and reallocation of holes fast and keeps process memory contiguous
625 // in the emulator address space, allowing Memory::GetPointer to be reasonably safe. 660 // in the emulator address space, allowing Memory::GetPointer to be reasonably safe.
626 std::shared_ptr<std::vector<u8>> heap_memory; 661 std::shared_ptr<std::vector<u8>> heap_memory;
627 // The left/right bounds of the address space covered by heap_memory. 662
628 VAddr heap_start = 0; 663 // The end of the currently allocated heap. This is not an inclusive
664 // end of the range. This is essentially 'base_address + current_size'.
629 VAddr heap_end = 0; 665 VAddr heap_end = 0;
630 u64 heap_used = 0;
631}; 666};
632} // namespace Kernel 667} // namespace Kernel
diff --git a/src/core/hle/kernel/wait_object.h b/src/core/hle/kernel/wait_object.h
index 5987fb971..04464a51a 100644
--- a/src/core/hle/kernel/wait_object.h
+++ b/src/core/hle/kernel/wait_object.h
@@ -24,7 +24,7 @@ public:
24 * @param thread The thread about which we're deciding. 24 * @param thread The thread about which we're deciding.
25 * @return True if the current thread should wait due to this object being unavailable 25 * @return True if the current thread should wait due to this object being unavailable
26 */ 26 */
27 virtual bool ShouldWait(Thread* thread) const = 0; 27 virtual bool ShouldWait(const Thread* thread) const = 0;
28 28
29 /// Acquire/lock the object for the specified thread if it is available 29 /// Acquire/lock the object for the specified thread if it is available
30 virtual void Acquire(Thread* thread) = 0; 30 virtual void Acquire(Thread* thread) = 0;
diff --git a/src/core/hle/result.h b/src/core/hle/result.h
index ab84f5ddc..8a3701151 100644
--- a/src/core/hle/result.h
+++ b/src/core/hle/result.h
@@ -119,10 +119,6 @@ union ResultCode {
119 BitField<0, 9, ErrorModule> module; 119 BitField<0, 9, ErrorModule> module;
120 BitField<9, 13, u32> description; 120 BitField<9, 13, u32> description;
121 121
122 // The last bit of `level` is checked by apps and the kernel to determine if a result code is an
123 // error
124 BitField<31, 1, u32> is_error;
125
126 constexpr explicit ResultCode(u32 raw) : raw(raw) {} 122 constexpr explicit ResultCode(u32 raw) : raw(raw) {}
127 123
128 constexpr ResultCode(ErrorModule module_, u32 description_) 124 constexpr ResultCode(ErrorModule module_, u32 description_)
diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp
index 3f009d2b7..85271d418 100644
--- a/src/core/hle/service/am/am.cpp
+++ b/src/core/hle/service/am/am.cpp
@@ -2,10 +2,10 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <algorithm>
5#include <array> 6#include <array>
6#include <cinttypes> 7#include <cinttypes>
7#include <cstring> 8#include <cstring>
8#include <stack>
9#include "audio_core/audio_renderer.h" 9#include "audio_core/audio_renderer.h"
10#include "core/core.h" 10#include "core/core.h"
11#include "core/file_sys/savedata_factory.h" 11#include "core/file_sys/savedata_factory.h"
@@ -13,7 +13,7 @@
13#include "core/hle/kernel/kernel.h" 13#include "core/hle/kernel/kernel.h"
14#include "core/hle/kernel/process.h" 14#include "core/hle/kernel/process.h"
15#include "core/hle/kernel/readable_event.h" 15#include "core/hle/kernel/readable_event.h"
16#include "core/hle/kernel/shared_memory.h" 16#include "core/hle/kernel/transfer_memory.h"
17#include "core/hle/kernel/writable_event.h" 17#include "core/hle/kernel/writable_event.h"
18#include "core/hle/service/acc/profile_manager.h" 18#include "core/hle/service/acc/profile_manager.h"
19#include "core/hle/service/am/am.h" 19#include "core/hle/service/am/am.h"
@@ -93,38 +93,84 @@ void IWindowController::AcquireForegroundRights(Kernel::HLERequestContext& ctx)
93} 93}
94 94
95IAudioController::IAudioController() : ServiceFramework("IAudioController") { 95IAudioController::IAudioController() : ServiceFramework("IAudioController") {
96 // clang-format off
96 static const FunctionInfo functions[] = { 97 static const FunctionInfo functions[] = {
97 {0, &IAudioController::SetExpectedMasterVolume, "SetExpectedMasterVolume"}, 98 {0, &IAudioController::SetExpectedMasterVolume, "SetExpectedMasterVolume"},
98 {1, &IAudioController::GetMainAppletExpectedMasterVolume, 99 {1, &IAudioController::GetMainAppletExpectedMasterVolume, "GetMainAppletExpectedMasterVolume"},
99 "GetMainAppletExpectedMasterVolume"}, 100 {2, &IAudioController::GetLibraryAppletExpectedMasterVolume, "GetLibraryAppletExpectedMasterVolume"},
100 {2, &IAudioController::GetLibraryAppletExpectedMasterVolume, 101 {3, &IAudioController::ChangeMainAppletMasterVolume, "ChangeMainAppletMasterVolume"},
101 "GetLibraryAppletExpectedMasterVolume"}, 102 {4, &IAudioController::SetTransparentAudioRate, "SetTransparentVolumeRate"},
102 {3, nullptr, "ChangeMainAppletMasterVolume"},
103 {4, nullptr, "SetTransparentVolumeRate"},
104 }; 103 };
104 // clang-format on
105
105 RegisterHandlers(functions); 106 RegisterHandlers(functions);
106} 107}
107 108
108IAudioController::~IAudioController() = default; 109IAudioController::~IAudioController() = default;
109 110
110void IAudioController::SetExpectedMasterVolume(Kernel::HLERequestContext& ctx) { 111void IAudioController::SetExpectedMasterVolume(Kernel::HLERequestContext& ctx) {
111 LOG_WARNING(Service_AM, "(STUBBED) called"); 112 IPC::RequestParser rp{ctx};
113 const float main_applet_volume_tmp = rp.Pop<float>();
114 const float library_applet_volume_tmp = rp.Pop<float>();
115
116 LOG_DEBUG(Service_AM, "called. main_applet_volume={}, library_applet_volume={}",
117 main_applet_volume_tmp, library_applet_volume_tmp);
118
119 // Ensure the volume values remain within the 0-100% range
120 main_applet_volume = std::clamp(main_applet_volume_tmp, min_allowed_volume, max_allowed_volume);
121 library_applet_volume =
122 std::clamp(library_applet_volume_tmp, min_allowed_volume, max_allowed_volume);
123
112 IPC::ResponseBuilder rb{ctx, 2}; 124 IPC::ResponseBuilder rb{ctx, 2};
113 rb.Push(RESULT_SUCCESS); 125 rb.Push(RESULT_SUCCESS);
114} 126}
115 127
116void IAudioController::GetMainAppletExpectedMasterVolume(Kernel::HLERequestContext& ctx) { 128void IAudioController::GetMainAppletExpectedMasterVolume(Kernel::HLERequestContext& ctx) {
117 LOG_WARNING(Service_AM, "(STUBBED) called"); 129 LOG_DEBUG(Service_AM, "called. main_applet_volume={}", main_applet_volume);
118 IPC::ResponseBuilder rb{ctx, 3}; 130 IPC::ResponseBuilder rb{ctx, 3};
119 rb.Push(RESULT_SUCCESS); 131 rb.Push(RESULT_SUCCESS);
120 rb.Push(volume); 132 rb.Push(main_applet_volume);
121} 133}
122 134
123void IAudioController::GetLibraryAppletExpectedMasterVolume(Kernel::HLERequestContext& ctx) { 135void IAudioController::GetLibraryAppletExpectedMasterVolume(Kernel::HLERequestContext& ctx) {
124 LOG_WARNING(Service_AM, "(STUBBED) called"); 136 LOG_DEBUG(Service_AM, "called. library_applet_volume={}", library_applet_volume);
125 IPC::ResponseBuilder rb{ctx, 3}; 137 IPC::ResponseBuilder rb{ctx, 3};
126 rb.Push(RESULT_SUCCESS); 138 rb.Push(RESULT_SUCCESS);
127 rb.Push(volume); 139 rb.Push(library_applet_volume);
140}
141
142void IAudioController::ChangeMainAppletMasterVolume(Kernel::HLERequestContext& ctx) {
143 struct Parameters {
144 float volume;
145 s64 fade_time_ns;
146 };
147 static_assert(sizeof(Parameters) == 16);
148
149 IPC::RequestParser rp{ctx};
150 const auto parameters = rp.PopRaw<Parameters>();
151
152 LOG_DEBUG(Service_AM, "called. volume={}, fade_time_ns={}", parameters.volume,
153 parameters.fade_time_ns);
154
155 main_applet_volume = std::clamp(parameters.volume, min_allowed_volume, max_allowed_volume);
156 fade_time_ns = std::chrono::nanoseconds{parameters.fade_time_ns};
157
158 IPC::ResponseBuilder rb{ctx, 2};
159 rb.Push(RESULT_SUCCESS);
160}
161
162void IAudioController::SetTransparentAudioRate(Kernel::HLERequestContext& ctx) {
163 IPC::RequestParser rp{ctx};
164 const float transparent_volume_rate_tmp = rp.Pop<float>();
165
166 LOG_DEBUG(Service_AM, "called. transparent_volume_rate={}", transparent_volume_rate_tmp);
167
168 // Clamp volume range to 0-100%.
169 transparent_volume_rate =
170 std::clamp(transparent_volume_rate_tmp, min_allowed_volume, max_allowed_volume);
171
172 IPC::ResponseBuilder rb{ctx, 2};
173 rb.Push(RESULT_SUCCESS);
128} 174}
129 175
130IDisplayController::IDisplayController() : ServiceFramework("IDisplayController") { 176IDisplayController::IDisplayController() : ServiceFramework("IDisplayController") {
@@ -169,7 +215,21 @@ IDisplayController::IDisplayController() : ServiceFramework("IDisplayController"
169 215
170IDisplayController::~IDisplayController() = default; 216IDisplayController::~IDisplayController() = default;
171 217
172IDebugFunctions::IDebugFunctions() : ServiceFramework("IDebugFunctions") {} 218IDebugFunctions::IDebugFunctions() : ServiceFramework{"IDebugFunctions"} {
219 // clang-format off
220 static const FunctionInfo functions[] = {
221 {0, nullptr, "NotifyMessageToHomeMenuForDebug"},
222 {1, nullptr, "OpenMainApplication"},
223 {10, nullptr, "EmulateButtonEvent"},
224 {20, nullptr, "InvalidateTransitionLayer"},
225 {30, nullptr, "RequestLaunchApplicationWithUserAndArgumentForDebug"},
226 {40, nullptr, "GetAppletResourceUsageInfo"},
227 };
228 // clang-format on
229
230 RegisterHandlers(functions);
231}
232
173IDebugFunctions::~IDebugFunctions() = default; 233IDebugFunctions::~IDebugFunctions() = default;
174 234
175ISelfController::ISelfController(std::shared_ptr<NVFlinger::NVFlinger> nvflinger) 235ISelfController::ISelfController(std::shared_ptr<NVFlinger::NVFlinger> nvflinger)
@@ -179,8 +239,8 @@ ISelfController::ISelfController(std::shared_ptr<NVFlinger::NVFlinger> nvflinger
179 {0, nullptr, "Exit"}, 239 {0, nullptr, "Exit"},
180 {1, &ISelfController::LockExit, "LockExit"}, 240 {1, &ISelfController::LockExit, "LockExit"},
181 {2, &ISelfController::UnlockExit, "UnlockExit"}, 241 {2, &ISelfController::UnlockExit, "UnlockExit"},
182 {3, nullptr, "EnterFatalSection"}, 242 {3, &ISelfController::EnterFatalSection, "EnterFatalSection"},
183 {4, nullptr, "LeaveFatalSection"}, 243 {4, &ISelfController::LeaveFatalSection, "LeaveFatalSection"},
184 {9, &ISelfController::GetLibraryAppletLaunchableEvent, "GetLibraryAppletLaunchableEvent"}, 244 {9, &ISelfController::GetLibraryAppletLaunchableEvent, "GetLibraryAppletLaunchableEvent"},
185 {10, &ISelfController::SetScreenShotPermission, "SetScreenShotPermission"}, 245 {10, &ISelfController::SetScreenShotPermission, "SetScreenShotPermission"},
186 {11, &ISelfController::SetOperationModeChangedNotification, "SetOperationModeChangedNotification"}, 246 {11, &ISelfController::SetOperationModeChangedNotification, "SetOperationModeChangedNotification"},
@@ -225,41 +285,54 @@ ISelfController::ISelfController(std::shared_ptr<NVFlinger::NVFlinger> nvflinger
225 285
226ISelfController::~ISelfController() = default; 286ISelfController::~ISelfController() = default;
227 287
228void ISelfController::SetFocusHandlingMode(Kernel::HLERequestContext& ctx) { 288void ISelfController::LockExit(Kernel::HLERequestContext& ctx) {
229 // Takes 3 input u8s with each field located immediately after the previous
230 // u8, these are bool flags. No output.
231 LOG_WARNING(Service_AM, "(STUBBED) called"); 289 LOG_WARNING(Service_AM, "(STUBBED) called");
232 290
233 IPC::RequestParser rp{ctx}; 291 IPC::ResponseBuilder rb{ctx, 2};
292 rb.Push(RESULT_SUCCESS);
293}
234 294
235 struct FocusHandlingModeParams { 295void ISelfController::UnlockExit(Kernel::HLERequestContext& ctx) {
236 u8 unknown0; 296 LOG_WARNING(Service_AM, "(STUBBED) called");
237 u8 unknown1;
238 u8 unknown2;
239 };
240 auto flags = rp.PopRaw<FocusHandlingModeParams>();
241 297
242 IPC::ResponseBuilder rb{ctx, 2}; 298 IPC::ResponseBuilder rb{ctx, 2};
243 rb.Push(RESULT_SUCCESS); 299 rb.Push(RESULT_SUCCESS);
244} 300}
245 301
246void ISelfController::SetRestartMessageEnabled(Kernel::HLERequestContext& ctx) { 302void ISelfController::EnterFatalSection(Kernel::HLERequestContext& ctx) {
247 LOG_WARNING(Service_AM, "(STUBBED) called"); 303 ++num_fatal_sections_entered;
304 LOG_DEBUG(Service_AM, "called. Num fatal sections entered: {}", num_fatal_sections_entered);
248 305
249 IPC::ResponseBuilder rb{ctx, 2}; 306 IPC::ResponseBuilder rb{ctx, 2};
250 rb.Push(RESULT_SUCCESS); 307 rb.Push(RESULT_SUCCESS);
251} 308}
252 309
253void ISelfController::SetPerformanceModeChangedNotification(Kernel::HLERequestContext& ctx) { 310void ISelfController::LeaveFatalSection(Kernel::HLERequestContext& ctx) {
254 IPC::RequestParser rp{ctx}; 311 LOG_DEBUG(Service_AM, "called.");
255 312
256 bool flag = rp.Pop<bool>(); 313 // Entry and exit of fatal sections must be balanced.
257 LOG_WARNING(Service_AM, "(STUBBED) called flag={}", flag); 314 if (num_fatal_sections_entered == 0) {
315 IPC::ResponseBuilder rb{ctx, 2};
316 rb.Push(ResultCode{ErrorModule::AM, 512});
317 return;
318 }
319
320 --num_fatal_sections_entered;
258 321
259 IPC::ResponseBuilder rb{ctx, 2}; 322 IPC::ResponseBuilder rb{ctx, 2};
260 rb.Push(RESULT_SUCCESS); 323 rb.Push(RESULT_SUCCESS);
261} 324}
262 325
326void ISelfController::GetLibraryAppletLaunchableEvent(Kernel::HLERequestContext& ctx) {
327 LOG_WARNING(Service_AM, "(STUBBED) called");
328
329 launchable_event.writable->Signal();
330
331 IPC::ResponseBuilder rb{ctx, 2, 1};
332 rb.Push(RESULT_SUCCESS);
333 rb.PushCopyObjects(launchable_event.readable);
334}
335
263void ISelfController::SetScreenShotPermission(Kernel::HLERequestContext& ctx) { 336void ISelfController::SetScreenShotPermission(Kernel::HLERequestContext& ctx) {
264 LOG_WARNING(Service_AM, "(STUBBED) called"); 337 LOG_WARNING(Service_AM, "(STUBBED) called");
265 338
@@ -277,40 +350,52 @@ void ISelfController::SetOperationModeChangedNotification(Kernel::HLERequestCont
277 rb.Push(RESULT_SUCCESS); 350 rb.Push(RESULT_SUCCESS);
278} 351}
279 352
280void ISelfController::SetOutOfFocusSuspendingEnabled(Kernel::HLERequestContext& ctx) { 353void ISelfController::SetPerformanceModeChangedNotification(Kernel::HLERequestContext& ctx) {
281 // Takes 3 input u8s with each field located immediately after the previous
282 // u8, these are bool flags. No output.
283 IPC::RequestParser rp{ctx}; 354 IPC::RequestParser rp{ctx};
284 355
285 bool enabled = rp.Pop<bool>(); 356 bool flag = rp.Pop<bool>();
286 LOG_WARNING(Service_AM, "(STUBBED) called enabled={}", enabled); 357 LOG_WARNING(Service_AM, "(STUBBED) called flag={}", flag);
287 358
288 IPC::ResponseBuilder rb{ctx, 2}; 359 IPC::ResponseBuilder rb{ctx, 2};
289 rb.Push(RESULT_SUCCESS); 360 rb.Push(RESULT_SUCCESS);
290} 361}
291 362
292void ISelfController::LockExit(Kernel::HLERequestContext& ctx) { 363void ISelfController::SetFocusHandlingMode(Kernel::HLERequestContext& ctx) {
293 LOG_WARNING(Service_AM, "(STUBBED) called"); 364 // Takes 3 input u8s with each field located immediately after the previous
365 // u8, these are bool flags. No output.
366 IPC::RequestParser rp{ctx};
367
368 struct FocusHandlingModeParams {
369 u8 unknown0;
370 u8 unknown1;
371 u8 unknown2;
372 };
373 const auto flags = rp.PopRaw<FocusHandlingModeParams>();
374
375 LOG_WARNING(Service_AM, "(STUBBED) called. unknown0={}, unknown1={}, unknown2={}",
376 flags.unknown0, flags.unknown1, flags.unknown2);
294 377
295 IPC::ResponseBuilder rb{ctx, 2}; 378 IPC::ResponseBuilder rb{ctx, 2};
296 rb.Push(RESULT_SUCCESS); 379 rb.Push(RESULT_SUCCESS);
297} 380}
298 381
299void ISelfController::UnlockExit(Kernel::HLERequestContext& ctx) { 382void ISelfController::SetRestartMessageEnabled(Kernel::HLERequestContext& ctx) {
300 LOG_WARNING(Service_AM, "(STUBBED) called"); 383 LOG_WARNING(Service_AM, "(STUBBED) called");
301 384
302 IPC::ResponseBuilder rb{ctx, 2}; 385 IPC::ResponseBuilder rb{ctx, 2};
303 rb.Push(RESULT_SUCCESS); 386 rb.Push(RESULT_SUCCESS);
304} 387}
305 388
306void ISelfController::GetLibraryAppletLaunchableEvent(Kernel::HLERequestContext& ctx) { 389void ISelfController::SetOutOfFocusSuspendingEnabled(Kernel::HLERequestContext& ctx) {
307 LOG_WARNING(Service_AM, "(STUBBED) called"); 390 // Takes 3 input u8s with each field located immediately after the previous
391 // u8, these are bool flags. No output.
392 IPC::RequestParser rp{ctx};
308 393
309 launchable_event.writable->Signal(); 394 bool enabled = rp.Pop<bool>();
395 LOG_WARNING(Service_AM, "(STUBBED) called enabled={}", enabled);
310 396
311 IPC::ResponseBuilder rb{ctx, 2, 1}; 397 IPC::ResponseBuilder rb{ctx, 2};
312 rb.Push(RESULT_SUCCESS); 398 rb.Push(RESULT_SUCCESS);
313 rb.PushCopyObjects(launchable_event.readable);
314} 399}
315 400
316void ISelfController::SetScreenShotImageOrientation(Kernel::HLERequestContext& ctx) { 401void ISelfController::SetScreenShotImageOrientation(Kernel::HLERequestContext& ctx) {
@@ -847,19 +932,19 @@ void ILibraryAppletCreator::CreateTransferMemoryStorage(Kernel::HLERequestContex
847 rp.SetCurrentOffset(3); 932 rp.SetCurrentOffset(3);
848 const auto handle{rp.Pop<Kernel::Handle>()}; 933 const auto handle{rp.Pop<Kernel::Handle>()};
849 934
850 const auto shared_mem = 935 const auto transfer_mem =
851 Core::System::GetInstance().CurrentProcess()->GetHandleTable().Get<Kernel::SharedMemory>( 936 Core::System::GetInstance().CurrentProcess()->GetHandleTable().Get<Kernel::TransferMemory>(
852 handle); 937 handle);
853 938
854 if (shared_mem == nullptr) { 939 if (transfer_mem == nullptr) {
855 LOG_ERROR(Service_AM, "shared_mem is a nullpr for handle={:08X}", handle); 940 LOG_ERROR(Service_AM, "shared_mem is a nullpr for handle={:08X}", handle);
856 IPC::ResponseBuilder rb{ctx, 2}; 941 IPC::ResponseBuilder rb{ctx, 2};
857 rb.Push(ResultCode(-1)); 942 rb.Push(ResultCode(-1));
858 return; 943 return;
859 } 944 }
860 945
861 const u8* mem_begin = shared_mem->GetPointer(); 946 const u8* const mem_begin = transfer_mem->GetPointer();
862 const u8* mem_end = mem_begin + shared_mem->GetSize(); 947 const u8* const mem_end = mem_begin + transfer_mem->GetSize();
863 std::vector<u8> memory{mem_begin, mem_end}; 948 std::vector<u8> memory{mem_begin, mem_end};
864 949
865 IPC::ResponseBuilder rb{ctx, 2, 0, 1}; 950 IPC::ResponseBuilder rb{ctx, 2, 0, 1};
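Editor's note: the IAudioController handlers above now parse the request parameters and clamp both applet volumes into [min_allowed_volume, max_allowed_volume] with std::clamp, while ChangeMainAppletMasterVolume also stores a fade time in nanoseconds. A tiny self-contained sketch of that clamping behaviour; the constants and default volumes mirror the am.h changes further below, and everything else (struct name, methods, the main() scenario) is illustrative only:

    #include <algorithm>
    #include <cassert>
    #include <chrono>
    #include <cstdint>

    constexpr float min_allowed_volume = 0.0f;
    constexpr float max_allowed_volume = 1.0f;

    struct AudioControllerSketch {
        float main_applet_volume{0.25f};
        float library_applet_volume{max_allowed_volume};
        std::chrono::nanoseconds fade_time_ns{0};

        void SetExpectedMasterVolume(float main, float library) {
            // Keep both volumes within the 0-100% range.
            main_applet_volume = std::clamp(main, min_allowed_volume, max_allowed_volume);
            library_applet_volume = std::clamp(library, min_allowed_volume, max_allowed_volume);
        }

        void ChangeMainAppletMasterVolume(float volume, std::int64_t fade_ns) {
            main_applet_volume = std::clamp(volume, min_allowed_volume, max_allowed_volume);
            fade_time_ns = std::chrono::nanoseconds{fade_ns};
        }
    };

    int main() {
        AudioControllerSketch ac;
        ac.SetExpectedMasterVolume(1.5f, -0.2f);  // out-of-range requests
        assert(ac.main_applet_volume == 1.0f);    // clamped to 100%
        assert(ac.library_applet_volume == 0.0f); // clamped to 0%
        ac.ChangeMainAppletMasterVolume(0.5f, 50);
        assert(ac.fade_time_ns.count() == 50);
    }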
diff --git a/src/core/hle/service/am/am.h b/src/core/hle/service/am/am.h
index b6113cfdd..991b7d47c 100644
--- a/src/core/hle/service/am/am.h
+++ b/src/core/hle/service/am/am.h
@@ -4,6 +4,7 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <chrono>
7#include <memory> 8#include <memory>
8#include <queue> 9#include <queue>
9#include "core/hle/kernel/writable_event.h" 10#include "core/hle/kernel/writable_event.h"
@@ -81,8 +82,21 @@ private:
81 void SetExpectedMasterVolume(Kernel::HLERequestContext& ctx); 82 void SetExpectedMasterVolume(Kernel::HLERequestContext& ctx);
82 void GetMainAppletExpectedMasterVolume(Kernel::HLERequestContext& ctx); 83 void GetMainAppletExpectedMasterVolume(Kernel::HLERequestContext& ctx);
83 void GetLibraryAppletExpectedMasterVolume(Kernel::HLERequestContext& ctx); 84 void GetLibraryAppletExpectedMasterVolume(Kernel::HLERequestContext& ctx);
85 void ChangeMainAppletMasterVolume(Kernel::HLERequestContext& ctx);
86 void SetTransparentAudioRate(Kernel::HLERequestContext& ctx);
84 87
85 u32 volume{100}; 88 static constexpr float min_allowed_volume = 0.0f;
89 static constexpr float max_allowed_volume = 1.0f;
90
91 float main_applet_volume{0.25f};
92 float library_applet_volume{max_allowed_volume};
93 float transparent_volume_rate{min_allowed_volume};
94
95 // Volume transition fade time in nanoseconds.
96 // e.g. If the main applet volume was 0% and was changed to 50%
97 // with a fade of 50ns, then over the course of 50ns,
98 // the volume will gradually fade up to 50%
99 std::chrono::nanoseconds fade_time_ns{0};
86}; 100};
87 101
88class IDisplayController final : public ServiceFramework<IDisplayController> { 102class IDisplayController final : public ServiceFramework<IDisplayController> {
@@ -103,17 +117,19 @@ public:
103 ~ISelfController() override; 117 ~ISelfController() override;
104 118
105private: 119private:
106 void SetFocusHandlingMode(Kernel::HLERequestContext& ctx);
107 void SetRestartMessageEnabled(Kernel::HLERequestContext& ctx);
108 void SetPerformanceModeChangedNotification(Kernel::HLERequestContext& ctx);
109 void SetOperationModeChangedNotification(Kernel::HLERequestContext& ctx);
110 void SetOutOfFocusSuspendingEnabled(Kernel::HLERequestContext& ctx);
111 void LockExit(Kernel::HLERequestContext& ctx); 120 void LockExit(Kernel::HLERequestContext& ctx);
112 void UnlockExit(Kernel::HLERequestContext& ctx); 121 void UnlockExit(Kernel::HLERequestContext& ctx);
122 void EnterFatalSection(Kernel::HLERequestContext& ctx);
123 void LeaveFatalSection(Kernel::HLERequestContext& ctx);
113 void GetLibraryAppletLaunchableEvent(Kernel::HLERequestContext& ctx); 124 void GetLibraryAppletLaunchableEvent(Kernel::HLERequestContext& ctx);
125 void SetScreenShotPermission(Kernel::HLERequestContext& ctx);
126 void SetOperationModeChangedNotification(Kernel::HLERequestContext& ctx);
127 void SetPerformanceModeChangedNotification(Kernel::HLERequestContext& ctx);
128 void SetFocusHandlingMode(Kernel::HLERequestContext& ctx);
129 void SetRestartMessageEnabled(Kernel::HLERequestContext& ctx);
130 void SetOutOfFocusSuspendingEnabled(Kernel::HLERequestContext& ctx);
114 void SetScreenShotImageOrientation(Kernel::HLERequestContext& ctx); 131 void SetScreenShotImageOrientation(Kernel::HLERequestContext& ctx);
115 void CreateManagedDisplayLayer(Kernel::HLERequestContext& ctx); 132 void CreateManagedDisplayLayer(Kernel::HLERequestContext& ctx);
116 void SetScreenShotPermission(Kernel::HLERequestContext& ctx);
117 void SetHandlesRequestToDisplay(Kernel::HLERequestContext& ctx); 133 void SetHandlesRequestToDisplay(Kernel::HLERequestContext& ctx);
118 void SetIdleTimeDetectionExtension(Kernel::HLERequestContext& ctx); 134 void SetIdleTimeDetectionExtension(Kernel::HLERequestContext& ctx);
119 void GetIdleTimeDetectionExtension(Kernel::HLERequestContext& ctx); 135 void GetIdleTimeDetectionExtension(Kernel::HLERequestContext& ctx);
@@ -121,6 +137,7 @@ private:
121 std::shared_ptr<NVFlinger::NVFlinger> nvflinger; 137 std::shared_ptr<NVFlinger::NVFlinger> nvflinger;
122 Kernel::EventPair launchable_event; 138 Kernel::EventPair launchable_event;
123 u32 idle_time_detection_extension = 0; 139 u32 idle_time_detection_extension = 0;
140 u64 num_fatal_sections_entered = 0;
124}; 141};
125 142
126class ICommonStateGetter final : public ServiceFramework<ICommonStateGetter> { 143class ICommonStateGetter final : public ServiceFramework<ICommonStateGetter> {
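The fade_time_ns comment added to am.h above describes a volume ramp but the header only declares the fields. A hedged sketch of the behaviour it describes, assuming a simple linear ramp (the interpolation shape is an assumption, not something the header specifies):

#include <algorithm>
#include <chrono>

// Volume at `elapsed` nanoseconds into a fade from `start` to `target` over `fade_time`.
// The 0.0f..1.0f clamp mirrors min_allowed_volume/max_allowed_volume in the class above.
float VolumeAt(float start, float target, std::chrono::nanoseconds fade_time,
               std::chrono::nanoseconds elapsed) {
    if (fade_time.count() <= 0 || elapsed >= fade_time) {
        return target; // No fade requested, or the fade has already finished.
    }
    const float t = static_cast<float>(elapsed.count()) / static_cast<float>(fade_time.count());
    return std::clamp(start + (target - start) * t, 0.0f, 1.0f);
}

// Example from the comment: 0% -> 50% with a 50ns fade; halfway through (25ns) this yields 25%.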
diff --git a/src/core/hle/service/audio/audin_u.cpp b/src/core/hle/service/audio/audin_u.cpp
index 088410564..e5daefdde 100644
--- a/src/core/hle/service/audio/audin_u.cpp
+++ b/src/core/hle/service/audio/audin_u.cpp
@@ -2,9 +2,6 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include "common/logging/log.h"
6#include "core/hle/ipc_helpers.h"
7#include "core/hle/kernel/hle_ipc.h"
8#include "core/hle/service/audio/audin_u.h" 5#include "core/hle/service/audio/audin_u.h"
9 6
10namespace Service::Audio { 7namespace Service::Audio {
@@ -33,7 +30,6 @@ public:
33 30
34 RegisterHandlers(functions); 31 RegisterHandlers(functions);
35 } 32 }
36 ~IAudioIn() = default;
37}; 33};
38 34
39AudInU::AudInU() : ServiceFramework("audin:u") { 35AudInU::AudInU() : ServiceFramework("audin:u") {
diff --git a/src/core/hle/service/audio/audout_u.cpp b/src/core/hle/service/audio/audout_u.cpp
index 21f5e64c7..39acb7b23 100644
--- a/src/core/hle/service/audio/audout_u.cpp
+++ b/src/core/hle/service/audio/audout_u.cpp
@@ -150,7 +150,6 @@ private:
150 void GetReleasedAudioOutBufferImpl(Kernel::HLERequestContext& ctx) { 150 void GetReleasedAudioOutBufferImpl(Kernel::HLERequestContext& ctx) {
151 LOG_DEBUG(Service_Audio, "called {}", ctx.Description()); 151 LOG_DEBUG(Service_Audio, "called {}", ctx.Description());
152 152
153 IPC::RequestParser rp{ctx};
154 const u64 max_count{ctx.GetWriteBufferSize() / sizeof(u64)}; 153 const u64 max_count{ctx.GetWriteBufferSize() / sizeof(u64)};
155 const auto released_buffers{audio_core.GetTagsAndReleaseBuffers(stream, max_count)}; 154 const auto released_buffers{audio_core.GetTagsAndReleaseBuffers(stream, max_count)};
156 155
@@ -194,12 +193,9 @@ private:
194void AudOutU::ListAudioOutsImpl(Kernel::HLERequestContext& ctx) { 193void AudOutU::ListAudioOutsImpl(Kernel::HLERequestContext& ctx) {
195 LOG_DEBUG(Service_Audio, "called"); 194 LOG_DEBUG(Service_Audio, "called");
196 195
197 IPC::RequestParser rp{ctx};
198
199 ctx.WriteBuffer(DefaultDevice); 196 ctx.WriteBuffer(DefaultDevice);
200 197
201 IPC::ResponseBuilder rb{ctx, 3}; 198 IPC::ResponseBuilder rb{ctx, 3};
202
203 rb.Push(RESULT_SUCCESS); 199 rb.Push(RESULT_SUCCESS);
204 rb.Push<u32>(1); // Amount of audio devices 200 rb.Push<u32>(1); // Amount of audio devices
205} 201}
diff --git a/src/core/hle/service/audio/audrec_u.cpp b/src/core/hle/service/audio/audrec_u.cpp
index 6956a2e64..1a5aed9ed 100644
--- a/src/core/hle/service/audio/audrec_u.cpp
+++ b/src/core/hle/service/audio/audrec_u.cpp
@@ -2,9 +2,6 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include "common/logging/log.h"
6#include "core/hle/ipc_helpers.h"
7#include "core/hle/kernel/hle_ipc.h"
8#include "core/hle/service/audio/audrec_u.h" 5#include "core/hle/service/audio/audrec_u.h"
9 6
10namespace Service::Audio { 7namespace Service::Audio {
@@ -30,7 +27,6 @@ public:
30 27
31 RegisterHandlers(functions); 28 RegisterHandlers(functions);
32 } 29 }
33 ~IFinalOutputRecorder() = default;
34}; 30};
35 31
36AudRecU::AudRecU() : ServiceFramework("audrec:u") { 32AudRecU::AudRecU() : ServiceFramework("audrec:u") {
diff --git a/src/core/hle/service/audio/audren_u.cpp b/src/core/hle/service/audio/audren_u.cpp
index c9de10a24..1dde6edb7 100644
--- a/src/core/hle/service/audio/audren_u.cpp
+++ b/src/core/hle/service/audio/audren_u.cpp
@@ -10,6 +10,7 @@
10#include "common/alignment.h" 10#include "common/alignment.h"
11#include "common/common_funcs.h" 11#include "common/common_funcs.h"
12#include "common/logging/log.h" 12#include "common/logging/log.h"
13#include "common/string_util.h"
13#include "core/core.h" 14#include "core/core.h"
14#include "core/hle/ipc_helpers.h" 15#include "core/hle/ipc_helpers.h"
15#include "core/hle/kernel/hle_ipc.h" 16#include "core/hle/kernel/hle_ipc.h"
@@ -184,7 +185,6 @@ public:
184private: 185private:
185 void ListAudioDeviceName(Kernel::HLERequestContext& ctx) { 186 void ListAudioDeviceName(Kernel::HLERequestContext& ctx) {
186 LOG_WARNING(Service_Audio, "(STUBBED) called"); 187 LOG_WARNING(Service_Audio, "(STUBBED) called");
187 IPC::RequestParser rp{ctx};
188 188
189 constexpr std::array<char, 15> audio_interface{{"AudioInterface"}}; 189 constexpr std::array<char, 15> audio_interface{{"AudioInterface"}};
190 ctx.WriteBuffer(audio_interface); 190 ctx.WriteBuffer(audio_interface);
@@ -195,13 +195,13 @@ private:
195 } 195 }
196 196
197 void SetAudioDeviceOutputVolume(Kernel::HLERequestContext& ctx) { 197 void SetAudioDeviceOutputVolume(Kernel::HLERequestContext& ctx) {
198 LOG_WARNING(Service_Audio, "(STUBBED) called");
199
200 IPC::RequestParser rp{ctx}; 198 IPC::RequestParser rp{ctx};
201 f32 volume = static_cast<f32>(rp.Pop<u32>()); 199 const f32 volume = rp.Pop<f32>();
202 200
203 auto file_buffer = ctx.ReadBuffer(); 201 const auto device_name_buffer = ctx.ReadBuffer();
204 auto end = std::find(file_buffer.begin(), file_buffer.end(), '\0'); 202 const std::string name = Common::StringFromBuffer(device_name_buffer);
203
204 LOG_WARNING(Service_Audio, "(STUBBED) called. name={}, volume={}", name, volume);
205 205
206 IPC::ResponseBuilder rb{ctx, 2}; 206 IPC::ResponseBuilder rb{ctx, 2};
207 rb.Push(RESULT_SUCCESS); 207 rb.Push(RESULT_SUCCESS);
@@ -209,7 +209,6 @@ private:
209 209
210 void GetActiveAudioDeviceName(Kernel::HLERequestContext& ctx) { 210 void GetActiveAudioDeviceName(Kernel::HLERequestContext& ctx) {
211 LOG_WARNING(Service_Audio, "(STUBBED) called"); 211 LOG_WARNING(Service_Audio, "(STUBBED) called");
212 IPC::RequestParser rp{ctx};
213 212
214 constexpr std::array<char, 12> audio_interface{{"AudioDevice"}}; 213 constexpr std::array<char, 12> audio_interface{{"AudioDevice"}};
215 ctx.WriteBuffer(audio_interface); 214 ctx.WriteBuffer(audio_interface);
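SetAudioDeviceOutputVolume above now reads the device name with Common::StringFromBuffer instead of the manual std::find loop it replaces. A standalone equivalent of that helper's use here, keeping everything up to the first NUL terminator (the exact semantics of yuzu's helper are assumed to match the old std::find logic shown in the removed lines):

#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>

// Extract a NUL-terminated string from a raw IPC byte buffer.
std::string StringUpToNul(const std::vector<std::uint8_t>& buffer) {
    const auto nul = std::find(buffer.begin(), buffer.end(), '\0');
    return std::string(buffer.begin(), nul);
}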
diff --git a/src/core/hle/service/audio/hwopus.cpp b/src/core/hle/service/audio/hwopus.cpp
index 377e12cfa..cb4a1160d 100644
--- a/src/core/hle/service/audio/hwopus.cpp
+++ b/src/core/hle/service/audio/hwopus.cpp
@@ -8,6 +8,7 @@
8#include <vector> 8#include <vector>
9 9
10#include <opus.h> 10#include <opus.h>
11#include <opus_multistream.h>
11 12
12#include "common/assert.h" 13#include "common/assert.h"
13#include "common/logging/log.h" 14#include "common/logging/log.h"
@@ -18,12 +19,12 @@
18namespace Service::Audio { 19namespace Service::Audio {
19namespace { 20namespace {
20struct OpusDeleter { 21struct OpusDeleter {
21 void operator()(void* ptr) const { 22 void operator()(OpusMSDecoder* ptr) const {
22 operator delete(ptr); 23 opus_multistream_decoder_destroy(ptr);
23 } 24 }
24}; 25};
25 26
26using OpusDecoderPtr = std::unique_ptr<OpusDecoder, OpusDeleter>; 27using OpusDecoderPtr = std::unique_ptr<OpusMSDecoder, OpusDeleter>;
27 28
28struct OpusPacketHeader { 29struct OpusPacketHeader {
29 // Packet size in bytes. 30 // Packet size in bytes.
@@ -33,7 +34,7 @@ struct OpusPacketHeader {
33}; 34};
34static_assert(sizeof(OpusPacketHeader) == 0x8, "OpusHeader is an invalid size"); 35static_assert(sizeof(OpusPacketHeader) == 0x8, "OpusHeader is an invalid size");
35 36
36class OpusDecoderStateBase { 37class OpusDecoderState {
37public: 38public:
38 /// Describes extra behavior that may be asked of the decoding context. 39 /// Describes extra behavior that may be asked of the decoding context.
39 enum class ExtraBehavior { 40 enum class ExtraBehavior {
@@ -49,22 +50,13 @@ public:
49 Enabled, 50 Enabled,
50 }; 51 };
51 52
52 virtual ~OpusDecoderStateBase() = default;
53
54 // Decodes interleaved Opus packets. Optionally allows reporting time taken to
55 // perform the decoding, as well as any relevant extra behavior.
56 virtual void DecodeInterleaved(Kernel::HLERequestContext& ctx, PerfTime perf_time,
57 ExtraBehavior extra_behavior) = 0;
58};
59
60// Represents the decoder state for a non-multistream decoder.
61class OpusDecoderState final : public OpusDecoderStateBase {
62public:
63 explicit OpusDecoderState(OpusDecoderPtr decoder, u32 sample_rate, u32 channel_count) 53 explicit OpusDecoderState(OpusDecoderPtr decoder, u32 sample_rate, u32 channel_count)
64 : decoder{std::move(decoder)}, sample_rate{sample_rate}, channel_count{channel_count} {} 54 : decoder{std::move(decoder)}, sample_rate{sample_rate}, channel_count{channel_count} {}
65 55
56 // Decodes interleaved Opus packets. Optionally allows reporting time taken to
57 // perform the decoding, as well as any relevant extra behavior.
66 void DecodeInterleaved(Kernel::HLERequestContext& ctx, PerfTime perf_time, 58 void DecodeInterleaved(Kernel::HLERequestContext& ctx, PerfTime perf_time,
67 ExtraBehavior extra_behavior) override { 59 ExtraBehavior extra_behavior) {
68 if (perf_time == PerfTime::Disabled) { 60 if (perf_time == PerfTime::Disabled) {
69 DecodeInterleavedHelper(ctx, nullptr, extra_behavior); 61 DecodeInterleavedHelper(ctx, nullptr, extra_behavior);
70 } else { 62 } else {
@@ -135,7 +127,7 @@ private:
135 127
136 const int frame_size = (static_cast<int>(raw_output_sz / sizeof(s16) / channel_count)); 128 const int frame_size = (static_cast<int>(raw_output_sz / sizeof(s16) / channel_count));
137 const auto out_sample_count = 129 const auto out_sample_count =
138 opus_decode(decoder.get(), frame, hdr.size, output.data(), frame_size, 0); 130 opus_multistream_decode(decoder.get(), frame, hdr.size, output.data(), frame_size, 0);
139 if (out_sample_count < 0) { 131 if (out_sample_count < 0) {
140 LOG_ERROR(Audio, 132 LOG_ERROR(Audio,
141 "Incorrect sample count received from opus_decode, " 133 "Incorrect sample count received from opus_decode, "
@@ -158,7 +150,7 @@ private:
158 void ResetDecoderContext() { 150 void ResetDecoderContext() {
159 ASSERT(decoder != nullptr); 151 ASSERT(decoder != nullptr);
160 152
161 opus_decoder_ctl(decoder.get(), OPUS_RESET_STATE); 153 opus_multistream_decoder_ctl(decoder.get(), OPUS_RESET_STATE);
162 } 154 }
163 155
164 OpusDecoderPtr decoder; 156 OpusDecoderPtr decoder;
@@ -168,7 +160,7 @@ private:
168 160
169class IHardwareOpusDecoderManager final : public ServiceFramework<IHardwareOpusDecoderManager> { 161class IHardwareOpusDecoderManager final : public ServiceFramework<IHardwareOpusDecoderManager> {
170public: 162public:
171 explicit IHardwareOpusDecoderManager(std::unique_ptr<OpusDecoderStateBase> decoder_state) 163 explicit IHardwareOpusDecoderManager(OpusDecoderState decoder_state)
172 : ServiceFramework("IHardwareOpusDecoderManager"), decoder_state{std::move(decoder_state)} { 164 : ServiceFramework("IHardwareOpusDecoderManager"), decoder_state{std::move(decoder_state)} {
173 // clang-format off 165 // clang-format off
174 static const FunctionInfo functions[] = { 166 static const FunctionInfo functions[] = {
@@ -190,35 +182,51 @@ private:
190 void DecodeInterleavedOld(Kernel::HLERequestContext& ctx) { 182 void DecodeInterleavedOld(Kernel::HLERequestContext& ctx) {
191 LOG_DEBUG(Audio, "called"); 183 LOG_DEBUG(Audio, "called");
192 184
193 decoder_state->DecodeInterleaved(ctx, OpusDecoderStateBase::PerfTime::Disabled, 185 decoder_state.DecodeInterleaved(ctx, OpusDecoderState::PerfTime::Disabled,
194 OpusDecoderStateBase::ExtraBehavior::None); 186 OpusDecoderState::ExtraBehavior::None);
195 } 187 }
196 188
197 void DecodeInterleavedWithPerfOld(Kernel::HLERequestContext& ctx) { 189 void DecodeInterleavedWithPerfOld(Kernel::HLERequestContext& ctx) {
198 LOG_DEBUG(Audio, "called"); 190 LOG_DEBUG(Audio, "called");
199 191
200 decoder_state->DecodeInterleaved(ctx, OpusDecoderStateBase::PerfTime::Enabled, 192 decoder_state.DecodeInterleaved(ctx, OpusDecoderState::PerfTime::Enabled,
201 OpusDecoderStateBase::ExtraBehavior::None); 193 OpusDecoderState::ExtraBehavior::None);
202 } 194 }
203 195
204 void DecodeInterleaved(Kernel::HLERequestContext& ctx) { 196 void DecodeInterleaved(Kernel::HLERequestContext& ctx) {
205 LOG_DEBUG(Audio, "called"); 197 LOG_DEBUG(Audio, "called");
206 198
207 IPC::RequestParser rp{ctx}; 199 IPC::RequestParser rp{ctx};
208 const auto extra_behavior = rp.Pop<bool>() 200 const auto extra_behavior = rp.Pop<bool>() ? OpusDecoderState::ExtraBehavior::ResetContext
209 ? OpusDecoderStateBase::ExtraBehavior::ResetContext 201 : OpusDecoderState::ExtraBehavior::None;
210 : OpusDecoderStateBase::ExtraBehavior::None;
211 202
212 decoder_state->DecodeInterleaved(ctx, OpusDecoderStateBase::PerfTime::Enabled, 203 decoder_state.DecodeInterleaved(ctx, OpusDecoderState::PerfTime::Enabled, extra_behavior);
213 extra_behavior);
214 } 204 }
215 205
216 std::unique_ptr<OpusDecoderStateBase> decoder_state; 206 OpusDecoderState decoder_state;
217}; 207};
218 208
219std::size_t WorkerBufferSize(u32 channel_count) { 209std::size_t WorkerBufferSize(u32 channel_count) {
220 ASSERT_MSG(channel_count == 1 || channel_count == 2, "Invalid channel count"); 210 ASSERT_MSG(channel_count == 1 || channel_count == 2, "Invalid channel count");
221 return opus_decoder_get_size(static_cast<int>(channel_count)); 211 constexpr int num_streams = 1;
212 const int num_stereo_streams = channel_count == 2 ? 1 : 0;
213 return opus_multistream_decoder_get_size(num_streams, num_stereo_streams);
214}
215
216// Creates the mapping table that maps the input channels to the particular
217// output channels. In the stereo case, we map the left and right input channels
218// to the left and right output channels respectively.
219//
220// However, in the monophonic case, we only map the one available channel
221// to the sole output channel. We specify 255 for the would-be right channel
222// as this is a special value defined by Opus to indicate to the decoder to
223// ignore that channel.
224std::array<u8, 2> CreateMappingTable(u32 channel_count) {
225 if (channel_count == 2) {
226 return {{0, 1}};
227 }
228
229 return {{0, 255}};
222} 230}
223} // Anonymous namespace 231} // Anonymous namespace
224 232
@@ -259,9 +267,15 @@ void HwOpus::OpenOpusDecoder(Kernel::HLERequestContext& ctx) {
259 const std::size_t worker_sz = WorkerBufferSize(channel_count); 267 const std::size_t worker_sz = WorkerBufferSize(channel_count);
260 ASSERT_MSG(buffer_sz >= worker_sz, "Worker buffer too large"); 268 ASSERT_MSG(buffer_sz >= worker_sz, "Worker buffer too large");
261 269
262 OpusDecoderPtr decoder{static_cast<OpusDecoder*>(operator new(worker_sz))}; 270 const int num_stereo_streams = channel_count == 2 ? 1 : 0;
263 if (const int err = opus_decoder_init(decoder.get(), sample_rate, channel_count)) { 271 const auto mapping_table = CreateMappingTable(channel_count);
264 LOG_ERROR(Audio, "Failed to init opus decoder with error={}", err); 272
273 int error = 0;
274 OpusDecoderPtr decoder{
275 opus_multistream_decoder_create(sample_rate, static_cast<int>(channel_count), 1,
276 num_stereo_streams, mapping_table.data(), &error)};
277 if (error != OPUS_OK || decoder == nullptr) {
278 LOG_ERROR(Audio, "Failed to create Opus decoder (error={}).", error);
265 IPC::ResponseBuilder rb{ctx, 2}; 279 IPC::ResponseBuilder rb{ctx, 2};
266 // TODO(ogniK): Use correct error code 280 // TODO(ogniK): Use correct error code
267 rb.Push(ResultCode(-1)); 281 rb.Push(ResultCode(-1));
@@ -271,7 +285,7 @@ void HwOpus::OpenOpusDecoder(Kernel::HLERequestContext& ctx) {
271 IPC::ResponseBuilder rb{ctx, 2, 0, 1}; 285 IPC::ResponseBuilder rb{ctx, 2, 0, 1};
272 rb.Push(RESULT_SUCCESS); 286 rb.Push(RESULT_SUCCESS);
273 rb.PushIpcInterface<IHardwareOpusDecoderManager>( 287 rb.PushIpcInterface<IHardwareOpusDecoderManager>(
274 std::make_unique<OpusDecoderState>(std::move(decoder), sample_rate, channel_count)); 288 OpusDecoderState{std::move(decoder), sample_rate, channel_count});
275} 289}
276 290
277HwOpus::HwOpus() : ServiceFramework("hwopus") { 291HwOpus::HwOpus() : ServiceFramework("hwopus") {
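For reference, the decoder lifecycle that OpenOpusDecoder above now drives can be exercised in isolation with the same public libopus multistream calls. The sample rate and stereo layout below are example values, and the error handling mirrors the OPUS_OK / nullptr check in the hunk; this is a sketch, not the emulator's code:

#include <array>
#include <cstdio>
#include <opus_multistream.h>

int main() {
    const opus_int32 sample_rate = 48000;
    const int channels = 2;
    const int streams = 1;
    const int coupled_streams = 1; // stereo -> one coupled stream
    const std::array<unsigned char, 2> mapping{{0, 1}};

    int error = OPUS_OK;
    OpusMSDecoder* decoder = opus_multistream_decoder_create(
        sample_rate, channels, streams, coupled_streams, mapping.data(), &error);
    if (error != OPUS_OK || decoder == nullptr) {
        std::fprintf(stderr, "opus_multistream_decoder_create failed: %d\n", error);
        return 1;
    }

    // An encoded packet would be fed here with opus_multistream_decode(decoder, data,
    // len, pcm, frame_size, 0); a negative return value is an Opus error code, as the
    // DecodeInterleavedHelper check above expects.

    opus_multistream_decoder_destroy(decoder);
    return 0;
}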
diff --git a/src/core/hle/service/fatal/fatal.cpp b/src/core/hle/service/fatal/fatal.cpp
index 770590d0b..2c229bcad 100644
--- a/src/core/hle/service/fatal/fatal.cpp
+++ b/src/core/hle/service/fatal/fatal.cpp
@@ -25,21 +25,34 @@ Module::Interface::Interface(std::shared_ptr<Module> module, const char* name)
25Module::Interface::~Interface() = default; 25Module::Interface::~Interface() = default;
26 26
27struct FatalInfo { 27struct FatalInfo {
28 std::array<u64_le, 31> registers{}; // TODO(ogniK): See if this actually is registers or 28 enum class Architecture : s32 {
29 // not(find a game which has non zero valeus) 29 AArch64,
30 u64_le unk0{}; 30 AArch32,
31 u64_le unk1{}; 31 };
32 u64_le unk2{}; 32
33 u64_le unk3{}; 33 const char* ArchAsString() const {
34 u64_le unk4{}; 34 return arch == Architecture::AArch64 ? "AArch64" : "AArch32";
35 u64_le unk5{}; 35 }
36 u64_le unk6{}; 36
37 std::array<u64_le, 31> registers{};
38 u64_le sp{};
39 u64_le pc{};
40 u64_le pstate{};
41 u64_le afsr0{};
42 u64_le afsr1{};
43 u64_le esr{};
44 u64_le far{};
37 45
38 std::array<u64_le, 32> backtrace{}; 46 std::array<u64_le, 32> backtrace{};
39 u64_le unk7{}; 47 u64_le program_entry_point{};
40 u64_le unk8{}; 48
49 // Bit flags that indicate which registers have been set with values
50 // for this context. The service itself uses these to determine which
51 // registers to specifically print out.
52 u64_le set_flags{};
53
41 u32_le backtrace_size{}; 54 u32_le backtrace_size{};
42 u32_le unk9{}; 55 Architecture arch{};
43 u32_le unk10{}; // TODO(ogniK): Is this even used or is it just padding? 56 u32_le unk10{}; // TODO(ogniK): Is this even used or is it just padding?
44}; 57};
45static_assert(sizeof(FatalInfo) == 0x250, "FatalInfo is an invalid size"); 58static_assert(sizeof(FatalInfo) == 0x250, "FatalInfo is an invalid size");
@@ -52,36 +65,36 @@ enum class FatalType : u32 {
52 65
53static void GenerateErrorReport(ResultCode error_code, const FatalInfo& info) { 66static void GenerateErrorReport(ResultCode error_code, const FatalInfo& info) {
54 const auto title_id = Core::CurrentProcess()->GetTitleID(); 67 const auto title_id = Core::CurrentProcess()->GetTitleID();
55 std::string crash_report = 68 std::string crash_report = fmt::format(
56 fmt::format("Yuzu {}-{} crash report\n" 69 "Yuzu {}-{} crash report\n"
57 "Title ID: {:016x}\n" 70 "Title ID: {:016x}\n"
58 "Result: 0x{:X} ({:04}-{:04d})\n" 71 "Result: 0x{:X} ({:04}-{:04d})\n"
59 "\n", 72 "Set flags: 0x{:16X}\n"
60 Common::g_scm_branch, Common::g_scm_desc, title_id, error_code.raw, 73 "Program entry point: 0x{:16X}\n"
61 2000 + static_cast<u32>(error_code.module.Value()), 74 "\n",
62 static_cast<u32>(error_code.description.Value()), info.unk8, info.unk7); 75 Common::g_scm_branch, Common::g_scm_desc, title_id, error_code.raw,
76 2000 + static_cast<u32>(error_code.module.Value()),
77 static_cast<u32>(error_code.description.Value()), info.set_flags, info.program_entry_point);
63 if (info.backtrace_size != 0x0) { 78 if (info.backtrace_size != 0x0) {
64 crash_report += "Registers:\n"; 79 crash_report += "Registers:\n";
65 // TODO(ogniK): This is just a guess, find a game which actually has non zero values
66 for (size_t i = 0; i < info.registers.size(); i++) { 80 for (size_t i = 0; i < info.registers.size(); i++) {
67 crash_report += 81 crash_report +=
68 fmt::format(" X[{:02d}]: {:016x}\n", i, info.registers[i]); 82 fmt::format(" X[{:02d}]: {:016x}\n", i, info.registers[i]);
69 } 83 }
70 crash_report += fmt::format(" Unknown 0: {:016x}\n", info.unk0); 84 crash_report += fmt::format(" SP: {:016x}\n", info.sp);
71 crash_report += fmt::format(" Unknown 1: {:016x}\n", info.unk1); 85 crash_report += fmt::format(" PC: {:016x}\n", info.pc);
72 crash_report += fmt::format(" Unknown 2: {:016x}\n", info.unk2); 86 crash_report += fmt::format(" PSTATE: {:016x}\n", info.pstate);
73 crash_report += fmt::format(" Unknown 3: {:016x}\n", info.unk3); 87 crash_report += fmt::format(" AFSR0: {:016x}\n", info.afsr0);
74 crash_report += fmt::format(" Unknown 4: {:016x}\n", info.unk4); 88 crash_report += fmt::format(" AFSR1: {:016x}\n", info.afsr1);
75 crash_report += fmt::format(" Unknown 5: {:016x}\n", info.unk5); 89 crash_report += fmt::format(" ESR: {:016x}\n", info.esr);
76 crash_report += fmt::format(" Unknown 6: {:016x}\n", info.unk6); 90 crash_report += fmt::format(" FAR: {:016x}\n", info.far);
77 crash_report += "\nBacktrace:\n"; 91 crash_report += "\nBacktrace:\n";
78 for (size_t i = 0; i < info.backtrace_size; i++) { 92 for (size_t i = 0; i < info.backtrace_size; i++) {
79 crash_report += 93 crash_report +=
80 fmt::format(" Backtrace[{:02d}]: {:016x}\n", i, info.backtrace[i]); 94 fmt::format(" Backtrace[{:02d}]: {:016x}\n", i, info.backtrace[i]);
81 } 95 }
82 crash_report += fmt::format("\nUnknown 7: 0x{:016x}\n", info.unk7); 96
83 crash_report += fmt::format("Unknown 8: 0x{:016x}\n", info.unk8); 97 crash_report += fmt::format("Architecture: {}\n", info.ArchAsString());
84 crash_report += fmt::format("Unknown 9: 0x{:016x}\n", info.unk9);
85 crash_report += fmt::format("Unknown 10: 0x{:016x}\n", info.unk10); 98 crash_report += fmt::format("Unknown 10: 0x{:016x}\n", info.unk10);
86 } 99 }
87 100
@@ -125,13 +138,13 @@ static void ThrowFatalError(ResultCode error_code, FatalType fatal_type, const F
125 case FatalType::ErrorReport: 138 case FatalType::ErrorReport:
126 GenerateErrorReport(error_code, info); 139 GenerateErrorReport(error_code, info);
127 break; 140 break;
128 }; 141 }
129} 142}
130 143
131void Module::Interface::ThrowFatal(Kernel::HLERequestContext& ctx) { 144void Module::Interface::ThrowFatal(Kernel::HLERequestContext& ctx) {
132 LOG_ERROR(Service_Fatal, "called"); 145 LOG_ERROR(Service_Fatal, "called");
133 IPC::RequestParser rp{ctx}; 146 IPC::RequestParser rp{ctx};
134 auto error_code = rp.Pop<ResultCode>(); 147 const auto error_code = rp.Pop<ResultCode>();
135 148
136 ThrowFatalError(error_code, FatalType::ErrorScreen, {}); 149 ThrowFatalError(error_code, FatalType::ErrorScreen, {});
137 IPC::ResponseBuilder rb{ctx, 2}; 150 IPC::ResponseBuilder rb{ctx, 2};
@@ -141,8 +154,8 @@ void Module::Interface::ThrowFatal(Kernel::HLERequestContext& ctx) {
141void Module::Interface::ThrowFatalWithPolicy(Kernel::HLERequestContext& ctx) { 154void Module::Interface::ThrowFatalWithPolicy(Kernel::HLERequestContext& ctx) {
142 LOG_ERROR(Service_Fatal, "called"); 155 LOG_ERROR(Service_Fatal, "called");
143 IPC::RequestParser rp(ctx); 156 IPC::RequestParser rp(ctx);
144 auto error_code = rp.Pop<ResultCode>(); 157 const auto error_code = rp.Pop<ResultCode>();
145 auto fatal_type = rp.PopEnum<FatalType>(); 158 const auto fatal_type = rp.PopEnum<FatalType>();
146 159
147 ThrowFatalError(error_code, fatal_type, {}); // No info is passed with ThrowFatalWithPolicy 160 ThrowFatalError(error_code, fatal_type, {}); // No info is passed with ThrowFatalWithPolicy
148 IPC::ResponseBuilder rb{ctx, 2}; 161 IPC::ResponseBuilder rb{ctx, 2};
@@ -152,9 +165,9 @@ void Module::Interface::ThrowFatalWithPolicy(Kernel::HLERequestContext& ctx) {
152void Module::Interface::ThrowFatalWithCpuContext(Kernel::HLERequestContext& ctx) { 165void Module::Interface::ThrowFatalWithCpuContext(Kernel::HLERequestContext& ctx) {
153 LOG_ERROR(Service_Fatal, "called"); 166 LOG_ERROR(Service_Fatal, "called");
154 IPC::RequestParser rp(ctx); 167 IPC::RequestParser rp(ctx);
155 auto error_code = rp.Pop<ResultCode>(); 168 const auto error_code = rp.Pop<ResultCode>();
156 auto fatal_type = rp.PopEnum<FatalType>(); 169 const auto fatal_type = rp.PopEnum<FatalType>();
157 auto fatal_info = ctx.ReadBuffer(); 170 const auto fatal_info = ctx.ReadBuffer();
158 FatalInfo info{}; 171 FatalInfo info{};
159 172
160 ASSERT_MSG(fatal_info.size() == sizeof(FatalInfo), "Invalid fatal info buffer size!"); 173 ASSERT_MSG(fatal_info.size() == sizeof(FatalInfo), "Invalid fatal info buffer size!");
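The new FatalInfo layout above documents set_flags as per-register "this value was provided" bits that the service consults when printing. As a hedged illustration of how such a gate could look (the bit-to-register assignment, bit i <-> X[i], is an assumption made for this sketch; the struct only states that the flags exist):

#include <array>
#include <cstdint>
#include <string>
#include <fmt/format.h>

// Append only the registers whose corresponding bit is set in set_flags.
std::string DumpSetRegisters(const std::array<std::uint64_t, 31>& registers,
                             std::uint64_t set_flags) {
    std::string out;
    for (std::size_t i = 0; i < registers.size(); ++i) {
        if ((set_flags >> i) & 1) {
            out += fmt::format("    X[{:02d}]: {:016x}\n", i, registers[i]);
        }
    }
    return out;
}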
diff --git a/src/core/hle/service/filesystem/filesystem.cpp b/src/core/hle/service/filesystem/filesystem.cpp
index c6da2df43..4c2b371c3 100644
--- a/src/core/hle/service/filesystem/filesystem.cpp
+++ b/src/core/hle/service/filesystem/filesystem.cpp
@@ -197,13 +197,16 @@ ResultCode VfsDirectoryServiceWrapper::RenameDirectory(const std::string& src_pa
197 197
198ResultVal<FileSys::VirtualFile> VfsDirectoryServiceWrapper::OpenFile(const std::string& path_, 198ResultVal<FileSys::VirtualFile> VfsDirectoryServiceWrapper::OpenFile(const std::string& path_,
199 FileSys::Mode mode) const { 199 FileSys::Mode mode) const {
200 std::string path(FileUtil::SanitizePath(path_)); 200 const std::string path(FileUtil::SanitizePath(path_));
201 auto npath = path; 201 std::string_view npath = path;
202 while (npath.size() > 0 && (npath[0] == '/' || npath[0] == '\\')) 202 while (!npath.empty() && (npath[0] == '/' || npath[0] == '\\')) {
203 npath = npath.substr(1); 203 npath.remove_prefix(1);
204 }
205
204 auto file = backing->GetFileRelative(npath); 206 auto file = backing->GetFileRelative(npath);
205 if (file == nullptr) 207 if (file == nullptr) {
206 return FileSys::ERROR_PATH_NOT_FOUND; 208 return FileSys::ERROR_PATH_NOT_FOUND;
209 }
207 210
208 if (mode == FileSys::Mode::Append) { 211 if (mode == FileSys::Mode::Append) {
209 return MakeResult<FileSys::VirtualFile>( 212 return MakeResult<FileSys::VirtualFile>(
@@ -319,15 +322,15 @@ ResultVal<FileSys::VirtualFile> OpenRomFS(u64 title_id, FileSys::StorageId stora
319} 322}
320 323
321ResultVal<FileSys::VirtualDir> OpenSaveData(FileSys::SaveDataSpaceId space, 324ResultVal<FileSys::VirtualDir> OpenSaveData(FileSys::SaveDataSpaceId space,
322 FileSys::SaveDataDescriptor save_struct) { 325 const FileSys::SaveDataDescriptor& descriptor) {
323 LOG_TRACE(Service_FS, "Opening Save Data for space_id={:01X}, save_struct={}", 326 LOG_TRACE(Service_FS, "Opening Save Data for space_id={:01X}, save_struct={}",
324 static_cast<u8>(space), save_struct.DebugInfo()); 327 static_cast<u8>(space), descriptor.DebugInfo());
325 328
326 if (save_data_factory == nullptr) { 329 if (save_data_factory == nullptr) {
327 return FileSys::ERROR_ENTITY_NOT_FOUND; 330 return FileSys::ERROR_ENTITY_NOT_FOUND;
328 } 331 }
329 332
330 return save_data_factory->Open(space, save_struct); 333 return save_data_factory->Open(space, descriptor);
331} 334}
332 335
333ResultVal<FileSys::VirtualDir> OpenSaveDataSpace(FileSys::SaveDataSpaceId space) { 336ResultVal<FileSys::VirtualDir> OpenSaveDataSpace(FileSys::SaveDataSpaceId space) {
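The OpenFile change above swaps the copying substr() loop for a std::string_view that simply walks past leading path separators. The same idea in a self-contained form:

#include <string_view>

// Strip any leading '/' or '\\' without allocating; the underlying string is untouched.
std::string_view StripLeadingSlashes(std::string_view path) {
    while (!path.empty() && (path.front() == '/' || path.front() == '\\')) {
        path.remove_prefix(1);
    }
    return path;
}

// StripLeadingSlashes("//save/game.bin") yields "save/game.bin".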
diff --git a/src/core/hle/service/filesystem/filesystem.h b/src/core/hle/service/filesystem/filesystem.h
index 6fd5e7b23..7cfc0d902 100644
--- a/src/core/hle/service/filesystem/filesystem.h
+++ b/src/core/hle/service/filesystem/filesystem.h
@@ -46,7 +46,7 @@ ResultVal<FileSys::VirtualFile> OpenRomFSCurrentProcess();
46ResultVal<FileSys::VirtualFile> OpenRomFS(u64 title_id, FileSys::StorageId storage_id, 46ResultVal<FileSys::VirtualFile> OpenRomFS(u64 title_id, FileSys::StorageId storage_id,
47 FileSys::ContentRecordType type); 47 FileSys::ContentRecordType type);
48ResultVal<FileSys::VirtualDir> OpenSaveData(FileSys::SaveDataSpaceId space, 48ResultVal<FileSys::VirtualDir> OpenSaveData(FileSys::SaveDataSpaceId space,
49 FileSys::SaveDataDescriptor save_struct); 49 const FileSys::SaveDataDescriptor& descriptor);
50ResultVal<FileSys::VirtualDir> OpenSaveDataSpace(FileSys::SaveDataSpaceId space); 50ResultVal<FileSys::VirtualDir> OpenSaveDataSpace(FileSys::SaveDataSpaceId space);
51ResultVal<FileSys::VirtualDir> OpenSDMC(); 51ResultVal<FileSys::VirtualDir> OpenSDMC();
52 52
diff --git a/src/core/hle/service/filesystem/fsp_srv.cpp b/src/core/hle/service/filesystem/fsp_srv.cpp
index 54959edd8..657baddb8 100644
--- a/src/core/hle/service/filesystem/fsp_srv.cpp
+++ b/src/core/hle/service/filesystem/fsp_srv.cpp
@@ -315,61 +315,53 @@ public:
315 void CreateFile(Kernel::HLERequestContext& ctx) { 315 void CreateFile(Kernel::HLERequestContext& ctx) {
316 IPC::RequestParser rp{ctx}; 316 IPC::RequestParser rp{ctx};
317 317
318 auto file_buffer = ctx.ReadBuffer(); 318 const auto file_buffer = ctx.ReadBuffer();
319 std::string name = Common::StringFromBuffer(file_buffer); 319 const std::string name = Common::StringFromBuffer(file_buffer);
320 320
321 u64 mode = rp.Pop<u64>(); 321 const u64 mode = rp.Pop<u64>();
322 u32 size = rp.Pop<u32>(); 322 const u32 size = rp.Pop<u32>();
323 323
324 LOG_DEBUG(Service_FS, "called file {} mode 0x{:X} size 0x{:08X}", name, mode, size); 324 LOG_DEBUG(Service_FS, "called. file={}, mode=0x{:X}, size=0x{:08X}", name, mode, size);
325 325
326 IPC::ResponseBuilder rb{ctx, 2}; 326 IPC::ResponseBuilder rb{ctx, 2};
327 rb.Push(backend.CreateFile(name, size)); 327 rb.Push(backend.CreateFile(name, size));
328 } 328 }
329 329
330 void DeleteFile(Kernel::HLERequestContext& ctx) { 330 void DeleteFile(Kernel::HLERequestContext& ctx) {
331 IPC::RequestParser rp{ctx}; 331 const auto file_buffer = ctx.ReadBuffer();
332 332 const std::string name = Common::StringFromBuffer(file_buffer);
333 auto file_buffer = ctx.ReadBuffer();
334 std::string name = Common::StringFromBuffer(file_buffer);
335 333
336 LOG_DEBUG(Service_FS, "called file {}", name); 334 LOG_DEBUG(Service_FS, "called. file={}", name);
337 335
338 IPC::ResponseBuilder rb{ctx, 2}; 336 IPC::ResponseBuilder rb{ctx, 2};
339 rb.Push(backend.DeleteFile(name)); 337 rb.Push(backend.DeleteFile(name));
340 } 338 }
341 339
342 void CreateDirectory(Kernel::HLERequestContext& ctx) { 340 void CreateDirectory(Kernel::HLERequestContext& ctx) {
343 IPC::RequestParser rp{ctx}; 341 const auto file_buffer = ctx.ReadBuffer();
344 342 const std::string name = Common::StringFromBuffer(file_buffer);
345 auto file_buffer = ctx.ReadBuffer();
346 std::string name = Common::StringFromBuffer(file_buffer);
347 343
348 LOG_DEBUG(Service_FS, "called directory {}", name); 344 LOG_DEBUG(Service_FS, "called. directory={}", name);
349 345
350 IPC::ResponseBuilder rb{ctx, 2}; 346 IPC::ResponseBuilder rb{ctx, 2};
351 rb.Push(backend.CreateDirectory(name)); 347 rb.Push(backend.CreateDirectory(name));
352 } 348 }
353 349
354 void DeleteDirectory(Kernel::HLERequestContext& ctx) { 350 void DeleteDirectory(Kernel::HLERequestContext& ctx) {
355 const IPC::RequestParser rp{ctx};
356
357 const auto file_buffer = ctx.ReadBuffer(); 351 const auto file_buffer = ctx.ReadBuffer();
358 std::string name = Common::StringFromBuffer(file_buffer); 352 const std::string name = Common::StringFromBuffer(file_buffer);
359 353
360 LOG_DEBUG(Service_FS, "called directory {}", name); 354 LOG_DEBUG(Service_FS, "called. directory={}", name);
361 355
362 IPC::ResponseBuilder rb{ctx, 2}; 356 IPC::ResponseBuilder rb{ctx, 2};
363 rb.Push(backend.DeleteDirectory(name)); 357 rb.Push(backend.DeleteDirectory(name));
364 } 358 }
365 359
366 void DeleteDirectoryRecursively(Kernel::HLERequestContext& ctx) { 360 void DeleteDirectoryRecursively(Kernel::HLERequestContext& ctx) {
367 const IPC::RequestParser rp{ctx};
368
369 const auto file_buffer = ctx.ReadBuffer(); 361 const auto file_buffer = ctx.ReadBuffer();
370 std::string name = Common::StringFromBuffer(file_buffer); 362 const std::string name = Common::StringFromBuffer(file_buffer);
371 363
372 LOG_DEBUG(Service_FS, "called directory {}", name); 364 LOG_DEBUG(Service_FS, "called. directory={}", name);
373 365
374 IPC::ResponseBuilder rb{ctx, 2}; 366 IPC::ResponseBuilder rb{ctx, 2};
375 rb.Push(backend.DeleteDirectoryRecursively(name)); 367 rb.Push(backend.DeleteDirectoryRecursively(name));
@@ -386,18 +378,16 @@ public:
386 } 378 }
387 379
388 void RenameFile(Kernel::HLERequestContext& ctx) { 380 void RenameFile(Kernel::HLERequestContext& ctx) {
389 IPC::RequestParser rp{ctx};
390
391 std::vector<u8> buffer; 381 std::vector<u8> buffer;
392 buffer.resize(ctx.BufferDescriptorX()[0].Size()); 382 buffer.resize(ctx.BufferDescriptorX()[0].Size());
393 Memory::ReadBlock(ctx.BufferDescriptorX()[0].Address(), buffer.data(), buffer.size()); 383 Memory::ReadBlock(ctx.BufferDescriptorX()[0].Address(), buffer.data(), buffer.size());
394 std::string src_name = Common::StringFromBuffer(buffer); 384 const std::string src_name = Common::StringFromBuffer(buffer);
395 385
396 buffer.resize(ctx.BufferDescriptorX()[1].Size()); 386 buffer.resize(ctx.BufferDescriptorX()[1].Size());
397 Memory::ReadBlock(ctx.BufferDescriptorX()[1].Address(), buffer.data(), buffer.size()); 387 Memory::ReadBlock(ctx.BufferDescriptorX()[1].Address(), buffer.data(), buffer.size());
398 std::string dst_name = Common::StringFromBuffer(buffer); 388 const std::string dst_name = Common::StringFromBuffer(buffer);
399 389
400 LOG_DEBUG(Service_FS, "called file '{}' to file '{}'", src_name, dst_name); 390 LOG_DEBUG(Service_FS, "called. file '{}' to file '{}'", src_name, dst_name);
401 391
402 IPC::ResponseBuilder rb{ctx, 2}; 392 IPC::ResponseBuilder rb{ctx, 2};
403 rb.Push(backend.RenameFile(src_name, dst_name)); 393 rb.Push(backend.RenameFile(src_name, dst_name));
@@ -406,12 +396,12 @@ public:
406 void OpenFile(Kernel::HLERequestContext& ctx) { 396 void OpenFile(Kernel::HLERequestContext& ctx) {
407 IPC::RequestParser rp{ctx}; 397 IPC::RequestParser rp{ctx};
408 398
409 auto file_buffer = ctx.ReadBuffer(); 399 const auto file_buffer = ctx.ReadBuffer();
410 std::string name = Common::StringFromBuffer(file_buffer); 400 const std::string name = Common::StringFromBuffer(file_buffer);
411 401
412 auto mode = static_cast<FileSys::Mode>(rp.Pop<u32>()); 402 const auto mode = static_cast<FileSys::Mode>(rp.Pop<u32>());
413 403
414 LOG_DEBUG(Service_FS, "called file {} mode {}", name, static_cast<u32>(mode)); 404 LOG_DEBUG(Service_FS, "called. file={}, mode={}", name, static_cast<u32>(mode));
415 405
416 auto result = backend.OpenFile(name, mode); 406 auto result = backend.OpenFile(name, mode);
417 if (result.Failed()) { 407 if (result.Failed()) {
@@ -430,13 +420,13 @@ public:
430 void OpenDirectory(Kernel::HLERequestContext& ctx) { 420 void OpenDirectory(Kernel::HLERequestContext& ctx) {
431 IPC::RequestParser rp{ctx}; 421 IPC::RequestParser rp{ctx};
432 422
433 auto file_buffer = ctx.ReadBuffer(); 423 const auto file_buffer = ctx.ReadBuffer();
434 std::string name = Common::StringFromBuffer(file_buffer); 424 const std::string name = Common::StringFromBuffer(file_buffer);
435 425
436 // TODO(Subv): Implement this filter. 426 // TODO(Subv): Implement this filter.
437 u32 filter_flags = rp.Pop<u32>(); 427 const u32 filter_flags = rp.Pop<u32>();
438 428
439 LOG_DEBUG(Service_FS, "called directory {} filter {}", name, filter_flags); 429 LOG_DEBUG(Service_FS, "called. directory={}, filter={}", name, filter_flags);
440 430
441 auto result = backend.OpenDirectory(name); 431 auto result = backend.OpenDirectory(name);
442 if (result.Failed()) { 432 if (result.Failed()) {
@@ -453,12 +443,10 @@ public:
453 } 443 }
454 444
455 void GetEntryType(Kernel::HLERequestContext& ctx) { 445 void GetEntryType(Kernel::HLERequestContext& ctx) {
456 IPC::RequestParser rp{ctx}; 446 const auto file_buffer = ctx.ReadBuffer();
457 447 const std::string name = Common::StringFromBuffer(file_buffer);
458 auto file_buffer = ctx.ReadBuffer();
459 std::string name = Common::StringFromBuffer(file_buffer);
460 448
461 LOG_DEBUG(Service_FS, "called file {}", name); 449 LOG_DEBUG(Service_FS, "called. file={}", name);
462 450
463 auto result = backend.GetEntryType(name); 451 auto result = backend.GetEntryType(name);
464 if (result.Failed()) { 452 if (result.Failed()) {
@@ -616,7 +604,9 @@ private:
616 u64_le save_id; 604 u64_le save_id;
617 u64_le title_id; 605 u64_le title_id;
618 u64_le save_image_size; 606 u64_le save_image_size;
619 INSERT_PADDING_BYTES(0x28); 607 u16_le index;
608 FileSys::SaveDataRank rank;
609 INSERT_PADDING_BYTES(0x25);
620 }; 610 };
621 static_assert(sizeof(SaveDataInfo) == 0x60, "SaveDataInfo has incorrect size."); 611 static_assert(sizeof(SaveDataInfo) == 0x60, "SaveDataInfo has incorrect size.");
622 612
@@ -733,7 +723,10 @@ FSP_SRV::FSP_SRV() : ServiceFramework("fsp-srv") {
733FSP_SRV::~FSP_SRV() = default; 723FSP_SRV::~FSP_SRV() = default;
734 724
735void FSP_SRV::SetCurrentProcess(Kernel::HLERequestContext& ctx) { 725void FSP_SRV::SetCurrentProcess(Kernel::HLERequestContext& ctx) {
736 LOG_WARNING(Service_FS, "(STUBBED) called"); 726 IPC::RequestParser rp{ctx};
727 current_process_id = rp.Pop<u64>();
728
729 LOG_DEBUG(Service_FS, "called. current_process_id=0x{:016X}", current_process_id);
737 730
738 IPC::ResponseBuilder rb{ctx, 2}; 731 IPC::ResponseBuilder rb{ctx, 2};
739 rb.Push(RESULT_SUCCESS); 732 rb.Push(RESULT_SUCCESS);
@@ -776,16 +769,17 @@ void FSP_SRV::CreateSaveDataFileSystem(Kernel::HLERequestContext& ctx) {
776} 769}
777 770
778void FSP_SRV::OpenSaveDataFileSystem(Kernel::HLERequestContext& ctx) { 771void FSP_SRV::OpenSaveDataFileSystem(Kernel::HLERequestContext& ctx) {
779 IPC::RequestParser rp{ctx}; 772 LOG_INFO(Service_FS, "called.");
780 773
781 auto space_id = rp.PopRaw<FileSys::SaveDataSpaceId>(); 774 struct Parameters {
782 auto unk = rp.Pop<u32>(); 775 FileSys::SaveDataSpaceId save_data_space_id;
783 LOG_INFO(Service_FS, "called with unknown={:08X}", unk); 776 FileSys::SaveDataDescriptor descriptor;
784 777 };
785 auto save_struct = rp.PopRaw<FileSys::SaveDataDescriptor>();
786 778
787 auto dir = OpenSaveData(space_id, save_struct); 779 IPC::RequestParser rp{ctx};
780 const auto parameters = rp.PopRaw<Parameters>();
788 781
782 auto dir = OpenSaveData(parameters.save_data_space_id, parameters.descriptor);
789 if (dir.Failed()) { 783 if (dir.Failed()) {
790 IPC::ResponseBuilder rb{ctx, 2, 0, 0}; 784 IPC::ResponseBuilder rb{ctx, 2, 0, 0};
791 rb.Push(FileSys::ERROR_ENTITY_NOT_FOUND); 785 rb.Push(FileSys::ERROR_ENTITY_NOT_FOUND);
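OpenSaveDataFileSystem above now pops the space id and descriptor in one rp.PopRaw<Parameters>() call, which works because the request words are a flat byte stream and the struct is trivially copyable. A standalone sketch of that idea (the field widths below are illustrative only, not the real wire layout, and IPC::RequestParser itself is the emulator's type):

#include <cstdint>
#include <cstring>
#include <type_traits>
#include <vector>

struct Parameters {
    std::uint64_t save_data_space_id;
    std::uint64_t title_id; // stand-in for the first descriptor field
};
static_assert(std::is_trivially_copyable_v<Parameters>,
              "PopRaw-style reads require a trivially copyable type");

// Read a Parameters struct out of a raw word buffer; the caller guarantees bounds.
Parameters PopRaw(const std::vector<std::uint8_t>& words, std::size_t offset) {
    Parameters out{};
    std::memcpy(&out, words.data() + offset, sizeof(out));
    return out;
}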
diff --git a/src/core/hle/service/filesystem/fsp_srv.h b/src/core/hle/service/filesystem/fsp_srv.h
index 3a5f4e200..d7572ba7a 100644
--- a/src/core/hle/service/filesystem/fsp_srv.h
+++ b/src/core/hle/service/filesystem/fsp_srv.h
@@ -32,6 +32,7 @@ private:
32 void OpenPatchDataStorageByCurrentProcess(Kernel::HLERequestContext& ctx); 32 void OpenPatchDataStorageByCurrentProcess(Kernel::HLERequestContext& ctx);
33 33
34 FileSys::VirtualFile romfs; 34 FileSys::VirtualFile romfs;
35 u64 current_process_id = 0;
35}; 36};
36 37
37} // namespace Service::FileSystem 38} // namespace Service::FileSystem
diff --git a/src/core/hle/service/hid/controllers/debug_pad.h b/src/core/hle/service/hid/controllers/debug_pad.h
index 929035034..e584b92ec 100644
--- a/src/core/hle/service/hid/controllers/debug_pad.h
+++ b/src/core/hle/service/hid/controllers/debug_pad.h
@@ -41,20 +41,20 @@ private:
41 struct PadState { 41 struct PadState {
42 union { 42 union {
43 u32_le raw{}; 43 u32_le raw{};
44 BitField<0, 1, u32_le> a; 44 BitField<0, 1, u32> a;
45 BitField<1, 1, u32_le> b; 45 BitField<1, 1, u32> b;
46 BitField<2, 1, u32_le> x; 46 BitField<2, 1, u32> x;
47 BitField<3, 1, u32_le> y; 47 BitField<3, 1, u32> y;
48 BitField<4, 1, u32_le> l; 48 BitField<4, 1, u32> l;
49 BitField<5, 1, u32_le> r; 49 BitField<5, 1, u32> r;
50 BitField<6, 1, u32_le> zl; 50 BitField<6, 1, u32> zl;
51 BitField<7, 1, u32_le> zr; 51 BitField<7, 1, u32> zr;
52 BitField<8, 1, u32_le> plus; 52 BitField<8, 1, u32> plus;
53 BitField<9, 1, u32_le> minus; 53 BitField<9, 1, u32> minus;
54 BitField<10, 1, u32_le> d_left; 54 BitField<10, 1, u32> d_left;
55 BitField<11, 1, u32_le> d_up; 55 BitField<11, 1, u32> d_up;
56 BitField<12, 1, u32_le> d_right; 56 BitField<12, 1, u32> d_right;
57 BitField<13, 1, u32_le> d_down; 57 BitField<13, 1, u32> d_down;
58 }; 58 };
59 }; 59 };
60 static_assert(sizeof(PadState) == 0x4, "PadState is an invalid size"); 60 static_assert(sizeof(PadState) == 0x4, "PadState is an invalid size");
@@ -62,7 +62,7 @@ private:
62 struct Attributes { 62 struct Attributes {
63 union { 63 union {
64 u32_le raw{}; 64 u32_le raw{};
65 BitField<0, 1, u32_le> connected; 65 BitField<0, 1, u32> connected;
66 }; 66 };
67 }; 67 };
68 static_assert(sizeof(Attributes) == 0x4, "Attributes is an invalid size"); 68 static_assert(sizeof(Attributes) == 0x4, "Attributes is an invalid size");
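The debug_pad.h changes above only swap the BitField storage type; the packing itself is unchanged. For readers unfamiliar with yuzu's BitField template, a plain-C++ equivalent of what a one-bit field such as BitField<3, 1, u32> exposes from the packed raw word (the helper below is illustrative, not the template's real implementation):

#include <cstdint>

// Test a single bit of a packed word; TestBit(raw, 3) corresponds to the `y` field above.
constexpr bool TestBit(std::uint32_t raw, unsigned position) {
    return ((raw >> position) & 1u) != 0;
}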
diff --git a/src/core/hle/service/hid/controllers/npad.h b/src/core/hle/service/hid/controllers/npad.h
index 18c7a94e6..4ff50b3cd 100644
--- a/src/core/hle/service/hid/controllers/npad.h
+++ b/src/core/hle/service/hid/controllers/npad.h
@@ -39,13 +39,13 @@ public:
39 union { 39 union {
40 u32_le raw{}; 40 u32_le raw{};
41 41
42 BitField<0, 1, u32_le> pro_controller; 42 BitField<0, 1, u32> pro_controller;
43 BitField<1, 1, u32_le> handheld; 43 BitField<1, 1, u32> handheld;
44 BitField<2, 1, u32_le> joycon_dual; 44 BitField<2, 1, u32> joycon_dual;
45 BitField<3, 1, u32_le> joycon_left; 45 BitField<3, 1, u32> joycon_left;
46 BitField<4, 1, u32_le> joycon_right; 46 BitField<4, 1, u32> joycon_right;
47 47
48 BitField<6, 1, u32_le> pokeball; // TODO(ogniK): Confirm when possible 48 BitField<6, 1, u32> pokeball; // TODO(ogniK): Confirm when possible
49 }; 49 };
50 }; 50 };
51 static_assert(sizeof(NPadType) == 4, "NPadType is an invalid size"); 51 static_assert(sizeof(NPadType) == 4, "NPadType is an invalid size");
@@ -150,43 +150,43 @@ private:
150 union { 150 union {
151 u64_le raw{}; 151 u64_le raw{};
152 // Button states 152 // Button states
153 BitField<0, 1, u64_le> a; 153 BitField<0, 1, u64> a;
154 BitField<1, 1, u64_le> b; 154 BitField<1, 1, u64> b;
155 BitField<2, 1, u64_le> x; 155 BitField<2, 1, u64> x;
156 BitField<3, 1, u64_le> y; 156 BitField<3, 1, u64> y;
157 BitField<4, 1, u64_le> l_stick; 157 BitField<4, 1, u64> l_stick;
158 BitField<5, 1, u64_le> r_stick; 158 BitField<5, 1, u64> r_stick;
159 BitField<6, 1, u64_le> l; 159 BitField<6, 1, u64> l;
160 BitField<7, 1, u64_le> r; 160 BitField<7, 1, u64> r;
161 BitField<8, 1, u64_le> zl; 161 BitField<8, 1, u64> zl;
162 BitField<9, 1, u64_le> zr; 162 BitField<9, 1, u64> zr;
163 BitField<10, 1, u64_le> plus; 163 BitField<10, 1, u64> plus;
164 BitField<11, 1, u64_le> minus; 164 BitField<11, 1, u64> minus;
165 165
166 // D-Pad 166 // D-Pad
167 BitField<12, 1, u64_le> d_left; 167 BitField<12, 1, u64> d_left;
168 BitField<13, 1, u64_le> d_up; 168 BitField<13, 1, u64> d_up;
169 BitField<14, 1, u64_le> d_right; 169 BitField<14, 1, u64> d_right;
170 BitField<15, 1, u64_le> d_down; 170 BitField<15, 1, u64> d_down;
171 171
172 // Left JoyStick 172 // Left JoyStick
173 BitField<16, 1, u64_le> l_stick_left; 173 BitField<16, 1, u64> l_stick_left;
174 BitField<17, 1, u64_le> l_stick_up; 174 BitField<17, 1, u64> l_stick_up;
175 BitField<18, 1, u64_le> l_stick_right; 175 BitField<18, 1, u64> l_stick_right;
176 BitField<19, 1, u64_le> l_stick_down; 176 BitField<19, 1, u64> l_stick_down;
177 177
178 // Right JoyStick 178 // Right JoyStick
179 BitField<20, 1, u64_le> r_stick_left; 179 BitField<20, 1, u64> r_stick_left;
180 BitField<21, 1, u64_le> r_stick_up; 180 BitField<21, 1, u64> r_stick_up;
181 BitField<22, 1, u64_le> r_stick_right; 181 BitField<22, 1, u64> r_stick_right;
182 BitField<23, 1, u64_le> r_stick_down; 182 BitField<23, 1, u64> r_stick_down;
183 183
184 // Not always active? 184 // Not always active?
185 BitField<24, 1, u64_le> left_sl; 185 BitField<24, 1, u64> left_sl;
186 BitField<25, 1, u64_le> left_sr; 186 BitField<25, 1, u64> left_sr;
187 187
188 BitField<26, 1, u64_le> right_sl; 188 BitField<26, 1, u64> right_sl;
189 BitField<27, 1, u64_le> right_sr; 189 BitField<27, 1, u64> right_sr;
190 }; 190 };
191 }; 191 };
192 static_assert(sizeof(ControllerPadState) == 8, "ControllerPadState is an invalid size"); 192 static_assert(sizeof(ControllerPadState) == 8, "ControllerPadState is an invalid size");
@@ -200,12 +200,12 @@ private:
200 struct ConnectionState { 200 struct ConnectionState {
201 union { 201 union {
202 u32_le raw{}; 202 u32_le raw{};
203 BitField<0, 1, u32_le> IsConnected; 203 BitField<0, 1, u32> IsConnected;
204 BitField<1, 1, u32_le> IsWired; 204 BitField<1, 1, u32> IsWired;
205 BitField<2, 1, u32_le> IsLeftJoyConnected; 205 BitField<2, 1, u32> IsLeftJoyConnected;
206 BitField<3, 1, u32_le> IsLeftJoyWired; 206 BitField<3, 1, u32> IsLeftJoyWired;
207 BitField<4, 1, u32_le> IsRightJoyConnected; 207 BitField<4, 1, u32> IsRightJoyConnected;
208 BitField<5, 1, u32_le> IsRightJoyWired; 208 BitField<5, 1, u32> IsRightJoyWired;
209 }; 209 };
210 }; 210 };
211 static_assert(sizeof(ConnectionState) == 4, "ConnectionState is an invalid size"); 211 static_assert(sizeof(ConnectionState) == 4, "ConnectionState is an invalid size");
@@ -240,23 +240,23 @@ private:
240 struct NPadProperties { 240 struct NPadProperties {
241 union { 241 union {
242 s64_le raw{}; 242 s64_le raw{};
243 BitField<11, 1, s64_le> is_vertical; 243 BitField<11, 1, s64> is_vertical;
244 BitField<12, 1, s64_le> is_horizontal; 244 BitField<12, 1, s64> is_horizontal;
245 BitField<13, 1, s64_le> use_plus; 245 BitField<13, 1, s64> use_plus;
246 BitField<14, 1, s64_le> use_minus; 246 BitField<14, 1, s64> use_minus;
247 }; 247 };
248 }; 248 };
249 249
250 struct NPadDevice { 250 struct NPadDevice {
251 union { 251 union {
252 u32_le raw{}; 252 u32_le raw{};
253 BitField<0, 1, s32_le> pro_controller; 253 BitField<0, 1, s32> pro_controller;
254 BitField<1, 1, s32_le> handheld; 254 BitField<1, 1, s32> handheld;
255 BitField<2, 1, s32_le> handheld_left; 255 BitField<2, 1, s32> handheld_left;
256 BitField<3, 1, s32_le> handheld_right; 256 BitField<3, 1, s32> handheld_right;
257 BitField<4, 1, s32_le> joycon_left; 257 BitField<4, 1, s32> joycon_left;
258 BitField<5, 1, s32_le> joycon_right; 258 BitField<5, 1, s32> joycon_right;
259 BitField<6, 1, s32_le> pokeball; 259 BitField<6, 1, s32> pokeball;
260 }; 260 };
261 }; 261 };
262 262
diff --git a/src/core/hle/service/hid/controllers/touchscreen.h b/src/core/hle/service/hid/controllers/touchscreen.h
index 012b6e0dd..76fc340e9 100644
--- a/src/core/hle/service/hid/controllers/touchscreen.h
+++ b/src/core/hle/service/hid/controllers/touchscreen.h
@@ -33,8 +33,8 @@ private:
33 struct Attributes { 33 struct Attributes {
34 union { 34 union {
35 u32 raw{}; 35 u32 raw{};
36 BitField<0, 1, u32_le> start_touch; 36 BitField<0, 1, u32> start_touch;
37 BitField<1, 1, u32_le> end_touch; 37 BitField<1, 1, u32> end_touch;
38 }; 38 };
39 }; 39 };
40 static_assert(sizeof(Attributes) == 0x4, "Attributes is an invalid size"); 40 static_assert(sizeof(Attributes) == 0x4, "Attributes is an invalid size");
diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp
index 8a6de83a2..63b55758b 100644
--- a/src/core/hle/service/hid/hid.cpp
+++ b/src/core/hle/service/hid/hid.cpp
@@ -36,9 +36,9 @@ namespace Service::HID {
36 36
37// Updating period for each HID device. 37// Updating period for each HID device.
38// TODO(ogniK): Find actual polling rate of hid 38// TODO(ogniK): Find actual polling rate of hid
39constexpr u64 pad_update_ticks = Core::Timing::BASE_CLOCK_RATE / 66; 39constexpr s64 pad_update_ticks = static_cast<s64>(Core::Timing::BASE_CLOCK_RATE / 66);
40constexpr u64 accelerometer_update_ticks = Core::Timing::BASE_CLOCK_RATE / 100; 40constexpr s64 accelerometer_update_ticks = static_cast<s64>(Core::Timing::BASE_CLOCK_RATE / 100);
41constexpr u64 gyroscope_update_ticks = Core::Timing::BASE_CLOCK_RATE / 100; 41constexpr s64 gyroscope_update_ticks = static_cast<s64>(Core::Timing::BASE_CLOCK_RATE / 100);
42constexpr std::size_t SHARED_MEMORY_SIZE = 0x40000; 42constexpr std::size_t SHARED_MEMORY_SIZE = 0x40000;
43 43
44IAppletResource::IAppletResource() : ServiceFramework("IAppletResource") { 44IAppletResource::IAppletResource() : ServiceFramework("IAppletResource") {
@@ -75,7 +75,7 @@ IAppletResource::IAppletResource() : ServiceFramework("IAppletResource") {
75 // Register update callbacks 75 // Register update callbacks
76 auto& core_timing = Core::System::GetInstance().CoreTiming(); 76 auto& core_timing = Core::System::GetInstance().CoreTiming();
77 pad_update_event = 77 pad_update_event =
78 core_timing.RegisterEvent("HID::UpdatePadCallback", [this](u64 userdata, int cycles_late) { 78 core_timing.RegisterEvent("HID::UpdatePadCallback", [this](u64 userdata, s64 cycles_late) {
79 UpdateControllers(userdata, cycles_late); 79 UpdateControllers(userdata, cycles_late);
80 }); 80 });
81 81
@@ -106,7 +106,7 @@ void IAppletResource::GetSharedMemoryHandle(Kernel::HLERequestContext& ctx) {
106 rb.PushCopyObjects(shared_mem); 106 rb.PushCopyObjects(shared_mem);
107} 107}
108 108
109void IAppletResource::UpdateControllers(u64 userdata, int cycles_late) { 109void IAppletResource::UpdateControllers(u64 userdata, s64 cycles_late) {
110 auto& core_timing = Core::System::GetInstance().CoreTiming(); 110 auto& core_timing = Core::System::GetInstance().CoreTiming();
111 111
112 const bool should_reload = Settings::values.is_device_reload_pending.exchange(false); 112 const bool should_reload = Settings::values.is_device_reload_pending.exchange(false);
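The hid.cpp hunk above turns the polling constants into signed s64 tick counts derived from the base clock. The arithmetic is just clock rate divided by polling frequency; keeping it signed lets later "cycles_late" adjustments go negative without unsigned wraparound. A sketch, with BASE_CLOCK_RATE left as a parameter rather than restated here:

#include <cstdint>

// Ticks between polls for a device sampled at poll_hz on a clock of clock_rate_hz.
constexpr std::int64_t TicksPerPoll(std::uint64_t clock_rate_hz, std::uint64_t poll_hz) {
    return static_cast<std::int64_t>(clock_rate_hz / poll_hz);
}

// e.g. the pad path above uses TicksPerPoll(BASE_CLOCK_RATE, 66); a periodic callback can
// then reschedule itself at (period - cycles_late) to stay on cadence, which is one reason
// a signed tick type is convenient.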
diff --git a/src/core/hle/service/hid/hid.h b/src/core/hle/service/hid/hid.h
index 7cc58db4c..d3660cad2 100644
--- a/src/core/hle/service/hid/hid.h
+++ b/src/core/hle/service/hid/hid.h
@@ -4,6 +4,9 @@
4 4
5#pragma once 5#pragma once
6 6
7#include "core/hle/service/hid/controllers/controller_base.h"
8#include "core/hle/service/service.h"
9
7#include "controllers/controller_base.h" 10#include "controllers/controller_base.h"
8#include "core/hle/service/service.h" 11#include "core/hle/service/service.h"
9 12
@@ -62,7 +65,7 @@ private:
62 } 65 }
63 66
64 void GetSharedMemoryHandle(Kernel::HLERequestContext& ctx); 67 void GetSharedMemoryHandle(Kernel::HLERequestContext& ctx);
65 void UpdateControllers(u64 userdata, int cycles_late); 68 void UpdateControllers(u64 userdata, s64 cycles_late);
66 69
67 Kernel::SharedPtr<Kernel::SharedMemory> shared_mem; 70 Kernel::SharedPtr<Kernel::SharedMemory> shared_mem;
68 71
diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp
index 9df7ac50f..d65693fc7 100644
--- a/src/core/hle/service/ldr/ldr.cpp
+++ b/src/core/hle/service/ldr/ldr.cpp
@@ -319,15 +319,14 @@ public:
319 } 319 }
320 320
321 ASSERT(vm_manager 321 ASSERT(vm_manager
322 .MirrorMemory(*map_address, nro_addr, nro_size, 322 .MirrorMemory(*map_address, nro_addr, nro_size, Kernel::MemoryState::ModuleCode)
323 Kernel::MemoryState::ModuleCodeStatic)
324 .IsSuccess()); 323 .IsSuccess());
325 ASSERT(vm_manager.UnmapRange(nro_addr, nro_size).IsSuccess()); 324 ASSERT(vm_manager.UnmapRange(nro_addr, nro_size).IsSuccess());
326 325
327 if (bss_size > 0) { 326 if (bss_size > 0) {
328 ASSERT(vm_manager 327 ASSERT(vm_manager
329 .MirrorMemory(*map_address + nro_size, bss_addr, bss_size, 328 .MirrorMemory(*map_address + nro_size, bss_addr, bss_size,
330 Kernel::MemoryState::ModuleCodeStatic) 329 Kernel::MemoryState::ModuleCode)
331 .IsSuccess()); 330 .IsSuccess());
332 ASSERT(vm_manager.UnmapRange(bss_addr, bss_size).IsSuccess()); 331 ASSERT(vm_manager.UnmapRange(bss_addr, bss_size).IsSuccess());
333 } 332 }
@@ -388,8 +387,7 @@ public:
388 const auto& nro_size = iter->second.size; 387 const auto& nro_size = iter->second.size;
389 388
390 ASSERT(vm_manager 389 ASSERT(vm_manager
391 .MirrorMemory(heap_addr, mapped_addr, nro_size, 390 .MirrorMemory(heap_addr, mapped_addr, nro_size, Kernel::MemoryState::ModuleCode)
392 Kernel::MemoryState::ModuleCodeStatic)
393 .IsSuccess()); 391 .IsSuccess());
394 ASSERT(vm_manager.UnmapRange(mapped_addr, nro_size).IsSuccess()); 392 ASSERT(vm_manager.UnmapRange(mapped_addr, nro_size).IsSuccess());
395 393
diff --git a/src/core/hle/service/lm/lm.cpp b/src/core/hle/service/lm/lm.cpp
index 1f462e087..2a61593e2 100644
--- a/src/core/hle/service/lm/lm.cpp
+++ b/src/core/hle/service/lm/lm.cpp
@@ -42,7 +42,7 @@ private:
42 union { 42 union {
43 BitField<0, 16, Flags> flags; 43 BitField<0, 16, Flags> flags;
44 BitField<16, 8, Severity> severity; 44 BitField<16, 8, Severity> severity;
45 BitField<24, 8, u32_le> verbosity; 45 BitField<24, 8, u32> verbosity;
46 }; 46 };
47 u32_le payload_size; 47 u32_le payload_size;
48 48
diff --git a/src/core/hle/service/nfc/nfc.cpp b/src/core/hle/service/nfc/nfc.cpp
index 5c62d42ba..ca88bf97f 100644
--- a/src/core/hle/service/nfc/nfc.cpp
+++ b/src/core/hle/service/nfc/nfc.cpp
@@ -150,7 +150,7 @@ private:
150 150
151 IPC::ResponseBuilder rb{ctx, 3}; 151 IPC::ResponseBuilder rb{ctx, 3};
152 rb.Push(RESULT_SUCCESS); 152 rb.Push(RESULT_SUCCESS);
153 rb.PushRaw<u8>(Settings::values.enable_nfc); 153 rb.PushRaw<u8>(true);
154 } 154 }
155 155
156 void GetStateOld(Kernel::HLERequestContext& ctx) { 156 void GetStateOld(Kernel::HLERequestContext& ctx) {
diff --git a/src/core/hle/service/nfp/nfp.cpp b/src/core/hle/service/nfp/nfp.cpp
index 1c4482e47..c6babdd4d 100644
--- a/src/core/hle/service/nfp/nfp.cpp
+++ b/src/core/hle/service/nfp/nfp.cpp
@@ -335,7 +335,7 @@ void Module::Interface::CreateUserInterface(Kernel::HLERequestContext& ctx) {
335} 335}
336 336
337bool Module::Interface::LoadAmiibo(const std::vector<u8>& buffer) { 337bool Module::Interface::LoadAmiibo(const std::vector<u8>& buffer) {
338 std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); 338 std::lock_guard lock{HLE::g_hle_lock};
339 if (buffer.size() < sizeof(AmiiboFile)) { 339 if (buffer.size() < sizeof(AmiiboFile)) {
340 return false; 340 return false;
341 } 341 }
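The nfp.cpp change above drops the explicit template argument from std::lock_guard, relying on C++17 class template argument deduction. Minimal standalone equivalent:

#include <mutex>

std::recursive_mutex g_lock;

void Locked() {
    std::lock_guard lock{g_lock}; // deduces std::lock_guard<std::recursive_mutex>
    // ... critical section ...
}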
diff --git a/src/core/hle/service/nvdrv/devices/nvdevice.h b/src/core/hle/service/nvdrv/devices/nvdevice.h
index 0f02a1a18..4f6042b00 100644
--- a/src/core/hle/service/nvdrv/devices/nvdevice.h
+++ b/src/core/hle/service/nvdrv/devices/nvdevice.h
@@ -19,11 +19,11 @@ public:
19 virtual ~nvdevice() = default; 19 virtual ~nvdevice() = default;
20 union Ioctl { 20 union Ioctl {
21 u32_le raw; 21 u32_le raw;
22 BitField<0, 8, u32_le> cmd; 22 BitField<0, 8, u32> cmd;
23 BitField<8, 8, u32_le> group; 23 BitField<8, 8, u32> group;
24 BitField<16, 14, u32_le> length; 24 BitField<16, 14, u32> length;
25 BitField<30, 1, u32_le> is_in; 25 BitField<30, 1, u32> is_in;
26 BitField<31, 1, u32_le> is_out; 26 BitField<31, 1, u32> is_out;
27 }; 27 };
28 28
29 /** 29 /**
diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h
index ace71169f..12f3ef825 100644
--- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h
+++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h
@@ -18,7 +18,7 @@ class nvmap;
18class nvdisp_disp0 final : public nvdevice { 18class nvdisp_disp0 final : public nvdevice {
19public: 19public:
20 explicit nvdisp_disp0(std::shared_ptr<nvmap> nvmap_dev); 20 explicit nvdisp_disp0(std::shared_ptr<nvmap> nvmap_dev);
21 ~nvdisp_disp0(); 21 ~nvdisp_disp0() override;
22 22
23 u32 ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override; 23 u32 ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override;
24 24
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
index a34b9e753..af62d33d2 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
@@ -10,6 +10,7 @@
10#include "core/core.h" 10#include "core/core.h"
11#include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h" 11#include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h"
12#include "core/hle/service/nvdrv/devices/nvmap.h" 12#include "core/hle/service/nvdrv/devices/nvmap.h"
13#include "core/memory.h"
13#include "video_core/memory_manager.h" 14#include "video_core/memory_manager.h"
14#include "video_core/rasterizer_interface.h" 15#include "video_core/rasterizer_interface.h"
15#include "video_core/renderer_base.h" 16#include "video_core/renderer_base.h"
@@ -88,7 +89,7 @@ u32 nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& output)
88 for (const auto& entry : entries) { 89 for (const auto& entry : entries) {
89 LOG_WARNING(Service_NVDRV, "remap entry, offset=0x{:X} handle=0x{:X} pages=0x{:X}", 90 LOG_WARNING(Service_NVDRV, "remap entry, offset=0x{:X} handle=0x{:X} pages=0x{:X}",
90 entry.offset, entry.nvmap_handle, entry.pages); 91 entry.offset, entry.nvmap_handle, entry.pages);
91 Tegra::GPUVAddr offset = static_cast<Tegra::GPUVAddr>(entry.offset) << 0x10; 92 GPUVAddr offset = static_cast<GPUVAddr>(entry.offset) << 0x10;
92 auto object = nvmap_dev->GetObject(entry.nvmap_handle); 93 auto object = nvmap_dev->GetObject(entry.nvmap_handle);
93 if (!object) { 94 if (!object) {
94 LOG_CRITICAL(Service_NVDRV, "nvmap {} is an invalid handle!", entry.nvmap_handle); 95 LOG_CRITICAL(Service_NVDRV, "nvmap {} is an invalid handle!", entry.nvmap_handle);
@@ -101,7 +102,7 @@ u32 nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& output)
101 u64 size = static_cast<u64>(entry.pages) << 0x10; 102 u64 size = static_cast<u64>(entry.pages) << 0x10;
102 ASSERT(size <= object->size); 103 ASSERT(size <= object->size);
103 104
104 Tegra::GPUVAddr returned = gpu.MemoryManager().MapBufferEx(object->addr, offset, size); 105 GPUVAddr returned = gpu.MemoryManager().MapBufferEx(object->addr, offset, size);
105 ASSERT(returned == offset); 106 ASSERT(returned == offset);
106 } 107 }
107 std::memcpy(output.data(), entries.data(), output.size()); 108 std::memcpy(output.data(), entries.data(), output.size());
@@ -172,16 +173,8 @@ u32 nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& ou
172 return 0; 173 return 0;
173 } 174 }
174 175
175 auto& system_instance = Core::System::GetInstance(); 176 params.offset = Core::System::GetInstance().GPU().MemoryManager().UnmapBuffer(params.offset,
176 177 itr->second.size);
177 // Remove this memory region from the rasterizer cache.
178 auto& gpu = system_instance.GPU();
179 auto cpu_addr = gpu.MemoryManager().GpuToCpuAddress(params.offset);
180 ASSERT(cpu_addr);
181 gpu.FlushAndInvalidateRegion(*cpu_addr, itr->second.size);
182
183 params.offset = gpu.MemoryManager().UnmapBuffer(params.offset, itr->second.size);
184
185 buffer_mappings.erase(itr->second.offset); 178 buffer_mappings.erase(itr->second.offset);
186 179
187 std::memcpy(output.data(), &params, output.size()); 180 std::memcpy(output.data(), &params, output.size());
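The Remap and UnmapBuffer hunks above work in 64 KiB GPU page units: both entry.offset and entry.pages are shifted left by 0x10 to form byte addresses and sizes, and the explicit rasterizer flush that UnmapBuffer used to issue is dropped, presumably because the GPU memory manager now performs cache maintenance when a mapping is torn down. A small sketch of the unit conversion (the constant name is illustrative only):

    constexpr u64 kGpuPageBits = 0x10; // 64 KiB GPU pages, matching the shifts in the hunk
    const GPUVAddr offset = static_cast<GPUVAddr>(entry.offset) << kGpuPageBits;
    const u64 size = static_cast<u64>(entry.pages) << kGpuPageBits;
    // e.g. entry.pages == 0x3 maps 3 * 64 KiB = 0x30000 bytes starting at offset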
diff --git a/src/core/hle/service/nvdrv/interface.h b/src/core/hle/service/nvdrv/interface.h
index fe311b069..5b4889910 100644
--- a/src/core/hle/service/nvdrv/interface.h
+++ b/src/core/hle/service/nvdrv/interface.h
@@ -17,7 +17,7 @@ namespace Service::Nvidia {
17class NVDRV final : public ServiceFramework<NVDRV> { 17class NVDRV final : public ServiceFramework<NVDRV> {
18public: 18public:
19 NVDRV(std::shared_ptr<Module> nvdrv, const char* name); 19 NVDRV(std::shared_ptr<Module> nvdrv, const char* name);
20 ~NVDRV(); 20 ~NVDRV() override;
21 21
22private: 22private:
23 void Open(Kernel::HLERequestContext& ctx); 23 void Open(Kernel::HLERequestContext& ctx);
diff --git a/src/core/hle/service/nvdrv/nvmemp.h b/src/core/hle/service/nvdrv/nvmemp.h
index 5a4dfc1f9..6eafb1346 100644
--- a/src/core/hle/service/nvdrv/nvmemp.h
+++ b/src/core/hle/service/nvdrv/nvmemp.h
@@ -11,7 +11,7 @@ namespace Service::Nvidia {
11class NVMEMP final : public ServiceFramework<NVMEMP> { 11class NVMEMP final : public ServiceFramework<NVMEMP> {
12public: 12public:
13 NVMEMP(); 13 NVMEMP();
14 ~NVMEMP(); 14 ~NVMEMP() override;
15 15
16private: 16private:
17 void Cmd0(Kernel::HLERequestContext& ctx); 17 void Cmd0(Kernel::HLERequestContext& ctx);
diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp
index fc496b654..c7f5bbf28 100644
--- a/src/core/hle/service/nvflinger/nvflinger.cpp
+++ b/src/core/hle/service/nvflinger/nvflinger.cpp
@@ -26,7 +26,7 @@
26namespace Service::NVFlinger { 26namespace Service::NVFlinger {
27 27
28constexpr std::size_t SCREEN_REFRESH_RATE = 60; 28constexpr std::size_t SCREEN_REFRESH_RATE = 60;
29constexpr u64 frame_ticks = static_cast<u64>(Core::Timing::BASE_CLOCK_RATE / SCREEN_REFRESH_RATE); 29constexpr s64 frame_ticks = static_cast<s64>(Core::Timing::BASE_CLOCK_RATE / SCREEN_REFRESH_RATE);
30 30
31NVFlinger::NVFlinger(Core::Timing::CoreTiming& core_timing) : core_timing{core_timing} { 31NVFlinger::NVFlinger(Core::Timing::CoreTiming& core_timing) : core_timing{core_timing} {
32 displays.emplace_back(0, "Default"); 32 displays.emplace_back(0, "Default");
@@ -37,7 +37,7 @@ NVFlinger::NVFlinger(Core::Timing::CoreTiming& core_timing) : core_timing{core_t
37 37
38 // Schedule the screen composition events 38 // Schedule the screen composition events
39 composition_event = 39 composition_event =
40 core_timing.RegisterEvent("ScreenComposition", [this](u64 userdata, int cycles_late) { 40 core_timing.RegisterEvent("ScreenComposition", [this](u64 userdata, s64 cycles_late) {
41 Compose(); 41 Compose();
42 this->core_timing.ScheduleEvent(frame_ticks - cycles_late, composition_event); 42 this->core_timing.ScheduleEvent(frame_ticks - cycles_late, composition_event);
43 }); 43 });
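frame_ticks and the callback's lateness parameter become signed s64 here because the reschedule subtracts the lateness from the frame period. A hedged sketch of why the signedness matters (names follow the hunk above):

    // If cycles_late were unsigned and ever exceeded frame_ticks, the expression
    // frame_ticks - cycles_late would wrap around to a huge positive delay instead of
    // yielding a small (possibly negative) value that reschedules composition promptly.
    const s64 next_event = frame_ticks - cycles_late;
    core_timing.ScheduleEvent(next_event, composition_event);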
diff --git a/src/core/hle/service/service.h b/src/core/hle/service/service.h
index 830790269..abbfe5524 100644
--- a/src/core/hle/service/service.h
+++ b/src/core/hle/service/service.h
@@ -90,7 +90,7 @@ private:
90 Kernel::HLERequestContext& ctx); 90 Kernel::HLERequestContext& ctx);
91 91
92 ServiceFrameworkBase(const char* service_name, u32 max_sessions, InvokerFn* handler_invoker); 92 ServiceFrameworkBase(const char* service_name, u32 max_sessions, InvokerFn* handler_invoker);
93 ~ServiceFrameworkBase(); 93 ~ServiceFrameworkBase() override;
94 94
95 void RegisterHandlersBase(const FunctionInfoBase* functions, std::size_t n); 95 void RegisterHandlersBase(const FunctionInfoBase* functions, std::size_t n);
96 void ReportUnimplementedFunction(Kernel::HLERequestContext& ctx, const FunctionInfoBase* info); 96 void ReportUnimplementedFunction(Kernel::HLERequestContext& ctx, const FunctionInfoBase* info);
diff --git a/src/core/hle/service/set/set_cal.h b/src/core/hle/service/set/set_cal.h
index 583036eac..a0677e815 100644
--- a/src/core/hle/service/set/set_cal.h
+++ b/src/core/hle/service/set/set_cal.h
@@ -11,7 +11,7 @@ namespace Service::Set {
11class SET_CAL final : public ServiceFramework<SET_CAL> { 11class SET_CAL final : public ServiceFramework<SET_CAL> {
12public: 12public:
13 explicit SET_CAL(); 13 explicit SET_CAL();
14 ~SET_CAL(); 14 ~SET_CAL() override;
15}; 15};
16 16
17} // namespace Service::Set 17} // namespace Service::Set
diff --git a/src/core/hle/service/set/set_sys.cpp b/src/core/hle/service/set/set_sys.cpp
index c9b4da5b0..ecee554bf 100644
--- a/src/core/hle/service/set/set_sys.cpp
+++ b/src/core/hle/service/set/set_sys.cpp
@@ -2,13 +2,88 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include "common/assert.h"
5#include "common/logging/log.h" 6#include "common/logging/log.h"
7#include "core/file_sys/errors.h"
8#include "core/file_sys/system_archive/system_version.h"
6#include "core/hle/ipc_helpers.h" 9#include "core/hle/ipc_helpers.h"
7#include "core/hle/kernel/client_port.h" 10#include "core/hle/kernel/client_port.h"
11#include "core/hle/service/filesystem/filesystem.h"
8#include "core/hle/service/set/set_sys.h" 12#include "core/hle/service/set/set_sys.h"
9 13
10namespace Service::Set { 14namespace Service::Set {
11 15
16namespace {
17constexpr u64 SYSTEM_VERSION_FILE_MINOR_REVISION_OFFSET = 0x05;
18
19enum class GetFirmwareVersionType {
20 Version1,
21 Version2,
22};
23
24void GetFirmwareVersionImpl(Kernel::HLERequestContext& ctx, GetFirmwareVersionType type) {
25 LOG_WARNING(Service_SET, "called - Using hardcoded firmware version '{}'",
26 FileSys::SystemArchive::GetLongDisplayVersion());
27
28 ASSERT_MSG(ctx.GetWriteBufferSize() == 0x100,
29 "FirmwareVersion output buffer must be 0x100 bytes in size!");
30
31 // Instead of using the normal procedure of checking for the real system archive and if it
 32 // doesn't exist, synthesizing one, I feel that this would lead to strange bugs because a
 33 // user may be using a really old or really new SystemVersion title. The synthesized one ensures
 34 // consistency (currently reports as 5.1.0-0.0).
35 const auto archive = FileSys::SystemArchive::SystemVersion();
36
37 const auto early_exit_failure = [&ctx](const std::string& desc, ResultCode code) {
38 LOG_ERROR(Service_SET, "General failure while attempting to resolve firmware version ({}).",
39 desc.c_str());
40 IPC::ResponseBuilder rb{ctx, 2};
41 rb.Push(code);
42 };
43
44 if (archive == nullptr) {
45 early_exit_failure("The system version archive couldn't be synthesized.",
46 FileSys::ERROR_FAILED_MOUNT_ARCHIVE);
47 return;
48 }
49
50 const auto ver_file = archive->GetFile("file");
51 if (ver_file == nullptr) {
52 early_exit_failure("The system version archive didn't contain the file 'file'.",
53 FileSys::ERROR_INVALID_ARGUMENT);
54 return;
55 }
56
57 auto data = ver_file->ReadAllBytes();
58 if (data.size() != 0x100) {
59 early_exit_failure("The system version file 'file' was not the correct size.",
60 FileSys::ERROR_OUT_OF_BOUNDS);
61 return;
62 }
63
64 // If the command is GetFirmwareVersion (as opposed to GetFirmwareVersion2), hardware will
65 // zero out the REVISION_MINOR field.
66 if (type == GetFirmwareVersionType::Version1) {
67 data[SYSTEM_VERSION_FILE_MINOR_REVISION_OFFSET] = 0;
68 }
69
70 ctx.WriteBuffer(data);
71
72 IPC::ResponseBuilder rb{ctx, 2};
73 rb.Push(RESULT_SUCCESS);
74}
75} // Anonymous namespace
76
77void SET_SYS::GetFirmwareVersion(Kernel::HLERequestContext& ctx) {
78 LOG_DEBUG(Service_SET, "called");
79 GetFirmwareVersionImpl(ctx, GetFirmwareVersionType::Version1);
80}
81
82void SET_SYS::GetFirmwareVersion2(Kernel::HLERequestContext& ctx) {
83 LOG_DEBUG(Service_SET, "called");
84 GetFirmwareVersionImpl(ctx, GetFirmwareVersionType::Version2);
85}
86
12void SET_SYS::GetColorSetId(Kernel::HLERequestContext& ctx) { 87void SET_SYS::GetColorSetId(Kernel::HLERequestContext& ctx) {
13 LOG_DEBUG(Service_SET, "called"); 88 LOG_DEBUG(Service_SET, "called");
14 89
@@ -33,8 +108,8 @@ SET_SYS::SET_SYS() : ServiceFramework("set:sys") {
33 {0, nullptr, "SetLanguageCode"}, 108 {0, nullptr, "SetLanguageCode"},
34 {1, nullptr, "SetNetworkSettings"}, 109 {1, nullptr, "SetNetworkSettings"},
35 {2, nullptr, "GetNetworkSettings"}, 110 {2, nullptr, "GetNetworkSettings"},
36 {3, nullptr, "GetFirmwareVersion"}, 111 {3, &SET_SYS::GetFirmwareVersion, "GetFirmwareVersion"},
37 {4, nullptr, "GetFirmwareVersion2"}, 112 {4, &SET_SYS::GetFirmwareVersion2, "GetFirmwareVersion2"},
38 {5, nullptr, "GetFirmwareVersionDigest"}, 113 {5, nullptr, "GetFirmwareVersionDigest"},
39 {7, nullptr, "GetLockScreenFlag"}, 114 {7, nullptr, "GetLockScreenFlag"},
40 {8, nullptr, "SetLockScreenFlag"}, 115 {8, nullptr, "SetLockScreenFlag"},
diff --git a/src/core/hle/service/set/set_sys.h b/src/core/hle/service/set/set_sys.h
index f602f3c77..13ee2cf46 100644
--- a/src/core/hle/service/set/set_sys.h
+++ b/src/core/hle/service/set/set_sys.h
@@ -20,6 +20,8 @@ private:
20 BasicBlack = 1, 20 BasicBlack = 1,
21 }; 21 };
22 22
23 void GetFirmwareVersion(Kernel::HLERequestContext& ctx);
24 void GetFirmwareVersion2(Kernel::HLERequestContext& ctx);
23 void GetColorSetId(Kernel::HLERequestContext& ctx); 25 void GetColorSetId(Kernel::HLERequestContext& ctx);
24 void SetColorSetId(Kernel::HLERequestContext& ctx); 26 void SetColorSetId(Kernel::HLERequestContext& ctx);
25 27
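For reference, a minimal sketch of the data flow GetFirmwareVersionImpl introduces above: the synthesized SystemVersion archive yields a 0x100-byte blob, and only the original GetFirmwareVersion command clears the minor-revision byte before the blob is copied into the client's output buffer (the helper name is illustrative, not part of the change):

    std::vector<u8> BuildFirmwareVersionResponse(std::vector<u8> data, bool zero_minor_revision) {
        // The service asserts both the version file and the output buffer are exactly 0x100 bytes.
        ASSERT(data.size() == 0x100);
        if (zero_minor_revision) {
            data[0x05] = 0; // SYSTEM_VERSION_FILE_MINOR_REVISION_OFFSET
        }
        return data;
    }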
diff --git a/src/core/hle/service/sockets/sfdnsres.cpp b/src/core/hle/service/sockets/sfdnsres.cpp
index 13ab1d31e..852e71e4b 100644
--- a/src/core/hle/service/sockets/sfdnsres.cpp
+++ b/src/core/hle/service/sockets/sfdnsres.cpp
@@ -8,12 +8,20 @@
8namespace Service::Sockets { 8namespace Service::Sockets {
9 9
10void SFDNSRES::GetAddrInfo(Kernel::HLERequestContext& ctx) { 10void SFDNSRES::GetAddrInfo(Kernel::HLERequestContext& ctx) {
11 struct Parameters {
12 u8 use_nsd_resolve;
13 u32 unknown;
14 u64 process_id;
15 };
16
11 IPC::RequestParser rp{ctx}; 17 IPC::RequestParser rp{ctx};
18 const auto parameters = rp.PopRaw<Parameters>();
12 19
13 LOG_WARNING(Service, "(STUBBED) called"); 20 LOG_WARNING(Service,
21 "(STUBBED) called. use_nsd_resolve={}, unknown=0x{:08X}, process_id=0x{:016X}",
22 parameters.use_nsd_resolve, parameters.unknown, parameters.process_id);
14 23
15 IPC::ResponseBuilder rb{ctx, 2}; 24 IPC::ResponseBuilder rb{ctx, 2};
16
17 rb.Push(RESULT_SUCCESS); 25 rb.Push(RESULT_SUCCESS);
18} 26}
19 27
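A note on the Parameters struct popped from the request above: with natural alignment the compiler places three bytes of implicit padding after use_nsd_resolve, so the fields land at offsets 0, 4 and 8 for a total of 16 bytes. A small sketch of that assumption:

    struct Parameters {
        u8 use_nsd_resolve; // offset 0x0, followed by 3 bytes of implicit padding
        u32 unknown;        // offset 0x4
        u64 process_id;     // offset 0x8
    };
    static_assert(sizeof(Parameters) == 0x10, "layout assumed when PopRaw<Parameters>() is used");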
diff --git a/src/core/hle/service/spl/module.cpp b/src/core/hle/service/spl/module.cpp
index 8db0c2f13..e724d4ab8 100644
--- a/src/core/hle/service/spl/module.cpp
+++ b/src/core/hle/service/spl/module.cpp
@@ -26,9 +26,7 @@ Module::Interface::~Interface() = default;
26void Module::Interface::GetRandomBytes(Kernel::HLERequestContext& ctx) { 26void Module::Interface::GetRandomBytes(Kernel::HLERequestContext& ctx) {
27 LOG_DEBUG(Service_SPL, "called"); 27 LOG_DEBUG(Service_SPL, "called");
28 28
29 IPC::RequestParser rp{ctx}; 29 const std::size_t size = ctx.GetWriteBufferSize();
30
31 std::size_t size = ctx.GetWriteBufferSize();
32 30
33 std::uniform_int_distribution<u16> distribution(0, std::numeric_limits<u8>::max()); 31 std::uniform_int_distribution<u16> distribution(0, std::numeric_limits<u8>::max());
34 std::vector<u8> data(size); 32 std::vector<u8> data(size);
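The distribution in GetRandomBytes is instantiated over u16 rather than u8 because std::uniform_int_distribution is only specified for short-or-wider integer types; char-sized types are undefined behavior per the standard. Each sample is then narrowed back to a byte. A short illustrative sketch of that pattern (not the exact service code):

    std::mt19937 engine{std::random_device{}()};
    std::uniform_int_distribution<u16> distribution(0, std::numeric_limits<u8>::max());
    std::vector<u8> data(size);
    std::generate(data.begin(), data.end(),
                  [&] { return static_cast<u8>(distribution(engine)); });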
diff --git a/src/core/hle/service/ssl/ssl.cpp b/src/core/hle/service/ssl/ssl.cpp
index af40a1815..f7f87a958 100644
--- a/src/core/hle/service/ssl/ssl.cpp
+++ b/src/core/hle/service/ssl/ssl.cpp
@@ -64,13 +64,19 @@ public:
64 }; 64 };
65 RegisterHandlers(functions); 65 RegisterHandlers(functions);
66 } 66 }
67 ~ISslContext() = default;
68 67
69private: 68private:
70 void SetOption(Kernel::HLERequestContext& ctx) { 69 void SetOption(Kernel::HLERequestContext& ctx) {
71 LOG_WARNING(Service_SSL, "(STUBBED) called"); 70 struct Parameters {
71 u8 enable;
72 u32 option;
73 };
72 74
73 IPC::RequestParser rp{ctx}; 75 IPC::RequestParser rp{ctx};
76 const auto parameters = rp.PopRaw<Parameters>();
77
78 LOG_WARNING(Service_SSL, "(STUBBED) called. enable={}, option={}", parameters.enable,
79 parameters.option);
74 80
75 IPC::ResponseBuilder rb{ctx, 2}; 81 IPC::ResponseBuilder rb{ctx, 2};
76 rb.Push(RESULT_SUCCESS); 82 rb.Push(RESULT_SUCCESS);
diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp
index 566cd6006..4e17249a9 100644
--- a/src/core/hle/service/vi/vi.cpp
+++ b/src/core/hle/service/vi/vi.cpp
@@ -498,7 +498,6 @@ public:
498 }; 498 };
499 RegisterHandlers(functions); 499 RegisterHandlers(functions);
500 } 500 }
501 ~IHOSBinderDriver() = default;
502 501
503private: 502private:
504 enum class TransactionId { 503 enum class TransactionId {
@@ -692,7 +691,6 @@ public:
692 }; 691 };
693 RegisterHandlers(functions); 692 RegisterHandlers(functions);
694 } 693 }
695 ~ISystemDisplayService() = default;
696 694
697private: 695private:
698 void SetLayerZ(Kernel::HLERequestContext& ctx) { 696 void SetLayerZ(Kernel::HLERequestContext& ctx) {
@@ -818,7 +816,6 @@ public:
818 }; 816 };
819 RegisterHandlers(functions); 817 RegisterHandlers(functions);
820 } 818 }
821 ~IManagerDisplayService() = default;
822 819
823private: 820private:
824 void CloseDisplay(Kernel::HLERequestContext& ctx) { 821 void CloseDisplay(Kernel::HLERequestContext& ctx) {
@@ -884,7 +881,6 @@ private:
884class IApplicationDisplayService final : public ServiceFramework<IApplicationDisplayService> { 881class IApplicationDisplayService final : public ServiceFramework<IApplicationDisplayService> {
885public: 882public:
886 explicit IApplicationDisplayService(std::shared_ptr<NVFlinger::NVFlinger> nv_flinger); 883 explicit IApplicationDisplayService(std::shared_ptr<NVFlinger::NVFlinger> nv_flinger);
887 ~IApplicationDisplayService() = default;
888 884
889private: 885private:
890 enum class ConvertedScaleMode : u64 { 886 enum class ConvertedScaleMode : u64 {
@@ -1037,7 +1033,6 @@ private:
1037 void ListDisplays(Kernel::HLERequestContext& ctx) { 1033 void ListDisplays(Kernel::HLERequestContext& ctx) {
1038 LOG_WARNING(Service_VI, "(STUBBED) called"); 1034 LOG_WARNING(Service_VI, "(STUBBED) called");
1039 1035
1040 IPC::RequestParser rp{ctx};
1041 DisplayInfo display_info; 1036 DisplayInfo display_info;
1042 display_info.width *= static_cast<u64>(Settings::values.resolution_factor); 1037 display_info.width *= static_cast<u64>(Settings::values.resolution_factor);
1043 display_info.height *= static_cast<u64>(Settings::values.resolution_factor); 1038 display_info.height *= static_cast<u64>(Settings::values.resolution_factor);
diff --git a/src/core/loader/elf.cpp b/src/core/loader/elf.cpp
index 6057c7f26..46ac372f6 100644
--- a/src/core/loader/elf.cpp
+++ b/src/core/loader/elf.cpp
@@ -9,6 +9,7 @@
9#include "common/common_types.h" 9#include "common/common_types.h"
10#include "common/file_util.h" 10#include "common/file_util.h"
11#include "common/logging/log.h" 11#include "common/logging/log.h"
12#include "core/hle/kernel/code_set.h"
12#include "core/hle/kernel/process.h" 13#include "core/hle/kernel/process.h"
13#include "core/hle/kernel/vm_manager.h" 14#include "core/hle/kernel/vm_manager.h"
14#include "core/loader/elf.h" 15#include "core/loader/elf.h"
@@ -340,7 +341,7 @@ Kernel::CodeSet ElfReader::LoadInto(VAddr vaddr) {
340 } 341 }
341 342
342 codeset.entrypoint = base_addr + header->e_entry; 343 codeset.entrypoint = base_addr + header->e_entry;
343 codeset.memory = std::make_shared<std::vector<u8>>(std::move(program_image)); 344 codeset.memory = std::move(program_image);
344 345
345 LOG_DEBUG(Loader, "Done loading."); 346 LOG_DEBUG(Loader, "Done loading.");
346 347
diff --git a/src/core/loader/linker.cpp b/src/core/loader/linker.cpp
deleted file mode 100644
index 57ca8c3ee..000000000
--- a/src/core/loader/linker.cpp
+++ /dev/null
@@ -1,147 +0,0 @@
1// Copyright 2018 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <vector>
6
7#include "common/common_funcs.h"
8#include "common/logging/log.h"
9#include "common/swap.h"
10#include "core/loader/linker.h"
11#include "core/memory.h"
12
13namespace Loader {
14
15enum class RelocationType : u32 { ABS64 = 257, GLOB_DAT = 1025, JUMP_SLOT = 1026, RELATIVE = 1027 };
16
17enum DynamicType : u32 {
18 DT_NULL = 0,
19 DT_PLTRELSZ = 2,
20 DT_STRTAB = 5,
21 DT_SYMTAB = 6,
22 DT_RELA = 7,
23 DT_RELASZ = 8,
24 DT_STRSZ = 10,
25 DT_JMPREL = 23,
26};
27
28struct Elf64_Rela {
29 u64_le offset;
30 RelocationType type;
31 u32_le symbol;
32 s64_le addend;
33};
34static_assert(sizeof(Elf64_Rela) == 0x18, "Elf64_Rela has incorrect size.");
35
36struct Elf64_Dyn {
37 u64_le tag;
38 u64_le value;
39};
40static_assert(sizeof(Elf64_Dyn) == 0x10, "Elf64_Dyn has incorrect size.");
41
42struct Elf64_Sym {
43 u32_le name;
44 INSERT_PADDING_BYTES(0x2);
45 u16_le shndx;
46 u64_le value;
47 u64_le size;
48};
49static_assert(sizeof(Elf64_Sym) == 0x18, "Elf64_Sym has incorrect size.");
50
51void Linker::WriteRelocations(std::vector<u8>& program_image, const std::vector<Symbol>& symbols,
52 u64 relocation_offset, u64 size, VAddr load_base) {
53 for (u64 i = 0; i < size; i += sizeof(Elf64_Rela)) {
54 Elf64_Rela rela;
55 std::memcpy(&rela, &program_image[relocation_offset + i], sizeof(Elf64_Rela));
56
57 const Symbol& symbol = symbols[rela.symbol];
58 switch (rela.type) {
59 case RelocationType::RELATIVE: {
60 const u64 value = load_base + rela.addend;
61 if (!symbol.name.empty()) {
62 exports[symbol.name] = value;
63 }
64 std::memcpy(&program_image[rela.offset], &value, sizeof(u64));
65 break;
66 }
67 case RelocationType::JUMP_SLOT:
68 case RelocationType::GLOB_DAT:
69 if (!symbol.value) {
70 imports[symbol.name] = {rela.offset + load_base, 0};
71 } else {
72 exports[symbol.name] = symbol.value;
73 std::memcpy(&program_image[rela.offset], &symbol.value, sizeof(u64));
74 }
75 break;
76 case RelocationType::ABS64:
77 if (!symbol.value) {
78 imports[symbol.name] = {rela.offset + load_base, rela.addend};
79 } else {
80 const u64 value = symbol.value + rela.addend;
81 exports[symbol.name] = value;
82 std::memcpy(&program_image[rela.offset], &value, sizeof(u64));
83 }
84 break;
85 default:
86 LOG_CRITICAL(Loader, "Unknown relocation type: {}", static_cast<int>(rela.type));
87 break;
88 }
89 }
90}
91
92void Linker::Relocate(std::vector<u8>& program_image, u32 dynamic_section_offset, VAddr load_base) {
93 std::map<u64, u64> dynamic;
94 while (dynamic_section_offset < program_image.size()) {
95 Elf64_Dyn dyn;
96 std::memcpy(&dyn, &program_image[dynamic_section_offset], sizeof(Elf64_Dyn));
97 dynamic_section_offset += sizeof(Elf64_Dyn);
98
99 if (dyn.tag == DT_NULL) {
100 break;
101 }
102 dynamic[dyn.tag] = dyn.value;
103 }
104
105 u64 offset = dynamic[DT_SYMTAB];
106 std::vector<Symbol> symbols;
107 while (offset < program_image.size()) {
108 Elf64_Sym sym;
109 std::memcpy(&sym, &program_image[offset], sizeof(Elf64_Sym));
110 offset += sizeof(Elf64_Sym);
111
112 if (sym.name >= dynamic[DT_STRSZ]) {
113 break;
114 }
115
116 std::string name = reinterpret_cast<char*>(&program_image[dynamic[DT_STRTAB] + sym.name]);
117 if (sym.value) {
118 exports[name] = load_base + sym.value;
119 symbols.emplace_back(std::move(name), load_base + sym.value);
120 } else {
121 symbols.emplace_back(std::move(name), 0);
122 }
123 }
124
125 if (dynamic.find(DT_RELA) != dynamic.end()) {
126 WriteRelocations(program_image, symbols, dynamic[DT_RELA], dynamic[DT_RELASZ], load_base);
127 }
128
129 if (dynamic.find(DT_JMPREL) != dynamic.end()) {
130 WriteRelocations(program_image, symbols, dynamic[DT_JMPREL], dynamic[DT_PLTRELSZ],
131 load_base);
132 }
133}
134
135void Linker::ResolveImports() {
136 // Resolve imports
137 for (const auto& import : imports) {
138 const auto& search = exports.find(import.first);
139 if (search != exports.end()) {
140 Memory::Write64(import.second.ea, search->second + import.second.addend);
141 } else {
142 LOG_ERROR(Loader, "Unresolved import: {}", import.first);
143 }
144 }
145}
146
147} // namespace Loader
diff --git a/src/core/loader/linker.h b/src/core/loader/linker.h
deleted file mode 100644
index 107625837..000000000
--- a/src/core/loader/linker.h
+++ /dev/null
@@ -1,36 +0,0 @@
1// Copyright 2018 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <map>
8#include <string>
9#include "common/common_types.h"
10
11namespace Loader {
12
13class Linker {
14protected:
15 struct Symbol {
16 Symbol(std::string&& name, u64 value) : name(std::move(name)), value(value) {}
17 std::string name;
18 u64 value;
19 };
20
21 struct Import {
22 VAddr ea;
23 s64 addend;
24 };
25
26 void WriteRelocations(std::vector<u8>& program_image, const std::vector<Symbol>& symbols,
27 u64 relocation_offset, u64 size, VAddr load_base);
28 void Relocate(std::vector<u8>& program_image, u32 dynamic_section_offset, VAddr load_base);
29
30 void ResolveImports();
31
32 std::map<std::string, Import> imports;
33 std::map<std::string, VAddr> exports;
34};
35
36} // namespace Loader
diff --git a/src/core/loader/nro.cpp b/src/core/loader/nro.cpp
index 4fad0c0dd..31e4a0c84 100644
--- a/src/core/loader/nro.cpp
+++ b/src/core/loader/nro.cpp
@@ -14,6 +14,7 @@
14#include "core/file_sys/romfs_factory.h" 14#include "core/file_sys/romfs_factory.h"
15#include "core/file_sys/vfs_offset.h" 15#include "core/file_sys/vfs_offset.h"
16#include "core/gdbstub/gdbstub.h" 16#include "core/gdbstub/gdbstub.h"
17#include "core/hle/kernel/code_set.h"
17#include "core/hle/kernel/process.h" 18#include "core/hle/kernel/process.h"
18#include "core/hle/kernel/vm_manager.h" 19#include "core/hle/kernel/vm_manager.h"
19#include "core/hle/service/filesystem/filesystem.h" 20#include "core/hle/service/filesystem/filesystem.h"
@@ -186,7 +187,7 @@ static bool LoadNroImpl(Kernel::Process& process, const std::vector<u8>& data,
186 program_image.resize(static_cast<u32>(program_image.size()) + bss_size); 187 program_image.resize(static_cast<u32>(program_image.size()) + bss_size);
187 188
188 // Load codeset for current process 189 // Load codeset for current process
189 codeset.memory = std::make_shared<std::vector<u8>>(std::move(program_image)); 190 codeset.memory = std::move(program_image);
190 process.LoadModule(std::move(codeset), load_base); 191 process.LoadModule(std::move(codeset), load_base);
191 192
192 // Register module with GDBStub 193 // Register module with GDBStub
diff --git a/src/core/loader/nro.h b/src/core/loader/nro.h
index 013d629c0..85b0ed644 100644
--- a/src/core/loader/nro.h
+++ b/src/core/loader/nro.h
@@ -4,10 +4,10 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <memory>
7#include <string> 8#include <string>
8#include <vector> 9#include <vector>
9#include "common/common_types.h" 10#include "common/common_types.h"
10#include "core/loader/linker.h"
11#include "core/loader/loader.h" 11#include "core/loader/loader.h"
12 12
13namespace FileSys { 13namespace FileSys {
@@ -21,7 +21,7 @@ class Process;
21namespace Loader { 21namespace Loader {
22 22
23/// Loads an NRO file 23/// Loads an NRO file
24class AppLoader_NRO final : public AppLoader, Linker { 24class AppLoader_NRO final : public AppLoader {
25public: 25public:
26 explicit AppLoader_NRO(FileSys::VirtualFile file); 26 explicit AppLoader_NRO(FileSys::VirtualFile file);
27 ~AppLoader_NRO() override; 27 ~AppLoader_NRO() override;
diff --git a/src/core/loader/nso.cpp b/src/core/loader/nso.cpp
index 6ded0b707..ffe2eea8a 100644
--- a/src/core/loader/nso.cpp
+++ b/src/core/loader/nso.cpp
@@ -4,13 +4,17 @@
4 4
5#include <cinttypes> 5#include <cinttypes>
6#include <vector> 6#include <vector>
7#include <lz4.h> 7
8#include "common/common_funcs.h" 8#include "common/common_funcs.h"
9#include "common/file_util.h" 9#include "common/file_util.h"
10#include "common/hex_util.h"
10#include "common/logging/log.h" 11#include "common/logging/log.h"
12#include "common/lz4_compression.h"
11#include "common/swap.h" 13#include "common/swap.h"
14#include "core/core.h"
12#include "core/file_sys/patch_manager.h" 15#include "core/file_sys/patch_manager.h"
13#include "core/gdbstub/gdbstub.h" 16#include "core/gdbstub/gdbstub.h"
17#include "core/hle/kernel/code_set.h"
14#include "core/hle/kernel/process.h" 18#include "core/hle/kernel/process.h"
15#include "core/hle/kernel/vm_manager.h" 19#include "core/hle/kernel/vm_manager.h"
16#include "core/loader/nso.h" 20#include "core/loader/nso.h"
@@ -18,36 +22,8 @@
18#include "core/settings.h" 22#include "core/settings.h"
19 23
20namespace Loader { 24namespace Loader {
21 25namespace {
22struct NsoSegmentHeader { 26struct MODHeader {
23 u32_le offset;
24 u32_le location;
25 u32_le size;
26 union {
27 u32_le alignment;
28 u32_le bss_size;
29 };
30};
31static_assert(sizeof(NsoSegmentHeader) == 0x10, "NsoSegmentHeader has incorrect size.");
32
33struct NsoHeader {
34 u32_le magic;
35 u32_le version;
36 INSERT_PADDING_WORDS(1);
37 u8 flags;
38 std::array<NsoSegmentHeader, 3> segments; // Text, RoData, Data (in that order)
39 std::array<u8, 0x20> build_id;
40 std::array<u32_le, 3> segments_compressed_size;
41
42 bool IsSegmentCompressed(size_t segment_num) const {
43 ASSERT_MSG(segment_num < 3, "Invalid segment {}", segment_num);
44 return ((flags >> segment_num) & 1);
45 }
46};
47static_assert(sizeof(NsoHeader) == 0x6c, "NsoHeader has incorrect size.");
48static_assert(std::is_trivially_copyable_v<NsoHeader>, "NsoHeader isn't trivially copyable.");
49
50struct ModHeader {
51 u32_le magic; 27 u32_le magic;
52 u32_le dynamic_offset; 28 u32_le dynamic_offset;
53 u32_le bss_start_offset; 29 u32_le bss_start_offset;
@@ -56,7 +32,28 @@ struct ModHeader {
56 u32_le eh_frame_hdr_end_offset; 32 u32_le eh_frame_hdr_end_offset;
57 u32_le module_offset; // Offset to runtime-generated module object. typically equal to .bss base 33 u32_le module_offset; // Offset to runtime-generated module object. typically equal to .bss base
58}; 34};
59static_assert(sizeof(ModHeader) == 0x1c, "ModHeader has incorrect size."); 35static_assert(sizeof(MODHeader) == 0x1c, "MODHeader has incorrect size.");
36
37std::vector<u8> DecompressSegment(const std::vector<u8>& compressed_data,
38 const NSOSegmentHeader& header) {
39 const std::vector<u8> uncompressed_data =
40 Common::Compression::DecompressDataLZ4(compressed_data, header.size);
41
42 ASSERT_MSG(uncompressed_data.size() == static_cast<int>(header.size), "{} != {}", header.size,
43 uncompressed_data.size());
44
45 return uncompressed_data;
46}
47
48constexpr u32 PageAlignSize(u32 size) {
49 return (size + Memory::PAGE_MASK) & ~Memory::PAGE_MASK;
50}
51} // Anonymous namespace
52
53bool NSOHeader::IsSegmentCompressed(size_t segment_num) const {
54 ASSERT_MSG(segment_num < 3, "Invalid segment {}", segment_num);
55 return ((flags >> segment_num) & 1) != 0;
56}
60 57
61AppLoader_NSO::AppLoader_NSO(FileSys::VirtualFile file) : AppLoader(std::move(file)) {} 58AppLoader_NSO::AppLoader_NSO(FileSys::VirtualFile file) : AppLoader(std::move(file)) {}
62 59
@@ -73,38 +70,22 @@ FileType AppLoader_NSO::IdentifyType(const FileSys::VirtualFile& file) {
73 return FileType::NSO; 70 return FileType::NSO;
74} 71}
75 72
76static std::vector<u8> DecompressSegment(const std::vector<u8>& compressed_data,
77 const NsoSegmentHeader& header) {
78 std::vector<u8> uncompressed_data(header.size);
79 const int bytes_uncompressed =
80 LZ4_decompress_safe(reinterpret_cast<const char*>(compressed_data.data()),
81 reinterpret_cast<char*>(uncompressed_data.data()),
82 static_cast<int>(compressed_data.size()), header.size);
83
84 ASSERT_MSG(bytes_uncompressed == static_cast<int>(header.size) &&
85 bytes_uncompressed == static_cast<int>(uncompressed_data.size()),
86 "{} != {} != {}", bytes_uncompressed, header.size, uncompressed_data.size());
87
88 return uncompressed_data;
89}
90
91static constexpr u32 PageAlignSize(u32 size) {
92 return (size + Memory::PAGE_MASK) & ~Memory::PAGE_MASK;
93}
94
95std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process, 73std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process,
96 const FileSys::VfsFile& file, VAddr load_base, 74 const FileSys::VfsFile& file, VAddr load_base,
97 bool should_pass_arguments, 75 bool should_pass_arguments,
98 std::optional<FileSys::PatchManager> pm) { 76 std::optional<FileSys::PatchManager> pm) {
99 if (file.GetSize() < sizeof(NsoHeader)) 77 if (file.GetSize() < sizeof(NSOHeader)) {
100 return {}; 78 return {};
79 }
101 80
102 NsoHeader nso_header{}; 81 NSOHeader nso_header{};
103 if (sizeof(NsoHeader) != file.ReadObject(&nso_header)) 82 if (sizeof(NSOHeader) != file.ReadObject(&nso_header)) {
104 return {}; 83 return {};
84 }
105 85
106 if (nso_header.magic != Common::MakeMagic('N', 'S', 'O', '0')) 86 if (nso_header.magic != Common::MakeMagic('N', 'S', 'O', '0')) {
107 return {}; 87 return {};
88 }
108 89
109 // Build program image 90 // Build program image
110 Kernel::CodeSet codeset; 91 Kernel::CodeSet codeset;
@@ -140,10 +121,10 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process,
140 std::memcpy(&module_offset, program_image.data() + 4, sizeof(u32)); 121 std::memcpy(&module_offset, program_image.data() + 4, sizeof(u32));
141 122
142 // Read MOD header 123 // Read MOD header
143 ModHeader mod_header{}; 124 MODHeader mod_header{};
144 // Default .bss to size in segment header if MOD0 section doesn't exist 125 // Default .bss to size in segment header if MOD0 section doesn't exist
145 u32 bss_size{PageAlignSize(nso_header.segments[2].bss_size)}; 126 u32 bss_size{PageAlignSize(nso_header.segments[2].bss_size)};
146 std::memcpy(&mod_header, program_image.data() + module_offset, sizeof(ModHeader)); 127 std::memcpy(&mod_header, program_image.data() + module_offset, sizeof(MODHeader));
147 const bool has_mod_header{mod_header.magic == Common::MakeMagic('M', 'O', 'D', '0')}; 128 const bool has_mod_header{mod_header.magic == Common::MakeMagic('M', 'O', 'D', '0')};
148 if (has_mod_header) { 129 if (has_mod_header) {
149 // Resize program image to include .bss section and page align each section 130 // Resize program image to include .bss section and page align each section
@@ -155,17 +136,29 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process,
155 136
156 // Apply patches if necessary 137 // Apply patches if necessary
157 if (pm && (pm->HasNSOPatch(nso_header.build_id) || Settings::values.dump_nso)) { 138 if (pm && (pm->HasNSOPatch(nso_header.build_id) || Settings::values.dump_nso)) {
158 std::vector<u8> pi_header(program_image.size() + 0x100); 139 std::vector<u8> pi_header(sizeof(NSOHeader) + program_image.size());
159 std::memcpy(pi_header.data(), &nso_header, sizeof(NsoHeader)); 140 pi_header.insert(pi_header.begin(), reinterpret_cast<u8*>(&nso_header),
160 std::memcpy(pi_header.data() + 0x100, program_image.data(), program_image.size()); 141 reinterpret_cast<u8*>(&nso_header) + sizeof(NSOHeader));
142 pi_header.insert(pi_header.begin() + sizeof(NSOHeader), program_image.begin(),
143 program_image.end());
161 144
162 pi_header = pm->PatchNSO(pi_header); 145 pi_header = pm->PatchNSO(pi_header);
163 146
164 std::memcpy(program_image.data(), pi_header.data() + 0x100, program_image.size()); 147 std::copy(pi_header.begin() + sizeof(NSOHeader), pi_header.end(), program_image.begin());
148 }
149
150 // Apply cheats if they exist and the program has a valid title ID
151 if (pm) {
152 auto& system = Core::System::GetInstance();
153 const auto cheats = pm->CreateCheatList(system, nso_header.build_id);
154 if (!cheats.empty()) {
155 system.RegisterCheatList(cheats, Common::HexArrayToString(nso_header.build_id),
156 load_base, load_base + program_image.size());
157 }
165 } 158 }
166 159
167 // Load codeset for current process 160 // Load codeset for current process
168 codeset.memory = std::make_shared<std::vector<u8>>(std::move(program_image)); 161 codeset.memory = std::move(program_image);
169 process.LoadModule(std::move(codeset), load_base); 162 process.LoadModule(std::move(codeset), load_base);
170 163
171 // Register module with GDBStub 164 // Register module with GDBStub
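PageAlignSize, now in the anonymous namespace above, rounds a size up to the next CPU page boundary with the usual mask trick. A worked example, assuming Memory::PAGE_SIZE is 0x1000 (so PAGE_MASK is 0xFFF):

    constexpr u32 PageAlignSize(u32 size) {
        return (size + Memory::PAGE_MASK) & ~Memory::PAGE_MASK;
    }
    // PageAlignSize(0x0001) == 0x1000
    // PageAlignSize(0x1000) == 0x1000
    // PageAlignSize(0x1001) == 0x2000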
diff --git a/src/core/loader/nso.h b/src/core/loader/nso.h
index 135b6ea5a..4674c3724 100644
--- a/src/core/loader/nso.h
+++ b/src/core/loader/nso.h
@@ -4,10 +4,12 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <array>
7#include <optional> 8#include <optional>
9#include <type_traits>
8#include "common/common_types.h" 10#include "common/common_types.h"
11#include "common/swap.h"
9#include "core/file_sys/patch_manager.h" 12#include "core/file_sys/patch_manager.h"
10#include "core/loader/linker.h"
11#include "core/loader/loader.h" 13#include "core/loader/loader.h"
12 14
13namespace Kernel { 15namespace Kernel {
@@ -16,6 +18,43 @@ class Process;
16 18
17namespace Loader { 19namespace Loader {
18 20
21struct NSOSegmentHeader {
22 u32_le offset;
23 u32_le location;
24 u32_le size;
25 union {
26 u32_le alignment;
27 u32_le bss_size;
28 };
29};
 30static_assert(sizeof(NSOSegmentHeader) == 0x10, "NSOSegmentHeader has incorrect size.");
31
32struct NSOHeader {
33 using SHA256Hash = std::array<u8, 0x20>;
34
35 struct RODataRelativeExtent {
36 u32_le data_offset;
37 u32_le size;
38 };
39
40 u32_le magic;
41 u32_le version;
42 u32 reserved;
43 u32_le flags;
44 std::array<NSOSegmentHeader, 3> segments; // Text, RoData, Data (in that order)
45 std::array<u8, 0x20> build_id;
46 std::array<u32_le, 3> segments_compressed_size;
47 std::array<u8, 0x1C> padding;
48 RODataRelativeExtent api_info_extent;
49 RODataRelativeExtent dynstr_extent;
50 RODataRelativeExtent dynsyn_extent;
51 std::array<SHA256Hash, 3> segment_hashes;
52
53 bool IsSegmentCompressed(size_t segment_num) const;
54};
55static_assert(sizeof(NSOHeader) == 0x100, "NSOHeader has incorrect size.");
56static_assert(std::is_trivially_copyable_v<NSOHeader>, "NSOHeader must be trivially copyable.");
57
19constexpr u64 NSO_ARGUMENT_DATA_ALLOCATION_SIZE = 0x9000; 58constexpr u64 NSO_ARGUMENT_DATA_ALLOCATION_SIZE = 0x9000;
20 59
21struct NSOArgumentHeader { 60struct NSOArgumentHeader {
@@ -26,7 +65,7 @@ struct NSOArgumentHeader {
26static_assert(sizeof(NSOArgumentHeader) == 0x20, "NSOArgumentHeader has incorrect size."); 65static_assert(sizeof(NSOArgumentHeader) == 0x20, "NSOArgumentHeader has incorrect size.");
27 66
28/// Loads an NSO file 67/// Loads an NSO file
29class AppLoader_NSO final : public AppLoader, Linker { 68class AppLoader_NSO final : public AppLoader {
30public: 69public:
31 explicit AppLoader_NSO(FileSys::VirtualFile file); 70 explicit AppLoader_NSO(FileSys::VirtualFile file);
32 71
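As a sanity check on the static_assert above, the declared NSOHeader fields do sum to 0x100 bytes:

    magic + version + reserved + flags        4 * 4 =  16
    segments (3 * sizeof(NSOSegmentHeader))   +  48 ->  64
    build_id                                  +  32 ->  96
    segments_compressed_size (3 * 4)          +  12 -> 108
    padding (0x1C)                            +  28 -> 136
    api_info/dynstr/dynsyn extents (3 * 8)    +  24 -> 160
    segment_hashes (3 * 0x20)                 +  96 -> 256 = 0x100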
diff --git a/src/core/loader/xci.h b/src/core/loader/xci.h
index d6995b61e..436f7387c 100644
--- a/src/core/loader/xci.h
+++ b/src/core/loader/xci.h
@@ -22,7 +22,7 @@ class AppLoader_NCA;
22class AppLoader_XCI final : public AppLoader { 22class AppLoader_XCI final : public AppLoader {
23public: 23public:
24 explicit AppLoader_XCI(FileSys::VirtualFile file); 24 explicit AppLoader_XCI(FileSys::VirtualFile file);
25 ~AppLoader_XCI(); 25 ~AppLoader_XCI() override;
26 26
27 /** 27 /**
28 * Returns the type of the file 28 * Returns the type of the file
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 6591c45d2..4e0538bc2 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -10,6 +10,7 @@
10#include "common/assert.h" 10#include "common/assert.h"
11#include "common/common_types.h" 11#include "common/common_types.h"
12#include "common/logging/log.h" 12#include "common/logging/log.h"
13#include "common/page_table.h"
13#include "common/swap.h" 14#include "common/swap.h"
14#include "core/arm/arm_interface.h" 15#include "core/arm/arm_interface.h"
15#include "core/core.h" 16#include "core/core.h"
@@ -18,13 +19,14 @@
18#include "core/hle/lock.h" 19#include "core/hle/lock.h"
19#include "core/memory.h" 20#include "core/memory.h"
20#include "core/memory_setup.h" 21#include "core/memory_setup.h"
22#include "video_core/gpu.h"
21#include "video_core/renderer_base.h" 23#include "video_core/renderer_base.h"
22 24
23namespace Memory { 25namespace Memory {
24 26
25static PageTable* current_page_table = nullptr; 27static Common::PageTable* current_page_table = nullptr;
26 28
27void SetCurrentPageTable(PageTable* page_table) { 29void SetCurrentPageTable(Common::PageTable* page_table) {
28 current_page_table = page_table; 30 current_page_table = page_table;
29 31
30 auto& system = Core::System::GetInstance(); 32 auto& system = Core::System::GetInstance();
@@ -36,39 +38,16 @@ void SetCurrentPageTable(PageTable* page_table) {
36 } 38 }
37} 39}
38 40
39PageTable* GetCurrentPageTable() { 41static void MapPages(Common::PageTable& page_table, VAddr base, u64 size, u8* memory,
40 return current_page_table; 42 Common::PageType type) {
41}
42
43PageTable::PageTable() = default;
44
45PageTable::PageTable(std::size_t address_space_width_in_bits) {
46 Resize(address_space_width_in_bits);
47}
48
49PageTable::~PageTable() = default;
50
51void PageTable::Resize(std::size_t address_space_width_in_bits) {
52 const std::size_t num_page_table_entries = 1ULL << (address_space_width_in_bits - PAGE_BITS);
53
54 pointers.resize(num_page_table_entries);
55 attributes.resize(num_page_table_entries);
56
57 // The default is a 39-bit address space, which causes an initial 1GB allocation size. If the
58 // vector size is subsequently decreased (via resize), the vector might not automatically
59 // actually reallocate/resize its underlying allocation, which wastes up to ~800 MB for
60 // 36-bit titles. Call shrink_to_fit to reduce capacity to what's actually in use.
61
62 pointers.shrink_to_fit();
63 attributes.shrink_to_fit();
64}
65
66static void MapPages(PageTable& page_table, VAddr base, u64 size, u8* memory, PageType type) {
67 LOG_DEBUG(HW_Memory, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * PAGE_SIZE, 43 LOG_DEBUG(HW_Memory, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * PAGE_SIZE,
68 (base + size) * PAGE_SIZE); 44 (base + size) * PAGE_SIZE);
69 45
70 RasterizerFlushVirtualRegion(base << PAGE_BITS, size * PAGE_SIZE, 46 // During boot, current_page_table might not be set yet, in which case we need not flush
71 FlushMode::FlushAndInvalidate); 47 if (Core::System::GetInstance().IsPoweredOn()) {
48 Core::System::GetInstance().GPU().FlushAndInvalidateRegion(base << PAGE_BITS,
49 size * PAGE_SIZE);
50 }
72 51
73 VAddr end = base + size; 52 VAddr end = base + size;
74 ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}", 53 ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}",
@@ -88,41 +67,47 @@ static void MapPages(PageTable& page_table, VAddr base, u64 size, u8* memory, Pa
88 } 67 }
89} 68}
90 69
91void MapMemoryRegion(PageTable& page_table, VAddr base, u64 size, u8* target) { 70void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) {
92 ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); 71 ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
93 ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); 72 ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
94 MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory); 73 MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory);
95} 74}
96 75
97void MapIoRegion(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer mmio_handler) { 76void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size,
77 Common::MemoryHookPointer mmio_handler) {
98 ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); 78 ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
99 ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); 79 ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
100 MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special); 80 MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, Common::PageType::Special);
101 81
102 auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); 82 auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
103 SpecialRegion region{SpecialRegion::Type::IODevice, std::move(mmio_handler)}; 83 Common::SpecialRegion region{Common::SpecialRegion::Type::IODevice, std::move(mmio_handler)};
104 page_table.special_regions.add(std::make_pair(interval, std::set<SpecialRegion>{region})); 84 page_table.special_regions.add(
85 std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
105} 86}
106 87
107void UnmapRegion(PageTable& page_table, VAddr base, u64 size) { 88void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
108 ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); 89 ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
109 ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); 90 ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
110 MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped); 91 MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, Common::PageType::Unmapped);
111 92
112 auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); 93 auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
113 page_table.special_regions.erase(interval); 94 page_table.special_regions.erase(interval);
114} 95}
115 96
116void AddDebugHook(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer hook) { 97void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
98 Common::MemoryHookPointer hook) {
117 auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); 99 auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
118 SpecialRegion region{SpecialRegion::Type::DebugHook, std::move(hook)}; 100 Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)};
119 page_table.special_regions.add(std::make_pair(interval, std::set<SpecialRegion>{region})); 101 page_table.special_regions.add(
102 std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
120} 103}
121 104
122void RemoveDebugHook(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer hook) { 105void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
106 Common::MemoryHookPointer hook) {
123 auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); 107 auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
124 SpecialRegion region{SpecialRegion::Type::DebugHook, std::move(hook)}; 108 Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)};
125 page_table.special_regions.subtract(std::make_pair(interval, std::set<SpecialRegion>{region})); 109 page_table.special_regions.subtract(
110 std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
126} 111}
127 112
128/** 113/**
@@ -171,19 +156,19 @@ T Read(const VAddr vaddr) {
171 return value; 156 return value;
172 } 157 }
173 158
174 PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; 159 Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
175 switch (type) { 160 switch (type) {
176 case PageType::Unmapped: 161 case Common::PageType::Unmapped:
177 LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, vaddr); 162 LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, vaddr);
178 return 0; 163 return 0;
179 case PageType::Memory: 164 case Common::PageType::Memory:
180 ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); 165 ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
181 break; 166 break;
182 case PageType::RasterizerCachedMemory: { 167 case Common::PageType::RasterizerCachedMemory: {
183 RasterizerFlushVirtualRegion(vaddr, sizeof(T), FlushMode::Flush); 168 auto host_ptr{GetPointerFromVMA(vaddr)};
184 169 Core::System::GetInstance().GPU().FlushRegion(ToCacheAddr(host_ptr), sizeof(T));
185 T value; 170 T value;
186 std::memcpy(&value, GetPointerFromVMA(vaddr), sizeof(T)); 171 std::memcpy(&value, host_ptr, sizeof(T));
187 return value; 172 return value;
188 } 173 }
189 default: 174 default:
@@ -201,18 +186,19 @@ void Write(const VAddr vaddr, const T data) {
201 return; 186 return;
202 } 187 }
203 188
204 PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; 189 Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
205 switch (type) { 190 switch (type) {
206 case PageType::Unmapped: 191 case Common::PageType::Unmapped:
207 LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8, 192 LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8,
208 static_cast<u32>(data), vaddr); 193 static_cast<u32>(data), vaddr);
209 return; 194 return;
210 case PageType::Memory: 195 case Common::PageType::Memory:
211 ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); 196 ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
212 break; 197 break;
213 case PageType::RasterizerCachedMemory: { 198 case Common::PageType::RasterizerCachedMemory: {
214 RasterizerFlushVirtualRegion(vaddr, sizeof(T), FlushMode::Invalidate); 199 auto host_ptr{GetPointerFromVMA(vaddr)};
215 std::memcpy(GetPointerFromVMA(vaddr), &data, sizeof(T)); 200 Core::System::GetInstance().GPU().InvalidateRegion(ToCacheAddr(host_ptr), sizeof(T));
201 std::memcpy(host_ptr, &data, sizeof(T));
216 break; 202 break;
217 } 203 }
218 default: 204 default:
@@ -227,10 +213,10 @@ bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) {
227 if (page_pointer) 213 if (page_pointer)
228 return true; 214 return true;
229 215
230 if (page_table.attributes[vaddr >> PAGE_BITS] == PageType::RasterizerCachedMemory) 216 if (page_table.attributes[vaddr >> PAGE_BITS] == Common::PageType::RasterizerCachedMemory)
231 return true; 217 return true;
232 218
233 if (page_table.attributes[vaddr >> PAGE_BITS] != PageType::Special) 219 if (page_table.attributes[vaddr >> PAGE_BITS] != Common::PageType::Special)
234 return false; 220 return false;
235 221
236 return false; 222 return false;
@@ -250,7 +236,8 @@ u8* GetPointer(const VAddr vaddr) {
250 return page_pointer + (vaddr & PAGE_MASK); 236 return page_pointer + (vaddr & PAGE_MASK);
251 } 237 }
252 238
253 if (current_page_table->attributes[vaddr >> PAGE_BITS] == PageType::RasterizerCachedMemory) { 239 if (current_page_table->attributes[vaddr >> PAGE_BITS] ==
240 Common::PageType::RasterizerCachedMemory) {
254 return GetPointerFromVMA(vaddr); 241 return GetPointerFromVMA(vaddr);
255 } 242 }
256 243
@@ -284,20 +271,20 @@ void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
284 271
285 u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1; 272 u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
286 for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) { 273 for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
287 PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS]; 274 Common::PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS];
288 275
289 if (cached) { 276 if (cached) {
290 // Switch page type to cached if now cached 277 // Switch page type to cached if now cached
291 switch (page_type) { 278 switch (page_type) {
292 case PageType::Unmapped: 279 case Common::PageType::Unmapped:
293 // It is not necessary for a process to have this region mapped into its address 280 // It is not necessary for a process to have this region mapped into its address
294 // space, for example, a system module need not have a VRAM mapping. 281 // space, for example, a system module need not have a VRAM mapping.
295 break; 282 break;
296 case PageType::Memory: 283 case Common::PageType::Memory:
297 page_type = PageType::RasterizerCachedMemory; 284 page_type = Common::PageType::RasterizerCachedMemory;
298 current_page_table->pointers[vaddr >> PAGE_BITS] = nullptr; 285 current_page_table->pointers[vaddr >> PAGE_BITS] = nullptr;
299 break; 286 break;
300 case PageType::RasterizerCachedMemory: 287 case Common::PageType::RasterizerCachedMemory:
301 // There can be more than one GPU region mapped per CPU region, so it's common that 288 // There can be more than one GPU region mapped per CPU region, so it's common that
302 // this area is already marked as cached. 289 // this area is already marked as cached.
303 break; 290 break;
@@ -307,23 +294,23 @@ void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
307 } else { 294 } else {
308 // Switch page type to uncached if now uncached 295 // Switch page type to uncached if now uncached
309 switch (page_type) { 296 switch (page_type) {
310 case PageType::Unmapped: 297 case Common::PageType::Unmapped:
311 // It is not necessary for a process to have this region mapped into its address 298 // It is not necessary for a process to have this region mapped into its address
312 // space, for example, a system module need not have a VRAM mapping. 299 // space, for example, a system module need not have a VRAM mapping.
313 break; 300 break;
314 case PageType::Memory: 301 case Common::PageType::Memory:
315 // There can be more than one GPU region mapped per CPU region, so it's common that 302 // There can be more than one GPU region mapped per CPU region, so it's common that
316 // this area is already unmarked as cached. 303 // this area is already unmarked as cached.
317 break; 304 break;
318 case PageType::RasterizerCachedMemory: { 305 case Common::PageType::RasterizerCachedMemory: {
319 u8* pointer = GetPointerFromVMA(vaddr & ~PAGE_MASK); 306 u8* pointer = GetPointerFromVMA(vaddr & ~PAGE_MASK);
320 if (pointer == nullptr) { 307 if (pointer == nullptr) {
321 // It's possible that this function has been called while updating the pagetable 308 // It's possible that this function has been called while updating the pagetable
322 // after unmapping a VMA. In that case the underlying VMA will no longer exist, 309 // after unmapping a VMA. In that case the underlying VMA will no longer exist,
323 // and we should just leave the pagetable entry blank. 310 // and we should just leave the pagetable entry blank.
324 page_type = PageType::Unmapped; 311 page_type = Common::PageType::Unmapped;
325 } else { 312 } else {
326 page_type = PageType::Memory; 313 page_type = Common::PageType::Memory;
327 current_page_table->pointers[vaddr >> PAGE_BITS] = pointer; 314 current_page_table->pointers[vaddr >> PAGE_BITS] = pointer;
328 } 315 }
329 break; 316 break;
@@ -335,47 +322,6 @@ void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
335 } 322 }
336} 323}
337 324
338void RasterizerFlushVirtualRegion(VAddr start, u64 size, FlushMode mode) {
339 auto& system_instance = Core::System::GetInstance();
340
341 // Since pages are unmapped on shutdown after video core is shutdown, the renderer may be
342 // null here
343 if (!system_instance.IsPoweredOn()) {
344 return;
345 }
346
347 const VAddr end = start + size;
348
349 const auto CheckRegion = [&](VAddr region_start, VAddr region_end) {
350 if (start >= region_end || end <= region_start) {
351 // No overlap with region
352 return;
353 }
354
355 const VAddr overlap_start = std::max(start, region_start);
356 const VAddr overlap_end = std::min(end, region_end);
357 const VAddr overlap_size = overlap_end - overlap_start;
358
359 auto& gpu = system_instance.GPU();
360 switch (mode) {
361 case FlushMode::Flush:
362 gpu.FlushRegion(overlap_start, overlap_size);
363 break;
364 case FlushMode::Invalidate:
365 gpu.InvalidateRegion(overlap_start, overlap_size);
366 break;
367 case FlushMode::FlushAndInvalidate:
368 gpu.FlushAndInvalidateRegion(overlap_start, overlap_size);
369 break;
370 }
371 };
372
373 const auto& vm_manager = Core::CurrentProcess()->VMManager();
374
375 CheckRegion(vm_manager.GetCodeRegionBaseAddress(), vm_manager.GetCodeRegionEndAddress());
376 CheckRegion(vm_manager.GetHeapRegionBaseAddress(), vm_manager.GetHeapRegionEndAddress());
377}
378
379u8 Read8(const VAddr addr) { 325u8 Read8(const VAddr addr) {
380 return Read<u8>(addr); 326 return Read<u8>(addr);
381} 327}
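The Read/Write hunks above replace RasterizerFlushVirtualRegion with cache maintenance keyed on the host pointer: the VMA is resolved first, then the GPU cache is flushed (for reads) or invalidated (for writes) over that host range before the memcpy. A condensed sketch of the read side as the hunk leaves it, using ToCacheAddr and GetPointerFromVMA as above:

    case Common::PageType::RasterizerCachedMemory: {
        u8* const host_ptr = GetPointerFromVMA(vaddr);
        // Pull back any pending GPU writes covering this host range before reading it.
        Core::System::GetInstance().GPU().FlushRegion(ToCacheAddr(host_ptr), sizeof(T));
        T value;
        std::memcpy(&value, host_ptr, sizeof(T));
        return value;
    }
    // The write path mirrors this but calls InvalidateRegion instead, since the CPU copy
    // is about to become the authoritative one.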
@@ -406,24 +352,24 @@ void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_
         const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
 
         switch (page_table.attributes[page_index]) {
-        case PageType::Unmapped: {
+        case Common::PageType::Unmapped: {
             LOG_ERROR(HW_Memory,
                       "Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                       current_vaddr, src_addr, size);
             std::memset(dest_buffer, 0, copy_amount);
             break;
         }
-        case PageType::Memory: {
+        case Common::PageType::Memory: {
             DEBUG_ASSERT(page_table.pointers[page_index]);
 
             const u8* src_ptr = page_table.pointers[page_index] + page_offset;
             std::memcpy(dest_buffer, src_ptr, copy_amount);
             break;
         }
-        case PageType::RasterizerCachedMemory: {
-            RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount),
-                                         FlushMode::Flush);
-            std::memcpy(dest_buffer, GetPointerFromVMA(process, current_vaddr), copy_amount);
+        case Common::PageType::RasterizerCachedMemory: {
+            const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)};
+            Core::System::GetInstance().GPU().FlushRegion(ToCacheAddr(host_ptr), copy_amount);
+            std::memcpy(dest_buffer, host_ptr, copy_amount);
             break;
         }
         default:
@@ -470,23 +416,23 @@ void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const voi
         const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
 
         switch (page_table.attributes[page_index]) {
-        case PageType::Unmapped: {
+        case Common::PageType::Unmapped: {
             LOG_ERROR(HW_Memory,
                       "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                       current_vaddr, dest_addr, size);
             break;
         }
-        case PageType::Memory: {
+        case Common::PageType::Memory: {
             DEBUG_ASSERT(page_table.pointers[page_index]);
 
             u8* dest_ptr = page_table.pointers[page_index] + page_offset;
             std::memcpy(dest_ptr, src_buffer, copy_amount);
             break;
         }
-        case PageType::RasterizerCachedMemory: {
-            RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount),
-                                         FlushMode::Invalidate);
-            std::memcpy(GetPointerFromVMA(process, current_vaddr), src_buffer, copy_amount);
+        case Common::PageType::RasterizerCachedMemory: {
+            const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)};
+            Core::System::GetInstance().GPU().InvalidateRegion(ToCacheAddr(host_ptr), copy_amount);
+            std::memcpy(host_ptr, src_buffer, copy_amount);
             break;
         }
         default:
@@ -516,23 +462,23 @@ void ZeroBlock(const Kernel::Process& process, const VAddr dest_addr, const std:
         const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
 
         switch (page_table.attributes[page_index]) {
-        case PageType::Unmapped: {
+        case Common::PageType::Unmapped: {
             LOG_ERROR(HW_Memory,
                       "Unmapped ZeroBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                       current_vaddr, dest_addr, size);
             break;
         }
-        case PageType::Memory: {
+        case Common::PageType::Memory: {
             DEBUG_ASSERT(page_table.pointers[page_index]);
 
             u8* dest_ptr = page_table.pointers[page_index] + page_offset;
             std::memset(dest_ptr, 0, copy_amount);
             break;
         }
-        case PageType::RasterizerCachedMemory: {
-            RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount),
-                                         FlushMode::Invalidate);
-            std::memset(GetPointerFromVMA(process, current_vaddr), 0, copy_amount);
+        case Common::PageType::RasterizerCachedMemory: {
+            const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)};
+            Core::System::GetInstance().GPU().InvalidateRegion(ToCacheAddr(host_ptr), copy_amount);
+            std::memset(host_ptr, 0, copy_amount);
             break;
         }
         default:
@@ -558,23 +504,23 @@ void CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr,
         const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
 
         switch (page_table.attributes[page_index]) {
-        case PageType::Unmapped: {
+        case Common::PageType::Unmapped: {
             LOG_ERROR(HW_Memory,
                       "Unmapped CopyBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                       current_vaddr, src_addr, size);
             ZeroBlock(process, dest_addr, copy_amount);
             break;
         }
-        case PageType::Memory: {
+        case Common::PageType::Memory: {
             DEBUG_ASSERT(page_table.pointers[page_index]);
             const u8* src_ptr = page_table.pointers[page_index] + page_offset;
             WriteBlock(process, dest_addr, src_ptr, copy_amount);
             break;
         }
-        case PageType::RasterizerCachedMemory: {
-            RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount),
-                                         FlushMode::Flush);
-            WriteBlock(process, dest_addr, GetPointerFromVMA(process, current_vaddr), copy_amount);
+        case Common::PageType::RasterizerCachedMemory: {
+            const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)};
+            Core::System::GetInstance().GPU().FlushRegion(ToCacheAddr(host_ptr), copy_amount);
+            WriteBlock(process, dest_addr, host_ptr, copy_amount);
             break;
         }
         default:
diff --git a/src/core/memory.h b/src/core/memory.h
index 1acf5ce8c..6845f5fe1 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -6,11 +6,11 @@
 
 #include <cstddef>
 #include <string>
-#include <tuple>
-#include <vector>
-#include <boost/icl/interval_map.hpp>
 #include "common/common_types.h"
-#include "core/memory_hook.h"
+
+namespace Common {
+struct PageTable;
+}
 
 namespace Kernel {
 class Process;
@@ -26,83 +26,8 @@ constexpr std::size_t PAGE_BITS = 12;
 constexpr u64 PAGE_SIZE = 1ULL << PAGE_BITS;
 constexpr u64 PAGE_MASK = PAGE_SIZE - 1;
 
-enum class PageType : u8 {
-    /// Page is unmapped and should cause an access error.
-    Unmapped,
-    /// Page is mapped to regular memory. This is the only type you can get pointers to.
-    Memory,
-    /// Page is mapped to regular memory, but also needs to check for rasterizer cache flushing and
-    /// invalidation
-    RasterizerCachedMemory,
-    /// Page is mapped to a I/O region. Writing and reading to this page is handled by functions.
-    Special,
-};
-
-struct SpecialRegion {
-    enum class Type {
-        DebugHook,
-        IODevice,
-    } type;
-
-    MemoryHookPointer handler;
-
-    bool operator<(const SpecialRegion& other) const {
-        return std::tie(type, handler) < std::tie(other.type, other.handler);
-    }
-
-    bool operator==(const SpecialRegion& other) const {
-        return std::tie(type, handler) == std::tie(other.type, other.handler);
-    }
-};
-
-/**
- * A (reasonably) fast way of allowing switchable and remappable process address spaces. It loosely
- * mimics the way a real CPU page table works.
- */
-struct PageTable {
-    explicit PageTable();
-    explicit PageTable(std::size_t address_space_width_in_bits);
-    ~PageTable();
-
-    /**
-     * Resizes the page table to be able to accomodate enough pages within
-     * a given address space.
-     *
-     * @param address_space_width_in_bits The address size width in bits.
-     */
-    void Resize(std::size_t address_space_width_in_bits);
-
-    /**
-     * Vector of memory pointers backing each page. An entry can only be non-null if the
-     * corresponding entry in the `attributes` vector is of type `Memory`.
-     */
-    std::vector<u8*> pointers;
-
-    /**
-     * Contains MMIO handlers that back memory regions whose entries in the `attribute` vector is
-     * of type `Special`.
-     */
-    boost::icl::interval_map<VAddr, std::set<SpecialRegion>> special_regions;
-
-    /**
-     * Vector of fine grained page attributes. If it is set to any value other than `Memory`, then
-     * the corresponding entry in `pointers` MUST be set to null.
-     */
-    std::vector<PageType> attributes;
-};
-
 /// Virtual user-space memory regions
 enum : VAddr {
-    /// Read-only page containing kernel and system configuration values.
-    CONFIG_MEMORY_VADDR = 0x1FF80000,
-    CONFIG_MEMORY_SIZE = 0x00001000,
-    CONFIG_MEMORY_VADDR_END = CONFIG_MEMORY_VADDR + CONFIG_MEMORY_SIZE,
-
-    /// Usually read-only page containing mostly values read from hardware.
-    SHARED_PAGE_VADDR = 0x1FF81000,
-    SHARED_PAGE_SIZE = 0x00001000,
-    SHARED_PAGE_VADDR_END = SHARED_PAGE_VADDR + SHARED_PAGE_SIZE,
-
     /// TLS (Thread-Local Storage) related.
     TLS_ENTRY_SIZE = 0x200,
 
@@ -115,9 +40,8 @@ enum : VAddr {
     KERNEL_REGION_END = KERNEL_REGION_VADDR + KERNEL_REGION_SIZE,
 };
 
-/// Currently active page table
-void SetCurrentPageTable(PageTable* page_table);
-PageTable* GetCurrentPageTable();
+/// Changes the currently active page table.
+void SetCurrentPageTable(Common::PageTable* page_table);
 
 /// Determines if the given VAddr is valid for the specified process.
 bool IsValidVirtualAddress(const Kernel::Process& process, VAddr vaddr);
@@ -161,10 +85,4 @@ enum class FlushMode {
  */
 void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached);
 
-/**
- * Flushes and invalidates any externally cached rasterizer resources touching the given virtual
- * address region.
- */
-void RasterizerFlushVirtualRegion(VAddr start, u64 size, FlushMode mode);
-
 } // namespace Memory
diff --git a/src/core/memory_hook.cpp b/src/core/memory_hook.cpp
deleted file mode 100644
index c61c6c1fb..000000000
--- a/src/core/memory_hook.cpp
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2018 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include "core/memory_hook.h"
-
-namespace Memory {
-
-MemoryHook::~MemoryHook() = default;
-
-} // namespace Memory
diff --git a/src/core/memory_hook.h b/src/core/memory_hook.h
deleted file mode 100644
index 940777107..000000000
--- a/src/core/memory_hook.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2016 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <memory>
-#include <optional>
-
-#include "common/common_types.h"
-
-namespace Memory {
-
-/**
- * Memory hooks have two purposes:
- * 1. To allow reads and writes to a region of memory to be intercepted. This is used to implement
- *    texture forwarding and memory breakpoints for debugging.
- * 2. To allow for the implementation of MMIO devices.
- *
- * A hook may be mapped to multiple regions of memory.
- *
- * If a std::nullopt or false is returned from a function, the read/write request is passed through
- * to the underlying memory region.
- */
-class MemoryHook {
-public:
-    virtual ~MemoryHook();
-
-    virtual std::optional<bool> IsValidAddress(VAddr addr) = 0;
-
-    virtual std::optional<u8> Read8(VAddr addr) = 0;
-    virtual std::optional<u16> Read16(VAddr addr) = 0;
-    virtual std::optional<u32> Read32(VAddr addr) = 0;
-    virtual std::optional<u64> Read64(VAddr addr) = 0;
-
-    virtual bool ReadBlock(VAddr src_addr, void* dest_buffer, std::size_t size) = 0;
-
-    virtual bool Write8(VAddr addr, u8 data) = 0;
-    virtual bool Write16(VAddr addr, u16 data) = 0;
-    virtual bool Write32(VAddr addr, u32 data) = 0;
-    virtual bool Write64(VAddr addr, u64 data) = 0;
-
-    virtual bool WriteBlock(VAddr dest_addr, const void* src_buffer, std::size_t size) = 0;
-};
-
-using MemoryHookPointer = std::shared_ptr<MemoryHook>;
-} // namespace Memory
diff --git a/src/core/memory_setup.h b/src/core/memory_setup.h
index 9a1a4f4be..5225ee8e2 100644
--- a/src/core/memory_setup.h
+++ b/src/core/memory_setup.h
@@ -5,7 +5,11 @@
 #pragma once
 
 #include "common/common_types.h"
-#include "core/memory_hook.h"
+#include "common/memory_hook.h"
+
+namespace Common {
+struct PageTable;
+}
 
 namespace Memory {
 
@@ -17,7 +21,7 @@ namespace Memory {
  * @param size The amount of bytes to map. Must be page-aligned.
  * @param target Buffer with the memory backing the mapping. Must be of length at least `size`.
  */
-void MapMemoryRegion(PageTable& page_table, VAddr base, u64 size, u8* target);
+void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target);
 
 /**
  * Maps a region of the emulated process address space as a IO region.
@@ -26,11 +30,14 @@ void MapMemoryRegion(PageTable& page_table, VAddr base, u64 size, u8* target);
  * @param size The amount of bytes to map. Must be page-aligned.
  * @param mmio_handler The handler that backs the mapping.
  */
-void MapIoRegion(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer mmio_handler);
+void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size,
+                 Common::MemoryHookPointer mmio_handler);
 
-void UnmapRegion(PageTable& page_table, VAddr base, u64 size);
+void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size);
 
-void AddDebugHook(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer hook);
-void RemoveDebugHook(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer hook);
+void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
+                  Common::MemoryHookPointer hook);
+void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
+                     Common::MemoryHookPointer hook);
 
 } // namespace Memory
diff --git a/src/core/perf_stats.cpp b/src/core/perf_stats.cpp
index c716a462b..4afd6c8a3 100644
--- a/src/core/perf_stats.cpp
+++ b/src/core/perf_stats.cpp
@@ -18,13 +18,13 @@ using std::chrono::microseconds;
 namespace Core {
 
 void PerfStats::BeginSystemFrame() {
-    std::lock_guard<std::mutex> lock(object_mutex);
+    std::lock_guard lock{object_mutex};
 
     frame_begin = Clock::now();
 }
 
 void PerfStats::EndSystemFrame() {
-    std::lock_guard<std::mutex> lock(object_mutex);
+    std::lock_guard lock{object_mutex};
 
     auto frame_end = Clock::now();
     accumulated_frametime += frame_end - frame_begin;
@@ -35,13 +35,13 @@ void PerfStats::EndSystemFrame() {
 }
 
 void PerfStats::EndGameFrame() {
-    std::lock_guard<std::mutex> lock(object_mutex);
+    std::lock_guard lock{object_mutex};
 
     game_frames += 1;
 }
 
 PerfStatsResults PerfStats::GetAndResetStats(microseconds current_system_time_us) {
-    std::lock_guard<std::mutex> lock(object_mutex);
+    std::lock_guard lock{object_mutex};
 
     const auto now = Clock::now();
     // Walltime elapsed since stats were reset
@@ -67,7 +67,7 @@ PerfStatsResults PerfStats::GetAndResetStats(microseconds current_system_time_us
 }
 
 double PerfStats::GetLastFrameTimeScale() {
-    std::lock_guard<std::mutex> lock(object_mutex);
+    std::lock_guard lock{object_mutex};
 
     constexpr double FRAME_LENGTH = 1.0 / 60;
     return duration_cast<DoubleSecs>(previous_frame_length).count() / FRAME_LENGTH;
diff --git a/src/core/settings.cpp b/src/core/settings.cpp
index 6dd3139cc..6d32ebea3 100644
--- a/src/core/settings.cpp
+++ b/src/core/settings.cpp
@@ -82,7 +82,6 @@ void LogSetting(const std::string& name, const T& value) {
 void LogSettings() {
     LOG_INFO(Config, "yuzu Configuration:");
     LogSetting("System_UseDockedMode", Settings::values.use_docked_mode);
-    LogSetting("System_EnableNfc", Settings::values.enable_nfc);
     LogSetting("System_RngSeed", Settings::values.rng_seed.value_or(0));
     LogSetting("System_CurrentUser", Settings::values.current_user);
     LogSetting("System_LanguageIndex", Settings::values.language_index);
diff --git a/src/core/settings.h b/src/core/settings.h
index cdfb2f742..d543eb32f 100644
--- a/src/core/settings.h
+++ b/src/core/settings.h
@@ -349,7 +349,6 @@ struct TouchscreenInput {
 struct Values {
     // System
     bool use_docked_mode;
-    bool enable_nfc;
     std::optional<u32> rng_seed;
     // Measured in seconds since epoch
     std::optional<std::chrono::seconds> custom_rtc;