summaryrefslogtreecommitdiff
path: root/src/core/arm
diff options
context:
space:
mode:
authorGravatar Fernando S2023-12-06 14:19:17 +0100
committerGravatar GitHub2023-12-06 14:19:17 +0100
commit8a79dd2d6c6445bff63ea1f2f5f1611a6afcd97a (patch)
tree265bf3c7970a570479c6a3ac1250549995f0329c /src/core/arm
parentMerge pull request #12271 from liamwhite/pretext-fix (diff)
parentarm: fix context save of vector regs (diff)
downloadyuzu-8a79dd2d6c6445bff63ea1f2f5f1611a6afcd97a.tar.gz
yuzu-8a79dd2d6c6445bff63ea1f2f5f1611a6afcd97a.tar.xz
yuzu-8a79dd2d6c6445bff63ea1f2f5f1611a6afcd97a.zip
Merge pull request #12236 from liamwhite/cpu-refactor
core: refactor emulated cpu core activation
Diffstat (limited to 'src/core/arm')
-rw-r--r--src/core/arm/arm_interface.cpp217
-rw-r--r--src/core/arm/arm_interface.h221
-rw-r--r--src/core/arm/debug.cpp351
-rw-r--r--src/core/arm/debug.h35
-rw-r--r--src/core/arm/dynarmic/arm_dynarmic_32.cpp325
-rw-r--r--src/core/arm/dynarmic/arm_dynarmic_32.h92
-rw-r--r--src/core/arm/dynarmic/arm_dynarmic_64.cpp340
-rw-r--r--src/core/arm/dynarmic/arm_dynarmic_64.h77
-rw-r--r--src/core/arm/dynarmic/dynarmic_cp15.cpp4
-rw-r--r--src/core/arm/dynarmic/dynarmic_cp15.h8
-rw-r--r--src/core/arm/dynarmic/dynarmic_exclusive_monitor.h8
-rw-r--r--src/core/arm/nce/arm_nce.cpp255
-rw-r--r--src/core/arm/nce/arm_nce.h70
-rw-r--r--src/core/arm/nce/arm_nce.s80
-rw-r--r--src/core/arm/nce/guest_context.h8
-rw-r--r--src/core/arm/nce/patcher.cpp2
16 files changed, 972 insertions, 1121 deletions
diff --git a/src/core/arm/arm_interface.cpp b/src/core/arm/arm_interface.cpp
index d231bf89c..698c9c8ad 100644
--- a/src/core/arm/arm_interface.cpp
+++ b/src/core/arm/arm_interface.cpp
@@ -1,231 +1,32 @@
1// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project 1// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later 2// SPDX-License-Identifier: GPL-2.0-or-later
3 3
4#include <map>
5#include <optional>
6
7#include "common/bit_field.h"
8#include "common/common_types.h"
9#include "common/demangle.h"
10#include "common/logging/log.h" 4#include "common/logging/log.h"
11#include "core/arm/arm_interface.h" 5#include "core/arm/arm_interface.h"
12#include "core/arm/symbols.h" 6#include "core/arm/debug.h"
13#include "core/core.h" 7#include "core/core.h"
14#include "core/debugger/debugger.h"
15#include "core/hle/kernel/k_process.h" 8#include "core/hle/kernel/k_process.h"
16#include "core/hle/kernel/k_thread.h"
17#include "core/hle/kernel/svc.h"
18#include "core/loader/loader.h"
19#include "core/memory.h"
20 9
21namespace Core { 10namespace Core {
22 11
23constexpr u64 SEGMENT_BASE = 0x7100000000ull; 12void ArmInterface::LogBacktrace(const Kernel::KProcess* process) const {
24 13 Kernel::Svc::ThreadContext ctx;
25std::vector<ARM_Interface::BacktraceEntry> ARM_Interface::GetBacktraceFromContext( 14 this->GetContext(ctx);
26 Core::System& system, const ARM_Interface::ThreadContext32& ctx) {
27 std::vector<BacktraceEntry> out;
28 auto& memory = system.ApplicationMemory();
29
30 const auto& reg = ctx.cpu_registers;
31 u32 pc = reg[15], lr = reg[14], fp = reg[11];
32 out.push_back({"", 0, pc, 0, ""});
33
34 // fp (= r11) points to the last frame record.
35 // Frame records are two words long:
36 // fp+0 : pointer to previous frame record
37 // fp+4 : value of lr for frame
38 for (size_t i = 0; i < 256; i++) {
39 out.push_back({"", 0, lr, 0, ""});
40 if (!fp || (fp % 4 != 0) || !memory.IsValidVirtualAddressRange(fp, 8)) {
41 break;
42 }
43 lr = memory.Read32(fp + 4);
44 fp = memory.Read32(fp);
45 }
46
47 SymbolicateBacktrace(system, out);
48
49 return out;
50}
51
52std::vector<ARM_Interface::BacktraceEntry> ARM_Interface::GetBacktraceFromContext(
53 Core::System& system, const ARM_Interface::ThreadContext64& ctx) {
54 std::vector<BacktraceEntry> out;
55 auto& memory = system.ApplicationMemory();
56
57 const auto& reg = ctx.cpu_registers;
58 u64 pc = ctx.pc, lr = reg[30], fp = reg[29];
59
60 out.push_back({"", 0, pc, 0, ""});
61
62 // fp (= x29) points to the previous frame record.
63 // Frame records are two words long:
64 // fp+0 : pointer to previous frame record
65 // fp+8 : value of lr for frame
66 for (size_t i = 0; i < 256; i++) {
67 out.push_back({"", 0, lr, 0, ""});
68 if (!fp || (fp % 4 != 0) || !memory.IsValidVirtualAddressRange(fp, 16)) {
69 break;
70 }
71 lr = memory.Read64(fp + 8);
72 fp = memory.Read64(fp);
73 }
74
75 SymbolicateBacktrace(system, out);
76
77 return out;
78}
79
80void ARM_Interface::SymbolicateBacktrace(Core::System& system, std::vector<BacktraceEntry>& out) {
81 std::map<VAddr, std::string> modules;
82 auto& loader{system.GetAppLoader()};
83 if (loader.ReadNSOModules(modules) != Loader::ResultStatus::Success) {
84 return;
85 }
86
87 std::map<std::string, Symbols::Symbols> symbols;
88 for (const auto& module : modules) {
89 symbols.insert_or_assign(module.second,
90 Symbols::GetSymbols(module.first, system.ApplicationMemory(),
91 system.ApplicationProcess()->Is64Bit()));
92 }
93
94 for (auto& entry : out) {
95 VAddr base = 0;
96 for (auto iter = modules.rbegin(); iter != modules.rend(); ++iter) {
97 const auto& module{*iter};
98 if (entry.original_address >= module.first) {
99 entry.module = module.second;
100 base = module.first;
101 break;
102 }
103 }
104
105 entry.offset = entry.original_address - base;
106 entry.address = SEGMENT_BASE + entry.offset;
107
108 if (entry.module.empty()) {
109 entry.module = "unknown";
110 }
111
112 const auto symbol_set = symbols.find(entry.module);
113 if (symbol_set != symbols.end()) {
114 const auto symbol = Symbols::GetSymbolName(symbol_set->second, entry.offset);
115 if (symbol) {
116 entry.name = Common::DemangleSymbol(*symbol);
117 }
118 }
119 }
120}
121
122std::vector<ARM_Interface::BacktraceEntry> ARM_Interface::GetBacktrace() const {
123 if (GetArchitecture() == Architecture::Aarch64) {
124 ThreadContext64 ctx;
125 SaveContext(ctx);
126 return GetBacktraceFromContext(system, ctx);
127 } else {
128 ThreadContext32 ctx;
129 SaveContext(ctx);
130 return GetBacktraceFromContext(system, ctx);
131 }
132}
133 15
134void ARM_Interface::LogBacktrace() const { 16 LOG_ERROR(Core_ARM, "Backtrace, sp={:016X}, pc={:016X}", ctx.sp, ctx.pc);
135 const VAddr sp = GetSP();
136 const VAddr pc = GetPC();
137 LOG_ERROR(Core_ARM, "Backtrace, sp={:016X}, pc={:016X}", sp, pc);
138 LOG_ERROR(Core_ARM, "{:20}{:20}{:20}{:20}{}", "Module Name", "Address", "Original Address", 17 LOG_ERROR(Core_ARM, "{:20}{:20}{:20}{:20}{}", "Module Name", "Address", "Original Address",
139 "Offset", "Symbol"); 18 "Offset", "Symbol");
140 LOG_ERROR(Core_ARM, ""); 19 LOG_ERROR(Core_ARM, "");
141 const auto backtrace = GetBacktrace(); 20 const auto backtrace = GetBacktraceFromContext(process, ctx);
142 for (const auto& entry : backtrace) { 21 for (const auto& entry : backtrace) {
143 LOG_ERROR(Core_ARM, "{:20}{:016X} {:016X} {:016X} {}", entry.module, entry.address, 22 LOG_ERROR(Core_ARM, "{:20}{:016X} {:016X} {:016X} {}", entry.module, entry.address,
144 entry.original_address, entry.offset, entry.name); 23 entry.original_address, entry.offset, entry.name);
145 } 24 }
146} 25}
147 26
148void ARM_Interface::Run() { 27const Kernel::DebugWatchpoint* ArmInterface::MatchingWatchpoint(
149 using Kernel::StepState;
150 using Kernel::SuspendType;
151
152 while (true) {
153 Kernel::KThread* current_thread{Kernel::GetCurrentThreadPointer(system.Kernel())};
154 HaltReason hr{};
155
156 // If the thread is scheduled for termination, exit the thread.
157 if (current_thread->HasDpc()) {
158 if (current_thread->IsTerminationRequested()) {
159 current_thread->Exit();
160 UNREACHABLE();
161 }
162 }
163
164 // Notify the debugger and go to sleep if a step was performed
165 // and this thread has been scheduled again.
166 if (current_thread->GetStepState() == StepState::StepPerformed) {
167 system.GetDebugger().NotifyThreadStopped(current_thread);
168 current_thread->RequestSuspend(SuspendType::Debug);
169 break;
170 }
171
172 // Otherwise, run the thread.
173 system.EnterCPUProfile();
174 if (current_thread->GetStepState() == StepState::StepPending) {
175 hr = StepJit();
176
177 if (True(hr & HaltReason::StepThread)) {
178 current_thread->SetStepState(StepState::StepPerformed);
179 }
180 } else {
181 hr = RunJit();
182 }
183 system.ExitCPUProfile();
184
185 // Notify the debugger and go to sleep if a breakpoint was hit,
186 // or if the thread is unable to continue for any reason.
187 if (True(hr & HaltReason::InstructionBreakpoint) || True(hr & HaltReason::PrefetchAbort)) {
188 if (!True(hr & HaltReason::PrefetchAbort)) {
189 RewindBreakpointInstruction();
190 }
191 if (system.DebuggerEnabled()) {
192 system.GetDebugger().NotifyThreadStopped(current_thread);
193 } else {
194 LogBacktrace();
195 }
196 current_thread->RequestSuspend(SuspendType::Debug);
197 break;
198 }
199
200 // Notify the debugger and go to sleep if a watchpoint was hit.
201 if (True(hr & HaltReason::DataAbort)) {
202 if (system.DebuggerEnabled()) {
203 system.GetDebugger().NotifyThreadWatchpoint(current_thread, *HaltedWatchpoint());
204 } else {
205 LogBacktrace();
206 }
207 current_thread->RequestSuspend(SuspendType::Debug);
208 break;
209 }
210
211 // Handle syscalls and scheduling (this may change the current thread/core)
212 if (True(hr & HaltReason::SupervisorCall)) {
213 Kernel::Svc::Call(system, GetSvcNumber());
214 break;
215 }
216 if (True(hr & HaltReason::BreakLoop) || !uses_wall_clock) {
217 break;
218 }
219 }
220}
221
222void ARM_Interface::LoadWatchpointArray(const WatchpointArray* wp) {
223 watchpoints = wp;
224}
225
226const Kernel::DebugWatchpoint* ARM_Interface::MatchingWatchpoint(
227 u64 addr, u64 size, Kernel::DebugWatchpointType access_type) const { 28 u64 addr, u64 size, Kernel::DebugWatchpointType access_type) const {
228 if (!watchpoints) { 29 if (!m_watchpoints) {
229 return nullptr; 30 return nullptr;
230 } 31 }
231 32
@@ -233,7 +34,7 @@ const Kernel::DebugWatchpoint* ARM_Interface::MatchingWatchpoint(
233 const u64 end_address{addr + size}; 34 const u64 end_address{addr + size};
234 35
235 for (size_t i = 0; i < Core::Hardware::NUM_WATCHPOINTS; i++) { 36 for (size_t i = 0; i < Core::Hardware::NUM_WATCHPOINTS; i++) {
236 const auto& watch{(*watchpoints)[i]}; 37 const auto& watch{(*m_watchpoints)[i]};
237 38
238 if (end_address <= GetInteger(watch.start_address)) { 39 if (end_address <= GetInteger(watch.start_address)) {
239 continue; 40 continue;
diff --git a/src/core/arm/arm_interface.h b/src/core/arm/arm_interface.h
index a9d9ac09d..806c7c9e9 100644
--- a/src/core/arm/arm_interface.h
+++ b/src/core/arm/arm_interface.h
@@ -12,20 +12,20 @@
12#include "common/common_types.h" 12#include "common/common_types.h"
13#include "core/hardware_properties.h" 13#include "core/hardware_properties.h"
14 14
15#include "core/hle/kernel/svc_types.h"
16
15namespace Common { 17namespace Common {
16struct PageTable; 18struct PageTable;
17} 19}
18 20
19namespace Kernel { 21namespace Kernel {
20enum class VMAPermission : u8;
21enum class DebugWatchpointType : u8; 22enum class DebugWatchpointType : u8;
22struct DebugWatchpoint; 23struct DebugWatchpoint;
24class KThread;
25class KProcess;
23} // namespace Kernel 26} // namespace Kernel
24 27
25namespace Core { 28namespace Core {
26class System;
27class CPUInterruptHandler;
28
29using WatchpointArray = std::array<Kernel::DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>; 29using WatchpointArray = std::array<Kernel::DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>;
30 30
31// NOTE: these values match the HaltReason enum in Dynarmic 31// NOTE: these values match the HaltReason enum in Dynarmic
@@ -40,197 +40,74 @@ enum class HaltReason : u64 {
40DECLARE_ENUM_FLAG_OPERATORS(HaltReason); 40DECLARE_ENUM_FLAG_OPERATORS(HaltReason);
41 41
42enum class Architecture { 42enum class Architecture {
43 Aarch32, 43 AArch64,
44 Aarch64, 44 AArch32,
45}; 45};
46 46
47/// Generic ARMv8 CPU interface 47/// Generic ARMv8 CPU interface
48class ARM_Interface { 48class ArmInterface {
49public: 49public:
50 YUZU_NON_COPYABLE(ARM_Interface); 50 YUZU_NON_COPYABLE(ArmInterface);
51 YUZU_NON_MOVEABLE(ARM_Interface); 51 YUZU_NON_MOVEABLE(ArmInterface);
52 52
53 explicit ARM_Interface(System& system_, bool uses_wall_clock_) 53 explicit ArmInterface(bool uses_wall_clock) : m_uses_wall_clock{uses_wall_clock} {}
54 : system{system_}, uses_wall_clock{uses_wall_clock_} {} 54 virtual ~ArmInterface() = default;
55 virtual ~ARM_Interface() = default; 55
56 56 // Perform any backend-specific initialization.
57 struct ThreadContext32 {
58 std::array<u32, 16> cpu_registers{};
59 std::array<u32, 64> extension_registers{};
60 u32 cpsr{};
61 u32 fpscr{};
62 u32 fpexc{};
63 u32 tpidr{};
64 };
65 // Internally within the kernel, it expects the AArch32 version of the
66 // thread context to be 344 bytes in size.
67 static_assert(sizeof(ThreadContext32) == 0x150);
68
69 struct ThreadContext64 {
70 std::array<u64, 31> cpu_registers{};
71 u64 sp{};
72 u64 pc{};
73 u32 pstate{};
74 std::array<u8, 4> padding{};
75 std::array<u128, 32> vector_registers{};
76 u32 fpcr{};
77 u32 fpsr{};
78 u64 tpidr{};
79 };
80 // Internally within the kernel, it expects the AArch64 version of the
81 // thread context to be 800 bytes in size.
82 static_assert(sizeof(ThreadContext64) == 0x320);
83
84 /// Perform any backend-specific initialization.
85 virtual void Initialize() {} 57 virtual void Initialize() {}
86 58
87 /// Runs the CPU until an event happens 59 // Runs the CPU until an event happens.
88 void Run(); 60 virtual HaltReason RunThread(Kernel::KThread* thread) = 0;
89 61
90 /// Clear all instruction cache 62 // Runs the CPU for one instruction or until an event happens.
63 virtual HaltReason StepThread(Kernel::KThread* thread) = 0;
64
65 // Admits a backend-specific mechanism to lock the thread context.
66 virtual void LockThread(Kernel::KThread* thread) {}
67 virtual void UnlockThread(Kernel::KThread* thread) {}
68
69 // Clear the entire instruction cache for this CPU.
91 virtual void ClearInstructionCache() = 0; 70 virtual void ClearInstructionCache() = 0;
92 71
93 /** 72 // Clear a range of the instruction cache for this CPU.
94 * Clear instruction cache range
95 * @param addr Start address of the cache range to clear
96 * @param size Size of the cache range to clear, starting at addr
97 */
98 virtual void InvalidateCacheRange(u64 addr, std::size_t size) = 0; 73 virtual void InvalidateCacheRange(u64 addr, std::size_t size) = 0;
99 74
100 /** 75 // Get the current architecture.
101 * Notifies CPU emulation that the current page table has changed. 76 // This returns AArch64 when PSTATE.nRW == 0 and AArch32 when PSTATE.nRW == 1.
102 * @param new_page_table The new page table.
103 * @param new_address_space_size_in_bits The new usable size of the address space in bits.
104 * This can be either 32, 36, or 39 on official software.
105 */
106 virtual void PageTableChanged(Common::PageTable& new_page_table,
107 std::size_t new_address_space_size_in_bits) = 0;
108
109 /**
110 * Set the Program Counter to an address
111 * @param addr Address to set PC to
112 */
113 virtual void SetPC(u64 addr) = 0;
114
115 /*
116 * Get the current Program Counter
117 * @return Returns current PC
118 */
119 virtual u64 GetPC() const = 0;
120
121 /**
122 * Get the current Stack Pointer
123 * @return Returns current SP
124 */
125 virtual u64 GetSP() const = 0;
126
127 /**
128 * Get an ARM register
129 * @param index Register index
130 * @return Returns the value in the register
131 */
132 virtual u64 GetReg(int index) const = 0;
133
134 /**
135 * Set an ARM register
136 * @param index Register index
137 * @param value Value to set register to
138 */
139 virtual void SetReg(int index, u64 value) = 0;
140
141 /**
142 * Gets the value of a specified vector register.
143 *
144 * @param index The index of the vector register.
145 * @return the value within the vector register.
146 */
147 virtual u128 GetVectorReg(int index) const = 0;
148
149 /**
150 * Sets a given value into a vector register.
151 *
152 * @param index The index of the vector register.
153 * @param value The new value to place in the register.
154 */
155 virtual void SetVectorReg(int index, u128 value) = 0;
156
157 /**
158 * Get the current PSTATE register
159 * @return Returns the value of the PSTATE register
160 */
161 virtual u32 GetPSTATE() const = 0;
162
163 /**
164 * Set the current PSTATE register
165 * @param pstate Value to set PSTATE to
166 */
167 virtual void SetPSTATE(u32 pstate) = 0;
168
169 virtual u64 GetTlsAddress() const = 0;
170
171 virtual void SetTlsAddress(u64 address) = 0;
172
173 /**
174 * Gets the value within the TPIDR_EL0 (read/write software thread ID) register.
175 *
176 * @return the value within the register.
177 */
178 virtual u64 GetTPIDR_EL0() const = 0;
179
180 /**
181 * Sets a new value within the TPIDR_EL0 (read/write software thread ID) register.
182 *
183 * @param value The new value to place in the register.
184 */
185 virtual void SetTPIDR_EL0(u64 value) = 0;
186
187 virtual Architecture GetArchitecture() const = 0; 77 virtual Architecture GetArchitecture() const = 0;
188 virtual void SaveContext(ThreadContext32& ctx) const = 0;
189 virtual void SaveContext(ThreadContext64& ctx) const = 0;
190 virtual void LoadContext(const ThreadContext32& ctx) = 0;
191 virtual void LoadContext(const ThreadContext64& ctx) = 0;
192 void LoadWatchpointArray(const WatchpointArray* wp);
193 78
194 /// Clears the exclusive monitor's state. 79 // Context accessors.
195 virtual void ClearExclusiveState() = 0; 80 // These should not be called if the CPU is running.
81 virtual void GetContext(Kernel::Svc::ThreadContext& ctx) const = 0;
82 virtual void SetContext(const Kernel::Svc::ThreadContext& ctx) = 0;
83 virtual void SetTpidrroEl0(u64 value) = 0;
196 84
197 /// Signal an interrupt and ask the core to halt as soon as possible. 85 virtual void GetSvcArguments(std::span<uint64_t, 8> args) const = 0;
198 virtual void SignalInterrupt() = 0; 86 virtual void SetSvcArguments(std::span<const uint64_t, 8> args) = 0;
87 virtual u32 GetSvcNumber() const = 0;
199 88
200 /// Clear a previous interrupt. 89 void SetWatchpointArray(const WatchpointArray* watchpoints) {
201 virtual void ClearInterrupt() = 0; 90 m_watchpoints = watchpoints;
91 }
202 92
203 struct BacktraceEntry { 93 // Signal an interrupt for execution to halt as soon as possible.
204 std::string module; 94 // It is safe to call this if the CPU is not running.
205 u64 address; 95 virtual void SignalInterrupt(Kernel::KThread* thread) = 0;
206 u64 original_address;
207 u64 offset;
208 std::string name;
209 };
210 96
211 static std::vector<BacktraceEntry> GetBacktraceFromContext(System& system, 97 // Stack trace generation.
212 const ThreadContext32& ctx); 98 void LogBacktrace(const Kernel::KProcess* process) const;
213 static std::vector<BacktraceEntry> GetBacktraceFromContext(System& system,
214 const ThreadContext64& ctx);
215 99
216 std::vector<BacktraceEntry> GetBacktrace() const; 100 // Debug functionality.
217 void LogBacktrace() const; 101 virtual const Kernel::DebugWatchpoint* HaltedWatchpoint() const = 0;
102 virtual void RewindBreakpointInstruction() = 0;
218 103
219protected: 104protected:
220 /// System context that this ARM interface is running under.
221 System& system;
222 const WatchpointArray* watchpoints;
223 bool uses_wall_clock;
224
225 static void SymbolicateBacktrace(Core::System& system, std::vector<BacktraceEntry>& out);
226 const Kernel::DebugWatchpoint* MatchingWatchpoint( 105 const Kernel::DebugWatchpoint* MatchingWatchpoint(
227 u64 addr, u64 size, Kernel::DebugWatchpointType access_type) const; 106 u64 addr, u64 size, Kernel::DebugWatchpointType access_type) const;
228 107
229 virtual HaltReason RunJit() = 0; 108protected:
230 virtual HaltReason StepJit() = 0; 109 const WatchpointArray* m_watchpoints{};
231 virtual u32 GetSvcNumber() const = 0; 110 bool m_uses_wall_clock{};
232 virtual const Kernel::DebugWatchpoint* HaltedWatchpoint() const = 0;
233 virtual void RewindBreakpointInstruction() = 0;
234}; 111};
235 112
236} // namespace Core 113} // namespace Core
diff --git a/src/core/arm/debug.cpp b/src/core/arm/debug.cpp
new file mode 100644
index 000000000..1fe37b8ee
--- /dev/null
+++ b/src/core/arm/debug.cpp
@@ -0,0 +1,351 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "common/demangle.h"
5#include "core/arm/debug.h"
6#include "core/arm/symbols.h"
7#include "core/hle/kernel/k_process.h"
8#include "core/hle/kernel/k_thread.h"
9#include "core/memory.h"
10
11namespace Core {
12
13namespace {
14
15std::optional<std::string> GetNameFromThreadType64(Core::Memory::Memory& memory,
16 const Kernel::KThread& thread) {
17 // Read thread type from TLS
18 const VAddr tls_thread_type{memory.Read64(thread.GetTlsAddress() + 0x1f8)};
19 const VAddr argument_thread_type{thread.GetArgument()};
20
21 if (argument_thread_type && tls_thread_type != argument_thread_type) {
22 // Probably not created by nnsdk, no name available.
23 return std::nullopt;
24 }
25
26 if (!tls_thread_type) {
27 return std::nullopt;
28 }
29
30 const u16 version{memory.Read16(tls_thread_type + 0x46)};
31 VAddr name_pointer{};
32 if (version == 1) {
33 name_pointer = memory.Read64(tls_thread_type + 0x1a0);
34 } else {
35 name_pointer = memory.Read64(tls_thread_type + 0x1a8);
36 }
37
38 if (!name_pointer) {
39 // No name provided.
40 return std::nullopt;
41 }
42
43 return memory.ReadCString(name_pointer, 256);
44}
45
46std::optional<std::string> GetNameFromThreadType32(Core::Memory::Memory& memory,
47 const Kernel::KThread& thread) {
48 // Read thread type from TLS
49 const VAddr tls_thread_type{memory.Read32(thread.GetTlsAddress() + 0x1fc)};
50 const VAddr argument_thread_type{thread.GetArgument()};
51
52 if (argument_thread_type && tls_thread_type != argument_thread_type) {
53 // Probably not created by nnsdk, no name available.
54 return std::nullopt;
55 }
56
57 if (!tls_thread_type) {
58 return std::nullopt;
59 }
60
61 const u16 version{memory.Read16(tls_thread_type + 0x26)};
62 VAddr name_pointer{};
63 if (version == 1) {
64 name_pointer = memory.Read32(tls_thread_type + 0xe4);
65 } else {
66 name_pointer = memory.Read32(tls_thread_type + 0xe8);
67 }
68
69 if (!name_pointer) {
70 // No name provided.
71 return std::nullopt;
72 }
73
74 return memory.ReadCString(name_pointer, 256);
75}
76
77constexpr std::array<u64, 2> SegmentBases{
78 0x60000000ULL,
79 0x7100000000ULL,
80};
81
82void SymbolicateBacktrace(const Kernel::KProcess* process, std::vector<BacktraceEntry>& out) {
83 auto modules = FindModules(process);
84
85 const bool is_64 = process->Is64Bit();
86
87 std::map<std::string, Symbols::Symbols> symbols;
88 for (const auto& module : modules) {
89 symbols.insert_or_assign(module.second,
90 Symbols::GetSymbols(module.first, process->GetMemory(), is_64));
91 }
92
93 for (auto& entry : out) {
94 VAddr base = 0;
95 for (auto iter = modules.rbegin(); iter != modules.rend(); ++iter) {
96 const auto& module{*iter};
97 if (entry.original_address >= module.first) {
98 entry.module = module.second;
99 base = module.first;
100 break;
101 }
102 }
103
104 entry.offset = entry.original_address - base;
105 entry.address = SegmentBases[is_64] + entry.offset;
106
107 if (entry.module.empty()) {
108 entry.module = "unknown";
109 }
110
111 const auto symbol_set = symbols.find(entry.module);
112 if (symbol_set != symbols.end()) {
113 const auto symbol = Symbols::GetSymbolName(symbol_set->second, entry.offset);
114 if (symbol) {
115 entry.name = Common::DemangleSymbol(*symbol);
116 }
117 }
118 }
119}
120
121std::vector<BacktraceEntry> GetAArch64Backtrace(const Kernel::KProcess* process,
122 const Kernel::Svc::ThreadContext& ctx) {
123 std::vector<BacktraceEntry> out;
124 auto& memory = process->GetMemory();
125 auto pc = ctx.pc, lr = ctx.lr, fp = ctx.fp;
126
127 out.push_back({"", 0, pc, 0, ""});
128
129 // fp (= x29) points to the previous frame record.
130 // Frame records are two words long:
131 // fp+0 : pointer to previous frame record
132 // fp+8 : value of lr for frame
133 for (size_t i = 0; i < 256; i++) {
134 out.push_back({"", 0, lr, 0, ""});
135 if (!fp || (fp % 4 != 0) || !memory.IsValidVirtualAddressRange(fp, 16)) {
136 break;
137 }
138 lr = memory.Read64(fp + 8);
139 fp = memory.Read64(fp);
140 }
141
142 SymbolicateBacktrace(process, out);
143
144 return out;
145}
146
147std::vector<BacktraceEntry> GetAArch32Backtrace(const Kernel::KProcess* process,
148 const Kernel::Svc::ThreadContext& ctx) {
149 std::vector<BacktraceEntry> out;
150 auto& memory = process->GetMemory();
151 auto pc = ctx.pc, lr = ctx.lr, fp = ctx.fp;
152
153 out.push_back({"", 0, pc, 0, ""});
154
155 // fp (= r11) points to the last frame record.
156 // Frame records are two words long:
157 // fp+0 : pointer to previous frame record
158 // fp+4 : value of lr for frame
159 for (size_t i = 0; i < 256; i++) {
160 out.push_back({"", 0, lr, 0, ""});
161 if (!fp || (fp % 4 != 0) || !memory.IsValidVirtualAddressRange(fp, 8)) {
162 break;
163 }
164 lr = memory.Read32(fp + 4);
165 fp = memory.Read32(fp);
166 }
167
168 SymbolicateBacktrace(process, out);
169
170 return out;
171}
172
173} // namespace
174
175std::optional<std::string> GetThreadName(const Kernel::KThread* thread) {
176 const auto* process = thread->GetOwnerProcess();
177 if (process->Is64Bit()) {
178 return GetNameFromThreadType64(process->GetMemory(), *thread);
179 } else {
180 return GetNameFromThreadType32(process->GetMemory(), *thread);
181 }
182}
183
184std::string_view GetThreadWaitReason(const Kernel::KThread* thread) {
185 switch (thread->GetWaitReasonForDebugging()) {
186 case Kernel::ThreadWaitReasonForDebugging::Sleep:
187 return "Sleep";
188 case Kernel::ThreadWaitReasonForDebugging::IPC:
189 return "IPC";
190 case Kernel::ThreadWaitReasonForDebugging::Synchronization:
191 return "Synchronization";
192 case Kernel::ThreadWaitReasonForDebugging::ConditionVar:
193 return "ConditionVar";
194 case Kernel::ThreadWaitReasonForDebugging::Arbitration:
195 return "Arbitration";
196 case Kernel::ThreadWaitReasonForDebugging::Suspended:
197 return "Suspended";
198 default:
199 return "Unknown";
200 }
201}
202
203std::string GetThreadState(const Kernel::KThread* thread) {
204 switch (thread->GetState()) {
205 case Kernel::ThreadState::Initialized:
206 return "Initialized";
207 case Kernel::ThreadState::Waiting:
208 return fmt::format("Waiting ({})", GetThreadWaitReason(thread));
209 case Kernel::ThreadState::Runnable:
210 return "Runnable";
211 case Kernel::ThreadState::Terminated:
212 return "Terminated";
213 default:
214 return "Unknown";
215 }
216}
217
218Kernel::KProcessAddress GetModuleEnd(const Kernel::KProcess* process,
219 Kernel::KProcessAddress base) {
220 Kernel::KMemoryInfo mem_info;
221 Kernel::Svc::MemoryInfo svc_mem_info;
222 Kernel::Svc::PageInfo page_info;
223 VAddr cur_addr{GetInteger(base)};
224 auto& page_table = process->GetPageTable();
225
226 // Expect: r-x Code (.text)
227 R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr));
228 svc_mem_info = mem_info.GetSvcMemoryInfo();
229 cur_addr = svc_mem_info.base_address + svc_mem_info.size;
230 if (svc_mem_info.state != Kernel::Svc::MemoryState::Code ||
231 svc_mem_info.permission != Kernel::Svc::MemoryPermission::ReadExecute) {
232 return cur_addr - 1;
233 }
234
235 // Expect: r-- Code (.rodata)
236 R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr));
237 svc_mem_info = mem_info.GetSvcMemoryInfo();
238 cur_addr = svc_mem_info.base_address + svc_mem_info.size;
239 if (svc_mem_info.state != Kernel::Svc::MemoryState::Code ||
240 svc_mem_info.permission != Kernel::Svc::MemoryPermission::Read) {
241 return cur_addr - 1;
242 }
243
244 // Expect: rw- CodeData (.data)
245 R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr));
246 svc_mem_info = mem_info.GetSvcMemoryInfo();
247 cur_addr = svc_mem_info.base_address + svc_mem_info.size;
248 return cur_addr - 1;
249}
250
251Loader::AppLoader::Modules FindModules(const Kernel::KProcess* process) {
252 Loader::AppLoader::Modules modules;
253
254 auto& page_table = process->GetPageTable();
255 auto& memory = process->GetMemory();
256 VAddr cur_addr = 0;
257
258 // Look for executable sections in Code or AliasCode regions.
259 while (true) {
260 Kernel::KMemoryInfo mem_info{};
261 Kernel::Svc::PageInfo page_info{};
262 R_ASSERT(
263 page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr));
264 auto svc_mem_info = mem_info.GetSvcMemoryInfo();
265
266 if (svc_mem_info.permission == Kernel::Svc::MemoryPermission::ReadExecute &&
267 (svc_mem_info.state == Kernel::Svc::MemoryState::Code ||
268 svc_mem_info.state == Kernel::Svc::MemoryState::AliasCode)) {
269 // Try to read the module name from its path.
270 constexpr s32 PathLengthMax = 0x200;
271 struct {
272 u32 zero;
273 s32 path_length;
274 std::array<char, PathLengthMax> path;
275 } module_path;
276
277 if (memory.ReadBlock(svc_mem_info.base_address + svc_mem_info.size, &module_path,
278 sizeof(module_path))) {
279 if (module_path.zero == 0 && module_path.path_length > 0) {
280 // Truncate module name.
281 module_path.path[PathLengthMax - 1] = '\0';
282
283 // Ignore leading directories.
284 char* path_pointer = module_path.path.data();
285
286 for (s32 i = 0; i < std::min(PathLengthMax, module_path.path_length) &&
287 module_path.path[i] != '\0';
288 i++) {
289 if (module_path.path[i] == '/' || module_path.path[i] == '\\') {
290 path_pointer = module_path.path.data() + i + 1;
291 }
292 }
293
294 // Insert output.
295 modules.emplace(svc_mem_info.base_address, path_pointer);
296 }
297 }
298 }
299
300 // Check if we're done.
301 const uintptr_t next_address = svc_mem_info.base_address + svc_mem_info.size;
302 if (next_address <= cur_addr) {
303 break;
304 }
305
306 cur_addr = next_address;
307 }
308
309 return modules;
310}
311
312Kernel::KProcessAddress FindMainModuleEntrypoint(const Kernel::KProcess* process) {
313 // Do we have any loaded executable sections?
314 auto modules = FindModules(process);
315
316 if (modules.size() >= 2) {
317 // If we have two or more, the first one is rtld and the second is main.
318 return std::next(modules.begin())->first;
319 } else if (!modules.empty()) {
320 // If we only have one, this is the main module.
321 return modules.begin()->first;
322 }
323
324 // As a last resort, use the start of the code region.
325 return GetInteger(process->GetPageTable().GetCodeRegionStart());
326}
327
328void InvalidateInstructionCacheRange(const Kernel::KProcess* process, u64 address, u64 size) {
329 for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
330 auto* interface = process->GetArmInterface(i);
331 if (interface) {
332 interface->InvalidateCacheRange(address, size);
333 }
334 }
335}
336
337std::vector<BacktraceEntry> GetBacktraceFromContext(const Kernel::KProcess* process,
338 const Kernel::Svc::ThreadContext& ctx) {
339 if (process->Is64Bit()) {
340 return GetAArch64Backtrace(process, ctx);
341 } else {
342 return GetAArch32Backtrace(process, ctx);
343 }
344}
345
346std::vector<BacktraceEntry> GetBacktrace(const Kernel::KThread* thread) {
347 Kernel::Svc::ThreadContext ctx = thread->GetContext();
348 return GetBacktraceFromContext(thread->GetOwnerProcess(), ctx);
349}
350
351} // namespace Core
diff --git a/src/core/arm/debug.h b/src/core/arm/debug.h
new file mode 100644
index 000000000..c542633db
--- /dev/null
+++ b/src/core/arm/debug.h
@@ -0,0 +1,35 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include <optional>
7
8#include "core/hle/kernel/k_thread.h"
9#include "core/loader/loader.h"
10
11namespace Core {
12
13std::optional<std::string> GetThreadName(const Kernel::KThread* thread);
14std::string_view GetThreadWaitReason(const Kernel::KThread* thread);
15std::string GetThreadState(const Kernel::KThread* thread);
16
17Loader::AppLoader::Modules FindModules(const Kernel::KProcess* process);
18Kernel::KProcessAddress GetModuleEnd(const Kernel::KProcess* process, Kernel::KProcessAddress base);
19Kernel::KProcessAddress FindMainModuleEntrypoint(const Kernel::KProcess* process);
20
21void InvalidateInstructionCacheRange(const Kernel::KProcess* process, u64 address, u64 size);
22
23struct BacktraceEntry {
24 std::string module;
25 u64 address;
26 u64 original_address;
27 u64 offset;
28 std::string name;
29};
30
31std::vector<BacktraceEntry> GetBacktraceFromContext(const Kernel::KProcess* process,
32 const Kernel::Svc::ThreadContext& ctx);
33std::vector<BacktraceEntry> GetBacktrace(const Kernel::KThread* thread);
34
35} // namespace Core
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index 44a297cdc..f34865e26 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -1,25 +1,13 @@
1// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project 1// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later 2// SPDX-License-Identifier: GPL-2.0-or-later
3 3
4#include <cinttypes>
5#include <memory>
6#include <dynarmic/interface/A32/a32.h>
7#include <dynarmic/interface/A32/config.h>
8#include "common/assert.h"
9#include "common/literals.h"
10#include "common/logging/log.h"
11#include "common/page_table.h"
12#include "common/settings.h" 4#include "common/settings.h"
13#include "core/arm/dynarmic/arm_dynarmic.h" 5#include "core/arm/dynarmic/arm_dynarmic.h"
14#include "core/arm/dynarmic/arm_dynarmic_32.h" 6#include "core/arm/dynarmic/arm_dynarmic_32.h"
15#include "core/arm/dynarmic/dynarmic_cp15.h" 7#include "core/arm/dynarmic/dynarmic_cp15.h"
16#include "core/arm/dynarmic/dynarmic_exclusive_monitor.h" 8#include "core/arm/dynarmic/dynarmic_exclusive_monitor.h"
17#include "core/core.h"
18#include "core/core_timing.h" 9#include "core/core_timing.h"
19#include "core/debugger/debugger.h"
20#include "core/hle/kernel/k_process.h" 10#include "core/hle/kernel/k_process.h"
21#include "core/hle/kernel/svc.h"
22#include "core/memory.h"
23 11
24namespace Core { 12namespace Core {
25 13
@@ -27,78 +15,78 @@ using namespace Common::Literals;
27 15
28class DynarmicCallbacks32 : public Dynarmic::A32::UserCallbacks { 16class DynarmicCallbacks32 : public Dynarmic::A32::UserCallbacks {
29public: 17public:
30 explicit DynarmicCallbacks32(ARM_Dynarmic_32& parent_) 18 explicit DynarmicCallbacks32(ArmDynarmic32& parent, const Kernel::KProcess* process)
31 : parent{parent_}, memory(parent.system.ApplicationMemory()), 19 : m_parent{parent}, m_memory(process->GetMemory()),
32 debugger_enabled{parent.system.DebuggerEnabled()}, 20 m_process(process), m_debugger_enabled{parent.m_system.DebuggerEnabled()},
33 check_memory_access{debugger_enabled || 21 m_check_memory_access{m_debugger_enabled ||
34 !Settings::values.cpuopt_ignore_memory_aborts.GetValue()} {} 22 !Settings::values.cpuopt_ignore_memory_aborts.GetValue()} {}
35 23
36 u8 MemoryRead8(u32 vaddr) override { 24 u8 MemoryRead8(u32 vaddr) override {
37 CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Read); 25 CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Read);
38 return memory.Read8(vaddr); 26 return m_memory.Read8(vaddr);
39 } 27 }
40 u16 MemoryRead16(u32 vaddr) override { 28 u16 MemoryRead16(u32 vaddr) override {
41 CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Read); 29 CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Read);
42 return memory.Read16(vaddr); 30 return m_memory.Read16(vaddr);
43 } 31 }
44 u32 MemoryRead32(u32 vaddr) override { 32 u32 MemoryRead32(u32 vaddr) override {
45 CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Read); 33 CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Read);
46 return memory.Read32(vaddr); 34 return m_memory.Read32(vaddr);
47 } 35 }
48 u64 MemoryRead64(u32 vaddr) override { 36 u64 MemoryRead64(u32 vaddr) override {
49 CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Read); 37 CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Read);
50 return memory.Read64(vaddr); 38 return m_memory.Read64(vaddr);
51 } 39 }
52 std::optional<u32> MemoryReadCode(u32 vaddr) override { 40 std::optional<u32> MemoryReadCode(u32 vaddr) override {
53 if (!memory.IsValidVirtualAddressRange(vaddr, sizeof(u32))) { 41 if (!m_memory.IsValidVirtualAddressRange(vaddr, sizeof(u32))) {
54 return std::nullopt; 42 return std::nullopt;
55 } 43 }
56 return memory.Read32(vaddr); 44 return m_memory.Read32(vaddr);
57 } 45 }
58 46
59 void MemoryWrite8(u32 vaddr, u8 value) override { 47 void MemoryWrite8(u32 vaddr, u8 value) override {
60 if (CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write)) { 48 if (CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write)) {
61 memory.Write8(vaddr, value); 49 m_memory.Write8(vaddr, value);
62 } 50 }
63 } 51 }
64 void MemoryWrite16(u32 vaddr, u16 value) override { 52 void MemoryWrite16(u32 vaddr, u16 value) override {
65 if (CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Write)) { 53 if (CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Write)) {
66 memory.Write16(vaddr, value); 54 m_memory.Write16(vaddr, value);
67 } 55 }
68 } 56 }
69 void MemoryWrite32(u32 vaddr, u32 value) override { 57 void MemoryWrite32(u32 vaddr, u32 value) override {
70 if (CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Write)) { 58 if (CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Write)) {
71 memory.Write32(vaddr, value); 59 m_memory.Write32(vaddr, value);
72 } 60 }
73 } 61 }
74 void MemoryWrite64(u32 vaddr, u64 value) override { 62 void MemoryWrite64(u32 vaddr, u64 value) override {
75 if (CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Write)) { 63 if (CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Write)) {
76 memory.Write64(vaddr, value); 64 m_memory.Write64(vaddr, value);
77 } 65 }
78 } 66 }
79 67
80 bool MemoryWriteExclusive8(u32 vaddr, u8 value, u8 expected) override { 68 bool MemoryWriteExclusive8(u32 vaddr, u8 value, u8 expected) override {
81 return CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write) && 69 return CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write) &&
82 memory.WriteExclusive8(vaddr, value, expected); 70 m_memory.WriteExclusive8(vaddr, value, expected);
83 } 71 }
84 bool MemoryWriteExclusive16(u32 vaddr, u16 value, u16 expected) override { 72 bool MemoryWriteExclusive16(u32 vaddr, u16 value, u16 expected) override {
85 return CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Write) && 73 return CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Write) &&
86 memory.WriteExclusive16(vaddr, value, expected); 74 m_memory.WriteExclusive16(vaddr, value, expected);
87 } 75 }
88 bool MemoryWriteExclusive32(u32 vaddr, u32 value, u32 expected) override { 76 bool MemoryWriteExclusive32(u32 vaddr, u32 value, u32 expected) override {
89 return CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Write) && 77 return CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Write) &&
90 memory.WriteExclusive32(vaddr, value, expected); 78 m_memory.WriteExclusive32(vaddr, value, expected);
91 } 79 }
92 bool MemoryWriteExclusive64(u32 vaddr, u64 value, u64 expected) override { 80 bool MemoryWriteExclusive64(u32 vaddr, u64 value, u64 expected) override {
93 return CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Write) && 81 return CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Write) &&
94 memory.WriteExclusive64(vaddr, value, expected); 82 m_memory.WriteExclusive64(vaddr, value, expected);
95 } 83 }
96 84
97 void InterpreterFallback(u32 pc, std::size_t num_instructions) override { 85 void InterpreterFallback(u32 pc, std::size_t num_instructions) override {
98 parent.LogBacktrace(); 86 m_parent.LogBacktrace(m_process);
99 LOG_ERROR(Core_ARM, 87 LOG_ERROR(Core_ARM,
100 "Unimplemented instruction @ 0x{:X} for {} instructions (instr = {:08X})", pc, 88 "Unimplemented instruction @ 0x{:X} for {} instructions (instr = {:08X})", pc,
101 num_instructions, memory.Read32(pc)); 89 num_instructions, m_memory.Read32(pc));
102 } 90 }
103 91
104 void ExceptionRaised(u32 pc, Dynarmic::A32::Exception exception) override { 92 void ExceptionRaised(u32 pc, Dynarmic::A32::Exception exception) override {
@@ -108,73 +96,64 @@ public:
108 ReturnException(pc, PrefetchAbort); 96 ReturnException(pc, PrefetchAbort);
109 return; 97 return;
110 default: 98 default:
111 if (debugger_enabled) { 99 if (m_debugger_enabled) {
112 ReturnException(pc, InstructionBreakpoint); 100 ReturnException(pc, InstructionBreakpoint);
113 return; 101 return;
114 } 102 }
115 103
116 parent.LogBacktrace(); 104 m_parent.LogBacktrace(m_process);
117 LOG_CRITICAL(Core_ARM, 105 LOG_CRITICAL(Core_ARM,
118 "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X}, thumb = {})", 106 "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X}, thumb = {})",
119 exception, pc, memory.Read32(pc), parent.IsInThumbMode()); 107 exception, pc, m_memory.Read32(pc), m_parent.IsInThumbMode());
120 } 108 }
121 } 109 }
122 110
123 void CallSVC(u32 swi) override { 111 void CallSVC(u32 swi) override {
124 parent.svc_swi = swi; 112 m_parent.m_svc_swi = swi;
125 parent.jit.load()->HaltExecution(SupervisorCall); 113 m_parent.m_jit->HaltExecution(SupervisorCall);
126 } 114 }
127 115
128 void AddTicks(u64 ticks) override { 116 void AddTicks(u64 ticks) override {
129 if (parent.uses_wall_clock) { 117 ASSERT_MSG(!m_parent.m_uses_wall_clock, "Dynarmic ticking disabled");
130 return;
131 }
132 118
133 // Divide the number of ticks by the amount of CPU cores. TODO(Subv): This yields only a 119 // Divide the number of ticks by the amount of CPU cores. TODO(Subv): This yields only a
134 // rough approximation of the amount of executed ticks in the system, it may be thrown off 120 // rough approximation of the amount of executed ticks in the system, it may be thrown off
135 // if not all cores are doing a similar amount of work. Instead of doing this, we should 121 // if not all cores are doing a similar amount of work. Instead of doing this, we should
136 // devise a way so that timing is consistent across all cores without increasing the ticks 4 122 // devise a way so that timing is consistent across all cores without increasing the ticks 4
137 // times. 123 // times.
138 u64 amortized_ticks = 124 u64 amortized_ticks = ticks / Core::Hardware::NUM_CPU_CORES;
139 (ticks - num_interpreted_instructions) / Core::Hardware::NUM_CPU_CORES;
140 // Always execute at least one tick. 125 // Always execute at least one tick.
141 amortized_ticks = std::max<u64>(amortized_ticks, 1); 126 amortized_ticks = std::max<u64>(amortized_ticks, 1);
142 127
143 parent.system.CoreTiming().AddTicks(amortized_ticks); 128 m_parent.m_system.CoreTiming().AddTicks(amortized_ticks);
144 num_interpreted_instructions = 0;
145 } 129 }
146 130
147 u64 GetTicksRemaining() override { 131 u64 GetTicksRemaining() override {
148 if (parent.uses_wall_clock) { 132 ASSERT_MSG(!m_parent.m_uses_wall_clock, "Dynarmic ticking disabled");
149 if (!IsInterrupted()) {
150 return minimum_run_cycles;
151 }
152 return 0U;
153 }
154 133
155 return std::max<s64>(parent.system.CoreTiming().GetDowncount(), 0); 134 return std::max<s64>(m_parent.m_system.CoreTiming().GetDowncount(), 0);
156 } 135 }
157 136
158 bool CheckMemoryAccess(u64 addr, u64 size, Kernel::DebugWatchpointType type) { 137 bool CheckMemoryAccess(u64 addr, u64 size, Kernel::DebugWatchpointType type) {
159 if (!check_memory_access) { 138 if (!m_check_memory_access) {
160 return true; 139 return true;
161 } 140 }
162 141
163 if (!memory.IsValidVirtualAddressRange(addr, size)) { 142 if (!m_memory.IsValidVirtualAddressRange(addr, size)) {
164 LOG_CRITICAL(Core_ARM, "Stopping execution due to unmapped memory access at {:#x}", 143 LOG_CRITICAL(Core_ARM, "Stopping execution due to unmapped memory access at {:#x}",
165 addr); 144 addr);
166 parent.jit.load()->HaltExecution(PrefetchAbort); 145 m_parent.m_jit->HaltExecution(PrefetchAbort);
167 return false; 146 return false;
168 } 147 }
169 148
170 if (!debugger_enabled) { 149 if (!m_debugger_enabled) {
171 return true; 150 return true;
172 } 151 }
173 152
174 const auto match{parent.MatchingWatchpoint(addr, size, type)}; 153 const auto match{m_parent.MatchingWatchpoint(addr, size, type)};
175 if (match) { 154 if (match) {
176 parent.halted_watchpoint = match; 155 m_parent.m_halted_watchpoint = match;
177 parent.jit.load()->HaltExecution(DataAbort); 156 m_parent.m_jit->HaltExecution(DataAbort);
178 return false; 157 return false;
179 } 158 }
180 159
@@ -182,32 +161,31 @@ public:
182 } 161 }
183 162
184 void ReturnException(u32 pc, Dynarmic::HaltReason hr) { 163 void ReturnException(u32 pc, Dynarmic::HaltReason hr) {
185 parent.SaveContext(parent.breakpoint_context); 164 m_parent.GetContext(m_parent.m_breakpoint_context);
186 parent.breakpoint_context.cpu_registers[15] = pc; 165 m_parent.m_breakpoint_context.pc = pc;
187 parent.jit.load()->HaltExecution(hr); 166 m_parent.m_breakpoint_context.r[15] = pc;
188 } 167 m_parent.m_jit->HaltExecution(hr);
189
190 bool IsInterrupted() {
191 return parent.system.Kernel().PhysicalCore(parent.core_index).IsInterrupted();
192 } 168 }
193 169
194 ARM_Dynarmic_32& parent; 170 ArmDynarmic32& m_parent;
195 Core::Memory::Memory& memory; 171 Core::Memory::Memory& m_memory;
196 std::size_t num_interpreted_instructions{}; 172 const Kernel::KProcess* m_process{};
197 const bool debugger_enabled{}; 173 const bool m_debugger_enabled{};
198 const bool check_memory_access{}; 174 const bool m_check_memory_access{};
199 static constexpr u64 minimum_run_cycles = 10000U; 175 static constexpr u64 MinimumRunCycles = 10000U;
200}; 176};
201 177
202std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable* page_table) const { 178std::shared_ptr<Dynarmic::A32::Jit> ArmDynarmic32::MakeJit(Common::PageTable* page_table) const {
203 Dynarmic::A32::UserConfig config; 179 Dynarmic::A32::UserConfig config;
204 config.callbacks = cb.get(); 180 config.callbacks = m_cb.get();
205 config.coprocessors[15] = cp15; 181 config.coprocessors[15] = m_cp15;
206 config.define_unpredictable_behaviour = true; 182 config.define_unpredictable_behaviour = true;
207 static constexpr std::size_t YUZU_PAGEBITS = 12; 183
208 static constexpr std::size_t NUM_PAGE_TABLE_ENTRIES = 1 << (32 - YUZU_PAGEBITS);
209 if (page_table) { 184 if (page_table) {
210 config.page_table = reinterpret_cast<std::array<std::uint8_t*, NUM_PAGE_TABLE_ENTRIES>*>( 185 constexpr size_t PageBits = 12;
186 constexpr size_t NumPageTableEntries = 1 << (32 - PageBits);
187
188 config.page_table = reinterpret_cast<std::array<std::uint8_t*, NumPageTableEntries>*>(
211 page_table->pointers.data()); 189 page_table->pointers.data());
212 config.absolute_offset_page_table = true; 190 config.absolute_offset_page_table = true;
213 config.page_table_pointer_mask_bits = Common::PageTable::ATTRIBUTE_BITS; 191 config.page_table_pointer_mask_bits = Common::PageTable::ATTRIBUTE_BITS;
@@ -221,12 +199,12 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
221 } 199 }
222 200
223 // Multi-process state 201 // Multi-process state
224 config.processor_id = core_index; 202 config.processor_id = m_core_index;
225 config.global_monitor = &exclusive_monitor.monitor; 203 config.global_monitor = &m_exclusive_monitor.monitor;
226 204
227 // Timing 205 // Timing
228 config.wall_clock_cntpct = uses_wall_clock; 206 config.wall_clock_cntpct = m_uses_wall_clock;
229 config.enable_cycle_counting = true; 207 config.enable_cycle_counting = !m_uses_wall_clock;
230 208
231 // Code cache size 209 // Code cache size
232#ifdef ARCHITECTURE_arm64 210#ifdef ARCHITECTURE_arm64
@@ -236,7 +214,7 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
236#endif 214#endif
237 215
238 // Allow memory fault handling to work 216 // Allow memory fault handling to work
239 if (system.DebuggerEnabled()) { 217 if (m_system.DebuggerEnabled()) {
240 config.check_halt_on_memory_access = true; 218 config.check_halt_on_memory_access = true;
241 } 219 }
242 220
@@ -325,137 +303,140 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
325 return std::make_unique<Dynarmic::A32::Jit>(config); 303 return std::make_unique<Dynarmic::A32::Jit>(config);
326} 304}
327 305
328HaltReason ARM_Dynarmic_32::RunJit() { 306static std::pair<u32, u32> FpscrToFpsrFpcr(u32 fpscr) {
329 return TranslateHaltReason(jit.load()->Run()); 307 // FPSCR bits [31:27] are mapped to FPSR[31:27].
308 // FPSCR bit [7] is mapped to FPSR[7].
309 // FPSCR bits [4:0] are mapped to FPSR[4:0].
310 const u32 nzcv = fpscr & 0xf8000000;
311 const u32 idc = fpscr & 0x80;
312 const u32 fiq = fpscr & 0x1f;
313 const u32 fpsr = nzcv | idc | fiq;
314
315 // FPSCR bits [26:15] are mapped to FPCR[26:15].
316 // FPSCR bits [12:8] are mapped to FPCR[12:8].
317 const u32 round = fpscr & 0x7ff8000;
318 const u32 trap = fpscr & 0x1f00;
319 const u32 fpcr = round | trap;
320
321 return {fpsr, fpcr};
330} 322}
331 323
332HaltReason ARM_Dynarmic_32::StepJit() { 324static u32 FpsrFpcrToFpscr(u64 fpsr, u64 fpcr) {
333 return TranslateHaltReason(jit.load()->Step()); 325 auto [s, c] = FpscrToFpsrFpcr(static_cast<u32>(fpsr | fpcr));
326 return s | c;
334} 327}
335 328
336u32 ARM_Dynarmic_32::GetSvcNumber() const { 329bool ArmDynarmic32::IsInThumbMode() const {
337 return svc_swi; 330 return (m_jit->Cpsr() & 0x20) != 0;
338} 331}
339 332
340const Kernel::DebugWatchpoint* ARM_Dynarmic_32::HaltedWatchpoint() const { 333HaltReason ArmDynarmic32::RunThread(Kernel::KThread* thread) {
341 return halted_watchpoint; 334 m_jit->ClearExclusiveState();
335 return TranslateHaltReason(m_jit->Run());
342} 336}
343 337
344void ARM_Dynarmic_32::RewindBreakpointInstruction() { 338HaltReason ArmDynarmic32::StepThread(Kernel::KThread* thread) {
345 LoadContext(breakpoint_context); 339 m_jit->ClearExclusiveState();
340 return TranslateHaltReason(m_jit->Step());
346} 341}
347 342
348ARM_Dynarmic_32::ARM_Dynarmic_32(System& system_, bool uses_wall_clock_, 343u32 ArmDynarmic32::GetSvcNumber() const {
349 DynarmicExclusiveMonitor& exclusive_monitor_, 344 return m_svc_swi;
350 std::size_t core_index_) 345}
351 : ARM_Interface{system_, uses_wall_clock_}, cb(std::make_unique<DynarmicCallbacks32>(*this)),
352 cp15(std::make_shared<DynarmicCP15>(*this)), core_index{core_index_},
353 exclusive_monitor{exclusive_monitor_}, null_jit{MakeJit(nullptr)}, jit{null_jit.get()} {}
354 346
355ARM_Dynarmic_32::~ARM_Dynarmic_32() = default; 347void ArmDynarmic32::GetSvcArguments(std::span<uint64_t, 8> args) const {
348 Dynarmic::A32::Jit& j = *m_jit;
349 auto& gpr = j.Regs();
356 350
357void ARM_Dynarmic_32::SetPC(u64 pc) { 351 for (size_t i = 0; i < 8; i++) {
358 jit.load()->Regs()[15] = static_cast<u32>(pc); 352 args[i] = gpr[i];
353 }
359} 354}
360 355
361u64 ARM_Dynarmic_32::GetPC() const { 356void ArmDynarmic32::SetSvcArguments(std::span<const uint64_t, 8> args) {
362 return jit.load()->Regs()[15]; 357 Dynarmic::A32::Jit& j = *m_jit;
363} 358 auto& gpr = j.Regs();
364 359
365u64 ARM_Dynarmic_32::GetSP() const { 360 for (size_t i = 0; i < 8; i++) {
366 return jit.load()->Regs()[13]; 361 gpr[i] = static_cast<u32>(args[i]);
362 }
367} 363}
368 364
369u64 ARM_Dynarmic_32::GetReg(int index) const { 365const Kernel::DebugWatchpoint* ArmDynarmic32::HaltedWatchpoint() const {
370 return jit.load()->Regs()[index]; 366 return m_halted_watchpoint;
371} 367}
372 368
373void ARM_Dynarmic_32::SetReg(int index, u64 value) { 369void ArmDynarmic32::RewindBreakpointInstruction() {
374 jit.load()->Regs()[index] = static_cast<u32>(value); 370 this->SetContext(m_breakpoint_context);
375} 371}
376 372
377u128 ARM_Dynarmic_32::GetVectorReg(int index) const { 373ArmDynarmic32::ArmDynarmic32(System& system, bool uses_wall_clock, const Kernel::KProcess* process,
378 return {}; 374 DynarmicExclusiveMonitor& exclusive_monitor, std::size_t core_index)
375 : ArmInterface{uses_wall_clock}, m_system{system}, m_exclusive_monitor{exclusive_monitor},
376 m_cb(std::make_unique<DynarmicCallbacks32>(*this, process)),
377 m_cp15(std::make_shared<DynarmicCP15>(*this)), m_core_index{core_index} {
378 auto& page_table_impl = process->GetPageTable().GetBasePageTable().GetImpl();
379 m_jit = MakeJit(&page_table_impl);
379} 380}
380 381
381void ARM_Dynarmic_32::SetVectorReg(int index, u128 value) {} 382ArmDynarmic32::~ArmDynarmic32() = default;
382 383
383u32 ARM_Dynarmic_32::GetPSTATE() const { 384void ArmDynarmic32::SetTpidrroEl0(u64 value) {
384 return jit.load()->Cpsr(); 385 m_cp15->uro = static_cast<u32>(value);
385} 386}
386 387
387void ARM_Dynarmic_32::SetPSTATE(u32 cpsr) { 388void ArmDynarmic32::GetContext(Kernel::Svc::ThreadContext& ctx) const {
388 jit.load()->SetCpsr(cpsr); 389 Dynarmic::A32::Jit& j = *m_jit;
389} 390 auto& gpr = j.Regs();
391 auto& fpr = j.ExtRegs();
390 392
391u64 ARM_Dynarmic_32::GetTlsAddress() const { 393 for (size_t i = 0; i < 16; i++) {
392 return cp15->uro; 394 ctx.r[i] = gpr[i];
393} 395 }
394 396
395void ARM_Dynarmic_32::SetTlsAddress(u64 address) { 397 ctx.fp = gpr[11];
396 cp15->uro = static_cast<u32>(address); 398 ctx.sp = gpr[13];
397} 399 ctx.lr = gpr[14];
400 ctx.pc = gpr[15];
401 ctx.pstate = j.Cpsr();
398 402
399u64 ARM_Dynarmic_32::GetTPIDR_EL0() const { 403 static_assert(sizeof(fpr) <= sizeof(ctx.v));
400 return cp15->uprw; 404 std::memcpy(ctx.v.data(), &fpr, sizeof(fpr));
401}
402 405
403void ARM_Dynarmic_32::SetTPIDR_EL0(u64 value) { 406 auto [fpsr, fpcr] = FpscrToFpsrFpcr(j.Fpscr());
404 cp15->uprw = static_cast<u32>(value); 407 ctx.fpcr = fpcr;
408 ctx.fpsr = fpsr;
409 ctx.tpidr = m_cp15->uprw;
405} 410}
406 411
407void ARM_Dynarmic_32::SaveContext(ThreadContext32& ctx) const { 412void ArmDynarmic32::SetContext(const Kernel::Svc::ThreadContext& ctx) {
408 Dynarmic::A32::Jit* j = jit.load(); 413 Dynarmic::A32::Jit& j = *m_jit;
409 ctx.cpu_registers = j->Regs(); 414 auto& gpr = j.Regs();
410 ctx.extension_registers = j->ExtRegs(); 415 auto& fpr = j.ExtRegs();
411 ctx.cpsr = j->Cpsr();
412 ctx.fpscr = j->Fpscr();
413}
414 416
415void ARM_Dynarmic_32::LoadContext(const ThreadContext32& ctx) { 417 for (size_t i = 0; i < 16; i++) {
416 Dynarmic::A32::Jit* j = jit.load(); 418 gpr[i] = static_cast<u32>(ctx.r[i]);
417 j->Regs() = ctx.cpu_registers; 419 }
418 j->ExtRegs() = ctx.extension_registers;
419 j->SetCpsr(ctx.cpsr);
420 j->SetFpscr(ctx.fpscr);
421}
422 420
423void ARM_Dynarmic_32::SignalInterrupt() { 421 j.SetCpsr(ctx.pstate);
424 jit.load()->HaltExecution(BreakLoop);
425}
426 422
427void ARM_Dynarmic_32::ClearInterrupt() { 423 static_assert(sizeof(fpr) <= sizeof(ctx.v));
428 jit.load()->ClearHalt(BreakLoop); 424 std::memcpy(&fpr, ctx.v.data(), sizeof(fpr));
429}
430 425
431void ARM_Dynarmic_32::ClearInstructionCache() { 426 j.SetFpscr(FpsrFpcrToFpscr(ctx.fpsr, ctx.fpcr));
432 jit.load()->ClearCache(); 427 m_cp15->uprw = static_cast<u32>(ctx.tpidr);
433} 428}
434 429
435void ARM_Dynarmic_32::InvalidateCacheRange(u64 addr, std::size_t size) { 430void ArmDynarmic32::SignalInterrupt(Kernel::KThread* thread) {
436 jit.load()->InvalidateCacheRange(static_cast<u32>(addr), size); 431 m_jit->HaltExecution(BreakLoop);
437} 432}
438 433
439void ARM_Dynarmic_32::ClearExclusiveState() { 434void ArmDynarmic32::ClearInstructionCache() {
440 jit.load()->ClearExclusiveState(); 435 m_jit->ClearCache();
441} 436}
442 437
443void ARM_Dynarmic_32::PageTableChanged(Common::PageTable& page_table, 438void ArmDynarmic32::InvalidateCacheRange(u64 addr, std::size_t size) {
444 std::size_t new_address_space_size_in_bits) { 439 m_jit->InvalidateCacheRange(static_cast<u32>(addr), size);
445 ThreadContext32 ctx{};
446 SaveContext(ctx);
447
448 auto key = std::make_pair(&page_table, new_address_space_size_in_bits);
449 auto iter = jit_cache.find(key);
450 if (iter != jit_cache.end()) {
451 jit.store(iter->second.get());
452 LoadContext(ctx);
453 return;
454 }
455 std::shared_ptr new_jit = MakeJit(&page_table);
456 jit.store(new_jit.get());
457 LoadContext(ctx);
458 jit_cache.emplace(key, std::move(new_jit));
459} 440}
460 441
461} // namespace Core 442} // namespace Core
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.h b/src/core/arm/dynarmic/arm_dynarmic_32.h
index 92fb3f836..185ac7cbf 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.h
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.h
@@ -3,14 +3,8 @@
3 3
4#pragma once 4#pragma once
5 5
6#include <atomic>
7#include <memory>
8#include <unordered_map>
9
10#include <dynarmic/interface/A32/a32.h> 6#include <dynarmic/interface/A32/a32.h>
11#include <dynarmic/interface/A64/a64.h> 7
12#include "common/common_types.h"
13#include "common/hash.h"
14#include "core/arm/arm_interface.h" 8#include "core/arm/arm_interface.h"
15#include "core/arm/dynarmic/dynarmic_exclusive_monitor.h" 9#include "core/arm/dynarmic/dynarmic_exclusive_monitor.h"
16 10
@@ -20,89 +14,63 @@ class Memory;
20 14
21namespace Core { 15namespace Core {
22 16
23class CPUInterruptHandler;
24class DynarmicCallbacks32; 17class DynarmicCallbacks32;
25class DynarmicCP15; 18class DynarmicCP15;
26class DynarmicExclusiveMonitor;
27class System; 19class System;
28 20
29class ARM_Dynarmic_32 final : public ARM_Interface { 21class ArmDynarmic32 final : public ArmInterface {
30public: 22public:
31 ARM_Dynarmic_32(System& system_, bool uses_wall_clock_, 23 ArmDynarmic32(System& system, bool uses_wall_clock, const Kernel::KProcess* process,
32 DynarmicExclusiveMonitor& exclusive_monitor_, std::size_t core_index_); 24 DynarmicExclusiveMonitor& exclusive_monitor, std::size_t core_index);
33 ~ARM_Dynarmic_32() override; 25 ~ArmDynarmic32() override;
34
35 void SetPC(u64 pc) override;
36 u64 GetPC() const override;
37 u64 GetSP() const override;
38 u64 GetReg(int index) const override;
39 void SetReg(int index, u64 value) override;
40 u128 GetVectorReg(int index) const override;
41 void SetVectorReg(int index, u128 value) override;
42 u32 GetPSTATE() const override;
43 void SetPSTATE(u32 pstate) override;
44 u64 GetTlsAddress() const override;
45 void SetTlsAddress(u64 address) override;
46 void SetTPIDR_EL0(u64 value) override;
47 u64 GetTPIDR_EL0() const override;
48
49 bool IsInThumbMode() const {
50 return (GetPSTATE() & 0x20) != 0;
51 }
52 26
53 Architecture GetArchitecture() const override { 27 Architecture GetArchitecture() const override {
54 return Architecture::Aarch32; 28 return Architecture::AArch32;
55 } 29 }
56 void SaveContext(ThreadContext32& ctx) const override;
57 void SaveContext(ThreadContext64& ctx) const override {}
58 void LoadContext(const ThreadContext32& ctx) override;
59 void LoadContext(const ThreadContext64& ctx) override {}
60 30
61 void SignalInterrupt() override; 31 bool IsInThumbMode() const;
62 void ClearInterrupt() override; 32
63 void ClearExclusiveState() override; 33 HaltReason RunThread(Kernel::KThread* thread) override;
34 HaltReason StepThread(Kernel::KThread* thread) override;
35
36 void GetContext(Kernel::Svc::ThreadContext& ctx) const override;
37 void SetContext(const Kernel::Svc::ThreadContext& ctx) override;
38 void SetTpidrroEl0(u64 value) override;
39
40 void GetSvcArguments(std::span<uint64_t, 8> args) const override;
41 void SetSvcArguments(std::span<const uint64_t, 8> args) override;
42 u32 GetSvcNumber() const override;
64 43
44 void SignalInterrupt(Kernel::KThread* thread) override;
65 void ClearInstructionCache() override; 45 void ClearInstructionCache() override;
66 void InvalidateCacheRange(u64 addr, std::size_t size) override; 46 void InvalidateCacheRange(u64 addr, std::size_t size) override;
67 void PageTableChanged(Common::PageTable& new_page_table,
68 std::size_t new_address_space_size_in_bits) override;
69 47
70protected: 48protected:
71 HaltReason RunJit() override;
72 HaltReason StepJit() override;
73 u32 GetSvcNumber() const override;
74 const Kernel::DebugWatchpoint* HaltedWatchpoint() const override; 49 const Kernel::DebugWatchpoint* HaltedWatchpoint() const override;
75 void RewindBreakpointInstruction() override; 50 void RewindBreakpointInstruction() override;
76 51
77private: 52private:
78 std::shared_ptr<Dynarmic::A32::Jit> MakeJit(Common::PageTable* page_table) const; 53 System& m_system;
79 54 DynarmicExclusiveMonitor& m_exclusive_monitor;
80 static std::vector<BacktraceEntry> GetBacktrace(Core::System& system, u64 fp, u64 lr, u64 pc);
81
82 using JitCacheKey = std::pair<Common::PageTable*, std::size_t>;
83 using JitCacheType =
84 std::unordered_map<JitCacheKey, std::shared_ptr<Dynarmic::A32::Jit>, Common::PairHash>;
85 55
56private:
86 friend class DynarmicCallbacks32; 57 friend class DynarmicCallbacks32;
87 friend class DynarmicCP15; 58 friend class DynarmicCP15;
88 59
89 std::unique_ptr<DynarmicCallbacks32> cb; 60 std::shared_ptr<Dynarmic::A32::Jit> MakeJit(Common::PageTable* page_table) const;
90 JitCacheType jit_cache;
91 std::shared_ptr<DynarmicCP15> cp15;
92 std::size_t core_index;
93 DynarmicExclusiveMonitor& exclusive_monitor;
94 61
95 std::shared_ptr<Dynarmic::A32::Jit> null_jit; 62 std::unique_ptr<DynarmicCallbacks32> m_cb{};
63 std::shared_ptr<DynarmicCP15> m_cp15{};
64 std::size_t m_core_index{};
96 65
97 // A raw pointer here is fine; we never delete Jit instances. 66 std::shared_ptr<Dynarmic::A32::Jit> m_jit{};
98 std::atomic<Dynarmic::A32::Jit*> jit;
99 67
100 // SVC callback 68 // SVC callback
101 u32 svc_swi{}; 69 u32 m_svc_swi{};
102 70
103 // Watchpoint info 71 // Watchpoint info
104 const Kernel::DebugWatchpoint* halted_watchpoint; 72 const Kernel::DebugWatchpoint* m_halted_watchpoint{};
105 ThreadContext32 breakpoint_context; 73 Kernel::Svc::ThreadContext m_breakpoint_context{};
106}; 74};
107 75
108} // namespace Core 76} // namespace Core
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index 2e3674b6d..dff14756e 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -1,25 +1,12 @@
1// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project 1// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later 2// SPDX-License-Identifier: GPL-2.0-or-later
3 3
4#include <cinttypes>
5#include <memory>
6#include <dynarmic/interface/A64/a64.h>
7#include <dynarmic/interface/A64/config.h>
8#include "common/assert.h"
9#include "common/literals.h"
10#include "common/logging/log.h"
11#include "common/page_table.h"
12#include "common/settings.h" 4#include "common/settings.h"
13#include "core/arm/dynarmic/arm_dynarmic.h" 5#include "core/arm/dynarmic/arm_dynarmic.h"
14#include "core/arm/dynarmic/arm_dynarmic_64.h" 6#include "core/arm/dynarmic/arm_dynarmic_64.h"
15#include "core/arm/dynarmic/dynarmic_exclusive_monitor.h" 7#include "core/arm/dynarmic/dynarmic_exclusive_monitor.h"
16#include "core/core.h"
17#include "core/core_timing.h" 8#include "core/core_timing.h"
18#include "core/debugger/debugger.h"
19#include "core/hardware_properties.h"
20#include "core/hle/kernel/k_process.h" 9#include "core/hle/kernel/k_process.h"
21#include "core/hle/kernel/svc.h"
22#include "core/memory.h"
23 10
24namespace Core { 11namespace Core {
25 12
@@ -28,92 +15,92 @@ using namespace Common::Literals;
28 15
29class DynarmicCallbacks64 : public Dynarmic::A64::UserCallbacks { 16class DynarmicCallbacks64 : public Dynarmic::A64::UserCallbacks {
30public: 17public:
31 explicit DynarmicCallbacks64(ARM_Dynarmic_64& parent_) 18 explicit DynarmicCallbacks64(ArmDynarmic64& parent, const Kernel::KProcess* process)
32 : parent{parent_}, memory(parent.system.ApplicationMemory()), 19 : m_parent{parent}, m_memory(process->GetMemory()),
33 debugger_enabled{parent.system.DebuggerEnabled()}, 20 m_process(process), m_debugger_enabled{parent.m_system.DebuggerEnabled()},
34 check_memory_access{debugger_enabled || 21 m_check_memory_access{m_debugger_enabled ||
35 !Settings::values.cpuopt_ignore_memory_aborts.GetValue()} {} 22 !Settings::values.cpuopt_ignore_memory_aborts.GetValue()} {}
36 23
37 u8 MemoryRead8(u64 vaddr) override { 24 u8 MemoryRead8(u64 vaddr) override {
38 CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Read); 25 CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Read);
39 return memory.Read8(vaddr); 26 return m_memory.Read8(vaddr);
40 } 27 }
41 u16 MemoryRead16(u64 vaddr) override { 28 u16 MemoryRead16(u64 vaddr) override {
42 CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Read); 29 CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Read);
43 return memory.Read16(vaddr); 30 return m_memory.Read16(vaddr);
44 } 31 }
45 u32 MemoryRead32(u64 vaddr) override { 32 u32 MemoryRead32(u64 vaddr) override {
46 CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Read); 33 CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Read);
47 return memory.Read32(vaddr); 34 return m_memory.Read32(vaddr);
48 } 35 }
49 u64 MemoryRead64(u64 vaddr) override { 36 u64 MemoryRead64(u64 vaddr) override {
50 CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Read); 37 CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Read);
51 return memory.Read64(vaddr); 38 return m_memory.Read64(vaddr);
52 } 39 }
53 Vector MemoryRead128(u64 vaddr) override { 40 Vector MemoryRead128(u64 vaddr) override {
54 CheckMemoryAccess(vaddr, 16, Kernel::DebugWatchpointType::Read); 41 CheckMemoryAccess(vaddr, 16, Kernel::DebugWatchpointType::Read);
55 return {memory.Read64(vaddr), memory.Read64(vaddr + 8)}; 42 return {m_memory.Read64(vaddr), m_memory.Read64(vaddr + 8)};
56 } 43 }
57 std::optional<u32> MemoryReadCode(u64 vaddr) override { 44 std::optional<u32> MemoryReadCode(u64 vaddr) override {
58 if (!memory.IsValidVirtualAddressRange(vaddr, sizeof(u32))) { 45 if (!m_memory.IsValidVirtualAddressRange(vaddr, sizeof(u32))) {
59 return std::nullopt; 46 return std::nullopt;
60 } 47 }
61 return memory.Read32(vaddr); 48 return m_memory.Read32(vaddr);
62 } 49 }
63 50
64 void MemoryWrite8(u64 vaddr, u8 value) override { 51 void MemoryWrite8(u64 vaddr, u8 value) override {
65 if (CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write)) { 52 if (CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write)) {
66 memory.Write8(vaddr, value); 53 m_memory.Write8(vaddr, value);
67 } 54 }
68 } 55 }
69 void MemoryWrite16(u64 vaddr, u16 value) override { 56 void MemoryWrite16(u64 vaddr, u16 value) override {
70 if (CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Write)) { 57 if (CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Write)) {
71 memory.Write16(vaddr, value); 58 m_memory.Write16(vaddr, value);
72 } 59 }
73 } 60 }
74 void MemoryWrite32(u64 vaddr, u32 value) override { 61 void MemoryWrite32(u64 vaddr, u32 value) override {
75 if (CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Write)) { 62 if (CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Write)) {
76 memory.Write32(vaddr, value); 63 m_memory.Write32(vaddr, value);
77 } 64 }
78 } 65 }
79 void MemoryWrite64(u64 vaddr, u64 value) override { 66 void MemoryWrite64(u64 vaddr, u64 value) override {
80 if (CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Write)) { 67 if (CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Write)) {
81 memory.Write64(vaddr, value); 68 m_memory.Write64(vaddr, value);
82 } 69 }
83 } 70 }
84 void MemoryWrite128(u64 vaddr, Vector value) override { 71 void MemoryWrite128(u64 vaddr, Vector value) override {
85 if (CheckMemoryAccess(vaddr, 16, Kernel::DebugWatchpointType::Write)) { 72 if (CheckMemoryAccess(vaddr, 16, Kernel::DebugWatchpointType::Write)) {
86 memory.Write64(vaddr, value[0]); 73 m_memory.Write64(vaddr, value[0]);
87 memory.Write64(vaddr + 8, value[1]); 74 m_memory.Write64(vaddr + 8, value[1]);
88 } 75 }
89 } 76 }
90 77
91 bool MemoryWriteExclusive8(u64 vaddr, std::uint8_t value, std::uint8_t expected) override { 78 bool MemoryWriteExclusive8(u64 vaddr, std::uint8_t value, std::uint8_t expected) override {
92 return CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write) && 79 return CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write) &&
93 memory.WriteExclusive8(vaddr, value, expected); 80 m_memory.WriteExclusive8(vaddr, value, expected);
94 } 81 }
95 bool MemoryWriteExclusive16(u64 vaddr, std::uint16_t value, std::uint16_t expected) override { 82 bool MemoryWriteExclusive16(u64 vaddr, std::uint16_t value, std::uint16_t expected) override {
96 return CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Write) && 83 return CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Write) &&
97 memory.WriteExclusive16(vaddr, value, expected); 84 m_memory.WriteExclusive16(vaddr, value, expected);
98 } 85 }
99 bool MemoryWriteExclusive32(u64 vaddr, std::uint32_t value, std::uint32_t expected) override { 86 bool MemoryWriteExclusive32(u64 vaddr, std::uint32_t value, std::uint32_t expected) override {
100 return CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Write) && 87 return CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Write) &&
101 memory.WriteExclusive32(vaddr, value, expected); 88 m_memory.WriteExclusive32(vaddr, value, expected);
102 } 89 }
103 bool MemoryWriteExclusive64(u64 vaddr, std::uint64_t value, std::uint64_t expected) override { 90 bool MemoryWriteExclusive64(u64 vaddr, std::uint64_t value, std::uint64_t expected) override {
104 return CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Write) && 91 return CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Write) &&
105 memory.WriteExclusive64(vaddr, value, expected); 92 m_memory.WriteExclusive64(vaddr, value, expected);
106 } 93 }
107 bool MemoryWriteExclusive128(u64 vaddr, Vector value, Vector expected) override { 94 bool MemoryWriteExclusive128(u64 vaddr, Vector value, Vector expected) override {
108 return CheckMemoryAccess(vaddr, 16, Kernel::DebugWatchpointType::Write) && 95 return CheckMemoryAccess(vaddr, 16, Kernel::DebugWatchpointType::Write) &&
109 memory.WriteExclusive128(vaddr, value, expected); 96 m_memory.WriteExclusive128(vaddr, value, expected);
110 } 97 }
111 98
112 void InterpreterFallback(u64 pc, std::size_t num_instructions) override { 99 void InterpreterFallback(u64 pc, std::size_t num_instructions) override {
113 parent.LogBacktrace(); 100 m_parent.LogBacktrace(m_process);
114 LOG_ERROR(Core_ARM, 101 LOG_ERROR(Core_ARM,
115 "Unimplemented instruction @ 0x{:X} for {} instructions (instr = {:08X})", pc, 102 "Unimplemented instruction @ 0x{:X} for {} instructions (instr = {:08X})", pc,
116 num_instructions, memory.Read32(pc)); 103 num_instructions, m_memory.Read32(pc));
117 ReturnException(pc, PrefetchAbort); 104 ReturnException(pc, PrefetchAbort);
118 } 105 }
119 106
@@ -124,11 +111,11 @@ public:
124 static constexpr u64 ICACHE_LINE_SIZE = 64; 111 static constexpr u64 ICACHE_LINE_SIZE = 64;
125 112
126 const u64 cache_line_start = value & ~(ICACHE_LINE_SIZE - 1); 113 const u64 cache_line_start = value & ~(ICACHE_LINE_SIZE - 1);
127 parent.system.InvalidateCpuInstructionCacheRange(cache_line_start, ICACHE_LINE_SIZE); 114 m_parent.InvalidateCacheRange(cache_line_start, ICACHE_LINE_SIZE);
128 break; 115 break;
129 } 116 }
130 case Dynarmic::A64::InstructionCacheOperation::InvalidateAllToPoU: 117 case Dynarmic::A64::InstructionCacheOperation::InvalidateAllToPoU:
131 parent.system.InvalidateCpuInstructionCaches(); 118 m_parent.ClearInstructionCache();
132 break; 119 break;
133 case Dynarmic::A64::InstructionCacheOperation::InvalidateAllToPoUInnerSharable: 120 case Dynarmic::A64::InstructionCacheOperation::InvalidateAllToPoUInnerSharable:
134 default: 121 default:
@@ -136,7 +123,7 @@ public:
136 break; 123 break;
137 } 124 }
138 125
139 parent.jit.load()->HaltExecution(Dynarmic::HaltReason::CacheInvalidation); 126 m_parent.m_jit->HaltExecution(Dynarmic::HaltReason::CacheInvalidation);
140 } 127 }
141 128
142 void ExceptionRaised(u64 pc, Dynarmic::A64::Exception exception) override { 129 void ExceptionRaised(u64 pc, Dynarmic::A64::Exception exception) override {
@@ -152,26 +139,24 @@ public:
152 ReturnException(pc, PrefetchAbort); 139 ReturnException(pc, PrefetchAbort);
153 return; 140 return;
154 default: 141 default:
155 if (debugger_enabled) { 142 if (m_debugger_enabled) {
156 ReturnException(pc, InstructionBreakpoint); 143 ReturnException(pc, InstructionBreakpoint);
157 return; 144 return;
158 } 145 }
159 146
160 parent.LogBacktrace(); 147 m_parent.LogBacktrace(m_process);
161 LOG_CRITICAL(Core_ARM, "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X})", 148 LOG_CRITICAL(Core_ARM, "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X})",
162 static_cast<std::size_t>(exception), pc, memory.Read32(pc)); 149 static_cast<std::size_t>(exception), pc, m_memory.Read32(pc));
163 } 150 }
164 } 151 }
165 152
166 void CallSVC(u32 swi) override { 153 void CallSVC(u32 svc) override {
167 parent.svc_swi = swi; 154 m_parent.m_svc = svc;
168 parent.jit.load()->HaltExecution(SupervisorCall); 155 m_parent.m_jit->HaltExecution(SupervisorCall);
169 } 156 }
170 157
171 void AddTicks(u64 ticks) override { 158 void AddTicks(u64 ticks) override {
172 if (parent.uses_wall_clock) { 159 ASSERT_MSG(!m_parent.m_uses_wall_clock, "Dynarmic ticking disabled");
173 return;
174 }
175 160
176 // Divide the number of ticks by the amount of CPU cores. TODO(Subv): This yields only a 161 // Divide the number of ticks by the amount of CPU cores. TODO(Subv): This yields only a
177 // rough approximation of the amount of executed ticks in the system, it may be thrown off 162 // rough approximation of the amount of executed ticks in the system, it may be thrown off
@@ -182,44 +167,39 @@ public:
182 // Always execute at least one tick. 167 // Always execute at least one tick.
183 amortized_ticks = std::max<u64>(amortized_ticks, 1); 168 amortized_ticks = std::max<u64>(amortized_ticks, 1);
184 169
185 parent.system.CoreTiming().AddTicks(amortized_ticks); 170 m_parent.m_system.CoreTiming().AddTicks(amortized_ticks);
186 } 171 }
187 172
188 u64 GetTicksRemaining() override { 173 u64 GetTicksRemaining() override {
189 if (parent.uses_wall_clock) { 174 ASSERT_MSG(!m_parent.m_uses_wall_clock, "Dynarmic ticking disabled");
190 if (!IsInterrupted()) {
191 return minimum_run_cycles;
192 }
193 return 0U;
194 }
195 175
196 return std::max<s64>(parent.system.CoreTiming().GetDowncount(), 0); 176 return std::max<s64>(m_parent.m_system.CoreTiming().GetDowncount(), 0);
197 } 177 }
198 178
199 u64 GetCNTPCT() override { 179 u64 GetCNTPCT() override {
200 return parent.system.CoreTiming().GetClockTicks(); 180 return m_parent.m_system.CoreTiming().GetClockTicks();
201 } 181 }
202 182
203 bool CheckMemoryAccess(u64 addr, u64 size, Kernel::DebugWatchpointType type) { 183 bool CheckMemoryAccess(u64 addr, u64 size, Kernel::DebugWatchpointType type) {
204 if (!check_memory_access) { 184 if (!m_check_memory_access) {
205 return true; 185 return true;
206 } 186 }
207 187
208 if (!memory.IsValidVirtualAddressRange(addr, size)) { 188 if (!m_memory.IsValidVirtualAddressRange(addr, size)) {
209 LOG_CRITICAL(Core_ARM, "Stopping execution due to unmapped memory access at {:#x}", 189 LOG_CRITICAL(Core_ARM, "Stopping execution due to unmapped memory access at {:#x}",
210 addr); 190 addr);
211 parent.jit.load()->HaltExecution(PrefetchAbort); 191 m_parent.m_jit->HaltExecution(PrefetchAbort);
212 return false; 192 return false;
213 } 193 }
214 194
215 if (!debugger_enabled) { 195 if (!m_debugger_enabled) {
216 return true; 196 return true;
217 } 197 }
218 198
219 const auto match{parent.MatchingWatchpoint(addr, size, type)}; 199 const auto match{m_parent.MatchingWatchpoint(addr, size, type)};
220 if (match) { 200 if (match) {
221 parent.halted_watchpoint = match; 201 m_parent.m_halted_watchpoint = match;
222 parent.jit.load()->HaltExecution(DataAbort); 202 m_parent.m_jit->HaltExecution(DataAbort);
223 return false; 203 return false;
224 } 204 }
225 205
@@ -227,30 +207,27 @@ public:
227 } 207 }
228 208
229 void ReturnException(u64 pc, Dynarmic::HaltReason hr) { 209 void ReturnException(u64 pc, Dynarmic::HaltReason hr) {
230 parent.SaveContext(parent.breakpoint_context); 210 m_parent.GetContext(m_parent.m_breakpoint_context);
231 parent.breakpoint_context.pc = pc; 211 m_parent.m_breakpoint_context.pc = pc;
232 parent.jit.load()->HaltExecution(hr); 212 m_parent.m_jit->HaltExecution(hr);
233 } 213 }
234 214
235 bool IsInterrupted() { 215 ArmDynarmic64& m_parent;
236 return parent.system.Kernel().PhysicalCore(parent.core_index).IsInterrupted(); 216 Core::Memory::Memory& m_memory;
237 } 217 u64 m_tpidrro_el0{};
238 218 u64 m_tpidr_el0{};
239 ARM_Dynarmic_64& parent; 219 const Kernel::KProcess* m_process{};
240 Core::Memory::Memory& memory; 220 const bool m_debugger_enabled{};
241 u64 tpidrro_el0 = 0; 221 const bool m_check_memory_access{};
242 u64 tpidr_el0 = 0; 222 static constexpr u64 MinimumRunCycles = 10000U;
243 const bool debugger_enabled{};
244 const bool check_memory_access{};
245 static constexpr u64 minimum_run_cycles = 10000U;
246}; 223};
247 224
248std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable* page_table, 225std::shared_ptr<Dynarmic::A64::Jit> ArmDynarmic64::MakeJit(Common::PageTable* page_table,
249 std::size_t address_space_bits) const { 226 std::size_t address_space_bits) const {
250 Dynarmic::A64::UserConfig config; 227 Dynarmic::A64::UserConfig config;
251 228
252 // Callbacks 229 // Callbacks
253 config.callbacks = cb.get(); 230 config.callbacks = m_cb.get();
254 231
255 // Memory 232 // Memory
256 if (page_table) { 233 if (page_table) {
@@ -271,12 +248,12 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
271 } 248 }
272 249
273 // Multi-process state 250 // Multi-process state
274 config.processor_id = core_index; 251 config.processor_id = m_core_index;
275 config.global_monitor = &exclusive_monitor.monitor; 252 config.global_monitor = &m_exclusive_monitor.monitor;
276 253
277 // System registers 254 // System registers
278 config.tpidrro_el0 = &cb->tpidrro_el0; 255 config.tpidrro_el0 = &m_cb->m_tpidrro_el0;
279 config.tpidr_el0 = &cb->tpidr_el0; 256 config.tpidr_el0 = &m_cb->m_tpidr_el0;
280 config.dczid_el0 = 4; 257 config.dczid_el0 = 4;
281 config.ctr_el0 = 0x8444c004; 258 config.ctr_el0 = 0x8444c004;
282 config.cntfrq_el0 = Hardware::CNTFREQ; 259 config.cntfrq_el0 = Hardware::CNTFREQ;
@@ -285,8 +262,8 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
285 config.define_unpredictable_behaviour = true; 262 config.define_unpredictable_behaviour = true;
286 263
287 // Timing 264 // Timing
288 config.wall_clock_cntpct = uses_wall_clock; 265 config.wall_clock_cntpct = m_uses_wall_clock;
289 config.enable_cycle_counting = true; 266 config.enable_cycle_counting = !m_uses_wall_clock;
290 267
291 // Code cache size 268 // Code cache size
292#ifdef ARCHITECTURE_arm64 269#ifdef ARCHITECTURE_arm64
@@ -296,7 +273,7 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
296#endif 273#endif
297 274
298 // Allow memory fault handling to work 275 // Allow memory fault handling to work
299 if (system.DebuggerEnabled()) { 276 if (m_system.DebuggerEnabled()) {
300 config.check_halt_on_memory_access = true; 277 config.check_halt_on_memory_access = true;
301 } 278 }
302 279
@@ -384,147 +361,112 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
384 return std::make_shared<Dynarmic::A64::Jit>(config); 361 return std::make_shared<Dynarmic::A64::Jit>(config);
385} 362}
386 363
387HaltReason ARM_Dynarmic_64::RunJit() { 364HaltReason ArmDynarmic64::RunThread(Kernel::KThread* thread) {
388 return TranslateHaltReason(jit.load()->Run()); 365 m_jit->ClearExclusiveState();
389} 366 return TranslateHaltReason(m_jit->Run());
390
391HaltReason ARM_Dynarmic_64::StepJit() {
392 return TranslateHaltReason(jit.load()->Step());
393}
394
395u32 ARM_Dynarmic_64::GetSvcNumber() const {
396 return svc_swi;
397}
398
399const Kernel::DebugWatchpoint* ARM_Dynarmic_64::HaltedWatchpoint() const {
400 return halted_watchpoint;
401}
402
403void ARM_Dynarmic_64::RewindBreakpointInstruction() {
404 LoadContext(breakpoint_context);
405}
406
407ARM_Dynarmic_64::ARM_Dynarmic_64(System& system_, bool uses_wall_clock_,
408 DynarmicExclusiveMonitor& exclusive_monitor_,
409 std::size_t core_index_)
410 : ARM_Interface{system_, uses_wall_clock_},
411 cb(std::make_unique<DynarmicCallbacks64>(*this)), core_index{core_index_},
412 exclusive_monitor{exclusive_monitor_}, null_jit{MakeJit(nullptr, 48)}, jit{null_jit.get()} {}
413
414ARM_Dynarmic_64::~ARM_Dynarmic_64() = default;
415
416void ARM_Dynarmic_64::SetPC(u64 pc) {
417 jit.load()->SetPC(pc);
418}
419
420u64 ARM_Dynarmic_64::GetPC() const {
421 return jit.load()->GetPC();
422} 367}
423 368
424u64 ARM_Dynarmic_64::GetSP() const { 369HaltReason ArmDynarmic64::StepThread(Kernel::KThread* thread) {
425 return jit.load()->GetSP(); 370 m_jit->ClearExclusiveState();
371 return TranslateHaltReason(m_jit->Step());
426} 372}
427 373
428u64 ARM_Dynarmic_64::GetReg(int index) const { 374u32 ArmDynarmic64::GetSvcNumber() const {
429 return jit.load()->GetRegister(index); 375 return m_svc;
430} 376}
431 377
432void ARM_Dynarmic_64::SetReg(int index, u64 value) { 378void ArmDynarmic64::GetSvcArguments(std::span<uint64_t, 8> args) const {
433 jit.load()->SetRegister(index, value); 379 Dynarmic::A64::Jit& j = *m_jit;
434}
435 380
436u128 ARM_Dynarmic_64::GetVectorReg(int index) const { 381 for (size_t i = 0; i < 8; i++) {
437 return jit.load()->GetVector(index); 382 args[i] = j.GetRegister(i);
383 }
438} 384}
439 385
440void ARM_Dynarmic_64::SetVectorReg(int index, u128 value) { 386void ArmDynarmic64::SetSvcArguments(std::span<const uint64_t, 8> args) {
441 jit.load()->SetVector(index, value); 387 Dynarmic::A64::Jit& j = *m_jit;
442}
443 388
444u32 ARM_Dynarmic_64::GetPSTATE() const { 389 for (size_t i = 0; i < 8; i++) {
445 return jit.load()->GetPstate(); 390 j.SetRegister(i, args[i]);
391 }
446} 392}
447 393
448void ARM_Dynarmic_64::SetPSTATE(u32 pstate) { 394const Kernel::DebugWatchpoint* ArmDynarmic64::HaltedWatchpoint() const {
449 jit.load()->SetPstate(pstate); 395 return m_halted_watchpoint;
450} 396}
451 397
452u64 ARM_Dynarmic_64::GetTlsAddress() const { 398void ArmDynarmic64::RewindBreakpointInstruction() {
453 return cb->tpidrro_el0; 399 this->SetContext(m_breakpoint_context);
454} 400}
455 401
456void ARM_Dynarmic_64::SetTlsAddress(u64 address) { 402ArmDynarmic64::ArmDynarmic64(System& system, bool uses_wall_clock, const Kernel::KProcess* process,
457 cb->tpidrro_el0 = address; 403 DynarmicExclusiveMonitor& exclusive_monitor, std::size_t core_index)
404 : ArmInterface{uses_wall_clock}, m_system{system}, m_exclusive_monitor{exclusive_monitor},
405 m_cb(std::make_unique<DynarmicCallbacks64>(*this, process)), m_core_index{core_index} {
406 auto& page_table = process->GetPageTable().GetBasePageTable();
407 auto& page_table_impl = page_table.GetImpl();
408 m_jit = MakeJit(&page_table_impl, page_table.GetAddressSpaceWidth());
458} 409}
459 410
460u64 ARM_Dynarmic_64::GetTPIDR_EL0() const { 411ArmDynarmic64::~ArmDynarmic64() = default;
461 return cb->tpidr_el0;
462}
463 412
464void ARM_Dynarmic_64::SetTPIDR_EL0(u64 value) { 413void ArmDynarmic64::SetTpidrroEl0(u64 value) {
465 cb->tpidr_el0 = value; 414 m_cb->m_tpidrro_el0 = value;
466} 415}
467 416
468void ARM_Dynarmic_64::SaveContext(ThreadContext64& ctx) const { 417void ArmDynarmic64::GetContext(Kernel::Svc::ThreadContext& ctx) const {
469 Dynarmic::A64::Jit* j = jit.load(); 418 Dynarmic::A64::Jit& j = *m_jit;
470 ctx.cpu_registers = j->GetRegisters(); 419 auto gpr = j.GetRegisters();
471 ctx.sp = j->GetSP(); 420 auto fpr = j.GetVectors();
472 ctx.pc = j->GetPC();
473 ctx.pstate = j->GetPstate();
474 ctx.vector_registers = j->GetVectors();
475 ctx.fpcr = j->GetFpcr();
476 ctx.fpsr = j->GetFpsr();
477 ctx.tpidr = cb->tpidr_el0;
478}
479 421
480void ARM_Dynarmic_64::LoadContext(const ThreadContext64& ctx) { 422 // TODO: this is inconvenient
481 Dynarmic::A64::Jit* j = jit.load(); 423 for (size_t i = 0; i < 29; i++) {
482 j->SetRegisters(ctx.cpu_registers); 424 ctx.r[i] = gpr[i];
483 j->SetSP(ctx.sp); 425 }
484 j->SetPC(ctx.pc); 426 ctx.fp = gpr[29];
485 j->SetPstate(ctx.pstate); 427 ctx.lr = gpr[30];
486 j->SetVectors(ctx.vector_registers); 428
487 j->SetFpcr(ctx.fpcr); 429 ctx.sp = j.GetSP();
488 j->SetFpsr(ctx.fpsr); 430 ctx.pc = j.GetPC();
489 SetTPIDR_EL0(ctx.tpidr); 431 ctx.pstate = j.GetPstate();
432 ctx.v = fpr;
433 ctx.fpcr = j.GetFpcr();
434 ctx.fpsr = j.GetFpsr();
435 ctx.tpidr = m_cb->m_tpidr_el0;
490} 436}
491 437
492void ARM_Dynarmic_64::SignalInterrupt() { 438void ArmDynarmic64::SetContext(const Kernel::Svc::ThreadContext& ctx) {
493 jit.load()->HaltExecution(BreakLoop); 439 Dynarmic::A64::Jit& j = *m_jit;
494}
495 440
496void ARM_Dynarmic_64::ClearInterrupt() { 441 // TODO: this is inconvenient
497 jit.load()->ClearHalt(BreakLoop); 442 std::array<u64, 31> gpr;
498}
499 443
500void ARM_Dynarmic_64::ClearInstructionCache() { 444 for (size_t i = 0; i < 29; i++) {
501 jit.load()->ClearCache(); 445 gpr[i] = ctx.r[i];
446 }
447 gpr[29] = ctx.fp;
448 gpr[30] = ctx.lr;
449
450 j.SetRegisters(gpr);
451 j.SetSP(ctx.sp);
452 j.SetPC(ctx.pc);
453 j.SetPstate(ctx.pstate);
454 j.SetVectors(ctx.v);
455 j.SetFpcr(ctx.fpcr);
456 j.SetFpsr(ctx.fpsr);
457 m_cb->m_tpidr_el0 = ctx.tpidr;
502} 458}
503 459
504void ARM_Dynarmic_64::InvalidateCacheRange(u64 addr, std::size_t size) { 460void ArmDynarmic64::SignalInterrupt(Kernel::KThread* thread) {
505 jit.load()->InvalidateCacheRange(addr, size); 461 m_jit->HaltExecution(BreakLoop);
506} 462}
507 463
508void ARM_Dynarmic_64::ClearExclusiveState() { 464void ArmDynarmic64::ClearInstructionCache() {
509 jit.load()->ClearExclusiveState(); 465 m_jit->ClearCache();
510} 466}
511 467
512void ARM_Dynarmic_64::PageTableChanged(Common::PageTable& page_table, 468void ArmDynarmic64::InvalidateCacheRange(u64 addr, std::size_t size) {
513 std::size_t new_address_space_size_in_bits) { 469 m_jit->InvalidateCacheRange(addr, size);
514 ThreadContext64 ctx{};
515 SaveContext(ctx);
516
517 auto key = std::make_pair(&page_table, new_address_space_size_in_bits);
518 auto iter = jit_cache.find(key);
519 if (iter != jit_cache.end()) {
520 jit.store(iter->second.get());
521 LoadContext(ctx);
522 return;
523 }
524 std::shared_ptr new_jit = MakeJit(&page_table, new_address_space_size_in_bits);
525 jit.store(new_jit.get());
526 LoadContext(ctx);
527 jit_cache.emplace(key, std::move(new_jit));
528} 470}
529 471
530} // namespace Core 472} // namespace Core
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.h b/src/core/arm/dynarmic/arm_dynarmic_64.h
index 2b88a08e2..4f3dd026f 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.h
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.h
@@ -23,76 +23,55 @@ class DynarmicCallbacks64;
23class DynarmicExclusiveMonitor; 23class DynarmicExclusiveMonitor;
24class System; 24class System;
25 25
26class ARM_Dynarmic_64 final : public ARM_Interface { 26class ArmDynarmic64 final : public ArmInterface {
27public: 27public:
28 ARM_Dynarmic_64(System& system_, bool uses_wall_clock_, 28 ArmDynarmic64(System& system, bool uses_wall_clock, const Kernel::KProcess* process,
29 DynarmicExclusiveMonitor& exclusive_monitor_, std::size_t core_index_); 29 DynarmicExclusiveMonitor& exclusive_monitor, std::size_t core_index);
30 ~ARM_Dynarmic_64() override; 30 ~ArmDynarmic64() override;
31
32 void SetPC(u64 pc) override;
33 u64 GetPC() const override;
34 u64 GetSP() const override;
35 u64 GetReg(int index) const override;
36 void SetReg(int index, u64 value) override;
37 u128 GetVectorReg(int index) const override;
38 void SetVectorReg(int index, u128 value) override;
39 u32 GetPSTATE() const override;
40 void SetPSTATE(u32 pstate) override;
41 u64 GetTlsAddress() const override;
42 void SetTlsAddress(u64 address) override;
43 void SetTPIDR_EL0(u64 value) override;
44 u64 GetTPIDR_EL0() const override;
45 31
46 Architecture GetArchitecture() const override { 32 Architecture GetArchitecture() const override {
47 return Architecture::Aarch64; 33 return Architecture::AArch64;
48 } 34 }
49 void SaveContext(ThreadContext32& ctx) const override {}
50 void SaveContext(ThreadContext64& ctx) const override;
51 void LoadContext(const ThreadContext32& ctx) override {}
52 void LoadContext(const ThreadContext64& ctx) override;
53 35
54 void SignalInterrupt() override; 36 HaltReason RunThread(Kernel::KThread* thread) override;
55 void ClearInterrupt() override; 37 HaltReason StepThread(Kernel::KThread* thread) override;
56 void ClearExclusiveState() override;
57 38
39 void GetContext(Kernel::Svc::ThreadContext& ctx) const override;
40 void SetContext(const Kernel::Svc::ThreadContext& ctx) override;
41 void SetTpidrroEl0(u64 value) override;
42
43 void GetSvcArguments(std::span<uint64_t, 8> args) const override;
44 void SetSvcArguments(std::span<const uint64_t, 8> args) override;
45 u32 GetSvcNumber() const override;
46
47 void SignalInterrupt(Kernel::KThread* thread) override;
58 void ClearInstructionCache() override; 48 void ClearInstructionCache() override;
59 void InvalidateCacheRange(u64 addr, std::size_t size) override; 49 void InvalidateCacheRange(u64 addr, std::size_t size) override;
60 void PageTableChanged(Common::PageTable& new_page_table,
61 std::size_t new_address_space_size_in_bits) override;
62 50
63protected: 51protected:
64 HaltReason RunJit() override;
65 HaltReason StepJit() override;
66 u32 GetSvcNumber() const override;
67 const Kernel::DebugWatchpoint* HaltedWatchpoint() const override; 52 const Kernel::DebugWatchpoint* HaltedWatchpoint() const override;
68 void RewindBreakpointInstruction() override; 53 void RewindBreakpointInstruction() override;
69 54
70private: 55private:
71 std::shared_ptr<Dynarmic::A64::Jit> MakeJit(Common::PageTable* page_table, 56 System& m_system;
72 std::size_t address_space_bits) const; 57 DynarmicExclusiveMonitor& m_exclusive_monitor;
73
74 using JitCacheKey = std::pair<Common::PageTable*, std::size_t>;
75 using JitCacheType =
76 std::unordered_map<JitCacheKey, std::shared_ptr<Dynarmic::A64::Jit>, Common::PairHash>;
77 58
59private:
78 friend class DynarmicCallbacks64; 60 friend class DynarmicCallbacks64;
79 std::unique_ptr<DynarmicCallbacks64> cb;
80 JitCacheType jit_cache;
81
82 std::size_t core_index;
83 DynarmicExclusiveMonitor& exclusive_monitor;
84 61
85 std::shared_ptr<Dynarmic::A64::Jit> null_jit; 62 std::shared_ptr<Dynarmic::A64::Jit> MakeJit(Common::PageTable* page_table,
63 std::size_t address_space_bits) const;
64 std::unique_ptr<DynarmicCallbacks64> m_cb{};
65 std::size_t m_core_index{};
86 66
87 // A raw pointer here is fine; we never delete Jit instances. 67 std::shared_ptr<Dynarmic::A64::Jit> m_jit{};
88 std::atomic<Dynarmic::A64::Jit*> jit;
89 68
90 // SVC callback 69 // SVC callback
91 u32 svc_swi{}; 70 u32 m_svc{};
92 71
93 // Breakpoint info 72 // Watchpoint info
94 const Kernel::DebugWatchpoint* halted_watchpoint; 73 const Kernel::DebugWatchpoint* m_halted_watchpoint{};
95 ThreadContext64 breakpoint_context; 74 Kernel::Svc::ThreadContext m_breakpoint_context{};
96}; 75};
97 76
98} // namespace Core 77} // namespace Core
diff --git a/src/core/arm/dynarmic/dynarmic_cp15.cpp b/src/core/arm/dynarmic/dynarmic_cp15.cpp
index 92c548db0..f3eee0d42 100644
--- a/src/core/arm/dynarmic/dynarmic_cp15.cpp
+++ b/src/core/arm/dynarmic/dynarmic_cp15.cpp
@@ -124,8 +124,8 @@ CallbackOrAccessTwoWords DynarmicCP15::CompileGetTwoWords(bool two, unsigned opc
124 if (!two && opc == 0 && CRm == CoprocReg::C14) { 124 if (!two && opc == 0 && CRm == CoprocReg::C14) {
125 // CNTPCT 125 // CNTPCT
126 const auto callback = [](void* arg, u32, u32) -> u64 { 126 const auto callback = [](void* arg, u32, u32) -> u64 {
127 const auto& parent_arg = *static_cast<ARM_Dynarmic_32*>(arg); 127 const auto& parent_arg = *static_cast<ArmDynarmic32*>(arg);
128 return parent_arg.system.CoreTiming().GetClockTicks(); 128 return parent_arg.m_system.CoreTiming().GetClockTicks();
129 }; 129 };
130 return Callback{callback, &parent}; 130 return Callback{callback, &parent};
131 } 131 }
diff --git a/src/core/arm/dynarmic/dynarmic_cp15.h b/src/core/arm/dynarmic/dynarmic_cp15.h
index d90b3e568..f3d96b0d8 100644
--- a/src/core/arm/dynarmic/dynarmic_cp15.h
+++ b/src/core/arm/dynarmic/dynarmic_cp15.h
@@ -10,13 +10,13 @@
10 10
11namespace Core { 11namespace Core {
12 12
13class ARM_Dynarmic_32; 13class ArmDynarmic32;
14 14
15class DynarmicCP15 final : public Dynarmic::A32::Coprocessor { 15class DynarmicCP15 final : public Dynarmic::A32::Coprocessor {
16public: 16public:
17 using CoprocReg = Dynarmic::A32::CoprocReg; 17 using CoprocReg = Dynarmic::A32::CoprocReg;
18 18
19 explicit DynarmicCP15(ARM_Dynarmic_32& parent_) : parent{parent_} {} 19 explicit DynarmicCP15(ArmDynarmic32& parent_) : parent{parent_} {}
20 20
21 std::optional<Callback> CompileInternalOperation(bool two, unsigned opc1, CoprocReg CRd, 21 std::optional<Callback> CompileInternalOperation(bool two, unsigned opc1, CoprocReg CRd,
22 CoprocReg CRn, CoprocReg CRm, 22 CoprocReg CRn, CoprocReg CRm,
@@ -32,11 +32,11 @@ public:
32 std::optional<Callback> CompileStoreWords(bool two, bool long_transfer, CoprocReg CRd, 32 std::optional<Callback> CompileStoreWords(bool two, bool long_transfer, CoprocReg CRd,
33 std::optional<u8> option) override; 33 std::optional<u8> option) override;
34 34
35 ARM_Dynarmic_32& parent; 35 ArmDynarmic32& parent;
36 u32 uprw = 0; 36 u32 uprw = 0;
37 u32 uro = 0; 37 u32 uro = 0;
38 38
39 friend class ARM_Dynarmic_32; 39 friend class ArmDynarmic32;
40}; 40};
41 41
42} // namespace Core 42} // namespace Core
diff --git a/src/core/arm/dynarmic/dynarmic_exclusive_monitor.h b/src/core/arm/dynarmic/dynarmic_exclusive_monitor.h
index fbfcd8d95..c4f22ec89 100644
--- a/src/core/arm/dynarmic/dynarmic_exclusive_monitor.h
+++ b/src/core/arm/dynarmic/dynarmic_exclusive_monitor.h
@@ -14,8 +14,8 @@ class Memory;
14 14
15namespace Core { 15namespace Core {
16 16
17class ARM_Dynarmic_32; 17class ArmDynarmic32;
18class ARM_Dynarmic_64; 18class ArmDynarmic64;
19 19
20class DynarmicExclusiveMonitor final : public ExclusiveMonitor { 20class DynarmicExclusiveMonitor final : public ExclusiveMonitor {
21public: 21public:
@@ -36,8 +36,8 @@ public:
36 bool ExclusiveWrite128(std::size_t core_index, VAddr vaddr, u128 value) override; 36 bool ExclusiveWrite128(std::size_t core_index, VAddr vaddr, u128 value) override;
37 37
38private: 38private:
39 friend class ARM_Dynarmic_32; 39 friend class ArmDynarmic32;
40 friend class ARM_Dynarmic_64; 40 friend class ArmDynarmic64;
41 Dynarmic::ExclusiveMonitor monitor; 41 Dynarmic::ExclusiveMonitor monitor;
42 Core::Memory::Memory& memory; 42 Core::Memory::Memory& memory;
43}; 43};
diff --git a/src/core/arm/nce/arm_nce.cpp b/src/core/arm/nce/arm_nce.cpp
index f7bdafd39..b42a32a0b 100644
--- a/src/core/arm/nce/arm_nce.cpp
+++ b/src/core/arm/nce/arm_nce.cpp
@@ -6,6 +6,7 @@
6 6
7#include "common/signal_chain.h" 7#include "common/signal_chain.h"
8#include "core/arm/nce/arm_nce.h" 8#include "core/arm/nce/arm_nce.h"
9#include "core/arm/nce/guest_context.h"
9#include "core/arm/nce/patcher.h" 10#include "core/arm/nce/patcher.h"
10#include "core/core.h" 11#include "core/core.h"
11#include "core/memory.h" 12#include "core/memory.h"
@@ -38,7 +39,7 @@ fpsimd_context* GetFloatingPointState(mcontext_t& host_ctx) {
38 39
39} // namespace 40} // namespace
40 41
41void* ARM_NCE::RestoreGuestContext(void* raw_context) { 42void* ArmNce::RestoreGuestContext(void* raw_context) {
42 // Retrieve the host context. 43 // Retrieve the host context.
43 auto& host_ctx = static_cast<ucontext_t*>(raw_context)->uc_mcontext; 44 auto& host_ctx = static_cast<ucontext_t*>(raw_context)->uc_mcontext;
44 45
@@ -71,7 +72,7 @@ void* ARM_NCE::RestoreGuestContext(void* raw_context) {
71 return tpidr; 72 return tpidr;
72} 73}
73 74
74void ARM_NCE::SaveGuestContext(GuestContext* guest_ctx, void* raw_context) { 75void ArmNce::SaveGuestContext(GuestContext* guest_ctx, void* raw_context) {
75 // Retrieve the host context. 76 // Retrieve the host context.
76 auto& host_ctx = static_cast<ucontext_t*>(raw_context)->uc_mcontext; 77 auto& host_ctx = static_cast<ucontext_t*>(raw_context)->uc_mcontext;
77 78
@@ -103,7 +104,7 @@ void ARM_NCE::SaveGuestContext(GuestContext* guest_ctx, void* raw_context) {
103 host_ctx.regs[0] = guest_ctx->esr_el1.exchange(0); 104 host_ctx.regs[0] = guest_ctx->esr_el1.exchange(0);
104} 105}
105 106
106bool ARM_NCE::HandleGuestFault(GuestContext* guest_ctx, void* raw_info, void* raw_context) { 107bool ArmNce::HandleGuestFault(GuestContext* guest_ctx, void* raw_info, void* raw_context) {
107 auto& host_ctx = static_cast<ucontext_t*>(raw_context)->uc_mcontext; 108 auto& host_ctx = static_cast<ucontext_t*>(raw_context)->uc_mcontext;
108 auto* info = static_cast<siginfo_t*>(raw_info); 109 auto* info = static_cast<siginfo_t*>(raw_info);
109 110
@@ -134,7 +135,7 @@ bool ARM_NCE::HandleGuestFault(GuestContext* guest_ctx, void* raw_info, void* ra
134 // - If we lose the race, then SignalInterrupt will send us a signal we are masking, 135 // - If we lose the race, then SignalInterrupt will send us a signal we are masking,
135 // and it will do nothing when it is unmasked, as we have already left guest code. 136 // and it will do nothing when it is unmasked, as we have already left guest code.
136 // - If we win the race, then SignalInterrupt will wait for us to unlock first. 137 // - If we win the race, then SignalInterrupt will wait for us to unlock first.
137 auto& thread_params = guest_ctx->parent->running_thread->GetNativeExecutionParameters(); 138 auto& thread_params = guest_ctx->parent->m_running_thread->GetNativeExecutionParameters();
138 thread_params.lock.store(SpinLockLocked); 139 thread_params.lock.store(SpinLockLocked);
139 140
140 // Return to host. 141 // Return to host.
@@ -142,97 +143,93 @@ bool ARM_NCE::HandleGuestFault(GuestContext* guest_ctx, void* raw_info, void* ra
142 return false; 143 return false;
143} 144}
144 145
145void ARM_NCE::HandleHostFault(int sig, void* raw_info, void* raw_context) { 146void ArmNce::HandleHostFault(int sig, void* raw_info, void* raw_context) {
146 return g_orig_action.sa_sigaction(sig, static_cast<siginfo_t*>(raw_info), raw_context); 147 return g_orig_action.sa_sigaction(sig, static_cast<siginfo_t*>(raw_info), raw_context);
147} 148}
148 149
149HaltReason ARM_NCE::RunJit() { 150void ArmNce::LockThread(Kernel::KThread* thread) {
150 // Get the thread parameters.
151 // TODO: pass the current thread down from ::Run
152 auto* thread = Kernel::GetCurrentThreadPointer(system.Kernel());
153 auto* thread_params = &thread->GetNativeExecutionParameters(); 151 auto* thread_params = &thread->GetNativeExecutionParameters();
152 LockThreadParameters(thread_params);
153}
154 154
155 { 155void ArmNce::UnlockThread(Kernel::KThread* thread) {
156 // Lock our core context. 156 auto* thread_params = &thread->GetNativeExecutionParameters();
157 std::scoped_lock lk{lock}; 157 UnlockThreadParameters(thread_params);
158 158}
159 // We should not be running.
160 ASSERT(running_thread == nullptr);
161
162 // Check if we need to run. If we have already been halted, we are done.
163 u64 halt = guest_ctx.esr_el1.exchange(0);
164 if (halt != 0) {
165 return static_cast<HaltReason>(halt);
166 }
167
168 // Mark that we are running.
169 running_thread = thread;
170 159
171 // Acquire the lock on the thread parameters. 160HaltReason ArmNce::RunThread(Kernel::KThread* thread) {
172 // This allows us to force synchronization with SignalInterrupt. 161 // Check if we're already interrupted.
173 LockThreadParameters(thread_params); 162 // If we are, we can just return immediately.
163 HaltReason hr = static_cast<HaltReason>(m_guest_ctx.esr_el1.exchange(0));
164 if (True(hr)) {
165 return hr;
174 } 166 }
175 167
168 // Get the thread context.
169 auto* thread_params = &thread->GetNativeExecutionParameters();
170 auto* process = thread->GetOwnerProcess();
171
176 // Assign current members. 172 // Assign current members.
177 guest_ctx.parent = this; 173 m_running_thread = thread;
178 thread_params->native_context = &guest_ctx; 174 m_guest_ctx.parent = this;
179 thread_params->tpidr_el0 = guest_ctx.tpidr_el0; 175 thread_params->native_context = &m_guest_ctx;
180 thread_params->tpidrro_el0 = guest_ctx.tpidrro_el0; 176 thread_params->tpidr_el0 = m_guest_ctx.tpidr_el0;
177 thread_params->tpidrro_el0 = m_guest_ctx.tpidrro_el0;
181 thread_params->is_running = true; 178 thread_params->is_running = true;
182 179
183 HaltReason halt{};
184
185 // TODO: finding and creating the post handler needs to be locked 180 // TODO: finding and creating the post handler needs to be locked
186 // to deal with dynamic loading of NROs. 181 // to deal with dynamic loading of NROs.
187 const auto& post_handlers = system.ApplicationProcess()->GetPostHandlers(); 182 const auto& post_handlers = process->GetPostHandlers();
188 if (auto it = post_handlers.find(guest_ctx.pc); it != post_handlers.end()) { 183 if (auto it = post_handlers.find(m_guest_ctx.pc); it != post_handlers.end()) {
189 halt = ReturnToRunCodeByTrampoline(thread_params, &guest_ctx, it->second); 184 hr = ReturnToRunCodeByTrampoline(thread_params, &m_guest_ctx, it->second);
190 } else { 185 } else {
191 halt = ReturnToRunCodeByExceptionLevelChange(thread_id, thread_params); 186 hr = ReturnToRunCodeByExceptionLevelChange(m_thread_id, thread_params);
192 } 187 }
193 188
194 // Unload members. 189 // Unload members.
195 // The thread does not change, so we can persist the old reference. 190 // The thread does not change, so we can persist the old reference.
196 guest_ctx.tpidr_el0 = thread_params->tpidr_el0; 191 m_running_thread = nullptr;
192 m_guest_ctx.tpidr_el0 = thread_params->tpidr_el0;
197 thread_params->native_context = nullptr; 193 thread_params->native_context = nullptr;
198 thread_params->is_running = false; 194 thread_params->is_running = false;
199 195
200 // Unlock the thread parameters.
201 UnlockThreadParameters(thread_params);
202
203 {
204 // Lock the core context.
205 std::scoped_lock lk{lock};
206
207 // On exit, we no longer have an active thread.
208 running_thread = nullptr;
209 }
210
211 // Return the halt reason. 196 // Return the halt reason.
212 return halt; 197 return hr;
213} 198}
214 199
215HaltReason ARM_NCE::StepJit() { 200HaltReason ArmNce::StepThread(Kernel::KThread* thread) {
216 return HaltReason::StepThread; 201 return HaltReason::StepThread;
217} 202}
218 203
219u32 ARM_NCE::GetSvcNumber() const { 204u32 ArmNce::GetSvcNumber() const {
220 return guest_ctx.svc_swi; 205 return m_guest_ctx.svc;
206}
207
208void ArmNce::GetSvcArguments(std::span<uint64_t, 8> args) const {
209 for (size_t i = 0; i < 8; i++) {
210 args[i] = m_guest_ctx.cpu_registers[i];
211 }
212}
213
214void ArmNce::SetSvcArguments(std::span<const uint64_t, 8> args) {
215 for (size_t i = 0; i < 8; i++) {
216 m_guest_ctx.cpu_registers[i] = args[i];
217 }
221} 218}
222 219
223ARM_NCE::ARM_NCE(System& system_, bool uses_wall_clock_, std::size_t core_index_) 220ArmNce::ArmNce(System& system, bool uses_wall_clock, std::size_t core_index)
224 : ARM_Interface{system_, uses_wall_clock_}, core_index{core_index_} { 221 : ArmInterface{uses_wall_clock}, m_system{system}, m_core_index{core_index} {
225 guest_ctx.system = &system_; 222 m_guest_ctx.system = &m_system;
226} 223}
227 224
228ARM_NCE::~ARM_NCE() = default; 225ArmNce::~ArmNce() = default;
229 226
230void ARM_NCE::Initialize() { 227void ArmNce::Initialize() {
231 thread_id = gettid(); 228 m_thread_id = gettid();
232 229
233 // Setup our signals 230 // Setup our signals
234 static std::once_flag flag; 231 static std::once_flag signals;
235 std::call_once(flag, [] { 232 std::call_once(signals, [] {
236 using HandlerType = decltype(sigaction::sa_sigaction); 233 using HandlerType = decltype(sigaction::sa_sigaction);
237 234
238 sigset_t signal_mask; 235 sigset_t signal_mask;
@@ -244,7 +241,7 @@ void ARM_NCE::Initialize() {
244 struct sigaction return_to_run_code_action {}; 241 struct sigaction return_to_run_code_action {};
245 return_to_run_code_action.sa_flags = SA_SIGINFO | SA_ONSTACK; 242 return_to_run_code_action.sa_flags = SA_SIGINFO | SA_ONSTACK;
246 return_to_run_code_action.sa_sigaction = reinterpret_cast<HandlerType>( 243 return_to_run_code_action.sa_sigaction = reinterpret_cast<HandlerType>(
247 &ARM_NCE::ReturnToRunCodeByExceptionLevelChangeSignalHandler); 244 &ArmNce::ReturnToRunCodeByExceptionLevelChangeSignalHandler);
248 return_to_run_code_action.sa_mask = signal_mask; 245 return_to_run_code_action.sa_mask = signal_mask;
249 Common::SigAction(ReturnToRunCodeByExceptionLevelChangeSignal, &return_to_run_code_action, 246 Common::SigAction(ReturnToRunCodeByExceptionLevelChangeSignal, &return_to_run_code_action,
250 nullptr); 247 nullptr);
@@ -252,14 +249,13 @@ void ARM_NCE::Initialize() {
252 struct sigaction break_from_run_code_action {}; 249 struct sigaction break_from_run_code_action {};
253 break_from_run_code_action.sa_flags = SA_SIGINFO | SA_ONSTACK; 250 break_from_run_code_action.sa_flags = SA_SIGINFO | SA_ONSTACK;
254 break_from_run_code_action.sa_sigaction = 251 break_from_run_code_action.sa_sigaction =
255 reinterpret_cast<HandlerType>(&ARM_NCE::BreakFromRunCodeSignalHandler); 252 reinterpret_cast<HandlerType>(&ArmNce::BreakFromRunCodeSignalHandler);
256 break_from_run_code_action.sa_mask = signal_mask; 253 break_from_run_code_action.sa_mask = signal_mask;
257 Common::SigAction(BreakFromRunCodeSignal, &break_from_run_code_action, nullptr); 254 Common::SigAction(BreakFromRunCodeSignal, &break_from_run_code_action, nullptr);
258 255
259 struct sigaction fault_action {}; 256 struct sigaction fault_action {};
260 fault_action.sa_flags = SA_SIGINFO | SA_ONSTACK | SA_RESTART; 257 fault_action.sa_flags = SA_SIGINFO | SA_ONSTACK | SA_RESTART;
261 fault_action.sa_sigaction = 258 fault_action.sa_sigaction = reinterpret_cast<HandlerType>(&ArmNce::GuestFaultSignalHandler);
262 reinterpret_cast<HandlerType>(&ARM_NCE::GuestFaultSignalHandler);
263 fault_action.sa_mask = signal_mask; 259 fault_action.sa_mask = signal_mask;
264 Common::SigAction(GuestFaultSignal, &fault_action, &g_orig_action); 260 Common::SigAction(GuestFaultSignal, &fault_action, &g_orig_action);
265 261
@@ -272,111 +268,59 @@ void ARM_NCE::Initialize() {
272 }); 268 });
273} 269}
274 270
275void ARM_NCE::SetPC(u64 pc) { 271void ArmNce::SetTpidrroEl0(u64 value) {
276 guest_ctx.pc = pc; 272 m_guest_ctx.tpidrro_el0 = value;
277} 273}
278 274
279u64 ARM_NCE::GetPC() const { 275void ArmNce::GetContext(Kernel::Svc::ThreadContext& ctx) const {
280 return guest_ctx.pc; 276 for (size_t i = 0; i < 29; i++) {
281} 277 ctx.r[i] = m_guest_ctx.cpu_registers[i];
282 278 }
283u64 ARM_NCE::GetSP() const { 279 ctx.fp = m_guest_ctx.cpu_registers[29];
284 return guest_ctx.sp; 280 ctx.lr = m_guest_ctx.cpu_registers[30];
285} 281 ctx.sp = m_guest_ctx.sp;
286 282 ctx.pc = m_guest_ctx.pc;
287u64 ARM_NCE::GetReg(int index) const { 283 ctx.pstate = m_guest_ctx.pstate;
288 return guest_ctx.cpu_registers[index]; 284 ctx.v = m_guest_ctx.vector_registers;
289} 285 ctx.fpcr = m_guest_ctx.fpcr;
290 286 ctx.fpsr = m_guest_ctx.fpsr;
291void ARM_NCE::SetReg(int index, u64 value) { 287 ctx.tpidr = m_guest_ctx.tpidr_el0;
292 guest_ctx.cpu_registers[index] = value;
293}
294
295u128 ARM_NCE::GetVectorReg(int index) const {
296 return guest_ctx.vector_registers[index];
297}
298
299void ARM_NCE::SetVectorReg(int index, u128 value) {
300 guest_ctx.vector_registers[index] = value;
301}
302
303u32 ARM_NCE::GetPSTATE() const {
304 return guest_ctx.pstate;
305}
306
307void ARM_NCE::SetPSTATE(u32 pstate) {
308 guest_ctx.pstate = pstate;
309}
310
311u64 ARM_NCE::GetTlsAddress() const {
312 return guest_ctx.tpidrro_el0;
313}
314
315void ARM_NCE::SetTlsAddress(u64 address) {
316 guest_ctx.tpidrro_el0 = address;
317}
318
319u64 ARM_NCE::GetTPIDR_EL0() const {
320 return guest_ctx.tpidr_el0;
321}
322
323void ARM_NCE::SetTPIDR_EL0(u64 value) {
324 guest_ctx.tpidr_el0 = value;
325}
326
327void ARM_NCE::SaveContext(ThreadContext64& ctx) const {
328 ctx.cpu_registers = guest_ctx.cpu_registers;
329 ctx.sp = guest_ctx.sp;
330 ctx.pc = guest_ctx.pc;
331 ctx.pstate = guest_ctx.pstate;
332 ctx.vector_registers = guest_ctx.vector_registers;
333 ctx.fpcr = guest_ctx.fpcr;
334 ctx.fpsr = guest_ctx.fpsr;
335 ctx.tpidr = guest_ctx.tpidr_el0;
336} 288}
337 289
338void ARM_NCE::LoadContext(const ThreadContext64& ctx) { 290void ArmNce::SetContext(const Kernel::Svc::ThreadContext& ctx) {
339 guest_ctx.cpu_registers = ctx.cpu_registers; 291 for (size_t i = 0; i < 29; i++) {
340 guest_ctx.sp = ctx.sp; 292 m_guest_ctx.cpu_registers[i] = ctx.r[i];
341 guest_ctx.pc = ctx.pc; 293 }
342 guest_ctx.pstate = ctx.pstate; 294 m_guest_ctx.cpu_registers[29] = ctx.fp;
343 guest_ctx.vector_registers = ctx.vector_registers; 295 m_guest_ctx.cpu_registers[30] = ctx.lr;
344 guest_ctx.fpcr = ctx.fpcr; 296 m_guest_ctx.sp = ctx.sp;
345 guest_ctx.fpsr = ctx.fpsr; 297 m_guest_ctx.pc = ctx.pc;
346 guest_ctx.tpidr_el0 = ctx.tpidr; 298 m_guest_ctx.pstate = ctx.pstate;
299 m_guest_ctx.vector_registers = ctx.v;
300 m_guest_ctx.fpcr = ctx.fpcr;
301 m_guest_ctx.fpsr = ctx.fpsr;
302 m_guest_ctx.tpidr_el0 = ctx.tpidr;
347} 303}
348 304
349void ARM_NCE::SignalInterrupt() { 305void ArmNce::SignalInterrupt(Kernel::KThread* thread) {
350 // Lock core context.
351 std::scoped_lock lk{lock};
352
353 // Add break loop condition. 306 // Add break loop condition.
354 guest_ctx.esr_el1.fetch_or(static_cast<u64>(HaltReason::BreakLoop)); 307 m_guest_ctx.esr_el1.fetch_or(static_cast<u64>(HaltReason::BreakLoop));
355
356 // If there is no thread running, we are done.
357 if (running_thread == nullptr) {
358 return;
359 }
360 308
361 // Lock the thread context. 309 // Lock the thread context.
362 auto* params = &running_thread->GetNativeExecutionParameters(); 310 auto* params = &thread->GetNativeExecutionParameters();
363 LockThreadParameters(params); 311 LockThreadParameters(params);
364 312
365 if (params->is_running) { 313 if (params->is_running) {
366 // We should signal to the running thread. 314 // We should signal to the running thread.
367 // The running thread will unlock the thread context. 315 // The running thread will unlock the thread context.
368 syscall(SYS_tkill, thread_id, BreakFromRunCodeSignal); 316 syscall(SYS_tkill, m_thread_id, BreakFromRunCodeSignal);
369 } else { 317 } else {
370 // If the thread is no longer running, we have nothing to do. 318 // If the thread is no longer running, we have nothing to do.
371 UnlockThreadParameters(params); 319 UnlockThreadParameters(params);
372 } 320 }
373} 321}
374 322
375void ARM_NCE::ClearInterrupt() { 323void ArmNce::ClearInstructionCache() {
376 guest_ctx.esr_el1 = {};
377}
378
379void ARM_NCE::ClearInstructionCache() {
380 // TODO: This is not possible to implement correctly on Linux because 324 // TODO: This is not possible to implement correctly on Linux because
381 // we do not have any access to ic iallu. 325 // we do not have any access to ic iallu.
382 326
@@ -384,17 +328,8 @@ void ARM_NCE::ClearInstructionCache() {
384 std::atomic_thread_fence(std::memory_order_seq_cst); 328 std::atomic_thread_fence(std::memory_order_seq_cst);
385} 329}
386 330
387void ARM_NCE::InvalidateCacheRange(u64 addr, std::size_t size) { 331void ArmNce::InvalidateCacheRange(u64 addr, std::size_t size) {
388 this->ClearInstructionCache(); 332 this->ClearInstructionCache();
389} 333}
390 334
391void ARM_NCE::ClearExclusiveState() {
392 // No-op.
393}
394
395void ARM_NCE::PageTableChanged(Common::PageTable& page_table,
396 std::size_t new_address_space_size_in_bits) {
397 // No-op. Page table is never used.
398}
399
400} // namespace Core 335} // namespace Core
diff --git a/src/core/arm/nce/arm_nce.h b/src/core/arm/nce/arm_nce.h
index 5fbd6dbf3..f55c10d1d 100644
--- a/src/core/arm/nce/arm_nce.h
+++ b/src/core/arm/nce/arm_nce.h
@@ -3,11 +3,7 @@
3 3
4#pragma once 4#pragma once
5 5
6#include <atomic> 6#include <mutex>
7#include <memory>
8#include <span>
9#include <unordered_map>
10#include <vector>
11 7
12#include "core/arm/arm_interface.h" 8#include "core/arm/arm_interface.h"
13#include "core/arm/nce/guest_context.h" 9#include "core/arm/nce/guest_context.h"
@@ -20,51 +16,36 @@ namespace Core {
20 16
21class System; 17class System;
22 18
23class ARM_NCE final : public ARM_Interface { 19class ArmNce final : public ArmInterface {
24public: 20public:
25 ARM_NCE(System& system_, bool uses_wall_clock_, std::size_t core_index_); 21 ArmNce(System& system, bool uses_wall_clock, std::size_t core_index);
26 22 ~ArmNce() override;
27 ~ARM_NCE() override;
28 23
29 void Initialize() override; 24 void Initialize() override;
30 void SetPC(u64 pc) override;
31 u64 GetPC() const override;
32 u64 GetSP() const override;
33 u64 GetReg(int index) const override;
34 void SetReg(int index, u64 value) override;
35 u128 GetVectorReg(int index) const override;
36 void SetVectorReg(int index, u128 value) override;
37
38 u32 GetPSTATE() const override;
39 void SetPSTATE(u32 pstate) override;
40 u64 GetTlsAddress() const override;
41 void SetTlsAddress(u64 address) override;
42 void SetTPIDR_EL0(u64 value) override;
43 u64 GetTPIDR_EL0() const override;
44 25
45 Architecture GetArchitecture() const override { 26 Architecture GetArchitecture() const override {
46 return Architecture::Aarch64; 27 return Architecture::AArch64;
47 } 28 }
48 29
49 void SaveContext(ThreadContext32& ctx) const override {} 30 HaltReason RunThread(Kernel::KThread* thread) override;
50 void SaveContext(ThreadContext64& ctx) const override; 31 HaltReason StepThread(Kernel::KThread* thread) override;
51 void LoadContext(const ThreadContext32& ctx) override {} 32
52 void LoadContext(const ThreadContext64& ctx) override; 33 void GetContext(Kernel::Svc::ThreadContext& ctx) const override;
34 void SetContext(const Kernel::Svc::ThreadContext& ctx) override;
35 void SetTpidrroEl0(u64 value) override;
53 36
54 void SignalInterrupt() override; 37 void GetSvcArguments(std::span<uint64_t, 8> args) const override;
55 void ClearInterrupt() override; 38 void SetSvcArguments(std::span<const uint64_t, 8> args) override;
56 void ClearExclusiveState() override; 39 u32 GetSvcNumber() const override;
40
41 void SignalInterrupt(Kernel::KThread* thread) override;
57 void ClearInstructionCache() override; 42 void ClearInstructionCache() override;
58 void InvalidateCacheRange(u64 addr, std::size_t size) override; 43 void InvalidateCacheRange(u64 addr, std::size_t size) override;
59 void PageTableChanged(Common::PageTable& new_page_table,
60 std::size_t new_address_space_size_in_bits) override;
61
62protected:
63 HaltReason RunJit() override;
64 HaltReason StepJit() override;
65 44
66 u32 GetSvcNumber() const override; 45 void LockThread(Kernel::KThread* thread) override;
46 void UnlockThread(Kernel::KThread* thread) override;
67 47
48protected:
68 const Kernel::DebugWatchpoint* HaltedWatchpoint() const override { 49 const Kernel::DebugWatchpoint* HaltedWatchpoint() const override {
69 return nullptr; 50 return nullptr;
70 } 51 }
@@ -93,16 +74,15 @@ private:
93 static void HandleHostFault(int sig, void* info, void* raw_context); 74 static void HandleHostFault(int sig, void* info, void* raw_context);
94 75
95public: 76public:
77 Core::System& m_system;
78
96 // Members set on initialization. 79 // Members set on initialization.
97 std::size_t core_index{}; 80 std::size_t m_core_index{};
98 pid_t thread_id{-1}; 81 pid_t m_thread_id{-1};
99 82
100 // Core context. 83 // Core context.
101 GuestContext guest_ctx; 84 GuestContext m_guest_ctx{};
102 85 Kernel::KThread* m_running_thread{};
103 // Thread and invalidation info.
104 std::mutex lock;
105 Kernel::KThread* running_thread{};
106}; 86};
107 87
108} // namespace Core 88} // namespace Core
diff --git a/src/core/arm/nce/arm_nce.s b/src/core/arm/nce/arm_nce.s
index b98e09f31..4aeda4740 100644
--- a/src/core/arm/nce/arm_nce.s
+++ b/src/core/arm/nce/arm_nce.s
@@ -8,11 +8,11 @@
8 movk reg, #(((val) >> 0x10) & 0xFFFF), lsl #16 8 movk reg, #(((val) >> 0x10) & 0xFFFF), lsl #16
9 9
10 10
11/* static HaltReason Core::ARM_NCE::ReturnToRunCodeByTrampoline(void* tpidr, Core::GuestContext* ctx, u64 trampoline_addr) */ 11/* static HaltReason Core::ArmNce::ReturnToRunCodeByTrampoline(void* tpidr, Core::GuestContext* ctx, u64 trampoline_addr) */
12.section .text._ZN4Core7ARM_NCE27ReturnToRunCodeByTrampolineEPvPNS_12GuestContextEm, "ax", %progbits 12.section .text._ZN4Core6ArmNce27ReturnToRunCodeByTrampolineEPvPNS_12GuestContextEm, "ax", %progbits
13.global _ZN4Core7ARM_NCE27ReturnToRunCodeByTrampolineEPvPNS_12GuestContextEm 13.global _ZN4Core6ArmNce27ReturnToRunCodeByTrampolineEPvPNS_12GuestContextEm
14.type _ZN4Core7ARM_NCE27ReturnToRunCodeByTrampolineEPvPNS_12GuestContextEm, %function 14.type _ZN4Core6ArmNce27ReturnToRunCodeByTrampolineEPvPNS_12GuestContextEm, %function
15_ZN4Core7ARM_NCE27ReturnToRunCodeByTrampolineEPvPNS_12GuestContextEm: 15_ZN4Core6ArmNce27ReturnToRunCodeByTrampolineEPvPNS_12GuestContextEm:
16 /* Back up host sp to x3. */ 16 /* Back up host sp to x3. */
17 /* Back up host tpidr_el0 to x4. */ 17 /* Back up host tpidr_el0 to x4. */
18 mov x3, sp 18 mov x3, sp
@@ -49,11 +49,11 @@ _ZN4Core7ARM_NCE27ReturnToRunCodeByTrampolineEPvPNS_12GuestContextEm:
49 br x2 49 br x2
50 50
51 51
52/* static HaltReason Core::ARM_NCE::ReturnToRunCodeByExceptionLevelChange(int tid, void* tpidr) */ 52/* static HaltReason Core::ArmNce::ReturnToRunCodeByExceptionLevelChange(int tid, void* tpidr) */
53.section .text._ZN4Core7ARM_NCE37ReturnToRunCodeByExceptionLevelChangeEiPv, "ax", %progbits 53.section .text._ZN4Core6ArmNce37ReturnToRunCodeByExceptionLevelChangeEiPv, "ax", %progbits
54.global _ZN4Core7ARM_NCE37ReturnToRunCodeByExceptionLevelChangeEiPv 54.global _ZN4Core6ArmNce37ReturnToRunCodeByExceptionLevelChangeEiPv
55.type _ZN4Core7ARM_NCE37ReturnToRunCodeByExceptionLevelChangeEiPv, %function 55.type _ZN4Core6ArmNce37ReturnToRunCodeByExceptionLevelChangeEiPv, %function
56_ZN4Core7ARM_NCE37ReturnToRunCodeByExceptionLevelChangeEiPv: 56_ZN4Core6ArmNce37ReturnToRunCodeByExceptionLevelChangeEiPv:
57 /* This jumps to the signal handler, which will restore the entire context. */ 57 /* This jumps to the signal handler, which will restore the entire context. */
58 /* On entry, x0 = thread id, which is already in the right place. */ 58 /* On entry, x0 = thread id, which is already in the right place. */
59 59
@@ -71,17 +71,17 @@ _ZN4Core7ARM_NCE37ReturnToRunCodeByExceptionLevelChangeEiPv:
71 brk #1000 71 brk #1000
72 72
73 73
74/* static void Core::ARM_NCE::ReturnToRunCodeByExceptionLevelChangeSignalHandler(int sig, void* info, void* raw_context) */ 74/* static void Core::ArmNce::ReturnToRunCodeByExceptionLevelChangeSignalHandler(int sig, void* info, void* raw_context) */
75.section .text._ZN4Core7ARM_NCE50ReturnToRunCodeByExceptionLevelChangeSignalHandlerEiPvS1_, "ax", %progbits 75.section .text._ZN4Core6ArmNce50ReturnToRunCodeByExceptionLevelChangeSignalHandlerEiPvS1_, "ax", %progbits
76.global _ZN4Core7ARM_NCE50ReturnToRunCodeByExceptionLevelChangeSignalHandlerEiPvS1_ 76.global _ZN4Core6ArmNce50ReturnToRunCodeByExceptionLevelChangeSignalHandlerEiPvS1_
77.type _ZN4Core7ARM_NCE50ReturnToRunCodeByExceptionLevelChangeSignalHandlerEiPvS1_, %function 77.type _ZN4Core6ArmNce50ReturnToRunCodeByExceptionLevelChangeSignalHandlerEiPvS1_, %function
78_ZN4Core7ARM_NCE50ReturnToRunCodeByExceptionLevelChangeSignalHandlerEiPvS1_: 78_ZN4Core6ArmNce50ReturnToRunCodeByExceptionLevelChangeSignalHandlerEiPvS1_:
79 stp x29, x30, [sp, #-0x10]! 79 stp x29, x30, [sp, #-0x10]!
80 mov x29, sp 80 mov x29, sp
81 81
82 /* Call the context restorer with the raw context. */ 82 /* Call the context restorer with the raw context. */
83 mov x0, x2 83 mov x0, x2
84 bl _ZN4Core7ARM_NCE19RestoreGuestContextEPv 84 bl _ZN4Core6ArmNce19RestoreGuestContextEPv
85 85
86 /* Save the old value of tpidr_el0. */ 86 /* Save the old value of tpidr_el0. */
87 mrs x8, tpidr_el0 87 mrs x8, tpidr_el0
@@ -92,18 +92,18 @@ _ZN4Core7ARM_NCE50ReturnToRunCodeByExceptionLevelChangeSignalHandlerEiPvS1_:
92 msr tpidr_el0, x0 92 msr tpidr_el0, x0
93 93
94 /* Unlock the context. */ 94 /* Unlock the context. */
95 bl _ZN4Core7ARM_NCE22UnlockThreadParametersEPv 95 bl _ZN4Core6ArmNce22UnlockThreadParametersEPv
96 96
97 /* Returning from here will enter the guest. */ 97 /* Returning from here will enter the guest. */
98 ldp x29, x30, [sp], #0x10 98 ldp x29, x30, [sp], #0x10
99 ret 99 ret
100 100
101 101
102/* static void Core::ARM_NCE::BreakFromRunCodeSignalHandler(int sig, void* info, void* raw_context) */ 102/* static void Core::ArmNce::BreakFromRunCodeSignalHandler(int sig, void* info, void* raw_context) */
103.section .text._ZN4Core7ARM_NCE29BreakFromRunCodeSignalHandlerEiPvS1_, "ax", %progbits 103.section .text._ZN4Core6ArmNce29BreakFromRunCodeSignalHandlerEiPvS1_, "ax", %progbits
104.global _ZN4Core7ARM_NCE29BreakFromRunCodeSignalHandlerEiPvS1_ 104.global _ZN4Core6ArmNce29BreakFromRunCodeSignalHandlerEiPvS1_
105.type _ZN4Core7ARM_NCE29BreakFromRunCodeSignalHandlerEiPvS1_, %function 105.type _ZN4Core6ArmNce29BreakFromRunCodeSignalHandlerEiPvS1_, %function
106_ZN4Core7ARM_NCE29BreakFromRunCodeSignalHandlerEiPvS1_: 106_ZN4Core6ArmNce29BreakFromRunCodeSignalHandlerEiPvS1_:
107 /* Check to see if we have the correct TLS magic. */ 107 /* Check to see if we have the correct TLS magic. */
108 mrs x8, tpidr_el0 108 mrs x8, tpidr_el0
109 ldr w9, [x8, #(TpidrEl0TlsMagic)] 109 ldr w9, [x8, #(TpidrEl0TlsMagic)]
@@ -121,7 +121,7 @@ _ZN4Core7ARM_NCE29BreakFromRunCodeSignalHandlerEiPvS1_:
121 121
122 /* Tail call the restorer. */ 122 /* Tail call the restorer. */
123 mov x1, x2 123 mov x1, x2
124 b _ZN4Core7ARM_NCE16SaveGuestContextEPNS_12GuestContextEPv 124 b _ZN4Core6ArmNce16SaveGuestContextEPNS_12GuestContextEPv
125 125
126 /* Returning from here will enter host code. */ 126 /* Returning from here will enter host code. */
127 127
@@ -130,11 +130,11 @@ _ZN4Core7ARM_NCE29BreakFromRunCodeSignalHandlerEiPvS1_:
130 ret 130 ret
131 131
132 132
133/* static void Core::ARM_NCE::GuestFaultSignalHandler(int sig, void* info, void* raw_context) */ 133/* static void Core::ArmNce::GuestFaultSignalHandler(int sig, void* info, void* raw_context) */
134.section .text._ZN4Core7ARM_NCE23GuestFaultSignalHandlerEiPvS1_, "ax", %progbits 134.section .text._ZN4Core6ArmNce23GuestFaultSignalHandlerEiPvS1_, "ax", %progbits
135.global _ZN4Core7ARM_NCE23GuestFaultSignalHandlerEiPvS1_ 135.global _ZN4Core6ArmNce23GuestFaultSignalHandlerEiPvS1_
136.type _ZN4Core7ARM_NCE23GuestFaultSignalHandlerEiPvS1_, %function 136.type _ZN4Core6ArmNce23GuestFaultSignalHandlerEiPvS1_, %function
137_ZN4Core7ARM_NCE23GuestFaultSignalHandlerEiPvS1_: 137_ZN4Core6ArmNce23GuestFaultSignalHandlerEiPvS1_:
138 /* Check to see if we have the correct TLS magic. */ 138 /* Check to see if we have the correct TLS magic. */
139 mrs x8, tpidr_el0 139 mrs x8, tpidr_el0
140 ldr w9, [x8, #(TpidrEl0TlsMagic)] 140 ldr w9, [x8, #(TpidrEl0TlsMagic)]
@@ -146,7 +146,7 @@ _ZN4Core7ARM_NCE23GuestFaultSignalHandlerEiPvS1_:
146 146
147 /* Incorrect TLS magic, so this is a host fault. */ 147 /* Incorrect TLS magic, so this is a host fault. */
148 /* Tail call the handler. */ 148 /* Tail call the handler. */
149 b _ZN4Core7ARM_NCE15HandleHostFaultEiPvS1_ 149 b _ZN4Core6ArmNce15HandleHostFaultEiPvS1_
150 150
1511: 1511:
152 /* Correct TLS magic, so this is a guest fault. */ 152 /* Correct TLS magic, so this is a guest fault. */
@@ -163,7 +163,7 @@ _ZN4Core7ARM_NCE23GuestFaultSignalHandlerEiPvS1_:
163 msr tpidr_el0, x3 163 msr tpidr_el0, x3
164 164
165 /* Call the handler. */ 165 /* Call the handler. */
166 bl _ZN4Core7ARM_NCE16HandleGuestFaultEPNS_12GuestContextEPvS3_ 166 bl _ZN4Core6ArmNce16HandleGuestFaultEPNS_12GuestContextEPvS3_
167 167
168 /* If the handler returned false, we want to preserve the host tpidr_el0. */ 168 /* If the handler returned false, we want to preserve the host tpidr_el0. */
169 cbz x0, 2f 169 cbz x0, 2f
@@ -177,11 +177,11 @@ _ZN4Core7ARM_NCE23GuestFaultSignalHandlerEiPvS1_:
177 ret 177 ret
178 178
179 179
180/* static void Core::ARM_NCE::LockThreadParameters(void* tpidr) */ 180/* static void Core::ArmNce::LockThreadParameters(void* tpidr) */
181.section .text._ZN4Core7ARM_NCE20LockThreadParametersEPv, "ax", %progbits 181.section .text._ZN4Core6ArmNce20LockThreadParametersEPv, "ax", %progbits
182.global _ZN4Core7ARM_NCE20LockThreadParametersEPv 182.global _ZN4Core6ArmNce20LockThreadParametersEPv
183.type _ZN4Core7ARM_NCE20LockThreadParametersEPv, %function 183.type _ZN4Core6ArmNce20LockThreadParametersEPv, %function
184_ZN4Core7ARM_NCE20LockThreadParametersEPv: 184_ZN4Core6ArmNce20LockThreadParametersEPv:
185 /* Offset to lock member. */ 185 /* Offset to lock member. */
186 add x0, x0, #(TpidrEl0Lock) 186 add x0, x0, #(TpidrEl0Lock)
187 187
@@ -205,11 +205,11 @@ _ZN4Core7ARM_NCE20LockThreadParametersEPv:
205 ret 205 ret
206 206
207 207
208/* static void Core::ARM_NCE::UnlockThreadParameters(void* tpidr) */ 208/* static void Core::ArmNce::UnlockThreadParameters(void* tpidr) */
209.section .text._ZN4Core7ARM_NCE22UnlockThreadParametersEPv, "ax", %progbits 209.section .text._ZN4Core6ArmNce22UnlockThreadParametersEPv, "ax", %progbits
210.global _ZN4Core7ARM_NCE22UnlockThreadParametersEPv 210.global _ZN4Core6ArmNce22UnlockThreadParametersEPv
211.type _ZN4Core7ARM_NCE22UnlockThreadParametersEPv, %function 211.type _ZN4Core6ArmNce22UnlockThreadParametersEPv, %function
212_ZN4Core7ARM_NCE22UnlockThreadParametersEPv: 212_ZN4Core6ArmNce22UnlockThreadParametersEPv:
213 /* Offset to lock member. */ 213 /* Offset to lock member. */
214 add x0, x0, #(TpidrEl0Lock) 214 add x0, x0, #(TpidrEl0Lock)
215 215
diff --git a/src/core/arm/nce/guest_context.h b/src/core/arm/nce/guest_context.h
index 0767a0337..a7eadccce 100644
--- a/src/core/arm/nce/guest_context.h
+++ b/src/core/arm/nce/guest_context.h
@@ -3,6 +3,8 @@
3 3
4#pragma once 4#pragma once
5 5
6#include <atomic>
7
6#include "common/common_funcs.h" 8#include "common/common_funcs.h"
7#include "common/common_types.h" 9#include "common/common_types.h"
8#include "core/arm/arm_interface.h" 10#include "core/arm/arm_interface.h"
@@ -10,7 +12,7 @@
10 12
11namespace Core { 13namespace Core {
12 14
13class ARM_NCE; 15class ArmNce;
14class System; 16class System;
15 17
16struct HostContext { 18struct HostContext {
@@ -33,9 +35,9 @@ struct GuestContext {
33 u64 tpidr_el0{}; 35 u64 tpidr_el0{};
34 std::atomic<u64> esr_el1{}; 36 std::atomic<u64> esr_el1{};
35 u32 nzcv{}; 37 u32 nzcv{};
36 u32 svc_swi{}; 38 u32 svc{};
37 System* system{}; 39 System* system{};
38 ARM_NCE* parent{}; 40 ArmNce* parent{};
39}; 41};
40 42
41// Verify assembly offsets. 43// Verify assembly offsets.
diff --git a/src/core/arm/nce/patcher.cpp b/src/core/arm/nce/patcher.cpp
index bdaa3af49..47a7a8880 100644
--- a/src/core/arm/nce/patcher.cpp
+++ b/src/core/arm/nce/patcher.cpp
@@ -280,7 +280,7 @@ void Patcher::WriteSvcTrampoline(ModuleDestLabel module_dest, u32 svc_id) {
280 280
281 // Store SVC number to execute when we return 281 // Store SVC number to execute when we return
282 c.MOV(X2, svc_id); 282 c.MOV(X2, svc_id);
283 c.STR(W2, X1, offsetof(GuestContext, svc_swi)); 283 c.STR(W2, X1, offsetof(GuestContext, svc));
284 284
285 // We are calling a SVC. Clear esr_el1 and return it. 285 // We are calling a SVC. Clear esr_el1 and return it.
286 static_assert(std::is_same_v<std::underlying_type_t<HaltReason>, u64>); 286 static_assert(std::is_same_v<std::underlying_type_t<HaltReason>, u64>);