| author | 2023-12-06 14:19:17 +0100 |
|---|---|
| committer | 2023-12-06 14:19:17 +0100 |
| commit | 8a79dd2d6c6445bff63ea1f2f5f1611a6afcd97a (patch) |
| tree | 265bf3c7970a570479c6a3ac1250549995f0329c /src/core/arm/nce |
| parent | Merge pull request #12271 from liamwhite/pretext-fix (diff) |
| parent | arm: fix context save of vector regs (diff) |
Merge pull request #12236 from liamwhite/cpu-refactor
core: refactor emulated cpu core activation
Diffstat (limited to 'src/core/arm/nce')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | src/core/arm/nce/arm_nce.cpp | 255 |
| -rw-r--r-- | src/core/arm/nce/arm_nce.h | 70 |
| -rw-r--r-- | src/core/arm/nce/arm_nce.s | 80 |
| -rw-r--r-- | src/core/arm/nce/guest_context.h | 8 |
| -rw-r--r-- | src/core/arm/nce/patcher.cpp | 2 |
5 files changed, 166 insertions, 249 deletions
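The net effect of the refactor is visible in the diffs below: the backend no longer looks up the current thread itself (the old `RunJit` called `Kernel::GetCurrentThreadPointer`) and no longer guards per-core state with a `std::mutex`; instead the kernel passes the `Kernel::KThread*` explicitly into `RunThread`, `StepThread`, `LockThread`, `UnlockThread`, and `SignalInterrupt`. A minimal caller-side sketch of that shape follows; the surrounding scheduler code is not part of this diff, so the function name and calling pattern here are assumptions for illustration only.

```cpp
// Hypothetical caller-side sketch (assumed, not taken from this diff): the kernel
// now owns thread selection and locking, and hands the thread to the CPU backend.
#include "core/arm/nce/arm_nce.h"
#include "core/hle/kernel/k_thread.h"

Core::HaltReason RunGuestOnce(Core::ArmNce& cpu, Kernel::KThread* thread) {
    cpu.LockThread(thread);                    // lock the thread's native execution parameters
    const auto reason = cpu.RunThread(thread); // run guest code until a halt condition is raised
    cpu.UnlockThread(thread);                  // release the per-thread spinlock
    return reason;                             // e.g. HaltReason::BreakLoop after SignalInterrupt
}
```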
```diff
diff --git a/src/core/arm/nce/arm_nce.cpp b/src/core/arm/nce/arm_nce.cpp
index f7bdafd39..b42a32a0b 100644
--- a/src/core/arm/nce/arm_nce.cpp
+++ b/src/core/arm/nce/arm_nce.cpp
@@ -6,6 +6,7 @@
 
 #include "common/signal_chain.h"
 #include "core/arm/nce/arm_nce.h"
+#include "core/arm/nce/guest_context.h"
 #include "core/arm/nce/patcher.h"
 #include "core/core.h"
 #include "core/memory.h"
@@ -38,7 +39,7 @@ fpsimd_context* GetFloatingPointState(mcontext_t& host_ctx) {
 
 } // namespace
 
-void* ARM_NCE::RestoreGuestContext(void* raw_context) {
+void* ArmNce::RestoreGuestContext(void* raw_context) {
     // Retrieve the host context.
     auto& host_ctx = static_cast<ucontext_t*>(raw_context)->uc_mcontext;
 
@@ -71,7 +72,7 @@ void* ARM_NCE::RestoreGuestContext(void* raw_context) {
     return tpidr;
 }
 
-void ARM_NCE::SaveGuestContext(GuestContext* guest_ctx, void* raw_context) {
+void ArmNce::SaveGuestContext(GuestContext* guest_ctx, void* raw_context) {
     // Retrieve the host context.
     auto& host_ctx = static_cast<ucontext_t*>(raw_context)->uc_mcontext;
 
@@ -103,7 +104,7 @@ void ARM_NCE::SaveGuestContext(GuestContext* guest_ctx, void* raw_context) {
     host_ctx.regs[0] = guest_ctx->esr_el1.exchange(0);
 }
 
-bool ARM_NCE::HandleGuestFault(GuestContext* guest_ctx, void* raw_info, void* raw_context) {
+bool ArmNce::HandleGuestFault(GuestContext* guest_ctx, void* raw_info, void* raw_context) {
     auto& host_ctx = static_cast<ucontext_t*>(raw_context)->uc_mcontext;
     auto* info = static_cast<siginfo_t*>(raw_info);
 
@@ -134,7 +135,7 @@ bool ARM_NCE::HandleGuestFault(GuestContext* guest_ctx, void* raw_info, void* ra
     // - If we lose the race, then SignalInterrupt will send us a signal we are masking,
     //   and it will do nothing when it is unmasked, as we have already left guest code.
     // - If we win the race, then SignalInterrupt will wait for us to unlock first.
-    auto& thread_params = guest_ctx->parent->running_thread->GetNativeExecutionParameters();
+    auto& thread_params = guest_ctx->parent->m_running_thread->GetNativeExecutionParameters();
     thread_params.lock.store(SpinLockLocked);
 
     // Return to host.
@@ -142,97 +143,93 @@ bool ARM_NCE::HandleGuestFault(GuestContext* guest_ctx, void* raw_info, void* ra
     return false;
 }
 
-void ARM_NCE::HandleHostFault(int sig, void* raw_info, void* raw_context) {
+void ArmNce::HandleHostFault(int sig, void* raw_info, void* raw_context) {
     return g_orig_action.sa_sigaction(sig, static_cast<siginfo_t*>(raw_info), raw_context);
 }
 
-HaltReason ARM_NCE::RunJit() {
-    // Get the thread parameters.
-    // TODO: pass the current thread down from ::Run
-    auto* thread = Kernel::GetCurrentThreadPointer(system.Kernel());
+void ArmNce::LockThread(Kernel::KThread* thread) {
     auto* thread_params = &thread->GetNativeExecutionParameters();
+    LockThreadParameters(thread_params);
+}
 
-    {
-        // Lock our core context.
-        std::scoped_lock lk{lock};
-
-        // We should not be running.
-        ASSERT(running_thread == nullptr);
-
-        // Check if we need to run. If we have already been halted, we are done.
-        u64 halt = guest_ctx.esr_el1.exchange(0);
-        if (halt != 0) {
-            return static_cast<HaltReason>(halt);
-        }
-
-        // Mark that we are running.
-        running_thread = thread;
+void ArmNce::UnlockThread(Kernel::KThread* thread) {
+    auto* thread_params = &thread->GetNativeExecutionParameters();
+    UnlockThreadParameters(thread_params);
+}
 
-        // Acquire the lock on the thread parameters.
-        // This allows us to force synchronization with SignalInterrupt.
-        LockThreadParameters(thread_params);
+HaltReason ArmNce::RunThread(Kernel::KThread* thread) {
+    // Check if we're already interrupted.
+    // If we are, we can just return immediately.
+    HaltReason hr = static_cast<HaltReason>(m_guest_ctx.esr_el1.exchange(0));
+    if (True(hr)) {
+        return hr;
     }
 
+    // Get the thread context.
+    auto* thread_params = &thread->GetNativeExecutionParameters();
+    auto* process = thread->GetOwnerProcess();
+
     // Assign current members.
-    guest_ctx.parent = this;
-    thread_params->native_context = &guest_ctx;
-    thread_params->tpidr_el0 = guest_ctx.tpidr_el0;
-    thread_params->tpidrro_el0 = guest_ctx.tpidrro_el0;
+    m_running_thread = thread;
+    m_guest_ctx.parent = this;
+    thread_params->native_context = &m_guest_ctx;
+    thread_params->tpidr_el0 = m_guest_ctx.tpidr_el0;
+    thread_params->tpidrro_el0 = m_guest_ctx.tpidrro_el0;
     thread_params->is_running = true;
 
-    HaltReason halt{};
-
     // TODO: finding and creating the post handler needs to be locked
     // to deal with dynamic loading of NROs.
-    const auto& post_handlers = system.ApplicationProcess()->GetPostHandlers();
-    if (auto it = post_handlers.find(guest_ctx.pc); it != post_handlers.end()) {
-        halt = ReturnToRunCodeByTrampoline(thread_params, &guest_ctx, it->second);
+    const auto& post_handlers = process->GetPostHandlers();
+    if (auto it = post_handlers.find(m_guest_ctx.pc); it != post_handlers.end()) {
+        hr = ReturnToRunCodeByTrampoline(thread_params, &m_guest_ctx, it->second);
     } else {
-        halt = ReturnToRunCodeByExceptionLevelChange(thread_id, thread_params);
+        hr = ReturnToRunCodeByExceptionLevelChange(m_thread_id, thread_params);
     }
 
     // Unload members.
     // The thread does not change, so we can persist the old reference.
-    guest_ctx.tpidr_el0 = thread_params->tpidr_el0;
+    m_running_thread = nullptr;
+    m_guest_ctx.tpidr_el0 = thread_params->tpidr_el0;
     thread_params->native_context = nullptr;
     thread_params->is_running = false;
 
-    // Unlock the thread parameters.
-    UnlockThreadParameters(thread_params);
-
-    {
-        // Lock the core context.
-        std::scoped_lock lk{lock};
-
-        // On exit, we no longer have an active thread.
-        running_thread = nullptr;
-    }
-
     // Return the halt reason.
-    return halt;
+    return hr;
 }
 
-HaltReason ARM_NCE::StepJit() {
+HaltReason ArmNce::StepThread(Kernel::KThread* thread) {
     return HaltReason::StepThread;
 }
 
-u32 ARM_NCE::GetSvcNumber() const {
-    return guest_ctx.svc_swi;
+u32 ArmNce::GetSvcNumber() const {
+    return m_guest_ctx.svc;
+}
+
+void ArmNce::GetSvcArguments(std::span<uint64_t, 8> args) const {
+    for (size_t i = 0; i < 8; i++) {
+        args[i] = m_guest_ctx.cpu_registers[i];
+    }
+}
+
+void ArmNce::SetSvcArguments(std::span<const uint64_t, 8> args) {
+    for (size_t i = 0; i < 8; i++) {
+        m_guest_ctx.cpu_registers[i] = args[i];
+    }
 }
 
-ARM_NCE::ARM_NCE(System& system_, bool uses_wall_clock_, std::size_t core_index_)
-    : ARM_Interface{system_, uses_wall_clock_}, core_index{core_index_} {
-    guest_ctx.system = &system_;
+ArmNce::ArmNce(System& system, bool uses_wall_clock, std::size_t core_index)
+    : ArmInterface{uses_wall_clock}, m_system{system}, m_core_index{core_index} {
+    m_guest_ctx.system = &m_system;
 }
 
-ARM_NCE::~ARM_NCE() = default;
+ArmNce::~ArmNce() = default;
 
-void ARM_NCE::Initialize() {
-    thread_id = gettid();
+void ArmNce::Initialize() {
+    m_thread_id = gettid();
 
     // Setup our signals
-    static std::once_flag flag;
-    std::call_once(flag, [] {
+    static std::once_flag signals;
+    std::call_once(signals, [] {
         using HandlerType = decltype(sigaction::sa_sigaction);
 
         sigset_t signal_mask;
@@ -244,7 +241,7 @@ void ARM_NCE::Initialize() {
         struct sigaction return_to_run_code_action {};
         return_to_run_code_action.sa_flags = SA_SIGINFO | SA_ONSTACK;
         return_to_run_code_action.sa_sigaction = reinterpret_cast<HandlerType>(
-            &ARM_NCE::ReturnToRunCodeByExceptionLevelChangeSignalHandler);
+            &ArmNce::ReturnToRunCodeByExceptionLevelChangeSignalHandler);
         return_to_run_code_action.sa_mask = signal_mask;
         Common::SigAction(ReturnToRunCodeByExceptionLevelChangeSignal, &return_to_run_code_action,
                           nullptr);
@@ -252,14 +249,13 @@ void ARM_NCE::Initialize() {
         struct sigaction break_from_run_code_action {};
         break_from_run_code_action.sa_flags = SA_SIGINFO | SA_ONSTACK;
         break_from_run_code_action.sa_sigaction =
-            reinterpret_cast<HandlerType>(&ARM_NCE::BreakFromRunCodeSignalHandler);
+            reinterpret_cast<HandlerType>(&ArmNce::BreakFromRunCodeSignalHandler);
         break_from_run_code_action.sa_mask = signal_mask;
         Common::SigAction(BreakFromRunCodeSignal, &break_from_run_code_action, nullptr);
 
         struct sigaction fault_action {};
         fault_action.sa_flags = SA_SIGINFO | SA_ONSTACK | SA_RESTART;
-        fault_action.sa_sigaction =
-            reinterpret_cast<HandlerType>(&ARM_NCE::GuestFaultSignalHandler);
+        fault_action.sa_sigaction = reinterpret_cast<HandlerType>(&ArmNce::GuestFaultSignalHandler);
         fault_action.sa_mask = signal_mask;
         Common::SigAction(GuestFaultSignal, &fault_action, &g_orig_action);
 
@@ -272,111 +268,59 @@ void ARM_NCE::Initialize() {
     });
 }
 
-void ARM_NCE::SetPC(u64 pc) {
-    guest_ctx.pc = pc;
+void ArmNce::SetTpidrroEl0(u64 value) {
+    m_guest_ctx.tpidrro_el0 = value;
 }
 
-u64 ARM_NCE::GetPC() const {
-    return guest_ctx.pc;
-}
-
-u64 ARM_NCE::GetSP() const {
-    return guest_ctx.sp;
-}
-
-u64 ARM_NCE::GetReg(int index) const {
-    return guest_ctx.cpu_registers[index];
-}
-
-void ARM_NCE::SetReg(int index, u64 value) {
-    guest_ctx.cpu_registers[index] = value;
-}
-
-u128 ARM_NCE::GetVectorReg(int index) const {
-    return guest_ctx.vector_registers[index];
-}
-
-void ARM_NCE::SetVectorReg(int index, u128 value) {
-    guest_ctx.vector_registers[index] = value;
-}
-
-u32 ARM_NCE::GetPSTATE() const {
-    return guest_ctx.pstate;
-}
-
-void ARM_NCE::SetPSTATE(u32 pstate) {
-    guest_ctx.pstate = pstate;
-}
-
-u64 ARM_NCE::GetTlsAddress() const {
-    return guest_ctx.tpidrro_el0;
-}
-
-void ARM_NCE::SetTlsAddress(u64 address) {
-    guest_ctx.tpidrro_el0 = address;
-}
-
-u64 ARM_NCE::GetTPIDR_EL0() const {
-    return guest_ctx.tpidr_el0;
-}
-
-void ARM_NCE::SetTPIDR_EL0(u64 value) {
-    guest_ctx.tpidr_el0 = value;
-}
-
-void ARM_NCE::SaveContext(ThreadContext64& ctx) const {
-    ctx.cpu_registers = guest_ctx.cpu_registers;
-    ctx.sp = guest_ctx.sp;
-    ctx.pc = guest_ctx.pc;
-    ctx.pstate = guest_ctx.pstate;
-    ctx.vector_registers = guest_ctx.vector_registers;
-    ctx.fpcr = guest_ctx.fpcr;
-    ctx.fpsr = guest_ctx.fpsr;
-    ctx.tpidr = guest_ctx.tpidr_el0;
+void ArmNce::GetContext(Kernel::Svc::ThreadContext& ctx) const {
+    for (size_t i = 0; i < 29; i++) {
+        ctx.r[i] = m_guest_ctx.cpu_registers[i];
+    }
+    ctx.fp = m_guest_ctx.cpu_registers[29];
+    ctx.lr = m_guest_ctx.cpu_registers[30];
+    ctx.sp = m_guest_ctx.sp;
+    ctx.pc = m_guest_ctx.pc;
+    ctx.pstate = m_guest_ctx.pstate;
+    ctx.v = m_guest_ctx.vector_registers;
+    ctx.fpcr = m_guest_ctx.fpcr;
+    ctx.fpsr = m_guest_ctx.fpsr;
+    ctx.tpidr = m_guest_ctx.tpidr_el0;
 }
 
-void ARM_NCE::LoadContext(const ThreadContext64& ctx) {
-    guest_ctx.cpu_registers = ctx.cpu_registers;
-    guest_ctx.sp = ctx.sp;
-    guest_ctx.pc = ctx.pc;
-    guest_ctx.pstate = ctx.pstate;
-    guest_ctx.vector_registers = ctx.vector_registers;
-    guest_ctx.fpcr = ctx.fpcr;
-    guest_ctx.fpsr = ctx.fpsr;
-    guest_ctx.tpidr_el0 = ctx.tpidr;
+void ArmNce::SetContext(const Kernel::Svc::ThreadContext& ctx) {
+    for (size_t i = 0; i < 29; i++) {
+        m_guest_ctx.cpu_registers[i] = ctx.r[i];
+    }
+    m_guest_ctx.cpu_registers[29] = ctx.fp;
+    m_guest_ctx.cpu_registers[30] = ctx.lr;
+    m_guest_ctx.sp = ctx.sp;
+    m_guest_ctx.pc = ctx.pc;
+    m_guest_ctx.pstate = ctx.pstate;
+    m_guest_ctx.vector_registers = ctx.v;
+    m_guest_ctx.fpcr = ctx.fpcr;
+    m_guest_ctx.fpsr = ctx.fpsr;
+    m_guest_ctx.tpidr_el0 = ctx.tpidr;
 }
 
-void ARM_NCE::SignalInterrupt() {
-    // Lock core context.
-    std::scoped_lock lk{lock};
-
+void ArmNce::SignalInterrupt(Kernel::KThread* thread) {
     // Add break loop condition.
-    guest_ctx.esr_el1.fetch_or(static_cast<u64>(HaltReason::BreakLoop));
-
-    // If there is no thread running, we are done.
-    if (running_thread == nullptr) {
-        return;
-    }
+    m_guest_ctx.esr_el1.fetch_or(static_cast<u64>(HaltReason::BreakLoop));
 
     // Lock the thread context.
-    auto* params = &running_thread->GetNativeExecutionParameters();
+    auto* params = &thread->GetNativeExecutionParameters();
    LockThreadParameters(params);
 
     if (params->is_running) {
         // We should signal to the running thread.
         // The running thread will unlock the thread context.
-        syscall(SYS_tkill, thread_id, BreakFromRunCodeSignal);
+        syscall(SYS_tkill, m_thread_id, BreakFromRunCodeSignal);
     } else {
         // If the thread is no longer running, we have nothing to do.
         UnlockThreadParameters(params);
     }
 }
 
-void ARM_NCE::ClearInterrupt() {
-    guest_ctx.esr_el1 = {};
-}
-
-void ARM_NCE::ClearInstructionCache() {
+void ArmNce::ClearInstructionCache() {
     // TODO: This is not possible to implement correctly on Linux because
     // we do not have any access to ic iallu.
 
@@ -384,17 +328,8 @@ void ARM_NCE::ClearInstructionCache() {
     std::atomic_thread_fence(std::memory_order_seq_cst);
 }
 
-void ARM_NCE::InvalidateCacheRange(u64 addr, std::size_t size) {
+void ArmNce::InvalidateCacheRange(u64 addr, std::size_t size) {
     this->ClearInstructionCache();
 }
 
-void ARM_NCE::ClearExclusiveState() {
-    // No-op.
-}
-
-void ARM_NCE::PageTableChanged(Common::PageTable& page_table,
-                               std::size_t new_address_space_size_in_bits) {
-    // No-op. Page table is never used.
-}
-
 } // namespace Core
```
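The interrupt handshake above relies on `esr_el1` being an atomic bitmask of `HaltReason` flags: `SignalInterrupt` ORs in `BreakLoop`, and `RunThread` exchanges the field with zero before entering guest code. A self-contained sketch of that pattern (with illustrative flag values, not the project's actual ones):

```cpp
// Standalone sketch of the halt-reason handshake, assuming HaltReason is a u64
// bitmask as the static_assert in patcher.cpp implies. Flag values are illustrative.
#include <atomic>
#include <cstdint>

enum class HaltReason : std::uint64_t {
    StepThread = 1 << 0,
    BreakLoop = 1 << 1, // assumed values, for illustration only
};

std::atomic<std::uint64_t> esr_el1{0};

// SignalInterrupt side: request a break without blocking the running core.
void RequestBreak() {
    esr_el1.fetch_or(static_cast<std::uint64_t>(HaltReason::BreakLoop));
}

// RunThread side: consume any pending halt reason before entering guest code.
HaltReason ConsumePendingHalt() {
    return static_cast<HaltReason>(esr_el1.exchange(0)); // nonzero mirrors `True(hr)`
}
```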
```diff
diff --git a/src/core/arm/nce/arm_nce.h b/src/core/arm/nce/arm_nce.h
index 5fbd6dbf3..f55c10d1d 100644
--- a/src/core/arm/nce/arm_nce.h
+++ b/src/core/arm/nce/arm_nce.h
@@ -3,11 +3,7 @@
 
 #pragma once
 
-#include <atomic>
-#include <memory>
-#include <span>
-#include <unordered_map>
-#include <vector>
+#include <mutex>
 
 #include "core/arm/arm_interface.h"
 #include "core/arm/nce/guest_context.h"
@@ -20,51 +16,36 @@ namespace Core {
 
 class System;
 
-class ARM_NCE final : public ARM_Interface {
+class ArmNce final : public ArmInterface {
 public:
-    ARM_NCE(System& system_, bool uses_wall_clock_, std::size_t core_index_);
-
-    ~ARM_NCE() override;
+    ArmNce(System& system, bool uses_wall_clock, std::size_t core_index);
+    ~ArmNce() override;
 
     void Initialize() override;
-    void SetPC(u64 pc) override;
-    u64 GetPC() const override;
-    u64 GetSP() const override;
-    u64 GetReg(int index) const override;
-    void SetReg(int index, u64 value) override;
-    u128 GetVectorReg(int index) const override;
-    void SetVectorReg(int index, u128 value) override;
-
-    u32 GetPSTATE() const override;
-    void SetPSTATE(u32 pstate) override;
-    u64 GetTlsAddress() const override;
-    void SetTlsAddress(u64 address) override;
-    void SetTPIDR_EL0(u64 value) override;
-    u64 GetTPIDR_EL0() const override;
 
     Architecture GetArchitecture() const override {
-        return Architecture::Aarch64;
+        return Architecture::AArch64;
     }
 
-    void SaveContext(ThreadContext32& ctx) const override {}
-    void SaveContext(ThreadContext64& ctx) const override;
-    void LoadContext(const ThreadContext32& ctx) override {}
-    void LoadContext(const ThreadContext64& ctx) override;
+    HaltReason RunThread(Kernel::KThread* thread) override;
+    HaltReason StepThread(Kernel::KThread* thread) override;
+
+    void GetContext(Kernel::Svc::ThreadContext& ctx) const override;
+    void SetContext(const Kernel::Svc::ThreadContext& ctx) override;
+    void SetTpidrroEl0(u64 value) override;
 
-    void SignalInterrupt() override;
-    void ClearInterrupt() override;
-    void ClearExclusiveState() override;
+    void GetSvcArguments(std::span<uint64_t, 8> args) const override;
+    void SetSvcArguments(std::span<const uint64_t, 8> args) override;
+    u32 GetSvcNumber() const override;
+
+    void SignalInterrupt(Kernel::KThread* thread) override;
     void ClearInstructionCache() override;
     void InvalidateCacheRange(u64 addr, std::size_t size) override;
-    void PageTableChanged(Common::PageTable& new_page_table,
-                          std::size_t new_address_space_size_in_bits) override;
-
-protected:
-    HaltReason RunJit() override;
-    HaltReason StepJit() override;
 
-    u32 GetSvcNumber() const override;
+    void LockThread(Kernel::KThread* thread) override;
+    void UnlockThread(Kernel::KThread* thread) override;
 
+protected:
     const Kernel::DebugWatchpoint* HaltedWatchpoint() const override {
         return nullptr;
     }
@@ -93,16 +74,15 @@ private:
     static void HandleHostFault(int sig, void* info, void* raw_context);
 
 public:
+    Core::System& m_system;
+
     // Members set on initialization.
-    std::size_t core_index{};
-    pid_t thread_id{-1};
+    std::size_t m_core_index{};
+    pid_t m_thread_id{-1};
 
     // Core context.
-    GuestContext guest_ctx;
-
-    // Thread and invalidation info.
-    std::mutex lock;
-    Kernel::KThread* running_thread{};
+    GuestContext m_guest_ctx{};
+    Kernel::KThread* m_running_thread{};
 };
 
 } // namespace Core
```
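The header now exposes SVC state through fixed-extent spans rather than per-register getters. A hypothetical dispatcher-side sketch of how these accessors combine (the actual kernel call site is not part of this diff):

```cpp
// Hypothetical use of the new span-based SVC accessors; the dispatcher shown
// here is an assumption for illustration, only the ArmInterface calls are real.
#include <array>
#include <cstdint>

#include "core/arm/arm_interface.h"

void HandleSupervisorCallSketch(Core::ArmInterface& cpu) {
    std::array<std::uint64_t, 8> args{};
    cpu.GetSvcArguments(args);            // copy X0..X7 out of the guest context
    const std::uint32_t svc = cpu.GetSvcNumber();
    // ... dispatch on `svc`, writing results into args ...
    cpu.SetSvcArguments(args);            // write X0..X7 back before resuming the guest
}
```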
```diff
diff --git a/src/core/arm/nce/arm_nce.s b/src/core/arm/nce/arm_nce.s
index b98e09f31..4aeda4740 100644
--- a/src/core/arm/nce/arm_nce.s
+++ b/src/core/arm/nce/arm_nce.s
@@ -8,11 +8,11 @@
     movk reg, #(((val) >> 0x10) & 0xFFFF), lsl #16
 
 
-/* static HaltReason Core::ARM_NCE::ReturnToRunCodeByTrampoline(void* tpidr, Core::GuestContext* ctx, u64 trampoline_addr) */
-.section .text._ZN4Core7ARM_NCE27ReturnToRunCodeByTrampolineEPvPNS_12GuestContextEm, "ax", %progbits
-.global _ZN4Core7ARM_NCE27ReturnToRunCodeByTrampolineEPvPNS_12GuestContextEm
-.type _ZN4Core7ARM_NCE27ReturnToRunCodeByTrampolineEPvPNS_12GuestContextEm, %function
-_ZN4Core7ARM_NCE27ReturnToRunCodeByTrampolineEPvPNS_12GuestContextEm:
+/* static HaltReason Core::ArmNce::ReturnToRunCodeByTrampoline(void* tpidr, Core::GuestContext* ctx, u64 trampoline_addr) */
+.section .text._ZN4Core6ArmNce27ReturnToRunCodeByTrampolineEPvPNS_12GuestContextEm, "ax", %progbits
+.global _ZN4Core6ArmNce27ReturnToRunCodeByTrampolineEPvPNS_12GuestContextEm
+.type _ZN4Core6ArmNce27ReturnToRunCodeByTrampolineEPvPNS_12GuestContextEm, %function
+_ZN4Core6ArmNce27ReturnToRunCodeByTrampolineEPvPNS_12GuestContextEm:
     /* Back up host sp to x3. */
     /* Back up host tpidr_el0 to x4. */
     mov x3, sp
@@ -49,11 +49,11 @@ _ZN4Core7ARM_NCE27ReturnToRunCodeByTrampolineEPvPNS_12GuestContextEm:
     br x2
 
 
-/* static HaltReason Core::ARM_NCE::ReturnToRunCodeByExceptionLevelChange(int tid, void* tpidr) */
-.section .text._ZN4Core7ARM_NCE37ReturnToRunCodeByExceptionLevelChangeEiPv, "ax", %progbits
-.global _ZN4Core7ARM_NCE37ReturnToRunCodeByExceptionLevelChangeEiPv
-.type _ZN4Core7ARM_NCE37ReturnToRunCodeByExceptionLevelChangeEiPv, %function
-_ZN4Core7ARM_NCE37ReturnToRunCodeByExceptionLevelChangeEiPv:
+/* static HaltReason Core::ArmNce::ReturnToRunCodeByExceptionLevelChange(int tid, void* tpidr) */
+.section .text._ZN4Core6ArmNce37ReturnToRunCodeByExceptionLevelChangeEiPv, "ax", %progbits
+.global _ZN4Core6ArmNce37ReturnToRunCodeByExceptionLevelChangeEiPv
+.type _ZN4Core6ArmNce37ReturnToRunCodeByExceptionLevelChangeEiPv, %function
+_ZN4Core6ArmNce37ReturnToRunCodeByExceptionLevelChangeEiPv:
     /* This jumps to the signal handler, which will restore the entire context. */
     /* On entry, x0 = thread id, which is already in the right place. */
 
@@ -71,17 +71,17 @@ _ZN4Core7ARM_NCE37ReturnToRunCodeByExceptionLevelChangeEiPv:
     brk #1000
 
 
-/* static void Core::ARM_NCE::ReturnToRunCodeByExceptionLevelChangeSignalHandler(int sig, void* info, void* raw_context) */
-.section .text._ZN4Core7ARM_NCE50ReturnToRunCodeByExceptionLevelChangeSignalHandlerEiPvS1_, "ax", %progbits
-.global _ZN4Core7ARM_NCE50ReturnToRunCodeByExceptionLevelChangeSignalHandlerEiPvS1_
-.type _ZN4Core7ARM_NCE50ReturnToRunCodeByExceptionLevelChangeSignalHandlerEiPvS1_, %function
-_ZN4Core7ARM_NCE50ReturnToRunCodeByExceptionLevelChangeSignalHandlerEiPvS1_:
+/* static void Core::ArmNce::ReturnToRunCodeByExceptionLevelChangeSignalHandler(int sig, void* info, void* raw_context) */
+.section .text._ZN4Core6ArmNce50ReturnToRunCodeByExceptionLevelChangeSignalHandlerEiPvS1_, "ax", %progbits
+.global _ZN4Core6ArmNce50ReturnToRunCodeByExceptionLevelChangeSignalHandlerEiPvS1_
+.type _ZN4Core6ArmNce50ReturnToRunCodeByExceptionLevelChangeSignalHandlerEiPvS1_, %function
+_ZN4Core6ArmNce50ReturnToRunCodeByExceptionLevelChangeSignalHandlerEiPvS1_:
     stp x29, x30, [sp, #-0x10]!
     mov x29, sp
 
     /* Call the context restorer with the raw context. */
     mov x0, x2
-    bl _ZN4Core7ARM_NCE19RestoreGuestContextEPv
+    bl _ZN4Core6ArmNce19RestoreGuestContextEPv
 
     /* Save the old value of tpidr_el0. */
     mrs x8, tpidr_el0
@@ -92,18 +92,18 @@ _ZN4Core7ARM_NCE50ReturnToRunCodeByExceptionLevelChangeSignalHandlerEiPvS1_:
     msr tpidr_el0, x0
 
     /* Unlock the context. */
-    bl _ZN4Core7ARM_NCE22UnlockThreadParametersEPv
+    bl _ZN4Core6ArmNce22UnlockThreadParametersEPv
 
     /* Returning from here will enter the guest. */
     ldp x29, x30, [sp], #0x10
     ret
 
 
-/* static void Core::ARM_NCE::BreakFromRunCodeSignalHandler(int sig, void* info, void* raw_context) */
-.section .text._ZN4Core7ARM_NCE29BreakFromRunCodeSignalHandlerEiPvS1_, "ax", %progbits
-.global _ZN4Core7ARM_NCE29BreakFromRunCodeSignalHandlerEiPvS1_
-.type _ZN4Core7ARM_NCE29BreakFromRunCodeSignalHandlerEiPvS1_, %function
-_ZN4Core7ARM_NCE29BreakFromRunCodeSignalHandlerEiPvS1_:
+/* static void Core::ArmNce::BreakFromRunCodeSignalHandler(int sig, void* info, void* raw_context) */
+.section .text._ZN4Core6ArmNce29BreakFromRunCodeSignalHandlerEiPvS1_, "ax", %progbits
+.global _ZN4Core6ArmNce29BreakFromRunCodeSignalHandlerEiPvS1_
+.type _ZN4Core6ArmNce29BreakFromRunCodeSignalHandlerEiPvS1_, %function
+_ZN4Core6ArmNce29BreakFromRunCodeSignalHandlerEiPvS1_:
     /* Check to see if we have the correct TLS magic. */
     mrs x8, tpidr_el0
     ldr w9, [x8, #(TpidrEl0TlsMagic)]
@@ -121,7 +121,7 @@ _ZN4Core7ARM_NCE29BreakFromRunCodeSignalHandlerEiPvS1_:
 
     /* Tail call the restorer. */
     mov x1, x2
-    b _ZN4Core7ARM_NCE16SaveGuestContextEPNS_12GuestContextEPv
+    b _ZN4Core6ArmNce16SaveGuestContextEPNS_12GuestContextEPv
 
     /* Returning from here will enter host code. */
 
@@ -130,11 +130,11 @@ _ZN4Core7ARM_NCE29BreakFromRunCodeSignalHandlerEiPvS1_:
     ret
 
 
-/* static void Core::ARM_NCE::GuestFaultSignalHandler(int sig, void* info, void* raw_context) */
-.section .text._ZN4Core7ARM_NCE23GuestFaultSignalHandlerEiPvS1_, "ax", %progbits
-.global _ZN4Core7ARM_NCE23GuestFaultSignalHandlerEiPvS1_
-.type _ZN4Core7ARM_NCE23GuestFaultSignalHandlerEiPvS1_, %function
-_ZN4Core7ARM_NCE23GuestFaultSignalHandlerEiPvS1_:
+/* static void Core::ArmNce::GuestFaultSignalHandler(int sig, void* info, void* raw_context) */
+.section .text._ZN4Core6ArmNce23GuestFaultSignalHandlerEiPvS1_, "ax", %progbits
+.global _ZN4Core6ArmNce23GuestFaultSignalHandlerEiPvS1_
+.type _ZN4Core6ArmNce23GuestFaultSignalHandlerEiPvS1_, %function
+_ZN4Core6ArmNce23GuestFaultSignalHandlerEiPvS1_:
     /* Check to see if we have the correct TLS magic. */
     mrs x8, tpidr_el0
     ldr w9, [x8, #(TpidrEl0TlsMagic)]
@@ -146,7 +146,7 @@ _ZN4Core7ARM_NCE23GuestFaultSignalHandlerEiPvS1_:
 
     /* Incorrect TLS magic, so this is a host fault. */
     /* Tail call the handler. */
-    b _ZN4Core7ARM_NCE15HandleHostFaultEiPvS1_
+    b _ZN4Core6ArmNce15HandleHostFaultEiPvS1_
 
 1:
     /* Correct TLS magic, so this is a guest fault. */
@@ -163,7 +163,7 @@ _ZN4Core7ARM_NCE23GuestFaultSignalHandlerEiPvS1_:
     msr tpidr_el0, x3
 
     /* Call the handler. */
-    bl _ZN4Core7ARM_NCE16HandleGuestFaultEPNS_12GuestContextEPvS3_
+    bl _ZN4Core6ArmNce16HandleGuestFaultEPNS_12GuestContextEPvS3_
 
     /* If the handler returned false, we want to preserve the host tpidr_el0. */
     cbz x0, 2f
@@ -177,11 +177,11 @@ _ZN4Core7ARM_NCE23GuestFaultSignalHandlerEiPvS1_:
     ret
 
 
-/* static void Core::ARM_NCE::LockThreadParameters(void* tpidr) */
-.section .text._ZN4Core7ARM_NCE20LockThreadParametersEPv, "ax", %progbits
-.global _ZN4Core7ARM_NCE20LockThreadParametersEPv
-.type _ZN4Core7ARM_NCE20LockThreadParametersEPv, %function
-_ZN4Core7ARM_NCE20LockThreadParametersEPv:
+/* static void Core::ArmNce::LockThreadParameters(void* tpidr) */
+.section .text._ZN4Core6ArmNce20LockThreadParametersEPv, "ax", %progbits
+.global _ZN4Core6ArmNce20LockThreadParametersEPv
+.type _ZN4Core6ArmNce20LockThreadParametersEPv, %function
+_ZN4Core6ArmNce20LockThreadParametersEPv:
     /* Offset to lock member. */
     add x0, x0, #(TpidrEl0Lock)
 
@@ -205,11 +205,11 @@ _ZN4Core7ARM_NCE20LockThreadParametersEPv:
     ret
 
 
-/* static void Core::ARM_NCE::UnlockThreadParameters(void* tpidr) */
-.section .text._ZN4Core7ARM_NCE22UnlockThreadParametersEPv, "ax", %progbits
-.global _ZN4Core7ARM_NCE22UnlockThreadParametersEPv
-.type _ZN4Core7ARM_NCE22UnlockThreadParametersEPv, %function
-_ZN4Core7ARM_NCE22UnlockThreadParametersEPv:
+/* static void Core::ArmNce::UnlockThreadParameters(void* tpidr) */
+.section .text._ZN4Core6ArmNce22UnlockThreadParametersEPv, "ax", %progbits
+.global _ZN4Core6ArmNce22UnlockThreadParametersEPv
+.type _ZN4Core6ArmNce22UnlockThreadParametersEPv, %function
+_ZN4Core6ArmNce22UnlockThreadParametersEPv:
     /* Offset to lock member. */
     add x0, x0, #(TpidrEl0Lock)
 
```
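`LockThreadParameters`/`UnlockThreadParameters` implement a small spinlock over the `lock` word in the thread's TLS block (the same word `HandleGuestFault` stores `SpinLockLocked` into). A rough C++ analogue of the pair is sketched below; the struct layout and the numeric lock values are assumptions, only the acquire/release shape is the point.

```cpp
// Rough C++ analogue of the assembly spinlock above. The field layout and the
// constants are assumptions for illustration; only SpinLockLocked is named in the diff.
#include <atomic>
#include <cstdint>

constexpr std::uint32_t SpinLockUnlocked = 0; // assumed value
constexpr std::uint32_t SpinLockLocked = 1;   // assumed value

struct NativeExecutionParameters {
    std::atomic<std::uint32_t> lock{SpinLockUnlocked};
    // ... tpidr_el0, native_context, is_running, TLS magic, etc.
};

void LockThreadParameters(NativeExecutionParameters& params) {
    std::uint32_t expected = SpinLockUnlocked;
    // Spin until the word is observed unlocked and claimed atomically (acquire).
    while (!params.lock.compare_exchange_weak(expected, SpinLockLocked,
                                              std::memory_order_acquire,
                                              std::memory_order_relaxed)) {
        expected = SpinLockUnlocked;
    }
}

void UnlockThreadParameters(NativeExecutionParameters& params) {
    // Publish writes made under the lock, then release it.
    params.lock.store(SpinLockUnlocked, std::memory_order_release);
}
```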
```diff
diff --git a/src/core/arm/nce/guest_context.h b/src/core/arm/nce/guest_context.h
index 0767a0337..a7eadccce 100644
--- a/src/core/arm/nce/guest_context.h
+++ b/src/core/arm/nce/guest_context.h
@@ -3,6 +3,8 @@
 
 #pragma once
 
+#include <atomic>
+
 #include "common/common_funcs.h"
 #include "common/common_types.h"
 #include "core/arm/arm_interface.h"
@@ -10,7 +12,7 @@
 
 namespace Core {
 
-class ARM_NCE;
+class ArmNce;
 class System;
 
 struct HostContext {
@@ -33,9 +35,9 @@ struct GuestContext {
     u64 tpidr_el0{};
     std::atomic<u64> esr_el1{};
     u32 nzcv{};
-    u32 svc_swi{};
+    u32 svc{};
     System* system{};
-    ARM_NCE* parent{};
+    ArmNce* parent{};
 };
 
 // Verify assembly offsets.
```
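The trailing `// Verify assembly offsets.` comment refers to compile-time checks that keep the hand-written offsets used by arm_nce.s (such as `TpidrEl0Lock` and `TpidrEl0TlsMagic`) in sync with the C++ layout. A generic sketch of that pattern, with made-up names rather than the project's constants:

```cpp
// Generic sketch of offset verification between C++ structs and assembly constants.
// ExampleContext and ExampleSpOffset are made-up names, not the project's.
#include <cstddef>
#include <cstdint>

struct ExampleContext {
    std::uint64_t cpu_registers[31];
    std::uint64_t sp;
};

constexpr std::size_t ExampleSpOffset = 31 * sizeof(std::uint64_t); // value the assembly would hard-code
static_assert(offsetof(ExampleContext, sp) == ExampleSpOffset,
              "assembly offset constant is out of sync with the struct layout");
```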
```diff
diff --git a/src/core/arm/nce/patcher.cpp b/src/core/arm/nce/patcher.cpp
index bdaa3af49..47a7a8880 100644
--- a/src/core/arm/nce/patcher.cpp
+++ b/src/core/arm/nce/patcher.cpp
@@ -280,7 +280,7 @@ void Patcher::WriteSvcTrampoline(ModuleDestLabel module_dest, u32 svc_id) {
 
     // Store SVC number to execute when we return
     c.MOV(X2, svc_id);
-    c.STR(W2, X1, offsetof(GuestContext, svc_swi));
+    c.STR(W2, X1, offsetof(GuestContext, svc));
 
     // We are calling a SVC. Clear esr_el1 and return it.
     static_assert(std::is_same_v<std::underlying_type_t<HaltReason>, u64>);
```
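For context, the emitted `STR` above writes the immediate SVC id into the renamed `GuestContext::svc` field, which `ArmNce::GetSvcNumber()` reads back once the trampoline returns to host code. In plain C++ terms the store amounts to something like the following; this is a restatement for clarity, not project code, and it assumes X1 holds the `GuestContext` pointer as the `offsetof` addressing implies.

```cpp
// Plain C++ restatement of the emitted store, assuming `ctx` is what X1 points at.
#include <cstdint>

#include "core/arm/nce/guest_context.h"

void StoreSvcNumber(Core::GuestContext* ctx, std::uint32_t svc_id) {
    ctx->svc = svc_id; // later returned by ArmNce::GetSvcNumber()
}
```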