-rw-r--r--  src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp |  6
-rw-r--r--  src/core/hle/kernel/k_auto_object.h                        |  2
-rw-r--r--  src/core/hle/kernel/k_process.h                            |  2
-rw-r--r--  src/core/hle/kernel/k_scheduler_lock.h                     |  3
-rw-r--r--  src/core/hle/kernel/k_thread.cpp                           |  4
-rw-r--r--  src/core/hle/kernel/k_thread.h                             |  3
-rw-r--r--  src/core/hle/kernel/kernel.cpp                             |  8
7 files changed, 15 insertions(+), 13 deletions(-)
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
index 8027bec00..7765e7848 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
@@ -148,9 +148,9 @@ u64 GenerateUniformRange(u64 min, u64 max, F f) {
 } // Anonymous namespace
 
 u64 KSystemControl::GenerateRandomU64() {
-    static std::random_device device;
-    static std::mt19937 gen(device());
-    static std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max());
+    std::random_device device;
+    std::mt19937 gen(device());
+    std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max());
     return distribution(gen);
 }
 
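Dropping the static qualifiers trades per-call seeding cost for thread safety: a shared static std::mt19937 mutated from multiple threads without a lock is a data race. Below is a sketch of one common alternative (a thread_local generator, not what this patch does) that also avoids the race while keeping the generator warm per thread:

#include <cstdint>
#include <limits>
#include <random>

// Sketch of an alternative approach (not the patch's code): a thread_local
// generator also removes the data race on a shared static std::mt19937, while
// amortizing the std::random_device seeding across calls on the same thread.
inline std::uint64_t GenerateRandomU64ThreadLocal() {
    thread_local std::mt19937_64 gen{std::random_device{}()};
    std::uniform_int_distribution<std::uint64_t> dist(1, std::numeric_limits<std::uint64_t>::max());
    return dist(gen);
}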
diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h
index 05779f2d5..abdb8ae7c 100644
--- a/src/core/hle/kernel/k_auto_object.h
+++ b/src/core/hle/kernel/k_auto_object.h
@@ -163,7 +163,7 @@ public:
         do {
             ASSERT(cur_ref_count > 0);
         } while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count - 1,
-                                                    std::memory_order_relaxed));
+                                                    std::memory_order_acq_rel));
 
         // If ref count hits zero, destroy the object.
         if (cur_ref_count - 1 == 0) {
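The ordering change matters for the final decrement: release semantics ensure that writes to the object made before Close() happen-before its destruction, and acquire semantics ensure the destroying thread observes writes released by other threads' decrements. A self-contained sketch of the pattern with a hypothetical RefCounted type (not the project's KAutoObject):

#include <atomic>
#include <cassert>
#include <cstdint>

// Hypothetical type illustrating the decrement loop above. With
// memory_order_acq_rel, the successful exchange both releases this thread's
// prior writes to the object and acquires writes from other threads, so the
// thread that reaches zero can safely destroy the object. A relaxed decrement
// would let destruction race with earlier writes made on other threads.
struct RefCounted {
    std::atomic<std::uint32_t> m_ref_count{1};

    void Close() {
        auto cur = m_ref_count.load(std::memory_order_relaxed);
        do {
            assert(cur > 0);
        } while (!m_ref_count.compare_exchange_weak(cur, cur - 1, std::memory_order_acq_rel));
        if (cur - 1 == 0) {
            delete this; // last reference dropped
        }
    }
};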
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index 48b17fc74..9f171e3da 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -422,7 +422,7 @@ private:
     bool is_64bit_process = true;
 
     /// Total running time for the process in ticks.
-    u64 total_process_running_time_ticks = 0;
+    std::atomic<u64> total_process_running_time_ticks = 0;
 
     /// Per-process handle table for storing created object handles in.
     KHandleTable handle_table;
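Making the tick counter atomic covers the case where one thread accumulates running time while another reads the total: with a plain u64 that is a data race, and the 64-bit value can tear on 32-bit hosts. A small sketch with hypothetical names:

#include <atomic>
#include <cstdint>

// Hypothetical stand-in for the per-process counter: fetch_add and load on the
// atomic are well-defined even when writer and reader run on different cores.
// Relaxed ordering is enough here because the counter is a pure statistic and
// publishes no other data.
struct ProcessStats {
    std::atomic<std::uint64_t> running_time_ticks{0};

    void AddTicks(std::uint64_t ticks) {
        running_time_ticks.fetch_add(ticks, std::memory_order_relaxed);
    }

    std::uint64_t TotalTicks() const {
        return running_time_ticks.load(std::memory_order_relaxed);
    }
};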
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h
index 93c47f1b1..016e0a818 100644
--- a/src/core/hle/kernel/k_scheduler_lock.h
+++ b/src/core/hle/kernel/k_scheduler_lock.h
@@ -4,6 +4,7 @@
 
 #pragma once
 
+#include <atomic>
 #include "common/assert.h"
 #include "core/hle/kernel/k_spin_lock.h"
 #include "core/hle/kernel/k_thread.h"
@@ -75,7 +76,7 @@ private:
     KernelCore& kernel;
     KAlignedSpinLock spin_lock{};
     s32 lock_count{};
-    KThread* owner_thread{};
+    std::atomic<KThread*> owner_thread{};
 };
 
 } // namespace Kernel
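owner_thread is written while the spin lock is held but may be read from another core without it (for example in an "is this thread the holder?" query), so the pointer itself has to be atomic. A heavily simplified, hypothetical sketch of that access pattern:

#include <atomic>

// Hypothetical sketch, not the real KSchedulerLock: stores to owner_thread
// happen under the spin lock, but the ownership query may run on any core
// without taking it, so the pointer load/store must be atomic. Relaxed
// ordering suffices for the identity comparison; the spin lock still orders
// the state it protects.
struct Thread;

struct SchedulerLockSketch {
    std::atomic<Thread*> owner_thread{nullptr};

    void SetOwner(Thread* t) { // called with the spin lock held
        owner_thread.store(t, std::memory_order_relaxed);
    }

    bool IsLockedByThread(const Thread* t) const {
        return owner_thread.load(std::memory_order_relaxed) == t;
    }
};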
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 94c8faf68..d3bb1c871 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -723,7 +723,7 @@ void KThread::UpdateState() {
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
     // Set our suspend flags in state.
-    const auto old_state = thread_state;
+    const ThreadState old_state = thread_state;
     const auto new_state =
         static_cast<ThreadState>(this->GetSuspendFlags()) | (old_state & ThreadState::Mask);
     thread_state = new_state;
@@ -738,7 +738,7 @@ void KThread::Continue() {
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
     // Clear our suspend flags in state.
-    const auto old_state = thread_state;
+    const ThreadState old_state = thread_state;
     thread_state = old_state & ThreadState::Mask;
 
     // Note the state change in scheduler.
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index f46db7298..d0fd85130 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -5,6 +5,7 @@
 #pragma once
 
 #include <array>
+#include <atomic>
 #include <span>
 #include <string>
 #include <utility>
@@ -751,7 +752,7 @@ private:
     KAffinityMask original_physical_affinity_mask{};
     s32 original_physical_ideal_core_id{};
     s32 num_core_migration_disables{};
-    ThreadState thread_state{};
+    std::atomic<ThreadState> thread_state{};
     std::atomic<bool> termination_requested{};
     bool wait_cancelled{};
     bool cancellable{};
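This declaration is why the k_thread.cpp hunks above change `const auto old_state` to `const ThreadState old_state`: with an atomic member, auto would deduce the non-copyable std::atomic<ThreadState> type, so the element type must be named to force an implicit load through operator T(). A sketch with a hypothetical enum standing in for Kernel::ThreadState:

#include <atomic>
#include <cstdint>

// Hypothetical enum, not the project's ThreadState (which also defines flag
// operators). Naming the element type on the left performs an implicit atomic
// load; assigning back performs an implicit atomic store.
enum class ThreadState : std::uint16_t { Initialized = 0, Runnable = 2, Mask = 0xF };

struct ThreadSketch {
    std::atomic<ThreadState> thread_state{ThreadState::Initialized};

    ThreadState ClearSuspendFlags() {
        const ThreadState old_state = thread_state; // implicit atomic load
        thread_state = static_cast<ThreadState>(static_cast<std::uint16_t>(old_state) &
                                                static_cast<std::uint16_t>(ThreadState::Mask));
        return old_state;
    }
};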
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 134a0b8e9..481a0d7cb 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -85,7 +85,7 @@ struct KernelCore::Impl {
 
     void InitializeCores() {
         for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
-            cores[core_id].Initialize(current_process->Is64BitProcess());
+            cores[core_id].Initialize((*current_process).Is64BitProcess());
             system.Memory().SetCurrentPageTable(*current_process, core_id);
         }
     }
@@ -168,11 +168,11 @@ struct KernelCore::Impl {
 
         // Shutdown all processes.
         if (current_process) {
-            current_process->Finalize();
+            (*current_process).Finalize();
             // current_process->Close();
             // TODO: The current process should be destroyed based on accurate ref counting after
             // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
-            current_process->Destroy();
+            (*current_process).Destroy();
             current_process = nullptr;
         }
 
@@ -704,7 +704,7 @@ struct KernelCore::Impl {
 
     // Lists all processes that exist in the current session.
     std::vector<KProcess*> process_list;
-    KProcess* current_process{};
+    std::atomic<KProcess*> current_process{};
     std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context;
     Kernel::TimeManager time_manager;
 
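The `(*current_process)` spellings in the earlier kernel.cpp hunks follow from this declaration: std::atomic<KProcess*> has no operator->, so member access must go through an explicit load or the implicit conversion to the raw pointer. A standalone sketch with a hypothetical Process type:

#include <atomic>
#include <iostream>

// Hypothetical Process type; demonstrates two equivalent spellings for member
// access through an atomic pointer.
struct Process {
    void Finalize() { std::cout << "finalized\n"; }
};

int main() {
    std::atomic<Process*> current_process{new Process};

    (*current_process).Finalize();      // implicit load via operator T*(), then dereference
    current_process.load()->Finalize(); // explicit load, then member access

    delete current_process.load();
    current_process = nullptr;
}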