Diffstat (limited to 'src')
-rw-r--r--  src/core/core.cpp                        |   4
-rw-r--r--  src/core/debugger/gdbstub.cpp            |   2
-rw-r--r--  src/core/hle/kernel/k_process.cpp        | 268
-rw-r--r--  src/core/hle/kernel/k_process.h          | 186
-rw-r--r--  src/core/hle/kernel/svc/svc_info.cpp     |   2
-rw-r--r--  src/core/hle/kernel/svc/svc_process.cpp  |   4
-rw-r--r--  src/core/hle/service/am/am.cpp           |   2
-rw-r--r--  src/core/hle/service/glue/arp.cpp        |   4
-rw-r--r--  src/core/hle/service/pm/pm.cpp           |  20
-rw-r--r--  src/core/memory/cheat_engine.cpp         |   2
10 files changed, 254 insertions, 240 deletions
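Taken together, the diff below renames KProcess's private data members to the kernel's m_ prefix convention and its ID accessors from GetProcessID/GetProgramID to GetProcessId/GetProgramId, with callers updated to match. A minimal sketch of the convention being applied (the class and member here are illustrative only, not code from the patch):

```cpp
#include <cstdint>

using u64 = std::uint64_t;

// Before: bare member name, "ID" spelled in full caps in the accessor.
class ProcessBefore {
public:
    u64 GetProcessID() const { return process_id; }
private:
    u64 process_id = 0;
};

// After: the member carries the m_ prefix and the accessor uses "Id".
class ProcessAfter {
public:
    u64 GetProcessId() const { return m_process_id; }
private:
    u64 m_process_id = 0;
};
```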
diff --git a/src/core/core.cpp b/src/core/core.cpp
index bd2082fd6..d2b597068 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
| @@ -434,7 +434,7 @@ struct System::Impl { | |||
| 434 | } | 434 | } |
| 435 | 435 | ||
| 436 | Service::Glue::ApplicationLaunchProperty launch{}; | 436 | Service::Glue::ApplicationLaunchProperty launch{}; |
| 437 | launch.title_id = process.GetProgramID(); | 437 | launch.title_id = process.GetProgramId(); |
| 438 | 438 | ||
| 439 | FileSys::PatchManager pm{launch.title_id, fs_controller, *content_provider}; | 439 | FileSys::PatchManager pm{launch.title_id, fs_controller, *content_provider}; |
| 440 | launch.version = pm.GetGameVersion().value_or(0); | 440 | launch.version = pm.GetGameVersion().value_or(0); |
| @@ -762,7 +762,7 @@ const Core::SpeedLimiter& System::SpeedLimiter() const { | |||
| 762 | } | 762 | } |
| 763 | 763 | ||
| 764 | u64 System::GetApplicationProcessProgramID() const { | 764 | u64 System::GetApplicationProcessProgramID() const { |
| 765 | return impl->kernel.ApplicationProcess()->GetProgramID(); | 765 | return impl->kernel.ApplicationProcess()->GetProgramId(); |
| 766 | } | 766 | } |
| 767 | 767 | ||
| 768 | Loader::ResultStatus System::GetGameName(std::string& out) const { | 768 | Loader::ResultStatus System::GetGameName(std::string& out) const { |
diff --git a/src/core/debugger/gdbstub.cpp b/src/core/debugger/gdbstub.cpp
index f39f2ca29..b2fe6bd7d 100644
--- a/src/core/debugger/gdbstub.cpp
+++ b/src/core/debugger/gdbstub.cpp
| @@ -756,7 +756,7 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) { | |||
| 756 | 756 | ||
| 757 | reply = fmt::format("Process: {:#x} ({})\n" | 757 | reply = fmt::format("Process: {:#x} ({})\n" |
| 758 | "Program Id: {:#018x}\n", | 758 | "Program Id: {:#018x}\n", |
| 759 | process->GetProcessID(), process->GetName(), process->GetProgramID()); | 759 | process->GetProcessId(), process->GetName(), process->GetProgramId()); |
| 760 | reply += | 760 | reply += |
| 761 | fmt::format("Layout:\n" | 761 | fmt::format("Layout:\n" |
| 762 | " Alias: {:#012x} - {:#012x}\n" | 762 | " Alias: {:#012x} - {:#012x}\n" |
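For reference, the {:#018x} specifier used in the Rcmd reply above prints a 64-bit program ID as a 0x-prefixed, zero-padded 18-character value. A standalone sketch using the fmt library (the values are made up, not taken from the diff):

```cpp
#include <cstdint>

#include <fmt/format.h>

int main() {
    const std::uint64_t process_id = 0x95;                // illustrative value
    const std::uint64_t program_id = 0x0100000000010000;  // illustrative value

    // "{:#x}" prints 0x95; "{:#018x}" prints 0x0100000000010000
    // (the "0x" prefix plus 16 hex digits gives the 18-character width).
    fmt::print("Process: {:#x}\nProgram Id: {:#018x}\n", process_id, program_id);
}
```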
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index fa3fc8c1c..46ac3833e 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
| @@ -71,32 +71,32 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string | |||
| 71 | auto& kernel = system.Kernel(); | 71 | auto& kernel = system.Kernel(); |
| 72 | 72 | ||
| 73 | process->name = std::move(process_name); | 73 | process->name = std::move(process_name); |
| 74 | process->resource_limit = res_limit; | 74 | process->m_resource_limit = res_limit; |
| 75 | process->system_resource_address = 0; | 75 | process->m_system_resource_address = 0; |
| 76 | process->state = State::Created; | 76 | process->m_state = State::Created; |
| 77 | process->program_id = 0; | 77 | process->m_program_id = 0; |
| 78 | process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID() | 78 | process->m_process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID() |
| 79 | : kernel.CreateNewUserProcessID(); | 79 | : kernel.CreateNewUserProcessID(); |
| 80 | process->capabilities.InitializeForMetadatalessProcess(); | 80 | process->m_capabilities.InitializeForMetadatalessProcess(); |
| 81 | process->is_initialized = true; | 81 | process->m_is_initialized = true; |
| 82 | 82 | ||
| 83 | std::mt19937 rng(Settings::values.rng_seed.GetValue().value_or(std::time(nullptr))); | 83 | std::mt19937 rng(Settings::values.rng_seed.GetValue().value_or(std::time(nullptr))); |
| 84 | std::uniform_int_distribution<u64> distribution; | 84 | std::uniform_int_distribution<u64> distribution; |
| 85 | std::generate(process->random_entropy.begin(), process->random_entropy.end(), | 85 | std::generate(process->m_random_entropy.begin(), process->m_random_entropy.end(), |
| 86 | [&] { return distribution(rng); }); | 86 | [&] { return distribution(rng); }); |
| 87 | 87 | ||
| 88 | kernel.AppendNewProcess(process); | 88 | kernel.AppendNewProcess(process); |
| 89 | 89 | ||
| 90 | // Clear remaining fields. | 90 | // Clear remaining fields. |
| 91 | process->num_running_threads = 0; | 91 | process->m_num_running_threads = 0; |
| 92 | process->is_signaled = false; | 92 | process->m_is_signaled = false; |
| 93 | process->exception_thread = nullptr; | 93 | process->m_exception_thread = nullptr; |
| 94 | process->is_suspended = false; | 94 | process->m_is_suspended = false; |
| 95 | process->schedule_count = 0; | 95 | process->m_schedule_count = 0; |
| 96 | process->is_handle_table_initialized = false; | 96 | process->m_is_handle_table_initialized = false; |
| 97 | 97 | ||
| 98 | // Open a reference to the resource limit. | 98 | // Open a reference to the resource limit. |
| 99 | process->resource_limit->Open(); | 99 | process->m_resource_limit->Open(); |
| 100 | 100 | ||
| 101 | R_SUCCEED(); | 101 | R_SUCCEED(); |
| 102 | } | 102 | } |
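The Initialize hunk above seeds a Mersenne Twister, optionally from the configured RNG seed, and fills the process's random-entropy words with std::generate. A self-contained sketch of that pattern, assuming a four-entry entropy array purely for illustration:

```cpp
#include <algorithm>
#include <array>
#include <cstdint>
#include <ctime>
#include <optional>
#include <random>

int main() {
    // Stand-in for Settings::values.rng_seed; an empty optional falls back to the current time.
    std::optional<std::uint32_t> configured_seed;

    std::mt19937 rng(configured_seed.value_or(static_cast<std::uint32_t>(std::time(nullptr))));
    std::uniform_int_distribution<std::uint64_t> distribution;

    // Fill each entropy word with a uniformly distributed 64-bit value.
    std::array<std::uint64_t, 4> random_entropy{};
    std::generate(random_entropy.begin(), random_entropy.end(), [&] { return distribution(rng); });
}
```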
| @@ -106,34 +106,34 @@ void KProcess::DoWorkerTaskImpl() { | |||
| 106 | } | 106 | } |
| 107 | 107 | ||
| 108 | KResourceLimit* KProcess::GetResourceLimit() const { | 108 | KResourceLimit* KProcess::GetResourceLimit() const { |
| 109 | return resource_limit; | 109 | return m_resource_limit; |
| 110 | } | 110 | } |
| 111 | 111 | ||
| 112 | void KProcess::IncrementRunningThreadCount() { | 112 | void KProcess::IncrementRunningThreadCount() { |
| 113 | ASSERT(num_running_threads.load() >= 0); | 113 | ASSERT(m_num_running_threads.load() >= 0); |
| 114 | ++num_running_threads; | 114 | ++m_num_running_threads; |
| 115 | } | 115 | } |
| 116 | 116 | ||
| 117 | void KProcess::DecrementRunningThreadCount() { | 117 | void KProcess::DecrementRunningThreadCount() { |
| 118 | ASSERT(num_running_threads.load() > 0); | 118 | ASSERT(m_num_running_threads.load() > 0); |
| 119 | 119 | ||
| 120 | if (const auto prev = num_running_threads--; prev == 1) { | 120 | if (const auto prev = m_num_running_threads--; prev == 1) { |
| 121 | // TODO(bunnei): Process termination to be implemented when multiprocess is supported. | 121 | // TODO(bunnei): Process termination to be implemented when multiprocess is supported. |
| 122 | } | 122 | } |
| 123 | } | 123 | } |
| 124 | 124 | ||
| 125 | u64 KProcess::GetTotalPhysicalMemoryAvailable() { | 125 | u64 KProcess::GetTotalPhysicalMemoryAvailable() { |
| 126 | const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemoryMax) + | 126 | const u64 capacity{m_resource_limit->GetFreeValue(LimitableResource::PhysicalMemoryMax) + |
| 127 | page_table.GetNormalMemorySize() + GetSystemResourceSize() + image_size + | 127 | m_page_table.GetNormalMemorySize() + GetSystemResourceSize() + m_image_size + |
| 128 | main_thread_stack_size}; | 128 | m_main_thread_stack_size}; |
| 129 | if (const auto pool_size = m_kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application); | 129 | if (const auto pool_size = m_kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application); |
| 130 | capacity != pool_size) { | 130 | capacity != pool_size) { |
| 131 | LOG_WARNING(Kernel, "capacity {} != application pool size {}", capacity, pool_size); | 131 | LOG_WARNING(Kernel, "capacity {} != application pool size {}", capacity, pool_size); |
| 132 | } | 132 | } |
| 133 | if (capacity < memory_usage_capacity) { | 133 | if (capacity < m_memory_usage_capacity) { |
| 134 | return capacity; | 134 | return capacity; |
| 135 | } | 135 | } |
| 136 | return memory_usage_capacity; | 136 | return m_memory_usage_capacity; |
| 137 | } | 137 | } |
| 138 | 138 | ||
| 139 | u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() { | 139 | u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() { |
| @@ -141,7 +141,7 @@ u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() { | |||
| 141 | } | 141 | } |
| 142 | 142 | ||
| 143 | u64 KProcess::GetTotalPhysicalMemoryUsed() { | 143 | u64 KProcess::GetTotalPhysicalMemoryUsed() { |
| 144 | return image_size + main_thread_stack_size + page_table.GetNormalMemorySize() + | 144 | return m_image_size + m_main_thread_stack_size + m_page_table.GetNormalMemorySize() + |
| 145 | GetSystemResourceSize(); | 145 | GetSystemResourceSize(); |
| 146 | } | 146 | } |
| 147 | 147 | ||
| @@ -152,14 +152,14 @@ u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() { | |||
| 152 | bool KProcess::ReleaseUserException(KThread* thread) { | 152 | bool KProcess::ReleaseUserException(KThread* thread) { |
| 153 | KScopedSchedulerLock sl{m_kernel}; | 153 | KScopedSchedulerLock sl{m_kernel}; |
| 154 | 154 | ||
| 155 | if (exception_thread == thread) { | 155 | if (m_exception_thread == thread) { |
| 156 | exception_thread = nullptr; | 156 | m_exception_thread = nullptr; |
| 157 | 157 | ||
| 158 | // Remove waiter thread. | 158 | // Remove waiter thread. |
| 159 | bool has_waiters{}; | 159 | bool has_waiters{}; |
| 160 | if (KThread* next = thread->RemoveKernelWaiterByKey( | 160 | if (KThread* next = thread->RemoveKernelWaiterByKey( |
| 161 | std::addressof(has_waiters), | 161 | std::addressof(has_waiters), |
| 162 | reinterpret_cast<uintptr_t>(std::addressof(exception_thread))); | 162 | reinterpret_cast<uintptr_t>(std::addressof(m_exception_thread))); |
| 163 | next != nullptr) { | 163 | next != nullptr) { |
| 164 | next->EndWait(ResultSuccess); | 164 | next->EndWait(ResultSuccess); |
| 165 | } | 165 | } |
| @@ -173,7 +173,7 @@ bool KProcess::ReleaseUserException(KThread* thread) { | |||
| 173 | } | 173 | } |
| 174 | 174 | ||
| 175 | void KProcess::PinCurrentThread(s32 core_id) { | 175 | void KProcess::PinCurrentThread(s32 core_id) { |
| 176 | ASSERT(m_kernel.GlobalSchedulerContext().IsLocked()); | 176 | ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); |
| 177 | 177 | ||
| 178 | // Get the current thread. | 178 | // Get the current thread. |
| 179 | KThread* cur_thread = | 179 | KThread* cur_thread = |
| @@ -191,7 +191,7 @@ void KProcess::PinCurrentThread(s32 core_id) { | |||
| 191 | } | 191 | } |
| 192 | 192 | ||
| 193 | void KProcess::UnpinCurrentThread(s32 core_id) { | 193 | void KProcess::UnpinCurrentThread(s32 core_id) { |
| 194 | ASSERT(m_kernel.GlobalSchedulerContext().IsLocked()); | 194 | ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); |
| 195 | 195 | ||
| 196 | // Get the current thread. | 196 | // Get the current thread. |
| 197 | KThread* cur_thread = | 197 | KThread* cur_thread = |
| @@ -206,7 +206,7 @@ void KProcess::UnpinCurrentThread(s32 core_id) { | |||
| 206 | } | 206 | } |
| 207 | 207 | ||
| 208 | void KProcess::UnpinThread(KThread* thread) { | 208 | void KProcess::UnpinThread(KThread* thread) { |
| 209 | ASSERT(m_kernel.GlobalSchedulerContext().IsLocked()); | 209 | ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); |
| 210 | 210 | ||
| 211 | // Get the thread's core id. | 211 | // Get the thread's core id. |
| 212 | const auto core_id = thread->GetActiveCore(); | 212 | const auto core_id = thread->GetActiveCore(); |
| @@ -222,14 +222,14 @@ void KProcess::UnpinThread(KThread* thread) { | |||
| 222 | Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address, | 222 | Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address, |
| 223 | [[maybe_unused]] size_t size) { | 223 | [[maybe_unused]] size_t size) { |
| 224 | // Lock ourselves, to prevent concurrent access. | 224 | // Lock ourselves, to prevent concurrent access. |
| 225 | KScopedLightLock lk(state_lock); | 225 | KScopedLightLock lk(m_state_lock); |
| 226 | 226 | ||
| 227 | // Try to find an existing info for the memory. | 227 | // Try to find an existing info for the memory. |
| 228 | KSharedMemoryInfo* shemen_info = nullptr; | 228 | KSharedMemoryInfo* shemen_info = nullptr; |
| 229 | const auto iter = std::find_if( | 229 | const auto iter = std::find_if( |
| 230 | shared_memory_list.begin(), shared_memory_list.end(), | 230 | m_shared_memory_list.begin(), m_shared_memory_list.end(), |
| 231 | [shmem](const KSharedMemoryInfo* info) { return info->GetSharedMemory() == shmem; }); | 231 | [shmem](const KSharedMemoryInfo* info) { return info->GetSharedMemory() == shmem; }); |
| 232 | if (iter != shared_memory_list.end()) { | 232 | if (iter != m_shared_memory_list.end()) { |
| 233 | shemen_info = *iter; | 233 | shemen_info = *iter; |
| 234 | } | 234 | } |
| 235 | 235 | ||
| @@ -238,7 +238,7 @@ Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr ad | |||
| 238 | R_UNLESS(shemen_info != nullptr, ResultOutOfMemory); | 238 | R_UNLESS(shemen_info != nullptr, ResultOutOfMemory); |
| 239 | 239 | ||
| 240 | shemen_info->Initialize(shmem); | 240 | shemen_info->Initialize(shmem); |
| 241 | shared_memory_list.push_back(shemen_info); | 241 | m_shared_memory_list.push_back(shemen_info); |
| 242 | } | 242 | } |
| 243 | 243 | ||
| 244 | // Open a reference to the shared memory and its info. | 244 | // Open a reference to the shared memory and its info. |
| @@ -251,20 +251,20 @@ Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr ad | |||
| 251 | void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address, | 251 | void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address, |
| 252 | [[maybe_unused]] size_t size) { | 252 | [[maybe_unused]] size_t size) { |
| 253 | // Lock ourselves, to prevent concurrent access. | 253 | // Lock ourselves, to prevent concurrent access. |
| 254 | KScopedLightLock lk(state_lock); | 254 | KScopedLightLock lk(m_state_lock); |
| 255 | 255 | ||
| 256 | KSharedMemoryInfo* shemen_info = nullptr; | 256 | KSharedMemoryInfo* shemen_info = nullptr; |
| 257 | const auto iter = std::find_if( | 257 | const auto iter = std::find_if( |
| 258 | shared_memory_list.begin(), shared_memory_list.end(), | 258 | m_shared_memory_list.begin(), m_shared_memory_list.end(), |
| 259 | [shmem](const KSharedMemoryInfo* info) { return info->GetSharedMemory() == shmem; }); | 259 | [shmem](const KSharedMemoryInfo* info) { return info->GetSharedMemory() == shmem; }); |
| 260 | if (iter != shared_memory_list.end()) { | 260 | if (iter != m_shared_memory_list.end()) { |
| 261 | shemen_info = *iter; | 261 | shemen_info = *iter; |
| 262 | } | 262 | } |
| 263 | 263 | ||
| 264 | ASSERT(shemen_info != nullptr); | 264 | ASSERT(shemen_info != nullptr); |
| 265 | 265 | ||
| 266 | if (shemen_info->Close()) { | 266 | if (shemen_info->Close()) { |
| 267 | shared_memory_list.erase(iter); | 267 | m_shared_memory_list.erase(iter); |
| 268 | KSharedMemoryInfo::Free(m_kernel, shemen_info); | 268 | KSharedMemoryInfo::Free(m_kernel, shemen_info); |
| 269 | } | 269 | } |
| 270 | 270 | ||
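AddSharedMemory and RemoveSharedMemory both use std::find_if to locate the info entry tracking a shared-memory object before adjusting its reference count. A simplified sketch of that lookup over a plain std::list (the types are stand-ins, not the kernel's intrusive containers):

```cpp
#include <algorithm>
#include <list>

struct SharedMemory {};

struct SharedMemoryInfo {
    SharedMemory* shmem = nullptr;
    int ref_count = 0;

    SharedMemory* GetSharedMemory() const { return shmem; }
};

// Returns the info entry tracking `target`, or nullptr if none has been registered yet.
SharedMemoryInfo* FindInfo(std::list<SharedMemoryInfo*>& infos, SharedMemory* target) {
    const auto iter = std::find_if(
        infos.begin(), infos.end(),
        [target](const SharedMemoryInfo* info) { return info->GetSharedMemory() == target; });
    return iter != infos.end() ? *iter : nullptr;
}

int main() {
    std::list<SharedMemoryInfo*> infos;
    SharedMemory shmem;
    SharedMemoryInfo info{&shmem, 1};
    infos.push_back(&info);
    return FindInfo(infos, &shmem) == &info ? 0 : 1;
}
```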
| @@ -273,22 +273,22 @@ void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr a | |||
| 273 | } | 273 | } |
| 274 | 274 | ||
| 275 | void KProcess::RegisterThread(KThread* thread) { | 275 | void KProcess::RegisterThread(KThread* thread) { |
| 276 | KScopedLightLock lk{list_lock}; | 276 | KScopedLightLock lk{m_list_lock}; |
| 277 | 277 | ||
| 278 | thread_list.push_back(thread); | 278 | m_thread_list.push_back(thread); |
| 279 | } | 279 | } |
| 280 | 280 | ||
| 281 | void KProcess::UnregisterThread(KThread* thread) { | 281 | void KProcess::UnregisterThread(KThread* thread) { |
| 282 | KScopedLightLock lk{list_lock}; | 282 | KScopedLightLock lk{m_list_lock}; |
| 283 | 283 | ||
| 284 | thread_list.remove(thread); | 284 | m_thread_list.remove(thread); |
| 285 | } | 285 | } |
| 286 | 286 | ||
| 287 | u64 KProcess::GetFreeThreadCount() const { | 287 | u64 KProcess::GetFreeThreadCount() const { |
| 288 | if (resource_limit != nullptr) { | 288 | if (m_resource_limit != nullptr) { |
| 289 | const auto current_value = | 289 | const auto current_value = |
| 290 | resource_limit->GetCurrentValue(LimitableResource::ThreadCountMax); | 290 | m_resource_limit->GetCurrentValue(LimitableResource::ThreadCountMax); |
| 291 | const auto limit_value = resource_limit->GetLimitValue(LimitableResource::ThreadCountMax); | 291 | const auto limit_value = m_resource_limit->GetLimitValue(LimitableResource::ThreadCountMax); |
| 292 | return limit_value - current_value; | 292 | return limit_value - current_value; |
| 293 | } else { | 293 | } else { |
| 294 | return 0; | 294 | return 0; |
| @@ -297,35 +297,35 @@ u64 KProcess::GetFreeThreadCount() const { | |||
| 297 | 297 | ||
| 298 | Result KProcess::Reset() { | 298 | Result KProcess::Reset() { |
| 299 | // Lock the process and the scheduler. | 299 | // Lock the process and the scheduler. |
| 300 | KScopedLightLock lk(state_lock); | 300 | KScopedLightLock lk(m_state_lock); |
| 301 | KScopedSchedulerLock sl{m_kernel}; | 301 | KScopedSchedulerLock sl{m_kernel}; |
| 302 | 302 | ||
| 303 | // Validate that we're in a state that we can reset. | 303 | // Validate that we're in a state that we can reset. |
| 304 | R_UNLESS(state != State::Terminated, ResultInvalidState); | 304 | R_UNLESS(m_state != State::Terminated, ResultInvalidState); |
| 305 | R_UNLESS(is_signaled, ResultInvalidState); | 305 | R_UNLESS(m_is_signaled, ResultInvalidState); |
| 306 | 306 | ||
| 307 | // Clear signaled. | 307 | // Clear signaled. |
| 308 | is_signaled = false; | 308 | m_is_signaled = false; |
| 309 | R_SUCCEED(); | 309 | R_SUCCEED(); |
| 310 | } | 310 | } |
| 311 | 311 | ||
| 312 | Result KProcess::SetActivity(ProcessActivity activity) { | 312 | Result KProcess::SetActivity(ProcessActivity activity) { |
| 313 | // Lock ourselves and the scheduler. | 313 | // Lock ourselves and the scheduler. |
| 314 | KScopedLightLock lk{state_lock}; | 314 | KScopedLightLock lk{m_state_lock}; |
| 315 | KScopedLightLock list_lk{list_lock}; | 315 | KScopedLightLock list_lk{m_list_lock}; |
| 316 | KScopedSchedulerLock sl{m_kernel}; | 316 | KScopedSchedulerLock sl{m_kernel}; |
| 317 | 317 | ||
| 318 | // Validate our state. | 318 | // Validate our state. |
| 319 | R_UNLESS(state != State::Terminating, ResultInvalidState); | 319 | R_UNLESS(m_state != State::Terminating, ResultInvalidState); |
| 320 | R_UNLESS(state != State::Terminated, ResultInvalidState); | 320 | R_UNLESS(m_state != State::Terminated, ResultInvalidState); |
| 321 | 321 | ||
| 322 | // Either pause or resume. | 322 | // Either pause or resume. |
| 323 | if (activity == ProcessActivity::Paused) { | 323 | if (activity == ProcessActivity::Paused) { |
| 324 | // Verify that we're not suspended. | 324 | // Verify that we're not suspended. |
| 325 | R_UNLESS(!is_suspended, ResultInvalidState); | 325 | R_UNLESS(!m_is_suspended, ResultInvalidState); |
| 326 | 326 | ||
| 327 | // Suspend all threads. | 327 | // Suspend all threads. |
| 328 | for (auto* thread : GetThreadList()) { | 328 | for (auto* thread : this->GetThreadList()) { |
| 329 | thread->RequestSuspend(SuspendType::Process); | 329 | thread->RequestSuspend(SuspendType::Process); |
| 330 | } | 330 | } |
| 331 | 331 | ||
| @@ -335,10 +335,10 @@ Result KProcess::SetActivity(ProcessActivity activity) { | |||
| 335 | ASSERT(activity == ProcessActivity::Runnable); | 335 | ASSERT(activity == ProcessActivity::Runnable); |
| 336 | 336 | ||
| 337 | // Verify that we're suspended. | 337 | // Verify that we're suspended. |
| 338 | R_UNLESS(is_suspended, ResultInvalidState); | 338 | R_UNLESS(m_is_suspended, ResultInvalidState); |
| 339 | 339 | ||
| 340 | // Resume all threads. | 340 | // Resume all threads. |
| 341 | for (auto* thread : GetThreadList()) { | 341 | for (auto* thread : this->GetThreadList()) { |
| 342 | thread->Resume(SuspendType::Process); | 342 | thread->Resume(SuspendType::Process); |
| 343 | } | 343 | } |
| 344 | 344 | ||
| @@ -350,31 +350,32 @@ Result KProcess::SetActivity(ProcessActivity activity) { | |||
| 350 | } | 350 | } |
| 351 | 351 | ||
| 352 | Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) { | 352 | Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) { |
| 353 | program_id = metadata.GetTitleID(); | 353 | m_program_id = metadata.GetTitleID(); |
| 354 | ideal_core = metadata.GetMainThreadCore(); | 354 | m_ideal_core = metadata.GetMainThreadCore(); |
| 355 | is_64bit_process = metadata.Is64BitProgram(); | 355 | m_is_64bit_process = metadata.Is64BitProgram(); |
| 356 | system_resource_size = metadata.GetSystemResourceSize(); | 356 | m_system_resource_size = metadata.GetSystemResourceSize(); |
| 357 | image_size = code_size; | 357 | m_image_size = code_size; |
| 358 | 358 | ||
| 359 | KScopedResourceReservation memory_reservation( | 359 | KScopedResourceReservation memory_reservation( |
| 360 | resource_limit, LimitableResource::PhysicalMemoryMax, code_size + system_resource_size); | 360 | m_resource_limit, LimitableResource::PhysicalMemoryMax, code_size + m_system_resource_size); |
| 361 | if (!memory_reservation.Succeeded()) { | 361 | if (!memory_reservation.Succeeded()) { |
| 362 | LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes", | 362 | LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes", |
| 363 | code_size + system_resource_size); | 363 | code_size + m_system_resource_size); |
| 364 | R_RETURN(ResultLimitReached); | 364 | R_RETURN(ResultLimitReached); |
| 365 | } | 365 | } |
| 366 | // Initialize process address space | 366 | // Initialize process address space |
| 367 | if (const Result result{page_table.InitializeForProcess( | 367 | if (const Result result{m_page_table.InitializeForProcess( |
| 368 | metadata.GetAddressSpaceType(), false, false, false, KMemoryManager::Pool::Application, | 368 | metadata.GetAddressSpaceType(), false, false, false, KMemoryManager::Pool::Application, |
| 369 | 0x8000000, code_size, std::addressof(m_kernel.GetAppSystemResource()), resource_limit)}; | 369 | 0x8000000, code_size, std::addressof(m_kernel.GetAppSystemResource()), |
| 370 | m_resource_limit)}; | ||
| 370 | result.IsError()) { | 371 | result.IsError()) { |
| 371 | R_RETURN(result); | 372 | R_RETURN(result); |
| 372 | } | 373 | } |
| 373 | 374 | ||
| 374 | // Map process code region | 375 | // Map process code region |
| 375 | if (const Result result{page_table.MapProcessCode(page_table.GetCodeRegionStart(), | 376 | if (const Result result{m_page_table.MapProcessCode(m_page_table.GetCodeRegionStart(), |
| 376 | code_size / PageSize, KMemoryState::Code, | 377 | code_size / PageSize, KMemoryState::Code, |
| 377 | KMemoryPermission::None)}; | 378 | KMemoryPermission::None)}; |
| 378 | result.IsError()) { | 379 | result.IsError()) { |
| 379 | R_RETURN(result); | 380 | R_RETURN(result); |
| 380 | } | 381 | } |
| @@ -382,7 +383,7 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: | |||
| 382 | // Initialize process capabilities | 383 | // Initialize process capabilities |
| 383 | const auto& caps{metadata.GetKernelCapabilities()}; | 384 | const auto& caps{metadata.GetKernelCapabilities()}; |
| 384 | if (const Result result{ | 385 | if (const Result result{ |
| 385 | capabilities.InitializeForUserProcess(caps.data(), caps.size(), page_table)}; | 386 | m_capabilities.InitializeForUserProcess(caps.data(), caps.size(), m_page_table)}; |
| 386 | result.IsError()) { | 387 | result.IsError()) { |
| 387 | R_RETURN(result); | 388 | R_RETURN(result); |
| 388 | } | 389 | } |
| @@ -392,12 +393,14 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: | |||
| 392 | case FileSys::ProgramAddressSpaceType::Is32Bit: | 393 | case FileSys::ProgramAddressSpaceType::Is32Bit: |
| 393 | case FileSys::ProgramAddressSpaceType::Is36Bit: | 394 | case FileSys::ProgramAddressSpaceType::Is36Bit: |
| 394 | case FileSys::ProgramAddressSpaceType::Is39Bit: | 395 | case FileSys::ProgramAddressSpaceType::Is39Bit: |
| 395 | memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart(); | 396 | m_memory_usage_capacity = |
| 397 | m_page_table.GetHeapRegionEnd() - m_page_table.GetHeapRegionStart(); | ||
| 396 | break; | 398 | break; |
| 397 | 399 | ||
| 398 | case FileSys::ProgramAddressSpaceType::Is32BitNoMap: | 400 | case FileSys::ProgramAddressSpaceType::Is32BitNoMap: |
| 399 | memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart() + | 401 | m_memory_usage_capacity = |
| 400 | page_table.GetAliasRegionEnd() - page_table.GetAliasRegionStart(); | 402 | m_page_table.GetHeapRegionEnd() - m_page_table.GetHeapRegionStart() + |
| 403 | m_page_table.GetAliasRegionEnd() - m_page_table.GetAliasRegionStart(); | ||
| 401 | break; | 404 | break; |
| 402 | 405 | ||
| 403 | default: | 406 | default: |
| @@ -406,26 +409,27 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: | |||
| 406 | } | 409 | } |
| 407 | 410 | ||
| 408 | // Create TLS region | 411 | // Create TLS region |
| 409 | R_TRY(this->CreateThreadLocalRegion(std::addressof(plr_address))); | 412 | R_TRY(this->CreateThreadLocalRegion(std::addressof(m_plr_address))); |
| 410 | memory_reservation.Commit(); | 413 | memory_reservation.Commit(); |
| 411 | 414 | ||
| 412 | R_RETURN(handle_table.Initialize(capabilities.GetHandleTableSize())); | 415 | R_RETURN(m_handle_table.Initialize(m_capabilities.GetHandleTableSize())); |
| 413 | } | 416 | } |
| 414 | 417 | ||
| 415 | void KProcess::Run(s32 main_thread_priority, u64 stack_size) { | 418 | void KProcess::Run(s32 main_thread_priority, u64 stack_size) { |
| 416 | ASSERT(AllocateMainThreadStack(stack_size) == ResultSuccess); | 419 | ASSERT(this->AllocateMainThreadStack(stack_size) == ResultSuccess); |
| 417 | resource_limit->Reserve(LimitableResource::ThreadCountMax, 1); | 420 | m_resource_limit->Reserve(LimitableResource::ThreadCountMax, 1); |
| 418 | 421 | ||
| 419 | const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)}; | 422 | const std::size_t heap_capacity{m_memory_usage_capacity - |
| 420 | ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError()); | 423 | (m_main_thread_stack_size + m_image_size)}; |
| 424 | ASSERT(!m_page_table.SetMaxHeapSize(heap_capacity).IsError()); | ||
| 421 | 425 | ||
| 422 | ChangeState(State::Running); | 426 | this->ChangeState(State::Running); |
| 423 | 427 | ||
| 424 | SetupMainThread(m_kernel.System(), *this, main_thread_priority, main_thread_stack_top); | 428 | SetupMainThread(m_kernel.System(), *this, main_thread_priority, m_main_thread_stack_top); |
| 425 | } | 429 | } |
| 426 | 430 | ||
| 427 | void KProcess::PrepareForTermination() { | 431 | void KProcess::PrepareForTermination() { |
| 428 | ChangeState(State::Terminating); | 432 | this->ChangeState(State::Terminating); |
| 429 | 433 | ||
| 430 | const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) { | 434 | const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) { |
| 431 | for (auto* thread : in_thread_list) { | 435 | for (auto* thread : in_thread_list) { |
| @@ -445,12 +449,12 @@ void KProcess::PrepareForTermination() { | |||
| 445 | 449 | ||
| 446 | stop_threads(m_kernel.System().GlobalSchedulerContext().GetThreadList()); | 450 | stop_threads(m_kernel.System().GlobalSchedulerContext().GetThreadList()); |
| 447 | 451 | ||
| 448 | this->DeleteThreadLocalRegion(plr_address); | 452 | this->DeleteThreadLocalRegion(m_plr_address); |
| 449 | plr_address = 0; | 453 | m_plr_address = 0; |
| 450 | 454 | ||
| 451 | if (resource_limit) { | 455 | if (m_resource_limit) { |
| 452 | resource_limit->Release(LimitableResource::PhysicalMemoryMax, | 456 | m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, |
| 453 | main_thread_stack_size + image_size); | 457 | m_main_thread_stack_size + m_image_size); |
| 454 | } | 458 | } |
| 455 | 459 | ||
| 456 | ChangeState(State::Terminated); | 460 | ChangeState(State::Terminated); |
| @@ -459,8 +463,8 @@ void KProcess::PrepareForTermination() { | |||
| 459 | void KProcess::Finalize() { | 463 | void KProcess::Finalize() { |
| 460 | // Free all shared memory infos. | 464 | // Free all shared memory infos. |
| 461 | { | 465 | { |
| 462 | auto it = shared_memory_list.begin(); | 466 | auto it = m_shared_memory_list.begin(); |
| 463 | while (it != shared_memory_list.end()) { | 467 | while (it != m_shared_memory_list.end()) { |
| 464 | KSharedMemoryInfo* info = *it; | 468 | KSharedMemoryInfo* info = *it; |
| 465 | KSharedMemory* shmem = info->GetSharedMemory(); | 469 | KSharedMemory* shmem = info->GetSharedMemory(); |
| 466 | 470 | ||
| @@ -470,19 +474,19 @@ void KProcess::Finalize() { | |||
| 470 | 474 | ||
| 471 | shmem->Close(); | 475 | shmem->Close(); |
| 472 | 476 | ||
| 473 | it = shared_memory_list.erase(it); | 477 | it = m_shared_memory_list.erase(it); |
| 474 | KSharedMemoryInfo::Free(m_kernel, info); | 478 | KSharedMemoryInfo::Free(m_kernel, info); |
| 475 | } | 479 | } |
| 476 | } | 480 | } |
| 477 | 481 | ||
| 478 | // Release memory to the resource limit. | 482 | // Release memory to the resource limit. |
| 479 | if (resource_limit != nullptr) { | 483 | if (m_resource_limit != nullptr) { |
| 480 | resource_limit->Close(); | 484 | m_resource_limit->Close(); |
| 481 | resource_limit = nullptr; | 485 | m_resource_limit = nullptr; |
| 482 | } | 486 | } |
| 483 | 487 | ||
| 484 | // Finalize the page table. | 488 | // Finalize the page table. |
| 485 | page_table.Finalize(); | 489 | m_page_table.Finalize(); |
| 486 | 490 | ||
| 487 | // Perform inherited finalization. | 491 | // Perform inherited finalization. |
| 488 | KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize(); | 492 | KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize(); |
| @@ -496,14 +500,14 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) { | |||
| 496 | { | 500 | { |
| 497 | KScopedSchedulerLock sl{m_kernel}; | 501 | KScopedSchedulerLock sl{m_kernel}; |
| 498 | 502 | ||
| 499 | if (auto it = partially_used_tlp_tree.begin(); it != partially_used_tlp_tree.end()) { | 503 | if (auto it = m_partially_used_tlp_tree.begin(); it != m_partially_used_tlp_tree.end()) { |
| 500 | tlr = it->Reserve(); | 504 | tlr = it->Reserve(); |
| 501 | ASSERT(tlr != 0); | 505 | ASSERT(tlr != 0); |
| 502 | 506 | ||
| 503 | if (it->IsAllUsed()) { | 507 | if (it->IsAllUsed()) { |
| 504 | tlp = std::addressof(*it); | 508 | tlp = std::addressof(*it); |
| 505 | partially_used_tlp_tree.erase(it); | 509 | m_partially_used_tlp_tree.erase(it); |
| 506 | fully_used_tlp_tree.insert(*tlp); | 510 | m_fully_used_tlp_tree.insert(*tlp); |
| 507 | } | 511 | } |
| 508 | 512 | ||
| 509 | *out = tlr; | 513 | *out = tlr; |
| @@ -527,9 +531,9 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) { | |||
| 527 | { | 531 | { |
| 528 | KScopedSchedulerLock sl{m_kernel}; | 532 | KScopedSchedulerLock sl{m_kernel}; |
| 529 | if (tlp->IsAllUsed()) { | 533 | if (tlp->IsAllUsed()) { |
| 530 | fully_used_tlp_tree.insert(*tlp); | 534 | m_fully_used_tlp_tree.insert(*tlp); |
| 531 | } else { | 535 | } else { |
| 532 | partially_used_tlp_tree.insert(*tlp); | 536 | m_partially_used_tlp_tree.insert(*tlp); |
| 533 | } | 537 | } |
| 534 | } | 538 | } |
| 535 | 539 | ||
| @@ -547,22 +551,22 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) { | |||
| 547 | KScopedSchedulerLock sl{m_kernel}; | 551 | KScopedSchedulerLock sl{m_kernel}; |
| 548 | 552 | ||
| 549 | // Try to find the page in the partially used list. | 553 | // Try to find the page in the partially used list. |
| 550 | auto it = partially_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize)); | 554 | auto it = m_partially_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize)); |
| 551 | if (it == partially_used_tlp_tree.end()) { | 555 | if (it == m_partially_used_tlp_tree.end()) { |
| 552 | // If we don't find it, it has to be in the fully used list. | 556 | // If we don't find it, it has to be in the fully used list. |
| 553 | it = fully_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize)); | 557 | it = m_fully_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize)); |
| 554 | R_UNLESS(it != fully_used_tlp_tree.end(), ResultInvalidAddress); | 558 | R_UNLESS(it != m_fully_used_tlp_tree.end(), ResultInvalidAddress); |
| 555 | 559 | ||
| 556 | // Release the region. | 560 | // Release the region. |
| 557 | it->Release(addr); | 561 | it->Release(addr); |
| 558 | 562 | ||
| 559 | // Move the page out of the fully used list. | 563 | // Move the page out of the fully used list. |
| 560 | KThreadLocalPage* tlp = std::addressof(*it); | 564 | KThreadLocalPage* tlp = std::addressof(*it); |
| 561 | fully_used_tlp_tree.erase(it); | 565 | m_fully_used_tlp_tree.erase(it); |
| 562 | if (tlp->IsAllFree()) { | 566 | if (tlp->IsAllFree()) { |
| 563 | page_to_free = tlp; | 567 | page_to_free = tlp; |
| 564 | } else { | 568 | } else { |
| 565 | partially_used_tlp_tree.insert(*tlp); | 569 | m_partially_used_tlp_tree.insert(*tlp); |
| 566 | } | 570 | } |
| 567 | } else { | 571 | } else { |
| 568 | // Release the region. | 572 | // Release the region. |
| @@ -571,7 +575,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) { | |||
| 571 | // Handle the all-free case. | 575 | // Handle the all-free case. |
| 572 | KThreadLocalPage* tlp = std::addressof(*it); | 576 | KThreadLocalPage* tlp = std::addressof(*it); |
| 573 | if (tlp->IsAllFree()) { | 577 | if (tlp->IsAllFree()) { |
| 574 | partially_used_tlp_tree.erase(it); | 578 | m_partially_used_tlp_tree.erase(it); |
| 575 | page_to_free = tlp; | 579 | page_to_free = tlp; |
| 576 | } | 580 | } |
| 577 | } | 581 | } |
| @@ -589,11 +593,11 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) { | |||
| 589 | 593 | ||
| 590 | bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size, | 594 | bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size, |
| 591 | DebugWatchpointType type) { | 595 | DebugWatchpointType type) { |
| 592 | const auto watch{std::find_if(watchpoints.begin(), watchpoints.end(), [&](const auto& wp) { | 596 | const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) { |
| 593 | return wp.type == DebugWatchpointType::None; | 597 | return wp.type == DebugWatchpointType::None; |
| 594 | })}; | 598 | })}; |
| 595 | 599 | ||
| 596 | if (watch == watchpoints.end()) { | 600 | if (watch == m_watchpoints.end()) { |
| 597 | return false; | 601 | return false; |
| 598 | } | 602 | } |
| 599 | 603 | ||
| @@ -602,7 +606,7 @@ bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size, | |||
| 602 | watch->type = type; | 606 | watch->type = type; |
| 603 | 607 | ||
| 604 | for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) { | 608 | for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) { |
| 605 | debug_page_refcounts[page]++; | 609 | m_debug_page_refcounts[page]++; |
| 606 | system.Memory().MarkRegionDebug(page, PageSize, true); | 610 | system.Memory().MarkRegionDebug(page, PageSize, true); |
| 607 | } | 611 | } |
| 608 | 612 | ||
| @@ -611,11 +615,11 @@ bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size, | |||
| 611 | 615 | ||
| 612 | bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size, | 616 | bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size, |
| 613 | DebugWatchpointType type) { | 617 | DebugWatchpointType type) { |
| 614 | const auto watch{std::find_if(watchpoints.begin(), watchpoints.end(), [&](const auto& wp) { | 618 | const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) { |
| 615 | return wp.start_address == addr && wp.end_address == addr + size && wp.type == type; | 619 | return wp.start_address == addr && wp.end_address == addr + size && wp.type == type; |
| 616 | })}; | 620 | })}; |
| 617 | 621 | ||
| 618 | if (watch == watchpoints.end()) { | 622 | if (watch == m_watchpoints.end()) { |
| 619 | return false; | 623 | return false; |
| 620 | } | 624 | } |
| 621 | 625 | ||
| @@ -624,8 +628,8 @@ bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size, | |||
| 624 | watch->type = DebugWatchpointType::None; | 628 | watch->type = DebugWatchpointType::None; |
| 625 | 629 | ||
| 626 | for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) { | 630 | for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) { |
| 627 | debug_page_refcounts[page]--; | 631 | m_debug_page_refcounts[page]--; |
| 628 | if (!debug_page_refcounts[page]) { | 632 | if (!m_debug_page_refcounts[page]) { |
| 629 | system.Memory().MarkRegionDebug(page, PageSize, false); | 633 | system.Memory().MarkRegionDebug(page, PageSize, false); |
| 630 | } | 634 | } |
| 631 | } | 635 | } |
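The watchpoint hunks above keep a per-page reference count so a page is only unmarked as a debug region once the last watchpoint covering it is removed. A simplified, self-contained sketch of that bookkeeping (the Memory stub and types are stand-ins, not yuzu's API):

```cpp
#include <cstddef>
#include <cstdint>
#include <map>

using VAddr = std::uint64_t;
constexpr VAddr PageSize = 0x1000;

// Toy stand-in for system.Memory(); a real implementation would change page protections.
struct Memory {
    void MarkRegionDebug(VAddr /*page*/, std::size_t /*size*/, bool /*debug*/) {}
};

// Per-page reference counts; a page stays marked while any watchpoint still covers it.
std::map<VAddr, int> debug_page_refcounts;

void RemoveWatchpointPages(Memory& memory, VAddr addr, std::uint64_t size) {
    for (VAddr page = addr & ~(PageSize - 1); page < addr + size; page += PageSize) {
        if (--debug_page_refcounts[page] == 0) {
            // The last watchpoint on this page is gone, so stop treating it as a debug region.
            memory.MarkRegionDebug(page, PageSize, false);
        }
    }
}

int main() {
    Memory memory;
    debug_page_refcounts[0x1000] = 1; // pretend one watchpoint already covered this page
    RemoveWatchpointPages(memory, 0x1000, 0x10);
}
```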
| @@ -636,7 +640,7 @@ bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size, | |||
| 636 | void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) { | 640 | void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) { |
| 637 | const auto ReprotectSegment = [&](const CodeSet::Segment& segment, | 641 | const auto ReprotectSegment = [&](const CodeSet::Segment& segment, |
| 638 | Svc::MemoryPermission permission) { | 642 | Svc::MemoryPermission permission) { |
| 639 | page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission); | 643 | m_page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission); |
| 640 | }; | 644 | }; |
| 641 | 645 | ||
| 642 | m_kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(), | 646 | m_kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(), |
| @@ -648,35 +652,35 @@ void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) { | |||
| 648 | } | 652 | } |
| 649 | 653 | ||
| 650 | bool KProcess::IsSignaled() const { | 654 | bool KProcess::IsSignaled() const { |
| 651 | ASSERT(m_kernel.GlobalSchedulerContext().IsLocked()); | 655 | ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); |
| 652 | return is_signaled; | 656 | return m_is_signaled; |
| 653 | } | 657 | } |
| 654 | 658 | ||
| 655 | KProcess::KProcess(KernelCore& kernel) | 659 | KProcess::KProcess(KernelCore& kernel) |
| 656 | : KAutoObjectWithSlabHeapAndContainer{kernel}, page_table{m_kernel.System()}, | 660 | : KAutoObjectWithSlabHeapAndContainer{kernel}, m_page_table{m_kernel.System()}, |
| 657 | handle_table{m_kernel}, address_arbiter{m_kernel.System()}, condition_var{m_kernel.System()}, | 661 | m_handle_table{m_kernel}, m_address_arbiter{m_kernel.System()}, |
| 658 | state_lock{m_kernel}, list_lock{m_kernel} {} | 662 | m_condition_var{m_kernel.System()}, m_state_lock{m_kernel}, m_list_lock{m_kernel} {} |
| 659 | 663 | ||
| 660 | KProcess::~KProcess() = default; | 664 | KProcess::~KProcess() = default; |
| 661 | 665 | ||
| 662 | void KProcess::ChangeState(State new_state) { | 666 | void KProcess::ChangeState(State new_state) { |
| 663 | if (state == new_state) { | 667 | if (m_state == new_state) { |
| 664 | return; | 668 | return; |
| 665 | } | 669 | } |
| 666 | 670 | ||
| 667 | state = new_state; | 671 | m_state = new_state; |
| 668 | is_signaled = true; | 672 | m_is_signaled = true; |
| 669 | NotifyAvailable(); | 673 | this->NotifyAvailable(); |
| 670 | } | 674 | } |
| 671 | 675 | ||
| 672 | Result KProcess::AllocateMainThreadStack(std::size_t stack_size) { | 676 | Result KProcess::AllocateMainThreadStack(std::size_t stack_size) { |
| 673 | // Ensure that we haven't already allocated stack. | 677 | // Ensure that we haven't already allocated stack. |
| 674 | ASSERT(main_thread_stack_size == 0); | 678 | ASSERT(m_main_thread_stack_size == 0); |
| 675 | 679 | ||
| 676 | // Ensure that we're allocating a valid stack. | 680 | // Ensure that we're allocating a valid stack. |
| 677 | stack_size = Common::AlignUp(stack_size, PageSize); | 681 | stack_size = Common::AlignUp(stack_size, PageSize); |
| 678 | // R_UNLESS(stack_size + image_size <= m_max_process_memory, ResultOutOfMemory); | 682 | // R_UNLESS(stack_size + image_size <= m_max_process_memory, ResultOutOfMemory); |
| 679 | R_UNLESS(stack_size + image_size >= image_size, ResultOutOfMemory); | 683 | R_UNLESS(stack_size + m_image_size >= m_image_size, ResultOutOfMemory); |
| 680 | 684 | ||
| 681 | // Place a tentative reservation of memory for our new stack. | 685 | // Place a tentative reservation of memory for our new stack. |
| 682 | KScopedResourceReservation mem_reservation(this, Svc::LimitableResource::PhysicalMemoryMax, | 686 | KScopedResourceReservation mem_reservation(this, Svc::LimitableResource::PhysicalMemoryMax, |
| @@ -686,11 +690,11 @@ Result KProcess::AllocateMainThreadStack(std::size_t stack_size) { | |||
| 686 | // Allocate and map our stack. | 690 | // Allocate and map our stack. |
| 687 | if (stack_size) { | 691 | if (stack_size) { |
| 688 | KProcessAddress stack_bottom; | 692 | KProcessAddress stack_bottom; |
| 689 | R_TRY(page_table.MapPages(std::addressof(stack_bottom), stack_size / PageSize, | 693 | R_TRY(m_page_table.MapPages(std::addressof(stack_bottom), stack_size / PageSize, |
| 690 | KMemoryState::Stack, KMemoryPermission::UserReadWrite)); | 694 | KMemoryState::Stack, KMemoryPermission::UserReadWrite)); |
| 691 | 695 | ||
| 692 | main_thread_stack_top = stack_bottom + stack_size; | 696 | m_main_thread_stack_top = stack_bottom + stack_size; |
| 693 | main_thread_stack_size = stack_size; | 697 | m_main_thread_stack_size = stack_size; |
| 694 | } | 698 | } |
| 695 | 699 | ||
| 696 | // We succeeded! Commit our memory reservation. | 700 | // We succeeded! Commit our memory reservation. |
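AllocateMainThreadStack above aligns the requested stack size up to the page size and then guards the stack_size + m_image_size sum against unsigned overflow before reserving and mapping memory. A standalone sketch of that align-and-check step (PageSize and the sizes are illustrative):

```cpp
#include <cstddef>

constexpr std::size_t PageSize = 0x1000;

// Round size up to the next multiple of align (align must be a power of two).
constexpr std::size_t AlignUp(std::size_t size, std::size_t align) {
    return (size + align - 1) & ~(align - 1);
}

// Mirrors the R_UNLESS(stack_size + m_image_size >= m_image_size, ...) guard:
// fail if the aligned stack plus the image size would wrap around.
bool ValidateStackSize(std::size_t requested_stack, std::size_t image_size,
                       std::size_t* out_stack_size) {
    const std::size_t stack_size = AlignUp(requested_stack, PageSize);
    if (stack_size + image_size < image_size) {
        return false; // unsigned wrap-around detected
    }
    *out_stack_size = stack_size;
    return true;
}

int main() {
    std::size_t stack_size = 0;
    return ValidateStackSize(0x1234, 0x200000, &stack_size) && stack_size == 0x2000 ? 0 : 1;
}
```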
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index a19d9b09d..7b7a971b8 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
| @@ -107,66 +107,76 @@ public: | |||
| 107 | 107 | ||
| 108 | /// Gets a reference to the process' page table. | 108 | /// Gets a reference to the process' page table. |
| 109 | KPageTable& PageTable() { | 109 | KPageTable& PageTable() { |
| 110 | return page_table; | 110 | return m_page_table; |
| 111 | } | 111 | } |
| 112 | 112 | ||
| 113 | /// Gets const a reference to the process' page table. | 113 | /// Gets const a reference to the process' page table. |
| 114 | const KPageTable& PageTable() const { | 114 | const KPageTable& PageTable() const { |
| 115 | return page_table; | 115 | return m_page_table; |
| 116 | } | ||
| 117 | |||
| 118 | /// Gets a reference to the process' page table. | ||
| 119 | KPageTable& GetPageTable() { | ||
| 120 | return m_page_table; | ||
| 121 | } | ||
| 122 | |||
| 123 | /// Gets const a reference to the process' page table. | ||
| 124 | const KPageTable& GetPageTable() const { | ||
| 125 | return m_page_table; | ||
| 116 | } | 126 | } |
| 117 | 127 | ||
| 118 | /// Gets a reference to the process' handle table. | 128 | /// Gets a reference to the process' handle table. |
| 119 | KHandleTable& GetHandleTable() { | 129 | KHandleTable& GetHandleTable() { |
| 120 | return handle_table; | 130 | return m_handle_table; |
| 121 | } | 131 | } |
| 122 | 132 | ||
| 123 | /// Gets a const reference to the process' handle table. | 133 | /// Gets a const reference to the process' handle table. |
| 124 | const KHandleTable& GetHandleTable() const { | 134 | const KHandleTable& GetHandleTable() const { |
| 125 | return handle_table; | 135 | return m_handle_table; |
| 126 | } | 136 | } |
| 127 | 137 | ||
| 128 | Result SignalToAddress(VAddr address) { | 138 | Result SignalToAddress(VAddr address) { |
| 129 | return condition_var.SignalToAddress(address); | 139 | return m_condition_var.SignalToAddress(address); |
| 130 | } | 140 | } |
| 131 | 141 | ||
| 132 | Result WaitForAddress(Handle handle, VAddr address, u32 tag) { | 142 | Result WaitForAddress(Handle handle, VAddr address, u32 tag) { |
| 133 | return condition_var.WaitForAddress(handle, address, tag); | 143 | return m_condition_var.WaitForAddress(handle, address, tag); |
| 134 | } | 144 | } |
| 135 | 145 | ||
| 136 | void SignalConditionVariable(u64 cv_key, int32_t count) { | 146 | void SignalConditionVariable(u64 cv_key, int32_t count) { |
| 137 | return condition_var.Signal(cv_key, count); | 147 | return m_condition_var.Signal(cv_key, count); |
| 138 | } | 148 | } |
| 139 | 149 | ||
| 140 | Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) { | 150 | Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) { |
| 141 | R_RETURN(condition_var.Wait(address, cv_key, tag, ns)); | 151 | R_RETURN(m_condition_var.Wait(address, cv_key, tag, ns)); |
| 142 | } | 152 | } |
| 143 | 153 | ||
| 144 | Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) { | 154 | Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) { |
| 145 | R_RETURN(address_arbiter.SignalToAddress(address, signal_type, value, count)); | 155 | R_RETURN(m_address_arbiter.SignalToAddress(address, signal_type, value, count)); |
| 146 | } | 156 | } |
| 147 | 157 | ||
| 148 | Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value, | 158 | Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value, |
| 149 | s64 timeout) { | 159 | s64 timeout) { |
| 150 | R_RETURN(address_arbiter.WaitForAddress(address, arb_type, value, timeout)); | 160 | R_RETURN(m_address_arbiter.WaitForAddress(address, arb_type, value, timeout)); |
| 151 | } | 161 | } |
| 152 | 162 | ||
| 153 | VAddr GetProcessLocalRegionAddress() const { | 163 | VAddr GetProcessLocalRegionAddress() const { |
| 154 | return plr_address; | 164 | return m_plr_address; |
| 155 | } | 165 | } |
| 156 | 166 | ||
| 157 | /// Gets the current status of the process | 167 | /// Gets the current status of the process |
| 158 | State GetState() const { | 168 | State GetState() const { |
| 159 | return state; | 169 | return m_state; |
| 160 | } | 170 | } |
| 161 | 171 | ||
| 162 | /// Gets the unique ID that identifies this particular process. | 172 | /// Gets the unique ID that identifies this particular process. |
| 163 | u64 GetProcessID() const { | 173 | u64 GetProcessId() const { |
| 164 | return process_id; | 174 | return m_process_id; |
| 165 | } | 175 | } |
| 166 | 176 | ||
| 167 | /// Gets the program ID corresponding to this process. | 177 | /// Gets the program ID corresponding to this process. |
| 168 | u64 GetProgramID() const { | 178 | u64 GetProgramId() const { |
| 169 | return program_id; | 179 | return m_program_id; |
| 170 | } | 180 | } |
| 171 | 181 | ||
| 172 | /// Gets the resource limit descriptor for this process | 182 | /// Gets the resource limit descriptor for this process |
| @@ -174,7 +184,7 @@ public: | |||
| 174 | 184 | ||
| 175 | /// Gets the ideal CPU core ID for this process | 185 | /// Gets the ideal CPU core ID for this process |
| 176 | u8 GetIdealCoreId() const { | 186 | u8 GetIdealCoreId() const { |
| 177 | return ideal_core; | 187 | return m_ideal_core; |
| 178 | } | 188 | } |
| 179 | 189 | ||
| 180 | /// Checks if the specified thread priority is valid. | 190 | /// Checks if the specified thread priority is valid. |
| @@ -184,17 +194,17 @@ public: | |||
| 184 | 194 | ||
| 185 | /// Gets the bitmask of allowed cores that this process' threads can run on. | 195 | /// Gets the bitmask of allowed cores that this process' threads can run on. |
| 186 | u64 GetCoreMask() const { | 196 | u64 GetCoreMask() const { |
| 187 | return capabilities.GetCoreMask(); | 197 | return m_capabilities.GetCoreMask(); |
| 188 | } | 198 | } |
| 189 | 199 | ||
| 190 | /// Gets the bitmask of allowed thread priorities. | 200 | /// Gets the bitmask of allowed thread priorities. |
| 191 | u64 GetPriorityMask() const { | 201 | u64 GetPriorityMask() const { |
| 192 | return capabilities.GetPriorityMask(); | 202 | return m_capabilities.GetPriorityMask(); |
| 193 | } | 203 | } |
| 194 | 204 | ||
| 195 | /// Gets the amount of secure memory to allocate for memory management. | 205 | /// Gets the amount of secure memory to allocate for memory management. |
| 196 | u32 GetSystemResourceSize() const { | 206 | u32 GetSystemResourceSize() const { |
| 197 | return system_resource_size; | 207 | return m_system_resource_size; |
| 198 | } | 208 | } |
| 199 | 209 | ||
| 200 | /// Gets the amount of secure memory currently in use for memory management. | 210 | /// Gets the amount of secure memory currently in use for memory management. |
| @@ -214,67 +224,67 @@ public: | |||
| 214 | 224 | ||
| 215 | /// Whether this process is an AArch64 or AArch32 process. | 225 | /// Whether this process is an AArch64 or AArch32 process. |
| 216 | bool Is64BitProcess() const { | 226 | bool Is64BitProcess() const { |
| 217 | return is_64bit_process; | 227 | return m_is_64bit_process; |
| 218 | } | 228 | } |
| 219 | 229 | ||
| 220 | [[nodiscard]] bool IsSuspended() const { | 230 | bool IsSuspended() const { |
| 221 | return is_suspended; | 231 | return m_is_suspended; |
| 222 | } | 232 | } |
| 223 | 233 | ||
| 224 | void SetSuspended(bool suspended) { | 234 | void SetSuspended(bool suspended) { |
| 225 | is_suspended = suspended; | 235 | m_is_suspended = suspended; |
| 226 | } | 236 | } |
| 227 | 237 | ||
| 228 | /// Gets the total running time of the process instance in ticks. | 238 | /// Gets the total running time of the process instance in ticks. |
| 229 | u64 GetCPUTimeTicks() const { | 239 | u64 GetCPUTimeTicks() const { |
| 230 | return total_process_running_time_ticks; | 240 | return m_total_process_running_time_ticks; |
| 231 | } | 241 | } |
| 232 | 242 | ||
| 233 | /// Updates the total running time, adding the given ticks to it. | 243 | /// Updates the total running time, adding the given ticks to it. |
| 234 | void UpdateCPUTimeTicks(u64 ticks) { | 244 | void UpdateCPUTimeTicks(u64 ticks) { |
| 235 | total_process_running_time_ticks += ticks; | 245 | m_total_process_running_time_ticks += ticks; |
| 236 | } | 246 | } |
| 237 | 247 | ||
| 238 | /// Gets the process schedule count, used for thread yielding | 248 | /// Gets the process schedule count, used for thread yielding |
| 239 | s64 GetScheduledCount() const { | 249 | s64 GetScheduledCount() const { |
| 240 | return schedule_count; | 250 | return m_schedule_count; |
| 241 | } | 251 | } |
| 242 | 252 | ||
| 243 | /// Increments the process schedule count, used for thread yielding. | 253 | /// Increments the process schedule count, used for thread yielding. |
| 244 | void IncrementScheduledCount() { | 254 | void IncrementScheduledCount() { |
| 245 | ++schedule_count; | 255 | ++m_schedule_count; |
| 246 | } | 256 | } |
| 247 | 257 | ||
| 248 | void IncrementRunningThreadCount(); | 258 | void IncrementRunningThreadCount(); |
| 249 | void DecrementRunningThreadCount(); | 259 | void DecrementRunningThreadCount(); |
| 250 | 260 | ||
| 251 | void SetRunningThread(s32 core, KThread* thread, u64 idle_count) { | 261 | void SetRunningThread(s32 core, KThread* thread, u64 idle_count) { |
| 252 | running_threads[core] = thread; | 262 | m_running_threads[core] = thread; |
| 253 | running_thread_idle_counts[core] = idle_count; | 263 | m_running_thread_idle_counts[core] = idle_count; |
| 254 | } | 264 | } |
| 255 | 265 | ||
| 256 | void ClearRunningThread(KThread* thread) { | 266 | void ClearRunningThread(KThread* thread) { |
| 257 | for (size_t i = 0; i < running_threads.size(); ++i) { | 267 | for (size_t i = 0; i < m_running_threads.size(); ++i) { |
| 258 | if (running_threads[i] == thread) { | 268 | if (m_running_threads[i] == thread) { |
| 259 | running_threads[i] = nullptr; | 269 | m_running_threads[i] = nullptr; |
| 260 | } | 270 | } |
| 261 | } | 271 | } |
| 262 | } | 272 | } |
| 263 | 273 | ||
| 264 | [[nodiscard]] KThread* GetRunningThread(s32 core) const { | 274 | [[nodiscard]] KThread* GetRunningThread(s32 core) const { |
| 265 | return running_threads[core]; | 275 | return m_running_threads[core]; |
| 266 | } | 276 | } |
| 267 | 277 | ||
| 268 | bool ReleaseUserException(KThread* thread); | 278 | bool ReleaseUserException(KThread* thread); |
| 269 | 279 | ||
| 270 | [[nodiscard]] KThread* GetPinnedThread(s32 core_id) const { | 280 | [[nodiscard]] KThread* GetPinnedThread(s32 core_id) const { |
| 271 | ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES)); | 281 | ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES)); |
| 272 | return pinned_threads[core_id]; | 282 | return m_pinned_threads[core_id]; |
| 273 | } | 283 | } |
| 274 | 284 | ||
| 275 | /// Gets 8 bytes of random data for svcGetInfo RandomEntropy | 285 | /// Gets 8 bytes of random data for svcGetInfo RandomEntropy |
| 276 | u64 GetRandomEntropy(std::size_t index) const { | 286 | u64 GetRandomEntropy(std::size_t index) const { |
| 277 | return random_entropy.at(index); | 287 | return m_random_entropy.at(index); |
| 278 | } | 288 | } |
| 279 | 289 | ||
| 280 | /// Retrieves the total physical memory available to this process in bytes. | 290 | /// Retrieves the total physical memory available to this process in bytes. |
| @@ -293,7 +303,7 @@ public: | |||
| 293 | 303 | ||
| 294 | /// Gets the list of all threads created with this process as their owner. | 304 | /// Gets the list of all threads created with this process as their owner. |
| 295 | std::list<KThread*>& GetThreadList() { | 305 | std::list<KThread*>& GetThreadList() { |
| 296 | return thread_list; | 306 | return m_thread_list; |
| 297 | } | 307 | } |
| 298 | 308 | ||
| 299 | /// Registers a thread as being created under this process, | 309 | /// Registers a thread as being created under this process, |
| @@ -345,15 +355,15 @@ public: | |||
| 345 | void LoadModule(CodeSet code_set, VAddr base_addr); | 355 | void LoadModule(CodeSet code_set, VAddr base_addr); |
| 346 | 356 | ||
| 347 | bool IsInitialized() const override { | 357 | bool IsInitialized() const override { |
| 348 | return is_initialized; | 358 | return m_is_initialized; |
| 349 | } | 359 | } |
| 350 | 360 | ||
| 351 | static void PostDestroy([[maybe_unused]] uintptr_t arg) {} | 361 | static void PostDestroy(uintptr_t arg) {} |
| 352 | 362 | ||
| 353 | void Finalize() override; | 363 | void Finalize() override; |
| 354 | 364 | ||
| 355 | u64 GetId() const override { | 365 | u64 GetId() const override { |
| 356 | return GetProcessID(); | 366 | return GetProcessId(); |
| 357 | } | 367 | } |
| 358 | 368 | ||
| 359 | bool IsSignaled() const override; | 369 | bool IsSignaled() const override; |
| @@ -367,7 +377,7 @@ public: | |||
| 367 | void UnpinThread(KThread* thread); | 377 | void UnpinThread(KThread* thread); |
| 368 | 378 | ||
| 369 | KLightLock& GetStateLock() { | 379 | KLightLock& GetStateLock() { |
| 370 | return state_lock; | 380 | return m_state_lock; |
| 371 | } | 381 | } |
| 372 | 382 | ||
| 373 | Result AddSharedMemory(KSharedMemory* shmem, VAddr address, size_t size); | 383 | Result AddSharedMemory(KSharedMemory* shmem, VAddr address, size_t size); |
| @@ -392,7 +402,7 @@ public: | |||
| 392 | bool RemoveWatchpoint(Core::System& system, VAddr addr, u64 size, DebugWatchpointType type); | 402 | bool RemoveWatchpoint(Core::System& system, VAddr addr, u64 size, DebugWatchpointType type); |
| 393 | 403 | ||
| 394 | const std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>& GetWatchpoints() const { | 404 | const std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>& GetWatchpoints() const { |
| 395 | return watchpoints; | 405 | return m_watchpoints; |
| 396 | } | 406 | } |
| 397 | 407 | ||
| 398 | const std::string& GetName() { | 408 | const std::string& GetName() { |
| @@ -403,23 +413,23 @@ private: | |||
| 403 | void PinThread(s32 core_id, KThread* thread) { | 413 | void PinThread(s32 core_id, KThread* thread) { |
| 404 | ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES)); | 414 | ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES)); |
| 405 | ASSERT(thread != nullptr); | 415 | ASSERT(thread != nullptr); |
| 406 | ASSERT(pinned_threads[core_id] == nullptr); | 416 | ASSERT(m_pinned_threads[core_id] == nullptr); |
| 407 | pinned_threads[core_id] = thread; | 417 | m_pinned_threads[core_id] = thread; |
| 408 | } | 418 | } |
| 409 | 419 | ||
| 410 | void UnpinThread(s32 core_id, KThread* thread) { | 420 | void UnpinThread(s32 core_id, KThread* thread) { |
| 411 | ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES)); | 421 | ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES)); |
| 412 | ASSERT(thread != nullptr); | 422 | ASSERT(thread != nullptr); |
| 413 | ASSERT(pinned_threads[core_id] == thread); | 423 | ASSERT(m_pinned_threads[core_id] == thread); |
| 414 | pinned_threads[core_id] = nullptr; | 424 | m_pinned_threads[core_id] = nullptr; |
| 415 | } | 425 | } |
| 416 | 426 | ||
| 417 | void FinalizeHandleTable() { | 427 | void FinalizeHandleTable() { |
| 418 | // Finalize the table. | 428 | // Finalize the table. |
| 419 | handle_table.Finalize(); | 429 | m_handle_table.Finalize(); |
| 420 | 430 | ||
| 421 | // Note that the table is finalized. | 431 | // Note that the table is finalized. |
| 422 | is_handle_table_initialized = false; | 432 | m_is_handle_table_initialized = false; |
| 423 | } | 433 | } |
| 424 | 434 | ||
| 425 | void ChangeState(State new_state); | 435 | void ChangeState(State new_state); |
| @@ -428,107 +438,107 @@ private: | |||
| 428 | Result AllocateMainThreadStack(std::size_t stack_size); | 438 | Result AllocateMainThreadStack(std::size_t stack_size); |
| 429 | 439 | ||
| 430 | /// Memory manager for this process | 440 | /// Memory manager for this process |
| 431 | KPageTable page_table; | 441 | KPageTable m_page_table; |
| 432 | 442 | ||
| 433 | /// Current status of the process | 443 | /// Current status of the process |
| 434 | State state{}; | 444 | State m_state{}; |
| 435 | 445 | ||
| 436 | /// The ID of this process | 446 | /// The ID of this process |
| 437 | u64 process_id = 0; | 447 | u64 m_process_id = 0; |
| 438 | 448 | ||
| 439 | /// Title ID corresponding to the process | 449 | /// Title ID corresponding to the process |
| 440 | u64 program_id = 0; | 450 | u64 m_program_id = 0; |
| 441 | 451 | ||
| 442 | /// Specifies additional memory to be reserved for the process's memory management by the | 452 | /// Specifies additional memory to be reserved for the process's memory management by the |
| 443 | /// system. When this is non-zero, secure memory is allocated and used for page table allocation | 453 | /// system. When this is non-zero, secure memory is allocated and used for page table allocation |
| 444 | /// instead of using the normal global page tables/memory block management. | 454 | /// instead of using the normal global page tables/memory block management. |
| 445 | u32 system_resource_size = 0; | 455 | u32 m_system_resource_size = 0; |
| 446 | 456 | ||
| 447 | /// Resource limit descriptor for this process | 457 | /// Resource limit descriptor for this process |
| 448 | KResourceLimit* resource_limit{}; | 458 | KResourceLimit* m_resource_limit{}; |
| 449 | 459 | ||
| 450 | VAddr system_resource_address{}; | 460 | VAddr m_system_resource_address{}; |
| 451 | 461 | ||
| 452 | /// The ideal CPU core for this process, threads are scheduled on this core by default. | 462 | /// The ideal CPU core for this process, threads are scheduled on this core by default. |
| 453 | u8 ideal_core = 0; | 463 | u8 m_ideal_core = 0; |
| 454 | 464 | ||
| 455 | /// Contains the parsed process capability descriptors. | 465 | /// Contains the parsed process capability descriptors. |
| 456 | ProcessCapabilities capabilities; | 466 | ProcessCapabilities m_capabilities; |
| 457 | 467 | ||
| 458 | /// Whether or not this process is AArch64, or AArch32. | 468 | /// Whether or not this process is AArch64, or AArch32. |
| 459 | /// By default, we currently assume this is true, unless otherwise | 469 | /// By default, we currently assume this is true, unless otherwise |
| 460 | /// specified by metadata provided to the process during loading. | 470 | /// specified by metadata provided to the process during loading. |
| 461 | bool is_64bit_process = true; | 471 | bool m_is_64bit_process = true; |
| 462 | 472 | ||
| 463 | /// Total running time for the process in ticks. | 473 | /// Total running time for the process in ticks. |
| 464 | std::atomic<u64> total_process_running_time_ticks = 0; | 474 | std::atomic<u64> m_total_process_running_time_ticks = 0; |
| 465 | 475 | ||
| 466 | /// Per-process handle table for storing created object handles in. | 476 | /// Per-process handle table for storing created object handles in. |
| 467 | KHandleTable handle_table; | 477 | KHandleTable m_handle_table; |
| 468 | 478 | ||
| 469 | /// Per-process address arbiter. | 479 | /// Per-process address arbiter. |
| 470 | KAddressArbiter address_arbiter; | 480 | KAddressArbiter m_address_arbiter; |
| 471 | 481 | ||
| 472 | /// The per-process mutex lock instance used for handling various | 482 | /// The per-process mutex lock instance used for handling various |
| 473 | /// forms of services, such as lock arbitration, and condition | 483 | /// forms of services, such as lock arbitration, and condition |
| 474 | /// variable related facilities. | 484 | /// variable related facilities. |
| 475 | KConditionVariable condition_var; | 485 | KConditionVariable m_condition_var; |
| 476 | 486 | ||
| 477 | /// Address indicating the location of the process' dedicated TLS region. | 487 | /// Address indicating the location of the process' dedicated TLS region. |
| 478 | VAddr plr_address = 0; | 488 | VAddr m_plr_address = 0; |
| 479 | 489 | ||
| 480 | /// Random values for svcGetInfo RandomEntropy | 490 | /// Random values for svcGetInfo RandomEntropy |
| 481 | std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{}; | 491 | std::array<u64, RANDOM_ENTROPY_SIZE> m_random_entropy{}; |
| 482 | 492 | ||
| 483 | /// List of threads that are running with this process as their owner. | 493 | /// List of threads that are running with this process as their owner. |
| 484 | std::list<KThread*> thread_list; | 494 | std::list<KThread*> m_thread_list; |
| 485 | 495 | ||
| 486 | /// List of shared memory that are running with this process as their owner. | 496 | /// List of shared memory that are running with this process as their owner. |
| 487 | std::list<KSharedMemoryInfo*> shared_memory_list; | 497 | std::list<KSharedMemoryInfo*> m_shared_memory_list; |
| 488 | 498 | ||
| 489 | /// Address of the top of the main thread's stack | 499 | /// Address of the top of the main thread's stack |
| 490 | VAddr main_thread_stack_top{}; | 500 | VAddr m_main_thread_stack_top{}; |
| 491 | 501 | ||
| 492 | /// Size of the main thread's stack | 502 | /// Size of the main thread's stack |
| 493 | std::size_t main_thread_stack_size{}; | 503 | std::size_t m_main_thread_stack_size{}; |
| 494 | 504 | ||
| 495 | /// Memory usage capacity for the process | 505 | /// Memory usage capacity for the process |
| 496 | std::size_t memory_usage_capacity{}; | 506 | std::size_t m_memory_usage_capacity{}; |
| 497 | 507 | ||
| 498 | /// Process total image size | 508 | /// Process total image size |
| 499 | std::size_t image_size{}; | 509 | std::size_t m_image_size{}; |
| 500 | 510 | ||
| 501 | /// Schedule count of this process | 511 | /// Schedule count of this process |
| 502 | s64 schedule_count{}; | 512 | s64 m_schedule_count{}; |
| 503 | 513 | ||
| 504 | size_t memory_release_hint{}; | 514 | size_t m_memory_release_hint{}; |
| 505 | 515 | ||
| 506 | std::string name{}; | 516 | std::string name{}; |
| 507 | 517 | ||
| 508 | bool is_signaled{}; | 518 | bool m_is_signaled{}; |
| 509 | bool is_suspended{}; | 519 | bool m_is_suspended{}; |
| 510 | bool is_immortal{}; | 520 | bool m_is_immortal{}; |
| 511 | bool is_handle_table_initialized{}; | 521 | bool m_is_handle_table_initialized{}; |
| 512 | bool is_initialized{}; | 522 | bool m_is_initialized{}; |
| 513 | 523 | ||
| 514 | std::atomic<u16> num_running_threads{}; | 524 | std::atomic<u16> m_num_running_threads{}; |
| 515 | 525 | ||
| 516 | std::array<KThread*, Core::Hardware::NUM_CPU_CORES> running_threads{}; | 526 | std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_running_threads{}; |
| 517 | std::array<u64, Core::Hardware::NUM_CPU_CORES> running_thread_idle_counts{}; | 527 | std::array<u64, Core::Hardware::NUM_CPU_CORES> m_running_thread_idle_counts{}; |
| 518 | std::array<KThread*, Core::Hardware::NUM_CPU_CORES> pinned_threads{}; | 528 | std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_pinned_threads{}; |
| 519 | std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS> watchpoints{}; | 529 | std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS> m_watchpoints{}; |
| 520 | std::map<VAddr, u64> debug_page_refcounts; | 530 | std::map<VAddr, u64> m_debug_page_refcounts; |
| 521 | 531 | ||
| 522 | KThread* exception_thread{}; | 532 | KThread* m_exception_thread{}; |
| 523 | 533 | ||
| 524 | KLightLock state_lock; | 534 | KLightLock m_state_lock; |
| 525 | KLightLock list_lock; | 535 | KLightLock m_list_lock; |
| 526 | 536 | ||
| 527 | using TLPTree = | 537 | using TLPTree = |
| 528 | Common::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>; | 538 | Common::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>; |
| 529 | using TLPIterator = TLPTree::iterator; | 539 | using TLPIterator = TLPTree::iterator; |
| 530 | TLPTree fully_used_tlp_tree; | 540 | TLPTree m_fully_used_tlp_tree; |
| 531 | TLPTree partially_used_tlp_tree; | 541 | TLPTree m_partially_used_tlp_tree; |
| 532 | }; | 542 | }; |
| 533 | 543 | ||
| 534 | } // namespace Kernel | 544 | } // namespace Kernel |
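The k_process.h changes above are dominated by mechanical renames (private members gain an m_ prefix, ID-suffixed accessors become Id-cased), but the pinned-thread helpers give a good picture of the per-core bookkeeping involved. Below is a minimal, self-contained sketch of that pattern; Thread, PinnedThreads and NUM_CPU_CORES here are stand-ins for illustration only, not the yuzu types.

#include <array>
#include <cassert>
#include <cstddef>

struct Thread {};                         // stand-in for Kernel::KThread
constexpr std::size_t NUM_CPU_CORES = 4;  // stand-in constant

class PinnedThreads {
public:
    // Mirrors the PinThread shape above: each core holds at most one pinned thread.
    void Pin(std::size_t core_id, Thread* thread) {
        assert(core_id < NUM_CPU_CORES);
        assert(thread != nullptr);
        assert(m_pinned_threads[core_id] == nullptr);
        m_pinned_threads[core_id] = thread;
    }

    // Mirrors UnpinThread: the slot must currently hold the thread being unpinned.
    void Unpin(std::size_t core_id, Thread* thread) {
        assert(core_id < NUM_CPU_CORES);
        assert(m_pinned_threads[core_id] == thread);
        m_pinned_threads[core_id] = nullptr;
    }

    Thread* Get(std::size_t core_id) const {
        return m_pinned_threads[core_id];
    }

private:
    std::array<Thread*, NUM_CPU_CORES> m_pinned_threads{};
};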
diff --git a/src/core/hle/kernel/svc/svc_info.cpp b/src/core/hle/kernel/svc/svc_info.cpp index 7d94347c5..04b6d6964 100644 --- a/src/core/hle/kernel/svc/svc_info.cpp +++ b/src/core/hle/kernel/svc/svc_info.cpp | |||
| @@ -103,7 +103,7 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle | |||
| 103 | R_SUCCEED(); | 103 | R_SUCCEED(); |
| 104 | 104 | ||
| 105 | case InfoType::ProgramId: | 105 | case InfoType::ProgramId: |
| 106 | *result = process->GetProgramID(); | 106 | *result = process->GetProgramId(); |
| 107 | R_SUCCEED(); | 107 | R_SUCCEED(); |
| 108 | 108 | ||
| 109 | case InfoType::UserExceptionContextAddress: | 109 | case InfoType::UserExceptionContextAddress: |
diff --git a/src/core/hle/kernel/svc/svc_process.cpp b/src/core/hle/kernel/svc/svc_process.cpp index e4149fba9..b538c37e7 100644 --- a/src/core/hle/kernel/svc/svc_process.cpp +++ b/src/core/hle/kernel/svc/svc_process.cpp | |||
| @@ -11,7 +11,7 @@ namespace Kernel::Svc { | |||
| 11 | void ExitProcess(Core::System& system) { | 11 | void ExitProcess(Core::System& system) { |
| 12 | auto* current_process = GetCurrentProcessPointer(system.Kernel()); | 12 | auto* current_process = GetCurrentProcessPointer(system.Kernel()); |
| 13 | 13 | ||
| 14 | LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID()); | 14 | LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessId()); |
| 15 | ASSERT_MSG(current_process->GetState() == KProcess::State::Running, | 15 | ASSERT_MSG(current_process->GetState() == KProcess::State::Running, |
| 16 | "Process has already exited"); | 16 | "Process has already exited"); |
| 17 | 17 | ||
| @@ -80,7 +80,7 @@ Result GetProcessList(Core::System& system, s32* out_num_processes, VAddr out_pr | |||
| 80 | std::min(static_cast<std::size_t>(out_process_ids_size), num_processes); | 80 | std::min(static_cast<std::size_t>(out_process_ids_size), num_processes); |
| 81 | 81 | ||
| 82 | for (std::size_t i = 0; i < copy_amount; ++i) { | 82 | for (std::size_t i = 0; i < copy_amount; ++i) { |
| 83 | memory.Write64(out_process_ids, process_list[i]->GetProcessID()); | 83 | memory.Write64(out_process_ids, process_list[i]->GetProcessId()); |
| 84 | out_process_ids += sizeof(u64); | 84 | out_process_ids += sizeof(u64); |
| 85 | } | 85 | } |
| 86 | 86 | ||
diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp index f17df5124..deeca925d 100644 --- a/src/core/hle/service/am/am.cpp +++ b/src/core/hle/service/am/am.cpp | |||
| @@ -79,7 +79,7 @@ IWindowController::IWindowController(Core::System& system_) | |||
| 79 | IWindowController::~IWindowController() = default; | 79 | IWindowController::~IWindowController() = default; |
| 80 | 80 | ||
| 81 | void IWindowController::GetAppletResourceUserId(HLERequestContext& ctx) { | 81 | void IWindowController::GetAppletResourceUserId(HLERequestContext& ctx) { |
| 82 | const u64 process_id = system.ApplicationProcess()->GetProcessID(); | 82 | const u64 process_id = system.ApplicationProcess()->GetProcessId(); |
| 83 | 83 | ||
| 84 | LOG_DEBUG(Service_AM, "called. Process ID=0x{:016X}", process_id); | 84 | LOG_DEBUG(Service_AM, "called. Process ID=0x{:016X}", process_id); |
| 85 | 85 | ||
diff --git a/src/core/hle/service/glue/arp.cpp b/src/core/hle/service/glue/arp.cpp index 929dcca0d..ed6fcb5f6 100644 --- a/src/core/hle/service/glue/arp.cpp +++ b/src/core/hle/service/glue/arp.cpp | |||
| @@ -18,14 +18,14 @@ namespace { | |||
| 18 | std::optional<u64> GetTitleIDForProcessID(const Core::System& system, u64 process_id) { | 18 | std::optional<u64> GetTitleIDForProcessID(const Core::System& system, u64 process_id) { |
| 19 | const auto& list = system.Kernel().GetProcessList(); | 19 | const auto& list = system.Kernel().GetProcessList(); |
| 20 | const auto iter = std::find_if(list.begin(), list.end(), [&process_id](const auto& process) { | 20 | const auto iter = std::find_if(list.begin(), list.end(), [&process_id](const auto& process) { |
| 21 | return process->GetProcessID() == process_id; | 21 | return process->GetProcessId() == process_id; |
| 22 | }); | 22 | }); |
| 23 | 23 | ||
| 24 | if (iter == list.end()) { | 24 | if (iter == list.end()) { |
| 25 | return std::nullopt; | 25 | return std::nullopt; |
| 26 | } | 26 | } |
| 27 | 27 | ||
| 28 | return (*iter)->GetProgramID(); | 28 | return (*iter)->GetProgramId(); |
| 29 | } | 29 | } |
| 30 | } // Anonymous namespace | 30 | } // Anonymous namespace |
| 31 | 31 | ||
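The arp lookup above is a plain find-if over the kernel's process list, keyed by the renamed GetProcessId()/GetProgramId() accessors. A self-contained sketch of the same pattern follows, using a toy Process value type rather than Kernel::KProcess pointers; names and fields are illustrative assumptions.

#include <algorithm>
#include <cstdint>
#include <optional>
#include <vector>

struct Process {                    // stand-in for Kernel::KProcess
    std::uint64_t m_process_id;
    std::uint64_t m_program_id;
    std::uint64_t GetProcessId() const { return m_process_id; }
    std::uint64_t GetProgramId() const { return m_program_id; }
};

// Returns the program (title) ID owned by the process with the given process ID,
// or std::nullopt if no such process exists.
std::optional<std::uint64_t> GetTitleIdForProcessId(const std::vector<Process>& list,
                                                    std::uint64_t process_id) {
    const auto iter = std::find_if(list.begin(), list.end(), [&](const Process& proc) {
        return proc.GetProcessId() == process_id;
    });
    if (iter == list.end()) {
        return std::nullopt;
    }
    return iter->GetProgramId();
}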
diff --git a/src/core/hle/service/pm/pm.cpp b/src/core/hle/service/pm/pm.cpp index ea249c26f..f9cf2dda3 100644 --- a/src/core/hle/service/pm/pm.cpp +++ b/src/core/hle/service/pm/pm.cpp | |||
| @@ -37,12 +37,12 @@ std::optional<Kernel::KProcess*> SearchProcessList( | |||
| 37 | void GetApplicationPidGeneric(HLERequestContext& ctx, | 37 | void GetApplicationPidGeneric(HLERequestContext& ctx, |
| 38 | const std::vector<Kernel::KProcess*>& process_list) { | 38 | const std::vector<Kernel::KProcess*>& process_list) { |
| 39 | const auto process = SearchProcessList(process_list, [](const auto& proc) { | 39 | const auto process = SearchProcessList(process_list, [](const auto& proc) { |
| 40 | return proc->GetProcessID() == Kernel::KProcess::ProcessIDMin; | 40 | return proc->GetProcessId() == Kernel::KProcess::ProcessIDMin; |
| 41 | }); | 41 | }); |
| 42 | 42 | ||
| 43 | IPC::ResponseBuilder rb{ctx, 4}; | 43 | IPC::ResponseBuilder rb{ctx, 4}; |
| 44 | rb.Push(ResultSuccess); | 44 | rb.Push(ResultSuccess); |
| 45 | rb.Push(process.has_value() ? (*process)->GetProcessID() : NO_PROCESS_FOUND_PID); | 45 | rb.Push(process.has_value() ? (*process)->GetProcessId() : NO_PROCESS_FOUND_PID); |
| 46 | } | 46 | } |
| 47 | 47 | ||
| 48 | } // Anonymous namespace | 48 | } // Anonymous namespace |
| @@ -108,7 +108,7 @@ private: | |||
| 108 | 108 | ||
| 109 | const auto process = | 109 | const auto process = |
| 110 | SearchProcessList(kernel.GetProcessList(), [program_id](const auto& proc) { | 110 | SearchProcessList(kernel.GetProcessList(), [program_id](const auto& proc) { |
| 111 | return proc->GetProgramID() == program_id; | 111 | return proc->GetProgramId() == program_id; |
| 112 | }); | 112 | }); |
| 113 | 113 | ||
| 114 | if (!process.has_value()) { | 114 | if (!process.has_value()) { |
| @@ -119,7 +119,7 @@ private: | |||
| 119 | 119 | ||
| 120 | IPC::ResponseBuilder rb{ctx, 4}; | 120 | IPC::ResponseBuilder rb{ctx, 4}; |
| 121 | rb.Push(ResultSuccess); | 121 | rb.Push(ResultSuccess); |
| 122 | rb.Push((*process)->GetProcessID()); | 122 | rb.Push((*process)->GetProcessId()); |
| 123 | } | 123 | } |
| 124 | 124 | ||
| 125 | void GetApplicationProcessId(HLERequestContext& ctx) { | 125 | void GetApplicationProcessId(HLERequestContext& ctx) { |
| @@ -136,7 +136,7 @@ private: | |||
| 136 | LOG_WARNING(Service_PM, "(Partial Implementation) called, pid={:016X}", pid); | 136 | LOG_WARNING(Service_PM, "(Partial Implementation) called, pid={:016X}", pid); |
| 137 | 137 | ||
| 138 | const auto process = SearchProcessList(kernel.GetProcessList(), [pid](const auto& proc) { | 138 | const auto process = SearchProcessList(kernel.GetProcessList(), [pid](const auto& proc) { |
| 139 | return proc->GetProcessID() == pid; | 139 | return proc->GetProcessId() == pid; |
| 140 | }); | 140 | }); |
| 141 | 141 | ||
| 142 | if (!process.has_value()) { | 142 | if (!process.has_value()) { |
| @@ -159,7 +159,7 @@ private: | |||
| 159 | 159 | ||
| 160 | OverrideStatus override_status{}; | 160 | OverrideStatus override_status{}; |
| 161 | ProgramLocation program_location{ | 161 | ProgramLocation program_location{ |
| 162 | .program_id = (*process)->GetProgramID(), | 162 | .program_id = (*process)->GetProgramId(), |
| 163 | .storage_id = 0, | 163 | .storage_id = 0, |
| 164 | }; | 164 | }; |
| 165 | 165 | ||
| @@ -194,7 +194,7 @@ private: | |||
| 194 | LOG_DEBUG(Service_PM, "called, process_id={:016X}", process_id); | 194 | LOG_DEBUG(Service_PM, "called, process_id={:016X}", process_id); |
| 195 | 195 | ||
| 196 | const auto process = SearchProcessList(process_list, [process_id](const auto& proc) { | 196 | const auto process = SearchProcessList(process_list, [process_id](const auto& proc) { |
| 197 | return proc->GetProcessID() == process_id; | 197 | return proc->GetProcessId() == process_id; |
| 198 | }); | 198 | }); |
| 199 | 199 | ||
| 200 | if (!process.has_value()) { | 200 | if (!process.has_value()) { |
| @@ -205,7 +205,7 @@ private: | |||
| 205 | 205 | ||
| 206 | IPC::ResponseBuilder rb{ctx, 4}; | 206 | IPC::ResponseBuilder rb{ctx, 4}; |
| 207 | rb.Push(ResultSuccess); | 207 | rb.Push(ResultSuccess); |
| 208 | rb.Push((*process)->GetProgramID()); | 208 | rb.Push((*process)->GetProgramId()); |
| 209 | } | 209 | } |
| 210 | 210 | ||
| 211 | void AtmosphereGetProcessId(HLERequestContext& ctx) { | 211 | void AtmosphereGetProcessId(HLERequestContext& ctx) { |
| @@ -215,7 +215,7 @@ private: | |||
| 215 | LOG_DEBUG(Service_PM, "called, program_id={:016X}", program_id); | 215 | LOG_DEBUG(Service_PM, "called, program_id={:016X}", program_id); |
| 216 | 216 | ||
| 217 | const auto process = SearchProcessList(process_list, [program_id](const auto& proc) { | 217 | const auto process = SearchProcessList(process_list, [program_id](const auto& proc) { |
| 218 | return proc->GetProgramID() == program_id; | 218 | return proc->GetProgramId() == program_id; |
| 219 | }); | 219 | }); |
| 220 | 220 | ||
| 221 | if (!process.has_value()) { | 221 | if (!process.has_value()) { |
| @@ -226,7 +226,7 @@ private: | |||
| 226 | 226 | ||
| 227 | IPC::ResponseBuilder rb{ctx, 4}; | 227 | IPC::ResponseBuilder rb{ctx, 4}; |
| 228 | rb.Push(ResultSuccess); | 228 | rb.Push(ResultSuccess); |
| 229 | rb.Push((*process)->GetProcessID()); | 229 | rb.Push((*process)->GetProcessId()); |
| 230 | } | 230 | } |
| 231 | 231 | ||
| 232 | const std::vector<Kernel::KProcess*>& process_list; | 232 | const std::vector<Kernel::KProcess*>& process_list; |
diff --git a/src/core/memory/cheat_engine.cpp b/src/core/memory/cheat_engine.cpp index c2d96bbec..de729955f 100644 --- a/src/core/memory/cheat_engine.cpp +++ b/src/core/memory/cheat_engine.cpp | |||
| @@ -196,7 +196,7 @@ void CheatEngine::Initialize() { | |||
| 196 | }); | 196 | }); |
| 197 | core_timing.ScheduleLoopingEvent(CHEAT_ENGINE_NS, CHEAT_ENGINE_NS, event); | 197 | core_timing.ScheduleLoopingEvent(CHEAT_ENGINE_NS, CHEAT_ENGINE_NS, event); |
| 198 | 198 | ||
| 199 | metadata.process_id = system.ApplicationProcess()->GetProcessID(); | 199 | metadata.process_id = system.ApplicationProcess()->GetProcessId(); |
| 200 | metadata.title_id = system.GetApplicationProcessProgramID(); | 200 | metadata.title_id = system.GetApplicationProcessProgramID(); |
| 201 | 201 | ||
| 202 | const auto& page_table = system.ApplicationProcess()->PageTable(); | 202 | const auto& page_table = system.ApplicationProcess()->PageTable(); |