diff options

Diffstat (limited to 'src')
-rw-r--r--  src/core/core_timing.cpp           |  6
-rw-r--r--  src/core/hle/kernel/scheduler.cpp  | 21
-rw-r--r--  src/core/hle/service/am/am.cpp     | 20
-rw-r--r--  src/core/hle/service/am/am.h       |  1

4 files changed, 32 insertions, 16 deletions
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp
index 5c83c41a4..a63e60461 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -172,7 +172,7 @@ void CoreTiming::ClearPendingEvents() {
 }
 
 void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
-    basic_lock.lock();
+    std::scoped_lock lock{basic_lock};
 
     const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
         return e.type.lock().get() == event_type.get();
@@ -183,12 +183,10 @@ void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
         event_queue.erase(itr, event_queue.end());
         std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>());
     }
-    basic_lock.unlock();
 }
 
 std::optional<s64> CoreTiming::Advance() {
-    std::scoped_lock advance_scope{advance_lock};
-    std::scoped_lock basic_scope{basic_lock};
+    std::scoped_lock lock{advance_lock, basic_lock};
     global_timer = GetGlobalTimeNs().count();
 
     while (!event_queue.empty() && event_queue.front().time <= global_timer) {
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 2b12c0dbf..7b929781c 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -6,6 +6,7 @@
 // licensed under GPLv2 or later under exception provided by the author.
 
 #include <algorithm>
+#include <mutex>
 #include <set>
 #include <unordered_set>
 #include <utility>
@@ -31,22 +32,20 @@ GlobalScheduler::GlobalScheduler(KernelCore& kernel) : kernel{kernel} {}
 GlobalScheduler::~GlobalScheduler() = default;
 
 void GlobalScheduler::AddThread(std::shared_ptr<Thread> thread) {
-    global_list_guard.lock();
+    std::scoped_lock lock{global_list_guard};
     thread_list.push_back(std::move(thread));
-    global_list_guard.unlock();
 }
 
 void GlobalScheduler::RemoveThread(std::shared_ptr<Thread> thread) {
-    global_list_guard.lock();
+    std::scoped_lock lock{global_list_guard};
     thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
                       thread_list.end());
-    global_list_guard.unlock();
 }
 
 u32 GlobalScheduler::SelectThreads() {
     ASSERT(is_locked);
     const auto update_thread = [](Thread* thread, Scheduler& sched) {
-        sched.guard.lock();
+        std::scoped_lock lock{sched.guard};
         if (thread != sched.selected_thread_set.get()) {
             if (thread == nullptr) {
                 ++sched.idle_selection_count;
@@ -57,7 +56,6 @@ u32 GlobalScheduler::SelectThreads() {
             sched.is_context_switch_pending || (sched.selected_thread_set != sched.current_thread);
         sched.is_context_switch_pending = reschedule_pending;
         std::atomic_thread_fence(std::memory_order_seq_cst);
-        sched.guard.unlock();
         return reschedule_pending;
     };
     if (!is_reselection_pending.load()) {
@@ -757,11 +755,12 @@ void Scheduler::OnSwitch(void* this_scheduler) {
 
 void Scheduler::SwitchToCurrent() {
     while (true) {
-        guard.lock();
-        selected_thread = selected_thread_set;
-        current_thread = selected_thread;
-        is_context_switch_pending = false;
-        guard.unlock();
+        {
+            std::scoped_lock lock{guard};
+            selected_thread = selected_thread_set;
+            current_thread = selected_thread;
+            is_context_switch_pending = false;
+        }
         while (!is_context_switch_pending) {
             if (current_thread != nullptr && !current_thread->IsHLEThread()) {
                 current_thread->context_guard.lock();
diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp
index 24cfb370b..c688d6d98 100644
--- a/src/core/hle/service/am/am.cpp
+++ b/src/core/hle/service/am/am.cpp
@@ -272,7 +272,7 @@ ISelfController::ISelfController(Core::System& system,
         {41, nullptr, "IsSystemBufferSharingEnabled"},
         {42, nullptr, "GetSystemSharedLayerHandle"},
         {43, nullptr, "GetSystemSharedBufferHandle"},
-        {44, nullptr, "CreateManagedDisplaySeparableLayer"},
+        {44, &ISelfController::CreateManagedDisplaySeparableLayer, "CreateManagedDisplaySeparableLayer"},
         {45, nullptr, "SetManagedDisplayLayerSeparationMode"},
         {50, &ISelfController::SetHandlesRequestToDisplay, "SetHandlesRequestToDisplay"},
         {51, nullptr, "ApproveToDisplay"},
@@ -462,6 +462,24 @@ void ISelfController::CreateManagedDisplayLayer(Kernel::HLERequestContext& ctx)
     rb.Push(*layer_id);
 }
 
+void ISelfController::CreateManagedDisplaySeparableLayer(Kernel::HLERequestContext& ctx) {
+    LOG_WARNING(Service_AM, "(STUBBED) called");
+
+    // TODO(Subv): Find out how AM determines the display to use, for now just
+    // create the layer in the Default display.
+    // This calls nn::vi::CreateRecordingLayer() which creates another layer.
+    // Currently we do not support more than 1 layer per display, output 1 layer id for now.
+    // Outputting 1 layer id instead of the expected 2 has not been observed to cause any adverse
+    // side effects.
+    // TODO: Support multiple layers
+    const auto display_id = nvflinger->OpenDisplay("Default");
+    const auto layer_id = nvflinger->CreateLayer(*display_id);
+
+    IPC::ResponseBuilder rb{ctx, 4};
+    rb.Push(RESULT_SUCCESS);
+    rb.Push(*layer_id);
+}
+
 void ISelfController::SetHandlesRequestToDisplay(Kernel::HLERequestContext& ctx) {
     LOG_WARNING(Service_AM, "(STUBBED) called");
 
diff --git a/src/core/hle/service/am/am.h b/src/core/hle/service/am/am.h
index 2f69466ec..6cfb11b48 100644
--- a/src/core/hle/service/am/am.h
+++ b/src/core/hle/service/am/am.h
@@ -140,6 +140,7 @@ private:
     void SetOutOfFocusSuspendingEnabled(Kernel::HLERequestContext& ctx);
     void SetAlbumImageOrientation(Kernel::HLERequestContext& ctx);
     void CreateManagedDisplayLayer(Kernel::HLERequestContext& ctx);
+    void CreateManagedDisplaySeparableLayer(Kernel::HLERequestContext& ctx);
     void SetHandlesRequestToDisplay(Kernel::HLERequestContext& ctx);
     void SetIdleTimeDetectionExtension(Kernel::HLERequestContext& ctx);
     void GetIdleTimeDetectionExtension(Kernel::HLERequestContext& ctx);