summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/CMakeLists.txt4
-rw-r--r--src/audio_core/stream.cpp25
-rw-r--r--src/audio_core/stream.h5
-rw-r--r--src/common/CMakeLists.txt13
-rw-r--r--src/common/atomic_ops.cpp70
-rw-r--r--src/common/atomic_ops.h17
-rw-r--r--src/common/fiber.cpp222
-rw-r--r--src/common/fiber.h92
-rw-r--r--src/common/spin_lock.cpp54
-rw-r--r--src/common/spin_lock.h26
-rw-r--r--src/common/thread.cpp52
-rw-r--r--src/common/thread.h13
-rw-r--r--src/common/uint128.cpp26
-rw-r--r--src/common/uint128.h3
-rw-r--r--src/common/wall_clock.cpp91
-rw-r--r--src/common/wall_clock.h53
-rw-r--r--src/common/x64/cpu_detect.cpp33
-rw-r--r--src/common/x64/cpu_detect.h12
-rw-r--r--src/common/x64/native_clock.cpp103
-rw-r--r--src/common/x64/native_clock.h48
-rw-r--r--src/core/CMakeLists.txt12
-rw-r--r--src/core/arm/arm_interface.cpp57
-rw-r--r--src/core/arm/arm_interface.h20
-rw-r--r--src/core/arm/cpu_interrupt_handler.cpp29
-rw-r--r--src/core/arm/cpu_interrupt_handler.h39
-rw-r--r--src/core/arm/dynarmic/arm_dynarmic_32.cpp82
-rw-r--r--src/core/arm/dynarmic/arm_dynarmic_32.h7
-rw-r--r--src/core/arm/dynarmic/arm_dynarmic_64.cpp106
-rw-r--r--src/core/arm/dynarmic/arm_dynarmic_64.h26
-rw-r--r--src/core/arm/dynarmic/arm_dynarmic_cp15.cpp2
-rw-r--r--src/core/arm/dynarmic/arm_exclusive_monitor.cpp76
-rw-r--r--src/core/arm/dynarmic/arm_exclusive_monitor.h48
-rw-r--r--src/core/arm/exclusive_monitor.cpp2
-rw-r--r--src/core/arm/exclusive_monitor.h6
-rw-r--r--src/core/arm/unicorn/arm_unicorn.cpp19
-rw-r--r--src/core/arm/unicorn/arm_unicorn.h5
-rw-r--r--src/core/core.cpp128
-rw-r--r--src/core/core.h48
-rw-r--r--src/core/core_manager.cpp67
-rw-r--r--src/core/core_manager.h63
-rw-r--r--src/core/core_timing.cpp256
-rw-r--r--src/core/core_timing.h123
-rw-r--r--src/core/core_timing_util.cpp44
-rw-r--r--src/core/core_timing_util.h18
-rw-r--r--src/core/cpu_manager.cpp368
-rw-r--r--src/core/cpu_manager.h80
-rw-r--r--src/core/gdbstub/gdbstub.cpp1
-rw-r--r--src/core/hardware_properties.h4
-rw-r--r--src/core/hle/kernel/address_arbiter.cpp212
-rw-r--r--src/core/hle/kernel/address_arbiter.h3
-rw-r--r--src/core/hle/kernel/client_port.cpp2
-rw-r--r--src/core/hle/kernel/errors.h1
-rw-r--r--src/core/hle/kernel/hle_ipc.cpp87
-rw-r--r--src/core/hle/kernel/kernel.cpp255
-rw-r--r--src/core/hle/kernel/kernel.h39
-rw-r--r--src/core/hle/kernel/memory/memory_manager.cpp1
-rw-r--r--src/core/hle/kernel/mutex.cpp118
-rw-r--r--src/core/hle/kernel/mutex.h4
-rw-r--r--src/core/hle/kernel/physical_core.cpp52
-rw-r--r--src/core/hle/kernel/physical_core.h44
-rw-r--r--src/core/hle/kernel/process.cpp23
-rw-r--r--src/core/hle/kernel/readable_event.cpp3
-rw-r--r--src/core/hle/kernel/scheduler.cpp576
-rw-r--r--src/core/hle/kernel/scheduler.h123
-rw-r--r--src/core/hle/kernel/server_session.cpp16
-rw-r--r--src/core/hle/kernel/svc.cpp464
-rw-r--r--src/core/hle/kernel/svc_wrap.h137
-rw-r--r--src/core/hle/kernel/synchronization.cpp137
-rw-r--r--src/core/hle/kernel/synchronization_object.cpp64
-rw-r--r--src/core/hle/kernel/synchronization_object.h18
-rw-r--r--src/core/hle/kernel/thread.cpp424
-rw-r--r--src/core/hle/kernel/thread.h277
-rw-r--r--src/core/hle/kernel/time_manager.cpp23
-rw-r--r--src/core/hle/kernel/time_manager.h4
-rw-r--r--src/core/hle/service/am/am.cpp18
-rw-r--r--src/core/hle/service/am/am.h2
-rw-r--r--src/core/hle/service/am/applets/software_keyboard.cpp2
-rw-r--r--src/core/hle/service/am/spsm.cpp16
-rw-r--r--src/core/hle/service/aoc/aoc_u.cpp1
-rw-r--r--src/core/hle/service/bcat/bcat.cpp2
-rw-r--r--src/core/hle/service/bcat/module.cpp3
-rw-r--r--src/core/hle/service/bpc/bpc.cpp20
-rw-r--r--src/core/hle/service/btdrv/btdrv.cpp167
-rw-r--r--src/core/hle/service/btm/btm.cpp147
-rw-r--r--src/core/hle/service/caps/caps.cpp2
-rw-r--r--src/core/hle/service/caps/caps.h76
-rw-r--r--src/core/hle/service/caps/caps_a.cpp2
-rw-r--r--src/core/hle/service/caps/caps_a.h2
-rw-r--r--src/core/hle/service/caps/caps_c.cpp2
-rw-r--r--src/core/hle/service/caps/caps_c.h2
-rw-r--r--src/core/hle/service/caps/caps_sc.cpp2
-rw-r--r--src/core/hle/service/caps/caps_sc.h2
-rw-r--r--src/core/hle/service/caps/caps_ss.cpp2
-rw-r--r--src/core/hle/service/caps/caps_ss.h2
-rw-r--r--src/core/hle/service/caps/caps_su.cpp2
-rw-r--r--src/core/hle/service/caps/caps_su.h2
-rw-r--r--src/core/hle/service/caps/caps_u.cpp26
-rw-r--r--src/core/hle/service/caps/caps_u.h2
-rw-r--r--src/core/hle/service/es/es.cpp47
-rw-r--r--src/core/hle/service/eupld/eupld.cpp1
-rw-r--r--src/core/hle/service/friend/friend.cpp6
-rw-r--r--src/core/hle/service/grc/grc.cpp3
-rw-r--r--src/core/hle/service/hid/controllers/debug_pad.cpp2
-rw-r--r--src/core/hle/service/hid/controllers/gesture.cpp2
-rw-r--r--src/core/hle/service/hid/controllers/keyboard.cpp2
-rw-r--r--src/core/hle/service/hid/controllers/mouse.cpp2
-rw-r--r--src/core/hle/service/hid/controllers/npad.cpp2
-rw-r--r--src/core/hle/service/hid/controllers/stubbed.cpp2
-rw-r--r--src/core/hle/service/hid/controllers/touchscreen.cpp4
-rw-r--r--src/core/hle/service/hid/controllers/xpad.cpp2
-rw-r--r--src/core/hle/service/hid/hid.cpp16
-rw-r--r--src/core/hle/service/hid/irs.cpp2
-rw-r--r--src/core/hle/service/lbl/lbl.cpp1
-rw-r--r--src/core/hle/service/ldn/ldn.cpp1
-rw-r--r--src/core/hle/service/ldr/ldr.cpp105
-rw-r--r--src/core/hle/service/mig/mig.cpp6
-rw-r--r--src/core/hle/service/mm/mm_u.cpp32
-rw-r--r--src/core/hle/service/ncm/ncm.cpp20
-rw-r--r--src/core/hle/service/nfc/nfc.cpp6
-rw-r--r--src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp3
-rw-r--r--src/core/hle/service/nvflinger/nvflinger.cpp64
-rw-r--r--src/core/hle/service/nvflinger/nvflinger.h21
-rw-r--r--src/core/hle/service/sm/sm.cpp2
-rw-r--r--src/core/hle/service/time/standard_steady_clock_core.cpp5
-rw-r--r--src/core/hle/service/time/tick_based_steady_clock_core.cpp5
-rw-r--r--src/core/hle/service/time/time.cpp5
-rw-r--r--src/core/hle/service/time/time_sharedmemory.cpp3
-rw-r--r--src/core/hle/service/vi/vi.cpp2
-rw-r--r--src/core/memory.cpp109
-rw-r--r--src/core/memory.h67
-rw-r--r--src/core/memory/cheat_engine.cpp8
-rw-r--r--src/core/perf_stats.cpp2
-rw-r--r--src/core/settings.cpp7
-rw-r--r--src/core/settings.h3
-rw-r--r--src/core/tools/freezer.cpp8
-rw-r--r--src/tests/CMakeLists.txt1
-rw-r--r--src/tests/common/fibers.cpp358
-rw-r--r--src/tests/core/core_timing.cpp182
-rw-r--r--src/video_core/engines/shader_bytecode.h8
-rw-r--r--src/video_core/gpu.cpp5
-rw-r--r--src/video_core/gpu.h6
-rw-r--r--src/video_core/gpu_asynch.cpp9
-rw-r--r--src/video_core/gpu_asynch.h2
-rw-r--r--src/video_core/gpu_synch.cpp8
-rw-r--r--src/video_core/gpu_synch.h2
-rw-r--r--src/video_core/gpu_thread.cpp7
-rw-r--r--src/video_core/renderer_opengl/gl_device.cpp2
-rw-r--r--src/video_core/renderer_opengl/gl_shader_cache.cpp12
-rw-r--r--src/video_core/renderer_opengl/gl_shader_cache.h1
-rw-r--r--src/video_core/renderer_vulkan/renderer_vulkan.cpp28
-rw-r--r--src/video_core/renderer_vulkan/vk_rasterizer.cpp60
-rw-r--r--src/video_core/renderer_vulkan/vk_rasterizer.h5
-rw-r--r--src/video_core/renderer_vulkan/vk_scheduler.cpp2
-rw-r--r--src/video_core/renderer_vulkan/wrapper.cpp16
-rw-r--r--src/video_core/renderer_vulkan/wrapper.h4
-rw-r--r--src/video_core/shader/decode/half_set.cpp88
-rw-r--r--src/video_core/texture_cache/surface_base.cpp3
-rw-r--r--src/yuzu/bootmanager.cpp54
-rw-r--r--src/yuzu/bootmanager.h8
-rw-r--r--src/yuzu/configuration/config.cpp3
-rw-r--r--src/yuzu/configuration/config.h2
-rw-r--r--src/yuzu/configuration/configure_general.cpp6
-rw-r--r--src/yuzu/configuration/configure_general.ui7
-rw-r--r--src/yuzu/debugger/wait_tree.cpp54
-rw-r--r--src/yuzu/main.cpp51
-rw-r--r--src/yuzu/main.h2
-rw-r--r--src/yuzu/main.ui6
-rw-r--r--src/yuzu_cmd/yuzu.cpp5
-rw-r--r--src/yuzu_tester/yuzu.cpp5
169 files changed, 5760 insertions, 2139 deletions
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 3a57356ab..1e977e8a8 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -62,6 +62,10 @@ else()
62 -Wno-unused-parameter 62 -Wno-unused-parameter
63 ) 63 )
64 64
65 if (ARCHITECTURE_x86_64)
66 add_compile_options("-mcx16")
67 endif()
68
65 if (APPLE AND CMAKE_CXX_COMPILER_ID STREQUAL Clang) 69 if (APPLE AND CMAKE_CXX_COMPILER_ID STREQUAL Clang)
66 add_compile_options("-stdlib=libc++") 70 add_compile_options("-stdlib=libc++")
67 endif() 71 endif()
diff --git a/src/audio_core/stream.cpp b/src/audio_core/stream.cpp
index 4ca98f8ea..dfc4805d9 100644
--- a/src/audio_core/stream.cpp
+++ b/src/audio_core/stream.cpp
@@ -59,15 +59,24 @@ Stream::State Stream::GetState() const {
59 return state; 59 return state;
60} 60}
61 61
62s64 Stream::GetBufferReleaseCycles(const Buffer& buffer) const { 62s64 Stream::GetBufferReleaseNS(const Buffer& buffer) const {
63 const std::size_t num_samples{buffer.GetSamples().size() / GetNumChannels()}; 63 const std::size_t num_samples{buffer.GetSamples().size() / GetNumChannels()};
64 const auto us = 64 const auto ns =
65 std::chrono::microseconds((static_cast<u64>(num_samples) * 1000000) / sample_rate); 65 std::chrono::nanoseconds((static_cast<u64>(num_samples) * 1000000000ULL) / sample_rate);
66 return Core::Timing::usToCycles(us); 66 return ns.count();
67}
68
69s64 Stream::GetBufferReleaseNSHostTiming(const Buffer& buffer) const {
70 const std::size_t num_samples{buffer.GetSamples().size() / GetNumChannels()};
71 /// DSP signals before playing the last sample, in HLE we emulate this in this way
72 s64 base_samples = std::max<s64>(static_cast<s64>(num_samples) - 1, 0);
73 const auto ns =
74 std::chrono::nanoseconds((static_cast<u64>(base_samples) * 1000000000ULL) / sample_rate);
75 return ns.count();
67} 76}
68 77
69static void VolumeAdjustSamples(std::vector<s16>& samples, float game_volume) { 78static void VolumeAdjustSamples(std::vector<s16>& samples, float game_volume) {
70 const float volume{std::clamp(Settings::values.volume - (1.0f - game_volume), 0.0f, 1.0f)}; 79 const float volume{std::clamp(Settings::Volume() - (1.0f - game_volume), 0.0f, 1.0f)};
71 80
72 if (volume == 1.0f) { 81 if (volume == 1.0f) {
73 return; 82 return;
@@ -105,7 +114,11 @@ void Stream::PlayNextBuffer() {
105 114
106 sink_stream.EnqueueSamples(GetNumChannels(), active_buffer->GetSamples()); 115 sink_stream.EnqueueSamples(GetNumChannels(), active_buffer->GetSamples());
107 116
108 core_timing.ScheduleEvent(GetBufferReleaseCycles(*active_buffer), release_event, {}); 117 if (core_timing.IsHostTiming()) {
118 core_timing.ScheduleEvent(GetBufferReleaseNSHostTiming(*active_buffer), release_event, {});
119 } else {
120 core_timing.ScheduleEvent(GetBufferReleaseNS(*active_buffer), release_event, {});
121 }
109} 122}
110 123
111void Stream::ReleaseActiveBuffer() { 124void Stream::ReleaseActiveBuffer() {
diff --git a/src/audio_core/stream.h b/src/audio_core/stream.h
index 1708a4d98..e309d60fe 100644
--- a/src/audio_core/stream.h
+++ b/src/audio_core/stream.h
@@ -96,7 +96,10 @@ private:
96 void ReleaseActiveBuffer(); 96 void ReleaseActiveBuffer();
97 97
98 /// Gets the number of core cycles when the specified buffer will be released 98 /// Gets the number of core cycles when the specified buffer will be released
99 s64 GetBufferReleaseCycles(const Buffer& buffer) const; 99 s64 GetBufferReleaseNS(const Buffer& buffer) const;
100
101 /// Gets the number of core cycles when the specified buffer will be released
102 s64 GetBufferReleaseNSHostTiming(const Buffer& buffer) const;
100 103
101 u32 sample_rate; ///< Sample rate of the stream 104 u32 sample_rate; ///< Sample rate of the stream
102 Format format; ///< Format of the stream 105 Format format; ///< Format of the stream
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index 0a3e2f4d1..d120c8d3d 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -98,6 +98,8 @@ add_library(common STATIC
98 algorithm.h 98 algorithm.h
99 alignment.h 99 alignment.h
100 assert.h 100 assert.h
101 atomic_ops.cpp
102 atomic_ops.h
101 detached_tasks.cpp 103 detached_tasks.cpp
102 detached_tasks.h 104 detached_tasks.h
103 bit_field.h 105 bit_field.h
@@ -110,6 +112,8 @@ add_library(common STATIC
110 common_types.h 112 common_types.h
111 dynamic_library.cpp 113 dynamic_library.cpp
112 dynamic_library.h 114 dynamic_library.h
115 fiber.cpp
116 fiber.h
113 file_util.cpp 117 file_util.cpp
114 file_util.h 118 file_util.h
115 hash.h 119 hash.h
@@ -143,6 +147,8 @@ add_library(common STATIC
143 scm_rev.cpp 147 scm_rev.cpp
144 scm_rev.h 148 scm_rev.h
145 scope_exit.h 149 scope_exit.h
150 spin_lock.cpp
151 spin_lock.h
146 string_util.cpp 152 string_util.cpp
147 string_util.h 153 string_util.h
148 swap.h 154 swap.h
@@ -163,6 +169,8 @@ add_library(common STATIC
163 vector_math.h 169 vector_math.h
164 virtual_buffer.cpp 170 virtual_buffer.cpp
165 virtual_buffer.h 171 virtual_buffer.h
172 wall_clock.cpp
173 wall_clock.h
166 web_result.h 174 web_result.h
167 zstd_compression.cpp 175 zstd_compression.cpp
168 zstd_compression.h 176 zstd_compression.h
@@ -173,12 +181,15 @@ if(ARCHITECTURE_x86_64)
173 PRIVATE 181 PRIVATE
174 x64/cpu_detect.cpp 182 x64/cpu_detect.cpp
175 x64/cpu_detect.h 183 x64/cpu_detect.h
184 x64/native_clock.cpp
185 x64/native_clock.h
176 x64/xbyak_abi.h 186 x64/xbyak_abi.h
177 x64/xbyak_util.h 187 x64/xbyak_util.h
178 ) 188 )
179endif() 189endif()
180 190
181create_target_directory_groups(common) 191create_target_directory_groups(common)
192find_package(Boost 1.71 COMPONENTS context headers REQUIRED)
182 193
183target_link_libraries(common PUBLIC Boost::boost fmt::fmt microprofile) 194target_link_libraries(common PUBLIC ${Boost_LIBRARIES} fmt::fmt microprofile)
184target_link_libraries(common PRIVATE lz4::lz4 zstd::zstd xbyak) 195target_link_libraries(common PRIVATE lz4::lz4 zstd::zstd xbyak)
diff --git a/src/common/atomic_ops.cpp b/src/common/atomic_ops.cpp
new file mode 100644
index 000000000..1098e21ff
--- /dev/null
+++ b/src/common/atomic_ops.cpp
@@ -0,0 +1,70 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <cstring>
6
7#include "common/atomic_ops.h"
8
9#if _MSC_VER
10#include <intrin.h>
11#endif
12
13namespace Common {
14
15#if _MSC_VER
16
17bool AtomicCompareAndSwap(u8 volatile* pointer, u8 value, u8 expected) {
18 u8 result = _InterlockedCompareExchange8((char*)pointer, value, expected);
19 return result == expected;
20}
21
22bool AtomicCompareAndSwap(u16 volatile* pointer, u16 value, u16 expected) {
23 u16 result = _InterlockedCompareExchange16((short*)pointer, value, expected);
24 return result == expected;
25}
26
27bool AtomicCompareAndSwap(u32 volatile* pointer, u32 value, u32 expected) {
28 u32 result = _InterlockedCompareExchange((long*)pointer, value, expected);
29 return result == expected;
30}
31
32bool AtomicCompareAndSwap(u64 volatile* pointer, u64 value, u64 expected) {
33 u64 result = _InterlockedCompareExchange64((__int64*)pointer, value, expected);
34 return result == expected;
35}
36
37bool AtomicCompareAndSwap(u64 volatile* pointer, u128 value, u128 expected) {
38 return _InterlockedCompareExchange128((__int64*)pointer, value[1], value[0],
39 (__int64*)expected.data()) != 0;
40}
41
42#else
43
44bool AtomicCompareAndSwap(u8 volatile* pointer, u8 value, u8 expected) {
45 return __sync_bool_compare_and_swap(pointer, expected, value);
46}
47
48bool AtomicCompareAndSwap(u16 volatile* pointer, u16 value, u16 expected) {
49 return __sync_bool_compare_and_swap(pointer, expected, value);
50}
51
52bool AtomicCompareAndSwap(u32 volatile* pointer, u32 value, u32 expected) {
53 return __sync_bool_compare_and_swap(pointer, expected, value);
54}
55
56bool AtomicCompareAndSwap(u64 volatile* pointer, u64 value, u64 expected) {
57 return __sync_bool_compare_and_swap(pointer, expected, value);
58}
59
60bool AtomicCompareAndSwap(u64 volatile* pointer, u128 value, u128 expected) {
61 unsigned __int128 value_a;
62 unsigned __int128 expected_a;
63 std::memcpy(&value_a, value.data(), sizeof(u128));
64 std::memcpy(&expected_a, expected.data(), sizeof(u128));
65 return __sync_bool_compare_and_swap((unsigned __int128*)pointer, expected_a, value_a);
66}
67
68#endif
69
70} // namespace Common
diff --git a/src/common/atomic_ops.h b/src/common/atomic_ops.h
new file mode 100644
index 000000000..e6181d521
--- /dev/null
+++ b/src/common/atomic_ops.h
@@ -0,0 +1,17 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "common/common_types.h"
8
9namespace Common {
10
11bool AtomicCompareAndSwap(u8 volatile* pointer, u8 value, u8 expected);
12bool AtomicCompareAndSwap(u16 volatile* pointer, u16 value, u16 expected);
13bool AtomicCompareAndSwap(u32 volatile* pointer, u32 value, u32 expected);
14bool AtomicCompareAndSwap(u64 volatile* pointer, u64 value, u64 expected);
15bool AtomicCompareAndSwap(u64 volatile* pointer, u128 value, u128 expected);
16
17} // namespace Common
diff --git a/src/common/fiber.cpp b/src/common/fiber.cpp
new file mode 100644
index 000000000..1c1d09ccb
--- /dev/null
+++ b/src/common/fiber.cpp
@@ -0,0 +1,222 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/assert.h"
6#include "common/fiber.h"
7#if defined(_WIN32) || defined(WIN32)
8#include <windows.h>
9#else
10#include <boost/context/detail/fcontext.hpp>
11#endif
12
13namespace Common {
14
15constexpr std::size_t default_stack_size = 256 * 1024; // 256kb
16
17#if defined(_WIN32) || defined(WIN32)
18
19struct Fiber::FiberImpl {
20 LPVOID handle = nullptr;
21 LPVOID rewind_handle = nullptr;
22};
23
24void Fiber::Start() {
25 ASSERT(previous_fiber != nullptr);
26 previous_fiber->guard.unlock();
27 previous_fiber.reset();
28 entry_point(start_parameter);
29 UNREACHABLE();
30}
31
32void Fiber::OnRewind() {
33 ASSERT(impl->handle != nullptr);
34 DeleteFiber(impl->handle);
35 impl->handle = impl->rewind_handle;
36 impl->rewind_handle = nullptr;
37 rewind_point(rewind_parameter);
38 UNREACHABLE();
39}
40
41void Fiber::FiberStartFunc(void* fiber_parameter) {
42 auto fiber = static_cast<Fiber*>(fiber_parameter);
43 fiber->Start();
44}
45
46void Fiber::RewindStartFunc(void* fiber_parameter) {
47 auto fiber = static_cast<Fiber*>(fiber_parameter);
48 fiber->OnRewind();
49}
50
51Fiber::Fiber(std::function<void(void*)>&& entry_point_func, void* start_parameter)
52 : entry_point{std::move(entry_point_func)}, start_parameter{start_parameter} {
53 impl = std::make_unique<FiberImpl>();
54 impl->handle = CreateFiber(default_stack_size, &FiberStartFunc, this);
55}
56
57Fiber::Fiber() : impl{std::make_unique<FiberImpl>()} {}
58
59Fiber::~Fiber() {
60 if (released) {
61 return;
62 }
63 // Make sure the Fiber is not being used
64 const bool locked = guard.try_lock();
65 ASSERT_MSG(locked, "Destroying a fiber that's still running");
66 if (locked) {
67 guard.unlock();
68 }
69 DeleteFiber(impl->handle);
70}
71
72void Fiber::Exit() {
73 ASSERT_MSG(is_thread_fiber, "Exitting non main thread fiber");
74 if (!is_thread_fiber) {
75 return;
76 }
77 ConvertFiberToThread();
78 guard.unlock();
79 released = true;
80}
81
82void Fiber::SetRewindPoint(std::function<void(void*)>&& rewind_func, void* start_parameter) {
83 rewind_point = std::move(rewind_func);
84 rewind_parameter = start_parameter;
85}
86
87void Fiber::Rewind() {
88 ASSERT(rewind_point);
89 ASSERT(impl->rewind_handle == nullptr);
90 impl->rewind_handle = CreateFiber(default_stack_size, &RewindStartFunc, this);
91 SwitchToFiber(impl->rewind_handle);
92}
93
94void Fiber::YieldTo(std::shared_ptr<Fiber>& from, std::shared_ptr<Fiber>& to) {
95 ASSERT_MSG(from != nullptr, "Yielding fiber is null!");
96 ASSERT_MSG(to != nullptr, "Next fiber is null!");
97 to->guard.lock();
98 to->previous_fiber = from;
99 SwitchToFiber(to->impl->handle);
100 ASSERT(from->previous_fiber != nullptr);
101 from->previous_fiber->guard.unlock();
102 from->previous_fiber.reset();
103}
104
105std::shared_ptr<Fiber> Fiber::ThreadToFiber() {
106 std::shared_ptr<Fiber> fiber = std::shared_ptr<Fiber>{new Fiber()};
107 fiber->guard.lock();
108 fiber->impl->handle = ConvertThreadToFiber(nullptr);
109 fiber->is_thread_fiber = true;
110 return fiber;
111}
112
113#else
114
115struct Fiber::FiberImpl {
116 alignas(64) std::array<u8, default_stack_size> stack;
117 alignas(64) std::array<u8, default_stack_size> rewind_stack;
118 u8* stack_limit;
119 u8* rewind_stack_limit;
120 boost::context::detail::fcontext_t context;
121 boost::context::detail::fcontext_t rewind_context;
122};
123
124void Fiber::Start(boost::context::detail::transfer_t& transfer) {
125 ASSERT(previous_fiber != nullptr);
126 previous_fiber->impl->context = transfer.fctx;
127 previous_fiber->guard.unlock();
128 previous_fiber.reset();
129 entry_point(start_parameter);
130 UNREACHABLE();
131}
132
133void Fiber::OnRewind([[maybe_unused]] boost::context::detail::transfer_t& transfer) {
134 ASSERT(impl->context != nullptr);
135 impl->context = impl->rewind_context;
136 impl->rewind_context = nullptr;
137 u8* tmp = impl->stack_limit;
138 impl->stack_limit = impl->rewind_stack_limit;
139 impl->rewind_stack_limit = tmp;
140 rewind_point(rewind_parameter);
141 UNREACHABLE();
142}
143
144void Fiber::FiberStartFunc(boost::context::detail::transfer_t transfer) {
145 auto fiber = static_cast<Fiber*>(transfer.data);
146 fiber->Start(transfer);
147}
148
149void Fiber::RewindStartFunc(boost::context::detail::transfer_t transfer) {
150 auto fiber = static_cast<Fiber*>(transfer.data);
151 fiber->OnRewind(transfer);
152}
153
154Fiber::Fiber(std::function<void(void*)>&& entry_point_func, void* start_parameter)
155 : entry_point{std::move(entry_point_func)}, start_parameter{start_parameter} {
156 impl = std::make_unique<FiberImpl>();
157 impl->stack_limit = impl->stack.data();
158 impl->rewind_stack_limit = impl->rewind_stack.data();
159 u8* stack_base = impl->stack_limit + default_stack_size;
160 impl->context =
161 boost::context::detail::make_fcontext(stack_base, impl->stack.size(), FiberStartFunc);
162}
163
164void Fiber::SetRewindPoint(std::function<void(void*)>&& rewind_func, void* start_parameter) {
165 rewind_point = std::move(rewind_func);
166 rewind_parameter = start_parameter;
167}
168
169Fiber::Fiber() : impl{std::make_unique<FiberImpl>()} {}
170
171Fiber::~Fiber() {
172 if (released) {
173 return;
174 }
175 // Make sure the Fiber is not being used
176 const bool locked = guard.try_lock();
177 ASSERT_MSG(locked, "Destroying a fiber that's still running");
178 if (locked) {
179 guard.unlock();
180 }
181}
182
183void Fiber::Exit() {
184
185 ASSERT_MSG(is_thread_fiber, "Exitting non main thread fiber");
186 if (!is_thread_fiber) {
187 return;
188 }
189 guard.unlock();
190 released = true;
191}
192
193void Fiber::Rewind() {
194 ASSERT(rewind_point);
195 ASSERT(impl->rewind_context == nullptr);
196 u8* stack_base = impl->rewind_stack_limit + default_stack_size;
197 impl->rewind_context =
198 boost::context::detail::make_fcontext(stack_base, impl->stack.size(), RewindStartFunc);
199 boost::context::detail::jump_fcontext(impl->rewind_context, this);
200}
201
202void Fiber::YieldTo(std::shared_ptr<Fiber>& from, std::shared_ptr<Fiber>& to) {
203 ASSERT_MSG(from != nullptr, "Yielding fiber is null!");
204 ASSERT_MSG(to != nullptr, "Next fiber is null!");
205 to->guard.lock();
206 to->previous_fiber = from;
207 auto transfer = boost::context::detail::jump_fcontext(to->impl->context, to.get());
208 ASSERT(from->previous_fiber != nullptr);
209 from->previous_fiber->impl->context = transfer.fctx;
210 from->previous_fiber->guard.unlock();
211 from->previous_fiber.reset();
212}
213
214std::shared_ptr<Fiber> Fiber::ThreadToFiber() {
215 std::shared_ptr<Fiber> fiber = std::shared_ptr<Fiber>{new Fiber()};
216 fiber->guard.lock();
217 fiber->is_thread_fiber = true;
218 return fiber;
219}
220
221#endif
222} // namespace Common
diff --git a/src/common/fiber.h b/src/common/fiber.h
new file mode 100644
index 000000000..dafc1100e
--- /dev/null
+++ b/src/common/fiber.h
@@ -0,0 +1,92 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <functional>
8#include <memory>
9
10#include "common/common_types.h"
11#include "common/spin_lock.h"
12
13#if !defined(_WIN32) && !defined(WIN32)
14namespace boost::context::detail {
15struct transfer_t;
16}
17#endif
18
19namespace Common {
20
21/**
22 * Fiber class
23 * a fiber is a userspace thread with it's own context. They can be used to
24 * implement coroutines, emulated threading systems and certain asynchronous
25 * patterns.
26 *
27 * This class implements fibers at a low level, thus allowing greater freedom
28 * to implement such patterns. This fiber class is 'threadsafe' only one fiber
29 * can be running at a time and threads will be locked while trying to yield to
30 * a running fiber until it yields. WARNING exchanging two running fibers between
31 * threads will cause a deadlock. In order to prevent a deadlock, each thread should
32 * have an intermediary fiber, you switch to the intermediary fiber of the current
33 * thread and then from it switch to the expected fiber. This way you can exchange
34 * 2 fibers within 2 different threads.
35 */
36class Fiber {
37public:
38 Fiber(std::function<void(void*)>&& entry_point_func, void* start_parameter);
39 ~Fiber();
40
41 Fiber(const Fiber&) = delete;
42 Fiber& operator=(const Fiber&) = delete;
43
44 Fiber(Fiber&&) = default;
45 Fiber& operator=(Fiber&&) = default;
46
47 /// Yields control from Fiber 'from' to Fiber 'to'
48 /// Fiber 'from' must be the currently running fiber.
49 static void YieldTo(std::shared_ptr<Fiber>& from, std::shared_ptr<Fiber>& to);
50 static std::shared_ptr<Fiber> ThreadToFiber();
51
52 void SetRewindPoint(std::function<void(void*)>&& rewind_func, void* start_parameter);
53
54 void Rewind();
55
56 /// Only call from main thread's fiber
57 void Exit();
58
59 /// Changes the start parameter of the fiber. Has no effect if the fiber already started
60 void SetStartParameter(void* new_parameter) {
61 start_parameter = new_parameter;
62 }
63
64private:
65 Fiber();
66
67#if defined(_WIN32) || defined(WIN32)
68 void OnRewind();
69 void Start();
70 static void FiberStartFunc(void* fiber_parameter);
71 static void RewindStartFunc(void* fiber_parameter);
72#else
73 void OnRewind(boost::context::detail::transfer_t& transfer);
74 void Start(boost::context::detail::transfer_t& transfer);
75 static void FiberStartFunc(boost::context::detail::transfer_t transfer);
76 static void RewindStartFunc(boost::context::detail::transfer_t transfer);
77#endif
78
79 struct FiberImpl;
80
81 SpinLock guard{};
82 std::function<void(void*)> entry_point;
83 std::function<void(void*)> rewind_point;
84 void* rewind_parameter{};
85 void* start_parameter{};
86 std::shared_ptr<Fiber> previous_fiber;
87 std::unique_ptr<FiberImpl> impl;
88 bool is_thread_fiber{};
89 bool released{};
90};
91
92} // namespace Common
diff --git a/src/common/spin_lock.cpp b/src/common/spin_lock.cpp
new file mode 100644
index 000000000..c1524220f
--- /dev/null
+++ b/src/common/spin_lock.cpp
@@ -0,0 +1,54 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/spin_lock.h"
6
7#if _MSC_VER
8#include <intrin.h>
9#if _M_AMD64
10#define __x86_64__ 1
11#endif
12#if _M_ARM64
13#define __aarch64__ 1
14#endif
15#else
16#if __x86_64__
17#include <xmmintrin.h>
18#endif
19#endif
20
21namespace {
22
23void ThreadPause() {
24#if __x86_64__
25 _mm_pause();
26#elif __aarch64__ && _MSC_VER
27 __yield();
28#elif __aarch64__
29 asm("yield");
30#endif
31}
32
33} // Anonymous namespace
34
35namespace Common {
36
37void SpinLock::lock() {
38 while (lck.test_and_set(std::memory_order_acquire)) {
39 ThreadPause();
40 }
41}
42
43void SpinLock::unlock() {
44 lck.clear(std::memory_order_release);
45}
46
47bool SpinLock::try_lock() {
48 if (lck.test_and_set(std::memory_order_acquire)) {
49 return false;
50 }
51 return true;
52}
53
54} // namespace Common
diff --git a/src/common/spin_lock.h b/src/common/spin_lock.h
new file mode 100644
index 000000000..1df5528c4
--- /dev/null
+++ b/src/common/spin_lock.h
@@ -0,0 +1,26 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <atomic>
8
9namespace Common {
10
11/**
12 * SpinLock class
13 * a lock similar to mutex that forces a thread to spin wait instead calling the
14 * supervisor. Should be used on short sequences of code.
15 */
16class SpinLock {
17public:
18 void lock();
19 void unlock();
20 bool try_lock();
21
22private:
23 std::atomic_flag lck = ATOMIC_FLAG_INIT;
24};
25
26} // namespace Common
diff --git a/src/common/thread.cpp b/src/common/thread.cpp
index 0cd2d10bf..8e5935e6a 100644
--- a/src/common/thread.cpp
+++ b/src/common/thread.cpp
@@ -25,6 +25,52 @@
25 25
26namespace Common { 26namespace Common {
27 27
28#ifdef _WIN32
29
30void SetCurrentThreadPriority(ThreadPriority new_priority) {
31 auto handle = GetCurrentThread();
32 int windows_priority = 0;
33 switch (new_priority) {
34 case ThreadPriority::Low:
35 windows_priority = THREAD_PRIORITY_BELOW_NORMAL;
36 break;
37 case ThreadPriority::Normal:
38 windows_priority = THREAD_PRIORITY_NORMAL;
39 break;
40 case ThreadPriority::High:
41 windows_priority = THREAD_PRIORITY_ABOVE_NORMAL;
42 break;
43 case ThreadPriority::VeryHigh:
44 windows_priority = THREAD_PRIORITY_HIGHEST;
45 break;
46 default:
47 windows_priority = THREAD_PRIORITY_NORMAL;
48 break;
49 }
50 SetThreadPriority(handle, windows_priority);
51}
52
53#else
54
55void SetCurrentThreadPriority(ThreadPriority new_priority) {
56 pthread_t this_thread = pthread_self();
57
58 s32 max_prio = sched_get_priority_max(SCHED_OTHER);
59 s32 min_prio = sched_get_priority_min(SCHED_OTHER);
60 u32 level = static_cast<u32>(new_priority) + 1;
61
62 struct sched_param params;
63 if (max_prio > min_prio) {
64 params.sched_priority = min_prio + ((max_prio - min_prio) * level) / 4;
65 } else {
66 params.sched_priority = min_prio - ((min_prio - max_prio) * level) / 4;
67 }
68
69 pthread_setschedparam(this_thread, SCHED_OTHER, &params);
70}
71
72#endif
73
28#ifdef _MSC_VER 74#ifdef _MSC_VER
29 75
30// Sets the debugger-visible name of the current thread. 76// Sets the debugger-visible name of the current thread.
@@ -70,6 +116,12 @@ void SetCurrentThreadName(const char* name) {
70} 116}
71#endif 117#endif
72 118
119#if defined(_WIN32)
120void SetCurrentThreadName(const char* name) {
121 // Do Nothing on MingW
122}
123#endif
124
73#endif 125#endif
74 126
75} // namespace Common 127} // namespace Common
diff --git a/src/common/thread.h b/src/common/thread.h
index 2fc071685..52b359413 100644
--- a/src/common/thread.h
+++ b/src/common/thread.h
@@ -9,6 +9,7 @@
9#include <cstddef> 9#include <cstddef>
10#include <mutex> 10#include <mutex>
11#include <thread> 11#include <thread>
12#include "common/common_types.h"
12 13
13namespace Common { 14namespace Common {
14 15
@@ -28,8 +29,7 @@ public:
28 is_set = false; 29 is_set = false;
29 } 30 }
30 31
31 template <class Duration> 32 bool WaitFor(const std::chrono::nanoseconds& time) {
32 bool WaitFor(const std::chrono::duration<Duration>& time) {
33 std::unique_lock lk{mutex}; 33 std::unique_lock lk{mutex};
34 if (!condvar.wait_for(lk, time, [this] { return is_set; })) 34 if (!condvar.wait_for(lk, time, [this] { return is_set; }))
35 return false; 35 return false;
@@ -86,6 +86,15 @@ private:
86 std::size_t generation = 0; // Incremented once each time the barrier is used 86 std::size_t generation = 0; // Incremented once each time the barrier is used
87}; 87};
88 88
89enum class ThreadPriority : u32 {
90 Low = 0,
91 Normal = 1,
92 High = 2,
93 VeryHigh = 3,
94};
95
96void SetCurrentThreadPriority(ThreadPriority new_priority);
97
89void SetCurrentThreadName(const char* name); 98void SetCurrentThreadName(const char* name);
90 99
91} // namespace Common 100} // namespace Common
diff --git a/src/common/uint128.cpp b/src/common/uint128.cpp
index 32bf56730..16bf7c828 100644
--- a/src/common/uint128.cpp
+++ b/src/common/uint128.cpp
@@ -6,12 +6,38 @@
6#include <intrin.h> 6#include <intrin.h>
7 7
8#pragma intrinsic(_umul128) 8#pragma intrinsic(_umul128)
9#pragma intrinsic(_udiv128)
9#endif 10#endif
10#include <cstring> 11#include <cstring>
11#include "common/uint128.h" 12#include "common/uint128.h"
12 13
13namespace Common { 14namespace Common {
14 15
16#ifdef _MSC_VER
17
18u64 MultiplyAndDivide64(u64 a, u64 b, u64 d) {
19 u128 r{};
20 r[0] = _umul128(a, b, &r[1]);
21 u64 remainder;
22#if _MSC_VER < 1923
23 return udiv128(r[1], r[0], d, &remainder);
24#else
25 return _udiv128(r[1], r[0], d, &remainder);
26#endif
27}
28
29#else
30
31u64 MultiplyAndDivide64(u64 a, u64 b, u64 d) {
32 const u64 diva = a / d;
33 const u64 moda = a % d;
34 const u64 divb = b / d;
35 const u64 modb = b % d;
36 return diva * b + moda * divb + moda * modb / d;
37}
38
39#endif
40
15u128 Multiply64Into128(u64 a, u64 b) { 41u128 Multiply64Into128(u64 a, u64 b) {
16 u128 result; 42 u128 result;
17#ifdef _MSC_VER 43#ifdef _MSC_VER
diff --git a/src/common/uint128.h b/src/common/uint128.h
index a3be2a2cb..503cd2d0c 100644
--- a/src/common/uint128.h
+++ b/src/common/uint128.h
@@ -9,6 +9,9 @@
9 9
10namespace Common { 10namespace Common {
11 11
12// This function multiplies 2 u64 values and divides it by a u64 value.
13u64 MultiplyAndDivide64(u64 a, u64 b, u64 d);
14
12// This function multiplies 2 u64 values and produces a u128 value; 15// This function multiplies 2 u64 values and produces a u128 value;
13u128 Multiply64Into128(u64 a, u64 b); 16u128 Multiply64Into128(u64 a, u64 b);
14 17
diff --git a/src/common/wall_clock.cpp b/src/common/wall_clock.cpp
new file mode 100644
index 000000000..3afbdb898
--- /dev/null
+++ b/src/common/wall_clock.cpp
@@ -0,0 +1,91 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/uint128.h"
6#include "common/wall_clock.h"
7
8#ifdef ARCHITECTURE_x86_64
9#include "common/x64/cpu_detect.h"
10#include "common/x64/native_clock.h"
11#endif
12
13namespace Common {
14
15using base_timer = std::chrono::steady_clock;
16using base_time_point = std::chrono::time_point<base_timer>;
17
18class StandardWallClock : public WallClock {
19public:
20 StandardWallClock(u64 emulated_cpu_frequency, u64 emulated_clock_frequency)
21 : WallClock(emulated_cpu_frequency, emulated_clock_frequency, false) {
22 start_time = base_timer::now();
23 }
24
25 std::chrono::nanoseconds GetTimeNS() override {
26 base_time_point current = base_timer::now();
27 auto elapsed = current - start_time;
28 return std::chrono::duration_cast<std::chrono::nanoseconds>(elapsed);
29 }
30
31 std::chrono::microseconds GetTimeUS() override {
32 base_time_point current = base_timer::now();
33 auto elapsed = current - start_time;
34 return std::chrono::duration_cast<std::chrono::microseconds>(elapsed);
35 }
36
37 std::chrono::milliseconds GetTimeMS() override {
38 base_time_point current = base_timer::now();
39 auto elapsed = current - start_time;
40 return std::chrono::duration_cast<std::chrono::milliseconds>(elapsed);
41 }
42
43 u64 GetClockCycles() override {
44 std::chrono::nanoseconds time_now = GetTimeNS();
45 const u128 temporary =
46 Common::Multiply64Into128(time_now.count(), emulated_clock_frequency);
47 return Common::Divide128On32(temporary, 1000000000).first;
48 }
49
50 u64 GetCPUCycles() override {
51 std::chrono::nanoseconds time_now = GetTimeNS();
52 const u128 temporary = Common::Multiply64Into128(time_now.count(), emulated_cpu_frequency);
53 return Common::Divide128On32(temporary, 1000000000).first;
54 }
55
56 void Pause(bool is_paused) override {
57 // Do nothing in this clock type.
58 }
59
60private:
61 base_time_point start_time;
62};
63
64#ifdef ARCHITECTURE_x86_64
65
66std::unique_ptr<WallClock> CreateBestMatchingClock(u32 emulated_cpu_frequency,
67 u32 emulated_clock_frequency) {
68 const auto& caps = GetCPUCaps();
69 u64 rtsc_frequency = 0;
70 if (caps.invariant_tsc) {
71 rtsc_frequency = EstimateRDTSCFrequency();
72 }
73 if (rtsc_frequency == 0) {
74 return std::make_unique<StandardWallClock>(emulated_cpu_frequency,
75 emulated_clock_frequency);
76 } else {
77 return std::make_unique<X64::NativeClock>(emulated_cpu_frequency, emulated_clock_frequency,
78 rtsc_frequency);
79 }
80}
81
82#else
83
84std::unique_ptr<WallClock> CreateBestMatchingClock(u32 emulated_cpu_frequency,
85 u32 emulated_clock_frequency) {
86 return std::make_unique<StandardWallClock>(emulated_cpu_frequency, emulated_clock_frequency);
87}
88
89#endif
90
91} // namespace Common
diff --git a/src/common/wall_clock.h b/src/common/wall_clock.h
new file mode 100644
index 000000000..367d72134
--- /dev/null
+++ b/src/common/wall_clock.h
@@ -0,0 +1,53 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <chrono>
8#include <memory>
9
10#include "common/common_types.h"
11
12namespace Common {
13
14class WallClock {
15public:
16 /// Returns current wall time in nanoseconds
17 virtual std::chrono::nanoseconds GetTimeNS() = 0;
18
19 /// Returns current wall time in microseconds
20 virtual std::chrono::microseconds GetTimeUS() = 0;
21
22 /// Returns current wall time in milliseconds
23 virtual std::chrono::milliseconds GetTimeMS() = 0;
24
25 /// Returns current wall time in emulated clock cycles
26 virtual u64 GetClockCycles() = 0;
27
28 /// Returns current wall time in emulated cpu cycles
29 virtual u64 GetCPUCycles() = 0;
30
31 virtual void Pause(bool is_paused) = 0;
32
33 /// Tells if the wall clock uses the host CPU's hardware clock
34 bool IsNative() const {
35 return is_native;
36 }
37
38protected:
39 WallClock(u64 emulated_cpu_frequency, u64 emulated_clock_frequency, bool is_native)
40 : emulated_cpu_frequency{emulated_cpu_frequency},
41 emulated_clock_frequency{emulated_clock_frequency}, is_native{is_native} {}
42
43 u64 emulated_cpu_frequency;
44 u64 emulated_clock_frequency;
45
46private:
47 bool is_native;
48};
49
50std::unique_ptr<WallClock> CreateBestMatchingClock(u32 emulated_cpu_frequency,
51 u32 emulated_clock_frequency);
52
53} // namespace Common
diff --git a/src/common/x64/cpu_detect.cpp b/src/common/x64/cpu_detect.cpp
index f35dcb498..fccd2eee5 100644
--- a/src/common/x64/cpu_detect.cpp
+++ b/src/common/x64/cpu_detect.cpp
@@ -62,6 +62,17 @@ static CPUCaps Detect() {
62 std::memcpy(&caps.brand_string[0], &cpu_id[1], sizeof(int)); 62 std::memcpy(&caps.brand_string[0], &cpu_id[1], sizeof(int));
63 std::memcpy(&caps.brand_string[4], &cpu_id[3], sizeof(int)); 63 std::memcpy(&caps.brand_string[4], &cpu_id[3], sizeof(int));
64 std::memcpy(&caps.brand_string[8], &cpu_id[2], sizeof(int)); 64 std::memcpy(&caps.brand_string[8], &cpu_id[2], sizeof(int));
65 if (cpu_id[1] == 0x756e6547 && cpu_id[2] == 0x6c65746e && cpu_id[3] == 0x49656e69)
66 caps.manufacturer = Manufacturer::Intel;
67 else if (cpu_id[1] == 0x68747541 && cpu_id[2] == 0x444d4163 && cpu_id[3] == 0x69746e65)
68 caps.manufacturer = Manufacturer::AMD;
69 else if (cpu_id[1] == 0x6f677948 && cpu_id[2] == 0x656e6975 && cpu_id[3] == 0x6e65476e)
70 caps.manufacturer = Manufacturer::Hygon;
71 else
72 caps.manufacturer = Manufacturer::Unknown;
73
74 u32 family = {};
75 u32 model = {};
65 76
66 __cpuid(cpu_id, 0x80000000); 77 __cpuid(cpu_id, 0x80000000);
67 78
@@ -73,6 +84,14 @@ static CPUCaps Detect() {
73 // Detect family and other miscellaneous features 84 // Detect family and other miscellaneous features
74 if (max_std_fn >= 1) { 85 if (max_std_fn >= 1) {
75 __cpuid(cpu_id, 0x00000001); 86 __cpuid(cpu_id, 0x00000001);
87 family = (cpu_id[0] >> 8) & 0xf;
88 model = (cpu_id[0] >> 4) & 0xf;
89 if (family == 0xf) {
90 family += (cpu_id[0] >> 20) & 0xff;
91 }
92 if (family >= 6) {
93 model += ((cpu_id[0] >> 16) & 0xf) << 4;
94 }
76 95
77 if ((cpu_id[3] >> 25) & 1) 96 if ((cpu_id[3] >> 25) & 1)
78 caps.sse = true; 97 caps.sse = true;
@@ -135,6 +154,20 @@ static CPUCaps Detect() {
135 caps.fma4 = true; 154 caps.fma4 = true;
136 } 155 }
137 156
157 if (max_ex_fn >= 0x80000007) {
158 __cpuid(cpu_id, 0x80000007);
159 if (cpu_id[3] & (1 << 8)) {
160 caps.invariant_tsc = true;
161 }
162 }
163
164 if (max_std_fn >= 0x16) {
165 __cpuid(cpu_id, 0x16);
166 caps.base_frequency = cpu_id[0];
167 caps.max_frequency = cpu_id[1];
168 caps.bus_frequency = cpu_id[2];
169 }
170
138 return caps; 171 return caps;
139} 172}
140 173
diff --git a/src/common/x64/cpu_detect.h b/src/common/x64/cpu_detect.h
index 7606c3f7b..e3b63302e 100644
--- a/src/common/x64/cpu_detect.h
+++ b/src/common/x64/cpu_detect.h
@@ -6,8 +6,16 @@
6 6
7namespace Common { 7namespace Common {
8 8
9enum class Manufacturer : u32 {
10 Intel = 0,
11 AMD = 1,
12 Hygon = 2,
13 Unknown = 3,
14};
15
9/// x86/x64 CPU capabilities that may be detected by this module 16/// x86/x64 CPU capabilities that may be detected by this module
10struct CPUCaps { 17struct CPUCaps {
18 Manufacturer manufacturer;
11 char cpu_string[0x21]; 19 char cpu_string[0x21];
12 char brand_string[0x41]; 20 char brand_string[0x41];
13 bool sse; 21 bool sse;
@@ -25,6 +33,10 @@ struct CPUCaps {
25 bool fma; 33 bool fma;
26 bool fma4; 34 bool fma4;
27 bool aes; 35 bool aes;
36 bool invariant_tsc;
37 u32 base_frequency;
38 u32 max_frequency;
39 u32 bus_frequency;
28}; 40};
29 41
30/** 42/**
diff --git a/src/common/x64/native_clock.cpp b/src/common/x64/native_clock.cpp
new file mode 100644
index 000000000..424b39b1f
--- /dev/null
+++ b/src/common/x64/native_clock.cpp
@@ -0,0 +1,103 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <chrono>
6#include <mutex>
7#include <thread>
8
9#ifdef _MSC_VER
10#include <intrin.h>
11#else
12#include <x86intrin.h>
13#endif
14
15#include "common/uint128.h"
16#include "common/x64/native_clock.h"
17
18namespace Common {
19
20u64 EstimateRDTSCFrequency() {
21 const auto milli_10 = std::chrono::milliseconds{10};
22 // get current time
23 _mm_mfence();
24 const u64 tscStart = __rdtsc();
25 const auto startTime = std::chrono::high_resolution_clock::now();
26 // wait roughly 3 seconds
27 while (true) {
28 auto milli = std::chrono::duration_cast<std::chrono::milliseconds>(
29 std::chrono::high_resolution_clock::now() - startTime);
30 if (milli.count() >= 3000)
31 break;
32 std::this_thread::sleep_for(milli_10);
33 }
34 const auto endTime = std::chrono::high_resolution_clock::now();
35 _mm_mfence();
36 const u64 tscEnd = __rdtsc();
37 // calculate difference
38 const u64 timer_diff =
39 std::chrono::duration_cast<std::chrono::nanoseconds>(endTime - startTime).count();
40 const u64 tsc_diff = tscEnd - tscStart;
41 const u64 tsc_freq = MultiplyAndDivide64(tsc_diff, 1000000000ULL, timer_diff);
42 return tsc_freq;
43}
44
45namespace X64 {
46NativeClock::NativeClock(u64 emulated_cpu_frequency, u64 emulated_clock_frequency,
47 u64 rtsc_frequency)
48 : WallClock(emulated_cpu_frequency, emulated_clock_frequency, true), rtsc_frequency{
49 rtsc_frequency} {
50 _mm_mfence();
51 last_measure = __rdtsc();
52 accumulated_ticks = 0U;
53}
54
55u64 NativeClock::GetRTSC() {
56 std::scoped_lock scope{rtsc_serialize};
57 _mm_mfence();
58 const u64 current_measure = __rdtsc();
59 u64 diff = current_measure - last_measure;
60 diff = diff & ~static_cast<u64>(static_cast<s64>(diff) >> 63); // max(diff, 0)
61 if (current_measure > last_measure) {
62 last_measure = current_measure;
63 }
64 accumulated_ticks += diff;
65 /// The clock cannot be more precise than the guest timer, remove the lower bits
66 return accumulated_ticks & inaccuracy_mask;
67}
68
69void NativeClock::Pause(bool is_paused) {
70 if (!is_paused) {
71 _mm_mfence();
72 last_measure = __rdtsc();
73 }
74}
75
76std::chrono::nanoseconds NativeClock::GetTimeNS() {
77 const u64 rtsc_value = GetRTSC();
78 return std::chrono::nanoseconds{MultiplyAndDivide64(rtsc_value, 1000000000, rtsc_frequency)};
79}
80
81std::chrono::microseconds NativeClock::GetTimeUS() {
82 const u64 rtsc_value = GetRTSC();
83 return std::chrono::microseconds{MultiplyAndDivide64(rtsc_value, 1000000, rtsc_frequency)};
84}
85
86std::chrono::milliseconds NativeClock::GetTimeMS() {
87 const u64 rtsc_value = GetRTSC();
88 return std::chrono::milliseconds{MultiplyAndDivide64(rtsc_value, 1000, rtsc_frequency)};
89}
90
91u64 NativeClock::GetClockCycles() {
92 const u64 rtsc_value = GetRTSC();
93 return MultiplyAndDivide64(rtsc_value, emulated_clock_frequency, rtsc_frequency);
94}
95
96u64 NativeClock::GetCPUCycles() {
97 const u64 rtsc_value = GetRTSC();
98 return MultiplyAndDivide64(rtsc_value, emulated_cpu_frequency, rtsc_frequency);
99}
100
101} // namespace X64
102
103} // namespace Common
diff --git a/src/common/x64/native_clock.h b/src/common/x64/native_clock.h
new file mode 100644
index 000000000..891a3bbfd
--- /dev/null
+++ b/src/common/x64/native_clock.h
@@ -0,0 +1,48 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <optional>
8
9#include "common/spin_lock.h"
10#include "common/wall_clock.h"
11
12namespace Common {
13
14namespace X64 {
15class NativeClock : public WallClock {
16public:
17 NativeClock(u64 emulated_cpu_frequency, u64 emulated_clock_frequency, u64 rtsc_frequency);
18
19 std::chrono::nanoseconds GetTimeNS() override;
20
21 std::chrono::microseconds GetTimeUS() override;
22
23 std::chrono::milliseconds GetTimeMS() override;
24
25 u64 GetClockCycles() override;
26
27 u64 GetCPUCycles() override;
28
29 void Pause(bool is_paused) override;
30
31private:
32 u64 GetRTSC();
33
34 /// value used to reduce the native clock's accuracy, as some apps rely on
35 /// undefined behavior where the level of accuracy in the clock shouldn't
36 /// be higher.
37 static constexpr u64 inaccuracy_mask = ~(0x400 - 1);
38
39 SpinLock rtsc_serialize{};
40 u64 last_measure{};
41 u64 accumulated_ticks{};
42 u64 rtsc_frequency;
43};
44} // namespace X64
45
46u64 EstimateRDTSCFrequency();
47
48} // namespace Common
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index cb9ced5c9..f87d67db5 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -7,6 +7,16 @@ endif()
7add_library(core STATIC 7add_library(core STATIC
8 arm/arm_interface.h 8 arm/arm_interface.h
9 arm/arm_interface.cpp 9 arm/arm_interface.cpp
10 arm/cpu_interrupt_handler.cpp
11 arm/cpu_interrupt_handler.h
12 arm/dynarmic/arm_dynarmic_32.cpp
13 arm/dynarmic/arm_dynarmic_32.h
14 arm/dynarmic/arm_dynarmic_64.cpp
15 arm/dynarmic/arm_dynarmic_64.h
16 arm/dynarmic/arm_dynarmic_cp15.cpp
17 arm/dynarmic/arm_dynarmic_cp15.h
18 arm/dynarmic/arm_exclusive_monitor.cpp
19 arm/dynarmic/arm_exclusive_monitor.h
10 arm/exclusive_monitor.cpp 20 arm/exclusive_monitor.cpp
11 arm/exclusive_monitor.h 21 arm/exclusive_monitor.h
12 arm/unicorn/arm_unicorn.cpp 22 arm/unicorn/arm_unicorn.cpp
@@ -15,8 +25,6 @@ add_library(core STATIC
15 constants.h 25 constants.h
16 core.cpp 26 core.cpp
17 core.h 27 core.h
18 core_manager.cpp
19 core_manager.h
20 core_timing.cpp 28 core_timing.cpp
21 core_timing.h 29 core_timing.h
22 core_timing_util.cpp 30 core_timing_util.cpp
diff --git a/src/core/arm/arm_interface.cpp b/src/core/arm/arm_interface.cpp
index d079a1bc8..d2295ed90 100644
--- a/src/core/arm/arm_interface.cpp
+++ b/src/core/arm/arm_interface.cpp
@@ -139,6 +139,63 @@ std::optional<std::string> GetSymbolName(const Symbols& symbols, VAddr func_addr
139 139
140constexpr u64 SEGMENT_BASE = 0x7100000000ull; 140constexpr u64 SEGMENT_BASE = 0x7100000000ull;
141 141
142std::vector<ARM_Interface::BacktraceEntry> ARM_Interface::GetBacktraceFromContext(
143 System& system, const ThreadContext64& ctx) {
144 std::vector<BacktraceEntry> out;
145 auto& memory = system.Memory();
146
147 auto fp = ctx.cpu_registers[29];
148 auto lr = ctx.cpu_registers[30];
149 while (true) {
150 out.push_back({"", 0, lr, 0});
151 if (!fp) {
152 break;
153 }
154 lr = memory.Read64(fp + 8) - 4;
155 fp = memory.Read64(fp);
156 }
157
158 std::map<VAddr, std::string> modules;
159 auto& loader{system.GetAppLoader()};
160 if (loader.ReadNSOModules(modules) != Loader::ResultStatus::Success) {
161 return {};
162 }
163
164 std::map<std::string, Symbols> symbols;
165 for (const auto& module : modules) {
166 symbols.insert_or_assign(module.second, GetSymbols(module.first, memory));
167 }
168
169 for (auto& entry : out) {
170 VAddr base = 0;
171 for (auto iter = modules.rbegin(); iter != modules.rend(); ++iter) {
172 const auto& module{*iter};
173 if (entry.original_address >= module.first) {
174 entry.module = module.second;
175 base = module.first;
176 break;
177 }
178 }
179
180 entry.offset = entry.original_address - base;
181 entry.address = SEGMENT_BASE + entry.offset;
182
183 if (entry.module.empty())
184 entry.module = "unknown";
185
186 const auto symbol_set = symbols.find(entry.module);
187 if (symbol_set != symbols.end()) {
188 const auto symbol = GetSymbolName(symbol_set->second, entry.offset);
189 if (symbol.has_value()) {
190 // TODO(DarkLordZach): Add demangling of symbol names.
191 entry.name = *symbol;
192 }
193 }
194 }
195
196 return out;
197}
198
142std::vector<ARM_Interface::BacktraceEntry> ARM_Interface::GetBacktrace() const { 199std::vector<ARM_Interface::BacktraceEntry> ARM_Interface::GetBacktrace() const {
143 std::vector<BacktraceEntry> out; 200 std::vector<BacktraceEntry> out;
144 auto& memory = system.Memory(); 201 auto& memory = system.Memory();
diff --git a/src/core/arm/arm_interface.h b/src/core/arm/arm_interface.h
index cb2e640e2..1f24051e4 100644
--- a/src/core/arm/arm_interface.h
+++ b/src/core/arm/arm_interface.h
@@ -7,6 +7,7 @@
7#include <array> 7#include <array>
8#include <vector> 8#include <vector>
9#include "common/common_types.h" 9#include "common/common_types.h"
10#include "core/hardware_properties.h"
10 11
11namespace Common { 12namespace Common {
12struct PageTable; 13struct PageTable;
@@ -18,25 +19,29 @@ enum class VMAPermission : u8;
18 19
19namespace Core { 20namespace Core {
20class System; 21class System;
22class CPUInterruptHandler;
23
24using CPUInterrupts = std::array<CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>;
21 25
22/// Generic ARMv8 CPU interface 26/// Generic ARMv8 CPU interface
23class ARM_Interface : NonCopyable { 27class ARM_Interface : NonCopyable {
24public: 28public:
25 explicit ARM_Interface(System& system_) : system{system_} {} 29 explicit ARM_Interface(System& system_, CPUInterrupts& interrupt_handlers, bool uses_wall_clock)
30 : system{system_}, interrupt_handlers{interrupt_handlers}, uses_wall_clock{
31 uses_wall_clock} {}
26 virtual ~ARM_Interface() = default; 32 virtual ~ARM_Interface() = default;
27 33
28 struct ThreadContext32 { 34 struct ThreadContext32 {
29 std::array<u32, 16> cpu_registers{}; 35 std::array<u32, 16> cpu_registers{};
36 std::array<u32, 64> extension_registers{};
30 u32 cpsr{}; 37 u32 cpsr{};
31 std::array<u8, 4> padding{};
32 std::array<u64, 32> fprs{};
33 u32 fpscr{}; 38 u32 fpscr{};
34 u32 fpexc{}; 39 u32 fpexc{};
35 u32 tpidr{}; 40 u32 tpidr{};
36 }; 41 };
37 // Internally within the kernel, it expects the AArch32 version of the 42 // Internally within the kernel, it expects the AArch32 version of the
38 // thread context to be 344 bytes in size. 43 // thread context to be 344 bytes in size.
39 static_assert(sizeof(ThreadContext32) == 0x158); 44 static_assert(sizeof(ThreadContext32) == 0x150);
40 45
41 struct ThreadContext64 { 46 struct ThreadContext64 {
42 std::array<u64, 31> cpu_registers{}; 47 std::array<u64, 31> cpu_registers{};
@@ -143,6 +148,8 @@ public:
143 */ 148 */
144 virtual void SetTPIDR_EL0(u64 value) = 0; 149 virtual void SetTPIDR_EL0(u64 value) = 0;
145 150
151 virtual void ChangeProcessorID(std::size_t new_core_id) = 0;
152
146 virtual void SaveContext(ThreadContext32& ctx) = 0; 153 virtual void SaveContext(ThreadContext32& ctx) = 0;
147 virtual void SaveContext(ThreadContext64& ctx) = 0; 154 virtual void SaveContext(ThreadContext64& ctx) = 0;
148 virtual void LoadContext(const ThreadContext32& ctx) = 0; 155 virtual void LoadContext(const ThreadContext32& ctx) = 0;
@@ -162,6 +169,9 @@ public:
162 std::string name; 169 std::string name;
163 }; 170 };
164 171
172 static std::vector<BacktraceEntry> GetBacktraceFromContext(System& system,
173 const ThreadContext64& ctx);
174
165 std::vector<BacktraceEntry> GetBacktrace() const; 175 std::vector<BacktraceEntry> GetBacktrace() const;
166 176
167 /// fp (= r29) points to the last frame record. 177 /// fp (= r29) points to the last frame record.
@@ -175,6 +185,8 @@ public:
175protected: 185protected:
176 /// System context that this ARM interface is running under. 186 /// System context that this ARM interface is running under.
177 System& system; 187 System& system;
188 CPUInterrupts& interrupt_handlers;
189 bool uses_wall_clock;
178}; 190};
179 191
180} // namespace Core 192} // namespace Core
diff --git a/src/core/arm/cpu_interrupt_handler.cpp b/src/core/arm/cpu_interrupt_handler.cpp
new file mode 100644
index 000000000..2f1a1a269
--- /dev/null
+++ b/src/core/arm/cpu_interrupt_handler.cpp
@@ -0,0 +1,29 @@
1// Copyright 2020 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "common/thread.h"
8#include "core/arm/cpu_interrupt_handler.h"
9
10namespace Core {
11
12CPUInterruptHandler::CPUInterruptHandler() : is_interrupted{} {
13 interrupt_event = std::make_unique<Common::Event>();
14}
15
16CPUInterruptHandler::~CPUInterruptHandler() = default;
17
18void CPUInterruptHandler::SetInterrupt(bool is_interrupted_) {
19 if (is_interrupted_) {
20 interrupt_event->Set();
21 }
22 this->is_interrupted = is_interrupted_;
23}
24
25void CPUInterruptHandler::AwaitInterrupt() {
26 interrupt_event->Wait();
27}
28
29} // namespace Core
diff --git a/src/core/arm/cpu_interrupt_handler.h b/src/core/arm/cpu_interrupt_handler.h
new file mode 100644
index 000000000..3d062d326
--- /dev/null
+++ b/src/core/arm/cpu_interrupt_handler.h
@@ -0,0 +1,39 @@
1// Copyright 2020 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8
9namespace Common {
10class Event;
11}
12
13namespace Core {
14
15class CPUInterruptHandler {
16public:
17 CPUInterruptHandler();
18 ~CPUInterruptHandler();
19
20 CPUInterruptHandler(const CPUInterruptHandler&) = delete;
21 CPUInterruptHandler& operator=(const CPUInterruptHandler&) = delete;
22
23 CPUInterruptHandler(CPUInterruptHandler&&) = default;
24 CPUInterruptHandler& operator=(CPUInterruptHandler&&) = default;
25
26 bool IsInterrupted() const {
27 return is_interrupted;
28 }
29
30 void SetInterrupt(bool is_interrupted);
31
32 void AwaitInterrupt();
33
34private:
35 bool is_interrupted{};
36 std::unique_ptr<Common::Event> interrupt_event;
37};
38
39} // namespace Core
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index 4c8663d03..0d4ab95b7 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -7,15 +7,17 @@
7#include <dynarmic/A32/a32.h> 7#include <dynarmic/A32/a32.h>
8#include <dynarmic/A32/config.h> 8#include <dynarmic/A32/config.h>
9#include <dynarmic/A32/context.h> 9#include <dynarmic/A32/context.h>
10#include "common/microprofile.h" 10#include "common/logging/log.h"
11#include "common/page_table.h"
12#include "core/arm/cpu_interrupt_handler.h"
11#include "core/arm/dynarmic/arm_dynarmic_32.h" 13#include "core/arm/dynarmic/arm_dynarmic_32.h"
12#include "core/arm/dynarmic/arm_dynarmic_64.h"
13#include "core/arm/dynarmic/arm_dynarmic_cp15.h" 14#include "core/arm/dynarmic/arm_dynarmic_cp15.h"
15#include "core/arm/dynarmic/arm_exclusive_monitor.h"
14#include "core/core.h" 16#include "core/core.h"
15#include "core/core_manager.h"
16#include "core/core_timing.h" 17#include "core/core_timing.h"
17#include "core/hle/kernel/svc.h" 18#include "core/hle/kernel/svc.h"
18#include "core/memory.h" 19#include "core/memory.h"
20#include "core/settings.h"
19 21
20namespace Core { 22namespace Core {
21 23
@@ -49,6 +51,19 @@ public:
49 parent.system.Memory().Write64(vaddr, value); 51 parent.system.Memory().Write64(vaddr, value);
50 } 52 }
51 53
54 bool MemoryWriteExclusive8(u32 vaddr, u8 value, u8 expected) override {
55 return parent.system.Memory().WriteExclusive8(vaddr, value, expected);
56 }
57 bool MemoryWriteExclusive16(u32 vaddr, u16 value, u16 expected) override {
58 return parent.system.Memory().WriteExclusive16(vaddr, value, expected);
59 }
60 bool MemoryWriteExclusive32(u32 vaddr, u32 value, u32 expected) override {
61 return parent.system.Memory().WriteExclusive32(vaddr, value, expected);
62 }
63 bool MemoryWriteExclusive64(u32 vaddr, u64 value, u64 expected) override {
64 return parent.system.Memory().WriteExclusive64(vaddr, value, expected);
65 }
66
52 void InterpreterFallback(u32 pc, std::size_t num_instructions) override { 67 void InterpreterFallback(u32 pc, std::size_t num_instructions) override {
53 UNIMPLEMENTED_MSG("This should never happen, pc = {:08X}, code = {:08X}", pc, 68 UNIMPLEMENTED_MSG("This should never happen, pc = {:08X}, code = {:08X}", pc,
54 MemoryReadCode(pc)); 69 MemoryReadCode(pc));
@@ -72,24 +87,36 @@ public:
72 } 87 }
73 88
74 void AddTicks(u64 ticks) override { 89 void AddTicks(u64 ticks) override {
90 if (parent.uses_wall_clock) {
91 return;
92 }
75 // Divide the number of ticks by the amount of CPU cores. TODO(Subv): This yields only a 93 // Divide the number of ticks by the amount of CPU cores. TODO(Subv): This yields only a
76 // rough approximation of the amount of executed ticks in the system, it may be thrown off 94 // rough approximation of the amount of executed ticks in the system, it may be thrown off
77 // if not all cores are doing a similar amount of work. Instead of doing this, we should 95 // if not all cores are doing a similar amount of work. Instead of doing this, we should
78 // devise a way so that timing is consistent across all cores without increasing the ticks 4 96 // devise a way so that timing is consistent across all cores without increasing the ticks 4
79 // times. 97 // times.
80 u64 amortized_ticks = (ticks - num_interpreted_instructions) / Core::NUM_CPU_CORES; 98 u64 amortized_ticks =
99 (ticks - num_interpreted_instructions) / Core::Hardware::NUM_CPU_CORES;
81 // Always execute at least one tick. 100 // Always execute at least one tick.
82 amortized_ticks = std::max<u64>(amortized_ticks, 1); 101 amortized_ticks = std::max<u64>(amortized_ticks, 1);
83 102
84 parent.system.CoreTiming().AddTicks(amortized_ticks); 103 parent.system.CoreTiming().AddTicks(amortized_ticks);
85 num_interpreted_instructions = 0; 104 num_interpreted_instructions = 0;
86 } 105 }
106
87 u64 GetTicksRemaining() override { 107 u64 GetTicksRemaining() override {
88 return std::max(parent.system.CoreTiming().GetDowncount(), {}); 108 if (parent.uses_wall_clock) {
109 if (!parent.interrupt_handlers[parent.core_index].IsInterrupted()) {
110 return minimum_run_cycles;
111 }
112 return 0U;
113 }
114 return std::max<s64>(parent.system.CoreTiming().GetDowncount(), 0);
89 } 115 }
90 116
91 ARM_Dynarmic_32& parent; 117 ARM_Dynarmic_32& parent;
92 std::size_t num_interpreted_instructions{}; 118 std::size_t num_interpreted_instructions{};
119 static constexpr u64 minimum_run_cycles = 1000U;
93}; 120};
94 121
95std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable& page_table, 122std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable& page_table,
@@ -100,13 +127,31 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable&
100 // config.page_table = &page_table.pointers; 127 // config.page_table = &page_table.pointers;
101 config.coprocessors[15] = cp15; 128 config.coprocessors[15] = cp15;
102 config.define_unpredictable_behaviour = true; 129 config.define_unpredictable_behaviour = true;
130 static constexpr std::size_t PAGE_BITS = 12;
131 static constexpr std::size_t NUM_PAGE_TABLE_ENTRIES = 1 << (32 - PAGE_BITS);
132 config.page_table = reinterpret_cast<std::array<std::uint8_t*, NUM_PAGE_TABLE_ENTRIES>*>(
133 page_table.pointers.data());
134 config.absolute_offset_page_table = true;
135 config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128;
136 config.only_detect_misalignment_via_page_table_on_page_boundary = true;
137
138 // Multi-process state
139 config.processor_id = core_index;
140 config.global_monitor = &exclusive_monitor.monitor;
141
142 // Timing
143 config.wall_clock_cntpct = uses_wall_clock;
144
145 // Optimizations
146 if (Settings::values.disable_cpu_opt) {
147 config.enable_optimizations = false;
148 config.enable_fast_dispatch = false;
149 }
150
103 return std::make_unique<Dynarmic::A32::Jit>(config); 151 return std::make_unique<Dynarmic::A32::Jit>(config);
104} 152}
105 153
106MICROPROFILE_DEFINE(ARM_Jit_Dynarmic_32, "ARM JIT", "Dynarmic", MP_RGB(255, 64, 64));
107
108void ARM_Dynarmic_32::Run() { 154void ARM_Dynarmic_32::Run() {
109 MICROPROFILE_SCOPE(ARM_Jit_Dynarmic_32);
110 jit->Run(); 155 jit->Run();
111} 156}
112 157
@@ -114,9 +159,11 @@ void ARM_Dynarmic_32::Step() {
114 jit->Step(); 159 jit->Step();
115} 160}
116 161
117ARM_Dynarmic_32::ARM_Dynarmic_32(System& system, ExclusiveMonitor& exclusive_monitor, 162ARM_Dynarmic_32::ARM_Dynarmic_32(System& system, CPUInterrupts& interrupt_handlers,
163 bool uses_wall_clock, ExclusiveMonitor& exclusive_monitor,
118 std::size_t core_index) 164 std::size_t core_index)
119 : ARM_Interface{system}, cb(std::make_unique<DynarmicCallbacks32>(*this)), 165 : ARM_Interface{system, interrupt_handlers, uses_wall_clock},
166 cb(std::make_unique<DynarmicCallbacks32>(*this)),
120 cp15(std::make_shared<DynarmicCP15>(*this)), core_index{core_index}, 167 cp15(std::make_shared<DynarmicCP15>(*this)), core_index{core_index},
121 exclusive_monitor{dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} {} 168 exclusive_monitor{dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} {}
122 169
@@ -168,17 +215,25 @@ void ARM_Dynarmic_32::SetTPIDR_EL0(u64 value) {
168 cp15->uprw = static_cast<u32>(value); 215 cp15->uprw = static_cast<u32>(value);
169} 216}
170 217
218void ARM_Dynarmic_32::ChangeProcessorID(std::size_t new_core_id) {
219 jit->ChangeProcessorID(new_core_id);
220}
221
171void ARM_Dynarmic_32::SaveContext(ThreadContext32& ctx) { 222void ARM_Dynarmic_32::SaveContext(ThreadContext32& ctx) {
172 Dynarmic::A32::Context context; 223 Dynarmic::A32::Context context;
173 jit->SaveContext(context); 224 jit->SaveContext(context);
174 ctx.cpu_registers = context.Regs(); 225 ctx.cpu_registers = context.Regs();
226 ctx.extension_registers = context.ExtRegs();
175 ctx.cpsr = context.Cpsr(); 227 ctx.cpsr = context.Cpsr();
228 ctx.fpscr = context.Fpscr();
176} 229}
177 230
178void ARM_Dynarmic_32::LoadContext(const ThreadContext32& ctx) { 231void ARM_Dynarmic_32::LoadContext(const ThreadContext32& ctx) {
179 Dynarmic::A32::Context context; 232 Dynarmic::A32::Context context;
180 context.Regs() = ctx.cpu_registers; 233 context.Regs() = ctx.cpu_registers;
234 context.ExtRegs() = ctx.extension_registers;
181 context.SetCpsr(ctx.cpsr); 235 context.SetCpsr(ctx.cpsr);
236 context.SetFpscr(ctx.fpscr);
182 jit->LoadContext(context); 237 jit->LoadContext(context);
183} 238}
184 239
@@ -187,10 +242,15 @@ void ARM_Dynarmic_32::PrepareReschedule() {
187} 242}
188 243
189void ARM_Dynarmic_32::ClearInstructionCache() { 244void ARM_Dynarmic_32::ClearInstructionCache() {
245 if (!jit) {
246 return;
247 }
190 jit->ClearCache(); 248 jit->ClearCache();
191} 249}
192 250
193void ARM_Dynarmic_32::ClearExclusiveState() {} 251void ARM_Dynarmic_32::ClearExclusiveState() {
252 jit->ClearExclusiveState();
253}
194 254
195void ARM_Dynarmic_32::PageTableChanged(Common::PageTable& page_table, 255void ARM_Dynarmic_32::PageTableChanged(Common::PageTable& page_table,
196 std::size_t new_address_space_size_in_bits) { 256 std::size_t new_address_space_size_in_bits) {
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.h b/src/core/arm/dynarmic/arm_dynarmic_32.h
index e5b92d7bb..2bab31b92 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.h
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.h
@@ -9,7 +9,7 @@
9 9
10#include <dynarmic/A32/a32.h> 10#include <dynarmic/A32/a32.h>
11#include <dynarmic/A64/a64.h> 11#include <dynarmic/A64/a64.h>
12#include <dynarmic/A64/exclusive_monitor.h> 12#include <dynarmic/exclusive_monitor.h>
13#include "common/common_types.h" 13#include "common/common_types.h"
14#include "common/hash.h" 14#include "common/hash.h"
15#include "core/arm/arm_interface.h" 15#include "core/arm/arm_interface.h"
@@ -21,6 +21,7 @@ class Memory;
21 21
22namespace Core { 22namespace Core {
23 23
24class CPUInterruptHandler;
24class DynarmicCallbacks32; 25class DynarmicCallbacks32;
25class DynarmicCP15; 26class DynarmicCP15;
26class DynarmicExclusiveMonitor; 27class DynarmicExclusiveMonitor;
@@ -28,7 +29,8 @@ class System;
28 29
29class ARM_Dynarmic_32 final : public ARM_Interface { 30class ARM_Dynarmic_32 final : public ARM_Interface {
30public: 31public:
31 ARM_Dynarmic_32(System& system, ExclusiveMonitor& exclusive_monitor, std::size_t core_index); 32 ARM_Dynarmic_32(System& system, CPUInterrupts& interrupt_handlers, bool uses_wall_clock,
33 ExclusiveMonitor& exclusive_monitor, std::size_t core_index);
32 ~ARM_Dynarmic_32() override; 34 ~ARM_Dynarmic_32() override;
33 35
34 void SetPC(u64 pc) override; 36 void SetPC(u64 pc) override;
@@ -45,6 +47,7 @@ public:
45 void SetTlsAddress(VAddr address) override; 47 void SetTlsAddress(VAddr address) override;
46 void SetTPIDR_EL0(u64 value) override; 48 void SetTPIDR_EL0(u64 value) override;
47 u64 GetTPIDR_EL0() const override; 49 u64 GetTPIDR_EL0() const override;
50 void ChangeProcessorID(std::size_t new_core_id) override;
48 51
49 void SaveContext(ThreadContext32& ctx) override; 52 void SaveContext(ThreadContext32& ctx) override;
50 void SaveContext(ThreadContext64& ctx) override {} 53 void SaveContext(ThreadContext64& ctx) override {}
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index 5f5e36d94..790981034 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -7,11 +7,11 @@
7#include <dynarmic/A64/a64.h> 7#include <dynarmic/A64/a64.h>
8#include <dynarmic/A64/config.h> 8#include <dynarmic/A64/config.h>
9#include "common/logging/log.h" 9#include "common/logging/log.h"
10#include "common/microprofile.h"
11#include "common/page_table.h" 10#include "common/page_table.h"
11#include "core/arm/cpu_interrupt_handler.h"
12#include "core/arm/dynarmic/arm_dynarmic_64.h" 12#include "core/arm/dynarmic/arm_dynarmic_64.h"
13#include "core/arm/dynarmic/arm_exclusive_monitor.h"
13#include "core/core.h" 14#include "core/core.h"
14#include "core/core_manager.h"
15#include "core/core_timing.h" 15#include "core/core_timing.h"
16#include "core/core_timing_util.h" 16#include "core/core_timing_util.h"
17#include "core/gdbstub/gdbstub.h" 17#include "core/gdbstub/gdbstub.h"
@@ -65,6 +65,22 @@ public:
65 memory.Write64(vaddr + 8, value[1]); 65 memory.Write64(vaddr + 8, value[1]);
66 } 66 }
67 67
68 bool MemoryWriteExclusive8(u64 vaddr, std::uint8_t value, std::uint8_t expected) override {
69 return parent.system.Memory().WriteExclusive8(vaddr, value, expected);
70 }
71 bool MemoryWriteExclusive16(u64 vaddr, std::uint16_t value, std::uint16_t expected) override {
72 return parent.system.Memory().WriteExclusive16(vaddr, value, expected);
73 }
74 bool MemoryWriteExclusive32(u64 vaddr, std::uint32_t value, std::uint32_t expected) override {
75 return parent.system.Memory().WriteExclusive32(vaddr, value, expected);
76 }
77 bool MemoryWriteExclusive64(u64 vaddr, std::uint64_t value, std::uint64_t expected) override {
78 return parent.system.Memory().WriteExclusive64(vaddr, value, expected);
79 }
80 bool MemoryWriteExclusive128(u64 vaddr, Vector value, Vector expected) override {
81 return parent.system.Memory().WriteExclusive128(vaddr, value, expected);
82 }
83
68 void InterpreterFallback(u64 pc, std::size_t num_instructions) override { 84 void InterpreterFallback(u64 pc, std::size_t num_instructions) override {
69 LOG_INFO(Core_ARM, "Unicorn fallback @ 0x{:X} for {} instructions (instr = {:08X})", pc, 85 LOG_INFO(Core_ARM, "Unicorn fallback @ 0x{:X} for {} instructions (instr = {:08X})", pc,
70 num_instructions, MemoryReadCode(pc)); 86 num_instructions, MemoryReadCode(pc));
@@ -108,29 +124,42 @@ public:
108 } 124 }
109 125
110 void AddTicks(u64 ticks) override { 126 void AddTicks(u64 ticks) override {
127 if (parent.uses_wall_clock) {
128 return;
129 }
111 // Divide the number of ticks by the amount of CPU cores. TODO(Subv): This yields only a 130 // Divide the number of ticks by the amount of CPU cores. TODO(Subv): This yields only a
112 // rough approximation of the amount of executed ticks in the system, it may be thrown off 131 // rough approximation of the amount of executed ticks in the system, it may be thrown off
113 // if not all cores are doing a similar amount of work. Instead of doing this, we should 132 // if not all cores are doing a similar amount of work. Instead of doing this, we should
114 // device a way so that timing is consistent across all cores without increasing the ticks 4 133 // device a way so that timing is consistent across all cores without increasing the ticks 4
115 // times. 134 // times.
116 u64 amortized_ticks = (ticks - num_interpreted_instructions) / Core::NUM_CPU_CORES; 135 u64 amortized_ticks =
136 (ticks - num_interpreted_instructions) / Core::Hardware::NUM_CPU_CORES;
117 // Always execute at least one tick. 137 // Always execute at least one tick.
118 amortized_ticks = std::max<u64>(amortized_ticks, 1); 138 amortized_ticks = std::max<u64>(amortized_ticks, 1);
119 139
120 parent.system.CoreTiming().AddTicks(amortized_ticks); 140 parent.system.CoreTiming().AddTicks(amortized_ticks);
121 num_interpreted_instructions = 0; 141 num_interpreted_instructions = 0;
122 } 142 }
143
123 u64 GetTicksRemaining() override { 144 u64 GetTicksRemaining() override {
124 return std::max(parent.system.CoreTiming().GetDowncount(), s64{0}); 145 if (parent.uses_wall_clock) {
146 if (!parent.interrupt_handlers[parent.core_index].IsInterrupted()) {
147 return minimum_run_cycles;
148 }
149 return 0U;
150 }
151 return std::max<s64>(parent.system.CoreTiming().GetDowncount(), 0);
125 } 152 }
153
126 u64 GetCNTPCT() override { 154 u64 GetCNTPCT() override {
127 return Timing::CpuCyclesToClockCycles(parent.system.CoreTiming().GetTicks()); 155 return parent.system.CoreTiming().GetClockTicks();
128 } 156 }
129 157
130 ARM_Dynarmic_64& parent; 158 ARM_Dynarmic_64& parent;
131 std::size_t num_interpreted_instructions = 0; 159 std::size_t num_interpreted_instructions = 0;
132 u64 tpidrro_el0 = 0; 160 u64 tpidrro_el0 = 0;
133 u64 tpidr_el0 = 0; 161 u64 tpidr_el0 = 0;
162 static constexpr u64 minimum_run_cycles = 1000U;
134}; 163};
135 164
136std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable& page_table, 165std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable& page_table,
@@ -168,14 +197,13 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable&
168 config.enable_fast_dispatch = false; 197 config.enable_fast_dispatch = false;
169 } 198 }
170 199
200 // Timing
201 config.wall_clock_cntpct = uses_wall_clock;
202
171 return std::make_shared<Dynarmic::A64::Jit>(config); 203 return std::make_shared<Dynarmic::A64::Jit>(config);
172} 204}
173 205
174MICROPROFILE_DEFINE(ARM_Jit_Dynarmic_64, "ARM JIT", "Dynarmic", MP_RGB(255, 64, 64));
175
176void ARM_Dynarmic_64::Run() { 206void ARM_Dynarmic_64::Run() {
177 MICROPROFILE_SCOPE(ARM_Jit_Dynarmic_64);
178
179 jit->Run(); 207 jit->Run();
180} 208}
181 209
@@ -183,11 +211,16 @@ void ARM_Dynarmic_64::Step() {
183 cb->InterpreterFallback(jit->GetPC(), 1); 211 cb->InterpreterFallback(jit->GetPC(), 1);
184} 212}
185 213
186ARM_Dynarmic_64::ARM_Dynarmic_64(System& system, ExclusiveMonitor& exclusive_monitor, 214ARM_Dynarmic_64::ARM_Dynarmic_64(System& system, CPUInterrupts& interrupt_handlers,
215 bool uses_wall_clock, ExclusiveMonitor& exclusive_monitor,
187 std::size_t core_index) 216 std::size_t core_index)
188 : ARM_Interface{system}, cb(std::make_unique<DynarmicCallbacks64>(*this)), 217 : ARM_Interface{system, interrupt_handlers, uses_wall_clock},
189 inner_unicorn{system, ARM_Unicorn::Arch::AArch64}, core_index{core_index}, 218 cb(std::make_unique<DynarmicCallbacks64>(*this)), inner_unicorn{system, interrupt_handlers,
190 exclusive_monitor{dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} {} 219 uses_wall_clock,
220 ARM_Unicorn::Arch::AArch64,
221 core_index},
222 core_index{core_index}, exclusive_monitor{
223 dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} {}
191 224
192ARM_Dynarmic_64::~ARM_Dynarmic_64() = default; 225ARM_Dynarmic_64::~ARM_Dynarmic_64() = default;
193 226
@@ -239,6 +272,10 @@ void ARM_Dynarmic_64::SetTPIDR_EL0(u64 value) {
239 cb->tpidr_el0 = value; 272 cb->tpidr_el0 = value;
240} 273}
241 274
275void ARM_Dynarmic_64::ChangeProcessorID(std::size_t new_core_id) {
276 jit->ChangeProcessorID(new_core_id);
277}
278
242void ARM_Dynarmic_64::SaveContext(ThreadContext64& ctx) { 279void ARM_Dynarmic_64::SaveContext(ThreadContext64& ctx) {
243 ctx.cpu_registers = jit->GetRegisters(); 280 ctx.cpu_registers = jit->GetRegisters();
244 ctx.sp = jit->GetSP(); 281 ctx.sp = jit->GetSP();
@@ -266,6 +303,9 @@ void ARM_Dynarmic_64::PrepareReschedule() {
266} 303}
267 304
268void ARM_Dynarmic_64::ClearInstructionCache() { 305void ARM_Dynarmic_64::ClearInstructionCache() {
306 if (!jit) {
307 return;
308 }
269 jit->ClearCache(); 309 jit->ClearCache();
270} 310}
271 311
@@ -285,44 +325,4 @@ void ARM_Dynarmic_64::PageTableChanged(Common::PageTable& page_table,
285 jit_cache.emplace(key, jit); 325 jit_cache.emplace(key, jit);
286} 326}
287 327
288DynarmicExclusiveMonitor::DynarmicExclusiveMonitor(Memory::Memory& memory, std::size_t core_count)
289 : monitor(core_count), memory{memory} {}
290
291DynarmicExclusiveMonitor::~DynarmicExclusiveMonitor() = default;
292
293void DynarmicExclusiveMonitor::SetExclusive(std::size_t core_index, VAddr addr) {
294 // Size doesn't actually matter.
295 monitor.Mark(core_index, addr, 16);
296}
297
298void DynarmicExclusiveMonitor::ClearExclusive() {
299 monitor.Clear();
300}
301
302bool DynarmicExclusiveMonitor::ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) {
303 return monitor.DoExclusiveOperation(core_index, vaddr, 1, [&] { memory.Write8(vaddr, value); });
304}
305
306bool DynarmicExclusiveMonitor::ExclusiveWrite16(std::size_t core_index, VAddr vaddr, u16 value) {
307 return monitor.DoExclusiveOperation(core_index, vaddr, 2,
308 [&] { memory.Write16(vaddr, value); });
309}
310
311bool DynarmicExclusiveMonitor::ExclusiveWrite32(std::size_t core_index, VAddr vaddr, u32 value) {
312 return monitor.DoExclusiveOperation(core_index, vaddr, 4,
313 [&] { memory.Write32(vaddr, value); });
314}
315
316bool DynarmicExclusiveMonitor::ExclusiveWrite64(std::size_t core_index, VAddr vaddr, u64 value) {
317 return monitor.DoExclusiveOperation(core_index, vaddr, 8,
318 [&] { memory.Write64(vaddr, value); });
319}
320
321bool DynarmicExclusiveMonitor::ExclusiveWrite128(std::size_t core_index, VAddr vaddr, u128 value) {
322 return monitor.DoExclusiveOperation(core_index, vaddr, 16, [&] {
323 memory.Write64(vaddr + 0, value[0]);
324 memory.Write64(vaddr + 8, value[1]);
325 });
326}
327
328} // namespace Core 328} // namespace Core
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.h b/src/core/arm/dynarmic/arm_dynarmic_64.h
index 647cecaf0..403c55961 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.h
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.h
@@ -8,7 +8,6 @@
8#include <unordered_map> 8#include <unordered_map>
9 9
10#include <dynarmic/A64/a64.h> 10#include <dynarmic/A64/a64.h>
11#include <dynarmic/A64/exclusive_monitor.h>
12#include "common/common_types.h" 11#include "common/common_types.h"
13#include "common/hash.h" 12#include "common/hash.h"
14#include "core/arm/arm_interface.h" 13#include "core/arm/arm_interface.h"
@@ -22,12 +21,14 @@ class Memory;
22namespace Core { 21namespace Core {
23 22
24class DynarmicCallbacks64; 23class DynarmicCallbacks64;
24class CPUInterruptHandler;
25class DynarmicExclusiveMonitor; 25class DynarmicExclusiveMonitor;
26class System; 26class System;
27 27
28class ARM_Dynarmic_64 final : public ARM_Interface { 28class ARM_Dynarmic_64 final : public ARM_Interface {
29public: 29public:
30 ARM_Dynarmic_64(System& system, ExclusiveMonitor& exclusive_monitor, std::size_t core_index); 30 ARM_Dynarmic_64(System& system, CPUInterrupts& interrupt_handlers, bool uses_wall_clock,
31 ExclusiveMonitor& exclusive_monitor, std::size_t core_index);
31 ~ARM_Dynarmic_64() override; 32 ~ARM_Dynarmic_64() override;
32 33
33 void SetPC(u64 pc) override; 34 void SetPC(u64 pc) override;
@@ -44,6 +45,7 @@ public:
44 void SetTlsAddress(VAddr address) override; 45 void SetTlsAddress(VAddr address) override;
45 void SetTPIDR_EL0(u64 value) override; 46 void SetTPIDR_EL0(u64 value) override;
46 u64 GetTPIDR_EL0() const override; 47 u64 GetTPIDR_EL0() const override;
48 void ChangeProcessorID(std::size_t new_core_id) override;
47 49
48 void SaveContext(ThreadContext32& ctx) override {} 50 void SaveContext(ThreadContext32& ctx) override {}
49 void SaveContext(ThreadContext64& ctx) override; 51 void SaveContext(ThreadContext64& ctx) override;
@@ -75,24 +77,4 @@ private:
75 DynarmicExclusiveMonitor& exclusive_monitor; 77 DynarmicExclusiveMonitor& exclusive_monitor;
76}; 78};
77 79
78class DynarmicExclusiveMonitor final : public ExclusiveMonitor {
79public:
80 explicit DynarmicExclusiveMonitor(Memory::Memory& memory, std::size_t core_count);
81 ~DynarmicExclusiveMonitor() override;
82
83 void SetExclusive(std::size_t core_index, VAddr addr) override;
84 void ClearExclusive() override;
85
86 bool ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) override;
87 bool ExclusiveWrite16(std::size_t core_index, VAddr vaddr, u16 value) override;
88 bool ExclusiveWrite32(std::size_t core_index, VAddr vaddr, u32 value) override;
89 bool ExclusiveWrite64(std::size_t core_index, VAddr vaddr, u64 value) override;
90 bool ExclusiveWrite128(std::size_t core_index, VAddr vaddr, u128 value) override;
91
92private:
93 friend class ARM_Dynarmic_64;
94 Dynarmic::A64::ExclusiveMonitor monitor;
95 Core::Memory::Memory& memory;
96};
97
98} // namespace Core 80} // namespace Core
diff --git a/src/core/arm/dynarmic/arm_dynarmic_cp15.cpp b/src/core/arm/dynarmic/arm_dynarmic_cp15.cpp
index d43e4dd70..54556e0f9 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_cp15.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_cp15.cpp
@@ -97,7 +97,7 @@ CallbackOrAccessTwoWords DynarmicCP15::CompileGetTwoWords(bool two, unsigned opc
97 const auto callback = static_cast<u64 (*)(Dynarmic::A32::Jit*, void*, u32, u32)>( 97 const auto callback = static_cast<u64 (*)(Dynarmic::A32::Jit*, void*, u32, u32)>(
98 [](Dynarmic::A32::Jit*, void* arg, u32, u32) -> u64 { 98 [](Dynarmic::A32::Jit*, void* arg, u32, u32) -> u64 {
99 ARM_Dynarmic_32& parent = *(ARM_Dynarmic_32*)arg; 99 ARM_Dynarmic_32& parent = *(ARM_Dynarmic_32*)arg;
100 return Timing::CpuCyclesToClockCycles(parent.system.CoreTiming().GetTicks()); 100 return parent.system.CoreTiming().GetClockTicks();
101 }); 101 });
102 return Dynarmic::A32::Coprocessor::Callback{callback, (void*)&parent}; 102 return Dynarmic::A32::Coprocessor::Callback{callback, (void*)&parent};
103 } 103 }
diff --git a/src/core/arm/dynarmic/arm_exclusive_monitor.cpp b/src/core/arm/dynarmic/arm_exclusive_monitor.cpp
new file mode 100644
index 000000000..4e209f6a5
--- /dev/null
+++ b/src/core/arm/dynarmic/arm_exclusive_monitor.cpp
@@ -0,0 +1,76 @@
1// Copyright 2018 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <cinttypes>
6#include <memory>
7#include "core/arm/dynarmic/arm_exclusive_monitor.h"
8#include "core/memory.h"
9
10namespace Core {
11
12DynarmicExclusiveMonitor::DynarmicExclusiveMonitor(Memory::Memory& memory, std::size_t core_count)
13 : monitor(core_count), memory{memory} {}
14
15DynarmicExclusiveMonitor::~DynarmicExclusiveMonitor() = default;
16
17u8 DynarmicExclusiveMonitor::ExclusiveRead8(std::size_t core_index, VAddr addr) {
18 return monitor.ReadAndMark<u8>(core_index, addr, [&]() -> u8 { return memory.Read8(addr); });
19}
20
21u16 DynarmicExclusiveMonitor::ExclusiveRead16(std::size_t core_index, VAddr addr) {
22 return monitor.ReadAndMark<u16>(core_index, addr, [&]() -> u16 { return memory.Read16(addr); });
23}
24
25u32 DynarmicExclusiveMonitor::ExclusiveRead32(std::size_t core_index, VAddr addr) {
26 return monitor.ReadAndMark<u32>(core_index, addr, [&]() -> u32 { return memory.Read32(addr); });
27}
28
29u64 DynarmicExclusiveMonitor::ExclusiveRead64(std::size_t core_index, VAddr addr) {
30 return monitor.ReadAndMark<u64>(core_index, addr, [&]() -> u64 { return memory.Read64(addr); });
31}
32
33u128 DynarmicExclusiveMonitor::ExclusiveRead128(std::size_t core_index, VAddr addr) {
34 return monitor.ReadAndMark<u128>(core_index, addr, [&]() -> u128 {
35 u128 result;
36 result[0] = memory.Read64(addr);
37 result[1] = memory.Read64(addr + 8);
38 return result;
39 });
40}
41
42void DynarmicExclusiveMonitor::ClearExclusive() {
43 monitor.Clear();
44}
45
46bool DynarmicExclusiveMonitor::ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) {
47 return monitor.DoExclusiveOperation<u8>(core_index, vaddr, [&](u8 expected) -> bool {
48 return memory.WriteExclusive8(vaddr, value, expected);
49 });
50}
51
52bool DynarmicExclusiveMonitor::ExclusiveWrite16(std::size_t core_index, VAddr vaddr, u16 value) {
53 return monitor.DoExclusiveOperation<u16>(core_index, vaddr, [&](u16 expected) -> bool {
54 return memory.WriteExclusive16(vaddr, value, expected);
55 });
56}
57
58bool DynarmicExclusiveMonitor::ExclusiveWrite32(std::size_t core_index, VAddr vaddr, u32 value) {
59 return monitor.DoExclusiveOperation<u32>(core_index, vaddr, [&](u32 expected) -> bool {
60 return memory.WriteExclusive32(vaddr, value, expected);
61 });
62}
63
64bool DynarmicExclusiveMonitor::ExclusiveWrite64(std::size_t core_index, VAddr vaddr, u64 value) {
65 return monitor.DoExclusiveOperation<u64>(core_index, vaddr, [&](u64 expected) -> bool {
66 return memory.WriteExclusive64(vaddr, value, expected);
67 });
68}
69
70bool DynarmicExclusiveMonitor::ExclusiveWrite128(std::size_t core_index, VAddr vaddr, u128 value) {
71 return monitor.DoExclusiveOperation<u128>(core_index, vaddr, [&](u128 expected) -> bool {
72 return memory.WriteExclusive128(vaddr, value, expected);
73 });
74}
75
76} // namespace Core
diff --git a/src/core/arm/dynarmic/arm_exclusive_monitor.h b/src/core/arm/dynarmic/arm_exclusive_monitor.h
new file mode 100644
index 000000000..964f4a55d
--- /dev/null
+++ b/src/core/arm/dynarmic/arm_exclusive_monitor.h
@@ -0,0 +1,48 @@
1// Copyright 2020 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8#include <unordered_map>
9
10#include <dynarmic/exclusive_monitor.h>
11
12#include "common/common_types.h"
13#include "core/arm/dynarmic/arm_dynarmic_32.h"
14#include "core/arm/dynarmic/arm_dynarmic_64.h"
15#include "core/arm/exclusive_monitor.h"
16
17namespace Core::Memory {
18class Memory;
19}
20
21namespace Core {
22
23class DynarmicExclusiveMonitor final : public ExclusiveMonitor {
24public:
25 explicit DynarmicExclusiveMonitor(Memory::Memory& memory, std::size_t core_count);
26 ~DynarmicExclusiveMonitor() override;
27
28 u8 ExclusiveRead8(std::size_t core_index, VAddr addr) override;
29 u16 ExclusiveRead16(std::size_t core_index, VAddr addr) override;
30 u32 ExclusiveRead32(std::size_t core_index, VAddr addr) override;
31 u64 ExclusiveRead64(std::size_t core_index, VAddr addr) override;
32 u128 ExclusiveRead128(std::size_t core_index, VAddr addr) override;
33 void ClearExclusive() override;
34
35 bool ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) override;
36 bool ExclusiveWrite16(std::size_t core_index, VAddr vaddr, u16 value) override;
37 bool ExclusiveWrite32(std::size_t core_index, VAddr vaddr, u32 value) override;
38 bool ExclusiveWrite64(std::size_t core_index, VAddr vaddr, u64 value) override;
39 bool ExclusiveWrite128(std::size_t core_index, VAddr vaddr, u128 value) override;
40
41private:
42 friend class ARM_Dynarmic_32;
43 friend class ARM_Dynarmic_64;
44 Dynarmic::ExclusiveMonitor monitor;
45 Core::Memory::Memory& memory;
46};
47
48} // namespace Core
diff --git a/src/core/arm/exclusive_monitor.cpp b/src/core/arm/exclusive_monitor.cpp
index b32401e0b..d8cba369d 100644
--- a/src/core/arm/exclusive_monitor.cpp
+++ b/src/core/arm/exclusive_monitor.cpp
@@ -3,7 +3,7 @@
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#ifdef ARCHITECTURE_x86_64 5#ifdef ARCHITECTURE_x86_64
6#include "core/arm/dynarmic/arm_dynarmic_64.h" 6#include "core/arm/dynarmic/arm_exclusive_monitor.h"
7#endif 7#endif
8#include "core/arm/exclusive_monitor.h" 8#include "core/arm/exclusive_monitor.h"
9#include "core/memory.h" 9#include "core/memory.h"
diff --git a/src/core/arm/exclusive_monitor.h b/src/core/arm/exclusive_monitor.h
index ccd73b80f..62f6e6023 100644
--- a/src/core/arm/exclusive_monitor.h
+++ b/src/core/arm/exclusive_monitor.h
@@ -18,7 +18,11 @@ class ExclusiveMonitor {
18public: 18public:
19 virtual ~ExclusiveMonitor(); 19 virtual ~ExclusiveMonitor();
20 20
21 virtual void SetExclusive(std::size_t core_index, VAddr addr) = 0; 21 virtual u8 ExclusiveRead8(std::size_t core_index, VAddr addr) = 0;
22 virtual u16 ExclusiveRead16(std::size_t core_index, VAddr addr) = 0;
23 virtual u32 ExclusiveRead32(std::size_t core_index, VAddr addr) = 0;
24 virtual u64 ExclusiveRead64(std::size_t core_index, VAddr addr) = 0;
25 virtual u128 ExclusiveRead128(std::size_t core_index, VAddr addr) = 0;
22 virtual void ClearExclusive() = 0; 26 virtual void ClearExclusive() = 0;
23 27
24 virtual bool ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) = 0; 28 virtual bool ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) = 0;
diff --git a/src/core/arm/unicorn/arm_unicorn.cpp b/src/core/arm/unicorn/arm_unicorn.cpp
index e40e9626a..1df3f3ed1 100644
--- a/src/core/arm/unicorn/arm_unicorn.cpp
+++ b/src/core/arm/unicorn/arm_unicorn.cpp
@@ -6,6 +6,7 @@
6#include <unicorn/arm64.h> 6#include <unicorn/arm64.h>
7#include "common/assert.h" 7#include "common/assert.h"
8#include "common/microprofile.h" 8#include "common/microprofile.h"
9#include "core/arm/cpu_interrupt_handler.h"
9#include "core/arm/unicorn/arm_unicorn.h" 10#include "core/arm/unicorn/arm_unicorn.h"
10#include "core/core.h" 11#include "core/core.h"
11#include "core/core_timing.h" 12#include "core/core_timing.h"
@@ -62,7 +63,9 @@ static bool UnmappedMemoryHook(uc_engine* uc, uc_mem_type type, u64 addr, int si
62 return false; 63 return false;
63} 64}
64 65
65ARM_Unicorn::ARM_Unicorn(System& system, Arch architecture) : ARM_Interface{system} { 66ARM_Unicorn::ARM_Unicorn(System& system, CPUInterrupts& interrupt_handlers, bool uses_wall_clock,
67 Arch architecture, std::size_t core_index)
68 : ARM_Interface{system, interrupt_handlers, uses_wall_clock}, core_index{core_index} {
66 const auto arch = architecture == Arch::AArch32 ? UC_ARCH_ARM : UC_ARCH_ARM64; 69 const auto arch = architecture == Arch::AArch32 ? UC_ARCH_ARM : UC_ARCH_ARM64;
67 CHECKED(uc_open(arch, UC_MODE_ARM, &uc)); 70 CHECKED(uc_open(arch, UC_MODE_ARM, &uc));
68 71
@@ -156,12 +159,20 @@ void ARM_Unicorn::SetTPIDR_EL0(u64 value) {
156 CHECKED(uc_reg_write(uc, UC_ARM64_REG_TPIDR_EL0, &value)); 159 CHECKED(uc_reg_write(uc, UC_ARM64_REG_TPIDR_EL0, &value));
157} 160}
158 161
162void ARM_Unicorn::ChangeProcessorID(std::size_t new_core_id) {
163 core_index = new_core_id;
164}
165
159void ARM_Unicorn::Run() { 166void ARM_Unicorn::Run() {
160 if (GDBStub::IsServerEnabled()) { 167 if (GDBStub::IsServerEnabled()) {
161 ExecuteInstructions(std::max(4000000U, 0U)); 168 ExecuteInstructions(std::max(4000000U, 0U));
162 } else { 169 } else {
163 ExecuteInstructions( 170 while (true) {
164 std::max(std::size_t(system.CoreTiming().GetDowncount()), std::size_t{0})); 171 if (interrupt_handlers[core_index].IsInterrupted()) {
172 return;
173 }
174 ExecuteInstructions(10);
175 }
165 } 176 }
166} 177}
167 178
@@ -183,8 +194,6 @@ void ARM_Unicorn::ExecuteInstructions(std::size_t num_instructions) {
183 UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC, page_buffer.data())); 194 UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC, page_buffer.data()));
184 CHECKED(uc_emu_start(uc, GetPC(), 1ULL << 63, 0, num_instructions)); 195 CHECKED(uc_emu_start(uc, GetPC(), 1ULL << 63, 0, num_instructions));
185 CHECKED(uc_mem_unmap(uc, map_addr, page_buffer.size())); 196 CHECKED(uc_mem_unmap(uc, map_addr, page_buffer.size()));
186
187 system.CoreTiming().AddTicks(num_instructions);
188 if (GDBStub::IsServerEnabled()) { 197 if (GDBStub::IsServerEnabled()) {
189 if (last_bkpt_hit && last_bkpt.type == GDBStub::BreakpointType::Execute) { 198 if (last_bkpt_hit && last_bkpt.type == GDBStub::BreakpointType::Execute) {
190 uc_reg_write(uc, UC_ARM64_REG_PC, &last_bkpt.address); 199 uc_reg_write(uc, UC_ARM64_REG_PC, &last_bkpt.address);
diff --git a/src/core/arm/unicorn/arm_unicorn.h b/src/core/arm/unicorn/arm_unicorn.h
index 725c65085..810aff311 100644
--- a/src/core/arm/unicorn/arm_unicorn.h
+++ b/src/core/arm/unicorn/arm_unicorn.h
@@ -20,7 +20,8 @@ public:
20 AArch64, // 64-bit ARM 20 AArch64, // 64-bit ARM
21 }; 21 };
22 22
23 explicit ARM_Unicorn(System& system, Arch architecture); 23 explicit ARM_Unicorn(System& system, CPUInterrupts& interrupt_handlers, bool uses_wall_clock,
24 Arch architecture, std::size_t core_index);
24 ~ARM_Unicorn() override; 25 ~ARM_Unicorn() override;
25 26
26 void SetPC(u64 pc) override; 27 void SetPC(u64 pc) override;
@@ -35,6 +36,7 @@ public:
35 void SetTlsAddress(VAddr address) override; 36 void SetTlsAddress(VAddr address) override;
36 void SetTPIDR_EL0(u64 value) override; 37 void SetTPIDR_EL0(u64 value) override;
37 u64 GetTPIDR_EL0() const override; 38 u64 GetTPIDR_EL0() const override;
39 void ChangeProcessorID(std::size_t new_core_id) override;
38 void PrepareReschedule() override; 40 void PrepareReschedule() override;
39 void ClearExclusiveState() override; 41 void ClearExclusiveState() override;
40 void ExecuteInstructions(std::size_t num_instructions); 42 void ExecuteInstructions(std::size_t num_instructions);
@@ -55,6 +57,7 @@ private:
55 uc_engine* uc{}; 57 uc_engine* uc{};
56 GDBStub::BreakpointAddress last_bkpt{}; 58 GDBStub::BreakpointAddress last_bkpt{};
57 bool last_bkpt_hit = false; 59 bool last_bkpt_hit = false;
60 std::size_t core_index;
58}; 61};
59 62
60} // namespace Core 63} // namespace Core
diff --git a/src/core/core.cpp b/src/core/core.cpp
index f9f8a3000..1a243c515 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -8,10 +8,10 @@
8 8
9#include "common/file_util.h" 9#include "common/file_util.h"
10#include "common/logging/log.h" 10#include "common/logging/log.h"
11#include "common/microprofile.h"
11#include "common/string_util.h" 12#include "common/string_util.h"
12#include "core/arm/exclusive_monitor.h" 13#include "core/arm/exclusive_monitor.h"
13#include "core/core.h" 14#include "core/core.h"
14#include "core/core_manager.h"
15#include "core/core_timing.h" 15#include "core/core_timing.h"
16#include "core/cpu_manager.h" 16#include "core/cpu_manager.h"
17#include "core/device_memory.h" 17#include "core/device_memory.h"
@@ -51,6 +51,11 @@
51#include "video_core/renderer_base.h" 51#include "video_core/renderer_base.h"
52#include "video_core/video_core.h" 52#include "video_core/video_core.h"
53 53
54MICROPROFILE_DEFINE(ARM_Jit_Dynarmic_CPU0, "ARM JIT", "Dynarmic CPU 0", MP_RGB(255, 64, 64));
55MICROPROFILE_DEFINE(ARM_Jit_Dynarmic_CPU1, "ARM JIT", "Dynarmic CPU 1", MP_RGB(255, 64, 64));
56MICROPROFILE_DEFINE(ARM_Jit_Dynarmic_CPU2, "ARM JIT", "Dynarmic CPU 2", MP_RGB(255, 64, 64));
57MICROPROFILE_DEFINE(ARM_Jit_Dynarmic_CPU3, "ARM JIT", "Dynarmic CPU 3", MP_RGB(255, 64, 64));
58
54namespace Core { 59namespace Core {
55 60
56namespace { 61namespace {
@@ -117,23 +122,22 @@ struct System::Impl {
117 : kernel{system}, fs_controller{system}, memory{system}, 122 : kernel{system}, fs_controller{system}, memory{system},
118 cpu_manager{system}, reporter{system}, applet_manager{system} {} 123 cpu_manager{system}, reporter{system}, applet_manager{system} {}
119 124
120 CoreManager& CurrentCoreManager() { 125 ResultStatus Run() {
121 return cpu_manager.GetCurrentCoreManager(); 126 status = ResultStatus::Success;
122 }
123 127
124 Kernel::PhysicalCore& CurrentPhysicalCore() { 128 kernel.Suspend(false);
125 const auto index = cpu_manager.GetActiveCoreIndex(); 129 core_timing.SyncPause(false);
126 return kernel.PhysicalCore(index); 130 cpu_manager.Pause(false);
127 }
128 131
129 Kernel::PhysicalCore& GetPhysicalCore(std::size_t index) { 132 return status;
130 return kernel.PhysicalCore(index);
131 } 133 }
132 134
133 ResultStatus RunLoop(bool tight_loop) { 135 ResultStatus Pause() {
134 status = ResultStatus::Success; 136 status = ResultStatus::Success;
135 137
136 cpu_manager.RunLoop(tight_loop); 138 core_timing.SyncPause(true);
139 kernel.Suspend(true);
140 cpu_manager.Pause(true);
137 141
138 return status; 142 return status;
139 } 143 }
@@ -143,7 +147,15 @@ struct System::Impl {
143 147
144 device_memory = std::make_unique<Core::DeviceMemory>(system); 148 device_memory = std::make_unique<Core::DeviceMemory>(system);
145 149
146 core_timing.Initialize(); 150 is_multicore = Settings::values.use_multi_core;
151 is_async_gpu = is_multicore || Settings::values.use_asynchronous_gpu_emulation;
152
153 kernel.SetMulticore(is_multicore);
154 cpu_manager.SetMulticore(is_multicore);
155 cpu_manager.SetAsyncGpu(is_async_gpu);
156 core_timing.SetMulticore(is_multicore);
157
158 core_timing.Initialize([&system]() { system.RegisterHostThread(); });
147 kernel.Initialize(); 159 kernel.Initialize();
148 cpu_manager.Initialize(); 160 cpu_manager.Initialize();
149 161
@@ -180,6 +192,11 @@ struct System::Impl {
180 is_powered_on = true; 192 is_powered_on = true;
181 exit_lock = false; 193 exit_lock = false;
182 194
195 microprofile_dynarmic[0] = MICROPROFILE_TOKEN(ARM_Jit_Dynarmic_CPU0);
196 microprofile_dynarmic[1] = MICROPROFILE_TOKEN(ARM_Jit_Dynarmic_CPU1);
197 microprofile_dynarmic[2] = MICROPROFILE_TOKEN(ARM_Jit_Dynarmic_CPU2);
198 microprofile_dynarmic[3] = MICROPROFILE_TOKEN(ARM_Jit_Dynarmic_CPU3);
199
183 LOG_DEBUG(Core, "Initialized OK"); 200 LOG_DEBUG(Core, "Initialized OK");
184 201
185 return ResultStatus::Success; 202 return ResultStatus::Success;
@@ -277,8 +294,6 @@ struct System::Impl {
277 service_manager.reset(); 294 service_manager.reset();
278 cheat_engine.reset(); 295 cheat_engine.reset();
279 telemetry_session.reset(); 296 telemetry_session.reset();
280 perf_stats.reset();
281 gpu_core.reset();
282 device_memory.reset(); 297 device_memory.reset();
283 298
284 // Close all CPU/threading state 299 // Close all CPU/threading state
@@ -290,6 +305,8 @@ struct System::Impl {
290 305
291 // Close app loader 306 // Close app loader
292 app_loader.reset(); 307 app_loader.reset();
308 gpu_core.reset();
309 perf_stats.reset();
293 310
294 // Clear all applets 311 // Clear all applets
295 applet_manager.ClearAll(); 312 applet_manager.ClearAll();
@@ -382,25 +399,35 @@ struct System::Impl {
382 399
383 std::unique_ptr<Core::PerfStats> perf_stats; 400 std::unique_ptr<Core::PerfStats> perf_stats;
384 Core::FrameLimiter frame_limiter; 401 Core::FrameLimiter frame_limiter;
402
403 bool is_multicore{};
404 bool is_async_gpu{};
405
406 std::array<u64, Core::Hardware::NUM_CPU_CORES> dynarmic_ticks{};
407 std::array<MicroProfileToken, Core::Hardware::NUM_CPU_CORES> microprofile_dynarmic{};
385}; 408};
386 409
387System::System() : impl{std::make_unique<Impl>(*this)} {} 410System::System() : impl{std::make_unique<Impl>(*this)} {}
388System::~System() = default; 411System::~System() = default;
389 412
390CoreManager& System::CurrentCoreManager() { 413CpuManager& System::GetCpuManager() {
391 return impl->CurrentCoreManager(); 414 return impl->cpu_manager;
415}
416
417const CpuManager& System::GetCpuManager() const {
418 return impl->cpu_manager;
392} 419}
393 420
394const CoreManager& System::CurrentCoreManager() const { 421System::ResultStatus System::Run() {
395 return impl->CurrentCoreManager(); 422 return impl->Run();
396} 423}
397 424
398System::ResultStatus System::RunLoop(bool tight_loop) { 425System::ResultStatus System::Pause() {
399 return impl->RunLoop(tight_loop); 426 return impl->Pause();
400} 427}
401 428
402System::ResultStatus System::SingleStep() { 429System::ResultStatus System::SingleStep() {
403 return RunLoop(false); 430 return ResultStatus::Success;
404} 431}
405 432
406void System::InvalidateCpuInstructionCaches() { 433void System::InvalidateCpuInstructionCaches() {
@@ -416,7 +443,7 @@ bool System::IsPoweredOn() const {
416} 443}
417 444
418void System::PrepareReschedule() { 445void System::PrepareReschedule() {
419 impl->CurrentPhysicalCore().Stop(); 446 // Deprecated, does nothing, kept for backward compatibility.
420} 447}
421 448
422void System::PrepareReschedule(const u32 core_index) { 449void System::PrepareReschedule(const u32 core_index) {
@@ -436,31 +463,41 @@ const TelemetrySession& System::TelemetrySession() const {
436} 463}
437 464
438ARM_Interface& System::CurrentArmInterface() { 465ARM_Interface& System::CurrentArmInterface() {
439 return impl->CurrentPhysicalCore().ArmInterface(); 466 return impl->kernel.CurrentScheduler().GetCurrentThread()->ArmInterface();
440} 467}
441 468
442const ARM_Interface& System::CurrentArmInterface() const { 469const ARM_Interface& System::CurrentArmInterface() const {
443 return impl->CurrentPhysicalCore().ArmInterface(); 470 return impl->kernel.CurrentScheduler().GetCurrentThread()->ArmInterface();
444} 471}
445 472
446std::size_t System::CurrentCoreIndex() const { 473std::size_t System::CurrentCoreIndex() const {
447 return impl->cpu_manager.GetActiveCoreIndex(); 474 std::size_t core = impl->kernel.GetCurrentHostThreadID();
475 ASSERT(core < Core::Hardware::NUM_CPU_CORES);
476 return core;
448} 477}
449 478
450Kernel::Scheduler& System::CurrentScheduler() { 479Kernel::Scheduler& System::CurrentScheduler() {
451 return impl->CurrentPhysicalCore().Scheduler(); 480 return impl->kernel.CurrentScheduler();
452} 481}
453 482
454const Kernel::Scheduler& System::CurrentScheduler() const { 483const Kernel::Scheduler& System::CurrentScheduler() const {
455 return impl->CurrentPhysicalCore().Scheduler(); 484 return impl->kernel.CurrentScheduler();
485}
486
487Kernel::PhysicalCore& System::CurrentPhysicalCore() {
488 return impl->kernel.CurrentPhysicalCore();
489}
490
491const Kernel::PhysicalCore& System::CurrentPhysicalCore() const {
492 return impl->kernel.CurrentPhysicalCore();
456} 493}
457 494
458Kernel::Scheduler& System::Scheduler(std::size_t core_index) { 495Kernel::Scheduler& System::Scheduler(std::size_t core_index) {
459 return impl->GetPhysicalCore(core_index).Scheduler(); 496 return impl->kernel.Scheduler(core_index);
460} 497}
461 498
462const Kernel::Scheduler& System::Scheduler(std::size_t core_index) const { 499const Kernel::Scheduler& System::Scheduler(std::size_t core_index) const {
463 return impl->GetPhysicalCore(core_index).Scheduler(); 500 return impl->kernel.Scheduler(core_index);
464} 501}
465 502
466/// Gets the global scheduler 503/// Gets the global scheduler
@@ -490,20 +527,15 @@ const Kernel::Process* System::CurrentProcess() const {
490} 527}
491 528
492ARM_Interface& System::ArmInterface(std::size_t core_index) { 529ARM_Interface& System::ArmInterface(std::size_t core_index) {
493 return impl->GetPhysicalCore(core_index).ArmInterface(); 530 auto* thread = impl->kernel.Scheduler(core_index).GetCurrentThread();
531 ASSERT(thread && !thread->IsHLEThread());
532 return thread->ArmInterface();
494} 533}
495 534
496const ARM_Interface& System::ArmInterface(std::size_t core_index) const { 535const ARM_Interface& System::ArmInterface(std::size_t core_index) const {
497 return impl->GetPhysicalCore(core_index).ArmInterface(); 536 auto* thread = impl->kernel.Scheduler(core_index).GetCurrentThread();
498} 537 ASSERT(thread && !thread->IsHLEThread());
499 538 return thread->ArmInterface();
500CoreManager& System::GetCoreManager(std::size_t core_index) {
501 return impl->cpu_manager.GetCoreManager(core_index);
502}
503
504const CoreManager& System::GetCoreManager(std::size_t core_index) const {
505 ASSERT(core_index < NUM_CPU_CORES);
506 return impl->cpu_manager.GetCoreManager(core_index);
507} 539}
508 540
509ExclusiveMonitor& System::Monitor() { 541ExclusiveMonitor& System::Monitor() {
@@ -722,4 +754,18 @@ void System::RegisterHostThread() {
722 impl->kernel.RegisterHostThread(); 754 impl->kernel.RegisterHostThread();
723} 755}
724 756
757void System::EnterDynarmicProfile() {
758 std::size_t core = impl->kernel.GetCurrentHostThreadID();
759 impl->dynarmic_ticks[core] = MicroProfileEnter(impl->microprofile_dynarmic[core]);
760}
761
762void System::ExitDynarmicProfile() {
763 std::size_t core = impl->kernel.GetCurrentHostThreadID();
764 MicroProfileLeave(impl->microprofile_dynarmic[core], impl->dynarmic_ticks[core]);
765}
766
767bool System::IsMulticore() const {
768 return impl->is_multicore;
769}
770
725} // namespace Core 771} // namespace Core
diff --git a/src/core/core.h b/src/core/core.h
index acc53d6a1..5c6cfbffe 100644
--- a/src/core/core.h
+++ b/src/core/core.h
@@ -27,6 +27,7 @@ class VfsFilesystem;
27namespace Kernel { 27namespace Kernel {
28class GlobalScheduler; 28class GlobalScheduler;
29class KernelCore; 29class KernelCore;
30class PhysicalCore;
30class Process; 31class Process;
31class Scheduler; 32class Scheduler;
32} // namespace Kernel 33} // namespace Kernel
@@ -90,7 +91,7 @@ class InterruptManager;
90namespace Core { 91namespace Core {
91 92
92class ARM_Interface; 93class ARM_Interface;
93class CoreManager; 94class CpuManager;
94class DeviceMemory; 95class DeviceMemory;
95class ExclusiveMonitor; 96class ExclusiveMonitor;
96class FrameLimiter; 97class FrameLimiter;
@@ -136,16 +137,16 @@ public:
136 }; 137 };
137 138
138 /** 139 /**
139 * Run the core CPU loop 140 * Run the OS and Application
140 * This function runs the core for the specified number of CPU instructions before trying to 141 * This function will start emulation and run the relevant devices
141 * update hardware. This is much faster than SingleStep (and should be equivalent), as the CPU 142 */
142 * is not required to do a full dispatch with each instruction. NOTE: the number of instructions 143 ResultStatus Run();
143 * requested is not guaranteed to run, as this will be interrupted preemptively if a hardware 144
144 * update is requested (e.g. on a thread switch). 145 /**
145 * @param tight_loop If false, the CPU single-steps. 146 * Pause the OS and Application
146 * @return Result status, indicating whether or not the operation succeeded. 147 * This function will pause emulation and stop the relevant devices
147 */ 148 */
148 ResultStatus RunLoop(bool tight_loop = true); 149 ResultStatus Pause();
149 150
150 /** 151 /**
151 * Step the CPU one instruction 152 * Step the CPU one instruction
@@ -209,17 +210,21 @@ public:
209 /// Gets the scheduler for the CPU core that is currently running 210 /// Gets the scheduler for the CPU core that is currently running
210 const Kernel::Scheduler& CurrentScheduler() const; 211 const Kernel::Scheduler& CurrentScheduler() const;
211 212
213 /// Gets the physical core for the CPU core that is currently running
214 Kernel::PhysicalCore& CurrentPhysicalCore();
215
216 /// Gets the physical core for the CPU core that is currently running
217 const Kernel::PhysicalCore& CurrentPhysicalCore() const;
218
212 /// Gets a reference to an ARM interface for the CPU core with the specified index 219 /// Gets a reference to an ARM interface for the CPU core with the specified index
213 ARM_Interface& ArmInterface(std::size_t core_index); 220 ARM_Interface& ArmInterface(std::size_t core_index);
214 221
215 /// Gets a const reference to an ARM interface from the CPU core with the specified index 222 /// Gets a const reference to an ARM interface from the CPU core with the specified index
216 const ARM_Interface& ArmInterface(std::size_t core_index) const; 223 const ARM_Interface& ArmInterface(std::size_t core_index) const;
217 224
218 /// Gets a CPU interface to the CPU core with the specified index 225 CpuManager& GetCpuManager();
219 CoreManager& GetCoreManager(std::size_t core_index);
220 226
221 /// Gets a CPU interface to the CPU core with the specified index 227 const CpuManager& GetCpuManager() const;
222 const CoreManager& GetCoreManager(std::size_t core_index) const;
223 228
224 /// Gets a reference to the exclusive monitor 229 /// Gets a reference to the exclusive monitor
225 ExclusiveMonitor& Monitor(); 230 ExclusiveMonitor& Monitor();
@@ -370,14 +375,17 @@ public:
370 /// Register a host thread as an auxiliary thread. 375 /// Register a host thread as an auxiliary thread.
371 void RegisterHostThread(); 376 void RegisterHostThread();
372 377
373private: 378 /// Enter Dynarmic Microprofile
374 System(); 379 void EnterDynarmicProfile();
380
381 /// Exit Dynarmic Microprofile
382 void ExitDynarmicProfile();
375 383
376 /// Returns the currently running CPU core 384 /// Tells if system is running on multicore.
377 CoreManager& CurrentCoreManager(); 385 bool IsMulticore() const;
378 386
379 /// Returns the currently running CPU core 387private:
380 const CoreManager& CurrentCoreManager() const; 388 System();
381 389
382 /** 390 /**
383 * Initialize the emulated system. 391 * Initialize the emulated system.
diff --git a/src/core/core_manager.cpp b/src/core/core_manager.cpp
deleted file mode 100644
index b6b797c80..000000000
--- a/src/core/core_manager.cpp
+++ /dev/null
@@ -1,67 +0,0 @@
1// Copyright 2018 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <condition_variable>
6#include <mutex>
7
8#include "common/logging/log.h"
9#include "core/arm/exclusive_monitor.h"
10#include "core/arm/unicorn/arm_unicorn.h"
11#include "core/core.h"
12#include "core/core_manager.h"
13#include "core/core_timing.h"
14#include "core/hle/kernel/kernel.h"
15#include "core/hle/kernel/physical_core.h"
16#include "core/hle/kernel/scheduler.h"
17#include "core/hle/kernel/thread.h"
18#include "core/hle/lock.h"
19#include "core/settings.h"
20
21namespace Core {
22
23CoreManager::CoreManager(System& system, std::size_t core_index)
24 : global_scheduler{system.GlobalScheduler()}, physical_core{system.Kernel().PhysicalCore(
25 core_index)},
26 core_timing{system.CoreTiming()}, core_index{core_index} {}
27
28CoreManager::~CoreManager() = default;
29
30void CoreManager::RunLoop(bool tight_loop) {
31 Reschedule();
32
33 // If we don't have a currently active thread then don't execute instructions,
34 // instead advance to the next event and try to yield to the next thread
35 if (Kernel::GetCurrentThread() == nullptr) {
36 LOG_TRACE(Core, "Core-{} idling", core_index);
37 core_timing.Idle();
38 } else {
39 if (tight_loop) {
40 physical_core.Run();
41 } else {
42 physical_core.Step();
43 }
44 }
45 core_timing.Advance();
46
47 Reschedule();
48}
49
50void CoreManager::SingleStep() {
51 return RunLoop(false);
52}
53
54void CoreManager::PrepareReschedule() {
55 physical_core.Stop();
56}
57
58void CoreManager::Reschedule() {
59 // Lock the global kernel mutex when we manipulate the HLE state
60 std::lock_guard lock(HLE::g_hle_lock);
61
62 global_scheduler.SelectThread(core_index);
63
64 physical_core.Scheduler().TryDoContextSwitch();
65}
66
67} // namespace Core
diff --git a/src/core/core_manager.h b/src/core/core_manager.h
deleted file mode 100644
index d525de00a..000000000
--- a/src/core/core_manager.h
+++ /dev/null
@@ -1,63 +0,0 @@
1// Copyright 2018 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <atomic>
8#include <cstddef>
9#include <memory>
10#include "common/common_types.h"
11
12namespace Kernel {
13class GlobalScheduler;
14class PhysicalCore;
15} // namespace Kernel
16
17namespace Core {
18class System;
19}
20
21namespace Core::Timing {
22class CoreTiming;
23}
24
25namespace Core::Memory {
26class Memory;
27}
28
29namespace Core {
30
31constexpr unsigned NUM_CPU_CORES{4};
32
33class CoreManager {
34public:
35 CoreManager(System& system, std::size_t core_index);
36 ~CoreManager();
37
38 void RunLoop(bool tight_loop = true);
39
40 void SingleStep();
41
42 void PrepareReschedule();
43
44 bool IsMainCore() const {
45 return core_index == 0;
46 }
47
48 std::size_t CoreIndex() const {
49 return core_index;
50 }
51
52private:
53 void Reschedule();
54
55 Kernel::GlobalScheduler& global_scheduler;
56 Kernel::PhysicalCore& physical_core;
57 Timing::CoreTiming& core_timing;
58
59 std::atomic<bool> reschedule_pending = false;
60 std::size_t core_index;
61};
62
63} // namespace Core
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp
index 46d4178c4..5c83c41a4 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -1,29 +1,27 @@
1// Copyright 2008 Dolphin Emulator Project / 2017 Citra Emulator Project 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2+ 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include "core/core_timing.h"
6
7#include <algorithm> 5#include <algorithm>
8#include <mutex> 6#include <mutex>
9#include <string> 7#include <string>
10#include <tuple> 8#include <tuple>
11 9
12#include "common/assert.h" 10#include "common/assert.h"
13#include "common/thread.h" 11#include "common/microprofile.h"
12#include "core/core_timing.h"
14#include "core/core_timing_util.h" 13#include "core/core_timing_util.h"
15#include "core/hardware_properties.h"
16 14
17namespace Core::Timing { 15namespace Core::Timing {
18 16
19constexpr int MAX_SLICE_LENGTH = 10000; 17constexpr u64 MAX_SLICE_LENGTH = 4000;
20 18
21std::shared_ptr<EventType> CreateEvent(std::string name, TimedCallback&& callback) { 19std::shared_ptr<EventType> CreateEvent(std::string name, TimedCallback&& callback) {
22 return std::make_shared<EventType>(std::move(callback), std::move(name)); 20 return std::make_shared<EventType>(std::move(callback), std::move(name));
23} 21}
24 22
25struct CoreTiming::Event { 23struct CoreTiming::Event {
26 s64 time; 24 u64 time;
27 u64 fifo_order; 25 u64 fifo_order;
28 u64 userdata; 26 u64 userdata;
29 std::weak_ptr<EventType> type; 27 std::weak_ptr<EventType> type;
@@ -39,51 +37,90 @@ struct CoreTiming::Event {
39 } 37 }
40}; 38};
41 39
42CoreTiming::CoreTiming() = default; 40CoreTiming::CoreTiming() {
43CoreTiming::~CoreTiming() = default; 41 clock =
42 Common::CreateBestMatchingClock(Core::Hardware::BASE_CLOCK_RATE, Core::Hardware::CNTFREQ);
43}
44 44
45void CoreTiming::Initialize() { 45CoreTiming::~CoreTiming() = default;
46 downcounts.fill(MAX_SLICE_LENGTH);
47 time_slice.fill(MAX_SLICE_LENGTH);
48 slice_length = MAX_SLICE_LENGTH;
49 global_timer = 0;
50 idled_cycles = 0;
51 current_context = 0;
52 46
53 // The time between CoreTiming being initialized and the first call to Advance() is considered 47void CoreTiming::ThreadEntry(CoreTiming& instance) {
54 // the slice boundary between slice -1 and slice 0. Dispatcher loops must call Advance() before 48 constexpr char name[] = "yuzu:HostTiming";
55 // executing the first cycle of each slice to prepare the slice length and downcount for 49 MicroProfileOnThreadCreate(name);
56 // that slice. 50 Common::SetCurrentThreadName(name);
57 is_global_timer_sane = true; 51 Common::SetCurrentThreadPriority(Common::ThreadPriority::VeryHigh);
52 instance.on_thread_init();
53 instance.ThreadLoop();
54}
58 55
56void CoreTiming::Initialize(std::function<void(void)>&& on_thread_init_) {
57 on_thread_init = std::move(on_thread_init_);
59 event_fifo_id = 0; 58 event_fifo_id = 0;
60 59 shutting_down = false;
60 ticks = 0;
61 const auto empty_timed_callback = [](u64, s64) {}; 61 const auto empty_timed_callback = [](u64, s64) {};
62 ev_lost = CreateEvent("_lost_event", empty_timed_callback); 62 ev_lost = CreateEvent("_lost_event", empty_timed_callback);
63 if (is_multicore) {
64 timer_thread = std::make_unique<std::thread>(ThreadEntry, std::ref(*this));
65 }
63} 66}
64 67
65void CoreTiming::Shutdown() { 68void CoreTiming::Shutdown() {
69 paused = true;
70 shutting_down = true;
71 pause_event.Set();
72 event.Set();
73 if (timer_thread) {
74 timer_thread->join();
75 }
66 ClearPendingEvents(); 76 ClearPendingEvents();
77 timer_thread.reset();
78 has_started = false;
67} 79}
68 80
69void CoreTiming::ScheduleEvent(s64 cycles_into_future, const std::shared_ptr<EventType>& event_type, 81void CoreTiming::Pause(bool is_paused) {
70 u64 userdata) { 82 paused = is_paused;
71 std::lock_guard guard{inner_mutex}; 83 pause_event.Set();
72 const s64 timeout = GetTicks() + cycles_into_future; 84}
73 85
74 // If this event needs to be scheduled before the next advance(), force one early 86void CoreTiming::SyncPause(bool is_paused) {
75 if (!is_global_timer_sane) { 87 if (is_paused == paused && paused_set == paused) {
76 ForceExceptionCheck(cycles_into_future); 88 return;
89 }
90 Pause(is_paused);
91 if (timer_thread) {
92 if (!is_paused) {
93 pause_event.Set();
94 }
95 event.Set();
96 while (paused_set != is_paused)
97 ;
77 } 98 }
99}
78 100
79 event_queue.emplace_back(Event{timeout, event_fifo_id++, userdata, event_type}); 101bool CoreTiming::IsRunning() const {
102 return !paused_set;
103}
80 104
81 std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>()); 105bool CoreTiming::HasPendingEvents() const {
106 return !(wait_set && event_queue.empty());
82} 107}
83 108
84void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u64 userdata) { 109void CoreTiming::ScheduleEvent(s64 ns_into_future, const std::shared_ptr<EventType>& event_type,
85 std::lock_guard guard{inner_mutex}; 110 u64 userdata) {
111 {
112 std::scoped_lock scope{basic_lock};
113 const u64 timeout = static_cast<u64>(GetGlobalTimeNs().count() + ns_into_future);
114
115 event_queue.emplace_back(Event{timeout, event_fifo_id++, userdata, event_type});
86 116
117 std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
118 }
119 event.Set();
120}
121
122void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u64 userdata) {
123 std::scoped_lock scope{basic_lock};
87 const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) { 124 const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
88 return e.type.lock().get() == event_type.get() && e.userdata == userdata; 125 return e.type.lock().get() == event_type.get() && e.userdata == userdata;
89 }); 126 });
@@ -95,21 +132,39 @@ void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u
95 } 132 }
96} 133}
97 134
98u64 CoreTiming::GetTicks() const { 135void CoreTiming::AddTicks(u64 ticks) {
99 u64 ticks = static_cast<u64>(global_timer); 136 this->ticks += ticks;
100 if (!is_global_timer_sane) { 137 downcount -= ticks;
101 ticks += accumulated_ticks; 138}
139
140void CoreTiming::Idle() {
141 if (!event_queue.empty()) {
142 const u64 next_event_time = event_queue.front().time;
143 const u64 next_ticks = nsToCycles(std::chrono::nanoseconds(next_event_time)) + 10U;
144 if (next_ticks > ticks) {
145 ticks = next_ticks;
146 }
147 return;
102 } 148 }
103 return ticks; 149 ticks += 1000U;
104} 150}
105 151
106u64 CoreTiming::GetIdleTicks() const { 152void CoreTiming::ResetTicks() {
107 return static_cast<u64>(idled_cycles); 153 downcount = MAX_SLICE_LENGTH;
108} 154}
109 155
110void CoreTiming::AddTicks(u64 ticks) { 156u64 CoreTiming::GetCPUTicks() const {
111 accumulated_ticks += ticks; 157 if (is_multicore) {
112 downcounts[current_context] -= static_cast<s64>(ticks); 158 return clock->GetCPUCycles();
159 }
160 return ticks;
161}
162
163u64 CoreTiming::GetClockTicks() const {
164 if (is_multicore) {
165 return clock->GetClockCycles();
166 }
167 return CpuCyclesToClockCycles(ticks);
113} 168}
114 169
115void CoreTiming::ClearPendingEvents() { 170void CoreTiming::ClearPendingEvents() {
@@ -117,7 +172,7 @@ void CoreTiming::ClearPendingEvents() {
117} 172}
118 173
119void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) { 174void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
120 std::lock_guard guard{inner_mutex}; 175 basic_lock.lock();
121 176
122 const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) { 177 const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
123 return e.type.lock().get() == event_type.get(); 178 return e.type.lock().get() == event_type.get();
@@ -128,99 +183,72 @@ void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
128 event_queue.erase(itr, event_queue.end()); 183 event_queue.erase(itr, event_queue.end());
129 std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>()); 184 std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>());
130 } 185 }
186 basic_lock.unlock();
131} 187}
132 188
133void CoreTiming::ForceExceptionCheck(s64 cycles) { 189std::optional<s64> CoreTiming::Advance() {
134 cycles = std::max<s64>(0, cycles); 190 std::scoped_lock advance_scope{advance_lock};
135 if (downcounts[current_context] <= cycles) { 191 std::scoped_lock basic_scope{basic_lock};
136 return; 192 global_timer = GetGlobalTimeNs().count();
137 }
138
139 // downcount is always (much) smaller than MAX_INT so we can safely cast cycles to an int
140 // here. Account for cycles already executed by adjusting the g.slice_length
141 downcounts[current_context] = static_cast<int>(cycles);
142}
143
144std::optional<u64> CoreTiming::NextAvailableCore(const s64 needed_ticks) const {
145 const u64 original_context = current_context;
146 u64 next_context = (original_context + 1) % num_cpu_cores;
147 while (next_context != original_context) {
148 if (time_slice[next_context] >= needed_ticks) {
149 return {next_context};
150 } else if (time_slice[next_context] >= 0) {
151 return std::nullopt;
152 }
153 next_context = (next_context + 1) % num_cpu_cores;
154 }
155 return std::nullopt;
156}
157
158void CoreTiming::Advance() {
159 std::unique_lock<std::mutex> guard(inner_mutex);
160
161 const u64 cycles_executed = accumulated_ticks;
162 time_slice[current_context] = std::max<s64>(0, time_slice[current_context] - accumulated_ticks);
163 global_timer += cycles_executed;
164
165 is_global_timer_sane = true;
166 193
167 while (!event_queue.empty() && event_queue.front().time <= global_timer) { 194 while (!event_queue.empty() && event_queue.front().time <= global_timer) {
168 Event evt = std::move(event_queue.front()); 195 Event evt = std::move(event_queue.front());
169 std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>()); 196 std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>());
170 event_queue.pop_back(); 197 event_queue.pop_back();
171 inner_mutex.unlock(); 198 basic_lock.unlock();
172 199
173 if (auto event_type{evt.type.lock()}) { 200 if (auto event_type{evt.type.lock()}) {
174 event_type->callback(evt.userdata, global_timer - evt.time); 201 event_type->callback(evt.userdata, global_timer - evt.time);
175 } 202 }
176 203
177 inner_mutex.lock(); 204 basic_lock.lock();
205 global_timer = GetGlobalTimeNs().count();
178 } 206 }
179 207
180 is_global_timer_sane = false;
181
182 // Still events left (scheduled in the future)
183 if (!event_queue.empty()) { 208 if (!event_queue.empty()) {
184 const s64 needed_ticks = 209 const s64 next_time = event_queue.front().time - global_timer;
185 std::min<s64>(event_queue.front().time - global_timer, MAX_SLICE_LENGTH); 210 return next_time;
186 const auto next_core = NextAvailableCore(needed_ticks); 211 } else {
187 if (next_core) { 212 return std::nullopt;
188 downcounts[*next_core] = needed_ticks;
189 }
190 } 213 }
191
192 accumulated_ticks = 0;
193
194 downcounts[current_context] = time_slice[current_context];
195} 214}
196 215
197void CoreTiming::ResetRun() { 216void CoreTiming::ThreadLoop() {
198 downcounts.fill(MAX_SLICE_LENGTH); 217 has_started = true;
199 time_slice.fill(MAX_SLICE_LENGTH); 218 while (!shutting_down) {
200 current_context = 0; 219 while (!paused) {
201 // Still events left (scheduled in the future) 220 paused_set = false;
202 if (!event_queue.empty()) { 221 const auto next_time = Advance();
203 const s64 needed_ticks = 222 if (next_time) {
204 std::min<s64>(event_queue.front().time - global_timer, MAX_SLICE_LENGTH); 223 if (*next_time > 0) {
205 downcounts[current_context] = needed_ticks; 224 std::chrono::nanoseconds next_time_ns = std::chrono::nanoseconds(*next_time);
225 event.WaitFor(next_time_ns);
226 }
227 } else {
228 wait_set = true;
229 event.Wait();
230 }
231 wait_set = false;
232 }
233 paused_set = true;
234 clock->Pause(true);
235 pause_event.Wait();
236 clock->Pause(false);
206 } 237 }
207
208 is_global_timer_sane = false;
209 accumulated_ticks = 0;
210} 238}
211 239
212void CoreTiming::Idle() { 240std::chrono::nanoseconds CoreTiming::GetGlobalTimeNs() const {
213 accumulated_ticks += downcounts[current_context]; 241 if (is_multicore) {
214 idled_cycles += downcounts[current_context]; 242 return clock->GetTimeNS();
215 downcounts[current_context] = 0; 243 }
244 return CyclesToNs(ticks);
216} 245}
217 246
218std::chrono::microseconds CoreTiming::GetGlobalTimeUs() const { 247std::chrono::microseconds CoreTiming::GetGlobalTimeUs() const {
219 return std::chrono::microseconds{GetTicks() * 1000000 / Hardware::BASE_CLOCK_RATE}; 248 if (is_multicore) {
220} 249 return clock->GetTimeUS();
221 250 }
222s64 CoreTiming::GetDowncount() const { 251 return CyclesToUs(ticks);
223 return downcounts[current_context];
224} 252}
225 253
226} // namespace Core::Timing 254} // namespace Core::Timing
diff --git a/src/core/core_timing.h b/src/core/core_timing.h
index d50f4eb8a..72faaab64 100644
--- a/src/core/core_timing.h
+++ b/src/core/core_timing.h
@@ -1,19 +1,25 @@
1// Copyright 2008 Dolphin Emulator Project / 2017 Citra Emulator Project 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2+ 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#pragma once 5#pragma once
6 6
7#include <atomic>
7#include <chrono> 8#include <chrono>
8#include <functional> 9#include <functional>
9#include <memory> 10#include <memory>
10#include <mutex> 11#include <mutex>
11#include <optional> 12#include <optional>
12#include <string> 13#include <string>
14#include <thread>
13#include <vector> 15#include <vector>
14 16
15#include "common/common_types.h" 17#include "common/common_types.h"
18#include "common/spin_lock.h"
19#include "common/thread.h"
16#include "common/threadsafe_queue.h" 20#include "common/threadsafe_queue.h"
21#include "common/wall_clock.h"
22#include "core/hardware_properties.h"
17 23
18namespace Core::Timing { 24namespace Core::Timing {
19 25
@@ -56,16 +62,40 @@ public:
56 62
57 /// CoreTiming begins at the boundary of timing slice -1. An initial call to Advance() is 63 /// CoreTiming begins at the boundary of timing slice -1. An initial call to Advance() is
58 /// required to end slice - 1 and start slice 0 before the first cycle of code is executed. 64 /// required to end slice - 1 and start slice 0 before the first cycle of code is executed.
59 void Initialize(); 65 void Initialize(std::function<void(void)>&& on_thread_init_);
60 66
61 /// Tears down all timing related functionality. 67 /// Tears down all timing related functionality.
62 void Shutdown(); 68 void Shutdown();
63 69
64 /// After the first Advance, the slice lengths and the downcount will be reduced whenever an 70 /// Sets if emulation is multicore or single core, must be set before Initialize
65 /// event is scheduled earlier than the current values. 71 void SetMulticore(bool is_multicore) {
66 /// 72 this->is_multicore = is_multicore;
67 /// Scheduling from a callback will not update the downcount until the Advance() completes. 73 }
68 void ScheduleEvent(s64 cycles_into_future, const std::shared_ptr<EventType>& event_type, 74
75 /// Check if it's using host timing.
76 bool IsHostTiming() const {
77 return is_multicore;
78 }
79
80 /// Pauses/Unpauses the execution of the timer thread.
81 void Pause(bool is_paused);
82
83 /// Pauses/Unpauses the execution of the timer thread and waits until paused.
84 void SyncPause(bool is_paused);
85
86 /// Checks if core timing is running.
87 bool IsRunning() const;
88
89 /// Checks if the timer thread has started.
90 bool HasStarted() const {
91 return has_started;
92 }
93
94 /// Checks if there are any pending time events.
95 bool HasPendingEvents() const;
96
97 /// Schedules an event in core timing
98 void ScheduleEvent(s64 ns_into_future, const std::shared_ptr<EventType>& event_type,
69 u64 userdata = 0); 99 u64 userdata = 0);
70 100
71 void UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u64 userdata); 101 void UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u64 userdata);
@@ -73,41 +103,30 @@ public:
73 /// We only permit one event of each type in the queue at a time. 103 /// We only permit one event of each type in the queue at a time.
74 void RemoveEvent(const std::shared_ptr<EventType>& event_type); 104 void RemoveEvent(const std::shared_ptr<EventType>& event_type);
75 105
76 void ForceExceptionCheck(s64 cycles);
77
78 /// This should only be called from the emu thread, if you are calling it any other thread,
79 /// you are doing something evil
80 u64 GetTicks() const;
81
82 u64 GetIdleTicks() const;
83
84 void AddTicks(u64 ticks); 106 void AddTicks(u64 ticks);
85 107
86 /// Advance must be called at the beginning of dispatcher loops, not the end. Advance() ends 108 void ResetTicks();
87 /// the previous timing slice and begins the next one, you must Advance from the previous
88 /// slice to the current one before executing any cycles. CoreTiming starts in slice -1 so an
89 /// Advance() is required to initialize the slice length before the first cycle of emulated
90 /// instructions is executed.
91 void Advance();
92 109
93 /// Pretend that the main CPU has executed enough cycles to reach the next event.
94 void Idle(); 110 void Idle();
95 111
96 std::chrono::microseconds GetGlobalTimeUs() const; 112 s64 GetDowncount() const {
113 return downcount;
114 }
97 115
98 void ResetRun(); 116 /// Returns current time in emulated CPU cycles
117 u64 GetCPUTicks() const;
99 118
100 s64 GetDowncount() const; 119 /// Returns current time in emulated in Clock cycles
120 u64 GetClockTicks() const;
101 121
102 void SwitchContext(u64 new_context) { 122 /// Returns current time in microseconds.
103 current_context = new_context; 123 std::chrono::microseconds GetGlobalTimeUs() const;
104 }
105 124
106 bool CanCurrentContextRun() const { 125 /// Returns current time in nanoseconds.
107 return time_slice[current_context] > 0; 126 std::chrono::nanoseconds GetGlobalTimeNs() const;
108 }
109 127
110 std::optional<u64> NextAvailableCore(const s64 needed_ticks) const; 128 /// Checks for events manually and returns time in nanoseconds for next event, threadsafe.
129 std::optional<s64> Advance();
111 130
112private: 131private:
113 struct Event; 132 struct Event;
@@ -115,21 +134,14 @@ private:
115 /// Clear all pending events. This should ONLY be done on exit. 134 /// Clear all pending events. This should ONLY be done on exit.
116 void ClearPendingEvents(); 135 void ClearPendingEvents();
117 136
118 static constexpr u64 num_cpu_cores = 4; 137 static void ThreadEntry(CoreTiming& instance);
138 void ThreadLoop();
119 139
120 s64 global_timer = 0; 140 std::unique_ptr<Common::WallClock> clock;
121 s64 idled_cycles = 0;
122 s64 slice_length = 0;
123 u64 accumulated_ticks = 0;
124 std::array<s64, num_cpu_cores> downcounts{};
125 // Slice of time assigned to each core per run.
126 std::array<s64, num_cpu_cores> time_slice{};
127 u64 current_context = 0;
128 141
129 // Are we in a function that has been called from Advance() 142 u64 global_timer = 0;
130 // If events are scheduled from a function that gets called from Advance(), 143
131 // don't change slice_length and downcount. 144 std::chrono::nanoseconds start_point;
132 bool is_global_timer_sane = false;
133 145
134 // The queue is a min-heap using std::make_heap/push_heap/pop_heap. 146 // The queue is a min-heap using std::make_heap/push_heap/pop_heap.
135 // We don't use std::priority_queue because we need to be able to serialize, unserialize and 147 // We don't use std::priority_queue because we need to be able to serialize, unserialize and
@@ -139,8 +151,23 @@ private:
139 u64 event_fifo_id = 0; 151 u64 event_fifo_id = 0;
140 152
141 std::shared_ptr<EventType> ev_lost; 153 std::shared_ptr<EventType> ev_lost;
142 154 Common::Event event{};
143 std::mutex inner_mutex; 155 Common::Event pause_event{};
156 Common::SpinLock basic_lock{};
157 Common::SpinLock advance_lock{};
158 std::unique_ptr<std::thread> timer_thread;
159 std::atomic<bool> paused{};
160 std::atomic<bool> paused_set{};
161 std::atomic<bool> wait_set{};
162 std::atomic<bool> shutting_down{};
163 std::atomic<bool> has_started{};
164 std::function<void(void)> on_thread_init{};
165
166 bool is_multicore{};
167
168 /// Cycle timing
169 u64 ticks{};
170 s64 downcount{};
144}; 171};
145 172
146/// Creates a core timing event with the given name and callback. 173/// Creates a core timing event with the given name and callback.
diff --git a/src/core/core_timing_util.cpp b/src/core/core_timing_util.cpp
index de50d3b14..aefc63663 100644
--- a/src/core/core_timing_util.cpp
+++ b/src/core/core_timing_util.cpp
@@ -38,15 +38,23 @@ s64 usToCycles(std::chrono::microseconds us) {
38} 38}
39 39
40s64 nsToCycles(std::chrono::nanoseconds ns) { 40s64 nsToCycles(std::chrono::nanoseconds ns) {
41 if (static_cast<u64>(ns.count() / 1000000000) > MAX_VALUE_TO_MULTIPLY) { 41 const u128 temporal = Common::Multiply64Into128(ns.count(), Hardware::BASE_CLOCK_RATE);
42 LOG_ERROR(Core_Timing, "Integer overflow, use max value"); 42 return Common::Divide128On32(temporal, static_cast<u32>(1000000000)).first;
43 return std::numeric_limits<s64>::max(); 43}
44 } 44
45 if (static_cast<u64>(ns.count()) > MAX_VALUE_TO_MULTIPLY) { 45u64 msToClockCycles(std::chrono::milliseconds ns) {
46 LOG_DEBUG(Core_Timing, "Time very big, do rounding"); 46 const u128 temp = Common::Multiply64Into128(ns.count(), Hardware::CNTFREQ);
47 return Hardware::BASE_CLOCK_RATE * (ns.count() / 1000000000); 47 return Common::Divide128On32(temp, 1000).first;
48 } 48}
49 return (Hardware::BASE_CLOCK_RATE * ns.count()) / 1000000000; 49
50u64 usToClockCycles(std::chrono::microseconds ns) {
51 const u128 temp = Common::Multiply64Into128(ns.count(), Hardware::CNTFREQ);
52 return Common::Divide128On32(temp, 1000000).first;
53}
54
55u64 nsToClockCycles(std::chrono::nanoseconds ns) {
56 const u128 temp = Common::Multiply64Into128(ns.count(), Hardware::CNTFREQ);
57 return Common::Divide128On32(temp, 1000000000).first;
50} 58}
51 59
52u64 CpuCyclesToClockCycles(u64 ticks) { 60u64 CpuCyclesToClockCycles(u64 ticks) {
@@ -54,4 +62,22 @@ u64 CpuCyclesToClockCycles(u64 ticks) {
54 return Common::Divide128On32(temporal, static_cast<u32>(Hardware::BASE_CLOCK_RATE)).first; 62 return Common::Divide128On32(temporal, static_cast<u32>(Hardware::BASE_CLOCK_RATE)).first;
55} 63}
56 64
65std::chrono::milliseconds CyclesToMs(s64 cycles) {
66 const u128 temporal = Common::Multiply64Into128(cycles, 1000);
67 u64 ms = Common::Divide128On32(temporal, static_cast<u32>(Hardware::BASE_CLOCK_RATE)).first;
68 return std::chrono::milliseconds(ms);
69}
70
71std::chrono::nanoseconds CyclesToNs(s64 cycles) {
72 const u128 temporal = Common::Multiply64Into128(cycles, 1000000000);
73 u64 ns = Common::Divide128On32(temporal, static_cast<u32>(Hardware::BASE_CLOCK_RATE)).first;
74 return std::chrono::nanoseconds(ns);
75}
76
77std::chrono::microseconds CyclesToUs(s64 cycles) {
78 const u128 temporal = Common::Multiply64Into128(cycles, 1000000);
79 u64 us = Common::Divide128On32(temporal, static_cast<u32>(Hardware::BASE_CLOCK_RATE)).first;
80 return std::chrono::microseconds(us);
81}
82
57} // namespace Core::Timing 83} // namespace Core::Timing
diff --git a/src/core/core_timing_util.h b/src/core/core_timing_util.h
index addc72b19..2ed979e14 100644
--- a/src/core/core_timing_util.h
+++ b/src/core/core_timing_util.h
@@ -13,18 +13,12 @@ namespace Core::Timing {
13s64 msToCycles(std::chrono::milliseconds ms); 13s64 msToCycles(std::chrono::milliseconds ms);
14s64 usToCycles(std::chrono::microseconds us); 14s64 usToCycles(std::chrono::microseconds us);
15s64 nsToCycles(std::chrono::nanoseconds ns); 15s64 nsToCycles(std::chrono::nanoseconds ns);
16 16u64 msToClockCycles(std::chrono::milliseconds ns);
17inline std::chrono::milliseconds CyclesToMs(s64 cycles) { 17u64 usToClockCycles(std::chrono::microseconds ns);
18 return std::chrono::milliseconds(cycles * 1000 / Hardware::BASE_CLOCK_RATE); 18u64 nsToClockCycles(std::chrono::nanoseconds ns);
19} 19std::chrono::milliseconds CyclesToMs(s64 cycles);
20 20std::chrono::nanoseconds CyclesToNs(s64 cycles);
21inline std::chrono::nanoseconds CyclesToNs(s64 cycles) { 21std::chrono::microseconds CyclesToUs(s64 cycles);
22 return std::chrono::nanoseconds(cycles * 1000000000 / Hardware::BASE_CLOCK_RATE);
23}
24
25inline std::chrono::microseconds CyclesToUs(s64 cycles) {
26 return std::chrono::microseconds(cycles * 1000000 / Hardware::BASE_CLOCK_RATE);
27}
28 22
29u64 CpuCyclesToClockCycles(u64 ticks); 23u64 CpuCyclesToClockCycles(u64 ticks);
30 24
diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp
index 70ddbdcca..32afcf3ae 100644
--- a/src/core/cpu_manager.cpp
+++ b/src/core/cpu_manager.cpp
@@ -2,80 +2,372 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include "common/fiber.h"
6#include "common/microprofile.h"
7#include "common/thread.h"
5#include "core/arm/exclusive_monitor.h" 8#include "core/arm/exclusive_monitor.h"
6#include "core/core.h" 9#include "core/core.h"
7#include "core/core_manager.h"
8#include "core/core_timing.h" 10#include "core/core_timing.h"
9#include "core/cpu_manager.h" 11#include "core/cpu_manager.h"
10#include "core/gdbstub/gdbstub.h" 12#include "core/gdbstub/gdbstub.h"
13#include "core/hle/kernel/kernel.h"
14#include "core/hle/kernel/physical_core.h"
15#include "core/hle/kernel/scheduler.h"
16#include "core/hle/kernel/thread.h"
17#include "video_core/gpu.h"
11 18
12namespace Core { 19namespace Core {
13 20
14CpuManager::CpuManager(System& system) : system{system} {} 21CpuManager::CpuManager(System& system) : system{system} {}
15CpuManager::~CpuManager() = default; 22CpuManager::~CpuManager() = default;
16 23
24void CpuManager::ThreadStart(CpuManager& cpu_manager, std::size_t core) {
25 cpu_manager.RunThread(core);
26}
27
17void CpuManager::Initialize() { 28void CpuManager::Initialize() {
18 for (std::size_t index = 0; index < core_managers.size(); ++index) { 29 running_mode = true;
19 core_managers[index] = std::make_unique<CoreManager>(system, index); 30 if (is_multicore) {
31 for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
32 core_data[core].host_thread =
33 std::make_unique<std::thread>(ThreadStart, std::ref(*this), core);
34 }
35 } else {
36 core_data[0].host_thread = std::make_unique<std::thread>(ThreadStart, std::ref(*this), 0);
20 } 37 }
21} 38}
22 39
23void CpuManager::Shutdown() { 40void CpuManager::Shutdown() {
24 for (auto& cpu_core : core_managers) { 41 running_mode = false;
25 cpu_core.reset(); 42 Pause(false);
43 if (is_multicore) {
44 for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
45 core_data[core].host_thread->join();
46 core_data[core].host_thread.reset();
47 }
48 } else {
49 core_data[0].host_thread->join();
50 core_data[0].host_thread.reset();
26 } 51 }
27} 52}
28 53
29CoreManager& CpuManager::GetCoreManager(std::size_t index) { 54std::function<void(void*)> CpuManager::GetGuestThreadStartFunc() {
30 return *core_managers.at(index); 55 return std::function<void(void*)>(GuestThreadFunction);
31} 56}
32 57
33const CoreManager& CpuManager::GetCoreManager(std::size_t index) const { 58std::function<void(void*)> CpuManager::GetIdleThreadStartFunc() {
34 return *core_managers.at(index); 59 return std::function<void(void*)>(IdleThreadFunction);
35} 60}
36 61
37CoreManager& CpuManager::GetCurrentCoreManager() { 62std::function<void(void*)> CpuManager::GetSuspendThreadStartFunc() {
38 // Otherwise, use single-threaded mode active_core variable 63 return std::function<void(void*)>(SuspendThreadFunction);
39 return *core_managers[active_core];
40} 64}
41 65
42const CoreManager& CpuManager::GetCurrentCoreManager() const { 66void CpuManager::GuestThreadFunction(void* cpu_manager_) {
43 // Otherwise, use single-threaded mode active_core variable 67 CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
44 return *core_managers[active_core]; 68 if (cpu_manager->is_multicore) {
69 cpu_manager->MultiCoreRunGuestThread();
70 } else {
71 cpu_manager->SingleCoreRunGuestThread();
72 }
45} 73}
46 74
47void CpuManager::RunLoop(bool tight_loop) { 75void CpuManager::GuestRewindFunction(void* cpu_manager_) {
48 if (GDBStub::IsServerEnabled()) { 76 CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
49 GDBStub::HandlePacket(); 77 if (cpu_manager->is_multicore) {
78 cpu_manager->MultiCoreRunGuestLoop();
79 } else {
80 cpu_manager->SingleCoreRunGuestLoop();
81 }
82}
50 83
51 // If the loop is halted and we want to step, use a tiny (1) number of instructions to 84void CpuManager::IdleThreadFunction(void* cpu_manager_) {
52 // execute. Otherwise, get out of the loop function. 85 CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
53 if (GDBStub::GetCpuHaltFlag()) { 86 if (cpu_manager->is_multicore) {
54 if (GDBStub::GetCpuStepFlag()) { 87 cpu_manager->MultiCoreRunIdleThread();
55 tight_loop = false; 88 } else {
56 } else { 89 cpu_manager->SingleCoreRunIdleThread();
57 return; 90 }
91}
92
93void CpuManager::SuspendThreadFunction(void* cpu_manager_) {
94 CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
95 if (cpu_manager->is_multicore) {
96 cpu_manager->MultiCoreRunSuspendThread();
97 } else {
98 cpu_manager->SingleCoreRunSuspendThread();
99 }
100}
101
102void* CpuManager::GetStartFuncParamater() {
103 return static_cast<void*>(this);
104}
105
106///////////////////////////////////////////////////////////////////////////////
107/// MultiCore ///
108///////////////////////////////////////////////////////////////////////////////
109
110void CpuManager::MultiCoreRunGuestThread() {
111 auto& kernel = system.Kernel();
112 {
113 auto& sched = kernel.CurrentScheduler();
114 sched.OnThreadStart();
115 }
116 MultiCoreRunGuestLoop();
117}
118
119void CpuManager::MultiCoreRunGuestLoop() {
120 auto& kernel = system.Kernel();
121 auto* thread = kernel.CurrentScheduler().GetCurrentThread();
122 while (true) {
123 auto* physical_core = &kernel.CurrentPhysicalCore();
124 auto& arm_interface = thread->ArmInterface();
125 system.EnterDynarmicProfile();
126 while (!physical_core->IsInterrupted()) {
127 arm_interface.Run();
128 physical_core = &kernel.CurrentPhysicalCore();
129 }
130 system.ExitDynarmicProfile();
131 arm_interface.ClearExclusiveState();
132 auto& scheduler = kernel.CurrentScheduler();
133 scheduler.TryDoContextSwitch();
134 }
135}
136
137void CpuManager::MultiCoreRunIdleThread() {
138 auto& kernel = system.Kernel();
139 while (true) {
140 auto& physical_core = kernel.CurrentPhysicalCore();
141 physical_core.Idle();
142 auto& scheduler = kernel.CurrentScheduler();
143 scheduler.TryDoContextSwitch();
144 }
145}
146
147void CpuManager::MultiCoreRunSuspendThread() {
148 auto& kernel = system.Kernel();
149 {
150 auto& sched = kernel.CurrentScheduler();
151 sched.OnThreadStart();
152 }
153 while (true) {
154 auto core = kernel.GetCurrentHostThreadID();
155 auto& scheduler = kernel.CurrentScheduler();
156 Kernel::Thread* current_thread = scheduler.GetCurrentThread();
157 Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[core].host_context);
158 ASSERT(scheduler.ContextSwitchPending());
159 ASSERT(core == kernel.GetCurrentHostThreadID());
160 scheduler.TryDoContextSwitch();
161 }
162}
163
164void CpuManager::MultiCorePause(bool paused) {
165 if (!paused) {
166 bool all_not_barrier = false;
167 while (!all_not_barrier) {
168 all_not_barrier = true;
169 for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
170 all_not_barrier &=
171 !core_data[core].is_running.load() && core_data[core].initialized.load();
172 }
173 }
174 for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
175 core_data[core].enter_barrier->Set();
176 }
177 if (paused_state.load()) {
178 bool all_barrier = false;
179 while (!all_barrier) {
180 all_barrier = true;
181 for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
182 all_barrier &=
183 core_data[core].is_paused.load() && core_data[core].initialized.load();
184 }
185 }
186 for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
187 core_data[core].exit_barrier->Set();
188 }
189 }
190 } else {
191 /// Wait until all cores are paused.
192 bool all_barrier = false;
193 while (!all_barrier) {
194 all_barrier = true;
195 for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
196 all_barrier &=
197 core_data[core].is_paused.load() && core_data[core].initialized.load();
58 } 198 }
59 } 199 }
200 /// Don't release the barrier
60 } 201 }
202 paused_state = paused;
203}
204
205///////////////////////////////////////////////////////////////////////////////
206/// SingleCore ///
207///////////////////////////////////////////////////////////////////////////////
61 208
62 auto& core_timing = system.CoreTiming(); 209void CpuManager::SingleCoreRunGuestThread() {
63 core_timing.ResetRun(); 210 auto& kernel = system.Kernel();
64 bool keep_running{}; 211 {
65 do { 212 auto& sched = kernel.CurrentScheduler();
66 keep_running = false; 213 sched.OnThreadStart();
67 for (active_core = 0; active_core < NUM_CPU_CORES; ++active_core) { 214 }
68 core_timing.SwitchContext(active_core); 215 SingleCoreRunGuestLoop();
69 if (core_timing.CanCurrentContextRun()) { 216}
70 core_managers[active_core]->RunLoop(tight_loop); 217
218void CpuManager::SingleCoreRunGuestLoop() {
219 auto& kernel = system.Kernel();
220 auto* thread = kernel.CurrentScheduler().GetCurrentThread();
221 while (true) {
222 auto* physical_core = &kernel.CurrentPhysicalCore();
223 auto& arm_interface = thread->ArmInterface();
224 system.EnterDynarmicProfile();
225 if (!physical_core->IsInterrupted()) {
226 arm_interface.Run();
227 physical_core = &kernel.CurrentPhysicalCore();
228 }
229 system.ExitDynarmicProfile();
230 thread->SetPhantomMode(true);
231 system.CoreTiming().Advance();
232 thread->SetPhantomMode(false);
233 arm_interface.ClearExclusiveState();
234 PreemptSingleCore();
235 auto& scheduler = kernel.Scheduler(current_core);
236 scheduler.TryDoContextSwitch();
237 }
238}
239
240void CpuManager::SingleCoreRunIdleThread() {
241 auto& kernel = system.Kernel();
242 while (true) {
243 auto& physical_core = kernel.CurrentPhysicalCore();
244 PreemptSingleCore(false);
245 system.CoreTiming().AddTicks(1000U);
246 idle_count++;
247 auto& scheduler = physical_core.Scheduler();
248 scheduler.TryDoContextSwitch();
249 }
250}
251
252void CpuManager::SingleCoreRunSuspendThread() {
253 auto& kernel = system.Kernel();
254 {
255 auto& sched = kernel.CurrentScheduler();
256 sched.OnThreadStart();
257 }
258 while (true) {
259 auto core = kernel.GetCurrentHostThreadID();
260 auto& scheduler = kernel.CurrentScheduler();
261 Kernel::Thread* current_thread = scheduler.GetCurrentThread();
262 Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[0].host_context);
263 ASSERT(scheduler.ContextSwitchPending());
264 ASSERT(core == kernel.GetCurrentHostThreadID());
265 scheduler.TryDoContextSwitch();
266 }
267}
268
269void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
270 std::size_t old_core = current_core;
271 auto& scheduler = system.Kernel().Scheduler(old_core);
272 Kernel::Thread* current_thread = scheduler.GetCurrentThread();
273 if (idle_count >= 4 || from_running_enviroment) {
274 if (!from_running_enviroment) {
275 system.CoreTiming().Idle();
276 idle_count = 0;
277 }
278 current_thread->SetPhantomMode(true);
279 system.CoreTiming().Advance();
280 current_thread->SetPhantomMode(false);
281 }
282 current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES);
283 system.CoreTiming().ResetTicks();
284 scheduler.Unload();
285 auto& next_scheduler = system.Kernel().Scheduler(current_core);
286 Common::Fiber::YieldTo(current_thread->GetHostContext(), next_scheduler.ControlContext());
287 /// May have changed scheduler
288 auto& current_scheduler = system.Kernel().Scheduler(current_core);
289 current_scheduler.Reload();
290 auto* currrent_thread2 = current_scheduler.GetCurrentThread();
291 if (!currrent_thread2->IsIdleThread()) {
292 idle_count = 0;
293 }
294}
295
296void CpuManager::SingleCorePause(bool paused) {
297 if (!paused) {
298 bool all_not_barrier = false;
299 while (!all_not_barrier) {
300 all_not_barrier = !core_data[0].is_running.load() && core_data[0].initialized.load();
301 }
302 core_data[0].enter_barrier->Set();
303 if (paused_state.load()) {
304 bool all_barrier = false;
305 while (!all_barrier) {
306 all_barrier = core_data[0].is_paused.load() && core_data[0].initialized.load();
71 } 307 }
72 keep_running |= core_timing.CanCurrentContextRun(); 308 core_data[0].exit_barrier->Set();
73 } 309 }
74 } while (keep_running); 310 } else {
311 /// Wait until all cores are paused.
312 bool all_barrier = false;
313 while (!all_barrier) {
314 all_barrier = core_data[0].is_paused.load() && core_data[0].initialized.load();
315 }
316 /// Don't release the barrier
317 }
318 paused_state = paused;
319}
320
321void CpuManager::Pause(bool paused) {
322 if (is_multicore) {
323 MultiCorePause(paused);
324 } else {
325 SingleCorePause(paused);
326 }
327}
75 328
76 if (GDBStub::IsServerEnabled()) { 329void CpuManager::RunThread(std::size_t core) {
77 GDBStub::SetCpuStepFlag(false); 330 /// Initialization
331 system.RegisterCoreThread(core);
332 std::string name;
333 if (is_multicore) {
334 name = "yuzu:CoreCPUThread_" + std::to_string(core);
335 } else {
336 name = "yuzu:CPUThread";
337 }
338 MicroProfileOnThreadCreate(name.c_str());
339 Common::SetCurrentThreadName(name.c_str());
340 Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
341 auto& data = core_data[core];
342 data.enter_barrier = std::make_unique<Common::Event>();
343 data.exit_barrier = std::make_unique<Common::Event>();
344 data.host_context = Common::Fiber::ThreadToFiber();
345 data.is_running = false;
346 data.initialized = true;
347 const bool sc_sync = !is_async_gpu && !is_multicore;
348 bool sc_sync_first_use = sc_sync;
349 /// Running
350 while (running_mode) {
351 data.is_running = false;
352 data.enter_barrier->Wait();
353 if (sc_sync_first_use) {
354 system.GPU().ObtainContext();
355 sc_sync_first_use = false;
356 }
357 auto& scheduler = system.Kernel().CurrentScheduler();
358 Kernel::Thread* current_thread = scheduler.GetCurrentThread();
359 data.is_running = true;
360 Common::Fiber::YieldTo(data.host_context, current_thread->GetHostContext());
361 data.is_running = false;
362 data.is_paused = true;
363 data.exit_barrier->Wait();
364 data.is_paused = false;
78 } 365 }
366 /// Time to cleanup
367 data.host_context->Exit();
368 data.enter_barrier.reset();
369 data.exit_barrier.reset();
370 data.initialized = false;
79} 371}
80 372
81} // namespace Core 373} // namespace Core
diff --git a/src/core/cpu_manager.h b/src/core/cpu_manager.h
index 97554d1bb..35929ed94 100644
--- a/src/core/cpu_manager.h
+++ b/src/core/cpu_manager.h
@@ -5,12 +5,19 @@
5#pragma once 5#pragma once
6 6
7#include <array> 7#include <array>
8#include <atomic>
9#include <functional>
8#include <memory> 10#include <memory>
11#include <thread>
9#include "core/hardware_properties.h" 12#include "core/hardware_properties.h"
10 13
14namespace Common {
15class Event;
16class Fiber;
17} // namespace Common
18
11namespace Core { 19namespace Core {
12 20
13class CoreManager;
14class System; 21class System;
15 22
16class CpuManager { 23class CpuManager {
@@ -24,24 +31,75 @@ public:
24 CpuManager& operator=(const CpuManager&) = delete; 31 CpuManager& operator=(const CpuManager&) = delete;
25 CpuManager& operator=(CpuManager&&) = delete; 32 CpuManager& operator=(CpuManager&&) = delete;
26 33
34 /// Sets if emulation is multicore or single core, must be set before Initialize
35 void SetMulticore(bool is_multicore) {
36 this->is_multicore = is_multicore;
37 }
38
39 /// Sets if emulation is using an asynchronous GPU.
40 void SetAsyncGpu(bool is_async_gpu) {
41 this->is_async_gpu = is_async_gpu;
42 }
43
27 void Initialize(); 44 void Initialize();
28 void Shutdown(); 45 void Shutdown();
29 46
30 CoreManager& GetCoreManager(std::size_t index); 47 void Pause(bool paused);
31 const CoreManager& GetCoreManager(std::size_t index) const;
32 48
33 CoreManager& GetCurrentCoreManager(); 49 std::function<void(void*)> GetGuestThreadStartFunc();
34 const CoreManager& GetCurrentCoreManager() const; 50 std::function<void(void*)> GetIdleThreadStartFunc();
51 std::function<void(void*)> GetSuspendThreadStartFunc();
52 void* GetStartFuncParamater();
35 53
36 std::size_t GetActiveCoreIndex() const { 54 void PreemptSingleCore(bool from_running_enviroment = true);
37 return active_core;
38 }
39 55
40 void RunLoop(bool tight_loop); 56 std::size_t CurrentCore() const {
57 return current_core.load();
58 }
41 59
42private: 60private:
43 std::array<std::unique_ptr<CoreManager>, Hardware::NUM_CPU_CORES> core_managers; 61 static void GuestThreadFunction(void* cpu_manager);
44 std::size_t active_core{}; ///< Active core, only used in single thread mode 62 static void GuestRewindFunction(void* cpu_manager);
63 static void IdleThreadFunction(void* cpu_manager);
64 static void SuspendThreadFunction(void* cpu_manager);
65
66 void MultiCoreRunGuestThread();
67 void MultiCoreRunGuestLoop();
68 void MultiCoreRunIdleThread();
69 void MultiCoreRunSuspendThread();
70 void MultiCorePause(bool paused);
71
72 void SingleCoreRunGuestThread();
73 void SingleCoreRunGuestLoop();
74 void SingleCoreRunIdleThread();
75 void SingleCoreRunSuspendThread();
76 void SingleCorePause(bool paused);
77
78 static void ThreadStart(CpuManager& cpu_manager, std::size_t core);
79
80 void RunThread(std::size_t core);
81
82 struct CoreData {
83 std::shared_ptr<Common::Fiber> host_context;
84 std::unique_ptr<Common::Event> enter_barrier;
85 std::unique_ptr<Common::Event> exit_barrier;
86 std::atomic<bool> is_running;
87 std::atomic<bool> is_paused;
88 std::atomic<bool> initialized;
89 std::unique_ptr<std::thread> host_thread;
90 };
91
92 std::atomic<bool> running_mode{};
93 std::atomic<bool> paused_state{};
94
95 std::array<CoreData, Core::Hardware::NUM_CPU_CORES> core_data{};
96
97 bool is_async_gpu{};
98 bool is_multicore{};
99 std::atomic<std::size_t> current_core{};
100 std::size_t preemption_count{};
101 std::size_t idle_count{};
102 static constexpr std::size_t max_cycle_runs = 5;
45 103
46 System& system; 104 System& system;
47}; 105};
diff --git a/src/core/gdbstub/gdbstub.cpp b/src/core/gdbstub/gdbstub.cpp
index 70c0f8b80..79f22a403 100644
--- a/src/core/gdbstub/gdbstub.cpp
+++ b/src/core/gdbstub/gdbstub.cpp
@@ -35,7 +35,6 @@
35#include "common/swap.h" 35#include "common/swap.h"
36#include "core/arm/arm_interface.h" 36#include "core/arm/arm_interface.h"
37#include "core/core.h" 37#include "core/core.h"
38#include "core/core_manager.h"
39#include "core/gdbstub/gdbstub.h" 38#include "core/gdbstub/gdbstub.h"
40#include "core/hle/kernel/memory/page_table.h" 39#include "core/hle/kernel/memory/page_table.h"
41#include "core/hle/kernel/process.h" 40#include "core/hle/kernel/process.h"
diff --git a/src/core/hardware_properties.h b/src/core/hardware_properties.h
index b04e046ed..456b41e1b 100644
--- a/src/core/hardware_properties.h
+++ b/src/core/hardware_properties.h
@@ -42,6 +42,10 @@ struct EmuThreadHandle {
42 constexpr u32 invalid_handle = 0xFFFFFFFF; 42 constexpr u32 invalid_handle = 0xFFFFFFFF;
43 return {invalid_handle, invalid_handle}; 43 return {invalid_handle, invalid_handle};
44 } 44 }
45
46 bool IsInvalid() const {
47 return (*this) == InvalidHandle();
48 }
45}; 49};
46 50
47} // namespace Core 51} // namespace Core
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
index 8475b698c..4d2a9b35d 100644
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ b/src/core/hle/kernel/address_arbiter.cpp
@@ -7,11 +7,15 @@
7 7
8#include "common/assert.h" 8#include "common/assert.h"
9#include "common/common_types.h" 9#include "common/common_types.h"
10#include "core/arm/exclusive_monitor.h"
10#include "core/core.h" 11#include "core/core.h"
11#include "core/hle/kernel/address_arbiter.h" 12#include "core/hle/kernel/address_arbiter.h"
12#include "core/hle/kernel/errors.h" 13#include "core/hle/kernel/errors.h"
14#include "core/hle/kernel/handle_table.h"
15#include "core/hle/kernel/kernel.h"
13#include "core/hle/kernel/scheduler.h" 16#include "core/hle/kernel/scheduler.h"
14#include "core/hle/kernel/thread.h" 17#include "core/hle/kernel/thread.h"
18#include "core/hle/kernel/time_manager.h"
15#include "core/hle/result.h" 19#include "core/hle/result.h"
16#include "core/memory.h" 20#include "core/memory.h"
17 21
@@ -20,6 +24,7 @@ namespace Kernel {
20// Wake up num_to_wake (or all) threads in a vector. 24// Wake up num_to_wake (or all) threads in a vector.
21void AddressArbiter::WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads, 25void AddressArbiter::WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads,
22 s32 num_to_wake) { 26 s32 num_to_wake) {
27 auto& time_manager = system.Kernel().TimeManager();
23 // Only process up to 'target' threads, unless 'target' is <= 0, in which case process 28 // Only process up to 'target' threads, unless 'target' is <= 0, in which case process
24 // them all. 29 // them all.
25 std::size_t last = waiting_threads.size(); 30 std::size_t last = waiting_threads.size();
@@ -29,12 +34,10 @@ void AddressArbiter::WakeThreads(const std::vector<std::shared_ptr<Thread>>& wai
29 34
30 // Signal the waiting threads. 35 // Signal the waiting threads.
31 for (std::size_t i = 0; i < last; i++) { 36 for (std::size_t i = 0; i < last; i++) {
32 ASSERT(waiting_threads[i]->GetStatus() == ThreadStatus::WaitArb); 37 waiting_threads[i]->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
33 waiting_threads[i]->SetWaitSynchronizationResult(RESULT_SUCCESS);
34 RemoveThread(waiting_threads[i]); 38 RemoveThread(waiting_threads[i]);
35 waiting_threads[i]->SetArbiterWaitAddress(0); 39 waiting_threads[i]->WaitForArbitration(false);
36 waiting_threads[i]->ResumeFromWait(); 40 waiting_threads[i]->ResumeFromWait();
37 system.PrepareReschedule(waiting_threads[i]->GetProcessorID());
38 } 41 }
39} 42}
40 43
@@ -56,6 +59,7 @@ ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 v
56} 59}
57 60
58ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) { 61ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
62 SchedulerLock lock(system.Kernel());
59 const std::vector<std::shared_ptr<Thread>> waiting_threads = 63 const std::vector<std::shared_ptr<Thread>> waiting_threads =
60 GetThreadsWaitingOnAddress(address); 64 GetThreadsWaitingOnAddress(address);
61 WakeThreads(waiting_threads, num_to_wake); 65 WakeThreads(waiting_threads, num_to_wake);
@@ -64,6 +68,7 @@ ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
64 68
65ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, 69ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value,
66 s32 num_to_wake) { 70 s32 num_to_wake) {
71 SchedulerLock lock(system.Kernel());
67 auto& memory = system.Memory(); 72 auto& memory = system.Memory();
68 73
69 // Ensure that we can write to the address. 74 // Ensure that we can write to the address.
@@ -71,16 +76,24 @@ ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32
71 return ERR_INVALID_ADDRESS_STATE; 76 return ERR_INVALID_ADDRESS_STATE;
72 } 77 }
73 78
74 if (static_cast<s32>(memory.Read32(address)) != value) { 79 const std::size_t current_core = system.CurrentCoreIndex();
75 return ERR_INVALID_STATE; 80 auto& monitor = system.Monitor();
76 } 81 u32 current_value;
82 do {
83 current_value = monitor.ExclusiveRead32(current_core, address);
84
85 if (current_value != value) {
86 return ERR_INVALID_STATE;
87 }
88 current_value++;
89 } while (!monitor.ExclusiveWrite32(current_core, address, current_value));
77 90
78 memory.Write32(address, static_cast<u32>(value + 1));
79 return SignalToAddressOnly(address, num_to_wake); 91 return SignalToAddressOnly(address, num_to_wake);
80} 92}
81 93
82ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value, 94ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
83 s32 num_to_wake) { 95 s32 num_to_wake) {
96 SchedulerLock lock(system.Kernel());
84 auto& memory = system.Memory(); 97 auto& memory = system.Memory();
85 98
86 // Ensure that we can write to the address. 99 // Ensure that we can write to the address.
@@ -92,29 +105,33 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a
92 const std::vector<std::shared_ptr<Thread>> waiting_threads = 105 const std::vector<std::shared_ptr<Thread>> waiting_threads =
93 GetThreadsWaitingOnAddress(address); 106 GetThreadsWaitingOnAddress(address);
94 107
95 // Determine the modified value depending on the waiting count. 108 const std::size_t current_core = system.CurrentCoreIndex();
109 auto& monitor = system.Monitor();
96 s32 updated_value; 110 s32 updated_value;
97 if (num_to_wake <= 0) { 111 do {
98 if (waiting_threads.empty()) { 112 updated_value = monitor.ExclusiveRead32(current_core, address);
99 updated_value = value + 1; 113
100 } else { 114 if (updated_value != value) {
101 updated_value = value - 1; 115 return ERR_INVALID_STATE;
102 } 116 }
103 } else { 117 // Determine the modified value depending on the waiting count.
104 if (waiting_threads.empty()) { 118 if (num_to_wake <= 0) {
105 updated_value = value + 1; 119 if (waiting_threads.empty()) {
106 } else if (waiting_threads.size() <= static_cast<u32>(num_to_wake)) { 120 updated_value = value + 1;
107 updated_value = value - 1; 121 } else {
122 updated_value = value - 1;
123 }
108 } else { 124 } else {
109 updated_value = value; 125 if (waiting_threads.empty()) {
126 updated_value = value + 1;
127 } else if (waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
128 updated_value = value - 1;
129 } else {
130 updated_value = value;
131 }
110 } 132 }
111 } 133 } while (!monitor.ExclusiveWrite32(current_core, address, updated_value));
112 134
113 if (static_cast<s32>(memory.Read32(address)) != value) {
114 return ERR_INVALID_STATE;
115 }
116
117 memory.Write32(address, static_cast<u32>(updated_value));
118 WakeThreads(waiting_threads, num_to_wake); 135 WakeThreads(waiting_threads, num_to_wake);
119 return RESULT_SUCCESS; 136 return RESULT_SUCCESS;
120} 137}
@@ -136,60 +153,127 @@ ResultCode AddressArbiter::WaitForAddress(VAddr address, ArbitrationType type, s
136ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout, 153ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
137 bool should_decrement) { 154 bool should_decrement) {
138 auto& memory = system.Memory(); 155 auto& memory = system.Memory();
156 auto& kernel = system.Kernel();
157 Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
139 158
140 // Ensure that we can read the address. 159 Handle event_handle = InvalidHandle;
141 if (!memory.IsValidVirtualAddress(address)) { 160 {
142 return ERR_INVALID_ADDRESS_STATE; 161 SchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
143 } 162
163 if (current_thread->IsPendingTermination()) {
164 lock.CancelSleep();
165 return ERR_THREAD_TERMINATING;
166 }
167
168 // Ensure that we can read the address.
169 if (!memory.IsValidVirtualAddress(address)) {
170 lock.CancelSleep();
171 return ERR_INVALID_ADDRESS_STATE;
172 }
173
174 s32 current_value = static_cast<s32>(memory.Read32(address));
175 if (current_value >= value) {
176 lock.CancelSleep();
177 return ERR_INVALID_STATE;
178 }
179
180 current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
181
182 s32 decrement_value;
183
184 const std::size_t current_core = system.CurrentCoreIndex();
185 auto& monitor = system.Monitor();
186 do {
187 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
188 if (should_decrement) {
189 decrement_value = current_value - 1;
190 } else {
191 decrement_value = current_value;
192 }
193 } while (
194 !monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(decrement_value)));
195
196 // Short-circuit without rescheduling, if timeout is zero.
197 if (timeout == 0) {
198 lock.CancelSleep();
199 return RESULT_TIMEOUT;
200 }
144 201
145 const s32 cur_value = static_cast<s32>(memory.Read32(address)); 202 current_thread->SetArbiterWaitAddress(address);
146 if (cur_value >= value) { 203 InsertThread(SharedFrom(current_thread));
147 return ERR_INVALID_STATE; 204 current_thread->SetStatus(ThreadStatus::WaitArb);
205 current_thread->WaitForArbitration(true);
148 } 206 }
149 207
150 if (should_decrement) { 208 if (event_handle != InvalidHandle) {
151 memory.Write32(address, static_cast<u32>(cur_value - 1)); 209 auto& time_manager = kernel.TimeManager();
210 time_manager.UnscheduleTimeEvent(event_handle);
152 } 211 }
153 212
154 // Short-circuit without rescheduling, if timeout is zero. 213 {
155 if (timeout == 0) { 214 SchedulerLock lock(kernel);
156 return RESULT_TIMEOUT; 215 if (current_thread->IsWaitingForArbitration()) {
216 RemoveThread(SharedFrom(current_thread));
217 current_thread->WaitForArbitration(false);
218 }
157 } 219 }
158 220
159 return WaitForAddressImpl(address, timeout); 221 return current_thread->GetSignalingResult();
160} 222}
161 223
162ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) { 224ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) {
163 auto& memory = system.Memory(); 225 auto& memory = system.Memory();
226 auto& kernel = system.Kernel();
227 Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
164 228
165 // Ensure that we can read the address. 229 Handle event_handle = InvalidHandle;
166 if (!memory.IsValidVirtualAddress(address)) { 230 {
167 return ERR_INVALID_ADDRESS_STATE; 231 SchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
168 } 232
233 if (current_thread->IsPendingTermination()) {
234 lock.CancelSleep();
235 return ERR_THREAD_TERMINATING;
236 }
237
238 // Ensure that we can read the address.
239 if (!memory.IsValidVirtualAddress(address)) {
240 lock.CancelSleep();
241 return ERR_INVALID_ADDRESS_STATE;
242 }
169 243
170 // Only wait for the address if equal. 244 s32 current_value = static_cast<s32>(memory.Read32(address));
171 if (static_cast<s32>(memory.Read32(address)) != value) { 245 if (current_value != value) {
172 return ERR_INVALID_STATE; 246 lock.CancelSleep();
247 return ERR_INVALID_STATE;
248 }
249
250 // Short-circuit without rescheduling, if timeout is zero.
251 if (timeout == 0) {
252 lock.CancelSleep();
253 return RESULT_TIMEOUT;
254 }
255
256 current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
257 current_thread->SetArbiterWaitAddress(address);
258 InsertThread(SharedFrom(current_thread));
259 current_thread->SetStatus(ThreadStatus::WaitArb);
260 current_thread->WaitForArbitration(true);
173 } 261 }
174 262
175 // Short-circuit without rescheduling if timeout is zero. 263 if (event_handle != InvalidHandle) {
176 if (timeout == 0) { 264 auto& time_manager = kernel.TimeManager();
177 return RESULT_TIMEOUT; 265 time_manager.UnscheduleTimeEvent(event_handle);
178 } 266 }
179 267
180 return WaitForAddressImpl(address, timeout); 268 {
181} 269 SchedulerLock lock(kernel);
270 if (current_thread->IsWaitingForArbitration()) {
271 RemoveThread(SharedFrom(current_thread));
272 current_thread->WaitForArbitration(false);
273 }
274 }
182 275
183ResultCode AddressArbiter::WaitForAddressImpl(VAddr address, s64 timeout) { 276 return current_thread->GetSignalingResult();
184 Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
185 current_thread->SetArbiterWaitAddress(address);
186 InsertThread(SharedFrom(current_thread));
187 current_thread->SetStatus(ThreadStatus::WaitArb);
188 current_thread->InvalidateWakeupCallback();
189 current_thread->WakeAfterDelay(timeout);
190
191 system.PrepareReschedule(current_thread->GetProcessorID());
192 return RESULT_TIMEOUT;
193} 277}
194 278
195void AddressArbiter::HandleWakeupThread(std::shared_ptr<Thread> thread) { 279void AddressArbiter::HandleWakeupThread(std::shared_ptr<Thread> thread) {
@@ -221,9 +305,9 @@ void AddressArbiter::RemoveThread(std::shared_ptr<Thread> thread) {
221 const auto iter = std::find_if(thread_list.cbegin(), thread_list.cend(), 305 const auto iter = std::find_if(thread_list.cbegin(), thread_list.cend(),
222 [&thread](const auto& entry) { return thread == entry; }); 306 [&thread](const auto& entry) { return thread == entry; });
223 307
224 ASSERT(iter != thread_list.cend()); 308 if (iter != thread_list.cend()) {
225 309 thread_list.erase(iter);
226 thread_list.erase(iter); 310 }
227} 311}
228 312
229std::vector<std::shared_ptr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress( 313std::vector<std::shared_ptr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress(
diff --git a/src/core/hle/kernel/address_arbiter.h b/src/core/hle/kernel/address_arbiter.h
index f958eee5a..0b05d533c 100644
--- a/src/core/hle/kernel/address_arbiter.h
+++ b/src/core/hle/kernel/address_arbiter.h
@@ -73,9 +73,6 @@ private:
73 /// Waits on an address if the value passed is equal to the argument value. 73 /// Waits on an address if the value passed is equal to the argument value.
74 ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout); 74 ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout);
75 75
76 // Waits on the given address with a timeout in nanoseconds
77 ResultCode WaitForAddressImpl(VAddr address, s64 timeout);
78
79 /// Wake up num_to_wake (or all) threads in a vector. 76 /// Wake up num_to_wake (or all) threads in a vector.
80 void WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads, s32 num_to_wake); 77 void WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads, s32 num_to_wake);
81 78
diff --git a/src/core/hle/kernel/client_port.cpp b/src/core/hle/kernel/client_port.cpp
index 5498fd313..8aff2227a 100644
--- a/src/core/hle/kernel/client_port.cpp
+++ b/src/core/hle/kernel/client_port.cpp
@@ -34,7 +34,7 @@ ResultVal<std::shared_ptr<ClientSession>> ClientPort::Connect() {
34 } 34 }
35 35
36 // Wake the threads waiting on the ServerPort 36 // Wake the threads waiting on the ServerPort
37 server_port->WakeupAllWaitingThreads(); 37 server_port->Signal();
38 38
39 return MakeResult(std::move(client)); 39 return MakeResult(std::move(client));
40} 40}
diff --git a/src/core/hle/kernel/errors.h b/src/core/hle/kernel/errors.h
index 29bfa3621..d4e5d88cf 100644
--- a/src/core/hle/kernel/errors.h
+++ b/src/core/hle/kernel/errors.h
@@ -12,6 +12,7 @@ namespace Kernel {
12 12
13constexpr ResultCode ERR_MAX_CONNECTIONS_REACHED{ErrorModule::Kernel, 7}; 13constexpr ResultCode ERR_MAX_CONNECTIONS_REACHED{ErrorModule::Kernel, 7};
14constexpr ResultCode ERR_INVALID_CAPABILITY_DESCRIPTOR{ErrorModule::Kernel, 14}; 14constexpr ResultCode ERR_INVALID_CAPABILITY_DESCRIPTOR{ErrorModule::Kernel, 14};
15constexpr ResultCode ERR_THREAD_TERMINATING{ErrorModule::Kernel, 59};
15constexpr ResultCode ERR_INVALID_SIZE{ErrorModule::Kernel, 101}; 16constexpr ResultCode ERR_INVALID_SIZE{ErrorModule::Kernel, 101};
16constexpr ResultCode ERR_INVALID_ADDRESS{ErrorModule::Kernel, 102}; 17constexpr ResultCode ERR_INVALID_ADDRESS{ErrorModule::Kernel, 102};
17constexpr ResultCode ERR_OUT_OF_RESOURCES{ErrorModule::Kernel, 103}; 18constexpr ResultCode ERR_OUT_OF_RESOURCES{ErrorModule::Kernel, 103};
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index ba0eac4c2..9277b5d08 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -14,14 +14,17 @@
14#include "common/common_types.h" 14#include "common/common_types.h"
15#include "common/logging/log.h" 15#include "common/logging/log.h"
16#include "core/hle/ipc_helpers.h" 16#include "core/hle/ipc_helpers.h"
17#include "core/hle/kernel/errors.h"
17#include "core/hle/kernel/handle_table.h" 18#include "core/hle/kernel/handle_table.h"
18#include "core/hle/kernel/hle_ipc.h" 19#include "core/hle/kernel/hle_ipc.h"
19#include "core/hle/kernel/kernel.h" 20#include "core/hle/kernel/kernel.h"
20#include "core/hle/kernel/object.h" 21#include "core/hle/kernel/object.h"
21#include "core/hle/kernel/process.h" 22#include "core/hle/kernel/process.h"
22#include "core/hle/kernel/readable_event.h" 23#include "core/hle/kernel/readable_event.h"
24#include "core/hle/kernel/scheduler.h"
23#include "core/hle/kernel/server_session.h" 25#include "core/hle/kernel/server_session.h"
24#include "core/hle/kernel/thread.h" 26#include "core/hle/kernel/thread.h"
27#include "core/hle/kernel/time_manager.h"
25#include "core/hle/kernel/writable_event.h" 28#include "core/hle/kernel/writable_event.h"
26#include "core/memory.h" 29#include "core/memory.h"
27 30
@@ -46,15 +49,6 @@ std::shared_ptr<WritableEvent> HLERequestContext::SleepClientThread(
46 const std::string& reason, u64 timeout, WakeupCallback&& callback, 49 const std::string& reason, u64 timeout, WakeupCallback&& callback,
47 std::shared_ptr<WritableEvent> writable_event) { 50 std::shared_ptr<WritableEvent> writable_event) {
48 // Put the client thread to sleep until the wait event is signaled or the timeout expires. 51 // Put the client thread to sleep until the wait event is signaled or the timeout expires.
49 thread->SetWakeupCallback(
50 [context = *this, callback](ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
51 std::shared_ptr<SynchronizationObject> object,
52 std::size_t index) mutable -> bool {
53 ASSERT(thread->GetStatus() == ThreadStatus::WaitHLEEvent);
54 callback(thread, context, reason);
55 context.WriteToOutgoingCommandBuffer(*thread);
56 return true;
57 });
58 52
59 if (!writable_event) { 53 if (!writable_event) {
60 // Create event if not provided 54 // Create event if not provided
@@ -62,14 +56,26 @@ std::shared_ptr<WritableEvent> HLERequestContext::SleepClientThread(
62 writable_event = pair.writable; 56 writable_event = pair.writable;
63 } 57 }
64 58
65 const auto readable_event{writable_event->GetReadableEvent()}; 59 {
66 writable_event->Clear(); 60 Handle event_handle = InvalidHandle;
67 thread->SetStatus(ThreadStatus::WaitHLEEvent); 61 SchedulerLockAndSleep lock(kernel, event_handle, thread.get(), timeout);
68 thread->SetSynchronizationObjects({readable_event}); 62 thread->SetHLECallback(
69 readable_event->AddWaitingThread(thread); 63 [context = *this, callback](std::shared_ptr<Thread> thread) mutable -> bool {
70 64 ThreadWakeupReason reason = thread->GetSignalingResult() == RESULT_TIMEOUT
71 if (timeout > 0) { 65 ? ThreadWakeupReason::Timeout
72 thread->WakeAfterDelay(timeout); 66 : ThreadWakeupReason::Signal;
67 callback(thread, context, reason);
68 context.WriteToOutgoingCommandBuffer(*thread);
69 return true;
70 });
71 const auto readable_event{writable_event->GetReadableEvent()};
72 writable_event->Clear();
73 thread->SetHLESyncObject(readable_event.get());
74 thread->SetStatus(ThreadStatus::WaitHLEEvent);
75 thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
76 readable_event->AddWaitingThread(thread);
77 lock.Release();
78 thread->SetHLETimeEvent(event_handle);
73 } 79 }
74 80
75 is_thread_waiting = true; 81 is_thread_waiting = true;
@@ -282,18 +288,18 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(Thread& thread) {
282} 288}
283 289
284std::vector<u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const { 290std::vector<u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const {
285 std::vector<u8> buffer; 291 std::vector<u8> buffer{};
286 const bool is_buffer_a{BufferDescriptorA().size() > buffer_index && 292 const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
287 BufferDescriptorA()[buffer_index].Size()}; 293 BufferDescriptorA()[buffer_index].Size()};
288 294
289 if (is_buffer_a) { 295 if (is_buffer_a) {
290 ASSERT_MSG(BufferDescriptorA().size() > buffer_index, 296 ASSERT_OR_EXECUTE_MSG(BufferDescriptorA().size() > buffer_index, { return buffer; },
291 "BufferDescriptorA invalid buffer_index {}", buffer_index); 297 "BufferDescriptorA invalid buffer_index {}", buffer_index);
292 buffer.resize(BufferDescriptorA()[buffer_index].Size()); 298 buffer.resize(BufferDescriptorA()[buffer_index].Size());
293 memory.ReadBlock(BufferDescriptorA()[buffer_index].Address(), buffer.data(), buffer.size()); 299 memory.ReadBlock(BufferDescriptorA()[buffer_index].Address(), buffer.data(), buffer.size());
294 } else { 300 } else {
295 ASSERT_MSG(BufferDescriptorX().size() > buffer_index, 301 ASSERT_OR_EXECUTE_MSG(BufferDescriptorX().size() > buffer_index, { return buffer; },
296 "BufferDescriptorX invalid buffer_index {}", buffer_index); 302 "BufferDescriptorX invalid buffer_index {}", buffer_index);
297 buffer.resize(BufferDescriptorX()[buffer_index].Size()); 303 buffer.resize(BufferDescriptorX()[buffer_index].Size());
298 memory.ReadBlock(BufferDescriptorX()[buffer_index].Address(), buffer.data(), buffer.size()); 304 memory.ReadBlock(BufferDescriptorX()[buffer_index].Address(), buffer.data(), buffer.size());
299 } 305 }
@@ -318,16 +324,16 @@ std::size_t HLERequestContext::WriteBuffer(const void* buffer, std::size_t size,
318 } 324 }
319 325
320 if (is_buffer_b) { 326 if (is_buffer_b) {
321 ASSERT_MSG(BufferDescriptorB().size() > buffer_index, 327 ASSERT_OR_EXECUTE_MSG(BufferDescriptorB().size() > buffer_index &&
322 "BufferDescriptorB invalid buffer_index {}", buffer_index); 328 BufferDescriptorB()[buffer_index].Size() >= size,
323 ASSERT_MSG(BufferDescriptorB()[buffer_index].Size() >= size, 329 { return 0; }, "BufferDescriptorB is invalid, index={}, size={}",
324 "BufferDescriptorB buffer_index {} is not large enough", buffer_index); 330 buffer_index, size);
325 memory.WriteBlock(BufferDescriptorB()[buffer_index].Address(), buffer, size); 331 memory.WriteBlock(BufferDescriptorB()[buffer_index].Address(), buffer, size);
326 } else { 332 } else {
327 ASSERT_MSG(BufferDescriptorC().size() > buffer_index, 333 ASSERT_OR_EXECUTE_MSG(BufferDescriptorC().size() > buffer_index &&
328 "BufferDescriptorC invalid buffer_index {}", buffer_index); 334 BufferDescriptorC()[buffer_index].Size() >= size,
329 ASSERT_MSG(BufferDescriptorC()[buffer_index].Size() >= size, 335 { return 0; }, "BufferDescriptorC is invalid, index={}, size={}",
330 "BufferDescriptorC buffer_index {} is not large enough", buffer_index); 336 buffer_index, size);
331 memory.WriteBlock(BufferDescriptorC()[buffer_index].Address(), buffer, size); 337 memory.WriteBlock(BufferDescriptorC()[buffer_index].Address(), buffer, size);
332 } 338 }
333 339
@@ -338,16 +344,12 @@ std::size_t HLERequestContext::GetReadBufferSize(std::size_t buffer_index) const
338 const bool is_buffer_a{BufferDescriptorA().size() > buffer_index && 344 const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
339 BufferDescriptorA()[buffer_index].Size()}; 345 BufferDescriptorA()[buffer_index].Size()};
340 if (is_buffer_a) { 346 if (is_buffer_a) {
341 ASSERT_MSG(BufferDescriptorA().size() > buffer_index, 347 ASSERT_OR_EXECUTE_MSG(BufferDescriptorA().size() > buffer_index, { return 0; },
342 "BufferDescriptorA invalid buffer_index {}", buffer_index); 348 "BufferDescriptorA invalid buffer_index {}", buffer_index);
343 ASSERT_MSG(BufferDescriptorA()[buffer_index].Size() > 0,
344 "BufferDescriptorA buffer_index {} is empty", buffer_index);
345 return BufferDescriptorA()[buffer_index].Size(); 349 return BufferDescriptorA()[buffer_index].Size();
346 } else { 350 } else {
347 ASSERT_MSG(BufferDescriptorX().size() > buffer_index, 351 ASSERT_OR_EXECUTE_MSG(BufferDescriptorX().size() > buffer_index, { return 0; },
348 "BufferDescriptorX invalid buffer_index {}", buffer_index); 352 "BufferDescriptorX invalid buffer_index {}", buffer_index);
349 ASSERT_MSG(BufferDescriptorX()[buffer_index].Size() > 0,
350 "BufferDescriptorX buffer_index {} is empty", buffer_index);
351 return BufferDescriptorX()[buffer_index].Size(); 353 return BufferDescriptorX()[buffer_index].Size();
352 } 354 }
353} 355}
@@ -356,14 +358,15 @@ std::size_t HLERequestContext::GetWriteBufferSize(std::size_t buffer_index) cons
356 const bool is_buffer_b{BufferDescriptorB().size() > buffer_index && 358 const bool is_buffer_b{BufferDescriptorB().size() > buffer_index &&
357 BufferDescriptorB()[buffer_index].Size()}; 359 BufferDescriptorB()[buffer_index].Size()};
358 if (is_buffer_b) { 360 if (is_buffer_b) {
359 ASSERT_MSG(BufferDescriptorB().size() > buffer_index, 361 ASSERT_OR_EXECUTE_MSG(BufferDescriptorB().size() > buffer_index, { return 0; },
360 "BufferDescriptorB invalid buffer_index {}", buffer_index); 362 "BufferDescriptorB invalid buffer_index {}", buffer_index);
361 return BufferDescriptorB()[buffer_index].Size(); 363 return BufferDescriptorB()[buffer_index].Size();
362 } else { 364 } else {
363 ASSERT_MSG(BufferDescriptorC().size() > buffer_index, 365 ASSERT_OR_EXECUTE_MSG(BufferDescriptorC().size() > buffer_index, { return 0; },
364 "BufferDescriptorC invalid buffer_index {}", buffer_index); 366 "BufferDescriptorC invalid buffer_index {}", buffer_index);
365 return BufferDescriptorC()[buffer_index].Size(); 367 return BufferDescriptorC()[buffer_index].Size();
366 } 368 }
369 return 0;
367} 370}
368 371
369std::string HLERequestContext::Description() const { 372std::string HLERequestContext::Description() const {
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 7655382fa..1f2af7a1b 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -2,6 +2,7 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <array>
5#include <atomic> 6#include <atomic>
6#include <bitset> 7#include <bitset>
7#include <functional> 8#include <functional>
@@ -13,11 +14,15 @@
13 14
14#include "common/assert.h" 15#include "common/assert.h"
15#include "common/logging/log.h" 16#include "common/logging/log.h"
17#include "common/microprofile.h"
18#include "common/thread.h"
16#include "core/arm/arm_interface.h" 19#include "core/arm/arm_interface.h"
20#include "core/arm/cpu_interrupt_handler.h"
17#include "core/arm/exclusive_monitor.h" 21#include "core/arm/exclusive_monitor.h"
18#include "core/core.h" 22#include "core/core.h"
19#include "core/core_timing.h" 23#include "core/core_timing.h"
20#include "core/core_timing_util.h" 24#include "core/core_timing_util.h"
25#include "core/cpu_manager.h"
21#include "core/device_memory.h" 26#include "core/device_memory.h"
22#include "core/hardware_properties.h" 27#include "core/hardware_properties.h"
23#include "core/hle/kernel/client_port.h" 28#include "core/hle/kernel/client_port.h"
@@ -39,85 +44,28 @@
39#include "core/hle/result.h" 44#include "core/hle/result.h"
40#include "core/memory.h" 45#include "core/memory.h"
41 46
42namespace Kernel { 47MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70));
43
44/**
45 * Callback that will wake up the thread it was scheduled for
46 * @param thread_handle The handle of the thread that's been awoken
47 * @param cycles_late The number of CPU cycles that have passed since the desired wakeup time
48 */
49static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_late) {
50 const auto proper_handle = static_cast<Handle>(thread_handle);
51 const auto& system = Core::System::GetInstance();
52
53 // Lock the global kernel mutex when we enter the kernel HLE.
54 std::lock_guard lock{HLE::g_hle_lock};
55
56 std::shared_ptr<Thread> thread =
57 system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
58 if (thread == nullptr) {
59 LOG_CRITICAL(Kernel, "Callback fired for invalid thread {:08X}", proper_handle);
60 return;
61 }
62
63 bool resume = true;
64
65 if (thread->GetStatus() == ThreadStatus::WaitSynch ||
66 thread->GetStatus() == ThreadStatus::WaitHLEEvent) {
67 // Remove the thread from each of its waiting objects' waitlists
68 for (const auto& object : thread->GetSynchronizationObjects()) {
69 object->RemoveWaitingThread(thread);
70 }
71 thread->ClearSynchronizationObjects();
72
73 // Invoke the wakeup callback before clearing the wait objects
74 if (thread->HasWakeupCallback()) {
75 resume = thread->InvokeWakeupCallback(ThreadWakeupReason::Timeout, thread, nullptr, 0);
76 }
77 } else if (thread->GetStatus() == ThreadStatus::WaitMutex ||
78 thread->GetStatus() == ThreadStatus::WaitCondVar) {
79 thread->SetMutexWaitAddress(0);
80 thread->SetWaitHandle(0);
81 if (thread->GetStatus() == ThreadStatus::WaitCondVar) {
82 thread->GetOwnerProcess()->RemoveConditionVariableThread(thread);
83 thread->SetCondVarWaitAddress(0);
84 }
85
86 auto* const lock_owner = thread->GetLockOwner();
87 // Threads waking up by timeout from WaitProcessWideKey do not perform priority inheritance
88 // and don't have a lock owner unless SignalProcessWideKey was called first and the thread
89 // wasn't awakened due to the mutex already being acquired.
90 if (lock_owner != nullptr) {
91 lock_owner->RemoveMutexWaiter(thread);
92 }
93 }
94 48
95 if (thread->GetStatus() == ThreadStatus::WaitArb) { 49namespace Kernel {
96 auto& address_arbiter = thread->GetOwnerProcess()->GetAddressArbiter();
97 address_arbiter.HandleWakeupThread(thread);
98 }
99
100 if (resume) {
101 if (thread->GetStatus() == ThreadStatus::WaitCondVar ||
102 thread->GetStatus() == ThreadStatus::WaitArb) {
103 thread->SetWaitSynchronizationResult(RESULT_TIMEOUT);
104 }
105 thread->ResumeFromWait();
106 }
107}
108 50
109struct KernelCore::Impl { 51struct KernelCore::Impl {
110 explicit Impl(Core::System& system, KernelCore& kernel) 52 explicit Impl(Core::System& system, KernelCore& kernel)
111 : global_scheduler{kernel}, synchronization{system}, time_manager{system}, system{system} {} 53 : global_scheduler{kernel}, synchronization{system}, time_manager{system}, system{system} {}
112 54
55 void SetMulticore(bool is_multicore) {
56 this->is_multicore = is_multicore;
57 }
58
113 void Initialize(KernelCore& kernel) { 59 void Initialize(KernelCore& kernel) {
114 Shutdown(); 60 Shutdown();
61 RegisterHostThread();
115 62
116 InitializePhysicalCores(); 63 InitializePhysicalCores();
117 InitializeSystemResourceLimit(kernel); 64 InitializeSystemResourceLimit(kernel);
118 InitializeMemoryLayout(); 65 InitializeMemoryLayout();
119 InitializeThreads(); 66 InitializePreemption(kernel);
120 InitializePreemption(); 67 InitializeSchedulers();
68 InitializeSuspendThreads();
121 } 69 }
122 70
123 void Shutdown() { 71 void Shutdown() {
@@ -126,13 +74,26 @@ struct KernelCore::Impl {
126 next_user_process_id = Process::ProcessIDMin; 74 next_user_process_id = Process::ProcessIDMin;
127 next_thread_id = 1; 75 next_thread_id = 1;
128 76
77 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
78 if (suspend_threads[i]) {
79 suspend_threads[i].reset();
80 }
81 }
82
83 for (std::size_t i = 0; i < cores.size(); i++) {
84 cores[i].Shutdown();
85 schedulers[i].reset();
86 }
87 cores.clear();
88
89 registered_core_threads.reset();
90
129 process_list.clear(); 91 process_list.clear();
130 current_process = nullptr; 92 current_process = nullptr;
131 93
132 system_resource_limit = nullptr; 94 system_resource_limit = nullptr;
133 95
134 global_handle_table.Clear(); 96 global_handle_table.Clear();
135 thread_wakeup_event_type = nullptr;
136 preemption_event = nullptr; 97 preemption_event = nullptr;
137 98
138 global_scheduler.Shutdown(); 99 global_scheduler.Shutdown();
@@ -145,13 +106,21 @@ struct KernelCore::Impl {
145 cores.clear(); 106 cores.clear();
146 107
147 exclusive_monitor.reset(); 108 exclusive_monitor.reset();
109 host_thread_ids.clear();
148 } 110 }
149 111
150 void InitializePhysicalCores() { 112 void InitializePhysicalCores() {
151 exclusive_monitor = 113 exclusive_monitor =
152 Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES); 114 Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
153 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { 115 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
154 cores.emplace_back(system, i, *exclusive_monitor); 116 schedulers[i] = std::make_unique<Kernel::Scheduler>(system, i);
117 cores.emplace_back(system, i, *schedulers[i], interrupts[i]);
118 }
119 }
120
121 void InitializeSchedulers() {
122 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
123 cores[i].Scheduler().Initialize();
155 } 124 }
156 } 125 }
157 126
@@ -173,15 +142,13 @@ struct KernelCore::Impl {
173 } 142 }
174 } 143 }
175 144
176 void InitializeThreads() { 145 void InitializePreemption(KernelCore& kernel) {
177 thread_wakeup_event_type = 146 preemption_event = Core::Timing::CreateEvent(
178 Core::Timing::CreateEvent("ThreadWakeupCallback", ThreadWakeupCallback); 147 "PreemptionCallback", [this, &kernel](u64 userdata, s64 cycles_late) {
179 } 148 {
180 149 SchedulerLock lock(kernel);
181 void InitializePreemption() { 150 global_scheduler.PreemptThreads();
182 preemption_event = 151 }
183 Core::Timing::CreateEvent("PreemptionCallback", [this](u64 userdata, s64 cycles_late) {
184 global_scheduler.PreemptThreads();
185 s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10)); 152 s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10));
186 system.CoreTiming().ScheduleEvent(time_interval, preemption_event); 153 system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
187 }); 154 });
@@ -190,6 +157,20 @@ struct KernelCore::Impl {
190 system.CoreTiming().ScheduleEvent(time_interval, preemption_event); 157 system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
191 } 158 }
192 159
160 void InitializeSuspendThreads() {
161 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
162 std::string name = "Suspend Thread Id:" + std::to_string(i);
163 std::function<void(void*)> init_func =
164 system.GetCpuManager().GetSuspendThreadStartFunc();
165 void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
166 ThreadType type =
167 static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_SUSPEND);
168 auto thread_res = Thread::Create(system, type, name, 0, 0, 0, static_cast<u32>(i), 0,
169 nullptr, std::move(init_func), init_func_parameter);
170 suspend_threads[i] = std::move(thread_res).Unwrap();
171 }
172 }
173
193 void MakeCurrentProcess(Process* process) { 174 void MakeCurrentProcess(Process* process) {
194 current_process = process; 175 current_process = process;
195 176
@@ -197,15 +178,17 @@ struct KernelCore::Impl {
197 return; 178 return;
198 } 179 }
199 180
200 for (auto& core : cores) { 181 u32 core_id = GetCurrentHostThreadID();
201 core.SetIs64Bit(process->Is64BitProcess()); 182 if (core_id < Core::Hardware::NUM_CPU_CORES) {
183 system.Memory().SetCurrentPageTable(*process, core_id);
202 } 184 }
203
204 system.Memory().SetCurrentPageTable(*process);
205 } 185 }
206 186
207 void RegisterCoreThread(std::size_t core_id) { 187 void RegisterCoreThread(std::size_t core_id) {
208 std::unique_lock lock{register_thread_mutex}; 188 std::unique_lock lock{register_thread_mutex};
189 if (!is_multicore) {
190 single_core_thread_id = std::this_thread::get_id();
191 }
209 const std::thread::id this_id = std::this_thread::get_id(); 192 const std::thread::id this_id = std::this_thread::get_id();
210 const auto it = host_thread_ids.find(this_id); 193 const auto it = host_thread_ids.find(this_id);
211 ASSERT(core_id < Core::Hardware::NUM_CPU_CORES); 194 ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
@@ -219,12 +202,19 @@ struct KernelCore::Impl {
219 std::unique_lock lock{register_thread_mutex}; 202 std::unique_lock lock{register_thread_mutex};
220 const std::thread::id this_id = std::this_thread::get_id(); 203 const std::thread::id this_id = std::this_thread::get_id();
221 const auto it = host_thread_ids.find(this_id); 204 const auto it = host_thread_ids.find(this_id);
222 ASSERT(it == host_thread_ids.end()); 205 if (it != host_thread_ids.end()) {
206 return;
207 }
223 host_thread_ids[this_id] = registered_thread_ids++; 208 host_thread_ids[this_id] = registered_thread_ids++;
224 } 209 }
225 210
226 u32 GetCurrentHostThreadID() const { 211 u32 GetCurrentHostThreadID() const {
227 const std::thread::id this_id = std::this_thread::get_id(); 212 const std::thread::id this_id = std::this_thread::get_id();
213 if (!is_multicore) {
214 if (single_core_thread_id == this_id) {
215 return static_cast<u32>(system.GetCpuManager().CurrentCore());
216 }
217 }
228 const auto it = host_thread_ids.find(this_id); 218 const auto it = host_thread_ids.find(this_id);
229 if (it == host_thread_ids.end()) { 219 if (it == host_thread_ids.end()) {
230 return Core::INVALID_HOST_THREAD_ID; 220 return Core::INVALID_HOST_THREAD_ID;
@@ -240,7 +230,7 @@ struct KernelCore::Impl {
240 } 230 }
241 const Kernel::Scheduler& sched = cores[result.host_handle].Scheduler(); 231 const Kernel::Scheduler& sched = cores[result.host_handle].Scheduler();
242 const Kernel::Thread* current = sched.GetCurrentThread(); 232 const Kernel::Thread* current = sched.GetCurrentThread();
243 if (current != nullptr) { 233 if (current != nullptr && !current->IsPhantomMode()) {
244 result.guest_handle = current->GetGlobalHandle(); 234 result.guest_handle = current->GetGlobalHandle();
245 } else { 235 } else {
246 result.guest_handle = InvalidHandle; 236 result.guest_handle = InvalidHandle;
@@ -313,7 +303,6 @@ struct KernelCore::Impl {
313 303
314 std::shared_ptr<ResourceLimit> system_resource_limit; 304 std::shared_ptr<ResourceLimit> system_resource_limit;
315 305
316 std::shared_ptr<Core::Timing::EventType> thread_wakeup_event_type;
317 std::shared_ptr<Core::Timing::EventType> preemption_event; 306 std::shared_ptr<Core::Timing::EventType> preemption_event;
318 307
319 // This is the kernel's handle table or supervisor handle table which 308 // This is the kernel's handle table or supervisor handle table which
@@ -343,6 +332,15 @@ struct KernelCore::Impl {
343 std::shared_ptr<Kernel::SharedMemory> irs_shared_mem; 332 std::shared_ptr<Kernel::SharedMemory> irs_shared_mem;
344 std::shared_ptr<Kernel::SharedMemory> time_shared_mem; 333 std::shared_ptr<Kernel::SharedMemory> time_shared_mem;
345 334
335 std::array<std::shared_ptr<Thread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{};
336 std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
337 std::array<std::unique_ptr<Kernel::Scheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
338
339 bool is_multicore{};
340 std::thread::id single_core_thread_id{};
341
342 std::array<u64, Core::Hardware::NUM_CPU_CORES> svc_ticks{};
343
346 // System context 344 // System context
347 Core::System& system; 345 Core::System& system;
348}; 346};
@@ -352,6 +350,10 @@ KernelCore::~KernelCore() {
352 Shutdown(); 350 Shutdown();
353} 351}
354 352
353void KernelCore::SetMulticore(bool is_multicore) {
354 impl->SetMulticore(is_multicore);
355}
356
355void KernelCore::Initialize() { 357void KernelCore::Initialize() {
356 impl->Initialize(*this); 358 impl->Initialize(*this);
357} 359}
@@ -397,11 +399,11 @@ const Kernel::GlobalScheduler& KernelCore::GlobalScheduler() const {
397} 399}
398 400
399Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) { 401Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) {
400 return impl->cores[id].Scheduler(); 402 return *impl->schedulers[id];
401} 403}
402 404
403const Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) const { 405const Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) const {
404 return impl->cores[id].Scheduler(); 406 return *impl->schedulers[id];
405} 407}
406 408
407Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) { 409Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) {
@@ -412,6 +414,39 @@ const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const {
412 return impl->cores[id]; 414 return impl->cores[id];
413} 415}
414 416
417Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
418 u32 core_id = impl->GetCurrentHostThreadID();
419 ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
420 return impl->cores[core_id];
421}
422
423const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
424 u32 core_id = impl->GetCurrentHostThreadID();
425 ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
426 return impl->cores[core_id];
427}
428
429Kernel::Scheduler& KernelCore::CurrentScheduler() {
430 u32 core_id = impl->GetCurrentHostThreadID();
431 ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
432 return *impl->schedulers[core_id];
433}
434
435const Kernel::Scheduler& KernelCore::CurrentScheduler() const {
436 u32 core_id = impl->GetCurrentHostThreadID();
437 ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
438 return *impl->schedulers[core_id];
439}
440
441std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts() {
442 return impl->interrupts;
443}
444
445const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts()
446 const {
447 return impl->interrupts;
448}
449
415Kernel::Synchronization& KernelCore::Synchronization() { 450Kernel::Synchronization& KernelCore::Synchronization() {
416 return impl->synchronization; 451 return impl->synchronization;
417} 452}
@@ -437,15 +472,17 @@ const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const {
437} 472}
438 473
439void KernelCore::InvalidateAllInstructionCaches() { 474void KernelCore::InvalidateAllInstructionCaches() {
440 for (std::size_t i = 0; i < impl->global_scheduler.CpuCoresCount(); i++) { 475 auto& threads = GlobalScheduler().GetThreadList();
441 PhysicalCore(i).ArmInterface().ClearInstructionCache(); 476 for (auto& thread : threads) {
477 if (!thread->IsHLEThread()) {
478 auto& arm_interface = thread->ArmInterface();
479 arm_interface.ClearInstructionCache();
480 }
442 } 481 }
443} 482}
444 483
445void KernelCore::PrepareReschedule(std::size_t id) { 484void KernelCore::PrepareReschedule(std::size_t id) {
446 if (id < impl->global_scheduler.CpuCoresCount()) { 485 // TODO: Reimplement, this
447 impl->cores[id].Stop();
448 }
449} 486}
450 487
451void KernelCore::AddNamedPort(std::string name, std::shared_ptr<ClientPort> port) { 488void KernelCore::AddNamedPort(std::string name, std::shared_ptr<ClientPort> port) {
@@ -481,10 +518,6 @@ u64 KernelCore::CreateNewUserProcessID() {
481 return impl->next_user_process_id++; 518 return impl->next_user_process_id++;
482} 519}
483 520
484const std::shared_ptr<Core::Timing::EventType>& KernelCore::ThreadWakeupCallbackEventType() const {
485 return impl->thread_wakeup_event_type;
486}
487
488Kernel::HandleTable& KernelCore::GlobalHandleTable() { 521Kernel::HandleTable& KernelCore::GlobalHandleTable() {
489 return impl->global_handle_table; 522 return impl->global_handle_table;
490} 523}
@@ -557,4 +590,34 @@ const Kernel::SharedMemory& KernelCore::GetTimeSharedMem() const {
557 return *impl->time_shared_mem; 590 return *impl->time_shared_mem;
558} 591}
559 592
593void KernelCore::Suspend(bool in_suspention) {
594 const bool should_suspend = exception_exited || in_suspention;
595 {
596 SchedulerLock lock(*this);
597 ThreadStatus status = should_suspend ? ThreadStatus::Ready : ThreadStatus::WaitSleep;
598 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
599 impl->suspend_threads[i]->SetStatus(status);
600 }
601 }
602}
603
604bool KernelCore::IsMulticore() const {
605 return impl->is_multicore;
606}
607
608void KernelCore::ExceptionalExit() {
609 exception_exited = true;
610 Suspend(true);
611}
612
613void KernelCore::EnterSVCProfile() {
614 std::size_t core = impl->GetCurrentHostThreadID();
615 impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
616}
617
618void KernelCore::ExitSVCProfile() {
619 std::size_t core = impl->GetCurrentHostThreadID();
620 MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
621}
622
560} // namespace Kernel 623} // namespace Kernel
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 83de1f542..49bd47e89 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -4,15 +4,17 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <array>
7#include <memory> 8#include <memory>
8#include <string> 9#include <string>
9#include <unordered_map> 10#include <unordered_map>
10#include <vector> 11#include <vector>
12#include "core/hardware_properties.h"
11#include "core/hle/kernel/memory/memory_types.h" 13#include "core/hle/kernel/memory/memory_types.h"
12#include "core/hle/kernel/object.h" 14#include "core/hle/kernel/object.h"
13 15
14namespace Core { 16namespace Core {
15struct EmuThreadHandle; 17class CPUInterruptHandler;
16class ExclusiveMonitor; 18class ExclusiveMonitor;
17class System; 19class System;
18} // namespace Core 20} // namespace Core
@@ -65,6 +67,9 @@ public:
65 KernelCore(KernelCore&&) = delete; 67 KernelCore(KernelCore&&) = delete;
66 KernelCore& operator=(KernelCore&&) = delete; 68 KernelCore& operator=(KernelCore&&) = delete;
67 69
70 /// Sets if emulation is multicore or single core, must be set before Initialize
71 void SetMulticore(bool is_multicore);
72
68 /// Resets the kernel to a clean slate for use. 73 /// Resets the kernel to a clean slate for use.
69 void Initialize(); 74 void Initialize();
70 75
@@ -110,6 +115,18 @@ public:
110 /// Gets the an instance of the respective physical CPU core. 115 /// Gets the an instance of the respective physical CPU core.
111 const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const; 116 const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const;
112 117
118 /// Gets the sole instance of the Scheduler at the current running core.
119 Kernel::Scheduler& CurrentScheduler();
120
121 /// Gets the sole instance of the Scheduler at the current running core.
122 const Kernel::Scheduler& CurrentScheduler() const;
123
124 /// Gets the an instance of the current physical CPU core.
125 Kernel::PhysicalCore& CurrentPhysicalCore();
126
127 /// Gets the an instance of the current physical CPU core.
128 const Kernel::PhysicalCore& CurrentPhysicalCore() const;
129
113 /// Gets the an instance of the Synchronization Interface. 130 /// Gets the an instance of the Synchronization Interface.
114 Kernel::Synchronization& Synchronization(); 131 Kernel::Synchronization& Synchronization();
115 132
@@ -129,6 +146,10 @@ public:
129 146
130 const Core::ExclusiveMonitor& GetExclusiveMonitor() const; 147 const Core::ExclusiveMonitor& GetExclusiveMonitor() const;
131 148
149 std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts();
150
151 const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts() const;
152
132 void InvalidateAllInstructionCaches(); 153 void InvalidateAllInstructionCaches();
133 154
134 /// Adds a port to the named port table 155 /// Adds a port to the named port table
@@ -191,6 +212,18 @@ public:
191 /// Gets the shared memory object for Time services. 212 /// Gets the shared memory object for Time services.
192 const Kernel::SharedMemory& GetTimeSharedMem() const; 213 const Kernel::SharedMemory& GetTimeSharedMem() const;
193 214
215 /// Suspend/unsuspend the OS.
216 void Suspend(bool in_suspention);
217
218 /// Exceptional exit the OS.
219 void ExceptionalExit();
220
221 bool IsMulticore() const;
222
223 void EnterSVCProfile();
224
225 void ExitSVCProfile();
226
194private: 227private:
195 friend class Object; 228 friend class Object;
196 friend class Process; 229 friend class Process;
@@ -208,9 +241,6 @@ private:
208 /// Creates a new thread ID, incrementing the internal thread ID counter. 241 /// Creates a new thread ID, incrementing the internal thread ID counter.
209 u64 CreateNewThreadID(); 242 u64 CreateNewThreadID();
210 243
211 /// Retrieves the event type used for thread wakeup callbacks.
212 const std::shared_ptr<Core::Timing::EventType>& ThreadWakeupCallbackEventType() const;
213
214 /// Provides a reference to the global handle table. 244 /// Provides a reference to the global handle table.
215 Kernel::HandleTable& GlobalHandleTable(); 245 Kernel::HandleTable& GlobalHandleTable();
216 246
@@ -219,6 +249,7 @@ private:
219 249
220 struct Impl; 250 struct Impl;
221 std::unique_ptr<Impl> impl; 251 std::unique_ptr<Impl> impl;
252 bool exception_exited{};
222}; 253};
223 254
224} // namespace Kernel 255} // namespace Kernel
diff --git a/src/core/hle/kernel/memory/memory_manager.cpp b/src/core/hle/kernel/memory/memory_manager.cpp
index 616148190..acf13585c 100644
--- a/src/core/hle/kernel/memory/memory_manager.cpp
+++ b/src/core/hle/kernel/memory/memory_manager.cpp
@@ -139,7 +139,6 @@ ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pa
139 } 139 }
140 140
141 // Only succeed if we allocated as many pages as we wanted 141 // Only succeed if we allocated as many pages as we wanted
142 ASSERT(num_pages >= 0);
143 if (num_pages) { 142 if (num_pages) {
144 return ERR_OUT_OF_MEMORY; 143 return ERR_OUT_OF_MEMORY;
145 } 144 }
diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp
index 7869eb32b..8f6c944d1 100644
--- a/src/core/hle/kernel/mutex.cpp
+++ b/src/core/hle/kernel/mutex.cpp
@@ -34,8 +34,6 @@ static std::pair<std::shared_ptr<Thread>, u32> GetHighestPriorityMutexWaitingThr
34 if (thread->GetMutexWaitAddress() != mutex_addr) 34 if (thread->GetMutexWaitAddress() != mutex_addr)
35 continue; 35 continue;
36 36
37 ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
38
39 ++num_waiters; 37 ++num_waiters;
40 if (highest_priority_thread == nullptr || 38 if (highest_priority_thread == nullptr ||
41 thread->GetPriority() < highest_priority_thread->GetPriority()) { 39 thread->GetPriority() < highest_priority_thread->GetPriority()) {
@@ -49,6 +47,7 @@ static std::pair<std::shared_ptr<Thread>, u32> GetHighestPriorityMutexWaitingThr
49/// Update the mutex owner field of all threads waiting on the mutex to point to the new owner. 47/// Update the mutex owner field of all threads waiting on the mutex to point to the new owner.
50static void TransferMutexOwnership(VAddr mutex_addr, std::shared_ptr<Thread> current_thread, 48static void TransferMutexOwnership(VAddr mutex_addr, std::shared_ptr<Thread> current_thread,
51 std::shared_ptr<Thread> new_owner) { 49 std::shared_ptr<Thread> new_owner) {
50 current_thread->RemoveMutexWaiter(new_owner);
52 const auto threads = current_thread->GetMutexWaitingThreads(); 51 const auto threads = current_thread->GetMutexWaitingThreads();
53 for (const auto& thread : threads) { 52 for (const auto& thread : threads) {
54 if (thread->GetMutexWaitAddress() != mutex_addr) 53 if (thread->GetMutexWaitAddress() != mutex_addr)
@@ -72,85 +71,100 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
72 return ERR_INVALID_ADDRESS; 71 return ERR_INVALID_ADDRESS;
73 } 72 }
74 73
75 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 74 auto& kernel = system.Kernel();
76 std::shared_ptr<Thread> current_thread = 75 std::shared_ptr<Thread> current_thread =
77 SharedFrom(system.CurrentScheduler().GetCurrentThread()); 76 SharedFrom(kernel.CurrentScheduler().GetCurrentThread());
78 std::shared_ptr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle); 77 {
79 std::shared_ptr<Thread> requesting_thread = handle_table.Get<Thread>(requesting_thread_handle); 78 SchedulerLock lock(kernel);
79 // The mutex address must be 4-byte aligned
80 if ((address % sizeof(u32)) != 0) {
81 return ERR_INVALID_ADDRESS;
82 }
80 83
81 // TODO(Subv): It is currently unknown if it is possible to lock a mutex in behalf of another 84 const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
82 // thread. 85 std::shared_ptr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle);
83 ASSERT(requesting_thread == current_thread); 86 std::shared_ptr<Thread> requesting_thread =
87 handle_table.Get<Thread>(requesting_thread_handle);
84 88
85 const u32 addr_value = system.Memory().Read32(address); 89 // TODO(Subv): It is currently unknown if it is possible to lock a mutex in behalf of
90 // another thread.
91 ASSERT(requesting_thread == current_thread);
86 92
87 // If the mutex isn't being held, just return success. 93 current_thread->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
88 if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) {
89 return RESULT_SUCCESS;
90 }
91 94
92 if (holding_thread == nullptr) { 95 const u32 addr_value = system.Memory().Read32(address);
93 LOG_ERROR(Kernel, "Holding thread does not exist! thread_handle={:08X}", 96
94 holding_thread_handle); 97 // If the mutex isn't being held, just return success.
95 return ERR_INVALID_HANDLE; 98 if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) {
96 } 99 return RESULT_SUCCESS;
100 }
97 101
98 // Wait until the mutex is released 102 if (holding_thread == nullptr) {
99 current_thread->SetMutexWaitAddress(address); 103 return ERR_INVALID_HANDLE;
100 current_thread->SetWaitHandle(requesting_thread_handle); 104 }
101 105
102 current_thread->SetStatus(ThreadStatus::WaitMutex); 106 // Wait until the mutex is released
103 current_thread->InvalidateWakeupCallback(); 107 current_thread->SetMutexWaitAddress(address);
108 current_thread->SetWaitHandle(requesting_thread_handle);
104 109
105 // Update the lock holder thread's priority to prevent priority inversion. 110 current_thread->SetStatus(ThreadStatus::WaitMutex);
106 holding_thread->AddMutexWaiter(current_thread);
107 111
108 system.PrepareReschedule(); 112 // Update the lock holder thread's priority to prevent priority inversion.
113 holding_thread->AddMutexWaiter(current_thread);
114 }
109 115
110 return RESULT_SUCCESS; 116 {
117 SchedulerLock lock(kernel);
118 auto* owner = current_thread->GetLockOwner();
119 if (owner != nullptr) {
120 owner->RemoveMutexWaiter(current_thread);
121 }
122 }
123 return current_thread->GetSignalingResult();
111} 124}
112 125
113ResultCode Mutex::Release(VAddr address) { 126std::pair<ResultCode, std::shared_ptr<Thread>> Mutex::Unlock(std::shared_ptr<Thread> owner,
127 VAddr address) {
114 // The mutex address must be 4-byte aligned 128 // The mutex address must be 4-byte aligned
115 if ((address % sizeof(u32)) != 0) { 129 if ((address % sizeof(u32)) != 0) {
116 LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address); 130 LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address);
117 return ERR_INVALID_ADDRESS; 131 return {ERR_INVALID_ADDRESS, nullptr};
118 } 132 }
119 133
120 std::shared_ptr<Thread> current_thread = 134 auto [new_owner, num_waiters] = GetHighestPriorityMutexWaitingThread(owner, address);
121 SharedFrom(system.CurrentScheduler().GetCurrentThread()); 135 if (new_owner == nullptr) {
122 auto [thread, num_waiters] = GetHighestPriorityMutexWaitingThread(current_thread, address);
123
124 // There are no more threads waiting for the mutex, release it completely.
125 if (thread == nullptr) {
126 system.Memory().Write32(address, 0); 136 system.Memory().Write32(address, 0);
127 return RESULT_SUCCESS; 137 return {RESULT_SUCCESS, nullptr};
128 } 138 }
129
130 // Transfer the ownership of the mutex from the previous owner to the new one. 139 // Transfer the ownership of the mutex from the previous owner to the new one.
131 TransferMutexOwnership(address, current_thread, thread); 140 TransferMutexOwnership(address, owner, new_owner);
132 141 u32 mutex_value = new_owner->GetWaitHandle();
133 u32 mutex_value = thread->GetWaitHandle();
134
135 if (num_waiters >= 2) { 142 if (num_waiters >= 2) {
136 // Notify the guest that there are still some threads waiting for the mutex 143 // Notify the guest that there are still some threads waiting for the mutex
137 mutex_value |= Mutex::MutexHasWaitersFlag; 144 mutex_value |= Mutex::MutexHasWaitersFlag;
138 } 145 }
146 new_owner->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
147 new_owner->SetLockOwner(nullptr);
148 new_owner->ResumeFromWait();
139 149
140 // Grant the mutex to the next waiting thread and resume it.
141 system.Memory().Write32(address, mutex_value); 150 system.Memory().Write32(address, mutex_value);
151 return {RESULT_SUCCESS, new_owner};
152}
142 153
143 ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex); 154ResultCode Mutex::Release(VAddr address) {
144 thread->ResumeFromWait(); 155 auto& kernel = system.Kernel();
156 SchedulerLock lock(kernel);
145 157
146 thread->SetLockOwner(nullptr); 158 std::shared_ptr<Thread> current_thread =
147 thread->SetCondVarWaitAddress(0); 159 SharedFrom(kernel.CurrentScheduler().GetCurrentThread());
148 thread->SetMutexWaitAddress(0);
149 thread->SetWaitHandle(0);
150 thread->SetWaitSynchronizationResult(RESULT_SUCCESS);
151 160
152 system.PrepareReschedule(); 161 auto [result, new_owner] = Unlock(current_thread, address);
153 162
154 return RESULT_SUCCESS; 163 if (result != RESULT_SUCCESS && new_owner != nullptr) {
164 new_owner->SetSynchronizationResults(nullptr, result);
165 }
166
167 return result;
155} 168}
169
156} // namespace Kernel 170} // namespace Kernel
diff --git a/src/core/hle/kernel/mutex.h b/src/core/hle/kernel/mutex.h
index b904de2e8..3b81dc3df 100644
--- a/src/core/hle/kernel/mutex.h
+++ b/src/core/hle/kernel/mutex.h
@@ -28,6 +28,10 @@ public:
28 ResultCode TryAcquire(VAddr address, Handle holding_thread_handle, 28 ResultCode TryAcquire(VAddr address, Handle holding_thread_handle,
29 Handle requesting_thread_handle); 29 Handle requesting_thread_handle);
30 30
31 /// Unlocks a mutex for owner at address
32 std::pair<ResultCode, std::shared_ptr<Thread>> Unlock(std::shared_ptr<Thread> owner,
33 VAddr address);
34
31 /// Releases the mutex at the specified address. 35 /// Releases the mutex at the specified address.
32 ResultCode Release(VAddr address); 36 ResultCode Release(VAddr address);
33 37
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index a15011076..c6bbdb080 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -2,12 +2,15 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include "common/assert.h"
5#include "common/logging/log.h" 6#include "common/logging/log.h"
7#include "common/spin_lock.h"
6#include "core/arm/arm_interface.h" 8#include "core/arm/arm_interface.h"
7#ifdef ARCHITECTURE_x86_64 9#ifdef ARCHITECTURE_x86_64
8#include "core/arm/dynarmic/arm_dynarmic_32.h" 10#include "core/arm/dynarmic/arm_dynarmic_32.h"
9#include "core/arm/dynarmic/arm_dynarmic_64.h" 11#include "core/arm/dynarmic/arm_dynarmic_64.h"
10#endif 12#endif
13#include "core/arm/cpu_interrupt_handler.h"
11#include "core/arm/exclusive_monitor.h" 14#include "core/arm/exclusive_monitor.h"
12#include "core/arm/unicorn/arm_unicorn.h" 15#include "core/arm/unicorn/arm_unicorn.h"
13#include "core/core.h" 16#include "core/core.h"
@@ -17,50 +20,37 @@
17 20
18namespace Kernel { 21namespace Kernel {
19 22
20PhysicalCore::PhysicalCore(Core::System& system, std::size_t id, 23PhysicalCore::PhysicalCore(Core::System& system, std::size_t id, Kernel::Scheduler& scheduler,
21 Core::ExclusiveMonitor& exclusive_monitor) 24 Core::CPUInterruptHandler& interrupt_handler)
22 : core_index{id} { 25 : interrupt_handler{interrupt_handler}, core_index{id}, scheduler{scheduler} {
23#ifdef ARCHITECTURE_x86_64
24 arm_interface_32 =
25 std::make_unique<Core::ARM_Dynarmic_32>(system, exclusive_monitor, core_index);
26 arm_interface_64 =
27 std::make_unique<Core::ARM_Dynarmic_64>(system, exclusive_monitor, core_index);
28
29#else
30 using Core::ARM_Unicorn;
31 arm_interface_32 = std::make_unique<ARM_Unicorn>(system, ARM_Unicorn::Arch::AArch32);
32 arm_interface_64 = std::make_unique<ARM_Unicorn>(system, ARM_Unicorn::Arch::AArch64);
33 LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available");
34#endif
35 26
36 scheduler = std::make_unique<Kernel::Scheduler>(system, core_index); 27 guard = std::make_unique<Common::SpinLock>();
37} 28}
38 29
39PhysicalCore::~PhysicalCore() = default; 30PhysicalCore::~PhysicalCore() = default;
40 31
41void PhysicalCore::Run() { 32void PhysicalCore::Idle() {
42 arm_interface->Run(); 33 interrupt_handler.AwaitInterrupt();
43 arm_interface->ClearExclusiveState();
44} 34}
45 35
46void PhysicalCore::Step() { 36void PhysicalCore::Shutdown() {
47 arm_interface->Step(); 37 scheduler.Shutdown();
48} 38}
49 39
50void PhysicalCore::Stop() { 40bool PhysicalCore::IsInterrupted() const {
51 arm_interface->PrepareReschedule(); 41 return interrupt_handler.IsInterrupted();
52} 42}
53 43
54void PhysicalCore::Shutdown() { 44void PhysicalCore::Interrupt() {
55 scheduler->Shutdown(); 45 guard->lock();
46 interrupt_handler.SetInterrupt(true);
47 guard->unlock();
56} 48}
57 49
58void PhysicalCore::SetIs64Bit(bool is_64_bit) { 50void PhysicalCore::ClearInterrupt() {
59 if (is_64_bit) { 51 guard->lock();
60 arm_interface = arm_interface_64.get(); 52 interrupt_handler.SetInterrupt(false);
61 } else { 53 guard->unlock();
62 arm_interface = arm_interface_32.get();
63 }
64} 54}
65 55
66} // namespace Kernel 56} // namespace Kernel
diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h
index 3269166be..d7a7a951c 100644
--- a/src/core/hle/kernel/physical_core.h
+++ b/src/core/hle/kernel/physical_core.h
@@ -7,12 +7,17 @@
7#include <cstddef> 7#include <cstddef>
8#include <memory> 8#include <memory>
9 9
10namespace Common {
11class SpinLock;
12}
13
10namespace Kernel { 14namespace Kernel {
11class Scheduler; 15class Scheduler;
12} // namespace Kernel 16} // namespace Kernel
13 17
14namespace Core { 18namespace Core {
15class ARM_Interface; 19class ARM_Interface;
20class CPUInterruptHandler;
16class ExclusiveMonitor; 21class ExclusiveMonitor;
17class System; 22class System;
18} // namespace Core 23} // namespace Core
@@ -21,7 +26,8 @@ namespace Kernel {
21 26
22class PhysicalCore { 27class PhysicalCore {
23public: 28public:
24 PhysicalCore(Core::System& system, std::size_t id, Core::ExclusiveMonitor& exclusive_monitor); 29 PhysicalCore(Core::System& system, std::size_t id, Kernel::Scheduler& scheduler,
30 Core::CPUInterruptHandler& interrupt_handler);
25 ~PhysicalCore(); 31 ~PhysicalCore();
26 32
27 PhysicalCore(const PhysicalCore&) = delete; 33 PhysicalCore(const PhysicalCore&) = delete;
@@ -30,23 +36,18 @@ public:
30 PhysicalCore(PhysicalCore&&) = default; 36 PhysicalCore(PhysicalCore&&) = default;
31 PhysicalCore& operator=(PhysicalCore&&) = default; 37 PhysicalCore& operator=(PhysicalCore&&) = default;
32 38
33 /// Execute current jit state 39 void Idle();
34 void Run(); 40 /// Interrupt this physical core.
35 /// Execute a single instruction in current jit. 41 void Interrupt();
36 void Step();
37 /// Stop JIT execution/exit
38 void Stop();
39 42
40 // Shutdown this physical core. 43 /// Clear this core's interrupt
41 void Shutdown(); 44 void ClearInterrupt();
42 45
43 Core::ARM_Interface& ArmInterface() { 46 /// Check if this core is interrupted
44 return *arm_interface; 47 bool IsInterrupted() const;
45 }
46 48
47 const Core::ARM_Interface& ArmInterface() const { 49 // Shutdown this physical core.
48 return *arm_interface; 50 void Shutdown();
49 }
50 51
51 bool IsMainCore() const { 52 bool IsMainCore() const {
52 return core_index == 0; 53 return core_index == 0;
@@ -61,21 +62,18 @@ public:
61 } 62 }
62 63
63 Kernel::Scheduler& Scheduler() { 64 Kernel::Scheduler& Scheduler() {
64 return *scheduler; 65 return scheduler;
65 } 66 }
66 67
67 const Kernel::Scheduler& Scheduler() const { 68 const Kernel::Scheduler& Scheduler() const {
68 return *scheduler; 69 return scheduler;
69 } 70 }
70 71
71 void SetIs64Bit(bool is_64_bit);
72
73private: 72private:
73 Core::CPUInterruptHandler& interrupt_handler;
74 std::size_t core_index; 74 std::size_t core_index;
75 std::unique_ptr<Core::ARM_Interface> arm_interface_32; 75 Kernel::Scheduler& scheduler;
76 std::unique_ptr<Core::ARM_Interface> arm_interface_64; 76 std::unique_ptr<Common::SpinLock> guard;
77 std::unique_ptr<Kernel::Scheduler> scheduler;
78 Core::ARM_Interface* arm_interface{};
79}; 77};
80 78
81} // namespace Kernel 79} // namespace Kernel
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index c4c5199b1..f9d7c024d 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -22,6 +22,7 @@
22#include "core/hle/kernel/resource_limit.h" 22#include "core/hle/kernel/resource_limit.h"
23#include "core/hle/kernel/scheduler.h" 23#include "core/hle/kernel/scheduler.h"
24#include "core/hle/kernel/thread.h" 24#include "core/hle/kernel/thread.h"
25#include "core/hle/lock.h"
25#include "core/memory.h" 26#include "core/memory.h"
26#include "core/settings.h" 27#include "core/settings.h"
27 28
@@ -30,14 +31,15 @@ namespace {
30/** 31/**
31 * Sets up the primary application thread 32 * Sets up the primary application thread
32 * 33 *
34 * @param system The system instance to create the main thread under.
33 * @param owner_process The parent process for the main thread 35 * @param owner_process The parent process for the main thread
34 * @param kernel The kernel instance to create the main thread under.
35 * @param priority The priority to give the main thread 36 * @param priority The priority to give the main thread
36 */ 37 */
37void SetupMainThread(Process& owner_process, KernelCore& kernel, u32 priority, VAddr stack_top) { 38void SetupMainThread(Core::System& system, Process& owner_process, u32 priority, VAddr stack_top) {
38 const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart(); 39 const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart();
39 auto thread_res = Thread::Create(kernel, "main", entry_point, priority, 0, 40 ThreadType type = THREADTYPE_USER;
40 owner_process.GetIdealCore(), stack_top, owner_process); 41 auto thread_res = Thread::Create(system, type, "main", entry_point, priority, 0,
42 owner_process.GetIdealCore(), stack_top, &owner_process);
41 43
42 std::shared_ptr<Thread> thread = std::move(thread_res).Unwrap(); 44 std::shared_ptr<Thread> thread = std::move(thread_res).Unwrap();
43 45
@@ -48,8 +50,12 @@ void SetupMainThread(Process& owner_process, KernelCore& kernel, u32 priority, V
48 thread->GetContext32().cpu_registers[1] = thread_handle; 50 thread->GetContext32().cpu_registers[1] = thread_handle;
49 thread->GetContext64().cpu_registers[1] = thread_handle; 51 thread->GetContext64().cpu_registers[1] = thread_handle;
50 52
53 auto& kernel = system.Kernel();
51 // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires 54 // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
52 thread->ResumeFromWait(); 55 {
56 SchedulerLock lock{kernel};
57 thread->SetStatus(ThreadStatus::Ready);
58 }
53} 59}
54} // Anonymous namespace 60} // Anonymous namespace
55 61
@@ -182,7 +188,6 @@ void Process::RemoveConditionVariableThread(std::shared_ptr<Thread> thread) {
182 } 188 }
183 ++it; 189 ++it;
184 } 190 }
185 UNREACHABLE();
186} 191}
187 192
188std::vector<std::shared_ptr<Thread>> Process::GetConditionVariableThreads( 193std::vector<std::shared_ptr<Thread>> Process::GetConditionVariableThreads(
@@ -207,6 +212,7 @@ void Process::UnregisterThread(const Thread* thread) {
207} 212}
208 213
209ResultCode Process::ClearSignalState() { 214ResultCode Process::ClearSignalState() {
215 SchedulerLock lock(system.Kernel());
210 if (status == ProcessStatus::Exited) { 216 if (status == ProcessStatus::Exited) {
211 LOG_ERROR(Kernel, "called on a terminated process instance."); 217 LOG_ERROR(Kernel, "called on a terminated process instance.");
212 return ERR_INVALID_STATE; 218 return ERR_INVALID_STATE;
@@ -294,7 +300,7 @@ void Process::Run(s32 main_thread_priority, u64 stack_size) {
294 300
295 ChangeStatus(ProcessStatus::Running); 301 ChangeStatus(ProcessStatus::Running);
296 302
297 SetupMainThread(*this, kernel, main_thread_priority, main_thread_stack_top); 303 SetupMainThread(system, *this, main_thread_priority, main_thread_stack_top);
298 resource_limit->Reserve(ResourceType::Threads, 1); 304 resource_limit->Reserve(ResourceType::Threads, 1);
299 resource_limit->Reserve(ResourceType::PhysicalMemory, main_thread_stack_size); 305 resource_limit->Reserve(ResourceType::PhysicalMemory, main_thread_stack_size);
300} 306}
@@ -340,6 +346,7 @@ static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) {
340} 346}
341 347
342VAddr Process::CreateTLSRegion() { 348VAddr Process::CreateTLSRegion() {
349 SchedulerLock lock(system.Kernel());
343 if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)}; 350 if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)};
344 tls_page_iter != tls_pages.cend()) { 351 tls_page_iter != tls_pages.cend()) {
345 return *tls_page_iter->ReserveSlot(); 352 return *tls_page_iter->ReserveSlot();
@@ -370,6 +377,7 @@ VAddr Process::CreateTLSRegion() {
370} 377}
371 378
372void Process::FreeTLSRegion(VAddr tls_address) { 379void Process::FreeTLSRegion(VAddr tls_address) {
380 SchedulerLock lock(system.Kernel());
373 const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE); 381 const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE);
374 auto iter = 382 auto iter =
375 std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) { 383 std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) {
@@ -384,6 +392,7 @@ void Process::FreeTLSRegion(VAddr tls_address) {
384} 392}
385 393
386void Process::LoadModule(CodeSet code_set, VAddr base_addr) { 394void Process::LoadModule(CodeSet code_set, VAddr base_addr) {
395 std::lock_guard lock{HLE::g_hle_lock};
387 const auto ReprotectSegment = [&](const CodeSet::Segment& segment, 396 const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
388 Memory::MemoryPermission permission) { 397 Memory::MemoryPermission permission) {
389 page_table->SetCodeMemoryPermission(segment.addr + base_addr, segment.size, permission); 398 page_table->SetCodeMemoryPermission(segment.addr + base_addr, segment.size, permission);
diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp
index ef5e19e63..6e286419e 100644
--- a/src/core/hle/kernel/readable_event.cpp
+++ b/src/core/hle/kernel/readable_event.cpp
@@ -6,8 +6,10 @@
6#include "common/assert.h" 6#include "common/assert.h"
7#include "common/logging/log.h" 7#include "common/logging/log.h"
8#include "core/hle/kernel/errors.h" 8#include "core/hle/kernel/errors.h"
9#include "core/hle/kernel/kernel.h"
9#include "core/hle/kernel/object.h" 10#include "core/hle/kernel/object.h"
10#include "core/hle/kernel/readable_event.h" 11#include "core/hle/kernel/readable_event.h"
12#include "core/hle/kernel/scheduler.h"
11#include "core/hle/kernel/thread.h" 13#include "core/hle/kernel/thread.h"
12 14
13namespace Kernel { 15namespace Kernel {
@@ -37,6 +39,7 @@ void ReadableEvent::Clear() {
37} 39}
38 40
39ResultCode ReadableEvent::Reset() { 41ResultCode ReadableEvent::Reset() {
42 SchedulerLock lock(kernel);
40 if (!is_signaled) { 43 if (!is_signaled) {
41 LOG_TRACE(Kernel, "Handle is not signaled! object_id={}, object_type={}, object_name={}", 44 LOG_TRACE(Kernel, "Handle is not signaled! object_id={}, object_type={}, object_name={}",
42 GetObjectId(), GetTypeName(), GetName()); 45 GetObjectId(), GetTypeName(), GetName());
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 1140c72a3..2b12c0dbf 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -11,11 +11,15 @@
11#include <utility> 11#include <utility>
12 12
13#include "common/assert.h" 13#include "common/assert.h"
14#include "common/bit_util.h"
15#include "common/fiber.h"
14#include "common/logging/log.h" 16#include "common/logging/log.h"
15#include "core/arm/arm_interface.h" 17#include "core/arm/arm_interface.h"
16#include "core/core.h" 18#include "core/core.h"
17#include "core/core_timing.h" 19#include "core/core_timing.h"
20#include "core/cpu_manager.h"
18#include "core/hle/kernel/kernel.h" 21#include "core/hle/kernel/kernel.h"
22#include "core/hle/kernel/physical_core.h"
19#include "core/hle/kernel/process.h" 23#include "core/hle/kernel/process.h"
20#include "core/hle/kernel/scheduler.h" 24#include "core/hle/kernel/scheduler.h"
21#include "core/hle/kernel/time_manager.h" 25#include "core/hle/kernel/time_manager.h"
@@ -27,103 +31,151 @@ GlobalScheduler::GlobalScheduler(KernelCore& kernel) : kernel{kernel} {}
27GlobalScheduler::~GlobalScheduler() = default; 31GlobalScheduler::~GlobalScheduler() = default;
28 32
29void GlobalScheduler::AddThread(std::shared_ptr<Thread> thread) { 33void GlobalScheduler::AddThread(std::shared_ptr<Thread> thread) {
34 global_list_guard.lock();
30 thread_list.push_back(std::move(thread)); 35 thread_list.push_back(std::move(thread));
36 global_list_guard.unlock();
31} 37}
32 38
33void GlobalScheduler::RemoveThread(std::shared_ptr<Thread> thread) { 39void GlobalScheduler::RemoveThread(std::shared_ptr<Thread> thread) {
40 global_list_guard.lock();
34 thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), 41 thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
35 thread_list.end()); 42 thread_list.end());
43 global_list_guard.unlock();
36} 44}
37 45
38void GlobalScheduler::UnloadThread(std::size_t core) { 46u32 GlobalScheduler::SelectThreads() {
39 Scheduler& sched = kernel.Scheduler(core); 47 ASSERT(is_locked);
40 sched.UnloadThread();
41}
42
43void GlobalScheduler::SelectThread(std::size_t core) {
44 const auto update_thread = [](Thread* thread, Scheduler& sched) { 48 const auto update_thread = [](Thread* thread, Scheduler& sched) {
45 if (thread != sched.selected_thread.get()) { 49 sched.guard.lock();
50 if (thread != sched.selected_thread_set.get()) {
46 if (thread == nullptr) { 51 if (thread == nullptr) {
47 ++sched.idle_selection_count; 52 ++sched.idle_selection_count;
48 } 53 }
49 sched.selected_thread = SharedFrom(thread); 54 sched.selected_thread_set = SharedFrom(thread);
50 } 55 }
51 sched.is_context_switch_pending = sched.selected_thread != sched.current_thread; 56 const bool reschedule_pending =
57 sched.is_context_switch_pending || (sched.selected_thread_set != sched.current_thread);
58 sched.is_context_switch_pending = reschedule_pending;
52 std::atomic_thread_fence(std::memory_order_seq_cst); 59 std::atomic_thread_fence(std::memory_order_seq_cst);
60 sched.guard.unlock();
61 return reschedule_pending;
53 }; 62 };
54 Scheduler& sched = kernel.Scheduler(core); 63 if (!is_reselection_pending.load()) {
55 Thread* current_thread = nullptr; 64 return 0;
56 // Step 1: Get top thread in schedule queue.
57 current_thread = scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front();
58 if (current_thread) {
59 update_thread(current_thread, sched);
60 return;
61 } 65 }
62 // Step 2: Try selecting a suggested thread. 66 std::array<Thread*, Core::Hardware::NUM_CPU_CORES> top_threads{};
63 Thread* winner = nullptr; 67
64 std::set<s32> sug_cores; 68 u32 idle_cores{};
65 for (auto thread : suggested_queue[core]) { 69
66 s32 this_core = thread->GetProcessorID(); 70 // Step 1: Get top thread in schedule queue.
67 Thread* thread_on_core = nullptr; 71 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
68 if (this_core >= 0) { 72 Thread* top_thread =
69 thread_on_core = scheduled_queue[this_core].front(); 73 scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front();
70 } 74 if (top_thread != nullptr) {
71 if (this_core < 0 || thread != thread_on_core) { 75 // TODO(Blinkhawk): Implement Thread Pinning
72 winner = thread; 76 } else {
73 break; 77 idle_cores |= (1ul << core);
74 } 78 }
75 sug_cores.insert(this_core); 79 top_threads[core] = top_thread;
76 } 80 }
77 // if we got a suggested thread, select it, else do a second pass. 81
78 if (winner && winner->GetPriority() > 2) { 82 while (idle_cores != 0) {
79 if (winner->IsRunning()) { 83 u32 core_id = Common::CountTrailingZeroes32(idle_cores);
80 UnloadThread(static_cast<u32>(winner->GetProcessorID())); 84
85 if (!suggested_queue[core_id].empty()) {
86 std::array<s32, Core::Hardware::NUM_CPU_CORES> migration_candidates{};
87 std::size_t num_candidates = 0;
88 auto iter = suggested_queue[core_id].begin();
89 Thread* suggested = nullptr;
90 // Step 2: Try selecting a suggested thread.
91 while (iter != suggested_queue[core_id].end()) {
92 suggested = *iter;
93 iter++;
94 s32 suggested_core_id = suggested->GetProcessorID();
95 Thread* top_thread =
96 suggested_core_id >= 0 ? top_threads[suggested_core_id] : nullptr;
97 if (top_thread != suggested) {
98 if (top_thread != nullptr &&
99 top_thread->GetPriority() < THREADPRIO_MAX_CORE_MIGRATION) {
100 suggested = nullptr;
101 break;
102 // There's a too high thread to do core migration, cancel
103 }
104 TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id), suggested);
105 break;
106 }
107 suggested = nullptr;
108 migration_candidates[num_candidates++] = suggested_core_id;
109 }
110 // Step 3: Select a suggested thread from another core
111 if (suggested == nullptr) {
112 for (std::size_t i = 0; i < num_candidates; i++) {
113 s32 candidate_core = migration_candidates[i];
114 suggested = top_threads[candidate_core];
115 auto it = scheduled_queue[candidate_core].begin();
116 it++;
117 Thread* next = it != scheduled_queue[candidate_core].end() ? *it : nullptr;
118 if (next != nullptr) {
119 TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id),
120 suggested);
121 top_threads[candidate_core] = next;
122 break;
123 } else {
124 suggested = nullptr;
125 }
126 }
127 }
128 top_threads[core_id] = suggested;
81 } 129 }
82 TransferToCore(winner->GetPriority(), static_cast<s32>(core), winner); 130
83 update_thread(winner, sched); 131 idle_cores &= ~(1ul << core_id);
84 return;
85 } 132 }
86 // Step 3: Select a suggested thread from another core 133 u32 cores_needing_context_switch{};
87 for (auto& src_core : sug_cores) { 134 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
88 auto it = scheduled_queue[src_core].begin(); 135 Scheduler& sched = kernel.Scheduler(core);
89 it++; 136 ASSERT(top_threads[core] == nullptr || top_threads[core]->GetProcessorID() == core);
90 if (it != scheduled_queue[src_core].end()) { 137 if (update_thread(top_threads[core], sched)) {
91 Thread* thread_on_core = scheduled_queue[src_core].front(); 138 cores_needing_context_switch |= (1ul << core);
92 Thread* to_change = *it;
93 if (thread_on_core->IsRunning() || to_change->IsRunning()) {
94 UnloadThread(static_cast<u32>(src_core));
95 }
96 TransferToCore(thread_on_core->GetPriority(), static_cast<s32>(core), thread_on_core);
97 current_thread = thread_on_core;
98 break;
99 } 139 }
100 } 140 }
101 update_thread(current_thread, sched); 141 return cores_needing_context_switch;
102} 142}
103 143
104bool GlobalScheduler::YieldThread(Thread* yielding_thread) { 144bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
145 ASSERT(is_locked);
105 // Note: caller should use critical section, etc. 146 // Note: caller should use critical section, etc.
147 if (!yielding_thread->IsRunnable()) {
148 // Normally this case shouldn't happen except for SetThreadActivity.
149 is_reselection_pending.store(true, std::memory_order_release);
150 return false;
151 }
106 const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID()); 152 const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
107 const u32 priority = yielding_thread->GetPriority(); 153 const u32 priority = yielding_thread->GetPriority();
108 154
109 // Yield the thread 155 // Yield the thread
110 const Thread* const winner = scheduled_queue[core_id].front(priority); 156 Reschedule(priority, core_id, yielding_thread);
111 ASSERT_MSG(yielding_thread == winner, "Thread yielding without being in front"); 157 const Thread* const winner = scheduled_queue[core_id].front();
112 scheduled_queue[core_id].yield(priority); 158 if (kernel.GetCurrentHostThreadID() != core_id) {
159 is_reselection_pending.store(true, std::memory_order_release);
160 }
113 161
114 return AskForReselectionOrMarkRedundant(yielding_thread, winner); 162 return AskForReselectionOrMarkRedundant(yielding_thread, winner);
115} 163}
116 164
117bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) { 165bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
166 ASSERT(is_locked);
118 // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section, 167 // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
119 // etc. 168 // etc.
169 if (!yielding_thread->IsRunnable()) {
170 // Normally this case shouldn't happen except for SetThreadActivity.
171 is_reselection_pending.store(true, std::memory_order_release);
172 return false;
173 }
120 const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID()); 174 const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
121 const u32 priority = yielding_thread->GetPriority(); 175 const u32 priority = yielding_thread->GetPriority();
122 176
123 // Yield the thread 177 // Yield the thread
124 ASSERT_MSG(yielding_thread == scheduled_queue[core_id].front(priority), 178 Reschedule(priority, core_id, yielding_thread);
125 "Thread yielding without being in front");
126 scheduled_queue[core_id].yield(priority);
127 179
128 std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads; 180 std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads;
129 for (std::size_t i = 0; i < current_threads.size(); i++) { 181 for (std::size_t i = 0; i < current_threads.size(); i++) {
@@ -153,21 +205,28 @@ bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
153 205
154 if (winner != nullptr) { 206 if (winner != nullptr) {
155 if (winner != yielding_thread) { 207 if (winner != yielding_thread) {
156 if (winner->IsRunning()) {
157 UnloadThread(static_cast<u32>(winner->GetProcessorID()));
158 }
159 TransferToCore(winner->GetPriority(), s32(core_id), winner); 208 TransferToCore(winner->GetPriority(), s32(core_id), winner);
160 } 209 }
161 } else { 210 } else {
162 winner = next_thread; 211 winner = next_thread;
163 } 212 }
164 213
214 if (kernel.GetCurrentHostThreadID() != core_id) {
215 is_reselection_pending.store(true, std::memory_order_release);
216 }
217
165 return AskForReselectionOrMarkRedundant(yielding_thread, winner); 218 return AskForReselectionOrMarkRedundant(yielding_thread, winner);
166} 219}
167 220
168bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) { 221bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
222 ASSERT(is_locked);
169 // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section, 223 // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
170 // etc. 224 // etc.
225 if (!yielding_thread->IsRunnable()) {
226 // Normally this case shouldn't happen except for SetThreadActivity.
227 is_reselection_pending.store(true, std::memory_order_release);
228 return false;
229 }
171 Thread* winner = nullptr; 230 Thread* winner = nullptr;
172 const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID()); 231 const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
173 232
@@ -195,25 +254,31 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
195 } 254 }
196 if (winner != nullptr) { 255 if (winner != nullptr) {
197 if (winner != yielding_thread) { 256 if (winner != yielding_thread) {
198 if (winner->IsRunning()) {
199 UnloadThread(static_cast<u32>(winner->GetProcessorID()));
200 }
201 TransferToCore(winner->GetPriority(), static_cast<s32>(core_id), winner); 257 TransferToCore(winner->GetPriority(), static_cast<s32>(core_id), winner);
202 } 258 }
203 } else { 259 } else {
204 winner = yielding_thread; 260 winner = yielding_thread;
205 } 261 }
262 } else {
263 winner = scheduled_queue[core_id].front();
264 }
265
266 if (kernel.GetCurrentHostThreadID() != core_id) {
267 is_reselection_pending.store(true, std::memory_order_release);
206 } 268 }
207 269
208 return AskForReselectionOrMarkRedundant(yielding_thread, winner); 270 return AskForReselectionOrMarkRedundant(yielding_thread, winner);
209} 271}
210 272
211void GlobalScheduler::PreemptThreads() { 273void GlobalScheduler::PreemptThreads() {
274 ASSERT(is_locked);
212 for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { 275 for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
213 const u32 priority = preemption_priorities[core_id]; 276 const u32 priority = preemption_priorities[core_id];
214 277
215 if (scheduled_queue[core_id].size(priority) > 0) { 278 if (scheduled_queue[core_id].size(priority) > 0) {
216 scheduled_queue[core_id].front(priority)->IncrementYieldCount(); 279 if (scheduled_queue[core_id].size(priority) > 1) {
280 scheduled_queue[core_id].front(priority)->IncrementYieldCount();
281 }
217 scheduled_queue[core_id].yield(priority); 282 scheduled_queue[core_id].yield(priority);
218 if (scheduled_queue[core_id].size(priority) > 1) { 283 if (scheduled_queue[core_id].size(priority) > 1) {
219 scheduled_queue[core_id].front(priority)->IncrementYieldCount(); 284 scheduled_queue[core_id].front(priority)->IncrementYieldCount();
@@ -247,9 +312,6 @@ void GlobalScheduler::PreemptThreads() {
247 } 312 }
248 313
249 if (winner != nullptr) { 314 if (winner != nullptr) {
250 if (winner->IsRunning()) {
251 UnloadThread(static_cast<u32>(winner->GetProcessorID()));
252 }
253 TransferToCore(winner->GetPriority(), s32(core_id), winner); 315 TransferToCore(winner->GetPriority(), s32(core_id), winner);
254 current_thread = 316 current_thread =
255 winner->GetPriority() <= current_thread->GetPriority() ? winner : current_thread; 317 winner->GetPriority() <= current_thread->GetPriority() ? winner : current_thread;
@@ -280,9 +342,6 @@ void GlobalScheduler::PreemptThreads() {
280 } 342 }
281 343
282 if (winner != nullptr) { 344 if (winner != nullptr) {
283 if (winner->IsRunning()) {
284 UnloadThread(static_cast<u32>(winner->GetProcessorID()));
285 }
286 TransferToCore(winner->GetPriority(), s32(core_id), winner); 345 TransferToCore(winner->GetPriority(), s32(core_id), winner);
287 current_thread = winner; 346 current_thread = winner;
288 } 347 }
@@ -292,34 +351,65 @@ void GlobalScheduler::PreemptThreads() {
292 } 351 }
293} 352}
294 353
354void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
355 Core::EmuThreadHandle global_thread) {
356 u32 current_core = global_thread.host_handle;
357 bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
358 (current_core < Core::Hardware::NUM_CPU_CORES);
359 while (cores_pending_reschedule != 0) {
360 u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule);
361 ASSERT(core < Core::Hardware::NUM_CPU_CORES);
362 if (!must_context_switch || core != current_core) {
363 auto& phys_core = kernel.PhysicalCore(core);
364 phys_core.Interrupt();
365 } else {
366 must_context_switch = true;
367 }
368 cores_pending_reschedule &= ~(1ul << core);
369 }
370 if (must_context_switch) {
371 auto& core_scheduler = kernel.CurrentScheduler();
372 kernel.ExitSVCProfile();
373 core_scheduler.TryDoContextSwitch();
374 kernel.EnterSVCProfile();
375 }
376}
377
295void GlobalScheduler::Suggest(u32 priority, std::size_t core, Thread* thread) { 378void GlobalScheduler::Suggest(u32 priority, std::size_t core, Thread* thread) {
379 ASSERT(is_locked);
296 suggested_queue[core].add(thread, priority); 380 suggested_queue[core].add(thread, priority);
297} 381}
298 382
299void GlobalScheduler::Unsuggest(u32 priority, std::size_t core, Thread* thread) { 383void GlobalScheduler::Unsuggest(u32 priority, std::size_t core, Thread* thread) {
384 ASSERT(is_locked);
300 suggested_queue[core].remove(thread, priority); 385 suggested_queue[core].remove(thread, priority);
301} 386}
302 387
303void GlobalScheduler::Schedule(u32 priority, std::size_t core, Thread* thread) { 388void GlobalScheduler::Schedule(u32 priority, std::size_t core, Thread* thread) {
389 ASSERT(is_locked);
304 ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core."); 390 ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
305 scheduled_queue[core].add(thread, priority); 391 scheduled_queue[core].add(thread, priority);
306} 392}
307 393
308void GlobalScheduler::SchedulePrepend(u32 priority, std::size_t core, Thread* thread) { 394void GlobalScheduler::SchedulePrepend(u32 priority, std::size_t core, Thread* thread) {
395 ASSERT(is_locked);
309 ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core."); 396 ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
310 scheduled_queue[core].add(thread, priority, false); 397 scheduled_queue[core].add(thread, priority, false);
311} 398}
312 399
313void GlobalScheduler::Reschedule(u32 priority, std::size_t core, Thread* thread) { 400void GlobalScheduler::Reschedule(u32 priority, std::size_t core, Thread* thread) {
401 ASSERT(is_locked);
314 scheduled_queue[core].remove(thread, priority); 402 scheduled_queue[core].remove(thread, priority);
315 scheduled_queue[core].add(thread, priority); 403 scheduled_queue[core].add(thread, priority);
316} 404}
317 405
318void GlobalScheduler::Unschedule(u32 priority, std::size_t core, Thread* thread) { 406void GlobalScheduler::Unschedule(u32 priority, std::size_t core, Thread* thread) {
407 ASSERT(is_locked);
319 scheduled_queue[core].remove(thread, priority); 408 scheduled_queue[core].remove(thread, priority);
320} 409}
321 410
322void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread* thread) { 411void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread* thread) {
412 ASSERT(is_locked);
323 const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT; 413 const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT;
324 const s32 source_core = thread->GetProcessorID(); 414 const s32 source_core = thread->GetProcessorID();
325 if (source_core == destination_core || !schedulable) { 415 if (source_core == destination_core || !schedulable) {
@@ -349,6 +439,108 @@ bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread,
349 } 439 }
350} 440}
351 441
442void GlobalScheduler::AdjustSchedulingOnStatus(Thread* thread, u32 old_flags) {
443 if (old_flags == thread->scheduling_state) {
444 return;
445 }
446 ASSERT(is_locked);
447
448 if (old_flags == static_cast<u32>(ThreadSchedStatus::Runnable)) {
449 // In this case the thread was running, now it's pausing/exitting
450 if (thread->processor_id >= 0) {
451 Unschedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
452 }
453
454 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
455 if (core != static_cast<u32>(thread->processor_id) &&
456 ((thread->affinity_mask >> core) & 1) != 0) {
457 Unsuggest(thread->current_priority, core, thread);
458 }
459 }
460 } else if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
461 // The thread is now set to running from being stopped
462 if (thread->processor_id >= 0) {
463 Schedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
464 }
465
466 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
467 if (core != static_cast<u32>(thread->processor_id) &&
468 ((thread->affinity_mask >> core) & 1) != 0) {
469 Suggest(thread->current_priority, core, thread);
470 }
471 }
472 }
473
474 SetReselectionPending();
475}
476
477void GlobalScheduler::AdjustSchedulingOnPriority(Thread* thread, u32 old_priority) {
478 if (thread->scheduling_state != static_cast<u32>(ThreadSchedStatus::Runnable)) {
479 return;
480 }
481 ASSERT(is_locked);
482 if (thread->processor_id >= 0) {
483 Unschedule(old_priority, static_cast<u32>(thread->processor_id), thread);
484 }
485
486 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
487 if (core != static_cast<u32>(thread->processor_id) &&
488 ((thread->affinity_mask >> core) & 1) != 0) {
489 Unsuggest(old_priority, core, thread);
490 }
491 }
492
493 if (thread->processor_id >= 0) {
494 if (thread == kernel.CurrentScheduler().GetCurrentThread()) {
495 SchedulePrepend(thread->current_priority, static_cast<u32>(thread->processor_id),
496 thread);
497 } else {
498 Schedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
499 }
500 }
501
502 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
503 if (core != static_cast<u32>(thread->processor_id) &&
504 ((thread->affinity_mask >> core) & 1) != 0) {
505 Suggest(thread->current_priority, core, thread);
506 }
507 }
508 thread->IncrementYieldCount();
509 SetReselectionPending();
510}
511
512void GlobalScheduler::AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinity_mask,
513 s32 old_core) {
514 if (thread->scheduling_state != static_cast<u32>(ThreadSchedStatus::Runnable) ||
515 thread->current_priority >= THREADPRIO_COUNT) {
516 return;
517 }
518 ASSERT(is_locked);
519
520 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
521 if (((old_affinity_mask >> core) & 1) != 0) {
522 if (core == static_cast<u32>(old_core)) {
523 Unschedule(thread->current_priority, core, thread);
524 } else {
525 Unsuggest(thread->current_priority, core, thread);
526 }
527 }
528 }
529
530 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
531 if (((thread->affinity_mask >> core) & 1) != 0) {
532 if (core == static_cast<u32>(thread->processor_id)) {
533 Schedule(thread->current_priority, core, thread);
534 } else {
535 Suggest(thread->current_priority, core, thread);
536 }
537 }
538 }
539
540 thread->IncrementYieldCount();
541 SetReselectionPending();
542}
543
352void GlobalScheduler::Shutdown() { 544void GlobalScheduler::Shutdown() {
353 for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { 545 for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
354 scheduled_queue[core].clear(); 546 scheduled_queue[core].clear();
@@ -359,10 +551,12 @@ void GlobalScheduler::Shutdown() {
359 551
360void GlobalScheduler::Lock() { 552void GlobalScheduler::Lock() {
361 Core::EmuThreadHandle current_thread = kernel.GetCurrentEmuThreadID(); 553 Core::EmuThreadHandle current_thread = kernel.GetCurrentEmuThreadID();
554 ASSERT(!current_thread.IsInvalid());
362 if (current_thread == current_owner) { 555 if (current_thread == current_owner) {
363 ++scope_lock; 556 ++scope_lock;
364 } else { 557 } else {
365 inner_lock.lock(); 558 inner_lock.lock();
559 is_locked = true;
366 current_owner = current_thread; 560 current_owner = current_thread;
367 ASSERT(current_owner != Core::EmuThreadHandle::InvalidHandle()); 561 ASSERT(current_owner != Core::EmuThreadHandle::InvalidHandle());
368 scope_lock = 1; 562 scope_lock = 1;
@@ -374,17 +568,18 @@ void GlobalScheduler::Unlock() {
374 ASSERT(scope_lock > 0); 568 ASSERT(scope_lock > 0);
375 return; 569 return;
376 } 570 }
377 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { 571 u32 cores_pending_reschedule = SelectThreads();
378 SelectThread(i); 572 Core::EmuThreadHandle leaving_thread = current_owner;
379 }
380 current_owner = Core::EmuThreadHandle::InvalidHandle(); 573 current_owner = Core::EmuThreadHandle::InvalidHandle();
381 scope_lock = 1; 574 scope_lock = 1;
575 is_locked = false;
382 inner_lock.unlock(); 576 inner_lock.unlock();
383 // TODO(Blinkhawk): Setup the interrupts and change context on current core. 577 EnableInterruptAndSchedule(cores_pending_reschedule, leaving_thread);
384} 578}
385 579
386Scheduler::Scheduler(Core::System& system, std::size_t core_id) 580Scheduler::Scheduler(Core::System& system, std::size_t core_id) : system(system), core_id(core_id) {
387 : system{system}, core_id{core_id} {} 581 switch_fiber = std::make_shared<Common::Fiber>(std::function<void(void*)>(OnSwitch), this);
582}
388 583
389Scheduler::~Scheduler() = default; 584Scheduler::~Scheduler() = default;
390 585
@@ -393,56 +588,128 @@ bool Scheduler::HaveReadyThreads() const {
393} 588}
394 589
395Thread* Scheduler::GetCurrentThread() const { 590Thread* Scheduler::GetCurrentThread() const {
396 return current_thread.get(); 591 if (current_thread) {
592 return current_thread.get();
593 }
594 return idle_thread.get();
397} 595}
398 596
399Thread* Scheduler::GetSelectedThread() const { 597Thread* Scheduler::GetSelectedThread() const {
400 return selected_thread.get(); 598 return selected_thread.get();
401} 599}
402 600
403void Scheduler::SelectThreads() {
404 system.GlobalScheduler().SelectThread(core_id);
405}
406
407u64 Scheduler::GetLastContextSwitchTicks() const { 601u64 Scheduler::GetLastContextSwitchTicks() const {
408 return last_context_switch_time; 602 return last_context_switch_time;
409} 603}
410 604
411void Scheduler::TryDoContextSwitch() { 605void Scheduler::TryDoContextSwitch() {
606 auto& phys_core = system.Kernel().CurrentPhysicalCore();
607 if (phys_core.IsInterrupted()) {
608 phys_core.ClearInterrupt();
609 }
610 guard.lock();
412 if (is_context_switch_pending) { 611 if (is_context_switch_pending) {
413 SwitchContext(); 612 SwitchContext();
613 } else {
614 guard.unlock();
414 } 615 }
415} 616}
416 617
417void Scheduler::UnloadThread() { 618void Scheduler::OnThreadStart() {
418 Thread* const previous_thread = GetCurrentThread(); 619 SwitchContextStep2();
419 Process* const previous_process = system.Kernel().CurrentProcess(); 620}
420 621
421 UpdateLastContextSwitchTime(previous_thread, previous_process); 622void Scheduler::Unload() {
623 Thread* thread = current_thread.get();
624 if (thread) {
625 thread->SetContinuousOnSVC(false);
626 thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
627 thread->SetIsRunning(false);
628 if (!thread->IsHLEThread() && !thread->HasExited()) {
629 Core::ARM_Interface& cpu_core = thread->ArmInterface();
630 cpu_core.SaveContext(thread->GetContext32());
631 cpu_core.SaveContext(thread->GetContext64());
632 // Save the TPIDR_EL0 system register in case it was modified.
633 thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
634 cpu_core.ClearExclusiveState();
635 }
636 thread->context_guard.unlock();
637 }
638}
422 639
423 // Save context for previous thread 640void Scheduler::Reload() {
424 if (previous_thread) { 641 Thread* thread = current_thread.get();
425 system.ArmInterface(core_id).SaveContext(previous_thread->GetContext32()); 642 if (thread) {
426 system.ArmInterface(core_id).SaveContext(previous_thread->GetContext64()); 643 ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
427 // Save the TPIDR_EL0 system register in case it was modified. 644 "Thread must be runnable.");
428 previous_thread->SetTPIDR_EL0(system.ArmInterface(core_id).GetTPIDR_EL0());
429 645
430 if (previous_thread->GetStatus() == ThreadStatus::Running) { 646 // Cancel any outstanding wakeup events for this thread
431 // This is only the case when a reschedule is triggered without the current thread 647 thread->SetIsRunning(true);
432 // yielding execution (i.e. an event triggered, system core time-sliced, etc) 648 thread->SetWasRunning(false);
433 previous_thread->SetStatus(ThreadStatus::Ready); 649 thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
650
651 auto* const thread_owner_process = thread->GetOwnerProcess();
652 if (thread_owner_process != nullptr) {
653 system.Kernel().MakeCurrentProcess(thread_owner_process);
654 }
655 if (!thread->IsHLEThread()) {
656 Core::ARM_Interface& cpu_core = thread->ArmInterface();
657 cpu_core.LoadContext(thread->GetContext32());
658 cpu_core.LoadContext(thread->GetContext64());
659 cpu_core.SetTlsAddress(thread->GetTLSAddress());
660 cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
661 cpu_core.ChangeProcessorID(this->core_id);
662 cpu_core.ClearExclusiveState();
434 } 663 }
435 previous_thread->SetIsRunning(false);
436 } 664 }
437 current_thread = nullptr; 665}
666
667void Scheduler::SwitchContextStep2() {
668 Thread* previous_thread = current_thread_prev.get();
669 Thread* new_thread = selected_thread.get();
670
671 // Load context of new thread
672 Process* const previous_process =
673 previous_thread != nullptr ? previous_thread->GetOwnerProcess() : nullptr;
674
675 if (new_thread) {
676 ASSERT_MSG(new_thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
677 "Thread must be runnable.");
678
679 // Cancel any outstanding wakeup events for this thread
680 new_thread->SetIsRunning(true);
681 new_thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
682 new_thread->SetWasRunning(false);
683
684 auto* const thread_owner_process = current_thread->GetOwnerProcess();
685 if (thread_owner_process != nullptr) {
686 system.Kernel().MakeCurrentProcess(thread_owner_process);
687 }
688 if (!new_thread->IsHLEThread()) {
689 Core::ARM_Interface& cpu_core = new_thread->ArmInterface();
690 cpu_core.LoadContext(new_thread->GetContext32());
691 cpu_core.LoadContext(new_thread->GetContext64());
692 cpu_core.SetTlsAddress(new_thread->GetTLSAddress());
693 cpu_core.SetTPIDR_EL0(new_thread->GetTPIDR_EL0());
694 cpu_core.ChangeProcessorID(this->core_id);
695 cpu_core.ClearExclusiveState();
696 }
697 }
698
699 TryDoContextSwitch();
438} 700}
439 701
440void Scheduler::SwitchContext() { 702void Scheduler::SwitchContext() {
441 Thread* const previous_thread = GetCurrentThread(); 703 current_thread_prev = current_thread;
442 Thread* const new_thread = GetSelectedThread(); 704 selected_thread = selected_thread_set;
705 Thread* previous_thread = current_thread_prev.get();
706 Thread* new_thread = selected_thread.get();
707 current_thread = selected_thread;
443 708
444 is_context_switch_pending = false; 709 is_context_switch_pending = false;
710
445 if (new_thread == previous_thread) { 711 if (new_thread == previous_thread) {
712 guard.unlock();
446 return; 713 return;
447 } 714 }
448 715
@@ -452,51 +719,75 @@ void Scheduler::SwitchContext() {
452 719
453 // Save context for previous thread 720 // Save context for previous thread
454 if (previous_thread) { 721 if (previous_thread) {
455 system.ArmInterface(core_id).SaveContext(previous_thread->GetContext32()); 722 if (new_thread != nullptr && new_thread->IsSuspendThread()) {
456 system.ArmInterface(core_id).SaveContext(previous_thread->GetContext64()); 723 previous_thread->SetWasRunning(true);
457 // Save the TPIDR_EL0 system register in case it was modified.
458 previous_thread->SetTPIDR_EL0(system.ArmInterface(core_id).GetTPIDR_EL0());
459
460 if (previous_thread->GetStatus() == ThreadStatus::Running) {
461 // This is only the case when a reschedule is triggered without the current thread
462 // yielding execution (i.e. an event triggered, system core time-sliced, etc)
463 previous_thread->SetStatus(ThreadStatus::Ready);
464 } 724 }
725 previous_thread->SetContinuousOnSVC(false);
726 previous_thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
465 previous_thread->SetIsRunning(false); 727 previous_thread->SetIsRunning(false);
466 } 728 if (!previous_thread->IsHLEThread() && !previous_thread->HasExited()) {
467 729 Core::ARM_Interface& cpu_core = previous_thread->ArmInterface();
468 // Load context of new thread 730 cpu_core.SaveContext(previous_thread->GetContext32());
469 if (new_thread) { 731 cpu_core.SaveContext(previous_thread->GetContext64());
470 ASSERT_MSG(new_thread->GetProcessorID() == s32(this->core_id), 732 // Save the TPIDR_EL0 system register in case it was modified.
471 "Thread must be assigned to this core."); 733 previous_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
472 ASSERT_MSG(new_thread->GetStatus() == ThreadStatus::Ready, 734 cpu_core.ClearExclusiveState();
473 "Thread must be ready to become running.");
474
475 // Cancel any outstanding wakeup events for this thread
476 new_thread->CancelWakeupTimer();
477 current_thread = SharedFrom(new_thread);
478 new_thread->SetStatus(ThreadStatus::Running);
479 new_thread->SetIsRunning(true);
480
481 auto* const thread_owner_process = current_thread->GetOwnerProcess();
482 if (previous_process != thread_owner_process) {
483 system.Kernel().MakeCurrentProcess(thread_owner_process);
484 } 735 }
736 previous_thread->context_guard.unlock();
737 }
485 738
486 system.ArmInterface(core_id).LoadContext(new_thread->GetContext32()); 739 std::shared_ptr<Common::Fiber>* old_context;
487 system.ArmInterface(core_id).LoadContext(new_thread->GetContext64()); 740 if (previous_thread != nullptr) {
488 system.ArmInterface(core_id).SetTlsAddress(new_thread->GetTLSAddress()); 741 old_context = &previous_thread->GetHostContext();
489 system.ArmInterface(core_id).SetTPIDR_EL0(new_thread->GetTPIDR_EL0());
490 } else { 742 } else {
491 current_thread = nullptr; 743 old_context = &idle_thread->GetHostContext();
492 // Note: We do not reset the current process and current page table when idling because 744 }
493 // technically we haven't changed processes, our threads are just paused. 745 guard.unlock();
746
747 Common::Fiber::YieldTo(*old_context, switch_fiber);
748 /// When a thread wakes up, the scheduler may have changed to another one on another core.
749 auto& next_scheduler = system.Kernel().CurrentScheduler();
750 next_scheduler.SwitchContextStep2();
751}
752
753void Scheduler::OnSwitch(void* this_scheduler) {
754 Scheduler* sched = static_cast<Scheduler*>(this_scheduler);
755 sched->SwitchToCurrent();
756}
757
758void Scheduler::SwitchToCurrent() {
759 while (true) {
760 guard.lock();
761 selected_thread = selected_thread_set;
762 current_thread = selected_thread;
763 is_context_switch_pending = false;
764 guard.unlock();
765 while (!is_context_switch_pending) {
766 if (current_thread != nullptr && !current_thread->IsHLEThread()) {
767 current_thread->context_guard.lock();
768 if (!current_thread->IsRunnable()) {
769 current_thread->context_guard.unlock();
770 break;
771 }
772 if (current_thread->GetProcessorID() != core_id) {
773 current_thread->context_guard.unlock();
774 break;
775 }
776 }
777 std::shared_ptr<Common::Fiber>* next_context;
778 if (current_thread != nullptr) {
779 next_context = &current_thread->GetHostContext();
780 } else {
781 next_context = &idle_thread->GetHostContext();
782 }
783 Common::Fiber::YieldTo(switch_fiber, *next_context);
784 }
494 } 785 }
495} 786}
496 787
497void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) { 788void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
498 const u64 prev_switch_ticks = last_context_switch_time; 789 const u64 prev_switch_ticks = last_context_switch_time;
499 const u64 most_recent_switch_ticks = system.CoreTiming().GetTicks(); 790 const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
500 const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks; 791 const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
501 792
502 if (thread != nullptr) { 793 if (thread != nullptr) {
@@ -510,6 +801,16 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
510 last_context_switch_time = most_recent_switch_ticks; 801 last_context_switch_time = most_recent_switch_ticks;
511} 802}
512 803
804void Scheduler::Initialize() {
805 std::string name = "Idle Thread Id:" + std::to_string(core_id);
806 std::function<void(void*)> init_func = system.GetCpuManager().GetIdleThreadStartFunc();
807 void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
808 ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
809 auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
810 nullptr, std::move(init_func), init_func_parameter);
811 idle_thread = std::move(thread_res).Unwrap();
812}
813
513void Scheduler::Shutdown() { 814void Scheduler::Shutdown() {
514 current_thread = nullptr; 815 current_thread = nullptr;
515 selected_thread = nullptr; 816 selected_thread = nullptr;
@@ -538,4 +839,13 @@ SchedulerLockAndSleep::~SchedulerLockAndSleep() {
538 time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds); 839 time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
539} 840}
540 841
842void SchedulerLockAndSleep::Release() {
843 if (sleep_cancelled) {
844 return;
845 }
846 auto& time_manager = kernel.TimeManager();
847 time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
848 sleep_cancelled = true;
849}
850
541} // namespace Kernel 851} // namespace Kernel
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index 07df33f9c..b3b4b5169 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -11,9 +11,14 @@
11 11
12#include "common/common_types.h" 12#include "common/common_types.h"
13#include "common/multi_level_queue.h" 13#include "common/multi_level_queue.h"
14#include "common/spin_lock.h"
14#include "core/hardware_properties.h" 15#include "core/hardware_properties.h"
15#include "core/hle/kernel/thread.h" 16#include "core/hle/kernel/thread.h"
16 17
18namespace Common {
19class Fiber;
20}
21
17namespace Core { 22namespace Core {
18class ARM_Interface; 23class ARM_Interface;
19class System; 24class System;
@@ -41,41 +46,17 @@ public:
41 return thread_list; 46 return thread_list;
42 } 47 }
43 48
44 /** 49 /// Notify the scheduler a thread's status has changed.
45 * Add a thread to the suggested queue of a cpu core. Suggested threads may be 50 void AdjustSchedulingOnStatus(Thread* thread, u32 old_flags);
46 * picked if no thread is scheduled to run on the core.
47 */
48 void Suggest(u32 priority, std::size_t core, Thread* thread);
49
50 /**
51 * Remove a thread to the suggested queue of a cpu core. Suggested threads may be
52 * picked if no thread is scheduled to run on the core.
53 */
54 void Unsuggest(u32 priority, std::size_t core, Thread* thread);
55
56 /**
57 * Add a thread to the scheduling queue of a cpu core. The thread is added at the
58 * back the queue in its priority level.
59 */
60 void Schedule(u32 priority, std::size_t core, Thread* thread);
61
62 /**
63 * Add a thread to the scheduling queue of a cpu core. The thread is added at the
64 * front the queue in its priority level.
65 */
66 void SchedulePrepend(u32 priority, std::size_t core, Thread* thread);
67 51
68 /// Reschedule an already scheduled thread based on a new priority 52 /// Notify the scheduler a thread's priority has changed.
69 void Reschedule(u32 priority, std::size_t core, Thread* thread); 53 void AdjustSchedulingOnPriority(Thread* thread, u32 old_priority);
70
71 /// Unschedules a thread.
72 void Unschedule(u32 priority, std::size_t core, Thread* thread);
73 54
74 /// Selects a core and forces it to unload its current thread's context 55 /// Notify the scheduler a thread's core and/or affinity mask has changed.
75 void UnloadThread(std::size_t core); 56 void AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinity_mask, s32 old_core);
76 57
77 /** 58 /**
78 * Takes care of selecting the new scheduled thread in three steps: 59 * Takes care of selecting the new scheduled threads in three steps:
79 * 60 *
80 * 1. First a thread is selected from the top of the priority queue. If no thread 61 * 1. First a thread is selected from the top of the priority queue. If no thread
81 * is obtained then we move to step two, else we are done. 62 * is obtained then we move to step two, else we are done.
@@ -85,8 +66,10 @@ public:
85 * 66 *
86 * 3. Third is no suggested thread is found, we do a second pass and pick a running 67 * 3. Third is no suggested thread is found, we do a second pass and pick a running
87 * thread in another core and swap it with its current thread. 68 * thread in another core and swap it with its current thread.
69 *
70 * returns the cores needing scheduling.
88 */ 71 */
89 void SelectThread(std::size_t core); 72 u32 SelectThreads();
90 73
91 bool HaveReadyThreads(std::size_t core_id) const { 74 bool HaveReadyThreads(std::size_t core_id) const {
92 return !scheduled_queue[core_id].empty(); 75 return !scheduled_queue[core_id].empty();
@@ -149,6 +132,40 @@ private:
149 /// Unlocks the scheduler, reselects threads, interrupts cores for rescheduling 132 /// Unlocks the scheduler, reselects threads, interrupts cores for rescheduling
150 /// and reschedules current core if needed. 133 /// and reschedules current core if needed.
151 void Unlock(); 134 void Unlock();
135
136 void EnableInterruptAndSchedule(u32 cores_pending_reschedule,
137 Core::EmuThreadHandle global_thread);
138
139 /**
140 * Add a thread to the suggested queue of a cpu core. Suggested threads may be
141 * picked if no thread is scheduled to run on the core.
142 */
143 void Suggest(u32 priority, std::size_t core, Thread* thread);
144
145 /**
146 * Remove a thread to the suggested queue of a cpu core. Suggested threads may be
147 * picked if no thread is scheduled to run on the core.
148 */
149 void Unsuggest(u32 priority, std::size_t core, Thread* thread);
150
151 /**
152 * Add a thread to the scheduling queue of a cpu core. The thread is added at the
153 * back the queue in its priority level.
154 */
155 void Schedule(u32 priority, std::size_t core, Thread* thread);
156
157 /**
158 * Add a thread to the scheduling queue of a cpu core. The thread is added at the
159 * front the queue in its priority level.
160 */
161 void SchedulePrepend(u32 priority, std::size_t core, Thread* thread);
162
163 /// Reschedule an already scheduled thread based on a new priority
164 void Reschedule(u32 priority, std::size_t core, Thread* thread);
165
166 /// Unschedules a thread.
167 void Unschedule(u32 priority, std::size_t core, Thread* thread);
168
152 /** 169 /**
153 * Transfers a thread into an specific core. If the destination_core is -1 170 * Transfers a thread into an specific core. If the destination_core is -1
154 * it will be unscheduled from its source core and added into its suggested 171
@@ -170,10 +187,13 @@ private:
170 std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62}; 187 std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62};
171 188
172 /// Scheduler lock mechanisms. 189 /// Scheduler lock mechanisms.
173 std::mutex inner_lock{}; // TODO(Blinkhawk): Replace for a SpinLock 190 bool is_locked{};
191 Common::SpinLock inner_lock{};
174 std::atomic<s64> scope_lock{}; 192 std::atomic<s64> scope_lock{};
175 Core::EmuThreadHandle current_owner{Core::EmuThreadHandle::InvalidHandle()}; 193 Core::EmuThreadHandle current_owner{Core::EmuThreadHandle::InvalidHandle()};
176 194
195 Common::SpinLock global_list_guard{};
196
177 /// Lists all thread ids that aren't deleted/etc. 197 /// Lists all thread ids that aren't deleted/etc.
178 std::vector<std::shared_ptr<Thread>> thread_list; 198 std::vector<std::shared_ptr<Thread>> thread_list;
179 KernelCore& kernel; 199 KernelCore& kernel;
@@ -190,11 +210,11 @@ public:
190 /// Reschedules to the next available thread (call after current thread is suspended) 210 /// Reschedules to the next available thread (call after current thread is suspended)
191 void TryDoContextSwitch(); 211 void TryDoContextSwitch();
192 212
193 /// Unloads currently running thread 213 /// The next two are for SingleCore Only.
194 void UnloadThread(); 214 /// Unload current thread before preempting core.
195 215 void Unload();
196 /// Select the threads in top of the scheduling multilist. 216 /// Reload current thread after core preemption.
197 void SelectThreads(); 217 void Reload();
198 218
199 /// Gets the current running thread 219 /// Gets the current running thread
200 Thread* GetCurrentThread() const; 220 Thread* GetCurrentThread() const;
@@ -209,15 +229,30 @@ public:
209 return is_context_switch_pending; 229 return is_context_switch_pending;
210 } 230 }
211 231
232 void Initialize();
233
212 /// Shutdowns the scheduler. 234 /// Shutdowns the scheduler.
213 void Shutdown(); 235 void Shutdown();
214 236
237 void OnThreadStart();
238
239 std::shared_ptr<Common::Fiber>& ControlContext() {
240 return switch_fiber;
241 }
242
243 const std::shared_ptr<Common::Fiber>& ControlContext() const {
244 return switch_fiber;
245 }
246
215private: 247private:
216 friend class GlobalScheduler; 248 friend class GlobalScheduler;
217 249
218 /// Switches the CPU's active thread context to that of the specified thread 250 /// Switches the CPU's active thread context to that of the specified thread
219 void SwitchContext(); 251 void SwitchContext();
220 252
253 /// When a thread wakes up, it must run this through its new scheduler
254 void SwitchContextStep2();
255
221 /** 256 /**
222 * Called on every context switch to update the internal timestamp 257 * Called on every context switch to update the internal timestamp
223 * This also updates the running time ticks for the given thread and 258 * This also updates the running time ticks for the given thread and
@@ -231,14 +266,24 @@ private:
231 */ 266 */
232 void UpdateLastContextSwitchTime(Thread* thread, Process* process); 267 void UpdateLastContextSwitchTime(Thread* thread, Process* process);
233 268
269 static void OnSwitch(void* this_scheduler);
270 void SwitchToCurrent();
271
234 std::shared_ptr<Thread> current_thread = nullptr; 272 std::shared_ptr<Thread> current_thread = nullptr;
235 std::shared_ptr<Thread> selected_thread = nullptr; 273 std::shared_ptr<Thread> selected_thread = nullptr;
274 std::shared_ptr<Thread> current_thread_prev = nullptr;
275 std::shared_ptr<Thread> selected_thread_set = nullptr;
276 std::shared_ptr<Thread> idle_thread = nullptr;
277
278 std::shared_ptr<Common::Fiber> switch_fiber = nullptr;
236 279
237 Core::System& system; 280 Core::System& system;
238 u64 last_context_switch_time = 0; 281 u64 last_context_switch_time = 0;
239 u64 idle_selection_count = 0; 282 u64 idle_selection_count = 0;
240 const std::size_t core_id; 283 const std::size_t core_id;
241 284
285 Common::SpinLock guard{};
286
242 bool is_context_switch_pending = false; 287 bool is_context_switch_pending = false;
243}; 288};
244 289
@@ -261,6 +306,8 @@ public:
261 sleep_cancelled = true; 306 sleep_cancelled = true;
262 } 307 }
263 308
309 void Release();
310
264private: 311private:
265 Handle& event_handle; 312 Handle& event_handle;
266 Thread* time_task; 313 Thread* time_task;
diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp
index 25438b86b..7b23a6889 100644
--- a/src/core/hle/kernel/server_session.cpp
+++ b/src/core/hle/kernel/server_session.cpp
@@ -17,6 +17,7 @@
17#include "core/hle/kernel/hle_ipc.h" 17#include "core/hle/kernel/hle_ipc.h"
18#include "core/hle/kernel/kernel.h" 18#include "core/hle/kernel/kernel.h"
19#include "core/hle/kernel/process.h" 19#include "core/hle/kernel/process.h"
20#include "core/hle/kernel/scheduler.h"
20#include "core/hle/kernel/server_session.h" 21#include "core/hle/kernel/server_session.h"
21#include "core/hle/kernel/session.h" 22#include "core/hle/kernel/session.h"
22#include "core/hle/kernel/thread.h" 23#include "core/hle/kernel/thread.h"
@@ -168,9 +169,12 @@ ResultCode ServerSession::CompleteSyncRequest() {
168 } 169 }
169 170
170 // Some service requests require the thread to block 171 // Some service requests require the thread to block
171 if (!context.IsThreadWaiting()) { 172 {
172 context.GetThread().ResumeFromWait(); 173 SchedulerLock lock(kernel);
173 context.GetThread().SetWaitSynchronizationResult(result); 174 if (!context.IsThreadWaiting()) {
175 context.GetThread().ResumeFromWait();
176 context.GetThread().SetSynchronizationResults(nullptr, result);
177 }
174 } 178 }
175 179
176 request_queue.Pop(); 180 request_queue.Pop();
@@ -180,8 +184,10 @@ ResultCode ServerSession::CompleteSyncRequest() {
180 184
181ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread, 185ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread,
182 Core::Memory::Memory& memory) { 186 Core::Memory::Memory& memory) {
183 Core::System::GetInstance().CoreTiming().ScheduleEvent(20000, request_event, {}); 187 ResultCode result = QueueSyncRequest(std::move(thread), memory);
184 return QueueSyncRequest(std::move(thread), memory); 188 const u64 delay = kernel.IsMulticore() ? 0U : 20000U;
189 Core::System::GetInstance().CoreTiming().ScheduleEvent(delay, request_event, {});
190 return result;
185} 191}
186 192
187} // namespace Kernel 193} // namespace Kernel
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 4ae4529f5..5db19dcf3 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -10,14 +10,15 @@
10 10
11#include "common/alignment.h" 11#include "common/alignment.h"
12#include "common/assert.h" 12#include "common/assert.h"
13#include "common/fiber.h"
13#include "common/logging/log.h" 14#include "common/logging/log.h"
14#include "common/microprofile.h" 15#include "common/microprofile.h"
15#include "common/string_util.h" 16#include "common/string_util.h"
16#include "core/arm/exclusive_monitor.h" 17#include "core/arm/exclusive_monitor.h"
17#include "core/core.h" 18#include "core/core.h"
18#include "core/core_manager.h"
19#include "core/core_timing.h" 19#include "core/core_timing.h"
20#include "core/core_timing_util.h" 20#include "core/core_timing_util.h"
21#include "core/cpu_manager.h"
21#include "core/hle/kernel/address_arbiter.h" 22#include "core/hle/kernel/address_arbiter.h"
22#include "core/hle/kernel/client_port.h" 23#include "core/hle/kernel/client_port.h"
23#include "core/hle/kernel/client_session.h" 24#include "core/hle/kernel/client_session.h"
@@ -27,6 +28,7 @@
27#include "core/hle/kernel/memory/memory_block.h" 28#include "core/hle/kernel/memory/memory_block.h"
28#include "core/hle/kernel/memory/page_table.h" 29#include "core/hle/kernel/memory/page_table.h"
29#include "core/hle/kernel/mutex.h" 30#include "core/hle/kernel/mutex.h"
31#include "core/hle/kernel/physical_core.h"
30#include "core/hle/kernel/process.h" 32#include "core/hle/kernel/process.h"
31#include "core/hle/kernel/readable_event.h" 33#include "core/hle/kernel/readable_event.h"
32#include "core/hle/kernel/resource_limit.h" 34#include "core/hle/kernel/resource_limit.h"
@@ -37,6 +39,7 @@
37#include "core/hle/kernel/svc_wrap.h" 39#include "core/hle/kernel/svc_wrap.h"
38#include "core/hle/kernel/synchronization.h" 40#include "core/hle/kernel/synchronization.h"
39#include "core/hle/kernel/thread.h" 41#include "core/hle/kernel/thread.h"
42#include "core/hle/kernel/time_manager.h"
40#include "core/hle/kernel/transfer_memory.h" 43#include "core/hle/kernel/transfer_memory.h"
41#include "core/hle/kernel/writable_event.h" 44#include "core/hle/kernel/writable_event.h"
42#include "core/hle/lock.h" 45#include "core/hle/lock.h"
@@ -133,6 +136,7 @@ enum class ResourceLimitValueType {
133 136
134ResultVal<s64> RetrieveResourceLimitValue(Core::System& system, Handle resource_limit, 137ResultVal<s64> RetrieveResourceLimitValue(Core::System& system, Handle resource_limit,
135 u32 resource_type, ResourceLimitValueType value_type) { 138 u32 resource_type, ResourceLimitValueType value_type) {
139 std::lock_guard lock{HLE::g_hle_lock};
136 const auto type = static_cast<ResourceType>(resource_type); 140 const auto type = static_cast<ResourceType>(resource_type);
137 if (!IsValidResourceType(type)) { 141 if (!IsValidResourceType(type)) {
138 LOG_ERROR(Kernel_SVC, "Invalid resource limit type: '{}'", resource_type); 142 LOG_ERROR(Kernel_SVC, "Invalid resource limit type: '{}'", resource_type);
@@ -160,6 +164,7 @@ ResultVal<s64> RetrieveResourceLimitValue(Core::System& system, Handle resource_
160 164
161/// Set the process heap to a given Size. It can both extend and shrink the heap. 165/// Set the process heap to a given Size. It can both extend and shrink the heap.
162static ResultCode SetHeapSize(Core::System& system, VAddr* heap_addr, u64 heap_size) { 166static ResultCode SetHeapSize(Core::System& system, VAddr* heap_addr, u64 heap_size) {
167 std::lock_guard lock{HLE::g_hle_lock};
163 LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", heap_size); 168 LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", heap_size);
164 169
165 // Size must be a multiple of 0x200000 (2MB) and be equal to or less than 8GB. 170 // Size must be a multiple of 0x200000 (2MB) and be equal to or less than 8GB.
@@ -190,6 +195,7 @@ static ResultCode SetHeapSize32(Core::System& system, u32* heap_addr, u32 heap_s
190 195
191static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask, 196static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask,
192 u32 attribute) { 197 u32 attribute) {
198 std::lock_guard lock{HLE::g_hle_lock};
193 LOG_DEBUG(Kernel_SVC, 199 LOG_DEBUG(Kernel_SVC,
194 "called, address=0x{:016X}, size=0x{:X}, mask=0x{:08X}, attribute=0x{:08X}", address, 200 "called, address=0x{:016X}, size=0x{:X}, mask=0x{:08X}, attribute=0x{:08X}", address,
195 size, mask, attribute); 201 size, mask, attribute);
@@ -226,8 +232,15 @@ static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 si
226 static_cast<Memory::MemoryAttribute>(attribute)); 232 static_cast<Memory::MemoryAttribute>(attribute));
227} 233}
228 234
235static ResultCode SetMemoryAttribute32(Core::System& system, u32 address, u32 size, u32 mask,
236 u32 attribute) {
237 return SetMemoryAttribute(system, static_cast<VAddr>(address), static_cast<std::size_t>(size),
238 mask, attribute);
239}
240
229/// Maps a memory range into a different range. 241/// Maps a memory range into a different range.
230static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) { 242static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
243 std::lock_guard lock{HLE::g_hle_lock};
231 LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr, 244 LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
232 src_addr, size); 245 src_addr, size);
233 246
@@ -241,8 +254,14 @@ static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr
241 return page_table.Map(dst_addr, src_addr, size); 254 return page_table.Map(dst_addr, src_addr, size);
242} 255}
243 256
257static ResultCode MapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
258 return MapMemory(system, static_cast<VAddr>(dst_addr), static_cast<VAddr>(src_addr),
259 static_cast<std::size_t>(size));
260}
261
244/// Unmaps a region that was previously mapped with svcMapMemory 262/// Unmaps a region that was previously mapped with svcMapMemory
245static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) { 263static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
264 std::lock_guard lock{HLE::g_hle_lock};
246 LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr, 265 LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
247 src_addr, size); 266 src_addr, size);
248 267
@@ -256,9 +275,15 @@ static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_ad
256 return page_table.Unmap(dst_addr, src_addr, size); 275 return page_table.Unmap(dst_addr, src_addr, size);
257} 276}
258 277
278static ResultCode UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
279 return UnmapMemory(system, static_cast<VAddr>(dst_addr), static_cast<VAddr>(src_addr),
280 static_cast<std::size_t>(size));
281}
282
259/// Connect to an OS service given the port name, returns the handle to the port to out 283/// Connect to an OS service given the port name, returns the handle to the port to out
260static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle, 284static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle,
261 VAddr port_name_address) { 285 VAddr port_name_address) {
286 std::lock_guard lock{HLE::g_hle_lock};
262 auto& memory = system.Memory(); 287 auto& memory = system.Memory();
263 288
264 if (!memory.IsValidVirtualAddress(port_name_address)) { 289 if (!memory.IsValidVirtualAddress(port_name_address)) {
@@ -317,11 +342,30 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
317 LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName()); 342 LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
318 343
319 auto thread = system.CurrentScheduler().GetCurrentThread(); 344 auto thread = system.CurrentScheduler().GetCurrentThread();
320 thread->InvalidateWakeupCallback(); 345 {
321 thread->SetStatus(ThreadStatus::WaitIPC); 346 SchedulerLock lock(system.Kernel());
322 system.PrepareReschedule(thread->GetProcessorID()); 347 thread->InvalidateHLECallback();
348 thread->SetStatus(ThreadStatus::WaitIPC);
349 session->SendSyncRequest(SharedFrom(thread), system.Memory());
350 }
351
352 if (thread->HasHLECallback()) {
353 Handle event_handle = thread->GetHLETimeEvent();
354 if (event_handle != InvalidHandle) {
355 auto& time_manager = system.Kernel().TimeManager();
356 time_manager.UnscheduleTimeEvent(event_handle);
357 }
358
359 {
360 SchedulerLock lock(system.Kernel());
361 auto* sync_object = thread->GetHLESyncObject();
362 sync_object->RemoveWaitingThread(SharedFrom(thread));
363 }
364
365 thread->InvokeHLECallback(SharedFrom(thread));
366 }
323 367
324 return session->SendSyncRequest(SharedFrom(thread), system.Memory()); 368 return thread->GetSignalingResult();
325} 369}
326 370
327static ResultCode SendSyncRequest32(Core::System& system, Handle handle) { 371static ResultCode SendSyncRequest32(Core::System& system, Handle handle) {
@@ -383,6 +427,15 @@ static ResultCode GetProcessId(Core::System& system, u64* process_id, Handle han
383 return ERR_INVALID_HANDLE; 427 return ERR_INVALID_HANDLE;
384} 428}
385 429
430static ResultCode GetProcessId32(Core::System& system, u32* process_id_low, u32* process_id_high,
431 Handle handle) {
432 u64 process_id{};
433 const auto result = GetProcessId(system, &process_id, handle);
434 *process_id_low = static_cast<u32>(process_id);
435 *process_id_high = static_cast<u32>(process_id >> 32);
436 return result;
437}
438
386/// Wait for the given handles to synchronize, timeout after the specified nanoseconds 439/// Wait for the given handles to synchronize, timeout after the specified nanoseconds
387static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr handles_address, 440static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr handles_address,
388 u64 handle_count, s64 nano_seconds) { 441 u64 handle_count, s64 nano_seconds) {
@@ -447,10 +500,13 @@ static ResultCode CancelSynchronization(Core::System& system, Handle thread_hand
447 } 500 }
448 501
449 thread->CancelWait(); 502 thread->CancelWait();
450 system.PrepareReschedule(thread->GetProcessorID());
451 return RESULT_SUCCESS; 503 return RESULT_SUCCESS;
452} 504}
453 505
506static ResultCode CancelSynchronization32(Core::System& system, Handle thread_handle) {
507 return CancelSynchronization(system, thread_handle);
508}
509
454/// Attempts to locks a mutex, creating it if it does not already exist 510/// Attempts to locks a mutex, creating it if it does not already exist
455static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_handle, 511static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_handle,
456 VAddr mutex_addr, Handle requesting_thread_handle) { 512 VAddr mutex_addr, Handle requesting_thread_handle) {
@@ -475,6 +531,12 @@ static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_hand
475 requesting_thread_handle); 531 requesting_thread_handle);
476} 532}
477 533
534static ResultCode ArbitrateLock32(Core::System& system, Handle holding_thread_handle,
535 u32 mutex_addr, Handle requesting_thread_handle) {
536 return ArbitrateLock(system, holding_thread_handle, static_cast<VAddr>(mutex_addr),
537 requesting_thread_handle);
538}
539
478/// Unlock a mutex 540/// Unlock a mutex
479static ResultCode ArbitrateUnlock(Core::System& system, VAddr mutex_addr) { 541static ResultCode ArbitrateUnlock(Core::System& system, VAddr mutex_addr) {
480 LOG_TRACE(Kernel_SVC, "called mutex_addr=0x{:X}", mutex_addr); 542 LOG_TRACE(Kernel_SVC, "called mutex_addr=0x{:X}", mutex_addr);
@@ -494,6 +556,10 @@ static ResultCode ArbitrateUnlock(Core::System& system, VAddr mutex_addr) {
494 return current_process->GetMutex().Release(mutex_addr); 556 return current_process->GetMutex().Release(mutex_addr);
495} 557}
496 558
559static ResultCode ArbitrateUnlock32(Core::System& system, u32 mutex_addr) {
560 return ArbitrateUnlock(system, static_cast<VAddr>(mutex_addr));
561}
562
497enum class BreakType : u32 { 563enum class BreakType : u32 {
498 Panic = 0, 564 Panic = 0,
499 AssertionFailed = 1, 565 AssertionFailed = 1,
@@ -594,6 +660,7 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
594 info2, has_dumped_buffer ? std::make_optional(debug_buffer) : std::nullopt); 660 info2, has_dumped_buffer ? std::make_optional(debug_buffer) : std::nullopt);
595 661
596 if (!break_reason.signal_debugger) { 662 if (!break_reason.signal_debugger) {
663 SchedulerLock lock(system.Kernel());
597 LOG_CRITICAL( 664 LOG_CRITICAL(
598 Debug_Emulated, 665 Debug_Emulated,
599 "Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}", 666 "Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}",
@@ -605,14 +672,16 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
605 const auto thread_processor_id = current_thread->GetProcessorID(); 672 const auto thread_processor_id = current_thread->GetProcessorID();
606 system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace(); 673 system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace();
607 674
608 system.Kernel().CurrentProcess()->PrepareForTermination();
609
610 // Kill the current thread 675 // Kill the current thread
676 system.Kernel().ExceptionalExit();
611 current_thread->Stop(); 677 current_thread->Stop();
612 system.PrepareReschedule();
613 } 678 }
614} 679}
615 680
681static void Break32(Core::System& system, u32 reason, u32 info1, u32 info2) {
682 Break(system, reason, static_cast<u64>(info1), static_cast<u64>(info2));
683}
684
616/// Used to output a message on a debug hardware unit - does nothing on a retail unit 685/// Used to output a message on a debug hardware unit - does nothing on a retail unit
617static void OutputDebugString([[maybe_unused]] Core::System& system, VAddr address, u64 len) { 686static void OutputDebugString([[maybe_unused]] Core::System& system, VAddr address, u64 len) {
618 if (len == 0) { 687 if (len == 0) {
@@ -627,6 +696,7 @@ static void OutputDebugString([[maybe_unused]] Core::System& system, VAddr addre
627/// Gets system/memory information for the current process 696/// Gets system/memory information for the current process
628static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 handle, 697static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 handle,
629 u64 info_sub_id) { 698 u64 info_sub_id) {
699 std::lock_guard lock{HLE::g_hle_lock};
630 LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}", info_id, 700 LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}", info_id,
631 info_sub_id, handle); 701 info_sub_id, handle);
632 702
@@ -863,9 +933,9 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
863 if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) { 933 if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) {
864 const u64 thread_ticks = current_thread->GetTotalCPUTimeTicks(); 934 const u64 thread_ticks = current_thread->GetTotalCPUTimeTicks();
865 935
866 out_ticks = thread_ticks + (core_timing.GetTicks() - prev_ctx_ticks); 936 out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
867 } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) { 937 } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) {
868 out_ticks = core_timing.GetTicks() - prev_ctx_ticks; 938 out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks;
869 } 939 }
870 940
871 *result = out_ticks; 941 *result = out_ticks;
@@ -892,6 +962,7 @@ static ResultCode GetInfo32(Core::System& system, u32* result_low, u32* result_h
892 962
893/// Maps memory at a desired address 963/// Maps memory at a desired address
894static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) { 964static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
965 std::lock_guard lock{HLE::g_hle_lock};
895 LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size); 966 LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
896 967
897 if (!Common::Is4KBAligned(addr)) { 968 if (!Common::Is4KBAligned(addr)) {
@@ -939,8 +1010,13 @@ static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size)
939 return page_table.MapPhysicalMemory(addr, size); 1010 return page_table.MapPhysicalMemory(addr, size);
940} 1011}
941 1012
1013static ResultCode MapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
1014 return MapPhysicalMemory(system, static_cast<VAddr>(addr), static_cast<std::size_t>(size));
1015}
1016
942/// Unmaps memory previously mapped via MapPhysicalMemory 1017/// Unmaps memory previously mapped via MapPhysicalMemory
943static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) { 1018static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
1019 std::lock_guard lock{HLE::g_hle_lock};
944 LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size); 1020 LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
945 1021
946 if (!Common::Is4KBAligned(addr)) { 1022 if (!Common::Is4KBAligned(addr)) {
@@ -988,6 +1064,10 @@ static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size
988 return page_table.UnmapPhysicalMemory(addr, size); 1064 return page_table.UnmapPhysicalMemory(addr, size);
989} 1065}
990 1066
1067static ResultCode UnmapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
1068 return UnmapPhysicalMemory(system, static_cast<VAddr>(addr), static_cast<std::size_t>(size));
1069}
1070
991/// Sets the thread activity 1071/// Sets the thread activity
992static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 activity) { 1072static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 activity) {
993 LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", handle, activity); 1073 LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", handle, activity);
@@ -1017,10 +1097,11 @@ static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 act
1017 return ERR_BUSY; 1097 return ERR_BUSY;
1018 } 1098 }
1019 1099
1020 thread->SetActivity(static_cast<ThreadActivity>(activity)); 1100 return thread->SetActivity(static_cast<ThreadActivity>(activity));
1101}
1021 1102
1022 system.PrepareReschedule(thread->GetProcessorID()); 1103static ResultCode SetThreadActivity32(Core::System& system, Handle handle, u32 activity) {
1023 return RESULT_SUCCESS; 1104 return SetThreadActivity(system, handle, activity);
1024} 1105}
1025 1106
1026/// Gets the thread context 1107/// Gets the thread context
@@ -1064,6 +1145,10 @@ static ResultCode GetThreadContext(Core::System& system, VAddr thread_context, H
1064 return RESULT_SUCCESS; 1145 return RESULT_SUCCESS;
1065} 1146}
1066 1147
1148static ResultCode GetThreadContext32(Core::System& system, u32 thread_context, Handle handle) {
1149 return GetThreadContext(system, static_cast<VAddr>(thread_context), handle);
1150}
1151
1067/// Gets the priority for the specified thread 1152/// Gets the priority for the specified thread
1068static ResultCode GetThreadPriority(Core::System& system, u32* priority, Handle handle) { 1153static ResultCode GetThreadPriority(Core::System& system, u32* priority, Handle handle) {
1069 LOG_TRACE(Kernel_SVC, "called"); 1154 LOG_TRACE(Kernel_SVC, "called");
@@ -1071,6 +1156,7 @@ static ResultCode GetThreadPriority(Core::System& system, u32* priority, Handle
1071 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 1156 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
1072 const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(handle); 1157 const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(handle);
1073 if (!thread) { 1158 if (!thread) {
1159 *priority = 0;
1074 LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle); 1160 LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle);
1075 return ERR_INVALID_HANDLE; 1161 return ERR_INVALID_HANDLE;
1076 } 1162 }
@@ -1105,18 +1191,26 @@ static ResultCode SetThreadPriority(Core::System& system, Handle handle, u32 pri
1105 1191
1106 thread->SetPriority(priority); 1192 thread->SetPriority(priority);
1107 1193
1108 system.PrepareReschedule(thread->GetProcessorID());
1109 return RESULT_SUCCESS; 1194 return RESULT_SUCCESS;
1110} 1195}
1111 1196
1197static ResultCode SetThreadPriority32(Core::System& system, Handle handle, u32 priority) {
1198 return SetThreadPriority(system, handle, priority);
1199}
1200
1112/// Get which CPU core is executing the current thread 1201/// Get which CPU core is executing the current thread
1113static u32 GetCurrentProcessorNumber(Core::System& system) { 1202static u32 GetCurrentProcessorNumber(Core::System& system) {
1114 LOG_TRACE(Kernel_SVC, "called"); 1203 LOG_TRACE(Kernel_SVC, "called");
1115 return system.CurrentScheduler().GetCurrentThread()->GetProcessorID(); 1204 return static_cast<u32>(system.CurrentPhysicalCore().CoreIndex());
1205}
1206
1207static u32 GetCurrentProcessorNumber32(Core::System& system) {
1208 return GetCurrentProcessorNumber(system);
1116} 1209}
1117 1210
1118static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_handle, VAddr addr, 1211static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_handle, VAddr addr,
1119 u64 size, u32 permissions) { 1212 u64 size, u32 permissions) {
1213 std::lock_guard lock{HLE::g_hle_lock};
1120 LOG_TRACE(Kernel_SVC, 1214 LOG_TRACE(Kernel_SVC,
1121 "called, shared_memory_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}", 1215 "called, shared_memory_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
1122 shared_memory_handle, addr, size, permissions); 1216 shared_memory_handle, addr, size, permissions);
@@ -1187,9 +1281,16 @@ static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_han
1187 return shared_memory->Map(*current_process, addr, size, permission_type); 1281 return shared_memory->Map(*current_process, addr, size, permission_type);
1188} 1282}
1189 1283
1284static ResultCode MapSharedMemory32(Core::System& system, Handle shared_memory_handle, u32 addr,
1285 u32 size, u32 permissions) {
1286 return MapSharedMemory(system, shared_memory_handle, static_cast<VAddr>(addr),
1287 static_cast<std::size_t>(size), permissions);
1288}
1289
1190static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address, 1290static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address,
1191 VAddr page_info_address, Handle process_handle, 1291 VAddr page_info_address, Handle process_handle,
1192 VAddr address) { 1292 VAddr address) {
1293 std::lock_guard lock{HLE::g_hle_lock};
1193 LOG_TRACE(Kernel_SVC, "called process=0x{:08X} address={:X}", process_handle, address); 1294 LOG_TRACE(Kernel_SVC, "called process=0x{:08X} address={:X}", process_handle, address);
1194 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 1295 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
1195 std::shared_ptr<Process> process = handle_table.Get<Process>(process_handle); 1296 std::shared_ptr<Process> process = handle_table.Get<Process>(process_handle);
@@ -1372,6 +1473,7 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
1372/// Exits the current process 1473/// Exits the current process
1373static void ExitProcess(Core::System& system) { 1474static void ExitProcess(Core::System& system) {
1374 auto* current_process = system.Kernel().CurrentProcess(); 1475 auto* current_process = system.Kernel().CurrentProcess();
1476 UNIMPLEMENTED();
1375 1477
1376 LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID()); 1478 LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID());
1377 ASSERT_MSG(current_process->GetStatus() == ProcessStatus::Running, 1479 ASSERT_MSG(current_process->GetStatus() == ProcessStatus::Running,
@@ -1381,8 +1483,10 @@ static void ExitProcess(Core::System& system) {
1381 1483
1382 // Kill the current thread 1484 // Kill the current thread
1383 system.CurrentScheduler().GetCurrentThread()->Stop(); 1485 system.CurrentScheduler().GetCurrentThread()->Stop();
1486}
1384 1487
1385 system.PrepareReschedule(); 1488static void ExitProcess32(Core::System& system) {
1489 ExitProcess(system);
1386} 1490}
1387 1491
1388/// Creates a new thread 1492/// Creates a new thread
@@ -1428,9 +1532,10 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e
1428 1532
1429 ASSERT(kernel.CurrentProcess()->GetResourceLimit()->Reserve(ResourceType::Threads, 1)); 1533 ASSERT(kernel.CurrentProcess()->GetResourceLimit()->Reserve(ResourceType::Threads, 1));
1430 1534
1535 ThreadType type = THREADTYPE_USER;
1431 CASCADE_RESULT(std::shared_ptr<Thread> thread, 1536 CASCADE_RESULT(std::shared_ptr<Thread> thread,
1432 Thread::Create(kernel, "", entry_point, priority, arg, processor_id, stack_top, 1537 Thread::Create(system, type, "", entry_point, priority, arg, processor_id,
1433 *current_process)); 1538 stack_top, current_process));
1434 1539
1435 const auto new_thread_handle = current_process->GetHandleTable().Create(thread); 1540 const auto new_thread_handle = current_process->GetHandleTable().Create(thread);
1436 if (new_thread_handle.Failed()) { 1541 if (new_thread_handle.Failed()) {
@@ -1444,11 +1549,15 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e
1444 thread->SetName( 1549 thread->SetName(
1445 fmt::format("thread[entry_point={:X}, handle={:X}]", entry_point, *new_thread_handle)); 1550 fmt::format("thread[entry_point={:X}, handle={:X}]", entry_point, *new_thread_handle));
1446 1551
1447 system.PrepareReschedule(thread->GetProcessorID());
1448
1449 return RESULT_SUCCESS; 1552 return RESULT_SUCCESS;
1450} 1553}
1451 1554
1555static ResultCode CreateThread32(Core::System& system, Handle* out_handle, u32 priority,
1556 u32 entry_point, u32 arg, u32 stack_top, s32 processor_id) {
1557 return CreateThread(system, out_handle, static_cast<VAddr>(entry_point), static_cast<u64>(arg),
1558 static_cast<VAddr>(stack_top), priority, processor_id);
1559}
1560
1452/// Starts the thread for the provided handle 1561/// Starts the thread for the provided handle
1453static ResultCode StartThread(Core::System& system, Handle thread_handle) { 1562static ResultCode StartThread(Core::System& system, Handle thread_handle) {
1454 LOG_DEBUG(Kernel_SVC, "called thread=0x{:08X}", thread_handle); 1563 LOG_DEBUG(Kernel_SVC, "called thread=0x{:08X}", thread_handle);
@@ -1463,13 +1572,11 @@ static ResultCode StartThread(Core::System& system, Handle thread_handle) {
1463 1572
1464 ASSERT(thread->GetStatus() == ThreadStatus::Dormant); 1573 ASSERT(thread->GetStatus() == ThreadStatus::Dormant);
1465 1574
1466 thread->ResumeFromWait(); 1575 return thread->Start();
1467 1576}
1468 if (thread->GetStatus() == ThreadStatus::Ready) {
1469 system.PrepareReschedule(thread->GetProcessorID());
1470 }
1471 1577
1472 return RESULT_SUCCESS; 1578static ResultCode StartThread32(Core::System& system, Handle thread_handle) {
1579 return StartThread(system, thread_handle);
1473} 1580}
1474 1581
1475/// Called when a thread exits 1582/// Called when a thread exits
@@ -1477,9 +1584,12 @@ static void ExitThread(Core::System& system) {
1477 LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC()); 1584 LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC());
1478 1585
1479 auto* const current_thread = system.CurrentScheduler().GetCurrentThread(); 1586 auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
1480 current_thread->Stop();
1481 system.GlobalScheduler().RemoveThread(SharedFrom(current_thread)); 1587 system.GlobalScheduler().RemoveThread(SharedFrom(current_thread));
1482 system.PrepareReschedule(); 1588 current_thread->Stop();
1589}
1590
1591static void ExitThread32(Core::System& system) {
1592 ExitThread(system);
1483} 1593}
1484 1594
1485/// Sleep the current thread 1595/// Sleep the current thread
@@ -1498,15 +1608,21 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
1498 1608
1499 if (nanoseconds <= 0) { 1609 if (nanoseconds <= 0) {
1500 switch (static_cast<SleepType>(nanoseconds)) { 1610 switch (static_cast<SleepType>(nanoseconds)) {
1501 case SleepType::YieldWithoutLoadBalancing: 1611 case SleepType::YieldWithoutLoadBalancing: {
1502 is_redundant = current_thread->YieldSimple(); 1612 auto pair = current_thread->YieldSimple();
1613 is_redundant = pair.second;
1503 break; 1614 break;
1504 case SleepType::YieldWithLoadBalancing: 1615 }
1505 is_redundant = current_thread->YieldAndBalanceLoad(); 1616 case SleepType::YieldWithLoadBalancing: {
1617 auto pair = current_thread->YieldAndBalanceLoad();
1618 is_redundant = pair.second;
1506 break; 1619 break;
1507 case SleepType::YieldAndWaitForLoadBalancing: 1620 }
1508 is_redundant = current_thread->YieldAndWaitForLoadBalancing(); 1621 case SleepType::YieldAndWaitForLoadBalancing: {
1622 auto pair = current_thread->YieldAndWaitForLoadBalancing();
1623 is_redundant = pair.second;
1509 break; 1624 break;
1625 }
1510 default: 1626 default:
1511 UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds); 1627 UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
1512 } 1628 }
@@ -1514,13 +1630,18 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
1514 current_thread->Sleep(nanoseconds); 1630 current_thread->Sleep(nanoseconds);
1515 } 1631 }
1516 1632
1517 if (is_redundant) { 1633 if (is_redundant && !system.Kernel().IsMulticore()) {
1518 // If it's redundant, the core is pretty much idle. Some games keep idling 1634 system.Kernel().ExitSVCProfile();
1519 // a core while it's doing nothing, we advance timing to avoid costly continuous 1635 system.CoreTiming().AddTicks(1000U);
1520 // calls. 1636 system.GetCpuManager().PreemptSingleCore();
1521 system.CoreTiming().AddTicks(2000); 1637 system.Kernel().EnterSVCProfile();
1522 } 1638 }
1523 system.PrepareReschedule(current_thread->GetProcessorID()); 1639}
1640
1641static void SleepThread32(Core::System& system, u32 nanoseconds_low, u32 nanoseconds_high) {
1642 const s64 nanoseconds = static_cast<s64>(static_cast<u64>(nanoseconds_low) |
1643 (static_cast<u64>(nanoseconds_high) << 32));
1644 SleepThread(system, nanoseconds);
1524} 1645}
1525 1646
1526/// Wait process wide key atomic 1647/// Wait process wide key atomic
@@ -1547,31 +1668,69 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add
1547 } 1668 }
1548 1669
1549 ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4)); 1670 ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));
1550 1671 auto& kernel = system.Kernel();
1672 Handle event_handle;
1673 Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
1551 auto* const current_process = system.Kernel().CurrentProcess(); 1674 auto* const current_process = system.Kernel().CurrentProcess();
1552 const auto& handle_table = current_process->GetHandleTable(); 1675 {
1553 std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle); 1676 SchedulerLockAndSleep lock(kernel, event_handle, current_thread, nano_seconds);
1554 ASSERT(thread); 1677 const auto& handle_table = current_process->GetHandleTable();
1678 std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
1679 ASSERT(thread);
1680
1681 current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
1682
1683 if (thread->IsPendingTermination()) {
1684 lock.CancelSleep();
1685 return ERR_THREAD_TERMINATING;
1686 }
1687
1688 const auto release_result = current_process->GetMutex().Release(mutex_addr);
1689 if (release_result.IsError()) {
1690 lock.CancelSleep();
1691 return release_result;
1692 }
1693
1694 if (nano_seconds == 0) {
1695 lock.CancelSleep();
1696 return RESULT_TIMEOUT;
1697 }
1555 1698
1556 const auto release_result = current_process->GetMutex().Release(mutex_addr); 1699 current_thread->SetCondVarWaitAddress(condition_variable_addr);
1557 if (release_result.IsError()) { 1700 current_thread->SetMutexWaitAddress(mutex_addr);
1558 return release_result; 1701 current_thread->SetWaitHandle(thread_handle);
1702 current_thread->SetStatus(ThreadStatus::WaitCondVar);
1703 current_process->InsertConditionVariableThread(SharedFrom(current_thread));
1559 } 1704 }
1560 1705
1561 Thread* current_thread = system.CurrentScheduler().GetCurrentThread(); 1706 if (event_handle != InvalidHandle) {
1562 current_thread->SetCondVarWaitAddress(condition_variable_addr); 1707 auto& time_manager = kernel.TimeManager();
1563 current_thread->SetMutexWaitAddress(mutex_addr); 1708 time_manager.UnscheduleTimeEvent(event_handle);
1564 current_thread->SetWaitHandle(thread_handle); 1709 }
1565 current_thread->SetStatus(ThreadStatus::WaitCondVar); 1710
1566 current_thread->InvalidateWakeupCallback(); 1711 {
1567 current_process->InsertConditionVariableThread(SharedFrom(current_thread)); 1712 SchedulerLock lock(kernel);
1568 1713
1569 current_thread->WakeAfterDelay(nano_seconds); 1714 auto* owner = current_thread->GetLockOwner();
1715 if (owner != nullptr) {
1716 owner->RemoveMutexWaiter(SharedFrom(current_thread));
1717 }
1570 1718
1719 current_process->RemoveConditionVariableThread(SharedFrom(current_thread));
1720 }
1571 // Note: Deliberately don't attempt to inherit the lock owner's priority. 1721 // Note: Deliberately don't attempt to inherit the lock owner's priority.
1572 1722
1573 system.PrepareReschedule(current_thread->GetProcessorID()); 1723 return current_thread->GetSignalingResult();
1574 return RESULT_SUCCESS; 1724}
1725
1726static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 mutex_addr,
1727 u32 condition_variable_addr, Handle thread_handle,
1728 u32 nanoseconds_low, u32 nanoseconds_high) {
1729 const s64 nanoseconds =
1730 static_cast<s64>(nanoseconds_low | (static_cast<u64>(nanoseconds_high) << 32));
1731 return WaitProcessWideKeyAtomic(system, static_cast<VAddr>(mutex_addr),
1732 static_cast<VAddr>(condition_variable_addr), thread_handle,
1733 nanoseconds);
1575} 1734}
1576 1735
1577/// Signal process wide key 1736/// Signal process wide key
@@ -1582,7 +1741,9 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_
1582 ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4)); 1741 ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));
1583 1742
1584 // Retrieve a list of all threads that are waiting for this condition variable. 1743 // Retrieve a list of all threads that are waiting for this condition variable.
1585 auto* const current_process = system.Kernel().CurrentProcess(); 1744 auto& kernel = system.Kernel();
1745 SchedulerLock lock(kernel);
1746 auto* const current_process = kernel.CurrentProcess();
1586 std::vector<std::shared_ptr<Thread>> waiting_threads = 1747 std::vector<std::shared_ptr<Thread>> waiting_threads =
1587 current_process->GetConditionVariableThreads(condition_variable_addr); 1748 current_process->GetConditionVariableThreads(condition_variable_addr);
1588 1749
@@ -1591,7 +1752,7 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_
1591 std::size_t last = waiting_threads.size(); 1752 std::size_t last = waiting_threads.size();
1592 if (target > 0) 1753 if (target > 0)
1593 last = std::min(waiting_threads.size(), static_cast<std::size_t>(target)); 1754 last = std::min(waiting_threads.size(), static_cast<std::size_t>(target));
1594 1755 auto& time_manager = kernel.TimeManager();
1595 for (std::size_t index = 0; index < last; ++index) { 1756 for (std::size_t index = 0; index < last; ++index) {
1596 auto& thread = waiting_threads[index]; 1757 auto& thread = waiting_threads[index];
1597 1758
@@ -1599,7 +1760,6 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_
1599 1760
1600 // liberate Cond Var Thread. 1761 // liberate Cond Var Thread.
1601 current_process->RemoveConditionVariableThread(thread); 1762 current_process->RemoveConditionVariableThread(thread);
1602 thread->SetCondVarWaitAddress(0);
1603 1763
1604 const std::size_t current_core = system.CurrentCoreIndex(); 1764 const std::size_t current_core = system.CurrentCoreIndex();
1605 auto& monitor = system.Monitor(); 1765 auto& monitor = system.Monitor();
@@ -1610,10 +1770,8 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_
1610 u32 update_val = 0; 1770 u32 update_val = 0;
1611 const VAddr mutex_address = thread->GetMutexWaitAddress(); 1771 const VAddr mutex_address = thread->GetMutexWaitAddress();
1612 do { 1772 do {
1613 monitor.SetExclusive(current_core, mutex_address);
1614
1615 // If the mutex is not yet acquired, acquire it. 1773 // If the mutex is not yet acquired, acquire it.
1616 mutex_val = memory.Read32(mutex_address); 1774 mutex_val = monitor.ExclusiveRead32(current_core, mutex_address);
1617 1775
1618 if (mutex_val != 0) { 1776 if (mutex_val != 0) {
1619 update_val = mutex_val | Mutex::MutexHasWaitersFlag; 1777 update_val = mutex_val | Mutex::MutexHasWaitersFlag;
@@ -1621,33 +1779,28 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_
1621 update_val = thread->GetWaitHandle(); 1779 update_val = thread->GetWaitHandle();
1622 } 1780 }
1623 } while (!monitor.ExclusiveWrite32(current_core, mutex_address, update_val)); 1781 } while (!monitor.ExclusiveWrite32(current_core, mutex_address, update_val));
1782 monitor.ClearExclusive();
1624 if (mutex_val == 0) { 1783 if (mutex_val == 0) {
1625 // We were able to acquire the mutex, resume this thread. 1784 // We were able to acquire the mutex, resume this thread.
1626 ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar);
1627 thread->ResumeFromWait();
1628
1629 auto* const lock_owner = thread->GetLockOwner(); 1785 auto* const lock_owner = thread->GetLockOwner();
1630 if (lock_owner != nullptr) { 1786 if (lock_owner != nullptr) {
1631 lock_owner->RemoveMutexWaiter(thread); 1787 lock_owner->RemoveMutexWaiter(thread);
1632 } 1788 }
1633 1789
1634 thread->SetLockOwner(nullptr); 1790 thread->SetLockOwner(nullptr);
1635 thread->SetMutexWaitAddress(0); 1791 thread->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
1636 thread->SetWaitHandle(0); 1792 thread->ResumeFromWait();
1637 thread->SetWaitSynchronizationResult(RESULT_SUCCESS);
1638 system.PrepareReschedule(thread->GetProcessorID());
1639 } else { 1793 } else {
1640 // The mutex is already owned by some other thread, make this thread wait on it. 1794 // The mutex is already owned by some other thread, make this thread wait on it.
1641 const Handle owner_handle = static_cast<Handle>(mutex_val & Mutex::MutexOwnerMask); 1795 const Handle owner_handle = static_cast<Handle>(mutex_val & Mutex::MutexOwnerMask);
1642 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 1796 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
1643 auto owner = handle_table.Get<Thread>(owner_handle); 1797 auto owner = handle_table.Get<Thread>(owner_handle);
1644 ASSERT(owner); 1798 ASSERT(owner);
1645 ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar); 1799 if (thread->GetStatus() == ThreadStatus::WaitCondVar) {
1646 thread->InvalidateWakeupCallback(); 1800 thread->SetStatus(ThreadStatus::WaitMutex);
1647 thread->SetStatus(ThreadStatus::WaitMutex); 1801 }
1648 1802
1649 owner->AddMutexWaiter(thread); 1803 owner->AddMutexWaiter(thread);
1650 system.PrepareReschedule(thread->GetProcessorID());
1651 } 1804 }
1652 } 1805 }
1653} 1806}
@@ -1678,12 +1831,15 @@ static ResultCode WaitForAddress(Core::System& system, VAddr address, u32 type,
1678 auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter(); 1831 auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter();
1679 const ResultCode result = 1832 const ResultCode result =
1680 address_arbiter.WaitForAddress(address, arbitration_type, value, timeout); 1833 address_arbiter.WaitForAddress(address, arbitration_type, value, timeout);
1681 if (result == RESULT_SUCCESS) {
1682 system.PrepareReschedule();
1683 }
1684 return result; 1834 return result;
1685} 1835}
1686 1836
1837static ResultCode WaitForAddress32(Core::System& system, u32 address, u32 type, s32 value,
1838 u32 timeout_low, u32 timeout_high) {
1839 s64 timeout = static_cast<s64>(timeout_low | (static_cast<u64>(timeout_high) << 32));
1840 return WaitForAddress(system, static_cast<VAddr>(address), type, value, timeout);
1841}
1842
1687// Signals to an address (via Address Arbiter) 1843// Signals to an address (via Address Arbiter)
1688static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type, s32 value, 1844static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type, s32 value,
1689 s32 num_to_wake) { 1845 s32 num_to_wake) {
@@ -1707,6 +1863,11 @@ static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type,
1707 return address_arbiter.SignalToAddress(address, signal_type, value, num_to_wake); 1863 return address_arbiter.SignalToAddress(address, signal_type, value, num_to_wake);
1708} 1864}
1709 1865
1866static ResultCode SignalToAddress32(Core::System& system, u32 address, u32 type, s32 value,
1867 s32 num_to_wake) {
1868 return SignalToAddress(system, static_cast<VAddr>(address), type, value, num_to_wake);
1869}
1870
1710static void KernelDebug([[maybe_unused]] Core::System& system, 1871static void KernelDebug([[maybe_unused]] Core::System& system,
1711 [[maybe_unused]] u32 kernel_debug_type, [[maybe_unused]] u64 param1, 1872 [[maybe_unused]] u32 kernel_debug_type, [[maybe_unused]] u64 param1,
1712 [[maybe_unused]] u64 param2, [[maybe_unused]] u64 param3) { 1873 [[maybe_unused]] u64 param2, [[maybe_unused]] u64 param3) {
@@ -1725,14 +1886,21 @@ static u64 GetSystemTick(Core::System& system) {
1725 auto& core_timing = system.CoreTiming(); 1886 auto& core_timing = system.CoreTiming();
1726 1887
1727 // Returns the value of cntpct_el0 (https://switchbrew.org/wiki/SVC#svcGetSystemTick) 1888 // Returns the value of cntpct_el0 (https://switchbrew.org/wiki/SVC#svcGetSystemTick)
1728 const u64 result{Core::Timing::CpuCyclesToClockCycles(system.CoreTiming().GetTicks())}; 1889 const u64 result{system.CoreTiming().GetClockTicks()};
1729 1890
1730 // Advance time to defeat dumb games that busy-wait for the frame to end. 1891 if (!system.Kernel().IsMulticore()) {
1731 core_timing.AddTicks(400); 1892 core_timing.AddTicks(400U);
1893 }
1732 1894
1733 return result; 1895 return result;
1734} 1896}
1735 1897
1898static void GetSystemTick32(Core::System& system, u32* time_low, u32* time_high) {
1899 u64 time = GetSystemTick(system);
1900 *time_low = static_cast<u32>(time);
1901 *time_high = static_cast<u32>(time >> 32);
1902}
1903
1736/// Close a handle 1904/// Close a handle
1737static ResultCode CloseHandle(Core::System& system, Handle handle) { 1905static ResultCode CloseHandle(Core::System& system, Handle handle) {
1738 LOG_TRACE(Kernel_SVC, "Closing handle 0x{:08X}", handle); 1906 LOG_TRACE(Kernel_SVC, "Closing handle 0x{:08X}", handle);
@@ -1765,9 +1933,14 @@ static ResultCode ResetSignal(Core::System& system, Handle handle) {
1765 return ERR_INVALID_HANDLE; 1933 return ERR_INVALID_HANDLE;
1766} 1934}
1767 1935
1936static ResultCode ResetSignal32(Core::System& system, Handle handle) {
1937 return ResetSignal(system, handle);
1938}
1939
1768/// Creates a TransferMemory object 1940/// Creates a TransferMemory object
1769static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAddr addr, u64 size, 1941static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAddr addr, u64 size,
1770 u32 permissions) { 1942 u32 permissions) {
1943 std::lock_guard lock{HLE::g_hle_lock};
1771 LOG_DEBUG(Kernel_SVC, "called addr=0x{:X}, size=0x{:X}, perms=0x{:08X}", addr, size, 1944 LOG_DEBUG(Kernel_SVC, "called addr=0x{:X}, size=0x{:X}, perms=0x{:08X}", addr, size,
1772 permissions); 1945 permissions);
1773 1946
@@ -1812,6 +1985,12 @@ static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAd
1812 return RESULT_SUCCESS; 1985 return RESULT_SUCCESS;
1813} 1986}
1814 1987
1988static ResultCode CreateTransferMemory32(Core::System& system, Handle* handle, u32 addr, u32 size,
1989 u32 permissions) {
1990 return CreateTransferMemory(system, handle, static_cast<VAddr>(addr),
1991 static_cast<std::size_t>(size), permissions);
1992}
1993
1815static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, u32* core, 1994static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, u32* core,
1816 u64* mask) { 1995 u64* mask) {
1817 LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle); 1996 LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);
@@ -1821,6 +2000,8 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle,
1821 if (!thread) { 2000 if (!thread) {
1822 LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}", 2001 LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
1823 thread_handle); 2002 thread_handle);
2003 *core = 0;
2004 *mask = 0;
1824 return ERR_INVALID_HANDLE; 2005 return ERR_INVALID_HANDLE;
1825 } 2006 }
1826 2007
@@ -1830,6 +2011,15 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle,
1830 return RESULT_SUCCESS; 2011 return RESULT_SUCCESS;
1831} 2012}
1832 2013
2014static ResultCode GetThreadCoreMask32(Core::System& system, Handle thread_handle, u32* core,
2015 u32* mask_low, u32* mask_high) {
2016 u64 mask{};
2017 const auto result = GetThreadCoreMask(system, thread_handle, core, &mask);
2018 *mask_high = static_cast<u32>(mask >> 32);
2019 *mask_low = static_cast<u32>(mask);
2020 return result;
2021}
2022
1833static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, u32 core, 2023static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, u32 core,
1834 u64 affinity_mask) { 2024 u64 affinity_mask) {
1835 LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, core=0x{:X}, affinity_mask=0x{:016X}", 2025 LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, core=0x{:X}, affinity_mask=0x{:016X}",
@@ -1861,7 +2051,7 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle,
1861 return ERR_INVALID_COMBINATION; 2051 return ERR_INVALID_COMBINATION;
1862 } 2052 }
1863 2053
1864 if (core < Core::NUM_CPU_CORES) { 2054 if (core < Core::Hardware::NUM_CPU_CORES) {
1865 if ((affinity_mask & (1ULL << core)) == 0) { 2055 if ((affinity_mask & (1ULL << core)) == 0) {
1866 LOG_ERROR(Kernel_SVC, 2056 LOG_ERROR(Kernel_SVC,
1867 "Core is not enabled for the current mask, core={}, mask={:016X}", core, 2057 "Core is not enabled for the current mask, core={}, mask={:016X}", core,
@@ -1883,11 +2073,14 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle,
1883 return ERR_INVALID_HANDLE; 2073 return ERR_INVALID_HANDLE;
1884 } 2074 }
1885 2075
1886 system.PrepareReschedule(thread->GetProcessorID()); 2076 return thread->SetCoreAndAffinityMask(core, affinity_mask);
1887 thread->ChangeCore(core, affinity_mask); 2077}
1888 system.PrepareReschedule(thread->GetProcessorID());
1889 2078
1890 return RESULT_SUCCESS; 2079static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle, u32 core,
2080 u32 affinity_mask_low, u32 affinity_mask_high) {
2081 const u64 affinity_mask =
2082 static_cast<u64>(affinity_mask_low) | (static_cast<u64>(affinity_mask_high) << 32);
2083 return SetThreadCoreMask(system, thread_handle, core, affinity_mask);
1891} 2084}
1892 2085
1893static ResultCode CreateEvent(Core::System& system, Handle* write_handle, Handle* read_handle) { 2086static ResultCode CreateEvent(Core::System& system, Handle* write_handle, Handle* read_handle) {
@@ -1918,6 +2111,10 @@ static ResultCode CreateEvent(Core::System& system, Handle* write_handle, Handle
1918 return RESULT_SUCCESS; 2111 return RESULT_SUCCESS;
1919} 2112}
1920 2113
2114static ResultCode CreateEvent32(Core::System& system, Handle* write_handle, Handle* read_handle) {
2115 return CreateEvent(system, write_handle, read_handle);
2116}
2117
1921static ResultCode ClearEvent(Core::System& system, Handle handle) { 2118static ResultCode ClearEvent(Core::System& system, Handle handle) {
1922 LOG_TRACE(Kernel_SVC, "called, event=0x{:08X}", handle); 2119 LOG_TRACE(Kernel_SVC, "called, event=0x{:08X}", handle);
1923 2120
@@ -1939,6 +2136,10 @@ static ResultCode ClearEvent(Core::System& system, Handle handle) {
1939 return ERR_INVALID_HANDLE; 2136 return ERR_INVALID_HANDLE;
1940} 2137}
1941 2138
2139static ResultCode ClearEvent32(Core::System& system, Handle handle) {
2140 return ClearEvent(system, handle);
2141}
2142
1942static ResultCode SignalEvent(Core::System& system, Handle handle) { 2143static ResultCode SignalEvent(Core::System& system, Handle handle) {
1943 LOG_DEBUG(Kernel_SVC, "called. Handle=0x{:08X}", handle); 2144 LOG_DEBUG(Kernel_SVC, "called. Handle=0x{:08X}", handle);
1944 2145
@@ -1951,10 +2152,13 @@ static ResultCode SignalEvent(Core::System& system, Handle handle) {
1951 } 2152 }
1952 2153
1953 writable_event->Signal(); 2154 writable_event->Signal();
1954 system.PrepareReschedule();
1955 return RESULT_SUCCESS; 2155 return RESULT_SUCCESS;
1956} 2156}
1957 2157
2158static ResultCode SignalEvent32(Core::System& system, Handle handle) {
2159 return SignalEvent(system, handle);
2160}
2161
1958static ResultCode GetProcessInfo(Core::System& system, u64* out, Handle process_handle, u32 type) { 2162static ResultCode GetProcessInfo(Core::System& system, u64* out, Handle process_handle, u32 type) {
1959 LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, type=0x{:X}", process_handle, type); 2163 LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, type=0x{:X}", process_handle, type);
1960 2164
@@ -1982,6 +2186,7 @@ static ResultCode GetProcessInfo(Core::System& system, u64* out, Handle process_
1982} 2186}
1983 2187
1984static ResultCode CreateResourceLimit(Core::System& system, Handle* out_handle) { 2188static ResultCode CreateResourceLimit(Core::System& system, Handle* out_handle) {
2189 std::lock_guard lock{HLE::g_hle_lock};
1985 LOG_DEBUG(Kernel_SVC, "called"); 2190 LOG_DEBUG(Kernel_SVC, "called");
1986 2191
1987 auto& kernel = system.Kernel(); 2192 auto& kernel = system.Kernel();
@@ -2139,6 +2344,15 @@ static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAdd
2139 return RESULT_SUCCESS; 2344 return RESULT_SUCCESS;
2140} 2345}
2141 2346
2347static ResultCode FlushProcessDataCache32(Core::System& system, Handle handle, u32 address,
2348 u32 size) {
2349 // Note(Blinkhawk): For emulation purposes of the data cache this is mostly a nope
2350 // as all emulation is done in the same cache level in host architecture, thus data cache
2351 // does not need flushing.
2352 LOG_DEBUG(Kernel_SVC, "called");
2353 return RESULT_SUCCESS;
2354}
2355
2142namespace { 2356namespace {
2143struct FunctionDef { 2357struct FunctionDef {
2144 using Func = void(Core::System&); 2358 using Func = void(Core::System&);
@@ -2153,57 +2367,57 @@ static const FunctionDef SVC_Table_32[] = {
2153 {0x00, nullptr, "Unknown"}, 2367 {0x00, nullptr, "Unknown"},
2154 {0x01, SvcWrap32<SetHeapSize32>, "SetHeapSize32"}, 2368 {0x01, SvcWrap32<SetHeapSize32>, "SetHeapSize32"},
2155 {0x02, nullptr, "Unknown"}, 2369 {0x02, nullptr, "Unknown"},
2156 {0x03, nullptr, "SetMemoryAttribute32"}, 2370 {0x03, SvcWrap32<SetMemoryAttribute32>, "SetMemoryAttribute32"},
2157 {0x04, nullptr, "MapMemory32"}, 2371 {0x04, SvcWrap32<MapMemory32>, "MapMemory32"},
2158 {0x05, nullptr, "UnmapMemory32"}, 2372 {0x05, SvcWrap32<UnmapMemory32>, "UnmapMemory32"},
2159 {0x06, SvcWrap32<QueryMemory32>, "QueryMemory32"}, 2373 {0x06, SvcWrap32<QueryMemory32>, "QueryMemory32"},
2160 {0x07, nullptr, "ExitProcess32"}, 2374 {0x07, SvcWrap32<ExitProcess32>, "ExitProcess32"},
2161 {0x08, nullptr, "CreateThread32"}, 2375 {0x08, SvcWrap32<CreateThread32>, "CreateThread32"},
2162 {0x09, nullptr, "StartThread32"}, 2376 {0x09, SvcWrap32<StartThread32>, "StartThread32"},
2163 {0x0a, nullptr, "ExitThread32"}, 2377 {0x0a, SvcWrap32<ExitThread32>, "ExitThread32"},
2164 {0x0b, nullptr, "SleepThread32"}, 2378 {0x0b, SvcWrap32<SleepThread32>, "SleepThread32"},
2165 {0x0c, SvcWrap32<GetThreadPriority32>, "GetThreadPriority32"}, 2379 {0x0c, SvcWrap32<GetThreadPriority32>, "GetThreadPriority32"},
2166 {0x0d, nullptr, "SetThreadPriority32"}, 2380 {0x0d, SvcWrap32<SetThreadPriority32>, "SetThreadPriority32"},
2167 {0x0e, nullptr, "GetThreadCoreMask32"}, 2381 {0x0e, SvcWrap32<GetThreadCoreMask32>, "GetThreadCoreMask32"},
2168 {0x0f, nullptr, "SetThreadCoreMask32"}, 2382 {0x0f, SvcWrap32<SetThreadCoreMask32>, "SetThreadCoreMask32"},
2169 {0x10, nullptr, "GetCurrentProcessorNumber32"}, 2383 {0x10, SvcWrap32<GetCurrentProcessorNumber32>, "GetCurrentProcessorNumber32"},
2170 {0x11, nullptr, "SignalEvent32"}, 2384 {0x11, SvcWrap32<SignalEvent32>, "SignalEvent32"},
2171 {0x12, nullptr, "ClearEvent32"}, 2385 {0x12, SvcWrap32<ClearEvent32>, "ClearEvent32"},
2172 {0x13, nullptr, "MapSharedMemory32"}, 2386 {0x13, SvcWrap32<MapSharedMemory32>, "MapSharedMemory32"},
2173 {0x14, nullptr, "UnmapSharedMemory32"}, 2387 {0x14, nullptr, "UnmapSharedMemory32"},
2174 {0x15, nullptr, "CreateTransferMemory32"}, 2388 {0x15, SvcWrap32<CreateTransferMemory32>, "CreateTransferMemory32"},
2175 {0x16, SvcWrap32<CloseHandle32>, "CloseHandle32"}, 2389 {0x16, SvcWrap32<CloseHandle32>, "CloseHandle32"},
2176 {0x17, nullptr, "ResetSignal32"}, 2390 {0x17, SvcWrap32<ResetSignal32>, "ResetSignal32"},
2177 {0x18, SvcWrap32<WaitSynchronization32>, "WaitSynchronization32"}, 2391 {0x18, SvcWrap32<WaitSynchronization32>, "WaitSynchronization32"},
2178 {0x19, nullptr, "CancelSynchronization32"}, 2392 {0x19, SvcWrap32<CancelSynchronization32>, "CancelSynchronization32"},
2179 {0x1a, nullptr, "ArbitrateLock32"}, 2393 {0x1a, SvcWrap32<ArbitrateLock32>, "ArbitrateLock32"},
2180 {0x1b, nullptr, "ArbitrateUnlock32"}, 2394 {0x1b, SvcWrap32<ArbitrateUnlock32>, "ArbitrateUnlock32"},
2181 {0x1c, nullptr, "WaitProcessWideKeyAtomic32"}, 2395 {0x1c, SvcWrap32<WaitProcessWideKeyAtomic32>, "WaitProcessWideKeyAtomic32"},
2182 {0x1d, SvcWrap32<SignalProcessWideKey32>, "SignalProcessWideKey32"}, 2396 {0x1d, SvcWrap32<SignalProcessWideKey32>, "SignalProcessWideKey32"},
2183 {0x1e, nullptr, "GetSystemTick32"}, 2397 {0x1e, SvcWrap32<GetSystemTick32>, "GetSystemTick32"},
2184 {0x1f, SvcWrap32<ConnectToNamedPort32>, "ConnectToNamedPort32"}, 2398 {0x1f, SvcWrap32<ConnectToNamedPort32>, "ConnectToNamedPort32"},
2185 {0x20, nullptr, "Unknown"}, 2399 {0x20, nullptr, "Unknown"},
2186 {0x21, SvcWrap32<SendSyncRequest32>, "SendSyncRequest32"}, 2400 {0x21, SvcWrap32<SendSyncRequest32>, "SendSyncRequest32"},
2187 {0x22, nullptr, "SendSyncRequestWithUserBuffer32"}, 2401 {0x22, nullptr, "SendSyncRequestWithUserBuffer32"},
2188 {0x23, nullptr, "Unknown"}, 2402 {0x23, nullptr, "Unknown"},
2189 {0x24, nullptr, "GetProcessId32"}, 2403 {0x24, SvcWrap32<GetProcessId32>, "GetProcessId32"},
2190 {0x25, SvcWrap32<GetThreadId32>, "GetThreadId32"}, 2404 {0x25, SvcWrap32<GetThreadId32>, "GetThreadId32"},
2191 {0x26, nullptr, "Break32"}, 2405 {0x26, SvcWrap32<Break32>, "Break32"},
2192 {0x27, nullptr, "OutputDebugString32"}, 2406 {0x27, nullptr, "OutputDebugString32"},
2193 {0x28, nullptr, "Unknown"}, 2407 {0x28, nullptr, "Unknown"},
2194 {0x29, SvcWrap32<GetInfo32>, "GetInfo32"}, 2408 {0x29, SvcWrap32<GetInfo32>, "GetInfo32"},
2195 {0x2a, nullptr, "Unknown"}, 2409 {0x2a, nullptr, "Unknown"},
2196 {0x2b, nullptr, "Unknown"}, 2410 {0x2b, nullptr, "Unknown"},
2197 {0x2c, nullptr, "MapPhysicalMemory32"}, 2411 {0x2c, SvcWrap32<MapPhysicalMemory32>, "MapPhysicalMemory32"},
2198 {0x2d, nullptr, "UnmapPhysicalMemory32"}, 2412 {0x2d, SvcWrap32<UnmapPhysicalMemory32>, "UnmapPhysicalMemory32"},
2199 {0x2e, nullptr, "Unknown"}, 2413 {0x2e, nullptr, "Unknown"},
2200 {0x2f, nullptr, "Unknown"}, 2414 {0x2f, nullptr, "Unknown"},
2201 {0x30, nullptr, "Unknown"}, 2415 {0x30, nullptr, "Unknown"},
2202 {0x31, nullptr, "Unknown"}, 2416 {0x31, nullptr, "Unknown"},
2203 {0x32, nullptr, "SetThreadActivity32"}, 2417 {0x32, SvcWrap32<SetThreadActivity32>, "SetThreadActivity32"},
2204 {0x33, nullptr, "GetThreadContext32"}, 2418 {0x33, SvcWrap32<GetThreadContext32>, "GetThreadContext32"},
2205 {0x34, nullptr, "WaitForAddress32"}, 2419 {0x34, SvcWrap32<WaitForAddress32>, "WaitForAddress32"},
2206 {0x35, nullptr, "SignalToAddress32"}, 2420 {0x35, SvcWrap32<SignalToAddress32>, "SignalToAddress32"},
2207 {0x36, nullptr, "Unknown"}, 2421 {0x36, nullptr, "Unknown"},
2208 {0x37, nullptr, "Unknown"}, 2422 {0x37, nullptr, "Unknown"},
2209 {0x38, nullptr, "Unknown"}, 2423 {0x38, nullptr, "Unknown"},
@@ -2219,7 +2433,7 @@ static const FunctionDef SVC_Table_32[] = {
2219 {0x42, nullptr, "Unknown"}, 2433 {0x42, nullptr, "Unknown"},
2220 {0x43, nullptr, "ReplyAndReceive32"}, 2434 {0x43, nullptr, "ReplyAndReceive32"},
2221 {0x44, nullptr, "Unknown"}, 2435 {0x44, nullptr, "Unknown"},
2222 {0x45, nullptr, "CreateEvent32"}, 2436 {0x45, SvcWrap32<CreateEvent32>, "CreateEvent32"},
2223 {0x46, nullptr, "Unknown"}, 2437 {0x46, nullptr, "Unknown"},
2224 {0x47, nullptr, "Unknown"}, 2438 {0x47, nullptr, "Unknown"},
2225 {0x48, nullptr, "Unknown"}, 2439 {0x48, nullptr, "Unknown"},
@@ -2245,7 +2459,7 @@ static const FunctionDef SVC_Table_32[] = {
2245 {0x5c, nullptr, "Unknown"}, 2459 {0x5c, nullptr, "Unknown"},
2246 {0x5d, nullptr, "Unknown"}, 2460 {0x5d, nullptr, "Unknown"},
2247 {0x5e, nullptr, "Unknown"}, 2461 {0x5e, nullptr, "Unknown"},
2248 {0x5F, nullptr, "FlushProcessDataCache32"}, 2462 {0x5F, SvcWrap32<FlushProcessDataCache32>, "FlushProcessDataCache32"},
2249 {0x60, nullptr, "Unknown"}, 2463 {0x60, nullptr, "Unknown"},
2250 {0x61, nullptr, "Unknown"}, 2464 {0x61, nullptr, "Unknown"},
2251 {0x62, nullptr, "Unknown"}, 2465 {0x62, nullptr, "Unknown"},
@@ -2423,13 +2637,10 @@ static const FunctionDef* GetSVCInfo64(u32 func_num) {
2423 return &SVC_Table_64[func_num]; 2637 return &SVC_Table_64[func_num];
2424} 2638}
2425 2639
2426MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70));
2427
2428void Call(Core::System& system, u32 immediate) { 2640void Call(Core::System& system, u32 immediate) {
2429 MICROPROFILE_SCOPE(Kernel_SVC); 2641 system.ExitDynarmicProfile();
2430 2642 auto& kernel = system.Kernel();
2431 // Lock the global kernel mutex when we enter the kernel HLE. 2643 kernel.EnterSVCProfile();
2432 std::lock_guard lock{HLE::g_hle_lock};
2433 2644
2434 const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate) 2645 const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate)
2435 : GetSVCInfo32(immediate); 2646 : GetSVCInfo32(immediate);
@@ -2442,6 +2653,9 @@ void Call(Core::System& system, u32 immediate) {
2442 } else { 2653 } else {
2443 LOG_CRITICAL(Kernel_SVC, "Unknown SVC function 0x{:X}", immediate); 2654 LOG_CRITICAL(Kernel_SVC, "Unknown SVC function 0x{:X}", immediate);
2444 } 2655 }
2656
2657 kernel.ExitSVCProfile();
2658 system.EnterDynarmicProfile();
2445} 2659}
2446 2660
2447} // namespace Kernel::Svc 2661} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index 7d735e3fa..0b6dd9df0 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -350,13 +350,50 @@ void SvcWrap64(Core::System& system) {
350 func(system, static_cast<u32>(Param(system, 0)), Param(system, 1), Param(system, 2)); 350 func(system, static_cast<u32>(Param(system, 0)), Param(system, 1), Param(system, 2));
351} 351}
352 352
353// Used by QueryMemory32 353// Used by QueryMemory32, ArbitrateLock32
354template <ResultCode func(Core::System&, u32, u32, u32)> 354template <ResultCode func(Core::System&, u32, u32, u32)>
355void SvcWrap32(Core::System& system) { 355void SvcWrap32(Core::System& system) {
356 FuncReturn32(system, 356 FuncReturn32(system,
357 func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2)).raw); 357 func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2)).raw);
358} 358}
359 359
360// Used by Break32
361template <void func(Core::System&, u32, u32, u32)>
362void SvcWrap32(Core::System& system) {
363 func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2));
364}
365
366// Used by ExitProcess32, ExitThread32
367template <void func(Core::System&)>
368void SvcWrap32(Core::System& system) {
369 func(system);
370}
371
372// Used by GetCurrentProcessorNumber32
373template <u32 func(Core::System&)>
374void SvcWrap32(Core::System& system) {
375 FuncReturn32(system, func(system));
376}
377
378// Used by SleepThread32
379template <void func(Core::System&, u32, u32)>
380void SvcWrap32(Core::System& system) {
381 func(system, Param32(system, 0), Param32(system, 1));
382}
383
384// Used by CreateThread32
385template <ResultCode func(Core::System&, Handle*, u32, u32, u32, u32, s32)>
386void SvcWrap32(Core::System& system) {
387 Handle param_1 = 0;
388
389 const u32 retval = func(system, &param_1, Param32(system, 0), Param32(system, 1),
390 Param32(system, 2), Param32(system, 3), Param32(system, 4))
391 .raw;
392
393 system.CurrentArmInterface().SetReg(1, param_1);
394 FuncReturn(system, retval);
395}
396
360// Used by GetInfo32 397// Used by GetInfo32
361template <ResultCode func(Core::System&, u32*, u32*, u32, u32, u32, u32)> 398template <ResultCode func(Core::System&, u32*, u32*, u32, u32, u32, u32)>
362void SvcWrap32(Core::System& system) { 399void SvcWrap32(Core::System& system) {
@@ -393,18 +430,114 @@ void SvcWrap32(Core::System& system) {
393 FuncReturn(system, retval); 430 FuncReturn(system, retval);
394} 431}
395 432
433// Used by GetSystemTick32
434template <void func(Core::System&, u32*, u32*)>
435void SvcWrap32(Core::System& system) {
436 u32 param_1 = 0;
437 u32 param_2 = 0;
438
439 func(system, &param_1, &param_2);
440 system.CurrentArmInterface().SetReg(0, param_1);
441 system.CurrentArmInterface().SetReg(1, param_2);
442}
443
444// Used by CreateEvent32
445template <ResultCode func(Core::System&, Handle*, Handle*)>
446void SvcWrap32(Core::System& system) {
447 Handle param_1 = 0;
448 Handle param_2 = 0;
449
450 const u32 retval = func(system, &param_1, &param_2).raw;
451 system.CurrentArmInterface().SetReg(1, param_1);
452 system.CurrentArmInterface().SetReg(2, param_2);
453 FuncReturn(system, retval);
454}
455
456// Used by GetThreadId32
457template <ResultCode func(Core::System&, Handle, u32*, u32*, u32*)>
458void SvcWrap32(Core::System& system) {
459 u32 param_1 = 0;
460 u32 param_2 = 0;
461 u32 param_3 = 0;
462
463 const u32 retval = func(system, Param32(system, 2), &param_1, &param_2, &param_3).raw;
464 system.CurrentArmInterface().SetReg(1, param_1);
465 system.CurrentArmInterface().SetReg(2, param_2);
466 system.CurrentArmInterface().SetReg(3, param_3);
467 FuncReturn(system, retval);
468}
469
396// Used by SignalProcessWideKey32 470// Used by SignalProcessWideKey32
397template <void func(Core::System&, u32, s32)> 471template <void func(Core::System&, u32, s32)>
398void SvcWrap32(Core::System& system) { 472void SvcWrap32(Core::System& system) {
399 func(system, static_cast<u32>(Param(system, 0)), static_cast<s32>(Param(system, 1))); 473 func(system, static_cast<u32>(Param(system, 0)), static_cast<s32>(Param(system, 1)));
400} 474}
401 475
402// Used by SendSyncRequest32 476// Used by SetThreadPriority32
477template <ResultCode func(Core::System&, Handle, u32)>
478void SvcWrap32(Core::System& system) {
479 const u32 retval =
480 func(system, static_cast<Handle>(Param(system, 0)), static_cast<u32>(Param(system, 1))).raw;
481 FuncReturn(system, retval);
482}
483
484// Used by SetThreadCoreMask32
485template <ResultCode func(Core::System&, Handle, u32, u32, u32)>
486void SvcWrap32(Core::System& system) {
487 const u32 retval =
488 func(system, static_cast<Handle>(Param(system, 0)), static_cast<u32>(Param(system, 1)),
489 static_cast<u32>(Param(system, 2)), static_cast<u32>(Param(system, 3)))
490 .raw;
491 FuncReturn(system, retval);
492}
493
494// Used by WaitProcessWideKeyAtomic32
495template <ResultCode func(Core::System&, u32, u32, Handle, u32, u32)>
496void SvcWrap32(Core::System& system) {
497 const u32 retval =
498 func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1)),
499 static_cast<Handle>(Param(system, 2)), static_cast<u32>(Param(system, 3)),
500 static_cast<u32>(Param(system, 4)))
501 .raw;
502 FuncReturn(system, retval);
503}
504
505// Used by WaitForAddress32
506template <ResultCode func(Core::System&, u32, u32, s32, u32, u32)>
507void SvcWrap32(Core::System& system) {
508 const u32 retval = func(system, static_cast<u32>(Param(system, 0)),
509 static_cast<u32>(Param(system, 1)), static_cast<s32>(Param(system, 2)),
510 static_cast<u32>(Param(system, 3)), static_cast<u32>(Param(system, 4)))
511 .raw;
512 FuncReturn(system, retval);
513}
514
515// Used by SignalToAddress32
516template <ResultCode func(Core::System&, u32, u32, s32, s32)>
517void SvcWrap32(Core::System& system) {
518 const u32 retval =
519 func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1)),
520 static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3)))
521 .raw;
522 FuncReturn(system, retval);
523}
524
525// Used by SendSyncRequest32, ArbitrateUnlock32
403template <ResultCode func(Core::System&, u32)> 526template <ResultCode func(Core::System&, u32)>
404void SvcWrap32(Core::System& system) { 527void SvcWrap32(Core::System& system) {
405 FuncReturn(system, func(system, static_cast<u32>(Param(system, 0))).raw); 528 FuncReturn(system, func(system, static_cast<u32>(Param(system, 0))).raw);
406} 529}
407 530
531// Used by CreateTransferMemory32
532template <ResultCode func(Core::System&, Handle*, u32, u32, u32)>
533void SvcWrap32(Core::System& system) {
534 Handle handle = 0;
535 const u32 retval =
536 func(system, &handle, Param32(system, 1), Param32(system, 2), Param32(system, 3)).raw;
537 system.CurrentArmInterface().SetReg(1, handle);
538 FuncReturn(system, retval);
539}
540
408// Used by WaitSynchronization32 541// Used by WaitSynchronization32
409template <ResultCode func(Core::System&, u32, u32, s32, u32, Handle*)> 542template <ResultCode func(Core::System&, u32, u32, s32, u32, Handle*)>
410void SvcWrap32(Core::System& system) { 543void SvcWrap32(Core::System& system) {
diff --git a/src/core/hle/kernel/synchronization.cpp b/src/core/hle/kernel/synchronization.cpp
index dc37fad1a..851b702a5 100644
--- a/src/core/hle/kernel/synchronization.cpp
+++ b/src/core/hle/kernel/synchronization.cpp
@@ -10,78 +10,107 @@
10#include "core/hle/kernel/synchronization.h" 10#include "core/hle/kernel/synchronization.h"
11#include "core/hle/kernel/synchronization_object.h" 11#include "core/hle/kernel/synchronization_object.h"
12#include "core/hle/kernel/thread.h" 12#include "core/hle/kernel/thread.h"
13#include "core/hle/kernel/time_manager.h"
13 14
14namespace Kernel { 15namespace Kernel {
15 16
16/// Default thread wakeup callback for WaitSynchronization
17static bool DefaultThreadWakeupCallback(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
18 std::shared_ptr<SynchronizationObject> object,
19 std::size_t index) {
20 ASSERT(thread->GetStatus() == ThreadStatus::WaitSynch);
21
22 if (reason == ThreadWakeupReason::Timeout) {
23 thread->SetWaitSynchronizationResult(RESULT_TIMEOUT);
24 return true;
25 }
26
27 ASSERT(reason == ThreadWakeupReason::Signal);
28 thread->SetWaitSynchronizationResult(RESULT_SUCCESS);
29 thread->SetWaitSynchronizationOutput(static_cast<u32>(index));
30 return true;
31}
32
33Synchronization::Synchronization(Core::System& system) : system{system} {} 17Synchronization::Synchronization(Core::System& system) : system{system} {}
34 18
35void Synchronization::SignalObject(SynchronizationObject& obj) const { 19void Synchronization::SignalObject(SynchronizationObject& obj) const {
20 auto& kernel = system.Kernel();
21 SchedulerLock lock(kernel);
22 auto& time_manager = kernel.TimeManager();
36 if (obj.IsSignaled()) { 23 if (obj.IsSignaled()) {
37 obj.WakeupAllWaitingThreads(); 24 for (auto thread : obj.GetWaitingThreads()) {
25 if (thread->GetSchedulingStatus() == ThreadSchedStatus::Paused) {
26 if (thread->GetStatus() != ThreadStatus::WaitHLEEvent) {
27 ASSERT(thread->GetStatus() == ThreadStatus::WaitSynch);
28 ASSERT(thread->IsWaitingSync());
29 }
30 thread->SetSynchronizationResults(&obj, RESULT_SUCCESS);
31 thread->ResumeFromWait();
32 }
33 }
34 obj.ClearWaitingThreads();
38 } 35 }
39} 36}
40 37
41std::pair<ResultCode, Handle> Synchronization::WaitFor( 38std::pair<ResultCode, Handle> Synchronization::WaitFor(
42 std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds) { 39 std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds) {
40 auto& kernel = system.Kernel();
43 auto* const thread = system.CurrentScheduler().GetCurrentThread(); 41 auto* const thread = system.CurrentScheduler().GetCurrentThread();
44 // Find the first object that is acquirable in the provided list of objects 42 Handle event_handle = InvalidHandle;
45 const auto itr = std::find_if(sync_objects.begin(), sync_objects.end(), 43 {
46 [thread](const std::shared_ptr<SynchronizationObject>& object) { 44 SchedulerLockAndSleep lock(kernel, event_handle, thread, nano_seconds);
47 return object->IsSignaled(); 45 const auto itr =
48 }); 46 std::find_if(sync_objects.begin(), sync_objects.end(),
49 47 [thread](const std::shared_ptr<SynchronizationObject>& object) {
50 if (itr != sync_objects.end()) { 48 return object->IsSignaled();
51 // We found a ready object, acquire it and set the result value 49 });
52 SynchronizationObject* object = itr->get(); 50
53 object->Acquire(thread); 51 if (itr != sync_objects.end()) {
54 const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr)); 52 // We found a ready object, acquire it and set the result value
55 return {RESULT_SUCCESS, index}; 53 SynchronizationObject* object = itr->get();
54 object->Acquire(thread);
55 const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr));
56 lock.CancelSleep();
57 return {RESULT_SUCCESS, index};
58 }
59
60 if (nano_seconds == 0) {
61 lock.CancelSleep();
62 return {RESULT_TIMEOUT, InvalidHandle};
63 }
64
65 if (thread->IsPendingTermination()) {
66 lock.CancelSleep();
67 return {ERR_THREAD_TERMINATING, InvalidHandle};
68 }
69
70 if (thread->IsSyncCancelled()) {
71 thread->SetSyncCancelled(false);
72 lock.CancelSleep();
73 return {ERR_SYNCHRONIZATION_CANCELED, InvalidHandle};
74 }
75
76 for (auto& object : sync_objects) {
77 object->AddWaitingThread(SharedFrom(thread));
78 }
79
80 thread->SetSynchronizationObjects(&sync_objects);
81 thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
82 thread->SetStatus(ThreadStatus::WaitSynch);
83 thread->SetWaitingSync(true);
56 } 84 }
85 thread->SetWaitingSync(false);
57 86
58 // No objects were ready to be acquired, prepare to suspend the thread. 87 if (event_handle != InvalidHandle) {
59 88 auto& time_manager = kernel.TimeManager();
60 // If a timeout value of 0 was provided, just return the Timeout error code instead of 89 time_manager.UnscheduleTimeEvent(event_handle);
61 // suspending the thread.
62 if (nano_seconds == 0) {
63 return {RESULT_TIMEOUT, InvalidHandle};
64 } 90 }
65 91
66 if (thread->IsSyncCancelled()) { 92 {
67 thread->SetSyncCancelled(false); 93 SchedulerLock lock(kernel);
68 return {ERR_SYNCHRONIZATION_CANCELED, InvalidHandle}; 94 ResultCode signaling_result = thread->GetSignalingResult();
95 SynchronizationObject* signaling_object = thread->GetSignalingObject();
96 thread->SetSynchronizationObjects(nullptr);
97 auto shared_thread = SharedFrom(thread);
98 for (auto& obj : sync_objects) {
99 obj->RemoveWaitingThread(shared_thread);
100 }
101 if (signaling_object != nullptr) {
102 const auto itr = std::find_if(
103 sync_objects.begin(), sync_objects.end(),
104 [signaling_object](const std::shared_ptr<SynchronizationObject>& object) {
105 return object.get() == signaling_object;
106 });
107 ASSERT(itr != sync_objects.end());
108 signaling_object->Acquire(thread);
109 const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr));
110 return {signaling_result, index};
111 }
112 return {signaling_result, -1};
69 } 113 }
70
71 for (auto& object : sync_objects) {
72 object->AddWaitingThread(SharedFrom(thread));
73 }
74
75 thread->SetSynchronizationObjects(std::move(sync_objects));
76 thread->SetStatus(ThreadStatus::WaitSynch);
77
78 // Create an event to wake the thread up after the specified nanosecond delay has passed
79 thread->WakeAfterDelay(nano_seconds);
80 thread->SetWakeupCallback(DefaultThreadWakeupCallback);
81
82 system.PrepareReschedule(thread->GetProcessorID());
83
84 return {RESULT_TIMEOUT, InvalidHandle};
85} 114}
86 115
87} // namespace Kernel 116} // namespace Kernel
diff --git a/src/core/hle/kernel/synchronization_object.cpp b/src/core/hle/kernel/synchronization_object.cpp
index 43f3eef18..ba4d39157 100644
--- a/src/core/hle/kernel/synchronization_object.cpp
+++ b/src/core/hle/kernel/synchronization_object.cpp
@@ -38,68 +38,8 @@ void SynchronizationObject::RemoveWaitingThread(std::shared_ptr<Thread> thread)
38 waiting_threads.erase(itr); 38 waiting_threads.erase(itr);
39} 39}
40 40
41std::shared_ptr<Thread> SynchronizationObject::GetHighestPriorityReadyThread() const { 41void SynchronizationObject::ClearWaitingThreads() {
42 Thread* candidate = nullptr; 42 waiting_threads.clear();
43 u32 candidate_priority = THREADPRIO_LOWEST + 1;
44
45 for (const auto& thread : waiting_threads) {
46 const ThreadStatus thread_status = thread->GetStatus();
47
48 // The list of waiting threads must not contain threads that are not waiting to be awakened.
49 ASSERT_MSG(thread_status == ThreadStatus::WaitSynch ||
50 thread_status == ThreadStatus::WaitHLEEvent,
51 "Inconsistent thread statuses in waiting_threads");
52
53 if (thread->GetPriority() >= candidate_priority)
54 continue;
55
56 if (ShouldWait(thread.get()))
57 continue;
58
59 candidate = thread.get();
60 candidate_priority = thread->GetPriority();
61 }
62
63 return SharedFrom(candidate);
64}
65
66void SynchronizationObject::WakeupWaitingThread(std::shared_ptr<Thread> thread) {
67 ASSERT(!ShouldWait(thread.get()));
68
69 if (!thread) {
70 return;
71 }
72
73 if (thread->IsSleepingOnWait()) {
74 for (const auto& object : thread->GetSynchronizationObjects()) {
75 ASSERT(!object->ShouldWait(thread.get()));
76 object->Acquire(thread.get());
77 }
78 } else {
79 Acquire(thread.get());
80 }
81
82 const std::size_t index = thread->GetSynchronizationObjectIndex(SharedFrom(this));
83
84 thread->ClearSynchronizationObjects();
85
86 thread->CancelWakeupTimer();
87
88 bool resume = true;
89 if (thread->HasWakeupCallback()) {
90 resume = thread->InvokeWakeupCallback(ThreadWakeupReason::Signal, thread, SharedFrom(this),
91 index);
92 }
93 if (resume) {
94 thread->ResumeFromWait();
95 kernel.PrepareReschedule(thread->GetProcessorID());
96 }
97}
98
99void SynchronizationObject::WakeupAllWaitingThreads() {
100 while (auto thread = GetHighestPriorityReadyThread()) {
101 WakeupWaitingThread(thread);
102 }
103} 43}
104 44
105const std::vector<std::shared_ptr<Thread>>& SynchronizationObject::GetWaitingThreads() const { 45const std::vector<std::shared_ptr<Thread>>& SynchronizationObject::GetWaitingThreads() const {
diff --git a/src/core/hle/kernel/synchronization_object.h b/src/core/hle/kernel/synchronization_object.h
index 741c31faf..f89b24204 100644
--- a/src/core/hle/kernel/synchronization_object.h
+++ b/src/core/hle/kernel/synchronization_object.h
@@ -12,6 +12,7 @@
12namespace Kernel { 12namespace Kernel {
13 13
14class KernelCore; 14class KernelCore;
15class Synchronization;
15class Thread; 16class Thread;
16 17
17/// Class that represents a Kernel object that a thread can be waiting on 18/// Class that represents a Kernel object that a thread can be waiting on
@@ -49,24 +50,11 @@ public:
49 */ 50 */
50 void RemoveWaitingThread(std::shared_ptr<Thread> thread); 51 void RemoveWaitingThread(std::shared_ptr<Thread> thread);
51 52
52 /**
53 * Wake up all threads waiting on this object that can be awoken, in priority order,
54 * and set the synchronization result and output of the thread.
55 */
56 void WakeupAllWaitingThreads();
57
58 /**
59 * Wakes up a single thread waiting on this object.
60 * @param thread Thread that is waiting on this object to wakeup.
61 */
62 void WakeupWaitingThread(std::shared_ptr<Thread> thread);
63
64 /// Obtains the highest priority thread that is ready to run from this object's waiting list.
65 std::shared_ptr<Thread> GetHighestPriorityReadyThread() const;
66
67 /// Get a const reference to the waiting threads list for debug use 53 /// Get a const reference to the waiting threads list for debug use
68 const std::vector<std::shared_ptr<Thread>>& GetWaitingThreads() const; 54 const std::vector<std::shared_ptr<Thread>>& GetWaitingThreads() const;
69 55
56 void ClearWaitingThreads();
57
70protected: 58protected:
71 bool is_signaled{}; // Tells if this sync object is signalled; 59 bool is_signaled{}; // Tells if this sync object is signalled;
72 60
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index db7f379ac..2b1092697 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -9,12 +9,21 @@
9 9
10#include "common/assert.h" 10#include "common/assert.h"
11#include "common/common_types.h" 11#include "common/common_types.h"
12#include "common/fiber.h"
12#include "common/logging/log.h" 13#include "common/logging/log.h"
13#include "common/thread_queue_list.h" 14#include "common/thread_queue_list.h"
14#include "core/arm/arm_interface.h" 15#include "core/arm/arm_interface.h"
16#ifdef ARCHITECTURE_x86_64
17#include "core/arm/dynarmic/arm_dynarmic_32.h"
18#include "core/arm/dynarmic/arm_dynarmic_64.h"
19#endif
20#include "core/arm/cpu_interrupt_handler.h"
21#include "core/arm/exclusive_monitor.h"
22#include "core/arm/unicorn/arm_unicorn.h"
15#include "core/core.h" 23#include "core/core.h"
16#include "core/core_timing.h" 24#include "core/core_timing.h"
17#include "core/core_timing_util.h" 25#include "core/core_timing_util.h"
26#include "core/cpu_manager.h"
18#include "core/hardware_properties.h" 27#include "core/hardware_properties.h"
19#include "core/hle/kernel/errors.h" 28#include "core/hle/kernel/errors.h"
20#include "core/hle/kernel/handle_table.h" 29#include "core/hle/kernel/handle_table.h"
@@ -23,6 +32,7 @@
23#include "core/hle/kernel/process.h" 32#include "core/hle/kernel/process.h"
24#include "core/hle/kernel/scheduler.h" 33#include "core/hle/kernel/scheduler.h"
25#include "core/hle/kernel/thread.h" 34#include "core/hle/kernel/thread.h"
35#include "core/hle/kernel/time_manager.h"
26#include "core/hle/result.h" 36#include "core/hle/result.h"
27#include "core/memory.h" 37#include "core/memory.h"
28 38
@@ -44,46 +54,26 @@ Thread::Thread(KernelCore& kernel) : SynchronizationObject{kernel} {}
44Thread::~Thread() = default; 54Thread::~Thread() = default;
45 55
46void Thread::Stop() { 56void Thread::Stop() {
47 // Cancel any outstanding wakeup events for this thread 57 {
48 Core::System::GetInstance().CoreTiming().UnscheduleEvent(kernel.ThreadWakeupCallbackEventType(), 58 SchedulerLock lock(kernel);
49 global_handle); 59 SetStatus(ThreadStatus::Dead);
50 kernel.GlobalHandleTable().Close(global_handle); 60 Signal();
51 global_handle = 0; 61 kernel.GlobalHandleTable().Close(global_handle);
52 SetStatus(ThreadStatus::Dead);
53 Signal();
54
55 // Clean up any dangling references in objects that this thread was waiting for
56 for (auto& wait_object : wait_objects) {
57 wait_object->RemoveWaitingThread(SharedFrom(this));
58 }
59 wait_objects.clear();
60
61 owner_process->UnregisterThread(this);
62
63 // Mark the TLS slot in the thread's page as free.
64 owner_process->FreeTLSRegion(tls_address);
65}
66
67void Thread::WakeAfterDelay(s64 nanoseconds) {
68 // Don't schedule a wakeup if the thread wants to wait forever
69 if (nanoseconds == -1)
70 return;
71 62
72 // This function might be called from any thread so we have to be cautious and use the 63 if (owner_process) {
73 // thread-safe version of ScheduleEvent. 64 owner_process->UnregisterThread(this);
74 const s64 cycles = Core::Timing::nsToCycles(std::chrono::nanoseconds{nanoseconds});
75 Core::System::GetInstance().CoreTiming().ScheduleEvent(
76 cycles, kernel.ThreadWakeupCallbackEventType(), global_handle);
77}
78 65
79void Thread::CancelWakeupTimer() { 66 // Mark the TLS slot in the thread's page as free.
80 Core::System::GetInstance().CoreTiming().UnscheduleEvent(kernel.ThreadWakeupCallbackEventType(), 67 owner_process->FreeTLSRegion(tls_address);
81 global_handle); 68 }
69 arm_interface.reset();
70 has_exited = true;
71 }
72 global_handle = 0;
82} 73}
83 74
84void Thread::ResumeFromWait() { 75void Thread::ResumeFromWait() {
85 ASSERT_MSG(wait_objects.empty(), "Thread is waking up while waiting for objects"); 76 SchedulerLock lock(kernel);
86
87 switch (status) { 77 switch (status) {
88 case ThreadStatus::Paused: 78 case ThreadStatus::Paused:
89 case ThreadStatus::WaitSynch: 79 case ThreadStatus::WaitSynch:
@@ -99,7 +89,7 @@ void Thread::ResumeFromWait() {
99 case ThreadStatus::Ready: 89 case ThreadStatus::Ready:
100 // The thread's wakeup callback must have already been cleared when the thread was first 90 // The thread's wakeup callback must have already been cleared when the thread was first
101 // awoken. 91 // awoken.
102 ASSERT(wakeup_callback == nullptr); 92 ASSERT(hle_callback == nullptr);
103 // If the thread is waiting on multiple wait objects, it might be awoken more than once 93 // If the thread is waiting on multiple wait objects, it might be awoken more than once
104 // before actually resuming. We can ignore subsequent wakeups if the thread status has 94 // before actually resuming. We can ignore subsequent wakeups if the thread status has
105 // already been set to ThreadStatus::Ready. 95 // already been set to ThreadStatus::Ready.
@@ -115,24 +105,31 @@ void Thread::ResumeFromWait() {
115 return; 105 return;
116 } 106 }
117 107
118 wakeup_callback = nullptr; 108 SetStatus(ThreadStatus::Ready);
109}
110
111void Thread::OnWakeUp() {
112 SchedulerLock lock(kernel);
119 113
120 if (activity == ThreadActivity::Paused) { 114 SetStatus(ThreadStatus::Ready);
121 SetStatus(ThreadStatus::Paused); 115}
122 return;
123 }
124 116
117ResultCode Thread::Start() {
118 SchedulerLock lock(kernel);
125 SetStatus(ThreadStatus::Ready); 119 SetStatus(ThreadStatus::Ready);
120 return RESULT_SUCCESS;
126} 121}
127 122
128void Thread::CancelWait() { 123void Thread::CancelWait() {
129 if (GetSchedulingStatus() != ThreadSchedStatus::Paused) { 124 SchedulerLock lock(kernel);
125 if (GetSchedulingStatus() != ThreadSchedStatus::Paused || !is_waiting_on_sync) {
130 is_sync_cancelled = true; 126 is_sync_cancelled = true;
131 return; 127 return;
132 } 128 }
129 // TODO(Blinkhawk): Implement cancel of server session
133 is_sync_cancelled = false; 130 is_sync_cancelled = false;
134 SetWaitSynchronizationResult(ERR_SYNCHRONIZATION_CANCELED); 131 SetSynchronizationResults(nullptr, ERR_SYNCHRONIZATION_CANCELED);
135 ResumeFromWait(); 132 SetStatus(ThreadStatus::Ready);
136} 133}
137 134
138static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top, 135static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
@@ -153,12 +150,29 @@ static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context,
153 context.fpcr = 0; 150 context.fpcr = 0;
154} 151}
155 152
156ResultVal<std::shared_ptr<Thread>> Thread::Create(KernelCore& kernel, std::string name, 153std::shared_ptr<Common::Fiber>& Thread::GetHostContext() {
157 VAddr entry_point, u32 priority, u64 arg, 154 return host_context;
158 s32 processor_id, VAddr stack_top, 155}
159 Process& owner_process) { 156
157ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadType type_flags,
158 std::string name, VAddr entry_point, u32 priority,
159 u64 arg, s32 processor_id, VAddr stack_top,
160 Process* owner_process) {
161 std::function<void(void*)> init_func = system.GetCpuManager().GetGuestThreadStartFunc();
162 void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
163 return Create(system, type_flags, name, entry_point, priority, arg, processor_id, stack_top,
164 owner_process, std::move(init_func), init_func_parameter);
165}
166
167ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadType type_flags,
168 std::string name, VAddr entry_point, u32 priority,
169 u64 arg, s32 processor_id, VAddr stack_top,
170 Process* owner_process,
171 std::function<void(void*)>&& thread_start_func,
172 void* thread_start_parameter) {
173 auto& kernel = system.Kernel();
160 // Check if priority is in ranged. Lowest priority -> highest priority id. 174 // Check if priority is in ranged. Lowest priority -> highest priority id.
161 if (priority > THREADPRIO_LOWEST) { 175 if (priority > THREADPRIO_LOWEST && ((type_flags & THREADTYPE_IDLE) == 0)) {
162 LOG_ERROR(Kernel_SVC, "Invalid thread priority: {}", priority); 176 LOG_ERROR(Kernel_SVC, "Invalid thread priority: {}", priority);
163 return ERR_INVALID_THREAD_PRIORITY; 177 return ERR_INVALID_THREAD_PRIORITY;
164 } 178 }
@@ -168,11 +182,12 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(KernelCore& kernel, std::strin
168 return ERR_INVALID_PROCESSOR_ID; 182 return ERR_INVALID_PROCESSOR_ID;
169 } 183 }
170 184
171 auto& system = Core::System::GetInstance(); 185 if (owner_process) {
172 if (!system.Memory().IsValidVirtualAddress(owner_process, entry_point)) { 186 if (!system.Memory().IsValidVirtualAddress(*owner_process, entry_point)) {
173 LOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:016X}", name, entry_point); 187 LOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:016X}", name, entry_point);
174 // TODO (bunnei): Find the correct error code to use here 188 // TODO (bunnei): Find the correct error code to use here
175 return RESULT_UNKNOWN; 189 return RESULT_UNKNOWN;
190 }
176 } 191 }
177 192
178 std::shared_ptr<Thread> thread = std::make_shared<Thread>(kernel); 193 std::shared_ptr<Thread> thread = std::make_shared<Thread>(kernel);
@@ -183,51 +198,82 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(KernelCore& kernel, std::strin
183 thread->stack_top = stack_top; 198 thread->stack_top = stack_top;
184 thread->tpidr_el0 = 0; 199 thread->tpidr_el0 = 0;
185 thread->nominal_priority = thread->current_priority = priority; 200 thread->nominal_priority = thread->current_priority = priority;
186 thread->last_running_ticks = system.CoreTiming().GetTicks(); 201 thread->last_running_ticks = 0;
187 thread->processor_id = processor_id; 202 thread->processor_id = processor_id;
188 thread->ideal_core = processor_id; 203 thread->ideal_core = processor_id;
189 thread->affinity_mask = 1ULL << processor_id; 204 thread->affinity_mask = 1ULL << processor_id;
190 thread->wait_objects.clear(); 205 thread->wait_objects = nullptr;
191 thread->mutex_wait_address = 0; 206 thread->mutex_wait_address = 0;
192 thread->condvar_wait_address = 0; 207 thread->condvar_wait_address = 0;
193 thread->wait_handle = 0; 208 thread->wait_handle = 0;
194 thread->name = std::move(name); 209 thread->name = std::move(name);
195 thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap(); 210 thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap();
196 thread->owner_process = &owner_process; 211 thread->owner_process = owner_process;
197 auto& scheduler = kernel.GlobalScheduler(); 212 thread->type = type_flags;
198 scheduler.AddThread(thread); 213 if ((type_flags & THREADTYPE_IDLE) == 0) {
199 thread->tls_address = thread->owner_process->CreateTLSRegion(); 214 auto& scheduler = kernel.GlobalScheduler();
200 215 scheduler.AddThread(thread);
201 thread->owner_process->RegisterThread(thread.get()); 216 }
217 if (owner_process) {
218 thread->tls_address = thread->owner_process->CreateTLSRegion();
219 thread->owner_process->RegisterThread(thread.get());
220 } else {
221 thread->tls_address = 0;
222 }
223 // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
224 // to initialize the context
225 thread->arm_interface.reset();
226 if ((type_flags & THREADTYPE_HLE) == 0) {
227#ifdef ARCHITECTURE_x86_64
228 if (owner_process && !owner_process->Is64BitProcess()) {
229 thread->arm_interface = std::make_unique<Core::ARM_Dynarmic_32>(
230 system, kernel.Interrupts(), kernel.IsMulticore(), kernel.GetExclusiveMonitor(),
231 processor_id);
232 } else {
233 thread->arm_interface = std::make_unique<Core::ARM_Dynarmic_64>(
234 system, kernel.Interrupts(), kernel.IsMulticore(), kernel.GetExclusiveMonitor(),
235 processor_id);
236 }
202 237
203 ResetThreadContext32(thread->context_32, static_cast<u32>(stack_top), 238#else
204 static_cast<u32>(entry_point), static_cast<u32>(arg)); 239 if (owner_process && !owner_process->Is64BitProcess()) {
205 ResetThreadContext64(thread->context_64, stack_top, entry_point, arg); 240 thread->arm_interface = std::make_shared<Core::ARM_Unicorn>(
241 system, kernel.Interrupts(), kernel.IsMulticore(), ARM_Unicorn::Arch::AArch32,
242 processor_id);
243 } else {
244 thread->arm_interface = std::make_shared<Core::ARM_Unicorn>(
245 system, kernel.Interrupts(), kernel.IsMulticore(), ARM_Unicorn::Arch::AArch64,
246 processor_id);
247 }
248 LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available");
249#endif
250 ResetThreadContext32(thread->context_32, static_cast<u32>(stack_top),
251 static_cast<u32>(entry_point), static_cast<u32>(arg));
252 ResetThreadContext64(thread->context_64, stack_top, entry_point, arg);
253 }
254 thread->host_context =
255 std::make_shared<Common::Fiber>(std::move(thread_start_func), thread_start_parameter);
206 256
207 return MakeResult<std::shared_ptr<Thread>>(std::move(thread)); 257 return MakeResult<std::shared_ptr<Thread>>(std::move(thread));
208} 258}
209 259
210void Thread::SetPriority(u32 priority) { 260void Thread::SetPriority(u32 priority) {
261 SchedulerLock lock(kernel);
211 ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST, 262 ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST,
212 "Invalid priority value."); 263 "Invalid priority value.");
213 nominal_priority = priority; 264 nominal_priority = priority;
214 UpdatePriority(); 265 UpdatePriority();
215} 266}
216 267
217void Thread::SetWaitSynchronizationResult(ResultCode result) { 268void Thread::SetSynchronizationResults(SynchronizationObject* object, ResultCode result) {
218 context_32.cpu_registers[0] = result.raw; 269 signaling_object = object;
219 context_64.cpu_registers[0] = result.raw; 270 signaling_result = result;
220}
221
222void Thread::SetWaitSynchronizationOutput(s32 output) {
223 context_32.cpu_registers[1] = output;
224 context_64.cpu_registers[1] = output;
225} 271}
226 272
227s32 Thread::GetSynchronizationObjectIndex(std::shared_ptr<SynchronizationObject> object) const { 273s32 Thread::GetSynchronizationObjectIndex(std::shared_ptr<SynchronizationObject> object) const {
228 ASSERT_MSG(!wait_objects.empty(), "Thread is not waiting for anything"); 274 ASSERT_MSG(!wait_objects->empty(), "Thread is not waiting for anything");
229 const auto match = std::find(wait_objects.rbegin(), wait_objects.rend(), object); 275 const auto match = std::find(wait_objects->rbegin(), wait_objects->rend(), object);
230 return static_cast<s32>(std::distance(match, wait_objects.rend()) - 1); 276 return static_cast<s32>(std::distance(match, wait_objects->rend()) - 1);
231} 277}
232 278
233VAddr Thread::GetCommandBufferAddress() const { 279VAddr Thread::GetCommandBufferAddress() const {
@@ -236,6 +282,14 @@ VAddr Thread::GetCommandBufferAddress() const {
236 return GetTLSAddress() + command_header_offset; 282 return GetTLSAddress() + command_header_offset;
237} 283}
238 284
285Core::ARM_Interface& Thread::ArmInterface() {
286 return *arm_interface;
287}
288
289const Core::ARM_Interface& Thread::ArmInterface() const {
290 return *arm_interface;
291}
292
239void Thread::SetStatus(ThreadStatus new_status) { 293void Thread::SetStatus(ThreadStatus new_status) {
240 if (new_status == status) { 294 if (new_status == status) {
241 return; 295 return;
@@ -257,10 +311,6 @@ void Thread::SetStatus(ThreadStatus new_status) {
257 break; 311 break;
258 } 312 }
259 313
260 if (status == ThreadStatus::Running) {
261 last_running_ticks = Core::System::GetInstance().CoreTiming().GetTicks();
262 }
263
264 status = new_status; 314 status = new_status;
265} 315}
266 316
@@ -341,75 +391,116 @@ void Thread::UpdatePriority() {
341 lock_owner->UpdatePriority(); 391 lock_owner->UpdatePriority();
342} 392}
343 393
344void Thread::ChangeCore(u32 core, u64 mask) {
345 SetCoreAndAffinityMask(core, mask);
346}
347
348bool Thread::AllSynchronizationObjectsReady() const { 394bool Thread::AllSynchronizationObjectsReady() const {
349 return std::none_of(wait_objects.begin(), wait_objects.end(), 395 return std::none_of(wait_objects->begin(), wait_objects->end(),
350 [this](const std::shared_ptr<SynchronizationObject>& object) { 396 [this](const std::shared_ptr<SynchronizationObject>& object) {
351 return object->ShouldWait(this); 397 return object->ShouldWait(this);
352 }); 398 });
353} 399}
354 400
355bool Thread::InvokeWakeupCallback(ThreadWakeupReason reason, std::shared_ptr<Thread> thread, 401bool Thread::InvokeHLECallback(std::shared_ptr<Thread> thread) {
356 std::shared_ptr<SynchronizationObject> object, 402 ASSERT(hle_callback);
357 std::size_t index) { 403 return hle_callback(std::move(thread));
358 ASSERT(wakeup_callback);
359 return wakeup_callback(reason, std::move(thread), std::move(object), index);
360} 404}
361 405
362void Thread::SetActivity(ThreadActivity value) { 406ResultCode Thread::SetActivity(ThreadActivity value) {
363 activity = value; 407 SchedulerLock lock(kernel);
408
409 auto sched_status = GetSchedulingStatus();
410
411 if (sched_status != ThreadSchedStatus::Runnable && sched_status != ThreadSchedStatus::Paused) {
412 return ERR_INVALID_STATE;
413 }
414
415 if (IsPendingTermination()) {
416 return RESULT_SUCCESS;
417 }
364 418
365 if (value == ThreadActivity::Paused) { 419 if (value == ThreadActivity::Paused) {
366 // Set status if not waiting 420 if ((pausing_state & static_cast<u32>(ThreadSchedFlags::ThreadPauseFlag)) != 0) {
367 if (status == ThreadStatus::Ready || status == ThreadStatus::Running) { 421 return ERR_INVALID_STATE;
368 SetStatus(ThreadStatus::Paused); 422 }
369 kernel.PrepareReschedule(processor_id); 423 AddSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag);
424 } else {
425 if ((pausing_state & static_cast<u32>(ThreadSchedFlags::ThreadPauseFlag)) == 0) {
426 return ERR_INVALID_STATE;
370 } 427 }
371 } else if (status == ThreadStatus::Paused) { 428 RemoveSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag);
372 // Ready to reschedule
373 ResumeFromWait();
374 } 429 }
430 return RESULT_SUCCESS;
375} 431}
376 432
377void Thread::Sleep(s64 nanoseconds) { 433ResultCode Thread::Sleep(s64 nanoseconds) {
378 // Sleep current thread and check for next thread to schedule 434 Handle event_handle{};
379 SetStatus(ThreadStatus::WaitSleep); 435 {
436 SchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds);
437 SetStatus(ThreadStatus::WaitSleep);
438 }
380 439
381 // Create an event to wake the thread up after the specified nanosecond delay has passed 440 if (event_handle != InvalidHandle) {
382 WakeAfterDelay(nanoseconds); 441 auto& time_manager = kernel.TimeManager();
442 time_manager.UnscheduleTimeEvent(event_handle);
443 }
444 return RESULT_SUCCESS;
445}
446
447std::pair<ResultCode, bool> Thread::YieldSimple() {
448 bool is_redundant = false;
449 {
450 SchedulerLock lock(kernel);
451 is_redundant = kernel.GlobalScheduler().YieldThread(this);
452 }
453 return {RESULT_SUCCESS, is_redundant};
454}
455
456std::pair<ResultCode, bool> Thread::YieldAndBalanceLoad() {
457 bool is_redundant = false;
458 {
459 SchedulerLock lock(kernel);
460 is_redundant = kernel.GlobalScheduler().YieldThreadAndBalanceLoad(this);
461 }
462 return {RESULT_SUCCESS, is_redundant};
383} 463}
384 464
385bool Thread::YieldSimple() { 465std::pair<ResultCode, bool> Thread::YieldAndWaitForLoadBalancing() {
386 auto& scheduler = kernel.GlobalScheduler(); 466 bool is_redundant = false;
387 return scheduler.YieldThread(this); 467 {
468 SchedulerLock lock(kernel);
469 is_redundant = kernel.GlobalScheduler().YieldThreadAndWaitForLoadBalancing(this);
470 }
471 return {RESULT_SUCCESS, is_redundant};
388} 472}
389 473
390bool Thread::YieldAndBalanceLoad() { 474void Thread::AddSchedulingFlag(ThreadSchedFlags flag) {
391 auto& scheduler = kernel.GlobalScheduler(); 475 const u32 old_state = scheduling_state;
392 return scheduler.YieldThreadAndBalanceLoad(this); 476 pausing_state |= static_cast<u32>(flag);
477 const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
478 scheduling_state = base_scheduling | pausing_state;
479 kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state);
393} 480}
394 481
395bool Thread::YieldAndWaitForLoadBalancing() { 482void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
396 auto& scheduler = kernel.GlobalScheduler(); 483 const u32 old_state = scheduling_state;
397 return scheduler.YieldThreadAndWaitForLoadBalancing(this); 484 pausing_state &= ~static_cast<u32>(flag);
485 const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
486 scheduling_state = base_scheduling | pausing_state;
487 kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state);
398} 488}
399 489
400void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) { 490void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {
401 const u32 old_flags = scheduling_state; 491 const u32 old_state = scheduling_state;
402 scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) | 492 scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) |
403 static_cast<u32>(new_status); 493 static_cast<u32>(new_status);
404 AdjustSchedulingOnStatus(old_flags); 494 kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state);
405} 495}
406 496
407void Thread::SetCurrentPriority(u32 new_priority) { 497void Thread::SetCurrentPriority(u32 new_priority) {
408 const u32 old_priority = std::exchange(current_priority, new_priority); 498 const u32 old_priority = std::exchange(current_priority, new_priority);
409 AdjustSchedulingOnPriority(old_priority); 499 kernel.GlobalScheduler().AdjustSchedulingOnPriority(this, old_priority);
410} 500}
411 501
412ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { 502ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
503 SchedulerLock lock(kernel);
413 const auto HighestSetCore = [](u64 mask, u32 max_cores) { 504 const auto HighestSetCore = [](u64 mask, u32 max_cores) {
414 for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) { 505 for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) {
415 if (((mask >> core) & 1) != 0) { 506 if (((mask >> core) & 1) != 0) {
@@ -443,111 +534,12 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
443 processor_id = ideal_core; 534 processor_id = ideal_core;
444 } 535 }
445 } 536 }
446 AdjustSchedulingOnAffinity(old_affinity_mask, old_core); 537 kernel.GlobalScheduler().AdjustSchedulingOnAffinity(this, old_affinity_mask, old_core);
447 } 538 }
448 } 539 }
449 return RESULT_SUCCESS; 540 return RESULT_SUCCESS;
450} 541}
451 542
452void Thread::AdjustSchedulingOnStatus(u32 old_flags) {
453 if (old_flags == scheduling_state) {
454 return;
455 }
456
457 auto& scheduler = kernel.GlobalScheduler();
458 if (static_cast<ThreadSchedStatus>(old_flags & static_cast<u32>(ThreadSchedMasks::LowMask)) ==
459 ThreadSchedStatus::Runnable) {
460 // In this case the thread was running, now it's pausing/exitting
461 if (processor_id >= 0) {
462 scheduler.Unschedule(current_priority, static_cast<u32>(processor_id), this);
463 }
464
465 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
466 if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
467 scheduler.Unsuggest(current_priority, core, this);
468 }
469 }
470 } else if (GetSchedulingStatus() == ThreadSchedStatus::Runnable) {
471 // The thread is now set to running from being stopped
472 if (processor_id >= 0) {
473 scheduler.Schedule(current_priority, static_cast<u32>(processor_id), this);
474 }
475
476 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
477 if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
478 scheduler.Suggest(current_priority, core, this);
479 }
480 }
481 }
482
483 scheduler.SetReselectionPending();
484}
485
486void Thread::AdjustSchedulingOnPriority(u32 old_priority) {
487 if (GetSchedulingStatus() != ThreadSchedStatus::Runnable) {
488 return;
489 }
490 auto& scheduler = kernel.GlobalScheduler();
491 if (processor_id >= 0) {
492 scheduler.Unschedule(old_priority, static_cast<u32>(processor_id), this);
493 }
494
495 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
496 if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
497 scheduler.Unsuggest(old_priority, core, this);
498 }
499 }
500
501 // Add thread to the new priority queues.
502 Thread* current_thread = GetCurrentThread();
503
504 if (processor_id >= 0) {
505 if (current_thread == this) {
506 scheduler.SchedulePrepend(current_priority, static_cast<u32>(processor_id), this);
507 } else {
508 scheduler.Schedule(current_priority, static_cast<u32>(processor_id), this);
509 }
510 }
511
512 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
513 if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
514 scheduler.Suggest(current_priority, core, this);
515 }
516 }
517
518 scheduler.SetReselectionPending();
519}
520
521void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) {
522 auto& scheduler = kernel.GlobalScheduler();
523 if (GetSchedulingStatus() != ThreadSchedStatus::Runnable ||
524 current_priority >= THREADPRIO_COUNT) {
525 return;
526 }
527
528 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
529 if (((old_affinity_mask >> core) & 1) != 0) {
530 if (core == static_cast<u32>(old_core)) {
531 scheduler.Unschedule(current_priority, core, this);
532 } else {
533 scheduler.Unsuggest(current_priority, core, this);
534 }
535 }
536 }
537
538 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
539 if (((affinity_mask >> core) & 1) != 0) {
540 if (core == static_cast<u32>(processor_id)) {
541 scheduler.Schedule(current_priority, core, this);
542 } else {
543 scheduler.Suggest(current_priority, core, this);
544 }
545 }
546 }
547
548 scheduler.SetReselectionPending();
549}
550
551//////////////////////////////////////////////////////////////////////////////////////////////////// 543////////////////////////////////////////////////////////////////////////////////////////////////////
552 544
553/** 545/**
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index 23fdef8a4..c0342c462 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -6,26 +6,47 @@
6 6
7#include <functional> 7#include <functional>
8#include <string> 8#include <string>
9#include <utility>
9#include <vector> 10#include <vector>
10 11
11#include "common/common_types.h" 12#include "common/common_types.h"
13#include "common/spin_lock.h"
12#include "core/arm/arm_interface.h" 14#include "core/arm/arm_interface.h"
13#include "core/hle/kernel/object.h" 15#include "core/hle/kernel/object.h"
14#include "core/hle/kernel/synchronization_object.h" 16#include "core/hle/kernel/synchronization_object.h"
15#include "core/hle/result.h" 17#include "core/hle/result.h"
16 18
19namespace Common {
20class Fiber;
21}
22
23namespace Core {
24class ARM_Interface;
25class System;
26} // namespace Core
27
17namespace Kernel { 28namespace Kernel {
18 29
30class GlobalScheduler;
19class KernelCore; 31class KernelCore;
20class Process; 32class Process;
21class Scheduler; 33class Scheduler;
22 34
23enum ThreadPriority : u32 { 35enum ThreadPriority : u32 {
24 THREADPRIO_HIGHEST = 0, ///< Highest thread priority 36 THREADPRIO_HIGHEST = 0, ///< Highest thread priority
25 THREADPRIO_USERLAND_MAX = 24, ///< Highest thread priority for userland apps 37 THREADPRIO_MAX_CORE_MIGRATION = 2, ///< Highest priority for a core migration
26 THREADPRIO_DEFAULT = 44, ///< Default thread priority for userland apps 38 THREADPRIO_USERLAND_MAX = 24, ///< Highest thread priority for userland apps
27 THREADPRIO_LOWEST = 63, ///< Lowest thread priority 39 THREADPRIO_DEFAULT = 44, ///< Default thread priority for userland apps
28 THREADPRIO_COUNT = 64, ///< Total number of possible thread priorities. 40 THREADPRIO_LOWEST = 63, ///< Lowest thread priority
41 THREADPRIO_COUNT = 64, ///< Total number of possible thread priorities.
42};
43
44enum ThreadType : u32 {
45 THREADTYPE_USER = 0x1,
46 THREADTYPE_KERNEL = 0x2,
47 THREADTYPE_HLE = 0x4,
48 THREADTYPE_IDLE = 0x8,
49 THREADTYPE_SUSPEND = 0x10,
29}; 50};
30 51
31enum ThreadProcessorId : s32 { 52enum ThreadProcessorId : s32 {
@@ -107,26 +128,45 @@ public:
107 128
108 using ThreadSynchronizationObjects = std::vector<std::shared_ptr<SynchronizationObject>>; 129 using ThreadSynchronizationObjects = std::vector<std::shared_ptr<SynchronizationObject>>;
109 130
110 using WakeupCallback = 131 using HLECallback = std::function<bool(std::shared_ptr<Thread> thread)>;
111 std::function<bool(ThreadWakeupReason reason, std::shared_ptr<Thread> thread, 132
112 std::shared_ptr<SynchronizationObject> object, std::size_t index)>; 133 /**
134 * Creates and returns a new thread. The new thread is immediately scheduled
135 * @param system The instance of the whole system
136 * @param name The friendly name desired for the thread
137 * @param entry_point The address at which the thread should start execution
138 * @param priority The thread's priority
139 * @param arg User data to pass to the thread
140 * @param processor_id The ID(s) of the processors on which the thread is desired to be run
141 * @param stack_top The address of the thread's stack top
142 * @param owner_process The parent process for the thread, if null, it's a kernel thread
143 * @return A shared pointer to the newly created thread
144 */
145 static ResultVal<std::shared_ptr<Thread>> Create(Core::System& system, ThreadType type_flags,
146 std::string name, VAddr entry_point,
147 u32 priority, u64 arg, s32 processor_id,
148 VAddr stack_top, Process* owner_process);
113 149
114 /** 150 /**
115 * Creates and returns a new thread. The new thread is immediately scheduled 151 * Creates and returns a new thread. The new thread is immediately scheduled
116 * @param kernel The kernel instance this thread will be created under. 152 * @param system The instance of the whole system
117 * @param name The friendly name desired for the thread 153 * @param name The friendly name desired for the thread
118 * @param entry_point The address at which the thread should start execution 154 * @param entry_point The address at which the thread should start execution
119 * @param priority The thread's priority 155 * @param priority The thread's priority
120 * @param arg User data to pass to the thread 156 * @param arg User data to pass to the thread
121 * @param processor_id The ID(s) of the processors on which the thread is desired to be run 157 * @param processor_id The ID(s) of the processors on which the thread is desired to be run
122 * @param stack_top The address of the thread's stack top 158 * @param stack_top The address of the thread's stack top
123 * @param owner_process The parent process for the thread 159 * @param owner_process The parent process for the thread, if null, it's a kernel thread
160 * @param thread_start_func The function where the host context will start.
161 * @param thread_start_parameter The parameter which will passed to host context on init
124 * @return A shared pointer to the newly created thread 162 * @return A shared pointer to the newly created thread
125 */ 163 */
126 static ResultVal<std::shared_ptr<Thread>> Create(KernelCore& kernel, std::string name, 164 static ResultVal<std::shared_ptr<Thread>> Create(Core::System& system, ThreadType type_flags,
127 VAddr entry_point, u32 priority, u64 arg, 165 std::string name, VAddr entry_point,
128 s32 processor_id, VAddr stack_top, 166 u32 priority, u64 arg, s32 processor_id,
129 Process& owner_process); 167 VAddr stack_top, Process* owner_process,
168 std::function<void(void*)>&& thread_start_func,
169 void* thread_start_parameter);
130 170
131 std::string GetName() const override { 171 std::string GetName() const override {
132 return name; 172 return name;
@@ -181,7 +221,7 @@ public:
181 void UpdatePriority(); 221 void UpdatePriority();
182 222
183 /// Changes the core that the thread is running or scheduled to run on. 223 /// Changes the core that the thread is running or scheduled to run on.
184 void ChangeCore(u32 core, u64 mask); 224 ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask);
185 225
186 /** 226 /**
187 * Gets the thread's thread ID 227 * Gets the thread's thread ID
@@ -194,6 +234,10 @@ public:
194 /// Resumes a thread from waiting 234 /// Resumes a thread from waiting
195 void ResumeFromWait(); 235 void ResumeFromWait();
196 236
237 void OnWakeUp();
238
239 ResultCode Start();
240
197 /// Cancels a waiting operation that this thread may or may not be within. 241 /// Cancels a waiting operation that this thread may or may not be within.
198 /// 242 ///
199 /// When the thread is within a waiting state, this will set the thread's 243 /// When the thread is within a waiting state, this will set the thread's
@@ -202,26 +246,19 @@ public:
202 /// 246 ///
203 void CancelWait(); 247 void CancelWait();
204 248
205 /** 249 void SetSynchronizationResults(SynchronizationObject* object, ResultCode result);
206 * Schedules an event to wake up the specified thread after the specified delay
207 * @param nanoseconds The time this thread will be allowed to sleep for
208 */
209 void WakeAfterDelay(s64 nanoseconds);
210 250
211 /// Cancel any outstanding wakeup events for this thread 251 Core::ARM_Interface& ArmInterface();
212 void CancelWakeupTimer();
213 252
214 /** 253 const Core::ARM_Interface& ArmInterface() const;
215 * Sets the result after the thread awakens (from svcWaitSynchronization)
216 * @param result Value to set to the returned result
217 */
218 void SetWaitSynchronizationResult(ResultCode result);
219 254
220 /** 255 SynchronizationObject* GetSignalingObject() const {
221 * Sets the output parameter value after the thread awakens (from svcWaitSynchronization) 256 return signaling_object;
222 * @param output Value to set to the output parameter 257 }
223 */ 258
224 void SetWaitSynchronizationOutput(s32 output); 259 ResultCode GetSignalingResult() const {
260 return signaling_result;
261 }
225 262
226 /** 263 /**
227 * Retrieves the index that this particular object occupies in the list of objects 264 * Retrieves the index that this particular object occupies in the list of objects
@@ -269,11 +306,6 @@ public:
269 */ 306 */
270 VAddr GetCommandBufferAddress() const; 307 VAddr GetCommandBufferAddress() const;
271 308
272 /// Returns whether this thread is waiting on objects from a WaitSynchronization call.
273 bool IsSleepingOnWait() const {
274 return status == ThreadStatus::WaitSynch;
275 }
276
277 ThreadContext32& GetContext32() { 309 ThreadContext32& GetContext32() {
278 return context_32; 310 return context_32;
279 } 311 }
@@ -290,6 +322,28 @@ public:
290 return context_64; 322 return context_64;
291 } 323 }
292 324
325 bool IsHLEThread() const {
326 return (type & THREADTYPE_HLE) != 0;
327 }
328
329 bool IsSuspendThread() const {
330 return (type & THREADTYPE_SUSPEND) != 0;
331 }
332
333 bool IsIdleThread() const {
334 return (type & THREADTYPE_IDLE) != 0;
335 }
336
337 bool WasRunning() const {
338 return was_running;
339 }
340
341 void SetWasRunning(bool value) {
342 was_running = value;
343 }
344
345 std::shared_ptr<Common::Fiber>& GetHostContext();
346
293 ThreadStatus GetStatus() const { 347 ThreadStatus GetStatus() const {
294 return status; 348 return status;
295 } 349 }
@@ -325,18 +379,18 @@ public:
325 } 379 }
326 380
327 const ThreadSynchronizationObjects& GetSynchronizationObjects() const { 381 const ThreadSynchronizationObjects& GetSynchronizationObjects() const {
328 return wait_objects; 382 return *wait_objects;
329 } 383 }
330 384
331 void SetSynchronizationObjects(ThreadSynchronizationObjects objects) { 385 void SetSynchronizationObjects(ThreadSynchronizationObjects* objects) {
332 wait_objects = std::move(objects); 386 wait_objects = objects;
333 } 387 }
334 388
335 void ClearSynchronizationObjects() { 389 void ClearSynchronizationObjects() {
336 for (const auto& waiting_object : wait_objects) { 390 for (const auto& waiting_object : *wait_objects) {
337 waiting_object->RemoveWaitingThread(SharedFrom(this)); 391 waiting_object->RemoveWaitingThread(SharedFrom(this));
338 } 392 }
339 wait_objects.clear(); 393 wait_objects->clear();
340 } 394 }
341 395
342 /// Determines whether all the objects this thread is waiting on are ready. 396 /// Determines whether all the objects this thread is waiting on are ready.
@@ -386,26 +440,35 @@ public:
386 arb_wait_address = address; 440 arb_wait_address = address;
387 } 441 }
388 442
389 bool HasWakeupCallback() const { 443 bool HasHLECallback() const {
390 return wakeup_callback != nullptr; 444 return hle_callback != nullptr;
391 } 445 }
392 446
393 void SetWakeupCallback(WakeupCallback callback) { 447 void SetHLECallback(HLECallback callback) {
394 wakeup_callback = std::move(callback); 448 hle_callback = std::move(callback);
395 } 449 }
396 450
397 void InvalidateWakeupCallback() { 451 void SetHLETimeEvent(Handle time_event) {
398 SetWakeupCallback(nullptr); 452 hle_time_event = time_event;
399 } 453 }
400 454
401 /** 455 void SetHLESyncObject(SynchronizationObject* object) {
402 * Invokes the thread's wakeup callback. 456 hle_object = object;
403 * 457 }
404 * @pre A valid wakeup callback has been set. Violating this precondition 458
405 * will cause an assertion to trigger. 459 Handle GetHLETimeEvent() const {
406 */ 460 return hle_time_event;
407 bool InvokeWakeupCallback(ThreadWakeupReason reason, std::shared_ptr<Thread> thread, 461 }
408 std::shared_ptr<SynchronizationObject> object, std::size_t index); 462
463 SynchronizationObject* GetHLESyncObject() const {
464 return hle_object;
465 }
466
467 void InvalidateHLECallback() {
468 SetHLECallback(nullptr);
469 }
470
471 bool InvokeHLECallback(std::shared_ptr<Thread> thread);
409 472
410 u32 GetIdealCore() const { 473 u32 GetIdealCore() const {
411 return ideal_core; 474 return ideal_core;
@@ -415,23 +478,19 @@ public:
415 return affinity_mask; 478 return affinity_mask;
416 } 479 }
417 480
418 ThreadActivity GetActivity() const { 481 ResultCode SetActivity(ThreadActivity value);
419 return activity;
420 }
421
422 void SetActivity(ThreadActivity value);
423 482
424 /// Sleeps this thread for the given amount of nanoseconds. 483 /// Sleeps this thread for the given amount of nanoseconds.
425 void Sleep(s64 nanoseconds); 484 ResultCode Sleep(s64 nanoseconds);
426 485
427 /// Yields this thread without rebalancing loads. 486 /// Yields this thread without rebalancing loads.
428 bool YieldSimple(); 487 std::pair<ResultCode, bool> YieldSimple();
429 488
430 /// Yields this thread and does a load rebalancing. 489 /// Yields this thread and does a load rebalancing.
431 bool YieldAndBalanceLoad(); 490 std::pair<ResultCode, bool> YieldAndBalanceLoad();
432 491
433 /// Yields this thread and if the core is left idle, loads are rebalanced 492 /// Yields this thread and if the core is left idle, loads are rebalanced
434 bool YieldAndWaitForLoadBalancing(); 493 std::pair<ResultCode, bool> YieldAndWaitForLoadBalancing();
435 494
436 void IncrementYieldCount() { 495 void IncrementYieldCount() {
437 yield_count++; 496 yield_count++;
@@ -446,6 +505,10 @@ public:
446 static_cast<u32>(ThreadSchedMasks::LowMask)); 505 static_cast<u32>(ThreadSchedMasks::LowMask));
447 } 506 }
448 507
508 bool IsRunnable() const {
509 return scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable);
510 }
511
449 bool IsRunning() const { 512 bool IsRunning() const {
450 return is_running; 513 return is_running;
451 } 514 }
@@ -466,17 +529,67 @@ public:
466 return global_handle; 529 return global_handle;
467 } 530 }
468 531
532 bool IsWaitingForArbitration() const {
533 return waiting_for_arbitration;
534 }
535
536 void WaitForArbitration(bool set) {
537 waiting_for_arbitration = set;
538 }
539
540 bool IsWaitingSync() const {
541 return is_waiting_on_sync;
542 }
543
544 void SetWaitingSync(bool is_waiting) {
545 is_waiting_on_sync = is_waiting;
546 }
547
548 bool IsPendingTermination() const {
549 return will_be_terminated || GetSchedulingStatus() == ThreadSchedStatus::Exited;
550 }
551
552 bool IsPaused() const {
553 return pausing_state != 0;
554 }
555
556 bool IsContinuousOnSVC() const {
557 return is_continuous_on_svc;
558 }
559
560 void SetContinuousOnSVC(bool is_continuous) {
561 is_continuous_on_svc = is_continuous;
562 }
563
564 bool IsPhantomMode() const {
565 return is_phantom_mode;
566 }
567
568 void SetPhantomMode(bool phantom) {
569 is_phantom_mode = phantom;
570 }
571
572 bool HasExited() const {
573 return has_exited;
574 }
575
469private: 576private:
577 friend class GlobalScheduler;
578 friend class Scheduler;
579
470 void SetSchedulingStatus(ThreadSchedStatus new_status); 580 void SetSchedulingStatus(ThreadSchedStatus new_status);
581 void AddSchedulingFlag(ThreadSchedFlags flag);
582 void RemoveSchedulingFlag(ThreadSchedFlags flag);
583
471 void SetCurrentPriority(u32 new_priority); 584 void SetCurrentPriority(u32 new_priority);
472 ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask);
473 585
474 void AdjustSchedulingOnStatus(u32 old_flags);
475 void AdjustSchedulingOnPriority(u32 old_priority);
476 void AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core); 586 void AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core);
477 587
588 Common::SpinLock context_guard{};
478 ThreadContext32 context_32{}; 589 ThreadContext32 context_32{};
479 ThreadContext64 context_64{}; 590 ThreadContext64 context_64{};
591 std::unique_ptr<Core::ARM_Interface> arm_interface{};
592 std::shared_ptr<Common::Fiber> host_context{};
480 593
481 u64 thread_id = 0; 594 u64 thread_id = 0;
482 595
@@ -485,6 +598,8 @@ private:
485 VAddr entry_point = 0; 598 VAddr entry_point = 0;
486 VAddr stack_top = 0; 599 VAddr stack_top = 0;
487 600
601 ThreadType type;
602
488 /// Nominal thread priority, as set by the emulated application. 603 /// Nominal thread priority, as set by the emulated application.
489 /// The nominal priority is the thread priority without priority 604 /// The nominal priority is the thread priority without priority
490 /// inheritance taken into account. 605 /// inheritance taken into account.
@@ -509,7 +624,10 @@ private:
509 624
510 /// Objects that the thread is waiting on, in the same order as they were 625 /// Objects that the thread is waiting on, in the same order as they were
511 /// passed to WaitSynchronization. 626 /// passed to WaitSynchronization.
512 ThreadSynchronizationObjects wait_objects; 627 ThreadSynchronizationObjects* wait_objects;
628
629 SynchronizationObject* signaling_object;
630 ResultCode signaling_result{RESULT_SUCCESS};
513 631
514 /// List of threads that are waiting for a mutex that is held by this thread. 632 /// List of threads that are waiting for a mutex that is held by this thread.
515 MutexWaitingThreads wait_mutex_threads; 633 MutexWaitingThreads wait_mutex_threads;
@@ -526,30 +644,39 @@ private:
526 644
527 /// If waiting for an AddressArbiter, this is the address being waited on. 645 /// If waiting for an AddressArbiter, this is the address being waited on.
528 VAddr arb_wait_address{0}; 646 VAddr arb_wait_address{0};
647 bool waiting_for_arbitration{};
529 648
530 /// Handle used as userdata to reference this object when inserting into the CoreTiming queue. 649 /// Handle used as userdata to reference this object when inserting into the CoreTiming queue.
531 Handle global_handle = 0; 650 Handle global_handle = 0;
532 651
533 /// Callback that will be invoked when the thread is resumed from a waiting state. If the thread 652 /// Callback for HLE Events
534 /// was waiting via WaitSynchronization then the object will be the last object that became 653 HLECallback hle_callback;
535 /// available. In case of a timeout, the object will be nullptr. 654 Handle hle_time_event;
536 WakeupCallback wakeup_callback; 655 SynchronizationObject* hle_object;
537 656
538 Scheduler* scheduler = nullptr; 657 Scheduler* scheduler = nullptr;
539 658
540 u32 ideal_core{0xFFFFFFFF}; 659 u32 ideal_core{0xFFFFFFFF};
541 u64 affinity_mask{0x1}; 660 u64 affinity_mask{0x1};
542 661
543 ThreadActivity activity = ThreadActivity::Normal;
544
545 s32 ideal_core_override = -1; 662 s32 ideal_core_override = -1;
546 u64 affinity_mask_override = 0x1; 663 u64 affinity_mask_override = 0x1;
547 u32 affinity_override_count = 0; 664 u32 affinity_override_count = 0;
548 665
549 u32 scheduling_state = 0; 666 u32 scheduling_state = 0;
667 u32 pausing_state = 0;
550 bool is_running = false; 668 bool is_running = false;
669 bool is_waiting_on_sync = false;
551 bool is_sync_cancelled = false; 670 bool is_sync_cancelled = false;
552 671
672 bool is_continuous_on_svc = false;
673
674 bool will_be_terminated = false;
675 bool is_phantom_mode = false;
676 bool has_exited = false;
677
678 bool was_running = false;
679
553 std::string name; 680 std::string name;
554}; 681};
555 682
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp
index 21b290468..941305e8e 100644
--- a/src/core/hle/kernel/time_manager.cpp
+++ b/src/core/hle/kernel/time_manager.cpp
@@ -8,30 +8,37 @@
8#include "core/core_timing_util.h" 8#include "core/core_timing_util.h"
9#include "core/hle/kernel/handle_table.h" 9#include "core/hle/kernel/handle_table.h"
10#include "core/hle/kernel/kernel.h" 10#include "core/hle/kernel/kernel.h"
11#include "core/hle/kernel/scheduler.h"
11#include "core/hle/kernel/thread.h" 12#include "core/hle/kernel/thread.h"
12#include "core/hle/kernel/time_manager.h" 13#include "core/hle/kernel/time_manager.h"
13 14
14namespace Kernel { 15namespace Kernel {
15 16
16TimeManager::TimeManager(Core::System& system) : system{system} { 17TimeManager::TimeManager(Core::System& system_) : system{system_} {
17 time_manager_event_type = Core::Timing::CreateEvent( 18 time_manager_event_type = Core::Timing::CreateEvent(
18 "Kernel::TimeManagerCallback", [this](u64 thread_handle, [[maybe_unused]] s64 cycles_late) { 19 "Kernel::TimeManagerCallback", [this](u64 thread_handle, [[maybe_unused]] s64 cycles_late) {
20 SchedulerLock lock(system.Kernel());
19 Handle proper_handle = static_cast<Handle>(thread_handle); 21 Handle proper_handle = static_cast<Handle>(thread_handle);
22 if (cancelled_events[proper_handle]) {
23 return;
24 }
20 std::shared_ptr<Thread> thread = 25 std::shared_ptr<Thread> thread =
21 this->system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle); 26 this->system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
22 thread->ResumeFromWait(); 27 thread->OnWakeUp();
23 }); 28 });
24} 29}
25 30
26void TimeManager::ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64 nanoseconds) { 31void TimeManager::ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64 nanoseconds) {
32 event_handle = timetask->GetGlobalHandle();
27 if (nanoseconds > 0) { 33 if (nanoseconds > 0) {
28 ASSERT(timetask); 34 ASSERT(timetask);
29 event_handle = timetask->GetGlobalHandle(); 35 ASSERT(timetask->GetStatus() != ThreadStatus::Ready);
30 const s64 cycles = Core::Timing::nsToCycles(std::chrono::nanoseconds{nanoseconds}); 36 ASSERT(timetask->GetStatus() != ThreadStatus::WaitMutex);
31 system.CoreTiming().ScheduleEvent(cycles, time_manager_event_type, event_handle); 37 system.CoreTiming().ScheduleEvent(nanoseconds, time_manager_event_type, event_handle);
32 } else { 38 } else {
33 event_handle = InvalidHandle; 39 event_handle = InvalidHandle;
34 } 40 }
41 cancelled_events[event_handle] = false;
35} 42}
36 43
37void TimeManager::UnscheduleTimeEvent(Handle event_handle) { 44void TimeManager::UnscheduleTimeEvent(Handle event_handle) {
@@ -39,6 +46,12 @@ void TimeManager::UnscheduleTimeEvent(Handle event_handle) {
39 return; 46 return;
40 } 47 }
41 system.CoreTiming().UnscheduleEvent(time_manager_event_type, event_handle); 48 system.CoreTiming().UnscheduleEvent(time_manager_event_type, event_handle);
49 cancelled_events[event_handle] = true;
50}
51
52void TimeManager::CancelTimeEvent(Thread* time_task) {
53 Handle event_handle = time_task->GetGlobalHandle();
54 UnscheduleTimeEvent(event_handle);
42} 55}
43 56
44} // namespace Kernel 57} // namespace Kernel
diff --git a/src/core/hle/kernel/time_manager.h b/src/core/hle/kernel/time_manager.h
index eaec486d1..307a18765 100644
--- a/src/core/hle/kernel/time_manager.h
+++ b/src/core/hle/kernel/time_manager.h
@@ -5,6 +5,7 @@
5#pragma once 5#pragma once
6 6
7#include <memory> 7#include <memory>
8#include <unordered_map>
8 9
9#include "core/hle/kernel/object.h" 10#include "core/hle/kernel/object.h"
10 11
@@ -35,9 +36,12 @@ public:
35 /// Unschedule an existing time event 36 /// Unschedule an existing time event
36 void UnscheduleTimeEvent(Handle event_handle); 37 void UnscheduleTimeEvent(Handle event_handle);
37 38
39 void CancelTimeEvent(Thread* time_task);
40
38private: 41private:
39 Core::System& system; 42 Core::System& system;
40 std::shared_ptr<Core::Timing::EventType> time_manager_event_type; 43 std::shared_ptr<Core::Timing::EventType> time_manager_event_type;
44 std::unordered_map<Handle, bool> cancelled_events;
41}; 45};
42 46
43} // namespace Kernel 47} // namespace Kernel
diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp
index 4df74c4f9..20f366635 100644
--- a/src/core/hle/service/am/am.cpp
+++ b/src/core/hle/service/am/am.cpp
@@ -68,6 +68,7 @@ IWindowController::IWindowController(Core::System& system_)
68 static const FunctionInfo functions[] = { 68 static const FunctionInfo functions[] = {
69 {0, nullptr, "CreateWindow"}, 69 {0, nullptr, "CreateWindow"},
70 {1, &IWindowController::GetAppletResourceUserId, "GetAppletResourceUserId"}, 70 {1, &IWindowController::GetAppletResourceUserId, "GetAppletResourceUserId"},
71 {2, nullptr, "GetAppletResourceUserIdOfCallerApplet"},
71 {10, &IWindowController::AcquireForegroundRights, "AcquireForegroundRights"}, 72 {10, &IWindowController::AcquireForegroundRights, "AcquireForegroundRights"},
72 {11, nullptr, "ReleaseForegroundRights"}, 73 {11, nullptr, "ReleaseForegroundRights"},
73 {12, nullptr, "RejectToChangeIntoBackground"}, 74 {12, nullptr, "RejectToChangeIntoBackground"},
@@ -189,8 +190,8 @@ IDisplayController::IDisplayController() : ServiceFramework("IDisplayController"
189 {5, nullptr, "GetLastForegroundCaptureImageEx"}, 190 {5, nullptr, "GetLastForegroundCaptureImageEx"},
190 {6, nullptr, "GetLastApplicationCaptureImageEx"}, 191 {6, nullptr, "GetLastApplicationCaptureImageEx"},
191 {7, nullptr, "GetCallerAppletCaptureImageEx"}, 192 {7, nullptr, "GetCallerAppletCaptureImageEx"},
192 {8, nullptr, "TakeScreenShotOfOwnLayer"}, // 2.0.0+ 193 {8, nullptr, "TakeScreenShotOfOwnLayer"},
193 {9, nullptr, "CopyBetweenCaptureBuffers"}, // 5.0.0+ 194 {9, nullptr, "CopyBetweenCaptureBuffers"},
194 {10, nullptr, "AcquireLastApplicationCaptureBuffer"}, 195 {10, nullptr, "AcquireLastApplicationCaptureBuffer"},
195 {11, nullptr, "ReleaseLastApplicationCaptureBuffer"}, 196 {11, nullptr, "ReleaseLastApplicationCaptureBuffer"},
196 {12, nullptr, "AcquireLastForegroundCaptureBuffer"}, 197 {12, nullptr, "AcquireLastForegroundCaptureBuffer"},
@@ -200,17 +201,14 @@ IDisplayController::IDisplayController() : ServiceFramework("IDisplayController"
200 {16, nullptr, "AcquireLastApplicationCaptureBufferEx"}, 201 {16, nullptr, "AcquireLastApplicationCaptureBufferEx"},
201 {17, nullptr, "AcquireLastForegroundCaptureBufferEx"}, 202 {17, nullptr, "AcquireLastForegroundCaptureBufferEx"},
202 {18, nullptr, "AcquireCallerAppletCaptureBufferEx"}, 203 {18, nullptr, "AcquireCallerAppletCaptureBufferEx"},
203 // 2.0.0+
204 {20, nullptr, "ClearCaptureBuffer"}, 204 {20, nullptr, "ClearCaptureBuffer"},
205 {21, nullptr, "ClearAppletTransitionBuffer"}, 205 {21, nullptr, "ClearAppletTransitionBuffer"},
206 // 4.0.0+
207 {22, nullptr, "AcquireLastApplicationCaptureSharedBuffer"}, 206 {22, nullptr, "AcquireLastApplicationCaptureSharedBuffer"},
208 {23, nullptr, "ReleaseLastApplicationCaptureSharedBuffer"}, 207 {23, nullptr, "ReleaseLastApplicationCaptureSharedBuffer"},
209 {24, nullptr, "AcquireLastForegroundCaptureSharedBuffer"}, 208 {24, nullptr, "AcquireLastForegroundCaptureSharedBuffer"},
210 {25, nullptr, "ReleaseLastForegroundCaptureSharedBuffer"}, 209 {25, nullptr, "ReleaseLastForegroundCaptureSharedBuffer"},
211 {26, nullptr, "AcquireCallerAppletCaptureSharedBuffer"}, 210 {26, nullptr, "AcquireCallerAppletCaptureSharedBuffer"},
212 {27, nullptr, "ReleaseCallerAppletCaptureSharedBuffer"}, 211 {27, nullptr, "ReleaseCallerAppletCaptureSharedBuffer"},
213 // 6.0.0+
214 {28, nullptr, "TakeScreenShotOfOwnLayerEx"}, 212 {28, nullptr, "TakeScreenShotOfOwnLayerEx"},
215 }; 213 };
216 // clang-format on 214 // clang-format on
@@ -225,7 +223,7 @@ IDebugFunctions::IDebugFunctions() : ServiceFramework{"IDebugFunctions"} {
225 static const FunctionInfo functions[] = { 223 static const FunctionInfo functions[] = {
226 {0, nullptr, "NotifyMessageToHomeMenuForDebug"}, 224 {0, nullptr, "NotifyMessageToHomeMenuForDebug"},
227 {1, nullptr, "OpenMainApplication"}, 225 {1, nullptr, "OpenMainApplication"},
228 {10, nullptr, "EmulateButtonEvent"}, 226 {10, nullptr, "PerformSystemButtonPressing"},
229 {20, nullptr, "InvalidateTransitionLayer"}, 227 {20, nullptr, "InvalidateTransitionLayer"},
230 {30, nullptr, "RequestLaunchApplicationWithUserAndArgumentForDebug"}, 228 {30, nullptr, "RequestLaunchApplicationWithUserAndArgumentForDebug"},
231 {40, nullptr, "GetAppletResourceUsageInfo"}, 229 {40, nullptr, "GetAppletResourceUsageInfo"},
@@ -267,7 +265,7 @@ ISelfController::ISelfController(Core::System& system,
267 {16, &ISelfController::SetOutOfFocusSuspendingEnabled, "SetOutOfFocusSuspendingEnabled"}, 265 {16, &ISelfController::SetOutOfFocusSuspendingEnabled, "SetOutOfFocusSuspendingEnabled"},
268 {17, nullptr, "SetControllerFirmwareUpdateSection"}, 266 {17, nullptr, "SetControllerFirmwareUpdateSection"},
269 {18, nullptr, "SetRequiresCaptureButtonShortPressedMessage"}, 267 {18, nullptr, "SetRequiresCaptureButtonShortPressedMessage"},
270 {19, &ISelfController::SetScreenShotImageOrientation, "SetScreenShotImageOrientation"}, 268 {19, &ISelfController::SetAlbumImageOrientation, "SetAlbumImageOrientation"},
271 {20, nullptr, "SetDesirableKeyboardLayout"}, 269 {20, nullptr, "SetDesirableKeyboardLayout"},
272 {40, &ISelfController::CreateManagedDisplayLayer, "CreateManagedDisplayLayer"}, 270 {40, &ISelfController::CreateManagedDisplayLayer, "CreateManagedDisplayLayer"},
273 {41, nullptr, "IsSystemBufferSharingEnabled"}, 271 {41, nullptr, "IsSystemBufferSharingEnabled"},
@@ -443,7 +441,7 @@ void ISelfController::SetOutOfFocusSuspendingEnabled(Kernel::HLERequestContext&
443 rb.Push(RESULT_SUCCESS); 441 rb.Push(RESULT_SUCCESS);
444} 442}
445 443
446void ISelfController::SetScreenShotImageOrientation(Kernel::HLERequestContext& ctx) { 444void ISelfController::SetAlbumImageOrientation(Kernel::HLERequestContext& ctx) {
447 LOG_WARNING(Service_AM, "(STUBBED) called"); 445 LOG_WARNING(Service_AM, "(STUBBED) called");
448 446
449 IPC::ResponseBuilder rb{ctx, 2}; 447 IPC::ResponseBuilder rb{ctx, 2};
@@ -607,6 +605,7 @@ ICommonStateGetter::ICommonStateGetter(Core::System& system,
607 {20, nullptr, "PushToGeneralChannel"}, 605 {20, nullptr, "PushToGeneralChannel"},
608 {30, nullptr, "GetHomeButtonReaderLockAccessor"}, 606 {30, nullptr, "GetHomeButtonReaderLockAccessor"},
609 {31, nullptr, "GetReaderLockAccessorEx"}, 607 {31, nullptr, "GetReaderLockAccessorEx"},
608 {32, nullptr, "GetWriterLockAccessorEx"},
610 {40, nullptr, "GetCradleFwVersion"}, 609 {40, nullptr, "GetCradleFwVersion"},
611 {50, &ICommonStateGetter::IsVrModeEnabled, "IsVrModeEnabled"}, 610 {50, &ICommonStateGetter::IsVrModeEnabled, "IsVrModeEnabled"},
612 {51, &ICommonStateGetter::SetVrModeEnabled, "SetVrModeEnabled"}, 611 {51, &ICommonStateGetter::SetVrModeEnabled, "SetVrModeEnabled"},
@@ -1132,6 +1131,7 @@ IApplicationFunctions::IApplicationFunctions(Core::System& system_)
1132 {24, nullptr, "GetLaunchStorageInfoForDebug"}, 1131 {24, nullptr, "GetLaunchStorageInfoForDebug"},
1133 {25, &IApplicationFunctions::ExtendSaveData, "ExtendSaveData"}, 1132 {25, &IApplicationFunctions::ExtendSaveData, "ExtendSaveData"},
1134 {26, &IApplicationFunctions::GetSaveDataSize, "GetSaveDataSize"}, 1133 {26, &IApplicationFunctions::GetSaveDataSize, "GetSaveDataSize"},
1134 {27, nullptr, "CreateCacheStorage"},
1135 {30, &IApplicationFunctions::BeginBlockingHomeButtonShortAndLongPressed, "BeginBlockingHomeButtonShortAndLongPressed"}, 1135 {30, &IApplicationFunctions::BeginBlockingHomeButtonShortAndLongPressed, "BeginBlockingHomeButtonShortAndLongPressed"},
1136 {31, &IApplicationFunctions::EndBlockingHomeButtonShortAndLongPressed, "EndBlockingHomeButtonShortAndLongPressed"}, 1136 {31, &IApplicationFunctions::EndBlockingHomeButtonShortAndLongPressed, "EndBlockingHomeButtonShortAndLongPressed"},
1137 {32, &IApplicationFunctions::BeginBlockingHomeButton, "BeginBlockingHomeButton"}, 1137 {32, &IApplicationFunctions::BeginBlockingHomeButton, "BeginBlockingHomeButton"},
@@ -1157,6 +1157,8 @@ IApplicationFunctions::IApplicationFunctions(Core::System& system_)
1157 {120, nullptr, "ExecuteProgram"}, 1157 {120, nullptr, "ExecuteProgram"},
1158 {121, nullptr, "ClearUserChannel"}, 1158 {121, nullptr, "ClearUserChannel"},
1159 {122, nullptr, "UnpopToUserChannel"}, 1159 {122, nullptr, "UnpopToUserChannel"},
1160 {123, nullptr, "GetPreviousProgramIndex"},
1161 {124, nullptr, "EnableApplicationAllThreadDumpOnCrash"},
1160 {130, &IApplicationFunctions::GetGpuErrorDetectedSystemEvent, "GetGpuErrorDetectedSystemEvent"}, 1162 {130, &IApplicationFunctions::GetGpuErrorDetectedSystemEvent, "GetGpuErrorDetectedSystemEvent"},
1161 {140, &IApplicationFunctions::GetFriendInvitationStorageChannelEvent, "GetFriendInvitationStorageChannelEvent"}, 1163 {140, &IApplicationFunctions::GetFriendInvitationStorageChannelEvent, "GetFriendInvitationStorageChannelEvent"},
1162 {141, nullptr, "TryPopFromFriendInvitationStorageChannel"}, 1164 {141, nullptr, "TryPopFromFriendInvitationStorageChannel"},
diff --git a/src/core/hle/service/am/am.h b/src/core/hle/service/am/am.h
index 469f7f814..2f69466ec 100644
--- a/src/core/hle/service/am/am.h
+++ b/src/core/hle/service/am/am.h
@@ -138,7 +138,7 @@ private:
138 void SetFocusHandlingMode(Kernel::HLERequestContext& ctx); 138 void SetFocusHandlingMode(Kernel::HLERequestContext& ctx);
139 void SetRestartMessageEnabled(Kernel::HLERequestContext& ctx); 139 void SetRestartMessageEnabled(Kernel::HLERequestContext& ctx);
140 void SetOutOfFocusSuspendingEnabled(Kernel::HLERequestContext& ctx); 140 void SetOutOfFocusSuspendingEnabled(Kernel::HLERequestContext& ctx);
141 void SetScreenShotImageOrientation(Kernel::HLERequestContext& ctx); 141 void SetAlbumImageOrientation(Kernel::HLERequestContext& ctx);
142 void CreateManagedDisplayLayer(Kernel::HLERequestContext& ctx); 142 void CreateManagedDisplayLayer(Kernel::HLERequestContext& ctx);
143 void SetHandlesRequestToDisplay(Kernel::HLERequestContext& ctx); 143 void SetHandlesRequestToDisplay(Kernel::HLERequestContext& ctx);
144 void SetIdleTimeDetectionExtension(Kernel::HLERequestContext& ctx); 144 void SetIdleTimeDetectionExtension(Kernel::HLERequestContext& ctx);
diff --git a/src/core/hle/service/am/applets/software_keyboard.cpp b/src/core/hle/service/am/applets/software_keyboard.cpp
index d14076b02..fbe3686ae 100644
--- a/src/core/hle/service/am/applets/software_keyboard.cpp
+++ b/src/core/hle/service/am/applets/software_keyboard.cpp
@@ -60,7 +60,7 @@ void SoftwareKeyboard::Initialize() {
60 std::memcpy(&config, keyboard_config.data(), sizeof(KeyboardConfig)); 60 std::memcpy(&config, keyboard_config.data(), sizeof(KeyboardConfig));
61 61
62 const auto work_buffer_storage = broker.PopNormalDataToApplet(); 62 const auto work_buffer_storage = broker.PopNormalDataToApplet();
63 ASSERT(work_buffer_storage != nullptr); 63 ASSERT_OR_EXECUTE(work_buffer_storage != nullptr, { return; });
64 const auto& work_buffer = work_buffer_storage->GetData(); 64 const auto& work_buffer = work_buffer_storage->GetData();
65 65
66 if (config.initial_string_size == 0) 66 if (config.initial_string_size == 0)
diff --git a/src/core/hle/service/am/spsm.cpp b/src/core/hle/service/am/spsm.cpp
index 003ee8667..f27729ce7 100644
--- a/src/core/hle/service/am/spsm.cpp
+++ b/src/core/hle/service/am/spsm.cpp
@@ -10,17 +10,17 @@ SPSM::SPSM() : ServiceFramework{"spsm"} {
10 // clang-format off 10 // clang-format off
11 static const FunctionInfo functions[] = { 11 static const FunctionInfo functions[] = {
12 {0, nullptr, "GetState"}, 12 {0, nullptr, "GetState"},
13 {1, nullptr, "SleepSystemAndWaitAwake"}, 13 {1, nullptr, "EnterSleep"},
14 {2, nullptr, "Unknown1"}, 14 {2, nullptr, "GetLastWakeReason"},
15 {3, nullptr, "Unknown2"}, 15 {3, nullptr, "Shutdown"},
16 {4, nullptr, "GetNotificationMessageEventHandle"}, 16 {4, nullptr, "GetNotificationMessageEventHandle"},
17 {5, nullptr, "Unknown3"}, 17 {5, nullptr, "ReceiveNotificationMessage"},
18 {6, nullptr, "Unknown4"}, 18 {6, nullptr, "AnalyzeLogForLastSleepWakeSequence"},
19 {7, nullptr, "Unknown5"}, 19 {7, nullptr, "ResetEventLog"},
20 {8, nullptr, "AnalyzePerformanceLogForLastSleepWakeSequence"}, 20 {8, nullptr, "AnalyzePerformanceLogForLastSleepWakeSequence"},
21 {9, nullptr, "ChangeHomeButtonLongPressingTime"}, 21 {9, nullptr, "ChangeHomeButtonLongPressingTime"},
22 {10, nullptr, "Unknown6"}, 22 {10, nullptr, "PutErrorState"},
23 {11, nullptr, "Unknown7"}, 23 {11, nullptr, "InvalidateCurrentHomeButtonPressing"},
24 }; 24 };
25 // clang-format on 25 // clang-format on
26 26
diff --git a/src/core/hle/service/aoc/aoc_u.cpp b/src/core/hle/service/aoc/aoc_u.cpp
index 4227a4adf..8e79f707b 100644
--- a/src/core/hle/service/aoc/aoc_u.cpp
+++ b/src/core/hle/service/aoc/aoc_u.cpp
@@ -60,6 +60,7 @@ AOC_U::AOC_U(Core::System& system)
60 {6, nullptr, "PrepareAddOnContentByApplicationId"}, 60 {6, nullptr, "PrepareAddOnContentByApplicationId"},
61 {7, &AOC_U::PrepareAddOnContent, "PrepareAddOnContent"}, 61 {7, &AOC_U::PrepareAddOnContent, "PrepareAddOnContent"},
62 {8, &AOC_U::GetAddOnContentListChangedEvent, "GetAddOnContentListChangedEvent"}, 62 {8, &AOC_U::GetAddOnContentListChangedEvent, "GetAddOnContentListChangedEvent"},
63 {9, nullptr, "GetAddOnContentLostErrorCode"},
63 {100, nullptr, "CreateEcPurchasedEventManager"}, 64 {100, nullptr, "CreateEcPurchasedEventManager"},
64 {101, nullptr, "CreatePermanentEcPurchasedEventManager"}, 65 {101, nullptr, "CreatePermanentEcPurchasedEventManager"},
65 }; 66 };
diff --git a/src/core/hle/service/bcat/bcat.cpp b/src/core/hle/service/bcat/bcat.cpp
index 8bb2528c9..b31766212 100644
--- a/src/core/hle/service/bcat/bcat.cpp
+++ b/src/core/hle/service/bcat/bcat.cpp
@@ -14,6 +14,8 @@ BCAT::BCAT(Core::System& system, std::shared_ptr<Module> module,
14 {0, &BCAT::CreateBcatService, "CreateBcatService"}, 14 {0, &BCAT::CreateBcatService, "CreateBcatService"},
15 {1, &BCAT::CreateDeliveryCacheStorageService, "CreateDeliveryCacheStorageService"}, 15 {1, &BCAT::CreateDeliveryCacheStorageService, "CreateDeliveryCacheStorageService"},
16 {2, &BCAT::CreateDeliveryCacheStorageServiceWithApplicationId, "CreateDeliveryCacheStorageServiceWithApplicationId"}, 16 {2, &BCAT::CreateDeliveryCacheStorageServiceWithApplicationId, "CreateDeliveryCacheStorageServiceWithApplicationId"},
17 {3, nullptr, "CreateDeliveryCacheProgressService"},
18 {4, nullptr, "CreateDeliveryCacheProgressServiceWithApplicationId"},
17 }; 19 };
18 // clang-format on 20 // clang-format on
19 RegisterHandlers(functions); 21 RegisterHandlers(functions);
diff --git a/src/core/hle/service/bcat/module.cpp b/src/core/hle/service/bcat/module.cpp
index 34aba7a27..603b64d4f 100644
--- a/src/core/hle/service/bcat/module.cpp
+++ b/src/core/hle/service/bcat/module.cpp
@@ -143,10 +143,13 @@ public:
143 {20401, nullptr, "UnregisterSystemApplicationDeliveryTask"}, 143 {20401, nullptr, "UnregisterSystemApplicationDeliveryTask"},
144 {20410, nullptr, "SetSystemApplicationDeliveryTaskTimer"}, 144 {20410, nullptr, "SetSystemApplicationDeliveryTaskTimer"},
145 {30100, &IBcatService::SetPassphrase, "SetPassphrase"}, 145 {30100, &IBcatService::SetPassphrase, "SetPassphrase"},
146 {30101, nullptr, "Unknown"},
147 {30102, nullptr, "Unknown2"},
146 {30200, nullptr, "RegisterBackgroundDeliveryTask"}, 148 {30200, nullptr, "RegisterBackgroundDeliveryTask"},
147 {30201, nullptr, "UnregisterBackgroundDeliveryTask"}, 149 {30201, nullptr, "UnregisterBackgroundDeliveryTask"},
148 {30202, nullptr, "BlockDeliveryTask"}, 150 {30202, nullptr, "BlockDeliveryTask"},
149 {30203, nullptr, "UnblockDeliveryTask"}, 151 {30203, nullptr, "UnblockDeliveryTask"},
152 {30210, nullptr, "SetDeliveryTaskTimer"},
150 {30300, nullptr, "RegisterSystemApplicationDeliveryTasks"}, 153 {30300, nullptr, "RegisterSystemApplicationDeliveryTasks"},
151 {90100, nullptr, "EnumerateBackgroundDeliveryTask"}, 154 {90100, nullptr, "EnumerateBackgroundDeliveryTask"},
152 {90200, nullptr, "GetDeliveryList"}, 155 {90200, nullptr, "GetDeliveryList"},
diff --git a/src/core/hle/service/bpc/bpc.cpp b/src/core/hle/service/bpc/bpc.cpp
index 1c1ecdb60..fac6b2f9c 100644
--- a/src/core/hle/service/bpc/bpc.cpp
+++ b/src/core/hle/service/bpc/bpc.cpp
@@ -23,9 +23,14 @@ public:
23 {5, nullptr, "GetBoardPowerControlEvent"}, 23 {5, nullptr, "GetBoardPowerControlEvent"},
24 {6, nullptr, "GetSleepButtonState"}, 24 {6, nullptr, "GetSleepButtonState"},
25 {7, nullptr, "GetPowerEvent"}, 25 {7, nullptr, "GetPowerEvent"},
26 {8, nullptr, "Unknown1"}, 26 {8, nullptr, "CreateWakeupTimer"},
27 {9, nullptr, "Unknown2"}, 27 {9, nullptr, "CancelWakeupTimer"},
28 {10, nullptr, "Unknown3"}, 28 {10, nullptr, "EnableWakeupTimerOnDevice"},
29 {11, nullptr, "CreateWakeupTimerEx"},
30 {12, nullptr, "GetLastEnabledWakeupTimerType"},
31 {13, nullptr, "CleanAllWakeupTimers"},
32 {14, nullptr, "Unknown"},
33 {15, nullptr, "Unknown2"},
29 }; 34 };
30 // clang-format on 35 // clang-format on
31 36
@@ -38,10 +43,11 @@ public:
38 explicit BPC_R() : ServiceFramework{"bpc:r"} { 43 explicit BPC_R() : ServiceFramework{"bpc:r"} {
39 // clang-format off 44 // clang-format off
40 static const FunctionInfo functions[] = { 45 static const FunctionInfo functions[] = {
41 {0, nullptr, "GetExternalRtcValue"}, 46 {0, nullptr, "GetRtcTime"},
42 {1, nullptr, "SetExternalRtcValue"}, 47 {1, nullptr, "SetRtcTime"},
43 {2, nullptr, "ReadExternalRtcResetFlag"}, 48 {2, nullptr, "GetRtcResetDetected"},
44 {3, nullptr, "ClearExternalRtcResetFlag"}, 49 {3, nullptr, "ClearRtcResetDetected"},
50 {4, nullptr, "SetUpRtcResetOnShutdown"},
45 }; 51 };
46 // clang-format on 52 // clang-format on
47 53
diff --git a/src/core/hle/service/btdrv/btdrv.cpp b/src/core/hle/service/btdrv/btdrv.cpp
index 40a06c9fd..f311afa2f 100644
--- a/src/core/hle/service/btdrv/btdrv.cpp
+++ b/src/core/hle/service/btdrv/btdrv.cpp
@@ -58,102 +58,103 @@ public:
58 {1, nullptr, "InitializeBluetooth"}, 58 {1, nullptr, "InitializeBluetooth"},
59 {2, nullptr, "EnableBluetooth"}, 59 {2, nullptr, "EnableBluetooth"},
60 {3, nullptr, "DisableBluetooth"}, 60 {3, nullptr, "DisableBluetooth"},
61 {4, nullptr, "CleanupBluetooth"}, 61 {4, nullptr, "FinalizeBluetooth"},
62 {5, nullptr, "GetAdapterProperties"}, 62 {5, nullptr, "GetAdapterProperties"},
63 {6, nullptr, "GetAdapterProperty"}, 63 {6, nullptr, "GetAdapterProperty"},
64 {7, nullptr, "SetAdapterProperty"}, 64 {7, nullptr, "SetAdapterProperty"},
65 {8, nullptr, "StartDiscovery"}, 65 {8, nullptr, "StartInquiry"},
66 {9, nullptr, "CancelDiscovery"}, 66 {9, nullptr, "StopInquiry"},
67 {10, nullptr, "CreateBond"}, 67 {10, nullptr, "CreateBond"},
68 {11, nullptr, "RemoveBond"}, 68 {11, nullptr, "RemoveBond"},
69 {12, nullptr, "CancelBond"}, 69 {12, nullptr, "CancelBond"},
70 {13, nullptr, "PinReply"}, 70 {13, nullptr, "RespondToPinRequest"},
71 {14, nullptr, "SspReply"}, 71 {14, nullptr, "RespondToSspRequest"},
72 {15, nullptr, "GetEventInfo"}, 72 {15, nullptr, "GetEventInfo"},
73 {16, nullptr, "InitializeHid"}, 73 {16, nullptr, "InitializeHid"},
74 {17, nullptr, "HidConnect"}, 74 {17, nullptr, "OpenHidConnection"},
75 {18, nullptr, "HidDisconnect"}, 75 {18, nullptr, "CloseHidConnection"},
76 {19, nullptr, "HidSendData"}, 76 {19, nullptr, "WriteHidData"},
77 {20, nullptr, "HidSendData2"}, 77 {20, nullptr, "WriteHidData2"},
78 {21, nullptr, "HidSetReport"}, 78 {21, nullptr, "SetHidReport"},
79 {22, nullptr, "HidGetReport"}, 79 {22, nullptr, "GetHidReport"},
80 {23, nullptr, "HidWakeController"}, 80 {23, nullptr, "TriggerConnection"},
81 {24, nullptr, "HidAddPairedDevice"}, 81 {24, nullptr, "AddPairedDeviceInfo"},
82 {25, nullptr, "HidGetPairedDevice"}, 82 {25, nullptr, "GetPairedDeviceInfo"},
83 {26, nullptr, "CleanupHid"}, 83 {26, nullptr, "FinalizeHid"},
84 {27, nullptr, "HidGetEventInfo"}, 84 {27, nullptr, "GetHidEventInfo"},
85 {28, nullptr, "ExtSetTsi"}, 85 {28, nullptr, "SetTsi"},
86 {29, nullptr, "ExtSetBurstMode"}, 86 {29, nullptr, "EnableBurstMode"},
87 {30, nullptr, "ExtSetZeroRetran"}, 87 {30, nullptr, "SetZeroRetransmission"},
88 {31, nullptr, "ExtSetMcMode"}, 88 {31, nullptr, "EnableMcMode"},
89 {32, nullptr, "ExtStartLlrMode"}, 89 {32, nullptr, "EnableLlrScan"},
90 {33, nullptr, "ExtExitLlrMode"}, 90 {33, nullptr, "DisableLlrScan"},
91 {34, nullptr, "ExtSetRadio"}, 91 {34, nullptr, "EnableRadio"},
92 {35, nullptr, "ExtSetVisibility"}, 92 {35, nullptr, "SetVisibility"},
93 {36, nullptr, "ExtSetTbfcScan"}, 93 {36, nullptr, "EnableTbfcScan"},
94 {37, nullptr, "RegisterHidReportEvent"}, 94 {37, nullptr, "RegisterHidReportEvent"},
95 {38, nullptr, "HidGetReportEventInfo"}, 95 {38, nullptr, "GetHidReportEventInfo"},
96 {39, nullptr, "GetLatestPlr"}, 96 {39, nullptr, "GetLatestPlr"},
97 {40, nullptr, "ExtGetPendingConnections"}, 97 {40, nullptr, "GetPendingConnections"},
98 {41, nullptr, "GetChannelMap"}, 98 {41, nullptr, "GetChannelMap"},
99 {42, nullptr, "EnableBluetoothBoostSetting"}, 99 {42, nullptr, "EnableTxPowerBoostSetting"},
100 {43, nullptr, "IsBluetoothBoostSettingEnabled"}, 100 {43, nullptr, "IsTxPowerBoostSettingEnabled"},
101 {44, nullptr, "EnableBluetoothAfhSetting"}, 101 {44, nullptr, "EnableAfhSetting"},
102 {45, nullptr, "IsBluetoothAfhSettingEnabled"}, 102 {45, nullptr, "IsAfhSettingEnabled"},
103 {46, nullptr, "InitializeBluetoothLe"}, 103 {46, nullptr, "InitializeBle"},
104 {47, nullptr, "EnableBluetoothLe"}, 104 {47, nullptr, "EnableBle"},
105 {48, nullptr, "DisableBluetoothLe"}, 105 {48, nullptr, "DisableBle"},
106 {49, nullptr, "CleanupBluetoothLe"}, 106 {49, nullptr, "FinalizeBle"},
107 {50, nullptr, "SetLeVisibility"}, 107 {50, nullptr, "SetBleVisibility"},
108 {51, nullptr, "SetLeConnectionParameter"}, 108 {51, nullptr, "SetBleConnectionParameter"},
109 {52, nullptr, "SetLeDefaultConnectionParameter"}, 109 {52, nullptr, "SetBleDefaultConnectionParameter"},
110 {53, nullptr, "SetLeAdvertiseData"}, 110 {53, nullptr, "SetBleAdvertiseData"},
111 {54, nullptr, "SetLeAdvertiseParameter"}, 111 {54, nullptr, "SetBleAdvertiseParameter"},
112 {55, nullptr, "StartLeScan"}, 112 {55, nullptr, "StartBleScan"},
113 {56, nullptr, "StopLeScan"}, 113 {56, nullptr, "StopBleScan"},
114 {57, nullptr, "AddLeScanFilterCondition"}, 114 {57, nullptr, "AddBleScanFilterCondition"},
115 {58, nullptr, "DeleteLeScanFilterCondition"}, 115 {58, nullptr, "DeleteBleScanFilterCondition"},
116 {59, nullptr, "DeleteLeScanFilter"}, 116 {59, nullptr, "DeleteBleScanFilter"},
117 {60, nullptr, "ClearLeScanFilters"}, 117 {60, nullptr, "ClearBleScanFilters"},
118 {61, nullptr, "EnableLeScanFilter"}, 118 {61, nullptr, "EnableBleScanFilter"},
119 {62, nullptr, "RegisterLeClient"}, 119 {62, nullptr, "RegisterGattClient"},
120 {63, nullptr, "UnregisterLeClient"}, 120 {63, nullptr, "UnregisterGattClient"},
121 {64, nullptr, "UnregisterLeClientAll"}, 121 {64, nullptr, "UnregisterAllGattClients"},
122 {65, nullptr, "LeClientConnect"}, 122 {65, nullptr, "ConnectGattServer"},
123 {66, nullptr, "LeClientCancelConnection"}, 123 {66, nullptr, "CancelConnectGattServer"},
124 {67, nullptr, "LeClientDisconnect"}, 124 {67, nullptr, "DisconnectGattServer"},
125 {68, nullptr, "LeClientGetAttributes"}, 125 {68, nullptr, "GetGattAttribute"},
126 {69, nullptr, "LeClientDiscoverService"}, 126 {69, nullptr, "GetGattService"},
127 {70, nullptr, "LeClientConfigureMtu"}, 127 {70, nullptr, "ConfigureAttMtu"},
128 {71, nullptr, "RegisterLeServer"}, 128 {71, nullptr, "RegisterGattServer"},
129 {72, nullptr, "UnregisterLeServer"}, 129 {72, nullptr, "UnregisterGattServer"},
130 {73, nullptr, "LeServerConnect"}, 130 {73, nullptr, "ConnectGattClient"},
131 {74, nullptr, "LeServerDisconnect"}, 131 {74, nullptr, "DisconnectGattClient"},
132 {75, nullptr, "CreateLeService"}, 132 {75, nullptr, "AddGattService"},
133 {76, nullptr, "StartLeService"}, 133 {76, nullptr, "EnableGattService"},
134 {77, nullptr, "AddLeCharacteristic"}, 134 {77, nullptr, "AddGattCharacteristic"},
135 {78, nullptr, "AddLeDescriptor"}, 135 {78, nullptr, "AddGattDescriptor"},
136 {79, nullptr, "GetLeCoreEventInfo"}, 136 {79, nullptr, "GetBleManagedEventInfo"},
137 {80, nullptr, "LeGetFirstCharacteristic"}, 137 {80, nullptr, "GetGattFirstCharacteristic"},
138 {81, nullptr, "LeGetNextCharacteristic"}, 138 {81, nullptr, "GetGattNextCharacteristic"},
139 {82, nullptr, "LeGetFirstDescriptor"}, 139 {82, nullptr, "GetGattFirstDescriptor"},
140 {83, nullptr, "LeGetNextDescriptor"}, 140 {83, nullptr, "GetGattNextDescriptor"},
141 {84, nullptr, "RegisterLeCoreDataPath"}, 141 {84, nullptr, "RegisterGattManagedDataPath"},
142 {85, nullptr, "UnregisterLeCoreDataPath"}, 142 {85, nullptr, "UnregisterGattManagedDataPath"},
143 {86, nullptr, "RegisterLeHidDataPath"}, 143 {86, nullptr, "RegisterGattHidDataPath"},
144 {87, nullptr, "UnregisterLeHidDataPath"}, 144 {87, nullptr, "UnregisterGattHidDataPath"},
145 {88, nullptr, "RegisterLeDataPath"}, 145 {88, nullptr, "RegisterGattDataPath"},
146 {89, nullptr, "UnregisterLeDataPath"}, 146 {89, nullptr, "UnregisterGattDataPath"},
147 {90, nullptr, "LeClientReadCharacteristic"}, 147 {90, nullptr, "ReadGattCharacteristic"},
148 {91, nullptr, "LeClientReadDescriptor"}, 148 {91, nullptr, "ReadGattDescriptor"},
149 {92, nullptr, "LeClientWriteCharacteristic"}, 149 {92, nullptr, "WriteGattCharacteristic"},
150 {93, nullptr, "LeClientWriteDescriptor"}, 150 {93, nullptr, "WriteGattDescriptor"},
151 {94, nullptr, "LeClientRegisterNotification"}, 151 {94, nullptr, "RegisterGattNotification"},
152 {95, nullptr, "LeClientDeregisterNotification"}, 152 {95, nullptr, "UnregisterGattNotification"},
153 {96, nullptr, "GetLeHidEventInfo"}, 153 {96, nullptr, "GetLeHidEventInfo"},
154 {97, nullptr, "RegisterBleHidEvent"}, 154 {97, nullptr, "RegisterBleHidEvent"},
155 {98, nullptr, "SetLeScanParameter"}, 155 {98, nullptr, "SetBleScanParameter"},
156 {256, nullptr, "GetIsManufacturingMode"}, 156 {99, nullptr, "MoveToSecondaryPiconet"},
157 {256, nullptr, "IsManufacturingMode"},
157 {257, nullptr, "EmulateBluetoothCrash"}, 158 {257, nullptr, "EmulateBluetoothCrash"},
158 {258, nullptr, "GetBleChannelMap"}, 159 {258, nullptr, "GetBleChannelMap"},
159 }; 160 };
diff --git a/src/core/hle/service/btm/btm.cpp b/src/core/hle/service/btm/btm.cpp
index 251b3c9df..0d251c6d0 100644
--- a/src/core/hle/service/btm/btm.cpp
+++ b/src/core/hle/service/btm/btm.cpp
@@ -132,66 +132,71 @@ public:
132 explicit BTM() : ServiceFramework{"btm"} { 132 explicit BTM() : ServiceFramework{"btm"} {
133 // clang-format off 133 // clang-format off
134 static const FunctionInfo functions[] = { 134 static const FunctionInfo functions[] = {
135 {0, nullptr, "Unknown1"}, 135 {0, nullptr, "GetState"},
136 {1, nullptr, "Unknown2"}, 136 {1, nullptr, "GetHostDeviceProperty"},
137 {2, nullptr, "RegisterSystemEventForConnectedDeviceCondition"}, 137 {2, nullptr, "AcquireDeviceConditionEvent"},
138 {3, nullptr, "Unknown3"}, 138 {3, nullptr, "GetDeviceCondition"},
139 {4, nullptr, "Unknown4"}, 139 {4, nullptr, "SetBurstMode"},
140 {5, nullptr, "Unknown5"}, 140 {5, nullptr, "SetSlotMode"},
141 {6, nullptr, "Unknown6"}, 141 {6, nullptr, "SetBluetoothMode"},
142 {7, nullptr, "Unknown7"}, 142 {7, nullptr, "SetWlanMode"},
143 {8, nullptr, "RegisterSystemEventForRegisteredDeviceInfo"}, 143 {8, nullptr, "AcquireDeviceInfoEvent"},
144 {9, nullptr, "Unknown8"}, 144 {9, nullptr, "GetDeviceInfo"},
145 {10, nullptr, "Unknown9"}, 145 {10, nullptr, "AddDeviceInfo"},
146 {11, nullptr, "Unknown10"}, 146 {11, nullptr, "RemoveDeviceInfo"},
147 {12, nullptr, "Unknown11"}, 147 {12, nullptr, "IncreaseDeviceInfoOrder"},
148 {13, nullptr, "Unknown12"}, 148 {13, nullptr, "LlrNotify"},
149 {14, nullptr, "EnableRadio"}, 149 {14, nullptr, "EnableRadio"},
150 {15, nullptr, "DisableRadio"}, 150 {15, nullptr, "DisableRadio"},
151 {16, nullptr, "Unknown13"}, 151 {16, nullptr, "HidDisconnect"},
152 {17, nullptr, "Unknown14"}, 152 {17, nullptr, "HidSetRetransmissionMode"},
153 {18, nullptr, "Unknown15"}, 153 {18, nullptr, "AcquireAwakeReqEvent"},
154 {19, nullptr, "Unknown16"}, 154 {19, nullptr, "AcquireLlrStateEvent"},
155 {20, nullptr, "Unknown17"}, 155 {20, nullptr, "IsLlrStarted"},
156 {21, nullptr, "Unknown18"}, 156 {21, nullptr, "EnableSlotSaving"},
157 {22, nullptr, "Unknown19"}, 157 {22, nullptr, "ProtectDeviceInfo"},
158 {23, nullptr, "Unknown20"}, 158 {23, nullptr, "AcquireBleScanEvent"},
159 {24, nullptr, "Unknown21"}, 159 {24, nullptr, "GetBleScanParameterGeneral"},
160 {25, nullptr, "Unknown22"}, 160 {25, nullptr, "GetBleScanParameterSmartDevice"},
161 {26, nullptr, "Unknown23"}, 161 {26, nullptr, "StartBleScanForGeneral"},
162 {27, nullptr, "Unknown24"}, 162 {27, nullptr, "StopBleScanForGeneral"},
163 {28, nullptr, "Unknown25"}, 163 {28, nullptr, "GetBleScanResultsForGeneral"},
164 {29, nullptr, "Unknown26"}, 164 {29, nullptr, "StartBleScanForPairedDevice"},
165 {30, nullptr, "Unknown27"}, 165 {30, nullptr, "StopBleScanForPairedDevice"},
166 {31, nullptr, "Unknown28"}, 166 {31, nullptr, "StartBleScanForSmartDevice"},
167 {32, nullptr, "Unknown29"}, 167 {32, nullptr, "StopBleScanForSmartDevice"},
168 {33, nullptr, "Unknown30"}, 168 {33, nullptr, "GetBleScanResultsForSmartDevice"},
169 {34, nullptr, "Unknown31"}, 169 {34, nullptr, "AcquireBleConnectionEvent"},
170 {35, nullptr, "Unknown32"}, 170 {35, nullptr, "BleConnect"},
171 {36, nullptr, "Unknown33"}, 171 {36, nullptr, "BleOverrideConnection"},
172 {37, nullptr, "Unknown34"}, 172 {37, nullptr, "BleDisconnect"},
173 {38, nullptr, "Unknown35"}, 173 {38, nullptr, "BleGetConnectionState"},
174 {39, nullptr, "Unknown36"}, 174 {39, nullptr, "BleGetGattClientConditionList"},
175 {40, nullptr, "Unknown37"}, 175 {40, nullptr, "AcquireBlePairingEvent"},
176 {41, nullptr, "Unknown38"}, 176 {41, nullptr, "BlePairDevice"},
177 {42, nullptr, "Unknown39"}, 177 {42, nullptr, "BleUnpairDeviceOnBoth"},
178 {43, nullptr, "Unknown40"}, 178 {43, nullptr, "BleUnpairDevice"},
179 {44, nullptr, "Unknown41"}, 179 {44, nullptr, "BleGetPairedAddresses"},
180 {45, nullptr, "Unknown42"}, 180 {45, nullptr, "AcquireBleServiceDiscoveryEvent"},
181 {46, nullptr, "Unknown43"}, 181 {46, nullptr, "GetGattServices"},
182 {47, nullptr, "Unknown44"}, 182 {47, nullptr, "GetGattService"},
183 {48, nullptr, "Unknown45"}, 183 {48, nullptr, "GetGattIncludedServices"},
184 {49, nullptr, "Unknown46"}, 184 {49, nullptr, "GetBelongingService"},
185 {50, nullptr, "Unknown47"}, 185 {50, nullptr, "GetGattCharacteristics"},
186 {51, nullptr, "Unknown48"}, 186 {51, nullptr, "GetGattDescriptors"},
187 {52, nullptr, "Unknown49"}, 187 {52, nullptr, "AcquireBleMtuConfigEvent"},
188 {53, nullptr, "Unknown50"}, 188 {53, nullptr, "ConfigureBleMtu"},
189 {54, nullptr, "Unknown51"}, 189 {54, nullptr, "GetBleMtu"},
190 {55, nullptr, "Unknown52"}, 190 {55, nullptr, "RegisterBleGattDataPath"},
191 {56, nullptr, "Unknown53"}, 191 {56, nullptr, "UnregisterBleGattDataPath"},
192 {57, nullptr, "Unknown54"}, 192 {57, nullptr, "RegisterAppletResourceUserId"},
193 {58, nullptr, "Unknown55"}, 193 {58, nullptr, "UnregisterAppletResourceUserId"},
194 {59, nullptr, "Unknown56"}, 194 {59, nullptr, "SetAppletResourceUserId"},
195 {60, nullptr, "Unknown60"},
196 {61, nullptr, "Unknown61"},
197 {62, nullptr, "Unknown62"},
198 {63, nullptr, "Unknown63"},
199 {64, nullptr, "Unknown64"},
195 }; 200 };
196 // clang-format on 201 // clang-format on
197 202
@@ -204,19 +209,19 @@ public:
204 explicit BTM_DBG() : ServiceFramework{"btm:dbg"} { 209 explicit BTM_DBG() : ServiceFramework{"btm:dbg"} {
205 // clang-format off 210 // clang-format off
206 static const FunctionInfo functions[] = { 211 static const FunctionInfo functions[] = {
207 {0, nullptr, "RegisterSystemEventForDiscovery"}, 212 {0, nullptr, "AcquireDiscoveryEvent"},
208 {1, nullptr, "Unknown1"}, 213 {1, nullptr, "StartDiscovery"},
209 {2, nullptr, "Unknown2"}, 214 {2, nullptr, "CancelDiscovery"},
210 {3, nullptr, "Unknown3"}, 215 {3, nullptr, "GetDeviceProperty"},
211 {4, nullptr, "Unknown4"}, 216 {4, nullptr, "CreateBond"},
212 {5, nullptr, "Unknown5"}, 217 {5, nullptr, "CancelBond"},
213 {6, nullptr, "Unknown6"}, 218 {6, nullptr, "SetTsiMode"},
214 {7, nullptr, "Unknown7"}, 219 {7, nullptr, "GeneralTest"},
215 {8, nullptr, "Unknown8"}, 220 {8, nullptr, "HidConnect"},
216 {9, nullptr, "Unknown9"}, 221 {9, nullptr, "GeneralGet"},
217 {10, nullptr, "Unknown10"}, 222 {10, nullptr, "GetGattClientDisconnectionReason"},
218 {11, nullptr, "Unknown11"}, 223 {11, nullptr, "GetBleConnectionParameter"},
219 {12, nullptr, "Unknown11"}, 224 {12, nullptr, "GetBleConnectionParameterRequest"},
220 }; 225 };
221 // clang-format on 226 // clang-format on
222 227
diff --git a/src/core/hle/service/caps/caps.cpp b/src/core/hle/service/caps/caps.cpp
index 26c8a7081..ba5749b84 100644
--- a/src/core/hle/service/caps/caps.cpp
+++ b/src/core/hle/service/caps/caps.cpp
@@ -1,4 +1,4 @@
1// Copyright 2018 yuzu emulator team 1// Copyright 2018 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
diff --git a/src/core/hle/service/caps/caps.h b/src/core/hle/service/caps/caps.h
index fc70a4c27..b8c67b6e2 100644
--- a/src/core/hle/service/caps/caps.h
+++ b/src/core/hle/service/caps/caps.h
@@ -1,4 +1,4 @@
1// Copyright 2018 yuzu emulator team 1// Copyright 2018 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
@@ -12,73 +12,79 @@ class ServiceManager;
12 12
13namespace Service::Capture { 13namespace Service::Capture {
14 14
15enum AlbumImageOrientation { 15enum class AlbumImageOrientation {
16 Orientation0 = 0, 16 Orientation0 = 0,
17 Orientation1 = 1, 17 Orientation1 = 1,
18 Orientation2 = 2, 18 Orientation2 = 2,
19 Orientation3 = 3, 19 Orientation3 = 3,
20}; 20};
21 21
22enum AlbumReportOption { 22enum class AlbumReportOption {
23 Disable = 0, 23 Disable = 0,
24 Enable = 1, 24 Enable = 1,
25}; 25};
26 26
27enum ContentType : u8 { 27enum class ContentType : u8 {
28 Screenshot = 0, 28 Screenshot = 0,
29 Movie = 1, 29 Movie = 1,
30 ExtraMovie = 3, 30 ExtraMovie = 3,
31}; 31};
32 32
33enum AlbumStorage : u8 { 33enum class AlbumStorage : u8 {
34 NAND = 0, 34 NAND = 0,
35 SD = 1, 35 SD = 1,
36}; 36};
37 37
38struct AlbumFileDateTime { 38struct AlbumFileDateTime {
39 u16 year; 39 s16 year{};
40 u8 month; 40 s8 month{};
41 u8 day; 41 s8 day{};
42 u8 hour; 42 s8 hour{};
43 u8 minute; 43 s8 minute{};
44 u8 second; 44 s8 second{};
45 u8 uid; 45 s8 uid{};
46}; 46};
47static_assert(sizeof(AlbumFileDateTime) == 0x8, "AlbumFileDateTime has incorrect size.");
47 48
48struct AlbumEntry { 49struct AlbumEntry {
49 u64 size; 50 u64 size{};
50 u64 application_id; 51 u64 application_id{};
51 AlbumFileDateTime datetime; 52 AlbumFileDateTime datetime{};
52 AlbumStorage storage; 53 AlbumStorage storage{};
53 ContentType content; 54 ContentType content{};
54 u8 padding[6]; 55 INSERT_PADDING_BYTES(6);
55}; 56};
57static_assert(sizeof(AlbumEntry) == 0x20, "AlbumEntry has incorrect size.");
56 58
57struct AlbumFileEntry { 59struct AlbumFileEntry {
58 u64 size; 60 u64 size{}; // Size of the entry
59 u64 hash; 61 u64 hash{}; // AES256 with hardcoded key over AlbumEntry
60 AlbumFileDateTime datetime; 62 AlbumFileDateTime datetime{};
61 AlbumStorage storage; 63 AlbumStorage storage{};
62 ContentType content; 64 ContentType content{};
63 u8 padding[5]; 65 INSERT_PADDING_BYTES(5);
64 u8 unknown; 66 u8 unknown{1}; // Set to 1 on official SW
65}; 67};
68static_assert(sizeof(AlbumFileEntry) == 0x20, "AlbumFileEntry has incorrect size.");
66 69
67struct ApplicationAlbumEntry { 70struct ApplicationAlbumEntry {
68 u64 size; 71 u64 size{}; // Size of the entry
69 u64 hash; 72 u64 hash{}; // AES256 with hardcoded key over AlbumEntry
70 AlbumFileDateTime datetime; 73 AlbumFileDateTime datetime{};
71 AlbumStorage storage; 74 AlbumStorage storage{};
72 ContentType content; 75 ContentType content{};
73 u8 padding[5]; 76 INSERT_PADDING_BYTES(5);
74 u8 unknown; 77 u8 unknown{1}; // Set to 1 on official SW
75}; 78};
79static_assert(sizeof(ApplicationAlbumEntry) == 0x20, "ApplicationAlbumEntry has incorrect size.");
76 80
77struct ApplicationAlbumFileEntry { 81struct ApplicationAlbumFileEntry {
78 ApplicationAlbumEntry entry; 82 ApplicationAlbumEntry entry{};
79 AlbumFileDateTime datetime; 83 AlbumFileDateTime datetime{};
80 u64 unknown; 84 u64 unknown{};
81}; 85};
86static_assert(sizeof(ApplicationAlbumFileEntry) == 0x30,
87 "ApplicationAlbumFileEntry has incorrect size.");
82 88
83/// Registers all Capture services with the specified service manager. 89/// Registers all Capture services with the specified service manager.
84void InstallInterfaces(SM::ServiceManager& sm); 90void InstallInterfaces(SM::ServiceManager& sm);
diff --git a/src/core/hle/service/caps/caps_a.cpp b/src/core/hle/service/caps/caps_a.cpp
index 88a3fdc05..a0a3b2ae3 100644
--- a/src/core/hle/service/caps/caps_a.cpp
+++ b/src/core/hle/service/caps/caps_a.cpp
@@ -1,4 +1,4 @@
1// Copyright 2020 yuzu emulator team 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
diff --git a/src/core/hle/service/caps/caps_a.h b/src/core/hle/service/caps/caps_a.h
index 8de832491..cb93aad5b 100644
--- a/src/core/hle/service/caps/caps_a.h
+++ b/src/core/hle/service/caps/caps_a.h
@@ -1,4 +1,4 @@
1// Copyright 2020 yuzu emulator team 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
diff --git a/src/core/hle/service/caps/caps_c.cpp b/src/core/hle/service/caps/caps_c.cpp
index ea6452ffa..ab17a187e 100644
--- a/src/core/hle/service/caps/caps_c.cpp
+++ b/src/core/hle/service/caps/caps_c.cpp
@@ -1,4 +1,4 @@
1// Copyright 2020 yuzu emulator team 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
diff --git a/src/core/hle/service/caps/caps_c.h b/src/core/hle/service/caps/caps_c.h
index d07cdb441..a9d028689 100644
--- a/src/core/hle/service/caps/caps_c.h
+++ b/src/core/hle/service/caps/caps_c.h
@@ -1,4 +1,4 @@
1// Copyright 2020 yuzu emulator team 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
diff --git a/src/core/hle/service/caps/caps_sc.cpp b/src/core/hle/service/caps/caps_sc.cpp
index d01a8a58e..822ee96c8 100644
--- a/src/core/hle/service/caps/caps_sc.cpp
+++ b/src/core/hle/service/caps/caps_sc.cpp
@@ -1,4 +1,4 @@
1// Copyright 2020 yuzu emulator team 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
diff --git a/src/core/hle/service/caps/caps_sc.h b/src/core/hle/service/caps/caps_sc.h
index 9ba372f7a..ac3e929ca 100644
--- a/src/core/hle/service/caps/caps_sc.h
+++ b/src/core/hle/service/caps/caps_sc.h
@@ -1,4 +1,4 @@
1// Copyright 2020 yuzu emulator team 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
diff --git a/src/core/hle/service/caps/caps_ss.cpp b/src/core/hle/service/caps/caps_ss.cpp
index eaa3a7494..24dc716e7 100644
--- a/src/core/hle/service/caps/caps_ss.cpp
+++ b/src/core/hle/service/caps/caps_ss.cpp
@@ -1,4 +1,4 @@
1// Copyright 2020 yuzu emulator team 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
diff --git a/src/core/hle/service/caps/caps_ss.h b/src/core/hle/service/caps/caps_ss.h
index e258a6925..450686e4f 100644
--- a/src/core/hle/service/caps/caps_ss.h
+++ b/src/core/hle/service/caps/caps_ss.h
@@ -1,4 +1,4 @@
1// Copyright 2020 yuzu emulator team 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
diff --git a/src/core/hle/service/caps/caps_su.cpp b/src/core/hle/service/caps/caps_su.cpp
index e8b0698e8..fffb2ecf9 100644
--- a/src/core/hle/service/caps/caps_su.cpp
+++ b/src/core/hle/service/caps/caps_su.cpp
@@ -1,4 +1,4 @@
1// Copyright 2020 yuzu emulator team 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
diff --git a/src/core/hle/service/caps/caps_su.h b/src/core/hle/service/caps/caps_su.h
index c494d7c84..62c9603a9 100644
--- a/src/core/hle/service/caps/caps_su.h
+++ b/src/core/hle/service/caps/caps_su.h
@@ -1,4 +1,4 @@
1// Copyright 2020 yuzu emulator team 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
diff --git a/src/core/hle/service/caps/caps_u.cpp b/src/core/hle/service/caps/caps_u.cpp
index 78bab6ed8..f36d8de2d 100644
--- a/src/core/hle/service/caps/caps_u.cpp
+++ b/src/core/hle/service/caps/caps_u.cpp
@@ -1,4 +1,4 @@
1// Copyright 2020 yuzu emulator team 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
@@ -58,19 +58,25 @@ void CAPS_U::GetAlbumContentsFileListForApplication(Kernel::HLERequestContext& c
58 // u8 ContentType, two s64s, and an u64 AppletResourceUserId. Returns an output u64 for total 58 // u8 ContentType, two s64s, and an u64 AppletResourceUserId. Returns an output u64 for total
59 // output entries (which is copied to a s32 by official SW). 59 // output entries (which is copied to a s32 by official SW).
60 IPC::RequestParser rp{ctx}; 60 IPC::RequestParser rp{ctx};
61 [[maybe_unused]] const auto application_album_file_entries = rp.PopRaw<std::array<u8, 0x30>>(); 61 const auto pid{rp.Pop<s32>()};
62 const auto pid = rp.Pop<s32>(); 62 const auto content_type{rp.PopEnum<ContentType>()};
63 const auto content_type = rp.PopRaw<ContentType>(); 63 const auto start_posix_time{rp.Pop<s64>()};
64 [[maybe_unused]] const auto start_datetime = rp.PopRaw<AlbumFileDateTime>(); 64 const auto end_posix_time{rp.Pop<s64>()};
65 [[maybe_unused]] const auto end_datetime = rp.PopRaw<AlbumFileDateTime>(); 65 const auto applet_resource_user_id{rp.Pop<u64>()};
66 const auto applet_resource_user_id = rp.Pop<u64>(); 66
67 // TODO: Update this when we implement the album.
68 // Currently we do not have a method of accessing album entries, set this to 0 for now.
69 constexpr s32 total_entries{0};
70
67 LOG_WARNING(Service_Capture, 71 LOG_WARNING(Service_Capture,
68 "(STUBBED) called. pid={}, content_type={}, applet_resource_user_id={}", pid, 72 "(STUBBED) called. pid={}, content_type={}, start_posix_time={}, "
69 content_type, applet_resource_user_id); 73 "end_posix_time={}, applet_resource_user_id={}, total_entries={}",
74 pid, content_type, start_posix_time, end_posix_time, applet_resource_user_id,
75 total_entries);
70 76
71 IPC::ResponseBuilder rb{ctx, 3}; 77 IPC::ResponseBuilder rb{ctx, 3};
72 rb.Push(RESULT_SUCCESS); 78 rb.Push(RESULT_SUCCESS);
73 rb.Push<s32>(0); 79 rb.Push(total_entries);
74} 80}
75 81
76} // namespace Service::Capture 82} // namespace Service::Capture
diff --git a/src/core/hle/service/caps/caps_u.h b/src/core/hle/service/caps/caps_u.h
index e6e0716ff..689364de4 100644
--- a/src/core/hle/service/caps/caps_u.h
+++ b/src/core/hle/service/caps/caps_u.h
@@ -1,4 +1,4 @@
1// Copyright 2020 yuzu emulator team 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
diff --git a/src/core/hle/service/es/es.cpp b/src/core/hle/service/es/es.cpp
index f8e9df4b1..9365f27e1 100644
--- a/src/core/hle/service/es/es.cpp
+++ b/src/core/hle/service/es/es.cpp
@@ -27,8 +27,8 @@ public:
27 {8, &ETicket::GetTitleKey, "GetTitleKey"}, 27 {8, &ETicket::GetTitleKey, "GetTitleKey"},
28 {9, &ETicket::CountCommonTicket, "CountCommonTicket"}, 28 {9, &ETicket::CountCommonTicket, "CountCommonTicket"},
29 {10, &ETicket::CountPersonalizedTicket, "CountPersonalizedTicket"}, 29 {10, &ETicket::CountPersonalizedTicket, "CountPersonalizedTicket"},
30 {11, &ETicket::ListCommonTicket, "ListCommonTicket"}, 30 {11, &ETicket::ListCommonTicketRightsIds, "ListCommonTicketRightsIds"},
31 {12, &ETicket::ListPersonalizedTicket, "ListPersonalizedTicket"}, 31 {12, &ETicket::ListPersonalizedTicketRightsIds, "ListPersonalizedTicketRightsIds"},
32 {13, nullptr, "ListMissingPersonalizedTicket"}, 32 {13, nullptr, "ListMissingPersonalizedTicket"},
33 {14, &ETicket::GetCommonTicketSize, "GetCommonTicketSize"}, 33 {14, &ETicket::GetCommonTicketSize, "GetCommonTicketSize"},
34 {15, &ETicket::GetPersonalizedTicketSize, "GetPersonalizedTicketSize"}, 34 {15, &ETicket::GetPersonalizedTicketSize, "GetPersonalizedTicketSize"},
@@ -55,7 +55,46 @@ public:
55 {36, nullptr, "DeleteAllInactiveELicenseRequiredPersonalizedTicket"}, 55 {36, nullptr, "DeleteAllInactiveELicenseRequiredPersonalizedTicket"},
56 {37, nullptr, "OwnTicket2"}, 56 {37, nullptr, "OwnTicket2"},
57 {38, nullptr, "OwnTicket3"}, 57 {38, nullptr, "OwnTicket3"},
58 {501, nullptr, "Unknown501"},
59 {502, nullptr, "Unknown502"},
58 {503, nullptr, "GetTitleKey"}, 60 {503, nullptr, "GetTitleKey"},
61 {504, nullptr, "Unknown504"},
62 {508, nullptr, "Unknown508"},
63 {509, nullptr, "Unknown509"},
64 {510, nullptr, "Unknown510"},
65 {511, nullptr, "Unknown511"},
66 {1001, nullptr, "Unknown1001"},
67 {1002, nullptr, "Unknown1001"},
68 {1003, nullptr, "Unknown1003"},
69 {1004, nullptr, "Unknown1004"},
70 {1005, nullptr, "Unknown1005"},
71 {1006, nullptr, "Unknown1006"},
72 {1007, nullptr, "Unknown1007"},
73 {1009, nullptr, "Unknown1009"},
74 {1010, nullptr, "Unknown1010"},
75 {1011, nullptr, "Unknown1011"},
76 {1012, nullptr, "Unknown1012"},
77 {1013, nullptr, "Unknown1013"},
78 {1014, nullptr, "Unknown1014"},
79 {1015, nullptr, "Unknown1015"},
80 {1016, nullptr, "Unknown1016"},
81 {1017, nullptr, "Unknown1017"},
82 {1018, nullptr, "Unknown1018"},
83 {1019, nullptr, "Unknown1019"},
84 {1020, nullptr, "Unknown1020"},
85 {1021, nullptr, "Unknown1021"},
86 {1501, nullptr, "Unknown1501"},
87 {1502, nullptr, "Unknown1502"},
88 {1503, nullptr, "Unknown1503"},
89 {1504, nullptr, "Unknown1504"},
90 {1505, nullptr, "Unknown1505"},
91 {2000, nullptr, "Unknown2000"},
92 {2001, nullptr, "Unknown2001"},
93 {2100, nullptr, "Unknown2100"},
94 {2501, nullptr, "Unknown2501"},
95 {2502, nullptr, "Unknown2502"},
96 {3001, nullptr, "Unknown3001"},
97 {3002, nullptr, "Unknown3002"},
59 }; 98 };
60 // clang-format on 99 // clang-format on
61 RegisterHandlers(functions); 100 RegisterHandlers(functions);
@@ -147,7 +186,7 @@ private:
147 rb.Push<u32>(count); 186 rb.Push<u32>(count);
148 } 187 }
149 188
150 void ListCommonTicket(Kernel::HLERequestContext& ctx) { 189 void ListCommonTicketRightsIds(Kernel::HLERequestContext& ctx) {
151 u32 out_entries; 190 u32 out_entries;
152 if (keys.GetCommonTickets().empty()) 191 if (keys.GetCommonTickets().empty())
153 out_entries = 0; 192 out_entries = 0;
@@ -170,7 +209,7 @@ private:
170 rb.Push<u32>(out_entries); 209 rb.Push<u32>(out_entries);
171 } 210 }
172 211
173 void ListPersonalizedTicket(Kernel::HLERequestContext& ctx) { 212 void ListPersonalizedTicketRightsIds(Kernel::HLERequestContext& ctx) {
174 u32 out_entries; 213 u32 out_entries;
175 if (keys.GetPersonalizedTickets().empty()) 214 if (keys.GetPersonalizedTickets().empty())
176 out_entries = 0; 215 out_entries = 0;
diff --git a/src/core/hle/service/eupld/eupld.cpp b/src/core/hle/service/eupld/eupld.cpp
index 2df30acee..0d6d244f4 100644
--- a/src/core/hle/service/eupld/eupld.cpp
+++ b/src/core/hle/service/eupld/eupld.cpp
@@ -19,6 +19,7 @@ public:
19 {1, nullptr, "ImportCrt"}, 19 {1, nullptr, "ImportCrt"},
20 {2, nullptr, "ImportPki"}, 20 {2, nullptr, "ImportPki"},
21 {3, nullptr, "SetAutoUpload"}, 21 {3, nullptr, "SetAutoUpload"},
22 {4, nullptr, "GetAutoUpload"},
22 }; 23 };
23 // clang-format on 24 // clang-format on
24 25
diff --git a/src/core/hle/service/friend/friend.cpp b/src/core/hle/service/friend/friend.cpp
index 68f259b70..b7adaffc7 100644
--- a/src/core/hle/service/friend/friend.cpp
+++ b/src/core/hle/service/friend/friend.cpp
@@ -25,9 +25,13 @@ public:
25 {10101, &IFriendService::GetFriendList, "GetFriendList"}, 25 {10101, &IFriendService::GetFriendList, "GetFriendList"},
26 {10102, nullptr, "UpdateFriendInfo"}, 26 {10102, nullptr, "UpdateFriendInfo"},
27 {10110, nullptr, "GetFriendProfileImage"}, 27 {10110, nullptr, "GetFriendProfileImage"},
28 {10120, nullptr, "Unknown10120"},
29 {10121, nullptr, "Unknown10121"},
28 {10200, nullptr, "SendFriendRequestForApplication"}, 30 {10200, nullptr, "SendFriendRequestForApplication"},
29 {10211, nullptr, "AddFacedFriendRequestForApplication"}, 31 {10211, nullptr, "AddFacedFriendRequestForApplication"},
30 {10400, &IFriendService::GetBlockedUserListIds, "GetBlockedUserListIds"}, 32 {10400, &IFriendService::GetBlockedUserListIds, "GetBlockedUserListIds"},
33 {10420, nullptr, "Unknown10420"},
34 {10421, nullptr, "Unknown10421"},
31 {10500, nullptr, "GetProfileList"}, 35 {10500, nullptr, "GetProfileList"},
32 {10600, nullptr, "DeclareOpenOnlinePlaySession"}, 36 {10600, nullptr, "DeclareOpenOnlinePlaySession"},
33 {10601, &IFriendService::DeclareCloseOnlinePlaySession, "DeclareCloseOnlinePlaySession"}, 37 {10601, &IFriendService::DeclareCloseOnlinePlaySession, "DeclareCloseOnlinePlaySession"},
@@ -97,6 +101,8 @@ public:
97 {30900, nullptr, "SendFriendInvitation"}, 101 {30900, nullptr, "SendFriendInvitation"},
98 {30910, nullptr, "ReadFriendInvitation"}, 102 {30910, nullptr, "ReadFriendInvitation"},
99 {30911, nullptr, "ReadAllFriendInvitations"}, 103 {30911, nullptr, "ReadAllFriendInvitations"},
104 {40100, nullptr, "Unknown40100"},
105 {40400, nullptr, "Unknown40400"},
100 {49900, nullptr, "DeleteNetworkServiceAccountCache"}, 106 {49900, nullptr, "DeleteNetworkServiceAccountCache"},
101 }; 107 };
102 // clang-format on 108 // clang-format on
diff --git a/src/core/hle/service/grc/grc.cpp b/src/core/hle/service/grc/grc.cpp
index 24910ac6c..401e0b208 100644
--- a/src/core/hle/service/grc/grc.cpp
+++ b/src/core/hle/service/grc/grc.cpp
@@ -17,6 +17,9 @@ public:
17 static const FunctionInfo functions[] = { 17 static const FunctionInfo functions[] = {
18 {1, nullptr, "OpenContinuousRecorder"}, 18 {1, nullptr, "OpenContinuousRecorder"},
19 {2, nullptr, "OpenGameMovieTrimmer"}, 19 {2, nullptr, "OpenGameMovieTrimmer"},
20 {3, nullptr, "OpenOffscreenRecorder"},
21 {101, nullptr, "CreateMovieMaker"},
22 {9903, nullptr, "SetOffscreenRecordingMarker"}
20 }; 23 };
21 // clang-format on 24 // clang-format on
22 25
diff --git a/src/core/hle/service/hid/controllers/debug_pad.cpp b/src/core/hle/service/hid/controllers/debug_pad.cpp
index 1f2131ec8..cb35919e9 100644
--- a/src/core/hle/service/hid/controllers/debug_pad.cpp
+++ b/src/core/hle/service/hid/controllers/debug_pad.cpp
@@ -23,7 +23,7 @@ void Controller_DebugPad::OnRelease() {}
23 23
24void Controller_DebugPad::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data, 24void Controller_DebugPad::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data,
25 std::size_t size) { 25 std::size_t size) {
26 shared_memory.header.timestamp = core_timing.GetTicks(); 26 shared_memory.header.timestamp = core_timing.GetCPUTicks();
27 shared_memory.header.total_entry_count = 17; 27 shared_memory.header.total_entry_count = 17;
28 28
29 if (!IsControllerActivated()) { 29 if (!IsControllerActivated()) {
diff --git a/src/core/hle/service/hid/controllers/gesture.cpp b/src/core/hle/service/hid/controllers/gesture.cpp
index 6e990dd00..b7b7bfeae 100644
--- a/src/core/hle/service/hid/controllers/gesture.cpp
+++ b/src/core/hle/service/hid/controllers/gesture.cpp
@@ -19,7 +19,7 @@ void Controller_Gesture::OnRelease() {}
19 19
20void Controller_Gesture::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data, 20void Controller_Gesture::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data,
21 std::size_t size) { 21 std::size_t size) {
22 shared_memory.header.timestamp = core_timing.GetTicks(); 22 shared_memory.header.timestamp = core_timing.GetCPUTicks();
23 shared_memory.header.total_entry_count = 17; 23 shared_memory.header.total_entry_count = 17;
24 24
25 if (!IsControllerActivated()) { 25 if (!IsControllerActivated()) {
diff --git a/src/core/hle/service/hid/controllers/keyboard.cpp b/src/core/hle/service/hid/controllers/keyboard.cpp
index 9a8d354ba..feae89525 100644
--- a/src/core/hle/service/hid/controllers/keyboard.cpp
+++ b/src/core/hle/service/hid/controllers/keyboard.cpp
@@ -21,7 +21,7 @@ void Controller_Keyboard::OnRelease() {}
21 21
22void Controller_Keyboard::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data, 22void Controller_Keyboard::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data,
23 std::size_t size) { 23 std::size_t size) {
24 shared_memory.header.timestamp = core_timing.GetTicks(); 24 shared_memory.header.timestamp = core_timing.GetCPUTicks();
25 shared_memory.header.total_entry_count = 17; 25 shared_memory.header.total_entry_count = 17;
26 26
27 if (!IsControllerActivated()) { 27 if (!IsControllerActivated()) {
diff --git a/src/core/hle/service/hid/controllers/mouse.cpp b/src/core/hle/service/hid/controllers/mouse.cpp
index 93d88ea50..ac40989c5 100644
--- a/src/core/hle/service/hid/controllers/mouse.cpp
+++ b/src/core/hle/service/hid/controllers/mouse.cpp
@@ -19,7 +19,7 @@ void Controller_Mouse::OnRelease() {}
19 19
20void Controller_Mouse::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data, 20void Controller_Mouse::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data,
21 std::size_t size) { 21 std::size_t size) {
22 shared_memory.header.timestamp = core_timing.GetTicks(); 22 shared_memory.header.timestamp = core_timing.GetCPUTicks();
23 shared_memory.header.total_entry_count = 17; 23 shared_memory.header.total_entry_count = 17;
24 24
25 if (!IsControllerActivated()) { 25 if (!IsControllerActivated()) {
diff --git a/src/core/hle/service/hid/controllers/npad.cpp b/src/core/hle/service/hid/controllers/npad.cpp
index 6fbee7efa..ef67ad690 100644
--- a/src/core/hle/service/hid/controllers/npad.cpp
+++ b/src/core/hle/service/hid/controllers/npad.cpp
@@ -328,7 +328,7 @@ void Controller_NPad::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8*
328 const auto& last_entry = 328 const auto& last_entry =
329 main_controller->npad[main_controller->common.last_entry_index]; 329 main_controller->npad[main_controller->common.last_entry_index];
330 330
331 main_controller->common.timestamp = core_timing.GetTicks(); 331 main_controller->common.timestamp = core_timing.GetCPUTicks();
332 main_controller->common.last_entry_index = 332 main_controller->common.last_entry_index =
333 (main_controller->common.last_entry_index + 1) % 17; 333 (main_controller->common.last_entry_index + 1) % 17;
334 334
diff --git a/src/core/hle/service/hid/controllers/stubbed.cpp b/src/core/hle/service/hid/controllers/stubbed.cpp
index 9e527d176..e7483bfa2 100644
--- a/src/core/hle/service/hid/controllers/stubbed.cpp
+++ b/src/core/hle/service/hid/controllers/stubbed.cpp
@@ -23,7 +23,7 @@ void Controller_Stubbed::OnUpdate(const Core::Timing::CoreTiming& core_timing, u
23 } 23 }
24 24
25 CommonHeader header{}; 25 CommonHeader header{};
26 header.timestamp = core_timing.GetTicks(); 26 header.timestamp = core_timing.GetCPUTicks();
27 header.total_entry_count = 17; 27 header.total_entry_count = 17;
28 header.entry_count = 0; 28 header.entry_count = 0;
29 header.last_entry_index = 0; 29 header.last_entry_index = 0;
diff --git a/src/core/hle/service/hid/controllers/touchscreen.cpp b/src/core/hle/service/hid/controllers/touchscreen.cpp
index 1c6e55566..e326f8f5c 100644
--- a/src/core/hle/service/hid/controllers/touchscreen.cpp
+++ b/src/core/hle/service/hid/controllers/touchscreen.cpp
@@ -22,7 +22,7 @@ void Controller_Touchscreen::OnRelease() {}
22 22
23void Controller_Touchscreen::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data, 23void Controller_Touchscreen::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data,
24 std::size_t size) { 24 std::size_t size) {
25 shared_memory.header.timestamp = core_timing.GetTicks(); 25 shared_memory.header.timestamp = core_timing.GetCPUTicks();
26 shared_memory.header.total_entry_count = 17; 26 shared_memory.header.total_entry_count = 17;
27 27
28 if (!IsControllerActivated()) { 28 if (!IsControllerActivated()) {
@@ -49,7 +49,7 @@ void Controller_Touchscreen::OnUpdate(const Core::Timing::CoreTiming& core_timin
49 touch_entry.diameter_x = Settings::values.touchscreen.diameter_x; 49 touch_entry.diameter_x = Settings::values.touchscreen.diameter_x;
50 touch_entry.diameter_y = Settings::values.touchscreen.diameter_y; 50 touch_entry.diameter_y = Settings::values.touchscreen.diameter_y;
51 touch_entry.rotation_angle = Settings::values.touchscreen.rotation_angle; 51 touch_entry.rotation_angle = Settings::values.touchscreen.rotation_angle;
52 const u64 tick = core_timing.GetTicks(); 52 const u64 tick = core_timing.GetCPUTicks();
53 touch_entry.delta_time = tick - last_touch; 53 touch_entry.delta_time = tick - last_touch;
54 last_touch = tick; 54 last_touch = tick;
55 touch_entry.finger = Settings::values.touchscreen.finger; 55 touch_entry.finger = Settings::values.touchscreen.finger;
diff --git a/src/core/hle/service/hid/controllers/xpad.cpp b/src/core/hle/service/hid/controllers/xpad.cpp
index 27511b27b..2503ef241 100644
--- a/src/core/hle/service/hid/controllers/xpad.cpp
+++ b/src/core/hle/service/hid/controllers/xpad.cpp
@@ -20,7 +20,7 @@ void Controller_XPad::OnRelease() {}
20void Controller_XPad::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data, 20void Controller_XPad::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data,
21 std::size_t size) { 21 std::size_t size) {
22 for (auto& xpad_entry : shared_memory.shared_memory_entries) { 22 for (auto& xpad_entry : shared_memory.shared_memory_entries) {
23 xpad_entry.header.timestamp = core_timing.GetTicks(); 23 xpad_entry.header.timestamp = core_timing.GetCPUTicks();
24 xpad_entry.header.total_entry_count = 17; 24 xpad_entry.header.total_entry_count = 17;
25 25
26 if (!IsControllerActivated()) { 26 if (!IsControllerActivated()) {
diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp
index 57d5edea7..e9020e0dc 100644
--- a/src/core/hle/service/hid/hid.cpp
+++ b/src/core/hle/service/hid/hid.cpp
@@ -39,11 +39,9 @@ namespace Service::HID {
39 39
40// Updating period for each HID device. 40// Updating period for each HID device.
41// TODO(ogniK): Find actual polling rate of hid 41// TODO(ogniK): Find actual polling rate of hid
42constexpr s64 pad_update_ticks = static_cast<s64>(Core::Hardware::BASE_CLOCK_RATE / 66); 42constexpr s64 pad_update_ticks = static_cast<s64>(1000000000 / 66);
43[[maybe_unused]] constexpr s64 accelerometer_update_ticks = 43[[maybe_unused]] constexpr s64 accelerometer_update_ticks = static_cast<s64>(1000000000 / 100);
44 static_cast<s64>(Core::Hardware::BASE_CLOCK_RATE / 100); 44[[maybe_unused]] constexpr s64 gyroscope_update_ticks = static_cast<s64>(1000000000 / 100);
45[[maybe_unused]] constexpr s64 gyroscope_update_ticks =
46 static_cast<s64>(Core::Hardware::BASE_CLOCK_RATE / 100);
47constexpr std::size_t SHARED_MEMORY_SIZE = 0x40000; 45constexpr std::size_t SHARED_MEMORY_SIZE = 0x40000;
48 46
49IAppletResource::IAppletResource(Core::System& system) 47IAppletResource::IAppletResource(Core::System& system)
@@ -78,8 +76,8 @@ IAppletResource::IAppletResource(Core::System& system)
78 76
79 // Register update callbacks 77 // Register update callbacks
80 pad_update_event = 78 pad_update_event =
81 Core::Timing::CreateEvent("HID::UpdatePadCallback", [this](u64 userdata, s64 cycles_late) { 79 Core::Timing::CreateEvent("HID::UpdatePadCallback", [this](u64 userdata, s64 ns_late) {
82 UpdateControllers(userdata, cycles_late); 80 UpdateControllers(userdata, ns_late);
83 }); 81 });
84 82
85 // TODO(shinyquagsire23): Other update callbacks? (accel, gyro?) 83 // TODO(shinyquagsire23): Other update callbacks? (accel, gyro?)
@@ -109,7 +107,7 @@ void IAppletResource::GetSharedMemoryHandle(Kernel::HLERequestContext& ctx) {
109 rb.PushCopyObjects(shared_mem); 107 rb.PushCopyObjects(shared_mem);
110} 108}
111 109
112void IAppletResource::UpdateControllers(u64 userdata, s64 cycles_late) { 110void IAppletResource::UpdateControllers(u64 userdata, s64 ns_late) {
113 auto& core_timing = system.CoreTiming(); 111 auto& core_timing = system.CoreTiming();
114 112
115 const bool should_reload = Settings::values.is_device_reload_pending.exchange(false); 113 const bool should_reload = Settings::values.is_device_reload_pending.exchange(false);
@@ -120,7 +118,7 @@ void IAppletResource::UpdateControllers(u64 userdata, s64 cycles_late) {
120 controller->OnUpdate(core_timing, shared_mem->GetPointer(), SHARED_MEMORY_SIZE); 118 controller->OnUpdate(core_timing, shared_mem->GetPointer(), SHARED_MEMORY_SIZE);
121 } 119 }
122 120
123 core_timing.ScheduleEvent(pad_update_ticks - cycles_late, pad_update_event); 121 core_timing.ScheduleEvent(pad_update_ticks - ns_late, pad_update_event);
124} 122}
125 123
126class IActiveVibrationDeviceList final : public ServiceFramework<IActiveVibrationDeviceList> { 124class IActiveVibrationDeviceList final : public ServiceFramework<IActiveVibrationDeviceList> {
diff --git a/src/core/hle/service/hid/irs.cpp b/src/core/hle/service/hid/irs.cpp
index 36ed6f7da..e82fd031b 100644
--- a/src/core/hle/service/hid/irs.cpp
+++ b/src/core/hle/service/hid/irs.cpp
@@ -98,7 +98,7 @@ void IRS::GetImageTransferProcessorState(Kernel::HLERequestContext& ctx) {
98 98
99 IPC::ResponseBuilder rb{ctx, 5}; 99 IPC::ResponseBuilder rb{ctx, 5};
100 rb.Push(RESULT_SUCCESS); 100 rb.Push(RESULT_SUCCESS);
101 rb.PushRaw<u64>(system.CoreTiming().GetTicks()); 101 rb.PushRaw<u64>(system.CoreTiming().GetCPUTicks());
102 rb.PushRaw<u32>(0); 102 rb.PushRaw<u32>(0);
103} 103}
104 104
diff --git a/src/core/hle/service/lbl/lbl.cpp b/src/core/hle/service/lbl/lbl.cpp
index e8f9f2d29..17350b403 100644
--- a/src/core/hle/service/lbl/lbl.cpp
+++ b/src/core/hle/service/lbl/lbl.cpp
@@ -47,6 +47,7 @@ public:
47 {26, &LBL::EnableVrMode, "EnableVrMode"}, 47 {26, &LBL::EnableVrMode, "EnableVrMode"},
48 {27, &LBL::DisableVrMode, "DisableVrMode"}, 48 {27, &LBL::DisableVrMode, "DisableVrMode"},
49 {28, &LBL::IsVrModeEnabled, "IsVrModeEnabled"}, 49 {28, &LBL::IsVrModeEnabled, "IsVrModeEnabled"},
50 {29, nullptr, "IsAutoBrightnessControlSupported"},
50 }; 51 };
51 // clang-format on 52 // clang-format on
52 53
diff --git a/src/core/hle/service/ldn/ldn.cpp b/src/core/hle/service/ldn/ldn.cpp
index 92adde6d4..49972cd69 100644
--- a/src/core/hle/service/ldn/ldn.cpp
+++ b/src/core/hle/service/ldn/ldn.cpp
@@ -69,6 +69,7 @@ public:
69 {101, nullptr, "GetNetworkInfoLatestUpdate"}, 69 {101, nullptr, "GetNetworkInfoLatestUpdate"},
70 {102, nullptr, "Scan"}, 70 {102, nullptr, "Scan"},
71 {103, nullptr, "ScanPrivate"}, 71 {103, nullptr, "ScanPrivate"},
72 {104, nullptr, "SetWirelessControllerRestriction"},
72 {200, nullptr, "OpenAccessPoint"}, 73 {200, nullptr, "OpenAccessPoint"},
73 {201, nullptr, "CloseAccessPoint"}, 74 {201, nullptr, "CloseAccessPoint"},
74 {202, nullptr, "CreateNetwork"}, 75 {202, nullptr, "CreateNetwork"},
diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp
index 6ad3be1b3..64a526b9e 100644
--- a/src/core/hle/service/ldr/ldr.cpp
+++ b/src/core/hle/service/ldr/ldr.cpp
@@ -39,42 +39,61 @@ constexpr ResultCode ERROR_NOT_INITIALIZED{ErrorModule::Loader, 87};
39constexpr std::size_t MAXIMUM_LOADED_RO{0x40}; 39constexpr std::size_t MAXIMUM_LOADED_RO{0x40};
40constexpr std::size_t MAXIMUM_MAP_RETRIES{0x200}; 40constexpr std::size_t MAXIMUM_MAP_RETRIES{0x200};
41 41
42constexpr std::size_t TEXT_INDEX{0};
43constexpr std::size_t RO_INDEX{1};
44constexpr std::size_t DATA_INDEX{2};
45
46struct NRRCertification {
47 u64_le application_id_mask;
48 u64_le application_id_pattern;
49 INSERT_PADDING_BYTES(0x10);
50 std::array<u8, 0x100> public_key; // Also known as modulus
51 std::array<u8, 0x100> signature;
52};
53static_assert(sizeof(NRRCertification) == 0x220, "NRRCertification has invalid size.");
54
42struct NRRHeader { 55struct NRRHeader {
43 u32_le magic; 56 u32_le magic;
44 INSERT_PADDING_BYTES(12); 57 u32_le certification_signature_key_generation; // 9.0.0+
45 u64_le title_id_mask; 58 INSERT_PADDING_WORDS(2);
46 u64_le title_id_pattern; 59 NRRCertification certification;
47 INSERT_PADDING_BYTES(16); 60 std::array<u8, 0x100> signature;
48 std::array<u8, 0x100> modulus; 61 u64_le application_id;
49 std::array<u8, 0x100> signature_1;
50 std::array<u8, 0x100> signature_2;
51 u64_le title_id;
52 u32_le size; 62 u32_le size;
53 INSERT_PADDING_BYTES(4); 63 u8 nrr_kind; // 7.0.0+
64 INSERT_PADDING_BYTES(3);
54 u32_le hash_offset; 65 u32_le hash_offset;
55 u32_le hash_count; 66 u32_le hash_count;
56 INSERT_PADDING_BYTES(8); 67 INSERT_PADDING_WORDS(2);
68};
69static_assert(sizeof(NRRHeader) == 0x350, "NRRHeader has invalid size.");
70
71struct SegmentHeader {
72 u32_le memory_offset;
73 u32_le memory_size;
57}; 74};
58static_assert(sizeof(NRRHeader) == 0x350, "NRRHeader has incorrect size."); 75static_assert(sizeof(SegmentHeader) == 0x8, "SegmentHeader has invalid size.");
59 76
60struct NROHeader { 77struct NROHeader {
78 // Switchbrew calls this "Start" (0x10)
61 INSERT_PADDING_WORDS(1); 79 INSERT_PADDING_WORDS(1);
62 u32_le mod_offset; 80 u32_le mod_offset;
63 INSERT_PADDING_WORDS(2); 81 INSERT_PADDING_WORDS(2);
82
83 // Switchbrew calls this "Header" (0x70)
64 u32_le magic; 84 u32_le magic;
65 u32_le version; 85 u32_le version;
66 u32_le nro_size; 86 u32_le nro_size;
67 u32_le flags; 87 u32_le flags;
68 u32_le text_offset; 88 // .text, .ro, .data
69 u32_le text_size; 89 std::array<SegmentHeader, 3> segment_headers;
70 u32_le ro_offset;
71 u32_le ro_size;
72 u32_le rw_offset;
73 u32_le rw_size;
74 u32_le bss_size; 90 u32_le bss_size;
75 INSERT_PADDING_WORDS(1); 91 INSERT_PADDING_WORDS(1);
76 std::array<u8, 0x20> build_id; 92 std::array<u8, 0x20> build_id;
77 INSERT_PADDING_BYTES(0x20); 93 u32_le dso_handle_offset;
94 INSERT_PADDING_WORDS(1);
95 // .apiInfo, .dynstr, .dynsym
96 std::array<SegmentHeader, 3> segment_headers_2;
78}; 97};
79static_assert(sizeof(NROHeader) == 0x80, "NROHeader has invalid size."); 98static_assert(sizeof(NROHeader) == 0x80, "NROHeader has invalid size.");
80 99
@@ -91,6 +110,7 @@ struct NROInfo {
91 std::size_t data_size{}; 110 std::size_t data_size{};
92 VAddr src_addr{}; 111 VAddr src_addr{};
93}; 112};
113static_assert(sizeof(NROInfo) == 0x60, "NROInfo has invalid size.");
94 114
95class DebugMonitor final : public ServiceFramework<DebugMonitor> { 115class DebugMonitor final : public ServiceFramework<DebugMonitor> {
96public: 116public:
@@ -226,11 +246,11 @@ public:
226 return; 246 return;
227 } 247 }
228 248
229 if (system.CurrentProcess()->GetTitleID() != header.title_id) { 249 if (system.CurrentProcess()->GetTitleID() != header.application_id) {
230 LOG_ERROR(Service_LDR, 250 LOG_ERROR(Service_LDR,
231 "Attempting to load NRR with title ID other than current process. (actual " 251 "Attempting to load NRR with title ID other than current process. (actual "
232 "{:016X})!", 252 "{:016X})!",
233 header.title_id); 253 header.application_id);
234 IPC::ResponseBuilder rb{ctx, 2}; 254 IPC::ResponseBuilder rb{ctx, 2};
235 rb.Push(ERROR_INVALID_NRR); 255 rb.Push(ERROR_INVALID_NRR);
236 return; 256 return;
@@ -348,10 +368,10 @@ public:
348 368
349 ResultCode LoadNro(Kernel::Process* process, const NROHeader& nro_header, VAddr nro_addr, 369 ResultCode LoadNro(Kernel::Process* process, const NROHeader& nro_header, VAddr nro_addr,
350 VAddr start) const { 370 VAddr start) const {
351 const VAddr text_start{start + nro_header.text_offset}; 371 const VAddr text_start{start + nro_header.segment_headers[TEXT_INDEX].memory_offset};
352 const VAddr ro_start{start + nro_header.ro_offset}; 372 const VAddr ro_start{start + nro_header.segment_headers[RO_INDEX].memory_offset};
353 const VAddr data_start{start + nro_header.rw_offset}; 373 const VAddr data_start{start + nro_header.segment_headers[DATA_INDEX].memory_offset};
354 const VAddr bss_start{data_start + nro_header.rw_size}; 374 const VAddr bss_start{data_start + nro_header.segment_headers[DATA_INDEX].memory_size};
355 const VAddr bss_end_addr{ 375 const VAddr bss_end_addr{
356 Common::AlignUp(bss_start + nro_header.bss_size, Kernel::Memory::PageSize)}; 376 Common::AlignUp(bss_start + nro_header.bss_size, Kernel::Memory::PageSize)};
357 377
@@ -360,9 +380,12 @@ public:
360 system.Memory().ReadBlock(src_addr, source_data.data(), source_data.size()); 380 system.Memory().ReadBlock(src_addr, source_data.data(), source_data.size());
361 system.Memory().WriteBlock(dst_addr, source_data.data(), source_data.size()); 381 system.Memory().WriteBlock(dst_addr, source_data.data(), source_data.size());
362 }}; 382 }};
363 CopyCode(nro_addr + nro_header.text_offset, text_start, nro_header.text_size); 383 CopyCode(nro_addr + nro_header.segment_headers[TEXT_INDEX].memory_offset, text_start,
364 CopyCode(nro_addr + nro_header.ro_offset, ro_start, nro_header.ro_size); 384 nro_header.segment_headers[TEXT_INDEX].memory_size);
365 CopyCode(nro_addr + nro_header.rw_offset, data_start, nro_header.rw_size); 385 CopyCode(nro_addr + nro_header.segment_headers[RO_INDEX].memory_offset, ro_start,
386 nro_header.segment_headers[RO_INDEX].memory_size);
387 CopyCode(nro_addr + nro_header.segment_headers[DATA_INDEX].memory_offset, data_start,
388 nro_header.segment_headers[DATA_INDEX].memory_size);
366 389
367 CASCADE_CODE(process->PageTable().SetCodeMemoryPermission( 390 CASCADE_CODE(process->PageTable().SetCodeMemoryPermission(
368 text_start, ro_start - text_start, Kernel::Memory::MemoryPermission::ReadAndExecute)); 391 text_start, ro_start - text_start, Kernel::Memory::MemoryPermission::ReadAndExecute));
@@ -484,9 +507,11 @@ public:
484 } 507 }
485 508
486 // Track the loaded NRO 509 // Track the loaded NRO
487 nro.insert_or_assign(*map_result, NROInfo{hash, *map_result, nro_size, bss_address, 510 nro.insert_or_assign(*map_result,
488 bss_size, header.text_size, header.ro_size, 511 NROInfo{hash, *map_result, nro_size, bss_address, bss_size,
489 header.rw_size, nro_address}); 512 header.segment_headers[TEXT_INDEX].memory_size,
513 header.segment_headers[RO_INDEX].memory_size,
514 header.segment_headers[DATA_INDEX].memory_size, nro_address});
490 515
491 // Invalidate JIT caches for the newly mapped process code 516 // Invalidate JIT caches for the newly mapped process code
492 system.InvalidateCpuInstructionCaches(); 517 system.InvalidateCpuInstructionCaches();
@@ -584,11 +609,21 @@ private:
584 static bool IsValidNRO(const NROHeader& header, u64 nro_size, u64 bss_size) { 609 static bool IsValidNRO(const NROHeader& header, u64 nro_size, u64 bss_size) {
585 return header.magic == Common::MakeMagic('N', 'R', 'O', '0') && 610 return header.magic == Common::MakeMagic('N', 'R', 'O', '0') &&
586 header.nro_size == nro_size && header.bss_size == bss_size && 611 header.nro_size == nro_size && header.bss_size == bss_size &&
587 header.ro_offset == header.text_offset + header.text_size && 612
588 header.rw_offset == header.ro_offset + header.ro_size && 613 header.segment_headers[RO_INDEX].memory_offset ==
589 nro_size == header.rw_offset + header.rw_size && 614 header.segment_headers[TEXT_INDEX].memory_offset +
590 Common::Is4KBAligned(header.text_size) && Common::Is4KBAligned(header.ro_size) && 615 header.segment_headers[TEXT_INDEX].memory_size &&
591 Common::Is4KBAligned(header.rw_size); 616
617 header.segment_headers[DATA_INDEX].memory_offset ==
618 header.segment_headers[RO_INDEX].memory_offset +
619 header.segment_headers[RO_INDEX].memory_size &&
620
621 nro_size == header.segment_headers[DATA_INDEX].memory_offset +
622 header.segment_headers[DATA_INDEX].memory_size &&
623
624 Common::Is4KBAligned(header.segment_headers[TEXT_INDEX].memory_size) &&
625 Common::Is4KBAligned(header.segment_headers[RO_INDEX].memory_size) &&
626 Common::Is4KBAligned(header.segment_headers[DATA_INDEX].memory_size);
592 } 627 }
593 Core::System& system; 628 Core::System& system;
594}; 629};
diff --git a/src/core/hle/service/mig/mig.cpp b/src/core/hle/service/mig/mig.cpp
index d16367f2c..113a4665c 100644
--- a/src/core/hle/service/mig/mig.cpp
+++ b/src/core/hle/service/mig/mig.cpp
@@ -20,6 +20,12 @@ public:
20 {101, nullptr, "ResumeServer"}, 20 {101, nullptr, "ResumeServer"},
21 {200, nullptr, "CreateClient"}, 21 {200, nullptr, "CreateClient"},
22 {201, nullptr, "ResumeClient"}, 22 {201, nullptr, "ResumeClient"},
23 {1001, nullptr, "Unknown1001"},
24 {1010, nullptr, "Unknown1010"},
25 {1100, nullptr, "Unknown1100"},
26 {1101, nullptr, "Unknown1101"},
27 {1200, nullptr, "Unknown1200"},
28 {1201, nullptr, "Unknown1201"}
23 }; 29 };
24 // clang-format on 30 // clang-format on
25 31
diff --git a/src/core/hle/service/mm/mm_u.cpp b/src/core/hle/service/mm/mm_u.cpp
index def63dc8a..25c24e537 100644
--- a/src/core/hle/service/mm/mm_u.cpp
+++ b/src/core/hle/service/mm/mm_u.cpp
@@ -14,14 +14,14 @@ public:
14 explicit MM_U() : ServiceFramework{"mm:u"} { 14 explicit MM_U() : ServiceFramework{"mm:u"} {
15 // clang-format off 15 // clang-format off
16 static const FunctionInfo functions[] = { 16 static const FunctionInfo functions[] = {
17 {0, &MM_U::Initialize, "Initialize"}, 17 {0, &MM_U::InitializeOld, "InitializeOld"},
18 {1, &MM_U::Finalize, "Finalize"}, 18 {1, &MM_U::FinalizeOld, "FinalizeOld"},
19 {2, &MM_U::SetAndWait, "SetAndWait"}, 19 {2, &MM_U::SetAndWaitOld, "SetAndWaitOld"},
20 {3, &MM_U::Get, "Get"}, 20 {3, &MM_U::GetOld, "GetOld"},
21 {4, &MM_U::InitializeWithId, "InitializeWithId"}, 21 {4, &MM_U::Initialize, "Initialize"},
22 {5, &MM_U::FinalizeWithId, "FinalizeWithId"}, 22 {5, &MM_U::Finalize, "Finalize"},
23 {6, &MM_U::SetAndWaitWithId, "SetAndWaitWithId"}, 23 {6, &MM_U::SetAndWait, "SetAndWait"},
24 {7, &MM_U::GetWithId, "GetWithId"}, 24 {7, &MM_U::Get, "Get"},
25 }; 25 };
26 // clang-format on 26 // clang-format on
27 27
@@ -29,21 +29,21 @@ public:
29 } 29 }
30 30
31private: 31private:
32 void Initialize(Kernel::HLERequestContext& ctx) { 32 void InitializeOld(Kernel::HLERequestContext& ctx) {
33 LOG_WARNING(Service_MM, "(STUBBED) called"); 33 LOG_WARNING(Service_MM, "(STUBBED) called");
34 34
35 IPC::ResponseBuilder rb{ctx, 2}; 35 IPC::ResponseBuilder rb{ctx, 2};
36 rb.Push(RESULT_SUCCESS); 36 rb.Push(RESULT_SUCCESS);
37 } 37 }
38 38
39 void Finalize(Kernel::HLERequestContext& ctx) { 39 void FinalizeOld(Kernel::HLERequestContext& ctx) {
40 LOG_WARNING(Service_MM, "(STUBBED) called"); 40 LOG_WARNING(Service_MM, "(STUBBED) called");
41 41
42 IPC::ResponseBuilder rb{ctx, 2}; 42 IPC::ResponseBuilder rb{ctx, 2};
43 rb.Push(RESULT_SUCCESS); 43 rb.Push(RESULT_SUCCESS);
44 } 44 }
45 45
46 void SetAndWait(Kernel::HLERequestContext& ctx) { 46 void SetAndWaitOld(Kernel::HLERequestContext& ctx) {
47 IPC::RequestParser rp{ctx}; 47 IPC::RequestParser rp{ctx};
48 min = rp.Pop<u32>(); 48 min = rp.Pop<u32>();
49 max = rp.Pop<u32>(); 49 max = rp.Pop<u32>();
@@ -54,7 +54,7 @@ private:
54 rb.Push(RESULT_SUCCESS); 54 rb.Push(RESULT_SUCCESS);
55 } 55 }
56 56
57 void Get(Kernel::HLERequestContext& ctx) { 57 void GetOld(Kernel::HLERequestContext& ctx) {
58 LOG_WARNING(Service_MM, "(STUBBED) called"); 58 LOG_WARNING(Service_MM, "(STUBBED) called");
59 59
60 IPC::ResponseBuilder rb{ctx, 3}; 60 IPC::ResponseBuilder rb{ctx, 3};
@@ -62,7 +62,7 @@ private:
62 rb.Push(current); 62 rb.Push(current);
63 } 63 }
64 64
65 void InitializeWithId(Kernel::HLERequestContext& ctx) { 65 void Initialize(Kernel::HLERequestContext& ctx) {
66 LOG_WARNING(Service_MM, "(STUBBED) called"); 66 LOG_WARNING(Service_MM, "(STUBBED) called");
67 67
68 IPC::ResponseBuilder rb{ctx, 3}; 68 IPC::ResponseBuilder rb{ctx, 3};
@@ -70,14 +70,14 @@ private:
70 rb.Push<u32>(id); // Any non zero value 70 rb.Push<u32>(id); // Any non zero value
71 } 71 }
72 72
73 void FinalizeWithId(Kernel::HLERequestContext& ctx) { 73 void Finalize(Kernel::HLERequestContext& ctx) {
74 LOG_WARNING(Service_MM, "(STUBBED) called"); 74 LOG_WARNING(Service_MM, "(STUBBED) called");
75 75
76 IPC::ResponseBuilder rb{ctx, 2}; 76 IPC::ResponseBuilder rb{ctx, 2};
77 rb.Push(RESULT_SUCCESS); 77 rb.Push(RESULT_SUCCESS);
78 } 78 }
79 79
80 void SetAndWaitWithId(Kernel::HLERequestContext& ctx) { 80 void SetAndWait(Kernel::HLERequestContext& ctx) {
81 IPC::RequestParser rp{ctx}; 81 IPC::RequestParser rp{ctx};
82 u32 input_id = rp.Pop<u32>(); 82 u32 input_id = rp.Pop<u32>();
83 min = rp.Pop<u32>(); 83 min = rp.Pop<u32>();
@@ -90,7 +90,7 @@ private:
90 rb.Push(RESULT_SUCCESS); 90 rb.Push(RESULT_SUCCESS);
91 } 91 }
92 92
93 void GetWithId(Kernel::HLERequestContext& ctx) { 93 void Get(Kernel::HLERequestContext& ctx) {
94 LOG_WARNING(Service_MM, "(STUBBED) called"); 94 LOG_WARNING(Service_MM, "(STUBBED) called");
95 95
96 IPC::ResponseBuilder rb{ctx, 3}; 96 IPC::ResponseBuilder rb{ctx, 3};
diff --git a/src/core/hle/service/ncm/ncm.cpp b/src/core/hle/service/ncm/ncm.cpp
index ec9aae04a..e38dea1f4 100644
--- a/src/core/hle/service/ncm/ncm.cpp
+++ b/src/core/hle/service/ncm/ncm.cpp
@@ -28,16 +28,16 @@ public:
28 {7, nullptr, "ResolveApplicationLegalInformationPath"}, 28 {7, nullptr, "ResolveApplicationLegalInformationPath"},
29 {8, nullptr, "RedirectApplicationLegalInformationPath"}, 29 {8, nullptr, "RedirectApplicationLegalInformationPath"},
30 {9, nullptr, "Refresh"}, 30 {9, nullptr, "Refresh"},
31 {10, nullptr, "RedirectProgramPath2"}, 31 {10, nullptr, "RedirectApplicationProgramPath"},
32 {11, nullptr, "Refresh2"}, 32 {11, nullptr, "ClearApplicationRedirection"},
33 {12, nullptr, "DeleteProgramPath"}, 33 {12, nullptr, "EraseProgramRedirection"},
34 {13, nullptr, "DeleteApplicationControlPath"}, 34 {13, nullptr, "EraseApplicationControlRedirection"},
35 {14, nullptr, "DeleteApplicationHtmlDocumentPath"}, 35 {14, nullptr, "EraseApplicationHtmlDocumentRedirection"},
36 {15, nullptr, "DeleteApplicationLegalInformationPath"}, 36 {15, nullptr, "EraseApplicationLegalInformationRedirection"},
37 {16, nullptr, ""}, 37 {16, nullptr, "ResolveProgramPathForDebug"},
38 {17, nullptr, ""}, 38 {17, nullptr, "RedirectProgramPathForDebug"},
39 {18, nullptr, ""}, 39 {18, nullptr, "RedirectApplicationProgramPathForDebug"},
40 {19, nullptr, ""}, 40 {19, nullptr, "EraseProgramRedirectionForDebug"},
41 }; 41 };
42 // clang-format on 42 // clang-format on
43 43
diff --git a/src/core/hle/service/nfc/nfc.cpp b/src/core/hle/service/nfc/nfc.cpp
index b7b34ce7e..780ea30fe 100644
--- a/src/core/hle/service/nfc/nfc.cpp
+++ b/src/core/hle/service/nfc/nfc.cpp
@@ -198,9 +198,9 @@ public:
198 static const FunctionInfo functions[] = { 198 static const FunctionInfo functions[] = {
199 {0, nullptr, "Initialize"}, 199 {0, nullptr, "Initialize"},
200 {1, nullptr, "Finalize"}, 200 {1, nullptr, "Finalize"},
201 {2, nullptr, "GetState"}, 201 {2, nullptr, "GetStateOld"},
202 {3, nullptr, "IsNfcEnabled"}, 202 {3, nullptr, "IsNfcEnabledOld"},
203 {100, nullptr, "SetNfcEnabled"}, 203 {100, nullptr, "SetNfcEnabledOld"},
204 {400, nullptr, "InitializeSystem"}, 204 {400, nullptr, "InitializeSystem"},
205 {401, nullptr, "FinalizeSystem"}, 205 {401, nullptr, "FinalizeSystem"},
206 {402, nullptr, "GetState"}, 206 {402, nullptr, "GetState"},
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
index 0d913334e..fba89e7a6 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
@@ -200,8 +200,7 @@ u32 nvhost_ctrl_gpu::GetGpuTime(const std::vector<u8>& input, std::vector<u8>& o
200 200
201 IoctlGetGpuTime params{}; 201 IoctlGetGpuTime params{};
202 std::memcpy(&params, input.data(), input.size()); 202 std::memcpy(&params, input.data(), input.size());
203 const auto ns = Core::Timing::CyclesToNs(system.CoreTiming().GetTicks()); 203 params.gpu_time = static_cast<u64_le>(system.CoreTiming().GetGlobalTimeNs().count());
204 params.gpu_time = static_cast<u64_le>(ns.count());
205 std::memcpy(output.data(), &params, output.size()); 204 std::memcpy(output.data(), &params, output.size());
206 return 0; 205 return 0;
207} 206}
diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp
index 437bc5dee..2f44d3779 100644
--- a/src/core/hle/service/nvflinger/nvflinger.cpp
+++ b/src/core/hle/service/nvflinger/nvflinger.cpp
@@ -9,6 +9,7 @@
9#include "common/logging/log.h" 9#include "common/logging/log.h"
10#include "common/microprofile.h" 10#include "common/microprofile.h"
11#include "common/scope_exit.h" 11#include "common/scope_exit.h"
12#include "common/thread.h"
12#include "core/core.h" 13#include "core/core.h"
13#include "core/core_timing.h" 14#include "core/core_timing.h"
14#include "core/core_timing_util.h" 15#include "core/core_timing_util.h"
@@ -27,8 +28,35 @@
27 28
28namespace Service::NVFlinger { 29namespace Service::NVFlinger {
29 30
30constexpr s64 frame_ticks = static_cast<s64>(Core::Hardware::BASE_CLOCK_RATE / 60); 31constexpr s64 frame_ticks = static_cast<s64>(1000000000 / 60);
31constexpr s64 frame_ticks_30fps = static_cast<s64>(Core::Hardware::BASE_CLOCK_RATE / 30); 32constexpr s64 frame_ticks_30fps = static_cast<s64>(1000000000 / 30);
33
34void NVFlinger::VSyncThread(NVFlinger& nv_flinger) {
35 nv_flinger.SplitVSync();
36}
37
38void NVFlinger::SplitVSync() {
39 system.RegisterHostThread();
40 std::string name = "yuzu:VSyncThread";
41 MicroProfileOnThreadCreate(name.c_str());
42 Common::SetCurrentThreadName(name.c_str());
43 Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
44 s64 delay = 0;
45 while (is_running) {
46 guard->lock();
47 const s64 time_start = system.CoreTiming().GetGlobalTimeNs().count();
48 Compose();
49 const auto ticks = GetNextTicks();
50 const s64 time_end = system.CoreTiming().GetGlobalTimeNs().count();
51 const s64 time_passed = time_end - time_start;
52 const s64 next_time = std::max<s64>(0, ticks - time_passed - delay);
53 guard->unlock();
54 if (next_time > 0) {
55 wait_event->WaitFor(std::chrono::nanoseconds{next_time});
56 }
57 delay = (system.CoreTiming().GetGlobalTimeNs().count() - time_end) - next_time;
58 }
59}
32 60
33NVFlinger::NVFlinger(Core::System& system) : system(system) { 61NVFlinger::NVFlinger(Core::System& system) : system(system) {
34 displays.emplace_back(0, "Default", system); 62 displays.emplace_back(0, "Default", system);
@@ -36,22 +64,36 @@ NVFlinger::NVFlinger(Core::System& system) : system(system) {
36 displays.emplace_back(2, "Edid", system); 64 displays.emplace_back(2, "Edid", system);
37 displays.emplace_back(3, "Internal", system); 65 displays.emplace_back(3, "Internal", system);
38 displays.emplace_back(4, "Null", system); 66 displays.emplace_back(4, "Null", system);
67 guard = std::make_shared<std::mutex>();
39 68
40 // Schedule the screen composition events 69 // Schedule the screen composition events
41 composition_event = 70 composition_event =
42 Core::Timing::CreateEvent("ScreenComposition", [this](u64 userdata, s64 cycles_late) { 71 Core::Timing::CreateEvent("ScreenComposition", [this](u64 userdata, s64 ns_late) {
72 Lock();
43 Compose(); 73 Compose();
44 const auto ticks = 74 const auto ticks = GetNextTicks();
45 Settings::values.force_30fps_mode ? frame_ticks_30fps : GetNextTicks(); 75 this->system.CoreTiming().ScheduleEvent(std::max<s64>(0LL, ticks - ns_late),
46 this->system.CoreTiming().ScheduleEvent(std::max<s64>(0LL, ticks - cycles_late),
47 composition_event); 76 composition_event);
48 }); 77 });
49 78 if (system.IsMulticore()) {
50 system.CoreTiming().ScheduleEvent(frame_ticks, composition_event); 79 is_running = true;
80 wait_event = std::make_unique<Common::Event>();
81 vsync_thread = std::make_unique<std::thread>(VSyncThread, std::ref(*this));
82 } else {
83 system.CoreTiming().ScheduleEvent(frame_ticks, composition_event);
84 }
51} 85}
52 86
53NVFlinger::~NVFlinger() { 87NVFlinger::~NVFlinger() {
54 system.CoreTiming().UnscheduleEvent(composition_event, 0); 88 if (system.IsMulticore()) {
89 is_running = false;
90 wait_event->Set();
91 vsync_thread->join();
92 vsync_thread.reset();
93 wait_event.reset();
94 } else {
95 system.CoreTiming().UnscheduleEvent(composition_event, 0);
96 }
55} 97}
56 98
57void NVFlinger::SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance) { 99void NVFlinger::SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance) {
@@ -199,10 +241,12 @@ void NVFlinger::Compose() {
199 241
200 auto& gpu = system.GPU(); 242 auto& gpu = system.GPU();
201 const auto& multi_fence = buffer->get().multi_fence; 243 const auto& multi_fence = buffer->get().multi_fence;
244 guard->unlock();
202 for (u32 fence_id = 0; fence_id < multi_fence.num_fences; fence_id++) { 245 for (u32 fence_id = 0; fence_id < multi_fence.num_fences; fence_id++) {
203 const auto& fence = multi_fence.fences[fence_id]; 246 const auto& fence = multi_fence.fences[fence_id];
204 gpu.WaitFence(fence.id, fence.value); 247 gpu.WaitFence(fence.id, fence.value);
205 } 248 }
249 guard->lock();
206 250
207 MicroProfileFlip(); 251 MicroProfileFlip();
208 252
@@ -223,7 +267,7 @@ void NVFlinger::Compose() {
223 267
224s64 NVFlinger::GetNextTicks() const { 268s64 NVFlinger::GetNextTicks() const {
225 constexpr s64 max_hertz = 120LL; 269 constexpr s64 max_hertz = 120LL;
226 return (Core::Hardware::BASE_CLOCK_RATE * (1LL << swap_interval)) / max_hertz; 270 return (1000000000 * (1LL << swap_interval)) / max_hertz;
227} 271}
228 272
229} // namespace Service::NVFlinger 273} // namespace Service::NVFlinger
diff --git a/src/core/hle/service/nvflinger/nvflinger.h b/src/core/hle/service/nvflinger/nvflinger.h
index 57a21f33b..e4959a9af 100644
--- a/src/core/hle/service/nvflinger/nvflinger.h
+++ b/src/core/hle/service/nvflinger/nvflinger.h
@@ -4,15 +4,22 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <atomic>
7#include <memory> 8#include <memory>
9#include <mutex>
8#include <optional> 10#include <optional>
9#include <string> 11#include <string>
10#include <string_view> 12#include <string_view>
13#include <thread>
11#include <vector> 14#include <vector>
12 15
13#include "common/common_types.h" 16#include "common/common_types.h"
14#include "core/hle/kernel/object.h" 17#include "core/hle/kernel/object.h"
15 18
19namespace Common {
20class Event;
21} // namespace Common
22
16namespace Core::Timing { 23namespace Core::Timing {
17class CoreTiming; 24class CoreTiming;
18struct EventType; 25struct EventType;
@@ -79,6 +86,10 @@ public:
79 86
80 s64 GetNextTicks() const; 87 s64 GetNextTicks() const;
81 88
89 std::unique_lock<std::mutex> Lock() {
90 return std::unique_lock{*guard};
91 }
92
82private: 93private:
83 /// Finds the display identified by the specified ID. 94 /// Finds the display identified by the specified ID.
84 VI::Display* FindDisplay(u64 display_id); 95 VI::Display* FindDisplay(u64 display_id);
@@ -92,6 +103,10 @@ private:
92 /// Finds the layer identified by the specified ID in the desired display. 103 /// Finds the layer identified by the specified ID in the desired display.
93 const VI::Layer* FindLayer(u64 display_id, u64 layer_id) const; 104 const VI::Layer* FindLayer(u64 display_id, u64 layer_id) const;
94 105
106 static void VSyncThread(NVFlinger& nv_flinger);
107
108 void SplitVSync();
109
95 std::shared_ptr<Nvidia::Module> nvdrv; 110 std::shared_ptr<Nvidia::Module> nvdrv;
96 111
97 std::vector<VI::Display> displays; 112 std::vector<VI::Display> displays;
@@ -108,7 +123,13 @@ private:
108 /// Event that handles screen composition. 123 /// Event that handles screen composition.
109 std::shared_ptr<Core::Timing::EventType> composition_event; 124 std::shared_ptr<Core::Timing::EventType> composition_event;
110 125
126 std::shared_ptr<std::mutex> guard;
127
111 Core::System& system; 128 Core::System& system;
129
130 std::unique_ptr<std::thread> vsync_thread;
131 std::unique_ptr<Common::Event> wait_event;
132 std::atomic<bool> is_running{};
112}; 133};
113 134
114} // namespace Service::NVFlinger 135} // namespace Service::NVFlinger
diff --git a/src/core/hle/service/sm/sm.cpp b/src/core/hle/service/sm/sm.cpp
index 6ada13be4..d872de16c 100644
--- a/src/core/hle/service/sm/sm.cpp
+++ b/src/core/hle/service/sm/sm.cpp
@@ -142,7 +142,7 @@ void SM::GetService(Kernel::HLERequestContext& ctx) {
142 } 142 }
143 143
144 // Wake the threads waiting on the ServerPort 144 // Wake the threads waiting on the ServerPort
145 server_port->WakeupAllWaitingThreads(); 145 server_port->Signal();
146 146
147 LOG_DEBUG(Service_SM, "called service={} -> session={}", name, client->GetObjectId()); 147 LOG_DEBUG(Service_SM, "called service={} -> session={}", name, client->GetObjectId());
148 IPC::ResponseBuilder rb{ctx, 2, 0, 1, IPC::ResponseBuilder::Flags::AlwaysMoveHandles}; 148 IPC::ResponseBuilder rb{ctx, 2, 0, 1, IPC::ResponseBuilder::Flags::AlwaysMoveHandles};
diff --git a/src/core/hle/service/time/standard_steady_clock_core.cpp b/src/core/hle/service/time/standard_steady_clock_core.cpp
index 1575f0b49..59a272f4a 100644
--- a/src/core/hle/service/time/standard_steady_clock_core.cpp
+++ b/src/core/hle/service/time/standard_steady_clock_core.cpp
@@ -11,9 +11,8 @@
11namespace Service::Time::Clock { 11namespace Service::Time::Clock {
12 12
13TimeSpanType StandardSteadyClockCore::GetCurrentRawTimePoint(Core::System& system) { 13TimeSpanType StandardSteadyClockCore::GetCurrentRawTimePoint(Core::System& system) {
14 const TimeSpanType ticks_time_span{TimeSpanType::FromTicks( 14 const TimeSpanType ticks_time_span{
15 Core::Timing::CpuCyclesToClockCycles(system.CoreTiming().GetTicks()), 15 TimeSpanType::FromTicks(system.CoreTiming().GetClockTicks(), Core::Hardware::CNTFREQ)};
16 Core::Hardware::CNTFREQ)};
17 TimeSpanType raw_time_point{setup_value.nanoseconds + ticks_time_span.nanoseconds}; 16 TimeSpanType raw_time_point{setup_value.nanoseconds + ticks_time_span.nanoseconds};
18 17
19 if (raw_time_point.nanoseconds < cached_raw_time_point.nanoseconds) { 18 if (raw_time_point.nanoseconds < cached_raw_time_point.nanoseconds) {
diff --git a/src/core/hle/service/time/tick_based_steady_clock_core.cpp b/src/core/hle/service/time/tick_based_steady_clock_core.cpp
index 44d5bc651..8baaa2a6a 100644
--- a/src/core/hle/service/time/tick_based_steady_clock_core.cpp
+++ b/src/core/hle/service/time/tick_based_steady_clock_core.cpp
@@ -11,9 +11,8 @@
11namespace Service::Time::Clock { 11namespace Service::Time::Clock {
12 12
13SteadyClockTimePoint TickBasedSteadyClockCore::GetTimePoint(Core::System& system) { 13SteadyClockTimePoint TickBasedSteadyClockCore::GetTimePoint(Core::System& system) {
14 const TimeSpanType ticks_time_span{TimeSpanType::FromTicks( 14 const TimeSpanType ticks_time_span{
15 Core::Timing::CpuCyclesToClockCycles(system.CoreTiming().GetTicks()), 15 TimeSpanType::FromTicks(system.CoreTiming().GetClockTicks(), Core::Hardware::CNTFREQ)};
16 Core::Hardware::CNTFREQ)};
17 16
18 return {ticks_time_span.ToSeconds(), GetClockSourceId()}; 17 return {ticks_time_span.ToSeconds(), GetClockSourceId()};
19} 18}
diff --git a/src/core/hle/service/time/time.cpp b/src/core/hle/service/time/time.cpp
index 67f1bbcf3..4cf58a61a 100644
--- a/src/core/hle/service/time/time.cpp
+++ b/src/core/hle/service/time/time.cpp
@@ -234,9 +234,8 @@ void Module::Interface::CalculateMonotonicSystemClockBaseTimePoint(Kernel::HLERe
234 const auto current_time_point{steady_clock_core.GetCurrentTimePoint(system)}; 234 const auto current_time_point{steady_clock_core.GetCurrentTimePoint(system)};
235 235
236 if (current_time_point.clock_source_id == context.steady_time_point.clock_source_id) { 236 if (current_time_point.clock_source_id == context.steady_time_point.clock_source_id) {
237 const auto ticks{Clock::TimeSpanType::FromTicks( 237 const auto ticks{Clock::TimeSpanType::FromTicks(system.CoreTiming().GetClockTicks(),
238 Core::Timing::CpuCyclesToClockCycles(system.CoreTiming().GetTicks()), 238 Core::Hardware::CNTFREQ)};
239 Core::Hardware::CNTFREQ)};
240 const s64 base_time_point{context.offset + current_time_point.time_point - 239 const s64 base_time_point{context.offset + current_time_point.time_point -
241 ticks.ToSeconds()}; 240 ticks.ToSeconds()};
242 IPC::ResponseBuilder rb{ctx, (sizeof(s64) / 4) + 2}; 241 IPC::ResponseBuilder rb{ctx, (sizeof(s64) / 4) + 2};
diff --git a/src/core/hle/service/time/time_sharedmemory.cpp b/src/core/hle/service/time/time_sharedmemory.cpp
index 999ec1e51..e0ae9f874 100644
--- a/src/core/hle/service/time/time_sharedmemory.cpp
+++ b/src/core/hle/service/time/time_sharedmemory.cpp
@@ -30,8 +30,7 @@ void SharedMemory::SetupStandardSteadyClock(Core::System& system,
30 const Common::UUID& clock_source_id, 30 const Common::UUID& clock_source_id,
31 Clock::TimeSpanType current_time_point) { 31 Clock::TimeSpanType current_time_point) {
32 const Clock::TimeSpanType ticks_time_span{Clock::TimeSpanType::FromTicks( 32 const Clock::TimeSpanType ticks_time_span{Clock::TimeSpanType::FromTicks(
33 Core::Timing::CpuCyclesToClockCycles(system.CoreTiming().GetTicks()), 33 system.CoreTiming().GetClockTicks(), Core::Hardware::CNTFREQ)};
34 Core::Hardware::CNTFREQ)};
35 const Clock::SteadyClockContext context{ 34 const Clock::SteadyClockContext context{
36 static_cast<u64>(current_time_point.nanoseconds - ticks_time_span.nanoseconds), 35 static_cast<u64>(current_time_point.nanoseconds - ticks_time_span.nanoseconds),
37 clock_source_id}; 36 clock_source_id};
diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp
index 46e14c2a3..157092074 100644
--- a/src/core/hle/service/vi/vi.cpp
+++ b/src/core/hle/service/vi/vi.cpp
@@ -511,6 +511,7 @@ private:
511 LOG_DEBUG(Service_VI, "called. id=0x{:08X} transaction={:X}, flags=0x{:08X}", id, 511 LOG_DEBUG(Service_VI, "called. id=0x{:08X} transaction={:X}, flags=0x{:08X}", id,
512 static_cast<u32>(transaction), flags); 512 static_cast<u32>(transaction), flags);
513 513
514 nv_flinger->Lock();
514 auto& buffer_queue = nv_flinger->FindBufferQueue(id); 515 auto& buffer_queue = nv_flinger->FindBufferQueue(id);
515 516
516 switch (transaction) { 517 switch (transaction) {
@@ -550,6 +551,7 @@ private:
550 [=](std::shared_ptr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx, 551 [=](std::shared_ptr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx,
551 Kernel::ThreadWakeupReason reason) { 552 Kernel::ThreadWakeupReason reason) {
552 // Repeat TransactParcel DequeueBuffer when a buffer is available 553 // Repeat TransactParcel DequeueBuffer when a buffer is available
554 nv_flinger->Lock();
553 auto& buffer_queue = nv_flinger->FindBufferQueue(id); 555 auto& buffer_queue = nv_flinger->FindBufferQueue(id);
554 auto result = buffer_queue.DequeueBuffer(width, height); 556 auto result = buffer_queue.DequeueBuffer(width, height);
555 ASSERT_MSG(result != std::nullopt, "Could not dequeue buffer."); 557 ASSERT_MSG(result != std::nullopt, "Could not dequeue buffer.");
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 9d87045a0..7def00768 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -8,6 +8,7 @@
8#include <utility> 8#include <utility>
9 9
10#include "common/assert.h" 10#include "common/assert.h"
11#include "common/atomic_ops.h"
11#include "common/common_types.h" 12#include "common/common_types.h"
12#include "common/logging/log.h" 13#include "common/logging/log.h"
13#include "common/page_table.h" 14#include "common/page_table.h"
@@ -29,15 +30,12 @@ namespace Core::Memory {
29struct Memory::Impl { 30struct Memory::Impl {
30 explicit Impl(Core::System& system_) : system{system_} {} 31 explicit Impl(Core::System& system_) : system{system_} {}
31 32
32 void SetCurrentPageTable(Kernel::Process& process) { 33 void SetCurrentPageTable(Kernel::Process& process, u32 core_id) {
33 current_page_table = &process.PageTable().PageTableImpl(); 34 current_page_table = &process.PageTable().PageTableImpl();
34 35
35 const std::size_t address_space_width = process.PageTable().GetAddressSpaceWidth(); 36 const std::size_t address_space_width = process.PageTable().GetAddressSpaceWidth();
36 37
37 system.ArmInterface(0).PageTableChanged(*current_page_table, address_space_width); 38 system.ArmInterface(core_id).PageTableChanged(*current_page_table, address_space_width);
38 system.ArmInterface(1).PageTableChanged(*current_page_table, address_space_width);
39 system.ArmInterface(2).PageTableChanged(*current_page_table, address_space_width);
40 system.ArmInterface(3).PageTableChanged(*current_page_table, address_space_width);
41 } 39 }
42 40
43 void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) { 41 void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
@@ -179,6 +177,22 @@ struct Memory::Impl {
179 } 177 }
180 } 178 }
181 179
180 bool WriteExclusive8(const VAddr addr, const u8 data, const u8 expected) {
181 return WriteExclusive<u8>(addr, data, expected);
182 }
183
184 bool WriteExclusive16(const VAddr addr, const u16 data, const u16 expected) {
185 return WriteExclusive<u16_le>(addr, data, expected);
186 }
187
188 bool WriteExclusive32(const VAddr addr, const u32 data, const u32 expected) {
189 return WriteExclusive<u32_le>(addr, data, expected);
190 }
191
192 bool WriteExclusive64(const VAddr addr, const u64 data, const u64 expected) {
193 return WriteExclusive<u64_le>(addr, data, expected);
194 }
195
182 std::string ReadCString(VAddr vaddr, std::size_t max_length) { 196 std::string ReadCString(VAddr vaddr, std::size_t max_length) {
183 std::string string; 197 std::string string;
184 string.reserve(max_length); 198 string.reserve(max_length);
@@ -682,6 +696,67 @@ struct Memory::Impl {
682 } 696 }
683 } 697 }
684 698
699 template <typename T>
700 bool WriteExclusive(const VAddr vaddr, const T data, const T expected) {
701 u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
702 if (page_pointer != nullptr) {
703 // NOTE: Avoid adding any extra logic to this fast-path block
704 T volatile* pointer = reinterpret_cast<T volatile*>(&page_pointer[vaddr]);
705 return Common::AtomicCompareAndSwap(pointer, data, expected);
706 }
707
708 const Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
709 switch (type) {
710 case Common::PageType::Unmapped:
711 LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8,
712 static_cast<u32>(data), vaddr);
713 return true;
714 case Common::PageType::Memory:
715 ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
716 break;
717 case Common::PageType::RasterizerCachedMemory: {
718 u8* host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)};
719 system.GPU().InvalidateRegion(vaddr, sizeof(T));
720 T volatile* pointer = reinterpret_cast<T volatile*>(&host_ptr);
721 return Common::AtomicCompareAndSwap(pointer, data, expected);
722 break;
723 }
724 default:
725 UNREACHABLE();
726 }
727 return true;
728 }
729
730 bool WriteExclusive128(const VAddr vaddr, const u128 data, const u128 expected) {
731 u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
732 if (page_pointer != nullptr) {
733 // NOTE: Avoid adding any extra logic to this fast-path block
734 u64 volatile* pointer = reinterpret_cast<u64 volatile*>(&page_pointer[vaddr]);
735 return Common::AtomicCompareAndSwap(pointer, data, expected);
736 }
737
738 const Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
739 switch (type) {
740 case Common::PageType::Unmapped:
741 LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}{:016X}", sizeof(data) * 8,
742 static_cast<u64>(data[1]), static_cast<u64>(data[0]), vaddr);
743 return true;
744 case Common::PageType::Memory:
745 ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
746 break;
747 case Common::PageType::RasterizerCachedMemory: {
748 u8* host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)};
749 system.GPU().InvalidateRegion(vaddr, sizeof(u128));
750 u64 volatile* pointer = reinterpret_cast<u64 volatile*>(&host_ptr);
751 return Common::AtomicCompareAndSwap(pointer, data, expected);
752 break;
753 }
754 default:
755 UNREACHABLE();
756 }
757 return true;
758 }
759
685 Common::PageTable* current_page_table = nullptr; 760 Common::PageTable* current_page_table = nullptr;
686 Core::System& system; 761 Core::System& system;
687}; 762};
@@ -689,8 +764,8 @@ struct Memory::Impl {
689Memory::Memory(Core::System& system) : impl{std::make_unique<Impl>(system)} {} 764Memory::Memory(Core::System& system) : impl{std::make_unique<Impl>(system)} {}
690Memory::~Memory() = default; 765Memory::~Memory() = default;
691 766
692void Memory::SetCurrentPageTable(Kernel::Process& process) { 767void Memory::SetCurrentPageTable(Kernel::Process& process, u32 core_id) {
693 impl->SetCurrentPageTable(process); 768 impl->SetCurrentPageTable(process, core_id);
694} 769}
695 770
696void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) { 771void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
@@ -764,6 +839,26 @@ void Memory::Write64(VAddr addr, u64 data) {
764 impl->Write64(addr, data); 839 impl->Write64(addr, data);
765} 840}
766 841
842bool Memory::WriteExclusive8(VAddr addr, u8 data, u8 expected) {
843 return impl->WriteExclusive8(addr, data, expected);
844}
845
846bool Memory::WriteExclusive16(VAddr addr, u16 data, u16 expected) {
847 return impl->WriteExclusive16(addr, data, expected);
848}
849
850bool Memory::WriteExclusive32(VAddr addr, u32 data, u32 expected) {
851 return impl->WriteExclusive32(addr, data, expected);
852}
853
854bool Memory::WriteExclusive64(VAddr addr, u64 data, u64 expected) {
855 return impl->WriteExclusive64(addr, data, expected);
856}
857
858bool Memory::WriteExclusive128(VAddr addr, u128 data, u128 expected) {
859 return impl->WriteExclusive128(addr, data, expected);
860}
861
767std::string Memory::ReadCString(VAddr vaddr, std::size_t max_length) { 862std::string Memory::ReadCString(VAddr vaddr, std::size_t max_length) {
768 return impl->ReadCString(vaddr, max_length); 863 return impl->ReadCString(vaddr, max_length);
769} 864}
diff --git a/src/core/memory.h b/src/core/memory.h
index 9292f3b0a..4a1cc63f4 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -64,7 +64,7 @@ public:
64 * 64 *
65 * @param process The process to use the page table of. 65 * @param process The process to use the page table of.
66 */ 66 */
67 void SetCurrentPageTable(Kernel::Process& process); 67 void SetCurrentPageTable(Kernel::Process& process, u32 core_id);
68 68
69 /** 69 /**
70 * Maps an allocated buffer onto a region of the emulated process address space. 70 * Maps an allocated buffer onto a region of the emulated process address space.
@@ -245,6 +245,71 @@ public:
245 void Write64(VAddr addr, u64 data); 245 void Write64(VAddr addr, u64 data);
246 246
247 /** 247 /**
248 * Writes a 8-bit unsigned integer to the given virtual address in
249 * the current process' address space if and only if the address contains
250 * the expected value. This operation is atomic.
251 *
252 * @param addr The virtual address to write the 8-bit unsigned integer to.
253 * @param data The 8-bit unsigned integer to write to the given virtual address.
254 * @param expected The 8-bit unsigned integer to check against the given virtual address.
255 *
256 * @post The memory range [addr, sizeof(data)) contains the given data value.
257 */
258 bool WriteExclusive8(VAddr addr, u8 data, u8 expected);
259
260 /**
261 * Writes a 16-bit unsigned integer to the given virtual address in
262 * the current process' address space if and only if the address contains
263 * the expected value. This operation is atomic.
264 *
265 * @param addr The virtual address to write the 16-bit unsigned integer to.
266 * @param data The 16-bit unsigned integer to write to the given virtual address.
267 * @param expected The 16-bit unsigned integer to check against the given virtual address.
268 *
269 * @post The memory range [addr, sizeof(data)) contains the given data value.
270 */
271 bool WriteExclusive16(VAddr addr, u16 data, u16 expected);
272
273 /**
274 * Writes a 32-bit unsigned integer to the given virtual address in
275 * the current process' address space if and only if the address contains
276 * the expected value. This operation is atomic.
277 *
278 * @param addr The virtual address to write the 32-bit unsigned integer to.
279 * @param data The 32-bit unsigned integer to write to the given virtual address.
280 * @param expected The 32-bit unsigned integer to check against the given virtual address.
281 *
282 * @post The memory range [addr, sizeof(data)) contains the given data value.
283 */
284 bool WriteExclusive32(VAddr addr, u32 data, u32 expected);
285
286 /**
287 * Writes a 64-bit unsigned integer to the given virtual address in
288 * the current process' address space if and only if the address contains
289 * the expected value. This operation is atomic.
290 *
291 * @param addr The virtual address to write the 64-bit unsigned integer to.
292 * @param data The 64-bit unsigned integer to write to the given virtual address.
293 * @param expected The 64-bit unsigned integer to check against the given virtual address.
294 *
295 * @post The memory range [addr, sizeof(data)) contains the given data value.
296 */
297 bool WriteExclusive64(VAddr addr, u64 data, u64 expected);
298
299 /**
300 * Writes a 128-bit unsigned integer to the given virtual address in
301 * the current process' address space if and only if the address contains
302 * the expected value. This operation is atomic.
303 *
304 * @param addr The virtual address to write the 128-bit unsigned integer to.
305 * @param data The 128-bit unsigned integer to write to the given virtual address.
306 * @param expected The 128-bit unsigned integer to check against the given virtual address.
307 *
308 * @post The memory range [addr, sizeof(data)) contains the given data value.
309 */
310 bool WriteExclusive128(VAddr addr, u128 data, u128 expected);
311
312 /**
248 * Reads a null-terminated string from the given virtual address. 313 * Reads a null-terminated string from the given virtual address.
249 * This function will continually read characters until either: 314 * This function will continually read characters until either:
250 * 315 *
diff --git a/src/core/memory/cheat_engine.cpp b/src/core/memory/cheat_engine.cpp
index b139e8465..53d27859b 100644
--- a/src/core/memory/cheat_engine.cpp
+++ b/src/core/memory/cheat_engine.cpp
@@ -20,7 +20,7 @@
20 20
21namespace Core::Memory { 21namespace Core::Memory {
22 22
23constexpr s64 CHEAT_ENGINE_TICKS = static_cast<s64>(Core::Hardware::BASE_CLOCK_RATE / 12); 23constexpr s64 CHEAT_ENGINE_TICKS = static_cast<s64>(1000000000 / 12);
24constexpr u32 KEYPAD_BITMASK = 0x3FFFFFF; 24constexpr u32 KEYPAD_BITMASK = 0x3FFFFFF;
25 25
26StandardVmCallbacks::StandardVmCallbacks(Core::System& system, const CheatProcessMetadata& metadata) 26StandardVmCallbacks::StandardVmCallbacks(Core::System& system, const CheatProcessMetadata& metadata)
@@ -190,7 +190,7 @@ CheatEngine::~CheatEngine() {
190void CheatEngine::Initialize() { 190void CheatEngine::Initialize() {
191 event = Core::Timing::CreateEvent( 191 event = Core::Timing::CreateEvent(
192 "CheatEngine::FrameCallback::" + Common::HexToString(metadata.main_nso_build_id), 192 "CheatEngine::FrameCallback::" + Common::HexToString(metadata.main_nso_build_id),
193 [this](u64 userdata, s64 cycles_late) { FrameCallback(userdata, cycles_late); }); 193 [this](u64 userdata, s64 ns_late) { FrameCallback(userdata, ns_late); });
194 core_timing.ScheduleEvent(CHEAT_ENGINE_TICKS, event); 194 core_timing.ScheduleEvent(CHEAT_ENGINE_TICKS, event);
195 195
196 metadata.process_id = system.CurrentProcess()->GetProcessID(); 196 metadata.process_id = system.CurrentProcess()->GetProcessID();
@@ -217,7 +217,7 @@ void CheatEngine::Reload(std::vector<CheatEntry> cheats) {
217 217
218MICROPROFILE_DEFINE(Cheat_Engine, "Add-Ons", "Cheat Engine", MP_RGB(70, 200, 70)); 218MICROPROFILE_DEFINE(Cheat_Engine, "Add-Ons", "Cheat Engine", MP_RGB(70, 200, 70));
219 219
220void CheatEngine::FrameCallback(u64 userdata, s64 cycles_late) { 220void CheatEngine::FrameCallback(u64 userdata, s64 ns_late) {
221 if (is_pending_reload.exchange(false)) { 221 if (is_pending_reload.exchange(false)) {
222 vm.LoadProgram(cheats); 222 vm.LoadProgram(cheats);
223 } 223 }
@@ -230,7 +230,7 @@ void CheatEngine::FrameCallback(u64 userdata, s64 cycles_late) {
230 230
231 vm.Execute(metadata); 231 vm.Execute(metadata);
232 232
233 core_timing.ScheduleEvent(CHEAT_ENGINE_TICKS - cycles_late, event); 233 core_timing.ScheduleEvent(CHEAT_ENGINE_TICKS - ns_late, event);
234} 234}
235 235
236} // namespace Core::Memory 236} // namespace Core::Memory
diff --git a/src/core/perf_stats.cpp b/src/core/perf_stats.cpp
index f1ae9d4df..9f3a6b811 100644
--- a/src/core/perf_stats.cpp
+++ b/src/core/perf_stats.cpp
@@ -119,7 +119,7 @@ double PerfStats::GetLastFrameTimeScale() {
119} 119}
120 120
121void FrameLimiter::DoFrameLimiting(microseconds current_system_time_us) { 121void FrameLimiter::DoFrameLimiting(microseconds current_system_time_us) {
122 if (!Settings::values.use_frame_limit) { 122 if (!Settings::values.use_frame_limit || Settings::values.use_multi_core) {
123 return; 123 return;
124 } 124 }
125 125
diff --git a/src/core/settings.cpp b/src/core/settings.cpp
index 4edff9cd8..56df5e925 100644
--- a/src/core/settings.cpp
+++ b/src/core/settings.cpp
@@ -127,6 +127,13 @@ void LogSettings() {
127 LogSetting("Services_BCATBoxcatLocal", Settings::values.bcat_boxcat_local); 127 LogSetting("Services_BCATBoxcatLocal", Settings::values.bcat_boxcat_local);
128} 128}
129 129
130float Volume() {
131 if (values.audio_muted) {
132 return 0.0f;
133 }
134 return values.volume;
135}
136
130bool IsGPULevelExtreme() { 137bool IsGPULevelExtreme() {
131 return values.gpu_accuracy == GPUAccuracy::Extreme; 138 return values.gpu_accuracy == GPUAccuracy::Extreme;
132} 139}
diff --git a/src/core/settings.h b/src/core/settings.h
index 33e1e06cd..a598ccbc1 100644
--- a/src/core/settings.h
+++ b/src/core/settings.h
@@ -459,6 +459,7 @@ struct Values {
459 bool use_dev_keys; 459 bool use_dev_keys;
460 460
461 // Audio 461 // Audio
462 bool audio_muted;
462 std::string sink_id; 463 std::string sink_id;
463 bool enable_audio_stretching; 464 bool enable_audio_stretching;
464 std::string audio_device_id; 465 std::string audio_device_id;
@@ -490,6 +491,8 @@ struct Values {
490 std::map<u64, std::vector<std::string>> disabled_addons; 491 std::map<u64, std::vector<std::string>> disabled_addons;
491} extern values; 492} extern values;
492 493
494float Volume();
495
493bool IsGPULevelExtreme(); 496bool IsGPULevelExtreme();
494bool IsGPULevelHigh(); 497bool IsGPULevelHigh();
495 498
diff --git a/src/core/tools/freezer.cpp b/src/core/tools/freezer.cpp
index b2c6c537e..8b0c50d11 100644
--- a/src/core/tools/freezer.cpp
+++ b/src/core/tools/freezer.cpp
@@ -14,7 +14,7 @@
14namespace Tools { 14namespace Tools {
15namespace { 15namespace {
16 16
17constexpr s64 MEMORY_FREEZER_TICKS = static_cast<s64>(Core::Hardware::BASE_CLOCK_RATE / 60); 17constexpr s64 MEMORY_FREEZER_TICKS = static_cast<s64>(1000000000 / 60);
18 18
19u64 MemoryReadWidth(Core::Memory::Memory& memory, u32 width, VAddr addr) { 19u64 MemoryReadWidth(Core::Memory::Memory& memory, u32 width, VAddr addr) {
20 switch (width) { 20 switch (width) {
@@ -57,7 +57,7 @@ Freezer::Freezer(Core::Timing::CoreTiming& core_timing_, Core::Memory::Memory& m
57 : core_timing{core_timing_}, memory{memory_} { 57 : core_timing{core_timing_}, memory{memory_} {
58 event = Core::Timing::CreateEvent( 58 event = Core::Timing::CreateEvent(
59 "MemoryFreezer::FrameCallback", 59 "MemoryFreezer::FrameCallback",
60 [this](u64 userdata, s64 cycles_late) { FrameCallback(userdata, cycles_late); }); 60 [this](u64 userdata, s64 ns_late) { FrameCallback(userdata, ns_late); });
61 core_timing.ScheduleEvent(MEMORY_FREEZER_TICKS, event); 61 core_timing.ScheduleEvent(MEMORY_FREEZER_TICKS, event);
62} 62}
63 63
@@ -158,7 +158,7 @@ std::vector<Freezer::Entry> Freezer::GetEntries() const {
158 return entries; 158 return entries;
159} 159}
160 160
161void Freezer::FrameCallback(u64 userdata, s64 cycles_late) { 161void Freezer::FrameCallback(u64 userdata, s64 ns_late) {
162 if (!IsActive()) { 162 if (!IsActive()) {
163 LOG_DEBUG(Common_Memory, "Memory freezer has been deactivated, ending callback events."); 163 LOG_DEBUG(Common_Memory, "Memory freezer has been deactivated, ending callback events.");
164 return; 164 return;
@@ -173,7 +173,7 @@ void Freezer::FrameCallback(u64 userdata, s64 cycles_late) {
173 MemoryWriteWidth(memory, entry.width, entry.address, entry.value); 173 MemoryWriteWidth(memory, entry.width, entry.address, entry.value);
174 } 174 }
175 175
176 core_timing.ScheduleEvent(MEMORY_FREEZER_TICKS - cycles_late, event); 176 core_timing.ScheduleEvent(MEMORY_FREEZER_TICKS - ns_late, event);
177} 177}
178 178
179void Freezer::FillEntryReads() { 179void Freezer::FillEntryReads() {
diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt
index c7038b217..47ef30aa9 100644
--- a/src/tests/CMakeLists.txt
+++ b/src/tests/CMakeLists.txt
@@ -1,6 +1,7 @@
1add_executable(tests 1add_executable(tests
2 common/bit_field.cpp 2 common/bit_field.cpp
3 common/bit_utils.cpp 3 common/bit_utils.cpp
4 common/fibers.cpp
4 common/multi_level_queue.cpp 5 common/multi_level_queue.cpp
5 common/param_package.cpp 6 common/param_package.cpp
6 common/ring_buffer.cpp 7 common/ring_buffer.cpp
diff --git a/src/tests/common/fibers.cpp b/src/tests/common/fibers.cpp
new file mode 100644
index 000000000..4fd92428f
--- /dev/null
+++ b/src/tests/common/fibers.cpp
@@ -0,0 +1,358 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <atomic>
6#include <cstdlib>
7#include <functional>
8#include <memory>
9#include <thread>
10#include <unordered_map>
11#include <vector>
12
13#include <catch2/catch.hpp>
14#include <math.h>
15#include "common/common_types.h"
16#include "common/fiber.h"
17#include "common/spin_lock.h"
18
19namespace Common {
20
21class TestControl1 {
22public:
23 TestControl1() = default;
24
25 void DoWork();
26
27 void ExecuteThread(u32 id);
28
29 std::unordered_map<std::thread::id, u32> ids;
30 std::vector<std::shared_ptr<Common::Fiber>> thread_fibers;
31 std::vector<std::shared_ptr<Common::Fiber>> work_fibers;
32 std::vector<u32> items;
33 std::vector<u32> results;
34};
35
36static void WorkControl1(void* control) {
37 auto* test_control = static_cast<TestControl1*>(control);
38 test_control->DoWork();
39}
40
41void TestControl1::DoWork() {
42 std::thread::id this_id = std::this_thread::get_id();
43 u32 id = ids[this_id];
44 u32 value = items[id];
45 for (u32 i = 0; i < id; i++) {
46 value++;
47 }
48 results[id] = value;
49 Fiber::YieldTo(work_fibers[id], thread_fibers[id]);
50}
51
52void TestControl1::ExecuteThread(u32 id) {
53 std::thread::id this_id = std::this_thread::get_id();
54 ids[this_id] = id;
55 auto thread_fiber = Fiber::ThreadToFiber();
56 thread_fibers[id] = thread_fiber;
57 work_fibers[id] = std::make_shared<Fiber>(std::function<void(void*)>{WorkControl1}, this);
58 items[id] = rand() % 256;
59 Fiber::YieldTo(thread_fibers[id], work_fibers[id]);
60 thread_fibers[id]->Exit();
61}
62
63static void ThreadStart1(u32 id, TestControl1& test_control) {
64 test_control.ExecuteThread(id);
65}
66
67/** This test checks for fiber setup configuration and validates that fibers are
68 * doing all the work required.
69 */
70TEST_CASE("Fibers::Setup", "[common]") {
71 constexpr std::size_t num_threads = 7;
72 TestControl1 test_control{};
73 test_control.thread_fibers.resize(num_threads);
74 test_control.work_fibers.resize(num_threads);
75 test_control.items.resize(num_threads, 0);
76 test_control.results.resize(num_threads, 0);
77 std::vector<std::thread> threads;
78 for (u32 i = 0; i < num_threads; i++) {
79 threads.emplace_back(ThreadStart1, i, std::ref(test_control));
80 }
81 for (u32 i = 0; i < num_threads; i++) {
82 threads[i].join();
83 }
84 for (u32 i = 0; i < num_threads; i++) {
85 REQUIRE(test_control.items[i] + i == test_control.results[i]);
86 }
87}
88
89class TestControl2 {
90public:
91 TestControl2() = default;
92
93 void DoWork1() {
94 trap2 = false;
95 while (trap.load())
96 ;
97 for (u32 i = 0; i < 12000; i++) {
98 value1 += i;
99 }
100 Fiber::YieldTo(fiber1, fiber3);
101 std::thread::id this_id = std::this_thread::get_id();
102 u32 id = ids[this_id];
103 assert1 = id == 1;
104 value2 += 5000;
105 Fiber::YieldTo(fiber1, thread_fibers[id]);
106 }
107
108 void DoWork2() {
109 while (trap2.load())
110 ;
111 value2 = 2000;
112 trap = false;
113 Fiber::YieldTo(fiber2, fiber1);
114 assert3 = false;
115 }
116
117 void DoWork3() {
118 std::thread::id this_id = std::this_thread::get_id();
119 u32 id = ids[this_id];
120 assert2 = id == 0;
121 value1 += 1000;
122 Fiber::YieldTo(fiber3, thread_fibers[id]);
123 }
124
125 void ExecuteThread(u32 id);
126
127 void CallFiber1() {
128 std::thread::id this_id = std::this_thread::get_id();
129 u32 id = ids[this_id];
130 Fiber::YieldTo(thread_fibers[id], fiber1);
131 }
132
133 void CallFiber2() {
134 std::thread::id this_id = std::this_thread::get_id();
135 u32 id = ids[this_id];
136 Fiber::YieldTo(thread_fibers[id], fiber2);
137 }
138
139 void Exit();
140
141 bool assert1{};
142 bool assert2{};
143 bool assert3{true};
144 u32 value1{};
145 u32 value2{};
146 std::atomic<bool> trap{true};
147 std::atomic<bool> trap2{true};
148 std::unordered_map<std::thread::id, u32> ids;
149 std::vector<std::shared_ptr<Common::Fiber>> thread_fibers;
150 std::shared_ptr<Common::Fiber> fiber1;
151 std::shared_ptr<Common::Fiber> fiber2;
152 std::shared_ptr<Common::Fiber> fiber3;
153};
154
155static void WorkControl2_1(void* control) {
156 auto* test_control = static_cast<TestControl2*>(control);
157 test_control->DoWork1();
158}
159
160static void WorkControl2_2(void* control) {
161 auto* test_control = static_cast<TestControl2*>(control);
162 test_control->DoWork2();
163}
164
165static void WorkControl2_3(void* control) {
166 auto* test_control = static_cast<TestControl2*>(control);
167 test_control->DoWork3();
168}
169
170void TestControl2::ExecuteThread(u32 id) {
171 std::thread::id this_id = std::this_thread::get_id();
172 ids[this_id] = id;
173 auto thread_fiber = Fiber::ThreadToFiber();
174 thread_fibers[id] = thread_fiber;
175}
176
177void TestControl2::Exit() {
178 std::thread::id this_id = std::this_thread::get_id();
179 u32 id = ids[this_id];
180 thread_fibers[id]->Exit();
181}
182
183static void ThreadStart2_1(u32 id, TestControl2& test_control) {
184 test_control.ExecuteThread(id);
185 test_control.CallFiber1();
186 test_control.Exit();
187}
188
189static void ThreadStart2_2(u32 id, TestControl2& test_control) {
190 test_control.ExecuteThread(id);
191 test_control.CallFiber2();
192 test_control.Exit();
193}
194
 195/** This test checks the fiber thread-exchange configuration and validates
 196 * that a fiber has been successfully transferred from one thread to another and that the TLS
 197 * region of the thread is kept while changing fibers.
198 */
199TEST_CASE("Fibers::InterExchange", "[common]") {
200 TestControl2 test_control{};
201 test_control.thread_fibers.resize(2);
202 test_control.fiber1 =
203 std::make_shared<Fiber>(std::function<void(void*)>{WorkControl2_1}, &test_control);
204 test_control.fiber2 =
205 std::make_shared<Fiber>(std::function<void(void*)>{WorkControl2_2}, &test_control);
206 test_control.fiber3 =
207 std::make_shared<Fiber>(std::function<void(void*)>{WorkControl2_3}, &test_control);
208 std::thread thread1(ThreadStart2_1, 0, std::ref(test_control));
209 std::thread thread2(ThreadStart2_2, 1, std::ref(test_control));
210 thread1.join();
211 thread2.join();
212 REQUIRE(test_control.assert1);
213 REQUIRE(test_control.assert2);
214 REQUIRE(test_control.assert3);
215 REQUIRE(test_control.value2 == 7000);
216 u32 cal_value = 0;
217 for (u32 i = 0; i < 12000; i++) {
218 cal_value += i;
219 }
220 cal_value += 1000;
221 REQUIRE(test_control.value1 == cal_value);
222}
223
224class TestControl3 {
225public:
226 TestControl3() = default;
227
228 void DoWork1() {
229 value1 += 1;
230 Fiber::YieldTo(fiber1, fiber2);
231 std::thread::id this_id = std::this_thread::get_id();
232 u32 id = ids[this_id];
233 value3 += 1;
234 Fiber::YieldTo(fiber1, thread_fibers[id]);
235 }
236
237 void DoWork2() {
238 value2 += 1;
239 std::thread::id this_id = std::this_thread::get_id();
240 u32 id = ids[this_id];
241 Fiber::YieldTo(fiber2, thread_fibers[id]);
242 }
243
244 void ExecuteThread(u32 id);
245
246 void CallFiber1() {
247 std::thread::id this_id = std::this_thread::get_id();
248 u32 id = ids[this_id];
249 Fiber::YieldTo(thread_fibers[id], fiber1);
250 }
251
252 void Exit();
253
254 u32 value1{};
255 u32 value2{};
256 u32 value3{};
257 std::unordered_map<std::thread::id, u32> ids;
258 std::vector<std::shared_ptr<Common::Fiber>> thread_fibers;
259 std::shared_ptr<Common::Fiber> fiber1;
260 std::shared_ptr<Common::Fiber> fiber2;
261};
262
263static void WorkControl3_1(void* control) {
264 auto* test_control = static_cast<TestControl3*>(control);
265 test_control->DoWork1();
266}
267
268static void WorkControl3_2(void* control) {
269 auto* test_control = static_cast<TestControl3*>(control);
270 test_control->DoWork2();
271}
272
273void TestControl3::ExecuteThread(u32 id) {
274 std::thread::id this_id = std::this_thread::get_id();
275 ids[this_id] = id;
276 auto thread_fiber = Fiber::ThreadToFiber();
277 thread_fibers[id] = thread_fiber;
278}
279
280void TestControl3::Exit() {
281 std::thread::id this_id = std::this_thread::get_id();
282 u32 id = ids[this_id];
283 thread_fibers[id]->Exit();
284}
285
286static void ThreadStart3(u32 id, TestControl3& test_control) {
287 test_control.ExecuteThread(id);
288 test_control.CallFiber1();
289 test_control.Exit();
290}
291
 292/** This test checks for two threads racing to start the same fiber.
 293 * It checks that execution occurred in an ordered manner and that at no time were
 294 * two contexts active at the same time.
295 */
296TEST_CASE("Fibers::StartRace", "[common]") {
297 TestControl3 test_control{};
298 test_control.thread_fibers.resize(2);
299 test_control.fiber1 =
300 std::make_shared<Fiber>(std::function<void(void*)>{WorkControl3_1}, &test_control);
301 test_control.fiber2 =
302 std::make_shared<Fiber>(std::function<void(void*)>{WorkControl3_2}, &test_control);
303 std::thread thread1(ThreadStart3, 0, std::ref(test_control));
304 std::thread thread2(ThreadStart3, 1, std::ref(test_control));
305 thread1.join();
306 thread2.join();
307 REQUIRE(test_control.value1 == 1);
308 REQUIRE(test_control.value2 == 1);
309 REQUIRE(test_control.value3 == 1);
310}
311
312class TestControl4;
313
314static void WorkControl4(void* control);
315
316class TestControl4 {
317public:
318 TestControl4() {
319 fiber1 = std::make_shared<Fiber>(std::function<void(void*)>{WorkControl4}, this);
320 goal_reached = false;
321 rewinded = false;
322 }
323
324 void Execute() {
325 thread_fiber = Fiber::ThreadToFiber();
326 Fiber::YieldTo(thread_fiber, fiber1);
327 thread_fiber->Exit();
328 }
329
330 void DoWork() {
331 fiber1->SetRewindPoint(std::function<void(void*)>{WorkControl4}, this);
332 if (rewinded) {
333 goal_reached = true;
334 Fiber::YieldTo(fiber1, thread_fiber);
335 }
336 rewinded = true;
337 fiber1->Rewind();
338 }
339
340 std::shared_ptr<Common::Fiber> fiber1;
341 std::shared_ptr<Common::Fiber> thread_fiber;
342 bool goal_reached;
343 bool rewinded;
344};
345
346static void WorkControl4(void* control) {
347 auto* test_control = static_cast<TestControl4*>(control);
348 test_control->DoWork();
349}
350
351TEST_CASE("Fibers::Rewind", "[common]") {
352 TestControl4 test_control{};
353 test_control.Execute();
354 REQUIRE(test_control.goal_reached);
355 REQUIRE(test_control.rewinded);
356}
357
358} // namespace Common
diff --git a/src/tests/core/core_timing.cpp b/src/tests/core/core_timing.cpp
index ff2d11cc8..e66db1940 100644
--- a/src/tests/core/core_timing.cpp
+++ b/src/tests/core/core_timing.cpp
@@ -18,29 +18,26 @@ namespace {
18// Numbers are chosen randomly to make sure the correct one is given. 18// Numbers are chosen randomly to make sure the correct one is given.
19constexpr std::array<u64, 5> CB_IDS{{42, 144, 93, 1026, UINT64_C(0xFFFF7FFFF7FFFF)}}; 19constexpr std::array<u64, 5> CB_IDS{{42, 144, 93, 1026, UINT64_C(0xFFFF7FFFF7FFFF)}};
20constexpr int MAX_SLICE_LENGTH = 10000; // Copied from CoreTiming internals 20constexpr int MAX_SLICE_LENGTH = 10000; // Copied from CoreTiming internals
21constexpr std::array<u64, 5> calls_order{{2, 0, 1, 4, 3}};
22std::array<s64, 5> delays{};
21 23
22std::bitset<CB_IDS.size()> callbacks_ran_flags; 24std::bitset<CB_IDS.size()> callbacks_ran_flags;
23u64 expected_callback = 0; 25u64 expected_callback = 0;
24s64 lateness = 0;
25 26
26template <unsigned int IDX> 27template <unsigned int IDX>
27void CallbackTemplate(u64 userdata, s64 cycles_late) { 28void HostCallbackTemplate(u64 userdata, s64 nanoseconds_late) {
28 static_assert(IDX < CB_IDS.size(), "IDX out of range"); 29 static_assert(IDX < CB_IDS.size(), "IDX out of range");
29 callbacks_ran_flags.set(IDX); 30 callbacks_ran_flags.set(IDX);
30 REQUIRE(CB_IDS[IDX] == userdata); 31 REQUIRE(CB_IDS[IDX] == userdata);
31 REQUIRE(CB_IDS[IDX] == expected_callback); 32 REQUIRE(CB_IDS[IDX] == CB_IDS[calls_order[expected_callback]]);
32 REQUIRE(lateness == cycles_late); 33 delays[IDX] = nanoseconds_late;
33} 34 ++expected_callback;
34
35u64 callbacks_done = 0;
36
37void EmptyCallback(u64 userdata, s64 cycles_late) {
38 ++callbacks_done;
39} 35}
40 36
41struct ScopeInit final { 37struct ScopeInit final {
42 ScopeInit() { 38 ScopeInit() {
43 core_timing.Initialize(); 39 core_timing.SetMulticore(true);
40 core_timing.Initialize([]() {});
44 } 41 }
45 ~ScopeInit() { 42 ~ScopeInit() {
46 core_timing.Shutdown(); 43 core_timing.Shutdown();
@@ -49,110 +46,101 @@ struct ScopeInit final {
49 Core::Timing::CoreTiming core_timing; 46 Core::Timing::CoreTiming core_timing;
50}; 47};
51 48
52void AdvanceAndCheck(Core::Timing::CoreTiming& core_timing, u32 idx, u32 context = 0, 49#pragma optimize("", off)
53 int expected_lateness = 0, int cpu_downcount = 0) {
54 callbacks_ran_flags = 0;
55 expected_callback = CB_IDS[idx];
56 lateness = expected_lateness;
57
58 // Pretend we executed X cycles of instructions.
59 core_timing.SwitchContext(context);
60 core_timing.AddTicks(core_timing.GetDowncount() - cpu_downcount);
61 core_timing.Advance();
62 core_timing.SwitchContext((context + 1) % 4);
63 50
64 REQUIRE(decltype(callbacks_ran_flags)().set(idx) == callbacks_ran_flags); 51u64 TestTimerSpeed(Core::Timing::CoreTiming& core_timing) {
52 u64 start = core_timing.GetGlobalTimeNs().count();
53 u64 placebo = 0;
54 for (std::size_t i = 0; i < 1000; i++) {
55 placebo += core_timing.GetGlobalTimeNs().count();
56 }
57 u64 end = core_timing.GetGlobalTimeNs().count();
58 return (end - start);
65} 59}
60
61#pragma optimize("", on)
62
66} // Anonymous namespace 63} // Anonymous namespace
67 64
68TEST_CASE("CoreTiming[BasicOrder]", "[core]") { 65TEST_CASE("CoreTiming[BasicOrder]", "[core]") {
69 ScopeInit guard; 66 ScopeInit guard;
70 auto& core_timing = guard.core_timing; 67 auto& core_timing = guard.core_timing;
68 std::vector<std::shared_ptr<Core::Timing::EventType>> events{
69 Core::Timing::CreateEvent("callbackA", HostCallbackTemplate<0>),
70 Core::Timing::CreateEvent("callbackB", HostCallbackTemplate<1>),
71 Core::Timing::CreateEvent("callbackC", HostCallbackTemplate<2>),
72 Core::Timing::CreateEvent("callbackD", HostCallbackTemplate<3>),
73 Core::Timing::CreateEvent("callbackE", HostCallbackTemplate<4>),
74 };
75
76 expected_callback = 0;
77
78 core_timing.SyncPause(true);
79
80 u64 one_micro = 1000U;
81 for (std::size_t i = 0; i < events.size(); i++) {
82 u64 order = calls_order[i];
83 core_timing.ScheduleEvent(i * one_micro + 100U, events[order], CB_IDS[order]);
84 }
85 /// test pause
86 REQUIRE(callbacks_ran_flags.none());
71 87
72 std::shared_ptr<Core::Timing::EventType> cb_a = 88 core_timing.Pause(false); // No need to sync
73 Core::Timing::CreateEvent("callbackA", CallbackTemplate<0>);
74 std::shared_ptr<Core::Timing::EventType> cb_b =
75 Core::Timing::CreateEvent("callbackB", CallbackTemplate<1>);
76 std::shared_ptr<Core::Timing::EventType> cb_c =
77 Core::Timing::CreateEvent("callbackC", CallbackTemplate<2>);
78 std::shared_ptr<Core::Timing::EventType> cb_d =
79 Core::Timing::CreateEvent("callbackD", CallbackTemplate<3>);
80 std::shared_ptr<Core::Timing::EventType> cb_e =
81 Core::Timing::CreateEvent("callbackE", CallbackTemplate<4>);
82
83 // Enter slice 0
84 core_timing.ResetRun();
85
86 // D -> B -> C -> A -> E
87 core_timing.SwitchContext(0);
88 core_timing.ScheduleEvent(1000, cb_a, CB_IDS[0]);
89 REQUIRE(1000 == core_timing.GetDowncount());
90 core_timing.ScheduleEvent(500, cb_b, CB_IDS[1]);
91 REQUIRE(500 == core_timing.GetDowncount());
92 core_timing.ScheduleEvent(800, cb_c, CB_IDS[2]);
93 REQUIRE(500 == core_timing.GetDowncount());
94 core_timing.ScheduleEvent(100, cb_d, CB_IDS[3]);
95 REQUIRE(100 == core_timing.GetDowncount());
96 core_timing.ScheduleEvent(1200, cb_e, CB_IDS[4]);
97 REQUIRE(100 == core_timing.GetDowncount());
98
99 AdvanceAndCheck(core_timing, 3, 0);
100 AdvanceAndCheck(core_timing, 1, 1);
101 AdvanceAndCheck(core_timing, 2, 2);
102 AdvanceAndCheck(core_timing, 0, 3);
103 AdvanceAndCheck(core_timing, 4, 0);
104}
105
106TEST_CASE("CoreTiming[FairSharing]", "[core]") {
107 89
108 ScopeInit guard; 90 while (core_timing.HasPendingEvents())
109 auto& core_timing = guard.core_timing; 91 ;
110 92
111 std::shared_ptr<Core::Timing::EventType> empty_callback = 93 REQUIRE(callbacks_ran_flags.all());
112 Core::Timing::CreateEvent("empty_callback", EmptyCallback);
113 94
114 callbacks_done = 0; 95 for (std::size_t i = 0; i < delays.size(); i++) {
115 u64 MAX_CALLBACKS = 10; 96 const double delay = static_cast<double>(delays[i]);
116 for (std::size_t i = 0; i < 10; i++) { 97 const double micro = delay / 1000.0f;
117 core_timing.ScheduleEvent(i * 3333U, empty_callback, 0); 98 const double mili = micro / 1000.0f;
99 printf("HostTimer Pausing Delay[%zu]: %.3f %.6f\n", i, micro, mili);
118 } 100 }
119
120 const s64 advances = MAX_SLICE_LENGTH / 10;
121 core_timing.ResetRun();
122 u64 current_time = core_timing.GetTicks();
123 bool keep_running{};
124 do {
125 keep_running = false;
126 for (u32 active_core = 0; active_core < 4; ++active_core) {
127 core_timing.SwitchContext(active_core);
128 if (core_timing.CanCurrentContextRun()) {
129 core_timing.AddTicks(std::min<s64>(advances, core_timing.GetDowncount()));
130 core_timing.Advance();
131 }
132 keep_running |= core_timing.CanCurrentContextRun();
133 }
134 } while (keep_running);
135 u64 current_time_2 = core_timing.GetTicks();
136
137 REQUIRE(MAX_CALLBACKS == callbacks_done);
138 REQUIRE(current_time_2 == current_time + MAX_SLICE_LENGTH * 4);
139} 101}
140 102
141TEST_CASE("Core::Timing[PredictableLateness]", "[core]") { 103TEST_CASE("CoreTiming[BasicOrderNoPausing]", "[core]") {
142 ScopeInit guard; 104 ScopeInit guard;
143 auto& core_timing = guard.core_timing; 105 auto& core_timing = guard.core_timing;
106 std::vector<std::shared_ptr<Core::Timing::EventType>> events{
107 Core::Timing::CreateEvent("callbackA", HostCallbackTemplate<0>),
108 Core::Timing::CreateEvent("callbackB", HostCallbackTemplate<1>),
109 Core::Timing::CreateEvent("callbackC", HostCallbackTemplate<2>),
110 Core::Timing::CreateEvent("callbackD", HostCallbackTemplate<3>),
111 Core::Timing::CreateEvent("callbackE", HostCallbackTemplate<4>),
112 };
113
114 core_timing.SyncPause(true);
115 core_timing.SyncPause(false);
116
117 expected_callback = 0;
118
119 u64 start = core_timing.GetGlobalTimeNs().count();
120 u64 one_micro = 1000U;
121 for (std::size_t i = 0; i < events.size(); i++) {
122 u64 order = calls_order[i];
123 core_timing.ScheduleEvent(i * one_micro + 100U, events[order], CB_IDS[order]);
124 }
125 u64 end = core_timing.GetGlobalTimeNs().count();
126 const double scheduling_time = static_cast<double>(end - start);
127 const double timer_time = static_cast<double>(TestTimerSpeed(core_timing));
144 128
145 std::shared_ptr<Core::Timing::EventType> cb_a = 129 while (core_timing.HasPendingEvents())
146 Core::Timing::CreateEvent("callbackA", CallbackTemplate<0>); 130 ;
147 std::shared_ptr<Core::Timing::EventType> cb_b =
148 Core::Timing::CreateEvent("callbackB", CallbackTemplate<1>);
149 131
150 // Enter slice 0 132 REQUIRE(callbacks_ran_flags.all());
151 core_timing.ResetRun();
152 133
153 core_timing.ScheduleEvent(100, cb_a, CB_IDS[0]); 134 for (std::size_t i = 0; i < delays.size(); i++) {
154 core_timing.ScheduleEvent(200, cb_b, CB_IDS[1]); 135 const double delay = static_cast<double>(delays[i]);
136 const double micro = delay / 1000.0f;
137 const double mili = micro / 1000.0f;
138 printf("HostTimer No Pausing Delay[%zu]: %.3f %.6f\n", i, micro, mili);
139 }
155 140
156 AdvanceAndCheck(core_timing, 0, 0, 10, -10); // (100 - 10) 141 const double micro = scheduling_time / 1000.0f;
157 AdvanceAndCheck(core_timing, 1, 1, 50, -50); 142 const double mili = micro / 1000.0f;
143 printf("HostTimer No Pausing Scheduling Time: %.3f %.6f\n", micro, mili);
144 printf("HostTimer No Pausing Timer Time: %.3f %.6f\n", timer_time / 1000.f,
145 timer_time / 1000000.f);
158} 146}
diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h
index e7cb87589..d374b73cf 100644
--- a/src/video_core/engines/shader_bytecode.h
+++ b/src/video_core/engines/shader_bytecode.h
@@ -661,6 +661,10 @@ union Instruction {
661 constexpr Instruction(u64 value) : value{value} {} 661 constexpr Instruction(u64 value) : value{value} {}
662 constexpr Instruction(const Instruction& instr) : value(instr.value) {} 662 constexpr Instruction(const Instruction& instr) : value(instr.value) {}
663 663
664 constexpr bool Bit(u64 offset) const {
665 return ((value >> offset) & 1) != 0;
666 }
667
664 BitField<0, 8, Register> gpr0; 668 BitField<0, 8, Register> gpr0;
665 BitField<8, 8, Register> gpr8; 669 BitField<8, 8, Register> gpr8;
666 union { 670 union {
@@ -1874,7 +1878,9 @@ public:
1874 HSETP2_C, 1878 HSETP2_C,
1875 HSETP2_R, 1879 HSETP2_R,
1876 HSETP2_IMM, 1880 HSETP2_IMM,
1881 HSET2_C,
1877 HSET2_R, 1882 HSET2_R,
1883 HSET2_IMM,
1878 POPC_C, 1884 POPC_C,
1879 POPC_R, 1885 POPC_R,
1880 POPC_IMM, 1886 POPC_IMM,
@@ -2194,7 +2200,9 @@ private:
2194 INST("0111111-1-------", Id::HSETP2_C, Type::HalfSetPredicate, "HSETP2_C"), 2200 INST("0111111-1-------", Id::HSETP2_C, Type::HalfSetPredicate, "HSETP2_C"),
2195 INST("0101110100100---", Id::HSETP2_R, Type::HalfSetPredicate, "HSETP2_R"), 2201 INST("0101110100100---", Id::HSETP2_R, Type::HalfSetPredicate, "HSETP2_R"),
2196 INST("0111111-0-------", Id::HSETP2_IMM, Type::HalfSetPredicate, "HSETP2_IMM"), 2202 INST("0111111-0-------", Id::HSETP2_IMM, Type::HalfSetPredicate, "HSETP2_IMM"),
2203 INST("0111110-1-------", Id::HSET2_C, Type::HalfSet, "HSET2_C"),
2197 INST("0101110100011---", Id::HSET2_R, Type::HalfSet, "HSET2_R"), 2204 INST("0101110100011---", Id::HSET2_R, Type::HalfSet, "HSET2_R"),
2205 INST("0111110-0-------", Id::HSET2_IMM, Type::HalfSet, "HSET2_IMM"),
2198 INST("010110111010----", Id::FCMP_RR, Type::Arithmetic, "FCMP_RR"), 2206 INST("010110111010----", Id::FCMP_RR, Type::Arithmetic, "FCMP_RR"),
2199 INST("010010111010----", Id::FCMP_RC, Type::Arithmetic, "FCMP_RC"), 2207 INST("010010111010----", Id::FCMP_RC, Type::Arithmetic, "FCMP_RC"),
2200 INST("0101000010000---", Id::MUFU, Type::Arithmetic, "MUFU"), 2208 INST("0101000010000---", Id::MUFU, Type::Arithmetic, "MUFU"),
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index 8eb017f65..482e49711 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -2,6 +2,8 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <chrono>
6
5#include "common/assert.h" 7#include "common/assert.h"
6#include "common/microprofile.h" 8#include "common/microprofile.h"
7#include "core/core.h" 9#include "core/core.h"
@@ -154,8 +156,7 @@ u64 GPU::GetTicks() const {
154 constexpr u64 gpu_ticks_num = 384; 156 constexpr u64 gpu_ticks_num = 384;
155 constexpr u64 gpu_ticks_den = 625; 157 constexpr u64 gpu_ticks_den = 625;
156 158
157 const u64 cpu_ticks = system.CoreTiming().GetTicks(); 159 u64 nanoseconds = system.CoreTiming().GetGlobalTimeNs().count();
158 u64 nanoseconds = Core::Timing::CyclesToNs(cpu_ticks).count();
159 if (Settings::values.use_fast_gpu_time) { 160 if (Settings::values.use_fast_gpu_time) {
160 nanoseconds /= 256; 161 nanoseconds /= 256;
161 } 162 }
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h
index a1b4c305c..2c42483bd 100644
--- a/src/video_core/gpu.h
+++ b/src/video_core/gpu.h
@@ -284,6 +284,12 @@ public:
284 /// core timing events. 284 /// core timing events.
285 virtual void Start() = 0; 285 virtual void Start() = 0;
286 286
287 /// Obtain the CPU Context
288 virtual void ObtainContext() = 0;
289
290 /// Release the CPU Context
291 virtual void ReleaseContext() = 0;
292
287 /// Push GPU command entries to be processed 293 /// Push GPU command entries to be processed
288 virtual void PushGPUEntries(Tegra::CommandList&& entries) = 0; 294 virtual void PushGPUEntries(Tegra::CommandList&& entries) = 0;
289 295
diff --git a/src/video_core/gpu_asynch.cpp b/src/video_core/gpu_asynch.cpp
index 53305ab43..7b855f63e 100644
--- a/src/video_core/gpu_asynch.cpp
+++ b/src/video_core/gpu_asynch.cpp
@@ -19,10 +19,17 @@ GPUAsynch::GPUAsynch(Core::System& system, std::unique_ptr<VideoCore::RendererBa
19GPUAsynch::~GPUAsynch() = default; 19GPUAsynch::~GPUAsynch() = default;
20 20
21void GPUAsynch::Start() { 21void GPUAsynch::Start() {
22 cpu_context->MakeCurrent();
23 gpu_thread.StartThread(*renderer, *gpu_context, *dma_pusher); 22 gpu_thread.StartThread(*renderer, *gpu_context, *dma_pusher);
24} 23}
25 24
25void GPUAsynch::ObtainContext() {
26 cpu_context->MakeCurrent();
27}
28
29void GPUAsynch::ReleaseContext() {
30 cpu_context->DoneCurrent();
31}
32
26void GPUAsynch::PushGPUEntries(Tegra::CommandList&& entries) { 33void GPUAsynch::PushGPUEntries(Tegra::CommandList&& entries) {
27 gpu_thread.SubmitList(std::move(entries)); 34 gpu_thread.SubmitList(std::move(entries));
28} 35}
diff --git a/src/video_core/gpu_asynch.h b/src/video_core/gpu_asynch.h
index 517658612..15e9f1d38 100644
--- a/src/video_core/gpu_asynch.h
+++ b/src/video_core/gpu_asynch.h
@@ -25,6 +25,8 @@ public:
25 ~GPUAsynch() override; 25 ~GPUAsynch() override;
26 26
27 void Start() override; 27 void Start() override;
28 void ObtainContext() override;
29 void ReleaseContext() override;
28 void PushGPUEntries(Tegra::CommandList&& entries) override; 30 void PushGPUEntries(Tegra::CommandList&& entries) override;
29 void SwapBuffers(const Tegra::FramebufferConfig* framebuffer) override; 31 void SwapBuffers(const Tegra::FramebufferConfig* framebuffer) override;
30 void FlushRegion(VAddr addr, u64 size) override; 32 void FlushRegion(VAddr addr, u64 size) override;
diff --git a/src/video_core/gpu_synch.cpp b/src/video_core/gpu_synch.cpp
index 6f38a672a..aaeb9811d 100644
--- a/src/video_core/gpu_synch.cpp
+++ b/src/video_core/gpu_synch.cpp
@@ -13,10 +13,16 @@ GPUSynch::GPUSynch(Core::System& system, std::unique_ptr<VideoCore::RendererBase
13 13
14GPUSynch::~GPUSynch() = default; 14GPUSynch::~GPUSynch() = default;
15 15
16void GPUSynch::Start() { 16void GPUSynch::Start() {}
17
18void GPUSynch::ObtainContext() {
17 context->MakeCurrent(); 19 context->MakeCurrent();
18} 20}
19 21
22void GPUSynch::ReleaseContext() {
23 context->DoneCurrent();
24}
25
20void GPUSynch::PushGPUEntries(Tegra::CommandList&& entries) { 26void GPUSynch::PushGPUEntries(Tegra::CommandList&& entries) {
21 dma_pusher->Push(std::move(entries)); 27 dma_pusher->Push(std::move(entries));
22 dma_pusher->DispatchCalls(); 28 dma_pusher->DispatchCalls();
diff --git a/src/video_core/gpu_synch.h b/src/video_core/gpu_synch.h
index 4a6e9a01d..762c20aa5 100644
--- a/src/video_core/gpu_synch.h
+++ b/src/video_core/gpu_synch.h
@@ -24,6 +24,8 @@ public:
24 ~GPUSynch() override; 24 ~GPUSynch() override;
25 25
26 void Start() override; 26 void Start() override;
27 void ObtainContext() override;
28 void ReleaseContext() override;
27 void PushGPUEntries(Tegra::CommandList&& entries) override; 29 void PushGPUEntries(Tegra::CommandList&& entries) override;
28 void SwapBuffers(const Tegra::FramebufferConfig* framebuffer) override; 30 void SwapBuffers(const Tegra::FramebufferConfig* framebuffer) override;
29 void FlushRegion(VAddr addr, u64 size) override; 31 void FlushRegion(VAddr addr, u64 size) override;
diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp
index c3bb4fe06..738c6f0c1 100644
--- a/src/video_core/gpu_thread.cpp
+++ b/src/video_core/gpu_thread.cpp
@@ -4,6 +4,7 @@
4 4
5#include "common/assert.h" 5#include "common/assert.h"
6#include "common/microprofile.h" 6#include "common/microprofile.h"
7#include "common/thread.h"
7#include "core/core.h" 8#include "core/core.h"
8#include "core/frontend/emu_window.h" 9#include "core/frontend/emu_window.h"
9#include "core/settings.h" 10#include "core/settings.h"
@@ -18,7 +19,11 @@ namespace VideoCommon::GPUThread {
18static void RunThread(Core::System& system, VideoCore::RendererBase& renderer, 19static void RunThread(Core::System& system, VideoCore::RendererBase& renderer,
19 Core::Frontend::GraphicsContext& context, Tegra::DmaPusher& dma_pusher, 20 Core::Frontend::GraphicsContext& context, Tegra::DmaPusher& dma_pusher,
20 SynchState& state) { 21 SynchState& state) {
21 MicroProfileOnThreadCreate("GpuThread"); 22 std::string name = "yuzu:GPU";
23 MicroProfileOnThreadCreate(name.c_str());
24 Common::SetCurrentThreadName(name.c_str());
25 Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
26 system.RegisterHostThread();
22 27
23 // Wait for first GPU command before acquiring the window context 28 // Wait for first GPU command before acquiring the window context
24 while (state.queue.Empty()) 29 while (state.queue.Empty())
diff --git a/src/video_core/renderer_opengl/gl_device.cpp b/src/video_core/renderer_opengl/gl_device.cpp
index 447a19595..b6b6659c1 100644
--- a/src/video_core/renderer_opengl/gl_device.cpp
+++ b/src/video_core/renderer_opengl/gl_device.cpp
@@ -178,7 +178,7 @@ bool IsASTCSupported() {
178 for (const GLenum format : formats) { 178 for (const GLenum format : formats) {
179 for (const GLenum support : required_support) { 179 for (const GLenum support : required_support) {
180 GLint value; 180 GLint value;
181 glGetInternalformativ(GL_TEXTURE_2D, format, support, 1, &value); 181 glGetInternalformativ(target, format, support, 1, &value);
182 if (value != GL_FULL_SUPPORT) { 182 if (value != GL_FULL_SUPPORT) {
183 return false; 183 return false;
184 } 184 }
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index 46e780a06..c6a3bf3a1 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -460,8 +460,9 @@ Shader* ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
460 const u8* host_ptr_b = memory_manager.GetPointer(address_b); 460 const u8* host_ptr_b = memory_manager.GetPointer(address_b);
461 code_b = GetShaderCode(memory_manager, address_b, host_ptr_b, false); 461 code_b = GetShaderCode(memory_manager, address_b, host_ptr_b, false);
462 } 462 }
463 const std::size_t code_size = code.size() * sizeof(u64);
463 464
464 const auto unique_identifier = GetUniqueIdentifier( 465 const u64 unique_identifier = GetUniqueIdentifier(
465 GetShaderType(program), program == Maxwell::ShaderProgram::VertexA, code, code_b); 466 GetShaderType(program), program == Maxwell::ShaderProgram::VertexA, code, code_b);
466 467
467 const ShaderParameters params{system, disk_cache, device, 468 const ShaderParameters params{system, disk_cache, device,
@@ -477,7 +478,7 @@ Shader* ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
477 478
478 Shader* const result = shader.get(); 479 Shader* const result = shader.get();
479 if (cpu_addr) { 480 if (cpu_addr) {
480 Register(std::move(shader), *cpu_addr, code.size() * sizeof(u64)); 481 Register(std::move(shader), *cpu_addr, code_size);
481 } else { 482 } else {
482 null_shader = std::move(shader); 483 null_shader = std::move(shader);
483 } 484 }
@@ -495,8 +496,9 @@ Shader* ShaderCacheOpenGL::GetComputeKernel(GPUVAddr code_addr) {
495 496
496 const auto host_ptr{memory_manager.GetPointer(code_addr)}; 497 const auto host_ptr{memory_manager.GetPointer(code_addr)};
497 // No kernel found, create a new one 498 // No kernel found, create a new one
498 auto code{GetShaderCode(memory_manager, code_addr, host_ptr, true)}; 499 ProgramCode code{GetShaderCode(memory_manager, code_addr, host_ptr, true)};
499 const auto unique_identifier{GetUniqueIdentifier(ShaderType::Compute, false, code)}; 500 const std::size_t code_size{code.size() * sizeof(u64)};
501 const u64 unique_identifier{GetUniqueIdentifier(ShaderType::Compute, false, code)};
500 502
501 const ShaderParameters params{system, disk_cache, device, 503 const ShaderParameters params{system, disk_cache, device,
502 *cpu_addr, host_ptr, unique_identifier}; 504 *cpu_addr, host_ptr, unique_identifier};
@@ -511,7 +513,7 @@ Shader* ShaderCacheOpenGL::GetComputeKernel(GPUVAddr code_addr) {
511 513
512 Shader* const result = kernel.get(); 514 Shader* const result = kernel.get();
513 if (cpu_addr) { 515 if (cpu_addr) {
514 Register(std::move(kernel), *cpu_addr, code.size() * sizeof(u64)); 516 Register(std::move(kernel), *cpu_addr, code_size);
515 } else { 517 } else {
516 null_kernel = std::move(kernel); 518 null_kernel = std::move(kernel);
517 } 519 }
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h
index 6848f1388..994aaeaf2 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.h
+++ b/src/video_core/renderer_opengl/gl_shader_cache.h
@@ -37,7 +37,6 @@ namespace OpenGL {
37 37
38class Device; 38class Device;
39class RasterizerOpenGL; 39class RasterizerOpenGL;
40struct UnspecializedShader;
41 40
42using Maxwell = Tegra::Engines::Maxwell3D::Regs; 41using Maxwell = Tegra::Engines::Maxwell3D::Regs;
43 42
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
index cd9673d1f..2d9b18ed9 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
@@ -155,11 +155,31 @@ vk::Instance CreateInstance(Common::DynamicLibrary& library, vk::InstanceDispatc
155 } 155 }
156 } 156 }
157 157
158 static constexpr std::array layers_data{"VK_LAYER_LUNARG_standard_validation"}; 158 std::vector<const char*> layers;
159 vk::Span<const char*> layers = layers_data; 159 layers.reserve(1);
160 if (!enable_layers) { 160 if (enable_layers) {
161 layers = {}; 161 layers.push_back("VK_LAYER_KHRONOS_validation");
162 }
163
164 const std::optional layer_properties = vk::EnumerateInstanceLayerProperties(dld);
165 if (!layer_properties) {
166 LOG_ERROR(Render_Vulkan, "Failed to query layer properties, disabling layers");
167 layers.clear();
168 }
169
170 for (auto layer_it = layers.begin(); layer_it != layers.end();) {
171 const char* const layer = *layer_it;
172 const auto it = std::find_if(
173 layer_properties->begin(), layer_properties->end(),
174 [layer](const VkLayerProperties& prop) { return !std::strcmp(layer, prop.layerName); });
175 if (it == layer_properties->end()) {
176 LOG_ERROR(Render_Vulkan, "Layer {} not available, removing it", layer);
177 layer_it = layers.erase(layer_it);
178 } else {
179 ++layer_it;
180 }
162 } 181 }
182
163 vk::Instance instance = vk::Instance::Create(layers, extensions, dld); 183 vk::Instance instance = vk::Instance::Create(layers, extensions, dld);
164 if (!instance) { 184 if (!instance) {
165 LOG_ERROR(Render_Vulkan, "Failed to create Vulkan instance"); 185 LOG_ERROR(Render_Vulkan, "Failed to create Vulkan instance");
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index e3714ee6d..a8d94eac3 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -143,6 +143,49 @@ Tegra::Texture::FullTextureInfo GetTextureInfo(const Engine& engine, const Entry
143 } 143 }
144} 144}
145 145
146/// @brief Determine if an attachment to be updated has to preserve contents
147/// @param is_clear True when a clear is being executed
148/// @param regs 3D registers
149/// @return True when the contents have to be preserved
150bool HasToPreserveColorContents(bool is_clear, const Maxwell& regs) {
151 if (!is_clear) {
152 return true;
153 }
154 // First we have to make sure all clear masks are enabled.
155 if (!regs.clear_buffers.R || !regs.clear_buffers.G || !regs.clear_buffers.B ||
156 !regs.clear_buffers.A) {
157 return true;
158 }
159 // If scissors are disabled, the whole screen is cleared
160 if (!regs.clear_flags.scissor) {
161 return false;
162 }
163 // Then we have to confirm scissor testing clears the whole image
164 const std::size_t index = regs.clear_buffers.RT;
165 const auto& scissor = regs.scissor_test[0];
166 return scissor.min_x > 0 || scissor.min_y > 0 || scissor.max_x < regs.rt[index].width ||
167 scissor.max_y < regs.rt[index].height;
168}
169
170/// @brief Determine if an attachment to be updated has to preserve contents
171/// @param is_clear True when a clear is being executed
172/// @param regs 3D registers
173/// @return True when the contents have to be preserved
174bool HasToPreserveDepthContents(bool is_clear, const Maxwell& regs) {
175 // If we are not clearing, the contents have to be preserved
176 if (!is_clear) {
177 return true;
178 }
179 // For depth stencil clears we only have to confirm scissor test covers the whole image
180 if (!regs.clear_flags.scissor) {
181 return false;
182 }
183 // Make sure the clear cover the whole image
184 const auto& scissor = regs.scissor_test[0];
185 return scissor.min_x > 0 || scissor.min_y > 0 || scissor.max_x < regs.zeta_width ||
186 scissor.max_y < regs.zeta_height;
187}
188
146} // Anonymous namespace 189} // Anonymous namespace
147 190
148class BufferBindings final { 191class BufferBindings final {
@@ -344,7 +387,7 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
344 387
345 buffer_cache.Unmap(); 388 buffer_cache.Unmap();
346 389
347 const Texceptions texceptions = UpdateAttachments(); 390 const Texceptions texceptions = UpdateAttachments(false);
348 SetupImageTransitions(texceptions, color_attachments, zeta_attachment); 391 SetupImageTransitions(texceptions, color_attachments, zeta_attachment);
349 392
350 key.renderpass_params = GetRenderPassParams(texceptions); 393 key.renderpass_params = GetRenderPassParams(texceptions);
@@ -400,7 +443,7 @@ void RasterizerVulkan::Clear() {
400 return; 443 return;
401 } 444 }
402 445
403 [[maybe_unused]] const auto texceptions = UpdateAttachments(); 446 [[maybe_unused]] const auto texceptions = UpdateAttachments(true);
404 DEBUG_ASSERT(texceptions.none()); 447 DEBUG_ASSERT(texceptions.none());
405 SetupImageTransitions(0, color_attachments, zeta_attachment); 448 SetupImageTransitions(0, color_attachments, zeta_attachment);
406 449
@@ -677,9 +720,12 @@ void RasterizerVulkan::FlushWork() {
677 draw_counter = 0; 720 draw_counter = 0;
678} 721}
679 722
680RasterizerVulkan::Texceptions RasterizerVulkan::UpdateAttachments() { 723RasterizerVulkan::Texceptions RasterizerVulkan::UpdateAttachments(bool is_clear) {
681 MICROPROFILE_SCOPE(Vulkan_RenderTargets); 724 MICROPROFILE_SCOPE(Vulkan_RenderTargets);
682 auto& dirty = system.GPU().Maxwell3D().dirty.flags; 725 auto& maxwell3d = system.GPU().Maxwell3D();
726 auto& dirty = maxwell3d.dirty.flags;
727 auto& regs = maxwell3d.regs;
728
683 const bool update_rendertargets = dirty[VideoCommon::Dirty::RenderTargets]; 729 const bool update_rendertargets = dirty[VideoCommon::Dirty::RenderTargets];
684 dirty[VideoCommon::Dirty::RenderTargets] = false; 730 dirty[VideoCommon::Dirty::RenderTargets] = false;
685 731
@@ -688,7 +734,8 @@ RasterizerVulkan::Texceptions RasterizerVulkan::UpdateAttachments() {
688 Texceptions texceptions; 734 Texceptions texceptions;
689 for (std::size_t rt = 0; rt < Maxwell::NumRenderTargets; ++rt) { 735 for (std::size_t rt = 0; rt < Maxwell::NumRenderTargets; ++rt) {
690 if (update_rendertargets) { 736 if (update_rendertargets) {
691 color_attachments[rt] = texture_cache.GetColorBufferSurface(rt, true); 737 const bool preserve_contents = HasToPreserveColorContents(is_clear, regs);
738 color_attachments[rt] = texture_cache.GetColorBufferSurface(rt, preserve_contents);
692 } 739 }
693 if (color_attachments[rt] && WalkAttachmentOverlaps(*color_attachments[rt])) { 740 if (color_attachments[rt] && WalkAttachmentOverlaps(*color_attachments[rt])) {
694 texceptions[rt] = true; 741 texceptions[rt] = true;
@@ -696,7 +743,8 @@ RasterizerVulkan::Texceptions RasterizerVulkan::UpdateAttachments() {
696 } 743 }
697 744
698 if (update_rendertargets) { 745 if (update_rendertargets) {
699 zeta_attachment = texture_cache.GetDepthBufferSurface(true); 746 const bool preserve_contents = HasToPreserveDepthContents(is_clear, regs);
747 zeta_attachment = texture_cache.GetDepthBufferSurface(preserve_contents);
700 } 748 }
701 if (zeta_attachment && WalkAttachmentOverlaps(*zeta_attachment)) { 749 if (zeta_attachment && WalkAttachmentOverlaps(*zeta_attachment)) {
702 texceptions[ZETA_TEXCEPTION_INDEX] = true; 750 texceptions[ZETA_TEXCEPTION_INDEX] = true;
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index c8c187606..83e00e7e9 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -159,7 +159,10 @@ private:
159 159
160 void FlushWork(); 160 void FlushWork();
161 161
162 Texceptions UpdateAttachments(); 162 /// @brief Updates the currently bound attachments
163 /// @param is_clear True when the framebuffer is updated as a clear
164 /// @return Bitfield of attachments being used as sampled textures
165 Texceptions UpdateAttachments(bool is_clear);
163 166
164 std::tuple<VkFramebuffer, VkExtent2D> ConfigureFramebuffers(VkRenderPass renderpass); 167 std::tuple<VkFramebuffer, VkExtent2D> ConfigureFramebuffers(VkRenderPass renderpass);
165 168
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp
index 82ec9180e..56524e6f3 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.cpp
+++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp
@@ -9,6 +9,7 @@
9#include <utility> 9#include <utility>
10 10
11#include "common/microprofile.h" 11#include "common/microprofile.h"
12#include "common/thread.h"
12#include "video_core/renderer_vulkan/vk_device.h" 13#include "video_core/renderer_vulkan/vk_device.h"
13#include "video_core/renderer_vulkan/vk_query_cache.h" 14#include "video_core/renderer_vulkan/vk_query_cache.h"
14#include "video_core/renderer_vulkan/vk_resource_manager.h" 15#include "video_core/renderer_vulkan/vk_resource_manager.h"
@@ -133,6 +134,7 @@ void VKScheduler::BindGraphicsPipeline(VkPipeline pipeline) {
133} 134}
134 135
135void VKScheduler::WorkerThread() { 136void VKScheduler::WorkerThread() {
137 Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
136 std::unique_lock lock{mutex}; 138 std::unique_lock lock{mutex};
137 do { 139 do {
138 cv.wait(lock, [this] { return !chunk_queue.Empty() || quit; }); 140 cv.wait(lock, [this] { return !chunk_queue.Empty() || quit; });
diff --git a/src/video_core/renderer_vulkan/wrapper.cpp b/src/video_core/renderer_vulkan/wrapper.cpp
index 42eff85d3..0d485a662 100644
--- a/src/video_core/renderer_vulkan/wrapper.cpp
+++ b/src/video_core/renderer_vulkan/wrapper.cpp
@@ -153,7 +153,8 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept {
153 153
154bool Load(InstanceDispatch& dld) noexcept { 154bool Load(InstanceDispatch& dld) noexcept {
155#define X(name) Proc(dld.name, dld, #name) 155#define X(name) Proc(dld.name, dld, #name)
156 return X(vkCreateInstance) && X(vkEnumerateInstanceExtensionProperties); 156 return X(vkCreateInstance) && X(vkEnumerateInstanceExtensionProperties) &&
157 X(vkEnumerateInstanceLayerProperties);
157#undef X 158#undef X
158} 159}
159 160
@@ -770,4 +771,17 @@ std::optional<std::vector<VkExtensionProperties>> EnumerateInstanceExtensionProp
770 return properties; 771 return properties;
771} 772}
772 773
774std::optional<std::vector<VkLayerProperties>> EnumerateInstanceLayerProperties(
775 const InstanceDispatch& dld) {
776 u32 num;
777 if (dld.vkEnumerateInstanceLayerProperties(&num, nullptr) != VK_SUCCESS) {
778 return std::nullopt;
779 }
780 std::vector<VkLayerProperties> properties(num);
781 if (dld.vkEnumerateInstanceLayerProperties(&num, properties.data()) != VK_SUCCESS) {
782 return std::nullopt;
783 }
784 return properties;
785}
786
773} // namespace Vulkan::vk 787} // namespace Vulkan::vk
diff --git a/src/video_core/renderer_vulkan/wrapper.h b/src/video_core/renderer_vulkan/wrapper.h
index da42ca88e..d56fdb3f9 100644
--- a/src/video_core/renderer_vulkan/wrapper.h
+++ b/src/video_core/renderer_vulkan/wrapper.h
@@ -141,6 +141,7 @@ struct InstanceDispatch {
141 PFN_vkCreateInstance vkCreateInstance; 141 PFN_vkCreateInstance vkCreateInstance;
142 PFN_vkDestroyInstance vkDestroyInstance; 142 PFN_vkDestroyInstance vkDestroyInstance;
143 PFN_vkEnumerateInstanceExtensionProperties vkEnumerateInstanceExtensionProperties; 143 PFN_vkEnumerateInstanceExtensionProperties vkEnumerateInstanceExtensionProperties;
144 PFN_vkEnumerateInstanceLayerProperties vkEnumerateInstanceLayerProperties;
144 145
145 PFN_vkCreateDebugUtilsMessengerEXT vkCreateDebugUtilsMessengerEXT; 146 PFN_vkCreateDebugUtilsMessengerEXT vkCreateDebugUtilsMessengerEXT;
146 PFN_vkCreateDevice vkCreateDevice; 147 PFN_vkCreateDevice vkCreateDevice;
@@ -996,4 +997,7 @@ private:
996std::optional<std::vector<VkExtensionProperties>> EnumerateInstanceExtensionProperties( 997std::optional<std::vector<VkExtensionProperties>> EnumerateInstanceExtensionProperties(
997 const InstanceDispatch& dld); 998 const InstanceDispatch& dld);
998 999
1000std::optional<std::vector<VkLayerProperties>> EnumerateInstanceLayerProperties(
1001 const InstanceDispatch& dld);
1002
999} // namespace Vulkan::vk 1003} // namespace Vulkan::vk
diff --git a/src/video_core/shader/decode/half_set.cpp b/src/video_core/shader/decode/half_set.cpp
index 848e46874..b2e88fa20 100644
--- a/src/video_core/shader/decode/half_set.cpp
+++ b/src/video_core/shader/decode/half_set.cpp
@@ -13,55 +13,101 @@
13 13
14namespace VideoCommon::Shader { 14namespace VideoCommon::Shader {
15 15
16using std::move;
16using Tegra::Shader::Instruction; 17using Tegra::Shader::Instruction;
17using Tegra::Shader::OpCode; 18using Tegra::Shader::OpCode;
19using Tegra::Shader::PredCondition;
18 20
19u32 ShaderIR::DecodeHalfSet(NodeBlock& bb, u32 pc) { 21u32 ShaderIR::DecodeHalfSet(NodeBlock& bb, u32 pc) {
20 const Instruction instr = {program_code[pc]}; 22 const Instruction instr = {program_code[pc]};
21 const auto opcode = OpCode::Decode(instr); 23 const auto opcode = OpCode::Decode(instr);
22 24
23 if (instr.hset2.ftz == 0) { 25 PredCondition cond;
24 LOG_DEBUG(HW_GPU, "{} without FTZ is not implemented", opcode->get().GetName()); 26 bool bf;
27 bool ftz;
28 bool neg_a;
29 bool abs_a;
30 bool neg_b;
31 bool abs_b;
32 switch (opcode->get().GetId()) {
33 case OpCode::Id::HSET2_C:
34 case OpCode::Id::HSET2_IMM:
35 cond = instr.hsetp2.cbuf_and_imm.cond;
36 bf = instr.Bit(53);
37 ftz = instr.Bit(54);
38 neg_a = instr.Bit(43);
39 abs_a = instr.Bit(44);
40 neg_b = instr.Bit(56);
41 abs_b = instr.Bit(54);
42 break;
43 case OpCode::Id::HSET2_R:
44 cond = instr.hsetp2.reg.cond;
45 bf = instr.Bit(49);
46 ftz = instr.Bit(50);
47 neg_a = instr.Bit(43);
48 abs_a = instr.Bit(44);
49 neg_b = instr.Bit(31);
50 abs_b = instr.Bit(30);
51 break;
52 default:
53 UNREACHABLE();
25 } 54 }
26 55
27 Node op_a = UnpackHalfFloat(GetRegister(instr.gpr8), instr.hset2.type_a); 56 Node op_b = [this, instr, opcode] {
28 op_a = GetOperandAbsNegHalf(op_a, instr.hset2.abs_a, instr.hset2.negate_a);
29
30 Node op_b = [&]() {
31 switch (opcode->get().GetId()) { 57 switch (opcode->get().GetId()) {
58 case OpCode::Id::HSET2_C:
59 // Inform as unimplemented as this is not tested.
60 UNIMPLEMENTED_MSG("HSET2_C is not implemented");
61 return GetConstBuffer(instr.cbuf34.index, instr.cbuf34.GetOffset());
32 case OpCode::Id::HSET2_R: 62 case OpCode::Id::HSET2_R:
33 return GetRegister(instr.gpr20); 63 return GetRegister(instr.gpr20);
64 case OpCode::Id::HSET2_IMM:
65 return UnpackHalfImmediate(instr, true);
34 default: 66 default:
35 UNREACHABLE(); 67 UNREACHABLE();
36 return Immediate(0); 68 return Node{};
37 } 69 }
38 }(); 70 }();
39 op_b = UnpackHalfFloat(op_b, instr.hset2.type_b);
40 op_b = GetOperandAbsNegHalf(op_b, instr.hset2.abs_b, instr.hset2.negate_b);
41 71
42 const Node second_pred = GetPredicate(instr.hset2.pred39, instr.hset2.neg_pred); 72 if (!ftz) {
73 LOG_DEBUG(HW_GPU, "{} without FTZ is not implemented", opcode->get().GetName());
74 }
75
76 Node op_a = UnpackHalfFloat(GetRegister(instr.gpr8), instr.hset2.type_a);
77 op_a = GetOperandAbsNegHalf(op_a, abs_a, neg_a);
78
79 switch (opcode->get().GetId()) {
80 case OpCode::Id::HSET2_R:
81 op_b = GetOperandAbsNegHalf(move(op_b), abs_b, neg_b);
82 [[fallthrough]];
83 case OpCode::Id::HSET2_C:
84 op_b = UnpackHalfFloat(move(op_b), instr.hset2.type_b);
85 break;
86 default:
87 break;
88 }
43 89
44 const Node comparison_pair = GetPredicateComparisonHalf(instr.hset2.cond, op_a, op_b); 90 Node second_pred = GetPredicate(instr.hset2.pred39, instr.hset2.neg_pred);
91
92 Node comparison_pair = GetPredicateComparisonHalf(cond, op_a, op_b);
45 93
46 const OperationCode combiner = GetPredicateCombiner(instr.hset2.op); 94 const OperationCode combiner = GetPredicateCombiner(instr.hset2.op);
47 95
48 // HSET2 operates on each half float in the pack. 96 // HSET2 operates on each half float in the pack.
49 std::array<Node, 2> values; 97 std::array<Node, 2> values;
50 for (u32 i = 0; i < 2; ++i) { 98 for (u32 i = 0; i < 2; ++i) {
51 const u32 raw_value = instr.hset2.bf ? 0x3c00 : 0xffff; 99 const u32 raw_value = bf ? 0x3c00 : 0xffff;
52 const Node true_value = Immediate(raw_value << (i * 16)); 100 Node true_value = Immediate(raw_value << (i * 16));
53 const Node false_value = Immediate(0); 101 Node false_value = Immediate(0);
54
55 const Node comparison =
56 Operation(OperationCode::LogicalPick2, comparison_pair, Immediate(i));
57 const Node predicate = Operation(combiner, comparison, second_pred);
58 102
103 Node comparison = Operation(OperationCode::LogicalPick2, comparison_pair, Immediate(i));
104 Node predicate = Operation(combiner, comparison, second_pred);
59 values[i] = 105 values[i] =
60 Operation(OperationCode::Select, NO_PRECISE, predicate, true_value, false_value); 106 Operation(OperationCode::Select, predicate, move(true_value), move(false_value));
61 } 107 }
62 108
63 const Node value = Operation(OperationCode::UBitwiseOr, NO_PRECISE, values[0], values[1]); 109 Node value = Operation(OperationCode::UBitwiseOr, values[0], values[1]);
64 SetRegister(bb, instr.gpr0, value); 110 SetRegister(bb, instr.gpr0, move(value));
65 111
66 return pc; 112 return pc;
67} 113}
diff --git a/src/video_core/texture_cache/surface_base.cpp b/src/video_core/texture_cache/surface_base.cpp
index 94d3a6ae5..0caf3b4f0 100644
--- a/src/video_core/texture_cache/surface_base.cpp
+++ b/src/video_core/texture_cache/surface_base.cpp
@@ -120,6 +120,9 @@ std::optional<std::pair<u32, u32>> SurfaceBaseImpl::GetLayerMipmap(
120 } 120 }
121 const auto relative_address{static_cast<GPUVAddr>(candidate_gpu_addr - gpu_addr)}; 121 const auto relative_address{static_cast<GPUVAddr>(candidate_gpu_addr - gpu_addr)};
122 const auto layer{static_cast<u32>(relative_address / layer_size)}; 122 const auto layer{static_cast<u32>(relative_address / layer_size)};
123 if (layer >= params.depth) {
124 return {};
125 }
123 const GPUVAddr mipmap_address = relative_address - layer_size * layer; 126 const GPUVAddr mipmap_address = relative_address - layer_size * layer;
124 const auto mipmap_it = 127 const auto mipmap_it =
125 Common::BinaryFind(mipmap_offsets.begin(), mipmap_offsets.end(), mipmap_address); 128 Common::BinaryFind(mipmap_offsets.begin(), mipmap_offsets.end(), mipmap_address);
diff --git a/src/yuzu/bootmanager.cpp b/src/yuzu/bootmanager.cpp
index 696da2137..4bfce48a4 100644
--- a/src/yuzu/bootmanager.cpp
+++ b/src/yuzu/bootmanager.cpp
@@ -44,49 +44,65 @@ EmuThread::EmuThread() = default;
44EmuThread::~EmuThread() = default; 44EmuThread::~EmuThread() = default;
45 45
46void EmuThread::run() { 46void EmuThread::run() {
47 MicroProfileOnThreadCreate("EmuThread"); 47 std::string name = "yuzu:EmuControlThread";
48 MicroProfileOnThreadCreate(name.c_str());
49 Common::SetCurrentThreadName(name.c_str());
50
51 auto& system = Core::System::GetInstance();
52
53 system.RegisterHostThread();
54
55 auto& gpu = system.GPU();
48 56
49 // Main process has been loaded. Make the context current to this thread and begin GPU and CPU 57 // Main process has been loaded. Make the context current to this thread and begin GPU and CPU
50 // execution. 58 // execution.
51 Core::System::GetInstance().GPU().Start(); 59 gpu.Start();
60
61 gpu.ObtainContext();
52 62
53 emit LoadProgress(VideoCore::LoadCallbackStage::Prepare, 0, 0); 63 emit LoadProgress(VideoCore::LoadCallbackStage::Prepare, 0, 0);
54 64
55 Core::System::GetInstance().Renderer().Rasterizer().LoadDiskResources( 65 system.Renderer().Rasterizer().LoadDiskResources(
56 stop_run, [this](VideoCore::LoadCallbackStage stage, std::size_t value, std::size_t total) { 66 stop_run, [this](VideoCore::LoadCallbackStage stage, std::size_t value, std::size_t total) {
57 emit LoadProgress(stage, value, total); 67 emit LoadProgress(stage, value, total);
58 }); 68 });
59 69
60 emit LoadProgress(VideoCore::LoadCallbackStage::Complete, 0, 0); 70 emit LoadProgress(VideoCore::LoadCallbackStage::Complete, 0, 0);
61 71
72 gpu.ReleaseContext();
73
62 // Holds whether the cpu was running during the last iteration, 74 // Holds whether the cpu was running during the last iteration,
63 // so that the DebugModeLeft signal can be emitted before the 75 // so that the DebugModeLeft signal can be emitted before the
64 // next execution step 76 // next execution step
65 bool was_active = false; 77 bool was_active = false;
66 while (!stop_run) { 78 while (!stop_run) {
67 if (running) { 79 if (running) {
68 if (!was_active) 80 if (was_active) {
69 emit DebugModeLeft(); 81 emit DebugModeLeft();
82 }
70 83
71 Core::System::ResultStatus result = Core::System::GetInstance().RunLoop(); 84 running_guard = true;
85 Core::System::ResultStatus result = system.Run();
72 if (result != Core::System::ResultStatus::Success) { 86 if (result != Core::System::ResultStatus::Success) {
87 running_guard = false;
73 this->SetRunning(false); 88 this->SetRunning(false);
74 emit ErrorThrown(result, Core::System::GetInstance().GetStatusDetails()); 89 emit ErrorThrown(result, system.GetStatusDetails());
75 } 90 }
91 running_wait.Wait();
92 result = system.Pause();
93 if (result != Core::System::ResultStatus::Success) {
94 running_guard = false;
95 this->SetRunning(false);
96 emit ErrorThrown(result, system.GetStatusDetails());
97 }
98 running_guard = false;
76 99
77 was_active = running || exec_step; 100 if (!stop_run) {
78 if (!was_active && !stop_run) 101 was_active = true;
79 emit DebugModeEntered(); 102 emit DebugModeEntered();
103 }
80 } else if (exec_step) { 104 } else if (exec_step) {
81 if (!was_active) 105 UNIMPLEMENTED();
82 emit DebugModeLeft();
83
84 exec_step = false;
85 Core::System::GetInstance().SingleStep();
86 emit DebugModeEntered();
87 yieldCurrentThread();
88
89 was_active = false;
90 } else { 106 } else {
91 std::unique_lock lock{running_mutex}; 107 std::unique_lock lock{running_mutex};
92 running_cv.wait(lock, [this] { return IsRunning() || exec_step || stop_run; }); 108 running_cv.wait(lock, [this] { return IsRunning() || exec_step || stop_run; });
@@ -94,7 +110,7 @@ void EmuThread::run() {
94 } 110 }
95 111
96 // Shutdown the core emulation 112 // Shutdown the core emulation
97 Core::System::GetInstance().Shutdown(); 113 system.Shutdown();
98 114
99#if MICROPROFILE_ENABLED 115#if MICROPROFILE_ENABLED
100 MicroProfileOnThreadExit(); 116 MicroProfileOnThreadExit();
@@ -360,7 +376,7 @@ QByteArray GRenderWindow::saveGeometry() {
360} 376}
361 377
362qreal GRenderWindow::windowPixelRatio() const { 378qreal GRenderWindow::windowPixelRatio() const {
363 return devicePixelRatio(); 379 return devicePixelRatioF();
364} 380}
365 381
366std::pair<u32, u32> GRenderWindow::ScaleTouch(const QPointF& pos) const { 382std::pair<u32, u32> GRenderWindow::ScaleTouch(const QPointF& pos) const {
diff --git a/src/yuzu/bootmanager.h b/src/yuzu/bootmanager.h
index 3626604ca..6c59b4d5c 100644
--- a/src/yuzu/bootmanager.h
+++ b/src/yuzu/bootmanager.h
@@ -59,6 +59,12 @@ public:
59 this->running = running; 59 this->running = running;
60 lock.unlock(); 60 lock.unlock();
61 running_cv.notify_all(); 61 running_cv.notify_all();
62 if (!running) {
63 running_wait.Set();
64 /// Wait until effectively paused
65 while (running_guard)
66 ;
67 }
62 } 68 }
63 69
64 /** 70 /**
@@ -84,6 +90,8 @@ private:
84 std::atomic_bool stop_run{false}; 90 std::atomic_bool stop_run{false};
85 std::mutex running_mutex; 91 std::mutex running_mutex;
86 std::condition_variable running_cv; 92 std::condition_variable running_cv;
93 Common::Event running_wait{};
94 std::atomic_bool running_guard{false};
87 95
88signals: 96signals:
89 /** 97 /**
diff --git a/src/yuzu/configuration/config.cpp b/src/yuzu/configuration/config.cpp
index 32c81dc70..bbbd96113 100644
--- a/src/yuzu/configuration/config.cpp
+++ b/src/yuzu/configuration/config.cpp
@@ -211,7 +211,7 @@ const std::array<int, Settings::NativeKeyboard::NumKeyboardMods> Config::default
211// This must be in alphabetical order according to action name as it must have the same order as 211// This must be in alphabetical order according to action name as it must have the same order as
212// UISetting::values.shortcuts, which is alphabetically ordered. 212// UISetting::values.shortcuts, which is alphabetically ordered.
213// clang-format off 213// clang-format off
214const std::array<UISettings::Shortcut, 15> Config::default_hotkeys{{ 214const std::array<UISettings::Shortcut, 16> Config::default_hotkeys{{
215 {QStringLiteral("Capture Screenshot"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+P"), Qt::ApplicationShortcut}}, 215 {QStringLiteral("Capture Screenshot"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+P"), Qt::ApplicationShortcut}},
216 {QStringLiteral("Change Docked Mode"), QStringLiteral("Main Window"), {QStringLiteral("F10"), Qt::ApplicationShortcut}}, 216 {QStringLiteral("Change Docked Mode"), QStringLiteral("Main Window"), {QStringLiteral("F10"), Qt::ApplicationShortcut}},
217 {QStringLiteral("Continue/Pause Emulation"), QStringLiteral("Main Window"), {QStringLiteral("F4"), Qt::WindowShortcut}}, 217 {QStringLiteral("Continue/Pause Emulation"), QStringLiteral("Main Window"), {QStringLiteral("F4"), Qt::WindowShortcut}},
@@ -222,6 +222,7 @@ const std::array<UISettings::Shortcut, 15> Config::default_hotkeys{{
222 {QStringLiteral("Increase Speed Limit"), QStringLiteral("Main Window"), {QStringLiteral("+"), Qt::ApplicationShortcut}}, 222 {QStringLiteral("Increase Speed Limit"), QStringLiteral("Main Window"), {QStringLiteral("+"), Qt::ApplicationShortcut}},
223 {QStringLiteral("Load Amiibo"), QStringLiteral("Main Window"), {QStringLiteral("F2"), Qt::ApplicationShortcut}}, 223 {QStringLiteral("Load Amiibo"), QStringLiteral("Main Window"), {QStringLiteral("F2"), Qt::ApplicationShortcut}},
224 {QStringLiteral("Load File"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+O"), Qt::WindowShortcut}}, 224 {QStringLiteral("Load File"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+O"), Qt::WindowShortcut}},
225 {QStringLiteral("Mute Audio"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+M"), Qt::WindowShortcut}},
225 {QStringLiteral("Restart Emulation"), QStringLiteral("Main Window"), {QStringLiteral("F6"), Qt::WindowShortcut}}, 226 {QStringLiteral("Restart Emulation"), QStringLiteral("Main Window"), {QStringLiteral("F6"), Qt::WindowShortcut}},
226 {QStringLiteral("Stop Emulation"), QStringLiteral("Main Window"), {QStringLiteral("F5"), Qt::WindowShortcut}}, 227 {QStringLiteral("Stop Emulation"), QStringLiteral("Main Window"), {QStringLiteral("F5"), Qt::WindowShortcut}},
227 {QStringLiteral("Toggle Filter Bar"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+F"), Qt::WindowShortcut}}, 228 {QStringLiteral("Toggle Filter Bar"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+F"), Qt::WindowShortcut}},
diff --git a/src/yuzu/configuration/config.h b/src/yuzu/configuration/config.h
index 5cd2a5feb..09316382c 100644
--- a/src/yuzu/configuration/config.h
+++ b/src/yuzu/configuration/config.h
@@ -27,7 +27,7 @@ public:
27 default_mouse_buttons; 27 default_mouse_buttons;
28 static const std::array<int, Settings::NativeKeyboard::NumKeyboardKeys> default_keyboard_keys; 28 static const std::array<int, Settings::NativeKeyboard::NumKeyboardKeys> default_keyboard_keys;
29 static const std::array<int, Settings::NativeKeyboard::NumKeyboardMods> default_keyboard_mods; 29 static const std::array<int, Settings::NativeKeyboard::NumKeyboardMods> default_keyboard_mods;
30 static const std::array<UISettings::Shortcut, 15> default_hotkeys; 30 static const std::array<UISettings::Shortcut, 16> default_hotkeys;
31 31
32private: 32private:
33 void ReadValues(); 33 void ReadValues();
diff --git a/src/yuzu/configuration/configure_general.cpp b/src/yuzu/configuration/configure_general.cpp
index cb95423e0..74b2ad537 100644
--- a/src/yuzu/configuration/configure_general.cpp
+++ b/src/yuzu/configuration/configure_general.cpp
@@ -23,6 +23,11 @@ ConfigureGeneral::ConfigureGeneral(QWidget* parent)
23ConfigureGeneral::~ConfigureGeneral() = default; 23ConfigureGeneral::~ConfigureGeneral() = default;
24 24
25void ConfigureGeneral::SetConfiguration() { 25void ConfigureGeneral::SetConfiguration() {
26 const bool runtime_lock = !Core::System::GetInstance().IsPoweredOn();
27
28 ui->use_multi_core->setEnabled(runtime_lock);
29 ui->use_multi_core->setChecked(Settings::values.use_multi_core);
30
26 ui->toggle_check_exit->setChecked(UISettings::values.confirm_before_closing); 31 ui->toggle_check_exit->setChecked(UISettings::values.confirm_before_closing);
27 ui->toggle_user_on_boot->setChecked(UISettings::values.select_user_on_boot); 32 ui->toggle_user_on_boot->setChecked(UISettings::values.select_user_on_boot);
28 ui->toggle_background_pause->setChecked(UISettings::values.pause_when_in_background); 33 ui->toggle_background_pause->setChecked(UISettings::values.pause_when_in_background);
@@ -41,6 +46,7 @@ void ConfigureGeneral::ApplyConfiguration() {
41 46
42 Settings::values.use_frame_limit = ui->toggle_frame_limit->isChecked(); 47 Settings::values.use_frame_limit = ui->toggle_frame_limit->isChecked();
43 Settings::values.frame_limit = ui->frame_limit->value(); 48 Settings::values.frame_limit = ui->frame_limit->value();
49 Settings::values.use_multi_core = ui->use_multi_core->isChecked();
44} 50}
45 51
46void ConfigureGeneral::changeEvent(QEvent* event) { 52void ConfigureGeneral::changeEvent(QEvent* event) {
diff --git a/src/yuzu/configuration/configure_general.ui b/src/yuzu/configuration/configure_general.ui
index fc3b7e65a..2711116a2 100644
--- a/src/yuzu/configuration/configure_general.ui
+++ b/src/yuzu/configuration/configure_general.ui
@@ -52,6 +52,13 @@
52 </layout> 52 </layout>
53 </item> 53 </item>
54 <item> 54 <item>
55 <widget class="QCheckBox" name="use_multi_core">
56 <property name="text">
57 <string>Multicore CPU Emulation</string>
58 </property>
59 </widget>
60 </item>
61 <item>
55 <widget class="QCheckBox" name="toggle_check_exit"> 62 <widget class="QCheckBox" name="toggle_check_exit">
56 <property name="text"> 63 <property name="text">
57 <string>Confirm exit while emulation is running</string> 64 <string>Confirm exit while emulation is running</string>
diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp
index c1ea25fb8..9bb0a0109 100644
--- a/src/yuzu/debugger/wait_tree.cpp
+++ b/src/yuzu/debugger/wait_tree.cpp
@@ -2,10 +2,13 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <fmt/format.h>
6
5#include "yuzu/debugger/wait_tree.h" 7#include "yuzu/debugger/wait_tree.h"
6#include "yuzu/util/util.h" 8#include "yuzu/util/util.h"
7 9
8#include "common/assert.h" 10#include "common/assert.h"
11#include "core/arm/arm_interface.h"
9#include "core/core.h" 12#include "core/core.h"
10#include "core/hle/kernel/handle_table.h" 13#include "core/hle/kernel/handle_table.h"
11#include "core/hle/kernel/mutex.h" 14#include "core/hle/kernel/mutex.h"
@@ -59,8 +62,10 @@ std::vector<std::unique_ptr<WaitTreeThread>> WaitTreeItem::MakeThreadItemList()
59 std::size_t row = 0; 62 std::size_t row = 0;
60 auto add_threads = [&](const std::vector<std::shared_ptr<Kernel::Thread>>& threads) { 63 auto add_threads = [&](const std::vector<std::shared_ptr<Kernel::Thread>>& threads) {
61 for (std::size_t i = 0; i < threads.size(); ++i) { 64 for (std::size_t i = 0; i < threads.size(); ++i) {
62 item_list.push_back(std::make_unique<WaitTreeThread>(*threads[i])); 65 if (!threads[i]->IsHLEThread()) {
63 item_list.back()->row = row; 66 item_list.push_back(std::make_unique<WaitTreeThread>(*threads[i]));
67 item_list.back()->row = row;
68 }
64 ++row; 69 ++row;
65 } 70 }
66 }; 71 };
@@ -114,20 +119,21 @@ QString WaitTreeCallstack::GetText() const {
114std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeCallstack::GetChildren() const { 119std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeCallstack::GetChildren() const {
115 std::vector<std::unique_ptr<WaitTreeItem>> list; 120 std::vector<std::unique_ptr<WaitTreeItem>> list;
116 121
117 constexpr std::size_t BaseRegister = 29; 122 if (thread.IsHLEThread()) {
118 auto& memory = Core::System::GetInstance().Memory(); 123 return list;
119 u64 base_pointer = thread.GetContext64().cpu_registers[BaseRegister]; 124 }
120 125
121 while (base_pointer != 0) { 126 if (thread.GetOwnerProcess() == nullptr || !thread.GetOwnerProcess()->Is64BitProcess()) {
122 const u64 lr = memory.Read64(base_pointer + sizeof(u64)); 127 return list;
123 if (lr == 0) { 128 }
124 break;
125 }
126 129
127 list.push_back(std::make_unique<WaitTreeText>( 130 auto backtrace = Core::ARM_Interface::GetBacktraceFromContext(Core::System::GetInstance(),
128 tr("0x%1").arg(lr - sizeof(u32), 16, 16, QLatin1Char{'0'}))); 131 thread.GetContext64());
129 132
130 base_pointer = memory.Read64(base_pointer); 133 for (auto& entry : backtrace) {
134 std::string s = fmt::format("{:20}{:016X} {:016X} {:016X} {}", entry.module, entry.address,
135 entry.original_address, entry.offset, entry.name);
136 list.push_back(std::make_unique<WaitTreeText>(QString::fromStdString(s)));
131 } 137 }
132 138
133 return list; 139 return list;
@@ -206,7 +212,15 @@ QString WaitTreeThread::GetText() const {
206 status = tr("running"); 212 status = tr("running");
207 break; 213 break;
208 case Kernel::ThreadStatus::Ready: 214 case Kernel::ThreadStatus::Ready:
209 status = tr("ready"); 215 if (!thread.IsPaused()) {
216 if (thread.WasRunning()) {
217 status = tr("running");
218 } else {
219 status = tr("ready");
220 }
221 } else {
222 status = tr("paused");
223 }
210 break; 224 break;
211 case Kernel::ThreadStatus::Paused: 225 case Kernel::ThreadStatus::Paused:
212 status = tr("paused"); 226 status = tr("paused");
@@ -254,7 +268,15 @@ QColor WaitTreeThread::GetColor() const {
254 case Kernel::ThreadStatus::Running: 268 case Kernel::ThreadStatus::Running:
255 return QColor(Qt::GlobalColor::darkGreen); 269 return QColor(Qt::GlobalColor::darkGreen);
256 case Kernel::ThreadStatus::Ready: 270 case Kernel::ThreadStatus::Ready:
257 return QColor(Qt::GlobalColor::darkBlue); 271 if (!thread.IsPaused()) {
272 if (thread.WasRunning()) {
273 return QColor(Qt::GlobalColor::darkGreen);
274 } else {
275 return QColor(Qt::GlobalColor::darkBlue);
276 }
277 } else {
278 return QColor(Qt::GlobalColor::lightGray);
279 }
258 case Kernel::ThreadStatus::Paused: 280 case Kernel::ThreadStatus::Paused:
259 return QColor(Qt::GlobalColor::lightGray); 281 return QColor(Qt::GlobalColor::lightGray);
260 case Kernel::ThreadStatus::WaitHLEEvent: 282 case Kernel::ThreadStatus::WaitHLEEvent:
@@ -319,7 +341,7 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
319 341
320 if (thread.GetStatus() == Kernel::ThreadStatus::WaitSynch) { 342 if (thread.GetStatus() == Kernel::ThreadStatus::WaitSynch) {
321 list.push_back(std::make_unique<WaitTreeObjectList>(thread.GetSynchronizationObjects(), 343 list.push_back(std::make_unique<WaitTreeObjectList>(thread.GetSynchronizationObjects(),
322 thread.IsSleepingOnWait())); 344 thread.IsWaitingSync()));
323 } 345 }
324 346
325 list.push_back(std::make_unique<WaitTreeCallstack>(thread)); 347 list.push_back(std::make_unique<WaitTreeCallstack>(thread));
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index 0820f8c7e..880a6a06e 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -56,6 +56,7 @@ static FileSys::VirtualFile VfsDirectoryCreateFileWrapper(const FileSys::Virtual
56#include <QShortcut> 56#include <QShortcut>
57#include <QStatusBar> 57#include <QStatusBar>
58#include <QSysInfo> 58#include <QSysInfo>
59#include <QUrl>
59#include <QtConcurrent/QtConcurrent> 60#include <QtConcurrent/QtConcurrent>
60 61
61#include <fmt/format.h> 62#include <fmt/format.h>
@@ -533,14 +534,36 @@ void GMainWindow::InitializeWidgets() {
533 if (emulation_running) { 534 if (emulation_running) {
534 return; 535 return;
535 } 536 }
536 Settings::values.use_asynchronous_gpu_emulation = 537 bool is_async =
537 !Settings::values.use_asynchronous_gpu_emulation; 538 !Settings::values.use_asynchronous_gpu_emulation || Settings::values.use_multi_core;
539 Settings::values.use_asynchronous_gpu_emulation = is_async;
538 async_status_button->setChecked(Settings::values.use_asynchronous_gpu_emulation); 540 async_status_button->setChecked(Settings::values.use_asynchronous_gpu_emulation);
539 Settings::Apply(); 541 Settings::Apply();
540 }); 542 });
541 async_status_button->setText(tr("ASYNC")); 543 async_status_button->setText(tr("ASYNC"));
542 async_status_button->setCheckable(true); 544 async_status_button->setCheckable(true);
543 async_status_button->setChecked(Settings::values.use_asynchronous_gpu_emulation); 545 async_status_button->setChecked(Settings::values.use_asynchronous_gpu_emulation);
546
547 // Setup Multicore button
548 multicore_status_button = new QPushButton();
549 multicore_status_button->setObjectName(QStringLiteral("TogglableStatusBarButton"));
550 multicore_status_button->setFocusPolicy(Qt::NoFocus);
551 connect(multicore_status_button, &QPushButton::clicked, [&] {
552 if (emulation_running) {
553 return;
554 }
555 Settings::values.use_multi_core = !Settings::values.use_multi_core;
556 bool is_async =
557 Settings::values.use_asynchronous_gpu_emulation || Settings::values.use_multi_core;
558 Settings::values.use_asynchronous_gpu_emulation = is_async;
559 async_status_button->setChecked(Settings::values.use_asynchronous_gpu_emulation);
560 multicore_status_button->setChecked(Settings::values.use_multi_core);
561 Settings::Apply();
562 });
563 multicore_status_button->setText(tr("MULTICORE"));
564 multicore_status_button->setCheckable(true);
565 multicore_status_button->setChecked(Settings::values.use_multi_core);
566 statusBar()->insertPermanentWidget(0, multicore_status_button);
544 statusBar()->insertPermanentWidget(0, async_status_button); 567 statusBar()->insertPermanentWidget(0, async_status_button);
545 568
546 // Setup Renderer API button 569 // Setup Renderer API button
@@ -741,6 +764,9 @@ void GMainWindow::InitializeHotkeys() {
741 Settings::values.use_docked_mode); 764 Settings::values.use_docked_mode);
742 dock_status_button->setChecked(Settings::values.use_docked_mode); 765 dock_status_button->setChecked(Settings::values.use_docked_mode);
743 }); 766 });
767 connect(hotkey_registry.GetHotkey(main_window, QStringLiteral("Mute Audio"), this),
768 &QShortcut::activated, this,
769 [] { Settings::values.audio_muted = !Settings::values.audio_muted; });
744} 770}
745 771
746void GMainWindow::SetDefaultUIGeometry() { 772void GMainWindow::SetDefaultUIGeometry() {
@@ -841,6 +867,7 @@ void GMainWindow::ConnectMenuEvents() {
841 connect(ui.action_Stop, &QAction::triggered, this, &GMainWindow::OnStopGame); 867 connect(ui.action_Stop, &QAction::triggered, this, &GMainWindow::OnStopGame);
842 connect(ui.action_Report_Compatibility, &QAction::triggered, this, 868 connect(ui.action_Report_Compatibility, &QAction::triggered, this,
843 &GMainWindow::OnMenuReportCompatibility); 869 &GMainWindow::OnMenuReportCompatibility);
870 connect(ui.action_Open_Mods_Page, &QAction::triggered, this, &GMainWindow::OnOpenModsPage);
844 connect(ui.action_Restart, &QAction::triggered, this, [this] { BootGame(QString(game_path)); }); 871 connect(ui.action_Restart, &QAction::triggered, this, [this] { BootGame(QString(game_path)); });
845 connect(ui.action_Configure, &QAction::triggered, this, &GMainWindow::OnConfigure); 872 connect(ui.action_Configure, &QAction::triggered, this, &GMainWindow::OnConfigure);
846 873
@@ -921,6 +948,8 @@ bool GMainWindow::LoadROM(const QString& filename) {
921 nullptr, // E-Commerce 948 nullptr, // E-Commerce
922 }); 949 });
923 950
951 system.RegisterHostThread();
952
924 const Core::System::ResultStatus result{system.Load(*render_window, filename.toStdString())}; 953 const Core::System::ResultStatus result{system.Load(*render_window, filename.toStdString())};
925 954
926 const auto drd_callout = 955 const auto drd_callout =
@@ -1037,6 +1066,7 @@ void GMainWindow::BootGame(const QString& filename) {
1037 } 1066 }
1038 status_bar_update_timer.start(2000); 1067 status_bar_update_timer.start(2000);
1039 async_status_button->setDisabled(true); 1068 async_status_button->setDisabled(true);
1069 multicore_status_button->setDisabled(true);
1040 renderer_status_button->setDisabled(true); 1070 renderer_status_button->setDisabled(true);
1041 1071
1042 if (UISettings::values.hide_mouse) { 1072 if (UISettings::values.hide_mouse) {
@@ -1124,6 +1154,7 @@ void GMainWindow::ShutdownGame() {
1124 game_fps_label->setVisible(false); 1154 game_fps_label->setVisible(false);
1125 emu_frametime_label->setVisible(false); 1155 emu_frametime_label->setVisible(false);
1126 async_status_button->setEnabled(true); 1156 async_status_button->setEnabled(true);
1157 multicore_status_button->setEnabled(true);
1127#ifdef HAS_VULKAN 1158#ifdef HAS_VULKAN
1128 renderer_status_button->setEnabled(true); 1159 renderer_status_button->setEnabled(true);
1129#endif 1160#endif
@@ -1808,6 +1839,16 @@ void GMainWindow::OnMenuReportCompatibility() {
1808 } 1839 }
1809} 1840}
1810 1841
1842void GMainWindow::OnOpenModsPage() {
1843 const auto mods_page_url = QStringLiteral("https://github.com/yuzu-emu/yuzu/wiki/Switch-Mods");
1844 const QUrl mods_page(mods_page_url);
1845 const bool open = QDesktopServices::openUrl(mods_page);
1846 if (!open) {
1847 QMessageBox::warning(this, tr("Error opening URL"),
1848 tr("Unable to open the URL \"%1\".").arg(mods_page_url));
1849 }
1850}
1851
1811void GMainWindow::ToggleFullscreen() { 1852void GMainWindow::ToggleFullscreen() {
1812 if (!emulation_running) { 1853 if (!emulation_running) {
1813 return; 1854 return;
@@ -1919,7 +1960,11 @@ void GMainWindow::OnConfigure() {
1919 } 1960 }
1920 1961
1921 dock_status_button->setChecked(Settings::values.use_docked_mode); 1962 dock_status_button->setChecked(Settings::values.use_docked_mode);
1963 multicore_status_button->setChecked(Settings::values.use_multi_core);
1964 Settings::values.use_asynchronous_gpu_emulation =
1965 Settings::values.use_asynchronous_gpu_emulation || Settings::values.use_multi_core;
1922 async_status_button->setChecked(Settings::values.use_asynchronous_gpu_emulation); 1966 async_status_button->setChecked(Settings::values.use_asynchronous_gpu_emulation);
1967
1923#ifdef HAS_VULKAN 1968#ifdef HAS_VULKAN
1924 renderer_status_button->setChecked(Settings::values.renderer_backend == 1969 renderer_status_button->setChecked(Settings::values.renderer_backend ==
1925 Settings::RendererBackend::Vulkan); 1970 Settings::RendererBackend::Vulkan);
@@ -2046,7 +2091,7 @@ void GMainWindow::UpdateStatusBar() {
2046 game_fps_label->setText(tr("Game: %1 FPS").arg(results.game_fps, 0, 'f', 0)); 2091 game_fps_label->setText(tr("Game: %1 FPS").arg(results.game_fps, 0, 'f', 0));
2047 emu_frametime_label->setText(tr("Frame: %1 ms").arg(results.frametime * 1000.0, 0, 'f', 2)); 2092 emu_frametime_label->setText(tr("Frame: %1 ms").arg(results.frametime * 1000.0, 0, 'f', 2));
2048 2093
2049 emu_speed_label->setVisible(true); 2094 emu_speed_label->setVisible(!Settings::values.use_multi_core);
2050 game_fps_label->setVisible(true); 2095 game_fps_label->setVisible(true);
2051 emu_frametime_label->setVisible(true); 2096 emu_frametime_label->setVisible(true);
2052} 2097}
diff --git a/src/yuzu/main.h b/src/yuzu/main.h
index 4f4c8ddbe..5581874ed 100644
--- a/src/yuzu/main.h
+++ b/src/yuzu/main.h
@@ -181,6 +181,7 @@ private slots:
181 void OnPauseGame(); 181 void OnPauseGame();
182 void OnStopGame(); 182 void OnStopGame();
183 void OnMenuReportCompatibility(); 183 void OnMenuReportCompatibility();
184 void OnOpenModsPage();
184 /// Called whenever a user selects a game in the game list widget. 185 /// Called whenever a user selects a game in the game list widget.
185 void OnGameListLoadFile(QString game_path); 186 void OnGameListLoadFile(QString game_path);
186 void OnGameListOpenFolder(GameListOpenTarget target, const std::string& game_path); 187 void OnGameListOpenFolder(GameListOpenTarget target, const std::string& game_path);
@@ -234,6 +235,7 @@ private:
234 QLabel* game_fps_label = nullptr; 235 QLabel* game_fps_label = nullptr;
235 QLabel* emu_frametime_label = nullptr; 236 QLabel* emu_frametime_label = nullptr;
236 QPushButton* async_status_button = nullptr; 237 QPushButton* async_status_button = nullptr;
238 QPushButton* multicore_status_button = nullptr;
237 QPushButton* renderer_status_button = nullptr; 239 QPushButton* renderer_status_button = nullptr;
238 QPushButton* dock_status_button = nullptr; 240 QPushButton* dock_status_button = nullptr;
239 QTimer status_bar_update_timer; 241 QTimer status_bar_update_timer;
diff --git a/src/yuzu/main.ui b/src/yuzu/main.ui
index 97c90f50b..b5745dfd5 100644
--- a/src/yuzu/main.ui
+++ b/src/yuzu/main.ui
@@ -113,6 +113,7 @@
113 <string>&amp;Help</string> 113 <string>&amp;Help</string>
114 </property> 114 </property>
115 <addaction name="action_Report_Compatibility"/> 115 <addaction name="action_Report_Compatibility"/>
116 <addaction name="action_Open_Mods_Page"/>
116 <addaction name="separator"/> 117 <addaction name="separator"/>
117 <addaction name="action_About"/> 118 <addaction name="action_About"/>
118 </widget> 119 </widget>
@@ -256,6 +257,11 @@
256 <bool>false</bool> 257 <bool>false</bool>
257 </property> 258 </property>
258 </action> 259 </action>
260 <action name="action_Open_Mods_Page">
261 <property name="text">
262 <string>Open Mods Page</string>
263 </property>
264 </action>
259 <action name="action_Open_yuzu_Folder"> 265 <action name="action_Open_yuzu_Folder">
260 <property name="text"> 266 <property name="text">
261 <string>Open yuzu Folder</string> 267 <string>Open yuzu Folder</string>
diff --git a/src/yuzu_cmd/yuzu.cpp b/src/yuzu_cmd/yuzu.cpp
index 4d2ea7e9e..e6c6a839d 100644
--- a/src/yuzu_cmd/yuzu.cpp
+++ b/src/yuzu_cmd/yuzu.cpp
@@ -2,6 +2,7 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <chrono>
5#include <iostream> 6#include <iostream>
6#include <memory> 7#include <memory>
7#include <string> 8#include <string>
@@ -236,9 +237,11 @@ int main(int argc, char** argv) {
236 system.Renderer().Rasterizer().LoadDiskResources(); 237 system.Renderer().Rasterizer().LoadDiskResources();
237 238
238 std::thread render_thread([&emu_window] { emu_window->Present(); }); 239 std::thread render_thread([&emu_window] { emu_window->Present(); });
240 system.Run();
239 while (emu_window->IsOpen()) { 241 while (emu_window->IsOpen()) {
240 system.RunLoop(); 242 std::this_thread::sleep_for(std::chrono::milliseconds(1));
241 } 243 }
244 system.Pause();
242 render_thread.join(); 245 render_thread.join();
243 246
244 system.Shutdown(); 247 system.Shutdown();
diff --git a/src/yuzu_tester/yuzu.cpp b/src/yuzu_tester/yuzu.cpp
index 676e70ebd..083667baf 100644
--- a/src/yuzu_tester/yuzu.cpp
+++ b/src/yuzu_tester/yuzu.cpp
@@ -2,6 +2,7 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <chrono>
5#include <iostream> 6#include <iostream>
6#include <memory> 7#include <memory>
7#include <string> 8#include <string>
@@ -255,9 +256,11 @@ int main(int argc, char** argv) {
255 system.GPU().Start(); 256 system.GPU().Start();
256 system.Renderer().Rasterizer().LoadDiskResources(); 257 system.Renderer().Rasterizer().LoadDiskResources();
257 258
259 system.Run();
258 while (!finished) { 260 while (!finished) {
259 system.RunLoop(); 261 std::this_thread::sleep_for(std::chrono::milliseconds(1));
260 } 262 }
263 system.Pause();
261 264
262 detached_tasks.WaitForAllTasks(); 265 detached_tasks.WaitForAllTasks();
263 return return_value; 266 return return_value;