-rw-r--r--  CMakeLists.txt  88
-rw-r--r--  src/CMakeLists.txt  61
-rw-r--r--  src/audio_core/stream.cpp  2
-rw-r--r--  src/common/CMakeLists.txt  1
-rw-r--r--  src/common/bit_util.h  39
-rw-r--r--  src/common/detached_tasks.cpp  8
-rw-r--r--  src/common/logging/backend.cpp  6
-rw-r--r--  src/common/multi_level_queue.h  337
-rw-r--r--  src/common/thread.cpp  37
-rw-r--r--  src/common/thread.h  14
-rw-r--r--  src/common/threadsafe_queue.h  4
-rw-r--r--  src/core/CMakeLists.txt  4
-rw-r--r--  src/core/core_cpu.cpp  6
-rw-r--r--  src/core/core_timing.cpp  2
-rw-r--r--  src/core/core_timing.h  2
-rw-r--r--  src/core/file_sys/cheat_engine.cpp  2
-rw-r--r--  src/core/file_sys/errors.h  3
-rw-r--r--  src/core/file_sys/patch_manager.cpp  25
-rw-r--r--  src/core/file_sys/system_archive/system_archive.cpp  3
-rw-r--r--  src/core/file_sys/system_archive/system_version.cpp  52
-rw-r--r--  src/core/file_sys/system_archive/system_version.h  16
-rw-r--r--  src/core/frontend/emu_window.cpp  6
-rw-r--r--  src/core/hle/kernel/address_arbiter.cpp  6
-rw-r--r--  src/core/hle/kernel/kernel.cpp  7
-rw-r--r--  src/core/hle/kernel/kernel.h  3
-rw-r--r--  src/core/hle/kernel/object.cpp  1
-rw-r--r--  src/core/hle/kernel/object.h  1
-rw-r--r--  src/core/hle/kernel/process.cpp  18
-rw-r--r--  src/core/hle/kernel/process.h  19
-rw-r--r--  src/core/hle/kernel/scheduler.cpp  60
-rw-r--r--  src/core/hle/kernel/scheduler.h  6
-rw-r--r--  src/core/hle/kernel/svc.cpp  156
-rw-r--r--  src/core/hle/kernel/thread.cpp  5
-rw-r--r--  src/core/hle/kernel/thread.h  3
-rw-r--r--  src/core/hle/kernel/transfer_memory.cpp  73
-rw-r--r--  src/core/hle/kernel/transfer_memory.h  91
-rw-r--r--  src/core/hle/kernel/vm_manager.cpp  76
-rw-r--r--  src/core/hle/kernel/vm_manager.h  55
-rw-r--r--  src/core/hle/service/fatal/fatal.cpp  89
-rw-r--r--  src/core/hle/service/hid/hid.cpp  10
-rw-r--r--  src/core/hle/service/hid/hid.h  2
-rw-r--r--  src/core/hle/service/nfp/nfp.cpp  2
-rw-r--r--  src/core/hle/service/nvflinger/nvflinger.cpp  4
-rw-r--r--  src/core/hle/service/set/set_sys.cpp  79
-rw-r--r--  src/core/hle/service/set/set_sys.h  2
-rw-r--r--  src/core/loader/nso.cpp  101
-rw-r--r--  src/core/loader/nso.h  39
-rw-r--r--  src/core/perf_stats.cpp  10
-rw-r--r--  src/input_common/keyboard.cpp  8
-rw-r--r--  src/input_common/motion_emu.cpp  10
-rw-r--r--  src/input_common/sdl/sdl_impl.cpp  26
-rw-r--r--  src/tests/CMakeLists.txt  2
-rw-r--r--  src/tests/common/bit_utils.cpp  23
-rw-r--r--  src/tests/common/multi_level_queue.cpp  55
-rw-r--r--  src/video_core/debug_utils/debug_utils.cpp  4
-rw-r--r--  src/video_core/debug_utils/debug_utils.h  4
-rw-r--r--  src/video_core/gpu.cpp  9
-rw-r--r--  src/video_core/gpu.h  6
-rw-r--r--  src/video_core/gpu_thread.cpp  4
-rw-r--r--  src/video_core/gpu_thread.h  15
-rw-r--r--  src/video_core/rasterizer_cache.h  16
-rw-r--r--  src/video_core/renderer_opengl/gl_buffer_cache.cpp  4
-rw-r--r--  src/video_core/renderer_opengl/gl_global_cache.cpp  2
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.cpp  8
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.h  4
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer_cache.cpp  4
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer_cache.h  4
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_cache.cpp  12
-rw-r--r--  src/video_core/renderer_opengl/renderer_opengl.cpp  2
-rw-r--r--  src/video_core/renderer_vulkan/vk_buffer_cache.cpp  4
-rw-r--r--  src/video_core/renderer_vulkan/vk_resource_manager.cpp  2
-rw-r--r--  src/video_core/renderer_vulkan/vk_resource_manager.h  2
-rw-r--r--  src/web_service/web_backend.cpp  4
-rw-r--r--  src/yuzu/applets/profile_select.cpp  2
-rw-r--r--  src/yuzu/applets/software_keyboard.cpp  4
-rw-r--r--  src/yuzu/applets/web_browser.cpp  4
-rw-r--r--  src/yuzu/bootmanager.cpp  2
-rw-r--r--  src/yuzu/bootmanager.h  2
-rw-r--r--  src/yuzu/debugger/wait_tree.cpp  4
79 files changed, 1399 insertions, 489 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index a4914f37d..ab18275d3 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -104,78 +104,12 @@ endif()
104message(STATUS "Target architecture: ${ARCHITECTURE}") 104message(STATUS "Target architecture: ${ARCHITECTURE}")
105 105
106 106
107# Configure compilation flags 107# Configure C++ standard
108# =========================== 108# ===========================
109 109
110set(CMAKE_CXX_STANDARD 17) 110set(CMAKE_CXX_STANDARD 17)
111set(CMAKE_CXX_STANDARD_REQUIRED ON) 111set(CMAKE_CXX_STANDARD_REQUIRED ON)
112 112
113if (NOT MSVC)
114 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-attributes")
115 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
116
117 if (MINGW)
118 add_definitions(-DMINGW_HAS_SECURE_API)
119
120 if (MINGW_STATIC_BUILD)
121 add_definitions(-DQT_STATICPLUGIN)
122 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -static")
123 set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static")
124 endif()
125 endif()
126else()
127 # Silence "deprecation" warnings
128 add_definitions(/D_CRT_SECURE_NO_WARNINGS /D_CRT_NONSTDC_NO_DEPRECATE /D_SCL_SECURE_NO_WARNINGS)
129 # Avoid windows.h junk
130 add_definitions(/DNOMINMAX)
131 # Avoid windows.h from including some usually unused libs like winsocks.h, since this might cause some redefinition errors.
132 add_definitions(/DWIN32_LEAN_AND_MEAN)
133
134 set(CMAKE_CONFIGURATION_TYPES Debug Release CACHE STRING "" FORCE)
135
136 # Tweak optimization settings
137 # As far as I can tell, there's no way to override the CMake defaults while leaving user
138 # changes intact, so we'll just clobber everything and say sorry.
139 message(STATUS "Cache compiler flags ignored, please edit CMakeLists.txt to change the flags.")
140
141 # /W3 - Level 3 warnings
142 # /MP - Multi-threaded compilation
143 # /Zi - Output debugging information
144 # /Zo - enhanced debug info for optimized builds
145 # /permissive- - enables stricter C++ standards conformance checks
146 set(CMAKE_C_FLAGS "/W3 /MP /Zi /Zo /permissive-" CACHE STRING "" FORCE)
147 # /EHsc - C++-only exception handling semantics
148 # /Zc:throwingNew - let codegen assume `operator new` will never return null
149 # /Zc:inline - let codegen omit inline functions in object files
150 set(CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} /EHsc /std:c++latest /Zc:throwingNew,inline" CACHE STRING "" FORCE)
151
152 # /MDd - Multi-threaded Debug Runtime DLL
153 set(CMAKE_C_FLAGS_DEBUG "/Od /MDd" CACHE STRING "" FORCE)
154 set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG}" CACHE STRING "" FORCE)
155
156 # /O2 - Optimization level 2
157 # /GS- - No stack buffer overflow checks
158 # /MD - Multi-threaded runtime DLL
159 set(CMAKE_C_FLAGS_RELEASE "/O2 /GS- /MD" CACHE STRING "" FORCE)
160 set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE}" CACHE STRING "" FORCE)
161
162 set(CMAKE_EXE_LINKER_FLAGS_DEBUG "/DEBUG /MANIFEST:NO" CACHE STRING "" FORCE)
163 set(CMAKE_EXE_LINKER_FLAGS_RELEASE "/DEBUG /MANIFEST:NO /INCREMENTAL:NO /OPT:REF,ICF" CACHE STRING "" FORCE)
164endif()
165
166# Set file offset size to 64 bits.
167#
168# On modern Unixes, this is typically already the case. The lone exception is
169# glibc, which may default to 32 bits. glibc allows this to be configured
170# by setting _FILE_OFFSET_BITS.
171if(CMAKE_SYSTEM_NAME STREQUAL "Linux" OR MINGW)
172 add_definitions(-D_FILE_OFFSET_BITS=64)
173endif()
174
175# CMake seems to only define _DEBUG on Windows
176set_property(DIRECTORY APPEND PROPERTY
177 COMPILE_DEFINITIONS $<$<CONFIG:Debug>:_DEBUG> $<$<NOT:$<CONFIG:Debug>>:NDEBUG>)
178
179# System imported libraries 113# System imported libraries
180# ====================== 114# ======================
181 115
@@ -326,25 +260,21 @@ endif()
326# Platform-specific library requirements 260# Platform-specific library requirements
327# ====================================== 261# ======================================
328 262
329IF (APPLE) 263if (APPLE)
330 find_library(COCOA_LIBRARY Cocoa) # Umbrella framework for everything GUI-related 264 # Umbrella framework for everything GUI-related
265 find_library(COCOA_LIBRARY Cocoa)
331 set(PLATFORM_LIBRARIES ${COCOA_LIBRARY} ${IOKIT_LIBRARY} ${COREVIDEO_LIBRARY}) 266 set(PLATFORM_LIBRARIES ${COCOA_LIBRARY} ${IOKIT_LIBRARY} ${COREVIDEO_LIBRARY})
332 267elseif (WIN32)
333 if (CMAKE_CXX_COMPILER_ID STREQUAL Clang)
334 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
335 set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -stdlib=libc++")
336 endif()
337ELSEIF (WIN32)
338 # WSAPoll and SHGetKnownFolderPath (AppData/Roaming) didn't exist before WinNT 6.x (Vista) 268 # WSAPoll and SHGetKnownFolderPath (AppData/Roaming) didn't exist before WinNT 6.x (Vista)
339 add_definitions(-D_WIN32_WINNT=0x0600 -DWINVER=0x0600) 269 add_definitions(-D_WIN32_WINNT=0x0600 -DWINVER=0x0600)
340 set(PLATFORM_LIBRARIES winmm ws2_32) 270 set(PLATFORM_LIBRARIES winmm ws2_32)
341 IF (MINGW) 271 if (MINGW)
342 # PSAPI is the Process Status API 272 # PSAPI is the Process Status API
343 set(PLATFORM_LIBRARIES ${PLATFORM_LIBRARIES} psapi imm32 version) 273 set(PLATFORM_LIBRARIES ${PLATFORM_LIBRARIES} psapi imm32 version)
344 ENDIF (MINGW) 274 endif()
345ELSEIF (CMAKE_SYSTEM_NAME MATCHES "^(Linux|kFreeBSD|GNU|SunOS)$") 275elseif (CMAKE_SYSTEM_NAME MATCHES "^(Linux|kFreeBSD|GNU|SunOS)$")
346 set(PLATFORM_LIBRARIES rt) 276 set(PLATFORM_LIBRARIES rt)
347ENDIF (APPLE) 277endif()
348 278
349# Setup a custom clang-format target (if clang-format can be found) that will run 279# Setup a custom clang-format target (if clang-format can be found) that will run
350# against all the src files. This should be used before making a pull request. 280# against all the src files. This should be used before making a pull request.
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index f69d00a2b..6c99dd5e2 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -1,18 +1,79 @@
1# Enable modules to include each other's files 1# Enable modules to include each other's files
2include_directories(.) 2include_directories(.)
3 3
4# CMake seems to only define _DEBUG on Windows
5set_property(DIRECTORY APPEND PROPERTY
6 COMPILE_DEFINITIONS $<$<CONFIG:Debug>:_DEBUG> $<$<NOT:$<CONFIG:Debug>>:NDEBUG>)
7
8# Set compilation flags
9if (MSVC)
10 set(CMAKE_CONFIGURATION_TYPES Debug Release CACHE STRING "" FORCE)
11
12 # Silence "deprecation" warnings
13 add_definitions(-D_CRT_SECURE_NO_WARNINGS -D_CRT_NONSTDC_NO_DEPRECATE -D_SCL_SECURE_NO_WARNINGS)
14
15 # Avoid windows.h junk
16 add_definitions(-DNOMINMAX)
17
18 # Avoid windows.h from including some usually unused libs like winsocks.h, since this might cause some redefinition errors.
19 add_definitions(-DWIN32_LEAN_AND_MEAN)
20
21 # /W3 - Level 3 warnings
22 # /MP - Multi-threaded compilation
23 # /Zi - Output debugging information
24 # /Zo - enhanced debug info for optimized builds
25 # /permissive- - enables stricter C++ standards conformance checks
26 # /EHsc - C++-only exception handling semantics
27 # /Zc:throwingNew - let codegen assume `operator new` will never return null
28 # /Zc:inline - let codegen omit inline functions in object files
29 add_compile_options(/W3 /MP /Zi /Zo /permissive- /EHsc /std:c++latest /Zc:throwingNew,inline)
30
31 # /GS- - No stack buffer overflow checks
32 add_compile_options("$<$<CONFIG:Release>:/GS->")
33
34 set(CMAKE_EXE_LINKER_FLAGS_DEBUG "/DEBUG /MANIFEST:NO" CACHE STRING "" FORCE)
35 set(CMAKE_EXE_LINKER_FLAGS_RELEASE "/DEBUG /MANIFEST:NO /INCREMENTAL:NO /OPT:REF,ICF" CACHE STRING "" FORCE)
36else()
37 add_compile_options("-Wno-attributes")
38
39 if (APPLE AND CMAKE_CXX_COMPILER_ID STREQUAL Clang)
40 add_compile_options("-stdlib=libc++")
41 endif()
42
43 # Set file offset size to 64 bits.
44 #
45 # On modern Unixes, this is typically already the case. The lone exception is
46 # glibc, which may default to 32 bits. glibc allows this to be configured
47 # by setting _FILE_OFFSET_BITS.
48 if(CMAKE_SYSTEM_NAME STREQUAL "Linux" OR MINGW)
49 add_definitions(-D_FILE_OFFSET_BITS=64)
50 endif()
51
52 if (MINGW)
53 add_definitions(-DMINGW_HAS_SECURE_API)
54
55 if (MINGW_STATIC_BUILD)
56 add_definitions(-DQT_STATICPLUGIN)
57 add_compile_options("-static")
58 endif()
59 endif()
60endif()
61
4add_subdirectory(common) 62add_subdirectory(common)
5add_subdirectory(core) 63add_subdirectory(core)
6add_subdirectory(audio_core) 64add_subdirectory(audio_core)
7add_subdirectory(video_core) 65add_subdirectory(video_core)
8add_subdirectory(input_common) 66add_subdirectory(input_common)
9add_subdirectory(tests) 67add_subdirectory(tests)
68
10if (ENABLE_SDL2) 69if (ENABLE_SDL2)
11 add_subdirectory(yuzu_cmd) 70 add_subdirectory(yuzu_cmd)
12endif() 71endif()
72
13if (ENABLE_QT) 73if (ENABLE_QT)
14 add_subdirectory(yuzu) 74 add_subdirectory(yuzu)
15endif() 75endif()
76
16if (ENABLE_WEB_SERVICE) 77if (ENABLE_WEB_SERVICE)
17 add_subdirectory(web_service) 78 add_subdirectory(web_service)
18endif() 79endif()
diff --git a/src/audio_core/stream.cpp b/src/audio_core/stream.cpp
index 4b66a6786..22a3f8c84 100644
--- a/src/audio_core/stream.cpp
+++ b/src/audio_core/stream.cpp
@@ -38,7 +38,7 @@ Stream::Stream(Core::Timing::CoreTiming& core_timing, u32 sample_rate, Format fo
38 sink_stream{sink_stream}, core_timing{core_timing}, name{std::move(name_)} { 38 sink_stream{sink_stream}, core_timing{core_timing}, name{std::move(name_)} {
39 39
40 release_event = core_timing.RegisterEvent( 40 release_event = core_timing.RegisterEvent(
41 name, [this](u64 userdata, int cycles_late) { ReleaseActiveBuffer(); }); 41 name, [this](u64 userdata, s64 cycles_late) { ReleaseActiveBuffer(); });
42} 42}
43 43
44void Stream::Play() { 44void Stream::Play() {
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index 43ae8a9e7..850ce8006 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -98,6 +98,7 @@ add_library(common STATIC
98 microprofile.h 98 microprofile.h
99 microprofileui.h 99 microprofileui.h
100 misc.cpp 100 misc.cpp
101 multi_level_queue.h
101 page_table.cpp 102 page_table.cpp
102 page_table.h 103 page_table.h
103 param_package.cpp 104 param_package.cpp
diff --git a/src/common/bit_util.h b/src/common/bit_util.h
index 1eea17ba1..a4f9ed4aa 100644
--- a/src/common/bit_util.h
+++ b/src/common/bit_util.h
@@ -58,4 +58,43 @@ inline u64 CountLeadingZeroes64(u64 value) {
58 return __builtin_clzll(value); 58 return __builtin_clzll(value);
59} 59}
60#endif 60#endif
61
62#ifdef _MSC_VER
63inline u32 CountTrailingZeroes32(u32 value) {
64 unsigned long trailing_zero = 0;
65
66 if (_BitScanForward(&trailing_zero, value) != 0) {
67 return trailing_zero;
68 }
69
70 return 32;
71}
72
73inline u64 CountTrailingZeroes64(u64 value) {
74 unsigned long trailing_zero = 0;
75
76 if (_BitScanForward64(&trailing_zero, value) != 0) {
77 return trailing_zero;
78 }
79
80 return 64;
81}
82#else
83inline u32 CountTrailingZeroes32(u32 value) {
84 if (value == 0) {
85 return 32;
86 }
87
88 return __builtin_ctz(value);
89}
90
91inline u64 CountTrailingZeroes64(u64 value) {
92 if (value == 0) {
93 return 64;
94 }
95
96 return __builtin_ctzll(value);
97}
98#endif
99
61} // namespace Common 100} // namespace Common
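
For reference, the new trailing-zero helpers mirror the existing CountLeadingZeroes functions and return the full bit width when given zero. A minimal usage sketch (hypothetical caller, not part of this commit):

#include "common/bit_util.h"

void TrailingZeroExample() {
    const u32 a = Common::CountTrailingZeroes32(0x8);        // == 3
    const u32 b = Common::CountTrailingZeroes32(0);          // == 32, zero input returns the bit width
    const u64 c = Common::CountTrailingZeroes64(1ULL << 40); // == 40
}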
diff --git a/src/common/detached_tasks.cpp b/src/common/detached_tasks.cpp
index a347d9e02..f268d6021 100644
--- a/src/common/detached_tasks.cpp
+++ b/src/common/detached_tasks.cpp
@@ -16,22 +16,22 @@ DetachedTasks::DetachedTasks() {
16} 16}
17 17
18void DetachedTasks::WaitForAllTasks() { 18void DetachedTasks::WaitForAllTasks() {
19 std::unique_lock<std::mutex> lock(mutex); 19 std::unique_lock lock{mutex};
20 cv.wait(lock, [this]() { return count == 0; }); 20 cv.wait(lock, [this]() { return count == 0; });
21} 21}
22 22
23DetachedTasks::~DetachedTasks() { 23DetachedTasks::~DetachedTasks() {
24 std::unique_lock<std::mutex> lock(mutex); 24 std::unique_lock lock{mutex};
25 ASSERT(count == 0); 25 ASSERT(count == 0);
26 instance = nullptr; 26 instance = nullptr;
27} 27}
28 28
29void DetachedTasks::AddTask(std::function<void()> task) { 29void DetachedTasks::AddTask(std::function<void()> task) {
30 std::unique_lock<std::mutex> lock(instance->mutex); 30 std::unique_lock lock{instance->mutex};
31 ++instance->count; 31 ++instance->count;
32 std::thread([task{std::move(task)}]() { 32 std::thread([task{std::move(task)}]() {
33 task(); 33 task();
34 std::unique_lock<std::mutex> lock(instance->mutex); 34 std::unique_lock lock{instance->mutex};
35 --instance->count; 35 --instance->count;
36 std::notify_all_at_thread_exit(instance->cv, std::move(lock)); 36 std::notify_all_at_thread_exit(instance->cv, std::move(lock));
37 }) 37 })
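
The lock changes in this file (and the matching ones throughout the commit) rely on C++17 class template argument deduction, so the mutex type no longer needs to be spelled out. A small illustrative sketch, not taken from the diff:

#include <mutex>

std::mutex m;

void OldStyle() {
    std::lock_guard<std::mutex> lock(m); // explicit template argument, pre-C++17 style
}

void NewStyle() {
    std::lock_guard lock{m}; // CTAD deduces std::lock_guard<std::mutex>
}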
diff --git a/src/common/logging/backend.cpp b/src/common/logging/backend.cpp
index 4462ff3fb..a03179520 100644
--- a/src/common/logging/backend.cpp
+++ b/src/common/logging/backend.cpp
@@ -46,12 +46,12 @@ public:
46 } 46 }
47 47
48 void AddBackend(std::unique_ptr<Backend> backend) { 48 void AddBackend(std::unique_ptr<Backend> backend) {
49 std::lock_guard<std::mutex> lock(writing_mutex); 49 std::lock_guard lock{writing_mutex};
50 backends.push_back(std::move(backend)); 50 backends.push_back(std::move(backend));
51 } 51 }
52 52
53 void RemoveBackend(std::string_view backend_name) { 53 void RemoveBackend(std::string_view backend_name) {
54 std::lock_guard<std::mutex> lock(writing_mutex); 54 std::lock_guard lock{writing_mutex};
55 const auto it = 55 const auto it =
56 std::remove_if(backends.begin(), backends.end(), 56 std::remove_if(backends.begin(), backends.end(),
57 [&backend_name](const auto& i) { return backend_name == i->GetName(); }); 57 [&backend_name](const auto& i) { return backend_name == i->GetName(); });
@@ -80,7 +80,7 @@ private:
80 backend_thread = std::thread([&] { 80 backend_thread = std::thread([&] {
81 Entry entry; 81 Entry entry;
82 auto write_logs = [&](Entry& e) { 82 auto write_logs = [&](Entry& e) {
83 std::lock_guard<std::mutex> lock(writing_mutex); 83 std::lock_guard lock{writing_mutex};
84 for (const auto& backend : backends) { 84 for (const auto& backend : backends) {
85 backend->Write(e); 85 backend->Write(e);
86 } 86 }
diff --git a/src/common/multi_level_queue.h b/src/common/multi_level_queue.h
new file mode 100644
index 000000000..2b61b91e0
--- /dev/null
+++ b/src/common/multi_level_queue.h
@@ -0,0 +1,337 @@
1// Copyright 2019 TuxSH
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8#include <iterator>
9#include <list>
10#include <utility>
11
12#include "common/bit_util.h"
13#include "common/common_types.h"
14
15namespace Common {
16
17/**
18 * A MultiLevelQueue is a type of priority queue which has the following characteristics:
19 * - iterable through each of its elements.
20 * - back can be obtained.
21 * - O(1) add, lookup (both front and back)
22 * - discrete priorities and a max of 64 priorities (limited domain)
23 * This type of priority queue is normally used for managing threads within a scheduler
24 */
25template <typename T, std::size_t Depth>
26class MultiLevelQueue {
27public:
28 using value_type = T;
29 using reference = value_type&;
30 using const_reference = const value_type&;
31 using pointer = value_type*;
32 using const_pointer = const value_type*;
33
34 using difference_type = typename std::pointer_traits<pointer>::difference_type;
35 using size_type = std::size_t;
36
37 template <bool is_constant>
38 class iterator_impl {
39 public:
40 using iterator_category = std::bidirectional_iterator_tag;
41 using value_type = T;
42 using pointer = std::conditional_t<is_constant, T*, const T*>;
43 using reference = std::conditional_t<is_constant, const T&, T&>;
44 using difference_type = typename std::pointer_traits<pointer>::difference_type;
45
46 friend bool operator==(const iterator_impl& lhs, const iterator_impl& rhs) {
47 if (lhs.IsEnd() && rhs.IsEnd())
48 return true;
49 return std::tie(lhs.current_priority, lhs.it) == std::tie(rhs.current_priority, rhs.it);
50 }
51
52 friend bool operator!=(const iterator_impl& lhs, const iterator_impl& rhs) {
53 return !operator==(lhs, rhs);
54 }
55
56 reference operator*() const {
57 return *it;
58 }
59
60 pointer operator->() const {
61 return it.operator->();
62 }
63
64 iterator_impl& operator++() {
65 if (IsEnd()) {
66 return *this;
67 }
68
69 ++it;
70
71 if (it == GetEndItForPrio()) {
72 u64 prios = mlq.used_priorities;
73 prios &= ~((1ULL << (current_priority + 1)) - 1);
74 if (prios == 0) {
75 current_priority = mlq.depth();
76 } else {
77 current_priority = CountTrailingZeroes64(prios);
78 it = GetBeginItForPrio();
79 }
80 }
81 return *this;
82 }
83
84 iterator_impl& operator--() {
85 if (IsEnd()) {
86 if (mlq.used_priorities != 0) {
87 current_priority = 63 - CountLeadingZeroes64(mlq.used_priorities);
88 it = GetEndItForPrio();
89 --it;
90 }
91 } else if (it == GetBeginItForPrio()) {
92 u64 prios = mlq.used_priorities;
93 prios &= (1ULL << current_priority) - 1;
94 if (prios != 0) {
95 current_priority = CountTrailingZeroes64(prios);
96 it = GetEndItForPrio();
97 --it;
98 }
99 } else {
100 --it;
101 }
102 return *this;
103 }
104
105 iterator_impl operator++(int) {
106 const iterator_impl v{*this};
107 ++(*this);
108 return v;
109 }
110
111 iterator_impl operator--(int) {
112 const iterator_impl v{*this};
113 --(*this);
114 return v;
115 }
116
117 // allow implicit const->non-const
118 iterator_impl(const iterator_impl<false>& other)
119 : mlq(other.mlq), it(other.it), current_priority(other.current_priority) {}
120
121 iterator_impl(const iterator_impl<true>& other)
122 : mlq(other.mlq), it(other.it), current_priority(other.current_priority) {}
123
124 iterator_impl& operator=(const iterator_impl<false>& other) {
125 mlq = other.mlq;
126 it = other.it;
127 current_priority = other.current_priority;
128 return *this;
129 }
130
131 friend class iterator_impl<true>;
132 iterator_impl() = default;
133
134 private:
135 friend class MultiLevelQueue;
136 using container_ref =
137 std::conditional_t<is_constant, const MultiLevelQueue&, MultiLevelQueue&>;
138 using list_iterator = std::conditional_t<is_constant, typename std::list<T>::const_iterator,
139 typename std::list<T>::iterator>;
140
141 explicit iterator_impl(container_ref mlq, list_iterator it, u32 current_priority)
142 : mlq(mlq), it(it), current_priority(current_priority) {}
143 explicit iterator_impl(container_ref mlq, u32 current_priority)
144 : mlq(mlq), it(), current_priority(current_priority) {}
145
146 bool IsEnd() const {
147 return current_priority == mlq.depth();
148 }
149
150 list_iterator GetBeginItForPrio() const {
151 return mlq.levels[current_priority].begin();
152 }
153
154 list_iterator GetEndItForPrio() const {
155 return mlq.levels[current_priority].end();
156 }
157
158 container_ref mlq;
159 list_iterator it;
160 u32 current_priority;
161 };
162
163 using iterator = iterator_impl<false>;
164 using const_iterator = iterator_impl<true>;
165
166 void add(const T& element, u32 priority, bool send_back = true) {
167 if (send_back)
168 levels[priority].push_back(element);
169 else
170 levels[priority].push_front(element);
171 used_priorities |= 1ULL << priority;
172 }
173
174 void remove(const T& element, u32 priority) {
175 auto it = ListIterateTo(levels[priority], element);
176 if (it == levels[priority].end())
177 return;
178 levels[priority].erase(it);
179 if (levels[priority].empty()) {
180 used_priorities &= ~(1ULL << priority);
181 }
182 }
183
184 void adjust(const T& element, u32 old_priority, u32 new_priority, bool adjust_front = false) {
185 remove(element, old_priority);
186 add(element, new_priority, !adjust_front);
187 }
188 void adjust(const_iterator it, u32 old_priority, u32 new_priority, bool adjust_front = false) {
189 adjust(*it, old_priority, new_priority, adjust_front);
190 }
191
192 void transfer_to_front(const T& element, u32 priority, MultiLevelQueue& other) {
193 ListSplice(other.levels[priority], other.levels[priority].begin(), levels[priority],
194 ListIterateTo(levels[priority], element));
195
196 other.used_priorities |= 1ULL << priority;
197
198 if (levels[priority].empty()) {
199 used_priorities &= ~(1ULL << priority);
200 }
201 }
202
203 void transfer_to_front(const_iterator it, u32 priority, MultiLevelQueue& other) {
204 transfer_to_front(*it, priority, other);
205 }
206
207 void transfer_to_back(const T& element, u32 priority, MultiLevelQueue& other) {
208 ListSplice(other.levels[priority], other.levels[priority].end(), levels[priority],
209 ListIterateTo(levels[priority], element));
210
211 other.used_priorities |= 1ULL << priority;
212
213 if (levels[priority].empty()) {
214 used_priorities &= ~(1ULL << priority);
215 }
216 }
217
218 void transfer_to_back(const_iterator it, u32 priority, MultiLevelQueue& other) {
219 transfer_to_back(*it, priority, other);
220 }
221
222 void yield(u32 priority, std::size_t n = 1) {
223 ListShiftForward(levels[priority], n);
224 }
225
226 std::size_t depth() const {
227 return Depth;
228 }
229
230 std::size_t size(u32 priority) const {
231 return levels[priority].size();
232 }
233
234 std::size_t size() const {
235 u64 priorities = used_priorities;
236 std::size_t size = 0;
237 while (priorities != 0) {
238 const u64 current_priority = CountTrailingZeroes64(priorities);
239 size += levels[current_priority].size();
240 priorities &= ~(1ULL << current_priority);
241 }
242 return size;
243 }
244
245 bool empty() const {
246 return used_priorities == 0;
247 }
248
249 bool empty(u32 priority) const {
250 return (used_priorities & (1ULL << priority)) == 0;
251 }
252
253 u32 highest_priority_set(u32 max_priority = 0) const {
254 const u64 priorities =
255 max_priority == 0 ? used_priorities : (used_priorities & ~((1ULL << max_priority) - 1));
256 return priorities == 0 ? Depth : static_cast<u32>(CountTrailingZeroes64(priorities));
257 }
258
259 u32 lowest_priority_set(u32 min_priority = Depth - 1) const {
260 const u64 priorities = min_priority >= Depth - 1
261 ? used_priorities
262 : (used_priorities & ((1ULL << (min_priority + 1)) - 1));
263 return priorities == 0 ? Depth : 63 - CountLeadingZeroes64(priorities);
264 }
265
266 const_iterator cbegin(u32 max_prio = 0) const {
267 const u32 priority = highest_priority_set(max_prio);
268 return priority == Depth ? cend()
269 : const_iterator{*this, levels[priority].cbegin(), priority};
270 }
271 const_iterator begin(u32 max_prio = 0) const {
272 return cbegin(max_prio);
273 }
274 iterator begin(u32 max_prio = 0) {
275 const u32 priority = highest_priority_set(max_prio);
276 return priority == Depth ? end() : iterator{*this, levels[priority].begin(), priority};
277 }
278
279 const_iterator cend(u32 min_prio = Depth - 1) const {
280 return min_prio == Depth - 1 ? const_iterator{*this, Depth} : cbegin(min_prio + 1);
281 }
282 const_iterator end(u32 min_prio = Depth - 1) const {
283 return cend(min_prio);
284 }
285 iterator end(u32 min_prio = Depth - 1) {
286 return min_prio == Depth - 1 ? iterator{*this, Depth} : begin(min_prio + 1);
287 }
288
289 T& front(u32 max_priority = 0) {
290 const u32 priority = highest_priority_set(max_priority);
291 return levels[priority == Depth ? 0 : priority].front();
292 }
293 const T& front(u32 max_priority = 0) const {
294 const u32 priority = highest_priority_set(max_priority);
295 return levels[priority == Depth ? 0 : priority].front();
296 }
297
298 T back(u32 min_priority = Depth - 1) {
299 const u32 priority = lowest_priority_set(min_priority); // intended
300 return levels[priority == Depth ? 63 : priority].back();
301 }
302 const T& back(u32 min_priority = Depth - 1) const {
303 const u32 priority = lowest_priority_set(min_priority); // intended
304 return levels[priority == Depth ? 63 : priority].back();
305 }
306
307private:
308 using const_list_iterator = typename std::list<T>::const_iterator;
309
310 static void ListShiftForward(std::list<T>& list, const std::size_t shift = 1) {
311 if (shift >= list.size()) {
312 return;
313 }
314
315 const auto begin_range = list.begin();
316 const auto end_range = std::next(begin_range, shift);
317 list.splice(list.end(), list, begin_range, end_range);
318 }
319
320 static void ListSplice(std::list<T>& in_list, const_list_iterator position,
321 std::list<T>& out_list, const_list_iterator element) {
322 in_list.splice(position, out_list, element);
323 }
324
325 static const_list_iterator ListIterateTo(const std::list<T>& list, const T& element) {
326 auto it = list.cbegin();
327 while (it != list.cend() && *it != element) {
328 ++it;
329 }
330 return it;
331 }
332
333 std::array<std::list<T>, Depth> levels;
334 u64 used_priorities = 0;
335};
336
337} // namespace Common
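
To make the queue's intended use concrete, here is a small sketch based only on the interface added above; the element type and priorities are made up for illustration:

#include "common/multi_level_queue.h"

void MultiLevelQueueExample() {
    // 64 discrete priority levels; a lower index is a higher priority, as with kernel threads.
    Common::MultiLevelQueue<int, 64> queue;

    queue.add(1, 10);        // appended to the back of priority level 10
    queue.add(2, 3);         // level 3 outranks level 10
    queue.add(3, 10, false); // send_back == false pushes to the front of level 10

    const int next = queue.front(); // == 2, the element at the highest priority level in use
    queue.yield(10);                // rotates the first element of level 10 to that level's back
    queue.remove(2, 3);             // removing the last element of a level clears its bit in used_priorities

    for (const int element : queue) {
        // Iterates from the highest priority level in use down to the lowest.
    }
}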
diff --git a/src/common/thread.cpp b/src/common/thread.cpp
index 5144c0d9f..fe7a420cc 100644
--- a/src/common/thread.cpp
+++ b/src/common/thread.cpp
@@ -27,18 +27,6 @@ namespace Common {
27 27
28#ifdef _MSC_VER 28#ifdef _MSC_VER
29 29
30void SetThreadAffinity(std::thread::native_handle_type thread, u32 mask) {
31 SetThreadAffinityMask(thread, mask);
32}
33
34void SetCurrentThreadAffinity(u32 mask) {
35 SetThreadAffinityMask(GetCurrentThread(), mask);
36}
37
38void SwitchCurrentThread() {
39 SwitchToThread();
40}
41
42// Sets the debugger-visible name of the current thread. 30// Sets the debugger-visible name of the current thread.
43// Uses undocumented (actually, it is now documented) trick. 31// Uses undocumented (actually, it is now documented) trick.
44// http://msdn.microsoft.com/library/default.asp?url=/library/en-us/vsdebug/html/vxtsksettingthreadname.asp 32// http://msdn.microsoft.com/library/default.asp?url=/library/en-us/vsdebug/html/vxtsksettingthreadname.asp
@@ -70,31 +58,6 @@ void SetCurrentThreadName(const char* name) {
70 58
71#else // !MSVC_VER, so must be POSIX threads 59#else // !MSVC_VER, so must be POSIX threads
72 60
73void SetThreadAffinity(std::thread::native_handle_type thread, u32 mask) {
74#ifdef __APPLE__
75 thread_policy_set(pthread_mach_thread_np(thread), THREAD_AFFINITY_POLICY, (integer_t*)&mask, 1);
76#elif (defined __linux__ || defined __FreeBSD__) && !(defined ANDROID)
77 cpu_set_t cpu_set;
78 CPU_ZERO(&cpu_set);
79
80 for (int i = 0; i != sizeof(mask) * 8; ++i)
81 if ((mask >> i) & 1)
82 CPU_SET(i, &cpu_set);
83
84 pthread_setaffinity_np(thread, sizeof(cpu_set), &cpu_set);
85#endif
86}
87
88void SetCurrentThreadAffinity(u32 mask) {
89 SetThreadAffinity(pthread_self(), mask);
90}
91
92#ifndef _WIN32
93void SwitchCurrentThread() {
94 usleep(1000 * 1);
95}
96#endif
97
98// MinGW with the POSIX threading model does not support pthread_setname_np 61// MinGW with the POSIX threading model does not support pthread_setname_np
99#if !defined(_WIN32) || defined(_MSC_VER) 62#if !defined(_WIN32) || defined(_MSC_VER)
100void SetCurrentThreadName(const char* name) { 63void SetCurrentThreadName(const char* name) {
diff --git a/src/common/thread.h b/src/common/thread.h
index 2cf74452d..0cfd98be6 100644
--- a/src/common/thread.h
+++ b/src/common/thread.h
@@ -9,14 +9,13 @@
9#include <cstddef> 9#include <cstddef>
10#include <mutex> 10#include <mutex>
11#include <thread> 11#include <thread>
12#include "common/common_types.h"
13 12
14namespace Common { 13namespace Common {
15 14
16class Event { 15class Event {
17public: 16public:
18 void Set() { 17 void Set() {
19 std::lock_guard<std::mutex> lk(mutex); 18 std::lock_guard lk{mutex};
20 if (!is_set) { 19 if (!is_set) {
21 is_set = true; 20 is_set = true;
22 condvar.notify_one(); 21 condvar.notify_one();
@@ -24,14 +23,14 @@ public:
24 } 23 }
25 24
26 void Wait() { 25 void Wait() {
27 std::unique_lock<std::mutex> lk(mutex); 26 std::unique_lock lk{mutex};
28 condvar.wait(lk, [&] { return is_set; }); 27 condvar.wait(lk, [&] { return is_set; });
29 is_set = false; 28 is_set = false;
30 } 29 }
31 30
32 template <class Clock, class Duration> 31 template <class Clock, class Duration>
33 bool WaitUntil(const std::chrono::time_point<Clock, Duration>& time) { 32 bool WaitUntil(const std::chrono::time_point<Clock, Duration>& time) {
34 std::unique_lock<std::mutex> lk(mutex); 33 std::unique_lock lk{mutex};
35 if (!condvar.wait_until(lk, time, [this] { return is_set; })) 34 if (!condvar.wait_until(lk, time, [this] { return is_set; }))
36 return false; 35 return false;
37 is_set = false; 36 is_set = false;
@@ -39,7 +38,7 @@ public:
39 } 38 }
40 39
41 void Reset() { 40 void Reset() {
42 std::unique_lock<std::mutex> lk(mutex); 41 std::unique_lock lk{mutex};
43 // no other action required, since wait loops on the predicate and any lingering signal will 42 // no other action required, since wait loops on the predicate and any lingering signal will
44 // get cleared on the first iteration 43 // get cleared on the first iteration
45 is_set = false; 44 is_set = false;
@@ -57,7 +56,7 @@ public:
57 56
58 /// Blocks until all "count" threads have called Sync() 57 /// Blocks until all "count" threads have called Sync()
59 void Sync() { 58 void Sync() {
60 std::unique_lock<std::mutex> lk(mutex); 59 std::unique_lock lk{mutex};
61 const std::size_t current_generation = generation; 60 const std::size_t current_generation = generation;
62 61
63 if (++waiting == count) { 62 if (++waiting == count) {
@@ -78,9 +77,6 @@ private:
78 std::size_t generation = 0; // Incremented once each time the barrier is used 77 std::size_t generation = 0; // Incremented once each time the barrier is used
79}; 78};
80 79
81void SetThreadAffinity(std::thread::native_handle_type thread, u32 mask);
82void SetCurrentThreadAffinity(u32 mask);
83void SwitchCurrentThread(); // On Linux, this is equal to sleep 1ms
84void SetCurrentThreadName(const char* name); 80void SetCurrentThreadName(const char* name);
85 81
86} // namespace Common 82} // namespace Common
diff --git a/src/common/threadsafe_queue.h b/src/common/threadsafe_queue.h
index 821e8536a..e714ba5b3 100644
--- a/src/common/threadsafe_queue.h
+++ b/src/common/threadsafe_queue.h
@@ -78,7 +78,7 @@ public:
78 78
79 T PopWait() { 79 T PopWait() {
80 if (Empty()) { 80 if (Empty()) {
81 std::unique_lock<std::mutex> lock(cv_mutex); 81 std::unique_lock lock{cv_mutex};
82 cv.wait(lock, [this]() { return !Empty(); }); 82 cv.wait(lock, [this]() { return !Empty(); });
83 } 83 }
84 T t; 84 T t;
@@ -137,7 +137,7 @@ public:
137 137
138 template <typename Arg> 138 template <typename Arg>
139 void Push(Arg&& t) { 139 void Push(Arg&& t) {
140 std::lock_guard<std::mutex> lock(write_lock); 140 std::lock_guard lock{write_lock};
141 spsc_queue.Push(t); 141 spsc_queue.Push(t);
142 } 142 }
143 143
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index bbbe60896..9e23afe85 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -70,6 +70,8 @@ add_library(core STATIC
70 file_sys/system_archive/ng_word.h 70 file_sys/system_archive/ng_word.h
71 file_sys/system_archive/system_archive.cpp 71 file_sys/system_archive/system_archive.cpp
72 file_sys/system_archive/system_archive.h 72 file_sys/system_archive/system_archive.h
73 file_sys/system_archive/system_version.cpp
74 file_sys/system_archive/system_version.h
73 file_sys/vfs.cpp 75 file_sys/vfs.cpp
74 file_sys/vfs.h 76 file_sys/vfs.h
75 file_sys/vfs_concat.cpp 77 file_sys/vfs_concat.cpp
@@ -144,6 +146,8 @@ add_library(core STATIC
144 hle/kernel/svc_wrap.h 146 hle/kernel/svc_wrap.h
145 hle/kernel/thread.cpp 147 hle/kernel/thread.cpp
146 hle/kernel/thread.h 148 hle/kernel/thread.h
149 hle/kernel/transfer_memory.cpp
150 hle/kernel/transfer_memory.h
147 hle/kernel/vm_manager.cpp 151 hle/kernel/vm_manager.cpp
148 hle/kernel/vm_manager.h 152 hle/kernel/vm_manager.h
149 hle/kernel/wait_object.cpp 153 hle/kernel/wait_object.cpp
diff --git a/src/core/core_cpu.cpp b/src/core/core_cpu.cpp
index 1eefed6d0..e75741db0 100644
--- a/src/core/core_cpu.cpp
+++ b/src/core/core_cpu.cpp
@@ -22,7 +22,7 @@
22namespace Core { 22namespace Core {
23 23
24void CpuBarrier::NotifyEnd() { 24void CpuBarrier::NotifyEnd() {
25 std::unique_lock<std::mutex> lock(mutex); 25 std::unique_lock lock{mutex};
26 end = true; 26 end = true;
27 condition.notify_all(); 27 condition.notify_all();
28} 28}
@@ -34,7 +34,7 @@ bool CpuBarrier::Rendezvous() {
34 } 34 }
35 35
36 if (!end) { 36 if (!end) {
37 std::unique_lock<std::mutex> lock(mutex); 37 std::unique_lock lock{mutex};
38 38
39 --cores_waiting; 39 --cores_waiting;
40 if (!cores_waiting) { 40 if (!cores_waiting) {
@@ -131,7 +131,7 @@ void Cpu::Reschedule() {
131 131
132 reschedule_pending = false; 132 reschedule_pending = false;
133 // Lock the global kernel mutex when we manipulate the HLE state 133 // Lock the global kernel mutex when we manipulate the HLE state
134 std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); 134 std::lock_guard lock{HLE::g_hle_lock};
135 scheduler->Reschedule(); 135 scheduler->Reschedule();
136} 136}
137 137
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp
index a0dd5db24..41adb2302 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -186,7 +186,7 @@ void CoreTiming::Advance() {
186 Event evt = std::move(event_queue.front()); 186 Event evt = std::move(event_queue.front());
187 std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>()); 187 std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>());
188 event_queue.pop_back(); 188 event_queue.pop_back();
189 evt.type->callback(evt.userdata, static_cast<int>(global_timer - evt.time)); 189 evt.type->callback(evt.userdata, global_timer - evt.time);
190 } 190 }
191 191
192 is_global_timer_sane = false; 192 is_global_timer_sane = false;
diff --git a/src/core/core_timing.h b/src/core/core_timing.h
index 59163bae1..9d2efde37 100644
--- a/src/core/core_timing.h
+++ b/src/core/core_timing.h
@@ -15,7 +15,7 @@
15namespace Core::Timing { 15namespace Core::Timing {
16 16
17/// A callback that may be scheduled for a particular core timing event. 17/// A callback that may be scheduled for a particular core timing event.
18using TimedCallback = std::function<void(u64 userdata, int cycles_late)>; 18using TimedCallback = std::function<void(u64 userdata, s64 cycles_late)>;
19 19
20/// Contains the characteristics of a particular event. 20/// Contains the characteristics of a particular event.
21struct EventType { 21struct EventType {
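
This signature change is what the stream.cpp hunk near the top of the diff adapts to: every scheduled callback now receives its lateness as a signed 64-bit cycle count instead of an int. A hedged sketch of a caller, assuming a Core::Timing::CoreTiming& named core_timing as in stream.cpp (the event name and body are made up):

const auto example_event = core_timing.RegisterEvent(
    "ExampleEvent", [](u64 userdata, s64 cycles_late) {
        // cycles_late is now s64, so very late wakeups no longer truncate.
    });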
diff --git a/src/core/file_sys/cheat_engine.cpp b/src/core/file_sys/cheat_engine.cpp
index 247fbc864..b06c2f20a 100644
--- a/src/core/file_sys/cheat_engine.cpp
+++ b/src/core/file_sys/cheat_engine.cpp
@@ -423,6 +423,7 @@ std::array<u8, 16> TextCheatParser::ParseSingleLineCheat(const std::string& line
423 return out; 423 return out;
424} 424}
425 425
426namespace {
426u64 MemoryReadImpl(u32 width, VAddr addr) { 427u64 MemoryReadImpl(u32 width, VAddr addr) {
427 switch (width) { 428 switch (width) {
428 case 1: 429 case 1:
@@ -457,6 +458,7 @@ void MemoryWriteImpl(u32 width, VAddr addr, u64 value) {
457 UNREACHABLE(); 458 UNREACHABLE();
458 } 459 }
459} 460}
461} // Anonymous namespace
460 462
461CheatEngine::CheatEngine(Core::System& system, std::vector<CheatList> cheats_, 463CheatEngine::CheatEngine(Core::System& system, std::vector<CheatList> cheats_,
462 const std::string& build_id, VAddr code_region_start, 464 const std::string& build_id, VAddr code_region_start,
diff --git a/src/core/file_sys/errors.h b/src/core/file_sys/errors.h
index e4a4ee4ab..bb4654366 100644
--- a/src/core/file_sys/errors.h
+++ b/src/core/file_sys/errors.h
@@ -11,6 +11,9 @@ namespace FileSys {
11constexpr ResultCode ERROR_PATH_NOT_FOUND{ErrorModule::FS, 1}; 11constexpr ResultCode ERROR_PATH_NOT_FOUND{ErrorModule::FS, 1};
12constexpr ResultCode ERROR_ENTITY_NOT_FOUND{ErrorModule::FS, 1002}; 12constexpr ResultCode ERROR_ENTITY_NOT_FOUND{ErrorModule::FS, 1002};
13constexpr ResultCode ERROR_SD_CARD_NOT_FOUND{ErrorModule::FS, 2001}; 13constexpr ResultCode ERROR_SD_CARD_NOT_FOUND{ErrorModule::FS, 2001};
14constexpr ResultCode ERROR_OUT_OF_BOUNDS{ErrorModule::FS, 3005};
15constexpr ResultCode ERROR_FAILED_MOUNT_ARCHIVE{ErrorModule::FS, 3223};
16constexpr ResultCode ERROR_INVALID_ARGUMENT{ErrorModule::FS, 6001};
14constexpr ResultCode ERROR_INVALID_OFFSET{ErrorModule::FS, 6061}; 17constexpr ResultCode ERROR_INVALID_OFFSET{ErrorModule::FS, 6061};
15constexpr ResultCode ERROR_INVALID_SIZE{ErrorModule::FS, 6062}; 18constexpr ResultCode ERROR_INVALID_SIZE{ErrorModule::FS, 6062};
16 19
diff --git a/src/core/file_sys/patch_manager.cpp b/src/core/file_sys/patch_manager.cpp
index 58884b4a0..e11217708 100644
--- a/src/core/file_sys/patch_manager.cpp
+++ b/src/core/file_sys/patch_manager.cpp
@@ -20,6 +20,7 @@
20#include "core/file_sys/vfs_vector.h" 20#include "core/file_sys/vfs_vector.h"
21#include "core/hle/service/filesystem/filesystem.h" 21#include "core/hle/service/filesystem/filesystem.h"
22#include "core/loader/loader.h" 22#include "core/loader/loader.h"
23#include "core/loader/nso.h"
23#include "core/settings.h" 24#include "core/settings.h"
24 25
25namespace FileSys { 26namespace FileSys {
@@ -32,14 +33,6 @@ constexpr std::array<const char*, 14> EXEFS_FILE_NAMES{
32 "subsdk3", "subsdk4", "subsdk5", "subsdk6", "subsdk7", "subsdk8", "subsdk9", 33 "subsdk3", "subsdk4", "subsdk5", "subsdk6", "subsdk7", "subsdk8", "subsdk9",
33}; 34};
34 35
35struct NSOBuildHeader {
36 u32_le magic;
37 INSERT_PADDING_BYTES(0x3C);
38 std::array<u8, 0x20> build_id;
39 INSERT_PADDING_BYTES(0xA0);
40};
41static_assert(sizeof(NSOBuildHeader) == 0x100, "NSOBuildHeader has incorrect size.");
42
43std::string FormatTitleVersion(u32 version, TitleVersionFormat format) { 36std::string FormatTitleVersion(u32 version, TitleVersionFormat format) {
44 std::array<u8, sizeof(u32)> bytes{}; 37 std::array<u8, sizeof(u32)> bytes{};
45 bytes[0] = version % SINGLE_BYTE_MODULUS; 38 bytes[0] = version % SINGLE_BYTE_MODULUS;
@@ -163,14 +156,16 @@ std::vector<VirtualFile> PatchManager::CollectPatches(const std::vector<VirtualD
163} 156}
164 157
165std::vector<u8> PatchManager::PatchNSO(const std::vector<u8>& nso) const { 158std::vector<u8> PatchManager::PatchNSO(const std::vector<u8>& nso) const {
166 if (nso.size() < 0x100) 159 if (nso.size() < sizeof(Loader::NSOHeader)) {
167 return nso; 160 return nso;
161 }
168 162
169 NSOBuildHeader header; 163 Loader::NSOHeader header;
170 std::memcpy(&header, nso.data(), sizeof(NSOBuildHeader)); 164 std::memcpy(&header, nso.data(), sizeof(header));
171 165
172 if (header.magic != Common::MakeMagic('N', 'S', 'O', '0')) 166 if (header.magic != Common::MakeMagic('N', 'S', 'O', '0')) {
173 return nso; 167 return nso;
168 }
174 169
175 const auto build_id_raw = Common::HexArrayToString(header.build_id); 170 const auto build_id_raw = Common::HexArrayToString(header.build_id);
176 const auto build_id = build_id_raw.substr(0, build_id_raw.find_last_not_of('0') + 1); 171 const auto build_id = build_id_raw.substr(0, build_id_raw.find_last_not_of('0') + 1);
@@ -213,9 +208,11 @@ std::vector<u8> PatchManager::PatchNSO(const std::vector<u8>& nso) const {
213 } 208 }
214 } 209 }
215 210
216 if (out.size() < 0x100) 211 if (out.size() < sizeof(Loader::NSOHeader)) {
217 return nso; 212 return nso;
218 std::memcpy(out.data(), &header, sizeof(NSOBuildHeader)); 213 }
214
215 std::memcpy(out.data(), &header, sizeof(header));
219 return out; 216 return out;
220} 217}
221 218
diff --git a/src/core/file_sys/system_archive/system_archive.cpp b/src/core/file_sys/system_archive/system_archive.cpp
index e3e79f40a..c9722ed77 100644
--- a/src/core/file_sys/system_archive/system_archive.cpp
+++ b/src/core/file_sys/system_archive/system_archive.cpp
@@ -6,6 +6,7 @@
6#include "core/file_sys/romfs.h" 6#include "core/file_sys/romfs.h"
7#include "core/file_sys/system_archive/ng_word.h" 7#include "core/file_sys/system_archive/ng_word.h"
8#include "core/file_sys/system_archive/system_archive.h" 8#include "core/file_sys/system_archive/system_archive.h"
9#include "core/file_sys/system_archive/system_version.h"
9 10
10namespace FileSys::SystemArchive { 11namespace FileSys::SystemArchive {
11 12
@@ -30,7 +31,7 @@ constexpr std::array<SystemArchiveDescriptor, SYSTEM_ARCHIVE_COUNT> SYSTEM_ARCHI
30 {0x0100000000000806, "NgWord", &NgWord1}, 31 {0x0100000000000806, "NgWord", &NgWord1},
31 {0x0100000000000807, "SsidList", nullptr}, 32 {0x0100000000000807, "SsidList", nullptr},
32 {0x0100000000000808, "Dictionary", nullptr}, 33 {0x0100000000000808, "Dictionary", nullptr},
33 {0x0100000000000809, "SystemVersion", nullptr}, 34 {0x0100000000000809, "SystemVersion", &SystemVersion},
34 {0x010000000000080A, "AvatarImage", nullptr}, 35 {0x010000000000080A, "AvatarImage", nullptr},
35 {0x010000000000080B, "LocalNews", nullptr}, 36 {0x010000000000080B, "LocalNews", nullptr},
36 {0x010000000000080C, "Eula", nullptr}, 37 {0x010000000000080C, "Eula", nullptr},
diff --git a/src/core/file_sys/system_archive/system_version.cpp b/src/core/file_sys/system_archive/system_version.cpp
new file mode 100644
index 000000000..6e22f97b0
--- /dev/null
+++ b/src/core/file_sys/system_archive/system_version.cpp
@@ -0,0 +1,52 @@
1// Copyright 2019 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "core/file_sys/system_archive/system_version.h"
6#include "core/file_sys/vfs_vector.h"
7
8namespace FileSys::SystemArchive {
9
10namespace SystemVersionData {
11
12// This section should reflect the best system version to describe yuzu's HLE api.
13// TODO(DarkLordZach): Update when HLE gets better.
14
15constexpr u8 VERSION_MAJOR = 5;
16constexpr u8 VERSION_MINOR = 1;
17constexpr u8 VERSION_MICRO = 0;
18
19constexpr u8 REVISION_MAJOR = 3;
20constexpr u8 REVISION_MINOR = 0;
21
22constexpr char PLATFORM_STRING[] = "NX";
23constexpr char VERSION_HASH[] = "23f9df53e25709d756e0c76effcb2473bd3447dd";
24constexpr char DISPLAY_VERSION[] = "5.1.0";
25constexpr char DISPLAY_TITLE[] = "NintendoSDK Firmware for NX 5.1.0-3.0";
26
27} // namespace SystemVersionData
28
29std::string GetLongDisplayVersion() {
30 return SystemVersionData::DISPLAY_TITLE;
31}
32
33VirtualDir SystemVersion() {
34 VirtualFile file = std::make_shared<VectorVfsFile>(std::vector<u8>(0x100), "file");
35 file->WriteObject(SystemVersionData::VERSION_MAJOR, 0);
36 file->WriteObject(SystemVersionData::VERSION_MINOR, 1);
37 file->WriteObject(SystemVersionData::VERSION_MICRO, 2);
38 file->WriteObject(SystemVersionData::REVISION_MAJOR, 4);
39 file->WriteObject(SystemVersionData::REVISION_MINOR, 5);
40 file->WriteArray(SystemVersionData::PLATFORM_STRING,
41 std::min<u64>(sizeof(SystemVersionData::PLATFORM_STRING), 0x20ULL), 0x8);
42 file->WriteArray(SystemVersionData::VERSION_HASH,
43 std::min<u64>(sizeof(SystemVersionData::VERSION_HASH), 0x40ULL), 0x28);
44 file->WriteArray(SystemVersionData::DISPLAY_VERSION,
45 std::min<u64>(sizeof(SystemVersionData::DISPLAY_VERSION), 0x18ULL), 0x68);
46 file->WriteArray(SystemVersionData::DISPLAY_TITLE,
47 std::min<u64>(sizeof(SystemVersionData::DISPLAY_TITLE), 0x80ULL), 0x80);
48 return std::make_shared<VectorVfsDirectory>(std::vector<VirtualFile>{file},
49 std::vector<VirtualDir>{}, "data");
50}
51
52} // namespace FileSys::SystemArchive
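
The WriteObject/WriteArray calls above lay out the 0x100-byte SystemVersion data file. A rough sketch of that layout as a struct, inferred only from the offsets in this file; the field names are illustrative:

#include <cstdint>

struct SystemVersionFileLayout {
    std::uint8_t version_major;      // 0x00
    std::uint8_t version_minor;      // 0x01
    std::uint8_t version_micro;      // 0x02
    std::uint8_t pad0;               // 0x03
    std::uint8_t revision_major;     // 0x04
    std::uint8_t revision_minor;     // 0x05
    std::uint8_t pad1[2];            // 0x06
    char platform_string[0x20];      // 0x08, "NX"
    char version_hash[0x40];         // 0x28
    char display_version[0x18];      // 0x68
    char display_title[0x80];        // 0x80
};
static_assert(sizeof(SystemVersionFileLayout) == 0x100, "sketch should span the whole file");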
diff --git a/src/core/file_sys/system_archive/system_version.h b/src/core/file_sys/system_archive/system_version.h
new file mode 100644
index 000000000..deed79b26
--- /dev/null
+++ b/src/core/file_sys/system_archive/system_version.h
@@ -0,0 +1,16 @@
1// Copyright 2019 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <string>
8#include "core/file_sys/vfs_types.h"
9
10namespace FileSys::SystemArchive {
11
12std::string GetLongDisplayVersion();
13
14VirtualDir SystemVersion();
15
16} // namespace FileSys::SystemArchive
diff --git a/src/core/frontend/emu_window.cpp b/src/core/frontend/emu_window.cpp
index e29afd630..1320bbe77 100644
--- a/src/core/frontend/emu_window.cpp
+++ b/src/core/frontend/emu_window.cpp
@@ -30,7 +30,7 @@ private:
30 explicit Device(std::weak_ptr<TouchState>&& touch_state) : touch_state(touch_state) {} 30 explicit Device(std::weak_ptr<TouchState>&& touch_state) : touch_state(touch_state) {}
31 std::tuple<float, float, bool> GetStatus() const override { 31 std::tuple<float, float, bool> GetStatus() const override {
32 if (auto state = touch_state.lock()) { 32 if (auto state = touch_state.lock()) {
33 std::lock_guard<std::mutex> guard(state->mutex); 33 std::lock_guard guard{state->mutex};
34 return std::make_tuple(state->touch_x, state->touch_y, state->touch_pressed); 34 return std::make_tuple(state->touch_x, state->touch_y, state->touch_pressed);
35 } 35 }
36 return std::make_tuple(0.0f, 0.0f, false); 36 return std::make_tuple(0.0f, 0.0f, false);
@@ -81,7 +81,7 @@ void EmuWindow::TouchPressed(unsigned framebuffer_x, unsigned framebuffer_y) {
81 if (!IsWithinTouchscreen(framebuffer_layout, framebuffer_x, framebuffer_y)) 81 if (!IsWithinTouchscreen(framebuffer_layout, framebuffer_x, framebuffer_y))
82 return; 82 return;
83 83
84 std::lock_guard<std::mutex> guard(touch_state->mutex); 84 std::lock_guard guard{touch_state->mutex};
85 touch_state->touch_x = static_cast<float>(framebuffer_x - framebuffer_layout.screen.left) / 85 touch_state->touch_x = static_cast<float>(framebuffer_x - framebuffer_layout.screen.left) /
86 (framebuffer_layout.screen.right - framebuffer_layout.screen.left); 86 (framebuffer_layout.screen.right - framebuffer_layout.screen.left);
87 touch_state->touch_y = static_cast<float>(framebuffer_y - framebuffer_layout.screen.top) / 87 touch_state->touch_y = static_cast<float>(framebuffer_y - framebuffer_layout.screen.top) /
@@ -91,7 +91,7 @@ void EmuWindow::TouchPressed(unsigned framebuffer_x, unsigned framebuffer_y) {
91} 91}
92 92
93void EmuWindow::TouchReleased() { 93void EmuWindow::TouchReleased() {
94 std::lock_guard<std::mutex> guard(touch_state->mutex); 94 std::lock_guard guard{touch_state->mutex};
95 touch_state->touch_pressed = false; 95 touch_state->touch_pressed = false;
96 touch_state->touch_x = 0; 96 touch_state->touch_x = 0;
97 touch_state->touch_y = 0; 97 touch_state->touch_y = 0;
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
index 352190da8..c8842410b 100644
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ b/src/core/hle/kernel/address_arbiter.cpp
@@ -26,7 +26,7 @@ void WakeThreads(const std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_
26 // them all. 26 // them all.
27 std::size_t last = waiting_threads.size(); 27 std::size_t last = waiting_threads.size();
28 if (num_to_wake > 0) { 28 if (num_to_wake > 0) {
29 last = num_to_wake; 29 last = std::min(last, static_cast<std::size_t>(num_to_wake));
30 } 30 }
31 31
32 // Signal the waiting threads. 32 // Signal the waiting threads.
@@ -90,9 +90,9 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a
90 // Determine the modified value depending on the waiting count. 90 // Determine the modified value depending on the waiting count.
91 s32 updated_value; 91 s32 updated_value;
92 if (waiting_threads.empty()) { 92 if (waiting_threads.empty()) {
93 updated_value = value - 1;
94 } else if (num_to_wake <= 0 || waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
95 updated_value = value + 1; 93 updated_value = value + 1;
94 } else if (num_to_wake <= 0 || waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
95 updated_value = value - 1;
96 } else { 96 } else {
97 updated_value = value; 97 updated_value = value;
98 } 98 }
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 4d224d01d..6baeb3494 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -29,12 +29,12 @@ namespace Kernel {
29 * @param thread_handle The handle of the thread that's been awoken 29 * @param thread_handle The handle of the thread that's been awoken
30 * @param cycles_late The number of CPU cycles that have passed since the desired wakeup time 30 * @param cycles_late The number of CPU cycles that have passed since the desired wakeup time
31 */ 31 */
32static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] int cycles_late) { 32static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_late) {
33 const auto proper_handle = static_cast<Handle>(thread_handle); 33 const auto proper_handle = static_cast<Handle>(thread_handle);
34 const auto& system = Core::System::GetInstance(); 34 const auto& system = Core::System::GetInstance();
35 35
36 // Lock the global kernel mutex when we enter the kernel HLE. 36 // Lock the global kernel mutex when we enter the kernel HLE.
37 std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); 37 std::lock_guard lock{HLE::g_hle_lock};
38 38
39 SharedPtr<Thread> thread = 39 SharedPtr<Thread> thread =
40 system.Kernel().RetrieveThreadFromWakeupCallbackHandleTable(proper_handle); 40 system.Kernel().RetrieveThreadFromWakeupCallbackHandleTable(proper_handle);
@@ -62,7 +62,8 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] int cycles_
62 62
63 if (thread->GetMutexWaitAddress() != 0 || thread->GetCondVarWaitAddress() != 0 || 63 if (thread->GetMutexWaitAddress() != 0 || thread->GetCondVarWaitAddress() != 0 ||
64 thread->GetWaitHandle() != 0) { 64 thread->GetWaitHandle() != 0) {
65 ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex); 65 ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex ||
66 thread->GetStatus() == ThreadStatus::WaitCondVar);
66 thread->SetMutexWaitAddress(0); 67 thread->SetMutexWaitAddress(0);
67 thread->SetCondVarWaitAddress(0); 68 thread->SetCondVarWaitAddress(0);
68 thread->SetWaitHandle(0); 69 thread->SetWaitHandle(0);
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index ff17ff865..03ea5b659 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -8,9 +8,6 @@
8#include <unordered_map> 8#include <unordered_map>
9#include "core/hle/kernel/object.h" 9#include "core/hle/kernel/object.h"
10 10
11template <typename T>
12class ResultVal;
13
14namespace Core { 11namespace Core {
15class System; 12class System;
16} 13}
diff --git a/src/core/hle/kernel/object.cpp b/src/core/hle/kernel/object.cpp
index 8870463d0..217144efc 100644
--- a/src/core/hle/kernel/object.cpp
+++ b/src/core/hle/kernel/object.cpp
@@ -23,6 +23,7 @@ bool Object::IsWaitable() const {
23 case HandleType::Unknown: 23 case HandleType::Unknown:
24 case HandleType::WritableEvent: 24 case HandleType::WritableEvent:
25 case HandleType::SharedMemory: 25 case HandleType::SharedMemory:
26 case HandleType::TransferMemory:
26 case HandleType::AddressArbiter: 27 case HandleType::AddressArbiter:
27 case HandleType::ResourceLimit: 28 case HandleType::ResourceLimit:
28 case HandleType::ClientPort: 29 case HandleType::ClientPort:
diff --git a/src/core/hle/kernel/object.h b/src/core/hle/kernel/object.h
index 4c2505908..3f6baa094 100644
--- a/src/core/hle/kernel/object.h
+++ b/src/core/hle/kernel/object.h
@@ -22,6 +22,7 @@ enum class HandleType : u32 {
22 WritableEvent, 22 WritableEvent,
23 ReadableEvent, 23 ReadableEvent,
24 SharedMemory, 24 SharedMemory,
25 TransferMemory,
25 Thread, 26 Thread,
26 Process, 27 Process,
27 AddressArbiter, 28 AddressArbiter,
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index 0d782e4ba..b0b7af76b 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -5,6 +5,7 @@
5#include <algorithm> 5#include <algorithm>
6#include <memory> 6#include <memory>
7#include <random> 7#include <random>
8#include "common/alignment.h"
8#include "common/assert.h" 9#include "common/assert.h"
9#include "common/logging/log.h" 10#include "common/logging/log.h"
10#include "core/core.h" 11#include "core/core.h"
@@ -75,6 +76,10 @@ SharedPtr<ResourceLimit> Process::GetResourceLimit() const {
75 return resource_limit; 76 return resource_limit;
76} 77}
77 78
79u64 Process::GetTotalPhysicalMemoryUsed() const {
80 return vm_manager.GetCurrentHeapSize() + main_thread_stack_size + code_memory_size;
81}
82
78ResultCode Process::ClearSignalState() { 83ResultCode Process::ClearSignalState() {
79 if (status == ProcessStatus::Exited) { 84 if (status == ProcessStatus::Exited) {
80 LOG_ERROR(Kernel, "called on a terminated process instance."); 85 LOG_ERROR(Kernel, "called on a terminated process instance.");
@@ -107,14 +112,17 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata) {
107 return handle_table.SetSize(capabilities.GetHandleTableSize()); 112 return handle_table.SetSize(capabilities.GetHandleTableSize());
108} 113}
109 114
110void Process::Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size) { 115void Process::Run(VAddr entry_point, s32 main_thread_priority, u64 stack_size) {
116 // The kernel always ensures that the given stack size is page aligned.
117 main_thread_stack_size = Common::AlignUp(stack_size, Memory::PAGE_SIZE);
118
111 // Allocate and map the main thread stack 119 // Allocate and map the main thread stack
112 // TODO(bunnei): This is heap area that should be allocated by the kernel and not mapped as part 120 // TODO(bunnei): This is heap area that should be allocated by the kernel and not mapped as part
113 // of the user address space. 121 // of the user address space.
122 const VAddr mapping_address = vm_manager.GetTLSIORegionEndAddress() - main_thread_stack_size;
114 vm_manager 123 vm_manager
115 .MapMemoryBlock(vm_manager.GetTLSIORegionEndAddress() - stack_size, 124 .MapMemoryBlock(mapping_address, std::make_shared<std::vector<u8>>(main_thread_stack_size),
116 std::make_shared<std::vector<u8>>(stack_size, 0), 0, stack_size, 125 0, main_thread_stack_size, MemoryState::Stack)
117 MemoryState::Stack)
118 .Unwrap(); 126 .Unwrap();
119 127
120 vm_manager.LogLayout(); 128 vm_manager.LogLayout();
@@ -224,6 +232,8 @@ void Process::LoadModule(CodeSet module_, VAddr base_addr) {
224 MapSegment(module_.RODataSegment(), VMAPermission::Read, MemoryState::CodeData); 232 MapSegment(module_.RODataSegment(), VMAPermission::Read, MemoryState::CodeData);
225 MapSegment(module_.DataSegment(), VMAPermission::ReadWrite, MemoryState::CodeData); 233 MapSegment(module_.DataSegment(), VMAPermission::ReadWrite, MemoryState::CodeData);
226 234
235 code_memory_size += module_.memory->size();
236
227 // Clear instruction cache in CPU JIT 237 // Clear instruction cache in CPU JIT
228 system.InvalidateCpuInstructionCaches(); 238 system.InvalidateCpuInstructionCaches();
229} 239}
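
For reference, the accounting introduced in process.cpp boils down to the minimal standalone sketch below: the main thread stack size is rounded up to a page boundary before mapping, and GetTotalPhysicalMemoryUsed() reports heap + stack + loaded code. PAGE_SIZE, AlignUp, and the struct here are simplified stand-ins mirroring the diff, not the emulator's actual headers.

#include <cassert>
#include <cstdint>

constexpr std::uint64_t PAGE_SIZE = 0x1000;

constexpr std::uint64_t AlignUp(std::uint64_t value, std::uint64_t alignment) {
    return (value + alignment - 1) & ~(alignment - 1);
}

struct ProcessMemoryAccounting {
    std::uint64_t current_heap_size = 0;      // vm_manager.GetCurrentHeapSize()
    std::uint64_t main_thread_stack_size = 0; // page-aligned in Process::Run
    std::uint64_t code_memory_size = 0;       // accumulated in Process::LoadModule

    std::uint64_t GetTotalPhysicalMemoryUsed() const {
        return current_heap_size + main_thread_stack_size + code_memory_size;
    }
};

int main() {
    ProcessMemoryAccounting mem;
    mem.main_thread_stack_size = AlignUp(0x1234, PAGE_SIZE); // rounds up to 0x2000
    mem.code_memory_size = 0x10000;
    mem.current_heap_size = 0x200000;
    assert(mem.GetTotalPhysicalMemoryUsed() == 0x212000);
}
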
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index 1bd7bf5c1..732d12170 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -35,14 +35,6 @@ class Thread;
35 35
36struct CodeSet; 36struct CodeSet;
37 37
38struct AddressMapping {
39 // Address and size must be page-aligned
40 VAddr address;
41 u64 size;
42 bool read_only;
43 bool unk_flag;
44};
45
46enum class MemoryRegion : u16 { 38enum class MemoryRegion : u16 {
47 APPLICATION = 1, 39 APPLICATION = 1,
48 SYSTEM = 2, 40 SYSTEM = 2,
@@ -194,6 +186,9 @@ public:
194 return random_entropy.at(index); 186 return random_entropy.at(index);
195 } 187 }
196 188
189 /// Retrieves the total physical memory used by this process in bytes.
190 u64 GetTotalPhysicalMemoryUsed() const;
191
197 /// Clears the signaled state of the process if and only if it's signaled. 192 /// Clears the signaled state of the process if and only if it's signaled.
198 /// 193 ///
199 /// @pre The process must not be already terminated. If this is called on a 194 /// @pre The process must not be already terminated. If this is called on a
@@ -218,7 +213,7 @@ public:
218 /** 213 /**
219 * Applies address space changes and launches the process main thread. 214 * Applies address space changes and launches the process main thread.
220 */ 215 */
221 void Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size); 216 void Run(VAddr entry_point, s32 main_thread_priority, u64 stack_size);
222 217
223 /** 218 /**
224 * Prepares a process for termination by stopping all of its threads 219 * Prepares a process for termination by stopping all of its threads
@@ -255,6 +250,12 @@ private:
255 /// Memory manager for this process. 250 /// Memory manager for this process.
256 Kernel::VMManager vm_manager; 251 Kernel::VMManager vm_manager;
257 252
253 /// Size of the main thread's stack in bytes.
254 u64 main_thread_stack_size = 0;
255
256 /// Size of the loaded code memory in bytes.
257 u64 code_memory_size = 0;
258
258 /// Current status of the process 259 /// Current status of the process
259 ProcessStatus status; 260 ProcessStatus status;
260 261
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index cc189cc64..ac501bf7f 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -29,8 +29,8 @@ Scheduler::~Scheduler() {
29} 29}
30 30
31bool Scheduler::HaveReadyThreads() const { 31bool Scheduler::HaveReadyThreads() const {
32 std::lock_guard<std::mutex> lock(scheduler_mutex); 32 std::lock_guard lock{scheduler_mutex};
33 return ready_queue.get_first() != nullptr; 33 return !ready_queue.empty();
34} 34}
35 35
36Thread* Scheduler::GetCurrentThread() const { 36Thread* Scheduler::GetCurrentThread() const {
@@ -46,22 +46,27 @@ Thread* Scheduler::PopNextReadyThread() {
46 Thread* thread = GetCurrentThread(); 46 Thread* thread = GetCurrentThread();
47 47
48 if (thread && thread->GetStatus() == ThreadStatus::Running) { 48 if (thread && thread->GetStatus() == ThreadStatus::Running) {
49 if (ready_queue.empty()) {
50 return thread;
51 }
49 // We have to do better than the current thread. 52 // We have to do better than the current thread.
50 // This call returns null when that's not possible. 53 // This call returns null when that's not possible.
51 next = ready_queue.pop_first_better(thread->GetPriority()); 54 next = ready_queue.front();
52 if (!next) { 55 if (next == nullptr || next->GetPriority() >= thread->GetPriority()) {
53 // Otherwise just keep going with the current thread
54 next = thread; 56 next = thread;
55 } 57 }
56 } else { 58 } else {
57 next = ready_queue.pop_first(); 59 if (ready_queue.empty()) {
60 return nullptr;
61 }
62 next = ready_queue.front();
58 } 63 }
59 64
60 return next; 65 return next;
61} 66}
62 67
63void Scheduler::SwitchContext(Thread* new_thread) { 68void Scheduler::SwitchContext(Thread* new_thread) {
64 Thread* const previous_thread = GetCurrentThread(); 69 Thread* previous_thread = GetCurrentThread();
65 Process* const previous_process = system.Kernel().CurrentProcess(); 70 Process* const previous_process = system.Kernel().CurrentProcess();
66 71
67 UpdateLastContextSwitchTime(previous_thread, previous_process); 72 UpdateLastContextSwitchTime(previous_thread, previous_process);
@@ -75,7 +80,7 @@ void Scheduler::SwitchContext(Thread* new_thread) {
75 if (previous_thread->GetStatus() == ThreadStatus::Running) { 80 if (previous_thread->GetStatus() == ThreadStatus::Running) {
76 // This is only the case when a reschedule is triggered without the current thread 81 // This is only the case when a reschedule is triggered without the current thread
77 // yielding execution (i.e. an event triggered, system core time-sliced, etc) 82 // yielding execution (i.e. an event triggered, system core time-sliced, etc)
78 ready_queue.push_front(previous_thread->GetPriority(), previous_thread); 83 ready_queue.add(previous_thread, previous_thread->GetPriority(), false);
79 previous_thread->SetStatus(ThreadStatus::Ready); 84 previous_thread->SetStatus(ThreadStatus::Ready);
80 } 85 }
81 } 86 }
@@ -90,7 +95,7 @@ void Scheduler::SwitchContext(Thread* new_thread) {
90 95
91 current_thread = new_thread; 96 current_thread = new_thread;
92 97
93 ready_queue.remove(new_thread->GetPriority(), new_thread); 98 ready_queue.remove(new_thread, new_thread->GetPriority());
94 new_thread->SetStatus(ThreadStatus::Running); 99 new_thread->SetStatus(ThreadStatus::Running);
95 100
96 auto* const thread_owner_process = current_thread->GetOwnerProcess(); 101 auto* const thread_owner_process = current_thread->GetOwnerProcess();
@@ -127,7 +132,7 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
127} 132}
128 133
129void Scheduler::Reschedule() { 134void Scheduler::Reschedule() {
130 std::lock_guard<std::mutex> lock(scheduler_mutex); 135 std::lock_guard lock{scheduler_mutex};
131 136
132 Thread* cur = GetCurrentThread(); 137 Thread* cur = GetCurrentThread();
133 Thread* next = PopNextReadyThread(); 138 Thread* next = PopNextReadyThread();
@@ -143,51 +148,54 @@ void Scheduler::Reschedule() {
143 SwitchContext(next); 148 SwitchContext(next);
144} 149}
145 150
146void Scheduler::AddThread(SharedPtr<Thread> thread, u32 priority) { 151void Scheduler::AddThread(SharedPtr<Thread> thread) {
147 std::lock_guard<std::mutex> lock(scheduler_mutex); 152 std::lock_guard lock{scheduler_mutex};
148 153
149 thread_list.push_back(std::move(thread)); 154 thread_list.push_back(std::move(thread));
150 ready_queue.prepare(priority);
151} 155}
152 156
153void Scheduler::RemoveThread(Thread* thread) { 157void Scheduler::RemoveThread(Thread* thread) {
154 std::lock_guard<std::mutex> lock(scheduler_mutex); 158 std::lock_guard lock{scheduler_mutex};
155 159
156 thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), 160 thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
157 thread_list.end()); 161 thread_list.end());
158} 162}
159 163
160void Scheduler::ScheduleThread(Thread* thread, u32 priority) { 164void Scheduler::ScheduleThread(Thread* thread, u32 priority) {
161 std::lock_guard<std::mutex> lock(scheduler_mutex); 165 std::lock_guard lock{scheduler_mutex};
162 166
163 ASSERT(thread->GetStatus() == ThreadStatus::Ready); 167 ASSERT(thread->GetStatus() == ThreadStatus::Ready);
164 ready_queue.push_back(priority, thread); 168 ready_queue.add(thread, priority);
165} 169}
166 170
167void Scheduler::UnscheduleThread(Thread* thread, u32 priority) { 171void Scheduler::UnscheduleThread(Thread* thread, u32 priority) {
168 std::lock_guard<std::mutex> lock(scheduler_mutex); 172 std::lock_guard lock{scheduler_mutex};
169 173
170 ASSERT(thread->GetStatus() == ThreadStatus::Ready); 174 ASSERT(thread->GetStatus() == ThreadStatus::Ready);
171 ready_queue.remove(priority, thread); 175 ready_queue.remove(thread, priority);
172} 176}
173 177
174void Scheduler::SetThreadPriority(Thread* thread, u32 priority) { 178void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
175 std::lock_guard<std::mutex> lock(scheduler_mutex); 179 std::lock_guard lock{scheduler_mutex};
180 if (thread->GetPriority() == priority) {
181 return;
182 }
176 183
177 // If thread was ready, adjust queues 184 // If thread was ready, adjust queues
178 if (thread->GetStatus() == ThreadStatus::Ready) 185 if (thread->GetStatus() == ThreadStatus::Ready)
179 ready_queue.move(thread, thread->GetPriority(), priority); 186 ready_queue.adjust(thread, thread->GetPriority(), priority);
180 else
181 ready_queue.prepare(priority);
182} 187}
183 188
184Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const { 189Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const {
185 std::lock_guard<std::mutex> lock(scheduler_mutex); 190 std::lock_guard lock{scheduler_mutex};
186 191
187 const u32 mask = 1U << core; 192 const u32 mask = 1U << core;
188 return ready_queue.get_first_filter([mask, maximum_priority](Thread const* thread) { 193 for (auto* thread : ready_queue) {
189 return (thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority; 194 if ((thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority) {
190 }); 195 return thread;
196 }
197 }
198 return nullptr;
191} 199}
192 200
193void Scheduler::YieldWithoutLoadBalancing(Thread* thread) { 201void Scheduler::YieldWithoutLoadBalancing(Thread* thread) {
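
The scheduler now drives its ready queue through add/remove/front/adjust/empty instead of the old push/pop interface. The sketch below approximates that interface with a plain array of lists purely to illustrate the call sites above; the real Common::MultiLevelQueue locates the highest non-empty priority with a bitmask and is not reproduced here.

#include <array>
#include <cstddef>
#include <iostream>
#include <list>

template <typename T, std::size_t Depth>
class SimpleMultiLevelQueue {
public:
    // push_back=false corresponds to the ready_queue.add(thread, prio, false)
    // call used when re-queuing the preempted thread at the front of its level.
    void add(T item, std::size_t priority, bool push_back = true) {
        auto& level = levels[priority];
        if (push_back)
            level.push_back(item);
        else
            level.push_front(item);
    }

    void remove(T item, std::size_t priority) {
        levels[priority].remove(item);
    }

    void adjust(T item, std::size_t old_priority, std::size_t new_priority) {
        remove(item, old_priority);
        add(item, new_priority);
    }

    // Returns the front item of the highest-priority non-empty level
    // (lower numeric priority is more urgent).
    T front() const {
        for (const auto& level : levels) {
            if (!level.empty())
                return level.front();
        }
        return T{};
    }

    bool empty() const {
        for (const auto& level : levels) {
            if (!level.empty())
                return false;
        }
        return true;
    }

private:
    std::array<std::list<T>, Depth> levels{};
};

int main() {
    SimpleMultiLevelQueue<int, 64> queue; // 64 levels, as with THREADPRIO_LOWEST + 1
    queue.add(1, 44);
    queue.add(2, 30);
    std::cout << queue.front() << '\n'; // prints 2: priority 30 beats 44
    queue.adjust(2, 30, 50);
    std::cout << queue.front() << '\n'; // prints 1
}
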
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index 1c5bf57d9..b29bf7be8 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -7,7 +7,7 @@
7#include <mutex> 7#include <mutex>
8#include <vector> 8#include <vector>
9#include "common/common_types.h" 9#include "common/common_types.h"
10#include "common/thread_queue_list.h" 10#include "common/multi_level_queue.h"
11#include "core/hle/kernel/object.h" 11#include "core/hle/kernel/object.h"
12#include "core/hle/kernel/thread.h" 12#include "core/hle/kernel/thread.h"
13 13
@@ -38,7 +38,7 @@ public:
38 u64 GetLastContextSwitchTicks() const; 38 u64 GetLastContextSwitchTicks() const;
39 39
40 /// Adds a new thread to the scheduler 40 /// Adds a new thread to the scheduler
41 void AddThread(SharedPtr<Thread> thread, u32 priority); 41 void AddThread(SharedPtr<Thread> thread);
42 42
43 /// Removes a thread from the scheduler 43 /// Removes a thread from the scheduler
44 void RemoveThread(Thread* thread); 44 void RemoveThread(Thread* thread);
@@ -156,7 +156,7 @@ private:
156 std::vector<SharedPtr<Thread>> thread_list; 156 std::vector<SharedPtr<Thread>> thread_list;
157 157
158 /// Lists only ready thread ids. 158 /// Lists only ready thread ids.
159 Common::ThreadQueueList<Thread*, THREADPRIO_LOWEST + 1> ready_queue; 159 Common::MultiLevelQueue<Thread*, THREADPRIO_LOWEST + 1> ready_queue;
160 160
161 SharedPtr<Thread> current_thread = nullptr; 161 SharedPtr<Thread> current_thread = nullptr;
162 162
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index a6a17efe7..76a8b0191 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -32,6 +32,7 @@
32#include "core/hle/kernel/svc.h" 32#include "core/hle/kernel/svc.h"
33#include "core/hle/kernel/svc_wrap.h" 33#include "core/hle/kernel/svc_wrap.h"
34#include "core/hle/kernel/thread.h" 34#include "core/hle/kernel/thread.h"
35#include "core/hle/kernel/transfer_memory.h"
35#include "core/hle/kernel/writable_event.h" 36#include "core/hle/kernel/writable_event.h"
36#include "core/hle/lock.h" 37#include "core/hle/lock.h"
37#include "core/hle/result.h" 38#include "core/hle/result.h"
@@ -174,11 +175,8 @@ static ResultCode SetHeapSize(VAddr* heap_addr, u64 heap_size) {
174 return ERR_INVALID_SIZE; 175 return ERR_INVALID_SIZE;
175 } 176 }
176 177
177 auto& vm_manager = Core::CurrentProcess()->VMManager(); 178 auto& vm_manager = Core::System::GetInstance().Kernel().CurrentProcess()->VMManager();
178 const VAddr heap_base = vm_manager.GetHeapRegionBaseAddress(); 179 const auto alloc_result = vm_manager.SetHeapSize(heap_size);
179 const auto alloc_result =
180 vm_manager.HeapAllocate(heap_base, heap_size, VMAPermission::ReadWrite);
181
182 if (alloc_result.Failed()) { 180 if (alloc_result.Failed()) {
183 return alloc_result.Code(); 181 return alloc_result.Code();
184 } 182 }
@@ -711,7 +709,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
711 HeapRegionBaseAddr = 4, 709 HeapRegionBaseAddr = 4,
712 HeapRegionSize = 5, 710 HeapRegionSize = 5,
713 TotalMemoryUsage = 6, 711 TotalMemoryUsage = 6,
714 TotalHeapUsage = 7, 712 TotalPhysicalMemoryUsed = 7,
715 IsCurrentProcessBeingDebugged = 8, 713 IsCurrentProcessBeingDebugged = 8,
716 RegisterResourceLimit = 9, 714 RegisterResourceLimit = 9,
717 IdleTickCount = 10, 715 IdleTickCount = 10,
@@ -747,7 +745,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
747 case GetInfoType::NewMapRegionBaseAddr: 745 case GetInfoType::NewMapRegionBaseAddr:
748 case GetInfoType::NewMapRegionSize: 746 case GetInfoType::NewMapRegionSize:
749 case GetInfoType::TotalMemoryUsage: 747 case GetInfoType::TotalMemoryUsage:
750 case GetInfoType::TotalHeapUsage: 748 case GetInfoType::TotalPhysicalMemoryUsed:
751 case GetInfoType::IsVirtualAddressMemoryEnabled: 749 case GetInfoType::IsVirtualAddressMemoryEnabled:
752 case GetInfoType::PersonalMmHeapUsage: 750 case GetInfoType::PersonalMmHeapUsage:
753 case GetInfoType::TitleId: 751 case GetInfoType::TitleId:
@@ -807,8 +805,8 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
807 *result = process->VMManager().GetTotalMemoryUsage(); 805 *result = process->VMManager().GetTotalMemoryUsage();
808 return RESULT_SUCCESS; 806 return RESULT_SUCCESS;
809 807
810 case GetInfoType::TotalHeapUsage: 808 case GetInfoType::TotalPhysicalMemoryUsed:
811 *result = process->VMManager().GetTotalHeapUsage(); 809 *result = process->GetTotalPhysicalMemoryUsed();
812 return RESULT_SUCCESS; 810 return RESULT_SUCCESS;
813 811
814 case GetInfoType::IsVirtualAddressMemoryEnabled: 812 case GetInfoType::IsVirtualAddressMemoryEnabled:
@@ -1355,7 +1353,7 @@ static ResultCode WaitProcessWideKeyAtomic(VAddr mutex_addr, VAddr condition_var
1355 current_thread->SetCondVarWaitAddress(condition_variable_addr); 1353 current_thread->SetCondVarWaitAddress(condition_variable_addr);
1356 current_thread->SetMutexWaitAddress(mutex_addr); 1354 current_thread->SetMutexWaitAddress(mutex_addr);
1357 current_thread->SetWaitHandle(thread_handle); 1355 current_thread->SetWaitHandle(thread_handle);
1358 current_thread->SetStatus(ThreadStatus::WaitMutex); 1356 current_thread->SetStatus(ThreadStatus::WaitCondVar);
1359 current_thread->InvalidateWakeupCallback(); 1357 current_thread->InvalidateWakeupCallback();
1360 1358
1361 current_thread->WakeAfterDelay(nano_seconds); 1359 current_thread->WakeAfterDelay(nano_seconds);
@@ -1399,10 +1397,10 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
1399 // them all. 1397 // them all.
1400 std::size_t last = waiting_threads.size(); 1398 std::size_t last = waiting_threads.size();
1401 if (target != -1) 1399 if (target != -1)
1402 last = target; 1400 last = std::min(waiting_threads.size(), static_cast<std::size_t>(target));
1403 1401
1404 // If there are no threads waiting on this condition variable, just exit 1402 // If there are no threads waiting on this condition variable, just exit
1405 if (last > waiting_threads.size()) 1403 if (last == 0)
1406 return RESULT_SUCCESS; 1404 return RESULT_SUCCESS;
1407 1405
1408 for (std::size_t index = 0; index < last; ++index) { 1406 for (std::size_t index = 0; index < last; ++index) {
@@ -1410,6 +1408,9 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
1410 1408
1411 ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr); 1409 ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr);
1412 1410
1411 // Release the thread from the condition variable; it will either resume or wait on the mutex below.
1412 thread->SetCondVarWaitAddress(0);
1413
1413 std::size_t current_core = Core::System::GetInstance().CurrentCoreIndex(); 1414 std::size_t current_core = Core::System::GetInstance().CurrentCoreIndex();
1414 1415
1415 auto& monitor = Core::System::GetInstance().Monitor(); 1416 auto& monitor = Core::System::GetInstance().Monitor();
@@ -1428,10 +1429,9 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
1428 } 1429 }
1429 } while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(), 1430 } while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(),
1430 thread->GetWaitHandle())); 1431 thread->GetWaitHandle()));
1431
1432 if (mutex_val == 0) { 1432 if (mutex_val == 0) {
1433 // We were able to acquire the mutex, resume this thread. 1433 // We were able to acquire the mutex, resume this thread.
1434 ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex); 1434 ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar);
1435 thread->ResumeFromWait(); 1435 thread->ResumeFromWait();
1436 1436
1437 auto* const lock_owner = thread->GetLockOwner(); 1437 auto* const lock_owner = thread->GetLockOwner();
@@ -1441,8 +1441,8 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
1441 1441
1442 thread->SetLockOwner(nullptr); 1442 thread->SetLockOwner(nullptr);
1443 thread->SetMutexWaitAddress(0); 1443 thread->SetMutexWaitAddress(0);
1444 thread->SetCondVarWaitAddress(0);
1445 thread->SetWaitHandle(0); 1444 thread->SetWaitHandle(0);
1445 Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
1446 } else { 1446 } else {
1447 // Atomically signal that the mutex now has a waiting thread. 1447 // Atomically signal that the mutex now has a waiting thread.
1448 do { 1448 do {
@@ -1461,12 +1461,11 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
1461 const auto& handle_table = Core::CurrentProcess()->GetHandleTable(); 1461 const auto& handle_table = Core::CurrentProcess()->GetHandleTable();
1462 auto owner = handle_table.Get<Thread>(owner_handle); 1462 auto owner = handle_table.Get<Thread>(owner_handle);
1463 ASSERT(owner); 1463 ASSERT(owner);
1464 ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex); 1464 ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar);
1465 thread->InvalidateWakeupCallback(); 1465 thread->InvalidateWakeupCallback();
1466 thread->SetStatus(ThreadStatus::WaitMutex);
1466 1467
1467 owner->AddMutexWaiter(thread); 1468 owner->AddMutexWaiter(thread);
1468
1469 Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
1470 } 1469 }
1471 } 1470 }
1472 1471
@@ -1586,14 +1585,121 @@ static ResultCode CreateTransferMemory(Handle* handle, VAddr addr, u64 size, u32
1586 } 1585 }
1587 1586
1588 auto& kernel = Core::System::GetInstance().Kernel(); 1587 auto& kernel = Core::System::GetInstance().Kernel();
1589 auto process = kernel.CurrentProcess(); 1588 auto transfer_mem_handle = TransferMemory::Create(kernel, addr, size, perms);
1590 auto& handle_table = process->GetHandleTable();
1591 const auto shared_mem_handle = SharedMemory::Create(kernel, process, size, perms, perms, addr);
1592 1589
1593 CASCADE_RESULT(*handle, handle_table.Create(shared_mem_handle)); 1590 auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
1591 const auto result = handle_table.Create(std::move(transfer_mem_handle));
1592 if (result.Failed()) {
1593 return result.Code();
1594 }
1595
1596 *handle = *result;
1594 return RESULT_SUCCESS; 1597 return RESULT_SUCCESS;
1595} 1598}
1596 1599
1600static ResultCode MapTransferMemory(Handle handle, VAddr address, u64 size, u32 permission_raw) {
1601 LOG_DEBUG(Kernel_SVC,
1602 "called. handle=0x{:08X}, address=0x{:016X}, size=0x{:016X}, permissions=0x{:08X}",
1603 handle, address, size, permission_raw);
1604
1605 if (!Common::Is4KBAligned(address)) {
1606 LOG_ERROR(Kernel_SVC, "Transfer memory addresses must be 4KB aligned (address=0x{:016X}).",
1607 address);
1608 return ERR_INVALID_ADDRESS;
1609 }
1610
1611 if (size == 0 || !Common::Is4KBAligned(size)) {
1612 LOG_ERROR(Kernel_SVC,
1613 "Transfer memory sizes must be 4KB aligned and not be zero (size=0x{:016X}).",
1614 size);
1615 return ERR_INVALID_SIZE;
1616 }
1617
1618 if (!IsValidAddressRange(address, size)) {
1619 LOG_ERROR(Kernel_SVC,
1620 "Given address and size overflow the 64-bit range (address=0x{:016X}, "
1621 "size=0x{:016X}).",
1622 address, size);
1623 return ERR_INVALID_ADDRESS_STATE;
1624 }
1625
1626 const auto permissions = static_cast<MemoryPermission>(permission_raw);
1627 if (permissions != MemoryPermission::None && permissions != MemoryPermission::Read &&
1628 permissions != MemoryPermission::ReadWrite) {
1629 LOG_ERROR(Kernel_SVC, "Invalid transfer memory permissions given (permissions=0x{:08X}).",
1630 permission_raw);
1631 return ERR_INVALID_STATE;
1632 }
1633
1634 const auto& kernel = Core::System::GetInstance().Kernel();
1635 const auto* const current_process = kernel.CurrentProcess();
1636 const auto& handle_table = current_process->GetHandleTable();
1637
1638 auto transfer_memory = handle_table.Get<TransferMemory>(handle);
1639 if (!transfer_memory) {
1640 LOG_ERROR(Kernel_SVC, "Nonexistent transfer memory handle given (handle=0x{:08X}).",
1641 handle);
1642 return ERR_INVALID_HANDLE;
1643 }
1644
1645 if (!current_process->VMManager().IsWithinASLRRegion(address, size)) {
1646 LOG_ERROR(Kernel_SVC,
1647 "Given address and size don't fully fit within the ASLR region "
1648 "(address=0x{:016X}, size=0x{:016X}).",
1649 address, size);
1650 return ERR_INVALID_MEMORY_RANGE;
1651 }
1652
1653 return transfer_memory->MapMemory(address, size, permissions);
1654}
1655
1656static ResultCode UnmapTransferMemory(Handle handle, VAddr address, u64 size) {
1657 LOG_DEBUG(Kernel_SVC, "called. handle=0x{:08X}, address=0x{:016X}, size=0x{:016X}", handle,
1658 address, size);
1659
1660 if (!Common::Is4KBAligned(address)) {
1661 LOG_ERROR(Kernel_SVC, "Transfer memory addresses must be 4KB aligned (address=0x{:016X}).",
1662 address);
1663 return ERR_INVALID_ADDRESS;
1664 }
1665
1666 if (size == 0 || !Common::Is4KBAligned(size)) {
1667 LOG_ERROR(Kernel_SVC,
1668 "Transfer memory sizes must be 4KB aligned and not be zero (size=0x{:016X}).",
1669 size);
1670 return ERR_INVALID_SIZE;
1671 }
1672
1673 if (!IsValidAddressRange(address, size)) {
1674 LOG_ERROR(Kernel_SVC,
1675 "Given address and size overflow the 64-bit range (address=0x{:016X}, "
1676 "size=0x{:016X}).",
1677 address, size);
1678 return ERR_INVALID_ADDRESS_STATE;
1679 }
1680
1681 const auto& kernel = Core::System::GetInstance().Kernel();
1682 const auto* const current_process = kernel.CurrentProcess();
1683 const auto& handle_table = current_process->GetHandleTable();
1684
1685 auto transfer_memory = handle_table.Get<TransferMemory>(handle);
1686 if (!transfer_memory) {
1687 LOG_ERROR(Kernel_SVC, "Nonexistent transfer memory handle given (handle=0x{:08X}).",
1688 handle);
1689 return ERR_INVALID_HANDLE;
1690 }
1691
1692 if (!current_process->VMManager().IsWithinASLRRegion(address, size)) {
1693 LOG_ERROR(Kernel_SVC,
1694 "Given address and size don't fully fit within the ASLR region "
1695 "(address=0x{:016X}, size=0x{:016X}).",
1696 address, size);
1697 return ERR_INVALID_MEMORY_RANGE;
1698 }
1699
1700 return transfer_memory->UnmapMemory(address, size);
1701}
1702
1597static ResultCode GetThreadCoreMask(Handle thread_handle, u32* core, u64* mask) { 1703static ResultCode GetThreadCoreMask(Handle thread_handle, u32* core, u64* mask) {
1598 LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle); 1704 LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);
1599 1705
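
The new MapTransferMemory/UnmapTransferMemory handlers lean on two small validation helpers. Common::Is4KBAligned and IsValidAddressRange exist in the code base, but the standalone definitions below are assumptions reconstructed from the checks performed above, shown only to make the rejection cases concrete:

#include <cassert>
#include <cstdint>

constexpr bool Is4KBAligned(std::uint64_t value) {
    return (value & 0xFFF) == 0;
}

constexpr bool IsValidAddressRange(std::uint64_t address, std::uint64_t size) {
    // The end of the range must not wrap around the 64-bit address space.
    return address + size > address;
}

int main() {
    assert(Is4KBAligned(0x2000));
    assert(!Is4KBAligned(0x2001));
    assert(IsValidAddressRange(0x1000, 0x1000));
    assert(!IsValidAddressRange(0xFFFFFFFFFFFFF000ULL, 0x2000)); // wraps past 2^64
}
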
@@ -1969,8 +2075,8 @@ static const FunctionDef SVC_Table[] = {
1969 {0x4E, nullptr, "ReadWriteRegister"}, 2075 {0x4E, nullptr, "ReadWriteRegister"},
1970 {0x4F, nullptr, "SetProcessActivity"}, 2076 {0x4F, nullptr, "SetProcessActivity"},
1971 {0x50, SvcWrap<CreateSharedMemory>, "CreateSharedMemory"}, 2077 {0x50, SvcWrap<CreateSharedMemory>, "CreateSharedMemory"},
1972 {0x51, nullptr, "MapTransferMemory"}, 2078 {0x51, SvcWrap<MapTransferMemory>, "MapTransferMemory"},
1973 {0x52, nullptr, "UnmapTransferMemory"}, 2079 {0x52, SvcWrap<UnmapTransferMemory>, "UnmapTransferMemory"},
1974 {0x53, nullptr, "CreateInterruptEvent"}, 2080 {0x53, nullptr, "CreateInterruptEvent"},
1975 {0x54, nullptr, "QueryPhysicalAddress"}, 2081 {0x54, nullptr, "QueryPhysicalAddress"},
1976 {0x55, nullptr, "QueryIoMapping"}, 2082 {0x55, nullptr, "QueryIoMapping"},
@@ -2032,7 +2138,7 @@ void CallSVC(u32 immediate) {
2032 MICROPROFILE_SCOPE(Kernel_SVC); 2138 MICROPROFILE_SCOPE(Kernel_SVC);
2033 2139
2034 // Lock the global kernel mutex when we enter the kernel HLE. 2140 // Lock the global kernel mutex when we enter the kernel HLE.
2035 std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); 2141 std::lock_guard lock{HLE::g_hle_lock};
2036 2142
2037 const FunctionDef* info = GetSVCInfo(immediate); 2143 const FunctionDef* info = GetSVCInfo(immediate);
2038 if (info) { 2144 if (info) {
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 3b22e8e0d..fa3ac3abc 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -105,6 +105,7 @@ void Thread::ResumeFromWait() {
105 case ThreadStatus::WaitSleep: 105 case ThreadStatus::WaitSleep:
106 case ThreadStatus::WaitIPC: 106 case ThreadStatus::WaitIPC:
107 case ThreadStatus::WaitMutex: 107 case ThreadStatus::WaitMutex:
108 case ThreadStatus::WaitCondVar:
108 case ThreadStatus::WaitArb: 109 case ThreadStatus::WaitArb:
109 break; 110 break;
110 111
@@ -198,7 +199,7 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
198 thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap(); 199 thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap();
199 thread->owner_process = &owner_process; 200 thread->owner_process = &owner_process;
200 thread->scheduler = &system.Scheduler(processor_id); 201 thread->scheduler = &system.Scheduler(processor_id);
201 thread->scheduler->AddThread(thread, priority); 202 thread->scheduler->AddThread(thread);
202 thread->tls_address = thread->owner_process->MarkNextAvailableTLSSlotAsUsed(*thread); 203 thread->tls_address = thread->owner_process->MarkNextAvailableTLSSlotAsUsed(*thread);
203 204
204 // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used 205 // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
@@ -351,7 +352,7 @@ void Thread::ChangeScheduler() {
351 if (*new_processor_id != processor_id) { 352 if (*new_processor_id != processor_id) {
352 // Remove thread from previous core's scheduler 353 // Remove thread from previous core's scheduler
353 scheduler->RemoveThread(this); 354 scheduler->RemoveThread(this);
354 next_scheduler.AddThread(this, current_priority); 355 next_scheduler.AddThread(this);
355 } 356 }
356 357
357 processor_id = *new_processor_id; 358 processor_id = *new_processor_id;
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index faad5f391..9c684758c 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -51,7 +51,8 @@ enum class ThreadStatus {
51 WaitIPC, ///< Waiting for the reply from an IPC request 51 WaitIPC, ///< Waiting for the reply from an IPC request
52 WaitSynchAny, ///< Waiting due to WaitSynch1 or WaitSynchN with wait_all = false 52 WaitSynchAny, ///< Waiting due to WaitSynch1 or WaitSynchN with wait_all = false
53 WaitSynchAll, ///< Waiting due to WaitSynchronizationN with wait_all = true 53 WaitSynchAll, ///< Waiting due to WaitSynchronizationN with wait_all = true
54 WaitMutex, ///< Waiting due to an ArbitrateLock/WaitProcessWideKey svc 54 WaitMutex, ///< Waiting due to an ArbitrateLock svc
 55 WaitCondVar, ///< Waiting due to a WaitProcessWideKey svc
55 WaitArb, ///< Waiting due to a SignalToAddress/WaitForAddress svc 56 WaitArb, ///< Waiting due to a SignalToAddress/WaitForAddress svc
56 Dormant, ///< Created but not yet made ready 57 Dormant, ///< Created but not yet made ready
57 Dead ///< Run to completion, or forcefully terminated 58 Dead ///< Run to completion, or forcefully terminated
diff --git a/src/core/hle/kernel/transfer_memory.cpp b/src/core/hle/kernel/transfer_memory.cpp
new file mode 100644
index 000000000..23228e1b5
--- /dev/null
+++ b/src/core/hle/kernel/transfer_memory.cpp
@@ -0,0 +1,73 @@
1// Copyright 2019 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "core/hle/kernel/errors.h"
6#include "core/hle/kernel/kernel.h"
7#include "core/hle/kernel/process.h"
8#include "core/hle/kernel/shared_memory.h"
9#include "core/hle/kernel/transfer_memory.h"
10#include "core/hle/result.h"
11
12namespace Kernel {
13
14TransferMemory::TransferMemory(KernelCore& kernel) : Object{kernel} {}
15TransferMemory::~TransferMemory() = default;
16
17SharedPtr<TransferMemory> TransferMemory::Create(KernelCore& kernel, VAddr base_address,
18 size_t size, MemoryPermission permissions) {
19 SharedPtr<TransferMemory> transfer_memory{new TransferMemory(kernel)};
20
21 transfer_memory->base_address = base_address;
22 transfer_memory->memory_size = size;
23 transfer_memory->owner_permissions = permissions;
24 transfer_memory->owner_process = kernel.CurrentProcess();
25
26 return transfer_memory;
27}
28
29ResultCode TransferMemory::MapMemory(VAddr address, size_t size, MemoryPermission permissions) {
30 if (memory_size != size) {
31 return ERR_INVALID_SIZE;
32 }
33
34 if (owner_permissions != permissions) {
35 return ERR_INVALID_STATE;
36 }
37
38 if (is_mapped) {
39 return ERR_INVALID_STATE;
40 }
41
42 const auto map_state = owner_permissions == MemoryPermission::None
43 ? MemoryState::TransferMemoryIsolated
44 : MemoryState::TransferMemory;
45 auto& vm_manager = owner_process->VMManager();
46 const auto map_result = vm_manager.MapMemoryBlock(
47 address, std::make_shared<std::vector<u8>>(size), 0, size, map_state);
48
49 if (map_result.Failed()) {
50 return map_result.Code();
51 }
52
53 is_mapped = true;
54 return RESULT_SUCCESS;
55}
56
57ResultCode TransferMemory::UnmapMemory(VAddr address, size_t size) {
58 if (memory_size != size) {
59 return ERR_INVALID_SIZE;
60 }
61
62 auto& vm_manager = owner_process->VMManager();
63 const auto result = vm_manager.UnmapRange(address, size);
64
65 if (result.IsError()) {
66 return result;
67 }
68
69 is_mapped = false;
70 return RESULT_SUCCESS;
71}
72
73} // namespace Kernel
diff --git a/src/core/hle/kernel/transfer_memory.h b/src/core/hle/kernel/transfer_memory.h
new file mode 100644
index 000000000..ec294951e
--- /dev/null
+++ b/src/core/hle/kernel/transfer_memory.h
@@ -0,0 +1,91 @@
1// Copyright 2019 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "core/hle/kernel/object.h"
8
9union ResultCode;
10
11namespace Kernel {
12
13class KernelCore;
14class Process;
15
16enum class MemoryPermission : u32;
17
18/// Defines the interface for transfer memory objects.
19///
20/// Transfer memory is typically used for the purpose of
21/// transferring memory between separate process instances,
22/// thus the name.
23///
24class TransferMemory final : public Object {
25public:
26 static constexpr HandleType HANDLE_TYPE = HandleType::TransferMemory;
27
28 static SharedPtr<TransferMemory> Create(KernelCore& kernel, VAddr base_address, size_t size,
29 MemoryPermission permissions);
30
31 TransferMemory(const TransferMemory&) = delete;
32 TransferMemory& operator=(const TransferMemory&) = delete;
33
34 TransferMemory(TransferMemory&&) = delete;
35 TransferMemory& operator=(TransferMemory&&) = delete;
36
37 std::string GetTypeName() const override {
38 return "TransferMemory";
39 }
40
41 std::string GetName() const override {
42 return GetTypeName();
43 }
44
45 HandleType GetHandleType() const override {
46 return HANDLE_TYPE;
47 }
48
49 /// Attempts to map transfer memory with the given range and memory permissions.
50 ///
 51 /// @param address The base address to begin mapping memory at.
52 /// @param size The size of the memory to map, in bytes.
53 /// @param permissions The memory permissions to check against when mapping memory.
54 ///
55 /// @pre The given address, size, and memory permissions must all match
56 /// the same values that were given when creating the transfer memory
57 /// instance.
58 ///
59 ResultCode MapMemory(VAddr address, size_t size, MemoryPermission permissions);
60
61 /// Unmaps the transfer memory with the given range
62 ///
63 /// @param address The base address to begin unmapping memory at.
64 /// @param size The size of the memory to unmap, in bytes.
65 ///
66 /// @pre The given address and size must be the same as the ones used
67 /// to create the transfer memory instance.
68 ///
69 ResultCode UnmapMemory(VAddr address, size_t size);
70
71private:
72 explicit TransferMemory(KernelCore& kernel);
73 ~TransferMemory() override;
74
75 /// The base address for the memory managed by this instance.
76 VAddr base_address = 0;
77
78 /// Size of the memory, in bytes, that this instance manages.
79 size_t memory_size = 0;
80
81 /// The memory permissions that are applied to this instance.
82 MemoryPermission owner_permissions{};
83
84 /// The process that this transfer memory instance was created under.
85 Process* owner_process = nullptr;
86
87 /// Whether or not this transfer memory instance has mapped memory.
88 bool is_mapped = false;
89};
90
91} // namespace Kernel
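
Taken together, the two new files define a small map/unmap state machine: mapping requires the original size and permissions and fails if the memory is already mapped, while unmapping requires the original size and clears the mapped flag. A self-contained model of just that logic; the enum, struct, and values below are illustrative, not the kernel types:

#include <cstdint>
#include <iostream>

enum class Result { Success, InvalidSize, InvalidState };

struct TransferMemorySketch {
    std::uint64_t memory_size;
    std::uint32_t owner_permissions;
    bool is_mapped = false;

    Result Map(std::uint64_t size, std::uint32_t permissions) {
        if (size != memory_size)
            return Result::InvalidSize;
        if (permissions != owner_permissions || is_mapped)
            return Result::InvalidState;
        is_mapped = true;
        return Result::Success;
    }

    Result Unmap(std::uint64_t size) {
        if (size != memory_size)
            return Result::InvalidSize;
        is_mapped = false;
        return Result::Success;
    }
};

int main() {
    TransferMemorySketch tmem{0x1000, /*permissions=*/0};
    std::cout << (tmem.Map(0x1000, 0) == Result::Success) << '\n';      // 1
    std::cout << (tmem.Map(0x1000, 0) == Result::InvalidState) << '\n'; // 1: already mapped
    std::cout << (tmem.Unmap(0x1000) == Result::Success) << '\n';       // 1
}
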
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index 22bf55ce7..ec0a480ce 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -256,57 +256,50 @@ ResultCode VMManager::ReprotectRange(VAddr target, u64 size, VMAPermission new_p
256 return RESULT_SUCCESS; 256 return RESULT_SUCCESS;
257} 257}
258 258
259ResultVal<VAddr> VMManager::HeapAllocate(VAddr target, u64 size, VMAPermission perms) { 259ResultVal<VAddr> VMManager::SetHeapSize(u64 size) {
260 if (!IsWithinHeapRegion(target, size)) { 260 if (size > GetHeapRegionSize()) {
261 return ERR_INVALID_ADDRESS; 261 return ERR_OUT_OF_MEMORY;
262 }
263
264 // No need to do any additional work if the heap is already the given size.
265 if (size == GetCurrentHeapSize()) {
266 return MakeResult(heap_region_base);
262 } 267 }
263 268
264 if (heap_memory == nullptr) { 269 if (heap_memory == nullptr) {
265 // Initialize heap 270 // Initialize heap
266 heap_memory = std::make_shared<std::vector<u8>>(); 271 heap_memory = std::make_shared<std::vector<u8>>(size);
267 heap_start = heap_end = target; 272 heap_end = heap_region_base + size;
268 } else { 273 } else {
269 UnmapRange(heap_start, heap_end - heap_start); 274 UnmapRange(heap_region_base, GetCurrentHeapSize());
270 }
271
272 // If necessary, expand backing vector to cover new heap extents.
273 if (target < heap_start) {
274 heap_memory->insert(begin(*heap_memory), heap_start - target, 0);
275 heap_start = target;
276 RefreshMemoryBlockMappings(heap_memory.get());
277 }
278 if (target + size > heap_end) {
279 heap_memory->insert(end(*heap_memory), (target + size) - heap_end, 0);
280 heap_end = target + size;
281 RefreshMemoryBlockMappings(heap_memory.get());
282 } 275 }
283 ASSERT(heap_end - heap_start == heap_memory->size());
284 276
285 CASCADE_RESULT(auto vma, MapMemoryBlock(target, heap_memory, target - heap_start, size, 277 // If necessary, expand backing vector to cover new heap extents in
286 MemoryState::Heap)); 278 // the case of allocating. Otherwise, shrink the backing memory,
287 Reprotect(vma, perms); 279 // if a smaller heap has been requested.
280 const u64 old_heap_size = GetCurrentHeapSize();
281 if (size > old_heap_size) {
282 const u64 alloc_size = size - old_heap_size;
288 283
289 heap_used = size; 284 heap_memory->insert(heap_memory->end(), alloc_size, 0);
290 285 RefreshMemoryBlockMappings(heap_memory.get());
291 return MakeResult<VAddr>(heap_end - size); 286 } else if (size < old_heap_size) {
292} 287 heap_memory->resize(size);
288 heap_memory->shrink_to_fit();
293 289
294ResultCode VMManager::HeapFree(VAddr target, u64 size) { 290 RefreshMemoryBlockMappings(heap_memory.get());
295 if (!IsWithinHeapRegion(target, size)) {
296 return ERR_INVALID_ADDRESS;
297 } 291 }
298 292
299 if (size == 0) { 293 heap_end = heap_region_base + size;
300 return RESULT_SUCCESS; 294 ASSERT(GetCurrentHeapSize() == heap_memory->size());
301 }
302 295
303 const ResultCode result = UnmapRange(target, size); 296 const auto mapping_result =
304 if (result.IsError()) { 297 MapMemoryBlock(heap_region_base, heap_memory, 0, size, MemoryState::Heap);
305 return result; 298 if (mapping_result.Failed()) {
299 return mapping_result.Code();
306 } 300 }
307 301
308 heap_used -= size; 302 return MakeResult<VAddr>(heap_region_base);
309 return RESULT_SUCCESS;
310} 303}
311 304
312MemoryInfo VMManager::QueryMemory(VAddr address) const { 305MemoryInfo VMManager::QueryMemory(VAddr address) const {
@@ -598,6 +591,7 @@ void VMManager::InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType ty
598 591
599 heap_region_base = map_region_end; 592 heap_region_base = map_region_end;
600 heap_region_end = heap_region_base + heap_region_size; 593 heap_region_end = heap_region_base + heap_region_size;
594 heap_end = heap_region_base;
601 595
602 new_map_region_base = heap_region_end; 596 new_map_region_base = heap_region_end;
603 new_map_region_end = new_map_region_base + new_map_region_size; 597 new_map_region_end = new_map_region_base + new_map_region_size;
@@ -692,10 +686,6 @@ u64 VMManager::GetTotalMemoryUsage() const {
692 return 0xF8000000; 686 return 0xF8000000;
693} 687}
694 688
695u64 VMManager::GetTotalHeapUsage() const {
696 return heap_used;
697}
698
699VAddr VMManager::GetAddressSpaceBaseAddress() const { 689VAddr VMManager::GetAddressSpaceBaseAddress() const {
700 return address_space_base; 690 return address_space_base;
701} 691}
@@ -778,6 +768,10 @@ u64 VMManager::GetHeapRegionSize() const {
778 return heap_region_end - heap_region_base; 768 return heap_region_end - heap_region_base;
779} 769}
780 770
771u64 VMManager::GetCurrentHeapSize() const {
772 return heap_end - heap_region_base;
773}
774
781bool VMManager::IsWithinHeapRegion(VAddr address, u64 size) const { 775bool VMManager::IsWithinHeapRegion(VAddr address, u64 size) const {
782 return IsInsideAddressRange(address, size, GetHeapRegionBaseAddress(), 776 return IsInsideAddressRange(address, size, GetHeapRegionBaseAddress(),
783 GetHeapRegionEndAddress()); 777 GetHeapRegionEndAddress());
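
SetHeapSize replaces the old HeapAllocate/HeapFree pair: the heap is always anchored at heap_region_base, the backing vector grows by appending zeroes or shrinks via resize, and heap_end tracks base + current size. A simplified standalone model of that behaviour; the region addresses and sizes below are made up for illustration:

#include <cassert>
#include <cstdint>
#include <vector>

struct HeapModel {
    std::uint64_t heap_region_base = 0x8000000;
    std::uint64_t heap_region_size = 0x1000000;
    std::uint64_t heap_end = 0x8000000; // base + current size
    std::vector<std::uint8_t> backing;

    std::uint64_t CurrentSize() const { return heap_end - heap_region_base; }

    bool SetHeapSize(std::uint64_t size) {
        if (size > heap_region_size)
            return false; // ERR_OUT_OF_MEMORY in the real implementation
        if (size > CurrentSize())
            backing.insert(backing.end(), size - CurrentSize(), 0); // grow with zeroes
        else
            backing.resize(size); // shrink
        heap_end = heap_region_base + size;
        return true;
    }
};

int main() {
    HeapModel heap;
    assert(heap.SetHeapSize(0x200000) && heap.CurrentSize() == 0x200000);
    assert(heap.SetHeapSize(0x100000) && heap.backing.size() == 0x100000); // shrink
    assert(!heap.SetHeapSize(0x2000000)); // larger than the heap region
}
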
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index 7cdff6094..6f484b7bf 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -380,11 +380,41 @@ public:
380 /// Changes the permissions of a range of addresses, splitting VMAs as necessary. 380 /// Changes the permissions of a range of addresses, splitting VMAs as necessary.
381 ResultCode ReprotectRange(VAddr target, u64 size, VMAPermission new_perms); 381 ResultCode ReprotectRange(VAddr target, u64 size, VMAPermission new_perms);
382 382
383 ResultVal<VAddr> HeapAllocate(VAddr target, u64 size, VMAPermission perms);
384 ResultCode HeapFree(VAddr target, u64 size);
385
386 ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state); 383 ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state);
387 384
385 /// Attempts to allocate a heap with the given size.
386 ///
387 /// @param size The size of the heap to allocate in bytes.
388 ///
389 /// @note If a heap is currently allocated, and this is called
390 /// with a size that is equal to the size of the current heap,
391 /// then this function will do nothing and return the current
392 /// heap's starting address, as there's no need to perform
393 /// any additional heap allocation work.
394 ///
395 /// @note If a heap is currently allocated, and this is called
396 /// with a size less than the current heap's size, then
397 /// this function will attempt to shrink the heap.
398 ///
399 /// @note If a heap is currently allocated, and this is called
400 /// with a size larger than the current heap's size, then
401 /// this function will attempt to extend the size of the heap.
402 ///
403 /// @returns A result indicating either success or failure.
404 /// <p>
405 /// If successful, this function will return a result
406 /// containing the starting address to the allocated heap.
407 /// <p>
408 /// If unsuccessful, this function will return a result
409 /// containing an error code.
410 ///
411 /// @pre The given size must lie within the allowable heap
412 /// memory region managed by this VMManager instance.
413 /// Failure to abide by this will result in ERR_OUT_OF_MEMORY
414 /// being returned as the result.
415 ///
416 ResultVal<VAddr> SetHeapSize(u64 size);
417
388 /// Queries the memory manager for information about the given address. 418 /// Queries the memory manager for information about the given address.
389 /// 419 ///
390 /// @param address The address to query the memory manager about for information. 420 /// @param address The address to query the memory manager about for information.
@@ -418,9 +448,6 @@ public:
418 /// Gets the total memory usage, used by svcGetInfo 448 /// Gets the total memory usage, used by svcGetInfo
419 u64 GetTotalMemoryUsage() const; 449 u64 GetTotalMemoryUsage() const;
420 450
421 /// Gets the total heap usage, used by svcGetInfo
422 u64 GetTotalHeapUsage() const;
423
424 /// Gets the address space base address 451 /// Gets the address space base address
425 VAddr GetAddressSpaceBaseAddress() const; 452 VAddr GetAddressSpaceBaseAddress() const;
426 453
@@ -469,6 +496,13 @@ public:
469 /// Gets the total size of the heap region in bytes. 496 /// Gets the total size of the heap region in bytes.
470 u64 GetHeapRegionSize() const; 497 u64 GetHeapRegionSize() const;
471 498
499 /// Gets the total size of the current heap in bytes.
500 ///
501 /// @note This is the current allocated heap size, not the size
502 /// of the region it's allowed to exist within.
503 ///
504 u64 GetCurrentHeapSize() const;
505
472 /// Determines whether or not the specified range is within the heap region. 506 /// Determines whether or not the specified range is within the heap region.
473 bool IsWithinHeapRegion(VAddr address, u64 size) const; 507 bool IsWithinHeapRegion(VAddr address, u64 size) const;
474 508
@@ -617,9 +651,6 @@ private:
617 VAddr new_map_region_base = 0; 651 VAddr new_map_region_base = 0;
618 VAddr new_map_region_end = 0; 652 VAddr new_map_region_end = 0;
619 653
620 VAddr main_code_region_base = 0;
621 VAddr main_code_region_end = 0;
622
623 VAddr tls_io_region_base = 0; 654 VAddr tls_io_region_base = 0;
624 VAddr tls_io_region_end = 0; 655 VAddr tls_io_region_end = 0;
625 656
@@ -628,9 +659,9 @@ private:
628 // This makes deallocation and reallocation of holes fast and keeps process memory contiguous 659 // This makes deallocation and reallocation of holes fast and keeps process memory contiguous
629 // in the emulator address space, allowing Memory::GetPointer to be reasonably safe. 660 // in the emulator address space, allowing Memory::GetPointer to be reasonably safe.
630 std::shared_ptr<std::vector<u8>> heap_memory; 661 std::shared_ptr<std::vector<u8>> heap_memory;
631 // The left/right bounds of the address space covered by heap_memory. 662
632 VAddr heap_start = 0; 663 // The end of the currently allocated heap. This is not an inclusive
664 // end of the range. This is essentially 'base_address + current_size'.
633 VAddr heap_end = 0; 665 VAddr heap_end = 0;
634 u64 heap_used = 0;
635}; 666};
636} // namespace Kernel 667} // namespace Kernel
diff --git a/src/core/hle/service/fatal/fatal.cpp b/src/core/hle/service/fatal/fatal.cpp
index 770590d0b..2c229bcad 100644
--- a/src/core/hle/service/fatal/fatal.cpp
+++ b/src/core/hle/service/fatal/fatal.cpp
@@ -25,21 +25,34 @@ Module::Interface::Interface(std::shared_ptr<Module> module, const char* name)
25Module::Interface::~Interface() = default; 25Module::Interface::~Interface() = default;
26 26
27struct FatalInfo { 27struct FatalInfo {
28 std::array<u64_le, 31> registers{}; // TODO(ogniK): See if this actually is registers or 28 enum class Architecture : s32 {
29 // not(find a game which has non zero valeus) 29 AArch64,
30 u64_le unk0{}; 30 AArch32,
31 u64_le unk1{}; 31 };
32 u64_le unk2{}; 32
33 u64_le unk3{}; 33 const char* ArchAsString() const {
34 u64_le unk4{}; 34 return arch == Architecture::AArch64 ? "AArch64" : "AArch32";
35 u64_le unk5{}; 35 }
36 u64_le unk6{}; 36
37 std::array<u64_le, 31> registers{};
38 u64_le sp{};
39 u64_le pc{};
40 u64_le pstate{};
41 u64_le afsr0{};
42 u64_le afsr1{};
43 u64_le esr{};
44 u64_le far{};
37 45
38 std::array<u64_le, 32> backtrace{}; 46 std::array<u64_le, 32> backtrace{};
39 u64_le unk7{}; 47 u64_le program_entry_point{};
40 u64_le unk8{}; 48
49 // Bit flags that indicate which registers have been set with values
50 // for this context. The service itself uses these to determine which
51 // registers to specifically print out.
52 u64_le set_flags{};
53
41 u32_le backtrace_size{}; 54 u32_le backtrace_size{};
42 u32_le unk9{}; 55 Architecture arch{};
43 u32_le unk10{}; // TODO(ogniK): Is this even used or is it just padding? 56 u32_le unk10{}; // TODO(ogniK): Is this even used or is it just padding?
44}; 57};
45static_assert(sizeof(FatalInfo) == 0x250, "FatalInfo is an invalid size"); 58static_assert(sizeof(FatalInfo) == 0x250, "FatalInfo is an invalid size");
@@ -52,36 +65,36 @@ enum class FatalType : u32 {
52 65
53static void GenerateErrorReport(ResultCode error_code, const FatalInfo& info) { 66static void GenerateErrorReport(ResultCode error_code, const FatalInfo& info) {
54 const auto title_id = Core::CurrentProcess()->GetTitleID(); 67 const auto title_id = Core::CurrentProcess()->GetTitleID();
55 std::string crash_report = 68 std::string crash_report = fmt::format(
56 fmt::format("Yuzu {}-{} crash report\n" 69 "Yuzu {}-{} crash report\n"
57 "Title ID: {:016x}\n" 70 "Title ID: {:016x}\n"
58 "Result: 0x{:X} ({:04}-{:04d})\n" 71 "Result: 0x{:X} ({:04}-{:04d})\n"
59 "\n", 72 "Set flags: 0x{:16X}\n"
60 Common::g_scm_branch, Common::g_scm_desc, title_id, error_code.raw, 73 "Program entry point: 0x{:16X}\n"
61 2000 + static_cast<u32>(error_code.module.Value()), 74 "\n",
62 static_cast<u32>(error_code.description.Value()), info.unk8, info.unk7); 75 Common::g_scm_branch, Common::g_scm_desc, title_id, error_code.raw,
76 2000 + static_cast<u32>(error_code.module.Value()),
77 static_cast<u32>(error_code.description.Value()), info.set_flags, info.program_entry_point);
63 if (info.backtrace_size != 0x0) { 78 if (info.backtrace_size != 0x0) {
64 crash_report += "Registers:\n"; 79 crash_report += "Registers:\n";
65 // TODO(ogniK): This is just a guess, find a game which actually has non zero values
66 for (size_t i = 0; i < info.registers.size(); i++) { 80 for (size_t i = 0; i < info.registers.size(); i++) {
67 crash_report += 81 crash_report +=
68 fmt::format(" X[{:02d}]: {:016x}\n", i, info.registers[i]); 82 fmt::format(" X[{:02d}]: {:016x}\n", i, info.registers[i]);
69 } 83 }
70 crash_report += fmt::format(" Unknown 0: {:016x}\n", info.unk0); 84 crash_report += fmt::format(" SP: {:016x}\n", info.sp);
71 crash_report += fmt::format(" Unknown 1: {:016x}\n", info.unk1); 85 crash_report += fmt::format(" PC: {:016x}\n", info.pc);
72 crash_report += fmt::format(" Unknown 2: {:016x}\n", info.unk2); 86 crash_report += fmt::format(" PSTATE: {:016x}\n", info.pstate);
73 crash_report += fmt::format(" Unknown 3: {:016x}\n", info.unk3); 87 crash_report += fmt::format(" AFSR0: {:016x}\n", info.afsr0);
74 crash_report += fmt::format(" Unknown 4: {:016x}\n", info.unk4); 88 crash_report += fmt::format(" AFSR1: {:016x}\n", info.afsr1);
75 crash_report += fmt::format(" Unknown 5: {:016x}\n", info.unk5); 89 crash_report += fmt::format(" ESR: {:016x}\n", info.esr);
76 crash_report += fmt::format(" Unknown 6: {:016x}\n", info.unk6); 90 crash_report += fmt::format(" FAR: {:016x}\n", info.far);
77 crash_report += "\nBacktrace:\n"; 91 crash_report += "\nBacktrace:\n";
78 for (size_t i = 0; i < info.backtrace_size; i++) { 92 for (size_t i = 0; i < info.backtrace_size; i++) {
79 crash_report += 93 crash_report +=
80 fmt::format(" Backtrace[{:02d}]: {:016x}\n", i, info.backtrace[i]); 94 fmt::format(" Backtrace[{:02d}]: {:016x}\n", i, info.backtrace[i]);
81 } 95 }
82 crash_report += fmt::format("\nUnknown 7: 0x{:016x}\n", info.unk7); 96
83 crash_report += fmt::format("Unknown 8: 0x{:016x}\n", info.unk8); 97 crash_report += fmt::format("Architecture: {}\n", info.ArchAsString());
84 crash_report += fmt::format("Unknown 9: 0x{:016x}\n", info.unk9);
85 crash_report += fmt::format("Unknown 10: 0x{:016x}\n", info.unk10); 98 crash_report += fmt::format("Unknown 10: 0x{:016x}\n", info.unk10);
86 } 99 }
87 100
@@ -125,13 +138,13 @@ static void ThrowFatalError(ResultCode error_code, FatalType fatal_type, const F
125 case FatalType::ErrorReport: 138 case FatalType::ErrorReport:
126 GenerateErrorReport(error_code, info); 139 GenerateErrorReport(error_code, info);
127 break; 140 break;
128 }; 141 }
129} 142}
130 143
131void Module::Interface::ThrowFatal(Kernel::HLERequestContext& ctx) { 144void Module::Interface::ThrowFatal(Kernel::HLERequestContext& ctx) {
132 LOG_ERROR(Service_Fatal, "called"); 145 LOG_ERROR(Service_Fatal, "called");
133 IPC::RequestParser rp{ctx}; 146 IPC::RequestParser rp{ctx};
134 auto error_code = rp.Pop<ResultCode>(); 147 const auto error_code = rp.Pop<ResultCode>();
135 148
136 ThrowFatalError(error_code, FatalType::ErrorScreen, {}); 149 ThrowFatalError(error_code, FatalType::ErrorScreen, {});
137 IPC::ResponseBuilder rb{ctx, 2}; 150 IPC::ResponseBuilder rb{ctx, 2};
@@ -141,8 +154,8 @@ void Module::Interface::ThrowFatal(Kernel::HLERequestContext& ctx) {
141void Module::Interface::ThrowFatalWithPolicy(Kernel::HLERequestContext& ctx) { 154void Module::Interface::ThrowFatalWithPolicy(Kernel::HLERequestContext& ctx) {
142 LOG_ERROR(Service_Fatal, "called"); 155 LOG_ERROR(Service_Fatal, "called");
143 IPC::RequestParser rp(ctx); 156 IPC::RequestParser rp(ctx);
144 auto error_code = rp.Pop<ResultCode>(); 157 const auto error_code = rp.Pop<ResultCode>();
145 auto fatal_type = rp.PopEnum<FatalType>(); 158 const auto fatal_type = rp.PopEnum<FatalType>();
146 159
147 ThrowFatalError(error_code, fatal_type, {}); // No info is passed with ThrowFatalWithPolicy 160 ThrowFatalError(error_code, fatal_type, {}); // No info is passed with ThrowFatalWithPolicy
148 IPC::ResponseBuilder rb{ctx, 2}; 161 IPC::ResponseBuilder rb{ctx, 2};
@@ -152,9 +165,9 @@ void Module::Interface::ThrowFatalWithPolicy(Kernel::HLERequestContext& ctx) {
152void Module::Interface::ThrowFatalWithCpuContext(Kernel::HLERequestContext& ctx) { 165void Module::Interface::ThrowFatalWithCpuContext(Kernel::HLERequestContext& ctx) {
153 LOG_ERROR(Service_Fatal, "called"); 166 LOG_ERROR(Service_Fatal, "called");
154 IPC::RequestParser rp(ctx); 167 IPC::RequestParser rp(ctx);
155 auto error_code = rp.Pop<ResultCode>(); 168 const auto error_code = rp.Pop<ResultCode>();
156 auto fatal_type = rp.PopEnum<FatalType>(); 169 const auto fatal_type = rp.PopEnum<FatalType>();
157 auto fatal_info = ctx.ReadBuffer(); 170 const auto fatal_info = ctx.ReadBuffer();
158 FatalInfo info{}; 171 FatalInfo info{};
159 172
160 ASSERT_MSG(fatal_info.size() == sizeof(FatalInfo), "Invalid fatal info buffer size!"); 173 ASSERT_MSG(fatal_info.size() == sizeof(FatalInfo), "Invalid fatal info buffer size!");
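
The decoded FatalInfo layout can be restated as a plain struct; on typical 64-bit platforms the trailing 32-bit fields plus padding bring it to the asserted 0x250 bytes. The field names follow the diff, but this standalone struct and its padding assumption are illustrative only:

#include <array>
#include <cstdint>

struct FatalInfoSketch {
    std::array<std::uint64_t, 31> registers{};
    std::uint64_t sp{}, pc{}, pstate{}, afsr0{}, afsr1{}, esr{}, far{};
    std::array<std::uint64_t, 32> backtrace{};
    std::uint64_t program_entry_point{};
    std::uint64_t set_flags{};
    std::uint32_t backtrace_size{};
    std::int32_t arch{};
    std::uint32_t unk10{};
};
static_assert(sizeof(FatalInfoSketch) == 0x250, "FatalInfo layout sketch is an invalid size");

int main() {
    return 0;
}
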
diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp
index 8a6de83a2..63b55758b 100644
--- a/src/core/hle/service/hid/hid.cpp
+++ b/src/core/hle/service/hid/hid.cpp
@@ -36,9 +36,9 @@ namespace Service::HID {
36 36
37// Updating period for each HID device. 37// Updating period for each HID device.
38// TODO(ogniK): Find actual polling rate of hid 38// TODO(ogniK): Find actual polling rate of hid
39constexpr u64 pad_update_ticks = Core::Timing::BASE_CLOCK_RATE / 66; 39constexpr s64 pad_update_ticks = static_cast<s64>(Core::Timing::BASE_CLOCK_RATE / 66);
40constexpr u64 accelerometer_update_ticks = Core::Timing::BASE_CLOCK_RATE / 100; 40constexpr s64 accelerometer_update_ticks = static_cast<s64>(Core::Timing::BASE_CLOCK_RATE / 100);
41constexpr u64 gyroscope_update_ticks = Core::Timing::BASE_CLOCK_RATE / 100; 41constexpr s64 gyroscope_update_ticks = static_cast<s64>(Core::Timing::BASE_CLOCK_RATE / 100);
42constexpr std::size_t SHARED_MEMORY_SIZE = 0x40000; 42constexpr std::size_t SHARED_MEMORY_SIZE = 0x40000;
43 43
44IAppletResource::IAppletResource() : ServiceFramework("IAppletResource") { 44IAppletResource::IAppletResource() : ServiceFramework("IAppletResource") {
@@ -75,7 +75,7 @@ IAppletResource::IAppletResource() : ServiceFramework("IAppletResource") {
75 // Register update callbacks 75 // Register update callbacks
76 auto& core_timing = Core::System::GetInstance().CoreTiming(); 76 auto& core_timing = Core::System::GetInstance().CoreTiming();
77 pad_update_event = 77 pad_update_event =
78 core_timing.RegisterEvent("HID::UpdatePadCallback", [this](u64 userdata, int cycles_late) { 78 core_timing.RegisterEvent("HID::UpdatePadCallback", [this](u64 userdata, s64 cycles_late) {
79 UpdateControllers(userdata, cycles_late); 79 UpdateControllers(userdata, cycles_late);
80 }); 80 });
81 81
@@ -106,7 +106,7 @@ void IAppletResource::GetSharedMemoryHandle(Kernel::HLERequestContext& ctx) {
106 rb.PushCopyObjects(shared_mem); 106 rb.PushCopyObjects(shared_mem);
107} 107}
108 108
109void IAppletResource::UpdateControllers(u64 userdata, int cycles_late) { 109void IAppletResource::UpdateControllers(u64 userdata, s64 cycles_late) {
110 auto& core_timing = Core::System::GetInstance().CoreTiming(); 110 auto& core_timing = Core::System::GetInstance().CoreTiming();
111 111
112 const bool should_reload = Settings::values.is_device_reload_pending.exchange(false); 112 const bool should_reload = Settings::values.is_device_reload_pending.exchange(false);
diff --git a/src/core/hle/service/hid/hid.h b/src/core/hle/service/hid/hid.h
index 498602de5..d3660cad2 100644
--- a/src/core/hle/service/hid/hid.h
+++ b/src/core/hle/service/hid/hid.h
@@ -65,7 +65,7 @@ private:
65 } 65 }
66 66
67 void GetSharedMemoryHandle(Kernel::HLERequestContext& ctx); 67 void GetSharedMemoryHandle(Kernel::HLERequestContext& ctx);
68 void UpdateControllers(u64 userdata, int cycles_late); 68 void UpdateControllers(u64 userdata, s64 cycles_late);
69 69
70 Kernel::SharedPtr<Kernel::SharedMemory> shared_mem; 70 Kernel::SharedPtr<Kernel::SharedMemory> shared_mem;
71 71
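
The HID (and NVFlinger) timing constants and callbacks switch from unsigned/int to s64 so that arithmetic such as frame_ticks - cycles_late stays in signed 64-bit and cannot wrap for large lateness values. A small sketch of the callback shape; the clock-rate constant below is stated only for illustration and should be treated as an assumption:

#include <cstdint>
#include <functional>
#include <iostream>

constexpr std::int64_t BASE_CLOCK_RATE = 1019215872; // roughly the 1.02 GHz base clock
constexpr std::int64_t pad_update_ticks = BASE_CLOCK_RATE / 66;

using TimedCallback = std::function<void(std::uint64_t userdata, std::int64_t cycles_late)>;

int main() {
    const TimedCallback update_pad = [](std::uint64_t /*userdata*/, std::int64_t cycles_late) {
        // Reschedule the next update, compensating for how late this invocation fired.
        std::cout << "next pad update in " << (pad_update_ticks - cycles_late) << " cycles\n";
    };
    update_pad(0, 123456);
}
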
diff --git a/src/core/hle/service/nfp/nfp.cpp b/src/core/hle/service/nfp/nfp.cpp
index 1c4482e47..c6babdd4d 100644
--- a/src/core/hle/service/nfp/nfp.cpp
+++ b/src/core/hle/service/nfp/nfp.cpp
@@ -335,7 +335,7 @@ void Module::Interface::CreateUserInterface(Kernel::HLERequestContext& ctx) {
335} 335}
336 336
337bool Module::Interface::LoadAmiibo(const std::vector<u8>& buffer) { 337bool Module::Interface::LoadAmiibo(const std::vector<u8>& buffer) {
338 std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); 338 std::lock_guard lock{HLE::g_hle_lock};
339 if (buffer.size() < sizeof(AmiiboFile)) { 339 if (buffer.size() < sizeof(AmiiboFile)) {
340 return false; 340 return false;
341 } 341 }
diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp
index fc496b654..c7f5bbf28 100644
--- a/src/core/hle/service/nvflinger/nvflinger.cpp
+++ b/src/core/hle/service/nvflinger/nvflinger.cpp
@@ -26,7 +26,7 @@
26namespace Service::NVFlinger { 26namespace Service::NVFlinger {
27 27
28constexpr std::size_t SCREEN_REFRESH_RATE = 60; 28constexpr std::size_t SCREEN_REFRESH_RATE = 60;
29constexpr u64 frame_ticks = static_cast<u64>(Core::Timing::BASE_CLOCK_RATE / SCREEN_REFRESH_RATE); 29constexpr s64 frame_ticks = static_cast<s64>(Core::Timing::BASE_CLOCK_RATE / SCREEN_REFRESH_RATE);
30 30
31NVFlinger::NVFlinger(Core::Timing::CoreTiming& core_timing) : core_timing{core_timing} { 31NVFlinger::NVFlinger(Core::Timing::CoreTiming& core_timing) : core_timing{core_timing} {
32 displays.emplace_back(0, "Default"); 32 displays.emplace_back(0, "Default");
@@ -37,7 +37,7 @@ NVFlinger::NVFlinger(Core::Timing::CoreTiming& core_timing) : core_timing{core_t
37 37
38 // Schedule the screen composition events 38 // Schedule the screen composition events
39 composition_event = 39 composition_event =
40 core_timing.RegisterEvent("ScreenComposition", [this](u64 userdata, int cycles_late) { 40 core_timing.RegisterEvent("ScreenComposition", [this](u64 userdata, s64 cycles_late) {
41 Compose(); 41 Compose();
42 this->core_timing.ScheduleEvent(frame_ticks - cycles_late, composition_event); 42 this->core_timing.ScheduleEvent(frame_ticks - cycles_late, composition_event);
43 }); 43 });
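Editor's note: the composition callback above reschedules itself with frame_ticks - cycles_late, so any lateness in one invocation shortens the next interval instead of accumulating drift. A minimal standalone sketch of that pattern follows; the tick value and the hand-rolled "scheduler" are stand-ins, not yuzu's CoreTiming API.

    // Sketch of self-rescheduling with lateness compensation (assumed tick rate,
    // not yuzu's BASE_CLOCK_RATE; the scheduler here is purely illustrative).
    #include <cstdint>
    #include <iostream>

    constexpr std::int64_t frame_ticks = 1'000'000 / 60; // placeholder ticks per frame

    std::int64_t next_deadline = 0;

    void Compose() { /* submit one frame */ }

    // Called by the hypothetical timer core once the deadline has passed.
    void ScreenComposition(std::int64_t now) {
        const std::int64_t cycles_late = now - next_deadline; // >= 0 when we fired late
        Compose();
        // Shorten the next interval by the lateness so the average rate stays at 60 Hz.
        next_deadline = now + (frame_ticks - cycles_late);
    }

    int main() {
        next_deadline = frame_ticks;
        ScreenComposition(frame_ticks + 120); // fired 120 ticks late
        std::cout << "next deadline: " << next_deadline << '\n';
    }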
diff --git a/src/core/hle/service/set/set_sys.cpp b/src/core/hle/service/set/set_sys.cpp
index c9b4da5b0..ecee554bf 100644
--- a/src/core/hle/service/set/set_sys.cpp
+++ b/src/core/hle/service/set/set_sys.cpp
@@ -2,13 +2,88 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include "common/assert.h"
5#include "common/logging/log.h" 6#include "common/logging/log.h"
7#include "core/file_sys/errors.h"
8#include "core/file_sys/system_archive/system_version.h"
6#include "core/hle/ipc_helpers.h" 9#include "core/hle/ipc_helpers.h"
7#include "core/hle/kernel/client_port.h" 10#include "core/hle/kernel/client_port.h"
11#include "core/hle/service/filesystem/filesystem.h"
8#include "core/hle/service/set/set_sys.h" 12#include "core/hle/service/set/set_sys.h"
9 13
10namespace Service::Set { 14namespace Service::Set {
11 15
16namespace {
17constexpr u64 SYSTEM_VERSION_FILE_MINOR_REVISION_OFFSET = 0x05;
18
19enum class GetFirmwareVersionType {
20 Version1,
21 Version2,
22};
23
24void GetFirmwareVersionImpl(Kernel::HLERequestContext& ctx, GetFirmwareVersionType type) {
25 LOG_WARNING(Service_SET, "called - Using hardcoded firmware version '{}'",
26 FileSys::SystemArchive::GetLongDisplayVersion());
27
28 ASSERT_MSG(ctx.GetWriteBufferSize() == 0x100,
29 "FirmwareVersion output buffer must be 0x100 bytes in size!");
30
31 // Instead of using the normal procedure of checking for the real system archive and, if it
32 // doesn't exist, synthesizing one, we always use the synthesized archive. Relying on the real
33 // one could lead to strange bugs when a user has a really old or really new SystemVersion
34 // title. The synthesized one ensures consistency (currently reports as 5.1.0-0.0).
35 const auto archive = FileSys::SystemArchive::SystemVersion();
36
37 const auto early_exit_failure = [&ctx](const std::string& desc, ResultCode code) {
38 LOG_ERROR(Service_SET, "General failure while attempting to resolve firmware version ({}).",
39 desc.c_str());
40 IPC::ResponseBuilder rb{ctx, 2};
41 rb.Push(code);
42 };
43
44 if (archive == nullptr) {
45 early_exit_failure("The system version archive couldn't be synthesized.",
46 FileSys::ERROR_FAILED_MOUNT_ARCHIVE);
47 return;
48 }
49
50 const auto ver_file = archive->GetFile("file");
51 if (ver_file == nullptr) {
52 early_exit_failure("The system version archive didn't contain the file 'file'.",
53 FileSys::ERROR_INVALID_ARGUMENT);
54 return;
55 }
56
57 auto data = ver_file->ReadAllBytes();
58 if (data.size() != 0x100) {
59 early_exit_failure("The system version file 'file' was not the correct size.",
60 FileSys::ERROR_OUT_OF_BOUNDS);
61 return;
62 }
63
64 // If the command is GetFirmwareVersion (as opposed to GetFirmwareVersion2), hardware will
65 // zero out the REVISION_MINOR field.
66 if (type == GetFirmwareVersionType::Version1) {
67 data[SYSTEM_VERSION_FILE_MINOR_REVISION_OFFSET] = 0;
68 }
69
70 ctx.WriteBuffer(data);
71
72 IPC::ResponseBuilder rb{ctx, 2};
73 rb.Push(RESULT_SUCCESS);
74}
75} // Anonymous namespace
76
77void SET_SYS::GetFirmwareVersion(Kernel::HLERequestContext& ctx) {
78 LOG_DEBUG(Service_SET, "called");
79 GetFirmwareVersionImpl(ctx, GetFirmwareVersionType::Version1);
80}
81
82void SET_SYS::GetFirmwareVersion2(Kernel::HLERequestContext& ctx) {
83 LOG_DEBUG(Service_SET, "called");
84 GetFirmwareVersionImpl(ctx, GetFirmwareVersionType::Version2);
85}
86
12void SET_SYS::GetColorSetId(Kernel::HLERequestContext& ctx) { 87void SET_SYS::GetColorSetId(Kernel::HLERequestContext& ctx) {
13 LOG_DEBUG(Service_SET, "called"); 88 LOG_DEBUG(Service_SET, "called");
14 89
@@ -33,8 +108,8 @@ SET_SYS::SET_SYS() : ServiceFramework("set:sys") {
33 {0, nullptr, "SetLanguageCode"}, 108 {0, nullptr, "SetLanguageCode"},
34 {1, nullptr, "SetNetworkSettings"}, 109 {1, nullptr, "SetNetworkSettings"},
35 {2, nullptr, "GetNetworkSettings"}, 110 {2, nullptr, "GetNetworkSettings"},
36 {3, nullptr, "GetFirmwareVersion"}, 111 {3, &SET_SYS::GetFirmwareVersion, "GetFirmwareVersion"},
37 {4, nullptr, "GetFirmwareVersion2"}, 112 {4, &SET_SYS::GetFirmwareVersion2, "GetFirmwareVersion2"},
38 {5, nullptr, "GetFirmwareVersionDigest"}, 113 {5, nullptr, "GetFirmwareVersionDigest"},
39 {7, nullptr, "GetLockScreenFlag"}, 114 {7, nullptr, "GetLockScreenFlag"},
40 {8, nullptr, "SetLockScreenFlag"}, 115 {8, nullptr, "SetLockScreenFlag"},
diff --git a/src/core/hle/service/set/set_sys.h b/src/core/hle/service/set/set_sys.h
index f602f3c77..13ee2cf46 100644
--- a/src/core/hle/service/set/set_sys.h
+++ b/src/core/hle/service/set/set_sys.h
@@ -20,6 +20,8 @@ private:
20 BasicBlack = 1, 20 BasicBlack = 1,
21 }; 21 };
22 22
23 void GetFirmwareVersion(Kernel::HLERequestContext& ctx);
24 void GetFirmwareVersion2(Kernel::HLERequestContext& ctx);
23 void GetColorSetId(Kernel::HLERequestContext& ctx); 25 void GetColorSetId(Kernel::HLERequestContext& ctx);
24 void SetColorSetId(Kernel::HLERequestContext& ctx); 26 void SetColorSetId(Kernel::HLERequestContext& ctx);
25 27
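Editor's note: per the implementation above, the only behavioral difference between the two commands is that GetFirmwareVersion zeroes byte 0x05 (REVISION_MINOR) of the 0x100-byte version blob before replying. A tiny sketch of just that distinction, with the surrounding buffer layout deliberately left unspecified:

    // Version1 vs. Version2 handling as described above; only offset 0x05 differs.
    #include <array>
    #include <cstddef>
    #include <cstdint>

    constexpr std::size_t MINOR_REVISION_OFFSET = 0x05;

    enum class GetFirmwareVersionType { Version1, Version2 };

    std::array<std::uint8_t, 0x100> BuildReply(std::array<std::uint8_t, 0x100> blob,
                                               GetFirmwareVersionType type) {
        if (type == GetFirmwareVersionType::Version1) {
            blob[MINOR_REVISION_OFFSET] = 0; // the older command hides the minor revision
        }
        return blob;
    }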
diff --git a/src/core/loader/nso.cpp b/src/core/loader/nso.cpp
index 7494f8a28..714d85a59 100644
--- a/src/core/loader/nso.cpp
+++ b/src/core/loader/nso.cpp
@@ -21,36 +21,8 @@
21#include "core/settings.h" 21#include "core/settings.h"
22 22
23namespace Loader { 23namespace Loader {
24 24namespace {
25struct NsoSegmentHeader { 25struct MODHeader {
26 u32_le offset;
27 u32_le location;
28 u32_le size;
29 union {
30 u32_le alignment;
31 u32_le bss_size;
32 };
33};
34static_assert(sizeof(NsoSegmentHeader) == 0x10, "NsoSegmentHeader has incorrect size.");
35
36struct NsoHeader {
37 u32_le magic;
38 u32_le version;
39 INSERT_PADDING_WORDS(1);
40 u8 flags;
41 std::array<NsoSegmentHeader, 3> segments; // Text, RoData, Data (in that order)
42 std::array<u8, 0x20> build_id;
43 std::array<u32_le, 3> segments_compressed_size;
44
45 bool IsSegmentCompressed(size_t segment_num) const {
46 ASSERT_MSG(segment_num < 3, "Invalid segment {}", segment_num);
47 return ((flags >> segment_num) & 1);
48 }
49};
50static_assert(sizeof(NsoHeader) == 0x6c, "NsoHeader has incorrect size.");
51static_assert(std::is_trivially_copyable_v<NsoHeader>, "NsoHeader isn't trivially copyable.");
52
53struct ModHeader {
54 u32_le magic; 26 u32_le magic;
55 u32_le dynamic_offset; 27 u32_le dynamic_offset;
56 u32_le bss_start_offset; 28 u32_le bss_start_offset;
@@ -59,25 +31,10 @@ struct ModHeader {
59 u32_le eh_frame_hdr_end_offset; 31 u32_le eh_frame_hdr_end_offset;
 60 u32_le module_offset; // Offset to runtime-generated module object; typically equal to .bss base 32 u32_le module_offset; // Offset to runtime-generated module object; typically equal to .bss base
61}; 33};
62static_assert(sizeof(ModHeader) == 0x1c, "ModHeader has incorrect size."); 34static_assert(sizeof(MODHeader) == 0x1c, "MODHeader has incorrect size.");
63
64AppLoader_NSO::AppLoader_NSO(FileSys::VirtualFile file) : AppLoader(std::move(file)) {}
65
66FileType AppLoader_NSO::IdentifyType(const FileSys::VirtualFile& file) {
67 u32 magic = 0;
68 if (file->ReadObject(&magic) != sizeof(magic)) {
69 return FileType::Error;
70 }
71
72 if (Common::MakeMagic('N', 'S', 'O', '0') != magic) {
73 return FileType::Error;
74 }
75
76 return FileType::NSO;
77}
78 35
79static std::vector<u8> DecompressSegment(const std::vector<u8>& compressed_data, 36std::vector<u8> DecompressSegment(const std::vector<u8>& compressed_data,
80 const NsoSegmentHeader& header) { 37 const NSOSegmentHeader& header) {
81 std::vector<u8> uncompressed_data(header.size); 38 std::vector<u8> uncompressed_data(header.size);
82 const int bytes_uncompressed = 39 const int bytes_uncompressed =
83 LZ4_decompress_safe(reinterpret_cast<const char*>(compressed_data.data()), 40 LZ4_decompress_safe(reinterpret_cast<const char*>(compressed_data.data()),
@@ -91,23 +48,47 @@ static std::vector<u8> DecompressSegment(const std::vector<u8>& compressed_data,
91 return uncompressed_data; 48 return uncompressed_data;
92} 49}
93 50
94static constexpr u32 PageAlignSize(u32 size) { 51constexpr u32 PageAlignSize(u32 size) {
95 return (size + Memory::PAGE_MASK) & ~Memory::PAGE_MASK; 52 return (size + Memory::PAGE_MASK) & ~Memory::PAGE_MASK;
96} 53}
54} // Anonymous namespace
55
56bool NSOHeader::IsSegmentCompressed(size_t segment_num) const {
57 ASSERT_MSG(segment_num < 3, "Invalid segment {}", segment_num);
58 return ((flags >> segment_num) & 1) != 0;
59}
60
61AppLoader_NSO::AppLoader_NSO(FileSys::VirtualFile file) : AppLoader(std::move(file)) {}
62
63FileType AppLoader_NSO::IdentifyType(const FileSys::VirtualFile& file) {
64 u32 magic = 0;
65 if (file->ReadObject(&magic) != sizeof(magic)) {
66 return FileType::Error;
67 }
68
69 if (Common::MakeMagic('N', 'S', 'O', '0') != magic) {
70 return FileType::Error;
71 }
72
73 return FileType::NSO;
74}
97 75
98std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process, 76std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process,
99 const FileSys::VfsFile& file, VAddr load_base, 77 const FileSys::VfsFile& file, VAddr load_base,
100 bool should_pass_arguments, 78 bool should_pass_arguments,
101 std::optional<FileSys::PatchManager> pm) { 79 std::optional<FileSys::PatchManager> pm) {
102 if (file.GetSize() < sizeof(NsoHeader)) 80 if (file.GetSize() < sizeof(NSOHeader)) {
103 return {}; 81 return {};
82 }
104 83
105 NsoHeader nso_header{}; 84 NSOHeader nso_header{};
106 if (sizeof(NsoHeader) != file.ReadObject(&nso_header)) 85 if (sizeof(NSOHeader) != file.ReadObject(&nso_header)) {
107 return {}; 86 return {};
87 }
108 88
109 if (nso_header.magic != Common::MakeMagic('N', 'S', 'O', '0')) 89 if (nso_header.magic != Common::MakeMagic('N', 'S', 'O', '0')) {
110 return {}; 90 return {};
91 }
111 92
112 // Build program image 93 // Build program image
113 Kernel::CodeSet codeset; 94 Kernel::CodeSet codeset;
@@ -143,10 +124,10 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process,
143 std::memcpy(&module_offset, program_image.data() + 4, sizeof(u32)); 124 std::memcpy(&module_offset, program_image.data() + 4, sizeof(u32));
144 125
145 // Read MOD header 126 // Read MOD header
146 ModHeader mod_header{}; 127 MODHeader mod_header{};
147 // Default .bss to size in segment header if MOD0 section doesn't exist 128 // Default .bss to size in segment header if MOD0 section doesn't exist
148 u32 bss_size{PageAlignSize(nso_header.segments[2].bss_size)}; 129 u32 bss_size{PageAlignSize(nso_header.segments[2].bss_size)};
149 std::memcpy(&mod_header, program_image.data() + module_offset, sizeof(ModHeader)); 130 std::memcpy(&mod_header, program_image.data() + module_offset, sizeof(MODHeader));
150 const bool has_mod_header{mod_header.magic == Common::MakeMagic('M', 'O', 'D', '0')}; 131 const bool has_mod_header{mod_header.magic == Common::MakeMagic('M', 'O', 'D', '0')};
151 if (has_mod_header) { 132 if (has_mod_header) {
152 // Resize program image to include .bss section and page align each section 133 // Resize program image to include .bss section and page align each section
@@ -158,13 +139,15 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process,
158 139
159 // Apply patches if necessary 140 // Apply patches if necessary
160 if (pm && (pm->HasNSOPatch(nso_header.build_id) || Settings::values.dump_nso)) { 141 if (pm && (pm->HasNSOPatch(nso_header.build_id) || Settings::values.dump_nso)) {
161 std::vector<u8> pi_header(program_image.size() + 0x100); 142 std::vector<u8> pi_header(sizeof(NSOHeader) + program_image.size());
162 std::memcpy(pi_header.data(), &nso_header, sizeof(NsoHeader)); 143 pi_header.insert(pi_header.begin(), reinterpret_cast<u8*>(&nso_header),
163 std::memcpy(pi_header.data() + 0x100, program_image.data(), program_image.size()); 144 reinterpret_cast<u8*>(&nso_header) + sizeof(NSOHeader));
145 pi_header.insert(pi_header.begin() + sizeof(NSOHeader), program_image.begin(),
146 program_image.end());
164 147
165 pi_header = pm->PatchNSO(pi_header); 148 pi_header = pm->PatchNSO(pi_header);
166 149
167 std::memcpy(program_image.data(), pi_header.data() + 0x100, program_image.size()); 150 std::copy(pi_header.begin() + sizeof(NSOHeader), pi_header.end(), program_image.begin());
168 } 151 }
169 152
170 // Apply cheats if they exist and the program has a valid title ID 153 // Apply cheats if they exist and the program has a valid title ID
diff --git a/src/core/loader/nso.h b/src/core/loader/nso.h
index 167c8a694..4674c3724 100644
--- a/src/core/loader/nso.h
+++ b/src/core/loader/nso.h
@@ -4,7 +4,9 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <array>
7#include <optional> 8#include <optional>
9#include <type_traits>
8#include "common/common_types.h" 10#include "common/common_types.h"
9#include "common/swap.h" 11#include "common/swap.h"
10#include "core/file_sys/patch_manager.h" 12#include "core/file_sys/patch_manager.h"
@@ -16,6 +18,43 @@ class Process;
16 18
17namespace Loader { 19namespace Loader {
18 20
21struct NSOSegmentHeader {
22 u32_le offset;
23 u32_le location;
24 u32_le size;
25 union {
26 u32_le alignment;
27 u32_le bss_size;
28 };
29};
30static_assert(sizeof(NSOSegmentHeader) == 0x10, "NSOSegmentHeader has incorrect size.");
31
32struct NSOHeader {
33 using SHA256Hash = std::array<u8, 0x20>;
34
35 struct RODataRelativeExtent {
36 u32_le data_offset;
37 u32_le size;
38 };
39
40 u32_le magic;
41 u32_le version;
42 u32 reserved;
43 u32_le flags;
44 std::array<NSOSegmentHeader, 3> segments; // Text, RoData, Data (in that order)
45 std::array<u8, 0x20> build_id;
46 std::array<u32_le, 3> segments_compressed_size;
47 std::array<u8, 0x1C> padding;
48 RODataRelativeExtent api_info_extent;
49 RODataRelativeExtent dynstr_extent;
50 RODataRelativeExtent dynsyn_extent;
51 std::array<SHA256Hash, 3> segment_hashes;
52
53 bool IsSegmentCompressed(size_t segment_num) const;
54};
55static_assert(sizeof(NSOHeader) == 0x100, "NSOHeader has incorrect size.");
56static_assert(std::is_trivially_copyable_v<NSOHeader>, "NSOHeader must be trivially copyable.");
57
19constexpr u64 NSO_ARGUMENT_DATA_ALLOCATION_SIZE = 0x9000; 58constexpr u64 NSO_ARGUMENT_DATA_ALLOCATION_SIZE = 0x9000;
20 59
21struct NSOArgumentHeader { 60struct NSOArgumentHeader {
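Editor's note: two helpers carried through this loader change are worth spelling out: PageAlignSize rounds a size up to the next page boundary via (size + PAGE_MASK) & ~PAGE_MASK, and IsSegmentCompressed tests bit N of the header flags for segment N. A self-contained illustration, assuming the usual 0x1000 page size behind Memory::PAGE_MASK:

    // Illustration of the NSO loader helpers above (PAGE_SIZE assumed to be 0x1000).
    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    constexpr std::uint32_t PAGE_SIZE = 0x1000;
    constexpr std::uint32_t PAGE_MASK = PAGE_SIZE - 1;

    constexpr std::uint32_t PageAlignSize(std::uint32_t size) {
        return (size + PAGE_MASK) & ~PAGE_MASK; // round up to the next page boundary
    }

    constexpr bool IsSegmentCompressed(std::uint32_t flags, std::size_t segment_num) {
        return ((flags >> segment_num) & 1) != 0; // bit 0: .text, bit 1: .rodata, bit 2: .data
    }

    int main() {
        assert(PageAlignSize(0x0001) == 0x1000);
        assert(PageAlignSize(0x1000) == 0x1000);
        assert(PageAlignSize(0x1001) == 0x2000);
        assert(IsSegmentCompressed(0b101, 0) && !IsSegmentCompressed(0b101, 1));
    }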
diff --git a/src/core/perf_stats.cpp b/src/core/perf_stats.cpp
index c716a462b..4afd6c8a3 100644
--- a/src/core/perf_stats.cpp
+++ b/src/core/perf_stats.cpp
@@ -18,13 +18,13 @@ using std::chrono::microseconds;
18namespace Core { 18namespace Core {
19 19
20void PerfStats::BeginSystemFrame() { 20void PerfStats::BeginSystemFrame() {
21 std::lock_guard<std::mutex> lock(object_mutex); 21 std::lock_guard lock{object_mutex};
22 22
23 frame_begin = Clock::now(); 23 frame_begin = Clock::now();
24} 24}
25 25
26void PerfStats::EndSystemFrame() { 26void PerfStats::EndSystemFrame() {
27 std::lock_guard<std::mutex> lock(object_mutex); 27 std::lock_guard lock{object_mutex};
28 28
29 auto frame_end = Clock::now(); 29 auto frame_end = Clock::now();
30 accumulated_frametime += frame_end - frame_begin; 30 accumulated_frametime += frame_end - frame_begin;
@@ -35,13 +35,13 @@ void PerfStats::EndSystemFrame() {
35} 35}
36 36
37void PerfStats::EndGameFrame() { 37void PerfStats::EndGameFrame() {
38 std::lock_guard<std::mutex> lock(object_mutex); 38 std::lock_guard lock{object_mutex};
39 39
40 game_frames += 1; 40 game_frames += 1;
41} 41}
42 42
43PerfStatsResults PerfStats::GetAndResetStats(microseconds current_system_time_us) { 43PerfStatsResults PerfStats::GetAndResetStats(microseconds current_system_time_us) {
44 std::lock_guard<std::mutex> lock(object_mutex); 44 std::lock_guard lock{object_mutex};
45 45
46 const auto now = Clock::now(); 46 const auto now = Clock::now();
47 // Walltime elapsed since stats were reset 47 // Walltime elapsed since stats were reset
@@ -67,7 +67,7 @@ PerfStatsResults PerfStats::GetAndResetStats(microseconds current_system_time_us
67} 67}
68 68
69double PerfStats::GetLastFrameTimeScale() { 69double PerfStats::GetLastFrameTimeScale() {
70 std::lock_guard<std::mutex> lock(object_mutex); 70 std::lock_guard lock{object_mutex};
71 71
72 constexpr double FRAME_LENGTH = 1.0 / 60; 72 constexpr double FRAME_LENGTH = 1.0 / 60;
73 return duration_cast<DoubleSecs>(previous_frame_length).count() / FRAME_LENGTH; 73 return duration_cast<DoubleSecs>(previous_frame_length).count() / FRAME_LENGTH;
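Editor's note: the std::lock_guard changes here (and throughout the rest of this commit) rely on C++17 class template argument deduction, so the mutex type no longer has to be spelled out. A minimal sketch of what the compiler deduces:

    // CTAD behind the lock_guard changes in this commit: the mutex type is
    // deduced from the constructor argument.
    #include <mutex>

    std::mutex object_mutex;
    std::recursive_mutex hle_lock;
    int counter = 0;

    void Increment() {
        std::lock_guard lock{object_mutex}; // deduced as std::lock_guard<std::mutex>
        ++counter;
    }

    void Reenter() {
        std::lock_guard lock{hle_lock};     // deduced as std::lock_guard<std::recursive_mutex>
        ++counter;
    }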
diff --git a/src/input_common/keyboard.cpp b/src/input_common/keyboard.cpp
index 525fe6abc..078374be5 100644
--- a/src/input_common/keyboard.cpp
+++ b/src/input_common/keyboard.cpp
@@ -36,18 +36,18 @@ struct KeyButtonPair {
36class KeyButtonList { 36class KeyButtonList {
37public: 37public:
38 void AddKeyButton(int key_code, KeyButton* key_button) { 38 void AddKeyButton(int key_code, KeyButton* key_button) {
39 std::lock_guard<std::mutex> guard(mutex); 39 std::lock_guard guard{mutex};
40 list.push_back(KeyButtonPair{key_code, key_button}); 40 list.push_back(KeyButtonPair{key_code, key_button});
41 } 41 }
42 42
43 void RemoveKeyButton(const KeyButton* key_button) { 43 void RemoveKeyButton(const KeyButton* key_button) {
44 std::lock_guard<std::mutex> guard(mutex); 44 std::lock_guard guard{mutex};
45 list.remove_if( 45 list.remove_if(
46 [key_button](const KeyButtonPair& pair) { return pair.key_button == key_button; }); 46 [key_button](const KeyButtonPair& pair) { return pair.key_button == key_button; });
47 } 47 }
48 48
49 void ChangeKeyStatus(int key_code, bool pressed) { 49 void ChangeKeyStatus(int key_code, bool pressed) {
50 std::lock_guard<std::mutex> guard(mutex); 50 std::lock_guard guard{mutex};
51 for (const KeyButtonPair& pair : list) { 51 for (const KeyButtonPair& pair : list) {
52 if (pair.key_code == key_code) 52 if (pair.key_code == key_code)
53 pair.key_button->status.store(pressed); 53 pair.key_button->status.store(pressed);
@@ -55,7 +55,7 @@ public:
55 } 55 }
56 56
57 void ChangeAllKeyStatus(bool pressed) { 57 void ChangeAllKeyStatus(bool pressed) {
58 std::lock_guard<std::mutex> guard(mutex); 58 std::lock_guard guard{mutex};
59 for (const KeyButtonPair& pair : list) { 59 for (const KeyButtonPair& pair : list) {
60 pair.key_button->status.store(pressed); 60 pair.key_button->status.store(pressed);
61 } 61 }
diff --git a/src/input_common/motion_emu.cpp b/src/input_common/motion_emu.cpp
index 6d96d4019..868251628 100644
--- a/src/input_common/motion_emu.cpp
+++ b/src/input_common/motion_emu.cpp
@@ -39,7 +39,7 @@ public:
39 void Tilt(int x, int y) { 39 void Tilt(int x, int y) {
40 auto mouse_move = Common::MakeVec(x, y) - mouse_origin; 40 auto mouse_move = Common::MakeVec(x, y) - mouse_origin;
41 if (is_tilting) { 41 if (is_tilting) {
42 std::lock_guard<std::mutex> guard(tilt_mutex); 42 std::lock_guard guard{tilt_mutex};
43 if (mouse_move.x == 0 && mouse_move.y == 0) { 43 if (mouse_move.x == 0 && mouse_move.y == 0) {
44 tilt_angle = 0; 44 tilt_angle = 0;
45 } else { 45 } else {
@@ -51,13 +51,13 @@ public:
51 } 51 }
52 52
53 void EndTilt() { 53 void EndTilt() {
54 std::lock_guard<std::mutex> guard(tilt_mutex); 54 std::lock_guard guard{tilt_mutex};
55 tilt_angle = 0; 55 tilt_angle = 0;
56 is_tilting = false; 56 is_tilting = false;
57 } 57 }
58 58
59 std::tuple<Common::Vec3<float>, Common::Vec3<float>> GetStatus() { 59 std::tuple<Common::Vec3<float>, Common::Vec3<float>> GetStatus() {
60 std::lock_guard<std::mutex> guard(status_mutex); 60 std::lock_guard guard{status_mutex};
61 return status; 61 return status;
62 } 62 }
63 63
@@ -93,7 +93,7 @@ private:
93 old_q = q; 93 old_q = q;
94 94
95 { 95 {
96 std::lock_guard<std::mutex> guard(tilt_mutex); 96 std::lock_guard guard{tilt_mutex};
97 97
98 // Find the quaternion describing current 3DS tilting 98 // Find the quaternion describing current 3DS tilting
99 q = Common::MakeQuaternion( 99 q = Common::MakeQuaternion(
@@ -115,7 +115,7 @@ private:
115 115
116 // Update the sensor state 116 // Update the sensor state
117 { 117 {
118 std::lock_guard<std::mutex> guard(status_mutex); 118 std::lock_guard guard{status_mutex};
119 status = std::make_tuple(gravity, angular_rate); 119 status = std::make_tuple(gravity, angular_rate);
120 } 120 }
121 } 121 }
diff --git a/src/input_common/sdl/sdl_impl.cpp b/src/input_common/sdl/sdl_impl.cpp
index b132d77f5..5949ecbae 100644
--- a/src/input_common/sdl/sdl_impl.cpp
+++ b/src/input_common/sdl/sdl_impl.cpp
@@ -55,22 +55,22 @@ public:
55 : guid{std::move(guid_)}, port{port_}, sdl_joystick{joystick, deleter} {} 55 : guid{std::move(guid_)}, port{port_}, sdl_joystick{joystick, deleter} {}
56 56
57 void SetButton(int button, bool value) { 57 void SetButton(int button, bool value) {
58 std::lock_guard<std::mutex> lock(mutex); 58 std::lock_guard lock{mutex};
59 state.buttons[button] = value; 59 state.buttons[button] = value;
60 } 60 }
61 61
62 bool GetButton(int button) const { 62 bool GetButton(int button) const {
63 std::lock_guard<std::mutex> lock(mutex); 63 std::lock_guard lock{mutex};
64 return state.buttons.at(button); 64 return state.buttons.at(button);
65 } 65 }
66 66
67 void SetAxis(int axis, Sint16 value) { 67 void SetAxis(int axis, Sint16 value) {
68 std::lock_guard<std::mutex> lock(mutex); 68 std::lock_guard lock{mutex};
69 state.axes[axis] = value; 69 state.axes[axis] = value;
70 } 70 }
71 71
72 float GetAxis(int axis) const { 72 float GetAxis(int axis) const {
73 std::lock_guard<std::mutex> lock(mutex); 73 std::lock_guard lock{mutex};
74 return state.axes.at(axis) / 32767.0f; 74 return state.axes.at(axis) / 32767.0f;
75 } 75 }
76 76
@@ -92,12 +92,12 @@ public:
92 } 92 }
93 93
94 void SetHat(int hat, Uint8 direction) { 94 void SetHat(int hat, Uint8 direction) {
95 std::lock_guard<std::mutex> lock(mutex); 95 std::lock_guard lock{mutex};
96 state.hats[hat] = direction; 96 state.hats[hat] = direction;
97 } 97 }
98 98
99 bool GetHatDirection(int hat, Uint8 direction) const { 99 bool GetHatDirection(int hat, Uint8 direction) const {
100 std::lock_guard<std::mutex> lock(mutex); 100 std::lock_guard lock{mutex};
101 return (state.hats.at(hat) & direction) != 0; 101 return (state.hats.at(hat) & direction) != 0;
102 } 102 }
103 /** 103 /**
@@ -140,7 +140,7 @@ private:
140 * Get the nth joystick with the corresponding GUID 140 * Get the nth joystick with the corresponding GUID
141 */ 141 */
142std::shared_ptr<SDLJoystick> SDLState::GetSDLJoystickByGUID(const std::string& guid, int port) { 142std::shared_ptr<SDLJoystick> SDLState::GetSDLJoystickByGUID(const std::string& guid, int port) {
143 std::lock_guard<std::mutex> lock(joystick_map_mutex); 143 std::lock_guard lock{joystick_map_mutex};
144 const auto it = joystick_map.find(guid); 144 const auto it = joystick_map.find(guid);
145 if (it != joystick_map.end()) { 145 if (it != joystick_map.end()) {
146 while (it->second.size() <= port) { 146 while (it->second.size() <= port) {
@@ -161,7 +161,8 @@ std::shared_ptr<SDLJoystick> SDLState::GetSDLJoystickByGUID(const std::string& g
161std::shared_ptr<SDLJoystick> SDLState::GetSDLJoystickBySDLID(SDL_JoystickID sdl_id) { 161std::shared_ptr<SDLJoystick> SDLState::GetSDLJoystickBySDLID(SDL_JoystickID sdl_id) {
162 auto sdl_joystick = SDL_JoystickFromInstanceID(sdl_id); 162 auto sdl_joystick = SDL_JoystickFromInstanceID(sdl_id);
163 const std::string guid = GetGUID(sdl_joystick); 163 const std::string guid = GetGUID(sdl_joystick);
164 std::lock_guard<std::mutex> lock(joystick_map_mutex); 164
165 std::lock_guard lock{joystick_map_mutex};
165 auto map_it = joystick_map.find(guid); 166 auto map_it = joystick_map.find(guid);
166 if (map_it != joystick_map.end()) { 167 if (map_it != joystick_map.end()) {
167 auto vec_it = std::find_if(map_it->second.begin(), map_it->second.end(), 168 auto vec_it = std::find_if(map_it->second.begin(), map_it->second.end(),
@@ -198,8 +199,9 @@ void SDLState::InitJoystick(int joystick_index) {
198 LOG_ERROR(Input, "failed to open joystick {}", joystick_index); 199 LOG_ERROR(Input, "failed to open joystick {}", joystick_index);
199 return; 200 return;
200 } 201 }
201 std::string guid = GetGUID(sdl_joystick); 202 const std::string guid = GetGUID(sdl_joystick);
202 std::lock_guard<std::mutex> lock(joystick_map_mutex); 203
204 std::lock_guard lock{joystick_map_mutex};
203 if (joystick_map.find(guid) == joystick_map.end()) { 205 if (joystick_map.find(guid) == joystick_map.end()) {
204 auto joystick = std::make_shared<SDLJoystick>(guid, 0, sdl_joystick); 206 auto joystick = std::make_shared<SDLJoystick>(guid, 0, sdl_joystick);
205 joystick_map[guid].emplace_back(std::move(joystick)); 207 joystick_map[guid].emplace_back(std::move(joystick));
@@ -221,7 +223,7 @@ void SDLState::CloseJoystick(SDL_Joystick* sdl_joystick) {
221 std::string guid = GetGUID(sdl_joystick); 223 std::string guid = GetGUID(sdl_joystick);
222 std::shared_ptr<SDLJoystick> joystick; 224 std::shared_ptr<SDLJoystick> joystick;
223 { 225 {
224 std::lock_guard<std::mutex> lock(joystick_map_mutex); 226 std::lock_guard lock{joystick_map_mutex};
225 // This call to guid is safe since the joystick is guaranteed to be in the map 227 // This call to guid is safe since the joystick is guaranteed to be in the map
226 auto& joystick_guid_list = joystick_map[guid]; 228 auto& joystick_guid_list = joystick_map[guid];
227 const auto joystick_it = 229 const auto joystick_it =
@@ -274,7 +276,7 @@ void SDLState::HandleGameControllerEvent(const SDL_Event& event) {
274} 276}
275 277
276void SDLState::CloseJoysticks() { 278void SDLState::CloseJoysticks() {
277 std::lock_guard<std::mutex> lock(joystick_map_mutex); 279 std::lock_guard lock{joystick_map_mutex};
278 joystick_map.clear(); 280 joystick_map.clear();
279} 281}
280 282
diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt
index d0284bdf4..c7038b217 100644
--- a/src/tests/CMakeLists.txt
+++ b/src/tests/CMakeLists.txt
@@ -1,5 +1,7 @@
1add_executable(tests 1add_executable(tests
2 common/bit_field.cpp 2 common/bit_field.cpp
3 common/bit_utils.cpp
4 common/multi_level_queue.cpp
3 common/param_package.cpp 5 common/param_package.cpp
4 common/ring_buffer.cpp 6 common/ring_buffer.cpp
5 core/arm/arm_test_common.cpp 7 core/arm/arm_test_common.cpp
diff --git a/src/tests/common/bit_utils.cpp b/src/tests/common/bit_utils.cpp
new file mode 100644
index 000000000..479b5995a
--- /dev/null
+++ b/src/tests/common/bit_utils.cpp
@@ -0,0 +1,23 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <catch2/catch.hpp>
6#include <math.h>
7#include "common/bit_util.h"
8
9namespace Common {
10
11TEST_CASE("BitUtils::CountTrailingZeroes", "[common]") {
12 REQUIRE(Common::CountTrailingZeroes32(0) == 32);
13 REQUIRE(Common::CountTrailingZeroes64(0) == 64);
14 REQUIRE(Common::CountTrailingZeroes32(9) == 0);
15 REQUIRE(Common::CountTrailingZeroes32(8) == 3);
16 REQUIRE(Common::CountTrailingZeroes32(0x801000) == 12);
17 REQUIRE(Common::CountTrailingZeroes64(9) == 0);
18 REQUIRE(Common::CountTrailingZeroes64(8) == 3);
19 REQUIRE(Common::CountTrailingZeroes64(0x801000) == 12);
20 REQUIRE(Common::CountTrailingZeroes64(0x801000000000UL) == 36);
21}
22
23} // namespace Common
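Editor's note: the expectations above follow the convention that a zero input returns the full bit width (32 or 64). A portable reference version that matches those results is sketched below; the real common/bit_util.h presumably maps to compiler intrinsics, so this loop is only an illustration of the contract.

    // Reference behaviour for the test expectations above (illustrative, not the
    // intrinsic-based implementation in common/bit_util.h).
    #include <cstdint>

    std::uint32_t CountTrailingZeroes32(std::uint32_t value) {
        if (value == 0) {
            return 32; // zero input returns the bit width
        }
        std::uint32_t count = 0;
        while ((value & 1) == 0) {
            value >>= 1;
            ++count;
        }
        return count;
    }
    // CountTrailingZeroes32(8) == 3, CountTrailingZeroes32(0x801000) == 12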
diff --git a/src/tests/common/multi_level_queue.cpp b/src/tests/common/multi_level_queue.cpp
new file mode 100644
index 000000000..cca7ec7da
--- /dev/null
+++ b/src/tests/common/multi_level_queue.cpp
@@ -0,0 +1,55 @@
1// Copyright 2019 Yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <catch2/catch.hpp>
6#include <math.h>
7#include "common/common_types.h"
8#include "common/multi_level_queue.h"
9
10namespace Common {
11
12TEST_CASE("MultiLevelQueue", "[common]") {
13 std::array<f32, 8> values = {0.0, 5.0, 1.0, 9.0, 8.0, 2.0, 6.0, 7.0};
14 Common::MultiLevelQueue<f32, 64> mlq;
15 REQUIRE(mlq.empty());
16 mlq.add(values[2], 2);
17 mlq.add(values[7], 7);
18 mlq.add(values[3], 3);
19 mlq.add(values[4], 4);
20 mlq.add(values[0], 0);
21 mlq.add(values[5], 5);
22 mlq.add(values[6], 6);
23 mlq.add(values[1], 1);
24 u32 index = 0;
25 bool all_set = true;
26 for (auto& f : mlq) {
27 all_set &= (f == values[index]);
28 index++;
29 }
30 REQUIRE(all_set);
31 REQUIRE(!mlq.empty());
32 f32 v = 8.0;
33 mlq.add(v, 2);
34 v = -7.0;
35 mlq.add(v, 2, false);
36 REQUIRE(mlq.front(2) == -7.0);
37 mlq.yield(2);
38 REQUIRE(mlq.front(2) == values[2]);
39 REQUIRE(mlq.back(2) == -7.0);
40 REQUIRE(mlq.empty(8));
41 v = 10.0;
42 mlq.add(v, 8);
43 mlq.adjust(v, 8, 9);
44 REQUIRE(mlq.front(9) == v);
45 REQUIRE(mlq.empty(8));
46 REQUIRE(!mlq.empty(9));
47 mlq.adjust(values[0], 0, 9);
48 REQUIRE(mlq.highest_priority_set() == 1);
49 REQUIRE(mlq.lowest_priority_set() == 9);
50 mlq.remove(values[1], 1);
51 REQUIRE(mlq.highest_priority_set() == 2);
52 REQUIRE(mlq.empty(1));
53}
54
55} // namespace Common
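Editor's note: highest_priority_set() and lowest_priority_set() in the test above are the kind of query a multi-level queue usually answers from a bitmask of non-empty priority levels rather than by scanning every level. The sketch below shows that bookkeeping under that assumption; it is not a copy of common/multi_level_queue.h.

    // Assumed priority-bitmask bookkeeping: one bit per level, so "highest
    // priority in use" is a trailing-zero count over the mask.
    #include <cstdint>

    struct PriorityMask {
        std::uint64_t used = 0; // bit p set when priority level p has at least one entry

        void MarkUsed(std::uint32_t priority) { used |= (1ULL << priority); }
        void MarkEmpty(std::uint32_t priority) { used &= ~(1ULL << priority); }

        // Lower numeric priority == higher priority; 64 means "nothing queued".
        std::uint32_t HighestPrioritySet() const {
            if (used == 0) {
                return 64;
            }
            std::uint32_t p = 0;
            while (((used >> p) & 1) == 0) {
                ++p;
            }
            return p;
        }
    };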
diff --git a/src/video_core/debug_utils/debug_utils.cpp b/src/video_core/debug_utils/debug_utils.cpp
index 5ffb492ea..f0ef67535 100644
--- a/src/video_core/debug_utils/debug_utils.cpp
+++ b/src/video_core/debug_utils/debug_utils.cpp
@@ -10,7 +10,7 @@ namespace Tegra {
10 10
11void DebugContext::DoOnEvent(Event event, void* data) { 11void DebugContext::DoOnEvent(Event event, void* data) {
12 { 12 {
13 std::unique_lock<std::mutex> lock(breakpoint_mutex); 13 std::unique_lock lock{breakpoint_mutex};
14 14
15 // TODO(Subv): Commit the rasterizer's caches so framebuffers, render targets, etc. will 15 // TODO(Subv): Commit the rasterizer's caches so framebuffers, render targets, etc. will
16 // show on debug widgets 16 // show on debug widgets
@@ -32,7 +32,7 @@ void DebugContext::DoOnEvent(Event event, void* data) {
32 32
33void DebugContext::Resume() { 33void DebugContext::Resume() {
34 { 34 {
35 std::lock_guard<std::mutex> lock(breakpoint_mutex); 35 std::lock_guard lock{breakpoint_mutex};
36 36
37 // Tell all observers that we are about to resume 37 // Tell all observers that we are about to resume
38 for (auto& breakpoint_observer : breakpoint_observers) { 38 for (auto& breakpoint_observer : breakpoint_observers) {
diff --git a/src/video_core/debug_utils/debug_utils.h b/src/video_core/debug_utils/debug_utils.h
index c235faf46..ac3a2eb01 100644
--- a/src/video_core/debug_utils/debug_utils.h
+++ b/src/video_core/debug_utils/debug_utils.h
@@ -40,7 +40,7 @@ public:
40 /// Constructs the object such that it observes events of the given DebugContext. 40 /// Constructs the object such that it observes events of the given DebugContext.
41 explicit BreakPointObserver(std::shared_ptr<DebugContext> debug_context) 41 explicit BreakPointObserver(std::shared_ptr<DebugContext> debug_context)
42 : context_weak(debug_context) { 42 : context_weak(debug_context) {
43 std::unique_lock<std::mutex> lock(debug_context->breakpoint_mutex); 43 std::unique_lock lock{debug_context->breakpoint_mutex};
44 debug_context->breakpoint_observers.push_back(this); 44 debug_context->breakpoint_observers.push_back(this);
45 } 45 }
46 46
@@ -48,7 +48,7 @@ public:
48 auto context = context_weak.lock(); 48 auto context = context_weak.lock();
49 if (context) { 49 if (context) {
50 { 50 {
51 std::unique_lock<std::mutex> lock(context->breakpoint_mutex); 51 std::unique_lock lock{context->breakpoint_mutex};
52 context->breakpoint_observers.remove(this); 52 context->breakpoint_observers.remove(this);
53 } 53 }
54 54
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index 267a03f2d..30b29e14d 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -286,9 +286,10 @@ void GPU::ProcessSemaphoreTriggerMethod() {
286 // TODO(Kmather73): Generate a real GPU timestamp and write it here instead of 286 // TODO(Kmather73): Generate a real GPU timestamp and write it here instead of
287 // CoreTiming 287 // CoreTiming
288 block.timestamp = Core::System::GetInstance().CoreTiming().GetTicks(); 288 block.timestamp = Core::System::GetInstance().CoreTiming().GetTicks();
289 memory_manager->WriteBlock(regs.smaphore_address.SmaphoreAddress(), &block, sizeof(block)); 289 memory_manager->WriteBlock(regs.semaphore_address.SemaphoreAddress(), &block,
290 sizeof(block));
290 } else { 291 } else {
291 const u32 word{memory_manager->Read<u32>(regs.smaphore_address.SmaphoreAddress())}; 292 const u32 word{memory_manager->Read<u32>(regs.semaphore_address.SemaphoreAddress())};
292 if ((op == GpuSemaphoreOperation::AcquireEqual && word == regs.semaphore_sequence) || 293 if ((op == GpuSemaphoreOperation::AcquireEqual && word == regs.semaphore_sequence) ||
293 (op == GpuSemaphoreOperation::AcquireGequal && 294 (op == GpuSemaphoreOperation::AcquireGequal &&
294 static_cast<s32>(word - regs.semaphore_sequence) > 0) || 295 static_cast<s32>(word - regs.semaphore_sequence) > 0) ||
@@ -315,11 +316,11 @@ void GPU::ProcessSemaphoreTriggerMethod() {
315} 316}
316 317
317void GPU::ProcessSemaphoreRelease() { 318void GPU::ProcessSemaphoreRelease() {
318 memory_manager->Write<u32>(regs.smaphore_address.SmaphoreAddress(), regs.semaphore_release); 319 memory_manager->Write<u32>(regs.semaphore_address.SemaphoreAddress(), regs.semaphore_release);
319} 320}
320 321
321void GPU::ProcessSemaphoreAcquire() { 322void GPU::ProcessSemaphoreAcquire() {
322 const u32 word = memory_manager->Read<u32>(regs.smaphore_address.SmaphoreAddress()); 323 const u32 word = memory_manager->Read<u32>(regs.semaphore_address.SemaphoreAddress());
323 const auto value = regs.semaphore_acquire; 324 const auto value = regs.semaphore_acquire;
324 if (word != value) { 325 if (word != value) {
325 regs.acquire_active = true; 326 regs.acquire_active = true;
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h
index c1830ac8d..de30ea354 100644
--- a/src/video_core/gpu.h
+++ b/src/video_core/gpu.h
@@ -177,11 +177,11 @@ public:
177 u32 address_high; 177 u32 address_high;
178 u32 address_low; 178 u32 address_low;
179 179
180 GPUVAddr SmaphoreAddress() const { 180 GPUVAddr SemaphoreAddress() const {
181 return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | 181 return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
182 address_low); 182 address_low);
183 } 183 }
184 } smaphore_address; 184 } semaphore_address;
185 185
186 u32 semaphore_sequence; 186 u32 semaphore_sequence;
187 u32 semaphore_trigger; 187 u32 semaphore_trigger;
@@ -263,7 +263,7 @@ private:
263 static_assert(offsetof(GPU::Regs, field_name) == position * 4, \ 263 static_assert(offsetof(GPU::Regs, field_name) == position * 4, \
264 "Field " #field_name " has invalid position") 264 "Field " #field_name " has invalid position")
265 265
266ASSERT_REG_POSITION(smaphore_address, 0x4); 266ASSERT_REG_POSITION(semaphore_address, 0x4);
267ASSERT_REG_POSITION(semaphore_sequence, 0x6); 267ASSERT_REG_POSITION(semaphore_sequence, 0x6);
268ASSERT_REG_POSITION(semaphore_trigger, 0x7); 268ASSERT_REG_POSITION(semaphore_trigger, 0x7);
269ASSERT_REG_POSITION(reference_count, 0x14); 269ASSERT_REG_POSITION(reference_count, 0x14);
diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp
index 086b2f625..c5dc199c5 100644
--- a/src/video_core/gpu_thread.cpp
+++ b/src/video_core/gpu_thread.cpp
@@ -52,8 +52,8 @@ static void RunThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_p
52} 52}
53 53
54ThreadManager::ThreadManager(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_pusher) 54ThreadManager::ThreadManager(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_pusher)
55 : renderer{renderer}, dma_pusher{dma_pusher}, thread{RunThread, std::ref(renderer), 55 : renderer{renderer}, thread{RunThread, std::ref(renderer), std::ref(dma_pusher),
56 std::ref(dma_pusher), std::ref(state)} {} 56 std::ref(state)} {}
57 57
58ThreadManager::~ThreadManager() { 58ThreadManager::~ThreadManager() {
59 // Notify GPU thread that a shutdown is pending 59 // Notify GPU thread that a shutdown is pending
diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h
index 8cd7db1c6..70acb2e79 100644
--- a/src/video_core/gpu_thread.h
+++ b/src/video_core/gpu_thread.h
@@ -4,10 +4,8 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <array>
8#include <atomic> 7#include <atomic>
9#include <condition_variable> 8#include <condition_variable>
10#include <memory>
11#include <mutex> 9#include <mutex>
12#include <optional> 10#include <optional>
13#include <thread> 11#include <thread>
@@ -97,13 +95,13 @@ struct SynchState final {
97 std::condition_variable frames_condition; 95 std::condition_variable frames_condition;
98 96
99 void IncrementFramesCounter() { 97 void IncrementFramesCounter() {
100 std::lock_guard<std::mutex> lock{frames_mutex}; 98 std::lock_guard lock{frames_mutex};
101 ++queued_frame_count; 99 ++queued_frame_count;
102 } 100 }
103 101
104 void DecrementFramesCounter() { 102 void DecrementFramesCounter() {
105 { 103 {
106 std::lock_guard<std::mutex> lock{frames_mutex}; 104 std::lock_guard lock{frames_mutex};
107 --queued_frame_count; 105 --queued_frame_count;
108 106
109 if (queued_frame_count) { 107 if (queued_frame_count) {
@@ -115,7 +113,7 @@ struct SynchState final {
115 113
116 void WaitForFrames() { 114 void WaitForFrames() {
117 { 115 {
118 std::lock_guard<std::mutex> lock{frames_mutex}; 116 std::lock_guard lock{frames_mutex};
119 if (!queued_frame_count) { 117 if (!queued_frame_count) {
120 return; 118 return;
121 } 119 }
@@ -123,14 +121,14 @@ struct SynchState final {
123 121
124 // Wait for the GPU to be idle (all commands to be executed) 122 // Wait for the GPU to be idle (all commands to be executed)
125 { 123 {
126 std::unique_lock<std::mutex> lock{frames_mutex}; 124 std::unique_lock lock{frames_mutex};
127 frames_condition.wait(lock, [this] { return !queued_frame_count; }); 125 frames_condition.wait(lock, [this] { return !queued_frame_count; });
128 } 126 }
129 } 127 }
130 128
131 void SignalCommands() { 129 void SignalCommands() {
132 { 130 {
133 std::unique_lock<std::mutex> lock{commands_mutex}; 131 std::unique_lock lock{commands_mutex};
134 if (queue.Empty()) { 132 if (queue.Empty()) {
135 return; 133 return;
136 } 134 }
@@ -140,7 +138,7 @@ struct SynchState final {
140 } 138 }
141 139
142 void WaitForCommands() { 140 void WaitForCommands() {
143 std::unique_lock<std::mutex> lock{commands_mutex}; 141 std::unique_lock lock{commands_mutex};
144 commands_condition.wait(lock, [this] { return !queue.Empty(); }); 142 commands_condition.wait(lock, [this] { return !queue.Empty(); });
145 } 143 }
146 144
@@ -177,7 +175,6 @@ private:
177private: 175private:
178 SynchState state; 176 SynchState state;
179 VideoCore::RendererBase& renderer; 177 VideoCore::RendererBase& renderer;
180 Tegra::DmaPusher& dma_pusher;
181 std::thread thread; 178 std::thread thread;
182 std::thread::id thread_id; 179 std::thread::id thread_id;
183}; 180};
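Editor's note: WaitForCommands/WaitForFrames above use the standard condition-variable idiom (take the lock, then wait with a predicate so spurious wakeups and already-signalled state are both handled). A compact, generic version of that pattern, independent of yuzu's SynchState:

    // Generic wait-with-predicate pattern, mirroring the SynchState usage above.
    #include <condition_variable>
    #include <mutex>
    #include <thread>

    std::mutex m;
    std::condition_variable cv;
    bool work_ready = false;

    void Producer() {
        {
            std::lock_guard lock{m};
            work_ready = true;
        }
        cv.notify_one();
    }

    void Consumer() {
        std::unique_lock lock{m};
        // The predicate re-checks the state, so a wakeup with no pending work
        // (spurious or stale) simply goes back to sleep.
        cv.wait(lock, [] { return work_ready; });
    }

    int main() {
        std::thread c{Consumer};
        Producer();
        c.join();
    }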
diff --git a/src/video_core/rasterizer_cache.h b/src/video_core/rasterizer_cache.h
index 9fc9f3056..291772186 100644
--- a/src/video_core/rasterizer_cache.h
+++ b/src/video_core/rasterizer_cache.h
@@ -71,8 +71,8 @@ private:
71 bool is_registered{}; ///< Whether the object is currently registered with the cache 71 bool is_registered{}; ///< Whether the object is currently registered with the cache
72 bool is_dirty{}; ///< Whether the object is dirty (out of sync with guest memory) 72 bool is_dirty{}; ///< Whether the object is dirty (out of sync with guest memory)
73 u64 last_modified_ticks{}; ///< When the object was last modified, used for in-order flushing 73 u64 last_modified_ticks{}; ///< When the object was last modified, used for in-order flushing
74 CacheAddr cache_addr{}; ///< Cache address memory, unique from emulated virtual address space
75 const u8* host_ptr{}; ///< Pointer to the memory backing this cached region 74 const u8* host_ptr{}; ///< Pointer to the memory backing this cached region
75 CacheAddr cache_addr{}; ///< Cache address memory, unique from emulated virtual address space
76}; 76};
77 77
78template <class T> 78template <class T>
@@ -84,7 +84,7 @@ public:
84 84
85 /// Write any cached resources overlapping the specified region back to memory 85 /// Write any cached resources overlapping the specified region back to memory
86 void FlushRegion(CacheAddr addr, std::size_t size) { 86 void FlushRegion(CacheAddr addr, std::size_t size) {
87 std::lock_guard<std::recursive_mutex> lock{mutex}; 87 std::lock_guard lock{mutex};
88 88
89 const auto& objects{GetSortedObjectsFromRegion(addr, size)}; 89 const auto& objects{GetSortedObjectsFromRegion(addr, size)};
90 for (auto& object : objects) { 90 for (auto& object : objects) {
@@ -94,7 +94,7 @@ public:
94 94
95 /// Mark the specified region as being invalidated 95 /// Mark the specified region as being invalidated
96 void InvalidateRegion(CacheAddr addr, u64 size) { 96 void InvalidateRegion(CacheAddr addr, u64 size) {
97 std::lock_guard<std::recursive_mutex> lock{mutex}; 97 std::lock_guard lock{mutex};
98 98
99 const auto& objects{GetSortedObjectsFromRegion(addr, size)}; 99 const auto& objects{GetSortedObjectsFromRegion(addr, size)};
100 for (auto& object : objects) { 100 for (auto& object : objects) {
@@ -108,7 +108,7 @@ public:
108 108
109 /// Invalidates everything in the cache 109 /// Invalidates everything in the cache
110 void InvalidateAll() { 110 void InvalidateAll() {
111 std::lock_guard<std::recursive_mutex> lock{mutex}; 111 std::lock_guard lock{mutex};
112 112
113 while (interval_cache.begin() != interval_cache.end()) { 113 while (interval_cache.begin() != interval_cache.end()) {
114 Unregister(*interval_cache.begin()->second.begin()); 114 Unregister(*interval_cache.begin()->second.begin());
@@ -133,7 +133,7 @@ protected:
133 133
134 /// Register an object into the cache 134 /// Register an object into the cache
135 virtual void Register(const T& object) { 135 virtual void Register(const T& object) {
136 std::lock_guard<std::recursive_mutex> lock{mutex}; 136 std::lock_guard lock{mutex};
137 137
138 object->SetIsRegistered(true); 138 object->SetIsRegistered(true);
139 interval_cache.add({GetInterval(object), ObjectSet{object}}); 139 interval_cache.add({GetInterval(object), ObjectSet{object}});
@@ -143,7 +143,7 @@ protected:
143 143
144 /// Unregisters an object from the cache 144 /// Unregisters an object from the cache
145 virtual void Unregister(const T& object) { 145 virtual void Unregister(const T& object) {
146 std::lock_guard<std::recursive_mutex> lock{mutex}; 146 std::lock_guard lock{mutex};
147 147
148 object->SetIsRegistered(false); 148 object->SetIsRegistered(false);
149 rasterizer.UpdatePagesCachedCount(object->GetCpuAddr(), object->GetSizeInBytes(), -1); 149 rasterizer.UpdatePagesCachedCount(object->GetCpuAddr(), object->GetSizeInBytes(), -1);
@@ -153,14 +153,14 @@ protected:
153 153
154 /// Returns a ticks counter used for tracking when cached objects were last modified 154 /// Returns a ticks counter used for tracking when cached objects were last modified
155 u64 GetModifiedTicks() { 155 u64 GetModifiedTicks() {
156 std::lock_guard<std::recursive_mutex> lock{mutex}; 156 std::lock_guard lock{mutex};
157 157
158 return ++modified_ticks; 158 return ++modified_ticks;
159 } 159 }
160 160
161 /// Flushes the specified object, updating appropriate cache state as needed 161 /// Flushes the specified object, updating appropriate cache state as needed
162 void FlushObject(const T& object) { 162 void FlushObject(const T& object) {
163 std::lock_guard<std::recursive_mutex> lock{mutex}; 163 std::lock_guard lock{mutex};
164 164
165 if (!object->IsDirty()) { 165 if (!object->IsDirty()) {
166 return; 166 return;
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
index f75c65825..fd091c84c 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
@@ -15,8 +15,8 @@ namespace OpenGL {
15 15
16CachedBufferEntry::CachedBufferEntry(VAddr cpu_addr, std::size_t size, GLintptr offset, 16CachedBufferEntry::CachedBufferEntry(VAddr cpu_addr, std::size_t size, GLintptr offset,
17 std::size_t alignment, u8* host_ptr) 17 std::size_t alignment, u8* host_ptr)
18 : cpu_addr{cpu_addr}, size{size}, offset{offset}, alignment{alignment}, RasterizerCacheObject{ 18 : RasterizerCacheObject{host_ptr}, cpu_addr{cpu_addr}, size{size}, offset{offset},
19 host_ptr} {} 19 alignment{alignment} {}
20 20
21OGLBufferCache::OGLBufferCache(RasterizerOpenGL& rasterizer, std::size_t size) 21OGLBufferCache::OGLBufferCache(RasterizerOpenGL& rasterizer, std::size_t size)
22 : RasterizerCache{rasterizer}, stream_buffer(size, true) {} 22 : RasterizerCache{rasterizer}, stream_buffer(size, true) {}
diff --git a/src/video_core/renderer_opengl/gl_global_cache.cpp b/src/video_core/renderer_opengl/gl_global_cache.cpp
index 0fbfbad55..da9326253 100644
--- a/src/video_core/renderer_opengl/gl_global_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_global_cache.cpp
@@ -15,7 +15,7 @@
15namespace OpenGL { 15namespace OpenGL {
16 16
17CachedGlobalRegion::CachedGlobalRegion(VAddr cpu_addr, u32 size, u8* host_ptr) 17CachedGlobalRegion::CachedGlobalRegion(VAddr cpu_addr, u32 size, u8* host_ptr)
18 : cpu_addr{cpu_addr}, size{size}, RasterizerCacheObject{host_ptr} { 18 : RasterizerCacheObject{host_ptr}, cpu_addr{cpu_addr}, size{size} {
19 buffer.Create(); 19 buffer.Create();
20 // Bind and unbind the buffer so it gets allocated by the driver 20 // Bind and unbind the buffer so it gets allocated by the driver
21 glBindBuffer(GL_SHADER_STORAGE_BUFFER, buffer.handle); 21 glBindBuffer(GL_SHADER_STORAGE_BUFFER, buffer.handle);
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index e06dfe43f..046fc935b 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -100,11 +100,9 @@ struct FramebufferCacheKey {
100 } 100 }
101}; 101};
102 102
103RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& window, Core::System& system, 103RasterizerOpenGL::RasterizerOpenGL(Core::System& system, ScreenInfo& info)
104 ScreenInfo& info) 104 : res_cache{*this}, shader_cache{*this, system}, global_cache{*this}, system{system},
105 : res_cache{*this}, shader_cache{*this, system}, global_cache{*this}, 105 screen_info{info}, buffer_cache(*this, STREAM_BUFFER_SIZE) {
106 emu_window{window}, system{system}, screen_info{info},
107 buffer_cache(*this, STREAM_BUFFER_SIZE) {
108 // Create sampler objects 106 // Create sampler objects
109 for (std::size_t i = 0; i < texture_samplers.size(); ++i) { 107 for (std::size_t i = 0; i < texture_samplers.size(); ++i) {
110 texture_samplers[i].Create(); 108 texture_samplers[i].Create();
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index 30f3e8acb..4de565321 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -50,8 +50,7 @@ struct FramebufferCacheKey;
50 50
51class RasterizerOpenGL : public VideoCore::RasterizerInterface { 51class RasterizerOpenGL : public VideoCore::RasterizerInterface {
52public: 52public:
53 explicit RasterizerOpenGL(Core::Frontend::EmuWindow& window, Core::System& system, 53 explicit RasterizerOpenGL(Core::System& system, ScreenInfo& info);
54 ScreenInfo& info);
55 ~RasterizerOpenGL() override; 54 ~RasterizerOpenGL() override;
56 55
57 void DrawArrays() override; 56 void DrawArrays() override;
@@ -214,7 +213,6 @@ private:
214 ShaderCacheOpenGL shader_cache; 213 ShaderCacheOpenGL shader_cache;
215 GlobalRegionCacheOpenGL global_cache; 214 GlobalRegionCacheOpenGL global_cache;
216 215
217 Core::Frontend::EmuWindow& emu_window;
218 Core::System& system; 216 Core::System& system;
219 217
220 ScreenInfo& screen_info; 218 ScreenInfo& screen_info;
diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
index 0235317c0..aba6ce731 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
@@ -562,8 +562,8 @@ void RasterizerCacheOpenGL::CopySurface(const Surface& src_surface, const Surfac
562} 562}
563 563
564CachedSurface::CachedSurface(const SurfaceParams& params) 564CachedSurface::CachedSurface(const SurfaceParams& params)
565 : params{params}, gl_target{SurfaceTargetToGL(params.target)}, 565 : RasterizerCacheObject{params.host_ptr}, params{params},
566 cached_size_in_bytes{params.size_in_bytes}, RasterizerCacheObject{params.host_ptr} { 566 gl_target{SurfaceTargetToGL(params.target)}, cached_size_in_bytes{params.size_in_bytes} {
567 567
568 const auto optional_cpu_addr{ 568 const auto optional_cpu_addr{
569 Core::System::GetInstance().GPU().MemoryManager().GpuToCpuAddress(params.gpu_addr)}; 569 Core::System::GetInstance().GPU().MemoryManager().GpuToCpuAddress(params.gpu_addr)};
diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.h b/src/video_core/renderer_opengl/gl_rasterizer_cache.h
index c644271d0..e8073579f 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer_cache.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.h
@@ -538,12 +538,12 @@ private:
538 return nullptr; 538 return nullptr;
539 } 539 }
540 540
541 void Register(const Surface& object) { 541 void Register(const Surface& object) override {
542 RasterizerCache<Surface>::Register(object); 542 RasterizerCache<Surface>::Register(object);
543 } 543 }
544 544
545 /// Unregisters an object from the cache 545 /// Unregisters an object from the cache
546 void Unregister(const Surface& object) { 546 void Unregister(const Surface& object) override {
547 if (object->IsReinterpreted()) { 547 if (object->IsReinterpreted()) {
548 auto interval = GetReinterpretInterval(object); 548 auto interval = GetReinterpretInterval(object);
549 reinterpreted_surfaces.erase(interval); 549 reinterpreted_surfaces.erase(interval);
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index 1f8eca6f0..290e654bc 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -215,9 +215,9 @@ CachedShader::CachedShader(VAddr cpu_addr, u64 unique_identifier,
215 Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache, 215 Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache,
216 const PrecompiledPrograms& precompiled_programs, 216 const PrecompiledPrograms& precompiled_programs,
217 ProgramCode&& program_code, ProgramCode&& program_code_b, u8* host_ptr) 217 ProgramCode&& program_code, ProgramCode&& program_code_b, u8* host_ptr)
218 : host_ptr{host_ptr}, cpu_addr{cpu_addr}, unique_identifier{unique_identifier}, 218 : RasterizerCacheObject{host_ptr}, host_ptr{host_ptr}, cpu_addr{cpu_addr},
219 program_type{program_type}, disk_cache{disk_cache}, 219 unique_identifier{unique_identifier}, program_type{program_type}, disk_cache{disk_cache},
220 precompiled_programs{precompiled_programs}, RasterizerCacheObject{host_ptr} { 220 precompiled_programs{precompiled_programs} {
221 221
222 const std::size_t code_size = CalculateProgramSize(program_code); 222 const std::size_t code_size = CalculateProgramSize(program_code);
223 const std::size_t code_size_b = 223 const std::size_t code_size_b =
@@ -245,9 +245,9 @@ CachedShader::CachedShader(VAddr cpu_addr, u64 unique_identifier,
245 Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache, 245 Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache,
246 const PrecompiledPrograms& precompiled_programs, 246 const PrecompiledPrograms& precompiled_programs,
247 GLShader::ProgramResult result, u8* host_ptr) 247 GLShader::ProgramResult result, u8* host_ptr)
248 : cpu_addr{cpu_addr}, unique_identifier{unique_identifier}, program_type{program_type}, 248 : RasterizerCacheObject{host_ptr}, cpu_addr{cpu_addr}, unique_identifier{unique_identifier},
249 disk_cache{disk_cache}, precompiled_programs{precompiled_programs}, RasterizerCacheObject{ 249 program_type{program_type}, disk_cache{disk_cache}, precompiled_programs{
250 host_ptr} { 250 precompiled_programs} {
251 251
252 code = std::move(result.first); 252 code = std::move(result.first);
253 entries = result.second; 253 entries = result.second;
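Editor's note: the constructor changes in the buffer, global and shader caches all move RasterizerCacheObject{host_ptr} to the front of the initializer list. Bases and members are always constructed in declaration order (base first) regardless of how the list is written, so the old ordering only earned -Wreorder warnings. A tiny illustration with stand-in types:

    // Why the initializer lists were reordered: initialization follows declaration
    // order, so listing the base last just triggers -Wreorder.
    struct Base {
        explicit Base(const unsigned char* ptr) : host_ptr{ptr} {}
        const unsigned char* host_ptr;
    };

    struct Entry : Base {
        // Base is constructed first, then cpu_addr, then size; matching that
        // order in the initializer list keeps compilers quiet.
        Entry(unsigned long long addr, unsigned long long sz, const unsigned char* ptr)
            : Base{ptr}, cpu_addr{addr}, size{sz} {}

        unsigned long long cpu_addr;
        unsigned long long size;
    };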
diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp
index 5e3d862c6..a01efeb05 100644
--- a/src/video_core/renderer_opengl/renderer_opengl.cpp
+++ b/src/video_core/renderer_opengl/renderer_opengl.cpp
@@ -266,7 +266,7 @@ void RendererOpenGL::CreateRasterizer() {
266 } 266 }
267 // Initialize sRGB Usage 267 // Initialize sRGB Usage
268 OpenGLState::ClearsRGBUsed(); 268 OpenGLState::ClearsRGBUsed();
269 rasterizer = std::make_unique<RasterizerOpenGL>(render_window, system, screen_info); 269 rasterizer = std::make_unique<RasterizerOpenGL>(system, screen_info);
270} 270}
271 271
272void RendererOpenGL::ConfigureFramebufferTexture(TextureInfo& texture, 272void RendererOpenGL::ConfigureFramebufferTexture(TextureInfo& texture,
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
index eac51ecb3..388b5ffd5 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
@@ -19,8 +19,8 @@ namespace Vulkan {
19 19
20CachedBufferEntry::CachedBufferEntry(VAddr cpu_addr, std::size_t size, u64 offset, 20CachedBufferEntry::CachedBufferEntry(VAddr cpu_addr, std::size_t size, u64 offset,
21 std::size_t alignment, u8* host_ptr) 21 std::size_t alignment, u8* host_ptr)
22 : cpu_addr{cpu_addr}, size{size}, offset{offset}, alignment{alignment}, RasterizerCacheObject{ 22 : RasterizerCacheObject{host_ptr}, cpu_addr{cpu_addr}, size{size}, offset{offset},
23 host_ptr} {} 23 alignment{alignment} {}
24 24
25VKBufferCache::VKBufferCache(Tegra::MemoryManager& tegra_memory_manager, 25VKBufferCache::VKBufferCache(Tegra::MemoryManager& tegra_memory_manager,
26 VideoCore::RasterizerInterface& rasterizer, const VKDevice& device, 26 VideoCore::RasterizerInterface& rasterizer, const VKDevice& device,
diff --git a/src/video_core/renderer_vulkan/vk_resource_manager.cpp b/src/video_core/renderer_vulkan/vk_resource_manager.cpp
index a1e117443..13c46e5b8 100644
--- a/src/video_core/renderer_vulkan/vk_resource_manager.cpp
+++ b/src/video_core/renderer_vulkan/vk_resource_manager.cpp
@@ -21,7 +21,7 @@ public:
21 CommandBufferPool(const VKDevice& device) 21 CommandBufferPool(const VKDevice& device)
22 : VKFencedPool(COMMAND_BUFFER_POOL_SIZE), device{device} {} 22 : VKFencedPool(COMMAND_BUFFER_POOL_SIZE), device{device} {}
23 23
24 void Allocate(std::size_t begin, std::size_t end) { 24 void Allocate(std::size_t begin, std::size_t end) override {
25 const auto dev = device.GetLogical(); 25 const auto dev = device.GetLogical();
26 const auto& dld = device.GetDispatchLoader(); 26 const auto& dld = device.GetDispatchLoader();
27 const u32 graphics_family = device.GetGraphicsFamily(); 27 const u32 graphics_family = device.GetGraphicsFamily();
diff --git a/src/video_core/renderer_vulkan/vk_resource_manager.h b/src/video_core/renderer_vulkan/vk_resource_manager.h
index 5bfe4cead..08ee86fa6 100644
--- a/src/video_core/renderer_vulkan/vk_resource_manager.h
+++ b/src/video_core/renderer_vulkan/vk_resource_manager.h
@@ -97,7 +97,7 @@ private:
 class VKFenceWatch final : public VKResource {
 public:
     explicit VKFenceWatch();
-    ~VKFenceWatch();
+    ~VKFenceWatch() override;
 
     /// Waits for the fence to be released.
     void Wait();
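A minimal illustrative sketch of the override pattern applied in the two vk_resource_manager hunks above (the class names are hypothetical, not yuzu's):

#include <cstddef>

// Marking overriding functions, including destructors, with 'override' makes the
// compiler verify that a matching virtual function exists in the base class.
class Resource {
public:
    virtual ~Resource() = default;
    virtual void Allocate(std::size_t begin, std::size_t end) = 0;
};

class PooledResource final : public Resource {
public:
    ~PooledResource() override = default;
    void Allocate(std::size_t begin, std::size_t end) override {}
};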
diff --git a/src/web_service/web_backend.cpp b/src/web_service/web_backend.cpp
index 40da1a4e2..dc149d2ed 100644
--- a/src/web_service/web_backend.cpp
+++ b/src/web_service/web_backend.cpp
@@ -24,7 +24,7 @@ constexpr u32 TIMEOUT_SECONDS = 30;
 struct Client::Impl {
     Impl(std::string host, std::string username, std::string token)
         : host{std::move(host)}, username{std::move(username)}, token{std::move(token)} {
-        std::lock_guard<std::mutex> lock(jwt_cache.mutex);
+        std::lock_guard lock{jwt_cache.mutex};
         if (this->username == jwt_cache.username && this->token == jwt_cache.token) {
             jwt = jwt_cache.jwt;
         }
@@ -151,7 +151,7 @@ struct Client::Impl {
         if (result.result_code != Common::WebResult::Code::Success) {
             LOG_ERROR(WebService, "UpdateJWT failed");
         } else {
-            std::lock_guard<std::mutex> lock(jwt_cache.mutex);
+            std::lock_guard lock{jwt_cache.mutex};
             jwt_cache.username = username;
             jwt_cache.token = token;
             jwt_cache.jwt = jwt = result.returned_data;
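The lock changes here and in the hunks below all rely on C++17 class template argument deduction, which lets the mutex type be deduced from the constructor argument. A minimal illustrative sketch (the mutex name is hypothetical):

#include <mutex>

std::mutex example_mutex;

void with_explicit_argument() {
    std::lock_guard<std::mutex> lock(example_mutex); // pre-C++17 spelling
}

void with_deduction() {
    std::lock_guard lock{example_mutex}; // C++17 deduces std::lock_guard<std::mutex>
}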
diff --git a/src/yuzu/applets/profile_select.cpp b/src/yuzu/applets/profile_select.cpp
index 5c1b65a2c..730426c16 100644
--- a/src/yuzu/applets/profile_select.cpp
+++ b/src/yuzu/applets/profile_select.cpp
@@ -163,6 +163,6 @@ void QtProfileSelector::SelectProfile(
 
 void QtProfileSelector::MainWindowFinishedSelection(std::optional<Service::Account::UUID> uuid) {
     // Acquire the HLE mutex
-    std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
+    std::lock_guard lock{HLE::g_hle_lock};
     callback(uuid);
 }
diff --git a/src/yuzu/applets/software_keyboard.cpp b/src/yuzu/applets/software_keyboard.cpp
index 8a26fdff1..eddc9c941 100644
--- a/src/yuzu/applets/software_keyboard.cpp
+++ b/src/yuzu/applets/software_keyboard.cpp
@@ -141,12 +141,12 @@ void QtSoftwareKeyboard::SendTextCheckDialog(std::u16string error_message,
 
 void QtSoftwareKeyboard::MainWindowFinishedText(std::optional<std::u16string> text) {
     // Acquire the HLE mutex
-    std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
+    std::lock_guard lock{HLE::g_hle_lock};
     text_output(text);
 }
 
 void QtSoftwareKeyboard::MainWindowFinishedCheckDialog() {
     // Acquire the HLE mutex
-    std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
+    std::lock_guard lock{HLE::g_hle_lock};
     finished_check();
 }
diff --git a/src/yuzu/applets/web_browser.cpp b/src/yuzu/applets/web_browser.cpp
index 979b9ec14..ac80b2fa2 100644
--- a/src/yuzu/applets/web_browser.cpp
+++ b/src/yuzu/applets/web_browser.cpp
@@ -104,12 +104,12 @@ void QtWebBrowser::OpenPage(std::string_view url, std::function<void()> unpack_r
 
 void QtWebBrowser::MainWindowUnpackRomFS() {
     // Acquire the HLE mutex
-    std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
+    std::lock_guard lock{HLE::g_hle_lock};
     unpack_romfs_callback();
 }
 
 void QtWebBrowser::MainWindowFinishedBrowsing() {
     // Acquire the HLE mutex
-    std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
+    std::lock_guard lock{HLE::g_hle_lock};
     finished_callback();
 }
diff --git a/src/yuzu/bootmanager.cpp b/src/yuzu/bootmanager.cpp
index 05ad19e1d..7438fbc0a 100644
--- a/src/yuzu/bootmanager.cpp
+++ b/src/yuzu/bootmanager.cpp
@@ -67,7 +67,7 @@ void EmuThread::run() {
 
             was_active = false;
         } else {
-            std::unique_lock<std::mutex> lock(running_mutex);
+            std::unique_lock lock{running_mutex};
             running_cv.wait(lock, [this] { return IsRunning() || exec_step || stop_run; });
         }
     }
diff --git a/src/yuzu/bootmanager.h b/src/yuzu/bootmanager.h
index 7226e690e..3183621bc 100644
--- a/src/yuzu/bootmanager.h
+++ b/src/yuzu/bootmanager.h
@@ -53,7 +53,7 @@ public:
      * @note This function is thread-safe
      */
     void SetRunning(bool running) {
-        std::unique_lock<std::mutex> lock(running_mutex);
+        std::unique_lock lock{running_mutex};
         this->running = running;
         lock.unlock();
         running_cv.notify_all();
diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp
index 06ad74ffe..593bb681f 100644
--- a/src/yuzu/debugger/wait_tree.cpp
+++ b/src/yuzu/debugger/wait_tree.cpp
@@ -234,6 +234,9 @@ QString WaitTreeThread::GetText() const {
     case Kernel::ThreadStatus::WaitMutex:
         status = tr("waiting for mutex");
         break;
+    case Kernel::ThreadStatus::WaitCondVar:
+        status = tr("waiting for condition variable");
+        break;
     case Kernel::ThreadStatus::WaitArb:
         status = tr("waiting for address arbiter");
         break;
@@ -269,6 +272,7 @@ QColor WaitTreeThread::GetColor() const {
     case Kernel::ThreadStatus::WaitSynchAll:
     case Kernel::ThreadStatus::WaitSynchAny:
     case Kernel::ThreadStatus::WaitMutex:
+    case Kernel::ThreadStatus::WaitCondVar:
     case Kernel::ThreadStatus::WaitArb:
         return QColor(Qt::GlobalColor::red);
     case Kernel::ThreadStatus::Dormant: