summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/common/thread.cpp24
-rw-r--r--src/core/core.cpp7
-rw-r--r--src/core/debugger/gdbstub.cpp10
-rw-r--r--src/core/file_sys/fsmitm_romfsbuild.cpp84
-rw-r--r--src/core/file_sys/patch_manager.cpp8
-rw-r--r--src/core/file_sys/registered_cache.cpp3
-rw-r--r--src/core/file_sys/romfs.cpp5
-rw-r--r--src/core/file_sys/system_archive/system_version.cpp4
-rw-r--r--src/core/file_sys/vfs_cached.cpp6
-rw-r--r--src/core/file_sys/vfs_cached.h2
-rw-r--r--src/core/file_sys/vfs_concat.cpp27
-rw-r--r--src/core/file_sys/vfs_concat.h12
-rw-r--r--src/core/file_sys/vfs_layered.cpp8
-rw-r--r--src/core/hle/kernel/init/init_slab_setup.cpp9
-rw-r--r--src/core/hle/kernel/initial_process.h4
-rw-r--r--src/core/hle/kernel/k_memory_block.h52
-rw-r--r--src/core/hle/kernel/k_memory_block_manager.cpp70
-rw-r--r--src/core/hle/kernel/k_memory_block_manager.h6
-rw-r--r--src/core/hle/kernel/k_memory_layout.h6
-rw-r--r--src/core/hle/kernel/k_memory_manager.cpp9
-rw-r--r--src/core/hle/kernel/k_memory_region_type.h105
-rw-r--r--src/core/hle/kernel/k_page_group.h11
-rw-r--r--src/core/hle/kernel/k_page_table.cpp314
-rw-r--r--src/core/hle/kernel/k_page_table.h33
-rw-r--r--src/core/hle/kernel/k_process.cpp2
-rw-r--r--src/core/hle/kernel/kernel.cpp23
-rw-r--r--src/core/hle/kernel/svc/svc_memory.cpp10
-rw-r--r--src/core/hle/kernel/svc_types.h1
-rw-r--r--src/core/hle/service/acc/acc.cpp55
-rw-r--r--src/core/hle/service/acc/acc.h3
-rw-r--r--src/core/hle/service/acc/acc_su.cpp6
-rw-r--r--src/core/hle/service/acc/profile_manager.h3
-rw-r--r--src/core/hle/service/am/am.cpp41
-rw-r--r--src/core/hle/service/am/am.h3
-rw-r--r--src/core/hle/service/hle_ipc.cpp58
-rw-r--r--src/core/hle/service/hle_ipc.h6
-rw-r--r--src/core/hle/service/mii/types/core_data.cpp1
-rw-r--r--src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp2
-rw-r--r--src/core/hle/service/prepo/prepo.cpp40
-rw-r--r--src/core/hle/service/ptm/ts.cpp40
-rw-r--r--src/core/hle/service/ptm/ts.h6
-rw-r--r--src/core/hle/service/set/set_sys.cpp49
-rw-r--r--src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp23
-rw-r--r--src/shader_recompiler/backend/spirv/spirv_emit_context.h36
-rw-r--r--src/shader_recompiler/profile.h4
-rw-r--r--src/tests/common/unique_function.cpp6
-rw-r--r--src/video_core/buffer_cache/buffer_cache_base.h4
-rw-r--r--src/video_core/fence_manager.h5
-rw-r--r--src/video_core/host_shaders/CMakeLists.txt1
-rw-r--r--src/video_core/host_shaders/convert_abgr8_to_d32f.frag15
-rw-r--r--src/video_core/host_shaders/convert_d24s8_to_abgr8.frag8
-rw-r--r--src/video_core/host_shaders/convert_d32f_to_abgr8.frag2
-rw-r--r--src/video_core/host_shaders/convert_s8d24_to_abgr8.frag8
-rw-r--r--src/video_core/renderer_vulkan/blit_image.cpp9
-rw-r--r--src/video_core/renderer_vulkan/blit_image.h4
-rw-r--r--src/video_core/renderer_vulkan/renderer_vulkan.cpp14
-rw-r--r--src/video_core/renderer_vulkan/vk_pipeline_cache.cpp6
-rw-r--r--src/video_core/renderer_vulkan/vk_query_cache.cpp1
-rw-r--r--src/video_core/renderer_vulkan/vk_rasterizer.cpp23
-rw-r--r--src/video_core/renderer_vulkan/vk_rasterizer.h4
-rw-r--r--src/video_core/renderer_vulkan/vk_render_pass_cache.cpp2
-rw-r--r--src/video_core/renderer_vulkan/vk_texture_cache.cpp23
-rw-r--r--src/video_core/texture_cache/formatter.cpp8
-rw-r--r--src/video_core/texture_cache/samples_helper.h2
-rw-r--r--src/video_core/texture_cache/util.cpp11
-rw-r--r--src/video_core/vulkan_common/vulkan_device.cpp35
-rw-r--r--src/video_core/vulkan_common/vulkan_device.h18
-rw-r--r--src/yuzu/configuration/configure_vibration.cpp10
68 files changed, 977 insertions, 473 deletions
diff --git a/src/common/thread.cpp b/src/common/thread.cpp
index 919e33af9..34cc1527b 100644
--- a/src/common/thread.cpp
+++ b/src/common/thread.cpp
@@ -11,6 +11,7 @@
11#include <mach/mach.h> 11#include <mach/mach.h>
12#elif defined(_WIN32) 12#elif defined(_WIN32)
13#include <windows.h> 13#include <windows.h>
14#include "common/string_util.h"
14#else 15#else
15#if defined(__Bitrig__) || defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__) 16#if defined(__Bitrig__) || defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
16#include <pthread_np.h> 17#include <pthread_np.h>
@@ -82,29 +83,8 @@ void SetCurrentThreadPriority(ThreadPriority new_priority) {
82#ifdef _MSC_VER 83#ifdef _MSC_VER
83 84
84// Sets the debugger-visible name of the current thread. 85// Sets the debugger-visible name of the current thread.
85// Uses trick documented in:
86// https://docs.microsoft.com/en-us/visualstudio/debugger/how-to-set-a-thread-name-in-native-code
87void SetCurrentThreadName(const char* name) { 86void SetCurrentThreadName(const char* name) {
88 static const DWORD MS_VC_EXCEPTION = 0x406D1388; 87 SetThreadDescription(GetCurrentThread(), UTF8ToUTF16W(name).data());
89
90#pragma pack(push, 8)
91 struct THREADNAME_INFO {
92 DWORD dwType; // must be 0x1000
93 LPCSTR szName; // pointer to name (in user addr space)
94 DWORD dwThreadID; // thread ID (-1=caller thread)
95 DWORD dwFlags; // reserved for future use, must be zero
96 } info;
97#pragma pack(pop)
98
99 info.dwType = 0x1000;
100 info.szName = name;
101 info.dwThreadID = std::numeric_limits<DWORD>::max();
102 info.dwFlags = 0;
103
104 __try {
105 RaiseException(MS_VC_EXCEPTION, 0, sizeof(info) / sizeof(ULONG_PTR), (ULONG_PTR*)&info);
106 } __except (EXCEPTION_CONTINUE_EXECUTION) {
107 }
108} 88}
109 89
110#else // !MSVC_VER, so must be POSIX threads 90#else // !MSVC_VER, so must be POSIX threads
diff --git a/src/core/core.cpp b/src/core/core.cpp
index 0ab2e3b76..d7e2efbd7 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -116,11 +116,8 @@ FileSys::VirtualFile GetGameFileFromPath(const FileSys::VirtualFilesystem& vfs,
116 } 116 }
117 } 117 }
118 118
119 if (concat.empty()) { 119 return FileSys::ConcatenatedVfsFile::MakeConcatenatedFile(dir->GetName(),
120 return nullptr; 120 std::move(concat));
121 }
122
123 return FileSys::ConcatenatedVfsFile::MakeConcatenatedFile(concat, dir->GetName());
124 } 121 }
125 122
126 if (Common::FS::IsDir(path)) { 123 if (Common::FS::IsDir(path)) {
diff --git a/src/core/debugger/gdbstub.cpp b/src/core/debugger/gdbstub.cpp
index 82964f0a1..2076aa8a2 100644
--- a/src/core/debugger/gdbstub.cpp
+++ b/src/core/debugger/gdbstub.cpp
@@ -822,11 +822,13 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
822 const char i = True(mem_info.attribute & MemoryAttribute::IpcLocked) ? 'I' : '-'; 822 const char i = True(mem_info.attribute & MemoryAttribute::IpcLocked) ? 'I' : '-';
823 const char d = True(mem_info.attribute & MemoryAttribute::DeviceShared) ? 'D' : '-'; 823 const char d = True(mem_info.attribute & MemoryAttribute::DeviceShared) ? 'D' : '-';
824 const char u = True(mem_info.attribute & MemoryAttribute::Uncached) ? 'U' : '-'; 824 const char u = True(mem_info.attribute & MemoryAttribute::Uncached) ? 'U' : '-';
825 const char p =
826 True(mem_info.attribute & MemoryAttribute::PermissionLocked) ? 'P' : '-';
825 827
826 reply += 828 reply += fmt::format(" {:#012x} - {:#012x} {} {} {}{}{}{}{} [{}, {}]\n",
827 fmt::format(" {:#012x} - {:#012x} {} {} {}{}{}{} [{}, {}]\n", 829 mem_info.base_address,
828 mem_info.base_address, mem_info.base_address + mem_info.size - 1, 830 mem_info.base_address + mem_info.size - 1, perm, state, l, i,
829 perm, state, l, i, d, u, mem_info.ipc_count, mem_info.device_count); 831 d, u, p, mem_info.ipc_count, mem_info.device_count);
830 } 832 }
831 833
832 const uintptr_t next_address = mem_info.base_address + mem_info.size; 834 const uintptr_t next_address = mem_info.base_address + mem_info.size;
diff --git a/src/core/file_sys/fsmitm_romfsbuild.cpp b/src/core/file_sys/fsmitm_romfsbuild.cpp
index e39c7b62b..f1d3e4129 100644
--- a/src/core/file_sys/fsmitm_romfsbuild.cpp
+++ b/src/core/file_sys/fsmitm_romfsbuild.cpp
@@ -107,62 +107,56 @@ static u64 romfs_get_hash_table_count(u64 num_entries) {
107 107
108void RomFSBuildContext::VisitDirectory(VirtualDir romfs_dir, VirtualDir ext_dir, 108void RomFSBuildContext::VisitDirectory(VirtualDir romfs_dir, VirtualDir ext_dir,
109 std::shared_ptr<RomFSBuildDirectoryContext> parent) { 109 std::shared_ptr<RomFSBuildDirectoryContext> parent) {
110 std::vector<std::shared_ptr<RomFSBuildDirectoryContext>> child_dirs; 110 for (auto& child_romfs_file : romfs_dir->GetFiles()) {
111 const auto name = child_romfs_file->GetName();
112 const auto child = std::make_shared<RomFSBuildFileContext>();
113 // Set child's path.
114 child->cur_path_ofs = parent->path_len + 1;
115 child->path_len = child->cur_path_ofs + static_cast<u32>(name.size());
116 child->path = parent->path + "/" + name;
117
118 if (ext_dir != nullptr && ext_dir->GetFile(name + ".stub") != nullptr) {
119 continue;
120 }
111 121
112 const auto entries = romfs_dir->GetEntries(); 122 // Sanity check on path_len
123 ASSERT(child->path_len < FS_MAX_PATH);
113 124
114 for (const auto& kv : entries) { 125 child->source = std::move(child_romfs_file);
115 if (kv.second == VfsEntryType::Directory) {
116 const auto child = std::make_shared<RomFSBuildDirectoryContext>();
117 // Set child's path.
118 child->cur_path_ofs = parent->path_len + 1;
119 child->path_len = child->cur_path_ofs + static_cast<u32>(kv.first.size());
120 child->path = parent->path + "/" + kv.first;
121 126
122 if (ext_dir != nullptr && ext_dir->GetFile(kv.first + ".stub") != nullptr) { 127 if (ext_dir != nullptr) {
123 continue; 128 if (const auto ips = ext_dir->GetFile(name + ".ips")) {
129 if (auto patched = PatchIPS(child->source, ips)) {
130 child->source = std::move(patched);
131 }
124 } 132 }
133 }
125 134
126 // Sanity check on path_len 135 child->size = child->source->GetSize();
127 ASSERT(child->path_len < FS_MAX_PATH);
128
129 if (AddDirectory(parent, child)) {
130 child_dirs.push_back(child);
131 }
132 } else {
133 const auto child = std::make_shared<RomFSBuildFileContext>();
134 // Set child's path.
135 child->cur_path_ofs = parent->path_len + 1;
136 child->path_len = child->cur_path_ofs + static_cast<u32>(kv.first.size());
137 child->path = parent->path + "/" + kv.first;
138
139 if (ext_dir != nullptr && ext_dir->GetFile(kv.first + ".stub") != nullptr) {
140 continue;
141 }
142 136
143 // Sanity check on path_len 137 AddFile(parent, child);
144 ASSERT(child->path_len < FS_MAX_PATH); 138 }
145 139
146 child->source = romfs_dir->GetFile(kv.first); 140 for (auto& child_romfs_dir : romfs_dir->GetSubdirectories()) {
141 const auto name = child_romfs_dir->GetName();
142 const auto child = std::make_shared<RomFSBuildDirectoryContext>();
143 // Set child's path.
144 child->cur_path_ofs = parent->path_len + 1;
145 child->path_len = child->cur_path_ofs + static_cast<u32>(name.size());
146 child->path = parent->path + "/" + name;
147 147
148 if (ext_dir != nullptr) { 148 if (ext_dir != nullptr && ext_dir->GetFile(name + ".stub") != nullptr) {
149 if (const auto ips = ext_dir->GetFile(kv.first + ".ips")) { 149 continue;
150 if (auto patched = PatchIPS(child->source, ips)) { 150 }
151 child->source = std::move(patched);
152 }
153 }
154 }
155 151
156 child->size = child->source->GetSize(); 152 // Sanity check on path_len
153 ASSERT(child->path_len < FS_MAX_PATH);
157 154
158 AddFile(parent, child); 155 if (!AddDirectory(parent, child)) {
156 continue;
159 } 157 }
160 }
161 158
162 for (auto& child : child_dirs) { 159 auto child_ext_dir = ext_dir != nullptr ? ext_dir->GetSubdirectory(name) : nullptr;
163 auto subdir_name = std::string_view(child->path).substr(child->cur_path_ofs);
164 auto child_romfs_dir = romfs_dir->GetSubdirectory(subdir_name);
165 auto child_ext_dir = ext_dir != nullptr ? ext_dir->GetSubdirectory(subdir_name) : nullptr;
166 this->VisitDirectory(child_romfs_dir, child_ext_dir, child); 160 this->VisitDirectory(child_romfs_dir, child_ext_dir, child);
167 } 161 }
168} 162}
@@ -293,7 +287,7 @@ std::multimap<u64, VirtualFile> RomFSBuildContext::Build() {
293 287
294 cur_entry.name_size = name_size; 288 cur_entry.name_size = name_size;
295 289
296 out.emplace(cur_file->offset + ROMFS_FILEPARTITION_OFS, cur_file->source); 290 out.emplace(cur_file->offset + ROMFS_FILEPARTITION_OFS, std::move(cur_file->source));
297 std::memcpy(file_table.data() + cur_file->entry_offset, &cur_entry, sizeof(RomFSFileEntry)); 291 std::memcpy(file_table.data() + cur_file->entry_offset, &cur_entry, sizeof(RomFSFileEntry));
298 std::memset(file_table.data() + cur_file->entry_offset + sizeof(RomFSFileEntry), 0, 292 std::memset(file_table.data() + cur_file->entry_offset + sizeof(RomFSFileEntry), 0,
299 Common::AlignUp(cur_entry.name_size, 4)); 293 Common::AlignUp(cur_entry.name_size, 4));
diff --git a/src/core/file_sys/patch_manager.cpp b/src/core/file_sys/patch_manager.cpp
index 8e475f25a..0bca05587 100644
--- a/src/core/file_sys/patch_manager.cpp
+++ b/src/core/file_sys/patch_manager.cpp
@@ -377,16 +377,16 @@ static void ApplyLayeredFS(VirtualFile& romfs, u64 title_id, ContentRecordType t
377 377
378 auto romfs_dir = FindSubdirectoryCaseless(subdir, "romfs"); 378 auto romfs_dir = FindSubdirectoryCaseless(subdir, "romfs");
379 if (romfs_dir != nullptr) 379 if (romfs_dir != nullptr)
380 layers.push_back(std::make_shared<CachedVfsDirectory>(romfs_dir)); 380 layers.emplace_back(std::make_shared<CachedVfsDirectory>(std::move(romfs_dir)));
381 381
382 auto ext_dir = FindSubdirectoryCaseless(subdir, "romfs_ext"); 382 auto ext_dir = FindSubdirectoryCaseless(subdir, "romfs_ext");
383 if (ext_dir != nullptr) 383 if (ext_dir != nullptr)
384 layers_ext.push_back(std::make_shared<CachedVfsDirectory>(ext_dir)); 384 layers_ext.emplace_back(std::make_shared<CachedVfsDirectory>(std::move(ext_dir)));
385 385
386 if (type == ContentRecordType::HtmlDocument) { 386 if (type == ContentRecordType::HtmlDocument) {
387 auto manual_dir = FindSubdirectoryCaseless(subdir, "manual_html"); 387 auto manual_dir = FindSubdirectoryCaseless(subdir, "manual_html");
388 if (manual_dir != nullptr) 388 if (manual_dir != nullptr)
389 layers.push_back(std::make_shared<CachedVfsDirectory>(manual_dir)); 389 layers.emplace_back(std::make_shared<CachedVfsDirectory>(std::move(manual_dir)));
390 } 390 }
391 } 391 }
392 392
@@ -400,7 +400,7 @@ static void ApplyLayeredFS(VirtualFile& romfs, u64 title_id, ContentRecordType t
400 return; 400 return;
401 } 401 }
402 402
403 layers.push_back(std::move(extracted)); 403 layers.emplace_back(std::move(extracted));
404 404
405 auto layered = LayeredVfsDirectory::MakeLayeredDirectory(std::move(layers)); 405 auto layered = LayeredVfsDirectory::MakeLayeredDirectory(std::move(layers));
406 if (layered == nullptr) { 406 if (layered == nullptr) {
diff --git a/src/core/file_sys/registered_cache.cpp b/src/core/file_sys/registered_cache.cpp
index 04da93d5c..1cc77ad14 100644
--- a/src/core/file_sys/registered_cache.cpp
+++ b/src/core/file_sys/registered_cache.cpp
@@ -322,7 +322,8 @@ VirtualFile RegisteredCache::OpenFileOrDirectoryConcat(const VirtualDir& open_di
322 return nullptr; 322 return nullptr;
323 } 323 }
324 324
325 return ConcatenatedVfsFile::MakeConcatenatedFile(concat, concat.front()->GetName()); 325 auto name = concat.front()->GetName();
326 return ConcatenatedVfsFile::MakeConcatenatedFile(std::move(name), std::move(concat));
326} 327}
327 328
328VirtualFile RegisteredCache::GetFileAtID(NcaID id) const { 329VirtualFile RegisteredCache::GetFileAtID(NcaID id) const {
diff --git a/src/core/file_sys/romfs.cpp b/src/core/file_sys/romfs.cpp
index 614da2130..1c580de57 100644
--- a/src/core/file_sys/romfs.cpp
+++ b/src/core/file_sys/romfs.cpp
@@ -133,7 +133,7 @@ VirtualDir ExtractRomFS(VirtualFile file, RomFSExtractionType type) {
133 out = out->GetSubdirectories().front(); 133 out = out->GetSubdirectories().front();
134 } 134 }
135 135
136 return std::make_shared<CachedVfsDirectory>(out); 136 return std::make_shared<CachedVfsDirectory>(std::move(out));
137} 137}
138 138
139VirtualFile CreateRomFS(VirtualDir dir, VirtualDir ext) { 139VirtualFile CreateRomFS(VirtualDir dir, VirtualDir ext) {
@@ -141,8 +141,7 @@ VirtualFile CreateRomFS(VirtualDir dir, VirtualDir ext) {
141 return nullptr; 141 return nullptr;
142 142
143 RomFSBuildContext ctx{dir, ext}; 143 RomFSBuildContext ctx{dir, ext};
144 auto file_map = ctx.Build(); 144 return ConcatenatedVfsFile::MakeConcatenatedFile(0, dir->GetName(), ctx.Build());
145 return ConcatenatedVfsFile::MakeConcatenatedFile(0, file_map, dir->GetName());
146} 145}
147 146
148} // namespace FileSys 147} // namespace FileSys
diff --git a/src/core/file_sys/system_archive/system_version.cpp b/src/core/file_sys/system_archive/system_version.cpp
index bd493ecca..e4751c2b4 100644
--- a/src/core/file_sys/system_archive/system_version.cpp
+++ b/src/core/file_sys/system_archive/system_version.cpp
@@ -1,6 +1,7 @@
1// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project 1// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later 2// SPDX-License-Identifier: GPL-2.0-or-later
3 3
4#include "common/logging/log.h"
4#include "core/file_sys/system_archive/system_version.h" 5#include "core/file_sys/system_archive/system_version.h"
5#include "core/file_sys/vfs_vector.h" 6#include "core/file_sys/vfs_vector.h"
6#include "core/hle/api_version.h" 7#include "core/hle/api_version.h"
@@ -12,6 +13,9 @@ std::string GetLongDisplayVersion() {
12} 13}
13 14
14VirtualDir SystemVersion() { 15VirtualDir SystemVersion() {
16 LOG_WARNING(Common_Filesystem, "called - Using hardcoded firmware version '{}'",
17 GetLongDisplayVersion());
18
15 VirtualFile file = std::make_shared<VectorVfsFile>(std::vector<u8>(0x100), "file"); 19 VirtualFile file = std::make_shared<VectorVfsFile>(std::vector<u8>(0x100), "file");
16 file->WriteObject(HLE::ApiVersion::HOS_VERSION_MAJOR, 0); 20 file->WriteObject(HLE::ApiVersion::HOS_VERSION_MAJOR, 0);
17 file->WriteObject(HLE::ApiVersion::HOS_VERSION_MINOR, 1); 21 file->WriteObject(HLE::ApiVersion::HOS_VERSION_MINOR, 1);
diff --git a/src/core/file_sys/vfs_cached.cpp b/src/core/file_sys/vfs_cached.cpp
index c3154ee81..7ee5300e5 100644
--- a/src/core/file_sys/vfs_cached.cpp
+++ b/src/core/file_sys/vfs_cached.cpp
@@ -6,13 +6,13 @@
6 6
7namespace FileSys { 7namespace FileSys {
8 8
9CachedVfsDirectory::CachedVfsDirectory(VirtualDir& source_dir) 9CachedVfsDirectory::CachedVfsDirectory(VirtualDir&& source_dir)
10 : name(source_dir->GetName()), parent(source_dir->GetParentDirectory()) { 10 : name(source_dir->GetName()), parent(source_dir->GetParentDirectory()) {
11 for (auto& dir : source_dir->GetSubdirectories()) { 11 for (auto& dir : source_dir->GetSubdirectories()) {
12 dirs.emplace(dir->GetName(), std::make_shared<CachedVfsDirectory>(dir)); 12 dirs.emplace(dir->GetName(), std::make_shared<CachedVfsDirectory>(std::move(dir)));
13 } 13 }
14 for (auto& file : source_dir->GetFiles()) { 14 for (auto& file : source_dir->GetFiles()) {
15 files.emplace(file->GetName(), file); 15 files.emplace(file->GetName(), std::move(file));
16 } 16 }
17} 17}
18 18
diff --git a/src/core/file_sys/vfs_cached.h b/src/core/file_sys/vfs_cached.h
index 113acac12..1e5300784 100644
--- a/src/core/file_sys/vfs_cached.h
+++ b/src/core/file_sys/vfs_cached.h
@@ -11,7 +11,7 @@ namespace FileSys {
11 11
12class CachedVfsDirectory : public ReadOnlyVfsDirectory { 12class CachedVfsDirectory : public ReadOnlyVfsDirectory {
13public: 13public:
14 CachedVfsDirectory(VirtualDir& source_directory); 14 CachedVfsDirectory(VirtualDir&& source_directory);
15 15
16 ~CachedVfsDirectory() override; 16 ~CachedVfsDirectory() override;
17 VirtualFile GetFile(std::string_view file_name) const override; 17 VirtualFile GetFile(std::string_view file_name) const override;
diff --git a/src/core/file_sys/vfs_concat.cpp b/src/core/file_sys/vfs_concat.cpp
index 311a59e5f..168b9cbec 100644
--- a/src/core/file_sys/vfs_concat.cpp
+++ b/src/core/file_sys/vfs_concat.cpp
@@ -10,7 +10,7 @@
10 10
11namespace FileSys { 11namespace FileSys {
12 12
13ConcatenatedVfsFile::ConcatenatedVfsFile(ConcatenationMap&& concatenation_map_, std::string&& name_) 13ConcatenatedVfsFile::ConcatenatedVfsFile(std::string&& name_, ConcatenationMap&& concatenation_map_)
14 : concatenation_map(std::move(concatenation_map_)), name(std::move(name_)) { 14 : concatenation_map(std::move(concatenation_map_)), name(std::move(name_)) {
15 DEBUG_ASSERT(this->VerifyContinuity()); 15 DEBUG_ASSERT(this->VerifyContinuity());
16} 16}
@@ -30,8 +30,8 @@ bool ConcatenatedVfsFile::VerifyContinuity() const {
30 30
31ConcatenatedVfsFile::~ConcatenatedVfsFile() = default; 31ConcatenatedVfsFile::~ConcatenatedVfsFile() = default;
32 32
33VirtualFile ConcatenatedVfsFile::MakeConcatenatedFile(const std::vector<VirtualFile>& files, 33VirtualFile ConcatenatedVfsFile::MakeConcatenatedFile(std::string&& name,
34 std::string&& name) { 34 std::vector<VirtualFile>&& files) {
35 // Fold trivial cases. 35 // Fold trivial cases.
36 if (files.empty()) { 36 if (files.empty()) {
37 return nullptr; 37 return nullptr;
@@ -46,20 +46,21 @@ VirtualFile ConcatenatedVfsFile::MakeConcatenatedFile(const std::vector<VirtualF
46 u64 last_offset = 0; 46 u64 last_offset = 0;
47 47
48 for (auto& file : files) { 48 for (auto& file : files) {
49 const auto size = file->GetSize();
50
49 concatenation_map.emplace_back(ConcatenationEntry{ 51 concatenation_map.emplace_back(ConcatenationEntry{
50 .offset = last_offset, 52 .offset = last_offset,
51 .file = file, 53 .file = std::move(file),
52 }); 54 });
53 55
54 last_offset += file->GetSize(); 56 last_offset += size;
55 } 57 }
56 58
57 return VirtualFile(new ConcatenatedVfsFile(std::move(concatenation_map), std::move(name))); 59 return VirtualFile(new ConcatenatedVfsFile(std::move(name), std::move(concatenation_map)));
58} 60}
59 61
60VirtualFile ConcatenatedVfsFile::MakeConcatenatedFile(u8 filler_byte, 62VirtualFile ConcatenatedVfsFile::MakeConcatenatedFile(u8 filler_byte, std::string&& name,
61 const std::multimap<u64, VirtualFile>& files, 63 std::multimap<u64, VirtualFile>&& files) {
62 std::string&& name) {
63 // Fold trivial cases. 64 // Fold trivial cases.
64 if (files.empty()) { 65 if (files.empty()) {
65 return nullptr; 66 return nullptr;
@@ -76,6 +77,8 @@ VirtualFile ConcatenatedVfsFile::MakeConcatenatedFile(u8 filler_byte,
76 77
77 // Iteration of a multimap is ordered, so offset will be strictly non-decreasing. 78 // Iteration of a multimap is ordered, so offset will be strictly non-decreasing.
78 for (auto& [offset, file] : files) { 79 for (auto& [offset, file] : files) {
80 const auto size = file->GetSize();
81
79 if (offset > last_offset) { 82 if (offset > last_offset) {
80 concatenation_map.emplace_back(ConcatenationEntry{ 83 concatenation_map.emplace_back(ConcatenationEntry{
81 .offset = last_offset, 84 .offset = last_offset,
@@ -85,13 +88,13 @@ VirtualFile ConcatenatedVfsFile::MakeConcatenatedFile(u8 filler_byte,
85 88
86 concatenation_map.emplace_back(ConcatenationEntry{ 89 concatenation_map.emplace_back(ConcatenationEntry{
87 .offset = offset, 90 .offset = offset,
88 .file = file, 91 .file = std::move(file),
89 }); 92 });
90 93
91 last_offset = offset + file->GetSize(); 94 last_offset = offset + size;
92 } 95 }
93 96
94 return VirtualFile(new ConcatenatedVfsFile(std::move(concatenation_map), std::move(name))); 97 return VirtualFile(new ConcatenatedVfsFile(std::move(name), std::move(concatenation_map)));
95} 98}
96 99
97std::string ConcatenatedVfsFile::GetName() const { 100std::string ConcatenatedVfsFile::GetName() const {
diff --git a/src/core/file_sys/vfs_concat.h b/src/core/file_sys/vfs_concat.h
index 6b329d545..cbddd12bd 100644
--- a/src/core/file_sys/vfs_concat.h
+++ b/src/core/file_sys/vfs_concat.h
@@ -24,22 +24,20 @@ private:
24 }; 24 };
25 using ConcatenationMap = std::vector<ConcatenationEntry>; 25 using ConcatenationMap = std::vector<ConcatenationEntry>;
26 26
27 explicit ConcatenatedVfsFile(std::vector<ConcatenationEntry>&& concatenation_map, 27 explicit ConcatenatedVfsFile(std::string&& name,
28 std::string&& name); 28 std::vector<ConcatenationEntry>&& concatenation_map);
29 bool VerifyContinuity() const; 29 bool VerifyContinuity() const;
30 30
31public: 31public:
32 ~ConcatenatedVfsFile() override; 32 ~ConcatenatedVfsFile() override;
33 33
34 /// Wrapper function to allow for more efficient handling of files.size() == 0, 1 cases. 34 /// Wrapper function to allow for more efficient handling of files.size() == 0, 1 cases.
35 static VirtualFile MakeConcatenatedFile(const std::vector<VirtualFile>& files, 35 static VirtualFile MakeConcatenatedFile(std::string&& name, std::vector<VirtualFile>&& files);
36 std::string&& name);
37 36
38 /// Convenience function that turns a map of offsets to files into a concatenated file, filling 37 /// Convenience function that turns a map of offsets to files into a concatenated file, filling
39 /// gaps with a given filler byte. 38 /// gaps with a given filler byte.
40 static VirtualFile MakeConcatenatedFile(u8 filler_byte, 39 static VirtualFile MakeConcatenatedFile(u8 filler_byte, std::string&& name,
41 const std::multimap<u64, VirtualFile>& files, 40 std::multimap<u64, VirtualFile>&& files);
42 std::string&& name);
43 41
44 std::string GetName() const override; 42 std::string GetName() const override;
45 std::size_t GetSize() const override; 43 std::size_t GetSize() const override;
diff --git a/src/core/file_sys/vfs_layered.cpp b/src/core/file_sys/vfs_layered.cpp
index 3e6426afc..08daca397 100644
--- a/src/core/file_sys/vfs_layered.cpp
+++ b/src/core/file_sys/vfs_layered.cpp
@@ -38,7 +38,7 @@ VirtualDir LayeredVfsDirectory::GetDirectoryRelative(std::string_view path) cons
38 for (const auto& layer : dirs) { 38 for (const auto& layer : dirs) {
39 auto dir = layer->GetDirectoryRelative(path); 39 auto dir = layer->GetDirectoryRelative(path);
40 if (dir != nullptr) { 40 if (dir != nullptr) {
41 out.push_back(std::move(dir)); 41 out.emplace_back(std::move(dir));
42 } 42 }
43 } 43 }
44 44
@@ -62,11 +62,11 @@ std::vector<VirtualFile> LayeredVfsDirectory::GetFiles() const {
62 std::set<std::string, std::less<>> out_names; 62 std::set<std::string, std::less<>> out_names;
63 63
64 for (const auto& layer : dirs) { 64 for (const auto& layer : dirs) {
65 for (const auto& file : layer->GetFiles()) { 65 for (auto& file : layer->GetFiles()) {
66 auto file_name = file->GetName(); 66 auto file_name = file->GetName();
67 if (!out_names.contains(file_name)) { 67 if (!out_names.contains(file_name)) {
68 out_names.emplace(std::move(file_name)); 68 out_names.emplace(std::move(file_name));
69 out.push_back(file); 69 out.emplace_back(std::move(file));
70 } 70 }
71 } 71 }
72 } 72 }
@@ -86,7 +86,7 @@ std::vector<VirtualDir> LayeredVfsDirectory::GetSubdirectories() const {
86 std::vector<VirtualDir> out; 86 std::vector<VirtualDir> out;
87 out.reserve(names.size()); 87 out.reserve(names.size());
88 for (const auto& subdir : names) 88 for (const auto& subdir : names)
89 out.push_back(GetSubdirectory(subdir)); 89 out.emplace_back(GetSubdirectory(subdir));
90 90
91 return out; 91 return out;
92} 92}
diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp
index 1f2db673c..a0e20bbbb 100644
--- a/src/core/hle/kernel/init/init_slab_setup.cpp
+++ b/src/core/hle/kernel/init/init_slab_setup.cpp
@@ -106,7 +106,7 @@ static_assert(KernelPageBufferAdditionalSize ==
106/// memory. 106/// memory.
107static KPhysicalAddress TranslateSlabAddrToPhysical(KMemoryLayout& memory_layout, 107static KPhysicalAddress TranslateSlabAddrToPhysical(KMemoryLayout& memory_layout,
108 KVirtualAddress slab_addr) { 108 KVirtualAddress slab_addr) {
109 slab_addr -= GetInteger(memory_layout.GetSlabRegionAddress()); 109 slab_addr -= memory_layout.GetSlabRegion().GetAddress();
110 return GetInteger(slab_addr) + Core::DramMemoryMap::SlabHeapBase; 110 return GetInteger(slab_addr) + Core::DramMemoryMap::SlabHeapBase;
111} 111}
112 112
@@ -196,7 +196,12 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
196 auto& kernel = system.Kernel(); 196 auto& kernel = system.Kernel();
197 197
198 // Get the start of the slab region, since that's where we'll be working. 198 // Get the start of the slab region, since that's where we'll be working.
199 KVirtualAddress address = memory_layout.GetSlabRegionAddress(); 199 const KMemoryRegion& slab_region = memory_layout.GetSlabRegion();
200 KVirtualAddress address = slab_region.GetAddress();
201
202 // Clear the slab region.
203 // TODO: implement access to kernel VAs.
204 // std::memset(device_ptr, 0, slab_region.GetSize());
200 205
201 // Initialize slab type array to be in sorted order. 206 // Initialize slab type array to be in sorted order.
202 std::array<KSlabType, KSlabType_Count> slab_types; 207 std::array<KSlabType, KSlabType_Count> slab_types;
diff --git a/src/core/hle/kernel/initial_process.h b/src/core/hle/kernel/initial_process.h
index 82195f4f7..2c95269fc 100644
--- a/src/core/hle/kernel/initial_process.h
+++ b/src/core/hle/kernel/initial_process.h
@@ -19,4 +19,8 @@ static inline KPhysicalAddress GetInitialProcessBinaryPhysicalAddress() {
19 MainMemoryAddress); 19 MainMemoryAddress);
20} 20}
21 21
22static inline size_t GetInitialProcessBinarySize() {
23 return InitialProcessBinarySizeMax;
24}
25
22} // namespace Kernel 26} // namespace Kernel
diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h
index 41a29da24..ef3f61321 100644
--- a/src/core/hle/kernel/k_memory_block.h
+++ b/src/core/hle/kernel/k_memory_block.h
@@ -36,6 +36,7 @@ enum class KMemoryState : u32 {
36 FlagCanChangeAttribute = (1 << 24), 36 FlagCanChangeAttribute = (1 << 24),
37 FlagCanCodeMemory = (1 << 25), 37 FlagCanCodeMemory = (1 << 25),
38 FlagLinearMapped = (1 << 26), 38 FlagLinearMapped = (1 << 26),
39 FlagCanPermissionLock = (1 << 27),
39 40
40 FlagsData = FlagCanReprotect | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc | 41 FlagsData = FlagCanReprotect | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc |
41 FlagMapped | FlagCanAlias | FlagCanTransfer | FlagCanQueryPhysical | 42 FlagMapped | FlagCanAlias | FlagCanTransfer | FlagCanQueryPhysical |
@@ -50,12 +51,16 @@ enum class KMemoryState : u32 {
50 FlagLinearMapped, 51 FlagLinearMapped,
51 52
52 Free = static_cast<u32>(Svc::MemoryState::Free), 53 Free = static_cast<u32>(Svc::MemoryState::Free),
53 Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped | FlagCanDeviceMap | 54
54 FlagCanAlignedDeviceMap, 55 IoMemory = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped | FlagCanDeviceMap |
56 FlagCanAlignedDeviceMap,
57 IoRegister =
58 static_cast<u32>(Svc::MemoryState::Io) | FlagCanDeviceMap | FlagCanAlignedDeviceMap,
59
55 Static = static_cast<u32>(Svc::MemoryState::Static) | FlagMapped | FlagCanQueryPhysical, 60 Static = static_cast<u32>(Svc::MemoryState::Static) | FlagMapped | FlagCanQueryPhysical,
56 Code = static_cast<u32>(Svc::MemoryState::Code) | FlagsCode | FlagCanMapProcess, 61 Code = static_cast<u32>(Svc::MemoryState::Code) | FlagsCode | FlagCanMapProcess,
57 CodeData = static_cast<u32>(Svc::MemoryState::CodeData) | FlagsData | FlagCanMapProcess | 62 CodeData = static_cast<u32>(Svc::MemoryState::CodeData) | FlagsData | FlagCanMapProcess |
58 FlagCanCodeMemory, 63 FlagCanCodeMemory | FlagCanPermissionLock,
59 Normal = static_cast<u32>(Svc::MemoryState::Normal) | FlagsData | FlagCanCodeMemory, 64 Normal = static_cast<u32>(Svc::MemoryState::Normal) | FlagsData | FlagCanCodeMemory,
60 Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted | 65 Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted |
61 FlagLinearMapped, 66 FlagLinearMapped,
@@ -65,7 +70,8 @@ enum class KMemoryState : u32 {
65 AliasCode = static_cast<u32>(Svc::MemoryState::AliasCode) | FlagsCode | FlagCanMapProcess | 70 AliasCode = static_cast<u32>(Svc::MemoryState::AliasCode) | FlagsCode | FlagCanMapProcess |
66 FlagCanCodeAlias, 71 FlagCanCodeAlias,
67 AliasCodeData = static_cast<u32>(Svc::MemoryState::AliasCodeData) | FlagsData | 72 AliasCodeData = static_cast<u32>(Svc::MemoryState::AliasCodeData) | FlagsData |
68 FlagCanMapProcess | FlagCanCodeAlias | FlagCanCodeMemory, 73 FlagCanMapProcess | FlagCanCodeAlias | FlagCanCodeMemory |
74 FlagCanPermissionLock,
69 75
70 Ipc = static_cast<u32>(Svc::MemoryState::Ipc) | FlagsMisc | FlagCanAlignedDeviceMap | 76 Ipc = static_cast<u32>(Svc::MemoryState::Ipc) | FlagsMisc | FlagCanAlignedDeviceMap |
71 FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, 77 FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
@@ -73,7 +79,7 @@ enum class KMemoryState : u32 {
73 Stack = static_cast<u32>(Svc::MemoryState::Stack) | FlagsMisc | FlagCanAlignedDeviceMap | 79 Stack = static_cast<u32>(Svc::MemoryState::Stack) | FlagsMisc | FlagCanAlignedDeviceMap |
74 FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, 80 FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
75 81
76 ThreadLocal = static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagLinearMapped, 82 ThreadLocal = static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagLinearMapped,
77 83
78 Transfered = static_cast<u32>(Svc::MemoryState::Transfered) | FlagsMisc | 84 Transfered = static_cast<u32>(Svc::MemoryState::Transfered) | FlagsMisc |
79 FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc | 85 FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc |
@@ -94,7 +100,7 @@ enum class KMemoryState : u32 {
94 NonDeviceIpc = 100 NonDeviceIpc =
95 static_cast<u32>(Svc::MemoryState::NonDeviceIpc) | FlagsMisc | FlagCanUseNonDeviceIpc, 101 static_cast<u32>(Svc::MemoryState::NonDeviceIpc) | FlagsMisc | FlagCanUseNonDeviceIpc,
96 102
97 Kernel = static_cast<u32>(Svc::MemoryState::Kernel) | FlagMapped, 103 Kernel = static_cast<u32>(Svc::MemoryState::Kernel),
98 104
99 GeneratedCode = static_cast<u32>(Svc::MemoryState::GeneratedCode) | FlagMapped | 105 GeneratedCode = static_cast<u32>(Svc::MemoryState::GeneratedCode) | FlagMapped |
100 FlagReferenceCounted | FlagCanDebug | FlagLinearMapped, 106 FlagReferenceCounted | FlagCanDebug | FlagLinearMapped,
@@ -105,34 +111,36 @@ enum class KMemoryState : u32 {
105 111
106 Insecure = static_cast<u32>(Svc::MemoryState::Insecure) | FlagMapped | FlagReferenceCounted | 112 Insecure = static_cast<u32>(Svc::MemoryState::Insecure) | FlagMapped | FlagReferenceCounted |
107 FlagLinearMapped | FlagCanChangeAttribute | FlagCanDeviceMap | 113 FlagLinearMapped | FlagCanChangeAttribute | FlagCanDeviceMap |
108 FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, 114 FlagCanAlignedDeviceMap | FlagCanQueryPhysical | FlagCanUseNonSecureIpc |
115 FlagCanUseNonDeviceIpc,
109}; 116};
110DECLARE_ENUM_FLAG_OPERATORS(KMemoryState); 117DECLARE_ENUM_FLAG_OPERATORS(KMemoryState);
111 118
112static_assert(static_cast<u32>(KMemoryState::Free) == 0x00000000); 119static_assert(static_cast<u32>(KMemoryState::Free) == 0x00000000);
113static_assert(static_cast<u32>(KMemoryState::Io) == 0x00182001); 120static_assert(static_cast<u32>(KMemoryState::IoMemory) == 0x00182001);
121static_assert(static_cast<u32>(KMemoryState::IoRegister) == 0x00180001);
114static_assert(static_cast<u32>(KMemoryState::Static) == 0x00042002); 122static_assert(static_cast<u32>(KMemoryState::Static) == 0x00042002);
115static_assert(static_cast<u32>(KMemoryState::Code) == 0x04DC7E03); 123static_assert(static_cast<u32>(KMemoryState::Code) == 0x04DC7E03);
116static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x07FEBD04); 124static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x0FFEBD04);
117static_assert(static_cast<u32>(KMemoryState::Normal) == 0x077EBD05); 125static_assert(static_cast<u32>(KMemoryState::Normal) == 0x077EBD05);
118static_assert(static_cast<u32>(KMemoryState::Shared) == 0x04402006); 126static_assert(static_cast<u32>(KMemoryState::Shared) == 0x04402006);
119 127
120static_assert(static_cast<u32>(KMemoryState::AliasCode) == 0x04DD7E08); 128static_assert(static_cast<u32>(KMemoryState::AliasCode) == 0x04DD7E08);
121static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x07FFBD09); 129static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x0FFFBD09);
122static_assert(static_cast<u32>(KMemoryState::Ipc) == 0x045C3C0A); 130static_assert(static_cast<u32>(KMemoryState::Ipc) == 0x045C3C0A);
123static_assert(static_cast<u32>(KMemoryState::Stack) == 0x045C3C0B); 131static_assert(static_cast<u32>(KMemoryState::Stack) == 0x045C3C0B);
124static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0400200C); 132static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0400000C);
125static_assert(static_cast<u32>(KMemoryState::Transfered) == 0x055C3C0D); 133static_assert(static_cast<u32>(KMemoryState::Transfered) == 0x055C3C0D);
126static_assert(static_cast<u32>(KMemoryState::SharedTransfered) == 0x045C380E); 134static_assert(static_cast<u32>(KMemoryState::SharedTransfered) == 0x045C380E);
127static_assert(static_cast<u32>(KMemoryState::SharedCode) == 0x0440380F); 135static_assert(static_cast<u32>(KMemoryState::SharedCode) == 0x0440380F);
128static_assert(static_cast<u32>(KMemoryState::Inaccessible) == 0x00000010); 136static_assert(static_cast<u32>(KMemoryState::Inaccessible) == 0x00000010);
129static_assert(static_cast<u32>(KMemoryState::NonSecureIpc) == 0x045C3811); 137static_assert(static_cast<u32>(KMemoryState::NonSecureIpc) == 0x045C3811);
130static_assert(static_cast<u32>(KMemoryState::NonDeviceIpc) == 0x044C2812); 138static_assert(static_cast<u32>(KMemoryState::NonDeviceIpc) == 0x044C2812);
131static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00002013); 139static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00000013);
132static_assert(static_cast<u32>(KMemoryState::GeneratedCode) == 0x04402214); 140static_assert(static_cast<u32>(KMemoryState::GeneratedCode) == 0x04402214);
133static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x04402015); 141static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x04402015);
134static_assert(static_cast<u32>(KMemoryState::Coverage) == 0x00002016); 142static_assert(static_cast<u32>(KMemoryState::Coverage) == 0x00002016);
135static_assert(static_cast<u32>(KMemoryState::Insecure) == 0x05583817); 143static_assert(static_cast<u32>(KMemoryState::Insecure) == 0x055C3817);
136 144
137enum class KMemoryPermission : u8 { 145enum class KMemoryPermission : u8 {
138 None = 0, 146 None = 0,
@@ -182,8 +190,9 @@ enum class KMemoryAttribute : u8 {
182 IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked), 190 IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked),
183 DeviceShared = static_cast<u8>(Svc::MemoryAttribute::DeviceShared), 191 DeviceShared = static_cast<u8>(Svc::MemoryAttribute::DeviceShared),
184 Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached), 192 Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached),
193 PermissionLocked = static_cast<u8>(Svc::MemoryAttribute::PermissionLocked),
185 194
186 SetMask = Uncached, 195 SetMask = Uncached | PermissionLocked,
187}; 196};
188DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute); 197DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute);
189 198
@@ -261,6 +270,10 @@ struct KMemoryInfo {
261 return m_state; 270 return m_state;
262 } 271 }
263 272
273 constexpr Svc::MemoryState GetSvcState() const {
274 return static_cast<Svc::MemoryState>(m_state & KMemoryState::Mask);
275 }
276
264 constexpr KMemoryPermission GetPermission() const { 277 constexpr KMemoryPermission GetPermission() const {
265 return m_permission; 278 return m_permission;
266 } 279 }
@@ -326,6 +339,10 @@ public:
326 return this->GetEndAddress() - 1; 339 return this->GetEndAddress() - 1;
327 } 340 }
328 341
342 constexpr KMemoryState GetState() const {
343 return m_memory_state;
344 }
345
329 constexpr u16 GetIpcLockCount() const { 346 constexpr u16 GetIpcLockCount() const {
330 return m_ipc_lock_count; 347 return m_ipc_lock_count;
331 } 348 }
@@ -443,6 +460,13 @@ public:
443 } 460 }
444 } 461 }
445 462
463 constexpr void UpdateAttribute(KMemoryAttribute mask, KMemoryAttribute attr) {
464 ASSERT(False(mask & KMemoryAttribute::IpcLocked));
465 ASSERT(False(mask & KMemoryAttribute::DeviceShared));
466
467 m_attribute = (m_attribute & ~mask) | attr;
468 }
469
446 constexpr void Split(KMemoryBlock* block, KProcessAddress addr) { 470 constexpr void Split(KMemoryBlock* block, KProcessAddress addr) {
447 ASSERT(this->GetAddress() < addr); 471 ASSERT(this->GetAddress() < addr);
448 ASSERT(this->Contains(addr)); 472 ASSERT(this->Contains(addr));
diff --git a/src/core/hle/kernel/k_memory_block_manager.cpp b/src/core/hle/kernel/k_memory_block_manager.cpp
index ab75f550e..58a1e7216 100644
--- a/src/core/hle/kernel/k_memory_block_manager.cpp
+++ b/src/core/hle/kernel/k_memory_block_manager.cpp
@@ -160,8 +160,8 @@ void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator,
160 } 160 }
161 161
162 // Update block state. 162 // Update block state.
163 it->Update(state, perm, attr, cur_address == address, static_cast<u8>(set_disable_attr), 163 it->Update(state, perm, attr, it->GetAddress() == address,
164 static_cast<u8>(clear_disable_attr)); 164 static_cast<u8>(set_disable_attr), static_cast<u8>(clear_disable_attr));
165 cur_address += cur_info.GetSize(); 165 cur_address += cur_info.GetSize();
166 remaining_pages -= cur_info.GetNumPages(); 166 remaining_pages -= cur_info.GetNumPages();
167 } 167 }
@@ -175,7 +175,9 @@ void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allo
175 KProcessAddress address, size_t num_pages, 175 KProcessAddress address, size_t num_pages,
176 KMemoryState test_state, KMemoryPermission test_perm, 176 KMemoryState test_state, KMemoryPermission test_perm,
177 KMemoryAttribute test_attr, KMemoryState state, 177 KMemoryAttribute test_attr, KMemoryState state,
178 KMemoryPermission perm, KMemoryAttribute attr) { 178 KMemoryPermission perm, KMemoryAttribute attr,
179 KMemoryBlockDisableMergeAttribute set_disable_attr,
180 KMemoryBlockDisableMergeAttribute clear_disable_attr) {
179 // Ensure for auditing that we never end up with an invalid tree. 181 // Ensure for auditing that we never end up with an invalid tree.
180 KScopedMemoryBlockManagerAuditor auditor(this); 182 KScopedMemoryBlockManagerAuditor auditor(this);
181 ASSERT(Common::IsAligned(GetInteger(address), PageSize)); 183 ASSERT(Common::IsAligned(GetInteger(address), PageSize));
@@ -214,7 +216,8 @@ void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allo
214 } 216 }
215 217
216 // Update block state. 218 // Update block state.
217 it->Update(state, perm, attr, false, 0, 0); 219 it->Update(state, perm, attr, false, static_cast<u8>(set_disable_attr),
220 static_cast<u8>(clear_disable_attr));
218 cur_address += cur_info.GetSize(); 221 cur_address += cur_info.GetSize();
219 remaining_pages -= cur_info.GetNumPages(); 222 remaining_pages -= cur_info.GetNumPages();
220 } else { 223 } else {
@@ -284,6 +287,65 @@ void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocat
284 this->CoalesceForUpdate(allocator, address, num_pages); 287 this->CoalesceForUpdate(allocator, address, num_pages);
285} 288}
286 289
290void KMemoryBlockManager::UpdateAttribute(KMemoryBlockManagerUpdateAllocator* allocator,
291 KProcessAddress address, size_t num_pages,
292 KMemoryAttribute mask, KMemoryAttribute attr) {
293 // Ensure for auditing that we never end up with an invalid tree.
294 KScopedMemoryBlockManagerAuditor auditor(this);
295 ASSERT(Common::IsAligned(GetInteger(address), PageSize));
296
297 KProcessAddress cur_address = address;
298 size_t remaining_pages = num_pages;
299 iterator it = this->FindIterator(address);
300
301 while (remaining_pages > 0) {
302 const size_t remaining_size = remaining_pages * PageSize;
303 KMemoryInfo cur_info = it->GetMemoryInfo();
304
305 if ((it->GetAttribute() & mask) != attr) {
306 // If we need to, create a new block before and insert it.
307 if (cur_info.GetAddress() != GetInteger(cur_address)) {
308 KMemoryBlock* new_block = allocator->Allocate();
309
310 it->Split(new_block, cur_address);
311 it = m_memory_block_tree.insert(*new_block);
312 it++;
313
314 cur_info = it->GetMemoryInfo();
315 cur_address = cur_info.GetAddress();
316 }
317
318 // If we need to, create a new block after and insert it.
319 if (cur_info.GetSize() > remaining_size) {
320 KMemoryBlock* new_block = allocator->Allocate();
321
322 it->Split(new_block, cur_address + remaining_size);
323 it = m_memory_block_tree.insert(*new_block);
324
325 cur_info = it->GetMemoryInfo();
326 }
327
328 // Update block state.
329 it->UpdateAttribute(mask, attr);
330 cur_address += cur_info.GetSize();
331 remaining_pages -= cur_info.GetNumPages();
332 } else {
333 // If we already have the right attributes, just advance.
334 if (cur_address + remaining_size < cur_info.GetEndAddress()) {
335 remaining_pages = 0;
336 cur_address += remaining_size;
337 } else {
338 remaining_pages =
339 (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
340 cur_address = cur_info.GetEndAddress();
341 }
342 }
343 it++;
344 }
345
346 this->CoalesceForUpdate(allocator, address, num_pages);
347}
348
287// Debug. 349// Debug.
288bool KMemoryBlockManager::CheckState() const { 350bool KMemoryBlockManager::CheckState() const {
289 // Loop over every block, ensuring that we are sorted and coalesced. 351 // Loop over every block, ensuring that we are sorted and coalesced.
diff --git a/src/core/hle/kernel/k_memory_block_manager.h b/src/core/hle/kernel/k_memory_block_manager.h
index 96496e990..cb7b6f430 100644
--- a/src/core/hle/kernel/k_memory_block_manager.h
+++ b/src/core/hle/kernel/k_memory_block_manager.h
@@ -115,7 +115,11 @@ public:
115 void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address, 115 void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
116 size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm, 116 size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm,
117 KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm, 117 KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm,
118 KMemoryAttribute attr); 118 KMemoryAttribute attr, KMemoryBlockDisableMergeAttribute set_disable_attr,
119 KMemoryBlockDisableMergeAttribute clear_disable_attr);
120
121 void UpdateAttribute(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
122 size_t num_pages, KMemoryAttribute mask, KMemoryAttribute attr);
119 123
120 iterator FindIterator(KProcessAddress address) const { 124 iterator FindIterator(KProcessAddress address) const {
121 return m_memory_block_tree.find(KMemoryBlock( 125 return m_memory_block_tree.find(KMemoryBlock(
diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h
index 54a71df56..c8122644f 100644
--- a/src/core/hle/kernel/k_memory_layout.h
+++ b/src/core/hle/kernel/k_memory_layout.h
@@ -137,11 +137,9 @@ public:
137 return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscExceptionStack); 137 return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscExceptionStack);
138 } 138 }
139 139
140 KVirtualAddress GetSlabRegionAddress() const { 140 const KMemoryRegion& GetSlabRegion() const {
141 return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab)) 141 return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab));
142 .GetAddress();
143 } 142 }
144
145 const KMemoryRegion& GetDeviceRegion(KMemoryRegionType type) const { 143 const KMemoryRegion& GetDeviceRegion(KMemoryRegionType type) const {
146 return Dereference(GetPhysicalMemoryRegionTree().FindFirstDerived(type)); 144 return Dereference(GetPhysicalMemoryRegionTree().FindFirstDerived(type));
147 } 145 }
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index 74d8169e0..637558e10 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -119,7 +119,8 @@ void KMemoryManager::Initialize(KVirtualAddress management_region, size_t manage
119 // Free each region to its corresponding heap. 119 // Free each region to its corresponding heap.
120 size_t reserved_sizes[MaxManagerCount] = {}; 120 size_t reserved_sizes[MaxManagerCount] = {};
121 const KPhysicalAddress ini_start = GetInitialProcessBinaryPhysicalAddress(); 121 const KPhysicalAddress ini_start = GetInitialProcessBinaryPhysicalAddress();
122 const KPhysicalAddress ini_end = ini_start + InitialProcessBinarySizeMax; 122 const size_t ini_size = GetInitialProcessBinarySize();
123 const KPhysicalAddress ini_end = ini_start + ini_size;
123 const KPhysicalAddress ini_last = ini_end - 1; 124 const KPhysicalAddress ini_last = ini_end - 1;
124 for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) { 125 for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
125 if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) { 126 if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
@@ -137,13 +138,13 @@ void KMemoryManager::Initialize(KVirtualAddress management_region, size_t manage
137 } 138 }
138 139
139 // Open/reserve the ini memory. 140 // Open/reserve the ini memory.
140 manager.OpenFirst(ini_start, InitialProcessBinarySizeMax / PageSize); 141 manager.OpenFirst(ini_start, ini_size / PageSize);
141 reserved_sizes[it.GetAttributes()] += InitialProcessBinarySizeMax; 142 reserved_sizes[it.GetAttributes()] += ini_size;
142 143
143 // Free memory after the ini to the heap. 144 // Free memory after the ini to the heap.
144 if (ini_last != cur_last) { 145 if (ini_last != cur_last) {
145 ASSERT(cur_end != 0); 146 ASSERT(cur_end != 0);
146 manager.Free(ini_end, cur_end - ini_end); 147 manager.Free(ini_end, (cur_end - ini_end) / PageSize);
147 } 148 }
148 } else { 149 } else {
149 // Ensure there's no partial overlap with the ini image. 150 // Ensure there's no partial overlap with the ini image.
diff --git a/src/core/hle/kernel/k_memory_region_type.h b/src/core/hle/kernel/k_memory_region_type.h
index e5630c1ac..bcbf450f0 100644
--- a/src/core/hle/kernel/k_memory_region_type.h
+++ b/src/core/hle/kernel/k_memory_region_type.h
@@ -190,9 +190,15 @@ static_assert(KMemoryRegionType_DramKernelInitPt.GetValue() ==
190constexpr inline auto KMemoryRegionType_DramKernelSecureAppletMemory = 190constexpr inline auto KMemoryRegionType_DramKernelSecureAppletMemory =
191 KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 0).SetAttribute( 191 KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 0).SetAttribute(
192 KMemoryRegionAttr_LinearMapped); 192 KMemoryRegionAttr_LinearMapped);
193constexpr inline const auto KMemoryRegionType_DramKernelSecureUnknown =
194 KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 1).SetAttribute(
195 KMemoryRegionAttr_LinearMapped);
193static_assert(KMemoryRegionType_DramKernelSecureAppletMemory.GetValue() == 196static_assert(KMemoryRegionType_DramKernelSecureAppletMemory.GetValue() ==
194 (0x18E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | 197 (0x18E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap |
195 KMemoryRegionAttr_LinearMapped)); 198 KMemoryRegionAttr_LinearMapped));
199static_assert(KMemoryRegionType_DramKernelSecureUnknown.GetValue() ==
200 (0x28E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap |
201 KMemoryRegionAttr_LinearMapped));
196 202
197constexpr inline auto KMemoryRegionType_DramReservedEarly = 203constexpr inline auto KMemoryRegionType_DramReservedEarly =
198 KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap); 204 KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap);
@@ -217,16 +223,18 @@ constexpr inline auto KMemoryRegionType_DramPoolPartition =
217static_assert(KMemoryRegionType_DramPoolPartition.GetValue() == 223static_assert(KMemoryRegionType_DramPoolPartition.GetValue() ==
218 (0x26 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); 224 (0x26 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
219 225
220constexpr inline auto KMemoryRegionType_DramPoolManagement = 226// UNUSED: .Derive(4, 1);
221 KMemoryRegionType_DramPoolPartition.DeriveTransition(0, 2).DeriveTransition().SetAttribute( 227// UNUSED: .Derive(4, 2);
228constexpr inline const auto KMemoryRegionType_DramPoolManagement =
229 KMemoryRegionType_DramPoolPartition.Derive(4, 0).SetAttribute(
222 KMemoryRegionAttr_CarveoutProtected); 230 KMemoryRegionAttr_CarveoutProtected);
223constexpr inline auto KMemoryRegionType_DramUserPool = 231constexpr inline const auto KMemoryRegionType_DramUserPool =
224 KMemoryRegionType_DramPoolPartition.DeriveTransition(1, 2).DeriveTransition(); 232 KMemoryRegionType_DramPoolPartition.Derive(4, 3);
225static_assert(KMemoryRegionType_DramPoolManagement.GetValue() == 233static_assert(KMemoryRegionType_DramPoolManagement.GetValue() ==
226 (0x166 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | 234 (0xE6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
227 KMemoryRegionAttr_CarveoutProtected)); 235 KMemoryRegionAttr_CarveoutProtected));
228static_assert(KMemoryRegionType_DramUserPool.GetValue() == 236static_assert(KMemoryRegionType_DramUserPool.GetValue() ==
229 (0x1A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); 237 (0x266 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
230 238
231constexpr inline auto KMemoryRegionType_DramApplicationPool = 239constexpr inline auto KMemoryRegionType_DramApplicationPool =
232 KMemoryRegionType_DramUserPool.Derive(4, 0); 240 KMemoryRegionType_DramUserPool.Derive(4, 0);
@@ -237,60 +245,63 @@ constexpr inline auto KMemoryRegionType_DramSystemNonSecurePool =
237constexpr inline auto KMemoryRegionType_DramSystemPool = 245constexpr inline auto KMemoryRegionType_DramSystemPool =
238 KMemoryRegionType_DramUserPool.Derive(4, 3).SetAttribute(KMemoryRegionAttr_CarveoutProtected); 246 KMemoryRegionType_DramUserPool.Derive(4, 3).SetAttribute(KMemoryRegionAttr_CarveoutProtected);
239static_assert(KMemoryRegionType_DramApplicationPool.GetValue() == 247static_assert(KMemoryRegionType_DramApplicationPool.GetValue() ==
240 (0x7A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); 248 (0xE66 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
241static_assert(KMemoryRegionType_DramAppletPool.GetValue() == 249static_assert(KMemoryRegionType_DramAppletPool.GetValue() ==
242 (0xBA6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); 250 (0x1666 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
243static_assert(KMemoryRegionType_DramSystemNonSecurePool.GetValue() == 251static_assert(KMemoryRegionType_DramSystemNonSecurePool.GetValue() ==
244 (0xDA6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); 252 (0x1A66 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
245static_assert(KMemoryRegionType_DramSystemPool.GetValue() == 253static_assert(KMemoryRegionType_DramSystemPool.GetValue() ==
246 (0x13A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | 254 (0x2666 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
247 KMemoryRegionAttr_CarveoutProtected)); 255 KMemoryRegionAttr_CarveoutProtected));
248 256
249constexpr inline auto KMemoryRegionType_VirtualDramHeapBase = 257constexpr inline auto KMemoryRegionType_VirtualDramHeapBase =
250 KMemoryRegionType_Dram.DeriveSparse(1, 3, 0); 258 KMemoryRegionType_Dram.DeriveSparse(1, 4, 0);
251constexpr inline auto KMemoryRegionType_VirtualDramKernelPtHeap = 259constexpr inline auto KMemoryRegionType_VirtualDramKernelPtHeap =
252 KMemoryRegionType_Dram.DeriveSparse(1, 3, 1); 260 KMemoryRegionType_Dram.DeriveSparse(1, 4, 1);
253constexpr inline auto KMemoryRegionType_VirtualDramKernelTraceBuffer = 261constexpr inline auto KMemoryRegionType_VirtualDramKernelTraceBuffer =
254 KMemoryRegionType_Dram.DeriveSparse(1, 3, 2); 262 KMemoryRegionType_Dram.DeriveSparse(1, 4, 2);
255static_assert(KMemoryRegionType_VirtualDramHeapBase.GetValue() == 0x1A); 263static_assert(KMemoryRegionType_VirtualDramHeapBase.GetValue() == 0x1A);
256static_assert(KMemoryRegionType_VirtualDramKernelPtHeap.GetValue() == 0x2A); 264static_assert(KMemoryRegionType_VirtualDramKernelPtHeap.GetValue() == 0x2A);
257static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A); 265static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A);
258 266
259// UNUSED: .DeriveSparse(2, 2, 0); 267// UNUSED: .Derive(4, 2);
260constexpr inline auto KMemoryRegionType_VirtualDramUnknownDebug = 268constexpr inline const auto KMemoryRegionType_VirtualDramUnknownDebug =
261 KMemoryRegionType_Dram.DeriveSparse(2, 2, 1); 269 KMemoryRegionType_Dram.Advance(2).Derive(4, 0);
262static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52)); 270constexpr inline const auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory =
263 271 KMemoryRegionType_Dram.Advance(2).Derive(4, 1);
264constexpr inline auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory = 272constexpr inline const auto KMemoryRegionType_VirtualDramKernelSecureUnknown =
265 KMemoryRegionType_Dram.DeriveSparse(3, 1, 0); 273 KMemoryRegionType_Dram.Advance(2).Derive(4, 3);
266static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x62)); 274static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x32));
267 275static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x52));
268constexpr inline auto KMemoryRegionType_VirtualDramKernelInitPt = 276static_assert(KMemoryRegionType_VirtualDramKernelSecureUnknown.GetValue() == (0x92));
269 KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0); 277
270constexpr inline auto KMemoryRegionType_VirtualDramPoolManagement = 278// UNUSED: .Derive(4, 3);
271 KMemoryRegionType_VirtualDramHeapBase.Derive(3, 1); 279constexpr inline const auto KMemoryRegionType_VirtualDramKernelInitPt =
272constexpr inline auto KMemoryRegionType_VirtualDramUserPool = 280 KMemoryRegionType_VirtualDramHeapBase.Derive(4, 0);
273 KMemoryRegionType_VirtualDramHeapBase.Derive(3, 2); 281constexpr inline const auto KMemoryRegionType_VirtualDramPoolManagement =
274static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x19A); 282 KMemoryRegionType_VirtualDramHeapBase.Derive(4, 1);
275static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x29A); 283constexpr inline const auto KMemoryRegionType_VirtualDramUserPool =
276static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x31A); 284 KMemoryRegionType_VirtualDramHeapBase.Derive(4, 2);
285static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x31A);
286static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x51A);
287static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x61A);
277 288
278// NOTE: For unknown reason, the pools are derived out-of-order here. 289// NOTE: For unknown reason, the pools are derived out-of-order here.
279// It's worth eventually trying to understand why Nintendo made this choice. 290// It's worth eventually trying to understand why Nintendo made this choice.
280// UNUSED: .Derive(6, 0); 291// UNUSED: .Derive(6, 0);
281// UNUSED: .Derive(6, 1); 292// UNUSED: .Derive(6, 1);
282constexpr inline auto KMemoryRegionType_VirtualDramAppletPool = 293constexpr inline const auto KMemoryRegionType_VirtualDramApplicationPool =
283 KMemoryRegionType_VirtualDramUserPool.Derive(6, 2); 294 KMemoryRegionType_VirtualDramUserPool.Derive(4, 0);
284constexpr inline auto KMemoryRegionType_VirtualDramApplicationPool = 295constexpr inline const auto KMemoryRegionType_VirtualDramAppletPool =
285 KMemoryRegionType_VirtualDramUserPool.Derive(6, 3); 296 KMemoryRegionType_VirtualDramUserPool.Derive(4, 1);
286constexpr inline auto KMemoryRegionType_VirtualDramSystemNonSecurePool = 297constexpr inline const auto KMemoryRegionType_VirtualDramSystemNonSecurePool =
287 KMemoryRegionType_VirtualDramUserPool.Derive(6, 4); 298 KMemoryRegionType_VirtualDramUserPool.Derive(4, 2);
288constexpr inline auto KMemoryRegionType_VirtualDramSystemPool = 299constexpr inline const auto KMemoryRegionType_VirtualDramSystemPool =
289 KMemoryRegionType_VirtualDramUserPool.Derive(6, 5); 300 KMemoryRegionType_VirtualDramUserPool.Derive(4, 3);
290static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x1B1A); 301static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x361A);
291static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x271A); 302static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x561A);
292static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x2B1A); 303static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x661A);
293static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x331A); 304static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x961A);
294 305
295constexpr inline auto KMemoryRegionType_ArchDeviceBase = 306constexpr inline auto KMemoryRegionType_ArchDeviceBase =
296 KMemoryRegionType_Kernel.DeriveTransition(0, 1).SetSparseOnly(); 307 KMemoryRegionType_Kernel.DeriveTransition(0, 1).SetSparseOnly();
@@ -354,12 +365,14 @@ constexpr inline auto KMemoryRegionType_KernelTemp =
354static_assert(KMemoryRegionType_KernelTemp.GetValue() == 0x31); 365static_assert(KMemoryRegionType_KernelTemp.GetValue() == 0x31);
355 366
356constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) { 367constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) {
357 if (KMemoryRegionType_KernelTraceBuffer.IsAncestorOf(type_id)) { 368 if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
358 return KMemoryRegionType_VirtualDramKernelTraceBuffer;
359 } else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
360 return KMemoryRegionType_VirtualDramKernelPtHeap; 369 return KMemoryRegionType_VirtualDramKernelPtHeap;
361 } else if (KMemoryRegionType_DramKernelSecureAppletMemory.IsAncestorOf(type_id)) { 370 } else if (KMemoryRegionType_DramKernelSecureAppletMemory.IsAncestorOf(type_id)) {
362 return KMemoryRegionType_VirtualDramKernelSecureAppletMemory; 371 return KMemoryRegionType_VirtualDramKernelSecureAppletMemory;
372 } else if (KMemoryRegionType_DramKernelSecureUnknown.IsAncestorOf(type_id)) {
373 return KMemoryRegionType_VirtualDramKernelSecureUnknown;
374 } else if (KMemoryRegionType_KernelTraceBuffer.IsAncestorOf(type_id)) {
375 return KMemoryRegionType_VirtualDramKernelTraceBuffer;
363 } else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) { 376 } else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) {
364 return KMemoryRegionType_VirtualDramUnknownDebug; 377 return KMemoryRegionType_VirtualDramUnknownDebug;
365 } else { 378 } else {
diff --git a/src/core/hle/kernel/k_page_group.h b/src/core/hle/kernel/k_page_group.h
index b32909f05..de9d63a8d 100644
--- a/src/core/hle/kernel/k_page_group.h
+++ b/src/core/hle/kernel/k_page_group.h
@@ -183,12 +183,17 @@ private:
183 183
184class KScopedPageGroup { 184class KScopedPageGroup {
185public: 185public:
186 explicit KScopedPageGroup(const KPageGroup* gp) : m_pg(gp) { 186 explicit KScopedPageGroup(const KPageGroup* gp, bool not_first = true) : m_pg(gp) {
187 if (m_pg) { 187 if (m_pg) {
188 m_pg->Open(); 188 if (not_first) {
189 m_pg->Open();
190 } else {
191 m_pg->OpenFirst();
192 }
189 } 193 }
190 } 194 }
191 explicit KScopedPageGroup(const KPageGroup& gp) : KScopedPageGroup(std::addressof(gp)) {} 195 explicit KScopedPageGroup(const KPageGroup& gp, bool not_first = true)
196 : KScopedPageGroup(std::addressof(gp), not_first) {}
192 ~KScopedPageGroup() { 197 ~KScopedPageGroup() {
193 if (m_pg) { 198 if (m_pg) {
194 m_pg->Close(); 199 m_pg->Close();
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 0b0cef984..217ccbae3 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -505,7 +505,7 @@ Result KPageTable::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress
505 R_TRY(this->CheckMemoryStateContiguous( 505 R_TRY(this->CheckMemoryStateContiguous(
506 std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias, 506 std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias,
507 KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None, 507 KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
508 KMemoryAttribute::All, KMemoryAttribute::None)); 508 KMemoryAttribute::All & ~KMemoryAttribute::PermissionLocked, KMemoryAttribute::None));
509 509
510 // Determine whether any pages being unmapped are code. 510 // Determine whether any pages being unmapped are code.
511 bool any_code_pages = false; 511 bool any_code_pages = false;
@@ -1724,29 +1724,43 @@ Result KPageTable::MapPhysicalMemory(KProcessAddress address, size_t size) {
1724 PageSize; 1724 PageSize;
1725 1725
1726 // While we have pages to map, map them. 1726 // While we have pages to map, map them.
1727 while (map_pages > 0) { 1727 {
1728 // Check if we're at the end of the physical block. 1728 // Create a page group for the current mapping range.
1729 if (pg_pages == 0) { 1729 KPageGroup cur_pg(m_kernel, m_block_info_manager);
1730 // Ensure there are more pages to map. 1730 {
1731 ASSERT(pg_it != pg.end()); 1731 ON_RESULT_FAILURE_2 {
1732 1732 cur_pg.OpenFirst();
1733 // Advance our physical block. 1733 cur_pg.Close();
1734 ++pg_it; 1734 };
1735 pg_phys_addr = pg_it->GetAddress(); 1735
1736 pg_pages = pg_it->GetNumPages(); 1736 size_t remain_pages = map_pages;
1737 while (remain_pages > 0) {
1738 // Check if we're at the end of the physical block.
1739 if (pg_pages == 0) {
1740 // Ensure there are more pages to map.
1741 ASSERT(pg_it != pg.end());
1742
1743 // Advance our physical block.
1744 ++pg_it;
1745 pg_phys_addr = pg_it->GetAddress();
1746 pg_pages = pg_it->GetNumPages();
1747 }
1748
1749 // Add whatever we can to the current block.
1750 const size_t cur_pages = std::min(pg_pages, remain_pages);
1751 R_TRY(cur_pg.AddBlock(pg_phys_addr +
1752 ((pg_pages - cur_pages) * PageSize),
1753 cur_pages));
1754
1755 // Advance.
1756 remain_pages -= cur_pages;
1757 pg_pages -= cur_pages;
1758 }
1737 } 1759 }
1738 1760
1739 // Map whatever we can. 1761 // Map the pages.
1740 const size_t cur_pages = std::min(pg_pages, map_pages); 1762 R_TRY(this->Operate(cur_address, map_pages, cur_pg,
1741 R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite, 1763 OperationType::MapFirstGroup));
1742 OperationType::MapFirst, pg_phys_addr));
1743
1744 // Advance.
1745 cur_address += cur_pages * PageSize;
1746 map_pages -= cur_pages;
1747
1748 pg_phys_addr += cur_pages * PageSize;
1749 pg_pages -= cur_pages;
1750 } 1764 }
1751 } 1765 }
1752 1766
@@ -1770,7 +1784,11 @@ Result KPageTable::MapPhysicalMemory(KProcessAddress address, size_t size) {
1770 m_memory_block_manager.UpdateIfMatch( 1784 m_memory_block_manager.UpdateIfMatch(
1771 std::addressof(allocator), address, size / PageSize, KMemoryState::Free, 1785 std::addressof(allocator), address, size / PageSize, KMemoryState::Free,
1772 KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal, 1786 KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal,
1773 KMemoryPermission::UserReadWrite, KMemoryAttribute::None); 1787 KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
1788 address == this->GetAliasRegionStart()
1789 ? KMemoryBlockDisableMergeAttribute::Normal
1790 : KMemoryBlockDisableMergeAttribute::None,
1791 KMemoryBlockDisableMergeAttribute::None);
1774 1792
1775 R_SUCCEED(); 1793 R_SUCCEED();
1776 } 1794 }
@@ -1868,6 +1886,13 @@ Result KPageTable::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
1868 1886
1869 // Iterate over the memory, unmapping as we go. 1887 // Iterate over the memory, unmapping as we go.
1870 auto it = m_memory_block_manager.FindIterator(cur_address); 1888 auto it = m_memory_block_manager.FindIterator(cur_address);
1889
1890 const auto clear_merge_attr =
1891 (it->GetState() == KMemoryState::Normal &&
1892 it->GetAddress() == this->GetAliasRegionStart() && it->GetAddress() == address)
1893 ? KMemoryBlockDisableMergeAttribute::Normal
1894 : KMemoryBlockDisableMergeAttribute::None;
1895
1871 while (true) { 1896 while (true) {
1872 // Check that the iterator is valid. 1897 // Check that the iterator is valid.
1873 ASSERT(it != m_memory_block_manager.end()); 1898 ASSERT(it != m_memory_block_manager.end());
@@ -1905,7 +1930,7 @@ Result KPageTable::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
1905 m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, 1930 m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize,
1906 KMemoryState::Free, KMemoryPermission::None, 1931 KMemoryState::Free, KMemoryPermission::None,
1907 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, 1932 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
1908 KMemoryBlockDisableMergeAttribute::None); 1933 clear_merge_attr);
1909 1934
1910 // We succeeded. 1935 // We succeeded.
1911 R_SUCCEED(); 1936 R_SUCCEED();
@@ -2379,8 +2404,7 @@ Result KPageTable::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
2379 KScopedPageTableUpdater updater(this); 2404 KScopedPageTableUpdater updater(this);
2380 2405
2381 // Perform mapping operation. 2406 // Perform mapping operation.
2382 const KPageProperties properties = {perm, state == KMemoryState::Io, false, 2407 const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
2383 DisableMergeAttribute::DisableHead};
2384 R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false)); 2408 R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
2385 2409
2386 // Update the blocks. 2410 // Update the blocks.
@@ -2422,8 +2446,7 @@ Result KPageTable::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMem
2422 KScopedPageTableUpdater updater(this); 2446 KScopedPageTableUpdater updater(this);
2423 2447
2424 // Perform mapping operation. 2448 // Perform mapping operation.
2425 const KPageProperties properties = {perm, state == KMemoryState::Io, false, 2449 const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
2426 DisableMergeAttribute::DisableHead};
2427 R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false)); 2450 R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
2428 2451
2429 // Update the blocks. 2452 // Update the blocks.
@@ -2652,11 +2675,18 @@ Result KPageTable::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mas
2652 size_t num_allocator_blocks; 2675 size_t num_allocator_blocks;
2653 constexpr auto AttributeTestMask = 2676 constexpr auto AttributeTestMask =
2654 ~(KMemoryAttribute::SetMask | KMemoryAttribute::DeviceShared); 2677 ~(KMemoryAttribute::SetMask | KMemoryAttribute::DeviceShared);
2655 R_TRY(this->CheckMemoryState( 2678 const KMemoryState state_test_mask =
2656 std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr), 2679 static_cast<KMemoryState>(((mask & static_cast<u32>(KMemoryAttribute::Uncached))
2657 std::addressof(num_allocator_blocks), addr, size, KMemoryState::FlagCanChangeAttribute, 2680 ? static_cast<u32>(KMemoryState::FlagCanChangeAttribute)
2658 KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None, 2681 : 0) |
2659 AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask)); 2682 ((mask & static_cast<u32>(KMemoryAttribute::PermissionLocked))
2683 ? static_cast<u32>(KMemoryState::FlagCanPermissionLock)
2684 : 0));
2685 R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
2686 std::addressof(old_attr), std::addressof(num_allocator_blocks),
2687 addr, size, state_test_mask, state_test_mask,
2688 KMemoryPermission::None, KMemoryPermission::None,
2689 AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
2660 2690
2661 // Create an update allocator. 2691 // Create an update allocator.
2662 Result allocator_result{ResultSuccess}; 2692 Result allocator_result{ResultSuccess};
@@ -2664,18 +2694,17 @@ Result KPageTable::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mas
2664 m_memory_block_slab_manager, num_allocator_blocks); 2694 m_memory_block_slab_manager, num_allocator_blocks);
2665 R_TRY(allocator_result); 2695 R_TRY(allocator_result);
2666 2696
2667 // Determine the new attribute. 2697 // If we need to, perform a change attribute operation.
2668 const KMemoryAttribute new_attr = 2698 if (True(KMemoryAttribute::Uncached & static_cast<KMemoryAttribute>(mask))) {
2669 static_cast<KMemoryAttribute>(((old_attr & static_cast<KMemoryAttribute>(~mask)) | 2699 // Perform operation.
2670 static_cast<KMemoryAttribute>(attr & mask))); 2700 R_TRY(this->Operate(addr, num_pages, old_perm,
2671 2701 OperationType::ChangePermissionsAndRefreshAndFlush, 0));
2672 // Perform operation. 2702 }
2673 this->Operate(addr, num_pages, old_perm, OperationType::ChangePermissionsAndRefresh);
2674 2703
2675 // Update the blocks. 2704 // Update the blocks.
2676 m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, old_perm, 2705 m_memory_block_manager.UpdateAttribute(std::addressof(allocator), addr, num_pages,
2677 new_attr, KMemoryBlockDisableMergeAttribute::None, 2706 static_cast<KMemoryAttribute>(mask),
2678 KMemoryBlockDisableMergeAttribute::None); 2707 static_cast<KMemoryAttribute>(attr));
2679 2708
2680 R_SUCCEED(); 2709 R_SUCCEED();
2681} 2710}
@@ -2863,7 +2892,8 @@ Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress
2863 &KMemoryBlock::ShareToDevice, KMemoryPermission::None); 2892 &KMemoryBlock::ShareToDevice, KMemoryPermission::None);
2864 2893
2865 // Set whether the locked memory was io. 2894 // Set whether the locked memory was io.
2866 *out_is_io = old_state == KMemoryState::Io; 2895 *out_is_io =
2896 static_cast<Svc::MemoryState>(old_state & KMemoryState::Mask) == Svc::MemoryState::Io;
2867 2897
2868 R_SUCCEED(); 2898 R_SUCCEED();
2869} 2899}
@@ -3021,9 +3051,10 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, const KPageGr
3021 ASSERT(num_pages == page_group.GetNumPages()); 3051 ASSERT(num_pages == page_group.GetNumPages());
3022 3052
3023 switch (operation) { 3053 switch (operation) {
3024 case OperationType::MapGroup: { 3054 case OperationType::MapGroup:
3055 case OperationType::MapFirstGroup: {
3025 // We want to maintain a new reference to every page in the group. 3056 // We want to maintain a new reference to every page in the group.
3026 KScopedPageGroup spg(page_group); 3057 KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup);
3027 3058
3028 for (const auto& node : page_group) { 3059 for (const auto& node : page_group) {
3029 const size_t size{node.GetNumPages() * PageSize}; 3060 const size_t size{node.GetNumPages() * PageSize};
@@ -3065,7 +3096,6 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermis
3065 m_memory->UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize); 3096 m_memory->UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize);
3066 break; 3097 break;
3067 } 3098 }
3068 case OperationType::MapFirst:
3069 case OperationType::Map: { 3099 case OperationType::Map: {
3070 ASSERT(map_addr); 3100 ASSERT(map_addr);
3071 ASSERT(Common::IsAligned(GetInteger(map_addr), PageSize)); 3101 ASSERT(Common::IsAligned(GetInteger(map_addr), PageSize));
@@ -3073,11 +3103,7 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermis
3073 3103
3074 // Open references to pages, if we should. 3104 // Open references to pages, if we should.
3075 if (IsHeapPhysicalAddress(m_kernel.MemoryLayout(), map_addr)) { 3105 if (IsHeapPhysicalAddress(m_kernel.MemoryLayout(), map_addr)) {
3076 if (operation == OperationType::MapFirst) { 3106 m_kernel.MemoryManager().Open(map_addr, num_pages);
3077 m_kernel.MemoryManager().OpenFirst(map_addr, num_pages);
3078 } else {
3079 m_kernel.MemoryManager().Open(map_addr, num_pages);
3080 }
3081 } 3107 }
3082 break; 3108 break;
3083 } 3109 }
@@ -3087,6 +3113,7 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermis
3087 } 3113 }
3088 case OperationType::ChangePermissions: 3114 case OperationType::ChangePermissions:
3089 case OperationType::ChangePermissionsAndRefresh: 3115 case OperationType::ChangePermissionsAndRefresh:
3116 case OperationType::ChangePermissionsAndRefreshAndFlush:
3090 break; 3117 break;
3091 default: 3118 default:
3092 ASSERT(false); 3119 ASSERT(false);
@@ -3106,79 +3133,79 @@ void KPageTable::FinalizeUpdate(PageLinkedList* page_list) {
3106 } 3133 }
3107} 3134}
3108 3135
3109KProcessAddress KPageTable::GetRegionAddress(KMemoryState state) const { 3136KProcessAddress KPageTable::GetRegionAddress(Svc::MemoryState state) const {
3110 switch (state) { 3137 switch (state) {
3111 case KMemoryState::Free: 3138 case Svc::MemoryState::Free:
3112 case KMemoryState::Kernel: 3139 case Svc::MemoryState::Kernel:
3113 return m_address_space_start; 3140 return m_address_space_start;
3114 case KMemoryState::Normal: 3141 case Svc::MemoryState::Normal:
3115 return m_heap_region_start; 3142 return m_heap_region_start;
3116 case KMemoryState::Ipc: 3143 case Svc::MemoryState::Ipc:
3117 case KMemoryState::NonSecureIpc: 3144 case Svc::MemoryState::NonSecureIpc:
3118 case KMemoryState::NonDeviceIpc: 3145 case Svc::MemoryState::NonDeviceIpc:
3119 return m_alias_region_start; 3146 return m_alias_region_start;
3120 case KMemoryState::Stack: 3147 case Svc::MemoryState::Stack:
3121 return m_stack_region_start; 3148 return m_stack_region_start;
3122 case KMemoryState::Static: 3149 case Svc::MemoryState::Static:
3123 case KMemoryState::ThreadLocal: 3150 case Svc::MemoryState::ThreadLocal:
3124 return m_kernel_map_region_start; 3151 return m_kernel_map_region_start;
3125 case KMemoryState::Io: 3152 case Svc::MemoryState::Io:
3126 case KMemoryState::Shared: 3153 case Svc::MemoryState::Shared:
3127 case KMemoryState::AliasCode: 3154 case Svc::MemoryState::AliasCode:
3128 case KMemoryState::AliasCodeData: 3155 case Svc::MemoryState::AliasCodeData:
3129 case KMemoryState::Transfered: 3156 case Svc::MemoryState::Transfered:
3130 case KMemoryState::SharedTransfered: 3157 case Svc::MemoryState::SharedTransfered:
3131 case KMemoryState::SharedCode: 3158 case Svc::MemoryState::SharedCode:
3132 case KMemoryState::GeneratedCode: 3159 case Svc::MemoryState::GeneratedCode:
3133 case KMemoryState::CodeOut: 3160 case Svc::MemoryState::CodeOut:
3134 case KMemoryState::Coverage: 3161 case Svc::MemoryState::Coverage:
3135 case KMemoryState::Insecure: 3162 case Svc::MemoryState::Insecure:
3136 return m_alias_code_region_start; 3163 return m_alias_code_region_start;
3137 case KMemoryState::Code: 3164 case Svc::MemoryState::Code:
3138 case KMemoryState::CodeData: 3165 case Svc::MemoryState::CodeData:
3139 return m_code_region_start; 3166 return m_code_region_start;
3140 default: 3167 default:
3141 UNREACHABLE(); 3168 UNREACHABLE();
3142 } 3169 }
3143} 3170}
3144 3171
3145size_t KPageTable::GetRegionSize(KMemoryState state) const { 3172size_t KPageTable::GetRegionSize(Svc::MemoryState state) const {
3146 switch (state) { 3173 switch (state) {
3147 case KMemoryState::Free: 3174 case Svc::MemoryState::Free:
3148 case KMemoryState::Kernel: 3175 case Svc::MemoryState::Kernel:
3149 return m_address_space_end - m_address_space_start; 3176 return m_address_space_end - m_address_space_start;
3150 case KMemoryState::Normal: 3177 case Svc::MemoryState::Normal:
3151 return m_heap_region_end - m_heap_region_start; 3178 return m_heap_region_end - m_heap_region_start;
3152 case KMemoryState::Ipc: 3179 case Svc::MemoryState::Ipc:
3153 case KMemoryState::NonSecureIpc: 3180 case Svc::MemoryState::NonSecureIpc:
3154 case KMemoryState::NonDeviceIpc: 3181 case Svc::MemoryState::NonDeviceIpc:
3155 return m_alias_region_end - m_alias_region_start; 3182 return m_alias_region_end - m_alias_region_start;
3156 case KMemoryState::Stack: 3183 case Svc::MemoryState::Stack:
3157 return m_stack_region_end - m_stack_region_start; 3184 return m_stack_region_end - m_stack_region_start;
3158 case KMemoryState::Static: 3185 case Svc::MemoryState::Static:
3159 case KMemoryState::ThreadLocal: 3186 case Svc::MemoryState::ThreadLocal:
3160 return m_kernel_map_region_end - m_kernel_map_region_start; 3187 return m_kernel_map_region_end - m_kernel_map_region_start;
3161 case KMemoryState::Io: 3188 case Svc::MemoryState::Io:
3162 case KMemoryState::Shared: 3189 case Svc::MemoryState::Shared:
3163 case KMemoryState::AliasCode: 3190 case Svc::MemoryState::AliasCode:
3164 case KMemoryState::AliasCodeData: 3191 case Svc::MemoryState::AliasCodeData:
3165 case KMemoryState::Transfered: 3192 case Svc::MemoryState::Transfered:
3166 case KMemoryState::SharedTransfered: 3193 case Svc::MemoryState::SharedTransfered:
3167 case KMemoryState::SharedCode: 3194 case Svc::MemoryState::SharedCode:
3168 case KMemoryState::GeneratedCode: 3195 case Svc::MemoryState::GeneratedCode:
3169 case KMemoryState::CodeOut: 3196 case Svc::MemoryState::CodeOut:
3170 case KMemoryState::Coverage: 3197 case Svc::MemoryState::Coverage:
3171 case KMemoryState::Insecure: 3198 case Svc::MemoryState::Insecure:
3172 return m_alias_code_region_end - m_alias_code_region_start; 3199 return m_alias_code_region_end - m_alias_code_region_start;
3173 case KMemoryState::Code: 3200 case Svc::MemoryState::Code:
3174 case KMemoryState::CodeData: 3201 case Svc::MemoryState::CodeData:
3175 return m_code_region_end - m_code_region_start; 3202 return m_code_region_end - m_code_region_start;
3176 default: 3203 default:
3177 UNREACHABLE(); 3204 UNREACHABLE();
3178 } 3205 }
3179} 3206}
3180 3207
3181bool KPageTable::CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { 3208bool KPageTable::CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const {
3182 const KProcessAddress end = addr + size; 3209 const KProcessAddress end = addr + size;
3183 const KProcessAddress last = end - 1; 3210 const KProcessAddress last = end - 1;
3184 3211
@@ -3192,32 +3219,32 @@ bool KPageTable::CanContain(KProcessAddress addr, size_t size, KMemoryState stat
3192 const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr || 3219 const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr ||
3193 m_alias_region_start == m_alias_region_end); 3220 m_alias_region_start == m_alias_region_end);
3194 switch (state) { 3221 switch (state) {
3195 case KMemoryState::Free: 3222 case Svc::MemoryState::Free:
3196 case KMemoryState::Kernel: 3223 case Svc::MemoryState::Kernel:
3197 return is_in_region; 3224 return is_in_region;
3198 case KMemoryState::Io: 3225 case Svc::MemoryState::Io:
3199 case KMemoryState::Static: 3226 case Svc::MemoryState::Static:
3200 case KMemoryState::Code: 3227 case Svc::MemoryState::Code:
3201 case KMemoryState::CodeData: 3228 case Svc::MemoryState::CodeData:
3202 case KMemoryState::Shared: 3229 case Svc::MemoryState::Shared:
3203 case KMemoryState::AliasCode: 3230 case Svc::MemoryState::AliasCode:
3204 case KMemoryState::AliasCodeData: 3231 case Svc::MemoryState::AliasCodeData:
3205 case KMemoryState::Stack: 3232 case Svc::MemoryState::Stack:
3206 case KMemoryState::ThreadLocal: 3233 case Svc::MemoryState::ThreadLocal:
3207 case KMemoryState::Transfered: 3234 case Svc::MemoryState::Transfered:
3208 case KMemoryState::SharedTransfered: 3235 case Svc::MemoryState::SharedTransfered:
3209 case KMemoryState::SharedCode: 3236 case Svc::MemoryState::SharedCode:
3210 case KMemoryState::GeneratedCode: 3237 case Svc::MemoryState::GeneratedCode:
3211 case KMemoryState::CodeOut: 3238 case Svc::MemoryState::CodeOut:
3212 case KMemoryState::Coverage: 3239 case Svc::MemoryState::Coverage:
3213 case KMemoryState::Insecure: 3240 case Svc::MemoryState::Insecure:
3214 return is_in_region && !is_in_heap && !is_in_alias; 3241 return is_in_region && !is_in_heap && !is_in_alias;
3215 case KMemoryState::Normal: 3242 case Svc::MemoryState::Normal:
3216 ASSERT(is_in_heap); 3243 ASSERT(is_in_heap);
3217 return is_in_region && !is_in_alias; 3244 return is_in_region && !is_in_alias;
3218 case KMemoryState::Ipc: 3245 case Svc::MemoryState::Ipc:
3219 case KMemoryState::NonSecureIpc: 3246 case Svc::MemoryState::NonSecureIpc:
3220 case KMemoryState::NonDeviceIpc: 3247 case Svc::MemoryState::NonDeviceIpc:
3221 ASSERT(is_in_alias); 3248 ASSERT(is_in_alias);
3222 return is_in_region && !is_in_heap; 3249 return is_in_region && !is_in_heap;
3223 default: 3250 default:
@@ -3281,21 +3308,16 @@ Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, KProces
3281 3308
3282Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, 3309Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
3283 KMemoryAttribute* out_attr, size_t* out_blocks_needed, 3310 KMemoryAttribute* out_attr, size_t* out_blocks_needed,
3284 KProcessAddress addr, size_t size, KMemoryState state_mask, 3311 KMemoryBlockManager::const_iterator it,
3312 KProcessAddress last_addr, KMemoryState state_mask,
3285 KMemoryState state, KMemoryPermission perm_mask, 3313 KMemoryState state, KMemoryPermission perm_mask,
3286 KMemoryPermission perm, KMemoryAttribute attr_mask, 3314 KMemoryPermission perm, KMemoryAttribute attr_mask,
3287 KMemoryAttribute attr, KMemoryAttribute ignore_attr) const { 3315 KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
3288 ASSERT(this->IsLockedByCurrentThread()); 3316 ASSERT(this->IsLockedByCurrentThread());
3289 3317
3290 // Get information about the first block. 3318 // Get information about the first block.
3291 const KProcessAddress last_addr = addr + size - 1;
3292 KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
3293 KMemoryInfo info = it->GetMemoryInfo(); 3319 KMemoryInfo info = it->GetMemoryInfo();
3294 3320
3295 // If the start address isn't aligned, we need a block.
3296 const size_t blocks_for_start_align =
3297 (Common::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0;
3298
3299 // Validate all blocks in the range have correct state. 3321 // Validate all blocks in the range have correct state.
3300 const KMemoryState first_state = info.m_state; 3322 const KMemoryState first_state = info.m_state;
3301 const KMemoryPermission first_perm = info.m_permission; 3323 const KMemoryPermission first_perm = info.m_permission;
@@ -3321,10 +3343,6 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
3321 info = it->GetMemoryInfo(); 3343 info = it->GetMemoryInfo();
3322 } 3344 }
3323 3345
3324 // If the end address isn't aligned, we need a block.
3325 const size_t blocks_for_end_align =
3326 (Common::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
3327
3328 // Write output state. 3346 // Write output state.
3329 if (out_state != nullptr) { 3347 if (out_state != nullptr) {
3330 *out_state = first_state; 3348 *out_state = first_state;
@@ -3335,9 +3353,39 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
3335 if (out_attr != nullptr) { 3353 if (out_attr != nullptr) {
3336 *out_attr = static_cast<KMemoryAttribute>(first_attr & ~ignore_attr); 3354 *out_attr = static_cast<KMemoryAttribute>(first_attr & ~ignore_attr);
3337 } 3355 }
3356
3357 // If the end address isn't aligned, we need a block.
3338 if (out_blocks_needed != nullptr) { 3358 if (out_blocks_needed != nullptr) {
3339 *out_blocks_needed = blocks_for_start_align + blocks_for_end_align; 3359 const size_t blocks_for_end_align =
3360 (Common::AlignDown(GetInteger(last_addr), PageSize) + PageSize != info.GetEndAddress())
3361 ? 1
3362 : 0;
3363 *out_blocks_needed = blocks_for_end_align;
3364 }
3365
3366 R_SUCCEED();
3367}
3368
3369Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
3370 KMemoryAttribute* out_attr, size_t* out_blocks_needed,
3371 KProcessAddress addr, size_t size, KMemoryState state_mask,
3372 KMemoryState state, KMemoryPermission perm_mask,
3373 KMemoryPermission perm, KMemoryAttribute attr_mask,
3374 KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
3375 ASSERT(this->IsLockedByCurrentThread());
3376
3377 // Check memory state.
3378 const KProcessAddress last_addr = addr + size - 1;
3379 KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
3380 R_TRY(this->CheckMemoryState(out_state, out_perm, out_attr, out_blocks_needed, it, last_addr,
3381 state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr));
3382
3383 // If the start address isn't aligned, we need a block.
3384 if (out_blocks_needed != nullptr &&
3385 Common::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) {
3386 ++(*out_blocks_needed);
3340 } 3387 }
3388
3341 R_SUCCEED(); 3389 R_SUCCEED();
3342} 3390}
3343 3391
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 7da675f27..3d64b6fb0 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -126,8 +126,6 @@ public:
126 return m_block_info_manager; 126 return m_block_info_manager;
127 } 127 }
128 128
129 bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const;
130
131 Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, 129 Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
132 KPhysicalAddress phys_addr, KProcessAddress region_start, 130 KPhysicalAddress phys_addr, KProcessAddress region_start,
133 size_t region_num_pages, KMemoryState state, KMemoryPermission perm) { 131 size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
@@ -162,6 +160,21 @@ public:
162 void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size, 160 void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
163 const KPageGroup& pg); 161 const KPageGroup& pg);
164 162
163 KProcessAddress GetRegionAddress(Svc::MemoryState state) const;
164 size_t GetRegionSize(Svc::MemoryState state) const;
165 bool CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const;
166
167 KProcessAddress GetRegionAddress(KMemoryState state) const {
168 return this->GetRegionAddress(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
169 }
170 size_t GetRegionSize(KMemoryState state) const {
171 return this->GetRegionSize(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
172 }
173 bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
174 return this->CanContain(addr, size,
175 static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
176 }
177
165protected: 178protected:
166 struct PageLinkedList { 179 struct PageLinkedList {
167 private: 180 private:
@@ -204,12 +217,13 @@ protected:
204private: 217private:
205 enum class OperationType : u32 { 218 enum class OperationType : u32 {
206 Map = 0, 219 Map = 0,
207 MapFirst = 1, 220 MapGroup = 1,
208 MapGroup = 2, 221 MapFirstGroup = 2,
209 Unmap = 3, 222 Unmap = 3,
210 ChangePermissions = 4, 223 ChangePermissions = 4,
211 ChangePermissionsAndRefresh = 5, 224 ChangePermissionsAndRefresh = 5,
212 Separate = 6, 225 ChangePermissionsAndRefreshAndFlush = 6,
226 Separate = 7,
213 }; 227 };
214 228
215 static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = 229 static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
@@ -228,8 +242,6 @@ private:
228 Result Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm, 242 Result Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm,
229 OperationType operation, KPhysicalAddress map_addr = 0); 243 OperationType operation, KPhysicalAddress map_addr = 0);
230 void FinalizeUpdate(PageLinkedList* page_list); 244 void FinalizeUpdate(PageLinkedList* page_list);
231 KProcessAddress GetRegionAddress(KMemoryState state) const;
232 size_t GetRegionSize(KMemoryState state) const;
233 245
234 KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages, 246 KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
235 size_t num_pages, size_t alignment, size_t offset, 247 size_t num_pages, size_t alignment, size_t offset,
@@ -252,6 +264,13 @@ private:
252 KMemoryAttribute attr_mask, KMemoryAttribute attr) const; 264 KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
253 Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, 265 Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
254 KMemoryAttribute* out_attr, size_t* out_blocks_needed, 266 KMemoryAttribute* out_attr, size_t* out_blocks_needed,
267 KMemoryBlockManager::const_iterator it, KProcessAddress last_addr,
268 KMemoryState state_mask, KMemoryState state,
269 KMemoryPermission perm_mask, KMemoryPermission perm,
270 KMemoryAttribute attr_mask, KMemoryAttribute attr,
271 KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
272 Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
273 KMemoryAttribute* out_attr, size_t* out_blocks_needed,
255 KProcessAddress addr, size_t size, KMemoryState state_mask, 274 KProcessAddress addr, size_t size, KMemoryState state_mask,
256 KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, 275 KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
257 KMemoryAttribute attr_mask, KMemoryAttribute attr, 276 KMemoryAttribute attr_mask, KMemoryAttribute attr,
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 4a099286b..7fa34d693 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -149,7 +149,7 @@ u64 KProcess::GetTotalPhysicalMemoryUsed() {
149} 149}
150 150
151u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() { 151u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() {
152 return this->GetTotalPhysicalMemoryUsed() - this->GetSystemResourceUsage(); 152 return this->GetTotalPhysicalMemoryUsed() - this->GetSystemResourceSize();
153} 153}
154 154
155bool KProcess::ReleaseUserException(KThread* thread) { 155bool KProcess::ReleaseUserException(KThread* thread) {
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index cb025c3d6..24433d32b 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -623,14 +623,33 @@ struct KernelCore::Impl {
623 ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert( 623 ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
624 GetInteger(slab_start_phys_addr), slab_region_size, KMemoryRegionType_DramKernelSlab)); 624 GetInteger(slab_start_phys_addr), slab_region_size, KMemoryRegionType_DramKernelSlab));
625 625
626 // Insert a physical region for the secure applet memory.
627 const auto secure_applet_end_phys_addr =
628 slab_end_phys_addr + KSystemControl::SecureAppletMemorySize;
629 if constexpr (KSystemControl::SecureAppletMemorySize > 0) {
630 ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
631 GetInteger(slab_end_phys_addr), KSystemControl::SecureAppletMemorySize,
632 KMemoryRegionType_DramKernelSecureAppletMemory));
633 }
634
635 // Insert a physical region for the unknown debug2 region.
636 constexpr size_t SecureUnknownRegionSize = 0;
637 const size_t secure_unknown_size = SecureUnknownRegionSize;
638 const auto secure_unknown_end_phys_addr = secure_applet_end_phys_addr + secure_unknown_size;
639 if constexpr (SecureUnknownRegionSize > 0) {
640 ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
641 GetInteger(secure_applet_end_phys_addr), secure_unknown_size,
642 KMemoryRegionType_DramKernelSecureUnknown));
643 }
644
626 // Determine size available for kernel page table heaps, requiring > 8 MB. 645 // Determine size available for kernel page table heaps, requiring > 8 MB.
627 const KPhysicalAddress resource_end_phys_addr = slab_start_phys_addr + resource_region_size; 646 const KPhysicalAddress resource_end_phys_addr = slab_start_phys_addr + resource_region_size;
628 const size_t page_table_heap_size = resource_end_phys_addr - slab_end_phys_addr; 647 const size_t page_table_heap_size = resource_end_phys_addr - secure_unknown_end_phys_addr;
629 ASSERT(page_table_heap_size / 4_MiB > 2); 648 ASSERT(page_table_heap_size / 4_MiB > 2);
630 649
631 // Insert a physical region for the kernel page table heap region 650 // Insert a physical region for the kernel page table heap region
632 ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert( 651 ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
633 GetInteger(slab_end_phys_addr), page_table_heap_size, 652 GetInteger(secure_unknown_end_phys_addr), page_table_heap_size,
634 KMemoryRegionType_DramKernelPtHeap)); 653 KMemoryRegionType_DramKernelPtHeap));
635 654
636 // All DRAM regions that we haven't tagged by this point will be mapped under the linear 655 // All DRAM regions that we haven't tagged by this point will be mapped under the linear
diff --git a/src/core/hle/kernel/svc/svc_memory.cpp b/src/core/hle/kernel/svc/svc_memory.cpp
index 2cab74127..97f1210de 100644
--- a/src/core/hle/kernel/svc/svc_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_memory.cpp
@@ -76,7 +76,7 @@ Result MapUnmapMemorySanityChecks(const KPageTable& manager, u64 dst_addr, u64 s
76} // namespace 76} // namespace
77 77
78Result SetMemoryPermission(Core::System& system, u64 address, u64 size, MemoryPermission perm) { 78Result SetMemoryPermission(Core::System& system, u64 address, u64 size, MemoryPermission perm) {
79 LOG_DEBUG(Kernel_SVC, "called, address=0x{:016X}, size=0x{:X}, perm=0x{:08X", address, size, 79 LOG_DEBUG(Kernel_SVC, "called, address=0x{:016X}, size=0x{:X}, perm=0x{:08X}", address, size,
80 perm); 80 perm);
81 81
82 // Validate address / size. 82 // Validate address / size.
@@ -108,10 +108,16 @@ Result SetMemoryAttribute(Core::System& system, u64 address, u64 size, u32 mask,
108 R_UNLESS((address < address + size), ResultInvalidCurrentMemory); 108 R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
109 109
110 // Validate the attribute and mask. 110 // Validate the attribute and mask.
111 constexpr u32 SupportedMask = static_cast<u32>(MemoryAttribute::Uncached); 111 constexpr u32 SupportedMask =
112 static_cast<u32>(MemoryAttribute::Uncached | MemoryAttribute::PermissionLocked);
112 R_UNLESS((mask | attr) == mask, ResultInvalidCombination); 113 R_UNLESS((mask | attr) == mask, ResultInvalidCombination);
113 R_UNLESS((mask | attr | SupportedMask) == SupportedMask, ResultInvalidCombination); 114 R_UNLESS((mask | attr | SupportedMask) == SupportedMask, ResultInvalidCombination);
114 115
116 // Check that permission locked is either being set or not masked.
117 R_UNLESS((static_cast<Svc::MemoryAttribute>(mask) & Svc::MemoryAttribute::PermissionLocked) ==
118 (static_cast<Svc::MemoryAttribute>(attr) & Svc::MemoryAttribute::PermissionLocked),
119 ResultInvalidCombination);
120
115 // Validate that the region is in range for the current process. 121 // Validate that the region is in range for the current process.
116 auto& page_table{GetCurrentProcess(system.Kernel()).GetPageTable()}; 122 auto& page_table{GetCurrentProcess(system.Kernel()).GetPageTable()};
117 R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory); 123 R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h
index 7f380ca4f..251e6013c 100644
--- a/src/core/hle/kernel/svc_types.h
+++ b/src/core/hle/kernel/svc_types.h
@@ -46,6 +46,7 @@ enum class MemoryAttribute : u32 {
46 IpcLocked = (1 << 1), 46 IpcLocked = (1 << 1),
47 DeviceShared = (1 << 2), 47 DeviceShared = (1 << 2),
48 Uncached = (1 << 3), 48 Uncached = (1 << 3),
49 PermissionLocked = (1 << 4),
49}; 50};
50DECLARE_ENUM_FLAG_OPERATORS(MemoryAttribute); 51DECLARE_ENUM_FLAG_OPERATORS(MemoryAttribute);
51 52
diff --git a/src/core/hle/service/acc/acc.cpp b/src/core/hle/service/acc/acc.cpp
index b971401e6..1b1c8190e 100644
--- a/src/core/hle/service/acc/acc.cpp
+++ b/src/core/hle/service/acc/acc.cpp
@@ -49,7 +49,7 @@ public:
49 : ServiceFramework{system_, "IManagerForSystemService"} { 49 : ServiceFramework{system_, "IManagerForSystemService"} {
50 // clang-format off 50 // clang-format off
51 static const FunctionInfo functions[] = { 51 static const FunctionInfo functions[] = {
52 {0, nullptr, "CheckAvailability"}, 52 {0, &IManagerForSystemService::CheckAvailability, "CheckAvailability"},
53 {1, nullptr, "GetAccountId"}, 53 {1, nullptr, "GetAccountId"},
54 {2, nullptr, "EnsureIdTokenCacheAsync"}, 54 {2, nullptr, "EnsureIdTokenCacheAsync"},
55 {3, nullptr, "LoadIdTokenCache"}, 55 {3, nullptr, "LoadIdTokenCache"},
@@ -78,6 +78,13 @@ public:
78 78
79 RegisterHandlers(functions); 79 RegisterHandlers(functions);
80 } 80 }
81
82private:
83 void CheckAvailability(HLERequestContext& ctx) {
84 LOG_WARNING(Service_ACC, "(STUBBED) called");
85 IPC::ResponseBuilder rb{ctx, 2};
86 rb.Push(ResultSuccess);
87 }
81}; 88};
82 89
83// 3.0.0+ 90// 3.0.0+
@@ -400,13 +407,13 @@ protected:
400 IPC::RequestParser rp{ctx}; 407 IPC::RequestParser rp{ctx};
401 const auto base = rp.PopRaw<ProfileBase>(); 408 const auto base = rp.PopRaw<ProfileBase>();
402 409
403 const auto user_data = ctx.ReadBuffer(); 410 const auto image_data = ctx.ReadBufferA(0);
404 const auto image_data = ctx.ReadBuffer(1); 411 const auto user_data = ctx.ReadBufferX(0);
405 412
406 LOG_DEBUG(Service_ACC, "called, username='{}', timestamp={:016X}, uuid=0x{}", 413 LOG_INFO(Service_ACC, "called, username='{}', timestamp={:016X}, uuid=0x{}",
407 Common::StringFromFixedZeroTerminatedBuffer( 414 Common::StringFromFixedZeroTerminatedBuffer(
408 reinterpret_cast<const char*>(base.username.data()), base.username.size()), 415 reinterpret_cast<const char*>(base.username.data()), base.username.size()),
409 base.timestamp, base.user_uuid.RawString()); 416 base.timestamp, base.user_uuid.RawString());
410 417
411 if (user_data.size() < sizeof(UserData)) { 418 if (user_data.size() < sizeof(UserData)) {
412 LOG_ERROR(Service_ACC, "UserData buffer too small!"); 419 LOG_ERROR(Service_ACC, "UserData buffer too small!");
@@ -837,6 +844,29 @@ void Module::Interface::InitializeApplicationInfoV2(HLERequestContext& ctx) {
837 rb.Push(ResultSuccess); 844 rb.Push(ResultSuccess);
838} 845}
839 846
847void Module::Interface::BeginUserRegistration(HLERequestContext& ctx) {
848 const auto user_id = Common::UUID::MakeRandom();
849 profile_manager->CreateNewUser(user_id, "yuzu");
850
851 LOG_INFO(Service_ACC, "called, uuid={}", user_id.FormattedString());
852
853 IPC::ResponseBuilder rb{ctx, 6};
854 rb.Push(ResultSuccess);
855 rb.PushRaw(user_id);
856}
857
858void Module::Interface::CompleteUserRegistration(HLERequestContext& ctx) {
859 IPC::RequestParser rp{ctx};
860 Common::UUID user_id = rp.PopRaw<Common::UUID>();
861
862 LOG_INFO(Service_ACC, "called, uuid={}", user_id.FormattedString());
863
864 profile_manager->WriteUserSaveFile();
865
866 IPC::ResponseBuilder rb{ctx, 2};
867 rb.Push(ResultSuccess);
868}
869
840void Module::Interface::GetProfileEditor(HLERequestContext& ctx) { 870void Module::Interface::GetProfileEditor(HLERequestContext& ctx) {
841 IPC::RequestParser rp{ctx}; 871 IPC::RequestParser rp{ctx};
842 Common::UUID user_id = rp.PopRaw<Common::UUID>(); 872 Common::UUID user_id = rp.PopRaw<Common::UUID>();
@@ -880,6 +910,17 @@ void Module::Interface::StoreSaveDataThumbnailApplication(HLERequestContext& ctx
880 StoreSaveDataThumbnail(ctx, uuid, tid); 910 StoreSaveDataThumbnail(ctx, uuid, tid);
881} 911}
882 912
913void Module::Interface::GetBaasAccountManagerForSystemService(HLERequestContext& ctx) {
914 IPC::RequestParser rp{ctx};
915 const auto uuid = rp.PopRaw<Common::UUID>();
916
917 LOG_INFO(Service_ACC, "called, uuid=0x{}", uuid.RawString());
918
919 IPC::ResponseBuilder rb{ctx, 2, 0, 1};
920 rb.Push(ResultSuccess);
921 rb.PushIpcInterface<IManagerForSystemService>(system, uuid);
922}
923
883void Module::Interface::StoreSaveDataThumbnailSystem(HLERequestContext& ctx) { 924void Module::Interface::StoreSaveDataThumbnailSystem(HLERequestContext& ctx) {
884 IPC::RequestParser rp{ctx}; 925 IPC::RequestParser rp{ctx};
885 const auto uuid = rp.PopRaw<Common::UUID>(); 926 const auto uuid = rp.PopRaw<Common::UUID>();
diff --git a/src/core/hle/service/acc/acc.h b/src/core/hle/service/acc/acc.h
index 6b4735c2f..0395229b4 100644
--- a/src/core/hle/service/acc/acc.h
+++ b/src/core/hle/service/acc/acc.h
@@ -33,10 +33,13 @@ public:
33 void TrySelectUserWithoutInteraction(HLERequestContext& ctx); 33 void TrySelectUserWithoutInteraction(HLERequestContext& ctx);
34 void IsUserAccountSwitchLocked(HLERequestContext& ctx); 34 void IsUserAccountSwitchLocked(HLERequestContext& ctx);
35 void InitializeApplicationInfoV2(HLERequestContext& ctx); 35 void InitializeApplicationInfoV2(HLERequestContext& ctx);
36 void BeginUserRegistration(HLERequestContext& ctx);
37 void CompleteUserRegistration(HLERequestContext& ctx);
36 void GetProfileEditor(HLERequestContext& ctx); 38 void GetProfileEditor(HLERequestContext& ctx);
37 void ListQualifiedUsers(HLERequestContext& ctx); 39 void ListQualifiedUsers(HLERequestContext& ctx);
38 void ListOpenContextStoredUsers(HLERequestContext& ctx); 40 void ListOpenContextStoredUsers(HLERequestContext& ctx);
39 void StoreSaveDataThumbnailApplication(HLERequestContext& ctx); 41 void StoreSaveDataThumbnailApplication(HLERequestContext& ctx);
42 void GetBaasAccountManagerForSystemService(HLERequestContext& ctx);
40 void StoreSaveDataThumbnailSystem(HLERequestContext& ctx); 43 void StoreSaveDataThumbnailSystem(HLERequestContext& ctx);
41 44
42 private: 45 private:
diff --git a/src/core/hle/service/acc/acc_su.cpp b/src/core/hle/service/acc/acc_su.cpp
index d9882ecd3..770d13ec5 100644
--- a/src/core/hle/service/acc/acc_su.cpp
+++ b/src/core/hle/service/acc/acc_su.cpp
@@ -23,7 +23,7 @@ ACC_SU::ACC_SU(std::shared_ptr<Module> module_, std::shared_ptr<ProfileManager>
23 {99, nullptr, "DebugActivateOpenContextRetention"}, 23 {99, nullptr, "DebugActivateOpenContextRetention"},
24 {100, nullptr, "GetUserRegistrationNotifier"}, 24 {100, nullptr, "GetUserRegistrationNotifier"},
25 {101, nullptr, "GetUserStateChangeNotifier"}, 25 {101, nullptr, "GetUserStateChangeNotifier"},
26 {102, nullptr, "GetBaasAccountManagerForSystemService"}, 26 {102, &ACC_SU::GetBaasAccountManagerForSystemService, "GetBaasAccountManagerForSystemService"},
27 {103, nullptr, "GetBaasUserAvailabilityChangeNotifier"}, 27 {103, nullptr, "GetBaasUserAvailabilityChangeNotifier"},
28 {104, nullptr, "GetProfileUpdateNotifier"}, 28 {104, nullptr, "GetProfileUpdateNotifier"},
29 {105, nullptr, "CheckNetworkServiceAvailabilityAsync"}, 29 {105, nullptr, "CheckNetworkServiceAvailabilityAsync"},
@@ -40,8 +40,8 @@ ACC_SU::ACC_SU(std::shared_ptr<Module> module_, std::shared_ptr<ProfileManager>
40 {152, nullptr, "LoadSignedDeviceIdentifierCacheForNintendoAccount"}, 40 {152, nullptr, "LoadSignedDeviceIdentifierCacheForNintendoAccount"},
41 {190, nullptr, "GetUserLastOpenedApplication"}, 41 {190, nullptr, "GetUserLastOpenedApplication"},
42 {191, nullptr, "ActivateOpenContextHolder"}, 42 {191, nullptr, "ActivateOpenContextHolder"},
43 {200, nullptr, "BeginUserRegistration"}, 43 {200, &ACC_SU::BeginUserRegistration, "BeginUserRegistration"},
44 {201, nullptr, "CompleteUserRegistration"}, 44 {201, &ACC_SU::CompleteUserRegistration, "CompleteUserRegistration"},
45 {202, nullptr, "CancelUserRegistration"}, 45 {202, nullptr, "CancelUserRegistration"},
46 {203, nullptr, "DeleteUser"}, 46 {203, nullptr, "DeleteUser"},
47 {204, nullptr, "SetUserPosition"}, 47 {204, nullptr, "SetUserPosition"},
diff --git a/src/core/hle/service/acc/profile_manager.h b/src/core/hle/service/acc/profile_manager.h
index 993a5a57a..900e32200 100644
--- a/src/core/hle/service/acc/profile_manager.h
+++ b/src/core/hle/service/acc/profile_manager.h
@@ -96,9 +96,10 @@ public:
96 bool SetProfileBaseAndData(Common::UUID uuid, const ProfileBase& profile_new, 96 bool SetProfileBaseAndData(Common::UUID uuid, const ProfileBase& profile_new,
97 const UserData& data_new); 97 const UserData& data_new);
98 98
99 void WriteUserSaveFile();
100
99private: 101private:
100 void ParseUserSaveFile(); 102 void ParseUserSaveFile();
101 void WriteUserSaveFile();
102 std::optional<std::size_t> AddToProfiles(const ProfileInfo& profile); 103 std::optional<std::size_t> AddToProfiles(const ProfileInfo& profile);
103 bool RemoveProfileAtIndex(std::size_t index); 104 bool RemoveProfileAtIndex(std::size_t index);
104 105
diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp
index ac376b55a..98765b81a 100644
--- a/src/core/hle/service/am/am.cpp
+++ b/src/core/hle/service/am/am.cpp
@@ -210,8 +210,8 @@ IDisplayController::IDisplayController(Core::System& system_)
210 {21, nullptr, "ClearAppletTransitionBuffer"}, 210 {21, nullptr, "ClearAppletTransitionBuffer"},
211 {22, nullptr, "AcquireLastApplicationCaptureSharedBuffer"}, 211 {22, nullptr, "AcquireLastApplicationCaptureSharedBuffer"},
212 {23, nullptr, "ReleaseLastApplicationCaptureSharedBuffer"}, 212 {23, nullptr, "ReleaseLastApplicationCaptureSharedBuffer"},
213 {24, nullptr, "AcquireLastForegroundCaptureSharedBuffer"}, 213 {24, &IDisplayController::AcquireLastForegroundCaptureSharedBuffer, "AcquireLastForegroundCaptureSharedBuffer"},
214 {25, nullptr, "ReleaseLastForegroundCaptureSharedBuffer"}, 214 {25, &IDisplayController::ReleaseLastForegroundCaptureSharedBuffer, "ReleaseLastForegroundCaptureSharedBuffer"},
215 {26, &IDisplayController::AcquireCallerAppletCaptureSharedBuffer, "AcquireCallerAppletCaptureSharedBuffer"}, 215 {26, &IDisplayController::AcquireCallerAppletCaptureSharedBuffer, "AcquireCallerAppletCaptureSharedBuffer"},
216 {27, &IDisplayController::ReleaseCallerAppletCaptureSharedBuffer, "ReleaseCallerAppletCaptureSharedBuffer"}, 216 {27, &IDisplayController::ReleaseCallerAppletCaptureSharedBuffer, "ReleaseCallerAppletCaptureSharedBuffer"},
217 {28, nullptr, "TakeScreenShotOfOwnLayerEx"}, 217 {28, nullptr, "TakeScreenShotOfOwnLayerEx"},
@@ -239,6 +239,22 @@ void IDisplayController::TakeScreenShotOfOwnLayer(HLERequestContext& ctx) {
239 rb.Push(ResultSuccess); 239 rb.Push(ResultSuccess);
240} 240}
241 241
242void IDisplayController::AcquireLastForegroundCaptureSharedBuffer(HLERequestContext& ctx) {
243 LOG_WARNING(Service_AM, "(STUBBED) called");
244
245 IPC::ResponseBuilder rb{ctx, 4};
246 rb.Push(ResultSuccess);
247 rb.Push(1U);
248 rb.Push(0);
249}
250
251void IDisplayController::ReleaseLastForegroundCaptureSharedBuffer(HLERequestContext& ctx) {
252 LOG_WARNING(Service_AM, "(STUBBED) called");
253
254 IPC::ResponseBuilder rb{ctx, 2};
255 rb.Push(ResultSuccess);
256}
257
242void IDisplayController::AcquireCallerAppletCaptureSharedBuffer(HLERequestContext& ctx) { 258void IDisplayController::AcquireCallerAppletCaptureSharedBuffer(HLERequestContext& ctx) {
243 LOG_WARNING(Service_AM, "(STUBBED) called"); 259 LOG_WARNING(Service_AM, "(STUBBED) called");
244 260
@@ -1557,7 +1573,7 @@ ILibraryAppletSelfAccessor::ILibraryAppletSelfAccessor(Core::System& system_)
1557 {100, nullptr, "CreateGameMovieTrimmer"}, 1573 {100, nullptr, "CreateGameMovieTrimmer"},
1558 {101, nullptr, "ReserveResourceForMovieOperation"}, 1574 {101, nullptr, "ReserveResourceForMovieOperation"},
1559 {102, nullptr, "UnreserveResourceForMovieOperation"}, 1575 {102, nullptr, "UnreserveResourceForMovieOperation"},
1560 {110, nullptr, "GetMainAppletAvailableUsers"}, 1576 {110, &ILibraryAppletSelfAccessor::GetMainAppletAvailableUsers, "GetMainAppletAvailableUsers"},
1561 {120, nullptr, "GetLaunchStorageInfoForDebug"}, 1577 {120, nullptr, "GetLaunchStorageInfoForDebug"},
1562 {130, nullptr, "GetGpuErrorDetectedSystemEvent"}, 1578 {130, nullptr, "GetGpuErrorDetectedSystemEvent"},
1563 {140, nullptr, "SetApplicationMemoryReservation"}, 1579 {140, nullptr, "SetApplicationMemoryReservation"},
@@ -1652,6 +1668,25 @@ void ILibraryAppletSelfAccessor::GetCallerAppletIdentityInfo(HLERequestContext&
1652 rb.PushRaw(applet_info); 1668 rb.PushRaw(applet_info);
1653} 1669}
1654 1670
1671void ILibraryAppletSelfAccessor::GetMainAppletAvailableUsers(HLERequestContext& ctx) {
1672 const Service::Account::ProfileManager manager{};
1673 bool is_empty{true};
1674 s32 user_count{-1};
1675
1676 LOG_INFO(Service_AM, "called");
1677
1678 if (manager.GetUserCount() > 0) {
1679 is_empty = false;
1680 user_count = static_cast<s32>(manager.GetUserCount());
1681 ctx.WriteBuffer(manager.GetAllUsers());
1682 }
1683
1684 IPC::ResponseBuilder rb{ctx, 4};
1685 rb.Push(ResultSuccess);
1686 rb.Push<u8>(is_empty);
1687 rb.Push(user_count);
1688}
1689
1655void ILibraryAppletSelfAccessor::PushInShowAlbum() { 1690void ILibraryAppletSelfAccessor::PushInShowAlbum() {
1656 const Applets::CommonArguments arguments{ 1691 const Applets::CommonArguments arguments{
1657 .arguments_version = Applets::CommonArgumentVersion::Version3, 1692 .arguments_version = Applets::CommonArgumentVersion::Version3,
diff --git a/src/core/hle/service/am/am.h b/src/core/hle/service/am/am.h
index 4a045cfd4..64b3f3fe2 100644
--- a/src/core/hle/service/am/am.h
+++ b/src/core/hle/service/am/am.h
@@ -124,6 +124,8 @@ public:
124private: 124private:
125 void GetCallerAppletCaptureImageEx(HLERequestContext& ctx); 125 void GetCallerAppletCaptureImageEx(HLERequestContext& ctx);
126 void TakeScreenShotOfOwnLayer(HLERequestContext& ctx); 126 void TakeScreenShotOfOwnLayer(HLERequestContext& ctx);
127 void AcquireLastForegroundCaptureSharedBuffer(HLERequestContext& ctx);
128 void ReleaseLastForegroundCaptureSharedBuffer(HLERequestContext& ctx);
127 void AcquireCallerAppletCaptureSharedBuffer(HLERequestContext& ctx); 129 void AcquireCallerAppletCaptureSharedBuffer(HLERequestContext& ctx);
128 void ReleaseCallerAppletCaptureSharedBuffer(HLERequestContext& ctx); 130 void ReleaseCallerAppletCaptureSharedBuffer(HLERequestContext& ctx);
129}; 131};
@@ -345,6 +347,7 @@ private:
345 void GetLibraryAppletInfo(HLERequestContext& ctx); 347 void GetLibraryAppletInfo(HLERequestContext& ctx);
346 void ExitProcessAndReturn(HLERequestContext& ctx); 348 void ExitProcessAndReturn(HLERequestContext& ctx);
347 void GetCallerAppletIdentityInfo(HLERequestContext& ctx); 349 void GetCallerAppletIdentityInfo(HLERequestContext& ctx);
350 void GetMainAppletAvailableUsers(HLERequestContext& ctx);
348 351
349 void PushInShowAlbum(); 352 void PushInShowAlbum();
350 void PushInShowCabinetData(); 353 void PushInShowCabinetData();
diff --git a/src/core/hle/service/hle_ipc.cpp b/src/core/hle/service/hle_ipc.cpp
index f6a1e54f2..6f3ae3cc4 100644
--- a/src/core/hle/service/hle_ipc.cpp
+++ b/src/core/hle/service/hle_ipc.cpp
@@ -23,6 +23,17 @@
23#include "core/hle/service/ipc_helpers.h" 23#include "core/hle/service/ipc_helpers.h"
24#include "core/memory.h" 24#include "core/memory.h"
25 25
26namespace {
27static thread_local std::array read_buffer_data_a{
28 Common::ScratchBuffer<u8>(),
29 Common::ScratchBuffer<u8>(),
30};
31static thread_local std::array read_buffer_data_x{
32 Common::ScratchBuffer<u8>(),
33 Common::ScratchBuffer<u8>(),
34};
35} // Anonymous namespace
36
26namespace Service { 37namespace Service {
27 38
28SessionRequestHandler::SessionRequestHandler(Kernel::KernelCore& kernel_, const char* service_name_) 39SessionRequestHandler::SessionRequestHandler(Kernel::KernelCore& kernel_, const char* service_name_)
@@ -328,26 +339,57 @@ std::vector<u8> HLERequestContext::ReadBufferCopy(std::size_t buffer_index) cons
328 } 339 }
329} 340}
330 341
331std::span<const u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const { 342std::span<const u8> HLERequestContext::ReadBufferA(std::size_t buffer_index) const {
332 static thread_local std::array read_buffer_a{ 343 static thread_local std::array read_buffer_a{
333 Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0), 344 Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
334 Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0), 345 Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
335 }; 346 };
336 static thread_local std::array read_buffer_data_a{ 347
337 Common::ScratchBuffer<u8>(), 348 ASSERT_OR_EXECUTE_MSG(
338 Common::ScratchBuffer<u8>(), 349 BufferDescriptorA().size() > buffer_index, { return {}; },
339 }; 350 "BufferDescriptorA invalid buffer_index {}", buffer_index);
351 auto& read_buffer = read_buffer_a[buffer_index];
352 return read_buffer.Read(BufferDescriptorA()[buffer_index].Address(),
353 BufferDescriptorA()[buffer_index].Size(),
354 &read_buffer_data_a[buffer_index]);
355}
356
357std::span<const u8> HLERequestContext::ReadBufferX(std::size_t buffer_index) const {
340 static thread_local std::array read_buffer_x{ 358 static thread_local std::array read_buffer_x{
341 Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0), 359 Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
342 Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0), 360 Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
343 }; 361 };
344 static thread_local std::array read_buffer_data_x{ 362
345 Common::ScratchBuffer<u8>(), 363 ASSERT_OR_EXECUTE_MSG(
346 Common::ScratchBuffer<u8>(), 364 BufferDescriptorX().size() > buffer_index, { return {}; },
365 "BufferDescriptorX invalid buffer_index {}", buffer_index);
366 auto& read_buffer = read_buffer_x[buffer_index];
367 return read_buffer.Read(BufferDescriptorX()[buffer_index].Address(),
368 BufferDescriptorX()[buffer_index].Size(),
369 &read_buffer_data_x[buffer_index]);
370}
371
372std::span<const u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const {
373 static thread_local std::array read_buffer_a{
374 Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
375 Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
376 };
377 static thread_local std::array read_buffer_x{
378 Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
379 Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
347 }; 380 };
348 381
349 const bool is_buffer_a{BufferDescriptorA().size() > buffer_index && 382 const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
350 BufferDescriptorA()[buffer_index].Size()}; 383 BufferDescriptorA()[buffer_index].Size()};
384 const bool is_buffer_x{BufferDescriptorX().size() > buffer_index &&
385 BufferDescriptorX()[buffer_index].Size()};
386
387 if (is_buffer_a && is_buffer_x) {
388 LOG_WARNING(Input, "Both buffer descriptors are available a.size={}, x.size={}",
389 BufferDescriptorA()[buffer_index].Size(),
390 BufferDescriptorX()[buffer_index].Size());
391 }
392
351 if (is_buffer_a) { 393 if (is_buffer_a) {
352 ASSERT_OR_EXECUTE_MSG( 394 ASSERT_OR_EXECUTE_MSG(
353 BufferDescriptorA().size() > buffer_index, { return {}; }, 395 BufferDescriptorA().size() > buffer_index, { return {}; },
diff --git a/src/core/hle/service/hle_ipc.h b/src/core/hle/service/hle_ipc.h
index 4bd24c899..ad5259a5c 100644
--- a/src/core/hle/service/hle_ipc.h
+++ b/src/core/hle/service/hle_ipc.h
@@ -253,6 +253,12 @@ public:
253 return domain_message_header.has_value(); 253 return domain_message_header.has_value();
254 } 254 }
255 255
256 /// Helper function to get a span of a buffer using the buffer descriptor A
257 [[nodiscard]] std::span<const u8> ReadBufferA(std::size_t buffer_index = 0) const;
258
259 /// Helper function to get a span of a buffer using the buffer descriptor X
260 [[nodiscard]] std::span<const u8> ReadBufferX(std::size_t buffer_index = 0) const;
261
256 /// Helper function to get a span of a buffer using the appropriate buffer descriptor 262 /// Helper function to get a span of a buffer using the appropriate buffer descriptor
257 [[nodiscard]] std::span<const u8> ReadBuffer(std::size_t buffer_index = 0) const; 263 [[nodiscard]] std::span<const u8> ReadBuffer(std::size_t buffer_index = 0) const;
258 264
diff --git a/src/core/hle/service/mii/types/core_data.cpp b/src/core/hle/service/mii/types/core_data.cpp
index 970c748ca..ba1da76ba 100644
--- a/src/core/hle/service/mii/types/core_data.cpp
+++ b/src/core/hle/service/mii/types/core_data.cpp
@@ -41,6 +41,7 @@ void CoreData::BuildRandom(Age age, Gender gender, Race race) {
41 } 41 }
42 } 42 }
43 43
44 SetDefault();
44 SetGender(gender); 45 SetGender(gender);
45 SetFavoriteColor(MiiUtil::GetRandomValue(FavoriteColor::Max)); 46 SetFavoriteColor(MiiUtil::GetRandomValue(FavoriteColor::Max));
46 SetRegionMove(0); 47 SetRegionMove(0);
diff --git a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp
index 469a53244..2e29bc848 100644
--- a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp
+++ b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp
@@ -46,7 +46,7 @@ Result AllocateIoForProcessAddressSpace(Common::ProcessAddress* out_map_address,
46 // Get bounds of where mapping is possible. 46 // Get bounds of where mapping is possible.
47 const VAddr alias_code_begin = GetInteger(page_table.GetAliasCodeRegionStart()); 47 const VAddr alias_code_begin = GetInteger(page_table.GetAliasCodeRegionStart());
48 const VAddr alias_code_size = page_table.GetAliasCodeRegionSize() / YUZU_PAGESIZE; 48 const VAddr alias_code_size = page_table.GetAliasCodeRegionSize() / YUZU_PAGESIZE;
49 const auto state = Kernel::KMemoryState::Io; 49 const auto state = Kernel::KMemoryState::IoMemory;
50 const auto perm = Kernel::KMemoryPermission::UserReadWrite; 50 const auto perm = Kernel::KMemoryPermission::UserReadWrite;
51 std::mt19937_64 rng{process->GetRandomEntropy(0)}; 51 std::mt19937_64 rng{process->GetRandomEntropy(0)};
52 52
diff --git a/src/core/hle/service/prepo/prepo.cpp b/src/core/hle/service/prepo/prepo.cpp
index ec4a84989..14e8df63a 100644
--- a/src/core/hle/service/prepo/prepo.cpp
+++ b/src/core/hle/service/prepo/prepo.cpp
@@ -58,14 +58,8 @@ private:
58 IPC::RequestParser rp{ctx}; 58 IPC::RequestParser rp{ctx};
59 const auto process_id = rp.PopRaw<u64>(); 59 const auto process_id = rp.PopRaw<u64>();
60 60
61 const auto data1 = ctx.ReadBuffer(0); 61 const auto data1 = ctx.ReadBufferA(0);
62 const auto data2 = [&ctx] { 62 const auto data2 = ctx.ReadBufferX(0);
63 if (ctx.CanReadBuffer(1)) {
64 return ctx.ReadBuffer(1);
65 }
66
67 return std::span<const u8>{};
68 }();
69 63
70 LOG_DEBUG(Service_PREPO, 64 LOG_DEBUG(Service_PREPO,
71 "called, type={:02X}, process_id={:016X}, data1_size={:016X}, data2_size={:016X}", 65 "called, type={:02X}, process_id={:016X}, data1_size={:016X}, data2_size={:016X}",
@@ -85,14 +79,8 @@ private:
85 const auto user_id = rp.PopRaw<u128>(); 79 const auto user_id = rp.PopRaw<u128>();
86 const auto process_id = rp.PopRaw<u64>(); 80 const auto process_id = rp.PopRaw<u64>();
87 81
88 const auto data1 = ctx.ReadBuffer(0); 82 const auto data1 = ctx.ReadBufferA(0);
89 const auto data2 = [&ctx] { 83 const auto data2 = ctx.ReadBufferX(0);
90 if (ctx.CanReadBuffer(1)) {
91 return ctx.ReadBuffer(1);
92 }
93
94 return std::span<const u8>{};
95 }();
96 84
97 LOG_DEBUG(Service_PREPO, 85 LOG_DEBUG(Service_PREPO,
98 "called, type={:02X}, user_id={:016X}{:016X}, process_id={:016X}, " 86 "called, type={:02X}, user_id={:016X}{:016X}, process_id={:016X}, "
@@ -137,14 +125,8 @@ private:
137 IPC::RequestParser rp{ctx}; 125 IPC::RequestParser rp{ctx};
138 const auto title_id = rp.PopRaw<u64>(); 126 const auto title_id = rp.PopRaw<u64>();
139 127
140 const auto data1 = ctx.ReadBuffer(0); 128 const auto data1 = ctx.ReadBufferA(0);
141 const auto data2 = [&ctx] { 129 const auto data2 = ctx.ReadBufferX(0);
142 if (ctx.CanReadBuffer(1)) {
143 return ctx.ReadBuffer(1);
144 }
145
146 return std::span<const u8>{};
147 }();
148 130
149 LOG_DEBUG(Service_PREPO, "called, title_id={:016X}, data1_size={:016X}, data2_size={:016X}", 131 LOG_DEBUG(Service_PREPO, "called, title_id={:016X}, data1_size={:016X}, data2_size={:016X}",
150 title_id, data1.size(), data2.size()); 132 title_id, data1.size(), data2.size());
@@ -161,14 +143,8 @@ private:
161 const auto user_id = rp.PopRaw<u128>(); 143 const auto user_id = rp.PopRaw<u128>();
162 const auto title_id = rp.PopRaw<u64>(); 144 const auto title_id = rp.PopRaw<u64>();
163 145
164 const auto data1 = ctx.ReadBuffer(0); 146 const auto data1 = ctx.ReadBufferA(0);
165 const auto data2 = [&ctx] { 147 const auto data2 = ctx.ReadBufferX(0);
166 if (ctx.CanReadBuffer(1)) {
167 return ctx.ReadBuffer(1);
168 }
169
170 return std::span<const u8>{};
171 }();
172 148
173 LOG_DEBUG(Service_PREPO, 149 LOG_DEBUG(Service_PREPO,
174 "called, user_id={:016X}{:016X}, title_id={:016X}, data1_size={:016X}, " 150 "called, user_id={:016X}{:016X}, title_id={:016X}, data1_size={:016X}, "
diff --git a/src/core/hle/service/ptm/ts.cpp b/src/core/hle/service/ptm/ts.cpp
index ca064dd90..652f38b97 100644
--- a/src/core/hle/service/ptm/ts.cpp
+++ b/src/core/hle/service/ptm/ts.cpp
@@ -9,6 +9,35 @@
9 9
10namespace Service::PTM { 10namespace Service::PTM {
11 11
12enum class Location : u8 {
13 Internal,
14 External,
15};
16
17class ISession : public ServiceFramework<ISession> {
18public:
19 explicit ISession(Core::System& system_) : ServiceFramework{system_, "ISession"} {
20 // clang-format off
21 static const FunctionInfo functions[] = {
22 {0, nullptr, "GetTemperatureRange"},
23 {2, nullptr, "SetMeasurementMode"},
24 {4, &ISession::GetTemperature, "GetTemperature"},
25 };
26 // clang-format on
27
28 RegisterHandlers(functions);
29 }
30
31private:
32 void GetTemperature(HLERequestContext& ctx) {
33 constexpr f32 temperature = 35;
34
35 IPC::ResponseBuilder rb{ctx, 3};
36 rb.Push(ResultSuccess);
37 rb.Push(temperature);
38 }
39};
40
12TS::TS(Core::System& system_) : ServiceFramework{system_, "ts"} { 41TS::TS(Core::System& system_) : ServiceFramework{system_, "ts"} {
13 // clang-format off 42 // clang-format off
14 static const FunctionInfo functions[] = { 43 static const FunctionInfo functions[] = {
@@ -16,7 +45,7 @@ TS::TS(Core::System& system_) : ServiceFramework{system_, "ts"} {
16 {1, &TS::GetTemperature, "GetTemperature"}, 45 {1, &TS::GetTemperature, "GetTemperature"},
17 {2, nullptr, "SetMeasurementMode"}, 46 {2, nullptr, "SetMeasurementMode"},
18 {3, &TS::GetTemperatureMilliC, "GetTemperatureMilliC"}, 47 {3, &TS::GetTemperatureMilliC, "GetTemperatureMilliC"},
19 {4, nullptr, "OpenSession"}, 48 {4, &TS::OpenSession, "OpenSession"},
20 }; 49 };
21 // clang-format on 50 // clang-format on
22 51
@@ -47,4 +76,13 @@ void TS::GetTemperatureMilliC(HLERequestContext& ctx) {
47 rb.Push(temperature); 76 rb.Push(temperature);
48} 77}
49 78
79void TS::OpenSession(HLERequestContext& ctx) {
80 IPC::RequestParser rp{ctx};
81 [[maybe_unused]] const u32 device_code = rp.Pop<u32>();
82
83 IPC::ResponseBuilder rb{ctx, 2, 0, 1};
84 rb.Push(ResultSuccess);
85 rb.PushIpcInterface<ISession>(system);
86}
87
50} // namespace Service::PTM 88} // namespace Service::PTM
diff --git a/src/core/hle/service/ptm/ts.h b/src/core/hle/service/ptm/ts.h
index c3f43d5a3..a10a91a64 100644
--- a/src/core/hle/service/ptm/ts.h
+++ b/src/core/hle/service/ptm/ts.h
@@ -14,13 +14,9 @@ public:
14 ~TS() override; 14 ~TS() override;
15 15
16private: 16private:
17 enum class Location : u8 {
18 Internal,
19 External,
20 };
21
22 void GetTemperature(HLERequestContext& ctx); 17 void GetTemperature(HLERequestContext& ctx);
23 void GetTemperatureMilliC(HLERequestContext& ctx); 18 void GetTemperatureMilliC(HLERequestContext& ctx);
19 void OpenSession(HLERequestContext& ctx);
24}; 20};
25 21
26} // namespace Service::PTM 22} // namespace Service::PTM
diff --git a/src/core/hle/service/set/set_sys.cpp b/src/core/hle/service/set/set_sys.cpp
index 165b97dad..ec3af80af 100644
--- a/src/core/hle/service/set/set_sys.cpp
+++ b/src/core/hle/service/set/set_sys.cpp
@@ -5,8 +5,13 @@
5#include "common/logging/log.h" 5#include "common/logging/log.h"
6#include "common/settings.h" 6#include "common/settings.h"
7#include "common/string_util.h" 7#include "common/string_util.h"
8#include "core/core.h"
9#include "core/file_sys/content_archive.h"
8#include "core/file_sys/errors.h" 10#include "core/file_sys/errors.h"
9#include "core/file_sys/system_archive/system_version.h" 11#include "core/file_sys/nca_metadata.h"
12#include "core/file_sys/registered_cache.h"
13#include "core/file_sys/romfs.h"
14#include "core/file_sys/system_archive/system_archive.h"
10#include "core/hle/service/filesystem/filesystem.h" 15#include "core/hle/service/filesystem/filesystem.h"
11#include "core/hle/service/ipc_helpers.h" 16#include "core/hle/service/ipc_helpers.h"
12#include "core/hle/service/set/set.h" 17#include "core/hle/service/set/set.h"
@@ -22,18 +27,30 @@ enum class GetFirmwareVersionType {
22 Version2, 27 Version2,
23}; 28};
24 29
25void GetFirmwareVersionImpl(HLERequestContext& ctx, GetFirmwareVersionType type) { 30void GetFirmwareVersionImpl(Core::System& system, HLERequestContext& ctx,
26 LOG_WARNING(Service_SET, "called - Using hardcoded firmware version '{}'", 31 GetFirmwareVersionType type) {
27 FileSys::SystemArchive::GetLongDisplayVersion());
28
29 ASSERT_MSG(ctx.GetWriteBufferSize() == 0x100, 32 ASSERT_MSG(ctx.GetWriteBufferSize() == 0x100,
30 "FirmwareVersion output buffer must be 0x100 bytes in size!"); 33 "FirmwareVersion output buffer must be 0x100 bytes in size!");
31 34
32 // Instead of using the normal procedure of checking for the real system archive and if it 35 constexpr u64 FirmwareVersionSystemDataId = 0x0100000000000809;
33 // doesn't exist, synthesizing one, I feel that that would lead to strange bugs because a 36 auto& fsc = system.GetFileSystemController();
34 // user is using a really old or really new SystemVersion title. The synthesized one ensures 37
35 // consistency (currently reports as 5.1.0-0.0) 38 // Attempt to load version data from disk
36 const auto archive = FileSys::SystemArchive::SystemVersion(); 39 const FileSys::RegisteredCache* bis_system{};
40 std::unique_ptr<FileSys::NCA> nca{};
41 FileSys::VirtualDir romfs{};
42
43 bis_system = fsc.GetSystemNANDContents();
44 if (bis_system) {
45 nca = bis_system->GetEntry(FirmwareVersionSystemDataId, FileSys::ContentRecordType::Data);
46 }
47 if (nca) {
48 romfs = FileSys::ExtractRomFS(nca->GetRomFS());
49 }
50 if (!romfs) {
51 romfs = FileSys::ExtractRomFS(
52 FileSys::SystemArchive::SynthesizeSystemArchive(FirmwareVersionSystemDataId));
53 }
37 54
38 const auto early_exit_failure = [&ctx](std::string_view desc, Result code) { 55 const auto early_exit_failure = [&ctx](std::string_view desc, Result code) {
39 LOG_ERROR(Service_SET, "General failure while attempting to resolve firmware version ({}).", 56 LOG_ERROR(Service_SET, "General failure while attempting to resolve firmware version ({}).",
@@ -42,13 +59,7 @@ void GetFirmwareVersionImpl(HLERequestContext& ctx, GetFirmwareVersionType type)
42 rb.Push(code); 59 rb.Push(code);
43 }; 60 };
44 61
45 if (archive == nullptr) { 62 const auto ver_file = romfs->GetFile("file");
46 early_exit_failure("The system version archive couldn't be synthesized.",
47 FileSys::ERROR_FAILED_MOUNT_ARCHIVE);
48 return;
49 }
50
51 const auto ver_file = archive->GetFile("file");
52 if (ver_file == nullptr) { 63 if (ver_file == nullptr) {
53 early_exit_failure("The system version archive didn't contain the file 'file'.", 64 early_exit_failure("The system version archive didn't contain the file 'file'.",
54 FileSys::ERROR_INVALID_ARGUMENT); 65 FileSys::ERROR_INVALID_ARGUMENT);
@@ -87,12 +98,12 @@ void SET_SYS::SetLanguageCode(HLERequestContext& ctx) {
87 98
88void SET_SYS::GetFirmwareVersion(HLERequestContext& ctx) { 99void SET_SYS::GetFirmwareVersion(HLERequestContext& ctx) {
89 LOG_DEBUG(Service_SET, "called"); 100 LOG_DEBUG(Service_SET, "called");
90 GetFirmwareVersionImpl(ctx, GetFirmwareVersionType::Version1); 101 GetFirmwareVersionImpl(system, ctx, GetFirmwareVersionType::Version1);
91} 102}
92 103
93void SET_SYS::GetFirmwareVersion2(HLERequestContext& ctx) { 104void SET_SYS::GetFirmwareVersion2(HLERequestContext& ctx) {
94 LOG_DEBUG(Service_SET, "called"); 105 LOG_DEBUG(Service_SET, "called");
95 GetFirmwareVersionImpl(ctx, GetFirmwareVersionType::Version2); 106 GetFirmwareVersionImpl(system, ctx, GetFirmwareVersionType::Version2);
96} 107}
97 108
98void SET_SYS::GetAccountSettings(HLERequestContext& ctx) { 109void SET_SYS::GetAccountSettings(HLERequestContext& ctx) {
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
index 2868fc57d..1d77426e0 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
@@ -111,16 +111,33 @@ Id GetCbuf(EmitContext& ctx, Id result_type, Id UniformDefinitions::*member_ptr,
111 } else if (element_size > 1) { 111 } else if (element_size > 1) {
112 const u32 log2_element_size{static_cast<u32>(std::countr_zero(element_size))}; 112 const u32 log2_element_size{static_cast<u32>(std::countr_zero(element_size))};
113 const Id shift{ctx.Const(log2_element_size)}; 113 const Id shift{ctx.Const(log2_element_size)};
114 buffer_offset = ctx.OpShiftRightArithmetic(ctx.U32[1], ctx.Def(offset), shift); 114 buffer_offset = ctx.OpShiftRightLogical(ctx.U32[1], ctx.Def(offset), shift);
115 } else { 115 } else {
116 buffer_offset = ctx.Def(offset); 116 buffer_offset = ctx.Def(offset);
117 } 117 }
118 if (!binding.IsImmediate()) { 118 if (!binding.IsImmediate()) {
119 return ctx.OpFunctionCall(result_type, indirect_func, ctx.Def(binding), buffer_offset); 119 return ctx.OpFunctionCall(result_type, indirect_func, ctx.Def(binding), buffer_offset);
120 } 120 }
121
121 const Id cbuf{ctx.cbufs[binding.U32()].*member_ptr}; 122 const Id cbuf{ctx.cbufs[binding.U32()].*member_ptr};
122 const Id access_chain{ctx.OpAccessChain(uniform_type, cbuf, ctx.u32_zero_value, buffer_offset)}; 123 const Id access_chain{ctx.OpAccessChain(uniform_type, cbuf, ctx.u32_zero_value, buffer_offset)};
123 return ctx.OpLoad(result_type, access_chain); 124 const Id val = ctx.OpLoad(result_type, access_chain);
125
126 if (offset.IsImmediate() || !ctx.profile.has_broken_robust) {
127 return val;
128 }
129
130 const auto is_float = UniformDefinitions::IsFloat(member_ptr);
131 const auto num_elements = UniformDefinitions::NumElements(member_ptr);
132 const std::array zero_vec{
133 is_float ? ctx.Const(0.0f) : ctx.Const(0u),
134 is_float ? ctx.Const(0.0f) : ctx.Const(0u),
135 is_float ? ctx.Const(0.0f) : ctx.Const(0u),
136 is_float ? ctx.Const(0.0f) : ctx.Const(0u),
137 };
138 const Id cond = ctx.OpULessThanEqual(ctx.TypeBool(), buffer_offset, ctx.Const(0xFFFFu));
139 const Id zero = ctx.OpCompositeConstruct(result_type, std::span(zero_vec.data(), num_elements));
140 return ctx.OpSelect(result_type, cond, val, zero);
124} 141}
125 142
126Id GetCbufU32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) { 143Id GetCbufU32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
@@ -138,7 +155,7 @@ Id GetCbufElement(EmitContext& ctx, Id vector, const IR::Value& offset, u32 inde
138 const u32 element{(offset.U32() / 4) % 4 + index_offset}; 155 const u32 element{(offset.U32() / 4) % 4 + index_offset};
139 return ctx.OpCompositeExtract(ctx.U32[1], vector, element); 156 return ctx.OpCompositeExtract(ctx.U32[1], vector, element);
140 } 157 }
141 const Id shift{ctx.OpShiftRightArithmetic(ctx.U32[1], ctx.Def(offset), ctx.Const(2u))}; 158 const Id shift{ctx.OpShiftRightLogical(ctx.U32[1], ctx.Def(offset), ctx.Const(2u))};
142 Id element{ctx.OpBitwiseAnd(ctx.U32[1], shift, ctx.Const(3u))}; 159 Id element{ctx.OpBitwiseAnd(ctx.U32[1], shift, ctx.Const(3u))};
143 if (index_offset > 0) { 160 if (index_offset > 0) {
144 element = ctx.OpIAdd(ctx.U32[1], element, ctx.Const(index_offset)); 161 element = ctx.OpIAdd(ctx.U32[1], element, ctx.Const(index_offset));
diff --git a/src/shader_recompiler/backend/spirv/spirv_emit_context.h b/src/shader_recompiler/backend/spirv/spirv_emit_context.h
index 7c49fd504..1aa79863d 100644
--- a/src/shader_recompiler/backend/spirv/spirv_emit_context.h
+++ b/src/shader_recompiler/backend/spirv/spirv_emit_context.h
@@ -64,6 +64,42 @@ struct UniformDefinitions {
64 Id F32{}; 64 Id F32{};
65 Id U32x2{}; 65 Id U32x2{};
66 Id U32x4{}; 66 Id U32x4{};
67
68 constexpr static size_t NumElements(Id UniformDefinitions::*member_ptr) {
69 if (member_ptr == &UniformDefinitions::U8) {
70 return 1;
71 }
72 if (member_ptr == &UniformDefinitions::S8) {
73 return 1;
74 }
75 if (member_ptr == &UniformDefinitions::U16) {
76 return 1;
77 }
78 if (member_ptr == &UniformDefinitions::S16) {
79 return 1;
80 }
81 if (member_ptr == &UniformDefinitions::U32) {
82 return 1;
83 }
84 if (member_ptr == &UniformDefinitions::F32) {
85 return 1;
86 }
87 if (member_ptr == &UniformDefinitions::U32x2) {
88 return 2;
89 }
90 if (member_ptr == &UniformDefinitions::U32x4) {
91 return 4;
92 }
93 ASSERT(false);
94 return 1;
95 }
96
97 constexpr static bool IsFloat(Id UniformDefinitions::*member_ptr) {
98 if (member_ptr == &UniformDefinitions::F32) {
99 return true;
100 }
101 return false;
102 }
67}; 103};
68 104
69struct StorageTypeDefinition { 105struct StorageTypeDefinition {
diff --git a/src/shader_recompiler/profile.h b/src/shader_recompiler/profile.h
index 9ca97f6a4..38d820db2 100644
--- a/src/shader_recompiler/profile.h
+++ b/src/shader_recompiler/profile.h
@@ -9,7 +9,6 @@ namespace Shader {
9 9
10struct Profile { 10struct Profile {
11 u32 supported_spirv{0x00010000}; 11 u32 supported_spirv{0x00010000};
12
13 bool unified_descriptor_binding{}; 12 bool unified_descriptor_binding{};
14 bool support_descriptor_aliasing{}; 13 bool support_descriptor_aliasing{};
15 bool support_int8{}; 14 bool support_int8{};
@@ -82,6 +81,9 @@ struct Profile {
82 bool has_broken_spirv_subgroup_mask_vector_extract_dynamic{}; 81 bool has_broken_spirv_subgroup_mask_vector_extract_dynamic{};
83 82
84 u32 gl_max_compute_smem_size{}; 83 u32 gl_max_compute_smem_size{};
84
85 /// Maxwell and earlier nVidia architectures have broken robust support
86 bool has_broken_robust{};
85}; 87};
86 88
87} // namespace Shader 89} // namespace Shader
diff --git a/src/tests/common/unique_function.cpp b/src/tests/common/unique_function.cpp
index f7a23e876..42e6ade09 100644
--- a/src/tests/common/unique_function.cpp
+++ b/src/tests/common/unique_function.cpp
@@ -46,8 +46,8 @@ TEST_CASE("UniqueFunction", "[common]") {
46 Noisy noisy; 46 Noisy noisy;
47 REQUIRE(noisy.state == "Default constructed"); 47 REQUIRE(noisy.state == "Default constructed");
48 48
49 Common::UniqueFunction<void> func = [noisy = std::move(noisy)] { 49 Common::UniqueFunction<void> func = [noisy_inner = std::move(noisy)] {
50 REQUIRE(noisy.state == "Move constructed"); 50 REQUIRE(noisy_inner.state == "Move constructed");
51 }; 51 };
52 REQUIRE(noisy.state == "Moved away"); 52 REQUIRE(noisy.state == "Moved away");
53 func(); 53 func();
@@ -101,7 +101,7 @@ TEST_CASE("UniqueFunction", "[common]") {
101 }; 101 };
102 Foo object{&num_destroyed}; 102 Foo object{&num_destroyed};
103 { 103 {
104 Common::UniqueFunction<void> func = [object = std::move(object)] {}; 104 Common::UniqueFunction<void> func = [object_inner = std::move(object)] {};
105 REQUIRE(num_destroyed == 0); 105 REQUIRE(num_destroyed == 0);
106 } 106 }
107 REQUIRE(num_destroyed == 1); 107 REQUIRE(num_destroyed == 1);
diff --git a/src/video_core/buffer_cache/buffer_cache_base.h b/src/video_core/buffer_cache/buffer_cache_base.h
index c4f6e8d12..eed267361 100644
--- a/src/video_core/buffer_cache/buffer_cache_base.h
+++ b/src/video_core/buffer_cache/buffer_cache_base.h
@@ -62,7 +62,11 @@ using BufferId = SlotId;
62using VideoCore::Surface::PixelFormat; 62using VideoCore::Surface::PixelFormat;
63using namespace Common::Literals; 63using namespace Common::Literals;
64 64
65#ifdef __APPLE__
66constexpr u32 NUM_VERTEX_BUFFERS = 16;
67#else
65constexpr u32 NUM_VERTEX_BUFFERS = 32; 68constexpr u32 NUM_VERTEX_BUFFERS = 32;
69#endif
66constexpr u32 NUM_TRANSFORM_FEEDBACK_BUFFERS = 4; 70constexpr u32 NUM_TRANSFORM_FEEDBACK_BUFFERS = 4;
67constexpr u32 NUM_GRAPHICS_UNIFORM_BUFFERS = 18; 71constexpr u32 NUM_GRAPHICS_UNIFORM_BUFFERS = 18;
68constexpr u32 NUM_COMPUTE_UNIFORM_BUFFERS = 8; 72constexpr u32 NUM_COMPUTE_UNIFORM_BUFFERS = 8;
diff --git a/src/video_core/fence_manager.h b/src/video_core/fence_manager.h
index 805a89900..c0e6471fe 100644
--- a/src/video_core/fence_manager.h
+++ b/src/video_core/fence_manager.h
@@ -86,7 +86,10 @@ public:
86 uncommitted_operations.emplace_back(std::move(func)); 86 uncommitted_operations.emplace_back(std::move(func));
87 } 87 }
88 pending_operations.emplace_back(std::move(uncommitted_operations)); 88 pending_operations.emplace_back(std::move(uncommitted_operations));
89 QueueFence(new_fence); 89 {
90 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
91 QueueFence(new_fence);
92 }
90 if (!delay_fence) { 93 if (!delay_fence) {
91 func(); 94 func();
92 } 95 }
diff --git a/src/video_core/host_shaders/CMakeLists.txt b/src/video_core/host_shaders/CMakeLists.txt
index 8bb429578..cd2549232 100644
--- a/src/video_core/host_shaders/CMakeLists.txt
+++ b/src/video_core/host_shaders/CMakeLists.txt
@@ -19,6 +19,7 @@ set(SHADER_FILES
19 block_linear_unswizzle_2d.comp 19 block_linear_unswizzle_2d.comp
20 block_linear_unswizzle_3d.comp 20 block_linear_unswizzle_3d.comp
21 convert_abgr8_to_d24s8.frag 21 convert_abgr8_to_d24s8.frag
22 convert_abgr8_to_d32f.frag
22 convert_d32f_to_abgr8.frag 23 convert_d32f_to_abgr8.frag
23 convert_d24s8_to_abgr8.frag 24 convert_d24s8_to_abgr8.frag
24 convert_depth_to_float.frag 25 convert_depth_to_float.frag
diff --git a/src/video_core/host_shaders/convert_abgr8_to_d32f.frag b/src/video_core/host_shaders/convert_abgr8_to_d32f.frag
new file mode 100644
index 000000000..095b910c2
--- /dev/null
+++ b/src/video_core/host_shaders/convert_abgr8_to_d32f.frag
@@ -0,0 +1,15 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#version 450
5
6layout(binding = 0) uniform sampler2D color_texture;
7
8void main() {
9 ivec2 coord = ivec2(gl_FragCoord.xy);
10 vec4 color = texelFetch(color_texture, coord, 0).abgr;
11
12 float value = color.a * (color.r + color.g + color.b) / 3.0f;
13
14 gl_FragDepth = value;
15}
diff --git a/src/video_core/host_shaders/convert_d24s8_to_abgr8.frag b/src/video_core/host_shaders/convert_d24s8_to_abgr8.frag
index d33131d7c..b81a54056 100644
--- a/src/video_core/host_shaders/convert_d24s8_to_abgr8.frag
+++ b/src/video_core/host_shaders/convert_d24s8_to_abgr8.frag
@@ -3,16 +3,16 @@
3 3
4#version 450 4#version 450
5 5
6precision mediump int;
7precision highp float;
8
6layout(binding = 0) uniform sampler2D depth_tex; 9layout(binding = 0) uniform sampler2D depth_tex;
7layout(binding = 1) uniform isampler2D stencil_tex; 10layout(binding = 1) uniform usampler2D stencil_tex;
8 11
9layout(location = 0) out vec4 color; 12layout(location = 0) out vec4 color;
10 13
11void main() { 14void main() {
12 ivec2 coord = ivec2(gl_FragCoord.xy); 15 ivec2 coord = ivec2(gl_FragCoord.xy);
13 uint depth = uint(textureLod(depth_tex, coord, 0).r * (exp2(24.0) - 1.0f));
14 uint stencil = uint(textureLod(stencil_tex, coord, 0).r);
15
16 highp uint depth_val = 16 highp uint depth_val =
17 uint(textureLod(depth_tex, coord, 0).r * (exp2(32.0) - 1.0)); 17 uint(textureLod(depth_tex, coord, 0).r * (exp2(32.0) - 1.0));
18 lowp uint stencil_val = textureLod(stencil_tex, coord, 0).r; 18 lowp uint stencil_val = textureLod(stencil_tex, coord, 0).r;
diff --git a/src/video_core/host_shaders/convert_d32f_to_abgr8.frag b/src/video_core/host_shaders/convert_d32f_to_abgr8.frag
index 04cfef8b5..4e5a9f955 100644
--- a/src/video_core/host_shaders/convert_d32f_to_abgr8.frag
+++ b/src/video_core/host_shaders/convert_d32f_to_abgr8.frag
@@ -9,6 +9,6 @@ layout(location = 0) out vec4 color;
9 9
10void main() { 10void main() {
11 ivec2 coord = ivec2(gl_FragCoord.xy); 11 ivec2 coord = ivec2(gl_FragCoord.xy);
12 float depth = textureLod(depth_tex, coord, 0).r; 12 float depth = texelFetch(depth_tex, coord, 0).r;
13 color = vec4(depth, depth, depth, 1.0); 13 color = vec4(depth, depth, depth, 1.0);
14} 14}
diff --git a/src/video_core/host_shaders/convert_s8d24_to_abgr8.frag b/src/video_core/host_shaders/convert_s8d24_to_abgr8.frag
index 31db7d426..6a457981d 100644
--- a/src/video_core/host_shaders/convert_s8d24_to_abgr8.frag
+++ b/src/video_core/host_shaders/convert_s8d24_to_abgr8.frag
@@ -3,16 +3,16 @@
3 3
4#version 450 4#version 450
5 5
6precision mediump int;
7precision highp float;
8
6layout(binding = 0) uniform sampler2D depth_tex; 9layout(binding = 0) uniform sampler2D depth_tex;
7layout(binding = 1) uniform isampler2D stencil_tex; 10layout(binding = 1) uniform usampler2D stencil_tex;
8 11
9layout(location = 0) out vec4 color; 12layout(location = 0) out vec4 color;
10 13
11void main() { 14void main() {
12 ivec2 coord = ivec2(gl_FragCoord.xy); 15 ivec2 coord = ivec2(gl_FragCoord.xy);
13 uint depth = uint(textureLod(depth_tex, coord, 0).r * (exp2(24.0) - 1.0f));
14 uint stencil = uint(textureLod(stencil_tex, coord, 0).r);
15
16 highp uint depth_val = 16 highp uint depth_val =
17 uint(textureLod(depth_tex, coord, 0).r * (exp2(32.0) - 1.0)); 17 uint(textureLod(depth_tex, coord, 0).r * (exp2(32.0) - 1.0));
18 lowp uint stencil_val = textureLod(stencil_tex, coord, 0).r; 18 lowp uint stencil_val = textureLod(stencil_tex, coord, 0).r;
diff --git a/src/video_core/renderer_vulkan/blit_image.cpp b/src/video_core/renderer_vulkan/blit_image.cpp
index f01d2394e..c3db09424 100644
--- a/src/video_core/renderer_vulkan/blit_image.cpp
+++ b/src/video_core/renderer_vulkan/blit_image.cpp
@@ -8,6 +8,7 @@
8#include "common/settings.h" 8#include "common/settings.h"
9#include "video_core/host_shaders/blit_color_float_frag_spv.h" 9#include "video_core/host_shaders/blit_color_float_frag_spv.h"
10#include "video_core/host_shaders/convert_abgr8_to_d24s8_frag_spv.h" 10#include "video_core/host_shaders/convert_abgr8_to_d24s8_frag_spv.h"
11#include "video_core/host_shaders/convert_abgr8_to_d32f_frag_spv.h"
11#include "video_core/host_shaders/convert_d24s8_to_abgr8_frag_spv.h" 12#include "video_core/host_shaders/convert_d24s8_to_abgr8_frag_spv.h"
12#include "video_core/host_shaders/convert_d32f_to_abgr8_frag_spv.h" 13#include "video_core/host_shaders/convert_d32f_to_abgr8_frag_spv.h"
13#include "video_core/host_shaders/convert_depth_to_float_frag_spv.h" 14#include "video_core/host_shaders/convert_depth_to_float_frag_spv.h"
@@ -434,6 +435,7 @@ BlitImageHelper::BlitImageHelper(const Device& device_, Scheduler& scheduler_,
434 convert_depth_to_float_frag(BuildShader(device, CONVERT_DEPTH_TO_FLOAT_FRAG_SPV)), 435 convert_depth_to_float_frag(BuildShader(device, CONVERT_DEPTH_TO_FLOAT_FRAG_SPV)),
435 convert_float_to_depth_frag(BuildShader(device, CONVERT_FLOAT_TO_DEPTH_FRAG_SPV)), 436 convert_float_to_depth_frag(BuildShader(device, CONVERT_FLOAT_TO_DEPTH_FRAG_SPV)),
436 convert_abgr8_to_d24s8_frag(BuildShader(device, CONVERT_ABGR8_TO_D24S8_FRAG_SPV)), 437 convert_abgr8_to_d24s8_frag(BuildShader(device, CONVERT_ABGR8_TO_D24S8_FRAG_SPV)),
438 convert_abgr8_to_d32f_frag(BuildShader(device, CONVERT_ABGR8_TO_D32F_FRAG_SPV)),
437 convert_d32f_to_abgr8_frag(BuildShader(device, CONVERT_D32F_TO_ABGR8_FRAG_SPV)), 439 convert_d32f_to_abgr8_frag(BuildShader(device, CONVERT_D32F_TO_ABGR8_FRAG_SPV)),
438 convert_d24s8_to_abgr8_frag(BuildShader(device, CONVERT_D24S8_TO_ABGR8_FRAG_SPV)), 440 convert_d24s8_to_abgr8_frag(BuildShader(device, CONVERT_D24S8_TO_ABGR8_FRAG_SPV)),
439 convert_s8d24_to_abgr8_frag(BuildShader(device, CONVERT_S8D24_TO_ABGR8_FRAG_SPV)), 441 convert_s8d24_to_abgr8_frag(BuildShader(device, CONVERT_S8D24_TO_ABGR8_FRAG_SPV)),
@@ -559,6 +561,13 @@ void BlitImageHelper::ConvertABGR8ToD24S8(const Framebuffer* dst_framebuffer,
559 Convert(*convert_abgr8_to_d24s8_pipeline, dst_framebuffer, src_image_view); 561 Convert(*convert_abgr8_to_d24s8_pipeline, dst_framebuffer, src_image_view);
560} 562}
561 563
564void BlitImageHelper::ConvertABGR8ToD32F(const Framebuffer* dst_framebuffer,
565 const ImageView& src_image_view) {
566 ConvertPipelineDepthTargetEx(convert_abgr8_to_d32f_pipeline, dst_framebuffer->RenderPass(),
567 convert_abgr8_to_d32f_frag);
568 Convert(*convert_abgr8_to_d32f_pipeline, dst_framebuffer, src_image_view);
569}
570
562void BlitImageHelper::ConvertD32FToABGR8(const Framebuffer* dst_framebuffer, 571void BlitImageHelper::ConvertD32FToABGR8(const Framebuffer* dst_framebuffer,
563 ImageView& src_image_view) { 572 ImageView& src_image_view) {
564 ConvertPipelineColorTargetEx(convert_d32f_to_abgr8_pipeline, dst_framebuffer->RenderPass(), 573 ConvertPipelineColorTargetEx(convert_d32f_to_abgr8_pipeline, dst_framebuffer->RenderPass(),
diff --git a/src/video_core/renderer_vulkan/blit_image.h b/src/video_core/renderer_vulkan/blit_image.h
index a032c71fb..b2104a59e 100644
--- a/src/video_core/renderer_vulkan/blit_image.h
+++ b/src/video_core/renderer_vulkan/blit_image.h
@@ -67,6 +67,8 @@ public:
67 67
68 void ConvertABGR8ToD24S8(const Framebuffer* dst_framebuffer, const ImageView& src_image_view); 68 void ConvertABGR8ToD24S8(const Framebuffer* dst_framebuffer, const ImageView& src_image_view);
69 69
70 void ConvertABGR8ToD32F(const Framebuffer* dst_framebuffer, const ImageView& src_image_view);
71
70 void ConvertD32FToABGR8(const Framebuffer* dst_framebuffer, ImageView& src_image_view); 72 void ConvertD32FToABGR8(const Framebuffer* dst_framebuffer, ImageView& src_image_view);
71 73
72 void ConvertD24S8ToABGR8(const Framebuffer* dst_framebuffer, ImageView& src_image_view); 74 void ConvertD24S8ToABGR8(const Framebuffer* dst_framebuffer, ImageView& src_image_view);
@@ -130,6 +132,7 @@ private:
130 vk::ShaderModule convert_depth_to_float_frag; 132 vk::ShaderModule convert_depth_to_float_frag;
131 vk::ShaderModule convert_float_to_depth_frag; 133 vk::ShaderModule convert_float_to_depth_frag;
132 vk::ShaderModule convert_abgr8_to_d24s8_frag; 134 vk::ShaderModule convert_abgr8_to_d24s8_frag;
135 vk::ShaderModule convert_abgr8_to_d32f_frag;
133 vk::ShaderModule convert_d32f_to_abgr8_frag; 136 vk::ShaderModule convert_d32f_to_abgr8_frag;
134 vk::ShaderModule convert_d24s8_to_abgr8_frag; 137 vk::ShaderModule convert_d24s8_to_abgr8_frag;
135 vk::ShaderModule convert_s8d24_to_abgr8_frag; 138 vk::ShaderModule convert_s8d24_to_abgr8_frag;
@@ -149,6 +152,7 @@ private:
149 vk::Pipeline convert_d16_to_r16_pipeline; 152 vk::Pipeline convert_d16_to_r16_pipeline;
150 vk::Pipeline convert_r16_to_d16_pipeline; 153 vk::Pipeline convert_r16_to_d16_pipeline;
151 vk::Pipeline convert_abgr8_to_d24s8_pipeline; 154 vk::Pipeline convert_abgr8_to_d24s8_pipeline;
155 vk::Pipeline convert_abgr8_to_d32f_pipeline;
152 vk::Pipeline convert_d32f_to_abgr8_pipeline; 156 vk::Pipeline convert_d32f_to_abgr8_pipeline;
153 vk::Pipeline convert_d24s8_to_abgr8_pipeline; 157 vk::Pipeline convert_d24s8_to_abgr8_pipeline;
154 vk::Pipeline convert_s8d24_to_abgr8_pipeline; 158 vk::Pipeline convert_s8d24_to_abgr8_pipeline;
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
index c4c30d807..7e7a80740 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
@@ -132,12 +132,16 @@ void RendererVulkan::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
132 const bool use_accelerated = 132 const bool use_accelerated =
133 rasterizer.AccelerateDisplay(*framebuffer, framebuffer_addr, framebuffer->stride); 133 rasterizer.AccelerateDisplay(*framebuffer, framebuffer_addr, framebuffer->stride);
134 const bool is_srgb = use_accelerated && screen_info.is_srgb; 134 const bool is_srgb = use_accelerated && screen_info.is_srgb;
135 RenderScreenshot(*framebuffer, use_accelerated);
136 135
137 Frame* frame = present_manager.GetRenderFrame(); 136 {
138 blit_screen.DrawToSwapchain(frame, *framebuffer, use_accelerated, is_srgb); 137 std::scoped_lock lock{rasterizer.LockCaches()};
139 scheduler.Flush(*frame->render_ready); 138 RenderScreenshot(*framebuffer, use_accelerated);
140 present_manager.Present(frame); 139
140 Frame* frame = present_manager.GetRenderFrame();
141 blit_screen.DrawToSwapchain(frame, *framebuffer, use_accelerated, is_srgb);
142 scheduler.Flush(*frame->render_ready);
143 present_manager.Present(frame);
144 }
141 145
142 gpu.RendererFrameEndNotify(); 146 gpu.RendererFrameEndNotify();
143 rasterizer.TickFrame(); 147 rasterizer.TickFrame();
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index a1ec1a100..804b95989 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -356,7 +356,11 @@ PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, const Device& device
356 .has_broken_fp16_float_controls = driver_id == VK_DRIVER_ID_NVIDIA_PROPRIETARY, 356 .has_broken_fp16_float_controls = driver_id == VK_DRIVER_ID_NVIDIA_PROPRIETARY,
357 .ignore_nan_fp_comparisons = false, 357 .ignore_nan_fp_comparisons = false,
358 .has_broken_spirv_subgroup_mask_vector_extract_dynamic = 358 .has_broken_spirv_subgroup_mask_vector_extract_dynamic =
359 driver_id == VK_DRIVER_ID_QUALCOMM_PROPRIETARY}; 359 driver_id == VK_DRIVER_ID_QUALCOMM_PROPRIETARY,
360 .has_broken_robust =
361 device.IsNvidia() && device.GetNvidiaArch() <= NvidiaArchitecture::Arch_Maxwell,
362 };
363
360 host_info = Shader::HostTranslateInfo{ 364 host_info = Shader::HostTranslateInfo{
361 .support_float64 = device.IsFloat64Supported(), 365 .support_float64 = device.IsFloat64Supported(),
362 .support_float16 = device.IsFloat16Supported(), 366 .support_float16 = device.IsFloat16Supported(),
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp
index 2edaafa7e..66c03bf17 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp
@@ -1436,6 +1436,7 @@ void QueryCacheRuntime::Barriers(bool is_prebarrier) {
1436 .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, 1436 .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
1437 .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT, 1437 .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT,
1438 }; 1438 };
1439 impl->scheduler.RequestOutsideRenderPassOperationContext();
1439 if (is_prebarrier) { 1440 if (is_prebarrier) {
1440 impl->scheduler.Record([](vk::CommandBuffer cmdbuf) { 1441 impl->scheduler.Record([](vk::CommandBuffer cmdbuf) {
1441 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 1442 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 83f2b6045..465eac37e 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -198,7 +198,7 @@ void RasterizerVulkan::PrepareDraw(bool is_indexed, Func&& draw_func) {
198 if (!pipeline) { 198 if (!pipeline) {
199 return; 199 return;
200 } 200 }
201 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; 201 std::scoped_lock lock{LockCaches()};
202 // update engine as channel may be different. 202 // update engine as channel may be different.
203 pipeline->SetEngine(maxwell3d, gpu_memory); 203 pipeline->SetEngine(maxwell3d, gpu_memory);
204 pipeline->Configure(is_indexed); 204 pipeline->Configure(is_indexed);
@@ -708,6 +708,7 @@ void RasterizerVulkan::TiledCacheBarrier() {
708} 708}
709 709
710void RasterizerVulkan::FlushCommands() { 710void RasterizerVulkan::FlushCommands() {
711 std::scoped_lock lock{LockCaches()};
711 if (draw_counter == 0) { 712 if (draw_counter == 0) {
712 return; 713 return;
713 } 714 }
@@ -805,6 +806,7 @@ void RasterizerVulkan::FlushWork() {
805 if ((++draw_counter & 7) != 7) { 806 if ((++draw_counter & 7) != 7) {
806 return; 807 return;
807 } 808 }
809 std::scoped_lock lock{LockCaches()};
808 if (draw_counter < DRAWS_TO_DISPATCH) { 810 if (draw_counter < DRAWS_TO_DISPATCH) {
809 // Send recorded tasks to the worker thread 811 // Send recorded tasks to the worker thread
810 scheduler.DispatchWork(); 812 scheduler.DispatchWork();
@@ -975,6 +977,19 @@ void RasterizerVulkan::UpdateScissorsState(Tegra::Engines::Maxwell3D::Regs& regs
975 if (!state_tracker.TouchScissors()) { 977 if (!state_tracker.TouchScissors()) {
976 return; 978 return;
977 } 979 }
980 if (!regs.viewport_scale_offset_enabled) {
981 const auto x = static_cast<float>(regs.surface_clip.x);
982 const auto y = static_cast<float>(regs.surface_clip.y);
983 const auto width = static_cast<float>(regs.surface_clip.width);
984 const auto height = static_cast<float>(regs.surface_clip.height);
985 VkRect2D scissor;
986 scissor.offset.x = static_cast<u32>(x);
987 scissor.offset.y = static_cast<u32>(y);
988 scissor.extent.width = static_cast<u32>(width != 0.0f ? width : 1.0f);
989 scissor.extent.height = static_cast<u32>(height != 0.0f ? height : 1.0f);
990 scheduler.Record([scissor](vk::CommandBuffer cmdbuf) { cmdbuf.SetScissor(0, scissor); });
991 return;
992 }
978 u32 up_scale = 1; 993 u32 up_scale = 1;
979 u32 down_shift = 0; 994 u32 down_shift = 0;
980 if (texture_cache.IsRescaling()) { 995 if (texture_cache.IsRescaling()) {
@@ -1486,7 +1501,7 @@ void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs)
1486void RasterizerVulkan::InitializeChannel(Tegra::Control::ChannelState& channel) { 1501void RasterizerVulkan::InitializeChannel(Tegra::Control::ChannelState& channel) {
1487 CreateChannel(channel); 1502 CreateChannel(channel);
1488 { 1503 {
1489 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; 1504 std::scoped_lock lock{LockCaches()};
1490 texture_cache.CreateChannel(channel); 1505 texture_cache.CreateChannel(channel);
1491 buffer_cache.CreateChannel(channel); 1506 buffer_cache.CreateChannel(channel);
1492 } 1507 }
@@ -1499,7 +1514,7 @@ void RasterizerVulkan::BindChannel(Tegra::Control::ChannelState& channel) {
1499 const s32 channel_id = channel.bind_id; 1514 const s32 channel_id = channel.bind_id;
1500 BindToChannel(channel_id); 1515 BindToChannel(channel_id);
1501 { 1516 {
1502 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; 1517 std::scoped_lock lock{LockCaches()};
1503 texture_cache.BindToChannel(channel_id); 1518 texture_cache.BindToChannel(channel_id);
1504 buffer_cache.BindToChannel(channel_id); 1519 buffer_cache.BindToChannel(channel_id);
1505 } 1520 }
@@ -1512,7 +1527,7 @@ void RasterizerVulkan::BindChannel(Tegra::Control::ChannelState& channel) {
1512void RasterizerVulkan::ReleaseChannel(s32 channel_id) { 1527void RasterizerVulkan::ReleaseChannel(s32 channel_id) {
1513 EraseChannel(channel_id); 1528 EraseChannel(channel_id);
1514 { 1529 {
1515 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; 1530 std::scoped_lock lock{LockCaches()};
1516 texture_cache.EraseChannel(channel_id); 1531 texture_cache.EraseChannel(channel_id);
1517 buffer_cache.EraseChannel(channel_id); 1532 buffer_cache.EraseChannel(channel_id);
1518 } 1533 }
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index ad069556c..ce3dfbaab 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -133,6 +133,10 @@ public:
133 133
134 void ReleaseChannel(s32 channel_id) override; 134 void ReleaseChannel(s32 channel_id) override;
135 135
136 std::scoped_lock<std::recursive_mutex, std::recursive_mutex> LockCaches() {
137 return std::scoped_lock{buffer_cache.mutex, texture_cache.mutex};
138 }
139
136private: 140private:
137 static constexpr size_t MAX_TEXTURES = 192; 141 static constexpr size_t MAX_TEXTURES = 192;
138 static constexpr size_t MAX_IMAGES = 48; 142 static constexpr size_t MAX_IMAGES = 48;
diff --git a/src/video_core/renderer_vulkan/vk_render_pass_cache.cpp b/src/video_core/renderer_vulkan/vk_render_pass_cache.cpp
index ae9f1de64..7746a88d3 100644
--- a/src/video_core/renderer_vulkan/vk_render_pass_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_render_pass_cache.cpp
@@ -19,7 +19,7 @@ VkAttachmentDescription AttachmentDescription(const Device& device, PixelFormat
19 VkSampleCountFlagBits samples) { 19 VkSampleCountFlagBits samples) {
20 using MaxwellToVK::SurfaceFormat; 20 using MaxwellToVK::SurfaceFormat;
21 return { 21 return {
22 .flags = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT, 22 .flags = {},
23 .format = SurfaceFormat(device, FormatType::Optimal, true, format).format, 23 .format = SurfaceFormat(device, FormatType::Optimal, true, format).format,
24 .samples = samples, 24 .samples = samples,
25 .loadOp = VK_ATTACHMENT_LOAD_OP_LOAD, 25 .loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index 93773a69f..de34f6d49 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -1194,6 +1194,11 @@ void TextureCacheRuntime::ConvertImage(Framebuffer* dst, ImageView& dst_view, Im
1194 return blit_image_helper.ConvertD16ToR16(dst, src_view); 1194 return blit_image_helper.ConvertD16ToR16(dst, src_view);
1195 } 1195 }
1196 break; 1196 break;
1197 case PixelFormat::A8B8G8R8_SRGB:
1198 if (src_view.format == PixelFormat::D32_FLOAT) {
1199 return blit_image_helper.ConvertD32FToABGR8(dst, src_view);
1200 }
1201 break;
1197 case PixelFormat::A8B8G8R8_UNORM: 1202 case PixelFormat::A8B8G8R8_UNORM:
1198 if (src_view.format == PixelFormat::S8_UINT_D24_UNORM) { 1203 if (src_view.format == PixelFormat::S8_UINT_D24_UNORM) {
1199 return blit_image_helper.ConvertD24S8ToABGR8(dst, src_view); 1204 return blit_image_helper.ConvertD24S8ToABGR8(dst, src_view);
@@ -1205,6 +1210,16 @@ void TextureCacheRuntime::ConvertImage(Framebuffer* dst, ImageView& dst_view, Im
1205 return blit_image_helper.ConvertD32FToABGR8(dst, src_view); 1210 return blit_image_helper.ConvertD32FToABGR8(dst, src_view);
1206 } 1211 }
1207 break; 1212 break;
1213 case PixelFormat::B8G8R8A8_SRGB:
1214 if (src_view.format == PixelFormat::D32_FLOAT) {
1215 return blit_image_helper.ConvertD32FToABGR8(dst, src_view);
1216 }
1217 break;
1218 case PixelFormat::B8G8R8A8_UNORM:
1219 if (src_view.format == PixelFormat::D32_FLOAT) {
1220 return blit_image_helper.ConvertD32FToABGR8(dst, src_view);
1221 }
1222 break;
1208 case PixelFormat::R32_FLOAT: 1223 case PixelFormat::R32_FLOAT:
1209 if (src_view.format == PixelFormat::D32_FLOAT) { 1224 if (src_view.format == PixelFormat::D32_FLOAT) {
1210 return blit_image_helper.ConvertD32ToR32(dst, src_view); 1225 return blit_image_helper.ConvertD32ToR32(dst, src_view);
@@ -1222,6 +1237,12 @@ void TextureCacheRuntime::ConvertImage(Framebuffer* dst, ImageView& dst_view, Im
1222 } 1237 }
1223 break; 1238 break;
1224 case PixelFormat::D32_FLOAT: 1239 case PixelFormat::D32_FLOAT:
1240 if (src_view.format == PixelFormat::A8B8G8R8_UNORM ||
1241 src_view.format == PixelFormat::B8G8R8A8_UNORM ||
1242 src_view.format == PixelFormat::A8B8G8R8_SRGB ||
1243 src_view.format == PixelFormat::B8G8R8A8_SRGB) {
1244 return blit_image_helper.ConvertABGR8ToD32F(dst, src_view);
1245 }
1225 if (src_view.format == PixelFormat::R32_FLOAT) { 1246 if (src_view.format == PixelFormat::R32_FLOAT) {
1226 return blit_image_helper.ConvertR32ToD32(dst, src_view); 1247 return blit_image_helper.ConvertR32ToD32(dst, src_view);
1227 } 1248 }
@@ -2034,7 +2055,7 @@ void TextureCacheRuntime::TransitionImageLayout(Image& image) {
2034 }, 2055 },
2035 }; 2056 };
2036 scheduler.RequestOutsideRenderPassOperationContext(); 2057 scheduler.RequestOutsideRenderPassOperationContext();
2037 scheduler.Record([barrier = barrier](vk::CommandBuffer cmdbuf) { 2058 scheduler.Record([barrier](vk::CommandBuffer cmdbuf) {
2038 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 2059 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
2039 VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, barrier); 2060 VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, barrier);
2040 }); 2061 });
diff --git a/src/video_core/texture_cache/formatter.cpp b/src/video_core/texture_cache/formatter.cpp
index 6279d8e9e..2b7e0df72 100644
--- a/src/video_core/texture_cache/formatter.cpp
+++ b/src/video_core/texture_cache/formatter.cpp
@@ -10,19 +10,23 @@
10#include "video_core/texture_cache/image_info.h" 10#include "video_core/texture_cache/image_info.h"
11#include "video_core/texture_cache/image_view_base.h" 11#include "video_core/texture_cache/image_view_base.h"
12#include "video_core/texture_cache/render_targets.h" 12#include "video_core/texture_cache/render_targets.h"
13#include "video_core/texture_cache/samples_helper.h"
13 14
14namespace VideoCommon { 15namespace VideoCommon {
15 16
16std::string Name(const ImageBase& image) { 17std::string Name(const ImageBase& image) {
17 const GPUVAddr gpu_addr = image.gpu_addr; 18 const GPUVAddr gpu_addr = image.gpu_addr;
18 const ImageInfo& info = image.info; 19 const ImageInfo& info = image.info;
19 const u32 width = info.size.width; 20 u32 width = info.size.width;
20 const u32 height = info.size.height; 21 u32 height = info.size.height;
21 const u32 depth = info.size.depth; 22 const u32 depth = info.size.depth;
22 const u32 num_layers = image.info.resources.layers; 23 const u32 num_layers = image.info.resources.layers;
23 const u32 num_levels = image.info.resources.levels; 24 const u32 num_levels = image.info.resources.levels;
24 std::string resource; 25 std::string resource;
25 if (image.info.num_samples > 1) { 26 if (image.info.num_samples > 1) {
27 const auto [samples_x, samples_y] = VideoCommon::SamplesLog2(image.info.num_samples);
28 width >>= samples_x;
29 height >>= samples_y;
26 resource += fmt::format(":{}xMSAA", image.info.num_samples); 30 resource += fmt::format(":{}xMSAA", image.info.num_samples);
27 } 31 }
28 if (num_layers > 1) { 32 if (num_layers > 1) {
diff --git a/src/video_core/texture_cache/samples_helper.h b/src/video_core/texture_cache/samples_helper.h
index 203ac1b11..2ee2f8312 100644
--- a/src/video_core/texture_cache/samples_helper.h
+++ b/src/video_core/texture_cache/samples_helper.h
@@ -24,7 +24,7 @@ namespace VideoCommon {
24 return {2, 2}; 24 return {2, 2};
25 } 25 }
26 ASSERT_MSG(false, "Invalid number of samples={}", num_samples); 26 ASSERT_MSG(false, "Invalid number of samples={}", num_samples);
27 return {1, 1}; 27 return {0, 0};
28} 28}
29 29
30[[nodiscard]] inline int NumSamples(Tegra::Texture::MsaaMode msaa_mode) { 30[[nodiscard]] inline int NumSamples(Tegra::Texture::MsaaMode msaa_mode) {
diff --git a/src/video_core/texture_cache/util.cpp b/src/video_core/texture_cache/util.cpp
index 8151cabf0..15596c925 100644
--- a/src/video_core/texture_cache/util.cpp
+++ b/src/video_core/texture_cache/util.cpp
@@ -167,6 +167,13 @@ template <u32 GOB_EXTENT>
167} 167}
168 168
169[[nodiscard]] constexpr Extent3D TileShift(const LevelInfo& info, u32 level) { 169[[nodiscard]] constexpr Extent3D TileShift(const LevelInfo& info, u32 level) {
170 if (level == 0 && info.num_levels == 1) {
171 return Extent3D{
172 .width = info.block.width,
173 .height = info.block.height,
174 .depth = info.block.depth,
175 };
176 }
170 const Extent3D blocks = NumLevelBlocks(info, level); 177 const Extent3D blocks = NumLevelBlocks(info, level);
171 return Extent3D{ 178 return Extent3D{
172 .width = AdjustTileSize(info.block.width, GOB_SIZE_X, blocks.width), 179 .width = AdjustTileSize(info.block.width, GOB_SIZE_X, blocks.width),
@@ -1293,9 +1300,9 @@ u32 MapSizeBytes(const ImageBase& image) {
1293 1300
1294static_assert(CalculateLevelSize(LevelInfo{{1920, 1080, 1}, {0, 2, 0}, {1, 1}, 2, 0, 1}, 0) == 1301static_assert(CalculateLevelSize(LevelInfo{{1920, 1080, 1}, {0, 2, 0}, {1, 1}, 2, 0, 1}, 0) ==
1295 0x7f8000); 1302 0x7f8000);
1296static_assert(CalculateLevelSize(LevelInfo{{32, 32, 1}, {0, 0, 4}, {1, 1}, 4, 0, 1}, 0) == 0x4000); 1303static_assert(CalculateLevelSize(LevelInfo{{32, 32, 1}, {0, 0, 4}, {1, 1}, 4, 0, 1}, 0) == 0x40000);
1297 1304
1298static_assert(CalculateLevelSize(LevelInfo{{128, 8, 1}, {0, 4, 0}, {1, 1}, 4, 0, 1}, 0) == 0x4000); 1305static_assert(CalculateLevelSize(LevelInfo{{128, 8, 1}, {0, 4, 0}, {1, 1}, 4, 0, 1}, 0) == 0x40000);
1299 1306
1300static_assert(CalculateLevelOffset(PixelFormat::R8_SINT, {1920, 1080, 1}, {0, 2, 0}, 0, 7) == 1307static_assert(CalculateLevelOffset(PixelFormat::R8_SINT, {1920, 1080, 1}, {0, 2, 0}, 0, 7) ==
1301 0x2afc00); 1308 0x2afc00);
diff --git a/src/video_core/vulkan_common/vulkan_device.cpp b/src/video_core/vulkan_common/vulkan_device.cpp
index 876cec2e8..e518756d2 100644
--- a/src/video_core/vulkan_common/vulkan_device.cpp
+++ b/src/video_core/vulkan_common/vulkan_device.cpp
@@ -83,15 +83,6 @@ constexpr std::array VK_FORMAT_A4B4G4R4_UNORM_PACK16{
83 83
84} // namespace Alternatives 84} // namespace Alternatives
85 85
86enum class NvidiaArchitecture {
87 KeplerOrOlder,
88 Maxwell,
89 Pascal,
90 Volta,
91 Turing,
92 AmpereOrNewer,
93};
94
95template <typename T> 86template <typename T>
96void SetNext(void**& next, T& data) { 87void SetNext(void**& next, T& data) {
97 *next = &data; 88 *next = &data;
@@ -326,9 +317,9 @@ NvidiaArchitecture GetNvidiaArchitecture(vk::PhysicalDevice physical,
326 if (shading_rate_props.primitiveFragmentShadingRateWithMultipleViewports) { 317 if (shading_rate_props.primitiveFragmentShadingRateWithMultipleViewports) {
327 // Only Ampere and newer support this feature 318 // Only Ampere and newer support this feature
328 // TODO: Find a way to differentiate Ampere and Ada 319 // TODO: Find a way to differentiate Ampere and Ada
329 return NvidiaArchitecture::AmpereOrNewer; 320 return NvidiaArchitecture::Arch_AmpereOrNewer;
330 } 321 }
331 return NvidiaArchitecture::Turing; 322 return NvidiaArchitecture::Arch_Turing;
332 } 323 }
333 324
334 if (exts.contains(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME)) { 325 if (exts.contains(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME)) {
@@ -340,7 +331,7 @@ NvidiaArchitecture GetNvidiaArchitecture(vk::PhysicalDevice physical,
340 physical_properties.pNext = &advanced_blending_props; 331 physical_properties.pNext = &advanced_blending_props;
341 physical.GetProperties2(physical_properties); 332 physical.GetProperties2(physical_properties);
342 if (advanced_blending_props.advancedBlendMaxColorAttachments == 1) { 333 if (advanced_blending_props.advancedBlendMaxColorAttachments == 1) {
343 return NvidiaArchitecture::Maxwell; 334 return NvidiaArchitecture::Arch_Maxwell;
344 } 335 }
345 336
346 if (exts.contains(VK_EXT_CONSERVATIVE_RASTERIZATION_EXTENSION_NAME)) { 337 if (exts.contains(VK_EXT_CONSERVATIVE_RASTERIZATION_EXTENSION_NAME)) {
@@ -350,13 +341,13 @@ NvidiaArchitecture GetNvidiaArchitecture(vk::PhysicalDevice physical,
350 physical_properties.pNext = &conservative_raster_props; 341 physical_properties.pNext = &conservative_raster_props;
351 physical.GetProperties2(physical_properties); 342 physical.GetProperties2(physical_properties);
352 if (conservative_raster_props.degenerateLinesRasterized) { 343 if (conservative_raster_props.degenerateLinesRasterized) {
353 return NvidiaArchitecture::Volta; 344 return NvidiaArchitecture::Arch_Volta;
354 } 345 }
355 return NvidiaArchitecture::Pascal; 346 return NvidiaArchitecture::Arch_Pascal;
356 } 347 }
357 } 348 }
358 349
359 return NvidiaArchitecture::KeplerOrOlder; 350 return NvidiaArchitecture::Arch_KeplerOrOlder;
360} 351}
361 352
362std::vector<const char*> ExtensionListForVulkan( 353std::vector<const char*> ExtensionListForVulkan(
@@ -436,6 +427,10 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
436 throw vk::Exception(VK_ERROR_INCOMPATIBLE_DRIVER); 427 throw vk::Exception(VK_ERROR_INCOMPATIBLE_DRIVER);
437 } 428 }
438 429
430 if (is_nvidia) {
431 nvidia_arch = GetNvidiaArchitecture(physical, supported_extensions);
432 }
433
439 SetupFamilies(surface); 434 SetupFamilies(surface);
440 const auto queue_cis = GetDeviceQueueCreateInfos(); 435 const auto queue_cis = GetDeviceQueueCreateInfos();
441 436
@@ -532,11 +527,11 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
532 527
533 if (is_nvidia) { 528 if (is_nvidia) {
534 const u32 nv_major_version = (properties.properties.driverVersion >> 22) & 0x3ff; 529 const u32 nv_major_version = (properties.properties.driverVersion >> 22) & 0x3ff;
535 const auto arch = GetNvidiaArchitecture(physical, supported_extensions); 530 const auto arch = GetNvidiaArch();
536 if (arch >= NvidiaArchitecture::AmpereOrNewer) { 531 if (arch >= NvidiaArchitecture::Arch_AmpereOrNewer) {
537 LOG_WARNING(Render_Vulkan, "Ampere and newer have broken float16 math"); 532 LOG_WARNING(Render_Vulkan, "Ampere and newer have broken float16 math");
538 features.shader_float16_int8.shaderFloat16 = false; 533 features.shader_float16_int8.shaderFloat16 = false;
539 } else if (arch <= NvidiaArchitecture::Volta) { 534 } else if (arch <= NvidiaArchitecture::Arch_Volta) {
540 if (nv_major_version < 527) { 535 if (nv_major_version < 527) {
541 LOG_WARNING(Render_Vulkan, "Volta and older have broken VK_KHR_push_descriptor"); 536 LOG_WARNING(Render_Vulkan, "Volta and older have broken VK_KHR_push_descriptor");
542 RemoveExtension(extensions.push_descriptor, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); 537 RemoveExtension(extensions.push_descriptor, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME);
@@ -686,8 +681,8 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
686 RemoveExtension(extensions.push_descriptor, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); 681 RemoveExtension(extensions.push_descriptor, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME);
687 } 682 }
688 } else if (extensions.push_descriptor && is_nvidia) { 683 } else if (extensions.push_descriptor && is_nvidia) {
689 const auto arch = GetNvidiaArchitecture(physical, supported_extensions); 684 const auto arch = GetNvidiaArch();
690 if (arch <= NvidiaArchitecture::Pascal) { 685 if (arch <= NvidiaArchitecture::Arch_Pascal) {
691 LOG_WARNING(Render_Vulkan, 686 LOG_WARNING(Render_Vulkan,
692 "Pascal and older architectures have broken VK_KHR_push_descriptor"); 687 "Pascal and older architectures have broken VK_KHR_push_descriptor");
693 RemoveExtension(extensions.push_descriptor, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); 688 RemoveExtension(extensions.push_descriptor, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME);
diff --git a/src/video_core/vulkan_common/vulkan_device.h b/src/video_core/vulkan_common/vulkan_device.h
index 282a2925d..b213ed7dd 100644
--- a/src/video_core/vulkan_common/vulkan_device.h
+++ b/src/video_core/vulkan_common/vulkan_device.h
@@ -177,6 +177,15 @@ enum class FormatType { Linear, Optimal, Buffer };
177/// Subgroup size of the guest emulated hardware (Nvidia has 32 threads per subgroup). 177/// Subgroup size of the guest emulated hardware (Nvidia has 32 threads per subgroup).
178const u32 GuestWarpSize = 32; 178const u32 GuestWarpSize = 32;
179 179
180enum class NvidiaArchitecture {
181 Arch_KeplerOrOlder,
182 Arch_Maxwell,
183 Arch_Pascal,
184 Arch_Volta,
185 Arch_Turing,
186 Arch_AmpereOrNewer,
187};
188
180/// Handles data specific to a physical device. 189/// Handles data specific to a physical device.
181class Device { 190class Device {
182public: 191public:
@@ -670,6 +679,14 @@ public:
670 return false; 679 return false;
671 } 680 }
672 681
682 bool IsNvidia() const noexcept {
683 return properties.driver.driverID == VK_DRIVER_ID_NVIDIA_PROPRIETARY;
684 }
685
686 NvidiaArchitecture GetNvidiaArch() const noexcept {
687 return nvidia_arch;
688 }
689
673private: 690private:
674 /// Checks if the physical device is suitable and configures the object state 691 /// Checks if the physical device is suitable and configures the object state
675 /// with all necessary info about its properties. 692 /// with all necessary info about its properties.
@@ -788,6 +805,7 @@ private:
788 bool supports_conditional_barriers{}; ///< Allows barriers in conditional control flow. 805 bool supports_conditional_barriers{}; ///< Allows barriers in conditional control flow.
789 u64 device_access_memory{}; ///< Total size of device local memory in bytes. 806 u64 device_access_memory{}; ///< Total size of device local memory in bytes.
790 u32 sets_per_pool{}; ///< Sets per Description Pool 807 u32 sets_per_pool{}; ///< Sets per Description Pool
808 NvidiaArchitecture nvidia_arch{NvidiaArchitecture::Arch_AmpereOrNewer};
791 809
792 // Telemetry parameters 810 // Telemetry parameters
793 std::set<std::string, std::less<>> supported_extensions; ///< Reported Vulkan extensions. 811 std::set<std::string, std::less<>> supported_extensions; ///< Reported Vulkan extensions.
diff --git a/src/yuzu/configuration/configure_vibration.cpp b/src/yuzu/configuration/configure_vibration.cpp
index d765e808a..68c28b320 100644
--- a/src/yuzu/configuration/configure_vibration.cpp
+++ b/src/yuzu/configuration/configure_vibration.cpp
@@ -89,7 +89,7 @@ void ConfigureVibration::VibrateController(Core::HID::ControllerTriggerType type
89 89
90 auto& player = Settings::values.players.GetValue()[player_index]; 90 auto& player = Settings::values.players.GetValue()[player_index];
91 auto controller = hid_core.GetEmulatedControllerByIndex(player_index); 91 auto controller = hid_core.GetEmulatedControllerByIndex(player_index);
92 const int vibration_strenght = vibration_spinboxes[player_index]->value(); 92 const int vibration_strength = vibration_spinboxes[player_index]->value();
93 const auto& buttons = controller->GetButtonsValues(); 93 const auto& buttons = controller->GetButtonsValues();
94 94
95 bool button_is_pressed = false; 95 bool button_is_pressed = false;
@@ -105,10 +105,10 @@ void ConfigureVibration::VibrateController(Core::HID::ControllerTriggerType type
105 return; 105 return;
106 } 106 }
107 107
108 const int old_vibration_enabled = player.vibration_enabled; 108 const bool old_vibration_enabled = player.vibration_enabled;
109 const bool old_vibration_strenght = player.vibration_strength; 109 const int old_vibration_strength = player.vibration_strength;
110 player.vibration_enabled = true; 110 player.vibration_enabled = true;
111 player.vibration_strength = vibration_strenght; 111 player.vibration_strength = vibration_strength;
112 112
113 const Core::HID::VibrationValue vibration{ 113 const Core::HID::VibrationValue vibration{
114 .low_amplitude = 1.0f, 114 .low_amplitude = 1.0f,
@@ -121,7 +121,7 @@ void ConfigureVibration::VibrateController(Core::HID::ControllerTriggerType type
121 121
122 // Restore previous values 122 // Restore previous values
123 player.vibration_enabled = old_vibration_enabled; 123 player.vibration_enabled = old_vibration_enabled;
124 player.vibration_strength = old_vibration_strenght; 124 player.vibration_strength = old_vibration_strength;
125} 125}
126 126
127void ConfigureVibration::StopVibrations() { 127void ConfigureVibration::StopVibrations() {