42 files changed, 896 insertions, 938 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7a49318aa..3282ae9d4 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -159,15 +159,15 @@ macro(yuzu_find_packages)
     # Capitalization matters here. We need the naming to match the generated paths from Conan
     set(REQUIRED_LIBS
     # Cmake Pkg Prefix  Version  Conan Pkg
-        "Boost 1.71 boost/1.72.0"
-        "Catch2 2.11 catch2/2.11.0"
+        "Boost 1.73 boost/1.73.0"
+        "Catch2 2.13 catch2/2.13.0"
         "fmt 7.0 fmt/7.0.1"
         # can't use until https://github.com/bincrafters/community/issues/1173
         #"libzip 1.5 libzip/1.5.2@bincrafters/stable"
         "lz4 1.8 lz4/1.9.2"
-        "nlohmann_json 3.7 nlohmann_json/3.7.3"
+        "nlohmann_json 3.8 nlohmann_json/3.8.0"
         "ZLIB 1.2 zlib/1.2.11"
-        "zstd 1.4 zstd/1.4.4"
+        "zstd 1.4 zstd/1.4.5"
     )

     foreach(PACKAGE ${REQUIRED_LIBS})
diff --git a/src/core/file_sys/mode.h b/src/core/file_sys/mode.h
index c95205668..2b4f21073 100644
--- a/src/core/file_sys/mode.h
+++ b/src/core/file_sys/mode.h
@@ -4,6 +4,7 @@
 
 #pragma once
 
+#include "common/common_funcs.h"
 #include "common/common_types.h"
 
 namespace FileSys {
@@ -11,13 +12,11 @@ namespace FileSys {
 enum class Mode : u32 {
     Read = 1,
     Write = 2,
-    ReadWrite = 3,
+    ReadWrite = Read | Write,
     Append = 4,
-    WriteAppend = 6,
+    WriteAppend = Write | Append,
 };
 
-inline u32 operator&(Mode lhs, Mode rhs) {
-    return static_cast<u32>(lhs) & static_cast<u32>(rhs);
-}
+DECLARE_ENUM_FLAG_OPERATORS(Mode)
 
 } // namespace FileSys
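Note: DECLARE_ENUM_FLAG_OPERATORS comes from the newly included common/common_funcs.h and replaces the hand-written operator& above. A minimal sketch of the pattern; the macro body here is assumed for illustration rather than copied from the header (the real macro defines the full operator set):

    #include <type_traits>

    #define DECLARE_ENUM_FLAG_OPERATORS(type)                                  \
        constexpr type operator|(type a, type b) {                             \
            using T = std::underlying_type_t<type>;                            \
            return static_cast<type>(static_cast<T>(a) | static_cast<T>(b));   \
        }                                                                      \
        constexpr type operator&(type a, type b) {                             \
            using T = std::underlying_type_t<type>;                            \
            return static_cast<type>(static_cast<T>(a) & static_cast<T>(b));   \
        }

    // Because operator& now yields the enum type instead of u32, call sites
    // wrap each mask in True() to get a bool, as seen in vfs_real.cpp below:
    template <typename Enum>
    constexpr bool True(Enum flag) {
        return static_cast<std::underlying_type_t<Enum>>(flag) != 0;
    }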
diff --git a/src/core/file_sys/patch_manager.cpp b/src/core/file_sys/patch_manager.cpp
index c47ff863e..729dbb5f4 100644
--- a/src/core/file_sys/patch_manager.cpp
+++ b/src/core/file_sys/patch_manager.cpp
@@ -288,8 +288,8 @@ std::optional<std::vector<Core::Memory::CheatEntry>> ReadCheatFileFromFolder(
     }
 
     Core::Memory::TextCheatParser parser;
-    return parser.Parse(
-        system, std::string_view(reinterpret_cast<const char* const>(data.data()), data.size()));
+    return parser.Parse(system,
+                        std::string_view(reinterpret_cast<const char*>(data.data()), data.size()));
 }
 
 } // Anonymous namespace
diff --git a/src/core/file_sys/registered_cache.cpp b/src/core/file_sys/registered_cache.cpp
index e94eed3b6..f831487dd 100644
--- a/src/core/file_sys/registered_cache.cpp
+++ b/src/core/file_sys/registered_cache.cpp
@@ -344,15 +344,18 @@ VirtualFile RegisteredCache::GetFileAtID(NcaID id) const {
 
 static std::optional<NcaID> CheckMapForContentRecord(const std::map<u64, CNMT>& map, u64 title_id,
                                                      ContentRecordType type) {
-    if (map.find(title_id) == map.end())
-        return {};
-
-    const auto& cnmt = map.at(title_id);
-
-    const auto iter = std::find_if(cnmt.GetContentRecords().begin(), cnmt.GetContentRecords().end(),
+    const auto cmnt_iter = map.find(title_id);
+    if (cmnt_iter == map.cend()) {
+        return std::nullopt;
+    }
+
+    const auto& cnmt = cmnt_iter->second;
+    const auto& content_records = cnmt.GetContentRecords();
+    const auto iter = std::find_if(content_records.cbegin(), content_records.cend(),
                                    [type](const ContentRecord& rec) { return rec.type == type; });
-    if (iter == cnmt.GetContentRecords().end())
-        return {};
+    if (iter == content_records.cend()) {
+        return std::nullopt;
+    }
 
     return std::make_optional(iter->nca_id);
 }
@@ -467,14 +470,16 @@ VirtualFile RegisteredCache::GetEntryUnparsed(u64 title_id, ContentRecordType ty
 
 std::optional<u32> RegisteredCache::GetEntryVersion(u64 title_id) const {
     const auto meta_iter = meta.find(title_id);
-    if (meta_iter != meta.end())
+    if (meta_iter != meta.cend()) {
         return meta_iter->second.GetTitleVersion();
+    }
 
     const auto yuzu_meta_iter = yuzu_meta.find(title_id);
-    if (yuzu_meta_iter != yuzu_meta.end())
+    if (yuzu_meta_iter != yuzu_meta.cend()) {
         return yuzu_meta_iter->second.GetTitleVersion();
+    }
 
-    return {};
+    return std::nullopt;
 }
 
 VirtualFile RegisteredCache::GetEntryRaw(u64 title_id, ContentRecordType type) const {
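Note: the rework above replaces a find() followed by at() (two tree walks plus a potential exception path) with a single find() whose iterator is reused, and spells the empty case out as std::nullopt. A generic sketch of the same idiom, illustrative only and not taken from the patch:

    #include <map>
    #include <optional>
    #include <string>

    std::optional<std::string> Lookup(const std::map<int, std::string>& m, int key) {
        const auto iter = m.find(key); // one lookup...
        if (iter == m.cend()) {
            return std::nullopt;       // ...the empty optional is explicit...
        }
        return iter->second;           // ...and the iterator is reused for access
    }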
diff --git a/src/core/file_sys/savedata_factory.cpp b/src/core/file_sys/savedata_factory.cpp
index adfd2c1a4..ba4efee3a 100644
--- a/src/core/file_sys/savedata_factory.cpp
+++ b/src/core/file_sys/savedata_factory.cpp
@@ -17,23 +17,23 @@ constexpr char SAVE_DATA_SIZE_FILENAME[] = ".yuzu_save_size";
 
 namespace {
 
-void PrintSaveDataDescriptorWarnings(SaveDataDescriptor meta) {
+void PrintSaveDataAttributeWarnings(SaveDataAttribute meta) {
     if (meta.type == SaveDataType::SystemSaveData || meta.type == SaveDataType::SaveData) {
         if (meta.zero_1 != 0) {
             LOG_WARNING(Service_FS,
-                        "Possibly incorrect SaveDataDescriptor, type is "
+                        "Possibly incorrect SaveDataAttribute, type is "
                         "SystemSaveData||SaveData but offset 0x28 is non-zero ({:016X}).",
                         meta.zero_1);
         }
         if (meta.zero_2 != 0) {
             LOG_WARNING(Service_FS,
-                        "Possibly incorrect SaveDataDescriptor, type is "
+                        "Possibly incorrect SaveDataAttribute, type is "
                         "SystemSaveData||SaveData but offset 0x30 is non-zero ({:016X}).",
                         meta.zero_2);
         }
         if (meta.zero_3 != 0) {
             LOG_WARNING(Service_FS,
-                        "Possibly incorrect SaveDataDescriptor, type is "
+                        "Possibly incorrect SaveDataAttribute, type is "
                         "SystemSaveData||SaveData but offset 0x38 is non-zero ({:016X}).",
                         meta.zero_3);
         }
@@ -41,33 +41,32 @@ void PrintSaveDataDescriptorWarnings(SaveDataDescriptor meta) {
 
     if (meta.type == SaveDataType::SystemSaveData && meta.title_id != 0) {
         LOG_WARNING(Service_FS,
-                    "Possibly incorrect SaveDataDescriptor, type is SystemSaveData but title_id is "
+                    "Possibly incorrect SaveDataAttribute, type is SystemSaveData but title_id is "
                     "non-zero ({:016X}).",
                     meta.title_id);
     }
 
     if (meta.type == SaveDataType::DeviceSaveData && meta.user_id != u128{0, 0}) {
         LOG_WARNING(Service_FS,
-                    "Possibly incorrect SaveDataDescriptor, type is DeviceSaveData but user_id is "
+                    "Possibly incorrect SaveDataAttribute, type is DeviceSaveData but user_id is "
                     "non-zero ({:016X}{:016X})",
                     meta.user_id[1], meta.user_id[0]);
     }
 }
 
-bool ShouldSaveDataBeAutomaticallyCreated(SaveDataSpaceId space, const SaveDataDescriptor& desc) {
-    return desc.type == SaveDataType::CacheStorage || desc.type == SaveDataType::TemporaryStorage ||
+bool ShouldSaveDataBeAutomaticallyCreated(SaveDataSpaceId space, const SaveDataAttribute& attr) {
+    return attr.type == SaveDataType::CacheStorage || attr.type == SaveDataType::TemporaryStorage ||
            (space == SaveDataSpaceId::NandUser && ///< Normal Save Data -- Current Title & User
-           (desc.type == SaveDataType::SaveData || desc.type == SaveDataType::DeviceSaveData) &&
-           desc.title_id == 0 && desc.save_id == 0);
+           (attr.type == SaveDataType::SaveData || attr.type == SaveDataType::DeviceSaveData) &&
+           attr.title_id == 0 && attr.save_id == 0);
 }
 
 } // Anonymous namespace
 
-std::string SaveDataDescriptor::DebugInfo() const {
-    return fmt::format("[type={:02X}, title_id={:016X}, user_id={:016X}{:016X}, "
-                       "save_id={:016X}, "
+std::string SaveDataAttribute::DebugInfo() const {
+    return fmt::format("[title_id={:016X}, user_id={:016X}{:016X}, save_id={:016X}, type={:02X}, "
                        "rank={}, index={}]",
-                       static_cast<u8>(type), title_id, user_id[1], user_id[0], save_id,
+                       title_id, user_id[1], user_id[0], save_id, static_cast<u8>(type),
                        static_cast<u8>(rank), index);
 }
 
@@ -80,8 +79,8 @@ SaveDataFactory::SaveDataFactory(VirtualDir save_directory) : dir(std::move(save
 SaveDataFactory::~SaveDataFactory() = default;
 
 ResultVal<VirtualDir> SaveDataFactory::Create(SaveDataSpaceId space,
-                                              const SaveDataDescriptor& meta) const {
-    PrintSaveDataDescriptorWarnings(meta);
+                                              const SaveDataAttribute& meta) const {
+    PrintSaveDataAttributeWarnings(meta);
 
     const auto save_directory =
         GetFullPath(space, meta.type, meta.title_id, meta.user_id, meta.save_id);
@@ -98,7 +97,7 @@ ResultVal<VirtualDir> SaveDataFactory::Create(SaveDataSpaceId space,
 }
 
 ResultVal<VirtualDir> SaveDataFactory::Open(SaveDataSpaceId space,
-                                            const SaveDataDescriptor& meta) const {
+                                            const SaveDataAttribute& meta) const {
 
     const auto save_directory =
         GetFullPath(space, meta.type, meta.title_id, meta.user_id, meta.save_id);
diff --git a/src/core/file_sys/savedata_factory.h b/src/core/file_sys/savedata_factory.h
index 991e57aa1..6625bbbd8 100644
--- a/src/core/file_sys/savedata_factory.h
+++ b/src/core/file_sys/savedata_factory.h
@@ -21,6 +21,7 @@ enum class SaveDataSpaceId : u8 {
     TemporaryStorage = 3,
     SdCardUser = 4,
     ProperSystem = 100,
+    SafeMode = 101,
 };
 
 enum class SaveDataType : u8 {
@@ -30,28 +31,50 @@ enum class SaveDataType : u8 {
     DeviceSaveData = 3,
     TemporaryStorage = 4,
     CacheStorage = 5,
+    SystemBcat = 6,
 };
 
 enum class SaveDataRank : u8 {
-    Primary,
-    Secondary,
+    Primary = 0,
+    Secondary = 1,
 };
 
-struct SaveDataDescriptor {
-    u64_le title_id;
+enum class SaveDataFlags : u32 {
+    None = (0 << 0),
+    KeepAfterResettingSystemSaveData = (1 << 0),
+    KeepAfterRefurbishment = (1 << 1),
+    KeepAfterResettingSystemSaveDataWithoutUserSaveData = (1 << 2),
+    NeedsSecureDelete = (1 << 3),
+};
+
+struct SaveDataAttribute {
+    u64 title_id;
     u128 user_id;
-    u64_le save_id;
+    u64 save_id;
     SaveDataType type;
     SaveDataRank rank;
-    u16_le index;
+    u16 index;
     INSERT_PADDING_BYTES(4);
-    u64_le zero_1;
-    u64_le zero_2;
-    u64_le zero_3;
+    u64 zero_1;
+    u64 zero_2;
+    u64 zero_3;
 
     std::string DebugInfo() const;
 };
-static_assert(sizeof(SaveDataDescriptor) == 0x40, "SaveDataDescriptor has incorrect size.");
+static_assert(sizeof(SaveDataAttribute) == 0x40, "SaveDataAttribute has incorrect size.");
+
+struct SaveDataExtraData {
+    SaveDataAttribute attr;
+    u64 owner_id;
+    s64 timestamp;
+    SaveDataFlags flags;
+    INSERT_PADDING_BYTES(4);
+    s64 available_size;
+    s64 journal_size;
+    s64 commit_id;
+    std::array<u8, 0x190> unused;
+};
+static_assert(sizeof(SaveDataExtraData) == 0x200, "SaveDataExtraData has incorrect size.");
 
 struct SaveDataSize {
     u64 normal;
@@ -64,8 +87,8 @@ public:
     explicit SaveDataFactory(VirtualDir dir);
     ~SaveDataFactory();
 
-    ResultVal<VirtualDir> Create(SaveDataSpaceId space, const SaveDataDescriptor& meta) const;
-    ResultVal<VirtualDir> Open(SaveDataSpaceId space, const SaveDataDescriptor& meta) const;
+    ResultVal<VirtualDir> Create(SaveDataSpaceId space, const SaveDataAttribute& meta) const;
+    ResultVal<VirtualDir> Open(SaveDataSpaceId space, const SaveDataAttribute& meta) const;
 
     VirtualDir GetSaveDataSpaceDirectory(SaveDataSpaceId space) const;
 
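Note: SaveDataAttribute mirrors a fixed 0x40-byte structure exchanged with guest code, which is why its size is guarded by a static_assert; the plain u64/u16 members replace the u64_le/u16_le spellings (equivalent on the little-endian targets yuzu builds for). A self-contained probe of the intended offsets, with the struct redeclared locally so the snippet compiles on its own (a hypothetical check, not part of the patch):

    #include <array>
    #include <cstddef>
    #include <cstdint>

    struct SaveDataAttributeMirror {
        std::uint64_t title_id;               // 0x00
        std::array<std::uint64_t, 2> user_id; // 0x08
        std::uint64_t save_id;                // 0x18
        std::uint8_t type;                    // 0x20
        std::uint8_t rank;                    // 0x21
        std::uint16_t index;                  // 0x22
        std::uint32_t padding;                // 0x24
        std::uint64_t zero_1;                 // 0x28 -- the same offsets named in the
        std::uint64_t zero_2;                 // 0x30    PrintSaveDataAttributeWarnings
        std::uint64_t zero_3;                 // 0x38    log messages above
    };
    static_assert(sizeof(SaveDataAttributeMirror) == 0x40, "layout drifted");
    static_assert(offsetof(SaveDataAttributeMirror, zero_1) == 0x28, "layout drifted");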
diff --git a/src/core/file_sys/vfs_real.cpp b/src/core/file_sys/vfs_real.cpp
index 96ce5957c..0db0091f6 100644
--- a/src/core/file_sys/vfs_real.cpp
+++ b/src/core/file_sys/vfs_real.cpp
@@ -18,20 +18,22 @@ static std::string ModeFlagsToString(Mode mode) {
     std::string mode_str;
 
     // Calculate the correct open mode for the file.
-    if (mode & Mode::Read && mode & Mode::Write) {
-        if (mode & Mode::Append)
+    if (True(mode & Mode::Read) && True(mode & Mode::Write)) {
+        if (True(mode & Mode::Append)) {
             mode_str = "a+";
-        else
+        } else {
             mode_str = "r+";
+        }
     } else {
-        if (mode & Mode::Read)
+        if (True(mode & Mode::Read)) {
             mode_str = "r";
-        else if (mode & Mode::Append)
+        } else if (True(mode & Mode::Append)) {
             mode_str = "a";
-        else if (mode & Mode::Write)
+        } else if (True(mode & Mode::Write)) {
             mode_str = "w";
-        else
+        } else {
             UNREACHABLE_MSG("Invalid file open mode: {:02X}", static_cast<u8>(mode));
+        }
     }
 
     mode_str += "b";
@@ -73,8 +75,9 @@ VirtualFile RealVfsFilesystem::OpenFile(std::string_view path_, Mode perms) {
         }
     }
 
-    if (!FileUtil::Exists(path) && (perms & Mode::WriteAppend) != 0)
+    if (!FileUtil::Exists(path) && True(perms & Mode::WriteAppend)) {
         FileUtil::CreateEmptyFile(path);
+    }
 
     auto backing = std::make_shared<FileUtil::IOFile>(path, ModeFlagsToString(perms).c_str());
     cache[path] = backing;
@@ -247,11 +250,11 @@ std::shared_ptr<VfsDirectory> RealVfsFile::GetContainingDirectory() const {
 }
 
 bool RealVfsFile::IsWritable() const {
-    return (perms & Mode::WriteAppend) != 0;
+    return True(perms & Mode::WriteAppend);
 }
 
 bool RealVfsFile::IsReadable() const {
-    return (perms & Mode::ReadWrite) != 0;
+    return True(perms & Mode::ReadWrite);
 }
 
 std::size_t RealVfsFile::Read(u8* data, std::size_t length, std::size_t offset) const {
@@ -319,8 +322,9 @@ RealVfsDirectory::RealVfsDirectory(RealVfsFilesystem& base_, const std::string&
       path_components(FileUtil::SplitPathComponents(path)),
       parent_components(FileUtil::SliceVector(path_components, 0, path_components.size() - 1)),
       perms(perms_) {
-    if (!FileUtil::Exists(path) && perms & Mode::WriteAppend)
+    if (!FileUtil::Exists(path) && True(perms & Mode::WriteAppend)) {
         FileUtil::CreateDir(path);
+    }
 }
 
 RealVfsDirectory::~RealVfsDirectory() = default;
@@ -371,11 +375,11 @@ std::vector<std::shared_ptr<VfsDirectory>> RealVfsDirectory::GetSubdirectories()
 }
 
 bool RealVfsDirectory::IsWritable() const {
-    return (perms & Mode::WriteAppend) != 0;
+    return True(perms & Mode::WriteAppend);
 }
 
 bool RealVfsDirectory::IsReadable() const {
-    return (perms & Mode::ReadWrite) != 0;
+    return True(perms & Mode::ReadWrite);
}
 
 std::string RealVfsDirectory::GetName() const {
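Note: with the scoped-enum operators, `mode & Mode::Read` now yields Mode rather than u32, so every mask is wrapped in True(). The branch order in ModeFlagsToString is what turns flag combinations into C stdio open strings. A standalone replica of that logic for illustration, with local definitions so it runs by itself (the shipped function additionally hits UNREACHABLE_MSG when no flag is set):

    #include <cstdio>
    #include <string>

    enum class Mode : unsigned { Read = 1, Write = 2, ReadWrite = 3, Append = 4, WriteAppend = 6 };
    constexpr Mode operator&(Mode a, Mode b) {
        return static_cast<Mode>(static_cast<unsigned>(a) & static_cast<unsigned>(b));
    }
    constexpr bool True(Mode m) { return static_cast<unsigned>(m) != 0; }

    std::string ModeFlagsToString(Mode mode) { // replica of the function above
        std::string mode_str;
        if (True(mode & Mode::Read) && True(mode & Mode::Write)) {
            mode_str = True(mode & Mode::Append) ? "a+" : "r+";
        } else if (True(mode & Mode::Read)) {
            mode_str = "r";
        } else if (True(mode & Mode::Append)) {
            mode_str = "a";
        } else if (True(mode & Mode::Write)) {
            mode_str = "w";
        }
        return mode_str + "b";
    }

    int main() {
        std::printf("%s\n", ModeFlagsToString(Mode::ReadWrite).c_str());   // prints "r+b"
        std::printf("%s\n", ModeFlagsToString(Mode::WriteAppend).c_str()); // prints "ab"
    }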
diff --git a/src/core/hle/service/acc/profile_manager.cpp b/src/core/hle/service/acc/profile_manager.cpp
index eb8c81645..a98d57b5c 100644
--- a/src/core/hle/service/acc/profile_manager.cpp
+++ b/src/core/hle/service/acc/profile_manager.cpp
@@ -58,7 +58,7 @@ ProfileManager::~ProfileManager() {
 /// internal management of the users profiles
 std::optional<std::size_t> ProfileManager::AddToProfiles(const ProfileInfo& profile) {
     if (user_count >= MAX_USERS) {
-        return {};
+        return std::nullopt;
     }
     profiles[user_count] = profile;
     return user_count++;
@@ -101,13 +101,14 @@ ResultCode ProfileManager::CreateNewUser(UUID uuid, const ProfileUsername& usern
                     [&uuid](const ProfileInfo& profile) { return uuid == profile.user_uuid; })) {
         return ERROR_USER_ALREADY_EXISTS;
     }
-    ProfileInfo profile;
-    profile.user_uuid = uuid;
-    profile.username = username;
-    profile.data = {};
-    profile.creation_time = 0x0;
-    profile.is_open = false;
-    return AddUser(profile);
+
+    return AddUser({
+        .user_uuid = uuid,
+        .username = username,
+        .creation_time = 0,
+        .data = {},
+        .is_open = false,
+    });
 }
 
 /// Creates a new user on the system. This function allows a much simpler method of registration
@@ -126,7 +127,7 @@ ResultCode ProfileManager::CreateNewUser(UUID uuid, const std::string& username)
 
 std::optional<UUID> ProfileManager::GetUser(std::size_t index) const {
     if (index >= MAX_USERS) {
-        return {};
+        return std::nullopt;
     }
 
     return profiles[index].user_uuid;
@@ -135,13 +136,13 @@ std::optional<UUID> ProfileManager::GetUser(std::size_t index) const {
 /// Returns a users profile index based on their user id.
 std::optional<std::size_t> ProfileManager::GetUserIndex(const UUID& uuid) const {
     if (!uuid) {
-        return {};
+        return std::nullopt;
     }
 
     const auto iter = std::find_if(profiles.begin(), profiles.end(),
                                    [&uuid](const ProfileInfo& p) { return p.user_uuid == uuid; });
     if (iter == profiles.end()) {
-        return {};
+        return std::nullopt;
     }
 
     return static_cast<std::size_t>(std::distance(profiles.begin(), iter));
@@ -339,7 +340,13 @@ void ProfileManager::ParseUserSaveFile() {
             continue;
         }
 
-        AddUser({user.uuid, user.username, user.timestamp, user.extra_data, false});
+        AddUser({
+            .user_uuid = user.uuid,
+            .username = user.username,
+            .creation_time = user.timestamp,
+            .data = user.extra_data,
+            .is_open = false,
+        });
     }
 
     std::stable_partition(profiles.begin(), profiles.end(),
@@ -350,11 +357,13 @@ void ProfileManager::WriteUserSaveFile() {
     ProfileDataRaw raw{};
 
     for (std::size_t i = 0; i < MAX_USERS; ++i) {
-        raw.users[i].username = profiles[i].username;
-        raw.users[i].uuid2 = profiles[i].user_uuid;
-        raw.users[i].uuid = profiles[i].user_uuid;
-        raw.users[i].timestamp = profiles[i].creation_time;
-        raw.users[i].extra_data = profiles[i].data;
+        raw.users[i] = {
+            .uuid = profiles[i].user_uuid,
+            .uuid2 = profiles[i].user_uuid,
+            .timestamp = profiles[i].creation_time,
+            .username = profiles[i].username,
+            .extra_data = profiles[i].data,
+        };
    }
 
     const auto raw_path =
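Note: these call sites now use C++20 designated initializers, which require designators to follow the member declaration order; that constraint is why the WriteUserSaveFile block names uuid and uuid2 before username even though the old assignment sequence started with username. A minimal illustration with a hypothetical mirror struct:

    struct ProfileInfoSketch { // stand-in; same ordering idea as ProfileInfo
        int user_uuid;
        int username;
        long creation_time;
        int data;
        bool is_open;
    };

    ProfileInfoSketch ok{
        .user_uuid = 1,
        .username = 2,
        .creation_time = 0,
        .data = 0,
        .is_open = false,
    };
    // ProfileInfoSketch bad{.username = 2, .user_uuid = 1}; // ill-formed:
    // designators must appear in declaration order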
diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp
index ceed20609..55a1edf1a 100644
--- a/src/core/hle/service/am/am.cpp
+++ b/src/core/hle/service/am/am.cpp
@@ -1342,12 +1342,12 @@ void IApplicationFunctions::EnsureSaveData(Kernel::HLERequestContext& ctx) {
 
     LOG_DEBUG(Service_AM, "called, uid={:016X}{:016X}", user_id[1], user_id[0]);
 
-    FileSys::SaveDataDescriptor descriptor{};
-    descriptor.title_id = system.CurrentProcess()->GetTitleID();
-    descriptor.user_id = user_id;
-    descriptor.type = FileSys::SaveDataType::SaveData;
+    FileSys::SaveDataAttribute attribute{};
+    attribute.title_id = system.CurrentProcess()->GetTitleID();
+    attribute.user_id = user_id;
+    attribute.type = FileSys::SaveDataType::SaveData;
     const auto res = system.GetFileSystemController().CreateSaveData(
-        FileSys::SaveDataSpaceId::NandUser, descriptor);
+        FileSys::SaveDataSpaceId::NandUser, attribute);
 
     IPC::ResponseBuilder rb{ctx, 4};
     rb.Push(res.Code());
diff --git a/src/core/hle/service/audio/audout_u.cpp b/src/core/hle/service/audio/audout_u.cpp
index 106e89743..dd80dd1dc 100644
--- a/src/core/hle/service/audio/audout_u.cpp
+++ b/src/core/hle/service/audio/audout_u.cpp
@@ -71,7 +71,7 @@ public:
 
         stream = audio_core.OpenStream(system.CoreTiming(), audio_params.sample_rate,
                                        audio_params.channel_count, std::move(unique_name),
-                                       [=]() { buffer_event.writable->Signal(); });
+                                       [this] { buffer_event.writable->Signal(); });
     }
 
 private:
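Note: `[=]` captured `this` implicitly (a spelling C++20 deprecates) while looking like a by-value copy; `[this]` states the actual dependency, that the callback touches members of a live object. A minimal illustration with a hypothetical class:

    #include <functional>

    struct BufferEvent {
        int signals = 0;
        std::function<void()> MakeCallback() {
            return [this] { ++signals; }; // captures only the object pointer;
                                          // the lambda must not outlive *this
        }
    };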
diff --git a/src/core/hle/service/filesystem/filesystem.cpp b/src/core/hle/service/filesystem/filesystem.cpp
index c66124998..4490f8e4c 100644
--- a/src/core/hle/service/filesystem/filesystem.cpp
+++ b/src/core/hle/service/filesystem/filesystem.cpp
@@ -311,7 +311,7 @@ ResultVal<FileSys::VirtualFile> FileSystemController::OpenRomFS(
 }
 
 ResultVal<FileSys::VirtualDir> FileSystemController::CreateSaveData(
-    FileSys::SaveDataSpaceId space, const FileSys::SaveDataDescriptor& save_struct) const {
+    FileSys::SaveDataSpaceId space, const FileSys::SaveDataAttribute& save_struct) const {
     LOG_TRACE(Service_FS, "Creating Save Data for space_id={:01X}, save_struct={}",
               static_cast<u8>(space), save_struct.DebugInfo());
 
@@ -323,15 +323,15 @@ ResultVal<FileSys::VirtualDir> FileSystemController::CreateSaveData(
 }
 
 ResultVal<FileSys::VirtualDir> FileSystemController::OpenSaveData(
-    FileSys::SaveDataSpaceId space, const FileSys::SaveDataDescriptor& descriptor) const {
+    FileSys::SaveDataSpaceId space, const FileSys::SaveDataAttribute& attribute) const {
     LOG_TRACE(Service_FS, "Opening Save Data for space_id={:01X}, save_struct={}",
-              static_cast<u8>(space), descriptor.DebugInfo());
+              static_cast<u8>(space), attribute.DebugInfo());
 
     if (save_data_factory == nullptr) {
         return FileSys::ERROR_ENTITY_NOT_FOUND;
     }
 
-    return save_data_factory->Open(space, descriptor);
+    return save_data_factory->Open(space, attribute);
 }
 
 ResultVal<FileSys::VirtualDir> FileSystemController::OpenSaveDataSpace(
diff --git a/src/core/hle/service/filesystem/filesystem.h b/src/core/hle/service/filesystem/filesystem.h
index 1b0a6a949..6dbbf0b2b 100644
--- a/src/core/hle/service/filesystem/filesystem.h
+++ b/src/core/hle/service/filesystem/filesystem.h
@@ -31,7 +31,7 @@ enum class SaveDataSpaceId : u8;
 enum class SaveDataType : u8;
 enum class StorageId : u8;
 
-struct SaveDataDescriptor;
+struct SaveDataAttribute;
 struct SaveDataSize;
 } // namespace FileSys
 
@@ -69,9 +69,9 @@ public:
     ResultVal<FileSys::VirtualFile> OpenRomFS(u64 title_id, FileSys::StorageId storage_id,
                                               FileSys::ContentRecordType type) const;
     ResultVal<FileSys::VirtualDir> CreateSaveData(
-        FileSys::SaveDataSpaceId space, const FileSys::SaveDataDescriptor& save_struct) const;
+        FileSys::SaveDataSpaceId space, const FileSys::SaveDataAttribute& save_struct) const;
     ResultVal<FileSys::VirtualDir> OpenSaveData(
-        FileSys::SaveDataSpaceId space, const FileSys::SaveDataDescriptor& save_struct) const;
+        FileSys::SaveDataSpaceId space, const FileSys::SaveDataAttribute& save_struct) const;
     ResultVal<FileSys::VirtualDir> OpenSaveDataSpace(FileSys::SaveDataSpaceId space) const;
     ResultVal<FileSys::VirtualDir> OpenSDMC() const;
     ResultVal<FileSys::VirtualDir> OpenBISPartition(FileSys::BisPartitionId id) const;
diff --git a/src/core/hle/service/filesystem/fsp_srv.cpp b/src/core/hle/service/filesystem/fsp_srv.cpp
index 20c331b77..26fd87f58 100644
--- a/src/core/hle/service/filesystem/fsp_srv.cpp
+++ b/src/core/hle/service/filesystem/fsp_srv.cpp
@@ -696,8 +696,8 @@ FSP_SRV::FSP_SRV(FileSystemController& fsc, const Core::Reporter& reporter)
         {67, nullptr, "FindSaveDataWithFilter"},
         {68, nullptr, "OpenSaveDataInfoReaderBySaveDataFilter"},
         {69, nullptr, "ReadSaveDataFileSystemExtraDataBySaveDataAttribute"},
-        {70, nullptr, "WriteSaveDataFileSystemExtraDataBySaveDataAttribute"},
-        {71, nullptr, "ReadSaveDataFileSystemExtraDataWithMaskBySaveDataAttribute"},
+        {70, &FSP_SRV::WriteSaveDataFileSystemExtraDataBySaveDataAttribute, "WriteSaveDataFileSystemExtraDataBySaveDataAttribute"},
+        {71, &FSP_SRV::ReadSaveDataFileSystemExtraDataWithMaskBySaveDataAttribute, "ReadSaveDataFileSystemExtraDataWithMaskBySaveDataAttribute"},
         {80, nullptr, "OpenSaveDataMetaFile"},
         {81, nullptr, "OpenSaveDataTransferManager"},
         {82, nullptr, "OpenSaveDataTransferManagerVersion2"},
@@ -812,7 +812,7 @@ void FSP_SRV::OpenSdCardFileSystem(Kernel::HLERequestContext& ctx) {
 void FSP_SRV::CreateSaveDataFileSystem(Kernel::HLERequestContext& ctx) {
     IPC::RequestParser rp{ctx};
 
-    auto save_struct = rp.PopRaw<FileSys::SaveDataDescriptor>();
+    auto save_struct = rp.PopRaw<FileSys::SaveDataAttribute>();
     [[maybe_unused]] auto save_create_struct = rp.PopRaw<std::array<u8, 0x40>>();
     u128 uid = rp.PopRaw<u128>();
 
@@ -826,17 +826,18 @@ void FSP_SRV::CreateSaveDataFileSystem(Kernel::HLERequestContext& ctx) {
 }
 
 void FSP_SRV::OpenSaveDataFileSystem(Kernel::HLERequestContext& ctx) {
-    LOG_INFO(Service_FS, "called.");
+    IPC::RequestParser rp{ctx};
 
     struct Parameters {
-        FileSys::SaveDataSpaceId save_data_space_id;
-        FileSys::SaveDataDescriptor descriptor;
+        FileSys::SaveDataSpaceId space_id;
+        FileSys::SaveDataAttribute attribute;
     };
 
-    IPC::RequestParser rp{ctx};
     const auto parameters = rp.PopRaw<Parameters>();
 
-    auto dir = fsc.OpenSaveData(parameters.save_data_space_id, parameters.descriptor);
+    LOG_INFO(Service_FS, "called.");
+
+    auto dir = fsc.OpenSaveData(parameters.space_id, parameters.attribute);
     if (dir.Failed()) {
         IPC::ResponseBuilder rb{ctx, 2, 0, 0};
         rb.Push(FileSys::ERROR_ENTITY_NOT_FOUND);
@@ -844,13 +845,18 @@ void FSP_SRV::OpenSaveDataFileSystem(Kernel::HLERequestContext& ctx) {
     }
 
     FileSys::StorageId id;
-    if (parameters.save_data_space_id == FileSys::SaveDataSpaceId::NandUser) {
+
+    switch (parameters.space_id) {
+    case FileSys::SaveDataSpaceId::NandUser:
         id = FileSys::StorageId::NandUser;
-    } else if (parameters.save_data_space_id == FileSys::SaveDataSpaceId::SdCardSystem ||
-               parameters.save_data_space_id == FileSys::SaveDataSpaceId::SdCardUser) {
+        break;
+    case FileSys::SaveDataSpaceId::SdCardSystem:
+    case FileSys::SaveDataSpaceId::SdCardUser:
         id = FileSys::StorageId::SdCard;
-    } else {
+        break;
+    case FileSys::SaveDataSpaceId::NandSystem:
         id = FileSys::StorageId::NandSystem;
+        break;
     }
 
     auto filesystem =
@@ -876,22 +882,31 @@ void FSP_SRV::OpenSaveDataInfoReaderBySaveDataSpaceId(Kernel::HLERequestContext&
     rb.PushIpcInterface<ISaveDataInfoReader>(std::make_shared<ISaveDataInfoReader>(space, fsc));
 }
 
-void FSP_SRV::SetGlobalAccessLogMode(Kernel::HLERequestContext& ctx) {
-    IPC::RequestParser rp{ctx};
-    log_mode = rp.PopEnum<LogMode>();
-
-    LOG_DEBUG(Service_FS, "called, log_mode={:08X}", static_cast<u32>(log_mode));
+void FSP_SRV::WriteSaveDataFileSystemExtraDataBySaveDataAttribute(Kernel::HLERequestContext& ctx) {
+    LOG_WARNING(Service_FS, "(STUBBED) called.");
 
     IPC::ResponseBuilder rb{ctx, 2};
     rb.Push(RESULT_SUCCESS);
 }
 
-void FSP_SRV::GetGlobalAccessLogMode(Kernel::HLERequestContext& ctx) {
-    LOG_DEBUG(Service_FS, "called");
+void FSP_SRV::ReadSaveDataFileSystemExtraDataWithMaskBySaveDataAttribute(
+    Kernel::HLERequestContext& ctx) {
+    IPC::RequestParser rp{ctx};
+
+    struct Parameters {
+        FileSys::SaveDataSpaceId space_id;
+        FileSys::SaveDataAttribute attribute;
+    };
+
+    const auto parameters = rp.PopRaw<Parameters>();
+    // Stub this to None for now, backend needs an impl to read/write the SaveDataExtraData
+    constexpr auto flags = static_cast<u32>(FileSys::SaveDataFlags::None);
+
+    LOG_WARNING(Service_FS, "(STUBBED) called, flags={}", flags);
 
     IPC::ResponseBuilder rb{ctx, 3};
     rb.Push(RESULT_SUCCESS);
-    rb.PushEnum(log_mode);
+    rb.Push(flags);
 }
 
 void FSP_SRV::OpenDataStorageByCurrentProcess(Kernel::HLERequestContext& ctx) {
@@ -966,6 +981,24 @@ void FSP_SRV::OpenPatchDataStorageByCurrentProcess(Kernel::HLERequestContext& ct
     rb.Push(FileSys::ERROR_ENTITY_NOT_FOUND);
 }
 
+void FSP_SRV::SetGlobalAccessLogMode(Kernel::HLERequestContext& ctx) {
+    IPC::RequestParser rp{ctx};
+    log_mode = rp.PopEnum<LogMode>();
+
+    LOG_DEBUG(Service_FS, "called, log_mode={:08X}", static_cast<u32>(log_mode));
+
+    IPC::ResponseBuilder rb{ctx, 2};
+    rb.Push(RESULT_SUCCESS);
+}
+
+void FSP_SRV::GetGlobalAccessLogMode(Kernel::HLERequestContext& ctx) {
+    LOG_DEBUG(Service_FS, "called");
+
+    IPC::ResponseBuilder rb{ctx, 3};
+    rb.Push(RESULT_SUCCESS);
+    rb.PushEnum(log_mode);
+}
+
 void FSP_SRV::OutputAccessLogToSdCard(Kernel::HLERequestContext& ctx) {
     const auto raw = ctx.ReadBuffer();
     auto log = Common::StringFromFixedZeroTerminatedBuffer(
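Note: both reworked handlers read their arguments by declaring a local `Parameters` aggregate and popping it raw off the IPC request, so the wire layout is spelled out in one place. A standalone analogue of that idea, with a hypothetical buffer helper rather than yuzu's IPC API (the 7 padding bytes are an assumption following from u64 alignment of the attribute):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Reads a trivially copyable T out of a byte buffer, as PopRaw does conceptually.
    template <typename T>
    T PopRaw(const std::vector<std::uint8_t>& buffer, std::size_t offset = 0) {
        T value{};
        std::memcpy(&value, buffer.data() + offset, sizeof(T)); // avoids aliasing UB
        return value;
    }

    struct Parameters {
        std::uint8_t space_id;         // SaveDataSpaceId is a u8 enum
        std::uint8_t padding[7];       // assumed: alignment up to the u64-aligned attribute
        std::uint64_t attribute_start; // first word of the 0x40-byte attribute
    };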
diff --git a/src/core/hle/service/filesystem/fsp_srv.h b/src/core/hle/service/filesystem/fsp_srv.h
index dfb3e395b..4964e874e 100644
--- a/src/core/hle/service/filesystem/fsp_srv.h
+++ b/src/core/hle/service/filesystem/fsp_srv.h
@@ -43,11 +43,13 @@ private:
     void OpenSaveDataFileSystem(Kernel::HLERequestContext& ctx);
     void OpenReadOnlySaveDataFileSystem(Kernel::HLERequestContext& ctx);
     void OpenSaveDataInfoReaderBySaveDataSpaceId(Kernel::HLERequestContext& ctx);
-    void SetGlobalAccessLogMode(Kernel::HLERequestContext& ctx);
-    void GetGlobalAccessLogMode(Kernel::HLERequestContext& ctx);
+    void WriteSaveDataFileSystemExtraDataBySaveDataAttribute(Kernel::HLERequestContext& ctx);
+    void ReadSaveDataFileSystemExtraDataWithMaskBySaveDataAttribute(Kernel::HLERequestContext& ctx);
     void OpenDataStorageByCurrentProcess(Kernel::HLERequestContext& ctx);
     void OpenDataStorageByDataId(Kernel::HLERequestContext& ctx);
     void OpenPatchDataStorageByCurrentProcess(Kernel::HLERequestContext& ctx);
+    void SetGlobalAccessLogMode(Kernel::HLERequestContext& ctx);
+    void GetGlobalAccessLogMode(Kernel::HLERequestContext& ctx);
     void OutputAccessLogToSdCard(Kernel::HLERequestContext& ctx);
     void GetAccessLogVersionInfo(Kernel::HLERequestContext& ctx);
     void OpenMultiCommitManager(Kernel::HLERequestContext& ctx);
diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp
index 64a526b9e..d8cd10e31 100644
--- a/src/core/hle/service/ldr/ldr.cpp
+++ b/src/core/hle/service/ldr/ldr.cpp
@@ -310,7 +310,7 @@ public:
 
     ResultVal<VAddr> MapProcessCodeMemory(Kernel::Process* process, VAddr baseAddress,
                                           u64 size) const {
-        for (int retry{}; retry < MAXIMUM_MAP_RETRIES; retry++) {
+        for (std::size_t retry = 0; retry < MAXIMUM_MAP_RETRIES; retry++) {
             auto& page_table{process->PageTable()};
             const VAddr addr{GetRandomMapRegion(page_table, size)};
             const ResultCode result{page_table.MapProcessCodeMemory(addr, baseAddress, size)};
@@ -331,8 +331,7 @@ public:
 
     ResultVal<VAddr> MapNro(Kernel::Process* process, VAddr nro_addr, std::size_t nro_size,
                             VAddr bss_addr, std::size_t bss_size, std::size_t size) const {
-
-        for (int retry{}; retry < MAXIMUM_MAP_RETRIES; retry++) {
+        for (std::size_t retry = 0; retry < MAXIMUM_MAP_RETRIES; retry++) {
             auto& page_table{process->PageTable()};
             VAddr addr{};
 
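Note: the nvhost_as_gpu hunks below replace the magic `params.flags & 1` test with a scoped AddressSpaceFlags enum and route all mappings through Map/MapAllocate/Unmap plus the new FindBufferMap/AddBufferMap/RemoveBufferMap bookkeeping. The header side of that change (nvhost_as_gpu.h) is outside this excerpt; a plausible shape for the enum, with the bit values assumed from the call sites rather than quoted from the header:

    // Assumed definition -- not shown in this excerpt:
    enum class AddressSpaceFlags : u32 {
        None = 0x0,
        FixedOffset = 0x1, // replaces the old `params.flags & 1`
        Remap = 0x100,
    };
    DECLARE_ENUM_FLAG_OPERATORS(AddressSpaceFlags)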
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
index 195421cc0..d4ba88147 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
@@ -16,11 +16,12 @@
 #include "video_core/renderer_base.h"
 
 namespace Service::Nvidia::Devices {
+
 namespace NvErrCodes {
-enum {
-    InvalidNmapHandle = -22,
-};
-}
+constexpr u32 Success{};
+constexpr u32 OutOfMemory{static_cast<u32>(-12)};
+constexpr u32 InvalidInput{static_cast<u32>(-22)};
+} // namespace NvErrCodes
 
 nvhost_as_gpu::nvhost_as_gpu(Core::System& system, std::shared_ptr<nvmap> nvmap_dev)
     : nvdevice(system), nvmap_dev(std::move(nvmap_dev)) {}
@@ -49,8 +50,9 @@ u32 nvhost_as_gpu::ioctl(Ioctl command, const std::vector<u8>& input, const std:
         break;
     }
 
-    if (static_cast<IoctlCommand>(command.cmd.Value()) == IoctlCommand::IocRemapCommand)
+    if (static_cast<IoctlCommand>(command.cmd.Value()) == IoctlCommand::IocRemapCommand) {
         return Remap(input, output);
+    }
 
     UNIMPLEMENTED_MSG("Unimplemented ioctl command");
     return 0;
@@ -59,6 +61,7 @@ u32 nvhost_as_gpu::ioctl(Ioctl command, const std::vector<u8>& input, const std:
 u32 nvhost_as_gpu::InitalizeEx(const std::vector<u8>& input, std::vector<u8>& output) {
     IoctlInitalizeEx params{};
     std::memcpy(&params, input.data(), input.size());
+
     LOG_WARNING(Service_NVDRV, "(STUBBED) called, big_page_size=0x{:X}", params.big_page_size);
 
     return 0;
@@ -67,53 +70,61 @@ u32 nvhost_as_gpu::InitalizeEx(const std::vector<u8>& input, std::vector<u8>& ou
 u32 nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<u8>& output) {
     IoctlAllocSpace params{};
     std::memcpy(&params, input.data(), input.size());
+
     LOG_DEBUG(Service_NVDRV, "called, pages={:X}, page_size={:X}, flags={:X}", params.pages,
               params.page_size, params.flags);
 
-    auto& gpu = system.GPU();
-    const u64 size{static_cast<u64>(params.pages) * static_cast<u64>(params.page_size)};
-    if (params.flags & 1) {
-        params.offset = gpu.MemoryManager().AllocateSpace(params.offset, size, 1);
+    const auto size{static_cast<u64>(params.pages) * static_cast<u64>(params.page_size)};
+    if ((params.flags & AddressSpaceFlags::FixedOffset) != AddressSpaceFlags::None) {
+        params.offset = *system.GPU().MemoryManager().AllocateFixed(params.offset, size);
     } else {
-        params.offset = gpu.MemoryManager().AllocateSpace(size, params.align);
+        params.offset = system.GPU().MemoryManager().Allocate(size, params.align);
+    }
+
+    auto result{NvErrCodes::Success};
+    if (!params.offset) {
+        LOG_CRITICAL(Service_NVDRV, "allocation failed for size {}", size);
+        result = NvErrCodes::OutOfMemory;
     }
 
     std::memcpy(output.data(), &params, output.size());
-    return 0;
+    return result;
 }
 
 u32 nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& output) {
-    std::size_t num_entries = input.size() / sizeof(IoctlRemapEntry);
+    const auto num_entries = input.size() / sizeof(IoctlRemapEntry);
 
-    LOG_WARNING(Service_NVDRV, "(STUBBED) called, num_entries=0x{:X}", num_entries);
+    LOG_DEBUG(Service_NVDRV, "called, num_entries=0x{:X}", num_entries);
 
+    auto result{NvErrCodes::Success};
     std::vector<IoctlRemapEntry> entries(num_entries);
     std::memcpy(entries.data(), input.data(), input.size());
 
-    auto& gpu = system.GPU();
     for (const auto& entry : entries) {
-        LOG_WARNING(Service_NVDRV, "remap entry, offset=0x{:X} handle=0x{:X} pages=0x{:X}",
-                    entry.offset, entry.nvmap_handle, entry.pages);
-        GPUVAddr offset = static_cast<GPUVAddr>(entry.offset) << 0x10;
-        auto object = nvmap_dev->GetObject(entry.nvmap_handle);
+        LOG_DEBUG(Service_NVDRV, "remap entry, offset=0x{:X} handle=0x{:X} pages=0x{:X}",
+                  entry.offset, entry.nvmap_handle, entry.pages);
+
+        const auto object{nvmap_dev->GetObject(entry.nvmap_handle)};
         if (!object) {
-            LOG_CRITICAL(Service_NVDRV, "nvmap {} is an invalid handle!", entry.nvmap_handle);
-            std::memcpy(output.data(), entries.data(), output.size());
-            return static_cast<u32>(NvErrCodes::InvalidNmapHandle);
+            LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", entry.nvmap_handle);
+            result = NvErrCodes::InvalidInput;
+            break;
         }
 
-        ASSERT(object->status == nvmap::Object::Status::Allocated);
-
-        const u64 size = static_cast<u64>(entry.pages) << 0x10;
-        ASSERT(size <= object->size);
-        const u64 map_offset = static_cast<u64>(entry.map_offset) << 0x10;
-
-        const GPUVAddr returned =
-            gpu.MemoryManager().MapBufferEx(object->addr + map_offset, offset, size);
-        ASSERT(returned == offset);
+        const auto offset{static_cast<GPUVAddr>(entry.offset) << 0x10};
+        const auto size{static_cast<u64>(entry.pages) << 0x10};
+        const auto map_offset{static_cast<u64>(entry.map_offset) << 0x10};
+        const auto addr{system.GPU().MemoryManager().Map(object->addr + map_offset, offset, size)};
+
+        if (!addr) {
+            LOG_CRITICAL(Service_NVDRV, "map returned an invalid address!");
+            result = NvErrCodes::InvalidInput;
+            break;
+        }
     }
+
     std::memcpy(output.data(), entries.data(), output.size());
-    return 0;
+    return result;
 }
 
 u32 nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& output) {
| @@ -126,44 +137,76 @@ u32 nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& ou | |||
| 126 | params.flags, params.nvmap_handle, params.buffer_offset, params.mapping_size, | 137 | params.flags, params.nvmap_handle, params.buffer_offset, params.mapping_size, |
| 127 | params.offset); | 138 | params.offset); |
| 128 | 139 | ||
| 129 | if (!params.nvmap_handle) { | 140 | const auto object{nvmap_dev->GetObject(params.nvmap_handle)}; |
| 130 | return 0; | 141 | if (!object) { |
| 142 | LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", params.nvmap_handle); | ||
| 143 | std::memcpy(output.data(), ¶ms, output.size()); | ||
| 144 | return NvErrCodes::InvalidInput; | ||
| 131 | } | 145 | } |
| 132 | 146 | ||
| 133 | auto object = nvmap_dev->GetObject(params.nvmap_handle); | ||
| 134 | ASSERT(object); | ||
| 135 | |||
| 136 | // We can only map objects that have already been assigned a CPU address. | ||
| 137 | ASSERT(object->status == nvmap::Object::Status::Allocated); | ||
| 138 | |||
| 139 | ASSERT(params.buffer_offset == 0); | ||
| 140 | |||
| 141 | // The real nvservices doesn't make a distinction between handles and ids, and | 147 | // The real nvservices doesn't make a distinction between handles and ids, and |
| 142 | // object can only have one handle and it will be the same as its id. Assert that this is the | 148 | // object can only have one handle and it will be the same as its id. Assert that this is the |
| 143 | // case to prevent unexpected behavior. | 149 | // case to prevent unexpected behavior. |
| 144 | ASSERT(object->id == params.nvmap_handle); | 150 | ASSERT(object->id == params.nvmap_handle); |
| 145 | |||
| 146 | auto& gpu = system.GPU(); | 151 | auto& gpu = system.GPU(); |
| 147 | 152 | ||
| 148 | if (params.flags & 1) { | 153 | u64 page_size{params.page_size}; |
| 149 | params.offset = gpu.MemoryManager().MapBufferEx(object->addr, params.offset, object->size); | 154 | if (!page_size) { |
| 150 | } else { | 155 | page_size = object->align; |
| 151 | params.offset = gpu.MemoryManager().MapBufferEx(object->addr, object->size); | 156 | } |
| 157 | |||
| 158 | if ((params.flags & AddressSpaceFlags::Remap) != AddressSpaceFlags::None) { | ||
| 159 | if (const auto buffer_map{FindBufferMap(params.offset)}; buffer_map) { | ||
| 160 | const auto cpu_addr{static_cast<VAddr>(buffer_map->CpuAddr() + params.buffer_offset)}; | ||
| 161 | const auto gpu_addr{static_cast<GPUVAddr>(params.offset + params.buffer_offset)}; | ||
| 162 | |||
| 163 | if (!gpu.MemoryManager().Map(cpu_addr, gpu_addr, params.mapping_size)) { | ||
| 164 | LOG_CRITICAL(Service_NVDRV, | ||
| 165 | "remap failed, flags={:X}, nvmap_handle={:X}, buffer_offset={}, " | ||
| 166 | "mapping_size = {}, offset={}", | ||
| 167 | params.flags, params.nvmap_handle, params.buffer_offset, | ||
| 168 | params.mapping_size, params.offset); | ||
| 169 | |||
| 170 | std::memcpy(output.data(), ¶ms, output.size()); | ||
| 171 | return NvErrCodes::InvalidInput; | ||
| 172 | } | ||
| 173 | |||
| 174 | std::memcpy(output.data(), ¶ms, output.size()); | ||
| 175 | return NvErrCodes::Success; | ||
| 176 | } else { | ||
| 177 | LOG_CRITICAL(Service_NVDRV, "address not mapped offset={}", params.offset); | ||
| 178 | |||
| 179 | std::memcpy(output.data(), ¶ms, output.size()); | ||
| 180 | return NvErrCodes::InvalidInput; | ||
| 181 | } | ||
| 152 | } | 182 | } |
| 153 | 183 | ||
| 154 | // Create a new mapping entry for this operation. | 184 | // We can only map objects that have already been assigned a CPU address. |
| 155 | ASSERT_MSG(buffer_mappings.find(params.offset) == buffer_mappings.end(), | 185 | ASSERT(object->status == nvmap::Object::Status::Allocated); |
| 156 | "Offset is already mapped"); | 186 | |
| 187 | const auto physical_address{object->addr + params.buffer_offset}; | ||
| 188 | u64 size{params.mapping_size}; | ||
| 189 | if (!size) { | ||
| 190 | size = object->size; | ||
| 191 | } | ||
| 157 | 192 | ||
| 158 | BufferMapping mapping{}; | 193 | const bool is_alloc{(params.flags & AddressSpaceFlags::FixedOffset) == AddressSpaceFlags::None}; |
| 159 | mapping.nvmap_handle = params.nvmap_handle; | 194 | if (is_alloc) { |
| 160 | mapping.offset = params.offset; | 195 | params.offset = gpu.MemoryManager().MapAllocate(physical_address, size, page_size); |
| 161 | mapping.size = object->size; | 196 | } else { |
| 197 | params.offset = gpu.MemoryManager().Map(physical_address, params.offset, size); | ||
| 198 | } | ||
| 162 | 199 | ||
| 163 | buffer_mappings[params.offset] = mapping; | 200 | auto result{NvErrCodes::Success}; |
| 201 | if (!params.offset) { | ||
| 202 | LOG_CRITICAL(Service_NVDRV, "failed to map, size={}", size); | ||
| 203 | result = NvErrCodes::InvalidInput; | ||
| 204 | } else { | ||
| 205 | AddBufferMap(params.offset, size, physical_address, is_alloc); | ||
| 206 | } | ||
| 164 | 207 | ||
| 165 | std::memcpy(output.data(), &params, output.size()); | 208 | std::memcpy(output.data(), &params, output.size()); |
| 166 | return 0; | 209 | return result; |
| 167 | } | 210 | } |
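The rewritten MapBufferEx dispatches on typed AddressSpaceFlags values rather than raw integer bit tests. A minimal sketch of the flag idiom, assuming DECLARE_ENUM_FLAG_OPERATORS does nothing more than define the bitwise operators over the enum's underlying type (names below are illustrative, not the emulator's):

```cpp
#include <cstdint>

enum class Flags : std::uint32_t {
    None = 0x0,
    FixedOffset = 0x1,
    Remap = 0x100,
};

// What a DECLARE_ENUM_FLAG_OPERATORS-style macro is assumed to generate.
constexpr Flags operator&(Flags lhs, Flags rhs) {
    return static_cast<Flags>(static_cast<std::uint32_t>(lhs) &
                              static_cast<std::uint32_t>(rhs));
}

// (flags & Flags::Remap) != Flags::None reads as "is the Remap bit set?",
// mirroring the check at the top of the function above.
constexpr bool IsRemap(Flags flags) {
    return (flags & Flags::Remap) != Flags::None;
}

static_assert(IsRemap(Flags::Remap) && !IsRemap(Flags::FixedOffset));
```

The typed comparison is what forces the explicit `!= AddressSpaceFlags::None` spelling: an enum class does not convert to bool, so the old `params.flags & 1` shortcut no longer compiles.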
| 168 | 211 | ||
| 169 | u32 nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output) { | 212 | u32 nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output) { |
| @@ -172,24 +215,20 @@ u32 nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& ou | |||
| 172 | 215 | ||
| 173 | LOG_DEBUG(Service_NVDRV, "called, offset=0x{:X}", params.offset); | 216 | LOG_DEBUG(Service_NVDRV, "called, offset=0x{:X}", params.offset); |
| 174 | 217 | ||
| 175 | const auto itr = buffer_mappings.find(params.offset); | 218 | if (const auto size{RemoveBufferMap(params.offset)}; size) { |
| 176 | if (itr == buffer_mappings.end()) { | 219 | system.GPU().MemoryManager().Unmap(params.offset, *size); |
| 177 | LOG_WARNING(Service_NVDRV, "Tried to unmap an invalid offset 0x{:X}", params.offset); | 220 | } else { |
| 178 | // Hardware tests shows that unmapping an already unmapped buffer always returns successful | 221 | LOG_ERROR(Service_NVDRV, "invalid offset=0x{:X}", params.offset); |
| 179 | // and doesn't fail. | ||
| 180 | return 0; | ||
| 181 | } | 222 | } |
| 182 | 223 | ||
| 183 | params.offset = system.GPU().MemoryManager().UnmapBuffer(params.offset, itr->second.size); | ||
| 184 | buffer_mappings.erase(itr->second.offset); | ||
| 185 | |||
| 186 | std::memcpy(output.data(), &params, output.size()); | 224 | std::memcpy(output.data(), &params, output.size()); |
| 187 | return 0; | 225 | return NvErrCodes::Success; |
| 188 | } | 226 | } |
| 189 | 227 | ||
| 190 | u32 nvhost_as_gpu::BindChannel(const std::vector<u8>& input, std::vector<u8>& output) { | 228 | u32 nvhost_as_gpu::BindChannel(const std::vector<u8>& input, std::vector<u8>& output) { |
| 191 | IoctlBindChannel params{}; | 229 | IoctlBindChannel params{}; |
| 192 | std::memcpy(&params, input.data(), input.size()); | 230 | std::memcpy(&params, input.data(), input.size()); |
| 231 | |||
| 193 | LOG_DEBUG(Service_NVDRV, "called, fd={:X}", params.fd); | 232 | LOG_DEBUG(Service_NVDRV, "called, fd={:X}", params.fd); |
| 194 | 233 | ||
| 195 | channel = params.fd; | 234 | channel = params.fd; |
| @@ -199,6 +238,7 @@ u32 nvhost_as_gpu::BindChannel(const std::vector<u8>& input, std::vector<u8>& ou | |||
| 199 | u32 nvhost_as_gpu::GetVARegions(const std::vector<u8>& input, std::vector<u8>& output) { | 238 | u32 nvhost_as_gpu::GetVARegions(const std::vector<u8>& input, std::vector<u8>& output) { |
| 200 | IoctlGetVaRegions params{}; | 239 | IoctlGetVaRegions params{}; |
| 201 | std::memcpy(&params, input.data(), input.size()); | 240 | std::memcpy(&params, input.data(), input.size()); |
| 241 | |||
| 202 | LOG_WARNING(Service_NVDRV, "(STUBBED) called, buf_addr={:X}, buf_size={:X}", params.buf_addr, | 242 | LOG_WARNING(Service_NVDRV, "(STUBBED) called, buf_addr={:X}, buf_size={:X}", params.buf_addr, |
| 203 | params.buf_size); | 243 | params.buf_size); |
| 204 | 244 | ||
| @@ -210,9 +250,43 @@ u32 nvhost_as_gpu::GetVARegions(const std::vector<u8>& input, std::vector<u8>& o | |||
| 210 | params.regions[1].offset = 0x04000000; | 250 | params.regions[1].offset = 0x04000000; |
| 211 | params.regions[1].page_size = 0x10000; | 251 | params.regions[1].page_size = 0x10000; |
| 212 | params.regions[1].pages = 0x1bffff; | 252 | params.regions[1].pages = 0x1bffff; |
| 253 | |||
| 213 | // TODO(ogniK): This can probably stay stubbed, but support should be added much later | 254 | // TODO(ogniK): This can probably stay stubbed, but support should be added much later |
| 255 | |||
| 214 | std::memcpy(output.data(), &params, output.size()); | 256 | std::memcpy(output.data(), &params, output.size()); |
| 215 | return 0; | 257 | return 0; |
| 216 | } | 258 | } |
| 217 | 259 | ||
| 260 | std::optional<nvhost_as_gpu::BufferMap> nvhost_as_gpu::FindBufferMap(GPUVAddr gpu_addr) const { | ||
| 261 | const auto end{buffer_mappings.upper_bound(gpu_addr)}; | ||
| 262 | for (auto iter{buffer_mappings.begin()}; iter != end; ++iter) { | ||
| 263 | if (gpu_addr >= iter->second.StartAddr() && gpu_addr < iter->second.EndAddr()) { | ||
| 264 | return iter->second; | ||
| 265 | } | ||
| 266 | } | ||
| 267 | |||
| 268 | return {}; | ||
| 269 | } | ||
| 270 | |||
| 271 | void nvhost_as_gpu::AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr, | ||
| 272 | bool is_allocated) { | ||
| 273 | buffer_mappings[gpu_addr] = {gpu_addr, size, cpu_addr, is_allocated}; | ||
| 274 | } | ||
| 275 | |||
| 276 | std::optional<std::size_t> nvhost_as_gpu::RemoveBufferMap(GPUVAddr gpu_addr) { | ||
| 277 | if (const auto iter{buffer_mappings.find(gpu_addr)}; iter != buffer_mappings.end()) { | ||
| 278 | std::size_t size{}; | ||
| 279 | |||
| 280 | if (iter->second.IsAllocated()) { | ||
| 281 | size = iter->second.Size(); | ||
| 282 | } | ||
| 283 | |||
| 284 | buffer_mappings.erase(iter); | ||
| 285 | |||
| 286 | return size; | ||
| 287 | } | ||
| 288 | |||
| 289 | return {}; | ||
| 290 | } | ||
| 291 | |||
| 218 | } // namespace Service::Nvidia::Devices | 292 | } // namespace Service::Nvidia::Devices |
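FindBufferMap depends on buffer_mappings being ordered by start address: upper_bound caps the scan at the first entry keyed beyond the query address, so only candidates that could contain it are visited. A self-contained sketch of the same containment lookup, with illustrative names:

```cpp
#include <cstdint>
#include <map>
#include <optional>

struct Range {
    std::uint64_t start;
    std::uint64_t end; // one past the last mapped address
};

std::optional<Range> FindContaining(const std::map<std::uint64_t, Range>& ranges,
                                    std::uint64_t addr) {
    // upper_bound returns the first entry keyed strictly above addr, so every
    // entry before it starts at or below addr and is a containment candidate.
    const auto bound = ranges.upper_bound(addr);
    for (auto it = ranges.begin(); it != bound; ++it) {
        if (addr >= it->second.start && addr < it->second.end) {
            return it->second;
        }
    }
    return std::nullopt;
}
```

Since mappings do not overlap, inspecting only the entry just before `bound` would suffice; the linear walk simply matches the shape of the code above.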
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h index f79fcc065..9a0cdff0c 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h | |||
| @@ -4,9 +4,12 @@ | |||
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include <map> | ||
| 7 | #include <memory> | 8 | #include <memory> |
| 8 | #include <unordered_map> | 9 | #include <optional> |
| 9 | #include <vector> | 10 | #include <vector> |
| 11 | |||
| 12 | #include "common/common_funcs.h" | ||
| 10 | #include "common/common_types.h" | 13 | #include "common/common_types.h" |
| 11 | #include "common/swap.h" | 14 | #include "common/swap.h" |
| 12 | #include "core/hle/service/nvdrv/devices/nvdevice.h" | 15 | #include "core/hle/service/nvdrv/devices/nvdevice.h" |
| @@ -15,6 +18,13 @@ namespace Service::Nvidia::Devices { | |||
| 15 | 18 | ||
| 16 | class nvmap; | 19 | class nvmap; |
| 17 | 20 | ||
| 21 | enum class AddressSpaceFlags : u32 { | ||
| 22 | None = 0x0, | ||
| 23 | FixedOffset = 0x1, | ||
| 24 | Remap = 0x100, | ||
| 25 | }; | ||
| 26 | DECLARE_ENUM_FLAG_OPERATORS(AddressSpaceFlags); | ||
| 27 | |||
| 18 | class nvhost_as_gpu final : public nvdevice { | 28 | class nvhost_as_gpu final : public nvdevice { |
| 19 | public: | 29 | public: |
| 20 | explicit nvhost_as_gpu(Core::System& system, std::shared_ptr<nvmap> nvmap_dev); | 30 | explicit nvhost_as_gpu(Core::System& system, std::shared_ptr<nvmap> nvmap_dev); |
| @@ -25,6 +35,45 @@ public: | |||
| 25 | IoctlVersion version) override; | 35 | IoctlVersion version) override; |
| 26 | 36 | ||
| 27 | private: | 37 | private: |
| 38 | class BufferMap final { | ||
| 39 | public: | ||
| 40 | constexpr BufferMap() = default; | ||
| 41 | |||
| 42 | constexpr BufferMap(GPUVAddr start_addr, std::size_t size) | ||
| 43 | : start_addr{start_addr}, end_addr{start_addr + size} {} | ||
| 44 | |||
| 45 | constexpr BufferMap(GPUVAddr start_addr, std::size_t size, VAddr cpu_addr, | ||
| 46 | bool is_allocated) | ||
| 47 | : start_addr{start_addr}, end_addr{start_addr + size}, cpu_addr{cpu_addr}, | ||
| 48 | is_allocated{is_allocated} {} | ||
| 49 | |||
| 50 | constexpr GPUVAddr StartAddr() const { | ||
| 51 | return start_addr; | ||
| 52 | } | ||
| 53 | |||
| 54 | constexpr GPUVAddr EndAddr() const { | ||
| 55 | return end_addr; | ||
| 56 | } | ||
| 57 | |||
| 58 | constexpr std::size_t Size() const { | ||
| 59 | return end_addr - start_addr; | ||
| 60 | } | ||
| 61 | |||
| 62 | constexpr VAddr CpuAddr() const { | ||
| 63 | return cpu_addr; | ||
| 64 | } | ||
| 65 | |||
| 66 | constexpr bool IsAllocated() const { | ||
| 67 | return is_allocated; | ||
| 68 | } | ||
| 69 | |||
| 70 | private: | ||
| 71 | GPUVAddr start_addr{}; | ||
| 72 | GPUVAddr end_addr{}; | ||
| 73 | VAddr cpu_addr{}; | ||
| 74 | bool is_allocated{}; | ||
| 75 | }; | ||
| 76 | |||
| 28 | enum class IoctlCommand : u32_le { | 77 | enum class IoctlCommand : u32_le { |
| 29 | IocInitalizeExCommand = 0x40284109, | 78 | IocInitalizeExCommand = 0x40284109, |
| 30 | IocAllocateSpaceCommand = 0xC0184102, | 79 | IocAllocateSpaceCommand = 0xC0184102, |
| @@ -49,7 +98,7 @@ private: | |||
| 49 | struct IoctlAllocSpace { | 98 | struct IoctlAllocSpace { |
| 50 | u32_le pages; | 99 | u32_le pages; |
| 51 | u32_le page_size; | 100 | u32_le page_size; |
| 52 | u32_le flags; | 101 | AddressSpaceFlags flags; |
| 53 | INSERT_PADDING_WORDS(1); | 102 | INSERT_PADDING_WORDS(1); |
| 54 | union { | 103 | union { |
| 55 | u64_le offset; | 104 | u64_le offset; |
| @@ -69,18 +118,18 @@ private: | |||
| 69 | static_assert(sizeof(IoctlRemapEntry) == 20, "IoctlRemapEntry is incorrect size"); | 118 | static_assert(sizeof(IoctlRemapEntry) == 20, "IoctlRemapEntry is incorrect size"); |
| 70 | 119 | ||
| 71 | struct IoctlMapBufferEx { | 120 | struct IoctlMapBufferEx { |
| 72 | u32_le flags; // bit0: fixed_offset, bit2: cacheable | 121 | AddressSpaceFlags flags; // bit0: fixed_offset, bit2: cacheable |
| 73 | u32_le kind; // -1 is default | 122 | u32_le kind; // -1 is default |
| 74 | u32_le nvmap_handle; | 123 | u32_le nvmap_handle; |
| 75 | u32_le page_size; // 0 means don't care | 124 | u32_le page_size; // 0 means don't care |
| 76 | u64_le buffer_offset; | 125 | s64_le buffer_offset; |
| 77 | u64_le mapping_size; | 126 | u64_le mapping_size; |
| 78 | u64_le offset; | 127 | s64_le offset; |
| 79 | }; | 128 | }; |
| 80 | static_assert(sizeof(IoctlMapBufferEx) == 40, "IoctlMapBufferEx is incorrect size"); | 129 | static_assert(sizeof(IoctlMapBufferEx) == 40, "IoctlMapBufferEx is incorrect size"); |
| 81 | 130 | ||
| 82 | struct IoctlUnmapBuffer { | 131 | struct IoctlUnmapBuffer { |
| 83 | u64_le offset; | 132 | s64_le offset; |
| 84 | }; | 133 | }; |
| 85 | static_assert(sizeof(IoctlUnmapBuffer) == 8, "IoctlUnmapBuffer is incorrect size"); | 134 | static_assert(sizeof(IoctlUnmapBuffer) == 8, "IoctlUnmapBuffer is incorrect size"); |
| 86 | 135 | ||
| @@ -106,15 +155,6 @@ private: | |||
| 106 | static_assert(sizeof(IoctlGetVaRegions) == 16 + sizeof(IoctlVaRegion) * 2, | 155 | static_assert(sizeof(IoctlGetVaRegions) == 16 + sizeof(IoctlVaRegion) * 2, |
| 107 | "IoctlGetVaRegions is incorrect size"); | 156 | "IoctlGetVaRegions is incorrect size"); |
| 108 | 157 | ||
| 109 | struct BufferMapping { | ||
| 110 | u64 offset; | ||
| 111 | u64 size; | ||
| 112 | u32 nvmap_handle; | ||
| 113 | }; | ||
| 114 | |||
| 115 | /// Map containing the nvmap object mappings in GPU memory. | ||
| 116 | std::unordered_map<u64, BufferMapping> buffer_mappings; | ||
| 117 | |||
| 118 | u32 channel{}; | 158 | u32 channel{}; |
| 119 | 159 | ||
| 120 | u32 InitalizeEx(const std::vector<u8>& input, std::vector<u8>& output); | 160 | u32 InitalizeEx(const std::vector<u8>& input, std::vector<u8>& output); |
| @@ -125,7 +165,14 @@ private: | |||
| 125 | u32 BindChannel(const std::vector<u8>& input, std::vector<u8>& output); | 165 | u32 BindChannel(const std::vector<u8>& input, std::vector<u8>& output); |
| 126 | u32 GetVARegions(const std::vector<u8>& input, std::vector<u8>& output); | 166 | u32 GetVARegions(const std::vector<u8>& input, std::vector<u8>& output); |
| 127 | 167 | ||
| 168 | std::optional<BufferMap> FindBufferMap(GPUVAddr gpu_addr) const; | ||
| 169 | void AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr, bool is_allocated); | ||
| 170 | std::optional<std::size_t> RemoveBufferMap(GPUVAddr gpu_addr); | ||
| 171 | |||
| 128 | std::shared_ptr<nvmap> nvmap_dev; | 172 | std::shared_ptr<nvmap> nvmap_dev; |
| 173 | |||
| 174 | // This is expected to be ordered; therefore, we must use std::map rather than std::unordered_map | ||
| 175 | std::map<GPUVAddr, BufferMap> buffer_mappings; | ||
| 129 | }; | 176 | }; |
| 130 | 177 | ||
| 131 | } // namespace Service::Nvidia::Devices | 178 | } // namespace Service::Nvidia::Devices |
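Every ioctl parameter struct in this header is memcpy'd to or from guest buffers, so each one is pinned to its expected ABI size with a static_assert; the s64_le offsets introduced above keep sign handling correct without changing those sizes. A minimal sketch of the layout-check idiom (the struct here is hypothetical):

```cpp
#include <cstdint>

// Hypothetical ioctl parameter block: two 32-bit fields plus a signed 64-bit
// offset, exactly as wide as the guest-side structure it mirrors.
struct ExampleParams {
    std::uint32_t flags;
    std::uint32_t handle;
    std::int64_t offset;
};
static_assert(sizeof(ExampleParams) == 16, "ExampleParams has incorrect size");
```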
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp index 8c742316c..9436e16ad 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.cpp +++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp | |||
| @@ -18,7 +18,12 @@ enum { | |||
| 18 | }; | 18 | }; |
| 19 | } | 19 | } |
| 20 | 20 | ||
| 21 | nvmap::nvmap(Core::System& system) : nvdevice(system) {} | 21 | nvmap::nvmap(Core::System& system) : nvdevice(system) { |
| 22 | // Handle 0 appears to be used when remapping, so we create a placeholder empty nvmap object to | ||
| 23 | // represent this. | ||
| 24 | CreateObject(0); | ||
| 25 | } | ||
| 26 | |||
| 22 | nvmap::~nvmap() = default; | 27 | nvmap::~nvmap() = default; |
| 23 | 28 | ||
| 24 | VAddr nvmap::GetObjectAddress(u32 handle) const { | 29 | VAddr nvmap::GetObjectAddress(u32 handle) const { |
| @@ -50,6 +55,21 @@ u32 nvmap::ioctl(Ioctl command, const std::vector<u8>& input, const std::vector< | |||
| 50 | return 0; | 55 | return 0; |
| 51 | } | 56 | } |
| 52 | 57 | ||
| 58 | u32 nvmap::CreateObject(u32 size) { | ||
| 59 | // Create a new nvmap object and obtain a handle to it. | ||
| 60 | auto object = std::make_shared<Object>(); | ||
| 61 | object->id = next_id++; | ||
| 62 | object->size = size; | ||
| 63 | object->status = Object::Status::Created; | ||
| 64 | object->refcount = 1; | ||
| 65 | |||
| 66 | const u32 handle = next_handle++; | ||
| 67 | |||
| 68 | handles.insert_or_assign(handle, std::move(object)); | ||
| 69 | |||
| 70 | return handle; | ||
| 71 | } | ||
| 72 | |||
| 53 | u32 nvmap::IocCreate(const std::vector<u8>& input, std::vector<u8>& output) { | 73 | u32 nvmap::IocCreate(const std::vector<u8>& input, std::vector<u8>& output) { |
| 54 | IocCreateParams params; | 74 | IocCreateParams params; |
| 55 | std::memcpy(&params, input.data(), sizeof(params)); | 75 | std::memcpy(&params, input.data(), sizeof(params)); |
| @@ -59,17 +79,8 @@ u32 nvmap::IocCreate(const std::vector<u8>& input, std::vector<u8>& output) { | |||
| 59 | LOG_ERROR(Service_NVDRV, "Size is 0"); | 79 | LOG_ERROR(Service_NVDRV, "Size is 0"); |
| 60 | return static_cast<u32>(NvErrCodes::InvalidValue); | 80 | return static_cast<u32>(NvErrCodes::InvalidValue); |
| 61 | } | 81 | } |
| 62 | // Create a new nvmap object and obtain a handle to it. | ||
| 63 | auto object = std::make_shared<Object>(); | ||
| 64 | object->id = next_id++; | ||
| 65 | object->size = params.size; | ||
| 66 | object->status = Object::Status::Created; | ||
| 67 | object->refcount = 1; | ||
| 68 | |||
| 69 | u32 handle = next_handle++; | ||
| 70 | handles[handle] = std::move(object); | ||
| 71 | 82 | ||
| 72 | params.handle = handle; | 83 | params.handle = CreateObject(params.size); |
| 73 | 84 | ||
| 74 | std::memcpy(output.data(), &params, sizeof(params)); | 85 | std::memcpy(output.data(), &params, sizeof(params)); |
| 75 | return 0; | 86 | return 0; |
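The nvmap change factors object construction into CreateObject and calls it once from the constructor so that handle 0 exists as a placeholder for remap operations; lowering next_handle and next_id to 0 is what makes that first handle come out as 0. A stripped-down sketch of the allocation pattern, with illustrative types:

```cpp
#include <cstdint>
#include <memory>
#include <unordered_map>

struct Object {
    std::uint32_t id{};
    std::uint32_t size{};
};

class HandleTable {
public:
    HandleTable() {
        Create(0); // handle 0: empty placeholder used when remapping
    }

    std::uint32_t Create(std::uint32_t size) {
        auto object = std::make_shared<Object>();
        object->id = next_id++; // id and handle advance in lockstep
        object->size = size;

        const std::uint32_t handle = next_handle++;
        handles.insert_or_assign(handle, std::move(object));
        return handle;
    }

private:
    std::uint32_t next_handle = 0;
    std::uint32_t next_id = 0;
    std::unordered_map<std::uint32_t, std::shared_ptr<Object>> handles;
};
```

Keeping the id equal to the handle is what the `ASSERT(object->id == params.nvmap_handle)` in nvhost_as_gpu relies on.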
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.h b/src/core/hle/service/nvdrv/devices/nvmap.h index 73c2e8809..84624be00 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.h +++ b/src/core/hle/service/nvdrv/devices/nvmap.h | |||
| @@ -49,10 +49,10 @@ public: | |||
| 49 | 49 | ||
| 50 | private: | 50 | private: |
| 51 | /// Id to use for the next handle that is created. | 51 | /// Id to use for the next handle that is created. |
| 52 | u32 next_handle = 1; | 52 | u32 next_handle = 0; |
| 53 | 53 | ||
| 54 | /// Id to use for the next object that is created. | 54 | /// Id to use for the next object that is created. |
| 55 | u32 next_id = 1; | 55 | u32 next_id = 0; |
| 56 | 56 | ||
| 57 | /// Mapping of currently allocated handles to the objects they represent. | 57 | /// Mapping of currently allocated handles to the objects they represent. |
| 58 | std::unordered_map<u32, std::shared_ptr<Object>> handles; | 58 | std::unordered_map<u32, std::shared_ptr<Object>> handles; |
| @@ -119,6 +119,8 @@ private: | |||
| 119 | }; | 119 | }; |
| 120 | static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size"); | 120 | static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size"); |
| 121 | 121 | ||
| 122 | u32 CreateObject(u32 size); | ||
| 123 | |||
| 122 | u32 IocCreate(const std::vector<u8>& input, std::vector<u8>& output); | 124 | u32 IocCreate(const std::vector<u8>& input, std::vector<u8>& output); |
| 123 | u32 IocAlloc(const std::vector<u8>& input, std::vector<u8>& output); | 125 | u32 IocAlloc(const std::vector<u8>& input, std::vector<u8>& output); |
| 124 | u32 IocGetId(const std::vector<u8>& input, std::vector<u8>& output); | 126 | u32 IocGetId(const std::vector<u8>& input, std::vector<u8>& output); |
diff --git a/src/core/hle/service/nvdrv/interface.cpp b/src/core/hle/service/nvdrv/interface.cpp index deaf0808b..88fbfa9b0 100644 --- a/src/core/hle/service/nvdrv/interface.cpp +++ b/src/core/hle/service/nvdrv/interface.cpp | |||
| @@ -60,24 +60,24 @@ void NVDRV::IoctlBase(Kernel::HLERequestContext& ctx, IoctlVersion version) { | |||
| 60 | 60 | ||
| 61 | if (ctrl.must_delay) { | 61 | if (ctrl.must_delay) { |
| 62 | ctrl.fresh_call = false; | 62 | ctrl.fresh_call = false; |
| 63 | ctx.SleepClientThread("NVServices::DelayedResponse", ctrl.timeout, | 63 | ctx.SleepClientThread( |
| 64 | [=](std::shared_ptr<Kernel::Thread> thread, | 64 | "NVServices::DelayedResponse", ctrl.timeout, |
| 65 | Kernel::HLERequestContext& ctx, | 65 | [=, this](std::shared_ptr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx_, |
| 66 | Kernel::ThreadWakeupReason reason) { | 66 | Kernel::ThreadWakeupReason reason) { |
| 67 | IoctlCtrl ctrl2{ctrl}; | 67 | IoctlCtrl ctrl2{ctrl}; |
| 68 | std::vector<u8> tmp_output = output; | 68 | std::vector<u8> tmp_output = output; |
| 69 | std::vector<u8> tmp_output2 = output2; | 69 | std::vector<u8> tmp_output2 = output2; |
| 70 | u32 result = nvdrv->Ioctl(fd, command, input, input2, tmp_output, | 70 | const u32 ioctl_result = nvdrv->Ioctl(fd, command, input, input2, tmp_output, |
| 71 | tmp_output2, ctrl2, version); | 71 | tmp_output2, ctrl2, version); |
| 72 | ctx.WriteBuffer(tmp_output, 0); | 72 | ctx_.WriteBuffer(tmp_output, 0); |
| 73 | if (version == IoctlVersion::Version3) { | 73 | if (version == IoctlVersion::Version3) { |
| 74 | ctx.WriteBuffer(tmp_output2, 1); | 74 | ctx_.WriteBuffer(tmp_output2, 1); |
| 75 | } | 75 | } |
| 76 | IPC::ResponseBuilder rb{ctx, 3}; | 76 | IPC::ResponseBuilder rb{ctx_, 3}; |
| 77 | rb.Push(RESULT_SUCCESS); | 77 | rb.Push(RESULT_SUCCESS); |
| 78 | rb.Push(result); | 78 | rb.Push(ioctl_result); |
| 79 | }, | 79 | }, |
| 80 | nvdrv->GetEventWriteable(ctrl.event_id)); | 80 | nvdrv->GetEventWriteable(ctrl.event_id)); |
| 81 | } else { | 81 | } else { |
| 82 | ctx.WriteBuffer(output); | 82 | ctx.WriteBuffer(output); |
| 83 | if (version == IoctlVersion::Version3) { | 83 | if (version == IoctlVersion::Version3) { |
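The capture-list changes here (and in gpu.cpp and vi.cpp below) follow from C++20 deprecating the implicit capture of `this` through `[=]`; the pointer must now be named explicitly. A minimal illustration, assuming a C++20 compiler (the Widget type is hypothetical):

```cpp
#include <cstdio>

struct Widget {
    int value = 42;

    auto MakeCallback() {
        // Pre-C++20: [=] captured `this` implicitly; that is now deprecated.
        // [=, this] keeps copy-capture semantics but names the pointer.
        return [=, this] { std::printf("value=%d\n", value); };
    }
};

int main() {
    Widget w;
    w.MakeCallback()(); // prints "value=42"
    return 0;
}
```

Note that capturing `this` copies the pointer, not the object, so such a callback must not outlive the object it was created from.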
diff --git a/src/core/hle/service/sm/sm.h b/src/core/hle/service/sm/sm.h index b06d2f103..b526a94fe 100644 --- a/src/core/hle/service/sm/sm.h +++ b/src/core/hle/service/sm/sm.h | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | #include <type_traits> | 9 | #include <type_traits> |
| 10 | #include <unordered_map> | 10 | #include <unordered_map> |
| 11 | 11 | ||
| 12 | #include "common/concepts.h" | ||
| 12 | #include "core/hle/kernel/client_port.h" | 13 | #include "core/hle/kernel/client_port.h" |
| 13 | #include "core/hle/kernel/object.h" | 14 | #include "core/hle/kernel/object.h" |
| 14 | #include "core/hle/kernel/server_port.h" | 15 | #include "core/hle/kernel/server_port.h" |
| @@ -56,10 +57,8 @@ public: | |||
| 56 | ResultVal<std::shared_ptr<Kernel::ClientPort>> GetServicePort(const std::string& name); | 57 | ResultVal<std::shared_ptr<Kernel::ClientPort>> GetServicePort(const std::string& name); |
| 57 | ResultVal<std::shared_ptr<Kernel::ClientSession>> ConnectToService(const std::string& name); | 58 | ResultVal<std::shared_ptr<Kernel::ClientSession>> ConnectToService(const std::string& name); |
| 58 | 59 | ||
| 59 | template <typename T> | 60 | template <Common::IsBaseOf<Kernel::SessionRequestHandler> T> |
| 60 | std::shared_ptr<T> GetService(const std::string& service_name) const { | 61 | std::shared_ptr<T> GetService(const std::string& service_name) const { |
| 61 | static_assert(std::is_base_of_v<Kernel::SessionRequestHandler, T>, | ||
| 62 | "Not a base of ServiceFrameworkBase"); | ||
| 63 | auto service = registered_services.find(service_name); | 62 | auto service = registered_services.find(service_name); |
| 64 | if (service == registered_services.end()) { | 63 | if (service == registered_services.end()) { |
| 65 | LOG_DEBUG(Service, "Can't find service: {}", service_name); | 64 | LOG_DEBUG(Service, "Can't find service: {}", service_name); |
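Replacing the static_assert with a constrained template parameter moves the check into overload resolution, so misuse fails at the call site with a concept diagnostic. Common::IsBaseOf itself lives in common/concepts.h and is not shown in this diff; a plausible sketch of such a concept and its use:

```cpp
#include <memory>
#include <type_traits>

// Assumed shape of an IsBaseOf-style concept: T must derive from Base.
template <typename T, typename Base>
concept IsBaseOfSketch = std::is_base_of_v<Base, T>;

struct Handler {};
struct MyService : Handler {};

template <IsBaseOfSketch<Handler> T>
std::shared_ptr<T> GetServiceSketch() {
    return std::make_shared<T>();
}

int main() {
    auto service = GetServiceSketch<MyService>(); // OK: MyService derives from Handler
    // GetServiceSketch<int>() would be rejected at overload resolution.
    return service != nullptr ? 0 : 1;
}
```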
diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp index 825d11a3f..480d34725 100644 --- a/src/core/hle/service/vi/vi.cpp +++ b/src/core/hle/service/vi/vi.cpp | |||
| @@ -548,8 +548,8 @@ private: | |||
| 548 | // Block the current thread until a buffer becomes available | 548 | // Block the current thread until a buffer becomes available |
| 549 | ctx.SleepClientThread( | 549 | ctx.SleepClientThread( |
| 550 | "IHOSBinderDriver::DequeueBuffer", UINT64_MAX, | 550 | "IHOSBinderDriver::DequeueBuffer", UINT64_MAX, |
| 551 | [=](std::shared_ptr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx, | 551 | [=, this](std::shared_ptr<Kernel::Thread> thread, |
| 552 | Kernel::ThreadWakeupReason reason) { | 552 | Kernel::HLERequestContext& ctx, Kernel::ThreadWakeupReason reason) { |
| 553 | // Repeat TransactParcel DequeueBuffer when a buffer is available | 553 | // Repeat TransactParcel DequeueBuffer when a buffer is available |
| 554 | const auto guard = nv_flinger->Lock(); | 554 | const auto guard = nv_flinger->Lock(); |
| 555 | auto& buffer_queue = nv_flinger->FindBufferQueue(id); | 555 | auto& buffer_queue = nv_flinger->FindBufferQueue(id); |
| @@ -1199,6 +1199,23 @@ private: | |||
| 1199 | } | 1199 | } |
| 1200 | } | 1200 | } |
| 1201 | 1201 | ||
| 1202 | void GetIndirectLayerImageRequiredMemoryInfo(Kernel::HLERequestContext& ctx) { | ||
| 1203 | IPC::RequestParser rp{ctx}; | ||
| 1204 | const auto width = rp.Pop<u64>(); | ||
| 1205 | const auto height = rp.Pop<u64>(); | ||
| 1206 | LOG_DEBUG(Service_VI, "called, width={}, height={}", width, height); | ||
| 1207 | |||
| 1208 | constexpr std::size_t base_size = 0x20000; | ||
| 1209 | constexpr std::size_t alignment = 0x1000; | ||
| 1210 | const auto texture_size = width * height * 4; | ||
| 1211 | const auto out_size = (texture_size + base_size - 1) / base_size * base_size; | ||
| 1212 | |||
| 1213 | IPC::ResponseBuilder rb{ctx, 6}; | ||
| 1214 | rb.Push(RESULT_SUCCESS); | ||
| 1215 | rb.Push(out_size); | ||
| 1216 | rb.Push(alignment); | ||
| 1217 | } | ||
| 1218 | |||
| 1202 | static ResultVal<ConvertedScaleMode> ConvertScalingModeImpl(NintendoScaleMode mode) { | 1219 | static ResultVal<ConvertedScaleMode> ConvertScalingModeImpl(NintendoScaleMode mode) { |
| 1203 | switch (mode) { | 1220 | switch (mode) { |
| 1204 | case NintendoScaleMode::None: | 1221 | case NintendoScaleMode::None: |
| @@ -1243,7 +1260,8 @@ IApplicationDisplayService::IApplicationDisplayService( | |||
| 1243 | {2102, &IApplicationDisplayService::ConvertScalingMode, "ConvertScalingMode"}, | 1260 | {2102, &IApplicationDisplayService::ConvertScalingMode, "ConvertScalingMode"}, |
| 1244 | {2450, nullptr, "GetIndirectLayerImageMap"}, | 1261 | {2450, nullptr, "GetIndirectLayerImageMap"}, |
| 1245 | {2451, nullptr, "GetIndirectLayerImageCropMap"}, | 1262 | {2451, nullptr, "GetIndirectLayerImageCropMap"}, |
| 1246 | {2460, nullptr, "GetIndirectLayerImageRequiredMemoryInfo"}, | 1263 | {2460, &IApplicationDisplayService::GetIndirectLayerImageRequiredMemoryInfo, |
| 1264 | "GetIndirectLayerImageRequiredMemoryInfo"}, | ||
| 1247 | {5202, &IApplicationDisplayService::GetDisplayVsyncEvent, "GetDisplayVsyncEvent"}, | 1265 | {5202, &IApplicationDisplayService::GetDisplayVsyncEvent, "GetDisplayVsyncEvent"}, |
| 1248 | {5203, nullptr, "GetDisplayVsyncEventForDebug"}, | 1266 | {5203, nullptr, "GetDisplayVsyncEventForDebug"}, |
| 1249 | }; | 1267 | }; |
diff --git a/src/core/memory/dmnt_cheat_vm.cpp b/src/core/memory/dmnt_cheat_vm.cpp index 2e7da23fe..48be80c12 100644 --- a/src/core/memory/dmnt_cheat_vm.cpp +++ b/src/core/memory/dmnt_cheat_vm.cpp | |||
| @@ -313,30 +313,32 @@ bool DmntCheatVm::DecodeNextOpcode(CheatVmOpcode& out) { | |||
| 313 | 313 | ||
| 314 | switch (opcode_type) { | 314 | switch (opcode_type) { |
| 315 | case CheatVmOpcodeType::StoreStatic: { | 315 | case CheatVmOpcodeType::StoreStatic: { |
| 316 | StoreStaticOpcode store_static{}; | ||
| 317 | // 0TMR00AA AAAAAAAA YYYYYYYY (YYYYYYYY) | 316 | // 0TMR00AA AAAAAAAA YYYYYYYY (YYYYYYYY) |
| 318 | // Read additional words. | 317 | // Read additional words. |
| 319 | const u32 second_dword = GetNextDword(); | 318 | const u32 second_dword = GetNextDword(); |
| 320 | store_static.bit_width = (first_dword >> 24) & 0xF; | 319 | const u32 bit_width = (first_dword >> 24) & 0xF; |
| 321 | store_static.mem_type = static_cast<MemoryAccessType>((first_dword >> 20) & 0xF); | 320 | |
| 322 | store_static.offset_register = ((first_dword >> 16) & 0xF); | 321 | opcode.opcode = StoreStaticOpcode{ |
| 323 | store_static.rel_address = | 322 | .bit_width = bit_width, |
| 324 | (static_cast<u64>(first_dword & 0xFF) << 32ul) | static_cast<u64>(second_dword); | 323 | .mem_type = static_cast<MemoryAccessType>((first_dword >> 20) & 0xF), |
| 325 | store_static.value = GetNextVmInt(store_static.bit_width); | 324 | .offset_register = (first_dword >> 16) & 0xF, |
| 326 | opcode.opcode = store_static; | 325 | .rel_address = (static_cast<u64>(first_dword & 0xFF) << 32) | second_dword, |
| 326 | .value = GetNextVmInt(bit_width), | ||
| 327 | }; | ||
| 327 | } break; | 328 | } break; |
| 328 | case CheatVmOpcodeType::BeginConditionalBlock: { | 329 | case CheatVmOpcodeType::BeginConditionalBlock: { |
| 329 | BeginConditionalOpcode begin_cond{}; | ||
| 330 | // 1TMC00AA AAAAAAAA YYYYYYYY (YYYYYYYY) | 330 | // 1TMC00AA AAAAAAAA YYYYYYYY (YYYYYYYY) |
| 331 | // Read additional words. | 331 | // Read additional words. |
| 332 | const u32 second_dword = GetNextDword(); | 332 | const u32 second_dword = GetNextDword(); |
| 333 | begin_cond.bit_width = (first_dword >> 24) & 0xF; | 333 | const u32 bit_width = (first_dword >> 24) & 0xF; |
| 334 | begin_cond.mem_type = static_cast<MemoryAccessType>((first_dword >> 20) & 0xF); | 334 | |
| 335 | begin_cond.cond_type = static_cast<ConditionalComparisonType>((first_dword >> 16) & 0xF); | 335 | opcode.opcode = BeginConditionalOpcode{ |
| 336 | begin_cond.rel_address = | 336 | .bit_width = bit_width, |
| 337 | (static_cast<u64>(first_dword & 0xFF) << 32ul) | static_cast<u64>(second_dword); | 337 | .mem_type = static_cast<MemoryAccessType>((first_dword >> 20) & 0xF), |
| 338 | begin_cond.value = GetNextVmInt(begin_cond.bit_width); | 338 | .cond_type = static_cast<ConditionalComparisonType>((first_dword >> 16) & 0xF), |
| 339 | opcode.opcode = begin_cond; | 339 | .rel_address = (static_cast<u64>(first_dword & 0xFF) << 32) | second_dword, |
| 340 | .value = GetNextVmInt(bit_width), | ||
| 341 | }; | ||
| 340 | } break; | 342 | } break; |
| 341 | case CheatVmOpcodeType::EndConditionalBlock: { | 343 | case CheatVmOpcodeType::EndConditionalBlock: { |
| 342 | // 20000000 | 344 | // 20000000 |
| @@ -344,12 +346,14 @@ bool DmntCheatVm::DecodeNextOpcode(CheatVmOpcode& out) { | |||
| 344 | opcode.opcode = EndConditionalOpcode{}; | 346 | opcode.opcode = EndConditionalOpcode{}; |
| 345 | } break; | 347 | } break; |
| 346 | case CheatVmOpcodeType::ControlLoop: { | 348 | case CheatVmOpcodeType::ControlLoop: { |
| 347 | ControlLoopOpcode ctrl_loop{}; | ||
| 348 | // 300R0000 VVVVVVVV | 349 | // 300R0000 VVVVVVVV |
| 349 | // 310R0000 | 350 | // 310R0000 |
| 350 | // Parse register, whether loop start or loop end. | 351 | // Parse register, whether loop start or loop end. |
| 351 | ctrl_loop.start_loop = ((first_dword >> 24) & 0xF) == 0; | 352 | ControlLoopOpcode ctrl_loop{ |
| 352 | ctrl_loop.reg_index = ((first_dword >> 20) & 0xF); | 353 | .start_loop = ((first_dword >> 24) & 0xF) == 0, |
| 354 | .reg_index = (first_dword >> 20) & 0xF, | ||
| 355 | .num_iters = 0, | ||
| 356 | }; | ||
| 353 | 357 | ||
| 354 | // Read number of iters if loop start. | 358 | // Read number of iters if loop start. |
| 355 | if (ctrl_loop.start_loop) { | 359 | if (ctrl_loop.start_loop) { |
| @@ -358,66 +362,65 @@ bool DmntCheatVm::DecodeNextOpcode(CheatVmOpcode& out) { | |||
| 358 | opcode.opcode = ctrl_loop; | 362 | opcode.opcode = ctrl_loop; |
| 359 | } break; | 363 | } break; |
| 360 | case CheatVmOpcodeType::LoadRegisterStatic: { | 364 | case CheatVmOpcodeType::LoadRegisterStatic: { |
| 361 | LoadRegisterStaticOpcode ldr_static{}; | ||
| 362 | // 400R0000 VVVVVVVV VVVVVVVV | 365 | // 400R0000 VVVVVVVV VVVVVVVV |
| 363 | // Read additional words. | 366 | // Read additional words. |
| 364 | ldr_static.reg_index = ((first_dword >> 16) & 0xF); | 367 | opcode.opcode = LoadRegisterStaticOpcode{ |
| 365 | ldr_static.value = | 368 | .reg_index = (first_dword >> 16) & 0xF, |
| 366 | (static_cast<u64>(GetNextDword()) << 32ul) | static_cast<u64>(GetNextDword()); | 369 | .value = (static_cast<u64>(GetNextDword()) << 32) | GetNextDword(), |
| 367 | opcode.opcode = ldr_static; | 370 | }; |
| 368 | } break; | 371 | } break; |
| 369 | case CheatVmOpcodeType::LoadRegisterMemory: { | 372 | case CheatVmOpcodeType::LoadRegisterMemory: { |
| 370 | LoadRegisterMemoryOpcode ldr_memory{}; | ||
| 371 | // 5TMRI0AA AAAAAAAA | 373 | // 5TMRI0AA AAAAAAAA |
| 372 | // Read additional words. | 374 | // Read additional words. |
| 373 | const u32 second_dword = GetNextDword(); | 375 | const u32 second_dword = GetNextDword(); |
| 374 | ldr_memory.bit_width = (first_dword >> 24) & 0xF; | 376 | opcode.opcode = LoadRegisterMemoryOpcode{ |
| 375 | ldr_memory.mem_type = static_cast<MemoryAccessType>((first_dword >> 20) & 0xF); | 377 | .bit_width = (first_dword >> 24) & 0xF, |
| 376 | ldr_memory.reg_index = ((first_dword >> 16) & 0xF); | 378 | .mem_type = static_cast<MemoryAccessType>((first_dword >> 20) & 0xF), |
| 377 | ldr_memory.load_from_reg = ((first_dword >> 12) & 0xF) != 0; | 379 | .reg_index = ((first_dword >> 16) & 0xF), |
| 378 | ldr_memory.rel_address = | 380 | .load_from_reg = ((first_dword >> 12) & 0xF) != 0, |
| 379 | (static_cast<u64>(first_dword & 0xFF) << 32ul) | static_cast<u64>(second_dword); | 381 | .rel_address = (static_cast<u64>(first_dword & 0xFF) << 32) | second_dword, |
| 380 | opcode.opcode = ldr_memory; | 382 | }; |
| 381 | } break; | 383 | } break; |
| 382 | case CheatVmOpcodeType::StoreStaticToAddress: { | 384 | case CheatVmOpcodeType::StoreStaticToAddress: { |
| 383 | StoreStaticToAddressOpcode str_static{}; | ||
| 384 | // 6T0RIor0 VVVVVVVV VVVVVVVV | 385 | // 6T0RIor0 VVVVVVVV VVVVVVVV |
| 385 | // Read additional words. | 386 | // Read additional words. |
| 386 | str_static.bit_width = (first_dword >> 24) & 0xF; | 387 | opcode.opcode = StoreStaticToAddressOpcode{ |
| 387 | str_static.reg_index = ((first_dword >> 16) & 0xF); | 388 | .bit_width = (first_dword >> 24) & 0xF, |
| 388 | str_static.increment_reg = ((first_dword >> 12) & 0xF) != 0; | 389 | .reg_index = (first_dword >> 16) & 0xF, |
| 389 | str_static.add_offset_reg = ((first_dword >> 8) & 0xF) != 0; | 390 | .increment_reg = ((first_dword >> 12) & 0xF) != 0, |
| 390 | str_static.offset_reg_index = ((first_dword >> 4) & 0xF); | 391 | .add_offset_reg = ((first_dword >> 8) & 0xF) != 0, |
| 391 | str_static.value = | 392 | .offset_reg_index = (first_dword >> 4) & 0xF, |
| 392 | (static_cast<u64>(GetNextDword()) << 32ul) | static_cast<u64>(GetNextDword()); | 393 | .value = (static_cast<u64>(GetNextDword()) << 32) | GetNextDword(), |
| 393 | opcode.opcode = str_static; | 394 | }; |
| 394 | } break; | 395 | } break; |
| 395 | case CheatVmOpcodeType::PerformArithmeticStatic: { | 396 | case CheatVmOpcodeType::PerformArithmeticStatic: { |
| 396 | PerformArithmeticStaticOpcode perform_math_static{}; | ||
| 397 | // 7T0RC000 VVVVVVVV | 397 | // 7T0RC000 VVVVVVVV |
| 398 | // Read additional words. | 398 | // Read additional words. |
| 399 | perform_math_static.bit_width = (first_dword >> 24) & 0xF; | 399 | opcode.opcode = PerformArithmeticStaticOpcode{ |
| 400 | perform_math_static.reg_index = ((first_dword >> 16) & 0xF); | 400 | .bit_width = (first_dword >> 24) & 0xF, |
| 401 | perform_math_static.math_type = | 401 | .reg_index = ((first_dword >> 16) & 0xF), |
| 402 | static_cast<RegisterArithmeticType>((first_dword >> 12) & 0xF); | 402 | .math_type = static_cast<RegisterArithmeticType>((first_dword >> 12) & 0xF), |
| 403 | perform_math_static.value = GetNextDword(); | 403 | .value = GetNextDword(), |
| 404 | opcode.opcode = perform_math_static; | 404 | }; |
| 405 | } break; | 405 | } break; |
| 406 | case CheatVmOpcodeType::BeginKeypressConditionalBlock: { | 406 | case CheatVmOpcodeType::BeginKeypressConditionalBlock: { |
| 407 | BeginKeypressConditionalOpcode begin_keypress_cond{}; | ||
| 408 | // 8kkkkkkk | 407 | // 8kkkkkkk |
| 409 | // Just parse the mask. | 408 | // Just parse the mask. |
| 410 | begin_keypress_cond.key_mask = first_dword & 0x0FFFFFFF; | 409 | opcode.opcode = BeginKeypressConditionalOpcode{ |
| 411 | opcode.opcode = begin_keypress_cond; | 410 | .key_mask = first_dword & 0x0FFFFFFF, |
| 411 | }; | ||
| 412 | } break; | 412 | } break; |
| 413 | case CheatVmOpcodeType::PerformArithmeticRegister: { | 413 | case CheatVmOpcodeType::PerformArithmeticRegister: { |
| 414 | PerformArithmeticRegisterOpcode perform_math_reg{}; | ||
| 415 | // 9TCRSIs0 (VVVVVVVV (VVVVVVVV)) | 414 | // 9TCRSIs0 (VVVVVVVV (VVVVVVVV)) |
| 416 | perform_math_reg.bit_width = (first_dword >> 24) & 0xF; | 415 | PerformArithmeticRegisterOpcode perform_math_reg{ |
| 417 | perform_math_reg.math_type = static_cast<RegisterArithmeticType>((first_dword >> 20) & 0xF); | 416 | .bit_width = (first_dword >> 24) & 0xF, |
| 418 | perform_math_reg.dst_reg_index = ((first_dword >> 16) & 0xF); | 417 | .math_type = static_cast<RegisterArithmeticType>((first_dword >> 20) & 0xF), |
| 419 | perform_math_reg.src_reg_1_index = ((first_dword >> 12) & 0xF); | 418 | .dst_reg_index = (first_dword >> 16) & 0xF, |
| 420 | perform_math_reg.has_immediate = ((first_dword >> 8) & 0xF) != 0; | 419 | .src_reg_1_index = (first_dword >> 12) & 0xF, |
| 420 | .src_reg_2_index = 0, | ||
| 421 | .has_immediate = ((first_dword >> 8) & 0xF) != 0, | ||
| 422 | .value = {}, | ||
| 423 | }; | ||
| 421 | if (perform_math_reg.has_immediate) { | 424 | if (perform_math_reg.has_immediate) { |
| 422 | perform_math_reg.src_reg_2_index = 0; | 425 | perform_math_reg.src_reg_2_index = 0; |
| 423 | perform_math_reg.value = GetNextVmInt(perform_math_reg.bit_width); | 426 | perform_math_reg.value = GetNextVmInt(perform_math_reg.bit_width); |
| @@ -427,7 +430,6 @@ bool DmntCheatVm::DecodeNextOpcode(CheatVmOpcode& out) { | |||
| 427 | opcode.opcode = perform_math_reg; | 430 | opcode.opcode = perform_math_reg; |
| 428 | } break; | 431 | } break; |
| 429 | case CheatVmOpcodeType::StoreRegisterToAddress: { | 432 | case CheatVmOpcodeType::StoreRegisterToAddress: { |
| 430 | StoreRegisterToAddressOpcode str_register{}; | ||
| 431 | // ATSRIOxa (aaaaaaaa) | 433 | // ATSRIOxa (aaaaaaaa) |
| 432 | // A = opcode 10 | 434 | // A = opcode 10 |
| 433 | // T = bit width | 435 | // T = bit width |
| @@ -439,20 +441,23 @@ bool DmntCheatVm::DecodeNextOpcode(CheatVmOpcode& out) { | |||
| 439 | // Relative Address | 441 | // Relative Address |
| 440 | // x = offset register (for offset type 1), memory type (for offset type 3) | 442 | // x = offset register (for offset type 1), memory type (for offset type 3) |
| 441 | // a = relative address (for offset type 2+3) | 443 | // a = relative address (for offset type 2+3) |
| 442 | str_register.bit_width = (first_dword >> 24) & 0xF; | 444 | StoreRegisterToAddressOpcode str_register{ |
| 443 | str_register.str_reg_index = ((first_dword >> 20) & 0xF); | 445 | .bit_width = (first_dword >> 24) & 0xF, |
| 444 | str_register.addr_reg_index = ((first_dword >> 16) & 0xF); | 446 | .str_reg_index = (first_dword >> 20) & 0xF, |
| 445 | str_register.increment_reg = ((first_dword >> 12) & 0xF) != 0; | 447 | .addr_reg_index = (first_dword >> 16) & 0xF, |
| 446 | str_register.ofs_type = static_cast<StoreRegisterOffsetType>(((first_dword >> 8) & 0xF)); | 448 | .increment_reg = ((first_dword >> 12) & 0xF) != 0, |
| 447 | str_register.ofs_reg_index = ((first_dword >> 4) & 0xF); | 449 | .ofs_type = static_cast<StoreRegisterOffsetType>(((first_dword >> 8) & 0xF)), |
| 450 | .mem_type = MemoryAccessType::MainNso, | ||
| 451 | .ofs_reg_index = (first_dword >> 4) & 0xF, | ||
| 452 | .rel_address = 0, | ||
| 453 | }; | ||
| 448 | switch (str_register.ofs_type) { | 454 | switch (str_register.ofs_type) { |
| 449 | case StoreRegisterOffsetType::None: | 455 | case StoreRegisterOffsetType::None: |
| 450 | case StoreRegisterOffsetType::Reg: | 456 | case StoreRegisterOffsetType::Reg: |
| 451 | // Nothing more to do | 457 | // Nothing more to do |
| 452 | break; | 458 | break; |
| 453 | case StoreRegisterOffsetType::Imm: | 459 | case StoreRegisterOffsetType::Imm: |
| 454 | str_register.rel_address = | 460 | str_register.rel_address = (static_cast<u64>(first_dword & 0xF) << 32) | GetNextDword(); |
| 455 | ((static_cast<u64>(first_dword & 0xF) << 32ul) | static_cast<u64>(GetNextDword())); | ||
| 456 | break; | 461 | break; |
| 457 | case StoreRegisterOffsetType::MemReg: | 462 | case StoreRegisterOffsetType::MemReg: |
| 458 | str_register.mem_type = static_cast<MemoryAccessType>((first_dword >> 4) & 0xF); | 463 | str_register.mem_type = static_cast<MemoryAccessType>((first_dword >> 4) & 0xF); |
| @@ -460,8 +465,7 @@ bool DmntCheatVm::DecodeNextOpcode(CheatVmOpcode& out) { | |||
| 460 | case StoreRegisterOffsetType::MemImm: | 465 | case StoreRegisterOffsetType::MemImm: |
| 461 | case StoreRegisterOffsetType::MemImmReg: | 466 | case StoreRegisterOffsetType::MemImmReg: |
| 462 | str_register.mem_type = static_cast<MemoryAccessType>((first_dword >> 4) & 0xF); | 467 | str_register.mem_type = static_cast<MemoryAccessType>((first_dword >> 4) & 0xF); |
| 463 | str_register.rel_address = | 468 | str_register.rel_address = (static_cast<u64>(first_dword & 0xF) << 32) | GetNextDword(); |
| 464 | ((static_cast<u64>(first_dword & 0xF) << 32ul) | static_cast<u64>(GetNextDword())); | ||
| 465 | break; | 469 | break; |
| 466 | default: | 470 | default: |
| 467 | str_register.ofs_type = StoreRegisterOffsetType::None; | 471 | str_register.ofs_type = StoreRegisterOffsetType::None; |
| @@ -470,7 +474,6 @@ bool DmntCheatVm::DecodeNextOpcode(CheatVmOpcode& out) { | |||
| 470 | opcode.opcode = str_register; | 474 | opcode.opcode = str_register; |
| 471 | } break; | 475 | } break; |
| 472 | case CheatVmOpcodeType::BeginRegisterConditionalBlock: { | 476 | case CheatVmOpcodeType::BeginRegisterConditionalBlock: { |
| 473 | BeginRegisterConditionalOpcode begin_reg_cond{}; | ||
| 474 | // C0TcSX## | 477 | // C0TcSX## |
| 475 | // C0TcS0Ma aaaaaaaa | 478 | // C0TcS0Ma aaaaaaaa |
| 476 | // C0TcS1Mr | 479 | // C0TcS1Mr |
| @@ -492,11 +495,19 @@ bool DmntCheatVm::DecodeNextOpcode(CheatVmOpcode& out) { | |||
| 492 | // r = offset register. | 495 | // r = offset register. |
| 493 | // X = other register. | 496 | // X = other register. |
| 494 | // V = value. | 497 | // V = value. |
| 495 | begin_reg_cond.bit_width = (first_dword >> 20) & 0xF; | 498 | |
| 496 | begin_reg_cond.cond_type = | 499 | BeginRegisterConditionalOpcode begin_reg_cond{ |
| 497 | static_cast<ConditionalComparisonType>((first_dword >> 16) & 0xF); | 500 | .bit_width = (first_dword >> 20) & 0xF, |
| 498 | begin_reg_cond.val_reg_index = ((first_dword >> 12) & 0xF); | 501 | .cond_type = static_cast<ConditionalComparisonType>((first_dword >> 16) & 0xF), |
| 499 | begin_reg_cond.comp_type = static_cast<CompareRegisterValueType>((first_dword >> 8) & 0xF); | 502 | .val_reg_index = (first_dword >> 12) & 0xF, |
| 503 | .comp_type = static_cast<CompareRegisterValueType>((first_dword >> 8) & 0xF), | ||
| 504 | .mem_type = MemoryAccessType::MainNso, | ||
| 505 | .addr_reg_index = 0, | ||
| 506 | .other_reg_index = 0, | ||
| 507 | .ofs_reg_index = 0, | ||
| 508 | .rel_address = 0, | ||
| 509 | .value = {}, | ||
| 510 | }; | ||
| 500 | 511 | ||
| 501 | switch (begin_reg_cond.comp_type) { | 512 | switch (begin_reg_cond.comp_type) { |
| 502 | case CompareRegisterValueType::StaticValue: | 513 | case CompareRegisterValueType::StaticValue: |
| @@ -508,26 +519,25 @@ bool DmntCheatVm::DecodeNextOpcode(CheatVmOpcode& out) { | |||
| 508 | case CompareRegisterValueType::MemoryRelAddr: | 519 | case CompareRegisterValueType::MemoryRelAddr: |
| 509 | begin_reg_cond.mem_type = static_cast<MemoryAccessType>((first_dword >> 4) & 0xF); | 520 | begin_reg_cond.mem_type = static_cast<MemoryAccessType>((first_dword >> 4) & 0xF); |
| 510 | begin_reg_cond.rel_address = | 521 | begin_reg_cond.rel_address = |
| 511 | ((static_cast<u64>(first_dword & 0xF) << 32ul) | static_cast<u64>(GetNextDword())); | 522 | (static_cast<u64>(first_dword & 0xF) << 32) | GetNextDword(); |
| 512 | break; | 523 | break; |
| 513 | case CompareRegisterValueType::MemoryOfsReg: | 524 | case CompareRegisterValueType::MemoryOfsReg: |
| 514 | begin_reg_cond.mem_type = static_cast<MemoryAccessType>((first_dword >> 4) & 0xF); | 525 | begin_reg_cond.mem_type = static_cast<MemoryAccessType>((first_dword >> 4) & 0xF); |
| 515 | begin_reg_cond.ofs_reg_index = (first_dword & 0xF); | 526 | begin_reg_cond.ofs_reg_index = (first_dword & 0xF); |
| 516 | break; | 527 | break; |
| 517 | case CompareRegisterValueType::RegisterRelAddr: | 528 | case CompareRegisterValueType::RegisterRelAddr: |
| 518 | begin_reg_cond.addr_reg_index = ((first_dword >> 4) & 0xF); | 529 | begin_reg_cond.addr_reg_index = (first_dword >> 4) & 0xF; |
| 519 | begin_reg_cond.rel_address = | 530 | begin_reg_cond.rel_address = |
| 520 | ((static_cast<u64>(first_dword & 0xF) << 32ul) | static_cast<u64>(GetNextDword())); | 531 | (static_cast<u64>(first_dword & 0xF) << 32) | GetNextDword(); |
| 521 | break; | 532 | break; |
| 522 | case CompareRegisterValueType::RegisterOfsReg: | 533 | case CompareRegisterValueType::RegisterOfsReg: |
| 523 | begin_reg_cond.addr_reg_index = ((first_dword >> 4) & 0xF); | 534 | begin_reg_cond.addr_reg_index = (first_dword >> 4) & 0xF; |
| 524 | begin_reg_cond.ofs_reg_index = (first_dword & 0xF); | 535 | begin_reg_cond.ofs_reg_index = first_dword & 0xF; |
| 525 | break; | 536 | break; |
| 526 | } | 537 | } |
| 527 | opcode.opcode = begin_reg_cond; | 538 | opcode.opcode = begin_reg_cond; |
| 528 | } break; | 539 | } break; |
| 529 | case CheatVmOpcodeType::SaveRestoreRegister: { | 540 | case CheatVmOpcodeType::SaveRestoreRegister: { |
| 530 | SaveRestoreRegisterOpcode save_restore_reg{}; | ||
| 531 | // C10D0Sx0 | 541 | // C10D0Sx0 |
| 532 | // C1 = opcode 0xC1 | 542 | // C1 = opcode 0xC1 |
| 533 | // D = destination index. | 543 | // D = destination index. |
| @@ -535,36 +545,37 @@ bool DmntCheatVm::DecodeNextOpcode(CheatVmOpcode& out) { | |||
| 535 | // x = 3 if clearing reg, 2 if clearing saved value, 1 if saving a register, 0 if restoring | 545 | // x = 3 if clearing reg, 2 if clearing saved value, 1 if saving a register, 0 if restoring |
| 536 | // a register. | 546 | // a register. |
| 537 | // NOTE: If we add more save slots later, current encoding is backwards compatible. | 547 | // NOTE: If we add more save slots later, current encoding is backwards compatible. |
| 538 | save_restore_reg.dst_index = (first_dword >> 16) & 0xF; | 548 | opcode.opcode = SaveRestoreRegisterOpcode{ |
| 539 | save_restore_reg.src_index = (first_dword >> 8) & 0xF; | 549 | .dst_index = (first_dword >> 16) & 0xF, |
| 540 | save_restore_reg.op_type = static_cast<SaveRestoreRegisterOpType>((first_dword >> 4) & 0xF); | 550 | .src_index = (first_dword >> 8) & 0xF, |
| 541 | opcode.opcode = save_restore_reg; | 551 | .op_type = static_cast<SaveRestoreRegisterOpType>((first_dword >> 4) & 0xF), |
| 552 | }; | ||
| 542 | } break; | 553 | } break; |
| 543 | case CheatVmOpcodeType::SaveRestoreRegisterMask: { | 554 | case CheatVmOpcodeType::SaveRestoreRegisterMask: { |
| 544 | SaveRestoreRegisterMaskOpcode save_restore_regmask{}; | ||
| 545 | // C2x0XXXX | 555 | // C2x0XXXX |
| 546 | // C2 = opcode 0xC2 | 556 | // C2 = opcode 0xC2 |
| 547 | // x = 3 if clearing reg, 2 if clearing saved value, 1 if saving, 0 if restoring. | 557 | // x = 3 if clearing reg, 2 if clearing saved value, 1 if saving, 0 if restoring. |
| 548 | // X = 16-bit bitmask, bit i --> save or restore register i. | 558 | // X = 16-bit bitmask, bit i --> save or restore register i. |
| 549 | save_restore_regmask.op_type = | 559 | SaveRestoreRegisterMaskOpcode save_restore_regmask{ |
| 550 | static_cast<SaveRestoreRegisterOpType>((first_dword >> 20) & 0xF); | 560 | .op_type = static_cast<SaveRestoreRegisterOpType>((first_dword >> 20) & 0xF), |
| 561 | .should_operate = {}, | ||
| 562 | }; | ||
| 551 | for (std::size_t i = 0; i < NumRegisters; i++) { | 563 | for (std::size_t i = 0; i < NumRegisters; i++) { |
| 552 | save_restore_regmask.should_operate[i] = (first_dword & (1u << i)) != 0; | 564 | save_restore_regmask.should_operate[i] = (first_dword & (1U << i)) != 0; |
| 553 | } | 565 | } |
| 554 | opcode.opcode = save_restore_regmask; | 566 | opcode.opcode = save_restore_regmask; |
| 555 | } break; | 567 | } break; |
| 556 | case CheatVmOpcodeType::ReadWriteStaticRegister: { | 568 | case CheatVmOpcodeType::ReadWriteStaticRegister: { |
| 557 | ReadWriteStaticRegisterOpcode rw_static_reg{}; | ||
| 558 | // C3000XXx | 569 | // C3000XXx |
| 559 | // C3 = opcode 0xC3. | 570 | // C3 = opcode 0xC3. |
| 560 | // XX = static register index. | 571 | // XX = static register index. |
| 561 | // x = register index. | 572 | // x = register index. |
| 562 | rw_static_reg.static_idx = ((first_dword >> 4) & 0xFF); | 573 | opcode.opcode = ReadWriteStaticRegisterOpcode{ |
| 563 | rw_static_reg.idx = (first_dword & 0xF); | 574 | .static_idx = (first_dword >> 4) & 0xFF, |
| 564 | opcode.opcode = rw_static_reg; | 575 | .idx = first_dword & 0xF, |
| 576 | }; | ||
| 565 | } break; | 577 | } break; |
| 566 | case CheatVmOpcodeType::DebugLog: { | 578 | case CheatVmOpcodeType::DebugLog: { |
| 567 | DebugLogOpcode debug_log{}; | ||
| 568 | // FFFTIX## | 579 | // FFFTIX## |
| 569 | // FFFTI0Ma aaaaaaaa | 580 | // FFFTI0Ma aaaaaaaa |
| 570 | // FFFTI1Mr | 581 | // FFFTI1Mr |
| @@ -583,31 +594,36 @@ bool DmntCheatVm::DecodeNextOpcode(CheatVmOpcode& out) { | |||
| 583 | // a = relative address. | 594 | // a = relative address. |
| 584 | // r = offset register. | 595 | // r = offset register. |
| 585 | // X = value register. | 596 | // X = value register. |
| 586 | debug_log.bit_width = (first_dword >> 16) & 0xF; | 597 | DebugLogOpcode debug_log{ |
| 587 | debug_log.log_id = ((first_dword >> 12) & 0xF); | 598 | .bit_width = (first_dword >> 16) & 0xF, |
| 588 | debug_log.val_type = static_cast<DebugLogValueType>((first_dword >> 8) & 0xF); | 599 | .log_id = (first_dword >> 12) & 0xF, |
| 600 | .val_type = static_cast<DebugLogValueType>((first_dword >> 8) & 0xF), | ||
| 601 | .mem_type = MemoryAccessType::MainNso, | ||
| 602 | .addr_reg_index = 0, | ||
| 603 | .val_reg_index = 0, | ||
| 604 | .ofs_reg_index = 0, | ||
| 605 | .rel_address = 0, | ||
| 606 | }; | ||
| 589 | 607 | ||
| 590 | switch (debug_log.val_type) { | 608 | switch (debug_log.val_type) { |
| 591 | case DebugLogValueType::RegisterValue: | 609 | case DebugLogValueType::RegisterValue: |
| 592 | debug_log.val_reg_index = ((first_dword >> 4) & 0xF); | 610 | debug_log.val_reg_index = (first_dword >> 4) & 0xF; |
| 593 | break; | 611 | break; |
| 594 | case DebugLogValueType::MemoryRelAddr: | 612 | case DebugLogValueType::MemoryRelAddr: |
| 595 | debug_log.mem_type = static_cast<MemoryAccessType>((first_dword >> 4) & 0xF); | 613 | debug_log.mem_type = static_cast<MemoryAccessType>((first_dword >> 4) & 0xF); |
| 596 | debug_log.rel_address = | 614 | debug_log.rel_address = (static_cast<u64>(first_dword & 0xF) << 32) | GetNextDword(); |
| 597 | ((static_cast<u64>(first_dword & 0xF) << 32ul) | static_cast<u64>(GetNextDword())); | ||
| 598 | break; | 615 | break; |
| 599 | case DebugLogValueType::MemoryOfsReg: | 616 | case DebugLogValueType::MemoryOfsReg: |
| 600 | debug_log.mem_type = static_cast<MemoryAccessType>((first_dword >> 4) & 0xF); | 617 | debug_log.mem_type = static_cast<MemoryAccessType>((first_dword >> 4) & 0xF); |
| 601 | debug_log.ofs_reg_index = (first_dword & 0xF); | 618 | debug_log.ofs_reg_index = first_dword & 0xF; |
| 602 | break; | 619 | break; |
| 603 | case DebugLogValueType::RegisterRelAddr: | 620 | case DebugLogValueType::RegisterRelAddr: |
| 604 | debug_log.addr_reg_index = ((first_dword >> 4) & 0xF); | 621 | debug_log.addr_reg_index = (first_dword >> 4) & 0xF; |
| 605 | debug_log.rel_address = | 622 | debug_log.rel_address = (static_cast<u64>(first_dword & 0xF) << 32) | GetNextDword(); |
| 606 | ((static_cast<u64>(first_dword & 0xF) << 32ul) | static_cast<u64>(GetNextDword())); | ||
| 607 | break; | 623 | break; |
| 608 | case DebugLogValueType::RegisterOfsReg: | 624 | case DebugLogValueType::RegisterOfsReg: |
| 609 | debug_log.addr_reg_index = ((first_dword >> 4) & 0xF); | 625 | debug_log.addr_reg_index = (first_dword >> 4) & 0xF; |
| 610 | debug_log.ofs_reg_index = (first_dword & 0xF); | 626 | debug_log.ofs_reg_index = first_dword & 0xF; |
| 611 | break; | 627 | break; |
| 612 | } | 628 | } |
| 613 | opcode.opcode = debug_log; | 629 | opcode.opcode = debug_log; |
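The decoder rewrite converts field-by-field assignment into C++20 designated initializers: every field is named at the construction site, and any field left out is value-initialized instead of carrying a stale value. A minimal illustration with hypothetical fields:

```cpp
#include <cstdint>

struct OpcodeSketch {
    std::uint32_t bit_width;
    std::uint32_t reg_index;
    std::uint64_t rel_address;
};

// Fields must appear in declaration order; omitted ones are zeroed.
constexpr std::uint32_t first_dword = 0x04010000;
constexpr OpcodeSketch op{
    .bit_width = (first_dword >> 24) & 0xF,
    .reg_index = (first_dword >> 16) & 0xF,
    .rel_address = 0,
};
static_assert(op.bit_width == 4 && op.reg_index == 1);
```

This is also why the rewritten cases spell out members like `.src_reg_2_index = 0` and `.value = {}` even when a later branch overwrites them: the initializer lists document the complete state up front.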
diff --git a/src/core/perf_stats.cpp b/src/core/perf_stats.cpp index 29339ead7..b899ac884 100644 --- a/src/core/perf_stats.cpp +++ b/src/core/perf_stats.cpp | |||
| @@ -74,15 +74,16 @@ void PerfStats::EndGameFrame() { | |||
| 74 | game_frames += 1; | 74 | game_frames += 1; |
| 75 | } | 75 | } |
| 76 | 76 | ||
| 77 | double PerfStats::GetMeanFrametime() { | 77 | double PerfStats::GetMeanFrametime() const { |
| 78 | std::lock_guard lock{object_mutex}; | 78 | std::lock_guard lock{object_mutex}; |
| 79 | 79 | ||
| 80 | if (current_index <= IgnoreFrames) { | 80 | if (current_index <= IgnoreFrames) { |
| 81 | return 0; | 81 | return 0; |
| 82 | } | 82 | } |
| 83 | |||
| 83 | const double sum = std::accumulate(perf_history.begin() + IgnoreFrames, | 84 | const double sum = std::accumulate(perf_history.begin() + IgnoreFrames, |
| 84 | perf_history.begin() + current_index, 0.0); | 85 | perf_history.begin() + current_index, 0.0); |
| 85 | return sum / (current_index - IgnoreFrames); | 86 | return sum / static_cast<double>(current_index - IgnoreFrames); |
| 86 | } | 87 | } |
| 87 | 88 | ||
| 88 | PerfStatsResults PerfStats::GetAndResetStats(microseconds current_system_time_us) { | 89 | PerfStatsResults PerfStats::GetAndResetStats(microseconds current_system_time_us) { |
| @@ -94,12 +95,13 @@ PerfStatsResults PerfStats::GetAndResetStats(microseconds current_system_time_us | |||
| 94 | 95 | ||
| 95 | const auto system_us_per_second = (current_system_time_us - reset_point_system_us) / interval; | 96 | const auto system_us_per_second = (current_system_time_us - reset_point_system_us) / interval; |
| 96 | 97 | ||
| 97 | PerfStatsResults results{}; | 98 | const PerfStatsResults results{ |
| 98 | results.system_fps = static_cast<double>(system_frames) / interval; | 99 | .system_fps = static_cast<double>(system_frames) / interval, |
| 99 | results.game_fps = static_cast<double>(game_frames) / interval; | 100 | .game_fps = static_cast<double>(game_frames) / interval, |
| 100 | results.frametime = duration_cast<DoubleSecs>(accumulated_frametime).count() / | 101 | .frametime = duration_cast<DoubleSecs>(accumulated_frametime).count() / |
| 101 | static_cast<double>(system_frames); | 102 | static_cast<double>(system_frames), |
| 102 | results.emulation_speed = system_us_per_second.count() / 1'000'000.0; | 103 | .emulation_speed = system_us_per_second.count() / 1'000'000.0, |
| 104 | }; | ||
| 103 | 105 | ||
| 104 | // Reset counters | 106 | // Reset counters |
| 105 | reset_point = now; | 107 | reset_point = now; |
| @@ -111,7 +113,7 @@ PerfStatsResults PerfStats::GetAndResetStats(microseconds current_system_time_us | |||
| 111 | return results; | 113 | return results; |
| 112 | } | 114 | } |
| 113 | 115 | ||
| 114 | double PerfStats::GetLastFrameTimeScale() { | 116 | double PerfStats::GetLastFrameTimeScale() const { |
| 115 | std::lock_guard lock{object_mutex}; | 117 | std::lock_guard lock{object_mutex}; |
| 116 | 118 | ||
| 117 | constexpr double FRAME_LENGTH = 1.0 / 60; | 119 | constexpr double FRAME_LENGTH = 1.0 / 60; |
diff --git a/src/core/perf_stats.h b/src/core/perf_stats.h index d9a64f072..69256b960 100644 --- a/src/core/perf_stats.h +++ b/src/core/perf_stats.h | |||
| @@ -30,7 +30,6 @@ struct PerfStatsResults { | |||
| 30 | class PerfStats { | 30 | class PerfStats { |
| 31 | public: | 31 | public: |
| 32 | explicit PerfStats(u64 title_id); | 32 | explicit PerfStats(u64 title_id); |
| 33 | |||
| 34 | ~PerfStats(); | 33 | ~PerfStats(); |
| 35 | 34 | ||
| 36 | using Clock = std::chrono::high_resolution_clock; | 35 | using Clock = std::chrono::high_resolution_clock; |
| @@ -42,18 +41,18 @@ public: | |||
| 42 | PerfStatsResults GetAndResetStats(std::chrono::microseconds current_system_time_us); | 41 | PerfStatsResults GetAndResetStats(std::chrono::microseconds current_system_time_us); |
| 43 | 42 | ||
| 44 | /** | 43 | /** |
| 45 | * Returns the Arthimetic Mean of all frametime values stored in the performance history. | 44 | * Returns the arithmetic mean of all frametime values stored in the performance history. |
| 46 | */ | 45 | */ |
| 47 | double GetMeanFrametime(); | 46 | double GetMeanFrametime() const; |
| 48 | 47 | ||
| 49 | /** | 48 | /** |
| 50 | * Gets the ratio between walltime and the emulated time of the previous system frame. This is | 49 | * Gets the ratio between walltime and the emulated time of the previous system frame. This is |
| 51 | * useful for scaling inputs or outputs moving between the two time domains. | 50 | * useful for scaling inputs or outputs moving between the two time domains. |
| 52 | */ | 51 | */ |
| 53 | double GetLastFrameTimeScale(); | 52 | double GetLastFrameTimeScale() const; |
| 54 | 53 | ||
| 55 | private: | 54 | private: |
| 56 | std::mutex object_mutex{}; | 55 | mutable std::mutex object_mutex; |
| 57 | 56 | ||
| 58 | /// Title ID for the game that is running. 0 if there is no game running yet | 57 | /// Title ID for the game that is running. 0 if there is no game running yet |
| 59 | u64 title_id{0}; | 58 | u64 title_id{0}; |
| @@ -61,7 +60,7 @@ private: | |||
| 61 | std::size_t current_index{0}; | 60 | std::size_t current_index{0}; |
| 62 | /// Stores an hour of historical frametime data useful for processing and tracking performance | 61 | /// Stores an hour of historical frametime data useful for processing and tracking performance |
| 63 | /// regressions with code changes. | 62 | /// regressions with code changes. |
| 64 | std::array<double, 216000> perf_history = {}; | 63 | std::array<double, 216000> perf_history{}; |
| 65 | 64 | ||
| 66 | /// Point when the cumulative counters were reset | 65 | /// Point when the cumulative counters were reset |
| 67 | Clock::time_point reset_point = Clock::now(); | 66 | Clock::time_point reset_point = Clock::now(); |
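Marking object_mutex `mutable` is what lets GetMeanFrametime and GetLastFrameTimeScale become const: a logically-const reader must still take the lock, and `mutable` exempts the mutex from the object's const-ness. Sketch of the pattern:

```cpp
#include <mutex>

class Stats {
public:
    double Mean() const {
        std::lock_guard lock{mutex}; // legal only because mutex is mutable
        return count == 0.0 ? 0.0 : sum / count;
    }

    void Add(double value) {
        std::lock_guard lock{mutex};
        sum += value;
        count += 1.0;
    }

private:
    mutable std::mutex mutex; // locking mutates the mutex, not the statistics
    double sum = 0.0;
    double count = 0.0;
};
```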
diff --git a/src/tests/core/core_timing.cpp b/src/tests/core/core_timing.cpp index 022b26e6d..b35459152 100644 --- a/src/tests/core/core_timing.cpp +++ b/src/tests/core/core_timing.cpp | |||
| @@ -46,20 +46,16 @@ struct ScopeInit final { | |||
| 46 | Core::Timing::CoreTiming core_timing; | 46 | Core::Timing::CoreTiming core_timing; |
| 47 | }; | 47 | }; |
| 48 | 48 | ||
| 49 | #pragma optimize("", off) | ||
| 50 | |||
| 51 | u64 TestTimerSpeed(Core::Timing::CoreTiming& core_timing) { | 49 | u64 TestTimerSpeed(Core::Timing::CoreTiming& core_timing) { |
| 52 | u64 start = core_timing.GetGlobalTimeNs().count(); | 50 | const u64 start = core_timing.GetGlobalTimeNs().count(); |
| 53 | u64 placebo = 0; | 51 | volatile u64 placebo = 0; |
| 54 | for (std::size_t i = 0; i < 1000; i++) { | 52 | for (std::size_t i = 0; i < 1000; i++) { |
| 55 | placebo += core_timing.GetGlobalTimeNs().count(); | 53 | placebo = placebo + core_timing.GetGlobalTimeNs().count(); |
| 56 | } | 54 | } |
| 57 | u64 end = core_timing.GetGlobalTimeNs().count(); | 55 | const u64 end = core_timing.GetGlobalTimeNs().count(); |
| 58 | return (end - start); | 56 | return end - start; |
| 59 | } | 57 | } |
| 60 | 58 | ||
| 61 | #pragma optimize("", on) | ||
| 62 | |||
| 63 | } // Anonymous namespace | 59 | } // Anonymous namespace |
| 64 | 60 | ||
| 65 | TEST_CASE("CoreTiming[BasicOrder]", "[core]") { | 61 | TEST_CASE("CoreTiming[BasicOrder]", "[core]") { |
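Dropping the MSVC-only #pragma optimize pair in favor of a volatile accumulator is the portable way to keep the timing loop alive: every access to a volatile object is an observable side effect the compiler must emit, so the loop cannot be elided. Note the diff also writes `placebo = placebo + ...` instead of `+=`, since C++20 deprecates compound assignment on volatile operands. A minimal sketch of the pattern, with a stand-in clock:

    #include <chrono>
    #include <cstdint>

    std::uint64_t TimeLoop() {
        using Clock = std::chrono::steady_clock;
        const auto start = Clock::now();
        volatile std::uint64_t sink = 0; // volatile: the loop cannot be optimized away
        for (int i = 0; i < 1000; ++i) {
            // Plain '=' avoids the C++20-deprecated compound volatile access.
            sink = sink + static_cast<std::uint64_t>(
                              Clock::now().time_since_epoch().count());
        }
        return static_cast<std::uint64_t>((Clock::now() - start).count());
    }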
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp index 8e19c3373..512578c8b 100644 --- a/src/video_core/gpu.cpp +++ b/src/video_core/gpu.cpp | |||
| @@ -81,7 +81,7 @@ void GPU::WaitFence(u32 syncpoint_id, u32 value) { | |||
| 81 | } | 81 | } |
| 82 | MICROPROFILE_SCOPE(GPU_wait); | 82 | MICROPROFILE_SCOPE(GPU_wait); |
| 83 | std::unique_lock lock{sync_mutex}; | 83 | std::unique_lock lock{sync_mutex}; |
| 84 | sync_cv.wait(lock, [=]() { return syncpoints[syncpoint_id].load() >= value; }); | 84 | sync_cv.wait(lock, [=, this] { return syncpoints[syncpoint_id].load() >= value; }); |
| 85 | } | 85 | } |
| 86 | 86 | ||
| 87 | void GPU::IncrementSyncPoint(const u32 syncpoint_id) { | 87 | void GPU::IncrementSyncPoint(const u32 syncpoint_id) { |
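The [=] to [=, this] changes scattered through this diff (here, and later in bootmanager.cpp and configure_input_player.cpp) track C++20, which deprecates the implicit capture of this under a by-copy default; naming the pointer capture explicitly keeps the same semantics while silencing the deprecation warning. A hedged illustration with a toy type:

    #include <cstdio>
    #include <functional>

    struct Widget {
        int value = 42;

        // Valid only while the Widget outlives the returned callback.
        std::function<void()> MakeCallback() {
            int local = 7;
            // [=, this]: copy locals by value, capture this as a pointer.
            // Plain [=] would capture this implicitly, deprecated in C++20.
            return [=, this] { std::printf("%d\n", value + local); };
        }
    };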
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index ff5505d12..844164645 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp | |||
| @@ -4,7 +4,6 @@ | |||
| 4 | 4 | ||
| 5 | #include "common/alignment.h" | 5 | #include "common/alignment.h" |
| 6 | #include "common/assert.h" | 6 | #include "common/assert.h" |
| 7 | #include "common/logging/log.h" | ||
| 8 | #include "core/core.h" | 7 | #include "core/core.h" |
| 9 | #include "core/hle/kernel/memory/page_table.h" | 8 | #include "core/hle/kernel/memory/page_table.h" |
| 10 | #include "core/hle/kernel/process.h" | 9 | #include "core/hle/kernel/process.h" |
| @@ -16,121 +15,137 @@ | |||
| 16 | namespace Tegra { | 15 | namespace Tegra { |
| 17 | 16 | ||
| 18 | MemoryManager::MemoryManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer) | 17 | MemoryManager::MemoryManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer) |
| 19 | : rasterizer{rasterizer}, system{system} { | 18 | : system{system}, rasterizer{rasterizer}, page_table(page_table_size) {} |
| 20 | page_table.Resize(address_space_width, page_bits, false); | ||
| 21 | |||
| 22 | // Initialize the map with a single free region covering the entire managed space. | ||
| 23 | VirtualMemoryArea initial_vma; | ||
| 24 | initial_vma.size = address_space_end; | ||
| 25 | vma_map.emplace(initial_vma.base, initial_vma); | ||
| 26 | |||
| 27 | UpdatePageTableForVMA(initial_vma); | ||
| 28 | } | ||
| 29 | 19 | ||
| 30 | MemoryManager::~MemoryManager() = default; | 20 | MemoryManager::~MemoryManager() = default; |
| 31 | 21 | ||
| 32 | GPUVAddr MemoryManager::AllocateSpace(u64 size, u64 align) { | 22 | GPUVAddr MemoryManager::UpdateRange(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size) { |
| 33 | const u64 aligned_size{Common::AlignUp(size, page_size)}; | 23 | u64 remaining_size{size}; |
| 34 | const GPUVAddr gpu_addr{FindFreeRegion(address_space_base, aligned_size)}; | 24 | for (u64 offset{}; offset < size; offset += page_size) { |
| 35 | 25 | if (remaining_size < page_size) { | |
| 36 | AllocateMemory(gpu_addr, 0, aligned_size); | 26 | SetPageEntry(gpu_addr + offset, page_entry + offset, remaining_size); |
| 37 | 27 | } else { | |
| 28 | SetPageEntry(gpu_addr + offset, page_entry + offset); | ||
| 29 | } | ||
| 30 | remaining_size -= page_size; | ||
| 31 | } | ||
| 38 | return gpu_addr; | 32 | return gpu_addr; |
| 39 | } | 33 | } |
| 40 | 34 | ||
| 41 | GPUVAddr MemoryManager::AllocateSpace(GPUVAddr gpu_addr, u64 size, u64 align) { | 35 | GPUVAddr MemoryManager::Map(VAddr cpu_addr, GPUVAddr gpu_addr, std::size_t size) { |
| 42 | const u64 aligned_size{Common::AlignUp(size, page_size)}; | 36 | return UpdateRange(gpu_addr, cpu_addr, size); |
| 43 | 37 | } | |
| 44 | AllocateMemory(gpu_addr, 0, aligned_size); | ||
| 45 | 38 | ||
| 46 | return gpu_addr; | 39 | GPUVAddr MemoryManager::MapAllocate(VAddr cpu_addr, std::size_t size, std::size_t align) { |
| 40 | return Map(cpu_addr, *FindFreeRange(size, align), size); | ||
| 47 | } | 41 | } |
| 48 | 42 | ||
| 49 | GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, u64 size) { | 43 | void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) { |
| 50 | const u64 aligned_size{Common::AlignUp(size, page_size)}; | 44 | if (!size) { |
| 51 | const GPUVAddr gpu_addr{FindFreeRegion(address_space_base, aligned_size)}; | 45 | return; |
| 46 | } | ||
| 52 | 47 | ||
| 53 | MapBackingMemory(gpu_addr, system.Memory().GetPointer(cpu_addr), aligned_size, cpu_addr); | 48 | // Flush and invalidate through the GPU interface, to be asynchronous if possible. |
| 54 | ASSERT( | 49 | system.GPU().FlushAndInvalidateRegion(*GpuToCpuAddress(gpu_addr), size); |
| 55 | system.CurrentProcess()->PageTable().LockForDeviceAddressSpace(cpu_addr, size).IsSuccess()); | ||
| 56 | 50 | ||
| 57 | return gpu_addr; | 51 | UpdateRange(gpu_addr, PageEntry::State::Unmapped, size); |
| 58 | } | 52 | } |
| 59 | 53 | ||
| 60 | GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size) { | 54 | std::optional<GPUVAddr> MemoryManager::AllocateFixed(GPUVAddr gpu_addr, std::size_t size) { |
| 61 | ASSERT((gpu_addr & page_mask) == 0); | 55 | for (u64 offset{}; offset < size; offset += page_size) { |
| 56 | if (!GetPageEntry(gpu_addr + offset).IsUnmapped()) { | ||
| 57 | return {}; | ||
| 58 | } | ||
| 59 | } | ||
| 62 | 60 | ||
| 63 | const u64 aligned_size{Common::AlignUp(size, page_size)}; | 61 | return UpdateRange(gpu_addr, PageEntry::State::Allocated, size); |
| 62 | } | ||
| 64 | 63 | ||
| 65 | MapBackingMemory(gpu_addr, system.Memory().GetPointer(cpu_addr), aligned_size, cpu_addr); | 64 | GPUVAddr MemoryManager::Allocate(std::size_t size, std::size_t align) { |
| 66 | ASSERT( | 65 | return *AllocateFixed(*FindFreeRange(size, align), size); |
| 67 | system.CurrentProcess()->PageTable().LockForDeviceAddressSpace(cpu_addr, size).IsSuccess()); | ||
| 68 | return gpu_addr; | ||
| 69 | } | 66 | } |
| 70 | 67 | ||
| 71 | GPUVAddr MemoryManager::UnmapBuffer(GPUVAddr gpu_addr, u64 size) { | 68 | void MemoryManager::TryLockPage(PageEntry page_entry, std::size_t size) { |
| 72 | ASSERT((gpu_addr & page_mask) == 0); | 69 | if (!page_entry.IsValid()) { |
| 70 | return; | ||
| 71 | } | ||
| 73 | 72 | ||
| 74 | const u64 aligned_size{Common::AlignUp(size, page_size)}; | 73 | ASSERT(system.CurrentProcess() |
| 75 | const auto cpu_addr = GpuToCpuAddress(gpu_addr); | 74 | ->PageTable() |
| 76 | ASSERT(cpu_addr); | 75 | .LockForDeviceAddressSpace(page_entry.ToAddress(), size) |
| 76 | .IsSuccess()); | ||
| 77 | } | ||
| 77 | 78 | ||
| 78 | // Flush and invalidate through the GPU interface, to be asynchronous if possible. | 79 | void MemoryManager::TryUnlockPage(PageEntry page_entry, std::size_t size) { |
| 79 | system.GPU().FlushAndInvalidateRegion(*cpu_addr, aligned_size); | 80 | if (!page_entry.IsValid()) { |
| 81 | return; | ||
| 82 | } | ||
| 80 | 83 | ||
| 81 | UnmapRange(gpu_addr, aligned_size); | ||
| 82 | ASSERT(system.CurrentProcess() | 84 | ASSERT(system.CurrentProcess() |
| 83 | ->PageTable() | 85 | ->PageTable() |
| 84 | .UnlockForDeviceAddressSpace(cpu_addr.value(), size) | 86 | .UnlockForDeviceAddressSpace(page_entry.ToAddress(), size) |
| 85 | .IsSuccess()); | 87 | .IsSuccess()); |
| 86 | |||
| 87 | return gpu_addr; | ||
| 88 | } | 88 | } |
| 89 | 89 | ||
| 90 | GPUVAddr MemoryManager::FindFreeRegion(GPUVAddr region_start, u64 size) const { | 90 | PageEntry MemoryManager::GetPageEntry(GPUVAddr gpu_addr) const { |
| 91 | // Find the first Free VMA. | 91 | return page_table[PageEntryIndex(gpu_addr)]; |
| 92 | const VMAHandle vma_handle{ | 92 | } |
| 93 | std::find_if(vma_map.begin(), vma_map.end(), [region_start, size](const auto& vma) { | ||
| 94 | if (vma.second.type != VirtualMemoryArea::Type::Unmapped) { | ||
| 95 | return false; | ||
| 96 | } | ||
| 97 | 93 | ||
| 98 | const VAddr vma_end{vma.second.base + vma.second.size}; | 94 | void MemoryManager::SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size) { |
| 99 | return vma_end > region_start && vma_end >= region_start + size; | 95 | // TODO(bunnei): We should lock/unlock device regions. This currently causes issues due to |
| 100 | })}; | 96 | // improper tracking, but should be fixed in the future. |
| 101 | 97 | ||
| 102 | if (vma_handle == vma_map.end()) { | 98 | //// Unlock the old page |
| 103 | return {}; | 99 | // TryUnlockPage(page_table[PageEntryIndex(gpu_addr)], size); |
| 104 | } | ||
| 105 | 100 | ||
| 106 | return std::max(region_start, vma_handle->second.base); | 101 | //// Lock the new page |
| 107 | } | 102 | // TryLockPage(page_entry, size); |
| 108 | 103 | ||
| 109 | bool MemoryManager::IsAddressValid(GPUVAddr addr) const { | 104 | page_table[PageEntryIndex(gpu_addr)] = page_entry; |
| 110 | return (addr >> page_bits) < page_table.pointers.size(); | ||
| 111 | } | 105 | } |
| 112 | 106 | ||
| 113 | std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr) const { | 107 | std::optional<GPUVAddr> MemoryManager::FindFreeRange(std::size_t size, std::size_t align) const { |
| 114 | if (!IsAddressValid(addr)) { | 108 | if (!align) { |
| 115 | return {}; | 109 | align = page_size; |
| 110 | } else { | ||
| 111 | align = Common::AlignUp(align, page_size); | ||
| 116 | } | 112 | } |
| 117 | 113 | ||
| 118 | const VAddr cpu_addr{page_table.backing_addr[addr >> page_bits]}; | 114 | u64 available_size{}; |
| 119 | if (cpu_addr) { | 115 | GPUVAddr gpu_addr{address_space_start}; |
| 120 | return cpu_addr + (addr & page_mask); | 116 | while (gpu_addr + available_size < address_space_size) { |
| 117 | if (GetPageEntry(gpu_addr + available_size).IsUnmapped()) { | ||
| 118 | available_size += page_size; | ||
| 119 | |||
| 120 | if (available_size >= size) { | ||
| 121 | return gpu_addr; | ||
| 122 | } | ||
| 123 | } else { | ||
| 124 | gpu_addr += available_size + page_size; | ||
| 125 | available_size = 0; | ||
| 126 | |||
| 127 | const auto remainder{gpu_addr % align}; | ||
| 128 | if (remainder) { | ||
| 129 | gpu_addr = (gpu_addr - remainder) + align; | ||
| 130 | } | ||
| 131 | } | ||
| 121 | } | 132 | } |
| 122 | 133 | ||
| 123 | return {}; | 134 | return {}; |
| 124 | } | 135 | } |
| 125 | 136 | ||
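The FindFreeRange added above is a first-fit scan: it grows a run of unmapped pages until the run covers the request, and on hitting a mapped page it restarts the run just past it, rounded up to the next aligned address. The same scan structure over a toy occupancy vector, as a standalone sketch (sizes and names here are illustrative):

    #include <cstddef>
    #include <optional>
    #include <vector>

    // First-fit search for `pages` consecutive free slots, starting candidate
    // runs on `align`-page boundaries (align >= 1 assumed).
    std::optional<std::size_t> FindRun(const std::vector<bool>& used,
                                       std::size_t pages, std::size_t align) {
        std::size_t start = 0;
        std::size_t run = 0;
        while (start + run < used.size()) {
            if (!used[start + run]) {
                if (++run >= pages) {
                    return start; // run long enough: first fit found
                }
            } else {
                start += run + 1;            // skip past the occupied page
                run = 0;
                if (const auto rem = start % align; rem != 0) {
                    start += align - rem;    // realign the next candidate run
                }
            }
        }
        return std::nullopt;
    }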
| 126 | template <typename T> | 137 | std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const { |
| 127 | T MemoryManager::Read(GPUVAddr addr) const { | 138 | const auto page_entry{GetPageEntry(gpu_addr)}; |
| 128 | if (!IsAddressValid(addr)) { | 139 | if (!page_entry.IsValid()) { |
| 129 | return {}; | 140 | return {}; |
| 130 | } | 141 | } |
| 131 | 142 | ||
| 132 | const u8* page_pointer{GetPointer(addr)}; | 143 | return page_entry.ToAddress() + (gpu_addr & page_mask); |
| 133 | if (page_pointer) { | 144 | } |
| 145 | |||
| 146 | template <typename T> | ||
| 147 | T MemoryManager::Read(GPUVAddr addr) const { | ||
| 148 | if (auto page_pointer{GetPointer(addr)}; page_pointer) { | ||
| 134 | // NOTE: Avoid adding any extra logic to this fast-path block | 149 | // NOTE: Avoid adding any extra logic to this fast-path block |
| 135 | T value; | 150 | T value; |
| 136 | std::memcpy(&value, page_pointer, sizeof(T)); | 151 | std::memcpy(&value, page_pointer, sizeof(T)); |
| @@ -144,12 +159,7 @@ T MemoryManager::Read(GPUVAddr addr) const { | |||
| 144 | 159 | ||
| 145 | template <typename T> | 160 | template <typename T> |
| 146 | void MemoryManager::Write(GPUVAddr addr, T data) { | 161 | void MemoryManager::Write(GPUVAddr addr, T data) { |
| 147 | if (!IsAddressValid(addr)) { | 162 | if (auto page_pointer{GetPointer(addr)}; page_pointer) { |
| 148 | return; | ||
| 149 | } | ||
| 150 | |||
| 151 | u8* page_pointer{GetPointer(addr)}; | ||
| 152 | if (page_pointer) { | ||
| 153 | // NOTE: Avoid adding any extra logic to this fast-path block | 163 | // NOTE: Avoid adding any extra logic to this fast-path block |
| 154 | std::memcpy(page_pointer, &data, sizeof(T)); | 164 | std::memcpy(page_pointer, &data, sizeof(T)); |
| 155 | return; | 165 | return; |
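The slimmed-down Read/Write keep the same fast path, now with the pointer lookup folded into a C++17 if-initializer; the memcpy through the pointer is the well-defined way to reinterpret raw bytes as a trivially copyable T, unlike a reinterpret_cast plus dereference. The core of it, as a sketch:

    #include <cstring>
    #include <type_traits>

    template <typename T>
    T LoadFrom(const unsigned char* p) {
        static_assert(std::is_trivially_copyable_v<T>);
        T value;
        std::memcpy(&value, p, sizeof(T)); // defined behavior for any alignment
        return value;
    }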
| @@ -167,66 +177,49 @@ template void MemoryManager::Write<u16>(GPUVAddr addr, u16 data); | |||
| 167 | template void MemoryManager::Write<u32>(GPUVAddr addr, u32 data); | 177 | template void MemoryManager::Write<u32>(GPUVAddr addr, u32 data); |
| 168 | template void MemoryManager::Write<u64>(GPUVAddr addr, u64 data); | 178 | template void MemoryManager::Write<u64>(GPUVAddr addr, u64 data); |
| 169 | 179 | ||
| 170 | u8* MemoryManager::GetPointer(GPUVAddr addr) { | 180 | u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) { |
| 171 | if (!IsAddressValid(addr)) { | 181 | if (!GetPageEntry(gpu_addr).IsValid()) { |
| 172 | return {}; | 182 | return {}; |
| 173 | } | 183 | } |
| 174 | 184 | ||
| 175 | auto& memory = system.Memory(); | 185 | const auto address{GpuToCpuAddress(gpu_addr)}; |
| 176 | 186 | if (!address) { | |
| 177 | const VAddr page_addr{page_table.backing_addr[addr >> page_bits]}; | 187 | return {}; |
| 178 | |||
| 179 | if (page_addr != 0) { | ||
| 180 | return memory.GetPointer(page_addr + (addr & page_mask)); | ||
| 181 | } | 188 | } |
| 182 | 189 | ||
| 183 | LOG_ERROR(HW_GPU, "Unknown GetPointer @ 0x{:016X}", addr); | 190 | return system.Memory().GetPointer(*address); |
| 184 | return {}; | ||
| 185 | } | 191 | } |
| 186 | 192 | ||
| 187 | const u8* MemoryManager::GetPointer(GPUVAddr addr) const { | 193 | const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const { |
| 188 | if (!IsAddressValid(addr)) { | 194 | if (!GetPageEntry(gpu_addr).IsValid()) { |
| 189 | return {}; | 195 | return {}; |
| 190 | } | 196 | } |
| 191 | 197 | ||
| 192 | const auto& memory = system.Memory(); | 198 | const auto address{GpuToCpuAddress(gpu_addr)}; |
| 193 | 199 | if (!address) { | |
| 194 | const VAddr page_addr{page_table.backing_addr[addr >> page_bits]}; | 200 | return {}; |
| 195 | |||
| 196 | if (page_addr != 0) { | ||
| 197 | return memory.GetPointer(page_addr + (addr & page_mask)); | ||
| 198 | } | 201 | } |
| 199 | 202 | ||
| 200 | LOG_ERROR(HW_GPU, "Unknown GetPointer @ 0x{:016X}", addr); | 203 | return system.Memory().GetPointer(*address); |
| 201 | return {}; | ||
| 202 | } | ||
| 203 | |||
| 204 | bool MemoryManager::IsBlockContinuous(const GPUVAddr start, const std::size_t size) const { | ||
| 205 | const std::size_t inner_size = size - 1; | ||
| 206 | const GPUVAddr end = start + inner_size; | ||
| 207 | const auto host_ptr_start = reinterpret_cast<std::uintptr_t>(GetPointer(start)); | ||
| 208 | const auto host_ptr_end = reinterpret_cast<std::uintptr_t>(GetPointer(end)); | ||
| 209 | const auto range = static_cast<std::size_t>(host_ptr_end - host_ptr_start); | ||
| 210 | return range == inner_size; | ||
| 211 | } | 204 | } |
| 212 | 205 | ||
| 213 | void MemoryManager::ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer, | 206 | void MemoryManager::ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const { |
| 214 | const std::size_t size) const { | ||
| 215 | std::size_t remaining_size{size}; | 207 | std::size_t remaining_size{size}; |
| 216 | std::size_t page_index{gpu_src_addr >> page_bits}; | 208 | std::size_t page_index{gpu_src_addr >> page_bits}; |
| 217 | std::size_t page_offset{gpu_src_addr & page_mask}; | 209 | std::size_t page_offset{gpu_src_addr & page_mask}; |
| 218 | 210 | ||
| 219 | auto& memory = system.Memory(); | ||
| 220 | |||
| 221 | while (remaining_size > 0) { | 211 | while (remaining_size > 0) { |
| 222 | const std::size_t copy_amount{ | 212 | const std::size_t copy_amount{ |
| 223 | std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)}; | 213 | std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)}; |
| 224 | 214 | ||
| 225 | const VAddr src_addr{page_table.backing_addr[page_index] + page_offset}; | 215 | if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) { |
| 226 | // Flush must happen on the rasterizer interface, such that memory is always synchronous | 216 | const auto src_addr{*page_addr + page_offset}; |
| 227 | // when it is read (even when in asynchronous GPU mode). Fixes Dead Cells title menu. | 217 | |
| 228 | rasterizer.FlushRegion(src_addr, copy_amount); | 218 | // Flush must happen on the rasterizer interface, such that memory is always synchronous |
| 229 | memory.ReadBlockUnsafe(src_addr, dest_buffer, copy_amount); | 219 | // when it is read (even when in asynchronous GPU mode). Fixes Dead Cells title menu. |
| 220 | rasterizer.FlushRegion(src_addr, copy_amount); | ||
| 221 | system.Memory().ReadBlockUnsafe(src_addr, dest_buffer, copy_amount); | ||
| 222 | } | ||
| 230 | 223 | ||
| 231 | page_index++; | 224 | page_index++; |
| 232 | page_offset = 0; | 225 | page_offset = 0; |
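ReadBlock, WriteBlock, and their Unsafe variants all share one loop shape: clamp each copy to the end of the current 64 KiB page, translate that page, copy, then advance with page_offset reset to zero so later chunks start on page boundaries. The chunking arithmetic in isolation, as a sketch independent of the GPU types:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    constexpr std::size_t kPageBits = 16;
    constexpr std::size_t kPageSize = std::size_t{1} << kPageBits;
    constexpr std::size_t kPageMask = kPageSize - 1;

    // Invokes visit(page_index, page_offset, length) once per page-bounded chunk.
    template <typename Visitor>
    void ForEachChunk(std::uint64_t addr, std::size_t size, Visitor&& visit) {
        std::size_t remaining = size;
        std::size_t page_index = addr >> kPageBits;
        std::size_t page_offset = addr & kPageMask;
        while (remaining > 0) {
            const std::size_t amount = std::min(kPageSize - page_offset, remaining);
            visit(page_index, page_offset, amount);
            ++page_index;
            page_offset = 0; // every chunk after the first is page-aligned
            remaining -= amount;
        }
    }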
| @@ -241,18 +234,17 @@ void MemoryManager::ReadBlockUnsafe(GPUVAddr gpu_src_addr, void* dest_buffer, | |||
| 241 | std::size_t page_index{gpu_src_addr >> page_bits}; | 234 | std::size_t page_index{gpu_src_addr >> page_bits}; |
| 242 | std::size_t page_offset{gpu_src_addr & page_mask}; | 235 | std::size_t page_offset{gpu_src_addr & page_mask}; |
| 243 | 236 | ||
| 244 | auto& memory = system.Memory(); | ||
| 245 | |||
| 246 | while (remaining_size > 0) { | 237 | while (remaining_size > 0) { |
| 247 | const std::size_t copy_amount{ | 238 | const std::size_t copy_amount{ |
| 248 | std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)}; | 239 | std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)}; |
| 249 | const u8* page_pointer = page_table.pointers[page_index]; | 240 | |
| 250 | if (page_pointer) { | 241 | if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) { |
| 251 | const VAddr src_addr{page_table.backing_addr[page_index] + page_offset}; | 242 | const auto src_addr{*page_addr + page_offset}; |
| 252 | memory.ReadBlockUnsafe(src_addr, dest_buffer, copy_amount); | 243 | system.Memory().ReadBlockUnsafe(src_addr, dest_buffer, copy_amount); |
| 253 | } else { | 244 | } else { |
| 254 | std::memset(dest_buffer, 0, copy_amount); | 245 | std::memset(dest_buffer, 0, copy_amount); |
| 255 | } | 246 | } |
| 247 | |||
| 256 | page_index++; | 248 | page_index++; |
| 257 | page_offset = 0; | 249 | page_offset = 0; |
| 258 | dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount; | 250 | dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount; |
| @@ -260,23 +252,23 @@ void MemoryManager::ReadBlockUnsafe(GPUVAddr gpu_src_addr, void* dest_buffer, | |||
| 260 | } | 252 | } |
| 261 | } | 253 | } |
| 262 | 254 | ||
| 263 | void MemoryManager::WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer, | 255 | void MemoryManager::WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size) { |
| 264 | const std::size_t size) { | ||
| 265 | std::size_t remaining_size{size}; | 256 | std::size_t remaining_size{size}; |
| 266 | std::size_t page_index{gpu_dest_addr >> page_bits}; | 257 | std::size_t page_index{gpu_dest_addr >> page_bits}; |
| 267 | std::size_t page_offset{gpu_dest_addr & page_mask}; | 258 | std::size_t page_offset{gpu_dest_addr & page_mask}; |
| 268 | 259 | ||
| 269 | auto& memory = system.Memory(); | ||
| 270 | |||
| 271 | while (remaining_size > 0) { | 260 | while (remaining_size > 0) { |
| 272 | const std::size_t copy_amount{ | 261 | const std::size_t copy_amount{ |
| 273 | std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)}; | 262 | std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)}; |
| 274 | 263 | ||
| 275 | const VAddr dest_addr{page_table.backing_addr[page_index] + page_offset}; | 264 | if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) { |
| 276 | // Invalidate must happen on the rasterizer interface, such that memory is always | 265 | const auto dest_addr{*page_addr + page_offset}; |
| 277 | // synchronous when it is written (even when in asynchronous GPU mode). | 266 | |
| 278 | rasterizer.InvalidateRegion(dest_addr, copy_amount); | 267 | // Invalidate must happen on the rasterizer interface, such that memory is always |
| 279 | memory.WriteBlockUnsafe(dest_addr, src_buffer, copy_amount); | 268 | // synchronous when it is written (even when in asynchronous GPU mode). |
| 269 | rasterizer.InvalidateRegion(dest_addr, copy_amount); | ||
| 270 | system.Memory().WriteBlockUnsafe(dest_addr, src_buffer, copy_amount); | ||
| 271 | } | ||
| 280 | 272 | ||
| 281 | page_index++; | 273 | page_index++; |
| 282 | page_offset = 0; | 274 | page_offset = 0; |
| @@ -286,21 +278,20 @@ void MemoryManager::WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer, | |||
| 286 | } | 278 | } |
| 287 | 279 | ||
| 288 | void MemoryManager::WriteBlockUnsafe(GPUVAddr gpu_dest_addr, const void* src_buffer, | 280 | void MemoryManager::WriteBlockUnsafe(GPUVAddr gpu_dest_addr, const void* src_buffer, |
| 289 | const std::size_t size) { | 281 | std::size_t size) { |
| 290 | std::size_t remaining_size{size}; | 282 | std::size_t remaining_size{size}; |
| 291 | std::size_t page_index{gpu_dest_addr >> page_bits}; | 283 | std::size_t page_index{gpu_dest_addr >> page_bits}; |
| 292 | std::size_t page_offset{gpu_dest_addr & page_mask}; | 284 | std::size_t page_offset{gpu_dest_addr & page_mask}; |
| 293 | 285 | ||
| 294 | auto& memory = system.Memory(); | ||
| 295 | |||
| 296 | while (remaining_size > 0) { | 286 | while (remaining_size > 0) { |
| 297 | const std::size_t copy_amount{ | 287 | const std::size_t copy_amount{ |
| 298 | std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)}; | 288 | std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)}; |
| 299 | u8* page_pointer = page_table.pointers[page_index]; | 289 | |
| 300 | if (page_pointer) { | 290 | if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) { |
| 301 | const VAddr dest_addr{page_table.backing_addr[page_index] + page_offset}; | 291 | const auto dest_addr{*page_addr + page_offset}; |
| 302 | memory.WriteBlockUnsafe(dest_addr, src_buffer, copy_amount); | 292 | system.Memory().WriteBlockUnsafe(dest_addr, src_buffer, copy_amount); |
| 303 | } | 293 | } |
| 294 | |||
| 304 | page_index++; | 295 | page_index++; |
| 305 | page_offset = 0; | 296 | page_offset = 0; |
| 306 | src_buffer = static_cast<const u8*>(src_buffer) + copy_amount; | 297 | src_buffer = static_cast<const u8*>(src_buffer) + copy_amount; |
| @@ -308,273 +299,26 @@ void MemoryManager::WriteBlockUnsafe(GPUVAddr gpu_dest_addr, const void* src_buf | |||
| 308 | } | 299 | } |
| 309 | } | 300 | } |
| 310 | 301 | ||
| 311 | void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, | 302 | void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std::size_t size) { |
| 312 | const std::size_t size) { | ||
| 313 | std::vector<u8> tmp_buffer(size); | 303 | std::vector<u8> tmp_buffer(size); |
| 314 | ReadBlock(gpu_src_addr, tmp_buffer.data(), size); | 304 | ReadBlock(gpu_src_addr, tmp_buffer.data(), size); |
| 315 | WriteBlock(gpu_dest_addr, tmp_buffer.data(), size); | 305 | WriteBlock(gpu_dest_addr, tmp_buffer.data(), size); |
| 316 | } | 306 | } |
| 317 | 307 | ||
| 318 | void MemoryManager::CopyBlockUnsafe(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, | 308 | void MemoryManager::CopyBlockUnsafe(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, |
| 319 | const std::size_t size) { | 309 | std::size_t size) { |
| 320 | std::vector<u8> tmp_buffer(size); | 310 | std::vector<u8> tmp_buffer(size); |
| 321 | ReadBlockUnsafe(gpu_src_addr, tmp_buffer.data(), size); | 311 | ReadBlockUnsafe(gpu_src_addr, tmp_buffer.data(), size); |
| 322 | WriteBlockUnsafe(gpu_dest_addr, tmp_buffer.data(), size); | 312 | WriteBlockUnsafe(gpu_dest_addr, tmp_buffer.data(), size); |
| 323 | } | 313 | } |
| 324 | 314 | ||
| 325 | bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) { | 315 | bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) { |
| 326 | const VAddr addr = page_table.backing_addr[gpu_addr >> page_bits]; | 316 | const auto cpu_addr{GpuToCpuAddress(gpu_addr)}; |
| 327 | const std::size_t page = (addr & Core::Memory::PAGE_MASK) + size; | 317 | if (!cpu_addr) { |
| 328 | return page <= Core::Memory::PAGE_SIZE; | ||
| 329 | } | ||
| 330 | |||
| 331 | void MemoryManager::MapPages(GPUVAddr base, u64 size, u8* memory, Common::PageType type, | ||
| 332 | VAddr backing_addr) { | ||
| 333 | LOG_DEBUG(HW_GPU, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * page_size, | ||
| 334 | (base + size) * page_size); | ||
| 335 | |||
| 336 | const VAddr end{base + size}; | ||
| 337 | ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}", | ||
| 338 | base + page_table.pointers.size()); | ||
| 339 | |||
| 340 | if (memory == nullptr) { | ||
| 341 | while (base != end) { | ||
| 342 | page_table.pointers[base] = nullptr; | ||
| 343 | page_table.backing_addr[base] = 0; | ||
| 344 | |||
| 345 | base += 1; | ||
| 346 | } | ||
| 347 | } else { | ||
| 348 | while (base != end) { | ||
| 349 | page_table.pointers[base] = memory; | ||
| 350 | page_table.backing_addr[base] = backing_addr; | ||
| 351 | |||
| 352 | base += 1; | ||
| 353 | memory += page_size; | ||
| 354 | backing_addr += page_size; | ||
| 355 | } | ||
| 356 | } | ||
| 357 | } | ||
| 358 | |||
| 359 | void MemoryManager::MapMemoryRegion(GPUVAddr base, u64 size, u8* target, VAddr backing_addr) { | ||
| 360 | ASSERT_MSG((size & page_mask) == 0, "non-page aligned size: {:016X}", size); | ||
| 361 | ASSERT_MSG((base & page_mask) == 0, "non-page aligned base: {:016X}", base); | ||
| 362 | MapPages(base / page_size, size / page_size, target, Common::PageType::Memory, backing_addr); | ||
| 363 | } | ||
| 364 | |||
| 365 | void MemoryManager::UnmapRegion(GPUVAddr base, u64 size) { | ||
| 366 | ASSERT_MSG((size & page_mask) == 0, "non-page aligned size: {:016X}", size); | ||
| 367 | ASSERT_MSG((base & page_mask) == 0, "non-page aligned base: {:016X}", base); | ||
| 368 | MapPages(base / page_size, size / page_size, nullptr, Common::PageType::Unmapped); | ||
| 369 | } | ||
| 370 | |||
| 371 | bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const { | ||
| 372 | ASSERT(base + size == next.base); | ||
| 373 | if (type != next.type) { | ||
| 374 | return {}; | ||
| 375 | } | ||
| 376 | if (type == VirtualMemoryArea::Type::Allocated && (offset + size != next.offset)) { | ||
| 377 | return {}; | ||
| 378 | } | ||
| 379 | if (type == VirtualMemoryArea::Type::Mapped && backing_memory + size != next.backing_memory) { | ||
| 380 | return {}; | ||
| 381 | } | ||
| 382 | return true; | ||
| 383 | } | ||
| 384 | |||
| 385 | MemoryManager::VMAHandle MemoryManager::FindVMA(GPUVAddr target) const { | ||
| 386 | if (target >= address_space_end) { | ||
| 387 | return vma_map.end(); | ||
| 388 | } else { | ||
| 389 | return std::prev(vma_map.upper_bound(target)); | ||
| 390 | } | ||
| 391 | } | ||
| 392 | |||
| 393 | MemoryManager::VMAIter MemoryManager::Allocate(VMAIter vma_handle) { | ||
| 394 | VirtualMemoryArea& vma{vma_handle->second}; | ||
| 395 | |||
| 396 | vma.type = VirtualMemoryArea::Type::Allocated; | ||
| 397 | vma.backing_addr = 0; | ||
| 398 | vma.backing_memory = {}; | ||
| 399 | UpdatePageTableForVMA(vma); | ||
| 400 | |||
| 401 | return MergeAdjacent(vma_handle); | ||
| 402 | } | ||
| 403 | |||
| 404 | MemoryManager::VMAHandle MemoryManager::AllocateMemory(GPUVAddr target, std::size_t offset, | ||
| 405 | u64 size) { | ||
| 406 | |||
| 407 | // This is the appropriately sized VMA that will turn into our allocation. | ||
| 408 | VMAIter vma_handle{CarveVMA(target, size)}; | ||
| 409 | VirtualMemoryArea& vma{vma_handle->second}; | ||
| 410 | |||
| 411 | ASSERT(vma.size == size); | ||
| 412 | |||
| 413 | vma.offset = offset; | ||
| 414 | |||
| 415 | return Allocate(vma_handle); | ||
| 416 | } | ||
| 417 | |||
| 418 | MemoryManager::VMAHandle MemoryManager::MapBackingMemory(GPUVAddr target, u8* memory, u64 size, | ||
| 419 | VAddr backing_addr) { | ||
| 420 | // This is the appropriately sized VMA that will turn into our allocation. | ||
| 421 | VMAIter vma_handle{CarveVMA(target, size)}; | ||
| 422 | VirtualMemoryArea& vma{vma_handle->second}; | ||
| 423 | |||
| 424 | ASSERT(vma.size == size); | ||
| 425 | |||
| 426 | vma.type = VirtualMemoryArea::Type::Mapped; | ||
| 427 | vma.backing_memory = memory; | ||
| 428 | vma.backing_addr = backing_addr; | ||
| 429 | UpdatePageTableForVMA(vma); | ||
| 430 | |||
| 431 | return MergeAdjacent(vma_handle); | ||
| 432 | } | ||
| 433 | |||
| 434 | void MemoryManager::UnmapRange(GPUVAddr target, u64 size) { | ||
| 435 | VMAIter vma{CarveVMARange(target, size)}; | ||
| 436 | const VAddr target_end{target + size}; | ||
| 437 | const VMAIter end{vma_map.end()}; | ||
| 438 | |||
| 439 | // The comparison against the end of the range must be done using addresses since VMAs can be | ||
| 440 | // merged during this process, causing invalidation of the iterators. | ||
| 441 | while (vma != end && vma->second.base < target_end) { | ||
| 442 | // Unmapped ranges return to allocated state and can be reused | ||
| 443 | // This behavior is used by Super Mario Odyssey, Sonic Forces, and likely other games | ||
| 444 | vma = std::next(Allocate(vma)); | ||
| 445 | } | ||
| 446 | |||
| 447 | ASSERT(FindVMA(target)->second.size >= size); | ||
| 448 | } | ||
| 449 | |||
| 450 | MemoryManager::VMAIter MemoryManager::StripIterConstness(const VMAHandle& iter) { | ||
| 451 | // This uses a neat C++ trick to convert a const_iterator to a regular iterator, given | ||
| 452 | // non-const access to its container. | ||
| 453 | return vma_map.erase(iter, iter); // Erases an empty range of elements | ||
| 454 | } | ||
| 455 | |||
| 456 | MemoryManager::VMAIter MemoryManager::CarveVMA(GPUVAddr base, u64 size) { | ||
| 457 | ASSERT_MSG((size & page_mask) == 0, "non-page aligned size: 0x{:016X}", size); | ||
| 458 | ASSERT_MSG((base & page_mask) == 0, "non-page aligned base: 0x{:016X}", base); | ||
| 459 | |||
| 460 | VMAIter vma_handle{StripIterConstness(FindVMA(base))}; | ||
| 461 | if (vma_handle == vma_map.end()) { | ||
| 462 | // Target address is outside the managed range | ||
| 463 | return {}; | ||
| 464 | } | ||
| 465 | |||
| 466 | const VirtualMemoryArea& vma{vma_handle->second}; | ||
| 467 | if (vma.type == VirtualMemoryArea::Type::Mapped) { | ||
| 468 | // Region is already allocated | ||
| 469 | return vma_handle; | ||
| 470 | } | ||
| 471 | |||
| 472 | const VAddr start_in_vma{base - vma.base}; | ||
| 473 | const VAddr end_in_vma{start_in_vma + size}; | ||
| 474 | |||
| 475 | ASSERT_MSG(end_in_vma <= vma.size, "region size 0x{:016X} is less than required size 0x{:016X}", | ||
| 476 | vma.size, end_in_vma); | ||
| 477 | |||
| 478 | if (end_in_vma < vma.size) { | ||
| 479 | // Split VMA at the end of the allocated region | ||
| 480 | SplitVMA(vma_handle, end_in_vma); | ||
| 481 | } | ||
| 482 | if (start_in_vma != 0) { | ||
| 483 | // Split VMA at the start of the allocated region | ||
| 484 | vma_handle = SplitVMA(vma_handle, start_in_vma); | ||
| 485 | } | ||
| 486 | |||
| 487 | return vma_handle; | ||
| 488 | } | ||
| 489 | |||
| 490 | MemoryManager::VMAIter MemoryManager::CarveVMARange(GPUVAddr target, u64 size) { | ||
| 491 | ASSERT_MSG((size & page_mask) == 0, "non-page aligned size: 0x{:016X}", size); | ||
| 492 | ASSERT_MSG((target & page_mask) == 0, "non-page aligned base: 0x{:016X}", target); | ||
| 493 | |||
| 494 | const VAddr target_end{target + size}; | ||
| 495 | ASSERT(target_end >= target); | ||
| 496 | ASSERT(size > 0); | ||
| 497 | |||
| 498 | VMAIter begin_vma{StripIterConstness(FindVMA(target))}; | ||
| 499 | const VMAIter i_end{vma_map.lower_bound(target_end)}; | ||
| 500 | if (std::any_of(begin_vma, i_end, [](const auto& entry) { | ||
| 501 | return entry.second.type == VirtualMemoryArea::Type::Unmapped; | ||
| 502 | })) { | ||
| 503 | return {}; | 318 | return {}; |
| 504 | } | 319 | } |
| 505 | 320 | const std::size_t page{(*cpu_addr & Core::Memory::PAGE_MASK) + size}; | |
| 506 | if (target != begin_vma->second.base) { | 321 | return page <= Core::Memory::PAGE_SIZE; |
| 507 | begin_vma = SplitVMA(begin_vma, target - begin_vma->second.base); | ||
| 508 | } | ||
| 509 | |||
| 510 | VMAIter end_vma{StripIterConstness(FindVMA(target_end))}; | ||
| 511 | if (end_vma != vma_map.end() && target_end != end_vma->second.base) { | ||
| 512 | end_vma = SplitVMA(end_vma, target_end - end_vma->second.base); | ||
| 513 | } | ||
| 514 | |||
| 515 | return begin_vma; | ||
| 516 | } | ||
| 517 | |||
| 518 | MemoryManager::VMAIter MemoryManager::SplitVMA(VMAIter vma_handle, u64 offset_in_vma) { | ||
| 519 | VirtualMemoryArea& old_vma{vma_handle->second}; | ||
| 520 | VirtualMemoryArea new_vma{old_vma}; // Make a copy of the VMA | ||
| 521 | |||
| 522 | // For now, don't allow no-op VMA splits (trying to split at a boundary) because it's probably | ||
| 523 | // a bug. This restriction might be removed later. | ||
| 524 | ASSERT(offset_in_vma < old_vma.size); | ||
| 525 | ASSERT(offset_in_vma > 0); | ||
| 526 | |||
| 527 | old_vma.size = offset_in_vma; | ||
| 528 | new_vma.base += offset_in_vma; | ||
| 529 | new_vma.size -= offset_in_vma; | ||
| 530 | |||
| 531 | switch (new_vma.type) { | ||
| 532 | case VirtualMemoryArea::Type::Unmapped: | ||
| 533 | break; | ||
| 534 | case VirtualMemoryArea::Type::Allocated: | ||
| 535 | new_vma.offset += offset_in_vma; | ||
| 536 | break; | ||
| 537 | case VirtualMemoryArea::Type::Mapped: | ||
| 538 | new_vma.backing_memory += offset_in_vma; | ||
| 539 | break; | ||
| 540 | } | ||
| 541 | |||
| 542 | ASSERT(old_vma.CanBeMergedWith(new_vma)); | ||
| 543 | |||
| 544 | return vma_map.emplace_hint(std::next(vma_handle), new_vma.base, new_vma); | ||
| 545 | } | ||
| 546 | |||
| 547 | MemoryManager::VMAIter MemoryManager::MergeAdjacent(VMAIter iter) { | ||
| 548 | const VMAIter next_vma{std::next(iter)}; | ||
| 549 | if (next_vma != vma_map.end() && iter->second.CanBeMergedWith(next_vma->second)) { | ||
| 550 | iter->second.size += next_vma->second.size; | ||
| 551 | vma_map.erase(next_vma); | ||
| 552 | } | ||
| 553 | |||
| 554 | if (iter != vma_map.begin()) { | ||
| 555 | VMAIter prev_vma{std::prev(iter)}; | ||
| 556 | if (prev_vma->second.CanBeMergedWith(iter->second)) { | ||
| 557 | prev_vma->second.size += iter->second.size; | ||
| 558 | vma_map.erase(iter); | ||
| 559 | iter = prev_vma; | ||
| 560 | } | ||
| 561 | } | ||
| 562 | |||
| 563 | return iter; | ||
| 564 | } | ||
| 565 | |||
| 566 | void MemoryManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) { | ||
| 567 | switch (vma.type) { | ||
| 568 | case VirtualMemoryArea::Type::Unmapped: | ||
| 569 | UnmapRegion(vma.base, vma.size); | ||
| 570 | break; | ||
| 571 | case VirtualMemoryArea::Type::Allocated: | ||
| 572 | MapMemoryRegion(vma.base, vma.size, nullptr, vma.backing_addr); | ||
| 573 | break; | ||
| 574 | case VirtualMemoryArea::Type::Mapped: | ||
| 575 | MapMemoryRegion(vma.base, vma.size, vma.backing_memory, vma.backing_addr); | ||
| 576 | break; | ||
| 577 | } | ||
| 578 | } | 322 | } |
| 579 | 323 | ||
| 580 | } // namespace Tegra | 324 | } // namespace Tegra |
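The rewritten IsGranularRange reduces to a single inequality: a range fits one host page iff (cpu_addr & PAGE_MASK) + size <= PAGE_SIZE. For instance, with 4 KiB host pages (the size is assumed here), cpu_addr = 0x10000FF0 has offset 0xFF0, so size 16 passes (0xFF0 + 0x10 = 0x1000) while size 17 crosses the boundary. The arithmetic as compile-time checks:

    #include <cstdint>

    constexpr std::uint64_t kHostPageSize = 0x1000; // assumed 4 KiB host pages
    constexpr std::uint64_t kHostPageMask = kHostPageSize - 1;

    constexpr bool IsGranular(std::uint64_t cpu_addr, std::uint64_t size) {
        return (cpu_addr & kHostPageMask) + size <= kHostPageSize;
    }

    static_assert(IsGranular(0x10000FF0, 16));  // ends exactly on the boundary
    static_assert(!IsGranular(0x10000FF0, 17)); // spills into the next page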
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h index 87658e87a..681bd9588 100644 --- a/src/video_core/memory_manager.h +++ b/src/video_core/memory_manager.h | |||
| @@ -6,9 +6,9 @@ | |||
| 6 | 6 | ||
| 7 | #include <map> | 7 | #include <map> |
| 8 | #include <optional> | 8 | #include <optional> |
| 9 | #include <vector> | ||
| 9 | 10 | ||
| 10 | #include "common/common_types.h" | 11 | #include "common/common_types.h" |
| 11 | #include "common/page_table.h" | ||
| 12 | 12 | ||
| 13 | namespace VideoCore { | 13 | namespace VideoCore { |
| 14 | class RasterizerInterface; | 14 | class RasterizerInterface; |
| @@ -20,45 +20,57 @@ class System; | |||
| 20 | 20 | ||
| 21 | namespace Tegra { | 21 | namespace Tegra { |
| 22 | 22 | ||
| 23 | /** | 23 | class PageEntry final { |
| 24 | * Represents a VMA in an address space. A VMA is a contiguous region of virtual addressing space | 24 | public: |
| 25 | * with homogeneous attributes across its extents. In this particular implementation each VMA is | 25 | enum class State : u32 { |
| 26 | * also backed by a single host memory allocation. | 26 | Unmapped = static_cast<u32>(-1), |
| 27 | */ | 27 | Allocated = static_cast<u32>(-2), |
| 28 | struct VirtualMemoryArea { | ||
| 29 | enum class Type : u8 { | ||
| 30 | Unmapped, | ||
| 31 | Allocated, | ||
| 32 | Mapped, | ||
| 33 | }; | 28 | }; |
| 34 | 29 | ||
| 35 | /// Virtual base address of the region. | 30 | constexpr PageEntry() = default; |
| 36 | GPUVAddr base{}; | 31 | constexpr PageEntry(State state) : state{state} {} |
| 37 | /// Size of the region. | 32 | constexpr PageEntry(VAddr addr) : state{static_cast<State>(addr >> ShiftBits)} {} |
| 38 | u64 size{}; | 33 | |
| 39 | /// Memory area mapping type. | 34 | constexpr bool IsUnmapped() const { |
| 40 | Type type{Type::Unmapped}; | 35 | return state == State::Unmapped; |
| 41 | /// CPU memory mapped address corresponding to this memory area. | 36 | } |
| 42 | VAddr backing_addr{}; | 37 | |
| 43 | /// Offset into the backing_memory the mapping starts from. | 38 | constexpr bool IsAllocated() const { |
| 44 | std::size_t offset{}; | 39 | return state == State::Allocated; |
| 45 | /// Pointer backing this VMA. | 40 | } |
| 46 | u8* backing_memory{}; | 41 | |
| 47 | 42 | constexpr bool IsValid() const { | |
| 48 | /// Tests if this area can be merged to the right with `next`. | 43 | return !IsUnmapped() && !IsAllocated(); |
| 49 | bool CanBeMergedWith(const VirtualMemoryArea& next) const; | 44 | } |
| 45 | |||
| 46 | constexpr VAddr ToAddress() const { | ||
| 47 | if (!IsValid()) { | ||
| 48 | return {}; | ||
| 49 | } | ||
| 50 | |||
| 51 | return static_cast<VAddr>(state) << ShiftBits; | ||
| 52 | } | ||
| 53 | |||
| 54 | constexpr PageEntry operator+(u64 offset) { | ||
| 55 | // If this is a reserved value, offsets do not apply | ||
| 56 | if (!IsValid()) { | ||
| 57 | return *this; | ||
| 58 | } | ||
| 59 | return PageEntry{(static_cast<VAddr>(state) << ShiftBits) + offset}; | ||
| 60 | } | ||
| 61 | |||
| 62 | private: | ||
| 63 | static constexpr std::size_t ShiftBits{12}; | ||
| 64 | |||
| 65 | State state{State::Unmapped}; | ||
| 50 | }; | 66 | }; |
| 67 | static_assert(sizeof(PageEntry) == 4, "PageEntry is too large"); | ||
| 51 | 68 | ||
| 52 | class MemoryManager final { | 69 | class MemoryManager final { |
| 53 | public: | 70 | public: |
| 54 | explicit MemoryManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer); | 71 | explicit MemoryManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer); |
| 55 | ~MemoryManager(); | 72 | ~MemoryManager(); |
| 56 | 73 | ||
| 57 | GPUVAddr AllocateSpace(u64 size, u64 align); | ||
| 58 | GPUVAddr AllocateSpace(GPUVAddr addr, u64 size, u64 align); | ||
| 59 | GPUVAddr MapBufferEx(VAddr cpu_addr, u64 size); | ||
| 60 | GPUVAddr MapBufferEx(VAddr cpu_addr, GPUVAddr addr, u64 size); | ||
| 61 | GPUVAddr UnmapBuffer(GPUVAddr addr, u64 size); | ||
| 62 | std::optional<VAddr> GpuToCpuAddress(GPUVAddr addr) const; | 74 | std::optional<VAddr> GpuToCpuAddress(GPUVAddr addr) const; |
| 63 | 75 | ||
| 64 | template <typename T> | 76 | template <typename T> |
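PageEntry packs a CPU address into 32 bits by storing it right-shifted by 12 (the page-frame number of a 4 KiB-aligned address) and reserving the top two u32 values, static_cast<u32>(-1) and static_cast<u32>(-2), as the Unmapped and Allocated sentinels; GpuToCpuAddress later re-adds the low bits via gpu_addr & page_mask. A standalone round-trip sketch mirroring that encoding:

    #include <cstdint>

    constexpr std::uint32_t kUnmapped = 0xFFFFFFFF;  // static_cast<u32>(-1)
    constexpr std::uint32_t kAllocated = 0xFFFFFFFE; // static_cast<u32>(-2)
    constexpr unsigned kShiftBits = 12;              // 4 KiB granularity

    constexpr std::uint32_t Encode(std::uint64_t addr) {
        return static_cast<std::uint32_t>(addr >> kShiftBits);
    }

    constexpr std::uint64_t Decode(std::uint32_t state) {
        return static_cast<std::uint64_t>(state) << kShiftBits;
    }

    // 32 stored bits plus 12 shifted-out bits cover addresses up to 2^44,
    // provided the encoded value stays clear of the two sentinels.
    static_assert(Decode(Encode(0x12345678000ULL)) == 0x12345678000ULL);
    static_assert(Encode(0x1000) != kUnmapped && Encode(0x1000) != kAllocated);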
| @@ -70,9 +82,6 @@ public: | |||
| 70 | u8* GetPointer(GPUVAddr addr); | 82 | u8* GetPointer(GPUVAddr addr); |
| 71 | const u8* GetPointer(GPUVAddr addr) const; | 83 | const u8* GetPointer(GPUVAddr addr) const; |
| 72 | 84 | ||
| 73 | /// Returns true if the block is continuous in host memory, false otherwise | ||
| 74 | bool IsBlockContinuous(GPUVAddr start, std::size_t size) const; | ||
| 75 | |||
| 76 | /** | 85 | /** |
| 77 | * ReadBlock and WriteBlock are full read and write operations over virtual | 86 | * ReadBlock and WriteBlock are full read and write operations over virtual |
| 78 | * GPU Memory. It's important to use these when GPU memory may not be continuous | 87 | * GPU Memory. It's important to use these when GPU memory may not be continuous |
| @@ -98,92 +107,43 @@ public: | |||
| 98 | void CopyBlockUnsafe(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std::size_t size); | 107 | void CopyBlockUnsafe(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std::size_t size); |
| 99 | 108 | ||
| 100 | /** | 109 | /** |
| 101 | * IsGranularRange checks if a gpu region can be simply read with a pointer | 110 | * IsGranularRange checks if a gpu region can be simply read with a pointer. |
| 102 | */ | 111 | */ |
| 103 | bool IsGranularRange(GPUVAddr gpu_addr, std::size_t size); | 112 | bool IsGranularRange(GPUVAddr gpu_addr, std::size_t size); |
| 104 | 113 | ||
| 105 | private: | 114 | GPUVAddr Map(VAddr cpu_addr, GPUVAddr gpu_addr, std::size_t size); |
| 106 | using VMAMap = std::map<GPUVAddr, VirtualMemoryArea>; | 115 | GPUVAddr MapAllocate(VAddr cpu_addr, std::size_t size, std::size_t align); |
| 107 | using VMAHandle = VMAMap::const_iterator; | 116 | std::optional<GPUVAddr> AllocateFixed(GPUVAddr gpu_addr, std::size_t size); |
| 108 | using VMAIter = VMAMap::iterator; | 117 | GPUVAddr Allocate(std::size_t size, std::size_t align); |
| 109 | 118 | void Unmap(GPUVAddr gpu_addr, std::size_t size); | |
| 110 | bool IsAddressValid(GPUVAddr addr) const; | ||
| 111 | void MapPages(GPUVAddr base, u64 size, u8* memory, Common::PageType type, | ||
| 112 | VAddr backing_addr = 0); | ||
| 113 | void MapMemoryRegion(GPUVAddr base, u64 size, u8* target, VAddr backing_addr); | ||
| 114 | void UnmapRegion(GPUVAddr base, u64 size); | ||
| 115 | |||
| 116 | /// Finds the VMA in which the given address is included in, or `vma_map.end()`. | ||
| 117 | VMAHandle FindVMA(GPUVAddr target) const; | ||
| 118 | |||
| 119 | VMAHandle AllocateMemory(GPUVAddr target, std::size_t offset, u64 size); | ||
| 120 | |||
| 121 | /** | ||
| 122 | * Maps an unmanaged host memory pointer at a given address. | ||
| 123 | * | ||
| 124 | * @param target The guest address to start the mapping at. | ||
| 125 | * @param memory The memory to be mapped. | ||
| 126 | * @param size Size of the mapping in bytes. | ||
| 127 | * @param backing_addr The base address of the range to back this mapping. | ||
| 128 | */ | ||
| 129 | VMAHandle MapBackingMemory(GPUVAddr target, u8* memory, u64 size, VAddr backing_addr); | ||
| 130 | |||
| 131 | /// Unmaps a range of addresses, splitting VMAs as necessary. | ||
| 132 | void UnmapRange(GPUVAddr target, u64 size); | ||
| 133 | |||
| 134 | /// Converts a VMAHandle to a mutable VMAIter. | ||
| 135 | VMAIter StripIterConstness(const VMAHandle& iter); | ||
| 136 | |||
| 137 | /// Marks as the specified VMA as allocated. | ||
| 138 | VMAIter Allocate(VMAIter vma); | ||
| 139 | |||
| 140 | /** | ||
| 141 | * Carves a VMA of a specific size at the specified address by splitting Free VMAs while doing | ||
| 142 | * the appropriate error checking. | ||
| 143 | */ | ||
| 144 | VMAIter CarveVMA(GPUVAddr base, u64 size); | ||
| 145 | |||
| 146 | /** | ||
| 147 | * Splits the edges of the given range of non-Free VMAs so that there is a VMA split at each | ||
| 148 | * end of the range. | ||
| 149 | */ | ||
| 150 | VMAIter CarveVMARange(GPUVAddr base, u64 size); | ||
| 151 | |||
| 152 | /** | ||
| 153 | * Splits a VMA in two, at the specified offset. | ||
| 154 | * @returns the right side of the split, with the original iterator becoming the left side. | ||
| 155 | */ | ||
| 156 | VMAIter SplitVMA(VMAIter vma, u64 offset_in_vma); | ||
| 157 | 119 | ||
| 158 | /** | 120 | private: |
| 159 | * Checks for and merges the specified VMA with adjacent ones if possible. | 121 | PageEntry GetPageEntry(GPUVAddr gpu_addr) const; |
| 160 | * @returns the merged VMA or the original if no merging was possible. | 122 | void SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size = page_size); |
| 161 | */ | 123 | GPUVAddr UpdateRange(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size); |
| 162 | VMAIter MergeAdjacent(VMAIter vma); | 124 | std::optional<GPUVAddr> FindFreeRange(std::size_t size, std::size_t align) const; |
| 163 | 125 | ||
| 164 | /// Updates the pages corresponding to this VMA so they match the VMA's attributes. | 126 | void TryLockPage(PageEntry page_entry, std::size_t size); |
| 165 | void UpdatePageTableForVMA(const VirtualMemoryArea& vma); | 127 | void TryUnlockPage(PageEntry page_entry, std::size_t size); |
| 166 | 128 | ||
| 167 | /// Finds a free (unmapped region) of the specified size starting at the specified address. | 129 | static constexpr std::size_t PageEntryIndex(GPUVAddr gpu_addr) { |
| 168 | GPUVAddr FindFreeRegion(GPUVAddr region_start, u64 size) const; | 130 | return (gpu_addr >> page_bits) & page_table_mask; |
| 131 | } | ||
| 169 | 132 | ||
| 170 | private: | 133 | static constexpr u64 address_space_size = 1ULL << 40; |
| 134 | static constexpr u64 address_space_start = 1ULL << 32; | ||
| 171 | static constexpr u64 page_bits{16}; | 135 | static constexpr u64 page_bits{16}; |
| 172 | static constexpr u64 page_size{1 << page_bits}; | 136 | static constexpr u64 page_size{1 << page_bits}; |
| 173 | static constexpr u64 page_mask{page_size - 1}; | 137 | static constexpr u64 page_mask{page_size - 1}; |
| 138 | static constexpr u64 page_table_bits{24}; | ||
| 139 | static constexpr u64 page_table_size{1 << page_table_bits}; | ||
| 140 | static constexpr u64 page_table_mask{page_table_size - 1}; | ||
| 174 | 141 | ||
| 175 | /// Address space in bits, according to Tegra X1 TRM | 142 | Core::System& system; |
| 176 | static constexpr u32 address_space_width{40}; | ||
| 177 | /// Start address for mapping, this is fairly arbitrary but must be non-zero. | ||
| 178 | static constexpr GPUVAddr address_space_base{0x100000}; | ||
| 179 | /// End of address space, based on address space in bits. | ||
| 180 | static constexpr GPUVAddr address_space_end{1ULL << address_space_width}; | ||
| 181 | 143 | ||
| 182 | Common::PageTable page_table; | ||
| 183 | VMAMap vma_map; | ||
| 184 | VideoCore::RasterizerInterface& rasterizer; | 144 | VideoCore::RasterizerInterface& rasterizer; |
| 185 | 145 | ||
| 186 | Core::System& system; | 146 | std::vector<PageEntry> page_table; |
| 187 | }; | 147 | }; |
| 188 | 148 | ||
| 189 | } // namespace Tegra | 149 | } // namespace Tegra |
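The new constants are worth sanity-checking: 16 page bits give 64 KiB GPU pages, 24 page-table bits give 2^24 entries, and their product spans 2^40 bytes, exactly address_space_size; at 4 bytes per PageEntry the fully populated table costs 64 MiB, which is presumably why a flat std::vector suffices instead of a multi-level structure. The arithmetic as compile-time assertions:

    #include <cstdint>

    constexpr std::uint64_t page_bits = 16;
    constexpr std::uint64_t page_size = 1ULL << page_bits;             // 64 KiB
    constexpr std::uint64_t page_table_bits = 24;
    constexpr std::uint64_t page_table_size = 1ULL << page_table_bits; // 16M entries
    constexpr std::uint64_t address_space_size = 1ULL << 40;           // 1 TiB

    static_assert(page_size * page_table_size == address_space_size);
    static_assert(page_table_size * 4 == 64ULL * 1024 * 1024); // 4 B/entry => 64 MiB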
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index 03e82c599..cb284db77 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp | |||
| @@ -178,16 +178,11 @@ RasterizerOpenGL::RasterizerOpenGL(Core::System& system, Core::Frontend::EmuWind | |||
| 178 | 178 | ||
| 179 | if (device.UseAsynchronousShaders()) { | 179 | if (device.UseAsynchronousShaders()) { |
| 180 | // Max worker threads we should allow | 180 | // Max worker threads we should allow |
| 181 | constexpr auto MAX_THREADS = 2u; | 181 | constexpr u32 MAX_THREADS = 4; |
| 182 | // Amount of threads we should reserve for other parts of yuzu | 182 | // Deduce how many threads we can use |
| 183 | constexpr auto RESERVED_THREADS = 6u; | 183 | const u32 threads_used = std::thread::hardware_concurrency() / 4; |
| 184 | // Get the amount of threads we can use(this can return zero) | ||
| 185 | const auto cpu_thread_count = | ||
| 186 | std::max(RESERVED_THREADS, std::thread::hardware_concurrency()); | ||
| 187 | // Deduce how many "extra" threads we have to use. | ||
| 188 | const auto max_threads_unused = cpu_thread_count - RESERVED_THREADS; | ||
| 189 | // Always allow at least 1 thread regardless of our settings | 184 | // Always allow at least 1 thread regardless of our settings |
| 190 | const auto max_worker_count = std::max(1u, max_threads_unused); | 185 | const auto max_worker_count = std::max(1U, threads_used); |
| 191 | // Don't use more than MAX_THREADS | 186 | // Don't use more than MAX_THREADS |
| 192 | const auto worker_count = std::min(max_worker_count, MAX_THREADS); | 187 | const auto worker_count = std::min(max_worker_count, MAX_THREADS); |
| 193 | async_shaders.AllocateWorkers(worker_count); | 188 | async_shaders.AllocateWorkers(worker_count); |
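The simplified worker-count logic takes a quarter of the reported hardware threads, clamped to the range [1, 4]. Note that std::thread::hardware_concurrency() is allowed to return 0, which the std::max(1u, ...) lower clamp absorbs. As a standalone sketch:

    #include <algorithm>
    #include <thread>

    unsigned AsyncShaderWorkerCount() {
        constexpr unsigned kMaxThreads = 4;
        const unsigned quarter = std::thread::hardware_concurrency() / 4; // may be 0
        return std::min(std::max(1u, quarter), kMaxThreads);              // clamp to [1, 4]
    }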
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp index ce53e5a6b..a551e3de8 100644 --- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp +++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp | |||
| @@ -696,6 +696,7 @@ void VKBlitScreen::CreateFramebuffers() { | |||
| 696 | .flags = 0, | 696 | .flags = 0, |
| 697 | .renderPass = *renderpass, | 697 | .renderPass = *renderpass, |
| 698 | .attachmentCount = 1, | 698 | .attachmentCount = 1, |
| 699 | .pAttachments = nullptr, | ||
| 699 | .width = size.width, | 700 | .width = size.width, |
| 700 | .height = size.height, | 701 | .height = size.height, |
| 701 | .layers = 1, | 702 | .layers = 1, |
diff --git a/src/video_core/renderer_vulkan/vk_device.cpp b/src/video_core/renderer_vulkan/vk_device.cpp index 6245e0d78..0c03e4d83 100644 --- a/src/video_core/renderer_vulkan/vk_device.cpp +++ b/src/video_core/renderer_vulkan/vk_device.cpp | |||
| @@ -771,8 +771,9 @@ std::vector<VkDeviceQueueCreateInfo> VKDevice::GetDeviceQueueCreateInfos() const | |||
| 771 | .pNext = nullptr, | 771 | .pNext = nullptr, |
| 772 | .flags = 0, | 772 | .flags = 0, |
| 773 | .queueFamilyIndex = queue_family, | 773 | .queueFamilyIndex = queue_family, |
| 774 | .queueCount = 1, | ||
| 775 | .pQueuePriorities = nullptr, | ||
| 774 | }); | 776 | }); |
| 775 | ci.queueCount = 1; | ||
| 776 | ci.pQueuePriorities = &QUEUE_PRIORITY; | 777 | ci.pQueuePriorities = &QUEUE_PRIORITY; |
| 777 | } | 778 | } |
| 778 | 779 | ||
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp index 42b3a744c..418c62bc4 100644 --- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp | |||
| @@ -261,8 +261,13 @@ VKComputePipeline& VKPipelineCache::GetComputePipeline(const ComputePipelineCach | |||
| 261 | } | 261 | } |
| 262 | 262 | ||
| 263 | const Specialization specialization{ | 263 | const Specialization specialization{ |
| 264 | .base_binding = 0, | ||
| 264 | .workgroup_size = key.workgroup_size, | 265 | .workgroup_size = key.workgroup_size, |
| 265 | .shared_memory_size = key.shared_memory_size, | 266 | .shared_memory_size = key.shared_memory_size, |
| 267 | .point_size = std::nullopt, | ||
| 268 | .enabled_attributes = {}, | ||
| 269 | .attribute_types = {}, | ||
| 270 | .ndc_minus_one_to_one = false, | ||
| 266 | }; | 271 | }; |
| 267 | const SPIRVShader spirv_shader{Decompile(device, shader->GetIR(), ShaderType::Compute, | 272 | const SPIRVShader spirv_shader{Decompile(device, shader->GetIR(), ShaderType::Compute, |
| 268 | shader->GetRegistry(), specialization), | 273 | shader->GetRegistry(), specialization), |
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index 2ed2004f0..7500e8244 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp | |||
| @@ -815,8 +815,13 @@ bool RasterizerVulkan::WalkAttachmentOverlaps(const CachedSurfaceView& attachmen | |||
| 815 | 815 | ||
| 816 | std::tuple<VkFramebuffer, VkExtent2D> RasterizerVulkan::ConfigureFramebuffers( | 816 | std::tuple<VkFramebuffer, VkExtent2D> RasterizerVulkan::ConfigureFramebuffers( |
| 817 | VkRenderPass renderpass) { | 817 | VkRenderPass renderpass) { |
| 818 | FramebufferCacheKey key{renderpass, std::numeric_limits<u32>::max(), | 818 | FramebufferCacheKey key{ |
| 819 | std::numeric_limits<u32>::max(), std::numeric_limits<u32>::max()}; | 819 | .renderpass = renderpass, |
| 820 | .width = std::numeric_limits<u32>::max(), | ||
| 821 | .height = std::numeric_limits<u32>::max(), | ||
| 822 | .layers = std::numeric_limits<u32>::max(), | ||
| 823 | .views = {}, | ||
| 824 | }; | ||
| 820 | 825 | ||
| 821 | const auto try_push = [&key](const View& view) { | 826 | const auto try_push = [&key](const View& view) { |
| 822 | if (!view) { | 827 | if (!view) { |
diff --git a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp index 2d5460776..b068888f9 100644 --- a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp | |||
| @@ -47,6 +47,7 @@ vk::Sampler VKSamplerCache::CreateSampler(const Tegra::Texture::TSCEntry& tsc) c | |||
| 47 | VkSamplerCustomBorderColorCreateInfoEXT border{ | 47 | VkSamplerCustomBorderColorCreateInfoEXT border{ |
| 48 | .sType = VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT, | 48 | .sType = VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT, |
| 49 | .pNext = nullptr, | 49 | .pNext = nullptr, |
| 50 | .customBorderColor = {}, | ||
| 50 | .format = VK_FORMAT_UNDEFINED, | 51 | .format = VK_FORMAT_UNDEFINED, |
| 51 | }; | 52 | }; |
| 52 | std::memcpy(&border.customBorderColor, color.data(), sizeof(color)); | 53 | std::memcpy(&border.customBorderColor, color.data(), sizeof(color)); |
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp index efd4bb13b..2c6f54101 100644 --- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp | |||
| @@ -473,6 +473,8 @@ VkImageView CachedSurfaceView::GetAttachment() { | |||
| 473 | .aspectMask = aspect_mask, | 473 | .aspectMask = aspect_mask, |
| 474 | .baseMipLevel = base_level, | 474 | .baseMipLevel = base_level, |
| 475 | .levelCount = num_levels, | 475 | .levelCount = num_levels, |
| 476 | .baseArrayLayer = 0, | ||
| 477 | .layerCount = 0, | ||
| 476 | }, | 478 | }, |
| 477 | }; | 479 | }; |
| 478 | if (image_view_type == VK_IMAGE_VIEW_TYPE_3D) { | 480 | if (image_view_type == VK_IMAGE_VIEW_TYPE_3D) { |
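The scattered Vulkan fixes above are all one change: with C++20 designated initializers, any member not named is value-initialized, and compilers warn about the omission under -Wmissing-field-initializers, so the diff spells out every field (pAttachments, queueCount, customBorderColor, baseArrayLayer, and so on) even when the value is the zero one. A condensed illustration with a stand-in struct, not the Vulkan types themselves:

    #include <cstdint>

    struct CreateInfo {
        std::uint32_t flags;
        std::uint32_t attachment_count;
        const void* attachments;
        std::uint32_t width;
        std::uint32_t height;
    };

    // Naming every member, in declaration order, keeps the warning quiet and
    // makes each zero value an explicit, reviewable choice.
    constexpr CreateInfo info{
        .flags = 0,
        .attachment_count = 1,
        .attachments = nullptr,
        .width = 1280,
        .height = 720,
    };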
diff --git a/src/yuzu/bootmanager.cpp b/src/yuzu/bootmanager.cpp index 5738787ac..8fc322b30 100644 --- a/src/yuzu/bootmanager.cpp +++ b/src/yuzu/bootmanager.cpp | |||
| @@ -567,7 +567,7 @@ void GRenderWindow::CaptureScreenshot(u32 res_scale, const QString& screenshot_p | |||
| 567 | screenshot_image = QImage(QSize(layout.width, layout.height), QImage::Format_RGB32); | 567 | screenshot_image = QImage(QSize(layout.width, layout.height), QImage::Format_RGB32); |
| 568 | renderer.RequestScreenshot( | 568 | renderer.RequestScreenshot( |
| 569 | screenshot_image.bits(), | 569 | screenshot_image.bits(), |
| 570 | [=] { | 570 | [=, this] { |
| 571 | const std::string std_screenshot_path = screenshot_path.toStdString(); | 571 | const std::string std_screenshot_path = screenshot_path.toStdString(); |
| 572 | if (screenshot_image.mirrored(false, true).save(screenshot_path)) { | 572 | if (screenshot_image.mirrored(false, true).save(screenshot_path)) { |
| 573 | LOG_INFO(Frontend, "Screenshot saved to \"{}\"", std_screenshot_path); | 573 | LOG_INFO(Frontend, "Screenshot saved to \"{}\"", std_screenshot_path); |
diff --git a/src/yuzu/configuration/configure_input_player.cpp b/src/yuzu/configuration/configure_input_player.cpp index 00433926d..b1850bc95 100644 --- a/src/yuzu/configuration/configure_input_player.cpp +++ b/src/yuzu/configuration/configure_input_player.cpp | |||
| @@ -280,9 +280,9 @@ ConfigureInputPlayer::ConfigureInputPlayer(QWidget* parent, std::size_t player_i | |||
| 280 | } | 280 | } |
| 281 | 281 | ||
| 282 | button->setContextMenuPolicy(Qt::CustomContextMenu); | 282 | button->setContextMenuPolicy(Qt::CustomContextMenu); |
| 283 | connect(button, &QPushButton::clicked, [=] { | 283 | connect(button, &QPushButton::clicked, [=, this] { |
| 284 | HandleClick(button_map[button_id], | 284 | HandleClick(button_map[button_id], |
| 285 | [=](Common::ParamPackage params) { | 285 | [=, this](Common::ParamPackage params) { |
| 286 | // Workaround for ZL & ZR for analog triggers like on Xbox controllers. | 286 | // Workaround for ZL & ZR for analog triggers like on Xbox controllers. |
| 287 | // Analog triggers (from controllers like the XBOX controller) would not | 287 | // Analog triggers (from controllers like the XBOX controller) would not |
| 288 | // work due to a different range of their signals (from 0 to 255 on | 288 | // work due to a different range of their signals (from 0 to 255 on |
@@ -300,19 +300,20 @@ ConfigureInputPlayer::ConfigureInputPlayer(QWidget* parent, std::size_t player_i
                         },
                         InputCommon::Polling::DeviceType::Button);
         });
-        connect(button, &QPushButton::customContextMenuRequested, [=](const QPoint& menu_location) {
-            QMenu context_menu;
-            context_menu.addAction(tr("Clear"), [&] {
-                buttons_param[button_id].Clear();
-                button_map[button_id]->setText(tr("[not set]"));
-            });
-            context_menu.addAction(tr("Restore Default"), [&] {
-                buttons_param[button_id] = Common::ParamPackage{
-                    InputCommon::GenerateKeyboardParam(Config::default_buttons[button_id])};
-                button_map[button_id]->setText(ButtonToText(buttons_param[button_id]));
-            });
-            context_menu.exec(button_map[button_id]->mapToGlobal(menu_location));
-        });
+        connect(button, &QPushButton::customContextMenuRequested,
+                [=, this](const QPoint& menu_location) {
+                    QMenu context_menu;
+                    context_menu.addAction(tr("Clear"), [&] {
+                        buttons_param[button_id].Clear();
+                        button_map[button_id]->setText(tr("[not set]"));
+                    });
+                    context_menu.addAction(tr("Restore Default"), [&] {
+                        buttons_param[button_id] = Common::ParamPackage{
+                            InputCommon::GenerateKeyboardParam(Config::default_buttons[button_id])};
+                        button_map[button_id]->setText(ButtonToText(buttons_param[button_id]));
+                    });
+                    context_menu.exec(button_map[button_id]->mapToGlobal(menu_location));
+                });
     }

     for (int analog_id = 0; analog_id < Settings::NativeAnalog::NumAnalogs; analog_id++) {
@@ -323,16 +324,16 @@ ConfigureInputPlayer::ConfigureInputPlayer(QWidget* parent, std::size_t player_i
             }

             analog_button->setContextMenuPolicy(Qt::CustomContextMenu);
-            connect(analog_button, &QPushButton::clicked, [=]() {
+            connect(analog_button, &QPushButton::clicked, [=, this] {
                 HandleClick(analog_map_buttons[analog_id][sub_button_id],
-                            [=](const Common::ParamPackage& params) {
+                            [=, this](const Common::ParamPackage& params) {
                                 SetAnalogButton(params, analogs_param[analog_id],
                                                 analog_sub_buttons[sub_button_id]);
                             },
                             InputCommon::Polling::DeviceType::Button);
             });
             connect(analog_button, &QPushButton::customContextMenuRequested,
-                    [=](const QPoint& menu_location) {
+                    [=, this](const QPoint& menu_location) {
                         QMenu context_menu;
                         context_menu.addAction(tr("Clear"), [&] {
                             analogs_param[analog_id].Erase(analog_sub_buttons[sub_button_id]);
@@ -350,32 +351,35 @@ ConfigureInputPlayer::ConfigureInputPlayer(QWidget* parent, std::size_t player_i
                                                   menu_location));
                         });
         }
-        connect(analog_map_stick[analog_id], &QPushButton::clicked, [=] {
+        connect(analog_map_stick[analog_id], &QPushButton::clicked, [=, this] {
             if (QMessageBox::information(
                     this, tr("Information"),
                     tr("After pressing OK, first move your joystick horizontally, "
                        "and then vertically."),
                     QMessageBox::Ok | QMessageBox::Cancel) == QMessageBox::Ok) {
-                HandleClick(
-                    analog_map_stick[analog_id],
-                    [=](const Common::ParamPackage& params) { analogs_param[analog_id] = params; },
-                    InputCommon::Polling::DeviceType::Analog);
+                HandleClick(analog_map_stick[analog_id],
+                            [=, this](const Common::ParamPackage& params) {
+                                analogs_param[analog_id] = params;
+                            },
+                            InputCommon::Polling::DeviceType::Analog);
             }
         });

-        connect(analog_map_deadzone_and_modifier_slider[analog_id], &QSlider::valueChanged, [=] {
-            const float slider_value = analog_map_deadzone_and_modifier_slider[analog_id]->value();
-            if (analogs_param[analog_id].Get("engine", "") == "sdl" ||
-                analogs_param[analog_id].Get("engine", "") == "gcpad") {
-                analog_map_deadzone_and_modifier_slider_label[analog_id]->setText(
-                    tr("Deadzone: %1%").arg(slider_value));
-                analogs_param[analog_id].Set("deadzone", slider_value / 100.0f);
-            } else {
-                analog_map_deadzone_and_modifier_slider_label[analog_id]->setText(
-                    tr("Modifier Scale: %1%").arg(slider_value));
-                analogs_param[analog_id].Set("modifier_scale", slider_value / 100.0f);
-            }
-        });
+        connect(analog_map_deadzone_and_modifier_slider[analog_id], &QSlider::valueChanged,
+                [=, this] {
+                    const float slider_value =
+                        analog_map_deadzone_and_modifier_slider[analog_id]->value();
+                    if (analogs_param[analog_id].Get("engine", "") == "sdl" ||
+                        analogs_param[analog_id].Get("engine", "") == "gcpad") {
+                        analog_map_deadzone_and_modifier_slider_label[analog_id]->setText(
+                            tr("Deadzone: %1%").arg(slider_value));
+                        analogs_param[analog_id].Set("deadzone", slider_value / 100.0f);
+                    } else {
+                        analog_map_deadzone_and_modifier_slider_label[analog_id]->setText(
+                            tr("Modifier Scale: %1%").arg(slider_value));
+                        analogs_param[analog_id].Set("modifier_scale", slider_value / 100.0f);
+                    }
+                });
     }

     connect(ui->buttonClearAll, &QPushButton::clicked, [this] { ClearAll(); });
diff --git a/src/yuzu/configuration/configure_mouse_advanced.cpp b/src/yuzu/configuration/configure_mouse_advanced.cpp
index e0647ea5b..ea2549363 100644
--- a/src/yuzu/configuration/configure_mouse_advanced.cpp
+++ b/src/yuzu/configuration/configure_mouse_advanced.cpp
@@ -83,25 +83,28 @@ ConfigureMouseAdvanced::ConfigureMouseAdvanced(QWidget* parent)
         }

         button->setContextMenuPolicy(Qt::CustomContextMenu);
-        connect(button, &QPushButton::clicked, [=] {
-            HandleClick(
-                button_map[button_id],
-                [=](const Common::ParamPackage& params) { buttons_param[button_id] = params; },
-                InputCommon::Polling::DeviceType::Button);
-        });
-        connect(button, &QPushButton::customContextMenuRequested, [=](const QPoint& menu_location) {
-            QMenu context_menu;
-            context_menu.addAction(tr("Clear"), [&] {
-                buttons_param[button_id].Clear();
-                button_map[button_id]->setText(tr("[not set]"));
-            });
-            context_menu.addAction(tr("Restore Default"), [&] {
-                buttons_param[button_id] = Common::ParamPackage{
-                    InputCommon::GenerateKeyboardParam(Config::default_mouse_buttons[button_id])};
-                button_map[button_id]->setText(ButtonToText(buttons_param[button_id]));
-            });
-            context_menu.exec(button_map[button_id]->mapToGlobal(menu_location));
+        connect(button, &QPushButton::clicked, [=, this] {
+            HandleClick(button_map[button_id],
+                        [=, this](const Common::ParamPackage& params) {
+                            buttons_param[button_id] = params;
+                        },
+                        InputCommon::Polling::DeviceType::Button);
         });
+        connect(button, &QPushButton::customContextMenuRequested,
+                [=, this](const QPoint& menu_location) {
+                    QMenu context_menu;
+                    context_menu.addAction(tr("Clear"), [&] {
+                        buttons_param[button_id].Clear();
+                        button_map[button_id]->setText(tr("[not set]"));
+                    });
+                    context_menu.addAction(tr("Restore Default"), [&] {
+                        buttons_param[button_id] =
+                            Common::ParamPackage{InputCommon::GenerateKeyboardParam(
+                                Config::default_mouse_buttons[button_id])};
+                        button_map[button_id]->setText(ButtonToText(buttons_param[button_id]));
+                    });
+                    context_menu.exec(button_map[button_id]->mapToGlobal(menu_location));
+                });
     }

     connect(ui->buttonClearAll, &QPushButton::clicked, [this] { ClearAll(); });
diff --git a/src/yuzu/configuration/configure_ui.cpp b/src/yuzu/configuration/configure_ui.cpp
index 91c21c572..2c20b68d0 100644
--- a/src/yuzu/configuration/configure_ui.cpp
+++ b/src/yuzu/configuration/configure_ui.cpp
@@ -54,9 +54,9 @@ ConfigureUi::ConfigureUi(QWidget* parent) : QWidget(parent), ui(new Ui::Configur

     // Update text ComboBoxes after user interaction.
     connect(ui->row_1_text_combobox, QOverload<int>::of(&QComboBox::activated),
-            [=]() { ConfigureUi::UpdateSecondRowComboBox(); });
+            [this] { ConfigureUi::UpdateSecondRowComboBox(); });
     connect(ui->row_2_text_combobox, QOverload<int>::of(&QComboBox::activated),
-            [=]() { ConfigureUi::UpdateFirstRowComboBox(); });
+            [this] { ConfigureUi::UpdateFirstRowComboBox(); });

     // Set screenshot path to user specification.
     connect(ui->screenshot_path_button, &QToolButton::pressed, this, [this] {
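In contrast to the `[=, this]` cases above, these two lambdas only call a member function, so the default copy capture is dropped entirely in favor of a plain `[this]`, which captures nothing but the object pointer.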
diff --git a/src/yuzu/game_list_worker.cpp b/src/yuzu/game_list_worker.cpp
index 2018150db..239016b94 100644
--- a/src/yuzu/game_list_worker.cpp
+++ b/src/yuzu/game_list_worker.cpp
@@ -369,8 +369,8 @@ void GameListWorker::run() {
         auto* const game_list_dir = new GameListDir(game_dir);
         emit DirEntryReady(game_list_dir);
         provider->ClearAllEntries();
-        ScanFileSystem(ScanTarget::FillManualContentProvider, game_dir.path.toStdString(), 2,
-                       game_list_dir);
+        ScanFileSystem(ScanTarget::FillManualContentProvider, game_dir.path.toStdString(),
+                       game_dir.deep_scan ? 256 : 0, game_list_dir);
         ScanFileSystem(ScanTarget::PopulateGameList, game_dir.path.toStdString(),
                        game_dir.deep_scan ? 256 : 0, game_list_dir);
     }
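The `FillManualContentProvider` pass previously used a hard-coded recursion depth of 2; it now honors the same `deep_scan ? 256 : 0` policy as the `PopulateGameList` pass, so the manual content provider sees exactly the directories the game list does. A sketch of how such a depth argument typically bounds recursion (illustrative only; this is not yuzu's actual `ScanFileSystem` signature):

    #include <filesystem>

    // Visit `dir`; recurse into subdirectories only while `depth` > 0,
    // so 0 scans a single level and 256 is effectively a deep scan.
    void Scan(const std::filesystem::path& dir, unsigned depth) {
        for (const auto& entry : std::filesystem::directory_iterator(dir)) {
            if (entry.is_directory()) {
                if (depth > 0) {
                    Scan(entry.path(), depth - 1);
                }
            } else {
                // ... process entry.path() as a candidate file ...
            }
        }
    }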
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index e26cec78c..592993c36 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -583,7 +583,7 @@ void GMainWindow::InitializeWidgets() {
     renderer_status_button->setObjectName(QStringLiteral("RendererStatusBarButton"));
     renderer_status_button->setCheckable(true);
     renderer_status_button->setFocusPolicy(Qt::NoFocus);
-    connect(renderer_status_button, &QPushButton::toggled, [=](bool checked) {
+    connect(renderer_status_button, &QPushButton::toggled, [this](bool checked) {
         renderer_status_button->setText(checked ? tr("VULKAN") : tr("OPENGL"));
     });
     renderer_status_button->toggle();
@@ -595,7 +595,7 @@ void GMainWindow::InitializeWidgets() {
 #else
     renderer_status_button->setChecked(Settings::values.renderer_backend.GetValue() ==
                                        Settings::RendererBackend::Vulkan);
-    connect(renderer_status_button, &QPushButton::clicked, [=] {
+    connect(renderer_status_button, &QPushButton::clicked, [this] {
         if (emulation_running) {
             return;
         }