author     Fernando S    2023-08-21 16:29:04 +0200
committer  GitHub        2023-08-21 16:29:04 +0200
commit     861597eb2e32663dba37813273ff91434566523a (patch)
tree       a05b7c596209f754a822c03cca162c6f32b6a565 /src/core/file_sys
parent     Merge pull request #11320 from Kelebek1/mask_depthstencil_clear (diff)
parent     file_sys: tolerate empty NCA (diff)
Merge pull request #11284 from liamwhite/nca-release
vfs: expand support for NCA reading
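
The headline API change in this merge is the NCA constructor: the old NCA(file, bktr_base_romfs, bktr_base_ivfc_offset) signature becomes NCA(file, base_nca), and update (AesCtrEx/BKTR) sections are resolved through the base NCA's reader rather than a raw base RomFS handle (see the content_archive.h/.cpp hunks below). A minimal usage sketch under the new signature; the helper name and the two VirtualFile arguments are illustrative and not part of the diff:

    // Sketch only: constructing a base NCA and an update NCA against it with
    // the new NCA(VirtualFile, const NCA*) constructor introduced here.
    #include <memory>

    #include "core/file_sys/content_archive.h"
    #include "core/loader/loader.h"

    std::shared_ptr<FileSys::NCA> OpenUpdateNca(FileSys::VirtualFile base_file,
                                                FileSys::VirtualFile update_file) {
        // Base game NCA: no extra constructor arguments are needed any more.
        const auto base = std::make_shared<FileSys::NCA>(base_file);
        if (base->GetStatus() != Loader::ResultStatus::Success) {
            return nullptr;
        }

        // Update NCA: pass the base so its AesCtrEx (BKTR) sections can be
        // resolved; without it the status becomes ErrorMissingBKTRBaseRomFS.
        auto update = std::make_shared<FileSys::NCA>(update_file, base.get());
        if (update->GetStatus() != Loader::ResultStatus::Success) {
            return nullptr;
        }
        return update;
    }
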
Diffstat (limited to 'src/core/file_sys')
-rw-r--r--  src/core/file_sys/card_image.cpp  6
-rw-r--r--  src/core/file_sys/content_archive.cpp  585
-rw-r--r--  src/core/file_sys/content_archive.h  66
-rw-r--r--  src/core/file_sys/errors.h  70
-rw-r--r--  src/core/file_sys/fssystem/fs_i_storage.h  58
-rw-r--r--  src/core/file_sys/fssystem/fs_types.h  46
-rw-r--r--  src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.cpp  251
-rw-r--r--  src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.h  114
-rw-r--r--  src/core/file_sys/fssystem/fssystem_aes_ctr_storage.cpp  129
-rw-r--r--  src/core/file_sys/fssystem/fssystem_aes_ctr_storage.h  43
-rw-r--r--  src/core/file_sys/fssystem/fssystem_aes_xts_storage.cpp  112
-rw-r--r--  src/core/file_sys/fssystem/fssystem_aes_xts_storage.h  42
-rw-r--r--  src/core/file_sys/fssystem/fssystem_alignment_matching_storage.h  146
-rw-r--r--  src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.cpp  204
-rw-r--r--  src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.h  21
-rw-r--r--  src/core/file_sys/fssystem/fssystem_bucket_tree.cpp  598
-rw-r--r--  src/core/file_sys/fssystem/fssystem_bucket_tree.h  416
-rw-r--r--  src/core/file_sys/fssystem/fssystem_bucket_tree_template_impl.h  170
-rw-r--r--  src/core/file_sys/fssystem/fssystem_bucket_tree_utils.h  110
-rw-r--r--  src/core/file_sys/fssystem/fssystem_compressed_storage.h  963
-rw-r--r--  src/core/file_sys/fssystem/fssystem_compression_common.h  43
-rw-r--r--  src/core/file_sys/fssystem/fssystem_compression_configuration.cpp  36
-rw-r--r--  src/core/file_sys/fssystem/fssystem_compression_configuration.h  12
-rw-r--r--  src/core/file_sys/fssystem/fssystem_crypto_configuration.cpp  65
-rw-r--r--  src/core/file_sys/fssystem/fssystem_crypto_configuration.h  12
-rw-r--r--  src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.cpp  127
-rw-r--r--  src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h  164
-rw-r--r--  src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.cpp  80
-rw-r--r--  src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.h  44
-rw-r--r--  src/core/file_sys/fssystem/fssystem_indirect_storage.cpp  119
-rw-r--r--  src/core/file_sys/fssystem/fssystem_indirect_storage.h  294
-rw-r--r--  src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.cpp  30
-rw-r--r--  src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.h  42
-rw-r--r--  src/core/file_sys/fssystem/fssystem_integrity_verification_storage.cpp  91
-rw-r--r--  src/core/file_sys/fssystem/fssystem_integrity_verification_storage.h  65
-rw-r--r--  src/core/file_sys/fssystem/fssystem_memory_resource_buffer_hold_storage.h  61
-rw-r--r--  src/core/file_sys/fssystem/fssystem_nca_file_system_driver.cpp  1351
-rw-r--r--  src/core/file_sys/fssystem/fssystem_nca_file_system_driver.h  364
-rw-r--r--  src/core/file_sys/fssystem/fssystem_nca_header.cpp  20
-rw-r--r--  src/core/file_sys/fssystem/fssystem_nca_header.h  338
-rw-r--r--  src/core/file_sys/fssystem/fssystem_nca_reader.cpp  531
-rw-r--r--  src/core/file_sys/fssystem/fssystem_pooled_buffer.cpp  61
-rw-r--r--  src/core/file_sys/fssystem/fssystem_pooled_buffer.h  95
-rw-r--r--  src/core/file_sys/fssystem/fssystem_sparse_storage.cpp  39
-rw-r--r--  src/core/file_sys/fssystem/fssystem_sparse_storage.h  72
-rw-r--r--  src/core/file_sys/fssystem/fssystem_switch_storage.h  80
-rw-r--r--  src/core/file_sys/fssystem/fssystem_utility.cpp  27
-rw-r--r--  src/core/file_sys/fssystem/fssystem_utility.h  12
-rw-r--r--  src/core/file_sys/nca_patch.cpp  217
-rw-r--r--  src/core/file_sys/nca_patch.h  145
-rw-r--r--  src/core/file_sys/patch_manager.cpp  39
-rw-r--r--  src/core/file_sys/patch_manager.h  4
-rw-r--r--  src/core/file_sys/registered_cache.cpp  8
-rw-r--r--  src/core/file_sys/romfs_factory.cpp  9
-rw-r--r--  src/core/file_sys/romfs_factory.h  11
-rw-r--r--  src/core/file_sys/submission_package.cpp  4
56 files changed, 7890 insertions, 972 deletions
diff --git a/src/core/file_sys/card_image.cpp b/src/core/file_sys/card_image.cpp
index 5d02865f4..3b2588c84 100644
--- a/src/core/file_sys/card_image.cpp
+++ b/src/core/file_sys/card_image.cpp
@@ -183,9 +183,9 @@ u32 XCI::GetSystemUpdateVersion() {
183 } 183 }
184 184
185 for (const auto& update_file : update->GetFiles()) { 185 for (const auto& update_file : update->GetFiles()) {
186 NCA nca{update_file, nullptr, 0}; 186 NCA nca{update_file};
187 187
188 if (nca.GetStatus() != Loader::ResultStatus::Success) { 188 if (nca.GetStatus() != Loader::ResultStatus::Success || nca.GetSubdirectories().empty()) {
189 continue; 189 continue;
190 } 190 }
191 191
@@ -296,7 +296,7 @@ Loader::ResultStatus XCI::AddNCAFromPartition(XCIPartition part) {
296 continue; 296 continue;
297 } 297 }
298 298
299 auto nca = std::make_shared<NCA>(partition_file, nullptr, 0); 299 auto nca = std::make_shared<NCA>(partition_file);
300 if (nca->IsUpdate()) { 300 if (nca->IsUpdate()) {
301 continue; 301 continue;
302 } 302 }
diff --git a/src/core/file_sys/content_archive.cpp b/src/core/file_sys/content_archive.cpp
index 06efab46d..44e6852fe 100644
--- a/src/core/file_sys/content_archive.cpp
+++ b/src/core/file_sys/content_archive.cpp
@@ -12,545 +12,109 @@
12#include "core/crypto/ctr_encryption_layer.h" 12#include "core/crypto/ctr_encryption_layer.h"
13#include "core/crypto/key_manager.h" 13#include "core/crypto/key_manager.h"
14#include "core/file_sys/content_archive.h" 14#include "core/file_sys/content_archive.h"
15#include "core/file_sys/nca_patch.h"
16#include "core/file_sys/partition_filesystem.h" 15#include "core/file_sys/partition_filesystem.h"
17#include "core/file_sys/vfs_offset.h" 16#include "core/file_sys/vfs_offset.h"
18#include "core/loader/loader.h" 17#include "core/loader/loader.h"
19 18
20namespace FileSys { 19#include "core/file_sys/fssystem/fssystem_compression_configuration.h"
20#include "core/file_sys/fssystem/fssystem_crypto_configuration.h"
21#include "core/file_sys/fssystem/fssystem_nca_file_system_driver.h"
21 22
22// Media offsets in headers are stored divided by 512. Mult. by this to get real offset. 23namespace FileSys {
23constexpr u64 MEDIA_OFFSET_MULTIPLIER = 0x200;
24
25constexpr u64 SECTION_HEADER_SIZE = 0x200;
26constexpr u64 SECTION_HEADER_OFFSET = 0x400;
27
28constexpr u32 IVFC_MAX_LEVEL = 6;
29
30enum class NCASectionFilesystemType : u8 {
31 PFS0 = 0x2,
32 ROMFS = 0x3,
33};
34
35struct IVFCLevel {
36 u64_le offset;
37 u64_le size;
38 u32_le block_size;
39 u32_le reserved;
40};
41static_assert(sizeof(IVFCLevel) == 0x18, "IVFCLevel has incorrect size.");
42
43struct IVFCHeader {
44 u32_le magic;
45 u32_le magic_number;
46 INSERT_PADDING_BYTES_NOINIT(8);
47 std::array<IVFCLevel, 6> levels;
48 INSERT_PADDING_BYTES_NOINIT(64);
49};
50static_assert(sizeof(IVFCHeader) == 0xE0, "IVFCHeader has incorrect size.");
51
52struct NCASectionHeaderBlock {
53 INSERT_PADDING_BYTES_NOINIT(3);
54 NCASectionFilesystemType filesystem_type;
55 NCASectionCryptoType crypto_type;
56 INSERT_PADDING_BYTES_NOINIT(3);
57};
58static_assert(sizeof(NCASectionHeaderBlock) == 0x8, "NCASectionHeaderBlock has incorrect size.");
59
60struct NCABucketInfo {
61 u64 table_offset;
62 u64 table_size;
63 std::array<u8, 0x10> table_header;
64};
65static_assert(sizeof(NCABucketInfo) == 0x20, "NCABucketInfo has incorrect size.");
66
67struct NCASparseInfo {
68 NCABucketInfo bucket;
69 u64 physical_offset;
70 u16 generation;
71 INSERT_PADDING_BYTES_NOINIT(0x6);
72};
73static_assert(sizeof(NCASparseInfo) == 0x30, "NCASparseInfo has incorrect size.");
74
75struct NCACompressionInfo {
76 NCABucketInfo bucket;
77 INSERT_PADDING_BYTES_NOINIT(0x8);
78};
79static_assert(sizeof(NCACompressionInfo) == 0x28, "NCACompressionInfo has incorrect size.");
80
81struct NCASectionRaw {
82 NCASectionHeaderBlock header;
83 std::array<u8, 0x138> block_data;
84 std::array<u8, 0x8> section_ctr;
85 NCASparseInfo sparse_info;
86 NCACompressionInfo compression_info;
87 INSERT_PADDING_BYTES_NOINIT(0x60);
88};
89static_assert(sizeof(NCASectionRaw) == 0x200, "NCASectionRaw has incorrect size.");
90
91struct PFS0Superblock {
92 NCASectionHeaderBlock header_block;
93 std::array<u8, 0x20> hash;
94 u32_le size;
95 INSERT_PADDING_BYTES_NOINIT(4);
96 u64_le hash_table_offset;
97 u64_le hash_table_size;
98 u64_le pfs0_header_offset;
99 u64_le pfs0_size;
100 INSERT_PADDING_BYTES_NOINIT(0x1B0);
101};
102static_assert(sizeof(PFS0Superblock) == 0x200, "PFS0Superblock has incorrect size.");
103
104struct RomFSSuperblock {
105 NCASectionHeaderBlock header_block;
106 IVFCHeader ivfc;
107 INSERT_PADDING_BYTES_NOINIT(0x118);
108};
109static_assert(sizeof(RomFSSuperblock) == 0x200, "RomFSSuperblock has incorrect size.");
110
111struct BKTRHeader {
112 u64_le offset;
113 u64_le size;
114 u32_le magic;
115 INSERT_PADDING_BYTES_NOINIT(0x4);
116 u32_le number_entries;
117 INSERT_PADDING_BYTES_NOINIT(0x4);
118};
119static_assert(sizeof(BKTRHeader) == 0x20, "BKTRHeader has incorrect size.");
120
121struct BKTRSuperblock {
122 NCASectionHeaderBlock header_block;
123 IVFCHeader ivfc;
124 INSERT_PADDING_BYTES_NOINIT(0x18);
125 BKTRHeader relocation;
126 BKTRHeader subsection;
127 INSERT_PADDING_BYTES_NOINIT(0xC0);
128};
129static_assert(sizeof(BKTRSuperblock) == 0x200, "BKTRSuperblock has incorrect size.");
130
131union NCASectionHeader {
132 NCASectionRaw raw{};
133 PFS0Superblock pfs0;
134 RomFSSuperblock romfs;
135 BKTRSuperblock bktr;
136};
137static_assert(sizeof(NCASectionHeader) == 0x200, "NCASectionHeader has incorrect size.");
138
139static bool IsValidNCA(const NCAHeader& header) {
140 // TODO(DarkLordZach): Add NCA2/NCA0 support.
141 return header.magic == Common::MakeMagic('N', 'C', 'A', '3');
142}
143 24
144NCA::NCA(VirtualFile file_, VirtualFile bktr_base_romfs_, u64 bktr_base_ivfc_offset) 25NCA::NCA(VirtualFile file_, const NCA* base_nca)
145 : file(std::move(file_)), 26 : file(std::move(file_)), keys{Core::Crypto::KeyManager::Instance()} {
146 bktr_base_romfs(std::move(bktr_base_romfs_)), keys{Core::Crypto::KeyManager::Instance()} {
147 if (file == nullptr) { 27 if (file == nullptr) {
148 status = Loader::ResultStatus::ErrorNullFile; 28 status = Loader::ResultStatus::ErrorNullFile;
149 return; 29 return;
150 } 30 }
151 31
152 if (sizeof(NCAHeader) != file->ReadObject(&header)) { 32 reader = std::make_shared<NcaReader>();
153 LOG_ERROR(Loader, "File reader errored out during header read."); 33 if (Result rc =
34 reader->Initialize(file, GetCryptoConfiguration(), GetNcaCompressionConfiguration());
35 R_FAILED(rc)) {
36 if (rc != ResultInvalidNcaSignature) {
37 LOG_ERROR(Loader, "File reader errored out during header read: {:#x}",
38 rc.GetInnerValue());
39 }
154 status = Loader::ResultStatus::ErrorBadNCAHeader; 40 status = Loader::ResultStatus::ErrorBadNCAHeader;
155 return; 41 return;
156 } 42 }
157 43
158 if (!HandlePotentialHeaderDecryption()) { 44 RightsId rights_id{};
159 return; 45 reader->GetRightsId(rights_id.data(), rights_id.size());
160 } 46 if (rights_id != RightsId{}) {
161 47 // External decryption key required; provide it here.
162 has_rights_id = std::ranges::any_of(header.rights_id, [](char c) { return c != '\0'; }); 48 const auto key_generation = std::max<s32>(reader->GetKeyGeneration(), 1) - 1;
163
164 const std::vector<NCASectionHeader> sections = ReadSectionHeaders();
165 is_update = std::ranges::any_of(sections, [](const NCASectionHeader& nca_header) {
166 return nca_header.raw.header.crypto_type == NCASectionCryptoType::BKTR;
167 });
168
169 if (!ReadSections(sections, bktr_base_ivfc_offset)) {
170 return;
171 }
172
173 status = Loader::ResultStatus::Success;
174}
175
176NCA::~NCA() = default;
177
178bool NCA::CheckSupportedNCA(const NCAHeader& nca_header) {
179 if (nca_header.magic == Common::MakeMagic('N', 'C', 'A', '2')) {
180 status = Loader::ResultStatus::ErrorNCA2;
181 return false;
182 }
183
184 if (nca_header.magic == Common::MakeMagic('N', 'C', 'A', '0')) {
185 status = Loader::ResultStatus::ErrorNCA0;
186 return false;
187 }
188
189 return true;
190}
191 49
192bool NCA::HandlePotentialHeaderDecryption() { 50 u128 rights_id_u128;
193 if (IsValidNCA(header)) { 51 std::memcpy(rights_id_u128.data(), rights_id.data(), sizeof(rights_id));
194 return true;
195 }
196
197 if (!CheckSupportedNCA(header)) {
198 return false;
199 }
200 52
201 NCAHeader dec_header{}; 53 auto titlekey =
202 Core::Crypto::AESCipher<Core::Crypto::Key256> cipher( 54 keys.GetKey(Core::Crypto::S128KeyType::Titlekey, rights_id_u128[1], rights_id_u128[0]);
203 keys.GetKey(Core::Crypto::S256KeyType::Header), Core::Crypto::Mode::XTS); 55 if (titlekey == Core::Crypto::Key128{}) {
204 cipher.XTSTranscode(&header, sizeof(NCAHeader), &dec_header, 0, 0x200, 56 status = Loader::ResultStatus::ErrorMissingTitlekey;
205 Core::Crypto::Op::Decrypt); 57 return;
206 if (IsValidNCA(dec_header)) {
207 header = dec_header;
208 encrypted = true;
209 } else {
210 if (!CheckSupportedNCA(dec_header)) {
211 return false;
212 } 58 }
213 59
214 if (keys.HasKey(Core::Crypto::S256KeyType::Header)) { 60 if (!keys.HasKey(Core::Crypto::S128KeyType::Titlekek, key_generation)) {
215 status = Loader::ResultStatus::ErrorIncorrectHeaderKey; 61 status = Loader::ResultStatus::ErrorMissingTitlekek;
216 } else { 62 return;
217 status = Loader::ResultStatus::ErrorMissingHeaderKey;
218 } 63 }
219 return false;
220 }
221 64
222 return true; 65 auto titlekek = keys.GetKey(Core::Crypto::S128KeyType::Titlekek, key_generation);
223} 66 Core::Crypto::AESCipher<Core::Crypto::Key128> cipher(titlekek, Core::Crypto::Mode::ECB);
67 cipher.Transcode(titlekey.data(), titlekey.size(), titlekey.data(),
68 Core::Crypto::Op::Decrypt);
224 69
225std::vector<NCASectionHeader> NCA::ReadSectionHeaders() const { 70 reader->SetExternalDecryptionKey(titlekey.data(), titlekey.size());
226 const std::ptrdiff_t number_sections =
227 std::ranges::count_if(header.section_tables, [](const NCASectionTableEntry& entry) {
228 return entry.media_offset > 0;
229 });
230
231 std::vector<NCASectionHeader> sections(number_sections);
232 const auto length_sections = SECTION_HEADER_SIZE * number_sections;
233
234 if (encrypted) {
235 auto raw = file->ReadBytes(length_sections, SECTION_HEADER_OFFSET);
236 Core::Crypto::AESCipher<Core::Crypto::Key256> cipher(
237 keys.GetKey(Core::Crypto::S256KeyType::Header), Core::Crypto::Mode::XTS);
238 cipher.XTSTranscode(raw.data(), length_sections, sections.data(), 2, SECTION_HEADER_SIZE,
239 Core::Crypto::Op::Decrypt);
240 } else {
241 file->ReadBytes(sections.data(), length_sections, SECTION_HEADER_OFFSET);
242 } 71 }
243 72
244 return sections; 73 const s32 fs_count = reader->GetFsCount();
245} 74 NcaFileSystemDriver fs(base_nca ? base_nca->reader : nullptr, reader);
246 75 std::vector<VirtualFile> filesystems(fs_count);
247bool NCA::ReadSections(const std::vector<NCASectionHeader>& sections, u64 bktr_base_ivfc_offset) { 76 for (s32 i = 0; i < fs_count; i++) {
248 for (std::size_t i = 0; i < sections.size(); ++i) { 77 NcaFsHeaderReader header_reader;
249 const auto& section = sections[i]; 78 const Result rc = fs.OpenStorage(&filesystems[i], &header_reader, i);
250 79 if (R_FAILED(rc)) {
251 if (section.raw.sparse_info.bucket.table_offset != 0 && 80 LOG_ERROR(Loader, "File reader errored out during read of section {}: {:#x}", i,
252 section.raw.sparse_info.bucket.table_size != 0) { 81 rc.GetInnerValue());
253 LOG_ERROR(Loader, "Sparse NCAs are not supported."); 82 status = Loader::ResultStatus::ErrorBadNCAHeader;
254 status = Loader::ResultStatus::ErrorSparseNCA; 83 return;
255 return false;
256 }
257
258 if (section.raw.compression_info.bucket.table_offset != 0 &&
259 section.raw.compression_info.bucket.table_size != 0) {
260 LOG_ERROR(Loader, "Compressed NCAs are not supported.");
261 status = Loader::ResultStatus::ErrorCompressedNCA;
262 return false;
263 }
264
265 if (section.raw.header.filesystem_type == NCASectionFilesystemType::ROMFS) {
266 if (!ReadRomFSSection(section, header.section_tables[i], bktr_base_ivfc_offset)) {
267 return false;
268 }
269 } else if (section.raw.header.filesystem_type == NCASectionFilesystemType::PFS0) {
270 if (!ReadPFS0Section(section, header.section_tables[i])) {
271 return false;
272 }
273 }
274 }
275
276 return true;
277}
278
279bool NCA::ReadRomFSSection(const NCASectionHeader& section, const NCASectionTableEntry& entry,
280 u64 bktr_base_ivfc_offset) {
281 const std::size_t base_offset = entry.media_offset * MEDIA_OFFSET_MULTIPLIER;
282 ivfc_offset = section.romfs.ivfc.levels[IVFC_MAX_LEVEL - 1].offset;
283 const std::size_t romfs_offset = base_offset + ivfc_offset;
284 const std::size_t romfs_size = section.romfs.ivfc.levels[IVFC_MAX_LEVEL - 1].size;
285 auto raw = std::make_shared<OffsetVfsFile>(file, romfs_size, romfs_offset);
286 auto dec = Decrypt(section, raw, romfs_offset);
287
288 if (dec == nullptr) {
289 if (status != Loader::ResultStatus::Success)
290 return false;
291 if (has_rights_id)
292 status = Loader::ResultStatus::ErrorIncorrectTitlekeyOrTitlekek;
293 else
294 status = Loader::ResultStatus::ErrorIncorrectKeyAreaKey;
295 return false;
296 }
297
298 if (section.raw.header.crypto_type == NCASectionCryptoType::BKTR) {
299 if (section.bktr.relocation.magic != Common::MakeMagic('B', 'K', 'T', 'R') ||
300 section.bktr.subsection.magic != Common::MakeMagic('B', 'K', 'T', 'R')) {
301 status = Loader::ResultStatus::ErrorBadBKTRHeader;
302 return false;
303 }
304
305 if (section.bktr.relocation.offset + section.bktr.relocation.size !=
306 section.bktr.subsection.offset) {
307 status = Loader::ResultStatus::ErrorBKTRSubsectionNotAfterRelocation;
308 return false;
309 }
310
311 const u64 size = MEDIA_OFFSET_MULTIPLIER * (entry.media_end_offset - entry.media_offset);
312 if (section.bktr.subsection.offset + section.bktr.subsection.size != size) {
313 status = Loader::ResultStatus::ErrorBKTRSubsectionNotAtEnd;
314 return false;
315 }
316
317 const u64 offset = section.romfs.ivfc.levels[IVFC_MAX_LEVEL - 1].offset;
318 RelocationBlock relocation_block{};
319 if (dec->ReadObject(&relocation_block, section.bktr.relocation.offset - offset) !=
320 sizeof(RelocationBlock)) {
321 status = Loader::ResultStatus::ErrorBadRelocationBlock;
322 return false;
323 }
324 SubsectionBlock subsection_block{};
325 if (dec->ReadObject(&subsection_block, section.bktr.subsection.offset - offset) !=
326 sizeof(RelocationBlock)) {
327 status = Loader::ResultStatus::ErrorBadSubsectionBlock;
328 return false;
329 }
330
331 std::vector<RelocationBucketRaw> relocation_buckets_raw(
332 (section.bktr.relocation.size - sizeof(RelocationBlock)) / sizeof(RelocationBucketRaw));
333 if (dec->ReadBytes(relocation_buckets_raw.data(),
334 section.bktr.relocation.size - sizeof(RelocationBlock),
335 section.bktr.relocation.offset + sizeof(RelocationBlock) - offset) !=
336 section.bktr.relocation.size - sizeof(RelocationBlock)) {
337 status = Loader::ResultStatus::ErrorBadRelocationBuckets;
338 return false;
339 } 84 }
340 85
341 std::vector<SubsectionBucketRaw> subsection_buckets_raw( 86 if (header_reader.GetFsType() == NcaFsHeader::FsType::RomFs) {
342 (section.bktr.subsection.size - sizeof(SubsectionBlock)) / sizeof(SubsectionBucketRaw)); 87 files.push_back(filesystems[i]);
343 if (dec->ReadBytes(subsection_buckets_raw.data(), 88 romfs = files.back();
344 section.bktr.subsection.size - sizeof(SubsectionBlock),
345 section.bktr.subsection.offset + sizeof(SubsectionBlock) - offset) !=
346 section.bktr.subsection.size - sizeof(SubsectionBlock)) {
347 status = Loader::ResultStatus::ErrorBadSubsectionBuckets;
348 return false;
349 } 89 }
350 90
351 std::vector<RelocationBucket> relocation_buckets(relocation_buckets_raw.size()); 91 if (header_reader.GetFsType() == NcaFsHeader::FsType::PartitionFs) {
352 std::ranges::transform(relocation_buckets_raw, relocation_buckets.begin(), 92 auto npfs = std::make_shared<PartitionFilesystem>(filesystems[i]);
353 &ConvertRelocationBucketRaw); 93 if (npfs->GetStatus() == Loader::ResultStatus::Success) {
354 std::vector<SubsectionBucket> subsection_buckets(subsection_buckets_raw.size()); 94 dirs.push_back(npfs);
355 std::ranges::transform(subsection_buckets_raw, subsection_buckets.begin(), 95 if (IsDirectoryExeFS(npfs)) {
356 &ConvertSubsectionBucketRaw); 96 exefs = dirs.back();
357 97 } else if (IsDirectoryLogoPartition(npfs)) {
358 u32 ctr_low; 98 logo = dirs.back();
359 std::memcpy(&ctr_low, section.raw.section_ctr.data(), sizeof(ctr_low)); 99 } else {
360 subsection_buckets.back().entries.push_back({section.bktr.relocation.offset, {0}, ctr_low}); 100 continue;
361 subsection_buckets.back().entries.push_back({size, {0}, 0});
362
363 std::optional<Core::Crypto::Key128> key;
364 if (encrypted) {
365 if (has_rights_id) {
366 status = Loader::ResultStatus::Success;
367 key = GetTitlekey();
368 if (!key) {
369 status = Loader::ResultStatus::ErrorMissingTitlekey;
370 return false;
371 }
372 } else {
373 key = GetKeyAreaKey(NCASectionCryptoType::BKTR);
374 if (!key) {
375 status = Loader::ResultStatus::ErrorMissingKeyAreaKey;
376 return false;
377 } 101 }
378 } 102 }
379 } 103 }
380 104
381 if (bktr_base_romfs == nullptr) { 105 if (header_reader.GetEncryptionType() == NcaFsHeader::EncryptionType::AesCtrEx) {
382 status = Loader::ResultStatus::ErrorMissingBKTRBaseRomFS; 106 is_update = true;
383 return false;
384 } 107 }
385
386 auto bktr = std::make_shared<BKTR>(
387 bktr_base_romfs, std::make_shared<OffsetVfsFile>(file, romfs_size, base_offset),
388 relocation_block, relocation_buckets, subsection_block, subsection_buckets, encrypted,
389 encrypted ? *key : Core::Crypto::Key128{}, base_offset, bktr_base_ivfc_offset,
390 section.raw.section_ctr);
391
392 // BKTR applies to entire IVFC, so make an offset version to level 6
393 files.push_back(std::make_shared<OffsetVfsFile>(
394 bktr, romfs_size, section.romfs.ivfc.levels[IVFC_MAX_LEVEL - 1].offset));
395 } else {
396 files.push_back(std::move(dec));
397 } 108 }
398 109
399 romfs = files.back(); 110 if (is_update && base_nca == nullptr) {
400 return true; 111 status = Loader::ResultStatus::ErrorMissingBKTRBaseRomFS;
401}
402
403bool NCA::ReadPFS0Section(const NCASectionHeader& section, const NCASectionTableEntry& entry) {
404 const u64 offset = (static_cast<u64>(entry.media_offset) * MEDIA_OFFSET_MULTIPLIER) +
405 section.pfs0.pfs0_header_offset;
406 const u64 size = MEDIA_OFFSET_MULTIPLIER * (entry.media_end_offset - entry.media_offset);
407
408 auto dec = Decrypt(section, std::make_shared<OffsetVfsFile>(file, size, offset), offset);
409 if (dec != nullptr) {
410 auto npfs = std::make_shared<PartitionFilesystem>(std::move(dec));
411
412 if (npfs->GetStatus() == Loader::ResultStatus::Success) {
413 dirs.push_back(std::move(npfs));
414 if (IsDirectoryExeFS(dirs.back()))
415 exefs = dirs.back();
416 else if (IsDirectoryLogoPartition(dirs.back()))
417 logo = dirs.back();
418 } else {
419 if (has_rights_id)
420 status = Loader::ResultStatus::ErrorIncorrectTitlekeyOrTitlekek;
421 else
422 status = Loader::ResultStatus::ErrorIncorrectKeyAreaKey;
423 return false;
424 }
425 } else { 112 } else {
426 if (status != Loader::ResultStatus::Success) 113 status = Loader::ResultStatus::Success;
427 return false;
428 if (has_rights_id)
429 status = Loader::ResultStatus::ErrorIncorrectTitlekeyOrTitlekek;
430 else
431 status = Loader::ResultStatus::ErrorIncorrectKeyAreaKey;
432 return false;
433 } 114 }
434
435 return true;
436}
437
438u8 NCA::GetCryptoRevision() const {
439 u8 master_key_id = header.crypto_type;
440 if (header.crypto_type_2 > master_key_id)
441 master_key_id = header.crypto_type_2;
442 if (master_key_id > 0)
443 --master_key_id;
444 return master_key_id;
445} 115}
446 116
447std::optional<Core::Crypto::Key128> NCA::GetKeyAreaKey(NCASectionCryptoType type) const { 117NCA::~NCA() = default;
448 const auto master_key_id = GetCryptoRevision();
449
450 if (!keys.HasKey(Core::Crypto::S128KeyType::KeyArea, master_key_id, header.key_index)) {
451 return std::nullopt;
452 }
453
454 std::vector<u8> key_area(header.key_area.begin(), header.key_area.end());
455 Core::Crypto::AESCipher<Core::Crypto::Key128> cipher(
456 keys.GetKey(Core::Crypto::S128KeyType::KeyArea, master_key_id, header.key_index),
457 Core::Crypto::Mode::ECB);
458 cipher.Transcode(key_area.data(), key_area.size(), key_area.data(), Core::Crypto::Op::Decrypt);
459
460 Core::Crypto::Key128 out{};
461 if (type == NCASectionCryptoType::XTS) {
462 std::copy(key_area.begin(), key_area.begin() + 0x10, out.begin());
463 } else if (type == NCASectionCryptoType::CTR || type == NCASectionCryptoType::BKTR) {
464 std::copy(key_area.begin() + 0x20, key_area.begin() + 0x30, out.begin());
465 } else {
466 LOG_CRITICAL(Crypto, "Called GetKeyAreaKey on invalid NCASectionCryptoType type={:02X}",
467 type);
468 }
469
470 u128 out_128{};
471 std::memcpy(out_128.data(), out.data(), sizeof(u128));
472 LOG_TRACE(Crypto, "called with crypto_rev={:02X}, kak_index={:02X}, key={:016X}{:016X}",
473 master_key_id, header.key_index, out_128[1], out_128[0]);
474
475 return out;
476}
477
478std::optional<Core::Crypto::Key128> NCA::GetTitlekey() {
479 const auto master_key_id = GetCryptoRevision();
480
481 u128 rights_id{};
482 memcpy(rights_id.data(), header.rights_id.data(), 16);
483 if (rights_id == u128{}) {
484 status = Loader::ResultStatus::ErrorInvalidRightsID;
485 return std::nullopt;
486 }
487
488 auto titlekey = keys.GetKey(Core::Crypto::S128KeyType::Titlekey, rights_id[1], rights_id[0]);
489 if (titlekey == Core::Crypto::Key128{}) {
490 status = Loader::ResultStatus::ErrorMissingTitlekey;
491 return std::nullopt;
492 }
493
494 if (!keys.HasKey(Core::Crypto::S128KeyType::Titlekek, master_key_id)) {
495 status = Loader::ResultStatus::ErrorMissingTitlekek;
496 return std::nullopt;
497 }
498
499 Core::Crypto::AESCipher<Core::Crypto::Key128> cipher(
500 keys.GetKey(Core::Crypto::S128KeyType::Titlekek, master_key_id), Core::Crypto::Mode::ECB);
501 cipher.Transcode(titlekey.data(), titlekey.size(), titlekey.data(), Core::Crypto::Op::Decrypt);
502
503 return titlekey;
504}
505
506VirtualFile NCA::Decrypt(const NCASectionHeader& s_header, VirtualFile in, u64 starting_offset) {
507 if (!encrypted)
508 return in;
509
510 switch (s_header.raw.header.crypto_type) {
511 case NCASectionCryptoType::NONE:
512 LOG_TRACE(Crypto, "called with mode=NONE");
513 return in;
514 case NCASectionCryptoType::CTR:
515 // During normal BKTR decryption, this entire function is skipped. This is for the metadata,
516 // which uses the same CTR as usual.
517 case NCASectionCryptoType::BKTR:
518 LOG_TRACE(Crypto, "called with mode=CTR, starting_offset={:016X}", starting_offset);
519 {
520 std::optional<Core::Crypto::Key128> key;
521 if (has_rights_id) {
522 status = Loader::ResultStatus::Success;
523 key = GetTitlekey();
524 if (!key) {
525 if (status == Loader::ResultStatus::Success)
526 status = Loader::ResultStatus::ErrorMissingTitlekey;
527 return nullptr;
528 }
529 } else {
530 key = GetKeyAreaKey(NCASectionCryptoType::CTR);
531 if (!key) {
532 status = Loader::ResultStatus::ErrorMissingKeyAreaKey;
533 return nullptr;
534 }
535 }
536
537 auto out = std::make_shared<Core::Crypto::CTREncryptionLayer>(std::move(in), *key,
538 starting_offset);
539 Core::Crypto::CTREncryptionLayer::IVData iv{};
540 for (std::size_t i = 0; i < 8; ++i) {
541 iv[i] = s_header.raw.section_ctr[8 - i - 1];
542 }
543 out->SetIV(iv);
544 return std::static_pointer_cast<VfsFile>(out);
545 }
546 case NCASectionCryptoType::XTS:
547 // TODO(DarkLordZach): Find a test case for XTS-encrypted NCAs
548 default:
549 LOG_ERROR(Crypto, "called with unhandled crypto type={:02X}",
550 s_header.raw.header.crypto_type);
551 return nullptr;
552 }
553}
554 118
555Loader::ResultStatus NCA::GetStatus() const { 119Loader::ResultStatus NCA::GetStatus() const {
556 return status; 120 return status;
@@ -579,21 +143,24 @@ VirtualDir NCA::GetParentDirectory() const {
579} 143}
580 144
581NCAContentType NCA::GetType() const { 145NCAContentType NCA::GetType() const {
582 return header.content_type; 146 return static_cast<NCAContentType>(reader->GetContentType());
583} 147}
584 148
585u64 NCA::GetTitleId() const { 149u64 NCA::GetTitleId() const {
586 if (is_update || status == Loader::ResultStatus::ErrorMissingBKTRBaseRomFS) 150 if (is_update) {
587 return header.title_id | 0x800; 151 return reader->GetProgramId() | 0x800;
588 return header.title_id; 152 }
153 return reader->GetProgramId();
589} 154}
590 155
591std::array<u8, 16> NCA::GetRightsId() const { 156RightsId NCA::GetRightsId() const {
592 return header.rights_id; 157 RightsId result;
158 reader->GetRightsId(result.data(), result.size());
159 return result;
593} 160}
594 161
595u32 NCA::GetSDKVersion() const { 162u32 NCA::GetSDKVersion() const {
596 return header.sdk_version; 163 return reader->GetSdkAddonVersion();
597} 164}
598 165
599bool NCA::IsUpdate() const { 166bool NCA::IsUpdate() const {
@@ -612,10 +179,6 @@ VirtualFile NCA::GetBaseFile() const {
612 return file; 179 return file;
613} 180}
614 181
615u64 NCA::GetBaseIVFCOffset() const {
616 return ivfc_offset;
617}
618
619VirtualDir NCA::GetLogoPartition() const { 182VirtualDir NCA::GetLogoPartition() const {
620 return logo; 183 return logo;
621} 184}
diff --git a/src/core/file_sys/content_archive.h b/src/core/file_sys/content_archive.h
index 20f524f80..af521d453 100644
--- a/src/core/file_sys/content_archive.h
+++ b/src/core/file_sys/content_archive.h
@@ -21,7 +21,7 @@ enum class ResultStatus : u16;
21 21
22namespace FileSys { 22namespace FileSys {
23 23
24union NCASectionHeader; 24class NcaReader;
25 25
26/// Describes the type of content within an NCA archive. 26/// Describes the type of content within an NCA archive.
27enum class NCAContentType : u8 { 27enum class NCAContentType : u8 {
@@ -45,41 +45,7 @@ enum class NCAContentType : u8 {
45 PublicData = 5, 45 PublicData = 5,
46}; 46};
47 47
48enum class NCASectionCryptoType : u8 { 48using RightsId = std::array<u8, 0x10>;
49 NONE = 1,
50 XTS = 2,
51 CTR = 3,
52 BKTR = 4,
53};
54
55struct NCASectionTableEntry {
56 u32_le media_offset;
57 u32_le media_end_offset;
58 INSERT_PADDING_BYTES(0x8);
59};
60static_assert(sizeof(NCASectionTableEntry) == 0x10, "NCASectionTableEntry has incorrect size.");
61
62struct NCAHeader {
63 std::array<u8, 0x100> rsa_signature_1;
64 std::array<u8, 0x100> rsa_signature_2;
65 u32_le magic;
66 u8 is_system;
67 NCAContentType content_type;
68 u8 crypto_type;
69 u8 key_index;
70 u64_le size;
71 u64_le title_id;
72 INSERT_PADDING_BYTES(0x4);
73 u32_le sdk_version;
74 u8 crypto_type_2;
75 INSERT_PADDING_BYTES(15);
76 std::array<u8, 0x10> rights_id;
77 std::array<NCASectionTableEntry, 0x4> section_tables;
78 std::array<std::array<u8, 0x20>, 0x4> hash_tables;
79 std::array<u8, 0x40> key_area;
80 INSERT_PADDING_BYTES(0xC0);
81};
82static_assert(sizeof(NCAHeader) == 0x400, "NCAHeader has incorrect size.");
83 49
84inline bool IsDirectoryExeFS(const VirtualDir& pfs) { 50inline bool IsDirectoryExeFS(const VirtualDir& pfs) {
85 // According to switchbrew, an exefs must only contain these two files: 51 // According to switchbrew, an exefs must only contain these two files:
@@ -97,8 +63,7 @@ inline bool IsDirectoryLogoPartition(const VirtualDir& pfs) {
97// After construction, use GetStatus to determine if the file is valid and ready to be used. 63// After construction, use GetStatus to determine if the file is valid and ready to be used.
98class NCA : public ReadOnlyVfsDirectory { 64class NCA : public ReadOnlyVfsDirectory {
99public: 65public:
100 explicit NCA(VirtualFile file, VirtualFile bktr_base_romfs = nullptr, 66 explicit NCA(VirtualFile file, const NCA* base_nca = nullptr);
101 u64 bktr_base_ivfc_offset = 0);
102 ~NCA() override; 67 ~NCA() override;
103 68
104 Loader::ResultStatus GetStatus() const; 69 Loader::ResultStatus GetStatus() const;
@@ -110,7 +75,7 @@ public:
110 75
111 NCAContentType GetType() const; 76 NCAContentType GetType() const;
112 u64 GetTitleId() const; 77 u64 GetTitleId() const;
113 std::array<u8, 0x10> GetRightsId() const; 78 RightsId GetRightsId() const;
114 u32 GetSDKVersion() const; 79 u32 GetSDKVersion() const;
115 bool IsUpdate() const; 80 bool IsUpdate() const;
116 81
@@ -119,26 +84,9 @@ public:
119 84
120 VirtualFile GetBaseFile() const; 85 VirtualFile GetBaseFile() const;
121 86
122 // Returns the base ivfc offset used in BKTR patching.
123 u64 GetBaseIVFCOffset() const;
124
125 VirtualDir GetLogoPartition() const; 87 VirtualDir GetLogoPartition() const;
126 88
127private: 89private:
128 bool CheckSupportedNCA(const NCAHeader& header);
129 bool HandlePotentialHeaderDecryption();
130
131 std::vector<NCASectionHeader> ReadSectionHeaders() const;
132 bool ReadSections(const std::vector<NCASectionHeader>& sections, u64 bktr_base_ivfc_offset);
133 bool ReadRomFSSection(const NCASectionHeader& section, const NCASectionTableEntry& entry,
134 u64 bktr_base_ivfc_offset);
135 bool ReadPFS0Section(const NCASectionHeader& section, const NCASectionTableEntry& entry);
136
137 u8 GetCryptoRevision() const;
138 std::optional<Core::Crypto::Key128> GetKeyAreaKey(NCASectionCryptoType type) const;
139 std::optional<Core::Crypto::Key128> GetTitlekey();
140 VirtualFile Decrypt(const NCASectionHeader& header, VirtualFile in, u64 starting_offset);
141
142 std::vector<VirtualDir> dirs; 90 std::vector<VirtualDir> dirs;
143 std::vector<VirtualFile> files; 91 std::vector<VirtualFile> files;
144 92
@@ -146,11 +94,6 @@ private:
146 VirtualDir exefs = nullptr; 94 VirtualDir exefs = nullptr;
147 VirtualDir logo = nullptr; 95 VirtualDir logo = nullptr;
148 VirtualFile file; 96 VirtualFile file;
149 VirtualFile bktr_base_romfs;
150 u64 ivfc_offset = 0;
151
152 NCAHeader header{};
153 bool has_rights_id{};
154 97
155 Loader::ResultStatus status{}; 98 Loader::ResultStatus status{};
156 99
@@ -158,6 +101,7 @@ private:
158 bool is_update = false; 101 bool is_update = false;
159 102
160 Core::Crypto::KeyManager& keys; 103 Core::Crypto::KeyManager& keys;
104 std::shared_ptr<NcaReader> reader;
161}; 105};
162 106
163} // namespace FileSys 107} // namespace FileSys
diff --git a/src/core/file_sys/errors.h b/src/core/file_sys/errors.h
index 7cee0c7df..2f5045a67 100644
--- a/src/core/file_sys/errors.h
+++ b/src/core/file_sys/errors.h
@@ -17,4 +17,74 @@ constexpr Result ERROR_INVALID_ARGUMENT{ErrorModule::FS, 6001};
17constexpr Result ERROR_INVALID_OFFSET{ErrorModule::FS, 6061}; 17constexpr Result ERROR_INVALID_OFFSET{ErrorModule::FS, 6061};
18constexpr Result ERROR_INVALID_SIZE{ErrorModule::FS, 6062}; 18constexpr Result ERROR_INVALID_SIZE{ErrorModule::FS, 6062};
19 19
20constexpr Result ResultUnsupportedSdkVersion{ErrorModule::FS, 50};
21constexpr Result ResultPartitionNotFound{ErrorModule::FS, 1001};
22constexpr Result ResultUnsupportedVersion{ErrorModule::FS, 3002};
23constexpr Result ResultOutOfRange{ErrorModule::FS, 3005};
24constexpr Result ResultAllocationMemoryFailedInFileSystemBuddyHeapA{ErrorModule::FS, 3294};
25constexpr Result ResultAllocationMemoryFailedInNcaFileSystemDriverI{ErrorModule::FS, 3341};
26constexpr Result ResultAllocationMemoryFailedInNcaReaderA{ErrorModule::FS, 3363};
27constexpr Result ResultAllocationMemoryFailedInAesCtrCounterExtendedStorageA{ErrorModule::FS, 3399};
28constexpr Result ResultAllocationMemoryFailedInIntegrityRomFsStorageA{ErrorModule::FS, 3412};
29constexpr Result ResultAllocationMemoryFailedMakeUnique{ErrorModule::FS, 3422};
30constexpr Result ResultAllocationMemoryFailedAllocateShared{ErrorModule::FS, 3423};
31constexpr Result ResultInvalidAesCtrCounterExtendedEntryOffset{ErrorModule::FS, 4012};
32constexpr Result ResultIndirectStorageCorrupted{ErrorModule::FS, 4021};
33constexpr Result ResultInvalidIndirectEntryOffset{ErrorModule::FS, 4022};
34constexpr Result ResultInvalidIndirectEntryStorageIndex{ErrorModule::FS, 4023};
35constexpr Result ResultInvalidIndirectStorageSize{ErrorModule::FS, 4024};
36constexpr Result ResultInvalidBucketTreeSignature{ErrorModule::FS, 4032};
37constexpr Result ResultInvalidBucketTreeEntryCount{ErrorModule::FS, 4033};
38constexpr Result ResultInvalidBucketTreeNodeEntryCount{ErrorModule::FS, 4034};
39constexpr Result ResultInvalidBucketTreeNodeOffset{ErrorModule::FS, 4035};
40constexpr Result ResultInvalidBucketTreeEntryOffset{ErrorModule::FS, 4036};
41constexpr Result ResultInvalidBucketTreeEntrySetOffset{ErrorModule::FS, 4037};
42constexpr Result ResultInvalidBucketTreeNodeIndex{ErrorModule::FS, 4038};
43constexpr Result ResultInvalidBucketTreeVirtualOffset{ErrorModule::FS, 4039};
44constexpr Result ResultRomNcaInvalidPatchMetaDataHashType{ErrorModule::FS, 4084};
45constexpr Result ResultRomNcaInvalidIntegrityLayerInfoOffset{ErrorModule::FS, 4085};
46constexpr Result ResultRomNcaInvalidPatchMetaDataHashDataSize{ErrorModule::FS, 4086};
47constexpr Result ResultRomNcaInvalidPatchMetaDataHashDataOffset{ErrorModule::FS, 4087};
48constexpr Result ResultRomNcaInvalidPatchMetaDataHashDataHash{ErrorModule::FS, 4088};
49constexpr Result ResultRomNcaInvalidSparseMetaDataHashType{ErrorModule::FS, 4089};
50constexpr Result ResultRomNcaInvalidSparseMetaDataHashDataSize{ErrorModule::FS, 4090};
51constexpr Result ResultRomNcaInvalidSparseMetaDataHashDataOffset{ErrorModule::FS, 4091};
52constexpr Result ResultRomNcaInvalidSparseMetaDataHashDataHash{ErrorModule::FS, 4091};
53constexpr Result ResultNcaBaseStorageOutOfRangeB{ErrorModule::FS, 4509};
54constexpr Result ResultNcaBaseStorageOutOfRangeC{ErrorModule::FS, 4510};
55constexpr Result ResultNcaBaseStorageOutOfRangeD{ErrorModule::FS, 4511};
56constexpr Result ResultInvalidNcaSignature{ErrorModule::FS, 4517};
57constexpr Result ResultNcaFsHeaderHashVerificationFailed{ErrorModule::FS, 4520};
58constexpr Result ResultInvalidNcaKeyIndex{ErrorModule::FS, 4521};
59constexpr Result ResultInvalidNcaFsHeaderHashType{ErrorModule::FS, 4522};
60constexpr Result ResultInvalidNcaFsHeaderEncryptionType{ErrorModule::FS, 4523};
61constexpr Result ResultInvalidNcaPatchInfoIndirectSize{ErrorModule::FS, 4524};
62constexpr Result ResultInvalidNcaPatchInfoAesCtrExSize{ErrorModule::FS, 4525};
63constexpr Result ResultInvalidNcaPatchInfoAesCtrExOffset{ErrorModule::FS, 4526};
64constexpr Result ResultInvalidNcaHeader{ErrorModule::FS, 4528};
65constexpr Result ResultInvalidNcaFsHeader{ErrorModule::FS, 4529};
66constexpr Result ResultNcaBaseStorageOutOfRangeE{ErrorModule::FS, 4530};
67constexpr Result ResultInvalidHierarchicalSha256BlockSize{ErrorModule::FS, 4532};
68constexpr Result ResultInvalidHierarchicalSha256LayerCount{ErrorModule::FS, 4533};
69constexpr Result ResultHierarchicalSha256BaseStorageTooLarge{ErrorModule::FS, 4534};
70constexpr Result ResultHierarchicalSha256HashVerificationFailed{ErrorModule::FS, 4535};
71constexpr Result ResultInvalidNcaHierarchicalIntegrityVerificationLayerCount{ErrorModule::FS, 4541};
72constexpr Result ResultInvalidNcaIndirectStorageOutOfRange{ErrorModule::FS, 4542};
73constexpr Result ResultInvalidNcaHeader1SignatureKeyGeneration{ErrorModule::FS, 4543};
74constexpr Result ResultInvalidCompressedStorageSize{ErrorModule::FS, 4547};
75constexpr Result ResultInvalidNcaMetaDataHashDataSize{ErrorModule::FS, 4548};
76constexpr Result ResultInvalidNcaMetaDataHashDataHash{ErrorModule::FS, 4549};
77constexpr Result ResultUnexpectedInCompressedStorageA{ErrorModule::FS, 5324};
78constexpr Result ResultUnexpectedInCompressedStorageB{ErrorModule::FS, 5325};
79constexpr Result ResultUnexpectedInCompressedStorageC{ErrorModule::FS, 5326};
80constexpr Result ResultUnexpectedInCompressedStorageD{ErrorModule::FS, 5327};
81constexpr Result ResultInvalidArgument{ErrorModule::FS, 6001};
82constexpr Result ResultInvalidOffset{ErrorModule::FS, 6061};
83constexpr Result ResultInvalidSize{ErrorModule::FS, 6062};
84constexpr Result ResultNullptrArgument{ErrorModule::FS, 6063};
85constexpr Result ResultUnsupportedSetSizeForIndirectStorage{ErrorModule::FS, 6325};
86constexpr Result ResultUnsupportedWriteForCompressedStorage{ErrorModule::FS, 6387};
87constexpr Result ResultUnsupportedOperateRangeForCompressedStorage{ErrorModule::FS, 6388};
88constexpr Result ResultBufferAllocationFailed{ErrorModule::FS, 6705};
89
20} // namespace FileSys 90} // namespace FileSys
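
The block above only declares result values; they are consumed through yuzu's existing Result macros (R_UNLESS, R_TRY, R_SUCCEED, R_SUCCEED_IF), which the fssystem code added later in this diff uses heavily. A minimal sketch of that pattern; the function names are illustrative and the result.h include path reflects yuzu's existing Result helpers, not this diff:

    // Sketch: returning and propagating the new FS results.
    #include "core/file_sys/errors.h"
    #include "core/hle/result.h"

    namespace FileSys {

    Result ValidateEntryCount(s32 entry_count) {
        // Fail with a specific FS result if the precondition does not hold.
        R_UNLESS(entry_count > 0, ResultInvalidBucketTreeEntryCount);
        R_SUCCEED();
    }

    Result InitializeExample(s32 entry_count) {
        // R_TRY returns early, forwarding any non-success Result to the caller.
        R_TRY(ValidateEntryCount(entry_count));
        R_SUCCEED();
    }

    } // namespace FileSys
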
diff --git a/src/core/file_sys/fssystem/fs_i_storage.h b/src/core/file_sys/fssystem/fs_i_storage.h
new file mode 100644
index 000000000..416dd57b8
--- /dev/null
+++ b/src/core/file_sys/fssystem/fs_i_storage.h
@@ -0,0 +1,58 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "common/overflow.h"
7#include "core/file_sys/errors.h"
8#include "core/file_sys/vfs.h"
9
10namespace FileSys {
11
12class IStorage : public VfsFile {
13public:
14 virtual std::string GetName() const override {
15 return {};
16 }
17
18 virtual VirtualDir GetContainingDirectory() const override {
19 return {};
20 }
21
22 virtual bool IsWritable() const override {
23 return true;
24 }
25
26 virtual bool IsReadable() const override {
27 return true;
28 }
29
30 virtual bool Resize(size_t size) override {
31 return false;
32 }
33
34 virtual bool Rename(std::string_view name) override {
35 return false;
36 }
37
38 static inline Result CheckAccessRange(s64 offset, s64 size, s64 total_size) {
39 R_UNLESS(offset >= 0, ResultInvalidOffset);
40 R_UNLESS(size >= 0, ResultInvalidSize);
41 R_UNLESS(Common::WrappingAdd(offset, size) >= offset, ResultOutOfRange);
42 R_UNLESS(offset + size <= total_size, ResultOutOfRange);
43 R_SUCCEED();
44 }
45};
46
47class IReadOnlyStorage : public IStorage {
48public:
49 virtual bool IsWritable() const override {
50 return false;
51 }
52
53 virtual size_t Write(const u8* buffer, size_t size, size_t offset) override {
54 return 0;
55 }
56};
57
58} // namespace FileSys
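
For context on how this interface is used: the read-only storages added below (AesCtrCounterExtendedStorage, for example) derive from IReadOnlyStorage and override just Read and GetSize on top of it. A minimal sketch of such a storage over an in-memory buffer; the class name is hypothetical and not part of the diff:

    // Sketch: the smallest possible IReadOnlyStorage, mirroring the pattern
    // of the fssystem storage classes that follow.
    #include <algorithm>
    #include <cstring>
    #include <vector>

    #include "core/file_sys/fssystem/fs_i_storage.h"

    namespace FileSys {

    class MemoryStorage : public IReadOnlyStorage {
    public:
        explicit MemoryStorage(std::vector<u8> data) : m_data(std::move(data)) {}

        size_t Read(u8* buffer, size_t size, size_t offset) const override {
            // Clamp the request to the backing buffer and copy it out.
            if (offset >= m_data.size()) {
                return 0;
            }
            const size_t copy_size = std::min(size, m_data.size() - offset);
            std::memcpy(buffer, m_data.data() + offset, copy_size);
            return copy_size;
        }

        size_t GetSize() const override {
            return m_data.size();
        }

    private:
        std::vector<u8> m_data;
    };

    } // namespace FileSys
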
diff --git a/src/core/file_sys/fssystem/fs_types.h b/src/core/file_sys/fssystem/fs_types.h
new file mode 100644
index 000000000..43aeaf447
--- /dev/null
+++ b/src/core/file_sys/fssystem/fs_types.h
@@ -0,0 +1,46 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "common/common_funcs.h"
7
8namespace FileSys {
9
10struct Int64 {
11 u32 low;
12 u32 high;
13
14 constexpr void Set(s64 v) {
15 this->low = static_cast<u32>((v & static_cast<u64>(0x00000000FFFFFFFFULL)) >> 0);
16 this->high = static_cast<u32>((v & static_cast<u64>(0xFFFFFFFF00000000ULL)) >> 32);
17 }
18
19 constexpr s64 Get() const {
20 return (static_cast<s64>(this->high) << 32) | (static_cast<s64>(this->low));
21 }
22
23 constexpr Int64& operator=(s64 v) {
24 this->Set(v);
25 return *this;
26 }
27
28 constexpr operator s64() const {
29 return this->Get();
30 }
31};
32
33struct HashSalt {
34 static constexpr size_t Size = 32;
35
36 std::array<u8, Size> value;
37};
38static_assert(std::is_trivial_v<HashSalt>);
39static_assert(sizeof(HashSalt) == HashSalt::Size);
40
41constexpr inline size_t IntegrityMinLayerCount = 2;
42constexpr inline size_t IntegrityMaxLayerCount = 7;
43constexpr inline size_t IntegrityLayerCountSave = 5;
44constexpr inline size_t IntegrityLayerCountSaveDataMeta = 4;
45
46} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.cpp b/src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.cpp
new file mode 100644
index 000000000..f25c95472
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.cpp
@@ -0,0 +1,251 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.h"
5#include "core/file_sys/fssystem/fssystem_aes_ctr_storage.h"
6#include "core/file_sys/fssystem/fssystem_nca_header.h"
7#include "core/file_sys/vfs_offset.h"
8
9namespace FileSys {
10
11namespace {
12
13class SoftwareDecryptor final : public AesCtrCounterExtendedStorage::IDecryptor {
14public:
15 virtual void Decrypt(
16 u8* buf, size_t buf_size, const std::array<u8, AesCtrCounterExtendedStorage::KeySize>& key,
17 const std::array<u8, AesCtrCounterExtendedStorage::IvSize>& iv) override final;
18};
19
20} // namespace
21
22Result AesCtrCounterExtendedStorage::CreateSoftwareDecryptor(std::unique_ptr<IDecryptor>* out) {
23 std::unique_ptr<IDecryptor> decryptor = std::make_unique<SoftwareDecryptor>();
24 R_UNLESS(decryptor != nullptr, ResultAllocationMemoryFailedInAesCtrCounterExtendedStorageA);
25 *out = std::move(decryptor);
26 R_SUCCEED();
27}
28
29Result AesCtrCounterExtendedStorage::Initialize(const void* key, size_t key_size, u32 secure_value,
30 VirtualFile data_storage,
31 VirtualFile table_storage) {
32 // Read and verify the bucket tree header.
33 BucketTree::Header header;
34 table_storage->ReadObject(std::addressof(header), 0);
35 R_TRY(header.Verify());
36
37 // Determine extents.
38 const auto node_storage_size = QueryNodeStorageSize(header.entry_count);
39 const auto entry_storage_size = QueryEntryStorageSize(header.entry_count);
40 const auto node_storage_offset = QueryHeaderStorageSize();
41 const auto entry_storage_offset = node_storage_offset + node_storage_size;
42
43 // Create a software decryptor.
44 std::unique_ptr<IDecryptor> sw_decryptor;
45 R_TRY(CreateSoftwareDecryptor(std::addressof(sw_decryptor)));
46
47 // Initialize.
48 R_RETURN(this->Initialize(
49 key, key_size, secure_value, 0, data_storage,
50 std::make_shared<OffsetVfsFile>(table_storage, node_storage_size, node_storage_offset),
51 std::make_shared<OffsetVfsFile>(table_storage, entry_storage_size, entry_storage_offset),
52 header.entry_count, std::move(sw_decryptor)));
53}
54
55Result AesCtrCounterExtendedStorage::Initialize(const void* key, size_t key_size, u32 secure_value,
56 s64 counter_offset, VirtualFile data_storage,
57 VirtualFile node_storage, VirtualFile entry_storage,
58 s32 entry_count,
59 std::unique_ptr<IDecryptor>&& decryptor) {
60 // Validate preconditions.
61 ASSERT(key != nullptr);
62 ASSERT(key_size == KeySize);
63 ASSERT(counter_offset >= 0);
64 ASSERT(decryptor != nullptr);
65
66 // Initialize the bucket tree table.
67 if (entry_count > 0) {
68 R_TRY(
69 m_table.Initialize(node_storage, entry_storage, NodeSize, sizeof(Entry), entry_count));
70 } else {
71 m_table.Initialize(NodeSize, 0);
72 }
73
74 // Set members.
75 m_data_storage = data_storage;
76 std::memcpy(m_key.data(), key, key_size);
77 m_secure_value = secure_value;
78 m_counter_offset = counter_offset;
79 m_decryptor = std::move(decryptor);
80
81 R_SUCCEED();
82}
83
84void AesCtrCounterExtendedStorage::Finalize() {
85 if (this->IsInitialized()) {
86 m_table.Finalize();
87 m_data_storage = VirtualFile();
88 }
89}
90
91Result AesCtrCounterExtendedStorage::GetEntryList(Entry* out_entries, s32* out_entry_count,
92 s32 entry_count, s64 offset, s64 size) {
93 // Validate pre-conditions.
94 ASSERT(offset >= 0);
95 ASSERT(size >= 0);
96 ASSERT(this->IsInitialized());
97
98 // Clear the out count.
99 R_UNLESS(out_entry_count != nullptr, ResultNullptrArgument);
100 *out_entry_count = 0;
101
102 // Succeed if there's no range.
103 R_SUCCEED_IF(size == 0);
104
105 // If we have an output array, we need it to be non-null.
106 R_UNLESS(out_entries != nullptr || entry_count == 0, ResultNullptrArgument);
107
108 // Check that our range is valid.
109 BucketTree::Offsets table_offsets;
110 R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));
111
112 R_UNLESS(table_offsets.IsInclude(offset, size), ResultOutOfRange);
113
114 // Find the offset in our tree.
115 BucketTree::Visitor visitor;
116 R_TRY(m_table.Find(std::addressof(visitor), offset));
117 {
118 const auto entry_offset = visitor.Get<Entry>()->GetOffset();
119 R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset),
120 ResultInvalidAesCtrCounterExtendedEntryOffset);
121 }
122
123 // Prepare to loop over entries.
124 const auto end_offset = offset + static_cast<s64>(size);
125 s32 count = 0;
126
127 auto cur_entry = *visitor.Get<Entry>();
128 while (cur_entry.GetOffset() < end_offset) {
129 // Try to write the entry to the out list.
130 if (entry_count != 0) {
131 if (count >= entry_count) {
132 break;
133 }
134 std::memcpy(out_entries + count, std::addressof(cur_entry), sizeof(Entry));
135 }
136
137 count++;
138
139 // Advance.
140 if (visitor.CanMoveNext()) {
141 R_TRY(visitor.MoveNext());
142 cur_entry = *visitor.Get<Entry>();
143 } else {
144 break;
145 }
146 }
147
148 // Write the output count.
149 *out_entry_count = count;
150 R_SUCCEED();
151}
152
153size_t AesCtrCounterExtendedStorage::Read(u8* buffer, size_t size, size_t offset) const {
154 // Validate preconditions.
155 ASSERT(this->IsInitialized());
156
157 // Allow zero size.
158 if (size == 0) {
159 return size;
160 }
161
162 // Validate arguments.
163 ASSERT(buffer != nullptr);
164 ASSERT(Common::IsAligned(offset, BlockSize));
165 ASSERT(Common::IsAligned(size, BlockSize));
166
167 BucketTree::Offsets table_offsets;
168 ASSERT(R_SUCCEEDED(m_table.GetOffsets(std::addressof(table_offsets))));
169
170 ASSERT(table_offsets.IsInclude(offset, size));
171
172 // Read the data.
173 m_data_storage->Read(buffer, size, offset);
174
175 // Find the offset in our tree.
176 BucketTree::Visitor visitor;
177 ASSERT(R_SUCCEEDED(m_table.Find(std::addressof(visitor), offset)));
178 {
179 const auto entry_offset = visitor.Get<Entry>()->GetOffset();
180 ASSERT(Common::IsAligned(entry_offset, BlockSize));
181 ASSERT(0 <= entry_offset && table_offsets.IsInclude(entry_offset));
182 }
183
184 // Prepare to read in chunks.
185 u8* cur_data = static_cast<u8*>(buffer);
186 auto cur_offset = offset;
187 const auto end_offset = offset + static_cast<s64>(size);
188
189 while (cur_offset < end_offset) {
190 // Get the current entry.
191 const auto cur_entry = *visitor.Get<Entry>();
192
193 // Get and validate the entry's offset.
194 const auto cur_entry_offset = cur_entry.GetOffset();
195 ASSERT(static_cast<size_t>(cur_entry_offset) <= cur_offset);
196
197 // Get and validate the next entry offset.
198 s64 next_entry_offset;
199 if (visitor.CanMoveNext()) {
200 ASSERT(R_SUCCEEDED(visitor.MoveNext()));
201 next_entry_offset = visitor.Get<Entry>()->GetOffset();
202 ASSERT(table_offsets.IsInclude(next_entry_offset));
203 } else {
204 next_entry_offset = table_offsets.end_offset;
205 }
206 ASSERT(Common::IsAligned(next_entry_offset, BlockSize));
207 ASSERT(cur_offset < static_cast<size_t>(next_entry_offset));
208
209 // Get the offset of the entry in the data we read.
210 const auto data_offset = cur_offset - cur_entry_offset;
211 const auto data_size = (next_entry_offset - cur_entry_offset) - data_offset;
212 ASSERT(data_size > 0);
213
214 // Determine how much is left.
215 const auto remaining_size = end_offset - cur_offset;
216 const auto cur_size = static_cast<size_t>(std::min(remaining_size, data_size));
217 ASSERT(cur_size <= size);
218
219 // If necessary, perform decryption.
220 if (cur_entry.encryption_value == Entry::Encryption::Encrypted) {
221 // Make the CTR for the data we're decrypting.
222 const auto counter_offset = m_counter_offset + cur_entry_offset + data_offset;
223 NcaAesCtrUpperIv upper_iv = {
224 .part = {.generation = static_cast<u32>(cur_entry.generation),
225 .secure_value = m_secure_value}};
226
227 std::array<u8, IvSize> iv;
228 AesCtrStorage::MakeIv(iv.data(), IvSize, upper_iv.value, counter_offset);
229
230 // Decrypt.
231 m_decryptor->Decrypt(cur_data, cur_size, m_key, iv);
232 }
233
234 // Advance.
235 cur_data += cur_size;
236 cur_offset += cur_size;
237 }
238
239 return size;
240}
241
242void SoftwareDecryptor::Decrypt(u8* buf, size_t buf_size,
243 const std::array<u8, AesCtrCounterExtendedStorage::KeySize>& key,
244 const std::array<u8, AesCtrCounterExtendedStorage::IvSize>& iv) {
245 Core::Crypto::AESCipher<Core::Crypto::Key128, AesCtrCounterExtendedStorage::KeySize> cipher(
246 key, Core::Crypto::Mode::CTR);
247 cipher.SetIV(iv);
248 cipher.Transcode(buf, buf_size, buf, Core::Crypto::Op::Decrypt);
249}
250
251} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.h b/src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.h
new file mode 100644
index 000000000..d0e9ceed0
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.h
@@ -0,0 +1,114 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include <optional>
7
8#include "common/literals.h"
9#include "core/file_sys/fssystem/fs_i_storage.h"
10#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
11
12namespace FileSys {
13
14using namespace Common::Literals;
15
16class AesCtrCounterExtendedStorage : public IReadOnlyStorage {
17 YUZU_NON_COPYABLE(AesCtrCounterExtendedStorage);
18 YUZU_NON_MOVEABLE(AesCtrCounterExtendedStorage);
19
20public:
21 static constexpr size_t BlockSize = 0x10;
22 static constexpr size_t KeySize = 0x10;
23 static constexpr size_t IvSize = 0x10;
24 static constexpr size_t NodeSize = 16_KiB;
25
26 class IDecryptor {
27 public:
28 virtual ~IDecryptor() {}
29 virtual void Decrypt(u8* buf, size_t buf_size, const std::array<u8, KeySize>& key,
30 const std::array<u8, IvSize>& iv) = 0;
31 };
32
33 struct Entry {
34 enum class Encryption : u8 {
35 Encrypted = 0,
36 NotEncrypted = 1,
37 };
38
39 std::array<u8, sizeof(s64)> offset;
40 Encryption encryption_value;
41 std::array<u8, 3> reserved;
42 s32 generation;
43
44 void SetOffset(s64 value) {
45 std::memcpy(this->offset.data(), std::addressof(value), sizeof(s64));
46 }
47
48 s64 GetOffset() const {
49 s64 value;
50 std::memcpy(std::addressof(value), this->offset.data(), sizeof(s64));
51 return value;
52 }
53 };
54 static_assert(sizeof(Entry) == 0x10);
55 static_assert(alignof(Entry) == 4);
56 static_assert(std::is_trivial_v<Entry>);
57
58public:
59 static constexpr s64 QueryHeaderStorageSize() {
60 return BucketTree::QueryHeaderStorageSize();
61 }
62
63 static constexpr s64 QueryNodeStorageSize(s32 entry_count) {
64 return BucketTree::QueryNodeStorageSize(NodeSize, sizeof(Entry), entry_count);
65 }
66
67 static constexpr s64 QueryEntryStorageSize(s32 entry_count) {
68 return BucketTree::QueryEntryStorageSize(NodeSize, sizeof(Entry), entry_count);
69 }
70
71 static Result CreateSoftwareDecryptor(std::unique_ptr<IDecryptor>* out);
72
73public:
74 AesCtrCounterExtendedStorage()
75 : m_table(), m_data_storage(), m_secure_value(), m_counter_offset(), m_decryptor() {}
76 virtual ~AesCtrCounterExtendedStorage() {
77 this->Finalize();
78 }
79
80 Result Initialize(const void* key, size_t key_size, u32 secure_value, s64 counter_offset,
81 VirtualFile data_storage, VirtualFile node_storage, VirtualFile entry_storage,
82 s32 entry_count, std::unique_ptr<IDecryptor>&& decryptor);
83 void Finalize();
84
85 bool IsInitialized() const {
86 return m_table.IsInitialized();
87 }
88
89 virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
90
91 virtual size_t GetSize() const override {
92 BucketTree::Offsets offsets;
93 ASSERT(R_SUCCEEDED(m_table.GetOffsets(std::addressof(offsets))));
94
95 return offsets.end_offset;
96 }
97
98 Result GetEntryList(Entry* out_entries, s32* out_entry_count, s32 entry_count, s64 offset,
99 s64 size);
100
101private:
102 Result Initialize(const void* key, size_t key_size, u32 secure_value, VirtualFile data_storage,
103 VirtualFile table_storage);
104
105private:
106 mutable BucketTree m_table;
107 VirtualFile m_data_storage;
108 std::array<u8, KeySize> m_key;
109 u32 m_secure_value;
110 s64 m_counter_offset;
111 std::unique_ptr<IDecryptor> m_decryptor;
112};
113
114} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_aes_ctr_storage.cpp b/src/core/file_sys/fssystem/fssystem_aes_ctr_storage.cpp
new file mode 100644
index 000000000..b65aca18d
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_aes_ctr_storage.cpp
@@ -0,0 +1,129 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "common/alignment.h"
5#include "common/swap.h"
6#include "core/file_sys/fssystem/fssystem_aes_ctr_storage.h"
7#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
8#include "core/file_sys/fssystem/fssystem_utility.h"
9
10namespace FileSys {
11
12void AesCtrStorage::MakeIv(void* dst, size_t dst_size, u64 upper, s64 offset) {
13 ASSERT(dst != nullptr);
14 ASSERT(dst_size == IvSize);
15 ASSERT(offset >= 0);
16
17 const uintptr_t out_addr = reinterpret_cast<uintptr_t>(dst);
18
19 *reinterpret_cast<u64_be*>(out_addr + 0) = upper;
20 *reinterpret_cast<s64_be*>(out_addr + sizeof(u64)) = static_cast<s64>(offset / BlockSize);
21}
22
23AesCtrStorage::AesCtrStorage(VirtualFile base, const void* key, size_t key_size, const void* iv,
24 size_t iv_size)
25 : m_base_storage(std::move(base)) {
26 ASSERT(m_base_storage != nullptr);
27 ASSERT(key != nullptr);
28 ASSERT(iv != nullptr);
29 ASSERT(key_size == KeySize);
30 ASSERT(iv_size == IvSize);
31
32 std::memcpy(m_key.data(), key, KeySize);
33 std::memcpy(m_iv.data(), iv, IvSize);
34
35 m_cipher.emplace(m_key, Core::Crypto::Mode::CTR);
36}
37
38size_t AesCtrStorage::Read(u8* buffer, size_t size, size_t offset) const {
39 // Allow zero-size reads.
40 if (size == 0) {
41 return size;
42 }
43
44 // Ensure buffer is valid.
45 ASSERT(buffer != nullptr);
46
47 // We can only read at block aligned offsets.
48 ASSERT(Common::IsAligned(offset, BlockSize));
49 ASSERT(Common::IsAligned(size, BlockSize));
50
51 // Read the data.
52 m_base_storage->Read(buffer, size, offset);
53
54 // Setup the counter.
55 std::array<u8, IvSize> ctr;
56 std::memcpy(ctr.data(), m_iv.data(), IvSize);
57 AddCounter(ctr.data(), IvSize, offset / BlockSize);
58
59 // Decrypt.
60 m_cipher->SetIV(ctr);
61 m_cipher->Transcode(buffer, size, buffer, Core::Crypto::Op::Decrypt);
62
63 return size;
64}
65
66size_t AesCtrStorage::Write(const u8* buffer, size_t size, size_t offset) {
67 // Allow zero-size writes.
68 if (size == 0) {
69 return size;
70 }
71
72 // Ensure buffer is valid.
73 ASSERT(buffer != nullptr);
74
75 // We can only write at block aligned offsets.
76 ASSERT(Common::IsAligned(offset, BlockSize));
77 ASSERT(Common::IsAligned(size, BlockSize));
78
79 // Get a pooled buffer.
80 PooledBuffer pooled_buffer;
81 const bool use_work_buffer = true;
82 if (use_work_buffer) {
83 pooled_buffer.Allocate(size, BlockSize);
84 }
85
86 // Setup the counter.
87 std::array<u8, IvSize> ctr;
88 std::memcpy(ctr.data(), m_iv.data(), IvSize);
89 AddCounter(ctr.data(), IvSize, offset / BlockSize);
90
91 // Loop until all data is written.
92 size_t remaining = size;
93 s64 cur_offset = 0;
94 while (remaining > 0) {
95 // Determine data we're writing and where.
96 const size_t write_size =
97 use_work_buffer ? std::min(pooled_buffer.GetSize(), remaining) : remaining;
98
99 void* write_buf;
100 if (use_work_buffer) {
101 write_buf = pooled_buffer.GetBuffer();
102 } else {
103 write_buf = const_cast<u8*>(buffer);
104 }
105
106 // Encrypt the data.
107 m_cipher->SetIV(ctr);
108 m_cipher->Transcode(buffer + cur_offset, write_size, reinterpret_cast<u8*>(write_buf),
109 Core::Crypto::Op::Encrypt);
110
111 // Write the encrypted data.
112 m_base_storage->Write(reinterpret_cast<u8*>(write_buf), write_size, offset + cur_offset);
113
114 // Advance.
115 cur_offset += write_size;
116 remaining -= write_size;
117 if (remaining > 0) {
118 AddCounter(ctr.data(), IvSize, write_size / BlockSize);
119 }
120 }
121
122 return size;
123}
124
125size_t AesCtrStorage::GetSize() const {
126 return m_base_storage->GetSize();
127}
128
129} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_aes_ctr_storage.h b/src/core/file_sys/fssystem/fssystem_aes_ctr_storage.h
new file mode 100644
index 000000000..339e49697
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_aes_ctr_storage.h
@@ -0,0 +1,43 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include <optional>
7
8#include "core/crypto/aes_util.h"
9#include "core/crypto/key_manager.h"
10#include "core/file_sys/errors.h"
11#include "core/file_sys/fssystem/fs_i_storage.h"
12#include "core/file_sys/vfs.h"
13
14namespace FileSys {
15
16class AesCtrStorage : public IStorage {
17 YUZU_NON_COPYABLE(AesCtrStorage);
18 YUZU_NON_MOVEABLE(AesCtrStorage);
19
20public:
21 static constexpr size_t BlockSize = 0x10;
22 static constexpr size_t KeySize = 0x10;
23 static constexpr size_t IvSize = 0x10;
24
25public:
26 static void MakeIv(void* dst, size_t dst_size, u64 upper, s64 offset);
27
28public:
29 AesCtrStorage(VirtualFile base, const void* key, size_t key_size, const void* iv,
30 size_t iv_size);
31
32 virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
33 virtual size_t Write(const u8* buffer, size_t size, size_t offset) override;
34 virtual size_t GetSize() const override;
35
36private:
37 VirtualFile m_base_storage;
38 std::array<u8, KeySize> m_key;
39 std::array<u8, IvSize> m_iv;
40 mutable std::optional<Core::Crypto::AESCipher<Core::Crypto::Key128>> m_cipher;
41};
42
43} // namespace FileSys
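Together with the implementation above, the intended use is: build the 16-byte IV with MakeIv (the upper eight bytes carry the caller's nonce, the lower eight the starting block index), construct the storage over a base file, and keep all accesses BlockSize-aligned. A short sketch, where base, key and nonce stand in for values supplied elsewhere:

#include "core/file_sys/fssystem/fssystem_aes_ctr_storage.h"

namespace FileSys {

// Illustrative only; `base`, `key` and `nonce` are assumed inputs.
std::shared_ptr<AesCtrStorage> MakeCtrStorage(VirtualFile base,
                                              const std::array<u8, AesCtrStorage::KeySize>& key,
                                              u64 nonce) {
    // Upper 8 bytes: nonce (big-endian). Lower 8 bytes: starting block index (0 here).
    std::array<u8, AesCtrStorage::IvSize> iv{};
    AesCtrStorage::MakeIv(iv.data(), iv.size(), nonce, 0);

    // Reads and writes through the result must stay BlockSize-aligned, as Read/Write assert.
    return std::make_shared<AesCtrStorage>(std::move(base), key.data(), key.size(), iv.data(),
                                           iv.size());
}

} // namespace FileSys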
diff --git a/src/core/file_sys/fssystem/fssystem_aes_xts_storage.cpp b/src/core/file_sys/fssystem/fssystem_aes_xts_storage.cpp
new file mode 100644
index 000000000..022424229
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_aes_xts_storage.cpp
@@ -0,0 +1,112 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "common/alignment.h"
5#include "common/swap.h"
6#include "core/file_sys/errors.h"
7#include "core/file_sys/fssystem/fssystem_aes_xts_storage.h"
8#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
9#include "core/file_sys/fssystem/fssystem_utility.h"
10
11namespace FileSys {
12
13void AesXtsStorage::MakeAesXtsIv(void* dst, size_t dst_size, s64 offset, size_t block_size) {
14 ASSERT(dst != nullptr);
15 ASSERT(dst_size == IvSize);
16 ASSERT(offset >= 0);
17
18 const uintptr_t out_addr = reinterpret_cast<uintptr_t>(dst);
19
20 *reinterpret_cast<s64_be*>(out_addr + sizeof(s64)) = offset / block_size;
21}
22
23AesXtsStorage::AesXtsStorage(VirtualFile base, const void* key1, const void* key2, size_t key_size,
24 const void* iv, size_t iv_size, size_t block_size)
25 : m_base_storage(std::move(base)), m_block_size(block_size), m_mutex() {
26 ASSERT(m_base_storage != nullptr);
27 ASSERT(key1 != nullptr);
28 ASSERT(key2 != nullptr);
29 ASSERT(iv != nullptr);
30 ASSERT(key_size == KeySize);
31 ASSERT(iv_size == IvSize);
32 ASSERT(Common::IsAligned(m_block_size, AesBlockSize));
33
34 std::memcpy(m_key.data() + 0, key1, KeySize / 2);
35 std::memcpy(m_key.data() + 0x10, key2, KeySize / 2);
36 std::memcpy(m_iv.data(), iv, IvSize);
37
38 m_cipher.emplace(m_key, Core::Crypto::Mode::XTS);
39}
40
41size_t AesXtsStorage::Read(u8* buffer, size_t size, size_t offset) const {
42 // Allow zero-size reads.
43 if (size == 0) {
44 return size;
45 }
46
47 // Ensure buffer is valid.
48 ASSERT(buffer != nullptr);
49
50 // We can only read at block aligned offsets.
51 ASSERT(Common::IsAligned(offset, AesBlockSize));
52 ASSERT(Common::IsAligned(size, AesBlockSize));
53
54 // Read the data.
55 m_base_storage->Read(buffer, size, offset);
56
57 // Setup the counter.
58 std::array<u8, IvSize> ctr;
59 std::memcpy(ctr.data(), m_iv.data(), IvSize);
60 AddCounter(ctr.data(), IvSize, offset / m_block_size);
61
62 // Handle any unaligned data before the start.
63 size_t processed_size = 0;
64 if ((offset % m_block_size) != 0) {
65 // Determine the size of the pre-data read.
66 const size_t skip_size =
67 static_cast<size_t>(offset - Common::AlignDown(offset, m_block_size));
68 const size_t data_size = std::min(size, m_block_size - skip_size);
69
70 // Decrypt into a pooled buffer.
71 {
72 PooledBuffer tmp_buf(m_block_size, m_block_size);
73 ASSERT(tmp_buf.GetSize() >= m_block_size);
74
75 std::memset(tmp_buf.GetBuffer(), 0, skip_size);
76 std::memcpy(tmp_buf.GetBuffer() + skip_size, buffer, data_size);
77
78 m_cipher->SetIV(ctr);
79 m_cipher->Transcode(tmp_buf.GetBuffer(), m_block_size, tmp_buf.GetBuffer(),
80 Core::Crypto::Op::Decrypt);
81
82 std::memcpy(buffer, tmp_buf.GetBuffer() + skip_size, data_size);
83 }
84
85 AddCounter(ctr.data(), IvSize, 1);
86 processed_size += data_size;
87 ASSERT(processed_size == std::min(size, m_block_size - skip_size));
88 }
89
90 // Decrypt aligned chunks.
91 char* cur = reinterpret_cast<char*>(buffer) + processed_size;
92 size_t remaining = size - processed_size;
93 while (remaining > 0) {
94 const size_t cur_size = std::min(m_block_size, remaining);
95
96 m_cipher->SetIV(ctr);
97 m_cipher->Transcode(cur, cur_size, cur, Core::Crypto::Op::Decrypt);
98
99 remaining -= cur_size;
100 cur += cur_size;
101
102 AddCounter(ctr.data(), IvSize, 1);
103 }
104
105 return size;
106}
107
108size_t AesXtsStorage::GetSize() const {
109 return m_base_storage->GetSize();
110}
111
112} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_aes_xts_storage.h b/src/core/file_sys/fssystem/fssystem_aes_xts_storage.h
new file mode 100644
index 000000000..f342efb57
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_aes_xts_storage.h
@@ -0,0 +1,42 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include <optional>
7
8#include "core/crypto/aes_util.h"
9#include "core/crypto/key_manager.h"
10#include "core/file_sys/fssystem/fs_i_storage.h"
11
12namespace FileSys {
13
14class AesXtsStorage : public IReadOnlyStorage {
15 YUZU_NON_COPYABLE(AesXtsStorage);
16 YUZU_NON_MOVEABLE(AesXtsStorage);
17
18public:
19 static constexpr size_t AesBlockSize = 0x10;
20 static constexpr size_t KeySize = 0x20;
21 static constexpr size_t IvSize = 0x10;
22
23public:
24 static void MakeAesXtsIv(void* dst, size_t dst_size, s64 offset, size_t block_size);
25
26public:
27 AesXtsStorage(VirtualFile base, const void* key1, const void* key2, size_t key_size,
28 const void* iv, size_t iv_size, size_t block_size);
29
30 virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
31 virtual size_t GetSize() const override;
32
33private:
34 VirtualFile m_base_storage;
35 std::array<u8, KeySize> m_key;
36 std::array<u8, IvSize> m_iv;
37 const size_t m_block_size;
38 std::mutex m_mutex;
39 mutable std::optional<Core::Crypto::AESCipher<Core::Crypto::Key256>> m_cipher;
40};
41
42} // namespace FileSys
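The XTS wrapper follows the same pattern but is read-only and takes two 128-bit key halves plus a runtime sector size. A sketch of wrapping a raw header region, assuming a 0x200-byte sector (the usual NCA header sector size) and caller-supplied key halves; the helper name is invented:

#include "core/file_sys/fssystem/fssystem_aes_xts_storage.h"

namespace FileSys {

// Illustrative only; `base`, `key1` and `key2` are assumed inputs (each key half is 0x10 bytes).
std::shared_ptr<AesXtsStorage> MakeXtsStorage(VirtualFile base, const u8* key1, const u8* key2) {
    constexpr size_t SectorSize = 0x200; // assumed sector size

    // MakeAesXtsIv only fills the sector-number half, so start from a zeroed IV.
    std::array<u8, AesXtsStorage::IvSize> iv{};
    AesXtsStorage::MakeAesXtsIv(iv.data(), iv.size(), 0, SectorSize);

    // key_size is checked against KeySize (0x20), the combined length of the two halves.
    return std::make_shared<AesXtsStorage>(std::move(base), key1, key2, AesXtsStorage::KeySize,
                                           iv.data(), iv.size(), SectorSize);
}

} // namespace FileSys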
diff --git a/src/core/file_sys/fssystem/fssystem_alignment_matching_storage.h b/src/core/file_sys/fssystem/fssystem_alignment_matching_storage.h
new file mode 100644
index 000000000..f96691d03
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_alignment_matching_storage.h
@@ -0,0 +1,146 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "common/alignment.h"
7#include "core/file_sys/errors.h"
8#include "core/file_sys/fssystem/fs_i_storage.h"
9#include "core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.h"
10#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
11
12namespace FileSys {
13
14template <size_t DataAlign_, size_t BufferAlign_>
15class AlignmentMatchingStorage : public IStorage {
16 YUZU_NON_COPYABLE(AlignmentMatchingStorage);
17 YUZU_NON_MOVEABLE(AlignmentMatchingStorage);
18
19public:
20 static constexpr size_t DataAlign = DataAlign_;
21 static constexpr size_t BufferAlign = BufferAlign_;
22
23 static constexpr size_t DataAlignMax = 0x200;
24 static_assert(DataAlign <= DataAlignMax);
25 static_assert(Common::IsPowerOfTwo(DataAlign));
26 static_assert(Common::IsPowerOfTwo(BufferAlign));
27
28private:
29 VirtualFile m_base_storage;
30 s64 m_base_storage_size;
31
32public:
33 explicit AlignmentMatchingStorage(VirtualFile bs) : m_base_storage(std::move(bs)) {}
34
35 virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
36 // Allocate a work buffer on stack.
37 alignas(DataAlignMax) std::array<char, DataAlign> work_buf;
38
39 // Succeed if zero size.
40 if (size == 0) {
41 return size;
42 }
43
44 // Validate arguments.
45 ASSERT(buffer != nullptr);
46
47 s64 bs_size = this->GetSize();
48 ASSERT(R_SUCCEEDED(IStorage::CheckAccessRange(offset, size, bs_size)));
49
50 return AlignmentMatchingStorageImpl::Read(m_base_storage, work_buf.data(), work_buf.size(),
51 DataAlign, BufferAlign, offset, buffer, size);
52 }
53
54 virtual size_t Write(const u8* buffer, size_t size, size_t offset) override {
55 // Allocate a work buffer on stack.
56 alignas(DataAlignMax) std::array<char, DataAlign> work_buf;
57
58 // Succeed if zero size.
59 if (size == 0) {
60 return size;
61 }
62
63 // Validate arguments.
64 ASSERT(buffer != nullptr);
65
66 s64 bs_size = this->GetSize();
67 ASSERT(R_SUCCEEDED(IStorage::CheckAccessRange(offset, size, bs_size)));
68
69 return AlignmentMatchingStorageImpl::Write(m_base_storage, work_buf.data(), work_buf.size(),
70 DataAlign, BufferAlign, offset, buffer, size);
71 }
72
73 virtual size_t GetSize() const override {
74 return m_base_storage->GetSize();
75 }
76};
77
78template <size_t BufferAlign_>
79class AlignmentMatchingStoragePooledBuffer : public IStorage {
80 YUZU_NON_COPYABLE(AlignmentMatchingStoragePooledBuffer);
81 YUZU_NON_MOVEABLE(AlignmentMatchingStoragePooledBuffer);
82
83public:
84 static constexpr size_t BufferAlign = BufferAlign_;
85
86 static_assert(Common::IsPowerOfTwo(BufferAlign));
87
88private:
89 VirtualFile m_base_storage;
90 s64 m_base_storage_size;
91 size_t m_data_align;
92
93public:
94 explicit AlignmentMatchingStoragePooledBuffer(VirtualFile bs, size_t da)
95 : m_base_storage(std::move(bs)), m_data_align(da) {
96 ASSERT(Common::IsPowerOfTwo(da));
97 }
98
99 virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
100 // Succeed if zero size.
101 if (size == 0) {
102 return size;
103 }
104
105 // Validate arguments.
106 ASSERT(buffer != nullptr);
107
108 s64 bs_size = this->GetSize();
109 ASSERT(R_SUCCEEDED(IStorage::CheckAccessRange(offset, size, bs_size)));
110
111 // Allocate a pooled buffer.
112 PooledBuffer pooled_buffer;
113 pooled_buffer.AllocateParticularlyLarge(m_data_align, m_data_align);
114
115 return AlignmentMatchingStorageImpl::Read(m_base_storage, pooled_buffer.GetBuffer(),
116 pooled_buffer.GetSize(), m_data_align,
117 BufferAlign, offset, buffer, size);
118 }
119
120 virtual size_t Write(const u8* buffer, size_t size, size_t offset) override {
121 // Succeed if zero size.
122 if (size == 0) {
123 return size;
124 }
125
126 // Validate arguments.
127 ASSERT(buffer != nullptr);
128
129 s64 bs_size = this->GetSize();
130 ASSERT(R_SUCCEEDED(IStorage::CheckAccessRange(offset, size, bs_size)));
131
132 // Allocate a pooled buffer.
133 PooledBuffer pooled_buffer;
134 pooled_buffer.AllocateParticularlyLarge(m_data_align, m_data_align);
135
136 return AlignmentMatchingStorageImpl::Write(m_base_storage, pooled_buffer.GetBuffer(),
137 pooled_buffer.GetSize(), m_data_align,
138 BufferAlign, offset, buffer, size);
139 }
140
141 virtual size_t GetSize() const override {
142 return m_base_storage->GetSize();
143 }
144};
145
146} // namespace FileSys
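These adapters let callers issue arbitrarily aligned I/O against a backend that only accepts aligned accesses: the template variant keeps its bounce buffer on the stack (data alignment up to 0x200), while the pooled-buffer variant takes the alignment at runtime. A sketch, assuming a base storage with 0x200-byte granularity:

#include "core/file_sys/fssystem/fssystem_alignment_matching_storage.h"

namespace FileSys {

// Illustrative only; `base` is an assumed sector-granular storage.
std::shared_ptr<AlignmentMatchingStorage<0x200, 1>> MakeByteAddressable(VirtualFile base) {
    // DataAlign = 0x200 (backend granularity), BufferAlign = 1 (no requirement on caller buffers).
    return std::make_shared<AlignmentMatchingStorage<0x200, 1>>(std::move(base));
}

} // namespace FileSys

A byte-granular access such as Read(out, 3, 0x1FF) on the result is then serviced internally with two aligned 0x200-byte reads of the base.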
diff --git a/src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.cpp b/src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.cpp
new file mode 100644
index 000000000..641c888ae
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.cpp
@@ -0,0 +1,204 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "common/alignment.h"
5#include "core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.h"
6
7namespace FileSys {
8
9namespace {
10
11template <typename T>
12constexpr size_t GetRoundDownDifference(T x, size_t align) {
13 return static_cast<size_t>(x - Common::AlignDown(x, align));
14}
15
16template <typename T>
17constexpr size_t GetRoundUpDifference(T x, size_t align) {
18 return static_cast<size_t>(Common::AlignUp(x, align) - x);
19}
20
21template <typename T>
22size_t GetRoundUpDifference(T* x, size_t align) {
23 return GetRoundUpDifference(reinterpret_cast<uintptr_t>(x), align);
24}
25
26} // namespace
27
28size_t AlignmentMatchingStorageImpl::Read(VirtualFile base_storage, char* work_buf,
29 size_t work_buf_size, size_t data_alignment,
30 size_t buffer_alignment, s64 offset, u8* buffer,
31 size_t size) {
32 // Check preconditions.
33 ASSERT(work_buf_size >= data_alignment);
34
35 // Succeed if zero size.
36 if (size == 0) {
37 return size;
38 }
39
40 // Validate arguments.
41 ASSERT(buffer != nullptr);
42
43 // Determine extents.
44 u8* aligned_core_buffer;
45 s64 core_offset;
46 size_t core_size;
47 size_t buffer_gap;
48 size_t offset_gap;
49 s64 covered_offset;
50
51 const size_t offset_round_up_difference = GetRoundUpDifference(offset, data_alignment);
52 if (Common::IsAligned(reinterpret_cast<uintptr_t>(buffer) + offset_round_up_difference,
53 buffer_alignment)) {
54 aligned_core_buffer = buffer + offset_round_up_difference;
55
56 core_offset = Common::AlignUp(offset, data_alignment);
57 core_size = (size < offset_round_up_difference)
58 ? 0
59 : Common::AlignDown(size - offset_round_up_difference, data_alignment);
60 buffer_gap = 0;
61 offset_gap = 0;
62
63 covered_offset = core_size > 0 ? core_offset : offset;
64 } else {
65 const size_t buffer_round_up_difference = GetRoundUpDifference(buffer, buffer_alignment);
66
67 aligned_core_buffer = buffer + buffer_round_up_difference;
68
69 core_offset = Common::AlignDown(offset, data_alignment);
70 core_size = (size < buffer_round_up_difference)
71 ? 0
72 : Common::AlignDown(size - buffer_round_up_difference, data_alignment);
73 buffer_gap = buffer_round_up_difference;
74 offset_gap = GetRoundDownDifference(offset, data_alignment);
75
76 covered_offset = offset;
77 }
78
79 // Read the core portion.
80 if (core_size > 0) {
81 base_storage->Read(aligned_core_buffer, core_size, core_offset);
82
83 if (offset_gap != 0 || buffer_gap != 0) {
84 std::memmove(aligned_core_buffer - buffer_gap, aligned_core_buffer + offset_gap,
85 core_size - offset_gap);
86 core_size -= offset_gap;
87 }
88 }
89
90 // Handle the head portion.
91 if (offset < covered_offset) {
92 const s64 head_offset = Common::AlignDown(offset, data_alignment);
93 const size_t head_size = static_cast<size_t>(covered_offset - offset);
94
95 ASSERT(GetRoundDownDifference(offset, data_alignment) + head_size <= work_buf_size);
96
97 base_storage->Read(reinterpret_cast<u8*>(work_buf), data_alignment, head_offset);
98 std::memcpy(buffer, work_buf + GetRoundDownDifference(offset, data_alignment), head_size);
99 }
100
101 // Handle the tail portion.
102 s64 tail_offset = covered_offset + core_size;
103 size_t remaining_tail_size = static_cast<size_t>((offset + size) - tail_offset);
104 while (remaining_tail_size > 0) {
105 const auto aligned_tail_offset = Common::AlignDown(tail_offset, data_alignment);
106 const auto cur_size =
107 std::min(static_cast<size_t>(aligned_tail_offset + data_alignment - tail_offset),
108 remaining_tail_size);
109 base_storage->Read(reinterpret_cast<u8*>(work_buf), data_alignment, aligned_tail_offset);
110
111 ASSERT((tail_offset - offset) + cur_size <= size);
112 ASSERT((tail_offset - aligned_tail_offset) + cur_size <= data_alignment);
113 std::memcpy(reinterpret_cast<char*>(buffer) + (tail_offset - offset),
114 work_buf + (tail_offset - aligned_tail_offset), cur_size);
115
116 remaining_tail_size -= cur_size;
117 tail_offset += cur_size;
118 }
119
120 return size;
121}
122
123size_t AlignmentMatchingStorageImpl::Write(VirtualFile base_storage, char* work_buf,
124 size_t work_buf_size, size_t data_alignment,
125 size_t buffer_alignment, s64 offset, const u8* buffer,
126 size_t size) {
127 // Check preconditions.
128 ASSERT(work_buf_size >= data_alignment);
129
130 // Succeed if zero size.
131 if (size == 0) {
132 return size;
133 }
134
135 // Validate arguments.
136 ASSERT(buffer != nullptr);
137
138 // Determine extents.
139 const u8* aligned_core_buffer;
140 s64 core_offset;
141 size_t core_size;
142 s64 covered_offset;
143
144 const size_t offset_round_up_difference = GetRoundUpDifference(offset, data_alignment);
145 if (Common::IsAligned(reinterpret_cast<uintptr_t>(buffer) + offset_round_up_difference,
146 buffer_alignment)) {
147 aligned_core_buffer = buffer + offset_round_up_difference;
148
149 core_offset = Common::AlignUp(offset, data_alignment);
150 core_size = (size < offset_round_up_difference)
151 ? 0
152 : Common::AlignDown(size - offset_round_up_difference, data_alignment);
153
154 covered_offset = core_size > 0 ? core_offset : offset;
155 } else {
156 aligned_core_buffer = nullptr;
157
158 core_offset = Common::AlignDown(offset, data_alignment);
159 core_size = 0;
160
161 covered_offset = offset;
162 }
163
164 // Write the core portion.
165 if (core_size > 0) {
166 base_storage->Write(aligned_core_buffer, core_size, core_offset);
167 }
168
169 // Handle the head portion.
170 if (offset < covered_offset) {
171 const s64 head_offset = Common::AlignDown(offset, data_alignment);
172 const size_t head_size = static_cast<size_t>(covered_offset - offset);
173
174 ASSERT((offset - head_offset) + head_size <= data_alignment);
175
176 base_storage->Read(reinterpret_cast<u8*>(work_buf), data_alignment, head_offset);
177 std::memcpy(work_buf + (offset - head_offset), buffer, head_size);
178 base_storage->Write(reinterpret_cast<u8*>(work_buf), data_alignment, head_offset);
179 }
180
181 // Handle the tail portion.
182 s64 tail_offset = covered_offset + core_size;
183 size_t remaining_tail_size = static_cast<size_t>((offset + size) - tail_offset);
184 while (remaining_tail_size > 0) {
185 ASSERT(static_cast<size_t>(tail_offset - offset) < size);
186
187 const auto aligned_tail_offset = Common::AlignDown(tail_offset, data_alignment);
188 const auto cur_size =
189 std::min(static_cast<size_t>(aligned_tail_offset + data_alignment - tail_offset),
190 remaining_tail_size);
191
192 base_storage->Read(reinterpret_cast<u8*>(work_buf), data_alignment, aligned_tail_offset);
193 std::memcpy(work_buf + GetRoundDownDifference(tail_offset, data_alignment),
194 buffer + (tail_offset - offset), cur_size);
195 base_storage->Write(reinterpret_cast<u8*>(work_buf), data_alignment, aligned_tail_offset);
196
197 remaining_tail_size -= cur_size;
198 tail_offset += cur_size;
199 }
200
201 return size;
202}
203
204} // namespace FileSys
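A worked trace makes the head/core/tail split in Read above concrete (values chosen for illustration):

// data_alignment = 0x200, caller buffer already meets buffer_alignment,
// offset = 0x1F0, size = 0x420, so offset_round_up_difference = 0x10.
//   core: base offset 0x200, size 0x400, read directly into buffer + 0x10
//   head: aligned block at 0x000 read into work_buf; bytes 0x1F0..0x200
//         copied to buffer[0x000..0x010)
//   tail: aligned block at 0x600 read into work_buf; bytes 0x600..0x610
//         copied to buffer[0x410..0x420)
// Total delivered: 0x10 + 0x400 + 0x10 = 0x420 bytes.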
diff --git a/src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.h b/src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.h
new file mode 100644
index 000000000..4a05b0e88
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.h
@@ -0,0 +1,21 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "core/file_sys/errors.h"
7#include "core/file_sys/fssystem/fs_i_storage.h"
8
9namespace FileSys {
10
11class AlignmentMatchingStorageImpl {
12public:
13 static size_t Read(VirtualFile base_storage, char* work_buf, size_t work_buf_size,
14 size_t data_alignment, size_t buffer_alignment, s64 offset, u8* buffer,
15 size_t size);
16 static size_t Write(VirtualFile base_storage, char* work_buf, size_t work_buf_size,
17 size_t data_alignment, size_t buffer_alignment, s64 offset,
18 const u8* buffer, size_t size);
19};
20
21} // namespace FileSys
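For callers that already manage a bounce buffer, these impl functions can also be used directly; the classes in fssystem_alignment_matching_storage.h are thin wrappers over them. A sketch, with the alignment values as assumptions:

#include <vector>

#include "core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.h"

namespace FileSys {

// Illustrative only: read `size` bytes at an arbitrary `offset` from a base
// storage that requires 0x200-byte-aligned accesses.
size_t ReadUnaligned(VirtualFile base, u8* out, size_t size, s64 offset) {
    constexpr size_t DataAlignment = 0x200;
    constexpr size_t BufferAlignment = 1; // no alignment requirement on `out`

    // The work buffer must hold at least one aligned block.
    std::vector<char> work(DataAlignment);
    return AlignmentMatchingStorageImpl::Read(std::move(base), work.data(), work.size(),
                                              DataAlignment, BufferAlignment, offset, out, size);
}

} // namespace FileSys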
diff --git a/src/core/file_sys/fssystem/fssystem_bucket_tree.cpp b/src/core/file_sys/fssystem/fssystem_bucket_tree.cpp
new file mode 100644
index 000000000..af8541009
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_bucket_tree.cpp
@@ -0,0 +1,598 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "core/file_sys/errors.h"
5#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
6#include "core/file_sys/fssystem/fssystem_bucket_tree_utils.h"
7#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
8
9namespace FileSys {
10
11namespace {
12
13using Node = impl::BucketTreeNode<const s64*>;
14static_assert(sizeof(Node) == sizeof(BucketTree::NodeHeader));
15static_assert(std::is_trivial_v<Node>);
16
17constexpr inline s32 NodeHeaderSize = sizeof(BucketTree::NodeHeader);
18
19class StorageNode {
20private:
21 class Offset {
22 public:
23 using difference_type = s64;
24
25 private:
26 s64 m_offset;
27 s32 m_stride;
28
29 public:
30 constexpr Offset(s64 offset, s32 stride) : m_offset(offset), m_stride(stride) {}
31
32 constexpr Offset& operator++() {
33 m_offset += m_stride;
34 return *this;
35 }
36 constexpr Offset operator++(int) {
37 Offset ret(*this);
38 m_offset += m_stride;
39 return ret;
40 }
41
42 constexpr Offset& operator--() {
43 m_offset -= m_stride;
44 return *this;
45 }
46 constexpr Offset operator--(int) {
47 Offset ret(*this);
48 m_offset -= m_stride;
49 return ret;
50 }
51
52 constexpr difference_type operator-(const Offset& rhs) const {
53 return (m_offset - rhs.m_offset) / m_stride;
54 }
55
56 constexpr Offset operator+(difference_type ofs) const {
57 return Offset(m_offset + ofs * m_stride, m_stride);
58 }
59 constexpr Offset operator-(difference_type ofs) const {
60 return Offset(m_offset - ofs * m_stride, m_stride);
61 }
62
63 constexpr Offset& operator+=(difference_type ofs) {
64 m_offset += ofs * m_stride;
65 return *this;
66 }
67 constexpr Offset& operator-=(difference_type ofs) {
68 m_offset -= ofs * m_stride;
69 return *this;
70 }
71
72 constexpr bool operator==(const Offset& rhs) const {
73 return m_offset == rhs.m_offset;
74 }
75 constexpr bool operator!=(const Offset& rhs) const {
76 return m_offset != rhs.m_offset;
77 }
78
79 constexpr s64 Get() const {
80 return m_offset;
81 }
82 };
83
84private:
85 const Offset m_start;
86 const s32 m_count;
87 s32 m_index;
88
89public:
90 StorageNode(size_t size, s32 count)
91 : m_start(NodeHeaderSize, static_cast<s32>(size)), m_count(count), m_index(-1) {}
92 StorageNode(s64 ofs, size_t size, s32 count)
93 : m_start(NodeHeaderSize + ofs, static_cast<s32>(size)), m_count(count), m_index(-1) {}
94
95 s32 GetIndex() const {
96 return m_index;
97 }
98
99 void Find(const char* buffer, s64 virtual_address) {
100 s32 end = m_count;
101 auto pos = m_start;
102
103 while (end > 0) {
104 auto half = end / 2;
105 auto mid = pos + half;
106
107 s64 offset = 0;
108 std::memcpy(std::addressof(offset), buffer + mid.Get(), sizeof(s64));
109
110 if (offset <= virtual_address) {
111 pos = mid + 1;
112 end -= half + 1;
113 } else {
114 end = half;
115 }
116 }
117
118 m_index = static_cast<s32>(pos - m_start) - 1;
119 }
120
121 Result Find(VirtualFile storage, s64 virtual_address) {
122 s32 end = m_count;
123 auto pos = m_start;
124
125 while (end > 0) {
126 auto half = end / 2;
127 auto mid = pos + half;
128
129 s64 offset = 0;
130 storage->ReadObject(std::addressof(offset), mid.Get());
131
132 if (offset <= virtual_address) {
133 pos = mid + 1;
134 end -= half + 1;
135 } else {
136 end = half;
137 }
138 }
139
140 m_index = static_cast<s32>(pos - m_start) - 1;
141 R_SUCCEED();
142 }
143};
144
145} // namespace
146
147void BucketTree::Header::Format(s32 entry_count_) {
148 ASSERT(entry_count_ >= 0);
149
150 this->magic = Magic;
151 this->version = Version;
152 this->entry_count = entry_count_;
153 this->reserved = 0;
154}
155
156Result BucketTree::Header::Verify() const {
157 R_UNLESS(this->magic == Magic, ResultInvalidBucketTreeSignature);
158 R_UNLESS(this->entry_count >= 0, ResultInvalidBucketTreeEntryCount);
159 R_UNLESS(this->version <= Version, ResultUnsupportedVersion);
160 R_SUCCEED();
161}
162
163Result BucketTree::NodeHeader::Verify(s32 node_index, size_t node_size, size_t entry_size) const {
164 R_UNLESS(this->index == node_index, ResultInvalidBucketTreeNodeIndex);
165 R_UNLESS(entry_size != 0 && node_size >= entry_size + NodeHeaderSize, ResultInvalidSize);
166
167 const size_t max_entry_count = (node_size - NodeHeaderSize) / entry_size;
168 R_UNLESS(this->count > 0 && static_cast<size_t>(this->count) <= max_entry_count,
169 ResultInvalidBucketTreeNodeEntryCount);
170 R_UNLESS(this->offset >= 0, ResultInvalidBucketTreeNodeOffset);
171
172 R_SUCCEED();
173}
174
175Result BucketTree::Initialize(VirtualFile node_storage, VirtualFile entry_storage, size_t node_size,
176 size_t entry_size, s32 entry_count) {
177 // Validate preconditions.
178 ASSERT(entry_size >= sizeof(s64));
179 ASSERT(node_size >= entry_size + sizeof(NodeHeader));
180 ASSERT(NodeSizeMin <= node_size && node_size <= NodeSizeMax);
181 ASSERT(Common::IsPowerOfTwo(node_size));
182 ASSERT(!this->IsInitialized());
183
184 // Ensure valid entry count.
185 R_UNLESS(entry_count > 0, ResultInvalidArgument);
186
187 // Allocate node.
188 R_UNLESS(m_node_l1.Allocate(node_size), ResultBufferAllocationFailed);
189 ON_RESULT_FAILURE {
190 m_node_l1.Free(node_size);
191 };
192
193 // Read node.
194 node_storage->Read(reinterpret_cast<u8*>(m_node_l1.Get()), node_size);
195
196 // Verify node.
197 R_TRY(m_node_l1->Verify(0, node_size, sizeof(s64)));
198
199 // Validate offsets.
200 const auto offset_count = GetOffsetCount(node_size);
201 const auto entry_set_count = GetEntrySetCount(node_size, entry_size, entry_count);
202 const auto* const node = m_node_l1.Get<Node>();
203
204 s64 start_offset;
205 if (offset_count < entry_set_count && node->GetCount() < offset_count) {
206 start_offset = *node->GetEnd();
207 } else {
208 start_offset = *node->GetBegin();
209 }
210 const auto end_offset = node->GetEndOffset();
211
212 R_UNLESS(0 <= start_offset && start_offset <= node->GetBeginOffset(),
213 ResultInvalidBucketTreeEntryOffset);
214 R_UNLESS(start_offset < end_offset, ResultInvalidBucketTreeEntryOffset);
215
216 // Set member variables.
217 m_node_storage = node_storage;
218 m_entry_storage = entry_storage;
219 m_node_size = node_size;
220 m_entry_size = entry_size;
221 m_entry_count = entry_count;
222 m_offset_count = offset_count;
223 m_entry_set_count = entry_set_count;
224
225 m_offset_cache.offsets.start_offset = start_offset;
226 m_offset_cache.offsets.end_offset = end_offset;
227 m_offset_cache.is_initialized = true;
228
229 // We succeeded.
230 R_SUCCEED();
231}
232
233void BucketTree::Initialize(size_t node_size, s64 end_offset) {
234 ASSERT(NodeSizeMin <= node_size && node_size <= NodeSizeMax);
235 ASSERT(Common::IsPowerOfTwo(node_size));
236 ASSERT(end_offset > 0);
237 ASSERT(!this->IsInitialized());
238
239 m_node_size = node_size;
240
241 m_offset_cache.offsets.start_offset = 0;
242 m_offset_cache.offsets.end_offset = end_offset;
243 m_offset_cache.is_initialized = true;
244}
245
246void BucketTree::Finalize() {
247 if (this->IsInitialized()) {
248 m_node_storage = VirtualFile();
249 m_entry_storage = VirtualFile();
250 m_node_l1.Free(m_node_size);
251 m_node_size = 0;
252 m_entry_size = 0;
253 m_entry_count = 0;
254 m_offset_count = 0;
255 m_entry_set_count = 0;
256
257 m_offset_cache.offsets.start_offset = 0;
258 m_offset_cache.offsets.end_offset = 0;
259 m_offset_cache.is_initialized = false;
260 }
261}
262
263Result BucketTree::Find(Visitor* visitor, s64 virtual_address) {
264 ASSERT(visitor != nullptr);
265 ASSERT(this->IsInitialized());
266
267 R_UNLESS(virtual_address >= 0, ResultInvalidOffset);
268 R_UNLESS(!this->IsEmpty(), ResultOutOfRange);
269
270 BucketTree::Offsets offsets;
271 R_TRY(this->GetOffsets(std::addressof(offsets)));
272
273 R_TRY(visitor->Initialize(this, offsets));
274
275 R_RETURN(visitor->Find(virtual_address));
276}
277
278Result BucketTree::InvalidateCache() {
279 // Reset our offsets.
280 m_offset_cache.is_initialized = false;
281
282 R_SUCCEED();
283}
284
285Result BucketTree::EnsureOffsetCache() {
286 // If we already have an offset cache, we're good.
287 R_SUCCEED_IF(m_offset_cache.is_initialized);
288
289 // Acquire exclusive right to edit the offset cache.
290 std::scoped_lock lk(m_offset_cache.mutex);
291
292 // Check again, to be sure.
293 R_SUCCEED_IF(m_offset_cache.is_initialized);
294
295 // Read/verify L1.
296 m_node_storage->Read(reinterpret_cast<u8*>(m_node_l1.Get()), m_node_size);
297 R_TRY(m_node_l1->Verify(0, m_node_size, sizeof(s64)));
298
299 // Get the node.
300 auto* const node = m_node_l1.Get<Node>();
301
302 s64 start_offset;
303 if (m_offset_count < m_entry_set_count && node->GetCount() < m_offset_count) {
304 start_offset = *node->GetEnd();
305 } else {
306 start_offset = *node->GetBegin();
307 }
308 const auto end_offset = node->GetEndOffset();
309
310 R_UNLESS(0 <= start_offset && start_offset <= node->GetBeginOffset(),
311 ResultInvalidBucketTreeEntryOffset);
312 R_UNLESS(start_offset < end_offset, ResultInvalidBucketTreeEntryOffset);
313
314 m_offset_cache.offsets.start_offset = start_offset;
315 m_offset_cache.offsets.end_offset = end_offset;
316 m_offset_cache.is_initialized = true;
317
318 R_SUCCEED();
319}
320
321Result BucketTree::Visitor::Initialize(const BucketTree* tree, const BucketTree::Offsets& offsets) {
322 ASSERT(tree != nullptr);
323 ASSERT(m_tree == nullptr || m_tree == tree);
324
325 if (m_entry == nullptr) {
326 m_entry = ::operator new(tree->m_entry_size);
327 R_UNLESS(m_entry != nullptr, ResultBufferAllocationFailed);
328
329 m_tree = tree;
330 m_offsets = offsets;
331 }
332
333 R_SUCCEED();
334}
335
336Result BucketTree::Visitor::MoveNext() {
337 R_UNLESS(this->IsValid(), ResultOutOfRange);
338
339 // Invalidate our index, and read the header for the next index.
340 auto entry_index = m_entry_index + 1;
341 if (entry_index == m_entry_set.info.count) {
342 const auto entry_set_index = m_entry_set.info.index + 1;
343 R_UNLESS(entry_set_index < m_entry_set_count, ResultOutOfRange);
344
345 m_entry_index = -1;
346
347 const auto end = m_entry_set.info.end;
348
349 const auto entry_set_size = m_tree->m_node_size;
350 const auto entry_set_offset = entry_set_index * static_cast<s64>(entry_set_size);
351
352 m_tree->m_entry_storage->ReadObject(std::addressof(m_entry_set), entry_set_offset);
353 R_TRY(m_entry_set.header.Verify(entry_set_index, entry_set_size, m_tree->m_entry_size));
354
355 R_UNLESS(m_entry_set.info.start == end && m_entry_set.info.start < m_entry_set.info.end,
356 ResultInvalidBucketTreeEntrySetOffset);
357
358 entry_index = 0;
359 } else {
360 m_entry_index = -1;
361 }
362
363 // Read the new entry.
364 const auto entry_size = m_tree->m_entry_size;
365 const auto entry_offset = impl::GetBucketTreeEntryOffset(
366 m_entry_set.info.index, m_tree->m_node_size, entry_size, entry_index);
367 m_tree->m_entry_storage->Read(reinterpret_cast<u8*>(m_entry), entry_size, entry_offset);
368
369 // Note that we changed index.
370 m_entry_index = entry_index;
371 R_SUCCEED();
372}
373
374Result BucketTree::Visitor::MovePrevious() {
375 R_UNLESS(this->IsValid(), ResultOutOfRange);
376
377 // Invalidate our index, and read the header for the previous index.
378 auto entry_index = m_entry_index;
379 if (entry_index == 0) {
380 R_UNLESS(m_entry_set.info.index > 0, ResultOutOfRange);
381
382 m_entry_index = -1;
383
384 const auto start = m_entry_set.info.start;
385
386 const auto entry_set_size = m_tree->m_node_size;
387 const auto entry_set_index = m_entry_set.info.index - 1;
388 const auto entry_set_offset = entry_set_index * static_cast<s64>(entry_set_size);
389
390 m_tree->m_entry_storage->ReadObject(std::addressof(m_entry_set), entry_set_offset);
391 R_TRY(m_entry_set.header.Verify(entry_set_index, entry_set_size, m_tree->m_entry_size));
392
393 R_UNLESS(m_entry_set.info.end == start && m_entry_set.info.start < m_entry_set.info.end,
394 ResultInvalidBucketTreeEntrySetOffset);
395
396 entry_index = m_entry_set.info.count;
397 } else {
398 m_entry_index = -1;
399 }
400
401 --entry_index;
402
403 // Read the new entry.
404 const auto entry_size = m_tree->m_entry_size;
405 const auto entry_offset = impl::GetBucketTreeEntryOffset(
406 m_entry_set.info.index, m_tree->m_node_size, entry_size, entry_index);
407 m_tree->m_entry_storage->Read(reinterpret_cast<u8*>(m_entry), entry_size, entry_offset);
408
409 // Note that we changed index.
410 m_entry_index = entry_index;
411 R_SUCCEED();
412}
413
414Result BucketTree::Visitor::Find(s64 virtual_address) {
415 ASSERT(m_tree != nullptr);
416
417 // Get the node.
418 const auto* const node = m_tree->m_node_l1.Get<Node>();
419 R_UNLESS(virtual_address < node->GetEndOffset(), ResultOutOfRange);
420
421 // Get the entry set index.
422 s32 entry_set_index = -1;
423 if (m_tree->IsExistOffsetL2OnL1() && virtual_address < node->GetBeginOffset()) {
424 const auto start = node->GetEnd();
425 const auto end = node->GetBegin() + m_tree->m_offset_count;
426
427 auto pos = std::upper_bound(start, end, virtual_address);
428 R_UNLESS(start < pos, ResultOutOfRange);
429 --pos;
430
431 entry_set_index = static_cast<s32>(pos - start);
432 } else {
433 const auto start = node->GetBegin();
434 const auto end = node->GetEnd();
435
436 auto pos = std::upper_bound(start, end, virtual_address);
437 R_UNLESS(start < pos, ResultOutOfRange);
438 --pos;
439
440 if (m_tree->IsExistL2()) {
441 const auto node_index = static_cast<s32>(pos - start);
442 R_UNLESS(0 <= node_index && node_index < m_tree->m_offset_count,
443 ResultInvalidBucketTreeNodeOffset);
444
445 R_TRY(this->FindEntrySet(std::addressof(entry_set_index), virtual_address, node_index));
446 } else {
447 entry_set_index = static_cast<s32>(pos - start);
448 }
449 }
450
451 // Validate the entry set index.
452 R_UNLESS(0 <= entry_set_index && entry_set_index < m_tree->m_entry_set_count,
453 ResultInvalidBucketTreeNodeOffset);
454
455 // Find the entry.
456 R_TRY(this->FindEntry(virtual_address, entry_set_index));
457
458 // Set count.
459 m_entry_set_count = m_tree->m_entry_set_count;
460 R_SUCCEED();
461}
462
463Result BucketTree::Visitor::FindEntrySet(s32* out_index, s64 virtual_address, s32 node_index) {
464 const auto node_size = m_tree->m_node_size;
465
466 PooledBuffer pool(node_size, 1);
467 if (node_size <= pool.GetSize()) {
468 R_RETURN(
469 this->FindEntrySetWithBuffer(out_index, virtual_address, node_index, pool.GetBuffer()));
470 } else {
471 pool.Deallocate();
472 R_RETURN(this->FindEntrySetWithoutBuffer(out_index, virtual_address, node_index));
473 }
474}
475
476Result BucketTree::Visitor::FindEntrySetWithBuffer(s32* out_index, s64 virtual_address,
477 s32 node_index, char* buffer) {
478 // Calculate node extents.
479 const auto node_size = m_tree->m_node_size;
480 const auto node_offset = (node_index + 1) * static_cast<s64>(node_size);
481 VirtualFile storage = m_tree->m_node_storage;
482
483 // Read the node.
484 storage->Read(reinterpret_cast<u8*>(buffer), node_size, node_offset);
485
486 // Validate the header.
487 NodeHeader header;
488 std::memcpy(std::addressof(header), buffer, NodeHeaderSize);
489 R_TRY(header.Verify(node_index, node_size, sizeof(s64)));
490
491 // Create the node, and find.
492 StorageNode node(sizeof(s64), header.count);
493 node.Find(buffer, virtual_address);
494 R_UNLESS(node.GetIndex() >= 0, ResultInvalidBucketTreeVirtualOffset);
495
496 // Return the index.
497 *out_index = static_cast<s32>(m_tree->GetEntrySetIndex(header.index, node.GetIndex()));
498 R_SUCCEED();
499}
500
501Result BucketTree::Visitor::FindEntrySetWithoutBuffer(s32* out_index, s64 virtual_address,
502 s32 node_index) {
503 // Calculate node extents.
504 const auto node_size = m_tree->m_node_size;
505 const auto node_offset = (node_index + 1) * static_cast<s64>(node_size);
506 VirtualFile storage = m_tree->m_node_storage;
507
508 // Read and validate the header.
509 NodeHeader header;
510 storage->ReadObject(std::addressof(header), node_offset);
511 R_TRY(header.Verify(node_index, node_size, sizeof(s64)));
512
513 // Create the node, and find.
514 StorageNode node(node_offset, sizeof(s64), header.count);
515 R_TRY(node.Find(storage, virtual_address));
516 R_UNLESS(node.GetIndex() >= 0, ResultOutOfRange);
517
518 // Return the index.
519 *out_index = static_cast<s32>(m_tree->GetEntrySetIndex(header.index, node.GetIndex()));
520 R_SUCCEED();
521}
522
523Result BucketTree::Visitor::FindEntry(s64 virtual_address, s32 entry_set_index) {
524 const auto entry_set_size = m_tree->m_node_size;
525
526 PooledBuffer pool(entry_set_size, 1);
527 if (entry_set_size <= pool.GetSize()) {
528 R_RETURN(this->FindEntryWithBuffer(virtual_address, entry_set_index, pool.GetBuffer()));
529 } else {
530 pool.Deallocate();
531 R_RETURN(this->FindEntryWithoutBuffer(virtual_address, entry_set_index));
532 }
533}
534
535Result BucketTree::Visitor::FindEntryWithBuffer(s64 virtual_address, s32 entry_set_index,
536 char* buffer) {
537 // Calculate entry set extents.
538 const auto entry_size = m_tree->m_entry_size;
539 const auto entry_set_size = m_tree->m_node_size;
540 const auto entry_set_offset = entry_set_index * static_cast<s64>(entry_set_size);
541 VirtualFile storage = m_tree->m_entry_storage;
542
543 // Read the entry set.
544 storage->Read(reinterpret_cast<u8*>(buffer), entry_set_size, entry_set_offset);
545
546 // Validate the entry_set.
547 EntrySetHeader entry_set;
548 std::memcpy(std::addressof(entry_set), buffer, sizeof(EntrySetHeader));
549 R_TRY(entry_set.header.Verify(entry_set_index, entry_set_size, entry_size));
550
551 // Create the node, and find.
552 StorageNode node(entry_size, entry_set.info.count);
553 node.Find(buffer, virtual_address);
554 R_UNLESS(node.GetIndex() >= 0, ResultOutOfRange);
555
556 // Copy the data into entry.
557 const auto entry_index = node.GetIndex();
558 const auto entry_offset = impl::GetBucketTreeEntryOffset(0, entry_size, entry_index);
559 std::memcpy(m_entry, buffer + entry_offset, entry_size);
560
561 // Set our entry set/index.
562 m_entry_set = entry_set;
563 m_entry_index = entry_index;
564
565 R_SUCCEED();
566}
567
568Result BucketTree::Visitor::FindEntryWithoutBuffer(s64 virtual_address, s32 entry_set_index) {
569 // Calculate entry set extents.
570 const auto entry_size = m_tree->m_entry_size;
571 const auto entry_set_size = m_tree->m_node_size;
572 const auto entry_set_offset = entry_set_index * static_cast<s64>(entry_set_size);
573 VirtualFile storage = m_tree->m_entry_storage;
574
575 // Read and validate the entry_set.
576 EntrySetHeader entry_set;
577 storage->ReadObject(std::addressof(entry_set), entry_set_offset);
578 R_TRY(entry_set.header.Verify(entry_set_index, entry_set_size, entry_size));
579
580 // Create the node, and find.
581 StorageNode node(entry_set_offset, entry_size, entry_set.info.count);
582 R_TRY(node.Find(storage, virtual_address));
583 R_UNLESS(node.GetIndex() >= 0, ResultOutOfRange);
584
585 // Copy the data into entry.
586 const auto entry_index = node.GetIndex();
587 const auto entry_offset =
588 impl::GetBucketTreeEntryOffset(entry_set_offset, entry_size, entry_index);
589 storage->Read(reinterpret_cast<u8*>(m_entry), entry_size, entry_offset);
590
591 // Set our entry set/index.
592 m_entry_set = entry_set;
593 m_entry_index = entry_index;
594
595 R_SUCCEED();
596}
597
598} // namespace FileSys
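Both StorageNode::Find and Visitor::Find above implement the same lookup rule: pick the last entry whose start offset is less than or equal to the queried virtual address, which is why an address below the first offset yields index -1 (or ResultOutOfRange). A self-contained sketch of that rule over a made-up offset table:

#include <algorithm>
#include <cstdint>
#include <vector>

// Returns the index of the last offset <= virtual_address, or -1 if the address
// precedes every entry; this mirrors the upper_bound-minus-one pattern used above.
std::int64_t FindEntryIndex(const std::vector<std::int64_t>& offsets,
                            std::int64_t virtual_address) {
    const auto pos = std::upper_bound(offsets.begin(), offsets.end(), virtual_address);
    return static_cast<std::int64_t>(pos - offsets.begin()) - 1;
}

// Example: offsets {0x0, 0x4000, 0x9000} with virtual_address 0x4FFF selects index 1.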
diff --git a/src/core/file_sys/fssystem/fssystem_bucket_tree.h b/src/core/file_sys/fssystem/fssystem_bucket_tree.h
new file mode 100644
index 000000000..46850cd48
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_bucket_tree.h
@@ -0,0 +1,416 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include <mutex>
7
8#include "common/alignment.h"
9#include "common/common_funcs.h"
10#include "common/common_types.h"
11#include "common/literals.h"
12
13#include "core/file_sys/vfs.h"
14#include "core/hle/result.h"
15
16namespace FileSys {
17
18using namespace Common::Literals;
19
20class BucketTree {
21 YUZU_NON_COPYABLE(BucketTree);
22 YUZU_NON_MOVEABLE(BucketTree);
23
24public:
25 static constexpr u32 Magic = Common::MakeMagic('B', 'K', 'T', 'R');
26 static constexpr u32 Version = 1;
27
28 static constexpr size_t NodeSizeMin = 1_KiB;
29 static constexpr size_t NodeSizeMax = 512_KiB;
30
31public:
32 class Visitor;
33
34 struct Header {
35 u32 magic;
36 u32 version;
37 s32 entry_count;
38 s32 reserved;
39
40 void Format(s32 entry_count);
41 Result Verify() const;
42 };
43 static_assert(std::is_trivial_v<Header>);
44 static_assert(sizeof(Header) == 0x10);
45
46 struct NodeHeader {
47 s32 index;
48 s32 count;
49 s64 offset;
50
51 Result Verify(s32 node_index, size_t node_size, size_t entry_size) const;
52 };
53 static_assert(std::is_trivial_v<NodeHeader>);
54 static_assert(sizeof(NodeHeader) == 0x10);
55
56 struct Offsets {
57 s64 start_offset;
58 s64 end_offset;
59
60 constexpr bool IsInclude(s64 offset) const {
61 return this->start_offset <= offset && offset < this->end_offset;
62 }
63
64 constexpr bool IsInclude(s64 offset, s64 size) const {
65 return size > 0 && this->start_offset <= offset && size <= (this->end_offset - offset);
66 }
67 };
68 static_assert(std::is_trivial_v<Offsets>);
69 static_assert(sizeof(Offsets) == 0x10);
70
71 struct OffsetCache {
72 Offsets offsets;
73 std::mutex mutex;
74 bool is_initialized;
75
76 OffsetCache() : offsets{-1, -1}, mutex(), is_initialized(false) {}
77 };
78
79 class ContinuousReadingInfo {
80 public:
81 constexpr ContinuousReadingInfo() : m_read_size(), m_skip_count(), m_done() {}
82
83 constexpr void Reset() {
84 m_read_size = 0;
85 m_skip_count = 0;
86 m_done = false;
87 }
88
89 constexpr void SetSkipCount(s32 count) {
90 ASSERT(count >= 0);
91 m_skip_count = count;
92 }
93 constexpr s32 GetSkipCount() const {
94 return m_skip_count;
95 }
96 constexpr bool CheckNeedScan() {
97 return (--m_skip_count) <= 0;
98 }
99
100 constexpr void Done() {
101 m_read_size = 0;
102 m_done = true;
103 }
104 constexpr bool IsDone() const {
105 return m_done;
106 }
107
108 constexpr void SetReadSize(size_t size) {
109 m_read_size = size;
110 }
111 constexpr size_t GetReadSize() const {
112 return m_read_size;
113 }
114 constexpr bool CanDo() const {
115 return m_read_size > 0;
116 }
117
118 private:
119 size_t m_read_size;
120 s32 m_skip_count;
121 bool m_done;
122 };
123
124private:
125 class NodeBuffer {
126 YUZU_NON_COPYABLE(NodeBuffer);
127
128 public:
129 NodeBuffer() : m_header() {}
130
131 ~NodeBuffer() {
132 ASSERT(m_header == nullptr);
133 }
134
135 NodeBuffer(NodeBuffer&& rhs) : m_header(rhs.m_header) {
136 rhs.m_header = nullptr;
137 }
138
139 NodeBuffer& operator=(NodeBuffer&& rhs) {
140 if (this != std::addressof(rhs)) {
141 ASSERT(m_header == nullptr);
142
143 m_header = rhs.m_header;
144
145 rhs.m_header = nullptr;
146 }
147 return *this;
148 }
149
150 bool Allocate(size_t node_size) {
151 ASSERT(m_header == nullptr);
152
153 m_header = ::operator new(node_size, std::align_val_t{sizeof(s64)});
154
155 // ASSERT(Common::IsAligned(m_header, sizeof(s64)));
156
157 return m_header != nullptr;
158 }
159
160 void Free(size_t node_size) {
161 if (m_header) {
162 ::operator delete(m_header, std::align_val_t{sizeof(s64)});
163 m_header = nullptr;
164 }
165 }
166
167 void FillZero(size_t node_size) const {
168 if (m_header) {
169 std::memset(m_header, 0, node_size);
170 }
171 }
172
173 NodeHeader* Get() const {
174 return reinterpret_cast<NodeHeader*>(m_header);
175 }
176
177 NodeHeader* operator->() const {
178 return this->Get();
179 }
180
181 template <typename T>
182 T* Get() const {
183 static_assert(std::is_trivial_v<T>);
184 static_assert(sizeof(T) == sizeof(NodeHeader));
185 return reinterpret_cast<T*>(m_header);
186 }
187
188 private:
189 void* m_header;
190 };
191
192private:
193 static constexpr s32 GetEntryCount(size_t node_size, size_t entry_size) {
194 return static_cast<s32>((node_size - sizeof(NodeHeader)) / entry_size);
195 }
196
197 static constexpr s32 GetOffsetCount(size_t node_size) {
198 return static_cast<s32>((node_size - sizeof(NodeHeader)) / sizeof(s64));
199 }
200
201 static constexpr s32 GetEntrySetCount(size_t node_size, size_t entry_size, s32 entry_count) {
202 const s32 entry_count_per_node = GetEntryCount(node_size, entry_size);
203 return Common::DivideUp(entry_count, entry_count_per_node);
204 }
205
206 static constexpr s32 GetNodeL2Count(size_t node_size, size_t entry_size, s32 entry_count) {
207 const s32 offset_count_per_node = GetOffsetCount(node_size);
208 const s32 entry_set_count = GetEntrySetCount(node_size, entry_size, entry_count);
209
210 if (entry_set_count <= offset_count_per_node) {
211 return 0;
212 }
213
214 const s32 node_l2_count = Common::DivideUp(entry_set_count, offset_count_per_node);
215 ASSERT(node_l2_count <= offset_count_per_node);
216
217 return Common::DivideUp(entry_set_count - (offset_count_per_node - (node_l2_count - 1)),
218 offset_count_per_node);
219 }
220
221public:
222 BucketTree()
223 : m_node_storage(), m_entry_storage(), m_node_l1(), m_node_size(), m_entry_size(),
224 m_entry_count(), m_offset_count(), m_entry_set_count(), m_offset_cache() {}
225 ~BucketTree() {
226 this->Finalize();
227 }
228
229 Result Initialize(VirtualFile node_storage, VirtualFile entry_storage, size_t node_size,
230 size_t entry_size, s32 entry_count);
231 void Initialize(size_t node_size, s64 end_offset);
232 void Finalize();
233
234 bool IsInitialized() const {
235 return m_node_size > 0;
236 }
237 bool IsEmpty() const {
238 return m_entry_size == 0;
239 }
240
241 Result Find(Visitor* visitor, s64 virtual_address);
242 Result InvalidateCache();
243
244 s32 GetEntryCount() const {
245 return m_entry_count;
246 }
247
248 Result GetOffsets(Offsets* out) {
249 // Ensure we have an offset cache.
250 R_TRY(this->EnsureOffsetCache());
251
252 // Set the output.
253 *out = m_offset_cache.offsets;
254 R_SUCCEED();
255 }
256
257public:
258 static constexpr s64 QueryHeaderStorageSize() {
259 return sizeof(Header);
260 }
261
262 static constexpr s64 QueryNodeStorageSize(size_t node_size, size_t entry_size,
263 s32 entry_count) {
264 ASSERT(entry_size >= sizeof(s64));
265 ASSERT(node_size >= entry_size + sizeof(NodeHeader));
266 ASSERT(NodeSizeMin <= node_size && node_size <= NodeSizeMax);
267 ASSERT(Common::IsPowerOfTwo(node_size));
268 ASSERT(entry_count >= 0);
269
270 if (entry_count <= 0) {
271 return 0;
272 }
273 return (1 + GetNodeL2Count(node_size, entry_size, entry_count)) *
274 static_cast<s64>(node_size);
275 }
276
277 static constexpr s64 QueryEntryStorageSize(size_t node_size, size_t entry_size,
278 s32 entry_count) {
279 ASSERT(entry_size >= sizeof(s64));
280 ASSERT(node_size >= entry_size + sizeof(NodeHeader));
281 ASSERT(NodeSizeMin <= node_size && node_size <= NodeSizeMax);
282 ASSERT(Common::IsPowerOfTwo(node_size));
283 ASSERT(entry_count >= 0);
284
285 if (entry_count <= 0) {
286 return 0;
287 }
288 return GetEntrySetCount(node_size, entry_size, entry_count) * static_cast<s64>(node_size);
289 }
290
291private:
292 template <typename EntryType>
293 struct ContinuousReadingParam {
294 s64 offset;
295 size_t size;
296 NodeHeader entry_set;
297 s32 entry_index;
298 Offsets offsets;
299 EntryType entry;
300 };
301
302private:
303 template <typename EntryType>
304 Result ScanContinuousReading(ContinuousReadingInfo* out_info,
305 const ContinuousReadingParam<EntryType>& param) const;
306
307 bool IsExistL2() const {
308 return m_offset_count < m_entry_set_count;
309 }
310 bool IsExistOffsetL2OnL1() const {
311 return this->IsExistL2() && m_node_l1->count < m_offset_count;
312 }
313
314 s64 GetEntrySetIndex(s32 node_index, s32 offset_index) const {
315 return (m_offset_count - m_node_l1->count) + (m_offset_count * node_index) + offset_index;
316 }
317
318 Result EnsureOffsetCache();
319
320private:
321 mutable VirtualFile m_node_storage;
322 mutable VirtualFile m_entry_storage;
323 NodeBuffer m_node_l1;
324 size_t m_node_size;
325 size_t m_entry_size;
326 s32 m_entry_count;
327 s32 m_offset_count;
328 s32 m_entry_set_count;
329 OffsetCache m_offset_cache;
330};
331
332class BucketTree::Visitor {
333 YUZU_NON_COPYABLE(Visitor);
334 YUZU_NON_MOVEABLE(Visitor);
335
336public:
337 constexpr Visitor()
338 : m_tree(), m_entry(), m_entry_index(-1), m_entry_set_count(), m_entry_set{} {}
339 ~Visitor() {
340 if (m_entry != nullptr) {
341 ::operator delete(m_entry, m_tree->m_entry_size);
342 m_tree = nullptr;
343 m_entry = nullptr;
344 }
345 }
346
347 bool IsValid() const {
348 return m_entry_index >= 0;
349 }
350 bool CanMoveNext() const {
351 return this->IsValid() && (m_entry_index + 1 < m_entry_set.info.count ||
352 m_entry_set.info.index + 1 < m_entry_set_count);
353 }
354 bool CanMovePrevious() const {
355 return this->IsValid() && (m_entry_index > 0 || m_entry_set.info.index > 0);
356 }
357
358 Result MoveNext();
359 Result MovePrevious();
360
361 template <typename EntryType>
362 Result ScanContinuousReading(ContinuousReadingInfo* out_info, s64 offset, size_t size) const;
363
364 const void* Get() const {
365 ASSERT(this->IsValid());
366 return m_entry;
367 }
368
369 template <typename T>
370 const T* Get() const {
371 ASSERT(this->IsValid());
372 return reinterpret_cast<const T*>(m_entry);
373 }
374
375 const BucketTree* GetTree() const {
376 return m_tree;
377 }
378
379private:
380 Result Initialize(const BucketTree* tree, const BucketTree::Offsets& offsets);
381
382 Result Find(s64 virtual_address);
383
384 Result FindEntrySet(s32* out_index, s64 virtual_address, s32 node_index);
385 Result FindEntrySetWithBuffer(s32* out_index, s64 virtual_address, s32 node_index,
386 char* buffer);
387 Result FindEntrySetWithoutBuffer(s32* out_index, s64 virtual_address, s32 node_index);
388
389 Result FindEntry(s64 virtual_address, s32 entry_set_index);
390 Result FindEntryWithBuffer(s64 virtual_address, s32 entry_set_index, char* buffer);
391 Result FindEntryWithoutBuffer(s64 virtual_address, s32 entry_set_index);
392
393private:
394 friend class BucketTree;
395
396 union EntrySetHeader {
397 NodeHeader header;
398 struct Info {
399 s32 index;
400 s32 count;
401 s64 end;
402 s64 start;
403 } info;
404 static_assert(std::is_trivial_v<Info>);
405 };
406 static_assert(std::is_trivial_v<EntrySetHeader>);
407
408 const BucketTree* m_tree;
409 BucketTree::Offsets m_offsets;
410 void* m_entry;
411 s32 m_entry_index;
412 s32 m_entry_set_count;
413 EntrySetHeader m_entry_set;
414};
415
416} // namespace FileSys
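The Query*StorageSize helpers are how a producer of a BKTR region sizes the header, node and entry storages before a consumer calls Initialize. A sketch using the 16 KiB node size seen elsewhere in this change; the 0x10 entry size and the entry count are placeholders:

#include "core/file_sys/fssystem/fssystem_bucket_tree.h"

namespace FileSys {

// Illustrative sizing only; EntrySize and entry_count are assumptions.
s64 SizeBucketTreeRegion(s32 entry_count) {
    constexpr size_t NodeSize = 16_KiB;
    constexpr size_t EntrySize = 0x10;

    return BucketTree::QueryHeaderStorageSize() +
           BucketTree::QueryNodeStorageSize(NodeSize, EntrySize, entry_count) +
           BucketTree::QueryEntryStorageSize(NodeSize, EntrySize, entry_count);
}

} // namespace FileSys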
diff --git a/src/core/file_sys/fssystem/fssystem_bucket_tree_template_impl.h b/src/core/file_sys/fssystem/fssystem_bucket_tree_template_impl.h
new file mode 100644
index 000000000..030b2916b
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_bucket_tree_template_impl.h
@@ -0,0 +1,170 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "core/file_sys/errors.h"
7#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
8#include "core/file_sys/fssystem/fssystem_bucket_tree_utils.h"
9#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
10
11namespace FileSys {
12
13template <typename EntryType>
14Result BucketTree::ScanContinuousReading(ContinuousReadingInfo* out_info,
15 const ContinuousReadingParam<EntryType>& param) const {
16 static_assert(std::is_trivial_v<ContinuousReadingParam<EntryType>>);
17
18 // Validate our preconditions.
19 ASSERT(this->IsInitialized());
20 ASSERT(out_info != nullptr);
21 ASSERT(m_entry_size == sizeof(EntryType));
22
23 // Reset the output.
24 out_info->Reset();
25
26 // If there's nothing to read, we're done.
27 R_SUCCEED_IF(param.size == 0);
28
29 // If we're reading a fragment, we're done.
30 R_SUCCEED_IF(param.entry.IsFragment());
31
32 // Validate the first entry.
33 auto entry = param.entry;
34 auto cur_offset = param.offset;
35 R_UNLESS(entry.GetVirtualOffset() <= cur_offset, ResultOutOfRange);
36
37 // Create a pooled buffer for our scan.
38 PooledBuffer pool(m_node_size, 1);
39 char* buffer = nullptr;
40
41 s64 entry_storage_size = m_entry_storage->GetSize();
42
43 // Read the node.
44 if (m_node_size <= pool.GetSize()) {
45 buffer = pool.GetBuffer();
46 const auto ofs = param.entry_set.index * static_cast<s64>(m_node_size);
47 R_UNLESS(m_node_size + ofs <= static_cast<size_t>(entry_storage_size),
48 ResultInvalidBucketTreeNodeEntryCount);
49
50 m_entry_storage->Read(reinterpret_cast<u8*>(buffer), m_node_size, ofs);
51 }
52
53 // Calculate extents.
54 const auto end_offset = cur_offset + static_cast<s64>(param.size);
55 s64 phys_offset = entry.GetPhysicalOffset();
56
57 // Start merge tracking.
58 s64 merge_size = 0;
59 s64 readable_size = 0;
60 bool merged = false;
61
62 // Iterate.
63 auto entry_index = param.entry_index;
64 for (const auto entry_count = param.entry_set.count; entry_index < entry_count; ++entry_index) {
65 // If we're past the end, we're done.
66 if (end_offset <= cur_offset) {
67 break;
68 }
69
70 // Validate the entry offset.
71 const auto entry_offset = entry.GetVirtualOffset();
72 R_UNLESS(entry_offset <= cur_offset, ResultInvalidIndirectEntryOffset);
73
74 // Get the next entry.
75 EntryType next_entry = {};
76 s64 next_entry_offset;
77
78 if (entry_index + 1 < entry_count) {
79 if (buffer != nullptr) {
80 const auto ofs = impl::GetBucketTreeEntryOffset(0, m_entry_size, entry_index + 1);
81 std::memcpy(std::addressof(next_entry), buffer + ofs, m_entry_size);
82 } else {
83 const auto ofs = impl::GetBucketTreeEntryOffset(param.entry_set.index, m_node_size,
84 m_entry_size, entry_index + 1);
85 m_entry_storage->ReadObject(std::addressof(next_entry), ofs);
86 }
87
88 next_entry_offset = next_entry.GetVirtualOffset();
89 R_UNLESS(param.offsets.IsInclude(next_entry_offset), ResultInvalidIndirectEntryOffset);
90 } else {
91 next_entry_offset = param.entry_set.offset;
92 }
93
94 // Validate the next entry offset.
95 R_UNLESS(cur_offset < next_entry_offset, ResultInvalidIndirectEntryOffset);
96
97 // Determine how much data there is.
98 const auto data_size = next_entry_offset - cur_offset;
99 ASSERT(data_size > 0);
100
101 // Determine how much data we should read.
102 const auto remaining_size = end_offset - cur_offset;
103 const size_t read_size = static_cast<size_t>(std::min(data_size, remaining_size));
104 ASSERT(read_size <= param.size);
105
106 // Update our merge tracking.
107 if (entry.IsFragment()) {
108 // If we can't merge, stop looping.
109 if (EntryType::FragmentSizeMax <= read_size || remaining_size <= data_size) {
110 break;
111 }
112
113 // Otherwise, add the current size to the merge size.
114 merge_size += read_size;
115 } else {
116 // If we can't merge, stop looping.
117 if (phys_offset != entry.GetPhysicalOffset()) {
118 break;
119 }
120
121 // Add the size to the readable amount.
122 readable_size += merge_size + read_size;
123 ASSERT(readable_size <= static_cast<s64>(param.size));
124
125 // Update whether we've merged.
126 merged |= merge_size > 0;
127 merge_size = 0;
128 }
129
130 // Advance.
131 cur_offset += read_size;
132 ASSERT(cur_offset <= end_offset);
133
134 phys_offset += next_entry_offset - entry_offset;
135 entry = next_entry;
136 }
137
138 // If we merged, set our readable size.
139 if (merged) {
140 out_info->SetReadSize(static_cast<size_t>(readable_size));
141 }
142 out_info->SetSkipCount(entry_index - param.entry_index);
143
144 R_SUCCEED();
145}
146
147template <typename EntryType>
148Result BucketTree::Visitor::ScanContinuousReading(ContinuousReadingInfo* out_info, s64 offset,
149 size_t size) const {
150 static_assert(std::is_trivial_v<EntryType>);
151 ASSERT(this->IsValid());
152
153 // Create our parameters.
154 ContinuousReadingParam<EntryType> param = {
155 .offset = offset,
156 .size = size,
157 .entry_set = m_entry_set.header,
158 .entry_index = m_entry_index,
159 .offsets{},
160 .entry{},
161 };
162 std::memcpy(std::addressof(param.offsets), std::addressof(m_offsets),
163 sizeof(BucketTree::Offsets));
164 std::memcpy(std::addressof(param.entry), m_entry, sizeof(EntryType));
165
166 // Scan.
167 R_RETURN(m_tree->ScanContinuousReading<EntryType>(out_info, param));
168}
169
170} // namespace FileSys
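
Aside: the continuous-reading scan above folds runs of adjacent entries into one physical access. Fragment bytes are accumulated tentatively in merge_size and only committed to readable_size once a following non-fragment entry keeps the run going; a trailing, uncommitted fragment is dropped. A minimal standalone sketch of that bookkeeping, using a hypothetical ToyEntry in place of the real indirect-storage entry and ignoring the physical-contiguity check the real loop also performs:

// Illustrative sketch: ToyEntry stands in for the real entry type.
#include <cstdint>
#include <iostream>
#include <vector>

struct ToyEntry {
    std::int64_t size;  // bytes covered by this entry
    bool is_fragment;   // true -> patch fragment, false -> base data
};

// Number of leading bytes that could be served by one continuous access,
// assuming every non-fragment entry is physically contiguous with the last.
std::int64_t ComputeReadableSize(const std::vector<ToyEntry>& entries) {
    std::int64_t merge_size = 0;
    std::int64_t readable_size = 0;
    for (const auto& entry : entries) {
        if (entry.is_fragment) {
            merge_size += entry.size;  // tentatively merge the fragment
        } else {
            readable_size += merge_size + entry.size;  // commit pending fragments
            merge_size = 0;
        }
    }
    return readable_size;
}

int main() {
    // data(0x4000), fragment(0x10), data(0x4000) -> one 0x8010-byte access.
    std::cout << std::hex
              << ComputeReadableSize({{0x4000, false}, {0x10, true}, {0x4000, false}})
              << '\n';
}
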
diff --git a/src/core/file_sys/fssystem/fssystem_bucket_tree_utils.h b/src/core/file_sys/fssystem/fssystem_bucket_tree_utils.h
new file mode 100644
index 000000000..5503613fc
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_bucket_tree_utils.h
@@ -0,0 +1,110 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
7
8namespace FileSys::impl {
9
10class SafeValue {
11public:
12 static s64 GetInt64(const void* ptr) {
13 s64 value;
14 std::memcpy(std::addressof(value), ptr, sizeof(s64));
15 return value;
16 }
17
18 static s64 GetInt64(const s64* ptr) {
19 return GetInt64(static_cast<const void*>(ptr));
20 }
21
22 static s64 GetInt64(const s64& v) {
23 return GetInt64(std::addressof(v));
24 }
25
26 static void SetInt64(void* dst, const void* src) {
27 std::memcpy(dst, src, sizeof(s64));
28 }
29
30 static void SetInt64(void* dst, const s64* src) {
31 return SetInt64(dst, static_cast<const void*>(src));
32 }
33
34 static void SetInt64(void* dst, const s64& v) {
35 return SetInt64(dst, std::addressof(v));
36 }
37};
38
39template <typename IteratorType>
40struct BucketTreeNode {
41 using Header = BucketTree::NodeHeader;
42
43 Header header;
44
45 s32 GetCount() const {
46 return this->header.count;
47 }
48
49 void* GetArray() {
50 return std::addressof(this->header) + 1;
51 }
52 template <typename T>
53 T* GetArray() {
54 return reinterpret_cast<T*>(this->GetArray());
55 }
56 const void* GetArray() const {
57 return std::addressof(this->header) + 1;
58 }
59 template <typename T>
60 const T* GetArray() const {
61 return reinterpret_cast<const T*>(this->GetArray());
62 }
63
64 s64 GetBeginOffset() const {
65 return *this->GetArray<s64>();
66 }
67 s64 GetEndOffset() const {
68 return this->header.offset;
69 }
70
71 IteratorType GetBegin() {
72 return IteratorType(this->GetArray<s64>());
73 }
74 IteratorType GetEnd() {
75 return IteratorType(this->GetArray<s64>()) + this->header.count;
76 }
77 IteratorType GetBegin() const {
78 return IteratorType(this->GetArray<s64>());
79 }
80 IteratorType GetEnd() const {
81 return IteratorType(this->GetArray<s64>()) + this->header.count;
82 }
83
84 IteratorType GetBegin(size_t entry_size) {
85 return IteratorType(this->GetArray(), entry_size);
86 }
87 IteratorType GetEnd(size_t entry_size) {
88 return IteratorType(this->GetArray(), entry_size) + this->header.count;
89 }
90 IteratorType GetBegin(size_t entry_size) const {
91 return IteratorType(this->GetArray(), entry_size);
92 }
93 IteratorType GetEnd(size_t entry_size) const {
94 return IteratorType(this->GetArray(), entry_size) + this->header.count;
95 }
96};
97
98constexpr inline s64 GetBucketTreeEntryOffset(s64 entry_set_offset, size_t entry_size,
99 s32 entry_index) {
100 return entry_set_offset + sizeof(BucketTree::NodeHeader) +
101 entry_index * static_cast<s64>(entry_size);
102}
103
104constexpr inline s64 GetBucketTreeEntryOffset(s32 entry_set_index, size_t node_size,
105 size_t entry_size, s32 entry_index) {
106 return GetBucketTreeEntryOffset(entry_set_index * static_cast<s64>(node_size), entry_size,
107 entry_index);
108}
109
110} // namespace FileSys::impl
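
Aside: GetBucketTreeEntryOffset is plain offset arithmetic: an entry set starts with a node header, and entry i of set k lives at k * node_size + sizeof(NodeHeader) + i * entry_size. A compilable sketch of the same arithmetic, with the header size written as an assumed 0x10-byte constant instead of sizeof(BucketTree::NodeHeader):

// Illustrative sketch of the bucket-tree entry offset arithmetic.
#include <cstddef>
#include <cstdint>
#include <iostream>

constexpr std::int64_t NodeHeaderSize = 0x10;  // assumed header size for illustration

constexpr std::int64_t EntryOffset(std::int64_t entry_set_offset, std::size_t entry_size,
                                   std::int32_t entry_index) {
    return entry_set_offset + NodeHeaderSize +
           entry_index * static_cast<std::int64_t>(entry_size);
}

constexpr std::int64_t EntryOffset(std::int32_t entry_set_index, std::size_t node_size,
                                   std::size_t entry_size, std::int32_t entry_index) {
    return EntryOffset(entry_set_index * static_cast<std::int64_t>(node_size), entry_size,
                       entry_index);
}

int main() {
    // Entry 3 of entry set 2, with 16 KiB nodes and 0x14-byte entries:
    // 2 * 0x4000 + 0x10 + 3 * 0x14 = 0x804c.
    std::cout << std::hex << EntryOffset(2, 0x4000, 0x14, 3) << '\n';
}
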
diff --git a/src/core/file_sys/fssystem/fssystem_compressed_storage.h b/src/core/file_sys/fssystem/fssystem_compressed_storage.h
new file mode 100644
index 000000000..33d93938e
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_compressed_storage.h
@@ -0,0 +1,963 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "common/literals.h"
7
8#include "core/file_sys/errors.h"
9#include "core/file_sys/fssystem/fs_i_storage.h"
10#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
11#include "core/file_sys/fssystem/fssystem_compression_common.h"
12#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
13#include "core/file_sys/vfs.h"
14
15namespace FileSys {
16
17using namespace Common::Literals;
18
19class CompressedStorage : public IReadOnlyStorage {
20 YUZU_NON_COPYABLE(CompressedStorage);
21 YUZU_NON_MOVEABLE(CompressedStorage);
22
23public:
24 static constexpr size_t NodeSize = 16_KiB;
25
26 struct Entry {
27 s64 virt_offset;
28 s64 phys_offset;
29 CompressionType compression_type;
30 s32 phys_size;
31
32 s64 GetPhysicalSize() const {
33 return this->phys_size;
34 }
35 };
36 static_assert(std::is_trivial_v<Entry>);
37 static_assert(sizeof(Entry) == 0x18);
38
39public:
40 static constexpr s64 QueryNodeStorageSize(s32 entry_count) {
41 return BucketTree::QueryNodeStorageSize(NodeSize, sizeof(Entry), entry_count);
42 }
43
44 static constexpr s64 QueryEntryStorageSize(s32 entry_count) {
45 return BucketTree::QueryEntryStorageSize(NodeSize, sizeof(Entry), entry_count);
46 }
47
48private:
49 class CompressedStorageCore {
50 YUZU_NON_COPYABLE(CompressedStorageCore);
51 YUZU_NON_MOVEABLE(CompressedStorageCore);
52
53 public:
54 CompressedStorageCore() : m_table(), m_data_storage() {}
55
56 ~CompressedStorageCore() {
57 this->Finalize();
58 }
59
60 public:
61 Result Initialize(VirtualFile data_storage, VirtualFile node_storage,
62 VirtualFile entry_storage, s32 bktr_entry_count, size_t block_size_max,
63 size_t continuous_reading_size_max,
64 GetDecompressorFunction get_decompressor) {
65 // Check pre-conditions.
66 ASSERT(0 < block_size_max);
67 ASSERT(block_size_max <= continuous_reading_size_max);
68 ASSERT(get_decompressor != nullptr);
69
70 // Initialize our entry table.
71 R_TRY(m_table.Initialize(node_storage, entry_storage, NodeSize, sizeof(Entry),
72 bktr_entry_count));
73
74 // Set our other fields.
75 m_block_size_max = block_size_max;
76 m_continuous_reading_size_max = continuous_reading_size_max;
77 m_data_storage = data_storage;
78 m_get_decompressor_function = get_decompressor;
79
80 R_SUCCEED();
81 }
82
83 void Finalize() {
84 if (this->IsInitialized()) {
85 m_table.Finalize();
86 m_data_storage = VirtualFile();
87 }
88 }
89
90 VirtualFile GetDataStorage() {
91 return m_data_storage;
92 }
93
94 Result GetDataStorageSize(s64* out) {
95 // Check pre-conditions.
96 ASSERT(out != nullptr);
97
98 // Get size.
99 *out = m_data_storage->GetSize();
100
101 R_SUCCEED();
102 }
103
104 BucketTree& GetEntryTable() {
105 return m_table;
106 }
107
108 Result GetEntryList(Entry* out_entries, s32* out_read_count, s32 max_entry_count,
109 s64 offset, s64 size) {
110 // Check pre-conditions.
111 ASSERT(offset >= 0);
112 ASSERT(size >= 0);
113 ASSERT(this->IsInitialized());
114
115 // Check that we can output the count.
116 R_UNLESS(out_read_count != nullptr, ResultNullptrArgument);
117
118 // Check whether we have anything to read at all.
119 R_SUCCEED_IF(size == 0);
120
121 // Check that either we have a buffer, or this is to determine how many we need.
122 if (max_entry_count != 0) {
123 R_UNLESS(out_entries != nullptr, ResultNullptrArgument);
124 }
125
126 // Get the table offsets.
127 BucketTree::Offsets table_offsets;
128 R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));
129
130 // Validate arguments.
131 R_UNLESS(table_offsets.IsInclude(offset, size), ResultOutOfRange);
132
133 // Find the offset in our tree.
134 BucketTree::Visitor visitor;
135 R_TRY(m_table.Find(std::addressof(visitor), offset));
136 {
137 const auto entry_offset = visitor.Get<Entry>()->virt_offset;
138 R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset),
139 ResultUnexpectedInCompressedStorageA);
140 }
141
142 // Get the entries.
143 const auto end_offset = offset + size;
144 s32 read_count = 0;
145 while (visitor.Get<Entry>()->virt_offset < end_offset) {
146 // If we should be setting the output, do so.
147 if (max_entry_count != 0) {
148 // Ensure we only read as many entries as we can.
149 if (read_count >= max_entry_count) {
150 break;
151 }
152
153 // Set the current output entry.
154 out_entries[read_count] = *visitor.Get<Entry>();
155 }
156
157 // Increase the read count.
158 ++read_count;
159
160 // If we're at the end, we're done.
161 if (!visitor.CanMoveNext()) {
162 break;
163 }
164
165 // Move to the next entry.
166 R_TRY(visitor.MoveNext());
167 }
168
169 // Set the output read count.
170 *out_read_count = read_count;
171 R_SUCCEED();
172 }
173
174 Result GetSize(s64* out) {
175 // Check pre-conditions.
176 ASSERT(out != nullptr);
177
178 // Get our table offsets.
179 BucketTree::Offsets offsets;
180 R_TRY(m_table.GetOffsets(std::addressof(offsets)));
181
182 // Set the output.
183 *out = offsets.end_offset;
184 R_SUCCEED();
185 }
186
187 Result OperatePerEntry(s64 offset, s64 size, auto f) {
188 // Check pre-conditions.
189 ASSERT(offset >= 0);
190 ASSERT(size >= 0);
191 ASSERT(this->IsInitialized());
192
193 // Succeed if there's nothing to operate on.
194 R_SUCCEED_IF(size == 0);
195
196 // Get the table offsets.
197 BucketTree::Offsets table_offsets;
198 R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));
199
200 // Validate arguments.
201 R_UNLESS(table_offsets.IsInclude(offset, size), ResultOutOfRange);
202
203 // Find the offset in our tree.
204 BucketTree::Visitor visitor;
205 R_TRY(m_table.Find(std::addressof(visitor), offset));
206 {
207 const auto entry_offset = visitor.Get<Entry>()->virt_offset;
208 R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset),
209 ResultUnexpectedInCompressedStorageA);
210 }
211
212 // Prepare to operate in chunks.
213 auto cur_offset = offset;
214 const auto end_offset = offset + static_cast<s64>(size);
215
216 while (cur_offset < end_offset) {
217 // Get the current entry.
218 const auto cur_entry = *visitor.Get<Entry>();
219
220 // Get and validate the entry's offset.
221 const auto cur_entry_offset = cur_entry.virt_offset;
222 R_UNLESS(cur_entry_offset <= cur_offset, ResultUnexpectedInCompressedStorageA);
223
224 // Get and validate the next entry offset.
225 s64 next_entry_offset;
226 if (visitor.CanMoveNext()) {
227 R_TRY(visitor.MoveNext());
228 next_entry_offset = visitor.Get<Entry>()->virt_offset;
229 R_UNLESS(table_offsets.IsInclude(next_entry_offset),
230 ResultUnexpectedInCompressedStorageA);
231 } else {
232 next_entry_offset = table_offsets.end_offset;
233 }
234 R_UNLESS(cur_offset < next_entry_offset, ResultUnexpectedInCompressedStorageA);
235
236 // Get the offset of the entry in the data we read.
237 const auto data_offset = cur_offset - cur_entry_offset;
238 const auto data_size = (next_entry_offset - cur_entry_offset);
239 ASSERT(data_size > 0);
240
241 // Determine how much is left.
242 const auto remaining_size = end_offset - cur_offset;
243 const auto cur_size = std::min<s64>(remaining_size, data_size - data_offset);
244 ASSERT(cur_size <= size);
245
246 // Get the data storage size.
247 s64 storage_size = m_data_storage->GetSize();
248
249 // Check that our read remains naively physically in bounds.
250 R_UNLESS(0 <= cur_entry.phys_offset && cur_entry.phys_offset <= storage_size,
251 ResultUnexpectedInCompressedStorageC);
252
253 // If we have any compression, verify that we remain physically in bounds.
254 if (cur_entry.compression_type != CompressionType::None) {
255 R_UNLESS(cur_entry.phys_offset + cur_entry.GetPhysicalSize() <= storage_size,
256 ResultUnexpectedInCompressedStorageC);
257 }
258
259 // Check that block alignment requirements are met.
260 if (CompressionTypeUtility::IsBlockAlignmentRequired(cur_entry.compression_type)) {
261 R_UNLESS(Common::IsAligned(cur_entry.phys_offset, CompressionBlockAlignment),
262 ResultUnexpectedInCompressedStorageA);
263 }
264
265 // Invoke the operator.
266 bool is_continuous = true;
267 R_TRY(
268 f(std::addressof(is_continuous), cur_entry, data_size, data_offset, cur_size));
269
270 // If not continuous, we're done.
271 if (!is_continuous) {
272 break;
273 }
274
275 // Advance.
276 cur_offset += cur_size;
277 }
278
279 R_SUCCEED();
280 }
281
282 public:
283 using ReadImplFunction = std::function<Result(void*, size_t)>;
284 using ReadFunction = std::function<Result(size_t, const ReadImplFunction&)>;
285
286 public:
287 Result Read(s64 offset, s64 size, const ReadFunction& read_func) {
288 // Check pre-conditions.
289 ASSERT(offset >= 0);
290 ASSERT(this->IsInitialized());
291
292 // Succeed immediately if we have nothing to read.
293 R_SUCCEED_IF(size == 0);
294
295 // Declare read lambda.
296 constexpr int EntriesCountMax = 0x80;
297 struct Entries {
298 CompressionType compression_type;
299 u32 gap_from_prev;
300 u32 physical_size;
301 u32 virtual_size;
302 };
303 std::array<Entries, EntriesCountMax> entries;
304 s32 entry_count = 0;
305 Entry prev_entry = {
306 .virt_offset = -1,
307 .phys_offset{},
308 .compression_type{},
309 .phys_size{},
310 };
311 bool will_allocate_pooled_buffer = false;
312 s64 required_access_physical_offset = 0;
313 s64 required_access_physical_size = 0;
314
315 auto PerformRequiredRead = [&]() -> Result {
316 // If there are no entries, we have nothing to do.
317 R_SUCCEED_IF(entry_count == 0);
318
319 // Get the remaining size in a convenient form.
320 const size_t total_required_size =
321 static_cast<size_t>(required_access_physical_size);
322
323 // Perform the read based on whether we need to allocate a buffer.
324 if (will_allocate_pooled_buffer) {
325 // Allocate a pooled buffer.
326 PooledBuffer pooled_buffer;
327 if (pooled_buffer.GetAllocatableSizeMax() >= total_required_size) {
328 pooled_buffer.Allocate(total_required_size, m_block_size_max);
329 } else {
330 pooled_buffer.AllocateParticularlyLarge(
331 std::min<size_t>(
332 total_required_size,
333 PooledBuffer::GetAllocatableParticularlyLargeSizeMax()),
334 m_block_size_max);
335 }
336
337 // Read each of the entries.
338 for (s32 entry_idx = 0; entry_idx < entry_count; ++entry_idx) {
339 // Determine the current read size.
340 bool will_use_pooled_buffer = false;
341 const size_t cur_read_size = [&]() -> size_t {
342 if (const size_t target_entry_size =
343 static_cast<size_t>(entries[entry_idx].physical_size) +
344 static_cast<size_t>(entries[entry_idx].gap_from_prev);
345 target_entry_size <= pooled_buffer.GetSize()) {
346 // We'll be using the pooled buffer.
347 will_use_pooled_buffer = true;
348
349 // Determine how much we can read.
350 const size_t max_size = std::min<size_t>(
351 required_access_physical_size, pooled_buffer.GetSize());
352
353 size_t read_size = 0;
354 for (auto n = entry_idx; n < entry_count; ++n) {
355 const size_t cur_entry_size =
356 static_cast<size_t>(entries[n].physical_size) +
357 static_cast<size_t>(entries[n].gap_from_prev);
358 if (read_size + cur_entry_size > max_size) {
359 break;
360 }
361
362 read_size += cur_entry_size;
363 }
364
365 return read_size;
366 } else {
367 // If we don't fit, we must be uncompressed.
368 ASSERT(entries[entry_idx].compression_type ==
369 CompressionType::None);
370
371 // We can perform the whole of an uncompressed read directly.
372 return entries[entry_idx].virtual_size;
373 }
374 }();
375
376 // Perform the read based on whether or not we'll use the pooled buffer.
377 if (will_use_pooled_buffer) {
378 // Read the compressed data into the pooled buffer.
379 auto* const buffer = pooled_buffer.GetBuffer();
380 m_data_storage->Read(reinterpret_cast<u8*>(buffer), cur_read_size,
381 required_access_physical_offset);
382
383 // Decompress the data.
384 size_t buffer_offset;
385 for (buffer_offset = 0;
386 entry_idx < entry_count &&
387 ((static_cast<size_t>(entries[entry_idx].physical_size) +
388 static_cast<size_t>(entries[entry_idx].gap_from_prev)) == 0 ||
389 buffer_offset < cur_read_size);
390 buffer_offset += entries[entry_idx++].physical_size) {
391 // Advance by the relevant gap.
392 buffer_offset += entries[entry_idx].gap_from_prev;
393
394 const auto compression_type = entries[entry_idx].compression_type;
395 switch (compression_type) {
396 case CompressionType::None: {
397 // Check that we can remain within bounds.
398 ASSERT(buffer_offset + entries[entry_idx].virtual_size <=
399 cur_read_size);
400
401 // Perform no decompression.
402 R_TRY(read_func(
403 entries[entry_idx].virtual_size,
404 [&](void* dst, size_t dst_size) -> Result {
405 // Check that the size is valid.
406 ASSERT(dst_size == entries[entry_idx].virtual_size);
407
408 // We have no compression, so just copy the data
409 // out.
410 std::memcpy(dst, buffer + buffer_offset,
411 entries[entry_idx].virtual_size);
412 R_SUCCEED();
413 }));
414
415 break;
416 }
417 case CompressionType::Zeros: {
418 // Check that we can remain within bounds.
419 ASSERT(buffer_offset <= cur_read_size);
420
421 // Zero the memory.
422 R_TRY(read_func(
423 entries[entry_idx].virtual_size,
424 [&](void* dst, size_t dst_size) -> Result {
425 // Check that the size is valid.
426 ASSERT(dst_size == entries[entry_idx].virtual_size);
427
428 // The data is zeroes, so zero the buffer.
429 std::memset(dst, 0, entries[entry_idx].virtual_size);
430 R_SUCCEED();
431 }));
432
433 break;
434 }
435 default: {
436 // Check that we can remain within bounds.
437 ASSERT(buffer_offset + entries[entry_idx].physical_size <=
438 cur_read_size);
439
440 // Get the decompressor.
441 const auto decompressor =
442 this->GetDecompressor(compression_type);
443 R_UNLESS(decompressor != nullptr,
444 ResultUnexpectedInCompressedStorageB);
445
446 // Decompress the data.
447 R_TRY(read_func(entries[entry_idx].virtual_size,
448 [&](void* dst, size_t dst_size) -> Result {
449 // Check that the size is valid.
450 ASSERT(dst_size ==
451 entries[entry_idx].virtual_size);
452
453 // Perform the decompression.
454 R_RETURN(decompressor(
455 dst, entries[entry_idx].virtual_size,
456 buffer + buffer_offset,
457 entries[entry_idx].physical_size));
458 }));
459
460 break;
461 }
462 }
463 }
464
465 // Check that we processed the correct amount of data.
466 ASSERT(buffer_offset == cur_read_size);
467 } else {
468 // Account for the gap from the previous entry.
469 required_access_physical_offset += entries[entry_idx].gap_from_prev;
470 required_access_physical_size -= entries[entry_idx].gap_from_prev;
471
472 // We don't need the buffer (as the data is uncompressed), so just
473 // execute the read.
474 R_TRY(
475 read_func(cur_read_size, [&](void* dst, size_t dst_size) -> Result {
476 // Check that the size is valid.
477 ASSERT(dst_size == cur_read_size);
478
479 // Perform the read.
480 m_data_storage->Read(reinterpret_cast<u8*>(dst), cur_read_size,
481 required_access_physical_offset);
482
483 R_SUCCEED();
484 }));
485 }
486
487 // Advance on.
488 required_access_physical_offset += cur_read_size;
489 required_access_physical_size -= cur_read_size;
490 }
491
492 // Verify that we have nothing remaining to read.
493 ASSERT(required_access_physical_size == 0);
494
495 R_SUCCEED();
496 } else {
497 // We don't need a buffer, so just execute the read.
498 R_TRY(read_func(total_required_size, [&](void* dst, size_t dst_size) -> Result {
499 // Check that the size is valid.
500 ASSERT(dst_size == total_required_size);
501
502 // Perform the read.
503 m_data_storage->Read(reinterpret_cast<u8*>(dst), total_required_size,
504 required_access_physical_offset);
505
506 R_SUCCEED();
507 }));
508 }
509
510 R_SUCCEED();
511 };
512
513 R_TRY(this->OperatePerEntry(
514 offset, size,
515 [&](bool* out_continuous, const Entry& entry, s64 virtual_data_size,
516 s64 data_offset, s64 read_size) -> Result {
517 // Determine the physical extents.
518 s64 physical_offset, physical_size;
519 if (CompressionTypeUtility::IsRandomAccessible(entry.compression_type)) {
520 physical_offset = entry.phys_offset + data_offset;
521 physical_size = read_size;
522 } else {
523 physical_offset = entry.phys_offset;
524 physical_size = entry.GetPhysicalSize();
525 }
526
527 // If we have a pending data storage operation, perform it if we have to.
528 const s64 required_access_physical_end =
529 required_access_physical_offset + required_access_physical_size;
530 if (required_access_physical_size > 0) {
531 const bool required_by_gap =
532 !(required_access_physical_end <= physical_offset &&
533 physical_offset <= Common::AlignUp(required_access_physical_end,
534 CompressionBlockAlignment));
535 const bool required_by_continuous_size =
536 ((physical_size + physical_offset) - required_access_physical_end) +
537 required_access_physical_size >
538 static_cast<s64>(m_continuous_reading_size_max);
539 const bool required_by_entry_count = entry_count == EntriesCountMax;
540 if (required_by_gap || required_by_continuous_size ||
541 required_by_entry_count) {
542 // Check that our planned access is sane.
543 ASSERT(!will_allocate_pooled_buffer ||
544 required_access_physical_size <=
545 static_cast<s64>(m_continuous_reading_size_max));
546
547 // Perform the required read.
548 const Result rc = PerformRequiredRead();
549 if (R_FAILED(rc)) {
550 R_THROW(rc);
551 }
552
553 // Reset our requirements.
554 prev_entry.virt_offset = -1;
555 required_access_physical_size = 0;
556 entry_count = 0;
557 will_allocate_pooled_buffer = false;
558 }
559 }
560
561 // Sanity check that we're within bounds on entries.
562 ASSERT(entry_count < EntriesCountMax);
563
564 // Determine if a buffer allocation is needed.
565 if (entry.compression_type != CompressionType::None ||
566 (prev_entry.virt_offset >= 0 &&
567 entry.virt_offset - prev_entry.virt_offset !=
568 entry.phys_offset - prev_entry.phys_offset)) {
569 will_allocate_pooled_buffer = true;
570 }
571
572 // If we need to access the data storage, update our required access parameters.
573 if (CompressionTypeUtility::IsDataStorageAccessRequired(
574 entry.compression_type)) {
575 // If the data is compressed, ensure the access is sane.
576 if (entry.compression_type != CompressionType::None) {
577 R_UNLESS(data_offset == 0, ResultInvalidOffset);
578 R_UNLESS(virtual_data_size == read_size, ResultInvalidSize);
579 R_UNLESS(entry.GetPhysicalSize() <= static_cast<s64>(m_block_size_max),
580 ResultUnexpectedInCompressedStorageD);
581 }
582
583 // Update the required access parameters.
584 s64 gap_from_prev;
585 if (required_access_physical_size > 0) {
586 gap_from_prev = physical_offset - required_access_physical_end;
587 } else {
588 gap_from_prev = 0;
589 required_access_physical_offset = physical_offset;
590 }
591 required_access_physical_size += physical_size + gap_from_prev;
592
593 // Create an entry to access the data storage.
594 entries[entry_count++] = {
595 .compression_type = entry.compression_type,
596 .gap_from_prev = static_cast<u32>(gap_from_prev),
597 .physical_size = static_cast<u32>(physical_size),
598 .virtual_size = static_cast<u32>(read_size),
599 };
600 } else {
601 // Verify that we're allowed to be operating on the non-data-storage-access
602 // type.
603 R_UNLESS(entry.compression_type == CompressionType::Zeros,
604 ResultUnexpectedInCompressedStorageB);
605
606 // If we have entries, create a fake entry for the zero region.
607 if (entry_count != 0) {
608 // We need to have a physical size.
609 R_UNLESS(entry.GetPhysicalSize() != 0,
610 ResultUnexpectedInCompressedStorageD);
611
612 // Create a fake entry.
613 entries[entry_count++] = {
614 .compression_type = CompressionType::Zeros,
615 .gap_from_prev = 0,
616 .physical_size = 0,
617 .virtual_size = static_cast<u32>(read_size),
618 };
619 } else {
620 // We have no entries, so we can just perform the read.
621 const Result rc =
622 read_func(static_cast<size_t>(read_size),
623 [&](void* dst, size_t dst_size) -> Result {
624 // Check that the space we should zero is the correct size.
625 ASSERT(dst_size == static_cast<size_t>(read_size));
626
627 // Zero the memory.
628 std::memset(dst, 0, read_size);
629 R_SUCCEED();
630 });
631 if (R_FAILED(rc)) {
632 R_THROW(rc);
633 }
634 }
635 }
636
637 // Set the previous entry.
638 prev_entry = entry;
639
640 // We're continuous.
641 *out_continuous = true;
642 R_SUCCEED();
643 }));
644
645 // If we still have a pending access, perform it.
646 if (required_access_physical_size != 0) {
647 R_TRY(PerformRequiredRead());
648 }
649
650 R_SUCCEED();
651 }
652
653 private:
654 DecompressorFunction GetDecompressor(CompressionType type) const {
655 // Check that we can get a decompressor for the type.
656 if (CompressionTypeUtility::IsUnknownType(type)) {
657 return nullptr;
658 }
659
660 // Get the decompressor.
661 return m_get_decompressor_function(type);
662 }
663
664 bool IsInitialized() const {
665 return m_table.IsInitialized();
666 }
667
668 private:
669 size_t m_block_size_max;
670 size_t m_continuous_reading_size_max;
671 BucketTree m_table;
672 VirtualFile m_data_storage;
673 GetDecompressorFunction m_get_decompressor_function;
674 };
675
676 class CacheManager {
677 YUZU_NON_COPYABLE(CacheManager);
678 YUZU_NON_MOVEABLE(CacheManager);
679
680 private:
681 struct AccessRange {
682 s64 virtual_offset;
683 s64 virtual_size;
684 u32 physical_size;
685 bool is_block_alignment_required;
686
687 s64 GetEndVirtualOffset() const {
688 return this->virtual_offset + this->virtual_size;
689 }
690 };
691 static_assert(std::is_trivial_v<AccessRange>);
692
693 public:
694 CacheManager() = default;
695
696 public:
697 Result Initialize(s64 storage_size, size_t cache_size_0, size_t cache_size_1,
698 size_t max_cache_entries) {
699 // Set our fields.
700 m_storage_size = storage_size;
701
702 R_SUCCEED();
703 }
704
705 Result Read(CompressedStorageCore& core, s64 offset, void* buffer, size_t size) {
706 // If we have nothing to read, succeed.
707 R_SUCCEED_IF(size == 0);
708
709 // Check that we have a buffer to read into.
710 R_UNLESS(buffer != nullptr, ResultNullptrArgument);
711
712 // Check that the read is in bounds.
713 R_UNLESS(offset <= m_storage_size, ResultInvalidOffset);
714
715 // Determine how much we can read.
716 const size_t read_size = std::min<size_t>(size, m_storage_size - offset);
717
718 // Create head/tail ranges.
719 AccessRange head_range = {};
720 AccessRange tail_range = {};
721 bool is_tail_set = false;
722
723 // Operate to determine the head range.
724 R_TRY(core.OperatePerEntry(
725 offset, 1,
726 [&](bool* out_continuous, const Entry& entry, s64 virtual_data_size,
727 s64 data_offset, s64 data_read_size) -> Result {
728 // Set the head range.
729 head_range = {
730 .virtual_offset = entry.virt_offset,
731 .virtual_size = virtual_data_size,
732 .physical_size = static_cast<u32>(entry.phys_size),
733 .is_block_alignment_required =
734 CompressionTypeUtility::IsBlockAlignmentRequired(
735 entry.compression_type),
736 };
737
738 // If required, set the tail range.
739 if (static_cast<s64>(offset + read_size) <=
740 entry.virt_offset + virtual_data_size) {
741 tail_range = {
742 .virtual_offset = entry.virt_offset,
743 .virtual_size = virtual_data_size,
744 .physical_size = static_cast<u32>(entry.phys_size),
745 .is_block_alignment_required =
746 CompressionTypeUtility::IsBlockAlignmentRequired(
747 entry.compression_type),
748 };
749 is_tail_set = true;
750 }
751
752 // We only want to determine the head range, so we're not continuous.
753 *out_continuous = false;
754 R_SUCCEED();
755 }));
756
757 // If necessary, determine the tail range.
758 if (!is_tail_set) {
759 R_TRY(core.OperatePerEntry(
760 offset + read_size - 1, 1,
761 [&](bool* out_continuous, const Entry& entry, s64 virtual_data_size,
762 s64 data_offset, s64 data_read_size) -> Result {
763 // Set the tail range.
764 tail_range = {
765 .virtual_offset = entry.virt_offset,
766 .virtual_size = virtual_data_size,
767 .physical_size = static_cast<u32>(entry.phys_size),
768 .is_block_alignment_required =
769 CompressionTypeUtility::IsBlockAlignmentRequired(
770 entry.compression_type),
771 };
772
773 // We only want to determine the tail range, so we're not continuous.
774 *out_continuous = false;
775 R_SUCCEED();
776 }));
777 }
778
779 // Begin performing the accesses.
780 s64 cur_offset = offset;
781 size_t cur_size = read_size;
782 char* cur_dst = static_cast<char*>(buffer);
783
784 // Determine our alignment.
785 const bool head_unaligned = head_range.is_block_alignment_required &&
786 (cur_offset != head_range.virtual_offset ||
787 static_cast<s64>(cur_size) < head_range.virtual_size);
788 const bool tail_unaligned = [&]() -> bool {
789 if (tail_range.is_block_alignment_required) {
790 if (static_cast<s64>(cur_size + cur_offset) ==
791 tail_range.GetEndVirtualOffset()) {
792 return false;
793 } else if (!head_unaligned) {
794 return true;
795 } else {
796 return head_range.GetEndVirtualOffset() <
797 static_cast<s64>(cur_size + cur_offset);
798 }
799 } else {
800 return false;
801 }
802 }();
803
804 // Determine start/end offsets.
805 const s64 start_offset =
806 head_range.is_block_alignment_required ? head_range.virtual_offset : cur_offset;
807 const s64 end_offset = tail_range.is_block_alignment_required
808 ? tail_range.GetEndVirtualOffset()
809 : cur_offset + cur_size;
810
811 // Perform the read.
812 bool is_burst_reading = false;
813 R_TRY(core.Read(
814 start_offset, end_offset - start_offset,
815 [&](size_t size_buffer_required,
816 const CompressedStorageCore::ReadImplFunction& read_impl) -> Result {
817 // Determine whether we're burst reading.
818 const AccessRange* unaligned_range = nullptr;
819 if (!is_burst_reading) {
820 // Check whether we're using head, tail, or none as unaligned.
821 if (head_unaligned && head_range.virtual_offset <= cur_offset &&
822 cur_offset < head_range.GetEndVirtualOffset()) {
823 unaligned_range = std::addressof(head_range);
824 } else if (tail_unaligned && tail_range.virtual_offset <= cur_offset &&
825 cur_offset < tail_range.GetEndVirtualOffset()) {
826 unaligned_range = std::addressof(tail_range);
827 } else {
828 is_burst_reading = true;
829 }
830 }
831 ASSERT((is_burst_reading ^ (unaligned_range != nullptr)));
832
833 // Perform reading by burst, or not.
834 if (is_burst_reading) {
835 // Check that the access is valid for burst reading.
836 ASSERT(size_buffer_required <= cur_size);
837
838 // Perform the read.
839 Result rc = read_impl(cur_dst, size_buffer_required);
840 if (R_FAILED(rc)) {
841 R_THROW(rc);
842 }
843
844 // Advance.
845 cur_dst += size_buffer_required;
846 cur_offset += size_buffer_required;
847 cur_size -= size_buffer_required;
848
849 // Determine whether we're going to continue burst reading.
850 const s64 offset_aligned =
851 tail_unaligned ? tail_range.virtual_offset : end_offset;
852 ASSERT(cur_offset <= offset_aligned);
853
854 if (offset_aligned <= cur_offset) {
855 is_burst_reading = false;
856 }
857 } else {
858 // We're not burst reading, so we have some unaligned range.
859 ASSERT(unaligned_range != nullptr);
860
861 // Check that the size is correct.
862 ASSERT(size_buffer_required ==
863 static_cast<size_t>(unaligned_range->virtual_size));
864
865 // Get a pooled buffer for our read.
866 PooledBuffer pooled_buffer;
867 pooled_buffer.Allocate(size_buffer_required, size_buffer_required);
868
869 // Perform read.
870 Result rc = read_impl(pooled_buffer.GetBuffer(), size_buffer_required);
871 if (R_FAILED(rc)) {
872 R_THROW(rc);
873 }
874
875 // Copy the data we read to the destination.
876 const size_t skip_size = cur_offset - unaligned_range->virtual_offset;
877 const size_t copy_size = std::min<size_t>(
878 cur_size, unaligned_range->GetEndVirtualOffset() - cur_offset);
879
880 std::memcpy(cur_dst, pooled_buffer.GetBuffer() + skip_size, copy_size);
881
882 // Advance.
883 cur_dst += copy_size;
884 cur_offset += copy_size;
885 cur_size -= copy_size;
886 }
887
888 R_SUCCEED();
889 }));
890
891 R_SUCCEED();
892 }
893
894 private:
895 s64 m_storage_size = 0;
896 };
897
898public:
899 CompressedStorage() = default;
900 virtual ~CompressedStorage() {
901 this->Finalize();
902 }
903
904 Result Initialize(VirtualFile data_storage, VirtualFile node_storage, VirtualFile entry_storage,
905 s32 bktr_entry_count, size_t block_size_max,
906 size_t continuous_reading_size_max, GetDecompressorFunction get_decompressor,
907 size_t cache_size_0, size_t cache_size_1, s32 max_cache_entries) {
908 // Initialize our core.
909 R_TRY(m_core.Initialize(data_storage, node_storage, entry_storage, bktr_entry_count,
910 block_size_max, continuous_reading_size_max, get_decompressor));
911
912 // Get our core size.
913 s64 core_size = 0;
914 R_TRY(m_core.GetSize(std::addressof(core_size)));
915
916 // Initialize our cache manager.
917 R_TRY(m_cache_manager.Initialize(core_size, cache_size_0, cache_size_1, max_cache_entries));
918
919 R_SUCCEED();
920 }
921
922 void Finalize() {
923 m_core.Finalize();
924 }
925
926 VirtualFile GetDataStorage() {
927 return m_core.GetDataStorage();
928 }
929
930 Result GetDataStorageSize(s64* out) {
931 R_RETURN(m_core.GetDataStorageSize(out));
932 }
933
934 Result GetEntryList(Entry* out_entries, s32* out_read_count, s32 max_entry_count, s64 offset,
935 s64 size) {
936 R_RETURN(m_core.GetEntryList(out_entries, out_read_count, max_entry_count, offset, size));
937 }
938
939 BucketTree& GetEntryTable() {
940 return m_core.GetEntryTable();
941 }
942
943public:
944 virtual size_t GetSize() const override {
945 s64 ret{};
946 m_core.GetSize(&ret);
947 return ret;
948 }
949
950 virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
951 if (R_SUCCEEDED(m_cache_manager.Read(m_core, offset, buffer, size))) {
952 return size;
953 } else {
954 return 0;
955 }
956 }
957
958private:
959 mutable CompressedStorageCore m_core;
960 mutable CacheManager m_cache_manager;
961};
962
963} // namespace FileSys
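
Aside: the heart of CompressedStorage::Read is deciding, per table entry, which physical bytes back a requested virtual range (see the OperatePerEntry callback above). Uncompressed entries are random-accessible and can be read at byte granularity; any other entry resolves to its whole physical block, which is then decompressed or zero-filled. A simplified standalone restatement of that mapping; ToyEntry and ToyCompression are illustrative stand-ins, not the real types:

// Illustrative sketch of the virtual-to-physical extent mapping.
#include <cstdint>
#include <iostream>

enum class ToyCompression { None, Zeros, Lz4 };

struct ToyEntry {
    std::int64_t virt_offset;
    std::int64_t phys_offset;
    ToyCompression type;
    std::int32_t phys_size;
};

struct PhysicalExtent {
    std::int64_t offset;
    std::int64_t size;
};

PhysicalExtent ResolveExtent(const ToyEntry& entry, std::int64_t data_offset,
                             std::int64_t read_size) {
    if (entry.type == ToyCompression::None) {
        // Uncompressed data can be addressed directly.
        return {entry.phys_offset + data_offset, read_size};
    }
    // Anything else resolves to the entry's whole physical block.
    return {entry.phys_offset, entry.phys_size};
}

int main() {
    const ToyEntry uncompressed{0x0000, 0x1000, ToyCompression::None, 0x4000};
    const ToyEntry compressed{0x4000, 0x5000, ToyCompression::Lz4, 0x0800};

    const auto a = ResolveExtent(uncompressed, 0x200, 0x100);
    const auto b = ResolveExtent(compressed, 0, 0x4000);
    std::cout << std::hex << a.offset << ' ' << a.size << '\n'  // 1200 100
              << b.offset << ' ' << b.size << '\n';             // 5000 800
}
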
diff --git a/src/core/file_sys/fssystem/fssystem_compression_common.h b/src/core/file_sys/fssystem/fssystem_compression_common.h
new file mode 100644
index 000000000..266e0a7e5
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_compression_common.h
@@ -0,0 +1,43 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "core/hle/result.h"
7
8namespace FileSys {
9
10enum class CompressionType : u8 {
11 None = 0,
12 Zeros = 1,
13 Two = 2,
14 Lz4 = 3,
15 Unknown = 4,
16};
17
18using DecompressorFunction = Result (*)(void*, size_t, const void*, size_t);
19using GetDecompressorFunction = DecompressorFunction (*)(CompressionType);
20
21constexpr s64 CompressionBlockAlignment = 0x10;
22
23namespace CompressionTypeUtility {
24
25constexpr bool IsBlockAlignmentRequired(CompressionType type) {
26 return type != CompressionType::None && type != CompressionType::Zeros;
27}
28
29constexpr bool IsDataStorageAccessRequired(CompressionType type) {
30 return type != CompressionType::Zeros;
31}
32
33constexpr bool IsRandomAccessible(CompressionType type) {
34 return type == CompressionType::None;
35}
36
37constexpr bool IsUnknownType(CompressionType type) {
38 return type >= CompressionType::Unknown;
39}
40
41} // namespace CompressionTypeUtility
42
43} // namespace FileSys
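
Aside: the CompressionTypeUtility predicates encode three independent properties of a compression type: whether its physical blocks must be aligned, whether reading it touches the data storage at all, and whether it can be read at arbitrary byte offsets. A standalone mirror of those predicates, with the expected classifications checked at compile time (a local enum is used so the snippet builds on its own):

// Illustrative mirror of the compression-type predicates.
#include <cstdint>

enum class Type : std::uint8_t { None = 0, Zeros = 1, Two = 2, Lz4 = 3, Unknown = 4 };

constexpr bool IsBlockAlignmentRequired(Type t) {
    return t != Type::None && t != Type::Zeros;  // stored compressed blocks are aligned
}
constexpr bool IsDataStorageAccessRequired(Type t) {
    return t != Type::Zeros;                     // zero regions never touch storage
}
constexpr bool IsRandomAccessible(Type t) {
    return t == Type::None;                      // only plain data is byte-addressable
}

static_assert(!IsBlockAlignmentRequired(Type::Zeros));
static_assert(IsBlockAlignmentRequired(Type::Lz4));
static_assert(!IsDataStorageAccessRequired(Type::Zeros));
static_assert(IsRandomAccessible(Type::None) && !IsRandomAccessible(Type::Lz4));

int main() {}
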
diff --git a/src/core/file_sys/fssystem/fssystem_compression_configuration.cpp b/src/core/file_sys/fssystem/fssystem_compression_configuration.cpp
new file mode 100644
index 000000000..ef552cefe
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_compression_configuration.cpp
@@ -0,0 +1,36 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "common/lz4_compression.h"
5#include "core/file_sys/fssystem/fssystem_compression_configuration.h"
6
7namespace FileSys {
8
9namespace {
10
11Result DecompressLz4(void* dst, size_t dst_size, const void* src, size_t src_size) {
12 auto result = Common::Compression::DecompressDataLZ4(dst, dst_size, src, src_size);
13 R_UNLESS(static_cast<size_t>(result) == dst_size, ResultUnexpectedInCompressedStorageC);
14 R_SUCCEED();
15}
16
17constexpr DecompressorFunction GetNcaDecompressorFunction(CompressionType type) {
18 switch (type) {
19 case CompressionType::Lz4:
20 return DecompressLz4;
21 default:
22 return nullptr;
23 }
24}
25
26} // namespace
27
28const NcaCompressionConfiguration& GetNcaCompressionConfiguration() {
29 static const NcaCompressionConfiguration configuration = {
30 .get_decompressor = GetNcaDecompressorFunction,
31 };
32
33 return configuration;
34}
35
36} // namespace FileSys
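
Aside: the compression configuration reduces to a single lookup: ask for a decompressor by type, then call whatever comes back with (dst, dst_size, src, src_size). The sketch below shows that calling shape in isolation; the copy-through stub is a placeholder for the real LZ4 routine and the int return code stands in for Result, so none of it reflects the actual Common::Compression API:

// Illustrative sketch of the decompressor dispatch; CopyThrough is a stub.
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

enum class Type : std::uint8_t { None = 0, Zeros = 1, Two = 2, Lz4 = 3 };

using Decompressor = int (*)(void* dst, std::size_t dst_size, const void* src,
                             std::size_t src_size);

int CopyThrough(void* dst, std::size_t dst_size, const void* src, std::size_t src_size) {
    if (src_size > dst_size) {
        return -1;  // would overflow the output buffer
    }
    std::memcpy(dst, src, src_size);
    return 0;
}

Decompressor GetDecompressor(Type type) {
    switch (type) {
    case Type::Lz4:
        return CopyThrough;  // stand-in for the real LZ4 routine
    default:
        return nullptr;      // no decompressor for this type
    }
}

int main() {
    const std::vector<std::uint8_t> src{1, 2, 3, 4};
    std::vector<std::uint8_t> dst(src.size());
    if (auto* fn = GetDecompressor(Type::Lz4)) {
        std::cout << fn(dst.data(), dst.size(), src.data(), src.size()) << '\n';  // 0
    }
}
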
diff --git a/src/core/file_sys/fssystem/fssystem_compression_configuration.h b/src/core/file_sys/fssystem/fssystem_compression_configuration.h
new file mode 100644
index 000000000..ec9b48e9a
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_compression_configuration.h
@@ -0,0 +1,12 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "core/file_sys/fssystem/fssystem_nca_file_system_driver.h"
7
8namespace FileSys {
9
10const NcaCompressionConfiguration& GetNcaCompressionConfiguration();
11
12}
diff --git a/src/core/file_sys/fssystem/fssystem_crypto_configuration.cpp b/src/core/file_sys/fssystem/fssystem_crypto_configuration.cpp
new file mode 100644
index 000000000..a4f0cde28
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_crypto_configuration.cpp
@@ -0,0 +1,65 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "core/crypto/aes_util.h"
5#include "core/crypto/key_manager.h"
6#include "core/file_sys/fssystem/fssystem_crypto_configuration.h"
7
8namespace FileSys {
9
10namespace {
11
12void GenerateKey(void* dst_key, size_t dst_key_size, const void* src_key, size_t src_key_size,
13 s32 key_type) {
14 if (key_type == static_cast<s32>(KeyType::ZeroKey)) {
15 std::memset(dst_key, 0, dst_key_size);
16 return;
17 }
18
19 if (key_type == static_cast<s32>(KeyType::InvalidKey) ||
20 key_type < static_cast<s32>(KeyType::ZeroKey) ||
21 key_type >= static_cast<s32>(KeyType::NcaExternalKey)) {
22 std::memset(dst_key, 0xFF, dst_key_size);
23 return;
24 }
25
26 const auto& instance = Core::Crypto::KeyManager::Instance();
27
28 if (key_type == static_cast<s32>(KeyType::NcaHeaderKey1) ||
29 key_type == static_cast<s32>(KeyType::NcaHeaderKey2)) {
30 const s32 key_index = static_cast<s32>(KeyType::NcaHeaderKey2) == key_type;
31 const auto key = instance.GetKey(Core::Crypto::S256KeyType::Header);
32 std::memcpy(dst_key, key.data() + key_index * 0x10, std::min(dst_key_size, key.size() / 2));
33 return;
34 }
35
36 const s32 key_generation =
37 std::max(key_type / NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount, 1) - 1;
38 const s32 key_index = key_type % NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount;
39
40 Core::Crypto::AESCipher<Core::Crypto::Key128> cipher(
41 instance.GetKey(Core::Crypto::S128KeyType::KeyArea, key_generation, key_index),
42 Core::Crypto::Mode::ECB);
43 cipher.Transcode(reinterpret_cast<const u8*>(src_key), src_key_size,
44 reinterpret_cast<u8*>(dst_key), Core::Crypto::Op::Decrypt);
45}
46
47} // namespace
48
49const NcaCryptoConfiguration& GetCryptoConfiguration() {
50 static const NcaCryptoConfiguration configuration = {
51 .header_1_sign_key_moduli{},
52 .header_1_sign_key_public_exponent{},
53 .key_area_encryption_key_source{},
54 .header_encryption_key_source{},
55 .header_encrypted_encryption_keys{},
56 .generate_key = GenerateKey,
57 .verify_sign1{},
58 .is_plaintext_header_available{},
59 .is_available_sw_key{},
60 };
61
62 return configuration;
63}
64
65} // namespace FileSys
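
Aside: for key-area keys, GenerateKey above derives both the key generation and the key-area index from the raw integer key type. A standalone sketch of that arithmetic; the value 3 used for KeyAreaEncryptionKeyIndexCount is an assumption made for the example (one slot each for the application, ocean, and system key sources) rather than something stated in this file:

// Illustrative sketch of the key-type -> (generation, index) split.
#include <algorithm>
#include <iostream>

constexpr int KeyAreaEncryptionKeyIndexCount = 3;  // assumed value for illustration

struct KeySelector {
    int generation;
    int index;
};

constexpr KeySelector Split(int key_type) {
    return {std::max(key_type / KeyAreaEncryptionKeyIndexCount, 1) - 1,
            key_type % KeyAreaEncryptionKeyIndexCount};
}

int main() {
    // Key types 0..2 map to generation 0; each later triplet bumps the generation.
    const auto a = Split(1);  // generation 0, index 1
    const auto b = Split(7);  // generation 1, index 1
    std::cout << a.generation << ' ' << a.index << '\n'
              << b.generation << ' ' << b.index << '\n';
}
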
diff --git a/src/core/file_sys/fssystem/fssystem_crypto_configuration.h b/src/core/file_sys/fssystem/fssystem_crypto_configuration.h
new file mode 100644
index 000000000..7fd9c5a8d
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_crypto_configuration.h
@@ -0,0 +1,12 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "core/file_sys/fssystem/fssystem_nca_file_system_driver.h"
7
8namespace FileSys {
9
10const NcaCryptoConfiguration& GetCryptoConfiguration();
11
12}
diff --git a/src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.cpp b/src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.cpp
new file mode 100644
index 000000000..4a75b5308
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.cpp
@@ -0,0 +1,127 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h"
5#include "core/file_sys/vfs_offset.h"
6
7namespace FileSys {
8
9HierarchicalIntegrityVerificationStorage::HierarchicalIntegrityVerificationStorage()
10 : m_data_size(-1) {
11 for (size_t i = 0; i < MaxLayers - 1; i++) {
12 m_verify_storages[i] = std::make_shared<IntegrityVerificationStorage>();
13 }
14}
15
16Result HierarchicalIntegrityVerificationStorage::Initialize(
17 const HierarchicalIntegrityVerificationInformation& info,
18 HierarchicalStorageInformation storage, int max_data_cache_entries, int max_hash_cache_entries,
19 s8 buffer_level) {
20 // Validate preconditions.
21 ASSERT(IntegrityMinLayerCount <= info.max_layers && info.max_layers <= IntegrityMaxLayerCount);
22
23 // Set member variables.
24 m_max_layers = info.max_layers;
25
26 // Initialize the top level verification storage.
27 m_verify_storages[0]->Initialize(storage[HierarchicalStorageInformation::MasterStorage],
28 storage[HierarchicalStorageInformation::Layer1Storage],
29 static_cast<s64>(1) << info.info[0].block_order, HashSize,
30 false);
31
32 // Ensure we don't leak state if further initialization goes wrong.
33 ON_RESULT_FAILURE {
34 m_verify_storages[0]->Finalize();
35 m_data_size = -1;
36 };
37
38 // Initialize the top level buffer storage.
39 m_buffer_storages[0] = m_verify_storages[0];
40 R_UNLESS(m_buffer_storages[0] != nullptr, ResultAllocationMemoryFailedAllocateShared);
41
42 // Prepare to initialize the level storages.
43 s32 level = 0;
44
45 // Ensure we don't leak state if further initialization goes wrong.
46 ON_RESULT_FAILURE_2 {
47 m_verify_storages[level + 1]->Finalize();
48 for (; level > 0; --level) {
49 m_buffer_storages[level].reset();
50 m_verify_storages[level]->Finalize();
51 }
52 };
53
54 // Initialize the level storages.
55 for (; level < m_max_layers - 3; ++level) {
56 // Initialize the verification storage.
57 auto buffer_storage =
58 std::make_shared<OffsetVfsFile>(m_buffer_storages[level], info.info[level].size, 0);
59 m_verify_storages[level + 1]->Initialize(
60 std::move(buffer_storage), storage[level + 2],
61 static_cast<s64>(1) << info.info[level + 1].block_order,
62 static_cast<s64>(1) << info.info[level].block_order, false);
63
64 // Initialize the buffer storage.
65 m_buffer_storages[level + 1] = m_verify_storages[level + 1];
66 R_UNLESS(m_buffer_storages[level + 1] != nullptr,
67 ResultAllocationMemoryFailedAllocateShared);
68 }
69
70 // Initialize the final level storage.
71 {
72 // Initialize the verification storage.
73 auto buffer_storage =
74 std::make_shared<OffsetVfsFile>(m_buffer_storages[level], info.info[level].size, 0);
75 m_verify_storages[level + 1]->Initialize(
76 std::move(buffer_storage), storage[level + 2],
77 static_cast<s64>(1) << info.info[level + 1].block_order,
78 static_cast<s64>(1) << info.info[level].block_order, true);
79
80 // Initialize the buffer storage.
81 m_buffer_storages[level + 1] = m_verify_storages[level + 1];
82 R_UNLESS(m_buffer_storages[level + 1] != nullptr,
83 ResultAllocationMemoryFailedAllocateShared);
84 }
85
86 // Set the data size.
87 m_data_size = info.info[level + 1].size;
88
89 // We succeeded.
90 R_SUCCEED();
91}
92
93void HierarchicalIntegrityVerificationStorage::Finalize() {
94 if (m_data_size >= 0) {
95 m_data_size = 0;
96
97 for (s32 level = m_max_layers - 2; level >= 0; --level) {
98 m_buffer_storages[level].reset();
99 m_verify_storages[level]->Finalize();
100 }
101
102 m_data_size = -1;
103 }
104}
105
106size_t HierarchicalIntegrityVerificationStorage::Read(u8* buffer, size_t size,
107 size_t offset) const {
108 // Validate preconditions.
109 ASSERT(m_data_size >= 0);
110
111 // Succeed if zero-size.
112 if (size == 0) {
113 return size;
114 }
115
116 // Validate arguments.
117 ASSERT(buffer != nullptr);
118
119 // Read the data.
120 return m_buffer_storages[m_max_layers - 2]->Read(buffer, size, offset);
121}
122
123size_t HierarchicalIntegrityVerificationStorage::GetSize() const {
124 return m_data_size;
125}
126
127} // namespace FileSys
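
Aside: each level of the hierarchy above carries its verification block size as a power of two (block_order), and the initializer hands 1 << block_order to the per-level IntegrityVerificationStorage. One block of a hash level covers block_size / HashSize blocks of the level below it. A small standalone sketch of that arithmetic; the block order of 14 is only a typical example value, not something fixed by this code:

// Illustrative sketch of the block-order arithmetic.
#include <cstdint>
#include <iostream>

constexpr std::int64_t HashSize = 256 / 8;  // SHA-256 digest size

constexpr std::int64_t BlockSize(std::int32_t block_order) {
    return static_cast<std::int64_t>(1) << block_order;
}

constexpr std::int64_t BlocksCoveredByOneHashBlock(std::int32_t block_order) {
    return BlockSize(block_order) / HashSize;
}

int main() {
    // Block order 14 -> 16 KiB blocks; one hash block covers 512 lower-level blocks.
    std::cout << BlockSize(14) << ' ' << BlocksCoveredByOneHashBlock(14) << '\n';
}
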
diff --git a/src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h b/src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h
new file mode 100644
index 000000000..5cf697efe
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h
@@ -0,0 +1,164 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "common/alignment.h"
7#include "core/file_sys/fssystem/fs_i_storage.h"
8#include "core/file_sys/fssystem/fs_types.h"
9#include "core/file_sys/fssystem/fssystem_alignment_matching_storage.h"
10#include "core/file_sys/fssystem/fssystem_integrity_verification_storage.h"
11#include "core/file_sys/vfs_offset.h"
12
13namespace FileSys {
14
15struct HierarchicalIntegrityVerificationLevelInformation {
16 Int64 offset;
17 Int64 size;
18 s32 block_order;
19 std::array<u8, 4> reserved;
20};
21static_assert(std::is_trivial_v<HierarchicalIntegrityVerificationLevelInformation>);
22static_assert(sizeof(HierarchicalIntegrityVerificationLevelInformation) == 0x18);
23static_assert(alignof(HierarchicalIntegrityVerificationLevelInformation) == 0x4);
24
25struct HierarchicalIntegrityVerificationInformation {
26 u32 max_layers;
27 std::array<HierarchicalIntegrityVerificationLevelInformation, IntegrityMaxLayerCount - 1> info;
28 HashSalt seed;
29
30 s64 GetLayeredHashSize() const {
31 return this->info[this->max_layers - 2].offset;
32 }
33
34 s64 GetDataOffset() const {
35 return this->info[this->max_layers - 2].offset;
36 }
37
38 s64 GetDataSize() const {
39 return this->info[this->max_layers - 2].size;
40 }
41};
42static_assert(std::is_trivial_v<HierarchicalIntegrityVerificationInformation>);
43
44struct HierarchicalIntegrityVerificationMetaInformation {
45 u32 magic;
46 u32 version;
47 u32 master_hash_size;
48 HierarchicalIntegrityVerificationInformation level_hash_info;
49};
50static_assert(std::is_trivial_v<HierarchicalIntegrityVerificationMetaInformation>);
51
52struct HierarchicalIntegrityVerificationSizeSet {
53 s64 control_size;
54 s64 master_hash_size;
55 std::array<s64, IntegrityMaxLayerCount - 2> layered_hash_sizes;
56};
57static_assert(std::is_trivial_v<HierarchicalIntegrityVerificationSizeSet>);
58
59class HierarchicalIntegrityVerificationStorage : public IReadOnlyStorage {
60 YUZU_NON_COPYABLE(HierarchicalIntegrityVerificationStorage);
61 YUZU_NON_MOVEABLE(HierarchicalIntegrityVerificationStorage);
62
63public:
64 using GenerateRandomFunction = void (*)(void* dst, size_t size);
65
66 class HierarchicalStorageInformation {
67 public:
68 enum {
69 MasterStorage = 0,
70 Layer1Storage = 1,
71 Layer2Storage = 2,
72 Layer3Storage = 3,
73 Layer4Storage = 4,
74 Layer5Storage = 5,
75 DataStorage = 6,
76 };
77
78 private:
79 std::array<VirtualFile, DataStorage + 1> m_storages;
80
81 public:
82 void SetMasterHashStorage(VirtualFile s) {
83 m_storages[MasterStorage] = s;
84 }
85 void SetLayer1HashStorage(VirtualFile s) {
86 m_storages[Layer1Storage] = s;
87 }
88 void SetLayer2HashStorage(VirtualFile s) {
89 m_storages[Layer2Storage] = s;
90 }
91 void SetLayer3HashStorage(VirtualFile s) {
92 m_storages[Layer3Storage] = s;
93 }
94 void SetLayer4HashStorage(VirtualFile s) {
95 m_storages[Layer4Storage] = s;
96 }
97 void SetLayer5HashStorage(VirtualFile s) {
98 m_storages[Layer5Storage] = s;
99 }
100 void SetDataStorage(VirtualFile s) {
101 m_storages[DataStorage] = s;
102 }
103
104 VirtualFile& operator[](s32 index) {
105 ASSERT(MasterStorage <= index && index <= DataStorage);
106 return m_storages[index];
107 }
108 };
109
110public:
111 HierarchicalIntegrityVerificationStorage();
112 virtual ~HierarchicalIntegrityVerificationStorage() override {
113 this->Finalize();
114 }
115
116 Result Initialize(const HierarchicalIntegrityVerificationInformation& info,
117 HierarchicalStorageInformation storage, int max_data_cache_entries,
118 int max_hash_cache_entries, s8 buffer_level);
119 void Finalize();
120
121 virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
122 virtual size_t GetSize() const override;
123
124 bool IsInitialized() const {
125 return m_data_size >= 0;
126 }
127
128 s64 GetL1HashVerificationBlockSize() const {
129 return m_verify_storages[m_max_layers - 2]->GetBlockSize();
130 }
131
132 VirtualFile GetL1HashStorage() {
133 return std::make_shared<OffsetVfsFile>(
134 m_buffer_storages[m_max_layers - 3],
135 Common::DivideUp(m_data_size, this->GetL1HashVerificationBlockSize()), 0);
136 }
137
138public:
139 static constexpr s8 GetDefaultDataCacheBufferLevel(u32 max_layers) {
140 return static_cast<s8>(16 + max_layers - 2);
141 }
142
143protected:
144 static constexpr s64 HashSize = 256 / 8;
145 static constexpr size_t MaxLayers = IntegrityMaxLayerCount;
146
147private:
148 static GenerateRandomFunction s_generate_random;
149
150 static void SetGenerateRandomFunction(GenerateRandomFunction func) {
151 s_generate_random = func;
152 }
153
154private:
155 friend struct HierarchicalIntegrityVerificationMetaInformation;
156
157private:
158 std::array<std::shared_ptr<IntegrityVerificationStorage>, MaxLayers - 1> m_verify_storages;
159 std::array<VirtualFile, MaxLayers - 1> m_buffer_storages;
160 s64 m_data_size;
161 s32 m_max_layers;
162};
163
164} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.cpp b/src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.cpp
new file mode 100644
index 000000000..caea0b8f8
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.cpp
@@ -0,0 +1,80 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "common/alignment.h"
5#include "common/scope_exit.h"
6#include "core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.h"
7
8namespace FileSys {
9
10namespace {
11
12s32 Log2(s32 value) {
13 ASSERT(value > 0);
14 ASSERT(Common::IsPowerOfTwo(value));
15
16 s32 log = 0;
17 while ((value >>= 1) > 0) {
18 ++log;
19 }
20 return log;
21}
22
23} // namespace
24
25Result HierarchicalSha256Storage::Initialize(VirtualFile* base_storages, s32 layer_count,
26 size_t htbs, void* hash_buf, size_t hash_buf_size) {
27 // Validate preconditions.
28 ASSERT(layer_count == LayerCount);
29 ASSERT(Common::IsPowerOfTwo(htbs));
30 ASSERT(hash_buf != nullptr);
31
32 // Set size tracking members.
33 m_hash_target_block_size = static_cast<s32>(htbs);
34 m_log_size_ratio = Log2(m_hash_target_block_size / HashSize);
35
36 // Get the base storage size.
37 m_base_storage_size = base_storages[2]->GetSize();
38 {
39 auto size_guard = SCOPE_GUARD({ m_base_storage_size = 0; });
40 R_UNLESS(m_base_storage_size <= static_cast<s64>(HashSize)
41 << m_log_size_ratio << m_log_size_ratio,
42 ResultHierarchicalSha256BaseStorageTooLarge);
43 size_guard.Cancel();
44 }
45
46 // Set hash buffer tracking members.
47 m_base_storage = base_storages[2];
48 m_hash_buffer = static_cast<char*>(hash_buf);
49 m_hash_buffer_size = hash_buf_size;
50
51 // Read the master hash.
52 std::array<u8, HashSize> master_hash{};
53 base_storages[0]->ReadObject(std::addressof(master_hash));
54
55 // Read and validate the data being hashed.
56 s64 hash_storage_size = base_storages[1]->GetSize();
57 ASSERT(Common::IsAligned(hash_storage_size, HashSize));
58 ASSERT(hash_storage_size <= m_hash_target_block_size);
59 ASSERT(hash_storage_size <= static_cast<s64>(m_hash_buffer_size));
60
61 base_storages[1]->Read(reinterpret_cast<u8*>(m_hash_buffer),
62 static_cast<size_t>(hash_storage_size), 0);
63
64 R_SUCCEED();
65}
66
67size_t HierarchicalSha256Storage::Read(u8* buffer, size_t size, size_t offset) const {
68 // Succeed if zero-size.
69 if (size == 0) {
70 return size;
71 }
72
73 // Validate that we have a buffer to read into.
74 ASSERT(buffer != nullptr);
75
76 // Read the data.
77 return m_base_storage->Read(buffer, size, offset);
78}
79
80} // namespace FileSys
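
Aside: HierarchicalSha256Storage sizes itself from two quantities computed in Initialize above: log_size_ratio = log2(hash_target_block_size / HashSize), and the bound HashSize << log_size_ratio << log_size_ratio on the base storage, which is the most data a single block of hashes can cover. A standalone sketch of both computations; the 4 KiB block size is an arbitrary example value:

// Illustrative sketch of the log-size-ratio and size-bound computation.
#include <cassert>
#include <cstdint>
#include <iostream>

constexpr std::int64_t HashSize = 256 / 8;  // SHA-256 digest size

std::int32_t Log2(std::int32_t value) {
    assert(value > 0 && (value & (value - 1)) == 0);  // power of two expected
    std::int32_t log = 0;
    while ((value >>= 1) > 0) {
        ++log;
    }
    return log;
}

int main() {
    const std::int64_t block_size = 0x1000;  // 4 KiB hash target blocks (example)
    const std::int32_t ratio = Log2(static_cast<std::int32_t>(block_size / HashSize));
    const std::int64_t max_base_size = HashSize << ratio << ratio;
    // ratio = log2(0x1000 / 32) = 7; max_base_size = 32 << 7 << 7 = 0x80000 (512 KiB).
    std::cout << ratio << ' ' << std::hex << max_base_size << '\n';
}
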
diff --git a/src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.h b/src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.h
new file mode 100644
index 000000000..18df400af
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.h
@@ -0,0 +1,44 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include <mutex>
7
8#include "core/file_sys/errors.h"
9#include "core/file_sys/fssystem/fs_i_storage.h"
10#include "core/file_sys/vfs.h"
11
12namespace FileSys {
13
14class HierarchicalSha256Storage : public IReadOnlyStorage {
15 YUZU_NON_COPYABLE(HierarchicalSha256Storage);
16 YUZU_NON_MOVEABLE(HierarchicalSha256Storage);
17
18public:
19 static constexpr s32 LayerCount = 3;
20 static constexpr size_t HashSize = 256 / 8;
21
22public:
23 HierarchicalSha256Storage() : m_mutex() {}
24
25 Result Initialize(VirtualFile* base_storages, s32 layer_count, size_t htbs, void* hash_buf,
26 size_t hash_buf_size);
27
28 virtual size_t GetSize() const override {
29 return m_base_storage->GetSize();
30 }
31
32 virtual size_t Read(u8* buffer, size_t length, size_t offset) const override;
33
34private:
35 VirtualFile m_base_storage;
36 s64 m_base_storage_size;
37 char* m_hash_buffer;
38 size_t m_hash_buffer_size;
39 s32 m_hash_target_block_size;
40 s32 m_log_size_ratio;
41 std::mutex m_mutex;
42};
43
44} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_indirect_storage.cpp b/src/core/file_sys/fssystem/fssystem_indirect_storage.cpp
new file mode 100644
index 000000000..7544e70b2
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_indirect_storage.cpp
@@ -0,0 +1,119 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "core/file_sys/errors.h"
5#include "core/file_sys/fssystem/fssystem_indirect_storage.h"
6
7namespace FileSys {
8
9Result IndirectStorage::Initialize(VirtualFile table_storage) {
10 // Read and verify the bucket tree header.
11 BucketTree::Header header;
12 table_storage->ReadObject(std::addressof(header));
13 R_TRY(header.Verify());
14
15 // Determine extents.
16 const auto node_storage_size = QueryNodeStorageSize(header.entry_count);
17 const auto entry_storage_size = QueryEntryStorageSize(header.entry_count);
18 const auto node_storage_offset = QueryHeaderStorageSize();
19 const auto entry_storage_offset = node_storage_offset + node_storage_size;
20
21 // Initialize.
22 R_RETURN(this->Initialize(
23 std::make_shared<OffsetVfsFile>(table_storage, node_storage_size, node_storage_offset),
24 std::make_shared<OffsetVfsFile>(table_storage, entry_storage_size, entry_storage_offset),
25 header.entry_count));
26}
27
28void IndirectStorage::Finalize() {
29 if (this->IsInitialized()) {
30 m_table.Finalize();
31 for (auto i = 0; i < StorageCount; i++) {
32 m_data_storage[i] = VirtualFile();
33 }
34 }
35}
36
37Result IndirectStorage::GetEntryList(Entry* out_entries, s32* out_entry_count, s32 entry_count,
38 s64 offset, s64 size) {
39 // Validate pre-conditions.
40 ASSERT(offset >= 0);
41 ASSERT(size >= 0);
42 ASSERT(this->IsInitialized());
43
44 // Clear the out count.
45 R_UNLESS(out_entry_count != nullptr, ResultNullptrArgument);
46 *out_entry_count = 0;
47
48 // Succeed if there's no range.
49 R_SUCCEED_IF(size == 0);
50
51 // If we have an output array, we need it to be non-null.
52 R_UNLESS(out_entries != nullptr || entry_count == 0, ResultNullptrArgument);
53
54 // Check that our range is valid.
55 BucketTree::Offsets table_offsets;
56 R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));
57
58 R_UNLESS(table_offsets.IsInclude(offset, size), ResultOutOfRange);
59
60 // Find the offset in our tree.
61 BucketTree::Visitor visitor;
62 R_TRY(m_table.Find(std::addressof(visitor), offset));
63 {
64 const auto entry_offset = visitor.Get<Entry>()->GetVirtualOffset();
65 R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset),
66 ResultInvalidIndirectEntryOffset);
67 }
68
69 // Prepare to loop over entries.
70 const auto end_offset = offset + static_cast<s64>(size);
71 s32 count = 0;
72
73 auto cur_entry = *visitor.Get<Entry>();
74 while (cur_entry.GetVirtualOffset() < end_offset) {
75 // Try to write the entry to the out list.
76 if (entry_count != 0) {
77 if (count >= entry_count) {
78 break;
79 }
80 std::memcpy(out_entries + count, std::addressof(cur_entry), sizeof(Entry));
81 }
82
83 count++;
84
85 // Advance.
86 if (visitor.CanMoveNext()) {
87 R_TRY(visitor.MoveNext());
88 cur_entry = *visitor.Get<Entry>();
89 } else {
90 break;
91 }
92 }
93
94 // Write the output count.
95 *out_entry_count = count;
96 R_SUCCEED();
97}
98
99size_t IndirectStorage::Read(u8* buffer, size_t size, size_t offset) const {
100 // Validate pre-conditions.
101 ASSERT(this->IsInitialized());
102 ASSERT(buffer != nullptr);
103
104 // Succeed if there's nothing to read.
105 if (size == 0) {
106 return 0;
107 }
108
109 const_cast<IndirectStorage*>(this)->OperatePerEntry<true, true>(
110 offset, size,
111 [=](VirtualFile storage, s64 data_offset, s64 cur_offset, s64 cur_size) -> Result {
112 storage->Read(reinterpret_cast<u8*>(buffer) + (cur_offset - offset),
113 static_cast<size_t>(cur_size), data_offset);
114 R_SUCCEED();
115 });
116
117 return size;
118}
119} // namespace FileSys
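
For orientation, a stand-alone sketch, separate from the BucketTree machinery above, of what one indirect entry encodes: entries sorted by virtual offset select a backing storage (assumed here: index 0 = original data, 1 = patch data) and a physical offset within it.

#include <algorithm>
#include <cstdint>
#include <vector>

struct SimpleEntry {
    int64_t virt_offset;   // start of the virtual range this entry covers
    int64_t phys_offset;   // where that range begins in the selected storage
    int32_t storage_index; // which backing storage to read from
};

// Entries must be sorted by virt_offset, with entries.front().virt_offset == 0.
// Returns the entry covering 'offset' (the last entry whose virt_offset <= offset).
SimpleEntry FindEntry(const std::vector<SimpleEntry>& entries, int64_t offset) {
    const auto it = std::upper_bound(
        entries.begin(), entries.end(), offset,
        [](int64_t ofs, const SimpleEntry& e) { return ofs < e.virt_offset; });
    return *std::prev(it);
}

// A read at virtual offset 'ofs' then goes to storage entry.storage_index at
// physical offset entry.phys_offset + (ofs - entry.virt_offset), which is the same
// data_offset computation performed by OperatePerEntry in the header that follows.
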
diff --git a/src/core/file_sys/fssystem/fssystem_indirect_storage.h b/src/core/file_sys/fssystem/fssystem_indirect_storage.h
new file mode 100644
index 000000000..7854335bf
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_indirect_storage.h
@@ -0,0 +1,294 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "core/file_sys/errors.h"
7#include "core/file_sys/fssystem/fs_i_storage.h"
8#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
9#include "core/file_sys/fssystem/fssystem_bucket_tree_template_impl.h"
10#include "core/file_sys/vfs.h"
11#include "core/file_sys/vfs_offset.h"
12
13namespace FileSys {
14
15class IndirectStorage : public IReadOnlyStorage {
16 YUZU_NON_COPYABLE(IndirectStorage);
17 YUZU_NON_MOVEABLE(IndirectStorage);
18
19public:
20 static constexpr s32 StorageCount = 2;
21 static constexpr size_t NodeSize = 16_KiB;
22
23 struct Entry {
24 std::array<u8, sizeof(s64)> virt_offset;
25 std::array<u8, sizeof(s64)> phys_offset;
26 s32 storage_index;
27
28 void SetVirtualOffset(const s64& ofs) {
29 std::memcpy(this->virt_offset.data(), std::addressof(ofs), sizeof(s64));
30 }
31
32 s64 GetVirtualOffset() const {
33 s64 offset;
34 std::memcpy(std::addressof(offset), this->virt_offset.data(), sizeof(s64));
35 return offset;
36 }
37
38 void SetPhysicalOffset(const s64& ofs) {
39 std::memcpy(this->phys_offset.data(), std::addressof(ofs), sizeof(s64));
40 }
41
42 s64 GetPhysicalOffset() const {
43 s64 offset;
44 std::memcpy(std::addressof(offset), this->phys_offset.data(), sizeof(s64));
45 return offset;
46 }
47 };
48 static_assert(std::is_trivial_v<Entry>);
49 static_assert(sizeof(Entry) == 0x14);
50
51 struct EntryData {
52 s64 virt_offset;
53 s64 phys_offset;
54 s32 storage_index;
55
56 void Set(const Entry& entry) {
57 this->virt_offset = entry.GetVirtualOffset();
58 this->phys_offset = entry.GetPhysicalOffset();
59 this->storage_index = entry.storage_index;
60 }
61 };
62 static_assert(std::is_trivial_v<EntryData>);
63
64public:
65 IndirectStorage() : m_table(), m_data_storage() {}
66 virtual ~IndirectStorage() {
67 this->Finalize();
68 }
69
70 Result Initialize(VirtualFile table_storage);
71 void Finalize();
72
73 bool IsInitialized() const {
74 return m_table.IsInitialized();
75 }
76
77 Result Initialize(VirtualFile node_storage, VirtualFile entry_storage, s32 entry_count) {
78 R_RETURN(
79 m_table.Initialize(node_storage, entry_storage, NodeSize, sizeof(Entry), entry_count));
80 }
81
82 void SetStorage(s32 idx, VirtualFile storage) {
83 ASSERT(0 <= idx && idx < StorageCount);
84 m_data_storage[idx] = storage;
85 }
86
87 template <typename T>
88 void SetStorage(s32 idx, T storage, s64 offset, s64 size) {
89 ASSERT(0 <= idx && idx < StorageCount);
90 m_data_storage[idx] = std::make_shared<OffsetVfsFile>(storage, size, offset);
91 }
92
93 Result GetEntryList(Entry* out_entries, s32* out_entry_count, s32 entry_count, s64 offset,
94 s64 size);
95
96 virtual size_t GetSize() const override {
97 BucketTree::Offsets offsets{};
98 m_table.GetOffsets(std::addressof(offsets));
99
100 return offsets.end_offset;
101 }
102
103 virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
104
105public:
106 static constexpr s64 QueryHeaderStorageSize() {
107 return BucketTree::QueryHeaderStorageSize();
108 }
109
110 static constexpr s64 QueryNodeStorageSize(s32 entry_count) {
111 return BucketTree::QueryNodeStorageSize(NodeSize, sizeof(Entry), entry_count);
112 }
113
114 static constexpr s64 QueryEntryStorageSize(s32 entry_count) {
115 return BucketTree::QueryEntryStorageSize(NodeSize, sizeof(Entry), entry_count);
116 }
117
118protected:
119 BucketTree& GetEntryTable() {
120 return m_table;
121 }
122
123 VirtualFile& GetDataStorage(s32 index) {
124 ASSERT(0 <= index && index < StorageCount);
125 return m_data_storage[index];
126 }
127
128 template <bool ContinuousCheck, bool RangeCheck, typename F>
129 Result OperatePerEntry(s64 offset, s64 size, F func);
130
131private:
132 struct ContinuousReadingEntry {
133 static constexpr size_t FragmentSizeMax = 4_KiB;
134
135 IndirectStorage::Entry entry;
136
137 s64 GetVirtualOffset() const {
138 return this->entry.GetVirtualOffset();
139 }
140
141 s64 GetPhysicalOffset() const {
142 return this->entry.GetPhysicalOffset();
143 }
144
145 bool IsFragment() const {
146 return this->entry.storage_index != 0;
147 }
148 };
149 static_assert(std::is_trivial_v<ContinuousReadingEntry>);
150
151private:
152 mutable BucketTree m_table;
153 std::array<VirtualFile, StorageCount> m_data_storage;
154};
155
156template <bool ContinuousCheck, bool RangeCheck, typename F>
157Result IndirectStorage::OperatePerEntry(s64 offset, s64 size, F func) {
158 // Validate preconditions.
159 ASSERT(offset >= 0);
160 ASSERT(size >= 0);
161 ASSERT(this->IsInitialized());
162
163 // Succeed if there's nothing to operate on.
164 R_SUCCEED_IF(size == 0);
165
166 // Get the table offsets.
167 BucketTree::Offsets table_offsets;
168 R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));
169
170 // Validate arguments.
171 R_UNLESS(table_offsets.IsInclude(offset, size), ResultOutOfRange);
172
173 // Find the offset in our tree.
174 BucketTree::Visitor visitor;
175 R_TRY(m_table.Find(std::addressof(visitor), offset));
176 {
177 const auto entry_offset = visitor.Get<Entry>()->GetVirtualOffset();
178 R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset),
179 ResultInvalidIndirectEntryOffset);
180 }
181
182 // Prepare to operate in chunks.
183 auto cur_offset = offset;
184 const auto end_offset = offset + static_cast<s64>(size);
185 BucketTree::ContinuousReadingInfo cr_info;
186
187 while (cur_offset < end_offset) {
188 // Get the current entry.
189 const auto cur_entry = *visitor.Get<Entry>();
190
191 // Get and validate the entry's offset.
192 const auto cur_entry_offset = cur_entry.GetVirtualOffset();
193 R_UNLESS(cur_entry_offset <= cur_offset, ResultInvalidIndirectEntryOffset);
194
195 // Validate the storage index.
196 R_UNLESS(0 <= cur_entry.storage_index && cur_entry.storage_index < StorageCount,
197 ResultInvalidIndirectEntryStorageIndex);
198
199 // If we need to check the continuous info, do so.
200 if constexpr (ContinuousCheck) {
201 // Scan, if we need to.
202 if (cr_info.CheckNeedScan()) {
203 R_TRY(visitor.ScanContinuousReading<ContinuousReadingEntry>(
204 std::addressof(cr_info), cur_offset,
205 static_cast<size_t>(end_offset - cur_offset)));
206 }
207
208 // Process a base storage entry.
209 if (cr_info.CanDo()) {
210 // Ensure that we can process.
211 R_UNLESS(cur_entry.storage_index == 0, ResultInvalidIndirectEntryStorageIndex);
212
213 // Ensure that we remain within range.
214 const auto data_offset = cur_offset - cur_entry_offset;
215 const auto cur_entry_phys_offset = cur_entry.GetPhysicalOffset();
216 const auto cur_size = static_cast<s64>(cr_info.GetReadSize());
217
218 // If we should, verify the range.
219 if constexpr (RangeCheck) {
220 // Get the current data storage's size.
221 s64 cur_data_storage_size = m_data_storage[0]->GetSize();
222
223 R_UNLESS(0 <= cur_entry_phys_offset &&
224 cur_entry_phys_offset <= cur_data_storage_size,
225 ResultInvalidIndirectEntryOffset);
226 R_UNLESS(cur_entry_phys_offset + data_offset + cur_size <=
227 cur_data_storage_size,
228 ResultInvalidIndirectStorageSize);
229 }
230
231 // Operate.
232 R_TRY(func(m_data_storage[0], cur_entry_phys_offset + data_offset, cur_offset,
233 cur_size));
234
235 // Mark as done.
236 cr_info.Done();
237 }
238 }
239
240 // Get and validate the next entry offset.
241 s64 next_entry_offset;
242 if (visitor.CanMoveNext()) {
243 R_TRY(visitor.MoveNext());
244 next_entry_offset = visitor.Get<Entry>()->GetVirtualOffset();
245 R_UNLESS(table_offsets.IsInclude(next_entry_offset), ResultInvalidIndirectEntryOffset);
246 } else {
247 next_entry_offset = table_offsets.end_offset;
248 }
249 R_UNLESS(cur_offset < next_entry_offset, ResultInvalidIndirectEntryOffset);
250
251 // Get the offset of the entry in the data we read.
252 const auto data_offset = cur_offset - cur_entry_offset;
253 const auto data_size = (next_entry_offset - cur_entry_offset);
254 ASSERT(data_size > 0);
255
256 // Determine how much is left.
257 const auto remaining_size = end_offset - cur_offset;
258 const auto cur_size = std::min<s64>(remaining_size, data_size - data_offset);
259 ASSERT(cur_size <= size);
260
261 // Operate, if we need to.
262 bool needs_operate;
263 if constexpr (!ContinuousCheck) {
264 needs_operate = true;
265 } else {
266 needs_operate = !cr_info.IsDone() || cur_entry.storage_index != 0;
267 }
268
269 if (needs_operate) {
270 const auto cur_entry_phys_offset = cur_entry.GetPhysicalOffset();
271
272 if constexpr (RangeCheck) {
273 // Get the current data storage's size.
274 s64 cur_data_storage_size = m_data_storage[cur_entry.storage_index]->GetSize();
275
276 // Ensure that we remain within range.
277 R_UNLESS(0 <= cur_entry_phys_offset &&
278 cur_entry_phys_offset <= cur_data_storage_size,
279 ResultIndirectStorageCorrupted);
280 R_UNLESS(cur_entry_phys_offset + data_offset + cur_size <= cur_data_storage_size,
281 ResultIndirectStorageCorrupted);
282 }
283
284 R_TRY(func(m_data_storage[cur_entry.storage_index], cur_entry_phys_offset + data_offset,
285 cur_offset, cur_size));
286 }
287
288 cur_offset += cur_size;
289 }
290
291 R_SUCCEED();
292}
293
294} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.cpp b/src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.cpp
new file mode 100644
index 000000000..2c3da230c
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.cpp
@@ -0,0 +1,30 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "core/file_sys/fssystem/fssystem_integrity_romfs_storage.h"
5
6namespace FileSys {
7
8Result IntegrityRomFsStorage::Initialize(
9 HierarchicalIntegrityVerificationInformation level_hash_info, Hash master_hash,
10 HierarchicalIntegrityVerificationStorage::HierarchicalStorageInformation storage_info,
11 int max_data_cache_entries, int max_hash_cache_entries, s8 buffer_level) {
12 // Set master hash.
13 m_master_hash = master_hash;
14 m_master_hash_storage = std::make_shared<ArrayVfsFile<sizeof(Hash)>>(m_master_hash.value);
15 R_UNLESS(m_master_hash_storage != nullptr,
16 ResultAllocationMemoryFailedInIntegrityRomFsStorageA);
17
18 // Set the master hash storage.
19 storage_info[0] = m_master_hash_storage;
20
21 // Initialize our integrity storage.
22 R_RETURN(m_integrity_storage.Initialize(level_hash_info, storage_info, max_data_cache_entries,
23 max_hash_cache_entries, buffer_level));
24}
25
26void IntegrityRomFsStorage::Finalize() {
27 m_integrity_storage.Finalize();
28}
29
30} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.h b/src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.h
new file mode 100644
index 000000000..5f8512b2a
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.h
@@ -0,0 +1,42 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h"
7#include "core/file_sys/fssystem/fssystem_nca_header.h"
8#include "core/file_sys/vfs_vector.h"
9
10namespace FileSys {
11
12constexpr inline size_t IntegrityLayerCountRomFs = 7;
13constexpr inline size_t IntegrityHashLayerBlockSize = 16_KiB;
14
15class IntegrityRomFsStorage : public IReadOnlyStorage {
16public:
17 IntegrityRomFsStorage() {}
18 virtual ~IntegrityRomFsStorage() override {
19 this->Finalize();
20 }
21
22 Result Initialize(
23 HierarchicalIntegrityVerificationInformation level_hash_info, Hash master_hash,
24 HierarchicalIntegrityVerificationStorage::HierarchicalStorageInformation storage_info,
25 int max_data_cache_entries, int max_hash_cache_entries, s8 buffer_level);
26 void Finalize();
27
28 virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
29 return m_integrity_storage.Read(buffer, size, offset);
30 }
31
32 virtual size_t GetSize() const override {
33 return m_integrity_storage.GetSize();
34 }
35
36private:
37 HierarchicalIntegrityVerificationStorage m_integrity_storage;
38 Hash m_master_hash;
39 std::shared_ptr<ArrayVfsFile<sizeof(Hash)>> m_master_hash_storage;
40};
41
42} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_integrity_verification_storage.cpp b/src/core/file_sys/fssystem/fssystem_integrity_verification_storage.cpp
new file mode 100644
index 000000000..2f73abf86
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_integrity_verification_storage.cpp
@@ -0,0 +1,91 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "common/alignment.h"
5#include "core/file_sys/fssystem/fssystem_integrity_verification_storage.h"
6
7namespace FileSys {
8
9constexpr inline u32 ILog2(u32 val) {
10 ASSERT(val > 0);
11 return static_cast<u32>((sizeof(u32) * 8) - 1 - std::countl_zero<u32>(val));
12}
13
14void IntegrityVerificationStorage::Initialize(VirtualFile hs, VirtualFile ds, s64 verif_block_size,
15 s64 upper_layer_verif_block_size, bool is_real_data) {
16 // Validate preconditions.
17 ASSERT(verif_block_size >= HashSize);
18
19 // Set storages.
20 m_hash_storage = hs;
21 m_data_storage = ds;
22
23 // Set verification block sizes.
24 m_verification_block_size = verif_block_size;
25 m_verification_block_order = ILog2(static_cast<u32>(verif_block_size));
26 ASSERT(m_verification_block_size == 1ll << m_verification_block_order);
27
28 // Set upper layer block sizes.
29 upper_layer_verif_block_size = std::max(upper_layer_verif_block_size, HashSize);
30 m_upper_layer_verification_block_size = upper_layer_verif_block_size;
31 m_upper_layer_verification_block_order = ILog2(static_cast<u32>(upper_layer_verif_block_size));
32 ASSERT(m_upper_layer_verification_block_size == 1ll << m_upper_layer_verification_block_order);
33
34 // Validate sizes.
35 {
36 s64 hash_size = m_hash_storage->GetSize();
37 s64 data_size = m_data_storage->GetSize();
38 ASSERT(((hash_size / HashSize) * m_verification_block_size) >= data_size);
39 }
40
41 // Set data.
42 m_is_real_data = is_real_data;
43}
44
45void IntegrityVerificationStorage::Finalize() {
46 m_hash_storage = VirtualFile();
47 m_data_storage = VirtualFile();
48}
49
50size_t IntegrityVerificationStorage::Read(u8* buffer, size_t size, size_t offset) const {
51 // Succeed if zero size.
52 if (size == 0) {
53 return size;
54 }
55
56 // Validate arguments.
57 ASSERT(buffer != nullptr);
58
59 // Validate the offset.
60 s64 data_size = m_data_storage->GetSize();
61 ASSERT(offset <= static_cast<size_t>(data_size));
62
63 // Validate the access range.
64 ASSERT(R_SUCCEEDED(IStorage::CheckAccessRange(
65 offset, size, Common::AlignUp(data_size, static_cast<size_t>(m_verification_block_size)))));
66
67 // Determine the read extents.
68 size_t read_size = size;
69 if (static_cast<s64>(offset + read_size) > data_size) {
70 // Determine the padding sizes.
71 s64 padding_offset = data_size - offset;
72 size_t padding_size = static_cast<size_t>(
73 m_verification_block_size - (padding_offset & (m_verification_block_size - 1)));
74 ASSERT(static_cast<s64>(padding_size) < m_verification_block_size);
75
76 // Clear the padding.
77 std::memset(static_cast<u8*>(buffer) + padding_offset, 0, padding_size);
78
79 // Set the new in-bounds size.
80 read_size = static_cast<size_t>(data_size - offset);
81 }
82
83 // Perform the read.
84 return m_data_storage->Read(buffer, read_size, offset);
85}
86
87size_t IntegrityVerificationStorage::GetSize() const {
88 return m_data_storage->GetSize();
89}
90
91} // namespace FileSys
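
The end-of-data handling in IntegrityVerificationStorage::Read above can be illustrated with a stand-alone sketch over an in-memory buffer; it assumes block_size is a power of two and that the requested range already passes the block-aligned access check asserted above.

#include <cstdint>
#include <cstring>
#include <vector>

// Simplified stand-in for Read(): copies from 'data' instead of m_data_storage and
// zero-fills the block padding that lies past the real end of the data.
size_t ReadWithBlockPadding(const std::vector<uint8_t>& data, uint8_t* buffer, size_t size,
                            size_t offset, size_t block_size) {
    const size_t data_size = data.size();
    size_t read_size = size;
    if (offset + read_size > data_size) {
        // Zero from the end of the real data up to the next verification-block boundary.
        const size_t padding_offset = data_size - offset;
        const size_t padding_size = block_size - (padding_offset & (block_size - 1));
        std::memset(buffer + padding_offset, 0, padding_size);
        read_size = data_size - offset;
    }
    std::memcpy(buffer, data.data() + offset, read_size);
    return read_size;
}

// Example: with data_size = 0x4a00 and block_size = 0x4000, reading 0x4000 bytes at
// offset 0x4000 copies 0xa00 real bytes and zero-fills the remaining 0x3600 bytes.
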
diff --git a/src/core/file_sys/fssystem/fssystem_integrity_verification_storage.h b/src/core/file_sys/fssystem/fssystem_integrity_verification_storage.h
new file mode 100644
index 000000000..09f76799d
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_integrity_verification_storage.h
@@ -0,0 +1,65 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include <optional>
7
8#include "core/file_sys/fssystem/fs_i_storage.h"
9#include "core/file_sys/fssystem/fs_types.h"
10
11namespace FileSys {
12
13class IntegrityVerificationStorage : public IReadOnlyStorage {
14 YUZU_NON_COPYABLE(IntegrityVerificationStorage);
15 YUZU_NON_MOVEABLE(IntegrityVerificationStorage);
16
17public:
18 static constexpr s64 HashSize = 256 / 8;
19
20 struct BlockHash {
21 std::array<u8, HashSize> hash;
22 };
23 static_assert(std::is_trivial_v<BlockHash>);
24
25public:
26 IntegrityVerificationStorage()
27 : m_verification_block_size(0), m_verification_block_order(0),
28 m_upper_layer_verification_block_size(0), m_upper_layer_verification_block_order(0) {}
29 virtual ~IntegrityVerificationStorage() override {
30 this->Finalize();
31 }
32
33 void Initialize(VirtualFile hs, VirtualFile ds, s64 verif_block_size,
34 s64 upper_layer_verif_block_size, bool is_real_data);
35 void Finalize();
36
37 virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
38 virtual size_t GetSize() const override;
39
40 s64 GetBlockSize() const {
41 return m_verification_block_size;
42 }
43
44private:
45 static void SetValidationBit(BlockHash* hash) {
46 ASSERT(hash != nullptr);
47 hash->hash[HashSize - 1] |= 0x80;
48 }
49
50 static bool IsValidationBit(const BlockHash* hash) {
51 ASSERT(hash != nullptr);
52 return (hash->hash[HashSize - 1] & 0x80) != 0;
53 }
54
55private:
56 VirtualFile m_hash_storage;
57 VirtualFile m_data_storage;
58 s64 m_verification_block_size;
59 s64 m_verification_block_order;
60 s64 m_upper_layer_verification_block_size;
61 s64 m_upper_layer_verification_block_order;
62 bool m_is_real_data;
63};
64
65} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_memory_resource_buffer_hold_storage.h b/src/core/file_sys/fssystem/fssystem_memory_resource_buffer_hold_storage.h
new file mode 100644
index 000000000..c07a127fb
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_memory_resource_buffer_hold_storage.h
@@ -0,0 +1,61 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "core/file_sys/fssystem/fs_i_storage.h"
7
8namespace FileSys {
9
10class MemoryResourceBufferHoldStorage : public IStorage {
11 YUZU_NON_COPYABLE(MemoryResourceBufferHoldStorage);
12 YUZU_NON_MOVEABLE(MemoryResourceBufferHoldStorage);
13
14public:
15 MemoryResourceBufferHoldStorage(VirtualFile storage, size_t buffer_size)
16 : m_storage(std::move(storage)), m_buffer(::operator new(buffer_size)),
17 m_buffer_size(buffer_size) {}
18
19 virtual ~MemoryResourceBufferHoldStorage() {
20 // If we have a buffer, deallocate it.
21 if (m_buffer != nullptr) {
22 ::operator delete(m_buffer);
23 }
24 }
25
26 bool IsValid() const {
27 return m_buffer != nullptr;
28 }
29 void* GetBuffer() const {
30 return m_buffer;
31 }
32
33public:
34 virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
35 // Check pre-conditions.
36 ASSERT(m_storage != nullptr);
37
38 return m_storage->Read(buffer, size, offset);
39 }
40
41 virtual size_t GetSize() const override {
42 // Check pre-conditions.
43 ASSERT(m_storage != nullptr);
44
45 return m_storage->GetSize();
46 }
47
48 virtual size_t Write(const u8* buffer, size_t size, size_t offset) override {
49 // Check pre-conditions.
50 ASSERT(m_storage != nullptr);
51
52 return m_storage->Write(buffer, size, offset);
53 }
54
55private:
56 VirtualFile m_storage;
57 void* m_buffer;
58 size_t m_buffer_size;
59};
60
61} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_nca_file_system_driver.cpp b/src/core/file_sys/fssystem/fssystem_nca_file_system_driver.cpp
new file mode 100644
index 000000000..0f5432203
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_nca_file_system_driver.cpp
@@ -0,0 +1,1351 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.h"
5#include "core/file_sys/fssystem/fssystem_aes_ctr_storage.h"
6#include "core/file_sys/fssystem/fssystem_aes_xts_storage.h"
7#include "core/file_sys/fssystem/fssystem_alignment_matching_storage.h"
8#include "core/file_sys/fssystem/fssystem_compressed_storage.h"
9#include "core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h"
10#include "core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.h"
11#include "core/file_sys/fssystem/fssystem_indirect_storage.h"
12#include "core/file_sys/fssystem/fssystem_integrity_romfs_storage.h"
13#include "core/file_sys/fssystem/fssystem_memory_resource_buffer_hold_storage.h"
14#include "core/file_sys/fssystem/fssystem_nca_file_system_driver.h"
15#include "core/file_sys/fssystem/fssystem_sparse_storage.h"
16#include "core/file_sys/fssystem/fssystem_switch_storage.h"
17#include "core/file_sys/vfs_offset.h"
18#include "core/file_sys/vfs_vector.h"
19
20namespace FileSys {
21
22namespace {
23
24constexpr inline s32 IntegrityDataCacheCount = 24;
25constexpr inline s32 IntegrityHashCacheCount = 8;
26
27constexpr inline s32 IntegrityDataCacheCountForMeta = 16;
28constexpr inline s32 IntegrityHashCacheCountForMeta = 2;
29
30class SharedNcaBodyStorage : public IReadOnlyStorage {
31 YUZU_NON_COPYABLE(SharedNcaBodyStorage);
32 YUZU_NON_MOVEABLE(SharedNcaBodyStorage);
33
34private:
35 VirtualFile m_storage;
36 std::shared_ptr<NcaReader> m_nca_reader;
37
38public:
39 SharedNcaBodyStorage(VirtualFile s, std::shared_ptr<NcaReader> r)
40 : m_storage(std::move(s)), m_nca_reader(std::move(r)) {}
41
42 virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
43 // Validate pre-conditions.
44 ASSERT(m_storage != nullptr);
45
46 // Read from the base storage.
47 return m_storage->Read(buffer, size, offset);
48 }
49
50 virtual size_t GetSize() const override {
51 // Validate pre-conditions.
52 ASSERT(m_storage != nullptr);
53
54 return m_storage->GetSize();
55 }
56};
57
58inline s64 GetFsOffset(const NcaReader& reader, s32 fs_index) {
59 return static_cast<s64>(reader.GetFsOffset(fs_index));
60}
61
62inline s64 GetFsEndOffset(const NcaReader& reader, s32 fs_index) {
63 return static_cast<s64>(reader.GetFsEndOffset(fs_index));
64}
65
66using Sha256DataRegion = NcaFsHeader::Region;
67using IntegrityLevelInfo = NcaFsHeader::HashData::IntegrityMetaInfo::LevelHashInfo;
68using IntegrityDataInfo = IntegrityLevelInfo::HierarchicalIntegrityVerificationLevelInformation;
69
70} // namespace
71
72Result NcaFileSystemDriver::OpenStorageWithContext(VirtualFile* out,
73 NcaFsHeaderReader* out_header_reader,
74 s32 fs_index, StorageContext* ctx) {
75 // Open storage.
76 R_RETURN(this->OpenStorageImpl(out, out_header_reader, fs_index, ctx));
77}
78
79Result NcaFileSystemDriver::OpenStorageImpl(VirtualFile* out, NcaFsHeaderReader* out_header_reader,
80 s32 fs_index, StorageContext* ctx) {
81 // Validate preconditions.
82 ASSERT(out != nullptr);
83 ASSERT(out_header_reader != nullptr);
84 ASSERT(0 <= fs_index && fs_index < NcaHeader::FsCountMax);
85
86 // Validate the fs index.
87 R_UNLESS(m_reader->HasFsInfo(fs_index), ResultPartitionNotFound);
88
89 // Initialize our header reader for the fs index.
90 R_TRY(out_header_reader->Initialize(*m_reader, fs_index));
91
92 // Declare the storage we're opening.
93 VirtualFile storage;
94
95 // Process sparse layer.
96 s64 fs_data_offset = 0;
97 if (out_header_reader->ExistsSparseLayer()) {
98 // Get the sparse info.
99 const auto& sparse_info = out_header_reader->GetSparseInfo();
100
101 // Create based on whether we have a meta hash layer.
102 if (out_header_reader->ExistsSparseMetaHashLayer()) {
103 // Create the sparse storage with verification.
104 R_TRY(this->CreateSparseStorageWithVerification(
105 std::addressof(storage), std::addressof(fs_data_offset),
106 ctx != nullptr ? std::addressof(ctx->current_sparse_storage) : nullptr,
107 ctx != nullptr ? std::addressof(ctx->sparse_storage_meta_storage) : nullptr,
108 ctx != nullptr ? std::addressof(ctx->sparse_layer_info_storage) : nullptr, fs_index,
109 out_header_reader->GetAesCtrUpperIv(), sparse_info,
110 out_header_reader->GetSparseMetaDataHashDataInfo(),
111 out_header_reader->GetSparseMetaHashType()));
112 } else {
113 // Create the sparse storage.
114 R_TRY(this->CreateSparseStorage(
115 std::addressof(storage), std::addressof(fs_data_offset),
116 ctx != nullptr ? std::addressof(ctx->current_sparse_storage) : nullptr,
117 ctx != nullptr ? std::addressof(ctx->sparse_storage_meta_storage) : nullptr,
118 fs_index, out_header_reader->GetAesCtrUpperIv(), sparse_info));
119 }
120 } else {
121 // Get the data offsets.
122 fs_data_offset = GetFsOffset(*m_reader, fs_index);
123 const auto fs_end_offset = GetFsEndOffset(*m_reader, fs_index);
124
125 // Validate that we're within range.
126 const auto data_size = fs_end_offset - fs_data_offset;
127 R_UNLESS(data_size > 0, ResultInvalidNcaHeader);
128
129 // Create the body substorage.
130 R_TRY(this->CreateBodySubStorage(std::addressof(storage), fs_data_offset, data_size));
131
132 // Potentially save the body substorage to our context.
133 if (ctx != nullptr) {
134 ctx->body_substorage = storage;
135 }
136 }
137
138 // Process patch layer.
139 const auto& patch_info = out_header_reader->GetPatchInfo();
140 VirtualFile patch_meta_aes_ctr_ex_meta_storage;
141 VirtualFile patch_meta_indirect_meta_storage;
142 if (out_header_reader->ExistsPatchMetaHashLayer()) {
143 // Check the meta hash type.
144 R_UNLESS(out_header_reader->GetPatchMetaHashType() ==
145 NcaFsHeader::MetaDataHashType::HierarchicalIntegrity,
146 ResultRomNcaInvalidPatchMetaDataHashType);
147
148 // Create the patch meta storage.
149 R_TRY(this->CreatePatchMetaStorage(
150 std::addressof(patch_meta_aes_ctr_ex_meta_storage),
151 std::addressof(patch_meta_indirect_meta_storage),
152 ctx != nullptr ? std::addressof(ctx->patch_layer_info_storage) : nullptr, storage,
153 fs_data_offset, out_header_reader->GetAesCtrUpperIv(), patch_info,
154 out_header_reader->GetPatchMetaDataHashDataInfo()));
155 }
156
157 if (patch_info.HasAesCtrExTable()) {
158 // Check the encryption type.
159 ASSERT(out_header_reader->GetEncryptionType() == NcaFsHeader::EncryptionType::None ||
160 out_header_reader->GetEncryptionType() == NcaFsHeader::EncryptionType::AesCtrEx ||
161 out_header_reader->GetEncryptionType() ==
162 NcaFsHeader::EncryptionType::AesCtrExSkipLayerHash);
163
164 // Create the ex meta storage.
165 VirtualFile aes_ctr_ex_storage_meta_storage = patch_meta_aes_ctr_ex_meta_storage;
166 if (aes_ctr_ex_storage_meta_storage == nullptr) {
167 // If we don't have a meta storage, we must not have a patch meta hash layer.
168 ASSERT(!out_header_reader->ExistsPatchMetaHashLayer());
169
170 R_TRY(this->CreateAesCtrExStorageMetaStorage(
171 std::addressof(aes_ctr_ex_storage_meta_storage), storage, fs_data_offset,
172 out_header_reader->GetEncryptionType(), out_header_reader->GetAesCtrUpperIv(),
173 patch_info));
174 }
175
176 // Create the ex storage.
177 VirtualFile aes_ctr_ex_storage;
178 R_TRY(this->CreateAesCtrExStorage(
179 std::addressof(aes_ctr_ex_storage),
180 ctx != nullptr ? std::addressof(ctx->aes_ctr_ex_storage) : nullptr, std::move(storage),
181 aes_ctr_ex_storage_meta_storage, fs_data_offset, out_header_reader->GetAesCtrUpperIv(),
182 patch_info));
183
184 // Set the base storage as the ex storage.
185 storage = std::move(aes_ctr_ex_storage);
186
187 // Potentially save storages to our context.
188 if (ctx != nullptr) {
189 ctx->aes_ctr_ex_storage_meta_storage = aes_ctr_ex_storage_meta_storage;
190 ctx->aes_ctr_ex_storage_data_storage = storage;
191 ctx->fs_data_storage = storage;
192 }
193 } else {
194 // Create the appropriate storage for the encryption type.
195 switch (out_header_reader->GetEncryptionType()) {
196 case NcaFsHeader::EncryptionType::None:
197 // If there's no encryption, use the base storage we made previously.
198 break;
199 case NcaFsHeader::EncryptionType::AesXts:
200 R_TRY(this->CreateAesXtsStorage(std::addressof(storage), std::move(storage),
201 fs_data_offset));
202 break;
203 case NcaFsHeader::EncryptionType::AesCtr:
204 R_TRY(this->CreateAesCtrStorage(std::addressof(storage), std::move(storage),
205 fs_data_offset, out_header_reader->GetAesCtrUpperIv(),
206 AlignmentStorageRequirement::None));
207 break;
208 case NcaFsHeader::EncryptionType::AesCtrSkipLayerHash: {
209 // Create the aes ctr storage.
210 VirtualFile aes_ctr_storage;
211 R_TRY(this->CreateAesCtrStorage(std::addressof(aes_ctr_storage), storage,
212 fs_data_offset, out_header_reader->GetAesCtrUpperIv(),
213 AlignmentStorageRequirement::None));
214
215 // Create region switch storage.
216 R_TRY(this->CreateRegionSwitchStorage(std::addressof(storage), out_header_reader,
217 std::move(storage), std::move(aes_ctr_storage)));
218 } break;
219 default:
220 R_THROW(ResultInvalidNcaFsHeaderEncryptionType);
221 }
222
223 // Potentially save storages to our context.
224 if (ctx != nullptr) {
225 ctx->fs_data_storage = storage;
226 }
227 }
228
229 // Process indirect layer.
230 if (patch_info.HasIndirectTable()) {
231 // Create the indirect meta storage.
232 VirtualFile indirect_storage_meta_storage = patch_meta_indirect_meta_storage;
233 if (indirect_storage_meta_storage == nullptr) {
234 // If we don't have a meta storage, we must not have a patch meta hash layer.
235 ASSERT(!out_header_reader->ExistsPatchMetaHashLayer());
236
237 R_TRY(this->CreateIndirectStorageMetaStorage(
238 std::addressof(indirect_storage_meta_storage), storage, patch_info));
239 }
240
241 // Potentially save the indirect meta storage to our context.
242 if (ctx != nullptr) {
243 ctx->indirect_storage_meta_storage = indirect_storage_meta_storage;
244 }
245
246 // Get the original indirectable storage.
247 VirtualFile original_indirectable_storage;
248 if (m_original_reader != nullptr && m_original_reader->HasFsInfo(fs_index)) {
249 // Create a driver for the original.
250 NcaFileSystemDriver original_driver(m_original_reader);
251
252 // Create a header reader for the original.
253 NcaFsHeaderReader original_header_reader;
254 R_TRY(original_header_reader.Initialize(*m_original_reader, fs_index));
255
256 // Open original indirectable storage.
257 R_TRY(original_driver.OpenIndirectableStorageAsOriginal(
258 std::addressof(original_indirectable_storage),
259 std::addressof(original_header_reader), ctx));
260 } else if (ctx != nullptr && ctx->external_original_storage != nullptr) {
261 // Use the external original storage.
262 original_indirectable_storage = ctx->external_original_storage;
263 } else {
264 // Allocate a dummy memory storage as original storage.
265 original_indirectable_storage = std::make_shared<VectorVfsFile>();
266 R_UNLESS(original_indirectable_storage != nullptr,
267 ResultAllocationMemoryFailedAllocateShared);
268 }
269
270 // Create the indirect storage.
271 VirtualFile indirect_storage;
272 R_TRY(this->CreateIndirectStorage(
273 std::addressof(indirect_storage),
274 ctx != nullptr ? std::addressof(ctx->indirect_storage) : nullptr, std::move(storage),
275 std::move(original_indirectable_storage), std::move(indirect_storage_meta_storage),
276 patch_info));
277
278 // Set storage as the indirect storage.
279 storage = std::move(indirect_storage);
280 }
281
282 // Check if we're sparse or requested to skip the integrity layer.
283 if (out_header_reader->ExistsSparseLayer() || (ctx != nullptr && ctx->open_raw_storage)) {
284 *out = std::move(storage);
285 R_SUCCEED();
286 }
287
288 // Create the non-raw storage.
289 R_RETURN(this->CreateStorageByRawStorage(out, out_header_reader, std::move(storage), ctx));
290}
291
292Result NcaFileSystemDriver::CreateStorageByRawStorage(VirtualFile* out,
293 const NcaFsHeaderReader* header_reader,
294 VirtualFile raw_storage,
295 StorageContext* ctx) {
296 // Initialize storage as raw storage.
297 VirtualFile storage = std::move(raw_storage);
298
299 // Process hash/integrity layer.
300 switch (header_reader->GetHashType()) {
301 case NcaFsHeader::HashType::HierarchicalSha256Hash:
302 R_TRY(this->CreateSha256Storage(std::addressof(storage), std::move(storage),
303 header_reader->GetHashData().hierarchical_sha256_data));
304 break;
305 case NcaFsHeader::HashType::HierarchicalIntegrityHash:
306 R_TRY(this->CreateIntegrityVerificationStorage(
307 std::addressof(storage), std::move(storage),
308 header_reader->GetHashData().integrity_meta_info));
309 break;
310 default:
311 R_THROW(ResultInvalidNcaFsHeaderHashType);
312 }
313
314 // Process compression layer.
315 if (header_reader->ExistsCompressionLayer()) {
316 R_TRY(this->CreateCompressedStorage(
317 std::addressof(storage),
318 ctx != nullptr ? std::addressof(ctx->compressed_storage) : nullptr,
319 ctx != nullptr ? std::addressof(ctx->compressed_storage_meta_storage) : nullptr,
320 std::move(storage), header_reader->GetCompressionInfo()));
321 }
322
323 // Set output storage.
324 *out = std::move(storage);
325 R_SUCCEED();
326}
327
328Result NcaFileSystemDriver::OpenIndirectableStorageAsOriginal(
329 VirtualFile* out, const NcaFsHeaderReader* header_reader, StorageContext* ctx) {
330 // Get the fs index.
331 const auto fs_index = header_reader->GetFsIndex();
332
333 // Declare the storage we're opening.
334 VirtualFile storage;
335
336 // Process sparse layer.
337 s64 fs_data_offset = 0;
338 if (header_reader->ExistsSparseLayer()) {
339 // Get the sparse info.
340 const auto& sparse_info = header_reader->GetSparseInfo();
341
342 // Create based on whether we have a meta hash layer.
343 if (header_reader->ExistsSparseMetaHashLayer()) {
344 // Create the sparse storage with verification.
345 R_TRY(this->CreateSparseStorageWithVerification(
346 std::addressof(storage), std::addressof(fs_data_offset),
347 ctx != nullptr ? std::addressof(ctx->original_sparse_storage) : nullptr,
348 ctx != nullptr ? std::addressof(ctx->sparse_storage_meta_storage) : nullptr,
349 ctx != nullptr ? std::addressof(ctx->sparse_layer_info_storage) : nullptr, fs_index,
350 header_reader->GetAesCtrUpperIv(), sparse_info,
351 header_reader->GetSparseMetaDataHashDataInfo(),
352 header_reader->GetSparseMetaHashType()));
353 } else {
354 // Create the sparse storage.
355 R_TRY(this->CreateSparseStorage(
356 std::addressof(storage), std::addressof(fs_data_offset),
357 ctx != nullptr ? std::addressof(ctx->original_sparse_storage) : nullptr,
358 ctx != nullptr ? std::addressof(ctx->sparse_storage_meta_storage) : nullptr,
359 fs_index, header_reader->GetAesCtrUpperIv(), sparse_info));
360 }
361 } else {
362 // Get the data offsets.
363 fs_data_offset = GetFsOffset(*m_reader, fs_index);
364 const auto fs_end_offset = GetFsEndOffset(*m_reader, fs_index);
365
366 // Validate that we're within range.
367 const auto data_size = fs_end_offset - fs_data_offset;
368 R_UNLESS(data_size > 0, ResultInvalidNcaHeader);
369
370 // Create the body substorage.
371 R_TRY(this->CreateBodySubStorage(std::addressof(storage), fs_data_offset, data_size));
372 }
373
374 // Create the appropriate storage for the encryption type.
375 switch (header_reader->GetEncryptionType()) {
376 case NcaFsHeader::EncryptionType::None:
377 // If there's no encryption, use the base storage we made previously.
378 break;
379 case NcaFsHeader::EncryptionType::AesXts:
380 R_TRY(
381 this->CreateAesXtsStorage(std::addressof(storage), std::move(storage), fs_data_offset));
382 break;
383 case NcaFsHeader::EncryptionType::AesCtr:
384 R_TRY(this->CreateAesCtrStorage(std::addressof(storage), std::move(storage), fs_data_offset,
385 header_reader->GetAesCtrUpperIv(),
386 AlignmentStorageRequirement::CacheBlockSize));
387 break;
388 default:
389 R_THROW(ResultInvalidNcaFsHeaderEncryptionType);
390 }
391
392 // Set output storage.
393 *out = std::move(storage);
394 R_SUCCEED();
395}
396
397Result NcaFileSystemDriver::CreateBodySubStorage(VirtualFile* out, s64 offset, s64 size) {
398 // Create the body storage.
399 auto body_storage =
400 std::make_shared<SharedNcaBodyStorage>(m_reader->GetSharedBodyStorage(), m_reader);
401 R_UNLESS(body_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
402
403 // Get the body storage size.
404 s64 body_size = body_storage->GetSize();
405
406 // Check that we're within range.
407 R_UNLESS(offset + size <= body_size, ResultNcaBaseStorageOutOfRangeB);
408
409 // Create substorage.
410 auto body_substorage = std::make_shared<OffsetVfsFile>(std::move(body_storage), size, offset);
411 R_UNLESS(body_substorage != nullptr, ResultAllocationMemoryFailedAllocateShared);
412
413 // Set the output storage.
414 *out = std::move(body_substorage);
415 R_SUCCEED();
416}
417
418Result NcaFileSystemDriver::CreateAesCtrStorage(
419 VirtualFile* out, VirtualFile base_storage, s64 offset, const NcaAesCtrUpperIv& upper_iv,
420 AlignmentStorageRequirement alignment_storage_requirement) {
421 // Check pre-conditions.
422 ASSERT(out != nullptr);
423 ASSERT(base_storage != nullptr);
424
425 // Create the iv.
426 std::array<u8, AesCtrStorage::IvSize> iv{};
427 AesCtrStorage::MakeIv(iv.data(), sizeof(iv), upper_iv.value, offset);
428
429 // Create the ctr storage.
430 VirtualFile aes_ctr_storage;
431 if (m_reader->HasExternalDecryptionKey()) {
432 aes_ctr_storage = std::make_shared<AesCtrStorage>(
433 std::move(base_storage), m_reader->GetExternalDecryptionKey(), AesCtrStorage::KeySize,
434 iv.data(), AesCtrStorage::IvSize);
435 R_UNLESS(aes_ctr_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
436 } else {
437 // Create software decryption storage.
438 auto sw_storage = std::make_shared<AesCtrStorage>(
439 base_storage, m_reader->GetDecryptionKey(NcaHeader::DecryptionKey_AesCtr),
440 AesCtrStorage::KeySize, iv.data(), AesCtrStorage::IvSize);
441 R_UNLESS(sw_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
442
443 aes_ctr_storage = std::move(sw_storage);
444 }
445
446 // Create alignment matching storage.
447 auto aligned_storage = std::make_shared<AlignmentMatchingStorage<NcaHeader::CtrBlockSize, 1>>(
448 std::move(aes_ctr_storage));
449 R_UNLESS(aligned_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
450
451 // Set the out storage.
452 *out = std::move(aligned_storage);
453 R_SUCCEED();
454}
455
456Result NcaFileSystemDriver::CreateAesXtsStorage(VirtualFile* out, VirtualFile base_storage,
457 s64 offset) {
458 // Check pre-conditions.
459 ASSERT(out != nullptr);
460 ASSERT(base_storage != nullptr);
461
462 // Create the iv.
463 std::array<u8, AesXtsStorage::IvSize> iv{};
464 AesXtsStorage::MakeAesXtsIv(iv.data(), sizeof(iv), offset, NcaHeader::XtsBlockSize);
465
466 // Make the aes xts storage.
467 const auto* const key1 = m_reader->GetDecryptionKey(NcaHeader::DecryptionKey_AesXts1);
468 const auto* const key2 = m_reader->GetDecryptionKey(NcaHeader::DecryptionKey_AesXts2);
469 auto xts_storage =
470 std::make_shared<AesXtsStorage>(std::move(base_storage), key1, key2, AesXtsStorage::KeySize,
471 iv.data(), AesXtsStorage::IvSize, NcaHeader::XtsBlockSize);
472 R_UNLESS(xts_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
473
474 // Create alignment matching storage.
475 auto aligned_storage = std::make_shared<AlignmentMatchingStorage<NcaHeader::XtsBlockSize, 1>>(
476 std::move(xts_storage));
477 R_UNLESS(aligned_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
478
479 // Set the out storage.
480    *out = std::move(aligned_storage);
481 R_SUCCEED();
482}
483
484Result NcaFileSystemDriver::CreateSparseStorageMetaStorage(VirtualFile* out,
485 VirtualFile base_storage, s64 offset,
486 const NcaAesCtrUpperIv& upper_iv,
487 const NcaSparseInfo& sparse_info) {
488 // Validate preconditions.
489 ASSERT(out != nullptr);
490 ASSERT(base_storage != nullptr);
491
492 // Get the base storage size.
493 s64 base_size = base_storage->GetSize();
494
495 // Get the meta extents.
496 const auto meta_offset = sparse_info.bucket.offset;
497 const auto meta_size = sparse_info.bucket.size;
498 R_UNLESS(meta_offset + meta_size - offset <= base_size, ResultNcaBaseStorageOutOfRangeB);
499
500 // Create the encrypted storage.
501 auto enc_storage =
502 std::make_shared<OffsetVfsFile>(std::move(base_storage), meta_size, meta_offset);
503 R_UNLESS(enc_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
504
505 // Create the decrypted storage.
506 VirtualFile decrypted_storage;
507 R_TRY(this->CreateAesCtrStorage(std::addressof(decrypted_storage), std::move(enc_storage),
508 offset + meta_offset, sparse_info.MakeAesCtrUpperIv(upper_iv),
509 AlignmentStorageRequirement::None));
510
511 // Create buffered storage.
512 std::vector<u8> meta_data(meta_size);
513 decrypted_storage->Read(meta_data.data(), meta_size, 0);
514
515 auto buffered_storage = std::make_shared<VectorVfsFile>(std::move(meta_data));
516 R_UNLESS(buffered_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
517
518 // Set the output.
519 *out = std::move(buffered_storage);
520 R_SUCCEED();
521}
522
523Result NcaFileSystemDriver::CreateSparseStorageCore(std::shared_ptr<SparseStorage>* out,
524 VirtualFile base_storage, s64 base_size,
525 VirtualFile meta_storage,
526 const NcaSparseInfo& sparse_info,
527 bool external_info) {
528 // Validate preconditions.
529 ASSERT(out != nullptr);
530 ASSERT(base_storage != nullptr);
531 ASSERT(meta_storage != nullptr);
532
533 // Read and verify the bucket tree header.
534 BucketTree::Header header;
535 std::memcpy(std::addressof(header), sparse_info.bucket.header.data(), sizeof(header));
536 R_TRY(header.Verify());
537
538 // Determine storage extents.
539 const auto node_offset = 0;
540 const auto node_size = SparseStorage::QueryNodeStorageSize(header.entry_count);
541 const auto entry_offset = node_offset + node_size;
542 const auto entry_size = SparseStorage::QueryEntryStorageSize(header.entry_count);
543
544 // Create the sparse storage.
545 auto sparse_storage = std::make_shared<SparseStorage>();
546 R_UNLESS(sparse_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
547
548 // Sanity check that we can be doing this.
549 ASSERT(header.entry_count != 0);
550
551 // Initialize the sparse storage.
552 R_TRY(sparse_storage->Initialize(
553 std::make_shared<OffsetVfsFile>(meta_storage, node_size, node_offset),
554 std::make_shared<OffsetVfsFile>(meta_storage, entry_size, entry_offset),
555 header.entry_count));
556
557 // If not external, set the data storage.
558 if (!external_info) {
559 sparse_storage->SetDataStorage(
560 std::make_shared<OffsetVfsFile>(std::move(base_storage), base_size, 0));
561 }
562
563 // Set the output.
564 *out = std::move(sparse_storage);
565 R_SUCCEED();
566}
567
568Result NcaFileSystemDriver::CreateSparseStorage(VirtualFile* out, s64* out_fs_data_offset,
569 std::shared_ptr<SparseStorage>* out_sparse_storage,
570 VirtualFile* out_meta_storage, s32 index,
571 const NcaAesCtrUpperIv& upper_iv,
572 const NcaSparseInfo& sparse_info) {
573 // Validate preconditions.
574 ASSERT(out != nullptr);
575 ASSERT(out_fs_data_offset != nullptr);
576
577 // Check the sparse info generation.
578 R_UNLESS(sparse_info.generation != 0, ResultInvalidNcaHeader);
579
580 // Read and verify the bucket tree header.
581 BucketTree::Header header;
582 std::memcpy(std::addressof(header), sparse_info.bucket.header.data(), sizeof(header));
583 R_TRY(header.Verify());
584
585 // Determine the storage extents.
586 const auto fs_offset = GetFsOffset(*m_reader, index);
587 const auto fs_end_offset = GetFsEndOffset(*m_reader, index);
588 const auto fs_size = fs_end_offset - fs_offset;
589
590 // Create the sparse storage.
591 std::shared_ptr<SparseStorage> sparse_storage;
592 if (header.entry_count != 0) {
593 // Create the body substorage.
594 VirtualFile body_substorage;
595 R_TRY(this->CreateBodySubStorage(std::addressof(body_substorage),
596 sparse_info.physical_offset,
597 sparse_info.GetPhysicalSize()));
598
599 // Create the meta storage.
600 VirtualFile meta_storage;
601 R_TRY(this->CreateSparseStorageMetaStorage(std::addressof(meta_storage), body_substorage,
602 sparse_info.physical_offset, upper_iv,
603 sparse_info));
604
605 // Potentially set the output meta storage.
606 if (out_meta_storage != nullptr) {
607 *out_meta_storage = meta_storage;
608 }
609
610 // Create the sparse storage.
611 R_TRY(this->CreateSparseStorageCore(std::addressof(sparse_storage), body_substorage,
612 sparse_info.GetPhysicalSize(), std::move(meta_storage),
613 sparse_info, false));
614 } else {
615 // If there are no entries, there's nothing to actually do.
616 sparse_storage = std::make_shared<SparseStorage>();
617 R_UNLESS(sparse_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
618
619 sparse_storage->Initialize(fs_size);
620 }
621
622 // Potentially set the output sparse storage.
623 if (out_sparse_storage != nullptr) {
624 *out_sparse_storage = sparse_storage;
625 }
626
627 // Set the output fs data offset.
628 *out_fs_data_offset = fs_offset;
629
630 // Set the output storage.
631 *out = std::move(sparse_storage);
632 R_SUCCEED();
633}
634
635Result NcaFileSystemDriver::CreateSparseStorageMetaStorageWithVerification(
636 VirtualFile* out, VirtualFile* out_layer_info_storage, VirtualFile base_storage, s64 offset,
637 const NcaAesCtrUpperIv& upper_iv, const NcaSparseInfo& sparse_info,
638 const NcaMetaDataHashDataInfo& meta_data_hash_data_info) {
639 // Validate preconditions.
640 ASSERT(out != nullptr);
641 ASSERT(base_storage != nullptr);
642
643 // Get the base storage size.
644 s64 base_size = base_storage->GetSize();
645
646 // Get the meta extents.
647 const auto meta_offset = sparse_info.bucket.offset;
648 const auto meta_size = sparse_info.bucket.size;
649 R_UNLESS(meta_offset + meta_size - offset <= base_size, ResultNcaBaseStorageOutOfRangeB);
650
651 // Get the meta data hash data extents.
652 const s64 meta_data_hash_data_offset = meta_data_hash_data_info.offset;
653 const s64 meta_data_hash_data_size =
654 Common::AlignUp<s64>(meta_data_hash_data_info.size, NcaHeader::CtrBlockSize);
655 R_UNLESS(meta_data_hash_data_offset + meta_data_hash_data_size <= base_size,
656 ResultNcaBaseStorageOutOfRangeB);
657
658 // Check that the meta is before the hash data.
659 R_UNLESS(meta_offset + meta_size <= meta_data_hash_data_offset,
660 ResultRomNcaInvalidSparseMetaDataHashDataOffset);
661
662 // Check that offsets are appropriately aligned.
663 R_UNLESS(Common::IsAligned<s64>(meta_data_hash_data_offset, NcaHeader::CtrBlockSize),
664 ResultRomNcaInvalidSparseMetaDataHashDataOffset);
665 R_UNLESS(Common::IsAligned<s64>(meta_offset, NcaHeader::CtrBlockSize),
666 ResultInvalidNcaFsHeader);
667
668 // Create the meta storage.
669 auto enc_storage = std::make_shared<OffsetVfsFile>(
670 std::move(base_storage),
671 meta_data_hash_data_offset + meta_data_hash_data_size - meta_offset, meta_offset);
672 R_UNLESS(enc_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
673
674 // Create the decrypted storage.
675 VirtualFile decrypted_storage;
676 R_TRY(this->CreateAesCtrStorage(std::addressof(decrypted_storage), std::move(enc_storage),
677 offset + meta_offset, sparse_info.MakeAesCtrUpperIv(upper_iv),
678 AlignmentStorageRequirement::None));
679
680 // Create the verification storage.
681 VirtualFile integrity_storage;
682 Result rc = this->CreateIntegrityVerificationStorageForMeta(
683 std::addressof(integrity_storage), out_layer_info_storage, std::move(decrypted_storage),
684 meta_offset, meta_data_hash_data_info);
685 if (rc == ResultInvalidNcaMetaDataHashDataSize) {
686 R_THROW(ResultRomNcaInvalidSparseMetaDataHashDataSize);
687 }
688 if (rc == ResultInvalidNcaMetaDataHashDataHash) {
689 R_THROW(ResultRomNcaInvalidSparseMetaDataHashDataHash);
690 }
691 R_TRY(rc);
692
693 // Create the meta storage.
694 auto meta_storage = std::make_shared<OffsetVfsFile>(std::move(integrity_storage), meta_size, 0);
695 R_UNLESS(meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
696
697 // Set the output.
698 *out = std::move(meta_storage);
699 R_SUCCEED();
700}
701
702Result NcaFileSystemDriver::CreateSparseStorageWithVerification(
703 VirtualFile* out, s64* out_fs_data_offset, std::shared_ptr<SparseStorage>* out_sparse_storage,
704 VirtualFile* out_meta_storage, VirtualFile* out_layer_info_storage, s32 index,
705 const NcaAesCtrUpperIv& upper_iv, const NcaSparseInfo& sparse_info,
706 const NcaMetaDataHashDataInfo& meta_data_hash_data_info,
707 NcaFsHeader::MetaDataHashType meta_data_hash_type) {
708 // Validate preconditions.
709 ASSERT(out != nullptr);
710 ASSERT(out_fs_data_offset != nullptr);
711
712 // Check the sparse info generation.
713 R_UNLESS(sparse_info.generation != 0, ResultInvalidNcaHeader);
714
715 // Read and verify the bucket tree header.
716 BucketTree::Header header;
717 std::memcpy(std::addressof(header), sparse_info.bucket.header.data(), sizeof(header));
718 R_TRY(header.Verify());
719
720 // Determine the storage extents.
721 const auto fs_offset = GetFsOffset(*m_reader, index);
722 const auto fs_end_offset = GetFsEndOffset(*m_reader, index);
723 const auto fs_size = fs_end_offset - fs_offset;
724
725 // Create the sparse storage.
726 std::shared_ptr<SparseStorage> sparse_storage;
727 if (header.entry_count != 0) {
728 // Create the body substorage.
729 VirtualFile body_substorage;
730 R_TRY(this->CreateBodySubStorage(
731 std::addressof(body_substorage), sparse_info.physical_offset,
732 Common::AlignUp<s64>(static_cast<s64>(meta_data_hash_data_info.offset) +
733 static_cast<s64>(meta_data_hash_data_info.size),
734 NcaHeader::CtrBlockSize)));
735
736 // Check the meta data hash type.
737 R_UNLESS(meta_data_hash_type == NcaFsHeader::MetaDataHashType::HierarchicalIntegrity,
738 ResultRomNcaInvalidSparseMetaDataHashType);
739
740 // Create the meta storage.
741 VirtualFile meta_storage;
742 R_TRY(this->CreateSparseStorageMetaStorageWithVerification(
743 std::addressof(meta_storage), out_layer_info_storage, body_substorage,
744 sparse_info.physical_offset, upper_iv, sparse_info, meta_data_hash_data_info));
745
746 // Potentially set the output meta storage.
747 if (out_meta_storage != nullptr) {
748 *out_meta_storage = meta_storage;
749 }
750
751 // Create the sparse storage.
752 R_TRY(this->CreateSparseStorageCore(std::addressof(sparse_storage), body_substorage,
753 sparse_info.GetPhysicalSize(), std::move(meta_storage),
754 sparse_info, false));
755 } else {
756 // If there are no entries, there's nothing to actually do.
757 sparse_storage = std::make_shared<SparseStorage>();
758 R_UNLESS(sparse_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
759
760 sparse_storage->Initialize(fs_size);
761 }
762
763 // Potentially set the output sparse storage.
764 if (out_sparse_storage != nullptr) {
765 *out_sparse_storage = sparse_storage;
766 }
767
768 // Set the output fs data offset.
769 *out_fs_data_offset = fs_offset;
770
771 // Set the output storage.
772 *out = std::move(sparse_storage);
773 R_SUCCEED();
774}
775
776Result NcaFileSystemDriver::CreateAesCtrExStorageMetaStorage(
777 VirtualFile* out, VirtualFile base_storage, s64 offset,
778 NcaFsHeader::EncryptionType encryption_type, const NcaAesCtrUpperIv& upper_iv,
779 const NcaPatchInfo& patch_info) {
780 // Validate preconditions.
781 ASSERT(out != nullptr);
782 ASSERT(base_storage != nullptr);
783 ASSERT(encryption_type == NcaFsHeader::EncryptionType::None ||
784 encryption_type == NcaFsHeader::EncryptionType::AesCtrEx ||
785 encryption_type == NcaFsHeader::EncryptionType::AesCtrExSkipLayerHash);
786 ASSERT(patch_info.HasAesCtrExTable());
787
788 // Validate patch info extents.
789 R_UNLESS(patch_info.indirect_size > 0, ResultInvalidNcaPatchInfoIndirectSize);
790 R_UNLESS(patch_info.aes_ctr_ex_size > 0, ResultInvalidNcaPatchInfoAesCtrExSize);
791 R_UNLESS(patch_info.indirect_size + patch_info.indirect_offset <= patch_info.aes_ctr_ex_offset,
792 ResultInvalidNcaPatchInfoAesCtrExOffset);
793
794 // Get the base storage size.
795 s64 base_size = base_storage->GetSize();
796
797 // Get and validate the meta extents.
798 const s64 meta_offset = patch_info.aes_ctr_ex_offset;
799 const s64 meta_size =
800 Common::AlignUp(static_cast<s64>(patch_info.aes_ctr_ex_size), NcaHeader::XtsBlockSize);
801 R_UNLESS(meta_offset + meta_size <= base_size, ResultNcaBaseStorageOutOfRangeB);
802
803 // Create the encrypted storage.
804 auto enc_storage =
805 std::make_shared<OffsetVfsFile>(std::move(base_storage), meta_size, meta_offset);
806 R_UNLESS(enc_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
807
808 // Create the decrypted storage.
809 VirtualFile decrypted_storage;
810 if (encryption_type != NcaFsHeader::EncryptionType::None) {
811 R_TRY(this->CreateAesCtrStorage(std::addressof(decrypted_storage), std::move(enc_storage),
812 offset + meta_offset, upper_iv,
813 AlignmentStorageRequirement::None));
814 } else {
815 // If encryption type is none, don't do any decryption.
816 decrypted_storage = std::move(enc_storage);
817 }
818
819 // Create meta storage.
820 auto meta_storage = std::make_shared<OffsetVfsFile>(decrypted_storage, meta_size, 0);
821 R_UNLESS(meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
822
823 // Create buffered storage.
824 std::vector<u8> meta_data(meta_size);
825 meta_storage->Read(meta_data.data(), meta_size, 0);
826
827 auto buffered_storage = std::make_shared<VectorVfsFile>(std::move(meta_data));
828 R_UNLESS(buffered_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
829
830 // Set the output.
831 *out = std::move(buffered_storage);
832 R_SUCCEED();
833}
834
835Result NcaFileSystemDriver::CreateAesCtrExStorage(
836 VirtualFile* out, std::shared_ptr<AesCtrCounterExtendedStorage>* out_ext,
837 VirtualFile base_storage, VirtualFile meta_storage, s64 counter_offset,
838 const NcaAesCtrUpperIv& upper_iv, const NcaPatchInfo& patch_info) {
839 // Validate pre-conditions.
840 ASSERT(out != nullptr);
841 ASSERT(base_storage != nullptr);
842 ASSERT(meta_storage != nullptr);
843 ASSERT(patch_info.HasAesCtrExTable());
844
845 // Read the bucket tree header.
846 BucketTree::Header header;
847 std::memcpy(std::addressof(header), patch_info.aes_ctr_ex_header.data(), sizeof(header));
848 R_TRY(header.Verify());
849
850 // Determine the bucket extents.
851 const auto entry_count = header.entry_count;
852 const s64 data_offset = 0;
853 const s64 data_size = patch_info.aes_ctr_ex_offset;
854 const s64 node_offset = 0;
855 const s64 node_size = AesCtrCounterExtendedStorage::QueryNodeStorageSize(entry_count);
856 const s64 entry_offset = node_offset + node_size;
857 const s64 entry_size = AesCtrCounterExtendedStorage::QueryEntryStorageSize(entry_count);
858
859 // Create bucket storages.
860 auto data_storage =
861 std::make_shared<OffsetVfsFile>(std::move(base_storage), data_size, data_offset);
862 auto node_storage = std::make_shared<OffsetVfsFile>(meta_storage, node_size, node_offset);
863 auto entry_storage = std::make_shared<OffsetVfsFile>(meta_storage, entry_size, entry_offset);
864
865 // Get the secure value.
866 const auto secure_value = upper_iv.part.secure_value;
867
868 // Create the aes ctr ex storage.
869 VirtualFile aes_ctr_ex_storage;
870 if (m_reader->HasExternalDecryptionKey()) {
871 // Create the decryptor.
872 std::unique_ptr<AesCtrCounterExtendedStorage::IDecryptor> decryptor;
873 R_TRY(AesCtrCounterExtendedStorage::CreateSoftwareDecryptor(std::addressof(decryptor)));
874
875 // Create the aes ctr ex storage.
876 auto impl_storage = std::make_shared<AesCtrCounterExtendedStorage>();
877 R_UNLESS(impl_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
878
879 // Initialize the aes ctr ex storage.
880 R_TRY(impl_storage->Initialize(m_reader->GetExternalDecryptionKey(), AesCtrStorage::KeySize,
881 secure_value, counter_offset, data_storage, node_storage,
882 entry_storage, entry_count, std::move(decryptor)));
883
884 // Potentially set the output implementation storage.
885 if (out_ext != nullptr) {
886 *out_ext = impl_storage;
887 }
888
889 // Set the implementation storage.
890 aes_ctr_ex_storage = std::move(impl_storage);
891 } else {
892 // Create the software decryptor.
893 std::unique_ptr<AesCtrCounterExtendedStorage::IDecryptor> sw_decryptor;
894 R_TRY(AesCtrCounterExtendedStorage::CreateSoftwareDecryptor(std::addressof(sw_decryptor)));
895
896 // Make the software storage.
897 auto sw_storage = std::make_shared<AesCtrCounterExtendedStorage>();
898 R_UNLESS(sw_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
899
900 // Initialize the software storage.
901 R_TRY(sw_storage->Initialize(m_reader->GetDecryptionKey(NcaHeader::DecryptionKey_AesCtr),
902 AesCtrStorage::KeySize, secure_value, counter_offset,
903 data_storage, node_storage, entry_storage, entry_count,
904 std::move(sw_decryptor)));
905
906 // Potentially set the output implementation storage.
907 if (out_ext != nullptr) {
908 *out_ext = sw_storage;
909 }
910
911 // Set the implementation storage.
912 aes_ctr_ex_storage = std::move(sw_storage);
913 }
914
915 // Create an alignment-matching storage.
916 using AlignedStorage = AlignmentMatchingStorage<NcaHeader::CtrBlockSize, 1>;
917 auto aligned_storage = std::make_shared<AlignedStorage>(std::move(aes_ctr_ex_storage));
918 R_UNLESS(aligned_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
919
920 // Set the output.
921 *out = std::move(aligned_storage);
922 R_SUCCEED();
923}
924
925Result NcaFileSystemDriver::CreateIndirectStorageMetaStorage(VirtualFile* out,
926 VirtualFile base_storage,
927 const NcaPatchInfo& patch_info) {
928 // Validate preconditions.
929 ASSERT(out != nullptr);
930 ASSERT(base_storage != nullptr);
931 ASSERT(patch_info.HasIndirectTable());
932
933 // Get the base storage size.
934 s64 base_size = base_storage->GetSize();
935
936 // Check that we're within range.
937 R_UNLESS(patch_info.indirect_offset + patch_info.indirect_size <= base_size,
938 ResultNcaBaseStorageOutOfRangeE);
939
940 // Create the meta storage.
941 auto meta_storage = std::make_shared<OffsetVfsFile>(base_storage, patch_info.indirect_size,
942 patch_info.indirect_offset);
943 R_UNLESS(meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
944
945 // Create buffered storage.
946 std::vector<u8> meta_data(patch_info.indirect_size);
947 meta_storage->Read(meta_data.data(), patch_info.indirect_size, 0);
948
949 auto buffered_storage = std::make_shared<VectorVfsFile>(std::move(meta_data));
950 R_UNLESS(buffered_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
951
952 // Set the output.
953 *out = std::move(buffered_storage);
954 R_SUCCEED();
955}
956
957Result NcaFileSystemDriver::CreateIndirectStorage(
958 VirtualFile* out, std::shared_ptr<IndirectStorage>* out_ind, VirtualFile base_storage,
959 VirtualFile original_data_storage, VirtualFile meta_storage, const NcaPatchInfo& patch_info) {
960 // Validate preconditions.
961 ASSERT(out != nullptr);
962 ASSERT(base_storage != nullptr);
963 ASSERT(meta_storage != nullptr);
964 ASSERT(patch_info.HasIndirectTable());
965
966 // Read the bucket tree header.
967 BucketTree::Header header;
968 std::memcpy(std::addressof(header), patch_info.indirect_header.data(), sizeof(header));
969 R_TRY(header.Verify());
970
971 // Determine the storage sizes.
972 const auto node_size = IndirectStorage::QueryNodeStorageSize(header.entry_count);
973 const auto entry_size = IndirectStorage::QueryEntryStorageSize(header.entry_count);
974 R_UNLESS(node_size + entry_size <= patch_info.indirect_size,
975 ResultInvalidNcaIndirectStorageOutOfRange);
976
977 // Get the indirect data size.
978 const s64 indirect_data_size = patch_info.indirect_offset;
979 ASSERT(Common::IsAligned(indirect_data_size, NcaHeader::XtsBlockSize));
980
981 // Create the indirect data storage.
982 auto indirect_data_storage =
983 std::make_shared<OffsetVfsFile>(base_storage, indirect_data_size, 0);
984 R_UNLESS(indirect_data_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
985
986 // Create the indirect storage.
987 auto indirect_storage = std::make_shared<IndirectStorage>();
988 R_UNLESS(indirect_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
989
990 // Initialize the indirect storage.
991 R_TRY(indirect_storage->Initialize(
992 std::make_shared<OffsetVfsFile>(meta_storage, node_size, 0),
993 std::make_shared<OffsetVfsFile>(meta_storage, entry_size, node_size), header.entry_count));
994
995 // Get the original data size.
996 s64 original_data_size = original_data_storage->GetSize();
997
998 // Set the indirect storages.
999 indirect_storage->SetStorage(
1000 0, std::make_shared<OffsetVfsFile>(original_data_storage, original_data_size, 0));
1001 indirect_storage->SetStorage(
1002 1, std::make_shared<OffsetVfsFile>(indirect_data_storage, indirect_data_size, 0));
1003
1004 // If necessary, set the output indirect storage.
1005 if (out_ind != nullptr) {
1006 *out_ind = indirect_storage;
1007 }
1008
1009 // Set the output.
1010 *out = std::move(indirect_storage);
1011 R_SUCCEED();
1012}
1013
1014Result NcaFileSystemDriver::CreatePatchMetaStorage(
1015 VirtualFile* out_aes_ctr_ex_meta, VirtualFile* out_indirect_meta,
1016 VirtualFile* out_layer_info_storage, VirtualFile base_storage, s64 offset,
1017 const NcaAesCtrUpperIv& upper_iv, const NcaPatchInfo& patch_info,
1018 const NcaMetaDataHashDataInfo& meta_data_hash_data_info) {
1019 // Validate preconditions.
1020 ASSERT(out_aes_ctr_ex_meta != nullptr);
1021 ASSERT(out_indirect_meta != nullptr);
1022 ASSERT(base_storage != nullptr);
1023 ASSERT(patch_info.HasAesCtrExTable());
1024 ASSERT(patch_info.HasIndirectTable());
1025 ASSERT(Common::IsAligned<s64>(patch_info.aes_ctr_ex_size, NcaHeader::XtsBlockSize));
1026
1027 // Validate patch info extents.
1028 R_UNLESS(patch_info.indirect_size > 0, ResultInvalidNcaPatchInfoIndirectSize);
1029 R_UNLESS(patch_info.aes_ctr_ex_size >= 0, ResultInvalidNcaPatchInfoAesCtrExSize);
1030 R_UNLESS(patch_info.indirect_size + patch_info.indirect_offset <= patch_info.aes_ctr_ex_offset,
1031 ResultInvalidNcaPatchInfoAesCtrExOffset);
1032 R_UNLESS(patch_info.aes_ctr_ex_offset + patch_info.aes_ctr_ex_size <=
1033 meta_data_hash_data_info.offset,
1034 ResultRomNcaInvalidPatchMetaDataHashDataOffset);
1035
1036 // Get the base storage size.
1037 s64 base_size = base_storage->GetSize();
1038
1039 // Check that extents remain within range.
1040 R_UNLESS(patch_info.indirect_offset + patch_info.indirect_size <= base_size,
1041 ResultNcaBaseStorageOutOfRangeE);
1042 R_UNLESS(patch_info.aes_ctr_ex_offset + patch_info.aes_ctr_ex_size <= base_size,
1043 ResultNcaBaseStorageOutOfRangeB);
1044
1045 // Check that metadata hash data extents remain within range.
1046 const s64 meta_data_hash_data_offset = meta_data_hash_data_info.offset;
1047 const s64 meta_data_hash_data_size =
1048 Common::AlignUp<s64>(meta_data_hash_data_info.size, NcaHeader::CtrBlockSize);
1049 R_UNLESS(meta_data_hash_data_offset + meta_data_hash_data_size <= base_size,
1050 ResultNcaBaseStorageOutOfRangeB);
1051
1052 // Create the encrypted storage.
1053 auto enc_storage = std::make_shared<OffsetVfsFile>(
1054 std::move(base_storage),
1055 meta_data_hash_data_offset + meta_data_hash_data_size - patch_info.indirect_offset,
1056 patch_info.indirect_offset);
1057 R_UNLESS(enc_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
1058
1059 // Create the decrypted storage.
1060 VirtualFile decrypted_storage;
1061 R_TRY(this->CreateAesCtrStorage(std::addressof(decrypted_storage), std::move(enc_storage),
1062 offset + patch_info.indirect_offset, upper_iv,
1063 AlignmentStorageRequirement::None));
1064
1065 // Create the verification storage.
1066 VirtualFile integrity_storage;
1067 Result rc = this->CreateIntegrityVerificationStorageForMeta(
1068 std::addressof(integrity_storage), out_layer_info_storage, std::move(decrypted_storage),
1069 patch_info.indirect_offset, meta_data_hash_data_info);
1070 if (rc == ResultInvalidNcaMetaDataHashDataSize) {
1071 R_THROW(ResultRomNcaInvalidPatchMetaDataHashDataSize);
1072 }
1073 if (rc == ResultInvalidNcaMetaDataHashDataHash) {
1074 R_THROW(ResultRomNcaInvalidPatchMetaDataHashDataHash);
1075 }
1076 R_TRY(rc);
1077
1078 // Create the indirect meta storage.
1079 auto indirect_meta_storage =
1080 std::make_shared<OffsetVfsFile>(integrity_storage, patch_info.indirect_size,
1081 patch_info.indirect_offset - patch_info.indirect_offset);
1082 R_UNLESS(indirect_meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
1083
1084 // Create the aes ctr ex meta storage.
1085 auto aes_ctr_ex_meta_storage =
1086 std::make_shared<OffsetVfsFile>(integrity_storage, patch_info.aes_ctr_ex_size,
1087 patch_info.aes_ctr_ex_offset - patch_info.indirect_offset);
1088 R_UNLESS(aes_ctr_ex_meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
1089
1090 // Set the output.
1091 *out_aes_ctr_ex_meta = std::move(aes_ctr_ex_meta_storage);
1092 *out_indirect_meta = std::move(indirect_meta_storage);
1093 R_SUCCEED();
1094}
1095
1096Result NcaFileSystemDriver::CreateSha256Storage(
1097 VirtualFile* out, VirtualFile base_storage,
1098 const NcaFsHeader::HashData::HierarchicalSha256Data& hash_data) {
1099 // Validate preconditions.
1100 ASSERT(out != nullptr);
1101 ASSERT(base_storage != nullptr);
1102
1103 // Define storage types.
1104 using VerificationStorage = HierarchicalSha256Storage;
1105
1106 // Validate the hash data.
1107 R_UNLESS(Common::IsPowerOfTwo(hash_data.hash_block_size),
1108 ResultInvalidHierarchicalSha256BlockSize);
1109 R_UNLESS(hash_data.hash_layer_count == VerificationStorage::LayerCount - 1,
1110 ResultInvalidHierarchicalSha256LayerCount);
1111
1112 // Get the regions.
1113 const auto& hash_region = hash_data.hash_layer_region[0];
1114 const auto& data_region = hash_data.hash_layer_region[1];
1115
1116 // Determine buffer sizes.
1117 constexpr s32 CacheBlockCount = 2;
1118 const auto hash_buffer_size = static_cast<size_t>(hash_region.size);
1119 const auto cache_buffer_size = CacheBlockCount * hash_data.hash_block_size;
1120 const auto total_buffer_size = hash_buffer_size + cache_buffer_size;
1121
1122 // Make a buffer holder storage.
1123 auto buffer_hold_storage = std::make_shared<MemoryResourceBufferHoldStorage>(
1124 std::move(base_storage), total_buffer_size);
1125 R_UNLESS(buffer_hold_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
1126 R_UNLESS(buffer_hold_storage->IsValid(), ResultAllocationMemoryFailedInNcaFileSystemDriverI);
1127
1128 // Get storage size.
1129 s64 base_size = buffer_hold_storage->GetSize();
1130
1131 // Check that we're within range.
1132 R_UNLESS(hash_region.offset + hash_region.size <= base_size, ResultNcaBaseStorageOutOfRangeC);
1133 R_UNLESS(data_region.offset + data_region.size <= base_size, ResultNcaBaseStorageOutOfRangeC);
1134
1135 // Create the master hash storage.
1136 auto master_hash_storage =
1137 std::make_shared<ArrayVfsFile<sizeof(Hash)>>(hash_data.fs_data_master_hash.value);
1138
1139 // Make the verification storage.
1140 auto verification_storage = std::make_shared<VerificationStorage>();
1141 R_UNLESS(verification_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
1142
1143 // Make layer storages.
1144 std::array<VirtualFile, VerificationStorage::LayerCount> layer_storages{
1145 std::make_shared<OffsetVfsFile>(master_hash_storage, sizeof(Hash), 0),
1146 std::make_shared<OffsetVfsFile>(buffer_hold_storage, hash_region.size, hash_region.offset),
1147 std::make_shared<OffsetVfsFile>(buffer_hold_storage, data_region.size, data_region.offset),
1148 };
1149
1150 // Initialize the verification storage.
1151 R_TRY(verification_storage->Initialize(layer_storages.data(), VerificationStorage::LayerCount,
1152 hash_data.hash_block_size,
1153 buffer_hold_storage->GetBuffer(), hash_buffer_size));
1154
1155 // Set the output.
1156 *out = std::move(verification_storage);
1157 R_SUCCEED();
1158}
1159
1160Result NcaFileSystemDriver::CreateIntegrityVerificationStorage(
1161 VirtualFile* out, VirtualFile base_storage,
1162 const NcaFsHeader::HashData::IntegrityMetaInfo& meta_info) {
1163 R_RETURN(this->CreateIntegrityVerificationStorageImpl(
1164 out, base_storage, meta_info, 0, IntegrityDataCacheCount, IntegrityHashCacheCount,
1165 HierarchicalIntegrityVerificationStorage::GetDefaultDataCacheBufferLevel(
1166 meta_info.level_hash_info.max_layers)));
1167}
1168
1169Result NcaFileSystemDriver::CreateIntegrityVerificationStorageForMeta(
1170 VirtualFile* out, VirtualFile* out_layer_info_storage, VirtualFile base_storage, s64 offset,
1171 const NcaMetaDataHashDataInfo& meta_data_hash_data_info) {
1172 // Validate preconditions.
1173 ASSERT(out != nullptr);
1174
1175 // Check the meta data hash data size.
1176 R_UNLESS(meta_data_hash_data_info.size == sizeof(NcaMetaDataHashData),
1177 ResultInvalidNcaMetaDataHashDataSize);
1178
1179 // Read the meta data hash data.
1180 NcaMetaDataHashData meta_data_hash_data;
1181 base_storage->ReadObject(std::addressof(meta_data_hash_data),
1182 meta_data_hash_data_info.offset - offset);
1183
1184 // Set the out layer info storage, if necessary.
1185 if (out_layer_info_storage != nullptr) {
1186 auto layer_info_storage = std::make_shared<OffsetVfsFile>(
1187 base_storage,
1188 meta_data_hash_data_info.offset + meta_data_hash_data_info.size -
1189 meta_data_hash_data.layer_info_offset,
1190 meta_data_hash_data.layer_info_offset - offset);
1191 R_UNLESS(layer_info_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
1192
1193 *out_layer_info_storage = std::move(layer_info_storage);
1194 }
1195
1196 // Create the meta storage.
1197 auto meta_storage = std::make_shared<OffsetVfsFile>(
1198 std::move(base_storage), meta_data_hash_data_info.offset - offset, 0);
1199 R_UNLESS(meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
1200
1201 // Create the integrity verification storage.
1202 R_RETURN(this->CreateIntegrityVerificationStorageImpl(
1203 out, std::move(meta_storage), meta_data_hash_data.integrity_meta_info,
1204 meta_data_hash_data.layer_info_offset - offset, IntegrityDataCacheCountForMeta,
1205 IntegrityHashCacheCountForMeta, 0));
1206}
1207
1208Result NcaFileSystemDriver::CreateIntegrityVerificationStorageImpl(
1209 VirtualFile* out, VirtualFile base_storage,
1210 const NcaFsHeader::HashData::IntegrityMetaInfo& meta_info, s64 layer_info_offset,
1211 int max_data_cache_entries, int max_hash_cache_entries, s8 buffer_level) {
1212 // Validate preconditions.
1213 ASSERT(out != nullptr);
1214 ASSERT(base_storage != nullptr);
1215 ASSERT(layer_info_offset >= 0);
1216
1217 // Define storage types.
1218 using VerificationStorage = HierarchicalIntegrityVerificationStorage;
1219 using StorageInfo = VerificationStorage::HierarchicalStorageInformation;
1220
1221 // Validate the meta info.
1222 HierarchicalIntegrityVerificationInformation level_hash_info;
1223 std::memcpy(std::addressof(level_hash_info), std::addressof(meta_info.level_hash_info),
1224 sizeof(level_hash_info));
1225
1226 R_UNLESS(IntegrityMinLayerCount <= level_hash_info.max_layers,
1227 ResultInvalidNcaHierarchicalIntegrityVerificationLayerCount);
1228 R_UNLESS(level_hash_info.max_layers <= IntegrityMaxLayerCount,
1229 ResultInvalidNcaHierarchicalIntegrityVerificationLayerCount);
1230
1231 // Get the base storage size.
1232 s64 base_storage_size = base_storage->GetSize();
1233
1234 // Create storage info.
1235 StorageInfo storage_info;
1236 for (s32 i = 0; i < static_cast<s32>(level_hash_info.max_layers - 2); ++i) {
1237 const auto& layer_info = level_hash_info.info[i];
1238 R_UNLESS(layer_info_offset + layer_info.offset + layer_info.size <= base_storage_size,
1239 ResultNcaBaseStorageOutOfRangeD);
1240
1241 storage_info[i + 1] = std::make_shared<OffsetVfsFile>(
1242 base_storage, layer_info.size, layer_info_offset + layer_info.offset);
1243 }
1244
1245 // Set the last layer info.
1246 const auto& layer_info = level_hash_info.info[level_hash_info.max_layers - 2];
1247 const s64 last_layer_info_offset = layer_info_offset > 0 ? 0LL : layer_info.offset.Get();
1248 R_UNLESS(last_layer_info_offset + layer_info.size <= base_storage_size,
1249 ResultNcaBaseStorageOutOfRangeD);
1250 if (layer_info_offset > 0) {
1251 R_UNLESS(last_layer_info_offset + layer_info.size <= layer_info_offset,
1252 ResultRomNcaInvalidIntegrityLayerInfoOffset);
1253 }
1254 storage_info.SetDataStorage(std::make_shared<OffsetVfsFile>(
1255 std::move(base_storage), layer_info.size, last_layer_info_offset));
1256
1257 // Make the integrity romfs storage.
1258 auto integrity_storage = std::make_shared<IntegrityRomFsStorage>();
1259 R_UNLESS(integrity_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
1260
1261 // Initialize the integrity storage.
1262 R_TRY(integrity_storage->Initialize(level_hash_info, meta_info.master_hash, storage_info,
1263 max_data_cache_entries, max_hash_cache_entries,
1264 buffer_level));
1265
1266 // Set the output.
1267 *out = std::move(integrity_storage);
1268 R_SUCCEED();
1269}
1270
1271Result NcaFileSystemDriver::CreateRegionSwitchStorage(VirtualFile* out,
1272 const NcaFsHeaderReader* header_reader,
1273 VirtualFile inside_storage,
1274 VirtualFile outside_storage) {
1275 // Check pre-conditions.
1276 ASSERT(header_reader->GetHashType() == NcaFsHeader::HashType::HierarchicalIntegrityHash);
1277
1278 // Create the region.
1279 RegionSwitchStorage::Region region = {};
1280 R_TRY(header_reader->GetHashTargetOffset(std::addressof(region.size)));
1281
1282 // Create the region switch storage.
1283 auto region_switch_storage = std::make_shared<RegionSwitchStorage>(
1284 std::move(inside_storage), std::move(outside_storage), region);
1285 R_UNLESS(region_switch_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
1286
1287 // Set the output.
1288 *out = std::move(region_switch_storage);
1289 R_SUCCEED();
1290}
1291
1292Result NcaFileSystemDriver::CreateCompressedStorage(VirtualFile* out,
1293 std::shared_ptr<CompressedStorage>* out_cmp,
1294 VirtualFile* out_meta, VirtualFile base_storage,
1295 const NcaCompressionInfo& compression_info) {
1296 R_RETURN(this->CreateCompressedStorage(out, out_cmp, out_meta, std::move(base_storage),
1297 compression_info, m_reader->GetDecompressor()));
1298}
1299
1300Result NcaFileSystemDriver::CreateCompressedStorage(VirtualFile* out,
1301 std::shared_ptr<CompressedStorage>* out_cmp,
1302 VirtualFile* out_meta, VirtualFile base_storage,
1303 const NcaCompressionInfo& compression_info,
1304 GetDecompressorFunction get_decompressor) {
1305 // Check pre-conditions.
1306 ASSERT(out != nullptr);
1307 ASSERT(base_storage != nullptr);
1308 ASSERT(get_decompressor != nullptr);
1309
1310 // Read and verify the bucket tree header.
1311 BucketTree::Header header;
1312 std::memcpy(std::addressof(header), compression_info.bucket.header.data(), sizeof(header));
1313 R_TRY(header.Verify());
1314
1315 // Determine the storage extents.
1316 const auto table_offset = compression_info.bucket.offset;
1317 const auto table_size = compression_info.bucket.size;
1318 const auto node_size = CompressedStorage::QueryNodeStorageSize(header.entry_count);
1319 const auto entry_size = CompressedStorage::QueryEntryStorageSize(header.entry_count);
1320 R_UNLESS(node_size + entry_size <= table_size, ResultInvalidCompressedStorageSize);
1321
1322 // If we should, set the output meta storage.
1323 if (out_meta != nullptr) {
1324 auto meta_storage = std::make_shared<OffsetVfsFile>(base_storage, table_size, table_offset);
1325 R_UNLESS(meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
1326
1327 *out_meta = std::move(meta_storage);
1328 }
1329
1330 // Allocate the compressed storage.
1331 auto compressed_storage = std::make_shared<CompressedStorage>();
1332 R_UNLESS(compressed_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
1333
1334 // Initialize the compressed storage.
1335 R_TRY(compressed_storage->Initialize(
1336 std::make_shared<OffsetVfsFile>(base_storage, table_offset, 0),
1337 std::make_shared<OffsetVfsFile>(base_storage, node_size, table_offset),
1338 std::make_shared<OffsetVfsFile>(base_storage, entry_size, table_offset + node_size),
1339 header.entry_count, 64_KiB, 640_KiB, get_decompressor, 16_KiB, 16_KiB, 32));
1340
1341 // Potentially set the output compressed storage.
1342 if (out_cmp) {
1343 *out_cmp = compressed_storage;
1344 }
1345
1346 // Set the output.
1347 *out = std::move(compressed_storage);
1348 R_SUCCEED();
1349}
1350
1351} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_nca_file_system_driver.h b/src/core/file_sys/fssystem/fssystem_nca_file_system_driver.h
new file mode 100644
index 000000000..5771a21fc
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_nca_file_system_driver.h
@@ -0,0 +1,364 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "core/file_sys/fssystem/fssystem_compression_common.h"
7#include "core/file_sys/fssystem/fssystem_nca_header.h"
8#include "core/file_sys/vfs.h"
9
10namespace FileSys {
11
12class CompressedStorage;
13class AesCtrCounterExtendedStorage;
14class IndirectStorage;
15class SparseStorage;
16
17struct NcaCryptoConfiguration;
18
19using KeyGenerationFunction = void (*)(void* dst_key, size_t dst_key_size, const void* src_key,
20 size_t src_key_size, s32 key_type);
21using VerifySign1Function = bool (*)(const void* sig, size_t sig_size, const void* data,
22 size_t data_size, u8 generation);
23
24struct NcaCryptoConfiguration {
25 static constexpr size_t Rsa2048KeyModulusSize = 2048 / 8;
26 static constexpr size_t Rsa2048KeyPublicExponentSize = 3;
27 static constexpr size_t Rsa2048KeyPrivateExponentSize = Rsa2048KeyModulusSize;
28
29 static constexpr size_t Aes128KeySize = 128 / 8;
30
31 static constexpr size_t Header1SignatureKeyGenerationMax = 1;
32
33 static constexpr s32 KeyAreaEncryptionKeyIndexCount = 3;
34 static constexpr s32 HeaderEncryptionKeyCount = 2;
35
36 static constexpr u8 KeyAreaEncryptionKeyIndexZeroKey = 0xFF;
37
38 static constexpr size_t KeyGenerationMax = 32;
39
40 std::array<const u8*, Header1SignatureKeyGenerationMax + 1> header_1_sign_key_moduli;
41 std::array<u8, Rsa2048KeyPublicExponentSize> header_1_sign_key_public_exponent;
42 std::array<std::array<u8, Aes128KeySize>, KeyAreaEncryptionKeyIndexCount>
43 key_area_encryption_key_source;
44 std::array<u8, Aes128KeySize> header_encryption_key_source;
45 std::array<std::array<u8, Aes128KeySize>, HeaderEncryptionKeyCount>
46 header_encrypted_encryption_keys;
47 KeyGenerationFunction generate_key;
48 VerifySign1Function verify_sign1;
49 bool is_plaintext_header_available;
50 bool is_available_sw_key;
51};
52static_assert(std::is_trivial_v<NcaCryptoConfiguration>);
53
54struct NcaCompressionConfiguration {
55 GetDecompressorFunction get_decompressor;
56};
57static_assert(std::is_trivial_v<NcaCompressionConfiguration>);
58
59constexpr inline s32 KeyAreaEncryptionKeyCount =
60 NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount *
61 NcaCryptoConfiguration::KeyGenerationMax;
62
63enum class KeyType : s32 {
64 ZeroKey = -2,
65 InvalidKey = -1,
66 NcaHeaderKey1 = KeyAreaEncryptionKeyCount + 0,
67 NcaHeaderKey2 = KeyAreaEncryptionKeyCount + 1,
68 NcaExternalKey = KeyAreaEncryptionKeyCount + 2,
69 SaveDataDeviceUniqueMac = KeyAreaEncryptionKeyCount + 3,
70 SaveDataSeedUniqueMac = KeyAreaEncryptionKeyCount + 4,
71 SaveDataTransferMac = KeyAreaEncryptionKeyCount + 5,
72};
73
74constexpr inline bool IsInvalidKeyTypeValue(s32 key_type) {
75 return key_type < 0;
76}
77
78constexpr inline s32 GetKeyTypeValue(u8 key_index, u8 key_generation) {
79 if (key_index == NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexZeroKey) {
80 return static_cast<s32>(KeyType::ZeroKey);
81 }
82
83 if (key_index >= NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount) {
84 return static_cast<s32>(KeyType::InvalidKey);
85 }
86
87 return NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount * key_generation + key_index;
88}
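// Worked example (illustrative only, not in the original source): with
// KeyAreaEncryptionKeyIndexCount == 3, GetKeyTypeValue(1, 2) yields 3 * 2 + 1 == 7,
// GetKeyTypeValue(0xFF, 5) yields KeyType::ZeroKey, and GetKeyTypeValue(3, 0)
// yields KeyType::InvalidKey because the key index is out of range.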
89
90class NcaReader {
91 YUZU_NON_COPYABLE(NcaReader);
92 YUZU_NON_MOVEABLE(NcaReader);
93
94public:
95 NcaReader();
96 ~NcaReader();
97
98 Result Initialize(VirtualFile base_storage, const NcaCryptoConfiguration& crypto_cfg,
99 const NcaCompressionConfiguration& compression_cfg);
100
101 VirtualFile GetSharedBodyStorage();
102 u32 GetMagic() const;
103 NcaHeader::DistributionType GetDistributionType() const;
104 NcaHeader::ContentType GetContentType() const;
105 u8 GetHeaderSign1KeyGeneration() const;
106 u8 GetKeyGeneration() const;
107 u8 GetKeyIndex() const;
108 u64 GetContentSize() const;
109 u64 GetProgramId() const;
110 u32 GetContentIndex() const;
111 u32 GetSdkAddonVersion() const;
112 void GetRightsId(u8* dst, size_t dst_size) const;
113 bool HasFsInfo(s32 index) const;
114 s32 GetFsCount() const;
115 const Hash& GetFsHeaderHash(s32 index) const;
116 void GetFsHeaderHash(Hash* dst, s32 index) const;
117 void GetFsInfo(NcaHeader::FsInfo* dst, s32 index) const;
118 u64 GetFsOffset(s32 index) const;
119 u64 GetFsEndOffset(s32 index) const;
120 u64 GetFsSize(s32 index) const;
121 void GetEncryptedKey(void* dst, size_t size) const;
122 const void* GetDecryptionKey(s32 index) const;
123 bool HasValidInternalKey() const;
124 bool HasInternalDecryptionKeyForAesHw() const;
125 bool IsSoftwareAesPrioritized() const;
126 void PrioritizeSoftwareAes();
127 bool IsAvailableSwKey() const;
128 bool HasExternalDecryptionKey() const;
129 const void* GetExternalDecryptionKey() const;
130 void SetExternalDecryptionKey(const void* src, size_t size);
131 void GetRawData(void* dst, size_t dst_size) const;
132 NcaHeader::EncryptionType GetEncryptionType() const;
133 Result ReadHeader(NcaFsHeader* dst, s32 index) const;
134
135 GetDecompressorFunction GetDecompressor() const;
136
137 bool GetHeaderSign1Valid() const;
138
139 void GetHeaderSign2(void* dst, size_t size) const;
140
141private:
142 NcaHeader m_header;
143 std::array<std::array<u8, NcaCryptoConfiguration::Aes128KeySize>,
144 NcaHeader::DecryptionKey_Count>
145 m_decryption_keys;
146 VirtualFile m_body_storage;
147 VirtualFile m_header_storage;
148 std::array<u8, NcaCryptoConfiguration::Aes128KeySize> m_external_decryption_key;
149 bool m_is_software_aes_prioritized;
150 bool m_is_available_sw_key;
151 NcaHeader::EncryptionType m_header_encryption_type;
152 bool m_is_header_sign1_signature_valid;
153 GetDecompressorFunction m_get_decompressor;
154};
155
156class NcaFsHeaderReader {
157 YUZU_NON_COPYABLE(NcaFsHeaderReader);
158 YUZU_NON_MOVEABLE(NcaFsHeaderReader);
159
160public:
161 NcaFsHeaderReader() : m_fs_index(-1) {
162 std::memset(std::addressof(m_data), 0, sizeof(m_data));
163 }
164
165 Result Initialize(const NcaReader& reader, s32 index);
166 bool IsInitialized() const {
167 return m_fs_index >= 0;
168 }
169
170 void GetRawData(void* dst, size_t dst_size) const;
171
172 NcaFsHeader::HashData& GetHashData();
173 const NcaFsHeader::HashData& GetHashData() const;
174 u16 GetVersion() const;
175 s32 GetFsIndex() const;
176 NcaFsHeader::FsType GetFsType() const;
177 NcaFsHeader::HashType GetHashType() const;
178 NcaFsHeader::EncryptionType GetEncryptionType() const;
179 NcaPatchInfo& GetPatchInfo();
180 const NcaPatchInfo& GetPatchInfo() const;
181 const NcaAesCtrUpperIv GetAesCtrUpperIv() const;
182
183 bool IsSkipLayerHashEncryption() const;
184 Result GetHashTargetOffset(s64* out) const;
185
186 bool ExistsSparseLayer() const;
187 NcaSparseInfo& GetSparseInfo();
188 const NcaSparseInfo& GetSparseInfo() const;
189
190 bool ExistsCompressionLayer() const;
191 NcaCompressionInfo& GetCompressionInfo();
192 const NcaCompressionInfo& GetCompressionInfo() const;
193
194 bool ExistsPatchMetaHashLayer() const;
195 NcaMetaDataHashDataInfo& GetPatchMetaDataHashDataInfo();
196 const NcaMetaDataHashDataInfo& GetPatchMetaDataHashDataInfo() const;
197 NcaFsHeader::MetaDataHashType GetPatchMetaHashType() const;
198
199 bool ExistsSparseMetaHashLayer() const;
200 NcaMetaDataHashDataInfo& GetSparseMetaDataHashDataInfo();
201 const NcaMetaDataHashDataInfo& GetSparseMetaDataHashDataInfo() const;
202 NcaFsHeader::MetaDataHashType GetSparseMetaHashType() const;
203
204private:
205 NcaFsHeader m_data;
206 s32 m_fs_index;
207};
208
209class NcaFileSystemDriver {
210 YUZU_NON_COPYABLE(NcaFileSystemDriver);
211 YUZU_NON_MOVEABLE(NcaFileSystemDriver);
212
213public:
214 struct StorageContext {
215 bool open_raw_storage;
216 VirtualFile body_substorage;
217 std::shared_ptr<SparseStorage> current_sparse_storage;
218 VirtualFile sparse_storage_meta_storage;
219 std::shared_ptr<SparseStorage> original_sparse_storage;
220 void* external_current_sparse_storage;
221 void* external_original_sparse_storage;
222 VirtualFile aes_ctr_ex_storage_meta_storage;
223 VirtualFile aes_ctr_ex_storage_data_storage;
224 std::shared_ptr<AesCtrCounterExtendedStorage> aes_ctr_ex_storage;
225 VirtualFile indirect_storage_meta_storage;
226 std::shared_ptr<IndirectStorage> indirect_storage;
227 VirtualFile fs_data_storage;
228 VirtualFile compressed_storage_meta_storage;
229 std::shared_ptr<CompressedStorage> compressed_storage;
230
231 VirtualFile patch_layer_info_storage;
232 VirtualFile sparse_layer_info_storage;
233
234 VirtualFile external_original_storage;
235 };
236
237private:
238 enum class AlignmentStorageRequirement {
239 CacheBlockSize = 0,
240 None = 1,
241 };
242
243public:
244 static Result SetupFsHeaderReader(NcaFsHeaderReader* out, const NcaReader& reader,
245 s32 fs_index);
246
247public:
248 NcaFileSystemDriver(std::shared_ptr<NcaReader> reader) : m_original_reader(), m_reader(reader) {
249 ASSERT(m_reader != nullptr);
250 }
251
252 NcaFileSystemDriver(std::shared_ptr<NcaReader> original_reader,
253 std::shared_ptr<NcaReader> reader)
254 : m_original_reader(original_reader), m_reader(reader) {
255 ASSERT(m_reader != nullptr);
256 }
257
258 Result OpenStorageWithContext(VirtualFile* out, NcaFsHeaderReader* out_header_reader,
259 s32 fs_index, StorageContext* ctx);
260
261 Result OpenStorage(VirtualFile* out, NcaFsHeaderReader* out_header_reader, s32 fs_index) {
262 // Create a storage context.
263 StorageContext ctx{};
264
265 // Open the storage.
266 R_RETURN(OpenStorageWithContext(out, out_header_reader, fs_index, std::addressof(ctx)));
267 }
268
269public:
270 Result CreateStorageByRawStorage(VirtualFile* out, const NcaFsHeaderReader* header_reader,
271 VirtualFile raw_storage, StorageContext* ctx);
272
273private:
274 Result OpenStorageImpl(VirtualFile* out, NcaFsHeaderReader* out_header_reader, s32 fs_index,
275 StorageContext* ctx);
276
277 Result OpenIndirectableStorageAsOriginal(VirtualFile* out,
278 const NcaFsHeaderReader* header_reader,
279 StorageContext* ctx);
280
281 Result CreateBodySubStorage(VirtualFile* out, s64 offset, s64 size);
282
283 Result CreateAesCtrStorage(VirtualFile* out, VirtualFile base_storage, s64 offset,
284 const NcaAesCtrUpperIv& upper_iv,
285 AlignmentStorageRequirement alignment_storage_requirement);
286 Result CreateAesXtsStorage(VirtualFile* out, VirtualFile base_storage, s64 offset);
287
288 Result CreateSparseStorageMetaStorage(VirtualFile* out, VirtualFile base_storage, s64 offset,
289 const NcaAesCtrUpperIv& upper_iv,
290 const NcaSparseInfo& sparse_info);
291 Result CreateSparseStorageCore(std::shared_ptr<SparseStorage>* out, VirtualFile base_storage,
292 s64 base_size, VirtualFile meta_storage,
293 const NcaSparseInfo& sparse_info, bool external_info);
294 Result CreateSparseStorage(VirtualFile* out, s64* out_fs_data_offset,
295 std::shared_ptr<SparseStorage>* out_sparse_storage,
296 VirtualFile* out_meta_storage, s32 index,
297 const NcaAesCtrUpperIv& upper_iv, const NcaSparseInfo& sparse_info);
298
299 Result CreateSparseStorageMetaStorageWithVerification(
300 VirtualFile* out, VirtualFile* out_verification, VirtualFile base_storage, s64 offset,
301 const NcaAesCtrUpperIv& upper_iv, const NcaSparseInfo& sparse_info,
302 const NcaMetaDataHashDataInfo& meta_data_hash_data_info);
303 Result CreateSparseStorageWithVerification(
304 VirtualFile* out, s64* out_fs_data_offset,
305 std::shared_ptr<SparseStorage>* out_sparse_storage, VirtualFile* out_meta_storage,
306 VirtualFile* out_verification, s32 index, const NcaAesCtrUpperIv& upper_iv,
307 const NcaSparseInfo& sparse_info, const NcaMetaDataHashDataInfo& meta_data_hash_data_info,
308 NcaFsHeader::MetaDataHashType meta_data_hash_type);
309
310 Result CreateAesCtrExStorageMetaStorage(VirtualFile* out, VirtualFile base_storage, s64 offset,
311 NcaFsHeader::EncryptionType encryption_type,
312 const NcaAesCtrUpperIv& upper_iv,
313 const NcaPatchInfo& patch_info);
314 Result CreateAesCtrExStorage(VirtualFile* out,
315 std::shared_ptr<AesCtrCounterExtendedStorage>* out_ext,
316 VirtualFile base_storage, VirtualFile meta_storage,
317 s64 counter_offset, const NcaAesCtrUpperIv& upper_iv,
318 const NcaPatchInfo& patch_info);
319
320 Result CreateIndirectStorageMetaStorage(VirtualFile* out, VirtualFile base_storage,
321 const NcaPatchInfo& patch_info);
322 Result CreateIndirectStorage(VirtualFile* out, std::shared_ptr<IndirectStorage>* out_ind,
323 VirtualFile base_storage, VirtualFile original_data_storage,
324 VirtualFile meta_storage, const NcaPatchInfo& patch_info);
325
326 Result CreatePatchMetaStorage(VirtualFile* out_aes_ctr_ex_meta, VirtualFile* out_indirect_meta,
327 VirtualFile* out_verification, VirtualFile base_storage,
328 s64 offset, const NcaAesCtrUpperIv& upper_iv,
329 const NcaPatchInfo& patch_info,
330 const NcaMetaDataHashDataInfo& meta_data_hash_data_info);
331
332 Result CreateSha256Storage(VirtualFile* out, VirtualFile base_storage,
333 const NcaFsHeader::HashData::HierarchicalSha256Data& sha256_data);
334
335 Result CreateIntegrityVerificationStorage(
336 VirtualFile* out, VirtualFile base_storage,
337 const NcaFsHeader::HashData::IntegrityMetaInfo& meta_info);
338 Result CreateIntegrityVerificationStorageForMeta(
339 VirtualFile* out, VirtualFile* out_verification, VirtualFile base_storage, s64 offset,
340 const NcaMetaDataHashDataInfo& meta_data_hash_data_info);
341 Result CreateIntegrityVerificationStorageImpl(
342 VirtualFile* out, VirtualFile base_storage,
343 const NcaFsHeader::HashData::IntegrityMetaInfo& meta_info, s64 layer_info_offset,
344 int max_data_cache_entries, int max_hash_cache_entries, s8 buffer_level);
345
346 Result CreateRegionSwitchStorage(VirtualFile* out, const NcaFsHeaderReader* header_reader,
347 VirtualFile inside_storage, VirtualFile outside_storage);
348
349 Result CreateCompressedStorage(VirtualFile* out, std::shared_ptr<CompressedStorage>* out_cmp,
350 VirtualFile* out_meta, VirtualFile base_storage,
351 const NcaCompressionInfo& compression_info);
352
353public:
354 Result CreateCompressedStorage(VirtualFile* out, std::shared_ptr<CompressedStorage>* out_cmp,
355 VirtualFile* out_meta, VirtualFile base_storage,
356 const NcaCompressionInfo& compression_info,
357 GetDecompressorFunction get_decompressor);
358
359private:
360 std::shared_ptr<NcaReader> m_original_reader;
361 std::shared_ptr<NcaReader> m_reader;
362};
363
364} // namespace FileSys
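For orientation, a minimal usage sketch of the interfaces declared above (not taken from the commit): the function name is hypothetical, and crypto_cfg/compression_cfg are assumed to be supplied by the surrounding configuration code.

// Sketch only: open filesystem index 0 of an NCA through the driver.
Result OpenFirstFs(VirtualFile nca_file, const NcaCryptoConfiguration& crypto_cfg,
                   const NcaCompressionConfiguration& compression_cfg, VirtualFile* out) {
    // Parse and decrypt the NCA header.
    auto reader = std::make_shared<NcaReader>();
    R_TRY(reader->Initialize(std::move(nca_file), crypto_cfg, compression_cfg));

    // Drive storage creation for the first filesystem entry.
    NcaFileSystemDriver driver(reader);
    NcaFsHeaderReader header_reader;
    R_RETURN(driver.OpenStorage(out, std::addressof(header_reader), 0));
}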
diff --git a/src/core/file_sys/fssystem/fssystem_nca_header.cpp b/src/core/file_sys/fssystem/fssystem_nca_header.cpp
new file mode 100644
index 000000000..bf5742d39
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_nca_header.cpp
@@ -0,0 +1,20 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "core/file_sys/fssystem/fssystem_nca_header.h"
5
6namespace FileSys {
7
8u8 NcaHeader::GetProperKeyGeneration() const {
9 return std::max(this->key_generation, this->key_generation_2);
10}
11
12bool NcaPatchInfo::HasIndirectTable() const {
13 return this->indirect_size != 0;
14}
15
16bool NcaPatchInfo::HasAesCtrExTable() const {
17 return this->aes_ctr_ex_size != 0;
18}
19
20} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_nca_header.h b/src/core/file_sys/fssystem/fssystem_nca_header.h
new file mode 100644
index 000000000..a02c5d881
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_nca_header.h
@@ -0,0 +1,338 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "common/common_funcs.h"
7#include "common/common_types.h"
8#include "common/literals.h"
9
10#include "core/file_sys/errors.h"
11#include "core/file_sys/fssystem/fs_types.h"
12
13namespace FileSys {
14
15using namespace Common::Literals;
16
17struct Hash {
18 static constexpr std::size_t Size = 256 / 8;
19 std::array<u8, Size> value;
20};
21static_assert(sizeof(Hash) == Hash::Size);
22static_assert(std::is_trivial_v<Hash>);
23
24using NcaDigest = Hash;
25
26struct NcaHeader {
27 enum class ContentType : u8 {
28 Program = 0,
29 Meta = 1,
30 Control = 2,
31 Manual = 3,
32 Data = 4,
33 PublicData = 5,
34
35 Start = Program,
36 End = PublicData,
37 };
38
39 enum class DistributionType : u8 {
40 Download = 0,
41 GameCard = 1,
42
43 Start = Download,
44 End = GameCard,
45 };
46
47 enum class EncryptionType : u8 {
48 Auto = 0,
49 None = 1,
50 };
51
52 enum DecryptionKey {
53 DecryptionKey_AesXts = 0,
54 DecryptionKey_AesXts1 = DecryptionKey_AesXts,
55 DecryptionKey_AesXts2 = 1,
56 DecryptionKey_AesCtr = 2,
57 DecryptionKey_AesCtrEx = 3,
58 DecryptionKey_AesCtrHw = 4,
59 DecryptionKey_Count,
60 };
61
62 struct FsInfo {
63 u32 start_sector;
64 u32 end_sector;
65 u32 hash_sectors;
66 u32 reserved;
67 };
68 static_assert(sizeof(FsInfo) == 0x10);
69 static_assert(std::is_trivial_v<FsInfo>);
70
71 static constexpr u32 Magic0 = Common::MakeMagic('N', 'C', 'A', '0');
72 static constexpr u32 Magic1 = Common::MakeMagic('N', 'C', 'A', '1');
73 static constexpr u32 Magic2 = Common::MakeMagic('N', 'C', 'A', '2');
74 static constexpr u32 Magic3 = Common::MakeMagic('N', 'C', 'A', '3');
75
76 static constexpr u32 Magic = Magic3;
77
78 static constexpr std::size_t Size = 1_KiB;
79 static constexpr s32 FsCountMax = 4;
80 static constexpr std::size_t HeaderSignCount = 2;
81 static constexpr std::size_t HeaderSignSize = 0x100;
82 static constexpr std::size_t EncryptedKeyAreaSize = 0x100;
83 static constexpr std::size_t SectorSize = 0x200;
84 static constexpr std::size_t SectorShift = 9;
85 static constexpr std::size_t RightsIdSize = 0x10;
86 static constexpr std::size_t XtsBlockSize = 0x200;
87 static constexpr std::size_t CtrBlockSize = 0x10;
88
89 static_assert(SectorSize == (1 << SectorShift));
90
91 // Data members.
92 std::array<u8, HeaderSignSize> header_sign_1;
93 std::array<u8, HeaderSignSize> header_sign_2;
94 u32 magic;
95 DistributionType distribution_type;
96 ContentType content_type;
97 u8 key_generation;
98 u8 key_index;
99 u64 content_size;
100 u64 program_id;
101 u32 content_index;
102 u32 sdk_addon_version;
103 u8 key_generation_2;
104 u8 header1_signature_key_generation;
105 std::array<u8, 2> reserved_222;
106 std::array<u32, 3> reserved_224;
107 std::array<u8, RightsIdSize> rights_id;
108 std::array<FsInfo, FsCountMax> fs_info;
109 std::array<Hash, FsCountMax> fs_header_hash;
110 std::array<u8, EncryptedKeyAreaSize> encrypted_key_area;
111
112 static constexpr u64 SectorToByte(u32 sector) {
113 return static_cast<u64>(sector) << SectorShift;
114 }
115
116 static constexpr u32 ByteToSector(u64 byte) {
117 return static_cast<u32>(byte >> SectorShift);
118 }
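// Worked example (illustrative, not in the original source): with SectorShift == 9,
// SectorToByte(1) == 0x200 and ByteToSector(0x400) == 2. NcaReader::GetFsOffset and
// GetFsEndOffset convert FsInfo start/end sectors to byte offsets this way.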
119
120 u8 GetProperKeyGeneration() const;
121};
122static_assert(sizeof(NcaHeader) == NcaHeader::Size);
123static_assert(std::is_trivial_v<NcaHeader>);
124
125struct NcaBucketInfo {
126 static constexpr size_t HeaderSize = 0x10;
127 Int64 offset;
128 Int64 size;
129 std::array<u8, HeaderSize> header;
130};
131static_assert(std::is_trivial_v<NcaBucketInfo>);
132
133struct NcaPatchInfo {
134 static constexpr size_t Size = 0x40;
135 static constexpr size_t Offset = 0x100;
136
137 Int64 indirect_offset;
138 Int64 indirect_size;
139 std::array<u8, NcaBucketInfo::HeaderSize> indirect_header;
140 Int64 aes_ctr_ex_offset;
141 Int64 aes_ctr_ex_size;
142 std::array<u8, NcaBucketInfo::HeaderSize> aes_ctr_ex_header;
143
144 bool HasIndirectTable() const;
145 bool HasAesCtrExTable() const;
146};
147static_assert(std::is_trivial_v<NcaPatchInfo>);
148
149union NcaAesCtrUpperIv {
150 u64 value;
151 struct {
152 u32 generation;
153 u32 secure_value;
154 } part;
155};
156static_assert(std::is_trivial_v<NcaAesCtrUpperIv>);
157
158struct NcaSparseInfo {
159 NcaBucketInfo bucket;
160 Int64 physical_offset;
161 u16 generation;
162 std::array<u8, 6> reserved;
163
164 s64 GetPhysicalSize() const {
165 return this->bucket.offset + this->bucket.size;
166 }
167
168 u32 GetGeneration() const {
169 return static_cast<u32>(this->generation) << 16;
170 }
171
172 const NcaAesCtrUpperIv MakeAesCtrUpperIv(NcaAesCtrUpperIv upper_iv) const {
173 NcaAesCtrUpperIv sparse_upper_iv = upper_iv;
174 sparse_upper_iv.part.generation = this->GetGeneration();
175 return sparse_upper_iv;
176 }
177};
178static_assert(std::is_trivial_v<NcaSparseInfo>);
179
180struct NcaCompressionInfo {
181 NcaBucketInfo bucket;
182 std::array<u8, 8> reserved;
183};
184static_assert(std::is_trivial_v<NcaCompressionInfo>);
185
186struct NcaMetaDataHashDataInfo {
187 Int64 offset;
188 Int64 size;
189 Hash hash;
190};
191static_assert(std::is_trivial_v<NcaMetaDataHashDataInfo>);
192
193struct NcaFsHeader {
194 static constexpr size_t Size = 0x200;
195 static constexpr size_t HashDataOffset = 0x8;
196
197 struct Region {
198 Int64 offset;
199 Int64 size;
200 };
201 static_assert(std::is_trivial_v<Region>);
202
203 enum class FsType : u8 {
204 RomFs = 0,
205 PartitionFs = 1,
206 };
207
208 enum class EncryptionType : u8 {
209 Auto = 0,
210 None = 1,
211 AesXts = 2,
212 AesCtr = 3,
213 AesCtrEx = 4,
214 AesCtrSkipLayerHash = 5,
215 AesCtrExSkipLayerHash = 6,
216 };
217
218 enum class HashType : u8 {
219 Auto = 0,
220 None = 1,
221 HierarchicalSha256Hash = 2,
222 HierarchicalIntegrityHash = 3,
223 AutoSha3 = 4,
224 HierarchicalSha3256Hash = 5,
225 HierarchicalIntegritySha3Hash = 6,
226 };
227
228 enum class MetaDataHashType : u8 {
229 None = 0,
230 HierarchicalIntegrity = 1,
231 };
232
233 union HashData {
234 struct HierarchicalSha256Data {
235 static constexpr size_t HashLayerCountMax = 5;
236 static const size_t MasterHashOffset;
237
238 Hash fs_data_master_hash;
239 s32 hash_block_size;
240 s32 hash_layer_count;
241 std::array<Region, HashLayerCountMax> hash_layer_region;
242 } hierarchical_sha256_data;
243 static_assert(std::is_trivial_v<HierarchicalSha256Data>);
244
245 struct IntegrityMetaInfo {
246 static const size_t MasterHashOffset;
247
248 u32 magic;
249 u32 version;
250 u32 master_hash_size;
251
252 struct LevelHashInfo {
253 u32 max_layers;
254
255 struct HierarchicalIntegrityVerificationLevelInformation {
256 static constexpr size_t IntegrityMaxLayerCount = 7;
257 Int64 offset;
258 Int64 size;
259 s32 block_order;
260 std::array<u8, 4> reserved;
261 };
262 std::array<
263 HierarchicalIntegrityVerificationLevelInformation,
264 HierarchicalIntegrityVerificationLevelInformation::IntegrityMaxLayerCount - 1>
265 info;
266
267 struct SignatureSalt {
268 static constexpr size_t Size = 0x20;
269 std::array<u8, Size> value;
270 };
271 SignatureSalt seed;
272 } level_hash_info;
273
274 Hash master_hash;
275 } integrity_meta_info;
276 static_assert(std::is_trivial_v<IntegrityMetaInfo>);
277
278 std::array<u8, NcaPatchInfo::Offset - HashDataOffset> padding;
279 };
280
281 u16 version;
282 FsType fs_type;
283 HashType hash_type;
284 EncryptionType encryption_type;
285 MetaDataHashType meta_data_hash_type;
286 std::array<u8, 2> reserved;
287 HashData hash_data;
288 NcaPatchInfo patch_info;
289 NcaAesCtrUpperIv aes_ctr_upper_iv;
290 NcaSparseInfo sparse_info;
291 NcaCompressionInfo compression_info;
292 NcaMetaDataHashDataInfo meta_data_hash_data_info;
293 std::array<u8, 0x30> pad;
294
295 bool IsSkipLayerHashEncryption() const {
296 return this->encryption_type == EncryptionType::AesCtrSkipLayerHash ||
297 this->encryption_type == EncryptionType::AesCtrExSkipLayerHash;
298 }
299
300 Result GetHashTargetOffset(s64* out) const {
301 switch (this->hash_type) {
302 case HashType::HierarchicalIntegrityHash:
303 case HashType::HierarchicalIntegritySha3Hash:
304 *out = this->hash_data.integrity_meta_info.level_hash_info
305 .info[this->hash_data.integrity_meta_info.level_hash_info.max_layers - 2]
306 .offset;
307 R_SUCCEED();
308 case HashType::HierarchicalSha256Hash:
309 case HashType::HierarchicalSha3256Hash:
310 *out =
311 this->hash_data.hierarchical_sha256_data
312 .hash_layer_region[this->hash_data.hierarchical_sha256_data.hash_layer_count -
313 1]
314 .offset;
315 R_SUCCEED();
316 default:
317 R_THROW(ResultInvalidNcaFsHeader);
318 }
319 }
320};
321static_assert(sizeof(NcaFsHeader) == NcaFsHeader::Size);
322static_assert(std::is_trivial_v<NcaFsHeader>);
323static_assert(offsetof(NcaFsHeader, patch_info) == NcaPatchInfo::Offset);
324
325inline constexpr const size_t NcaFsHeader::HashData::HierarchicalSha256Data::MasterHashOffset =
326 offsetof(NcaFsHeader, hash_data.hierarchical_sha256_data.fs_data_master_hash);
327inline constexpr const size_t NcaFsHeader::HashData::IntegrityMetaInfo::MasterHashOffset =
328 offsetof(NcaFsHeader, hash_data.integrity_meta_info.master_hash);
329
330struct NcaMetaDataHashData {
331 s64 layer_info_offset;
332 NcaFsHeader::HashData::IntegrityMetaInfo integrity_meta_info;
333};
334static_assert(sizeof(NcaMetaDataHashData) ==
335 sizeof(NcaFsHeader::HashData::IntegrityMetaInfo) + sizeof(s64));
336static_assert(std::is_trivial_v<NcaMetaDataHashData>);
337
338} // namespace FileSys
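A brief sketch (not part of the commit) of how NcaSparseInfo rewrites the upper AES-CTR IV; the numeric values are invented for the example.

NcaSparseInfo sparse_info{};            // assume populated from an NcaFsHeader
sparse_info.generation = 0x0002;
NcaAesCtrUpperIv upper_iv{};
upper_iv.part.secure_value = 0x1234;    // preserved by MakeAesCtrUpperIv
const NcaAesCtrUpperIv sparse_iv = sparse_info.MakeAesCtrUpperIv(upper_iv);
// sparse_iv.part.generation == sparse_info.GetGeneration() == (0x0002u << 16) == 0x20000
// sparse_iv.part.secure_value == 0x1234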
diff --git a/src/core/file_sys/fssystem/fssystem_nca_reader.cpp b/src/core/file_sys/fssystem/fssystem_nca_reader.cpp
new file mode 100644
index 000000000..a3714ab37
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_nca_reader.cpp
@@ -0,0 +1,531 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "core/file_sys/fssystem/fssystem_aes_xts_storage.h"
5#include "core/file_sys/fssystem/fssystem_nca_file_system_driver.h"
6#include "core/file_sys/vfs_offset.h"
7
8namespace FileSys {
9
10namespace {
11
12constexpr inline u32 SdkAddonVersionMin = 0x000B0000;
13constexpr inline size_t Aes128KeySize = 0x10;
14constexpr const std::array<u8, Aes128KeySize> ZeroKey{};
15
16constexpr Result CheckNcaMagic(u32 magic) {
17 // Verify the magic is not a deprecated one.
18 R_UNLESS(magic != NcaHeader::Magic0, ResultUnsupportedSdkVersion);
19 R_UNLESS(magic != NcaHeader::Magic1, ResultUnsupportedSdkVersion);
20 R_UNLESS(magic != NcaHeader::Magic2, ResultUnsupportedSdkVersion);
21
22 // Verify the magic is the current one.
23 R_UNLESS(magic == NcaHeader::Magic3, ResultInvalidNcaSignature);
24
25 R_SUCCEED();
26}
27
28} // namespace
29
30NcaReader::NcaReader()
31 : m_body_storage(), m_header_storage(), m_is_software_aes_prioritized(false),
32 m_is_available_sw_key(false), m_header_encryption_type(NcaHeader::EncryptionType::Auto),
33 m_get_decompressor() {
34 std::memset(std::addressof(m_header), 0, sizeof(m_header));
35 std::memset(std::addressof(m_decryption_keys), 0, sizeof(m_decryption_keys));
36 std::memset(std::addressof(m_external_decryption_key), 0, sizeof(m_external_decryption_key));
37}
38
39NcaReader::~NcaReader() {}
40
41Result NcaReader::Initialize(VirtualFile base_storage, const NcaCryptoConfiguration& crypto_cfg,
42 const NcaCompressionConfiguration& compression_cfg) {
43 // Validate preconditions.
44 ASSERT(base_storage != nullptr);
45 ASSERT(m_body_storage == nullptr);
46
47 // Create the work header storage.
48 VirtualFile work_header_storage;
49
50 // We need to be able to generate keys.
51 R_UNLESS(crypto_cfg.generate_key != nullptr, ResultInvalidArgument);
52
53 // Generate keys for header.
54 using AesXtsStorageForNcaHeader = AesXtsStorage;
55
56 constexpr std::array<s32, NcaCryptoConfiguration::HeaderEncryptionKeyCount>
57 HeaderKeyTypeValues = {
58 static_cast<s32>(KeyType::NcaHeaderKey1),
59 static_cast<s32>(KeyType::NcaHeaderKey2),
60 };
61
62 std::array<std::array<u8, NcaCryptoConfiguration::Aes128KeySize>,
63 NcaCryptoConfiguration::HeaderEncryptionKeyCount>
64 header_decryption_keys;
65 for (size_t i = 0; i < NcaCryptoConfiguration::HeaderEncryptionKeyCount; i++) {
66 crypto_cfg.generate_key(header_decryption_keys[i].data(),
67 AesXtsStorageForNcaHeader::KeySize,
68 crypto_cfg.header_encrypted_encryption_keys[i].data(),
69 AesXtsStorageForNcaHeader::KeySize, HeaderKeyTypeValues[i]);
70 }
71
72 // Create the header storage.
73 std::array<u8, AesXtsStorageForNcaHeader::IvSize> header_iv = {};
74 work_header_storage = std::make_unique<AesXtsStorageForNcaHeader>(
75 base_storage, header_decryption_keys[0].data(), header_decryption_keys[1].data(),
76 AesXtsStorageForNcaHeader::KeySize, header_iv.data(), AesXtsStorageForNcaHeader::IvSize,
77 NcaHeader::XtsBlockSize);
78
79 // Check that we successfully created the storage.
80 R_UNLESS(work_header_storage != nullptr, ResultAllocationMemoryFailedInNcaReaderA);
81
82 // Read the header.
83 work_header_storage->ReadObject(std::addressof(m_header), 0);
84
85 // Validate the magic.
86 if (const Result magic_result = CheckNcaMagic(m_header.magic); R_FAILED(magic_result)) {
87 // Try to use a plaintext header.
88 base_storage->ReadObject(std::addressof(m_header), 0);
89 R_UNLESS(R_SUCCEEDED(CheckNcaMagic(m_header.magic)), magic_result);
90
91 // Configure to use the plaintext header.
92 auto base_storage_size = base_storage->GetSize();
93 work_header_storage = std::make_shared<OffsetVfsFile>(base_storage, base_storage_size, 0);
94 R_UNLESS(work_header_storage != nullptr, ResultAllocationMemoryFailedInNcaReaderA);
95
96 // Set encryption type as plaintext.
97 m_header_encryption_type = NcaHeader::EncryptionType::None;
98 }
99
100 // Verify the header sign1.
101 if (crypto_cfg.verify_sign1 != nullptr) {
102 const u8* sig = m_header.header_sign_1.data();
103 const size_t sig_size = NcaHeader::HeaderSignSize;
104 const u8* msg =
105 static_cast<const u8*>(static_cast<const void*>(std::addressof(m_header.magic)));
106 const size_t msg_size =
107 NcaHeader::Size - NcaHeader::HeaderSignSize * NcaHeader::HeaderSignCount;
108
109 m_is_header_sign1_signature_valid = crypto_cfg.verify_sign1(
110 sig, sig_size, msg, msg_size, m_header.header1_signature_key_generation);
111
112 if (!m_is_header_sign1_signature_valid) {
113 LOG_WARNING(Common_Filesystem, "Invalid NCA header sign1");
114 }
115 }
116
117 // Validate the sdk version.
118 R_UNLESS(m_header.sdk_addon_version >= SdkAddonVersionMin, ResultUnsupportedSdkVersion);
119
120 // Validate the key index.
121 R_UNLESS(m_header.key_index < NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount ||
122 m_header.key_index == NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexZeroKey,
123 ResultInvalidNcaKeyIndex);
124
125 // Check if we have a rights id.
126 constexpr const std::array<u8, NcaHeader::RightsIdSize> ZeroRightsId{};
127 if (std::memcmp(ZeroRightsId.data(), m_header.rights_id.data(), NcaHeader::RightsIdSize) == 0) {
128 // If we don't, then we don't have an external key, so we need to generate decryption keys.
129 crypto_cfg.generate_key(
130 m_decryption_keys[NcaHeader::DecryptionKey_AesCtr].data(), Aes128KeySize,
131 m_header.encrypted_key_area.data() + NcaHeader::DecryptionKey_AesCtr * Aes128KeySize,
132 Aes128KeySize, GetKeyTypeValue(m_header.key_index, m_header.GetProperKeyGeneration()));
133 crypto_cfg.generate_key(
134 m_decryption_keys[NcaHeader::DecryptionKey_AesXts1].data(), Aes128KeySize,
135 m_header.encrypted_key_area.data() + NcaHeader::DecryptionKey_AesXts1 * Aes128KeySize,
136 Aes128KeySize, GetKeyTypeValue(m_header.key_index, m_header.GetProperKeyGeneration()));
137 crypto_cfg.generate_key(
138 m_decryption_keys[NcaHeader::DecryptionKey_AesXts2].data(), Aes128KeySize,
139 m_header.encrypted_key_area.data() + NcaHeader::DecryptionKey_AesXts2 * Aes128KeySize,
140 Aes128KeySize, GetKeyTypeValue(m_header.key_index, m_header.GetProperKeyGeneration()));
141 crypto_cfg.generate_key(
142 m_decryption_keys[NcaHeader::DecryptionKey_AesCtrEx].data(), Aes128KeySize,
143 m_header.encrypted_key_area.data() + NcaHeader::DecryptionKey_AesCtrEx * Aes128KeySize,
144 Aes128KeySize, GetKeyTypeValue(m_header.key_index, m_header.GetProperKeyGeneration()));
145
146 // Copy the hardware speed emulation key.
147 std::memcpy(m_decryption_keys[NcaHeader::DecryptionKey_AesCtrHw].data(),
148 m_header.encrypted_key_area.data() +
149 NcaHeader::DecryptionKey_AesCtrHw * Aes128KeySize,
150 Aes128KeySize);
151 }
152
153 // Clear the external decryption key.
154 std::memset(m_external_decryption_key.data(), 0, m_external_decryption_key.size());
155
156 // Set software key availability.
157 m_is_available_sw_key = crypto_cfg.is_available_sw_key;
158
159 // Set our decompressor function getter.
160 m_get_decompressor = compression_cfg.get_decompressor;
161
162 // Set our storages.
163 m_header_storage = std::move(work_header_storage);
164 m_body_storage = std::move(base_storage);
165
166 R_SUCCEED();
167}
168
169VirtualFile NcaReader::GetSharedBodyStorage() {
170 ASSERT(m_body_storage != nullptr);
171 return m_body_storage;
172}
173
174u32 NcaReader::GetMagic() const {
175 ASSERT(m_body_storage != nullptr);
176 return m_header.magic;
177}
178
179NcaHeader::DistributionType NcaReader::GetDistributionType() const {
180 ASSERT(m_body_storage != nullptr);
181 return m_header.distribution_type;
182}
183
184NcaHeader::ContentType NcaReader::GetContentType() const {
185 ASSERT(m_body_storage != nullptr);
186 return m_header.content_type;
187}
188
189u8 NcaReader::GetHeaderSign1KeyGeneration() const {
190 ASSERT(m_body_storage != nullptr);
191 return m_header.header1_signature_key_generation;
192}
193
194u8 NcaReader::GetKeyGeneration() const {
195 ASSERT(m_body_storage != nullptr);
196 return m_header.GetProperKeyGeneration();
197}
198
199u8 NcaReader::GetKeyIndex() const {
200 ASSERT(m_body_storage != nullptr);
201 return m_header.key_index;
202}
203
204u64 NcaReader::GetContentSize() const {
205 ASSERT(m_body_storage != nullptr);
206 return m_header.content_size;
207}
208
209u64 NcaReader::GetProgramId() const {
210 ASSERT(m_body_storage != nullptr);
211 return m_header.program_id;
212}
213
214u32 NcaReader::GetContentIndex() const {
215 ASSERT(m_body_storage != nullptr);
216 return m_header.content_index;
217}
218
219u32 NcaReader::GetSdkAddonVersion() const {
220 ASSERT(m_body_storage != nullptr);
221 return m_header.sdk_addon_version;
222}
223
224void NcaReader::GetRightsId(u8* dst, size_t dst_size) const {
225 ASSERT(dst != nullptr);
226 ASSERT(dst_size >= NcaHeader::RightsIdSize);
227
228 std::memcpy(dst, m_header.rights_id.data(), NcaHeader::RightsIdSize);
229}
230
231bool NcaReader::HasFsInfo(s32 index) const {
232 ASSERT(0 <= index && index < NcaHeader::FsCountMax);
233 return m_header.fs_info[index].start_sector != 0 || m_header.fs_info[index].end_sector != 0;
234}
235
236s32 NcaReader::GetFsCount() const {
237 ASSERT(m_body_storage != nullptr);
238 for (s32 i = 0; i < NcaHeader::FsCountMax; i++) {
239 if (!this->HasFsInfo(i)) {
240 return i;
241 }
242 }
243 return NcaHeader::FsCountMax;
244}
245
246const Hash& NcaReader::GetFsHeaderHash(s32 index) const {
247 ASSERT(m_body_storage != nullptr);
248 ASSERT(0 <= index && index < NcaHeader::FsCountMax);
249 return m_header.fs_header_hash[index];
250}
251
252void NcaReader::GetFsHeaderHash(Hash* dst, s32 index) const {
253 ASSERT(m_body_storage != nullptr);
254 ASSERT(0 <= index && index < NcaHeader::FsCountMax);
255 ASSERT(dst != nullptr);
256 std::memcpy(dst, std::addressof(m_header.fs_header_hash[index]), sizeof(*dst));
257}
258
259void NcaReader::GetFsInfo(NcaHeader::FsInfo* dst, s32 index) const {
260 ASSERT(m_body_storage != nullptr);
261 ASSERT(0 <= index && index < NcaHeader::FsCountMax);
262 ASSERT(dst != nullptr);
263 std::memcpy(dst, std::addressof(m_header.fs_info[index]), sizeof(*dst));
264}
265
266u64 NcaReader::GetFsOffset(s32 index) const {
267 ASSERT(m_body_storage != nullptr);
268 ASSERT(0 <= index && index < NcaHeader::FsCountMax);
269 return NcaHeader::SectorToByte(m_header.fs_info[index].start_sector);
270}
271
272u64 NcaReader::GetFsEndOffset(s32 index) const {
273 ASSERT(m_body_storage != nullptr);
274 ASSERT(0 <= index && index < NcaHeader::FsCountMax);
275 return NcaHeader::SectorToByte(m_header.fs_info[index].end_sector);
276}
277
278u64 NcaReader::GetFsSize(s32 index) const {
279 ASSERT(m_body_storage != nullptr);
280 ASSERT(0 <= index && index < NcaHeader::FsCountMax);
281 return NcaHeader::SectorToByte(m_header.fs_info[index].end_sector -
282 m_header.fs_info[index].start_sector);
283}
284
285void NcaReader::GetEncryptedKey(void* dst, size_t size) const {
286 ASSERT(m_body_storage != nullptr);
287 ASSERT(dst != nullptr);
288 ASSERT(size >= NcaHeader::EncryptedKeyAreaSize);
289
290 std::memcpy(dst, m_header.encrypted_key_area.data(), NcaHeader::EncryptedKeyAreaSize);
291}
292
293const void* NcaReader::GetDecryptionKey(s32 index) const {
294 ASSERT(m_body_storage != nullptr);
295 ASSERT(0 <= index && index < NcaHeader::DecryptionKey_Count);
296 return m_decryption_keys[index].data();
297}
298
299bool NcaReader::HasValidInternalKey() const {
300 for (s32 i = 0; i < NcaHeader::DecryptionKey_Count; i++) {
301 if (std::memcmp(ZeroKey.data(), m_header.encrypted_key_area.data() + i * Aes128KeySize,
302 Aes128KeySize) != 0) {
303 return true;
304 }
305 }
306 return false;
307}
308
309bool NcaReader::HasInternalDecryptionKeyForAesHw() const {
310 return std::memcmp(ZeroKey.data(), this->GetDecryptionKey(NcaHeader::DecryptionKey_AesCtrHw),
311 Aes128KeySize) != 0;
312}
313
314bool NcaReader::IsSoftwareAesPrioritized() const {
315 return m_is_software_aes_prioritized;
316}
317
318void NcaReader::PrioritizeSoftwareAes() {
319 m_is_software_aes_prioritized = true;
320}
321
322bool NcaReader::IsAvailableSwKey() const {
323 return m_is_available_sw_key;
324}
325
326bool NcaReader::HasExternalDecryptionKey() const {
327 return std::memcmp(ZeroKey.data(), this->GetExternalDecryptionKey(), Aes128KeySize) != 0;
328}
329
330const void* NcaReader::GetExternalDecryptionKey() const {
331 return m_external_decryption_key.data();
332}
333
334void NcaReader::SetExternalDecryptionKey(const void* src, size_t size) {
335 ASSERT(src != nullptr);
336 ASSERT(size == sizeof(m_external_decryption_key));
337
338 std::memcpy(m_external_decryption_key.data(), src, sizeof(m_external_decryption_key));
339}
340
341void NcaReader::GetRawData(void* dst, size_t dst_size) const {
342 ASSERT(m_body_storage != nullptr);
343 ASSERT(dst != nullptr);
344 ASSERT(dst_size >= sizeof(NcaHeader));
345
346 std::memcpy(dst, std::addressof(m_header), sizeof(NcaHeader));
347}
348
349GetDecompressorFunction NcaReader::GetDecompressor() const {
350 ASSERT(m_get_decompressor != nullptr);
351 return m_get_decompressor;
352}
353
354NcaHeader::EncryptionType NcaReader::GetEncryptionType() const {
355 return m_header_encryption_type;
356}
357
358Result NcaReader::ReadHeader(NcaFsHeader* dst, s32 index) const {
359 ASSERT(dst != nullptr);
360 ASSERT(0 <= index && index < NcaHeader::FsCountMax);
361
362 const s64 offset = sizeof(NcaHeader) + sizeof(NcaFsHeader) * index;
363 m_header_storage->ReadObject(dst, offset);
364
365 R_SUCCEED();
366}
367
368bool NcaReader::GetHeaderSign1Valid() const {
369 return m_is_header_sign1_signature_valid;
370}
371
372void NcaReader::GetHeaderSign2(void* dst, size_t size) const {
373 ASSERT(dst != nullptr);
374 ASSERT(size == NcaHeader::HeaderSignSize);
375
376 std::memcpy(dst, m_header.header_sign_2.data(), size);
377}
378
379Result NcaFsHeaderReader::Initialize(const NcaReader& reader, s32 index) {
380 // Reset ourselves to uninitialized.
381 m_fs_index = -1;
382
383 // Read the header.
384 R_TRY(reader.ReadHeader(std::addressof(m_data), index));
385
386 // Set our index.
387 m_fs_index = index;
388 R_SUCCEED();
389}
390
391void NcaFsHeaderReader::GetRawData(void* dst, size_t dst_size) const {
392 ASSERT(this->IsInitialized());
393 ASSERT(dst != nullptr);
394 ASSERT(dst_size >= sizeof(NcaFsHeader));
395
396 std::memcpy(dst, std::addressof(m_data), sizeof(NcaFsHeader));
397}
398
399NcaFsHeader::HashData& NcaFsHeaderReader::GetHashData() {
400 ASSERT(this->IsInitialized());
401 return m_data.hash_data;
402}
403
404const NcaFsHeader::HashData& NcaFsHeaderReader::GetHashData() const {
405 ASSERT(this->IsInitialized());
406 return m_data.hash_data;
407}
408
409u16 NcaFsHeaderReader::GetVersion() const {
410 ASSERT(this->IsInitialized());
411 return m_data.version;
412}
413
414s32 NcaFsHeaderReader::GetFsIndex() const {
415 ASSERT(this->IsInitialized());
416 return m_fs_index;
417}
418
419NcaFsHeader::FsType NcaFsHeaderReader::GetFsType() const {
420 ASSERT(this->IsInitialized());
421 return m_data.fs_type;
422}
423
424NcaFsHeader::HashType NcaFsHeaderReader::GetHashType() const {
425 ASSERT(this->IsInitialized());
426 return m_data.hash_type;
427}
428
429NcaFsHeader::EncryptionType NcaFsHeaderReader::GetEncryptionType() const {
430 ASSERT(this->IsInitialized());
431 return m_data.encryption_type;
432}
433
434NcaPatchInfo& NcaFsHeaderReader::GetPatchInfo() {
435 ASSERT(this->IsInitialized());
436 return m_data.patch_info;
437}
438
439const NcaPatchInfo& NcaFsHeaderReader::GetPatchInfo() const {
440 ASSERT(this->IsInitialized());
441 return m_data.patch_info;
442}
443
444const NcaAesCtrUpperIv NcaFsHeaderReader::GetAesCtrUpperIv() const {
445 ASSERT(this->IsInitialized());
446 return m_data.aes_ctr_upper_iv;
447}
448
449bool NcaFsHeaderReader::IsSkipLayerHashEncryption() const {
450 ASSERT(this->IsInitialized());
451 return m_data.IsSkipLayerHashEncryption();
452}
453
454Result NcaFsHeaderReader::GetHashTargetOffset(s64* out) const {
455 ASSERT(out != nullptr);
456 ASSERT(this->IsInitialized());
457
458 R_RETURN(m_data.GetHashTargetOffset(out));
459}
460
461bool NcaFsHeaderReader::ExistsSparseLayer() const {
462 ASSERT(this->IsInitialized());
463 return m_data.sparse_info.generation != 0;
464}
465
466NcaSparseInfo& NcaFsHeaderReader::GetSparseInfo() {
467 ASSERT(this->IsInitialized());
468 return m_data.sparse_info;
469}
470
471const NcaSparseInfo& NcaFsHeaderReader::GetSparseInfo() const {
472 ASSERT(this->IsInitialized());
473 return m_data.sparse_info;
474}
475
476bool NcaFsHeaderReader::ExistsCompressionLayer() const {
477 ASSERT(this->IsInitialized());
478 return m_data.compression_info.bucket.offset != 0 && m_data.compression_info.bucket.size != 0;
479}
480
481NcaCompressionInfo& NcaFsHeaderReader::GetCompressionInfo() {
482 ASSERT(this->IsInitialized());
483 return m_data.compression_info;
484}
485
486const NcaCompressionInfo& NcaFsHeaderReader::GetCompressionInfo() const {
487 ASSERT(this->IsInitialized());
488 return m_data.compression_info;
489}
490
491bool NcaFsHeaderReader::ExistsPatchMetaHashLayer() const {
492 ASSERT(this->IsInitialized());
493 return m_data.meta_data_hash_data_info.size != 0 && this->GetPatchInfo().HasIndirectTable();
494}
495
496NcaMetaDataHashDataInfo& NcaFsHeaderReader::GetPatchMetaDataHashDataInfo() {
497 ASSERT(this->IsInitialized());
498 return m_data.meta_data_hash_data_info;
499}
500
501const NcaMetaDataHashDataInfo& NcaFsHeaderReader::GetPatchMetaDataHashDataInfo() const {
502 ASSERT(this->IsInitialized());
503 return m_data.meta_data_hash_data_info;
504}
505
506NcaFsHeader::MetaDataHashType NcaFsHeaderReader::GetPatchMetaHashType() const {
507 ASSERT(this->IsInitialized());
508 return m_data.meta_data_hash_type;
509}
510
511bool NcaFsHeaderReader::ExistsSparseMetaHashLayer() const {
512 ASSERT(this->IsInitialized());
513 return m_data.meta_data_hash_data_info.size != 0 && this->ExistsSparseLayer();
514}
515
516NcaMetaDataHashDataInfo& NcaFsHeaderReader::GetSparseMetaDataHashDataInfo() {
517 ASSERT(this->IsInitialized());
518 return m_data.meta_data_hash_data_info;
519}
520
521const NcaMetaDataHashDataInfo& NcaFsHeaderReader::GetSparseMetaDataHashDataInfo() const {
522 ASSERT(this->IsInitialized());
523 return m_data.meta_data_hash_data_info;
524}
525
526NcaFsHeader::MetaDataHashType NcaFsHeaderReader::GetSparseMetaHashType() const {
527 ASSERT(this->IsInitialized());
528 return m_data.meta_data_hash_type;
529}
530
531} // namespace FileSys
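
For reference, a minimal sketch (not taken from this commit) of how the NcaReader and NcaFsHeaderReader accessors above could be combined to enumerate the filesystem sections of a parsed NCA; the helper name and the logging are illustrative assumptions, everything it calls is declared in this file.

// Illustrative helper: list every populated fs_info entry of an initialized NcaReader.
static void LogFsSections(const FileSys::NcaReader& reader) {
    for (s32 i = 0; i < reader.GetFsCount(); i++) {
        FileSys::NcaFsHeaderReader fs_header;
        if (!R_SUCCEEDED(fs_header.Initialize(reader, i))) {
            continue;
        }
        LOG_INFO(Common_Filesystem, "fs {}: offset={:#x} size={:#x} type={} encryption={}", i,
                 reader.GetFsOffset(i), reader.GetFsSize(i),
                 static_cast<int>(fs_header.GetFsType()),
                 static_cast<int>(fs_header.GetEncryptionType()));
    }
}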
diff --git a/src/core/file_sys/fssystem/fssystem_pooled_buffer.cpp b/src/core/file_sys/fssystem/fssystem_pooled_buffer.cpp
new file mode 100644
index 000000000..bbfaab255
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_pooled_buffer.cpp
@@ -0,0 +1,61 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "common/alignment.h"
5#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
6
7namespace FileSys {
8
9namespace {
10
11constexpr size_t HeapBlockSize = BufferPoolAlignment;
12static_assert(HeapBlockSize == 4_KiB);
13
14// A heap block is 4KiB. An order is a power of two.
15// This gives blocks of the order 32KiB, 512KiB, 4MiB.
16constexpr s32 HeapOrderMax = 7;
17constexpr s32 HeapOrderMaxForLarge = HeapOrderMax + 3;
18
19constexpr size_t HeapAllocatableSizeMax = HeapBlockSize * (static_cast<size_t>(1) << HeapOrderMax);
20constexpr size_t HeapAllocatableSizeMaxForLarge =
21 HeapBlockSize * (static_cast<size_t>(1) << HeapOrderMaxForLarge);
22
23} // namespace
24
25size_t PooledBuffer::GetAllocatableSizeMaxCore(bool large) {
26 return large ? HeapAllocatableSizeMaxForLarge : HeapAllocatableSizeMax;
27}
28
29void PooledBuffer::AllocateCore(size_t ideal_size, size_t required_size, bool large) {
30 // Ensure preconditions.
31 ASSERT(m_buffer == nullptr);
32
33 // Check that we can allocate this size.
34 ASSERT(required_size <= GetAllocatableSizeMaxCore(large));
35
36 const size_t target_size =
37 std::min(std::max(ideal_size, required_size), GetAllocatableSizeMaxCore(large));
38
39 // Dummy implementation for allocate.
40 if (target_size > 0) {
41 m_buffer =
42 reinterpret_cast<char*>(::operator new(target_size, std::align_val_t{HeapBlockSize}));
43 m_size = target_size;
44
45 // Ensure postconditions.
46 ASSERT(m_buffer != nullptr);
47 }
48}
49
50void PooledBuffer::Shrink(size_t ideal_size) {
51 ASSERT(ideal_size <= GetAllocatableSizeMaxCore(true));
52
53 // Shrinking to zero means that we have no buffer.
54 if (ideal_size == 0) {
55 ::operator delete(m_buffer, std::align_val_t{HeapBlockSize});
56 m_buffer = nullptr;
57 m_size = ideal_size;
58 }
59}
60
61} // namespace FileSys
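
For clarity, the allocation bounds implied by the constants above work out as follows; the static_asserts are illustrative only and assume they sit next to the definitions, using the Common::Literals suffixes the header already pulls in.

// 4 KiB blocks shifted by the two order limits:
//   HeapAllocatableSizeMax         = 4 KiB << 7  = 512 KiB  (regular allocations)
//   HeapAllocatableSizeMaxForLarge = 4 KiB << 10 =   4 MiB  ("particularly large" allocations)
static_assert(HeapAllocatableSizeMax == 512_KiB);
static_assert(HeapAllocatableSizeMaxForLarge == 4_MiB);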
diff --git a/src/core/file_sys/fssystem/fssystem_pooled_buffer.h b/src/core/file_sys/fssystem/fssystem_pooled_buffer.h
new file mode 100644
index 000000000..9a6adbcb5
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_pooled_buffer.h
@@ -0,0 +1,95 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "common/common_funcs.h"
7#include "common/common_types.h"
8#include "common/literals.h"
9#include "core/hle/result.h"
10
11namespace FileSys {
12
13using namespace Common::Literals;
14
15constexpr inline size_t BufferPoolAlignment = 4_KiB;
16constexpr inline size_t BufferPoolWorkSize = 320;
17
18class PooledBuffer {
19 YUZU_NON_COPYABLE(PooledBuffer);
20
21public:
22 // Constructor/Destructor.
23 constexpr PooledBuffer() : m_buffer(), m_size() {}
24
25 PooledBuffer(size_t ideal_size, size_t required_size) : m_buffer(), m_size() {
26 this->Allocate(ideal_size, required_size);
27 }
28
29 ~PooledBuffer() {
30 this->Deallocate();
31 }
32
33 // Move and assignment.
34 explicit PooledBuffer(PooledBuffer&& rhs) : m_buffer(rhs.m_buffer), m_size(rhs.m_size) {
35 rhs.m_buffer = nullptr;
36 rhs.m_size = 0;
37 }
38
39 PooledBuffer& operator=(PooledBuffer&& rhs) {
40 PooledBuffer(std::move(rhs)).Swap(*this);
41 return *this;
42 }
43
44 // Allocation API.
45 void Allocate(size_t ideal_size, size_t required_size) {
46 return this->AllocateCore(ideal_size, required_size, false);
47 }
48
49 void AllocateParticularlyLarge(size_t ideal_size, size_t required_size) {
50 return this->AllocateCore(ideal_size, required_size, true);
51 }
52
53 void Shrink(size_t ideal_size);
54
55 void Deallocate() {
56 // Shrink the buffer to empty.
57 this->Shrink(0);
58 ASSERT(m_buffer == nullptr);
59 }
60
61 char* GetBuffer() const {
62 ASSERT(m_buffer != nullptr);
63 return m_buffer;
64 }
65
66 size_t GetSize() const {
67 ASSERT(m_buffer != nullptr);
68 return m_size;
69 }
70
71public:
72 static size_t GetAllocatableSizeMax() {
73 return GetAllocatableSizeMaxCore(false);
74 }
75 static size_t GetAllocatableParticularlyLargeSizeMax() {
76 return GetAllocatableSizeMaxCore(true);
77 }
78
79private:
80 static size_t GetAllocatableSizeMaxCore(bool large);
81
82private:
83 void Swap(PooledBuffer& rhs) {
84 std::swap(m_buffer, rhs.m_buffer);
85 std::swap(m_size, rhs.m_size);
86 }
87
88 void AllocateCore(size_t ideal_size, size_t required_size, bool large);
89
90private:
91 char* m_buffer;
92 size_t m_size;
93};
94
95} // namespace FileSys
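
A short usage sketch for the interface above (not part of this change): the destructor runs Deallocate(), so a caller only sizes and fills the buffer. The copy helper below, its name, and the chunking policy are assumptions for illustration.

// Illustrative: stream between two virtual files through a pooled scratch buffer.
void CopyThroughPooledBuffer(FileSys::VirtualFile src, FileSys::VirtualFile dst) {
    FileSys::PooledBuffer pooled(FileSys::PooledBuffer::GetAllocatableSizeMax(),
                                 FileSys::BufferPoolAlignment);
    size_t remaining = src->GetSize();
    size_t offset = 0;
    while (remaining > 0) {
        const size_t chunk = std::min(remaining, pooled.GetSize());
        src->Read(reinterpret_cast<u8*>(pooled.GetBuffer()), chunk, offset);
        dst->Write(reinterpret_cast<const u8*>(pooled.GetBuffer()), chunk, offset);
        offset += chunk;
        remaining -= chunk;
    }
} // ~PooledBuffer() -> Deallocate() -> Shrink(0)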
diff --git a/src/core/file_sys/fssystem/fssystem_sparse_storage.cpp b/src/core/file_sys/fssystem/fssystem_sparse_storage.cpp
new file mode 100644
index 000000000..8574a11dd
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_sparse_storage.cpp
@@ -0,0 +1,39 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "core/file_sys/fssystem/fssystem_sparse_storage.h"
5
6namespace FileSys {
7
8size_t SparseStorage::Read(u8* buffer, size_t size, size_t offset) const {
9 // Validate preconditions.
10 ASSERT(this->IsInitialized());
11 ASSERT(buffer != nullptr);
12
13 // Allow zero size.
14 if (size == 0) {
15 return size;
16 }
17
18 SparseStorage* self = const_cast<SparseStorage*>(this);
19
20 if (self->GetEntryTable().IsEmpty()) {
21 BucketTree::Offsets table_offsets;
22 ASSERT(R_SUCCEEDED(self->GetEntryTable().GetOffsets(std::addressof(table_offsets))));
23 ASSERT(table_offsets.IsInclude(offset, size));
24
25 std::memset(buffer, 0, size);
26 } else {
27 self->OperatePerEntry<false, true>(
28 offset, size,
29 [=](VirtualFile storage, s64 data_offset, s64 cur_offset, s64 cur_size) -> Result {
30 storage->Read(reinterpret_cast<u8*>(buffer) + (cur_offset - offset),
31 static_cast<size_t>(cur_size), data_offset);
32 R_SUCCEED();
33 });
34 }
35
36 return size;
37}
38
39} // namespace FileSys
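
In other words, Read() above either zero-fills the whole request (empty entry table) or walks the bucket-tree entries and forwards each piece to the storage that entry names. A worked trace under an assumed table that maps only the first 0x1000 bytes to real data (the index-0/index-1 split comes from the inherited IndirectStorage machinery and the header that follows):

// Illustrative trace for SparseStorage::Read(buffer, 0x2000, 0x0):
//   [0x0000, 0x1000) -> entry routed to storage index 0 (the data storage): real bytes
//   [0x1000, 0x2000) -> entry routed to storage index 1 (ZeroStorage): zero-filled
// A completely empty entry table short-circuits to a single std::memset of the request.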
diff --git a/src/core/file_sys/fssystem/fssystem_sparse_storage.h b/src/core/file_sys/fssystem/fssystem_sparse_storage.h
new file mode 100644
index 000000000..6c196ec61
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_sparse_storage.h
@@ -0,0 +1,72 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "core/file_sys/fssystem/fssystem_indirect_storage.h"
7
8namespace FileSys {
9
10class SparseStorage : public IndirectStorage {
11 YUZU_NON_COPYABLE(SparseStorage);
12 YUZU_NON_MOVEABLE(SparseStorage);
13
14private:
15 class ZeroStorage : public IReadOnlyStorage {
16 public:
17 ZeroStorage() {}
18 virtual ~ZeroStorage() {}
19
20 virtual size_t GetSize() const override {
21 return std::numeric_limits<size_t>::max();
22 }
23
24 virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
25 ASSERT(buffer != nullptr || size == 0);
26
27 if (size > 0) {
28 std::memset(buffer, 0, size);
29 }
30
31 return size;
32 }
33 };
34
35public:
36 SparseStorage() : IndirectStorage(), m_zero_storage(std::make_shared<ZeroStorage>()) {}
37 virtual ~SparseStorage() {}
38
39 using IndirectStorage::Initialize;
40
41 void Initialize(s64 end_offset) {
42 this->GetEntryTable().Initialize(NodeSize, end_offset);
43 this->SetZeroStorage();
44 }
45
46 void SetDataStorage(VirtualFile storage) {
47 ASSERT(this->IsInitialized());
48
49 this->SetStorage(0, storage);
50 this->SetZeroStorage();
51 }
52
53 template <typename T>
54 void SetDataStorage(T storage, s64 offset, s64 size) {
55 ASSERT(this->IsInitialized());
56
57 this->SetStorage(0, storage, offset, size);
58 this->SetZeroStorage();
59 }
60
61 virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
62
63private:
64 void SetZeroStorage() {
65 return this->SetStorage(1, m_zero_storage, 0, std::numeric_limits<s64>::max());
66 }
67
68private:
69 VirtualFile m_zero_storage;
70};
71
72} // namespace FileSys
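
A hedged sketch of the simplest configuration the class above supports: Initialize(s64) builds an entry-table-less sparse storage whose every read is serviced by the internal ZeroStorage bound via SetZeroStorage(). Real NCA sections instead use the inherited IndirectStorage::Initialize overloads plus SetDataStorage(); the constant sizes below are placeholders.

// Illustrative only: a hole-only sparse storage of 64 KiB.
auto sparse = std::make_shared<FileSys::SparseStorage>();
sparse->Initialize(0x10000);
std::vector<u8> head(0x200);
sparse->Read(head.data(), head.size(), 0); // head is now all zeroes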
diff --git a/src/core/file_sys/fssystem/fssystem_switch_storage.h b/src/core/file_sys/fssystem/fssystem_switch_storage.h
new file mode 100644
index 000000000..2b43927cb
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_switch_storage.h
@@ -0,0 +1,80 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "core/file_sys/fssystem/fs_i_storage.h"
7
8namespace FileSys {
9
10class RegionSwitchStorage : public IReadOnlyStorage {
11 YUZU_NON_COPYABLE(RegionSwitchStorage);
12 YUZU_NON_MOVEABLE(RegionSwitchStorage);
13
14public:
15 struct Region {
16 s64 offset;
17 s64 size;
18 };
19
20public:
21 RegionSwitchStorage(VirtualFile&& i, VirtualFile&& o, Region r)
22 : m_inside_region_storage(std::move(i)), m_outside_region_storage(std::move(o)),
23 m_region(r) {}
24
25 virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
26 // Process until we're done.
27 size_t processed = 0;
28 while (processed < size) {
29 // Process on the appropriate storage.
30 s64 cur_size = 0;
31 if (this->CheckRegions(std::addressof(cur_size), offset + processed,
32 size - processed)) {
33 m_inside_region_storage->Read(buffer + processed, cur_size, offset + processed);
34 } else {
35 m_outside_region_storage->Read(buffer + processed, cur_size, offset + processed);
36 }
37
38 // Advance.
39 processed += cur_size;
40 }
41
42 return size;
43 }
44
45 virtual size_t GetSize() const override {
46 return m_inside_region_storage->GetSize();
47 }
48
49private:
50 bool CheckRegions(s64* out_current_size, s64 offset, s64 size) const {
51 // Check if our region contains the access.
52 if (m_region.offset <= offset) {
53 if (offset < m_region.offset + m_region.size) {
54 if (m_region.offset + m_region.size <= offset + size) {
55 *out_current_size = m_region.offset + m_region.size - offset;
56 } else {
57 *out_current_size = size;
58 }
59 return true;
60 } else {
61 *out_current_size = size;
62 return false;
63 }
64 } else {
65 if (m_region.offset <= offset + size) {
66 *out_current_size = m_region.offset - offset;
67 } else {
68 *out_current_size = size;
69 }
70 return false;
71 }
72 }
73
74private:
75 VirtualFile m_inside_region_storage;
76 VirtualFile m_outside_region_storage;
77 Region m_region;
78};
79
80} // namespace FileSys
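
CheckRegions() clamps every chunk so a single forwarded Read() never straddles the region boundary, and the loop in Read() stitches the pieces together. A worked example, assuming `patched` and `original` are pre-existing VirtualFiles and the region is one page at 0x1000:

// Illustrative: route [0x1000, 0x2000) to `patched`, everything else to `original`.
FileSys::RegionSwitchStorage storage(std::move(patched), std::move(original), {0x1000, 0x1000});
std::vector<u8> buf(0x2000);
storage.Read(buf.data(), buf.size(), 0x800);
// CheckRegions() splits this request into three chunks:
//   [0x0800, 0x1000) -> m_outside_region_storage (0x800 bytes)
//   [0x1000, 0x2000) -> m_inside_region_storage  (0x1000 bytes)
//   [0x2000, 0x2800) -> m_outside_region_storage (0x800 bytes)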
diff --git a/src/core/file_sys/fssystem/fssystem_utility.cpp b/src/core/file_sys/fssystem/fssystem_utility.cpp
new file mode 100644
index 000000000..ceabb8ff1
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_utility.cpp
@@ -0,0 +1,27 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "core/file_sys/fssystem/fssystem_utility.h"
5
6namespace FileSys {
7
8void AddCounter(void* counter_, size_t counter_size, u64 value) {
9 u8* counter = static_cast<u8*>(counter_);
10 u64 remaining = value;
11 u8 carry = 0;
12
13 for (size_t i = 0; i < counter_size; i++) {
14 auto sum = counter[counter_size - 1 - i] + (remaining & 0xFF) + carry;
15 carry = static_cast<u8>(sum >> (sizeof(u8) * 8));
16 auto sum8 = static_cast<u8>(sum & 0xFF);
17
18 counter[counter_size - 1 - i] = sum8;
19
20 remaining >>= (sizeof(u8) * 8);
21 if (carry == 0 && remaining == 0) {
22 break;
23 }
24 }
25}
26
27} // namespace FileSys
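
AddCounter() treats the buffer as one big-endian integer and propagates the carry upward from the least-significant byte, stopping early once both the carry and the remaining addend are exhausted. A small worked example on a 16-byte AES-CTR style counter:

// Illustrative: 0x...00FF + 0x100 = 0x...01FF, touching only the low two bytes.
std::array<u8, 16> ctr{};   // 00 .. 00 00
ctr[15] = 0xFF;             // 00 .. 00 FF
FileSys::AddCounter(ctr.data(), ctr.size(), 0x100);
// ctr is now 00 .. 01 FF; the loop stops in its second iteration
// because both carry and remaining are zero.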
diff --git a/src/core/file_sys/fssystem/fssystem_utility.h b/src/core/file_sys/fssystem/fssystem_utility.h
new file mode 100644
index 000000000..284b8b811
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_utility.h
@@ -0,0 +1,12 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "common/common_funcs.h"
7
8namespace FileSys {
9
10void AddCounter(void* counter, size_t counter_size, u64 value);
11
12}
diff --git a/src/core/file_sys/nca_patch.cpp b/src/core/file_sys/nca_patch.cpp
deleted file mode 100644
index 2735d053b..000000000
--- a/src/core/file_sys/nca_patch.cpp
+++ /dev/null
@@ -1,217 +0,0 @@
1// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include <algorithm>
5#include <array>
6#include <cstddef>
7#include <cstring>
8
9#include "common/assert.h"
10#include "core/crypto/aes_util.h"
11#include "core/file_sys/nca_patch.h"
12
13namespace FileSys {
14namespace {
15template <bool Subsection, typename BlockType, typename BucketType>
16std::pair<std::size_t, std::size_t> SearchBucketEntry(u64 offset, const BlockType& block,
17 const BucketType& buckets) {
18 if constexpr (Subsection) {
19 const auto& last_bucket = buckets[block.number_buckets - 1];
20 if (offset >= last_bucket.entries[last_bucket.number_entries].address_patch) {
21 return {block.number_buckets - 1, last_bucket.number_entries};
22 }
23 } else {
24 ASSERT_MSG(offset <= block.size, "Offset is out of bounds in BKTR relocation block.");
25 }
26
27 std::size_t bucket_id = std::count_if(
28 block.base_offsets.begin() + 1, block.base_offsets.begin() + block.number_buckets,
29 [&offset](u64 base_offset) { return base_offset <= offset; });
30
31 const auto& bucket = buckets[bucket_id];
32
33 if (bucket.number_entries == 1) {
34 return {bucket_id, 0};
35 }
36
37 std::size_t low = 0;
38 std::size_t mid = 0;
39 std::size_t high = bucket.number_entries - 1;
40 while (low <= high) {
41 mid = (low + high) / 2;
42 if (bucket.entries[mid].address_patch > offset) {
43 high = mid - 1;
44 } else {
45 if (mid == bucket.number_entries - 1 ||
46 bucket.entries[mid + 1].address_patch > offset) {
47 return {bucket_id, mid};
48 }
49
50 low = mid + 1;
51 }
52 }
53 ASSERT_MSG(false, "Offset could not be found in BKTR block.");
54 return {0, 0};
55}
56} // Anonymous namespace
57
58BKTR::BKTR(VirtualFile base_romfs_, VirtualFile bktr_romfs_, RelocationBlock relocation_,
59 std::vector<RelocationBucket> relocation_buckets_, SubsectionBlock subsection_,
60 std::vector<SubsectionBucket> subsection_buckets_, bool is_encrypted_,
61 Core::Crypto::Key128 key_, u64 base_offset_, u64 ivfc_offset_,
62 std::array<u8, 8> section_ctr_)
63 : relocation(relocation_), relocation_buckets(std::move(relocation_buckets_)),
64 subsection(subsection_), subsection_buckets(std::move(subsection_buckets_)),
65 base_romfs(std::move(base_romfs_)), bktr_romfs(std::move(bktr_romfs_)),
66 encrypted(is_encrypted_), key(key_), base_offset(base_offset_), ivfc_offset(ivfc_offset_),
67 section_ctr(section_ctr_) {
68 for (std::size_t i = 0; i < relocation.number_buckets - 1; ++i) {
69 relocation_buckets[i].entries.push_back({relocation.base_offsets[i + 1], 0, 0});
70 }
71
72 for (std::size_t i = 0; i < subsection.number_buckets - 1; ++i) {
73 subsection_buckets[i].entries.push_back({subsection_buckets[i + 1].entries[0].address_patch,
74 {0},
75 subsection_buckets[i + 1].entries[0].ctr});
76 }
77
78 relocation_buckets.back().entries.push_back({relocation.size, 0, 0});
79}
80
81BKTR::~BKTR() = default;
82
83std::size_t BKTR::Read(u8* data, std::size_t length, std::size_t offset) const {
84 // Read out of bounds.
85 if (offset >= relocation.size) {
86 return 0;
87 }
88
89 const auto relocation_entry = GetRelocationEntry(offset);
90 const auto section_offset =
91 offset - relocation_entry.address_patch + relocation_entry.address_source;
92 const auto bktr_read = relocation_entry.from_patch;
93
94 const auto next_relocation = GetNextRelocationEntry(offset);
95
96 if (offset + length > next_relocation.address_patch) {
97 const u64 partition = next_relocation.address_patch - offset;
98 return Read(data, partition, offset) +
99 Read(data + partition, length - partition, offset + partition);
100 }
101
102 if (!bktr_read) {
103 ASSERT_MSG(section_offset >= ivfc_offset, "Offset calculation negative.");
104 return base_romfs->Read(data, length, section_offset - ivfc_offset);
105 }
106
107 if (!encrypted) {
108 return bktr_romfs->Read(data, length, section_offset);
109 }
110
111 const auto subsection_entry = GetSubsectionEntry(section_offset);
112 Core::Crypto::AESCipher<Core::Crypto::Key128> cipher(key, Core::Crypto::Mode::CTR);
113
114 // Calculate AES IV
115 std::array<u8, 16> iv{};
116 auto subsection_ctr = subsection_entry.ctr;
117 auto offset_iv = section_offset + base_offset;
118 for (std::size_t i = 0; i < section_ctr.size(); ++i) {
119 iv[i] = section_ctr[0x8 - i - 1];
120 }
121 offset_iv >>= 4;
122 for (std::size_t i = 0; i < sizeof(u64); ++i) {
123 iv[0xF - i] = static_cast<u8>(offset_iv & 0xFF);
124 offset_iv >>= 8;
125 }
126 for (std::size_t i = 0; i < sizeof(u32); ++i) {
127 iv[0x7 - i] = static_cast<u8>(subsection_ctr & 0xFF);
128 subsection_ctr >>= 8;
129 }
130 cipher.SetIV(iv);
131
132 const auto next_subsection = GetNextSubsectionEntry(section_offset);
133
134 if (section_offset + length > next_subsection.address_patch) {
135 const u64 partition = next_subsection.address_patch - section_offset;
136 return Read(data, partition, offset) +
137 Read(data + partition, length - partition, offset + partition);
138 }
139
140 const auto block_offset = section_offset & 0xF;
141 if (block_offset != 0) {
142 auto block = bktr_romfs->ReadBytes(0x10, section_offset & ~0xF);
143 cipher.Transcode(block.data(), block.size(), block.data(), Core::Crypto::Op::Decrypt);
144 if (length + block_offset < 0x10) {
145 std::memcpy(data, block.data() + block_offset, std::min(length, block.size()));
146 return std::min(length, block.size());
147 }
148
149 const auto read = 0x10 - block_offset;
150 std::memcpy(data, block.data() + block_offset, read);
151 return read + Read(data + read, length - read, offset + read);
152 }
153
154 const auto raw_read = bktr_romfs->Read(data, length, section_offset);
155 cipher.Transcode(data, raw_read, data, Core::Crypto::Op::Decrypt);
156 return raw_read;
157}
158
159RelocationEntry BKTR::GetRelocationEntry(u64 offset) const {
160 const auto res = SearchBucketEntry<false>(offset, relocation, relocation_buckets);
161 return relocation_buckets[res.first].entries[res.second];
162}
163
164RelocationEntry BKTR::GetNextRelocationEntry(u64 offset) const {
165 const auto res = SearchBucketEntry<false>(offset, relocation, relocation_buckets);
166 const auto bucket = relocation_buckets[res.first];
167 if (res.second + 1 < bucket.entries.size())
168 return bucket.entries[res.second + 1];
169 return relocation_buckets[res.first + 1].entries[0];
170}
171
172SubsectionEntry BKTR::GetSubsectionEntry(u64 offset) const {
173 const auto res = SearchBucketEntry<true>(offset, subsection, subsection_buckets);
174 return subsection_buckets[res.first].entries[res.second];
175}
176
177SubsectionEntry BKTR::GetNextSubsectionEntry(u64 offset) const {
178 const auto res = SearchBucketEntry<true>(offset, subsection, subsection_buckets);
179 const auto bucket = subsection_buckets[res.first];
180 if (res.second + 1 < bucket.entries.size())
181 return bucket.entries[res.second + 1];
182 return subsection_buckets[res.first + 1].entries[0];
183}
184
185std::string BKTR::GetName() const {
186 return base_romfs->GetName();
187}
188
189std::size_t BKTR::GetSize() const {
190 return relocation.size;
191}
192
193bool BKTR::Resize(std::size_t new_size) {
194 return false;
195}
196
197VirtualDir BKTR::GetContainingDirectory() const {
198 return base_romfs->GetContainingDirectory();
199}
200
201bool BKTR::IsWritable() const {
202 return false;
203}
204
205bool BKTR::IsReadable() const {
206 return true;
207}
208
209std::size_t BKTR::Write(const u8* data, std::size_t length, std::size_t offset) {
210 return 0;
211}
212
213bool BKTR::Rename(std::string_view name) {
214 return base_romfs->Rename(name);
215}
216
217} // namespace FileSys
diff --git a/src/core/file_sys/nca_patch.h b/src/core/file_sys/nca_patch.h
deleted file mode 100644
index 595e3ef09..000000000
--- a/src/core/file_sys/nca_patch.h
+++ /dev/null
@@ -1,145 +0,0 @@
1// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include <array>
7#include <memory>
8#include <vector>
9
10#include "common/common_funcs.h"
11#include "common/common_types.h"
12#include "common/swap.h"
13#include "core/crypto/key_manager.h"
14
15namespace FileSys {
16
17#pragma pack(push, 1)
18struct RelocationEntry {
19 u64_le address_patch;
20 u64_le address_source;
21 u32 from_patch;
22};
23#pragma pack(pop)
24static_assert(sizeof(RelocationEntry) == 0x14, "RelocationEntry has incorrect size.");
25
26struct RelocationBucketRaw {
27 INSERT_PADDING_BYTES(4);
28 u32_le number_entries;
29 u64_le end_offset;
30 std::array<RelocationEntry, 0x332> relocation_entries;
31 INSERT_PADDING_BYTES(8);
32};
33static_assert(sizeof(RelocationBucketRaw) == 0x4000, "RelocationBucketRaw has incorrect size.");
34
35// Vector version of RelocationBucketRaw
36struct RelocationBucket {
37 u32 number_entries;
38 u64 end_offset;
39 std::vector<RelocationEntry> entries;
40};
41
42struct RelocationBlock {
43 INSERT_PADDING_BYTES(4);
44 u32_le number_buckets;
45 u64_le size;
46 std::array<u64, 0x7FE> base_offsets;
47};
48static_assert(sizeof(RelocationBlock) == 0x4000, "RelocationBlock has incorrect size.");
49
50struct SubsectionEntry {
51 u64_le address_patch;
52 INSERT_PADDING_BYTES(0x4);
53 u32_le ctr;
54};
55static_assert(sizeof(SubsectionEntry) == 0x10, "SubsectionEntry has incorrect size.");
56
57struct SubsectionBucketRaw {
58 INSERT_PADDING_BYTES(4);
59 u32_le number_entries;
60 u64_le end_offset;
61 std::array<SubsectionEntry, 0x3FF> subsection_entries;
62};
63static_assert(sizeof(SubsectionBucketRaw) == 0x4000, "SubsectionBucketRaw has incorrect size.");
64
65// Vector version of SubsectionBucketRaw
66struct SubsectionBucket {
67 u32 number_entries;
68 u64 end_offset;
69 std::vector<SubsectionEntry> entries;
70};
71
72struct SubsectionBlock {
73 INSERT_PADDING_BYTES(4);
74 u32_le number_buckets;
75 u64_le size;
76 std::array<u64, 0x7FE> base_offsets;
77};
78static_assert(sizeof(SubsectionBlock) == 0x4000, "SubsectionBlock has incorrect size.");
79
80inline RelocationBucket ConvertRelocationBucketRaw(RelocationBucketRaw raw) {
81 return {raw.number_entries,
82 raw.end_offset,
83 {raw.relocation_entries.begin(), raw.relocation_entries.begin() + raw.number_entries}};
84}
85
86inline SubsectionBucket ConvertSubsectionBucketRaw(SubsectionBucketRaw raw) {
87 return {raw.number_entries,
88 raw.end_offset,
89 {raw.subsection_entries.begin(), raw.subsection_entries.begin() + raw.number_entries}};
90}
91
92class BKTR : public VfsFile {
93public:
94 BKTR(VirtualFile base_romfs, VirtualFile bktr_romfs, RelocationBlock relocation,
95 std::vector<RelocationBucket> relocation_buckets, SubsectionBlock subsection,
96 std::vector<SubsectionBucket> subsection_buckets, bool is_encrypted,
97 Core::Crypto::Key128 key, u64 base_offset, u64 ivfc_offset, std::array<u8, 8> section_ctr);
98 ~BKTR() override;
99
100 std::size_t Read(u8* data, std::size_t length, std::size_t offset) const override;
101
102 std::string GetName() const override;
103
104 std::size_t GetSize() const override;
105
106 bool Resize(std::size_t new_size) override;
107
108 VirtualDir GetContainingDirectory() const override;
109
110 bool IsWritable() const override;
111
112 bool IsReadable() const override;
113
114 std::size_t Write(const u8* data, std::size_t length, std::size_t offset) override;
115
116 bool Rename(std::string_view name) override;
117
118private:
119 RelocationEntry GetRelocationEntry(u64 offset) const;
120 RelocationEntry GetNextRelocationEntry(u64 offset) const;
121
122 SubsectionEntry GetSubsectionEntry(u64 offset) const;
123 SubsectionEntry GetNextSubsectionEntry(u64 offset) const;
124
125 RelocationBlock relocation;
126 std::vector<RelocationBucket> relocation_buckets;
127 SubsectionBlock subsection;
128 std::vector<SubsectionBucket> subsection_buckets;
129
130 // Should be the raw base romfs, decrypted.
131 VirtualFile base_romfs;
132 // Should be the raw BKTR romfs, (located at media_offset with size media_size).
133 VirtualFile bktr_romfs;
134
135 bool encrypted;
136 Core::Crypto::Key128 key;
137
138 // Base offset into NCA, used for IV calculation.
139 u64 base_offset;
140 // Distance between IVFC start and RomFS start, used for base reads
141 u64 ivfc_offset;
142 std::array<u8, 8> section_ctr;
143};
144
145} // namespace FileSys
diff --git a/src/core/file_sys/patch_manager.cpp b/src/core/file_sys/patch_manager.cpp
index 2ba1b34a4..0701e3f0e 100644
--- a/src/core/file_sys/patch_manager.cpp
+++ b/src/core/file_sys/patch_manager.cpp
@@ -141,8 +141,7 @@ VirtualDir PatchManager::PatchExeFS(VirtualDir exefs) const {
     const auto update_tid = GetUpdateTitleID(title_id);
     const auto update = content_provider.GetEntry(update_tid, ContentRecordType::Program);
 
-    if (!update_disabled && update != nullptr && update->GetExeFS() != nullptr &&
-        update->GetStatus() == Loader::ResultStatus::ErrorMissingBKTRBaseRomFS) {
+    if (!update_disabled && update != nullptr && update->GetExeFS() != nullptr) {
         LOG_INFO(Loader, " ExeFS: Update ({}) applied successfully",
                  FormatTitleVersion(content_provider.GetEntryVersion(update_tid).value_or(0)));
         exefs = update->GetExeFS();
@@ -358,11 +357,6 @@ static void ApplyLayeredFS(VirtualFile& romfs, u64 title_id, ContentRecordType t
         return;
     }
 
-    auto extracted = ExtractRomFS(romfs);
-    if (extracted == nullptr) {
-        return;
-    }
-
     const auto& disabled = Settings::values.disabled_addons[title_id];
     std::vector<VirtualDir> patch_dirs = load_dir->GetSubdirectories();
     if (std::find(disabled.cbegin(), disabled.cend(), "SDMC") == disabled.cend()) {
@@ -394,6 +388,11 @@ static void ApplyLayeredFS(VirtualFile& romfs, u64 title_id, ContentRecordType t
         return;
     }
 
+    auto extracted = ExtractRomFS(romfs);
+    if (extracted == nullptr) {
+        return;
+    }
+
     layers.push_back(std::move(extracted));
 
     auto layered = LayeredVfsDirectory::MakeLayeredDirectory(std::move(layers));
@@ -412,39 +411,43 @@ static void ApplyLayeredFS(VirtualFile& romfs, u64 title_id, ContentRecordType t
     romfs = std::move(packed);
 }
 
-VirtualFile PatchManager::PatchRomFS(VirtualFile romfs, u64 ivfc_offset, ContentRecordType type,
-                                     VirtualFile update_raw, bool apply_layeredfs) const {
+VirtualFile PatchManager::PatchRomFS(const NCA* base_nca, VirtualFile base_romfs,
+                                     ContentRecordType type, VirtualFile packed_update_raw,
+                                     bool apply_layeredfs) const {
     const auto log_string = fmt::format("Patching RomFS for title_id={:016X}, type={:02X}",
                                         title_id, static_cast<u8>(type));
-
     if (type == ContentRecordType::Program || type == ContentRecordType::Data) {
         LOG_INFO(Loader, "{}", log_string);
     } else {
         LOG_DEBUG(Loader, "{}", log_string);
     }
 
-    if (romfs == nullptr) {
-        return romfs;
+    if (base_romfs == nullptr) {
+        return base_romfs;
     }
 
+    auto romfs = base_romfs;
+
     // Game Updates
     const auto update_tid = GetUpdateTitleID(title_id);
-    const auto update = content_provider.GetEntryRaw(update_tid, type);
+    const auto update_raw = content_provider.GetEntryRaw(update_tid, type);
 
     const auto& disabled = Settings::values.disabled_addons[title_id];
     const auto update_disabled =
         std::find(disabled.cbegin(), disabled.cend(), "Update") != disabled.cend();
 
-    if (!update_disabled && update != nullptr) {
-        const auto new_nca = std::make_shared<NCA>(update, romfs, ivfc_offset);
+    if (!update_disabled && update_raw != nullptr && base_nca != nullptr) {
+        const auto new_nca = std::make_shared<NCA>(update_raw, base_nca);
        if (new_nca->GetStatus() == Loader::ResultStatus::Success &&
            new_nca->GetRomFS() != nullptr) {
            LOG_INFO(Loader, " RomFS: Update ({}) applied successfully",
                     FormatTitleVersion(content_provider.GetEntryVersion(update_tid).value_or(0)));
            romfs = new_nca->GetRomFS();
+           const auto version =
+               FormatTitleVersion(content_provider.GetEntryVersion(update_tid).value_or(0));
        }
-    } else if (!update_disabled && update_raw != nullptr) {
-        const auto new_nca = std::make_shared<NCA>(update_raw, romfs, ivfc_offset);
+    } else if (!update_disabled && packed_update_raw != nullptr && base_nca != nullptr) {
+        const auto new_nca = std::make_shared<NCA>(packed_update_raw, base_nca);
        if (new_nca->GetStatus() == Loader::ResultStatus::Success &&
            new_nca->GetRomFS() != nullptr) {
            LOG_INFO(Loader, " RomFS: Update (PACKED) applied successfully");
@@ -608,7 +611,7 @@ PatchManager::Metadata PatchManager::ParseControlNCA(const NCA& nca) const {
         return {};
     }
 
-    const auto romfs = PatchRomFS(base_romfs, nca.GetBaseIVFCOffset(), ContentRecordType::Control);
+    const auto romfs = PatchRomFS(&nca, base_romfs, ContentRecordType::Control);
     if (romfs == nullptr) {
         return {};
     }
diff --git a/src/core/file_sys/patch_manager.h b/src/core/file_sys/patch_manager.h
index 69d15e2f8..adcde7b7d 100644
--- a/src/core/file_sys/patch_manager.h
+++ b/src/core/file_sys/patch_manager.h
@@ -61,9 +61,9 @@ public:
     // Currently tracked RomFS patches:
     // - Game Updates
     // - LayeredFS
-    [[nodiscard]] VirtualFile PatchRomFS(VirtualFile base, u64 ivfc_offset,
+    [[nodiscard]] VirtualFile PatchRomFS(const NCA* base_nca, VirtualFile base_romfs,
                                          ContentRecordType type = ContentRecordType::Program,
-                                         VirtualFile update_raw = nullptr,
+                                         VirtualFile packed_update_raw = nullptr,
                                          bool apply_layeredfs = true) const;
 
     // Returns a vector of pairs between patch names and patch versions.
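
Under the new signature above, callers hand PatchRomFS() the base NCA itself instead of a pre-computed IVFC offset, and the packed update is passed separately from the update looked up in the content provider. A hedged sketch of a call site mirroring the callers updated later in this commit; `nca`, `packed_update`, and the surrounding objects are placeholders:

// Illustrative call under the new interface; assumes a loaded FileSys::NCA `nca`.
const FileSys::PatchManager pm{title_id, filesystem_controller, content_provider};
const FileSys::VirtualFile patched_romfs =
    pm.PatchRomFS(&nca, nca.GetRomFS(), FileSys::ContentRecordType::Program, packed_update);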
diff --git a/src/core/file_sys/registered_cache.cpp b/src/core/file_sys/registered_cache.cpp
index a6960170c..a28af3594 100644
--- a/src/core/file_sys/registered_cache.cpp
+++ b/src/core/file_sys/registered_cache.cpp
@@ -416,9 +416,9 @@ void RegisteredCache::ProcessFiles(const std::vector<NcaID>& ids) {
 
         if (file == nullptr)
             continue;
-        const auto nca = std::make_shared<NCA>(parser(file, id), nullptr, 0);
+        const auto nca = std::make_shared<NCA>(parser(file, id));
         if (nca->GetStatus() != Loader::ResultStatus::Success ||
-            nca->GetType() != NCAContentType::Meta) {
+            nca->GetType() != NCAContentType::Meta || nca->GetSubdirectories().empty()) {
             continue;
         }
 
@@ -500,7 +500,7 @@ std::unique_ptr<NCA> RegisteredCache::GetEntry(u64 title_id, ContentRecordType t
     const auto raw = GetEntryRaw(title_id, type);
     if (raw == nullptr)
         return nullptr;
-    return std::make_unique<NCA>(raw, nullptr, 0);
+    return std::make_unique<NCA>(raw);
 }
 
 template <typename T>
@@ -964,7 +964,7 @@ std::unique_ptr<NCA> ManualContentProvider::GetEntry(u64 title_id, ContentRecord
     const auto res = GetEntryRaw(title_id, type);
     if (res == nullptr)
         return nullptr;
-    return std::make_unique<NCA>(res, nullptr, 0);
+    return std::make_unique<NCA>(res);
 }
 
 std::vector<ContentProviderEntry> ManualContentProvider::ListEntriesFilter(
diff --git a/src/core/file_sys/romfs_factory.cpp b/src/core/file_sys/romfs_factory.cpp
index aa4726cfa..1bc07dae5 100644
--- a/src/core/file_sys/romfs_factory.cpp
+++ b/src/core/file_sys/romfs_factory.cpp
@@ -26,13 +26,12 @@ RomFSFactory::RomFSFactory(Loader::AppLoader& app_loader, ContentProvider& provi
     }
 
     updatable = app_loader.IsRomFSUpdatable();
-    ivfc_offset = app_loader.ReadRomFSIVFCOffset();
 }
 
 RomFSFactory::~RomFSFactory() = default;
 
 void RomFSFactory::SetPackedUpdate(VirtualFile update_raw_file) {
-    update_raw = std::move(update_raw_file);
+    packed_update_raw = std::move(update_raw_file);
 }
 
 VirtualFile RomFSFactory::OpenCurrentProcess(u64 current_process_title_id) const {
@@ -40,9 +39,11 @@ VirtualFile RomFSFactory::OpenCurrentProcess(u64 current_process_title_id) const
         return file;
     }
 
+    const auto type = ContentRecordType::Program;
+    const auto nca = content_provider.GetEntry(current_process_title_id, type);
     const PatchManager patch_manager{current_process_title_id, filesystem_controller,
                                      content_provider};
-    return patch_manager.PatchRomFS(file, ivfc_offset, ContentRecordType::Program, update_raw);
+    return patch_manager.PatchRomFS(nca.get(), file, ContentRecordType::Program, packed_update_raw);
 }
@@ -54,7 +55,7 @@ VirtualFile RomFSFactory::OpenPatchedRomFS(u64 title_id, ContentRecordType type)
 
     const PatchManager patch_manager{title_id, filesystem_controller, content_provider};
 
-    return patch_manager.PatchRomFS(nca->GetRomFS(), nca->GetBaseIVFCOffset(), type);
+    return patch_manager.PatchRomFS(nca.get(), nca->GetRomFS(), type);
 }
 
 VirtualFile RomFSFactory::OpenPatchedRomFSWithProgramIndex(u64 title_id, u8 program_index,
diff --git a/src/core/file_sys/romfs_factory.h b/src/core/file_sys/romfs_factory.h
index 7ec40d19d..e4809bc94 100644
--- a/src/core/file_sys/romfs_factory.h
+++ b/src/core/file_sys/romfs_factory.h
@@ -40,21 +40,22 @@ public:
                         Service::FileSystem::FileSystemController& controller);
     ~RomFSFactory();
 
-    void SetPackedUpdate(VirtualFile update_raw_file);
+    void SetPackedUpdate(VirtualFile packed_update_raw);
     [[nodiscard]] VirtualFile OpenCurrentProcess(u64 current_process_title_id) const;
     [[nodiscard]] VirtualFile OpenPatchedRomFS(u64 title_id, ContentRecordType type) const;
     [[nodiscard]] VirtualFile OpenPatchedRomFSWithProgramIndex(u64 title_id, u8 program_index,
                                                                ContentRecordType type) const;
     [[nodiscard]] VirtualFile Open(u64 title_id, StorageId storage, ContentRecordType type) const;
-
-private:
     [[nodiscard]] std::shared_ptr<NCA> GetEntry(u64 title_id, StorageId storage,
                                                 ContentRecordType type) const;
 
+private:
     VirtualFile file;
-    VirtualFile update_raw;
+    VirtualFile packed_update_raw;
+
+    VirtualFile base;
+
     bool updatable;
-    u64 ivfc_offset;
 
     ContentProvider& content_provider;
     Service::FileSystem::FileSystemController& filesystem_controller;
diff --git a/src/core/file_sys/submission_package.cpp b/src/core/file_sys/submission_package.cpp
index c90e6e372..e1e89ce2d 100644
--- a/src/core/file_sys/submission_package.cpp
+++ b/src/core/file_sys/submission_package.cpp
@@ -249,7 +249,7 @@ void NSP::ReadNCAs(const std::vector<VirtualFile>& files) {
         }
 
         const auto nca = std::make_shared<NCA>(outer_file);
-        if (nca->GetStatus() != Loader::ResultStatus::Success) {
+        if (nca->GetStatus() != Loader::ResultStatus::Success || nca->GetSubdirectories().empty()) {
             program_status[nca->GetTitleId()] = nca->GetStatus();
             continue;
         }
@@ -280,7 +280,7 @@ void NSP::ReadNCAs(const std::vector<VirtualFile>& files) {
             continue;
         }
 
-        auto next_nca = std::make_shared<NCA>(std::move(next_file), nullptr, 0);
+        auto next_nca = std::make_shared<NCA>(std::move(next_file));
 
         if (next_nca->GetType() == NCAContentType::Program) {
             program_status[next_nca->GetTitleId()] = next_nca->GetStatus();