summaryrefslogtreecommitdiff
path: root/src/core/file_sys/fssystem
diff options
context:
space:
mode:
authorGravatar Liam2023-08-10 21:34:43 -0400
committerGravatar Liam2023-08-15 17:47:25 -0400
commit86f6b6b7b2d930e8203114332b04a5c49a780b06 (patch)
treebf7ff58b0a36051d3c3489a40999d80357c570d0 /src/core/file_sys/fssystem
parentMerge pull request #11287 from liamwhite/replaced-bytes (diff)
downloadyuzu-86f6b6b7b2d930e8203114332b04a5c49a780b06.tar.gz
yuzu-86f6b6b7b2d930e8203114332b04a5c49a780b06.tar.xz
yuzu-86f6b6b7b2d930e8203114332b04a5c49a780b06.zip
vfs: expand support for NCA reading
Diffstat (limited to 'src/core/file_sys/fssystem')
-rw-r--r--src/core/file_sys/fssystem/fs_i_storage.h58
-rw-r--r--src/core/file_sys/fssystem/fs_types.h46
-rw-r--r--src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.cpp252
-rw-r--r--src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.h114
-rw-r--r--src/core/file_sys/fssystem/fssystem_aes_ctr_storage.cpp129
-rw-r--r--src/core/file_sys/fssystem/fssystem_aes_ctr_storage.h43
-rw-r--r--src/core/file_sys/fssystem/fssystem_aes_xts_storage.cpp112
-rw-r--r--src/core/file_sys/fssystem/fssystem_aes_xts_storage.h42
-rw-r--r--src/core/file_sys/fssystem/fssystem_alignment_matching_storage.h146
-rw-r--r--src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.cpp204
-rw-r--r--src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.h21
-rw-r--r--src/core/file_sys/fssystem/fssystem_bucket_tree.cpp598
-rw-r--r--src/core/file_sys/fssystem/fssystem_bucket_tree.h417
-rw-r--r--src/core/file_sys/fssystem/fssystem_bucket_tree_template_impl.h170
-rw-r--r--src/core/file_sys/fssystem/fssystem_bucket_tree_utils.h110
-rw-r--r--src/core/file_sys/fssystem/fssystem_compressed_storage.h960
-rw-r--r--src/core/file_sys/fssystem/fssystem_compression_common.h43
-rw-r--r--src/core/file_sys/fssystem/fssystem_compression_configuration.cpp36
-rw-r--r--src/core/file_sys/fssystem/fssystem_compression_configuration.h12
-rw-r--r--src/core/file_sys/fssystem/fssystem_crypto_configuration.cpp57
-rw-r--r--src/core/file_sys/fssystem/fssystem_crypto_configuration.h12
-rw-r--r--src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.cpp132
-rw-r--r--src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h164
-rw-r--r--src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.cpp103
-rw-r--r--src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.h44
-rw-r--r--src/core/file_sys/fssystem/fssystem_indirect_storage.cpp120
-rw-r--r--src/core/file_sys/fssystem/fssystem_indirect_storage.h294
-rw-r--r--src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.cpp30
-rw-r--r--src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.h42
-rw-r--r--src/core/file_sys/fssystem/fssystem_integrity_verification_storage.cpp95
-rw-r--r--src/core/file_sys/fssystem/fssystem_integrity_verification_storage.h65
-rw-r--r--src/core/file_sys/fssystem/fssystem_memory_resource_buffer_hold_storage.h58
-rw-r--r--src/core/file_sys/fssystem/fssystem_nca_file_system_driver.cpp1345
-rw-r--r--src/core/file_sys/fssystem/fssystem_nca_file_system_driver.h360
-rw-r--r--src/core/file_sys/fssystem/fssystem_nca_header.cpp20
-rw-r--r--src/core/file_sys/fssystem/fssystem_nca_header.h338
-rw-r--r--src/core/file_sys/fssystem/fssystem_nca_reader.cpp542
-rw-r--r--src/core/file_sys/fssystem/fssystem_pooled_buffer.cpp61
-rw-r--r--src/core/file_sys/fssystem/fssystem_pooled_buffer.h96
-rw-r--r--src/core/file_sys/fssystem/fssystem_sparse_storage.cpp40
-rw-r--r--src/core/file_sys/fssystem/fssystem_sparse_storage.h73
-rw-r--r--src/core/file_sys/fssystem/fssystem_switch_storage.h80
-rw-r--r--src/core/file_sys/fssystem/fssystem_utility.cpp24
-rw-r--r--src/core/file_sys/fssystem/fssystem_utility.h12
44 files changed, 7720 insertions, 0 deletions
diff --git a/src/core/file_sys/fssystem/fs_i_storage.h b/src/core/file_sys/fssystem/fs_i_storage.h
new file mode 100644
index 000000000..416dd57b8
--- /dev/null
+++ b/src/core/file_sys/fssystem/fs_i_storage.h
@@ -0,0 +1,58 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "common/overflow.h"
7#include "core/file_sys/errors.h"
8#include "core/file_sys/vfs.h"
9
10namespace FileSys {
11
// Base class adapting the VfsFile interface for fssystem storage objects.
// A storage is an anonymous region of bytes: it has no name and no containing
// directory, and by default is both readable and writable.
class IStorage : public VfsFile {
public:
    // Storages have no filename.
    virtual std::string GetName() const override {
        return {};
    }

    // Storages do not live in a directory tree.
    virtual VirtualDir GetContainingDirectory() const override {
        return {};
    }

    virtual bool IsWritable() const override {
        return true;
    }

    virtual bool IsReadable() const override {
        return true;
    }

    // Storages are fixed-size; resizing is not supported.
    virtual bool Resize(size_t size) override {
        return false;
    }

    // Storages are unnamed; renaming is not supported.
    virtual bool Rename(std::string_view name) override {
        return false;
    }

    // Validates that [offset, offset + size) is a well-formed range within a
    // storage of total_size bytes.
    static inline Result CheckAccessRange(s64 offset, s64 size, s64 total_size) {
        R_UNLESS(offset >= 0, ResultInvalidOffset);
        R_UNLESS(size >= 0, ResultInvalidSize);
        // WrappingAdd detects s64 overflow of offset + size without invoking
        // signed-overflow UB; only then is the plain sum compared below.
        R_UNLESS(Common::WrappingAdd(offset, size) >= offset, ResultOutOfRange);
        R_UNLESS(offset + size <= total_size, ResultOutOfRange);
        R_SUCCEED();
    }
};
46
// Storage base that rejects mutation: it reports itself non-writable and
// turns writes into no-ops that report 0 bytes written.
class IReadOnlyStorage : public IStorage {
public:
    virtual bool IsWritable() const override {
        return false;
    }

    // Writing is unsupported; no data is consumed.
    virtual size_t Write(const u8* buffer, size_t size, size_t offset) override {
        return 0;
    }
};
57
58} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fs_types.h b/src/core/file_sys/fssystem/fs_types.h
new file mode 100644
index 000000000..43aeaf447
--- /dev/null
+++ b/src/core/file_sys/fssystem/fs_types.h
@@ -0,0 +1,46 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "common/common_funcs.h"
7
8namespace FileSys {
9
10struct Int64 {
11 u32 low;
12 u32 high;
13
14 constexpr void Set(s64 v) {
15 this->low = static_cast<u32>((v & static_cast<u64>(0x00000000FFFFFFFFULL)) >> 0);
16 this->high = static_cast<u32>((v & static_cast<u64>(0xFFFFFFFF00000000ULL)) >> 32);
17 }
18
19 constexpr s64 Get() const {
20 return (static_cast<s64>(this->high) << 32) | (static_cast<s64>(this->low));
21 }
22
23 constexpr Int64& operator=(s64 v) {
24 this->Set(v);
25 return *this;
26 }
27
28 constexpr operator s64() const {
29 return this->Get();
30 }
31};
32
// Fixed-size 32-byte salt value used by the integrity storage types.
struct HashSalt {
    static constexpr size_t Size = 32;

    std::array<u8, Size> value;
};
// Must stay trivial and exactly Size bytes so it can be memcpy'd from headers.
static_assert(std::is_trivial_v<HashSalt>);
static_assert(sizeof(HashSalt) == HashSalt::Size);

// Bounds on the number of layers in an integrity verification storage, and
// the fixed layer counts used for save data and save data meta.
constexpr inline size_t IntegrityMinLayerCount = 2;
constexpr inline size_t IntegrityMaxLayerCount = 7;
constexpr inline size_t IntegrityLayerCountSave = 5;
constexpr inline size_t IntegrityLayerCountSaveDataMeta = 4;
45
46} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.cpp b/src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.cpp
new file mode 100644
index 000000000..bf189c606
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.cpp
@@ -0,0 +1,252 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.h"
5#include "core/file_sys/fssystem/fssystem_aes_ctr_storage.h"
6#include "core/file_sys/fssystem/fssystem_nca_header.h"
7#include "core/file_sys/vfs_offset.h"
8
9namespace FileSys {
10
namespace {

// IDecryptor implementation backed by the Core::Crypto software AES cipher.
class SoftwareDecryptor final : public AesCtrCounterExtendedStorage::IDecryptor {
public:
    // Decrypts buf in place with AES-128-CTR using the given key and IV.
    virtual void Decrypt(
        u8* buf, size_t buf_size, const std::array<u8, AesCtrCounterExtendedStorage::KeySize>& key,
        const std::array<u8, AesCtrCounterExtendedStorage::IvSize>& iv) override final;
};

} // namespace
21
22Result AesCtrCounterExtendedStorage::CreateSoftwareDecryptor(std::unique_ptr<IDecryptor>* out) {
23 std::unique_ptr<IDecryptor> decryptor = std::make_unique<SoftwareDecryptor>();
24 R_UNLESS(decryptor != nullptr, ResultAllocationMemoryFailedInAesCtrCounterExtendedStorageA);
25 *out = std::move(decryptor);
26 R_SUCCEED();
27}
28
29Result AesCtrCounterExtendedStorage::Initialize(const void* key, size_t key_size, u32 secure_value,
30 VirtualFile data_storage,
31 VirtualFile table_storage) {
32 // Read and verify the bucket tree header.
33 BucketTree::Header header;
34 table_storage->ReadObject(std::addressof(header), 0);
35 R_TRY(header.Verify());
36
37 // Determine extents.
38 const auto node_storage_size = QueryNodeStorageSize(header.entry_count);
39 const auto entry_storage_size = QueryEntryStorageSize(header.entry_count);
40 const auto node_storage_offset = QueryHeaderStorageSize();
41 const auto entry_storage_offset = node_storage_offset + node_storage_size;
42
43 // Create a software decryptor.
44 std::unique_ptr<IDecryptor> sw_decryptor;
45 R_TRY(CreateSoftwareDecryptor(std::addressof(sw_decryptor)));
46
47 // Initialize.
48 R_RETURN(this->Initialize(
49 key, key_size, secure_value, 0, data_storage,
50 std::make_shared<OffsetVfsFile>(table_storage, node_storage_size, node_storage_offset),
51 std::make_shared<OffsetVfsFile>(table_storage, entry_storage_size, entry_storage_offset),
52 header.entry_count, std::move(sw_decryptor)));
53}
54
55Result AesCtrCounterExtendedStorage::Initialize(const void* key, size_t key_size, u32 secure_value,
56 s64 counter_offset, VirtualFile data_storage,
57 VirtualFile node_storage, VirtualFile entry_storage,
58 s32 entry_count,
59 std::unique_ptr<IDecryptor>&& decryptor) {
60 // Validate preconditions.
61 ASSERT(key != nullptr);
62 ASSERT(key_size == KeySize);
63 ASSERT(counter_offset >= 0);
64 ASSERT(decryptor != nullptr);
65
66 // Initialize the bucket tree table.
67 if (entry_count > 0) {
68 R_TRY(
69 m_table.Initialize(node_storage, entry_storage, NodeSize, sizeof(Entry), entry_count));
70 } else {
71 m_table.Initialize(NodeSize, 0);
72 }
73
74 // Set members.
75 m_data_storage = data_storage;
76 std::memcpy(m_key.data(), key, key_size);
77 m_secure_value = secure_value;
78 m_counter_offset = counter_offset;
79 m_decryptor = std::move(decryptor);
80
81 R_SUCCEED();
82}
83
84void AesCtrCounterExtendedStorage::Finalize() {
85 if (this->IsInitialized()) {
86 m_table.Finalize();
87 m_data_storage = VirtualFile();
88 }
89}
90
// Collects the bucket-tree entries overlapping [offset, offset + size) into
// out_entries (at most entry_count of them) and reports how many were visited
// in *out_entry_count. Passing entry_count == 0 counts entries without
// copying any out.
Result AesCtrCounterExtendedStorage::GetEntryList(Entry* out_entries, s32* out_entry_count,
                                                  s32 entry_count, s64 offset, s64 size) {
    // Validate pre-conditions.
    ASSERT(offset >= 0);
    ASSERT(size >= 0);
    ASSERT(this->IsInitialized());

    // Clear the out count.
    R_UNLESS(out_entry_count != nullptr, ResultNullptrArgument);
    *out_entry_count = 0;

    // Succeed if there's no range.
    R_SUCCEED_IF(size == 0);

    // If we have an output array, we need it to be non-null.
    R_UNLESS(out_entries != nullptr || entry_count == 0, ResultNullptrArgument);

    // Check that our range is valid.
    BucketTree::Offsets table_offsets;
    R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));

    R_UNLESS(table_offsets.IsInclude(offset, size), ResultOutOfRange);

    // Find the entry containing `offset` in our tree.
    BucketTree::Visitor visitor;
    R_TRY(m_table.Find(std::addressof(visitor), offset));
    {
        // Reject malformed trees whose entry offset lies outside the table.
        const auto entry_offset = visitor.Get<Entry>()->GetOffset();
        R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset),
                 ResultInvalidAesCtrCounterExtendedEntryOffset);
    }

    // Prepare to loop over entries.
    const auto end_offset = offset + static_cast<s64>(size);
    s32 count = 0;

    auto cur_entry = *visitor.Get<Entry>();
    while (cur_entry.GetOffset() < end_offset) {
        // Try to write the entry to the out list, stopping once it is full.
        if (entry_count != 0) {
            if (count >= entry_count) {
                break;
            }
            std::memcpy(out_entries + count, std::addressof(cur_entry), sizeof(Entry));
        }

        count++;

        // Advance to the next entry, if any.
        if (visitor.CanMoveNext()) {
            R_TRY(visitor.MoveNext());
            cur_entry = *visitor.Get<Entry>();
        } else {
            break;
        }
    }

    // Write the output count.
    *out_entry_count = count;
    R_SUCCEED();
}
152
// Reads and decrypts `size` bytes at `offset`. The bucket tree maps ranges of
// the data to (generation, encryption) entries; the ciphertext is read in one
// pass and then decrypted in place, one entry range at a time, with an IV
// derived from the entry's generation and the storage-wide secure value.
size_t AesCtrCounterExtendedStorage::Read(u8* buffer, size_t size, size_t offset) const {
    // Validate preconditions.
    ASSERT(offset >= 0);
    ASSERT(this->IsInitialized());

    // Allow zero size.
    if (size == 0) {
        return size;
    }

    // Validate arguments; accesses must cover whole AES blocks.
    ASSERT(buffer != nullptr);
    ASSERT(Common::IsAligned(offset, BlockSize));
    ASSERT(Common::IsAligned(size, BlockSize));

    BucketTree::Offsets table_offsets;
    ASSERT(R_SUCCEEDED(m_table.GetOffsets(std::addressof(table_offsets))));

    ASSERT(table_offsets.IsInclude(offset, size));

    // Read the ciphertext; it is decrypted in place below.
    m_data_storage->Read(buffer, size, offset);

    // Find the entry containing `offset` in our tree.
    BucketTree::Visitor visitor;
    ASSERT(R_SUCCEEDED(m_table.Find(std::addressof(visitor), offset)));
    {
        const auto entry_offset = visitor.Get<Entry>()->GetOffset();
        ASSERT(Common::IsAligned(entry_offset, BlockSize));
        ASSERT(0 <= entry_offset && table_offsets.IsInclude(entry_offset));
    }

    // Prepare to process the read data one entry range at a time.
    u8* cur_data = static_cast<u8*>(buffer);
    auto cur_offset = offset;
    const auto end_offset = offset + static_cast<s64>(size);

    while (cur_offset < end_offset) {
        // Get the current entry.
        const auto cur_entry = *visitor.Get<Entry>();

        // Get and validate the entry's offset.
        const auto cur_entry_offset = cur_entry.GetOffset();
        ASSERT(static_cast<size_t>(cur_entry_offset) <= cur_offset);

        // Get and validate the next entry offset (the end of this entry's range).
        s64 next_entry_offset;
        if (visitor.CanMoveNext()) {
            ASSERT(R_SUCCEEDED(visitor.MoveNext()));
            next_entry_offset = visitor.Get<Entry>()->GetOffset();
            ASSERT(table_offsets.IsInclude(next_entry_offset));
        } else {
            // Last entry: its range extends to the end of the table.
            next_entry_offset = table_offsets.end_offset;
        }
        ASSERT(Common::IsAligned(next_entry_offset, BlockSize));
        ASSERT(cur_offset < static_cast<size_t>(next_entry_offset));

        // Get the offset of the current position within the entry's range,
        // and how much of the range remains past it.
        const auto data_offset = cur_offset - cur_entry_offset;
        const auto data_size = (next_entry_offset - cur_entry_offset) - data_offset;
        ASSERT(data_size > 0);

        // Determine how much of this entry's range we will process now.
        const auto remaining_size = end_offset - cur_offset;
        const auto cur_size = static_cast<size_t>(std::min(remaining_size, data_size));
        ASSERT(cur_size <= size);

        // If necessary, perform decryption.
        if (cur_entry.encryption_value == Entry::Encryption::Encrypted) {
            // Make the CTR for the data we're decrypting: the counter tracks
            // the absolute position, and the upper IV carries the entry's
            // generation plus the storage-wide secure value.
            const auto counter_offset = m_counter_offset + cur_entry_offset + data_offset;
            NcaAesCtrUpperIv upper_iv = {
                .part = {.generation = static_cast<u32>(cur_entry.generation),
                         .secure_value = m_secure_value}};

            std::array<u8, IvSize> iv;
            AesCtrStorage::MakeIv(iv.data(), IvSize, upper_iv.value, counter_offset);

            // Decrypt in place.
            m_decryptor->Decrypt(cur_data, cur_size, m_key, iv);
        }

        // Advance.
        cur_data += cur_size;
        cur_offset += cur_size;
    }

    return size;
}
242
243void SoftwareDecryptor::Decrypt(u8* buf, size_t buf_size,
244 const std::array<u8, AesCtrCounterExtendedStorage::KeySize>& key,
245 const std::array<u8, AesCtrCounterExtendedStorage::IvSize>& iv) {
246 Core::Crypto::AESCipher<Core::Crypto::Key128, AesCtrCounterExtendedStorage::KeySize> cipher(
247 key, Core::Crypto::Mode::CTR);
248 cipher.SetIV(iv);
249 cipher.Transcode(buf, buf_size, buf, Core::Crypto::Op::Decrypt);
250}
251
252} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.h b/src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.h
new file mode 100644
index 000000000..a79904fad
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.h
@@ -0,0 +1,114 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include <optional>
7
8#include "common/literals.h"
9#include "core/file_sys/fssystem/fs_i_storage.h"
10#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
11
12namespace FileSys {
13
14using namespace Common::Literals;
15
// Read-only storage whose contents are AES-128-CTR encrypted in ranges: a
// bucket tree maps each range of the data to an (encryption flag, generation)
// entry, and encrypted ranges are decrypted on read with an IV derived from
// that generation plus a storage-wide secure value.
class AesCtrCounterExtendedStorage : public IReadOnlyStorage {
    YUZU_NON_COPYABLE(AesCtrCounterExtendedStorage);
    YUZU_NON_MOVEABLE(AesCtrCounterExtendedStorage);

public:
    static constexpr size_t BlockSize = 0x10;
    static constexpr size_t KeySize = 0x10;
    static constexpr size_t IvSize = 0x10;
    static constexpr size_t NodeSize = 16_KiB;

    // Interface for the AES-CTR decryption backend.
    class IDecryptor {
    public:
        virtual ~IDecryptor() {}
        virtual void Decrypt(u8* buf, size_t buf_size, const std::array<u8, KeySize>& key,
                             const std::array<u8, IvSize>& iv) = 0;
    };

    // One bucket-tree record: the start offset of a range, whether that range
    // is encrypted, and the key generation used for its IV.
    struct Entry {
        enum class Encryption : u8 {
            Encrypted = 0,
            NotEncrypted = 1,
        };

        // Stored as raw bytes (not s64) to keep the struct 4-byte aligned and
        // exactly 0x10 bytes, matching the on-disk layout.
        std::array<u8, sizeof(s64)> offset;
        Encryption encryption_value;
        std::array<u8, 3> reserved;
        s32 generation;

        void SetOffset(s64 value) {
            std::memcpy(this->offset.data(), std::addressof(value), sizeof(s64));
        }

        s64 GetOffset() const {
            s64 value;
            std::memcpy(std::addressof(value), this->offset.data(), sizeof(s64));
            return value;
        }
    };
    static_assert(sizeof(Entry) == 0x10);
    static_assert(alignof(Entry) == 4);
    static_assert(std::is_trivial_v<Entry>);

public:
    // Storage-size queries, forwarded to the bucket tree with our node/entry
    // geometry; used to slice a combined table storage into its regions.
    static constexpr s64 QueryHeaderStorageSize() {
        return BucketTree::QueryHeaderStorageSize();
    }

    static constexpr s64 QueryNodeStorageSize(s32 entry_count) {
        return BucketTree::QueryNodeStorageSize(NodeSize, sizeof(Entry), entry_count);
    }

    static constexpr s64 QueryEntryStorageSize(s32 entry_count) {
        return BucketTree::QueryEntryStorageSize(NodeSize, sizeof(Entry), entry_count);
    }

    static Result CreateSoftwareDecryptor(std::unique_ptr<IDecryptor>* out);

private:
    // mutable: const Read() walks the tree via GetOffsets/Find.
    mutable BucketTree m_table;
    VirtualFile m_data_storage;
    std::array<u8, KeySize> m_key;
    u32 m_secure_value;
    s64 m_counter_offset;
    std::unique_ptr<IDecryptor> m_decryptor;

public:
    AesCtrCounterExtendedStorage()
        : m_table(), m_data_storage(), m_secure_value(), m_counter_offset(), m_decryptor() {}
    virtual ~AesCtrCounterExtendedStorage() {
        this->Finalize();
    }

    Result Initialize(const void* key, size_t key_size, u32 secure_value, s64 counter_offset,
                      VirtualFile data_storage, VirtualFile node_storage, VirtualFile entry_storage,
                      s32 entry_count, std::unique_ptr<IDecryptor>&& decryptor);
    void Finalize();

    bool IsInitialized() const {
        return m_table.IsInitialized();
    }

    virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;

    // The storage's size is the end offset of the bucket tree's virtual range.
    virtual size_t GetSize() const override {
        BucketTree::Offsets offsets;
        ASSERT(R_SUCCEEDED(m_table.GetOffsets(std::addressof(offsets))));

        return offsets.end_offset;
    }

    Result GetEntryList(Entry* out_entries, s32* out_entry_count, s32 entry_count, s64 offset,
                        s64 size);

private:
    Result Initialize(const void* key, size_t key_size, u32 secure_value, VirtualFile data_storage,
                      VirtualFile table_storage);
};
113
114} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_aes_ctr_storage.cpp b/src/core/file_sys/fssystem/fssystem_aes_ctr_storage.cpp
new file mode 100644
index 000000000..b65aca18d
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_aes_ctr_storage.cpp
@@ -0,0 +1,129 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "common/alignment.h"
5#include "common/swap.h"
6#include "core/file_sys/fssystem/fssystem_aes_ctr_storage.h"
7#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
8#include "core/file_sys/fssystem/fssystem_utility.h"
9
10namespace FileSys {
11
12void AesCtrStorage::MakeIv(void* dst, size_t dst_size, u64 upper, s64 offset) {
13 ASSERT(dst != nullptr);
14 ASSERT(dst_size == IvSize);
15 ASSERT(offset >= 0);
16
17 const uintptr_t out_addr = reinterpret_cast<uintptr_t>(dst);
18
19 *reinterpret_cast<u64_be*>(out_addr + 0) = upper;
20 *reinterpret_cast<s64_be*>(out_addr + sizeof(u64)) = static_cast<s64>(offset / BlockSize);
21}
22
23AesCtrStorage::AesCtrStorage(VirtualFile base, const void* key, size_t key_size, const void* iv,
24 size_t iv_size)
25 : m_base_storage(std::move(base)) {
26 ASSERT(m_base_storage != nullptr);
27 ASSERT(key != nullptr);
28 ASSERT(iv != nullptr);
29 ASSERT(key_size == KeySize);
30 ASSERT(iv_size == IvSize);
31
32 std::memcpy(m_key.data(), key, KeySize);
33 std::memcpy(m_iv.data(), iv, IvSize);
34
35 m_cipher.emplace(m_key, Core::Crypto::Mode::CTR);
36}
37
38size_t AesCtrStorage::Read(u8* buffer, size_t size, size_t offset) const {
39 // Allow zero-size reads.
40 if (size == 0) {
41 return size;
42 }
43
44 // Ensure buffer is valid.
45 ASSERT(buffer != nullptr);
46
47 // We can only read at block aligned offsets.
48 ASSERT(Common::IsAligned(offset, BlockSize));
49 ASSERT(Common::IsAligned(size, BlockSize));
50
51 // Read the data.
52 m_base_storage->Read(buffer, size, offset);
53
54 // Setup the counter.
55 std::array<u8, IvSize> ctr;
56 std::memcpy(ctr.data(), m_iv.data(), IvSize);
57 AddCounter(ctr.data(), IvSize, offset / BlockSize);
58
59 // Decrypt.
60 m_cipher->SetIV(ctr);
61 m_cipher->Transcode(buffer, size, buffer, Core::Crypto::Op::Decrypt);
62
63 return size;
64}
65
66size_t AesCtrStorage::Write(const u8* buffer, size_t size, size_t offset) {
67 // Allow zero-size writes.
68 if (size == 0) {
69 return size;
70 }
71
72 // Ensure buffer is valid.
73 ASSERT(buffer != nullptr);
74
75 // We can only write at block aligned offsets.
76 ASSERT(Common::IsAligned(offset, BlockSize));
77 ASSERT(Common::IsAligned(size, BlockSize));
78
79 // Get a pooled buffer.
80 PooledBuffer pooled_buffer;
81 const bool use_work_buffer = true;
82 if (use_work_buffer) {
83 pooled_buffer.Allocate(size, BlockSize);
84 }
85
86 // Setup the counter.
87 std::array<u8, IvSize> ctr;
88 std::memcpy(ctr.data(), m_iv.data(), IvSize);
89 AddCounter(ctr.data(), IvSize, offset / BlockSize);
90
91 // Loop until all data is written.
92 size_t remaining = size;
93 s64 cur_offset = 0;
94 while (remaining > 0) {
95 // Determine data we're writing and where.
96 const size_t write_size =
97 use_work_buffer ? std::min(pooled_buffer.GetSize(), remaining) : remaining;
98
99 void* write_buf;
100 if (use_work_buffer) {
101 write_buf = pooled_buffer.GetBuffer();
102 } else {
103 write_buf = const_cast<u8*>(buffer);
104 }
105
106 // Encrypt the data.
107 m_cipher->SetIV(ctr);
108 m_cipher->Transcode(buffer, write_size, reinterpret_cast<u8*>(write_buf),
109 Core::Crypto::Op::Encrypt);
110
111 // Write the encrypted data.
112 m_base_storage->Write(reinterpret_cast<u8*>(write_buf), write_size, offset + cur_offset);
113
114 // Advance.
115 cur_offset += write_size;
116 remaining -= write_size;
117 if (remaining > 0) {
118 AddCounter(ctr.data(), IvSize, write_size / BlockSize);
119 }
120 }
121
122 return size;
123}
124
125size_t AesCtrStorage::GetSize() const {
126 return m_base_storage->GetSize();
127}
128
129} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_aes_ctr_storage.h b/src/core/file_sys/fssystem/fssystem_aes_ctr_storage.h
new file mode 100644
index 000000000..bceb1f9ad
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_aes_ctr_storage.h
@@ -0,0 +1,43 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include <optional>
7
8#include "core/crypto/aes_util.h"
9#include "core/crypto/key_manager.h"
10#include "core/file_sys/errors.h"
11#include "core/file_sys/fssystem/fs_i_storage.h"
12#include "core/file_sys/vfs.h"
13
14namespace FileSys {
15
// AES-128-CTR storage layered over a base VirtualFile: reads decrypt and
// writes encrypt transparently. All accesses must be BlockSize-aligned.
class AesCtrStorage : public IStorage {
    YUZU_NON_COPYABLE(AesCtrStorage);
    YUZU_NON_MOVEABLE(AesCtrStorage);

public:
    static constexpr size_t BlockSize = 0x10;
    static constexpr size_t KeySize = 0x10;
    static constexpr size_t IvSize = 0x10;

private:
    // Underlying ciphertext storage.
    VirtualFile m_base_storage;
    std::array<u8, KeySize> m_key;
    // Base IV; the counter half is advanced per 16-byte block on access.
    std::array<u8, IvSize> m_iv;
    // mutable: const Read() must SetIV/Transcode on the cipher.
    mutable std::optional<Core::Crypto::AESCipher<Core::Crypto::Key128>> m_cipher;

public:
    // Builds the 16-byte CTR IV: `upper` big-endian in bytes [0, 8) and the
    // block index (offset / BlockSize) big-endian in bytes [8, 16).
    static void MakeIv(void* dst, size_t dst_size, u64 upper, s64 offset);

public:
    AesCtrStorage(VirtualFile base, const void* key, size_t key_size, const void* iv,
                  size_t iv_size);

    virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
    virtual size_t Write(const u8* buffer, size_t size, size_t offset) override;
    virtual size_t GetSize() const override;
};
42
43} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_aes_xts_storage.cpp b/src/core/file_sys/fssystem/fssystem_aes_xts_storage.cpp
new file mode 100644
index 000000000..022424229
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_aes_xts_storage.cpp
@@ -0,0 +1,112 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "common/alignment.h"
5#include "common/swap.h"
6#include "core/file_sys/errors.h"
7#include "core/file_sys/fssystem/fssystem_aes_xts_storage.h"
8#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
9#include "core/file_sys/fssystem/fssystem_utility.h"
10
11namespace FileSys {
12
// Builds the XTS IV for a read at `offset`: the sector number
// (offset / block_size) is stored big-endian into bytes [8, 16) of dst.
// NOTE(review): bytes [0, 8) of dst are left untouched here — callers are
// presumably expected to pass a zero-initialized IV; confirm at call sites.
void AesXtsStorage::MakeAesXtsIv(void* dst, size_t dst_size, s64 offset, size_t block_size) {
    ASSERT(dst != nullptr);
    ASSERT(dst_size == IvSize);
    ASSERT(offset >= 0);

    const uintptr_t out_addr = reinterpret_cast<uintptr_t>(dst);

    // Store the sector index big-endian in the second half of the IV.
    *reinterpret_cast<s64_be*>(out_addr + sizeof(s64)) = offset / block_size;
}
22
23AesXtsStorage::AesXtsStorage(VirtualFile base, const void* key1, const void* key2, size_t key_size,
24 const void* iv, size_t iv_size, size_t block_size)
25 : m_base_storage(std::move(base)), m_block_size(block_size), m_mutex() {
26 ASSERT(m_base_storage != nullptr);
27 ASSERT(key1 != nullptr);
28 ASSERT(key2 != nullptr);
29 ASSERT(iv != nullptr);
30 ASSERT(key_size == KeySize);
31 ASSERT(iv_size == IvSize);
32 ASSERT(Common::IsAligned(m_block_size, AesBlockSize));
33
34 std::memcpy(m_key.data() + 0, key1, KeySize);
35 std::memcpy(m_key.data() + 0x10, key2, KeySize);
36 std::memcpy(m_iv.data(), iv, IvSize);
37
38 m_cipher.emplace(m_key, Core::Crypto::Mode::XTS);
39}
40
// Reads `size` bytes of ciphertext at `offset` and decrypts them in place,
// one XTS sector (m_block_size) at a time. Offset and size must be aligned
// to AesBlockSize, but the offset need not be sector-aligned: a leading
// partial sector is handled via a temporary buffer.
size_t AesXtsStorage::Read(u8* buffer, size_t size, size_t offset) const {
    // Allow zero-size reads.
    if (size == 0) {
        return size;
    }

    // Ensure buffer is valid.
    ASSERT(buffer != nullptr);

    // We can only read at block aligned offsets.
    ASSERT(Common::IsAligned(offset, AesBlockSize));
    ASSERT(Common::IsAligned(size, AesBlockSize));

    // Read the data.
    m_base_storage->Read(buffer, size, offset);

    // Setup the counter, starting at the sector containing `offset`.
    std::array<u8, IvSize> ctr;
    std::memcpy(ctr.data(), m_iv.data(), IvSize);
    AddCounter(ctr.data(), IvSize, offset / m_block_size);

    // Handle any unaligned data before the start.
    size_t processed_size = 0;
    if ((offset % m_block_size) != 0) {
        // Determine the size of the pre-data read: the skipped prefix of the
        // sector, and how many of our bytes fall inside this first sector.
        const size_t skip_size =
            static_cast<size_t>(offset - Common::AlignDown(offset, m_block_size));
        const size_t data_size = std::min(size, m_block_size - skip_size);

        // Decrypt into a pooled buffer. The skipped prefix is zero-filled and
        // its (garbage) plaintext discarded; since offset and m_block_size are
        // both AesBlockSize-aligned, skip_size is a whole number of cipher
        // blocks, so the caller's bytes still decrypt correctly.
        {
            PooledBuffer tmp_buf(m_block_size, m_block_size);
            ASSERT(tmp_buf.GetSize() >= m_block_size);

            std::memset(tmp_buf.GetBuffer(), 0, skip_size);
            std::memcpy(tmp_buf.GetBuffer() + skip_size, buffer, data_size);

            m_cipher->SetIV(ctr);
            m_cipher->Transcode(tmp_buf.GetBuffer(), m_block_size, tmp_buf.GetBuffer(),
                                Core::Crypto::Op::Decrypt);

            std::memcpy(buffer, tmp_buf.GetBuffer() + skip_size, data_size);
        }

        AddCounter(ctr.data(), IvSize, 1);
        processed_size += data_size;
        ASSERT(processed_size == std::min(size, m_block_size - skip_size));
    }

    // Decrypt aligned chunks, advancing the sector counter per sector.
    char* cur = reinterpret_cast<char*>(buffer) + processed_size;
    size_t remaining = size - processed_size;
    while (remaining > 0) {
        const size_t cur_size = std::min(m_block_size, remaining);

        m_cipher->SetIV(ctr);
        m_cipher->Transcode(cur, cur_size, cur, Core::Crypto::Op::Decrypt);

        remaining -= cur_size;
        cur += cur_size;

        AddCounter(ctr.data(), IvSize, 1);
    }

    return size;
}
107
108size_t AesXtsStorage::GetSize() const {
109 return m_base_storage->GetSize();
110}
111
112} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_aes_xts_storage.h b/src/core/file_sys/fssystem/fssystem_aes_xts_storage.h
new file mode 100644
index 000000000..2307a2659
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_aes_xts_storage.h
@@ -0,0 +1,42 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include <optional>
7
8#include "core/crypto/aes_util.h"
9#include "core/crypto/key_manager.h"
10#include "core/file_sys/fssystem/fs_i_storage.h"
11
12namespace FileSys {
13
// Read-only AES-XTS storage over a base VirtualFile, decrypting per-sector
// (m_block_size) on read with a 256-bit key split into two 128-bit halves.
class AesXtsStorage : public IReadOnlyStorage {
    YUZU_NON_COPYABLE(AesXtsStorage);
    YUZU_NON_MOVEABLE(AesXtsStorage);

public:
    static constexpr size_t AesBlockSize = 0x10;
    static constexpr size_t KeySize = 0x20;
    static constexpr size_t IvSize = 0x10;

private:
    VirtualFile m_base_storage;
    std::array<u8, KeySize> m_key;
    // Base IV; a sector counter is added to it on each access.
    std::array<u8, IvSize> m_iv;
    // XTS sector size; must be a multiple of AesBlockSize.
    const size_t m_block_size;
    // NOTE(review): no locking of this mutex is visible in this translation
    // unit, yet const Read() mutates m_cipher — confirm intended thread-safety.
    std::mutex m_mutex;
    // mutable: const Read() must SetIV/Transcode on the cipher.
    mutable std::optional<Core::Crypto::AESCipher<Core::Crypto::Key256>> m_cipher;

public:
    // Builds the XTS IV for the sector containing `offset` (see definition).
    static void MakeAesXtsIv(void* dst, size_t dst_size, s64 offset, size_t block_size);

public:
    AesXtsStorage(VirtualFile base, const void* key1, const void* key2, size_t key_size,
                  const void* iv, size_t iv_size, size_t block_size);

    virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
    virtual size_t GetSize() const override;
};
41
42} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_alignment_matching_storage.h b/src/core/file_sys/fssystem/fssystem_alignment_matching_storage.h
new file mode 100644
index 000000000..27d34fd17
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_alignment_matching_storage.h
@@ -0,0 +1,146 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "common/alignment.h"
7#include "core/file_sys/errors.h"
8#include "core/file_sys/fssystem/fs_i_storage.h"
9#include "core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.h"
10#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
11
12namespace FileSys {
13
// Storage adapter that accepts arbitrary offsets/sizes against a base storage
// requiring offset alignment (DataAlign) and buffer address alignment
// (BufferAlign). Misaligned head/tail fragments are staged through a small
// on-stack work buffer; the aligned middle goes directly to/from the caller.
template <size_t DataAlign_, size_t BufferAlign_>
class AlignmentMatchingStorage : public IStorage {
    YUZU_NON_COPYABLE(AlignmentMatchingStorage);
    YUZU_NON_MOVEABLE(AlignmentMatchingStorage);

public:
    static constexpr size_t DataAlign = DataAlign_;
    static constexpr size_t BufferAlign = BufferAlign_;

    // Cap on DataAlign so the per-call stack work buffer stays small.
    static constexpr size_t DataAlignMax = 0x200;
    static_assert(DataAlign <= DataAlignMax);
    static_assert(Common::IsPowerOfTwo(DataAlign));
    static_assert(Common::IsPowerOfTwo(BufferAlign));

private:
    VirtualFile m_base_storage;
    // NOTE(review): never initialized or read in this class — candidate for removal.
    s64 m_base_storage_size;

public:
    explicit AlignmentMatchingStorage(VirtualFile bs) : m_base_storage(std::move(bs)) {}

    // Reads `size` bytes at `offset`; alignment fix-up is delegated to
    // AlignmentMatchingStorageImpl. Returns `size` on success.
    virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
        // Allocate a work buffer on stack. Aligned to DataAlignMax so it
        // satisfies any permitted DataAlign.
        alignas(DataAlignMax) char work_buf[DataAlign];

        // Succeed if zero size.
        if (size == 0) {
            return size;
        }

        // Validate arguments.
        ASSERT(buffer != nullptr);

        s64 bs_size = this->GetSize();
        ASSERT(R_SUCCEEDED(IStorage::CheckAccessRange(offset, size, bs_size)));

        return AlignmentMatchingStorageImpl::Read(m_base_storage, work_buf, sizeof(work_buf),
                                                  DataAlign, BufferAlign, offset, buffer, size);
    }

    // Writes `size` bytes at `offset`; see Read() for the alignment strategy.
    virtual size_t Write(const u8* buffer, size_t size, size_t offset) override {
        // Allocate a work buffer on stack.
        alignas(DataAlignMax) char work_buf[DataAlign];

        // Succeed if zero size.
        if (size == 0) {
            return size;
        }

        // Validate arguments.
        ASSERT(buffer != nullptr);

        s64 bs_size = this->GetSize();
        ASSERT(R_SUCCEEDED(IStorage::CheckAccessRange(offset, size, bs_size)));

        return AlignmentMatchingStorageImpl::Write(m_base_storage, work_buf, sizeof(work_buf),
                                                   DataAlign, BufferAlign, offset, buffer, size);
    }

    // Size is that of the base storage.
    virtual size_t GetSize() const override {
        return m_base_storage->GetSize();
    }
};
77
// Variant of AlignmentMatchingStorage for data alignments too large for a
// stack buffer: the data alignment is a runtime value and the work buffer is
// taken from the pooled-buffer allocator on each access.
template <size_t BufferAlign_>
class AlignmentMatchingStoragePooledBuffer : public IStorage {
    YUZU_NON_COPYABLE(AlignmentMatchingStoragePooledBuffer);
    YUZU_NON_MOVEABLE(AlignmentMatchingStoragePooledBuffer);

public:
    static constexpr size_t BufferAlign = BufferAlign_;

    static_assert(Common::IsPowerOfTwo(BufferAlign));

private:
    VirtualFile m_base_storage;
    // NOTE(review): never initialized or read in this class — candidate for removal.
    s64 m_base_storage_size;
    size_t m_data_align; // Runtime offset alignment; must be a power of two.

public:
    explicit AlignmentMatchingStoragePooledBuffer(VirtualFile bs, size_t da)
        : m_base_storage(std::move(bs)), m_data_align(da) {
        ASSERT(Common::IsPowerOfTwo(da));
    }

    // Reads `size` bytes at `offset`, staging misaligned fragments through a
    // pooled buffer of at least m_data_align bytes.
    virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
        // Succeed if zero size.
        if (size == 0) {
            return size;
        }

        // Validate arguments.
        ASSERT(buffer != nullptr);

        s64 bs_size = this->GetSize();
        ASSERT(R_SUCCEEDED(IStorage::CheckAccessRange(offset, size, bs_size)));

        // Allocate a pooled buffer.
        PooledBuffer pooled_buffer;
        pooled_buffer.AllocateParticularlyLarge(m_data_align, m_data_align);

        return AlignmentMatchingStorageImpl::Read(m_base_storage, pooled_buffer.GetBuffer(),
                                                  pooled_buffer.GetSize(), m_data_align,
                                                  BufferAlign, offset, buffer, size);
    }

    // Writes `size` bytes at `offset`; see Read() for the buffering strategy.
    virtual size_t Write(const u8* buffer, size_t size, size_t offset) override {
        // Succeed if zero size.
        if (size == 0) {
            return size;
        }

        // Validate arguments.
        ASSERT(buffer != nullptr);

        s64 bs_size = this->GetSize();
        ASSERT(R_SUCCEEDED(IStorage::CheckAccessRange(offset, size, bs_size)));

        // Allocate a pooled buffer.
        PooledBuffer pooled_buffer;
        pooled_buffer.AllocateParticularlyLarge(m_data_align, m_data_align);

        return AlignmentMatchingStorageImpl::Write(m_base_storage, pooled_buffer.GetBuffer(),
                                                   pooled_buffer.GetSize(), m_data_align,
                                                   BufferAlign, offset, buffer, size);
    }

    // Size is that of the base storage.
    virtual size_t GetSize() const override {
        return m_base_storage->GetSize();
    }
};
145
146} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.cpp b/src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.cpp
new file mode 100644
index 000000000..641c888ae
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.cpp
@@ -0,0 +1,204 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "common/alignment.h"
5#include "core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.h"
6
7namespace FileSys {
8
namespace {

// Distance from x down to the previous multiple of align (0 if aligned).
template <typename T>
constexpr size_t GetRoundDownDifference(T x, size_t align) {
    return static_cast<size_t>(x - Common::AlignDown(x, align));
}

// Distance from x up to the next multiple of align (0 if aligned).
template <typename T>
constexpr size_t GetRoundUpDifference(T x, size_t align) {
    return static_cast<size_t>(Common::AlignUp(x, align) - x);
}

// Pointer overload: distance from address x up to the next align boundary.
template <typename T>
size_t GetRoundUpDifference(T* x, size_t align) {
    return GetRoundUpDifference(reinterpret_cast<uintptr_t>(x), align);
}

} // namespace
27
// Reads `size` bytes at `offset` from `base_storage` into `buffer`, honoring
// both a storage offset alignment (`data_alignment`) and a destination buffer
// address alignment (`buffer_alignment`). The aligned middle ("core") region
// is read directly into the caller's buffer; misaligned head/tail fragments
// are bounced through `work_buf`, which must hold at least one alignment
// unit. Returns `size` on success.
size_t AlignmentMatchingStorageImpl::Read(VirtualFile base_storage, char* work_buf,
                                          size_t work_buf_size, size_t data_alignment,
                                          size_t buffer_alignment, s64 offset, u8* buffer,
                                          size_t size) {
    // Check preconditions.
    ASSERT(work_buf_size >= data_alignment);

    // Succeed if zero size.
    if (size == 0) {
        return size;
    }

    // Validate arguments.
    ASSERT(buffer != nullptr);

    // Determine extents.
    u8* aligned_core_buffer; // Destination of the directly-read aligned region.
    s64 core_offset;         // Storage offset of the aligned region.
    size_t core_size;        // Length of the aligned region (may be 0).
    size_t buffer_gap;       // Bytes skipped at the front of `buffer` for address alignment.
    size_t offset_gap;       // Bytes over-read before `offset` (misaligned-offset case).
    s64 covered_offset;      // First storage offset satisfied by the core read.

    const size_t offset_round_up_difference = GetRoundUpDifference(offset, data_alignment);
    if (Common::IsAligned(reinterpret_cast<uintptr_t>(buffer) + offset_round_up_difference,
                          buffer_alignment)) {
        // The caller's buffer lines up once the offset is rounded up: read the
        // aligned middle straight into its final position.
        aligned_core_buffer = buffer + offset_round_up_difference;

        core_offset = Common::AlignUp(offset, data_alignment);
        core_size = (size < offset_round_up_difference)
                        ? 0
                        : Common::AlignDown(size - offset_round_up_difference, data_alignment);
        buffer_gap = 0;
        offset_gap = 0;

        covered_offset = core_size > 0 ? core_offset : offset;
    } else {
        // Otherwise read from the aligned-down offset into an aligned-up spot
        // inside the buffer, then shift the payload into place below.
        const size_t buffer_round_up_difference = GetRoundUpDifference(buffer, buffer_alignment);

        aligned_core_buffer = buffer + buffer_round_up_difference;

        core_offset = Common::AlignDown(offset, data_alignment);
        core_size = (size < buffer_round_up_difference)
                        ? 0
                        : Common::AlignDown(size - buffer_round_up_difference, data_alignment);
        buffer_gap = buffer_round_up_difference;
        offset_gap = GetRoundDownDifference(offset, data_alignment);

        covered_offset = offset;
    }

    // Read the core portion.
    if (core_size > 0) {
        base_storage->Read(aligned_core_buffer, core_size, core_offset);

        // Slide the payload so it starts at `buffer`, dropping the over-read
        // prefix; memmove because source and destination overlap.
        if (offset_gap != 0 || buffer_gap != 0) {
            std::memmove(aligned_core_buffer - buffer_gap, aligned_core_buffer + offset_gap,
                         core_size - offset_gap);
            core_size -= offset_gap;
        }
    }

    // Handle the head portion: read one full alignment unit into the work
    // buffer and copy out just the requested suffix.
    if (offset < covered_offset) {
        const s64 head_offset = Common::AlignDown(offset, data_alignment);
        const size_t head_size = static_cast<size_t>(covered_offset - offset);

        ASSERT(GetRoundDownDifference(offset, data_alignment) + head_size <= work_buf_size);

        base_storage->Read(reinterpret_cast<u8*>(work_buf), data_alignment, head_offset);
        std::memcpy(buffer, work_buf + GetRoundDownDifference(offset, data_alignment), head_size);
    }

    // Handle the tail portion, one alignment unit at a time.
    s64 tail_offset = covered_offset + core_size;
    size_t remaining_tail_size = static_cast<size_t>((offset + size) - tail_offset);
    while (remaining_tail_size > 0) {
        const auto aligned_tail_offset = Common::AlignDown(tail_offset, data_alignment);
        const auto cur_size =
            std::min(static_cast<size_t>(aligned_tail_offset + data_alignment - tail_offset),
                     remaining_tail_size);
        base_storage->Read(reinterpret_cast<u8*>(work_buf), data_alignment, aligned_tail_offset);

        ASSERT((tail_offset - offset) + cur_size <= size);
        ASSERT((tail_offset - aligned_tail_offset) + cur_size <= data_alignment);
        std::memcpy(reinterpret_cast<char*>(buffer) + (tail_offset - offset),
                    work_buf + (tail_offset - aligned_tail_offset), cur_size);

        remaining_tail_size -= cur_size;
        tail_offset += cur_size;
    }

    return size;
}
122
// Writes `size` bytes from `buffer` to `base_storage` at `offset`, honoring a
// storage offset alignment (`data_alignment`) and a source buffer address
// alignment (`buffer_alignment`). The aligned middle ("core") region is
// written directly from the caller's buffer; misaligned head/tail fragments
// use a read-modify-write cycle through `work_buf` so bytes outside the
// requested range are preserved. Unlike Read, a misaligned source buffer
// disables the core fast path entirely (the caller's data cannot be shifted
// in place). Returns `size` on success.
size_t AlignmentMatchingStorageImpl::Write(VirtualFile base_storage, char* work_buf,
                                           size_t work_buf_size, size_t data_alignment,
                                           size_t buffer_alignment, s64 offset, const u8* buffer,
                                           size_t size) {
    // Check preconditions.
    ASSERT(work_buf_size >= data_alignment);

    // Succeed if zero size.
    if (size == 0) {
        return size;
    }

    // Validate arguments.
    ASSERT(buffer != nullptr);

    // Determine extents.
    const u8* aligned_core_buffer; // Source of the directly-written aligned region.
    s64 core_offset;               // Storage offset of the aligned region.
    size_t core_size;              // Length of the aligned region (may be 0).
    s64 covered_offset;            // First storage offset satisfied by the core write.

    const size_t offset_round_up_difference = GetRoundUpDifference(offset, data_alignment);
    if (Common::IsAligned(reinterpret_cast<uintptr_t>(buffer) + offset_round_up_difference,
                          buffer_alignment)) {
        aligned_core_buffer = buffer + offset_round_up_difference;

        core_offset = Common::AlignUp(offset, data_alignment);
        core_size = (size < offset_round_up_difference)
                        ? 0
                        : Common::AlignDown(size - offset_round_up_difference, data_alignment);

        covered_offset = core_size > 0 ? core_offset : offset;
    } else {
        // Misaligned source buffer: everything goes through the work buffer
        // in the tail loop below.
        aligned_core_buffer = nullptr;

        core_offset = Common::AlignDown(offset, data_alignment);
        core_size = 0;

        covered_offset = offset;
    }

    // Write the core portion.
    if (core_size > 0) {
        base_storage->Write(aligned_core_buffer, core_size, core_offset);
    }

    // Handle the head portion: read-modify-write one full alignment unit.
    if (offset < covered_offset) {
        const s64 head_offset = Common::AlignDown(offset, data_alignment);
        const size_t head_size = static_cast<size_t>(covered_offset - offset);

        ASSERT((offset - head_offset) + head_size <= data_alignment);

        base_storage->Read(reinterpret_cast<u8*>(work_buf), data_alignment, head_offset);
        std::memcpy(work_buf + (offset - head_offset), buffer, head_size);
        base_storage->Write(reinterpret_cast<u8*>(work_buf), data_alignment, head_offset);
    }

    // Handle the tail portion, one alignment unit at a time (read-modify-write).
    s64 tail_offset = covered_offset + core_size;
    size_t remaining_tail_size = static_cast<size_t>((offset + size) - tail_offset);
    while (remaining_tail_size > 0) {
        ASSERT(static_cast<size_t>(tail_offset - offset) < size);

        const auto aligned_tail_offset = Common::AlignDown(tail_offset, data_alignment);
        const auto cur_size =
            std::min(static_cast<size_t>(aligned_tail_offset + data_alignment - tail_offset),
                     remaining_tail_size);

        base_storage->Read(reinterpret_cast<u8*>(work_buf), data_alignment, aligned_tail_offset);
        std::memcpy(work_buf + GetRoundDownDifference(tail_offset, data_alignment),
                    buffer + (tail_offset - offset), cur_size);
        base_storage->Write(reinterpret_cast<u8*>(work_buf), data_alignment, aligned_tail_offset);

        remaining_tail_size -= cur_size;
        tail_offset += cur_size;
    }

    return size;
}
203
204} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.h b/src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.h
new file mode 100644
index 000000000..4a05b0e88
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.h
@@ -0,0 +1,21 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "core/file_sys/errors.h"
7#include "core/file_sys/fssystem/fs_i_storage.h"
8
9namespace FileSys {
10
// Stateless helpers implementing alignment-matching reads and writes.
// `work_buf` (at least data_alignment bytes) stages the misaligned head and
// tail fragments; the aligned middle goes directly to/from `buffer`.
// Both functions return `size` on success.
class AlignmentMatchingStorageImpl {
public:
    static size_t Read(VirtualFile base_storage, char* work_buf, size_t work_buf_size,
                       size_t data_alignment, size_t buffer_alignment, s64 offset, u8* buffer,
                       size_t size);
    static size_t Write(VirtualFile base_storage, char* work_buf, size_t work_buf_size,
                        size_t data_alignment, size_t buffer_alignment, s64 offset,
                        const u8* buffer, size_t size);
};
20
21} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_bucket_tree.cpp b/src/core/file_sys/fssystem/fssystem_bucket_tree.cpp
new file mode 100644
index 000000000..699a366f1
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_bucket_tree.cpp
@@ -0,0 +1,598 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
#include <new>

#include "core/file_sys/errors.h"
#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
#include "core/file_sys/fssystem/fssystem_bucket_tree_utils.h"
#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
8
9namespace FileSys {
10
namespace {

// View of a bucket tree node as a header followed by an array of s64 offsets.
using Node = impl::BucketTreeNode<const s64*>;
static_assert(sizeof(Node) == sizeof(BucketTree::NodeHeader));
static_assert(std::is_trivial_v<Node>);

constexpr inline s32 NodeHeaderSize = sizeof(BucketTree::NodeHeader);

// Binary-search helper over the element array of a single node, usable either
// against an in-memory buffer or directly against backing storage.
class StorageNode {
private:
    // Iterator-like cursor over byte offsets advancing in a fixed stride, so
    // the same search logic works for any element size.
    class Offset {
    public:
        using difference_type = s64;

    private:
        s64 m_offset; // Current byte offset.
        s32 m_stride; // Bytes between consecutive elements.

    public:
        constexpr Offset(s64 offset, s32 stride) : m_offset(offset), m_stride(stride) {}

        constexpr Offset& operator++() {
            m_offset += m_stride;
            return *this;
        }
        constexpr Offset operator++(int) {
            Offset ret(*this);
            m_offset += m_stride;
            return ret;
        }

        constexpr Offset& operator--() {
            m_offset -= m_stride;
            return *this;
        }
        constexpr Offset operator--(int) {
            Offset ret(*this);
            m_offset -= m_stride;
            return ret;
        }

        // Distance in elements (not bytes) between two cursors.
        constexpr difference_type operator-(const Offset& rhs) const {
            return (m_offset - rhs.m_offset) / m_stride;
        }

        constexpr Offset operator+(difference_type ofs) const {
            return Offset(m_offset + ofs * m_stride, m_stride);
        }
        constexpr Offset operator-(difference_type ofs) const {
            return Offset(m_offset - ofs * m_stride, m_stride);
        }

        constexpr Offset& operator+=(difference_type ofs) {
            m_offset += ofs * m_stride;
            return *this;
        }
        constexpr Offset& operator-=(difference_type ofs) {
            m_offset -= ofs * m_stride;
            return *this;
        }

        constexpr bool operator==(const Offset& rhs) const {
            return m_offset == rhs.m_offset;
        }
        constexpr bool operator!=(const Offset& rhs) const {
            return m_offset != rhs.m_offset;
        }

        constexpr s64 Get() const {
            return m_offset;
        }
    };

private:
    const Offset m_start; // Byte offset of the first element (just past the header).
    const s32 m_count;    // Number of elements to search.
    s32 m_index;          // Search result; -1 means "no element <= target".

public:
    // In-buffer search: element i lives at byte NodeHeaderSize + i * size.
    StorageNode(size_t size, s32 count)
        : m_start(NodeHeaderSize, static_cast<s32>(size)), m_count(count), m_index(-1) {}
    // On-storage search: as above, relative to the node's base offset `ofs`.
    StorageNode(s64 ofs, size_t size, s32 count)
        : m_start(NodeHeaderSize + ofs, static_cast<s32>(size)), m_count(count), m_index(-1) {}

    s32 GetIndex() const {
        return m_index;
    }

    // Finds the last element whose leading s64 is <= virtual_address
    // (upper_bound-style binary search over `buffer`); m_index becomes -1 if
    // every element is greater than virtual_address.
    void Find(const char* buffer, s64 virtual_address) {
        s32 end = m_count;
        auto pos = m_start;

        while (end > 0) {
            auto half = end / 2;
            auto mid = pos + half;

            // Each element begins with its s64 virtual offset.
            s64 offset = 0;
            std::memcpy(std::addressof(offset), buffer + mid.Get(), sizeof(s64));

            if (offset <= virtual_address) {
                pos = mid + 1;
                end -= half + 1;
            } else {
                end = half;
            }
        }

        m_index = static_cast<s32>(pos - m_start) - 1;
    }

    // Same search, but each probed element is read directly from storage.
    Result Find(VirtualFile storage, s64 virtual_address) {
        s32 end = m_count;
        auto pos = m_start;

        while (end > 0) {
            auto half = end / 2;
            auto mid = pos + half;

            s64 offset = 0;
            storage->ReadObject(std::addressof(offset), mid.Get());

            if (offset <= virtual_address) {
                pos = mid + 1;
                end -= half + 1;
            } else {
                end = half;
            }
        }

        m_index = static_cast<s32>(pos - m_start) - 1;
        R_SUCCEED();
    }
};

} // namespace
146
147void BucketTree::Header::Format(s32 entry_count_) {
148 ASSERT(entry_count_ >= 0);
149
150 this->magic = Magic;
151 this->version = Version;
152 this->entry_count = entry_count_;
153 this->reserved = 0;
154}
155
// Validates an on-disk tree header: BKTR magic, non-negative entry count, and
// a version no newer than this implementation supports. Checks run in this
// order, which determines which error is reported first.
Result BucketTree::Header::Verify() const {
    R_UNLESS(this->magic == Magic, ResultInvalidBucketTreeSignature);
    R_UNLESS(this->entry_count >= 0, ResultInvalidBucketTreeEntryCount);
    R_UNLESS(this->version <= Version, ResultUnsupportedVersion);
    R_SUCCEED();
}
162
// Validates a node header against its expected position and layout: the
// stored index must match `node_index`, the node must be able to hold at
// least one entry, the entry count must fit within the node, and the stored
// offset must be non-negative.
Result BucketTree::NodeHeader::Verify(s32 node_index, size_t node_size, size_t entry_size) const {
    R_UNLESS(this->index == node_index, ResultInvalidBucketTreeNodeIndex);
    R_UNLESS(entry_size != 0 && node_size >= entry_size + NodeHeaderSize, ResultInvalidSize);

    // Maximum number of entries that physically fit after the header.
    const size_t max_entry_count = (node_size - NodeHeaderSize) / entry_size;
    R_UNLESS(this->count > 0 && static_cast<size_t>(this->count) <= max_entry_count,
             ResultInvalidBucketTreeNodeEntryCount);
    R_UNLESS(this->offset >= 0, ResultInvalidBucketTreeNodeOffset);

    R_SUCCEED();
}
174
// Initializes the bucket tree from backing node/entry storages.
// Preconditions: entry_size >= sizeof(s64); node_size is a power of two in
// [NodeSizeMin, NodeSizeMax] with room for the header plus one entry; the
// tree is not already initialized. Reads and verifies the L1 node, derives
// the tree's [start, end) virtual offset range, and caches it. On failure
// the L1 node allocation is released by the ON_RESULT_FAILURE guard.
Result BucketTree::Initialize(VirtualFile node_storage, VirtualFile entry_storage, size_t node_size,
                              size_t entry_size, s32 entry_count) {
    // Validate preconditions.
    ASSERT(entry_size >= sizeof(s64));
    ASSERT(node_size >= entry_size + sizeof(NodeHeader));
    ASSERT(NodeSizeMin <= node_size && node_size <= NodeSizeMax);
    ASSERT(Common::IsPowerOfTwo(node_size));
    ASSERT(!this->IsInitialized());

    // Ensure valid entry count.
    R_UNLESS(entry_count > 0, ResultInvalidArgument);

    // Allocate node.
    R_UNLESS(m_node_l1.Allocate(node_size), ResultBufferAllocationFailed);
    ON_RESULT_FAILURE {
        m_node_l1.Free(node_size);
    };

    // Read node (the L1 node occupies the start of node storage).
    node_storage->Read(reinterpret_cast<u8*>(m_node_l1.Get()), node_size);

    // Verify node.
    R_TRY(m_node_l1->Verify(0, node_size, sizeof(s64)));

    // Validate offsets.
    const auto offset_count = GetOffsetCount(node_size);
    const auto entry_set_count = GetEntrySetCount(node_size, entry_size, entry_count);
    const auto* const node = m_node_l1.Get<Node>();

    s64 start_offset;
    // When an L2 offset layer exists and the L1 node is not full, the first
    // offset lives at the end of the L1 array (presumably after spilled L2
    // offsets — see IsExistOffsetL2OnL1; confirm against the format spec).
    if (offset_count < entry_set_count && node->GetCount() < offset_count) {
        start_offset = *node->GetEnd();
    } else {
        start_offset = *node->GetBegin();
    }
    const auto end_offset = node->GetEndOffset();

    R_UNLESS(0 <= start_offset && start_offset <= node->GetBeginOffset(),
             ResultInvalidBucketTreeEntryOffset);
    R_UNLESS(start_offset < end_offset, ResultInvalidBucketTreeEntryOffset);

    // Set member variables.
    m_node_storage = node_storage;
    m_entry_storage = entry_storage;
    m_node_size = node_size;
    m_entry_size = entry_size;
    m_entry_count = entry_count;
    m_offset_count = offset_count;
    m_entry_set_count = entry_set_count;

    m_offset_cache.offsets.start_offset = start_offset;
    m_offset_cache.offsets.end_offset = end_offset;
    m_offset_cache.is_initialized = true;

    // Cancel guard.
    R_SUCCEED();
}
232
// Initializes a tree with no backing node/entry storage, covering the single
// virtual range [0, end_offset). Only the node size and offset cache are set.
void BucketTree::Initialize(size_t node_size, s64 end_offset) {
    ASSERT(NodeSizeMin <= node_size && node_size <= NodeSizeMax);
    ASSERT(Common::IsPowerOfTwo(node_size));
    ASSERT(end_offset > 0);
    ASSERT(!this->IsInitialized());

    m_node_size = node_size;

    m_offset_cache.offsets.start_offset = 0;
    m_offset_cache.offsets.end_offset = end_offset;
    m_offset_cache.is_initialized = true;
}
245
// Releases all resources and returns the tree to its uninitialized state.
// Safe to call on an uninitialized tree (no-op).
void BucketTree::Finalize() {
    if (this->IsInitialized()) {
        m_node_storage = VirtualFile();
        m_entry_storage = VirtualFile();
        m_node_l1.Free(m_node_size);
        m_node_size = 0;
        m_entry_size = 0;
        m_entry_count = 0;
        m_offset_count = 0;
        m_entry_set_count = 0;

        m_offset_cache.offsets.start_offset = 0;
        m_offset_cache.offsets.end_offset = 0;
        m_offset_cache.is_initialized = false;
    }
}
262
// Positions `visitor` at the entry containing `virtual_address`.
// Fails with ResultOutOfRange when the tree is empty; the visitor reports
// out-of-range addresses itself during its search.
Result BucketTree::Find(Visitor* visitor, s64 virtual_address) {
    ASSERT(visitor != nullptr);
    ASSERT(this->IsInitialized());

    R_UNLESS(virtual_address >= 0, ResultInvalidOffset);
    R_UNLESS(!this->IsEmpty(), ResultOutOfRange);

    // Fetch the cached [start, end) range (presumably refreshed via
    // EnsureOffsetCache when invalidated).
    BucketTree::Offsets offsets;
    R_TRY(this->GetOffsets(std::addressof(offsets)));

    R_TRY(visitor->Initialize(this, offsets));

    R_RETURN(visitor->Find(virtual_address));
}
277
// Drops the cached [start, end) offsets so they are rebuilt from storage by
// EnsureOffsetCache() on next use.
Result BucketTree::InvalidateCache() {
    // Reset our offsets.
    m_offset_cache.is_initialized = false;

    R_SUCCEED();
}
284
// Lazily rebuilds the cached [start, end) offsets by re-reading and
// re-verifying the L1 node. Uses a double-checked pattern: an unsynchronized
// fast-path check, then the lock plus a re-check before doing the work.
// NOTE(review): the unlocked read of is_initialized is not atomic — formally
// a data race; confirm this is acceptable for the intended callers.
Result BucketTree::EnsureOffsetCache() {
    // If we already have an offset cache, we're good.
    R_SUCCEED_IF(m_offset_cache.is_initialized);

    // Acquire exclusive right to edit the offset cache.
    std::scoped_lock lk(m_offset_cache.mutex);

    // Check again, to be sure.
    R_SUCCEED_IF(m_offset_cache.is_initialized);

    // Read/verify L1.
    m_node_storage->Read(reinterpret_cast<u8*>(m_node_l1.Get()), m_node_size);
    R_TRY(m_node_l1->Verify(0, m_node_size, sizeof(s64)));

    // Get the node.
    auto* const node = m_node_l1.Get<Node>();

    s64 start_offset;
    // Same start-offset selection as Initialize(): with an L2 layer and a
    // non-full L1 node, the first offset lives at the end of the L1 array.
    if (m_offset_count < m_entry_set_count && node->GetCount() < m_offset_count) {
        start_offset = *node->GetEnd();
    } else {
        start_offset = *node->GetBegin();
    }
    const auto end_offset = node->GetEndOffset();

    R_UNLESS(0 <= start_offset && start_offset <= node->GetBeginOffset(),
             ResultInvalidBucketTreeEntryOffset);
    R_UNLESS(start_offset < end_offset, ResultInvalidBucketTreeEntryOffset);

    m_offset_cache.offsets.start_offset = start_offset;
    m_offset_cache.offsets.end_offset = end_offset;
    m_offset_cache.is_initialized = true;

    R_SUCCEED();
}
320
321Result BucketTree::Visitor::Initialize(const BucketTree* tree, const BucketTree::Offsets& offsets) {
322 ASSERT(tree != nullptr);
323 ASSERT(m_tree == nullptr || m_tree == tree);
324
325 if (m_entry == nullptr) {
326 m_entry = ::operator new(tree->m_entry_size);
327 R_UNLESS(m_entry != nullptr, ResultBufferAllocationFailed);
328
329 m_tree = tree;
330 m_offsets = offsets;
331 }
332
333 R_SUCCEED();
334}
335
336Result BucketTree::Visitor::MoveNext() {
337 R_UNLESS(this->IsValid(), ResultOutOfRange);
338
339 // Invalidate our index, and read the header for the next index.
340 auto entry_index = m_entry_index + 1;
341 if (entry_index == m_entry_set.info.count) {
342 const auto entry_set_index = m_entry_set.info.index + 1;
343 R_UNLESS(entry_set_index < m_entry_set_count, ResultOutOfRange);
344
345 m_entry_index = -1;
346
347 const auto end = m_entry_set.info.end;
348
349 const auto entry_set_size = m_tree->m_node_size;
350 const auto entry_set_offset = entry_set_index * static_cast<s64>(entry_set_size);
351
352 m_tree->m_entry_storage->ReadObject(std::addressof(m_entry_set), entry_set_offset);
353 R_TRY(m_entry_set.header.Verify(entry_set_index, entry_set_size, m_tree->m_entry_size));
354
355 R_UNLESS(m_entry_set.info.start == end && m_entry_set.info.start < m_entry_set.info.end,
356 ResultInvalidBucketTreeEntrySetOffset);
357
358 entry_index = 0;
359 } else {
360 m_entry_index = 1;
361 }
362
363 // Read the new entry.
364 const auto entry_size = m_tree->m_entry_size;
365 const auto entry_offset = impl::GetBucketTreeEntryOffset(
366 m_entry_set.info.index, m_tree->m_node_size, entry_size, entry_index);
367 m_tree->m_entry_storage->Read(reinterpret_cast<u8*>(m_entry), entry_size, entry_offset);
368
369 // Note that we changed index.
370 m_entry_index = entry_index;
371 R_SUCCEED();
372}
373
// Steps the visitor to the previous entry, crossing into the previous entry
// set (re-reading and re-verifying its header, and checking that the sets
// are contiguous) when the current index reaches 0. The index is held at the
// invalid marker (-1) while the new entry is read, then updated on success.
Result BucketTree::Visitor::MovePrevious() {
    R_UNLESS(this->IsValid(), ResultOutOfRange);

    // Invalidate our index, and read the header for the previous index.
    auto entry_index = m_entry_index;
    if (entry_index == 0) {
        R_UNLESS(m_entry_set.info.index > 0, ResultOutOfRange);

        m_entry_index = -1;

        const auto start = m_entry_set.info.start;

        const auto entry_set_size = m_tree->m_node_size;
        const auto entry_set_index = m_entry_set.info.index - 1;
        const auto entry_set_offset = entry_set_index * static_cast<s64>(entry_set_size);

        m_tree->m_entry_storage->ReadObject(std::addressof(m_entry_set), entry_set_offset);
        R_TRY(m_entry_set.header.Verify(entry_set_index, entry_set_size, m_tree->m_entry_size));

        // The previous set must end exactly where the current one started.
        R_UNLESS(m_entry_set.info.end == start && m_entry_set.info.start < m_entry_set.info.end,
                 ResultInvalidBucketTreeEntrySetOffset);

        entry_index = m_entry_set.info.count;
    } else {
        m_entry_index = -1;
    }

    --entry_index;

    // Read the new entry.
    const auto entry_size = m_tree->m_entry_size;
    const auto entry_offset = impl::GetBucketTreeEntryOffset(
        m_entry_set.info.index, m_tree->m_node_size, entry_size, entry_index);
    m_tree->m_entry_storage->Read(reinterpret_cast<u8*>(m_entry), entry_size, entry_offset);

    // Note that we changed index.
    m_entry_index = entry_index;
    R_SUCCEED();
}
413
// Locates the entry containing `virtual_address` and positions this visitor
// on it: first resolve an entry set index via the L1 node (descending through
// the L2 layer when present), then search within that entry set.
Result BucketTree::Visitor::Find(s64 virtual_address) {
    ASSERT(m_tree != nullptr);

    // Get the node.
    const auto* const node = m_tree->m_node_l1.Get<Node>();
    R_UNLESS(virtual_address < node->GetEndOffset(), ResultOutOfRange);

    // Get the entry set index.
    s32 entry_set_index = -1;
    if (m_tree->IsExistOffsetL2OnL1() && virtual_address < node->GetBeginOffset()) {
        // Addresses below the L1 begin offset are resolved from the region
        // past the L1 offsets (L2 offsets spilled onto the L1 node —
        // inferred from IsExistOffsetL2OnL1; confirm against the format).
        const auto start = node->GetEnd();
        const auto end = node->GetBegin() + m_tree->m_offset_count;

        auto pos = std::upper_bound(start, end, virtual_address);
        R_UNLESS(start < pos, ResultOutOfRange);
        --pos;

        entry_set_index = static_cast<s32>(pos - start);
    } else {
        // Binary search the L1 offsets for the last one <= virtual_address.
        const auto start = node->GetBegin();
        const auto end = node->GetEnd();

        auto pos = std::upper_bound(start, end, virtual_address);
        R_UNLESS(start < pos, ResultOutOfRange);
        --pos;

        if (m_tree->IsExistL2()) {
            // With an L2 layer, the L1 result selects an L2 node that must be
            // searched for the actual entry set index.
            const auto node_index = static_cast<s32>(pos - start);
            R_UNLESS(0 <= node_index && node_index < m_tree->m_offset_count,
                     ResultInvalidBucketTreeNodeOffset);

            R_TRY(this->FindEntrySet(std::addressof(entry_set_index), virtual_address, node_index));
        } else {
            entry_set_index = static_cast<s32>(pos - start);
        }
    }

    // Validate the entry set index.
    R_UNLESS(0 <= entry_set_index && entry_set_index < m_tree->m_entry_set_count,
             ResultInvalidBucketTreeNodeOffset);

    // Find the entry.
    R_TRY(this->FindEntry(virtual_address, entry_set_index));

    // Set count.
    m_entry_set_count = m_tree->m_entry_set_count;
    R_SUCCEED();
}
462
463Result BucketTree::Visitor::FindEntrySet(s32* out_index, s64 virtual_address, s32 node_index) {
464 const auto node_size = m_tree->m_node_size;
465
466 PooledBuffer pool(node_size, 1);
467 if (node_size <= pool.GetSize()) {
468 R_RETURN(
469 this->FindEntrySetWithBuffer(out_index, virtual_address, node_index, pool.GetBuffer()));
470 } else {
471 pool.Deallocate();
472 R_RETURN(this->FindEntrySetWithoutBuffer(out_index, virtual_address, node_index));
473 }
474}
475
// Searches a single L2 node, fully loaded into `buffer` (>= node_size bytes),
// for the entry set containing `virtual_address`.
Result BucketTree::Visitor::FindEntrySetWithBuffer(s32* out_index, s64 virtual_address,
                                                   s32 node_index, char* buffer) {
    // Calculate node extents; +1 skips the L1 node at the start of node storage.
    const auto node_size = m_tree->m_node_size;
    const auto node_offset = (node_index + 1) * static_cast<s64>(node_size);
    VirtualFile storage = m_tree->m_node_storage;

    // Read the node.
    storage->Read(reinterpret_cast<u8*>(buffer), node_size, node_offset);

    // Validate the header.
    NodeHeader header;
    std::memcpy(std::addressof(header), buffer, NodeHeaderSize);
    R_TRY(header.Verify(node_index, node_size, sizeof(s64)));

    // Create the node, and find.
    StorageNode node(sizeof(s64), header.count);
    node.Find(buffer, virtual_address);
    R_UNLESS(node.GetIndex() >= 0, ResultInvalidBucketTreeVirtualOffset);

    // Return the index.
    *out_index = static_cast<s32>(m_tree->GetEntrySetIndex(header.index, node.GetIndex()));
    R_SUCCEED();
}
500
501Result BucketTree::Visitor::FindEntrySetWithoutBuffer(s32* out_index, s64 virtual_address,
502 s32 node_index) {
503 // Calculate node extents.
504 const auto node_size = m_tree->m_node_size;
505 const auto node_offset = (node_index + 1) * static_cast<s64>(node_size);
506 VirtualFile storage = m_tree->m_node_storage;
507
508 // Read and validate the header.
509 NodeHeader header;
510 storage->ReadObject(std::addressof(header), node_offset);
511 R_TRY(header.Verify(node_index, node_size, sizeof(s64)));
512
513 // Create the node, and find.
514 StorageNode node(node_offset, sizeof(s64), header.count);
515 R_TRY(node.Find(storage, virtual_address));
516 R_UNLESS(node.GetIndex() >= 0, ResultOutOfRange);
517
518 // Return the index.
519 *out_index = static_cast<s32>(m_tree->GetEntrySetIndex(header.index, node.GetIndex()));
520 R_SUCCEED();
521}
522
523Result BucketTree::Visitor::FindEntry(s64 virtual_address, s32 entry_set_index) {
524 const auto entry_set_size = m_tree->m_node_size;
525
526 PooledBuffer pool(entry_set_size, 1);
527 if (entry_set_size <= pool.GetSize()) {
528 R_RETURN(this->FindEntryWithBuffer(virtual_address, entry_set_index, pool.GetBuffer()));
529 } else {
530 pool.Deallocate();
531 R_RETURN(this->FindEntryWithoutBuffer(virtual_address, entry_set_index));
532 }
533}
534
// Searches an entry set fully loaded into `buffer` (>= node_size bytes) for
// the entry containing `virtual_address`, then copies that entry into
// m_entry and records the set/index on this visitor.
Result BucketTree::Visitor::FindEntryWithBuffer(s64 virtual_address, s32 entry_set_index,
                                                char* buffer) {
    // Calculate entry set extents (an entry set is exactly one node).
    const auto entry_size = m_tree->m_entry_size;
    const auto entry_set_size = m_tree->m_node_size;
    const auto entry_set_offset = entry_set_index * static_cast<s64>(entry_set_size);
    VirtualFile storage = m_tree->m_entry_storage;

    // Read the entry set.
    storage->Read(reinterpret_cast<u8*>(buffer), entry_set_size, entry_set_offset);

    // Validate the entry_set.
    EntrySetHeader entry_set;
    std::memcpy(std::addressof(entry_set), buffer, sizeof(EntrySetHeader));
    R_TRY(entry_set.header.Verify(entry_set_index, entry_set_size, entry_size));

    // Create the node, and find.
    StorageNode node(entry_size, entry_set.info.count);
    node.Find(buffer, virtual_address);
    // NOTE(review): FindEntrySetWithBuffer reports
    // ResultInvalidBucketTreeVirtualOffset for the same condition — confirm
    // which code is intended here.
    R_UNLESS(node.GetIndex() >= 0, ResultOutOfRange);

    // Copy the data into entry.
    const auto entry_index = node.GetIndex();
    const auto entry_offset = impl::GetBucketTreeEntryOffset(0, entry_size, entry_index);
    std::memcpy(m_entry, buffer + entry_offset, entry_size);

    // Set our entry set/index.
    m_entry_set = entry_set;
    m_entry_index = entry_index;

    R_SUCCEED();
}
567
// Searches an entry set for the entry containing `virtual_address`, reading
// each probed element directly from entry storage (fallback when no pooled
// buffer is available), then loads the entry into m_entry and records the
// set/index on this visitor.
Result BucketTree::Visitor::FindEntryWithoutBuffer(s64 virtual_address, s32 entry_set_index) {
    // Calculate entry set extents (an entry set is exactly one node).
    const auto entry_size = m_tree->m_entry_size;
    const auto entry_set_size = m_tree->m_node_size;
    const auto entry_set_offset = entry_set_index * static_cast<s64>(entry_set_size);
    VirtualFile storage = m_tree->m_entry_storage;

    // Read and validate the entry_set.
    EntrySetHeader entry_set;
    storage->ReadObject(std::addressof(entry_set), entry_set_offset);
    R_TRY(entry_set.header.Verify(entry_set_index, entry_set_size, entry_size));

    // Create the node, and find.
    StorageNode node(entry_set_offset, entry_size, entry_set.info.count);
    R_TRY(node.Find(storage, virtual_address));
    R_UNLESS(node.GetIndex() >= 0, ResultOutOfRange);

    // Copy the data into entry.
    const auto entry_index = node.GetIndex();
    const auto entry_offset =
        impl::GetBucketTreeEntryOffset(entry_set_offset, entry_size, entry_index);
    storage->Read(reinterpret_cast<u8*>(m_entry), entry_size, entry_offset);

    // Set our entry set/index.
    m_entry_set = entry_set;
    m_entry_index = entry_index;

    R_SUCCEED();
}
597
598} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_bucket_tree.h b/src/core/file_sys/fssystem/fssystem_bucket_tree.h
new file mode 100644
index 000000000..74a2f7583
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_bucket_tree.h
@@ -0,0 +1,417 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include <mutex>
7
8#include "common/alignment.h"
9#include "common/common_funcs.h"
10#include "common/common_types.h"
11#include "common/literals.h"
12
13#include "core/file_sys/vfs.h"
14#include "core/hle/result.h"
15
16namespace FileSys {
17
18using namespace Common::Literals;
19
// BucketTree is a two-level lookup structure over sorted, variable-coverage entries
// (as used by the "BKTR" on-disk format): an L1 node (plus optional L2 nodes) of
// s64 offsets selects an "entry set", and each entry set is a node-sized region of
// entries. Lookups are performed through the nested Visitor class.
class BucketTree {
    YUZU_NON_COPYABLE(BucketTree);
    YUZU_NON_MOVEABLE(BucketTree);

public:
    // On-disk magic/version for Header::Verify.
    static constexpr u32 Magic = Common::MakeMagic('B', 'K', 'T', 'R');
    static constexpr u32 Version = 1;

    // Valid node sizes; node size must also be a power of two (asserted below).
    static constexpr size_t NodeSizeMin = 1_KiB;
    static constexpr size_t NodeSizeMax = 512_KiB;

public:
    class Visitor;

    // Header of the bucket tree's header storage region.
    struct Header {
        u32 magic;
        u32 version;
        s32 entry_count;
        s32 reserved;

        void Format(s32 entry_count);
        Result Verify() const;
    };
    static_assert(std::is_trivial_v<Header>);
    static_assert(sizeof(Header) == 0x10);

    // Header preceding every node / entry set in node and entry storage.
    // 'offset' is the end offset of the range the node covers.
    struct NodeHeader {
        s32 index;
        s32 count;
        s64 offset;

        Result Verify(s32 node_index, size_t node_size, size_t entry_size) const;
    };
    static_assert(std::is_trivial_v<NodeHeader>);
    static_assert(sizeof(NodeHeader) == 0x10);

    // Half-open virtual range [start_offset, end_offset) covered by the tree.
    struct Offsets {
        s64 start_offset;
        s64 end_offset;

        // Whether a single offset lies within the range.
        constexpr bool IsInclude(s64 offset) const {
            return this->start_offset <= offset && offset < this->end_offset;
        }

        // Whether the span [offset, offset + size) lies entirely within the range.
        // Written as a subtraction to avoid overflowing offset + size.
        constexpr bool IsInclude(s64 offset, s64 size) const {
            return size > 0 && this->start_offset <= offset && size <= (this->end_offset - offset);
        }
    };
    static_assert(std::is_trivial_v<Offsets>);
    static_assert(sizeof(Offsets) == 0x10);

    // Lazily-populated, mutex-guarded cache of the tree's Offsets
    // (filled by EnsureOffsetCache, consumed by GetOffsets).
    struct OffsetCache {
        Offsets offsets;
        std::mutex mutex;
        bool is_initialized;

        OffsetCache() : offsets{-1, -1}, mutex(), is_initialized(false) {}
    };

    // Result of ScanContinuousReading: how many bytes may be read in one
    // physical access, and how many entries a caller may skip re-scanning.
    class ContinuousReadingInfo {
    private:
        size_t m_read_size;
        s32 m_skip_count;
        bool m_done;

    public:
        constexpr ContinuousReadingInfo() : m_read_size(), m_skip_count(), m_done() {}

        constexpr void Reset() {
            m_read_size = 0;
            m_skip_count = 0;
            m_done = false;
        }

        constexpr void SetSkipCount(s32 count) {
            ASSERT(count >= 0);
            m_skip_count = count;
        }
        constexpr s32 GetSkipCount() const {
            return m_skip_count;
        }
        // Decrements the skip count; returns true once it is exhausted and a
        // fresh scan is required. Note: mutates state despite the query-like name.
        constexpr bool CheckNeedScan() {
            return (--m_skip_count) <= 0;
        }

        constexpr void Done() {
            m_read_size = 0;
            m_done = true;
        }
        constexpr bool IsDone() const {
            return m_done;
        }

        constexpr void SetReadSize(size_t size) {
            m_read_size = size;
        }
        constexpr size_t GetReadSize() const {
            return m_read_size;
        }
        constexpr bool CanDo() const {
            return m_read_size > 0;
        }
    };

private:
    // Owning wrapper for one node-sized, s64-aligned heap buffer holding a
    // NodeHeader followed by its payload. Move-only; must be Free()d before
    // destruction (asserted in the destructor).
    class NodeBuffer {
        YUZU_NON_COPYABLE(NodeBuffer);

    private:
        void* m_header;

    public:
        NodeBuffer() : m_header() {}

        ~NodeBuffer() {
            ASSERT(m_header == nullptr);
        }

        NodeBuffer(NodeBuffer&& rhs) : m_header(rhs.m_header) {
            rhs.m_header = nullptr;
        }

        NodeBuffer& operator=(NodeBuffer&& rhs) {
            if (this != std::addressof(rhs)) {
                ASSERT(m_header == nullptr);

                m_header = rhs.m_header;

                rhs.m_header = nullptr;
            }
            return *this;
        }

        bool Allocate(size_t node_size) {
            ASSERT(m_header == nullptr);

            // NOTE(review): this aligned operator new throws std::bad_alloc on
            // failure rather than returning nullptr, so the check below only
            // matters for a nothrow overload — confirm intended.
            m_header = ::operator new(node_size, std::align_val_t{sizeof(s64)});

            // ASSERT(Common::IsAligned(m_header, sizeof(s64)));

            return m_header != nullptr;
        }

        // node_size is accepted for symmetry with Allocate but unused here
        // (the aligned delete overload does not take a size).
        void Free(size_t node_size) {
            if (m_header) {
                ::operator delete(m_header, std::align_val_t{sizeof(s64)});
                m_header = nullptr;
            }
        }

        // Zeroes the buffer contents; const because only the pointee changes.
        void FillZero(size_t node_size) const {
            if (m_header) {
                std::memset(m_header, 0, node_size);
            }
        }

        NodeHeader* Get() const {
            return reinterpret_cast<NodeHeader*>(m_header);
        }

        NodeHeader* operator->() const {
            return this->Get();
        }

        // Reinterpret the buffer as a header-compatible trivial type.
        template <typename T>
        T* Get() const {
            static_assert(std::is_trivial_v<T>);
            static_assert(sizeof(T) == sizeof(NodeHeader));
            return reinterpret_cast<T*>(m_header);
        }
    };

private:
    // Entries that fit in one node after its header.
    static constexpr s32 GetEntryCount(size_t node_size, size_t entry_size) {
        return static_cast<s32>((node_size - sizeof(NodeHeader)) / entry_size);
    }

    // s64 offsets that fit in one node after its header.
    static constexpr s32 GetOffsetCount(size_t node_size) {
        return static_cast<s32>((node_size - sizeof(NodeHeader)) / sizeof(s64));
    }

    // Number of entry sets needed to hold entry_count entries.
    static constexpr s32 GetEntrySetCount(size_t node_size, size_t entry_size, s32 entry_count) {
        const s32 entry_count_per_node = GetEntryCount(node_size, entry_size);
        return Common::DivideUp(entry_count, entry_count_per_node);
    }

    // Number of L2 nodes required; 0 when all entry-set offsets fit in the L1 node.
    static constexpr s32 GetNodeL2Count(size_t node_size, size_t entry_size, s32 entry_count) {
        const s32 offset_count_per_node = GetOffsetCount(node_size);
        const s32 entry_set_count = GetEntrySetCount(node_size, entry_size, entry_count);

        if (entry_set_count <= offset_count_per_node) {
            return 0;
        }

        const s32 node_l2_count = Common::DivideUp(entry_set_count, offset_count_per_node);
        ASSERT(node_l2_count <= offset_count_per_node);

        // The L1 node holds (offset_count_per_node - (node_l2_count - 1)) entry-set
        // offsets directly; the remainder are distributed across L2 nodes.
        return Common::DivideUp(entry_set_count - (offset_count_per_node - (node_l2_count - 1)),
                                offset_count_per_node);
    }

public:
    static constexpr s64 QueryHeaderStorageSize() {
        return sizeof(Header);
    }

    // Bytes of node storage (L1 + L2 nodes) needed for the given geometry.
    static constexpr s64 QueryNodeStorageSize(size_t node_size, size_t entry_size,
                                              s32 entry_count) {
        ASSERT(entry_size >= sizeof(s64));
        ASSERT(node_size >= entry_size + sizeof(NodeHeader));
        ASSERT(NodeSizeMin <= node_size && node_size <= NodeSizeMax);
        ASSERT(Common::IsPowerOfTwo(node_size));
        ASSERT(entry_count >= 0);

        if (entry_count <= 0) {
            return 0;
        }
        return (1 + GetNodeL2Count(node_size, entry_size, entry_count)) *
               static_cast<s64>(node_size);
    }

    // Bytes of entry storage (entry sets) needed for the given geometry.
    static constexpr s64 QueryEntryStorageSize(size_t node_size, size_t entry_size,
                                               s32 entry_count) {
        ASSERT(entry_size >= sizeof(s64));
        ASSERT(node_size >= entry_size + sizeof(NodeHeader));
        ASSERT(NodeSizeMin <= node_size && node_size <= NodeSizeMax);
        ASSERT(Common::IsPowerOfTwo(node_size));
        ASSERT(entry_count >= 0);

        if (entry_count <= 0) {
            return 0;
        }
        return GetEntrySetCount(node_size, entry_size, entry_count) * static_cast<s64>(node_size);
    }

private:
    mutable VirtualFile m_node_storage;
    mutable VirtualFile m_entry_storage;
    NodeBuffer m_node_l1;       // cached L1 node
    size_t m_node_size;         // 0 <=> not initialized (see IsInitialized)
    size_t m_entry_size;        // 0 <=> empty tree (see IsEmpty)
    s32 m_entry_count;
    s32 m_offset_count;         // offsets per node
    s32 m_entry_set_count;
    OffsetCache m_offset_cache;

public:
    BucketTree()
        : m_node_storage(), m_entry_storage(), m_node_l1(), m_node_size(), m_entry_size(),
          m_entry_count(), m_offset_count(), m_entry_set_count(), m_offset_cache() {}
    ~BucketTree() {
        this->Finalize();
    }

    Result Initialize(VirtualFile node_storage, VirtualFile entry_storage, size_t node_size,
                      size_t entry_size, s32 entry_count);
    void Initialize(size_t node_size, s64 end_offset);
    void Finalize();

    bool IsInitialized() const {
        return m_node_size > 0;
    }
    bool IsEmpty() const {
        return m_entry_size == 0;
    }

    // Positions the visitor on the entry covering virtual_address.
    Result Find(Visitor* visitor, s64 virtual_address);
    Result InvalidateCache();

    s32 GetEntryCount() const {
        return m_entry_count;
    }

    // Returns the cached [start, end) virtual range, populating the cache on
    // first use.
    Result GetOffsets(Offsets* out) {
        // Ensure we have an offset cache.
        R_TRY(this->EnsureOffsetCache());

        // Set the output.
        *out = m_offset_cache.offsets;
        R_SUCCEED();
    }

private:
    // Snapshot of a visitor's position passed to ScanContinuousReading.
    template <typename EntryType>
    struct ContinuousReadingParam {
        s64 offset;
        size_t size;
        NodeHeader entry_set;
        s32 entry_index;
        Offsets offsets;
        EntryType entry;
    };

private:
    template <typename EntryType>
    Result ScanContinuousReading(ContinuousReadingInfo* out_info,
                                 const ContinuousReadingParam<EntryType>& param) const;

    // True when entry-set offsets overflow the L1 node into L2 nodes.
    bool IsExistL2() const {
        return m_offset_count < m_entry_set_count;
    }
    bool IsExistOffsetL2OnL1() const {
        return this->IsExistL2() && m_node_l1->count < m_offset_count;
    }

    // Maps an (L2 node index, offset index) pair to a global entry-set index;
    // the leading term accounts for the offsets stored directly on the L1 node.
    s64 GetEntrySetIndex(s32 node_index, s32 offset_index) const {
        return (m_offset_count - m_node_l1->count) + (m_offset_count * node_index) + offset_index;
    }

    Result EnsureOffsetCache();
};
331
// Visitor is a cursor over a BucketTree: it owns a heap copy of the current
// entry (m_entry, m_tree->m_entry_size bytes) plus the header of the entry set
// it came from, and can move to adjacent entries. Initialized only via
// BucketTree::Find; the referenced tree must outlive the visitor (the
// destructor performs a sized delete using m_tree->m_entry_size).
class BucketTree::Visitor {
    YUZU_NON_COPYABLE(Visitor);
    YUZU_NON_MOVEABLE(Visitor);

private:
    friend class BucketTree;

    // Overlays the generic NodeHeader with the entry-set-specific view
    // (count plus the [start, end) virtual range the set covers).
    union EntrySetHeader {
        NodeHeader header;
        struct Info {
            s32 index;
            s32 count;
            s64 end;
            s64 start;
        } info;
        static_assert(std::is_trivial_v<Info>);
    };
    static_assert(std::is_trivial_v<EntrySetHeader>);

private:
    const BucketTree* m_tree;
    BucketTree::Offsets m_offsets;
    void* m_entry;            // owned copy of the current entry; null until Initialize
    s32 m_entry_index;        // -1 <=> invalid (see IsValid)
    s32 m_entry_set_count;
    EntrySetHeader m_entry_set;

public:
    constexpr Visitor()
        : m_tree(), m_entry(), m_entry_index(-1), m_entry_set_count(), m_entry_set{} {}
    ~Visitor() {
        if (m_entry != nullptr) {
            // Sized deallocation matching the m_tree->m_entry_size allocation.
            ::operator delete(m_entry, m_tree->m_entry_size);
            m_tree = nullptr;
            m_entry = nullptr;
        }
    }

    bool IsValid() const {
        return m_entry_index >= 0;
    }
    // A next entry exists either later in this entry set or in a following set.
    bool CanMoveNext() const {
        return this->IsValid() && (m_entry_index + 1 < m_entry_set.info.count ||
                                   m_entry_set.info.index + 1 < m_entry_set_count);
    }
    bool CanMovePrevious() const {
        return this->IsValid() && (m_entry_index > 0 || m_entry_set.info.index > 0);
    }

    Result MoveNext();
    Result MovePrevious();

    // Computes how far reading may proceed contiguously from the current entry.
    template <typename EntryType>
    Result ScanContinuousReading(ContinuousReadingInfo* out_info, s64 offset, size_t size) const;

    const void* Get() const {
        ASSERT(this->IsValid());
        return m_entry;
    }

    // Typed view of the current entry; T must match the tree's entry layout.
    template <typename T>
    const T* Get() const {
        ASSERT(this->IsValid());
        return reinterpret_cast<const T*>(m_entry);
    }

    const BucketTree* GetTree() const {
        return m_tree;
    }

private:
    Result Initialize(const BucketTree* tree, const BucketTree::Offsets& offsets);

    Result Find(s64 virtual_address);

    Result FindEntrySet(s32* out_index, s64 virtual_address, s32 node_index);
    Result FindEntrySetWithBuffer(s32* out_index, s64 virtual_address, s32 node_index,
                                  char* buffer);
    Result FindEntrySetWithoutBuffer(s32* out_index, s64 virtual_address, s32 node_index);

    Result FindEntry(s64 virtual_address, s32 entry_set_index);
    Result FindEntryWithBuffer(s64 virtual_address, s32 entry_set_index, char* buffer);
    Result FindEntryWithoutBuffer(s64 virtual_address, s32 entry_set_index);
};
416
417} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_bucket_tree_template_impl.h b/src/core/file_sys/fssystem/fssystem_bucket_tree_template_impl.h
new file mode 100644
index 000000000..030b2916b
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_bucket_tree_template_impl.h
@@ -0,0 +1,170 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "core/file_sys/errors.h"
7#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
8#include "core/file_sys/fssystem/fssystem_bucket_tree_utils.h"
9#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
10
11namespace FileSys {
12
// Scans forward from the position captured in param, merging adjacent entries
// whose physical data is contiguous, and reports via out_info how many bytes
// can be serviced by a single physical read and how many entries the caller
// may skip before the next scan. EntryType must expose GetVirtualOffset,
// GetPhysicalOffset, IsFragment and FragmentSizeMax (e.g. indirect-storage
// entries).
template <typename EntryType>
Result BucketTree::ScanContinuousReading(ContinuousReadingInfo* out_info,
                                         const ContinuousReadingParam<EntryType>& param) const {
    static_assert(std::is_trivial_v<ContinuousReadingParam<EntryType>>);

    // Validate our preconditions.
    ASSERT(this->IsInitialized());
    ASSERT(out_info != nullptr);
    ASSERT(m_entry_size == sizeof(EntryType));

    // Reset the output.
    out_info->Reset();

    // If there's nothing to read, we're done.
    R_SUCCEED_IF(param.size == 0);

    // If we're reading a fragment, we're done.
    R_SUCCEED_IF(param.entry.IsFragment());

    // Validate the first entry.
    auto entry = param.entry;
    auto cur_offset = param.offset;
    R_UNLESS(entry.GetVirtualOffset() <= cur_offset, ResultOutOfRange);

    // Create a pooled buffer for our scan.
    PooledBuffer pool(m_node_size, 1);
    char* buffer = nullptr;

    s64 entry_storage_size = m_entry_storage->GetSize();

    // Read the node. If the pool is too small we fall back to per-entry reads
    // from storage below (buffer stays null).
    if (m_node_size <= pool.GetSize()) {
        buffer = pool.GetBuffer();
        const auto ofs = param.entry_set.index * static_cast<s64>(m_node_size);
        R_UNLESS(m_node_size + ofs <= static_cast<size_t>(entry_storage_size),
                 ResultInvalidBucketTreeNodeEntryCount);

        m_entry_storage->Read(reinterpret_cast<u8*>(buffer), m_node_size, ofs);
    }

    // Calculate extents.
    const auto end_offset = cur_offset + static_cast<s64>(param.size);
    s64 phys_offset = entry.GetPhysicalOffset();

    // Start merge tracking. merge_size accumulates fragment bytes pending a
    // contiguous non-fragment entry; readable_size is the confirmed total.
    s64 merge_size = 0;
    s64 readable_size = 0;
    bool merged = false;

    // Iterate.
    auto entry_index = param.entry_index;
    for (const auto entry_count = param.entry_set.count; entry_index < entry_count; ++entry_index) {
        // If we're past the end, we're done.
        if (end_offset <= cur_offset) {
            break;
        }

        // Validate the entry offset.
        const auto entry_offset = entry.GetVirtualOffset();
        R_UNLESS(entry_offset <= cur_offset, ResultInvalidIndirectEntryOffset);

        // Get the next entry.
        EntryType next_entry = {};
        s64 next_entry_offset;

        if (entry_index + 1 < entry_count) {
            if (buffer != nullptr) {
                const auto ofs = impl::GetBucketTreeEntryOffset(0, m_entry_size, entry_index + 1);
                std::memcpy(std::addressof(next_entry), buffer + ofs, m_entry_size);
            } else {
                const auto ofs = impl::GetBucketTreeEntryOffset(param.entry_set.index, m_node_size,
                                                                m_entry_size, entry_index + 1);
                m_entry_storage->ReadObject(std::addressof(next_entry), ofs);
            }

            next_entry_offset = next_entry.GetVirtualOffset();
            R_UNLESS(param.offsets.IsInclude(next_entry_offset), ResultInvalidIndirectEntryOffset);
        } else {
            // Last entry in the set: its coverage ends at the entry set's end offset.
            next_entry_offset = param.entry_set.offset;
        }

        // Validate the next entry offset.
        R_UNLESS(cur_offset < next_entry_offset, ResultInvalidIndirectEntryOffset);

        // Determine how much data there is.
        const auto data_size = next_entry_offset - cur_offset;
        ASSERT(data_size > 0);

        // Determine how much data we should read.
        const auto remaining_size = end_offset - cur_offset;
        const size_t read_size = static_cast<size_t>(std::min(data_size, remaining_size));
        ASSERT(read_size <= param.size);

        // Update our merge tracking.
        if (entry.IsFragment()) {
            // If we can't merge, stop looping.
            if (EntryType::FragmentSizeMax <= read_size || remaining_size <= data_size) {
                break;
            }

            // Otherwise, add the current size to the merge size.
            merge_size += read_size;
        } else {
            // If we can't merge, stop looping. phys_offset tracks the expected
            // physical position if entries were back-to-back; a mismatch means a
            // physical discontinuity.
            if (phys_offset != entry.GetPhysicalOffset()) {
                break;
            }

            // Add the size to the readable amount.
            readable_size += merge_size + read_size;
            ASSERT(readable_size <= static_cast<s64>(param.size));

            // Update whether we've merged.
            merged |= merge_size > 0;
            merge_size = 0;
        }

        // Advance.
        cur_offset += read_size;
        ASSERT(cur_offset <= end_offset);

        phys_offset += next_entry_offset - entry_offset;
        entry = next_entry;
    }

    // If we merged, set our readable size.
    if (merged) {
        out_info->SetReadSize(static_cast<size_t>(readable_size));
    }
    out_info->SetSkipCount(entry_index - param.entry_index);

    R_SUCCEED();
}
146
147template <typename EntryType>
148Result BucketTree::Visitor::ScanContinuousReading(ContinuousReadingInfo* out_info, s64 offset,
149 size_t size) const {
150 static_assert(std::is_trivial_v<EntryType>);
151 ASSERT(this->IsValid());
152
153 // Create our parameters.
154 ContinuousReadingParam<EntryType> param = {
155 .offset = offset,
156 .size = size,
157 .entry_set = m_entry_set.header,
158 .entry_index = m_entry_index,
159 .offsets{},
160 .entry{},
161 };
162 std::memcpy(std::addressof(param.offsets), std::addressof(m_offsets),
163 sizeof(BucketTree::Offsets));
164 std::memcpy(std::addressof(param.entry), m_entry, sizeof(EntryType));
165
166 // Scan.
167 R_RETURN(m_tree->ScanContinuousReading<EntryType>(out_info, param));
168}
169
170} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_bucket_tree_utils.h b/src/core/file_sys/fssystem/fssystem_bucket_tree_utils.h
new file mode 100644
index 000000000..5503613fc
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_bucket_tree_utils.h
@@ -0,0 +1,110 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
7
8namespace FileSys::impl {
9
10class SafeValue {
11public:
12 static s64 GetInt64(const void* ptr) {
13 s64 value;
14 std::memcpy(std::addressof(value), ptr, sizeof(s64));
15 return value;
16 }
17
18 static s64 GetInt64(const s64* ptr) {
19 return GetInt64(static_cast<const void*>(ptr));
20 }
21
22 static s64 GetInt64(const s64& v) {
23 return GetInt64(std::addressof(v));
24 }
25
26 static void SetInt64(void* dst, const void* src) {
27 std::memcpy(dst, src, sizeof(s64));
28 }
29
30 static void SetInt64(void* dst, const s64* src) {
31 return SetInt64(dst, static_cast<const void*>(src));
32 }
33
34 static void SetInt64(void* dst, const s64& v) {
35 return SetInt64(dst, std::addressof(v));
36 }
37};
38
// In-memory view of one bucket tree node: a NodeHeader immediately followed by
// an array of payload items (s64 offsets, or entries of a runtime-known size).
// IteratorType determines how the payload array is traversed; the entry_size
// overloads are for payloads whose element size is only known at runtime.
template <typename IteratorType>
struct BucketTreeNode {
    using Header = BucketTree::NodeHeader;

    Header header;

    s32 GetCount() const {
        return this->header.count;
    }

    // The payload begins directly after the header (header + 1 pointer arithmetic).
    void* GetArray() {
        return std::addressof(this->header) + 1;
    }
    template <typename T>
    T* GetArray() {
        return reinterpret_cast<T*>(this->GetArray());
    }
    const void* GetArray() const {
        return std::addressof(this->header) + 1;
    }
    template <typename T>
    const T* GetArray() const {
        return reinterpret_cast<const T*>(this->GetArray());
    }

    // First s64 of the payload is the node's begin offset; the header's offset
    // field is its end offset.
    s64 GetBeginOffset() const {
        return *this->GetArray<s64>();
    }
    s64 GetEndOffset() const {
        return this->header.offset;
    }

    // Iteration over an s64-offset payload.
    IteratorType GetBegin() {
        return IteratorType(this->GetArray<s64>());
    }
    IteratorType GetEnd() {
        return IteratorType(this->GetArray<s64>()) + this->header.count;
    }
    IteratorType GetBegin() const {
        return IteratorType(this->GetArray<s64>());
    }
    IteratorType GetEnd() const {
        return IteratorType(this->GetArray<s64>()) + this->header.count;
    }

    // Iteration over a payload of runtime-sized entries.
    IteratorType GetBegin(size_t entry_size) {
        return IteratorType(this->GetArray(), entry_size);
    }
    IteratorType GetEnd(size_t entry_size) {
        return IteratorType(this->GetArray(), entry_size) + this->header.count;
    }
    IteratorType GetBegin(size_t entry_size) const {
        return IteratorType(this->GetArray(), entry_size);
    }
    IteratorType GetEnd(size_t entry_size) const {
        return IteratorType(this->GetArray(), entry_size) + this->header.count;
    }
};
97
98constexpr inline s64 GetBucketTreeEntryOffset(s64 entry_set_offset, size_t entry_size,
99 s32 entry_index) {
100 return entry_set_offset + sizeof(BucketTree::NodeHeader) +
101 entry_index * static_cast<s64>(entry_size);
102}
103
104constexpr inline s64 GetBucketTreeEntryOffset(s32 entry_set_index, size_t node_size,
105 size_t entry_size, s32 entry_index) {
106 return GetBucketTreeEntryOffset(entry_set_index * static_cast<s64>(node_size), entry_size,
107 entry_index);
108}
109
110} // namespace FileSys::impl
diff --git a/src/core/file_sys/fssystem/fssystem_compressed_storage.h b/src/core/file_sys/fssystem/fssystem_compressed_storage.h
new file mode 100644
index 000000000..e407add1b
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_compressed_storage.h
@@ -0,0 +1,960 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "common/literals.h"
7
8#include "core/file_sys/errors.h"
9#include "core/file_sys/fssystem/fs_i_storage.h"
10#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
11#include "core/file_sys/fssystem/fssystem_compression_common.h"
12#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
13#include "core/file_sys/vfs.h"
14
15namespace FileSys {
16
17using namespace Common::Literals;
18
19class CompressedStorage : public IReadOnlyStorage {
20 YUZU_NON_COPYABLE(CompressedStorage);
21 YUZU_NON_MOVEABLE(CompressedStorage);
22
23public:
24 static constexpr size_t NodeSize = 16_KiB;
25
26 struct Entry {
27 s64 virt_offset;
28 s64 phys_offset;
29 CompressionType compression_type;
30 s32 phys_size;
31
32 s64 GetPhysicalSize() const {
33 return this->phys_size;
34 }
35 };
36 static_assert(std::is_trivial_v<Entry>);
37 static_assert(sizeof(Entry) == 0x18);
38
39public:
40 static constexpr s64 QueryNodeStorageSize(s32 entry_count) {
41 return BucketTree::QueryNodeStorageSize(NodeSize, sizeof(Entry), entry_count);
42 }
43
44 static constexpr s64 QueryEntryStorageSize(s32 entry_count) {
45 return BucketTree::QueryEntryStorageSize(NodeSize, sizeof(Entry), entry_count);
46 }
47
48private:
49 class CompressedStorageCore {
50 YUZU_NON_COPYABLE(CompressedStorageCore);
51 YUZU_NON_MOVEABLE(CompressedStorageCore);
52
53 private:
54 size_t m_block_size_max;
55 size_t m_continuous_reading_size_max;
56 BucketTree m_table;
57 VirtualFile m_data_storage;
58 GetDecompressorFunction m_get_decompressor_function;
59
60 public:
61 CompressedStorageCore() : m_table(), m_data_storage() {}
62
63 ~CompressedStorageCore() {
64 this->Finalize();
65 }
66
67 public:
68 Result Initialize(VirtualFile data_storage, VirtualFile node_storage,
69 VirtualFile entry_storage, s32 bktr_entry_count, size_t block_size_max,
70 size_t continuous_reading_size_max,
71 GetDecompressorFunction get_decompressor) {
72 // Check pre-conditions.
73 ASSERT(0 < block_size_max);
74 ASSERT(block_size_max <= continuous_reading_size_max);
75 ASSERT(get_decompressor != nullptr);
76
77 // Initialize our entry table.
78 R_TRY(m_table.Initialize(node_storage, entry_storage, NodeSize, sizeof(Entry),
79 bktr_entry_count));
80
81 // Set our other fields.
82 m_block_size_max = block_size_max;
83 m_continuous_reading_size_max = continuous_reading_size_max;
84 m_data_storage = data_storage;
85 m_get_decompressor_function = get_decompressor;
86
87 R_SUCCEED();
88 }
89
90 void Finalize() {
91 if (this->IsInitialized()) {
92 m_table.Finalize();
93 m_data_storage = VirtualFile();
94 }
95 }
96
97 VirtualFile GetDataStorage() {
98 return m_data_storage;
99 }
100
101 Result GetDataStorageSize(s64* out) {
102 // Check pre-conditions.
103 ASSERT(out != nullptr);
104
105 // Get size.
106 *out = m_data_storage->GetSize();
107
108 R_SUCCEED();
109 }
110
111 BucketTree& GetEntryTable() {
112 return m_table;
113 }
114
115 Result GetEntryList(Entry* out_entries, s32* out_read_count, s32 max_entry_count,
116 s64 offset, s64 size) {
117 // Check pre-conditions.
118 ASSERT(offset >= 0);
119 ASSERT(size >= 0);
120 ASSERT(this->IsInitialized());
121
122 // Check that we can output the count.
123 R_UNLESS(out_read_count != nullptr, ResultNullptrArgument);
124
125 // Check that we have anything to read at all.
126 R_SUCCEED_IF(size == 0);
127
128 // Check that either we have a buffer, or this is to determine how many we need.
129 if (max_entry_count != 0) {
130 R_UNLESS(out_entries != nullptr, ResultNullptrArgument);
131 }
132
133 // Get the table offsets.
134 BucketTree::Offsets table_offsets;
135 R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));
136
137 // Validate arguments.
138 R_UNLESS(table_offsets.IsInclude(offset, size), ResultOutOfRange);
139
140 // Find the offset in our tree.
141 BucketTree::Visitor visitor;
142 R_TRY(m_table.Find(std::addressof(visitor), offset));
143 {
144 const auto entry_offset = visitor.Get<Entry>()->virt_offset;
145 R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset),
146 ResultUnexpectedInCompressedStorageA);
147 }
148
149 // Get the entries.
150 const auto end_offset = offset + size;
151 s32 read_count = 0;
152 while (visitor.Get<Entry>()->virt_offset < end_offset) {
153 // If we should be setting the output, do so.
154 if (max_entry_count != 0) {
155 // Ensure we only read as many entries as we can.
156 if (read_count >= max_entry_count) {
157 break;
158 }
159
160 // Set the current output entry.
161 out_entries[read_count] = *visitor.Get<Entry>();
162 }
163
164 // Increase the read count.
165 ++read_count;
166
167 // If we're at the end, we're done.
168 if (!visitor.CanMoveNext()) {
169 break;
170 }
171
172 // Move to the next entry.
173 R_TRY(visitor.MoveNext());
174 }
175
176 // Set the output read count.
177 *out_read_count = read_count;
178 R_SUCCEED();
179 }
180
181 Result GetSize(s64* out) {
182 // Check pre-conditions.
183 ASSERT(out != nullptr);
184
185 // Get our table offsets.
186 BucketTree::Offsets offsets;
187 R_TRY(m_table.GetOffsets(std::addressof(offsets)));
188
189 // Set the output.
190 *out = offsets.end_offset;
191 R_SUCCEED();
192 }
193
194 Result OperatePerEntry(s64 offset, s64 size, auto f) {
195 // Check pre-conditions.
196 ASSERT(offset >= 0);
197 ASSERT(size >= 0);
198 ASSERT(this->IsInitialized());
199
200 // Succeed if there's nothing to operate on.
201 R_SUCCEED_IF(size == 0);
202
203 // Get the table offsets.
204 BucketTree::Offsets table_offsets;
205 R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));
206
207 // Validate arguments.
208 R_UNLESS(table_offsets.IsInclude(offset, size), ResultOutOfRange);
209
210 // Find the offset in our tree.
211 BucketTree::Visitor visitor;
212 R_TRY(m_table.Find(std::addressof(visitor), offset));
213 {
214 const auto entry_offset = visitor.Get<Entry>()->virt_offset;
215 R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset),
216 ResultUnexpectedInCompressedStorageA);
217 }
218
219 // Prepare to operate in chunks.
220 auto cur_offset = offset;
221 const auto end_offset = offset + static_cast<s64>(size);
222
223 while (cur_offset < end_offset) {
224 // Get the current entry.
225 const auto cur_entry = *visitor.Get<Entry>();
226
227 // Get and validate the entry's offset.
228 const auto cur_entry_offset = cur_entry.virt_offset;
229 R_UNLESS(cur_entry_offset <= cur_offset, ResultUnexpectedInCompressedStorageA);
230
231 // Get and validate the next entry offset.
232 s64 next_entry_offset;
233 if (visitor.CanMoveNext()) {
234 R_TRY(visitor.MoveNext());
235 next_entry_offset = visitor.Get<Entry>()->virt_offset;
236 R_UNLESS(table_offsets.IsInclude(next_entry_offset),
237 ResultUnexpectedInCompressedStorageA);
238 } else {
239 next_entry_offset = table_offsets.end_offset;
240 }
241 R_UNLESS(cur_offset < next_entry_offset, ResultUnexpectedInCompressedStorageA);
242
243 // Get the offset of the entry in the data we read.
244 const auto data_offset = cur_offset - cur_entry_offset;
245 const auto data_size = (next_entry_offset - cur_entry_offset);
246 ASSERT(data_size > 0);
247
248 // Determine how much is left.
249 const auto remaining_size = end_offset - cur_offset;
250 const auto cur_size = std::min<s64>(remaining_size, data_size - data_offset);
251 ASSERT(cur_size <= size);
252
253 // Get the data storage size.
254 s64 storage_size = m_data_storage->GetSize();
255
256 // Check that our read remains naively physically in bounds.
257 R_UNLESS(0 <= cur_entry.phys_offset && cur_entry.phys_offset <= storage_size,
258 ResultUnexpectedInCompressedStorageC);
259
260 // If we have any compression, verify that we remain physically in bounds.
261 if (cur_entry.compression_type != CompressionType::None) {
262 R_UNLESS(cur_entry.phys_offset + cur_entry.GetPhysicalSize() <= storage_size,
263 ResultUnexpectedInCompressedStorageC);
264 }
265
266 // Check that block alignment requirements are met.
267 if (CompressionTypeUtility::IsBlockAlignmentRequired(cur_entry.compression_type)) {
268 R_UNLESS(Common::IsAligned(cur_entry.phys_offset, CompressionBlockAlignment),
269 ResultUnexpectedInCompressedStorageA);
270 }
271
272 // Invoke the operator.
273 bool is_continuous = true;
274 R_TRY(
275 f(std::addressof(is_continuous), cur_entry, data_size, data_offset, cur_size));
276
277 // If not continuous, we're done.
278 if (!is_continuous) {
279 break;
280 }
281
282 // Advance.
283 cur_offset += cur_size;
284 }
285
286 R_SUCCEED();
287 }
288
289 public:
290 using ReadImplFunction = std::function<Result(void*, size_t)>;
291 using ReadFunction = std::function<Result(size_t, const ReadImplFunction&)>;
292
293 public:
294 Result Read(s64 offset, s64 size, const ReadFunction& read_func) {
295 // Check pre-conditions.
296 ASSERT(offset >= 0);
297 ASSERT(this->IsInitialized());
298
 299 // Succeed immediately, if we have nothing to read.
300 R_SUCCEED_IF(size == 0);
301
302 // Declare read lambda.
303 constexpr int EntriesCountMax = 0x80;
304 struct Entries {
305 CompressionType compression_type;
306 u32 gap_from_prev;
307 u32 physical_size;
308 u32 virtual_size;
309 };
310 Entries entries[EntriesCountMax];
311 s32 entry_count = 0;
312 Entry prev_entry = {
313 .virt_offset = -1,
314 };
315 bool will_allocate_pooled_buffer = false;
316 s64 required_access_physical_offset = 0;
317 s64 required_access_physical_size = 0;
318
319 auto PerformRequiredRead = [&]() -> Result {
320 // If there are no entries, we have nothing to do.
321 R_SUCCEED_IF(entry_count == 0);
322
323 // Get the remaining size in a convenient form.
324 const size_t total_required_size =
325 static_cast<size_t>(required_access_physical_size);
326
327 // Perform the read based on whether we need to allocate a buffer.
328 if (will_allocate_pooled_buffer) {
329 // Allocate a pooled buffer.
330 PooledBuffer pooled_buffer;
331 if (pooled_buffer.GetAllocatableSizeMax() >= total_required_size) {
332 pooled_buffer.Allocate(total_required_size, m_block_size_max);
333 } else {
334 pooled_buffer.AllocateParticularlyLarge(
335 std::min<size_t>(
336 total_required_size,
337 PooledBuffer::GetAllocatableParticularlyLargeSizeMax()),
338 m_block_size_max);
339 }
340
341 // Read each of the entries.
342 for (s32 entry_idx = 0; entry_idx < entry_count; ++entry_idx) {
343 // Determine the current read size.
344 bool will_use_pooled_buffer = false;
345 const size_t cur_read_size = [&]() -> size_t {
346 if (const size_t target_entry_size =
347 static_cast<size_t>(entries[entry_idx].physical_size) +
348 static_cast<size_t>(entries[entry_idx].gap_from_prev);
349 target_entry_size <= pooled_buffer.GetSize()) {
350 // We'll be using the pooled buffer.
351 will_use_pooled_buffer = true;
352
353 // Determine how much we can read.
354 const size_t max_size = std::min<size_t>(
355 required_access_physical_size, pooled_buffer.GetSize());
356
357 size_t read_size = 0;
358 for (auto n = entry_idx; n < entry_count; ++n) {
359 const size_t cur_entry_size =
360 static_cast<size_t>(entries[n].physical_size) +
361 static_cast<size_t>(entries[n].gap_from_prev);
362 if (read_size + cur_entry_size > max_size) {
363 break;
364 }
365
366 read_size += cur_entry_size;
367 }
368
369 return read_size;
370 } else {
371 // If we don't fit, we must be uncompressed.
372 ASSERT(entries[entry_idx].compression_type ==
373 CompressionType::None);
374
375 // We can perform the whole of an uncompressed read directly.
376 return entries[entry_idx].virtual_size;
377 }
378 }();
379
380 // Perform the read based on whether or not we'll use the pooled buffer.
381 if (will_use_pooled_buffer) {
382 // Read the compressed data into the pooled buffer.
383 auto* const buffer = pooled_buffer.GetBuffer();
384 m_data_storage->Read(reinterpret_cast<u8*>(buffer), cur_read_size,
385 required_access_physical_offset);
386
387 // Decompress the data.
388 size_t buffer_offset;
389 for (buffer_offset = 0;
390 entry_idx < entry_count &&
391 ((static_cast<size_t>(entries[entry_idx].physical_size) +
392 static_cast<size_t>(entries[entry_idx].gap_from_prev)) == 0 ||
393 buffer_offset < cur_read_size);
394 buffer_offset += entries[entry_idx++].physical_size) {
395 // Advance by the relevant gap.
396 buffer_offset += entries[entry_idx].gap_from_prev;
397
398 const auto compression_type = entries[entry_idx].compression_type;
399 switch (compression_type) {
400 case CompressionType::None: {
401 // Check that we can remain within bounds.
402 ASSERT(buffer_offset + entries[entry_idx].virtual_size <=
403 cur_read_size);
404
405 // Perform no decompression.
406 R_TRY(read_func(
407 entries[entry_idx].virtual_size,
408 [&](void* dst, size_t dst_size) -> Result {
409 // Check that the size is valid.
410 ASSERT(dst_size == entries[entry_idx].virtual_size);
411
412 // We have no compression, so just copy the data
413 // out.
414 std::memcpy(dst, buffer + buffer_offset,
415 entries[entry_idx].virtual_size);
416 R_SUCCEED();
417 }));
418
419 break;
420 }
421 case CompressionType::Zeros: {
422 // Check that we can remain within bounds.
423 ASSERT(buffer_offset <= cur_read_size);
424
425 // Zero the memory.
426 R_TRY(read_func(
427 entries[entry_idx].virtual_size,
428 [&](void* dst, size_t dst_size) -> Result {
429 // Check that the size is valid.
430 ASSERT(dst_size == entries[entry_idx].virtual_size);
431
432 // The data is zeroes, so zero the buffer.
433 std::memset(dst, 0, entries[entry_idx].virtual_size);
434 R_SUCCEED();
435 }));
436
437 break;
438 }
439 default: {
440 // Check that we can remain within bounds.
441 ASSERT(buffer_offset + entries[entry_idx].physical_size <=
442 cur_read_size);
443
444 // Get the decompressor.
445 const auto decompressor =
446 this->GetDecompressor(compression_type);
447 R_UNLESS(decompressor != nullptr,
448 ResultUnexpectedInCompressedStorageB);
449
450 // Decompress the data.
451 R_TRY(read_func(entries[entry_idx].virtual_size,
452 [&](void* dst, size_t dst_size) -> Result {
453 // Check that the size is valid.
454 ASSERT(dst_size ==
455 entries[entry_idx].virtual_size);
456
457 // Perform the decompression.
458 R_RETURN(decompressor(
459 dst, entries[entry_idx].virtual_size,
460 buffer + buffer_offset,
461 entries[entry_idx].physical_size));
462 }));
463
464 break;
465 }
466 }
467 }
468
469 // Check that we processed the correct amount of data.
470 ASSERT(buffer_offset == cur_read_size);
471 } else {
472 // Account for the gap from the previous entry.
473 required_access_physical_offset += entries[entry_idx].gap_from_prev;
474 required_access_physical_size -= entries[entry_idx].gap_from_prev;
475
476 // We don't need the buffer (as the data is uncompressed), so just
477 // execute the read.
478 R_TRY(
479 read_func(cur_read_size, [&](void* dst, size_t dst_size) -> Result {
480 // Check that the size is valid.
481 ASSERT(dst_size == cur_read_size);
482
483 // Perform the read.
484 m_data_storage->Read(reinterpret_cast<u8*>(dst), cur_read_size,
485 required_access_physical_offset);
486
487 R_SUCCEED();
488 }));
489 }
490
491 // Advance on.
492 required_access_physical_offset += cur_read_size;
493 required_access_physical_size -= cur_read_size;
494 }
495
496 // Verify that we have nothing remaining to read.
497 ASSERT(required_access_physical_size == 0);
498
499 R_SUCCEED();
500 } else {
501 // We don't need a buffer, so just execute the read.
502 R_TRY(read_func(total_required_size, [&](void* dst, size_t dst_size) -> Result {
503 // Check that the size is valid.
504 ASSERT(dst_size == total_required_size);
505
506 // Perform the read.
507 m_data_storage->Read(reinterpret_cast<u8*>(dst), total_required_size,
508 required_access_physical_offset);
509
510 R_SUCCEED();
511 }));
512 }
513
514 R_SUCCEED();
515 };
516
517 R_TRY(this->OperatePerEntry(
518 offset, size,
519 [&](bool* out_continuous, const Entry& entry, s64 virtual_data_size,
520 s64 data_offset, s64 read_size) -> Result {
521 // Determine the physical extents.
522 s64 physical_offset, physical_size;
523 if (CompressionTypeUtility::IsRandomAccessible(entry.compression_type)) {
524 physical_offset = entry.phys_offset + data_offset;
525 physical_size = read_size;
526 } else {
527 physical_offset = entry.phys_offset;
528 physical_size = entry.GetPhysicalSize();
529 }
530
531 // If we have a pending data storage operation, perform it if we have to.
532 const s64 required_access_physical_end =
533 required_access_physical_offset + required_access_physical_size;
534 if (required_access_physical_size > 0) {
535 const bool required_by_gap =
536 !(required_access_physical_end <= physical_offset &&
537 physical_offset <= Common::AlignUp(required_access_physical_end,
538 CompressionBlockAlignment));
539 const bool required_by_continuous_size =
540 ((physical_size + physical_offset) - required_access_physical_end) +
541 required_access_physical_size >
542 static_cast<s64>(m_continuous_reading_size_max);
543 const bool required_by_entry_count = entry_count == EntriesCountMax;
544 if (required_by_gap || required_by_continuous_size ||
545 required_by_entry_count) {
546 // Check that our planned access is sane.
547 ASSERT(!will_allocate_pooled_buffer ||
548 required_access_physical_size <=
549 static_cast<s64>(m_continuous_reading_size_max));
550
551 // Perform the required read.
552 const Result rc = PerformRequiredRead();
553 if (R_FAILED(rc)) {
554 R_THROW(rc);
555 }
556
557 // Reset our requirements.
558 prev_entry.virt_offset = -1;
559 required_access_physical_size = 0;
560 entry_count = 0;
561 will_allocate_pooled_buffer = false;
562 }
563 }
564
565 // Sanity check that we're within bounds on entries.
566 ASSERT(entry_count < EntriesCountMax);
567
568 // Determine if a buffer allocation is needed.
569 if (entry.compression_type != CompressionType::None ||
570 (prev_entry.virt_offset >= 0 &&
571 entry.virt_offset - prev_entry.virt_offset !=
572 entry.phys_offset - prev_entry.phys_offset)) {
573 will_allocate_pooled_buffer = true;
574 }
575
576 // If we need to access the data storage, update our required access parameters.
577 if (CompressionTypeUtility::IsDataStorageAccessRequired(
578 entry.compression_type)) {
579 // If the data is compressed, ensure the access is sane.
580 if (entry.compression_type != CompressionType::None) {
581 R_UNLESS(data_offset == 0, ResultInvalidOffset);
582 R_UNLESS(virtual_data_size == read_size, ResultInvalidSize);
583 R_UNLESS(entry.GetPhysicalSize() <= static_cast<s64>(m_block_size_max),
584 ResultUnexpectedInCompressedStorageD);
585 }
586
587 // Update the required access parameters.
588 s64 gap_from_prev;
589 if (required_access_physical_size > 0) {
590 gap_from_prev = physical_offset - required_access_physical_end;
591 } else {
592 gap_from_prev = 0;
593 required_access_physical_offset = physical_offset;
594 }
595 required_access_physical_size += physical_size + gap_from_prev;
596
597                     // Create an entry to access the data storage.
598 entries[entry_count++] = {
599 .compression_type = entry.compression_type,
600 .gap_from_prev = static_cast<u32>(gap_from_prev),
601 .physical_size = static_cast<u32>(physical_size),
602 .virtual_size = static_cast<u32>(read_size),
603 };
604 } else {
605 // Verify that we're allowed to be operating on the non-data-storage-access
606 // type.
607 R_UNLESS(entry.compression_type == CompressionType::Zeros,
608 ResultUnexpectedInCompressedStorageB);
609
610 // If we have entries, create a fake entry for the zero region.
611 if (entry_count != 0) {
612 // We need to have a physical size.
613 R_UNLESS(entry.GetPhysicalSize() != 0,
614 ResultUnexpectedInCompressedStorageD);
615
616 // Create a fake entry.
617 entries[entry_count++] = {
618 .compression_type = CompressionType::Zeros,
619 .gap_from_prev = 0,
620 .physical_size = 0,
621 .virtual_size = static_cast<u32>(read_size),
622 };
623 } else {
624                     // We have no entries, so we can just perform the read.
625 const Result rc =
626 read_func(static_cast<size_t>(read_size),
627 [&](void* dst, size_t dst_size) -> Result {
628 // Check the space we should zero is correct.
629 ASSERT(dst_size == static_cast<size_t>(read_size));
630
631 // Zero the memory.
632 std::memset(dst, 0, read_size);
633 R_SUCCEED();
634 });
635 if (R_FAILED(rc)) {
636 R_THROW(rc);
637 }
638 }
639 }
640
641 // Set the previous entry.
642 prev_entry = entry;
643
644 // We're continuous.
645 *out_continuous = true;
646 R_SUCCEED();
647 }));
648
649 // If we still have a pending access, perform it.
650 if (required_access_physical_size != 0) {
651 R_TRY(PerformRequiredRead());
652 }
653
654 R_SUCCEED();
655 }
656
657 private:
658 DecompressorFunction GetDecompressor(CompressionType type) const {
659 // Check that we can get a decompressor for the type.
660 if (CompressionTypeUtility::IsUnknownType(type)) {
661 return nullptr;
662 }
663
664 // Get the decompressor.
665 return m_get_decompressor_function(type);
666 }
667
668 bool IsInitialized() const {
669 return m_table.IsInitialized();
670 }
671 };
672
673 class CacheManager {
674 YUZU_NON_COPYABLE(CacheManager);
675 YUZU_NON_MOVEABLE(CacheManager);
676
677 private:
678 struct AccessRange {
679 s64 virtual_offset;
680 s64 virtual_size;
681 u32 physical_size;
682 bool is_block_alignment_required;
683
684 s64 GetEndVirtualOffset() const {
685 return this->virtual_offset + this->virtual_size;
686 }
687 };
688 static_assert(std::is_trivial_v<AccessRange>);
689
690 private:
691 s64 m_storage_size = 0;
692
693 public:
694 CacheManager() = default;
695
696 public:
697 Result Initialize(s64 storage_size, size_t cache_size_0, size_t cache_size_1,
698 size_t max_cache_entries) {
699 // Set our fields.
700 m_storage_size = storage_size;
701
702 R_SUCCEED();
703 }
704
705 Result Read(CompressedStorageCore& core, s64 offset, void* buffer, size_t size) {
706 // If we have nothing to read, succeed.
707 R_SUCCEED_IF(size == 0);
708
709 // Check that we have a buffer to read into.
710 R_UNLESS(buffer != nullptr, ResultNullptrArgument);
711
712 // Check that the read is in bounds.
713 R_UNLESS(offset <= m_storage_size, ResultInvalidOffset);
714
715 // Determine how much we can read.
716 const size_t read_size = std::min<size_t>(size, m_storage_size - offset);
717
718 // Create head/tail ranges.
719 AccessRange head_range = {};
720 AccessRange tail_range = {};
721 bool is_tail_set = false;
722
723 // Operate to determine the head range.
724 R_TRY(core.OperatePerEntry(
725 offset, 1,
726 [&](bool* out_continuous, const Entry& entry, s64 virtual_data_size,
727 s64 data_offset, s64 data_read_size) -> Result {
728 // Set the head range.
729 head_range = {
730 .virtual_offset = entry.virt_offset,
731 .virtual_size = virtual_data_size,
732 .physical_size = static_cast<u32>(entry.phys_size),
733 .is_block_alignment_required =
734 CompressionTypeUtility::IsBlockAlignmentRequired(
735 entry.compression_type),
736 };
737
738 // If required, set the tail range.
739 if (static_cast<s64>(offset + read_size) <=
740 entry.virt_offset + virtual_data_size) {
741 tail_range = {
742 .virtual_offset = entry.virt_offset,
743 .virtual_size = virtual_data_size,
744 .physical_size = static_cast<u32>(entry.phys_size),
745 .is_block_alignment_required =
746 CompressionTypeUtility::IsBlockAlignmentRequired(
747 entry.compression_type),
748 };
749 is_tail_set = true;
750 }
751
752 // We only want to determine the head range, so we're not continuous.
753 *out_continuous = false;
754 R_SUCCEED();
755 }));
756
757 // If necessary, determine the tail range.
758 if (!is_tail_set) {
759 R_TRY(core.OperatePerEntry(
760 offset + read_size - 1, 1,
761 [&](bool* out_continuous, const Entry& entry, s64 virtual_data_size,
762 s64 data_offset, s64 data_read_size) -> Result {
763 // Set the tail range.
764 tail_range = {
765 .virtual_offset = entry.virt_offset,
766 .virtual_size = virtual_data_size,
767 .physical_size = static_cast<u32>(entry.phys_size),
768 .is_block_alignment_required =
769 CompressionTypeUtility::IsBlockAlignmentRequired(
770 entry.compression_type),
771 };
772
773 // We only want to determine the tail range, so we're not continuous.
774 *out_continuous = false;
775 R_SUCCEED();
776 }));
777 }
778
779 // Begin performing the accesses.
780 s64 cur_offset = offset;
781 size_t cur_size = read_size;
782 char* cur_dst = static_cast<char*>(buffer);
783
784 // Determine our alignment.
785 const bool head_unaligned = head_range.is_block_alignment_required &&
786 (cur_offset != head_range.virtual_offset ||
787 static_cast<s64>(cur_size) < head_range.virtual_size);
788 const bool tail_unaligned = [&]() -> bool {
789 if (tail_range.is_block_alignment_required) {
790 if (static_cast<s64>(cur_size + cur_offset) ==
791 tail_range.GetEndVirtualOffset()) {
792 return false;
793 } else if (!head_unaligned) {
794 return true;
795 } else {
796 return head_range.GetEndVirtualOffset() <
797 static_cast<s64>(cur_size + cur_offset);
798 }
799 } else {
800 return false;
801 }
802 }();
803
804 // Determine start/end offsets.
805 const s64 start_offset =
806 head_range.is_block_alignment_required ? head_range.virtual_offset : cur_offset;
807 const s64 end_offset = tail_range.is_block_alignment_required
808 ? tail_range.GetEndVirtualOffset()
809 : cur_offset + cur_size;
810
811 // Perform the read.
812 bool is_burst_reading = false;
813 R_TRY(core.Read(
814 start_offset, end_offset - start_offset,
815 [&](size_t size_buffer_required,
816 const CompressedStorageCore::ReadImplFunction& read_impl) -> Result {
817 // Determine whether we're burst reading.
818 const AccessRange* unaligned_range = nullptr;
819 if (!is_burst_reading) {
820 // Check whether we're using head, tail, or none as unaligned.
821 if (head_unaligned && head_range.virtual_offset <= cur_offset &&
822 cur_offset < head_range.GetEndVirtualOffset()) {
823 unaligned_range = std::addressof(head_range);
824 } else if (tail_unaligned && tail_range.virtual_offset <= cur_offset &&
825 cur_offset < tail_range.GetEndVirtualOffset()) {
826 unaligned_range = std::addressof(tail_range);
827 } else {
828 is_burst_reading = true;
829 }
830 }
831 ASSERT((is_burst_reading ^ (unaligned_range != nullptr)));
832
833 // Perform reading by burst, or not.
834 if (is_burst_reading) {
835 // Check that the access is valid for burst reading.
836 ASSERT(size_buffer_required <= cur_size);
837
838 // Perform the read.
839 Result rc = read_impl(cur_dst, size_buffer_required);
840 if (R_FAILED(rc)) {
841 R_THROW(rc);
842 }
843
844 // Advance.
845 cur_dst += size_buffer_required;
846 cur_offset += size_buffer_required;
847 cur_size -= size_buffer_required;
848
849 // Determine whether we're going to continue burst reading.
850 const s64 offset_aligned =
851 tail_unaligned ? tail_range.virtual_offset : end_offset;
852 ASSERT(cur_offset <= offset_aligned);
853
854 if (offset_aligned <= cur_offset) {
855 is_burst_reading = false;
856 }
857 } else {
858 // We're not burst reading, so we have some unaligned range.
859 ASSERT(unaligned_range != nullptr);
860
861 // Check that the size is correct.
862 ASSERT(size_buffer_required ==
863 static_cast<size_t>(unaligned_range->virtual_size));
864
865 // Get a pooled buffer for our read.
866 PooledBuffer pooled_buffer;
867 pooled_buffer.Allocate(size_buffer_required, size_buffer_required);
868
869 // Perform read.
870 Result rc = read_impl(pooled_buffer.GetBuffer(), size_buffer_required);
871 if (R_FAILED(rc)) {
872 R_THROW(rc);
873 }
874
875 // Copy the data we read to the destination.
876 const size_t skip_size = cur_offset - unaligned_range->virtual_offset;
877 const size_t copy_size = std::min<size_t>(
878 cur_size, unaligned_range->GetEndVirtualOffset() - cur_offset);
879
880 std::memcpy(cur_dst, pooled_buffer.GetBuffer() + skip_size, copy_size);
881
882 // Advance.
883 cur_dst += copy_size;
884 cur_offset += copy_size;
885 cur_size -= copy_size;
886 }
887
888 R_SUCCEED();
889 }));
890
891 R_SUCCEED();
892 }
893 };
894
895private:
896 mutable CompressedStorageCore m_core;
897 mutable CacheManager m_cache_manager;
898
899public:
900 CompressedStorage() = default;
901 virtual ~CompressedStorage() {
902 this->Finalize();
903 }
904
905 Result Initialize(VirtualFile data_storage, VirtualFile node_storage, VirtualFile entry_storage,
906 s32 bktr_entry_count, size_t block_size_max,
907 size_t continuous_reading_size_max, GetDecompressorFunction get_decompressor,
908 size_t cache_size_0, size_t cache_size_1, s32 max_cache_entries) {
909 // Initialize our core.
910 R_TRY(m_core.Initialize(data_storage, node_storage, entry_storage, bktr_entry_count,
911 block_size_max, continuous_reading_size_max, get_decompressor));
912
913 // Get our core size.
914 s64 core_size = 0;
915 R_TRY(m_core.GetSize(std::addressof(core_size)));
916
917 // Initialize our cache manager.
918 R_TRY(m_cache_manager.Initialize(core_size, cache_size_0, cache_size_1, max_cache_entries));
919
920 R_SUCCEED();
921 }
922
923 void Finalize() {
924 m_core.Finalize();
925 }
926
927 VirtualFile GetDataStorage() {
928 return m_core.GetDataStorage();
929 }
930
931 Result GetDataStorageSize(s64* out) {
932 R_RETURN(m_core.GetDataStorageSize(out));
933 }
934
935 Result GetEntryList(Entry* out_entries, s32* out_read_count, s32 max_entry_count, s64 offset,
936 s64 size) {
937 R_RETURN(m_core.GetEntryList(out_entries, out_read_count, max_entry_count, offset, size));
938 }
939
940 BucketTree& GetEntryTable() {
941 return m_core.GetEntryTable();
942 }
943
944public:
945 virtual size_t GetSize() const override {
946 s64 ret{};
947 m_core.GetSize(&ret);
948 return ret;
949 }
950
951 virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
952 if (R_SUCCEEDED(m_cache_manager.Read(m_core, offset, buffer, size))) {
953 return size;
954 } else {
955 return 0;
956 }
957 }
958};
959
960} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_compression_common.h b/src/core/file_sys/fssystem/fssystem_compression_common.h
new file mode 100644
index 000000000..266e0a7e5
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_compression_common.h
@@ -0,0 +1,43 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "core/hle/result.h"
7
8namespace FileSys {
9
10enum class CompressionType : u8 {
11 None = 0,
12 Zeros = 1,
13 Two = 2,
14 Lz4 = 3,
15 Unknown = 4,
16};
17
18using DecompressorFunction = Result (*)(void*, size_t, const void*, size_t);
19using GetDecompressorFunction = DecompressorFunction (*)(CompressionType);
20
21constexpr s64 CompressionBlockAlignment = 0x10;
22
23namespace CompressionTypeUtility {
24
25constexpr bool IsBlockAlignmentRequired(CompressionType type) {
26 return type != CompressionType::None && type != CompressionType::Zeros;
27}
28
29constexpr bool IsDataStorageAccessRequired(CompressionType type) {
30 return type != CompressionType::Zeros;
31}
32
33constexpr bool IsRandomAccessible(CompressionType type) {
34 return type == CompressionType::None;
35}
36
37constexpr bool IsUnknownType(CompressionType type) {
38 return type >= CompressionType::Unknown;
39}
40
41} // namespace CompressionTypeUtility
42
43} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_compression_configuration.cpp b/src/core/file_sys/fssystem/fssystem_compression_configuration.cpp
new file mode 100644
index 000000000..8734f84ca
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_compression_configuration.cpp
@@ -0,0 +1,36 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "common/lz4_compression.h"
5#include "core/file_sys/fssystem/fssystem_compression_configuration.h"
6
7namespace FileSys {
8
9namespace {
10
11Result DecompressLz4(void* dst, size_t dst_size, const void* src, size_t src_size) {
12 auto result = Common::Compression::DecompressLZ4(dst, dst_size, src, src_size);
13 R_UNLESS(static_cast<size_t>(result) == dst_size, ResultUnexpectedInCompressedStorageC);
14 R_SUCCEED();
15}
16
17constexpr DecompressorFunction GetNcaDecompressorFunction(CompressionType type) {
18 switch (type) {
19 case CompressionType::Lz4:
20 return DecompressLz4;
21 default:
22 return nullptr;
23 }
24}
25
26constexpr NcaCompressionConfiguration g_nca_compression_configuration{
27 .get_decompressor = GetNcaDecompressorFunction,
28};
29
30} // namespace
31
32const NcaCompressionConfiguration* GetNcaCompressionConfiguration() {
33 return std::addressof(g_nca_compression_configuration);
34}
35
36} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_compression_configuration.h b/src/core/file_sys/fssystem/fssystem_compression_configuration.h
new file mode 100644
index 000000000..b4ec4f203
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_compression_configuration.h
@@ -0,0 +1,12 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "core/file_sys/fssystem/fssystem_nca_file_system_driver.h"
7
8namespace FileSys {
9
10const NcaCompressionConfiguration* GetNcaCompressionConfiguration();
11
12}
diff --git a/src/core/file_sys/fssystem/fssystem_crypto_configuration.cpp b/src/core/file_sys/fssystem/fssystem_crypto_configuration.cpp
new file mode 100644
index 000000000..7b89d4512
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_crypto_configuration.cpp
@@ -0,0 +1,57 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "core/crypto/aes_util.h"
5#include "core/crypto/key_manager.h"
6#include "core/file_sys/fssystem/fssystem_crypto_configuration.h"
7
8namespace FileSys {
9
10namespace {
11
12void GenerateKey(void* dst_key, size_t dst_key_size, const void* src_key, size_t src_key_size,
13 s32 key_type) {
14 if (key_type == static_cast<s32>(KeyType::ZeroKey)) {
15 std::memset(dst_key, 0, dst_key_size);
16 return;
17 }
18
19 if (key_type == static_cast<s32>(KeyType::InvalidKey) ||
20 key_type < static_cast<s32>(KeyType::ZeroKey) ||
21 key_type >= static_cast<s32>(KeyType::NcaExternalKey)) {
22 std::memset(dst_key, 0xFF, dst_key_size);
23 return;
24 }
25
26 const auto& instance = Core::Crypto::KeyManager::Instance();
27
28 if (key_type == static_cast<s32>(KeyType::NcaHeaderKey1) ||
29 key_type == static_cast<s32>(KeyType::NcaHeaderKey2)) {
30 const s32 key_index = static_cast<s32>(KeyType::NcaHeaderKey2) == key_type;
31 const auto key = instance.GetKey(Core::Crypto::S256KeyType::Header);
32 std::memcpy(dst_key, key.data() + key_index * 0x10, std::min(dst_key_size, key.size() / 2));
33 return;
34 }
35
36 const s32 key_generation =
37 std::max(key_type / NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount, 1) - 1;
38 const s32 key_index = key_type % NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount;
39
40 Core::Crypto::AESCipher<Core::Crypto::Key128> cipher(
41 instance.GetKey(Core::Crypto::S128KeyType::KeyArea, key_generation, key_index),
42 Core::Crypto::Mode::ECB);
43 cipher.Transcode(reinterpret_cast<const u8*>(src_key), src_key_size,
44 reinterpret_cast<u8*>(dst_key), Core::Crypto::Op::Decrypt);
45}
46
47} // namespace
48
49const NcaCryptoConfiguration& GetCryptoConfiguration() {
50 static const NcaCryptoConfiguration configuration = {
51 .generate_key = GenerateKey,
52 };
53
54 return configuration;
55}
56
57} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_crypto_configuration.h b/src/core/file_sys/fssystem/fssystem_crypto_configuration.h
new file mode 100644
index 000000000..7fd9c5a8d
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_crypto_configuration.h
@@ -0,0 +1,12 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "core/file_sys/fssystem/fssystem_nca_file_system_driver.h"
7
8namespace FileSys {
9
10const NcaCryptoConfiguration& GetCryptoConfiguration();
11
12}
diff --git a/src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.cpp b/src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.cpp
new file mode 100644
index 000000000..b2e031d5f
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.cpp
@@ -0,0 +1,132 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h"
5#include "core/file_sys/vfs_offset.h"
6
7namespace FileSys {
8
9HierarchicalIntegrityVerificationStorage::HierarchicalIntegrityVerificationStorage()
10 : m_data_size(-1) {
11 for (size_t i = 0; i < MaxLayers - 1; i++) {
12 m_verify_storages[i] = std::make_shared<IntegrityVerificationStorage>();
13 }
14}
15
16Result HierarchicalIntegrityVerificationStorage::Initialize(
17 const HierarchicalIntegrityVerificationInformation& info,
18 HierarchicalStorageInformation storage, int max_data_cache_entries, int max_hash_cache_entries,
19 s8 buffer_level) {
20 using AlignedStorage = AlignmentMatchingStoragePooledBuffer<1>;
21
22 // Validate preconditions.
23 ASSERT(IntegrityMinLayerCount <= info.max_layers && info.max_layers <= IntegrityMaxLayerCount);
24
25 // Set member variables.
26 m_max_layers = info.max_layers;
27
28 // Initialize the top level verification storage.
29 m_verify_storages[0]->Initialize(storage[HierarchicalStorageInformation::MasterStorage],
30 storage[HierarchicalStorageInformation::Layer1Storage],
31 static_cast<s64>(1) << info.info[0].block_order, HashSize,
32 false);
33
34 // Ensure we don't leak state if further initialization goes wrong.
35 ON_RESULT_FAILURE {
36 m_verify_storages[0]->Finalize();
37 m_data_size = -1;
38 };
39
40 // Initialize the top level buffer storage.
41 m_buffer_storages[0] = std::make_shared<AlignedStorage>(
42 m_verify_storages[0], static_cast<s64>(1) << info.info[0].block_order);
43 R_UNLESS(m_buffer_storages[0] != nullptr, ResultAllocationMemoryFailedAllocateShared);
44
45 // Prepare to initialize the level storages.
46 s32 level = 0;
47
48 // Ensure we don't leak state if further initialization goes wrong.
49 ON_RESULT_FAILURE_2 {
50 m_verify_storages[level + 1]->Finalize();
51 for (; level > 0; --level) {
52 m_buffer_storages[level].reset();
53 m_verify_storages[level]->Finalize();
54 }
55 };
56
57 // Initialize the level storages.
58 for (; level < m_max_layers - 3; ++level) {
59 // Initialize the verification storage.
60 auto buffer_storage =
61 std::make_shared<OffsetVfsFile>(m_buffer_storages[level], info.info[level].size, 0);
62 m_verify_storages[level + 1]->Initialize(
63 std::move(buffer_storage), storage[level + 2],
64 static_cast<s64>(1) << info.info[level + 1].block_order,
65 static_cast<s64>(1) << info.info[level].block_order, false);
66
67 // Initialize the buffer storage.
68 m_buffer_storages[level + 1] = std::make_shared<AlignedStorage>(
69 m_verify_storages[level + 1], static_cast<s64>(1) << info.info[level + 1].block_order);
70 R_UNLESS(m_buffer_storages[level + 1] != nullptr,
71 ResultAllocationMemoryFailedAllocateShared);
72 }
73
74 // Initialize the final level storage.
75 {
76 // Initialize the verification storage.
77 auto buffer_storage =
78 std::make_shared<OffsetVfsFile>(m_buffer_storages[level], info.info[level].size, 0);
79 m_verify_storages[level + 1]->Initialize(
80 std::move(buffer_storage), storage[level + 2],
81 static_cast<s64>(1) << info.info[level + 1].block_order,
82 static_cast<s64>(1) << info.info[level].block_order, true);
83
84 // Initialize the buffer storage.
85 m_buffer_storages[level + 1] = std::make_shared<AlignedStorage>(
86 m_verify_storages[level + 1], static_cast<s64>(1) << info.info[level + 1].block_order);
87 R_UNLESS(m_buffer_storages[level + 1] != nullptr,
88 ResultAllocationMemoryFailedAllocateShared);
89 }
90
91 // Set the data size.
92 m_data_size = info.info[level + 1].size;
93
94 // We succeeded.
95 R_SUCCEED();
96}
97
98void HierarchicalIntegrityVerificationStorage::Finalize() {
99 if (m_data_size >= 0) {
100 m_data_size = 0;
101
102 for (s32 level = m_max_layers - 2; level >= 0; --level) {
103 m_buffer_storages[level].reset();
104 m_verify_storages[level]->Finalize();
105 }
106
107 m_data_size = -1;
108 }
109}
110
111size_t HierarchicalIntegrityVerificationStorage::Read(u8* buffer, size_t size,
112 size_t offset) const {
113 // Validate preconditions.
114 ASSERT(m_data_size >= 0);
115
116 // Succeed if zero-size.
117 if (size == 0) {
118 return size;
119 }
120
121 // Validate arguments.
122 ASSERT(buffer != nullptr);
123
124 // Read the data.
125 return m_buffer_storages[m_max_layers - 2]->Read(buffer, size, offset);
126}
127
128size_t HierarchicalIntegrityVerificationStorage::GetSize() const {
129 return m_data_size;
130}
131
132} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h b/src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h
new file mode 100644
index 000000000..5e0a1d143
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h
@@ -0,0 +1,164 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "common/alignment.h"
7#include "core/file_sys/fssystem/fs_i_storage.h"
8#include "core/file_sys/fssystem/fs_types.h"
9#include "core/file_sys/fssystem/fssystem_alignment_matching_storage.h"
10#include "core/file_sys/fssystem/fssystem_integrity_verification_storage.h"
11#include "core/file_sys/vfs_offset.h"
12
13namespace FileSys {
14
15struct HierarchicalIntegrityVerificationLevelInformation {
16 Int64 offset;
17 Int64 size;
18 s32 block_order;
19 std::array<u8, 4> reserved;
20};
21static_assert(std::is_trivial_v<HierarchicalIntegrityVerificationLevelInformation>);
22static_assert(sizeof(HierarchicalIntegrityVerificationLevelInformation) == 0x18);
23static_assert(alignof(HierarchicalIntegrityVerificationLevelInformation) == 0x4);
24
25struct HierarchicalIntegrityVerificationInformation {
26 u32 max_layers;
27 HierarchicalIntegrityVerificationLevelInformation info[IntegrityMaxLayerCount - 1];
28 HashSalt seed;
29
30 s64 GetLayeredHashSize() const {
31 return this->info[this->max_layers - 2].offset;
32 }
33
34 s64 GetDataOffset() const {
35 return this->info[this->max_layers - 2].offset;
36 }
37
38 s64 GetDataSize() const {
39 return this->info[this->max_layers - 2].size;
40 }
41};
42static_assert(std::is_trivial_v<HierarchicalIntegrityVerificationInformation>);
43
44struct HierarchicalIntegrityVerificationMetaInformation {
45 u32 magic;
46 u32 version;
47 u32 master_hash_size;
48 HierarchicalIntegrityVerificationInformation level_hash_info;
49};
50static_assert(std::is_trivial_v<HierarchicalIntegrityVerificationMetaInformation>);
51
52struct HierarchicalIntegrityVerificationSizeSet {
53 s64 control_size;
54 s64 master_hash_size;
55 s64 layered_hash_sizes[IntegrityMaxLayerCount - 2];
56};
57static_assert(std::is_trivial_v<HierarchicalIntegrityVerificationSizeSet>);
58
59class HierarchicalIntegrityVerificationStorage : public IReadOnlyStorage {
60 YUZU_NON_COPYABLE(HierarchicalIntegrityVerificationStorage);
61 YUZU_NON_MOVEABLE(HierarchicalIntegrityVerificationStorage);
62
63private:
64 friend struct HierarchicalIntegrityVerificationMetaInformation;
65
66protected:
67 static constexpr s64 HashSize = 256 / 8;
68 static constexpr size_t MaxLayers = IntegrityMaxLayerCount;
69
70public:
71 using GenerateRandomFunction = void (*)(void* dst, size_t size);
72
73 class HierarchicalStorageInformation {
74 public:
75 enum {
76 MasterStorage = 0,
77 Layer1Storage = 1,
78 Layer2Storage = 2,
79 Layer3Storage = 3,
80 Layer4Storage = 4,
81 Layer5Storage = 5,
82 DataStorage = 6,
83 };
84
85 private:
86 VirtualFile m_storages[DataStorage + 1];
87
88 public:
89 void SetMasterHashStorage(VirtualFile s) {
90 m_storages[MasterStorage] = s;
91 }
92 void SetLayer1HashStorage(VirtualFile s) {
93 m_storages[Layer1Storage] = s;
94 }
95 void SetLayer2HashStorage(VirtualFile s) {
96 m_storages[Layer2Storage] = s;
97 }
98 void SetLayer3HashStorage(VirtualFile s) {
99 m_storages[Layer3Storage] = s;
100 }
101 void SetLayer4HashStorage(VirtualFile s) {
102 m_storages[Layer4Storage] = s;
103 }
104 void SetLayer5HashStorage(VirtualFile s) {
105 m_storages[Layer5Storage] = s;
106 }
107 void SetDataStorage(VirtualFile s) {
108 m_storages[DataStorage] = s;
109 }
110
111 VirtualFile& operator[](s32 index) {
112 ASSERT(MasterStorage <= index && index <= DataStorage);
113 return m_storages[index];
114 }
115 };
116
117private:
118 static GenerateRandomFunction s_generate_random;
119
120 static void SetGenerateRandomFunction(GenerateRandomFunction func) {
121 s_generate_random = func;
122 }
123
124private:
125 std::shared_ptr<IntegrityVerificationStorage> m_verify_storages[MaxLayers - 1];
126 std::shared_ptr<AlignmentMatchingStoragePooledBuffer<1>> m_buffer_storages[MaxLayers - 1];
127 s64 m_data_size;
128 s32 m_max_layers;
129
130public:
131 HierarchicalIntegrityVerificationStorage();
132 virtual ~HierarchicalIntegrityVerificationStorage() override {
133 this->Finalize();
134 }
135
136 Result Initialize(const HierarchicalIntegrityVerificationInformation& info,
137 HierarchicalStorageInformation storage, int max_data_cache_entries,
138 int max_hash_cache_entries, s8 buffer_level);
139 void Finalize();
140
141 virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
142 virtual size_t GetSize() const override;
143
144 bool IsInitialized() const {
145 return m_data_size >= 0;
146 }
147
148 s64 GetL1HashVerificationBlockSize() const {
149 return m_verify_storages[m_max_layers - 2]->GetBlockSize();
150 }
151
152 VirtualFile GetL1HashStorage() {
153 return std::make_shared<OffsetVfsFile>(
154 m_buffer_storages[m_max_layers - 3],
155 Common::DivideUp(m_data_size, this->GetL1HashVerificationBlockSize()), 0);
156 }
157
158public:
159 static constexpr s8 GetDefaultDataCacheBufferLevel(u32 max_layers) {
160 return static_cast<s8>(16 + max_layers - 2);
161 }
162};
163
164} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.cpp b/src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.cpp
new file mode 100644
index 000000000..357fa7741
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.cpp
@@ -0,0 +1,103 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "common/alignment.h"
5#include "common/scope_exit.h"
6#include "core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.h"
7
8namespace FileSys {
9
10namespace {
11
12s32 Log2(s32 value) {
13 ASSERT(value > 0);
14 ASSERT(Common::IsPowerOfTwo(value));
15
16 s32 log = 0;
17 while ((value >>= 1) > 0) {
18 ++log;
19 }
20 return log;
21}
22
23} // namespace
24
25Result HierarchicalSha256Storage::Initialize(VirtualFile* base_storages, s32 layer_count,
26 size_t htbs, void* hash_buf, size_t hash_buf_size) {
27 // Validate preconditions.
28 ASSERT(layer_count == LayerCount);
29 ASSERT(Common::IsPowerOfTwo(htbs));
30 ASSERT(hash_buf != nullptr);
31
32 // Set size tracking members.
33 m_hash_target_block_size = static_cast<s32>(htbs);
34 m_log_size_ratio = Log2(m_hash_target_block_size / HashSize);
35
36 // Get the base storage size.
37 m_base_storage_size = base_storages[2]->GetSize();
38 {
39 auto size_guard = SCOPE_GUARD({ m_base_storage_size = 0; });
40 R_UNLESS(m_base_storage_size <= static_cast<s64>(HashSize)
41 << m_log_size_ratio << m_log_size_ratio,
42 ResultHierarchicalSha256BaseStorageTooLarge);
43 size_guard.Cancel();
44 }
45
46 // Set hash buffer tracking members.
47 m_base_storage = base_storages[2];
48 m_hash_buffer = static_cast<char*>(hash_buf);
49 m_hash_buffer_size = hash_buf_size;
50
51 // Read the master hash.
52 std::array<u8, HashSize> master_hash{};
53 base_storages[0]->ReadObject(std::addressof(master_hash));
54
55 // Read and validate the data being hashed.
56 s64 hash_storage_size = base_storages[1]->GetSize();
57 ASSERT(Common::IsAligned(hash_storage_size, HashSize));
58 ASSERT(hash_storage_size <= m_hash_target_block_size);
59 ASSERT(hash_storage_size <= static_cast<s64>(m_hash_buffer_size));
60
61 base_storages[1]->Read(reinterpret_cast<u8*>(m_hash_buffer),
62 static_cast<size_t>(hash_storage_size), 0);
63
64 R_SUCCEED();
65}
66
67size_t HierarchicalSha256Storage::Read(u8* buffer, size_t size, size_t offset) const {
68 // Succeed if zero-size.
69 if (size == 0) {
70 return size;
71 }
72
73 // Validate that we have a buffer to read into.
74 ASSERT(buffer != nullptr);
75
76 // Validate preconditions.
77 ASSERT(Common::IsAligned(offset, m_hash_target_block_size));
78 ASSERT(Common::IsAligned(size, m_hash_target_block_size));
79
80 // Read the data.
81 const size_t reduced_size = static_cast<size_t>(
82 std::min<s64>(m_base_storage_size,
83 Common::AlignUp(offset + size, m_hash_target_block_size)) -
84 offset);
85 m_base_storage->Read(buffer, reduced_size, offset);
86
87 // Setup tracking variables.
88 auto cur_offset = offset;
89 auto remaining_size = reduced_size;
90 while (remaining_size > 0) {
91 const auto cur_size =
92 static_cast<size_t>(std::min<s64>(m_hash_target_block_size, remaining_size));
93 ASSERT(static_cast<size_t>(cur_offset >> m_log_size_ratio) < m_hash_buffer_size);
94
95 // Advance.
96 cur_offset += cur_size;
97 remaining_size -= cur_size;
98 }
99
100 return size;
101}
102
103} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.h b/src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.h
new file mode 100644
index 000000000..717ba9748
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.h
@@ -0,0 +1,44 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include <mutex>
7
8#include "core/file_sys/errors.h"
9#include "core/file_sys/fssystem/fs_i_storage.h"
10#include "core/file_sys/vfs.h"
11
12namespace FileSys {
13
14class HierarchicalSha256Storage : public IReadOnlyStorage {
15 YUZU_NON_COPYABLE(HierarchicalSha256Storage);
16 YUZU_NON_MOVEABLE(HierarchicalSha256Storage);
17
18public:
19 static constexpr s32 LayerCount = 3;
20 static constexpr size_t HashSize = 256 / 8;
21
22private:
23 VirtualFile m_base_storage;
24 s64 m_base_storage_size;
25 char* m_hash_buffer;
26 size_t m_hash_buffer_size;
27 s32 m_hash_target_block_size;
28 s32 m_log_size_ratio;
29 std::mutex m_mutex;
30
31public:
32 HierarchicalSha256Storage() : m_mutex() {}
33
34 Result Initialize(VirtualFile* base_storages, s32 layer_count, size_t htbs, void* hash_buf,
35 size_t hash_buf_size);
36
37 virtual size_t GetSize() const override {
38 return m_base_storage->GetSize();
39 }
40
41 virtual size_t Read(u8* buffer, size_t length, size_t offset) const override;
42};
43
44} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_indirect_storage.cpp b/src/core/file_sys/fssystem/fssystem_indirect_storage.cpp
new file mode 100644
index 000000000..45aa08d30
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_indirect_storage.cpp
@@ -0,0 +1,120 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "core/file_sys/errors.h"
5#include "core/file_sys/fssystem/fssystem_indirect_storage.h"
6
7namespace FileSys {
8
9Result IndirectStorage::Initialize(VirtualFile table_storage) {
10 // Read and verify the bucket tree header.
11 BucketTree::Header header;
12 table_storage->ReadObject(std::addressof(header));
13 R_TRY(header.Verify());
14
15 // Determine extents.
16 const auto node_storage_size = QueryNodeStorageSize(header.entry_count);
17 const auto entry_storage_size = QueryEntryStorageSize(header.entry_count);
18 const auto node_storage_offset = QueryHeaderStorageSize();
19 const auto entry_storage_offset = node_storage_offset + node_storage_size;
20
21 // Initialize.
22 R_RETURN(this->Initialize(
23 std::make_shared<OffsetVfsFile>(table_storage, node_storage_size, node_storage_offset),
24 std::make_shared<OffsetVfsFile>(table_storage, entry_storage_size, entry_storage_offset),
25 header.entry_count));
26}
27
28void IndirectStorage::Finalize() {
29 if (this->IsInitialized()) {
30 m_table.Finalize();
31 for (auto i = 0; i < StorageCount; i++) {
32 m_data_storage[i] = VirtualFile();
33 }
34 }
35}
36
37Result IndirectStorage::GetEntryList(Entry* out_entries, s32* out_entry_count, s32 entry_count,
38 s64 offset, s64 size) {
39 // Validate pre-conditions.
40 ASSERT(offset >= 0);
41 ASSERT(size >= 0);
42 ASSERT(this->IsInitialized());
43
44 // Clear the out count.
45 R_UNLESS(out_entry_count != nullptr, ResultNullptrArgument);
46 *out_entry_count = 0;
47
48 // Succeed if there's no range.
49 R_SUCCEED_IF(size == 0);
50
51 // If we have an output array, we need it to be non-null.
52 R_UNLESS(out_entries != nullptr || entry_count == 0, ResultNullptrArgument);
53
54 // Check that our range is valid.
55 BucketTree::Offsets table_offsets;
56 R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));
57
58 R_UNLESS(table_offsets.IsInclude(offset, size), ResultOutOfRange);
59
60 // Find the offset in our tree.
61 BucketTree::Visitor visitor;
62 R_TRY(m_table.Find(std::addressof(visitor), offset));
63 {
64 const auto entry_offset = visitor.Get<Entry>()->GetVirtualOffset();
65 R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset),
66 ResultInvalidIndirectEntryOffset);
67 }
68
69 // Prepare to loop over entries.
70 const auto end_offset = offset + static_cast<s64>(size);
71 s32 count = 0;
72
73 auto cur_entry = *visitor.Get<Entry>();
74 while (cur_entry.GetVirtualOffset() < end_offset) {
75 // Try to write the entry to the out list
76 if (entry_count != 0) {
77 if (count >= entry_count) {
78 break;
79 }
80 std::memcpy(out_entries + count, std::addressof(cur_entry), sizeof(Entry));
81 }
82
83 count++;
84
85 // Advance.
86 if (visitor.CanMoveNext()) {
87 R_TRY(visitor.MoveNext());
88 cur_entry = *visitor.Get<Entry>();
89 } else {
90 break;
91 }
92 }
93
94 // Write the output count.
95 *out_entry_count = count;
96 R_SUCCEED();
97}
98
99size_t IndirectStorage::Read(u8* buffer, size_t size, size_t offset) const {
100 // Validate pre-conditions.
101 ASSERT(offset >= 0);
102 ASSERT(this->IsInitialized());
103 ASSERT(buffer != nullptr);
104
105 // Succeed if there's nothing to read.
106 if (size == 0) {
107 return 0;
108 }
109
110 const_cast<IndirectStorage*>(this)->OperatePerEntry<true, true>(
111 offset, size,
112 [=](VirtualFile storage, s64 data_offset, s64 cur_offset, s64 cur_size) -> Result {
113 storage->Read(reinterpret_cast<u8*>(buffer) + (cur_offset - offset),
114 static_cast<size_t>(cur_size), data_offset);
115 R_SUCCEED();
116 });
117
118 return size;
119}
120} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_indirect_storage.h b/src/core/file_sys/fssystem/fssystem_indirect_storage.h
new file mode 100644
index 000000000..39293667b
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_indirect_storage.h
@@ -0,0 +1,294 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "core/file_sys/errors.h"
7#include "core/file_sys/fssystem/fs_i_storage.h"
8#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
9#include "core/file_sys/fssystem/fssystem_bucket_tree_template_impl.h"
10#include "core/file_sys/vfs.h"
11#include "core/file_sys/vfs_offset.h"
12
13namespace FileSys {
14
15class IndirectStorage : public IReadOnlyStorage {
16 YUZU_NON_COPYABLE(IndirectStorage);
17 YUZU_NON_MOVEABLE(IndirectStorage);
18
19public:
20 static constexpr s32 StorageCount = 2;
21 static constexpr size_t NodeSize = 16_KiB;
22
23 struct Entry {
24 u8 virt_offset[sizeof(s64)];
25 u8 phys_offset[sizeof(s64)];
26 s32 storage_index;
27
28 void SetVirtualOffset(const s64& ofs) {
29 std::memcpy(this->virt_offset, std::addressof(ofs), sizeof(s64));
30 }
31
32 s64 GetVirtualOffset() const {
33 s64 offset;
34 std::memcpy(std::addressof(offset), this->virt_offset, sizeof(s64));
35 return offset;
36 }
37
38 void SetPhysicalOffset(const s64& ofs) {
39 std::memcpy(this->phys_offset, std::addressof(ofs), sizeof(s64));
40 }
41
42 s64 GetPhysicalOffset() const {
43 s64 offset;
44 std::memcpy(std::addressof(offset), this->phys_offset, sizeof(s64));
45 return offset;
46 }
47 };
48 static_assert(std::is_trivial_v<Entry>);
49 static_assert(sizeof(Entry) == 0x14);
50
51 struct EntryData {
52 s64 virt_offset;
53 s64 phys_offset;
54 s32 storage_index;
55
56 void Set(const Entry& entry) {
57 this->virt_offset = entry.GetVirtualOffset();
58 this->phys_offset = entry.GetPhysicalOffset();
59 this->storage_index = entry.storage_index;
60 }
61 };
62 static_assert(std::is_trivial_v<EntryData>);
63
64private:
65 struct ContinuousReadingEntry {
66 static constexpr size_t FragmentSizeMax = 4_KiB;
67
68 IndirectStorage::Entry entry;
69
70 s64 GetVirtualOffset() const {
71 return this->entry.GetVirtualOffset();
72 }
73
74 s64 GetPhysicalOffset() const {
75 return this->entry.GetPhysicalOffset();
76 }
77
78 bool IsFragment() const {
79 return this->entry.storage_index != 0;
80 }
81 };
82 static_assert(std::is_trivial_v<ContinuousReadingEntry>);
83
84public:
85 static constexpr s64 QueryHeaderStorageSize() {
86 return BucketTree::QueryHeaderStorageSize();
87 }
88
89 static constexpr s64 QueryNodeStorageSize(s32 entry_count) {
90 return BucketTree::QueryNodeStorageSize(NodeSize, sizeof(Entry), entry_count);
91 }
92
93 static constexpr s64 QueryEntryStorageSize(s32 entry_count) {
94 return BucketTree::QueryEntryStorageSize(NodeSize, sizeof(Entry), entry_count);
95 }
96
97private:
98 mutable BucketTree m_table;
99 std::array<VirtualFile, StorageCount> m_data_storage;
100
101public:
102 IndirectStorage() : m_table(), m_data_storage() {}
103 virtual ~IndirectStorage() {
104 this->Finalize();
105 }
106
107 Result Initialize(VirtualFile table_storage);
108 void Finalize();
109
110 bool IsInitialized() const {
111 return m_table.IsInitialized();
112 }
113
114 Result Initialize(VirtualFile node_storage, VirtualFile entry_storage, s32 entry_count) {
115 R_RETURN(
116 m_table.Initialize(node_storage, entry_storage, NodeSize, sizeof(Entry), entry_count));
117 }
118
119 void SetStorage(s32 idx, VirtualFile storage) {
120 ASSERT(0 <= idx && idx < StorageCount);
121 m_data_storage[idx] = storage;
122 }
123
124 template <typename T>
125 void SetStorage(s32 idx, T storage, s64 offset, s64 size) {
126 ASSERT(0 <= idx && idx < StorageCount);
127 m_data_storage[idx] = std::make_shared<OffsetVfsFile>(storage, size, offset);
128 }
129
130 Result GetEntryList(Entry* out_entries, s32* out_entry_count, s32 entry_count, s64 offset,
131 s64 size);
132
133 virtual size_t GetSize() const override {
134 BucketTree::Offsets offsets;
135 m_table.GetOffsets(std::addressof(offsets));
136
137 return offsets.end_offset;
138 }
139
140 virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
141
142protected:
143 BucketTree& GetEntryTable() {
144 return m_table;
145 }
146
147 VirtualFile& GetDataStorage(s32 index) {
148 ASSERT(0 <= index && index < StorageCount);
149 return m_data_storage[index];
150 }
151
152 template <bool ContinuousCheck, bool RangeCheck, typename F>
153 Result OperatePerEntry(s64 offset, s64 size, F func);
154};
155
// Walks the bucket-tree entries overlapping [offset, offset + size) in
// virtual-offset order and invokes func(storage, physical_offset,
// virtual_offset, chunk_size) for each mapped chunk.
//
// Template parameters:
// - ContinuousCheck: when true, coalesces runs of base-storage (index 0)
//   entries via the tree's continuous-reading scan before falling back to
//   per-entry operation.
// - RangeCheck: when true, validates each entry's physical extent against the
//   backing storage's size before invoking func.
template <bool ContinuousCheck, bool RangeCheck, typename F>
Result IndirectStorage::OperatePerEntry(s64 offset, s64 size, F func) {
    // Validate preconditions.
    ASSERT(offset >= 0);
    ASSERT(size >= 0);
    ASSERT(this->IsInitialized());

    // Succeed if there's nothing to operate on.
    R_SUCCEED_IF(size == 0);

    // Get the table offsets.
    BucketTree::Offsets table_offsets;
    R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));

    // Validate arguments.
    R_UNLESS(table_offsets.IsInclude(offset, size), ResultOutOfRange);

    // Find the offset in our tree.
    BucketTree::Visitor visitor;
    R_TRY(m_table.Find(std::addressof(visitor), offset));
    {
        const auto entry_offset = visitor.Get<Entry>()->GetVirtualOffset();
        R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset),
                 ResultInvalidIndirectEntryOffset);
    }

    // Prepare to operate in chunks.
    auto cur_offset = offset;
    const auto end_offset = offset + static_cast<s64>(size);
    BucketTree::ContinuousReadingInfo cr_info;

    while (cur_offset < end_offset) {
        // Get the current entry.
        const auto cur_entry = *visitor.Get<Entry>();

        // Get and validate the entry's offset. The entry found must start at or
        // before our current position.
        const auto cur_entry_offset = cur_entry.GetVirtualOffset();
        R_UNLESS(cur_entry_offset <= cur_offset, ResultInvalidIndirectEntryOffset);

        // Validate the storage index.
        R_UNLESS(0 <= cur_entry.storage_index && cur_entry.storage_index < StorageCount,
                 ResultInvalidIndirectEntryStorageIndex);

        // If we need to check the continuous info, do so.
        if constexpr (ContinuousCheck) {
            // Scan, if we need to.
            if (cr_info.CheckNeedScan()) {
                R_TRY(visitor.ScanContinuousReading<ContinuousReadingEntry>(
                    std::addressof(cr_info), cur_offset,
                    static_cast<size_t>(end_offset - cur_offset)));
            }

            // Process a base storage entry. Continuous runs are only valid on
            // the base (index 0) storage.
            if (cr_info.CanDo()) {
                // Ensure that we can process.
                R_UNLESS(cur_entry.storage_index == 0, ResultInvalidIndirectEntryStorageIndex);

                // Ensure that we remain within range.
                const auto data_offset = cur_offset - cur_entry_offset;
                const auto cur_entry_phys_offset = cur_entry.GetPhysicalOffset();
                const auto cur_size = static_cast<s64>(cr_info.GetReadSize());

                // If we should, verify the range.
                if constexpr (RangeCheck) {
                    // Get the current data storage's size.
                    s64 cur_data_storage_size = m_data_storage[0]->GetSize();

                    R_UNLESS(0 <= cur_entry_phys_offset &&
                                 cur_entry_phys_offset <= cur_data_storage_size,
                             ResultInvalidIndirectEntryOffset);
                    R_UNLESS(cur_entry_phys_offset + data_offset + cur_size <=
                                 cur_data_storage_size,
                             ResultInvalidIndirectStorageSize);
                }

                // Operate.
                R_TRY(func(m_data_storage[0], cur_entry_phys_offset + data_offset, cur_offset,
                           cur_size));

                // Mark as done so the per-entry path below is skipped for this
                // coalesced run.
                cr_info.Done();
            }
        }

        // Get and validate the next entry offset (the end of this entry's
        // extent); the last entry extends to the end of the table.
        s64 next_entry_offset;
        if (visitor.CanMoveNext()) {
            R_TRY(visitor.MoveNext());
            next_entry_offset = visitor.Get<Entry>()->GetVirtualOffset();
            R_UNLESS(table_offsets.IsInclude(next_entry_offset), ResultInvalidIndirectEntryOffset);
        } else {
            next_entry_offset = table_offsets.end_offset;
        }
        R_UNLESS(cur_offset < next_entry_offset, ResultInvalidIndirectEntryOffset);

        // Get the offset of the entry in the data we read.
        const auto data_offset = cur_offset - cur_entry_offset;
        const auto data_size = (next_entry_offset - cur_entry_offset);
        ASSERT(data_size > 0);

        // Determine how much is left.
        const auto remaining_size = end_offset - cur_offset;
        const auto cur_size = std::min<s64>(remaining_size, data_size - data_offset);
        ASSERT(cur_size <= size);

        // Operate, if we need to (i.e. this chunk was not already covered by a
        // continuous read above).
        bool needs_operate;
        if constexpr (!ContinuousCheck) {
            needs_operate = true;
        } else {
            needs_operate = !cr_info.IsDone() || cur_entry.storage_index != 0;
        }

        if (needs_operate) {
            const auto cur_entry_phys_offset = cur_entry.GetPhysicalOffset();

            if constexpr (RangeCheck) {
                // Get the current data storage's size.
                s64 cur_data_storage_size = m_data_storage[cur_entry.storage_index]->GetSize();

                // Ensure that we remain within range.
                R_UNLESS(0 <= cur_entry_phys_offset &&
                             cur_entry_phys_offset <= cur_data_storage_size,
                         ResultIndirectStorageCorrupted);
                R_UNLESS(cur_entry_phys_offset + data_offset + cur_size <= cur_data_storage_size,
                         ResultIndirectStorageCorrupted);
            }

            R_TRY(func(m_data_storage[cur_entry.storage_index], cur_entry_phys_offset + data_offset,
                       cur_offset, cur_size));
        }

        cur_offset += cur_size;
    }

    R_SUCCEED();
}
293
294} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.cpp b/src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.cpp
new file mode 100644
index 000000000..2c3da230c
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.cpp
@@ -0,0 +1,30 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "core/file_sys/fssystem/fssystem_integrity_romfs_storage.h"
5
6namespace FileSys {
7
// Stores the master hash, wraps it in an in-memory storage, installs it as
// level 0 of the hierarchy, and initializes the integrity storage.
Result IntegrityRomFsStorage::Initialize(
    HierarchicalIntegrityVerificationInformation level_hash_info, Hash master_hash,
    HierarchicalIntegrityVerificationStorage::HierarchicalStorageInformation storage_info,
    int max_data_cache_entries, int max_hash_cache_entries, s8 buffer_level) {
    // Set master hash. The storage must reference our own copy, which lives as
    // long as this object.
    m_master_hash = master_hash;
    m_master_hash_storage = std::make_shared<ArrayVfsFile<sizeof(Hash)>>(m_master_hash.value);
    R_UNLESS(m_master_hash_storage != nullptr,
             ResultAllocationMemoryFailedInIntegrityRomFsStorageA);

    // Set the master hash storage as the top (index 0) level.
    storage_info[0] = m_master_hash_storage;

    // Initialize our integrity storage.
    R_RETURN(m_integrity_storage.Initialize(level_hash_info, storage_info, max_data_cache_entries,
                                            max_hash_cache_entries, buffer_level));
}
25
// Tears down the wrapped integrity storage; safe to call repeatedly.
void IntegrityRomFsStorage::Finalize() {
    m_integrity_storage.Finalize();
}
29
30} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.h b/src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.h
new file mode 100644
index 000000000..b80e9a302
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.h
@@ -0,0 +1,42 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h"
7#include "core/file_sys/fssystem/fssystem_nca_header.h"
8#include "core/file_sys/vfs_vector.h"
9
10namespace FileSys {
11
12constexpr inline size_t IntegrityLayerCountRomFs = 7;
13constexpr inline size_t IntegrityHashLayerBlockSize = 16_KiB;
14
15class IntegrityRomFsStorage : public IReadOnlyStorage {
16private:
17 HierarchicalIntegrityVerificationStorage m_integrity_storage;
18 Hash m_master_hash;
19 std::shared_ptr<ArrayVfsFile<sizeof(Hash)>> m_master_hash_storage;
20
21public:
22 IntegrityRomFsStorage() {}
23 virtual ~IntegrityRomFsStorage() override {
24 this->Finalize();
25 }
26
27 Result Initialize(
28 HierarchicalIntegrityVerificationInformation level_hash_info, Hash master_hash,
29 HierarchicalIntegrityVerificationStorage::HierarchicalStorageInformation storage_info,
30 int max_data_cache_entries, int max_hash_cache_entries, s8 buffer_level);
31 void Finalize();
32
33 virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
34 return m_integrity_storage.Read(buffer, size, offset);
35 }
36
37 virtual size_t GetSize() const override {
38 return m_integrity_storage.GetSize();
39 }
40};
41
42} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_integrity_verification_storage.cpp b/src/core/file_sys/fssystem/fssystem_integrity_verification_storage.cpp
new file mode 100644
index 000000000..ef36b755e
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_integrity_verification_storage.cpp
@@ -0,0 +1,95 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "common/alignment.h"
5#include "core/file_sys/fssystem/fssystem_integrity_verification_storage.h"
6
7namespace FileSys {
8
9constexpr inline u32 ILog2(u32 val) {
10 ASSERT(val > 0);
11 return ((sizeof(u32) * 8) - 1 - std::countl_zero<u32>(val));
12}
13
14void IntegrityVerificationStorage::Initialize(VirtualFile hs, VirtualFile ds, s64 verif_block_size,
15 s64 upper_layer_verif_block_size, bool is_real_data) {
16 // Validate preconditions.
17 ASSERT(verif_block_size >= HashSize);
18
19 // Set storages.
20 m_hash_storage = hs;
21 m_data_storage = ds;
22
23 // Set verification block sizes.
24 m_verification_block_size = verif_block_size;
25 m_verification_block_order = ILog2(static_cast<u32>(verif_block_size));
26 ASSERT(m_verification_block_size == 1ll << m_verification_block_order);
27
28 // Set upper layer block sizes.
29 upper_layer_verif_block_size = std::max(upper_layer_verif_block_size, HashSize);
30 m_upper_layer_verification_block_size = upper_layer_verif_block_size;
31 m_upper_layer_verification_block_order = ILog2(static_cast<u32>(upper_layer_verif_block_size));
32 ASSERT(m_upper_layer_verification_block_size == 1ll << m_upper_layer_verification_block_order);
33
34 // Validate sizes.
35 {
36 s64 hash_size = m_hash_storage->GetSize();
37 s64 data_size = m_data_storage->GetSize();
38 ASSERT(((hash_size / HashSize) * m_verification_block_size) >= data_size);
39 }
40
41 // Set data.
42 m_is_real_data = is_real_data;
43}
44
45void IntegrityVerificationStorage::Finalize() {
46 m_hash_storage = VirtualFile();
47 m_data_storage = VirtualFile();
48}
49
// Reads block-aligned data from the data layer, zero-filling the tail of the
// final partial block when the request extends past the end of the data.
// Hash verification against m_hash_storage is not performed in this port.
size_t IntegrityVerificationStorage::Read(u8* buffer, size_t size, size_t offset) const {
    // Validate preconditions.
    ASSERT(Common::IsAligned(offset, static_cast<size_t>(m_verification_block_size)));
    ASSERT(Common::IsAligned(size, static_cast<size_t>(m_verification_block_size)));

    // Succeed if zero size.
    if (size == 0) {
        return size;
    }

    // Validate arguments.
    ASSERT(buffer != nullptr);

    // Validate the offset.
    s64 data_size = m_data_storage->GetSize();
    ASSERT(offset <= static_cast<size_t>(data_size));

    // Validate the access range against the data size rounded up to a block.
    ASSERT(R_SUCCEEDED(IStorage::CheckAccessRange(
        offset, size, Common::AlignUp(data_size, static_cast<size_t>(m_verification_block_size)))));

    // Determine the read extents.
    size_t read_size = size;
    if (static_cast<s64>(offset + read_size) > data_size) {
        // Determine the padding sizes.
        // NOTE(review): this clears at most one block's worth of padding after
        // the data end; the range check above implies the request never
        // extends more than one block past the end — confirm.
        s64 padding_offset = data_size - offset;
        size_t padding_size = static_cast<size_t>(
            m_verification_block_size - (padding_offset & (m_verification_block_size - 1)));
        ASSERT(static_cast<s64>(padding_size) < m_verification_block_size);

        // Clear the padding.
        std::memset(static_cast<u8*>(buffer) + padding_offset, 0, padding_size);

        // Set the new in-bounds size.
        read_size = static_cast<size_t>(data_size - offset);
    }

    // Perform the read.
    return m_data_storage->Read(buffer, read_size, offset);
}
90
// The logical size of this layer is the size of its data storage.
size_t IntegrityVerificationStorage::GetSize() const {
    return m_data_storage->GetSize();
}
94
95} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_integrity_verification_storage.h b/src/core/file_sys/fssystem/fssystem_integrity_verification_storage.h
new file mode 100644
index 000000000..08515a268
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_integrity_verification_storage.h
@@ -0,0 +1,65 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include <optional>
7
8#include "core/file_sys/fssystem/fs_i_storage.h"
9#include "core/file_sys/fssystem/fs_types.h"
10
11namespace FileSys {
12
13class IntegrityVerificationStorage : public IReadOnlyStorage {
14 YUZU_NON_COPYABLE(IntegrityVerificationStorage);
15 YUZU_NON_MOVEABLE(IntegrityVerificationStorage);
16
17public:
18 static constexpr s64 HashSize = 256 / 8;
19
20 struct BlockHash {
21 u8 hash[HashSize];
22 };
23 static_assert(std::is_trivial_v<BlockHash>);
24
25private:
26 VirtualFile m_hash_storage;
27 VirtualFile m_data_storage;
28 s64 m_verification_block_size;
29 s64 m_verification_block_order;
30 s64 m_upper_layer_verification_block_size;
31 s64 m_upper_layer_verification_block_order;
32 bool m_is_real_data;
33
34public:
35 IntegrityVerificationStorage()
36 : m_verification_block_size(0), m_verification_block_order(0),
37 m_upper_layer_verification_block_size(0), m_upper_layer_verification_block_order(0) {}
38 virtual ~IntegrityVerificationStorage() override {
39 this->Finalize();
40 }
41
42 void Initialize(VirtualFile hs, VirtualFile ds, s64 verif_block_size,
43 s64 upper_layer_verif_block_size, bool is_real_data);
44 void Finalize();
45
46 virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
47 virtual size_t GetSize() const override;
48
49 s64 GetBlockSize() const {
50 return m_verification_block_size;
51 }
52
53private:
54 static void SetValidationBit(BlockHash* hash) {
55 ASSERT(hash != nullptr);
56 hash->hash[HashSize - 1] |= 0x80;
57 }
58
59 static bool IsValidationBit(const BlockHash* hash) {
60 ASSERT(hash != nullptr);
61 return (hash->hash[HashSize - 1] & 0x80) != 0;
62 }
63};
64
65} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_memory_resource_buffer_hold_storage.h b/src/core/file_sys/fssystem/fssystem_memory_resource_buffer_hold_storage.h
new file mode 100644
index 000000000..7637272d5
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_memory_resource_buffer_hold_storage.h
@@ -0,0 +1,58 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once
2
#include <new>

#include "core/file_sys/fssystem/fs_i_storage.h"
4
5namespace FileSys {
6
7class MemoryResourceBufferHoldStorage : public IStorage {
8 YUZU_NON_COPYABLE(MemoryResourceBufferHoldStorage);
9 YUZU_NON_MOVEABLE(MemoryResourceBufferHoldStorage);
10
11private:
12 VirtualFile m_storage;
13 void* m_buffer;
14 size_t m_buffer_size;
15
16public:
17 MemoryResourceBufferHoldStorage(VirtualFile storage, size_t buffer_size)
18 : m_storage(std::move(storage)), m_buffer(::operator new(buffer_size)),
19 m_buffer_size(buffer_size) {}
20
21 virtual ~MemoryResourceBufferHoldStorage() {
22 // If we have a buffer, deallocate it.
23 if (m_buffer != nullptr) {
24 ::operator delete(m_buffer);
25 }
26 }
27
28 bool IsValid() const {
29 return m_buffer != nullptr;
30 }
31 void* GetBuffer() const {
32 return m_buffer;
33 }
34
35public:
36 virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
37 // Check pre-conditions.
38 ASSERT(m_storage != nullptr);
39
40 return m_storage->Read(buffer, size, offset);
41 }
42
43 virtual size_t GetSize() const override {
44 // Check pre-conditions.
45 ASSERT(m_storage != nullptr);
46
47 return m_storage->GetSize();
48 }
49
50 virtual size_t Write(const u8* buffer, size_t size, size_t offset) override {
51 // Check pre-conditions.
52 ASSERT(m_storage != nullptr);
53
54 return m_storage->Write(buffer, size, offset);
55 }
56};
57
58} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_nca_file_system_driver.cpp b/src/core/file_sys/fssystem/fssystem_nca_file_system_driver.cpp
new file mode 100644
index 000000000..b1b5fb156
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_nca_file_system_driver.cpp
@@ -0,0 +1,1345 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.h"
5#include "core/file_sys/fssystem/fssystem_aes_ctr_storage.h"
6#include "core/file_sys/fssystem/fssystem_aes_xts_storage.h"
7#include "core/file_sys/fssystem/fssystem_alignment_matching_storage.h"
8#include "core/file_sys/fssystem/fssystem_compressed_storage.h"
9#include "core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h"
10#include "core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.h"
11#include "core/file_sys/fssystem/fssystem_indirect_storage.h"
12#include "core/file_sys/fssystem/fssystem_integrity_romfs_storage.h"
13#include "core/file_sys/fssystem/fssystem_memory_resource_buffer_hold_storage.h"
14#include "core/file_sys/fssystem/fssystem_nca_file_system_driver.h"
15#include "core/file_sys/fssystem/fssystem_sparse_storage.h"
16#include "core/file_sys/fssystem/fssystem_switch_storage.h"
17#include "core/file_sys/vfs_offset.h"
18#include "core/file_sys/vfs_vector.h"
19
20namespace FileSys {
21
22namespace {
23
24constexpr inline s32 IntegrityDataCacheCount = 24;
25constexpr inline s32 IntegrityHashCacheCount = 8;
26
27constexpr inline s32 IntegrityDataCacheCountForMeta = 16;
28constexpr inline s32 IntegrityHashCacheCountForMeta = 2;
29
30class SharedNcaBodyStorage : public IReadOnlyStorage {
31 YUZU_NON_COPYABLE(SharedNcaBodyStorage);
32 YUZU_NON_MOVEABLE(SharedNcaBodyStorage);
33
34private:
35 VirtualFile m_storage;
36 std::shared_ptr<NcaReader> m_nca_reader;
37
38public:
39 SharedNcaBodyStorage(VirtualFile s, std::shared_ptr<NcaReader> r)
40 : m_storage(std::move(s)), m_nca_reader(std::move(r)) {}
41
42 virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
43 // Validate pre-conditions.
44 ASSERT(m_storage != nullptr);
45
46 // Read from the base storage.
47 return m_storage->Read(buffer, size, offset);
48 }
49
50 virtual size_t GetSize() const override {
51 // Validate pre-conditions.
52 ASSERT(m_storage != nullptr);
53
54 return m_storage->GetSize();
55 }
56};
57
58inline s64 GetFsOffset(const NcaReader& reader, s32 fs_index) {
59 return static_cast<s64>(reader.GetFsOffset(fs_index));
60}
61
62inline s64 GetFsEndOffset(const NcaReader& reader, s32 fs_index) {
63 return static_cast<s64>(reader.GetFsEndOffset(fs_index));
64}
65
66using Sha256DataRegion = NcaFsHeader::Region;
67using IntegrityLevelInfo = NcaFsHeader::HashData::IntegrityMetaInfo::LevelHashInfo;
68using IntegrityDataInfo = IntegrityLevelInfo::HierarchicalIntegrityVerificationLevelInformation;
69
70} // namespace
71
// Opens the fully layered storage for fs partition fs_index, initializing
// out_header_reader for that partition. If ctx is non-null, intermediate
// storages created along the way are captured into it.
Result NcaFileSystemDriver::OpenStorageWithContext(VirtualFile* out,
                                                   NcaFsHeaderReader* out_header_reader,
                                                   s32 fs_index, StorageContext* ctx) {
    // Open storage.
    R_RETURN(this->OpenStorageImpl(out, out_header_reader, fs_index, ctx));
}
78
// Builds the storage stack for fs partition fs_index, layer by layer:
//   body/sparse layer -> (optional patch meta) -> encryption (AesCtrEx/Xts/Ctr)
//   -> indirect (patch) layer -> hash/integrity + compression (via
//   CreateStorageByRawStorage), unless the sparse layer exists or raw storage
//   was requested, in which case the raw stack is returned directly.
// If ctx is non-null, each intermediate storage is also saved into it.
Result NcaFileSystemDriver::OpenStorageImpl(VirtualFile* out, NcaFsHeaderReader* out_header_reader,
                                            s32 fs_index, StorageContext* ctx) {
    // Validate preconditions.
    ASSERT(out != nullptr);
    ASSERT(out_header_reader != nullptr);
    ASSERT(0 <= fs_index && fs_index < NcaHeader::FsCountMax);

    // Validate the fs index.
    R_UNLESS(m_reader->HasFsInfo(fs_index), ResultPartitionNotFound);

    // Initialize our header reader for the fs index.
    R_TRY(out_header_reader->Initialize(*m_reader, fs_index));

    // Declare the storage we're opening.
    VirtualFile storage;

    // Process sparse layer.
    s64 fs_data_offset = 0;
    if (out_header_reader->ExistsSparseLayer()) {
        // Get the sparse info.
        const auto& sparse_info = out_header_reader->GetSparseInfo();

        // Create based on whether we have a meta hash layer.
        if (out_header_reader->ExistsSparseMetaHashLayer()) {
            // Create the sparse storage with verification.
            R_TRY(this->CreateSparseStorageWithVerification(
                std::addressof(storage), std::addressof(fs_data_offset),
                ctx != nullptr ? std::addressof(ctx->current_sparse_storage) : nullptr,
                ctx != nullptr ? std::addressof(ctx->sparse_storage_meta_storage) : nullptr,
                ctx != nullptr ? std::addressof(ctx->sparse_layer_info_storage) : nullptr, fs_index,
                out_header_reader->GetAesCtrUpperIv(), sparse_info,
                out_header_reader->GetSparseMetaDataHashDataInfo(),
                out_header_reader->GetSparseMetaHashType()));
        } else {
            // Create the sparse storage.
            R_TRY(this->CreateSparseStorage(
                std::addressof(storage), std::addressof(fs_data_offset),
                ctx != nullptr ? std::addressof(ctx->current_sparse_storage) : nullptr,
                ctx != nullptr ? std::addressof(ctx->sparse_storage_meta_storage) : nullptr,
                fs_index, out_header_reader->GetAesCtrUpperIv(), sparse_info));
        }
    } else {
        // Get the data offsets.
        fs_data_offset = GetFsOffset(*m_reader, fs_index);
        const auto fs_end_offset = GetFsEndOffset(*m_reader, fs_index);

        // Validate that we're within range.
        const auto data_size = fs_end_offset - fs_data_offset;
        R_UNLESS(data_size > 0, ResultInvalidNcaHeader);

        // Create the body substorage.
        R_TRY(this->CreateBodySubStorage(std::addressof(storage), fs_data_offset, data_size));

        // Potentially save the body substorage to our context.
        if (ctx != nullptr) {
            ctx->body_substorage = storage;
        }
    }

    // Process patch layer.
    const auto& patch_info = out_header_reader->GetPatchInfo();
    VirtualFile patch_meta_aes_ctr_ex_meta_storage;
    VirtualFile patch_meta_indirect_meta_storage;
    if (out_header_reader->ExistsPatchMetaHashLayer()) {
        // Check the meta hash type.
        R_UNLESS(out_header_reader->GetPatchMetaHashType() ==
                     NcaFsHeader::MetaDataHashType::HierarchicalIntegrity,
                 ResultRomNcaInvalidPatchMetaDataHashType);

        // Create the patch meta storage (produces both the aes-ctr-ex and
        // indirect meta storages, verified against the patch meta hash data).
        R_TRY(this->CreatePatchMetaStorage(
            std::addressof(patch_meta_aes_ctr_ex_meta_storage),
            std::addressof(patch_meta_indirect_meta_storage),
            ctx != nullptr ? std::addressof(ctx->patch_layer_info_storage) : nullptr, storage,
            fs_data_offset, out_header_reader->GetAesCtrUpperIv(), patch_info,
            out_header_reader->GetPatchMetaDataHashDataInfo()));
    }

    if (patch_info.HasAesCtrExTable()) {
        // Check the encryption type.
        ASSERT(out_header_reader->GetEncryptionType() == NcaFsHeader::EncryptionType::None ||
               out_header_reader->GetEncryptionType() == NcaFsHeader::EncryptionType::AesCtrEx ||
               out_header_reader->GetEncryptionType() ==
                   NcaFsHeader::EncryptionType::AesCtrExSkipLayerHash);

        // Create the ex meta storage.
        VirtualFile aes_ctr_ex_storage_meta_storage = patch_meta_aes_ctr_ex_meta_storage;
        if (aes_ctr_ex_storage_meta_storage == nullptr) {
            // If we don't have a meta storage, we must not have a patch meta hash layer.
            ASSERT(!out_header_reader->ExistsPatchMetaHashLayer());

            R_TRY(this->CreateAesCtrExStorageMetaStorage(
                std::addressof(aes_ctr_ex_storage_meta_storage), storage, fs_data_offset,
                out_header_reader->GetEncryptionType(), out_header_reader->GetAesCtrUpperIv(),
                patch_info));
        }

        // Create the ex storage.
        VirtualFile aes_ctr_ex_storage;
        R_TRY(this->CreateAesCtrExStorage(
            std::addressof(aes_ctr_ex_storage),
            ctx != nullptr ? std::addressof(ctx->aes_ctr_ex_storage) : nullptr, std::move(storage),
            aes_ctr_ex_storage_meta_storage, fs_data_offset, out_header_reader->GetAesCtrUpperIv(),
            patch_info));

        // Set the base storage as the ex storage.
        storage = std::move(aes_ctr_ex_storage);

        // Potentially save storages to our context.
        if (ctx != nullptr) {
            ctx->aes_ctr_ex_storage_meta_storage = aes_ctr_ex_storage_meta_storage;
            ctx->aes_ctr_ex_storage_data_storage = storage;
            ctx->fs_data_storage = storage;
        }
    } else {
        // Create the appropriate storage for the encryption type.
        switch (out_header_reader->GetEncryptionType()) {
        case NcaFsHeader::EncryptionType::None:
            // If there's no encryption, use the base storage we made previously.
            break;
        case NcaFsHeader::EncryptionType::AesXts:
            R_TRY(this->CreateAesXtsStorage(std::addressof(storage), std::move(storage),
                                            fs_data_offset));
            break;
        case NcaFsHeader::EncryptionType::AesCtr:
            R_TRY(this->CreateAesCtrStorage(std::addressof(storage), std::move(storage),
                                            fs_data_offset, out_header_reader->GetAesCtrUpperIv(),
                                            AlignmentStorageRequirement::None));
            break;
        case NcaFsHeader::EncryptionType::AesCtrSkipLayerHash: {
            // Create the aes ctr storage.
            VirtualFile aes_ctr_storage;
            R_TRY(this->CreateAesCtrStorage(std::addressof(aes_ctr_storage), storage,
                                            fs_data_offset, out_header_reader->GetAesCtrUpperIv(),
                                            AlignmentStorageRequirement::None));

            // Create region switch storage, selecting between the plain and
            // decrypted storages per region.
            R_TRY(this->CreateRegionSwitchStorage(std::addressof(storage), out_header_reader,
                                                  std::move(storage), std::move(aes_ctr_storage)));
        } break;
        default:
            R_THROW(ResultInvalidNcaFsHeaderEncryptionType);
        }

        // Potentially save storages to our context.
        if (ctx != nullptr) {
            ctx->fs_data_storage = storage;
        }
    }

    // Process indirect layer.
    if (patch_info.HasIndirectTable()) {
        // Create the indirect meta storage
        VirtualFile indirect_storage_meta_storage = patch_meta_indirect_meta_storage;
        if (indirect_storage_meta_storage == nullptr) {
            // If we don't have a meta storage, we must not have a patch meta hash layer.
            ASSERT(!out_header_reader->ExistsPatchMetaHashLayer());

            R_TRY(this->CreateIndirectStorageMetaStorage(
                std::addressof(indirect_storage_meta_storage), storage, patch_info));
        }

        // Potentially save the indirect meta storage to our context.
        if (ctx != nullptr) {
            ctx->indirect_storage_meta_storage = indirect_storage_meta_storage;
        }

        // Get the original indirectable storage.
        VirtualFile original_indirectable_storage;
        if (m_original_reader != nullptr && m_original_reader->HasFsInfo(fs_index)) {
            // Create a driver for the original.
            NcaFileSystemDriver original_driver(m_original_reader);

            // Create a header reader for the original.
            NcaFsHeaderReader original_header_reader;
            R_TRY(original_header_reader.Initialize(*m_original_reader, fs_index));

            // Open original indirectable storage.
            R_TRY(original_driver.OpenIndirectableStorageAsOriginal(
                std::addressof(original_indirectable_storage),
                std::addressof(original_header_reader), ctx));
        } else if (ctx != nullptr && ctx->external_original_storage != nullptr) {
            // Use the external original storage.
            original_indirectable_storage = ctx->external_original_storage;
        } else {
            // Allocate a dummy memory storage as original storage.
            original_indirectable_storage = std::make_shared<VectorVfsFile>();
            R_UNLESS(original_indirectable_storage != nullptr,
                     ResultAllocationMemoryFailedAllocateShared);
        }

        // Create the indirect storage.
        VirtualFile indirect_storage;
        R_TRY(this->CreateIndirectStorage(
            std::addressof(indirect_storage),
            ctx != nullptr ? std::addressof(ctx->indirect_storage) : nullptr, std::move(storage),
            std::move(original_indirectable_storage), std::move(indirect_storage_meta_storage),
            patch_info));

        // Set storage as the indirect storage.
        storage = std::move(indirect_storage);
    }

    // Check if we're sparse or requested to skip the integrity layer.
    if (out_header_reader->ExistsSparseLayer() || (ctx != nullptr && ctx->open_raw_storage)) {
        *out = std::move(storage);
        R_SUCCEED();
    }

    // Create the non-raw storage.
    R_RETURN(this->CreateStorageByRawStorage(out, out_header_reader, std::move(storage), ctx));
}
291
// Wraps a raw (decrypted/patched) storage with the hash layer the header
// declares (hierarchical SHA-256 or hierarchical integrity), then with the
// compression layer if one exists. Any other hash type is rejected.
Result NcaFileSystemDriver::CreateStorageByRawStorage(VirtualFile* out,
                                                      const NcaFsHeaderReader* header_reader,
                                                      VirtualFile raw_storage,
                                                      StorageContext* ctx) {
    // Initialize storage as raw storage.
    VirtualFile storage = std::move(raw_storage);

    // Process hash/integrity layer.
    switch (header_reader->GetHashType()) {
    case NcaFsHeader::HashType::HierarchicalSha256Hash:
        R_TRY(this->CreateSha256Storage(std::addressof(storage), std::move(storage),
                                        header_reader->GetHashData().hierarchical_sha256_data));
        break;
    case NcaFsHeader::HashType::HierarchicalIntegrityHash:
        R_TRY(this->CreateIntegrityVerificationStorage(
            std::addressof(storage), std::move(storage),
            header_reader->GetHashData().integrity_meta_info));
        break;
    default:
        R_THROW(ResultInvalidNcaFsHeaderHashType);
    }

    // Process compression layer.
    if (header_reader->ExistsCompressionLayer()) {
        R_TRY(this->CreateCompressedStorage(
            std::addressof(storage),
            ctx != nullptr ? std::addressof(ctx->compressed_storage) : nullptr,
            ctx != nullptr ? std::addressof(ctx->compressed_storage_meta_storage) : nullptr,
            std::move(storage), header_reader->GetCompressionInfo()));
    }

    // Set output storage.
    *out = std::move(storage);
    R_SUCCEED();
}
327
// Opens the "original" (pre-patch) storage that an indirect storage redirects
// into: the sparse/body layer plus the decryption layer only — no patch,
// hash, or compression layers. Called on a driver constructed around the
// original NCA's reader.
Result NcaFileSystemDriver::OpenIndirectableStorageAsOriginal(
    VirtualFile* out, const NcaFsHeaderReader* header_reader, StorageContext* ctx) {
    // Get the fs index.
    const auto fs_index = header_reader->GetFsIndex();

    // Declare the storage we're opening.
    VirtualFile storage;

    // Process sparse layer.
    s64 fs_data_offset = 0;
    if (header_reader->ExistsSparseLayer()) {
        // Get the sparse info.
        const auto& sparse_info = header_reader->GetSparseInfo();

        // Create based on whether we have a meta hash layer.
        if (header_reader->ExistsSparseMetaHashLayer()) {
            // Create the sparse storage with verification.
            R_TRY(this->CreateSparseStorageWithVerification(
                std::addressof(storage), std::addressof(fs_data_offset),
                ctx != nullptr ? std::addressof(ctx->original_sparse_storage) : nullptr,
                ctx != nullptr ? std::addressof(ctx->sparse_storage_meta_storage) : nullptr,
                ctx != nullptr ? std::addressof(ctx->sparse_layer_info_storage) : nullptr, fs_index,
                header_reader->GetAesCtrUpperIv(), sparse_info,
                header_reader->GetSparseMetaDataHashDataInfo(),
                header_reader->GetSparseMetaHashType()));
        } else {
            // Create the sparse storage.
            R_TRY(this->CreateSparseStorage(
                std::addressof(storage), std::addressof(fs_data_offset),
                ctx != nullptr ? std::addressof(ctx->original_sparse_storage) : nullptr,
                ctx != nullptr ? std::addressof(ctx->sparse_storage_meta_storage) : nullptr,
                fs_index, header_reader->GetAesCtrUpperIv(), sparse_info));
        }
    } else {
        // Get the data offsets.
        fs_data_offset = GetFsOffset(*m_reader, fs_index);
        const auto fs_end_offset = GetFsEndOffset(*m_reader, fs_index);

        // Validate that we're within range.
        const auto data_size = fs_end_offset - fs_data_offset;
        R_UNLESS(data_size > 0, ResultInvalidNcaHeader);

        // Create the body substorage.
        R_TRY(this->CreateBodySubStorage(std::addressof(storage), fs_data_offset, data_size));
    }

    // Create the appropriate storage for the encryption type.
    switch (header_reader->GetEncryptionType()) {
    case NcaFsHeader::EncryptionType::None:
        // If there's no encryption, use the base storage we made previously.
        break;
    case NcaFsHeader::EncryptionType::AesXts:
        R_TRY(
            this->CreateAesXtsStorage(std::addressof(storage), std::move(storage), fs_data_offset));
        break;
    case NcaFsHeader::EncryptionType::AesCtr:
        R_TRY(this->CreateAesCtrStorage(std::addressof(storage), std::move(storage), fs_data_offset,
                                        header_reader->GetAesCtrUpperIv(),
                                        AlignmentStorageRequirement::CacheBlockSize));
        break;
    default:
        R_THROW(ResultInvalidNcaFsHeaderEncryptionType);
    }

    // Set output storage.
    *out = std::move(storage);
    R_SUCCEED();
}
396
397Result NcaFileSystemDriver::CreateBodySubStorage(VirtualFile* out, s64 offset, s64 size) {
398 // Create the body storage.
399 auto body_storage =
400 std::make_shared<SharedNcaBodyStorage>(m_reader->GetSharedBodyStorage(), m_reader);
401 R_UNLESS(body_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
402
403 // Get the body storage size.
404 s64 body_size = body_storage->GetSize();
405
406 // Check that we're within range.
407 R_UNLESS(offset + size <= body_size, ResultNcaBaseStorageOutOfRangeB);
408
409 // Create substorage.
410 auto body_substorage = std::make_shared<OffsetVfsFile>(std::move(body_storage), size, offset);
411 R_UNLESS(body_substorage != nullptr, ResultAllocationMemoryFailedAllocateShared);
412
413 // Set the output storage.
414 *out = std::move(body_substorage);
415 R_SUCCEED();
416}
417
418Result NcaFileSystemDriver::CreateAesCtrStorage(
419 VirtualFile* out, VirtualFile base_storage, s64 offset, const NcaAesCtrUpperIv& upper_iv,
420 AlignmentStorageRequirement alignment_storage_requirement) {
421 // Check pre-conditions.
422 ASSERT(out != nullptr);
423 ASSERT(base_storage != nullptr);
424
425 // Create the iv.
426 std::array<u8, AesCtrStorage::IvSize> iv{};
427 AesCtrStorage::MakeIv(iv.data(), sizeof(iv), upper_iv.value, offset);
428
429 // Create the ctr storage.
430 VirtualFile aes_ctr_storage;
431 if (m_reader->HasExternalDecryptionKey()) {
432 aes_ctr_storage = std::make_shared<AesCtrStorage>(
433 std::move(base_storage), m_reader->GetExternalDecryptionKey(), AesCtrStorage::KeySize,
434 iv.data(), AesCtrStorage::IvSize);
435 R_UNLESS(aes_ctr_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
436 } else {
437 // Create software decryption storage.
438 auto sw_storage = std::make_shared<AesCtrStorage>(
439 base_storage, m_reader->GetDecryptionKey(NcaHeader::DecryptionKey_AesCtr),
440 AesCtrStorage::KeySize, iv.data(), AesCtrStorage::IvSize);
441 R_UNLESS(sw_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
442
443 aes_ctr_storage = std::move(sw_storage);
444 }
445
446 // Create alignment matching storage.
447 auto aligned_storage = std::make_shared<AlignmentMatchingStorage<NcaHeader::CtrBlockSize, 1>>(
448 std::move(aes_ctr_storage));
449 R_UNLESS(aligned_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
450
451 // Set the out storage.
452 *out = std::move(aligned_storage);
453 R_SUCCEED();
454}
455
456Result NcaFileSystemDriver::CreateAesXtsStorage(VirtualFile* out, VirtualFile base_storage,
457 s64 offset) {
458 // Check pre-conditions.
459 ASSERT(out != nullptr);
460 ASSERT(base_storage != nullptr);
461
462 // Create the iv.
463 std::array<u8, AesXtsStorage::IvSize> iv{};
464 AesXtsStorage::MakeAesXtsIv(iv.data(), sizeof(iv), offset, NcaHeader::XtsBlockSize);
465
466 // Make the aes xts storage.
467 const auto* const key1 = m_reader->GetDecryptionKey(NcaHeader::DecryptionKey_AesXts1);
468 const auto* const key2 = m_reader->GetDecryptionKey(NcaHeader::DecryptionKey_AesXts2);
469 auto xts_storage =
470 std::make_shared<AesXtsStorage>(std::move(base_storage), key1, key2, AesXtsStorage::KeySize,
471 iv.data(), AesXtsStorage::IvSize, NcaHeader::XtsBlockSize);
472 R_UNLESS(xts_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
473
474 // Create alignment matching storage.
475 auto aligned_storage = std::make_shared<AlignmentMatchingStorage<NcaHeader::XtsBlockSize, 1>>(
476 std::move(xts_storage));
477 R_UNLESS(aligned_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
478
479 // Set the out storage.
480 *out = std::move(xts_storage);
481 R_SUCCEED();
482}
483
// Builds the decrypted bucket-tree meta storage for a sparse layer.
// base_storage is the body substorage for the sparse region; offset is that
// region's physical offset within the NCA (used to key the counter iv).
Result NcaFileSystemDriver::CreateSparseStorageMetaStorage(VirtualFile* out,
                                                           VirtualFile base_storage, s64 offset,
                                                           const NcaAesCtrUpperIv& upper_iv,
                                                           const NcaSparseInfo& sparse_info) {
    // Validate preconditions.
    ASSERT(out != nullptr);
    ASSERT(base_storage != nullptr);

    // Get the base storage size.
    s64 base_size = base_storage->GetSize();

    // Get the meta extents.
    const auto meta_offset = sparse_info.bucket.offset;
    const auto meta_size = sparse_info.bucket.size;
    // NOTE(review): the bound checked is (meta end - offset) <= base_size, i.e.
    // bucket.offset appears to be NCA-absolute while base_storage starts at
    // `offset` — yet enc_storage below is created at meta_offset within
    // base_storage. Mirrors the reference implementation; confirm against the
    // NcaSparseInfo bucket layout before changing.
    R_UNLESS(meta_offset + meta_size - offset <= base_size, ResultNcaBaseStorageOutOfRangeB);

    // Create the encrypted storage.
    auto enc_storage =
        std::make_shared<OffsetVfsFile>(std::move(base_storage), meta_size, meta_offset);
    R_UNLESS(enc_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);

    // Create the decrypted storage, keyed to absolute offset (offset + meta_offset)
    // with the sparse-layer-adjusted upper iv.
    VirtualFile decrypted_storage;
    R_TRY(this->CreateAesCtrStorage(std::addressof(decrypted_storage), std::move(enc_storage),
                                    offset + meta_offset, sparse_info.MakeAesCtrUpperIv(upper_iv),
                                    AlignmentStorageRequirement::None));

    // Create meta storage.
    auto meta_storage = std::make_shared<OffsetVfsFile>(decrypted_storage, meta_size, 0);
    R_UNLESS(meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);

    // Set the output.
    *out = std::move(meta_storage);
    R_SUCCEED();
}
519
520Result NcaFileSystemDriver::CreateSparseStorageCore(std::shared_ptr<SparseStorage>* out,
521 VirtualFile base_storage, s64 base_size,
522 VirtualFile meta_storage,
523 const NcaSparseInfo& sparse_info,
524 bool external_info) {
525 // Validate preconditions.
526 ASSERT(out != nullptr);
527 ASSERT(base_storage != nullptr);
528 ASSERT(meta_storage != nullptr);
529
530 // Read and verify the bucket tree header.
531 BucketTree::Header header;
532 std::memcpy(std::addressof(header), sparse_info.bucket.header.data(), sizeof(header));
533 R_TRY(header.Verify());
534
535 // Determine storage extents.
536 const auto node_offset = 0;
537 const auto node_size = SparseStorage::QueryNodeStorageSize(header.entry_count);
538 const auto entry_offset = node_offset + node_size;
539 const auto entry_size = SparseStorage::QueryEntryStorageSize(header.entry_count);
540
541 // Create the sparse storage.
542 auto sparse_storage = std::make_shared<SparseStorage>();
543 R_UNLESS(sparse_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
544
545 // Sanity check that we can be doing this.
546 ASSERT(header.entry_count != 0);
547
548 // Initialize the sparse storage.
549 R_TRY(sparse_storage->Initialize(
550 std::make_shared<OffsetVfsFile>(meta_storage, node_size, node_offset),
551 std::make_shared<OffsetVfsFile>(meta_storage, entry_size, entry_offset),
552 header.entry_count));
553
554 // If not external, set the data storage.
555 if (!external_info) {
556 sparse_storage->SetDataStorage(
557 std::make_shared<OffsetVfsFile>(std::move(base_storage), base_size, 0));
558 }
559
560 // Set the output.
561 *out = std::move(sparse_storage);
562 R_SUCCEED();
563}
564
// Creates the sparse storage for fs partition `index` (no meta hash layer):
// body substorage -> decrypted meta storage -> SparseStorage. An empty bucket
// tree yields a SparseStorage initialized only with the fs size. Outputs the
// storage, the fs data offset, and optionally the sparse/meta storages.
Result NcaFileSystemDriver::CreateSparseStorage(VirtualFile* out, s64* out_fs_data_offset,
                                                std::shared_ptr<SparseStorage>* out_sparse_storage,
                                                VirtualFile* out_meta_storage, s32 index,
                                                const NcaAesCtrUpperIv& upper_iv,
                                                const NcaSparseInfo& sparse_info) {
    // Validate preconditions.
    ASSERT(out != nullptr);
    ASSERT(out_fs_data_offset != nullptr);

    // Check the sparse info generation.
    R_UNLESS(sparse_info.generation != 0, ResultInvalidNcaHeader);

    // Read and verify the bucket tree header.
    BucketTree::Header header;
    std::memcpy(std::addressof(header), sparse_info.bucket.header.data(), sizeof(header));
    R_TRY(header.Verify());

    // Determine the storage extents.
    const auto fs_offset = GetFsOffset(*m_reader, index);
    const auto fs_end_offset = GetFsEndOffset(*m_reader, index);
    const auto fs_size = fs_end_offset - fs_offset;

    // Create the sparse storage.
    std::shared_ptr<SparseStorage> sparse_storage;
    if (header.entry_count != 0) {
        // Create the body substorage covering the sparse layer's physical region.
        VirtualFile body_substorage;
        R_TRY(this->CreateBodySubStorage(std::addressof(body_substorage),
                                         sparse_info.physical_offset,
                                         sparse_info.GetPhysicalSize()));

        // Create the meta storage.
        VirtualFile meta_storage;
        R_TRY(this->CreateSparseStorageMetaStorage(std::addressof(meta_storage), body_substorage,
                                                   sparse_info.physical_offset, upper_iv,
                                                   sparse_info));

        // Potentially set the output meta storage.
        if (out_meta_storage != nullptr) {
            *out_meta_storage = meta_storage;
        }

        // Create the sparse storage.
        R_TRY(this->CreateSparseStorageCore(std::addressof(sparse_storage), body_substorage,
                                            sparse_info.GetPhysicalSize(), std::move(meta_storage),
                                            sparse_info, false));
    } else {
        // If there are no entries, there's nothing to actually do.
        sparse_storage = std::make_shared<SparseStorage>();
        R_UNLESS(sparse_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);

        // Size-only initialization: the storage spans fs_size with no entries.
        sparse_storage->Initialize(fs_size);
    }

    // Potentially set the output sparse storage.
    if (out_sparse_storage != nullptr) {
        *out_sparse_storage = sparse_storage;
    }

    // Set the output fs data offset.
    *out_fs_data_offset = fs_offset;

    // Set the output storage.
    *out = std::move(sparse_storage);
    R_SUCCEED();
}
631
// Builds the decrypted, integrity-verified bucket-tree meta storage for a
// sparse layer with a meta hash layer. The decrypted region spans both the
// bucket-tree meta and the following hash data; the meta portion is then
// exposed through an integrity verification storage. Generic meta-hash result
// codes are remapped to sparse-specific ones for the caller.
Result NcaFileSystemDriver::CreateSparseStorageMetaStorageWithVerification(
    VirtualFile* out, VirtualFile* out_layer_info_storage, VirtualFile base_storage, s64 offset,
    const NcaAesCtrUpperIv& upper_iv, const NcaSparseInfo& sparse_info,
    const NcaMetaDataHashDataInfo& meta_data_hash_data_info) {
    // Validate preconditions.
    ASSERT(out != nullptr);
    ASSERT(base_storage != nullptr);

    // Get the base storage size.
    s64 base_size = base_storage->GetSize();

    // Get the meta extents.
    const auto meta_offset = sparse_info.bucket.offset;
    const auto meta_size = sparse_info.bucket.size;
    R_UNLESS(meta_offset + meta_size - offset <= base_size, ResultNcaBaseStorageOutOfRangeB);

    // Get the meta data hash data extents.
    const s64 meta_data_hash_data_offset = meta_data_hash_data_info.offset;
    const s64 meta_data_hash_data_size =
        Common::AlignUp<s64>(meta_data_hash_data_info.size, NcaHeader::CtrBlockSize);
    R_UNLESS(meta_data_hash_data_offset + meta_data_hash_data_size <= base_size,
             ResultNcaBaseStorageOutOfRangeB);

    // Check that the meta is before the hash data.
    R_UNLESS(meta_offset + meta_size <= meta_data_hash_data_offset,
             ResultRomNcaInvalidSparseMetaDataHashDataOffset);

    // Check that offsets are appropriately aligned.
    R_UNLESS(Common::IsAligned<s64>(meta_data_hash_data_offset, NcaHeader::CtrBlockSize),
             ResultRomNcaInvalidSparseMetaDataHashDataOffset);
    R_UNLESS(Common::IsAligned<s64>(meta_offset, NcaHeader::CtrBlockSize),
             ResultInvalidNcaFsHeader);

    // Create the meta storage: one window covering both the bucket-tree meta
    // and the hash data that follows it.
    auto enc_storage = std::make_shared<OffsetVfsFile>(
        std::move(base_storage),
        meta_data_hash_data_offset + meta_data_hash_data_size - meta_offset, meta_offset);
    R_UNLESS(enc_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);

    // Create the decrypted storage.
    VirtualFile decrypted_storage;
    R_TRY(this->CreateAesCtrStorage(std::addressof(decrypted_storage), std::move(enc_storage),
                                    offset + meta_offset, sparse_info.MakeAesCtrUpperIv(upper_iv),
                                    AlignmentStorageRequirement::None));

    // Create the verification storage.
    // Remap generic meta-data-hash failures to their sparse-specific results.
    VirtualFile integrity_storage;
    Result rc = this->CreateIntegrityVerificationStorageForMeta(
        std::addressof(integrity_storage), out_layer_info_storage, std::move(decrypted_storage),
        meta_offset, meta_data_hash_data_info);
    if (rc == ResultInvalidNcaMetaDataHashDataSize) {
        R_THROW(ResultRomNcaInvalidSparseMetaDataHashDataSize);
    }
    if (rc == ResultInvalidNcaMetaDataHashDataHash) {
        R_THROW(ResultRomNcaInvalidSparseMetaDataHashDataHash);
    }
    R_TRY(rc);

    // Create the meta storage: only the bucket-tree meta portion of the
    // verified storage is exposed to the caller.
    auto meta_storage = std::make_shared<OffsetVfsFile>(std::move(integrity_storage), meta_size, 0);
    R_UNLESS(meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);

    // Set the output.
    *out = std::move(meta_storage);
    R_SUCCEED();
}
698
// Creates the sparse storage for fs partition `index` when a sparse meta hash
// layer exists: body substorage (covering meta + hash data) -> verified meta
// storage -> SparseStorage. Only HierarchicalIntegrity meta hashes are
// accepted. Outputs mirror CreateSparseStorage, plus the layer info storage.
Result NcaFileSystemDriver::CreateSparseStorageWithVerification(
    VirtualFile* out, s64* out_fs_data_offset, std::shared_ptr<SparseStorage>* out_sparse_storage,
    VirtualFile* out_meta_storage, VirtualFile* out_layer_info_storage, s32 index,
    const NcaAesCtrUpperIv& upper_iv, const NcaSparseInfo& sparse_info,
    const NcaMetaDataHashDataInfo& meta_data_hash_data_info,
    NcaFsHeader::MetaDataHashType meta_data_hash_type) {
    // Validate preconditions.
    ASSERT(out != nullptr);
    ASSERT(out_fs_data_offset != nullptr);

    // Check the sparse info generation.
    R_UNLESS(sparse_info.generation != 0, ResultInvalidNcaHeader);

    // Read and verify the bucket tree header.
    BucketTree::Header header;
    std::memcpy(std::addressof(header), sparse_info.bucket.header.data(), sizeof(header));
    R_TRY(header.Verify());

    // Determine the storage extents.
    const auto fs_offset = GetFsOffset(*m_reader, index);
    const auto fs_end_offset = GetFsEndOffset(*m_reader, index);
    const auto fs_size = fs_end_offset - fs_offset;

    // Create the sparse storage.
    std::shared_ptr<SparseStorage> sparse_storage;
    if (header.entry_count != 0) {
        // Create the body substorage, extended to cover the meta data hash
        // data region (aligned up to the ctr block size).
        VirtualFile body_substorage;
        R_TRY(this->CreateBodySubStorage(
            std::addressof(body_substorage), sparse_info.physical_offset,
            Common::AlignUp<s64>(static_cast<s64>(meta_data_hash_data_info.offset) +
                                     static_cast<s64>(meta_data_hash_data_info.size),
                                 NcaHeader::CtrBlockSize)));

        // Check the meta data hash type.
        R_UNLESS(meta_data_hash_type == NcaFsHeader::MetaDataHashType::HierarchicalIntegrity,
                 ResultRomNcaInvalidSparseMetaDataHashType);

        // Create the meta storage.
        VirtualFile meta_storage;
        R_TRY(this->CreateSparseStorageMetaStorageWithVerification(
            std::addressof(meta_storage), out_layer_info_storage, body_substorage,
            sparse_info.physical_offset, upper_iv, sparse_info, meta_data_hash_data_info));

        // Potentially set the output meta storage.
        if (out_meta_storage != nullptr) {
            *out_meta_storage = meta_storage;
        }

        // Create the sparse storage.
        R_TRY(this->CreateSparseStorageCore(std::addressof(sparse_storage), body_substorage,
                                            sparse_info.GetPhysicalSize(), std::move(meta_storage),
                                            sparse_info, false));
    } else {
        // If there are no entries, there's nothing to actually do.
        sparse_storage = std::make_shared<SparseStorage>();
        R_UNLESS(sparse_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);

        // Size-only initialization: the storage spans fs_size with no entries.
        sparse_storage->Initialize(fs_size);
    }

    // Potentially set the output sparse storage.
    if (out_sparse_storage != nullptr) {
        *out_sparse_storage = sparse_storage;
    }

    // Set the output fs data offset.
    *out_fs_data_offset = fs_offset;

    // Set the output storage.
    *out = std::move(sparse_storage);
    R_SUCCEED();
}
772
// Builds the meta (bucket tree) storage for an AesCtrEx patch table:
// validates the patch info extents, carves the table region out of
// base_storage, decrypts it (unless encryption is None), and wraps the result
// in a ctr-block alignment-matching storage.
Result NcaFileSystemDriver::CreateAesCtrExStorageMetaStorage(
    VirtualFile* out, VirtualFile base_storage, s64 offset,
    NcaFsHeader::EncryptionType encryption_type, const NcaAesCtrUpperIv& upper_iv,
    const NcaPatchInfo& patch_info) {
    // Validate preconditions.
    ASSERT(out != nullptr);
    ASSERT(base_storage != nullptr);
    ASSERT(encryption_type == NcaFsHeader::EncryptionType::None ||
           encryption_type == NcaFsHeader::EncryptionType::AesCtrEx ||
           encryption_type == NcaFsHeader::EncryptionType::AesCtrExSkipLayerHash);
    ASSERT(patch_info.HasAesCtrExTable());

    // Validate patch info extents: the indirect table must be non-empty and
    // must precede the aes-ctr-ex table.
    R_UNLESS(patch_info.indirect_size > 0, ResultInvalidNcaPatchInfoIndirectSize);
    R_UNLESS(patch_info.aes_ctr_ex_size > 0, ResultInvalidNcaPatchInfoAesCtrExSize);
    R_UNLESS(patch_info.indirect_size + patch_info.indirect_offset <= patch_info.aes_ctr_ex_offset,
             ResultInvalidNcaPatchInfoAesCtrExOffset);

    // Get the base storage size.
    s64 base_size = base_storage->GetSize();

    // Get and validate the meta extents.
    const s64 meta_offset = patch_info.aes_ctr_ex_offset;
    const s64 meta_size =
        Common::AlignUp(static_cast<s64>(patch_info.aes_ctr_ex_size), NcaHeader::XtsBlockSize);
    R_UNLESS(meta_offset + meta_size <= base_size, ResultNcaBaseStorageOutOfRangeB);

    // Create the encrypted storage.
    auto enc_storage =
        std::make_shared<OffsetVfsFile>(std::move(base_storage), meta_size, meta_offset);
    R_UNLESS(enc_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);

    // Create the decrypted storage, keyed to absolute offset (offset + meta_offset).
    VirtualFile decrypted_storage;
    if (encryption_type != NcaFsHeader::EncryptionType::None) {
        R_TRY(this->CreateAesCtrStorage(std::addressof(decrypted_storage), std::move(enc_storage),
                                        offset + meta_offset, upper_iv,
                                        AlignmentStorageRequirement::None));
    } else {
        // If encryption type is none, don't do any decryption.
        decrypted_storage = std::move(enc_storage);
    }

    // Create meta storage.
    auto meta_storage = std::make_shared<OffsetVfsFile>(decrypted_storage, meta_size, 0);
    R_UNLESS(meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);

    // Create an alignment-matching storage.
    using AlignedStorage = AlignmentMatchingStorage<NcaHeader::CtrBlockSize, 1>;
    auto aligned_storage = std::make_shared<AlignedStorage>(std::move(meta_storage));
    R_UNLESS(aligned_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);

    // Set the output.
    *out = std::move(aligned_storage);
    R_SUCCEED();
}
829
830Result NcaFileSystemDriver::CreateAesCtrExStorage(
831 VirtualFile* out, std::shared_ptr<AesCtrCounterExtendedStorage>* out_ext,
832 VirtualFile base_storage, VirtualFile meta_storage, s64 counter_offset,
833 const NcaAesCtrUpperIv& upper_iv, const NcaPatchInfo& patch_info) {
834 // Validate pre-conditions.
835 ASSERT(out != nullptr);
836 ASSERT(base_storage != nullptr);
837 ASSERT(meta_storage != nullptr);
838 ASSERT(patch_info.HasAesCtrExTable());
839
840 // Read the bucket tree header.
841 BucketTree::Header header;
842 std::memcpy(std::addressof(header), patch_info.aes_ctr_ex_header.data(), sizeof(header));
843 R_TRY(header.Verify());
844
845 // Determine the bucket extents.
846 const auto entry_count = header.entry_count;
847 const s64 data_offset = 0;
848 const s64 data_size = patch_info.aes_ctr_ex_offset;
849 const s64 node_offset = 0;
850 const s64 node_size = AesCtrCounterExtendedStorage::QueryNodeStorageSize(entry_count);
851 const s64 entry_offset = node_offset + node_size;
852 const s64 entry_size = AesCtrCounterExtendedStorage::QueryEntryStorageSize(entry_count);
853
854 // Create bucket storages.
855 auto data_storage =
856 std::make_shared<OffsetVfsFile>(std::move(base_storage), data_size, data_offset);
857 auto node_storage = std::make_shared<OffsetVfsFile>(meta_storage, node_size, node_offset);
858 auto entry_storage = std::make_shared<OffsetVfsFile>(meta_storage, entry_size, entry_offset);
859
860 // Get the secure value.
861 const auto secure_value = upper_iv.part.secure_value;
862
863 // Create the aes ctr ex storage.
864 VirtualFile aes_ctr_ex_storage;
865 if (m_reader->HasExternalDecryptionKey()) {
866 // Create the decryptor.
867 std::unique_ptr<AesCtrCounterExtendedStorage::IDecryptor> decryptor;
868 R_TRY(AesCtrCounterExtendedStorage::CreateSoftwareDecryptor(std::addressof(decryptor)));
869
870 // Create the aes ctr ex storage.
871 auto impl_storage = std::make_shared<AesCtrCounterExtendedStorage>();
872 R_UNLESS(impl_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
873
874 // Initialize the aes ctr ex storage.
875 R_TRY(impl_storage->Initialize(m_reader->GetExternalDecryptionKey(), AesCtrStorage::KeySize,
876 secure_value, counter_offset, data_storage, node_storage,
877 entry_storage, entry_count, std::move(decryptor)));
878
879 // Potentially set the output implementation storage.
880 if (out_ext != nullptr) {
881 *out_ext = impl_storage;
882 }
883
884 // Set the implementation storage.
885 aes_ctr_ex_storage = std::move(impl_storage);
886 } else {
887 // Create the software decryptor.
888 std::unique_ptr<AesCtrCounterExtendedStorage::IDecryptor> sw_decryptor;
889 R_TRY(AesCtrCounterExtendedStorage::CreateSoftwareDecryptor(std::addressof(sw_decryptor)));
890
891 // Make the software storage.
892 auto sw_storage = std::make_shared<AesCtrCounterExtendedStorage>();
893 R_UNLESS(sw_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
894
895 // Initialize the software storage.
896 R_TRY(sw_storage->Initialize(m_reader->GetDecryptionKey(NcaHeader::DecryptionKey_AesCtr),
897 AesCtrStorage::KeySize, secure_value, counter_offset,
898 data_storage, node_storage, entry_storage, entry_count,
899 std::move(sw_decryptor)));
900
901 // Potentially set the output implementation storage.
902 if (out_ext != nullptr) {
903 *out_ext = sw_storage;
904 }
905
906 // Set the implementation storage.
907 aes_ctr_ex_storage = std::move(sw_storage);
908 }
909
910 // Create an alignment-matching storage.
911 using AlignedStorage = AlignmentMatchingStorage<NcaHeader::CtrBlockSize, 1>;
912 auto aligned_storage = std::make_shared<AlignedStorage>(std::move(aes_ctr_ex_storage));
913 R_UNLESS(aligned_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
914
915 // Set the output.
916 *out = std::move(aligned_storage);
917 R_SUCCEED();
918}
919
920Result NcaFileSystemDriver::CreateIndirectStorageMetaStorage(VirtualFile* out,
921 VirtualFile base_storage,
922 const NcaPatchInfo& patch_info) {
923 // Validate preconditions.
924 ASSERT(out != nullptr);
925 ASSERT(base_storage != nullptr);
926 ASSERT(patch_info.HasIndirectTable());
927
928 // Get the base storage size.
929 s64 base_size = base_storage->GetSize();
930
931 // Check that we're within range.
932 R_UNLESS(patch_info.indirect_offset + patch_info.indirect_size <= base_size,
933 ResultNcaBaseStorageOutOfRangeE);
934
935 // Create the meta storage.
936 auto meta_storage = std::make_shared<OffsetVfsFile>(base_storage, patch_info.indirect_size,
937 patch_info.indirect_offset);
938 R_UNLESS(meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
939
940 // Set the output.
941 *out = std::move(meta_storage);
942 R_SUCCEED();
943}
944
Result NcaFileSystemDriver::CreateIndirectStorage(
    VirtualFile* out, std::shared_ptr<IndirectStorage>* out_ind, VirtualFile base_storage,
    VirtualFile original_data_storage, VirtualFile meta_storage, const NcaPatchInfo& patch_info) {
    // Validate preconditions.
    ASSERT(out != nullptr);
    ASSERT(base_storage != nullptr);
    ASSERT(meta_storage != nullptr);
    ASSERT(patch_info.HasIndirectTable());

    // Read the bucket tree header from the patch info.
    BucketTree::Header header;
    std::memcpy(std::addressof(header), patch_info.indirect_header.data(), sizeof(header));
    R_TRY(header.Verify());

    // Determine the storage sizes; the node and entry tables must fit inside the
    // indirect table region.
    const auto node_size = IndirectStorage::QueryNodeStorageSize(header.entry_count);
    const auto entry_size = IndirectStorage::QueryEntryStorageSize(header.entry_count);
    R_UNLESS(node_size + entry_size <= patch_info.indirect_size,
             ResultInvalidNcaIndirectStorageOutOfRange);

    // Get the indirect data size; the patch data occupies [0, indirect_offset) of the
    // base storage, preceding the table itself.
    const s64 indirect_data_size = patch_info.indirect_offset;
    ASSERT(Common::IsAligned(indirect_data_size, NcaHeader::XtsBlockSize));

    // Create the indirect data storage.
    auto indirect_data_storage =
        std::make_shared<OffsetVfsFile>(base_storage, indirect_data_size, 0);
    R_UNLESS(indirect_data_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);

    // Create the indirect storage.
    auto indirect_storage = std::make_shared<IndirectStorage>();
    R_UNLESS(indirect_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);

    // Initialize the indirect storage with the node and entry tables, which are laid
    // out back-to-back at the start of the meta storage.
    R_TRY(indirect_storage->Initialize(
        std::make_shared<OffsetVfsFile>(meta_storage, node_size, 0),
        std::make_shared<OffsetVfsFile>(meta_storage, entry_size, node_size), header.entry_count));

    // Get the original data size.
    s64 original_data_size = original_data_storage->GetSize();

    // Set the indirect storages: index 0 resolves reads from the original data,
    // index 1 resolves reads from the patch's indirect data.
    indirect_storage->SetStorage(
        0, std::make_shared<OffsetVfsFile>(original_data_storage, original_data_size, 0));
    indirect_storage->SetStorage(
        1, std::make_shared<OffsetVfsFile>(indirect_data_storage, indirect_data_size, 0));

    // If necessary, set the output indirect storage.
    if (out_ind != nullptr) {
        *out_ind = indirect_storage;
    }

    // Set the output.
    *out = std::move(indirect_storage);
    R_SUCCEED();
}
1001
Result NcaFileSystemDriver::CreatePatchMetaStorage(
    VirtualFile* out_aes_ctr_ex_meta, VirtualFile* out_indirect_meta,
    VirtualFile* out_layer_info_storage, VirtualFile base_storage, s64 offset,
    const NcaAesCtrUpperIv& upper_iv, const NcaPatchInfo& patch_info,
    const NcaMetaDataHashDataInfo& meta_data_hash_data_info) {
    // Validate preconditions.
    ASSERT(out_aes_ctr_ex_meta != nullptr);
    ASSERT(out_indirect_meta != nullptr);
    ASSERT(base_storage != nullptr);
    ASSERT(patch_info.HasAesCtrExTable());
    ASSERT(patch_info.HasIndirectTable());
    ASSERT(Common::IsAligned<s64>(patch_info.aes_ctr_ex_size, NcaHeader::XtsBlockSize));

    // Validate patch info extents: the indirect table, aes-ctr-ex table, and
    // metadata hash data must be laid out in that order without overlap.
    R_UNLESS(patch_info.indirect_size > 0, ResultInvalidNcaPatchInfoIndirectSize);
    R_UNLESS(patch_info.aes_ctr_ex_size >= 0, ResultInvalidNcaPatchInfoAesCtrExSize);
    R_UNLESS(patch_info.indirect_size + patch_info.indirect_offset <= patch_info.aes_ctr_ex_offset,
             ResultInvalidNcaPatchInfoAesCtrExOffset);
    R_UNLESS(patch_info.aes_ctr_ex_offset + patch_info.aes_ctr_ex_size <=
                 meta_data_hash_data_info.offset,
             ResultRomNcaInvalidPatchMetaDataHashDataOffset);

    // Get the base storage size.
    s64 base_size = base_storage->GetSize();

    // Check that extents remain within range.
    R_UNLESS(patch_info.indirect_offset + patch_info.indirect_size <= base_size,
             ResultNcaBaseStorageOutOfRangeE);
    R_UNLESS(patch_info.aes_ctr_ex_offset + patch_info.aes_ctr_ex_size <= base_size,
             ResultNcaBaseStorageOutOfRangeB);

    // Check that metadata hash data extents remain within range.
    const s64 meta_data_hash_data_offset = meta_data_hash_data_info.offset;
    const s64 meta_data_hash_data_size =
        Common::AlignUp<s64>(meta_data_hash_data_info.size, NcaHeader::CtrBlockSize);
    R_UNLESS(meta_data_hash_data_offset + meta_data_hash_data_size <= base_size,
             ResultNcaBaseStorageOutOfRangeB);

    // Create the encrypted storage: a view spanning from the start of the indirect
    // table through the end of the metadata hash data.
    auto enc_storage = std::make_shared<OffsetVfsFile>(
        std::move(base_storage),
        meta_data_hash_data_offset + meta_data_hash_data_size - patch_info.indirect_offset,
        patch_info.indirect_offset);
    R_UNLESS(enc_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);

    // Create the decrypted storage.
    VirtualFile decrypted_storage;
    R_TRY(this->CreateAesCtrStorage(std::addressof(decrypted_storage), std::move(enc_storage),
                                    offset + patch_info.indirect_offset, upper_iv,
                                    AlignmentStorageRequirement::None));

    // Create the verification storage, remapping the generic meta-hash errors to
    // their patch-specific equivalents.
    VirtualFile integrity_storage;
    Result rc = this->CreateIntegrityVerificationStorageForMeta(
        std::addressof(integrity_storage), out_layer_info_storage, std::move(decrypted_storage),
        patch_info.indirect_offset, meta_data_hash_data_info);
    if (rc == ResultInvalidNcaMetaDataHashDataSize) {
        R_THROW(ResultRomNcaInvalidPatchMetaDataHashDataSize);
    }
    if (rc == ResultInvalidNcaMetaDataHashDataHash) {
        R_THROW(ResultRomNcaInvalidPatchMetaDataHashDataHash);
    }
    R_TRY(rc);

    // Create the indirect meta storage. Offsets below are relative to the integrity
    // storage, which itself begins at indirect_offset within the fs — hence the
    // self-cancelling expression, kept for symmetry with the aes-ctr-ex view below.
    auto indirect_meta_storage =
        std::make_shared<OffsetVfsFile>(integrity_storage, patch_info.indirect_size,
                                        patch_info.indirect_offset - patch_info.indirect_offset);
    R_UNLESS(indirect_meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);

    // Create the aes ctr ex meta storage.
    auto aes_ctr_ex_meta_storage =
        std::make_shared<OffsetVfsFile>(integrity_storage, patch_info.aes_ctr_ex_size,
                                        patch_info.aes_ctr_ex_offset - patch_info.indirect_offset);
    R_UNLESS(aes_ctr_ex_meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);

    // Set the output.
    *out_aes_ctr_ex_meta = std::move(aes_ctr_ex_meta_storage);
    *out_indirect_meta = std::move(indirect_meta_storage);
    R_SUCCEED();
}
1083
Result NcaFileSystemDriver::CreateSha256Storage(
    VirtualFile* out, VirtualFile base_storage,
    const NcaFsHeader::HashData::HierarchicalSha256Data& hash_data) {
    // Validate preconditions.
    ASSERT(out != nullptr);
    ASSERT(base_storage != nullptr);

    // Define storage types.
    using VerificationStorage = HierarchicalSha256Storage;
    using AlignedStorage = AlignmentMatchingStoragePooledBuffer<1>;

    // Validate the hash data.
    R_UNLESS(Common::IsPowerOfTwo(hash_data.hash_block_size),
             ResultInvalidHierarchicalSha256BlockSize);
    R_UNLESS(hash_data.hash_layer_count == VerificationStorage::LayerCount - 1,
             ResultInvalidHierarchicalSha256LayerCount);

    // Get the regions: region 0 holds the hash table, region 1 the actual data.
    const auto& hash_region = hash_data.hash_layer_region[0];
    const auto& data_region = hash_data.hash_layer_region[1];

    // Determine buffer sizes: room for the entire hash table plus a small cache of
    // data blocks.
    constexpr s32 CacheBlockCount = 2;
    const auto hash_buffer_size = static_cast<size_t>(hash_region.size);
    const auto cache_buffer_size = CacheBlockCount * hash_data.hash_block_size;
    const auto total_buffer_size = hash_buffer_size + cache_buffer_size;

    // Make a buffer holder storage, which keeps the work buffer alive for as long
    // as the storage chain itself.
    auto buffer_hold_storage = std::make_shared<MemoryResourceBufferHoldStorage>(
        std::move(base_storage), total_buffer_size);
    R_UNLESS(buffer_hold_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
    R_UNLESS(buffer_hold_storage->IsValid(), ResultAllocationMemoryFailedInNcaFileSystemDriverI);

    // Get storage size.
    s64 base_size = buffer_hold_storage->GetSize();

    // Check that both regions lie within the storage.
    R_UNLESS(hash_region.offset + hash_region.size <= base_size, ResultNcaBaseStorageOutOfRangeC);
    R_UNLESS(data_region.offset + data_region.size <= base_size, ResultNcaBaseStorageOutOfRangeC);

    // Create the master hash storage, backed by the master hash value from the header.
    auto master_hash_storage =
        std::make_shared<ArrayVfsFile<sizeof(Hash)>>(hash_data.fs_data_master_hash.value);

    // Make the verification storage.
    auto verification_storage = std::make_shared<VerificationStorage>();
    R_UNLESS(verification_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);

    // Make layer storages, ordered master hash, then hash table, then data.
    std::array<VirtualFile, VerificationStorage::LayerCount> layer_storages{
        std::make_shared<OffsetVfsFile>(master_hash_storage, sizeof(Hash), 0),
        std::make_shared<OffsetVfsFile>(buffer_hold_storage, hash_region.size, hash_region.offset),
        std::make_shared<OffsetVfsFile>(buffer_hold_storage, data_region.size, data_region.offset),
    };

    // Initialize the verification storage.
    R_TRY(verification_storage->Initialize(layer_storages.data(), VerificationStorage::LayerCount,
                                           hash_data.hash_block_size,
                                           buffer_hold_storage->GetBuffer(), hash_buffer_size));

    // Make the aligned storage, so accesses happen in hash-block-sized units.
    auto aligned_storage = std::make_shared<AlignedStorage>(std::move(verification_storage),
                                                            hash_data.hash_block_size);
    R_UNLESS(aligned_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);

    // Set the output.
    *out = std::move(aligned_storage);
    R_SUCCEED();
}
1153
1154Result NcaFileSystemDriver::CreateIntegrityVerificationStorage(
1155 VirtualFile* out, VirtualFile base_storage,
1156 const NcaFsHeader::HashData::IntegrityMetaInfo& meta_info) {
1157 R_RETURN(this->CreateIntegrityVerificationStorageImpl(
1158 out, base_storage, meta_info, 0, IntegrityDataCacheCount, IntegrityHashCacheCount,
1159 HierarchicalIntegrityVerificationStorage::GetDefaultDataCacheBufferLevel(
1160 meta_info.level_hash_info.max_layers)));
1161}
1162
Result NcaFileSystemDriver::CreateIntegrityVerificationStorageForMeta(
    VirtualFile* out, VirtualFile* out_layer_info_storage, VirtualFile base_storage, s64 offset,
    const NcaMetaDataHashDataInfo& meta_data_hash_data_info) {
    // Validate preconditions.
    ASSERT(out != nullptr);

    // Check the meta data hash data size.
    R_UNLESS(meta_data_hash_data_info.size == sizeof(NcaMetaDataHashData),
             ResultInvalidNcaMetaDataHashDataSize);

    // Read the meta data hash data. Offsets in meta_data_hash_data_info are absolute
    // within the fs, while base_storage begins at `offset` — hence the subtraction.
    // NOTE(review): ReadObject's result is not checked here; a short read would leave
    // meta_data_hash_data partially uninitialized — confirm this matches upstream.
    NcaMetaDataHashData meta_data_hash_data;
    base_storage->ReadObject(std::addressof(meta_data_hash_data),
                             meta_data_hash_data_info.offset - offset);

    // Set the out layer info storage, if necessary: a view spanning from the layer
    // info to the end of the hash data region.
    if (out_layer_info_storage != nullptr) {
        auto layer_info_storage = std::make_shared<OffsetVfsFile>(
            base_storage,
            meta_data_hash_data_info.offset + meta_data_hash_data_info.size -
                meta_data_hash_data.layer_info_offset,
            meta_data_hash_data.layer_info_offset - offset);
        R_UNLESS(layer_info_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);

        *out_layer_info_storage = std::move(layer_info_storage);
    }

    // Create the meta storage: everything in base_storage preceding the hash data.
    auto meta_storage = std::make_shared<OffsetVfsFile>(
        std::move(base_storage), meta_data_hash_data_info.offset - offset, 0);
    R_UNLESS(meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);

    // Create the integrity verification storage, using the meta cache configuration
    // and a buffer level of 0.
    R_RETURN(this->CreateIntegrityVerificationStorageImpl(
        out, std::move(meta_storage), meta_data_hash_data.integrity_meta_info,
        meta_data_hash_data.layer_info_offset - offset, IntegrityDataCacheCountForMeta,
        IntegrityHashCacheCountForMeta, 0));
}
1201
Result NcaFileSystemDriver::CreateIntegrityVerificationStorageImpl(
    VirtualFile* out, VirtualFile base_storage,
    const NcaFsHeader::HashData::IntegrityMetaInfo& meta_info, s64 layer_info_offset,
    int max_data_cache_entries, int max_hash_cache_entries, s8 buffer_level) {
    // Validate preconditions.
    ASSERT(out != nullptr);
    ASSERT(base_storage != nullptr);
    ASSERT(layer_info_offset >= 0);

    // Define storage types.
    using VerificationStorage = HierarchicalIntegrityVerificationStorage;
    using StorageInfo = VerificationStorage::HierarchicalStorageInformation;

    // Copy the level hash info out of the meta info before validating it.
    HierarchicalIntegrityVerificationInformation level_hash_info;
    std::memcpy(std::addressof(level_hash_info), std::addressof(meta_info.level_hash_info),
                sizeof(level_hash_info));

    // The layer count must lie within the supported range.
    R_UNLESS(IntegrityMinLayerCount <= level_hash_info.max_layers,
             ResultInvalidNcaHierarchicalIntegrityVerificationLayerCount);
    R_UNLESS(level_hash_info.max_layers <= IntegrityMaxLayerCount,
             ResultInvalidNcaHierarchicalIntegrityVerificationLayerCount);

    // Get the base storage size.
    s64 base_storage_size = base_storage->GetSize();

    // Create storage info: one bounds-checked view per hash layer, each shifted by
    // layer_info_offset within the base storage. Slot 0 of storage_info is not set
    // here; slots 1..max_layers-2 receive the hash layers.
    StorageInfo storage_info;
    for (s32 i = 0; i < static_cast<s32>(level_hash_info.max_layers - 2); ++i) {
        const auto& layer_info = level_hash_info.info[i];
        R_UNLESS(layer_info_offset + layer_info.offset + layer_info.size <= base_storage_size,
                 ResultNcaBaseStorageOutOfRangeD);

        storage_info[i + 1] = std::make_shared<OffsetVfsFile>(
            base_storage, layer_info.size, layer_info_offset + layer_info.offset);
    }

    // Set the last layer info (the data layer). When a layer_info_offset is in use
    // (the meta case), the data layer begins at the start of the base storage and
    // must not run into the layer info region.
    const auto& layer_info = level_hash_info.info[level_hash_info.max_layers - 2];
    const s64 last_layer_info_offset = layer_info_offset > 0 ? 0LL : layer_info.offset.Get();
    R_UNLESS(last_layer_info_offset + layer_info.size <= base_storage_size,
             ResultNcaBaseStorageOutOfRangeD);
    if (layer_info_offset > 0) {
        R_UNLESS(last_layer_info_offset + layer_info.size <= layer_info_offset,
                 ResultRomNcaInvalidIntegrityLayerInfoOffset);
    }
    storage_info.SetDataStorage(std::make_shared<OffsetVfsFile>(
        std::move(base_storage), layer_info.size, last_layer_info_offset));

    // Make the integrity romfs storage.
    auto integrity_storage = std::make_shared<IntegrityRomFsStorage>();
    R_UNLESS(integrity_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);

    // Initialize the integrity storage with the requested cache configuration.
    R_TRY(integrity_storage->Initialize(level_hash_info, meta_info.master_hash, storage_info,
                                        max_data_cache_entries, max_hash_cache_entries,
                                        buffer_level));

    // Set the output.
    *out = std::move(integrity_storage);
    R_SUCCEED();
}
1264
1265Result NcaFileSystemDriver::CreateRegionSwitchStorage(VirtualFile* out,
1266 const NcaFsHeaderReader* header_reader,
1267 VirtualFile inside_storage,
1268 VirtualFile outside_storage) {
1269 // Check pre-conditions.
1270 ASSERT(header_reader->GetHashType() == NcaFsHeader::HashType::HierarchicalIntegrityHash);
1271
1272 // Create the region.
1273 RegionSwitchStorage::Region region = {};
1274 R_TRY(header_reader->GetHashTargetOffset(std::addressof(region.size)));
1275
1276 // Create the region switch storage.
1277 auto region_switch_storage = std::make_shared<RegionSwitchStorage>(
1278 std::move(inside_storage), std::move(outside_storage), region);
1279 R_UNLESS(region_switch_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
1280
1281 // Set the output.
1282 *out = std::move(region_switch_storage);
1283 R_SUCCEED();
1284}
1285
1286Result NcaFileSystemDriver::CreateCompressedStorage(VirtualFile* out,
1287 std::shared_ptr<CompressedStorage>* out_cmp,
1288 VirtualFile* out_meta, VirtualFile base_storage,
1289 const NcaCompressionInfo& compression_info) {
1290 R_RETURN(this->CreateCompressedStorage(out, out_cmp, out_meta, std::move(base_storage),
1291 compression_info, m_reader->GetDecompressor()));
1292}
1293
Result NcaFileSystemDriver::CreateCompressedStorage(VirtualFile* out,
                                                    std::shared_ptr<CompressedStorage>* out_cmp,
                                                    VirtualFile* out_meta, VirtualFile base_storage,
                                                    const NcaCompressionInfo& compression_info,
                                                    GetDecompressorFunction get_decompressor) {
    // Check pre-conditions.
    ASSERT(out != nullptr);
    ASSERT(base_storage != nullptr);
    ASSERT(get_decompressor != nullptr);

    // Read and verify the bucket tree header.
    BucketTree::Header header;
    std::memcpy(std::addressof(header), compression_info.bucket.header.data(), sizeof(header));
    R_TRY(header.Verify());

    // Determine the storage extents; the node and entry tables must fit within the
    // compression bucket.
    const auto table_offset = compression_info.bucket.offset;
    const auto table_size = compression_info.bucket.size;
    const auto node_size = CompressedStorage::QueryNodeStorageSize(header.entry_count);
    const auto entry_size = CompressedStorage::QueryEntryStorageSize(header.entry_count);
    R_UNLESS(node_size + entry_size <= table_size, ResultInvalidCompressedStorageSize);

    // If we should, set the output meta storage (a view of the whole table region).
    if (out_meta != nullptr) {
        auto meta_storage = std::make_shared<OffsetVfsFile>(base_storage, table_size, table_offset);
        R_UNLESS(meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);

        *out_meta = std::move(meta_storage);
    }

    // Allocate the compressed storage.
    auto compressed_storage = std::make_shared<CompressedStorage>();
    R_UNLESS(compressed_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);

    // Initialize the compressed storage. The data region precedes the table; the
    // node and entry tables are laid out back-to-back at table_offset. The trailing
    // size constants are cache/buffer sizing parameters — see
    // CompressedStorage::Initialize for their exact semantics.
    R_TRY(compressed_storage->Initialize(
        std::make_shared<OffsetVfsFile>(base_storage, table_offset, 0),
        std::make_shared<OffsetVfsFile>(base_storage, node_size, table_offset),
        std::make_shared<OffsetVfsFile>(base_storage, entry_size, table_offset + node_size),
        header.entry_count, 64_KiB, 640_KiB, get_decompressor, 16_KiB, 16_KiB, 32));

    // Potentially set the output compressed storage.
    if (out_cmp) {
        *out_cmp = compressed_storage;
    }

    // Set the output.
    *out = std::move(compressed_storage);
    R_SUCCEED();
}
1344
1345} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_nca_file_system_driver.h b/src/core/file_sys/fssystem/fssystem_nca_file_system_driver.h
new file mode 100644
index 000000000..d317b35ac
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_nca_file_system_driver.h
@@ -0,0 +1,360 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "core/file_sys/fssystem/fssystem_compression_common.h"
7#include "core/file_sys/fssystem/fssystem_nca_header.h"
8#include "core/file_sys/vfs.h"
9
10namespace FileSys {
11
// Storage implementations defined elsewhere in fssystem; referenced here by pointer.
class CompressedStorage;
class AesCtrCounterExtendedStorage;
class IndirectStorage;
class SparseStorage;

struct NcaCryptoConfiguration;

// Key-derivation callback: writes a key for key_type into dst_key, derived from src_key.
using KeyGenerationFunction = void (*)(void* dst_key, size_t dst_key_size, const void* src_key,
                                       size_t src_key_size, s32 key_type);
// Verification callback for the NCA header's Sign1 signature at the given key generation.
using VerifySign1Function = bool (*)(const void* sig, size_t sig_size, const void* data,
                                     size_t data_size, u8 generation);
23
// Embedder-supplied cryptographic configuration for reading NCAs: key sources,
// signing-key moduli, and callbacks for key derivation and signature verification.
struct NcaCryptoConfiguration {
    // RSA-2048 parameter sizes, in bytes.
    static constexpr size_t Rsa2048KeyModulusSize = 2048 / 8;
    static constexpr size_t Rsa2048KeyPublicExponentSize = 3;
    static constexpr size_t Rsa2048KeyPrivateExponentSize = Rsa2048KeyModulusSize;

    // AES-128 key size, in bytes.
    static constexpr size_t Aes128KeySize = 128 / 8;

    // Highest supported generation of the header Sign1 signing key.
    static constexpr size_t Header1SignatureKeyGenerationMax = 1;

    static constexpr s32 KeyAreaEncryptionKeyIndexCount = 3;
    static constexpr s32 HeaderEncryptionKeyCount = 2;

    // Key index reserved to mean "use the all-zero key" (see GetKeyTypeValue).
    static constexpr u8 KeyAreaEncryptionKeyIndexZeroKey = 0xFF;

    static constexpr size_t KeyGenerationMax = 32;

    // One RSA modulus per Sign1 key generation (0..Header1SignatureKeyGenerationMax).
    const u8* header_1_sign_key_moduli[Header1SignatureKeyGenerationMax + 1];
    u8 header_1_sign_key_public_exponent[Rsa2048KeyPublicExponentSize];
    // One key source per key-area encryption key index.
    u8 key_area_encryption_key_source[KeyAreaEncryptionKeyIndexCount][Aes128KeySize];
    u8 header_encryption_key_source[Aes128KeySize];
    u8 header_encrypted_encryption_keys[HeaderEncryptionKeyCount][Aes128KeySize];
    // Callbacks supplied by the embedder.
    KeyGenerationFunction generate_key;
    VerifySign1Function verify_sign1;
    bool is_plaintext_header_available;
    bool is_available_sw_key;
};
// Must remain trivial so it can be statically initialized and copied byte-wise.
static_assert(std::is_trivial_v<NcaCryptoConfiguration>);
51
// Compression support configuration: resolves the decompressor function used for
// compressed NCA storage.
struct NcaCompressionConfiguration {
    GetDecompressorFunction get_decompressor;
};
// Must remain trivial so it can be statically initialized and copied byte-wise.
static_assert(std::is_trivial_v<NcaCompressionConfiguration>);
56
// Total number of key-area encryption keys: one per (index, generation) pair.
constexpr inline s32 KeyAreaEncryptionKeyCount =
    NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount *
    NcaCryptoConfiguration::KeyGenerationMax;

// Logical key identifiers. Non-negative values below KeyAreaEncryptionKeyCount
// index the key-area encryption key table; values at or above it name special keys.
enum class KeyType : s32 {
    ZeroKey = -2,    // All-zero key, selected by KeyAreaEncryptionKeyIndexZeroKey.
    InvalidKey = -1, // Sentinel for an out-of-range key index.
    NcaHeaderKey1 = KeyAreaEncryptionKeyCount + 0,
    NcaHeaderKey2 = KeyAreaEncryptionKeyCount + 1,
    NcaExternalKey = KeyAreaEncryptionKeyCount + 2,
    SaveDataDeviceUniqueMac = KeyAreaEncryptionKeyCount + 3,
    SaveDataSeedUniqueMac = KeyAreaEncryptionKeyCount + 4,
    SaveDataTransferMac = KeyAreaEncryptionKeyCount + 5,
};
71
72constexpr inline bool IsInvalidKeyTypeValue(s32 key_type) {
73 return key_type < 0;
74}
75
76constexpr inline s32 GetKeyTypeValue(u8 key_index, u8 key_generation) {
77 if (key_index == NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexZeroKey) {
78 return static_cast<s32>(KeyType::ZeroKey);
79 }
80
81 if (key_index >= NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount) {
82 return static_cast<s32>(KeyType::InvalidKey);
83 }
84
85 return NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount * key_generation + key_index;
86}
87
// Parses an NCA's header and exposes its fields, its decryption keys, and the
// shared body storage. Populated by Initialize(); accessors assume it succeeded.
class NcaReader {
    YUZU_NON_COPYABLE(NcaReader);
    YUZU_NON_MOVEABLE(NcaReader);

private:
    NcaHeader m_header;
    // One AES-128 key per decryption-key slot.
    u8 m_decryption_keys[NcaHeader::DecryptionKey_Count][NcaCryptoConfiguration::Aes128KeySize];
    VirtualFile m_body_storage;
    VirtualFile m_header_storage;
    // Externally supplied key; set via SetExternalDecryptionKey().
    u8 m_external_decryption_key[NcaCryptoConfiguration::Aes128KeySize];
    bool m_is_software_aes_prioritized;
    bool m_is_available_sw_key;
    NcaHeader::EncryptionType m_header_encryption_type;
    bool m_is_header_sign1_signature_valid;
    GetDecompressorFunction m_get_decompressor;

public:
    NcaReader();
    ~NcaReader();

    // Reads and validates the NCA header from base_storage using the given
    // crypto and compression configurations.
    Result Initialize(VirtualFile base_storage, const NcaCryptoConfiguration& crypto_cfg,
                      const NcaCompressionConfiguration& compression_cfg);

    VirtualFile GetSharedBodyStorage();

    // Header field accessors.
    u32 GetMagic() const;
    NcaHeader::DistributionType GetDistributionType() const;
    NcaHeader::ContentType GetContentType() const;
    u8 GetHeaderSign1KeyGeneration() const;
    u8 GetKeyGeneration() const;
    u8 GetKeyIndex() const;
    u64 GetContentSize() const;
    u64 GetProgramId() const;
    u32 GetContentIndex() const;
    u32 GetSdkAddonVersion() const;
    void GetRightsId(u8* dst, size_t dst_size) const;

    // Per-filesystem queries; index selects one of the NCA's fs entries.
    bool HasFsInfo(s32 index) const;
    s32 GetFsCount() const;
    const Hash& GetFsHeaderHash(s32 index) const;
    void GetFsHeaderHash(Hash* dst, s32 index) const;
    void GetFsInfo(NcaHeader::FsInfo* dst, s32 index) const;
    u64 GetFsOffset(s32 index) const;
    u64 GetFsEndOffset(s32 index) const;
    u64 GetFsSize(s32 index) const;

    // Key management.
    void GetEncryptedKey(void* dst, size_t size) const;
    const void* GetDecryptionKey(s32 index) const;
    bool HasValidInternalKey() const;
    bool HasInternalDecryptionKeyForAesHw() const;
    bool IsSoftwareAesPrioritized() const;
    void PrioritizeSoftwareAes();
    bool IsAvailableSwKey() const;
    bool HasExternalDecryptionKey() const;
    const void* GetExternalDecryptionKey() const;
    void SetExternalDecryptionKey(const void* src, size_t size);

    // Raw header access.
    void GetRawData(void* dst, size_t dst_size) const;
    NcaHeader::EncryptionType GetEncryptionType() const;
    Result ReadHeader(NcaFsHeader* dst, s32 index) const;

    GetDecompressorFunction GetDecompressor() const;

    bool GetHeaderSign1Valid() const;

    void GetHeaderSign2(void* dst, size_t size) const;
};
151
// Reads and caches the NcaFsHeader for a single filesystem index within an NCA.
class NcaFsHeaderReader {
    YUZU_NON_COPYABLE(NcaFsHeaderReader);
    YUZU_NON_MOVEABLE(NcaFsHeaderReader);

private:
    NcaFsHeader m_data; // Cached copy of the fs header.
    s32 m_fs_index;     // Index of the fs within the NCA; -1 while uninitialized.

public:
    NcaFsHeaderReader() : m_fs_index(-1) {
        std::memset(std::addressof(m_data), 0, sizeof(m_data));
    }

    // Reads fs header `index` from the given reader; IsInitialized() reports success.
    Result Initialize(const NcaReader& reader, s32 index);
    bool IsInitialized() const {
        return m_fs_index >= 0;
    }

    // Copies the raw header bytes into dst.
    void GetRawData(void* dst, size_t dst_size) const;

    // Accessors for fields of the cached header.
    NcaFsHeader::HashData& GetHashData();
    const NcaFsHeader::HashData& GetHashData() const;
    u16 GetVersion() const;
    s32 GetFsIndex() const;
    NcaFsHeader::FsType GetFsType() const;
    NcaFsHeader::HashType GetHashType() const;
    NcaFsHeader::EncryptionType GetEncryptionType() const;
    NcaPatchInfo& GetPatchInfo();
    const NcaPatchInfo& GetPatchInfo() const;
    const NcaAesCtrUpperIv GetAesCtrUpperIv() const;

    bool IsSkipLayerHashEncryption() const;
    Result GetHashTargetOffset(s64* out) const;

    // Sparse layer info.
    bool ExistsSparseLayer() const;
    NcaSparseInfo& GetSparseInfo();
    const NcaSparseInfo& GetSparseInfo() const;

    // Compression layer info.
    bool ExistsCompressionLayer() const;
    NcaCompressionInfo& GetCompressionInfo();
    const NcaCompressionInfo& GetCompressionInfo() const;

    // Patch meta-hash layer info.
    bool ExistsPatchMetaHashLayer() const;
    NcaMetaDataHashDataInfo& GetPatchMetaDataHashDataInfo();
    const NcaMetaDataHashDataInfo& GetPatchMetaDataHashDataInfo() const;
    NcaFsHeader::MetaDataHashType GetPatchMetaHashType() const;

    // Sparse meta-hash layer info.
    bool ExistsSparseMetaHashLayer() const;
    NcaMetaDataHashDataInfo& GetSparseMetaDataHashDataInfo();
    const NcaMetaDataHashDataInfo& GetSparseMetaDataHashDataInfo() const;
    NcaFsHeader::MetaDataHashType GetSparseMetaHashType() const;
};
204
// Builds the layered storage stack (decryption, sparse, indirect/patch,
// compression, hash verification) required to read one filesystem entry of
// an NCA, optionally relative to an original (pre-patch) NCA.
class NcaFileSystemDriver {
    YUZU_NON_COPYABLE(NcaFileSystemDriver);
    YUZU_NON_MOVEABLE(NcaFileSystemDriver);

public:
    // Holds every intermediate storage created while opening an fs entry, so
    // the caller can keep the intermediate layers alive alongside the final
    // storage returned by OpenStorageWithContext().
    struct StorageContext {
        bool open_raw_storage;
        VirtualFile body_substorage;
        std::shared_ptr<SparseStorage> current_sparse_storage;
        VirtualFile sparse_storage_meta_storage;
        std::shared_ptr<SparseStorage> original_sparse_storage;
        void* external_current_sparse_storage;
        void* external_original_sparse_storage;
        VirtualFile aes_ctr_ex_storage_meta_storage;
        VirtualFile aes_ctr_ex_storage_data_storage;
        std::shared_ptr<AesCtrCounterExtendedStorage> aes_ctr_ex_storage;
        VirtualFile indirect_storage_meta_storage;
        std::shared_ptr<IndirectStorage> indirect_storage;
        VirtualFile fs_data_storage;
        VirtualFile compressed_storage_meta_storage;
        std::shared_ptr<CompressedStorage> compressed_storage;

        VirtualFile patch_layer_info_storage;
        VirtualFile sparse_layer_info_storage;

        VirtualFile external_original_storage;
    };

private:
    enum class AlignmentStorageRequirement {
        CacheBlockSize = 0,
        None = 1,
    };

private:
    // Reader for the original (pre-patch) NCA; empty when no patch is involved.
    std::shared_ptr<NcaReader> m_original_reader;
    // Reader for the NCA being opened; never null (asserted in the constructors).
    std::shared_ptr<NcaReader> m_reader;

public:
    static Result SetupFsHeaderReader(NcaFsHeaderReader* out, const NcaReader& reader,
                                      s32 fs_index);

public:
    // NOTE(review): single-argument constructor is implicit; confirm whether
    // callers rely on the conversion before considering `explicit`.
    NcaFileSystemDriver(std::shared_ptr<NcaReader> reader) : m_original_reader(), m_reader(reader) {
        ASSERT(m_reader != nullptr);
    }

    NcaFileSystemDriver(std::shared_ptr<NcaReader> original_reader,
                        std::shared_ptr<NcaReader> reader)
        : m_original_reader(original_reader), m_reader(reader) {
        ASSERT(m_reader != nullptr);
    }

    Result OpenStorageWithContext(VirtualFile* out, NcaFsHeaderReader* out_header_reader,
                                  s32 fs_index, StorageContext* ctx);

    // Convenience wrapper: opens the storage with a throwaway context, for
    // callers that do not need to retain the intermediate layers.
    Result OpenStorage(VirtualFile* out, NcaFsHeaderReader* out_header_reader, s32 fs_index) {
        // Create a storage context.
        StorageContext ctx{};

        // Open the storage.
        R_RETURN(OpenStorageWithContext(out, out_header_reader, fs_index, std::addressof(ctx)));
    }

public:
    Result CreateStorageByRawStorage(VirtualFile* out, const NcaFsHeaderReader* header_reader,
                                     VirtualFile raw_storage, StorageContext* ctx);

private:
    // Per-layer construction helpers; each produces one storage in the stack.
    Result OpenStorageImpl(VirtualFile* out, NcaFsHeaderReader* out_header_reader, s32 fs_index,
                           StorageContext* ctx);

    Result OpenIndirectableStorageAsOriginal(VirtualFile* out,
                                             const NcaFsHeaderReader* header_reader,
                                             StorageContext* ctx);

    Result CreateBodySubStorage(VirtualFile* out, s64 offset, s64 size);

    Result CreateAesCtrStorage(VirtualFile* out, VirtualFile base_storage, s64 offset,
                               const NcaAesCtrUpperIv& upper_iv,
                               AlignmentStorageRequirement alignment_storage_requirement);
    Result CreateAesXtsStorage(VirtualFile* out, VirtualFile base_storage, s64 offset);

    Result CreateSparseStorageMetaStorage(VirtualFile* out, VirtualFile base_storage, s64 offset,
                                          const NcaAesCtrUpperIv& upper_iv,
                                          const NcaSparseInfo& sparse_info);
    Result CreateSparseStorageCore(std::shared_ptr<SparseStorage>* out, VirtualFile base_storage,
                                   s64 base_size, VirtualFile meta_storage,
                                   const NcaSparseInfo& sparse_info, bool external_info);
    Result CreateSparseStorage(VirtualFile* out, s64* out_fs_data_offset,
                               std::shared_ptr<SparseStorage>* out_sparse_storage,
                               VirtualFile* out_meta_storage, s32 index,
                               const NcaAesCtrUpperIv& upper_iv, const NcaSparseInfo& sparse_info);

    Result CreateSparseStorageMetaStorageWithVerification(
        VirtualFile* out, VirtualFile* out_verification, VirtualFile base_storage, s64 offset,
        const NcaAesCtrUpperIv& upper_iv, const NcaSparseInfo& sparse_info,
        const NcaMetaDataHashDataInfo& meta_data_hash_data_info);
    Result CreateSparseStorageWithVerification(
        VirtualFile* out, s64* out_fs_data_offset,
        std::shared_ptr<SparseStorage>* out_sparse_storage, VirtualFile* out_meta_storage,
        VirtualFile* out_verification, s32 index, const NcaAesCtrUpperIv& upper_iv,
        const NcaSparseInfo& sparse_info, const NcaMetaDataHashDataInfo& meta_data_hash_data_info,
        NcaFsHeader::MetaDataHashType meta_data_hash_type);

    Result CreateAesCtrExStorageMetaStorage(VirtualFile* out, VirtualFile base_storage, s64 offset,
                                            NcaFsHeader::EncryptionType encryption_type,
                                            const NcaAesCtrUpperIv& upper_iv,
                                            const NcaPatchInfo& patch_info);
    Result CreateAesCtrExStorage(VirtualFile* out,
                                 std::shared_ptr<AesCtrCounterExtendedStorage>* out_ext,
                                 VirtualFile base_storage, VirtualFile meta_storage,
                                 s64 counter_offset, const NcaAesCtrUpperIv& upper_iv,
                                 const NcaPatchInfo& patch_info);

    Result CreateIndirectStorageMetaStorage(VirtualFile* out, VirtualFile base_storage,
                                            const NcaPatchInfo& patch_info);
    Result CreateIndirectStorage(VirtualFile* out, std::shared_ptr<IndirectStorage>* out_ind,
                                 VirtualFile base_storage, VirtualFile original_data_storage,
                                 VirtualFile meta_storage, const NcaPatchInfo& patch_info);

    Result CreatePatchMetaStorage(VirtualFile* out_aes_ctr_ex_meta, VirtualFile* out_indirect_meta,
                                  VirtualFile* out_verification, VirtualFile base_storage,
                                  s64 offset, const NcaAesCtrUpperIv& upper_iv,
                                  const NcaPatchInfo& patch_info,
                                  const NcaMetaDataHashDataInfo& meta_data_hash_data_info);

    Result CreateSha256Storage(VirtualFile* out, VirtualFile base_storage,
                               const NcaFsHeader::HashData::HierarchicalSha256Data& sha256_data);

    Result CreateIntegrityVerificationStorage(
        VirtualFile* out, VirtualFile base_storage,
        const NcaFsHeader::HashData::IntegrityMetaInfo& meta_info);
    Result CreateIntegrityVerificationStorageForMeta(
        VirtualFile* out, VirtualFile* out_verification, VirtualFile base_storage, s64 offset,
        const NcaMetaDataHashDataInfo& meta_data_hash_data_info);
    Result CreateIntegrityVerificationStorageImpl(
        VirtualFile* out, VirtualFile base_storage,
        const NcaFsHeader::HashData::IntegrityMetaInfo& meta_info, s64 layer_info_offset,
        int max_data_cache_entries, int max_hash_cache_entries, s8 buffer_level);

    Result CreateRegionSwitchStorage(VirtualFile* out, const NcaFsHeaderReader* header_reader,
                                     VirtualFile inside_storage, VirtualFile outside_storage);

    Result CreateCompressedStorage(VirtualFile* out, std::shared_ptr<CompressedStorage>* out_cmp,
                                   VirtualFile* out_meta, VirtualFile base_storage,
                                   const NcaCompressionInfo& compression_info);

public:
    // Public overload taking an explicit decompressor getter.
    Result CreateCompressedStorage(VirtualFile* out, std::shared_ptr<CompressedStorage>* out_cmp,
                                   VirtualFile* out_meta, VirtualFile base_storage,
                                   const NcaCompressionInfo& compression_info,
                                   GetDecompressorFunction get_decompressor);
};
359
360} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_nca_header.cpp b/src/core/file_sys/fssystem/fssystem_nca_header.cpp
new file mode 100644
index 000000000..bf5742d39
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_nca_header.cpp
@@ -0,0 +1,20 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "core/file_sys/fssystem/fssystem_nca_header.h"
5
6namespace FileSys {
7
8u8 NcaHeader::GetProperKeyGeneration() const {
9 return std::max(this->key_generation, this->key_generation_2);
10}
11
12bool NcaPatchInfo::HasIndirectTable() const {
13 return this->indirect_size != 0;
14}
15
16bool NcaPatchInfo::HasAesCtrExTable() const {
17 return this->aes_ctr_ex_size != 0;
18}
19
20} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_nca_header.h b/src/core/file_sys/fssystem/fssystem_nca_header.h
new file mode 100644
index 000000000..a02c5d881
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_nca_header.h
@@ -0,0 +1,338 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "common/common_funcs.h"
7#include "common/common_types.h"
8#include "common/literals.h"
9
10#include "core/file_sys/errors.h"
11#include "core/file_sys/fssystem/fs_types.h"
12
13namespace FileSys {
14
15using namespace Common::Literals;
16
// 256-bit (SHA-256-sized) hash value stored as raw bytes.
struct Hash {
    static constexpr std::size_t Size = 256 / 8;
    std::array<u8, Size> value;
};
static_assert(sizeof(Hash) == Hash::Size);
static_assert(std::is_trivial_v<Hash>);

// NCA digests share the same 32-byte representation.
using NcaDigest = Hash;
25
// On-disk layout of the 1 KiB NCA file header: signatures, identity fields,
// per-filesystem locations and hashes, and the encrypted key area.
struct NcaHeader {
    enum class ContentType : u8 {
        Program = 0,
        Meta = 1,
        Control = 2,
        Manual = 3,
        Data = 4,
        PublicData = 5,

        Start = Program,
        End = PublicData,
    };

    enum class DistributionType : u8 {
        Download = 0,
        GameCard = 1,

        Start = Download,
        End = GameCard,
    };

    enum class EncryptionType : u8 {
        Auto = 0,
        None = 1,
    };

    // Indices into the decrypted key area; AesXts uses two consecutive slots.
    enum DecryptionKey {
        DecryptionKey_AesXts = 0,
        DecryptionKey_AesXts1 = DecryptionKey_AesXts,
        DecryptionKey_AesXts2 = 1,
        DecryptionKey_AesCtr = 2,
        DecryptionKey_AesCtrEx = 3,
        DecryptionKey_AesCtrHw = 4,
        DecryptionKey_Count,
    };

    // Location of one filesystem entry, in 0x200-byte sectors.
    struct FsInfo {
        u32 start_sector;
        u32 end_sector;
        u32 hash_sectors;
        u32 reserved;
    };
    static_assert(sizeof(FsInfo) == 0x10);
    static_assert(std::is_trivial_v<FsInfo>);

    static constexpr u32 Magic0 = Common::MakeMagic('N', 'C', 'A', '0');
    static constexpr u32 Magic1 = Common::MakeMagic('N', 'C', 'A', '1');
    static constexpr u32 Magic2 = Common::MakeMagic('N', 'C', 'A', '2');
    static constexpr u32 Magic3 = Common::MakeMagic('N', 'C', 'A', '3');

    // Only NCA3 is the currently supported revision.
    static constexpr u32 Magic = Magic3;

    static constexpr std::size_t Size = 1_KiB;
    static constexpr s32 FsCountMax = 4;
    static constexpr std::size_t HeaderSignCount = 2;
    static constexpr std::size_t HeaderSignSize = 0x100;
    static constexpr std::size_t EncryptedKeyAreaSize = 0x100;
    static constexpr std::size_t SectorSize = 0x200;
    static constexpr std::size_t SectorShift = 9;
    static constexpr std::size_t RightsIdSize = 0x10;
    static constexpr std::size_t XtsBlockSize = 0x200;
    static constexpr std::size_t CtrBlockSize = 0x10;

    static_assert(SectorSize == (1 << SectorShift));

    // Data members.
    std::array<u8, HeaderSignSize> header_sign_1;
    std::array<u8, HeaderSignSize> header_sign_2;
    u32 magic;
    DistributionType distribution_type;
    ContentType content_type;
    u8 key_generation;
    u8 key_index;
    u64 content_size;
    u64 program_id;
    u32 content_index;
    u32 sdk_addon_version;
    u8 key_generation_2;
    u8 header1_signature_key_generation;
    std::array<u8, 2> reserved_222;
    std::array<u32, 3> reserved_224;
    std::array<u8, RightsIdSize> rights_id;
    std::array<FsInfo, FsCountMax> fs_info;
    std::array<Hash, FsCountMax> fs_header_hash;
    std::array<u8, EncryptedKeyAreaSize> encrypted_key_area;

    // Sector <-> byte conversions using the fixed 0x200-byte sector size.
    static constexpr u64 SectorToByte(u32 sector) {
        return static_cast<u64>(sector) << SectorShift;
    }

    static constexpr u32 ByteToSector(u64 byte) {
        return static_cast<u32>(byte >> SectorShift);
    }

    // Returns max(key_generation, key_generation_2).
    u8 GetProperKeyGeneration() const;
};
static_assert(sizeof(NcaHeader) == NcaHeader::Size);
static_assert(std::is_trivial_v<NcaHeader>);
124
// Location of a serialized bucket tree (offset/size) plus its raw header bytes.
struct NcaBucketInfo {
    static constexpr size_t HeaderSize = 0x10;
    Int64 offset;
    Int64 size;
    std::array<u8, HeaderSize> header;
};
static_assert(std::is_trivial_v<NcaBucketInfo>);

// Locations of the patch tables (indirect relocation + AesCtrEx) within an
// NCA fs entry.
struct NcaPatchInfo {
    static constexpr size_t Size = 0x40;
    static constexpr size_t Offset = 0x100;

    Int64 indirect_offset;
    Int64 indirect_size;
    std::array<u8, NcaBucketInfo::HeaderSize> indirect_header;
    Int64 aes_ctr_ex_offset;
    Int64 aes_ctr_ex_size;
    std::array<u8, NcaBucketInfo::HeaderSize> aes_ctr_ex_header;

    // Each table is considered present when its size field is nonzero.
    bool HasIndirectTable() const;
    bool HasAesCtrExTable() const;
};
static_assert(std::is_trivial_v<NcaPatchInfo>);

// Upper 64 bits of the AES-CTR IV; the generation and secure-value halves
// can be addressed separately through the union.
union NcaAesCtrUpperIv {
    u64 value;
    struct {
        u32 generation;
        u32 secure_value;
    } part;
};
static_assert(std::is_trivial_v<NcaAesCtrUpperIv>);
157
// Describes an NCA sparse layer: the bucket tree that maps virtual to
// physical regions, plus the layer's physical offset and CTR generation.
struct NcaSparseInfo {
    NcaBucketInfo bucket;
    Int64 physical_offset;
    u16 generation;
    std::array<u8, 6> reserved;

    // Physical extent of the sparse layer data (bucket offset + bucket size).
    s64 GetPhysicalSize() const {
        return this->bucket.offset + this->bucket.size;
    }

    // The stored 16-bit generation occupies the upper half of the 32-bit
    // CTR generation field.
    u32 GetGeneration() const {
        return static_cast<u32>(this->generation) << 16;
    }

    // Returns a copy of upper_iv with its generation replaced by this sparse
    // layer's generation.
    const NcaAesCtrUpperIv MakeAesCtrUpperIv(NcaAesCtrUpperIv upper_iv) const {
        NcaAesCtrUpperIv sparse_upper_iv = upper_iv;
        sparse_upper_iv.part.generation = this->GetGeneration();
        return sparse_upper_iv;
    }
};
static_assert(std::is_trivial_v<NcaSparseInfo>);

// Describes an NCA compression layer via its bucket tree.
struct NcaCompressionInfo {
    NcaBucketInfo bucket;
    std::array<u8, 8> resreved; // NOTE(review): typo for "reserved"; renaming would break other TUs.
};
static_assert(std::is_trivial_v<NcaCompressionInfo>);

// Location and expected hash of a meta-data hash region.
struct NcaMetaDataHashDataInfo {
    Int64 offset;
    Int64 size;
    Hash hash;
};
static_assert(std::is_trivial_v<NcaMetaDataHashDataInfo>);
192
// On-disk layout of one 0x200-byte NCA filesystem header: fs/hash/encryption
// type tags, the hash metadata union, and the patch/sparse/compression infos.
struct NcaFsHeader {
    static constexpr size_t Size = 0x200;
    static constexpr size_t HashDataOffset = 0x8;

    struct Region {
        Int64 offset;
        Int64 size;
    };
    static_assert(std::is_trivial_v<Region>);

    enum class FsType : u8 {
        RomFs = 0,
        PartitionFs = 1,
    };

    enum class EncryptionType : u8 {
        Auto = 0,
        None = 1,
        AesXts = 2,
        AesCtr = 3,
        AesCtrEx = 4,
        AesCtrSkipLayerHash = 5,
        AesCtrExSkipLayerHash = 6,
    };

    enum class HashType : u8 {
        Auto = 0,
        None = 1,
        HierarchicalSha256Hash = 2,
        HierarchicalIntegrityHash = 3,
        AutoSha3 = 4,
        HierarchicalSha3256Hash = 5,
        HierarchicalIntegritySha3Hash = 6,
    };

    enum class MetaDataHashType : u8 {
        None = 0,
        HierarchicalIntegrity = 1,
    };

    // The interpretation of this union is selected by hash_type below.
    union HashData {
        struct HierarchicalSha256Data {
            static constexpr size_t HashLayerCountMax = 5;
            static const size_t MasterHashOffset; // Defined after NcaFsHeader (needs offsetof).

            Hash fs_data_master_hash;
            s32 hash_block_size;
            s32 hash_layer_count;
            std::array<Region, HashLayerCountMax> hash_layer_region;
        } hierarchical_sha256_data;
        static_assert(std::is_trivial_v<HierarchicalSha256Data>);

        struct IntegrityMetaInfo {
            static const size_t MasterHashOffset; // Defined after NcaFsHeader (needs offsetof).

            u32 magic;
            u32 version;
            u32 master_hash_size;

            struct LevelHashInfo {
                u32 max_layers;

                struct HierarchicalIntegrityVerificationLevelInformation {
                    static constexpr size_t IntegrityMaxLayerCount = 7;
                    Int64 offset;
                    Int64 size;
                    s32 block_order;
                    std::array<u8, 4> reserved;
                };
                std::array<
                    HierarchicalIntegrityVerificationLevelInformation,
                    HierarchicalIntegrityVerificationLevelInformation::IntegrityMaxLayerCount - 1>
                    info;

                struct SignatureSalt {
                    static constexpr size_t Size = 0x20;
                    std::array<u8, Size> value;
                };
                SignatureSalt seed;
            } level_hash_info;

            Hash master_hash;
        } integrity_meta_info;
        static_assert(std::is_trivial_v<IntegrityMetaInfo>);

        // Pads the union out to the patch info's fixed offset.
        std::array<u8, NcaPatchInfo::Offset - HashDataOffset> padding;
    };

    u16 version;
    FsType fs_type;
    HashType hash_type;
    EncryptionType encryption_type;
    MetaDataHashType meta_data_hash_type;
    std::array<u8, 2> reserved;
    HashData hash_data;
    NcaPatchInfo patch_info;
    NcaAesCtrUpperIv aes_ctr_upper_iv;
    NcaSparseInfo sparse_info;
    NcaCompressionInfo compression_info;
    NcaMetaDataHashDataInfo meta_data_hash_data_info;
    std::array<u8, 0x30> pad;

    bool IsSkipLayerHashEncryption() const {
        return this->encryption_type == EncryptionType::AesCtrSkipLayerHash ||
               this->encryption_type == EncryptionType::AesCtrExSkipLayerHash;
    }

    // Writes the offset of the hash-protected data region for this header's
    // hash type; fails with ResultInvalidNcaFsHeader for other hash types.
    Result GetHashTargetOffset(s64* out) const {
        switch (this->hash_type) {
        case HashType::HierarchicalIntegrityHash:
        case HashType::HierarchicalIntegritySha3Hash:
            // The data level is the last used entry: max_layers - 2 indexes it
            // because `info` stores IntegrityMaxLayerCount - 1 level entries.
            *out = this->hash_data.integrity_meta_info.level_hash_info
                       .info[this->hash_data.integrity_meta_info.level_hash_info.max_layers - 2]
                       .offset;
            R_SUCCEED();
        case HashType::HierarchicalSha256Hash:
        case HashType::HierarchicalSha3256Hash:
            // The final hash layer region covers the fs data itself.
            *out =
                this->hash_data.hierarchical_sha256_data
                    .hash_layer_region[this->hash_data.hierarchical_sha256_data.hash_layer_count -
                                       1]
                    .offset;
            R_SUCCEED();
        default:
            R_THROW(ResultInvalidNcaFsHeader);
        }
    }
};
static_assert(sizeof(NcaFsHeader) == NcaFsHeader::Size);
static_assert(std::is_trivial_v<NcaFsHeader>);
static_assert(offsetof(NcaFsHeader, patch_info) == NcaPatchInfo::Offset);

// NOTE(review): offsetof with a nested member designator is conditionally
// supported; this relies on the compilers yuzu targets accepting it.
inline constexpr const size_t NcaFsHeader::HashData::HierarchicalSha256Data::MasterHashOffset =
    offsetof(NcaFsHeader, hash_data.hierarchical_sha256_data.fs_data_master_hash);
inline constexpr const size_t NcaFsHeader::HashData::IntegrityMetaInfo::MasterHashOffset =
    offsetof(NcaFsHeader, hash_data.integrity_meta_info.master_hash);

// Integrity metadata prefixed with the layer info offset it applies to.
struct NcaMetaDataHashData {
    s64 layer_info_offset;
    NcaFsHeader::HashData::IntegrityMetaInfo integrity_meta_info;
};
static_assert(sizeof(NcaMetaDataHashData) ==
              sizeof(NcaFsHeader::HashData::IntegrityMetaInfo) + sizeof(s64));
static_assert(std::is_trivial_v<NcaMetaDataHashData>);
337
338} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_nca_reader.cpp b/src/core/file_sys/fssystem/fssystem_nca_reader.cpp
new file mode 100644
index 000000000..cd4c49069
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_nca_reader.cpp
@@ -0,0 +1,542 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "core/file_sys/fssystem/fssystem_aes_xts_storage.h"
5#include "core/file_sys/fssystem/fssystem_nca_file_system_driver.h"
6#include "core/file_sys/vfs_offset.h"
7
8namespace FileSys {
9
10namespace {
11
12constexpr inline u32 SdkAddonVersionMin = 0x000B0000;
13constexpr inline size_t Aes128KeySize = 0x10;
14constexpr const std::array<u8, Aes128KeySize> ZeroKey{};
15
16constexpr Result CheckNcaMagic(u32 magic) {
17 // Verify the magic is not a deprecated one.
18 R_UNLESS(magic != NcaHeader::Magic0, ResultUnsupportedSdkVersion);
19 R_UNLESS(magic != NcaHeader::Magic1, ResultUnsupportedSdkVersion);
20 R_UNLESS(magic != NcaHeader::Magic2, ResultUnsupportedSdkVersion);
21
22 // Verify the magic is the current one.
23 R_UNLESS(magic == NcaHeader::Magic3, ResultInvalidNcaSignature);
24
25 R_SUCCEED();
26}
27
28} // namespace
29
// Constructs an empty reader: storages unset, header encryption defaults to
// Auto, and all trivial buffers zero-filled until Initialize() runs.
NcaReader::NcaReader()
    : m_body_storage(), m_header_storage(), m_is_software_aes_prioritized(false),
      m_is_available_sw_key(false), m_header_encryption_type(NcaHeader::EncryptionType::Auto),
      m_get_decompressor() {
    // These members are trivial buffers, so zero-fill them explicitly.
    std::memset(std::addressof(m_header), 0, sizeof(m_header));
    std::memset(std::addressof(m_decryption_keys), 0, sizeof(m_decryption_keys));
    std::memset(std::addressof(m_external_decryption_key), 0, sizeof(m_external_decryption_key));
}

NcaReader::~NcaReader() {}
40
// Decrypts and validates the NCA header from base_storage (with a plaintext
// fallback), verifies sign1 when a verifier is supplied, derives the key
// area when no rights id is present, and finally adopts the storages.
// Fails with ResultInvalidArgument when crypto_cfg cannot generate keys, and
// propagates magic / signature-key-generation / sdk-version / key-index
// validation failures.
Result NcaReader::Initialize(VirtualFile base_storage, const NcaCryptoConfiguration& crypto_cfg,
                             const NcaCompressionConfiguration& compression_cfg) {
    // Validate preconditions.
    ASSERT(base_storage != nullptr);
    ASSERT(m_body_storage == nullptr);

    // Create the work header storage storage.
    VirtualFile work_header_storage;

    // We need to be able to generate keys.
    R_UNLESS(crypto_cfg.generate_key != nullptr, ResultInvalidArgument);

    // Generate keys for header.
    using AesXtsStorageForNcaHeader = AesXtsStorage;

    constexpr const s32 HeaderKeyTypeValues[NcaCryptoConfiguration::HeaderEncryptionKeyCount] = {
        static_cast<s32>(KeyType::NcaHeaderKey1),
        static_cast<s32>(KeyType::NcaHeaderKey2),
    };

    u8 header_decryption_keys[NcaCryptoConfiguration::HeaderEncryptionKeyCount]
                             [NcaCryptoConfiguration::Aes128KeySize];
    for (size_t i = 0; i < NcaCryptoConfiguration::HeaderEncryptionKeyCount; i++) {
        crypto_cfg.generate_key(header_decryption_keys[i], AesXtsStorageForNcaHeader::KeySize,
                                crypto_cfg.header_encrypted_encryption_keys[i],
                                AesXtsStorageForNcaHeader::KeySize, HeaderKeyTypeValues[i]);
    }

    // Create the header storage.
    const u8 header_iv[AesXtsStorageForNcaHeader::IvSize] = {};
    work_header_storage = std::make_unique<AesXtsStorageForNcaHeader>(
        base_storage, header_decryption_keys[0], header_decryption_keys[1],
        AesXtsStorageForNcaHeader::KeySize, header_iv, AesXtsStorageForNcaHeader::IvSize,
        NcaHeader::XtsBlockSize);

    // Check that we successfully created the storage.
    R_UNLESS(work_header_storage != nullptr, ResultAllocationMemoryFailedInNcaReaderA);

    // Read the header.
    // NOTE(review): the read result is not checked here; presumably a failed
    // read surfaces through the magic check below — confirm.
    work_header_storage->ReadObject(std::addressof(m_header), 0);

    // Validate the magic.
    if (const Result magic_result = CheckNcaMagic(m_header.magic); R_FAILED(magic_result)) {
        // Try to use a plaintext header.
        base_storage->ReadObject(std::addressof(m_header), 0);
        R_UNLESS(R_SUCCEEDED(CheckNcaMagic(m_header.magic)), magic_result);

        // Configure to use the plaintext header.
        auto base_storage_size = base_storage->GetSize();
        work_header_storage = std::make_shared<OffsetVfsFile>(base_storage, base_storage_size, 0);
        R_UNLESS(work_header_storage != nullptr, ResultAllocationMemoryFailedInNcaReaderA);

        // Set encryption type as plaintext.
        m_header_encryption_type = NcaHeader::EncryptionType::None;
    }

    // Validate the fixed key signature.
    if (m_header.header1_signature_key_generation >
        NcaCryptoConfiguration::Header1SignatureKeyGenerationMax) {
        LOG_CRITICAL(Frontend,
                     "NcaCryptoConfiguration::Header1SignatureKeyGenerationMax = {}, "
                     "m_header.header1_signature_key_generation = {}",
                     NcaCryptoConfiguration::Header1SignatureKeyGenerationMax,
                     m_header.header1_signature_key_generation);
    }

    R_UNLESS(m_header.header1_signature_key_generation <=
                 NcaCryptoConfiguration::Header1SignatureKeyGenerationMax,
             ResultInvalidNcaHeader1SignatureKeyGeneration);

    // Verify the header sign1.
    if (crypto_cfg.verify_sign1 != nullptr) {
        // The signed message is everything after the two signature blocks.
        const u8* sig = m_header.header_sign_1.data();
        const size_t sig_size = NcaHeader::HeaderSignSize;
        const u8* msg =
            static_cast<const u8*>(static_cast<const void*>(std::addressof(m_header.magic)));
        const size_t msg_size =
            NcaHeader::Size - NcaHeader::HeaderSignSize * NcaHeader::HeaderSignCount;

        m_is_header_sign1_signature_valid = crypto_cfg.verify_sign1(
            sig, sig_size, msg, msg_size, m_header.header1_signature_key_generation);

        if (!m_is_header_sign1_signature_valid) {
            LOG_WARNING(Common_Filesystem, "Invalid NCA header sign1");
        }
    }

    // Validate the sdk version.
    R_UNLESS(m_header.sdk_addon_version >= SdkAddonVersionMin, ResultUnsupportedSdkVersion);

    // Validate the key index.
    R_UNLESS(m_header.key_index < NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount ||
                 m_header.key_index == NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexZeroKey,
             ResultInvalidNcaKeyIndex);

    // Check if we have a rights id.
    constexpr const std::array<u8, NcaHeader::RightsIdSize> ZeroRightsId{};
    if (std::memcmp(ZeroRightsId.data(), m_header.rights_id.data(), NcaHeader::RightsIdSize) == 0) {
        // If we don't, then we don't have an external key, so we need to generate decryption keys.
        crypto_cfg.generate_key(
            m_decryption_keys[NcaHeader::DecryptionKey_AesCtr], Aes128KeySize,
            m_header.encrypted_key_area.data() + NcaHeader::DecryptionKey_AesCtr * Aes128KeySize,
            Aes128KeySize, GetKeyTypeValue(m_header.key_index, m_header.GetProperKeyGeneration()));
        crypto_cfg.generate_key(
            m_decryption_keys[NcaHeader::DecryptionKey_AesXts1], Aes128KeySize,
            m_header.encrypted_key_area.data() + NcaHeader::DecryptionKey_AesXts1 * Aes128KeySize,
            Aes128KeySize, GetKeyTypeValue(m_header.key_index, m_header.GetProperKeyGeneration()));
        crypto_cfg.generate_key(
            m_decryption_keys[NcaHeader::DecryptionKey_AesXts2], Aes128KeySize,
            m_header.encrypted_key_area.data() + NcaHeader::DecryptionKey_AesXts2 * Aes128KeySize,
            Aes128KeySize, GetKeyTypeValue(m_header.key_index, m_header.GetProperKeyGeneration()));
        crypto_cfg.generate_key(
            m_decryption_keys[NcaHeader::DecryptionKey_AesCtrEx], Aes128KeySize,
            m_header.encrypted_key_area.data() + NcaHeader::DecryptionKey_AesCtrEx * Aes128KeySize,
            Aes128KeySize, GetKeyTypeValue(m_header.key_index, m_header.GetProperKeyGeneration()));

        // Copy the hardware speed emulation key.
        std::memcpy(m_decryption_keys[NcaHeader::DecryptionKey_AesCtrHw],
                    m_header.encrypted_key_area.data() +
                        NcaHeader::DecryptionKey_AesCtrHw * Aes128KeySize,
                    Aes128KeySize);
    }

    // Clear the external decryption key.
    std::memset(m_external_decryption_key, 0, sizeof(m_external_decryption_key));

    // Set software key availability.
    m_is_available_sw_key = crypto_cfg.is_available_sw_key;

    // Set our decompressor function getter.
    m_get_decompressor = compression_cfg.get_decompressor;

    // Set our storages.
    m_header_storage = std::move(work_header_storage);
    m_body_storage = std::move(base_storage);

    R_SUCCEED();
}
179
// The accessors below assert that Initialize() completed (m_body_storage set)
// before exposing header state.

VirtualFile NcaReader::GetSharedBodyStorage() {
    ASSERT(m_body_storage != nullptr);
    return m_body_storage;
}

u32 NcaReader::GetMagic() const {
    ASSERT(m_body_storage != nullptr);
    return m_header.magic;
}

NcaHeader::DistributionType NcaReader::GetDistributionType() const {
    ASSERT(m_body_storage != nullptr);
    return m_header.distribution_type;
}

NcaHeader::ContentType NcaReader::GetContentType() const {
    ASSERT(m_body_storage != nullptr);
    return m_header.content_type;
}

u8 NcaReader::GetHeaderSign1KeyGeneration() const {
    ASSERT(m_body_storage != nullptr);
    return m_header.header1_signature_key_generation;
}

// Effective key generation: max of the two stored generation fields.
u8 NcaReader::GetKeyGeneration() const {
    ASSERT(m_body_storage != nullptr);
    return m_header.GetProperKeyGeneration();
}

u8 NcaReader::GetKeyIndex() const {
    ASSERT(m_body_storage != nullptr);
    return m_header.key_index;
}

u64 NcaReader::GetContentSize() const {
    ASSERT(m_body_storage != nullptr);
    return m_header.content_size;
}

u64 NcaReader::GetProgramId() const {
    ASSERT(m_body_storage != nullptr);
    return m_header.program_id;
}

u32 NcaReader::GetContentIndex() const {
    ASSERT(m_body_storage != nullptr);
    return m_header.content_index;
}

u32 NcaReader::GetSdkAddonVersion() const {
    ASSERT(m_body_storage != nullptr);
    return m_header.sdk_addon_version;
}

// Copies the rights id into dst; dst must hold at least RightsIdSize bytes.
void NcaReader::GetRightsId(u8* dst, size_t dst_size) const {
    ASSERT(dst != nullptr);
    ASSERT(dst_size >= NcaHeader::RightsIdSize);

    std::memcpy(dst, m_header.rights_id.data(), NcaHeader::RightsIdSize);
}

// An fs entry is considered present when either of its sector bounds is nonzero.
bool NcaReader::HasFsInfo(s32 index) const {
    ASSERT(0 <= index && index < NcaHeader::FsCountMax);
    return m_header.fs_info[index].start_sector != 0 || m_header.fs_info[index].end_sector != 0;
}
246
247s32 NcaReader::GetFsCount() const {
248 ASSERT(m_body_storage != nullptr);
249 for (s32 i = 0; i < NcaHeader::FsCountMax; i++) {
250 if (!this->HasFsInfo(i)) {
251 return i;
252 }
253 }
254 return NcaHeader::FsCountMax;
255}
256
// Returns a reference to the stored hash of fs header `index`.
const Hash& NcaReader::GetFsHeaderHash(s32 index) const {
    ASSERT(m_body_storage != nullptr);
    ASSERT(0 <= index && index < NcaHeader::FsCountMax);
    return m_header.fs_header_hash[index];
}

// Copies the hash of fs header `index` into *dst.
void NcaReader::GetFsHeaderHash(Hash* dst, s32 index) const {
    ASSERT(m_body_storage != nullptr);
    ASSERT(0 <= index && index < NcaHeader::FsCountMax);
    ASSERT(dst != nullptr);
    std::memcpy(dst, std::addressof(m_header.fs_header_hash[index]), sizeof(*dst));
}

// Copies the sector-based location info of fs entry `index` into *dst.
void NcaReader::GetFsInfo(NcaHeader::FsInfo* dst, s32 index) const {
    ASSERT(m_body_storage != nullptr);
    ASSERT(0 <= index && index < NcaHeader::FsCountMax);
    ASSERT(dst != nullptr);
    std::memcpy(dst, std::addressof(m_header.fs_info[index]), sizeof(*dst));
}

// Byte offset of fs entry `index` within the NCA body.
u64 NcaReader::GetFsOffset(s32 index) const {
    ASSERT(m_body_storage != nullptr);
    ASSERT(0 <= index && index < NcaHeader::FsCountMax);
    return NcaHeader::SectorToByte(m_header.fs_info[index].start_sector);
}

// Byte offset one past the end of fs entry `index`.
u64 NcaReader::GetFsEndOffset(s32 index) const {
    ASSERT(m_body_storage != nullptr);
    ASSERT(0 <= index && index < NcaHeader::FsCountMax);
    return NcaHeader::SectorToByte(m_header.fs_info[index].end_sector);
}

// Byte size of fs entry `index` (end sector - start sector, converted).
u64 NcaReader::GetFsSize(s32 index) const {
    ASSERT(m_body_storage != nullptr);
    ASSERT(0 <= index && index < NcaHeader::FsCountMax);
    return NcaHeader::SectorToByte(m_header.fs_info[index].end_sector -
                                   m_header.fs_info[index].start_sector);
}

// Copies the (still-encrypted) key area into dst.
void NcaReader::GetEncryptedKey(void* dst, size_t size) const {
    ASSERT(m_body_storage != nullptr);
    ASSERT(dst != nullptr);
    ASSERT(size >= NcaHeader::EncryptedKeyAreaSize);

    std::memcpy(dst, m_header.encrypted_key_area.data(), NcaHeader::EncryptedKeyAreaSize);
}

// Returns the derived key for the given DecryptionKey slot.
const void* NcaReader::GetDecryptionKey(s32 index) const {
    ASSERT(m_body_storage != nullptr);
    ASSERT(0 <= index && index < NcaHeader::DecryptionKey_Count);
    return m_decryption_keys[index];
}
309
310bool NcaReader::HasValidInternalKey() const {
311 for (s32 i = 0; i < NcaHeader::DecryptionKey_Count; i++) {
312 if (std::memcmp(ZeroKey.data(), m_header.encrypted_key_area.data() + i * Aes128KeySize,
313 Aes128KeySize) != 0) {
314 return true;
315 }
316 }
317 return false;
318}
319
// True when the hardware-speed-emulation (AesCtrHw) key slot is non-zero.
bool NcaReader::HasInternalDecryptionKeyForAesHw() const {
    return std::memcmp(ZeroKey.data(), this->GetDecryptionKey(NcaHeader::DecryptionKey_AesCtrHw),
                       Aes128KeySize) != 0;
}

bool NcaReader::IsSoftwareAesPrioritized() const {
    return m_is_software_aes_prioritized;
}

// One-way switch: once prioritized, software AES stays prioritized.
void NcaReader::PrioritizeSoftwareAes() {
    m_is_software_aes_prioritized = true;
}

bool NcaReader::IsAvailableSwKey() const {
    return m_is_available_sw_key;
}

// True when an external (titlekey) decryption key has been set to non-zero.
bool NcaReader::HasExternalDecryptionKey() const {
    return std::memcmp(ZeroKey.data(), this->GetExternalDecryptionKey(), Aes128KeySize) != 0;
}

const void* NcaReader::GetExternalDecryptionKey() const {
    return m_external_decryption_key;
}

// Installs the external decryption key; size must match the key buffer.
void NcaReader::SetExternalDecryptionKey(const void* src, size_t size) {
    ASSERT(src != nullptr);
    ASSERT(size == sizeof(m_external_decryption_key));

    std::memcpy(m_external_decryption_key, src, sizeof(m_external_decryption_key));
}

// Copies the full raw NcaHeader into dst (dst_size >= sizeof(NcaHeader)).
void NcaReader::GetRawData(void* dst, size_t dst_size) const {
    ASSERT(m_body_storage != nullptr);
    ASSERT(dst != nullptr);
    ASSERT(dst_size >= sizeof(NcaHeader));

    std::memcpy(dst, std::addressof(m_header), sizeof(NcaHeader));
}

GetDecompressorFunction NcaReader::GetDecompressor() const {
    ASSERT(m_get_decompressor != nullptr);
    return m_get_decompressor;
}

NcaHeader::EncryptionType NcaReader::GetEncryptionType() const {
    return m_header_encryption_type;
}

// Reads fs header `index`; the fs headers follow the NcaHeader contiguously
// in the (decrypted) header storage.
Result NcaReader::ReadHeader(NcaFsHeader* dst, s32 index) const {
    ASSERT(dst != nullptr);
    ASSERT(0 <= index && index < NcaHeader::FsCountMax);

    const s64 offset = sizeof(NcaHeader) + sizeof(NcaFsHeader) * index;
    m_header_storage->ReadObject(dst, offset);

    R_SUCCEED();
}

bool NcaReader::GetHeaderSign1Valid() const {
    return m_is_header_sign1_signature_valid;
}

// Copies the second header signature; size must equal HeaderSignSize.
void NcaReader::GetHeaderSign2(void* dst, size_t size) const {
    ASSERT(dst != nullptr);
    ASSERT(size == NcaHeader::HeaderSignSize);

    std::memcpy(dst, m_header.header_sign_2.data(), size);
}
389
// Loads fs header `index` from `reader` into this reader. On failure the
// reader is left uninitialized (m_fs_index == -1), since R_TRY returns early.
Result NcaFsHeaderReader::Initialize(const NcaReader& reader, s32 index) {
    // Reset ourselves to uninitialized.
    m_fs_index = -1;

    // Read the header.
    R_TRY(reader.ReadHeader(std::addressof(m_data), index));

    // Set our index.
    m_fs_index = index;
    R_SUCCEED();
}
401
// The accessors below require a successful Initialize(); each asserts
// IsInitialized() before touching m_data.

// Copies the raw fs header; dst must hold at least sizeof(NcaFsHeader).
void NcaFsHeaderReader::GetRawData(void* dst, size_t dst_size) const {
    ASSERT(this->IsInitialized());
    ASSERT(dst != nullptr);
    ASSERT(dst_size >= sizeof(NcaFsHeader));

    std::memcpy(dst, std::addressof(m_data), sizeof(NcaFsHeader));
}

NcaFsHeader::HashData& NcaFsHeaderReader::GetHashData() {
    ASSERT(this->IsInitialized());
    return m_data.hash_data;
}

const NcaFsHeader::HashData& NcaFsHeaderReader::GetHashData() const {
    ASSERT(this->IsInitialized());
    return m_data.hash_data;
}

u16 NcaFsHeaderReader::GetVersion() const {
    ASSERT(this->IsInitialized());
    return m_data.version;
}

s32 NcaFsHeaderReader::GetFsIndex() const {
    ASSERT(this->IsInitialized());
    return m_fs_index;
}

NcaFsHeader::FsType NcaFsHeaderReader::GetFsType() const {
    ASSERT(this->IsInitialized());
    return m_data.fs_type;
}

NcaFsHeader::HashType NcaFsHeaderReader::GetHashType() const {
    ASSERT(this->IsInitialized());
    return m_data.hash_type;
}

NcaFsHeader::EncryptionType NcaFsHeaderReader::GetEncryptionType() const {
    ASSERT(this->IsInitialized());
    return m_data.encryption_type;
}

NcaPatchInfo& NcaFsHeaderReader::GetPatchInfo() {
    ASSERT(this->IsInitialized());
    return m_data.patch_info;
}

const NcaPatchInfo& NcaFsHeaderReader::GetPatchInfo() const {
    ASSERT(this->IsInitialized());
    return m_data.patch_info;
}

// Returned by value; the const return qualifier matches the declaration.
const NcaAesCtrUpperIv NcaFsHeaderReader::GetAesCtrUpperIv() const {
    ASSERT(this->IsInitialized());
    return m_data.aes_ctr_upper_iv;
}

bool NcaFsHeaderReader::IsSkipLayerHashEncryption() const {
    ASSERT(this->IsInitialized());
    return m_data.IsSkipLayerHashEncryption();
}

Result NcaFsHeaderReader::GetHashTargetOffset(s64* out) const {
    ASSERT(out != nullptr);
    ASSERT(this->IsInitialized());

    R_RETURN(m_data.GetHashTargetOffset(out));
}

// A sparse layer exists iff the sparse info generation is nonzero.
bool NcaFsHeaderReader::ExistsSparseLayer() const {
    ASSERT(this->IsInitialized());
    return m_data.sparse_info.generation != 0;
}

NcaSparseInfo& NcaFsHeaderReader::GetSparseInfo() {
    ASSERT(this->IsInitialized());
    return m_data.sparse_info;
}

const NcaSparseInfo& NcaFsHeaderReader::GetSparseInfo() const {
    ASSERT(this->IsInitialized());
    return m_data.sparse_info;
}

// A compression layer exists iff its bucket has both an offset and a size.
bool NcaFsHeaderReader::ExistsCompressionLayer() const {
    ASSERT(this->IsInitialized());
    return m_data.compression_info.bucket.offset != 0 && m_data.compression_info.bucket.size != 0;
}

NcaCompressionInfo& NcaFsHeaderReader::GetCompressionInfo() {
    ASSERT(this->IsInitialized());
    return m_data.compression_info;
}

const NcaCompressionInfo& NcaFsHeaderReader::GetCompressionInfo() const {
    ASSERT(this->IsInitialized());
    return m_data.compression_info;
}

// Patch meta hash layer: requires meta hash data plus an indirect (patch) table.
bool NcaFsHeaderReader::ExistsPatchMetaHashLayer() const {
    ASSERT(this->IsInitialized());
    return m_data.meta_data_hash_data_info.size != 0 && this->GetPatchInfo().HasIndirectTable();
}

// Note: the patch and sparse meta hash accessors intentionally return the
// same underlying meta_data_hash_data_info / meta_data_hash_type fields.
NcaMetaDataHashDataInfo& NcaFsHeaderReader::GetPatchMetaDataHashDataInfo() {
    ASSERT(this->IsInitialized());
    return m_data.meta_data_hash_data_info;
}

const NcaMetaDataHashDataInfo& NcaFsHeaderReader::GetPatchMetaDataHashDataInfo() const {
    ASSERT(this->IsInitialized());
    return m_data.meta_data_hash_data_info;
}

NcaFsHeader::MetaDataHashType NcaFsHeaderReader::GetPatchMetaHashType() const {
    ASSERT(this->IsInitialized());
    return m_data.meta_data_hash_type;
}

// Sparse meta hash layer: requires meta hash data plus an existing sparse layer.
bool NcaFsHeaderReader::ExistsSparseMetaHashLayer() const {
    ASSERT(this->IsInitialized());
    return m_data.meta_data_hash_data_info.size != 0 && this->ExistsSparseLayer();
}

NcaMetaDataHashDataInfo& NcaFsHeaderReader::GetSparseMetaDataHashDataInfo() {
    ASSERT(this->IsInitialized());
    return m_data.meta_data_hash_data_info;
}

const NcaMetaDataHashDataInfo& NcaFsHeaderReader::GetSparseMetaDataHashDataInfo() const {
    ASSERT(this->IsInitialized());
    return m_data.meta_data_hash_data_info;
}

NcaFsHeader::MetaDataHashType NcaFsHeaderReader::GetSparseMetaHashType() const {
    ASSERT(this->IsInitialized());
    return m_data.meta_data_hash_type;
}
541
542} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_pooled_buffer.cpp b/src/core/file_sys/fssystem/fssystem_pooled_buffer.cpp
new file mode 100644
index 000000000..bbfaab255
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_pooled_buffer.cpp
@@ -0,0 +1,61 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "common/alignment.h"
5#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
6
7namespace FileSys {
8
namespace {

// Granularity (and alignment) of every pool allocation.
constexpr size_t HeapBlockSize = BufferPoolAlignment;
static_assert(HeapBlockSize == 4_KiB);

// A heap block is 4KiB. An order is a power of two.
// This gives blocks of the order 32KiB, 512KiB, 4MiB.
constexpr s32 HeapOrderMax = 7;
constexpr s32 HeapOrderMaxForLarge = HeapOrderMax + 3;

// Largest single allocation: 4KiB << 7 = 512KiB normally,
// 4KiB << 10 = 4MiB for "particularly large" requests.
constexpr size_t HeapAllocatableSizeMax = HeapBlockSize * (static_cast<size_t>(1) << HeapOrderMax);
constexpr size_t HeapAllocatableSizeMaxForLarge =
    HeapBlockSize * (static_cast<size_t>(1) << HeapOrderMaxForLarge);

} // namespace
24
25size_t PooledBuffer::GetAllocatableSizeMaxCore(bool large) {
26 return large ? HeapAllocatableSizeMaxForLarge : HeapAllocatableSizeMax;
27}
28
// Allocates between required_size and ideal_size bytes (clamped to the pool
// maximum), aligned to HeapBlockSize. Must not already hold a buffer.
void PooledBuffer::AllocateCore(size_t ideal_size, size_t required_size, bool large) {
    // Ensure preconditions.
    ASSERT(m_buffer == nullptr);

    // Check that we can allocate this size.
    ASSERT(required_size <= GetAllocatableSizeMaxCore(large));

    // Prefer ideal_size, but never less than required_size nor above the cap.
    const size_t target_size =
        std::min(std::max(ideal_size, required_size), GetAllocatableSizeMaxCore(large));

    // Dummy implementation for allocate.
    if (target_size > 0) {
        // Aligned allocation; must stay paired with the aligned
        // ::operator delete in Shrink.
        m_buffer =
            reinterpret_cast<char*>(::operator new(target_size, std::align_val_t{HeapBlockSize}));
        m_size = target_size;

        // Ensure postconditions.
        ASSERT(m_buffer != nullptr);
    }
}
49
// Releases the buffer when ideal_size is zero; matches the aligned allocation
// made in AllocateCore.
void PooledBuffer::Shrink(size_t ideal_size) {
    ASSERT(ideal_size <= GetAllocatableSizeMaxCore(true));

    // Shrinking to zero means that we have no buffer.
    if (ideal_size == 0) {
        ::operator delete(m_buffer, std::align_val_t{HeapBlockSize});
        m_buffer = nullptr;
        m_size = ideal_size;
    }
    // NOTE(review): a nonzero ideal_size is currently a no-op — the buffer
    // keeps its full size. Presumably acceptable for this dummy pool
    // implementation; confirm no caller relies on partial shrink releasing
    // memory.
}
60
61} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_pooled_buffer.h b/src/core/file_sys/fssystem/fssystem_pooled_buffer.h
new file mode 100644
index 000000000..1df3153a1
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_pooled_buffer.h
@@ -0,0 +1,96 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
#include <cstddef>
#include <utility>

#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/literals.h"
#include "core/hle/result.h"
10
11namespace FileSys {
12
13using namespace Common::Literals;
14
15constexpr inline size_t BufferPoolAlignment = 4_KiB;
16constexpr inline size_t BufferPoolWorkSize = 320;
17
18class PooledBuffer {
19 YUZU_NON_COPYABLE(PooledBuffer);
20
21private:
22 char* m_buffer;
23 size_t m_size;
24
25private:
26 static size_t GetAllocatableSizeMaxCore(bool large);
27
28public:
29 static size_t GetAllocatableSizeMax() {
30 return GetAllocatableSizeMaxCore(false);
31 }
32 static size_t GetAllocatableParticularlyLargeSizeMax() {
33 return GetAllocatableSizeMaxCore(true);
34 }
35
36private:
37 void Swap(PooledBuffer& rhs) {
38 std::swap(m_buffer, rhs.m_buffer);
39 std::swap(m_size, rhs.m_size);
40 }
41
42public:
43 // Constructor/Destructor.
44 constexpr PooledBuffer() : m_buffer(), m_size() {}
45
46 PooledBuffer(size_t ideal_size, size_t required_size) : m_buffer(), m_size() {
47 this->Allocate(ideal_size, required_size);
48 }
49
50 ~PooledBuffer() {
51 this->Deallocate();
52 }
53
54 // Move and assignment.
55 explicit PooledBuffer(PooledBuffer&& rhs) : m_buffer(rhs.m_buffer), m_size(rhs.m_size) {
56 rhs.m_buffer = nullptr;
57 rhs.m_size = 0;
58 }
59
60 PooledBuffer& operator=(PooledBuffer&& rhs) {
61 PooledBuffer(std::move(rhs)).Swap(*this);
62 return *this;
63 }
64
65 // Allocation API.
66 void Allocate(size_t ideal_size, size_t required_size) {
67 return this->AllocateCore(ideal_size, required_size, false);
68 }
69
70 void AllocateParticularlyLarge(size_t ideal_size, size_t required_size) {
71 return this->AllocateCore(ideal_size, required_size, true);
72 }
73
74 void Shrink(size_t ideal_size);
75
76 void Deallocate() {
77 // Shrink the buffer to empty.
78 this->Shrink(0);
79 ASSERT(m_buffer == nullptr);
80 }
81
82 char* GetBuffer() const {
83 ASSERT(m_buffer != nullptr);
84 return m_buffer;
85 }
86
87 size_t GetSize() const {
88 ASSERT(m_buffer != nullptr);
89 return m_size;
90 }
91
92private:
93 void AllocateCore(size_t ideal_size, size_t required_size, bool large);
94};
95
96} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_sparse_storage.cpp b/src/core/file_sys/fssystem/fssystem_sparse_storage.cpp
new file mode 100644
index 000000000..05e8820f7
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_sparse_storage.cpp
@@ -0,0 +1,40 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "core/file_sys/fssystem/fssystem_sparse_storage.h"
5
6namespace FileSys {
7
// Reads `size` bytes at `offset`. Sub-ranges are dispatched per bucket-tree
// entry to the backing storages; an empty entry table reads as all zeroes.
size_t SparseStorage::Read(u8* buffer, size_t size, size_t offset) const {
    // Validate preconditions.
    ASSERT(offset >= 0);
    ASSERT(this->IsInitialized());
    ASSERT(buffer != nullptr);

    // Allow zero size.
    if (size == 0) {
        return size;
    }

    // OperatePerEntry is non-const; a read does not change logical state.
    SparseStorage* self = const_cast<SparseStorage*>(this);

    if (self->GetEntryTable().IsEmpty()) {
        // No entries: verify the request is within the table range, zero-fill.
        BucketTree::Offsets table_offsets;
        ASSERT(R_SUCCEEDED(self->GetEntryTable().GetOffsets(std::addressof(table_offsets))));
        ASSERT(table_offsets.IsInclude(offset, size));

        std::memset(buffer, 0, size);
    } else {
        // Forward each mapped sub-range to its storage (data or zero storage),
        // writing at the matching position within the caller's buffer.
        self->OperatePerEntry<false, true>(
            offset, size,
            [=](VirtualFile storage, s64 data_offset, s64 cur_offset, s64 cur_size) -> Result {
                storage->Read(reinterpret_cast<u8*>(buffer) + (cur_offset - offset),
                              static_cast<size_t>(cur_size), data_offset);
                R_SUCCEED();
            });
    }

    return size;
}
39
40} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_sparse_storage.h b/src/core/file_sys/fssystem/fssystem_sparse_storage.h
new file mode 100644
index 000000000..c1ade7195
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_sparse_storage.h
@@ -0,0 +1,73 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "core/file_sys/fssystem/fssystem_indirect_storage.h"
7
8namespace FileSys {
9
// Sparse storage: an IndirectStorage where storage slot 0 holds the real data
// and slot 1 is an implicit zero-filled storage backing unmapped regions.
class SparseStorage : public IndirectStorage {
    YUZU_NON_COPYABLE(SparseStorage);
    YUZU_NON_MOVEABLE(SparseStorage);

private:
    // Unbounded storage whose every byte reads as zero.
    class ZeroStorage : public IReadOnlyStorage {
    public:
        ZeroStorage() {}
        virtual ~ZeroStorage() {}

        virtual size_t GetSize() const override {
            return std::numeric_limits<size_t>::max();
        }

        virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
            // offset is unsigned, so this check is vacuously true; kept for
            // symmetry with signed-offset storages (NOTE(review): confirm).
            ASSERT(offset >= 0);
            ASSERT(buffer != nullptr || size == 0);

            if (size > 0) {
                std::memset(buffer, 0, size);
            }

            return size;
        }
    };

private:
    VirtualFile m_zero_storage;

public:
    SparseStorage() : IndirectStorage(), m_zero_storage(std::make_shared<ZeroStorage>()) {}
    virtual ~SparseStorage() {}

    using IndirectStorage::Initialize;

    // Initializes the entry table for a storage ending at end_offset.
    void Initialize(s64 end_offset) {
        this->GetEntryTable().Initialize(NodeSize, end_offset);
        this->SetZeroStorage();
    }

    // Binds the real data storage to slot 0; slot 1 is always zero storage.
    void SetDataStorage(VirtualFile storage) {
        ASSERT(this->IsInitialized());

        this->SetStorage(0, storage);
        this->SetZeroStorage();
    }

    template <typename T>
    void SetDataStorage(T storage, s64 offset, s64 size) {
        ASSERT(this->IsInitialized());

        this->SetStorage(0, storage, offset, size);
        this->SetZeroStorage();
    }

    virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;

private:
    // Installs the shared zero storage in slot 1, spanning all offsets.
    void SetZeroStorage() {
        return this->SetStorage(1, m_zero_storage, 0, std::numeric_limits<s64>::max());
    }
};
72
73} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_switch_storage.h b/src/core/file_sys/fssystem/fssystem_switch_storage.h
new file mode 100644
index 000000000..140f21ab7
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_switch_storage.h
@@ -0,0 +1,80 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "core/file_sys/fssystem/fs_i_storage.h"
7
8namespace FileSys {
9
// Read-only storage that serves bytes inside a single configured region from
// one backing storage and everything outside it from another. Reads that
// straddle the region boundary are split into per-storage pieces.
class RegionSwitchStorage : public IReadOnlyStorage {
    YUZU_NON_COPYABLE(RegionSwitchStorage);
    YUZU_NON_MOVEABLE(RegionSwitchStorage);

public:
    struct Region {
        s64 offset;
        s64 size;
    };

private:
    VirtualFile m_inside_region_storage;
    VirtualFile m_outside_region_storage;
    Region m_region;

public:
    RegionSwitchStorage(VirtualFile&& i, VirtualFile&& o, Region r)
        : m_inside_region_storage(std::move(i)), m_outside_region_storage(std::move(o)),
          m_region(r) {}

    virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
        // Process until we're done.
        size_t processed = 0;
        while (processed < size) {
            // Process on the appropriate storage.
            s64 cur_size = 0;
            if (this->CheckRegions(std::addressof(cur_size), offset + processed,
                                   size - processed)) {
                m_inside_region_storage->Read(buffer + processed, cur_size, offset + processed);
            } else {
                m_outside_region_storage->Read(buffer + processed, cur_size, offset + processed);
            }

            // Advance.
            processed += cur_size;
        }

        return size;
    }

    virtual size_t GetSize() const override {
        return m_inside_region_storage->GetSize();
    }

private:
    // Splits an access at the region boundary. Returns true when the access
    // start lies inside m_region, false otherwise; *out_current_size is the
    // length of the prefix that stays on one side (never exceeds size, never
    // crosses the boundary), so it is always > 0 and Read() makes progress.
    bool CheckRegions(s64* out_current_size, s64 offset, s64 size) const {
        // Check if our region contains the access.
        if (m_region.offset <= offset) {
            if (offset < m_region.offset + m_region.size) {
                // Start is inside the region; clamp to the region's end.
                if (m_region.offset + m_region.size <= offset + size) {
                    *out_current_size = m_region.offset + m_region.size - offset;
                } else {
                    *out_current_size = size;
                }
                return true;
            } else {
                // Entirely past the region.
                *out_current_size = size;
                return false;
            }
        } else {
            // Start is before the region; clamp to the region's start.
            if (m_region.offset <= offset + size) {
                *out_current_size = m_region.offset - offset;
            } else {
                *out_current_size = size;
            }
            return false;
        }
    }
};
79
80} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_utility.cpp b/src/core/file_sys/fssystem/fssystem_utility.cpp
new file mode 100644
index 000000000..4dddfd75a
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_utility.cpp
@@ -0,0 +1,24 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include "core/file_sys/fssystem/fssystem_utility.h"
2
3namespace FileSys {
4
5void AddCounter(void* counter_, size_t counter_size, u64 value) {
6 u8* counter = static_cast<u8*>(counter_);
7 u64 remaining = value;
8 u8 carry = 0;
9
10 for (size_t i = 0; i < counter_size; i++) {
11 auto sum = counter[counter_size - 1 - i] + (remaining & 0xFF) + carry;
12 carry = static_cast<u8>(sum >> (sizeof(u8) * 8));
13 auto sum8 = static_cast<u8>(sum & 0xFF);
14
15 counter[counter_size - 1 - i] = sum8;
16
17 remaining >>= (sizeof(u8) * 8);
18 if (carry == 0 && remaining == 0) {
19 break;
20 }
21 }
22}
23
24} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_utility.h b/src/core/file_sys/fssystem/fssystem_utility.h
new file mode 100644
index 000000000..284b8b811
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_utility.h
@@ -0,0 +1,12 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "common/common_funcs.h"
7
namespace FileSys {

// Adds `value` to the big-endian integer held in the first `counter_size`
// bytes of `counter`, propagating carries in place (e.g. an AES-CTR counter).
void AddCounter(void* counter, size_t counter_size, u64 value);

} // namespace FileSys