summary refs log tree commit diff
path: root/src/core/arm/nce/patcher.cpp
diff options
context:
space:
mode:
author: Viktor Szépe, 2024-01-16 00:09:00 +0000
committer: Viktor Szépe, 2024-01-16 00:09:00 +0000
commit90ab89a0b0174f8df559b79dc06a03479d959f93 (patch)
tree3d11a790e44945e309f0e68f5332b33b42e72bbb /src/core/arm/nce/patcher.cpp
parentFix typos in arrays.xml (diff)
parentMerge pull request #12681 from t895/stick-toggles (diff)
downloadyuzu-90ab89a0b0174f8df559b79dc06a03479d959f93.tar.gz
yuzu-90ab89a0b0174f8df559b79dc06a03479d959f93.tar.xz
yuzu-90ab89a0b0174f8df559b79dc06a03479d959f93.zip
Merge remote-tracking branch 'origin/master' into typos3
Diffstat (limited to 'src/core/arm/nce/patcher.cpp')
-rw-r--r-- src/core/arm/nce/patcher.cpp | 83
1 file changed, 55 insertions(+), 28 deletions(-)
diff --git a/src/core/arm/nce/patcher.cpp b/src/core/arm/nce/patcher.cpp
index 47a7a8880..c7285e3a0 100644
--- a/src/core/arm/nce/patcher.cpp
+++ b/src/core/arm/nce/patcher.cpp
@@ -22,14 +22,10 @@ using NativeExecutionParameters = Kernel::KThread::NativeExecutionParameters;
22constexpr size_t MaxRelativeBranch = 128_MiB; 22constexpr size_t MaxRelativeBranch = 128_MiB;
23constexpr u32 ModuleCodeIndex = 0x24 / sizeof(u32); 23constexpr u32 ModuleCodeIndex = 0x24 / sizeof(u32);
24 24
25Patcher::Patcher() : c(m_patch_instructions) {} 25Patcher::Patcher() : c(m_patch_instructions) {
26 26 // The first word of the patch section is always a branch to the first instruction of the
27Patcher::~Patcher() = default; 27 // module.
28 28 c.dw(0);
29void Patcher::PatchText(const Kernel::PhysicalMemory& program_image,
30 const Kernel::CodeSet::Segment& code) {
31 // Branch to the first instruction of the module.
32 this->BranchToModule(0);
33 29
34 // Write save context helper function. 30 // Write save context helper function.
35 c.l(m_save_context); 31 c.l(m_save_context);
@@ -38,6 +34,25 @@ void Patcher::PatchText(const Kernel::PhysicalMemory& program_image,
38 // Write load context helper function. 34 // Write load context helper function.
39 c.l(m_load_context); 35 c.l(m_load_context);
40 WriteLoadContext(); 36 WriteLoadContext();
37}
38
39Patcher::~Patcher() = default;
40
41bool Patcher::PatchText(const Kernel::PhysicalMemory& program_image,
42 const Kernel::CodeSet::Segment& code) {
43 // If we have patched modules but cannot reach the new module, then it needs its own patcher.
44 const size_t image_size = program_image.size();
45 if (total_program_size + image_size > MaxRelativeBranch && total_program_size > 0) {
46 return false;
47 }
48
49 // Add a new module patch to our list
50 modules.emplace_back();
51 curr_patch = &modules.back();
52
53 // The first word of the patch section is always a branch to the first instruction of the
54 // module.
55 curr_patch->m_branch_to_module_relocations.push_back({0, 0});
41 56
42 // Retrieve text segment data. 57 // Retrieve text segment data.
43 const auto text = std::span{program_image}.subspan(code.offset, code.size); 58 const auto text = std::span{program_image}.subspan(code.offset, code.size);
@@ -94,16 +109,17 @@ void Patcher::PatchText(const Kernel::PhysicalMemory& program_image,
94 } 109 }
95 110
96 if (auto exclusive = Exclusive{inst}; exclusive.Verify()) { 111 if (auto exclusive = Exclusive{inst}; exclusive.Verify()) {
97 m_exclusives.push_back(i); 112 curr_patch->m_exclusives.push_back(i);
98 } 113 }
99 } 114 }
100 115
101 // Determine patching mode for the final relocation step 116 // Determine patching mode for the final relocation step
102 const size_t image_size = program_image.size(); 117 total_program_size += image_size;
103 this->mode = image_size > MaxRelativeBranch ? PatchMode::PreText : PatchMode::PostData; 118 this->mode = image_size > MaxRelativeBranch ? PatchMode::PreText : PatchMode::PostData;
119 return true;
104} 120}
105 121
106void Patcher::RelocateAndCopy(Common::ProcessAddress load_base, 122bool Patcher::RelocateAndCopy(Common::ProcessAddress load_base,
107 const Kernel::CodeSet::Segment& code, 123 const Kernel::CodeSet::Segment& code,
108 Kernel::PhysicalMemory& program_image, 124 Kernel::PhysicalMemory& program_image,
109 EntryTrampolines* out_trampolines) { 125 EntryTrampolines* out_trampolines) {
@@ -120,7 +136,7 @@ void Patcher::RelocateAndCopy(Common::ProcessAddress load_base,
120 if (mode == PatchMode::PreText) { 136 if (mode == PatchMode::PreText) {
121 rc.B(rel.patch_offset - patch_size - rel.module_offset); 137 rc.B(rel.patch_offset - patch_size - rel.module_offset);
122 } else { 138 } else {
123 rc.B(image_size - rel.module_offset + rel.patch_offset); 139 rc.B(total_program_size - rel.module_offset + rel.patch_offset);
124 } 140 }
125 }; 141 };
126 142
@@ -129,7 +145,7 @@ void Patcher::RelocateAndCopy(Common::ProcessAddress load_base,
129 if (mode == PatchMode::PreText) { 145 if (mode == PatchMode::PreText) {
130 rc.B(patch_size - rel.patch_offset + rel.module_offset); 146 rc.B(patch_size - rel.patch_offset + rel.module_offset);
131 } else { 147 } else {
132 rc.B(rel.module_offset - image_size - rel.patch_offset); 148 rc.B(rel.module_offset - total_program_size - rel.patch_offset);
133 } 149 }
134 }; 150 };
135 151
@@ -137,7 +153,7 @@ void Patcher::RelocateAndCopy(Common::ProcessAddress load_base,
137 if (mode == PatchMode::PreText) { 153 if (mode == PatchMode::PreText) {
138 return GetInteger(load_base) + patch_offset; 154 return GetInteger(load_base) + patch_offset;
139 } else { 155 } else {
140 return GetInteger(load_base) + image_size + patch_offset; 156 return GetInteger(load_base) + total_program_size + patch_offset;
141 } 157 }
142 }; 158 };
143 159
@@ -150,39 +166,50 @@ void Patcher::RelocateAndCopy(Common::ProcessAddress load_base,
150 }; 166 };
151 167
152 // We are now ready to relocate! 168 // We are now ready to relocate!
153 for (const Relocation& rel : m_branch_to_patch_relocations) { 169 auto& patch = modules[m_relocate_module_index++];
170 for (const Relocation& rel : patch.m_branch_to_patch_relocations) {
154 ApplyBranchToPatchRelocation(text_words.data() + rel.module_offset / sizeof(u32), rel); 171 ApplyBranchToPatchRelocation(text_words.data() + rel.module_offset / sizeof(u32), rel);
155 } 172 }
156 for (const Relocation& rel : m_branch_to_module_relocations) { 173 for (const Relocation& rel : patch.m_branch_to_module_relocations) {
157 ApplyBranchToModuleRelocation(m_patch_instructions.data() + rel.patch_offset / sizeof(u32), 174 ApplyBranchToModuleRelocation(m_patch_instructions.data() + rel.patch_offset / sizeof(u32),
158 rel); 175 rel);
159 } 176 }
160 177
161 // Rewrite PC constants and record post trampolines 178 // Rewrite PC constants and record post trampolines
162 for (const Relocation& rel : m_write_module_pc_relocations) { 179 for (const Relocation& rel : patch.m_write_module_pc_relocations) {
163 oaknut::CodeGenerator rc{m_patch_instructions.data() + rel.patch_offset / sizeof(u32)}; 180 oaknut::CodeGenerator rc{m_patch_instructions.data() + rel.patch_offset / sizeof(u32)};
164 rc.dx(RebasePc(rel.module_offset)); 181 rc.dx(RebasePc(rel.module_offset));
165 } 182 }
166 for (const Trampoline& rel : m_trampolines) { 183 for (const Trampoline& rel : patch.m_trampolines) {
167 out_trampolines->insert({RebasePc(rel.module_offset), RebasePatch(rel.patch_offset)}); 184 out_trampolines->insert({RebasePc(rel.module_offset), RebasePatch(rel.patch_offset)});
168 } 185 }
169 186
170 // Cortex-A57 seems to treat all exclusives as ordered, but newer processors do not. 187 // Cortex-A57 seems to treat all exclusives as ordered, but newer processors do not.
171 // Convert to ordered to preserve this assumption. 188 // Convert to ordered to preserve this assumption.
172 for (const ModuleTextAddress i : m_exclusives) { 189 for (const ModuleTextAddress i : patch.m_exclusives) {
173 auto exclusive = Exclusive{text_words[i]}; 190 auto exclusive = Exclusive{text_words[i]};
174 text_words[i] = exclusive.AsOrdered(); 191 text_words[i] = exclusive.AsOrdered();
175 } 192 }
176 193
177 // Copy to program image 194 // Remove the patched module size from the total. This is done so total_program_size
178 if (this->mode == PatchMode::PreText) { 195 // always represents the distance from the currently patched module to the patch section.
179 std::memcpy(program_image.data(), m_patch_instructions.data(), 196 total_program_size -= image_size;
180 m_patch_instructions.size() * sizeof(u32)); 197
181 } else { 198 // Only copy to the program image of the last module
182 program_image.resize(image_size + patch_size); 199 if (m_relocate_module_index == modules.size()) {
183 std::memcpy(program_image.data() + image_size, m_patch_instructions.data(), 200 if (this->mode == PatchMode::PreText) {
184 m_patch_instructions.size() * sizeof(u32)); 201 ASSERT(image_size == total_program_size);
202 std::memcpy(program_image.data(), m_patch_instructions.data(),
203 m_patch_instructions.size() * sizeof(u32));
204 } else {
205 program_image.resize(image_size + patch_size);
206 std::memcpy(program_image.data() + image_size, m_patch_instructions.data(),
207 m_patch_instructions.size() * sizeof(u32));
208 }
209 return true;
185 } 210 }
211
212 return false;
186} 213}
187 214
188size_t Patcher::GetSectionSize() const noexcept { 215size_t Patcher::GetSectionSize() const noexcept {
@@ -322,7 +349,7 @@ void Patcher::WriteSvcTrampoline(ModuleDestLabel module_dest, u32 svc_id) {
322 349
323 // Write the post-SVC trampoline address, which will jump back to the guest after restoring its 350 // Write the post-SVC trampoline address, which will jump back to the guest after restoring its
324 // state. 351 // state.
325 m_trampolines.push_back({c.offset(), module_dest}); 352 curr_patch->m_trampolines.push_back({c.offset(), module_dest});
326 353
327 // Host called this location. Save the return address so we can 354 // Host called this location. Save the return address so we can
328 // unwind the stack properly when jumping back. 355 // unwind the stack properly when jumping back.