author     Lioncash    2019-11-26 13:58:03 -0500
committer  Lioncash    2019-11-26 21:53:35 -0500
commit     fc7d0a17b6ec7dfc44a56f3e4a8bd97108f1c596 (patch)
tree       dd83c03104a9f3ceb712bebd47397eafac16354c /src
parent     core/memory: Migrate over address checking functions to the new Memory class (diff)
core/memory: Move memory read/write implementation functions into an anonymous namespace
These will eventually be migrated into the main Memory class, but for now we put them in an anonymous namespace so that the other functions that use them can be migrated over separately.
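For readers unfamiliar with the pattern: an anonymous namespace gives its contents internal linkage, so the helpers behave like file-local `static` functions while remaining callable from the public functions still sitting in the enclosing namespace. Below is a minimal standalone sketch of that idea only; the names (`TranslateAddress`, `Read8`, `fake_backing`) are hypothetical and are not taken from the yuzu sources.

// anonymous_namespace_sketch.cpp -- illustrative only; all names here are
// hypothetical and unrelated to the real src/core/memory.cpp.
#include <cstdint>
#include <cstdio>

namespace Memory {
namespace {

// Internal linkage: this helper is visible only inside this translation
// unit, exactly as if it had been declared `static` at namespace scope.
std::uint8_t* TranslateAddress(std::uint64_t vaddr) {
    static std::uint8_t fake_backing[0x1000]{};
    return fake_backing + (vaddr & 0xFFF); // toy translation for the sketch
}

} // Anonymous namespace

// A function in the enclosing namespace can still call the helper directly.
std::uint8_t Read8(std::uint64_t vaddr) {
    return *TranslateAddress(vaddr);
}

} // namespace Memory

int main() {
    // Reads back a zero-initialized byte from the toy backing store.
    std::printf("%u\n", static_cast<unsigned>(Memory::Read8(0x10)));
    return 0;
}

Once the remaining free functions are migrated into the Memory class, the helpers can follow them and the anonymous namespace can shrink away.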
Diffstat (limited to 'src')
-rw-r--r--    src/core/memory.cpp    195
1 file changed, 98 insertions(+), 97 deletions(-)
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 4c13ea1e7..017033613 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -20,8 +20,105 @@
 #include "video_core/gpu.h"

 namespace Memory {
+namespace {
+Common::PageTable* current_page_table = nullptr;

-static Common::PageTable* current_page_table = nullptr;
+/**
+ * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned)
+ * using a VMA from the current process
+ */
+u8* GetPointerFromVMA(const Kernel::Process& process, VAddr vaddr) {
+    const auto& vm_manager = process.VMManager();
+
+    const auto it = vm_manager.FindVMA(vaddr);
+    DEBUG_ASSERT(vm_manager.IsValidHandle(it));
+
+    u8* direct_pointer = nullptr;
+    const auto& vma = it->second;
+    switch (vma.type) {
+    case Kernel::VMAType::AllocatedMemoryBlock:
+        direct_pointer = vma.backing_block->data() + vma.offset;
+        break;
+    case Kernel::VMAType::BackingMemory:
+        direct_pointer = vma.backing_memory;
+        break;
+    case Kernel::VMAType::Free:
+        return nullptr;
+    default:
+        UNREACHABLE();
+    }
+
+    return direct_pointer + (vaddr - vma.base);
+}
+
+/**
+ * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned)
+ * using a VMA from the current process.
+ */
+u8* GetPointerFromVMA(VAddr vaddr) {
+    return ::Memory::GetPointerFromVMA(*Core::System::GetInstance().CurrentProcess(), vaddr);
+}
+
+template <typename T>
+T Read(const VAddr vaddr) {
+    const u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
+    if (page_pointer != nullptr) {
+        // NOTE: Avoid adding any extra logic to this fast-path block
+        T value;
+        std::memcpy(&value, &page_pointer[vaddr & PAGE_MASK], sizeof(T));
+        return value;
+    }
+
+    const Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
+    switch (type) {
+    case Common::PageType::Unmapped:
+        LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, vaddr);
+        return 0;
+    case Common::PageType::Memory:
+        ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
+        break;
+    case Common::PageType::RasterizerCachedMemory: {
+        const u8* const host_ptr{GetPointerFromVMA(vaddr)};
+        Core::System::GetInstance().GPU().FlushRegion(ToCacheAddr(host_ptr), sizeof(T));
+        T value;
+        std::memcpy(&value, host_ptr, sizeof(T));
+        return value;
+    }
+    default:
+        UNREACHABLE();
+    }
+    return {};
+}
+
+template <typename T>
+void Write(const VAddr vaddr, const T data) {
+    u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
+    if (page_pointer != nullptr) {
+        // NOTE: Avoid adding any extra logic to this fast-path block
+        std::memcpy(&page_pointer[vaddr & PAGE_MASK], &data, sizeof(T));
+        return;
+    }
+
+    Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
+    switch (type) {
+    case Common::PageType::Unmapped:
+        LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8,
+                  static_cast<u32>(data), vaddr);
+        return;
+    case Common::PageType::Memory:
+        ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
+        break;
+    case Common::PageType::RasterizerCachedMemory: {
+        u8* const host_ptr{GetPointerFromVMA(vaddr)};
+        Core::System::GetInstance().GPU().InvalidateRegion(ToCacheAddr(host_ptr), sizeof(T));
+        std::memcpy(host_ptr, &data, sizeof(T));
+        break;
+    }
+    default:
+        UNREACHABLE();
+    }
+}
+} // Anonymous namespace

 // Implementation class used to keep the specifics of the memory subsystem hidden
 // from outside classes. This also allows modification to the internals of the memory
@@ -191,102 +288,6 @@ void SetCurrentPageTable(Kernel::Process& process) {
     system.ArmInterface(3).PageTableChanged(*current_page_table, address_space_width);
 }

-/**
- * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned)
- * using a VMA from the current process
- */
-static u8* GetPointerFromVMA(const Kernel::Process& process, VAddr vaddr) {
-    const auto& vm_manager = process.VMManager();
-
-    const auto it = vm_manager.FindVMA(vaddr);
-    DEBUG_ASSERT(vm_manager.IsValidHandle(it));
-
-    u8* direct_pointer = nullptr;
-    const auto& vma = it->second;
-    switch (vma.type) {
-    case Kernel::VMAType::AllocatedMemoryBlock:
-        direct_pointer = vma.backing_block->data() + vma.offset;
-        break;
-    case Kernel::VMAType::BackingMemory:
-        direct_pointer = vma.backing_memory;
-        break;
-    case Kernel::VMAType::Free:
-        return nullptr;
-    default:
-        UNREACHABLE();
-    }
-
-    return direct_pointer + (vaddr - vma.base);
-}
-
-/**
- * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned)
- * using a VMA from the current process.
- */
-static u8* GetPointerFromVMA(VAddr vaddr) {
-    return GetPointerFromVMA(*Core::System::GetInstance().CurrentProcess(), vaddr);
-}
-
-template <typename T>
-T Read(const VAddr vaddr) {
-    const u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
-    if (page_pointer) {
-        // NOTE: Avoid adding any extra logic to this fast-path block
-        T value;
-        std::memcpy(&value, &page_pointer[vaddr & PAGE_MASK], sizeof(T));
-        return value;
-    }
-
-    Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
-    switch (type) {
-    case Common::PageType::Unmapped:
-        LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, vaddr);
-        return 0;
-    case Common::PageType::Memory:
-        ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
-        break;
-    case Common::PageType::RasterizerCachedMemory: {
-        auto host_ptr{GetPointerFromVMA(vaddr)};
-        Core::System::GetInstance().GPU().FlushRegion(ToCacheAddr(host_ptr), sizeof(T));
-        T value;
-        std::memcpy(&value, host_ptr, sizeof(T));
-        return value;
-    }
-    default:
-        UNREACHABLE();
-    }
-    return {};
-}
-
-template <typename T>
-void Write(const VAddr vaddr, const T data) {
-    u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
-    if (page_pointer) {
-        // NOTE: Avoid adding any extra logic to this fast-path block
-        std::memcpy(&page_pointer[vaddr & PAGE_MASK], &data, sizeof(T));
-        return;
-    }
-
-    Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
-    switch (type) {
-    case Common::PageType::Unmapped:
-        LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8,
-                  static_cast<u32>(data), vaddr);
-        return;
-    case Common::PageType::Memory:
-        ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
-        break;
-    case Common::PageType::RasterizerCachedMemory: {
-        auto host_ptr{GetPointerFromVMA(vaddr)};
-        Core::System::GetInstance().GPU().InvalidateRegion(ToCacheAddr(host_ptr), sizeof(T));
-        std::memcpy(host_ptr, &data, sizeof(T));
-        break;
-    }
-    default:
-        UNREACHABLE();
-    }
-}
-
 bool IsKernelVirtualAddress(const VAddr vaddr) {
     return KERNEL_REGION_VADDR <= vaddr && vaddr < KERNEL_REGION_END;
 }