| author | 2021-08-09 17:09:56 -0700 |
|---|---|
| committer | 2021-08-09 17:09:56 -0700 |
| commit | 7df790f1ae728ddf68fdeec470a13d84bdeb1ef9 (patch) |
| tree | 29331bdad1f0175e6b139bd6717d163c1f54ddf7 /src/core/memory.cpp |
| parent | Merge pull request #6839 from ameerj/frame-cap-positon (diff) |
| parent | memory: Address lioncash's review (diff) |
Merge pull request #6823 from yzct12345/memory-cleanup
memory: Clean up code
Diffstat (limited to 'src/core/memory.cpp')
| -rw-r--r-- | src/core/memory.cpp | 552 |
1 file changed, 160 insertions(+), 392 deletions(-)
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index f285c6f63..51c4dea26 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
| @@ -4,8 +4,6 @@ | |||
| 4 | 4 | ||
| 5 | #include <algorithm> | 5 | #include <algorithm> |
| 6 | #include <cstring> | 6 | #include <cstring> |
| 7 | #include <optional> | ||
| 8 | #include <utility> | ||
| 9 | 7 | ||
| 10 | #include "common/assert.h" | 8 | #include "common/assert.h" |
| 11 | #include "common/atomic_ops.h" | 9 | #include "common/atomic_ops.h" |
| @@ -14,12 +12,10 @@ | |||
| 14 | #include "common/page_table.h" | 12 | #include "common/page_table.h" |
| 15 | #include "common/settings.h" | 13 | #include "common/settings.h" |
| 16 | #include "common/swap.h" | 14 | #include "common/swap.h" |
| 17 | #include "core/arm/arm_interface.h" | ||
| 18 | #include "core/core.h" | 15 | #include "core/core.h" |
| 19 | #include "core/device_memory.h" | 16 | #include "core/device_memory.h" |
| 20 | #include "core/hle/kernel/k_page_table.h" | 17 | #include "core/hle/kernel/k_page_table.h" |
| 21 | #include "core/hle/kernel/k_process.h" | 18 | #include "core/hle/kernel/k_process.h" |
| 22 | #include "core/hle/kernel/physical_memory.h" | ||
| 23 | #include "core/memory.h" | 19 | #include "core/memory.h" |
| 24 | #include "video_core/gpu.h" | 20 | #include "video_core/gpu.h" |
| 25 | 21 | ||
| @@ -62,17 +58,7 @@ struct Memory::Impl { | |||
| 62 | } | 58 | } |
| 63 | } | 59 | } |
| 64 | 60 | ||
| 65 | bool IsValidVirtualAddress(const Kernel::KProcess& process, const VAddr vaddr) const { | 61 | [[nodiscard]] u8* GetPointerFromRasterizerCachedMemory(VAddr vaddr) const { |
| 66 | const auto& page_table = process.PageTable().PageTableImpl(); | ||
| 67 | const auto [pointer, type] = page_table.pointers[vaddr >> PAGE_BITS].PointerType(); | ||
| 68 | return pointer != nullptr || type == Common::PageType::RasterizerCachedMemory; | ||
| 69 | } | ||
| 70 | |||
| 71 | bool IsValidVirtualAddress(VAddr vaddr) const { | ||
| 72 | return IsValidVirtualAddress(*system.CurrentProcess(), vaddr); | ||
| 73 | } | ||
| 74 | |||
| 75 | u8* GetPointerFromRasterizerCachedMemory(VAddr vaddr) const { | ||
| 76 | const PAddr paddr{current_page_table->backing_addr[vaddr >> PAGE_BITS]}; | 62 | const PAddr paddr{current_page_table->backing_addr[vaddr >> PAGE_BITS]}; |
| 77 | 63 | ||
| 78 | if (!paddr) { | 64 | if (!paddr) { |
| @@ -82,18 +68,6 @@ struct Memory::Impl { | |||
| 82 | return system.DeviceMemory().GetPointer(paddr) + vaddr; | 68 | return system.DeviceMemory().GetPointer(paddr) + vaddr; |
| 83 | } | 69 | } |
| 84 | 70 | ||
| 85 | u8* GetPointer(const VAddr vaddr) const { | ||
| 86 | const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw(); | ||
| 87 | if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) { | ||
| 88 | return pointer + vaddr; | ||
| 89 | } | ||
| 90 | const auto type = Common::PageTable::PageInfo::ExtractType(raw_pointer); | ||
| 91 | if (type == Common::PageType::RasterizerCachedMemory) { | ||
| 92 | return GetPointerFromRasterizerCachedMemory(vaddr); | ||
| 93 | } | ||
| 94 | return nullptr; | ||
| 95 | } | ||
| 96 | |||
| 97 | u8 Read8(const VAddr addr) { | 71 | u8 Read8(const VAddr addr) { |
| 98 | return Read<u8>(addr); | 72 | return Read<u8>(addr); |
| 99 | } | 73 | } |
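Side note on the lookups above: both the removed GetPointer and the surviving GetPointerFromRasterizerCachedMemory add the full virtual address to a stored per-page value (`pointer + vaddr`, `GetPointer(paddr) + vaddr`), which suggests the stored value is pre-biased by the page's virtual base. A minimal sketch of that biasing trick, assuming 4 KiB pages and a made-up two-page mapping; none of the names below are yuzu's:

```cpp
// Illustrative only: a pre-biased per-page base value lets the hot path turn a
// virtual address into a host pointer with one table load and one add.
// PAGE_BITS, the two-page mapping, and all names are assumptions for this toy,
// not yuzu's real structures.
#include <cstdint>
#include <cstdio>
#include <vector>

constexpr std::size_t PAGE_BITS = 12; // assume 4 KiB pages
constexpr std::size_t PAGE_SIZE = std::size_t{1} << PAGE_BITS;

int main() {
    // Host backing storage for two guest pages.
    std::vector<std::uint8_t> host(2 * PAGE_SIZE, 0);

    // Guest virtual base of the mapped region.
    const std::uint64_t guest_base = 0x20000;

    // Per page, store the host base *minus* the page's guest virtual address,
    // so that `bias + vaddr` (for any vaddr inside the page) is the host location.
    std::vector<std::uintptr_t> bias(2);
    for (std::size_t page = 0; page < 2; ++page) {
        const std::uint64_t page_vaddr = guest_base + page * PAGE_SIZE;
        bias[page] =
            reinterpret_cast<std::uintptr_t>(host.data() + page * PAGE_SIZE) - page_vaddr;
    }

    const std::uint64_t vaddr = guest_base + PAGE_SIZE + 0x10; // second page, offset 0x10
    const std::size_t page = (vaddr - guest_base) >> PAGE_BITS; // toy index, relative to the mapping
    auto* host_ptr = reinterpret_cast<std::uint8_t*>(bias[page] + vaddr); // one add, no offset math
    *host_ptr = 0xAB;

    std::printf("host[%zu] = 0x%02X\n", PAGE_SIZE + 0x10,
                static_cast<unsigned>(host[PAGE_SIZE + 0x10]));
}
```

The point of the bias is that the fast path needs no separate page-offset computation.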
| @@ -179,7 +153,7 @@ struct Memory::Impl { | |||
| 179 | std::string string; | 153 | std::string string; |
| 180 | string.reserve(max_length); | 154 | string.reserve(max_length); |
| 181 | for (std::size_t i = 0; i < max_length; ++i) { | 155 | for (std::size_t i = 0; i < max_length; ++i) { |
| 182 | const char c = Read8(vaddr); | 156 | const char c = Read<s8>(vaddr); |
| 183 | if (c == '\0') { | 157 | if (c == '\0') { |
| 184 | break; | 158 | break; |
| 185 | } | 159 | } |
| @@ -190,15 +164,14 @@ struct Memory::Impl { | |||
| 190 | return string; | 164 | return string; |
| 191 | } | 165 | } |
| 192 | 166 | ||
| 193 | void ReadBlock(const Kernel::KProcess& process, const VAddr src_addr, void* dest_buffer, | 167 | void WalkBlock(const Kernel::KProcess& process, const VAddr addr, const std::size_t size, |
| 194 | const std::size_t size) { | 168 | auto on_unmapped, auto on_memory, auto on_rasterizer, auto increment) { |
| 195 | const auto& page_table = process.PageTable().PageTableImpl(); | 169 | const auto& page_table = process.PageTable().PageTableImpl(); |
| 196 | |||
| 197 | std::size_t remaining_size = size; | 170 | std::size_t remaining_size = size; |
| 198 | std::size_t page_index = src_addr >> PAGE_BITS; | 171 | std::size_t page_index = addr >> PAGE_BITS; |
| 199 | std::size_t page_offset = src_addr & PAGE_MASK; | 172 | std::size_t page_offset = addr & PAGE_MASK; |
| 200 | 173 | ||
| 201 | while (remaining_size > 0) { | 174 | while (remaining_size) { |
| 202 | const std::size_t copy_amount = | 175 | const std::size_t copy_amount = |
| 203 | std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size); | 176 | std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size); |
| 204 | const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); | 177 | const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); |
| @@ -206,22 +179,18 @@ struct Memory::Impl { | |||
| 206 | const auto [pointer, type] = page_table.pointers[page_index].PointerType(); | 179 | const auto [pointer, type] = page_table.pointers[page_index].PointerType(); |
| 207 | switch (type) { | 180 | switch (type) { |
| 208 | case Common::PageType::Unmapped: { | 181 | case Common::PageType::Unmapped: { |
| 209 | LOG_ERROR(HW_Memory, | 182 | on_unmapped(copy_amount, current_vaddr); |
| 210 | "Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", | ||
| 211 | current_vaddr, src_addr, size); | ||
| 212 | std::memset(dest_buffer, 0, copy_amount); | ||
| 213 | break; | 183 | break; |
| 214 | } | 184 | } |
| 215 | case Common::PageType::Memory: { | 185 | case Common::PageType::Memory: { |
| 216 | DEBUG_ASSERT(pointer); | 186 | DEBUG_ASSERT(pointer); |
| 217 | const u8* const src_ptr = pointer + page_offset + (page_index << PAGE_BITS); | 187 | u8* mem_ptr = pointer + page_offset + (page_index << PAGE_BITS); |
| 218 | std::memcpy(dest_buffer, src_ptr, copy_amount); | 188 | on_memory(copy_amount, mem_ptr); |
| 219 | break; | 189 | break; |
| 220 | } | 190 | } |
| 221 | case Common::PageType::RasterizerCachedMemory: { | 191 | case Common::PageType::RasterizerCachedMemory: { |
| 222 | const u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; | 192 | u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; |
| 223 | system.GPU().FlushRegion(current_vaddr, copy_amount); | 193 | on_rasterizer(current_vaddr, copy_amount, host_ptr); |
| 224 | std::memcpy(dest_buffer, host_ptr, copy_amount); | ||
| 225 | break; | 194 | break; |
| 226 | } | 195 | } |
| 227 | default: | 196 | default: |
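The hunk above is the core of the cleanup: the per-page walk that ReadBlock, WriteBlock, ZeroBlock, and CopyBlock each used to duplicate becomes one WalkBlock that takes a callback per page type plus an increment step. A stripped-down sketch of that shape, with a trivial mapped/unmapped page table standing in for Common::PageTable; every name here is invented for illustration:

```cpp
// Sketch of the callback-driven page walk, assuming 4 KiB pages. The flat byte
// vector and the Mapped/Unmapped enum are stand-ins; only the control flow
// mirrors the diff (the rasterizer-cached case is omitted to keep this short).
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

constexpr std::size_t PAGE_BITS = 12;
constexpr std::size_t PAGE_SIZE = std::size_t{1} << PAGE_BITS;
constexpr std::size_t PAGE_MASK = PAGE_SIZE - 1;

enum class PageType { Unmapped, Mapped };

struct ToyMemory {
    std::vector<std::uint8_t> host = std::vector<std::uint8_t>(4 * PAGE_SIZE, 0x11);
    std::vector<PageType> types = {PageType::Mapped, PageType::Unmapped,
                                   PageType::Mapped, PageType::Mapped};

    // One loop; behaviour is injected per page type, like Memory::Impl::WalkBlock.
    void WalkBlock(std::uint64_t addr, std::size_t size, auto on_unmapped, auto on_memory,
                   auto increment) {
        std::size_t remaining = size;
        std::size_t page_index = addr >> PAGE_BITS;
        std::size_t page_offset = addr & PAGE_MASK;
        while (remaining > 0) {
            const std::size_t copy_amount = std::min(PAGE_SIZE - page_offset, remaining);
            if (types[page_index] == PageType::Unmapped) {
                on_unmapped(copy_amount);
            } else {
                on_memory(copy_amount, host.data() + page_index * PAGE_SIZE + page_offset);
            }
            ++page_index;
            page_offset = 0;
            increment(copy_amount);
            remaining -= copy_amount;
        }
    }

    // ReadBlock shrinks to the three lambdas that differ instead of its own loop.
    void ReadBlock(std::uint64_t src, void* dest, std::size_t size) {
        WalkBlock(
            src, size,
            [&dest](std::size_t n) { std::memset(dest, 0, n); },
            [&dest](std::size_t n, const std::uint8_t* p) { std::memcpy(dest, p, n); },
            [&dest](std::size_t n) { dest = static_cast<std::uint8_t*>(dest) + n; });
    }
};

int main() {
    ToyMemory mem;
    std::vector<std::uint8_t> out(2 * PAGE_SIZE + 16);
    mem.ReadBlock(PAGE_SIZE - 8, out.data(), out.size()); // straddles an unmapped page
    std::cout << "first byte: " << int(out[0]) << ", from unmapped page: " << int(out[16]) << '\n';
}
```

Each public operation then reduces to the handful of lambdas that differ, which is exactly what the ReadBlockImpl, WriteBlockImpl, ZeroBlock, and CopyBlock hunks below do.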
| @@ -230,248 +199,122 @@ struct Memory::Impl { | |||
| 230 | 199 | ||
| 231 | page_index++; | 200 | page_index++; |
| 232 | page_offset = 0; | 201 | page_offset = 0; |
| 233 | dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount; | 202 | increment(copy_amount); |
| 234 | remaining_size -= copy_amount; | 203 | remaining_size -= copy_amount; |
| 235 | } | 204 | } |
| 236 | } | 205 | } |
| 237 | 206 | ||
| 238 | void ReadBlockUnsafe(const Kernel::KProcess& process, const VAddr src_addr, void* dest_buffer, | 207 | template <bool UNSAFE> |
| 239 | const std::size_t size) { | 208 | void ReadBlockImpl(const Kernel::KProcess& process, const VAddr src_addr, void* dest_buffer, |
| 240 | const auto& page_table = process.PageTable().PageTableImpl(); | 209 | const std::size_t size) { |
| 241 | 210 | WalkBlock( | |
| 242 | std::size_t remaining_size = size; | 211 | process, src_addr, size, |
| 243 | std::size_t page_index = src_addr >> PAGE_BITS; | 212 | [src_addr, size, &dest_buffer](const std::size_t copy_amount, |
| 244 | std::size_t page_offset = src_addr & PAGE_MASK; | 213 | const VAddr current_vaddr) { |
| 245 | |||
| 246 | while (remaining_size > 0) { | ||
| 247 | const std::size_t copy_amount = | ||
| 248 | std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size); | ||
| 249 | const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); | ||
| 250 | |||
| 251 | const auto [pointer, type] = page_table.pointers[page_index].PointerType(); | ||
| 252 | switch (type) { | ||
| 253 | case Common::PageType::Unmapped: { | ||
| 254 | LOG_ERROR(HW_Memory, | 214 | LOG_ERROR(HW_Memory, |
| 255 | "Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", | 215 | "Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", |
| 256 | current_vaddr, src_addr, size); | 216 | current_vaddr, src_addr, size); |
| 257 | std::memset(dest_buffer, 0, copy_amount); | 217 | std::memset(dest_buffer, 0, copy_amount); |
| 258 | break; | 218 | }, |
| 259 | } | 219 | [&dest_buffer](const std::size_t copy_amount, const u8* const src_ptr) { |
| 260 | case Common::PageType::Memory: { | ||
| 261 | DEBUG_ASSERT(pointer); | ||
| 262 | const u8* const src_ptr = pointer + page_offset + (page_index << PAGE_BITS); | ||
| 263 | std::memcpy(dest_buffer, src_ptr, copy_amount); | 220 | std::memcpy(dest_buffer, src_ptr, copy_amount); |
| 264 | break; | 221 | }, |
| 265 | } | 222 | [&system = system, &dest_buffer](const VAddr current_vaddr, |
| 266 | case Common::PageType::RasterizerCachedMemory: { | 223 | const std::size_t copy_amount, |
| 267 | const u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; | 224 | const u8* const host_ptr) { |
| 225 | if constexpr (!UNSAFE) { | ||
| 226 | system.GPU().FlushRegion(current_vaddr, copy_amount); | ||
| 227 | } | ||
| 268 | std::memcpy(dest_buffer, host_ptr, copy_amount); | 228 | std::memcpy(dest_buffer, host_ptr, copy_amount); |
| 269 | break; | 229 | }, |
| 270 | } | 230 | [&dest_buffer](const std::size_t copy_amount) { |
| 271 | default: | 231 | dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount; |
| 272 | UNREACHABLE(); | 232 | }); |
| 273 | } | ||
| 274 | |||
| 275 | page_index++; | ||
| 276 | page_offset = 0; | ||
| 277 | dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount; | ||
| 278 | remaining_size -= copy_amount; | ||
| 279 | } | ||
| 280 | } | 233 | } |
| 281 | 234 | ||
| 282 | void ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) { | 235 | void ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) { |
| 283 | ReadBlock(*system.CurrentProcess(), src_addr, dest_buffer, size); | 236 | ReadBlockImpl<false>(*system.CurrentProcess(), src_addr, dest_buffer, size); |
| 284 | } | 237 | } |
| 285 | 238 | ||
| 286 | void ReadBlockUnsafe(const VAddr src_addr, void* dest_buffer, const std::size_t size) { | 239 | void ReadBlockUnsafe(const VAddr src_addr, void* dest_buffer, const std::size_t size) { |
| 287 | ReadBlockUnsafe(*system.CurrentProcess(), src_addr, dest_buffer, size); | 240 | ReadBlockImpl<true>(*system.CurrentProcess(), src_addr, dest_buffer, size); |
| 288 | } | 241 | } |
| 289 | 242 | ||
| 290 | void WriteBlock(const Kernel::KProcess& process, const VAddr dest_addr, const void* src_buffer, | 243 | template <bool UNSAFE> |
| 291 | const std::size_t size) { | 244 | void WriteBlockImpl(const Kernel::KProcess& process, const VAddr dest_addr, |
| 292 | const auto& page_table = process.PageTable().PageTableImpl(); | 245 | const void* src_buffer, const std::size_t size) { |
| 293 | std::size_t remaining_size = size; | 246 | WalkBlock( |
| 294 | std::size_t page_index = dest_addr >> PAGE_BITS; | 247 | process, dest_addr, size, |
| 295 | std::size_t page_offset = dest_addr & PAGE_MASK; | 248 | [dest_addr, size](const std::size_t copy_amount, const VAddr current_vaddr) { |
| 296 | |||
| 297 | while (remaining_size > 0) { | ||
| 298 | const std::size_t copy_amount = | ||
| 299 | std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size); | ||
| 300 | const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); | ||
| 301 | |||
| 302 | const auto [pointer, type] = page_table.pointers[page_index].PointerType(); | ||
| 303 | switch (type) { | ||
| 304 | case Common::PageType::Unmapped: { | ||
| 305 | LOG_ERROR(HW_Memory, | 249 | LOG_ERROR(HW_Memory, |
| 306 | "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", | 250 | "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", |
| 307 | current_vaddr, dest_addr, size); | 251 | current_vaddr, dest_addr, size); |
| 308 | break; | 252 | }, |
| 309 | } | 253 | [&src_buffer](const std::size_t copy_amount, u8* const dest_ptr) { |
| 310 | case Common::PageType::Memory: { | ||
| 311 | DEBUG_ASSERT(pointer); | ||
| 312 | u8* const dest_ptr = pointer + page_offset + (page_index << PAGE_BITS); | ||
| 313 | std::memcpy(dest_ptr, src_buffer, copy_amount); | 254 | std::memcpy(dest_ptr, src_buffer, copy_amount); |
| 314 | break; | 255 | }, |
| 315 | } | 256 | [&system = system, &src_buffer](const VAddr current_vaddr, |
| 316 | case Common::PageType::RasterizerCachedMemory: { | 257 | const std::size_t copy_amount, u8* const host_ptr) { |
| 317 | u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; | 258 | if constexpr (!UNSAFE) { |
| 318 | system.GPU().InvalidateRegion(current_vaddr, copy_amount); | 259 | system.GPU().InvalidateRegion(current_vaddr, copy_amount); |
| 319 | std::memcpy(host_ptr, src_buffer, copy_amount); | 260 | } |
| 320 | break; | ||
| 321 | } | ||
| 322 | default: | ||
| 323 | UNREACHABLE(); | ||
| 324 | } | ||
| 325 | |||
| 326 | page_index++; | ||
| 327 | page_offset = 0; | ||
| 328 | src_buffer = static_cast<const u8*>(src_buffer) + copy_amount; | ||
| 329 | remaining_size -= copy_amount; | ||
| 330 | } | ||
| 331 | } | ||
| 332 | |||
| 333 | void WriteBlockUnsafe(const Kernel::KProcess& process, const VAddr dest_addr, | ||
| 334 | const void* src_buffer, const std::size_t size) { | ||
| 335 | const auto& page_table = process.PageTable().PageTableImpl(); | ||
| 336 | std::size_t remaining_size = size; | ||
| 337 | std::size_t page_index = dest_addr >> PAGE_BITS; | ||
| 338 | std::size_t page_offset = dest_addr & PAGE_MASK; | ||
| 339 | |||
| 340 | while (remaining_size > 0) { | ||
| 341 | const std::size_t copy_amount = | ||
| 342 | std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size); | ||
| 343 | const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); | ||
| 344 | |||
| 345 | const auto [pointer, type] = page_table.pointers[page_index].PointerType(); | ||
| 346 | switch (type) { | ||
| 347 | case Common::PageType::Unmapped: { | ||
| 348 | LOG_ERROR(HW_Memory, | ||
| 349 | "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", | ||
| 350 | current_vaddr, dest_addr, size); | ||
| 351 | break; | ||
| 352 | } | ||
| 353 | case Common::PageType::Memory: { | ||
| 354 | DEBUG_ASSERT(pointer); | ||
| 355 | u8* const dest_ptr = pointer + page_offset + (page_index << PAGE_BITS); | ||
| 356 | std::memcpy(dest_ptr, src_buffer, copy_amount); | ||
| 357 | break; | ||
| 358 | } | ||
| 359 | case Common::PageType::RasterizerCachedMemory: { | ||
| 360 | u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; | ||
| 361 | std::memcpy(host_ptr, src_buffer, copy_amount); | 261 | std::memcpy(host_ptr, src_buffer, copy_amount); |
| 362 | break; | 262 | }, |
| 363 | } | 263 | [&src_buffer](const std::size_t copy_amount) { |
| 364 | default: | 264 | src_buffer = static_cast<const u8*>(src_buffer) + copy_amount; |
| 365 | UNREACHABLE(); | 265 | }); |
| 366 | } | ||
| 367 | |||
| 368 | page_index++; | ||
| 369 | page_offset = 0; | ||
| 370 | src_buffer = static_cast<const u8*>(src_buffer) + copy_amount; | ||
| 371 | remaining_size -= copy_amount; | ||
| 372 | } | ||
| 373 | } | 266 | } |
| 374 | 267 | ||
| 375 | void WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) { | 268 | void WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) { |
| 376 | WriteBlock(*system.CurrentProcess(), dest_addr, src_buffer, size); | 269 | WriteBlockImpl<false>(*system.CurrentProcess(), dest_addr, src_buffer, size); |
| 377 | } | 270 | } |
| 378 | 271 | ||
| 379 | void WriteBlockUnsafe(const VAddr dest_addr, const void* src_buffer, const std::size_t size) { | 272 | void WriteBlockUnsafe(const VAddr dest_addr, const void* src_buffer, const std::size_t size) { |
| 380 | WriteBlockUnsafe(*system.CurrentProcess(), dest_addr, src_buffer, size); | 273 | WriteBlockImpl<true>(*system.CurrentProcess(), dest_addr, src_buffer, size); |
| 381 | } | 274 | } |
| 382 | 275 | ||
| 383 | void ZeroBlock(const Kernel::KProcess& process, const VAddr dest_addr, const std::size_t size) { | 276 | void ZeroBlock(const Kernel::KProcess& process, const VAddr dest_addr, const std::size_t size) { |
| 384 | const auto& page_table = process.PageTable().PageTableImpl(); | 277 | WalkBlock( |
| 385 | std::size_t remaining_size = size; | 278 | process, dest_addr, size, |
| 386 | std::size_t page_index = dest_addr >> PAGE_BITS; | 279 | [dest_addr, size](const std::size_t copy_amount, const VAddr current_vaddr) { |
| 387 | std::size_t page_offset = dest_addr & PAGE_MASK; | ||
| 388 | |||
| 389 | while (remaining_size > 0) { | ||
| 390 | const std::size_t copy_amount = | ||
| 391 | std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size); | ||
| 392 | const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); | ||
| 393 | |||
| 394 | const auto [pointer, type] = page_table.pointers[page_index].PointerType(); | ||
| 395 | switch (type) { | ||
| 396 | case Common::PageType::Unmapped: { | ||
| 397 | LOG_ERROR(HW_Memory, | 280 | LOG_ERROR(HW_Memory, |
| 398 | "Unmapped ZeroBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", | 281 | "Unmapped ZeroBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", |
| 399 | current_vaddr, dest_addr, size); | 282 | current_vaddr, dest_addr, size); |
| 400 | break; | 283 | }, |
| 401 | } | 284 | [](const std::size_t copy_amount, u8* const dest_ptr) { |
| 402 | case Common::PageType::Memory: { | ||
| 403 | DEBUG_ASSERT(pointer); | ||
| 404 | u8* const dest_ptr = pointer + page_offset + (page_index << PAGE_BITS); | ||
| 405 | std::memset(dest_ptr, 0, copy_amount); | 285 | std::memset(dest_ptr, 0, copy_amount); |
| 406 | break; | 286 | }, |
| 407 | } | 287 | [&system = system](const VAddr current_vaddr, const std::size_t copy_amount, |
| 408 | case Common::PageType::RasterizerCachedMemory: { | 288 | u8* const host_ptr) { |
| 409 | u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; | ||
| 410 | system.GPU().InvalidateRegion(current_vaddr, copy_amount); | 289 | system.GPU().InvalidateRegion(current_vaddr, copy_amount); |
| 411 | std::memset(host_ptr, 0, copy_amount); | 290 | std::memset(host_ptr, 0, copy_amount); |
| 412 | break; | 291 | }, |
| 413 | } | 292 | [](const std::size_t copy_amount) {}); |
| 414 | default: | ||
| 415 | UNREACHABLE(); | ||
| 416 | } | ||
| 417 | |||
| 418 | page_index++; | ||
| 419 | page_offset = 0; | ||
| 420 | remaining_size -= copy_amount; | ||
| 421 | } | ||
| 422 | } | ||
| 423 | |||
| 424 | void ZeroBlock(const VAddr dest_addr, const std::size_t size) { | ||
| 425 | ZeroBlock(*system.CurrentProcess(), dest_addr, size); | ||
| 426 | } | 293 | } |
| 427 | 294 | ||
| 428 | void CopyBlock(const Kernel::KProcess& process, VAddr dest_addr, VAddr src_addr, | 295 | void CopyBlock(const Kernel::KProcess& process, VAddr dest_addr, VAddr src_addr, |
| 429 | const std::size_t size) { | 296 | const std::size_t size) { |
| 430 | const auto& page_table = process.PageTable().PageTableImpl(); | 297 | WalkBlock( |
| 431 | std::size_t remaining_size = size; | 298 | process, dest_addr, size, |
| 432 | std::size_t page_index = src_addr >> PAGE_BITS; | 299 | [this, &process, &dest_addr, &src_addr, size](const std::size_t copy_amount, |
| 433 | std::size_t page_offset = src_addr & PAGE_MASK; | 300 | const VAddr current_vaddr) { |
| 434 | |||
| 435 | while (remaining_size > 0) { | ||
| 436 | const std::size_t copy_amount = | ||
| 437 | std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size); | ||
| 438 | const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); | ||
| 439 | |||
| 440 | const auto [pointer, type] = page_table.pointers[page_index].PointerType(); | ||
| 441 | switch (type) { | ||
| 442 | case Common::PageType::Unmapped: { | ||
| 443 | LOG_ERROR(HW_Memory, | 301 | LOG_ERROR(HW_Memory, |
| 444 | "Unmapped CopyBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", | 302 | "Unmapped CopyBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", |
| 445 | current_vaddr, src_addr, size); | 303 | current_vaddr, src_addr, size); |
| 446 | ZeroBlock(process, dest_addr, copy_amount); | 304 | ZeroBlock(process, dest_addr, copy_amount); |
| 447 | break; | 305 | }, |
| 448 | } | 306 | [this, &process, &dest_addr](const std::size_t copy_amount, const u8* const src_ptr) { |
| 449 | case Common::PageType::Memory: { | 307 | WriteBlockImpl<false>(process, dest_addr, src_ptr, copy_amount); |
| 450 | DEBUG_ASSERT(pointer); | 308 | }, |
| 451 | const u8* src_ptr = pointer + page_offset + (page_index << PAGE_BITS); | 309 | [this, &system = system, &process, &dest_addr]( |
| 452 | WriteBlock(process, dest_addr, src_ptr, copy_amount); | 310 | const VAddr current_vaddr, const std::size_t copy_amount, u8* const host_ptr) { |
| 453 | break; | ||
| 454 | } | ||
| 455 | case Common::PageType::RasterizerCachedMemory: { | ||
| 456 | const u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; | ||
| 457 | system.GPU().FlushRegion(current_vaddr, copy_amount); | 311 | system.GPU().FlushRegion(current_vaddr, copy_amount); |
| 458 | WriteBlock(process, dest_addr, host_ptr, copy_amount); | 312 | WriteBlockImpl<false>(process, dest_addr, host_ptr, copy_amount); |
| 459 | break; | 313 | }, |
| 460 | } | 314 | [&dest_addr, &src_addr](const std::size_t copy_amount) { |
| 461 | default: | 315 | dest_addr += static_cast<VAddr>(copy_amount); |
| 462 | UNREACHABLE(); | 316 | src_addr += static_cast<VAddr>(copy_amount); |
| 463 | } | 317 | }); |
| 464 | |||
| 465 | page_index++; | ||
| 466 | page_offset = 0; | ||
| 467 | dest_addr += static_cast<VAddr>(copy_amount); | ||
| 468 | src_addr += static_cast<VAddr>(copy_amount); | ||
| 469 | remaining_size -= copy_amount; | ||
| 470 | } | ||
| 471 | } | ||
| 472 | |||
| 473 | void CopyBlock(VAddr dest_addr, VAddr src_addr, std::size_t size) { | ||
| 474 | return CopyBlock(*system.CurrentProcess(), dest_addr, src_addr, size); | ||
| 475 | } | 318 | } |
| 476 | 319 | ||
| 477 | void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) { | 320 | void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) { |
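Worth calling out from the hunk above: the Unsafe variants no longer carry their own copies of the loop. ReadBlockImpl and WriteBlockImpl are single templates where `if constexpr (!UNSAFE)` compiles the GPU flush or invalidate in or out. A self-contained sketch of that pattern; the FlushRegion stub below is a placeholder, not yuzu's GPU interface:

```cpp
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

// Stand-in for system.GPU().FlushRegion(); purely illustrative.
void FlushRegion(std::uint64_t addr, std::size_t size) {
    std::cout << "flush 0x" << std::hex << addr << " +" << std::dec << size << '\n';
}

struct ToyMemory {
    std::vector<std::uint8_t> backing = std::vector<std::uint8_t>(64, 0x5A);

    // One implementation; the UNSAFE flag compiles the coherency work in or out.
    template <bool UNSAFE>
    void ReadBlockImpl(std::uint64_t src, void* dest, std::size_t size) {
        if constexpr (!UNSAFE) {
            FlushRegion(src, size); // only the "safe" instantiation pays for this
        }
        std::memcpy(dest, backing.data() + src, size);
    }

    void ReadBlock(std::uint64_t src, void* dest, std::size_t size) {
        ReadBlockImpl<false>(src, dest, size);
    }
    void ReadBlockUnsafe(std::uint64_t src, void* dest, std::size_t size) {
        ReadBlockImpl<true>(src, dest, size);
    }
};

int main() {
    ToyMemory mem;
    std::uint8_t buf[8]{};
    mem.ReadBlock(0, buf, sizeof(buf));       // prints a flush line
    mem.ReadBlockUnsafe(8, buf, sizeof(buf)); // no flush at all
    std::cout << "byte: 0x" << std::hex << int(buf[0]) << '\n';
}
```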
| @@ -514,7 +357,7 @@ struct Memory::Impl { | |||
| 514 | } else { | 357 | } else { |
| 515 | // Switch page type to uncached if now uncached | 358 | // Switch page type to uncached if now uncached |
| 516 | switch (page_type) { | 359 | switch (page_type) { |
| 517 | case Common::PageType::Unmapped: | 360 | case Common::PageType::Unmapped: // NOLINT(bugprone-branch-clone) |
| 518 | // It is not necessary for a process to have this region mapped into its address | 361 | // It is not necessary for a process to have this region mapped into its address |
| 519 | // space, for example, a system module need not have a VRAM mapping. | 362 | // space, for example, a system module need not have a VRAM mapping. |
| 520 | break; | 363 | break; |
| @@ -597,52 +440,68 @@ struct Memory::Impl { | |||
| 597 | } | 440 | } |
| 598 | } | 441 | } |
| 599 | 442 | ||
| 600 | /** | 443 | [[nodiscard]] u8* GetPointerImpl(VAddr vaddr, auto on_unmapped, auto on_rasterizer) const { |
| 601 | * Reads a particular data type out of memory at the given virtual address. | ||
| 602 | * | ||
| 603 | * @param vaddr The virtual address to read the data type from. | ||
| 604 | * | ||
| 605 | * @tparam T The data type to read out of memory. This type *must* be | ||
| 606 | * trivially copyable, otherwise the behavior of this function | ||
| 607 | * is undefined. | ||
| 608 | * | ||
| 609 | * @returns The instance of T read from the specified virtual address. | ||
| 610 | */ | ||
| 611 | template <typename T> | ||
| 612 | T Read(VAddr vaddr) { | ||
| 613 | // AARCH64 masks the upper 16 bit of all memory accesses | 444 | // AARCH64 masks the upper 16 bit of all memory accesses |
| 614 | vaddr &= 0xffffffffffffLL; | 445 | vaddr &= 0xffffffffffffLL; |
| 615 | 446 | ||
| 616 | if (vaddr >= 1uLL << current_page_table->GetAddressSpaceBits()) { | 447 | if (vaddr >= 1uLL << current_page_table->GetAddressSpaceBits()) { |
| 617 | LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, vaddr); | 448 | on_unmapped(); |
| 618 | return 0; | 449 | return nullptr; |
| 619 | } | 450 | } |
| 620 | 451 | ||
| 621 | // Avoid adding any extra logic to this fast-path block | 452 | // Avoid adding any extra logic to this fast-path block |
| 622 | const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw(); | 453 | const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw(); |
| 623 | if (const u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) { | 454 | if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) { |
| 624 | T value; | 455 | return &pointer[vaddr]; |
| 625 | std::memcpy(&value, &pointer[vaddr], sizeof(T)); | ||
| 626 | return value; | ||
| 627 | } | 456 | } |
| 628 | switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) { | 457 | switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) { |
| 629 | case Common::PageType::Unmapped: | 458 | case Common::PageType::Unmapped: |
| 630 | LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, vaddr); | 459 | on_unmapped(); |
| 631 | return 0; | 460 | return nullptr; |
| 632 | case Common::PageType::Memory: | 461 | case Common::PageType::Memory: |
| 633 | ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); | 462 | ASSERT_MSG(false, "Mapped memory page without a pointer @ 0x{:016X}", vaddr); |
| 634 | break; | 463 | return nullptr; |
| 635 | case Common::PageType::RasterizerCachedMemory: { | 464 | case Common::PageType::RasterizerCachedMemory: { |
| 636 | const u8* const host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)}; | 465 | u8* const host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)}; |
| 637 | system.GPU().FlushRegion(vaddr, sizeof(T)); | 466 | on_rasterizer(); |
| 638 | T value; | 467 | return host_ptr; |
| 639 | std::memcpy(&value, host_ptr, sizeof(T)); | ||
| 640 | return value; | ||
| 641 | } | 468 | } |
| 642 | default: | 469 | default: |
| 643 | UNREACHABLE(); | 470 | UNREACHABLE(); |
| 644 | } | 471 | } |
| 645 | return {}; | 472 | return nullptr; |
| 473 | } | ||
| 474 | |||
| 475 | [[nodiscard]] u8* GetPointer(const VAddr vaddr) const { | ||
| 476 | return GetPointerImpl( | ||
| 477 | vaddr, [vaddr]() { LOG_ERROR(HW_Memory, "Unmapped GetPointer @ 0x{:016X}", vaddr); }, | ||
| 478 | []() {}); | ||
| 479 | } | ||
| 480 | |||
| 481 | /** | ||
| 482 | * Reads a particular data type out of memory at the given virtual address. | ||
| 483 | * | ||
| 484 | * @param vaddr The virtual address to read the data type from. | ||
| 485 | * | ||
| 486 | * @tparam T The data type to read out of memory. This type *must* be | ||
| 487 | * trivially copyable, otherwise the behavior of this function | ||
| 488 | * is undefined. | ||
| 489 | * | ||
| 490 | * @returns The instance of T read from the specified virtual address. | ||
| 491 | */ | ||
| 492 | template <typename T> | ||
| 493 | T Read(VAddr vaddr) { | ||
| 494 | T result = 0; | ||
| 495 | const u8* const ptr = GetPointerImpl( | ||
| 496 | vaddr, | ||
| 497 | [vaddr]() { | ||
| 498 | LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:016X}", sizeof(T) * 8, vaddr); | ||
| 499 | }, | ||
| 500 | [&system = system, vaddr]() { system.GPU().FlushRegion(vaddr, sizeof(T)); }); | ||
| 501 | if (ptr) { | ||
| 502 | std::memcpy(&result, ptr, sizeof(T)); | ||
| 503 | } | ||
| 504 | return result; | ||
| 646 | } | 505 | } |
| 647 | 506 | ||
| 648 | /** | 507 | /** |
| @@ -656,110 +515,46 @@ struct Memory::Impl { | |||
| 656 | */ | 515 | */ |
| 657 | template <typename T> | 516 | template <typename T> |
| 658 | void Write(VAddr vaddr, const T data) { | 517 | void Write(VAddr vaddr, const T data) { |
| 659 | // AARCH64 masks the upper 16 bit of all memory accesses | 518 | u8* const ptr = GetPointerImpl( |
| 660 | vaddr &= 0xffffffffffffLL; | 519 | vaddr, |
| 661 | 520 | [vaddr, data]() { | |
| 662 | if (vaddr >= 1uLL << current_page_table->GetAddressSpaceBits()) { | 521 | LOG_ERROR(HW_Memory, "Unmapped Write{} @ 0x{:016X} = 0x{:016X}", sizeof(T) * 8, |
| 663 | LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8, | 522 | vaddr, static_cast<u64>(data)); |
| 664 | static_cast<u32>(data), vaddr); | 523 | }, |
| 665 | return; | 524 | [&system = system, vaddr]() { system.GPU().InvalidateRegion(vaddr, sizeof(T)); }); |
| 666 | } | 525 | if (ptr) { |
| 667 | 526 | std::memcpy(ptr, &data, sizeof(T)); | |
| 668 | // Avoid adding any extra logic to this fast-path block | ||
| 669 | const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw(); | ||
| 670 | if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) { | ||
| 671 | std::memcpy(&pointer[vaddr], &data, sizeof(T)); | ||
| 672 | return; | ||
| 673 | } | ||
| 674 | switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) { | ||
| 675 | case Common::PageType::Unmapped: | ||
| 676 | LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8, | ||
| 677 | static_cast<u32>(data), vaddr); | ||
| 678 | return; | ||
| 679 | case Common::PageType::Memory: | ||
| 680 | ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); | ||
| 681 | break; | ||
| 682 | case Common::PageType::RasterizerCachedMemory: { | ||
| 683 | u8* const host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)}; | ||
| 684 | system.GPU().InvalidateRegion(vaddr, sizeof(T)); | ||
| 685 | std::memcpy(host_ptr, &data, sizeof(T)); | ||
| 686 | break; | ||
| 687 | } | ||
| 688 | default: | ||
| 689 | UNREACHABLE(); | ||
| 690 | } | 527 | } |
| 691 | } | 528 | } |
| 692 | 529 | ||
| 693 | template <typename T> | 530 | template <typename T> |
| 694 | bool WriteExclusive(VAddr vaddr, const T data, const T expected) { | 531 | bool WriteExclusive(VAddr vaddr, const T data, const T expected) { |
| 695 | // AARCH64 masks the upper 16 bit of all memory accesses | 532 | u8* const ptr = GetPointerImpl( |
| 696 | vaddr &= 0xffffffffffffLL; | 533 | vaddr, |
| 697 | 534 | [vaddr, data]() { | |
| 698 | if (vaddr >= 1uLL << current_page_table->GetAddressSpaceBits()) { | 535 | LOG_ERROR(HW_Memory, "Unmapped WriteExclusive{} @ 0x{:016X} = 0x{:016X}", |
| 699 | LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8, | 536 | sizeof(T) * 8, vaddr, static_cast<u64>(data)); |
| 700 | static_cast<u32>(data), vaddr); | 537 | }, |
| 701 | return true; | 538 | [&system = system, vaddr]() { system.GPU().InvalidateRegion(vaddr, sizeof(T)); }); |
| 702 | } | 539 | if (ptr) { |
| 703 | 540 | const auto volatile_pointer = reinterpret_cast<volatile T*>(ptr); | |
| 704 | const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw(); | ||
| 705 | if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) { | ||
| 706 | // NOTE: Avoid adding any extra logic to this fast-path block | ||
| 707 | const auto volatile_pointer = reinterpret_cast<volatile T*>(&pointer[vaddr]); | ||
| 708 | return Common::AtomicCompareAndSwap(volatile_pointer, data, expected); | 541 | return Common::AtomicCompareAndSwap(volatile_pointer, data, expected); |
| 709 | } | 542 | } |
| 710 | switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) { | ||
| 711 | case Common::PageType::Unmapped: | ||
| 712 | LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8, | ||
| 713 | static_cast<u32>(data), vaddr); | ||
| 714 | return true; | ||
| 715 | case Common::PageType::Memory: | ||
| 716 | ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); | ||
| 717 | break; | ||
| 718 | case Common::PageType::RasterizerCachedMemory: { | ||
| 719 | u8* host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)}; | ||
| 720 | system.GPU().InvalidateRegion(vaddr, sizeof(T)); | ||
| 721 | auto* pointer = reinterpret_cast<volatile T*>(&host_ptr); | ||
| 722 | return Common::AtomicCompareAndSwap(pointer, data, expected); | ||
| 723 | } | ||
| 724 | default: | ||
| 725 | UNREACHABLE(); | ||
| 726 | } | ||
| 727 | return true; | 543 | return true; |
| 728 | } | 544 | } |
| 729 | 545 | ||
| 730 | bool WriteExclusive128(VAddr vaddr, const u128 data, const u128 expected) { | 546 | bool WriteExclusive128(VAddr vaddr, const u128 data, const u128 expected) { |
| 731 | // AARCH64 masks the upper 16 bit of all memory accesses | 547 | u8* const ptr = GetPointerImpl( |
| 732 | vaddr &= 0xffffffffffffLL; | 548 | vaddr, |
| 733 | 549 | [vaddr, data]() { | |
| 734 | if (vaddr >= 1uLL << current_page_table->GetAddressSpaceBits()) { | 550 | LOG_ERROR(HW_Memory, "Unmapped WriteExclusive128 @ 0x{:016X} = 0x{:016X}{:016X}", |
| 735 | LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8, | 551 | vaddr, static_cast<u64>(data[1]), static_cast<u64>(data[0])); |
| 736 | static_cast<u32>(data[0]), vaddr); | 552 | }, |
| 737 | return true; | 553 | [&system = system, vaddr]() { system.GPU().InvalidateRegion(vaddr, sizeof(u128)); }); |
| 738 | } | 554 | if (ptr) { |
| 739 | 555 | const auto volatile_pointer = reinterpret_cast<volatile u64*>(ptr); | |
| 740 | const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw(); | ||
| 741 | if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) { | ||
| 742 | // NOTE: Avoid adding any extra logic to this fast-path block | ||
| 743 | const auto volatile_pointer = reinterpret_cast<volatile u64*>(&pointer[vaddr]); | ||
| 744 | return Common::AtomicCompareAndSwap(volatile_pointer, data, expected); | 556 | return Common::AtomicCompareAndSwap(volatile_pointer, data, expected); |
| 745 | } | 557 | } |
| 746 | switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) { | ||
| 747 | case Common::PageType::Unmapped: | ||
| 748 | LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}{:016X}", sizeof(data) * 8, | ||
| 749 | static_cast<u64>(data[1]), static_cast<u64>(data[0]), vaddr); | ||
| 750 | return true; | ||
| 751 | case Common::PageType::Memory: | ||
| 752 | ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); | ||
| 753 | break; | ||
| 754 | case Common::PageType::RasterizerCachedMemory: { | ||
| 755 | u8* host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)}; | ||
| 756 | system.GPU().InvalidateRegion(vaddr, sizeof(u128)); | ||
| 757 | auto* pointer = reinterpret_cast<volatile u64*>(&host_ptr); | ||
| 758 | return Common::AtomicCompareAndSwap(pointer, data, expected); | ||
| 759 | } | ||
| 760 | default: | ||
| 761 | UNREACHABLE(); | ||
| 762 | } | ||
| 763 | return true; | 558 | return true; |
| 764 | } | 559 | } |
| 765 | 560 | ||
| @@ -789,12 +584,11 @@ void Memory::UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) { | |||
| 789 | impl->UnmapRegion(page_table, base, size); | 584 | impl->UnmapRegion(page_table, base, size); |
| 790 | } | 585 | } |
| 791 | 586 | ||
| 792 | bool Memory::IsValidVirtualAddress(const Kernel::KProcess& process, const VAddr vaddr) const { | ||
| 793 | return impl->IsValidVirtualAddress(process, vaddr); | ||
| 794 | } | ||
| 795 | |||
| 796 | bool Memory::IsValidVirtualAddress(const VAddr vaddr) const { | 587 | bool Memory::IsValidVirtualAddress(const VAddr vaddr) const { |
| 797 | return impl->IsValidVirtualAddress(vaddr); | 588 | const Kernel::KProcess& process = *system.CurrentProcess(); |
| 589 | const auto& page_table = process.PageTable().PageTableImpl(); | ||
| 590 | const auto [pointer, type] = page_table.pointers[vaddr >> PAGE_BITS].PointerType(); | ||
| 591 | return pointer != nullptr || type == Common::PageType::RasterizerCachedMemory; | ||
| 798 | } | 592 | } |
| 799 | 593 | ||
| 800 | u8* Memory::GetPointer(VAddr vaddr) { | 594 | u8* Memory::GetPointer(VAddr vaddr) { |
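For reference, the check that the hunk above folds into Memory::IsValidVirtualAddress boils down to one predicate: the page either has a direct host pointer or is rasterizer-cached. A tiny sketch of just that predicate, with stand-in types rather than yuzu's Common::PageTable:

```cpp
#include <cstdint>
#include <iostream>

// Stand-in types; yuzu's real lookup goes through Common::PageTable.
enum class PageType { Unmapped, Memory, RasterizerCachedMemory };

struct PageInfo {
    const std::uint8_t* pointer = nullptr;
    PageType type = PageType::Unmapped;
};

// An address is usable when its page is directly mapped or rasterizer-cached.
bool IsValidPage(const PageInfo& page) {
    return page.pointer != nullptr || page.type == PageType::RasterizerCachedMemory;
}

int main() {
    std::uint8_t byte = 0;
    std::cout << std::boolalpha;
    std::cout << IsValidPage({&byte, PageType::Memory}) << '\n';                   // true
    std::cout << IsValidPage({nullptr, PageType::RasterizerCachedMemory}) << '\n'; // true
    std::cout << IsValidPage({nullptr, PageType::Unmapped}) << '\n';               // false
}
```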
| @@ -863,64 +657,38 @@ std::string Memory::ReadCString(VAddr vaddr, std::size_t max_length) { | |||
| 863 | 657 | ||
| 864 | void Memory::ReadBlock(const Kernel::KProcess& process, const VAddr src_addr, void* dest_buffer, | 658 | void Memory::ReadBlock(const Kernel::KProcess& process, const VAddr src_addr, void* dest_buffer, |
| 865 | const std::size_t size) { | 659 | const std::size_t size) { |
| 866 | impl->ReadBlock(process, src_addr, dest_buffer, size); | 660 | impl->ReadBlockImpl<false>(process, src_addr, dest_buffer, size); |
| 867 | } | 661 | } |
| 868 | 662 | ||
| 869 | void Memory::ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) { | 663 | void Memory::ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) { |
| 870 | impl->ReadBlock(src_addr, dest_buffer, size); | 664 | impl->ReadBlock(src_addr, dest_buffer, size); |
| 871 | } | 665 | } |
| 872 | 666 | ||
| 873 | void Memory::ReadBlockUnsafe(const Kernel::KProcess& process, const VAddr src_addr, | ||
| 874 | void* dest_buffer, const std::size_t size) { | ||
| 875 | impl->ReadBlockUnsafe(process, src_addr, dest_buffer, size); | ||
| 876 | } | ||
| 877 | |||
| 878 | void Memory::ReadBlockUnsafe(const VAddr src_addr, void* dest_buffer, const std::size_t size) { | 667 | void Memory::ReadBlockUnsafe(const VAddr src_addr, void* dest_buffer, const std::size_t size) { |
| 879 | impl->ReadBlockUnsafe(src_addr, dest_buffer, size); | 668 | impl->ReadBlockUnsafe(src_addr, dest_buffer, size); |
| 880 | } | 669 | } |
| 881 | 670 | ||
| 882 | void Memory::WriteBlock(const Kernel::KProcess& process, VAddr dest_addr, const void* src_buffer, | 671 | void Memory::WriteBlock(const Kernel::KProcess& process, VAddr dest_addr, const void* src_buffer, |
| 883 | std::size_t size) { | 672 | std::size_t size) { |
| 884 | impl->WriteBlock(process, dest_addr, src_buffer, size); | 673 | impl->WriteBlockImpl<false>(process, dest_addr, src_buffer, size); |
| 885 | } | 674 | } |
| 886 | 675 | ||
| 887 | void Memory::WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) { | 676 | void Memory::WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) { |
| 888 | impl->WriteBlock(dest_addr, src_buffer, size); | 677 | impl->WriteBlock(dest_addr, src_buffer, size); |
| 889 | } | 678 | } |
| 890 | 679 | ||
| 891 | void Memory::WriteBlockUnsafe(const Kernel::KProcess& process, VAddr dest_addr, | ||
| 892 | const void* src_buffer, std::size_t size) { | ||
| 893 | impl->WriteBlockUnsafe(process, dest_addr, src_buffer, size); | ||
| 894 | } | ||
| 895 | |||
| 896 | void Memory::WriteBlockUnsafe(const VAddr dest_addr, const void* src_buffer, | 680 | void Memory::WriteBlockUnsafe(const VAddr dest_addr, const void* src_buffer, |
| 897 | const std::size_t size) { | 681 | const std::size_t size) { |
| 898 | impl->WriteBlockUnsafe(dest_addr, src_buffer, size); | 682 | impl->WriteBlockUnsafe(dest_addr, src_buffer, size); |
| 899 | } | 683 | } |
| 900 | 684 | ||
| 901 | void Memory::ZeroBlock(const Kernel::KProcess& process, VAddr dest_addr, std::size_t size) { | ||
| 902 | impl->ZeroBlock(process, dest_addr, size); | ||
| 903 | } | ||
| 904 | |||
| 905 | void Memory::ZeroBlock(VAddr dest_addr, std::size_t size) { | ||
| 906 | impl->ZeroBlock(dest_addr, size); | ||
| 907 | } | ||
| 908 | |||
| 909 | void Memory::CopyBlock(const Kernel::KProcess& process, VAddr dest_addr, VAddr src_addr, | 685 | void Memory::CopyBlock(const Kernel::KProcess& process, VAddr dest_addr, VAddr src_addr, |
| 910 | const std::size_t size) { | 686 | const std::size_t size) { |
| 911 | impl->CopyBlock(process, dest_addr, src_addr, size); | 687 | impl->CopyBlock(process, dest_addr, src_addr, size); |
| 912 | } | 688 | } |
| 913 | 689 | ||
| 914 | void Memory::CopyBlock(VAddr dest_addr, VAddr src_addr, std::size_t size) { | ||
| 915 | impl->CopyBlock(dest_addr, src_addr, size); | ||
| 916 | } | ||
| 917 | |||
| 918 | void Memory::RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) { | 690 | void Memory::RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) { |
| 919 | impl->RasterizerMarkRegionCached(vaddr, size, cached); | 691 | impl->RasterizerMarkRegionCached(vaddr, size, cached); |
| 920 | } | 692 | } |
| 921 | 693 | ||
| 922 | bool IsKernelVirtualAddress(const VAddr vaddr) { | ||
| 923 | return KERNEL_REGION_VADDR <= vaddr && vaddr < KERNEL_REGION_END; | ||
| 924 | } | ||
| 925 | |||
| 926 | } // namespace Core::Memory | 694 | } // namespace Core::Memory |
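Finally, the WriteExclusive paths shown earlier now all end in a single Common::AtomicCompareAndSwap on the resolved host pointer. As a standalone illustration of the compare-and-swap shape, using C++20 std::atomic_ref rather than yuzu's helper and with invented names and simplified return semantics:

```cpp
#include <atomic>
#include <cstdint>
#include <iostream>

// Standalone compare-and-swap sketch: store `desired` only if the location
// still holds `expected`. This is not yuzu's Common::AtomicCompareAndSwap.
bool StoreExclusive(std::uint32_t& location, std::uint32_t desired, std::uint32_t expected) {
    std::atomic_ref<std::uint32_t> ref{location};
    // compare_exchange_strong returns true when the swap actually happened.
    return ref.compare_exchange_strong(expected, desired);
}

int main() {
    std::uint32_t word = 41;
    std::cout << std::boolalpha;
    std::cout << StoreExclusive(word, 42, 41) << ' ' << word << '\n'; // true 42
    std::cout << StoreExclusive(word, 99, 41) << ' ' << word << '\n'; // false 42
}
```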