| author | 2021-08-05 20:11:14 +0000 | |
|---|---|---|
| committer | 2021-08-05 20:11:14 +0000 | |
| commit | 6df9611059e21d003fda4a0f8bd6773736438643 (patch) | |
| tree | 9b5d5ce6f35b49489eb9dfa236699177cf8fc711 /src/core/memory.cpp | |
| parent | Merge pull request #6819 from Morph1984/i-am-dumb (diff) | |
| download | yuzu-6df9611059e21d003fda4a0f8bd6773736438643.tar.gz yuzu-6df9611059e21d003fda4a0f8bd6773736438643.tar.xz yuzu-6df9611059e21d003fda4a0f8bd6773736438643.zip | |
memory: Clean up code
Diffstat (limited to 'src/core/memory.cpp')
| -rw-r--r-- | src/core/memory.cpp | 306 |
1 files changed, 77 insertions, 229 deletions
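The bulk of this cleanup replaces the near-identical page-walking loops (the process-taking ReadBlock, ReadBlockUnsafe, WriteBlock, WriteBlockUnsafe, and ZeroBlock) with a single WalkBlock helper that receives callables for each page type, and drops the now-redundant wrappers such as the per-process IsValidVirtualAddress overload, the process-less ZeroBlock/CopyBlock, and the free function IsKernelVirtualAddress. The sketch below is a minimal, stand-alone illustration of that callback-driven walk, not yuzu's implementation: `Page`, `PageType`, `kPageBits`, and this simplified `ReadBlock` are illustrative stand-ins, and the code assumes the requested range lies entirely within the page vector.

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Simplified stand-ins for the emulator's page-table machinery (illustrative only).
constexpr std::size_t kPageBits = 12;
constexpr std::size_t kPageSize = std::size_t{1} << kPageBits;
constexpr std::size_t kPageMask = kPageSize - 1;

enum class PageType { Unmapped, Memory };

struct Page {
    PageType type = PageType::Unmapped;
    std::uint8_t* host = nullptr; // backing host memory for mapped pages
};

// One loop owns the page walk; callers supply the per-page-type behaviour.
// Assumes [addr, addr + size) stays within the bounds of `pages`.
template <typename OnUnmapped, typename OnMemory, typename Increment>
void WalkBlock(const std::vector<Page>& pages, std::uint64_t addr, std::size_t size,
               OnUnmapped on_unmapped, OnMemory on_memory, Increment increment) {
    std::size_t remaining = size;
    std::size_t page_index = static_cast<std::size_t>(addr >> kPageBits);
    std::size_t page_offset = static_cast<std::size_t>(addr & kPageMask);

    while (remaining > 0) {
        const std::size_t copy_amount = std::min(kPageSize - page_offset, remaining);
        const Page& page = pages[page_index];

        switch (page.type) {
        case PageType::Unmapped:
            on_unmapped(copy_amount);
            break;
        case PageType::Memory:
            on_memory(copy_amount, page.host + page_offset);
            break;
        }

        ++page_index;
        page_offset = 0;
        increment(copy_amount); // caller advances its own destination/source pointer
        remaining -= copy_amount;
    }
}

// A read built on the generic walk: zero-fill unmapped ranges, memcpy mapped ones.
void ReadBlock(const std::vector<Page>& pages, std::uint64_t src_addr, void* dest,
               std::size_t size) {
    WalkBlock(
        pages, src_addr, size,
        [&dest](std::size_t amount) { std::memset(dest, 0, amount); },
        [&dest](std::size_t amount, const std::uint8_t* src) { std::memcpy(dest, src, amount); },
        [&dest](std::size_t amount) { dest = static_cast<std::uint8_t*>(dest) + amount; });
}
```

The payoff, visible in the hunks below, is that each entry point shrinks to a handful of lambdas describing only what differs: how to handle unmapped pages, how to touch mapped memory, and how to advance the caller's buffer.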
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index f285c6f63..7b23c189c 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
| @@ -1,11 +1,9 @@ | |||
| 1 | // Copyright 2015 Citra Emulator Project | 1 | // Copyright 2015 Citra Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| 5 | #include <algorithm> | 5 | #include <algorithm> |
| 6 | #include <cstring> | 6 | #include <cstring> |
| 7 | #include <optional> | ||
| 8 | #include <utility> | ||
| 9 | 7 | ||
| 10 | #include "common/assert.h" | 8 | #include "common/assert.h" |
| 11 | #include "common/atomic_ops.h" | 9 | #include "common/atomic_ops.h" |
| @@ -14,12 +12,10 @@ | |||
| 14 | #include "common/page_table.h" | 12 | #include "common/page_table.h" |
| 15 | #include "common/settings.h" | 13 | #include "common/settings.h" |
| 16 | #include "common/swap.h" | 14 | #include "common/swap.h" |
| 17 | #include "core/arm/arm_interface.h" | ||
| 18 | #include "core/core.h" | 15 | #include "core/core.h" |
| 19 | #include "core/device_memory.h" | 16 | #include "core/device_memory.h" |
| 20 | #include "core/hle/kernel/k_page_table.h" | 17 | #include "core/hle/kernel/k_page_table.h" |
| 21 | #include "core/hle/kernel/k_process.h" | 18 | #include "core/hle/kernel/k_process.h" |
| 22 | #include "core/hle/kernel/physical_memory.h" | ||
| 23 | #include "core/memory.h" | 19 | #include "core/memory.h" |
| 24 | #include "video_core/gpu.h" | 20 | #include "video_core/gpu.h" |
| 25 | 21 | ||
| @@ -62,17 +58,7 @@ struct Memory::Impl { | |||
| 62 | } | 58 | } |
| 63 | } | 59 | } |
| 64 | 60 | ||
| 65 | bool IsValidVirtualAddress(const Kernel::KProcess& process, const VAddr vaddr) const { | 61 | [[nodiscard]] u8* GetPointerFromRasterizerCachedMemory(VAddr vaddr) const { |
| 66 | const auto& page_table = process.PageTable().PageTableImpl(); | ||
| 67 | const auto [pointer, type] = page_table.pointers[vaddr >> PAGE_BITS].PointerType(); | ||
| 68 | return pointer != nullptr || type == Common::PageType::RasterizerCachedMemory; | ||
| 69 | } | ||
| 70 | |||
| 71 | bool IsValidVirtualAddress(VAddr vaddr) const { | ||
| 72 | return IsValidVirtualAddress(*system.CurrentProcess(), vaddr); | ||
| 73 | } | ||
| 74 | |||
| 75 | u8* GetPointerFromRasterizerCachedMemory(VAddr vaddr) const { | ||
| 76 | const PAddr paddr{current_page_table->backing_addr[vaddr >> PAGE_BITS]}; | 62 | const PAddr paddr{current_page_table->backing_addr[vaddr >> PAGE_BITS]}; |
| 77 | 63 | ||
| 78 | if (!paddr) { | 64 | if (!paddr) { |
| @@ -82,7 +68,7 @@ struct Memory::Impl { | |||
| 82 | return system.DeviceMemory().GetPointer(paddr) + vaddr; | 68 | return system.DeviceMemory().GetPointer(paddr) + vaddr; |
| 83 | } | 69 | } |
| 84 | 70 | ||
| 85 | u8* GetPointer(const VAddr vaddr) const { | 71 | [[nodiscard]] u8* GetPointer(const VAddr vaddr) const { |
| 86 | const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw(); | 72 | const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw(); |
| 87 | if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) { | 73 | if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) { |
| 88 | return pointer + vaddr; | 74 | return pointer + vaddr; |
| @@ -179,7 +165,7 @@ struct Memory::Impl { | |||
| 179 | std::string string; | 165 | std::string string; |
| 180 | string.reserve(max_length); | 166 | string.reserve(max_length); |
| 181 | for (std::size_t i = 0; i < max_length; ++i) { | 167 | for (std::size_t i = 0; i < max_length; ++i) { |
| 182 | const char c = Read8(vaddr); | 168 | const char c = Read<s8>(vaddr); |
| 183 | if (c == '\0') { | 169 | if (c == '\0') { |
| 184 | break; | 170 | break; |
| 185 | } | 171 | } |
| @@ -190,15 +176,14 @@ struct Memory::Impl { | |||
| 190 | return string; | 176 | return string; |
| 191 | } | 177 | } |
| 192 | 178 | ||
| 193 | void ReadBlock(const Kernel::KProcess& process, const VAddr src_addr, void* dest_buffer, | 179 | void WalkBlock(const Kernel::KProcess& process, VAddr addr, const std::size_t size, |
| 194 | const std::size_t size) { | 180 | auto on_unmapped, auto on_memory, auto on_rasterizer, auto increment) { |
| 195 | const auto& page_table = process.PageTable().PageTableImpl(); | 181 | const auto& page_table = process.PageTable().PageTableImpl(); |
| 196 | |||
| 197 | std::size_t remaining_size = size; | 182 | std::size_t remaining_size = size; |
| 198 | std::size_t page_index = src_addr >> PAGE_BITS; | 183 | std::size_t page_index = addr >> PAGE_BITS; |
| 199 | std::size_t page_offset = src_addr & PAGE_MASK; | 184 | std::size_t page_offset = addr & PAGE_MASK; |
| 200 | 185 | ||
| 201 | while (remaining_size > 0) { | 186 | while (remaining_size) { |
| 202 | const std::size_t copy_amount = | 187 | const std::size_t copy_amount = |
| 203 | std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size); | 188 | std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size); |
| 204 | const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); | 189 | const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); |
| @@ -206,22 +191,18 @@ struct Memory::Impl { | |||
| 206 | const auto [pointer, type] = page_table.pointers[page_index].PointerType(); | 191 | const auto [pointer, type] = page_table.pointers[page_index].PointerType(); |
| 207 | switch (type) { | 192 | switch (type) { |
| 208 | case Common::PageType::Unmapped: { | 193 | case Common::PageType::Unmapped: { |
| 209 | LOG_ERROR(HW_Memory, | 194 | on_unmapped(copy_amount, current_vaddr); |
| 210 | "Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", | ||
| 211 | current_vaddr, src_addr, size); | ||
| 212 | std::memset(dest_buffer, 0, copy_amount); | ||
| 213 | break; | 195 | break; |
| 214 | } | 196 | } |
| 215 | case Common::PageType::Memory: { | 197 | case Common::PageType::Memory: { |
| 216 | DEBUG_ASSERT(pointer); | 198 | DEBUG_ASSERT(pointer); |
| 217 | const u8* const src_ptr = pointer + page_offset + (page_index << PAGE_BITS); | 199 | u8* mem_ptr = pointer + page_offset + (page_index << PAGE_BITS); |
| 218 | std::memcpy(dest_buffer, src_ptr, copy_amount); | 200 | on_memory(copy_amount, mem_ptr); |
| 219 | break; | 201 | break; |
| 220 | } | 202 | } |
| 221 | case Common::PageType::RasterizerCachedMemory: { | 203 | case Common::PageType::RasterizerCachedMemory: { |
| 222 | const u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; | 204 | u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; |
| 223 | system.GPU().FlushRegion(current_vaddr, copy_amount); | 205 | on_rasterizer(current_vaddr, copy_amount, host_ptr); |
| 224 | std::memcpy(dest_buffer, host_ptr, copy_amount); | ||
| 225 | break; | 206 | break; |
| 226 | } | 207 | } |
| 227 | default: | 208 | default: |
| @@ -230,199 +211,98 @@ struct Memory::Impl { | |||
| 230 | 211 | ||
| 231 | page_index++; | 212 | page_index++; |
| 232 | page_offset = 0; | 213 | page_offset = 0; |
| 233 | dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount; | 214 | addr += static_cast<VAddr>(copy_amount); |
| 215 | increment(copy_amount); | ||
| 234 | remaining_size -= copy_amount; | 216 | remaining_size -= copy_amount; |
| 235 | } | 217 | } |
| 236 | } | 218 | } |
| 237 | 219 | ||
| 238 | void ReadBlockUnsafe(const Kernel::KProcess& process, const VAddr src_addr, void* dest_buffer, | 220 | template <bool UNSAFE> |
| 239 | const std::size_t size) { | 221 | void ReadBlockImpl(const Kernel::KProcess& process, const VAddr src_addr, void* dest_buffer, |
| 240 | const auto& page_table = process.PageTable().PageTableImpl(); | 222 | const std::size_t size) { |
| 241 | 223 | WalkBlock( | |
| 242 | std::size_t remaining_size = size; | 224 | process, src_addr, size, |
| 243 | std::size_t page_index = src_addr >> PAGE_BITS; | 225 | [src_addr, size, &dest_buffer](const std::size_t copy_amount, |
| 244 | std::size_t page_offset = src_addr & PAGE_MASK; | 226 | const VAddr current_vaddr) { |
| 245 | |||
| 246 | while (remaining_size > 0) { | ||
| 247 | const std::size_t copy_amount = | ||
| 248 | std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size); | ||
| 249 | const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); | ||
| 250 | |||
| 251 | const auto [pointer, type] = page_table.pointers[page_index].PointerType(); | ||
| 252 | switch (type) { | ||
| 253 | case Common::PageType::Unmapped: { | ||
| 254 | LOG_ERROR(HW_Memory, | 227 | LOG_ERROR(HW_Memory, |
| 255 | "Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", | 228 | "Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", |
| 256 | current_vaddr, src_addr, size); | 229 | current_vaddr, src_addr, size); |
| 257 | std::memset(dest_buffer, 0, copy_amount); | 230 | std::memset(dest_buffer, 0, copy_amount); |
| 258 | break; | 231 | }, |
| 259 | } | 232 | [&dest_buffer](const std::size_t copy_amount, const u8* const src_ptr) { |
| 260 | case Common::PageType::Memory: { | ||
| 261 | DEBUG_ASSERT(pointer); | ||
| 262 | const u8* const src_ptr = pointer + page_offset + (page_index << PAGE_BITS); | ||
| 263 | std::memcpy(dest_buffer, src_ptr, copy_amount); | 233 | std::memcpy(dest_buffer, src_ptr, copy_amount); |
| 264 | break; | 234 | }, |
| 265 | } | 235 | [&system = system, &dest_buffer](const VAddr current_vaddr, |
| 266 | case Common::PageType::RasterizerCachedMemory: { | 236 | const std::size_t copy_amount, |
| 267 | const u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; | 237 | const u8* const host_ptr) { |
| 238 | if (!UNSAFE) { | ||
| 239 | system.GPU().FlushRegion(current_vaddr, copy_amount); | ||
| 240 | } | ||
| 268 | std::memcpy(dest_buffer, host_ptr, copy_amount); | 241 | std::memcpy(dest_buffer, host_ptr, copy_amount); |
| 269 | break; | 242 | }, |
| 270 | } | 243 | [&dest_buffer](const std::size_t copy_amount) { |
| 271 | default: | 244 | dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount; |
| 272 | UNREACHABLE(); | 245 | }); |
| 273 | } | ||
| 274 | |||
| 275 | page_index++; | ||
| 276 | page_offset = 0; | ||
| 277 | dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount; | ||
| 278 | remaining_size -= copy_amount; | ||
| 279 | } | ||
| 280 | } | 246 | } |
| 281 | 247 | ||
| 282 | void ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) { | 248 | void ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) { |
| 283 | ReadBlock(*system.CurrentProcess(), src_addr, dest_buffer, size); | 249 | ReadBlockImpl<false>(*system.CurrentProcess(), src_addr, dest_buffer, size); |
| 284 | } | 250 | } |
| 285 | 251 | ||
| 286 | void ReadBlockUnsafe(const VAddr src_addr, void* dest_buffer, const std::size_t size) { | 252 | void ReadBlockUnsafe(const VAddr src_addr, void* dest_buffer, const std::size_t size) { |
| 287 | ReadBlockUnsafe(*system.CurrentProcess(), src_addr, dest_buffer, size); | 253 | ReadBlockImpl<true>(*system.CurrentProcess(), src_addr, dest_buffer, size); |
| 288 | } | 254 | } |
| 289 | 255 | ||
| 290 | void WriteBlock(const Kernel::KProcess& process, const VAddr dest_addr, const void* src_buffer, | 256 | template <bool UNSAFE> |
| 291 | const std::size_t size) { | 257 | void WriteBlockImpl(const Kernel::KProcess& process, const VAddr dest_addr, |
| 292 | const auto& page_table = process.PageTable().PageTableImpl(); | 258 | const void* src_buffer, const std::size_t size) { |
| 293 | std::size_t remaining_size = size; | 259 | WalkBlock( |
| 294 | std::size_t page_index = dest_addr >> PAGE_BITS; | 260 | process, dest_addr, size, |
| 295 | std::size_t page_offset = dest_addr & PAGE_MASK; | 261 | [dest_addr, size](const std::size_t copy_amount, const VAddr current_vaddr) { |
| 296 | |||
| 297 | while (remaining_size > 0) { | ||
| 298 | const std::size_t copy_amount = | ||
| 299 | std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size); | ||
| 300 | const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); | ||
| 301 | |||
| 302 | const auto [pointer, type] = page_table.pointers[page_index].PointerType(); | ||
| 303 | switch (type) { | ||
| 304 | case Common::PageType::Unmapped: { | ||
| 305 | LOG_ERROR(HW_Memory, | 262 | LOG_ERROR(HW_Memory, |
| 306 | "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", | 263 | "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", |
| 307 | current_vaddr, dest_addr, size); | 264 | current_vaddr, dest_addr, size); |
| 308 | break; | 265 | }, |
| 309 | } | 266 | [&src_buffer](const std::size_t copy_amount, u8* const dest_ptr) { |
| 310 | case Common::PageType::Memory: { | ||
| 311 | DEBUG_ASSERT(pointer); | ||
| 312 | u8* const dest_ptr = pointer + page_offset + (page_index << PAGE_BITS); | ||
| 313 | std::memcpy(dest_ptr, src_buffer, copy_amount); | 267 | std::memcpy(dest_ptr, src_buffer, copy_amount); |
| 314 | break; | 268 | }, |
| 315 | } | 269 | [&system = system, &src_buffer](const VAddr current_vaddr, |
| 316 | case Common::PageType::RasterizerCachedMemory: { | 270 | const std::size_t copy_amount, u8* const host_ptr) { |
| 317 | u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; | 271 | if (!UNSAFE) { |
| 318 | system.GPU().InvalidateRegion(current_vaddr, copy_amount); | 272 | system.GPU().InvalidateRegion(current_vaddr, copy_amount); |
| 319 | std::memcpy(host_ptr, src_buffer, copy_amount); | 273 | } |
| 320 | break; | ||
| 321 | } | ||
| 322 | default: | ||
| 323 | UNREACHABLE(); | ||
| 324 | } | ||
| 325 | |||
| 326 | page_index++; | ||
| 327 | page_offset = 0; | ||
| 328 | src_buffer = static_cast<const u8*>(src_buffer) + copy_amount; | ||
| 329 | remaining_size -= copy_amount; | ||
| 330 | } | ||
| 331 | } | ||
| 332 | |||
| 333 | void WriteBlockUnsafe(const Kernel::KProcess& process, const VAddr dest_addr, | ||
| 334 | const void* src_buffer, const std::size_t size) { | ||
| 335 | const auto& page_table = process.PageTable().PageTableImpl(); | ||
| 336 | std::size_t remaining_size = size; | ||
| 337 | std::size_t page_index = dest_addr >> PAGE_BITS; | ||
| 338 | std::size_t page_offset = dest_addr & PAGE_MASK; | ||
| 339 | |||
| 340 | while (remaining_size > 0) { | ||
| 341 | const std::size_t copy_amount = | ||
| 342 | std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size); | ||
| 343 | const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); | ||
| 344 | |||
| 345 | const auto [pointer, type] = page_table.pointers[page_index].PointerType(); | ||
| 346 | switch (type) { | ||
| 347 | case Common::PageType::Unmapped: { | ||
| 348 | LOG_ERROR(HW_Memory, | ||
| 349 | "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", | ||
| 350 | current_vaddr, dest_addr, size); | ||
| 351 | break; | ||
| 352 | } | ||
| 353 | case Common::PageType::Memory: { | ||
| 354 | DEBUG_ASSERT(pointer); | ||
| 355 | u8* const dest_ptr = pointer + page_offset + (page_index << PAGE_BITS); | ||
| 356 | std::memcpy(dest_ptr, src_buffer, copy_amount); | ||
| 357 | break; | ||
| 358 | } | ||
| 359 | case Common::PageType::RasterizerCachedMemory: { | ||
| 360 | u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; | ||
| 361 | std::memcpy(host_ptr, src_buffer, copy_amount); | 274 | std::memcpy(host_ptr, src_buffer, copy_amount); |
| 362 | break; | 275 | }, |
| 363 | } | 276 | [&src_buffer](const std::size_t copy_amount) { |
| 364 | default: | 277 | src_buffer = static_cast<const u8*>(src_buffer) + copy_amount; |
| 365 | UNREACHABLE(); | 278 | }); |
| 366 | } | ||
| 367 | |||
| 368 | page_index++; | ||
| 369 | page_offset = 0; | ||
| 370 | src_buffer = static_cast<const u8*>(src_buffer) + copy_amount; | ||
| 371 | remaining_size -= copy_amount; | ||
| 372 | } | ||
| 373 | } | 279 | } |
| 374 | 280 | ||
| 375 | void WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) { | 281 | void WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) { |
| 376 | WriteBlock(*system.CurrentProcess(), dest_addr, src_buffer, size); | 282 | WriteBlockImpl<false>(*system.CurrentProcess(), dest_addr, src_buffer, size); |
| 377 | } | 283 | } |
| 378 | 284 | ||
| 379 | void WriteBlockUnsafe(const VAddr dest_addr, const void* src_buffer, const std::size_t size) { | 285 | void WriteBlockUnsafe(const VAddr dest_addr, const void* src_buffer, const std::size_t size) { |
| 380 | WriteBlockUnsafe(*system.CurrentProcess(), dest_addr, src_buffer, size); | 286 | WriteBlockImpl<true>(*system.CurrentProcess(), dest_addr, src_buffer, size); |
| 381 | } | 287 | } |
| 382 | 288 | ||
| 383 | void ZeroBlock(const Kernel::KProcess& process, const VAddr dest_addr, const std::size_t size) { | 289 | void ZeroBlock(const Kernel::KProcess& process, const VAddr dest_addr, const std::size_t size) { |
| 384 | const auto& page_table = process.PageTable().PageTableImpl(); | 290 | WalkBlock( |
| 385 | std::size_t remaining_size = size; | 291 | process, dest_addr, size, |
| 386 | std::size_t page_index = dest_addr >> PAGE_BITS; | 292 | [dest_addr, size](const std::size_t copy_amount, const VAddr current_vaddr) { |
| 387 | std::size_t page_offset = dest_addr & PAGE_MASK; | ||
| 388 | |||
| 389 | while (remaining_size > 0) { | ||
| 390 | const std::size_t copy_amount = | ||
| 391 | std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size); | ||
| 392 | const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); | ||
| 393 | |||
| 394 | const auto [pointer, type] = page_table.pointers[page_index].PointerType(); | ||
| 395 | switch (type) { | ||
| 396 | case Common::PageType::Unmapped: { | ||
| 397 | LOG_ERROR(HW_Memory, | 293 | LOG_ERROR(HW_Memory, |
| 398 | "Unmapped ZeroBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", | 294 | "Unmapped ZeroBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", |
| 399 | current_vaddr, dest_addr, size); | 295 | current_vaddr, dest_addr, size); |
| 400 | break; | 296 | }, |
| 401 | } | 297 | [](const std::size_t copy_amount, u8* const dest_ptr) { |
| 402 | case Common::PageType::Memory: { | ||
| 403 | DEBUG_ASSERT(pointer); | ||
| 404 | u8* const dest_ptr = pointer + page_offset + (page_index << PAGE_BITS); | ||
| 405 | std::memset(dest_ptr, 0, copy_amount); | 298 | std::memset(dest_ptr, 0, copy_amount); |
| 406 | break; | 299 | }, |
| 407 | } | 300 | [&system = system](const VAddr current_vaddr, const std::size_t copy_amount, |
| 408 | case Common::PageType::RasterizerCachedMemory: { | 301 | u8* const host_ptr) { |
| 409 | u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; | ||
| 410 | system.GPU().InvalidateRegion(current_vaddr, copy_amount); | 302 | system.GPU().InvalidateRegion(current_vaddr, copy_amount); |
| 411 | std::memset(host_ptr, 0, copy_amount); | 303 | std::memset(host_ptr, 0, copy_amount); |
| 412 | break; | 304 | }, |
| 413 | } | 305 | [](const std::size_t copy_amount) {}); |
| 414 | default: | ||
| 415 | UNREACHABLE(); | ||
| 416 | } | ||
| 417 | |||
| 418 | page_index++; | ||
| 419 | page_offset = 0; | ||
| 420 | remaining_size -= copy_amount; | ||
| 421 | } | ||
| 422 | } | ||
| 423 | |||
| 424 | void ZeroBlock(const VAddr dest_addr, const std::size_t size) { | ||
| 425 | ZeroBlock(*system.CurrentProcess(), dest_addr, size); | ||
| 426 | } | 306 | } |
| 427 | 307 | ||
| 428 | void CopyBlock(const Kernel::KProcess& process, VAddr dest_addr, VAddr src_addr, | 308 | void CopyBlock(const Kernel::KProcess& process, VAddr dest_addr, VAddr src_addr, |
| @@ -432,7 +312,7 @@ struct Memory::Impl { | |||
| 432 | std::size_t page_index = src_addr >> PAGE_BITS; | 312 | std::size_t page_index = src_addr >> PAGE_BITS; |
| 433 | std::size_t page_offset = src_addr & PAGE_MASK; | 313 | std::size_t page_offset = src_addr & PAGE_MASK; |
| 434 | 314 | ||
| 435 | while (remaining_size > 0) { | 315 | while (remaining_size) { |
| 436 | const std::size_t copy_amount = | 316 | const std::size_t copy_amount = |
| 437 | std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size); | 317 | std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size); |
| 438 | const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); | 318 | const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); |
| @@ -449,13 +329,13 @@ struct Memory::Impl { | |||
| 449 | case Common::PageType::Memory: { | 329 | case Common::PageType::Memory: { |
| 450 | DEBUG_ASSERT(pointer); | 330 | DEBUG_ASSERT(pointer); |
| 451 | const u8* src_ptr = pointer + page_offset + (page_index << PAGE_BITS); | 331 | const u8* src_ptr = pointer + page_offset + (page_index << PAGE_BITS); |
| 452 | WriteBlock(process, dest_addr, src_ptr, copy_amount); | 332 | WriteBlockImpl<false>(process, dest_addr, src_ptr, copy_amount); |
| 453 | break; | 333 | break; |
| 454 | } | 334 | } |
| 455 | case Common::PageType::RasterizerCachedMemory: { | 335 | case Common::PageType::RasterizerCachedMemory: { |
| 456 | const u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; | 336 | const u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; |
| 457 | system.GPU().FlushRegion(current_vaddr, copy_amount); | 337 | system.GPU().FlushRegion(current_vaddr, copy_amount); |
| 458 | WriteBlock(process, dest_addr, host_ptr, copy_amount); | 338 | WriteBlockImpl<false>(process, dest_addr, host_ptr, copy_amount); |
| 459 | break; | 339 | break; |
| 460 | } | 340 | } |
| 461 | default: | 341 | default: |
| @@ -470,10 +350,6 @@ struct Memory::Impl { | |||
| 470 | } | 350 | } |
| 471 | } | 351 | } |
| 472 | 352 | ||
| 473 | void CopyBlock(VAddr dest_addr, VAddr src_addr, std::size_t size) { | ||
| 474 | return CopyBlock(*system.CurrentProcess(), dest_addr, src_addr, size); | ||
| 475 | } | ||
| 476 | |||
| 477 | void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) { | 353 | void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) { |
| 478 | if (vaddr == 0) { | 354 | if (vaddr == 0) { |
| 479 | return; | 355 | return; |
| @@ -517,7 +393,6 @@ struct Memory::Impl { | |||
| 517 | case Common::PageType::Unmapped: | 393 | case Common::PageType::Unmapped: |
| 518 | // It is not necessary for a process to have this region mapped into its address | 394 | // It is not necessary for a process to have this region mapped into its address |
| 519 | // space, for example, a system module need not have a VRAM mapping. | 395 | // space, for example, a system module need not have a VRAM mapping. |
| 520 | break; | ||
| 521 | case Common::PageType::Memory: | 396 | case Common::PageType::Memory: |
| 522 | // There can be more than one GPU region mapped per CPU region, so it's common | 397 | // There can be more than one GPU region mapped per CPU region, so it's common |
| 523 | // that this area is already unmarked as cached. | 398 | // that this area is already unmarked as cached. |
| @@ -789,12 +664,11 @@ void Memory::UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) { | |||
| 789 | impl->UnmapRegion(page_table, base, size); | 664 | impl->UnmapRegion(page_table, base, size); |
| 790 | } | 665 | } |
| 791 | 666 | ||
| 792 | bool Memory::IsValidVirtualAddress(const Kernel::KProcess& process, const VAddr vaddr) const { | ||
| 793 | return impl->IsValidVirtualAddress(process, vaddr); | ||
| 794 | } | ||
| 795 | |||
| 796 | bool Memory::IsValidVirtualAddress(const VAddr vaddr) const { | 667 | bool Memory::IsValidVirtualAddress(const VAddr vaddr) const { |
| 797 | return impl->IsValidVirtualAddress(vaddr); | 668 | const Kernel::KProcess& process = *system.CurrentProcess(); |
| 669 | const auto& pageTable = process.PageTable().PageTableImpl(); | ||
| 670 | const auto [pointer, type] = pageTable.pointers[vaddr >> PAGE_BITS].PointerType(); | ||
| 671 | return pointer != nullptr || type == Common::PageType::RasterizerCachedMemory; | ||
| 798 | } | 672 | } |
| 799 | 673 | ||
| 800 | u8* Memory::GetPointer(VAddr vaddr) { | 674 | u8* Memory::GetPointer(VAddr vaddr) { |
| @@ -863,64 +737,38 @@ std::string Memory::ReadCString(VAddr vaddr, std::size_t max_length) { | |||
| 863 | 737 | ||
| 864 | void Memory::ReadBlock(const Kernel::KProcess& process, const VAddr src_addr, void* dest_buffer, | 738 | void Memory::ReadBlock(const Kernel::KProcess& process, const VAddr src_addr, void* dest_buffer, |
| 865 | const std::size_t size) { | 739 | const std::size_t size) { |
| 866 | impl->ReadBlock(process, src_addr, dest_buffer, size); | 740 | impl->ReadBlockImpl<false>(process, src_addr, dest_buffer, size); |
| 867 | } | 741 | } |
| 868 | 742 | ||
| 869 | void Memory::ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) { | 743 | void Memory::ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) { |
| 870 | impl->ReadBlock(src_addr, dest_buffer, size); | 744 | impl->ReadBlock(src_addr, dest_buffer, size); |
| 871 | } | 745 | } |
| 872 | 746 | ||
| 873 | void Memory::ReadBlockUnsafe(const Kernel::KProcess& process, const VAddr src_addr, | ||
| 874 | void* dest_buffer, const std::size_t size) { | ||
| 875 | impl->ReadBlockUnsafe(process, src_addr, dest_buffer, size); | ||
| 876 | } | ||
| 877 | |||
| 878 | void Memory::ReadBlockUnsafe(const VAddr src_addr, void* dest_buffer, const std::size_t size) { | 747 | void Memory::ReadBlockUnsafe(const VAddr src_addr, void* dest_buffer, const std::size_t size) { |
| 879 | impl->ReadBlockUnsafe(src_addr, dest_buffer, size); | 748 | impl->ReadBlockUnsafe(src_addr, dest_buffer, size); |
| 880 | } | 749 | } |
| 881 | 750 | ||
| 882 | void Memory::WriteBlock(const Kernel::KProcess& process, VAddr dest_addr, const void* src_buffer, | 751 | void Memory::WriteBlock(const Kernel::KProcess& process, VAddr dest_addr, const void* src_buffer, |
| 883 | std::size_t size) { | 752 | std::size_t size) { |
| 884 | impl->WriteBlock(process, dest_addr, src_buffer, size); | 753 | impl->WriteBlockImpl<false>(process, dest_addr, src_buffer, size); |
| 885 | } | 754 | } |
| 886 | 755 | ||
| 887 | void Memory::WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) { | 756 | void Memory::WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) { |
| 888 | impl->WriteBlock(dest_addr, src_buffer, size); | 757 | impl->WriteBlock(dest_addr, src_buffer, size); |
| 889 | } | 758 | } |
| 890 | 759 | ||
| 891 | void Memory::WriteBlockUnsafe(const Kernel::KProcess& process, VAddr dest_addr, | ||
| 892 | const void* src_buffer, std::size_t size) { | ||
| 893 | impl->WriteBlockUnsafe(process, dest_addr, src_buffer, size); | ||
| 894 | } | ||
| 895 | |||
| 896 | void Memory::WriteBlockUnsafe(const VAddr dest_addr, const void* src_buffer, | 760 | void Memory::WriteBlockUnsafe(const VAddr dest_addr, const void* src_buffer, |
| 897 | const std::size_t size) { | 761 | const std::size_t size) { |
| 898 | impl->WriteBlockUnsafe(dest_addr, src_buffer, size); | 762 | impl->WriteBlockUnsafe(dest_addr, src_buffer, size); |
| 899 | } | 763 | } |
| 900 | 764 | ||
| 901 | void Memory::ZeroBlock(const Kernel::KProcess& process, VAddr dest_addr, std::size_t size) { | ||
| 902 | impl->ZeroBlock(process, dest_addr, size); | ||
| 903 | } | ||
| 904 | |||
| 905 | void Memory::ZeroBlock(VAddr dest_addr, std::size_t size) { | ||
| 906 | impl->ZeroBlock(dest_addr, size); | ||
| 907 | } | ||
| 908 | |||
| 909 | void Memory::CopyBlock(const Kernel::KProcess& process, VAddr dest_addr, VAddr src_addr, | 765 | void Memory::CopyBlock(const Kernel::KProcess& process, VAddr dest_addr, VAddr src_addr, |
| 910 | const std::size_t size) { | 766 | const std::size_t size) { |
| 911 | impl->CopyBlock(process, dest_addr, src_addr, size); | 767 | impl->CopyBlock(process, dest_addr, src_addr, size); |
| 912 | } | 768 | } |
| 913 | 769 | ||
| 914 | void Memory::CopyBlock(VAddr dest_addr, VAddr src_addr, std::size_t size) { | ||
| 915 | impl->CopyBlock(dest_addr, src_addr, size); | ||
| 916 | } | ||
| 917 | |||
| 918 | void Memory::RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) { | 770 | void Memory::RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) { |
| 919 | impl->RasterizerMarkRegionCached(vaddr, size, cached); | 771 | impl->RasterizerMarkRegionCached(vaddr, size, cached); |
| 920 | } | 772 | } |
| 921 | 773 | ||
| 922 | bool IsKernelVirtualAddress(const VAddr vaddr) { | ||
| 923 | return KERNEL_REGION_VADDR <= vaddr && vaddr < KERNEL_REGION_END; | ||
| 924 | } | ||
| 925 | |||
| 926 | } // namespace Core::Memory | 774 | } // namespace Core::Memory |
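The other pattern worth noting is how the safe and unsafe block operations now share one body: ReadBlockImpl and WriteBlockImpl take a `bool UNSAFE` template parameter and skip the GPU flush/invalidate when it is true, while the public ReadBlock/ReadBlockUnsafe (and WriteBlock/WriteBlockUnsafe) entry points simply instantiate the right variant. A rough sketch of that dispatch follows; `FlushGpuRegion` and the plain address/size parameters are placeholders, not yuzu's API.

```cpp
#include <cstddef>
#include <cstdio>

// Placeholder for the GPU cache maintenance the safe paths perform.
void FlushGpuRegion(unsigned long long vaddr, std::size_t size) {
    std::printf("flush 0x%llx (+%zu bytes)\n", vaddr, size);
}

// One implementation, two entry points: the template parameter decides whether
// cache maintenance happens, mirroring ReadBlockImpl<UNSAFE> in the diff above.
template <bool UNSAFE>
void ReadBlockImpl(unsigned long long vaddr, void* dest, std::size_t size) {
    if (!UNSAFE) { // compile-time-known condition; the branch folds away
        FlushGpuRegion(vaddr, size);
    }
    (void)dest; // the actual page-walk copy would go here
}

void ReadBlock(unsigned long long vaddr, void* dest, std::size_t size) {
    ReadBlockImpl<false>(vaddr, dest, size); // safe path: flush the GPU cache first
}

void ReadBlockUnsafe(unsigned long long vaddr, void* dest, std::size_t size) {
    ReadBlockImpl<true>(vaddr, dest, size); // unsafe path: skip cache maintenance
}
```

Because UNSAFE is a compile-time constant, the cache-maintenance branch is trivially resolvable in each instantiation, so the unsafe fast path pays no runtime cost for sharing code with the safe one.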