author     Michael Scire  2019-07-07 12:55:30 -0700
committer  Michael Scire  2019-07-07 12:55:30 -0700
commit     ce64a9fab9e6f015a4d2b332abcb7043914549d4 (patch)
tree       75b34665635af88a0cababae1f889d5bed4a9440 /src
parent     clang-format fixes (diff)
physmem: add helpers, cleanup logic.
Diffstat (limited to 'src')
-rw-r--r--  src/core/hle/kernel/vm_manager.cpp  325
-rw-r--r--  src/core/hle/kernel/vm_manager.h     16
2 files changed, 170 insertions, 171 deletions
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index bda325e87..775d170bf 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -310,42 +310,22 @@ ResultVal<VAddr> VMManager::SetHeapSize(u64 size) {
 }
 
 ResultCode VMManager::MapPhysicalMemory(VAddr target, u64 size) {
-    const auto last_addr = target + size - 1;
+    const auto end_addr = target + size;
+    const auto last_addr = end_addr - 1;
     VAddr cur_addr = target;
-    std::size_t mapped_size = 0;
 
     ResultCode result = RESULT_SUCCESS;
 
-    // Check whether we've already mapped the desired memory.
-    {
-        auto vma = FindVMA(target);
-        ASSERT_MSG(vma != vma_map.end(), "MapPhysicalMemory vma != end");
-
-        while (true) {
-            const auto vma_start = vma->second.base;
-            const auto vma_size = vma->second.size;
-            const auto state = vma->second.state;
-
-            // Handle last block.
-            if (last_addr <= (vma_start + vma_size - 1)) {
-                if (state != MemoryState::Unmapped) {
-                    mapped_size += last_addr - cur_addr + 1;
-                }
-                break;
-            }
-
-            if (state != MemoryState::Unmapped) {
-                mapped_size += vma_start + vma_size - cur_addr;
-            }
-            cur_addr = vma_start + vma_size;
-            vma++;
-            ASSERT_MSG(vma != vma_map.end(), "MapPhysicalMemory vma != end");
-        }
+    // Check how much memory we've already mapped.
+    const auto mapped_size_result = SizeOfAllocatedVMAsInRange(target, size);
+    if (mapped_size_result.Failed()) {
+        return mapped_size_result.Code();
+    }
 
-        // If we already have the desired amount mapped, we're done.
-        if (mapped_size == size) {
-            return RESULT_SUCCESS;
-        }
-    }
+    // If we've already mapped the desired amount, return early.
+    const std::size_t mapped_size = *mapped_size_result;
+    if (mapped_size == size) {
+        return RESULT_SUCCESS;
+    }
 
     // Check that we can map the memory we want.
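Note on the bounds arithmetic introduced above: the rewrite computes both an exclusive end_addr and an inclusive last_addr up front, so later loops can clamp each block's contribution with std::min instead of special-casing the final block. A standalone sketch of that arithmetic with made-up values (not part of the commit):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
        using VAddr = std::uint64_t;

        const VAddr target = 0x1000;
        const std::uint64_t size = 0x3000;

        const VAddr end_addr = target + size; // one-past-the-end: 0x4000
        const VAddr last_addr = end_addr - 1; // inclusive last byte: 0x3fff

        // A made-up VMA spanning [0x0000, 0x2000); it overlaps the first
        // 0x1000 bytes of the queried range.
        const VAddr vma_end = 0x2000;

        // Clamp the block's contribution the same way the new loops do.
        const VAddr cur_addr = target;
        const std::uint64_t chunk = std::min(end_addr - cur_addr, vma_end - cur_addr);
        std::printf("chunk = 0x%llx, last_addr = 0x%llx\n",
                    static_cast<unsigned long long>(chunk),
                    static_cast<unsigned long long>(last_addr)); // chunk = 0x1000
        return 0;
    }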
@@ -360,97 +340,54 @@ ResultCode VMManager::MapPhysicalMemory(VAddr target, u64 size) {
     std::vector<std::pair<u64, u64>> mapped_regions;
 
     // Iterate, trying to map memory.
-    // Map initially with VMAPermission::None.
     {
         cur_addr = target;
 
-        auto vma = FindVMA(target);
-        ASSERT_MSG(vma != vma_map.end(), "MapPhysicalMemory vma != end");
+        auto iter = FindVMA(target);
+        ASSERT_MSG(iter != vma_map.end(), "MapPhysicalMemory iter != end");
 
         while (true) {
-            const auto vma_start = vma->second.base;
-            const auto vma_size = vma->second.size;
-            const auto state = vma->second.state;
-
-            // Handle last block.
-            if (last_addr <= (vma_start + vma_size - 1)) {
-                if (state == MemoryState::Unmapped) {
-                    const auto map_res = MapMemoryBlock(
-                        cur_addr, std::make_shared<std::vector<u8>>(last_addr - cur_addr + 1, 0), 0,
-                        last_addr - cur_addr + 1, MemoryState::Heap, VMAPermission::None);
-                    result = map_res.Code();
-                    if (result.IsSuccess()) {
-                        mapped_regions.push_back(
-                            std::make_pair(cur_addr, last_addr - cur_addr + 1));
-                    }
-                }
-                break;
-            }
-
-            if (state == MemoryState::Unmapped) {
-                const auto map_res = MapMemoryBlock(
-                    cur_addr, std::make_shared<std::vector<u8>>(vma_start + vma_size - cur_addr, 0),
-                    0, vma_start + vma_size - cur_addr, MemoryState::Heap, VMAPermission::None);
-                result = map_res.Code();
-                if (result.IsSuccess()) {
-                    mapped_regions.push_back(
-                        std::make_pair(cur_addr, vma_start + vma_size - cur_addr));
-                } else {
-                    break;
-                }
-            }
-            cur_addr = vma_start + vma_size;
-            vma = FindVMA(cur_addr);
-            ASSERT_MSG(vma != vma_map.end(), "MapPhysicalMemory vma != end");
+            const auto& vma = iter->second;
+            const auto vma_start = vma.base;
+            const auto vma_end = vma_start + vma.size;
+            const auto vma_last = vma_end - 1;
+
+            // Map the memory block
+            const auto map_size = std::min(end_addr - cur_addr, vma_end - cur_addr);
+            if (vma.state == MemoryState::Unmapped) {
+                const auto map_res =
+                    MapMemoryBlock(cur_addr, std::make_shared<std::vector<u8>>(map_size, 0), 0,
+                                   map_size, MemoryState::Heap, VMAPermission::ReadWrite);
+                result = map_res.Code();
+                if (result.IsError()) {
+                    break;
+                }
+
+                mapped_regions.emplace_back(cur_addr, map_size);
+            }
+
+            // Break once we hit the end of the range.
+            if (last_addr <= vma_last) {
+                break;
+            }
+
+            // Advance to the next block.
+            cur_addr = vma_end;
+            iter = FindVMA(cur_addr);
+            ASSERT_MSG(iter != vma_map.end(), "MapPhysicalMemory iter != end");
         }
     }
 
     // If we failed, unmap memory.
     if (result.IsError()) {
-        for (const auto& it : mapped_regions) {
-            const auto unmap_res = UnmapRange(it.first, it.second);
-            ASSERT_MSG(unmap_res.IsSuccess(), "MapPhysicalMemory un-map on error");
+        for (const auto [unmap_address, unmap_size] : mapped_regions) {
+            ASSERT_MSG(UnmapRange(unmap_address, unmap_size).IsSuccess(),
+                       "MapPhysicalMemory un-map on error");
         }
 
         return result;
     }
 
-    // We didn't fail, so reprotect all the memory to ReadWrite.
-    {
-        cur_addr = target;
-
-        auto vma = FindVMA(target);
-        ASSERT_MSG(vma != vma_map.end(), "MapPhysicalMemory vma != end");
-
-        while (true) {
-            const auto vma_start = vma->second.base;
-            const auto vma_size = vma->second.size;
-            const auto state = vma->second.state;
-            const auto perm = vma->second.permissions;
-
-            // Handle last block.
-            if (last_addr <= (vma_start + vma_size - 1)) {
-                if (state == MemoryState::Heap && perm == VMAPermission::None) {
-                    ASSERT_MSG(
-                        ReprotectRange(cur_addr, last_addr - cur_addr + 1, VMAPermission::ReadWrite)
-                            .IsSuccess(),
-                        "MapPhysicalMemory reprotect");
-                }
-                break;
-            }
-
-            if (state == MemoryState::Heap && perm == VMAPermission::None) {
-                ASSERT_MSG(ReprotectRange(cur_addr, vma_start + vma_size - cur_addr,
-                                          VMAPermission::ReadWrite)
-                               .IsSuccess(),
-                           "MapPhysicalMemory reprotect");
-            }
-            cur_addr = vma_start + vma_size;
-            vma = FindVMA(cur_addr);
-            ASSERT_MSG(vma != vma_map.end(), "MapPhysicalMemory vma != end");
-        }
-    }
-
     // Update amount of mapped physical memory.
     physical_memory_mapped += size - mapped_size;
 
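The single mapping loop above records each region it successfully maps so a mid-loop failure can be rolled back before returning. A minimal self-contained sketch of that record-then-rollback shape; Status, MapChunk, and UnmapChunk are illustrative stand-ins, not kernel APIs:

    #include <cstdint>
    #include <utility>
    #include <vector>

    enum class Status { Success, Error };

    // Toy mapper that fails on its third call, to exercise the rollback path.
    Status MapChunk(std::uint64_t, std::uint64_t) {
        static int calls = 0;
        return (++calls == 3) ? Status::Error : Status::Success;
    }

    void UnmapChunk(std::uint64_t, std::uint64_t) {}

    Status MapAll(const std::vector<std::pair<std::uint64_t, std::uint64_t>>& chunks) {
        std::vector<std::pair<std::uint64_t, std::uint64_t>> mapped_regions;
        Status result = Status::Success;

        for (const auto& [addr, size] : chunks) {
            result = MapChunk(addr, size);
            if (result == Status::Error) {
                break; // stop immediately; only successfully mapped chunks are recorded
            }
            mapped_regions.emplace_back(addr, size);
        }

        // On failure, undo every chunk recorded so far, then surface the error.
        if (result == Status::Error) {
            for (const auto& [addr, size] : mapped_regions) {
                UnmapChunk(addr, size);
            }
        }
        return result;
    }

    int main() {
        MapAll({{0x0, 0x1000}, {0x2000, 0x1000}, {0x5000, 0x1000}});
        return 0;
    }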
@@ -458,49 +395,22 @@ ResultCode VMManager::MapPhysicalMemory(VAddr target, u64 size) {
 }
 
 ResultCode VMManager::UnmapPhysicalMemory(VAddr target, u64 size) {
-    auto last_addr = target + size - 1;
+    const auto end_addr = target + size;
+    const auto last_addr = end_addr - 1;
     VAddr cur_addr = target;
-    std::size_t mapped_size = 0;
 
     ResultCode result = RESULT_SUCCESS;
 
-    // Check how much of the memory is currently mapped.
-    {
-        auto vma = FindVMA(target);
-        ASSERT_MSG(vma != vma_map.end(), "UnmapPhysicalMemory vma != end");
-
-        while (true) {
-            const auto vma_start = vma->second.base;
-            const auto vma_size = vma->second.size;
-            const auto state = vma->second.state;
-            const auto attr = vma->second.attribute;
-
-            // Memory within region must be free or mapped heap.
-            if (!((state == MemoryState::Heap && attr == MemoryAttribute::None) ||
-                  (state == MemoryState::Unmapped))) {
-                return ERR_INVALID_ADDRESS_STATE;
-            }
-
-            // If this is the last block and it's mapped, update mapped size.
-            if (last_addr <= (vma_start + vma_size - 1)) {
-                if (state == MemoryState::Heap) {
-                    mapped_size += last_addr - cur_addr + 1;
-                }
-                break;
-            }
-
-            if (state == MemoryState::Heap) {
-                mapped_size += vma_start + vma_size - cur_addr;
-            }
-            cur_addr = vma_start + vma_size;
-            vma++;
-            ASSERT_MSG(vma != vma_map.end(), "UnmapPhysicalMemory vma != end");
-        }
+    // Check how much memory is currently mapped.
+    const auto mapped_size_result = SizeOfUnmappablePhysicalMemoryInRange(target, size);
+    if (mapped_size_result.Failed()) {
+        return mapped_size_result.Code();
+    }
 
-        // If memory is already unmapped, we're done.
-        if (mapped_size == 0) {
-            return RESULT_SUCCESS;
-        }
-    }
+    // If we've already unmapped all the memory, return early.
+    const std::size_t mapped_size = *mapped_size_result;
+    if (mapped_size == 0) {
+        return RESULT_SUCCESS;
+    }
 
     // Keep track of the memory regions we unmap.
@@ -510,50 +420,45 @@ ResultCode VMManager::UnmapPhysicalMemory(VAddr target, u64 size) {
     {
         cur_addr = target;
 
-        auto vma = FindVMA(target);
-        ASSERT_MSG(vma != vma_map.end(), "UnmapPhysicalMemory vma != end");
+        auto iter = FindVMA(target);
+        ASSERT_MSG(iter != vma_map.end(), "UnmapPhysicalMemory iter != end");
 
         while (true) {
-            const auto vma_start = vma->second.base;
-            const auto vma_size = vma->second.size;
-            const auto state = vma->second.state;
-            const auto perm = vma->second.permissions;
-
-            // Handle last block.
-            if (last_addr <= (vma_start + vma_size - 1)) {
-                if (state == MemoryState::Heap) {
-                    result = UnmapRange(cur_addr, last_addr - cur_addr + 1);
-                    if (result.IsSuccess()) {
-                        unmapped_regions.push_back(
-                            std::make_pair(cur_addr, last_addr - cur_addr + 1));
-                    }
-                }
-                break;
-            }
-
-            if (state == MemoryState::Heap) {
-                result = UnmapRange(cur_addr, vma_start + vma_size - cur_addr);
-                if (result.IsSuccess()) {
-                    unmapped_regions.push_back(
-                        std::make_pair(cur_addr, vma_start + vma_size - cur_addr));
-                } else {
-                    break;
-                }
-            }
-
-            cur_addr = vma_start + vma_size;
-            vma = FindVMA(cur_addr);
-            ASSERT_MSG(vma != vma_map.end(), "UnmapPhysicalMemory vma != end");
+            const auto& vma = iter->second;
+            const auto vma_start = vma.base;
+            const auto vma_end = vma_start + vma.size;
+            const auto vma_last = vma_end - 1;
+
+            // Unmap the memory block
+            const auto unmap_size = std::min(end_addr - cur_addr, vma_end - cur_addr);
+            if (vma.state == MemoryState::Heap) {
+                result = UnmapRange(cur_addr, unmap_size);
+                if (result.IsError()) {
+                    break;
+                }
+
+                unmapped_regions.emplace_back(cur_addr, unmap_size);
+            }
+
+            // Break once we hit the end of the range.
+            if (last_addr <= vma_last) {
+                break;
+            }
+
+            // Advance to the next block.
+            cur_addr = vma_end;
+            iter = FindVMA(cur_addr);
+            ASSERT_MSG(iter != vma_map.end(), "UnmapPhysicalMemory iter != end");
         }
     }
 
     // If we failed, re-map regions.
     // TODO: Preserve memory contents?
     if (result.IsError()) {
-        for (const auto& it : unmapped_regions) {
+        for (const auto [map_address, map_size] : unmapped_regions) {
             const auto remap_res =
-                MapMemoryBlock(it.first, std::make_shared<std::vector<u8>>(it.second, 0), 0,
-                               it.second, MemoryState::Heap, VMAPermission::None);
+                MapMemoryBlock(map_address, std::make_shared<std::vector<u8>>(map_size, 0), 0,
+                               map_size, MemoryState::Heap, VMAPermission::None);
             ASSERT_MSG(remap_res.Succeeded(), "UnmapPhysicalMemory re-map on error");
         }
     }
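Both error paths above now iterate the recorded regions with C++17 structured bindings rather than it.first/it.second. A tiny standalone example of the binding form these loops use (toy values):

    #include <cstdint>
    #include <cstdio>
    #include <utility>
    #include <vector>

    int main() {
        std::vector<std::pair<std::uint64_t, std::uint64_t>> regions{
            {0x1000, 0x2000}, {0x4000, 0x1000}};

        // Structured bindings name the pair members at the loop head.
        for (const auto [address, size] : regions) {
            std::printf("region at 0x%llx, 0x%llx bytes\n",
                        static_cast<unsigned long long>(address),
                        static_cast<unsigned long long>(size));
        }
        return 0;
    }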
@@ -1085,6 +990,84 @@ VMManager::CheckResults VMManager::CheckRangeState(VAddr address, u64 size, Memo
         std::make_tuple(initial_state, initial_permissions, initial_attributes & ~ignore_mask));
 }
 
+ResultVal<std::size_t> VMManager::SizeOfAllocatedVMAsInRange(VAddr address,
+                                                             std::size_t size) const {
+    const VAddr end_addr = address + size;
+    const VAddr last_addr = end_addr - 1;
+    std::size_t mapped_size = 0;
+
+    VAddr cur_addr = address;
+    auto iter = FindVMA(cur_addr);
+    ASSERT_MSG(iter != vma_map.end(), "SizeOfAllocatedVMAsInRange iter != end");
+
+    while (true) {
+        const auto& vma = iter->second;
+        const VAddr vma_start = vma.base;
+        const VAddr vma_end = vma_start + vma.size;
+        const VAddr vma_last = vma_end - 1;
+
+        // Add size if relevant.
+        if (vma.state != MemoryState::Unmapped) {
+            mapped_size += std::min(end_addr - cur_addr, vma_end - cur_addr);
+        }
+
+        // Break once we hit the end of the range.
+        if (last_addr <= vma_last) {
+            break;
+        }
+
+        // Advance to the next block.
+        cur_addr = vma_end;
+        iter = std::next(iter);
+        ASSERT_MSG(iter != vma_map.end(), "SizeOfAllocatedVMAsInRange iter != end");
+    }
+
+    return MakeResult(mapped_size);
+}
+
+ResultVal<std::size_t> VMManager::SizeOfUnmappablePhysicalMemoryInRange(VAddr address,
+                                                                        std::size_t size) const {
+    const VAddr end_addr = address + size;
+    const VAddr last_addr = end_addr - 1;
+    std::size_t mapped_size = 0;
+
+    VAddr cur_addr = address;
+    auto iter = FindVMA(cur_addr);
+    ASSERT_MSG(iter != vma_map.end(), "SizeOfUnmappablePhysicalMemoryInRange iter != end");
+
+    while (true) {
+        const auto& vma = iter->second;
+        const auto vma_start = vma.base;
+        const auto vma_end = vma_start + vma.size;
+        const auto vma_last = vma_end - 1;
+        const auto state = vma.state;
+        const auto attr = vma.attribute;
+
+        // Memory within region must be free or mapped heap.
+        if (!((state == MemoryState::Heap && attr == MemoryAttribute::None) ||
+              (state == MemoryState::Unmapped))) {
+            return ERR_INVALID_ADDRESS_STATE;
+        }
+
+        // Add size if relevant.
+        if (state != MemoryState::Unmapped) {
+            mapped_size += std::min(end_addr - cur_addr, vma_end - cur_addr);
+        }
+
+        // Break once we hit the end of the range.
+        if (last_addr <= vma_last) {
+            break;
+        }
+
+        // Advance to the next block.
+        cur_addr = vma_end;
+        iter = std::next(iter);
+        ASSERT_MSG(iter != vma_map.end(), "SizeOfUnmappablePhysicalMemoryInRange iter != end");
+    }
+
+    return MakeResult(mapped_size);
+}
+
 u64 VMManager::GetTotalPhysicalMemoryAvailable() const {
     LOG_WARNING(Kernel, "(STUBBED) called");
     return 0xF8000000;
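Both new helpers share the same traversal: find the VMA containing the start address, clamp each block's contribution to the queried range, and stop at the block containing last_addr. A self-contained toy version over a plain std::map; the types are simplified, and FindVMA is approximated with upper_bound/prev under the assumption (documented on vma_map) that the map tiles the address space with no gaps:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <map>

    enum class MemoryState { Unmapped, Heap };

    struct Vma {
        std::uint64_t base;
        std::uint64_t size;
        MemoryState state;
    };

    // Keyed by base address; assumed to tile the space with no gaps.
    using VmaMap = std::map<std::uint64_t, Vma>;

    std::size_t SizeOfAllocatedInRange(const VmaMap& vmas, std::uint64_t address,
                                       std::size_t size) {
        const std::uint64_t end_addr = address + size;
        const std::uint64_t last_addr = end_addr - 1;
        std::size_t mapped_size = 0;

        std::uint64_t cur_addr = address;
        // Find the block containing cur_addr (stands in for FindVMA).
        auto iter = std::prev(vmas.upper_bound(cur_addr));

        while (true) {
            const Vma& vma = iter->second;
            const std::uint64_t vma_end = vma.base + vma.size;

            // Clamp this block's contribution to the queried range.
            if (vma.state != MemoryState::Unmapped) {
                mapped_size += std::min(end_addr - cur_addr, vma_end - cur_addr);
            }
            // Stop once this block covers the last byte of the range.
            if (last_addr <= vma_end - 1) {
                break;
            }
            cur_addr = vma_end;
            iter = std::next(iter);
        }
        return mapped_size;
    }

    int main() {
        VmaMap vmas{{0x0, Vma{0x0, 0x2000, MemoryState::Heap}},
                    {0x2000, Vma{0x2000, 0x2000, MemoryState::Unmapped}},
                    {0x4000, Vma{0x4000, 0x4000, MemoryState::Heap}}};
        // Query [0x1000, 0x5000): 0x1000 bytes from the first block plus
        // 0x1000 from the last -> 0x2000 total.
        return SizeOfAllocatedInRange(vmas, 0x1000, 0x4000) == 0x2000 ? 0 : 1;
    }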
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index 8be03a6e4..5b27548aa 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -303,6 +303,15 @@ struct VirtualMemoryArea {
     PAddr paddr = 0;
     Common::MemoryHookPointer mmio_handler = nullptr;
 
+    /// If the address lies within this VMA, returns the size left before the
+    /// end of this VMA. If the given address doesn't lie within the VMA, then
+    /// an empty optional value is returned.
+    ///
+    /// For example, given a VMA 100 bytes long. If '10' was given as the
+    /// start address, then this would return 90.
+    ///
+    std::optional<u64> SizeRemainingFromAddress(VAddr address) const;
+
     /// Tests if this area can be merged to the right with `next`.
     bool CanBeMergedWith(const VirtualMemoryArea& next) const;
 };
@@ -735,6 +744,13 @@ private:
                           MemoryAttribute attribute_mask, MemoryAttribute attribute,
                           MemoryAttribute ignore_mask) const;
 
+    /// Gets the amount of memory currently mapped (state != Unmapped) in a range.
+    ResultVal<std::size_t> SizeOfAllocatedVMAsInRange(VAddr address, std::size_t size) const;
+
+    /// Gets the amount of memory unmappable by UnmapPhysicalMemory in a range.
+    ResultVal<std::size_t> SizeOfUnmappablePhysicalMemoryInRange(VAddr address,
+                                                                 std::size_t size) const;
+
     /**
      * A map covering the entirety of the managed address space, keyed by the `base` field of each
      * VMA. It must always be modified by splitting or merging VMAs, so that the invariant
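SizeRemainingFromAddress is only declared in this commit; its definition does not appear in the hunks shown above. One plausible implementation consistent with the doc comment, offered purely as an assumption:

    // Assumed sketch -- the actual definition is not part of the hunks shown here.
    std::optional<u64> VirtualMemoryArea::SizeRemainingFromAddress(VAddr address) const {
        if (address < base || address >= base + size) {
            return std::nullopt; // address does not lie within this VMA
        }
        // E.g. a 100-byte VMA at base 0 queried with address 10 yields 90.
        return (base + size) - address;
    }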