summaryrefslogtreecommitdiff
path: root/src/common/host_memory.cpp
diff options
context:
space:
mode:
author: Liam (2023-11-17 20:57:39 +0200)
committer: t895 (2023-11-25 00:46:15 -0500)
commit: 4766baddf3501695b6048ed78f251f4ec28ae0aa (patch)
tree: 963df957037a5155d919973c4a66d12a34df58dd /src/common/host_memory.cpp
parent: host_memory: ensure map base is between 36 and 39 bits (diff)
download: yuzu-4766baddf3501695b6048ed78f251f4ec28ae0aa.tar.gz
yuzu-4766baddf3501695b6048ed78f251f4ec28ae0aa.tar.xz
yuzu-4766baddf3501695b6048ed78f251f4ec28ae0aa.zip
host_memory: Switch to FreeRegionManager
Diffstat (limited to 'src/common/host_memory.cpp')
-rw-r--r-- src/common/host_memory.cpp | 94
1 file changed, 65 insertions, 29 deletions
diff --git a/src/common/host_memory.cpp b/src/common/host_memory.cpp
index 41ca12ab0..a66fc49e2 100644
--- a/src/common/host_memory.cpp
+++ b/src/common/host_memory.cpp
@@ -32,6 +32,7 @@
32 32
33#include "common/alignment.h" 33#include "common/alignment.h"
34#include "common/assert.h" 34#include "common/assert.h"
35#include "common/free_region_manager.h"
35#include "common/host_memory.h" 36#include "common/host_memory.h"
36#include "common/logging/log.h" 37#include "common/logging/log.h"
37 38
@@ -339,6 +340,11 @@ private:
339 return false; 340 return false;
340 } 341 }
341 342
343 void EnableDirectMappedAddress() {
344 // TODO
345 UNREACHABLE();
346 }
347
342 HANDLE process{}; ///< Current process handle 348 HANDLE process{}; ///< Current process handle
343 HANDLE backing_handle{}; ///< File based backing memory 349 HANDLE backing_handle{}; ///< File based backing memory
344 350
@@ -472,7 +478,7 @@ public:
472 } 478 }
473 } 479 }
474#else 480#else
475 virtual_base = static_cast<u8*>(ChooseVirtualBase(virtual_size)); 481 virtual_base = virtual_map_base = static_cast<u8*>(ChooseVirtualBase(virtual_size));
476 if (virtual_base == MAP_FAILED) { 482 if (virtual_base == MAP_FAILED) {
477 LOG_CRITICAL(HW_Memory, "mmap failed: {}", strerror(errno)); 483 LOG_CRITICAL(HW_Memory, "mmap failed: {}", strerror(errno));
478 throw std::bad_alloc{}; 484 throw std::bad_alloc{};
@@ -480,7 +486,7 @@ public:
480 madvise(virtual_base, virtual_size, MADV_HUGEPAGE); 486 madvise(virtual_base, virtual_size, MADV_HUGEPAGE);
481#endif 487#endif
482 488
483 placeholders.add({0, virtual_size}); 489 free_manager.SetAddressSpace(virtual_base, virtual_size);
484 good = true; 490 good = true;
485 } 491 }
486 492
@@ -489,10 +495,11 @@ public:
489 } 495 }
490 496
491 void Map(size_t virtual_offset, size_t host_offset, size_t length) { 497 void Map(size_t virtual_offset, size_t host_offset, size_t length) {
492 { 498 // Intersect the range with our address space.
493 std::scoped_lock lock{placeholder_mutex}; 499 AdjustMap(&virtual_offset, &length);
494 placeholders.subtract({virtual_offset, virtual_offset + length}); 500
495 } 501 // We are removing a placeholder.
502 free_manager.AllocateBlock(virtual_base + virtual_offset, length);
496 503
497 void* ret = mmap(virtual_base + virtual_offset, length, PROT_READ | PROT_WRITE, 504 void* ret = mmap(virtual_base + virtual_offset, length, PROT_READ | PROT_WRITE,
498 MAP_SHARED | MAP_FIXED, fd, host_offset); 505 MAP_SHARED | MAP_FIXED, fd, host_offset);
@@ -503,26 +510,23 @@ public:
503 // The method name is wrong. We're still talking about the virtual range. 510 // The method name is wrong. We're still talking about the virtual range.
504 // We don't want to unmap, we want to reserve this memory. 511 // We don't want to unmap, we want to reserve this memory.
505 512
506 { 513 // Intersect the range with our address space.
507 std::scoped_lock lock{placeholder_mutex}; 514 AdjustMap(&virtual_offset, &length);
508 auto it = placeholders.find({virtual_offset - 1, virtual_offset + length + 1});
509 515
510 if (it != placeholders.end()) { 516 // Merge with any adjacent placeholder mappings.
511 size_t prev_upper = virtual_offset + length; 517 auto [merged_pointer, merged_size] =
512 virtual_offset = std::min(virtual_offset, it->lower()); 518 free_manager.FreeBlock(virtual_base + virtual_offset, length);
513 length = std::max(it->upper(), prev_upper) - virtual_offset;
514 }
515 519
516 placeholders.add({virtual_offset, virtual_offset + length}); 520 void* ret = mmap(merged_pointer, merged_size, PROT_NONE,
517 }
518
519 void* ret = mmap(virtual_base + virtual_offset, length, PROT_NONE,
520 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0); 521 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
521 ASSERT_MSG(ret != MAP_FAILED, "mmap failed: {}", strerror(errno)); 522 ASSERT_MSG(ret != MAP_FAILED, "mmap failed: {}", strerror(errno));
522 } 523 }
523 524
524 void Protect(size_t virtual_offset, size_t length, bool read, bool write) { 525 void Protect(size_t virtual_offset, size_t length, bool read, bool write) {
525 int flags = 0; 526 // Intersect the range with our address space.
527 AdjustMap(&virtual_offset, &length);
528
529 int flags = PROT_NONE;
526 if (read) { 530 if (read) {
527 flags |= PROT_READ; 531 flags |= PROT_READ;
528 } 532 }
@@ -533,17 +537,22 @@ public:
533 ASSERT_MSG(ret == 0, "mprotect failed: {}", strerror(errno)); 537 ASSERT_MSG(ret == 0, "mprotect failed: {}", strerror(errno));
534 } 538 }
535 539
540 void EnableDirectMappedAddress() {
541 virtual_base = nullptr;
542 }
543
536 const size_t backing_size; ///< Size of the backing memory in bytes 544 const size_t backing_size; ///< Size of the backing memory in bytes
537 const size_t virtual_size; ///< Size of the virtual address placeholder in bytes 545 const size_t virtual_size; ///< Size of the virtual address placeholder in bytes
538 546
539 u8* backing_base{reinterpret_cast<u8*>(MAP_FAILED)}; 547 u8* backing_base{reinterpret_cast<u8*>(MAP_FAILED)};
540 u8* virtual_base{reinterpret_cast<u8*>(MAP_FAILED)}; 548 u8* virtual_base{reinterpret_cast<u8*>(MAP_FAILED)};
549 u8* virtual_map_base{reinterpret_cast<u8*>(MAP_FAILED)};
541 550
542private: 551private:
543 /// Release all resources in the object 552 /// Release all resources in the object
544 void Release() { 553 void Release() {
545 if (virtual_base != MAP_FAILED) { 554 if (virtual_map_base != MAP_FAILED) {
546 int ret = munmap(virtual_base, virtual_size); 555 int ret = munmap(virtual_map_base, virtual_size);
547 ASSERT_MSG(ret == 0, "munmap failed: {}", strerror(errno)); 556 ASSERT_MSG(ret == 0, "munmap failed: {}", strerror(errno));
548 } 557 }
549 558
@@ -558,10 +567,29 @@ private:
558 } 567 }
559 } 568 }
560 569
561 int fd{-1}; // memfd file descriptor, -1 is the error value of memfd_create 570 void AdjustMap(size_t* virtual_offset, size_t* length) {
571 if (virtual_base != nullptr) {
572 return;
573 }
574
575 // If we are direct mapped, we want to make sure we are operating on a region
576 // that is in range of our virtual mapping.
577 size_t intended_start = *virtual_offset;
578 size_t intended_end = intended_start + *length;
579 size_t address_space_start = reinterpret_cast<size_t>(virtual_map_base);
580 size_t address_space_end = address_space_start + virtual_size;
562 581
563 boost::icl::interval_set<size_t> placeholders; ///< Mapped placeholders 582 if (address_space_start > intended_end || intended_start > address_space_end) {
564 std::mutex placeholder_mutex; ///< Mutex for placeholders 583 *virtual_offset = 0;
584 *length = 0;
585 } else {
586 *virtual_offset = std::max(intended_start, address_space_start);
587 *length = std::min(intended_end, address_space_end) - *virtual_offset;
588 }
589 }
590
591 int fd{-1}; // memfd file descriptor, -1 is the error value of memfd_create
592 FreeRegionManager free_manager{};
565}; 593};
566 594
567#else // ^^^ Linux ^^^ vvv Generic vvv 595#else // ^^^ Linux ^^^ vvv Generic vvv
@@ -591,15 +619,16 @@ HostMemory::HostMemory(size_t backing_size_, size_t virtual_size_)
591 try { 619 try {
592 // Try to allocate a fastmem arena. 620 // Try to allocate a fastmem arena.
593 // The implementation will fail with std::bad_alloc on errors. 621 // The implementation will fail with std::bad_alloc on errors.
594 impl = std::make_unique<HostMemory::Impl>(AlignUp(backing_size, PageAlignment), 622 impl =
595 AlignUp(virtual_size, PageAlignment) + 623 std::make_unique<HostMemory::Impl>(AlignUp(backing_size, PageAlignment),
596 3 * HugePageSize); 624 AlignUp(virtual_size, PageAlignment) + HugePageSize);
597 backing_base = impl->backing_base; 625 backing_base = impl->backing_base;
598 virtual_base = impl->virtual_base; 626 virtual_base = impl->virtual_base;
599 627
600 if (virtual_base) { 628 if (virtual_base) {
601 virtual_base += 2 * HugePageSize - 1; 629 // Ensure the virtual base is aligned to the L2 block size.
602 virtual_base -= reinterpret_cast<size_t>(virtual_base) & (HugePageSize - 1); 630 virtual_base = reinterpret_cast<u8*>(
631 Common::AlignUp(reinterpret_cast<uintptr_t>(virtual_base), HugePageSize));
603 virtual_base_offset = virtual_base - impl->virtual_base; 632 virtual_base_offset = virtual_base - impl->virtual_base;
604 } 633 }
605 634
@@ -650,4 +679,11 @@ void HostMemory::Protect(size_t virtual_offset, size_t length, bool read, bool w
650 impl->Protect(virtual_offset + virtual_base_offset, length, read, write); 679 impl->Protect(virtual_offset + virtual_base_offset, length, read, write);
651} 680}
652 681
682void HostMemory::EnableDirectMappedAddress() {
683 if (impl) {
684 impl->EnableDirectMappedAddress();
685 virtual_size += reinterpret_cast<uintptr_t>(virtual_base);
686 }
687}
688
653} // namespace Common 689} // namespace Common