Diffstat (limited to 'src/common/host_memory.cpp')

 -rw-r--r--  src/common/host_memory.cpp | 219
 1 file changed, 165 insertions(+), 54 deletions(-)

diff --git a/src/common/host_memory.cpp b/src/common/host_memory.cpp
index ba22595e0..4bfc64f2d 100644
--- a/src/common/host_memory.cpp
+++ b/src/common/host_memory.cpp
@@ -21,15 +21,22 @@
 #include <boost/icl/interval_set.hpp>
 #include <fcntl.h>
 #include <sys/mman.h>
+#include <sys/random.h>
 #include <unistd.h>
 #include "common/scope_exit.h"

+#ifndef MAP_NORESERVE
+#define MAP_NORESERVE 0
+#endif
+
 #endif // ^^^ Linux ^^^

 #include <mutex>
+#include <random>

 #include "common/alignment.h"
 #include "common/assert.h"
+#include "common/free_region_manager.h"
 #include "common/host_memory.h"
 #include "common/logging/log.h"

@@ -141,7 +148,7 @@ public:
         Release();
     }

-    void Map(size_t virtual_offset, size_t host_offset, size_t length) {
+    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms) {
         std::unique_lock lock{placeholder_mutex};
         if (!IsNiechePlaceholder(virtual_offset, length)) {
             Split(virtual_offset, length);
@@ -160,7 +167,7 @@ public:
         }
     }

-    void Protect(size_t virtual_offset, size_t length, bool read, bool write) {
+    void Protect(size_t virtual_offset, size_t length, bool read, bool write, bool execute) {
         DWORD new_flags{};
         if (read && write) {
             new_flags = PAGE_READWRITE;
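Only the widened signature of the Windows `Protect` is visible in this hunk; the `PAGE_*` selection below it presumably grows execute-aware variants. A hedged sketch of what that selection could look like, using only standard Win32 protection constants; the exact branching in the real file may differ:

```cpp
// Hypothetical continuation of Protect() on Windows, not the literal hunk body.
// Win32 encodes read/write/execute combinations as distinct PAGE_* constants
// rather than OR-able bits, hence the nested branching.
DWORD new_flags{};
if (execute) {
    if (read && write) {
        new_flags = PAGE_EXECUTE_READWRITE;
    } else if (read) {
        new_flags = PAGE_EXECUTE_READ;
    } else {
        new_flags = PAGE_EXECUTE; // execute-only
    }
} else {
    if (read && write) {
        new_flags = PAGE_READWRITE;
    } else if (read) {
        new_flags = PAGE_READONLY;
    } else {
        new_flags = PAGE_NOACCESS;
    }
}
```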
@@ -186,6 +193,11 @@ public:
         }
     }

+    void EnableDirectMappedAddress() {
+        // TODO
+        UNREACHABLE();
+    }
+
     const size_t backing_size; ///< Size of the backing memory in bytes
     const size_t virtual_size; ///< Size of the virtual address placeholder in bytes

@@ -353,6 +365,65 @@ private:

 #elif defined(__linux__) || defined(__FreeBSD__) // ^^^ Windows ^^^ vvv Linux vvv

+#ifdef ARCHITECTURE_arm64
+
+static void* ChooseVirtualBase(size_t virtual_size) {
+    constexpr uintptr_t Map39BitSize = (1ULL << 39);
+    constexpr uintptr_t Map36BitSize = (1ULL << 36);
+
+    // This is not a cryptographic application, we just want something random.
+    std::mt19937_64 rng;
+
+    // We want to ensure we are allocating at an address aligned to the L2 block size.
+    // For Qualcomm devices, we must also allocate memory above 36 bits.
+    const size_t lower = Map36BitSize / HugePageSize;
+    const size_t upper = (Map39BitSize - virtual_size) / HugePageSize;
+    const size_t range = upper - lower;
+
+    // Try up to 64 times to allocate memory at random addresses in the range.
+    for (int i = 0; i < 64; i++) {
+        // Calculate a possible location.
+        uintptr_t hint_address = ((rng() % range) + lower) * HugePageSize;
+
+        // Try to map.
+        // Note: we may be able to take advantage of MAP_FIXED_NOREPLACE here.
+        void* map_pointer =
+            mmap(reinterpret_cast<void*>(hint_address), virtual_size, PROT_READ | PROT_WRITE,
+                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
+
+        // If we successfully mapped, we're done.
+        if (reinterpret_cast<uintptr_t>(map_pointer) == hint_address) {
+            return map_pointer;
+        }
+
+        // Unmap if necessary, and try again.
+        if (map_pointer != MAP_FAILED) {
+            munmap(map_pointer, virtual_size);
+        }
+    }
+
+    return MAP_FAILED;
+}
+
+#else
+
+static void* ChooseVirtualBase(size_t virtual_size) {
+#if defined(__FreeBSD__)
+    void* virtual_base =
+        mmap(nullptr, virtual_size, PROT_READ | PROT_WRITE,
+             MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_ALIGNED_SUPER, -1, 0);
+
+    if (virtual_base != MAP_FAILED) {
+        return virtual_base;
+    }
+#endif
+
+    return mmap(nullptr, virtual_size, PROT_READ | PROT_WRITE,
+                MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
+}
+
+#endif
+
 class HostMemory::Impl {
 public:
     explicit Impl(size_t backing_size_, size_t virtual_size_)
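Two observations on the arm64 `ChooseVirtualBase`. First, a default-constructed `std::mt19937_64` is seeded with a fixed constant, so the probe sequence repeats on every run; the newly added `<sys/random.h>` include suggests the surrounding file seeds it outside this hunk. Second, the in-code note mentions `MAP_FIXED_NOREPLACE`; a sketch of that variant (Linux 4.17+, guarded so older headers still build) for the mmap step inside the loop:

```cpp
// Sketch of the MAP_FIXED_NOREPLACE variant hinted at in the comment above.
// With this flag the kernel either maps exactly at hint_address or fails with
// EEXIST, so the "unmap a stray mapping and retry" step disappears.
#ifdef MAP_FIXED_NOREPLACE
void* map_pointer =
    mmap(reinterpret_cast<void*>(hint_address), virtual_size, PROT_READ | PROT_WRITE,
         MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED_NOREPLACE, -1, 0);
if (map_pointer != MAP_FAILED) {
    return map_pointer; // guaranteed to be at hint_address
}
// Fall through and retry with a new random hint. Kernels predating the flag
// ignore unknown mmap bits and fall back to plain-hint behavior, so the
// hint-equality check would still be needed there.
#endif
```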
@@ -402,29 +473,16 @@ public:
         }

         // Virtual memory initialization
-#if defined(__FreeBSD__)
-        virtual_base =
-            static_cast<u8*>(mmap(nullptr, virtual_size, PROT_NONE,
-                                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_ALIGNED_SUPER, -1, 0));
-        if (virtual_base == MAP_FAILED) {
-            virtual_base = static_cast<u8*>(
-                mmap(nullptr, virtual_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
-            if (virtual_base == MAP_FAILED) {
-                LOG_CRITICAL(HW_Memory, "mmap failed: {}", strerror(errno));
-                throw std::bad_alloc{};
-            }
-        }
-#else
-        virtual_base = static_cast<u8*>(mmap(nullptr, virtual_size, PROT_NONE,
-                                             MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0));
+        virtual_base = virtual_map_base = static_cast<u8*>(ChooseVirtualBase(virtual_size));
         if (virtual_base == MAP_FAILED) {
             LOG_CRITICAL(HW_Memory, "mmap failed: {}", strerror(errno));
             throw std::bad_alloc{};
         }
+#if defined(__linux__)
         madvise(virtual_base, virtual_size, MADV_HUGEPAGE);
 #endif

-        placeholders.add({0, virtual_size});
+        free_manager.SetAddressSpace(virtual_base, virtual_size);
         good = true;
     }

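The `madvise` call gains a Linux guard because `MADV_HUGEPAGE` is a Linux-specific transparent-huge-page hint; on FreeBSD the superpage request happens up front via `MAP_ALIGNED_SUPER` inside `ChooseVirtualBase`. A minimal sketch of the same split as a standalone helper (hypothetical name, same two targets as the diff):

```cpp
// Hypothetical helper mirroring the guarded call above.
static void AdviseHugePages(void* base, size_t size) {
#if defined(__linux__)
    // Transparent-huge-page hint; failure is harmless and only affects
    // performance, so the return value is deliberately ignored.
    madvise(base, size, MADV_HUGEPAGE);
#else
    // FreeBSD already asked for superpages at mmap time (MAP_ALIGNED_SUPER)
    // and has no MADV_HUGEPAGE, so there is nothing to do.
    static_cast<void>(base);
    static_cast<void>(size);
#endif
}
```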
@@ -432,14 +490,29 @@ public:
         Release();
     }

-    void Map(size_t virtual_offset, size_t host_offset, size_t length) {
-        {
-            std::scoped_lock lock{placeholder_mutex};
-            placeholders.subtract({virtual_offset, virtual_offset + length});
+    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms) {
+        // Intersect the range with our address space.
+        AdjustMap(&virtual_offset, &length);
+
+        // We are removing a placeholder.
+        free_manager.AllocateBlock(virtual_base + virtual_offset, length);
+
+        // Deduce mapping protection flags.
+        int flags = PROT_NONE;
+        if (True(perms & MemoryPermission::Read)) {
+            flags |= PROT_READ;
+        }
+        if (True(perms & MemoryPermission::Write)) {
+            flags |= PROT_WRITE;
+        }
+#ifdef ARCHITECTURE_arm64
+        if (True(perms & MemoryPermission::Execute)) {
+            flags |= PROT_EXEC;
         }
+#endif

-        void* ret = mmap(virtual_base + virtual_offset, length, PROT_READ | PROT_WRITE,
-                         MAP_SHARED | MAP_FIXED, fd, host_offset);
+        void* ret = mmap(virtual_base + virtual_offset, length, flags, MAP_SHARED | MAP_FIXED, fd,
+                         host_offset);
         ASSERT_MSG(ret != MAP_FAILED, "mmap failed: {}", strerror(errno));
     }

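`MemoryPermission` and `True()` are defined in yuzu's common headers rather than in this file. A self-contained sketch of the shapes the new `Map` assumes (the real definitions may differ in member names and underlying type):

```cpp
// Assumed shape of the flag types used by Map() above; illustrative only.
enum class MemoryPermission : unsigned {
    Read = 1 << 0,
    Write = 1 << 1,
    Execute = 1 << 2,
};

constexpr MemoryPermission operator&(MemoryPermission lhs, MemoryPermission rhs) {
    return static_cast<MemoryPermission>(static_cast<unsigned>(lhs) &
                                         static_cast<unsigned>(rhs));
}

// True() collapses a masked flag value to bool, so
// True(perms & MemoryPermission::Read) reads as "is Read set?".
template <typename Enum>
constexpr bool True(Enum flags) {
    return static_cast<unsigned>(flags) != 0;
}
```

Note how the `ARCHITECTURE_arm64` guard above (and `HAS_NCE` in `Protect` below) ties executable host mappings to the arm64 native-code-execution build; other targets never map guest memory executable.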
@@ -447,47 +520,54 @@ public:
         // The method name is wrong. We're still talking about the virtual range.
         // We don't want to unmap, we want to reserve this memory.

-        {
-            std::scoped_lock lock{placeholder_mutex};
-            auto it = placeholders.find({virtual_offset - 1, virtual_offset + length + 1});
-
-            if (it != placeholders.end()) {
-                size_t prev_upper = virtual_offset + length;
-                virtual_offset = std::min(virtual_offset, it->lower());
-                length = std::max(it->upper(), prev_upper) - virtual_offset;
-            }
+        // Intersect the range with our address space.
+        AdjustMap(&virtual_offset, &length);

-            placeholders.add({virtual_offset, virtual_offset + length});
-        }
+        // Merge with any adjacent placeholder mappings.
+        auto [merged_pointer, merged_size] =
+            free_manager.FreeBlock(virtual_base + virtual_offset, length);

-        void* ret = mmap(virtual_base + virtual_offset, length, PROT_NONE,
+        void* ret = mmap(merged_pointer, merged_size, PROT_NONE,
                          MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
         ASSERT_MSG(ret != MAP_FAILED, "mmap failed: {}", strerror(errno));
     }

-    void Protect(size_t virtual_offset, size_t length, bool read, bool write) {
-        int flags = 0;
+    void Protect(size_t virtual_offset, size_t length, bool read, bool write, bool execute) {
+        // Intersect the range with our address space.
+        AdjustMap(&virtual_offset, &length);
+
+        int flags = PROT_NONE;
         if (read) {
             flags |= PROT_READ;
         }
         if (write) {
             flags |= PROT_WRITE;
         }
+#ifdef HAS_NCE
+        if (execute) {
+            flags |= PROT_EXEC;
+        }
+#endif
         int ret = mprotect(virtual_base + virtual_offset, length, flags);
         ASSERT_MSG(ret == 0, "mprotect failed: {}", strerror(errno));
     }

+    void EnableDirectMappedAddress() {
+        virtual_base = nullptr;
+    }
+
     const size_t backing_size; ///< Size of the backing memory in bytes
     const size_t virtual_size; ///< Size of the virtual address placeholder in bytes

     u8* backing_base{reinterpret_cast<u8*>(MAP_FAILED)};
     u8* virtual_base{reinterpret_cast<u8*>(MAP_FAILED)};
+    u8* virtual_map_base{reinterpret_cast<u8*>(MAP_FAILED)};

 private:
     /// Release all resources in the object
     void Release() {
-        if (virtual_base != MAP_FAILED) {
-            int ret = munmap(virtual_base, virtual_size);
+        if (virtual_map_base != MAP_FAILED) {
+            int ret = munmap(virtual_map_base, virtual_size);
             ASSERT_MSG(ret == 0, "munmap failed: {}", strerror(errno));
         }

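`FreeRegionManager` (from the newly included `common/free_region_manager.h`) replaces the open-coded `boost::icl` placeholder set, but its implementation is not part of this diff. A hedged sketch of the contract that `Map`/`Unmap` rely on above, reusing the interval-set technique of the removed code; names and details are illustrative:

```cpp
// Illustrative stand-in for common/free_region_manager.h, not the real file.
#include <boost/icl/interval_set.hpp>
#include <cstdint>
#include <mutex>
#include <utility>

class FreeRegionManagerSketch {
public:
    void SetAddressSpace(void* start, size_t size) {
        FreeBlock(start, size); // initially the whole arena is one placeholder
    }

    // Called by Unmap(): mark [block_ptr, block_ptr + block_size) free and
    // return the extent merged with any touching free neighbours, so the
    // caller can re-reserve it with a single PROT_NONE mmap.
    std::pair<void*, size_t> FreeBlock(void* block_ptr, size_t block_size) {
        std::scoped_lock lk{mutex};
        const auto start = reinterpret_cast<uintptr_t>(block_ptr);
        free_regions.add({start, start + block_size});
        // A joining interval_set coalesces touching intervals, so look up the
        // merged result containing the block we just freed.
        const auto it = free_regions.find(start);
        return {reinterpret_cast<void*>(it->lower()), it->upper() - it->lower()};
    }

    // Called by Map(): the placeholder is being replaced by a real mapping.
    void AllocateBlock(void* block_ptr, size_t block_size) {
        std::scoped_lock lk{mutex};
        const auto start = reinterpret_cast<uintptr_t>(block_ptr);
        free_regions.subtract({start, start + block_size});
    }

private:
    std::mutex mutex;
    boost::icl::interval_set<uintptr_t> free_regions;
};
```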
@@ -502,10 +582,29 @@ private:
         }
     }

-    int fd{-1}; // memfd file descriptor, -1 is the error value of memfd_create
+    void AdjustMap(size_t* virtual_offset, size_t* length) {
+        if (virtual_base != nullptr) {
+            return;
+        }
+
+        // If we are direct mapped, we want to make sure we are operating on a region
+        // that is in range of our virtual mapping.
+        size_t intended_start = *virtual_offset;
+        size_t intended_end = intended_start + *length;
+        size_t address_space_start = reinterpret_cast<size_t>(virtual_map_base);
+        size_t address_space_end = address_space_start + virtual_size;

-    boost::icl::interval_set<size_t> placeholders; ///< Mapped placeholders
-    std::mutex placeholder_mutex;                  ///< Mutex for placeholders
+        if (address_space_start > intended_end || intended_start > address_space_end) {
+            *virtual_offset = 0;
+            *length = 0;
+        } else {
+            *virtual_offset = std::max(intended_start, address_space_start);
+            *length = std::min(intended_end, address_space_end) - *virtual_offset;
+        }
+    }
+
+    int fd{-1}; // memfd file descriptor, -1 is the error value of memfd_create
+    FreeRegionManager free_manager{};
 };

 #else // ^^^ Linux ^^^ vvv Generic vvv
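The clamping in `AdjustMap` only has an effect in direct-mapped mode (`virtual_base == nullptr`), where the incoming `virtual_offset` is an absolute host address rather than an arena-relative offset. A standalone restatement of the logic with concrete numbers (hypothetical arena values):

```cpp
// Standalone check of the AdjustMap clamping above, with made-up addresses.
#include <algorithm>
#include <cassert>
#include <cstddef>

static void AdjustMapSketch(size_t* offset, size_t* length, size_t base, size_t size) {
    const size_t start = *offset;
    const size_t end = start + *length;
    const size_t as_start = base;
    const size_t as_end = base + size;
    if (as_start > end || start > as_end) {
        // No overlap with the arena: degenerate to a no-op range.
        *offset = 0;
        *length = 0;
    } else {
        // Clamp to the intersection of the request and the arena.
        *offset = std::max(start, as_start);
        *length = std::min(end, as_end) - *offset;
    }
}

int main() {
    // Arena at 0x1'0000'0000 with size 0x4000'0000; the request straddles its start.
    size_t offset = 0xF000'0000, length = 0x2000'0000;
    AdjustMapSketch(&offset, &length, 0x1'0000'0000, 0x4000'0000);
    assert(offset == 0x1'0000'0000 && length == 0x1000'0000);
}
```

A request entirely outside the arena collapses to `length == 0`, making the subsequent `mmap`/`mprotect` a no-op.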
@@ -518,11 +617,13 @@ public:
         throw std::bad_alloc{};
     }

-    void Map(size_t virtual_offset, size_t host_offset, size_t length) {}
+    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perm) {}

     void Unmap(size_t virtual_offset, size_t length) {}

-    void Protect(size_t virtual_offset, size_t length, bool read, bool write) {}
+    void Protect(size_t virtual_offset, size_t length, bool read, bool write, bool execute) {}
+
+    void EnableDirectMappedAddress() {}

     u8* backing_base{nullptr};
     u8* virtual_base{nullptr};
@@ -535,15 +636,16 @@ HostMemory::HostMemory(size_t backing_size_, size_t virtual_size_)
     try {
         // Try to allocate a fastmem arena.
         // The implementation will fail with std::bad_alloc on errors.
-        impl = std::make_unique<HostMemory::Impl>(AlignUp(backing_size, PageAlignment),
-                                                  AlignUp(virtual_size, PageAlignment) +
-                                                      3 * HugePageSize);
+        impl =
+            std::make_unique<HostMemory::Impl>(AlignUp(backing_size, PageAlignment),
+                                               AlignUp(virtual_size, PageAlignment) + HugePageSize);
         backing_base = impl->backing_base;
         virtual_base = impl->virtual_base;

         if (virtual_base) {
-            virtual_base += 2 * HugePageSize - 1;
-            virtual_base -= reinterpret_cast<size_t>(virtual_base) & (HugePageSize - 1);
+            // Ensure the virtual base is aligned to the L2 block size.
+            virtual_base = reinterpret_cast<u8*>(
+                Common::AlignUp(reinterpret_cast<uintptr_t>(virtual_base), HugePageSize));
             virtual_base_offset = virtual_base - impl->virtual_base;
         }

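The constructor change swaps the open-coded round-up (`+ 2 * HugePageSize - 1`, then mask) for `Common::AlignUp`, and shrinks the over-allocation from `3 * HugePageSize` to a single `HugePageSize`, which covers the worst-case padding one align-up can consume. The arithmetic, spelled out (assuming the 2 MiB `HugePageSize` used elsewhere in this file):

```cpp
// The round-up that Common::AlignUp performs, spelled out for a 2 MiB block.
#include <cstdint>

constexpr uint64_t AlignUpSketch(uint64_t x, uint64_t a) {
    // Valid for power-of-two alignments only.
    return (x + a - 1) & ~(a - 1);
}

constexpr uint64_t HugePageSize = 0x200000; // 2 MiB L2 block

// A misaligned mmap result rounds up to the next 2 MiB boundary...
static_assert(AlignUpSketch(0x7f12'34567000, HugePageSize) == 0x7f12'34600000);
// ...and an already-aligned address is unchanged. Since mmap results are page
// aligned, at most HugePageSize - PageSize of the extra reservation is consumed.
static_assert(AlignUpSketch(0x7f12'34600000, HugePageSize) == 0x7f12'34600000);
```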
@@ -562,7 +664,8 @@ HostMemory::HostMemory(HostMemory&&) noexcept = default;

 HostMemory& HostMemory::operator=(HostMemory&&) noexcept = default;

-void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length) {
+void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length,
+                     MemoryPermission perms) {
     ASSERT(virtual_offset % PageAlignment == 0);
     ASSERT(host_offset % PageAlignment == 0);
     ASSERT(length % PageAlignment == 0);
@@ -571,7 +674,7 @@ void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length) {
     if (length == 0 || !virtual_base || !impl) {
         return;
     }
-    impl->Map(virtual_offset + virtual_base_offset, host_offset, length);
+    impl->Map(virtual_offset + virtual_base_offset, host_offset, length, perms);
 }

 void HostMemory::Unmap(size_t virtual_offset, size_t length) {
@@ -584,14 +687,22 @@ void HostMemory::Unmap(size_t virtual_offset, size_t length) {
     impl->Unmap(virtual_offset + virtual_base_offset, length);
 }

-void HostMemory::Protect(size_t virtual_offset, size_t length, bool read, bool write) {
+void HostMemory::Protect(size_t virtual_offset, size_t length, bool read, bool write,
+                         bool execute) {
     ASSERT(virtual_offset % PageAlignment == 0);
     ASSERT(length % PageAlignment == 0);
     ASSERT(virtual_offset + length <= virtual_size);
     if (length == 0 || !virtual_base || !impl) {
         return;
     }
-    impl->Protect(virtual_offset + virtual_base_offset, length, read, write);
+    impl->Protect(virtual_offset + virtual_base_offset, length, read, write, execute);
+}
+
+void HostMemory::EnableDirectMappedAddress() {
+    if (impl) {
+        impl->EnableDirectMappedAddress();
+        virtual_size += reinterpret_cast<uintptr_t>(virtual_base);
+    }
 }

 } // namespace Common
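In direct-mapped mode the `Impl` keeps the reservation reachable through the new `virtual_map_base` member (which `Release` now unmaps) while `virtual_base` is nulled, so the offsets passed to `Map`/`Protect` become absolute host addresses. `HostMemory::EnableDirectMappedAddress` therefore widens `virtual_size` by the base address so the existing `ASSERT(virtual_offset + length <= virtual_size)` bounds checks keep passing. A small self-contained illustration (values are made up):

```cpp
// Minimal sketch of why virtual_size is widened; names mirror the members above.
#include <cassert>
#include <cstdint>

int main() {
    // Hypothetical arena: HostMemory::virtual_base after alignment.
    const uintptr_t base = 0x1'0000'0000;
    size_t virtual_size = 0x4000'0000;

    // After EnableDirectMappedAddress(), impl->virtual_base is nullptr and a
    // virtual_offset is an absolute host address. Without widening, the
    // ASSERT(virtual_offset + length <= virtual_size) check in Map/Protect
    // would reject every address inside the arena.
    virtual_size += base;

    const size_t virtual_offset = base + 0x2000'0000; // an address in the arena
    assert(virtual_offset + 0x1000 <= virtual_size);
}
```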