Diffstat (limited to 'src/common/host_memory.cpp')
-rw-r--r--   src/common/host_memory.cpp   538
1 files changed, 538 insertions, 0 deletions
diff --git a/src/common/host_memory.cpp b/src/common/host_memory.cpp
new file mode 100644
index 000000000..8bd70abc7
--- /dev/null
+++ b/src/common/host_memory.cpp
@@ -0,0 +1,538 @@
#ifdef _WIN32

#include <algorithm>
#include <iterator>
#include <unordered_map>
#include <boost/icl/separate_interval_set.hpp>
#include <windows.h>
#include "common/dynamic_library.h"

#elif defined(__linux__) // ^^^ Windows ^^^ vvv Linux vvv

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <cerrno>
#include <cstring>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

#endif // ^^^ Linux ^^^

#include <mutex>

#include "common/alignment.h"
#include "common/assert.h"
#include "common/host_memory.h"
#include "common/logging/log.h"
#include "common/scope_exit.h"

namespace Common {

constexpr size_t PageAlignment = 0x1000;
constexpr size_t HugePageSize = 0x200000;

#ifdef _WIN32

// Manually imported for MinGW compatibility
#ifndef MEM_RESERVE_PLACEHOLDER
#define MEM_RESERVE_PLACEHOLDER 0x00040000
#endif
#ifndef MEM_REPLACE_PLACEHOLDER
#define MEM_REPLACE_PLACEHOLDER 0x00004000
#endif
#ifndef MEM_COALESCE_PLACEHOLDERS
#define MEM_COALESCE_PLACEHOLDERS 0x00000001
#endif
#ifndef MEM_PRESERVE_PLACEHOLDER
#define MEM_PRESERVE_PLACEHOLDER 0x00000002
#endif

using PFN_CreateFileMapping2 = _Ret_maybenull_ HANDLE(WINAPI*)(
    _In_ HANDLE File, _In_opt_ SECURITY_ATTRIBUTES* SecurityAttributes, _In_ ULONG DesiredAccess,
    _In_ ULONG PageProtection, _In_ ULONG AllocationAttributes, _In_ ULONG64 MaximumSize,
    _In_opt_ PCWSTR Name,
    _Inout_updates_opt_(ParameterCount) MEM_EXTENDED_PARAMETER* ExtendedParameters,
    _In_ ULONG ParameterCount);

using PFN_VirtualAlloc2 = _Ret_maybenull_ PVOID(WINAPI*)(
    _In_opt_ HANDLE Process, _In_opt_ PVOID BaseAddress, _In_ SIZE_T Size,
    _In_ ULONG AllocationType, _In_ ULONG PageProtection,
    _Inout_updates_opt_(ParameterCount) MEM_EXTENDED_PARAMETER* ExtendedParameters,
    _In_ ULONG ParameterCount);

using PFN_MapViewOfFile3 = _Ret_maybenull_ PVOID(WINAPI*)(
    _In_ HANDLE FileMapping, _In_opt_ HANDLE Process, _In_opt_ PVOID BaseAddress,
    _In_ ULONG64 Offset, _In_ SIZE_T ViewSize, _In_ ULONG AllocationType, _In_ ULONG PageProtection,
    _Inout_updates_opt_(ParameterCount) MEM_EXTENDED_PARAMETER* ExtendedParameters,
    _In_ ULONG ParameterCount);

using PFN_UnmapViewOfFile2 = BOOL(WINAPI*)(_In_ HANDLE Process, _In_ PVOID BaseAddress,
                                           _In_ ULONG UnmapFlags);

template <typename T>
static void GetFuncAddress(Common::DynamicLibrary& dll, const char* name, T& pfn) {
    if (!dll.GetSymbol(name, &pfn)) {
        LOG_CRITICAL(HW_Memory, "Failed to load {}", name);
        throw std::bad_alloc{};
    }
}

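/// Windows implementation. The virtual arena is reserved as one large placeholder, and views of a
/// pagefile-backed section (CreateFileMapping2) are swapped in and out of it with MapViewOfFile3
/// and UnmapViewOfFile2, splitting and coalescing placeholders as regions are mapped and unmapped.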
class HostMemory::Impl {
public:
    explicit Impl(size_t backing_size_, size_t virtual_size_)
        : backing_size{backing_size_}, virtual_size{virtual_size_}, process{GetCurrentProcess()},
          kernelbase_dll("Kernelbase") {
        if (!kernelbase_dll.IsOpen()) {
            LOG_CRITICAL(HW_Memory, "Failed to load Kernelbase.dll");
            throw std::bad_alloc{};
        }
        GetFuncAddress(kernelbase_dll, "CreateFileMapping2", pfn_CreateFileMapping2);
        GetFuncAddress(kernelbase_dll, "VirtualAlloc2", pfn_VirtualAlloc2);
        GetFuncAddress(kernelbase_dll, "MapViewOfFile3", pfn_MapViewOfFile3);
        GetFuncAddress(kernelbase_dll, "UnmapViewOfFile2", pfn_UnmapViewOfFile2);

        // Allocate backing file map
        backing_handle =
            pfn_CreateFileMapping2(INVALID_HANDLE_VALUE, nullptr, FILE_MAP_WRITE | FILE_MAP_READ,
                                   PAGE_READWRITE, SEC_COMMIT, backing_size, nullptr, nullptr, 0);
        if (!backing_handle) {
            LOG_CRITICAL(HW_Memory, "Failed to allocate {} MiB of backing memory",
                         backing_size >> 20);
            throw std::bad_alloc{};
        }
        // Reserve virtual memory for the backing file map as a placeholder
        backing_base = static_cast<u8*>(pfn_VirtualAlloc2(process, nullptr, backing_size,
                                                          MEM_RESERVE | MEM_RESERVE_PLACEHOLDER,
                                                          PAGE_NOACCESS, nullptr, 0));
        if (!backing_base) {
            Release();
            LOG_CRITICAL(HW_Memory, "Failed to reserve {} MiB of virtual memory",
                         backing_size >> 20);
            throw std::bad_alloc{};
        }
        // Map backing placeholder
        void* const ret = pfn_MapViewOfFile3(backing_handle, process, backing_base, 0, backing_size,
                                             MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, nullptr, 0);
        if (ret != backing_base) {
            Release();
            LOG_CRITICAL(HW_Memory, "Failed to map {} MiB of virtual memory", backing_size >> 20);
            throw std::bad_alloc{};
        }
        // Allocate virtual address placeholder
        virtual_base = static_cast<u8*>(pfn_VirtualAlloc2(process, nullptr, virtual_size,
                                                          MEM_RESERVE | MEM_RESERVE_PLACEHOLDER,
                                                          PAGE_NOACCESS, nullptr, 0));
        if (!virtual_base) {
            Release();
            LOG_CRITICAL(HW_Memory, "Failed to reserve {} GiB of virtual memory",
                         virtual_size >> 30);
            throw std::bad_alloc{};
        }
    }

    ~Impl() {
        Release();
    }

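    /// Map length bytes of the backing memory at host_offset into the virtual arena at
    /// virtual_offset.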
    void Map(size_t virtual_offset, size_t host_offset, size_t length) {
        std::unique_lock lock{placeholder_mutex};
        if (!IsNiechePlaceholder(virtual_offset, length)) {
            Split(virtual_offset, length);
        }
        ASSERT(placeholders.find({virtual_offset, virtual_offset + length}) == placeholders.end());
        TrackPlaceholder(virtual_offset, host_offset, length);

        MapView(virtual_offset, host_offset, length);
    }

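    /// Unmap every placeholder overlapping [virtual_offset, virtual_offset + length).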
    void Unmap(size_t virtual_offset, size_t length) {
        std::lock_guard lock{placeholder_mutex};

        // Unmap until there are no more placeholders
        while (UnmapOnePlaceholder(virtual_offset, length)) {
        }
    }

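    /// Change the protection of the mapped placeholders overlapping the given range.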
    void Protect(size_t virtual_offset, size_t length, bool read, bool write) {
        DWORD new_flags{};
        if (read && write) {
            new_flags = PAGE_READWRITE;
        } else if (read && !write) {
            new_flags = PAGE_READONLY;
        } else if (!read && !write) {
            new_flags = PAGE_NOACCESS;
        } else {
            UNIMPLEMENTED_MSG("Protection flag combination read={} write={}", read, write);
        }
        const size_t virtual_end = virtual_offset + length;

        std::lock_guard lock{placeholder_mutex};
        auto [it, end] = placeholders.equal_range({virtual_offset, virtual_end});
        while (it != end) {
            const size_t offset = std::max(it->lower(), virtual_offset);
            const size_t protect_length = std::min(it->upper(), virtual_end) - offset;
            DWORD old_flags{};
            if (!VirtualProtect(virtual_base + offset, protect_length, new_flags, &old_flags)) {
                LOG_CRITICAL(HW_Memory, "Failed to change virtual memory protect rules");
            }
            ++it;
        }
    }

    const size_t backing_size; ///< Size of the backing memory in bytes
    const size_t virtual_size; ///< Size of the virtual address placeholder in bytes

    u8* backing_base{};
    u8* virtual_base{};

private:
    /// Release all resources in the object
    void Release() {
        if (!placeholders.empty()) {
            for (const auto& placeholder : placeholders) {
                if (!pfn_UnmapViewOfFile2(process, virtual_base + placeholder.lower(),
                                          MEM_PRESERVE_PLACEHOLDER)) {
                    LOG_CRITICAL(HW_Memory, "Failed to unmap virtual memory placeholder");
                }
            }
            Coalesce(0, virtual_size);
        }
        if (virtual_base) {
            if (!VirtualFree(virtual_base, 0, MEM_RELEASE)) {
                LOG_CRITICAL(HW_Memory, "Failed to free virtual memory");
            }
        }
        if (backing_base) {
            if (!pfn_UnmapViewOfFile2(process, backing_base, MEM_PRESERVE_PLACEHOLDER)) {
                LOG_CRITICAL(HW_Memory, "Failed to unmap backing memory placeholder");
            }
            if (!VirtualFreeEx(process, backing_base, 0, MEM_RELEASE)) {
                LOG_CRITICAL(HW_Memory, "Failed to free backing memory");
            }
        }
        if (!CloseHandle(backing_handle)) {
            LOG_CRITICAL(HW_Memory, "Failed to free backing memory file handle");
        }
    }


    /// Unmap one placeholder in the given range (partial unmaps are supported).
    /// Returns true if a placeholder was unmapped, false when there are none left in the range.
    bool UnmapOnePlaceholder(size_t virtual_offset, size_t length) {
        const auto it = placeholders.find({virtual_offset, virtual_offset + length});
        const auto begin = placeholders.begin();
        const auto end = placeholders.end();
        if (it == end) {
            return false;
        }
        const size_t placeholder_begin = it->lower();
        const size_t placeholder_end = it->upper();
        const size_t unmap_begin = std::max(virtual_offset, placeholder_begin);
        const size_t unmap_end = std::min(virtual_offset + length, placeholder_end);
        ASSERT(unmap_begin >= placeholder_begin && unmap_begin < placeholder_end);
        ASSERT(unmap_end <= placeholder_end && unmap_end > placeholder_begin);

        const auto host_pointer_it = placeholder_host_pointers.find(placeholder_begin);
        ASSERT(host_pointer_it != placeholder_host_pointers.end());
        const size_t host_offset = host_pointer_it->second;

        const bool split_left = unmap_begin > placeholder_begin;
        const bool split_right = unmap_end < placeholder_end;

        if (!pfn_UnmapViewOfFile2(process, virtual_base + placeholder_begin,
                                  MEM_PRESERVE_PLACEHOLDER)) {
            LOG_CRITICAL(HW_Memory, "Failed to unmap placeholder");
        }
        // If we have to remap memory regions due to partial unmaps, we are in a data race as
        // Windows doesn't support remapping memory without unmapping first. Avoid adding any extra
        // logic within the panic region described below.

        // Panic region, we are in a data race right now
        if (split_left || split_right) {
            Split(unmap_begin, unmap_end - unmap_begin);
        }
        if (split_left) {
            MapView(placeholder_begin, host_offset, unmap_begin - placeholder_begin);
        }
        if (split_right) {
            MapView(unmap_end, host_offset + unmap_end - placeholder_begin,
                    placeholder_end - unmap_end);
        }
        // End panic region

        size_t coalesce_begin = unmap_begin;
        if (!split_left) {
            // Try to coalesce pages to the left
            coalesce_begin = it == begin ? 0 : std::prev(it)->upper();
            if (coalesce_begin != placeholder_begin) {
                Coalesce(coalesce_begin, unmap_end - coalesce_begin);
            }
        }
        if (!split_right) {
            // Try to coalesce pages to the right
            const auto next = std::next(it);
            const size_t next_begin = next == end ? virtual_size : next->lower();
            if (placeholder_end != next_begin) {
                // We can coalesce to the right
                Coalesce(coalesce_begin, next_begin - coalesce_begin);
            }
        }
        // Remove and reinsert placeholder trackers
        UntrackPlaceholder(it);
        if (split_left) {
            TrackPlaceholder(placeholder_begin, host_offset, unmap_begin - placeholder_begin);
        }
        if (split_right) {
            TrackPlaceholder(unmap_end, host_offset + unmap_end - placeholder_begin,
                             placeholder_end - unmap_end);
        }
        return true;
    }

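    /// Map a view of the backing file over the placeholder at virtual_offset.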
    void MapView(size_t virtual_offset, size_t host_offset, size_t length) {
        if (!pfn_MapViewOfFile3(backing_handle, process, virtual_base + virtual_offset, host_offset,
                                length, MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, nullptr, 0)) {
            LOG_CRITICAL(HW_Memory, "Failed to map placeholder");
        }
    }

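    /// Split the placeholder containing the given range so that the range becomes a placeholder of
    /// its own (MEM_PRESERVE_PLACEHOLDER keeps the freed region reserved as a placeholder).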
    void Split(size_t virtual_offset, size_t length) {
        if (!VirtualFreeEx(process, reinterpret_cast<LPVOID>(virtual_base + virtual_offset), length,
                           MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER)) {
            LOG_CRITICAL(HW_Memory, "Failed to split placeholder");
        }
    }

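    /// Merge contiguous placeholders in the given range back into a single placeholder.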
    void Coalesce(size_t virtual_offset, size_t length) {
        if (!VirtualFreeEx(process, reinterpret_cast<LPVOID>(virtual_base + virtual_offset), length,
                           MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS)) {
            LOG_CRITICAL(HW_Memory, "Failed to coalesce placeholders");
        }
    }

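    /// Record a mapped placeholder and the backing offset it maps to.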
    void TrackPlaceholder(size_t virtual_offset, size_t host_offset, size_t length) {
        placeholders.insert({virtual_offset, virtual_offset + length});
        placeholder_host_pointers.emplace(virtual_offset, host_offset);
    }

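    /// Forget a tracked placeholder and its backing offset.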
    void UntrackPlaceholder(boost::icl::separate_interval_set<size_t>::iterator it) {
        // Erase the host pointer entry first; erasing from the set invalidates the iterator
        placeholder_host_pointers.erase(it->lower());
        placeholders.erase(it);
    }

    /// Return true when a given memory region is a "nieche" and the placeholders don't have to be
    /// split.
    bool IsNiechePlaceholder(size_t virtual_offset, size_t length) const {
        const auto it = placeholders.upper_bound({virtual_offset, virtual_offset + length});
        if (it != placeholders.end() && it->lower() == virtual_offset + length) {
            const bool is_root = it == placeholders.begin() && virtual_offset == 0;
            return is_root || std::prev(it)->upper() == virtual_offset;
        }
        return false;
    }

    HANDLE process{};        ///< Current process handle
    HANDLE backing_handle{}; ///< File based backing memory

    DynamicLibrary kernelbase_dll;
    PFN_CreateFileMapping2 pfn_CreateFileMapping2{};
    PFN_VirtualAlloc2 pfn_VirtualAlloc2{};
    PFN_MapViewOfFile3 pfn_MapViewOfFile3{};
    PFN_UnmapViewOfFile2 pfn_UnmapViewOfFile2{};

    std::mutex placeholder_mutex;                                 ///< Mutex for placeholders
    boost::icl::separate_interval_set<size_t> placeholders;       ///< Mapped placeholders
    std::unordered_map<size_t, size_t> placeholder_host_pointers; ///< Placeholder backing offset
};

#elif defined(__linux__) // ^^^ Windows ^^^ vvv Linux vvv

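/// Linux implementation. A memfd provides the backing memory; the virtual arena is a PROT_NONE
/// reservation whose sub-ranges are replaced in place with file-backed mappings by Map() and
/// restored to PROT_NONE by Unmap().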
class HostMemory::Impl {
public:
    explicit Impl(size_t backing_size_, size_t virtual_size_)
        : backing_size{backing_size_}, virtual_size{virtual_size_} {
        bool good = false;
        SCOPE_EXIT({
            if (!good) {
                Release();
            }
        });

        // Backing memory initialization
        fd = memfd_create("HostMemory", 0);
        if (fd == -1) {
            LOG_CRITICAL(HW_Memory, "memfd_create failed: {}", strerror(errno));
            throw std::bad_alloc{};
        }

        // ftruncate is defined to extend the file with zeros
        int ret = ftruncate(fd, backing_size);
        if (ret != 0) {
            LOG_CRITICAL(HW_Memory, "ftruncate failed with {}, are you out of memory?",
                         strerror(errno));
            throw std::bad_alloc{};
        }

        backing_base = static_cast<u8*>(
            mmap(nullptr, backing_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0));
        if (backing_base == MAP_FAILED) {
            LOG_CRITICAL(HW_Memory, "mmap failed: {}", strerror(errno));
            throw std::bad_alloc{};
        }

        // Virtual memory initialization
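        // Reserve the whole arena as inaccessible anonymous memory; Map() later replaces
        // sub-ranges in place with shared mappings of the memfd.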
        virtual_base = static_cast<u8*>(
            mmap(nullptr, virtual_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
        if (virtual_base == MAP_FAILED) {
            LOG_CRITICAL(HW_Memory, "mmap failed: {}", strerror(errno));
            throw std::bad_alloc{};
        }

        good = true;
    }

    ~Impl() {
        Release();
    }

    void Map(size_t virtual_offset, size_t host_offset, size_t length) {
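        // MAP_FIXED atomically replaces whatever is mapped in this range (normally the PROT_NONE
        // reservation) with a shared, writable view of the memfd at host_offset.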
        void* ret = mmap(virtual_base + virtual_offset, length, PROT_READ | PROT_WRITE,
                         MAP_SHARED | MAP_FIXED, fd, host_offset);
        ASSERT_MSG(ret != MAP_FAILED, "mmap failed: {}", strerror(errno));
    }

    void Unmap(size_t virtual_offset, size_t length) {
        // Despite the method name, we do not actually unmap the virtual range here; we re-reserve
        // it as an inaccessible anonymous mapping so the address space stays reserved.
        void* ret = mmap(virtual_base + virtual_offset, length, PROT_NONE,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
        ASSERT_MSG(ret != MAP_FAILED, "mmap failed: {}", strerror(errno));
    }

    void Protect(size_t virtual_offset, size_t length, bool read, bool write) {
        int flags = 0;
        if (read) {
            flags |= PROT_READ;
        }
        if (write) {
            flags |= PROT_WRITE;
        }
        int ret = mprotect(virtual_base + virtual_offset, length, flags);
        ASSERT_MSG(ret == 0, "mprotect failed: {}", strerror(errno));
    }

    const size_t backing_size; ///< Size of the backing memory in bytes
    const size_t virtual_size; ///< Size of the virtual address placeholder in bytes

    u8* backing_base{reinterpret_cast<u8*>(MAP_FAILED)};
    u8* virtual_base{reinterpret_cast<u8*>(MAP_FAILED)};

private:
    /// Release all resources in the object
    void Release() {
        if (virtual_base != MAP_FAILED) {
            int ret = munmap(virtual_base, virtual_size);
            ASSERT_MSG(ret == 0, "munmap failed: {}", strerror(errno));
        }

        if (backing_base != MAP_FAILED) {
            int ret = munmap(backing_base, backing_size);
            ASSERT_MSG(ret == 0, "munmap failed: {}", strerror(errno));
        }

        if (fd != -1) {
            int ret = close(fd);
            ASSERT_MSG(ret == 0, "close failed: {}", strerror(errno));
        }
    }

    int fd{-1}; // memfd file descriptor, -1 is the error value of memfd_create
};

#else // ^^^ Linux ^^^ vvv Generic vvv

class HostMemory::Impl {
public:
    explicit Impl(size_t /* backing_size */, size_t /* virtual_size */) {
        // This is just a placeholder.
        // Please implement fastmem in a proper way on your platform.
        throw std::bad_alloc{};
    }

    void Map(size_t virtual_offset, size_t host_offset, size_t length) {}

    void Unmap(size_t virtual_offset, size_t length) {}

    void Protect(size_t virtual_offset, size_t length, bool read, bool write) {}

    u8* backing_base{nullptr};
    u8* virtual_base{nullptr};
};

#endif // ^^^ Generic ^^^

HostMemory::HostMemory(size_t backing_size_, size_t virtual_size_)
    : backing_size(backing_size_), virtual_size(virtual_size_) {
    try {
        // Try to allocate a fastmem arena.
        // The implementation will fail with std::bad_alloc on errors.
        impl = std::make_unique<HostMemory::Impl>(AlignUp(backing_size, PageAlignment),
                                                  AlignUp(virtual_size, PageAlignment) +
                                                      3 * HugePageSize);
        backing_base = impl->backing_base;
        virtual_base = impl->virtual_base;

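        // Align the usable virtual base up to the next 2 MiB huge-page boundary; the extra
        // 3 * HugePageSize reserved above guarantees that virtual_size still fits after the shift.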
        if (virtual_base) {
            virtual_base += 2 * HugePageSize - 1;
            virtual_base -= reinterpret_cast<size_t>(virtual_base) & (HugePageSize - 1);
            virtual_base_offset = virtual_base - impl->virtual_base;
        }

    } catch (const std::bad_alloc&) {
        LOG_CRITICAL(HW_Memory,
                     "Fastmem unavailable, falling back to VirtualBuffer for memory allocation");
        fallback_buffer = std::make_unique<Common::VirtualBuffer<u8>>(backing_size);
        backing_base = fallback_buffer->data();
        virtual_base = nullptr;
    }
}

HostMemory::~HostMemory() = default;

HostMemory::HostMemory(HostMemory&&) noexcept = default;

HostMemory& HostMemory::operator=(HostMemory&&) noexcept = default;

void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length) {
    ASSERT(virtual_offset % PageAlignment == 0);
    ASSERT(host_offset % PageAlignment == 0);
    ASSERT(length % PageAlignment == 0);
    ASSERT(virtual_offset + length <= virtual_size);
    ASSERT(host_offset + length <= backing_size);
    if (length == 0 || !virtual_base || !impl) {
        return;
    }
    impl->Map(virtual_offset + virtual_base_offset, host_offset, length);
}

void HostMemory::Unmap(size_t virtual_offset, size_t length) {
    ASSERT(virtual_offset % PageAlignment == 0);
    ASSERT(length % PageAlignment == 0);
    ASSERT(virtual_offset + length <= virtual_size);
    if (length == 0 || !virtual_base || !impl) {
        return;
    }
    impl->Unmap(virtual_offset + virtual_base_offset, length);
}

void HostMemory::Protect(size_t virtual_offset, size_t length, bool read, bool write) {
    ASSERT(virtual_offset % PageAlignment == 0);
    ASSERT(length % PageAlignment == 0);
    ASSERT(virtual_offset + length <= virtual_size);
    if (length == 0 || !virtual_base || !impl) {
        return;
    }
    impl->Protect(virtual_offset + virtual_base_offset, length, read, write);
}

} // namespace Common