Diffstat (limited to 'src/common')
| -rw-r--r-- | src/common/CMakeLists.txt | 2 |
| -rw-r--r-- | src/common/host_memory.cpp | 538 |
| -rw-r--r-- | src/common/host_memory.h | 70 |
| -rw-r--r-- | src/common/page_table.h | 2 |
| -rw-r--r-- | src/common/settings.cpp | 8 |
| -rw-r--r-- | src/common/settings.h | 4 |
6 files changed, 624 insertions, 0 deletions
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index 2d403d471..97fbdcbf9 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
| @@ -131,6 +131,8 @@ add_library(common STATIC | |||
| 131 | hash.h | 131 | hash.h |
| 132 | hex_util.cpp | 132 | hex_util.cpp |
| 133 | hex_util.h | 133 | hex_util.h |
| 134 | host_memory.cpp | ||
| 135 | host_memory.h | ||
| 134 | intrusive_red_black_tree.h | 136 | intrusive_red_black_tree.h |
| 135 | logging/backend.cpp | 137 | logging/backend.cpp |
| 136 | logging/backend.h | 138 | logging/backend.h |
diff --git a/src/common/host_memory.cpp b/src/common/host_memory.cpp
new file mode 100644
index 000000000..8bd70abc7
--- /dev/null
+++ b/src/common/host_memory.cpp
| @@ -0,0 +1,538 @@ | |||
| 1 | #ifdef _WIN32 | ||
| 2 | |||
| 3 | #include <iterator> | ||
| 4 | #include <unordered_map> | ||
| 5 | #include <boost/icl/separate_interval_set.hpp> | ||
| 6 | #include <windows.h> | ||
| 7 | #include "common/dynamic_library.h" | ||
| 8 | |||
| 9 | #elif defined(__linux__) // ^^^ Windows ^^^ vvv Linux vvv | ||
| 10 | |||
| 11 | #ifndef _GNU_SOURCE | ||
| 12 | #define _GNU_SOURCE | ||
| 13 | #endif | ||
| 14 | #include <fcntl.h> | ||
| 15 | #include <sys/mman.h> | ||
| 16 | #include <unistd.h> | ||
| 17 | |||
| 18 | #endif // ^^^ Linux ^^^ | ||
| 19 | |||
| 20 | #include <mutex> | ||
| 21 | |||
| 22 | #include "common/alignment.h" | ||
| 23 | #include "common/assert.h" | ||
| 24 | #include "common/host_memory.h" | ||
| 25 | #include "common/logging/log.h" | ||
| 26 | #include "common/scope_exit.h" | ||
| 27 | |||
| 28 | namespace Common { | ||
| 29 | |||
| 30 | constexpr size_t PageAlignment = 0x1000; | ||
| 31 | constexpr size_t HugePageSize = 0x200000; | ||
| 32 | |||
| 33 | #ifdef _WIN32 | ||
| 34 | |||
| 35 | // Manually imported for MinGW compatibility | ||
| 36 | #ifndef MEM_RESERVE_PLACEHOLDER | ||
| 37 | #define MEM_RESERVE_PLACEHOLDER 0x00040000 | ||
| 38 | #endif | ||
| 39 | #ifndef MEM_REPLACE_PLACEHOLDER | ||
| 40 | #define MEM_REPLACE_PLACEHOLDER 0x00004000 | ||
| 41 | #endif | ||
| 42 | #ifndef MEM_COALESCE_PLACEHOLDERS | ||
| 43 | #define MEM_COALESCE_PLACEHOLDERS 0x00000001 | ||
| 44 | #endif | ||
| 45 | #ifndef MEM_PRESERVE_PLACEHOLDER | ||
| 46 | #define MEM_PRESERVE_PLACEHOLDER 0x00000002 | ||
| 47 | #endif | ||
| 48 | |||
| 49 | using PFN_CreateFileMapping2 = _Ret_maybenull_ HANDLE(WINAPI*)( | ||
| 50 | _In_ HANDLE File, _In_opt_ SECURITY_ATTRIBUTES* SecurityAttributes, _In_ ULONG DesiredAccess, | ||
| 51 | _In_ ULONG PageProtection, _In_ ULONG AllocationAttributes, _In_ ULONG64 MaximumSize, | ||
| 52 | _In_opt_ PCWSTR Name, | ||
| 53 | _Inout_updates_opt_(ParameterCount) MEM_EXTENDED_PARAMETER* ExtendedParameters, | ||
| 54 | _In_ ULONG ParameterCount); | ||
| 55 | |||
| 56 | using PFN_VirtualAlloc2 = _Ret_maybenull_ PVOID(WINAPI*)( | ||
| 57 | _In_opt_ HANDLE Process, _In_opt_ PVOID BaseAddress, _In_ SIZE_T Size, | ||
| 58 | _In_ ULONG AllocationType, _In_ ULONG PageProtection, | ||
| 59 | _Inout_updates_opt_(ParameterCount) MEM_EXTENDED_PARAMETER* ExtendedParameters, | ||
| 60 | _In_ ULONG ParameterCount); | ||
| 61 | |||
| 62 | using PFN_MapViewOfFile3 = _Ret_maybenull_ PVOID(WINAPI*)( | ||
| 63 | _In_ HANDLE FileMapping, _In_opt_ HANDLE Process, _In_opt_ PVOID BaseAddress, | ||
| 64 | _In_ ULONG64 Offset, _In_ SIZE_T ViewSize, _In_ ULONG AllocationType, _In_ ULONG PageProtection, | ||
| 65 | _Inout_updates_opt_(ParameterCount) MEM_EXTENDED_PARAMETER* ExtendedParameters, | ||
| 66 | _In_ ULONG ParameterCount); | ||
| 67 | |||
| 68 | using PFN_UnmapViewOfFile2 = BOOL(WINAPI*)(_In_ HANDLE Process, _In_ PVOID BaseAddress, | ||
| 69 | _In_ ULONG UnmapFlags); | ||
| 70 | |||
| 71 | template <typename T> | ||
| 72 | static void GetFuncAddress(Common::DynamicLibrary& dll, const char* name, T& pfn) { | ||
| 73 | if (!dll.GetSymbol(name, &pfn)) { | ||
| 74 | LOG_CRITICAL(HW_Memory, "Failed to load {}", name); | ||
| 75 | throw std::bad_alloc{}; | ||
| 76 | } | ||
| 77 | } | ||
| 78 | |||
| 79 | class HostMemory::Impl { | ||
| 80 | public: | ||
| 81 | explicit Impl(size_t backing_size_, size_t virtual_size_) | ||
| 82 | : backing_size{backing_size_}, virtual_size{virtual_size_}, process{GetCurrentProcess()}, | ||
| 83 | kernelbase_dll("Kernelbase") { | ||
| 84 | if (!kernelbase_dll.IsOpen()) { | ||
| 85 | LOG_CRITICAL(HW_Memory, "Failed to load Kernelbase.dll"); | ||
| 86 | throw std::bad_alloc{}; | ||
| 87 | } | ||
| 88 | GetFuncAddress(kernelbase_dll, "CreateFileMapping2", pfn_CreateFileMapping2); | ||
| 89 | GetFuncAddress(kernelbase_dll, "VirtualAlloc2", pfn_VirtualAlloc2); | ||
| 90 | GetFuncAddress(kernelbase_dll, "MapViewOfFile3", pfn_MapViewOfFile3); | ||
| 91 | GetFuncAddress(kernelbase_dll, "UnmapViewOfFile2", pfn_UnmapViewOfFile2); | ||
| 92 | |||
| 93 | // Allocate backing file map | ||
| 94 | backing_handle = | ||
| 95 | pfn_CreateFileMapping2(INVALID_HANDLE_VALUE, nullptr, FILE_MAP_WRITE | FILE_MAP_READ, | ||
| 96 | PAGE_READWRITE, SEC_COMMIT, backing_size, nullptr, nullptr, 0); | ||
| 97 | if (!backing_handle) { | ||
| 98 | LOG_CRITICAL(HW_Memory, "Failed to allocate {} MiB of backing memory", | ||
| 99 | backing_size >> 20); | ||
| 100 | throw std::bad_alloc{}; | ||
| 101 | } | ||
| 102 | // Allocate virtual memory for the backing file map as a placeholder | ||
| 103 | backing_base = static_cast<u8*>(pfn_VirtualAlloc2(process, nullptr, backing_size, | ||
| 104 | MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, | ||
| 105 | PAGE_NOACCESS, nullptr, 0)); | ||
| 106 | if (!backing_base) { | ||
| 107 | Release(); | ||
| 108 | LOG_CRITICAL(HW_Memory, "Failed to reserve {} MiB of virtual memory", | ||
| 109 | backing_size >> 20); | ||
| 110 | throw std::bad_alloc{}; | ||
| 111 | } | ||
| 112 | // Map backing placeholder | ||
| 113 | void* const ret = pfn_MapViewOfFile3(backing_handle, process, backing_base, 0, backing_size, | ||
| 114 | MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, nullptr, 0); | ||
| 115 | if (ret != backing_base) { | ||
| 116 | Release(); | ||
| 117 | LOG_CRITICAL(HW_Memory, "Failed to map {} MiB of virtual memory", backing_size >> 20); | ||
| 118 | throw std::bad_alloc{}; | ||
| 119 | } | ||
| 120 | // Allocate virtual address placeholder | ||
| 121 | virtual_base = static_cast<u8*>(pfn_VirtualAlloc2(process, nullptr, virtual_size, | ||
| 122 | MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, | ||
| 123 | PAGE_NOACCESS, nullptr, 0)); | ||
| 124 | if (!virtual_base) { | ||
| 125 | Release(); | ||
| 126 | LOG_CRITICAL(HW_Memory, "Failed to reserve {} GiB of virtual memory", | ||
| 127 | virtual_size >> 30); | ||
| 128 | throw std::bad_alloc{}; | ||
| 129 | } | ||
| 130 | } | ||
| 131 | |||
| 132 | ~Impl() { | ||
| 133 | Release(); | ||
| 134 | } | ||
| 135 | |||
| 136 | void Map(size_t virtual_offset, size_t host_offset, size_t length) { | ||
| 137 | std::unique_lock lock{placeholder_mutex}; | ||
| 138 | if (!IsNiechePlaceholder(virtual_offset, length)) { | ||
| 139 | Split(virtual_offset, length); | ||
| 140 | } | ||
| 141 | ASSERT(placeholders.find({virtual_offset, virtual_offset + length}) == placeholders.end()); | ||
| 142 | TrackPlaceholder(virtual_offset, host_offset, length); | ||
| 143 | |||
| 144 | MapView(virtual_offset, host_offset, length); | ||
| 145 | } | ||
| 146 | |||
| 147 | void Unmap(size_t virtual_offset, size_t length) { | ||
| 148 | std::lock_guard lock{placeholder_mutex}; | ||
| 149 | |||
| 150 | // Unmap until there are no more placeholders | ||
| 151 | while (UnmapOnePlaceholder(virtual_offset, length)) { | ||
| 152 | } | ||
| 153 | } | ||
| 154 | |||
| 155 | void Protect(size_t virtual_offset, size_t length, bool read, bool write) { | ||
| 156 | DWORD new_flags{}; | ||
| 157 | if (read && write) { | ||
| 158 | new_flags = PAGE_READWRITE; | ||
| 159 | } else if (read && !write) { | ||
| 160 | new_flags = PAGE_READONLY; | ||
| 161 | } else if (!read && !write) { | ||
| 162 | new_flags = PAGE_NOACCESS; | ||
| 163 | } else { | ||
| 164 | UNIMPLEMENTED_MSG("Protection flag combination read={} write={}", read, write); | ||
| 165 | } | ||
| 166 | const size_t virtual_end = virtual_offset + length; | ||
| 167 | |||
| 168 | std::lock_guard lock{placeholder_mutex}; | ||
| 169 | auto [it, end] = placeholders.equal_range({virtual_offset, virtual_end}); | ||
| 170 | while (it != end) { | ||
| 171 | const size_t offset = std::max(it->lower(), virtual_offset); | ||
| 172 | const size_t protect_length = std::min(it->upper(), virtual_end) - offset; | ||
| 173 | DWORD old_flags{}; | ||
| 174 | if (!VirtualProtect(virtual_base + offset, protect_length, new_flags, &old_flags)) { | ||
| 175 | LOG_CRITICAL(HW_Memory, "Failed to change virtual memory protect rules"); | ||
| 176 | } | ||
| 177 | ++it; | ||
| 178 | } | ||
| 179 | } | ||
| 180 | |||
| 181 | const size_t backing_size; ///< Size of the backing memory in bytes | ||
| 182 | const size_t virtual_size; ///< Size of the virtual address placeholder in bytes | ||
| 183 | |||
| 184 | u8* backing_base{}; | ||
| 185 | u8* virtual_base{}; | ||
| 186 | |||
| 187 | private: | ||
| 188 | /// Release all resources in the object | ||
| 189 | void Release() { | ||
| 190 | if (!placeholders.empty()) { | ||
| 191 | for (const auto& placeholder : placeholders) { | ||
| 192 | if (!pfn_UnmapViewOfFile2(process, virtual_base + placeholder.lower(), | ||
| 193 | MEM_PRESERVE_PLACEHOLDER)) { | ||
| 194 | LOG_CRITICAL(HW_Memory, "Failed to unmap virtual memory placeholder"); | ||
| 195 | } | ||
| 196 | } | ||
| 197 | Coalesce(0, virtual_size); | ||
| 198 | } | ||
| 199 | if (virtual_base) { | ||
| 200 | if (!VirtualFree(virtual_base, 0, MEM_RELEASE)) { | ||
| 201 | LOG_CRITICAL(HW_Memory, "Failed to free virtual memory"); | ||
| 202 | } | ||
| 203 | } | ||
| 204 | if (backing_base) { | ||
| 205 | if (!pfn_UnmapViewOfFile2(process, backing_base, MEM_PRESERVE_PLACEHOLDER)) { | ||
| 206 | LOG_CRITICAL(HW_Memory, "Failed to unmap backing memory placeholder"); | ||
| 207 | } | ||
| 208 | if (!VirtualFreeEx(process, backing_base, 0, MEM_RELEASE)) { | ||
| 209 | LOG_CRITICAL(HW_Memory, "Failed to free backing memory"); | ||
| 210 | } | ||
| 211 | } | ||
| 212 | if (!CloseHandle(backing_handle)) { | ||
| 213 | LOG_CRITICAL(HW_Memory, "Failed to free backing memory file handle"); | ||
| 214 | } | ||
| 215 | } | ||
| 216 | |||
| 217 | /// Unmap one placeholder in the given range (partial unmaps are supported) | ||
| 218 | /// Return true when a placeholder was unmapped, false when none are left in the range | ||
| 219 | bool UnmapOnePlaceholder(size_t virtual_offset, size_t length) { | ||
| 220 | const auto it = placeholders.find({virtual_offset, virtual_offset + length}); | ||
| 221 | const auto begin = placeholders.begin(); | ||
| 222 | const auto end = placeholders.end(); | ||
| 223 | if (it == end) { | ||
| 224 | return false; | ||
| 225 | } | ||
| 226 | const size_t placeholder_begin = it->lower(); | ||
| 227 | const size_t placeholder_end = it->upper(); | ||
| 228 | const size_t unmap_begin = std::max(virtual_offset, placeholder_begin); | ||
| 229 | const size_t unmap_end = std::min(virtual_offset + length, placeholder_end); | ||
| 230 | ASSERT(unmap_begin >= placeholder_begin && unmap_begin < placeholder_end); | ||
| 231 | ASSERT(unmap_end <= placeholder_end && unmap_end > placeholder_begin); | ||
| 232 | |||
| 233 | const auto host_pointer_it = placeholder_host_pointers.find(placeholder_begin); | ||
| 234 | ASSERT(host_pointer_it != placeholder_host_pointers.end()); | ||
| 235 | const size_t host_offset = host_pointer_it->second; | ||
| 236 | |||
| 237 | const bool split_left = unmap_begin > placeholder_begin; | ||
| 238 | const bool split_right = unmap_end < placeholder_end; | ||
| 239 | |||
| 240 | if (!pfn_UnmapViewOfFile2(process, virtual_base + placeholder_begin, | ||
| 241 | MEM_PRESERVE_PLACEHOLDER)) { | ||
| 242 | LOG_CRITICAL(HW_Memory, "Failed to unmap placeholder"); | ||
| 243 | } | ||
| 244 | // If we have to remap memory regions due to partial unmaps, we are in a data race as | ||
| 245 | // Windows doesn't support remapping memory without unmapping first. Avoid adding any extra | ||
| 246 | // logic within the panic region described below. | ||
| 247 | |||
| 248 | // Panic region, we are in a data race right now | ||
| 249 | if (split_left || split_right) { | ||
| 250 | Split(unmap_begin, unmap_end - unmap_begin); | ||
| 251 | } | ||
| 252 | if (split_left) { | ||
| 253 | MapView(placeholder_begin, host_offset, unmap_begin - placeholder_begin); | ||
| 254 | } | ||
| 255 | if (split_right) { | ||
| 256 | MapView(unmap_end, host_offset + unmap_end - placeholder_begin, | ||
| 257 | placeholder_end - unmap_end); | ||
| 258 | } | ||
| 259 | // End panic region | ||
| 260 | |||
| 261 | size_t coalesce_begin = unmap_begin; | ||
| 262 | if (!split_left) { | ||
| 263 | // Try to coalesce pages to the left | ||
| 264 | coalesce_begin = it == begin ? 0 : std::prev(it)->upper(); | ||
| 265 | if (coalesce_begin != placeholder_begin) { | ||
| 266 | Coalesce(coalesce_begin, unmap_end - coalesce_begin); | ||
| 267 | } | ||
| 268 | } | ||
| 269 | if (!split_right) { | ||
| 270 | // Try to coalesce pages to the right | ||
| 271 | const auto next = std::next(it); | ||
| 272 | const size_t next_begin = next == end ? virtual_size : next->lower(); | ||
| 273 | if (placeholder_end != next_begin) { | ||
| 274 | // We can coalesce to the right | ||
| 275 | Coalesce(coalesce_begin, next_begin - coalesce_begin); | ||
| 276 | } | ||
| 277 | } | ||
| 278 | // Remove and reinsert placeholder trackers | ||
| 279 | UntrackPlaceholder(it); | ||
| 280 | if (split_left) { | ||
| 281 | TrackPlaceholder(placeholder_begin, host_offset, unmap_begin - placeholder_begin); | ||
| 282 | } | ||
| 283 | if (split_right) { | ||
| 284 | TrackPlaceholder(unmap_end, host_offset + unmap_end - placeholder_begin, | ||
| 285 | placeholder_end - unmap_end); | ||
| 286 | } | ||
| 287 | return true; | ||
| 288 | } | ||
| 289 | |||
| 290 | void MapView(size_t virtual_offset, size_t host_offset, size_t length) { | ||
| 291 | if (!pfn_MapViewOfFile3(backing_handle, process, virtual_base + virtual_offset, host_offset, | ||
| 292 | length, MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, nullptr, 0)) { | ||
| 293 | LOG_CRITICAL(HW_Memory, "Failed to map placeholder"); | ||
| 294 | } | ||
| 295 | } | ||
| 296 | |||
| 297 | void Split(size_t virtual_offset, size_t length) { | ||
| 298 | if (!VirtualFreeEx(process, reinterpret_cast<LPVOID>(virtual_base + virtual_offset), length, | ||
| 299 | MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER)) { | ||
| 300 | LOG_CRITICAL(HW_Memory, "Failed to split placeholder"); | ||
| 301 | } | ||
| 302 | } | ||
| 303 | |||
| 304 | void Coalesce(size_t virtual_offset, size_t length) { | ||
| 305 | if (!VirtualFreeEx(process, reinterpret_cast<LPVOID>(virtual_base + virtual_offset), length, | ||
| 306 | MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS)) { | ||
| 307 | LOG_CRITICAL(HW_Memory, "Failed to coalesce placeholders"); | ||
| 308 | } | ||
| 309 | } | ||
| 310 | |||
| 311 | void TrackPlaceholder(size_t virtual_offset, size_t host_offset, size_t length) { | ||
| 312 | placeholders.insert({virtual_offset, virtual_offset + length}); | ||
| 313 | placeholder_host_pointers.emplace(virtual_offset, host_offset); | ||
| 314 | } | ||
| 315 | |||
| 316 | void UntrackPlaceholder(boost::icl::separate_interval_set<size_t>::iterator it) { | ||
| 317 | placeholders.erase(it); | ||
| 318 | placeholder_host_pointers.erase(it->lower()); | ||
| 319 | } | ||
| 320 | |||
| 321 | /// Return true when a given memory region is a "niche" and the placeholders don't have to | ||
| 322 | /// be split. | ||
| 323 | bool IsNiechePlaceholder(size_t virtual_offset, size_t length) const { | ||
| 324 | const auto it = placeholders.upper_bound({virtual_offset, virtual_offset + length}); | ||
| 325 | if (it != placeholders.end() && it->lower() == virtual_offset + length) { | ||
| 326 | const bool is_root = it == placeholders.begin() && virtual_offset == 0; | ||
| 327 | return is_root || std::prev(it)->upper() == virtual_offset; | ||
| 328 | } | ||
| 329 | return false; | ||
| 330 | } | ||
| 331 | |||
| 332 | HANDLE process{}; ///< Current process handle | ||
| 333 | HANDLE backing_handle{}; ///< File based backing memory | ||
| 334 | |||
| 335 | DynamicLibrary kernelbase_dll; | ||
| 336 | PFN_CreateFileMapping2 pfn_CreateFileMapping2{}; | ||
| 337 | PFN_VirtualAlloc2 pfn_VirtualAlloc2{}; | ||
| 338 | PFN_MapViewOfFile3 pfn_MapViewOfFile3{}; | ||
| 339 | PFN_UnmapViewOfFile2 pfn_UnmapViewOfFile2{}; | ||
| 340 | |||
| 341 | std::mutex placeholder_mutex; ///< Mutex for placeholders | ||
| 342 | boost::icl::separate_interval_set<size_t> placeholders; ///< Mapped placeholders | ||
| 343 | std::unordered_map<size_t, size_t> placeholder_host_pointers; ///< Placeholder backing offset | ||
| 344 | }; | ||
| 345 | |||
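
For readers unfamiliar with the placeholder API this class builds on, here is a minimal standalone sketch (not part of the commit) of the reserve / replace / preserve cycle. It assumes a recent Windows 10 SDK that declares `VirtualAlloc2`, `MapViewOfFile3` and `UnmapViewOfFile2` directly (linking onecore.lib), instead of resolving them from Kernelbase at runtime as the `Impl` does, and it omits error handling.

```cpp
#include <windows.h>
#include <cstdio>

int main() {
    const SIZE_T size = 0x10000;
    // A pagefile-backed section plays the role of the backing memory.
    HANDLE section = CreateFileMappingW(INVALID_HANDLE_VALUE, nullptr, PAGE_READWRITE, 0,
                                        static_cast<DWORD>(size), nullptr);
    // Reserve a virtual range as a placeholder...
    void* placeholder = VirtualAlloc2(GetCurrentProcess(), nullptr, size,
                                      MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, PAGE_NOACCESS,
                                      nullptr, 0);
    // ...and replace it with a view of the section.
    void* view = MapViewOfFile3(section, GetCurrentProcess(), placeholder, 0, size,
                                MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, nullptr, 0);
    static_cast<unsigned char*>(view)[0] = 0x42;
    std::printf("first byte: 0x%x\n", static_cast<unsigned char*>(view)[0]);
    // Unmapping with MEM_PRESERVE_PLACEHOLDER turns the range back into a placeholder,
    // which can then be split or coalesced with VirtualFree, as Split()/Coalesce() do above.
    UnmapViewOfFile2(GetCurrentProcess(), view, MEM_PRESERVE_PLACEHOLDER);
    VirtualFree(placeholder, 0, MEM_RELEASE);
    CloseHandle(section);
    return 0;
}
```

The `Impl` adds bookkeeping on top of this cycle: every mapped view is tracked in the boost::icl interval set so that partial unmaps know where to split, remap and coalesce placeholders.
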
| 346 | #elif defined(__linux__) // ^^^ Windows ^^^ vvv Linux vvv | ||
| 347 | |||
| 348 | class HostMemory::Impl { | ||
| 349 | public: | ||
| 350 | explicit Impl(size_t backing_size_, size_t virtual_size_) | ||
| 351 | : backing_size{backing_size_}, virtual_size{virtual_size_} { | ||
| 352 | bool good = false; | ||
| 353 | SCOPE_EXIT({ | ||
| 354 | if (!good) { | ||
| 355 | Release(); | ||
| 356 | } | ||
| 357 | }); | ||
| 358 | |||
| 359 | // Backing memory initialization | ||
| 360 | fd = memfd_create("HostMemory", 0); | ||
| 361 | if (fd == -1) { | ||
| 362 | LOG_CRITICAL(HW_Memory, "memfd_create failed: {}", strerror(errno)); | ||
| 363 | throw std::bad_alloc{}; | ||
| 364 | } | ||
| 365 | |||
| 366 | // ftruncate is defined to extend the file with zeros | ||
| 367 | int ret = ftruncate(fd, backing_size); | ||
| 368 | if (ret != 0) { | ||
| 369 | LOG_CRITICAL(HW_Memory, "ftruncate failed with {}, are you out of memory?", | ||
| 370 | strerror(errno)); | ||
| 371 | throw std::bad_alloc{}; | ||
| 372 | } | ||
| 373 | |||
| 374 | backing_base = static_cast<u8*>( | ||
| 375 | mmap(nullptr, backing_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0)); | ||
| 376 | if (backing_base == MAP_FAILED) { | ||
| 377 | LOG_CRITICAL(HW_Memory, "mmap failed: {}", strerror(errno)); | ||
| 378 | throw std::bad_alloc{}; | ||
| 379 | } | ||
| 380 | |||
| 381 | // Virtual memory initialization | ||
| 382 | virtual_base = static_cast<u8*>( | ||
| 383 | mmap(nullptr, virtual_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)); | ||
| 384 | if (virtual_base == MAP_FAILED) { | ||
| 385 | LOG_CRITICAL(HW_Memory, "mmap failed: {}", strerror(errno)); | ||
| 386 | throw std::bad_alloc{}; | ||
| 387 | } | ||
| 388 | |||
| 389 | good = true; | ||
| 390 | } | ||
| 391 | |||
| 392 | ~Impl() { | ||
| 393 | Release(); | ||
| 394 | } | ||
| 395 | |||
| 396 | void Map(size_t virtual_offset, size_t host_offset, size_t length) { | ||
| 397 | |||
| 398 | void* ret = mmap(virtual_base + virtual_offset, length, PROT_READ | PROT_WRITE, | ||
| 399 | MAP_SHARED | MAP_FIXED, fd, host_offset); | ||
| 400 | ASSERT_MSG(ret != MAP_FAILED, "mmap failed: {}", strerror(errno)); | ||
| 401 | } | ||
| 402 | |||
| 403 | void Unmap(size_t virtual_offset, size_t length) { | ||
| 404 | // The method name is a slight misnomer: this still operates on the virtual range, | ||
| 405 | // and rather than unmapping it we want to reserve this memory again. | ||
| 406 | |||
| 407 | void* ret = mmap(virtual_base + virtual_offset, length, PROT_NONE, | ||
| 408 | MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0); | ||
| 409 | ASSERT_MSG(ret != MAP_FAILED, "mmap failed: {}", strerror(errno)); | ||
| 410 | } | ||
| 411 | |||
| 412 | void Protect(size_t virtual_offset, size_t length, bool read, bool write) { | ||
| 413 | int flags = 0; | ||
| 414 | if (read) { | ||
| 415 | flags |= PROT_READ; | ||
| 416 | } | ||
| 417 | if (write) { | ||
| 418 | flags |= PROT_WRITE; | ||
| 419 | } | ||
| 420 | int ret = mprotect(virtual_base + virtual_offset, length, flags); | ||
| 421 | ASSERT_MSG(ret == 0, "mprotect failed: {}", strerror(errno)); | ||
| 422 | } | ||
| 423 | |||
| 424 | const size_t backing_size; ///< Size of the backing memory in bytes | ||
| 425 | const size_t virtual_size; ///< Size of the virtual address placeholder in bytes | ||
| 426 | |||
| 427 | u8* backing_base{reinterpret_cast<u8*>(MAP_FAILED)}; | ||
| 428 | u8* virtual_base{reinterpret_cast<u8*>(MAP_FAILED)}; | ||
| 429 | |||
| 430 | private: | ||
| 431 | /// Release all resources in the object | ||
| 432 | void Release() { | ||
| 433 | if (virtual_base != MAP_FAILED) { | ||
| 434 | int ret = munmap(virtual_base, virtual_size); | ||
| 435 | ASSERT_MSG(ret == 0, "munmap failed: {}", strerror(errno)); | ||
| 436 | } | ||
| 437 | |||
| 438 | if (backing_base != MAP_FAILED) { | ||
| 439 | int ret = munmap(backing_base, backing_size); | ||
| 440 | ASSERT_MSG(ret == 0, "munmap failed: {}", strerror(errno)); | ||
| 441 | } | ||
| 442 | |||
| 443 | if (fd != -1) { | ||
| 444 | int ret = close(fd); | ||
| 445 | ASSERT_MSG(ret == 0, "close failed: {}", strerror(errno)); | ||
| 446 | } | ||
| 447 | } | ||
| 448 | |||
| 449 | int fd{-1}; // memfd file descriptor, -1 is the error value of memfd_create | ||
| 450 | }; | ||
| 451 | |||
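
The Linux path reduces to one memfd as the backing store plus MAP_SHARED | MAP_FIXED views of it punched into a large PROT_NONE reservation. The short sketch below (an illustration, not code from the commit, assuming a glibc new enough to expose memfd_create) shows the core property this relies on: two mappings of the same memfd offset mirror each other.

```cpp
#ifndef _GNU_SOURCE
#define _GNU_SOURCE // for memfd_create
#endif
#include <sys/mman.h>
#include <unistd.h>
#include <cstdio>

int main() {
    const size_t size = 0x1000;
    const int fd = memfd_create("mirror-demo", 0);
    if (fd == -1 || ftruncate(fd, size) != 0) {
        return 1;
    }
    // Two MAP_SHARED views of the same file offset share the same physical pages.
    auto* a = static_cast<unsigned char*>(
        mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0));
    auto* b = static_cast<unsigned char*>(
        mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0));
    a[0] = 0x42;                 // Write through the first mapping...
    std::printf("0x%x\n", b[0]); // ...and read it back through the second: prints 0x42.
    munmap(b, size);
    munmap(a, size);
    return close(fd);
}
```

Map() above is this same mmap with MAP_FIXED targeting a slot inside the big reservation, and Unmap() re-reserves the slot with a fresh anonymous PROT_NONE mapping rather than leaving a hole.
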
| 452 | #else // ^^^ Linux ^^^ vvv Generic vvv | ||
| 453 | |||
| 454 | class HostMemory::Impl { | ||
| 455 | public: | ||
| 456 | explicit Impl(size_t /* backing_size */, size_t /* virtual_size */) { | ||
| 457 | // This is just a placeholder. | ||
| 458 | // Please implement fastmem in a proper way on your platform. | ||
| 459 | throw std::bad_alloc{}; | ||
| 460 | } | ||
| 461 | |||
| 462 | void Map(size_t virtual_offset, size_t host_offset, size_t length) {} | ||
| 463 | |||
| 464 | void Unmap(size_t virtual_offset, size_t length) {} | ||
| 465 | |||
| 466 | void Protect(size_t virtual_offset, size_t length, bool read, bool write) {} | ||
| 467 | |||
| 468 | u8* backing_base{nullptr}; | ||
| 469 | u8* virtual_base{nullptr}; | ||
| 470 | }; | ||
| 471 | |||
| 472 | #endif // ^^^ Generic ^^^ | ||
| 473 | |||
| 474 | HostMemory::HostMemory(size_t backing_size_, size_t virtual_size_) | ||
| 475 | : backing_size(backing_size_), virtual_size(virtual_size_) { | ||
| 476 | try { | ||
| 477 | // Try to allocate a fastmem arena. | ||
| 478 | // The implementation will fail with std::bad_alloc on errors. | ||
| 479 | impl = std::make_unique<HostMemory::Impl>(AlignUp(backing_size, PageAlignment), | ||
| 480 | AlignUp(virtual_size, PageAlignment) + | ||
| 481 | 3 * HugePageSize); | ||
| 482 | backing_base = impl->backing_base; | ||
| 483 | virtual_base = impl->virtual_base; | ||
| 484 | |||
| 485 | if (virtual_base) { | ||
| 486 | virtual_base += 2 * HugePageSize - 1; | ||
| 487 | virtual_base -= reinterpret_cast<size_t>(virtual_base) & (HugePageSize - 1); | ||
| 488 | virtual_base_offset = virtual_base - impl->virtual_base; | ||
| 489 | } | ||
| 490 | |||
| 491 | } catch (const std::bad_alloc&) { | ||
| 492 | LOG_CRITICAL(HW_Memory, | ||
| 493 | "Fastmem unavailable, falling back to VirtualBuffer for memory allocation"); | ||
| 494 | fallback_buffer = std::make_unique<Common::VirtualBuffer<u8>>(backing_size); | ||
| 495 | backing_base = fallback_buffer->data(); | ||
| 496 | virtual_base = nullptr; | ||
| 497 | } | ||
| 498 | } | ||
| 499 | |||
| 500 | HostMemory::~HostMemory() = default; | ||
| 501 | |||
| 502 | HostMemory::HostMemory(HostMemory&&) noexcept = default; | ||
| 503 | |||
| 504 | HostMemory& HostMemory::operator=(HostMemory&&) noexcept = default; | ||
| 505 | |||
| 506 | void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length) { | ||
| 507 | ASSERT(virtual_offset % PageAlignment == 0); | ||
| 508 | ASSERT(host_offset % PageAlignment == 0); | ||
| 509 | ASSERT(length % PageAlignment == 0); | ||
| 510 | ASSERT(virtual_offset + length <= virtual_size); | ||
| 511 | ASSERT(host_offset + length <= backing_size); | ||
| 512 | if (length == 0 || !virtual_base || !impl) { | ||
| 513 | return; | ||
| 514 | } | ||
| 515 | impl->Map(virtual_offset + virtual_base_offset, host_offset, length); | ||
| 516 | } | ||
| 517 | |||
| 518 | void HostMemory::Unmap(size_t virtual_offset, size_t length) { | ||
| 519 | ASSERT(virtual_offset % PageAlignment == 0); | ||
| 520 | ASSERT(length % PageAlignment == 0); | ||
| 521 | ASSERT(virtual_offset + length <= virtual_size); | ||
| 522 | if (length == 0 || !virtual_base || !impl) { | ||
| 523 | return; | ||
| 524 | } | ||
| 525 | impl->Unmap(virtual_offset + virtual_base_offset, length); | ||
| 526 | } | ||
| 527 | |||
| 528 | void HostMemory::Protect(size_t virtual_offset, size_t length, bool read, bool write) { | ||
| 529 | ASSERT(virtual_offset % PageAlignment == 0); | ||
| 530 | ASSERT(length % PageAlignment == 0); | ||
| 531 | ASSERT(virtual_offset + length <= virtual_size); | ||
| 532 | if (length == 0 || !virtual_base || !impl) { | ||
| 533 | return; | ||
| 534 | } | ||
| 535 | impl->Protect(virtual_offset + virtual_base_offset, length, read, write); | ||
| 536 | } | ||
| 537 | |||
| 538 | } // namespace Common | ||
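
The pointer arithmetic in the HostMemory constructor can look opaque at first glance. The illustration below (not code from the commit) mirrors those two lines to show their effect: the published virtual_base ends up 2 MiB aligned and between one and two huge pages past the raw allocation, which is presumably why three extra huge pages of slack are requested from the Impl.

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    constexpr std::uintptr_t huge_page = 0x200000; // HugePageSize (2 MiB)
    // An arbitrary, unaligned example value standing in for impl->virtual_base.
    const std::uintptr_t raw_base = 0x7f0012345678;
    std::uintptr_t base = raw_base + 2 * huge_page - 1; // Same steps as the constructor
    base -= base & (huge_page - 1);                     // Round down to a 2 MiB boundary
    std::printf("2 MiB aligned: %d, offset into the slack: 0x%zx\n",
                (base & (huge_page - 1)) == 0,          // Always 1
                static_cast<size_t>(base - raw_base));  // Always in [0x200000, 0x400000)
    return 0;
}
```
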
diff --git a/src/common/host_memory.h b/src/common/host_memory.h
new file mode 100644
index 000000000..9b8326d0f
--- /dev/null
+++ b/src/common/host_memory.h
| @@ -0,0 +1,70 @@ | |||
| 1 | // Copyright 2019 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include <memory> | ||
| 8 | #include "common/common_types.h" | ||
| 9 | #include "common/virtual_buffer.h" | ||
| 10 | |||
| 11 | namespace Common { | ||
| 12 | |||
| 13 | /** | ||
| 14 | * A low-level linear memory buffer which supports multiple mappings. | ||
| 15 | * Its purpose is to rebuild a given sparse memory layout, including mirrors. | ||
| 16 | */ | ||
| 17 | class HostMemory { | ||
| 18 | public: | ||
| 19 | explicit HostMemory(size_t backing_size_, size_t virtual_size_); | ||
| 20 | ~HostMemory(); | ||
| 21 | |||
| 22 | /** | ||
| 23 | * Copy constructors. They shall return a copy of the buffer without the mappings. | ||
| 24 | * TODO: Implement them with COW if needed. | ||
| 25 | */ | ||
| 26 | HostMemory(const HostMemory& other) = delete; | ||
| 27 | HostMemory& operator=(const HostMemory& other) = delete; | ||
| 28 | |||
| 29 | /** | ||
| 30 | * Move constructors. They will move the buffer and the mappings to the new object. | ||
| 31 | */ | ||
| 32 | HostMemory(HostMemory&& other) noexcept; | ||
| 33 | HostMemory& operator=(HostMemory&& other) noexcept; | ||
| 34 | |||
| 35 | void Map(size_t virtual_offset, size_t host_offset, size_t length); | ||
| 36 | |||
| 37 | void Unmap(size_t virtual_offset, size_t length); | ||
| 38 | |||
| 39 | void Protect(size_t virtual_offset, size_t length, bool read, bool write); | ||
| 40 | |||
| 41 | [[nodiscard]] u8* BackingBasePointer() noexcept { | ||
| 42 | return backing_base; | ||
| 43 | } | ||
| 44 | [[nodiscard]] const u8* BackingBasePointer() const noexcept { | ||
| 45 | return backing_base; | ||
| 46 | } | ||
| 47 | |||
| 48 | [[nodiscard]] u8* VirtualBasePointer() noexcept { | ||
| 49 | return virtual_base; | ||
| 50 | } | ||
| 51 | [[nodiscard]] const u8* VirtualBasePointer() const noexcept { | ||
| 52 | return virtual_base; | ||
| 53 | } | ||
| 54 | |||
| 55 | private: | ||
| 56 | size_t backing_size{}; | ||
| 57 | size_t virtual_size{}; | ||
| 58 | |||
| 59 | // Low level handler for the platform dependent memory routines | ||
| 60 | class Impl; | ||
| 61 | std::unique_ptr<Impl> impl; | ||
| 62 | u8* backing_base{}; | ||
| 63 | u8* virtual_base{}; | ||
| 64 | size_t virtual_base_offset{}; | ||
| 65 | |||
| 66 | // Fallback if fastmem is not supported on this platform | ||
| 67 | std::unique_ptr<Common::VirtualBuffer<u8>> fallback_buffer; | ||
| 68 | }; | ||
| 69 | |||
| 70 | } // namespace Common | ||
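
As a quick orientation for the interface above, here is a hedged usage sketch (not from the commit): it maps the same host offset at two different virtual offsets, i.e. a mirror, so a write through the backing pointer is visible at both virtual addresses. The sizes are made up for the example.

```cpp
#include <cstdio>
#include "common/host_memory.h"

int main() {
    // Illustrative sizes; the emulator passes the guest DRAM and address-space sizes.
    Common::HostMemory memory(64ULL << 20 /* backing */, 1ULL << 32 /* virtual */);

    // Map host offset 0 at two different virtual offsets -> a mirror.
    memory.Map(0x0000, 0x0000, 0x1000);
    memory.Map(0x5000, 0x0000, 0x1000);

    memory.BackingBasePointer()[0] = 0x42;
    if (u8* virt = memory.VirtualBasePointer()) { // Null when the fallback buffer is in use
        std::printf("0x%x 0x%x\n", virt[0x0000], virt[0x5000]); // Both print 0x42
    }

    memory.Unmap(0x0000, 0x1000);
    memory.Unmap(0x5000, 0x1000);
    return 0;
}
```

On the fallback path only the backing buffer exists and Map()/Unmap() become no-ops, which is why the virtual pointer is checked before use.
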
diff --git a/src/common/page_table.h b/src/common/page_table.h
index e92b66b2b..8267e8b4d 100644
--- a/src/common/page_table.h
+++ b/src/common/page_table.h
| @@ -111,6 +111,8 @@ struct PageTable { | |||
| 111 | VirtualBuffer<u64> backing_addr; | 111 | VirtualBuffer<u64> backing_addr; |
| 112 | 112 | ||
| 113 | size_t current_address_space_width_in_bits; | 113 | size_t current_address_space_width_in_bits; |
| 114 | |||
| 115 | u8* fastmem_arena; | ||
| 114 | }; | 116 | }; |
| 115 | 117 | ||
| 116 | } // namespace Common | 118 | } // namespace Common |
diff --git a/src/common/settings.cpp b/src/common/settings.cpp
index bcb4e4be1..360e878d6 100644
--- a/src/common/settings.cpp
+++ b/src/common/settings.cpp
| @@ -90,6 +90,13 @@ bool IsGPULevelHigh() { | |||
| 90 | values.gpu_accuracy.GetValue() == GPUAccuracy::High; | 90 | values.gpu_accuracy.GetValue() == GPUAccuracy::High; |
| 91 | } | 91 | } |
| 92 | 92 | ||
| 93 | bool IsFastmemEnabled() { | ||
| 94 | if (values.cpu_accuracy.GetValue() == CPUAccuracy::DebugMode) { | ||
| 95 | return values.cpuopt_fastmem; | ||
| 96 | } | ||
| 97 | return true; | ||
| 98 | } | ||
| 99 | |||
| 93 | float Volume() { | 100 | float Volume() { |
| 94 | if (values.audio_muted) { | 101 | if (values.audio_muted) { |
| 95 | return 0.0f; | 102 | return 0.0f; |
| @@ -115,6 +122,7 @@ void RestoreGlobalState(bool is_powered_on) { | |||
| 115 | values.cpuopt_unsafe_unfuse_fma.SetGlobal(true); | 122 | values.cpuopt_unsafe_unfuse_fma.SetGlobal(true); |
| 116 | values.cpuopt_unsafe_reduce_fp_error.SetGlobal(true); | 123 | values.cpuopt_unsafe_reduce_fp_error.SetGlobal(true); |
| 117 | values.cpuopt_unsafe_inaccurate_nan.SetGlobal(true); | 124 | values.cpuopt_unsafe_inaccurate_nan.SetGlobal(true); |
| 125 | values.cpuopt_unsafe_fastmem_check.SetGlobal(true); | ||
| 118 | 126 | ||
| 119 | // Renderer | 127 | // Renderer |
| 120 | values.renderer_backend.SetGlobal(true); | 128 | values.renderer_backend.SetGlobal(true); |
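
IsFastmemEnabled() defaults to true and only consults cpuopt_fastmem when the CPU accuracy is set to DebugMode. A hypothetical caller-side sketch (the helper name and wiring below are assumptions, not code from this commit) would gate the arena on both the setting and whether the host arena was actually allocated:

```cpp
#include "common/host_memory.h"
#include "common/settings.h"

// Hypothetical helper: returns the fastmem arena base, or nullptr when fastmem is
// disabled by the user or unavailable on this host.
u8* GetFastmemArenaIfEnabled(Common::HostMemory& memory) {
    if (!Settings::IsFastmemEnabled()) {
        return nullptr; // cpuopt_fastmem was switched off under DebugMode accuracy.
    }
    // VirtualBasePointer() is null when the VirtualBuffer fallback is in use.
    return memory.VirtualBasePointer();
}
```
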
diff --git a/src/common/settings.h b/src/common/settings.h
index 48085b9a9..1af8c5ac2 100644
--- a/src/common/settings.h
+++ b/src/common/settings.h
| @@ -125,10 +125,12 @@ struct Values { | |||
| 125 | bool cpuopt_const_prop; | 125 | bool cpuopt_const_prop; |
| 126 | bool cpuopt_misc_ir; | 126 | bool cpuopt_misc_ir; |
| 127 | bool cpuopt_reduce_misalign_checks; | 127 | bool cpuopt_reduce_misalign_checks; |
| 128 | bool cpuopt_fastmem; | ||
| 128 | 129 | ||
| 129 | Setting<bool> cpuopt_unsafe_unfuse_fma; | 130 | Setting<bool> cpuopt_unsafe_unfuse_fma; |
| 130 | Setting<bool> cpuopt_unsafe_reduce_fp_error; | 131 | Setting<bool> cpuopt_unsafe_reduce_fp_error; |
| 131 | Setting<bool> cpuopt_unsafe_inaccurate_nan; | 132 | Setting<bool> cpuopt_unsafe_inaccurate_nan; |
| 133 | Setting<bool> cpuopt_unsafe_fastmem_check; | ||
| 132 | 134 | ||
| 133 | // Renderer | 135 | // Renderer |
| 134 | Setting<RendererBackend> renderer_backend; | 136 | Setting<RendererBackend> renderer_backend; |
| @@ -249,6 +251,8 @@ void SetConfiguringGlobal(bool is_global); | |||
| 249 | bool IsGPULevelExtreme(); | 251 | bool IsGPULevelExtreme(); |
| 250 | bool IsGPULevelHigh(); | 252 | bool IsGPULevelHigh(); |
| 251 | 253 | ||
| 254 | bool IsFastmemEnabled(); | ||
| 255 | |||
| 252 | float Volume(); | 256 | float Volume(); |
| 253 | 257 | ||
| 254 | std::string GetTimeZoneString(); | 258 | std::string GetTimeZoneString(); |