-rw-r--r--  src/common/CMakeLists.txt  |   2
-rw-r--r--  src/common/host_memory.cpp | 320
-rw-r--r--  src/common/host_memory.h   |  62
3 files changed, 384 insertions, 0 deletions
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index 2d403d471..97fbdcbf9
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -131,6 +131,8 @@ add_library(common STATIC
     hash.h
     hex_util.cpp
     hex_util.h
+    host_memory.cpp
+    host_memory.h
     intrusive_red_black_tree.h
     logging/backend.cpp
     logging/backend.h
diff --git a/src/common/host_memory.cpp b/src/common/host_memory.cpp
new file index 000000000..4f5086e90
--- /dev/null
+++ b/src/common/host_memory.cpp
@@ -0,0 +1,320 @@
#ifdef __linux__
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#elif defined(_WIN32) // ^^^ Linux ^^^ vvv Windows vvv
#ifdef _WIN32_WINNT
#undef _WIN32_WINNT
#endif
#define _WIN32_WINNT 0x0A00 // Windows 10

#include <windows.h>

#include <boost/icl/separate_interval_set.hpp>

#include <iterator>
#include <unordered_map>

#pragma comment(lib, "mincore.lib")

#endif // ^^^ Windows ^^^

#include <mutex>

#include "common/assert.h"
#include "common/host_memory.h"
#include "common/logging/log.h"

namespace Common {

constexpr size_t PageAlignment = 0x1000;

#ifdef _WIN32

class HostMemory::Impl {
public:
    explicit Impl(size_t backing_size_, size_t virtual_size_)
        : backing_size{backing_size_}, virtual_size{virtual_size_}, process{GetCurrentProcess()} {
        // Allocate backing file map
        backing_handle =
            CreateFileMapping2(INVALID_HANDLE_VALUE, nullptr, FILE_MAP_WRITE | FILE_MAP_READ,
                               PAGE_READWRITE, SEC_COMMIT, backing_size, nullptr, nullptr, 0);
        if (!backing_handle) {
            throw std::bad_alloc{};
        }
        // Allocate virtual memory for the backing file map as a placeholder
        backing_base = static_cast<u8*>(VirtualAlloc2(process, nullptr, backing_size,
                                                      MEM_RESERVE | MEM_RESERVE_PLACEHOLDER,
                                                      PAGE_NOACCESS, nullptr, 0));
        if (!backing_base) {
            Release();
            throw std::bad_alloc{};
        }
        // Map backing placeholder
        void* const ret = MapViewOfFile3(backing_handle, process, backing_base, 0, backing_size,
                                         MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, nullptr, 0);
        if (ret != backing_base) {
            Release();
            throw std::bad_alloc{};
        }
        // Allocate virtual address placeholder
        virtual_base = static_cast<u8*>(VirtualAlloc2(process, nullptr, virtual_size,
                                                      MEM_RESERVE | MEM_RESERVE_PLACEHOLDER,
                                                      PAGE_NOACCESS, nullptr, 0));
        if (!virtual_base) {
            Release();
            throw std::bad_alloc{};
        }
    }

    ~Impl() {
        Release();
    }

    void Map(size_t virtual_offset, size_t host_offset, size_t length) {
        std::unique_lock lock{placeholder_mutex};
        if (!IsNiechePlaceholder(virtual_offset, length)) {
            Split(virtual_offset, length);
        }
        ASSERT(placeholders.find({virtual_offset, virtual_offset + length}) == placeholders.end());
        TrackPlaceholder(virtual_offset, host_offset, length);

        MapView(virtual_offset, host_offset, length);
    }

    void Unmap(size_t virtual_offset, size_t length) {
        std::lock_guard lock{placeholder_mutex};

        // Unmap until there are no more placeholders
        while (UnmapOnePlaceholder(virtual_offset, length)) {
        }
    }

    void Protect(size_t virtual_offset, size_t length, bool read, bool write) {
        DWORD new_flags{};
        if (read && write) {
            new_flags = PAGE_READWRITE;
        } else if (read && !write) {
            new_flags = PAGE_READONLY;
        } else if (!read && !write) {
            new_flags = PAGE_NOACCESS;
        } else {
            UNIMPLEMENTED_MSG("Protection flag combination read={} write={}", read, write);
        }
        DWORD old_flags{};
        if (!VirtualProtect(virtual_base + virtual_offset, length, new_flags, &old_flags)) {
            LOG_CRITICAL(HW_Memory, "Failed to change virtual memory protection rules");
        }
    }

    const size_t backing_size; ///< Size of the backing memory in bytes
    const size_t virtual_size; ///< Size of the virtual address placeholder in bytes

    u8* backing_base{};
    u8* virtual_base{};

private:
    /// Release all resources in the object
    void Release() {
        if (!placeholders.empty()) {
            for (const auto& placeholder : placeholders) {
                if (!UnmapViewOfFile2(process, virtual_base + placeholder.lower(),
                                      MEM_PRESERVE_PLACEHOLDER)) {
                    LOG_CRITICAL(HW_Memory, "Failed to unmap virtual memory placeholder");
                }
            }
            Coalesce(0, virtual_size);
        }
        if (virtual_base) {
            if (!VirtualFree(virtual_base, 0, MEM_RELEASE)) {
                LOG_CRITICAL(HW_Memory, "Failed to free virtual memory");
            }
        }
        if (backing_base) {
            if (!UnmapViewOfFile2(process, backing_base, MEM_PRESERVE_PLACEHOLDER)) {
                LOG_CRITICAL(HW_Memory, "Failed to unmap backing memory placeholder");
            }
            if (!VirtualFreeEx(process, backing_base, 0, MEM_RELEASE)) {
                LOG_CRITICAL(HW_Memory, "Failed to free backing memory");
            }
        }
        if (!CloseHandle(backing_handle)) {
            LOG_CRITICAL(HW_Memory, "Failed to free backing memory file handle");
        }
    }

    /// Unmap one placeholder in the given range (partial unmaps are supported).
    /// Returns true if a placeholder was unmapped, or false once there are no more to unmap.
    bool UnmapOnePlaceholder(size_t virtual_offset, size_t length) {
        const auto it = placeholders.find({virtual_offset, virtual_offset + length});
        const auto begin = placeholders.begin();
        const auto end = placeholders.end();
        if (it == end) {
            return false;
        }
        const size_t placeholder_begin = it->lower();
        const size_t placeholder_end = it->upper();
        const size_t unmap_begin = std::max(virtual_offset, placeholder_begin);
        const size_t unmap_end = std::min(virtual_offset + length, placeholder_end);
        ASSERT(unmap_begin >= placeholder_begin && unmap_begin < placeholder_end);
        ASSERT(unmap_end <= placeholder_end && unmap_end > placeholder_begin);

        const auto host_pointer_it = placeholder_host_pointers.find(placeholder_begin);
        ASSERT(host_pointer_it != placeholder_host_pointers.end());
        const size_t host_offset = host_pointer_it->second;

        const bool split_left = unmap_begin > placeholder_begin;
        const bool split_right = unmap_end < placeholder_end;

        if (!UnmapViewOfFile2(process, virtual_base + placeholder_begin,
                              MEM_PRESERVE_PLACEHOLDER)) {
            LOG_CRITICAL(HW_Memory, "Failed to unmap placeholder");
        }
        // If we have to remap memory regions due to partial unmaps, we are in a data race as
        // Windows doesn't support remapping memory without unmapping first. Avoid adding any extra
        // logic within the panic region described below.

        // Panic region, we are in a data race right now
        if (split_left || split_right) {
            Split(unmap_begin, unmap_end - unmap_begin);
        }
        if (split_left) {
            MapView(placeholder_begin, host_offset, unmap_begin - placeholder_begin);
        }
        if (split_right) {
            MapView(unmap_end, host_offset + unmap_end - placeholder_begin,
                    placeholder_end - unmap_end);
        }
        // End panic region

        size_t coalesce_begin = unmap_begin;
        if (!split_left) {
            // Try to coalesce pages to the left
            coalesce_begin = it == begin ? 0 : std::prev(it)->upper();
            if (coalesce_begin != placeholder_begin) {
                Coalesce(coalesce_begin, unmap_end - coalesce_begin);
            }
        }
        if (!split_right) {
            // Try to coalesce pages to the right
            const auto next = std::next(it);
            const size_t next_begin = next == end ? virtual_size : next->lower();
            if (placeholder_end != next_begin) {
                // We can coalesce to the right
                Coalesce(coalesce_begin, next_begin - coalesce_begin);
            }
        }
        // Remove and reinsert placeholder trackers
        UntrackPlaceholder(it);
        if (split_left) {
            TrackPlaceholder(placeholder_begin, host_offset, unmap_begin - placeholder_begin);
        }
        if (split_right) {
            TrackPlaceholder(unmap_end, host_offset + unmap_end - placeholder_begin,
                             placeholder_end - unmap_end);
        }
        return true;
    }

    void MapView(size_t virtual_offset, size_t host_offset, size_t length) {
        if (!MapViewOfFile3(backing_handle, process, virtual_base + virtual_offset, host_offset,
                            length, MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, nullptr, 0)) {
            LOG_CRITICAL(HW_Memory, "Failed to map placeholder");
        }
    }

    void Split(size_t virtual_offset, size_t length) {
        if (!VirtualFreeEx(process, reinterpret_cast<LPVOID>(virtual_base + virtual_offset), length,
                           MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER)) {
            LOG_CRITICAL(HW_Memory, "Failed to split placeholder");
        }
    }

    void Coalesce(size_t virtual_offset, size_t length) {
        if (!VirtualFreeEx(process, reinterpret_cast<LPVOID>(virtual_base + virtual_offset), length,
                           MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS)) {
            LOG_CRITICAL(HW_Memory, "Failed to coalesce placeholders");
        }
    }

    void TrackPlaceholder(size_t virtual_offset, size_t host_offset, size_t length) {
        placeholders.insert({virtual_offset, virtual_offset + length});
        placeholder_host_pointers.emplace(virtual_offset, host_offset);
    }

    void UntrackPlaceholder(boost::icl::separate_interval_set<size_t>::iterator it) {
        // Erase the host pointer entry first; erasing from the interval set invalidates 'it'
        placeholder_host_pointers.erase(it->lower());
        placeholders.erase(it);
    }

    /// Return true when a given memory region is a "niche" and the placeholders don't have to be
    /// split.
    bool IsNiechePlaceholder(size_t virtual_offset, size_t length) const {
        const auto it = placeholders.upper_bound({virtual_offset, virtual_offset + length});
        if (it != placeholders.end() && it->lower() == virtual_offset + length) {
            const bool is_root = it == placeholders.begin() && virtual_offset == 0;
            return is_root || std::prev(it)->upper() == virtual_offset;
        }
        return false;
    }

    HANDLE process{};        ///< Current process handle
    HANDLE backing_handle{}; ///< File based backing memory

    std::mutex placeholder_mutex;                                 ///< Mutex for placeholders
    boost::icl::separate_interval_set<size_t> placeholders;       ///< Mapped placeholders
    std::unordered_map<size_t, size_t> placeholder_host_pointers; ///< Placeholder backing offset
};

#else

#error Please implement the host memory for your platform

#endif

HostMemory::HostMemory(size_t backing_size, size_t virtual_size)
    : impl{std::make_unique<HostMemory::Impl>(backing_size, virtual_size)},
      backing_base{impl->backing_base}, virtual_base{impl->virtual_base} {}

HostMemory::~HostMemory() = default;

HostMemory::HostMemory(HostMemory&&) noexcept = default;

HostMemory& HostMemory::operator=(HostMemory&&) noexcept = default;

void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length) {
    ASSERT(virtual_offset % PageAlignment == 0);
    ASSERT(host_offset % PageAlignment == 0);
    ASSERT(length % PageAlignment == 0);
    ASSERT(virtual_offset + length <= impl->virtual_size);
    ASSERT(host_offset + length <= impl->backing_size);
    if (length == 0) {
        return;
    }
    impl->Map(virtual_offset, host_offset, length);
}

void HostMemory::Unmap(size_t virtual_offset, size_t length) {
    ASSERT(virtual_offset % PageAlignment == 0);
    ASSERT(length % PageAlignment == 0);
    ASSERT(virtual_offset + length <= impl->virtual_size);
    if (length == 0) {
        return;
    }
    impl->Unmap(virtual_offset, length);
}

void HostMemory::Protect(size_t virtual_offset, size_t length, bool read, bool write) {
    ASSERT(virtual_offset % PageAlignment == 0);
    ASSERT(length % PageAlignment == 0);
    ASSERT(virtual_offset + length <= impl->virtual_size);
    if (length == 0) {
        return;
    }
    impl->Protect(virtual_offset, length, read, write);
}

} // namespace Common
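The Windows implementation above is built entirely on the Windows 10 placeholder primitives: CreateFileMapping2 for the backing section, VirtualAlloc2 with MEM_RESERVE_PLACEHOLDER for the reserved address range, MapViewOfFile3 with MEM_REPLACE_PLACEHOLDER to fill a placeholder, and VirtualFree/VirtualFreeEx with MEM_PRESERVE_PLACEHOLDER or MEM_COALESCE_PLACEHOLDERS to split and merge ranges. The standalone sketch below is not part of the diff; the section size and offsets are made-up example values. It shows how those same calls split one reserved placeholder and map the same backing offset into both halves, which is the mirroring behaviour HostMemory::Impl relies on.

// Standalone sketch (not from the diff): mirror one backing section into two
// adjacent views using the placeholder APIs used by HostMemory::Impl.
// Requires Windows 10 (_WIN32_WINNT 0x0A00) and linking against mincore.lib.
#define _WIN32_WINNT 0x0A00
#include <windows.h>

#include <cstdio>

#pragma comment(lib, "mincore.lib")

int main() {
    const SIZE_T size = 0x10000; // 64 KiB, an arbitrary example size
    HANDLE process = GetCurrentProcess();

    // Backing section, equivalent to backing_handle in Impl
    HANDLE section = CreateFileMapping2(INVALID_HANDLE_VALUE, nullptr,
                                        FILE_MAP_WRITE | FILE_MAP_READ, PAGE_READWRITE,
                                        SEC_COMMIT, size, nullptr, nullptr, 0);
    if (!section) {
        return 1;
    }
    // Reserve a placeholder big enough for two views, then split it in two
    char* base = static_cast<char*>(VirtualAlloc2(process, nullptr, 2 * size,
                                                  MEM_RESERVE | MEM_RESERVE_PLACEHOLDER,
                                                  PAGE_NOACCESS, nullptr, 0));
    if (!base || !VirtualFreeEx(process, base, size, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER)) {
        return 1;
    }
    // Map the same section offset into both placeholders: two mirrored views
    void* lo = MapViewOfFile3(section, process, base, 0, size, MEM_REPLACE_PLACEHOLDER,
                              PAGE_READWRITE, nullptr, 0);
    void* hi = MapViewOfFile3(section, process, base + size, 0, size, MEM_REPLACE_PLACEHOLDER,
                              PAGE_READWRITE, nullptr, 0);
    if (!lo || !hi) {
        return 1;
    }
    static_cast<char*>(lo)[0] = 42;
    std::printf("mirror sees %d\n", static_cast<char*>(hi)[0]); // prints 42

    // Tear down in the same order as Impl::Release: unmap views, coalesce, free
    UnmapViewOfFile2(process, lo, MEM_PRESERVE_PLACEHOLDER);
    UnmapViewOfFile2(process, hi, MEM_PRESERVE_PLACEHOLDER);
    VirtualFreeEx(process, base, 2 * size, MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS);
    VirtualFree(base, 0, MEM_RELEASE);
    CloseHandle(section);
    return 0;
}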
diff --git a/src/common/host_memory.h b/src/common/host_memory.h
new file index 000000000..98005df7a
--- /dev/null
+++ b/src/common/host_memory.h
@@ -0,0 +1,62 @@
// Copyright 2019 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include "common/common_types.h"

namespace Common {

/**
 * A low-level linear memory buffer which supports multiple mappings.
 * Its purpose is to rebuild a given sparse memory layout, including mirrors.
 */
class HostMemory {
public:
    explicit HostMemory(size_t backing_size, size_t virtual_size);
    ~HostMemory();

    /**
     * Copy constructor and copy assignment are deleted. If implemented, they should return a
     * copy of the buffer without the mappings.
     * TODO: Implement them with COW if needed.
     */
    HostMemory(const HostMemory& other) = delete;
    HostMemory& operator=(const HostMemory& other) = delete;

    /**
     * Move constructor and move assignment. They move the buffer and the mappings to the new
     * object.
     */
    HostMemory(HostMemory&& other) noexcept;
    HostMemory& operator=(HostMemory&& other) noexcept;

    void Map(size_t virtual_offset, size_t host_offset, size_t length);

    void Unmap(size_t virtual_offset, size_t length);

    void Protect(size_t virtual_offset, size_t length, bool read, bool write);

    [[nodiscard]] u8* BackingBasePointer() noexcept {
        return backing_base;
    }
    [[nodiscard]] const u8* BackingBasePointer() const noexcept {
        return backing_base;
    }

    [[nodiscard]] u8* VirtualBasePointer() noexcept {
        return virtual_base;
    }
    [[nodiscard]] const u8* VirtualBasePointer() const noexcept {
        return virtual_base;
    }

private:
    // Low level handler for the platform dependent memory routines
    class Impl;
    std::unique_ptr<Impl> impl;
    u8* backing_base{};
    u8* virtual_base{};
};

} // namespace Common
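As a quick illustration of the interface declared above, here is a minimal usage sketch. The backing size, virtual size, and offsets are invented example values, not values taken from the diff or from yuzu; every argument must be a multiple of PageAlignment (0x1000) and must stay inside the backing and virtual sizes asserted in host_memory.cpp.

// Usage sketch with invented sizes and offsets (assumptions, not from the diff)
#include "common/host_memory.h"

void HostMemoryExample() {
    Common::HostMemory mem(0x4000'0000, 0x1'0000'0000); // 1 GiB backing, 4 GiB virtual (example)

    // Map backing pages [0x0, 0x2000) at virtual offset 0x1000, then mirror the
    // same backing pages a second time at virtual offset 0x10000.
    mem.Map(0x1000, 0x0, 0x2000);
    mem.Map(0x10000, 0x0, 0x2000);

    u8* const virt = mem.VirtualBasePointer();
    virt[0x1000] = 0x7F; // also readable through the mirror at virt[0x10000]

    mem.Protect(0x10000, 0x2000, true, false); // make the mirror read-only
    mem.Unmap(0x1000, 0x2000);                 // the mirror stays mapped
}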