Diffstat (limited to 'src/common')
| -rw-r--r-- | src/common/CMakeLists.txt | 33 |
| -rw-r--r-- | src/common/address_space.cpp | 10 |
| -rw-r--r-- | src/common/address_space.h | 150 |
| -rw-r--r-- | src/common/address_space.inc | 366 |
| -rw-r--r-- | src/common/algorithm.h | 8 |
| -rw-r--r-- | src/common/hash.h | 7 |
| -rw-r--r-- | src/common/input.h | 27 |
| -rw-r--r-- | src/common/logging/backend.cpp | 2 |
| -rw-r--r-- | src/common/multi_level_page_table.cpp | 9 |
| -rw-r--r-- | src/common/multi_level_page_table.h | 78 |
| -rw-r--r-- | src/common/multi_level_page_table.inc | 84 |
| -rw-r--r-- | src/common/settings.h | 1 |
12 files changed, 747 insertions, 28 deletions
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index 68436a4bc..a02696873 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
| @@ -14,34 +14,11 @@ if (DEFINED ENV{DISPLAYVERSION}) | |||
| 14 | set(DISPLAY_VERSION $ENV{DISPLAYVERSION}) | 14 | set(DISPLAY_VERSION $ENV{DISPLAYVERSION}) |
| 15 | endif () | 15 | endif () |
| 16 | 16 | ||
| 17 | # Pass the path to git to the GenerateSCMRev.cmake as well | 17 | include(GenerateSCMRev) |
| 18 | find_package(Git QUIET) | ||
| 19 | |||
| 20 | add_custom_command(OUTPUT scm_rev.cpp | ||
| 21 | COMMAND ${CMAKE_COMMAND} | ||
| 22 | -DSRC_DIR=${PROJECT_SOURCE_DIR} | ||
| 23 | -DBUILD_REPOSITORY=${BUILD_REPOSITORY} | ||
| 24 | -DTITLE_BAR_FORMAT_IDLE=${TITLE_BAR_FORMAT_IDLE} | ||
| 25 | -DTITLE_BAR_FORMAT_RUNNING=${TITLE_BAR_FORMAT_RUNNING} | ||
| 26 | -DBUILD_TAG=${BUILD_TAG} | ||
| 27 | -DBUILD_ID=${DISPLAY_VERSION} | ||
| 28 | -DGIT_REF_SPEC=${GIT_REF_SPEC} | ||
| 29 | -DGIT_REV=${GIT_REV} | ||
| 30 | -DGIT_DESC=${GIT_DESC} | ||
| 31 | -DGIT_BRANCH=${GIT_BRANCH} | ||
| 32 | -DBUILD_FULLNAME=${BUILD_FULLNAME} | ||
| 33 | -DGIT_EXECUTABLE=${GIT_EXECUTABLE} | ||
| 34 | -P ${PROJECT_SOURCE_DIR}/CMakeModules/GenerateSCMRev.cmake | ||
| 35 | DEPENDS | ||
| 36 | # Check that the scm_rev files haven't changed | ||
| 37 | "${CMAKE_CURRENT_SOURCE_DIR}/scm_rev.cpp.in" | ||
| 38 | "${CMAKE_CURRENT_SOURCE_DIR}/scm_rev.h" | ||
| 39 | # technically we should regenerate if the git version changed, but its not worth the effort imo | ||
| 40 | "${PROJECT_SOURCE_DIR}/CMakeModules/GenerateSCMRev.cmake" | ||
| 41 | VERBATIM | ||
| 42 | ) | ||
| 43 | 18 | ||
| 44 | add_library(common STATIC | 19 | add_library(common STATIC |
| 20 | address_space.cpp | ||
| 21 | address_space.h | ||
| 45 | algorithm.h | 22 | algorithm.h |
| 46 | alignment.h | 23 | alignment.h |
| 47 | announce_multiplayer_room.h | 24 | announce_multiplayer_room.h |
| @@ -106,6 +83,8 @@ add_library(common STATIC | |||
| 106 | microprofile.cpp | 83 | microprofile.cpp |
| 107 | microprofile.h | 84 | microprofile.h |
| 108 | microprofileui.h | 85 | microprofileui.h |
| 86 | multi_level_page_table.cpp | ||
| 87 | multi_level_page_table.h | ||
| 109 | nvidia_flags.cpp | 88 | nvidia_flags.cpp |
| 110 | nvidia_flags.h | 89 | nvidia_flags.h |
| 111 | page_table.cpp | 90 | page_table.cpp |
| @@ -117,7 +96,7 @@ add_library(common STATIC | |||
| 117 | quaternion.h | 96 | quaternion.h |
| 118 | reader_writer_queue.h | 97 | reader_writer_queue.h |
| 119 | ring_buffer.h | 98 | ring_buffer.h |
| 120 | scm_rev.cpp | 99 | ${CMAKE_CURRENT_BINARY_DIR}/scm_rev.cpp |
| 121 | scm_rev.h | 100 | scm_rev.h |
| 122 | scope_exit.h | 101 | scope_exit.h |
| 123 | settings.cpp | 102 | settings.cpp |
diff --git a/src/common/address_space.cpp b/src/common/address_space.cpp
new file mode 100644
index 000000000..866e78dbe
--- /dev/null
+++ b/src/common/address_space.cpp
| @@ -0,0 +1,10 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2021 Skyline Team and Contributors | ||
| 2 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | |||
| 4 | #include "common/address_space.inc" | ||
| 5 | |||
| 6 | namespace Common { | ||
| 7 | |||
| 8 | template class Common::FlatAllocator<u32, 0, 32>; | ||
| 9 | |||
| 10 | } | ||
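Note: address_space.cpp exists only to pin down the template instantiation: the member definitions live in address_space.inc, and this translation unit compiles them once for the specialisation the rest of the codebase links against. A minimal sketch of the same pattern, with a hypothetical Table class standing in for FlatAllocator:

    // table.h - declarations only; cheap to include anywhere
    #pragma once
    #include <vector>

    template <typename T>
    class Table {
    public:
        void Insert(T value);

    private:
        std::vector<T> values;
    };

    // table.inc - member definitions; only instantiating .cpp files include this
    #include "table.h"

    template <typename T>
    void Table<T>::Insert(T value) {
        values.push_back(value);
    }

    // table.cpp - compiles the definitions once per specialisation in use
    #include "table.inc"

    template class Table<unsigned int>;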
diff --git a/src/common/address_space.h b/src/common/address_space.h
new file mode 100644
index 000000000..9222b2fdc
--- /dev/null
+++ b/src/common/address_space.h
| @@ -0,0 +1,150 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2021 Skyline Team and Contributors | ||
| 2 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <concepts> | ||
| 7 | #include <functional> | ||
| 8 | #include <mutex> | ||
| 9 | #include <vector> | ||
| 10 | |||
| 11 | #include "common/common_types.h" | ||
| 12 | |||
| 13 | namespace Common { | ||
| 14 | template <typename VaType, size_t AddressSpaceBits> | ||
| 15 | concept AddressSpaceValid = std::is_unsigned_v<VaType> && sizeof(VaType) * 8 >= AddressSpaceBits; | ||
| 16 | |||
| 17 | struct EmptyStruct {}; | ||
| 18 | |||
| 19 | /** | ||
| 20 | * @brief FlatAddressSpaceMap provides a generic VA->PA mapping implementation using a sorted vector | ||
| 21 | */ | ||
| 22 | template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa, | ||
| 23 | bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo = EmptyStruct> | ||
| 24 | requires AddressSpaceValid<VaType, AddressSpaceBits> | ||
| 25 | class FlatAddressSpaceMap { | ||
| 26 | public: | ||
| 27 | /// The maximum VA that this AS can technically reach | ||
| 28 | static constexpr VaType VaMaximum{(1ULL << (AddressSpaceBits - 1)) + | ||
| 29 | ((1ULL << (AddressSpaceBits - 1)) - 1)}; | ||
| 30 | |||
| 31 | explicit FlatAddressSpaceMap(VaType va_limit, | ||
| 32 | std::function<void(VaType, VaType)> unmap_callback = {}); | ||
| 33 | |||
| 34 | FlatAddressSpaceMap() = default; | ||
| 35 | |||
| 36 | void Map(VaType virt, PaType phys, VaType size, ExtraBlockInfo extra_info = {}) { | ||
| 37 | std::scoped_lock lock(block_mutex); | ||
| 38 | MapLocked(virt, phys, size, extra_info); | ||
| 39 | } | ||
| 40 | |||
| 41 | void Unmap(VaType virt, VaType size) { | ||
| 42 | std::scoped_lock lock(block_mutex); | ||
| 43 | UnmapLocked(virt, size); | ||
| 44 | } | ||
| 45 | |||
| 46 | VaType GetVALimit() const { | ||
| 47 | return va_limit; | ||
| 48 | } | ||
| 49 | |||
| 50 | protected: | ||
| 51 | /** | ||
| 52 | * @brief Represents a block of memory in the AS, the physical mapping is contiguous until | ||
| 53 | * another block with a different phys address is hit | ||
| 54 | */ | ||
| 55 | struct Block { | ||
| 56 | /// VA of the block | ||
| 57 | VaType virt{UnmappedVa}; | ||
| 58 | /// PA of the block, will increase 1-1 with VA until a new block is encountered | ||
| 59 | PaType phys{UnmappedPa}; | ||
| 60 | [[no_unique_address]] ExtraBlockInfo extra_info; | ||
| 61 | |||
| 62 | Block() = default; | ||
| 63 | |||
| 64 | Block(VaType virt_, PaType phys_, ExtraBlockInfo extra_info_) | ||
| 65 | : virt(virt_), phys(phys_), extra_info(extra_info_) {} | ||
| 66 | |||
| 67 | bool Valid() const { | ||
| 68 | return virt != UnmappedVa; | ||
| 69 | } | ||
| 70 | |||
| 71 | bool Mapped() const { | ||
| 72 | return phys != UnmappedPa; | ||
| 73 | } | ||
| 74 | |||
| 75 | bool Unmapped() const { | ||
| 76 | return phys == UnmappedPa; | ||
| 77 | } | ||
| 78 | |||
| 79 | bool operator<(const VaType& p_virt) const { | ||
| 80 | return virt < p_virt; | ||
| 81 | } | ||
| 82 | }; | ||
| 83 | |||
| 84 | /** | ||
| 85 | * @brief Maps a PA range into the given AS region | ||
| 86 | * @note block_mutex MUST be locked when calling this | ||
| 87 | */ | ||
| 88 | void MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInfo extra_info); | ||
| 89 | |||
| 90 | /** | ||
| 91 | * @brief Unmaps the given range and merges it with other unmapped regions | ||
| 92 | * @note block_mutex MUST be locked when calling this | ||
| 93 | */ | ||
| 94 | void UnmapLocked(VaType virt, VaType size); | ||
| 95 | |||
| 96 | std::mutex block_mutex; | ||
| 97 | std::vector<Block> blocks{Block{}}; | ||
| 98 | |||
| 99 | /// a soft limit on the maximum VA of the AS | ||
| 100 | VaType va_limit{VaMaximum}; | ||
| 101 | |||
| 102 | private: | ||
| 103 | /// Callback called when the mappings in a region have changed | ||
| 104 | std::function<void(VaType, VaType)> unmap_callback{}; | ||
| 105 | }; | ||
| 106 | |||
| 107 | /** | ||
| 108 | * @brief FlatAllocator specialises FlatAddressSpaceMap to work as an allocator, with an | ||
| 109 | * initial, fast linear pass and a subsequent slower pass that iterates until it finds a free block | ||
| 110 | */ | ||
| 111 | template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits> | ||
| 112 | requires AddressSpaceValid<VaType, AddressSpaceBits> | ||
| 113 | class FlatAllocator | ||
| 114 | : public FlatAddressSpaceMap<VaType, UnmappedVa, bool, false, false, AddressSpaceBits> { | ||
| 115 | private: | ||
| 116 | using Base = FlatAddressSpaceMap<VaType, UnmappedVa, bool, false, false, AddressSpaceBits>; | ||
| 117 | |||
| 118 | public: | ||
| 119 | explicit FlatAllocator(VaType virt_start, VaType va_limit = Base::VaMaximum); | ||
| 120 | |||
| 121 | /** | ||
| 122 | * @brief Allocates a region in the AS of the given size and returns its address | ||
| 123 | */ | ||
| 124 | VaType Allocate(VaType size); | ||
| 125 | |||
| 126 | /** | ||
| 127 | * @brief Marks the given region in the AS as allocated | ||
| 128 | */ | ||
| 129 | void AllocateFixed(VaType virt, VaType size); | ||
| 130 | |||
| 131 | /** | ||
| 132 | * @brief Frees an AS region so it can be used again | ||
| 133 | */ | ||
| 134 | void Free(VaType virt, VaType size); | ||
| 135 | |||
| 136 | VaType GetVAStart() const { | ||
| 137 | return virt_start; | ||
| 138 | } | ||
| 139 | |||
| 140 | private: | ||
| 141 | /// The base VA of the allocator, no allocations will be below this | ||
| 142 | VaType virt_start; | ||
| 143 | |||
| 144 | /** | ||
| 145 | * The end address for the initial linear allocation pass | ||
| 146 | * Once this reaches the AS limit the slower allocation path will be used | ||
| 147 | */ | ||
| 148 | VaType current_linear_alloc_end; | ||
| 149 | }; | ||
| 150 | } // namespace Common | ||
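For orientation, a minimal usage sketch of the FlatAllocator declared above. The start address and sizes are illustrative; the u32/0/32 parameters match the specialisation that address_space.cpp explicitly instantiates:

    #include "common/address_space.h"
    #include "common/common_types.h"

    void AllocatorSketch() {
        // Hypothetical 32-bit address space whose allocations start at 0x10000
        Common::FlatAllocator<u32, 0, 32> allocator{0x10000};

        const u32 va = allocator.Allocate(0x2000); // linear pass first, gap search on overflow
        allocator.AllocateFixed(0x80000, 0x1000);  // pin a caller-chosen region
        allocator.Free(va, 0x2000);                // freed ranges merge back into unmapped runs
    }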
diff --git a/src/common/address_space.inc b/src/common/address_space.inc
new file mode 100644
index 000000000..2195dabd5
--- /dev/null
+++ b/src/common/address_space.inc
| @@ -0,0 +1,366 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2021 Skyline Team and Contributors | ||
| 2 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | |||
| 4 | #include "common/address_space.h" | ||
| 5 | #include "common/assert.h" | ||
| 6 | |||
| 7 | #define MAP_MEMBER(returnType) \ | ||
| 8 | template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa, \ | ||
| 9 | bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo> \ | ||
| 10 | requires AddressSpaceValid<VaType, AddressSpaceBits> returnType FlatAddressSpaceMap< \ | ||
| 11 | VaType, UnmappedVa, PaType, UnmappedPa, PaContigSplit, AddressSpaceBits, ExtraBlockInfo> | ||
| 12 | #define MAP_MEMBER_CONST() \ | ||
| 13 | template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa, \ | ||
| 14 | bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo> \ | ||
| 15 | requires AddressSpaceValid<VaType, AddressSpaceBits> FlatAddressSpaceMap< \ | ||
| 16 | VaType, UnmappedVa, PaType, UnmappedPa, PaContigSplit, AddressSpaceBits, ExtraBlockInfo> | ||
| 17 | |||
| 18 | #define MM_MEMBER(returnType) \ | ||
| 19 | template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits> \ | ||
| 20 | requires AddressSpaceValid<VaType, AddressSpaceBits> returnType \ | ||
| 21 | FlatMemoryManager<VaType, UnmappedVa, AddressSpaceBits> | ||
| 22 | |||
| 23 | #define ALLOC_MEMBER(returnType) \ | ||
| 24 | template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits> \ | ||
| 25 | requires AddressSpaceValid<VaType, AddressSpaceBits> returnType \ | ||
| 26 | FlatAllocator<VaType, UnmappedVa, AddressSpaceBits> | ||
| 27 | #define ALLOC_MEMBER_CONST() \ | ||
| 28 | template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits> \ | ||
| 29 | requires AddressSpaceValid<VaType, AddressSpaceBits> \ | ||
| 30 | FlatAllocator<VaType, UnmappedVa, AddressSpaceBits> | ||
| 31 | |||
| 32 | namespace Common { | ||
| 33 | MAP_MEMBER_CONST()::FlatAddressSpaceMap(VaType va_limit_, | ||
| 34 | std::function<void(VaType, VaType)> unmap_callback_) | ||
| 35 | : va_limit{va_limit_}, unmap_callback{std::move(unmap_callback_)} { | ||
| 36 | if (va_limit > VaMaximum) { | ||
| 37 | ASSERT_MSG(false, "Invalid VA limit!"); | ||
| 38 | } | ||
| 39 | } | ||
| 40 | |||
| 41 | MAP_MEMBER(void)::MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInfo extra_info) { | ||
| 42 | VaType virt_end{virt + size}; | ||
| 43 | |||
| 44 | if (virt_end > va_limit) { | ||
| 45 | ASSERT_MSG(false, | ||
| 46 | "Trying to map a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}", | ||
| 47 | virt_end, va_limit); | ||
| 48 | } | ||
| 49 | |||
| 50 | auto block_end_successor{std::lower_bound(blocks.begin(), blocks.end(), virt_end)}; | ||
| 51 | if (block_end_successor == blocks.begin()) { | ||
| 52 | ASSERT_MSG(false, "Trying to map a block before the VA start: virt_end: 0x{:X}", virt_end); | ||
| 53 | } | ||
| 54 | |||
| 55 | auto block_end_predecessor{std::prev(block_end_successor)}; | ||
| 56 | |||
| 57 | if (block_end_successor != blocks.end()) { | ||
| 58 | // We have blocks in front of us; if one is directly in front then we don't have to add a | ||
| 59 | // tail | ||
| 60 | if (block_end_successor->virt != virt_end) { | ||
| 61 | PaType tailPhys{[&]() -> PaType { | ||
| 62 | if constexpr (!PaContigSplit) { | ||
| 63 | // Always propagate unmapped regions rather than calculating offset | ||
| 64 | return block_end_predecessor->phys; | ||
| 65 | } else { | ||
| 66 | if (block_end_predecessor->Unmapped()) { | ||
| 67 | // Always propagate unmapped regions rather than calculating offset | ||
| 68 | return block_end_predecessor->phys; | ||
| 69 | } else { | ||
| 70 | return block_end_predecessor->phys + virt_end - block_end_predecessor->virt; | ||
| 71 | } | ||
| 72 | } | ||
| 73 | }()}; | ||
| 74 | |||
| 75 | if (block_end_predecessor->virt >= virt) { | ||
| 76 | // If this block's start would be overlapped by the map then reuse it as a tail | ||
| 77 | // block | ||
| 78 | block_end_predecessor->virt = virt_end; | ||
| 79 | block_end_predecessor->phys = tailPhys; | ||
| 80 | // extra_info deliberately stays as-is for the reused tail block (the original self-assignment was a no-op) | ||
| 81 | |||
| 82 | // No longer the predecessor | ||
| 83 | block_end_successor = block_end_predecessor--; | ||
| 84 | } else { | ||
| 85 | // Else insert a new one and we're done | ||
| 86 | blocks.insert(block_end_successor, | ||
| 87 | {Block(virt, phys, extra_info), | ||
| 88 | Block(virt_end, tailPhys, block_end_predecessor->extra_info)}); | ||
| 89 | if (unmap_callback) { | ||
| 90 | unmap_callback(virt, size); | ||
| 91 | } | ||
| 92 | |||
| 93 | return; | ||
| 94 | } | ||
| 95 | } | ||
| 96 | } else { | ||
| 97 | // block_end_predecessor will always be unmapped, as the block vector has to be terminated by an | ||
| 98 | // unmapped chunk | ||
| 99 | if (block_end_predecessor != blocks.begin() && block_end_predecessor->virt >= virt) { | ||
| 100 | // Move the unmapped block start backwards | ||
| 101 | block_end_predecessor->virt = virt_end; | ||
| 102 | |||
| 103 | // No longer the predecessor | ||
| 104 | block_end_successor = block_end_predecessor--; | ||
| 105 | } else { | ||
| 106 | // Else insert a new one and we're done | ||
| 107 | blocks.insert(block_end_successor, | ||
| 108 | {Block(virt, phys, extra_info), Block(virt_end, UnmappedPa, {})}); | ||
| 109 | if (unmap_callback) { | ||
| 110 | unmap_callback(virt, size); | ||
| 111 | } | ||
| 112 | |||
| 113 | return; | ||
| 114 | } | ||
| 115 | } | ||
| 116 | |||
| 117 | auto block_start_successor{block_end_successor}; | ||
| 118 | |||
| 119 | // Walk the block vector to find the start successor as this is more efficient than another | ||
| 120 | // binary search in most scenarios | ||
| 121 | while (std::prev(block_start_successor)->virt >= virt) { | ||
| 122 | block_start_successor--; | ||
| 123 | } | ||
| 124 | |||
| 125 | // Check that the start successor is either the end block or something in between | ||
| 126 | if (block_start_successor->virt > virt_end) { | ||
| 127 | ASSERT_MSG(false, "Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt); | ||
| 128 | } else if (block_start_successor->virt == virt_end) { | ||
| 129 | // We need to create a new block as there are none spare that we would overwrite | ||
| 130 | blocks.insert(block_start_successor, Block(virt, phys, extra_info)); | ||
| 131 | } else { | ||
| 132 | // Erase overwritten blocks | ||
| 133 | if (auto eraseStart{std::next(block_start_successor)}; eraseStart != block_end_successor) { | ||
| 134 | blocks.erase(eraseStart, block_end_successor); | ||
| 135 | } | ||
| 136 | |||
| 137 | // Reuse a block that would otherwise be overwritten as a start block | ||
| 138 | block_start_successor->virt = virt; | ||
| 139 | block_start_successor->phys = phys; | ||
| 140 | block_start_successor->extra_info = extra_info; | ||
| 141 | } | ||
| 142 | |||
| 143 | if (unmap_callback) { | ||
| 144 | unmap_callback(virt, size); | ||
| 145 | } | ||
| 146 | } | ||
| 147 | |||
| 148 | MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) { | ||
| 149 | VaType virt_end{virt + size}; | ||
| 150 | |||
| 151 | if (virt_end > va_limit) { | ||
| 152 | ASSERT_MSG(false, | ||
| 153 | "Trying to map a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}", | ||
| 154 | virt_end, va_limit); | ||
| 155 | } | ||
| 156 | |||
| 157 | auto block_end_successor{std::lower_bound(blocks.begin(), blocks.end(), virt_end)}; | ||
| 158 | if (block_end_successor == blocks.begin()) { | ||
| 159 | ASSERT_MSG(false, "Trying to unmap a block before the VA start: virt_end: 0x{:X}", | ||
| 160 | virt_end); | ||
| 161 | } | ||
| 162 | |||
| 163 | auto block_end_predecessor{std::prev(block_end_successor)}; | ||
| 164 | |||
| 165 | auto walk_back_to_predecessor{[&](auto iter) { | ||
| 166 | while (iter->virt >= virt) { | ||
| 167 | iter--; | ||
| 168 | } | ||
| 169 | |||
| 170 | return iter; | ||
| 171 | }}; | ||
| 172 | |||
| 173 | auto erase_blocks_with_end_unmapped{[&](auto unmappedEnd) { | ||
| 174 | auto block_start_predecessor{walk_back_to_predecessor(unmappedEnd)}; | ||
| 175 | auto block_start_successor{std::next(block_start_predecessor)}; | ||
| 176 | |||
| 177 | auto eraseEnd{[&]() { | ||
| 178 | if (block_start_predecessor->Unmapped()) { | ||
| 179 | // If the start predecessor is unmapped then we can erase everything in our region | ||
| 180 | // and be done | ||
| 181 | return std::next(unmappedEnd); | ||
| 182 | } else { | ||
| 183 | // Else reuse the end predecessor as the start of our unmapped region then erase all | ||
| 184 | // up to it | ||
| 185 | unmappedEnd->virt = virt; | ||
| 186 | return unmappedEnd; | ||
| 187 | } | ||
| 188 | }()}; | ||
| 189 | |||
| 190 | // We can't have two adjacent unmapped regions | ||
| 191 | if (eraseEnd != blocks.end() && | ||
| 192 | (eraseEnd == block_start_successor || | ||
| 193 | (block_start_predecessor->Unmapped() && eraseEnd->Unmapped()))) { | ||
| 194 | ASSERT_MSG(false, "Multiple contiguous unmapped regions are unsupported!"); | ||
| 195 | } | ||
| 196 | |||
| 197 | blocks.erase(block_start_successor, eraseEnd); | ||
| 198 | }}; | ||
| 199 | |||
| 200 | // We can avoid any splitting logic in these cases | ||
| 201 | if (block_end_predecessor->Unmapped()) { | ||
| 202 | if (block_end_predecessor->virt > virt) { | ||
| 203 | erase_blocks_with_end_unmapped(block_end_predecessor); | ||
| 204 | } | ||
| 205 | |||
| 206 | if (unmap_callback) { | ||
| 207 | unmap_callback(virt, size); | ||
| 208 | } | ||
| 209 | |||
| 210 | return; // The region is unmapped, bail out early | ||
| 211 | } else if (block_end_successor->virt == virt_end && block_end_successor->Unmapped()) { | ||
| 212 | erase_blocks_with_end_unmapped(block_end_successor); | ||
| 213 | |||
| 214 | if (unmap_callback) { | ||
| 215 | unmap_callback(virt, size); | ||
| 216 | } | ||
| 217 | |||
| 218 | return; // The region is unmapped here and doesn't need splitting, bail out early | ||
| 219 | } else if (block_end_successor == blocks.end()) { | ||
| 220 | // This should never happen as the end should always follow an unmapped block | ||
| 221 | ASSERT_MSG(false, "Unexpected Memory Manager state!"); | ||
| 222 | } else if (block_end_successor->virt != virt_end) { | ||
| 223 | // If one block is directly in front then we don't have to add a tail | ||
| 224 | |||
| 225 | // The previous block is mapped so we will need to add a tail with an offset | ||
| 226 | PaType tailPhys{[&]() { | ||
| 227 | if constexpr (PaContigSplit) { | ||
| 228 | return block_end_predecessor->phys + virt_end - block_end_predecessor->virt; | ||
| 229 | } else { | ||
| 230 | return block_end_predecessor->phys; | ||
| 231 | } | ||
| 232 | }()}; | ||
| 233 | |||
| 234 | if (block_end_predecessor->virt >= virt) { | ||
| 235 | // If this block's start would be overlapped by the unmap then reuse it as a tail block | ||
| 236 | block_end_predecessor->virt = virt_end; | ||
| 237 | block_end_predecessor->phys = tailPhys; | ||
| 238 | |||
| 239 | // No longer the predecessor | ||
| 240 | block_end_successor = block_end_predecessor--; | ||
| 241 | } else { | ||
| 242 | blocks.insert(block_end_successor, | ||
| 243 | {Block(virt, UnmappedPa, {}), | ||
| 244 | Block(virt_end, tailPhys, block_end_predecessor->extra_info)}); | ||
| 245 | if (unmap_callback) { | ||
| 246 | unmap_callback(virt, size); | ||
| 247 | } | ||
| 248 | |||
| 249 | // The previous block is mapped and ends before our region, so we're done | ||
| 250 | return; | ||
| 251 | } | ||
| 252 | } | ||
| 253 | |||
| 254 | // Walk the block vector to find the start predecessor as this is more efficient than another | ||
| 255 | // binary search in most scenarios | ||
| 256 | auto block_start_predecessor{walk_back_to_predecessor(block_end_successor)}; | ||
| 257 | auto block_start_successor{std::next(block_start_predecessor)}; | ||
| 258 | |||
| 259 | if (block_start_successor->virt > virt_end) { | ||
| 260 | ASSERT_MSG(false, "Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt); | ||
| 261 | } else if (block_start_successor->virt == virt_end) { | ||
| 262 | // There are no blocks between the start and the end that would let us skip inserting a new | ||
| 263 | // one for the head | ||
| 264 | |||
| 265 | // The previous block may be unmapped; if so we don't need to insert an unmapped block after it | ||
| 266 | if (block_start_predecessor->Mapped()) { | ||
| 267 | blocks.insert(block_start_successor, Block(virt, UnmappedPa, {})); | ||
| 268 | } | ||
| 269 | } else if (block_start_predecessor->Unmapped()) { | ||
| 270 | // The previous block is already unmapped, so it absorbs the region once the blocks in between are erased | ||
| 271 | blocks.erase(block_start_successor, block_end_predecessor); | ||
| 272 | } else { | ||
| 273 | // Erase overwritten blocks, skipping the first one as we have written the unmapped start | ||
| 274 | // block there | ||
| 275 | if (auto eraseStart{std::next(block_start_successor)}; eraseStart != block_end_successor) { | ||
| 276 | blocks.erase(eraseStart, block_end_successor); | ||
| 277 | } | ||
| 278 | |||
| 279 | // Add in the unmapped block header | ||
| 280 | block_start_successor->virt = virt; | ||
| 281 | block_start_successor->phys = UnmappedPa; | ||
| 282 | } | ||
| 283 | |||
| 284 | if (unmap_callback) | ||
| 285 | unmap_callback(virt, size); | ||
| 286 | } | ||
| 287 | |||
| 288 | ALLOC_MEMBER_CONST()::FlatAllocator(VaType virt_start_, VaType va_limit_) | ||
| 289 | : Base{va_limit_}, virt_start{virt_start_}, current_linear_alloc_end{virt_start_} {} | ||
| 290 | |||
| 291 | ALLOC_MEMBER(VaType)::Allocate(VaType size) { | ||
| 292 | std::scoped_lock lock(this->block_mutex); | ||
| 293 | |||
| 294 | VaType alloc_start{UnmappedVa}; | ||
| 295 | VaType alloc_end{current_linear_alloc_end + size}; | ||
| 296 | |||
| 297 | // Avoid searching backwards in the address space if possible | ||
| 298 | if (alloc_end >= current_linear_alloc_end && alloc_end <= this->va_limit) { | ||
| 299 | auto alloc_end_successor{ | ||
| 300 | std::lower_bound(this->blocks.begin(), this->blocks.end(), alloc_end)}; | ||
| 301 | if (alloc_end_successor == this->blocks.begin()) { | ||
| 302 | ASSERT_MSG(false, "First block in AS map is invalid!"); | ||
| 303 | } | ||
| 304 | |||
| 305 | auto alloc_end_predecessor{std::prev(alloc_end_successor)}; | ||
| 306 | if (alloc_end_predecessor->virt <= current_linear_alloc_end) { | ||
| 307 | alloc_start = current_linear_alloc_end; | ||
| 308 | } else { | ||
| 309 | // Skip over any fixed mappings in front of us | ||
| 310 | while (alloc_end_successor != this->blocks.end()) { | ||
| 311 | if (alloc_end_successor->virt - alloc_end_predecessor->virt >= size && | ||
| 312 | alloc_end_predecessor->Unmapped()) { | ||
| 313 | alloc_start = alloc_end_predecessor->virt; | ||
| 314 | break; | ||
| 315 | } | ||
| 316 | |||
| 317 | alloc_end_predecessor = alloc_end_successor++; | ||
| 318 | |||
| 319 | // Use the VA limit to calculate if we can fit in the final block since it has no | ||
| 320 | // successor | ||
| 321 | if (alloc_end_successor == this->blocks.end()) { | ||
| 322 | alloc_end = alloc_end_predecessor->virt + size; | ||
| 323 | |||
| 324 | if (alloc_end >= alloc_end_predecessor->virt && alloc_end <= this->va_limit) { | ||
| 325 | alloc_start = alloc_end_predecessor->virt; | ||
| 326 | } | ||
| 327 | } | ||
| 328 | } | ||
| 329 | } | ||
| 330 | } | ||
| 331 | |||
| 332 | if (alloc_start != UnmappedVa) { | ||
| 333 | current_linear_alloc_end = alloc_start + size; | ||
| 334 | } else { // If linear allocation overflows the AS then find a gap | ||
| 335 | if (this->blocks.size() <= 2) { | ||
| 336 | ASSERT_MSG(false, "Unexpected allocator state!"); | ||
| 337 | } | ||
| 338 | |||
| 339 | auto search_predecessor{this->blocks.begin()}; | ||
| 340 | auto search_successor{std::next(search_predecessor)}; | ||
| 341 | |||
| 342 | while (search_successor != this->blocks.end() && | ||
| 343 | (search_successor->virt - search_predecessor->virt < size || | ||
| 344 | search_predecessor->Mapped())) { | ||
| 345 | search_predecessor = search_successor++; | ||
| 346 | } | ||
| 347 | |||
| 348 | if (search_successor != this->blocks.end()) { | ||
| 349 | alloc_start = search_predecessor->virt; | ||
| 350 | } else { | ||
| 351 | return {}; // AS is full | ||
| 352 | } | ||
| 353 | } | ||
| 354 | |||
| 355 | this->MapLocked(alloc_start, true, size, {}); | ||
| 356 | return alloc_start; | ||
| 357 | } | ||
| 358 | |||
| 359 | ALLOC_MEMBER(void)::AllocateFixed(VaType virt, VaType size) { | ||
| 360 | this->Map(virt, true, size); | ||
| 361 | } | ||
| 362 | |||
| 363 | ALLOC_MEMBER(void)::Free(VaType virt, VaType size) { | ||
| 364 | this->Unmap(virt, size); | ||
| 365 | } | ||
| 366 | } // namespace Common | ||
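To make the block-vector invariants above concrete, here is a sketch of how the sorted vector evolves across a Map/Unmap pair. The TestMap alias and its parameters are illustrative (the stock build only instantiates FlatAllocator<u32, 0, 32>), and it assumes the definitions in address_space.inc are visible to the translation unit:

    #include "common/address_space.inc"
    #include "common/common_types.h"

    // u32 VA space, UnmappedVa = 0, u32 PA space with 0xFFFFFFFF as the unmapped
    // marker, contiguous-split semantics, 32 address bits
    using TestMap = Common::FlatAddressSpaceMap<u32, 0, u32, 0xFFFFFFFF, true, 32>;

    void BlockVectorSketch() {
        TestMap as{};
        as.Map(0x2000, 0x9000, 0x1000);
        // blocks now holds three runs, each extending to the next block's virt:
        //   {virt 0x0000, unmapped}, {virt 0x2000, phys 0x9000}, {virt 0x3000, unmapped}
        as.Unmap(0x2000, 0x1000);
        // the mapped run is erased and the surrounding unmapped runs merge:
        //   {virt 0x0000, unmapped}
    }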
diff --git a/src/common/algorithm.h b/src/common/algorithm.h
index 9ddfd637b..c27c9241d 100644
--- a/src/common/algorithm.h
+++ b/src/common/algorithm.h
| @@ -24,4 +24,12 @@ template <class ForwardIt, class T, class Compare = std::less<>> | |||
| 24 | return first != last && !comp(value, *first) ? first : last; | 24 | return first != last && !comp(value, *first) ? first : last; |
| 25 | } | 25 | } |
| 26 | 26 | ||
| 27 | template <typename T, typename Func, typename... Args> | ||
| 28 | T FoldRight(T initial_value, Func&& func, Args&&... args) { | ||
| 29 | T value{initial_value}; | ||
| 30 | const auto high_func = [&value, &func]<typename U>(U x) { value = func(value, x); }; | ||
| 31 | (std::invoke(high_func, std::forward<Args>(args)), ...); | ||
| 32 | return value; | ||
| 33 | } | ||
| 34 | |||
| 27 | } // namespace Common | 35 | } // namespace Common |
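Despite the name, FoldRight combines the arguments left-to-right (a left fold over the parameter pack); it also relies on std::invoke, so <functional> needs to be in scope. A short usage sketch:

    #include "common/algorithm.h"
    #include "common/common_types.h"

    void FoldSketch() {
        // Sums the pack into the accumulator: ((0 + 1) + 2) + 3 == 6
        [[maybe_unused]] const int sum =
            Common::FoldRight(0, [](int acc, int x) { return acc + x; }, 1, 2, 3);

        // Any binary combiner works, e.g. merging flag bits: 0x1 | 0x4 | 0x10 == 0x15
        [[maybe_unused]] const u32 flags = Common::FoldRight(
            u32{0}, [](u32 acc, u32 bit) { return acc | bit; }, u32{0x1}, u32{0x4}, u32{0x10});
    }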
diff --git a/src/common/hash.h b/src/common/hash.h
index b6f3e6d6f..e8fe78b07 100644
--- a/src/common/hash.h
+++ b/src/common/hash.h
| @@ -18,4 +18,11 @@ struct PairHash { | |||
| 18 | } | 18 | } |
| 19 | }; | 19 | }; |
| 20 | 20 | ||
| 21 | template <typename T> | ||
| 22 | struct IdentityHash { | ||
| 23 | [[nodiscard]] size_t operator()(T value) const noexcept { | ||
| 24 | return static_cast<size_t>(value); | ||
| 25 | } | ||
| 26 | }; | ||
| 27 | |||
| 21 | } // namespace Common | 28 | } // namespace Common |
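IdentityHash is useful when the keys are already well-distributed hash values, so running them through another hash function is wasted work. A usage sketch (the key value is illustrative):

    #include <unordered_map>

    #include "common/common_types.h"
    #include "common/hash.h"

    void HashSketch() {
        // The keys here are already hash digests; IdentityHash simply forwards
        // the value as the bucket source
        std::unordered_map<u64, int, Common::IdentityHash<u64>> cache;
        cache.emplace(0x9E3779B97F4A7C15ULL, 42);
    }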
diff --git a/src/common/input.h b/src/common/input.h
index 825b0d650..bfa0639f5 100644
--- a/src/common/input.h
+++ b/src/common/input.h
| @@ -76,6 +76,19 @@ enum class PollingError { | |||
| 76 | Unknown, | 76 | Unknown, |
| 77 | }; | 77 | }; |
| 78 | 78 | ||
| 79 | // NFC reply from the controller | ||
| 80 | enum class NfcState { | ||
| 81 | Success, | ||
| 82 | NewAmiibo, | ||
| 83 | WaitingForAmiibo, | ||
| 84 | AmiiboRemoved, | ||
| 85 | NotAnAmiibo, | ||
| 86 | NotSupported, | ||
| 87 | WrongDeviceState, | ||
| 88 | WriteFailed, | ||
| 89 | Unknown, | ||
| 90 | }; | ||
| 91 | |||
| 79 | // Ir camera reply from the controller | 92 | // Ir camera reply from the controller |
| 80 | enum class CameraError { | 93 | enum class CameraError { |
| 81 | None, | 94 | None, |
| @@ -202,6 +215,11 @@ struct CameraStatus { | |||
| 202 | std::vector<u8> data{}; | 215 | std::vector<u8> data{}; |
| 203 | }; | 216 | }; |
| 204 | 217 | ||
| 218 | struct NfcStatus { | ||
| 219 | NfcState state{}; | ||
| 220 | std::vector<u8> data{}; | ||
| 221 | }; | ||
| 222 | |||
| 205 | // List of buttons to be passed to Qt that can be translated | 223 | // List of buttons to be passed to Qt that can be translated |
| 206 | enum class ButtonNames { | 224 | enum class ButtonNames { |
| 207 | Undefined, | 225 | Undefined, |
| @@ -260,6 +278,7 @@ struct CallbackStatus { | |||
| 260 | BatteryStatus battery_status{}; | 278 | BatteryStatus battery_status{}; |
| 261 | VibrationStatus vibration_status{}; | 279 | VibrationStatus vibration_status{}; |
| 262 | CameraStatus camera_status{}; | 280 | CameraStatus camera_status{}; |
| 281 | NfcStatus nfc_status{}; | ||
| 263 | }; | 282 | }; |
| 264 | 283 | ||
| 265 | // Triggered once every input change | 284 | // Triggered once every input change |
| @@ -312,6 +331,14 @@ public: | |||
| 312 | virtual CameraError SetCameraFormat([[maybe_unused]] CameraFormat camera_format) { | 331 | virtual CameraError SetCameraFormat([[maybe_unused]] CameraFormat camera_format) { |
| 313 | return CameraError::NotSupported; | 332 | return CameraError::NotSupported; |
| 314 | } | 333 | } |
| 334 | |||
| 335 | virtual NfcState SupportsNfc() const { | ||
| 336 | return NfcState::NotSupported; | ||
| 337 | } | ||
| 338 | |||
| 339 | virtual NfcState WriteNfcData([[maybe_unused]] const std::vector<u8>& data) { | ||
| 340 | return NfcState::NotSupported; | ||
| 341 | } | ||
| 315 | }; | 342 | }; |
| 316 | 343 | ||
| 317 | /// An abstract class template for a factory that can create input devices. | 344 | /// An abstract class template for a factory that can create input devices. |
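A sketch of how a consumer might use the new NFC plumbing, assuming the surrounding Common::Input namespace: a device pushes a CallbackStatus and the handler switches on the NfcState reply. LoadAmiibo and DropAmiibo are hypothetical helpers standing in for whatever the HID layer actually does with the tag data:

    #include <vector>

    #include "common/common_types.h"
    #include "common/input.h"

    void LoadAmiibo(const std::vector<u8>& data); // hypothetical
    void DropAmiibo();                            // hypothetical

    void OnInputChange(const Common::Input::CallbackStatus& status) {
        switch (status.nfc_status.state) {
        case Common::Input::NfcState::NewAmiibo:
            LoadAmiibo(status.nfc_status.data);
            break;
        case Common::Input::NfcState::AmiiboRemoved:
            DropAmiibo();
            break;
        default:
            break; // WaitingForAmiibo, error states, etc.
        }
    }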
diff --git a/src/common/logging/backend.cpp b/src/common/logging/backend.cpp
index 8ce1c2fd1..15d92505e 100644
--- a/src/common/logging/backend.cpp
+++ b/src/common/logging/backend.cpp
| @@ -219,7 +219,7 @@ private: | |||
| 219 | 219 | ||
| 220 | void StartBackendThread() { | 220 | void StartBackendThread() { |
| 221 | backend_thread = std::jthread([this](std::stop_token stop_token) { | 221 | backend_thread = std::jthread([this](std::stop_token stop_token) { |
| 222 | Common::SetCurrentThreadName("yuzu:Log"); | 222 | Common::SetCurrentThreadName("Logger"); |
| 223 | Entry entry; | 223 | Entry entry; |
| 224 | const auto write_logs = [this, &entry]() { | 224 | const auto write_logs = [this, &entry]() { |
| 225 | ForEachBackend([&entry](Backend& backend) { backend.Write(entry); }); | 225 | ForEachBackend([&entry](Backend& backend) { backend.Write(entry); }); |
diff --git a/src/common/multi_level_page_table.cpp b/src/common/multi_level_page_table.cpp
new file mode 100644
index 000000000..46e362f3b
--- /dev/null
+++ b/src/common/multi_level_page_table.cpp
| @@ -0,0 +1,9 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2021 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #include "common/multi_level_page_table.inc" | ||
| 5 | |||
| 6 | namespace Common { | ||
| 7 | template class Common::MultiLevelPageTable<u64>; | ||
| 8 | template class Common::MultiLevelPageTable<u32>; | ||
| 9 | } // namespace Common | ||
diff --git a/src/common/multi_level_page_table.h b/src/common/multi_level_page_table.h
new file mode 100644
index 000000000..31f6676a0
--- /dev/null
+++ b/src/common/multi_level_page_table.h
| @@ -0,0 +1,78 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2021 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <type_traits> | ||
| 7 | #include <utility> | ||
| 8 | #include <vector> | ||
| 9 | |||
| 10 | #include "common/common_types.h" | ||
| 11 | |||
| 12 | namespace Common { | ||
| 13 | |||
| 14 | template <typename BaseAddr> | ||
| 15 | class MultiLevelPageTable final { | ||
| 16 | public: | ||
| 17 | constexpr MultiLevelPageTable() = default; | ||
| 18 | explicit MultiLevelPageTable(std::size_t address_space_bits, std::size_t first_level_bits, | ||
| 19 | std::size_t page_bits); | ||
| 20 | |||
| 21 | ~MultiLevelPageTable() noexcept; | ||
| 22 | |||
| 23 | MultiLevelPageTable(const MultiLevelPageTable&) = delete; | ||
| 24 | MultiLevelPageTable& operator=(const MultiLevelPageTable&) = delete; | ||
| 25 | |||
| 26 | MultiLevelPageTable(MultiLevelPageTable&& other) noexcept | ||
| 27 | : address_space_bits{std::exchange(other.address_space_bits, 0)}, | ||
| 28 | first_level_bits{std::exchange(other.first_level_bits, 0)}, page_bits{std::exchange( | ||
| 29 | other.page_bits, 0)}, | ||
| 30 | first_level_shift{std::exchange(other.first_level_shift, 0)}, | ||
| 31 | first_level_chunk_size{std::exchange(other.first_level_chunk_size, 0)}, | ||
| 32 | first_level_map{std::move(other.first_level_map)}, base_ptr{std::exchange(other.base_ptr, | ||
| 33 | nullptr)} {} | ||
| 34 | |||
| 35 | MultiLevelPageTable& operator=(MultiLevelPageTable&& other) noexcept { | ||
| 36 | address_space_bits = std::exchange(other.address_space_bits, 0); | ||
| 37 | first_level_bits = std::exchange(other.first_level_bits, 0); | ||
| 38 | page_bits = std::exchange(other.page_bits, 0); | ||
| 39 | first_level_shift = std::exchange(other.first_level_shift, 0); | ||
| 40 | first_level_chunk_size = std::exchange(other.first_level_chunk_size, 0); | ||
| 41 | alloc_size = std::exchange(other.alloc_size, 0); | ||
| 42 | first_level_map = std::move(other.first_level_map); | ||
| 43 | base_ptr = std::exchange(other.base_ptr, nullptr); | ||
| 44 | return *this; | ||
| 45 | } | ||
| 46 | |||
| 47 | void ReserveRange(u64 start, std::size_t size); | ||
| 48 | |||
| 49 | [[nodiscard]] const BaseAddr& operator[](std::size_t index) const { | ||
| 50 | return base_ptr[index]; | ||
| 51 | } | ||
| 52 | |||
| 53 | [[nodiscard]] BaseAddr& operator[](std::size_t index) { | ||
| 54 | return base_ptr[index]; | ||
| 55 | } | ||
| 56 | |||
| 57 | [[nodiscard]] BaseAddr* data() { | ||
| 58 | return base_ptr; | ||
| 59 | } | ||
| 60 | |||
| 61 | [[nodiscard]] const BaseAddr* data() const { | ||
| 62 | return base_ptr; | ||
| 63 | } | ||
| 64 | |||
| 65 | private: | ||
| 66 | void AllocateLevel(u64 level); | ||
| 67 | |||
| 68 | std::size_t address_space_bits{}; | ||
| 69 | std::size_t first_level_bits{}; | ||
| 70 | std::size_t page_bits{}; | ||
| 71 | std::size_t first_level_shift{}; | ||
| 72 | std::size_t first_level_chunk_size{}; | ||
| 73 | std::size_t alloc_size{}; | ||
| 74 | std::vector<void*> first_level_map{}; | ||
| 75 | BaseAddr* base_ptr{}; | ||
| 76 | }; | ||
| 77 | |||
| 78 | } // namespace Common | ||
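The table presents itself as one flat array indexed by page number, while memory is only committed per first-level chunk. A usage sketch with illustrative parameters (u32 matches one of the explicit instantiations in multi_level_page_table.cpp):

    #include "common/common_types.h"
    #include "common/multi_level_page_table.h"

    void PageTableSketch() {
        // A 32-bit address space split into 1024 first-level chunks with
        // 4 KiB (12-bit) pages
        Common::MultiLevelPageTable<u32> table{32, 10, 12};

        constexpr u64 addr = 0x12345000;
        table.ReserveRange(addr, 0x4000); // commits the covering first-level chunk(s)
        table[addr >> 12] = 0xCAFE0;      // entries then index like one flat array
    }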
diff --git a/src/common/multi_level_page_table.inc b/src/common/multi_level_page_table.inc
new file mode 100644
index 000000000..8ac506fa0
--- /dev/null
+++ b/src/common/multi_level_page_table.inc
| @@ -0,0 +1,84 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2021 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #ifdef _WIN32 | ||
| 5 | #include <windows.h> | ||
| 6 | #else | ||
| 7 | #include <sys/mman.h> | ||
| 8 | #endif | ||
| 9 | |||
| 10 | #include "common/assert.h" | ||
| 11 | #include "common/multi_level_page_table.h" | ||
| 12 | |||
| 13 | namespace Common { | ||
| 14 | |||
| 15 | template <typename BaseAddr> | ||
| 16 | MultiLevelPageTable<BaseAddr>::MultiLevelPageTable(std::size_t address_space_bits_, | ||
| 17 | std::size_t first_level_bits_, | ||
| 18 | std::size_t page_bits_) | ||
| 19 | : address_space_bits{address_space_bits_}, | ||
| 20 | first_level_bits{first_level_bits_}, page_bits{page_bits_} { | ||
| 21 | if (page_bits == 0) { | ||
| 22 | return; | ||
| 23 | } | ||
| 24 | first_level_shift = address_space_bits - first_level_bits; | ||
| 25 | first_level_chunk_size = (1ULL << (first_level_shift - page_bits)) * sizeof(BaseAddr); | ||
| 26 | alloc_size = (1ULL << (address_space_bits - page_bits)) * sizeof(BaseAddr); | ||
| 27 | std::size_t first_level_size = 1ULL << first_level_bits; | ||
| 28 | first_level_map.resize(first_level_size, nullptr); | ||
| 29 | #ifdef _WIN32 | ||
| 30 | void* base{VirtualAlloc(nullptr, alloc_size, MEM_RESERVE, PAGE_READWRITE)}; | ||
| 31 | #else | ||
| 32 | void* base{mmap(nullptr, alloc_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)}; | ||
| 33 | |||
| 34 | if (base == MAP_FAILED) { | ||
| 35 | base = nullptr; | ||
| 36 | } | ||
| 37 | #endif | ||
| 38 | |||
| 39 | ASSERT(base); | ||
| 40 | base_ptr = reinterpret_cast<BaseAddr*>(base); | ||
| 41 | } | ||
| 42 | |||
| 43 | template <typename BaseAddr> | ||
| 44 | MultiLevelPageTable<BaseAddr>::~MultiLevelPageTable() noexcept { | ||
| 45 | if (!base_ptr) { | ||
| 46 | return; | ||
| 47 | } | ||
| 48 | #ifdef _WIN32 | ||
| 49 | ASSERT(VirtualFree(base_ptr, 0, MEM_RELEASE)); | ||
| 50 | #else | ||
| 51 | ASSERT(munmap(base_ptr, alloc_size) == 0); | ||
| 52 | #endif | ||
| 53 | } | ||
| 54 | |||
| 55 | template <typename BaseAddr> | ||
| 56 | void MultiLevelPageTable<BaseAddr>::ReserveRange(u64 start, std::size_t size) { | ||
| 57 | const u64 new_start = start >> first_level_shift; | ||
| 58 | const u64 new_end = (start + size) >> first_level_shift; | ||
| 59 | for (u64 i = new_start; i <= new_end; i++) { | ||
| 60 | if (!first_level_map[i]) { | ||
| 61 | AllocateLevel(i); | ||
| 62 | } | ||
| 63 | } | ||
| 64 | } | ||
| 65 | |||
| 66 | template <typename BaseAddr> | ||
| 67 | void MultiLevelPageTable<BaseAddr>::AllocateLevel(u64 level) { | ||
| 68 | void* ptr = reinterpret_cast<char*>(base_ptr) + level * first_level_chunk_size; | ||
| 69 | #ifdef _WIN32 | ||
| 70 | void* base{VirtualAlloc(ptr, first_level_chunk_size, MEM_COMMIT, PAGE_READWRITE)}; | ||
| 71 | #else | ||
| 72 | void* base{mmap(ptr, first_level_chunk_size, PROT_READ | PROT_WRITE, | ||
| 73 | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)}; | ||
| 74 | |||
| 75 | if (base == MAP_FAILED) { | ||
| 76 | base = nullptr; | ||
| 77 | } | ||
| 78 | #endif | ||
| 79 | ASSERT(base); | ||
| 80 | |||
| 81 | first_level_map[level] = base; | ||
| 82 | } | ||
| 83 | |||
| 84 | } // namespace Common | ||
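The constructor reserves the whole flattened table's virtual range up front and AllocateLevel commits first-level chunks on demand. On Windows that is a literal MEM_RESERVE/MEM_COMMIT split; the POSIX path above instead maps the range PROT_READ | PROT_WRITE immediately and leans on demand paging. A standalone sketch of the explicit reserve-then-commit idea (POSIX-only, sizes illustrative):

    #include <sys/mman.h>

    #include <cassert>
    #include <cstddef>

    int main() {
        const std::size_t total = std::size_t{1} << 30; // reserve 1 GiB of address space
        const std::size_t chunk = std::size_t{1} << 20; // commit in 1 MiB pieces

        // Reserve: the addresses exist but no memory backs them yet
        void* base = mmap(nullptr, total, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        assert(base != MAP_FAILED);

        // Commit: make the first chunk usable before touching it
        const int rc = mprotect(base, chunk, PROT_READ | PROT_WRITE);
        assert(rc == 0);

        static_cast<char*>(base)[0] = 1; // now backed and writable
        munmap(base, total);
        return 0;
    }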
diff --git a/src/common/settings.h b/src/common/settings.h
index 851812f28..d2452c93b 100644
--- a/src/common/settings.h
+++ b/src/common/settings.h
| @@ -531,6 +531,7 @@ struct Values { | |||
| 531 | Setting<bool> use_auto_stub{false, "use_auto_stub"}; | 531 | Setting<bool> use_auto_stub{false, "use_auto_stub"}; |
| 532 | Setting<bool> enable_all_controllers{false, "enable_all_controllers"}; | 532 | Setting<bool> enable_all_controllers{false, "enable_all_controllers"}; |
| 533 | Setting<bool> create_crash_dumps{false, "create_crash_dumps"}; | 533 | Setting<bool> create_crash_dumps{false, "create_crash_dumps"}; |
| 534 | Setting<bool> perform_vulkan_check{true, "perform_vulkan_check"}; | ||
| 534 | 535 | ||
| 535 | // Miscellaneous | 536 | // Miscellaneous |
| 536 | Setting<std::string> log_filter{"*:Info", "log_filter"}; | 537 | Setting<std::string> log_filter{"*:Info", "log_filter"}; |