-rw-r--r--  src/common/address_space.h                            | 105
-rw-r--r--  src/common/address_space.inc                          | 319
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp  |   8
3 files changed, 237 insertions, 195 deletions
diff --git a/src/common/address_space.h b/src/common/address_space.h
index 5b3832f07..bf649018c 100644
--- a/src/common/address_space.h
+++ b/src/common/address_space.h
@@ -23,9 +23,29 @@ template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa
| 23 | bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo = EmptyStruct> | 23 | bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo = EmptyStruct> |
| 24 | requires AddressSpaceValid<VaType, AddressSpaceBits> | 24 | requires AddressSpaceValid<VaType, AddressSpaceBits> |
| 25 | class FlatAddressSpaceMap { | 25 | class FlatAddressSpaceMap { |
| 26 | private: | 26 | public: |
| 27 | std::function<void(VaType, VaType)> | 27 | /// The maximum VA that this AS can technically reach |
| 28 | unmapCallback{}; //!< Callback called when the mappings in an region have changed | 28 | static constexpr VaType VaMaximum{(1ULL << (AddressSpaceBits - 1)) + |
| 29 | ((1ULL << (AddressSpaceBits - 1)) - 1)}; | ||
| 30 | |||
| 31 | explicit FlatAddressSpaceMap(VaType va_limit, | ||
| 32 | std::function<void(VaType, VaType)> unmap_callback = {}); | ||
| 33 | |||
| 34 | FlatAddressSpaceMap() = default; | ||
| 35 | |||
| 36 | void Map(VaType virt, PaType phys, VaType size, ExtraBlockInfo extra_info = {}) { | ||
| 37 | std::scoped_lock lock(block_mutex); | ||
| 38 | MapLocked(virt, phys, size, extra_info); | ||
| 39 | } | ||
| 40 | |||
| 41 | void Unmap(VaType virt, VaType size) { | ||
| 42 | std::scoped_lock lock(block_mutex); | ||
| 43 | UnmapLocked(virt, size); | ||
| 44 | } | ||
| 45 | |||
| 46 | VaType GetVALimit() const { | ||
| 47 | return va_limit; | ||
| 48 | } | ||
| 29 | 49 | ||
| 30 | protected: | 50 | protected: |
| 31 | /** | 51 | /** |
@@ -33,68 +53,55 @@ protected:
| 33 | * another block with a different phys address is hit | 53 | * another block with a different phys address is hit |
| 34 | */ | 54 | */ |
| 35 | struct Block { | 55 | struct Block { |
| 36 | VaType virt{UnmappedVa}; //!< VA of the block | 56 | /// VA of the block |
| 37 | PaType phys{UnmappedPa}; //!< PA of the block, will increase 1-1 with VA until a new block | 57 | VaType virt{UnmappedVa}; |
| 38 | //!< is encountered | 58 | /// PA of the block, will increase 1-1 with VA until a new block is encountered |
| 39 | [[no_unique_address]] ExtraBlockInfo extraInfo; | 59 | PaType phys{UnmappedPa}; |
| 60 | [[no_unique_address]] ExtraBlockInfo extra_info; | ||
| 40 | 61 | ||
| 41 | Block() = default; | 62 | Block() = default; |
| 42 | 63 | ||
| 43 | Block(VaType virt_, PaType phys_, ExtraBlockInfo extraInfo_) | 64 | Block(VaType virt_, PaType phys_, ExtraBlockInfo extra_info_) |
| 44 | : virt(virt_), phys(phys_), extraInfo(extraInfo_) {} | 65 | : virt(virt_), phys(phys_), extra_info(extra_info_) {} |
| 45 | 66 | ||
| 46 | constexpr bool Valid() { | 67 | bool Valid() const { |
| 47 | return virt != UnmappedVa; | 68 | return virt != UnmappedVa; |
| 48 | } | 69 | } |
| 49 | 70 | ||
| 50 | constexpr bool Mapped() { | 71 | bool Mapped() const { |
| 51 | return phys != UnmappedPa; | 72 | return phys != UnmappedPa; |
| 52 | } | 73 | } |
| 53 | 74 | ||
| 54 | constexpr bool Unmapped() { | 75 | bool Unmapped() const { |
| 55 | return phys == UnmappedPa; | 76 | return phys == UnmappedPa; |
| 56 | } | 77 | } |
| 57 | 78 | ||
| 58 | bool operator<(const VaType& pVirt) const { | 79 | bool operator<(const VaType& p_virt) const { |
| 59 | return virt < pVirt; | 80 | return virt < p_virt; |
| 60 | } | 81 | } |
| 61 | }; | 82 | }; |
| 62 | 83 | ||
| 63 | std::mutex blockMutex; | ||
| 64 | std::vector<Block> blocks{Block{}}; | ||
| 65 | |||
| 66 | /** | 84 | /** |
| 67 | * @brief Maps a PA range into the given AS region | 85 | * @brief Maps a PA range into the given AS region |
| 68 | * @note blockMutex MUST be locked when calling this | 86 | * @note block_mutex MUST be locked when calling this |
| 69 | */ | 87 | */ |
| 70 | void MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInfo extraInfo); | 88 | void MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInfo extra_info); |
| 71 | 89 | ||
| 72 | /** | 90 | /** |
| 73 | * @brief Unmaps the given range and merges it with other unmapped regions | 91 | * @brief Unmaps the given range and merges it with other unmapped regions |
| 74 | * @note blockMutex MUST be locked when calling this | 92 | * @note block_mutex MUST be locked when calling this |
| 75 | */ | 93 | */ |
| 76 | void UnmapLocked(VaType virt, VaType size); | 94 | void UnmapLocked(VaType virt, VaType size); |
| 77 | 95 | ||
| 78 | public: | 96 | std::mutex block_mutex; |
| 79 | static constexpr VaType VaMaximum{(1ULL << (AddressSpaceBits - 1)) + | 97 | std::vector<Block> blocks{Block{}}; |
| 80 | ((1ULL << (AddressSpaceBits - 1)) - | ||
| 81 | 1)}; //!< The maximum VA that this AS can technically reach | ||
| 82 | |||
| 83 | VaType vaLimit{VaMaximum}; //!< A soft limit on the maximum VA of the AS | ||
| 84 | |||
| 85 | FlatAddressSpaceMap(VaType vaLimit, std::function<void(VaType, VaType)> unmapCallback = {}); | ||
| 86 | |||
| 87 | FlatAddressSpaceMap() = default; | ||
| 88 | 98 | ||
| 89 | void Map(VaType virt, PaType phys, VaType size, ExtraBlockInfo extraInfo = {}) { | 99 | /// a soft limit on the maximum VA of the AS |
| 90 | std::scoped_lock lock(blockMutex); | 100 | VaType va_limit{VaMaximum}; |
| 91 | MapLocked(virt, phys, size, extraInfo); | ||
| 92 | } | ||
| 93 | 101 | ||
| 94 | void Unmap(VaType virt, VaType size) { | 102 | private: |
| 95 | std::scoped_lock lock(blockMutex); | 103 | /// Callback called when the mappings in an region have changed |
| 96 | UnmapLocked(virt, size); | 104 | std::function<void(VaType, VaType)> unmap_callback{}; |
| 97 | } | ||
| 98 | }; | 105 | }; |
| 99 | 106 | ||
| 100 | /** | 107 | /** |
@@ -108,14 +115,8 @@ class FlatAllocator
| 108 | private: | 115 | private: |
| 109 | using Base = FlatAddressSpaceMap<VaType, UnmappedVa, bool, false, false, AddressSpaceBits>; | 116 | using Base = FlatAddressSpaceMap<VaType, UnmappedVa, bool, false, false, AddressSpaceBits>; |
| 110 | 117 | ||
| 111 | VaType currentLinearAllocEnd; //!< The end address for the initial linear allocation pass, once | ||
| 112 | //!< this reaches the AS limit the slower allocation path will be | ||
| 113 | //!< used | ||
| 114 | |||
| 115 | public: | 118 | public: |
| 116 | VaType vaStart; //!< The base VA of the allocator, no allocations will be below this | 119 | explicit FlatAllocator(VaType va_start, VaType va_limit = Base::VaMaximum); |
| 117 | |||
| 118 | FlatAllocator(VaType vaStart, VaType vaLimit = Base::VaMaximum); | ||
| 119 | 120 | ||
| 120 | /** | 121 | /** |
| 121 | * @brief Allocates a region in the AS of the given size and returns its address | 122 | * @brief Allocates a region in the AS of the given size and returns its address |
@@ -131,5 +132,19 @@ public:
| 131 | * @brief Frees an AS region so it can be used again | 132 | * @brief Frees an AS region so it can be used again |
| 132 | */ | 133 | */ |
| 133 | void Free(VaType virt, VaType size); | 134 | void Free(VaType virt, VaType size); |
| 135 | |||
| 136 | VaType GetVAStart() const { | ||
| 137 | return va_start; | ||
| 138 | } | ||
| 139 | |||
| 140 | private: | ||
| 141 | /// The base VA of the allocator, no allocations will be below this | ||
| 142 | VaType va_start; | ||
| 143 | |||
| 144 | /** | ||
| 145 | * The end address for the initial linear allocation pass | ||
| 146 | * Once this reaches the AS limit the slower allocation path will be used | ||
| 147 | */ | ||
| 148 | VaType current_linear_alloc_end; | ||
| 134 | }; | 149 | }; |
| 135 | } // namespace Common | 150 | } // namespace Common |
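For orientation only (not part of the commit): a minimal sketch of how the refactored FlatAddressSpaceMap surface is driven after this change. The template arguments, addresses, and sizes below are illustrative assumptions, and the sketch presumes the template is instantiated for these parameters elsewhere (in yuzu this happens via the address_space.inc definitions).

    #include "common/address_space.h"

    // Hypothetical instantiation: 64-bit VAs with 0 as the unmapped marker,
    // u64 PAs, no contiguous-PA splitting, and a 32-bit address space.
    using ExampleMap = Common::FlatAddressSpaceMap<u64, 0, u64, 0, false, 32>;

    void ExampleMapUsage() {
        // The unmap callback fires whenever mappings in a region change.
        ExampleMap map{ExampleMap::VaMaximum,
                       [](u64 virt, u64 size) { /* e.g. invalidate [virt, virt + size) */ }};

        // The public wrappers lock block_mutex and forward to the *Locked variants.
        map.Map(/*virt=*/0x10000, /*phys=*/0x200000, /*size=*/0x4000);
        map.Unmap(/*virt=*/0x10000, /*size=*/0x4000);

        // The soft VA limit is now read through an accessor rather than a public member.
        const u64 limit = map.GetVALimit();
        static_cast<void>(limit);
    }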
diff --git a/src/common/address_space.inc b/src/common/address_space.inc
index a063782b3..3661b298e 100644
--- a/src/common/address_space.inc
+++ b/src/common/address_space.inc
@@ -30,137 +30,151 @@
| 30 | FlatAllocator<VaType, UnmappedVa, AddressSpaceBits> | 30 | FlatAllocator<VaType, UnmappedVa, AddressSpaceBits> |
| 31 | 31 | ||
| 32 | namespace Common { | 32 | namespace Common { |
| 33 | MAP_MEMBER_CONST()::FlatAddressSpaceMap(VaType vaLimit_, | 33 | MAP_MEMBER_CONST()::FlatAddressSpaceMap(VaType va_limit_, |
| 34 | std::function<void(VaType, VaType)> unmapCallback_) | 34 | std::function<void(VaType, VaType)> unmap_callback_) |
| 35 | : unmapCallback(std::move(unmapCallback_)), vaLimit(vaLimit_) { | 35 | : va_limit{va_limit_}, unmap_callback{std::move(unmap_callback_)} { |
| 36 | if (vaLimit > VaMaximum) | 36 | if (va_limit > VaMaximum) { |
| 37 | UNREACHABLE_MSG("Invalid VA limit!"); | 37 | UNREACHABLE_MSG("Invalid VA limit!"); |
| 38 | } | ||
| 38 | } | 39 | } |
| 39 | 40 | ||
| 40 | MAP_MEMBER(void)::MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInfo extraInfo) { | 41 | MAP_MEMBER(void)::MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInfo extra_info) { |
| 41 | VaType virtEnd{virt + size}; | 42 | VaType virt_end{virt + size}; |
| 42 | 43 | ||
| 43 | if (virtEnd > vaLimit) | 44 | if (virt_end > va_limit) { |
| 44 | UNREACHABLE_MSG("Trying to map a block past the VA limit: virtEnd: 0x{:X}, vaLimit: 0x{:X}", | 45 | UNREACHABLE_MSG( |
| 45 | virtEnd, vaLimit); | 46 | "Trying to map a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}", virt_end, |
| 47 | va_limit); | ||
| 48 | } | ||
| 46 | 49 | ||
| 47 | auto blockEndSuccessor{std::lower_bound(blocks.begin(), blocks.end(), virtEnd)}; | 50 | auto block_end_successor{std::lower_bound(blocks.begin(), blocks.end(), virt_end)}; |
| 48 | if (blockEndSuccessor == blocks.begin()) | 51 | if (block_end_successor == blocks.begin()) { |
| 49 | UNREACHABLE_MSG("Trying to map a block before the VA start: virtEnd: 0x{:X}", virtEnd); | 52 | UNREACHABLE_MSG("Trying to map a block before the VA start: virt_end: 0x{:X}", virt_end); |
| 53 | } | ||
| 50 | 54 | ||
| 51 | auto blockEndPredecessor{std::prev(blockEndSuccessor)}; | 55 | auto block_end_predecessor{std::prev(block_end_successor)}; |
| 52 | 56 | ||
| 53 | if (blockEndSuccessor != blocks.end()) { | 57 | if (block_end_successor != blocks.end()) { |
| 54 | // We have blocks in front of us, if one is directly in front then we don't have to add a | 58 | // We have blocks in front of us, if one is directly in front then we don't have to add a |
| 55 | // tail | 59 | // tail |
| 56 | if (blockEndSuccessor->virt != virtEnd) { | 60 | if (block_end_successor->virt != virt_end) { |
| 57 | PaType tailPhys{[&]() -> PaType { | 61 | PaType tailPhys{[&]() -> PaType { |
| 58 | if constexpr (!PaContigSplit) { | 62 | if constexpr (!PaContigSplit) { |
| 59 | return blockEndPredecessor | 63 | // Always propagate unmapped regions rather than calculating offset |
| 60 | ->phys; // Always propagate unmapped regions rather than calculating offset | 64 | return block_end_predecessor->phys; |
| 61 | } else { | 65 | } else { |
| 62 | if (blockEndPredecessor->Unmapped()) | 66 | if (block_end_predecessor->Unmapped()) { |
| 63 | return blockEndPredecessor->phys; // Always propagate unmapped regions | 67 | // Always propagate unmapped regions rather than calculating offset |
| 64 | // rather than calculating offset | 68 | return block_end_predecessor->phys; |
| 65 | else | 69 | } else { |
| 66 | return blockEndPredecessor->phys + virtEnd - blockEndPredecessor->virt; | 70 | return block_end_predecessor->phys + virt_end - block_end_predecessor->virt; |
| 71 | } | ||
| 67 | } | 72 | } |
| 68 | }()}; | 73 | }()}; |
| 69 | 74 | ||
| 70 | if (blockEndPredecessor->virt >= virt) { | 75 | if (block_end_predecessor->virt >= virt) { |
| 71 | // If this block's start would be overlapped by the map then reuse it as a tail | 76 | // If this block's start would be overlapped by the map then reuse it as a tail |
| 72 | // block | 77 | // block |
| 73 | blockEndPredecessor->virt = virtEnd; | 78 | block_end_predecessor->virt = virt_end; |
| 74 | blockEndPredecessor->phys = tailPhys; | 79 | block_end_predecessor->phys = tailPhys; |
| 75 | blockEndPredecessor->extraInfo = blockEndPredecessor->extraInfo; | 80 | block_end_predecessor->extra_info = block_end_predecessor->extra_info; |
| 76 | 81 | ||
| 77 | // No longer predecessor anymore | 82 | // No longer predecessor anymore |
| 78 | blockEndSuccessor = blockEndPredecessor--; | 83 | block_end_successor = block_end_predecessor--; |
| 79 | } else { | 84 | } else { |
| 80 | // Else insert a new one and we're done | 85 | // Else insert a new one and we're done |
| 81 | blocks.insert(blockEndSuccessor, | 86 | blocks.insert(block_end_successor, |
| 82 | {Block(virt, phys, extraInfo), | 87 | {Block(virt, phys, extra_info), |
| 83 | Block(virtEnd, tailPhys, blockEndPredecessor->extraInfo)}); | 88 | Block(virt_end, tailPhys, block_end_predecessor->extra_info)}); |
| 84 | if (unmapCallback) | 89 | if (unmap_callback) { |
| 85 | unmapCallback(virt, size); | 90 | unmap_callback(virt, size); |
| 91 | } | ||
| 86 | 92 | ||
| 87 | return; | 93 | return; |
| 88 | } | 94 | } |
| 89 | } | 95 | } |
| 90 | } else { | 96 | } else { |
| 91 | // blockEndPredecessor will always be unmapped as blocks has to be terminated by an unmapped | 97 | // block_end_predecessor will always be unmapped as blocks has to be terminated by an |
| 92 | // chunk | 98 | // unmapped chunk |
| 93 | if (blockEndPredecessor != blocks.begin() && blockEndPredecessor->virt >= virt) { | 99 | if (block_end_predecessor != blocks.begin() && block_end_predecessor->virt >= virt) { |
| 94 | // Move the unmapped block start backwards | 100 | // Move the unmapped block start backwards |
| 95 | blockEndPredecessor->virt = virtEnd; | 101 | block_end_predecessor->virt = virt_end; |
| 96 | 102 | ||
| 97 | // No longer predecessor anymore | 103 | // No longer predecessor anymore |
| 98 | blockEndSuccessor = blockEndPredecessor--; | 104 | block_end_successor = block_end_predecessor--; |
| 99 | } else { | 105 | } else { |
| 100 | // Else insert a new one and we're done | 106 | // Else insert a new one and we're done |
| 101 | blocks.insert(blockEndSuccessor, | 107 | blocks.insert(block_end_successor, |
| 102 | {Block(virt, phys, extraInfo), Block(virtEnd, UnmappedPa, {})}); | 108 | {Block(virt, phys, extra_info), Block(virt_end, UnmappedPa, {})}); |
| 103 | if (unmapCallback) | 109 | if (unmap_callback) { |
| 104 | unmapCallback(virt, size); | 110 | unmap_callback(virt, size); |
| 111 | } | ||
| 105 | 112 | ||
| 106 | return; | 113 | return; |
| 107 | } | 114 | } |
| 108 | } | 115 | } |
| 109 | 116 | ||
| 110 | auto blockStartSuccessor{blockEndSuccessor}; | 117 | auto block_start_successor{block_end_successor}; |
| 111 | 118 | ||
| 112 | // Walk the block vector to find the start successor as this is more efficient than another | 119 | // Walk the block vector to find the start successor as this is more efficient than another |
| 113 | // binary search in most scenarios | 120 | // binary search in most scenarios |
| 114 | while (std::prev(blockStartSuccessor)->virt >= virt) | 121 | while (std::prev(block_start_successor)->virt >= virt) { |
| 115 | blockStartSuccessor--; | 122 | block_start_successor--; |
| 123 | } | ||
| 116 | 124 | ||
| 117 | // Check that the start successor is either the end block or something in between | 125 | // Check that the start successor is either the end block or something in between |
| 118 | if (blockStartSuccessor->virt > virtEnd) { | 126 | if (block_start_successor->virt > virt_end) { |
| 119 | UNREACHABLE_MSG("Unsorted block in AS map: virt: 0x{:X}", blockStartSuccessor->virt); | 127 | UNREACHABLE_MSG("Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt); |
| 120 | } else if (blockStartSuccessor->virt == virtEnd) { | 128 | } else if (block_start_successor->virt == virt_end) { |
| 121 | // We need to create a new block as there are none spare that we would overwrite | 129 | // We need to create a new block as there are none spare that we would overwrite |
| 122 | blocks.insert(blockStartSuccessor, Block(virt, phys, extraInfo)); | 130 | blocks.insert(block_start_successor, Block(virt, phys, extra_info)); |
| 123 | } else { | 131 | } else { |
| 124 | // Erase overwritten blocks | 132 | // Erase overwritten blocks |
| 125 | if (auto eraseStart{std::next(blockStartSuccessor)}; eraseStart != blockEndSuccessor) | 133 | if (auto eraseStart{std::next(block_start_successor)}; eraseStart != block_end_successor) { |
| 126 | blocks.erase(eraseStart, blockEndSuccessor); | 134 | blocks.erase(eraseStart, block_end_successor); |
| 135 | } | ||
| 127 | 136 | ||
| 128 | // Reuse a block that would otherwise be overwritten as a start block | 137 | // Reuse a block that would otherwise be overwritten as a start block |
| 129 | blockStartSuccessor->virt = virt; | 138 | block_start_successor->virt = virt; |
| 130 | blockStartSuccessor->phys = phys; | 139 | block_start_successor->phys = phys; |
| 131 | blockStartSuccessor->extraInfo = extraInfo; | 140 | block_start_successor->extra_info = extra_info; |
| 132 | } | 141 | } |
| 133 | 142 | ||
| 134 | if (unmapCallback) | 143 | if (unmap_callback) { |
| 135 | unmapCallback(virt, size); | 144 | unmap_callback(virt, size); |
| 145 | } | ||
| 136 | } | 146 | } |
| 137 | 147 | ||
| 138 | MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) { | 148 | MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) { |
| 139 | VaType virtEnd{virt + size}; | 149 | VaType virt_end{virt + size}; |
| 140 | 150 | ||
| 141 | if (virtEnd > vaLimit) | 151 | if (virt_end > va_limit) { |
| 142 | UNREACHABLE_MSG("Trying to map a block past the VA limit: virtEnd: 0x{:X}, vaLimit: 0x{:X}", | 152 | UNREACHABLE_MSG( |
| 143 | virtEnd, vaLimit); | 153 | "Trying to map a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}", virt_end, |
| 154 | va_limit); | ||
| 155 | } | ||
| 144 | 156 | ||
| 145 | auto blockEndSuccessor{std::lower_bound(blocks.begin(), blocks.end(), virtEnd)}; | 157 | auto block_end_successor{std::lower_bound(blocks.begin(), blocks.end(), virt_end)}; |
| 146 | if (blockEndSuccessor == blocks.begin()) | 158 | if (block_end_successor == blocks.begin()) { |
| 147 | UNREACHABLE_MSG("Trying to unmap a block before the VA start: virtEnd: 0x{:X}", virtEnd); | 159 | UNREACHABLE_MSG("Trying to unmap a block before the VA start: virt_end: 0x{:X}", virt_end); |
| 160 | } | ||
| 148 | 161 | ||
| 149 | auto blockEndPredecessor{std::prev(blockEndSuccessor)}; | 162 | auto block_end_predecessor{std::prev(block_end_successor)}; |
| 150 | 163 | ||
| 151 | auto walkBackToPredecessor{[&](auto iter) { | 164 | auto walk_back_to_predecessor{[&](auto iter) { |
| 152 | while (iter->virt >= virt) | 165 | while (iter->virt >= virt) { |
| 153 | iter--; | 166 | iter--; |
| 167 | } | ||
| 154 | 168 | ||
| 155 | return iter; | 169 | return iter; |
| 156 | }}; | 170 | }}; |
| 157 | 171 | ||
| 158 | auto eraseBlocksWithEndUnmapped{[&](auto unmappedEnd) { | 172 | auto erase_blocks_with_end_unmapped{[&](auto unmappedEnd) { |
| 159 | auto blockStartPredecessor{walkBackToPredecessor(unmappedEnd)}; | 173 | auto block_start_predecessor{walk_back_to_predecessor(unmappedEnd)}; |
| 160 | auto blockStartSuccessor{std::next(blockStartPredecessor)}; | 174 | auto block_start_successor{std::next(block_start_predecessor)}; |
| 161 | 175 | ||
| 162 | auto eraseEnd{[&]() { | 176 | auto eraseEnd{[&]() { |
| 163 | if (blockStartPredecessor->Unmapped()) { | 177 | if (block_start_predecessor->Unmapped()) { |
| 164 | // If the start predecessor is unmapped then we can erase everything in our region | 178 | // If the start predecessor is unmapped then we can erase everything in our region |
| 165 | // and be done | 179 | // and be done |
| 166 | return std::next(unmappedEnd); | 180 | return std::next(unmappedEnd); |
@@ -174,158 +188,171 @@ MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) {
| 174 | 188 | ||
| 175 | // We can't have two unmapped regions after each other | 189 | // We can't have two unmapped regions after each other |
| 176 | if (eraseEnd != blocks.end() && | 190 | if (eraseEnd != blocks.end() && |
| 177 | (eraseEnd == blockStartSuccessor || | 191 | (eraseEnd == block_start_successor || |
| 178 | (blockStartPredecessor->Unmapped() && eraseEnd->Unmapped()))) | 192 | (block_start_predecessor->Unmapped() && eraseEnd->Unmapped()))) { |
| 179 | UNREACHABLE_MSG("Multiple contiguous unmapped regions are unsupported!"); | 193 | UNREACHABLE_MSG("Multiple contiguous unmapped regions are unsupported!"); |
| 194 | } | ||
| 180 | 195 | ||
| 181 | blocks.erase(blockStartSuccessor, eraseEnd); | 196 | blocks.erase(block_start_successor, eraseEnd); |
| 182 | }}; | 197 | }}; |
| 183 | 198 | ||
| 184 | // We can avoid any splitting logic if these are the case | 199 | // We can avoid any splitting logic if these are the case |
| 185 | if (blockEndPredecessor->Unmapped()) { | 200 | if (block_end_predecessor->Unmapped()) { |
| 186 | if (blockEndPredecessor->virt > virt) | 201 | if (block_end_predecessor->virt > virt) { |
| 187 | eraseBlocksWithEndUnmapped(blockEndPredecessor); | 202 | erase_blocks_with_end_unmapped(block_end_predecessor); |
| 203 | } | ||
| 188 | 204 | ||
| 189 | if (unmapCallback) | 205 | if (unmap_callback) { |
| 190 | unmapCallback(virt, size); | 206 | unmap_callback(virt, size); |
| 207 | } | ||
| 191 | 208 | ||
| 192 | return; // The region is unmapped, bail out early | 209 | return; // The region is unmapped, bail out early |
| 193 | } else if (blockEndSuccessor->virt == virtEnd && blockEndSuccessor->Unmapped()) { | 210 | } else if (block_end_successor->virt == virt_end && block_end_successor->Unmapped()) { |
| 194 | eraseBlocksWithEndUnmapped(blockEndSuccessor); | 211 | erase_blocks_with_end_unmapped(block_end_successor); |
| 195 | 212 | ||
| 196 | if (unmapCallback) | 213 | if (unmap_callback) { |
| 197 | unmapCallback(virt, size); | 214 | unmap_callback(virt, size); |
| 215 | } | ||
| 198 | 216 | ||
| 199 | return; // The region is unmapped here and doesn't need splitting, bail out early | 217 | return; // The region is unmapped here and doesn't need splitting, bail out early |
| 200 | } else if (blockEndSuccessor == blocks.end()) { | 218 | } else if (block_end_successor == blocks.end()) { |
| 201 | // This should never happen as the end should always follow an unmapped block | 219 | // This should never happen as the end should always follow an unmapped block |
| 202 | UNREACHABLE_MSG("Unexpected Memory Manager state!"); | 220 | UNREACHABLE_MSG("Unexpected Memory Manager state!"); |
| 203 | } else if (blockEndSuccessor->virt != virtEnd) { | 221 | } else if (block_end_successor->virt != virt_end) { |
| 204 | // If one block is directly in front then we don't have to add a tail | 222 | // If one block is directly in front then we don't have to add a tail |
| 205 | 223 | ||
| 206 | // The previous block is mapped so we will need to add a tail with an offset | 224 | // The previous block is mapped so we will need to add a tail with an offset |
| 207 | PaType tailPhys{[&]() { | 225 | PaType tailPhys{[&]() { |
| 208 | if constexpr (PaContigSplit) | 226 | if constexpr (PaContigSplit) { |
| 209 | return blockEndPredecessor->phys + virtEnd - blockEndPredecessor->virt; | 227 | return block_end_predecessor->phys + virt_end - block_end_predecessor->virt; |
| 210 | else | 228 | } else { |
| 211 | return blockEndPredecessor->phys; | 229 | return block_end_predecessor->phys; |
| 230 | } | ||
| 212 | }()}; | 231 | }()}; |
| 213 | 232 | ||
| 214 | if (blockEndPredecessor->virt >= virt) { | 233 | if (block_end_predecessor->virt >= virt) { |
| 215 | // If this block's start would be overlapped by the unmap then reuse it as a tail block | 234 | // If this block's start would be overlapped by the unmap then reuse it as a tail block |
| 216 | blockEndPredecessor->virt = virtEnd; | 235 | block_end_predecessor->virt = virt_end; |
| 217 | blockEndPredecessor->phys = tailPhys; | 236 | block_end_predecessor->phys = tailPhys; |
| 218 | 237 | ||
| 219 | // No longer predecessor anymore | 238 | // No longer predecessor anymore |
| 220 | blockEndSuccessor = blockEndPredecessor--; | 239 | block_end_successor = block_end_predecessor--; |
| 221 | } else { | 240 | } else { |
| 222 | blocks.insert(blockEndSuccessor, | 241 | blocks.insert(block_end_successor, |
| 223 | {Block(virt, UnmappedPa, {}), | 242 | {Block(virt, UnmappedPa, {}), |
| 224 | Block(virtEnd, tailPhys, blockEndPredecessor->extraInfo)}); | 243 | Block(virt_end, tailPhys, block_end_predecessor->extra_info)}); |
| 225 | if (unmapCallback) | 244 | if (unmap_callback) { |
| 226 | unmapCallback(virt, size); | 245 | unmap_callback(virt, size); |
| 246 | } | ||
| 227 | 247 | ||
| 228 | return; // The previous block is mapped and ends before | 248 | // The previous block is mapped and ends before |
| 249 | return; | ||
| 229 | } | 250 | } |
| 230 | } | 251 | } |
| 231 | 252 | ||
| 232 | // Walk the block vector to find the start predecessor as this is more efficient than another | 253 | // Walk the block vector to find the start predecessor as this is more efficient than another |
| 233 | // binary search in most scenarios | 254 | // binary search in most scenarios |
| 234 | auto blockStartPredecessor{walkBackToPredecessor(blockEndSuccessor)}; | 255 | auto block_start_predecessor{walk_back_to_predecessor(block_end_successor)}; |
| 235 | auto blockStartSuccessor{std::next(blockStartPredecessor)}; | 256 | auto block_start_successor{std::next(block_start_predecessor)}; |
| 236 | 257 | ||
| 237 | if (blockStartSuccessor->virt > virtEnd) { | 258 | if (block_start_successor->virt > virt_end) { |
| 238 | UNREACHABLE_MSG("Unsorted block in AS map: virt: 0x{:X}", blockStartSuccessor->virt); | 259 | UNREACHABLE_MSG("Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt); |
| 239 | } else if (blockStartSuccessor->virt == virtEnd) { | 260 | } else if (block_start_successor->virt == virt_end) { |
| 240 | // There are no blocks between the start and the end that would let us skip inserting a new | 261 | // There are no blocks between the start and the end that would let us skip inserting a new |
| 241 | // one for head | 262 | // one for head |
| 242 | 263 | ||
| 243 | // The previous block is may be unmapped, if so we don't need to insert any unmaps after it | 264 | // The previous block is may be unmapped, if so we don't need to insert any unmaps after it |
| 244 | if (blockStartPredecessor->Mapped()) | 265 | if (block_start_predecessor->Mapped()) { |
| 245 | blocks.insert(blockStartSuccessor, Block(virt, UnmappedPa, {})); | 266 | blocks.insert(block_start_successor, Block(virt, UnmappedPa, {})); |
| 246 | } else if (blockStartPredecessor->Unmapped()) { | 267 | } |
| 268 | } else if (block_start_predecessor->Unmapped()) { | ||
| 247 | // If the previous block is unmapped | 269 | // If the previous block is unmapped |
| 248 | blocks.erase(blockStartSuccessor, blockEndPredecessor); | 270 | blocks.erase(block_start_successor, block_end_predecessor); |
| 249 | } else { | 271 | } else { |
| 250 | // Erase overwritten blocks, skipping the first one as we have written the unmapped start | 272 | // Erase overwritten blocks, skipping the first one as we have written the unmapped start |
| 251 | // block there | 273 | // block there |
| 252 | if (auto eraseStart{std::next(blockStartSuccessor)}; eraseStart != blockEndSuccessor) | 274 | if (auto eraseStart{std::next(block_start_successor)}; eraseStart != block_end_successor) { |
| 253 | blocks.erase(eraseStart, blockEndSuccessor); | 275 | blocks.erase(eraseStart, block_end_successor); |
| 276 | } | ||
| 254 | 277 | ||
| 255 | // Add in the unmapped block header | 278 | // Add in the unmapped block header |
| 256 | blockStartSuccessor->virt = virt; | 279 | block_start_successor->virt = virt; |
| 257 | blockStartSuccessor->phys = UnmappedPa; | 280 | block_start_successor->phys = UnmappedPa; |
| 258 | } | 281 | } |
| 259 | 282 | ||
| 260 | if (unmapCallback) | 283 | if (unmap_callback) |
| 261 | unmapCallback(virt, size); | 284 | unmap_callback(virt, size); |
| 262 | } | 285 | } |
| 263 | 286 | ||
| 264 | ALLOC_MEMBER_CONST()::FlatAllocator(VaType vaStart_, VaType vaLimit_) | 287 | ALLOC_MEMBER_CONST()::FlatAllocator(VaType va_start_, VaType va_limit_) |
| 265 | : Base(vaLimit_), currentLinearAllocEnd(vaStart_), vaStart(vaStart_) {} | 288 | : Base{va_limit_}, va_start{va_start_}, current_linear_alloc_end{va_start_} {} |
| 266 | 289 | ||
| 267 | ALLOC_MEMBER(VaType)::Allocate(VaType size) { | 290 | ALLOC_MEMBER(VaType)::Allocate(VaType size) { |
| 268 | std::scoped_lock lock(this->blockMutex); | 291 | std::scoped_lock lock(this->block_mutex); |
| 269 | 292 | ||
| 270 | VaType allocStart{UnmappedVa}; | 293 | VaType alloc_start{UnmappedVa}; |
| 271 | VaType allocEnd{currentLinearAllocEnd + size}; | 294 | VaType alloc_end{current_linear_alloc_end + size}; |
| 272 | 295 | ||
| 273 | // Avoid searching backwards in the address space if possible | 296 | // Avoid searching backwards in the address space if possible |
| 274 | if (allocEnd >= currentLinearAllocEnd && allocEnd <= this->vaLimit) { | 297 | if (alloc_end >= current_linear_alloc_end && alloc_end <= this->va_limit) { |
| 275 | auto allocEndSuccessor{ | 298 | auto alloc_end_successor{ |
| 276 | std::lower_bound(this->blocks.begin(), this->blocks.end(), allocEnd)}; | 299 | std::lower_bound(this->blocks.begin(), this->blocks.end(), alloc_end)}; |
| 277 | if (allocEndSuccessor == this->blocks.begin()) | 300 | if (alloc_end_successor == this->blocks.begin()) { |
| 278 | UNREACHABLE_MSG("First block in AS map is invalid!"); | 301 | UNREACHABLE_MSG("First block in AS map is invalid!"); |
| 302 | } | ||
| 279 | 303 | ||
| 280 | auto allocEndPredecessor{std::prev(allocEndSuccessor)}; | 304 | auto alloc_end_predecessor{std::prev(alloc_end_successor)}; |
| 281 | if (allocEndPredecessor->virt <= currentLinearAllocEnd) { | 305 | if (alloc_end_predecessor->virt <= current_linear_alloc_end) { |
| 282 | allocStart = currentLinearAllocEnd; | 306 | alloc_start = current_linear_alloc_end; |
| 283 | } else { | 307 | } else { |
| 284 | // Skip over fixed any mappings in front of us | 308 | // Skip over fixed any mappings in front of us |
| 285 | while (allocEndSuccessor != this->blocks.end()) { | 309 | while (alloc_end_successor != this->blocks.end()) { |
| 286 | if (allocEndSuccessor->virt - allocEndPredecessor->virt < size || | 310 | if (alloc_end_successor->virt - alloc_end_predecessor->virt < size || |
| 287 | allocEndPredecessor->Mapped()) { | 311 | alloc_end_predecessor->Mapped()) { |
| 288 | allocStart = allocEndPredecessor->virt; | 312 | alloc_start = alloc_end_predecessor->virt; |
| 289 | break; | 313 | break; |
| 290 | } | 314 | } |
| 291 | 315 | ||
| 292 | allocEndPredecessor = allocEndSuccessor++; | 316 | alloc_end_predecessor = alloc_end_successor++; |
| 293 | 317 | ||
| 294 | // Use the VA limit to calculate if we can fit in the final block since it has no | 318 | // Use the VA limit to calculate if we can fit in the final block since it has no |
| 295 | // successor | 319 | // successor |
| 296 | if (allocEndSuccessor == this->blocks.end()) { | 320 | if (alloc_end_successor == this->blocks.end()) { |
| 297 | allocEnd = allocEndPredecessor->virt + size; | 321 | alloc_end = alloc_end_predecessor->virt + size; |
| 298 | 322 | ||
| 299 | if (allocEnd >= allocEndPredecessor->virt && allocEnd <= this->vaLimit) | 323 | if (alloc_end >= alloc_end_predecessor->virt && alloc_end <= this->va_limit) { |
| 300 | allocStart = allocEndPredecessor->virt; | 324 | alloc_start = alloc_end_predecessor->virt; |
| 325 | } | ||
| 301 | } | 326 | } |
| 302 | } | 327 | } |
| 303 | } | 328 | } |
| 304 | } | 329 | } |
| 305 | 330 | ||
| 306 | if (allocStart != UnmappedVa) { | 331 | if (alloc_start != UnmappedVa) { |
| 307 | currentLinearAllocEnd = allocStart + size; | 332 | current_linear_alloc_end = alloc_start + size; |
| 308 | } else { // If linear allocation overflows the AS then find a gap | 333 | } else { // If linear allocation overflows the AS then find a gap |
| 309 | if (this->blocks.size() <= 2) | 334 | if (this->blocks.size() <= 2) { |
| 310 | UNREACHABLE_MSG("Unexpected allocator state!"); | 335 | UNREACHABLE_MSG("Unexpected allocator state!"); |
| 336 | } | ||
| 311 | 337 | ||
| 312 | auto searchPredecessor{this->blocks.begin()}; | 338 | auto search_predecessor{this->blocks.begin()}; |
| 313 | auto searchSuccessor{std::next(searchPredecessor)}; | 339 | auto search_successor{std::next(search_predecessor)}; |
| 314 | 340 | ||
| 315 | while (searchSuccessor != this->blocks.end() && | 341 | while (search_successor != this->blocks.end() && |
| 316 | (searchSuccessor->virt - searchPredecessor->virt < size || | 342 | (search_successor->virt - search_predecessor->virt < size || |
| 317 | searchPredecessor->Mapped())) { | 343 | search_predecessor->Mapped())) { |
| 318 | searchPredecessor = searchSuccessor++; | 344 | search_predecessor = search_successor++; |
| 319 | } | 345 | } |
| 320 | 346 | ||
| 321 | if (searchSuccessor != this->blocks.end()) | 347 | if (search_successor != this->blocks.end()) { |
| 322 | allocStart = searchPredecessor->virt; | 348 | alloc_start = search_predecessor->virt; |
| 323 | else | 349 | } else { |
| 324 | return {}; // AS is full | 350 | return {}; // AS is full |
| 351 | } | ||
| 325 | } | 352 | } |
| 326 | 353 | ||
| 327 | this->MapLocked(allocStart, true, size, {}); | 354 | this->MapLocked(alloc_start, true, size, {}); |
| 328 | return allocStart; | 355 | return alloc_start; |
| 329 | } | 356 | } |
| 330 | 357 | ||
| 331 | ALLOC_MEMBER(void)::AllocateFixed(VaType virt, VaType size) { | 358 | ALLOC_MEMBER(void)::AllocateFixed(VaType virt, VaType size) { |
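Similarly, a hedged sketch of the renamed FlatAllocator interface as callers now see it; the 40-bit address space, start address, and sizes are assumptions for illustration, not values taken from the codebase.

    #include "common/address_space.h"

    // Hypothetical allocator: u64 VAs, 0 as the unmapped marker, 40-bit AS.
    using ExampleAllocator = Common::FlatAllocator<u64, 0, 40>;

    u64 ExampleAllocatorUsage() {
        // Allocations start at 64 KiB; va_limit defaults to Base::VaMaximum.
        ExampleAllocator allocator{/*va_start=*/0x10000};

        // Allocate() returns an empty (zero) VA here if the address space is full.
        const u64 va = allocator.Allocate(0x4000);
        if (va != 0) {
            allocator.Free(va, 0x4000);
        }

        // Callers such as nvhost_as_gpu now go through accessors instead of the
        // old public vaStart/vaLimit members.
        return allocator.GetVALimit() - allocator.GetVAStart();
    }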
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
index b48f7fcaf..7a95f5305 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
@@ -472,16 +472,16 @@ void nvhost_as_gpu::GetVARegionsImpl(IoctlGetVaRegions& params) {
| 472 | 472 | ||
| 473 | params.regions = std::array<VaRegion, 2>{ | 473 | params.regions = std::array<VaRegion, 2>{ |
| 474 | VaRegion{ | 474 | VaRegion{ |
| 475 | .offset = vm.small_page_allocator->vaStart << VM::PAGE_SIZE_BITS, | 475 | .offset = vm.small_page_allocator->GetVAStart() << VM::PAGE_SIZE_BITS, |
| 476 | .page_size = VM::YUZU_PAGESIZE, | 476 | .page_size = VM::YUZU_PAGESIZE, |
| 477 | ._pad0_{}, | 477 | ._pad0_{}, |
| 478 | .pages = vm.small_page_allocator->vaLimit - vm.small_page_allocator->vaStart, | 478 | .pages = vm.small_page_allocator->GetVALimit() - vm.small_page_allocator->GetVAStart(), |
| 479 | }, | 479 | }, |
| 480 | VaRegion{ | 480 | VaRegion{ |
| 481 | .offset = vm.big_page_allocator->vaStart << vm.big_page_size_bits, | 481 | .offset = vm.big_page_allocator->GetVAStart() << vm.big_page_size_bits, |
| 482 | .page_size = vm.big_page_size, | 482 | .page_size = vm.big_page_size, |
| 483 | ._pad0_{}, | 483 | ._pad0_{}, |
| 484 | .pages = vm.big_page_allocator->vaLimit - vm.big_page_allocator->vaStart, | 484 | .pages = vm.big_page_allocator->GetVALimit() - vm.big_page_allocator->GetVAStart(), |
| 485 | }, | 485 | }, |
| 486 | }; | 486 | }; |
| 487 | } | 487 | } |