| author | 2022-10-06 21:29:53 +0200 |
| committer | 2022-10-06 21:29:53 +0200 |
| commit | 1effa578f12f79d7816e3543291f302f126cc1d2 (patch) |
| tree | 14803b31b6817294d40d57446f6fa94c5ff3fe9a /src |
| parent | Merge pull request #9025 from FernandoS27/slava-ukrayini (diff) |
| parent | vulkan_blitter: Fix pool allocation double free. (diff) |
| download | yuzu-1effa578f12f79d7816e3543291f302f126cc1d2.tar.gz yuzu-1effa578f12f79d7816e3543291f302f126cc1d2.tar.xz yuzu-1effa578f12f79d7816e3543291f302f126cc1d2.zip |
Merge pull request #8467 from FernandoS27/yfc-rel-1
Project yuzu Fried Chicken (Y.F.C.) Part 1
Diffstat (limited to 'src')
169 files changed, 6498 insertions, 3194 deletions
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 54de1dc94..3575a3cb3 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt | |||
| @@ -121,6 +121,7 @@ else() | |||
| 121 | 121 | ||
| 122 | if (ARCHITECTURE_x86_64) | 122 | if (ARCHITECTURE_x86_64) |
| 123 | add_compile_options("-mcx16") | 123 | add_compile_options("-mcx16") |
| 124 | add_compile_options("-fwrapv") | ||
| 124 | endif() | 125 | endif() |
| 125 | 126 | ||
| 126 | if (APPLE AND CMAKE_CXX_COMPILER_ID STREQUAL Clang) | 127 | if (APPLE AND CMAKE_CXX_COMPILER_ID STREQUAL Clang) |
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt index 3447fabd8..a02696873 100644 --- a/src/common/CMakeLists.txt +++ b/src/common/CMakeLists.txt | |||
| @@ -17,6 +17,8 @@ endif () | |||
| 17 | include(GenerateSCMRev) | 17 | include(GenerateSCMRev) |
| 18 | 18 | ||
| 19 | add_library(common STATIC | 19 | add_library(common STATIC |
| 20 | address_space.cpp | ||
| 21 | address_space.h | ||
| 20 | algorithm.h | 22 | algorithm.h |
| 21 | alignment.h | 23 | alignment.h |
| 22 | announce_multiplayer_room.h | 24 | announce_multiplayer_room.h |
| @@ -81,6 +83,8 @@ add_library(common STATIC | |||
| 81 | microprofile.cpp | 83 | microprofile.cpp |
| 82 | microprofile.h | 84 | microprofile.h |
| 83 | microprofileui.h | 85 | microprofileui.h |
| 86 | multi_level_page_table.cpp | ||
| 87 | multi_level_page_table.h | ||
| 84 | nvidia_flags.cpp | 88 | nvidia_flags.cpp |
| 85 | nvidia_flags.h | 89 | nvidia_flags.h |
| 86 | page_table.cpp | 90 | page_table.cpp |
diff --git a/src/common/address_space.cpp b/src/common/address_space.cpp new file mode 100644 index 000000000..866e78dbe --- /dev/null +++ b/src/common/address_space.cpp | |||
| @@ -0,0 +1,10 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2021 Skyline Team and Contributors | ||
| 2 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | |||
| 4 | #include "common/address_space.inc" | ||
| 5 | |||
| 6 | namespace Common { | ||
| 7 | |||
| 8 | template class Common::FlatAllocator<u32, 0, 32>; | ||
| 9 | |||
| 10 | } | ||
diff --git a/src/common/address_space.h b/src/common/address_space.h new file mode 100644 index 000000000..9222b2fdc --- /dev/null +++ b/src/common/address_space.h | |||
| @@ -0,0 +1,150 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2021 Skyline Team and Contributors | ||
| 2 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <concepts> | ||
| 7 | #include <functional> | ||
| 8 | #include <mutex> | ||
| 9 | #include <vector> | ||
| 10 | |||
| 11 | #include "common/common_types.h" | ||
| 12 | |||
| 13 | namespace Common { | ||
| 14 | template <typename VaType, size_t AddressSpaceBits> | ||
| 15 | concept AddressSpaceValid = std::is_unsigned_v<VaType> && sizeof(VaType) * 8 >= AddressSpaceBits; | ||
| 16 | |||
| 17 | struct EmptyStruct {}; | ||
| 18 | |||
| 19 | /** | ||
| 20 | * @brief FlatAddressSpaceMap provides a generic VA->PA mapping implementation using a sorted vector | ||
| 21 | */ | ||
| 22 | template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa, | ||
| 23 | bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo = EmptyStruct> | ||
| 24 | requires AddressSpaceValid<VaType, AddressSpaceBits> | ||
| 25 | class FlatAddressSpaceMap { | ||
| 26 | public: | ||
| 27 | /// The maximum VA that this AS can technically reach | ||
| 28 | static constexpr VaType VaMaximum{(1ULL << (AddressSpaceBits - 1)) + | ||
| 29 | ((1ULL << (AddressSpaceBits - 1)) - 1)}; | ||
| 30 | |||
| 31 | explicit FlatAddressSpaceMap(VaType va_limit, | ||
| 32 | std::function<void(VaType, VaType)> unmap_callback = {}); | ||
| 33 | |||
| 34 | FlatAddressSpaceMap() = default; | ||
| 35 | |||
| 36 | void Map(VaType virt, PaType phys, VaType size, ExtraBlockInfo extra_info = {}) { | ||
| 37 | std::scoped_lock lock(block_mutex); | ||
| 38 | MapLocked(virt, phys, size, extra_info); | ||
| 39 | } | ||
| 40 | |||
| 41 | void Unmap(VaType virt, VaType size) { | ||
| 42 | std::scoped_lock lock(block_mutex); | ||
| 43 | UnmapLocked(virt, size); | ||
| 44 | } | ||
| 45 | |||
| 46 | VaType GetVALimit() const { | ||
| 47 | return va_limit; | ||
| 48 | } | ||
| 49 | |||
| 50 | protected: | ||
| 51 | /** | ||
| 52 | * @brief Represents a block of memory in the AS, the physical mapping is contiguous until | ||
| 53 | * another block with a different phys address is hit | ||
| 54 | */ | ||
| 55 | struct Block { | ||
| 56 | /// VA of the block | ||
| 57 | VaType virt{UnmappedVa}; | ||
| 58 | /// PA of the block, will increase 1-1 with VA until a new block is encountered | ||
| 59 | PaType phys{UnmappedPa}; | ||
| 60 | [[no_unique_address]] ExtraBlockInfo extra_info; | ||
| 61 | |||
| 62 | Block() = default; | ||
| 63 | |||
| 64 | Block(VaType virt_, PaType phys_, ExtraBlockInfo extra_info_) | ||
| 65 | : virt(virt_), phys(phys_), extra_info(extra_info_) {} | ||
| 66 | |||
| 67 | bool Valid() const { | ||
| 68 | return virt != UnmappedVa; | ||
| 69 | } | ||
| 70 | |||
| 71 | bool Mapped() const { | ||
| 72 | return phys != UnmappedPa; | ||
| 73 | } | ||
| 74 | |||
| 75 | bool Unmapped() const { | ||
| 76 | return phys == UnmappedPa; | ||
| 77 | } | ||
| 78 | |||
| 79 | bool operator<(const VaType& p_virt) const { | ||
| 80 | return virt < p_virt; | ||
| 81 | } | ||
| 82 | }; | ||
| 83 | |||
| 84 | /** | ||
| 85 | * @brief Maps a PA range into the given AS region | ||
| 86 | * @note block_mutex MUST be locked when calling this | ||
| 87 | */ | ||
| 88 | void MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInfo extra_info); | ||
| 89 | |||
| 90 | /** | ||
| 91 | * @brief Unmaps the given range and merges it with other unmapped regions | ||
| 92 | * @note block_mutex MUST be locked when calling this | ||
| 93 | */ | ||
| 94 | void UnmapLocked(VaType virt, VaType size); | ||
| 95 | |||
| 96 | std::mutex block_mutex; | ||
| 97 | std::vector<Block> blocks{Block{}}; | ||
| 98 | |||
| 99 | /// a soft limit on the maximum VA of the AS | ||
| 100 | VaType va_limit{VaMaximum}; | ||
| 101 | |||
| 102 | private: | ||
| 103 | /// Callback called when the mappings in a region have changed | ||
| 104 | std::function<void(VaType, VaType)> unmap_callback{}; | ||
| 105 | }; | ||
| 106 | |||
| 107 | /** | ||
| 108 | * @brief FlatAllocator specialises FlatAddressSpaceMap to work as an allocator, with an | ||
| 109 | * initial, fast linear pass and a subsequent slower pass that iterates until it finds a free block | ||
| 110 | */ | ||
| 111 | template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits> | ||
| 112 | requires AddressSpaceValid<VaType, AddressSpaceBits> | ||
| 113 | class FlatAllocator | ||
| 114 | : public FlatAddressSpaceMap<VaType, UnmappedVa, bool, false, false, AddressSpaceBits> { | ||
| 115 | private: | ||
| 116 | using Base = FlatAddressSpaceMap<VaType, UnmappedVa, bool, false, false, AddressSpaceBits>; | ||
| 117 | |||
| 118 | public: | ||
| 119 | explicit FlatAllocator(VaType virt_start, VaType va_limit = Base::VaMaximum); | ||
| 120 | |||
| 121 | /** | ||
| 122 | * @brief Allocates a region in the AS of the given size and returns its address | ||
| 123 | */ | ||
| 124 | VaType Allocate(VaType size); | ||
| 125 | |||
| 126 | /** | ||
| 127 | * @brief Marks the given region in the AS as allocated | ||
| 128 | */ | ||
| 129 | void AllocateFixed(VaType virt, VaType size); | ||
| 130 | |||
| 131 | /** | ||
| 132 | * @brief Frees an AS region so it can be used again | ||
| 133 | */ | ||
| 134 | void Free(VaType virt, VaType size); | ||
| 135 | |||
| 136 | VaType GetVAStart() const { | ||
| 137 | return virt_start; | ||
| 138 | } | ||
| 139 | |||
| 140 | private: | ||
| 141 | /// The base VA of the allocator, no allocations will be below this | ||
| 142 | VaType virt_start; | ||
| 143 | |||
| 144 | /** | ||
| 145 | * The end address for the initial linear allocation pass | ||
| 146 | * Once this reaches the AS limit the slower allocation path will be used | ||
| 147 | */ | ||
| 148 | VaType current_linear_alloc_end; | ||
| 149 | }; | ||
| 150 | } // namespace Common | ||
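For orientation, here is a minimal usage sketch of the allocator interface declared above. It assumes the FlatAllocator<u32, 0, 32> instantiation provided by address_space.cpp; the start address and sizes are illustrative only.

    #include "common/address_space.h"

    // Illustrative sketch: exercise the 32-bit allocator instantiated in address_space.cpp.
    void AllocatorSketch() {
        Common::FlatAllocator<u32, 0, 32> allocator{0x1000};  // allocations begin at VA 0x1000
        const u32 va = allocator.Allocate(0x10000);           // linear pass first, gap search once it overflows
        allocator.AllocateFixed(0x200000, 0x4000);            // mark a caller-chosen range as in use
        allocator.Free(va, 0x10000);                          // return the region to the allocator
    }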
diff --git a/src/common/address_space.inc b/src/common/address_space.inc new file mode 100644 index 000000000..2195dabd5 --- /dev/null +++ b/src/common/address_space.inc | |||
| @@ -0,0 +1,366 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2021 Skyline Team and Contributors | ||
| 2 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | |||
| 4 | #include "common/address_space.h" | ||
| 5 | #include "common/assert.h" | ||
| 6 | |||
| 7 | #define MAP_MEMBER(returnType) \ | ||
| 8 | template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa, \ | ||
| 9 | bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo> \ | ||
| 10 | requires AddressSpaceValid<VaType, AddressSpaceBits> returnType FlatAddressSpaceMap< \ | ||
| 11 | VaType, UnmappedVa, PaType, UnmappedPa, PaContigSplit, AddressSpaceBits, ExtraBlockInfo> | ||
| 12 | #define MAP_MEMBER_CONST() \ | ||
| 13 | template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa, \ | ||
| 14 | bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo> \ | ||
| 15 | requires AddressSpaceValid<VaType, AddressSpaceBits> FlatAddressSpaceMap< \ | ||
| 16 | VaType, UnmappedVa, PaType, UnmappedPa, PaContigSplit, AddressSpaceBits, ExtraBlockInfo> | ||
| 17 | |||
| 18 | #define MM_MEMBER(returnType) \ | ||
| 19 | template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits> \ | ||
| 20 | requires AddressSpaceValid<VaType, AddressSpaceBits> returnType \ | ||
| 21 | FlatMemoryManager<VaType, UnmappedVa, AddressSpaceBits> | ||
| 22 | |||
| 23 | #define ALLOC_MEMBER(returnType) \ | ||
| 24 | template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits> \ | ||
| 25 | requires AddressSpaceValid<VaType, AddressSpaceBits> returnType \ | ||
| 26 | FlatAllocator<VaType, UnmappedVa, AddressSpaceBits> | ||
| 27 | #define ALLOC_MEMBER_CONST() \ | ||
| 28 | template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits> \ | ||
| 29 | requires AddressSpaceValid<VaType, AddressSpaceBits> \ | ||
| 30 | FlatAllocator<VaType, UnmappedVa, AddressSpaceBits> | ||
| 31 | |||
| 32 | namespace Common { | ||
| 33 | MAP_MEMBER_CONST()::FlatAddressSpaceMap(VaType va_limit_, | ||
| 34 | std::function<void(VaType, VaType)> unmap_callback_) | ||
| 35 | : va_limit{va_limit_}, unmap_callback{std::move(unmap_callback_)} { | ||
| 36 | if (va_limit > VaMaximum) { | ||
| 37 | ASSERT_MSG(false, "Invalid VA limit!"); | ||
| 38 | } | ||
| 39 | } | ||
| 40 | |||
| 41 | MAP_MEMBER(void)::MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInfo extra_info) { | ||
| 42 | VaType virt_end{virt + size}; | ||
| 43 | |||
| 44 | if (virt_end > va_limit) { | ||
| 45 | ASSERT_MSG(false, | ||
| 46 | "Trying to map a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}", | ||
| 47 | virt_end, va_limit); | ||
| 48 | } | ||
| 49 | |||
| 50 | auto block_end_successor{std::lower_bound(blocks.begin(), blocks.end(), virt_end)}; | ||
| 51 | if (block_end_successor == blocks.begin()) { | ||
| 52 | ASSERT_MSG(false, "Trying to map a block before the VA start: virt_end: 0x{:X}", virt_end); | ||
| 53 | } | ||
| 54 | |||
| 55 | auto block_end_predecessor{std::prev(block_end_successor)}; | ||
| 56 | |||
| 57 | if (block_end_successor != blocks.end()) { | ||
| 58 | // We have blocks in front of us, if one is directly in front then we don't have to add a | ||
| 59 | // tail | ||
| 60 | if (block_end_successor->virt != virt_end) { | ||
| 61 | PaType tailPhys{[&]() -> PaType { | ||
| 62 | if constexpr (!PaContigSplit) { | ||
| 63 | // Always propagate unmapped regions rather than calculating offset | ||
| 64 | return block_end_predecessor->phys; | ||
| 65 | } else { | ||
| 66 | if (block_end_predecessor->Unmapped()) { | ||
| 67 | // Always propagate unmapped regions rather than calculating offset | ||
| 68 | return block_end_predecessor->phys; | ||
| 69 | } else { | ||
| 70 | return block_end_predecessor->phys + virt_end - block_end_predecessor->virt; | ||
| 71 | } | ||
| 72 | } | ||
| 73 | }()}; | ||
| 74 | |||
| 75 | if (block_end_predecessor->virt >= virt) { | ||
| 76 | // If this block's start would be overlapped by the map then reuse it as a tail | ||
| 77 | // block | ||
| 78 | block_end_predecessor->virt = virt_end; | ||
| 79 | block_end_predecessor->phys = tailPhys; | ||
| 80 | block_end_predecessor->extra_info = block_end_predecessor->extra_info; | ||
| 81 | |||
| 82 | // No longer predecessor anymore | ||
| 83 | block_end_successor = block_end_predecessor--; | ||
| 84 | } else { | ||
| 85 | // Else insert a new one and we're done | ||
| 86 | blocks.insert(block_end_successor, | ||
| 87 | {Block(virt, phys, extra_info), | ||
| 88 | Block(virt_end, tailPhys, block_end_predecessor->extra_info)}); | ||
| 89 | if (unmap_callback) { | ||
| 90 | unmap_callback(virt, size); | ||
| 91 | } | ||
| 92 | |||
| 93 | return; | ||
| 94 | } | ||
| 95 | } | ||
| 96 | } else { | ||
| 97 | // block_end_predecessor will always be unmapped as the block list has to be terminated | ||
| 98 | // by an unmapped chunk | ||
| 99 | if (block_end_predecessor != blocks.begin() && block_end_predecessor->virt >= virt) { | ||
| 100 | // Move the unmapped block start backwards | ||
| 101 | block_end_predecessor->virt = virt_end; | ||
| 102 | |||
| 103 | // No longer predecessor anymore | ||
| 104 | block_end_successor = block_end_predecessor--; | ||
| 105 | } else { | ||
| 106 | // Else insert a new one and we're done | ||
| 107 | blocks.insert(block_end_successor, | ||
| 108 | {Block(virt, phys, extra_info), Block(virt_end, UnmappedPa, {})}); | ||
| 109 | if (unmap_callback) { | ||
| 110 | unmap_callback(virt, size); | ||
| 111 | } | ||
| 112 | |||
| 113 | return; | ||
| 114 | } | ||
| 115 | } | ||
| 116 | |||
| 117 | auto block_start_successor{block_end_successor}; | ||
| 118 | |||
| 119 | // Walk the block vector to find the start successor as this is more efficient than another | ||
| 120 | // binary search in most scenarios | ||
| 121 | while (std::prev(block_start_successor)->virt >= virt) { | ||
| 122 | block_start_successor--; | ||
| 123 | } | ||
| 124 | |||
| 125 | // Check that the start successor is either the end block or something in between | ||
| 126 | if (block_start_successor->virt > virt_end) { | ||
| 127 | ASSERT_MSG(false, "Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt); | ||
| 128 | } else if (block_start_successor->virt == virt_end) { | ||
| 129 | // We need to create a new block as there are none spare that we would overwrite | ||
| 130 | blocks.insert(block_start_successor, Block(virt, phys, extra_info)); | ||
| 131 | } else { | ||
| 132 | // Erase overwritten blocks | ||
| 133 | if (auto eraseStart{std::next(block_start_successor)}; eraseStart != block_end_successor) { | ||
| 134 | blocks.erase(eraseStart, block_end_successor); | ||
| 135 | } | ||
| 136 | |||
| 137 | // Reuse a block that would otherwise be overwritten as a start block | ||
| 138 | block_start_successor->virt = virt; | ||
| 139 | block_start_successor->phys = phys; | ||
| 140 | block_start_successor->extra_info = extra_info; | ||
| 141 | } | ||
| 142 | |||
| 143 | if (unmap_callback) { | ||
| 144 | unmap_callback(virt, size); | ||
| 145 | } | ||
| 146 | } | ||
| 147 | |||
| 148 | MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) { | ||
| 149 | VaType virt_end{virt + size}; | ||
| 150 | |||
| 151 | if (virt_end > va_limit) { | ||
| 152 | ASSERT_MSG(false, | ||
| 153 | "Trying to map a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}", | ||
| 154 | virt_end, va_limit); | ||
| 155 | } | ||
| 156 | |||
| 157 | auto block_end_successor{std::lower_bound(blocks.begin(), blocks.end(), virt_end)}; | ||
| 158 | if (block_end_successor == blocks.begin()) { | ||
| 159 | ASSERT_MSG(false, "Trying to unmap a block before the VA start: virt_end: 0x{:X}", | ||
| 160 | virt_end); | ||
| 161 | } | ||
| 162 | |||
| 163 | auto block_end_predecessor{std::prev(block_end_successor)}; | ||
| 164 | |||
| 165 | auto walk_back_to_predecessor{[&](auto iter) { | ||
| 166 | while (iter->virt >= virt) { | ||
| 167 | iter--; | ||
| 168 | } | ||
| 169 | |||
| 170 | return iter; | ||
| 171 | }}; | ||
| 172 | |||
| 173 | auto erase_blocks_with_end_unmapped{[&](auto unmappedEnd) { | ||
| 174 | auto block_start_predecessor{walk_back_to_predecessor(unmappedEnd)}; | ||
| 175 | auto block_start_successor{std::next(block_start_predecessor)}; | ||
| 176 | |||
| 177 | auto eraseEnd{[&]() { | ||
| 178 | if (block_start_predecessor->Unmapped()) { | ||
| 179 | // If the start predecessor is unmapped then we can erase everything in our region | ||
| 180 | // and be done | ||
| 181 | return std::next(unmappedEnd); | ||
| 182 | } else { | ||
| 183 | // Else reuse the end predecessor as the start of our unmapped region then erase all | ||
| 184 | // up to it | ||
| 185 | unmappedEnd->virt = virt; | ||
| 186 | return unmappedEnd; | ||
| 187 | } | ||
| 188 | }()}; | ||
| 189 | |||
| 190 | // We can't have two unmapped regions after each other | ||
| 191 | if (eraseEnd != blocks.end() && | ||
| 192 | (eraseEnd == block_start_successor || | ||
| 193 | (block_start_predecessor->Unmapped() && eraseEnd->Unmapped()))) { | ||
| 194 | ASSERT_MSG(false, "Multiple contiguous unmapped regions are unsupported!"); | ||
| 195 | } | ||
| 196 | |||
| 197 | blocks.erase(block_start_successor, eraseEnd); | ||
| 198 | }}; | ||
| 199 | |||
| 200 | // We can avoid any splitting logic if these are the case | ||
| 201 | if (block_end_predecessor->Unmapped()) { | ||
| 202 | if (block_end_predecessor->virt > virt) { | ||
| 203 | erase_blocks_with_end_unmapped(block_end_predecessor); | ||
| 204 | } | ||
| 205 | |||
| 206 | if (unmap_callback) { | ||
| 207 | unmap_callback(virt, size); | ||
| 208 | } | ||
| 209 | |||
| 210 | return; // The region is unmapped, bail out early | ||
| 211 | } else if (block_end_successor->virt == virt_end && block_end_successor->Unmapped()) { | ||
| 212 | erase_blocks_with_end_unmapped(block_end_successor); | ||
| 213 | |||
| 214 | if (unmap_callback) { | ||
| 215 | unmap_callback(virt, size); | ||
| 216 | } | ||
| 217 | |||
| 218 | return; // The region is unmapped here and doesn't need splitting, bail out early | ||
| 219 | } else if (block_end_successor == blocks.end()) { | ||
| 220 | // This should never happen, as the block list should always be terminated by an unmapped block | ||
| 221 | ASSERT_MSG(false, "Unexpected Memory Manager state!"); | ||
| 222 | } else if (block_end_successor->virt != virt_end) { | ||
| 223 | // If one block is directly in front then we don't have to add a tail | ||
| 224 | |||
| 225 | // The previous block is mapped so we will need to add a tail with an offset | ||
| 226 | PaType tailPhys{[&]() { | ||
| 227 | if constexpr (PaContigSplit) { | ||
| 228 | return block_end_predecessor->phys + virt_end - block_end_predecessor->virt; | ||
| 229 | } else { | ||
| 230 | return block_end_predecessor->phys; | ||
| 231 | } | ||
| 232 | }()}; | ||
| 233 | |||
| 234 | if (block_end_predecessor->virt >= virt) { | ||
| 235 | // If this block's start would be overlapped by the unmap then reuse it as a tail block | ||
| 236 | block_end_predecessor->virt = virt_end; | ||
| 237 | block_end_predecessor->phys = tailPhys; | ||
| 238 | |||
| 239 | // No longer predecessor anymore | ||
| 240 | block_end_successor = block_end_predecessor--; | ||
| 241 | } else { | ||
| 242 | blocks.insert(block_end_successor, | ||
| 243 | {Block(virt, UnmappedPa, {}), | ||
| 244 | Block(virt_end, tailPhys, block_end_predecessor->extra_info)}); | ||
| 245 | if (unmap_callback) { | ||
| 246 | unmap_callback(virt, size); | ||
| 247 | } | ||
| 248 | |||
| 249 | // The previous block is mapped and ends before the unmapped region, so we are done | ||
| 250 | return; | ||
| 251 | } | ||
| 252 | } | ||
| 253 | |||
| 254 | // Walk the block vector to find the start predecessor as this is more efficient than another | ||
| 255 | // binary search in most scenarios | ||
| 256 | auto block_start_predecessor{walk_back_to_predecessor(block_end_successor)}; | ||
| 257 | auto block_start_successor{std::next(block_start_predecessor)}; | ||
| 258 | |||
| 259 | if (block_start_successor->virt > virt_end) { | ||
| 260 | ASSERT_MSG(false, "Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt); | ||
| 261 | } else if (block_start_successor->virt == virt_end) { | ||
| 262 | // There are no blocks between the start and the end that would let us skip inserting a new | ||
| 263 | // one for head | ||
| 264 | |||
| 265 | // The previous block may be unmapped; if so we don't need to insert any unmaps after it | ||
| 266 | if (block_start_predecessor->Mapped()) { | ||
| 267 | blocks.insert(block_start_successor, Block(virt, UnmappedPa, {})); | ||
| 268 | } | ||
| 269 | } else if (block_start_predecessor->Unmapped()) { | ||
| 270 | // The previous block is already unmapped, so just erase the blocks in between | ||
| 271 | blocks.erase(block_start_successor, block_end_predecessor); | ||
| 272 | } else { | ||
| 273 | // Erase overwritten blocks, skipping the first one as we have written the unmapped start | ||
| 274 | // block there | ||
| 275 | if (auto eraseStart{std::next(block_start_successor)}; eraseStart != block_end_successor) { | ||
| 276 | blocks.erase(eraseStart, block_end_successor); | ||
| 277 | } | ||
| 278 | |||
| 279 | // Add in the unmapped block header | ||
| 280 | block_start_successor->virt = virt; | ||
| 281 | block_start_successor->phys = UnmappedPa; | ||
| 282 | } | ||
| 283 | |||
| 284 | if (unmap_callback) | ||
| 285 | unmap_callback(virt, size); | ||
| 286 | } | ||
| 287 | |||
| 288 | ALLOC_MEMBER_CONST()::FlatAllocator(VaType virt_start_, VaType va_limit_) | ||
| 289 | : Base{va_limit_}, virt_start{virt_start_}, current_linear_alloc_end{virt_start_} {} | ||
| 290 | |||
| 291 | ALLOC_MEMBER(VaType)::Allocate(VaType size) { | ||
| 292 | std::scoped_lock lock(this->block_mutex); | ||
| 293 | |||
| 294 | VaType alloc_start{UnmappedVa}; | ||
| 295 | VaType alloc_end{current_linear_alloc_end + size}; | ||
| 296 | |||
| 297 | // Avoid searching backwards in the address space if possible | ||
| 298 | if (alloc_end >= current_linear_alloc_end && alloc_end <= this->va_limit) { | ||
| 299 | auto alloc_end_successor{ | ||
| 300 | std::lower_bound(this->blocks.begin(), this->blocks.end(), alloc_end)}; | ||
| 301 | if (alloc_end_successor == this->blocks.begin()) { | ||
| 302 | ASSERT_MSG(false, "First block in AS map is invalid!"); | ||
| 303 | } | ||
| 304 | |||
| 305 | auto alloc_end_predecessor{std::prev(alloc_end_successor)}; | ||
| 306 | if (alloc_end_predecessor->virt <= current_linear_alloc_end) { | ||
| 307 | alloc_start = current_linear_alloc_end; | ||
| 308 | } else { | ||
| 309 | // Skip over any fixed mappings in front of us | ||
| 310 | while (alloc_end_successor != this->blocks.end()) { | ||
| 311 | if (alloc_end_successor->virt - alloc_end_predecessor->virt < size || | ||
| 312 | alloc_end_predecessor->Mapped()) { | ||
| 313 | alloc_start = alloc_end_predecessor->virt; | ||
| 314 | break; | ||
| 315 | } | ||
| 316 | |||
| 317 | alloc_end_predecessor = alloc_end_successor++; | ||
| 318 | |||
| 319 | // Use the VA limit to calculate if we can fit in the final block since it has no | ||
| 320 | // successor | ||
| 321 | if (alloc_end_successor == this->blocks.end()) { | ||
| 322 | alloc_end = alloc_end_predecessor->virt + size; | ||
| 323 | |||
| 324 | if (alloc_end >= alloc_end_predecessor->virt && alloc_end <= this->va_limit) { | ||
| 325 | alloc_start = alloc_end_predecessor->virt; | ||
| 326 | } | ||
| 327 | } | ||
| 328 | } | ||
| 329 | } | ||
| 330 | } | ||
| 331 | |||
| 332 | if (alloc_start != UnmappedVa) { | ||
| 333 | current_linear_alloc_end = alloc_start + size; | ||
| 334 | } else { // If linear allocation overflows the AS then find a gap | ||
| 335 | if (this->blocks.size() <= 2) { | ||
| 336 | ASSERT_MSG(false, "Unexpected allocator state!"); | ||
| 337 | } | ||
| 338 | |||
| 339 | auto search_predecessor{this->blocks.begin()}; | ||
| 340 | auto search_successor{std::next(search_predecessor)}; | ||
| 341 | |||
| 342 | while (search_successor != this->blocks.end() && | ||
| 343 | (search_successor->virt - search_predecessor->virt < size || | ||
| 344 | search_predecessor->Mapped())) { | ||
| 345 | search_predecessor = search_successor++; | ||
| 346 | } | ||
| 347 | |||
| 348 | if (search_successor != this->blocks.end()) { | ||
| 349 | alloc_start = search_predecessor->virt; | ||
| 350 | } else { | ||
| 351 | return {}; // AS is full | ||
| 352 | } | ||
| 353 | } | ||
| 354 | |||
| 355 | this->MapLocked(alloc_start, true, size, {}); | ||
| 356 | return alloc_start; | ||
| 357 | } | ||
| 358 | |||
| 359 | ALLOC_MEMBER(void)::AllocateFixed(VaType virt, VaType size) { | ||
| 360 | this->Map(virt, true, size); | ||
| 361 | } | ||
| 362 | |||
| 363 | ALLOC_MEMBER(void)::Free(VaType virt, VaType size) { | ||
| 364 | this->Unmap(virt, size); | ||
| 365 | } | ||
| 366 | } // namespace Common | ||
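To make the sorted-vector bookkeeping in MapLocked and UnmapLocked concrete, here is an illustrative trace (not part of the diff) assuming UnmappedVa = 0 and some mapped physical address pa:

    // Initial state: a single unmapped sentinel block.
    //   [ {virt=0x0, unmapped} ]
    // Map(0x1000, pa, 0x2000) inserts the mapped block plus an unmapped tail:
    //   [ {0x0, unmapped}, {0x1000, pa}, {0x3000, unmapped} ]
    // Unmap(0x1000, 0x2000) erases the mapped block and merges it back into the unmapped run:
    //   [ {0x0, unmapped} ]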
diff --git a/src/common/algorithm.h b/src/common/algorithm.h index 9ddfd637b..c27c9241d 100644 --- a/src/common/algorithm.h +++ b/src/common/algorithm.h | |||
| @@ -24,4 +24,12 @@ template <class ForwardIt, class T, class Compare = std::less<>> | |||
| 24 | return first != last && !comp(value, *first) ? first : last; | 24 | return first != last && !comp(value, *first) ? first : last; |
| 25 | } | 25 | } |
| 26 | 26 | ||
| 27 | template <typename T, typename Func, typename... Args> | ||
| 28 | T FoldRight(T initial_value, Func&& func, Args&&... args) { | ||
| 29 | T value{initial_value}; | ||
| 30 | const auto high_func = [&value, &func]<typename U>(U x) { value = func(value, x); }; | ||
| 31 | (std::invoke(high_func, std::forward<Args>(args)), ...); | ||
| 32 | return value; | ||
| 33 | } | ||
| 34 | |||
| 27 | } // namespace Common | 35 | } // namespace Common |
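A short, hypothetical usage sketch of the new FoldRight helper: it applies func to an accumulator and each trailing argument in turn, so the call below computes ((0 + a) + b) + c.

    #include "common/algorithm.h"
    #include "common/common_types.h"

    // Illustrative helper: folds three sizes into a total.
    u64 TotalSize(u64 a, u64 b, u64 c) {
        return Common::FoldRight(u64{0}, [](u64 acc, u64 x) { return acc + x; }, a, b, c);
    }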
diff --git a/src/common/hash.h b/src/common/hash.h index b6f3e6d6f..e8fe78b07 100644 --- a/src/common/hash.h +++ b/src/common/hash.h | |||
| @@ -18,4 +18,11 @@ struct PairHash { | |||
| 18 | } | 18 | } |
| 19 | }; | 19 | }; |
| 20 | 20 | ||
| 21 | template <typename T> | ||
| 22 | struct IdentityHash { | ||
| 23 | [[nodiscard]] size_t operator()(T value) const noexcept { | ||
| 24 | return static_cast<size_t>(value); | ||
| 25 | } | ||
| 26 | }; | ||
| 27 | |||
| 21 | } // namespace Common | 28 | } // namespace Common |
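A small, hypothetical example of IdentityHash: for keys that are already well-distributed integers (for example nvmap handle IDs), the key value itself can serve as the hash, skipping a rehash.

    #include <string>
    #include <unordered_map>

    #include "common/common_types.h"
    #include "common/hash.h"

    // Hypothetical table keyed directly on handle IDs.
    std::unordered_map<u32, std::string, Common::IdentityHash<u32>> handle_names;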
diff --git a/src/common/multi_level_page_table.cpp b/src/common/multi_level_page_table.cpp new file mode 100644 index 000000000..46e362f3b --- /dev/null +++ b/src/common/multi_level_page_table.cpp | |||
| @@ -0,0 +1,9 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2021 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #include "common/multi_level_page_table.inc" | ||
| 5 | |||
| 6 | namespace Common { | ||
| 7 | template class Common::MultiLevelPageTable<u64>; | ||
| 8 | template class Common::MultiLevelPageTable<u32>; | ||
| 9 | } // namespace Common | ||
diff --git a/src/common/multi_level_page_table.h b/src/common/multi_level_page_table.h new file mode 100644 index 000000000..31f6676a0 --- /dev/null +++ b/src/common/multi_level_page_table.h | |||
| @@ -0,0 +1,78 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2021 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <type_traits> | ||
| 7 | #include <utility> | ||
| 8 | #include <vector> | ||
| 9 | |||
| 10 | #include "common/common_types.h" | ||
| 11 | |||
| 12 | namespace Common { | ||
| 13 | |||
| 14 | template <typename BaseAddr> | ||
| 15 | class MultiLevelPageTable final { | ||
| 16 | public: | ||
| 17 | constexpr MultiLevelPageTable() = default; | ||
| 18 | explicit MultiLevelPageTable(std::size_t address_space_bits, std::size_t first_level_bits, | ||
| 19 | std::size_t page_bits); | ||
| 20 | |||
| 21 | ~MultiLevelPageTable() noexcept; | ||
| 22 | |||
| 23 | MultiLevelPageTable(const MultiLevelPageTable&) = delete; | ||
| 24 | MultiLevelPageTable& operator=(const MultiLevelPageTable&) = delete; | ||
| 25 | |||
| 26 | MultiLevelPageTable(MultiLevelPageTable&& other) noexcept | ||
| 27 | : address_space_bits{std::exchange(other.address_space_bits, 0)}, | ||
| 28 | first_level_bits{std::exchange(other.first_level_bits, 0)}, page_bits{std::exchange( | ||
| 29 | other.page_bits, 0)}, | ||
| 30 | first_level_shift{std::exchange(other.first_level_shift, 0)}, | ||
| 31 | first_level_chunk_size{std::exchange(other.first_level_chunk_size, 0)}, alloc_size{std::exchange(other.alloc_size, 0)}, | ||
| 32 | first_level_map{std::move(other.first_level_map)}, base_ptr{std::exchange(other.base_ptr, | ||
| 33 | nullptr)} {} | ||
| 34 | |||
| 35 | MultiLevelPageTable& operator=(MultiLevelPageTable&& other) noexcept { | ||
| 36 | address_space_bits = std::exchange(other.address_space_bits, 0); | ||
| 37 | first_level_bits = std::exchange(other.first_level_bits, 0); | ||
| 38 | page_bits = std::exchange(other.page_bits, 0); | ||
| 39 | first_level_shift = std::exchange(other.first_level_shift, 0); | ||
| 40 | first_level_chunk_size = std::exchange(other.first_level_chunk_size, 0); | ||
| 41 | alloc_size = std::exchange(other.alloc_size, 0); | ||
| 42 | first_level_map = std::move(other.first_level_map); | ||
| 43 | base_ptr = std::exchange(other.base_ptr, nullptr); | ||
| 44 | return *this; | ||
| 45 | } | ||
| 46 | |||
| 47 | void ReserveRange(u64 start, std::size_t size); | ||
| 48 | |||
| 49 | [[nodiscard]] const BaseAddr& operator[](std::size_t index) const { | ||
| 50 | return base_ptr[index]; | ||
| 51 | } | ||
| 52 | |||
| 53 | [[nodiscard]] BaseAddr& operator[](std::size_t index) { | ||
| 54 | return base_ptr[index]; | ||
| 55 | } | ||
| 56 | |||
| 57 | [[nodiscard]] BaseAddr* data() { | ||
| 58 | return base_ptr; | ||
| 59 | } | ||
| 60 | |||
| 61 | [[nodiscard]] const BaseAddr* data() const { | ||
| 62 | return base_ptr; | ||
| 63 | } | ||
| 64 | |||
| 65 | private: | ||
| 66 | void AllocateLevel(u64 level); | ||
| 67 | |||
| 68 | std::size_t address_space_bits{}; | ||
| 69 | std::size_t first_level_bits{}; | ||
| 70 | std::size_t page_bits{}; | ||
| 71 | std::size_t first_level_shift{}; | ||
| 72 | std::size_t first_level_chunk_size{}; | ||
| 73 | std::size_t alloc_size{}; | ||
| 74 | std::vector<void*> first_level_map{}; | ||
| 75 | BaseAddr* base_ptr{}; | ||
| 76 | }; | ||
| 77 | |||
| 78 | } // namespace Common | ||
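A hedged usage sketch of the table declared above; the 40/12/14 bit split and the variable names are assumptions for illustration, not values used by the emulator.

    #include "common/common_types.h"
    #include "common/multi_level_page_table.h"

    // Hypothetical: 40-bit address space, 12-bit first level, 16 KiB (2^14) pages, one u64 entry per page.
    void PageTableSketch(u64 gpu_addr, u64 backing_addr) {
        Common::MultiLevelPageTable<u64> table{40, 12, 14};
        table.ReserveRange(0, 1ULL << 32);     // commit first-level chunks covering the first 4 GiB
        table[gpu_addr >> 14] = backing_addr;  // entries are indexed by page number
    }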
diff --git a/src/common/multi_level_page_table.inc b/src/common/multi_level_page_table.inc new file mode 100644 index 000000000..8ac506fa0 --- /dev/null +++ b/src/common/multi_level_page_table.inc | |||
| @@ -0,0 +1,84 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2021 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #ifdef _WIN32 | ||
| 5 | #include <windows.h> | ||
| 6 | #else | ||
| 7 | #include <sys/mman.h> | ||
| 8 | #endif | ||
| 9 | |||
| 10 | #include "common/assert.h" | ||
| 11 | #include "common/multi_level_page_table.h" | ||
| 12 | |||
| 13 | namespace Common { | ||
| 14 | |||
| 15 | template <typename BaseAddr> | ||
| 16 | MultiLevelPageTable<BaseAddr>::MultiLevelPageTable(std::size_t address_space_bits_, | ||
| 17 | std::size_t first_level_bits_, | ||
| 18 | std::size_t page_bits_) | ||
| 19 | : address_space_bits{address_space_bits_}, | ||
| 20 | first_level_bits{first_level_bits_}, page_bits{page_bits_} { | ||
| 21 | if (page_bits == 0) { | ||
| 22 | return; | ||
| 23 | } | ||
| 24 | first_level_shift = address_space_bits - first_level_bits; | ||
| 25 | first_level_chunk_size = (1ULL << (first_level_shift - page_bits)) * sizeof(BaseAddr); | ||
| 26 | alloc_size = (1ULL << (address_space_bits - page_bits)) * sizeof(BaseAddr); | ||
| 27 | std::size_t first_level_size = 1ULL << first_level_bits; | ||
| 28 | first_level_map.resize(first_level_size, nullptr); | ||
| 29 | #ifdef _WIN32 | ||
| 30 | void* base{VirtualAlloc(nullptr, alloc_size, MEM_RESERVE, PAGE_READWRITE)}; | ||
| 31 | #else | ||
| 32 | void* base{mmap(nullptr, alloc_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)}; | ||
| 33 | |||
| 34 | if (base == MAP_FAILED) { | ||
| 35 | base = nullptr; | ||
| 36 | } | ||
| 37 | #endif | ||
| 38 | |||
| 39 | ASSERT(base); | ||
| 40 | base_ptr = reinterpret_cast<BaseAddr*>(base); | ||
| 41 | } | ||
| 42 | |||
| 43 | template <typename BaseAddr> | ||
| 44 | MultiLevelPageTable<BaseAddr>::~MultiLevelPageTable() noexcept { | ||
| 45 | if (!base_ptr) { | ||
| 46 | return; | ||
| 47 | } | ||
| 48 | #ifdef _WIN32 | ||
| 49 | ASSERT(VirtualFree(base_ptr, 0, MEM_RELEASE)); | ||
| 50 | #else | ||
| 51 | ASSERT(munmap(base_ptr, alloc_size) == 0); | ||
| 52 | #endif | ||
| 53 | } | ||
| 54 | |||
| 55 | template <typename BaseAddr> | ||
| 56 | void MultiLevelPageTable<BaseAddr>::ReserveRange(u64 start, std::size_t size) { | ||
| 57 | const u64 new_start = start >> first_level_shift; | ||
| 58 | const u64 new_end = (start + size) >> first_level_shift; | ||
| 59 | for (u64 i = new_start; i <= new_end; i++) { | ||
| 60 | if (!first_level_map[i]) { | ||
| 61 | AllocateLevel(i); | ||
| 62 | } | ||
| 63 | } | ||
| 64 | } | ||
| 65 | |||
| 66 | template <typename BaseAddr> | ||
| 67 | void MultiLevelPageTable<BaseAddr>::AllocateLevel(u64 level) { | ||
| 68 | void* ptr = reinterpret_cast<char *>(base_ptr) + level * first_level_chunk_size; | ||
| 69 | #ifdef _WIN32 | ||
| 70 | void* base{VirtualAlloc(ptr, first_level_chunk_size, MEM_COMMIT, PAGE_READWRITE)}; | ||
| 71 | #else | ||
| 72 | void* base{mmap(ptr, first_level_chunk_size, PROT_READ | PROT_WRITE, | ||
| 73 | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)}; | ||
| 74 | |||
| 75 | if (base == MAP_FAILED) { | ||
| 76 | base = nullptr; | ||
| 77 | } | ||
| 78 | #endif | ||
| 79 | ASSERT(base); | ||
| 80 | |||
| 81 | first_level_map[level] = base; | ||
| 82 | } | ||
| 83 | |||
| 84 | } // namespace Common | ||
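Working through the constructor's sizing arithmetic with the hypothetical 40/12/14 split (BaseAddr = u64) from the sketch above:

    // first_level_shift      = 40 - 12                        = 28
    // first_level_chunk_size = (1 << (28 - 14)) * sizeof(u64) = 16384 * 8 = 128 KiB per level
    // alloc_size             = (1 << (40 - 14)) * sizeof(u64) = 2^26 * 8  = 512 MiB reserved up front
    // first_level_map        = 1 << 12                        = 4096 level pointers
    // Only levels touched by ReserveRange are committed (VirtualAlloc MEM_COMMIT / anonymous mmap).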
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index 8e3fd4505..95302c419 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt | |||
| @@ -138,8 +138,6 @@ add_library(core STATIC | |||
| 138 | frontend/emu_window.h | 138 | frontend/emu_window.h |
| 139 | frontend/framebuffer_layout.cpp | 139 | frontend/framebuffer_layout.cpp |
| 140 | frontend/framebuffer_layout.h | 140 | frontend/framebuffer_layout.h |
| 141 | hardware_interrupt_manager.cpp | ||
| 142 | hardware_interrupt_manager.h | ||
| 143 | hid/emulated_console.cpp | 141 | hid/emulated_console.cpp |
| 144 | hid/emulated_console.h | 142 | hid/emulated_console.h |
| 145 | hid/emulated_controller.cpp | 143 | hid/emulated_controller.cpp |
| @@ -550,6 +548,12 @@ add_library(core STATIC | |||
| 550 | hle/service/ns/ns.h | 548 | hle/service/ns/ns.h |
| 551 | hle/service/ns/pdm_qry.cpp | 549 | hle/service/ns/pdm_qry.cpp |
| 552 | hle/service/ns/pdm_qry.h | 550 | hle/service/ns/pdm_qry.h |
| 551 | hle/service/nvdrv/core/container.cpp | ||
| 552 | hle/service/nvdrv/core/container.h | ||
| 553 | hle/service/nvdrv/core/nvmap.cpp | ||
| 554 | hle/service/nvdrv/core/nvmap.h | ||
| 555 | hle/service/nvdrv/core/syncpoint_manager.cpp | ||
| 556 | hle/service/nvdrv/core/syncpoint_manager.h | ||
| 553 | hle/service/nvdrv/devices/nvdevice.h | 557 | hle/service/nvdrv/devices/nvdevice.h |
| 554 | hle/service/nvdrv/devices/nvdisp_disp0.cpp | 558 | hle/service/nvdrv/devices/nvdisp_disp0.cpp |
| 555 | hle/service/nvdrv/devices/nvdisp_disp0.h | 559 | hle/service/nvdrv/devices/nvdisp_disp0.h |
| @@ -578,8 +582,6 @@ add_library(core STATIC | |||
| 578 | hle/service/nvdrv/nvdrv_interface.h | 582 | hle/service/nvdrv/nvdrv_interface.h |
| 579 | hle/service/nvdrv/nvmemp.cpp | 583 | hle/service/nvdrv/nvmemp.cpp |
| 580 | hle/service/nvdrv/nvmemp.h | 584 | hle/service/nvdrv/nvmemp.h |
| 581 | hle/service/nvdrv/syncpoint_manager.cpp | ||
| 582 | hle/service/nvdrv/syncpoint_manager.h | ||
| 583 | hle/service/nvflinger/binder.h | 585 | hle/service/nvflinger/binder.h |
| 584 | hle/service/nvflinger/buffer_item.h | 586 | hle/service/nvflinger/buffer_item.h |
| 585 | hle/service/nvflinger/buffer_item_consumer.cpp | 587 | hle/service/nvflinger/buffer_item_consumer.cpp |
diff --git a/src/core/core.cpp b/src/core/core.cpp index 121092868..1deeee154 100644 --- a/src/core/core.cpp +++ b/src/core/core.cpp | |||
| @@ -27,7 +27,6 @@ | |||
| 27 | #include "core/file_sys/savedata_factory.h" | 27 | #include "core/file_sys/savedata_factory.h" |
| 28 | #include "core/file_sys/vfs_concat.h" | 28 | #include "core/file_sys/vfs_concat.h" |
| 29 | #include "core/file_sys/vfs_real.h" | 29 | #include "core/file_sys/vfs_real.h" |
| 30 | #include "core/hardware_interrupt_manager.h" | ||
| 31 | #include "core/hid/hid_core.h" | 30 | #include "core/hid/hid_core.h" |
| 32 | #include "core/hle/kernel/k_memory_manager.h" | 31 | #include "core/hle/kernel/k_memory_manager.h" |
| 33 | #include "core/hle/kernel/k_process.h" | 32 | #include "core/hle/kernel/k_process.h" |
| @@ -51,6 +50,7 @@ | |||
| 51 | #include "core/telemetry_session.h" | 50 | #include "core/telemetry_session.h" |
| 52 | #include "core/tools/freezer.h" | 51 | #include "core/tools/freezer.h" |
| 53 | #include "network/network.h" | 52 | #include "network/network.h" |
| 53 | #include "video_core/host1x/host1x.h" | ||
| 54 | #include "video_core/renderer_base.h" | 54 | #include "video_core/renderer_base.h" |
| 55 | #include "video_core/video_core.h" | 55 | #include "video_core/video_core.h" |
| 56 | 56 | ||
| @@ -215,6 +215,7 @@ struct System::Impl { | |||
| 215 | 215 | ||
| 216 | telemetry_session = std::make_unique<Core::TelemetrySession>(); | 216 | telemetry_session = std::make_unique<Core::TelemetrySession>(); |
| 217 | 217 | ||
| 218 | host1x_core = std::make_unique<Tegra::Host1x::Host1x>(system); | ||
| 218 | gpu_core = VideoCore::CreateGPU(emu_window, system); | 219 | gpu_core = VideoCore::CreateGPU(emu_window, system); |
| 219 | if (!gpu_core) { | 220 | if (!gpu_core) { |
| 220 | return SystemResultStatus::ErrorVideoCore; | 221 | return SystemResultStatus::ErrorVideoCore; |
| @@ -224,7 +225,6 @@ struct System::Impl { | |||
| 224 | 225 | ||
| 225 | service_manager = std::make_shared<Service::SM::ServiceManager>(kernel); | 226 | service_manager = std::make_shared<Service::SM::ServiceManager>(kernel); |
| 226 | services = std::make_unique<Service::Services>(service_manager, system); | 227 | services = std::make_unique<Service::Services>(service_manager, system); |
| 227 | interrupt_manager = std::make_unique<Hardware::InterruptManager>(system); | ||
| 228 | 228 | ||
| 229 | // Initialize time manager, which must happen after kernel is created | 229 | // Initialize time manager, which must happen after kernel is created |
| 230 | time_manager.Initialize(); | 230 | time_manager.Initialize(); |
| @@ -373,6 +373,7 @@ struct System::Impl { | |||
| 373 | app_loader.reset(); | 373 | app_loader.reset(); |
| 374 | audio_core.reset(); | 374 | audio_core.reset(); |
| 375 | gpu_core.reset(); | 375 | gpu_core.reset(); |
| 376 | host1x_core.reset(); | ||
| 376 | perf_stats.reset(); | 377 | perf_stats.reset(); |
| 377 | kernel.Shutdown(); | 378 | kernel.Shutdown(); |
| 378 | memory.Reset(); | 379 | memory.Reset(); |
| @@ -450,7 +451,7 @@ struct System::Impl { | |||
| 450 | /// AppLoader used to load the current executing application | 451 | /// AppLoader used to load the current executing application |
| 451 | std::unique_ptr<Loader::AppLoader> app_loader; | 452 | std::unique_ptr<Loader::AppLoader> app_loader; |
| 452 | std::unique_ptr<Tegra::GPU> gpu_core; | 453 | std::unique_ptr<Tegra::GPU> gpu_core; |
| 453 | std::unique_ptr<Hardware::InterruptManager> interrupt_manager; | 454 | std::unique_ptr<Tegra::Host1x::Host1x> host1x_core; |
| 454 | std::unique_ptr<Core::DeviceMemory> device_memory; | 455 | std::unique_ptr<Core::DeviceMemory> device_memory; |
| 455 | std::unique_ptr<AudioCore::AudioCore> audio_core; | 456 | std::unique_ptr<AudioCore::AudioCore> audio_core; |
| 456 | Core::Memory::Memory memory; | 457 | Core::Memory::Memory memory; |
| @@ -668,12 +669,12 @@ const Tegra::GPU& System::GPU() const { | |||
| 668 | return *impl->gpu_core; | 669 | return *impl->gpu_core; |
| 669 | } | 670 | } |
| 670 | 671 | ||
| 671 | Core::Hardware::InterruptManager& System::InterruptManager() { | 672 | Tegra::Host1x::Host1x& System::Host1x() { |
| 672 | return *impl->interrupt_manager; | 673 | return *impl->host1x_core; |
| 673 | } | 674 | } |
| 674 | 675 | ||
| 675 | const Core::Hardware::InterruptManager& System::InterruptManager() const { | 676 | const Tegra::Host1x::Host1x& System::Host1x() const { |
| 676 | return *impl->interrupt_manager; | 677 | return *impl->host1x_core; |
| 677 | } | 678 | } |
| 678 | 679 | ||
| 679 | VideoCore::RendererBase& System::Renderer() { | 680 | VideoCore::RendererBase& System::Renderer() { |
diff --git a/src/core/core.h b/src/core/core.h index 0ce3b1d60..7843cc8ad 100644 --- a/src/core/core.h +++ b/src/core/core.h | |||
| @@ -74,6 +74,9 @@ class TimeManager; | |||
| 74 | namespace Tegra { | 74 | namespace Tegra { |
| 75 | class DebugContext; | 75 | class DebugContext; |
| 76 | class GPU; | 76 | class GPU; |
| 77 | namespace Host1x { | ||
| 78 | class Host1x; | ||
| 79 | } // namespace Host1x | ||
| 77 | } // namespace Tegra | 80 | } // namespace Tegra |
| 78 | 81 | ||
| 79 | namespace VideoCore { | 82 | namespace VideoCore { |
| @@ -88,10 +91,6 @@ namespace Core::Timing { | |||
| 88 | class CoreTiming; | 91 | class CoreTiming; |
| 89 | } | 92 | } |
| 90 | 93 | ||
| 91 | namespace Core::Hardware { | ||
| 92 | class InterruptManager; | ||
| 93 | } | ||
| 94 | |||
| 95 | namespace Core::HID { | 94 | namespace Core::HID { |
| 96 | class HIDCore; | 95 | class HIDCore; |
| 97 | } | 96 | } |
| @@ -260,6 +259,12 @@ public: | |||
| 260 | /// Gets an immutable reference to the GPU interface. | 259 | /// Gets an immutable reference to the GPU interface. |
| 261 | [[nodiscard]] const Tegra::GPU& GPU() const; | 260 | [[nodiscard]] const Tegra::GPU& GPU() const; |
| 262 | 261 | ||
| 262 | /// Gets a mutable reference to the Host1x interface | ||
| 263 | [[nodiscard]] Tegra::Host1x::Host1x& Host1x(); | ||
| 264 | |||
| 265 | /// Gets an immutable reference to the Host1x interface. | ||
| 266 | [[nodiscard]] const Tegra::Host1x::Host1x& Host1x() const; | ||
| 267 | |||
| 263 | /// Gets a mutable reference to the renderer. | 268 | /// Gets a mutable reference to the renderer. |
| 264 | [[nodiscard]] VideoCore::RendererBase& Renderer(); | 269 | [[nodiscard]] VideoCore::RendererBase& Renderer(); |
| 265 | 270 | ||
| @@ -296,12 +301,6 @@ public: | |||
| 296 | /// Provides a constant reference to the core timing instance. | 301 | /// Provides a constant reference to the core timing instance. |
| 297 | [[nodiscard]] const Timing::CoreTiming& CoreTiming() const; | 302 | [[nodiscard]] const Timing::CoreTiming& CoreTiming() const; |
| 298 | 303 | ||
| 299 | /// Provides a reference to the interrupt manager instance. | ||
| 300 | [[nodiscard]] Core::Hardware::InterruptManager& InterruptManager(); | ||
| 301 | |||
| 302 | /// Provides a constant reference to the interrupt manager instance. | ||
| 303 | [[nodiscard]] const Core::Hardware::InterruptManager& InterruptManager() const; | ||
| 304 | |||
| 305 | /// Provides a reference to the kernel instance. | 304 | /// Provides a reference to the kernel instance. |
| 306 | [[nodiscard]] Kernel::KernelCore& Kernel(); | 305 | [[nodiscard]] Kernel::KernelCore& Kernel(); |
| 307 | 306 | ||
diff --git a/src/core/hardware_interrupt_manager.cpp b/src/core/hardware_interrupt_manager.cpp deleted file mode 100644 index d08cc3315..000000000 --- a/src/core/hardware_interrupt_manager.cpp +++ /dev/null | |||
| @@ -1,32 +0,0 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #include "core/core.h" | ||
| 5 | #include "core/core_timing.h" | ||
| 6 | #include "core/hardware_interrupt_manager.h" | ||
| 7 | #include "core/hle/service/nvdrv/nvdrv_interface.h" | ||
| 8 | #include "core/hle/service/sm/sm.h" | ||
| 9 | |||
| 10 | namespace Core::Hardware { | ||
| 11 | |||
| 12 | InterruptManager::InterruptManager(Core::System& system_in) : system(system_in) { | ||
| 13 | gpu_interrupt_event = Core::Timing::CreateEvent( | ||
| 14 | "GPUInterrupt", | ||
| 15 | [this](std::uintptr_t message, u64 time, | ||
| 16 | std::chrono::nanoseconds) -> std::optional<std::chrono::nanoseconds> { | ||
| 17 | auto nvdrv = system.ServiceManager().GetService<Service::Nvidia::NVDRV>("nvdrv"); | ||
| 18 | const u32 syncpt = static_cast<u32>(message >> 32); | ||
| 19 | const u32 value = static_cast<u32>(message); | ||
| 20 | nvdrv->SignalGPUInterruptSyncpt(syncpt, value); | ||
| 21 | return std::nullopt; | ||
| 22 | }); | ||
| 23 | } | ||
| 24 | |||
| 25 | InterruptManager::~InterruptManager() = default; | ||
| 26 | |||
| 27 | void InterruptManager::GPUInterruptSyncpt(const u32 syncpoint_id, const u32 value) { | ||
| 28 | const u64 msg = (static_cast<u64>(syncpoint_id) << 32ULL) | value; | ||
| 29 | system.CoreTiming().ScheduleEvent(std::chrono::nanoseconds{10}, gpu_interrupt_event, msg); | ||
| 30 | } | ||
| 31 | |||
| 32 | } // namespace Core::Hardware | ||
diff --git a/src/core/hardware_interrupt_manager.h b/src/core/hardware_interrupt_manager.h deleted file mode 100644 index 5665c5918..000000000 --- a/src/core/hardware_interrupt_manager.h +++ /dev/null | |||
| @@ -1,32 +0,0 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <memory> | ||
| 7 | |||
| 8 | #include "common/common_types.h" | ||
| 9 | |||
| 10 | namespace Core { | ||
| 11 | class System; | ||
| 12 | } | ||
| 13 | |||
| 14 | namespace Core::Timing { | ||
| 15 | struct EventType; | ||
| 16 | } | ||
| 17 | |||
| 18 | namespace Core::Hardware { | ||
| 19 | |||
| 20 | class InterruptManager { | ||
| 21 | public: | ||
| 22 | explicit InterruptManager(Core::System& system); | ||
| 23 | ~InterruptManager(); | ||
| 24 | |||
| 25 | void GPUInterruptSyncpt(u32 syncpoint_id, u32 value); | ||
| 26 | |||
| 27 | private: | ||
| 28 | Core::System& system; | ||
| 29 | std::shared_ptr<Core::Timing::EventType> gpu_interrupt_event; | ||
| 30 | }; | ||
| 31 | |||
| 32 | } // namespace Core::Hardware | ||
diff --git a/src/core/hle/service/nvdrv/core/container.cpp b/src/core/hle/service/nvdrv/core/container.cpp new file mode 100644 index 000000000..37ca24f5d --- /dev/null +++ b/src/core/hle/service/nvdrv/core/container.cpp | |||
| @@ -0,0 +1,50 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2022 yuzu Emulator Project | ||
| 2 | // SPDX-FileCopyrightText: 2022 Skyline Team and Contributors | ||
| 3 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 4 | |||
| 5 | #include "core/hle/service/nvdrv/core/container.h" | ||
| 6 | #include "core/hle/service/nvdrv/core/nvmap.h" | ||
| 7 | #include "core/hle/service/nvdrv/core/syncpoint_manager.h" | ||
| 8 | #include "video_core/host1x/host1x.h" | ||
| 9 | |||
| 10 | namespace Service::Nvidia::NvCore { | ||
| 11 | |||
| 12 | struct ContainerImpl { | ||
| 13 | explicit ContainerImpl(Tegra::Host1x::Host1x& host1x_) | ||
| 14 | : file{host1x_}, manager{host1x_}, device_file_data{} {} | ||
| 15 | NvMap file; | ||
| 16 | SyncpointManager manager; | ||
| 17 | Container::Host1xDeviceFileData device_file_data; | ||
| 18 | }; | ||
| 19 | |||
| 20 | Container::Container(Tegra::Host1x::Host1x& host1x_) { | ||
| 21 | impl = std::make_unique<ContainerImpl>(host1x_); | ||
| 22 | } | ||
| 23 | |||
| 24 | Container::~Container() = default; | ||
| 25 | |||
| 26 | NvMap& Container::GetNvMapFile() { | ||
| 27 | return impl->file; | ||
| 28 | } | ||
| 29 | |||
| 30 | const NvMap& Container::GetNvMapFile() const { | ||
| 31 | return impl->file; | ||
| 32 | } | ||
| 33 | |||
| 34 | Container::Host1xDeviceFileData& Container::Host1xDeviceFile() { | ||
| 35 | return impl->device_file_data; | ||
| 36 | } | ||
| 37 | |||
| 38 | const Container::Host1xDeviceFileData& Container::Host1xDeviceFile() const { | ||
| 39 | return impl->device_file_data; | ||
| 40 | } | ||
| 41 | |||
| 42 | SyncpointManager& Container::GetSyncpointManager() { | ||
| 43 | return impl->manager; | ||
| 44 | } | ||
| 45 | |||
| 46 | const SyncpointManager& Container::GetSyncpointManager() const { | ||
| 47 | return impl->manager; | ||
| 48 | } | ||
| 49 | |||
| 50 | } // namespace Service::Nvidia::NvCore | ||
diff --git a/src/core/hle/service/nvdrv/core/container.h b/src/core/hle/service/nvdrv/core/container.h new file mode 100644 index 000000000..b4b63ac90 --- /dev/null +++ b/src/core/hle/service/nvdrv/core/container.h | |||
| @@ -0,0 +1,52 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2022 yuzu Emulator Project | ||
| 2 | // SPDX-FileCopyrightText: 2022 Skyline Team and Contributors | ||
| 3 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include <deque> | ||
| 8 | #include <memory> | ||
| 9 | #include <unordered_map> | ||
| 10 | |||
| 11 | #include "core/hle/service/nvdrv/nvdata.h" | ||
| 12 | |||
| 13 | namespace Tegra::Host1x { | ||
| 14 | class Host1x; | ||
| 15 | } // namespace Tegra::Host1x | ||
| 16 | |||
| 17 | namespace Service::Nvidia::NvCore { | ||
| 18 | |||
| 19 | class NvMap; | ||
| 20 | class SyncpointManager; | ||
| 21 | |||
| 22 | struct ContainerImpl; | ||
| 23 | |||
| 24 | class Container { | ||
| 25 | public: | ||
| 26 | explicit Container(Tegra::Host1x::Host1x& host1x); | ||
| 27 | ~Container(); | ||
| 28 | |||
| 29 | NvMap& GetNvMapFile(); | ||
| 30 | |||
| 31 | const NvMap& GetNvMapFile() const; | ||
| 32 | |||
| 33 | SyncpointManager& GetSyncpointManager(); | ||
| 34 | |||
| 35 | const SyncpointManager& GetSyncpointManager() const; | ||
| 36 | |||
| 37 | struct Host1xDeviceFileData { | ||
| 38 | std::unordered_map<DeviceFD, u32> fd_to_id{}; | ||
| 39 | std::deque<u32> syncpts_accumulated{}; | ||
| 40 | u32 nvdec_next_id{}; | ||
| 41 | u32 vic_next_id{}; | ||
| 42 | }; | ||
| 43 | |||
| 44 | Host1xDeviceFileData& Host1xDeviceFile(); | ||
| 45 | |||
| 46 | const Host1xDeviceFileData& Host1xDeviceFile() const; | ||
| 47 | |||
| 48 | private: | ||
| 49 | std::unique_ptr<ContainerImpl> impl; | ||
| 50 | }; | ||
| 51 | |||
| 52 | } // namespace Service::Nvidia::NvCore | ||
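A minimal wiring sketch for the container, assuming access to the system's Host1x instance added in core.h; everything apart from the declared accessors is illustrative.

    #include "core/core.h"
    #include "core/hle/service/nvdrv/core/container.h"
    #include "core/hle/service/nvdrv/core/nvmap.h"
    #include "core/hle/service/nvdrv/core/syncpoint_manager.h"

    // Illustrative: one container shared by the nvdrv sessions of a process.
    void ContainerSketch(Core::System& system) {
        Service::Nvidia::NvCore::Container container{system.Host1x()};
        [[maybe_unused]] auto& nvmap = container.GetNvMapFile();              // shared handle table
        [[maybe_unused]] auto& syncpoints = container.GetSyncpointManager();  // shared syncpoint state
        [[maybe_unused]] auto& device_data = container.Host1xDeviceFile();    // per-device fd bookkeeping
    }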
diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp new file mode 100644 index 000000000..fbd8a74a5 --- /dev/null +++ b/src/core/hle/service/nvdrv/core/nvmap.cpp | |||
| @@ -0,0 +1,272 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2022 yuzu Emulator Project | ||
| 2 | // SPDX-FileCopyrightText: 2022 Skyline Team and Contributors | ||
| 3 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 4 | |||
| 5 | #include "common/alignment.h" | ||
| 6 | #include "common/assert.h" | ||
| 7 | #include "common/logging/log.h" | ||
| 8 | #include "core/hle/service/nvdrv/core/nvmap.h" | ||
| 9 | #include "core/memory.h" | ||
| 10 | #include "video_core/host1x/host1x.h" | ||
| 11 | |||
| 12 | using Core::Memory::YUZU_PAGESIZE; | ||
| 13 | |||
| 14 | namespace Service::Nvidia::NvCore { | ||
| 15 | NvMap::Handle::Handle(u64 size_, Id id_) | ||
| 16 | : size(size_), aligned_size(size), orig_size(size), id(id_) { | ||
| 17 | flags.raw = 0; | ||
| 18 | } | ||
| 19 | |||
| 20 | NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress) { | ||
| 21 | std::scoped_lock lock(mutex); | ||
| 22 | |||
| 23 | // Handles cannot be allocated twice | ||
| 24 | if (allocated) { | ||
| 25 | return NvResult::AccessDenied; | ||
| 26 | } | ||
| 27 | |||
| 28 | flags = pFlags; | ||
| 29 | kind = pKind; | ||
| 30 | align = pAlign < YUZU_PAGESIZE ? YUZU_PAGESIZE : pAlign; | ||
| 31 | |||
| 32 | // This flag is only applicable for handles with an address passed | ||
| 33 | if (pAddress) { | ||
| 34 | flags.keep_uncached_after_free.Assign(0); | ||
| 35 | } else { | ||
| 36 | LOG_CRITICAL(Service_NVDRV, | ||
| 37 | "Mapping nvmap handles without a CPU side address is unimplemented!"); | ||
| 38 | } | ||
| 39 | |||
| 40 | size = Common::AlignUp(size, YUZU_PAGESIZE); | ||
| 41 | aligned_size = Common::AlignUp(size, align); | ||
| 42 | address = pAddress; | ||
| 43 | allocated = true; | ||
| 44 | |||
| 45 | return NvResult::Success; | ||
| 46 | } | ||
| 47 | |||
| 48 | NvResult NvMap::Handle::Duplicate(bool internal_session) { | ||
| 49 | std::scoped_lock lock(mutex); | ||
| 50 | // Unallocated handles cannot be duplicated as duplication requires memory accounting (in HOS) | ||
| 51 | if (!allocated) [[unlikely]] { | ||
| 52 | return NvResult::BadValue; | ||
| 53 | } | ||
| 54 | |||
| 55 | // If we internally use FromId the duplication tracking of handles won't work accurately due to | ||
| 56 | // us not implementing per-process handle refs. | ||
| 57 | if (internal_session) { | ||
| 58 | internal_dupes++; | ||
| 59 | } else { | ||
| 60 | dupes++; | ||
| 61 | } | ||
| 62 | |||
| 63 | return NvResult::Success; | ||
| 64 | } | ||
| 65 | |||
| 66 | NvMap::NvMap(Tegra::Host1x::Host1x& host1x_) : host1x{host1x_} {} | ||
| 67 | |||
| 68 | void NvMap::AddHandle(std::shared_ptr<Handle> handle_description) { | ||
| 69 | std::scoped_lock lock(handles_lock); | ||
| 70 | |||
| 71 | handles.emplace(handle_description->id, std::move(handle_description)); | ||
| 72 | } | ||
| 73 | |||
| 74 | void NvMap::UnmapHandle(Handle& handle_description) { | ||
| 75 | // Remove pending unmap queue entry if needed | ||
| 76 | if (handle_description.unmap_queue_entry) { | ||
| 77 | unmap_queue.erase(*handle_description.unmap_queue_entry); | ||
| 78 | handle_description.unmap_queue_entry.reset(); | ||
| 79 | } | ||
| 80 | |||
| 81 | // Free and unmap the handle from the SMMU | ||
| 82 | host1x.MemoryManager().Unmap(static_cast<GPUVAddr>(handle_description.pin_virt_address), | ||
| 83 | handle_description.aligned_size); | ||
| 84 | host1x.Allocator().Free(handle_description.pin_virt_address, | ||
| 85 | static_cast<u32>(handle_description.aligned_size)); | ||
| 86 | handle_description.pin_virt_address = 0; | ||
| 87 | } | ||
| 88 | |||
| 89 | bool NvMap::TryRemoveHandle(const Handle& handle_description) { | ||
| 90 | // No dupes left, we can remove from handle map | ||
| 91 | if (handle_description.dupes == 0 && handle_description.internal_dupes == 0) { | ||
| 92 | std::scoped_lock lock(handles_lock); | ||
| 93 | |||
| 94 | auto it{handles.find(handle_description.id)}; | ||
| 95 | if (it != handles.end()) { | ||
| 96 | handles.erase(it); | ||
| 97 | } | ||
| 98 | |||
| 99 | return true; | ||
| 100 | } else { | ||
| 101 | return false; | ||
| 102 | } | ||
| 103 | } | ||
| 104 | |||
| 105 | NvResult NvMap::CreateHandle(u64 size, std::shared_ptr<NvMap::Handle>& result_out) { | ||
| 106 | if (!size) [[unlikely]] { | ||
| 107 | return NvResult::BadValue; | ||
| 108 | } | ||
| 109 | |||
| 110 | u32 id{next_handle_id.fetch_add(HandleIdIncrement, std::memory_order_relaxed)}; | ||
| 111 | auto handle_description{std::make_shared<Handle>(size, id)}; | ||
| 112 | AddHandle(handle_description); | ||
| 113 | |||
| 114 | result_out = handle_description; | ||
| 115 | return NvResult::Success; | ||
| 116 | } | ||
| 117 | |||
| 118 | std::shared_ptr<NvMap::Handle> NvMap::GetHandle(Handle::Id handle) { | ||
| 119 | std::scoped_lock lock(handles_lock); | ||
| 120 | try { | ||
| 121 | return handles.at(handle); | ||
| 122 | } catch (std::out_of_range&) { | ||
| 123 | return nullptr; | ||
| 124 | } | ||
| 125 | } | ||
| 126 | |||
| 127 | VAddr NvMap::GetHandleAddress(Handle::Id handle) { | ||
| 128 | std::scoped_lock lock(handles_lock); | ||
| 129 | try { | ||
| 130 | return handles.at(handle)->address; | ||
| 131 | } catch (std::out_of_range&) { | ||
| 132 | return 0; | ||
| 133 | } | ||
| 134 | } | ||
| 135 | |||
| 136 | u32 NvMap::PinHandle(NvMap::Handle::Id handle) { | ||
| 137 | auto handle_description{GetHandle(handle)}; | ||
| 138 | if (!handle_description) [[unlikely]] { | ||
| 139 | return 0; | ||
| 140 | } | ||
| 141 | |||
| 142 | std::scoped_lock lock(handle_description->mutex); | ||
| 143 | if (!handle_description->pins) { | ||
| 144 | // If we're in the unmap queue we can just remove ourselves and return since we're already | ||
| 145 | // mapped | ||
| 146 | { | ||
| 147 | // Lock now to prevent our queue entry from being removed for allocation in-between the | ||
| 148 | // following check and erase | ||
| 149 | std::scoped_lock queueLock(unmap_queue_lock); | ||
| 150 | if (handle_description->unmap_queue_entry) { | ||
| 151 | unmap_queue.erase(*handle_description->unmap_queue_entry); | ||
| 152 | handle_description->unmap_queue_entry.reset(); | ||
| 153 | |||
| 154 | handle_description->pins++; | ||
| 155 | return handle_description->pin_virt_address; | ||
| 156 | } | ||
| 157 | } | ||
| 158 | |||
| 159 | // If not then allocate some space and map it | ||
| 160 | u32 address{}; | ||
| 161 | auto& smmu_allocator = host1x.Allocator(); | ||
| 162 | auto& smmu_memory_manager = host1x.MemoryManager(); | ||
| 163 | while (!(address = | ||
| 164 | smmu_allocator.Allocate(static_cast<u32>(handle_description->aligned_size)))) { | ||
| 165 | // Free handles until the allocation succeeds | ||
| 166 | std::scoped_lock queueLock(unmap_queue_lock); | ||
| 167 | if (auto freeHandleDesc{unmap_queue.front()}) { | ||
| 168 | // Handles in the unmap queue are guaranteed not to be pinned so don't bother | ||
| 169 | // checking if they are before unmapping | ||
| 170 | std::scoped_lock freeLock(freeHandleDesc->mutex); | ||
| 171 | if (handle_description->pin_virt_address) | ||
| 172 | UnmapHandle(*freeHandleDesc); | ||
| 173 | } else { | ||
| 174 | LOG_CRITICAL(Service_NVDRV, "Ran out of SMMU address space!"); | ||
| 175 | } | ||
| 176 | } | ||
| 177 | |||
| 178 | smmu_memory_manager.Map(static_cast<GPUVAddr>(address), handle_description->address, | ||
| 179 | handle_description->aligned_size); | ||
| 180 | handle_description->pin_virt_address = address; | ||
| 181 | } | ||
| 182 | |||
| 183 | handle_description->pins++; | ||
| 184 | return handle_description->pin_virt_address; | ||
| 185 | } | ||
| 186 | |||
| 187 | void NvMap::UnpinHandle(Handle::Id handle) { | ||
| 188 | auto handle_description{GetHandle(handle)}; | ||
| 189 | if (!handle_description) { | ||
| 190 | return; | ||
| 191 | } | ||
| 192 | |||
| 193 | std::scoped_lock lock(handle_description->mutex); | ||
| 194 | if (--handle_description->pins < 0) { | ||
| 195 | LOG_WARNING(Service_NVDRV, "Pin count imbalance detected!"); | ||
| 196 | } else if (!handle_description->pins) { | ||
| 197 | std::scoped_lock queueLock(unmap_queue_lock); | ||
| 198 | |||
| 199 | // Add to the unmap queue allowing this handle's memory to be freed if needed | ||
| 200 | unmap_queue.push_back(handle_description); | ||
| 201 | handle_description->unmap_queue_entry = std::prev(unmap_queue.end()); | ||
| 202 | } | ||
| 203 | } | ||
| 204 | |||
| 205 | void NvMap::DuplicateHandle(Handle::Id handle, bool internal_session) { | ||
| 206 | auto handle_description{GetHandle(handle)}; | ||
| 207 | if (!handle_description) { | ||
| 208 | LOG_CRITICAL(Service_NVDRV, "Unregistered handle!"); | ||
| 209 | return; | ||
| 210 | } | ||
| 211 | |||
| 212 | auto result = handle_description->Duplicate(internal_session); | ||
| 213 | if (result != NvResult::Success) { | ||
| 214 | LOG_CRITICAL(Service_NVDRV, "Could not duplicate handle!"); | ||
| 215 | } | ||
| 216 | } | ||
| 217 | |||
| 218 | std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool internal_session) { | ||
| 219 | std::weak_ptr<Handle> hWeak{GetHandle(handle)}; | ||
| 220 | FreeInfo freeInfo; | ||
| 221 | |||
| 222 | // We use a weak ptr here so we can tell when the handle has been freed and report that back to | ||
| 223 | // the guest | ||
| 224 | if (auto handle_description = hWeak.lock()) { | ||
| 225 | std::scoped_lock lock(handle_description->mutex); | ||
| 226 | |||
| 227 | if (internal_session) { | ||
| 228 | if (--handle_description->internal_dupes < 0) | ||
| 229 | LOG_WARNING(Service_NVDRV, "Internal duplicate count imbalance detected!"); | ||
| 230 | } else { | ||
| 231 | if (--handle_description->dupes < 0) { | ||
| 232 | LOG_WARNING(Service_NVDRV, "User duplicate count imbalance detected!"); | ||
| 233 | } else if (handle_description->dupes == 0) { | ||
| 234 | // Force unmap the handle | ||
| 235 | if (handle_description->pin_virt_address) { | ||
| 236 | std::scoped_lock queueLock(unmap_queue_lock); | ||
| 237 | UnmapHandle(*handle_description); | ||
| 238 | } | ||
| 239 | |||
| 240 | handle_description->pins = 0; | ||
| 241 | } | ||
| 242 | } | ||
| 243 | |||
| 244 | // Try to remove the shared ptr to the handle from the map; if nothing else is using the | ||
| 245 | // handle then it will now be freed when `handle_description` goes out of scope | ||
| 246 | if (TryRemoveHandle(*handle_description)) { | ||
| 247 | LOG_DEBUG(Service_NVDRV, "Removed nvmap handle: {}", handle); | ||
| 248 | } else { | ||
| 249 | LOG_DEBUG(Service_NVDRV, | ||
| 250 | "Tried to free nvmap handle: {} but didn't as it still has duplicates", | ||
| 251 | handle); | ||
| 252 | } | ||
| 253 | |||
| 254 | freeInfo = { | ||
| 255 | .address = handle_description->address, | ||
| 256 | .size = handle_description->size, | ||
| 257 | .was_uncached = handle_description->flags.map_uncached.Value() != 0, | ||
| 258 | }; | ||
| 259 | } else { | ||
| 260 | return std::nullopt; | ||
| 261 | } | ||
| 262 | |||
| 263 | // The handle hasn't been freed from memory; set the address to 0 to mark that it wasn't freed | ||
| 264 | if (!hWeak.expired()) { | ||
| 265 | LOG_DEBUG(Service_NVDRV, "nvmap handle: {} wasn't freed as it is still in use", handle); | ||
| 266 | freeInfo.address = 0; | ||
| 267 | } | ||
| 268 | |||
| 269 | return freeInfo; | ||
| 270 | } | ||
| 271 | |||
| 272 | } // namespace Service::Nvidia::NvCore | ||
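PinHandle/UnpinHandle above keep an SMMU mapping alive with a per-handle pin count and only park the handle in an unmap queue once the last pin is released; PinHandle then evicts entries from that queue when the allocator runs out of space. A minimal standalone sketch of the same refcount-plus-deferred-unmap idea, using toy types rather than the real NvMap/Host1x classes:

```cpp
// Toy sketch of the pin-refcount + deferred-unmap pattern (hypothetical types,
// not the real Service::Nvidia::NvCore::NvMap implementation).
#include <cstdint>
#include <iostream>
#include <iterator>
#include <list>
#include <memory>
#include <optional>

struct ToyHandle {
    std::uint32_t pin_virt_address{}; // 0 means "not currently mapped on the SMMU"
    int pins{};                       // how many callers currently need the mapping
    std::optional<std::list<std::shared_ptr<ToyHandle>>::iterator> unmap_queue_entry{};
};

std::list<std::shared_ptr<ToyHandle>> unmap_queue;
std::uint32_t next_fake_iova{0x1000}; // stand-in for the SMMU allocator

std::uint32_t Pin(const std::shared_ptr<ToyHandle>& h) {
    if (h->pins == 0) {
        if (h->unmap_queue_entry) {
            // Still mapped, just parked in the unmap queue: pull it back out.
            unmap_queue.erase(*h->unmap_queue_entry);
            h->unmap_queue_entry.reset();
        } else {
            // First pin: allocate an SMMU address for the handle.
            h->pin_virt_address = next_fake_iova;
            next_fake_iova += 0x1000;
        }
    }
    ++h->pins;
    return h->pin_virt_address;
}

void Unpin(const std::shared_ptr<ToyHandle>& h) {
    if (--h->pins == 0) {
        // Keep the mapping, but make it reclaimable if the allocator later runs dry.
        unmap_queue.push_back(h);
        h->unmap_queue_entry = std::prev(unmap_queue.end());
    }
}

int main() {
    auto h = std::make_shared<ToyHandle>();
    const auto iova = Pin(h);
    Unpin(h);                              // mapping survives, handle sits in the unmap queue
    std::cout << (Pin(h) == iova) << '\n'; // 1: re-pinning reuses the existing mapping
}
```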
diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h new file mode 100644 index 000000000..b9dd3801f --- /dev/null +++ b/src/core/hle/service/nvdrv/core/nvmap.h | |||
| @@ -0,0 +1,175 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2022 yuzu Emulator Project | ||
| 2 | // SPDX-FileCopyrightText: 2022 Skyline Team and Contributors | ||
| 3 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include <atomic> | ||
| 8 | #include <list> | ||
| 9 | #include <memory> | ||
| 10 | #include <mutex> | ||
| 11 | #include <optional> | ||
| 12 | #include <unordered_map> | ||
| 13 | #include <assert.h> | ||
| 14 | |||
| 15 | #include "common/bit_field.h" | ||
| 16 | #include "common/common_types.h" | ||
| 17 | #include "core/hle/service/nvdrv/nvdata.h" | ||
| 18 | |||
| 19 | namespace Tegra { | ||
| 20 | |||
| 21 | namespace Host1x { | ||
| 22 | class Host1x; | ||
| 23 | } // namespace Host1x | ||
| 24 | |||
| 25 | } // namespace Tegra | ||
| 26 | |||
| 27 | namespace Service::Nvidia::NvCore { | ||
| 28 | /** | ||
| 29 | * @brief The nvmap core class holds the global state for nvmap and provides methods to manage | ||
| 30 | * handles | ||
| 31 | */ | ||
| 32 | class NvMap { | ||
| 33 | public: | ||
| 34 | /** | ||
| 35 | * @brief A handle to a contiguous block of memory in an application's address space | ||
| 36 | */ | ||
| 37 | struct Handle { | ||
| 38 | std::mutex mutex; | ||
| 39 | |||
| 40 | u64 align{}; //!< The alignment to use when pinning the handle onto the SMMU | ||
| 41 | u64 size; //!< Page-aligned size of the memory the handle refers to | ||
| 42 | u64 aligned_size; //!< `align`-aligned size of the memory the handle refers to | ||
| 43 | u64 orig_size; //!< Original unaligned size of the memory this handle refers to | ||
| 44 | |||
| 45 | s32 dupes{1}; //!< How many guest references there are to this handle | ||
| 46 | s32 internal_dupes{0}; //!< How many emulator-internal references there are to this handle | ||
| 47 | |||
| 48 | using Id = u32; | ||
| 49 | Id id; //!< A globally unique identifier for this handle | ||
| 50 | |||
| 51 | s32 pins{}; | ||
| 52 | u32 pin_virt_address{}; | ||
| 53 | std::optional<typename std::list<std::shared_ptr<Handle>>::iterator> unmap_queue_entry{}; | ||
| 54 | |||
| 55 | union Flags { | ||
| 56 | u32 raw; | ||
| 57 | BitField<0, 1, u32> map_uncached; //!< If the handle should be mapped as uncached | ||
| 58 | BitField<2, 1, u32> keep_uncached_after_free; //!< Only applicable when the handle was | ||
| 59 | //!< allocated with a fixed address | ||
| 60 | BitField<4, 1, u32> _unk0_; //!< Passed to IOVMM for pins | ||
| 61 | } flags{}; | ||
| 62 | static_assert(sizeof(Flags) == sizeof(u32)); | ||
| 63 | |||
| 64 | u64 address{}; //!< The memory location in the guest's AS that this handle corresponds to, | ||
| 65 | //!< this can also be in the nvdrv tmem | ||
| 66 | bool is_shared_mem_mapped{}; //!< If this nvmap has been mapped with the MapSharedMem IPC | ||
| 67 | //!< call | ||
| 68 | |||
| 69 | u8 kind{}; //!< Used for memory compression | ||
| 70 | bool allocated{}; //!< If the handle has been allocated with `Alloc` | ||
| 71 | |||
| 72 | u64 dma_map_addr{}; //! remove me after implementing pinning. | ||
| 73 | |||
| 74 | Handle(u64 size, Id id); | ||
| 75 | |||
| 76 | /** | ||
| 77 | * @brief Sets up the handle with the given memory config; memory can be allocated from the tmem | ||
| 78 | * if a 0 address is passed | ||
| 79 | */ | ||
| 80 | [[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress); | ||
| 81 | |||
| 82 | /** | ||
| 83 | * @brief Increases the dupe counter of the handle for the given session | ||
| 84 | */ | ||
| 85 | [[nodiscard]] NvResult Duplicate(bool internal_session); | ||
| 86 | |||
| 87 | /** | ||
| 88 | * @brief Obtains a pointer to the handle's memory and marks the handle as having been | ||
| 89 | * mapped | ||
| 90 | */ | ||
| 91 | u8* GetPointer() { | ||
| 92 | if (!address) { | ||
| 93 | return nullptr; | ||
| 94 | } | ||
| 95 | |||
| 96 | is_shared_mem_mapped = true; | ||
| 97 | return reinterpret_cast<u8*>(address); | ||
| 98 | } | ||
| 99 | }; | ||
| 100 | |||
| 101 | /** | ||
| 102 | * @brief Encapsulates the result of a FreeHandle operation | ||
| 103 | */ | ||
| 104 | struct FreeInfo { | ||
| 105 | u64 address; //!< Address the handle referred to before deletion | ||
| 106 | u64 size; //!< Page-aligned handle size | ||
| 107 | bool was_uncached; //!< If the handle was allocated as uncached | ||
| 108 | }; | ||
| 109 | |||
| 110 | explicit NvMap(Tegra::Host1x::Host1x& host1x); | ||
| 111 | |||
| 112 | /** | ||
| 113 | * @brief Creates an unallocated handle of the given size | ||
| 114 | */ | ||
| 115 | [[nodiscard]] NvResult CreateHandle(u64 size, std::shared_ptr<NvMap::Handle>& result_out); | ||
| 116 | |||
| 117 | std::shared_ptr<Handle> GetHandle(Handle::Id handle); | ||
| 118 | |||
| 119 | VAddr GetHandleAddress(Handle::Id handle); | ||
| 120 | |||
| 121 | /** | ||
| 122 | * @brief Maps a handle into the SMMU address space | ||
| 123 | * @note This operation is refcounted; the number of calls to this must eventually match the | ||
| 124 | * number of calls to `UnpinHandle` | ||
| 125 | * @return The SMMU virtual address that the handle has been mapped to | ||
| 126 | */ | ||
| 127 | u32 PinHandle(Handle::Id handle); | ||
| 128 | |||
| 129 | /** | ||
| 130 | * @brief Once this has been called as many times as `PinHandle` for the supplied handle, it | ||
| 131 | * will be added to a list of handles to be freed when necessary | ||
| 132 | */ | ||
| 133 | void UnpinHandle(Handle::Id handle); | ||
| 134 | |||
| 135 | /** | ||
| 136 | * @brief Tries to duplicate a handle | ||
| 137 | */ | ||
| 138 | void DuplicateHandle(Handle::Id handle, bool internal_session = false); | ||
| 139 | |||
| 140 | /** | ||
| 141 | * @brief Tries to free a handle and remove a single dupe | ||
| 142 | * @note If a handle has no dupes left and no other users, a FreeInfo struct will be returned | ||
| 143 | * describing the prior state of the handle | ||
| 144 | */ | ||
| 145 | std::optional<FreeInfo> FreeHandle(Handle::Id handle, bool internal_session); | ||
| 146 | |||
| 147 | private: | ||
| 148 | std::list<std::shared_ptr<Handle>> unmap_queue{}; | ||
| 149 | std::mutex unmap_queue_lock{}; //!< Protects access to `unmap_queue` | ||
| 150 | |||
| 151 | std::unordered_map<Handle::Id, std::shared_ptr<Handle>> | ||
| 152 | handles{}; //!< Main owning map of handles | ||
| 153 | std::mutex handles_lock; //!< Protects access to `handles` | ||
| 154 | |||
| 155 | static constexpr u32 HandleIdIncrement{ | ||
| 156 | 4}; //!< Each new handle ID is an increment of 4 from the previous | ||
| 157 | std::atomic<u32> next_handle_id{HandleIdIncrement}; | ||
| 158 | Tegra::Host1x::Host1x& host1x; | ||
| 159 | |||
| 160 | void AddHandle(std::shared_ptr<Handle> handle); | ||
| 161 | |||
| 162 | /** | ||
| 163 | * @brief Unmaps and frees the SMMU memory region a handle is mapped to | ||
| 164 | * @note Both `unmap_queue_lock` and `handle_description.mutex` MUST be locked when calling this | ||
| 165 | */ | ||
| 166 | void UnmapHandle(Handle& handle_description); | ||
| 167 | |||
| 168 | /** | ||
| 169 | * @brief Removes a handle from the map taking its dupes into account | ||
| 170 | * @note handle_description.mutex MUST be locked when calling this | ||
| 171 | * @return If the handle was removed from the map | ||
| 172 | */ | ||
| 173 | bool TryRemoveHandle(const Handle& handle_description); | ||
| 174 | }; | ||
| 175 | } // namespace Service::Nvidia::NvCore | ||
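The header above keys every handle by a globally unique ID handed out in steps of 4 from an atomic counter and stored in an owning unordered_map. Stripped of the locking and device glue, that ID scheme looks roughly like this (toy types, not the emulator's classes):

```cpp
// Sketch of the handle-ID allocation scheme: IDs advance by 4 from an atomic counter
// and index an owning map (hypothetical, simplified types).
#include <atomic>
#include <cstdint>
#include <iostream>
#include <memory>
#include <unordered_map>

struct ToyHandle {
    std::uint32_t id;
    std::uint64_t size;
};

constexpr std::uint32_t HandleIdIncrement = 4;
std::atomic<std::uint32_t> next_handle_id{HandleIdIncrement};
std::unordered_map<std::uint32_t, std::shared_ptr<ToyHandle>> handles;

std::shared_ptr<ToyHandle> CreateHandle(std::uint64_t size) {
    const std::uint32_t id = next_handle_id.fetch_add(HandleIdIncrement, std::memory_order_relaxed);
    auto handle = std::make_shared<ToyHandle>(ToyHandle{id, size});
    handles.emplace(id, handle); // the map keeps the owning reference
    return handle;
}

int main() {
    auto a = CreateHandle(0x1000);
    auto b = CreateHandle(0x2000);
    std::cout << a->id << ' ' << b->id << '\n'; // 4 8
}
```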
diff --git a/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp new file mode 100644 index 000000000..eda2041a0 --- /dev/null +++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp | |||
| @@ -0,0 +1,121 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2022 yuzu Emulator Project | ||
| 2 | // SPDX-FileCopyrightText: 2022 Skyline Team and Contributors | ||
| 3 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 4 | |||
| 5 | #include "common/assert.h" | ||
| 6 | #include "core/hle/service/nvdrv/core/syncpoint_manager.h" | ||
| 7 | #include "video_core/host1x/host1x.h" | ||
| 8 | |||
| 9 | namespace Service::Nvidia::NvCore { | ||
| 10 | |||
| 11 | SyncpointManager::SyncpointManager(Tegra::Host1x::Host1x& host1x_) : host1x{host1x_} { | ||
| 12 | constexpr u32 VBlank0SyncpointId{26}; | ||
| 13 | constexpr u32 VBlank1SyncpointId{27}; | ||
| 14 | |||
| 15 | // Reserve both vblank syncpoints as client managed as they use Continuous Mode | ||
| 16 | // Refer to section 14.3.5.3 of the TRM for more information on Continuous Mode | ||
| 17 | // https://github.com/Jetson-TX1-AndroidTV/android_kernel_jetson_tx1_hdmi_primary/blob/8f74a72394efb871cb3f886a3de2998cd7ff2990/drivers/gpu/host1x/drm/dc.c#L660 | ||
| 18 | ReserveSyncpoint(VBlank0SyncpointId, true); | ||
| 19 | ReserveSyncpoint(VBlank1SyncpointId, true); | ||
| 20 | |||
| 21 | for (u32 syncpoint_id : channel_syncpoints) { | ||
| 22 | if (syncpoint_id) { | ||
| 23 | ReserveSyncpoint(syncpoint_id, false); | ||
| 24 | } | ||
| 25 | } | ||
| 26 | } | ||
| 27 | |||
| 28 | SyncpointManager::~SyncpointManager() = default; | ||
| 29 | |||
| 30 | u32 SyncpointManager::ReserveSyncpoint(u32 id, bool client_managed) { | ||
| 31 | if (syncpoints.at(id).reserved) { | ||
| 32 | ASSERT_MSG(false, "Requested syncpoint is in use"); | ||
| 33 | return 0; | ||
| 34 | } | ||
| 35 | |||
| 36 | syncpoints.at(id).reserved = true; | ||
| 37 | syncpoints.at(id).interface_managed = client_managed; | ||
| 38 | |||
| 39 | return id; | ||
| 40 | } | ||
| 41 | |||
| 42 | u32 SyncpointManager::FindFreeSyncpoint() { | ||
| 43 | for (u32 i{1}; i < syncpoints.size(); i++) { | ||
| 44 | if (!syncpoints[i].reserved) { | ||
| 45 | return i; | ||
| 46 | } | ||
| 47 | } | ||
| 48 | ASSERT_MSG(false, "Failed to find a free syncpoint!"); | ||
| 49 | return 0; | ||
| 50 | } | ||
| 51 | |||
| 52 | u32 SyncpointManager::AllocateSyncpoint(bool client_managed) { | ||
| 53 | std::lock_guard lock(reservation_lock); | ||
| 54 | return ReserveSyncpoint(FindFreeSyncpoint(), client_managed); | ||
| 55 | } | ||
| 56 | |||
| 57 | void SyncpointManager::FreeSyncpoint(u32 id) { | ||
| 58 | std::lock_guard lock(reservation_lock); | ||
| 59 | ASSERT(syncpoints.at(id).reserved); | ||
| 60 | syncpoints.at(id).reserved = false; | ||
| 61 | } | ||
| 62 | |||
| 63 | bool SyncpointManager::IsSyncpointAllocated(u32 id) { | ||
| 64 | return (id <= SyncpointCount) && syncpoints[id].reserved; | ||
| 65 | } | ||
| 66 | |||
| 67 | bool SyncpointManager::HasSyncpointExpired(u32 id, u32 threshold) const { | ||
| 68 | const SyncpointInfo& syncpoint{syncpoints.at(id)}; | ||
| 69 | |||
| 70 | if (!syncpoint.reserved) { | ||
| 71 | ASSERT(false); | ||
| 72 | return 0; | ||
| 73 | } | ||
| 74 | |||
| 75 | // If the interface manages the counters then we don't keep track of the maximum value, as it | ||
| 76 | // handles sanity checking the values itself | ||
| 77 | if (syncpoint.interface_managed) { | ||
| 78 | return static_cast<s32>(syncpoint.counter_min - threshold) >= 0; | ||
| 79 | } else { | ||
| 80 | return (syncpoint.counter_max - threshold) >= (syncpoint.counter_min - threshold); | ||
| 81 | } | ||
| 82 | } | ||
| 83 | |||
| 84 | u32 SyncpointManager::IncrementSyncpointMaxExt(u32 id, u32 amount) { | ||
| 85 | if (!syncpoints.at(id).reserved) { | ||
| 86 | ASSERT(false); | ||
| 87 | return 0; | ||
| 88 | } | ||
| 89 | |||
| 90 | return syncpoints.at(id).counter_max += amount; | ||
| 91 | } | ||
| 92 | |||
| 93 | u32 SyncpointManager::ReadSyncpointMinValue(u32 id) { | ||
| 94 | if (!syncpoints.at(id).reserved) { | ||
| 95 | ASSERT(false); | ||
| 96 | return 0; | ||
| 97 | } | ||
| 98 | |||
| 99 | return syncpoints.at(id).counter_min; | ||
| 100 | } | ||
| 101 | |||
| 102 | u32 SyncpointManager::UpdateMin(u32 id) { | ||
| 103 | if (!syncpoints.at(id).reserved) { | ||
| 104 | ASSERT(false); | ||
| 105 | return 0; | ||
| 106 | } | ||
| 107 | |||
| 108 | syncpoints.at(id).counter_min = host1x.GetSyncpointManager().GetHostSyncpointValue(id); | ||
| 109 | return syncpoints.at(id).counter_min; | ||
| 110 | } | ||
| 111 | |||
| 112 | NvFence SyncpointManager::GetSyncpointFence(u32 id) { | ||
| 113 | if (!syncpoints.at(id).reserved) { | ||
| 114 | ASSERT(false); | ||
| 115 | return NvFence{}; | ||
| 116 | } | ||
| 117 | |||
| 118 | return {.id = static_cast<s32>(id), .value = syncpoints.at(id).counter_max}; | ||
| 119 | } | ||
| 120 | |||
| 121 | } // namespace Service::Nvidia::NvCore | ||
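HasSyncpointExpired above leans on unsigned wraparound arithmetic so the comparison keeps working when the 32-bit counters roll over. The arithmetic in isolation, as a standalone illustration rather than the emulator's class:

```cpp
// Wraparound-safe syncpoint expiry checks.
// Client-managed: expired once the signed distance from threshold to min is non-negative.
// Emulator-managed: expired when min has already reached the threshold, i.e. the
// modulo-2^32 distance from the threshold to max is at least the distance to min.
#include <cstdint>
#include <iostream>

bool ExpiredClientManaged(std::uint32_t counter_min, std::uint32_t threshold) {
    return static_cast<std::int32_t>(counter_min - threshold) >= 0;
}

bool ExpiredEmulatorManaged(std::uint32_t counter_min, std::uint32_t counter_max,
                            std::uint32_t threshold) {
    return (counter_max - threshold) >= (counter_min - threshold);
}

int main() {
    // Near the 32-bit boundary: min has wrapped past the threshold, so it counts as expired.
    std::cout << ExpiredClientManaged(0x00000005, 0xFFFFFFF0) << '\n'; // 1
    std::cout << ExpiredEmulatorManaged(10, 20, 15) << '\n';           // 0 (not reached yet)
    std::cout << ExpiredEmulatorManaged(16, 20, 15) << '\n';           // 1 (already passed)
}
```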
diff --git a/src/core/hle/service/nvdrv/core/syncpoint_manager.h b/src/core/hle/service/nvdrv/core/syncpoint_manager.h new file mode 100644 index 000000000..b76ef9032 --- /dev/null +++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.h | |||
| @@ -0,0 +1,134 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2022 yuzu Emulator Project | ||
| 2 | // SPDX-FileCopyrightText: 2022 Skyline Team and Contributors | ||
| 3 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include <array> | ||
| 8 | #include <atomic> | ||
| 9 | #include <mutex> | ||
| 10 | |||
| 11 | #include "common/common_types.h" | ||
| 12 | #include "core/hle/service/nvdrv/nvdata.h" | ||
| 13 | |||
| 14 | namespace Tegra::Host1x { | ||
| 15 | class Host1x; | ||
| 16 | } // namespace Tegra::Host1x | ||
| 17 | |||
| 18 | namespace Service::Nvidia::NvCore { | ||
| 19 | |||
| 20 | enum class ChannelType : u32 { | ||
| 21 | MsEnc = 0, | ||
| 22 | VIC = 1, | ||
| 23 | GPU = 2, | ||
| 24 | NvDec = 3, | ||
| 25 | Display = 4, | ||
| 26 | NvJpg = 5, | ||
| 27 | TSec = 6, | ||
| 28 | Max = 7 | ||
| 29 | }; | ||
| 30 | |||
| 31 | /** | ||
| 32 | * @brief SyncpointManager handles allocating and accessing host1x syncpoints; these are cached | ||
| 33 | * versions of the HW syncpoints which are intermittently synced | ||
| 34 | * @note Refer to Chapter 14 of the Tegra X1 TRM for an exhaustive overview of them | ||
| 35 | * @url https://http.download.nvidia.com/tegra-public-appnotes/host1x.html | ||
| 36 | * @url | ||
| 37 | * https://github.com/Jetson-TX1-AndroidTV/android_kernel_jetson_tx1_hdmi_primary/blob/jetson-tx1/drivers/video/tegra/host/nvhost_syncpt.c | ||
| 38 | */ | ||
| 39 | class SyncpointManager final { | ||
| 40 | public: | ||
| 41 | explicit SyncpointManager(Tegra::Host1x::Host1x& host1x); | ||
| 42 | ~SyncpointManager(); | ||
| 43 | |||
| 44 | /** | ||
| 45 | * @brief Checks if the given syncpoint is both allocated and below the number of HW syncpoints | ||
| 46 | */ | ||
| 47 | bool IsSyncpointAllocated(u32 id); | ||
| 48 | |||
| 49 | /** | ||
| 50 | * @brief Finds a free syncpoint and reserves it | ||
| 51 | * @return The ID of the reserved syncpoint | ||
| 52 | */ | ||
| 53 | u32 AllocateSyncpoint(bool client_managed); | ||
| 54 | |||
| 55 | /** | ||
| 56 | * @url | ||
| 57 | * https://github.com/Jetson-TX1-AndroidTV/android_kernel_jetson_tx1_hdmi_primary/blob/8f74a72394efb871cb3f886a3de2998cd7ff2990/drivers/gpu/host1x/syncpt.c#L259 | ||
| 58 | */ | ||
| 59 | bool HasSyncpointExpired(u32 id, u32 threshold) const; | ||
| 60 | |||
| 61 | bool IsFenceSignalled(NvFence fence) const { | ||
| 62 | return HasSyncpointExpired(fence.id, fence.value); | ||
| 63 | } | ||
| 64 | |||
| 65 | /** | ||
| 66 | * @brief Atomically increments the maximum value of a syncpoint by the given amount | ||
| 67 | * @return The new max value of the syncpoint | ||
| 68 | */ | ||
| 69 | u32 IncrementSyncpointMaxExt(u32 id, u32 amount); | ||
| 70 | |||
| 71 | /** | ||
| 72 | * @return The minimum value of the syncpoint | ||
| 73 | */ | ||
| 74 | u32 ReadSyncpointMinValue(u32 id); | ||
| 75 | |||
| 76 | /** | ||
| 77 | * @brief Synchronises the minimum value of the syncpoint with the GPU | ||
| 78 | * @return The new minimum value of the syncpoint | ||
| 79 | */ | ||
| 80 | u32 UpdateMin(u32 id); | ||
| 81 | |||
| 82 | /** | ||
| 83 | * @brief Frees the usage of a syncpoint. | ||
| 84 | */ | ||
| 85 | void FreeSyncpoint(u32 id); | ||
| 86 | |||
| 87 | /** | ||
| 88 | * @return A fence that will be signalled once this syncpoint hits its maximum value | ||
| 89 | */ | ||
| 90 | NvFence GetSyncpointFence(u32 id); | ||
| 91 | |||
| 92 | static constexpr std::array<u32, static_cast<u32>(ChannelType::Max)> channel_syncpoints{ | ||
| 93 | 0x0, // `MsEnc` is unimplemented | ||
| 94 | 0xC, // `VIC` | ||
| 95 | 0x0, // `GPU` syncpoints are allocated per-channel instead | ||
| 96 | 0x36, // `NvDec` | ||
| 97 | 0x0, // `Display` is unimplemented | ||
| 98 | 0x37, // `NvJpg` | ||
| 99 | 0x0, // `TSec` is unimplemented | ||
| 100 | }; //!< Maps each channel ID to a constant syncpoint | ||
| 101 | |||
| 102 | private: | ||
| 103 | /** | ||
| 104 | * @note reservation_lock should be locked when calling this | ||
| 105 | */ | ||
| 106 | u32 ReserveSyncpoint(u32 id, bool client_managed); | ||
| 107 | |||
| 108 | /** | ||
| 109 | * @return The ID of the first free syncpoint | ||
| 110 | */ | ||
| 111 | u32 FindFreeSyncpoint(); | ||
| 112 | |||
| 113 | struct SyncpointInfo { | ||
| 114 | std::atomic<u32> counter_min; //!< The least value the syncpoint can be (The value it was | ||
| 115 | //!< when it was last synchronized with host1x) | ||
| 116 | std::atomic<u32> counter_max; //!< The maximum value the syncpoint can reach according to | ||
| 117 | //!< the current usage | ||
| 118 | bool interface_managed; //!< If the syncpoint is managed by a host1x client interface, a | ||
| 119 | //!< client interface is a HW block that can handle host1x | ||
| 120 | //!< transactions on behalf of a host1x client (Which would | ||
| 121 | //!< otherwise need to be manually synced using PIO which is | ||
| 122 | //!< synchronous and requires direct cooperation of the CPU) | ||
| 123 | bool reserved; //!< Whether the syncpoint is reserved, not to be confused with a reserved | ||
| 124 | //!< value | ||
| 125 | }; | ||
| 126 | |||
| 127 | constexpr static std::size_t SyncpointCount{192}; | ||
| 128 | std::array<SyncpointInfo, SyncpointCount> syncpoints{}; | ||
| 129 | std::mutex reservation_lock; | ||
| 130 | |||
| 131 | Tegra::Host1x::Host1x& host1x; | ||
| 132 | }; | ||
| 133 | |||
| 134 | } // namespace Service::Nvidia::NvCore | ||
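channel_syncpoints above hard-wires one syncpoint per engine and leaves the 0 entries (GPU and the unimplemented engines) to be allocated dynamically. A toy lookup-or-allocate version of that pattern, with a plain counter standing in for AllocateSyncpoint:

```cpp
// Toy lookup-or-allocate pattern for per-engine syncpoints (hypothetical types).
#include <array>
#include <cstddef>
#include <cstdint>
#include <iostream>

enum class ChannelType : std::uint32_t { MsEnc, VIC, GPU, NvDec, Display, NvJpg, TSec, Max };

constexpr std::array<std::uint32_t, static_cast<std::size_t>(ChannelType::Max)> channel_syncpoints{
    0x0, 0xC, 0x0, 0x36, 0x0, 0x37, 0x0,
};

std::uint32_t next_dynamic_syncpoint = 64; // stand-in for a real AllocateSyncpoint call

std::uint32_t SyncpointForChannel(ChannelType type) {
    const std::uint32_t fixed = channel_syncpoints[static_cast<std::size_t>(type)];
    return fixed ? fixed : next_dynamic_syncpoint++; // 0 entries fall back to dynamic allocation
}

int main() {
    std::cout << SyncpointForChannel(ChannelType::VIC) << '\n'; // 12, the fixed VIC syncpoint
    std::cout << SyncpointForChannel(ChannelType::GPU) << '\n'; // dynamically allocated
}
```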
diff --git a/src/core/hle/service/nvdrv/devices/nvdevice.h b/src/core/hle/service/nvdrv/devices/nvdevice.h index 696e8121e..204b0e757 100644 --- a/src/core/hle/service/nvdrv/devices/nvdevice.h +++ b/src/core/hle/service/nvdrv/devices/nvdevice.h | |||
| @@ -11,6 +11,10 @@ namespace Core { | |||
| 11 | class System; | 11 | class System; |
| 12 | } | 12 | } |
| 13 | 13 | ||
| 14 | namespace Kernel { | ||
| 15 | class KEvent; | ||
| 16 | } | ||
| 17 | |||
| 14 | namespace Service::Nvidia::Devices { | 18 | namespace Service::Nvidia::Devices { |
| 15 | 19 | ||
| 16 | /// Represents an abstract nvidia device node. It is to be subclassed by concrete device nodes to | 20 | /// Represents an abstract nvidia device node. It is to be subclassed by concrete device nodes to |
| @@ -64,6 +68,10 @@ public: | |||
| 64 | */ | 68 | */ |
| 65 | virtual void OnClose(DeviceFD fd) = 0; | 69 | virtual void OnClose(DeviceFD fd) = 0; |
| 66 | 70 | ||
| 71 | virtual Kernel::KEvent* QueryEvent(u32 event_id) { | ||
| 72 | return nullptr; | ||
| 73 | } | ||
| 74 | |||
| 67 | protected: | 75 | protected: |
| 68 | Core::System& system; | 76 | Core::System& system; |
| 69 | }; | 77 | }; |
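The new QueryEvent hook defaults to "no event", so only device nodes that actually expose wakeup events need to override it. A hedged sketch of such an override, with toy stand-ins for Kernel::KEvent and the nvdevice base class rather than the real types:

```cpp
// Sketch of the QueryEvent extension point (toy stand-ins, not the real classes).
#include <cstdint>
#include <iostream>

struct KEvent {}; // stand-in for Kernel::KEvent

struct nvdevice_like {
    virtual ~nvdevice_like() = default;
    // Default: the device exposes no events for any id.
    virtual KEvent* QueryEvent(std::uint32_t event_id) { return nullptr; }
};

struct dummy_device final : nvdevice_like {
    KEvent completion_event;
    KEvent* QueryEvent(std::uint32_t event_id) override {
        // Hypothetical layout: event 0 is this device's completion event.
        return event_id == 0 ? &completion_event : nullptr;
    }
};

int main() {
    dummy_device dev;
    nvdevice_like& base = dev;
    std::cout << (base.QueryEvent(0) != nullptr) << ' '
              << (base.QueryEvent(7) != nullptr) << '\n'; // 1 0
}
```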
diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp index 604711914..4122fc98d 100644 --- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp +++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp | |||
| @@ -5,15 +5,16 @@ | |||
| 5 | #include "common/logging/log.h" | 5 | #include "common/logging/log.h" |
| 6 | #include "core/core.h" | 6 | #include "core/core.h" |
| 7 | #include "core/core_timing.h" | 7 | #include "core/core_timing.h" |
| 8 | #include "core/hle/service/nvdrv/core/container.h" | ||
| 9 | #include "core/hle/service/nvdrv/core/nvmap.h" | ||
| 8 | #include "core/hle/service/nvdrv/devices/nvdisp_disp0.h" | 10 | #include "core/hle/service/nvdrv/devices/nvdisp_disp0.h" |
| 9 | #include "core/hle/service/nvdrv/devices/nvmap.h" | ||
| 10 | #include "core/perf_stats.h" | 11 | #include "core/perf_stats.h" |
| 11 | #include "video_core/gpu.h" | 12 | #include "video_core/gpu.h" |
| 12 | 13 | ||
| 13 | namespace Service::Nvidia::Devices { | 14 | namespace Service::Nvidia::Devices { |
| 14 | 15 | ||
| 15 | nvdisp_disp0::nvdisp_disp0(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_) | 16 | nvdisp_disp0::nvdisp_disp0(Core::System& system_, NvCore::Container& core) |
| 16 | : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)} {} | 17 | : nvdevice{system_}, container{core}, nvmap{core.GetNvMapFile()} {} |
| 17 | nvdisp_disp0::~nvdisp_disp0() = default; | 18 | nvdisp_disp0::~nvdisp_disp0() = default; |
| 18 | 19 | ||
| 19 | NvResult nvdisp_disp0::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, | 20 | NvResult nvdisp_disp0::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, |
| @@ -39,8 +40,9 @@ void nvdisp_disp0::OnClose(DeviceFD fd) {} | |||
| 39 | 40 | ||
| 40 | void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width, | 41 | void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width, |
| 41 | u32 height, u32 stride, android::BufferTransformFlags transform, | 42 | u32 height, u32 stride, android::BufferTransformFlags transform, |
| 42 | const Common::Rectangle<int>& crop_rect) { | 43 | const Common::Rectangle<int>& crop_rect, |
| 43 | const VAddr addr = nvmap_dev->GetObjectAddress(buffer_handle); | 44 | std::array<Service::Nvidia::NvFence, 4>& fences, u32 num_fences) { |
| 45 | const VAddr addr = nvmap.GetHandleAddress(buffer_handle); | ||
| 44 | LOG_TRACE(Service, | 46 | LOG_TRACE(Service, |
| 45 | "Drawing from address {:X} offset {:08X} Width {} Height {} Stride {} Format {}", | 47 | "Drawing from address {:X} offset {:08X} Width {} Height {} Stride {} Format {}", |
| 46 | addr, offset, width, height, stride, format); | 48 | addr, offset, width, height, stride, format); |
| @@ -48,10 +50,15 @@ void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat form | |||
| 48 | const Tegra::FramebufferConfig framebuffer{addr, offset, width, height, | 50 | const Tegra::FramebufferConfig framebuffer{addr, offset, width, height, |
| 49 | stride, format, transform, crop_rect}; | 51 | stride, format, transform, crop_rect}; |
| 50 | 52 | ||
| 53 | system.GPU().RequestSwapBuffers(&framebuffer, fences, num_fences); | ||
| 51 | system.GetPerfStats().EndSystemFrame(); | 54 | system.GetPerfStats().EndSystemFrame(); |
| 52 | system.GPU().SwapBuffers(&framebuffer); | ||
| 53 | system.SpeedLimiter().DoSpeedLimiting(system.CoreTiming().GetGlobalTimeUs()); | 55 | system.SpeedLimiter().DoSpeedLimiting(system.CoreTiming().GetGlobalTimeUs()); |
| 54 | system.GetPerfStats().BeginSystemFrame(); | 56 | system.GetPerfStats().BeginSystemFrame(); |
| 55 | } | 57 | } |
| 56 | 58 | ||
| 59 | Kernel::KEvent* nvdisp_disp0::QueryEvent(u32 event_id) { | ||
| 60 | LOG_CRITICAL(Service_NVDRV, "Unknown DISP Event {}", event_id); | ||
| 61 | return nullptr; | ||
| 62 | } | ||
| 63 | |||
| 57 | } // namespace Service::Nvidia::Devices | 64 | } // namespace Service::Nvidia::Devices |
diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h index 67b105e02..04217ab12 100644 --- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h +++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h | |||
| @@ -11,13 +11,18 @@ | |||
| 11 | #include "core/hle/service/nvflinger/buffer_transform_flags.h" | 11 | #include "core/hle/service/nvflinger/buffer_transform_flags.h" |
| 12 | #include "core/hle/service/nvflinger/pixel_format.h" | 12 | #include "core/hle/service/nvflinger/pixel_format.h" |
| 13 | 13 | ||
| 14 | namespace Service::Nvidia::NvCore { | ||
| 15 | class Container; | ||
| 16 | class NvMap; | ||
| 17 | } // namespace Service::Nvidia::NvCore | ||
| 18 | |||
| 14 | namespace Service::Nvidia::Devices { | 19 | namespace Service::Nvidia::Devices { |
| 15 | 20 | ||
| 16 | class nvmap; | 21 | class nvmap; |
| 17 | 22 | ||
| 18 | class nvdisp_disp0 final : public nvdevice { | 23 | class nvdisp_disp0 final : public nvdevice { |
| 19 | public: | 24 | public: |
| 20 | explicit nvdisp_disp0(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_); | 25 | explicit nvdisp_disp0(Core::System& system_, NvCore::Container& core); |
| 21 | ~nvdisp_disp0() override; | 26 | ~nvdisp_disp0() override; |
| 22 | 27 | ||
| 23 | NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, | 28 | NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, |
| @@ -33,10 +38,14 @@ public: | |||
| 33 | /// Performs a screen flip, drawing the buffer pointed to by the handle. | 38 | /// Performs a screen flip, drawing the buffer pointed to by the handle. |
| 34 | void flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width, u32 height, | 39 | void flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width, u32 height, |
| 35 | u32 stride, android::BufferTransformFlags transform, | 40 | u32 stride, android::BufferTransformFlags transform, |
| 36 | const Common::Rectangle<int>& crop_rect); | 41 | const Common::Rectangle<int>& crop_rect, |
| 42 | std::array<Service::Nvidia::NvFence, 4>& fences, u32 num_fences); | ||
| 43 | |||
| 44 | Kernel::KEvent* QueryEvent(u32 event_id) override; | ||
| 37 | 45 | ||
| 38 | private: | 46 | private: |
| 39 | std::shared_ptr<nvmap> nvmap_dev; | 47 | NvCore::Container& container; |
| 48 | NvCore::NvMap& nvmap; | ||
| 40 | }; | 49 | }; |
| 41 | 50 | ||
| 42 | } // namespace Service::Nvidia::Devices | 51 | } // namespace Service::Nvidia::Devices |
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp index 9867a648d..6411dbf43 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp | |||
| @@ -1,21 +1,30 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: 2021 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-FileCopyrightText: 2021 Skyline Team and Contributors |
| 3 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | 4 | ||
| 4 | #include <cstring> | 5 | #include <cstring> |
| 5 | #include <utility> | 6 | #include <utility> |
| 6 | 7 | ||
| 8 | #include "common/alignment.h" | ||
| 7 | #include "common/assert.h" | 9 | #include "common/assert.h" |
| 8 | #include "common/logging/log.h" | 10 | #include "common/logging/log.h" |
| 9 | #include "core/core.h" | 11 | #include "core/core.h" |
| 12 | #include "core/hle/service/nvdrv/core/container.h" | ||
| 13 | #include "core/hle/service/nvdrv/core/nvmap.h" | ||
| 10 | #include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h" | 14 | #include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h" |
| 11 | #include "core/hle/service/nvdrv/devices/nvmap.h" | 15 | #include "core/hle/service/nvdrv/devices/nvhost_gpu.h" |
| 16 | #include "core/hle/service/nvdrv/nvdrv.h" | ||
| 17 | #include "video_core/control/channel_state.h" | ||
| 18 | #include "video_core/gpu.h" | ||
| 12 | #include "video_core/memory_manager.h" | 19 | #include "video_core/memory_manager.h" |
| 13 | #include "video_core/rasterizer_interface.h" | 20 | #include "video_core/rasterizer_interface.h" |
| 14 | 21 | ||
| 15 | namespace Service::Nvidia::Devices { | 22 | namespace Service::Nvidia::Devices { |
| 16 | 23 | ||
| 17 | nvhost_as_gpu::nvhost_as_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_) | 24 | nvhost_as_gpu::nvhost_as_gpu(Core::System& system_, Module& module_, NvCore::Container& core) |
| 18 | : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)} {} | 25 | : nvdevice{system_}, module{module_}, container{core}, nvmap{core.GetNvMapFile()}, vm{}, |
| 26 | gmmu{} {} | ||
| 27 | |||
| 19 | nvhost_as_gpu::~nvhost_as_gpu() = default; | 28 | nvhost_as_gpu::~nvhost_as_gpu() = default; |
| 20 | 29 | ||
| 21 | NvResult nvhost_as_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, | 30 | NvResult nvhost_as_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, |
| @@ -82,12 +91,52 @@ NvResult nvhost_as_gpu::AllocAsEx(const std::vector<u8>& input, std::vector<u8>& | |||
| 82 | IoctlAllocAsEx params{}; | 91 | IoctlAllocAsEx params{}; |
| 83 | std::memcpy(¶ms, input.data(), input.size()); | 92 | std::memcpy(¶ms, input.data(), input.size()); |
| 84 | 93 | ||
| 85 | LOG_WARNING(Service_NVDRV, "(STUBBED) called, big_page_size=0x{:X}", params.big_page_size); | 94 | LOG_DEBUG(Service_NVDRV, "called, big_page_size=0x{:X}", params.big_page_size); |
| 86 | if (params.big_page_size == 0) { | 95 | |
| 87 | params.big_page_size = DEFAULT_BIG_PAGE_SIZE; | 96 | std::scoped_lock lock(mutex); |
| 97 | |||
| 98 | if (vm.initialised) { | ||
| 99 | ASSERT_MSG(false, "Cannot initialise an address space twice!"); | ||
| 100 | return NvResult::InvalidState; | ||
| 101 | } | ||
| 102 | |||
| 103 | if (params.big_page_size) { | ||
| 104 | if (!std::has_single_bit(params.big_page_size)) { | ||
| 105 | LOG_ERROR(Service_NVDRV, "Non power-of-2 big page size: 0x{:X}!", params.big_page_size); | ||
| 106 | return NvResult::BadValue; | ||
| 107 | } | ||
| 108 | |||
| 109 | if ((params.big_page_size & VM::SUPPORTED_BIG_PAGE_SIZES) == 0) { | ||
| 110 | LOG_ERROR(Service_NVDRV, "Unsupported big page size: 0x{:X}!", params.big_page_size); | ||
| 111 | return NvResult::BadValue; | ||
| 112 | } | ||
| 113 | |||
| 114 | vm.big_page_size = params.big_page_size; | ||
| 115 | vm.big_page_size_bits = static_cast<u32>(std::countr_zero(params.big_page_size)); | ||
| 116 | |||
| 117 | vm.va_range_start = params.big_page_size << VM::VA_START_SHIFT; | ||
| 118 | } | ||
| 119 | |||
| 120 | // If this is unspecified then default values should be used | ||
| 121 | if (params.va_range_start) { | ||
| 122 | vm.va_range_start = params.va_range_start; | ||
| 123 | vm.va_range_split = params.va_range_split; | ||
| 124 | vm.va_range_end = params.va_range_end; | ||
| 88 | } | 125 | } |
| 89 | 126 | ||
| 90 | big_page_size = params.big_page_size; | 127 | const auto start_pages{static_cast<u32>(vm.va_range_start >> VM::PAGE_SIZE_BITS)}; |
| 128 | const auto end_pages{static_cast<u32>(vm.va_range_split >> VM::PAGE_SIZE_BITS)}; | ||
| 129 | vm.small_page_allocator = std::make_shared<VM::Allocator>(start_pages, end_pages); | ||
| 130 | |||
| 131 | const auto start_big_pages{static_cast<u32>(vm.va_range_split >> vm.big_page_size_bits)}; | ||
| 132 | const auto end_big_pages{ | ||
| 133 | static_cast<u32>((vm.va_range_end - vm.va_range_split) >> vm.big_page_size_bits)}; | ||
| 134 | vm.big_page_allocator = std::make_unique<VM::Allocator>(start_big_pages, end_big_pages); | ||
| 135 | |||
| 136 | gmmu = std::make_shared<Tegra::MemoryManager>(system, 40, vm.big_page_size_bits, | ||
| 137 | VM::PAGE_SIZE_BITS); | ||
| 138 | system.GPU().InitAddressSpace(*gmmu); | ||
| 139 | vm.initialised = true; | ||
| 91 | 140 | ||
| 92 | return NvResult::Success; | 141 | return NvResult::Success; |
| 93 | } | 142 | } |
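AllocAsEx now validates the requested big page size (power of two, inside the supported mask) before deriving the shift and the VA range start from it. The same checks in standalone form; the mask and shift values below are illustrative assumptions, not necessarily the emulator's VM::SUPPORTED_BIG_PAGE_SIZES and VM::VA_START_SHIFT:

```cpp
// Validation of a requested big page size, mirroring the power-of-two and
// supported-mask checks above (constants here are assumptions for the sketch).
#include <bit>
#include <cstdint>
#include <iostream>
#include <optional>

constexpr std::uint32_t kSupportedBigPageSizes = 0x30000; // assumed: 64 KiB | 128 KiB
constexpr std::uint32_t kVaStartShift = 10;               // assumed

struct BigPageConfig {
    std::uint32_t size;
    std::uint32_t size_bits;
    std::uint64_t va_range_start;
};

std::optional<BigPageConfig> ValidateBigPageSize(std::uint32_t big_page_size) {
    if (!std::has_single_bit(big_page_size)) {
        return std::nullopt; // must be a power of two
    }
    if ((big_page_size & kSupportedBigPageSizes) == 0) {
        return std::nullopt; // must be one of the supported sizes
    }
    return BigPageConfig{
        .size = big_page_size,
        .size_bits = static_cast<std::uint32_t>(std::countr_zero(big_page_size)),
        .va_range_start = static_cast<std::uint64_t>(big_page_size) << kVaStartShift,
    };
}

int main() {
    std::cout << ValidateBigPageSize(0x20000).has_value() << '\n'; // 1 (128 KiB)
    std::cout << ValidateBigPageSize(0x18000).has_value() << '\n'; // 0 (not a power of two)
}
```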
| @@ -99,21 +148,76 @@ NvResult nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector< | |||
| 99 | LOG_DEBUG(Service_NVDRV, "called, pages={:X}, page_size={:X}, flags={:X}", params.pages, | 148 | LOG_DEBUG(Service_NVDRV, "called, pages={:X}, page_size={:X}, flags={:X}", params.pages, |
| 100 | params.page_size, params.flags); | 149 | params.page_size, params.flags); |
| 101 | 150 | ||
| 102 | const auto size{static_cast<u64>(params.pages) * static_cast<u64>(params.page_size)}; | 151 | std::scoped_lock lock(mutex); |
| 103 | if ((params.flags & AddressSpaceFlags::FixedOffset) != AddressSpaceFlags::None) { | 152 | |
| 104 | params.offset = *system.GPU().MemoryManager().AllocateFixed(params.offset, size); | 153 | if (!vm.initialised) { |
| 154 | return NvResult::BadValue; | ||
| 155 | } | ||
| 156 | |||
| 157 | if (params.page_size != VM::YUZU_PAGESIZE && params.page_size != vm.big_page_size) { | ||
| 158 | return NvResult::BadValue; | ||
| 159 | } | ||
| 160 | |||
| 161 | if (params.page_size != vm.big_page_size && | ||
| 162 | ((params.flags & MappingFlags::Sparse) != MappingFlags::None)) { | ||
| 163 | UNIMPLEMENTED_MSG("Sparse small pages are not implemented!"); | ||
| 164 | return NvResult::NotImplemented; | ||
| 165 | } | ||
| 166 | |||
| 167 | const u32 page_size_bits{params.page_size == VM::YUZU_PAGESIZE ? VM::PAGE_SIZE_BITS | ||
| 168 | : vm.big_page_size_bits}; | ||
| 169 | |||
| 170 | auto& allocator{params.page_size == VM::YUZU_PAGESIZE ? *vm.small_page_allocator | ||
| 171 | : *vm.big_page_allocator}; | ||
| 172 | |||
| 173 | if ((params.flags & MappingFlags::Fixed) != MappingFlags::None) { | ||
| 174 | allocator.AllocateFixed(static_cast<u32>(params.offset >> page_size_bits), params.pages); | ||
| 105 | } else { | 175 | } else { |
| 106 | params.offset = system.GPU().MemoryManager().Allocate(size, params.align); | 176 | params.offset = static_cast<u64>(allocator.Allocate(params.pages)) << page_size_bits; |
| 177 | if (!params.offset) { | ||
| 178 | ASSERT_MSG(false, "Failed to allocate free space in the GPU AS!"); | ||
| 179 | return NvResult::InsufficientMemory; | ||
| 180 | } | ||
| 107 | } | 181 | } |
| 108 | 182 | ||
| 109 | auto result = NvResult::Success; | 183 | u64 size{static_cast<u64>(params.pages) * params.page_size}; |
| 110 | if (!params.offset) { | 184 | |
| 111 | LOG_CRITICAL(Service_NVDRV, "allocation failed for size {}", size); | 185 | if ((params.flags & MappingFlags::Sparse) != MappingFlags::None) { |
| 112 | result = NvResult::InsufficientMemory; | 186 | gmmu->MapSparse(params.offset, size); |
| 113 | } | 187 | } |
| 114 | 188 | ||
| 189 | allocation_map[params.offset] = { | ||
| 190 | .size = size, | ||
| 191 | .mappings{}, | ||
| 192 | .page_size = params.page_size, | ||
| 193 | .sparse = (params.flags & MappingFlags::Sparse) != MappingFlags::None, | ||
| 194 | .big_pages = params.page_size != VM::YUZU_PAGESIZE, | ||
| 195 | }; | ||
| 196 | |||
| 115 | std::memcpy(output.data(), ¶ms, output.size()); | 197 | std::memcpy(output.data(), ¶ms, output.size()); |
| 116 | return result; | 198 | return NvResult::Success; |
| 199 | } | ||
| 200 | |||
| 201 | void nvhost_as_gpu::FreeMappingLocked(u64 offset) { | ||
| 202 | auto mapping{mapping_map.at(offset)}; | ||
| 203 | |||
| 204 | if (!mapping->fixed) { | ||
| 205 | auto& allocator{mapping->big_page ? *vm.big_page_allocator : *vm.small_page_allocator}; | ||
| 206 | u32 page_size_bits{mapping->big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS}; | ||
| 207 | |||
| 208 | allocator.Free(static_cast<u32>(mapping->offset >> page_size_bits), | ||
| 209 | static_cast<u32>(mapping->size >> page_size_bits)); | ||
| 210 | } | ||
| 211 | |||
| 212 | // Sparse mappings shouldn't be fully unmapped, just returned to their sparse state | ||
| 213 | // Only FreeSpace can unmap them fully | ||
| 214 | if (mapping->sparse_alloc) { | ||
| 215 | gmmu->MapSparse(offset, mapping->size, mapping->big_page); | ||
| 216 | } else { | ||
| 217 | gmmu->Unmap(offset, mapping->size); | ||
| 218 | } | ||
| 219 | |||
| 220 | mapping_map.erase(offset); | ||
| 117 | } | 221 | } |
| 118 | 222 | ||
| 119 | NvResult nvhost_as_gpu::FreeSpace(const std::vector<u8>& input, std::vector<u8>& output) { | 223 | NvResult nvhost_as_gpu::FreeSpace(const std::vector<u8>& input, std::vector<u8>& output) { |
| @@ -123,8 +227,40 @@ NvResult nvhost_as_gpu::FreeSpace(const std::vector<u8>& input, std::vector<u8>& | |||
| 123 | LOG_DEBUG(Service_NVDRV, "called, offset={:X}, pages={:X}, page_size={:X}", params.offset, | 227 | LOG_DEBUG(Service_NVDRV, "called, offset={:X}, pages={:X}, page_size={:X}", params.offset, |
| 124 | params.pages, params.page_size); | 228 | params.pages, params.page_size); |
| 125 | 229 | ||
| 126 | system.GPU().MemoryManager().Unmap(params.offset, | 230 | std::scoped_lock lock(mutex); |
| 127 | static_cast<std::size_t>(params.pages) * params.page_size); | 231 | |
| 232 | if (!vm.initialised) { | ||
| 233 | return NvResult::BadValue; | ||
| 234 | } | ||
| 235 | |||
| 236 | try { | ||
| 237 | auto allocation{allocation_map[params.offset]}; | ||
| 238 | |||
| 239 | if (allocation.page_size != params.page_size || | ||
| 240 | allocation.size != (static_cast<u64>(params.pages) * params.page_size)) { | ||
| 241 | return NvResult::BadValue; | ||
| 242 | } | ||
| 243 | |||
| 244 | for (const auto& mapping : allocation.mappings) { | ||
| 245 | FreeMappingLocked(mapping->offset); | ||
| 246 | } | ||
| 247 | |||
| 248 | // Unset sparse flag if required | ||
| 249 | if (allocation.sparse) { | ||
| 250 | gmmu->Unmap(params.offset, allocation.size); | ||
| 251 | } | ||
| 252 | |||
| 253 | auto& allocator{params.page_size == VM::YUZU_PAGESIZE ? *vm.small_page_allocator | ||
| 254 | : *vm.big_page_allocator}; | ||
| 255 | u32 page_size_bits{params.page_size == VM::YUZU_PAGESIZE ? VM::PAGE_SIZE_BITS | ||
| 256 | : vm.big_page_size_bits}; | ||
| 257 | |||
| 258 | allocator.Free(static_cast<u32>(params.offset >> page_size_bits), | ||
| 259 | static_cast<u32>(allocation.size >> page_size_bits)); | ||
| 260 | allocation_map.erase(params.offset); | ||
| 261 | } catch (const std::out_of_range&) { | ||
| 262 | return NvResult::BadValue; | ||
| 263 | } | ||
| 128 | 264 | ||
| 129 | std::memcpy(output.data(), ¶ms, output.size()); | 265 | std::memcpy(output.data(), ¶ms, output.size()); |
| 130 | return NvResult::Success; | 266 | return NvResult::Success; |
| @@ -135,35 +271,52 @@ NvResult nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& out | |||
| 135 | 271 | ||
| 136 | LOG_DEBUG(Service_NVDRV, "called, num_entries=0x{:X}", num_entries); | 272 | LOG_DEBUG(Service_NVDRV, "called, num_entries=0x{:X}", num_entries); |
| 137 | 273 | ||
| 138 | auto result = NvResult::Success; | ||
| 139 | std::vector<IoctlRemapEntry> entries(num_entries); | 274 | std::vector<IoctlRemapEntry> entries(num_entries); |
| 140 | std::memcpy(entries.data(), input.data(), input.size()); | 275 | std::memcpy(entries.data(), input.data(), input.size()); |
| 141 | 276 | ||
| 277 | std::scoped_lock lock(mutex); | ||
| 278 | |||
| 279 | if (!vm.initialised) { | ||
| 280 | return NvResult::BadValue; | ||
| 281 | } | ||
| 282 | |||
| 142 | for (const auto& entry : entries) { | 283 | for (const auto& entry : entries) { |
| 143 | LOG_DEBUG(Service_NVDRV, "remap entry, offset=0x{:X} handle=0x{:X} pages=0x{:X}", | 284 | GPUVAddr virtual_address{static_cast<u64>(entry.as_offset_big_pages) |
| 144 | entry.offset, entry.nvmap_handle, entry.pages); | 285 | << vm.big_page_size_bits}; |
| 286 | u64 size{static_cast<u64>(entry.big_pages) << vm.big_page_size_bits}; | ||
| 145 | 287 | ||
| 146 | const auto object{nvmap_dev->GetObject(entry.nvmap_handle)}; | 288 | auto alloc{allocation_map.upper_bound(virtual_address)}; |
| 147 | if (!object) { | 289 | |
| 148 | LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", entry.nvmap_handle); | 290 | if (alloc-- == allocation_map.begin() || |
| 149 | result = NvResult::InvalidState; | 291 | (virtual_address - alloc->first) + size > alloc->second.size) { |
| 150 | break; | 292 | LOG_WARNING(Service_NVDRV, "Cannot remap into an unallocated region!"); |
| 293 | return NvResult::BadValue; | ||
| 151 | } | 294 | } |
| 152 | 295 | ||
| 153 | const auto offset{static_cast<GPUVAddr>(entry.offset) << 0x10}; | 296 | if (!alloc->second.sparse) { |
| 154 | const auto size{static_cast<u64>(entry.pages) << 0x10}; | 297 | LOG_WARNING(Service_NVDRV, "Cannot remap a non-sparse mapping!"); |
| 155 | const auto map_offset{static_cast<u64>(entry.map_offset) << 0x10}; | 298 | return NvResult::BadValue; |
| 156 | const auto addr{system.GPU().MemoryManager().Map(object->addr + map_offset, offset, size)}; | 299 | } |
| 157 | 300 | ||
| 158 | if (!addr) { | 301 | const bool use_big_pages = alloc->second.big_pages; |
| 159 | LOG_CRITICAL(Service_NVDRV, "map returned an invalid address!"); | 302 | if (!entry.handle) { |
| 160 | result = NvResult::InvalidState; | 303 | gmmu->MapSparse(virtual_address, size, use_big_pages); |
| 161 | break; | 304 | } else { |
| 305 | auto handle{nvmap.GetHandle(entry.handle)}; | ||
| 306 | if (!handle) { | ||
| 307 | return NvResult::BadValue; | ||
| 308 | } | ||
| 309 | |||
| 310 | VAddr cpu_address{static_cast<VAddr>( | ||
| 311 | handle->address + | ||
| 312 | (static_cast<u64>(entry.handle_offset_big_pages) << vm.big_page_size_bits))}; | ||
| 313 | |||
| 314 | gmmu->Map(virtual_address, cpu_address, size, use_big_pages); | ||
| 162 | } | 315 | } |
| 163 | } | 316 | } |
| 164 | 317 | ||
| 165 | std::memcpy(output.data(), entries.data(), output.size()); | 318 | std::memcpy(output.data(), entries.data(), output.size()); |
| 166 | return result; | 319 | return NvResult::Success; |
| 167 | } | 320 | } |
| 168 | 321 | ||
| 169 | NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& output) { | 322 | NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& output) { |
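Both Remap above and MapBufferEx below find the allocation that covers a given GPU virtual address with std::map::upper_bound, a step back, and a bounds check. The lookup in isolation, with a toy allocation record:

```cpp
// Toy version of the "find the allocation containing [va, va + size)" lookup used above.
#include <cstdint>
#include <iostream>
#include <map>

struct Allocation {
    std::uint64_t size;
};

std::map<std::uint64_t, Allocation> allocation_map; // keyed by allocation base address

const Allocation* FindCoveringAllocation(std::uint64_t va, std::uint64_t size) {
    auto alloc = allocation_map.upper_bound(va); // first allocation starting strictly after va
    if (alloc == allocation_map.begin()) {
        return nullptr; // nothing starts at or before va
    }
    --alloc; // now the last allocation starting at or before va
    if ((va - alloc->first) + size > alloc->second.size) {
        return nullptr; // the requested range runs past the end of that allocation
    }
    return &alloc->second;
}

int main() {
    allocation_map[0x100000] = Allocation{0x10000};
    std::cout << (FindCoveringAllocation(0x104000, 0x1000) != nullptr) << '\n'; // 1
    std::cout << (FindCoveringAllocation(0x10F000, 0x2000) != nullptr) << '\n'; // 0 (overruns)
}
```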
| @@ -173,79 +326,98 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8 | |||
| 173 | LOG_DEBUG(Service_NVDRV, | 326 | LOG_DEBUG(Service_NVDRV, |
| 174 | "called, flags={:X}, nvmap_handle={:X}, buffer_offset={}, mapping_size={}" | 327 | "called, flags={:X}, nvmap_handle={:X}, buffer_offset={}, mapping_size={}" |
| 175 | ", offset={}", | 328 | ", offset={}", |
| 176 | params.flags, params.nvmap_handle, params.buffer_offset, params.mapping_size, | 329 | params.flags, params.handle, params.buffer_offset, params.mapping_size, |
| 177 | params.offset); | 330 | params.offset); |
| 178 | 331 | ||
| 179 | const auto object{nvmap_dev->GetObject(params.nvmap_handle)}; | 332 | std::scoped_lock lock(mutex); |
| 180 | if (!object) { | ||
| 181 | LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", params.nvmap_handle); | ||
| 182 | std::memcpy(output.data(), ¶ms, output.size()); | ||
| 183 | return NvResult::InvalidState; | ||
| 184 | } | ||
| 185 | |||
| 186 | // The real nvservices doesn't make a distinction between handles and ids, and | ||
| 187 | // object can only have one handle and it will be the same as its id. Assert that this is the | ||
| 188 | // case to prevent unexpected behavior. | ||
| 189 | ASSERT(object->id == params.nvmap_handle); | ||
| 190 | auto& gpu = system.GPU(); | ||
| 191 | 333 | ||
| 192 | u64 page_size{params.page_size}; | 334 | if (!vm.initialised) { |
| 193 | if (!page_size) { | 335 | return NvResult::BadValue; |
| 194 | page_size = object->align; | ||
| 195 | } | 336 | } |
| 196 | 337 | ||
| 197 | if ((params.flags & AddressSpaceFlags::Remap) != AddressSpaceFlags::None) { | 338 | // Remaps a subregion of an existing mapping to a different PA |
| 198 | if (const auto buffer_map{FindBufferMap(params.offset)}; buffer_map) { | 339 | if ((params.flags & MappingFlags::Remap) != MappingFlags::None) { |
| 199 | const auto cpu_addr{static_cast<VAddr>(buffer_map->CpuAddr() + params.buffer_offset)}; | 340 | try { |
| 200 | const auto gpu_addr{static_cast<GPUVAddr>(params.offset + params.buffer_offset)}; | 341 | auto mapping{mapping_map.at(params.offset)}; |
| 201 | 342 | ||
| 202 | if (!gpu.MemoryManager().Map(cpu_addr, gpu_addr, params.mapping_size)) { | 343 | if (mapping->size < params.mapping_size) { |
| 203 | LOG_CRITICAL(Service_NVDRV, | 344 | LOG_WARNING(Service_NVDRV, |
| 204 | "remap failed, flags={:X}, nvmap_handle={:X}, buffer_offset={}, " | 345 | "Cannot remap a partially mapped GPU address space region: 0x{:X}", |
| 205 | "mapping_size = {}, offset={}", | 346 | params.offset); |
| 206 | params.flags, params.nvmap_handle, params.buffer_offset, | 347 | return NvResult::BadValue; |
| 207 | params.mapping_size, params.offset); | ||
| 208 | |||
| 209 | std::memcpy(output.data(), ¶ms, output.size()); | ||
| 210 | return NvResult::InvalidState; | ||
| 211 | } | 348 | } |
| 212 | 349 | ||
| 213 | std::memcpy(output.data(), ¶ms, output.size()); | 350 | u64 gpu_address{static_cast<u64>(params.offset + params.buffer_offset)}; |
| 214 | return NvResult::Success; | 351 | VAddr cpu_address{mapping->ptr + params.buffer_offset}; |
| 215 | } else { | 352 | |
| 216 | LOG_CRITICAL(Service_NVDRV, "address not mapped offset={}", params.offset); | 353 | gmmu->Map(gpu_address, cpu_address, params.mapping_size, mapping->big_page); |
| 217 | 354 | ||
| 218 | std::memcpy(output.data(), ¶ms, output.size()); | 355 | return NvResult::Success; |
| 219 | return NvResult::InvalidState; | 356 | } catch (const std::out_of_range&) { |
| 357 | LOG_WARNING(Service_NVDRV, "Cannot remap an unmapped GPU address space region: 0x{:X}", | ||
| 358 | params.offset); | ||
| 359 | return NvResult::BadValue; | ||
| 220 | } | 360 | } |
| 221 | } | 361 | } |
| 222 | 362 | ||
| 223 | // We can only map objects that have already been assigned a CPU address. | 363 | auto handle{nvmap.GetHandle(params.handle)}; |
| 224 | ASSERT(object->status == nvmap::Object::Status::Allocated); | 364 | if (!handle) { |
| 225 | 365 | return NvResult::BadValue; | |
| 226 | const auto physical_address{object->addr + params.buffer_offset}; | ||
| 227 | u64 size{params.mapping_size}; | ||
| 228 | if (!size) { | ||
| 229 | size = object->size; | ||
| 230 | } | 366 | } |
| 231 | 367 | ||
| 232 | const bool is_alloc{(params.flags & AddressSpaceFlags::FixedOffset) == AddressSpaceFlags::None}; | 368 | VAddr cpu_address{static_cast<VAddr>(handle->address + params.buffer_offset)}; |
| 233 | if (is_alloc) { | 369 | u64 size{params.mapping_size ? params.mapping_size : handle->orig_size}; |
| 234 | params.offset = gpu.MemoryManager().MapAllocate(physical_address, size, page_size); | 370 | |
| 235 | } else { | 371 | bool big_page{[&]() { |
| 236 | params.offset = gpu.MemoryManager().Map(physical_address, params.offset, size); | 372 | if (Common::IsAligned(handle->align, vm.big_page_size)) { |
| 237 | } | 373 | return true; |
| 374 | } else if (Common::IsAligned(handle->align, VM::YUZU_PAGESIZE)) { | ||
| 375 | return false; | ||
| 376 | } else { | ||
| 377 | ASSERT(false); | ||
| 378 | return false; | ||
| 379 | } | ||
| 380 | }()}; | ||
| 381 | |||
| 382 | if ((params.flags & MappingFlags::Fixed) != MappingFlags::None) { | ||
| 383 | auto alloc{allocation_map.upper_bound(params.offset)}; | ||
| 238 | 384 | ||
| 239 | auto result = NvResult::Success; | 385 | if (alloc-- == allocation_map.begin() || |
| 240 | if (!params.offset) { | 386 | (params.offset - alloc->first) + size > alloc->second.size) { |
| 241 | LOG_CRITICAL(Service_NVDRV, "failed to map size={}", size); | 387 | ASSERT_MSG(false, "Cannot perform a fixed mapping into an unallocated region!"); |
| 242 | result = NvResult::InvalidState; | 388 | return NvResult::BadValue; |
| 389 | } | ||
| 390 | |||
| 391 | const bool use_big_pages = alloc->second.big_pages && big_page; | ||
| 392 | gmmu->Map(params.offset, cpu_address, size, use_big_pages); | ||
| 393 | |||
| 394 | auto mapping{std::make_shared<Mapping>(cpu_address, params.offset, size, true, | ||
| 395 | use_big_pages, alloc->second.sparse)}; | ||
| 396 | alloc->second.mappings.push_back(mapping); | ||
| 397 | mapping_map[params.offset] = mapping; | ||
| 243 | } else { | 398 | } else { |
| 244 | AddBufferMap(params.offset, size, physical_address, is_alloc); | 399 | |
| 400 | auto& allocator{big_page ? *vm.big_page_allocator : *vm.small_page_allocator}; | ||
| 401 | u32 page_size{big_page ? vm.big_page_size : VM::YUZU_PAGESIZE}; | ||
| 402 | u32 page_size_bits{big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS}; | ||
| 403 | |||
| 404 | params.offset = static_cast<u64>(allocator.Allocate( | ||
| 405 | static_cast<u32>(Common::AlignUp(size, page_size) >> page_size_bits))) | ||
| 406 | << page_size_bits; | ||
| 407 | if (!params.offset) { | ||
| 408 | ASSERT_MSG(false, "Failed to allocate free space in the GPU AS!"); | ||
| 409 | return NvResult::InsufficientMemory; | ||
| 410 | } | ||
| 411 | |||
| 412 | gmmu->Map(params.offset, cpu_address, Common::AlignUp(size, page_size), big_page); | ||
| 413 | |||
| 414 | auto mapping{ | ||
| 415 | std::make_shared<Mapping>(cpu_address, params.offset, size, false, big_page, false)}; | ||
| 416 | mapping_map[params.offset] = mapping; | ||
| 245 | } | 417 | } |
| 246 | 418 | ||
| 247 | std::memcpy(output.data(), ¶ms, output.size()); | 419 | std::memcpy(output.data(), ¶ms, output.size()); |
| 248 | return result; | 420 | return NvResult::Success; |
| 249 | } | 421 | } |
| 250 | 422 | ||
| 251 | NvResult nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output) { | 423 | NvResult nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output) { |
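MapBufferEx picks between small and big pages from the handle's alignment: big-page-aligned handles get big pages, merely page-aligned ones fall back to small pages, and anything else is treated as a bug. A toy version of that decision; the page sizes are assumptions, not the emulator's VM constants:

```cpp
// Toy page-size selection based on a handle's alignment, mirroring the lambda above.
#include <cassert>
#include <cstdint>
#include <iostream>

constexpr std::uint64_t kSmallPageSize = 0x1000; // assumed 4 KiB
constexpr std::uint64_t kBigPageSize = 0x10000;  // assumed 64 KiB

bool IsAligned(std::uint64_t value, std::uint64_t alignment) {
    return (value & (alignment - 1)) == 0; // alignment must be a power of two
}

bool UseBigPages(std::uint64_t handle_align) {
    if (IsAligned(handle_align, kBigPageSize)) {
        return true;  // big-page aligned handles map with big pages
    }
    if (IsAligned(handle_align, kSmallPageSize)) {
        return false; // only page-aligned: fall back to small pages
    }
    assert(false && "handle alignment is not even page-aligned");
    return false;
}

int main() {
    std::cout << UseBigPages(0x10000) << ' ' << UseBigPages(0x1000) << '\n'; // 1 0
}
```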
| @@ -254,47 +426,82 @@ NvResult nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8 | |||
| 254 | 426 | ||
| 255 | LOG_DEBUG(Service_NVDRV, "called, offset=0x{:X}", params.offset); | 427 | LOG_DEBUG(Service_NVDRV, "called, offset=0x{:X}", params.offset); |
| 256 | 428 | ||
| 257 | if (const auto size{RemoveBufferMap(params.offset)}; size) { | 429 | std::scoped_lock lock(mutex); |
| 258 | system.GPU().MemoryManager().Unmap(params.offset, *size); | 430 | |
| 259 | } else { | 431 | if (!vm.initialised) { |
| 260 | LOG_ERROR(Service_NVDRV, "invalid offset=0x{:X}", params.offset); | 432 | return NvResult::BadValue; |
| 433 | } | ||
| 434 | |||
| 435 | try { | ||
| 436 | auto mapping{mapping_map.at(params.offset)}; | ||
| 437 | |||
| 438 | if (!mapping->fixed) { | ||
| 439 | auto& allocator{mapping->big_page ? *vm.big_page_allocator : *vm.small_page_allocator}; | ||
| 440 | u32 page_size_bits{mapping->big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS}; | ||
| 441 | |||
| 442 | allocator.Free(static_cast<u32>(mapping->offset >> page_size_bits), | ||
| 443 | static_cast<u32>(mapping->size >> page_size_bits)); | ||
| 444 | } | ||
| 445 | |||
| 446 | // Sparse mappings shouldn't be fully unmapped, just returned to their sparse state | ||
| 447 | // Only FreeSpace can unmap them fully | ||
| 448 | if (mapping->sparse_alloc) { | ||
| 449 | gmmu->MapSparse(params.offset, mapping->size, mapping->big_page); | ||
| 450 | } else { | ||
| 451 | gmmu->Unmap(params.offset, mapping->size); | ||
| 452 | } | ||
| 453 | |||
| 454 | mapping_map.erase(params.offset); | ||
| 455 | } catch (const std::out_of_range&) { | ||
| 456 | LOG_WARNING(Service_NVDRV, "Couldn't find region to unmap at 0x{:X}", params.offset); | ||
| 261 | } | 457 | } |
| 262 | 458 | ||
| 263 | std::memcpy(output.data(), ¶ms, output.size()); | ||
| 264 | return NvResult::Success; | 459 | return NvResult::Success; |
| 265 | } | 460 | } |
| 266 | 461 | ||
| 267 | NvResult nvhost_as_gpu::BindChannel(const std::vector<u8>& input, std::vector<u8>& output) { | 462 | NvResult nvhost_as_gpu::BindChannel(const std::vector<u8>& input, std::vector<u8>& output) { |
| 268 | IoctlBindChannel params{}; | 463 | IoctlBindChannel params{}; |
| 269 | std::memcpy(&params, input.data(), input.size()); | 464 | std::memcpy(&params, input.data(), input.size()); |
| 270 | LOG_WARNING(Service_NVDRV, "(STUBBED) called, fd={:X}", params.fd); | 465 | LOG_DEBUG(Service_NVDRV, "called, fd={:X}", params.fd); |
| 271 | 466 | ||
| 272 | channel = params.fd; | 467 | auto gpu_channel_device = module.GetDevice<nvhost_gpu>(params.fd); |
| 468 | gpu_channel_device->channel_state->memory_manager = gmmu; | ||
| 273 | return NvResult::Success; | 469 | return NvResult::Success; |
| 274 | } | 470 | } |
| 275 | 471 | ||
| 472 | void nvhost_as_gpu::GetVARegionsImpl(IoctlGetVaRegions& params) { | ||
| 473 | params.buf_size = 2 * sizeof(VaRegion); | ||
| 474 | |||
| 475 | params.regions = std::array<VaRegion, 2>{ | ||
| 476 | VaRegion{ | ||
| 477 | .offset = vm.small_page_allocator->GetVAStart() << VM::PAGE_SIZE_BITS, | ||
| 478 | .page_size = VM::YUZU_PAGESIZE, | ||
| 479 | ._pad0_{}, | ||
| 480 | .pages = vm.small_page_allocator->GetVALimit() - vm.small_page_allocator->GetVAStart(), | ||
| 481 | }, | ||
| 482 | VaRegion{ | ||
| 483 | .offset = vm.big_page_allocator->GetVAStart() << vm.big_page_size_bits, | ||
| 484 | .page_size = vm.big_page_size, | ||
| 485 | ._pad0_{}, | ||
| 486 | .pages = vm.big_page_allocator->GetVALimit() - vm.big_page_allocator->GetVAStart(), | ||
| 487 | }, | ||
| 488 | }; | ||
| 489 | } | ||
| 490 | |||
| 276 | NvResult nvhost_as_gpu::GetVARegions(const std::vector<u8>& input, std::vector<u8>& output) { | 491 | NvResult nvhost_as_gpu::GetVARegions(const std::vector<u8>& input, std::vector<u8>& output) { |
| 277 | IoctlGetVaRegions params{}; | 492 | IoctlGetVaRegions params{}; |
| 278 | std::memcpy(&params, input.data(), input.size()); | 493 | std::memcpy(&params, input.data(), input.size()); |
| 279 | 494 | ||
| 280 | LOG_WARNING(Service_NVDRV, "(STUBBED) called, buf_addr={:X}, buf_size={:X}", params.buf_addr, | 495 | LOG_DEBUG(Service_NVDRV, "called, buf_addr={:X}, buf_size={:X}", params.buf_addr, |
| 281 | params.buf_size); | 496 | params.buf_size); |
| 282 | |||
| 283 | params.buf_size = 0x30; | ||
| 284 | 497 | ||
| 285 | params.small = IoctlVaRegion{ | 498 | std::scoped_lock lock(mutex); |
| 286 | .offset = 0x04000000, | ||
| 287 | .page_size = DEFAULT_SMALL_PAGE_SIZE, | ||
| 288 | .pages = 0x3fbfff, | ||
| 289 | }; | ||
| 290 | 499 | ||
| 291 | params.big = IoctlVaRegion{ | 500 | if (!vm.initialised) { |
| 292 | .offset = 0x04000000, | 501 | return NvResult::BadValue; |
| 293 | .page_size = big_page_size, | 502 | } |
| 294 | .pages = 0x1bffff, | ||
| 295 | }; | ||
| 296 | 503 | ||
| 297 | // TODO(ogniK): This probably can stay stubbed but should add support way way later | 504 | GetVARegionsImpl(params); |
| 298 | 505 | ||
| 299 | std::memcpy(output.data(), &params, output.size()); | 506 | std::memcpy(output.data(), &params, output.size()); |
| 300 | return NvResult::Success; | 507 | return NvResult::Success; |
| @@ -305,62 +512,27 @@ NvResult nvhost_as_gpu::GetVARegions(const std::vector<u8>& input, std::vector<u | |||
| 305 | IoctlGetVaRegions params{}; | 512 | IoctlGetVaRegions params{}; |
| 306 | std::memcpy(&params, input.data(), input.size()); | 513 | std::memcpy(&params, input.data(), input.size()); |
| 307 | 514 | ||
| 308 | LOG_WARNING(Service_NVDRV, "(STUBBED) called, buf_addr={:X}, buf_size={:X}", params.buf_addr, | 515 | LOG_DEBUG(Service_NVDRV, "called, buf_addr={:X}, buf_size={:X}", params.buf_addr, |
| 309 | params.buf_size); | 516 | params.buf_size); |
| 310 | |||
| 311 | params.buf_size = 0x30; | ||
| 312 | 517 | ||
| 313 | params.small = IoctlVaRegion{ | 518 | std::scoped_lock lock(mutex); |
| 314 | .offset = 0x04000000, | ||
| 315 | .page_size = 0x1000, | ||
| 316 | .pages = 0x3fbfff, | ||
| 317 | }; | ||
| 318 | 519 | ||
| 319 | params.big = IoctlVaRegion{ | 520 | if (!vm.initialised) { |
| 320 | .offset = 0x04000000, | 521 | return NvResult::BadValue; |
| 321 | .page_size = big_page_size, | 522 | } |
| 322 | .pages = 0x1bffff, | ||
| 323 | }; | ||
| 324 | 523 | ||
| 325 | // TODO(ogniK): This probably can stay stubbed but should add support way way later | 524 | GetVARegionsImpl(params); |
| 326 | 525 | ||
| 327 | std::memcpy(output.data(), &params, output.size()); | 526 | std::memcpy(output.data(), &params, output.size()); |
| 328 | std::memcpy(inline_output.data(), &params.small, sizeof(IoctlVaRegion)); | 527 | std::memcpy(inline_output.data(), &params.regions[0], sizeof(VaRegion)); |
| 329 | std::memcpy(inline_output.data() + sizeof(IoctlVaRegion), &params.big, sizeof(IoctlVaRegion)); | 528 | std::memcpy(inline_output.data() + sizeof(VaRegion), &params.regions[1], sizeof(VaRegion)); |
| 330 | 529 | ||
| 331 | return NvResult::Success; | 530 | return NvResult::Success; |
| 332 | } | 531 | } |
| 333 | 532 | ||
| 334 | std::optional<nvhost_as_gpu::BufferMap> nvhost_as_gpu::FindBufferMap(GPUVAddr gpu_addr) const { | 533 | Kernel::KEvent* nvhost_as_gpu::QueryEvent(u32 event_id) { |
| 335 | const auto end{buffer_mappings.upper_bound(gpu_addr)}; | 534 | LOG_CRITICAL(Service_NVDRV, "Unknown AS GPU Event {}", event_id); |
| 336 | for (auto iter{buffer_mappings.begin()}; iter != end; ++iter) { | 535 | return nullptr; |
| 337 | if (gpu_addr >= iter->second.StartAddr() && gpu_addr < iter->second.EndAddr()) { | ||
| 338 | return iter->second; | ||
| 339 | } | ||
| 340 | } | ||
| 341 | |||
| 342 | return std::nullopt; | ||
| 343 | } | ||
| 344 | |||
| 345 | void nvhost_as_gpu::AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr, | ||
| 346 | bool is_allocated) { | ||
| 347 | buffer_mappings[gpu_addr] = {gpu_addr, size, cpu_addr, is_allocated}; | ||
| 348 | } | ||
| 349 | |||
| 350 | std::optional<std::size_t> nvhost_as_gpu::RemoveBufferMap(GPUVAddr gpu_addr) { | ||
| 351 | if (const auto iter{buffer_mappings.find(gpu_addr)}; iter != buffer_mappings.end()) { | ||
| 352 | std::size_t size{}; | ||
| 353 | |||
| 354 | if (iter->second.IsAllocated()) { | ||
| 355 | size = iter->second.Size(); | ||
| 356 | } | ||
| 357 | |||
| 358 | buffer_mappings.erase(iter); | ||
| 359 | |||
| 360 | return size; | ||
| 361 | } | ||
| 362 | |||
| 363 | return std::nullopt; | ||
| 364 | } | 536 | } |
| 365 | 537 | ||
| 366 | } // namespace Service::Nvidia::Devices | 538 | } // namespace Service::Nvidia::Devices |
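The non-fixed path of MapBufferEx above works in pages rather than bytes: the requested size is rounded up to the active page size, converted into a page count for the per-page-size allocator, and the page index that comes back is shifted into a byte offset in the GPU address space, with an offset of 0 treated as allocation failure. A minimal sketch of that arithmetic follows; AlignUp and page_index_allocator are simplified stand-ins for illustration, not the real Common::AlignUp or Common::FlatAllocator.

    #include <cstdint>

    constexpr std::uint32_t page_size_bits = 16;             // e.g. a 64 KiB big page
    constexpr std::uint64_t page_size = 1ULL << page_size_bits;

    constexpr std::uint64_t AlignUp(std::uint64_t value, std::uint64_t align) {
        return (value + align - 1) & ~(align - 1);
    }

    // Round the byte size up to whole pages, allocate that many pages, then
    // shift the returned page index back into a byte offset in the GPU AS.
    template <typename PageIndexAllocator>
    std::uint64_t PickFreeOffset(std::uint64_t size, PageIndexAllocator& page_index_allocator) {
        const auto pages =
            static_cast<std::uint32_t>(AlignUp(size, page_size) >> page_size_bits);
        const std::uint32_t page_index = page_index_allocator.Allocate(pages);
        return static_cast<std::uint64_t>(page_index) << page_size_bits; // 0 => out of space
    }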
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h index 555843a6f..86fe71c75 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h | |||
| @@ -1,35 +1,50 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: 2021 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-FileCopyrightText: 2021 Skyline Team and Contributors |
| 3 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | 4 | ||
| 4 | #pragma once | 5 | #pragma once |
| 5 | 6 | ||
| 7 | #include <bit> | ||
| 8 | #include <list> | ||
| 6 | #include <map> | 9 | #include <map> |
| 7 | #include <memory> | 10 | #include <memory> |
| 11 | #include <mutex> | ||
| 8 | #include <optional> | 12 | #include <optional> |
| 9 | #include <vector> | 13 | #include <vector> |
| 10 | 14 | ||
| 15 | #include "common/address_space.h" | ||
| 11 | #include "common/common_funcs.h" | 16 | #include "common/common_funcs.h" |
| 12 | #include "common/common_types.h" | 17 | #include "common/common_types.h" |
| 13 | #include "common/swap.h" | 18 | #include "common/swap.h" |
| 19 | #include "core/hle/service/nvdrv/core/nvmap.h" | ||
| 14 | #include "core/hle/service/nvdrv/devices/nvdevice.h" | 20 | #include "core/hle/service/nvdrv/devices/nvdevice.h" |
| 15 | 21 | ||
| 16 | namespace Service::Nvidia::Devices { | 22 | namespace Tegra { |
| 23 | class MemoryManager; | ||
| 24 | } // namespace Tegra | ||
| 25 | |||
| 26 | namespace Service::Nvidia { | ||
| 27 | class Module; | ||
| 28 | } | ||
| 17 | 29 | ||
| 18 | constexpr u32 DEFAULT_BIG_PAGE_SIZE = 1 << 16; | 30 | namespace Service::Nvidia::NvCore { |
| 19 | constexpr u32 DEFAULT_SMALL_PAGE_SIZE = 1 << 12; | 31 | class Container; |
| 32 | class NvMap; | ||
| 33 | } // namespace Service::Nvidia::NvCore | ||
| 20 | 34 | ||
| 21 | class nvmap; | 35 | namespace Service::Nvidia::Devices { |
| 22 | 36 | ||
| 23 | enum class AddressSpaceFlags : u32 { | 37 | enum class MappingFlags : u32 { |
| 24 | None = 0x0, | 38 | None = 0, |
| 25 | FixedOffset = 0x1, | 39 | Fixed = 1 << 0, |
| 26 | Remap = 0x100, | 40 | Sparse = 1 << 1, |
| 41 | Remap = 1 << 8, | ||
| 27 | }; | 42 | }; |
| 28 | DECLARE_ENUM_FLAG_OPERATORS(AddressSpaceFlags); | 43 | DECLARE_ENUM_FLAG_OPERATORS(MappingFlags); |
| 29 | 44 | ||
| 30 | class nvhost_as_gpu final : public nvdevice { | 45 | class nvhost_as_gpu final : public nvdevice { |
| 31 | public: | 46 | public: |
| 32 | explicit nvhost_as_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_); | 47 | explicit nvhost_as_gpu(Core::System& system_, Module& module, NvCore::Container& core); |
| 33 | ~nvhost_as_gpu() override; | 48 | ~nvhost_as_gpu() override; |
| 34 | 49 | ||
| 35 | NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, | 50 | NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, |
| @@ -42,46 +57,17 @@ public: | |||
| 42 | void OnOpen(DeviceFD fd) override; | 57 | void OnOpen(DeviceFD fd) override; |
| 43 | void OnClose(DeviceFD fd) override; | 58 | void OnClose(DeviceFD fd) override; |
| 44 | 59 | ||
| 45 | private: | 60 | Kernel::KEvent* QueryEvent(u32 event_id) override; |
| 46 | class BufferMap final { | 61 | |
| 47 | public: | 62 | struct VaRegion { |
| 48 | constexpr BufferMap() = default; | 63 | u64 offset; |
| 49 | 64 | u32 page_size; | |
| 50 | constexpr BufferMap(GPUVAddr start_addr_, std::size_t size_) | 65 | u32 _pad0_; |
| 51 | : start_addr{start_addr_}, end_addr{start_addr_ + size_} {} | 66 | u64 pages; |
| 52 | |||
| 53 | constexpr BufferMap(GPUVAddr start_addr_, std::size_t size_, VAddr cpu_addr_, | ||
| 54 | bool is_allocated_) | ||
| 55 | : start_addr{start_addr_}, end_addr{start_addr_ + size_}, cpu_addr{cpu_addr_}, | ||
| 56 | is_allocated{is_allocated_} {} | ||
| 57 | |||
| 58 | constexpr VAddr StartAddr() const { | ||
| 59 | return start_addr; | ||
| 60 | } | ||
| 61 | |||
| 62 | constexpr VAddr EndAddr() const { | ||
| 63 | return end_addr; | ||
| 64 | } | ||
| 65 | |||
| 66 | constexpr std::size_t Size() const { | ||
| 67 | return end_addr - start_addr; | ||
| 68 | } | ||
| 69 | |||
| 70 | constexpr VAddr CpuAddr() const { | ||
| 71 | return cpu_addr; | ||
| 72 | } | ||
| 73 | |||
| 74 | constexpr bool IsAllocated() const { | ||
| 75 | return is_allocated; | ||
| 76 | } | ||
| 77 | |||
| 78 | private: | ||
| 79 | GPUVAddr start_addr{}; | ||
| 80 | GPUVAddr end_addr{}; | ||
| 81 | VAddr cpu_addr{}; | ||
| 82 | bool is_allocated{}; | ||
| 83 | }; | 67 | }; |
| 68 | static_assert(sizeof(VaRegion) == 0x18); | ||
| 84 | 69 | ||
| 70 | private: | ||
| 85 | struct IoctlAllocAsEx { | 71 | struct IoctlAllocAsEx { |
| 86 | u32_le flags{}; // usually passes 1 | 72 | u32_le flags{}; // usually passes 1 |
| 87 | s32_le as_fd{}; // ignored; passes 0 | 73 | s32_le as_fd{}; // ignored; passes 0 |
| @@ -96,7 +82,7 @@ private: | |||
| 96 | struct IoctlAllocSpace { | 82 | struct IoctlAllocSpace { |
| 97 | u32_le pages{}; | 83 | u32_le pages{}; |
| 98 | u32_le page_size{}; | 84 | u32_le page_size{}; |
| 99 | AddressSpaceFlags flags{}; | 85 | MappingFlags flags{}; |
| 100 | INSERT_PADDING_WORDS(1); | 86 | INSERT_PADDING_WORDS(1); |
| 101 | union { | 87 | union { |
| 102 | u64_le offset; | 88 | u64_le offset; |
| @@ -113,19 +99,19 @@ private: | |||
| 113 | static_assert(sizeof(IoctlFreeSpace) == 16, "IoctlFreeSpace is incorrect size"); | 99 | static_assert(sizeof(IoctlFreeSpace) == 16, "IoctlFreeSpace is incorrect size"); |
| 114 | 100 | ||
| 115 | struct IoctlRemapEntry { | 101 | struct IoctlRemapEntry { |
| 116 | u16_le flags{}; | 102 | u16 flags; |
| 117 | u16_le kind{}; | 103 | u16 kind; |
| 118 | u32_le nvmap_handle{}; | 104 | NvCore::NvMap::Handle::Id handle; |
| 119 | u32_le map_offset{}; | 105 | u32 handle_offset_big_pages; |
| 120 | u32_le offset{}; | 106 | u32 as_offset_big_pages; |
| 121 | u32_le pages{}; | 107 | u32 big_pages; |
| 122 | }; | 108 | }; |
| 123 | static_assert(sizeof(IoctlRemapEntry) == 20, "IoctlRemapEntry is incorrect size"); | 109 | static_assert(sizeof(IoctlRemapEntry) == 20, "IoctlRemapEntry is incorrect size"); |
| 124 | 110 | ||
| 125 | struct IoctlMapBufferEx { | 111 | struct IoctlMapBufferEx { |
| 126 | AddressSpaceFlags flags{}; // bit0: fixed_offset, bit2: cacheable | 112 | MappingFlags flags{}; // bit0: fixed_offset, bit2: cacheable |
| 127 | u32_le kind{}; // -1 is default | 113 | u32_le kind{}; // -1 is default |
| 128 | u32_le nvmap_handle{}; | 114 | NvCore::NvMap::Handle::Id handle; |
| 129 | u32_le page_size{}; // 0 means don't care | 115 | u32_le page_size{}; // 0 means don't care |
| 130 | s64_le buffer_offset{}; | 116 | s64_le buffer_offset{}; |
| 131 | u64_le mapping_size{}; | 117 | u64_le mapping_size{}; |
| @@ -143,27 +129,15 @@ private: | |||
| 143 | }; | 129 | }; |
| 144 | static_assert(sizeof(IoctlBindChannel) == 4, "IoctlBindChannel is incorrect size"); | 130 | static_assert(sizeof(IoctlBindChannel) == 4, "IoctlBindChannel is incorrect size"); |
| 145 | 131 | ||
| 146 | struct IoctlVaRegion { | ||
| 147 | u64_le offset{}; | ||
| 148 | u32_le page_size{}; | ||
| 149 | INSERT_PADDING_WORDS(1); | ||
| 150 | u64_le pages{}; | ||
| 151 | }; | ||
| 152 | static_assert(sizeof(IoctlVaRegion) == 24, "IoctlVaRegion is incorrect size"); | ||
| 153 | |||
| 154 | struct IoctlGetVaRegions { | 132 | struct IoctlGetVaRegions { |
| 155 | u64_le buf_addr{}; // (contained output user ptr on linux, ignored) | 133 | u64_le buf_addr{}; // (contained output user ptr on linux, ignored) |
| 156 | u32_le buf_size{}; // forced to 2*sizeof(struct va_region) | 134 | u32_le buf_size{}; // forced to 2*sizeof(struct va_region) |
| 157 | u32_le reserved{}; | 135 | u32_le reserved{}; |
| 158 | IoctlVaRegion small{}; | 136 | std::array<VaRegion, 2> regions{}; |
| 159 | IoctlVaRegion big{}; | ||
| 160 | }; | 137 | }; |
| 161 | static_assert(sizeof(IoctlGetVaRegions) == 16 + sizeof(IoctlVaRegion) * 2, | 138 | static_assert(sizeof(IoctlGetVaRegions) == 16 + sizeof(VaRegion) * 2, |
| 162 | "IoctlGetVaRegions is incorrect size"); | 139 | "IoctlGetVaRegions is incorrect size"); |
| 163 | 140 | ||
| 164 | s32 channel{}; | ||
| 165 | u32 big_page_size{DEFAULT_BIG_PAGE_SIZE}; | ||
| 166 | |||
| 167 | NvResult AllocAsEx(const std::vector<u8>& input, std::vector<u8>& output); | 141 | NvResult AllocAsEx(const std::vector<u8>& input, std::vector<u8>& output); |
| 168 | NvResult AllocateSpace(const std::vector<u8>& input, std::vector<u8>& output); | 142 | NvResult AllocateSpace(const std::vector<u8>& input, std::vector<u8>& output); |
| 169 | NvResult Remap(const std::vector<u8>& input, std::vector<u8>& output); | 143 | NvResult Remap(const std::vector<u8>& input, std::vector<u8>& output); |
| @@ -172,18 +146,75 @@ private: | |||
| 172 | NvResult FreeSpace(const std::vector<u8>& input, std::vector<u8>& output); | 146 | NvResult FreeSpace(const std::vector<u8>& input, std::vector<u8>& output); |
| 173 | NvResult BindChannel(const std::vector<u8>& input, std::vector<u8>& output); | 147 | NvResult BindChannel(const std::vector<u8>& input, std::vector<u8>& output); |
| 174 | 148 | ||
| 149 | void GetVARegionsImpl(IoctlGetVaRegions& params); | ||
| 175 | NvResult GetVARegions(const std::vector<u8>& input, std::vector<u8>& output); | 150 | NvResult GetVARegions(const std::vector<u8>& input, std::vector<u8>& output); |
| 176 | NvResult GetVARegions(const std::vector<u8>& input, std::vector<u8>& output, | 151 | NvResult GetVARegions(const std::vector<u8>& input, std::vector<u8>& output, |
| 177 | std::vector<u8>& inline_output); | 152 | std::vector<u8>& inline_output); |
| 178 | 153 | ||
| 179 | std::optional<BufferMap> FindBufferMap(GPUVAddr gpu_addr) const; | 154 | void FreeMappingLocked(u64 offset); |
| 180 | void AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr, bool is_allocated); | 155 | |
| 181 | std::optional<std::size_t> RemoveBufferMap(GPUVAddr gpu_addr); | 156 | Module& module; |
| 157 | |||
| 158 | NvCore::Container& container; | ||
| 159 | NvCore::NvMap& nvmap; | ||
| 182 | 160 | ||
| 183 | std::shared_ptr<nvmap> nvmap_dev; | 161 | struct Mapping { |
| 162 | VAddr ptr; | ||
| 163 | u64 offset; | ||
| 164 | u64 size; | ||
| 165 | bool fixed; | ||
| 166 | bool big_page; // Only valid if fixed == false | ||
| 167 | bool sparse_alloc; | ||
| 168 | |||
| 169 | Mapping(VAddr ptr_, u64 offset_, u64 size_, bool fixed_, bool big_page_, bool sparse_alloc_) | ||
| 170 | : ptr(ptr_), offset(offset_), size(size_), fixed(fixed_), big_page(big_page_), | ||
| 171 | sparse_alloc(sparse_alloc_) {} | ||
| 172 | }; | ||
| 173 | |||
| 174 | struct Allocation { | ||
| 175 | u64 size; | ||
| 176 | std::list<std::shared_ptr<Mapping>> mappings; | ||
| 177 | u32 page_size; | ||
| 178 | bool sparse; | ||
| 179 | bool big_pages; | ||
| 180 | }; | ||
| 184 | 181 | ||
| 185 | // This is expected to be ordered, therefore we must use a map, not unordered_map | 182 | std::map<u64, std::shared_ptr<Mapping>> |
| 186 | std::map<GPUVAddr, BufferMap> buffer_mappings; | 183 | mapping_map; //!< This maps the base addresses of mapped buffers to their total sizes and |
| 184 | //!< mapping type; this is needed as what was originally a single buffer may | ||
| 185 | //!< have been split into multiple GPU side buffers with the remap flag. | ||
| 186 | std::map<u64, Allocation> allocation_map; //!< Holds allocations created by AllocSpace from | ||
| 187 | //!< which fixed buffers can be mapped into | ||
| 188 | std::mutex mutex; //!< Locks all AS operations | ||
| 189 | |||
| 190 | struct VM { | ||
| 191 | static constexpr u32 YUZU_PAGESIZE{0x1000}; | ||
| 192 | static constexpr u32 PAGE_SIZE_BITS{std::countr_zero(YUZU_PAGESIZE)}; | ||
| 193 | |||
| 194 | static constexpr u32 SUPPORTED_BIG_PAGE_SIZES{0x30000}; | ||
| 195 | static constexpr u32 DEFAULT_BIG_PAGE_SIZE{0x20000}; | ||
| 196 | u32 big_page_size{DEFAULT_BIG_PAGE_SIZE}; | ||
| 197 | u32 big_page_size_bits{std::countr_zero(DEFAULT_BIG_PAGE_SIZE)}; | ||
| 198 | |||
| 199 | static constexpr u32 VA_START_SHIFT{10}; | ||
| 200 | static constexpr u64 DEFAULT_VA_SPLIT{1ULL << 34}; | ||
| 201 | static constexpr u64 DEFAULT_VA_RANGE{1ULL << 37}; | ||
| 202 | u64 va_range_start{DEFAULT_BIG_PAGE_SIZE << VA_START_SHIFT}; | ||
| 203 | u64 va_range_split{DEFAULT_VA_SPLIT}; | ||
| 204 | u64 va_range_end{DEFAULT_VA_RANGE}; | ||
| 205 | |||
| 206 | using Allocator = Common::FlatAllocator<u32, 0, 32>; | ||
| 207 | |||
| 208 | std::unique_ptr<Allocator> big_page_allocator; | ||
| 209 | std::shared_ptr<Allocator> | ||
| 210 | small_page_allocator; //!< Shared as this is also used by nvhost::GpuChannel | ||
| 211 | |||
| 212 | bool initialised{}; | ||
| 213 | } vm; | ||
| 214 | std::shared_ptr<Tegra::MemoryManager> gmmu; | ||
| 215 | |||
| 216 | // s32 channel{}; | ||
| 217 | // u32 big_page_size{VM::DEFAULT_BIG_PAGE_SIZE}; | ||
| 187 | }; | 218 | }; |
| 188 | 219 | ||
| 189 | } // namespace Service::Nvidia::Devices | 220 | } // namespace Service::Nvidia::Devices |
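The VM block above keeps one allocator per page size and tracks its bounds in page units, so GetVARegionsImpl only has to shift the allocator's VA start by the page-size bits to get a byte offset and subtract start from limit to get the page count. A small sketch of that conversion, assuming a hypothetical Region struct in place of VaRegion and allocator-supplied start/limit values:

    #include <cstdint>

    struct Region {
        std::uint64_t offset;    // first GPU VA covered by this page size, in bytes
        std::uint32_t page_size; // page size in bytes
        std::uint64_t pages;     // pages between the allocator's VA start and VA limit
    };

    constexpr Region MakeRegion(std::uint64_t va_start_page, std::uint64_t va_limit_page,
                                std::uint32_t page_size_bits) {
        return Region{
            .offset = va_start_page << page_size_bits,
            .page_size = 1u << page_size_bits,
            .pages = va_limit_page - va_start_page,
        };
    }

With the defaults above, the small-page region uses 12 bits (4 KiB pages) and the big-page region uses 17 bits (the 0x20000-byte default), unless AllocAsEx selects a different big page size.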
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp index 527531f29..5bee4a3d3 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp | |||
| @@ -1,24 +1,39 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: 2021 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-FileCopyrightText: 2021 Skyline Team and Contributors |
| 3 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | 4 | ||
| 5 | #include <bit> | ||
| 4 | #include <cstdlib> | 6 | #include <cstdlib> |
| 5 | #include <cstring> | 7 | #include <cstring> |
| 6 | 8 | ||
| 9 | #include <fmt/format.h> | ||
| 7 | #include "common/assert.h" | 10 | #include "common/assert.h" |
| 8 | #include "common/logging/log.h" | 11 | #include "common/logging/log.h" |
| 12 | #include "common/scope_exit.h" | ||
| 9 | #include "core/core.h" | 13 | #include "core/core.h" |
| 10 | #include "core/hle/kernel/k_event.h" | 14 | #include "core/hle/kernel/k_event.h" |
| 11 | #include "core/hle/kernel/k_writable_event.h" | 15 | #include "core/hle/kernel/k_writable_event.h" |
| 16 | #include "core/hle/service/nvdrv/core/container.h" | ||
| 17 | #include "core/hle/service/nvdrv/core/syncpoint_manager.h" | ||
| 12 | #include "core/hle/service/nvdrv/devices/nvhost_ctrl.h" | 18 | #include "core/hle/service/nvdrv/devices/nvhost_ctrl.h" |
| 13 | #include "video_core/gpu.h" | 19 | #include "video_core/gpu.h" |
| 20 | #include "video_core/host1x/host1x.h" | ||
| 14 | 21 | ||
| 15 | namespace Service::Nvidia::Devices { | 22 | namespace Service::Nvidia::Devices { |
| 16 | 23 | ||
| 17 | nvhost_ctrl::nvhost_ctrl(Core::System& system_, EventInterface& events_interface_, | 24 | nvhost_ctrl::nvhost_ctrl(Core::System& system_, EventInterface& events_interface_, |
| 18 | SyncpointManager& syncpoint_manager_) | 25 | NvCore::Container& core_) |
| 19 | : nvdevice{system_}, events_interface{events_interface_}, syncpoint_manager{ | 26 | : nvdevice{system_}, events_interface{events_interface_}, core{core_}, |
| 20 | syncpoint_manager_} {} | 27 | syncpoint_manager{core_.GetSyncpointManager()} {} |
| 21 | nvhost_ctrl::~nvhost_ctrl() = default; | 28 | |
| 29 | nvhost_ctrl::~nvhost_ctrl() { | ||
| 30 | for (auto& event : events) { | ||
| 31 | if (!event.registered) { | ||
| 32 | continue; | ||
| 33 | } | ||
| 34 | events_interface.FreeEvent(event.kevent); | ||
| 35 | } | ||
| 36 | } | ||
| 22 | 37 | ||
| 23 | NvResult nvhost_ctrl::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, | 38 | NvResult nvhost_ctrl::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, |
| 24 | std::vector<u8>& output) { | 39 | std::vector<u8>& output) { |
| @@ -30,13 +45,15 @@ NvResult nvhost_ctrl::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& | |||
| 30 | case 0x1c: | 45 | case 0x1c: |
| 31 | return IocCtrlClearEventWait(input, output); | 46 | return IocCtrlClearEventWait(input, output); |
| 32 | case 0x1d: | 47 | case 0x1d: |
| 33 | return IocCtrlEventWait(input, output, false); | ||
| 34 | case 0x1e: | ||
| 35 | return IocCtrlEventWait(input, output, true); | 48 | return IocCtrlEventWait(input, output, true); |
| 49 | case 0x1e: | ||
| 50 | return IocCtrlEventWait(input, output, false); | ||
| 36 | case 0x1f: | 51 | case 0x1f: |
| 37 | return IocCtrlEventRegister(input, output); | 52 | return IocCtrlEventRegister(input, output); |
| 38 | case 0x20: | 53 | case 0x20: |
| 39 | return IocCtrlEventUnregister(input, output); | 54 | return IocCtrlEventUnregister(input, output); |
| 55 | case 0x21: | ||
| 56 | return IocCtrlEventUnregisterBatch(input, output); | ||
| 40 | } | 57 | } |
| 41 | break; | 58 | break; |
| 42 | default: | 59 | default: |
| @@ -60,6 +77,7 @@ NvResult nvhost_ctrl::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& | |||
| 60 | } | 77 | } |
| 61 | 78 | ||
| 62 | void nvhost_ctrl::OnOpen(DeviceFD fd) {} | 79 | void nvhost_ctrl::OnOpen(DeviceFD fd) {} |
| 80 | |||
| 63 | void nvhost_ctrl::OnClose(DeviceFD fd) {} | 81 | void nvhost_ctrl::OnClose(DeviceFD fd) {} |
| 64 | 82 | ||
| 65 | NvResult nvhost_ctrl::NvOsGetConfigU32(const std::vector<u8>& input, std::vector<u8>& output) { | 83 | NvResult nvhost_ctrl::NvOsGetConfigU32(const std::vector<u8>& input, std::vector<u8>& output) { |
| @@ -71,116 +89,167 @@ NvResult nvhost_ctrl::NvOsGetConfigU32(const std::vector<u8>& input, std::vector | |||
| 71 | } | 89 | } |
| 72 | 90 | ||
| 73 | NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output, | 91 | NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output, |
| 74 | bool is_async) { | 92 | bool is_allocation) { |
| 75 | IocCtrlEventWaitParams params{}; | 93 | IocCtrlEventWaitParams params{}; |
| 76 | std::memcpy(&params, input.data(), sizeof(params)); | 94 | std::memcpy(&params, input.data(), sizeof(params)); |
| 77 | LOG_DEBUG(Service_NVDRV, "syncpt_id={}, threshold={}, timeout={}, is_async={}", | 95 | LOG_DEBUG(Service_NVDRV, "syncpt_id={}, threshold={}, timeout={}, is_allocation={}", |
| 78 | params.syncpt_id, params.threshold, params.timeout, is_async); | 96 | params.fence.id, params.fence.value, params.timeout, is_allocation); |
| 79 | 97 | ||
| 80 | if (params.syncpt_id >= MaxSyncPoints) { | 98 | bool must_unmark_fail = !is_allocation; |
| 81 | return NvResult::BadParameter; | 99 | const u32 event_id = params.value.raw; |
| 82 | } | 100 | SCOPE_EXIT({ |
| 101 | std::memcpy(output.data(), &params, sizeof(params)); | ||
| 102 | if (must_unmark_fail) { | ||
| 103 | events[event_id].fails = 0; | ||
| 104 | } | ||
| 105 | }); | ||
| 83 | 106 | ||
| 84 | u32 event_id = params.value & 0x00FF; | 107 | const u32 fence_id = static_cast<u32>(params.fence.id); |
| 85 | 108 | ||
| 86 | if (event_id >= MaxNvEvents) { | 109 | if (fence_id >= MaxSyncPoints) { |
| 87 | std::memcpy(output.data(), &params, sizeof(params)); | ||
| 88 | return NvResult::BadParameter; | 110 | return NvResult::BadParameter; |
| 89 | } | 111 | } |
| 90 | 112 | ||
| 91 | if (syncpoint_manager.IsSyncpointExpired(params.syncpt_id, params.threshold)) { | 113 | if (params.fence.value == 0) { |
| 92 | params.value = syncpoint_manager.GetSyncpointMin(params.syncpt_id); | 114 | if (!syncpoint_manager.IsSyncpointAllocated(params.fence.id)) { |
| 93 | std::memcpy(output.data(), &params, sizeof(params)); | 115 | LOG_WARNING(Service_NVDRV, |
| 94 | events_interface.failed[event_id] = false; | 116 | "Unallocated syncpt_id={}, threshold={}, timeout={}, is_allocation={}", |
| 117 | params.fence.id, params.fence.value, params.timeout, is_allocation); | ||
| 118 | } else { | ||
| 119 | params.value.raw = syncpoint_manager.ReadSyncpointMinValue(fence_id); | ||
| 120 | } | ||
| 95 | return NvResult::Success; | 121 | return NvResult::Success; |
| 96 | } | 122 | } |
| 97 | 123 | ||
| 98 | if (const auto new_value = syncpoint_manager.RefreshSyncpoint(params.syncpt_id); | 124 | if (syncpoint_manager.IsFenceSignalled(params.fence)) { |
| 99 | syncpoint_manager.IsSyncpointExpired(params.syncpt_id, params.threshold)) { | 125 | params.value.raw = syncpoint_manager.ReadSyncpointMinValue(fence_id); |
| 100 | params.value = new_value; | ||
| 101 | std::memcpy(output.data(), &params, sizeof(params)); | ||
| 102 | events_interface.failed[event_id] = false; | ||
| 103 | return NvResult::Success; | 126 | return NvResult::Success; |
| 104 | } | 127 | } |
| 105 | 128 | ||
| 106 | auto& event = events_interface.events[event_id]; | 129 | if (const auto new_value = syncpoint_manager.UpdateMin(fence_id); |
| 107 | auto& gpu = system.GPU(); | 130 | syncpoint_manager.IsFenceSignalled(params.fence)) { |
| 108 | 131 | params.value.raw = new_value; | |
| 109 | // This is mostly to take into account unimplemented features. As synced | ||
| 110 | // gpu is always synced. | ||
| 111 | if (!gpu.IsAsync()) { | ||
| 112 | event.event->GetWritableEvent().Signal(); | ||
| 113 | return NvResult::Success; | ||
| 114 | } | ||
| 115 | const u32 current_syncpoint_value = event.fence.value; | ||
| 116 | const s32 diff = current_syncpoint_value - params.threshold; | ||
| 117 | if (diff >= 0) { | ||
| 118 | event.event->GetWritableEvent().Signal(); | ||
| 119 | params.value = current_syncpoint_value; | ||
| 120 | std::memcpy(output.data(), &params, sizeof(params)); | ||
| 121 | events_interface.failed[event_id] = false; | ||
| 122 | return NvResult::Success; | 132 | return NvResult::Success; |
| 123 | } | 133 | } |
| 124 | const u32 target_value = current_syncpoint_value - diff; | ||
| 125 | 134 | ||
| 126 | if (!is_async) { | 135 | auto& host1x_syncpoint_manager = system.Host1x().GetSyncpointManager(); |
| 127 | params.value = 0; | 136 | const u32 target_value = params.fence.value; |
| 137 | |||
| 138 | auto lock = NvEventsLock(); | ||
| 139 | |||
| 140 | u32 slot = [&]() { | ||
| 141 | if (is_allocation) { | ||
| 142 | params.value.raw = 0; | ||
| 143 | return FindFreeNvEvent(fence_id); | ||
| 144 | } else { | ||
| 145 | return params.value.raw; | ||
| 146 | } | ||
| 147 | }(); | ||
| 148 | |||
| 149 | must_unmark_fail = false; | ||
| 150 | |||
| 151 | const auto check_failing = [&]() { | ||
| 152 | if (events[slot].fails > 2) { | ||
| 153 | { | ||
| 154 | auto lk = system.StallProcesses(); | ||
| 155 | host1x_syncpoint_manager.WaitHost(fence_id, target_value); | ||
| 156 | system.UnstallProcesses(); | ||
| 157 | } | ||
| 158 | params.value.raw = target_value; | ||
| 159 | return true; | ||
| 160 | } | ||
| 161 | return false; | ||
| 162 | }; | ||
| 163 | |||
| 164 | if (slot >= MaxNvEvents) { | ||
| 165 | return NvResult::BadParameter; | ||
| 128 | } | 166 | } |
| 129 | 167 | ||
| 130 | if (params.timeout == 0) { | 168 | if (params.timeout == 0) { |
| 131 | std::memcpy(output.data(), &params, sizeof(params)); | 169 | if (check_failing()) { |
| 170 | events[slot].fails = 0; | ||
| 171 | return NvResult::Success; | ||
| 172 | } | ||
| 132 | return NvResult::Timeout; | 173 | return NvResult::Timeout; |
| 133 | } | 174 | } |
| 134 | 175 | ||
| 135 | EventState status = events_interface.status[event_id]; | 176 | auto& event = events[slot]; |
| 136 | const bool bad_parameter = status == EventState::Busy; | 177 | |
| 137 | if (bad_parameter) { | 178 | if (!event.registered) { |
| 138 | std::memcpy(output.data(), &params, sizeof(params)); | ||
| 139 | return NvResult::BadParameter; | 179 | return NvResult::BadParameter; |
| 140 | } | 180 | } |
| 141 | events_interface.SetEventStatus(event_id, EventState::Waiting); | 181 | |
| 142 | events_interface.assigned_syncpt[event_id] = params.syncpt_id; | 182 | if (event.IsBeingUsed()) { |
| 143 | events_interface.assigned_value[event_id] = target_value; | 183 | return NvResult::BadParameter; |
| 144 | if (is_async) { | 184 | } |
| 145 | params.value = params.syncpt_id << 4; | 185 | |
| 146 | } else { | 186 | if (check_failing()) { |
| 147 | params.value = ((params.syncpt_id & 0xfff) << 16) | 0x10000000; | 187 | event.fails = 0; |
| 148 | } | ||
| 149 | params.value |= event_id; | ||
| 150 | event.event->GetWritableEvent().Clear(); | ||
| 151 | if (events_interface.failed[event_id]) { | ||
| 152 | { | ||
| 153 | auto lk = system.StallProcesses(); | ||
| 154 | gpu.WaitFence(params.syncpt_id, target_value); | ||
| 155 | system.UnstallProcesses(); | ||
| 156 | } | ||
| 157 | std::memcpy(output.data(), &params, sizeof(params)); | ||
| 158 | events_interface.failed[event_id] = false; | ||
| 159 | return NvResult::Success; | 188 | return NvResult::Success; |
| 160 | } | 189 | } |
| 161 | gpu.RegisterSyncptInterrupt(params.syncpt_id, target_value); | 190 | |
| 162 | std::memcpy(output.data(), &params, sizeof(params)); | 191 | params.value.raw = 0; |
| 192 | |||
| 193 | event.status.store(EventState::Waiting, std::memory_order_release); | ||
| 194 | event.assigned_syncpt = fence_id; | ||
| 195 | event.assigned_value = target_value; | ||
| 196 | if (is_allocation) { | ||
| 197 | params.value.syncpoint_id_for_allocation.Assign(static_cast<u16>(fence_id)); | ||
| 198 | params.value.event_allocated.Assign(1); | ||
| 199 | } else { | ||
| 200 | params.value.syncpoint_id.Assign(fence_id); | ||
| 201 | } | ||
| 202 | params.value.raw |= slot; | ||
| 203 | |||
| 204 | event.wait_handle = | ||
| 205 | host1x_syncpoint_manager.RegisterHostAction(fence_id, target_value, [this, slot]() { | ||
| 206 | auto& event_ = events[slot]; | ||
| 207 | if (event_.status.exchange(EventState::Signalling, std::memory_order_acq_rel) == | ||
| 208 | EventState::Waiting) { | ||
| 209 | event_.kevent->GetWritableEvent().Signal(); | ||
| 210 | } | ||
| 211 | event_.status.store(EventState::Signalled, std::memory_order_release); | ||
| 212 | }); | ||
| 163 | return NvResult::Timeout; | 213 | return NvResult::Timeout; |
| 164 | } | 214 | } |
| 165 | 215 | ||
| 216 | NvResult nvhost_ctrl::FreeEvent(u32 slot) { | ||
| 217 | if (slot >= MaxNvEvents) { | ||
| 218 | return NvResult::BadParameter; | ||
| 219 | } | ||
| 220 | |||
| 221 | auto& event = events[slot]; | ||
| 222 | |||
| 223 | if (!event.registered) { | ||
| 224 | return NvResult::Success; | ||
| 225 | } | ||
| 226 | |||
| 227 | if (event.IsBeingUsed()) { | ||
| 228 | return NvResult::Busy; | ||
| 229 | } | ||
| 230 | |||
| 231 | FreeNvEvent(slot); | ||
| 232 | return NvResult::Success; | ||
| 233 | } | ||
| 234 | |||
| 166 | NvResult nvhost_ctrl::IocCtrlEventRegister(const std::vector<u8>& input, std::vector<u8>& output) { | 235 | NvResult nvhost_ctrl::IocCtrlEventRegister(const std::vector<u8>& input, std::vector<u8>& output) { |
| 167 | IocCtrlEventRegisterParams params{}; | 236 | IocCtrlEventRegisterParams params{}; |
| 168 | std::memcpy(&params, input.data(), sizeof(params)); | 237 | std::memcpy(&params, input.data(), sizeof(params)); |
| 169 | const u32 event_id = params.user_event_id & 0x00FF; | 238 | const u32 event_id = params.user_event_id; |
| 170 | LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id); | 239 | LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id); |
| 171 | if (event_id >= MaxNvEvents) { | 240 | if (event_id >= MaxNvEvents) { |
| 172 | return NvResult::BadParameter; | 241 | return NvResult::BadParameter; |
| 173 | } | 242 | } |
| 174 | if (events_interface.registered[event_id]) { | 243 | |
| 175 | const auto event_state = events_interface.status[event_id]; | 244 | auto lock = NvEventsLock(); |
| 176 | if (event_state != EventState::Free) { | 245 | |
| 177 | LOG_WARNING(Service_NVDRV, "Event already registered! Unregistering previous event"); | 246 | if (events[event_id].registered) { |
| 178 | events_interface.UnregisterEvent(event_id); | 247 | const auto result = FreeEvent(event_id); |
| 179 | } else { | 248 | if (result != NvResult::Success) { |
| 180 | return NvResult::BadParameter; | 249 | return result; |
| 181 | } | 250 | } |
| 182 | } | 251 | } |
| 183 | events_interface.RegisterEvent(event_id); | 252 | CreateNvEvent(event_id); |
| 184 | return NvResult::Success; | 253 | return NvResult::Success; |
| 185 | } | 254 | } |
| 186 | 255 | ||
| @@ -190,34 +259,142 @@ NvResult nvhost_ctrl::IocCtrlEventUnregister(const std::vector<u8>& input, | |||
| 190 | std::memcpy(&params, input.data(), sizeof(params)); | 259 | std::memcpy(&params, input.data(), sizeof(params)); |
| 191 | const u32 event_id = params.user_event_id & 0x00FF; | 260 | const u32 event_id = params.user_event_id & 0x00FF; |
| 192 | LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id); | 261 | LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id); |
| 193 | if (event_id >= MaxNvEvents) { | 262 | |
| 194 | return NvResult::BadParameter; | 263 | auto lock = NvEventsLock(); |
| 195 | } | 264 | return FreeEvent(event_id); |
| 196 | if (!events_interface.registered[event_id]) { | 265 | } |
| 197 | return NvResult::BadParameter; | 266 | |
| 267 | NvResult nvhost_ctrl::IocCtrlEventUnregisterBatch(const std::vector<u8>& input, | ||
| 268 | std::vector<u8>& output) { | ||
| 269 | IocCtrlEventUnregisterBatchParams params{}; | ||
| 270 | std::memcpy(&params, input.data(), sizeof(params)); | ||
| 271 | u64 event_mask = params.user_events; | ||
| 272 | LOG_DEBUG(Service_NVDRV, " called, event_mask: {:X}", event_mask); | ||
| 273 | |||
| 274 | auto lock = NvEventsLock(); | ||
| 275 | while (event_mask != 0) { | ||
| 276 | const u64 event_id = std::countr_zero(event_mask); | ||
| 277 | event_mask &= ~(1ULL << event_id); | ||
| 278 | const auto result = FreeEvent(static_cast<u32>(event_id)); | ||
| 279 | if (result != NvResult::Success) { | ||
| 280 | return result; | ||
| 281 | } | ||
| 198 | } | 282 | } |
| 199 | events_interface.UnregisterEvent(event_id); | ||
| 200 | return NvResult::Success; | 283 | return NvResult::Success; |
| 201 | } | 284 | } |
| 202 | 285 | ||
| 203 | NvResult nvhost_ctrl::IocCtrlClearEventWait(const std::vector<u8>& input, std::vector<u8>& output) { | 286 | NvResult nvhost_ctrl::IocCtrlClearEventWait(const std::vector<u8>& input, std::vector<u8>& output) { |
| 204 | IocCtrlEventSignalParams params{}; | 287 | IocCtrlEventClearParams params{}; |
| 205 | std::memcpy(&params, input.data(), sizeof(params)); | 288 | std::memcpy(&params, input.data(), sizeof(params)); |
| 206 | 289 | ||
| 207 | u32 event_id = params.event_id & 0x00FF; | 290 | u32 event_id = params.event_id.slot; |
| 208 | LOG_WARNING(Service_NVDRV, "cleared event wait on, event_id: {:X}", event_id); | 291 | LOG_DEBUG(Service_NVDRV, "called, event_id: {:X}", event_id); |
| 209 | 292 | ||
| 210 | if (event_id >= MaxNvEvents) { | 293 | if (event_id >= MaxNvEvents) { |
| 211 | return NvResult::BadParameter; | 294 | return NvResult::BadParameter; |
| 212 | } | 295 | } |
| 213 | if (events_interface.status[event_id] == EventState::Waiting) { | ||
| 214 | events_interface.LiberateEvent(event_id); | ||
| 215 | } | ||
| 216 | events_interface.failed[event_id] = true; | ||
| 217 | 296 | ||
| 218 | syncpoint_manager.RefreshSyncpoint(events_interface.events[event_id].fence.id); | 297 | auto lock = NvEventsLock(); |
| 298 | |||
| 299 | auto& event = events[event_id]; | ||
| 300 | if (event.status.exchange(EventState::Cancelling, std::memory_order_acq_rel) == | ||
| 301 | EventState::Waiting) { | ||
| 302 | auto& host1x_syncpoint_manager = system.Host1x().GetSyncpointManager(); | ||
| 303 | host1x_syncpoint_manager.DeregisterHostAction(event.assigned_syncpt, event.wait_handle); | ||
| 304 | syncpoint_manager.UpdateMin(event.assigned_syncpt); | ||
| 305 | event.wait_handle = {}; | ||
| 306 | } | ||
| 307 | event.fails++; | ||
| 308 | event.status.store(EventState::Cancelled, std::memory_order_release); | ||
| 309 | event.kevent->GetWritableEvent().Clear(); | ||
| 219 | 310 | ||
| 220 | return NvResult::Success; | 311 | return NvResult::Success; |
| 221 | } | 312 | } |
| 222 | 313 | ||
| 314 | Kernel::KEvent* nvhost_ctrl::QueryEvent(u32 event_id) { | ||
| 315 | const auto desired_event = SyncpointEventValue{.raw = event_id}; | ||
| 316 | |||
| 317 | const bool allocated = desired_event.event_allocated.Value() != 0; | ||
| 318 | const u32 slot{allocated ? desired_event.partial_slot.Value() | ||
| 319 | : static_cast<u32>(desired_event.slot)}; | ||
| 320 | if (slot >= MaxNvEvents) { | ||
| 321 | ASSERT(false); | ||
| 322 | return nullptr; | ||
| 323 | } | ||
| 324 | |||
| 325 | const u32 syncpoint_id{allocated ? desired_event.syncpoint_id_for_allocation.Value() | ||
| 326 | : desired_event.syncpoint_id.Value()}; | ||
| 327 | |||
| 328 | auto lock = NvEventsLock(); | ||
| 329 | |||
| 330 | auto& event = events[slot]; | ||
| 331 | if (event.registered && event.assigned_syncpt == syncpoint_id) { | ||
| 332 | ASSERT(event.kevent); | ||
| 333 | return event.kevent; | ||
| 334 | } | ||
| 335 | // Is this possible in hardware? | ||
| 336 | ASSERT_MSG(false, "Slot:{}, SyncpointID:{}, requested", slot, syncpoint_id); | ||
| 337 | return nullptr; | ||
| 338 | } | ||
| 339 | |||
| 340 | std::unique_lock<std::mutex> nvhost_ctrl::NvEventsLock() { | ||
| 341 | return std::unique_lock<std::mutex>(events_mutex); | ||
| 342 | } | ||
| 343 | |||
| 344 | void nvhost_ctrl::CreateNvEvent(u32 event_id) { | ||
| 345 | auto& event = events[event_id]; | ||
| 346 | ASSERT(!event.kevent); | ||
| 347 | ASSERT(!event.registered); | ||
| 348 | ASSERT(!event.IsBeingUsed()); | ||
| 349 | event.kevent = events_interface.CreateEvent(fmt::format("NVCTRL::NvEvent_{}", event_id)); | ||
| 350 | event.status = EventState::Available; | ||
| 351 | event.registered = true; | ||
| 352 | const u64 mask = 1ULL << event_id; | ||
| 353 | event.fails = 0; | ||
| 354 | events_mask |= mask; | ||
| 355 | event.assigned_syncpt = 0; | ||
| 356 | } | ||
| 357 | |||
| 358 | void nvhost_ctrl::FreeNvEvent(u32 event_id) { | ||
| 359 | auto& event = events[event_id]; | ||
| 360 | ASSERT(event.kevent); | ||
| 361 | ASSERT(event.registered); | ||
| 362 | ASSERT(!event.IsBeingUsed()); | ||
| 363 | events_interface.FreeEvent(event.kevent); | ||
| 364 | event.kevent = nullptr; | ||
| 365 | event.status = EventState::Available; | ||
| 366 | event.registered = false; | ||
| 367 | const u64 mask = ~(1ULL << event_id); | ||
| 368 | events_mask &= mask; | ||
| 369 | } | ||
| 370 | |||
| 371 | u32 nvhost_ctrl::FindFreeNvEvent(u32 syncpoint_id) { | ||
| 372 | u32 slot{MaxNvEvents}; | ||
| 373 | u32 free_slot{MaxNvEvents}; | ||
| 374 | for (u32 i = 0; i < MaxNvEvents; i++) { | ||
| 375 | auto& event = events[i]; | ||
| 376 | if (event.registered) { | ||
| 377 | if (!event.IsBeingUsed()) { | ||
| 378 | slot = i; | ||
| 379 | if (event.assigned_syncpt == syncpoint_id) { | ||
| 380 | return slot; | ||
| 381 | } | ||
| 382 | } | ||
| 383 | } else if (free_slot == MaxNvEvents) { | ||
| 384 | free_slot = i; | ||
| 385 | } | ||
| 386 | } | ||
| 387 | if (free_slot < MaxNvEvents) { | ||
| 388 | CreateNvEvent(free_slot); | ||
| 389 | return free_slot; | ||
| 390 | } | ||
| 391 | |||
| 392 | if (slot < MaxNvEvents) { | ||
| 393 | return slot; | ||
| 394 | } | ||
| 395 | |||
| 396 | LOG_CRITICAL(Service_NVDRV, "Failed to allocate an event"); | ||
| 397 | return 0; | ||
| 398 | } | ||
| 399 | |||
| 223 | } // namespace Service::Nvidia::Devices | 400 | } // namespace Service::Nvidia::Devices |
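FindFreeNvEvent above encodes a simple reuse policy: return immediately if an idle slot is already bound to the requested syncpoint, otherwise prefer a slot that has never been registered, and only then fall back to rebinding any idle registered slot. A condensed restatement of that policy, with a hypothetical Slot struct standing in for InternalEvent and 64 standing in for MaxNvEvents:

    #include <array>
    #include <cstdint>
    #include <optional>

    struct Slot {
        bool registered{};
        bool busy{};                    // waiting, cancelling or signalling
        std::uint32_t assigned_syncpt{};
    };

    std::optional<std::uint32_t> PickSlot(const std::array<Slot, 64>& slots,
                                          std::uint32_t syncpoint_id) {
        std::optional<std::uint32_t> idle_reusable; // registered but not currently in use
        std::optional<std::uint32_t> first_free;    // never registered
        for (std::uint32_t i = 0; i < slots.size(); ++i) {
            const Slot& s = slots[i];
            if (s.registered) {
                if (!s.busy) {
                    idle_reusable = i;
                    if (s.assigned_syncpt == syncpoint_id) {
                        return i;       // best case: idle slot already on this syncpoint
                    }
                }
            } else if (!first_free) {
                first_free = i;
            }
        }
        if (first_free) {
            return first_free;          // caller creates a fresh kernel event here
        }
        return idle_reusable;           // may be empty if every slot is busy
    }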
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h index 4fbb89b15..0b56d7070 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h | |||
| @@ -1,20 +1,28 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: 2021 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-FileCopyrightText: 2021 Skyline Team and Contributors |
| 3 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | 4 | ||
| 4 | #pragma once | 5 | #pragma once |
| 5 | 6 | ||
| 6 | #include <array> | 7 | #include <array> |
| 7 | #include <vector> | 8 | #include <vector> |
| 9 | #include "common/bit_field.h" | ||
| 8 | #include "common/common_types.h" | 10 | #include "common/common_types.h" |
| 9 | #include "core/hle/service/nvdrv/devices/nvdevice.h" | 11 | #include "core/hle/service/nvdrv/devices/nvdevice.h" |
| 10 | #include "core/hle/service/nvdrv/nvdrv.h" | 12 | #include "core/hle/service/nvdrv/nvdrv.h" |
| 13 | #include "video_core/host1x/syncpoint_manager.h" | ||
| 14 | |||
| 15 | namespace Service::Nvidia::NvCore { | ||
| 16 | class Container; | ||
| 17 | class SyncpointManager; | ||
| 18 | } // namespace Service::Nvidia::NvCore | ||
| 11 | 19 | ||
| 12 | namespace Service::Nvidia::Devices { | 20 | namespace Service::Nvidia::Devices { |
| 13 | 21 | ||
| 14 | class nvhost_ctrl final : public nvdevice { | 22 | class nvhost_ctrl final : public nvdevice { |
| 15 | public: | 23 | public: |
| 16 | explicit nvhost_ctrl(Core::System& system_, EventInterface& events_interface_, | 24 | explicit nvhost_ctrl(Core::System& system_, EventInterface& events_interface_, |
| 17 | SyncpointManager& syncpoint_manager_); | 25 | NvCore::Container& core); |
| 18 | ~nvhost_ctrl() override; | 26 | ~nvhost_ctrl() override; |
| 19 | 27 | ||
| 20 | NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, | 28 | NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, |
| @@ -27,7 +35,70 @@ public: | |||
| 27 | void OnOpen(DeviceFD fd) override; | 35 | void OnOpen(DeviceFD fd) override; |
| 28 | void OnClose(DeviceFD fd) override; | 36 | void OnClose(DeviceFD fd) override; |
| 29 | 37 | ||
| 38 | Kernel::KEvent* QueryEvent(u32 event_id) override; | ||
| 39 | |||
| 40 | union SyncpointEventValue { | ||
| 41 | u32 raw; | ||
| 42 | |||
| 43 | union { | ||
| 44 | BitField<0, 4, u32> partial_slot; | ||
| 45 | BitField<4, 28, u32> syncpoint_id; | ||
| 46 | }; | ||
| 47 | |||
| 48 | struct { | ||
| 49 | u16 slot; | ||
| 50 | union { | ||
| 51 | BitField<0, 12, u16> syncpoint_id_for_allocation; | ||
| 52 | BitField<12, 1, u16> event_allocated; | ||
| 53 | }; | ||
| 54 | }; | ||
| 55 | }; | ||
| 56 | static_assert(sizeof(SyncpointEventValue) == sizeof(u32)); | ||
| 57 | |||
| 30 | private: | 58 | private: |
| 59 | struct InternalEvent { | ||
| 60 | // Mask representing registered events | ||
| 61 | |||
| 62 | // Each kernel event associated to an NV event | ||
| 63 | Kernel::KEvent* kevent{}; | ||
| 64 | // The status of the current NVEvent | ||
| 65 | std::atomic<EventState> status{}; | ||
| 66 | |||
| 67 | // Number of times a wait on this NVEvent has failed or been cancelled. | ||
| 68 | u32 fails{}; | ||
| 69 | // When an NVEvent is waiting on GPU interrupt, this is the sync_point | ||
| 70 | // associated with it. | ||
| 71 | u32 assigned_syncpt{}; | ||
| 72 | // This is the value of the GPU interrupt that the NVEvent is waiting | ||
| 73 | // for. | ||
| 74 | u32 assigned_value{}; | ||
| 75 | |||
| 76 | // Tells if an NVEvent is registered or not | ||
| 77 | bool registered{}; | ||
| 78 | |||
| 79 | // Used for waiting on a syncpoint & canceling it. | ||
| 80 | Tegra::Host1x::SyncpointManager::ActionHandle wait_handle{}; | ||
| 81 | |||
| 82 | bool IsBeingUsed() const { | ||
| 83 | const auto current_status = status.load(std::memory_order_acquire); | ||
| 84 | return current_status == EventState::Waiting || | ||
| 85 | current_status == EventState::Cancelling || | ||
| 86 | current_status == EventState::Signalling; | ||
| 87 | } | ||
| 88 | }; | ||
| 89 | |||
| 90 | std::unique_lock<std::mutex> NvEventsLock(); | ||
| 91 | |||
| 92 | void CreateNvEvent(u32 event_id); | ||
| 93 | |||
| 94 | void FreeNvEvent(u32 event_id); | ||
| 95 | |||
| 96 | u32 FindFreeNvEvent(u32 syncpoint_id); | ||
| 97 | |||
| 98 | std::array<InternalEvent, MaxNvEvents> events{}; | ||
| 99 | std::mutex events_mutex; | ||
| 100 | u64 events_mask{}; | ||
| 101 | |||
| 31 | struct IocSyncptReadParams { | 102 | struct IocSyncptReadParams { |
| 32 | u32_le id{}; | 103 | u32_le id{}; |
| 33 | u32_le value{}; | 104 | u32_le value{}; |
| @@ -83,27 +154,18 @@ private: | |||
| 83 | }; | 154 | }; |
| 84 | static_assert(sizeof(IocGetConfigParams) == 387, "IocGetConfigParams is incorrect size"); | 155 | static_assert(sizeof(IocGetConfigParams) == 387, "IocGetConfigParams is incorrect size"); |
| 85 | 156 | ||
| 86 | struct IocCtrlEventSignalParams { | 157 | struct IocCtrlEventClearParams { |
| 87 | u32_le event_id{}; | 158 | SyncpointEventValue event_id{}; |
| 88 | }; | 159 | }; |
| 89 | static_assert(sizeof(IocCtrlEventSignalParams) == 4, | 160 | static_assert(sizeof(IocCtrlEventClearParams) == 4, |
| 90 | "IocCtrlEventSignalParams is incorrect size"); | 161 | "IocCtrlEventClearParams is incorrect size"); |
| 91 | 162 | ||
| 92 | struct IocCtrlEventWaitParams { | 163 | struct IocCtrlEventWaitParams { |
| 93 | u32_le syncpt_id{}; | 164 | NvFence fence{}; |
| 94 | u32_le threshold{}; | ||
| 95 | s32_le timeout{}; | ||
| 96 | u32_le value{}; | ||
| 97 | }; | ||
| 98 | static_assert(sizeof(IocCtrlEventWaitParams) == 16, "IocCtrlEventWaitParams is incorrect size"); | ||
| 99 | |||
| 100 | struct IocCtrlEventWaitAsyncParams { | ||
| 101 | u32_le syncpt_id{}; | ||
| 102 | u32_le threshold{}; | ||
| 103 | u32_le timeout{}; | 165 | u32_le timeout{}; |
| 104 | u32_le value{}; | 166 | SyncpointEventValue value{}; |
| 105 | }; | 167 | }; |
| 106 | static_assert(sizeof(IocCtrlEventWaitAsyncParams) == 16, | 168 | static_assert(sizeof(IocCtrlEventWaitParams) == 16, |
| 107 | "IocCtrlEventWaitAsyncParams is incorrect size"); | 169 | "IocCtrlEventWaitAsyncParams is incorrect size"); |
| 108 | 170 | ||
| 109 | struct IocCtrlEventRegisterParams { | 171 | struct IocCtrlEventRegisterParams { |
| @@ -118,19 +180,25 @@ private: | |||
| 118 | static_assert(sizeof(IocCtrlEventUnregisterParams) == 4, | 180 | static_assert(sizeof(IocCtrlEventUnregisterParams) == 4, |
| 119 | "IocCtrlEventUnregisterParams is incorrect size"); | 181 | "IocCtrlEventUnregisterParams is incorrect size"); |
| 120 | 182 | ||
| 121 | struct IocCtrlEventKill { | 183 | struct IocCtrlEventUnregisterBatchParams { |
| 122 | u64_le user_events{}; | 184 | u64_le user_events{}; |
| 123 | }; | 185 | }; |
| 124 | static_assert(sizeof(IocCtrlEventKill) == 8, "IocCtrlEventKill is incorrect size"); | 186 | static_assert(sizeof(IocCtrlEventUnregisterBatchParams) == 8, |
| 187 | "IocCtrlEventKill is incorrect size"); | ||
| 125 | 188 | ||
| 126 | NvResult NvOsGetConfigU32(const std::vector<u8>& input, std::vector<u8>& output); | 189 | NvResult NvOsGetConfigU32(const std::vector<u8>& input, std::vector<u8>& output); |
| 127 | NvResult IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output, bool is_async); | 190 | NvResult IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output, |
| 191 | bool is_allocation); | ||
| 128 | NvResult IocCtrlEventRegister(const std::vector<u8>& input, std::vector<u8>& output); | 192 | NvResult IocCtrlEventRegister(const std::vector<u8>& input, std::vector<u8>& output); |
| 129 | NvResult IocCtrlEventUnregister(const std::vector<u8>& input, std::vector<u8>& output); | 193 | NvResult IocCtrlEventUnregister(const std::vector<u8>& input, std::vector<u8>& output); |
| 194 | NvResult IocCtrlEventUnregisterBatch(const std::vector<u8>& input, std::vector<u8>& output); | ||
| 130 | NvResult IocCtrlClearEventWait(const std::vector<u8>& input, std::vector<u8>& output); | 195 | NvResult IocCtrlClearEventWait(const std::vector<u8>& input, std::vector<u8>& output); |
| 131 | 196 | ||
| 197 | NvResult FreeEvent(u32 slot); | ||
| 198 | |||
| 132 | EventInterface& events_interface; | 199 | EventInterface& events_interface; |
| 133 | SyncpointManager& syncpoint_manager; | 200 | NvCore::Container& core; |
| 201 | NvCore::SyncpointManager& syncpoint_manager; | ||
| 134 | }; | 202 | }; |
| 135 | 203 | ||
| 136 | } // namespace Service::Nvidia::Devices | 204 | } // namespace Service::Nvidia::Devices |
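SyncpointEventValue above packs the event slot, the syncpoint id and an allocation flag into the single 32-bit value that IocCtrlEventWait returns to the guest; the two BitField layouts correspond to the driver-allocated and guest-supplied slot cases. The helpers below only restate those layouts with explicit shifts for illustration and are not part of the patch:

    #include <cstdint>

    // Driver-allocated slot (event_allocated set): bits 0-15 carry the slot,
    // bits 16-27 the syncpoint id, bit 28 the allocated flag.
    constexpr std::uint32_t PackAllocated(std::uint16_t slot, std::uint16_t syncpoint_id) {
        return static_cast<std::uint32_t>(slot) |
               (static_cast<std::uint32_t>(syncpoint_id & 0xFFF) << 16) | (1u << 28);
    }

    // Guest-supplied slot: bits 0-3 carry the slot, bits 4-31 the syncpoint id.
    constexpr std::uint32_t PackUserSlot(std::uint32_t slot, std::uint32_t syncpoint_id) {
        return (slot & 0xF) | (syncpoint_id << 4);
    }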
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp index 2b3b7efea..ced57dfe6 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp | |||
| @@ -7,11 +7,19 @@ | |||
| 7 | #include "core/core.h" | 7 | #include "core/core.h" |
| 8 | #include "core/core_timing.h" | 8 | #include "core/core_timing.h" |
| 9 | #include "core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h" | 9 | #include "core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h" |
| 10 | #include "core/hle/service/nvdrv/nvdrv.h" | ||
| 10 | 11 | ||
| 11 | namespace Service::Nvidia::Devices { | 12 | namespace Service::Nvidia::Devices { |
| 12 | 13 | ||
| 13 | nvhost_ctrl_gpu::nvhost_ctrl_gpu(Core::System& system_) : nvdevice{system_} {} | 14 | nvhost_ctrl_gpu::nvhost_ctrl_gpu(Core::System& system_, EventInterface& events_interface_) |
| 14 | nvhost_ctrl_gpu::~nvhost_ctrl_gpu() = default; | 15 | : nvdevice{system_}, events_interface{events_interface_} { |
| 16 | error_notifier_event = events_interface.CreateEvent("CtrlGpuErrorNotifier"); | ||
| 17 | unknown_event = events_interface.CreateEvent("CtrlGpuUknownEvent"); | ||
| 18 | } | ||
| 19 | nvhost_ctrl_gpu::~nvhost_ctrl_gpu() { | ||
| 20 | events_interface.FreeEvent(error_notifier_event); | ||
| 21 | events_interface.FreeEvent(unknown_event); | ||
| 22 | } | ||
| 15 | 23 | ||
| 16 | NvResult nvhost_ctrl_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, | 24 | NvResult nvhost_ctrl_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, |
| 17 | std::vector<u8>& output) { | 25 | std::vector<u8>& output) { |
| @@ -286,4 +294,17 @@ NvResult nvhost_ctrl_gpu::GetGpuTime(const std::vector<u8>& input, std::vector<u | |||
| 286 | return NvResult::Success; | 294 | return NvResult::Success; |
| 287 | } | 295 | } |
| 288 | 296 | ||
| 297 | Kernel::KEvent* nvhost_ctrl_gpu::QueryEvent(u32 event_id) { | ||
| 298 | switch (event_id) { | ||
| 299 | case 1: | ||
| 300 | return error_notifier_event; | ||
| 301 | case 2: | ||
| 302 | return unknown_event; | ||
| 303 | default: { | ||
| 304 | LOG_CRITICAL(Service_NVDRV, "Unknown Ctrl GPU Event {}", event_id); | ||
| 305 | } | ||
| 306 | } | ||
| 307 | return nullptr; | ||
| 308 | } | ||
| 309 | |||
| 289 | } // namespace Service::Nvidia::Devices | 310 | } // namespace Service::Nvidia::Devices |
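nvhost_ctrl_gpu now follows the same event-ownership pattern as the other devices in this series: named kernel events are created through the shared EventInterface in the constructor, looked up by id in QueryEvent, and released in the destructor. A stripped-down sketch of that pattern, where EventRegistry and KEventStub are stand-ins for EventInterface and Kernel::KEvent:

    #include <cstdint>
    #include <string>

    struct KEventStub {};

    struct EventRegistry {
        KEventStub* CreateEvent(const std::string&) { return new KEventStub{}; }
        void FreeEvent(KEventStub* event) { delete event; }
    };

    class CtrlGpuLikeDevice {
    public:
        explicit CtrlGpuLikeDevice(EventRegistry& registry_) : registry{registry_} {
            error_notifier_event = registry.CreateEvent("CtrlGpuErrorNotifier");
            unknown_event = registry.CreateEvent("CtrlGpuUnknownEvent");
        }
        ~CtrlGpuLikeDevice() {
            registry.FreeEvent(error_notifier_event);
            registry.FreeEvent(unknown_event);
        }
        // Ids 1 and 2 map to the two events owned by this device; anything
        // else is reported as unknown by the real implementation.
        KEventStub* QueryEvent(std::uint32_t event_id) {
            switch (event_id) {
            case 1:
                return error_notifier_event;
            case 2:
                return unknown_event;
            default:
                return nullptr;
            }
        }

    private:
        EventRegistry& registry;
        KEventStub* error_notifier_event{};
        KEventStub* unknown_event{};
    };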
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h index 97e9a90cb..1e8f254e2 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h | |||
| @@ -10,11 +10,15 @@ | |||
| 10 | #include "common/swap.h" | 10 | #include "common/swap.h" |
| 11 | #include "core/hle/service/nvdrv/devices/nvdevice.h" | 11 | #include "core/hle/service/nvdrv/devices/nvdevice.h" |
| 12 | 12 | ||
| 13 | namespace Service::Nvidia { | ||
| 14 | class EventInterface; | ||
| 15 | } | ||
| 16 | |||
| 13 | namespace Service::Nvidia::Devices { | 17 | namespace Service::Nvidia::Devices { |
| 14 | 18 | ||
| 15 | class nvhost_ctrl_gpu final : public nvdevice { | 19 | class nvhost_ctrl_gpu final : public nvdevice { |
| 16 | public: | 20 | public: |
| 17 | explicit nvhost_ctrl_gpu(Core::System& system_); | 21 | explicit nvhost_ctrl_gpu(Core::System& system_, EventInterface& events_interface_); |
| 18 | ~nvhost_ctrl_gpu() override; | 22 | ~nvhost_ctrl_gpu() override; |
| 19 | 23 | ||
| 20 | NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, | 24 | NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, |
| @@ -27,6 +31,8 @@ public: | |||
| 27 | void OnOpen(DeviceFD fd) override; | 31 | void OnOpen(DeviceFD fd) override; |
| 28 | void OnClose(DeviceFD fd) override; | 32 | void OnClose(DeviceFD fd) override; |
| 29 | 33 | ||
| 34 | Kernel::KEvent* QueryEvent(u32 event_id) override; | ||
| 35 | |||
| 30 | private: | 36 | private: |
| 31 | struct IoctlGpuCharacteristics { | 37 | struct IoctlGpuCharacteristics { |
| 32 | u32_le arch; // 0x120 (NVGPU_GPU_ARCH_GM200) | 38 | u32_le arch; // 0x120 (NVGPU_GPU_ARCH_GM200) |
| @@ -160,6 +166,12 @@ private: | |||
| 160 | NvResult ZBCQueryTable(const std::vector<u8>& input, std::vector<u8>& output); | 166 | NvResult ZBCQueryTable(const std::vector<u8>& input, std::vector<u8>& output); |
| 161 | NvResult FlushL2(const std::vector<u8>& input, std::vector<u8>& output); | 167 | NvResult FlushL2(const std::vector<u8>& input, std::vector<u8>& output); |
| 162 | NvResult GetGpuTime(const std::vector<u8>& input, std::vector<u8>& output); | 168 | NvResult GetGpuTime(const std::vector<u8>& input, std::vector<u8>& output); |
| 169 | |||
| 170 | EventInterface& events_interface; | ||
| 171 | |||
| 172 | // Events | ||
| 173 | Kernel::KEvent* error_notifier_event; | ||
| 174 | Kernel::KEvent* unknown_event; | ||
| 163 | }; | 175 | }; |
| 164 | 176 | ||
| 165 | } // namespace Service::Nvidia::Devices | 177 | } // namespace Service::Nvidia::Devices |
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp index b98e63011..45a759fa8 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp | |||
| @@ -5,29 +5,46 @@ | |||
| 5 | #include "common/assert.h" | 5 | #include "common/assert.h" |
| 6 | #include "common/logging/log.h" | 6 | #include "common/logging/log.h" |
| 7 | #include "core/core.h" | 7 | #include "core/core.h" |
| 8 | #include "core/hle/service/nvdrv/core/container.h" | ||
| 9 | #include "core/hle/service/nvdrv/core/nvmap.h" | ||
| 10 | #include "core/hle/service/nvdrv/core/syncpoint_manager.h" | ||
| 8 | #include "core/hle/service/nvdrv/devices/nvhost_gpu.h" | 11 | #include "core/hle/service/nvdrv/devices/nvhost_gpu.h" |
| 9 | #include "core/hle/service/nvdrv/syncpoint_manager.h" | 12 | #include "core/hle/service/nvdrv/nvdrv.h" |
| 10 | #include "core/memory.h" | 13 | #include "core/memory.h" |
| 14 | #include "video_core/control/channel_state.h" | ||
| 15 | #include "video_core/engines/puller.h" | ||
| 11 | #include "video_core/gpu.h" | 16 | #include "video_core/gpu.h" |
| 17 | #include "video_core/host1x/host1x.h" | ||
| 12 | 18 | ||
| 13 | namespace Service::Nvidia::Devices { | 19 | namespace Service::Nvidia::Devices { |
| 14 | namespace { | 20 | namespace { |
| 15 | Tegra::CommandHeader BuildFenceAction(Tegra::GPU::FenceOperation op, u32 syncpoint_id) { | 21 | Tegra::CommandHeader BuildFenceAction(Tegra::Engines::Puller::FenceOperation op, u32 syncpoint_id) { |
| 16 | Tegra::GPU::FenceAction result{}; | 22 | Tegra::Engines::Puller::FenceAction result{}; |
| 17 | result.op.Assign(op); | 23 | result.op.Assign(op); |
| 18 | result.syncpoint_id.Assign(syncpoint_id); | 24 | result.syncpoint_id.Assign(syncpoint_id); |
| 19 | return {result.raw}; | 25 | return {result.raw}; |
| 20 | } | 26 | } |
| 21 | } // namespace | 27 | } // namespace |
| 22 | 28 | ||
| 23 | nvhost_gpu::nvhost_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_, | 29 | nvhost_gpu::nvhost_gpu(Core::System& system_, EventInterface& events_interface_, |
| 24 | SyncpointManager& syncpoint_manager_) | 30 | NvCore::Container& core_) |
| 25 | : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)}, syncpoint_manager{syncpoint_manager_} { | 31 | : nvdevice{system_}, events_interface{events_interface_}, core{core_}, |
| 26 | channel_fence.id = syncpoint_manager_.AllocateSyncpoint(); | 32 | syncpoint_manager{core_.GetSyncpointManager()}, nvmap{core.GetNvMapFile()}, |
| 27 | channel_fence.value = system_.GPU().GetSyncpointValue(channel_fence.id); | 33 | channel_state{system.GPU().AllocateChannel()} { |
| 34 | channel_syncpoint = syncpoint_manager.AllocateSyncpoint(false); | ||
| 35 | sm_exception_breakpoint_int_report_event = | ||
| 36 | events_interface.CreateEvent("GpuChannelSMExceptionBreakpointInt"); | ||
| 37 | sm_exception_breakpoint_pause_report_event = | ||
| 38 | events_interface.CreateEvent("GpuChannelSMExceptionBreakpointPause"); | ||
| 39 | error_notifier_event = events_interface.CreateEvent("GpuChannelErrorNotifier"); | ||
| 28 | } | 40 | } |
| 29 | 41 | ||
| 30 | nvhost_gpu::~nvhost_gpu() = default; | 42 | nvhost_gpu::~nvhost_gpu() { |
| 43 | events_interface.FreeEvent(sm_exception_breakpoint_int_report_event); | ||
| 44 | events_interface.FreeEvent(sm_exception_breakpoint_pause_report_event); | ||
| 45 | events_interface.FreeEvent(error_notifier_event); | ||
| 46 | syncpoint_manager.FreeSyncpoint(channel_syncpoint); | ||
| 47 | } | ||
| 31 | 48 | ||
| 32 | NvResult nvhost_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, | 49 | NvResult nvhost_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, |
| 33 | std::vector<u8>& output) { | 50 | std::vector<u8>& output) { |
| @@ -167,9 +184,14 @@ NvResult nvhost_gpu::AllocGPFIFOEx2(const std::vector<u8>& input, std::vector<u8 | |||
| 167 | params.num_entries, params.flags, params.unk0, params.unk1, params.unk2, | 184 | params.num_entries, params.flags, params.unk0, params.unk1, params.unk2, |
| 168 | params.unk3); | 185 | params.unk3); |
| 169 | 186 | ||
| 170 | channel_fence.value = system.GPU().GetSyncpointValue(channel_fence.id); | 187 | if (channel_state->initialized) { |
| 188 | LOG_CRITICAL(Service_NVDRV, "Already allocated!"); | ||
| 189 | return NvResult::AlreadyAllocated; | ||
| 190 | } | ||
| 191 | |||
| 192 | system.GPU().InitChannel(*channel_state); | ||
| 171 | 193 | ||
| 172 | params.fence_out = channel_fence; | 194 | params.fence_out = syncpoint_manager.GetSyncpointFence(channel_syncpoint); |
| 173 | 195 | ||
| 174 | std::memcpy(output.data(), ¶ms, output.size()); | 196 | std::memcpy(output.data(), ¶ms, output.size()); |
| 175 | return NvResult::Success; | 197 | return NvResult::Success; |
| @@ -188,39 +210,37 @@ NvResult nvhost_gpu::AllocateObjectContext(const std::vector<u8>& input, std::ve | |||
| 188 | 210 | ||
| 189 | static std::vector<Tegra::CommandHeader> BuildWaitCommandList(NvFence fence) { | 211 | static std::vector<Tegra::CommandHeader> BuildWaitCommandList(NvFence fence) { |
| 190 | return { | 212 | return { |
| 191 | Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceValue, 1, | 213 | Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointPayload, 1, |
| 192 | Tegra::SubmissionMode::Increasing), | 214 | Tegra::SubmissionMode::Increasing), |
| 193 | {fence.value}, | 215 | {fence.value}, |
| 194 | Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceAction, 1, | 216 | Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointOperation, 1, |
| 195 | Tegra::SubmissionMode::Increasing), | 217 | Tegra::SubmissionMode::Increasing), |
| 196 | BuildFenceAction(Tegra::GPU::FenceOperation::Acquire, fence.id), | 218 | BuildFenceAction(Tegra::Engines::Puller::FenceOperation::Acquire, fence.id), |
| 197 | }; | 219 | }; |
| 198 | } | 220 | } |
| 199 | 221 | ||
| 200 | static std::vector<Tegra::CommandHeader> BuildIncrementCommandList(NvFence fence, | 222 | static std::vector<Tegra::CommandHeader> BuildIncrementCommandList(NvFence fence) { |
| 201 | u32 add_increment) { | ||
| 202 | std::vector<Tegra::CommandHeader> result{ | 223 | std::vector<Tegra::CommandHeader> result{ |
| 203 | Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceValue, 1, | 224 | Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointPayload, 1, |
| 204 | Tegra::SubmissionMode::Increasing), | 225 | Tegra::SubmissionMode::Increasing), |
| 205 | {}}; | 226 | {}}; |
| 206 | 227 | ||
| 207 | for (u32 count = 0; count < add_increment; ++count) { | 228 | for (u32 count = 0; count < 2; ++count) { |
| 208 | result.emplace_back(Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceAction, 1, | 229 | result.emplace_back(Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointOperation, 1, |
| 209 | Tegra::SubmissionMode::Increasing)); | 230 | Tegra::SubmissionMode::Increasing)); |
| 210 | result.emplace_back(BuildFenceAction(Tegra::GPU::FenceOperation::Increment, fence.id)); | 231 | result.emplace_back( |
| 232 | BuildFenceAction(Tegra::Engines::Puller::FenceOperation::Increment, fence.id)); | ||
| 211 | } | 233 | } |
| 212 | 234 | ||
| 213 | return result; | 235 | return result; |
| 214 | } | 236 | } |
| 215 | 237 | ||
| 216 | static std::vector<Tegra::CommandHeader> BuildIncrementWithWfiCommandList(NvFence fence, | 238 | static std::vector<Tegra::CommandHeader> BuildIncrementWithWfiCommandList(NvFence fence) { |
| 217 | u32 add_increment) { | ||
| 218 | std::vector<Tegra::CommandHeader> result{ | 239 | std::vector<Tegra::CommandHeader> result{ |
| 219 | Tegra::BuildCommandHeader(Tegra::BufferMethods::WaitForInterrupt, 1, | 240 | Tegra::BuildCommandHeader(Tegra::BufferMethods::WaitForIdle, 1, |
| 220 | Tegra::SubmissionMode::Increasing), | 241 | Tegra::SubmissionMode::Increasing), |
| 221 | {}}; | 242 | {}}; |
| 222 | const std::vector<Tegra::CommandHeader> increment{ | 243 | const std::vector<Tegra::CommandHeader> increment{BuildIncrementCommandList(fence)}; |
| 223 | BuildIncrementCommandList(fence, add_increment)}; | ||
| 224 | 244 | ||
| 225 | result.insert(result.end(), increment.begin(), increment.end()); | 245 | result.insert(result.end(), increment.begin(), increment.end()); |
| 226 | 246 | ||
| @@ -234,33 +254,41 @@ NvResult nvhost_gpu::SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::vector<u8> | |||
| 234 | 254 | ||
| 235 | auto& gpu = system.GPU(); | 255 | auto& gpu = system.GPU(); |
| 236 | 256 | ||
| 237 | params.fence_out.id = channel_fence.id; | 257 | std::scoped_lock lock(channel_mutex); |
| 238 | 258 | ||
| 239 | if (params.flags.add_wait.Value() && | 259 | const auto bind_id = channel_state->bind_id; |
| 240 | !syncpoint_manager.IsSyncpointExpired(params.fence_out.id, params.fence_out.value)) { | ||
| 241 | gpu.PushGPUEntries(Tegra::CommandList{BuildWaitCommandList(params.fence_out)}); | ||
| 242 | } | ||
| 243 | 260 | ||
| 244 | if (params.flags.add_increment.Value() || params.flags.increment.Value()) { | 261 | auto& flags = params.flags; |
| 245 | const u32 increment_value = params.flags.increment.Value() ? params.fence_out.value : 0; | 262 | |
| 246 | params.fence_out.value = syncpoint_manager.IncreaseSyncpoint( | 263 | if (flags.fence_wait.Value()) { |
| 247 | params.fence_out.id, params.AddIncrementValue() + increment_value); | 264 | if (flags.increment_value.Value()) { |
| 248 | } else { | 265 | return NvResult::BadParameter; |
| 249 | params.fence_out.value = syncpoint_manager.GetSyncpointMax(params.fence_out.id); | 266 | } |
| 267 | |||
| 268 | if (!syncpoint_manager.IsFenceSignalled(params.fence)) { | ||
| 269 | gpu.PushGPUEntries(bind_id, Tegra::CommandList{BuildWaitCommandList(params.fence)}); | ||
| 270 | } | ||
| 250 | } | 271 | } |
| 251 | 272 | ||
| 252 | gpu.PushGPUEntries(std::move(entries)); | 273 | params.fence.id = channel_syncpoint; |
| 274 | |||
| 275 | u32 increment{(flags.fence_increment.Value() != 0 ? 2 : 0) + | ||
| 276 | (flags.increment_value.Value() != 0 ? params.fence.value : 0)}; | ||
| 277 | params.fence.value = syncpoint_manager.IncrementSyncpointMaxExt(channel_syncpoint, increment); | ||
| 278 | gpu.PushGPUEntries(bind_id, std::move(entries)); | ||
| 253 | 279 | ||
| 254 | if (params.flags.add_increment.Value()) { | 280 | if (flags.fence_increment.Value()) { |
| 255 | if (params.flags.suppress_wfi) { | 281 | if (flags.suppress_wfi.Value()) { |
| 256 | gpu.PushGPUEntries(Tegra::CommandList{ | 282 | gpu.PushGPUEntries(bind_id, |
| 257 | BuildIncrementCommandList(params.fence_out, params.AddIncrementValue())}); | 283 | Tegra::CommandList{BuildIncrementCommandList(params.fence)}); |
| 258 | } else { | 284 | } else { |
| 259 | gpu.PushGPUEntries(Tegra::CommandList{ | 285 | gpu.PushGPUEntries(bind_id, |
| 260 | BuildIncrementWithWfiCommandList(params.fence_out, params.AddIncrementValue())}); | 286 | Tegra::CommandList{BuildIncrementWithWfiCommandList(params.fence)}); |
| 261 | } | 287 | } |
| 262 | } | 288 | } |
| 263 | 289 | ||
| 290 | flags.raw = 0; | ||
| 291 | |||
| 264 | std::memcpy(output.data(), ¶ms, sizeof(IoctlSubmitGpfifo)); | 292 | std::memcpy(output.data(), ¶ms, sizeof(IoctlSubmitGpfifo)); |
| 265 | return NvResult::Success; | 293 | return NvResult::Success; |
| 266 | } | 294 | } |
| @@ -328,4 +356,19 @@ NvResult nvhost_gpu::ChannelSetTimeslice(const std::vector<u8>& input, std::vect | |||
| 328 | return NvResult::Success; | 356 | return NvResult::Success; |
| 329 | } | 357 | } |
| 330 | 358 | ||
| 359 | Kernel::KEvent* nvhost_gpu::QueryEvent(u32 event_id) { | ||
| 360 | switch (event_id) { | ||
| 361 | case 1: | ||
| 362 | return sm_exception_breakpoint_int_report_event; | ||
| 363 | case 2: | ||
| 364 | return sm_exception_breakpoint_pause_report_event; | ||
| 365 | case 3: | ||
| 366 | return error_notifier_event; | ||
| 367 | default: { | ||
| 368 | LOG_CRITICAL(Service_NVDRV, "Unknown Ctrl GPU Event {}", event_id); | ||
| 369 | } | ||
| 370 | } | ||
| 371 | return nullptr; | ||
| 372 | } | ||
| 373 | |||
| 331 | } // namespace Service::Nvidia::Devices | 374 | } // namespace Service::Nvidia::Devices |
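The reworked SubmitGPFIFOImpl above rejects fence_wait combined with increment_value, stamps the channel syncpoint into the returned fence, advances the syncpoint maximum by two increments (plus fence.value when increment_value is set), and clears the flag word before copying the params back. A compact sketch of that control flow, with the GPU and syncpoint types replaced by plain stand-ins:

    #include <cstdint>

    // Stand-ins: names mirror the hunk above, but the types are simplified.
    struct Fence { uint32_t id; uint32_t value; };

    struct Flags {
        bool fence_wait;       // wait on params.fence before the entries run
        bool fence_increment;  // append syncpoint increments and return a fence
        bool increment_value;  // fence.value carries extra increments to add
        bool suppress_wfi;     // skip the wait-for-idle before incrementing
    };

    enum class Result { Success, BadParameter };

    // Sketch of the submit flow: validate the flag combination, compute how far
    // the syncpoint maximum advances, and return the fence callers wait on.
    Result SubmitSketch(Flags& flags, Fence& fence, uint32_t channel_syncpoint,
                        uint32_t& syncpoint_max /* per-syncpoint max counter */) {
        if (flags.fence_wait && flags.increment_value) {
            return Result::BadParameter; // the two modes are mutually exclusive
        }
        // (a wait command list for the incoming fence would be pushed here when
        //  fence_wait is set and the fence is not yet signalled)

        fence.id = channel_syncpoint;
        const uint32_t increment =
            (flags.fence_increment ? 2u : 0u) + (flags.increment_value ? fence.value : 0u);
        syncpoint_max += increment;  // IncrementSyncpointMaxExt in the real code
        fence.value = syncpoint_max; // returned so others can wait on it

        // (increment command lists, with or without wait-for-idle depending on
        //  suppress_wfi, would be pushed here)
        flags = {}; // the flag word is cleared before being copied back to guest memory
        return Result::Success;
    }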
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h index 8a9f7775a..1e4ecd55b 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h | |||
| @@ -13,17 +13,31 @@ | |||
| 13 | #include "core/hle/service/nvdrv/nvdata.h" | 13 | #include "core/hle/service/nvdrv/nvdata.h" |
| 14 | #include "video_core/dma_pusher.h" | 14 | #include "video_core/dma_pusher.h" |
| 15 | 15 | ||
| 16 | namespace Tegra { | ||
| 17 | namespace Control { | ||
| 18 | struct ChannelState; | ||
| 19 | } | ||
| 20 | } // namespace Tegra | ||
| 21 | |||
| 16 | namespace Service::Nvidia { | 22 | namespace Service::Nvidia { |
| 23 | |||
| 24 | namespace NvCore { | ||
| 25 | class Container; | ||
| 26 | class NvMap; | ||
| 17 | class SyncpointManager; | 27 | class SyncpointManager; |
| 18 | } | 28 | } // namespace NvCore |
| 29 | |||
| 30 | class EventInterface; | ||
| 31 | } // namespace Service::Nvidia | ||
| 19 | 32 | ||
| 20 | namespace Service::Nvidia::Devices { | 33 | namespace Service::Nvidia::Devices { |
| 21 | 34 | ||
| 35 | class nvhost_as_gpu; | ||
| 22 | class nvmap; | 36 | class nvmap; |
| 23 | class nvhost_gpu final : public nvdevice { | 37 | class nvhost_gpu final : public nvdevice { |
| 24 | public: | 38 | public: |
| 25 | explicit nvhost_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_, | 39 | explicit nvhost_gpu(Core::System& system_, EventInterface& events_interface_, |
| 26 | SyncpointManager& syncpoint_manager_); | 40 | NvCore::Container& core); |
| 27 | ~nvhost_gpu() override; | 41 | ~nvhost_gpu() override; |
| 28 | 42 | ||
| 29 | NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, | 43 | NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, |
| @@ -36,7 +50,10 @@ public: | |||
| 36 | void OnOpen(DeviceFD fd) override; | 50 | void OnOpen(DeviceFD fd) override; |
| 37 | void OnClose(DeviceFD fd) override; | 51 | void OnClose(DeviceFD fd) override; |
| 38 | 52 | ||
| 53 | Kernel::KEvent* QueryEvent(u32 event_id) override; | ||
| 54 | |||
| 39 | private: | 55 | private: |
| 56 | friend class nvhost_as_gpu; | ||
| 40 | enum class CtxObjects : u32_le { | 57 | enum class CtxObjects : u32_le { |
| 41 | Ctx2D = 0x902D, | 58 | Ctx2D = 0x902D, |
| 42 | Ctx3D = 0xB197, | 59 | Ctx3D = 0xB197, |
| @@ -146,17 +163,13 @@ private: | |||
| 146 | u32_le num_entries{}; // number of fence objects being submitted | 163 | u32_le num_entries{}; // number of fence objects being submitted |
| 147 | union { | 164 | union { |
| 148 | u32_le raw; | 165 | u32_le raw; |
| 149 | BitField<0, 1, u32_le> add_wait; // append a wait sync_point to the list | 166 | BitField<0, 1, u32_le> fence_wait; // append a wait sync_point to the list |
| 150 | BitField<1, 1, u32_le> add_increment; // append an increment to the list | 167 | BitField<1, 1, u32_le> fence_increment; // append an increment to the list |
| 151 | BitField<2, 1, u32_le> new_hw_format; // mostly ignored | 168 | BitField<2, 1, u32_le> new_hw_format; // mostly ignored |
| 152 | BitField<4, 1, u32_le> suppress_wfi; // suppress wait for interrupt | 169 | BitField<4, 1, u32_le> suppress_wfi; // suppress wait for interrupt |
| 153 | BitField<8, 1, u32_le> increment; // increment the returned fence | 170 | BitField<8, 1, u32_le> increment_value; // increment the returned fence |
| 154 | } flags; | 171 | } flags; |
| 155 | NvFence fence_out{}; // returned new fence object for others to wait on | 172 | NvFence fence{}; // returned new fence object for others to wait on |
| 156 | |||
| 157 | u32 AddIncrementValue() const { | ||
| 158 | return flags.add_increment.Value() << 1; | ||
| 159 | } | ||
| 160 | }; | 173 | }; |
| 161 | static_assert(sizeof(IoctlSubmitGpfifo) == 16 + sizeof(NvFence), | 174 | static_assert(sizeof(IoctlSubmitGpfifo) == 16 + sizeof(NvFence), |
| 162 | "IoctlSubmitGpfifo is incorrect size"); | 175 | "IoctlSubmitGpfifo is incorrect size"); |
| @@ -191,9 +204,18 @@ private: | |||
| 191 | NvResult ChannelSetTimeout(const std::vector<u8>& input, std::vector<u8>& output); | 204 | NvResult ChannelSetTimeout(const std::vector<u8>& input, std::vector<u8>& output); |
| 192 | NvResult ChannelSetTimeslice(const std::vector<u8>& input, std::vector<u8>& output); | 205 | NvResult ChannelSetTimeslice(const std::vector<u8>& input, std::vector<u8>& output); |
| 193 | 206 | ||
| 194 | std::shared_ptr<nvmap> nvmap_dev; | 207 | EventInterface& events_interface; |
| 195 | SyncpointManager& syncpoint_manager; | 208 | NvCore::Container& core; |
| 196 | NvFence channel_fence; | 209 | NvCore::SyncpointManager& syncpoint_manager; |
| 210 | NvCore::NvMap& nvmap; | ||
| 211 | std::shared_ptr<Tegra::Control::ChannelState> channel_state; | ||
| 212 | u32 channel_syncpoint; | ||
| 213 | std::mutex channel_mutex; | ||
| 214 | |||
| 215 | // Events | ||
| 216 | Kernel::KEvent* sm_exception_breakpoint_int_report_event; | ||
| 217 | Kernel::KEvent* sm_exception_breakpoint_pause_report_event; | ||
| 218 | Kernel::KEvent* error_notifier_event; | ||
| 197 | }; | 219 | }; |
| 198 | 220 | ||
| 199 | } // namespace Service::Nvidia::Devices | 221 | } // namespace Service::Nvidia::Devices |
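The renamed flag bits above (fence_wait, fence_increment, increment_value, suppress_wfi) and the size assertion can be mirrored with standard C++ bit-fields. This is an illustrative layout only, assuming a plain 8-byte NvFence stand-in instead of the project's BitField and swap types:

    #include <cstdint>

    // Stand-in for the driver's NvFence (id + value), 8 bytes.
    struct NvFence {
        uint32_t id;
        uint32_t value;
    };

    // Simplified layout sketch of the submit-GPFIFO ioctl parameters.
    struct SubmitGpfifoParams {
        uint64_t address;     // pointer to the GPFIFO entries
        uint32_t num_entries; // how many entries are submitted
        union {
            uint32_t raw;
            struct {
                uint32_t fence_wait : 1;      // bit 0: wait on the passed-in fence first
                uint32_t fence_increment : 1; // bit 1: append increments, return a fence
                uint32_t new_hw_format : 1;   // bit 2: mostly ignored
                uint32_t pad0 : 1;
                uint32_t suppress_wfi : 1;    // bit 4: skip wait-for-idle before incrementing
                uint32_t pad1 : 3;
                uint32_t increment_value : 1; // bit 8: fence.value adds extra increments
            } bits;
        } flags;
        NvFence fence; // in: fence to wait on, out: fence others can wait on
    };
    static_assert(sizeof(SubmitGpfifoParams) == 16 + sizeof(NvFence),
                  "SubmitGpfifoParams is incorrect size");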
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp index a7385fce8..1703f9cc3 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp | |||
| @@ -5,14 +5,14 @@ | |||
| 5 | #include "common/assert.h" | 5 | #include "common/assert.h" |
| 6 | #include "common/logging/log.h" | 6 | #include "common/logging/log.h" |
| 7 | #include "core/core.h" | 7 | #include "core/core.h" |
| 8 | #include "core/hle/service/nvdrv/core/container.h" | ||
| 8 | #include "core/hle/service/nvdrv/devices/nvhost_nvdec.h" | 9 | #include "core/hle/service/nvdrv/devices/nvhost_nvdec.h" |
| 9 | #include "video_core/renderer_base.h" | 10 | #include "video_core/renderer_base.h" |
| 10 | 11 | ||
| 11 | namespace Service::Nvidia::Devices { | 12 | namespace Service::Nvidia::Devices { |
| 12 | 13 | ||
| 13 | nvhost_nvdec::nvhost_nvdec(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_, | 14 | nvhost_nvdec::nvhost_nvdec(Core::System& system_, NvCore::Container& core_) |
| 14 | SyncpointManager& syncpoint_manager_) | 15 | : nvhost_nvdec_common{system_, core_, NvCore::ChannelType::NvDec} {} |
| 15 | : nvhost_nvdec_common{system_, std::move(nvmap_dev_), syncpoint_manager_} {} | ||
| 16 | nvhost_nvdec::~nvhost_nvdec() = default; | 16 | nvhost_nvdec::~nvhost_nvdec() = default; |
| 17 | 17 | ||
| 18 | NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, | 18 | NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, |
| @@ -21,8 +21,9 @@ NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& | |||
| 21 | case 0x0: | 21 | case 0x0: |
| 22 | switch (command.cmd) { | 22 | switch (command.cmd) { |
| 23 | case 0x1: { | 23 | case 0x1: { |
| 24 | if (!fd_to_id.contains(fd)) { | 24 | auto& host1x_file = core.Host1xDeviceFile(); |
| 25 | fd_to_id[fd] = next_id++; | 25 | if (!host1x_file.fd_to_id.contains(fd)) { |
| 26 | host1x_file.fd_to_id[fd] = host1x_file.nvdec_next_id++; | ||
| 26 | } | 27 | } |
| 27 | return Submit(fd, input, output); | 28 | return Submit(fd, input, output); |
| 28 | } | 29 | } |
| @@ -73,8 +74,9 @@ void nvhost_nvdec::OnOpen(DeviceFD fd) { | |||
| 73 | 74 | ||
| 74 | void nvhost_nvdec::OnClose(DeviceFD fd) { | 75 | void nvhost_nvdec::OnClose(DeviceFD fd) { |
| 75 | LOG_INFO(Service_NVDRV, "NVDEC video stream ended"); | 76 | LOG_INFO(Service_NVDRV, "NVDEC video stream ended"); |
| 76 | const auto iter = fd_to_id.find(fd); | 77 | auto& host1x_file = core.Host1xDeviceFile(); |
| 77 | if (iter != fd_to_id.end()) { | 78 | const auto iter = host1x_file.fd_to_id.find(fd); |
| 79 | if (iter != host1x_file.fd_to_id.end()) { | ||
| 78 | system.GPU().ClearCdmaInstance(iter->second); | 80 | system.GPU().ClearCdmaInstance(iter->second); |
| 79 | } | 81 | } |
| 80 | system.AudioCore().SetNVDECActive(false); | 82 | system.AudioCore().SetNVDECActive(false); |
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h index 29b3e6a36..c1b4e53e8 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h | |||
| @@ -10,8 +10,7 @@ namespace Service::Nvidia::Devices { | |||
| 10 | 10 | ||
| 11 | class nvhost_nvdec final : public nvhost_nvdec_common { | 11 | class nvhost_nvdec final : public nvhost_nvdec_common { |
| 12 | public: | 12 | public: |
| 13 | explicit nvhost_nvdec(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_, | 13 | explicit nvhost_nvdec(Core::System& system_, NvCore::Container& core); |
| 14 | SyncpointManager& syncpoint_manager_); | ||
| 15 | ~nvhost_nvdec() override; | 14 | ~nvhost_nvdec() override; |
| 16 | 15 | ||
| 17 | NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, | 16 | NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, |
| @@ -23,9 +22,6 @@ public: | |||
| 23 | 22 | ||
| 24 | void OnOpen(DeviceFD fd) override; | 23 | void OnOpen(DeviceFD fd) override; |
| 25 | void OnClose(DeviceFD fd) override; | 24 | void OnClose(DeviceFD fd) override; |
| 26 | |||
| 27 | private: | ||
| 28 | u32 next_id{}; | ||
| 29 | }; | 25 | }; |
| 30 | 26 | ||
| 31 | } // namespace Service::Nvidia::Devices | 27 | } // namespace Service::Nvidia::Devices |
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp index 8b2cd9bf1..99eede702 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp | |||
| @@ -8,10 +8,12 @@ | |||
| 8 | #include "common/common_types.h" | 8 | #include "common/common_types.h" |
| 9 | #include "common/logging/log.h" | 9 | #include "common/logging/log.h" |
| 10 | #include "core/core.h" | 10 | #include "core/core.h" |
| 11 | #include "core/hle/service/nvdrv/core/container.h" | ||
| 12 | #include "core/hle/service/nvdrv/core/nvmap.h" | ||
| 13 | #include "core/hle/service/nvdrv/core/syncpoint_manager.h" | ||
| 11 | #include "core/hle/service/nvdrv/devices/nvhost_nvdec_common.h" | 14 | #include "core/hle/service/nvdrv/devices/nvhost_nvdec_common.h" |
| 12 | #include "core/hle/service/nvdrv/devices/nvmap.h" | ||
| 13 | #include "core/hle/service/nvdrv/syncpoint_manager.h" | ||
| 14 | #include "core/memory.h" | 15 | #include "core/memory.h" |
| 16 | #include "video_core/host1x/host1x.h" | ||
| 15 | #include "video_core/memory_manager.h" | 17 | #include "video_core/memory_manager.h" |
| 16 | #include "video_core/renderer_base.h" | 18 | #include "video_core/renderer_base.h" |
| 17 | 19 | ||
| @@ -44,10 +46,22 @@ std::size_t WriteVectors(std::vector<u8>& dst, const std::vector<T>& src, std::s | |||
| 44 | } | 46 | } |
| 45 | } // Anonymous namespace | 47 | } // Anonymous namespace |
| 46 | 48 | ||
| 47 | nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_, | 49 | nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, NvCore::Container& core_, |
| 48 | SyncpointManager& syncpoint_manager_) | 50 | NvCore::ChannelType channel_type_) |
| 49 | : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)}, syncpoint_manager{syncpoint_manager_} {} | 51 | : nvdevice{system_}, core{core_}, syncpoint_manager{core.GetSyncpointManager()}, |
| 50 | nvhost_nvdec_common::~nvhost_nvdec_common() = default; | 52 | nvmap{core.GetNvMapFile()}, channel_type{channel_type_} { |
| 53 | auto& syncpts_accumulated = core.Host1xDeviceFile().syncpts_accumulated; | ||
| 54 | if (syncpts_accumulated.empty()) { | ||
| 55 | channel_syncpoint = syncpoint_manager.AllocateSyncpoint(false); | ||
| 56 | } else { | ||
| 57 | channel_syncpoint = syncpts_accumulated.front(); | ||
| 58 | syncpts_accumulated.pop_front(); | ||
| 59 | } | ||
| 60 | } | ||
| 61 | |||
| 62 | nvhost_nvdec_common::~nvhost_nvdec_common() { | ||
| 63 | core.Host1xDeviceFile().syncpts_accumulated.push_back(channel_syncpoint); | ||
| 64 | } | ||
| 51 | 65 | ||
| 52 | NvResult nvhost_nvdec_common::SetNVMAPfd(const std::vector<u8>& input) { | 66 | NvResult nvhost_nvdec_common::SetNVMAPfd(const std::vector<u8>& input) { |
| 53 | IoctlSetNvmapFD params{}; | 67 | IoctlSetNvmapFD params{}; |
| @@ -84,16 +98,16 @@ NvResult nvhost_nvdec_common::Submit(DeviceFD fd, const std::vector<u8>& input, | |||
| 84 | for (std::size_t i = 0; i < syncpt_increments.size(); i++) { | 98 | for (std::size_t i = 0; i < syncpt_increments.size(); i++) { |
| 85 | const SyncptIncr& syncpt_incr = syncpt_increments[i]; | 99 | const SyncptIncr& syncpt_incr = syncpt_increments[i]; |
| 86 | fence_thresholds[i] = | 100 | fence_thresholds[i] = |
| 87 | syncpoint_manager.IncreaseSyncpoint(syncpt_incr.id, syncpt_incr.increments); | 101 | syncpoint_manager.IncrementSyncpointMaxExt(syncpt_incr.id, syncpt_incr.increments); |
| 88 | } | 102 | } |
| 89 | } | 103 | } |
| 90 | for (const auto& cmd_buffer : command_buffers) { | 104 | for (const auto& cmd_buffer : command_buffers) { |
| 91 | const auto object = nvmap_dev->GetObject(cmd_buffer.memory_id); | 105 | const auto object = nvmap.GetHandle(cmd_buffer.memory_id); |
| 92 | ASSERT_OR_EXECUTE(object, return NvResult::InvalidState;); | 106 | ASSERT_OR_EXECUTE(object, return NvResult::InvalidState;); |
| 93 | Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count); | 107 | Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count); |
| 94 | system.Memory().ReadBlock(object->addr + cmd_buffer.offset, cmdlist.data(), | 108 | system.Memory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(), |
| 95 | cmdlist.size() * sizeof(u32)); | 109 | cmdlist.size() * sizeof(u32)); |
| 96 | gpu.PushCommandBuffer(fd_to_id[fd], cmdlist); | 110 | gpu.PushCommandBuffer(core.Host1xDeviceFile().fd_to_id[fd], cmdlist); |
| 97 | } | 111 | } |
| 98 | std::memcpy(output.data(), ¶ms, sizeof(IoctlSubmit)); | 112 | std::memcpy(output.data(), ¶ms, sizeof(IoctlSubmit)); |
| 99 | // Some games expect command_buffers to be written back | 113 | // Some games expect command_buffers to be written back |
| @@ -112,10 +126,8 @@ NvResult nvhost_nvdec_common::GetSyncpoint(const std::vector<u8>& input, std::ve | |||
| 112 | std::memcpy(¶ms, input.data(), sizeof(IoctlGetSyncpoint)); | 126 | std::memcpy(¶ms, input.data(), sizeof(IoctlGetSyncpoint)); |
| 113 | LOG_DEBUG(Service_NVDRV, "called GetSyncpoint, id={}", params.param); | 127 | LOG_DEBUG(Service_NVDRV, "called GetSyncpoint, id={}", params.param); |
| 114 | 128 | ||
| 115 | if (device_syncpoints[params.param] == 0 && system.GPU().UseNvdec()) { | 129 | // const u32 id{NvCore::SyncpointManager::channel_syncpoints[static_cast<u32>(channel_type)]}; |
| 116 | device_syncpoints[params.param] = syncpoint_manager.AllocateSyncpoint(); | 130 | params.value = channel_syncpoint; |
| 117 | } | ||
| 118 | params.value = device_syncpoints[params.param]; | ||
| 119 | std::memcpy(output.data(), ¶ms, sizeof(IoctlGetSyncpoint)); | 131 | std::memcpy(output.data(), ¶ms, sizeof(IoctlGetSyncpoint)); |
| 120 | 132 | ||
| 121 | return NvResult::Success; | 133 | return NvResult::Success; |
| @@ -123,6 +135,7 @@ NvResult nvhost_nvdec_common::GetSyncpoint(const std::vector<u8>& input, std::ve | |||
| 123 | 135 | ||
| 124 | NvResult nvhost_nvdec_common::GetWaitbase(const std::vector<u8>& input, std::vector<u8>& output) { | 136 | NvResult nvhost_nvdec_common::GetWaitbase(const std::vector<u8>& input, std::vector<u8>& output) { |
| 125 | IoctlGetWaitbase params{}; | 137 | IoctlGetWaitbase params{}; |
| 138 | LOG_CRITICAL(Service_NVDRV, "called WAITBASE"); | ||
| 126 | std::memcpy(¶ms, input.data(), sizeof(IoctlGetWaitbase)); | 139 | std::memcpy(¶ms, input.data(), sizeof(IoctlGetWaitbase)); |
| 127 | params.value = 0; // Seems to be hard coded at 0 | 140 | params.value = 0; // Seems to be hard coded at 0 |
| 128 | std::memcpy(output.data(), ¶ms, sizeof(IoctlGetWaitbase)); | 141 | std::memcpy(output.data(), ¶ms, sizeof(IoctlGetWaitbase)); |
| @@ -136,28 +149,8 @@ NvResult nvhost_nvdec_common::MapBuffer(const std::vector<u8>& input, std::vecto | |||
| 136 | 149 | ||
| 137 | SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer)); | 150 | SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer)); |
| 138 | 151 | ||
| 139 | auto& gpu = system.GPU(); | ||
| 140 | |||
| 141 | for (auto& cmd_buffer : cmd_buffer_handles) { | 152 | for (auto& cmd_buffer : cmd_buffer_handles) { |
| 142 | auto object{nvmap_dev->GetObject(cmd_buffer.map_handle)}; | 153 | cmd_buffer.map_address = nvmap.PinHandle(cmd_buffer.map_handle); |
| 143 | if (!object) { | ||
| 144 | LOG_ERROR(Service_NVDRV, "invalid cmd_buffer nvmap_handle={:X}", cmd_buffer.map_handle); | ||
| 145 | std::memcpy(output.data(), ¶ms, output.size()); | ||
| 146 | return NvResult::InvalidState; | ||
| 147 | } | ||
| 148 | if (object->dma_map_addr == 0) { | ||
| 149 | // NVDEC and VIC memory is in the 32-bit address space | ||
| 150 | // MapAllocate32 will attempt to map a lower 32-bit value in the shared gpu memory space | ||
| 151 | const GPUVAddr low_addr = gpu.MemoryManager().MapAllocate32(object->addr, object->size); | ||
| 152 | object->dma_map_addr = static_cast<u32>(low_addr); | ||
| 153 | // Ensure that the dma_map_addr is indeed in the lower 32-bit address space. | ||
| 154 | ASSERT(object->dma_map_addr == low_addr); | ||
| 155 | } | ||
| 156 | if (!object->dma_map_addr) { | ||
| 157 | LOG_ERROR(Service_NVDRV, "failed to map size={}", object->size); | ||
| 158 | } else { | ||
| 159 | cmd_buffer.map_address = object->dma_map_addr; | ||
| 160 | } | ||
| 161 | } | 154 | } |
| 162 | std::memcpy(output.data(), ¶ms, sizeof(IoctlMapBuffer)); | 155 | std::memcpy(output.data(), ¶ms, sizeof(IoctlMapBuffer)); |
| 163 | std::memcpy(output.data() + sizeof(IoctlMapBuffer), cmd_buffer_handles.data(), | 156 | std::memcpy(output.data() + sizeof(IoctlMapBuffer), cmd_buffer_handles.data(), |
| @@ -167,11 +160,16 @@ NvResult nvhost_nvdec_common::MapBuffer(const std::vector<u8>& input, std::vecto | |||
| 167 | } | 160 | } |
| 168 | 161 | ||
| 169 | NvResult nvhost_nvdec_common::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output) { | 162 | NvResult nvhost_nvdec_common::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output) { |
| 170 | // This is intentionally stubbed. | 163 | IoctlMapBuffer params{}; 
| 171 | // Skip unmapping buffers here, as to not break the continuity of the VP9 reference frame | 164 | std::memcpy(¶ms, input.data(), sizeof(IoctlMapBuffer)); |
| 172 | // addresses, and risk invalidating data before the async GPU thread is done with it | 165 | std::vector<MapBufferEntry> cmd_buffer_handles(params.num_entries); |
| 166 | |||
| 167 | SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer)); | ||
| 168 | for (auto& cmd_buffer : cmd_buffer_handles) { | ||
| 169 | nvmap.UnpinHandle(cmd_buffer.map_handle); | ||
| 170 | } | ||
| 171 | |||
| 173 | std::memset(output.data(), 0, output.size()); | 172 | std::memset(output.data(), 0, output.size()); |
| 174 | LOG_DEBUG(Service_NVDRV, "(STUBBED) called"); | ||
| 175 | return NvResult::Success; | 173 | return NvResult::Success; |
| 176 | } | 174 | } |
| 177 | 175 | ||
| @@ -182,4 +180,9 @@ NvResult nvhost_nvdec_common::SetSubmitTimeout(const std::vector<u8>& input, | |||
| 182 | return NvResult::Success; | 180 | return NvResult::Success; |
| 183 | } | 181 | } |
| 184 | 182 | ||
| 183 | Kernel::KEvent* nvhost_nvdec_common::QueryEvent(u32 event_id) { | ||
| 184 | LOG_CRITICAL(Service_NVDRV, "Unknown HOSTX1 Event {}", event_id); | ||
| 185 | return nullptr; | ||
| 186 | } | ||
| 187 | |||
| 185 | } // namespace Service::Nvidia::Devices | 188 | } // namespace Service::Nvidia::Devices |
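The constructor/destructor pair above recycles channel syncpoints through Host1xDeviceFile::syncpts_accumulated instead of allocating a fresh one for every NVDEC/VIC channel. A self-contained sketch of that recycling scheme, with a trivial stand-in allocator in place of the real SyncpointManager:

    #include <cstdint>
    #include <deque>

    // Stand-in syncpoint allocator; the real SyncpointManager hands out hardware
    // syncpoint ids, which are a scarce resource.
    struct SyncpointAllocator {
        uint32_t next{64};
        uint32_t Allocate() { return next++; }
    };

    // Shared per-Host1x state: multimedia channels park their syncpoint here when
    // they close so the next channel can reuse it (mirrors syncpts_accumulated).
    struct Host1xDeviceFile {
        std::deque<uint32_t> syncpts_accumulated;
    };

    class MultimediaChannel {
    public:
        MultimediaChannel(SyncpointAllocator& allocator, Host1xDeviceFile& host1x_file_)
            : host1x_file{host1x_file_} {
            auto& recycled = host1x_file.syncpts_accumulated;
            if (recycled.empty()) {
                channel_syncpoint = allocator.Allocate();
            } else {
                channel_syncpoint = recycled.front();
                recycled.pop_front();
            }
        }
        ~MultimediaChannel() {
            // Return the syncpoint for the next channel instead of leaking it.
            host1x_file.syncpts_accumulated.push_back(channel_syncpoint);
        }

        uint32_t Syncpoint() const { return channel_syncpoint; }

    private:
        Host1xDeviceFile& host1x_file;
        uint32_t channel_syncpoint{};
    };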
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h index 12d39946d..fe76100c8 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h | |||
| @@ -3,21 +3,26 @@ | |||
| 3 | 3 | ||
| 4 | #pragma once | 4 | #pragma once |
| 5 | 5 | ||
| 6 | #include <deque> | ||
| 6 | #include <vector> | 7 | #include <vector> |
| 7 | #include "common/common_types.h" | 8 | #include "common/common_types.h" |
| 8 | #include "common/swap.h" | 9 | #include "common/swap.h" |
| 10 | #include "core/hle/service/nvdrv/core/syncpoint_manager.h" | ||
| 9 | #include "core/hle/service/nvdrv/devices/nvdevice.h" | 11 | #include "core/hle/service/nvdrv/devices/nvdevice.h" |
| 10 | 12 | ||
| 11 | namespace Service::Nvidia { | 13 | namespace Service::Nvidia { |
| 12 | class SyncpointManager; | 14 | |
| 15 | namespace NvCore { | ||
| 16 | class Container; | ||
| 17 | class NvMap; | ||
| 18 | } // namespace NvCore | ||
| 13 | 19 | ||
| 14 | namespace Devices { | 20 | namespace Devices { |
| 15 | class nvmap; | ||
| 16 | 21 | ||
| 17 | class nvhost_nvdec_common : public nvdevice { | 22 | class nvhost_nvdec_common : public nvdevice { |
| 18 | public: | 23 | public: |
| 19 | explicit nvhost_nvdec_common(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_, | 24 | explicit nvhost_nvdec_common(Core::System& system_, NvCore::Container& core, |
| 20 | SyncpointManager& syncpoint_manager_); | 25 | NvCore::ChannelType channel_type); |
| 21 | ~nvhost_nvdec_common() override; | 26 | ~nvhost_nvdec_common() override; |
| 22 | 27 | ||
| 23 | protected: | 28 | protected: |
| @@ -110,11 +115,15 @@ protected: | |||
| 110 | NvResult UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output); | 115 | NvResult UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output); |
| 111 | NvResult SetSubmitTimeout(const std::vector<u8>& input, std::vector<u8>& output); | 116 | NvResult SetSubmitTimeout(const std::vector<u8>& input, std::vector<u8>& output); |
| 112 | 117 | ||
| 113 | std::unordered_map<DeviceFD, u32> fd_to_id{}; | 118 | Kernel::KEvent* QueryEvent(u32 event_id) override; |
| 119 | |||
| 120 | u32 channel_syncpoint; | ||
| 114 | s32_le nvmap_fd{}; | 121 | s32_le nvmap_fd{}; |
| 115 | u32_le submit_timeout{}; | 122 | u32_le submit_timeout{}; |
| 116 | std::shared_ptr<nvmap> nvmap_dev; | 123 | NvCore::Container& core; |
| 117 | SyncpointManager& syncpoint_manager; | 124 | NvCore::SyncpointManager& syncpoint_manager; |
| 125 | NvCore::NvMap& nvmap; | ||
| 126 | NvCore::ChannelType channel_type; | ||
| 118 | std::array<u32, MaxSyncPoints> device_syncpoints{}; | 127 | std::array<u32, MaxSyncPoints> device_syncpoints{}; |
| 119 | }; | 128 | }; |
| 120 | }; // namespace Devices | 129 | }; // namespace Devices |
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp index f58e8bada..73f97136e 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp | |||
| @@ -4,13 +4,14 @@ | |||
| 4 | #include "common/assert.h" | 4 | #include "common/assert.h" |
| 5 | #include "common/logging/log.h" | 5 | #include "common/logging/log.h" |
| 6 | #include "core/core.h" | 6 | #include "core/core.h" |
| 7 | #include "core/hle/service/nvdrv/core/container.h" | ||
| 7 | #include "core/hle/service/nvdrv/devices/nvhost_vic.h" | 8 | #include "core/hle/service/nvdrv/devices/nvhost_vic.h" |
| 8 | #include "video_core/renderer_base.h" | 9 | #include "video_core/renderer_base.h" |
| 9 | 10 | ||
| 10 | namespace Service::Nvidia::Devices { | 11 | namespace Service::Nvidia::Devices { |
| 11 | nvhost_vic::nvhost_vic(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_, | 12 | |
| 12 | SyncpointManager& syncpoint_manager_) | 13 | nvhost_vic::nvhost_vic(Core::System& system_, NvCore::Container& core_) |
| 13 | : nvhost_nvdec_common{system_, std::move(nvmap_dev_), syncpoint_manager_} {} | 14 | : nvhost_nvdec_common{system_, core_, NvCore::ChannelType::VIC} {} |
| 14 | 15 | ||
| 15 | nvhost_vic::~nvhost_vic() = default; | 16 | nvhost_vic::~nvhost_vic() = default; |
| 16 | 17 | ||
| @@ -19,11 +20,13 @@ NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& i | |||
| 19 | switch (command.group) { | 20 | switch (command.group) { |
| 20 | case 0x0: | 21 | case 0x0: |
| 21 | switch (command.cmd) { | 22 | switch (command.cmd) { |
| 22 | case 0x1: | 23 | case 0x1: { |
| 23 | if (!fd_to_id.contains(fd)) { | 24 | auto& host1x_file = core.Host1xDeviceFile(); |
| 24 | fd_to_id[fd] = next_id++; | 25 | if (!host1x_file.fd_to_id.contains(fd)) { |
| 26 | host1x_file.fd_to_id[fd] = host1x_file.vic_next_id++; | ||
| 25 | } | 27 | } |
| 26 | return Submit(fd, input, output); | 28 | return Submit(fd, input, output); |
| 29 | } | ||
| 27 | case 0x2: | 30 | case 0x2: |
| 28 | return GetSyncpoint(input, output); | 31 | return GetSyncpoint(input, output); |
| 29 | case 0x3: | 32 | case 0x3: |
| @@ -67,8 +70,9 @@ NvResult nvhost_vic::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& i | |||
| 67 | void nvhost_vic::OnOpen(DeviceFD fd) {} | 70 | void nvhost_vic::OnOpen(DeviceFD fd) {} |
| 68 | 71 | ||
| 69 | void nvhost_vic::OnClose(DeviceFD fd) { | 72 | void nvhost_vic::OnClose(DeviceFD fd) { |
| 70 | const auto iter = fd_to_id.find(fd); | 73 | auto& host1x_file = core.Host1xDeviceFile(); |
| 71 | if (iter != fd_to_id.end()) { | 74 | const auto iter = host1x_file.fd_to_id.find(fd); |
| 75 | if (iter != host1x_file.fd_to_id.end()) { | ||
| 72 | system.GPU().ClearCdmaInstance(iter->second); | 76 | system.GPU().ClearCdmaInstance(iter->second); |
| 73 | } | 77 | } |
| 74 | } | 78 | } |
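Both multimedia devices now key their command-DMA instance ids off a shared Host1xDeviceFile map with per-engine counters (nvdec_next_id, vic_next_id) rather than per-device state. A small sketch of that bookkeeping, assuming a plain integer DeviceFD and omitting the GPU call made on close:

    #include <cstdint>
    #include <optional>
    #include <unordered_map>

    using DeviceFD = int32_t;

    // Shared per-Host1x bookkeeping that replaced the per-device next_id members:
    // one fd -> instance map, with separate counters for the NVDEC and VIC engines.
    struct Host1xDeviceFile {
        std::unordered_map<DeviceFD, uint32_t> fd_to_id;
        uint32_t nvdec_next_id{};
        uint32_t vic_next_id{};
    };

    // Lazily assigns an instance id on the first submit from a given fd.
    uint32_t IdForSubmit(Host1xDeviceFile& file, DeviceFD fd, bool is_vic) {
        if (!file.fd_to_id.contains(fd)) {
            file.fd_to_id[fd] = is_vic ? file.vic_next_id++ : file.nvdec_next_id++;
        }
        return file.fd_to_id[fd];
    }

    // Looks up which instance a closing fd was bound to, if any; the real OnClose
    // feeds this id into system.GPU().ClearCdmaInstance().
    std::optional<uint32_t> InstanceForClose(const Host1xDeviceFile& file, DeviceFD fd) {
        const auto iter = file.fd_to_id.find(fd);
        if (iter == file.fd_to_id.end()) {
            return std::nullopt;
        }
        return iter->second;
    }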
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.h b/src/core/hle/service/nvdrv/devices/nvhost_vic.h index b41b195ae..f164caafb 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_vic.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.h | |||
| @@ -9,8 +9,7 @@ namespace Service::Nvidia::Devices { | |||
| 9 | 9 | ||
| 10 | class nvhost_vic final : public nvhost_nvdec_common { | 10 | class nvhost_vic final : public nvhost_nvdec_common { |
| 11 | public: | 11 | public: |
| 12 | explicit nvhost_vic(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_, | 12 | explicit nvhost_vic(Core::System& system_, NvCore::Container& core); |
| 13 | SyncpointManager& syncpoint_manager_); | ||
| 14 | ~nvhost_vic(); | 13 | ~nvhost_vic(); |
| 15 | 14 | ||
| 16 | NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, | 15 | NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, |
| @@ -22,8 +21,5 @@ public: | |||
| 22 | 21 | ||
| 23 | void OnOpen(DeviceFD fd) override; | 22 | void OnOpen(DeviceFD fd) override; |
| 24 | void OnClose(DeviceFD fd) override; | 23 | void OnClose(DeviceFD fd) override; |
| 25 | |||
| 26 | private: | ||
| 27 | u32 next_id{}; | ||
| 28 | }; | 24 | }; |
| 29 | } // namespace Service::Nvidia::Devices | 25 | } // namespace Service::Nvidia::Devices |
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp index d8518149d..ddf273b5e 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.cpp +++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp | |||
| @@ -2,19 +2,26 @@ | |||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | 3 | ||
| 4 | #include <algorithm> | 4 | #include <algorithm> |
| 5 | #include <bit> | ||
| 5 | #include <cstring> | 6 | #include <cstring> |
| 6 | 7 | ||
| 8 | #include "common/alignment.h" | ||
| 7 | #include "common/assert.h" | 9 | #include "common/assert.h" |
| 8 | #include "common/logging/log.h" | 10 | #include "common/logging/log.h" |
| 11 | #include "core/core.h" | ||
| 12 | #include "core/hle/kernel/k_page_table.h" | ||
| 13 | #include "core/hle/kernel/k_process.h" | ||
| 14 | #include "core/hle/service/nvdrv/core/container.h" | ||
| 15 | #include "core/hle/service/nvdrv/core/nvmap.h" | ||
| 9 | #include "core/hle/service/nvdrv/devices/nvmap.h" | 16 | #include "core/hle/service/nvdrv/devices/nvmap.h" |
| 17 | #include "core/memory.h" | ||
| 18 | |||
| 19 | using Core::Memory::YUZU_PAGESIZE; | ||
| 10 | 20 | ||
| 11 | namespace Service::Nvidia::Devices { | 21 | namespace Service::Nvidia::Devices { |
| 12 | 22 | ||
| 13 | nvmap::nvmap(Core::System& system_) : nvdevice{system_} { | 23 | nvmap::nvmap(Core::System& system_, NvCore::Container& container_) |
| 14 | // Handle 0 appears to be used when remapping, so we create a placeholder empty nvmap object to | 24 | : nvdevice{system_}, container{container_}, file{container.GetNvMapFile()} {} |
| 15 | // represent this. | ||
| 16 | CreateObject(0); | ||
| 17 | } | ||
| 18 | 25 | ||
| 19 | nvmap::~nvmap() = default; | 26 | nvmap::~nvmap() = default; |
| 20 | 27 | ||
| @@ -62,39 +69,21 @@ NvResult nvmap::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input, | |||
| 62 | void nvmap::OnOpen(DeviceFD fd) {} | 69 | void nvmap::OnOpen(DeviceFD fd) {} |
| 63 | void nvmap::OnClose(DeviceFD fd) {} | 70 | void nvmap::OnClose(DeviceFD fd) {} |
| 64 | 71 | ||
| 65 | VAddr nvmap::GetObjectAddress(u32 handle) const { | ||
| 66 | auto object = GetObject(handle); | ||
| 67 | ASSERT(object); | ||
| 68 | ASSERT(object->status == Object::Status::Allocated); | ||
| 69 | return object->addr; | ||
| 70 | } | ||
| 71 | |||
| 72 | u32 nvmap::CreateObject(u32 size) { | ||
| 73 | // Create a new nvmap object and obtain a handle to it. | ||
| 74 | auto object = std::make_shared<Object>(); | ||
| 75 | object->id = next_id++; | ||
| 76 | object->size = size; | ||
| 77 | object->status = Object::Status::Created; | ||
| 78 | object->refcount = 1; | ||
| 79 | |||
| 80 | const u32 handle = next_handle++; | ||
| 81 | |||
| 82 | handles.insert_or_assign(handle, std::move(object)); | ||
| 83 | |||
| 84 | return handle; | ||
| 85 | } | ||
| 86 | |||
| 87 | NvResult nvmap::IocCreate(const std::vector<u8>& input, std::vector<u8>& output) { | 72 | NvResult nvmap::IocCreate(const std::vector<u8>& input, std::vector<u8>& output) { |
| 88 | IocCreateParams params; | 73 | IocCreateParams params; |
| 89 | std::memcpy(¶ms, input.data(), sizeof(params)); | 74 | std::memcpy(¶ms, input.data(), sizeof(params)); |
| 90 | LOG_DEBUG(Service_NVDRV, "size=0x{:08X}", params.size); | 75 | LOG_DEBUG(Service_NVDRV, "called, size=0x{:08X}", params.size); |
| 91 | 76 | ||
| 92 | if (!params.size) { | 77 | std::shared_ptr<NvCore::NvMap::Handle> handle_description{}; |
| 93 | LOG_ERROR(Service_NVDRV, "Size is 0"); | 78 | auto result = |
| 94 | return NvResult::BadValue; | 79 | file.CreateHandle(Common::AlignUp(params.size, YUZU_PAGESIZE), handle_description); |
| 80 | if (result != NvResult::Success) { | ||
| 81 | LOG_CRITICAL(Service_NVDRV, "Failed to create Object"); | ||
| 82 | return result; | ||
| 95 | } | 83 | } |
| 96 | 84 | handle_description->orig_size = params.size; // Orig size is the unaligned size | |
| 97 | params.handle = CreateObject(params.size); | 85 | params.handle = handle_description->id; |
| 86 | LOG_DEBUG(Service_NVDRV, "handle: {}, size: 0x{:X}", handle_description->id, params.size); | ||
| 98 | 87 | ||
| 99 | std::memcpy(output.data(), ¶ms, sizeof(params)); | 88 | std::memcpy(output.data(), ¶ms, sizeof(params)); |
| 100 | return NvResult::Success; | 89 | return NvResult::Success; |
| @@ -103,63 +92,68 @@ NvResult nvmap::IocCreate(const std::vector<u8>& input, std::vector<u8>& output) | |||
| 103 | NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output) { | 92 | NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output) { |
| 104 | IocAllocParams params; | 93 | IocAllocParams params; |
| 105 | std::memcpy(¶ms, input.data(), sizeof(params)); | 94 | std::memcpy(¶ms, input.data(), sizeof(params)); |
| 106 | LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.addr); | 95 | LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.address); |
| 107 | 96 | ||
| 108 | if (!params.handle) { | 97 | if (!params.handle) { |
| 109 | LOG_ERROR(Service_NVDRV, "Handle is 0"); | 98 | LOG_CRITICAL(Service_NVDRV, "Handle is 0"); |
| 110 | return NvResult::BadValue; | 99 | return NvResult::BadValue; |
| 111 | } | 100 | } |
| 112 | 101 | ||
| 113 | if ((params.align - 1) & params.align) { | 102 | if ((params.align - 1) & params.align) { |
| 114 | LOG_ERROR(Service_NVDRV, "Incorrect alignment used, alignment={:08X}", params.align); | 103 | LOG_CRITICAL(Service_NVDRV, "Incorrect alignment used, alignment={:08X}", params.align); |
| 115 | return NvResult::BadValue; | 104 | return NvResult::BadValue; |
| 116 | } | 105 | } |
| 117 | 106 | ||
| 118 | const u32 min_alignment = 0x1000; | 107 | // Force page size alignment at a minimum |
| 119 | if (params.align < min_alignment) { | 108 | if (params.align < YUZU_PAGESIZE) { |
| 120 | params.align = min_alignment; | 109 | params.align = YUZU_PAGESIZE; |
| 121 | } | 110 | } |
| 122 | 111 | ||
| 123 | auto object = GetObject(params.handle); | 112 | auto handle_description{file.GetHandle(params.handle)}; |
| 124 | if (!object) { | 113 | if (!handle_description) { |
| 125 | LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle); | 114 | LOG_CRITICAL(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle); |
| 126 | return NvResult::BadValue; | 115 | return NvResult::BadValue; |
| 127 | } | 116 | } |
| 128 | 117 | ||
| 129 | if (object->status == Object::Status::Allocated) { | 118 | if (handle_description->allocated) { |
| 130 | LOG_ERROR(Service_NVDRV, "Object is already allocated, handle={:08X}", params.handle); | 119 | LOG_CRITICAL(Service_NVDRV, "Object is already allocated, handle={:08X}", params.handle); |
| 131 | return NvResult::InsufficientMemory; | 120 | return NvResult::InsufficientMemory; |
| 132 | } | 121 | } |
| 133 | 122 | ||
| 134 | object->flags = params.flags; | 123 | const auto result = |
| 135 | object->align = params.align; | 124 | handle_description->Alloc(params.flags, params.align, params.kind, params.address); |
| 136 | object->kind = params.kind; | 125 | if (result != NvResult::Success) { |
| 137 | object->addr = params.addr; | 126 | LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle); |
| 138 | object->status = Object::Status::Allocated; | 127 | return result; |
| 139 | 128 | } | |
| 129 | ASSERT(system.CurrentProcess() | ||
| 130 | ->PageTable() | ||
| 131 | .LockForDeviceAddressSpace(handle_description->address, handle_description->size) | ||
| 132 | .IsSuccess()); | ||
| 140 | std::memcpy(output.data(), ¶ms, sizeof(params)); | 133 | std::memcpy(output.data(), ¶ms, sizeof(params)); |
| 141 | return NvResult::Success; | 134 | return result; |
| 142 | } | 135 | } |
| 143 | 136 | ||
| 144 | NvResult nvmap::IocGetId(const std::vector<u8>& input, std::vector<u8>& output) { | 137 | NvResult nvmap::IocGetId(const std::vector<u8>& input, std::vector<u8>& output) { |
| 145 | IocGetIdParams params; | 138 | IocGetIdParams params; |
| 146 | std::memcpy(¶ms, input.data(), sizeof(params)); | 139 | std::memcpy(¶ms, input.data(), sizeof(params)); |
| 147 | 140 | ||
| 148 | LOG_WARNING(Service_NVDRV, "called"); | 141 | LOG_DEBUG(Service_NVDRV, "called"); |
| 149 | 142 | ||
| 143 | // See the comment in FromId for extra info on this function | ||
| 150 | if (!params.handle) { | 144 | if (!params.handle) { |
| 151 | LOG_ERROR(Service_NVDRV, "Handle is zero"); | 145 | LOG_CRITICAL(Service_NVDRV, "Error!"); |
| 152 | return NvResult::BadValue; | 146 | return NvResult::BadValue; |
| 153 | } | 147 | } |
| 154 | 148 | ||
| 155 | auto object = GetObject(params.handle); | 149 | auto handle_description{file.GetHandle(params.handle)}; |
| 156 | if (!object) { | 150 | if (!handle_description) { |
| 157 | LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle); | 151 | LOG_CRITICAL(Service_NVDRV, "Error!"); |
| 158 | return NvResult::BadValue; | 152 | return NvResult::AccessDenied; // This will always return EPERM irrespective of if the |
| 153 | // handle exists or not | ||
| 159 | } | 154 | } |
| 160 | 155 | ||
| 161 | params.id = object->id; | 156 | params.id = handle_description->id; |
| 162 | |||
| 163 | std::memcpy(output.data(), ¶ms, sizeof(params)); | 157 | std::memcpy(output.data(), ¶ms, sizeof(params)); |
| 164 | return NvResult::Success; | 158 | return NvResult::Success; |
| 165 | } | 159 | } |
| @@ -168,26 +162,29 @@ NvResult nvmap::IocFromId(const std::vector<u8>& input, std::vector<u8>& output) | |||
| 168 | IocFromIdParams params; | 162 | IocFromIdParams params; |
| 169 | std::memcpy(¶ms, input.data(), sizeof(params)); | 163 | std::memcpy(¶ms, input.data(), sizeof(params)); |
| 170 | 164 | ||
| 171 | LOG_WARNING(Service_NVDRV, "(STUBBED) called"); | 165 | LOG_DEBUG(Service_NVDRV, "called, id:{}", params.id); |
| 172 | 166 | ||
| 173 | auto itr = std::find_if(handles.begin(), handles.end(), | 167 | // Handles and IDs are always the same value in nvmap however IDs can be used globally given the |
| 174 | [&](const auto& entry) { return entry.second->id == params.id; }); | 168 | // right permissions. |
| 175 | if (itr == handles.end()) { | 169 | // Since we don't plan on ever supporting multiprocess we can skip implementing handle refs and |
| 176 | LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle); | 170 | // so this function just does simple validation and passes through the handle id. |
| 171 | if (!params.id) { | ||
| 172 | LOG_CRITICAL(Service_NVDRV, "Zero Id is invalid!"); | ||
| 177 | return NvResult::BadValue; | 173 | return NvResult::BadValue; |
| 178 | } | 174 | } |
| 179 | 175 | ||
| 180 | auto& object = itr->second; | 176 | auto handle_description{file.GetHandle(params.id)}; |
| 181 | if (object->status != Object::Status::Allocated) { | 177 | if (!handle_description) { |
| 182 | LOG_ERROR(Service_NVDRV, "Object is not allocated, handle={:08X}", params.handle); | 178 | LOG_CRITICAL(Service_NVDRV, "Unregistered handle!"); |
| 183 | return NvResult::BadValue; | 179 | return NvResult::BadValue; |
| 184 | } | 180 | } |
| 185 | 181 | ||
| 186 | itr->second->refcount++; | 182 | auto result = handle_description->Duplicate(false); |
| 187 | 183 | if (result != NvResult::Success) { | |
| 188 | // Return the existing handle instead of creating a new one. | 184 | LOG_CRITICAL(Service_NVDRV, "Could not duplicate handle!"); |
| 189 | params.handle = itr->first; | 185 | return result; |
| 190 | 186 | } | |
| 187 | params.handle = handle_description->id; | ||
| 191 | std::memcpy(output.data(), ¶ms, sizeof(params)); | 188 | std::memcpy(output.data(), ¶ms, sizeof(params)); |
| 192 | return NvResult::Success; | 189 | return NvResult::Success; |
| 193 | } | 190 | } |
| @@ -198,35 +195,43 @@ NvResult nvmap::IocParam(const std::vector<u8>& input, std::vector<u8>& output) | |||
| 198 | IocParamParams params; | 195 | IocParamParams params; |
| 199 | std::memcpy(¶ms, input.data(), sizeof(params)); | 196 | std::memcpy(¶ms, input.data(), sizeof(params)); |
| 200 | 197 | ||
| 201 | LOG_DEBUG(Service_NVDRV, "(STUBBED) called type={}", params.param); | 198 | LOG_DEBUG(Service_NVDRV, "called type={}", params.param); |
| 202 | 199 | ||
| 203 | auto object = GetObject(params.handle); | 200 | if (!params.handle) { |
| 204 | if (!object) { | 201 | LOG_CRITICAL(Service_NVDRV, "Invalid handle!"); |
| 205 | LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle); | ||
| 206 | return NvResult::BadValue; | 202 | return NvResult::BadValue; |
| 207 | } | 203 | } |
| 208 | 204 | ||
| 209 | if (object->status != Object::Status::Allocated) { | 205 | auto handle_description{file.GetHandle(params.handle)}; |
| 210 | LOG_ERROR(Service_NVDRV, "Object is not allocated, handle={:08X}", params.handle); | 206 | if (!handle_description) { |
| 207 | LOG_CRITICAL(Service_NVDRV, "Not registered handle!"); | ||
| 211 | return NvResult::BadValue; | 208 | return NvResult::BadValue; |
| 212 | } | 209 | } |
| 213 | 210 | ||
| 214 | switch (static_cast<ParamTypes>(params.param)) { | 211 | switch (params.param) { |
| 215 | case ParamTypes::Size: | 212 | case HandleParameterType::Size: |
| 216 | params.result = object->size; | 213 | params.result = static_cast<u32_le>(handle_description->orig_size); |
| 214 | break; | ||
| 215 | case HandleParameterType::Alignment: | ||
| 216 | params.result = static_cast<u32_le>(handle_description->align); | ||
| 217 | break; | 217 | break; |
| 218 | case ParamTypes::Alignment: | 218 | case HandleParameterType::Base: |
| 219 | params.result = object->align; | 219 | params.result = static_cast<u32_le>(-22); // posix EINVAL |
| 220 | break; | 220 | break; |
| 221 | case ParamTypes::Heap: | 221 | case HandleParameterType::Heap: |
| 222 | // TODO(Subv): Seems to be a hardcoded value? | 222 | if (handle_description->allocated) |
| 223 | params.result = 0x40000000; | 223 | params.result = 0x40000000; |
| 224 | else | ||
| 225 | params.result = 0; | ||
| 224 | break; | 226 | break; |
| 225 | case ParamTypes::Kind: | 227 | case HandleParameterType::Kind: |
| 226 | params.result = object->kind; | 228 | params.result = handle_description->kind; |
| 229 | break; | ||
| 230 | case HandleParameterType::IsSharedMemMapped: | ||
| 231 | params.result = handle_description->is_shared_mem_mapped; | ||
| 227 | break; | 232 | break; |
| 228 | default: | 233 | default: |
| 229 | UNIMPLEMENTED(); | 234 | return NvResult::BadValue; |
| 230 | } | 235 | } |
| 231 | 236 | ||
| 232 | std::memcpy(output.data(), ¶ms, sizeof(params)); | 237 | std::memcpy(output.data(), ¶ms, sizeof(params)); |
| @@ -234,46 +239,29 @@ NvResult nvmap::IocParam(const std::vector<u8>& input, std::vector<u8>& output) | |||
| 234 | } | 239 | } |
| 235 | 240 | ||
| 236 | NvResult nvmap::IocFree(const std::vector<u8>& input, std::vector<u8>& output) { | 241 | NvResult nvmap::IocFree(const std::vector<u8>& input, std::vector<u8>& output) { |
| 237 | // TODO(Subv): These flags are unconfirmed. | ||
| 238 | enum FreeFlags { | ||
| 239 | Freed = 0, | ||
| 240 | NotFreedYet = 1, | ||
| 241 | }; | ||
| 242 | |||
| 243 | IocFreeParams params; | 242 | IocFreeParams params; |
| 244 | std::memcpy(¶ms, input.data(), sizeof(params)); | 243 | std::memcpy(¶ms, input.data(), sizeof(params)); |
| 245 | 244 | ||
| 246 | LOG_DEBUG(Service_NVDRV, "(STUBBED) called"); | 245 | LOG_DEBUG(Service_NVDRV, "called"); |
| 247 | 246 | ||
| 248 | auto itr = handles.find(params.handle); | 247 | if (!params.handle) { |
| 249 | if (itr == handles.end()) { | 248 | LOG_CRITICAL(Service_NVDRV, "Handle null freed?"); |
| 250 | LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle); | 249 | return NvResult::Success; |
| 251 | return NvResult::BadValue; | ||
| 252 | } | ||
| 253 | if (!itr->second->refcount) { | ||
| 254 | LOG_ERROR( | ||
| 255 | Service_NVDRV, | ||
| 256 | "There is no references to this object. The object is already freed. handle={:08X}", | ||
| 257 | params.handle); | ||
| 258 | return NvResult::BadValue; | ||
| 259 | } | 250 | } |
| 260 | 251 | ||
| 261 | itr->second->refcount--; | 252 | if (auto freeInfo{file.FreeHandle(params.handle, false)}) { |
| 262 | 253 | ASSERT(system.CurrentProcess() | |
| 263 | params.size = itr->second->size; | 254 | ->PageTable() |
| 264 | 255 | .UnlockForDeviceAddressSpace(freeInfo->address, freeInfo->size) | |
| 265 | if (itr->second->refcount == 0) { | 256 | .IsSuccess()); |
| 266 | params.flags = Freed; | 257 | params.address = freeInfo->address; |
| 267 | // The address of the nvmap is written to the output if we're finally freeing it, otherwise | 258 | params.size = static_cast<u32>(freeInfo->size); |
| 268 | // 0 is written. | 259 | params.flags.raw = 0; |
| 269 | params.address = itr->second->addr; | 260 | params.flags.map_uncached.Assign(freeInfo->was_uncached); |
| 270 | } else { | 261 | } else { |
| 271 | params.flags = NotFreedYet; | 262 | // This is possible when there are internal dups or other duplicates. 
| 272 | params.address = 0; | ||
| 273 | } | 263 | } |
| 274 | 264 | ||
| 275 | handles.erase(params.handle); | ||
| 276 | |||
| 277 | std::memcpy(output.data(), ¶ms, sizeof(params)); | 265 | std::memcpy(output.data(), ¶ms, sizeof(params)); |
| 278 | return NvResult::Success; | 266 | return NvResult::Success; |
| 279 | } | 267 | } |
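The rewritten create/alloc paths above align sizes and alignments against the guest page size before handing them to the NvCore allocator: CreateHandle receives a page-aligned size while the unaligned value is kept as orig_size, and IocAlloc rejects non-power-of-two alignments and promotes anything smaller than a page. A minimal sketch of those checks, assuming a 0x1000 page size constant in place of Core::Memory::YUZU_PAGESIZE:

    #include <cstdint>

    // Stand-in for Core::Memory::YUZU_PAGESIZE.
    constexpr uint32_t kPageSize = 0x1000;

    constexpr uint32_t AlignUp(uint32_t value, uint32_t alignment) {
        return (value + alignment - 1) & ~(alignment - 1);
    }

    // IocCreate: the backing handle is created with a page-aligned size while the
    // caller's original (unaligned) size is remembered separately.
    struct CreatedHandle {
        uint32_t orig_size;    // what the guest asked for
        uint32_t aligned_size; // what CreateHandle actually reserves
    };

    CreatedHandle SketchCreate(uint32_t requested_size) {
        return CreatedHandle{requested_size, AlignUp(requested_size, kPageSize)};
    }

    // IocAlloc: the requested alignment must be a power of two and is promoted to
    // at least one page, mirroring the checks in the hunk above.
    bool SketchValidateAlignment(uint32_t& align) {
        if ((align - 1) & align) {
            return false; // rejected as NvResult::BadValue
        }
        if (align < kPageSize) {
            align = kPageSize; // force page-size alignment at a minimum
        }
        return true;
    }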
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.h b/src/core/hle/service/nvdrv/devices/nvmap.h index d5360d6e5..e9bfd0358 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.h +++ b/src/core/hle/service/nvdrv/devices/nvmap.h | |||
| @@ -9,15 +9,23 @@ | |||
| 9 | #include "common/common_funcs.h" | 9 | #include "common/common_funcs.h" |
| 10 | #include "common/common_types.h" | 10 | #include "common/common_types.h" |
| 11 | #include "common/swap.h" | 11 | #include "common/swap.h" |
| 12 | #include "core/hle/service/nvdrv/core/nvmap.h" | ||
| 12 | #include "core/hle/service/nvdrv/devices/nvdevice.h" | 13 | #include "core/hle/service/nvdrv/devices/nvdevice.h" |
| 13 | 14 | ||
| 15 | namespace Service::Nvidia::NvCore { | ||
| 16 | class Container; | ||
| 17 | } // namespace Service::Nvidia::NvCore | ||
| 18 | |||
| 14 | namespace Service::Nvidia::Devices { | 19 | namespace Service::Nvidia::Devices { |
| 15 | 20 | ||
| 16 | class nvmap final : public nvdevice { | 21 | class nvmap final : public nvdevice { |
| 17 | public: | 22 | public: |
| 18 | explicit nvmap(Core::System& system_); | 23 | explicit nvmap(Core::System& system_, NvCore::Container& container); |
| 19 | ~nvmap() override; | 24 | ~nvmap() override; |
| 20 | 25 | ||
| 26 | nvmap(const nvmap&) = delete; | ||
| 27 | nvmap& operator=(const nvmap&) = delete; | ||
| 28 | |||
| 21 | NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, | 29 | NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, |
| 22 | std::vector<u8>& output) override; | 30 | std::vector<u8>& output) override; |
| 23 | NvResult Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input, | 31 | NvResult Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input, |
| @@ -28,31 +36,15 @@ public: | |||
| 28 | void OnOpen(DeviceFD fd) override; | 36 | void OnOpen(DeviceFD fd) override; |
| 29 | void OnClose(DeviceFD fd) override; | 37 | void OnClose(DeviceFD fd) override; |
| 30 | 38 | ||
| 31 | /// Returns the allocated address of an nvmap object given its handle. | 39 | enum class HandleParameterType : u32_le { |
| 32 | VAddr GetObjectAddress(u32 handle) const; | 40 | Size = 1, |
| 33 | 41 | Alignment = 2, | |
| 34 | /// Represents an nvmap object. | 42 | Base = 3, |
| 35 | struct Object { | 43 | Heap = 4, |
| 36 | enum class Status { Created, Allocated }; | 44 | Kind = 5, |
| 37 | u32 id; | 45 | IsSharedMemMapped = 6 |
| 38 | u32 size; | ||
| 39 | u32 flags; | ||
| 40 | u32 align; | ||
| 41 | u8 kind; | ||
| 42 | VAddr addr; | ||
| 43 | Status status; | ||
| 44 | u32 refcount; | ||
| 45 | u32 dma_map_addr; | ||
| 46 | }; | 46 | }; |
| 47 | 47 | ||
| 48 | std::shared_ptr<Object> GetObject(u32 handle) const { | ||
| 49 | auto itr = handles.find(handle); | ||
| 50 | if (itr != handles.end()) { | ||
| 51 | return itr->second; | ||
| 52 | } | ||
| 53 | return {}; | ||
| 54 | } | ||
| 55 | |||
| 56 | private: | 48 | private: |
| 57 | /// Id to use for the next handle that is created. | 49 | /// Id to use for the next handle that is created. |
| 58 | u32 next_handle = 0; | 50 | u32 next_handle = 0; |
| @@ -60,9 +52,6 @@ private: | |||
| 60 | /// Id to use for the next object that is created. | 52 | /// Id to use for the next object that is created. |
| 61 | u32 next_id = 0; | 53 | u32 next_id = 0; |
| 62 | 54 | ||
| 63 | /// Mapping of currently allocated handles to the objects they represent. | ||
| 64 | std::unordered_map<u32, std::shared_ptr<Object>> handles; | ||
| 65 | |||
| 66 | struct IocCreateParams { | 55 | struct IocCreateParams { |
| 67 | // Input | 56 | // Input |
| 68 | u32_le size{}; | 57 | u32_le size{}; |
| @@ -83,11 +72,11 @@ private: | |||
| 83 | // Input | 72 | // Input |
| 84 | u32_le handle{}; | 73 | u32_le handle{}; |
| 85 | u32_le heap_mask{}; | 74 | u32_le heap_mask{}; |
| 86 | u32_le flags{}; | 75 | NvCore::NvMap::Handle::Flags flags{}; |
| 87 | u32_le align{}; | 76 | u32_le align{}; |
| 88 | u8 kind{}; | 77 | u8 kind{}; |
| 89 | INSERT_PADDING_BYTES(7); | 78 | INSERT_PADDING_BYTES(7); |
| 90 | u64_le addr{}; | 79 | u64_le address{}; |
| 91 | }; | 80 | }; |
| 92 | static_assert(sizeof(IocAllocParams) == 32, "IocAllocParams has wrong size"); | 81 | static_assert(sizeof(IocAllocParams) == 32, "IocAllocParams has wrong size"); |
| 93 | 82 | ||
| @@ -96,14 +85,14 @@ private: | |||
| 96 | INSERT_PADDING_BYTES(4); | 85 | INSERT_PADDING_BYTES(4); |
| 97 | u64_le address{}; | 86 | u64_le address{}; |
| 98 | u32_le size{}; | 87 | u32_le size{}; |
| 99 | u32_le flags{}; | 88 | NvCore::NvMap::Handle::Flags flags{}; |
| 100 | }; | 89 | }; |
| 101 | static_assert(sizeof(IocFreeParams) == 24, "IocFreeParams has wrong size"); | 90 | static_assert(sizeof(IocFreeParams) == 24, "IocFreeParams has wrong size"); |
| 102 | 91 | ||
| 103 | struct IocParamParams { | 92 | struct IocParamParams { |
| 104 | // Input | 93 | // Input |
| 105 | u32_le handle{}; | 94 | u32_le handle{}; |
| 106 | u32_le param{}; | 95 | HandleParameterType param{}; |
| 107 | // Output | 96 | // Output |
| 108 | u32_le result{}; | 97 | u32_le result{}; |
| 109 | }; | 98 | }; |
| @@ -117,14 +106,15 @@ private: | |||
| 117 | }; | 106 | }; |
| 118 | static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size"); | 107 | static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size"); |
| 119 | 108 | ||
| 120 | u32 CreateObject(u32 size); | ||
| 121 | |||
| 122 | NvResult IocCreate(const std::vector<u8>& input, std::vector<u8>& output); | 109 | NvResult IocCreate(const std::vector<u8>& input, std::vector<u8>& output); |
| 123 | NvResult IocAlloc(const std::vector<u8>& input, std::vector<u8>& output); | 110 | NvResult IocAlloc(const std::vector<u8>& input, std::vector<u8>& output); |
| 124 | NvResult IocGetId(const std::vector<u8>& input, std::vector<u8>& output); | 111 | NvResult IocGetId(const std::vector<u8>& input, std::vector<u8>& output); |
| 125 | NvResult IocFromId(const std::vector<u8>& input, std::vector<u8>& output); | 112 | NvResult IocFromId(const std::vector<u8>& input, std::vector<u8>& output); |
| 126 | NvResult IocParam(const std::vector<u8>& input, std::vector<u8>& output); | 113 | NvResult IocParam(const std::vector<u8>& input, std::vector<u8>& output); |
| 127 | NvResult IocFree(const std::vector<u8>& input, std::vector<u8>& output); | 114 | NvResult IocFree(const std::vector<u8>& input, std::vector<u8>& output); |
| 115 | |||
| 116 | NvCore::Container& container; | ||
| 117 | NvCore::NvMap& file; | ||
| 128 | }; | 118 | }; |
| 129 | 119 | ||
| 130 | } // namespace Service::Nvidia::Devices | 120 | } // namespace Service::Nvidia::Devices |
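IocAllocParams and IocFreeParams now type their flags as NvCore::NvMap::Handle::Flags instead of a raw u32_le, so intent such as map_uncached is carried by name rather than by hand-rolled masking. The real type is a BitField union defined outside this diff; the rough idea, approximated here with plain bit operations and an assumed bit position:

```cpp
#include <cstdint>

// Illustrative only: the real NvCore::NvMap::Handle::Flags is a BitField union,
// but the observable behaviour is a raw u32 with named single-bit accessors.
struct HandleFlags {
    std::uint32_t raw{};

    void SetMapUncached(bool value) {
        raw = (raw & ~kMapUncached) | (value ? kMapUncached : 0u);
    }
    bool MapUncached() const {
        return (raw & kMapUncached) != 0;
    }

private:
    static constexpr std::uint32_t kMapUncached = 1u << 0; // bit position assumed
};

int main() {
    HandleFlags flags;
    flags.SetMapUncached(true);
    // The raw value still round-trips through the ioctl structs unchanged.
    return (flags.MapUncached() && flags.raw == 1u) ? 0 : 1;
}
```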
diff --git a/src/core/hle/service/nvdrv/nvdata.h b/src/core/hle/service/nvdrv/nvdata.h index 1d00394c8..0e2f47075 100644 --- a/src/core/hle/service/nvdrv/nvdata.h +++ b/src/core/hle/service/nvdrv/nvdata.h | |||
| @@ -1,5 +1,6 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: 2021 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-FileCopyrightText: 2021 Skyline Team and Contributors |
| 3 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | 4 | ||
| 4 | #pragma once | 5 | #pragma once |
| 5 | 6 | ||
| @@ -78,11 +79,15 @@ enum class NvResult : u32 { | |||
| 78 | ModuleNotPresent = 0xA000E, | 79 | ModuleNotPresent = 0xA000E, |
| 79 | }; | 80 | }; |
| 80 | 81 | ||
| 82 | // obtained from | ||
| 83 | // https://github.com/skyline-emu/skyline/blob/nvdec-dev/app/src/main/cpp/skyline/services/nvdrv/devices/nvhost/ctrl.h#L47 | ||
| 81 | enum class EventState { | 84 | enum class EventState { |
| 82 | Free = 0, | 85 | Available = 0, |
| 83 | Registered = 1, | 86 | Waiting = 1, |
| 84 | Waiting = 2, | 87 | Cancelling = 2, |
| 85 | Busy = 3, | 88 | Signalling = 3, |
| 89 | Signalled = 4, | ||
| 90 | Cancelled = 5, | ||
| 86 | }; | 91 | }; |
| 87 | 92 | ||
| 88 | union Ioctl { | 93 | union Ioctl { |
diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp index 756eb7453..5e7b7468f 100644 --- a/src/core/hle/service/nvdrv/nvdrv.cpp +++ b/src/core/hle/service/nvdrv/nvdrv.cpp | |||
| @@ -1,5 +1,6 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: 2021 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-FileCopyrightText: 2021 Skyline Team and Contributors |
| 3 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | 4 | ||
| 4 | #include <utility> | 5 | #include <utility> |
| 5 | 6 | ||
| @@ -8,6 +9,7 @@ | |||
| 8 | #include "core/hle/ipc_helpers.h" | 9 | #include "core/hle/ipc_helpers.h" |
| 9 | #include "core/hle/kernel/k_event.h" | 10 | #include "core/hle/kernel/k_event.h" |
| 10 | #include "core/hle/kernel/k_writable_event.h" | 11 | #include "core/hle/kernel/k_writable_event.h" |
| 12 | #include "core/hle/service/nvdrv/core/container.h" | ||
| 11 | #include "core/hle/service/nvdrv/devices/nvdevice.h" | 13 | #include "core/hle/service/nvdrv/devices/nvdevice.h" |
| 12 | #include "core/hle/service/nvdrv/devices/nvdisp_disp0.h" | 14 | #include "core/hle/service/nvdrv/devices/nvdisp_disp0.h" |
| 13 | #include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h" | 15 | #include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h" |
| @@ -15,17 +17,31 @@ | |||
| 15 | #include "core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h" | 17 | #include "core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h" |
| 16 | #include "core/hle/service/nvdrv/devices/nvhost_gpu.h" | 18 | #include "core/hle/service/nvdrv/devices/nvhost_gpu.h" |
| 17 | #include "core/hle/service/nvdrv/devices/nvhost_nvdec.h" | 19 | #include "core/hle/service/nvdrv/devices/nvhost_nvdec.h" |
| 20 | #include "core/hle/service/nvdrv/devices/nvhost_nvdec_common.h" | ||
| 18 | #include "core/hle/service/nvdrv/devices/nvhost_nvjpg.h" | 21 | #include "core/hle/service/nvdrv/devices/nvhost_nvjpg.h" |
| 19 | #include "core/hle/service/nvdrv/devices/nvhost_vic.h" | 22 | #include "core/hle/service/nvdrv/devices/nvhost_vic.h" |
| 20 | #include "core/hle/service/nvdrv/devices/nvmap.h" | 23 | #include "core/hle/service/nvdrv/devices/nvmap.h" |
| 21 | #include "core/hle/service/nvdrv/nvdrv.h" | 24 | #include "core/hle/service/nvdrv/nvdrv.h" |
| 22 | #include "core/hle/service/nvdrv/nvdrv_interface.h" | 25 | #include "core/hle/service/nvdrv/nvdrv_interface.h" |
| 23 | #include "core/hle/service/nvdrv/nvmemp.h" | 26 | #include "core/hle/service/nvdrv/nvmemp.h" |
| 24 | #include "core/hle/service/nvdrv/syncpoint_manager.h" | ||
| 25 | #include "core/hle/service/nvflinger/nvflinger.h" | 27 | #include "core/hle/service/nvflinger/nvflinger.h" |
| 28 | #include "video_core/gpu.h" | ||
| 26 | 29 | ||
| 27 | namespace Service::Nvidia { | 30 | namespace Service::Nvidia { |
| 28 | 31 | ||
| 32 | EventInterface::EventInterface(Module& module_) : module{module_}, guard{}, on_signal{} {} | ||
| 33 | |||
| 34 | EventInterface::~EventInterface() = default; | ||
| 35 | |||
| 36 | Kernel::KEvent* EventInterface::CreateEvent(std::string name) { | ||
| 37 | Kernel::KEvent* new_event = module.service_context.CreateEvent(std::move(name)); | ||
| 38 | return new_event; | ||
| 39 | } | ||
| 40 | |||
| 41 | void EventInterface::FreeEvent(Kernel::KEvent* event) { | ||
| 42 | module.service_context.CloseEvent(event); | ||
| 43 | } | ||
| 44 | |||
| 29 | void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger& nvflinger, | 45 | void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger& nvflinger, |
| 30 | Core::System& system) { | 46 | Core::System& system) { |
| 31 | auto module_ = std::make_shared<Module>(system); | 47 | auto module_ = std::make_shared<Module>(system); |
| @@ -38,34 +54,54 @@ void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger | |||
| 38 | } | 54 | } |
| 39 | 55 | ||
| 40 | Module::Module(Core::System& system) | 56 | Module::Module(Core::System& system) |
| 41 | : syncpoint_manager{system.GPU()}, service_context{system, "nvdrv"} { | 57 | : service_context{system, "nvdrv"}, events_interface{*this}, container{system.Host1x()} { |
| 42 | for (u32 i = 0; i < MaxNvEvents; i++) { | 58 | builders["/dev/nvhost-as-gpu"] = [this, &system](DeviceFD fd) { |
| 43 | events_interface.events[i].event = | 59 | std::shared_ptr<Devices::nvdevice> device = |
| 44 | service_context.CreateEvent(fmt::format("NVDRV::NvEvent_{}", i)); | 60 | std::make_shared<Devices::nvhost_as_gpu>(system, *this, container); |
| 45 | events_interface.status[i] = EventState::Free; | 61 | return open_files.emplace(fd, device).first; |
| 46 | events_interface.registered[i] = false; | 62 | }; |
| 47 | } | 63 | builders["/dev/nvhost-gpu"] = [this, &system](DeviceFD fd) { |
| 48 | auto nvmap_dev = std::make_shared<Devices::nvmap>(system); | 64 | std::shared_ptr<Devices::nvdevice> device = |
| 49 | devices["/dev/nvhost-as-gpu"] = std::make_shared<Devices::nvhost_as_gpu>(system, nvmap_dev); | 65 | std::make_shared<Devices::nvhost_gpu>(system, events_interface, container); |
| 50 | devices["/dev/nvhost-gpu"] = | 66 | return open_files.emplace(fd, device).first; |
| 51 | std::make_shared<Devices::nvhost_gpu>(system, nvmap_dev, syncpoint_manager); | 67 | }; |
| 52 | devices["/dev/nvhost-ctrl-gpu"] = std::make_shared<Devices::nvhost_ctrl_gpu>(system); | 68 | builders["/dev/nvhost-ctrl-gpu"] = [this, &system](DeviceFD fd) { |
| 53 | devices["/dev/nvmap"] = nvmap_dev; | 69 | std::shared_ptr<Devices::nvdevice> device = |
| 54 | devices["/dev/nvdisp_disp0"] = std::make_shared<Devices::nvdisp_disp0>(system, nvmap_dev); | 70 | std::make_shared<Devices::nvhost_ctrl_gpu>(system, events_interface); |
| 55 | devices["/dev/nvhost-ctrl"] = | 71 | return open_files.emplace(fd, device).first; |
| 56 | std::make_shared<Devices::nvhost_ctrl>(system, events_interface, syncpoint_manager); | 72 | }; |
| 57 | devices["/dev/nvhost-nvdec"] = | 73 | builders["/dev/nvmap"] = [this, &system](DeviceFD fd) { |
| 58 | std::make_shared<Devices::nvhost_nvdec>(system, nvmap_dev, syncpoint_manager); | 74 | std::shared_ptr<Devices::nvdevice> device = |
| 59 | devices["/dev/nvhost-nvjpg"] = std::make_shared<Devices::nvhost_nvjpg>(system); | 75 | std::make_shared<Devices::nvmap>(system, container); |
| 60 | devices["/dev/nvhost-vic"] = | 76 | return open_files.emplace(fd, device).first; |
| 61 | std::make_shared<Devices::nvhost_vic>(system, nvmap_dev, syncpoint_manager); | 77 | }; |
| 78 | builders["/dev/nvdisp_disp0"] = [this, &system](DeviceFD fd) { | ||
| 79 | std::shared_ptr<Devices::nvdevice> device = | ||
| 80 | std::make_shared<Devices::nvdisp_disp0>(system, container); | ||
| 81 | return open_files.emplace(fd, device).first; | ||
| 82 | }; | ||
| 83 | builders["/dev/nvhost-ctrl"] = [this, &system](DeviceFD fd) { | ||
| 84 | std::shared_ptr<Devices::nvdevice> device = | ||
| 85 | std::make_shared<Devices::nvhost_ctrl>(system, events_interface, container); | ||
| 86 | return open_files.emplace(fd, device).first; | ||
| 87 | }; | ||
| 88 | builders["/dev/nvhost-nvdec"] = [this, &system](DeviceFD fd) { | ||
| 89 | std::shared_ptr<Devices::nvdevice> device = | ||
| 90 | std::make_shared<Devices::nvhost_nvdec>(system, container); | ||
| 91 | return open_files.emplace(fd, device).first; | ||
| 92 | }; | ||
| 93 | builders["/dev/nvhost-nvjpg"] = [this, &system](DeviceFD fd) { | ||
| 94 | std::shared_ptr<Devices::nvdevice> device = std::make_shared<Devices::nvhost_nvjpg>(system); | ||
| 95 | return open_files.emplace(fd, device).first; | ||
| 96 | }; | ||
| 97 | builders["/dev/nvhost-vic"] = [this, &system](DeviceFD fd) { | ||
| 98 | std::shared_ptr<Devices::nvdevice> device = | ||
| 99 | std::make_shared<Devices::nvhost_vic>(system, container); | ||
| 100 | return open_files.emplace(fd, device).first; | ||
| 101 | }; | ||
| 62 | } | 102 | } |
| 63 | 103 | ||
| 64 | Module::~Module() { | 104 | Module::~Module() {} |
| 65 | for (u32 i = 0; i < MaxNvEvents; i++) { | ||
| 66 | service_context.CloseEvent(events_interface.events[i].event); | ||
| 67 | } | ||
| 68 | } | ||
| 69 | 105 | ||
| 70 | NvResult Module::VerifyFD(DeviceFD fd) const { | 106 | NvResult Module::VerifyFD(DeviceFD fd) const { |
| 71 | if (fd < 0) { | 107 | if (fd < 0) { |
| @@ -82,18 +118,18 @@ NvResult Module::VerifyFD(DeviceFD fd) const { | |||
| 82 | } | 118 | } |
| 83 | 119 | ||
| 84 | DeviceFD Module::Open(const std::string& device_name) { | 120 | DeviceFD Module::Open(const std::string& device_name) { |
| 85 | if (devices.find(device_name) == devices.end()) { | 121 | auto it = builders.find(device_name); |
| 122 | if (it == builders.end()) { | ||
| 86 | LOG_ERROR(Service_NVDRV, "Trying to open unknown device {}", device_name); | 123 | LOG_ERROR(Service_NVDRV, "Trying to open unknown device {}", device_name); |
| 87 | return INVALID_NVDRV_FD; | 124 | return INVALID_NVDRV_FD; |
| 88 | } | 125 | } |
| 89 | 126 | ||
| 90 | auto device = devices[device_name]; | ||
| 91 | const DeviceFD fd = next_fd++; | 127 | const DeviceFD fd = next_fd++; |
| 128 | auto& builder = it->second; | ||
| 129 | auto device = builder(fd)->second; | ||
| 92 | 130 | ||
| 93 | device->OnOpen(fd); | 131 | device->OnOpen(fd); |
| 94 | 132 | ||
| 95 | open_files[fd] = std::move(device); | ||
| 96 | |||
| 97 | return fd; | 133 | return fd; |
| 98 | } | 134 | } |
| 99 | 135 | ||
| @@ -168,22 +204,24 @@ NvResult Module::Close(DeviceFD fd) { | |||
| 168 | return NvResult::Success; | 204 | return NvResult::Success; |
| 169 | } | 205 | } |
| 170 | 206 | ||
| 171 | void Module::SignalSyncpt(const u32 syncpoint_id, const u32 value) { | 207 | NvResult Module::QueryEvent(DeviceFD fd, u32 event_id, Kernel::KEvent*& event) { |
| 172 | for (u32 i = 0; i < MaxNvEvents; i++) { | 208 | if (fd < 0) { |
| 173 | if (events_interface.assigned_syncpt[i] == syncpoint_id && | 209 | LOG_ERROR(Service_NVDRV, "Invalid DeviceFD={}!", fd); |
| 174 | events_interface.assigned_value[i] == value) { | 210 | return NvResult::InvalidState; |
| 175 | events_interface.LiberateEvent(i); | ||
| 176 | events_interface.events[i].event->GetWritableEvent().Signal(); | ||
| 177 | } | ||
| 178 | } | 211 | } |
| 179 | } | ||
| 180 | 212 | ||
| 181 | Kernel::KReadableEvent& Module::GetEvent(const u32 event_id) { | 213 | const auto itr = open_files.find(fd); |
| 182 | return events_interface.events[event_id].event->GetReadableEvent(); | ||
| 183 | } | ||
| 184 | 214 | ||
| 185 | Kernel::KWritableEvent& Module::GetEventWriteable(const u32 event_id) { | 215 | if (itr == open_files.end()) { |
| 186 | return events_interface.events[event_id].event->GetWritableEvent(); | 216 | LOG_ERROR(Service_NVDRV, "Could not find DeviceFD={}!", fd); |
| 217 | return NvResult::NotImplemented; | ||
| 218 | } | ||
| 219 | |||
| 220 | event = itr->second->QueryEvent(event_id); | ||
| 221 | if (!event) { | ||
| 222 | return NvResult::BadParameter; | ||
| 223 | } | ||
| 224 | return NvResult::Success; | ||
| 187 | } | 225 | } |
| 188 | 226 | ||
| 189 | } // namespace Service::Nvidia | 227 | } // namespace Service::Nvidia |
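The Module constructor above no longer builds every device eagerly; it registers one builder lambda per device node, and Module::Open runs the matching builder so each opened fd gets its own freshly constructed device. A condensed sketch of that pattern with toy types (the real code dispatches to the nvdevice hierarchy and threads through system, events_interface and the NvCore container):

```cpp
#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>

// Toy stand-ins for the real device classes.
struct Device {
    virtual ~Device() = default;
    virtual void OnOpen(int fd) = 0;
};

struct NvMapDevice : Device {
    void OnOpen(int fd) override { std::cout << "nvmap opened as fd " << fd << '\n'; }
};

class Module {
public:
    Module() {
        // Each node name maps to a lambda that builds the device on demand and
        // registers it under the fd, like the builders[...] table in nvdrv.cpp.
        builders["/dev/nvmap"] = [this](int fd) {
            return open_files.emplace(fd, std::make_shared<NvMapDevice>()).first;
        };
    }

    int Open(const std::string& name) {
        const auto it = builders.find(name);
        if (it == builders.end()) {
            return -1; // unknown device node
        }
        const int fd = next_fd++;
        auto device = it->second(fd)->second;
        device->OnOpen(fd);
        return fd;
    }

private:
    using Files = std::unordered_map<int, std::shared_ptr<Device>>;
    int next_fd = 1;
    Files open_files;
    std::unordered_map<std::string, std::function<Files::iterator(int)>> builders;
};

int main() {
    Module module;
    return module.Open("/dev/nvmap") == 1 ? 0 : 1;
}
```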
diff --git a/src/core/hle/service/nvdrv/nvdrv.h b/src/core/hle/service/nvdrv/nvdrv.h index c929e5106..146d046a9 100644 --- a/src/core/hle/service/nvdrv/nvdrv.h +++ b/src/core/hle/service/nvdrv/nvdrv.h | |||
| @@ -1,16 +1,20 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: 2021 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-FileCopyrightText: 2021 Skyline Team and Contributors |
| 3 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | 4 | ||
| 4 | #pragma once | 5 | #pragma once |
| 5 | 6 | ||
| 7 | #include <functional> | ||
| 8 | #include <list> | ||
| 6 | #include <memory> | 9 | #include <memory> |
| 10 | #include <string> | ||
| 7 | #include <unordered_map> | 11 | #include <unordered_map> |
| 8 | #include <vector> | 12 | #include <vector> |
| 9 | 13 | ||
| 10 | #include "common/common_types.h" | 14 | #include "common/common_types.h" |
| 11 | #include "core/hle/service/kernel_helpers.h" | 15 | #include "core/hle/service/kernel_helpers.h" |
| 16 | #include "core/hle/service/nvdrv/core/container.h" | ||
| 12 | #include "core/hle/service/nvdrv/nvdata.h" | 17 | #include "core/hle/service/nvdrv/nvdata.h" |
| 13 | #include "core/hle/service/nvdrv/syncpoint_manager.h" | ||
| 14 | #include "core/hle/service/nvflinger/ui/fence.h" | 18 | #include "core/hle/service/nvflinger/ui/fence.h" |
| 15 | #include "core/hle/service/service.h" | 19 | #include "core/hle/service/service.h" |
| 16 | 20 | ||
| @@ -28,81 +32,31 @@ class NVFlinger; | |||
| 28 | 32 | ||
| 29 | namespace Service::Nvidia { | 33 | namespace Service::Nvidia { |
| 30 | 34 | ||
| 35 | namespace NvCore { | ||
| 36 | class Container; | ||
| 31 | class SyncpointManager; | 37 | class SyncpointManager; |
| 38 | } // namespace NvCore | ||
| 32 | 39 | ||
| 33 | namespace Devices { | 40 | namespace Devices { |
| 34 | class nvdevice; | 41 | class nvdevice; |
| 35 | } | 42 | class nvhost_ctrl; |
| 43 | } // namespace Devices | ||
| 36 | 44 | ||
| 37 | /// Represents an Nvidia event | 45 | class Module; |
| 38 | struct NvEvent { | ||
| 39 | Kernel::KEvent* event{}; | ||
| 40 | NvFence fence{}; | ||
| 41 | }; | ||
| 42 | 46 | ||
| 43 | struct EventInterface { | 47 | class EventInterface { |
| 44 | // Mask representing currently busy events | 48 | public: |
| 45 | u64 events_mask{}; | 49 | explicit EventInterface(Module& module_); |
| 46 | // Each kernel event associated to an NV event | 50 | ~EventInterface(); |
| 47 | std::array<NvEvent, MaxNvEvents> events; | 51 | |
| 48 | // The status of the current NVEvent | 52 | Kernel::KEvent* CreateEvent(std::string name); |
| 49 | std::array<EventState, MaxNvEvents> status{}; | 53 | |
| 50 | // Tells if an NVEvent is registered or not | 54 | void FreeEvent(Kernel::KEvent* event); |
| 51 | std::array<bool, MaxNvEvents> registered{}; | 55 | |
| 52 | // Tells the NVEvent that it has failed. | 56 | private: |
| 53 | std::array<bool, MaxNvEvents> failed{}; | 57 | Module& module; |
| 54 | // When an NVEvent is waiting on GPU interrupt, this is the sync_point | 58 | std::mutex guard; |
| 55 | // associated with it. | 59 | std::list<Devices::nvhost_ctrl*> on_signal; |
| 56 | std::array<u32, MaxNvEvents> assigned_syncpt{}; | ||
| 57 | // This is the value of the GPU interrupt for which the NVEvent is waiting | ||
| 58 | // for. | ||
| 59 | std::array<u32, MaxNvEvents> assigned_value{}; | ||
| 60 | // Constant to denote an unasigned syncpoint. | ||
| 61 | static constexpr u32 unassigned_syncpt = 0xFFFFFFFF; | ||
| 62 | std::optional<u32> GetFreeEvent() const { | ||
| 63 | u64 mask = events_mask; | ||
| 64 | for (u32 i = 0; i < MaxNvEvents; i++) { | ||
| 65 | const bool is_free = (mask & 0x1) == 0; | ||
| 66 | if (is_free) { | ||
| 67 | if (status[i] == EventState::Registered || status[i] == EventState::Free) { | ||
| 68 | return {i}; | ||
| 69 | } | ||
| 70 | } | ||
| 71 | mask = mask >> 1; | ||
| 72 | } | ||
| 73 | return std::nullopt; | ||
| 74 | } | ||
| 75 | void SetEventStatus(const u32 event_id, EventState new_status) { | ||
| 76 | EventState old_status = status[event_id]; | ||
| 77 | if (old_status == new_status) { | ||
| 78 | return; | ||
| 79 | } | ||
| 80 | status[event_id] = new_status; | ||
| 81 | if (new_status == EventState::Registered) { | ||
| 82 | registered[event_id] = true; | ||
| 83 | } | ||
| 84 | if (new_status == EventState::Waiting || new_status == EventState::Busy) { | ||
| 85 | events_mask |= (1ULL << event_id); | ||
| 86 | } | ||
| 87 | } | ||
| 88 | void RegisterEvent(const u32 event_id) { | ||
| 89 | registered[event_id] = true; | ||
| 90 | if (status[event_id] == EventState::Free) { | ||
| 91 | status[event_id] = EventState::Registered; | ||
| 92 | } | ||
| 93 | } | ||
| 94 | void UnregisterEvent(const u32 event_id) { | ||
| 95 | registered[event_id] = false; | ||
| 96 | if (status[event_id] == EventState::Registered) { | ||
| 97 | status[event_id] = EventState::Free; | ||
| 98 | } | ||
| 99 | } | ||
| 100 | void LiberateEvent(const u32 event_id) { | ||
| 101 | status[event_id] = registered[event_id] ? EventState::Registered : EventState::Free; | ||
| 102 | events_mask &= ~(1ULL << event_id); | ||
| 103 | assigned_syncpt[event_id] = unassigned_syncpt; | ||
| 104 | assigned_value[event_id] = 0; | ||
| 105 | } | ||
| 106 | }; | 60 | }; |
| 107 | 61 | ||
| 108 | class Module final { | 62 | class Module final { |
| @@ -112,9 +66,9 @@ public: | |||
| 112 | 66 | ||
| 113 | /// Returns a pointer to one of the available devices, identified by its name. | 67 | /// Returns a pointer to one of the available devices, identified by its name. |
| 114 | template <typename T> | 68 | template <typename T> |
| 115 | std::shared_ptr<T> GetDevice(const std::string& name) { | 69 | std::shared_ptr<T> GetDevice(DeviceFD fd) { |
| 116 | auto itr = devices.find(name); | 70 | auto itr = open_files.find(fd); |
| 117 | if (itr == devices.end()) | 71 | if (itr == open_files.end()) |
| 118 | return nullptr; | 72 | return nullptr; |
| 119 | return std::static_pointer_cast<T>(itr->second); | 73 | return std::static_pointer_cast<T>(itr->second); |
| 120 | } | 74 | } |
| @@ -137,28 +91,27 @@ public: | |||
| 137 | /// Closes a device file descriptor and returns operation success. | 91 | /// Closes a device file descriptor and returns operation success. |
| 138 | NvResult Close(DeviceFD fd); | 92 | NvResult Close(DeviceFD fd); |
| 139 | 93 | ||
| 140 | void SignalSyncpt(const u32 syncpoint_id, const u32 value); | 94 | NvResult QueryEvent(DeviceFD fd, u32 event_id, Kernel::KEvent*& event); |
| 141 | |||
| 142 | Kernel::KReadableEvent& GetEvent(u32 event_id); | ||
| 143 | |||
| 144 | Kernel::KWritableEvent& GetEventWriteable(u32 event_id); | ||
| 145 | 95 | ||
| 146 | private: | 96 | private: |
| 147 | /// Manages syncpoints on the host | 97 | friend class EventInterface; |
| 148 | SyncpointManager syncpoint_manager; | 98 | friend class Service::NVFlinger::NVFlinger; |
| 149 | 99 | ||
| 150 | /// Id to use for the next open file descriptor. | 100 | /// Id to use for the next open file descriptor. |
| 151 | DeviceFD next_fd = 1; | 101 | DeviceFD next_fd = 1; |
| 152 | 102 | ||
| 103 | using FilesContainerType = std::unordered_map<DeviceFD, std::shared_ptr<Devices::nvdevice>>; | ||
| 153 | /// Mapping of file descriptors to the devices they reference. | 104 | /// Mapping of file descriptors to the devices they reference. |
| 154 | std::unordered_map<DeviceFD, std::shared_ptr<Devices::nvdevice>> open_files; | 105 | FilesContainerType open_files; |
| 155 | 106 | ||
| 156 | /// Mapping of device node names to their implementation. | 107 | KernelHelpers::ServiceContext service_context; |
| 157 | std::unordered_map<std::string, std::shared_ptr<Devices::nvdevice>> devices; | ||
| 158 | 108 | ||
| 159 | EventInterface events_interface; | 109 | EventInterface events_interface; |
| 160 | 110 | ||
| 161 | KernelHelpers::ServiceContext service_context; | 111 | /// Manages syncpoints on the host |
| 112 | NvCore::Container container; | ||
| 113 | |||
| 114 | std::unordered_map<std::string, std::function<FilesContainerType::iterator(DeviceFD)>> builders; | ||
| 162 | }; | 115 | }; |
| 163 | 116 | ||
| 164 | /// Registers all NVDRV services with the specified service manager. | 117 | /// Registers all NVDRV services with the specified service manager. |
diff --git a/src/core/hle/service/nvdrv/nvdrv_interface.cpp b/src/core/hle/service/nvdrv/nvdrv_interface.cpp index b5a980384..edbdfee43 100644 --- a/src/core/hle/service/nvdrv/nvdrv_interface.cpp +++ b/src/core/hle/service/nvdrv/nvdrv_interface.cpp | |||
| @@ -1,10 +1,12 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: 2021 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-FileCopyrightText: 2021 Skyline Team and Contributors |
| 3 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | 4 | ||
| 4 | #include <cinttypes> | 5 | #include <cinttypes> |
| 5 | #include "common/logging/log.h" | 6 | #include "common/logging/log.h" |
| 6 | #include "core/core.h" | 7 | #include "core/core.h" |
| 7 | #include "core/hle/ipc_helpers.h" | 8 | #include "core/hle/ipc_helpers.h" |
| 9 | #include "core/hle/kernel/k_event.h" | ||
| 8 | #include "core/hle/kernel/k_readable_event.h" | 10 | #include "core/hle/kernel/k_readable_event.h" |
| 9 | #include "core/hle/service/nvdrv/nvdata.h" | 11 | #include "core/hle/service/nvdrv/nvdata.h" |
| 10 | #include "core/hle/service/nvdrv/nvdrv.h" | 12 | #include "core/hle/service/nvdrv/nvdrv.h" |
| @@ -12,10 +14,6 @@ | |||
| 12 | 14 | ||
| 13 | namespace Service::Nvidia { | 15 | namespace Service::Nvidia { |
| 14 | 16 | ||
| 15 | void NVDRV::SignalGPUInterruptSyncpt(const u32 syncpoint_id, const u32 value) { | ||
| 16 | nvdrv->SignalSyncpt(syncpoint_id, value); | ||
| 17 | } | ||
| 18 | |||
| 19 | void NVDRV::Open(Kernel::HLERequestContext& ctx) { | 17 | void NVDRV::Open(Kernel::HLERequestContext& ctx) { |
| 20 | LOG_DEBUG(Service_NVDRV, "called"); | 18 | LOG_DEBUG(Service_NVDRV, "called"); |
| 21 | IPC::ResponseBuilder rb{ctx, 4}; | 19 | IPC::ResponseBuilder rb{ctx, 4}; |
| @@ -164,8 +162,7 @@ void NVDRV::Initialize(Kernel::HLERequestContext& ctx) { | |||
| 164 | void NVDRV::QueryEvent(Kernel::HLERequestContext& ctx) { | 162 | void NVDRV::QueryEvent(Kernel::HLERequestContext& ctx) { |
| 165 | IPC::RequestParser rp{ctx}; | 163 | IPC::RequestParser rp{ctx}; |
| 166 | const auto fd = rp.Pop<DeviceFD>(); | 164 | const auto fd = rp.Pop<DeviceFD>(); |
| 167 | const auto event_id = rp.Pop<u32>() & 0x00FF; | 165 | const auto event_id = rp.Pop<u32>(); |
| 168 | LOG_WARNING(Service_NVDRV, "(STUBBED) called, fd={:X}, event_id={:X}", fd, event_id); | ||
| 169 | 166 | ||
| 170 | if (!is_initialized) { | 167 | if (!is_initialized) { |
| 171 | ServiceError(ctx, NvResult::NotInitialized); | 168 | ServiceError(ctx, NvResult::NotInitialized); |
| @@ -173,24 +170,20 @@ void NVDRV::QueryEvent(Kernel::HLERequestContext& ctx) { | |||
| 173 | return; | 170 | return; |
| 174 | } | 171 | } |
| 175 | 172 | ||
| 176 | const auto nv_result = nvdrv->VerifyFD(fd); | 173 | Kernel::KEvent* event = nullptr; |
| 177 | if (nv_result != NvResult::Success) { | 174 | NvResult result = nvdrv->QueryEvent(fd, event_id, event); |
| 178 | LOG_ERROR(Service_NVDRV, "Invalid FD specified DeviceFD={}!", fd); | ||
| 179 | ServiceError(ctx, nv_result); | ||
| 180 | return; | ||
| 181 | } | ||
| 182 | 175 | ||
| 183 | if (event_id < MaxNvEvents) { | 176 | if (result == NvResult::Success) { |
| 184 | IPC::ResponseBuilder rb{ctx, 3, 1}; | 177 | IPC::ResponseBuilder rb{ctx, 3, 1}; |
| 185 | rb.Push(ResultSuccess); | 178 | rb.Push(ResultSuccess); |
| 186 | auto& event = nvdrv->GetEvent(event_id); | 179 | auto& readable_event = event->GetReadableEvent(); |
| 187 | event.Clear(); | 180 | rb.PushCopyObjects(readable_event); |
| 188 | rb.PushCopyObjects(event); | ||
| 189 | rb.PushEnum(NvResult::Success); | 181 | rb.PushEnum(NvResult::Success); |
| 190 | } else { | 182 | } else { |
| 183 | LOG_ERROR(Service_NVDRV, "Invalid event request!"); | ||
| 191 | IPC::ResponseBuilder rb{ctx, 3}; | 184 | IPC::ResponseBuilder rb{ctx, 3}; |
| 192 | rb.Push(ResultSuccess); | 185 | rb.Push(ResultSuccess); |
| 193 | rb.PushEnum(NvResult::BadParameter); | 186 | rb.PushEnum(result); |
| 194 | } | 187 | } |
| 195 | } | 188 | } |
| 196 | 189 | ||
diff --git a/src/core/hle/service/nvdrv/nvdrv_interface.h b/src/core/hle/service/nvdrv/nvdrv_interface.h index cbd37b52b..cd58a4f35 100644 --- a/src/core/hle/service/nvdrv/nvdrv_interface.h +++ b/src/core/hle/service/nvdrv/nvdrv_interface.h | |||
| @@ -18,8 +18,6 @@ public: | |||
| 18 | explicit NVDRV(Core::System& system_, std::shared_ptr<Module> nvdrv_, const char* name); | 18 | explicit NVDRV(Core::System& system_, std::shared_ptr<Module> nvdrv_, const char* name); |
| 19 | ~NVDRV() override; | 19 | ~NVDRV() override; |
| 20 | 20 | ||
| 21 | void SignalGPUInterruptSyncpt(u32 syncpoint_id, u32 value); | ||
| 22 | |||
| 23 | private: | 21 | private: |
| 24 | void Open(Kernel::HLERequestContext& ctx); | 22 | void Open(Kernel::HLERequestContext& ctx); |
| 25 | void Ioctl1(Kernel::HLERequestContext& ctx); | 23 | void Ioctl1(Kernel::HLERequestContext& ctx); |
diff --git a/src/core/hle/service/nvdrv/syncpoint_manager.cpp b/src/core/hle/service/nvdrv/syncpoint_manager.cpp deleted file mode 100644 index a6fa943e8..000000000 --- a/src/core/hle/service/nvdrv/syncpoint_manager.cpp +++ /dev/null | |||
| @@ -1,38 +0,0 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #include "common/assert.h" | ||
| 5 | #include "core/hle/service/nvdrv/syncpoint_manager.h" | ||
| 6 | #include "video_core/gpu.h" | ||
| 7 | |||
| 8 | namespace Service::Nvidia { | ||
| 9 | |||
| 10 | SyncpointManager::SyncpointManager(Tegra::GPU& gpu_) : gpu{gpu_} {} | ||
| 11 | |||
| 12 | SyncpointManager::~SyncpointManager() = default; | ||
| 13 | |||
| 14 | u32 SyncpointManager::RefreshSyncpoint(u32 syncpoint_id) { | ||
| 15 | syncpoints[syncpoint_id].min = gpu.GetSyncpointValue(syncpoint_id); | ||
| 16 | return GetSyncpointMin(syncpoint_id); | ||
| 17 | } | ||
| 18 | |||
| 19 | u32 SyncpointManager::AllocateSyncpoint() { | ||
| 20 | for (u32 syncpoint_id = 1; syncpoint_id < MaxSyncPoints; syncpoint_id++) { | ||
| 21 | if (!syncpoints[syncpoint_id].is_allocated) { | ||
| 22 | syncpoints[syncpoint_id].is_allocated = true; | ||
| 23 | return syncpoint_id; | ||
| 24 | } | ||
| 25 | } | ||
| 26 | ASSERT_MSG(false, "No more available syncpoints!"); | ||
| 27 | return {}; | ||
| 28 | } | ||
| 29 | |||
| 30 | u32 SyncpointManager::IncreaseSyncpoint(u32 syncpoint_id, u32 value) { | ||
| 31 | for (u32 index = 0; index < value; ++index) { | ||
| 32 | syncpoints[syncpoint_id].max.fetch_add(1, std::memory_order_relaxed); | ||
| 33 | } | ||
| 34 | |||
| 35 | return GetSyncpointMax(syncpoint_id); | ||
| 36 | } | ||
| 37 | |||
| 38 | } // namespace Service::Nvidia | ||
diff --git a/src/core/hle/service/nvdrv/syncpoint_manager.h b/src/core/hle/service/nvdrv/syncpoint_manager.h deleted file mode 100644 index 7f080f76e..000000000 --- a/src/core/hle/service/nvdrv/syncpoint_manager.h +++ /dev/null | |||
| @@ -1,84 +0,0 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <array> | ||
| 7 | #include <atomic> | ||
| 8 | |||
| 9 | #include "common/common_types.h" | ||
| 10 | #include "core/hle/service/nvdrv/nvdata.h" | ||
| 11 | |||
| 12 | namespace Tegra { | ||
| 13 | class GPU; | ||
| 14 | } | ||
| 15 | |||
| 16 | namespace Service::Nvidia { | ||
| 17 | |||
| 18 | class SyncpointManager final { | ||
| 19 | public: | ||
| 20 | explicit SyncpointManager(Tegra::GPU& gpu_); | ||
| 21 | ~SyncpointManager(); | ||
| 22 | |||
| 23 | /** | ||
| 24 | * Returns true if the specified syncpoint is expired for the given value. | ||
| 25 | * @param syncpoint_id Syncpoint ID to check. | ||
| 26 | * @param value Value to check against the specified syncpoint. | ||
| 27 | * @returns True if the specified syncpoint is expired for the given value, otherwise False. | ||
| 28 | */ | ||
| 29 | bool IsSyncpointExpired(u32 syncpoint_id, u32 value) const { | ||
| 30 | return (GetSyncpointMax(syncpoint_id) - value) >= (GetSyncpointMin(syncpoint_id) - value); | ||
| 31 | } | ||
| 32 | |||
| 33 | /** | ||
| 34 | * Gets the lower bound for the specified syncpoint. | ||
| 35 | * @param syncpoint_id Syncpoint ID to get the lower bound for. | ||
| 36 | * @returns The lower bound for the specified syncpoint. | ||
| 37 | */ | ||
| 38 | u32 GetSyncpointMin(u32 syncpoint_id) const { | ||
| 39 | return syncpoints.at(syncpoint_id).min.load(std::memory_order_relaxed); | ||
| 40 | } | ||
| 41 | |||
| 42 | /** | ||
| 42 | * Gets the upper bound for the specified syncpoint. | ||
| 44 | * @param syncpoint_id Syncpoint ID to get the upper bound for. | ||
| 45 | * @returns The upper bound for the specified syncpoint. | ||
| 46 | */ | ||
| 47 | u32 GetSyncpointMax(u32 syncpoint_id) const { | ||
| 48 | return syncpoints.at(syncpoint_id).max.load(std::memory_order_relaxed); | ||
| 49 | } | ||
| 50 | |||
| 51 | /** | ||
| 52 | * Refreshes the minimum value for the specified syncpoint. | ||
| 53 | * @param syncpoint_id Syncpoint ID to be refreshed. | ||
| 54 | * @returns The new syncpoint minimum value. | ||
| 55 | */ | ||
| 56 | u32 RefreshSyncpoint(u32 syncpoint_id); | ||
| 57 | |||
| 58 | /** | ||
| 58 | * Allocates a new syncpoint. | ||
| 60 | * @returns The syncpoint ID for the newly allocated syncpoint. | ||
| 61 | */ | ||
| 62 | u32 AllocateSyncpoint(); | ||
| 63 | |||
| 64 | /** | ||
| 65 | * Increases the maximum value for the specified syncpoint. | ||
| 66 | * @param syncpoint_id Syncpoint ID to be increased. | ||
| 67 | * @param value Value to increase the specified syncpoint by. | ||
| 68 | * @returns The new syncpoint maximum value. | ||
| 69 | */ | ||
| 70 | u32 IncreaseSyncpoint(u32 syncpoint_id, u32 value); | ||
| 71 | |||
| 72 | private: | ||
| 73 | struct Syncpoint { | ||
| 74 | std::atomic<u32> min; | ||
| 75 | std::atomic<u32> max; | ||
| 76 | std::atomic<bool> is_allocated; | ||
| 77 | }; | ||
| 78 | |||
| 79 | std::array<Syncpoint, MaxSyncPoints> syncpoints{}; | ||
| 80 | |||
| 81 | Tegra::GPU& gpu; | ||
| 82 | }; | ||
| 83 | |||
| 84 | } // namespace Service::Nvidia | ||
diff --git a/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp b/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp index 4b3d5efd6..1ce67c771 100644 --- a/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp +++ b/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp | |||
| @@ -5,15 +5,18 @@ | |||
| 5 | // https://cs.android.com/android/platform/superproject/+/android-5.1.1_r38:frameworks/native/libs/gui/BufferQueueConsumer.cpp | 5 | // https://cs.android.com/android/platform/superproject/+/android-5.1.1_r38:frameworks/native/libs/gui/BufferQueueConsumer.cpp |
| 6 | 6 | ||
| 7 | #include "common/logging/log.h" | 7 | #include "common/logging/log.h" |
| 8 | #include "core/hle/service/nvdrv/core/nvmap.h" | ||
| 8 | #include "core/hle/service/nvflinger/buffer_item.h" | 9 | #include "core/hle/service/nvflinger/buffer_item.h" |
| 9 | #include "core/hle/service/nvflinger/buffer_queue_consumer.h" | 10 | #include "core/hle/service/nvflinger/buffer_queue_consumer.h" |
| 10 | #include "core/hle/service/nvflinger/buffer_queue_core.h" | 11 | #include "core/hle/service/nvflinger/buffer_queue_core.h" |
| 11 | #include "core/hle/service/nvflinger/producer_listener.h" | 12 | #include "core/hle/service/nvflinger/producer_listener.h" |
| 13 | #include "core/hle/service/nvflinger/ui/graphic_buffer.h" | ||
| 12 | 14 | ||
| 13 | namespace Service::android { | 15 | namespace Service::android { |
| 14 | 16 | ||
| 15 | BufferQueueConsumer::BufferQueueConsumer(std::shared_ptr<BufferQueueCore> core_) | 17 | BufferQueueConsumer::BufferQueueConsumer(std::shared_ptr<BufferQueueCore> core_, |
| 16 | : core{std::move(core_)}, slots{core->slots} {} | 18 | Service::Nvidia::NvCore::NvMap& nvmap_) |
| 19 | : core{std::move(core_)}, slots{core->slots}, nvmap(nvmap_) {} | ||
| 17 | 20 | ||
| 18 | BufferQueueConsumer::~BufferQueueConsumer() = default; | 21 | BufferQueueConsumer::~BufferQueueConsumer() = default; |
| 19 | 22 | ||
| @@ -133,6 +136,8 @@ Status BufferQueueConsumer::ReleaseBuffer(s32 slot, u64 frame_number, const Fenc | |||
| 133 | 136 | ||
| 134 | slots[slot].buffer_state = BufferState::Free; | 137 | slots[slot].buffer_state = BufferState::Free; |
| 135 | 138 | ||
| 139 | nvmap.FreeHandle(slots[slot].graphic_buffer->BufferId(), true); | ||
| 140 | |||
| 136 | listener = core->connected_producer_listener; | 141 | listener = core->connected_producer_listener; |
| 137 | 142 | ||
| 138 | LOG_DEBUG(Service_NVFlinger, "releasing slot {}", slot); | 143 | LOG_DEBUG(Service_NVFlinger, "releasing slot {}", slot); |
diff --git a/src/core/hle/service/nvflinger/buffer_queue_consumer.h b/src/core/hle/service/nvflinger/buffer_queue_consumer.h index b598c314f..4ec06ca13 100644 --- a/src/core/hle/service/nvflinger/buffer_queue_consumer.h +++ b/src/core/hle/service/nvflinger/buffer_queue_consumer.h | |||
| @@ -13,6 +13,10 @@ | |||
| 13 | #include "core/hle/service/nvflinger/buffer_queue_defs.h" | 13 | #include "core/hle/service/nvflinger/buffer_queue_defs.h" |
| 14 | #include "core/hle/service/nvflinger/status.h" | 14 | #include "core/hle/service/nvflinger/status.h" |
| 15 | 15 | ||
| 16 | namespace Service::Nvidia::NvCore { | ||
| 17 | class NvMap; | ||
| 18 | } // namespace Service::Nvidia::NvCore | ||
| 19 | |||
| 16 | namespace Service::android { | 20 | namespace Service::android { |
| 17 | 21 | ||
| 18 | class BufferItem; | 22 | class BufferItem; |
| @@ -21,7 +25,8 @@ class IConsumerListener; | |||
| 21 | 25 | ||
| 22 | class BufferQueueConsumer final { | 26 | class BufferQueueConsumer final { |
| 23 | public: | 27 | public: |
| 24 | explicit BufferQueueConsumer(std::shared_ptr<BufferQueueCore> core_); | 28 | explicit BufferQueueConsumer(std::shared_ptr<BufferQueueCore> core_, |
| 29 | Service::Nvidia::NvCore::NvMap& nvmap_); | ||
| 25 | ~BufferQueueConsumer(); | 30 | ~BufferQueueConsumer(); |
| 26 | 31 | ||
| 27 | Status AcquireBuffer(BufferItem* out_buffer, std::chrono::nanoseconds expected_present); | 32 | Status AcquireBuffer(BufferItem* out_buffer, std::chrono::nanoseconds expected_present); |
| @@ -32,6 +37,7 @@ public: | |||
| 32 | private: | 37 | private: |
| 33 | std::shared_ptr<BufferQueueCore> core; | 38 | std::shared_ptr<BufferQueueCore> core; |
| 34 | BufferQueueDefs::SlotsType& slots; | 39 | BufferQueueDefs::SlotsType& slots; |
| 40 | Service::Nvidia::NvCore::NvMap& nvmap; | ||
| 35 | }; | 41 | }; |
| 36 | 42 | ||
| 37 | } // namespace Service::android | 43 | } // namespace Service::android |
diff --git a/src/core/hle/service/nvflinger/buffer_queue_producer.cpp b/src/core/hle/service/nvflinger/buffer_queue_producer.cpp index 337431488..d4ab23a10 100644 --- a/src/core/hle/service/nvflinger/buffer_queue_producer.cpp +++ b/src/core/hle/service/nvflinger/buffer_queue_producer.cpp | |||
| @@ -14,7 +14,7 @@ | |||
| 14 | #include "core/hle/kernel/k_writable_event.h" | 14 | #include "core/hle/kernel/k_writable_event.h" |
| 15 | #include "core/hle/kernel/kernel.h" | 15 | #include "core/hle/kernel/kernel.h" |
| 16 | #include "core/hle/service/kernel_helpers.h" | 16 | #include "core/hle/service/kernel_helpers.h" |
| 17 | #include "core/hle/service/nvdrv/nvdrv.h" | 17 | #include "core/hle/service/nvdrv/core/nvmap.h" |
| 18 | #include "core/hle/service/nvflinger/buffer_queue_core.h" | 18 | #include "core/hle/service/nvflinger/buffer_queue_core.h" |
| 19 | #include "core/hle/service/nvflinger/buffer_queue_producer.h" | 19 | #include "core/hle/service/nvflinger/buffer_queue_producer.h" |
| 20 | #include "core/hle/service/nvflinger/consumer_listener.h" | 20 | #include "core/hle/service/nvflinger/consumer_listener.h" |
| @@ -26,8 +26,10 @@ | |||
| 26 | namespace Service::android { | 26 | namespace Service::android { |
| 27 | 27 | ||
| 28 | BufferQueueProducer::BufferQueueProducer(Service::KernelHelpers::ServiceContext& service_context_, | 28 | BufferQueueProducer::BufferQueueProducer(Service::KernelHelpers::ServiceContext& service_context_, |
| 29 | std::shared_ptr<BufferQueueCore> buffer_queue_core_) | 29 | std::shared_ptr<BufferQueueCore> buffer_queue_core_, |
| 30 | : service_context{service_context_}, core{std::move(buffer_queue_core_)}, slots(core->slots) { | 30 | Service::Nvidia::NvCore::NvMap& nvmap_) |
| 31 | : service_context{service_context_}, core{std::move(buffer_queue_core_)}, slots(core->slots), | ||
| 32 | nvmap(nvmap_) { | ||
| 31 | buffer_wait_event = service_context.CreateEvent("BufferQueue:WaitEvent"); | 33 | buffer_wait_event = service_context.CreateEvent("BufferQueue:WaitEvent"); |
| 32 | } | 34 | } |
| 33 | 35 | ||
| @@ -530,6 +532,8 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input, | |||
| 530 | item.is_droppable = core->dequeue_buffer_cannot_block || async; | 532 | item.is_droppable = core->dequeue_buffer_cannot_block || async; |
| 531 | item.swap_interval = swap_interval; | 533 | item.swap_interval = swap_interval; |
| 532 | 534 | ||
| 535 | nvmap.DuplicateHandle(item.graphic_buffer->BufferId(), true); | ||
| 536 | |||
| 533 | sticky_transform = sticky_transform_; | 537 | sticky_transform = sticky_transform_; |
| 534 | 538 | ||
| 535 | if (core->queue.empty()) { | 539 | if (core->queue.empty()) { |
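Taken together with the ReleaseBuffer change in buffer_queue_consumer.cpp above, QueueBuffer now duplicates the nvmap handle behind the queued graphic buffer and the consumer frees that duplicate once the slot returns to Free, keeping the backing memory pinned exactly while a buffer is in flight. A toy refcount table illustrating that pairing (not the real NvCore::NvMap API):

```cpp
#include <cassert>
#include <cstdint>
#include <unordered_map>

// Toy refcount table standing in for NvCore::NvMap.
class NvMap {
public:
    void DuplicateHandle(std::uint32_t id) { ++refcounts[id]; }

    // Returns true when this call released the last reference.
    bool FreeHandle(std::uint32_t id) {
        auto it = refcounts.find(id);
        assert(it != refcounts.end());
        if (--it->second > 0) {
            return false;
        }
        refcounts.erase(it);
        return true;
    }

private:
    std::unordered_map<std::uint32_t, std::uint32_t> refcounts;
};

// Producer side: pin the buffer's handle while it travels through the queue.
void QueueBuffer(NvMap& nvmap, std::uint32_t buffer_id) {
    nvmap.DuplicateHandle(buffer_id);
    // ... enqueue the BufferItem for the consumer ...
}

// Consumer side: release the pin once the slot is handed back as Free.
void ReleaseBuffer(NvMap& nvmap, std::uint32_t buffer_id) {
    nvmap.FreeHandle(buffer_id);
    // ... mark the slot free and notify the producer listener ...
}

int main() {
    NvMap nvmap;
    nvmap.DuplicateHandle(7); // reference held by the application side
    QueueBuffer(nvmap, 7);
    ReleaseBuffer(nvmap, 7);  // queue reference dropped; the app reference remains
    assert(nvmap.FreeHandle(7));
}
```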
diff --git a/src/core/hle/service/nvflinger/buffer_queue_producer.h b/src/core/hle/service/nvflinger/buffer_queue_producer.h index 42d4722dc..0ba03a568 100644 --- a/src/core/hle/service/nvflinger/buffer_queue_producer.h +++ b/src/core/hle/service/nvflinger/buffer_queue_producer.h | |||
| @@ -31,6 +31,10 @@ namespace Service::KernelHelpers { | |||
| 31 | class ServiceContext; | 31 | class ServiceContext; |
| 32 | } // namespace Service::KernelHelpers | 32 | } // namespace Service::KernelHelpers |
| 33 | 33 | ||
| 34 | namespace Service::Nvidia::NvCore { | ||
| 35 | class NvMap; | ||
| 36 | } // namespace Service::Nvidia::NvCore | ||
| 37 | |||
| 34 | namespace Service::android { | 38 | namespace Service::android { |
| 35 | 39 | ||
| 36 | class BufferQueueCore; | 40 | class BufferQueueCore; |
| @@ -39,7 +43,8 @@ class IProducerListener; | |||
| 39 | class BufferQueueProducer final : public IBinder { | 43 | class BufferQueueProducer final : public IBinder { |
| 40 | public: | 44 | public: |
| 41 | explicit BufferQueueProducer(Service::KernelHelpers::ServiceContext& service_context_, | 45 | explicit BufferQueueProducer(Service::KernelHelpers::ServiceContext& service_context_, |
| 42 | std::shared_ptr<BufferQueueCore> buffer_queue_core_); | 46 | std::shared_ptr<BufferQueueCore> buffer_queue_core_, |
| 47 | Service::Nvidia::NvCore::NvMap& nvmap_); | ||
| 43 | ~BufferQueueProducer(); | 48 | ~BufferQueueProducer(); |
| 44 | 49 | ||
| 45 | void Transact(Kernel::HLERequestContext& ctx, android::TransactionId code, u32 flags) override; | 50 | void Transact(Kernel::HLERequestContext& ctx, android::TransactionId code, u32 flags) override; |
| @@ -78,6 +83,8 @@ private: | |||
| 78 | s32 next_callback_ticket{}; | 83 | s32 next_callback_ticket{}; |
| 79 | s32 current_callback_ticket{}; | 84 | s32 current_callback_ticket{}; |
| 80 | std::condition_variable_any callback_condition; | 85 | std::condition_variable_any callback_condition; |
| 86 | |||
| 87 | Service::Nvidia::NvCore::NvMap& nvmap; | ||
| 81 | }; | 88 | }; |
| 82 | 89 | ||
| 83 | } // namespace Service::android | 90 | } // namespace Service::android |
diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp index 4246e5e25..aa14d2cbc 100644 --- a/src/core/hle/service/nvflinger/nvflinger.cpp +++ b/src/core/hle/service/nvflinger/nvflinger.cpp | |||
| @@ -24,6 +24,8 @@ | |||
| 24 | #include "core/hle/service/vi/layer/vi_layer.h" | 24 | #include "core/hle/service/vi/layer/vi_layer.h" |
| 25 | #include "core/hle/service/vi/vi_results.h" | 25 | #include "core/hle/service/vi/vi_results.h" |
| 26 | #include "video_core/gpu.h" | 26 | #include "video_core/gpu.h" |
| 27 | #include "video_core/host1x/host1x.h" | ||
| 28 | #include "video_core/host1x/syncpoint_manager.h" | ||
| 27 | 29 | ||
| 28 | namespace Service::NVFlinger { | 30 | namespace Service::NVFlinger { |
| 29 | 31 | ||
| @@ -105,10 +107,15 @@ NVFlinger::~NVFlinger() { | |||
| 105 | display.GetLayer(layer).Core().NotifyShutdown(); | 107 | display.GetLayer(layer).Core().NotifyShutdown(); |
| 106 | } | 108 | } |
| 107 | } | 109 | } |
| 110 | |||
| 111 | if (nvdrv) { | ||
| 112 | nvdrv->Close(disp_fd); | ||
| 113 | } | ||
| 108 | } | 114 | } |
| 109 | 115 | ||
| 110 | void NVFlinger::SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance) { | 116 | void NVFlinger::SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance) { |
| 111 | nvdrv = std::move(instance); | 117 | nvdrv = std::move(instance); |
| 118 | disp_fd = nvdrv->Open("/dev/nvdisp_disp0"); | ||
| 112 | } | 119 | } |
| 113 | 120 | ||
| 114 | std::optional<u64> NVFlinger::OpenDisplay(std::string_view name) { | 121 | std::optional<u64> NVFlinger::OpenDisplay(std::string_view name) { |
| @@ -142,7 +149,7 @@ std::optional<u64> NVFlinger::CreateLayer(u64 display_id) { | |||
| 142 | 149 | ||
| 143 | void NVFlinger::CreateLayerAtId(VI::Display& display, u64 layer_id) { | 150 | void NVFlinger::CreateLayerAtId(VI::Display& display, u64 layer_id) { |
| 144 | const auto buffer_id = next_buffer_queue_id++; | 151 | const auto buffer_id = next_buffer_queue_id++; |
| 145 | display.CreateLayer(layer_id, buffer_id); | 152 | display.CreateLayer(layer_id, buffer_id, nvdrv->container); |
| 146 | } | 153 | } |
| 147 | 154 | ||
| 148 | void NVFlinger::CloseLayer(u64 layer_id) { | 155 | void NVFlinger::CloseLayer(u64 layer_id) { |
| @@ -262,30 +269,24 @@ void NVFlinger::Compose() { | |||
| 262 | return; // We are likely shutting down | 269 | return; // We are likely shutting down |
| 263 | } | 270 | } |
| 264 | 271 | ||
| 265 | auto& gpu = system.GPU(); | ||
| 266 | const auto& multi_fence = buffer.fence; | ||
| 267 | guard->unlock(); | ||
| 268 | for (u32 fence_id = 0; fence_id < multi_fence.num_fences; fence_id++) { | ||
| 269 | const auto& fence = multi_fence.fences[fence_id]; | ||
| 270 | gpu.WaitFence(fence.id, fence.value); | ||
| 271 | } | ||
| 272 | guard->lock(); | ||
| 273 | |||
| 274 | MicroProfileFlip(); | ||
| 275 | |||
| 276 | // Now send the buffer to the GPU for drawing. | 272 | // Now send the buffer to the GPU for drawing. |
| 277 | // TODO(Subv): Support more than just disp0. The display device selection is probably based | 273 | // TODO(Subv): Support more than just disp0. The display device selection is probably based |
| 278 | // on which display we're drawing (Default, Internal, External, etc) | 274 | // on which display we're drawing (Default, Internal, External, etc) |
| 279 | auto nvdisp = nvdrv->GetDevice<Nvidia::Devices::nvdisp_disp0>("/dev/nvdisp_disp0"); | 275 | auto nvdisp = nvdrv->GetDevice<Nvidia::Devices::nvdisp_disp0>(disp_fd); |
| 280 | ASSERT(nvdisp); | 276 | ASSERT(nvdisp); |
| 281 | 277 | ||
| 278 | guard->unlock(); | ||
| 282 | Common::Rectangle<int> crop_rect{ | 279 | Common::Rectangle<int> crop_rect{ |
| 283 | static_cast<int>(buffer.crop.Left()), static_cast<int>(buffer.crop.Top()), | 280 | static_cast<int>(buffer.crop.Left()), static_cast<int>(buffer.crop.Top()), |
| 284 | static_cast<int>(buffer.crop.Right()), static_cast<int>(buffer.crop.Bottom())}; | 281 | static_cast<int>(buffer.crop.Right()), static_cast<int>(buffer.crop.Bottom())}; |
| 285 | 282 | ||
| 286 | nvdisp->flip(igbp_buffer.BufferId(), igbp_buffer.Offset(), igbp_buffer.ExternalFormat(), | 283 | nvdisp->flip(igbp_buffer.BufferId(), igbp_buffer.Offset(), igbp_buffer.ExternalFormat(), |
| 287 | igbp_buffer.Width(), igbp_buffer.Height(), igbp_buffer.Stride(), | 284 | igbp_buffer.Width(), igbp_buffer.Height(), igbp_buffer.Stride(), |
| 288 | static_cast<android::BufferTransformFlags>(buffer.transform), crop_rect); | 285 | static_cast<android::BufferTransformFlags>(buffer.transform), crop_rect, |
| 286 | buffer.fence.fences, buffer.fence.num_fences); | ||
| 287 | |||
| 288 | MicroProfileFlip(); | ||
| 289 | guard->lock(); | ||
| 289 | 290 | ||
| 290 | swap_interval = buffer.swap_interval; | 291 | swap_interval = buffer.swap_interval; |
| 291 | 292 | ||
diff --git a/src/core/hle/service/nvflinger/nvflinger.h b/src/core/hle/service/nvflinger/nvflinger.h index 3bbe5d92b..b62615de2 100644 --- a/src/core/hle/service/nvflinger/nvflinger.h +++ b/src/core/hle/service/nvflinger/nvflinger.h | |||
| @@ -116,6 +116,7 @@ private: | |||
| 116 | void SplitVSync(std::stop_token stop_token); | 116 | void SplitVSync(std::stop_token stop_token); |
| 117 | 117 | ||
| 118 | std::shared_ptr<Nvidia::Module> nvdrv; | 118 | std::shared_ptr<Nvidia::Module> nvdrv; |
| 119 | s32 disp_fd; | ||
| 119 | 120 | ||
| 120 | std::list<VI::Display> displays; | 121 | std::list<VI::Display> displays; |
| 121 | 122 | ||
diff --git a/src/core/hle/service/vi/display/vi_display.cpp b/src/core/hle/service/vi/display/vi_display.cpp index aa49aa775..288aafaaf 100644 --- a/src/core/hle/service/vi/display/vi_display.cpp +++ b/src/core/hle/service/vi/display/vi_display.cpp | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | #include "core/hle/kernel/k_readable_event.h" | 12 | #include "core/hle/kernel/k_readable_event.h" |
| 13 | #include "core/hle/kernel/k_writable_event.h" | 13 | #include "core/hle/kernel/k_writable_event.h" |
| 14 | #include "core/hle/service/kernel_helpers.h" | 14 | #include "core/hle/service/kernel_helpers.h" |
| 15 | #include "core/hle/service/nvdrv/core/container.h" | ||
| 15 | #include "core/hle/service/nvflinger/buffer_item_consumer.h" | 16 | #include "core/hle/service/nvflinger/buffer_item_consumer.h" |
| 16 | #include "core/hle/service/nvflinger/buffer_queue_consumer.h" | 17 | #include "core/hle/service/nvflinger/buffer_queue_consumer.h" |
| 17 | #include "core/hle/service/nvflinger/buffer_queue_core.h" | 18 | #include "core/hle/service/nvflinger/buffer_queue_core.h" |
| @@ -29,11 +30,13 @@ struct BufferQueue { | |||
| 29 | std::unique_ptr<android::BufferQueueConsumer> consumer; | 30 | std::unique_ptr<android::BufferQueueConsumer> consumer; |
| 30 | }; | 31 | }; |
| 31 | 32 | ||
| 32 | static BufferQueue CreateBufferQueue(KernelHelpers::ServiceContext& service_context) { | 33 | static BufferQueue CreateBufferQueue(KernelHelpers::ServiceContext& service_context, |
| 34 | Service::Nvidia::NvCore::NvMap& nvmap) { | ||
| 33 | auto buffer_queue_core = std::make_shared<android::BufferQueueCore>(); | 35 | auto buffer_queue_core = std::make_shared<android::BufferQueueCore>(); |
| 34 | return {buffer_queue_core, | 36 | return { |
| 35 | std::make_unique<android::BufferQueueProducer>(service_context, buffer_queue_core), | 37 | buffer_queue_core, |
| 36 | std::make_unique<android::BufferQueueConsumer>(buffer_queue_core)}; | 38 | std::make_unique<android::BufferQueueProducer>(service_context, buffer_queue_core, nvmap), |
| 39 | std::make_unique<android::BufferQueueConsumer>(buffer_queue_core, nvmap)}; | ||
| 37 | } | 40 | } |
| 38 | 41 | ||
| 39 | Display::Display(u64 id, std::string name_, | 42 | Display::Display(u64 id, std::string name_, |
| @@ -74,10 +77,11 @@ void Display::SignalVSyncEvent() { | |||
| 74 | vsync_event->GetWritableEvent().Signal(); | 77 | vsync_event->GetWritableEvent().Signal(); |
| 75 | } | 78 | } |
| 76 | 79 | ||
| 77 | void Display::CreateLayer(u64 layer_id, u32 binder_id) { | 80 | void Display::CreateLayer(u64 layer_id, u32 binder_id, |
| 81 | Service::Nvidia::NvCore::Container& nv_core) { | ||
| 78 | ASSERT_MSG(layers.empty(), "Only one layer is supported per display at the moment"); | 82 | ASSERT_MSG(layers.empty(), "Only one layer is supported per display at the moment"); |
| 79 | 83 | ||
| 80 | auto [core, producer, consumer] = CreateBufferQueue(service_context); | 84 | auto [core, producer, consumer] = CreateBufferQueue(service_context, nv_core.GetNvMapFile()); |
| 81 | 85 | ||
| 82 | auto buffer_item_consumer = std::make_shared<android::BufferItemConsumer>(std::move(consumer)); | 86 | auto buffer_item_consumer = std::make_shared<android::BufferItemConsumer>(std::move(consumer)); |
| 83 | buffer_item_consumer->Connect(false); | 87 | buffer_item_consumer->Connect(false); |
diff --git a/src/core/hle/service/vi/display/vi_display.h b/src/core/hle/service/vi/display/vi_display.h index 8dbb0ef80..33d5f398c 100644 --- a/src/core/hle/service/vi/display/vi_display.h +++ b/src/core/hle/service/vi/display/vi_display.h | |||
| @@ -27,6 +27,11 @@ namespace Service::NVFlinger { | |||
| 27 | class HosBinderDriverServer; | 27 | class HosBinderDriverServer; |
| 28 | } | 28 | } |
| 29 | 29 | ||
| 30 | namespace Service::Nvidia::NvCore { | ||
| 31 | class Container; | ||
| 32 | class NvMap; | ||
| 33 | } // namespace Service::Nvidia::NvCore | ||
| 34 | |||
| 30 | namespace Service::VI { | 35 | namespace Service::VI { |
| 31 | 36 | ||
| 32 | class Layer; | 37 | class Layer; |
| @@ -93,7 +98,7 @@ public: | |||
| 93 | /// @param layer_id The ID to assign to the created layer. | 98 | /// @param layer_id The ID to assign to the created layer. |
| 94 | /// @param binder_id The ID assigned to the buffer queue. | 99 | /// @param binder_id The ID assigned to the buffer queue. |
| 95 | /// | 100 | /// |
| 96 | void CreateLayer(u64 layer_id, u32 binder_id); | 101 | void CreateLayer(u64 layer_id, u32 binder_id, Service::Nvidia::NvCore::Container& core); |
| 97 | 102 | ||
| 98 | /// Closes and removes a layer from this display with the given ID. | 103 | /// Closes and removes a layer from this display with the given ID. |
| 99 | /// | 104 | /// |
diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp index f083811ec..9c917cacf 100644 --- a/src/core/hle/service/vi/vi.cpp +++ b/src/core/hle/service/vi/vi.cpp | |||
| @@ -58,6 +58,7 @@ static_assert(sizeof(DisplayInfo) == 0x60, "DisplayInfo has wrong size"); | |||
| 58 | class NativeWindow final { | 58 | class NativeWindow final { |
| 59 | public: | 59 | public: |
| 60 | constexpr explicit NativeWindow(u32 id_) : id{id_} {} | 60 | constexpr explicit NativeWindow(u32 id_) : id{id_} {} |
| 61 | constexpr explicit NativeWindow(const NativeWindow& other) = default; | ||
| 61 | 62 | ||
| 62 | private: | 63 | private: |
| 63 | const u32 magic = 2; | 64 | const u32 magic = 2; |
diff --git a/src/core/memory.cpp b/src/core/memory.cpp index 34ad7cadd..2ac792566 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp | |||
| @@ -551,6 +551,11 @@ struct Memory::Impl { | |||
| 551 | []() {}); | 551 | []() {}); |
| 552 | } | 552 | } |
| 553 | 553 | ||
| 554 | [[nodiscard]] u8* GetPointerSilent(const VAddr vaddr) const { | ||
| 555 | return GetPointerImpl( | ||
| 556 | vaddr, []() {}, []() {}); | ||
| 557 | } | ||
| 558 | |||
| 554 | /** | 559 | /** |
| 555 | * Reads a particular data type out of memory at the given virtual address. | 560 | * Reads a particular data type out of memory at the given virtual address. |
| 556 | * | 561 | * |
| @@ -686,6 +691,10 @@ u8* Memory::GetPointer(VAddr vaddr) { | |||
| 686 | return impl->GetPointer(vaddr); | 691 | return impl->GetPointer(vaddr); |
| 687 | } | 692 | } |
| 688 | 693 | ||
| 694 | u8* Memory::GetPointerSilent(VAddr vaddr) { | ||
| 695 | return impl->GetPointerSilent(vaddr); | ||
| 696 | } | ||
| 697 | |||
| 689 | const u8* Memory::GetPointer(VAddr vaddr) const { | 698 | const u8* Memory::GetPointer(VAddr vaddr) const { |
| 690 | return impl->GetPointer(vaddr); | 699 | return impl->GetPointer(vaddr); |
| 691 | } | 700 | } |
diff --git a/src/core/memory.h b/src/core/memory.h index a11ff8766..81eac448b 100644 --- a/src/core/memory.h +++ b/src/core/memory.h | |||
| @@ -114,6 +114,7 @@ public: | |||
| 114 | * If the address is not valid, nullptr will be returned. | 114 | * If the address is not valid, nullptr will be returned. |
| 115 | */ | 115 | */ |
| 116 | u8* GetPointer(VAddr vaddr); | 116 | u8* GetPointer(VAddr vaddr); |
| 117 | u8* GetPointerSilent(VAddr vaddr); | ||
| 117 | 118 | ||
| 118 | template <typename T> | 119 | template <typename T> |
| 119 | T* GetPointer(VAddr vaddr) { | 120 | T* GetPointer(VAddr vaddr) { |
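GetPointerSilent reuses the same lookup as GetPointer but hands it no-op callbacks, so probing an unmapped address returns nullptr without logging or touching the rasterizer. A hedged sketch of that callback-driven pattern; the page-table shape and names are illustrative, not yuzu's actual internals:

```cpp
#include <cstdint>
#include <iostream>
#include <unordered_map>

// Toy page table: virtual page -> host pointer (illustrative only).
std::unordered_map<std::uint64_t, std::uint8_t*> page_table;
constexpr std::uint64_t PAGE_BITS = 12;

template <typename OnUnmapped, typename OnRasterizer>
std::uint8_t* GetPointerImpl(std::uint64_t vaddr, OnUnmapped&& on_unmapped,
                             OnRasterizer&& on_rasterizer) {
    const auto it = page_table.find(vaddr >> PAGE_BITS);
    if (it == page_table.end()) {
        on_unmapped();   // error/logging path
        return nullptr;
    }
    on_rasterizer();     // e.g. flush cached GPU data covering this page
    return it->second + (vaddr & ((1ULL << PAGE_BITS) - 1));
}

// Loud variant: report the failure.
std::uint8_t* GetPointer(std::uint64_t vaddr) {
    return GetPointerImpl(
        vaddr,
        [vaddr] { std::cerr << "Unmapped GetPointer @ 0x" << std::hex << vaddr << '\n'; },
        [] {});
}

// Silent variant: identical lookup, both callbacks are no-ops.
std::uint8_t* GetPointerSilent(std::uint64_t vaddr) {
    return GetPointerImpl(vaddr, [] {}, [] {});
}
```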
diff --git a/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp b/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp index 578bc8c1b..ce42475d4 100644 --- a/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp +++ b/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp | |||
| @@ -964,9 +964,9 @@ private: | |||
| 964 | demote_endif_node.type = Type::EndIf; | 964 | demote_endif_node.type = Type::EndIf; |
| 965 | demote_endif_node.data.end_if.merge = return_block_it->data.block; | 965 | demote_endif_node.data.end_if.merge = return_block_it->data.block; |
| 966 | 966 | ||
| 967 | asl.insert(return_block_it, demote_endif_node); | 967 | const auto next_it_1 = asl.insert(return_block_it, demote_endif_node); |
| 968 | asl.insert(return_block_it, demote_node); | 968 | const auto next_it_2 = asl.insert(next_it_1, demote_node); |
| 969 | asl.insert(return_block_it, demote_if_node); | 969 | asl.insert(next_it_2, demote_if_node); |
| 970 | } | 970 | } |
| 971 | 971 | ||
| 972 | ObjectPool<Statement>& stmt_pool; | 972 | ObjectPool<Statement>& stmt_pool; |
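The structured_control_flow fix chains the iterators returned by each insert so the three synthesized statements land in If, Demote, EndIf order in front of the return block, rather than the reversed order produced by inserting all three before the same iterator. A minimal std::list illustration of the iterator behaviour (the recompiler's actual container is not std::list; this only shows why chaining matters):

```cpp
#include <iostream>
#include <list>

int main() {
    // 0 stands for the return block; the goal is 1 (If), 2 (Demote), 3 (EndIf)
    // immediately before it.
    std::list<int> wrong{0};
    auto w = wrong.begin();
    wrong.insert(w, 3);  // EndIf
    wrong.insert(w, 2);  // Demote
    wrong.insert(w, 1);  // If
    // wrong: 3 2 1 0 -> EndIf, Demote, If: reversed.

    std::list<int> right{0};
    auto r = right.begin();
    auto it1 = right.insert(r, 3);    // EndIf before the return block
    auto it2 = right.insert(it1, 2);  // Demote before EndIf
    right.insert(it2, 1);             // If before Demote
    // right: 1 2 3 0 -> If, Demote, EndIf, return block.

    for (int v : wrong) std::cout << v << ' ';
    std::cout << '\n';
    for (int v : right) std::cout << v << ' ';
    std::cout << '\n';
}
```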
diff --git a/src/shader_recompiler/ir_opt/texture_pass.cpp b/src/shader_recompiler/ir_opt/texture_pass.cpp index 597112ba4..e8be58357 100644 --- a/src/shader_recompiler/ir_opt/texture_pass.cpp +++ b/src/shader_recompiler/ir_opt/texture_pass.cpp | |||
| @@ -19,8 +19,10 @@ namespace { | |||
| 19 | struct ConstBufferAddr { | 19 | struct ConstBufferAddr { |
| 20 | u32 index; | 20 | u32 index; |
| 21 | u32 offset; | 21 | u32 offset; |
| 22 | u32 shift_left; | ||
| 22 | u32 secondary_index; | 23 | u32 secondary_index; |
| 23 | u32 secondary_offset; | 24 | u32 secondary_offset; |
| 25 | u32 secondary_shift_left; | ||
| 24 | IR::U32 dynamic_offset; | 26 | IR::U32 dynamic_offset; |
| 25 | u32 count; | 27 | u32 count; |
| 26 | bool has_secondary; | 28 | bool has_secondary; |
| @@ -172,19 +174,41 @@ bool IsTextureInstruction(const IR::Inst& inst) { | |||
| 172 | return IndexedInstruction(inst) != IR::Opcode::Void; | 174 | return IndexedInstruction(inst) != IR::Opcode::Void; |
| 173 | } | 175 | } |
| 174 | 176 | ||
| 175 | std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst); | 177 | std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst, Environment& env); |
| 176 | 178 | ||
| 177 | std::optional<ConstBufferAddr> Track(const IR::Value& value) { | 179 | std::optional<ConstBufferAddr> Track(const IR::Value& value, Environment& env) { |
| 178 | return IR::BreadthFirstSearch(value, TryGetConstBuffer); | 180 | return IR::BreadthFirstSearch( |
| 181 | value, [&env](const IR::Inst* inst) { return TryGetConstBuffer(inst, env); }); | ||
| 179 | } | 182 | } |
| 180 | 183 | ||
| 181 | std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst) { | 184 | std::optional<u32> TryGetConstant(IR::Value& value, Environment& env) { |
| 185 | const IR::Inst* inst = value.InstRecursive(); | ||
| 186 | if (inst->GetOpcode() != IR::Opcode::GetCbufU32) { | ||
| 187 | return std::nullopt; | ||
| 188 | } | ||
| 189 | const IR::Value index{inst->Arg(0)}; | ||
| 190 | const IR::Value offset{inst->Arg(1)}; | ||
| 191 | if (!index.IsImmediate()) { | ||
| 192 | return std::nullopt; | ||
| 193 | } | ||
| 194 | if (!offset.IsImmediate()) { | ||
| 195 | return std::nullopt; | ||
| 196 | } | ||
| 197 | const auto index_number = index.U32(); | ||
| 198 | if (index_number != 1) { | ||
| 199 | return std::nullopt; | ||
| 200 | } | ||
| 201 | const auto offset_number = offset.U32(); | ||
| 202 | return env.ReadCbufValue(index_number, offset_number); | ||
| 203 | } | ||
| 204 | |||
| 205 | std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst, Environment& env) { | ||
| 182 | switch (inst->GetOpcode()) { | 206 | switch (inst->GetOpcode()) { |
| 183 | default: | 207 | default: |
| 184 | return std::nullopt; | 208 | return std::nullopt; |
| 185 | case IR::Opcode::BitwiseOr32: { | 209 | case IR::Opcode::BitwiseOr32: { |
| 186 | std::optional lhs{Track(inst->Arg(0))}; | 210 | std::optional lhs{Track(inst->Arg(0), env)}; |
| 187 | std::optional rhs{Track(inst->Arg(1))}; | 211 | std::optional rhs{Track(inst->Arg(1), env)}; |
| 188 | if (!lhs || !rhs) { | 212 | if (!lhs || !rhs) { |
| 189 | return std::nullopt; | 213 | return std::nullopt; |
| 190 | } | 214 | } |
| @@ -194,19 +218,62 @@ std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst) { | |||
| 194 | if (lhs->count > 1 || rhs->count > 1) { | 218 | if (lhs->count > 1 || rhs->count > 1) { |
| 195 | return std::nullopt; | 219 | return std::nullopt; |
| 196 | } | 220 | } |
| 197 | if (lhs->index > rhs->index || lhs->offset > rhs->offset) { | 221 | if (lhs->shift_left > 0 || lhs->index > rhs->index || lhs->offset > rhs->offset) { |
| 198 | std::swap(lhs, rhs); | 222 | std::swap(lhs, rhs); |
| 199 | } | 223 | } |
| 200 | return ConstBufferAddr{ | 224 | return ConstBufferAddr{ |
| 201 | .index = lhs->index, | 225 | .index = lhs->index, |
| 202 | .offset = lhs->offset, | 226 | .offset = lhs->offset, |
| 227 | .shift_left = lhs->shift_left, | ||
| 203 | .secondary_index = rhs->index, | 228 | .secondary_index = rhs->index, |
| 204 | .secondary_offset = rhs->offset, | 229 | .secondary_offset = rhs->offset, |
| 230 | .secondary_shift_left = rhs->shift_left, | ||
| 205 | .dynamic_offset = {}, | 231 | .dynamic_offset = {}, |
| 206 | .count = 1, | 232 | .count = 1, |
| 207 | .has_secondary = true, | 233 | .has_secondary = true, |
| 208 | }; | 234 | }; |
| 209 | } | 235 | } |
| 236 | case IR::Opcode::ShiftLeftLogical32: { | ||
| 237 | const IR::Value shift{inst->Arg(1)}; | ||
| 238 | if (!shift.IsImmediate()) { | ||
| 239 | return std::nullopt; | ||
| 240 | } | ||
| 241 | std::optional lhs{Track(inst->Arg(0), env)}; | ||
| 242 | if (lhs) { | ||
| 243 | lhs->shift_left = shift.U32(); | ||
| 244 | } | ||
| 245 | return lhs; | ||
| 246 | break; | ||
| 247 | } | ||
| 248 | case IR::Opcode::BitwiseAnd32: { | ||
| 249 | IR::Value op1{inst->Arg(0)}; | ||
| 250 | IR::Value op2{inst->Arg(1)}; | ||
| 251 | if (op1.IsImmediate()) { | ||
| 252 | std::swap(op1, op2); | ||
| 253 | } | ||
| 254 | if (!op2.IsImmediate() && !op1.IsImmediate()) { | ||
| 255 | do { | ||
| 256 | auto try_index = TryGetConstant(op1, env); | ||
| 257 | if (try_index) { | ||
| 258 | op1 = op2; | ||
| 259 | op2 = IR::Value{*try_index}; | ||
| 260 | break; | ||
| 261 | } | ||
| 262 | auto try_index_2 = TryGetConstant(op2, env); | ||
| 263 | if (try_index_2) { | ||
| 264 | op2 = IR::Value{*try_index_2}; | ||
| 265 | break; | ||
| 266 | } | ||
| 267 | return std::nullopt; | ||
| 268 | } while (false); | ||
| 269 | } | ||
| 270 | std::optional lhs{Track(op1, env)}; | ||
| 271 | if (lhs) { | ||
| 272 | lhs->shift_left = static_cast<u32>(std::countr_zero(op2.U32())); | ||
| 273 | } | ||
| 274 | return lhs; | ||
| 275 | break; | ||
| 276 | } | ||
| 210 | case IR::Opcode::GetCbufU32x2: | 277 | case IR::Opcode::GetCbufU32x2: |
| 211 | case IR::Opcode::GetCbufU32: | 278 | case IR::Opcode::GetCbufU32: |
| 212 | break; | 279 | break; |
| @@ -222,8 +289,10 @@ std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst) { | |||
| 222 | return ConstBufferAddr{ | 289 | return ConstBufferAddr{ |
| 223 | .index = index.U32(), | 290 | .index = index.U32(), |
| 224 | .offset = offset.U32(), | 291 | .offset = offset.U32(), |
| 292 | .shift_left = 0, | ||
| 225 | .secondary_index = 0, | 293 | .secondary_index = 0, |
| 226 | .secondary_offset = 0, | 294 | .secondary_offset = 0, |
| 295 | .secondary_shift_left = 0, | ||
| 227 | .dynamic_offset = {}, | 296 | .dynamic_offset = {}, |
| 228 | .count = 1, | 297 | .count = 1, |
| 229 | .has_secondary = false, | 298 | .has_secondary = false, |
| @@ -247,8 +316,10 @@ std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst) { | |||
| 247 | return ConstBufferAddr{ | 316 | return ConstBufferAddr{ |
| 248 | .index = index.U32(), | 317 | .index = index.U32(), |
| 249 | .offset = base_offset, | 318 | .offset = base_offset, |
| 319 | .shift_left = 0, | ||
| 250 | .secondary_index = 0, | 320 | .secondary_index = 0, |
| 251 | .secondary_offset = 0, | 321 | .secondary_offset = 0, |
| 322 | .secondary_shift_left = 0, | ||
| 252 | .dynamic_offset = dynamic_offset, | 323 | .dynamic_offset = dynamic_offset, |
| 253 | .count = 8, | 324 | .count = 8, |
| 254 | .has_secondary = false, | 325 | .has_secondary = false, |
| @@ -258,7 +329,7 @@ std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst) { | |||
| 258 | TextureInst MakeInst(Environment& env, IR::Block* block, IR::Inst& inst) { | 329 | TextureInst MakeInst(Environment& env, IR::Block* block, IR::Inst& inst) { |
| 259 | ConstBufferAddr addr; | 330 | ConstBufferAddr addr; |
| 260 | if (IsBindless(inst)) { | 331 | if (IsBindless(inst)) { |
| 261 | const std::optional<ConstBufferAddr> track_addr{Track(inst.Arg(0))}; | 332 | const std::optional<ConstBufferAddr> track_addr{Track(inst.Arg(0), env)}; |
| 262 | if (!track_addr) { | 333 | if (!track_addr) { |
| 263 | throw NotImplementedException("Failed to track bindless texture constant buffer"); | 334 | throw NotImplementedException("Failed to track bindless texture constant buffer"); |
| 264 | } | 335 | } |
| @@ -267,8 +338,10 @@ TextureInst MakeInst(Environment& env, IR::Block* block, IR::Inst& inst) { | |||
| 267 | addr = ConstBufferAddr{ | 338 | addr = ConstBufferAddr{ |
| 268 | .index = env.TextureBoundBuffer(), | 339 | .index = env.TextureBoundBuffer(), |
| 269 | .offset = inst.Arg(0).U32(), | 340 | .offset = inst.Arg(0).U32(), |
| 341 | .shift_left = 0, | ||
| 270 | .secondary_index = 0, | 342 | .secondary_index = 0, |
| 271 | .secondary_offset = 0, | 343 | .secondary_offset = 0, |
| 344 | .secondary_shift_left = 0, | ||
| 272 | .dynamic_offset = {}, | 345 | .dynamic_offset = {}, |
| 273 | .count = 1, | 346 | .count = 1, |
| 274 | .has_secondary = false, | 347 | .has_secondary = false, |
| @@ -284,8 +357,9 @@ TextureInst MakeInst(Environment& env, IR::Block* block, IR::Inst& inst) { | |||
| 284 | TextureType ReadTextureType(Environment& env, const ConstBufferAddr& cbuf) { | 357 | TextureType ReadTextureType(Environment& env, const ConstBufferAddr& cbuf) { |
| 285 | const u32 secondary_index{cbuf.has_secondary ? cbuf.secondary_index : cbuf.index}; | 358 | const u32 secondary_index{cbuf.has_secondary ? cbuf.secondary_index : cbuf.index}; |
| 286 | const u32 secondary_offset{cbuf.has_secondary ? cbuf.secondary_offset : cbuf.offset}; | 359 | const u32 secondary_offset{cbuf.has_secondary ? cbuf.secondary_offset : cbuf.offset}; |
| 287 | const u32 lhs_raw{env.ReadCbufValue(cbuf.index, cbuf.offset)}; | 360 | const u32 lhs_raw{env.ReadCbufValue(cbuf.index, cbuf.offset) << cbuf.shift_left}; |
| 288 | const u32 rhs_raw{env.ReadCbufValue(secondary_index, secondary_offset)}; | 361 | const u32 rhs_raw{env.ReadCbufValue(secondary_index, secondary_offset) |
| 362 | << cbuf.secondary_shift_left}; | ||
| 289 | return env.ReadTextureType(lhs_raw | rhs_raw); | 363 | return env.ReadTextureType(lhs_raw | rhs_raw); |
| 290 | } | 364 | } |
| 291 | 365 | ||
| @@ -487,8 +561,10 @@ void TexturePass(Environment& env, IR::Program& program) { | |||
| 487 | .has_secondary = cbuf.has_secondary, | 561 | .has_secondary = cbuf.has_secondary, |
| 488 | .cbuf_index = cbuf.index, | 562 | .cbuf_index = cbuf.index, |
| 489 | .cbuf_offset = cbuf.offset, | 563 | .cbuf_offset = cbuf.offset, |
| 564 | .shift_left = cbuf.shift_left, | ||
| 490 | .secondary_cbuf_index = cbuf.secondary_index, | 565 | .secondary_cbuf_index = cbuf.secondary_index, |
| 491 | .secondary_cbuf_offset = cbuf.secondary_offset, | 566 | .secondary_cbuf_offset = cbuf.secondary_offset, |
| 567 | .secondary_shift_left = cbuf.secondary_shift_left, | ||
| 492 | .count = cbuf.count, | 568 | .count = cbuf.count, |
| 493 | .size_shift = DESCRIPTOR_SIZE_SHIFT, | 569 | .size_shift = DESCRIPTOR_SIZE_SHIFT, |
| 494 | }); | 570 | }); |
| @@ -499,8 +575,10 @@ void TexturePass(Environment& env, IR::Program& program) { | |||
| 499 | .has_secondary = cbuf.has_secondary, | 575 | .has_secondary = cbuf.has_secondary, |
| 500 | .cbuf_index = cbuf.index, | 576 | .cbuf_index = cbuf.index, |
| 501 | .cbuf_offset = cbuf.offset, | 577 | .cbuf_offset = cbuf.offset, |
| 578 | .shift_left = cbuf.shift_left, | ||
| 502 | .secondary_cbuf_index = cbuf.secondary_index, | 579 | .secondary_cbuf_index = cbuf.secondary_index, |
| 503 | .secondary_cbuf_offset = cbuf.secondary_offset, | 580 | .secondary_cbuf_offset = cbuf.secondary_offset, |
| 581 | .secondary_shift_left = cbuf.secondary_shift_left, | ||
| 504 | .count = cbuf.count, | 582 | .count = cbuf.count, |
| 505 | .size_shift = DESCRIPTOR_SIZE_SHIFT, | 583 | .size_shift = DESCRIPTOR_SIZE_SHIFT, |
| 506 | }); | 584 | }); |
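With shift_left tracked through ShiftLeftLogical32 and BitwiseAnd32, the texture pass can follow bindless handles the shader assembles as, for example, (cbuf[a] << n) | cbuf[b], and ReadTextureType reapplies the recorded shifts when reconstructing the raw handle. A hedged sketch of that reconstruction; the toy Env stands in for the real Environment interface:

```cpp
#include <cstdint>
#include <map>
#include <utility>

// Toy const-buffer environment keyed by (index, offset). Illustrative only.
struct Env {
    std::map<std::pair<std::uint32_t, std::uint32_t>, std::uint32_t> cbufs;
    std::uint32_t ReadCbufValue(std::uint32_t index, std::uint32_t offset) const {
        const auto it = cbufs.find({index, offset});
        return it != cbufs.end() ? it->second : 0;
    }
};

struct ConstBufferAddr {
    std::uint32_t index, offset, shift_left;
    std::uint32_t secondary_index, secondary_offset, secondary_shift_left;
    bool has_secondary;
};

// Mirrors the updated ReadTextureType logic: each half of the handle is read
// from its const buffer and shifted back into place before the OR.
std::uint32_t ReconstructHandle(const Env& env, const ConstBufferAddr& cbuf) {
    const std::uint32_t sec_index = cbuf.has_secondary ? cbuf.secondary_index : cbuf.index;
    const std::uint32_t sec_offset = cbuf.has_secondary ? cbuf.secondary_offset : cbuf.offset;
    const std::uint32_t lhs = env.ReadCbufValue(cbuf.index, cbuf.offset) << cbuf.shift_left;
    const std::uint32_t rhs =
        env.ReadCbufValue(sec_index, sec_offset) << cbuf.secondary_shift_left;
    return lhs | rhs;
}
```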
diff --git a/src/shader_recompiler/shader_info.h b/src/shader_recompiler/shader_info.h index f5690805c..cc596da4f 100644 --- a/src/shader_recompiler/shader_info.h +++ b/src/shader_recompiler/shader_info.h | |||
| @@ -61,8 +61,10 @@ struct TextureBufferDescriptor { | |||
| 61 | bool has_secondary; | 61 | bool has_secondary; |
| 62 | u32 cbuf_index; | 62 | u32 cbuf_index; |
| 63 | u32 cbuf_offset; | 63 | u32 cbuf_offset; |
| 64 | u32 shift_left; | ||
| 64 | u32 secondary_cbuf_index; | 65 | u32 secondary_cbuf_index; |
| 65 | u32 secondary_cbuf_offset; | 66 | u32 secondary_cbuf_offset; |
| 67 | u32 secondary_shift_left; | ||
| 66 | u32 count; | 68 | u32 count; |
| 67 | u32 size_shift; | 69 | u32 size_shift; |
| 68 | }; | 70 | }; |
| @@ -85,8 +87,10 @@ struct TextureDescriptor { | |||
| 85 | bool has_secondary; | 87 | bool has_secondary; |
| 86 | u32 cbuf_index; | 88 | u32 cbuf_index; |
| 87 | u32 cbuf_offset; | 89 | u32 cbuf_offset; |
| 90 | u32 shift_left; | ||
| 88 | u32 secondary_cbuf_index; | 91 | u32 secondary_cbuf_index; |
| 89 | u32 secondary_cbuf_offset; | 92 | u32 secondary_cbuf_offset; |
| 93 | u32 secondary_shift_left; | ||
| 90 | u32 count; | 94 | u32 count; |
| 91 | u32 size_shift; | 95 | u32 size_shift; |
| 92 | }; | 96 | }; |
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt index 5b3808351..40e6d1ec4 100644 --- a/src/video_core/CMakeLists.txt +++ b/src/video_core/CMakeLists.txt | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | add_subdirectory(host_shaders) | 4 | add_subdirectory(host_shaders) |
| 5 | 5 | ||
| 6 | if(LIBVA_FOUND) | 6 | if(LIBVA_FOUND) |
| 7 | set_source_files_properties(command_classes/codecs/codec.cpp | 7 | set_source_files_properties(host1x/codecs/codec.cpp |
| 8 | PROPERTIES COMPILE_DEFINITIONS LIBVA_FOUND=1) | 8 | PROPERTIES COMPILE_DEFINITIONS LIBVA_FOUND=1) |
| 9 | list(APPEND FFmpeg_LIBRARIES ${LIBVA_LIBRARIES}) | 9 | list(APPEND FFmpeg_LIBRARIES ${LIBVA_LIBRARIES}) |
| 10 | endif() | 10 | endif() |
| @@ -15,26 +15,14 @@ add_library(video_core STATIC | |||
| 15 | buffer_cache/buffer_cache.h | 15 | buffer_cache/buffer_cache.h |
| 16 | cdma_pusher.cpp | 16 | cdma_pusher.cpp |
| 17 | cdma_pusher.h | 17 | cdma_pusher.h |
| 18 | command_classes/codecs/codec.cpp | ||
| 19 | command_classes/codecs/codec.h | ||
| 20 | command_classes/codecs/h264.cpp | ||
| 21 | command_classes/codecs/h264.h | ||
| 22 | command_classes/codecs/vp8.cpp | ||
| 23 | command_classes/codecs/vp8.h | ||
| 24 | command_classes/codecs/vp9.cpp | ||
| 25 | command_classes/codecs/vp9.h | ||
| 26 | command_classes/codecs/vp9_types.h | ||
| 27 | command_classes/host1x.cpp | ||
| 28 | command_classes/host1x.h | ||
| 29 | command_classes/nvdec.cpp | ||
| 30 | command_classes/nvdec.h | ||
| 31 | command_classes/nvdec_common.h | ||
| 32 | command_classes/sync_manager.cpp | ||
| 33 | command_classes/sync_manager.h | ||
| 34 | command_classes/vic.cpp | ||
| 35 | command_classes/vic.h | ||
| 36 | compatible_formats.cpp | 18 | compatible_formats.cpp |
| 37 | compatible_formats.h | 19 | compatible_formats.h |
| 20 | control/channel_state.cpp | ||
| 21 | control/channel_state.h | ||
| 22 | control/channel_state_cache.cpp | ||
| 23 | control/channel_state_cache.h | ||
| 24 | control/scheduler.cpp | ||
| 25 | control/scheduler.h | ||
| 38 | delayed_destruction_ring.h | 26 | delayed_destruction_ring.h |
| 39 | dirty_flags.cpp | 27 | dirty_flags.cpp |
| 40 | dirty_flags.h | 28 | dirty_flags.h |
| @@ -54,7 +42,31 @@ add_library(video_core STATIC | |||
| 54 | engines/maxwell_3d.h | 42 | engines/maxwell_3d.h |
| 55 | engines/maxwell_dma.cpp | 43 | engines/maxwell_dma.cpp |
| 56 | engines/maxwell_dma.h | 44 | engines/maxwell_dma.h |
| 45 | engines/puller.cpp | ||
| 46 | engines/puller.h | ||
| 57 | framebuffer_config.h | 47 | framebuffer_config.h |
| 48 | host1x/codecs/codec.cpp | ||
| 49 | host1x/codecs/codec.h | ||
| 50 | host1x/codecs/h264.cpp | ||
| 51 | host1x/codecs/h264.h | ||
| 52 | host1x/codecs/vp8.cpp | ||
| 53 | host1x/codecs/vp8.h | ||
| 54 | host1x/codecs/vp9.cpp | ||
| 55 | host1x/codecs/vp9.h | ||
| 56 | host1x/codecs/vp9_types.h | ||
| 57 | host1x/control.cpp | ||
| 58 | host1x/control.h | ||
| 59 | host1x/host1x.cpp | ||
| 60 | host1x/host1x.h | ||
| 61 | host1x/nvdec.cpp | ||
| 62 | host1x/nvdec.h | ||
| 63 | host1x/nvdec_common.h | ||
| 64 | host1x/sync_manager.cpp | ||
| 65 | host1x/sync_manager.h | ||
| 66 | host1x/syncpoint_manager.cpp | ||
| 67 | host1x/syncpoint_manager.h | ||
| 68 | host1x/vic.cpp | ||
| 69 | host1x/vic.h | ||
| 58 | macro/macro.cpp | 70 | macro/macro.cpp |
| 59 | macro/macro.h | 71 | macro/macro.h |
| 60 | macro/macro_hle.cpp | 72 | macro/macro_hle.cpp |
| @@ -195,6 +207,7 @@ add_library(video_core STATIC | |||
| 195 | texture_cache/render_targets.h | 207 | texture_cache/render_targets.h |
| 196 | texture_cache/samples_helper.h | 208 | texture_cache/samples_helper.h |
| 197 | texture_cache/slot_vector.h | 209 | texture_cache/slot_vector.h |
| 210 | texture_cache/texture_cache.cpp | ||
| 198 | texture_cache/texture_cache.h | 211 | texture_cache/texture_cache.h |
| 199 | texture_cache/texture_cache_base.h | 212 | texture_cache/texture_cache_base.h |
| 200 | texture_cache/types.h | 213 | texture_cache/types.h |
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h index f015dae56..8e26b3f95 100644 --- a/src/video_core/buffer_cache/buffer_cache.h +++ b/src/video_core/buffer_cache/buffer_cache.h | |||
| @@ -5,7 +5,6 @@ | |||
| 5 | 5 | ||
| 6 | #include <algorithm> | 6 | #include <algorithm> |
| 7 | #include <array> | 7 | #include <array> |
| 8 | #include <deque> | ||
| 9 | #include <memory> | 8 | #include <memory> |
| 10 | #include <mutex> | 9 | #include <mutex> |
| 11 | #include <numeric> | 10 | #include <numeric> |
| @@ -23,6 +22,7 @@ | |||
| 23 | #include "common/settings.h" | 22 | #include "common/settings.h" |
| 24 | #include "core/memory.h" | 23 | #include "core/memory.h" |
| 25 | #include "video_core/buffer_cache/buffer_base.h" | 24 | #include "video_core/buffer_cache/buffer_base.h" |
| 25 | #include "video_core/control/channel_state_cache.h" | ||
| 26 | #include "video_core/delayed_destruction_ring.h" | 26 | #include "video_core/delayed_destruction_ring.h" |
| 27 | #include "video_core/dirty_flags.h" | 27 | #include "video_core/dirty_flags.h" |
| 28 | #include "video_core/engines/kepler_compute.h" | 28 | #include "video_core/engines/kepler_compute.h" |
| @@ -56,7 +56,7 @@ using UniformBufferSizes = std::array<std::array<u32, NUM_GRAPHICS_UNIFORM_BUFFE | |||
| 56 | using ComputeUniformBufferSizes = std::array<u32, NUM_COMPUTE_UNIFORM_BUFFERS>; | 56 | using ComputeUniformBufferSizes = std::array<u32, NUM_COMPUTE_UNIFORM_BUFFERS>; |
| 57 | 57 | ||
| 58 | template <typename P> | 58 | template <typename P> |
| 59 | class BufferCache { | 59 | class BufferCache : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> { |
| 60 | 60 | ||
| 61 | // Page size for caching purposes. | 61 | // Page size for caching purposes. |
| 62 | // This is unrelated to the CPU page size and it can be changed as it seems optimal. | 62 | // This is unrelated to the CPU page size and it can be changed as it seems optimal. |
| @@ -116,10 +116,7 @@ public: | |||
| 116 | static constexpr u32 DEFAULT_SKIP_CACHE_SIZE = static_cast<u32>(4_KiB); | 116 | static constexpr u32 DEFAULT_SKIP_CACHE_SIZE = static_cast<u32>(4_KiB); |
| 117 | 117 | ||
| 118 | explicit BufferCache(VideoCore::RasterizerInterface& rasterizer_, | 118 | explicit BufferCache(VideoCore::RasterizerInterface& rasterizer_, |
| 119 | Tegra::Engines::Maxwell3D& maxwell3d_, | 119 | Core::Memory::Memory& cpu_memory_, Runtime& runtime_); |
| 120 | Tegra::Engines::KeplerCompute& kepler_compute_, | ||
| 121 | Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, | ||
| 122 | Runtime& runtime_); | ||
| 123 | 120 | ||
| 124 | void TickFrame(); | 121 | void TickFrame(); |
| 125 | 122 | ||
| @@ -129,7 +126,7 @@ public: | |||
| 129 | 126 | ||
| 130 | void DownloadMemory(VAddr cpu_addr, u64 size); | 127 | void DownloadMemory(VAddr cpu_addr, u64 size); |
| 131 | 128 | ||
| 132 | bool InlineMemory(VAddr dest_address, size_t copy_size, std::span<u8> inlined_buffer); | 129 | bool InlineMemory(VAddr dest_address, size_t copy_size, std::span<const u8> inlined_buffer); |
| 133 | 130 | ||
| 134 | void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size); | 131 | void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size); |
| 135 | 132 | ||
| @@ -353,7 +350,7 @@ private: | |||
| 353 | 350 | ||
| 354 | void NotifyBufferDeletion(); | 351 | void NotifyBufferDeletion(); |
| 355 | 352 | ||
| 356 | [[nodiscard]] Binding StorageBufferBinding(GPUVAddr ssbo_addr) const; | 353 | [[nodiscard]] Binding StorageBufferBinding(GPUVAddr ssbo_addr, bool is_written = false) const; |
| 357 | 354 | ||
| 358 | [[nodiscard]] TextureBufferBinding GetTextureBufferBinding(GPUVAddr gpu_addr, u32 size, | 355 | [[nodiscard]] TextureBufferBinding GetTextureBufferBinding(GPUVAddr gpu_addr, u32 size, |
| 359 | PixelFormat format); | 356 | PixelFormat format); |
| @@ -367,9 +364,6 @@ private: | |||
| 367 | void ClearDownload(IntervalType subtract_interval); | 364 | void ClearDownload(IntervalType subtract_interval); |
| 368 | 365 | ||
| 369 | VideoCore::RasterizerInterface& rasterizer; | 366 | VideoCore::RasterizerInterface& rasterizer; |
| 370 | Tegra::Engines::Maxwell3D& maxwell3d; | ||
| 371 | Tegra::Engines::KeplerCompute& kepler_compute; | ||
| 372 | Tegra::MemoryManager& gpu_memory; | ||
| 373 | Core::Memory::Memory& cpu_memory; | 367 | Core::Memory::Memory& cpu_memory; |
| 374 | 368 | ||
| 375 | SlotVector<Buffer> slot_buffers; | 369 | SlotVector<Buffer> slot_buffers; |
| @@ -444,12 +438,8 @@ private: | |||
| 444 | 438 | ||
| 445 | template <class P> | 439 | template <class P> |
| 446 | BufferCache<P>::BufferCache(VideoCore::RasterizerInterface& rasterizer_, | 440 | BufferCache<P>::BufferCache(VideoCore::RasterizerInterface& rasterizer_, |
| 447 | Tegra::Engines::Maxwell3D& maxwell3d_, | 441 | Core::Memory::Memory& cpu_memory_, Runtime& runtime_) |
| 448 | Tegra::Engines::KeplerCompute& kepler_compute_, | 442 | : runtime{runtime_}, rasterizer{rasterizer_}, cpu_memory{cpu_memory_} { |
| 449 | Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, | ||
| 450 | Runtime& runtime_) | ||
| 451 | : runtime{runtime_}, rasterizer{rasterizer_}, maxwell3d{maxwell3d_}, | ||
| 452 | kepler_compute{kepler_compute_}, gpu_memory{gpu_memory_}, cpu_memory{cpu_memory_} { | ||
| 453 | // Ensure the first slot is used for the null buffer | 443 | // Ensure the first slot is used for the null buffer |
| 454 | void(slot_buffers.insert(runtime, NullBufferParams{})); | 444 | void(slot_buffers.insert(runtime, NullBufferParams{})); |
| 455 | common_ranges.clear(); | 445 | common_ranges.clear(); |
| @@ -552,8 +542,8 @@ void BufferCache<P>::ClearDownload(IntervalType subtract_interval) { | |||
| 552 | 542 | ||
| 553 | template <class P> | 543 | template <class P> |
| 554 | bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) { | 544 | bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) { |
| 555 | const std::optional<VAddr> cpu_src_address = gpu_memory.GpuToCpuAddress(src_address); | 545 | const std::optional<VAddr> cpu_src_address = gpu_memory->GpuToCpuAddress(src_address); |
| 556 | const std::optional<VAddr> cpu_dest_address = gpu_memory.GpuToCpuAddress(dest_address); | 546 | const std::optional<VAddr> cpu_dest_address = gpu_memory->GpuToCpuAddress(dest_address); |
| 557 | if (!cpu_src_address || !cpu_dest_address) { | 547 | if (!cpu_src_address || !cpu_dest_address) { |
| 558 | return false; | 548 | return false; |
| 559 | } | 549 | } |
| @@ -611,7 +601,7 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am | |||
| 611 | 601 | ||
| 612 | template <class P> | 602 | template <class P> |
| 613 | bool BufferCache<P>::DMAClear(GPUVAddr dst_address, u64 amount, u32 value) { | 603 | bool BufferCache<P>::DMAClear(GPUVAddr dst_address, u64 amount, u32 value) { |
| 614 | const std::optional<VAddr> cpu_dst_address = gpu_memory.GpuToCpuAddress(dst_address); | 604 | const std::optional<VAddr> cpu_dst_address = gpu_memory->GpuToCpuAddress(dst_address); |
| 615 | if (!cpu_dst_address) { | 605 | if (!cpu_dst_address) { |
| 616 | return false; | 606 | return false; |
| 617 | } | 607 | } |
| @@ -635,7 +625,7 @@ bool BufferCache<P>::DMAClear(GPUVAddr dst_address, u64 amount, u32 value) { | |||
| 635 | template <class P> | 625 | template <class P> |
| 636 | void BufferCache<P>::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, | 626 | void BufferCache<P>::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, |
| 637 | u32 size) { | 627 | u32 size) { |
| 638 | const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); | 628 | const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); |
| 639 | const Binding binding{ | 629 | const Binding binding{ |
| 640 | .cpu_addr = *cpu_addr, | 630 | .cpu_addr = *cpu_addr, |
| 641 | .size = size, | 631 | .size = size, |
| @@ -673,7 +663,7 @@ void BufferCache<P>::BindHostGeometryBuffers(bool is_indexed) { | |||
| 673 | if (is_indexed) { | 663 | if (is_indexed) { |
| 674 | BindHostIndexBuffer(); | 664 | BindHostIndexBuffer(); |
| 675 | } else if constexpr (!HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) { | 665 | } else if constexpr (!HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) { |
| 676 | const auto& regs = maxwell3d.regs; | 666 | const auto& regs = maxwell3d->regs; |
| 677 | if (regs.draw.topology == Maxwell::PrimitiveTopology::Quads) { | 667 | if (regs.draw.topology == Maxwell::PrimitiveTopology::Quads) { |
| 678 | runtime.BindQuadArrayIndexBuffer(regs.vertex_buffer.first, regs.vertex_buffer.count); | 668 | runtime.BindQuadArrayIndexBuffer(regs.vertex_buffer.first, regs.vertex_buffer.count); |
| 679 | } | 669 | } |
| @@ -733,9 +723,9 @@ void BufferCache<P>::BindGraphicsStorageBuffer(size_t stage, size_t ssbo_index, | |||
| 733 | enabled_storage_buffers[stage] |= 1U << ssbo_index; | 723 | enabled_storage_buffers[stage] |= 1U << ssbo_index; |
| 734 | written_storage_buffers[stage] |= (is_written ? 1U : 0U) << ssbo_index; | 724 | written_storage_buffers[stage] |= (is_written ? 1U : 0U) << ssbo_index; |
| 735 | 725 | ||
| 736 | const auto& cbufs = maxwell3d.state.shader_stages[stage]; | 726 | const auto& cbufs = maxwell3d->state.shader_stages[stage]; |
| 737 | const GPUVAddr ssbo_addr = cbufs.const_buffers[cbuf_index].address + cbuf_offset; | 727 | const GPUVAddr ssbo_addr = cbufs.const_buffers[cbuf_index].address + cbuf_offset; |
| 738 | storage_buffers[stage][ssbo_index] = StorageBufferBinding(ssbo_addr); | 728 | storage_buffers[stage][ssbo_index] = StorageBufferBinding(ssbo_addr, is_written); |
| 739 | } | 729 | } |
| 740 | 730 | ||
| 741 | template <class P> | 731 | template <class P> |
| @@ -770,12 +760,12 @@ void BufferCache<P>::BindComputeStorageBuffer(size_t ssbo_index, u32 cbuf_index, | |||
| 770 | enabled_compute_storage_buffers |= 1U << ssbo_index; | 760 | enabled_compute_storage_buffers |= 1U << ssbo_index; |
| 771 | written_compute_storage_buffers |= (is_written ? 1U : 0U) << ssbo_index; | 761 | written_compute_storage_buffers |= (is_written ? 1U : 0U) << ssbo_index; |
| 772 | 762 | ||
| 773 | const auto& launch_desc = kepler_compute.launch_description; | 763 | const auto& launch_desc = kepler_compute->launch_description; |
| 774 | ASSERT(((launch_desc.const_buffer_enable_mask >> cbuf_index) & 1) != 0); | 764 | ASSERT(((launch_desc.const_buffer_enable_mask >> cbuf_index) & 1) != 0); |
| 775 | 765 | ||
| 776 | const auto& cbufs = launch_desc.const_buffer_config; | 766 | const auto& cbufs = launch_desc.const_buffer_config; |
| 777 | const GPUVAddr ssbo_addr = cbufs[cbuf_index].Address() + cbuf_offset; | 767 | const GPUVAddr ssbo_addr = cbufs[cbuf_index].Address() + cbuf_offset; |
| 778 | compute_storage_buffers[ssbo_index] = StorageBufferBinding(ssbo_addr); | 768 | compute_storage_buffers[ssbo_index] = StorageBufferBinding(ssbo_addr, is_written); |
| 779 | } | 769 | } |
| 780 | 770 | ||
| 781 | template <class P> | 771 | template <class P> |
| @@ -836,6 +826,19 @@ void BufferCache<P>::CommitAsyncFlushesHigh() { | |||
| 836 | const bool is_accuracy_normal = | 826 | const bool is_accuracy_normal = |
| 837 | Settings::values.gpu_accuracy.GetValue() == Settings::GPUAccuracy::Normal; | 827 | Settings::values.gpu_accuracy.GetValue() == Settings::GPUAccuracy::Normal; |
| 838 | 828 | ||
| 829 | auto it = committed_ranges.begin(); | ||
| 830 | while (it != committed_ranges.end()) { | ||
| 831 | auto& current_intervals = *it; | ||
| 832 | auto next_it = std::next(it); | ||
| 833 | while (next_it != committed_ranges.end()) { | ||
| 834 | for (auto& interval : *next_it) { | ||
| 835 | current_intervals.subtract(interval); | ||
| 836 | } | ||
| 837 | next_it++; | ||
| 838 | } | ||
| 839 | it++; | ||
| 840 | } | ||
| 841 | |||
| 839 | boost::container::small_vector<std::pair<BufferCopy, BufferId>, 1> downloads; | 842 | boost::container::small_vector<std::pair<BufferCopy, BufferId>, 1> downloads; |
| 840 | u64 total_size_bytes = 0; | 843 | u64 total_size_bytes = 0; |
| 841 | u64 largest_copy = 0; | 844 | u64 largest_copy = 0; |
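The new loop above walks the queued download ranges and subtracts every later interval set from every earlier one, so a region committed more than once is only downloaded once. A small sketch of the same pass over plain [begin, end) pairs, assuming simple vectors instead of the boost ICL interval types the cache actually uses:

```cpp
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

using Interval = std::pair<std::uint64_t, std::uint64_t>;  // [begin, end)
using IntervalSet = std::vector<Interval>;

// Remove `cut` from every interval in `set`, splitting where needed.
void Subtract(IntervalSet& set, Interval cut) {
    IntervalSet out;
    for (const auto& [begin, end] : set) {
        if (cut.second <= begin || end <= cut.first) {  // no overlap
            out.push_back({begin, end});
            continue;
        }
        if (begin < cut.first) out.push_back({begin, cut.first});
        if (cut.second < end) out.push_back({cut.second, end});
    }
    set = std::move(out);
}

// Mirrors the dedup pass: later commits keep the overlap, earlier ones drop it.
void Deduplicate(std::vector<IntervalSet>& committed_ranges) {
    for (auto it = committed_ranges.begin(); it != committed_ranges.end(); ++it) {
        for (auto next_it = std::next(it); next_it != committed_ranges.end(); ++next_it) {
            for (const Interval& interval : *next_it) {
                Subtract(*it, interval);
            }
        }
    }
}
```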
| @@ -991,19 +994,19 @@ void BufferCache<P>::BindHostIndexBuffer() { | |||
| 991 | const u32 size = index_buffer.size; | 994 | const u32 size = index_buffer.size; |
| 992 | SynchronizeBuffer(buffer, index_buffer.cpu_addr, size); | 995 | SynchronizeBuffer(buffer, index_buffer.cpu_addr, size); |
| 993 | if constexpr (HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) { | 996 | if constexpr (HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) { |
| 994 | const u32 new_offset = offset + maxwell3d.regs.index_array.first * | 997 | const u32 new_offset = offset + maxwell3d->regs.index_array.first * |
| 995 | maxwell3d.regs.index_array.FormatSizeInBytes(); | 998 | maxwell3d->regs.index_array.FormatSizeInBytes(); |
| 996 | runtime.BindIndexBuffer(buffer, new_offset, size); | 999 | runtime.BindIndexBuffer(buffer, new_offset, size); |
| 997 | } else { | 1000 | } else { |
| 998 | runtime.BindIndexBuffer(maxwell3d.regs.draw.topology, maxwell3d.regs.index_array.format, | 1001 | runtime.BindIndexBuffer(maxwell3d->regs.draw.topology, maxwell3d->regs.index_array.format, |
| 999 | maxwell3d.regs.index_array.first, maxwell3d.regs.index_array.count, | 1002 | maxwell3d->regs.index_array.first, |
| 1000 | buffer, offset, size); | 1003 | maxwell3d->regs.index_array.count, buffer, offset, size); |
| 1001 | } | 1004 | } |
| 1002 | } | 1005 | } |
| 1003 | 1006 | ||
| 1004 | template <class P> | 1007 | template <class P> |
| 1005 | void BufferCache<P>::BindHostVertexBuffers() { | 1008 | void BufferCache<P>::BindHostVertexBuffers() { |
| 1006 | auto& flags = maxwell3d.dirty.flags; | 1009 | auto& flags = maxwell3d->dirty.flags; |
| 1007 | for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) { | 1010 | for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) { |
| 1008 | const Binding& binding = vertex_buffers[index]; | 1011 | const Binding& binding = vertex_buffers[index]; |
| 1009 | Buffer& buffer = slot_buffers[binding.buffer_id]; | 1012 | Buffer& buffer = slot_buffers[binding.buffer_id]; |
| @@ -1014,7 +1017,7 @@ void BufferCache<P>::BindHostVertexBuffers() { | |||
| 1014 | } | 1017 | } |
| 1015 | flags[Dirty::VertexBuffer0 + index] = false; | 1018 | flags[Dirty::VertexBuffer0 + index] = false; |
| 1016 | 1019 | ||
| 1017 | const u32 stride = maxwell3d.regs.vertex_array[index].stride; | 1020 | const u32 stride = maxwell3d->regs.vertex_array[index].stride; |
| 1018 | const u32 offset = buffer.Offset(binding.cpu_addr); | 1021 | const u32 offset = buffer.Offset(binding.cpu_addr); |
| 1019 | runtime.BindVertexBuffer(index, buffer, offset, binding.size, stride); | 1022 | runtime.BindVertexBuffer(index, buffer, offset, binding.size, stride); |
| 1020 | } | 1023 | } |
| @@ -1154,7 +1157,7 @@ void BufferCache<P>::BindHostGraphicsTextureBuffers(size_t stage) { | |||
| 1154 | 1157 | ||
| 1155 | template <class P> | 1158 | template <class P> |
| 1156 | void BufferCache<P>::BindHostTransformFeedbackBuffers() { | 1159 | void BufferCache<P>::BindHostTransformFeedbackBuffers() { |
| 1157 | if (maxwell3d.regs.tfb_enabled == 0) { | 1160 | if (maxwell3d->regs.tfb_enabled == 0) { |
| 1158 | return; | 1161 | return; |
| 1159 | } | 1162 | } |
| 1160 | for (u32 index = 0; index < NUM_TRANSFORM_FEEDBACK_BUFFERS; ++index) { | 1163 | for (u32 index = 0; index < NUM_TRANSFORM_FEEDBACK_BUFFERS; ++index) { |
| @@ -1239,16 +1242,19 @@ void BufferCache<P>::BindHostComputeTextureBuffers() { | |||
| 1239 | 1242 | ||
| 1240 | template <class P> | 1243 | template <class P> |
| 1241 | void BufferCache<P>::DoUpdateGraphicsBuffers(bool is_indexed) { | 1244 | void BufferCache<P>::DoUpdateGraphicsBuffers(bool is_indexed) { |
| 1242 | if (is_indexed) { | 1245 | do { |
| 1243 | UpdateIndexBuffer(); | 1246 | has_deleted_buffers = false; |
| 1244 | } | 1247 | if (is_indexed) { |
| 1245 | UpdateVertexBuffers(); | 1248 | UpdateIndexBuffer(); |
| 1246 | UpdateTransformFeedbackBuffers(); | 1249 | } |
| 1247 | for (size_t stage = 0; stage < NUM_STAGES; ++stage) { | 1250 | UpdateVertexBuffers(); |
| 1248 | UpdateUniformBuffers(stage); | 1251 | UpdateTransformFeedbackBuffers(); |
| 1249 | UpdateStorageBuffers(stage); | 1252 | for (size_t stage = 0; stage < NUM_STAGES; ++stage) { |
| 1250 | UpdateTextureBuffers(stage); | 1253 | UpdateUniformBuffers(stage); |
| 1251 | } | 1254 | UpdateStorageBuffers(stage); |
| 1255 | UpdateTextureBuffers(stage); | ||
| 1256 | } | ||
| 1257 | } while (has_deleted_buffers); | ||
| 1252 | } | 1258 | } |
| 1253 | 1259 | ||
| 1254 | template <class P> | 1260 | template <class P> |
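DoUpdateGraphicsBuffers now retries: if any binding update deletes a buffer (flagging has_deleted_buffers), the whole set of bindings is recomputed so nothing holds a stale buffer id. A generic sketch of the retry-until-stable pattern; the method names are illustrative, not the cache's real ones:

```cpp
// Hedged sketch: rerun the full update whenever a step invalidates state that
// earlier steps already consumed.
struct BindingUpdater {
    bool has_deleted_buffers = false;

    void UpdateAll(bool is_indexed) {
        do {
            has_deleted_buffers = false;  // any step below may set this again
            if (is_indexed) {
                UpdateIndexBuffer();
            }
            UpdateVertexBuffers();
            UpdateStorageAndTextureBuffers();
        } while (has_deleted_buffers);    // restart until a clean pass
    }

    void UpdateIndexBuffer() { /* may delete/recreate a buffer */ }
    void UpdateVertexBuffers() { /* may delete/recreate a buffer */ }
    void UpdateStorageAndTextureBuffers() { /* may delete/recreate a buffer */ }
};
```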
| @@ -1262,8 +1268,8 @@ template <class P> | |||
| 1262 | void BufferCache<P>::UpdateIndexBuffer() { | 1268 | void BufferCache<P>::UpdateIndexBuffer() { |
| 1263 | // We have to check for the dirty flags and index count | 1269 | // We have to check for the dirty flags and index count |
| 1264 | // The index count is currently changed without updating the dirty flags | 1270 | // The index count is currently changed without updating the dirty flags |
| 1265 | const auto& index_array = maxwell3d.regs.index_array; | 1271 | const auto& index_array = maxwell3d->regs.index_array; |
| 1266 | auto& flags = maxwell3d.dirty.flags; | 1272 | auto& flags = maxwell3d->dirty.flags; |
| 1267 | if (!flags[Dirty::IndexBuffer] && last_index_count == index_array.count) { | 1273 | if (!flags[Dirty::IndexBuffer] && last_index_count == index_array.count) { |
| 1268 | return; | 1274 | return; |
| 1269 | } | 1275 | } |
| @@ -1272,7 +1278,7 @@ void BufferCache<P>::UpdateIndexBuffer() { | |||
| 1272 | 1278 | ||
| 1273 | const GPUVAddr gpu_addr_begin = index_array.StartAddress(); | 1279 | const GPUVAddr gpu_addr_begin = index_array.StartAddress(); |
| 1274 | const GPUVAddr gpu_addr_end = index_array.EndAddress(); | 1280 | const GPUVAddr gpu_addr_end = index_array.EndAddress(); |
| 1275 | const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr_begin); | 1281 | const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin); |
| 1276 | const u32 address_size = static_cast<u32>(gpu_addr_end - gpu_addr_begin); | 1282 | const u32 address_size = static_cast<u32>(gpu_addr_end - gpu_addr_begin); |
| 1277 | const u32 draw_size = (index_array.count + index_array.first) * index_array.FormatSizeInBytes(); | 1283 | const u32 draw_size = (index_array.count + index_array.first) * index_array.FormatSizeInBytes(); |
| 1278 | const u32 size = std::min(address_size, draw_size); | 1284 | const u32 size = std::min(address_size, draw_size); |
| @@ -1289,8 +1295,8 @@ void BufferCache<P>::UpdateIndexBuffer() { | |||
| 1289 | 1295 | ||
| 1290 | template <class P> | 1296 | template <class P> |
| 1291 | void BufferCache<P>::UpdateVertexBuffers() { | 1297 | void BufferCache<P>::UpdateVertexBuffers() { |
| 1292 | auto& flags = maxwell3d.dirty.flags; | 1298 | auto& flags = maxwell3d->dirty.flags; |
| 1293 | if (!maxwell3d.dirty.flags[Dirty::VertexBuffers]) { | 1299 | if (!maxwell3d->dirty.flags[Dirty::VertexBuffers]) { |
| 1294 | return; | 1300 | return; |
| 1295 | } | 1301 | } |
| 1296 | flags[Dirty::VertexBuffers] = false; | 1302 | flags[Dirty::VertexBuffers] = false; |
| @@ -1302,33 +1308,25 @@ void BufferCache<P>::UpdateVertexBuffers() { | |||
| 1302 | 1308 | ||
| 1303 | template <class P> | 1309 | template <class P> |
| 1304 | void BufferCache<P>::UpdateVertexBuffer(u32 index) { | 1310 | void BufferCache<P>::UpdateVertexBuffer(u32 index) { |
| 1305 | if (!maxwell3d.dirty.flags[Dirty::VertexBuffer0 + index]) { | 1311 | if (!maxwell3d->dirty.flags[Dirty::VertexBuffer0 + index]) { |
| 1306 | return; | 1312 | return; |
| 1307 | } | 1313 | } |
| 1308 | const auto& array = maxwell3d.regs.vertex_array[index]; | 1314 | const auto& array = maxwell3d->regs.vertex_array[index]; |
| 1309 | const auto& limit = maxwell3d.regs.vertex_array_limit[index]; | 1315 | const auto& limit = maxwell3d->regs.vertex_array_limit[index]; |
| 1310 | const GPUVAddr gpu_addr_begin = array.StartAddress(); | 1316 | const GPUVAddr gpu_addr_begin = array.StartAddress(); |
| 1311 | const GPUVAddr gpu_addr_end = limit.LimitAddress() + 1; | 1317 | const GPUVAddr gpu_addr_end = limit.LimitAddress() + 1; |
| 1312 | const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr_begin); | 1318 | const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin); |
| 1313 | u32 address_size = static_cast<u32>(gpu_addr_end - gpu_addr_begin); | 1319 | u32 address_size = static_cast<u32>( |
| 1314 | if (address_size >= 64_MiB) { | 1320 | std::min(gpu_addr_end - gpu_addr_begin, static_cast<u64>(std::numeric_limits<u32>::max()))); |
| 1315 | // Reported vertex buffer size is very large, cap to mapped buffer size | 1321 | if (array.enable == 0 || address_size == 0 || !cpu_addr) { |
| 1316 | GPUVAddr submapped_addr_end = gpu_addr_begin; | ||
| 1317 | |||
| 1318 | const auto ranges{gpu_memory.GetSubmappedRange(gpu_addr_begin, address_size)}; | ||
| 1319 | if (ranges.size() > 0) { | ||
| 1320 | const auto& [addr, size] = *ranges.begin(); | ||
| 1321 | submapped_addr_end = addr + size; | ||
| 1322 | } | ||
| 1323 | |||
| 1324 | address_size = | ||
| 1325 | std::min(address_size, static_cast<u32>(submapped_addr_end - gpu_addr_begin)); | ||
| 1326 | } | ||
| 1327 | const u32 size = address_size; // TODO: Analyze stride and number of vertices | ||
| 1328 | if (array.enable == 0 || size == 0 || !cpu_addr) { | ||
| 1329 | vertex_buffers[index] = NULL_BINDING; | 1322 | vertex_buffers[index] = NULL_BINDING; |
| 1330 | return; | 1323 | return; |
| 1331 | } | 1324 | } |
| 1325 | if (!gpu_memory->IsWithinGPUAddressRange(gpu_addr_end)) { | ||
| 1326 | address_size = | ||
| 1327 | static_cast<u32>(gpu_memory->MaxContinousRange(gpu_addr_begin, address_size)); | ||
| 1328 | } | ||
| 1329 | const u32 size = address_size; // TODO: Analyze stride and number of vertices | ||
| 1332 | vertex_buffers[index] = Binding{ | 1330 | vertex_buffers[index] = Binding{ |
| 1333 | .cpu_addr = *cpu_addr, | 1331 | .cpu_addr = *cpu_addr, |
| 1334 | .size = size, | 1332 | .size = size, |
| @@ -1382,7 +1380,7 @@ void BufferCache<P>::UpdateTextureBuffers(size_t stage) { | |||
| 1382 | 1380 | ||
| 1383 | template <class P> | 1381 | template <class P> |
| 1384 | void BufferCache<P>::UpdateTransformFeedbackBuffers() { | 1382 | void BufferCache<P>::UpdateTransformFeedbackBuffers() { |
| 1385 | if (maxwell3d.regs.tfb_enabled == 0) { | 1383 | if (maxwell3d->regs.tfb_enabled == 0) { |
| 1386 | return; | 1384 | return; |
| 1387 | } | 1385 | } |
| 1388 | for (u32 index = 0; index < NUM_TRANSFORM_FEEDBACK_BUFFERS; ++index) { | 1386 | for (u32 index = 0; index < NUM_TRANSFORM_FEEDBACK_BUFFERS; ++index) { |
| @@ -1392,10 +1390,10 @@ void BufferCache<P>::UpdateTransformFeedbackBuffers() { | |||
| 1392 | 1390 | ||
| 1393 | template <class P> | 1391 | template <class P> |
| 1394 | void BufferCache<P>::UpdateTransformFeedbackBuffer(u32 index) { | 1392 | void BufferCache<P>::UpdateTransformFeedbackBuffer(u32 index) { |
| 1395 | const auto& binding = maxwell3d.regs.tfb_bindings[index]; | 1393 | const auto& binding = maxwell3d->regs.tfb_bindings[index]; |
| 1396 | const GPUVAddr gpu_addr = binding.Address() + binding.buffer_offset; | 1394 | const GPUVAddr gpu_addr = binding.Address() + binding.buffer_offset; |
| 1397 | const u32 size = binding.buffer_size; | 1395 | const u32 size = binding.buffer_size; |
| 1398 | const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); | 1396 | const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); |
| 1399 | if (binding.buffer_enable == 0 || size == 0 || !cpu_addr) { | 1397 | if (binding.buffer_enable == 0 || size == 0 || !cpu_addr) { |
| 1400 | transform_feedback_buffers[index] = NULL_BINDING; | 1398 | transform_feedback_buffers[index] = NULL_BINDING; |
| 1401 | return; | 1399 | return; |
| @@ -1414,10 +1412,10 @@ void BufferCache<P>::UpdateComputeUniformBuffers() { | |||
| 1414 | ForEachEnabledBit(enabled_compute_uniform_buffer_mask, [&](u32 index) { | 1412 | ForEachEnabledBit(enabled_compute_uniform_buffer_mask, [&](u32 index) { |
| 1415 | Binding& binding = compute_uniform_buffers[index]; | 1413 | Binding& binding = compute_uniform_buffers[index]; |
| 1416 | binding = NULL_BINDING; | 1414 | binding = NULL_BINDING; |
| 1417 | const auto& launch_desc = kepler_compute.launch_description; | 1415 | const auto& launch_desc = kepler_compute->launch_description; |
| 1418 | if (((launch_desc.const_buffer_enable_mask >> index) & 1) != 0) { | 1416 | if (((launch_desc.const_buffer_enable_mask >> index) & 1) != 0) { |
| 1419 | const auto& cbuf = launch_desc.const_buffer_config[index]; | 1417 | const auto& cbuf = launch_desc.const_buffer_config[index]; |
| 1420 | const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(cbuf.Address()); | 1418 | const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(cbuf.Address()); |
| 1421 | if (cpu_addr) { | 1419 | if (cpu_addr) { |
| 1422 | binding.cpu_addr = *cpu_addr; | 1420 | binding.cpu_addr = *cpu_addr; |
| 1423 | binding.size = cbuf.size; | 1421 | binding.size = cbuf.size; |
| @@ -1567,6 +1565,8 @@ BufferId BufferCache<P>::CreateBuffer(VAddr cpu_addr, u32 wanted_size) { | |||
| 1567 | const OverlapResult overlap = ResolveOverlaps(cpu_addr, wanted_size); | 1565 | const OverlapResult overlap = ResolveOverlaps(cpu_addr, wanted_size); |
| 1568 | const u32 size = static_cast<u32>(overlap.end - overlap.begin); | 1566 | const u32 size = static_cast<u32>(overlap.end - overlap.begin); |
| 1569 | const BufferId new_buffer_id = slot_buffers.insert(runtime, rasterizer, overlap.begin, size); | 1567 | const BufferId new_buffer_id = slot_buffers.insert(runtime, rasterizer, overlap.begin, size); |
| 1568 | auto& new_buffer = slot_buffers[new_buffer_id]; | ||
| 1569 | runtime.ClearBuffer(new_buffer, 0, new_buffer.SizeBytes(), 0); | ||
| 1570 | for (const BufferId overlap_id : overlap.ids) { | 1570 | for (const BufferId overlap_id : overlap.ids) { |
| 1571 | JoinOverlap(new_buffer_id, overlap_id, !overlap.has_stream_leap); | 1571 | JoinOverlap(new_buffer_id, overlap_id, !overlap.has_stream_leap); |
| 1572 | } | 1572 | } |
| @@ -1695,7 +1695,7 @@ void BufferCache<P>::MappedUploadMemory(Buffer& buffer, u64 total_size_bytes, | |||
| 1695 | 1695 | ||
| 1696 | template <class P> | 1696 | template <class P> |
| 1697 | bool BufferCache<P>::InlineMemory(VAddr dest_address, size_t copy_size, | 1697 | bool BufferCache<P>::InlineMemory(VAddr dest_address, size_t copy_size, |
| 1698 | std::span<u8> inlined_buffer) { | 1698 | std::span<const u8> inlined_buffer) { |
| 1699 | const bool is_dirty = IsRegionRegistered(dest_address, copy_size); | 1699 | const bool is_dirty = IsRegionRegistered(dest_address, copy_size); |
| 1700 | if (!is_dirty) { | 1700 | if (!is_dirty) { |
| 1701 | return false; | 1701 | return false; |
| @@ -1831,7 +1831,7 @@ void BufferCache<P>::NotifyBufferDeletion() { | |||
| 1831 | dirty_uniform_buffers.fill(~u32{0}); | 1831 | dirty_uniform_buffers.fill(~u32{0}); |
| 1832 | uniform_buffer_binding_sizes.fill({}); | 1832 | uniform_buffer_binding_sizes.fill({}); |
| 1833 | } | 1833 | } |
| 1834 | auto& flags = maxwell3d.dirty.flags; | 1834 | auto& flags = maxwell3d->dirty.flags; |
| 1835 | flags[Dirty::IndexBuffer] = true; | 1835 | flags[Dirty::IndexBuffer] = true; |
| 1836 | flags[Dirty::VertexBuffers] = true; | 1836 | flags[Dirty::VertexBuffers] = true; |
| 1837 | for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) { | 1837 | for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) { |
| @@ -1841,16 +1841,18 @@ void BufferCache<P>::NotifyBufferDeletion() { | |||
| 1841 | } | 1841 | } |
| 1842 | 1842 | ||
| 1843 | template <class P> | 1843 | template <class P> |
| 1844 | typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr ssbo_addr) const { | 1844 | typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr ssbo_addr, |
| 1845 | const GPUVAddr gpu_addr = gpu_memory.Read<u64>(ssbo_addr); | 1845 | bool is_written) const { |
| 1846 | const u32 size = gpu_memory.Read<u32>(ssbo_addr + 8); | 1846 | const GPUVAddr gpu_addr = gpu_memory->Read<u64>(ssbo_addr); |
| 1847 | const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); | 1847 | const u32 size = gpu_memory->Read<u32>(ssbo_addr + 8); |
| 1848 | const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); | ||
| 1848 | if (!cpu_addr || size == 0) { | 1849 | if (!cpu_addr || size == 0) { |
| 1849 | return NULL_BINDING; | 1850 | return NULL_BINDING; |
| 1850 | } | 1851 | } |
| 1852 | const VAddr cpu_end = Common::AlignUp(*cpu_addr + size, Core::Memory::YUZU_PAGESIZE); | ||
| 1851 | const Binding binding{ | 1853 | const Binding binding{ |
| 1852 | .cpu_addr = *cpu_addr, | 1854 | .cpu_addr = *cpu_addr, |
| 1853 | .size = size, | 1855 | .size = is_written ? size : static_cast<u32>(cpu_end - *cpu_addr), |
| 1854 | .buffer_id = BufferId{}, | 1856 | .buffer_id = BufferId{}, |
| 1855 | }; | 1857 | }; |
| 1856 | return binding; | 1858 | return binding; |
| @@ -1859,7 +1861,7 @@ typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr s | |||
| 1859 | template <class P> | 1861 | template <class P> |
| 1860 | typename BufferCache<P>::TextureBufferBinding BufferCache<P>::GetTextureBufferBinding( | 1862 | typename BufferCache<P>::TextureBufferBinding BufferCache<P>::GetTextureBufferBinding( |
| 1861 | GPUVAddr gpu_addr, u32 size, PixelFormat format) { | 1863 | GPUVAddr gpu_addr, u32 size, PixelFormat format) { |
| 1862 | const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); | 1864 | const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); |
| 1863 | TextureBufferBinding binding; | 1865 | TextureBufferBinding binding; |
| 1864 | if (!cpu_addr || size == 0) { | 1866 | if (!cpu_addr || size == 0) { |
| 1865 | binding.cpu_addr = 0; | 1867 | binding.cpu_addr = 0; |
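Per the StorageBufferBinding hunk above, read-only storage buffers are now padded up to the next CPU page boundary, while buffers the shader writes keep their exact size so writes cannot spill past the guest-visible region. A short sketch of that size choice; the page-size constant here is an assumption standing in for Core::Memory::YUZU_PAGESIZE:

```cpp
#include <cstdint>

constexpr std::uint64_t PAGE_SIZE = 0x1000;  // assumed guest page size

// Round up to a power-of-two alignment.
constexpr std::uint64_t AlignUp(std::uint64_t value, std::uint64_t align) {
    return (value + align - 1) & ~(align - 1);
}

// Mirrors the new binding size: pad reads to the page end, keep writes exact.
std::uint32_t BoundSize(std::uint64_t cpu_addr, std::uint32_t size, bool is_written) {
    const std::uint64_t cpu_end = AlignUp(cpu_addr + size, PAGE_SIZE);
    return is_written ? size : static_cast<std::uint32_t>(cpu_end - cpu_addr);
}
```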
diff --git a/src/video_core/cdma_pusher.cpp b/src/video_core/cdma_pusher.cpp index 8e890a85e..28a2d2090 100644 --- a/src/video_core/cdma_pusher.cpp +++ b/src/video_core/cdma_pusher.cpp | |||
| @@ -2,20 +2,22 @@ | |||
| 2 | // SPDX-License-Identifier: MIT | 2 | // SPDX-License-Identifier: MIT |
| 3 | 3 | ||
| 4 | #include <bit> | 4 | #include <bit> |
| 5 | #include "command_classes/host1x.h" | ||
| 6 | #include "command_classes/nvdec.h" | ||
| 7 | #include "command_classes/vic.h" | ||
| 8 | #include "video_core/cdma_pusher.h" | 5 | #include "video_core/cdma_pusher.h" |
| 9 | #include "video_core/command_classes/sync_manager.h" | ||
| 10 | #include "video_core/engines/maxwell_3d.h" | 6 | #include "video_core/engines/maxwell_3d.h" |
| 11 | #include "video_core/gpu.h" | 7 | #include "video_core/host1x/control.h" |
| 8 | #include "video_core/host1x/host1x.h" | ||
| 9 | #include "video_core/host1x/nvdec.h" | ||
| 10 | #include "video_core/host1x/nvdec_common.h" | ||
| 11 | #include "video_core/host1x/sync_manager.h" | ||
| 12 | #include "video_core/host1x/vic.h" | ||
| 13 | #include "video_core/memory_manager.h" | ||
| 12 | 14 | ||
| 13 | namespace Tegra { | 15 | namespace Tegra { |
| 14 | CDmaPusher::CDmaPusher(GPU& gpu_) | 16 | CDmaPusher::CDmaPusher(Host1x::Host1x& host1x_) |
| 15 | : gpu{gpu_}, nvdec_processor(std::make_shared<Nvdec>(gpu)), | 17 | : host1x{host1x_}, nvdec_processor(std::make_shared<Host1x::Nvdec>(host1x)), |
| 16 | vic_processor(std::make_unique<Vic>(gpu, nvdec_processor)), | 18 | vic_processor(std::make_unique<Host1x::Vic>(host1x, nvdec_processor)), |
| 17 | host1x_processor(std::make_unique<Host1x>(gpu)), | 19 | host1x_processor(std::make_unique<Host1x::Control>(host1x)), |
| 18 | sync_manager(std::make_unique<SyncptIncrManager>(gpu)) {} | 20 | sync_manager(std::make_unique<Host1x::SyncptIncrManager>(host1x)) {} |
| 19 | 21 | ||
| 20 | CDmaPusher::~CDmaPusher() = default; | 22 | CDmaPusher::~CDmaPusher() = default; |
| 21 | 23 | ||
| @@ -109,16 +111,17 @@ void CDmaPusher::ExecuteCommand(u32 state_offset, u32 data) { | |||
| 109 | case ThiMethod::SetMethod1: | 111 | case ThiMethod::SetMethod1: |
| 110 | LOG_DEBUG(Service_NVDRV, "VIC method 0x{:X}, Args=({})", | 112 | LOG_DEBUG(Service_NVDRV, "VIC method 0x{:X}, Args=({})", |
| 111 | static_cast<u32>(vic_thi_state.method_0), data); | 113 | static_cast<u32>(vic_thi_state.method_0), data); |
| 112 | vic_processor->ProcessMethod(static_cast<Vic::Method>(vic_thi_state.method_0), data); | 114 | vic_processor->ProcessMethod(static_cast<Host1x::Vic::Method>(vic_thi_state.method_0), |
| 115 | data); | ||
| 113 | break; | 116 | break; |
| 114 | default: | 117 | default: |
| 115 | break; | 118 | break; |
| 116 | } | 119 | } |
| 117 | break; | 120 | break; |
| 118 | case ChClassId::Host1x: | 121 | case ChClassId::Control: |
| 119 | // This device is mainly for syncpoint synchronization | 122 | // This device is mainly for syncpoint synchronization |
| 120 | LOG_DEBUG(Service_NVDRV, "Host1X Class Method"); | 123 | LOG_DEBUG(Service_NVDRV, "Host1X Class Method"); |
| 121 | host1x_processor->ProcessMethod(static_cast<Host1x::Method>(offset), data); | 124 | host1x_processor->ProcessMethod(static_cast<Host1x::Control::Method>(offset), data); |
| 122 | break; | 125 | break; |
| 123 | default: | 126 | default: |
| 124 | UNIMPLEMENTED_MSG("Current class not implemented {:X}", static_cast<u32>(current_class)); | 127 | UNIMPLEMENTED_MSG("Current class not implemented {:X}", static_cast<u32>(current_class)); |
diff --git a/src/video_core/cdma_pusher.h b/src/video_core/cdma_pusher.h index d6ffef95f..83112dfce 100644 --- a/src/video_core/cdma_pusher.h +++ b/src/video_core/cdma_pusher.h | |||
| @@ -12,11 +12,13 @@ | |||
| 12 | 12 | ||
| 13 | namespace Tegra { | 13 | namespace Tegra { |
| 14 | 14 | ||
| 15 | class GPU; | 15 | namespace Host1x { |
| 16 | class Control; | ||
| 16 | class Host1x; | 17 | class Host1x; |
| 17 | class Nvdec; | 18 | class Nvdec; |
| 18 | class SyncptIncrManager; | 19 | class SyncptIncrManager; |
| 19 | class Vic; | 20 | class Vic; |
| 21 | } // namespace Host1x | ||
| 20 | 22 | ||
| 21 | enum class ChSubmissionMode : u32 { | 23 | enum class ChSubmissionMode : u32 { |
| 22 | SetClass = 0, | 24 | SetClass = 0, |
| @@ -30,7 +32,7 @@ enum class ChSubmissionMode : u32 { | |||
| 30 | 32 | ||
| 31 | enum class ChClassId : u32 { | 33 | enum class ChClassId : u32 { |
| 32 | NoClass = 0x0, | 34 | NoClass = 0x0, |
| 33 | Host1x = 0x1, | 35 | Control = 0x1, |
| 34 | VideoEncodeMpeg = 0x20, | 36 | VideoEncodeMpeg = 0x20, |
| 35 | VideoEncodeNvEnc = 0x21, | 37 | VideoEncodeNvEnc = 0x21, |
| 36 | VideoStreamingVi = 0x30, | 38 | VideoStreamingVi = 0x30, |
| @@ -88,7 +90,7 @@ enum class ThiMethod : u32 { | |||
| 88 | 90 | ||
| 89 | class CDmaPusher { | 91 | class CDmaPusher { |
| 90 | public: | 92 | public: |
| 91 | explicit CDmaPusher(GPU& gpu_); | 93 | explicit CDmaPusher(Host1x::Host1x& host1x); |
| 92 | ~CDmaPusher(); | 94 | ~CDmaPusher(); |
| 93 | 95 | ||
| 94 | /// Process the command entry | 96 | /// Process the command entry |
| @@ -101,11 +103,11 @@ private: | |||
| 101 | /// Write arguments value to the ThiRegisters member at the specified offset | 103 | /// Write arguments value to the ThiRegisters member at the specified offset |
| 102 | void ThiStateWrite(ThiRegisters& state, u32 offset, u32 argument); | 104 | void ThiStateWrite(ThiRegisters& state, u32 offset, u32 argument); |
| 103 | 105 | ||
| 104 | GPU& gpu; | 106 | Host1x::Host1x& host1x; |
| 105 | std::shared_ptr<Tegra::Nvdec> nvdec_processor; | 107 | std::shared_ptr<Tegra::Host1x::Nvdec> nvdec_processor; |
| 106 | std::unique_ptr<Tegra::Vic> vic_processor; | 108 | std::unique_ptr<Tegra::Host1x::Vic> vic_processor; |
| 107 | std::unique_ptr<Tegra::Host1x> host1x_processor; | 109 | std::unique_ptr<Tegra::Host1x::Control> host1x_processor; |
| 108 | std::unique_ptr<SyncptIncrManager> sync_manager; | 110 | std::unique_ptr<Host1x::SyncptIncrManager> sync_manager; |
| 109 | ChClassId current_class{}; | 111 | ChClassId current_class{}; |
| 110 | ThiRegisters vic_thi_state{}; | 112 | ThiRegisters vic_thi_state{}; |
| 111 | ThiRegisters nvdec_thi_state{}; | 113 | ThiRegisters nvdec_thi_state{}; |
diff --git a/src/video_core/command_classes/host1x.cpp b/src/video_core/command_classes/host1x.cpp deleted file mode 100644 index 11855fe10..000000000 --- a/src/video_core/command_classes/host1x.cpp +++ /dev/null | |||
| @@ -1,29 +0,0 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #include "common/assert.h" | ||
| 5 | #include "video_core/command_classes/host1x.h" | ||
| 6 | #include "video_core/gpu.h" | ||
| 7 | |||
| 8 | Tegra::Host1x::Host1x(GPU& gpu_) : gpu(gpu_) {} | ||
| 9 | |||
| 10 | Tegra::Host1x::~Host1x() = default; | ||
| 11 | |||
| 12 | void Tegra::Host1x::ProcessMethod(Method method, u32 argument) { | ||
| 13 | switch (method) { | ||
| 14 | case Method::LoadSyncptPayload32: | ||
| 15 | syncpoint_value = argument; | ||
| 16 | break; | ||
| 17 | case Method::WaitSyncpt: | ||
| 18 | case Method::WaitSyncpt32: | ||
| 19 | Execute(argument); | ||
| 20 | break; | ||
| 21 | default: | ||
| 22 | UNIMPLEMENTED_MSG("Host1x method 0x{:X}", static_cast<u32>(method)); | ||
| 23 | break; | ||
| 24 | } | ||
| 25 | } | ||
| 26 | |||
| 27 | void Tegra::Host1x::Execute(u32 data) { | ||
| 28 | gpu.WaitFence(data, syncpoint_value); | ||
| 29 | } | ||
diff --git a/src/video_core/control/channel_state.cpp b/src/video_core/control/channel_state.cpp new file mode 100644 index 000000000..cdecc3a91 --- /dev/null +++ b/src/video_core/control/channel_state.cpp | |||
| @@ -0,0 +1,40 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | |||
| 4 | #include "common/assert.h" | ||
| 5 | #include "video_core/control/channel_state.h" | ||
| 6 | #include "video_core/dma_pusher.h" | ||
| 7 | #include "video_core/engines/fermi_2d.h" | ||
| 8 | #include "video_core/engines/kepler_compute.h" | ||
| 9 | #include "video_core/engines/kepler_memory.h" | ||
| 10 | #include "video_core/engines/maxwell_3d.h" | ||
| 11 | #include "video_core/engines/maxwell_dma.h" | ||
| 12 | #include "video_core/engines/puller.h" | ||
| 13 | #include "video_core/memory_manager.h" | ||
| 14 | |||
| 15 | namespace Tegra::Control { | ||
| 16 | |||
| 17 | ChannelState::ChannelState(s32 bind_id_) : bind_id{bind_id_}, initialized{} {} | ||
| 18 | |||
| 19 | void ChannelState::Init(Core::System& system, GPU& gpu) { | ||
| 20 | ASSERT(memory_manager); | ||
| 21 | dma_pusher = std::make_unique<Tegra::DmaPusher>(system, gpu, *memory_manager, *this); | ||
| 22 | maxwell_3d = std::make_unique<Engines::Maxwell3D>(system, *memory_manager); | ||
| 23 | fermi_2d = std::make_unique<Engines::Fermi2D>(); | ||
| 24 | kepler_compute = std::make_unique<Engines::KeplerCompute>(system, *memory_manager); | ||
| 25 | maxwell_dma = std::make_unique<Engines::MaxwellDMA>(system, *memory_manager); | ||
| 26 | kepler_memory = std::make_unique<Engines::KeplerMemory>(system, *memory_manager); | ||
| 27 | initialized = true; | ||
| 28 | } | ||
| 29 | |||
| 30 | void ChannelState::BindRasterizer(VideoCore::RasterizerInterface* rasterizer) { | ||
| 31 | dma_pusher->BindRasterizer(rasterizer); | ||
| 32 | memory_manager->BindRasterizer(rasterizer); | ||
| 33 | maxwell_3d->BindRasterizer(rasterizer); | ||
| 34 | fermi_2d->BindRasterizer(rasterizer); | ||
| 35 | kepler_memory->BindRasterizer(rasterizer); | ||
| 36 | kepler_compute->BindRasterizer(rasterizer); | ||
| 37 | maxwell_dma->BindRasterizer(rasterizer); | ||
| 38 | } | ||
| 39 | |||
| 40 | } // namespace Tegra::Control | ||
diff --git a/src/video_core/control/channel_state.h b/src/video_core/control/channel_state.h new file mode 100644 index 000000000..3a7b9872c --- /dev/null +++ b/src/video_core/control/channel_state.h | |||
| @@ -0,0 +1,68 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <memory> | ||
| 7 | |||
| 8 | #include "common/common_types.h" | ||
| 9 | |||
| 10 | namespace Core { | ||
| 11 | class System; | ||
| 12 | } | ||
| 13 | |||
| 14 | namespace VideoCore { | ||
| 15 | class RasterizerInterface; | ||
| 16 | } | ||
| 17 | |||
| 18 | namespace Tegra { | ||
| 19 | |||
| 20 | class GPU; | ||
| 21 | |||
| 22 | namespace Engines { | ||
| 23 | class Puller; | ||
| 24 | class Fermi2D; | ||
| 25 | class Maxwell3D; | ||
| 26 | class MaxwellDMA; | ||
| 27 | class KeplerCompute; | ||
| 28 | class KeplerMemory; | ||
| 29 | } // namespace Engines | ||
| 30 | |||
| 31 | class MemoryManager; | ||
| 32 | class DmaPusher; | ||
| 33 | |||
| 34 | namespace Control { | ||
| 35 | |||
| 36 | struct ChannelState { | ||
| 37 | explicit ChannelState(s32 bind_id); | ||
| 38 | ChannelState(const ChannelState& state) = delete; | ||
| 39 | ChannelState& operator=(const ChannelState&) = delete; | ||
| 40 | ChannelState(ChannelState&& other) noexcept = default; | ||
| 41 | ChannelState& operator=(ChannelState&& other) noexcept = default; | ||
| 42 | |||
| 43 | void Init(Core::System& system, GPU& gpu); | ||
| 44 | |||
| 45 | void BindRasterizer(VideoCore::RasterizerInterface* rasterizer); | ||
| 46 | |||
| 47 | s32 bind_id = -1; | ||
| 48 | /// 3D engine | ||
| 49 | std::unique_ptr<Engines::Maxwell3D> maxwell_3d; | ||
| 50 | /// 2D engine | ||
| 51 | std::unique_ptr<Engines::Fermi2D> fermi_2d; | ||
| 52 | /// Compute engine | ||
| 53 | std::unique_ptr<Engines::KeplerCompute> kepler_compute; | ||
| 54 | /// DMA engine | ||
| 55 | std::unique_ptr<Engines::MaxwellDMA> maxwell_dma; | ||
| 56 | /// Inline memory engine | ||
| 57 | std::unique_ptr<Engines::KeplerMemory> kepler_memory; | ||
| 58 | |||
| 59 | std::shared_ptr<MemoryManager> memory_manager; | ||
| 60 | |||
| 61 | std::unique_ptr<DmaPusher> dma_pusher; | ||
| 62 | |||
| 63 | bool initialized{}; | ||
| 64 | }; | ||
| 65 | |||
| 66 | } // namespace Control | ||
| 67 | |||
| 68 | } // namespace Tegra | ||
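Editorial note, not part of the diff: a minimal sketch of how a ChannelState is expected to be wired up, assuming the interfaces declared above. The MemoryManager construction/assignment is an assumption here; channel_state.cpp asserts that memory_manager is set before Init runs.

    // Illustrative setup flow only; everything outside ChannelState's API is an assumption.
    auto channel = std::make_shared<Tegra::Control::ChannelState>(/*bind_id=*/0);
    channel->memory_manager = gpu_address_space;   // assumed shared_ptr<Tegra::MemoryManager>
    channel->Init(system, gpu);                    // builds dma_pusher plus the five engines
    channel->BindRasterizer(&rasterizer);          // forwards the rasterizer to every engine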
diff --git a/src/video_core/control/channel_state_cache.cpp b/src/video_core/control/channel_state_cache.cpp new file mode 100644 index 000000000..4ebeb6356 --- /dev/null +++ b/src/video_core/control/channel_state_cache.cpp | |||
| @@ -0,0 +1,14 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | |||
| 4 | #include "video_core/control/channel_state_cache.inc" | ||
| 5 | |||
| 6 | namespace VideoCommon { | ||
| 7 | |||
| 8 | ChannelInfo::ChannelInfo(Tegra::Control::ChannelState& channel_state) | ||
| 9 | : maxwell3d{*channel_state.maxwell_3d}, kepler_compute{*channel_state.kepler_compute}, | ||
| 10 | gpu_memory{*channel_state.memory_manager} {} | ||
| 11 | |||
| 12 | template class VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo>; | ||
| 13 | |||
| 14 | } // namespace VideoCommon | ||
diff --git a/src/video_core/control/channel_state_cache.h b/src/video_core/control/channel_state_cache.h new file mode 100644 index 000000000..584a0c26c --- /dev/null +++ b/src/video_core/control/channel_state_cache.h | |||
| @@ -0,0 +1,101 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <deque> | ||
| 7 | #include <limits> | ||
| 8 | #include <mutex> | ||
| 9 | #include <optional> | ||
| 10 | #include <unordered_map> | ||
| 11 | #include <vector> | ||
| 12 | |||
| 13 | #include "common/common_types.h" | ||
| 14 | |||
| 15 | namespace Tegra { | ||
| 16 | |||
| 17 | namespace Engines { | ||
| 18 | class Maxwell3D; | ||
| 19 | class KeplerCompute; | ||
| 20 | } // namespace Engines | ||
| 21 | |||
| 22 | class MemoryManager; | ||
| 23 | |||
| 24 | namespace Control { | ||
| 25 | struct ChannelState; | ||
| 26 | } | ||
| 27 | |||
| 28 | } // namespace Tegra | ||
| 29 | |||
| 30 | namespace VideoCommon { | ||
| 31 | |||
| 32 | class ChannelInfo { | ||
| 33 | public: | ||
| 34 | ChannelInfo() = delete; | ||
| 35 | explicit ChannelInfo(Tegra::Control::ChannelState& state); | ||
| 36 | ChannelInfo(const ChannelInfo& state) = delete; | ||
| 37 | ChannelInfo& operator=(const ChannelInfo&) = delete; | ||
| 38 | ChannelInfo(ChannelInfo&& other) = default; | ||
| 39 | ChannelInfo& operator=(ChannelInfo&& other) = default; | ||
| 40 | |||
| 41 | Tegra::Engines::Maxwell3D& maxwell3d; | ||
| 42 | Tegra::Engines::KeplerCompute& kepler_compute; | ||
| 43 | Tegra::MemoryManager& gpu_memory; | ||
| 44 | }; | ||
| 45 | |||
| 46 | template <class P> | ||
| 47 | class ChannelSetupCaches { | ||
| 48 | public: | ||
| 49 | /// Operations for setting the channel of execution. | ||
| 50 | virtual ~ChannelSetupCaches(); | ||
| 51 | |||
| 52 | /// Create channel state. | ||
| 53 | virtual void CreateChannel(Tegra::Control::ChannelState& channel); | ||
| 54 | |||
| 55 | /// Bind a channel for execution. | ||
| 56 | void BindToChannel(s32 id); | ||
| 57 | |||
| 58 | /// Erase channel's state. | ||
| 59 | void EraseChannel(s32 id); | ||
| 60 | |||
| 61 | Tegra::MemoryManager* GetFromID(size_t id) const { | ||
| 62 | std::unique_lock<std::mutex> lk(config_mutex); | ||
| 63 | const auto ref = address_spaces.find(id); | ||
| 64 | return ref->second.gpu_memory; | ||
| 65 | } | ||
| 66 | |||
| 67 | std::optional<size_t> getStorageID(size_t id) const { | ||
| 68 | std::unique_lock<std::mutex> lk(config_mutex); | ||
| 69 | const auto ref = address_spaces.find(id); | ||
| 70 | if (ref == address_spaces.end()) { | ||
| 71 | return std::nullopt; | ||
| 72 | } | ||
| 73 | return ref->second.storage_id; | ||
| 74 | } | ||
| 75 | |||
| 76 | protected: | ||
| 77 | static constexpr size_t UNSET_CHANNEL{std::numeric_limits<size_t>::max()}; | ||
| 78 | |||
| 79 | P* channel_state; | ||
| 80 | size_t current_channel_id{UNSET_CHANNEL}; | ||
| 81 | size_t current_address_space{}; | ||
| 82 | Tegra::Engines::Maxwell3D* maxwell3d; | ||
| 83 | Tegra::Engines::KeplerCompute* kepler_compute; | ||
| 84 | Tegra::MemoryManager* gpu_memory; | ||
| 85 | |||
| 86 | std::deque<P> channel_storage; | ||
| 87 | std::deque<size_t> free_channel_ids; | ||
| 88 | std::unordered_map<s32, size_t> channel_map; | ||
| 89 | std::vector<size_t> active_channel_ids; | ||
| 90 | struct AddresSpaceRef { | ||
| 91 | size_t ref_count; | ||
| 92 | size_t storage_id; | ||
| 93 | Tegra::MemoryManager* gpu_memory; | ||
| 94 | }; | ||
| 95 | std::unordered_map<size_t, AddresSpaceRef> address_spaces; | ||
| 96 | mutable std::mutex config_mutex; | ||
| 97 | |||
| 98 | virtual void OnGPUASRegister([[maybe_unused]] size_t map_id) {} | ||
| 99 | }; | ||
| 100 | |||
| 101 | } // namespace VideoCommon | ||
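Editorial note, not part of the diff: a rough usage sketch of the new template, with DummyCache and channel as illustrative assumptions. A cache derives from ChannelSetupCaches and drives the channel lifecycle through the three public calls declared above.

    // Illustrative only; a real cache would also override OnGPUASRegister as needed.
    struct DummyCache : VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {};

    DummyCache cache;
    cache.CreateChannel(*channel);          // registers bind_id and refcounts its address space
    cache.BindToChannel(channel->bind_id);  // maxwell3d/kepler_compute/gpu_memory now track this channel
    cache.EraseChannel(channel->bind_id);   // returns the slot to free_channel_ids for reuse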
diff --git a/src/video_core/control/channel_state_cache.inc b/src/video_core/control/channel_state_cache.inc new file mode 100644 index 000000000..460313893 --- /dev/null +++ b/src/video_core/control/channel_state_cache.inc | |||
| @@ -0,0 +1,86 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | |||
| 4 | #include <algorithm> | ||
| 5 | |||
| 6 | #include "video_core/control/channel_state.h" | ||
| 7 | #include "video_core/control/channel_state_cache.h" | ||
| 8 | #include "video_core/engines/kepler_compute.h" | ||
| 9 | #include "video_core/engines/maxwell_3d.h" | ||
| 10 | #include "video_core/memory_manager.h" | ||
| 11 | |||
| 12 | namespace VideoCommon { | ||
| 13 | |||
| 14 | template <class P> | ||
| 15 | ChannelSetupCaches<P>::~ChannelSetupCaches() = default; | ||
| 16 | |||
| 17 | template <class P> | ||
| 18 | void ChannelSetupCaches<P>::CreateChannel(struct Tegra::Control::ChannelState& channel) { | ||
| 19 | std::unique_lock<std::mutex> lk(config_mutex); | ||
| 20 | ASSERT(channel_map.find(channel.bind_id) == channel_map.end() && channel.bind_id >= 0); | ||
| 21 | auto new_id = [this, &channel]() { | ||
| 22 | if (!free_channel_ids.empty()) { | ||
| 23 | auto id = free_channel_ids.front(); | ||
| 24 | free_channel_ids.pop_front(); | ||
| 25 | new (&channel_storage[id]) P(channel); | ||
| 26 | return id; | ||
| 27 | } | ||
| 28 | channel_storage.emplace_back(channel); | ||
| 29 | return channel_storage.size() - 1; | ||
| 30 | }(); | ||
| 31 | channel_map.emplace(channel.bind_id, new_id); | ||
| 32 | if (current_channel_id != UNSET_CHANNEL) { | ||
| 33 | channel_state = &channel_storage[current_channel_id]; | ||
| 34 | } | ||
| 35 | active_channel_ids.push_back(new_id); | ||
| 36 | auto as_it = address_spaces.find(channel.memory_manager->GetID()); | ||
| 37 | if (as_it != address_spaces.end()) { | ||
| 38 | as_it->second.ref_count++; | ||
| 39 | return; | ||
| 40 | } | ||
| 41 | AddresSpaceRef new_gpu_mem_ref{ | ||
| 42 | .ref_count = 1, | ||
| 43 | .storage_id = address_spaces.size(), | ||
| 44 | .gpu_memory = channel.memory_manager.get(), | ||
| 45 | }; | ||
| 46 | address_spaces.emplace(channel.memory_manager->GetID(), new_gpu_mem_ref); | ||
| 47 | OnGPUASRegister(channel.memory_manager->GetID()); | ||
| 48 | } | ||
| 49 | |||
| 50 | /// Bind a channel for execution. | ||
| 51 | template <class P> | ||
| 52 | void ChannelSetupCaches<P>::BindToChannel(s32 id) { | ||
| 53 | std::unique_lock<std::mutex> lk(config_mutex); | ||
| 54 | auto it = channel_map.find(id); | ||
| 55 | ASSERT(it != channel_map.end() && id >= 0); | ||
| 56 | current_channel_id = it->second; | ||
| 57 | channel_state = &channel_storage[current_channel_id]; | ||
| 58 | maxwell3d = &channel_state->maxwell3d; | ||
| 59 | kepler_compute = &channel_state->kepler_compute; | ||
| 60 | gpu_memory = &channel_state->gpu_memory; | ||
| 61 | current_address_space = gpu_memory->GetID(); | ||
| 62 | } | ||
| 63 | |||
| 64 | /// Erase channel's state. | ||
| 65 | template <class P> | ||
| 66 | void ChannelSetupCaches<P>::EraseChannel(s32 id) { | ||
| 67 | std::unique_lock<std::mutex> lk(config_mutex); | ||
| 68 | const auto it = channel_map.find(id); | ||
| 69 | ASSERT(it != channel_map.end() && id >= 0); | ||
| 70 | const auto this_id = it->second; | ||
| 71 | free_channel_ids.push_back(this_id); | ||
| 72 | channel_map.erase(it); | ||
| 73 | if (this_id == current_channel_id) { | ||
| 74 | current_channel_id = UNSET_CHANNEL; | ||
| 75 | channel_state = nullptr; | ||
| 76 | maxwell3d = nullptr; | ||
| 77 | kepler_compute = nullptr; | ||
| 78 | gpu_memory = nullptr; | ||
| 79 | } else if (current_channel_id != UNSET_CHANNEL) { | ||
| 80 | channel_state = &channel_storage[current_channel_id]; | ||
| 81 | } | ||
| 82 | active_channel_ids.erase( | ||
| 83 | std::find(active_channel_ids.begin(), active_channel_ids.end(), this_id)); | ||
| 84 | } | ||
| 85 | |||
| 86 | } // namespace VideoCommon | ||
diff --git a/src/video_core/control/scheduler.cpp b/src/video_core/control/scheduler.cpp new file mode 100644 index 000000000..f7cbe204e --- /dev/null +++ b/src/video_core/control/scheduler.cpp | |||
| @@ -0,0 +1,32 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2021 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | |||
| 4 | #include <memory> | ||
| 5 | |||
| 6 | #include "common/assert.h" | ||
| 7 | #include "video_core/control/channel_state.h" | ||
| 8 | #include "video_core/control/scheduler.h" | ||
| 9 | #include "video_core/gpu.h" | ||
| 10 | |||
| 11 | namespace Tegra::Control { | ||
| 12 | Scheduler::Scheduler(GPU& gpu_) : gpu{gpu_} {} | ||
| 13 | |||
| 14 | Scheduler::~Scheduler() = default; | ||
| 15 | |||
| 16 | void Scheduler::Push(s32 channel, CommandList&& entries) { | ||
| 17 | std::unique_lock lk(scheduling_guard); | ||
| 18 | auto it = channels.find(channel); | ||
| 19 | ASSERT(it != channels.end()); | ||
| 20 | auto channel_state = it->second; | ||
| 21 | gpu.BindChannel(channel_state->bind_id); | ||
| 22 | channel_state->dma_pusher->Push(std::move(entries)); | ||
| 23 | channel_state->dma_pusher->DispatchCalls(); | ||
| 24 | } | ||
| 25 | |||
| 26 | void Scheduler::DeclareChannel(std::shared_ptr<ChannelState> new_channel) { | ||
| 27 | s32 channel = new_channel->bind_id; | ||
| 28 | std::unique_lock lk(scheduling_guard); | ||
| 29 | channels.emplace(channel, new_channel); | ||
| 30 | } | ||
| 31 | |||
| 32 | } // namespace Tegra::Control | ||
diff --git a/src/video_core/control/scheduler.h b/src/video_core/control/scheduler.h new file mode 100644 index 000000000..44addf61c --- /dev/null +++ b/src/video_core/control/scheduler.h | |||
| @@ -0,0 +1,37 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2021 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <memory> | ||
| 7 | #include <mutex> | ||
| 8 | #include <unordered_map> | ||
| 9 | |||
| 10 | #include "video_core/dma_pusher.h" | ||
| 11 | |||
| 12 | namespace Tegra { | ||
| 13 | |||
| 14 | class GPU; | ||
| 15 | |||
| 16 | namespace Control { | ||
| 17 | |||
| 18 | struct ChannelState; | ||
| 19 | |||
| 20 | class Scheduler { | ||
| 21 | public: | ||
| 22 | explicit Scheduler(GPU& gpu_); | ||
| 23 | ~Scheduler(); | ||
| 24 | |||
| 25 | void Push(s32 channel, CommandList&& entries); | ||
| 26 | |||
| 27 | void DeclareChannel(std::shared_ptr<ChannelState> new_channel); | ||
| 28 | |||
| 29 | private: | ||
| 30 | std::unordered_map<s32, std::shared_ptr<ChannelState>> channels; | ||
| 31 | std::mutex scheduling_guard; | ||
| 32 | GPU& gpu; | ||
| 33 | }; | ||
| 34 | |||
| 35 | } // namespace Control | ||
| 36 | |||
| 37 | } // namespace Tegra | ||
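Editorial note, not part of the diff: a short usage sketch of the new scheduler, where channel and command_list are illustrative assumptions.

    Tegra::Control::Scheduler scheduler{gpu};
    scheduler.DeclareChannel(channel);                          // keyed by channel->bind_id
    scheduler.Push(channel->bind_id, std::move(command_list));  // binds the channel, then pushes and dispatches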
diff --git a/src/video_core/dma_pusher.cpp b/src/video_core/dma_pusher.cpp index 29b8582ab..9835e3ac1 100644 --- a/src/video_core/dma_pusher.cpp +++ b/src/video_core/dma_pusher.cpp | |||
| @@ -12,7 +12,10 @@ | |||
| 12 | 12 | ||
| 13 | namespace Tegra { | 13 | namespace Tegra { |
| 14 | 14 | ||
| 15 | DmaPusher::DmaPusher(Core::System& system_, GPU& gpu_) : gpu{gpu_}, system{system_} {} | 15 | DmaPusher::DmaPusher(Core::System& system_, GPU& gpu_, MemoryManager& memory_manager_, |
| 16 | Control::ChannelState& channel_state_) | ||
| 17 | : gpu{gpu_}, system{system_}, memory_manager{memory_manager_}, puller{gpu_, memory_manager_, | ||
| 18 | *this, channel_state_} {} | ||
| 16 | 19 | ||
| 17 | DmaPusher::~DmaPusher() = default; | 20 | DmaPusher::~DmaPusher() = default; |
| 18 | 21 | ||
| @@ -21,8 +24,6 @@ MICROPROFILE_DEFINE(DispatchCalls, "GPU", "Execute command buffer", MP_RGB(128, | |||
| 21 | void DmaPusher::DispatchCalls() { | 24 | void DmaPusher::DispatchCalls() { |
| 22 | MICROPROFILE_SCOPE(DispatchCalls); | 25 | MICROPROFILE_SCOPE(DispatchCalls); |
| 23 | 26 | ||
| 24 | gpu.SyncGuestHost(); | ||
| 25 | |||
| 26 | dma_pushbuffer_subindex = 0; | 27 | dma_pushbuffer_subindex = 0; |
| 27 | 28 | ||
| 28 | dma_state.is_last_call = true; | 29 | dma_state.is_last_call = true; |
| @@ -33,7 +34,6 @@ void DmaPusher::DispatchCalls() { | |||
| 33 | } | 34 | } |
| 34 | } | 35 | } |
| 35 | gpu.FlushCommands(); | 36 | gpu.FlushCommands(); |
| 36 | gpu.SyncGuestHost(); | ||
| 37 | gpu.OnCommandListEnd(); | 37 | gpu.OnCommandListEnd(); |
| 38 | } | 38 | } |
| 39 | 39 | ||
| @@ -76,11 +76,11 @@ bool DmaPusher::Step() { | |||
| 76 | // Push buffer non-empty, read a word | 76 | // Push buffer non-empty, read a word |
| 77 | command_headers.resize(command_list_header.size); | 77 | command_headers.resize(command_list_header.size); |
| 78 | if (Settings::IsGPULevelHigh()) { | 78 | if (Settings::IsGPULevelHigh()) { |
| 79 | gpu.MemoryManager().ReadBlock(dma_get, command_headers.data(), | 79 | memory_manager.ReadBlock(dma_get, command_headers.data(), |
| 80 | command_list_header.size * sizeof(u32)); | 80 | command_list_header.size * sizeof(u32)); |
| 81 | } else { | 81 | } else { |
| 82 | gpu.MemoryManager().ReadBlockUnsafe(dma_get, command_headers.data(), | 82 | memory_manager.ReadBlockUnsafe(dma_get, command_headers.data(), |
| 83 | command_list_header.size * sizeof(u32)); | 83 | command_list_header.size * sizeof(u32)); |
| 84 | } | 84 | } |
| 85 | } | 85 | } |
| 86 | for (std::size_t index = 0; index < command_headers.size();) { | 86 | for (std::size_t index = 0; index < command_headers.size();) { |
| @@ -154,7 +154,7 @@ void DmaPusher::SetState(const CommandHeader& command_header) { | |||
| 154 | 154 | ||
| 155 | void DmaPusher::CallMethod(u32 argument) const { | 155 | void DmaPusher::CallMethod(u32 argument) const { |
| 156 | if (dma_state.method < non_puller_methods) { | 156 | if (dma_state.method < non_puller_methods) { |
| 157 | gpu.CallMethod(GPU::MethodCall{ | 157 | puller.CallPullerMethod(Engines::Puller::MethodCall{ |
| 158 | dma_state.method, | 158 | dma_state.method, |
| 159 | argument, | 159 | argument, |
| 160 | dma_state.subchannel, | 160 | dma_state.subchannel, |
| @@ -168,12 +168,16 @@ void DmaPusher::CallMethod(u32 argument) const { | |||
| 168 | 168 | ||
| 169 | void DmaPusher::CallMultiMethod(const u32* base_start, u32 num_methods) const { | 169 | void DmaPusher::CallMultiMethod(const u32* base_start, u32 num_methods) const { |
| 170 | if (dma_state.method < non_puller_methods) { | 170 | if (dma_state.method < non_puller_methods) { |
| 171 | gpu.CallMultiMethod(dma_state.method, dma_state.subchannel, base_start, num_methods, | 171 | puller.CallMultiMethod(dma_state.method, dma_state.subchannel, base_start, num_methods, |
| 172 | dma_state.method_count); | 172 | dma_state.method_count); |
| 173 | } else { | 173 | } else { |
| 174 | subchannels[dma_state.subchannel]->CallMultiMethod(dma_state.method, base_start, | 174 | subchannels[dma_state.subchannel]->CallMultiMethod(dma_state.method, base_start, |
| 175 | num_methods, dma_state.method_count); | 175 | num_methods, dma_state.method_count); |
| 176 | } | 176 | } |
| 177 | } | 177 | } |
| 178 | 178 | ||
| 179 | void DmaPusher::BindRasterizer(VideoCore::RasterizerInterface* rasterizer) { | ||
| 180 | puller.BindRasterizer(rasterizer); | ||
| 181 | } | ||
| 182 | |||
| 179 | } // namespace Tegra | 183 | } // namespace Tegra |
diff --git a/src/video_core/dma_pusher.h b/src/video_core/dma_pusher.h index 872fd146a..938f0f11c 100644 --- a/src/video_core/dma_pusher.h +++ b/src/video_core/dma_pusher.h | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | #include "common/bit_field.h" | 10 | #include "common/bit_field.h" |
| 11 | #include "common/common_types.h" | 11 | #include "common/common_types.h" |
| 12 | #include "video_core/engines/engine_interface.h" | 12 | #include "video_core/engines/engine_interface.h" |
| 13 | #include "video_core/engines/puller.h" | ||
| 13 | 14 | ||
| 14 | namespace Core { | 15 | namespace Core { |
| 15 | class System; | 16 | class System; |
| @@ -17,7 +18,12 @@ class System; | |||
| 17 | 18 | ||
| 18 | namespace Tegra { | 19 | namespace Tegra { |
| 19 | 20 | ||
| 21 | namespace Control { | ||
| 22 | struct ChannelState; | ||
| 23 | } | ||
| 24 | |||
| 20 | class GPU; | 25 | class GPU; |
| 26 | class MemoryManager; | ||
| 21 | 27 | ||
| 22 | enum class SubmissionMode : u32 { | 28 | enum class SubmissionMode : u32 { |
| 23 | IncreasingOld = 0, | 29 | IncreasingOld = 0, |
| @@ -31,24 +37,32 @@ enum class SubmissionMode : u32 { | |||
| 31 | // Note that, traditionally, methods are treated as 4-byte addressable locations, and hence | 37 | // Note that, traditionally, methods are treated as 4-byte addressable locations, and hence |
| 32 | // their numbers are written down multiplied by 4 in Docs. Here we do not multiply by 4. | 38 | // their numbers are written down multiplied by 4 in Docs. Here we do not multiply by 4. |
| 33 | // So the values you see in docs might be multiplied by 4. | 39 | // So the values you see in docs might be multiplied by 4. |
| 40 | // Register documentation: | ||
| 41 | // https://github.com/NVIDIA/open-gpu-doc/blob/ab27fc22db5de0d02a4cabe08e555663b62db4d4/classes/host/cla26f.h | ||
| 42 | // | ||
| 43 | // Register Description (approx): | ||
| 44 | // https://github.com/NVIDIA/open-gpu-doc/blob/ab27fc22db5de0d02a4cabe08e555663b62db4d4/manuals/volta/gv100/dev_pbdma.ref.txt | ||
| 34 | enum class BufferMethods : u32 { | 45 | enum class BufferMethods : u32 { |
| 35 | BindObject = 0x0, | 46 | BindObject = 0x0, |
| 47 | Illegal = 0x1, | ||
| 36 | Nop = 0x2, | 48 | Nop = 0x2, |
| 37 | SemaphoreAddressHigh = 0x4, | 49 | SemaphoreAddressHigh = 0x4, |
| 38 | SemaphoreAddressLow = 0x5, | 50 | SemaphoreAddressLow = 0x5, |
| 39 | SemaphoreSequence = 0x6, | 51 | SemaphoreSequencePayload = 0x6, |
| 40 | SemaphoreTrigger = 0x7, | 52 | SemaphoreOperation = 0x7, |
| 41 | NotifyIntr = 0x8, | 53 | NonStallInterrupt = 0x8, |
| 42 | WrcacheFlush = 0x9, | 54 | WrcacheFlush = 0x9, |
| 43 | Unk28 = 0xA, | 55 | MemOpA = 0xA, |
| 44 | UnkCacheFlush = 0xB, | 56 | MemOpB = 0xB, |
| 57 | MemOpC = 0xC, | ||
| 58 | MemOpD = 0xD, | ||
| 45 | RefCnt = 0x14, | 59 | RefCnt = 0x14, |
| 46 | SemaphoreAcquire = 0x1A, | 60 | SemaphoreAcquire = 0x1A, |
| 47 | SemaphoreRelease = 0x1B, | 61 | SemaphoreRelease = 0x1B, |
| 48 | FenceValue = 0x1C, | 62 | SyncpointPayload = 0x1C, |
| 49 | FenceAction = 0x1D, | 63 | SyncpointOperation = 0x1D, |
| 50 | WaitForInterrupt = 0x1E, | 64 | WaitForIdle = 0x1E, |
| 51 | Unk7c = 0x1F, | 65 | CRCCheck = 0x1F, |
| 52 | Yield = 0x20, | 66 | Yield = 0x20, |
| 53 | NonPullerMethods = 0x40, | 67 | NonPullerMethods = 0x40, |
| 54 | }; | 68 | }; |
| @@ -102,7 +116,8 @@ struct CommandList final { | |||
| 102 | */ | 116 | */ |
| 103 | class DmaPusher final { | 117 | class DmaPusher final { |
| 104 | public: | 118 | public: |
| 105 | explicit DmaPusher(Core::System& system_, GPU& gpu_); | 119 | explicit DmaPusher(Core::System& system_, GPU& gpu_, MemoryManager& memory_manager_, |
| 120 | Control::ChannelState& channel_state_); | ||
| 106 | ~DmaPusher(); | 121 | ~DmaPusher(); |
| 107 | 122 | ||
| 108 | void Push(CommandList&& entries) { | 123 | void Push(CommandList&& entries) { |
| @@ -115,6 +130,8 @@ public: | |||
| 115 | subchannels[subchannel_id] = engine; | 130 | subchannels[subchannel_id] = engine; |
| 116 | } | 131 | } |
| 117 | 132 | ||
| 133 | void BindRasterizer(VideoCore::RasterizerInterface* rasterizer); | ||
| 134 | |||
| 118 | private: | 135 | private: |
| 119 | static constexpr u32 non_puller_methods = 0x40; | 136 | static constexpr u32 non_puller_methods = 0x40; |
| 120 | static constexpr u32 max_subchannels = 8; | 137 | static constexpr u32 max_subchannels = 8; |
| @@ -148,6 +165,8 @@ private: | |||
| 148 | 165 | ||
| 149 | GPU& gpu; | 166 | GPU& gpu; |
| 150 | Core::System& system; | 167 | Core::System& system; |
| 168 | MemoryManager& memory_manager; | ||
| 169 | mutable Engines::Puller puller; | ||
| 151 | }; | 170 | }; |
| 152 | 171 | ||
| 153 | } // namespace Tegra | 172 | } // namespace Tegra |
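Editorial note, not part of the diff: a worked example of the *4 convention called out in the BufferMethods comment. Only the arithmetic is asserted here; the resulting value is the byte offset one would look up in the linked NVIDIA headers.

    constexpr u32 semaphore_address_high = 0x4;               // word index used in this enum
    constexpr u32 documented_byte_offset = semaphore_address_high * 4;
    static_assert(documented_byte_offset == 0x10);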
diff --git a/src/video_core/engines/engine_upload.cpp b/src/video_core/engines/engine_upload.cpp index 6ff5b1eca..a34819234 100644 --- a/src/video_core/engines/engine_upload.cpp +++ b/src/video_core/engines/engine_upload.cpp | |||
| @@ -3,6 +3,7 @@ | |||
| 3 | 3 | ||
| 4 | #include <cstring> | 4 | #include <cstring> |
| 5 | 5 | ||
| 6 | #include "common/algorithm.h" | ||
| 6 | #include "common/assert.h" | 7 | #include "common/assert.h" |
| 7 | #include "video_core/engines/engine_upload.h" | 8 | #include "video_core/engines/engine_upload.h" |
| 8 | #include "video_core/memory_manager.h" | 9 | #include "video_core/memory_manager.h" |
| @@ -34,21 +35,48 @@ void State::ProcessData(const u32 data, const bool is_last_call) { | |||
| 34 | if (!is_last_call) { | 35 | if (!is_last_call) { |
| 35 | return; | 36 | return; |
| 36 | } | 37 | } |
| 38 | ProcessData(inner_buffer); | ||
| 39 | } | ||
| 40 | |||
| 41 | void State::ProcessData(const u32* data, size_t num_data) { | ||
| 42 | std::span<const u8> read_buffer(reinterpret_cast<const u8*>(data), num_data * sizeof(u32)); | ||
| 43 | ProcessData(read_buffer); | ||
| 44 | } | ||
| 45 | |||
| 46 | void State::ProcessData(std::span<const u8> read_buffer) { | ||
| 37 | const GPUVAddr address{regs.dest.Address()}; | 47 | const GPUVAddr address{regs.dest.Address()}; |
| 38 | if (is_linear) { | 48 | if (is_linear) { |
| 39 | rasterizer->AccelerateInlineToMemory(address, copy_size, inner_buffer); | 49 | if (regs.line_count == 1) { |
| 50 | rasterizer->AccelerateInlineToMemory(address, copy_size, read_buffer); | ||
| 51 | } else { | ||
| 52 | for (u32 line = 0; line < regs.line_count; ++line) { | ||
| 53 | const GPUVAddr dest_line = address + static_cast<size_t>(line) * regs.dest.pitch; | ||
| 54 | memory_manager.WriteBlockUnsafe( | ||
| 55 | dest_line, read_buffer.data() + static_cast<size_t>(line) * regs.line_length_in, | ||
| 56 | regs.line_length_in); | ||
| 57 | } | ||
| 58 | memory_manager.InvalidateRegion(address, regs.dest.pitch * regs.line_count); | ||
| 59 | } | ||
| 40 | } else { | 60 | } else { |
| 41 | UNIMPLEMENTED_IF(regs.dest.z != 0); | 61 | u32 width = regs.dest.width; |
| 42 | UNIMPLEMENTED_IF(regs.dest.depth != 1); | 62 | u32 x_elements = regs.line_length_in; |
| 43 | UNIMPLEMENTED_IF(regs.dest.BlockWidth() != 0); | 63 | u32 x_offset = regs.dest.x; |
| 44 | UNIMPLEMENTED_IF(regs.dest.BlockDepth() != 0); | 64 | const u32 bpp_shift = Common::FoldRight( |
| 65 | 4U, [](u32 x, u32 y) { return std::min(x, static_cast<u32>(std::countr_zero(y))); }, | ||
| 66 | width, x_elements, x_offset, static_cast<u32>(address)); | ||
| 67 | width >>= bpp_shift; | ||
| 68 | x_elements >>= bpp_shift; | ||
| 69 | x_offset >>= bpp_shift; | ||
| 70 | const u32 bytes_per_pixel = 1U << bpp_shift; | ||
| 45 | const std::size_t dst_size = Tegra::Texture::CalculateSize( | 71 | const std::size_t dst_size = Tegra::Texture::CalculateSize( |
| 46 | true, 1, regs.dest.width, regs.dest.height, 1, regs.dest.BlockHeight(), 0); | 72 | true, bytes_per_pixel, width, regs.dest.height, regs.dest.depth, |
| 73 | regs.dest.BlockHeight(), regs.dest.BlockDepth()); | ||
| 47 | tmp_buffer.resize(dst_size); | 74 | tmp_buffer.resize(dst_size); |
| 48 | memory_manager.ReadBlock(address, tmp_buffer.data(), dst_size); | 75 | memory_manager.ReadBlock(address, tmp_buffer.data(), dst_size); |
| 49 | Tegra::Texture::SwizzleKepler(regs.dest.width, regs.dest.height, regs.dest.x, regs.dest.y, | 76 | Tegra::Texture::SwizzleSubrect(tmp_buffer, read_buffer, bytes_per_pixel, width, |
| 50 | regs.dest.BlockHeight(), copy_size, inner_buffer.data(), | 77 | regs.dest.height, regs.dest.depth, x_offset, regs.dest.y, |
| 51 | tmp_buffer.data()); | 78 | x_elements, regs.line_count, regs.dest.BlockHeight(), |
| 79 | regs.dest.BlockDepth(), regs.line_length_in); | ||
| 52 | memory_manager.WriteBlock(address, tmp_buffer.data(), dst_size); | 80 | memory_manager.WriteBlock(address, tmp_buffer.data(), dst_size); |
| 53 | } | 81 | } |
| 54 | } | 82 | } |
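Editorial note, not part of the diff: the new bpp_shift computation via Common::FoldRight takes the minimum of 4 and the trailing-zero counts of width, line length, x offset, and address, i.e. the largest power-of-two factor (capped at 16 bytes) shared by all of them, so the copy can be treated as using wider "pixels". A standalone sketch with made-up values, assuming <algorithm> and <bit> are included:

    constexpr u32 w = 256, len = 64, x = 32, addr = 0x1000;
    constexpr u32 shift = std::min({4u, u32(std::countr_zero(w)), u32(std::countr_zero(len)),
                                    u32(std::countr_zero(x)), u32(std::countr_zero(addr))});
    static_assert(shift == 4);  // bytes_per_pixel = 1 << 4 = 16; w, len and x are then divided by 16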
diff --git a/src/video_core/engines/engine_upload.h b/src/video_core/engines/engine_upload.h index 94ff3314a..f08f6e36a 100644 --- a/src/video_core/engines/engine_upload.h +++ b/src/video_core/engines/engine_upload.h | |||
| @@ -3,6 +3,7 @@ | |||
| 3 | 3 | ||
| 4 | #pragma once | 4 | #pragma once |
| 5 | 5 | ||
| 6 | #include <span> | ||
| 6 | #include <vector> | 7 | #include <vector> |
| 7 | #include "common/bit_field.h" | 8 | #include "common/bit_field.h" |
| 8 | #include "common/common_types.h" | 9 | #include "common/common_types.h" |
| @@ -33,7 +34,7 @@ struct Registers { | |||
| 33 | u32 width; | 34 | u32 width; |
| 34 | u32 height; | 35 | u32 height; |
| 35 | u32 depth; | 36 | u32 depth; |
| 36 | u32 z; | 37 | u32 layer; |
| 37 | u32 x; | 38 | u32 x; |
| 38 | u32 y; | 39 | u32 y; |
| 39 | 40 | ||
| @@ -62,11 +63,14 @@ public: | |||
| 62 | 63 | ||
| 63 | void ProcessExec(bool is_linear_); | 64 | void ProcessExec(bool is_linear_); |
| 64 | void ProcessData(u32 data, bool is_last_call); | 65 | void ProcessData(u32 data, bool is_last_call); |
| 66 | void ProcessData(const u32* data, size_t num_data); | ||
| 65 | 67 | ||
| 66 | /// Binds a rasterizer to this engine. | 68 | /// Binds a rasterizer to this engine. |
| 67 | void BindRasterizer(VideoCore::RasterizerInterface* rasterizer); | 69 | void BindRasterizer(VideoCore::RasterizerInterface* rasterizer); |
| 68 | 70 | ||
| 69 | private: | 71 | private: |
| 72 | void ProcessData(std::span<const u8> read_buffer); | ||
| 73 | |||
| 70 | u32 write_offset = 0; | 74 | u32 write_offset = 0; |
| 71 | u32 copy_size = 0; | 75 | u32 copy_size = 0; |
| 72 | std::vector<u8> inner_buffer; | 76 | std::vector<u8> inner_buffer; |
diff --git a/src/video_core/engines/kepler_compute.cpp b/src/video_core/engines/kepler_compute.cpp index 5db254d94..7c50bdbe0 100644 --- a/src/video_core/engines/kepler_compute.cpp +++ b/src/video_core/engines/kepler_compute.cpp | |||
| @@ -36,8 +36,6 @@ void KeplerCompute::CallMethod(u32 method, u32 method_argument, bool is_last_cal | |||
| 36 | } | 36 | } |
| 37 | case KEPLER_COMPUTE_REG_INDEX(data_upload): { | 37 | case KEPLER_COMPUTE_REG_INDEX(data_upload): { |
| 38 | upload_state.ProcessData(method_argument, is_last_call); | 38 | upload_state.ProcessData(method_argument, is_last_call); |
| 39 | if (is_last_call) { | ||
| 40 | } | ||
| 41 | break; | 39 | break; |
| 42 | } | 40 | } |
| 43 | case KEPLER_COMPUTE_REG_INDEX(launch): | 41 | case KEPLER_COMPUTE_REG_INDEX(launch): |
| @@ -50,8 +48,15 @@ void KeplerCompute::CallMethod(u32 method, u32 method_argument, bool is_last_cal | |||
| 50 | 48 | ||
| 51 | void KeplerCompute::CallMultiMethod(u32 method, const u32* base_start, u32 amount, | 49 | void KeplerCompute::CallMultiMethod(u32 method, const u32* base_start, u32 amount, |
| 52 | u32 methods_pending) { | 50 | u32 methods_pending) { |
| 53 | for (std::size_t i = 0; i < amount; i++) { | 51 | switch (method) { |
| 54 | CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1); | 52 | case KEPLER_COMPUTE_REG_INDEX(data_upload): |
| 53 | upload_state.ProcessData(base_start, static_cast<size_t>(amount)); | ||
| 54 | return; | ||
| 55 | default: | ||
| 56 | for (std::size_t i = 0; i < amount; i++) { | ||
| 57 | CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1); | ||
| 58 | } | ||
| 59 | break; | ||
| 55 | } | 60 | } |
| 56 | } | 61 | } |
| 57 | 62 | ||
diff --git a/src/video_core/engines/kepler_memory.cpp b/src/video_core/engines/kepler_memory.cpp index e2b029542..a3fbab1e5 100644 --- a/src/video_core/engines/kepler_memory.cpp +++ b/src/video_core/engines/kepler_memory.cpp | |||
| @@ -33,8 +33,6 @@ void KeplerMemory::CallMethod(u32 method, u32 method_argument, bool is_last_call | |||
| 33 | } | 33 | } |
| 34 | case KEPLERMEMORY_REG_INDEX(data): { | 34 | case KEPLERMEMORY_REG_INDEX(data): { |
| 35 | upload_state.ProcessData(method_argument, is_last_call); | 35 | upload_state.ProcessData(method_argument, is_last_call); |
| 36 | if (is_last_call) { | ||
| 37 | } | ||
| 38 | break; | 36 | break; |
| 39 | } | 37 | } |
| 40 | } | 38 | } |
| @@ -42,8 +40,15 @@ void KeplerMemory::CallMethod(u32 method, u32 method_argument, bool is_last_call | |||
| 42 | 40 | ||
| 43 | void KeplerMemory::CallMultiMethod(u32 method, const u32* base_start, u32 amount, | 41 | void KeplerMemory::CallMultiMethod(u32 method, const u32* base_start, u32 amount, |
| 44 | u32 methods_pending) { | 42 | u32 methods_pending) { |
| 45 | for (std::size_t i = 0; i < amount; i++) { | 43 | switch (method) { |
| 46 | CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1); | 44 | case KEPLERMEMORY_REG_INDEX(data): |
| 45 | upload_state.ProcessData(base_start, static_cast<size_t>(amount)); | ||
| 46 | return; | ||
| 47 | default: | ||
| 48 | for (std::size_t i = 0; i < amount; i++) { | ||
| 49 | CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1); | ||
| 50 | } | ||
| 51 | break; | ||
| 47 | } | 52 | } |
| 48 | } | 53 | } |
| 49 | 54 | ||
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp index 3a4646289..3c6e44a25 100644 --- a/src/video_core/engines/maxwell_3d.cpp +++ b/src/video_core/engines/maxwell_3d.cpp | |||
| @@ -219,6 +219,8 @@ void Maxwell3D::ProcessMethodCall(u32 method, u32 argument, u32 nonshadow_argume | |||
| 219 | regs.index_array.count = regs.small_index_2.count; | 219 | regs.index_array.count = regs.small_index_2.count; |
| 220 | regs.index_array.first = regs.small_index_2.first; | 220 | regs.index_array.first = regs.small_index_2.first; |
| 221 | dirty.flags[VideoCommon::Dirty::IndexBuffer] = true; | 221 | dirty.flags[VideoCommon::Dirty::IndexBuffer] = true; |
| 222 | // A macro calls this one over and over; should it increase instancing? | ||
| 223 | // Used by Hades and likely other Vulkan games. | ||
| 222 | return DrawArrays(); | 224 | return DrawArrays(); |
| 223 | case MAXWELL3D_REG_INDEX(topology_override): | 225 | case MAXWELL3D_REG_INDEX(topology_override): |
| 224 | use_topology_override = true; | 226 | use_topology_override = true; |
| @@ -237,11 +239,12 @@ void Maxwell3D::ProcessMethodCall(u32 method, u32 argument, u32 nonshadow_argume | |||
| 237 | return upload_state.ProcessExec(regs.exec_upload.linear != 0); | 239 | return upload_state.ProcessExec(regs.exec_upload.linear != 0); |
| 238 | case MAXWELL3D_REG_INDEX(data_upload): | 240 | case MAXWELL3D_REG_INDEX(data_upload): |
| 239 | upload_state.ProcessData(argument, is_last_call); | 241 | upload_state.ProcessData(argument, is_last_call); |
| 240 | if (is_last_call) { | ||
| 241 | } | ||
| 242 | return; | 242 | return; |
| 243 | case MAXWELL3D_REG_INDEX(fragment_barrier): | 243 | case MAXWELL3D_REG_INDEX(fragment_barrier): |
| 244 | return rasterizer->FragmentBarrier(); | 244 | return rasterizer->FragmentBarrier(); |
| 245 | case MAXWELL3D_REG_INDEX(invalidate_texture_data_cache): | ||
| 246 | rasterizer->InvalidateGPUCache(); | ||
| 247 | return rasterizer->WaitForIdle(); | ||
| 245 | case MAXWELL3D_REG_INDEX(tiled_cache_barrier): | 248 | case MAXWELL3D_REG_INDEX(tiled_cache_barrier): |
| 246 | return rasterizer->TiledCacheBarrier(); | 249 | return rasterizer->TiledCacheBarrier(); |
| 247 | } | 250 | } |
| @@ -311,6 +314,9 @@ void Maxwell3D::CallMultiMethod(u32 method, const u32* base_start, u32 amount, | |||
| 311 | case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 15: | 314 | case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 15: |
| 312 | ProcessCBMultiData(base_start, amount); | 315 | ProcessCBMultiData(base_start, amount); |
| 313 | break; | 316 | break; |
| 317 | case MAXWELL3D_REG_INDEX(data_upload): | ||
| 318 | upload_state.ProcessData(base_start, static_cast<size_t>(amount)); | ||
| 319 | return; | ||
| 314 | default: | 320 | default: |
| 315 | for (std::size_t i = 0; i < amount; i++) { | 321 | for (std::size_t i = 0; i < amount; i++) { |
| 316 | CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1); | 322 | CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1); |
| @@ -447,18 +453,10 @@ void Maxwell3D::ProcessFirmwareCall4() { | |||
| 447 | } | 453 | } |
| 448 | 454 | ||
| 449 | void Maxwell3D::StampQueryResult(u64 payload, bool long_query) { | 455 | void Maxwell3D::StampQueryResult(u64 payload, bool long_query) { |
| 450 | struct LongQueryResult { | ||
| 451 | u64_le value; | ||
| 452 | u64_le timestamp; | ||
| 453 | }; | ||
| 454 | static_assert(sizeof(LongQueryResult) == 16, "LongQueryResult has wrong size"); | ||
| 455 | const GPUVAddr sequence_address{regs.query.QueryAddress()}; | 456 | const GPUVAddr sequence_address{regs.query.QueryAddress()}; |
| 456 | if (long_query) { | 457 | if (long_query) { |
| 457 | // Write the 128-bit result structure in long mode. Note: We emulate an infinitely fast | 458 | memory_manager.Write<u64>(sequence_address + sizeof(u64), system.GPU().GetTicks()); |
| 458 | // GPU, this command may actually take a while to complete in real hardware due to GPU | 459 | memory_manager.Write<u64>(sequence_address, payload); |
| 459 | // wait queues. | ||
| 460 | LongQueryResult query_result{payload, system.GPU().GetTicks()}; | ||
| 461 | memory_manager.WriteBlock(sequence_address, &query_result, sizeof(query_result)); | ||
| 462 | } else { | 460 | } else { |
| 463 | memory_manager.Write<u32>(sequence_address, static_cast<u32>(payload)); | 461 | memory_manager.Write<u32>(sequence_address, static_cast<u32>(payload)); |
| 464 | } | 462 | } |
| @@ -472,10 +470,25 @@ void Maxwell3D::ProcessQueryGet() { | |||
| 472 | 470 | ||
| 473 | switch (regs.query.query_get.operation) { | 471 | switch (regs.query.query_get.operation) { |
| 474 | case Regs::QueryOperation::Release: | 472 | case Regs::QueryOperation::Release: |
| 475 | if (regs.query.query_get.fence == 1) { | 473 | if (regs.query.query_get.fence == 1 || regs.query.query_get.short_query != 0) { |
| 476 | rasterizer->SignalSemaphore(regs.query.QueryAddress(), regs.query.query_sequence); | 474 | const GPUVAddr sequence_address{regs.query.QueryAddress()}; |
| 475 | const u32 payload = regs.query.query_sequence; | ||
| 476 | std::function<void()> operation([this, sequence_address, payload] { | ||
| 477 | memory_manager.Write<u32>(sequence_address, payload); | ||
| 478 | }); | ||
| 479 | rasterizer->SignalFence(std::move(operation)); | ||
| 477 | } else { | 480 | } else { |
| 478 | StampQueryResult(regs.query.query_sequence, regs.query.query_get.short_query == 0); | 481 | struct LongQueryResult { |
| 482 | u64_le value; | ||
| 483 | u64_le timestamp; | ||
| 484 | }; | ||
| 485 | const GPUVAddr sequence_address{regs.query.QueryAddress()}; | ||
| 486 | const u32 payload = regs.query.query_sequence; | ||
| 487 | std::function<void()> operation([this, sequence_address, payload] { | ||
| 488 | memory_manager.Write<u64>(sequence_address + sizeof(u64), system.GPU().GetTicks()); | ||
| 489 | memory_manager.Write<u64>(sequence_address, payload); | ||
| 490 | }); | ||
| 491 | rasterizer->SyncOperation(std::move(operation)); | ||
| 479 | } | 492 | } |
| 480 | break; | 493 | break; |
| 481 | case Regs::QueryOperation::Acquire: | 494 | case Regs::QueryOperation::Acquire: |
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp index 0efe58282..3909d36c1 100644 --- a/src/video_core/engines/maxwell_dma.cpp +++ b/src/video_core/engines/maxwell_dma.cpp | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | 3 | ||
| 4 | #include "common/algorithm.h" | ||
| 4 | #include "common/assert.h" | 5 | #include "common/assert.h" |
| 5 | #include "common/logging/log.h" | 6 | #include "common/logging/log.h" |
| 6 | #include "common/microprofile.h" | 7 | #include "common/microprofile.h" |
| @@ -54,8 +55,6 @@ void MaxwellDMA::Launch() { | |||
| 54 | const LaunchDMA& launch = regs.launch_dma; | 55 | const LaunchDMA& launch = regs.launch_dma; |
| 55 | ASSERT(launch.interrupt_type == LaunchDMA::InterruptType::NONE); | 56 | ASSERT(launch.interrupt_type == LaunchDMA::InterruptType::NONE); |
| 56 | ASSERT(launch.data_transfer_type == LaunchDMA::DataTransferType::NON_PIPELINED); | 57 | ASSERT(launch.data_transfer_type == LaunchDMA::DataTransferType::NON_PIPELINED); |
| 57 | ASSERT(regs.dst_params.origin.x == 0); | ||
| 58 | ASSERT(regs.dst_params.origin.y == 0); | ||
| 59 | 58 | ||
| 60 | const bool is_src_pitch = launch.src_memory_layout == LaunchDMA::MemoryLayout::PITCH; | 59 | const bool is_src_pitch = launch.src_memory_layout == LaunchDMA::MemoryLayout::PITCH; |
| 61 | const bool is_dst_pitch = launch.dst_memory_layout == LaunchDMA::MemoryLayout::PITCH; | 60 | const bool is_dst_pitch = launch.dst_memory_layout == LaunchDMA::MemoryLayout::PITCH; |
| @@ -121,23 +120,40 @@ void MaxwellDMA::CopyPitchToPitch() { | |||
| 121 | 120 | ||
| 122 | void MaxwellDMA::CopyBlockLinearToPitch() { | 121 | void MaxwellDMA::CopyBlockLinearToPitch() { |
| 123 | UNIMPLEMENTED_IF(regs.src_params.block_size.width != 0); | 122 | UNIMPLEMENTED_IF(regs.src_params.block_size.width != 0); |
| 124 | UNIMPLEMENTED_IF(regs.src_params.block_size.depth != 0); | ||
| 125 | UNIMPLEMENTED_IF(regs.src_params.layer != 0); | 123 | UNIMPLEMENTED_IF(regs.src_params.layer != 0); |
| 126 | 124 | ||
| 125 | const bool is_remapping = regs.launch_dma.remap_enable != 0; | ||
| 126 | |||
| 127 | // Optimized path for micro copies. | 127 | // Optimized path for micro copies. |
| 128 | const size_t dst_size = static_cast<size_t>(regs.pitch_out) * regs.line_count; | 128 | const size_t dst_size = static_cast<size_t>(regs.pitch_out) * regs.line_count; |
| 129 | if (dst_size < GOB_SIZE && regs.pitch_out <= GOB_SIZE_X && | 129 | if (!is_remapping && dst_size < GOB_SIZE && regs.pitch_out <= GOB_SIZE_X && |
| 130 | regs.src_params.height > GOB_SIZE_Y) { | 130 | regs.src_params.height > GOB_SIZE_Y) { |
| 131 | FastCopyBlockLinearToPitch(); | 131 | FastCopyBlockLinearToPitch(); |
| 132 | return; | 132 | return; |
| 133 | } | 133 | } |
| 134 | 134 | ||
| 135 | // Deswizzle the input and copy it over. | 135 | // Deswizzle the input and copy it over. |
| 136 | UNIMPLEMENTED_IF(regs.launch_dma.remap_enable != 0); | ||
| 137 | const u32 bytes_per_pixel = | ||
| 138 | regs.launch_dma.remap_enable ? regs.pitch_out / regs.line_length_in : 1; | ||
| 139 | const Parameters& src_params = regs.src_params; | 136 | const Parameters& src_params = regs.src_params; |
| 140 | const u32 width = src_params.width; | 137 | |
| 138 | const u32 num_remap_components = regs.remap_const.num_dst_components_minus_one + 1; | ||
| 139 | const u32 remap_components_size = regs.remap_const.component_size_minus_one + 1; | ||
| 140 | |||
| 141 | const u32 base_bpp = !is_remapping ? 1U : num_remap_components * remap_components_size; | ||
| 142 | |||
| 143 | u32 width = src_params.width; | ||
| 144 | u32 x_elements = regs.line_length_in; | ||
| 145 | u32 x_offset = src_params.origin.x; | ||
| 146 | u32 bpp_shift = 0U; | ||
| 147 | if (!is_remapping) { | ||
| 148 | bpp_shift = Common::FoldRight( | ||
| 149 | 4U, [](u32 x, u32 y) { return std::min(x, static_cast<u32>(std::countr_zero(y))); }, | ||
| 150 | width, x_elements, x_offset, static_cast<u32>(regs.offset_in)); | ||
| 151 | width >>= bpp_shift; | ||
| 152 | x_elements >>= bpp_shift; | ||
| 153 | x_offset >>= bpp_shift; | ||
| 154 | } | ||
| 155 | |||
| 156 | const u32 bytes_per_pixel = base_bpp << bpp_shift; | ||
| 141 | const u32 height = src_params.height; | 157 | const u32 height = src_params.height; |
| 142 | const u32 depth = src_params.depth; | 158 | const u32 depth = src_params.depth; |
| 143 | const u32 block_height = src_params.block_size.height; | 159 | const u32 block_height = src_params.block_size.height; |
| @@ -155,30 +171,45 @@ void MaxwellDMA::CopyBlockLinearToPitch() { | |||
| 155 | memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size); | 171 | memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size); |
| 156 | memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size); | 172 | memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size); |
| 157 | 173 | ||
| 158 | UnswizzleSubrect(regs.line_length_in, regs.line_count, regs.pitch_out, width, bytes_per_pixel, | 174 | UnswizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset, |
| 159 | block_height, src_params.origin.x, src_params.origin.y, write_buffer.data(), | 175 | src_params.origin.y, x_elements, regs.line_count, block_height, block_depth, |
| 160 | read_buffer.data()); | 176 | regs.pitch_out); |
| 161 | 177 | ||
| 162 | memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size); | 178 | memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size); |
| 163 | } | 179 | } |
| 164 | 180 | ||
| 165 | void MaxwellDMA::CopyPitchToBlockLinear() { | 181 | void MaxwellDMA::CopyPitchToBlockLinear() { |
| 166 | UNIMPLEMENTED_IF_MSG(regs.dst_params.block_size.width != 0, "Block width is not one"); | 182 | UNIMPLEMENTED_IF_MSG(regs.dst_params.block_size.width != 0, "Block width is not one"); |
| 167 | UNIMPLEMENTED_IF(regs.launch_dma.remap_enable != 0); | 183 | UNIMPLEMENTED_IF(regs.dst_params.layer != 0); |
| 184 | |||
| 185 | const bool is_remapping = regs.launch_dma.remap_enable != 0; | ||
| 186 | const u32 num_remap_components = regs.remap_const.num_dst_components_minus_one + 1; | ||
| 187 | const u32 remap_components_size = regs.remap_const.component_size_minus_one + 1; | ||
| 168 | 188 | ||
| 169 | const auto& dst_params = regs.dst_params; | 189 | const auto& dst_params = regs.dst_params; |
| 170 | const u32 bytes_per_pixel = | 190 | |
| 171 | regs.launch_dma.remap_enable ? regs.pitch_in / regs.line_length_in : 1; | 191 | const u32 base_bpp = !is_remapping ? 1U : num_remap_components * remap_components_size; |
| 172 | const u32 width = dst_params.width; | 192 | |
| 193 | u32 width = dst_params.width; | ||
| 194 | u32 x_elements = regs.line_length_in; | ||
| 195 | u32 x_offset = dst_params.origin.x; | ||
| 196 | u32 bpp_shift = 0U; | ||
| 197 | if (!is_remapping) { | ||
| 198 | bpp_shift = Common::FoldRight( | ||
| 199 | 4U, [](u32 x, u32 y) { return std::min(x, static_cast<u32>(std::countr_zero(y))); }, | ||
| 200 | width, x_elements, x_offset, static_cast<u32>(regs.offset_out)); | ||
| 201 | width >>= bpp_shift; | ||
| 202 | x_elements >>= bpp_shift; | ||
| 203 | x_offset >>= bpp_shift; | ||
| 204 | } | ||
| 205 | |||
| 206 | const u32 bytes_per_pixel = base_bpp << bpp_shift; | ||
| 173 | const u32 height = dst_params.height; | 207 | const u32 height = dst_params.height; |
| 174 | const u32 depth = dst_params.depth; | 208 | const u32 depth = dst_params.depth; |
| 175 | const u32 block_height = dst_params.block_size.height; | 209 | const u32 block_height = dst_params.block_size.height; |
| 176 | const u32 block_depth = dst_params.block_size.depth; | 210 | const u32 block_depth = dst_params.block_size.depth; |
| 177 | const size_t dst_size = | 211 | const size_t dst_size = |
| 178 | CalculateSize(true, bytes_per_pixel, width, height, depth, block_height, block_depth); | 212 | CalculateSize(true, bytes_per_pixel, width, height, depth, block_height, block_depth); |
| 179 | const size_t dst_layer_size = | ||
| 180 | CalculateSize(true, bytes_per_pixel, width, height, 1, block_height, block_depth); | ||
| 181 | |||
| 182 | const size_t src_size = static_cast<size_t>(regs.pitch_in) * regs.line_count; | 213 | const size_t src_size = static_cast<size_t>(regs.pitch_in) * regs.line_count; |
| 183 | 214 | ||
| 184 | if (read_buffer.size() < src_size) { | 215 | if (read_buffer.size() < src_size) { |
| @@ -188,32 +219,23 @@ void MaxwellDMA::CopyPitchToBlockLinear() { | |||
| 188 | write_buffer.resize(dst_size); | 219 | write_buffer.resize(dst_size); |
| 189 | } | 220 | } |
| 190 | 221 | ||
| 222 | memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size); | ||
| 191 | if (Settings::IsGPULevelExtreme()) { | 223 | if (Settings::IsGPULevelExtreme()) { |
| 192 | memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size); | ||
| 193 | memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size); | 224 | memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size); |
| 194 | } else { | 225 | } else { |
| 195 | memory_manager.ReadBlockUnsafe(regs.offset_in, read_buffer.data(), src_size); | ||
| 196 | memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size); | 226 | memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size); |
| 197 | } | 227 | } |
| 198 | 228 | ||
| 199 | // If the input is linear and the output is tiled, swizzle the input and copy it over. | 229 | // If the input is linear and the output is tiled, swizzle the input and copy it over. |
| 200 | if (regs.dst_params.block_size.depth > 0) { | 230 | SwizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset, |
| 201 | ASSERT(dst_params.layer == 0); | 231 | dst_params.origin.y, x_elements, regs.line_count, block_height, block_depth, |
| 202 | SwizzleSliceToVoxel(regs.line_length_in, regs.line_count, regs.pitch_in, width, height, | 232 | regs.pitch_in); |
| 203 | bytes_per_pixel, block_height, block_depth, dst_params.origin.x, | ||
| 204 | dst_params.origin.y, write_buffer.data(), read_buffer.data()); | ||
| 205 | } else { | ||
| 206 | SwizzleSubrect(regs.line_length_in, regs.line_count, regs.pitch_in, width, bytes_per_pixel, | ||
| 207 | write_buffer.data() + dst_layer_size * dst_params.layer, read_buffer.data(), | ||
| 208 | block_height, dst_params.origin.x, dst_params.origin.y); | ||
| 209 | } | ||
| 210 | 233 | ||
| 211 | memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size); | 234 | memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size); |
| 212 | } | 235 | } |
| 213 | 236 | ||
| 214 | void MaxwellDMA::FastCopyBlockLinearToPitch() { | 237 | void MaxwellDMA::FastCopyBlockLinearToPitch() { |
| 215 | const u32 bytes_per_pixel = | 238 | const u32 bytes_per_pixel = 1U; |
| 216 | regs.launch_dma.remap_enable ? regs.pitch_out / regs.line_length_in : 1; | ||
| 217 | const size_t src_size = GOB_SIZE; | 239 | const size_t src_size = GOB_SIZE; |
| 218 | const size_t dst_size = static_cast<size_t>(regs.pitch_out) * regs.line_count; | 240 | const size_t dst_size = static_cast<size_t>(regs.pitch_out) * regs.line_count; |
| 219 | u32 pos_x = regs.src_params.origin.x; | 241 | u32 pos_x = regs.src_params.origin.x; |
| @@ -239,9 +261,10 @@ void MaxwellDMA::FastCopyBlockLinearToPitch() { | |||
| 239 | memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size); | 261 | memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size); |
| 240 | } | 262 | } |
| 241 | 263 | ||
| 242 | UnswizzleSubrect(regs.line_length_in, regs.line_count, regs.pitch_out, regs.src_params.width, | 264 | UnswizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, regs.src_params.width, |
| 243 | bytes_per_pixel, regs.src_params.block_size.height, pos_x, pos_y, | 265 | regs.src_params.height, 1, pos_x, pos_y, regs.line_length_in, regs.line_count, |
| 244 | write_buffer.data(), read_buffer.data()); | 266 | regs.src_params.block_size.height, regs.src_params.block_size.depth, |
| 267 | regs.pitch_out); | ||
| 245 | 268 | ||
| 246 | memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size); | 269 | memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size); |
| 247 | } | 270 | } |
| @@ -249,16 +272,24 @@ void MaxwellDMA::FastCopyBlockLinearToPitch() { | |||
| 249 | void MaxwellDMA::ReleaseSemaphore() { | 272 | void MaxwellDMA::ReleaseSemaphore() { |
| 250 | const auto type = regs.launch_dma.semaphore_type; | 273 | const auto type = regs.launch_dma.semaphore_type; |
| 251 | const GPUVAddr address = regs.semaphore.address; | 274 | const GPUVAddr address = regs.semaphore.address; |
| 275 | const u32 payload = regs.semaphore.payload; | ||
| 252 | switch (type) { | 276 | switch (type) { |
| 253 | case LaunchDMA::SemaphoreType::NONE: | 277 | case LaunchDMA::SemaphoreType::NONE: |
| 254 | break; | 278 | break; |
| 255 | case LaunchDMA::SemaphoreType::RELEASE_ONE_WORD_SEMAPHORE: | 279 | case LaunchDMA::SemaphoreType::RELEASE_ONE_WORD_SEMAPHORE: { |
| 256 | memory_manager.Write<u32>(address, regs.semaphore.payload); | 280 | std::function<void()> operation( |
| 281 | [this, address, payload] { memory_manager.Write<u32>(address, payload); }); | ||
| 282 | rasterizer->SignalFence(std::move(operation)); | ||
| 257 | break; | 283 | break; |
| 258 | case LaunchDMA::SemaphoreType::RELEASE_FOUR_WORD_SEMAPHORE: | 284 | } |
| 259 | memory_manager.Write<u64>(address, static_cast<u64>(regs.semaphore.payload)); | 285 | case LaunchDMA::SemaphoreType::RELEASE_FOUR_WORD_SEMAPHORE: { |
| 260 | memory_manager.Write<u64>(address + 8, system.GPU().GetTicks()); | 286 | std::function<void()> operation([this, address, payload] { |
| 287 | memory_manager.Write<u64>(address + sizeof(u64), system.GPU().GetTicks()); | ||
| 288 | memory_manager.Write<u64>(address, payload); | ||
| 289 | }); | ||
| 290 | rasterizer->SignalFence(std::move(operation)); | ||
| 261 | break; | 291 | break; |
| 292 | } | ||
| 262 | default: | 293 | default: |
| 263 | ASSERT_MSG(false, "Unknown semaphore type: {}", static_cast<u32>(type.Value())); | 294 | ASSERT_MSG(false, "Unknown semaphore type: {}", static_cast<u32>(type.Value())); |
| 264 | } | 295 | } |
diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h index 074bac92c..bc48320ce 100644 --- a/src/video_core/engines/maxwell_dma.h +++ b/src/video_core/engines/maxwell_dma.h | |||
| @@ -189,10 +189,16 @@ public: | |||
| 189 | BitField<4, 3, Swizzle> dst_y; | 189 | BitField<4, 3, Swizzle> dst_y; |
| 190 | BitField<8, 3, Swizzle> dst_z; | 190 | BitField<8, 3, Swizzle> dst_z; |
| 191 | BitField<12, 3, Swizzle> dst_w; | 191 | BitField<12, 3, Swizzle> dst_w; |
| 192 | BitField<0, 12, u32> dst_components_raw; | ||
| 192 | BitField<16, 2, u32> component_size_minus_one; | 193 | BitField<16, 2, u32> component_size_minus_one; |
| 193 | BitField<20, 2, u32> num_src_components_minus_one; | 194 | BitField<20, 2, u32> num_src_components_minus_one; |
| 194 | BitField<24, 2, u32> num_dst_components_minus_one; | 195 | BitField<24, 2, u32> num_dst_components_minus_one; |
| 195 | }; | 196 | }; |
| 197 | |||
| 198 | Swizzle GetComponent(size_t i) const { | ||
| 199 | const u32 raw = dst_components_raw; | ||
| 200 | return static_cast<Swizzle>((raw >> (i * 3)) & 0x7); | ||
| 201 | } | ||
| 196 | }; | 202 | }; |
| 197 | static_assert(sizeof(RemapConst) == 12); | 203 | static_assert(sizeof(RemapConst) == 12); |
| 198 | 204 | ||
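Editorial note, not part of the diff: dst_components_raw packs four 3-bit selectors, one per destination component, and the new GetComponent accessor extracts component i as (raw >> (i * 3)) & 0x7. A small check with an illustrative bit pattern:

    constexpr u32 raw = 0b100'011'010'001;               // components 0..3 encode 1, 2, 3, 4
    static_assert(((raw >> (0 * 3)) & 0x7) == 0b001);
    static_assert(((raw >> (1 * 3)) & 0x7) == 0b010);
    static_assert(((raw >> (3 * 3)) & 0x7) == 0b100);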
diff --git a/src/video_core/engines/puller.cpp b/src/video_core/engines/puller.cpp new file mode 100644 index 000000000..cca890792 --- /dev/null +++ b/src/video_core/engines/puller.cpp | |||
| @@ -0,0 +1,306 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | |||
| 4 | #include "common/assert.h" | ||
| 5 | #include "common/logging/log.h" | ||
| 6 | #include "common/settings.h" | ||
| 7 | #include "core/core.h" | ||
| 8 | #include "video_core/control/channel_state.h" | ||
| 9 | #include "video_core/dma_pusher.h" | ||
| 10 | #include "video_core/engines/fermi_2d.h" | ||
| 11 | #include "video_core/engines/kepler_compute.h" | ||
| 12 | #include "video_core/engines/kepler_memory.h" | ||
| 13 | #include "video_core/engines/maxwell_3d.h" | ||
| 14 | #include "video_core/engines/maxwell_dma.h" | ||
| 15 | #include "video_core/engines/puller.h" | ||
| 16 | #include "video_core/gpu.h" | ||
| 17 | #include "video_core/memory_manager.h" | ||
| 18 | #include "video_core/rasterizer_interface.h" | ||
| 19 | |||
| 20 | namespace Tegra::Engines { | ||
| 21 | |||
| 22 | Puller::Puller(GPU& gpu_, MemoryManager& memory_manager_, DmaPusher& dma_pusher_, | ||
| 23 | Control::ChannelState& channel_state_) | ||
| 24 | : gpu{gpu_}, memory_manager{memory_manager_}, dma_pusher{dma_pusher_}, channel_state{ | ||
| 25 | channel_state_} {} | ||
| 26 | |||
| 27 | Puller::~Puller() = default; | ||
| 28 | |||
| 29 | void Puller::ProcessBindMethod(const MethodCall& method_call) { | ||
| 30 | // Bind the current subchannel to the desired engine id. | ||
| 31 | LOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", method_call.subchannel, | ||
| 32 | method_call.argument); | ||
| 33 | const auto engine_id = static_cast<EngineID>(method_call.argument); | ||
| 34 | bound_engines[method_call.subchannel] = static_cast<EngineID>(engine_id); | ||
| 35 | switch (engine_id) { | ||
| 36 | case EngineID::FERMI_TWOD_A: | ||
| 37 | dma_pusher.BindSubchannel(channel_state.fermi_2d.get(), method_call.subchannel); | ||
| 38 | break; | ||
| 39 | case EngineID::MAXWELL_B: | ||
| 40 | dma_pusher.BindSubchannel(channel_state.maxwell_3d.get(), method_call.subchannel); | ||
| 41 | break; | ||
| 42 | case EngineID::KEPLER_COMPUTE_B: | ||
| 43 | dma_pusher.BindSubchannel(channel_state.kepler_compute.get(), method_call.subchannel); | ||
| 44 | break; | ||
| 45 | case EngineID::MAXWELL_DMA_COPY_A: | ||
| 46 | dma_pusher.BindSubchannel(channel_state.maxwell_dma.get(), method_call.subchannel); | ||
| 47 | break; | ||
| 48 | case EngineID::KEPLER_INLINE_TO_MEMORY_B: | ||
| 49 | dma_pusher.BindSubchannel(channel_state.kepler_memory.get(), method_call.subchannel); | ||
| 50 | break; | ||
| 51 | default: | ||
| 52 | UNIMPLEMENTED_MSG("Unimplemented engine {:04X}", engine_id); | ||
| 53 | } | ||
| 54 | } | ||
| 55 | |||
| 56 | void Puller::ProcessFenceActionMethod() { | ||
| 57 | switch (regs.fence_action.op) { | ||
| 58 | case Puller::FenceOperation::Acquire: | ||
| 59 | // UNIMPLEMENTED_MSG("Channel Scheduling pending."); | ||
| 60 | // WaitFence(regs.fence_action.syncpoint_id, regs.fence_value); | ||
| 61 | rasterizer->ReleaseFences(); | ||
| 62 | break; | ||
| 63 | case Puller::FenceOperation::Increment: | ||
| 64 | rasterizer->SignalSyncPoint(regs.fence_action.syncpoint_id); | ||
| 65 | break; | ||
| 66 | default: | ||
| 67 | UNIMPLEMENTED_MSG("Unimplemented operation {}", regs.fence_action.op.Value()); | ||
| 68 | } | ||
| 69 | } | ||
| 70 | |||
| 71 | void Puller::ProcessSemaphoreTriggerMethod() { | ||
| 72 | const auto semaphoreOperationMask = 0xF; | ||
| 73 | const auto op = | ||
| 74 | static_cast<GpuSemaphoreOperation>(regs.semaphore_trigger & semaphoreOperationMask); | ||
| 75 | if (op == GpuSemaphoreOperation::WriteLong) { | ||
| 76 | const GPUVAddr sequence_address{regs.semaphore_address.SemaphoreAddress()}; | ||
| 77 | const u32 payload = regs.semaphore_sequence; | ||
| 78 | std::function<void()> operation([this, sequence_address, payload] { | ||
| 79 | memory_manager.Write<u64>(sequence_address + sizeof(u64), gpu.GetTicks()); | ||
| 80 | memory_manager.Write<u64>(sequence_address, payload); | ||
| 81 | }); | ||
| 82 | rasterizer->SignalFence(std::move(operation)); | ||
| 83 | } else { | ||
| 84 | do { | ||
| 85 | const u32 word{memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress())}; | ||
| 86 | regs.acquire_source = true; | ||
| 87 | regs.acquire_value = regs.semaphore_sequence; | ||
| 88 | if (op == GpuSemaphoreOperation::AcquireEqual) { | ||
| 89 | regs.acquire_active = true; | ||
| 90 | regs.acquire_mode = false; | ||
| 91 | if (word != regs.acquire_value) { | ||
| 92 | rasterizer->ReleaseFences(); | ||
| 93 | continue; | ||
| 94 | } | ||
| 95 | } else if (op == GpuSemaphoreOperation::AcquireGequal) { | ||
| 96 | regs.acquire_active = true; | ||
| 97 | regs.acquire_mode = true; | ||
| 98 | if (word < regs.acquire_value) { | ||
| 99 | rasterizer->ReleaseFences(); | ||
| 100 | continue; | ||
| 101 | } | ||
| 102 | } else if (op == GpuSemaphoreOperation::AcquireMask) { | ||
| 103 | if (word && regs.semaphore_sequence == 0) { | ||
| 104 | rasterizer->ReleaseFences(); | ||
| 105 | continue; | ||
| 106 | } | ||
| 107 | } else { | ||
| 108 | LOG_ERROR(HW_GPU, "Invalid semaphore operation"); | ||
| 109 | } | ||
| 110 | } while (false); | ||
| 111 | } | ||
| 112 | } | ||
| 113 | |||
| 114 | void Puller::ProcessSemaphoreRelease() { | ||
| 115 | const GPUVAddr sequence_address{regs.semaphore_address.SemaphoreAddress()}; | ||
| 116 | const u32 payload = regs.semaphore_release; | ||
| 117 | std::function<void()> operation([this, sequence_address, payload] { | ||
| 118 | memory_manager.Write<u32>(sequence_address, payload); | ||
| 119 | }); | ||
| 120 | rasterizer->SyncOperation(std::move(operation)); | ||
| 121 | } | ||
| 122 | |||
| 123 | void Puller::ProcessSemaphoreAcquire() { | ||
| 124 | u32 word = memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress()); | ||
| 125 | const auto value = regs.semaphore_acquire; | ||
| 126 | while (word != value) { | ||
| 127 | regs.acquire_active = true; | ||
| 128 | regs.acquire_value = value; | ||
| 129 | std::this_thread::sleep_for(std::chrono::milliseconds(1)); | ||
| 130 | rasterizer->ReleaseFences(); | ||
| 131 | word = memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress()); | ||
| 132 | // TODO(kemathe73) figure out how to do the acquire_timeout | ||
| 133 | regs.acquire_mode = false; | ||
| 134 | regs.acquire_source = false; | ||
| 135 | } | ||
| 136 | } | ||
| 137 | |||
| 138 | /// Calls a GPU puller method. | ||
| 139 | void Puller::CallPullerMethod(const MethodCall& method_call) { | ||
| 140 | regs.reg_array[method_call.method] = method_call.argument; | ||
| 141 | const auto method = static_cast<BufferMethods>(method_call.method); | ||
| 142 | |||
| 143 | switch (method) { | ||
| 144 | case BufferMethods::BindObject: { | ||
| 145 | ProcessBindMethod(method_call); | ||
| 146 | break; | ||
| 147 | } | ||
| 148 | case BufferMethods::Nop: | ||
| 149 | case BufferMethods::SemaphoreAddressHigh: | ||
| 150 | case BufferMethods::SemaphoreAddressLow: | ||
| 151 | case BufferMethods::SemaphoreSequencePayload: | ||
| 152 | case BufferMethods::SyncpointPayload: | ||
| 153 | break; | ||
| 154 | case BufferMethods::WrcacheFlush: | ||
| 155 | case BufferMethods::RefCnt: | ||
| 156 | rasterizer->SignalReference(); | ||
| 157 | break; | ||
| 158 | case BufferMethods::SyncpointOperation: | ||
| 159 | ProcessFenceActionMethod(); | ||
| 160 | break; | ||
| 161 | case BufferMethods::WaitForIdle: | ||
| 162 | rasterizer->WaitForIdle(); | ||
| 163 | break; | ||
| 164 | case BufferMethods::SemaphoreOperation: { | ||
| 165 | ProcessSemaphoreTriggerMethod(); | ||
| 166 | break; | ||
| 167 | } | ||
| 168 | case BufferMethods::NonStallInterrupt: { | ||
| 169 | LOG_ERROR(HW_GPU, "Special puller engine method NonStallInterrupt not implemented"); | ||
| 170 | break; | ||
| 171 | } | ||
| 172 | case BufferMethods::MemOpA: { | ||
| 173 | LOG_ERROR(HW_GPU, "Memory Operation A"); | ||
| 174 | break; | ||
| 175 | } | ||
| 176 | case BufferMethods::MemOpB: { | ||
| 177 | // Implement this better. | ||
| 178 | rasterizer->InvalidateGPUCache(); | ||
| 179 | break; | ||
| 180 | } | ||
| 181 | case BufferMethods::MemOpC: | ||
| 182 | case BufferMethods::MemOpD: { | ||
| 183 | LOG_ERROR(HW_GPU, "Memory Operation C,D"); | ||
| 184 | break; | ||
| 185 | } | ||
| 186 | case BufferMethods::SemaphoreAcquire: { | ||
| 187 | ProcessSemaphoreAcquire(); | ||
| 188 | break; | ||
| 189 | } | ||
| 190 | case BufferMethods::SemaphoreRelease: { | ||
| 191 | ProcessSemaphoreRelease(); | ||
| 192 | break; | ||
| 193 | } | ||
| 194 | case BufferMethods::Yield: { | ||
| 195 | // TODO(Kmather73): Research and implement this method. | ||
| 196 | LOG_ERROR(HW_GPU, "Special puller engine method Yield not implemented"); | ||
| 197 | break; | ||
| 198 | } | ||
| 199 | default: | ||
| 200 | LOG_ERROR(HW_GPU, "Special puller engine method {:X} not implemented", method); | ||
| 201 | break; | ||
| 202 | } | ||
| 203 | } | ||
| 204 | |||
| 205 | /// Calls a GPU engine method. | ||
| 206 | void Puller::CallEngineMethod(const MethodCall& method_call) { | ||
| 207 | const EngineID engine = bound_engines[method_call.subchannel]; | ||
| 208 | |||
| 209 | switch (engine) { | ||
| 210 | case EngineID::FERMI_TWOD_A: | ||
| 211 | channel_state.fermi_2d->CallMethod(method_call.method, method_call.argument, | ||
| 212 | method_call.IsLastCall()); | ||
| 213 | break; | ||
| 214 | case EngineID::MAXWELL_B: | ||
| 215 | channel_state.maxwell_3d->CallMethod(method_call.method, method_call.argument, | ||
| 216 | method_call.IsLastCall()); | ||
| 217 | break; | ||
| 218 | case EngineID::KEPLER_COMPUTE_B: | ||
| 219 | channel_state.kepler_compute->CallMethod(method_call.method, method_call.argument, | ||
| 220 | method_call.IsLastCall()); | ||
| 221 | break; | ||
| 222 | case EngineID::MAXWELL_DMA_COPY_A: | ||
| 223 | channel_state.maxwell_dma->CallMethod(method_call.method, method_call.argument, | ||
| 224 | method_call.IsLastCall()); | ||
| 225 | break; | ||
| 226 | case EngineID::KEPLER_INLINE_TO_MEMORY_B: | ||
| 227 | channel_state.kepler_memory->CallMethod(method_call.method, method_call.argument, | ||
| 228 | method_call.IsLastCall()); | ||
| 229 | break; | ||
| 230 | default: | ||
| 231 | UNIMPLEMENTED_MSG("Unimplemented engine"); | ||
| 232 | } | ||
| 233 | } | ||
| 234 | |||
| 235 | /// Calls a GPU engine multivalue method. | ||
| 236 | void Puller::CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, | ||
| 237 | u32 methods_pending) { | ||
| 238 | const EngineID engine = bound_engines[subchannel]; | ||
| 239 | |||
| 240 | switch (engine) { | ||
| 241 | case EngineID::FERMI_TWOD_A: | ||
| 242 | channel_state.fermi_2d->CallMultiMethod(method, base_start, amount, methods_pending); | ||
| 243 | break; | ||
| 244 | case EngineID::MAXWELL_B: | ||
| 245 | channel_state.maxwell_3d->CallMultiMethod(method, base_start, amount, methods_pending); | ||
| 246 | break; | ||
| 247 | case EngineID::KEPLER_COMPUTE_B: | ||
| 248 | channel_state.kepler_compute->CallMultiMethod(method, base_start, amount, methods_pending); | ||
| 249 | break; | ||
| 250 | case EngineID::MAXWELL_DMA_COPY_A: | ||
| 251 | channel_state.maxwell_dma->CallMultiMethod(method, base_start, amount, methods_pending); | ||
| 252 | break; | ||
| 253 | case EngineID::KEPLER_INLINE_TO_MEMORY_B: | ||
| 254 | channel_state.kepler_memory->CallMultiMethod(method, base_start, amount, methods_pending); | ||
| 255 | break; | ||
| 256 | default: | ||
| 257 | UNIMPLEMENTED_MSG("Unimplemented engine"); | ||
| 258 | } | ||
| 259 | } | ||
| 260 | |||
| 261 | /// Calls a GPU method. | ||
| 262 | void Puller::CallMethod(const MethodCall& method_call) { | ||
| 263 | LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method_call.method, | ||
| 264 | method_call.subchannel); | ||
| 265 | |||
| 266 | ASSERT(method_call.subchannel < bound_engines.size()); | ||
| 267 | |||
| 268 | if (ExecuteMethodOnEngine(method_call.method)) { | ||
| 269 | CallEngineMethod(method_call); | ||
| 270 | } else { | ||
| 271 | CallPullerMethod(method_call); | ||
| 272 | } | ||
| 273 | } | ||
| 274 | |||
| 275 | /// Calls a GPU multivalue method. | ||
| 276 | void Puller::CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, | ||
| 277 | u32 methods_pending) { | ||
| 278 | LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method, subchannel); | ||
| 279 | |||
| 280 | ASSERT(subchannel < bound_engines.size()); | ||
| 281 | |||
| 282 | if (ExecuteMethodOnEngine(method)) { | ||
| 283 | CallEngineMultiMethod(method, subchannel, base_start, amount, methods_pending); | ||
| 284 | } else { | ||
| 285 | for (std::size_t i = 0; i < amount; i++) { | ||
| 286 | CallPullerMethod(MethodCall{ | ||
| 287 | method, | ||
| 288 | base_start[i], | ||
| 289 | subchannel, | ||
| 290 | methods_pending - static_cast<u32>(i), | ||
| 291 | }); | ||
| 292 | } | ||
| 293 | } | ||
| 294 | } | ||
| 295 | |||
| 296 | void Puller::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) { | ||
| 297 | rasterizer = rasterizer_; | ||
| 298 | } | ||
| 299 | |||
| 300 | /// Determines where the method should be executed. | ||
| 301 | [[nodiscard]] bool Puller::ExecuteMethodOnEngine(u32 method) { | ||
| 302 | const auto buffer_method = static_cast<BufferMethods>(method); | ||
| 303 | return buffer_method >= BufferMethods::NonPullerMethods; | ||
| 304 | } | ||
| 305 | |||
| 306 | } // namespace Tegra::Engines | ||
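The new Puller routes each FIFO method either to itself or to the engine bound on that method's subchannel, depending on whether the method id falls below the puller-reserved range (ExecuteMethodOnEngine above). A hedged sketch of that dispatch split follows; the threshold value and engine ids are placeholders, not the real BufferMethods/EngineID constants.

```cpp
// Sketch of the dispatch split used by Puller::CallMethod(): ids below a threshold
// are handled by the puller, everything else goes to the engine bound on the
// subchannel. Constants are assumptions for illustration.
#include <array>
#include <cstdint>
#include <cstdio>

constexpr std::uint32_t kNonPullerMethods = 0x40; // illustrative threshold

enum class EngineID : std::uint32_t { Fermi2D, Maxwell3D, MaxwellDMA };

struct Demo {
    std::array<EngineID, 8> bound_engines{};

    void CallMethod(std::uint32_t method, std::uint32_t argument, std::uint32_t subchannel) {
        if (method >= kNonPullerMethods) {
            CallEngineMethod(bound_engines[subchannel], method, argument);
        } else {
            CallPullerMethod(method, argument);
        }
    }

    void CallPullerMethod(std::uint32_t method, std::uint32_t argument) {
        std::printf("puller method %02X <- %08X\n", static_cast<unsigned>(method),
                    static_cast<unsigned>(argument));
    }

    void CallEngineMethod(EngineID engine, std::uint32_t method, std::uint32_t argument) {
        std::printf("engine %u method %04X <- %08X\n", static_cast<unsigned>(engine),
                    static_cast<unsigned>(method), static_cast<unsigned>(argument));
    }
};

int main() {
    Demo demo;
    demo.bound_engines[1] = EngineID::Maxwell3D;
    demo.CallMethod(0x1B, 1, 0);  // below threshold: handled by the puller itself
    demo.CallMethod(0x200, 7, 1); // above threshold: forwarded to the engine on subchannel 1
    return 0;
}
```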
diff --git a/src/video_core/engines/puller.h b/src/video_core/engines/puller.h new file mode 100644 index 000000000..d4175ee94 --- /dev/null +++ b/src/video_core/engines/puller.h | |||
| @@ -0,0 +1,177 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <array> | ||
| 7 | #include <cstddef> | ||
| 8 | #include <vector> | ||
| 9 | #include "common/bit_field.h" | ||
| 10 | #include "common/common_funcs.h" | ||
| 11 | #include "common/common_types.h" | ||
| 12 | #include "video_core/engines/engine_interface.h" | ||
| 13 | |||
| 14 | namespace Core { | ||
| 15 | class System; | ||
| 16 | } | ||
| 17 | |||
| 18 | namespace Tegra { | ||
| 19 | class MemoryManager; | ||
| 20 | class DmaPusher; | ||
| 21 | |||
| 22 | enum class EngineID { | ||
| 23 | FERMI_TWOD_A = 0x902D, // 2D Engine | ||
| 24 | MAXWELL_B = 0xB197, // 3D Engine | ||
| 25 | KEPLER_COMPUTE_B = 0xB1C0, | ||
| 26 | KEPLER_INLINE_TO_MEMORY_B = 0xA140, | ||
| 27 | MAXWELL_DMA_COPY_A = 0xB0B5, | ||
| 28 | }; | ||
| 29 | |||
| 30 | namespace Control { | ||
| 31 | struct ChannelState; | ||
| 32 | } | ||
| 33 | } // namespace Tegra | ||
| 34 | |||
| 35 | namespace VideoCore { | ||
| 36 | class RasterizerInterface; | ||
| 37 | } | ||
| 38 | |||
| 39 | namespace Tegra::Engines { | ||
| 40 | |||
| 41 | class Puller final { | ||
| 42 | public: | ||
| 43 | struct MethodCall { | ||
| 44 | u32 method{}; | ||
| 45 | u32 argument{}; | ||
| 46 | u32 subchannel{}; | ||
| 47 | u32 method_count{}; | ||
| 48 | |||
| 49 | explicit MethodCall(u32 method_, u32 argument_, u32 subchannel_ = 0, u32 method_count_ = 0) | ||
| 50 | : method(method_), argument(argument_), subchannel(subchannel_), | ||
| 51 | method_count(method_count_) {} | ||
| 52 | |||
| 53 | [[nodiscard]] bool IsLastCall() const { | ||
| 54 | return method_count <= 1; | ||
| 55 | } | ||
| 56 | }; | ||
| 57 | |||
| 58 | enum class FenceOperation : u32 { | ||
| 59 | Acquire = 0, | ||
| 60 | Increment = 1, | ||
| 61 | }; | ||
| 62 | |||
| 63 | union FenceAction { | ||
| 64 | u32 raw; | ||
| 65 | BitField<0, 1, FenceOperation> op; | ||
| 66 | BitField<8, 24, u32> syncpoint_id; | ||
| 67 | }; | ||
| 68 | |||
| 69 | explicit Puller(GPU& gpu_, MemoryManager& memory_manager_, DmaPusher& dma_pusher, | ||
| 70 | Control::ChannelState& channel_state); | ||
| 71 | ~Puller(); | ||
| 72 | |||
| 73 | void CallMethod(const MethodCall& method_call); | ||
| 74 | |||
| 75 | void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, | ||
| 76 | u32 methods_pending); | ||
| 77 | |||
| 78 | void BindRasterizer(VideoCore::RasterizerInterface* rasterizer); | ||
| 79 | |||
| 80 | void CallPullerMethod(const MethodCall& method_call); | ||
| 81 | |||
| 82 | void CallEngineMethod(const MethodCall& method_call); | ||
| 83 | |||
| 84 | void CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, | ||
| 85 | u32 methods_pending); | ||
| 86 | |||
| 87 | private: | ||
| 88 | Tegra::GPU& gpu; | ||
| 89 | |||
| 90 | MemoryManager& memory_manager; | ||
| 91 | DmaPusher& dma_pusher; | ||
| 92 | Control::ChannelState& channel_state; | ||
| 93 | VideoCore::RasterizerInterface* rasterizer = nullptr; | ||
| 94 | |||
| 95 | static constexpr std::size_t NUM_REGS = 0x800; | ||
| 96 | struct Regs { | ||
| 97 | static constexpr size_t NUM_REGS = 0x40; | ||
| 98 | |||
| 99 | union { | ||
| 100 | struct { | ||
| 101 | INSERT_PADDING_WORDS_NOINIT(0x4); | ||
| 102 | struct { | ||
| 103 | u32 address_high; | ||
| 104 | u32 address_low; | ||
| 105 | |||
| 106 | [[nodiscard]] GPUVAddr SemaphoreAddress() const { | ||
| 107 | return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | | ||
| 108 | address_low); | ||
| 109 | } | ||
| 110 | } semaphore_address; | ||
| 111 | |||
| 112 | u32 semaphore_sequence; | ||
| 113 | u32 semaphore_trigger; | ||
| 114 | INSERT_PADDING_WORDS_NOINIT(0xC); | ||
| 115 | |||
| 116 | // The pusher and the puller share the reference counter; the pusher only has read | ||
| 117 | // access | ||
| 118 | u32 reference_count; | ||
| 119 | INSERT_PADDING_WORDS_NOINIT(0x5); | ||
| 120 | |||
| 121 | u32 semaphore_acquire; | ||
| 122 | u32 semaphore_release; | ||
| 123 | u32 fence_value; | ||
| 124 | FenceAction fence_action; | ||
| 125 | INSERT_PADDING_WORDS_NOINIT(0xE2); | ||
| 126 | |||
| 127 | // Puller state | ||
| 128 | u32 acquire_mode; | ||
| 129 | u32 acquire_source; | ||
| 130 | u32 acquire_active; | ||
| 131 | u32 acquire_timeout; | ||
| 132 | u32 acquire_value; | ||
| 133 | }; | ||
| 134 | std::array<u32, NUM_REGS> reg_array; | ||
| 135 | }; | ||
| 136 | } regs{}; | ||
| 137 | |||
| 138 | void ProcessBindMethod(const MethodCall& method_call); | ||
| 139 | void ProcessFenceActionMethod(); | ||
| 140 | void ProcessSemaphoreAcquire(); | ||
| 141 | void ProcessSemaphoreRelease(); | ||
| 142 | void ProcessSemaphoreTriggerMethod(); | ||
| 143 | [[nodiscard]] bool ExecuteMethodOnEngine(u32 method); | ||
| 144 | |||
| 145 | /// Mapping of command subchannels to their bound engine ids | ||
| 146 | std::array<EngineID, 8> bound_engines{}; | ||
| 147 | |||
| 148 | enum class GpuSemaphoreOperation { | ||
| 149 | AcquireEqual = 0x1, | ||
| 150 | WriteLong = 0x2, | ||
| 151 | AcquireGequal = 0x4, | ||
| 152 | AcquireMask = 0x8, | ||
| 153 | }; | ||
| 154 | |||
| 155 | #define ASSERT_REG_POSITION(field_name, position) \ | ||
| 156 | static_assert(offsetof(Regs, field_name) == position * 4, \ | ||
| 157 | "Field " #field_name " has invalid position") | ||
| 158 | |||
| 159 | ASSERT_REG_POSITION(semaphore_address, 0x4); | ||
| 160 | ASSERT_REG_POSITION(semaphore_sequence, 0x6); | ||
| 161 | ASSERT_REG_POSITION(semaphore_trigger, 0x7); | ||
| 162 | ASSERT_REG_POSITION(reference_count, 0x14); | ||
| 163 | ASSERT_REG_POSITION(semaphore_acquire, 0x1A); | ||
| 164 | ASSERT_REG_POSITION(semaphore_release, 0x1B); | ||
| 165 | ASSERT_REG_POSITION(fence_value, 0x1C); | ||
| 166 | ASSERT_REG_POSITION(fence_action, 0x1D); | ||
| 167 | |||
| 168 | ASSERT_REG_POSITION(acquire_mode, 0x100); | ||
| 169 | ASSERT_REG_POSITION(acquire_source, 0x101); | ||
| 170 | ASSERT_REG_POSITION(acquire_active, 0x102); | ||
| 171 | ASSERT_REG_POSITION(acquire_timeout, 0x103); | ||
| 172 | ASSERT_REG_POSITION(acquire_value, 0x104); | ||
| 173 | |||
| 174 | #undef ASSERT_REG_POSITION | ||
| 175 | }; | ||
| 176 | |||
| 177 | } // namespace Tegra::Engines | ||
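Puller::Regs above overlays named registers on a flat u32 array and then pins each field's word offset with ASSERT_REG_POSITION static_asserts. The sketch below reproduces that overlay-and-verify idiom in isolation; field names and offsets are illustrative, and it relies on the same anonymous-struct-in-union compiler extension and cross-member access that the codebase already uses.

```cpp
// Sketch of the register-overlay idiom used by Puller::Regs. Anonymous structs in
// unions are a widely supported extension; reading a field written through the flat
// array is the same type-punning the emulator relies on.
#include <array>
#include <cstddef>
#include <cstdint>

struct Regs {
    static constexpr std::size_t NUM_REGS = 0x40;
    union {
        struct {
            std::uint32_t pad0[0x4];
            std::uint32_t semaphore_address_high; // word 0x4
            std::uint32_t semaphore_address_low;  // word 0x5
            std::uint32_t semaphore_sequence;     // word 0x6
            std::uint32_t pad1[NUM_REGS - 0x7];
        };
        std::array<std::uint32_t, NUM_REGS> reg_array;
    };
};

#define ASSERT_REG_POSITION(field, position) \
    static_assert(offsetof(Regs, field) == (position) * 4, "Field " #field " has invalid position")

ASSERT_REG_POSITION(semaphore_address_high, 0x4);
ASSERT_REG_POSITION(semaphore_sequence, 0x6);
#undef ASSERT_REG_POSITION

int main() {
    Regs regs{};
    regs.reg_array[0x6] = 42;                     // write through the flat view...
    return regs.semaphore_sequence == 42 ? 0 : 1; // ...read through the named field
}
```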
diff --git a/src/video_core/fence_manager.h b/src/video_core/fence_manager.h index 1e9832ddd..c390ac91b 100644 --- a/src/video_core/fence_manager.h +++ b/src/video_core/fence_manager.h | |||
| @@ -4,40 +4,24 @@ | |||
| 4 | #pragma once | 4 | #pragma once |
| 5 | 5 | ||
| 6 | #include <algorithm> | 6 | #include <algorithm> |
| 7 | #include <cstring> | ||
| 8 | #include <deque> | ||
| 9 | #include <functional> | ||
| 10 | #include <memory> | ||
| 7 | #include <queue> | 11 | #include <queue> |
| 8 | 12 | ||
| 9 | #include "common/common_types.h" | 13 | #include "common/common_types.h" |
| 10 | #include "video_core/delayed_destruction_ring.h" | 14 | #include "video_core/delayed_destruction_ring.h" |
| 11 | #include "video_core/gpu.h" | 15 | #include "video_core/gpu.h" |
| 12 | #include "video_core/memory_manager.h" | 16 | #include "video_core/host1x/host1x.h" |
| 17 | #include "video_core/host1x/syncpoint_manager.h" | ||
| 13 | #include "video_core/rasterizer_interface.h" | 18 | #include "video_core/rasterizer_interface.h" |
| 14 | 19 | ||
| 15 | namespace VideoCommon { | 20 | namespace VideoCommon { |
| 16 | 21 | ||
| 17 | class FenceBase { | 22 | class FenceBase { |
| 18 | public: | 23 | public: |
| 19 | explicit FenceBase(u32 payload_, bool is_stubbed_) | 24 | explicit FenceBase(bool is_stubbed_) : is_stubbed{is_stubbed_} {} |
| 20 | : address{}, payload{payload_}, is_semaphore{false}, is_stubbed{is_stubbed_} {} | ||
| 21 | |||
| 22 | explicit FenceBase(GPUVAddr address_, u32 payload_, bool is_stubbed_) | ||
| 23 | : address{address_}, payload{payload_}, is_semaphore{true}, is_stubbed{is_stubbed_} {} | ||
| 24 | |||
| 25 | GPUVAddr GetAddress() const { | ||
| 26 | return address; | ||
| 27 | } | ||
| 28 | |||
| 29 | u32 GetPayload() const { | ||
| 30 | return payload; | ||
| 31 | } | ||
| 32 | |||
| 33 | bool IsSemaphore() const { | ||
| 34 | return is_semaphore; | ||
| 35 | } | ||
| 36 | |||
| 37 | private: | ||
| 38 | GPUVAddr address; | ||
| 39 | u32 payload; | ||
| 40 | bool is_semaphore; | ||
| 41 | 25 | ||
| 42 | protected: | 26 | protected: |
| 43 | bool is_stubbed; | 27 | bool is_stubbed; |
| @@ -57,30 +41,28 @@ public: | |||
| 57 | buffer_cache.AccumulateFlushes(); | 41 | buffer_cache.AccumulateFlushes(); |
| 58 | } | 42 | } |
| 59 | 43 | ||
| 60 | void SignalSemaphore(GPUVAddr addr, u32 value) { | 44 | void SyncOperation(std::function<void()>&& func) { |
| 45 | uncommitted_operations.emplace_back(std::move(func)); | ||
| 46 | } | ||
| 47 | |||
| 48 | void SignalFence(std::function<void()>&& func) { | ||
| 61 | TryReleasePendingFences(); | 49 | TryReleasePendingFences(); |
| 62 | const bool should_flush = ShouldFlush(); | 50 | const bool should_flush = ShouldFlush(); |
| 63 | CommitAsyncFlushes(); | 51 | CommitAsyncFlushes(); |
| 64 | TFence new_fence = CreateFence(addr, value, !should_flush); | 52 | uncommitted_operations.emplace_back(std::move(func)); |
| 53 | CommitOperations(); | ||
| 54 | TFence new_fence = CreateFence(!should_flush); | ||
| 65 | fences.push(new_fence); | 55 | fences.push(new_fence); |
| 66 | QueueFence(new_fence); | 56 | QueueFence(new_fence); |
| 67 | if (should_flush) { | 57 | if (should_flush) { |
| 68 | rasterizer.FlushCommands(); | 58 | rasterizer.FlushCommands(); |
| 69 | } | 59 | } |
| 70 | rasterizer.SyncGuestHost(); | ||
| 71 | } | 60 | } |
| 72 | 61 | ||
| 73 | void SignalSyncPoint(u32 value) { | 62 | void SignalSyncPoint(u32 value) { |
| 74 | TryReleasePendingFences(); | 63 | syncpoint_manager.IncrementGuest(value); |
| 75 | const bool should_flush = ShouldFlush(); | 64 | std::function<void()> func([this, value] { syncpoint_manager.IncrementHost(value); }); |
| 76 | CommitAsyncFlushes(); | 65 | SignalFence(std::move(func)); |
| 77 | TFence new_fence = CreateFence(value, !should_flush); | ||
| 78 | fences.push(new_fence); | ||
| 79 | QueueFence(new_fence); | ||
| 80 | if (should_flush) { | ||
| 81 | rasterizer.FlushCommands(); | ||
| 82 | } | ||
| 83 | rasterizer.SyncGuestHost(); | ||
| 84 | } | 66 | } |
| 85 | 67 | ||
| 86 | void WaitPendingFences() { | 68 | void WaitPendingFences() { |
| @@ -90,11 +72,10 @@ public: | |||
| 90 | WaitFence(current_fence); | 72 | WaitFence(current_fence); |
| 91 | } | 73 | } |
| 92 | PopAsyncFlushes(); | 74 | PopAsyncFlushes(); |
| 93 | if (current_fence->IsSemaphore()) { | 75 | auto operations = std::move(pending_operations.front()); |
| 94 | gpu_memory.template Write<u32>(current_fence->GetAddress(), | 76 | pending_operations.pop_front(); |
| 95 | current_fence->GetPayload()); | 77 | for (auto& operation : operations) { |
| 96 | } else { | 78 | operation(); |
| 97 | gpu.IncrementSyncPoint(current_fence->GetPayload()); | ||
| 98 | } | 79 | } |
| 99 | PopFence(); | 80 | PopFence(); |
| 100 | } | 81 | } |
| @@ -104,16 +85,14 @@ protected: | |||
| 104 | explicit FenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::GPU& gpu_, | 85 | explicit FenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::GPU& gpu_, |
| 105 | TTextureCache& texture_cache_, TTBufferCache& buffer_cache_, | 86 | TTextureCache& texture_cache_, TTBufferCache& buffer_cache_, |
| 106 | TQueryCache& query_cache_) | 87 | TQueryCache& query_cache_) |
| 107 | : rasterizer{rasterizer_}, gpu{gpu_}, gpu_memory{gpu.MemoryManager()}, | 88 | : rasterizer{rasterizer_}, gpu{gpu_}, syncpoint_manager{gpu.Host1x().GetSyncpointManager()}, |
| 108 | texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, query_cache{query_cache_} {} | 89 | texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, query_cache{query_cache_} {} |
| 109 | 90 | ||
| 110 | virtual ~FenceManager() = default; | 91 | virtual ~FenceManager() = default; |
| 111 | 92 | ||
| 112 | /// Creates a Sync Point Fence Interface, does not create a backend fence if 'is_stubbed' is | 93 | /// Creates a Fence Interface, does not create a backend fence if 'is_stubbed' is |
| 113 | /// true | 94 | /// true |
| 114 | virtual TFence CreateFence(u32 value, bool is_stubbed) = 0; | 95 | virtual TFence CreateFence(bool is_stubbed) = 0; |
| 115 | /// Creates a Semaphore Fence Interface, does not create a backend fence if 'is_stubbed' is true | ||
| 116 | virtual TFence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) = 0; | ||
| 117 | /// Queues a fence into the backend if the fence isn't stubbed. | 96 | /// Queues a fence into the backend if the fence isn't stubbed. |
| 118 | virtual void QueueFence(TFence& fence) = 0; | 97 | virtual void QueueFence(TFence& fence) = 0; |
| 119 | /// Notifies that the backend fence has been signaled/reached in host GPU. | 98 | /// Notifies that the backend fence has been signaled/reached in host GPU. |
| @@ -123,7 +102,7 @@ protected: | |||
| 123 | 102 | ||
| 124 | VideoCore::RasterizerInterface& rasterizer; | 103 | VideoCore::RasterizerInterface& rasterizer; |
| 125 | Tegra::GPU& gpu; | 104 | Tegra::GPU& gpu; |
| 126 | Tegra::MemoryManager& gpu_memory; | 105 | Tegra::Host1x::SyncpointManager& syncpoint_manager; |
| 127 | TTextureCache& texture_cache; | 106 | TTextureCache& texture_cache; |
| 128 | TTBufferCache& buffer_cache; | 107 | TTBufferCache& buffer_cache; |
| 129 | TQueryCache& query_cache; | 108 | TQueryCache& query_cache; |
| @@ -136,11 +115,10 @@ private: | |||
| 136 | return; | 115 | return; |
| 137 | } | 116 | } |
| 138 | PopAsyncFlushes(); | 117 | PopAsyncFlushes(); |
| 139 | if (current_fence->IsSemaphore()) { | 118 | auto operations = std::move(pending_operations.front()); |
| 140 | gpu_memory.template Write<u32>(current_fence->GetAddress(), | 119 | pending_operations.pop_front(); |
| 141 | current_fence->GetPayload()); | 120 | for (auto& operation : operations) { |
| 142 | } else { | 121 | operation(); |
| 143 | gpu.IncrementSyncPoint(current_fence->GetPayload()); | ||
| 144 | } | 122 | } |
| 145 | PopFence(); | 123 | PopFence(); |
| 146 | } | 124 | } |
| @@ -159,16 +137,20 @@ private: | |||
| 159 | } | 137 | } |
| 160 | 138 | ||
| 161 | void PopAsyncFlushes() { | 139 | void PopAsyncFlushes() { |
| 162 | std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; | 140 | { |
| 163 | texture_cache.PopAsyncFlushes(); | 141 | std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; |
| 164 | buffer_cache.PopAsyncFlushes(); | 142 | texture_cache.PopAsyncFlushes(); |
| 143 | buffer_cache.PopAsyncFlushes(); | ||
| 144 | } | ||
| 165 | query_cache.PopAsyncFlushes(); | 145 | query_cache.PopAsyncFlushes(); |
| 166 | } | 146 | } |
| 167 | 147 | ||
| 168 | void CommitAsyncFlushes() { | 148 | void CommitAsyncFlushes() { |
| 169 | std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; | 149 | { |
| 170 | texture_cache.CommitAsyncFlushes(); | 150 | std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; |
| 171 | buffer_cache.CommitAsyncFlushes(); | 151 | texture_cache.CommitAsyncFlushes(); |
| 152 | buffer_cache.CommitAsyncFlushes(); | ||
| 153 | } | ||
| 172 | query_cache.CommitAsyncFlushes(); | 154 | query_cache.CommitAsyncFlushes(); |
| 173 | } | 155 | } |
| 174 | 156 | ||
| @@ -177,7 +159,13 @@ private: | |||
| 177 | fences.pop(); | 159 | fences.pop(); |
| 178 | } | 160 | } |
| 179 | 161 | ||
| 162 | void CommitOperations() { | ||
| 163 | pending_operations.emplace_back(std::move(uncommitted_operations)); | ||
| 164 | } | ||
| 165 | |||
| 180 | std::queue<TFence> fences; | 166 | std::queue<TFence> fences; |
| 167 | std::deque<std::function<void()>> uncommitted_operations; | ||
| 168 | std::deque<std::deque<std::function<void()>>> pending_operations; | ||
| 181 | 169 | ||
| 182 | DelayedDestructionRing<TFence, 6> delayed_destruction_ring; | 170 | DelayedDestructionRing<TFence, 6> delayed_destruction_ring; |
| 183 | }; | 171 | }; |
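With this change the fence manager no longer stores an address/payload pair in each fence; instead it accumulates std::function operations, batches them behind each newly created fence, and runs the whole batch when that fence is popped. Below is a self-contained sketch of that commit/run lifecycle; SimpleFenceManager is illustrative and omits the cache flushing, query handling, and backend fence objects of the real template.

```cpp
// Sketch of the operation-batching lifecycle introduced in fence_manager.h:
// operations accumulate, are committed when a fence is signaled, and execute only
// once that fence is released.
#include <deque>
#include <functional>
#include <iostream>

class SimpleFenceManager {
public:
    // Mirrors SyncOperation(): queue work without creating a fence yet.
    void SyncOperation(std::function<void()>&& func) {
        uncommitted_operations.emplace_back(std::move(func));
    }

    // Mirrors SignalFence(): batch everything queued so far behind a new fence.
    void SignalFence(std::function<void()>&& func) {
        uncommitted_operations.emplace_back(std::move(func));
        pending_operations.emplace_back(std::move(uncommitted_operations));
        uncommitted_operations.clear();
    }

    // Mirrors PopFence()/WaitPendingFences(): the oldest batch runs in order.
    void ReleaseOldestFence() {
        if (pending_operations.empty()) {
            return;
        }
        auto operations = std::move(pending_operations.front());
        pending_operations.pop_front();
        for (auto& operation : operations) {
            operation();
        }
    }

private:
    std::deque<std::function<void()>> uncommitted_operations;
    std::deque<std::deque<std::function<void()>>> pending_operations;
};

int main() {
    SimpleFenceManager manager;
    manager.SyncOperation([] { std::cout << "write semaphore payload\n"; });
    manager.SignalFence([] { std::cout << "increment syncpoint\n"; });
    manager.ReleaseOldestFence(); // both operations run, in submission order
    return 0;
}
```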
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp index 33431f2a0..28b38273e 100644 --- a/src/video_core/gpu.cpp +++ b/src/video_core/gpu.cpp | |||
| @@ -14,10 +14,11 @@ | |||
| 14 | #include "core/core.h" | 14 | #include "core/core.h" |
| 15 | #include "core/core_timing.h" | 15 | #include "core/core_timing.h" |
| 16 | #include "core/frontend/emu_window.h" | 16 | #include "core/frontend/emu_window.h" |
| 17 | #include "core/hardware_interrupt_manager.h" | ||
| 18 | #include "core/hle/service/nvdrv/nvdata.h" | 17 | #include "core/hle/service/nvdrv/nvdata.h" |
| 19 | #include "core/perf_stats.h" | 18 | #include "core/perf_stats.h" |
| 20 | #include "video_core/cdma_pusher.h" | 19 | #include "video_core/cdma_pusher.h" |
| 20 | #include "video_core/control/channel_state.h" | ||
| 21 | #include "video_core/control/scheduler.h" | ||
| 21 | #include "video_core/dma_pusher.h" | 22 | #include "video_core/dma_pusher.h" |
| 22 | #include "video_core/engines/fermi_2d.h" | 23 | #include "video_core/engines/fermi_2d.h" |
| 23 | #include "video_core/engines/kepler_compute.h" | 24 | #include "video_core/engines/kepler_compute.h" |
| @@ -26,75 +27,64 @@ | |||
| 26 | #include "video_core/engines/maxwell_dma.h" | 27 | #include "video_core/engines/maxwell_dma.h" |
| 27 | #include "video_core/gpu.h" | 28 | #include "video_core/gpu.h" |
| 28 | #include "video_core/gpu_thread.h" | 29 | #include "video_core/gpu_thread.h" |
| 30 | #include "video_core/host1x/host1x.h" | ||
| 31 | #include "video_core/host1x/syncpoint_manager.h" | ||
| 29 | #include "video_core/memory_manager.h" | 32 | #include "video_core/memory_manager.h" |
| 30 | #include "video_core/renderer_base.h" | 33 | #include "video_core/renderer_base.h" |
| 31 | #include "video_core/shader_notify.h" | 34 | #include "video_core/shader_notify.h" |
| 32 | 35 | ||
| 33 | namespace Tegra { | 36 | namespace Tegra { |
| 34 | 37 | ||
| 35 | MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192)); | ||
| 36 | |||
| 37 | struct GPU::Impl { | 38 | struct GPU::Impl { |
| 38 | explicit Impl(GPU& gpu_, Core::System& system_, bool is_async_, bool use_nvdec_) | 39 | explicit Impl(GPU& gpu_, Core::System& system_, bool is_async_, bool use_nvdec_) |
| 39 | : gpu{gpu_}, system{system_}, memory_manager{std::make_unique<Tegra::MemoryManager>( | 40 | : gpu{gpu_}, system{system_}, host1x{system.Host1x()}, use_nvdec{use_nvdec_}, |
| 40 | system)}, | ||
| 41 | dma_pusher{std::make_unique<Tegra::DmaPusher>(system, gpu)}, use_nvdec{use_nvdec_}, | ||
| 42 | maxwell_3d{std::make_unique<Engines::Maxwell3D>(system, *memory_manager)}, | ||
| 43 | fermi_2d{std::make_unique<Engines::Fermi2D>()}, | ||
| 44 | kepler_compute{std::make_unique<Engines::KeplerCompute>(system, *memory_manager)}, | ||
| 45 | maxwell_dma{std::make_unique<Engines::MaxwellDMA>(system, *memory_manager)}, | ||
| 46 | kepler_memory{std::make_unique<Engines::KeplerMemory>(system, *memory_manager)}, | ||
| 47 | shader_notify{std::make_unique<VideoCore::ShaderNotify>()}, is_async{is_async_}, | 41 | shader_notify{std::make_unique<VideoCore::ShaderNotify>()}, is_async{is_async_}, |
| 48 | gpu_thread{system_, is_async_} {} | 42 | gpu_thread{system_, is_async_}, scheduler{std::make_unique<Control::Scheduler>(gpu)} {} |
| 49 | 43 | ||
| 50 | ~Impl() = default; | 44 | ~Impl() = default; |
| 51 | 45 | ||
| 52 | /// Binds a renderer to the GPU. | 46 | std::shared_ptr<Control::ChannelState> CreateChannel(s32 channel_id) { |
| 53 | void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer_) { | 47 | auto channel_state = std::make_shared<Tegra::Control::ChannelState>(channel_id); |
| 54 | renderer = std::move(renderer_); | 48 | channels.emplace(channel_id, channel_state); |
| 55 | rasterizer = renderer->ReadRasterizer(); | 49 | scheduler->DeclareChannel(channel_state); |
| 56 | 50 | return channel_state; | |
| 57 | memory_manager->BindRasterizer(rasterizer); | ||
| 58 | maxwell_3d->BindRasterizer(rasterizer); | ||
| 59 | fermi_2d->BindRasterizer(rasterizer); | ||
| 60 | kepler_compute->BindRasterizer(rasterizer); | ||
| 61 | kepler_memory->BindRasterizer(rasterizer); | ||
| 62 | maxwell_dma->BindRasterizer(rasterizer); | ||
| 63 | } | 51 | } |
| 64 | 52 | ||
| 65 | /// Calls a GPU method. | 53 | void BindChannel(s32 channel_id) { |
| 66 | void CallMethod(const GPU::MethodCall& method_call) { | 54 | if (bound_channel == channel_id) { |
| 67 | LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method_call.method, | 55 | return; |
| 68 | method_call.subchannel); | 56 | } |
| 57 | auto it = channels.find(channel_id); | ||
| 58 | ASSERT(it != channels.end()); | ||
| 59 | bound_channel = channel_id; | ||
| 60 | current_channel = it->second.get(); | ||
| 69 | 61 | ||
| 70 | ASSERT(method_call.subchannel < bound_engines.size()); | 62 | rasterizer->BindChannel(*current_channel); |
| 63 | } | ||
| 71 | 64 | ||
| 72 | if (ExecuteMethodOnEngine(method_call.method)) { | 65 | std::shared_ptr<Control::ChannelState> AllocateChannel() { |
| 73 | CallEngineMethod(method_call); | 66 | return CreateChannel(new_channel_id++); |
| 74 | } else { | ||
| 75 | CallPullerMethod(method_call); | ||
| 76 | } | ||
| 77 | } | 67 | } |
| 78 | 68 | ||
| 79 | /// Calls a GPU multivalue method. | 69 | void InitChannel(Control::ChannelState& to_init) { |
| 80 | void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, | 70 | to_init.Init(system, gpu); |
| 81 | u32 methods_pending) { | 71 | to_init.BindRasterizer(rasterizer); |
| 82 | LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method, subchannel); | 72 | rasterizer->InitializeChannel(to_init); |
| 73 | } | ||
| 83 | 74 | ||
| 84 | ASSERT(subchannel < bound_engines.size()); | 75 | void InitAddressSpace(Tegra::MemoryManager& memory_manager) { |
| 76 | memory_manager.BindRasterizer(rasterizer); | ||
| 77 | } | ||
| 85 | 78 | ||
| 86 | if (ExecuteMethodOnEngine(method)) { | 79 | void ReleaseChannel(Control::ChannelState& to_release) { |
| 87 | CallEngineMultiMethod(method, subchannel, base_start, amount, methods_pending); | 80 | UNIMPLEMENTED(); |
| 88 | } else { | 81 | } |
| 89 | for (std::size_t i = 0; i < amount; i++) { | 82 | |
| 90 | CallPullerMethod(GPU::MethodCall{ | 83 | /// Binds a renderer to the GPU. |
| 91 | method, | 84 | void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer_) { |
| 92 | base_start[i], | 85 | renderer = std::move(renderer_); |
| 93 | subchannel, | 86 | rasterizer = renderer->ReadRasterizer(); |
| 94 | methods_pending - static_cast<u32>(i), | 87 | host1x.MemoryManager().BindRasterizer(rasterizer); |
| 95 | }); | ||
| 96 | } | ||
| 97 | } | ||
| 98 | } | 88 | } |
| 99 | 89 | ||
| 100 | /// Flush all current written commands into the host GPU for execution. | 90 | /// Flush all current written commands into the host GPU for execution. |
| @@ -103,85 +93,82 @@ struct GPU::Impl { | |||
| 103 | } | 93 | } |
| 104 | 94 | ||
| 105 | /// Synchronizes CPU writes with Host GPU memory. | 95 | /// Synchronizes CPU writes with Host GPU memory. |
| 106 | void SyncGuestHost() { | 96 | void InvalidateGPUCache() { |
| 107 | rasterizer->SyncGuestHost(); | 97 | rasterizer->InvalidateGPUCache(); |
| 108 | } | 98 | } |
| 109 | 99 | ||
| 110 | /// Signal the ending of command list. | 100 | /// Signal the ending of command list. |
| 111 | void OnCommandListEnd() { | 101 | void OnCommandListEnd() { |
| 112 | if (is_async) { | 102 | gpu_thread.OnCommandListEnd(); |
| 113 | // This command only applies to asynchronous GPU mode | ||
| 114 | gpu_thread.OnCommandListEnd(); | ||
| 115 | } | ||
| 116 | } | 103 | } |
| 117 | 104 | ||
| 118 | /// Request a host GPU memory flush from the CPU. | 105 | /// Request a host GPU memory flush from the CPU. |
| 119 | [[nodiscard]] u64 RequestFlush(VAddr addr, std::size_t size) { | 106 | template <typename Func> |
| 120 | std::unique_lock lck{flush_request_mutex}; | 107 | [[nodiscard]] u64 RequestSyncOperation(Func&& action) { |
| 121 | const u64 fence = ++last_flush_fence; | 108 | std::unique_lock lck{sync_request_mutex}; |
| 122 | flush_requests.emplace_back(fence, addr, size); | 109 | const u64 fence = ++last_sync_fence; |
| 110 | sync_requests.emplace_back(action); | ||
| 123 | return fence; | 111 | return fence; |
| 124 | } | 112 | } |
| 125 | 113 | ||
| 126 | /// Obtains current flush request fence id. | 114 | /// Obtains current flush request fence id. |
| 127 | [[nodiscard]] u64 CurrentFlushRequestFence() const { | 115 | [[nodiscard]] u64 CurrentSyncRequestFence() const { |
| 128 | return current_flush_fence.load(std::memory_order_relaxed); | 116 | return current_sync_fence.load(std::memory_order_relaxed); |
| 117 | } | ||
| 118 | |||
| 119 | void WaitForSyncOperation(const u64 fence) { | ||
| 120 | std::unique_lock lck{sync_request_mutex}; | ||
| 121 | sync_request_cv.wait(lck, [this, fence] { return CurrentSyncRequestFence() >= fence; }); | ||
| 129 | } | 122 | } |
| 130 | 123 | ||
| 131 | /// Tick pending requests within the GPU. | 124 | /// Tick pending requests within the GPU. |
| 132 | void TickWork() { | 125 | void TickWork() { |
| 133 | std::unique_lock lck{flush_request_mutex}; | 126 | std::unique_lock lck{sync_request_mutex}; |
| 134 | while (!flush_requests.empty()) { | 127 | while (!sync_requests.empty()) { |
| 135 | auto& request = flush_requests.front(); | 128 | auto request = std::move(sync_requests.front()); |
| 136 | const u64 fence = request.fence; | 129 | sync_requests.pop_front(); |
| 137 | const VAddr addr = request.addr; | 130 | sync_request_mutex.unlock(); |
| 138 | const std::size_t size = request.size; | 131 | request(); |
| 139 | flush_requests.pop_front(); | 132 | current_sync_fence.fetch_add(1, std::memory_order_release); |
| 140 | flush_request_mutex.unlock(); | 133 | sync_request_mutex.lock(); |
| 141 | rasterizer->FlushRegion(addr, size); | 134 | sync_request_cv.notify_all(); |
| 142 | current_flush_fence.store(fence); | ||
| 143 | flush_request_mutex.lock(); | ||
| 144 | } | 135 | } |
| 145 | } | 136 | } |
| 146 | 137 | ||
| 147 | /// Returns a reference to the Maxwell3D GPU engine. | 138 | /// Returns a reference to the Maxwell3D GPU engine. |
| 148 | [[nodiscard]] Engines::Maxwell3D& Maxwell3D() { | 139 | [[nodiscard]] Engines::Maxwell3D& Maxwell3D() { |
| 149 | return *maxwell_3d; | 140 | ASSERT(current_channel); |
| 141 | return *current_channel->maxwell_3d; | ||
| 150 | } | 142 | } |
| 151 | 143 | ||
| 152 | /// Returns a const reference to the Maxwell3D GPU engine. | 144 | /// Returns a const reference to the Maxwell3D GPU engine. |
| 153 | [[nodiscard]] const Engines::Maxwell3D& Maxwell3D() const { | 145 | [[nodiscard]] const Engines::Maxwell3D& Maxwell3D() const { |
| 154 | return *maxwell_3d; | 146 | ASSERT(current_channel); |
| 147 | return *current_channel->maxwell_3d; | ||
| 155 | } | 148 | } |
| 156 | 149 | ||
| 157 | /// Returns a reference to the KeplerCompute GPU engine. | 150 | /// Returns a reference to the KeplerCompute GPU engine. |
| 158 | [[nodiscard]] Engines::KeplerCompute& KeplerCompute() { | 151 | [[nodiscard]] Engines::KeplerCompute& KeplerCompute() { |
| 159 | return *kepler_compute; | 152 | ASSERT(current_channel); |
| 153 | return *current_channel->kepler_compute; | ||
| 160 | } | 154 | } |
| 161 | 155 | ||
| 162 | /// Returns a reference to the KeplerCompute GPU engine. | 156 | /// Returns a reference to the KeplerCompute GPU engine. |
| 163 | [[nodiscard]] const Engines::KeplerCompute& KeplerCompute() const { | 157 | [[nodiscard]] const Engines::KeplerCompute& KeplerCompute() const { |
| 164 | return *kepler_compute; | 158 | ASSERT(current_channel); |
| 165 | } | 159 | return *current_channel->kepler_compute; |
| 166 | |||
| 167 | /// Returns a reference to the GPU memory manager. | ||
| 168 | [[nodiscard]] Tegra::MemoryManager& MemoryManager() { | ||
| 169 | return *memory_manager; | ||
| 170 | } | ||
| 171 | |||
| 172 | /// Returns a const reference to the GPU memory manager. | ||
| 173 | [[nodiscard]] const Tegra::MemoryManager& MemoryManager() const { | ||
| 174 | return *memory_manager; | ||
| 175 | } | 160 | } |
| 176 | 161 | ||
| 177 | /// Returns a reference to the GPU DMA pusher. | 162 | /// Returns a reference to the GPU DMA pusher. |
| 178 | [[nodiscard]] Tegra::DmaPusher& DmaPusher() { | 163 | [[nodiscard]] Tegra::DmaPusher& DmaPusher() { |
| 179 | return *dma_pusher; | 164 | ASSERT(current_channel); |
| 165 | return *current_channel->dma_pusher; | ||
| 180 | } | 166 | } |
| 181 | 167 | ||
| 182 | /// Returns a const reference to the GPU DMA pusher. | 168 | /// Returns a const reference to the GPU DMA pusher. |
| 183 | [[nodiscard]] const Tegra::DmaPusher& DmaPusher() const { | 169 | [[nodiscard]] const Tegra::DmaPusher& DmaPusher() const { |
| 184 | return *dma_pusher; | 170 | ASSERT(current_channel); |
| 171 | return *current_channel->dma_pusher; | ||
| 185 | } | 172 | } |
| 186 | 173 | ||
| 187 | /// Returns a reference to the underlying renderer. | 174 | /// Returns a reference to the underlying renderer. |
| @@ -204,77 +191,6 @@ struct GPU::Impl { | |||
| 204 | return *shader_notify; | 191 | return *shader_notify; |
| 205 | } | 192 | } |
| 206 | 193 | ||
| 207 | /// Allows the CPU/NvFlinger to wait on the GPU before presenting a frame. | ||
| 208 | void WaitFence(u32 syncpoint_id, u32 value) { | ||
| 209 | // Synced GPU, is always in sync | ||
| 210 | if (!is_async) { | ||
| 211 | return; | ||
| 212 | } | ||
| 213 | if (syncpoint_id == UINT32_MAX) { | ||
| 214 | // TODO: Research what this does. | ||
| 215 | LOG_ERROR(HW_GPU, "Waiting for syncpoint -1 not implemented"); | ||
| 216 | return; | ||
| 217 | } | ||
| 218 | MICROPROFILE_SCOPE(GPU_wait); | ||
| 219 | std::unique_lock lock{sync_mutex}; | ||
| 220 | sync_cv.wait(lock, [=, this] { | ||
| 221 | if (shutting_down.load(std::memory_order_relaxed)) { | ||
| 222 | // We're shutting down, ensure no threads continue to wait for the next syncpoint | ||
| 223 | return true; | ||
| 224 | } | ||
| 225 | return syncpoints.at(syncpoint_id).load() >= value; | ||
| 226 | }); | ||
| 227 | } | ||
| 228 | |||
| 229 | void IncrementSyncPoint(u32 syncpoint_id) { | ||
| 230 | auto& syncpoint = syncpoints.at(syncpoint_id); | ||
| 231 | syncpoint++; | ||
| 232 | std::scoped_lock lock{sync_mutex}; | ||
| 233 | sync_cv.notify_all(); | ||
| 234 | auto& interrupt = syncpt_interrupts.at(syncpoint_id); | ||
| 235 | if (!interrupt.empty()) { | ||
| 236 | u32 value = syncpoint.load(); | ||
| 237 | auto it = interrupt.begin(); | ||
| 238 | while (it != interrupt.end()) { | ||
| 239 | if (value >= *it) { | ||
| 240 | TriggerCpuInterrupt(syncpoint_id, *it); | ||
| 241 | it = interrupt.erase(it); | ||
| 242 | continue; | ||
| 243 | } | ||
| 244 | it++; | ||
| 245 | } | ||
| 246 | } | ||
| 247 | } | ||
| 248 | |||
| 249 | [[nodiscard]] u32 GetSyncpointValue(u32 syncpoint_id) const { | ||
| 250 | return syncpoints.at(syncpoint_id).load(); | ||
| 251 | } | ||
| 252 | |||
| 253 | void RegisterSyncptInterrupt(u32 syncpoint_id, u32 value) { | ||
| 254 | std::scoped_lock lock{sync_mutex}; | ||
| 255 | auto& interrupt = syncpt_interrupts.at(syncpoint_id); | ||
| 256 | bool contains = std::any_of(interrupt.begin(), interrupt.end(), | ||
| 257 | [value](u32 in_value) { return in_value == value; }); | ||
| 258 | if (contains) { | ||
| 259 | return; | ||
| 260 | } | ||
| 261 | interrupt.emplace_back(value); | ||
| 262 | } | ||
| 263 | |||
| 264 | [[nodiscard]] bool CancelSyncptInterrupt(u32 syncpoint_id, u32 value) { | ||
| 265 | std::scoped_lock lock{sync_mutex}; | ||
| 266 | auto& interrupt = syncpt_interrupts.at(syncpoint_id); | ||
| 267 | const auto iter = | ||
| 268 | std::find_if(interrupt.begin(), interrupt.end(), | ||
| 269 | [value](u32 interrupt_value) { return value == interrupt_value; }); | ||
| 270 | |||
| 271 | if (iter == interrupt.end()) { | ||
| 272 | return false; | ||
| 273 | } | ||
| 274 | interrupt.erase(iter); | ||
| 275 | return true; | ||
| 276 | } | ||
| 277 | |||
| 278 | [[nodiscard]] u64 GetTicks() const { | 194 | [[nodiscard]] u64 GetTicks() const { |
| 279 | // These values were reverse engineered by fincs from NVN | 195 | ||
| 280 | // The gpu clock is reported in units of 385/625 nanoseconds | 196 | // The gpu clock is reported in units of 385/625 nanoseconds |
| @@ -306,7 +222,7 @@ struct GPU::Impl { | |||
| 306 | /// This can be used to launch any necessary threads and register any necessary | 222 | /// This can be used to launch any necessary threads and register any necessary |
| 307 | /// core timing events. | 223 | /// core timing events. |
| 308 | void Start() { | 224 | void Start() { |
| 309 | gpu_thread.StartThread(*renderer, renderer->Context(), *dma_pusher); | 225 | gpu_thread.StartThread(*renderer, renderer->Context(), *scheduler); |
| 310 | cpu_context = renderer->GetRenderWindow().CreateSharedContext(); | 226 | cpu_context = renderer->GetRenderWindow().CreateSharedContext(); |
| 311 | cpu_context->MakeCurrent(); | 227 | cpu_context->MakeCurrent(); |
| 312 | } | 228 | } |
| @@ -328,8 +244,8 @@ struct GPU::Impl { | |||
| 328 | } | 244 | } |
| 329 | 245 | ||
| 330 | /// Push GPU command entries to be processed | 246 | /// Push GPU command entries to be processed |
| 331 | void PushGPUEntries(Tegra::CommandList&& entries) { | 247 | void PushGPUEntries(s32 channel, Tegra::CommandList&& entries) { |
| 332 | gpu_thread.SubmitList(std::move(entries)); | 248 | gpu_thread.SubmitList(channel, std::move(entries)); |
| 333 | } | 249 | } |
| 334 | 250 | ||
| 335 | /// Push GPU command buffer entries to be processed | 251 | /// Push GPU command buffer entries to be processed |
| @@ -339,7 +255,7 @@ struct GPU::Impl { | |||
| 339 | } | 255 | } |
| 340 | 256 | ||
| 341 | if (!cdma_pushers.contains(id)) { | 257 | if (!cdma_pushers.contains(id)) { |
| 342 | cdma_pushers.insert_or_assign(id, std::make_unique<Tegra::CDmaPusher>(gpu)); | 258 | cdma_pushers.insert_or_assign(id, std::make_unique<Tegra::CDmaPusher>(host1x)); |
| 343 | } | 259 | } |
| 344 | 260 | ||
| 345 | // SubmitCommandBuffer would make the nvdec operations async, this is not currently working | 261 | // SubmitCommandBuffer would make the nvdec operations async, this is not currently working |
| @@ -376,308 +292,55 @@ struct GPU::Impl { | |||
| 376 | gpu_thread.FlushAndInvalidateRegion(addr, size); | 292 | gpu_thread.FlushAndInvalidateRegion(addr, size); |
| 377 | } | 293 | } |
| 378 | 294 | ||
| 379 | void TriggerCpuInterrupt(u32 syncpoint_id, u32 value) const { | 295 | void RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer, |
| 380 | auto& interrupt_manager = system.InterruptManager(); | 296 | std::array<Service::Nvidia::NvFence, 4>& fences, size_t num_fences) { |
| 381 | interrupt_manager.GPUInterruptSyncpt(syncpoint_id, value); | 297 | size_t current_request_counter{}; |
| 382 | } | 298 | { |
| 383 | 299 | std::unique_lock<std::mutex> lk(request_swap_mutex); | |
| 384 | void ProcessBindMethod(const GPU::MethodCall& method_call) { | 300 | if (free_swap_counters.empty()) { |
| 385 | // Bind the current subchannel to the desired engine id. | 301 | current_request_counter = request_swap_counters.size(); |
| 386 | LOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", method_call.subchannel, | 302 | request_swap_counters.emplace_back(num_fences); |
| 387 | method_call.argument); | ||
| 388 | const auto engine_id = static_cast<EngineID>(method_call.argument); | ||
| 389 | bound_engines[method_call.subchannel] = static_cast<EngineID>(engine_id); | ||
| 390 | switch (engine_id) { | ||
| 391 | case EngineID::FERMI_TWOD_A: | ||
| 392 | dma_pusher->BindSubchannel(fermi_2d.get(), method_call.subchannel); | ||
| 393 | break; | ||
| 394 | case EngineID::MAXWELL_B: | ||
| 395 | dma_pusher->BindSubchannel(maxwell_3d.get(), method_call.subchannel); | ||
| 396 | break; | ||
| 397 | case EngineID::KEPLER_COMPUTE_B: | ||
| 398 | dma_pusher->BindSubchannel(kepler_compute.get(), method_call.subchannel); | ||
| 399 | break; | ||
| 400 | case EngineID::MAXWELL_DMA_COPY_A: | ||
| 401 | dma_pusher->BindSubchannel(maxwell_dma.get(), method_call.subchannel); | ||
| 402 | break; | ||
| 403 | case EngineID::KEPLER_INLINE_TO_MEMORY_B: | ||
| 404 | dma_pusher->BindSubchannel(kepler_memory.get(), method_call.subchannel); | ||
| 405 | break; | ||
| 406 | default: | ||
| 407 | UNIMPLEMENTED_MSG("Unimplemented engine {:04X}", engine_id); | ||
| 408 | } | ||
| 409 | } | ||
| 410 | |||
| 411 | void ProcessFenceActionMethod() { | ||
| 412 | switch (regs.fence_action.op) { | ||
| 413 | case GPU::FenceOperation::Acquire: | ||
| 414 | WaitFence(regs.fence_action.syncpoint_id, regs.fence_value); | ||
| 415 | break; | ||
| 416 | case GPU::FenceOperation::Increment: | ||
| 417 | IncrementSyncPoint(regs.fence_action.syncpoint_id); | ||
| 418 | break; | ||
| 419 | default: | ||
| 420 | UNIMPLEMENTED_MSG("Unimplemented operation {}", regs.fence_action.op.Value()); | ||
| 421 | } | ||
| 422 | } | ||
| 423 | |||
| 424 | void ProcessWaitForInterruptMethod() { | ||
| 425 | // TODO(bunnei) ImplementMe | ||
| 426 | LOG_WARNING(HW_GPU, "(STUBBED) called"); | ||
| 427 | } | ||
| 428 | |||
| 429 | void ProcessSemaphoreTriggerMethod() { | ||
| 430 | const auto semaphoreOperationMask = 0xF; | ||
| 431 | const auto op = | ||
| 432 | static_cast<GpuSemaphoreOperation>(regs.semaphore_trigger & semaphoreOperationMask); | ||
| 433 | if (op == GpuSemaphoreOperation::WriteLong) { | ||
| 434 | struct Block { | ||
| 435 | u32 sequence; | ||
| 436 | u32 zeros = 0; | ||
| 437 | u64 timestamp; | ||
| 438 | }; | ||
| 439 | |||
| 440 | Block block{}; | ||
| 441 | block.sequence = regs.semaphore_sequence; | ||
| 442 | // TODO(Kmather73): Generate a real GPU timestamp and write it here instead of | ||
| 443 | // CoreTiming | ||
| 444 | block.timestamp = GetTicks(); | ||
| 445 | memory_manager->WriteBlock(regs.semaphore_address.SemaphoreAddress(), &block, | ||
| 446 | sizeof(block)); | ||
| 447 | } else { | ||
| 448 | const u32 word{memory_manager->Read<u32>(regs.semaphore_address.SemaphoreAddress())}; | ||
| 449 | if ((op == GpuSemaphoreOperation::AcquireEqual && word == regs.semaphore_sequence) || | ||
| 450 | (op == GpuSemaphoreOperation::AcquireGequal && | ||
| 451 | static_cast<s32>(word - regs.semaphore_sequence) > 0) || | ||
| 452 | (op == GpuSemaphoreOperation::AcquireMask && (word & regs.semaphore_sequence))) { | ||
| 453 | // Nothing to do in this case | ||
| 454 | } else { | 303 | } else { |
| 455 | regs.acquire_source = true; | 304 | current_request_counter = free_swap_counters.front(); |
| 456 | regs.acquire_value = regs.semaphore_sequence; | 305 | request_swap_counters[current_request_counter] = num_fences; |
| 457 | if (op == GpuSemaphoreOperation::AcquireEqual) { | 306 | free_swap_counters.pop_front(); |
| 458 | regs.acquire_active = true; | ||
| 459 | regs.acquire_mode = false; | ||
| 460 | } else if (op == GpuSemaphoreOperation::AcquireGequal) { | ||
| 461 | regs.acquire_active = true; | ||
| 462 | regs.acquire_mode = true; | ||
| 463 | } else if (op == GpuSemaphoreOperation::AcquireMask) { | ||
| 464 | // TODO(kemathe) The acquire mask operation waits for a value that, ANDed with | ||
| 465 | // semaphore_sequence, gives a non-0 result | ||
| 466 | LOG_ERROR(HW_GPU, "Invalid semaphore operation AcquireMask not implemented"); | ||
| 467 | } else { | ||
| 468 | LOG_ERROR(HW_GPU, "Invalid semaphore operation"); | ||
| 469 | } | ||
| 470 | } | 307 | } |
| 471 | } | 308 | } |
| 472 | } | 309 | const auto wait_fence = |
| 473 | 310 | RequestSyncOperation([this, current_request_counter, framebuffer, fences, num_fences] { | |
| 474 | void ProcessSemaphoreRelease() { | 311 | auto& syncpoint_manager = host1x.GetSyncpointManager(); |
| 475 | memory_manager->Write<u32>(regs.semaphore_address.SemaphoreAddress(), | 312 | if (num_fences == 0) { |
| 476 | regs.semaphore_release); | 313 | renderer->SwapBuffers(framebuffer); |
| 477 | } | 314 | } |
| 478 | 315 | const auto executer = [this, current_request_counter, | |
| 479 | void ProcessSemaphoreAcquire() { | 316 | framebuffer_copy = *framebuffer]() { |
| 480 | const u32 word = memory_manager->Read<u32>(regs.semaphore_address.SemaphoreAddress()); | 317 | { |
| 481 | const auto value = regs.semaphore_acquire; | 318 | std::unique_lock<std::mutex> lk(request_swap_mutex); |
| 482 | if (word != value) { | 319 | if (--request_swap_counters[current_request_counter] != 0) { |
| 483 | regs.acquire_active = true; | 320 | return; |
| 484 | regs.acquire_value = value; | 321 | } |
| 485 | // TODO(kemathe73) figure out how to do the acquire_timeout | 322 | free_swap_counters.push_back(current_request_counter); |
| 486 | regs.acquire_mode = false; | ||
| 487 | regs.acquire_source = false; | ||
| 488 | } | ||
| 489 | } | ||
| 490 | |||
| 491 | /// Calls a GPU puller method. | ||
| 492 | void CallPullerMethod(const GPU::MethodCall& method_call) { | ||
| 493 | regs.reg_array[method_call.method] = method_call.argument; | ||
| 494 | const auto method = static_cast<BufferMethods>(method_call.method); | ||
| 495 | |||
| 496 | switch (method) { | ||
| 497 | case BufferMethods::BindObject: { | ||
| 498 | ProcessBindMethod(method_call); | ||
| 499 | break; | ||
| 500 | } | ||
| 501 | case BufferMethods::Nop: | ||
| 502 | case BufferMethods::SemaphoreAddressHigh: | ||
| 503 | case BufferMethods::SemaphoreAddressLow: | ||
| 504 | case BufferMethods::SemaphoreSequence: | ||
| 505 | break; | ||
| 506 | case BufferMethods::UnkCacheFlush: | ||
| 507 | rasterizer->SyncGuestHost(); | ||
| 508 | break; | ||
| 509 | case BufferMethods::WrcacheFlush: | ||
| 510 | rasterizer->SignalReference(); | ||
| 511 | break; | ||
| 512 | case BufferMethods::FenceValue: | ||
| 513 | break; | ||
| 514 | case BufferMethods::RefCnt: | ||
| 515 | rasterizer->SignalReference(); | ||
| 516 | break; | ||
| 517 | case BufferMethods::FenceAction: | ||
| 518 | ProcessFenceActionMethod(); | ||
| 519 | break; | ||
| 520 | case BufferMethods::WaitForInterrupt: | ||
| 521 | rasterizer->WaitForIdle(); | ||
| 522 | break; | ||
| 523 | case BufferMethods::SemaphoreTrigger: { | ||
| 524 | ProcessSemaphoreTriggerMethod(); | ||
| 525 | break; | ||
| 526 | } | ||
| 527 | case BufferMethods::NotifyIntr: { | ||
| 528 | // TODO(Kmather73): Research and implement this method. | ||
| 529 | LOG_ERROR(HW_GPU, "Special puller engine method NotifyIntr not implemented"); | ||
| 530 | break; | ||
| 531 | } | ||
| 532 | case BufferMethods::Unk28: { | ||
| 533 | // TODO(Kmather73): Research and implement this method. | ||
| 534 | LOG_ERROR(HW_GPU, "Special puller engine method Unk28 not implemented"); | ||
| 535 | break; | ||
| 536 | } | ||
| 537 | case BufferMethods::SemaphoreAcquire: { | ||
| 538 | ProcessSemaphoreAcquire(); | ||
| 539 | break; | ||
| 540 | } | ||
| 541 | case BufferMethods::SemaphoreRelease: { | ||
| 542 | ProcessSemaphoreRelease(); | ||
| 543 | break; | ||
| 544 | } | ||
| 545 | case BufferMethods::Yield: { | ||
| 546 | // TODO(Kmather73): Research and implement this method. | ||
| 547 | LOG_ERROR(HW_GPU, "Special puller engine method Yield not implemented"); | ||
| 548 | break; | ||
| 549 | } | ||
| 550 | default: | ||
| 551 | LOG_ERROR(HW_GPU, "Special puller engine method {:X} not implemented", method); | ||
| 552 | break; | ||
| 553 | } | ||
| 554 | } | ||
| 555 | |||
| 556 | /// Calls a GPU engine method. | ||
| 557 | void CallEngineMethod(const GPU::MethodCall& method_call) { | ||
| 558 | const EngineID engine = bound_engines[method_call.subchannel]; | ||
| 559 | |||
| 560 | switch (engine) { | ||
| 561 | case EngineID::FERMI_TWOD_A: | ||
| 562 | fermi_2d->CallMethod(method_call.method, method_call.argument, | ||
| 563 | method_call.IsLastCall()); | ||
| 564 | break; | ||
| 565 | case EngineID::MAXWELL_B: | ||
| 566 | maxwell_3d->CallMethod(method_call.method, method_call.argument, | ||
| 567 | method_call.IsLastCall()); | ||
| 568 | break; | ||
| 569 | case EngineID::KEPLER_COMPUTE_B: | ||
| 570 | kepler_compute->CallMethod(method_call.method, method_call.argument, | ||
| 571 | method_call.IsLastCall()); | ||
| 572 | break; | ||
| 573 | case EngineID::MAXWELL_DMA_COPY_A: | ||
| 574 | maxwell_dma->CallMethod(method_call.method, method_call.argument, | ||
| 575 | method_call.IsLastCall()); | ||
| 576 | break; | ||
| 577 | case EngineID::KEPLER_INLINE_TO_MEMORY_B: | ||
| 578 | kepler_memory->CallMethod(method_call.method, method_call.argument, | ||
| 579 | method_call.IsLastCall()); | ||
| 580 | break; | ||
| 581 | default: | ||
| 582 | UNIMPLEMENTED_MSG("Unimplemented engine"); | ||
| 583 | } | ||
| 584 | } | ||
| 585 | |||
| 586 | /// Calls a GPU engine multivalue method. | ||
| 587 | void CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, | ||
| 588 | u32 methods_pending) { | ||
| 589 | const EngineID engine = bound_engines[subchannel]; | ||
| 590 | |||
| 591 | switch (engine) { | ||
| 592 | case EngineID::FERMI_TWOD_A: | ||
| 593 | fermi_2d->CallMultiMethod(method, base_start, amount, methods_pending); | ||
| 594 | break; | ||
| 595 | case EngineID::MAXWELL_B: | ||
| 596 | maxwell_3d->CallMultiMethod(method, base_start, amount, methods_pending); | ||
| 597 | break; | ||
| 598 | case EngineID::KEPLER_COMPUTE_B: | ||
| 599 | kepler_compute->CallMultiMethod(method, base_start, amount, methods_pending); | ||
| 600 | break; | ||
| 601 | case EngineID::MAXWELL_DMA_COPY_A: | ||
| 602 | maxwell_dma->CallMultiMethod(method, base_start, amount, methods_pending); | ||
| 603 | break; | ||
| 604 | case EngineID::KEPLER_INLINE_TO_MEMORY_B: | ||
| 605 | kepler_memory->CallMultiMethod(method, base_start, amount, methods_pending); | ||
| 606 | break; | ||
| 607 | default: | ||
| 608 | UNIMPLEMENTED_MSG("Unimplemented engine"); | ||
| 609 | } | ||
| 610 | } | ||
| 611 | |||
| 612 | /// Determines where the method should be executed. | ||
| 613 | [[nodiscard]] bool ExecuteMethodOnEngine(u32 method) { | ||
| 614 | const auto buffer_method = static_cast<BufferMethods>(method); | ||
| 615 | return buffer_method >= BufferMethods::NonPullerMethods; | ||
| 616 | } | ||
| 617 | |||
| 618 | struct Regs { | ||
| 619 | static constexpr size_t NUM_REGS = 0x40; | ||
| 620 | |||
| 621 | union { | ||
| 622 | struct { | ||
| 623 | INSERT_PADDING_WORDS_NOINIT(0x4); | ||
| 624 | struct { | ||
| 625 | u32 address_high; | ||
| 626 | u32 address_low; | ||
| 627 | |||
| 628 | [[nodiscard]] GPUVAddr SemaphoreAddress() const { | ||
| 629 | return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | | ||
| 630 | address_low); | ||
| 631 | } | 323 | } |
| 632 | } semaphore_address; | 324 | renderer->SwapBuffers(&framebuffer_copy); |
| 633 | 325 | }; | |
| 634 | u32 semaphore_sequence; | 326 | for (size_t i = 0; i < num_fences; i++) { |
| 635 | u32 semaphore_trigger; | 327 | syncpoint_manager.RegisterGuestAction(fences[i].id, fences[i].value, executer); |
| 636 | INSERT_PADDING_WORDS_NOINIT(0xC); | 328 | } |
| 637 | 329 | }); | |
| 638 | // The pusher and the puller share the reference counter, the pusher only has read | 330 | gpu_thread.TickGPU(); |
| 639 | // access | 331 | WaitForSyncOperation(wait_fence); |
| 640 | u32 reference_count; | 332 | } |
| 641 | INSERT_PADDING_WORDS_NOINIT(0x5); | ||
| 642 | |||
| 643 | u32 semaphore_acquire; | ||
| 644 | u32 semaphore_release; | ||
| 645 | u32 fence_value; | ||
| 646 | GPU::FenceAction fence_action; | ||
| 647 | INSERT_PADDING_WORDS_NOINIT(0xE2); | ||
| 648 | |||
| 649 | // Puller state | ||
| 650 | u32 acquire_mode; | ||
| 651 | u32 acquire_source; | ||
| 652 | u32 acquire_active; | ||
| 653 | u32 acquire_timeout; | ||
| 654 | u32 acquire_value; | ||
| 655 | }; | ||
| 656 | std::array<u32, NUM_REGS> reg_array; | ||
| 657 | }; | ||
| 658 | } regs{}; | ||
| 659 | 333 | ||
| 660 | GPU& gpu; | 334 | GPU& gpu; |
| 661 | Core::System& system; | 335 | Core::System& system; |
| 662 | std::unique_ptr<Tegra::MemoryManager> memory_manager; | 336 | Host1x::Host1x& host1x; |
| 663 | std::unique_ptr<Tegra::DmaPusher> dma_pusher; | 337 | |
| 664 | std::map<u32, std::unique_ptr<Tegra::CDmaPusher>> cdma_pushers; | 338 | std::map<u32, std::unique_ptr<Tegra::CDmaPusher>> cdma_pushers; |
| 665 | std::unique_ptr<VideoCore::RendererBase> renderer; | 339 | std::unique_ptr<VideoCore::RendererBase> renderer; |
| 666 | VideoCore::RasterizerInterface* rasterizer = nullptr; | 340 | VideoCore::RasterizerInterface* rasterizer = nullptr; |
| 667 | const bool use_nvdec; | 341 | const bool use_nvdec; |
| 668 | 342 | ||
| 669 | /// Mapping of command subchannels to their bound engine ids | 343 | s32 new_channel_id{1}; |
| 670 | std::array<EngineID, 8> bound_engines{}; | ||
| 671 | /// 3D engine | ||
| 672 | std::unique_ptr<Engines::Maxwell3D> maxwell_3d; | ||
| 673 | /// 2D engine | ||
| 674 | std::unique_ptr<Engines::Fermi2D> fermi_2d; | ||
| 675 | /// Compute engine | ||
| 676 | std::unique_ptr<Engines::KeplerCompute> kepler_compute; | ||
| 677 | /// DMA engine | ||
| 678 | std::unique_ptr<Engines::MaxwellDMA> maxwell_dma; | ||
| 679 | /// Inline memory engine | ||
| 680 | std::unique_ptr<Engines::KeplerMemory> kepler_memory; | ||
| 681 | /// Shader build notifier | 344 | /// Shader build notifier |
| 682 | std::unique_ptr<VideoCore::ShaderNotify> shader_notify; | 345 | std::unique_ptr<VideoCore::ShaderNotify> shader_notify; |
| 683 | /// When true, we are about to shut down emulation session, so terminate outstanding tasks | 346 | /// When true, we are about to shut down emulation session, so terminate outstanding tasks |
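The block removed above holds the puller's method handling: CallPullerMethod services the puller's own register window (semaphores, fences, reference counting), while CallEngineMethod forwards everything past that window to whichever engine is bound on the call's subchannel, and ExecuteMethodOnEngine makes the choice. A self-contained sketch of that decision, assuming the puller window spans NUM_REGS (0x40) words as declared in Regs; the enumerators and printfs are illustrative only:

    #include <array>
    #include <cstdint>
    #include <cstdio>

    namespace sketch {
    enum class EngineID { Fermi2D, Maxwell3D, KeplerCompute, MaxwellDMA, KeplerMemory };

    struct MethodCall {
        std::uint32_t method;
        std::uint32_t argument;
        std::uint32_t subchannel;
    };

    // Mirrors ExecuteMethodOnEngine(): methods past the puller register window belong to
    // the engine currently bound on the subchannel (assumed threshold: NUM_REGS = 0x40).
    constexpr bool ExecuteOnEngine(std::uint32_t method) {
        return method >= 0x40;
    }

    void Dispatch(const MethodCall& call, const std::array<EngineID, 8>& bound_engines) {
        if (!ExecuteOnEngine(call.method)) {
            std::printf("puller method 0x%X arg 0x%X\n", call.method, call.argument);
        } else {
            const EngineID engine = bound_engines[call.subchannel];
            std::printf("engine %d method 0x%X\n", static_cast<int>(engine), call.method);
        }
    }
    } // namespace sketch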
| @@ -692,51 +355,25 @@ struct GPU::Impl { | |||
| 692 | 355 | ||
| 693 | std::condition_variable sync_cv; | 356 | std::condition_variable sync_cv; |
| 694 | 357 | ||
| 695 | struct FlushRequest { | 358 | std::list<std::function<void()>> sync_requests; |
| 696 | explicit FlushRequest(u64 fence_, VAddr addr_, std::size_t size_) | 359 | std::atomic<u64> current_sync_fence{}; |
| 697 | : fence{fence_}, addr{addr_}, size{size_} {} | 360 | u64 last_sync_fence{}; |
| 698 | u64 fence; | 361 | std::mutex sync_request_mutex; |
| 699 | VAddr addr; | 362 | std::condition_variable sync_request_cv; |
| 700 | std::size_t size; | ||
| 701 | }; | ||
| 702 | |||
| 703 | std::list<FlushRequest> flush_requests; | ||
| 704 | std::atomic<u64> current_flush_fence{}; | ||
| 705 | u64 last_flush_fence{}; | ||
| 706 | std::mutex flush_request_mutex; | ||
| 707 | 363 | ||
| 708 | const bool is_async; | 364 | const bool is_async; |
| 709 | 365 | ||
| 710 | VideoCommon::GPUThread::ThreadManager gpu_thread; | 366 | VideoCommon::GPUThread::ThreadManager gpu_thread; |
| 711 | std::unique_ptr<Core::Frontend::GraphicsContext> cpu_context; | 367 | std::unique_ptr<Core::Frontend::GraphicsContext> cpu_context; |
| 712 | 368 | ||
| 713 | #define ASSERT_REG_POSITION(field_name, position) \ | 369 | std::unique_ptr<Tegra::Control::Scheduler> scheduler; |
| 714 | static_assert(offsetof(Regs, field_name) == position * 4, \ | 370 | std::unordered_map<s32, std::shared_ptr<Tegra::Control::ChannelState>> channels; |
| 715 | "Field " #field_name " has invalid position") | 371 | Tegra::Control::ChannelState* current_channel; |
| 716 | 372 | s32 bound_channel{-1}; | |
| 717 | ASSERT_REG_POSITION(semaphore_address, 0x4); | 373 | |
| 718 | ASSERT_REG_POSITION(semaphore_sequence, 0x6); | 374 | std::deque<size_t> free_swap_counters; |
| 719 | ASSERT_REG_POSITION(semaphore_trigger, 0x7); | 375 | std::deque<size_t> request_swap_counters; |
| 720 | ASSERT_REG_POSITION(reference_count, 0x14); | 376 | std::mutex request_swap_mutex; |
| 721 | ASSERT_REG_POSITION(semaphore_acquire, 0x1A); | ||
| 722 | ASSERT_REG_POSITION(semaphore_release, 0x1B); | ||
| 723 | ASSERT_REG_POSITION(fence_value, 0x1C); | ||
| 724 | ASSERT_REG_POSITION(fence_action, 0x1D); | ||
| 725 | |||
| 726 | ASSERT_REG_POSITION(acquire_mode, 0x100); | ||
| 727 | ASSERT_REG_POSITION(acquire_source, 0x101); | ||
| 728 | ASSERT_REG_POSITION(acquire_active, 0x102); | ||
| 729 | ASSERT_REG_POSITION(acquire_timeout, 0x103); | ||
| 730 | ASSERT_REG_POSITION(acquire_value, 0x104); | ||
| 731 | |||
| 732 | #undef ASSERT_REG_POSITION | ||
| 733 | |||
| 734 | enum class GpuSemaphoreOperation { | ||
| 735 | AcquireEqual = 0x1, | ||
| 736 | WriteLong = 0x2, | ||
| 737 | AcquireGequal = 0x4, | ||
| 738 | AcquireMask = 0x8, | ||
| 739 | }; | ||
| 740 | }; | 377 | }; |
| 741 | 378 | ||
| 742 | GPU::GPU(Core::System& system, bool is_async, bool use_nvdec) | 379 | GPU::GPU(Core::System& system, bool is_async, bool use_nvdec) |
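The ASSERT_REG_POSITION block deleted above is a compile-time guard: each named field of Regs must sit at its documented word offset, so a stray padding change breaks the build instead of silently corrupting emulation. The idiom in isolation, with a much smaller illustrative register struct:

    #include <cstddef>
    #include <cstdint>

    namespace sketch {
    struct Regs {
        std::uint32_t reserved[4];
        std::uint32_t semaphore_address_high; // word 0x4
        std::uint32_t semaphore_address_low;  // word 0x5
        std::uint32_t semaphore_sequence;     // word 0x6
        std::uint32_t semaphore_trigger;      // word 0x7
    };

    // Registers are 32-bit words, so "position" counts words rather than bytes.
    #define ASSERT_REG_POSITION(field_name, position)                                      \
        static_assert(offsetof(Regs, field_name) == (position)*4,                          \
                      "Field " #field_name " has invalid position")

    ASSERT_REG_POSITION(semaphore_address_high, 0x4);
    ASSERT_REG_POSITION(semaphore_sequence, 0x6);
    ASSERT_REG_POSITION(semaphore_trigger, 0x7);

    #undef ASSERT_REG_POSITION
    } // namespace sketch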
| @@ -744,25 +381,36 @@ GPU::GPU(Core::System& system, bool is_async, bool use_nvdec) | |||
| 744 | 381 | ||
| 745 | GPU::~GPU() = default; | 382 | GPU::~GPU() = default; |
| 746 | 383 | ||
| 747 | void GPU::BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer) { | 384 | std::shared_ptr<Control::ChannelState> GPU::AllocateChannel() { |
| 748 | impl->BindRenderer(std::move(renderer)); | 385 | return impl->AllocateChannel(); |
| 386 | } | ||
| 387 | |||
| 388 | void GPU::InitChannel(Control::ChannelState& to_init) { | ||
| 389 | impl->InitChannel(to_init); | ||
| 390 | } | ||
| 391 | |||
| 392 | void GPU::BindChannel(s32 channel_id) { | ||
| 393 | impl->BindChannel(channel_id); | ||
| 749 | } | 394 | } |
| 750 | 395 | ||
| 751 | void GPU::CallMethod(const MethodCall& method_call) { | 396 | void GPU::ReleaseChannel(Control::ChannelState& to_release) { |
| 752 | impl->CallMethod(method_call); | 397 | impl->ReleaseChannel(to_release); |
| 753 | } | 398 | } |
| 754 | 399 | ||
| 755 | void GPU::CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, | 400 | void GPU::InitAddressSpace(Tegra::MemoryManager& memory_manager) { |
| 756 | u32 methods_pending) { | 401 | impl->InitAddressSpace(memory_manager); |
| 757 | impl->CallMultiMethod(method, subchannel, base_start, amount, methods_pending); | 402 | } |
| 403 | |||
| 404 | void GPU::BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer) { | ||
| 405 | impl->BindRenderer(std::move(renderer)); | ||
| 758 | } | 406 | } |
| 759 | 407 | ||
| 760 | void GPU::FlushCommands() { | 408 | void GPU::FlushCommands() { |
| 761 | impl->FlushCommands(); | 409 | impl->FlushCommands(); |
| 762 | } | 410 | } |
| 763 | 411 | ||
| 764 | void GPU::SyncGuestHost() { | 412 | void GPU::InvalidateGPUCache() { |
| 765 | impl->SyncGuestHost(); | 413 | impl->InvalidateGPUCache(); |
| 766 | } | 414 | } |
| 767 | 415 | ||
| 768 | void GPU::OnCommandListEnd() { | 416 | void GPU::OnCommandListEnd() { |
| @@ -770,17 +418,32 @@ void GPU::OnCommandListEnd() { | |||
| 770 | } | 418 | } |
| 771 | 419 | ||
| 772 | u64 GPU::RequestFlush(VAddr addr, std::size_t size) { | 420 | u64 GPU::RequestFlush(VAddr addr, std::size_t size) { |
| 773 | return impl->RequestFlush(addr, size); | 421 | return impl->RequestSyncOperation( |
| 422 | [this, addr, size]() { impl->rasterizer->FlushRegion(addr, size); }); | ||
| 774 | } | 423 | } |
| 775 | 424 | ||
| 776 | u64 GPU::CurrentFlushRequestFence() const { | 425 | u64 GPU::CurrentSyncRequestFence() const { |
| 777 | return impl->CurrentFlushRequestFence(); | 426 | return impl->CurrentSyncRequestFence(); |
| 427 | } | ||
| 428 | |||
| 429 | void GPU::WaitForSyncOperation(u64 fence) { | ||
| 430 | return impl->WaitForSyncOperation(fence); | ||
| 778 | } | 431 | } |
| 779 | 432 | ||
| 780 | void GPU::TickWork() { | 433 | void GPU::TickWork() { |
| 781 | impl->TickWork(); | 434 | impl->TickWork(); |
| 782 | } | 435 | } |
| 783 | 436 | ||
| 437 | /// Gets a mutable reference to the Host1x interface | ||
| 438 | Host1x::Host1x& GPU::Host1x() { | ||
| 439 | return impl->host1x; | ||
| 440 | } | ||
| 441 | |||
| 442 | /// Gets an immutable reference to the Host1x interface. | ||
| 443 | const Host1x::Host1x& GPU::Host1x() const { | ||
| 444 | return impl->host1x; | ||
| 445 | } | ||
| 446 | |||
| 784 | Engines::Maxwell3D& GPU::Maxwell3D() { | 447 | Engines::Maxwell3D& GPU::Maxwell3D() { |
| 785 | return impl->Maxwell3D(); | 448 | return impl->Maxwell3D(); |
| 786 | } | 449 | } |
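With the rewrite above, GPU::RequestFlush no longer queues a dedicated FlushRequest record; it hands RequestSyncOperation a closure that flushes the region, and callers later block on the returned fence through WaitForSyncOperation. The GPU::Impl members behind those calls are not part of this hunk, so the following is only a plausible shape for that fence/callback queue, using standard mutex and condition-variable plumbing:

    #include <condition_variable>
    #include <cstdint>
    #include <functional>
    #include <list>
    #include <mutex>

    namespace sketch {
    class SyncRequestQueue {
    public:
        // CPU side: queue an operation for the GPU thread, get a fence id back.
        std::uint64_t Request(std::function<void()> action) {
            std::scoped_lock lock{mutex};
            const std::uint64_t fence = ++current_fence_counter;
            requests.emplace_back(std::move(action));
            return fence;
        }

        // GPU thread side: drain pending operations, then publish the new fence.
        void Tick() {
            std::unique_lock lock{mutex};
            while (!requests.empty()) {
                auto action = std::move(requests.front());
                requests.pop_front();
                lock.unlock();
                action(); // e.g. rasterizer->FlushRegion(addr, size)
                lock.lock();
                ++last_signalled_fence;
            }
            cv.notify_all();
        }

        // CPU side: block until the GPU thread has executed the request.
        void Wait(std::uint64_t fence) {
            std::unique_lock lock{mutex};
            cv.wait(lock, [&] { return last_signalled_fence >= fence; });
        }

    private:
        std::mutex mutex;
        std::condition_variable cv;
        std::list<std::function<void()>> requests;
        std::uint64_t current_fence_counter{};
        std::uint64_t last_signalled_fence{};
    };
    } // namespace sketch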
| @@ -797,14 +460,6 @@ const Engines::KeplerCompute& GPU::KeplerCompute() const { | |||
| 797 | return impl->KeplerCompute(); | 460 | return impl->KeplerCompute(); |
| 798 | } | 461 | } |
| 799 | 462 | ||
| 800 | Tegra::MemoryManager& GPU::MemoryManager() { | ||
| 801 | return impl->MemoryManager(); | ||
| 802 | } | ||
| 803 | |||
| 804 | const Tegra::MemoryManager& GPU::MemoryManager() const { | ||
| 805 | return impl->MemoryManager(); | ||
| 806 | } | ||
| 807 | |||
| 808 | Tegra::DmaPusher& GPU::DmaPusher() { | 463 | Tegra::DmaPusher& GPU::DmaPusher() { |
| 809 | return impl->DmaPusher(); | 464 | return impl->DmaPusher(); |
| 810 | } | 465 | } |
| @@ -829,24 +484,9 @@ const VideoCore::ShaderNotify& GPU::ShaderNotify() const { | |||
| 829 | return impl->ShaderNotify(); | 484 | return impl->ShaderNotify(); |
| 830 | } | 485 | } |
| 831 | 486 | ||
| 832 | void GPU::WaitFence(u32 syncpoint_id, u32 value) { | 487 | void GPU::RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer, |
| 833 | impl->WaitFence(syncpoint_id, value); | 488 | std::array<Service::Nvidia::NvFence, 4>& fences, size_t num_fences) { |
| 834 | } | 489 | impl->RequestSwapBuffers(framebuffer, fences, num_fences); |
| 835 | |||
| 836 | void GPU::IncrementSyncPoint(u32 syncpoint_id) { | ||
| 837 | impl->IncrementSyncPoint(syncpoint_id); | ||
| 838 | } | ||
| 839 | |||
| 840 | u32 GPU::GetSyncpointValue(u32 syncpoint_id) const { | ||
| 841 | return impl->GetSyncpointValue(syncpoint_id); | ||
| 842 | } | ||
| 843 | |||
| 844 | void GPU::RegisterSyncptInterrupt(u32 syncpoint_id, u32 value) { | ||
| 845 | impl->RegisterSyncptInterrupt(syncpoint_id, value); | ||
| 846 | } | ||
| 847 | |||
| 848 | bool GPU::CancelSyncptInterrupt(u32 syncpoint_id, u32 value) { | ||
| 849 | return impl->CancelSyncptInterrupt(syncpoint_id, value); | ||
| 850 | } | 490 | } |
| 851 | 491 | ||
| 852 | u64 GPU::GetTicks() const { | 492 | u64 GPU::GetTicks() const { |
| @@ -881,8 +521,8 @@ void GPU::ReleaseContext() { | |||
| 881 | impl->ReleaseContext(); | 521 | impl->ReleaseContext(); |
| 882 | } | 522 | } |
| 883 | 523 | ||
| 884 | void GPU::PushGPUEntries(Tegra::CommandList&& entries) { | 524 | void GPU::PushGPUEntries(s32 channel, Tegra::CommandList&& entries) { |
| 885 | impl->PushGPUEntries(std::move(entries)); | 525 | impl->PushGPUEntries(channel, std::move(entries)); |
| 886 | } | 526 | } |
| 887 | 527 | ||
| 888 | void GPU::PushCommandBuffer(u32 id, Tegra::ChCommandHeaderList& entries) { | 528 | void GPU::PushCommandBuffer(u32 id, Tegra::ChCommandHeaderList& entries) { |
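Taken together, the new gpu.cpp entry points describe a per-channel submission flow: allocate a channel, initialize and bind it, then push command lists tagged with the channel id so the scheduler can interleave them. The snippet below is assumed usage only, reduced to a toy in-process model so it compiles on its own; ChannelState here carries just an id, unlike the real Tegra::Control::ChannelState:

    #include <cstdint>
    #include <map>
    #include <memory>
    #include <utility>
    #include <vector>

    namespace sketch {
    struct ChannelState {
        std::int32_t bind_id{-1};
    };

    using CommandList = std::vector<std::uint32_t>;

    // Shape of the new channel API on Tegra::GPU, shrunk to a toy model.
    class Gpu {
    public:
        std::shared_ptr<ChannelState> AllocateChannel() {
            auto state = std::make_shared<ChannelState>();
            state->bind_id = next_channel_id++;
            channels.emplace(state->bind_id, state);
            return state;
        }
        void InitChannel(ChannelState&) {} // memory manager / engine setup in the real code
        void BindChannel(std::int32_t channel_id) { bound_channel = channel_id; }
        void PushGPUEntries(std::int32_t channel, CommandList&& entries) {
            pending.emplace_back(channel, std::move(entries)); // scheduler.Push(...) in the real code
        }

    private:
        std::int32_t next_channel_id{1};
        std::int32_t bound_channel{-1};
        std::map<std::int32_t, std::shared_ptr<ChannelState>> channels;
        std::vector<std::pair<std::int32_t, CommandList>> pending;
    };
    } // namespace sketch

    // Assumed usage, mirroring AllocateChannel -> InitChannel -> BindChannel -> PushGPUEntries:
    int main() {
        sketch::Gpu gpu;
        auto channel = gpu.AllocateChannel();
        gpu.InitChannel(*channel);
        gpu.BindChannel(channel->bind_id);
        gpu.PushGPUEntries(channel->bind_id, {0xDEADBEEF});
    }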
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h index b939ba315..0a4a8b14f 100644 --- a/src/video_core/gpu.h +++ b/src/video_core/gpu.h | |||
| @@ -89,73 +89,58 @@ class Maxwell3D; | |||
| 89 | class KeplerCompute; | 89 | class KeplerCompute; |
| 90 | } // namespace Engines | 90 | } // namespace Engines |
| 91 | 91 | ||
| 92 | enum class EngineID { | 92 | namespace Control { |
| 93 | FERMI_TWOD_A = 0x902D, // 2D Engine | 93 | struct ChannelState; |
| 94 | MAXWELL_B = 0xB197, // 3D Engine | 94 | } |
| 95 | KEPLER_COMPUTE_B = 0xB1C0, | 95 | |
| 96 | KEPLER_INLINE_TO_MEMORY_B = 0xA140, | 96 | namespace Host1x { |
| 97 | MAXWELL_DMA_COPY_A = 0xB0B5, | 97 | class Host1x; |
| 98 | }; | 98 | } // namespace Host1x |
| 99 | 99 | ||
| 100 | class MemoryManager; | 100 | class MemoryManager; |
| 101 | 101 | ||
| 102 | class GPU final { | 102 | class GPU final { |
| 103 | public: | 103 | public: |
| 104 | struct MethodCall { | ||
| 105 | u32 method{}; | ||
| 106 | u32 argument{}; | ||
| 107 | u32 subchannel{}; | ||
| 108 | u32 method_count{}; | ||
| 109 | |||
| 110 | explicit MethodCall(u32 method_, u32 argument_, u32 subchannel_ = 0, u32 method_count_ = 0) | ||
| 111 | : method(method_), argument(argument_), subchannel(subchannel_), | ||
| 112 | method_count(method_count_) {} | ||
| 113 | |||
| 114 | [[nodiscard]] bool IsLastCall() const { | ||
| 115 | return method_count <= 1; | ||
| 116 | } | ||
| 117 | }; | ||
| 118 | |||
| 119 | enum class FenceOperation : u32 { | ||
| 120 | Acquire = 0, | ||
| 121 | Increment = 1, | ||
| 122 | }; | ||
| 123 | |||
| 124 | union FenceAction { | ||
| 125 | u32 raw; | ||
| 126 | BitField<0, 1, FenceOperation> op; | ||
| 127 | BitField<8, 24, u32> syncpoint_id; | ||
| 128 | }; | ||
| 129 | |||
| 130 | explicit GPU(Core::System& system, bool is_async, bool use_nvdec); | 104 | explicit GPU(Core::System& system, bool is_async, bool use_nvdec); |
| 131 | ~GPU(); | 105 | ~GPU(); |
| 132 | 106 | ||
| 133 | /// Binds a renderer to the GPU. | 107 | /// Binds a renderer to the GPU. |
| 134 | void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer); | 108 | void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer); |
| 135 | 109 | ||
| 136 | /// Calls a GPU method. | ||
| 137 | void CallMethod(const MethodCall& method_call); | ||
| 138 | |||
| 139 | /// Calls a GPU multivalue method. | ||
| 140 | void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, | ||
| 141 | u32 methods_pending); | ||
| 142 | |||
| 143 | /// Flush all current written commands into the host GPU for execution. | 110 | /// Flush all current written commands into the host GPU for execution. |
| 144 | void FlushCommands(); | 111 | void FlushCommands(); |
| 145 | /// Synchronizes CPU writes with Host GPU memory. | 112 | /// Synchronizes CPU writes with Host GPU memory. |
| 146 | void SyncGuestHost(); | 113 | void InvalidateGPUCache(); |
| 147 | /// Signal the ending of command list. | 114 | /// Signal the ending of command list. |
| 148 | void OnCommandListEnd(); | 115 | void OnCommandListEnd(); |
| 149 | 116 | ||
| 117 | std::shared_ptr<Control::ChannelState> AllocateChannel(); | ||
| 118 | |||
| 119 | void InitChannel(Control::ChannelState& to_init); | ||
| 120 | |||
| 121 | void BindChannel(s32 channel_id); | ||
| 122 | |||
| 123 | void ReleaseChannel(Control::ChannelState& to_release); | ||
| 124 | |||
| 125 | void InitAddressSpace(Tegra::MemoryManager& memory_manager); | ||
| 126 | |||
| 150 | /// Request a host GPU memory flush from the CPU. | 127 | /// Request a host GPU memory flush from the CPU. |
| 151 | [[nodiscard]] u64 RequestFlush(VAddr addr, std::size_t size); | 128 | [[nodiscard]] u64 RequestFlush(VAddr addr, std::size_t size); |
| 152 | 129 | ||
| 153 | /// Obtains current flush request fence id. | 130 | /// Obtains current flush request fence id. |
| 154 | [[nodiscard]] u64 CurrentFlushRequestFence() const; | 131 | [[nodiscard]] u64 CurrentSyncRequestFence() const; |
| 132 | |||
| 133 | void WaitForSyncOperation(u64 fence); | ||
| 155 | 134 | ||
| 156 | /// Tick pending requests within the GPU. | 135 | /// Tick pending requests within the GPU. |
| 157 | void TickWork(); | 136 | void TickWork(); |
| 158 | 137 | ||
| 138 | /// Gets a mutable reference to the Host1x interface | ||
| 139 | [[nodiscard]] Host1x::Host1x& Host1x(); | ||
| 140 | |||
| 141 | /// Gets an immutable reference to the Host1x interface. | ||
| 142 | [[nodiscard]] const Host1x::Host1x& Host1x() const; | ||
| 143 | |||
| 159 | /// Returns a reference to the Maxwell3D GPU engine. | 144 | /// Returns a reference to the Maxwell3D GPU engine. |
| 160 | [[nodiscard]] Engines::Maxwell3D& Maxwell3D(); | 145 | [[nodiscard]] Engines::Maxwell3D& Maxwell3D(); |
| 161 | 146 | ||
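The FenceAction union dropped from gpu.h above packs a fence operation and a syncpoint id into one 32-bit word through BitField<offset, size, T>: a single bit for the operation and 24 bits starting at bit 8 for the syncpoint. The same layout written with plain shifts and masks, as a sketch of the encoding rather than of yuzu's BitField helper:

    #include <cstdint>

    namespace sketch {
    enum class FenceOperation : std::uint32_t { Acquire = 0, Increment = 1 };

    // Bit 0: operation; bits 8..31: syncpoint id (matching BitField<0,1,...> and BitField<8,24,...>).
    constexpr std::uint32_t PackFenceAction(FenceOperation op, std::uint32_t syncpoint_id) {
        return (static_cast<std::uint32_t>(op) & 0x1u) | ((syncpoint_id & 0x00FF'FFFFu) << 8);
    }

    constexpr FenceOperation UnpackOp(std::uint32_t raw) {
        return static_cast<FenceOperation>(raw & 0x1u);
    }

    constexpr std::uint32_t UnpackSyncpointId(std::uint32_t raw) {
        return (raw >> 8) & 0x00FF'FFFFu;
    }

    static_assert(UnpackSyncpointId(PackFenceAction(FenceOperation::Increment, 42)) == 42);
    static_assert(UnpackOp(PackFenceAction(FenceOperation::Increment, 42)) == FenceOperation::Increment);
    } // namespace sketch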
| @@ -168,12 +153,6 @@ public: | |||
| 168 | /// Returns a reference to the KeplerCompute GPU engine. | 153 | /// Returns a reference to the KeplerCompute GPU engine. |
| 169 | [[nodiscard]] const Engines::KeplerCompute& KeplerCompute() const; | 154 | [[nodiscard]] const Engines::KeplerCompute& KeplerCompute() const; |
| 170 | 155 | ||
| 171 | /// Returns a reference to the GPU memory manager. | ||
| 172 | [[nodiscard]] Tegra::MemoryManager& MemoryManager(); | ||
| 173 | |||
| 174 | /// Returns a const reference to the GPU memory manager. | ||
| 175 | [[nodiscard]] const Tegra::MemoryManager& MemoryManager() const; | ||
| 176 | |||
| 177 | /// Returns a reference to the GPU DMA pusher. | 156 | /// Returns a reference to the GPU DMA pusher. |
| 178 | [[nodiscard]] Tegra::DmaPusher& DmaPusher(); | 157 | [[nodiscard]] Tegra::DmaPusher& DmaPusher(); |
| 179 | 158 | ||
| @@ -192,17 +171,6 @@ public: | |||
| 192 | /// Returns a const reference to the shader notifier. | 171 | /// Returns a const reference to the shader notifier. |
| 193 | [[nodiscard]] const VideoCore::ShaderNotify& ShaderNotify() const; | 172 | [[nodiscard]] const VideoCore::ShaderNotify& ShaderNotify() const; |
| 194 | 173 | ||
| 195 | /// Allows the CPU/NvFlinger to wait on the GPU before presenting a frame. | ||
| 196 | void WaitFence(u32 syncpoint_id, u32 value); | ||
| 197 | |||
| 198 | void IncrementSyncPoint(u32 syncpoint_id); | ||
| 199 | |||
| 200 | [[nodiscard]] u32 GetSyncpointValue(u32 syncpoint_id) const; | ||
| 201 | |||
| 202 | void RegisterSyncptInterrupt(u32 syncpoint_id, u32 value); | ||
| 203 | |||
| 204 | [[nodiscard]] bool CancelSyncptInterrupt(u32 syncpoint_id, u32 value); | ||
| 205 | |||
| 206 | [[nodiscard]] u64 GetTicks() const; | 174 | [[nodiscard]] u64 GetTicks() const; |
| 207 | 175 | ||
| 208 | [[nodiscard]] bool IsAsync() const; | 176 | [[nodiscard]] bool IsAsync() const; |
| @@ -211,6 +179,9 @@ public: | |||
| 211 | 179 | ||
| 212 | void RendererFrameEndNotify(); | 180 | void RendererFrameEndNotify(); |
| 213 | 181 | ||
| 182 | void RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer, | ||
| 183 | std::array<Service::Nvidia::NvFence, 4>& fences, size_t num_fences); | ||
| 184 | |||
| 214 | /// Performs any additional setup necessary in order to begin GPU emulation. | 185 | /// Performs any additional setup necessary in order to begin GPU emulation. |
| 215 | /// This can be used to launch any necessary threads and register any necessary | 186 | /// This can be used to launch any necessary threads and register any necessary |
| 216 | /// core timing events. | 187 | /// core timing events. |
| @@ -226,7 +197,7 @@ public: | |||
| 226 | void ReleaseContext(); | 197 | void ReleaseContext(); |
| 227 | 198 | ||
| 228 | /// Push GPU command entries to be processed | 199 | /// Push GPU command entries to be processed |
| 229 | void PushGPUEntries(Tegra::CommandList&& entries); | 200 | void PushGPUEntries(s32 channel, Tegra::CommandList&& entries); |
| 230 | 201 | ||
| 231 | /// Push GPU command buffer entries to be processed | 202 | /// Push GPU command buffer entries to be processed |
| 232 | void PushCommandBuffer(u32 id, Tegra::ChCommandHeaderList& entries); | 203 | void PushCommandBuffer(u32 id, Tegra::ChCommandHeaderList& entries); |
| @@ -248,7 +219,7 @@ public: | |||
| 248 | 219 | ||
| 249 | private: | 220 | private: |
| 250 | struct Impl; | 221 | struct Impl; |
| 251 | std::unique_ptr<Impl> impl; | 222 | mutable std::unique_ptr<Impl> impl; |
| 252 | }; | 223 | }; |
| 253 | 224 | ||
| 254 | } // namespace Tegra | 225 | } // namespace Tegra |
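One easily missed change in gpu.h is that the pimpl member becomes mutable. Const-ness does not propagate through std::unique_ptr to the pointee, so the qualifier only matters when a const member function needs to reset or replace the pointer itself; whether the GPU class actually relies on that is not visible in this diff. A minimal illustration of the pattern:

    #include <memory>

    namespace sketch {
    class Widget {
    public:
        // Logically const for callers, but may have to build the implementation lazily.
        int Value() const {
            if (!impl) {
                impl = std::make_unique<Impl>(); // reassigning the pointer needs `mutable` below
            }
            return impl->value;
        }

    private:
        struct Impl {
            int value{42};
        };
        // Const member functions may still reset or replace the pointer itself.
        mutable std::unique_ptr<Impl> impl;
    };
    } // namespace sketch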
diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp index f0e48cfbd..1bd477011 100644 --- a/src/video_core/gpu_thread.cpp +++ b/src/video_core/gpu_thread.cpp | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | #include "common/thread.h" | 8 | #include "common/thread.h" |
| 9 | #include "core/core.h" | 9 | #include "core/core.h" |
| 10 | #include "core/frontend/emu_window.h" | 10 | #include "core/frontend/emu_window.h" |
| 11 | #include "video_core/control/scheduler.h" | ||
| 11 | #include "video_core/dma_pusher.h" | 12 | #include "video_core/dma_pusher.h" |
| 12 | #include "video_core/gpu.h" | 13 | #include "video_core/gpu.h" |
| 13 | #include "video_core/gpu_thread.h" | 14 | #include "video_core/gpu_thread.h" |
| @@ -18,7 +19,7 @@ namespace VideoCommon::GPUThread { | |||
| 18 | /// Runs the GPU thread | 19 | /// Runs the GPU thread |
| 19 | static void RunThread(std::stop_token stop_token, Core::System& system, | 20 | static void RunThread(std::stop_token stop_token, Core::System& system, |
| 20 | VideoCore::RendererBase& renderer, Core::Frontend::GraphicsContext& context, | 21 | VideoCore::RendererBase& renderer, Core::Frontend::GraphicsContext& context, |
| 21 | Tegra::DmaPusher& dma_pusher, SynchState& state) { | 22 | Tegra::Control::Scheduler& scheduler, SynchState& state) { |
| 22 | std::string name = "GPU"; | 23 | std::string name = "GPU"; |
| 23 | MicroProfileOnThreadCreate(name.c_str()); | 24 | MicroProfileOnThreadCreate(name.c_str()); |
| 24 | SCOPE_EXIT({ MicroProfileOnThreadExit(); }); | 25 | SCOPE_EXIT({ MicroProfileOnThreadExit(); }); |
| @@ -36,8 +37,7 @@ static void RunThread(std::stop_token stop_token, Core::System& system, | |||
| 36 | break; | 37 | break; |
| 37 | } | 38 | } |
| 38 | if (auto* submit_list = std::get_if<SubmitListCommand>(&next.data)) { | 39 | if (auto* submit_list = std::get_if<SubmitListCommand>(&next.data)) { |
| 39 | dma_pusher.Push(std::move(submit_list->entries)); | 40 | scheduler.Push(submit_list->channel, std::move(submit_list->entries)); |
| 40 | dma_pusher.DispatchCalls(); | ||
| 41 | } else if (const auto* data = std::get_if<SwapBuffersCommand>(&next.data)) { | 41 | } else if (const auto* data = std::get_if<SwapBuffersCommand>(&next.data)) { |
| 42 | renderer.SwapBuffers(data->framebuffer ? &*data->framebuffer : nullptr); | 42 | renderer.SwapBuffers(data->framebuffer ? &*data->framebuffer : nullptr); |
| 43 | } else if (std::holds_alternative<OnCommandListEndCommand>(next.data)) { | 43 | } else if (std::holds_alternative<OnCommandListEndCommand>(next.data)) { |
| @@ -68,14 +68,14 @@ ThreadManager::~ThreadManager() = default; | |||
| 68 | 68 | ||
| 69 | void ThreadManager::StartThread(VideoCore::RendererBase& renderer, | 69 | void ThreadManager::StartThread(VideoCore::RendererBase& renderer, |
| 70 | Core::Frontend::GraphicsContext& context, | 70 | Core::Frontend::GraphicsContext& context, |
| 71 | Tegra::DmaPusher& dma_pusher) { | 71 | Tegra::Control::Scheduler& scheduler) { |
| 72 | rasterizer = renderer.ReadRasterizer(); | 72 | rasterizer = renderer.ReadRasterizer(); |
| 73 | thread = std::jthread(RunThread, std::ref(system), std::ref(renderer), std::ref(context), | 73 | thread = std::jthread(RunThread, std::ref(system), std::ref(renderer), std::ref(context), |
| 74 | std::ref(dma_pusher), std::ref(state)); | 74 | std::ref(scheduler), std::ref(state)); |
| 75 | } | 75 | } |
| 76 | 76 | ||
| 77 | void ThreadManager::SubmitList(Tegra::CommandList&& entries) { | 77 | void ThreadManager::SubmitList(s32 channel, Tegra::CommandList&& entries) { |
| 78 | PushCommand(SubmitListCommand(std::move(entries))); | 78 | PushCommand(SubmitListCommand(channel, std::move(entries))); |
| 79 | } | 79 | } |
| 80 | 80 | ||
| 81 | void ThreadManager::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) { | 81 | void ThreadManager::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) { |
| @@ -93,8 +93,12 @@ void ThreadManager::FlushRegion(VAddr addr, u64 size) { | |||
| 93 | } | 93 | } |
| 94 | auto& gpu = system.GPU(); | 94 | auto& gpu = system.GPU(); |
| 95 | u64 fence = gpu.RequestFlush(addr, size); | 95 | u64 fence = gpu.RequestFlush(addr, size); |
| 96 | PushCommand(GPUTickCommand(), true); | 96 | TickGPU(); |
| 97 | ASSERT(fence <= gpu.CurrentFlushRequestFence()); | 97 | gpu.WaitForSyncOperation(fence); |
| 98 | } | ||
| 99 | |||
| 100 | void ThreadManager::TickGPU() { | ||
| 101 | PushCommand(GPUTickCommand()); | ||
| 98 | } | 102 | } |
| 99 | 103 | ||
| 100 | void ThreadManager::InvalidateRegion(VAddr addr, u64 size) { | 104 | void ThreadManager::InvalidateRegion(VAddr addr, u64 size) { |
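The worker loop in RunThread keeps its original shape after this change: pop the next command, branch on the variant alternative, and now hand submitted lists to the per-channel scheduler instead of a single DmaPusher. A standalone sketch of that dispatch pattern, with the queue and payloads simplified so it compiles in isolation:

    #include <cstdint>
    #include <cstdio>
    #include <deque>
    #include <variant>
    #include <vector>

    namespace sketch {
    struct SubmitListCommand {
        std::int32_t channel;
        std::vector<std::uint32_t> entries;
    };
    struct SwapBuffersCommand {};
    struct GPUTickCommand {};

    using CommandData = std::variant<SubmitListCommand, SwapBuffersCommand, GPUTickCommand>;

    void RunOne(CommandData&& next) {
        if (auto* submit_list = std::get_if<SubmitListCommand>(&next)) {
            // scheduler.Push(submit_list->channel, std::move(submit_list->entries)) in the real thread
            std::printf("push %zu entries to channel %d\n", submit_list->entries.size(),
                        submit_list->channel);
        } else if (std::holds_alternative<SwapBuffersCommand>(next)) {
            std::printf("swap buffers\n");
        } else if (std::holds_alternative<GPUTickCommand>(next)) {
            std::printf("tick pending sync requests\n");
        }
    }

    void Drain(std::deque<CommandData>& queue) {
        while (!queue.empty()) {
            RunOne(std::move(queue.front()));
            queue.pop_front();
        }
    }
    } // namespace sketch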
diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h index 2f8210cb9..64628d3e3 100644 --- a/src/video_core/gpu_thread.h +++ b/src/video_core/gpu_thread.h | |||
| @@ -15,7 +15,9 @@ | |||
| 15 | 15 | ||
| 16 | namespace Tegra { | 16 | namespace Tegra { |
| 17 | struct FramebufferConfig; | 17 | struct FramebufferConfig; |
| 18 | class DmaPusher; | 18 | namespace Control { |
| 19 | class Scheduler; | ||
| 20 | } | ||
| 19 | } // namespace Tegra | 21 | } // namespace Tegra |
| 20 | 22 | ||
| 21 | namespace Core { | 23 | namespace Core { |
| @@ -34,8 +36,10 @@ namespace VideoCommon::GPUThread { | |||
| 34 | 36 | ||
| 35 | /// Command to signal to the GPU thread that a command list is ready for processing | 37 | /// Command to signal to the GPU thread that a command list is ready for processing |
| 36 | struct SubmitListCommand final { | 38 | struct SubmitListCommand final { |
| 37 | explicit SubmitListCommand(Tegra::CommandList&& entries_) : entries{std::move(entries_)} {} | 39 | explicit SubmitListCommand(s32 channel_, Tegra::CommandList&& entries_) |
| 40 | : channel{channel_}, entries{std::move(entries_)} {} | ||
| 38 | 41 | ||
| 42 | s32 channel; | ||
| 39 | Tegra::CommandList entries; | 43 | Tegra::CommandList entries; |
| 40 | }; | 44 | }; |
| 41 | 45 | ||
| @@ -112,10 +116,10 @@ public: | |||
| 112 | 116 | ||
| 113 | /// Creates and starts the GPU thread. | 117 | /// Creates and starts the GPU thread. |
| 114 | void StartThread(VideoCore::RendererBase& renderer, Core::Frontend::GraphicsContext& context, | 118 | void StartThread(VideoCore::RendererBase& renderer, Core::Frontend::GraphicsContext& context, |
| 115 | Tegra::DmaPusher& dma_pusher); | 119 | Tegra::Control::Scheduler& scheduler); |
| 116 | 120 | ||
| 117 | /// Push GPU command entries to be processed | 121 | /// Push GPU command entries to be processed |
| 118 | void SubmitList(Tegra::CommandList&& entries); | 122 | void SubmitList(s32 channel, Tegra::CommandList&& entries); |
| 119 | 123 | ||
| 120 | /// Swap buffers (render frame) | 124 | /// Swap buffers (render frame) |
| 121 | void SwapBuffers(const Tegra::FramebufferConfig* framebuffer); | 125 | void SwapBuffers(const Tegra::FramebufferConfig* framebuffer); |
| @@ -131,6 +135,8 @@ public: | |||
| 131 | 135 | ||
| 132 | void OnCommandListEnd(); | 136 | void OnCommandListEnd(); |
| 133 | 137 | ||
| 138 | void TickGPU(); | ||
| 139 | |||
| 134 | private: | 140 | private: |
| 135 | /// Pushes a command to be executed by the GPU thread | 141 | /// Pushes a command to be executed by the GPU thread |
| 136 | u64 PushCommand(CommandData&& command_data, bool block = false); | 142 | u64 PushCommand(CommandData&& command_data, bool block = false); |
diff --git a/src/video_core/command_classes/codecs/codec.cpp b/src/video_core/host1x/codecs/codec.cpp index a5eb97b7f..42e7d6e4f 100644 --- a/src/video_core/command_classes/codecs/codec.cpp +++ b/src/video_core/host1x/codecs/codec.cpp | |||
| @@ -6,11 +6,11 @@ | |||
| 6 | #include <vector> | 6 | #include <vector> |
| 7 | #include "common/assert.h" | 7 | #include "common/assert.h" |
| 8 | #include "common/settings.h" | 8 | #include "common/settings.h" |
| 9 | #include "video_core/command_classes/codecs/codec.h" | 9 | #include "video_core/host1x/codecs/codec.h" |
| 10 | #include "video_core/command_classes/codecs/h264.h" | 10 | #include "video_core/host1x/codecs/h264.h" |
| 11 | #include "video_core/command_classes/codecs/vp8.h" | 11 | #include "video_core/host1x/codecs/vp8.h" |
| 12 | #include "video_core/command_classes/codecs/vp9.h" | 12 | #include "video_core/host1x/codecs/vp9.h" |
| 13 | #include "video_core/gpu.h" | 13 | #include "video_core/host1x/host1x.h" |
| 14 | #include "video_core/memory_manager.h" | 14 | #include "video_core/memory_manager.h" |
| 15 | 15 | ||
| 16 | extern "C" { | 16 | extern "C" { |
| @@ -73,10 +73,10 @@ void AVFrameDeleter(AVFrame* ptr) { | |||
| 73 | av_frame_free(&ptr); | 73 | av_frame_free(&ptr); |
| 74 | } | 74 | } |
| 75 | 75 | ||
| 76 | Codec::Codec(GPU& gpu_, const NvdecCommon::NvdecRegisters& regs) | 76 | Codec::Codec(Host1x::Host1x& host1x_, const Host1x::NvdecCommon::NvdecRegisters& regs) |
| 77 | : gpu(gpu_), state{regs}, h264_decoder(std::make_unique<Decoder::H264>(gpu)), | 77 | : host1x(host1x_), state{regs}, h264_decoder(std::make_unique<Decoder::H264>(host1x)), |
| 78 | vp8_decoder(std::make_unique<Decoder::VP8>(gpu)), | 78 | vp8_decoder(std::make_unique<Decoder::VP8>(host1x)), |
| 79 | vp9_decoder(std::make_unique<Decoder::VP9>(gpu)) {} | 79 | vp9_decoder(std::make_unique<Decoder::VP9>(host1x)) {} |
| 80 | 80 | ||
| 81 | Codec::~Codec() { | 81 | Codec::~Codec() { |
| 82 | if (!initialized) { | 82 | if (!initialized) { |
| @@ -168,11 +168,11 @@ void Codec::InitializeGpuDecoder() { | |||
| 168 | void Codec::Initialize() { | 168 | void Codec::Initialize() { |
| 169 | const AVCodecID codec = [&] { | 169 | const AVCodecID codec = [&] { |
| 170 | switch (current_codec) { | 170 | switch (current_codec) { |
| 171 | case NvdecCommon::VideoCodec::H264: | 171 | case Host1x::NvdecCommon::VideoCodec::H264: |
| 172 | return AV_CODEC_ID_H264; | 172 | return AV_CODEC_ID_H264; |
| 173 | case NvdecCommon::VideoCodec::VP8: | 173 | case Host1x::NvdecCommon::VideoCodec::VP8: |
| 174 | return AV_CODEC_ID_VP8; | 174 | return AV_CODEC_ID_VP8; |
| 175 | case NvdecCommon::VideoCodec::VP9: | 175 | case Host1x::NvdecCommon::VideoCodec::VP9: |
| 176 | return AV_CODEC_ID_VP9; | 176 | return AV_CODEC_ID_VP9; |
| 177 | default: | 177 | default: |
| 178 | UNIMPLEMENTED_MSG("Unknown codec {}", current_codec); | 178 | UNIMPLEMENTED_MSG("Unknown codec {}", current_codec); |
| @@ -197,7 +197,7 @@ void Codec::Initialize() { | |||
| 197 | initialized = true; | 197 | initialized = true; |
| 198 | } | 198 | } |
| 199 | 199 | ||
| 200 | void Codec::SetTargetCodec(NvdecCommon::VideoCodec codec) { | 200 | void Codec::SetTargetCodec(Host1x::NvdecCommon::VideoCodec codec) { |
| 201 | if (current_codec != codec) { | 201 | if (current_codec != codec) { |
| 202 | current_codec = codec; | 202 | current_codec = codec; |
| 203 | LOG_INFO(Service_NVDRV, "NVDEC video codec initialized to {}", GetCurrentCodecName()); | 203 | LOG_INFO(Service_NVDRV, "NVDEC video codec initialized to {}", GetCurrentCodecName()); |
| @@ -215,11 +215,11 @@ void Codec::Decode() { | |||
| 215 | bool vp9_hidden_frame = false; | 215 | bool vp9_hidden_frame = false; |
| 216 | const auto& frame_data = [&]() { | 216 | const auto& frame_data = [&]() { |
| 217 | switch (current_codec) { | 217 | switch (current_codec) { |
| 218 | case Tegra::NvdecCommon::VideoCodec::H264: | 218 | case Tegra::Host1x::NvdecCommon::VideoCodec::H264: |
| 219 | return h264_decoder->ComposeFrame(state, is_first_frame); | 219 | return h264_decoder->ComposeFrame(state, is_first_frame); |
| 220 | case Tegra::NvdecCommon::VideoCodec::VP8: | 220 | case Tegra::Host1x::NvdecCommon::VideoCodec::VP8: |
| 221 | return vp8_decoder->ComposeFrame(state); | 221 | return vp8_decoder->ComposeFrame(state); |
| 222 | case Tegra::NvdecCommon::VideoCodec::VP9: | 222 | case Tegra::Host1x::NvdecCommon::VideoCodec::VP9: |
| 223 | vp9_decoder->ComposeFrame(state); | 223 | vp9_decoder->ComposeFrame(state); |
| 224 | vp9_hidden_frame = vp9_decoder->WasFrameHidden(); | 224 | vp9_hidden_frame = vp9_decoder->WasFrameHidden(); |
| 225 | return vp9_decoder->GetFrameBytes(); | 225 | return vp9_decoder->GetFrameBytes(); |
| @@ -287,21 +287,21 @@ AVFramePtr Codec::GetCurrentFrame() { | |||
| 287 | return frame; | 287 | return frame; |
| 288 | } | 288 | } |
| 289 | 289 | ||
| 290 | NvdecCommon::VideoCodec Codec::GetCurrentCodec() const { | 290 | Host1x::NvdecCommon::VideoCodec Codec::GetCurrentCodec() const { |
| 291 | return current_codec; | 291 | return current_codec; |
| 292 | } | 292 | } |
| 293 | 293 | ||
| 294 | std::string_view Codec::GetCurrentCodecName() const { | 294 | std::string_view Codec::GetCurrentCodecName() const { |
| 295 | switch (current_codec) { | 295 | switch (current_codec) { |
| 296 | case NvdecCommon::VideoCodec::None: | 296 | case Host1x::NvdecCommon::VideoCodec::None: |
| 297 | return "None"; | 297 | return "None"; |
| 298 | case NvdecCommon::VideoCodec::H264: | 298 | case Host1x::NvdecCommon::VideoCodec::H264: |
| 299 | return "H264"; | 299 | return "H264"; |
| 300 | case NvdecCommon::VideoCodec::VP8: | 300 | case Host1x::NvdecCommon::VideoCodec::VP8: |
| 301 | return "VP8"; | 301 | return "VP8"; |
| 302 | case NvdecCommon::VideoCodec::H265: | 302 | case Host1x::NvdecCommon::VideoCodec::H265: |
| 303 | return "H265"; | 303 | return "H265"; |
| 304 | case NvdecCommon::VideoCodec::VP9: | 304 | case Host1x::NvdecCommon::VideoCodec::VP9: |
| 305 | return "VP9"; | 305 | return "VP9"; |
| 306 | default: | 306 | default: |
| 307 | return "Unknown"; | 307 | return "Unknown"; |
diff --git a/src/video_core/command_classes/codecs/codec.h b/src/video_core/host1x/codecs/codec.h index 0c2405465..0d45fb7fe 100644 --- a/src/video_core/command_classes/codecs/codec.h +++ b/src/video_core/host1x/codecs/codec.h | |||
| @@ -6,8 +6,8 @@ | |||
| 6 | #include <memory> | 6 | #include <memory> |
| 7 | #include <string_view> | 7 | #include <string_view> |
| 8 | #include <queue> | 8 | #include <queue> |
| 9 | 9 | #include "common/common_types.h" | |
| 10 | #include "video_core/command_classes/nvdec_common.h" | 10 | #include "video_core/host1x/nvdec_common.h" |
| 11 | 11 | ||
| 12 | extern "C" { | 12 | extern "C" { |
| 13 | #if defined(__GNUC__) || defined(__clang__) | 13 | #if defined(__GNUC__) || defined(__clang__) |
| @@ -21,7 +21,6 @@ extern "C" { | |||
| 21 | } | 21 | } |
| 22 | 22 | ||
| 23 | namespace Tegra { | 23 | namespace Tegra { |
| 24 | class GPU; | ||
| 25 | 24 | ||
| 26 | void AVFrameDeleter(AVFrame* ptr); | 25 | void AVFrameDeleter(AVFrame* ptr); |
| 27 | using AVFramePtr = std::unique_ptr<AVFrame, decltype(&AVFrameDeleter)>; | 26 | using AVFramePtr = std::unique_ptr<AVFrame, decltype(&AVFrameDeleter)>; |
| @@ -32,16 +31,20 @@ class VP8; | |||
| 32 | class VP9; | 31 | class VP9; |
| 33 | } // namespace Decoder | 32 | } // namespace Decoder |
| 34 | 33 | ||
| 34 | namespace Host1x { | ||
| 35 | class Host1x; | ||
| 36 | } // namespace Host1x | ||
| 37 | |||
| 35 | class Codec { | 38 | class Codec { |
| 36 | public: | 39 | public: |
| 37 | explicit Codec(GPU& gpu, const NvdecCommon::NvdecRegisters& regs); | 40 | explicit Codec(Host1x::Host1x& host1x, const Host1x::NvdecCommon::NvdecRegisters& regs); |
| 38 | ~Codec(); | 41 | ~Codec(); |
| 39 | 42 | ||
| 40 | /// Initialize the codec, returning success or failure | 43 | /// Initialize the codec, returning success or failure |
| 41 | void Initialize(); | 44 | void Initialize(); |
| 42 | 45 | ||
| 43 | /// Sets NVDEC video stream codec | 46 | /// Sets NVDEC video stream codec |
| 44 | void SetTargetCodec(NvdecCommon::VideoCodec codec); | 47 | void SetTargetCodec(Host1x::NvdecCommon::VideoCodec codec); |
| 45 | 48 | ||
| 46 | /// Call decoders to construct headers, decode AVFrame with ffmpeg | 49 | /// Call decoders to construct headers, decode AVFrame with ffmpeg |
| 47 | void Decode(); | 50 | void Decode(); |
| @@ -50,7 +53,7 @@ public: | |||
| 50 | [[nodiscard]] AVFramePtr GetCurrentFrame(); | 53 | [[nodiscard]] AVFramePtr GetCurrentFrame(); |
| 51 | 54 | ||
| 52 | /// Returns the value of current_codec | 55 | /// Returns the value of current_codec |
| 53 | [[nodiscard]] NvdecCommon::VideoCodec GetCurrentCodec() const; | 56 | [[nodiscard]] Host1x::NvdecCommon::VideoCodec GetCurrentCodec() const; |
| 54 | 57 | ||
| 55 | /// Return name of the current codec | 58 | /// Return name of the current codec |
| 56 | [[nodiscard]] std::string_view GetCurrentCodecName() const; | 59 | [[nodiscard]] std::string_view GetCurrentCodecName() const; |
| @@ -63,14 +66,14 @@ private: | |||
| 63 | bool CreateGpuAvDevice(); | 66 | bool CreateGpuAvDevice(); |
| 64 | 67 | ||
| 65 | bool initialized{}; | 68 | bool initialized{}; |
| 66 | NvdecCommon::VideoCodec current_codec{NvdecCommon::VideoCodec::None}; | 69 | Host1x::NvdecCommon::VideoCodec current_codec{Host1x::NvdecCommon::VideoCodec::None}; |
| 67 | 70 | ||
| 68 | const AVCodec* av_codec{nullptr}; | 71 | const AVCodec* av_codec{nullptr}; |
| 69 | AVCodecContext* av_codec_ctx{nullptr}; | 72 | AVCodecContext* av_codec_ctx{nullptr}; |
| 70 | AVBufferRef* av_gpu_decoder{nullptr}; | 73 | AVBufferRef* av_gpu_decoder{nullptr}; |
| 71 | 74 | ||
| 72 | GPU& gpu; | 75 | Host1x::Host1x& host1x; |
| 73 | const NvdecCommon::NvdecRegisters& state; | 76 | const Host1x::NvdecCommon::NvdecRegisters& state; |
| 74 | std::unique_ptr<Decoder::H264> h264_decoder; | 77 | std::unique_ptr<Decoder::H264> h264_decoder; |
| 75 | std::unique_ptr<Decoder::VP8> vp8_decoder; | 78 | std::unique_ptr<Decoder::VP8> vp8_decoder; |
| 76 | std::unique_ptr<Decoder::VP9> vp9_decoder; | 79 | std::unique_ptr<Decoder::VP9> vp9_decoder; |
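Codec::Initialize, shown in the codec.cpp hunks above, boils down to mapping the NVDEC codec selector onto an FFmpeg decoder id. A condensed version of that mapping; the enumerator values here are illustrative stand-ins for Host1x::NvdecCommon::VideoCodec, and unmapped selectors fall back to AV_CODEC_ID_NONE instead of the UNIMPLEMENTED_MSG path in the real code:

    #include <cstdint>

    extern "C" {
    #include <libavcodec/avcodec.h>
    }

    namespace sketch {
    // Stand-in for the NVDEC codec selector; values are illustrative only.
    enum class VideoCodec : std::uint32_t { None = 0, H264, VP8, H265, VP9 };

    AVCodecID ToAVCodecID(VideoCodec codec) {
        switch (codec) {
        case VideoCodec::H264:
            return AV_CODEC_ID_H264;
        case VideoCodec::VP8:
            return AV_CODEC_ID_VP8;
        case VideoCodec::VP9:
            return AV_CODEC_ID_VP9;
        default:
            return AV_CODEC_ID_NONE; // None/H265 are not mapped in the hunk shown above
        }
    }
    } // namespace sketch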
diff --git a/src/video_core/command_classes/codecs/h264.cpp b/src/video_core/host1x/codecs/h264.cpp index e2acd54d4..e87bd65fa 100644 --- a/src/video_core/command_classes/codecs/h264.cpp +++ b/src/video_core/host1x/codecs/h264.cpp | |||
| @@ -5,8 +5,8 @@ | |||
| 5 | #include <bit> | 5 | #include <bit> |
| 6 | 6 | ||
| 7 | #include "common/settings.h" | 7 | #include "common/settings.h" |
| 8 | #include "video_core/command_classes/codecs/h264.h" | 8 | #include "video_core/host1x/codecs/h264.h" |
| 9 | #include "video_core/gpu.h" | 9 | #include "video_core/host1x/host1x.h" |
| 10 | #include "video_core/memory_manager.h" | 10 | #include "video_core/memory_manager.h" |
| 11 | 11 | ||
| 12 | namespace Tegra::Decoder { | 12 | namespace Tegra::Decoder { |
| @@ -24,19 +24,20 @@ constexpr std::array<u8, 16> zig_zag_scan{ | |||
| 24 | }; | 24 | }; |
| 25 | } // Anonymous namespace | 25 | } // Anonymous namespace |
| 26 | 26 | ||
| 27 | H264::H264(GPU& gpu_) : gpu(gpu_) {} | 27 | H264::H264(Host1x::Host1x& host1x_) : host1x{host1x_} {} |
| 28 | 28 | ||
| 29 | H264::~H264() = default; | 29 | H264::~H264() = default; |
| 30 | 30 | ||
| 31 | const std::vector<u8>& H264::ComposeFrame(const NvdecCommon::NvdecRegisters& state, | 31 | const std::vector<u8>& H264::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state, |
| 32 | bool is_first_frame) { | 32 | bool is_first_frame) { |
| 33 | H264DecoderContext context; | 33 | H264DecoderContext context; |
| 34 | gpu.MemoryManager().ReadBlock(state.picture_info_offset, &context, sizeof(H264DecoderContext)); | 34 | host1x.MemoryManager().ReadBlock(state.picture_info_offset, &context, |
| 35 | sizeof(H264DecoderContext)); | ||
| 35 | 36 | ||
| 36 | const s64 frame_number = context.h264_parameter_set.frame_number.Value(); | 37 | const s64 frame_number = context.h264_parameter_set.frame_number.Value(); |
| 37 | if (!is_first_frame && frame_number != 0) { | 38 | if (!is_first_frame && frame_number != 0) { |
| 38 | frame.resize(context.stream_len); | 39 | frame.resize(context.stream_len); |
| 39 | gpu.MemoryManager().ReadBlock(state.frame_bitstream_offset, frame.data(), frame.size()); | 40 | host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset, frame.data(), frame.size()); |
| 40 | return frame; | 41 | return frame; |
| 41 | } | 42 | } |
| 42 | 43 | ||
| @@ -155,8 +156,8 @@ const std::vector<u8>& H264::ComposeFrame(const NvdecCommon::NvdecRegisters& sta | |||
| 155 | frame.resize(encoded_header.size() + context.stream_len); | 156 | frame.resize(encoded_header.size() + context.stream_len); |
| 156 | std::memcpy(frame.data(), encoded_header.data(), encoded_header.size()); | 157 | std::memcpy(frame.data(), encoded_header.data(), encoded_header.size()); |
| 157 | 158 | ||
| 158 | gpu.MemoryManager().ReadBlock(state.frame_bitstream_offset, | 159 | host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset, |
| 159 | frame.data() + encoded_header.size(), context.stream_len); | 160 | frame.data() + encoded_header.size(), context.stream_len); |
| 160 | 161 | ||
| 161 | return frame; | 162 | return frame; |
| 162 | } | 163 | } |
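H264::ComposeFrame above follows a simple recipe: read the picture-info struct out of guest memory, build the encoded header, then append the guest bitstream after it. The sketch below keeps only the copy-and-append step; GuestMemory is a toy stand-in for Host1x::MemoryManager with an assumed ReadBlock(address, dest, size) shape:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    namespace sketch {
    // Toy stand-in for the memory manager: ReadBlock copies `size` bytes from a guest address.
    struct GuestMemory {
        std::vector<std::uint8_t> backing;
        void ReadBlock(std::uint64_t gpu_addr, void* dest, std::size_t size) const {
            std::memcpy(dest, backing.data() + gpu_addr, size);
        }
    };

    // Same shape as H264::ComposeFrame: emit the composed header, then append the guest bitstream.
    std::vector<std::uint8_t> ComposeFrame(const GuestMemory& memory,
                                           const std::vector<std::uint8_t>& encoded_header,
                                           std::uint64_t bitstream_offset, std::size_t stream_len) {
        std::vector<std::uint8_t> frame(encoded_header.size() + stream_len);
        std::memcpy(frame.data(), encoded_header.data(), encoded_header.size());
        memory.ReadBlock(bitstream_offset, frame.data() + encoded_header.size(), stream_len);
        return frame;
    }
    } // namespace sketch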
diff --git a/src/video_core/command_classes/codecs/h264.h b/src/video_core/host1x/codecs/h264.h index 261574364..5cc86454e 100644 --- a/src/video_core/command_classes/codecs/h264.h +++ b/src/video_core/host1x/codecs/h264.h | |||
| @@ -8,10 +8,14 @@ | |||
| 8 | #include "common/bit_field.h" | 8 | #include "common/bit_field.h" |
| 9 | #include "common/common_funcs.h" | 9 | #include "common/common_funcs.h" |
| 10 | #include "common/common_types.h" | 10 | #include "common/common_types.h" |
| 11 | #include "video_core/command_classes/nvdec_common.h" | 11 | #include "video_core/host1x/nvdec_common.h" |
| 12 | 12 | ||
| 13 | namespace Tegra { | 13 | namespace Tegra { |
| 14 | class GPU; | 14 | |
| 15 | namespace Host1x { | ||
| 16 | class Host1x; | ||
| 17 | } // namespace Host1x | ||
| 18 | |||
| 15 | namespace Decoder { | 19 | namespace Decoder { |
| 16 | 20 | ||
| 17 | class H264BitWriter { | 21 | class H264BitWriter { |
| @@ -55,16 +59,16 @@ private: | |||
| 55 | 59 | ||
| 56 | class H264 { | 60 | class H264 { |
| 57 | public: | 61 | public: |
| 58 | explicit H264(GPU& gpu); | 62 | explicit H264(Host1x::Host1x& host1x); |
| 59 | ~H264(); | 63 | ~H264(); |
| 60 | 64 | ||
| 61 | /// Compose the H264 frame for FFmpeg decoding | 65 | /// Compose the H264 frame for FFmpeg decoding |
| 62 | [[nodiscard]] const std::vector<u8>& ComposeFrame(const NvdecCommon::NvdecRegisters& state, | 66 | [[nodiscard]] const std::vector<u8>& ComposeFrame( |
| 63 | bool is_first_frame = false); | 67 | const Host1x::NvdecCommon::NvdecRegisters& state, bool is_first_frame = false); |
| 64 | 68 | ||
| 65 | private: | 69 | private: |
| 66 | std::vector<u8> frame; | 70 | std::vector<u8> frame; |
| 67 | GPU& gpu; | 71 | Host1x::Host1x& host1x; |
| 68 | 72 | ||
| 69 | struct H264ParameterSet { | 73 | struct H264ParameterSet { |
| 70 | s32 log2_max_pic_order_cnt_lsb_minus4; ///< 0x00 | 74 | s32 log2_max_pic_order_cnt_lsb_minus4; ///< 0x00 |
diff --git a/src/video_core/command_classes/codecs/vp8.cpp b/src/video_core/host1x/codecs/vp8.cpp index c83b9bbc2..28fb12cb8 100644 --- a/src/video_core/command_classes/codecs/vp8.cpp +++ b/src/video_core/host1x/codecs/vp8.cpp | |||
| @@ -3,18 +3,18 @@ | |||
| 3 | 3 | ||
| 4 | #include <vector> | 4 | #include <vector> |
| 5 | 5 | ||
| 6 | #include "video_core/command_classes/codecs/vp8.h" | 6 | #include "video_core/host1x/codecs/vp8.h" |
| 7 | #include "video_core/gpu.h" | 7 | #include "video_core/host1x/host1x.h" |
| 8 | #include "video_core/memory_manager.h" | 8 | #include "video_core/memory_manager.h" |
| 9 | 9 | ||
| 10 | namespace Tegra::Decoder { | 10 | namespace Tegra::Decoder { |
| 11 | VP8::VP8(GPU& gpu_) : gpu(gpu_) {} | 11 | VP8::VP8(Host1x::Host1x& host1x_) : host1x{host1x_} {} |
| 12 | 12 | ||
| 13 | VP8::~VP8() = default; | 13 | VP8::~VP8() = default; |
| 14 | 14 | ||
| 15 | const std::vector<u8>& VP8::ComposeFrame(const NvdecCommon::NvdecRegisters& state) { | 15 | const std::vector<u8>& VP8::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state) { |
| 16 | VP8PictureInfo info; | 16 | VP8PictureInfo info; |
| 17 | gpu.MemoryManager().ReadBlock(state.picture_info_offset, &info, sizeof(VP8PictureInfo)); | 17 | host1x.MemoryManager().ReadBlock(state.picture_info_offset, &info, sizeof(VP8PictureInfo)); |
| 18 | 18 | ||
| 19 | const bool is_key_frame = info.key_frame == 1u; | 19 | const bool is_key_frame = info.key_frame == 1u; |
| 20 | const auto bitstream_size = static_cast<size_t>(info.vld_buffer_size); | 20 | const auto bitstream_size = static_cast<size_t>(info.vld_buffer_size); |
| @@ -45,7 +45,7 @@ const std::vector<u8>& VP8::ComposeFrame(const NvdecCommon::NvdecRegisters& stat | |||
| 45 | frame[9] = static_cast<u8>(((info.frame_height >> 8) & 0x3f)); | 45 | frame[9] = static_cast<u8>(((info.frame_height >> 8) & 0x3f)); |
| 46 | } | 46 | } |
| 47 | const u64 bitstream_offset = state.frame_bitstream_offset; | 47 | const u64 bitstream_offset = state.frame_bitstream_offset; |
| 48 | gpu.MemoryManager().ReadBlock(bitstream_offset, frame.data() + header_size, bitstream_size); | 48 | host1x.MemoryManager().ReadBlock(bitstream_offset, frame.data() + header_size, bitstream_size); |
| 49 | 49 | ||
| 50 | return frame; | 50 | return frame; |
| 51 | } | 51 | } |
diff --git a/src/video_core/command_classes/codecs/vp8.h b/src/video_core/host1x/codecs/vp8.h index 3357667b0..5bf07ecab 100644 --- a/src/video_core/command_classes/codecs/vp8.h +++ b/src/video_core/host1x/codecs/vp8.h | |||
| @@ -8,23 +8,28 @@ | |||
| 8 | 8 | ||
| 9 | #include "common/common_funcs.h" | 9 | #include "common/common_funcs.h" |
| 10 | #include "common/common_types.h" | 10 | #include "common/common_types.h" |
| 11 | #include "video_core/command_classes/nvdec_common.h" | 11 | #include "video_core/host1x/nvdec_common.h" |
| 12 | 12 | ||
| 13 | namespace Tegra { | 13 | namespace Tegra { |
| 14 | class GPU; | 14 | |
| 15 | namespace Host1x { | ||
| 16 | class Host1x; | ||
| 17 | } // namespace Host1x | ||
| 18 | |||
| 15 | namespace Decoder { | 19 | namespace Decoder { |
| 16 | 20 | ||
| 17 | class VP8 { | 21 | class VP8 { |
| 18 | public: | 22 | public: |
| 19 | explicit VP8(GPU& gpu); | 23 | explicit VP8(Host1x::Host1x& host1x); |
| 20 | ~VP8(); | 24 | ~VP8(); |
| 21 | 25 | ||
| 22 | /// Compose the VP8 frame for FFmpeg decoding | 26 | /// Compose the VP8 frame for FFmpeg decoding |
| 23 | [[nodiscard]] const std::vector<u8>& ComposeFrame(const NvdecCommon::NvdecRegisters& state); | 27 | [[nodiscard]] const std::vector<u8>& ComposeFrame( |
| 28 | const Host1x::NvdecCommon::NvdecRegisters& state); | ||
| 24 | 29 | ||
| 25 | private: | 30 | private: |
| 26 | std::vector<u8> frame; | 31 | std::vector<u8> frame; |
| 27 | GPU& gpu; | 32 | Host1x::Host1x& host1x; |
| 28 | 33 | ||
| 29 | struct VP8PictureInfo { | 34 | struct VP8PictureInfo { |
| 30 | INSERT_PADDING_WORDS_NOINIT(14); | 35 | INSERT_PADDING_WORDS_NOINIT(14); |
diff --git a/src/video_core/command_classes/codecs/vp9.cpp b/src/video_core/host1x/codecs/vp9.cpp index c01431441..cf40c9012 100644 --- a/src/video_core/command_classes/codecs/vp9.cpp +++ b/src/video_core/host1x/codecs/vp9.cpp | |||
| @@ -4,8 +4,8 @@ | |||
| 4 | #include <algorithm> // for std::copy | 4 | #include <algorithm> // for std::copy |
| 5 | #include <numeric> | 5 | #include <numeric> |
| 6 | #include "common/assert.h" | 6 | #include "common/assert.h" |
| 7 | #include "video_core/command_classes/codecs/vp9.h" | 7 | #include "video_core/host1x/codecs/vp9.h" |
| 8 | #include "video_core/gpu.h" | 8 | #include "video_core/host1x/host1x.h" |
| 9 | #include "video_core/memory_manager.h" | 9 | #include "video_core/memory_manager.h" |
| 10 | 10 | ||
| 11 | namespace Tegra::Decoder { | 11 | namespace Tegra::Decoder { |
| @@ -236,7 +236,7 @@ constexpr std::array<u8, 254> map_lut{ | |||
| 236 | } | 236 | } |
| 237 | } // Anonymous namespace | 237 | } // Anonymous namespace |
| 238 | 238 | ||
| 239 | VP9::VP9(GPU& gpu_) : gpu{gpu_} {} | 239 | VP9::VP9(Host1x::Host1x& host1x_) : host1x{host1x_} {} |
| 240 | 240 | ||
| 241 | VP9::~VP9() = default; | 241 | VP9::~VP9() = default; |
| 242 | 242 | ||
| @@ -355,9 +355,9 @@ void VP9::WriteMvProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_ | |||
| 355 | } | 355 | } |
| 356 | } | 356 | } |
| 357 | 357 | ||
| 358 | Vp9PictureInfo VP9::GetVp9PictureInfo(const NvdecCommon::NvdecRegisters& state) { | 358 | Vp9PictureInfo VP9::GetVp9PictureInfo(const Host1x::NvdecCommon::NvdecRegisters& state) { |
| 359 | PictureInfo picture_info; | 359 | PictureInfo picture_info; |
| 360 | gpu.MemoryManager().ReadBlock(state.picture_info_offset, &picture_info, sizeof(PictureInfo)); | 360 | host1x.MemoryManager().ReadBlock(state.picture_info_offset, &picture_info, sizeof(PictureInfo)); |
| 361 | Vp9PictureInfo vp9_info = picture_info.Convert(); | 361 | Vp9PictureInfo vp9_info = picture_info.Convert(); |
| 362 | 362 | ||
| 363 | InsertEntropy(state.vp9_entropy_probs_offset, vp9_info.entropy); | 363 | InsertEntropy(state.vp9_entropy_probs_offset, vp9_info.entropy); |
| @@ -372,18 +372,19 @@ Vp9PictureInfo VP9::GetVp9PictureInfo(const NvdecCommon::NvdecRegisters& state) | |||
| 372 | 372 | ||
| 373 | void VP9::InsertEntropy(u64 offset, Vp9EntropyProbs& dst) { | 373 | void VP9::InsertEntropy(u64 offset, Vp9EntropyProbs& dst) { |
| 374 | EntropyProbs entropy; | 374 | EntropyProbs entropy; |
| 375 | gpu.MemoryManager().ReadBlock(offset, &entropy, sizeof(EntropyProbs)); | 375 | host1x.MemoryManager().ReadBlock(offset, &entropy, sizeof(EntropyProbs)); |
| 376 | entropy.Convert(dst); | 376 | entropy.Convert(dst); |
| 377 | } | 377 | } |
| 378 | 378 | ||
| 379 | Vp9FrameContainer VP9::GetCurrentFrame(const NvdecCommon::NvdecRegisters& state) { | 379 | Vp9FrameContainer VP9::GetCurrentFrame(const Host1x::NvdecCommon::NvdecRegisters& state) { |
| 380 | Vp9FrameContainer current_frame{}; | 380 | Vp9FrameContainer current_frame{}; |
| 381 | { | 381 | { |
| 382 | gpu.SyncGuestHost(); | 382 | // gpu.SyncGuestHost(); epic, why? |
| 383 | current_frame.info = GetVp9PictureInfo(state); | 383 | current_frame.info = GetVp9PictureInfo(state); |
| 384 | current_frame.bit_stream.resize(current_frame.info.bitstream_size); | 384 | current_frame.bit_stream.resize(current_frame.info.bitstream_size); |
| 385 | gpu.MemoryManager().ReadBlock(state.frame_bitstream_offset, current_frame.bit_stream.data(), | 385 | host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset, |
| 386 | current_frame.info.bitstream_size); | 386 | current_frame.bit_stream.data(), |
| 387 | current_frame.info.bitstream_size); | ||
| 387 | } | 388 | } |
| 388 | if (!next_frame.bit_stream.empty()) { | 389 | if (!next_frame.bit_stream.empty()) { |
| 389 | Vp9FrameContainer temp{ | 390 | Vp9FrameContainer temp{ |
| @@ -769,7 +770,7 @@ VpxBitStreamWriter VP9::ComposeUncompressedHeader() { | |||
| 769 | return uncomp_writer; | 770 | return uncomp_writer; |
| 770 | } | 771 | } |
| 771 | 772 | ||
| 772 | void VP9::ComposeFrame(const NvdecCommon::NvdecRegisters& state) { | 773 | void VP9::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state) { |
| 773 | std::vector<u8> bitstream; | 774 | std::vector<u8> bitstream; |
| 774 | { | 775 | { |
| 775 | Vp9FrameContainer curr_frame = GetCurrentFrame(state); | 776 | Vp9FrameContainer curr_frame = GetCurrentFrame(state); |
diff --git a/src/video_core/command_classes/codecs/vp9.h b/src/video_core/host1x/codecs/vp9.h index ecc40e8b1..d4083e8d3 100644 --- a/src/video_core/command_classes/codecs/vp9.h +++ b/src/video_core/host1x/codecs/vp9.h | |||
| @@ -8,11 +8,15 @@ | |||
| 8 | 8 | ||
| 9 | #include "common/common_types.h" | 9 | #include "common/common_types.h" |
| 10 | #include "common/stream.h" | 10 | #include "common/stream.h" |
| 11 | #include "video_core/command_classes/codecs/vp9_types.h" | 11 | #include "video_core/host1x/codecs/vp9_types.h" |
| 12 | #include "video_core/command_classes/nvdec_common.h" | 12 | #include "video_core/host1x/nvdec_common.h" |
| 13 | 13 | ||
| 14 | namespace Tegra { | 14 | namespace Tegra { |
| 15 | class GPU; | 15 | |
| 16 | namespace Host1x { | ||
| 17 | class Host1x; | ||
| 18 | } // namespace Host1x | ||
| 19 | |||
| 16 | namespace Decoder { | 20 | namespace Decoder { |
| 17 | 21 | ||
| 18 | /// The VpxRangeEncoder, and VpxBitStreamWriter classes are used to compose the | 22 | /// The VpxRangeEncoder, and VpxBitStreamWriter classes are used to compose the |
| @@ -106,7 +110,7 @@ private: | |||
| 106 | 110 | ||
| 107 | class VP9 { | 111 | class VP9 { |
| 108 | public: | 112 | public: |
| 109 | explicit VP9(GPU& gpu_); | 113 | explicit VP9(Host1x::Host1x& host1x); |
| 110 | ~VP9(); | 114 | ~VP9(); |
| 111 | 115 | ||
| 112 | VP9(const VP9&) = delete; | 116 | VP9(const VP9&) = delete; |
| @@ -117,7 +121,7 @@ public: | |||
| 117 | 121 | ||
| 118 | /// Composes the VP9 frame from the GPU state information. | 122 | /// Composes the VP9 frame from the GPU state information. |
| 119 | /// Based on the official VP9 spec documentation | 123 | /// Based on the official VP9 spec documentation |
| 120 | void ComposeFrame(const NvdecCommon::NvdecRegisters& state); | 124 | void ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state); |
| 121 | 125 | ||
| 122 | /// Returns true if the most recent frame was a hidden frame. | 126 | /// Returns true if the most recent frame was a hidden frame. |
| 123 | [[nodiscard]] bool WasFrameHidden() const { | 127 | [[nodiscard]] bool WasFrameHidden() const { |
| @@ -162,19 +166,21 @@ private: | |||
| 162 | void WriteMvProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_prob); | 166 | void WriteMvProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_prob); |
| 163 | 167 | ||
| 164 | /// Returns VP9 information from NVDEC provided offset and size | 168 | /// Returns VP9 information from NVDEC provided offset and size |
| 165 | [[nodiscard]] Vp9PictureInfo GetVp9PictureInfo(const NvdecCommon::NvdecRegisters& state); | 169 | [[nodiscard]] Vp9PictureInfo GetVp9PictureInfo( |
| 170 | const Host1x::NvdecCommon::NvdecRegisters& state); | ||
| 166 | 171 | ||
| 167 | /// Read and convert NVDEC provided entropy probs to Vp9EntropyProbs struct | 172 | /// Read and convert NVDEC provided entropy probs to Vp9EntropyProbs struct |
| 168 | void InsertEntropy(u64 offset, Vp9EntropyProbs& dst); | 173 | void InsertEntropy(u64 offset, Vp9EntropyProbs& dst); |
| 169 | 174 | ||
| 170 | /// Returns frame to be decoded after buffering | 175 | /// Returns frame to be decoded after buffering |
| 171 | [[nodiscard]] Vp9FrameContainer GetCurrentFrame(const NvdecCommon::NvdecRegisters& state); | 176 | [[nodiscard]] Vp9FrameContainer GetCurrentFrame( |
| 177 | const Host1x::NvdecCommon::NvdecRegisters& state); | ||
| 172 | 178 | ||
| 173 | /// Use NVDEC provided information to compose the headers for the current frame | 179 | /// Use NVDEC provided information to compose the headers for the current frame |
| 174 | [[nodiscard]] std::vector<u8> ComposeCompressedHeader(); | 180 | [[nodiscard]] std::vector<u8> ComposeCompressedHeader(); |
| 175 | [[nodiscard]] VpxBitStreamWriter ComposeUncompressedHeader(); | 181 | [[nodiscard]] VpxBitStreamWriter ComposeUncompressedHeader(); |
| 176 | 182 | ||
| 177 | GPU& gpu; | 183 | Host1x::Host1x& host1x; |
| 178 | std::vector<u8> frame; | 184 | std::vector<u8> frame; |
| 179 | 185 | ||
| 180 | std::array<s8, 4> loop_filter_ref_deltas{}; | 186 | std::array<s8, 4> loop_filter_ref_deltas{}; |
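The header above only declares the composition helpers; the writers themselves emit the VP9 uncompressed header bit by bit, most significant bit first. As a rough, standalone illustration of that kind of MSB-first writer (a sketch only; it does not reproduce the VpxBitStreamWriter interface):

#include <cstdint>
#include <vector>

// Minimal MSB-first bit writer: each WriteU call appends `bits` bits of `value`,
// starting a new byte whenever the previous one is full.
class MiniBitWriter {
public:
    void WriteU(std::uint32_t value, int bits) {
        for (int i = bits - 1; i >= 0; --i) {
            const bool bit = ((value >> i) & 1u) != 0;
            if (bit_pos == 0) {
                buffer.push_back(0);
            }
            buffer.back() |= static_cast<std::uint8_t>(bit << (7 - bit_pos));
            bit_pos = (bit_pos + 1) % 8;
        }
    }

    const std::vector<std::uint8_t>& Data() const {
        return buffer;
    }

private:
    std::vector<std::uint8_t> buffer;
    int bit_pos{};
};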
diff --git a/src/video_core/command_classes/codecs/vp9_types.h b/src/video_core/host1x/codecs/vp9_types.h index bb3d8df6e..adad8ed7e 100644 --- a/src/video_core/command_classes/codecs/vp9_types.h +++ b/src/video_core/host1x/codecs/vp9_types.h | |||
| @@ -9,7 +9,6 @@ | |||
| 9 | #include "common/common_types.h" | 9 | #include "common/common_types.h" |
| 10 | 10 | ||
| 11 | namespace Tegra { | 11 | namespace Tegra { |
| 12 | class GPU; | ||
| 13 | 12 | ||
| 14 | namespace Decoder { | 13 | namespace Decoder { |
| 15 | struct Vp9FrameDimensions { | 14 | struct Vp9FrameDimensions { |
diff --git a/src/video_core/host1x/control.cpp b/src/video_core/host1x/control.cpp new file mode 100644 index 000000000..dceefdb7f --- /dev/null +++ b/src/video_core/host1x/control.cpp | |||
| @@ -0,0 +1,33 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2021 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | |||
| 4 | #include "common/assert.h" | ||
| 5 | #include "video_core/host1x/control.h" | ||
| 6 | #include "video_core/host1x/host1x.h" | ||
| 7 | |||
| 8 | namespace Tegra::Host1x { | ||
| 9 | |||
| 10 | Control::Control(Host1x& host1x_) : host1x(host1x_) {} | ||
| 11 | |||
| 12 | Control::~Control() = default; | ||
| 13 | |||
| 14 | void Control::ProcessMethod(Method method, u32 argument) { | ||
| 15 | switch (method) { | ||
| 16 | case Method::LoadSyncptPayload32: | ||
| 17 | syncpoint_value = argument; | ||
| 18 | break; | ||
| 19 | case Method::WaitSyncpt: | ||
| 20 | case Method::WaitSyncpt32: | ||
| 21 | Execute(argument); | ||
| 22 | break; | ||
| 23 | default: | ||
| 24 | UNIMPLEMENTED_MSG("Control method 0x{:X}", static_cast<u32>(method)); | ||
| 25 | break; | ||
| 26 | } | ||
| 27 | } | ||
| 28 | |||
| 29 | void Control::Execute(u32 data) { | ||
| 30 | host1x.GetSyncpointManager().WaitHost(data, syncpoint_value); | ||
| 31 | } | ||
| 32 | |||
| 33 | } // namespace Tegra::Host1x | ||
diff --git a/src/video_core/command_classes/host1x.h b/src/video_core/host1x/control.h index bb48a4381..e117888a3 100644 --- a/src/video_core/command_classes/host1x.h +++ b/src/video_core/host1x/control.h | |||
| @@ -1,15 +1,19 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: 2021 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-FileCopyrightText: 2021 Skyline Team and Contributors |
| 3 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | 4 | ||
| 4 | #pragma once | 5 | #pragma once |
| 5 | 6 | ||
| 6 | #include "common/common_types.h" | 7 | #include "common/common_types.h" |
| 7 | 8 | ||
| 8 | namespace Tegra { | 9 | namespace Tegra { |
| 9 | class GPU; | 10 | |
| 11 | namespace Host1x { | ||
| 12 | |||
| 13 | class Host1x; | ||
| 10 | class Nvdec; | 14 | class Nvdec; |
| 11 | 15 | ||
| 12 | class Host1x { | 16 | class Control { |
| 13 | public: | 17 | public: |
| 14 | enum class Method : u32 { | 18 | enum class Method : u32 { |
| 15 | WaitSyncpt = 0x8, | 19 | WaitSyncpt = 0x8, |
| @@ -17,8 +21,8 @@ public: | |||
| 17 | WaitSyncpt32 = 0x50, | 21 | WaitSyncpt32 = 0x50, |
| 18 | }; | 22 | }; |
| 19 | 23 | ||
| 20 | explicit Host1x(GPU& gpu); | 24 | explicit Control(Host1x& host1x); |
| 21 | ~Host1x(); | 25 | ~Control(); |
| 22 | 26 | ||
| 23 | /// Writes the method into the state, Invoke Execute() if encountered | 27 | /// Writes the method into the state, Invoke Execute() if encountered |
| 24 | void ProcessMethod(Method method, u32 argument); | 28 | void ProcessMethod(Method method, u32 argument); |
| @@ -28,7 +32,9 @@ private: | |||
| 28 | void Execute(u32 data); | 32 | void Execute(u32 data); |
| 29 | 33 | ||
| 30 | u32 syncpoint_value{}; | 34 | u32 syncpoint_value{}; |
| 31 | GPU& gpu; | 35 | Host1x& host1x; |
| 32 | }; | 36 | }; |
| 33 | 37 | ||
| 38 | } // namespace Host1x | ||
| 39 | |||
| 34 | } // namespace Tegra | 40 | } // namespace Tegra |
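Control keeps the argument of the last LoadSyncptPayload32 as the value to compare against, and the Wait* methods then block through the host syncpoint manager until the syncpoint named by their argument reaches that value. A standalone sketch of that two-step state machine (hypothetical types, real method IDs omitted):

#include <cstdint>
#include <functional>
#include <utility>

// Illustrative only: the wait itself is delegated to a callback standing in for
// SyncpointManager::WaitHost.
class MiniControl {
public:
    enum class Method { LoadSyncptPayload32, WaitSyncpt, WaitSyncpt32 };
    using WaitFn = std::function<void(std::uint32_t id, std::uint32_t threshold)>;

    explicit MiniControl(WaitFn wait_host_) : wait_host{std::move(wait_host_)} {}

    void ProcessMethod(Method method, std::uint32_t argument) {
        switch (method) {
        case Method::LoadSyncptPayload32:
            payload = argument; // latch the comparison value
            break;
        case Method::WaitSyncpt:
        case Method::WaitSyncpt32:
            wait_host(argument, payload); // argument selects the syncpoint id
            break;
        }
    }

private:
    std::uint32_t payload{};
    WaitFn wait_host;
};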
diff --git a/src/video_core/host1x/host1x.cpp b/src/video_core/host1x/host1x.cpp new file mode 100644 index 000000000..7c317a85d --- /dev/null +++ b/src/video_core/host1x/host1x.cpp | |||
| @@ -0,0 +1,17 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2021 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | |||
| 4 | #include "core/core.h" | ||
| 5 | #include "video_core/host1x/host1x.h" | ||
| 6 | |||
| 7 | namespace Tegra { | ||
| 8 | |||
| 9 | namespace Host1x { | ||
| 10 | |||
| 11 | Host1x::Host1x(Core::System& system_) | ||
| 12 | : system{system_}, syncpoint_manager{}, memory_manager{system, 32, 12}, | ||
| 13 | allocator{std::make_unique<Common::FlatAllocator<u32, 0, 32>>(1 << 12)} {} | ||
| 14 | |||
| 15 | } // namespace Host1x | ||
| 16 | |||
| 17 | } // namespace Tegra | ||
diff --git a/src/video_core/host1x/host1x.h b/src/video_core/host1x/host1x.h new file mode 100644 index 000000000..57082ae54 --- /dev/null +++ b/src/video_core/host1x/host1x.h | |||
| @@ -0,0 +1,57 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2021 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include "common/common_types.h" | ||
| 7 | |||
| 8 | #include "common/address_space.h" | ||
| 9 | #include "video_core/host1x/syncpoint_manager.h" | ||
| 10 | #include "video_core/memory_manager.h" | ||
| 11 | |||
| 12 | namespace Core { | ||
| 13 | class System; | ||
| 14 | } // namespace Core | ||
| 15 | |||
| 16 | namespace Tegra { | ||
| 17 | |||
| 18 | namespace Host1x { | ||
| 19 | |||
| 20 | class Host1x { | ||
| 21 | public: | ||
| 22 | explicit Host1x(Core::System& system); | ||
| 23 | |||
| 24 | SyncpointManager& GetSyncpointManager() { | ||
| 25 | return syncpoint_manager; | ||
| 26 | } | ||
| 27 | |||
| 28 | const SyncpointManager& GetSyncpointManager() const { | ||
| 29 | return syncpoint_manager; | ||
| 30 | } | ||
| 31 | |||
| 32 | Tegra::MemoryManager& MemoryManager() { | ||
| 33 | return memory_manager; | ||
| 34 | } | ||
| 35 | |||
| 36 | const Tegra::MemoryManager& MemoryManager() const { | ||
| 37 | return memory_manager; | ||
| 38 | } | ||
| 39 | |||
| 40 | Common::FlatAllocator<u32, 0, 32>& Allocator() { | ||
| 41 | return *allocator; | ||
| 42 | } | ||
| 43 | |||
| 44 | const Common::FlatAllocator<u32, 0, 32>& Allocator() const { | ||
| 45 | return *allocator; | ||
| 46 | } | ||
| 47 | |||
| 48 | private: | ||
| 49 | Core::System& system; | ||
| 50 | SyncpointManager syncpoint_manager; | ||
| 51 | Tegra::MemoryManager memory_manager; | ||
| 52 | std::unique_ptr<Common::FlatAllocator<u32, 0, 32>> allocator; | ||
| 53 | }; | ||
| 54 | |||
| 55 | } // namespace Host1x | ||
| 56 | |||
| 57 | } // namespace Tegra | ||
diff --git a/src/video_core/command_classes/nvdec.cpp b/src/video_core/host1x/nvdec.cpp index 4fbbe3da6..a4bd5b79f 100644 --- a/src/video_core/command_classes/nvdec.cpp +++ b/src/video_core/host1x/nvdec.cpp | |||
| @@ -2,15 +2,16 @@ | |||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | 3 | ||
| 4 | #include "common/assert.h" | 4 | #include "common/assert.h" |
| 5 | #include "video_core/command_classes/nvdec.h" | 5 | #include "video_core/host1x/host1x.h" |
| 6 | #include "video_core/gpu.h" | 6 | #include "video_core/host1x/nvdec.h" |
| 7 | 7 | ||
| 8 | namespace Tegra { | 8 | namespace Tegra::Host1x { |
| 9 | 9 | ||
| 10 | #define NVDEC_REG_INDEX(field_name) \ | 10 | #define NVDEC_REG_INDEX(field_name) \ |
| 11 | (offsetof(NvdecCommon::NvdecRegisters, field_name) / sizeof(u64)) | 11 | (offsetof(NvdecCommon::NvdecRegisters, field_name) / sizeof(u64)) |
| 12 | 12 | ||
| 13 | Nvdec::Nvdec(GPU& gpu_) : gpu(gpu_), state{}, codec(std::make_unique<Codec>(gpu, state)) {} | 13 | Nvdec::Nvdec(Host1x& host1x_) |
| 14 | : host1x(host1x_), state{}, codec(std::make_unique<Codec>(host1x, state)) {} | ||
| 14 | 15 | ||
| 15 | Nvdec::~Nvdec() = default; | 16 | Nvdec::~Nvdec() = default; |
| 16 | 17 | ||
| @@ -44,4 +45,4 @@ void Nvdec::Execute() { | |||
| 44 | } | 45 | } |
| 45 | } | 46 | } |
| 46 | 47 | ||
| 47 | } // namespace Tegra | 48 | } // namespace Tegra::Host1x |
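The NVDEC_REG_INDEX macro above turns a field of NvdecRegisters into its method slot by dividing the field's byte offset by sizeof(u64), which works because every register occupies one 64-bit slot. A self-contained sketch of the same idea against a hypothetical register struct:

#include <cstddef>
#include <cstdint>

// Hypothetical layout, not the real NvdecRegisters.
struct ExampleRegisters {
    std::uint64_t set_codec_id;     // slot 0
    std::uint64_t execute;          // slot 1
    std::uint64_t bitstream_offset; // slot 2
};

constexpr std::size_t RegIndex(std::size_t byte_offset) {
    return byte_offset / sizeof(std::uint64_t);
}

static_assert(RegIndex(offsetof(ExampleRegisters, bitstream_offset)) == 2,
              "each u64 field maps to one method slot");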
diff --git a/src/video_core/command_classes/nvdec.h b/src/video_core/host1x/nvdec.h index 488531fc6..3949d5181 100644 --- a/src/video_core/command_classes/nvdec.h +++ b/src/video_core/host1x/nvdec.h | |||
| @@ -6,14 +6,17 @@ | |||
| 6 | #include <memory> | 6 | #include <memory> |
| 7 | #include <vector> | 7 | #include <vector> |
| 8 | #include "common/common_types.h" | 8 | #include "common/common_types.h" |
| 9 | #include "video_core/command_classes/codecs/codec.h" | 9 | #include "video_core/host1x/codecs/codec.h" |
| 10 | 10 | ||
| 11 | namespace Tegra { | 11 | namespace Tegra { |
| 12 | class GPU; | 12 | |
| 13 | namespace Host1x { | ||
| 14 | |||
| 15 | class Host1x; | ||
| 13 | 16 | ||
| 14 | class Nvdec { | 17 | class Nvdec { |
| 15 | public: | 18 | public: |
| 16 | explicit Nvdec(GPU& gpu); | 19 | explicit Nvdec(Host1x& host1x); |
| 17 | ~Nvdec(); | 20 | ~Nvdec(); |
| 18 | 21 | ||
| 19 | /// Writes the method into the state, Invoke Execute() if encountered | 22 | /// Writes the method into the state, Invoke Execute() if encountered |
| @@ -26,8 +29,11 @@ private: | |||
| 26 | /// Invoke codec to decode a frame | 29 | /// Invoke codec to decode a frame |
| 27 | void Execute(); | 30 | void Execute(); |
| 28 | 31 | ||
| 29 | GPU& gpu; | 32 | Host1x& host1x; |
| 30 | NvdecCommon::NvdecRegisters state; | 33 | NvdecCommon::NvdecRegisters state; |
| 31 | std::unique_ptr<Codec> codec; | 34 | std::unique_ptr<Codec> codec; |
| 32 | }; | 35 | }; |
| 36 | |||
| 37 | } // namespace Host1x | ||
| 38 | |||
| 33 | } // namespace Tegra | 39 | } // namespace Tegra |
diff --git a/src/video_core/command_classes/nvdec_common.h b/src/video_core/host1x/nvdec_common.h index 521e5b52b..49d67ebbe 100644 --- a/src/video_core/command_classes/nvdec_common.h +++ b/src/video_core/host1x/nvdec_common.h | |||
| @@ -7,7 +7,7 @@ | |||
| 7 | #include "common/common_funcs.h" | 7 | #include "common/common_funcs.h" |
| 8 | #include "common/common_types.h" | 8 | #include "common/common_types.h" |
| 9 | 9 | ||
| 10 | namespace Tegra::NvdecCommon { | 10 | namespace Tegra::Host1x::NvdecCommon { |
| 11 | 11 | ||
| 12 | enum class VideoCodec : u64 { | 12 | enum class VideoCodec : u64 { |
| 13 | None = 0x0, | 13 | None = 0x0, |
| @@ -94,4 +94,4 @@ ASSERT_REG_POSITION(vp9_curr_frame_mvs_offset, 0x176); | |||
| 94 | 94 | ||
| 95 | #undef ASSERT_REG_POSITION | 95 | #undef ASSERT_REG_POSITION |
| 96 | 96 | ||
| 97 | } // namespace Tegra::NvdecCommon | 97 | } // namespace Tegra::Host1x::NvdecCommon |
diff --git a/src/video_core/command_classes/sync_manager.cpp b/src/video_core/host1x/sync_manager.cpp index 67e58046f..5ef9ea217 100644 --- a/src/video_core/command_classes/sync_manager.cpp +++ b/src/video_core/host1x/sync_manager.cpp | |||
| @@ -3,10 +3,13 @@ | |||
| 3 | 3 | ||
| 4 | #include <algorithm> | 4 | #include <algorithm> |
| 5 | #include "sync_manager.h" | 5 | #include "sync_manager.h" |
| 6 | #include "video_core/gpu.h" | 6 | #include "video_core/host1x/host1x.h" |
| 7 | #include "video_core/host1x/syncpoint_manager.h" | ||
| 7 | 8 | ||
| 8 | namespace Tegra { | 9 | namespace Tegra { |
| 9 | SyncptIncrManager::SyncptIncrManager(GPU& gpu_) : gpu(gpu_) {} | 10 | namespace Host1x { |
| 11 | |||
| 12 | SyncptIncrManager::SyncptIncrManager(Host1x& host1x_) : host1x(host1x_) {} | ||
| 10 | SyncptIncrManager::~SyncptIncrManager() = default; | 13 | SyncptIncrManager::~SyncptIncrManager() = default; |
| 11 | 14 | ||
| 12 | void SyncptIncrManager::Increment(u32 id) { | 15 | void SyncptIncrManager::Increment(u32 id) { |
| @@ -36,8 +39,12 @@ void SyncptIncrManager::IncrementAllDone() { | |||
| 36 | if (!increments[done_count].complete) { | 39 | if (!increments[done_count].complete) { |
| 37 | break; | 40 | break; |
| 38 | } | 41 | } |
| 39 | gpu.IncrementSyncPoint(increments[done_count].syncpt_id); | 42 | auto& syncpoint_manager = host1x.GetSyncpointManager(); |
| 43 | syncpoint_manager.IncrementGuest(increments[done_count].syncpt_id); | ||
| 44 | syncpoint_manager.IncrementHost(increments[done_count].syncpt_id); | ||
| 40 | } | 45 | } |
| 41 | increments.erase(increments.begin(), increments.begin() + done_count); | 46 | increments.erase(increments.begin(), increments.begin() + done_count); |
| 42 | } | 47 | } |
| 48 | |||
| 49 | } // namespace Host1x | ||
| 43 | } // namespace Tegra | 50 | } // namespace Tegra |
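IncrementAllDone above only pops the contiguous prefix of completed entries, so syncpoints are incremented in the order the increments were queued even when later ones are signalled first. A standalone sketch of that in-order completion queue (hypothetical names, not the SyncptIncrManager API):

#include <cstddef>
#include <cstdint>
#include <deque>
#include <functional>
#include <utility>

struct PendingIncr {
    std::size_t handle;
    std::uint32_t syncpt_id;
    bool complete;
};

class MiniIncrQueue {
public:
    explicit MiniIncrQueue(std::function<void(std::uint32_t)> increment_)
        : increment{std::move(increment_)} {}

    // Queue an increment to be performed once it is signalled done; returns a handle.
    std::size_t IncrementWhenDone(std::uint32_t syncpt_id) {
        const std::size_t handle = next_handle++;
        pending.push_back({handle, syncpt_id, false});
        return handle;
    }

    void SignalDone(std::size_t handle) {
        for (auto& incr : pending) {
            if (incr.handle == handle) {
                incr.complete = true;
                break;
            }
        }
        // Pop only the completed prefix so increments happen in submission order.
        while (!pending.empty() && pending.front().complete) {
            increment(pending.front().syncpt_id);
            pending.pop_front();
        }
    }

private:
    std::function<void(std::uint32_t)> increment;
    std::deque<PendingIncr> pending;
    std::size_t next_handle{1};
};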
diff --git a/src/video_core/command_classes/sync_manager.h b/src/video_core/host1x/sync_manager.h index 6dfaae080..7bb77fa27 100644 --- a/src/video_core/command_classes/sync_manager.h +++ b/src/video_core/host1x/sync_manager.h | |||
| @@ -8,7 +8,11 @@ | |||
| 8 | #include "common/common_types.h" | 8 | #include "common/common_types.h" |
| 9 | 9 | ||
| 10 | namespace Tegra { | 10 | namespace Tegra { |
| 11 | class GPU; | 11 | |
| 12 | namespace Host1x { | ||
| 13 | |||
| 14 | class Host1x; | ||
| 15 | |||
| 12 | struct SyncptIncr { | 16 | struct SyncptIncr { |
| 13 | u32 id; | 17 | u32 id; |
| 14 | u32 class_id; | 18 | u32 class_id; |
| @@ -21,7 +25,7 @@ struct SyncptIncr { | |||
| 21 | 25 | ||
| 22 | class SyncptIncrManager { | 26 | class SyncptIncrManager { |
| 23 | public: | 27 | public: |
| 24 | explicit SyncptIncrManager(GPU& gpu); | 28 | explicit SyncptIncrManager(Host1x& host1x); |
| 25 | ~SyncptIncrManager(); | 29 | ~SyncptIncrManager(); |
| 26 | 30 | ||
| 27 | /// Add syncpoint id and increment all | 31 | /// Add syncpoint id and increment all |
| @@ -41,7 +45,9 @@ private: | |||
| 41 | std::mutex increment_lock; | 45 | std::mutex increment_lock; |
| 42 | u32 current_id{}; | 46 | u32 current_id{}; |
| 43 | 47 | ||
| 44 | GPU& gpu; | 48 | Host1x& host1x; |
| 45 | }; | 49 | }; |
| 46 | 50 | ||
| 51 | } // namespace Host1x | ||
| 52 | |||
| 47 | } // namespace Tegra | 53 | } // namespace Tegra |
diff --git a/src/video_core/host1x/syncpoint_manager.cpp b/src/video_core/host1x/syncpoint_manager.cpp new file mode 100644 index 000000000..326e8355a --- /dev/null +++ b/src/video_core/host1x/syncpoint_manager.cpp | |||
| @@ -0,0 +1,96 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2021 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | |||
| 4 | #include "common/microprofile.h" | ||
| 5 | #include "video_core/host1x/syncpoint_manager.h" | ||
| 6 | |||
| 7 | namespace Tegra { | ||
| 8 | |||
| 9 | namespace Host1x { | ||
| 10 | |||
| 11 | MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192)); | ||
| 12 | |||
| 13 | SyncpointManager::ActionHandle SyncpointManager::RegisterAction( | ||
| 14 | std::atomic<u32>& syncpoint, std::list<RegisteredAction>& action_storage, u32 expected_value, | ||
| 15 | std::function<void()>&& action) { | ||
| 16 | if (syncpoint.load(std::memory_order_acquire) >= expected_value) { | ||
| 17 | action(); | ||
| 18 | return {}; | ||
| 19 | } | ||
| 20 | |||
| 21 | std::unique_lock lk(guard); | ||
| 22 | if (syncpoint.load(std::memory_order_relaxed) >= expected_value) { | ||
| 23 | action(); | ||
| 24 | return {}; | ||
| 25 | } | ||
| 26 | auto it = action_storage.begin(); | ||
| 27 | while (it != action_storage.end()) { | ||
| 28 | if (it->expected_value >= expected_value) { | ||
| 29 | break; | ||
| 30 | } | ||
| 31 | ++it; | ||
| 32 | } | ||
| 33 | return action_storage.emplace(it, expected_value, std::move(action)); | ||
| 34 | } | ||
| 35 | |||
| 36 | void SyncpointManager::DeregisterAction(std::list<RegisteredAction>& action_storage, | ||
| 37 | ActionHandle& handle) { | ||
| 38 | std::unique_lock lk(guard); | ||
| 39 | action_storage.erase(handle); | ||
| 40 | } | ||
| 41 | |||
| 42 | void SyncpointManager::DeregisterGuestAction(u32 syncpoint_id, ActionHandle& handle) { | ||
| 43 | DeregisterAction(guest_action_storage[syncpoint_id], handle); | ||
| 44 | } | ||
| 45 | |||
| 46 | void SyncpointManager::DeregisterHostAction(u32 syncpoint_id, ActionHandle& handle) { | ||
| 47 | DeregisterAction(host_action_storage[syncpoint_id], handle); | ||
| 48 | } | ||
| 49 | |||
| 50 | void SyncpointManager::IncrementGuest(u32 syncpoint_id) { | ||
| 51 | Increment(syncpoints_guest[syncpoint_id], wait_guest_cv, guest_action_storage[syncpoint_id]); | ||
| 52 | } | ||
| 53 | |||
| 54 | void SyncpointManager::IncrementHost(u32 syncpoint_id) { | ||
| 55 | Increment(syncpoints_host[syncpoint_id], wait_host_cv, host_action_storage[syncpoint_id]); | ||
| 56 | } | ||
| 57 | |||
| 58 | void SyncpointManager::WaitGuest(u32 syncpoint_id, u32 expected_value) { | ||
| 59 | Wait(syncpoints_guest[syncpoint_id], wait_guest_cv, expected_value); | ||
| 60 | } | ||
| 61 | |||
| 62 | void SyncpointManager::WaitHost(u32 syncpoint_id, u32 expected_value) { | ||
| 63 | MICROPROFILE_SCOPE(GPU_wait); | ||
| 64 | Wait(syncpoints_host[syncpoint_id], wait_host_cv, expected_value); | ||
| 65 | } | ||
| 66 | |||
| 67 | void SyncpointManager::Increment(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv, | ||
| 68 | std::list<RegisteredAction>& action_storage) { | ||
| 69 | auto new_value{syncpoint.fetch_add(1, std::memory_order_acq_rel) + 1}; | ||
| 70 | |||
| 71 | std::unique_lock lk(guard); | ||
| 72 | auto it = action_storage.begin(); | ||
| 73 | while (it != action_storage.end()) { | ||
| 74 | if (it->expected_value > new_value) { | ||
| 75 | break; | ||
| 76 | } | ||
| 77 | it->action(); | ||
| 78 | it = action_storage.erase(it); | ||
| 79 | } | ||
| 80 | wait_cv.notify_all(); | ||
| 81 | } | ||
| 82 | |||
| 83 | void SyncpointManager::Wait(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv, | ||
| 84 | u32 expected_value) { | ||
| 85 | const auto pred = [&]() { return syncpoint.load(std::memory_order_acquire) >= expected_value; }; | ||
| 86 | if (pred()) { | ||
| 87 | return; | ||
| 88 | } | ||
| 89 | |||
| 90 | std::unique_lock lk(guard); | ||
| 91 | wait_cv.wait(lk, pred); | ||
| 92 | } | ||
| 93 | |||
| 94 | } // namespace Host1x | ||
| 95 | |||
| 96 | } // namespace Tegra | ||
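The manager pairs an atomic counter per syncpoint with a mutex-guarded action list kept sorted by expected value: RegisterAction double-checks the counter before and after taking the lock, Increment bumps the counter and fires every action whose threshold has been reached, and Wait blocks on a condition variable. A condensed standalone sketch of that pattern for a single counter (not the yuzu class itself):

#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <functional>
#include <list>
#include <mutex>
#include <utility>

class MiniSyncpoint {
public:
    using Action = std::function<void()>;
    using Handle = std::list<std::pair<std::uint32_t, Action>>::iterator;

    // Run `action` once the counter reaches `expected`; runs immediately if already reached.
    Handle RegisterAction(std::uint32_t expected, Action action) {
        if (value.load(std::memory_order_acquire) >= expected) {
            action();
            return {};
        }
        std::unique_lock lk(guard);
        if (value.load(std::memory_order_relaxed) >= expected) {
            action();
            return {};
        }
        // Keep the list sorted by threshold so Increment can stop at the first pending one.
        auto it = actions.begin();
        while (it != actions.end() && it->first < expected) {
            ++it;
        }
        return actions.emplace(it, expected, std::move(action));
    }

    void Increment() {
        const auto new_value = value.fetch_add(1, std::memory_order_acq_rel) + 1;
        std::unique_lock lk(guard);
        for (auto it = actions.begin(); it != actions.end() && it->first <= new_value;) {
            it->second();
            it = actions.erase(it);
        }
        cv.notify_all();
    }

    void Wait(std::uint32_t expected) {
        const auto pred = [&] { return value.load(std::memory_order_acquire) >= expected; };
        if (pred()) {
            return;
        }
        std::unique_lock lk(guard);
        cv.wait(lk, pred);
    }

private:
    std::atomic<std::uint32_t> value{};
    std::list<std::pair<std::uint32_t, Action>> actions;
    std::mutex guard;
    std::condition_variable cv;
};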
diff --git a/src/video_core/host1x/syncpoint_manager.h b/src/video_core/host1x/syncpoint_manager.h new file mode 100644 index 000000000..50a264e23 --- /dev/null +++ b/src/video_core/host1x/syncpoint_manager.h | |||
| @@ -0,0 +1,98 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2021 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <array> | ||
| 7 | #include <atomic> | ||
| 8 | #include <condition_variable> | ||
| 9 | #include <functional> | ||
| 10 | #include <list> | ||
| 11 | #include <mutex> | ||
| 12 | |||
| 13 | #include "common/common_types.h" | ||
| 14 | |||
| 15 | namespace Tegra { | ||
| 16 | |||
| 17 | namespace Host1x { | ||
| 18 | |||
| 19 | class SyncpointManager { | ||
| 20 | public: | ||
| 21 | u32 GetGuestSyncpointValue(u32 id) const { | ||
| 22 | return syncpoints_guest[id].load(std::memory_order_acquire); | ||
| 23 | } | ||
| 24 | |||
| 25 | u32 GetHostSyncpointValue(u32 id) const { | ||
| 26 | return syncpoints_host[id].load(std::memory_order_acquire); | ||
| 27 | } | ||
| 28 | |||
| 29 | struct RegisteredAction { | ||
| 30 | explicit RegisteredAction(u32 expected_value_, std::function<void()>&& action_) | ||
| 31 | : expected_value{expected_value_}, action{std::move(action_)} {} | ||
| 32 | u32 expected_value; | ||
| 33 | std::function<void()> action; | ||
| 34 | }; | ||
| 35 | using ActionHandle = std::list<RegisteredAction>::iterator; | ||
| 36 | |||
| 37 | template <typename Func> | ||
| 38 | ActionHandle RegisterGuestAction(u32 syncpoint_id, u32 expected_value, Func&& action) { | ||
| 39 | std::function<void()> func(action); | ||
| 40 | return RegisterAction(syncpoints_guest[syncpoint_id], guest_action_storage[syncpoint_id], | ||
| 41 | expected_value, std::move(func)); | ||
| 42 | } | ||
| 43 | |||
| 44 | template <typename Func> | ||
| 45 | ActionHandle RegisterHostAction(u32 syncpoint_id, u32 expected_value, Func&& action) { | ||
| 46 | std::function<void()> func(action); | ||
| 47 | return RegisterAction(syncpoints_host[syncpoint_id], host_action_storage[syncpoint_id], | ||
| 48 | expected_value, std::move(func)); | ||
| 49 | } | ||
| 50 | |||
| 51 | void DeregisterGuestAction(u32 syncpoint_id, ActionHandle& handle); | ||
| 52 | |||
| 53 | void DeregisterHostAction(u32 syncpoint_id, ActionHandle& handle); | ||
| 54 | |||
| 55 | void IncrementGuest(u32 syncpoint_id); | ||
| 56 | |||
| 57 | void IncrementHost(u32 syncpoint_id); | ||
| 58 | |||
| 59 | void WaitGuest(u32 syncpoint_id, u32 expected_value); | ||
| 60 | |||
| 61 | void WaitHost(u32 syncpoint_id, u32 expected_value); | ||
| 62 | |||
| 63 | bool IsReadyGuest(u32 syncpoint_id, u32 expected_value) const { | ||
| 64 | return syncpoints_guest[syncpoint_id].load(std::memory_order_acquire) >= expected_value; | ||
| 65 | } | ||
| 66 | |||
| 67 | bool IsReadyHost(u32 syncpoint_id, u32 expected_value) const { | ||
| 68 | return syncpoints_host[syncpoint_id].load(std::memory_order_acquire) >= expected_value; | ||
| 69 | } | ||
| 70 | |||
| 71 | private: | ||
| 72 | void Increment(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv, | ||
| 73 | std::list<RegisteredAction>& action_storage); | ||
| 74 | |||
| 75 | ActionHandle RegisterAction(std::atomic<u32>& syncpoint, | ||
| 76 | std::list<RegisteredAction>& action_storage, u32 expected_value, | ||
| 77 | std::function<void()>&& action); | ||
| 78 | |||
| 79 | void DeregisterAction(std::list<RegisteredAction>& action_storage, ActionHandle& handle); | ||
| 80 | |||
| 81 | void Wait(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv, u32 expected_value); | ||
| 82 | |||
| 83 | static constexpr size_t NUM_MAX_SYNCPOINTS = 192; | ||
| 84 | |||
| 85 | std::array<std::atomic<u32>, NUM_MAX_SYNCPOINTS> syncpoints_guest{}; | ||
| 86 | std::array<std::atomic<u32>, NUM_MAX_SYNCPOINTS> syncpoints_host{}; | ||
| 87 | |||
| 88 | std::array<std::list<RegisteredAction>, NUM_MAX_SYNCPOINTS> guest_action_storage; | ||
| 89 | std::array<std::list<RegisteredAction>, NUM_MAX_SYNCPOINTS> host_action_storage; | ||
| 90 | |||
| 91 | std::mutex guard; | ||
| 92 | std::condition_variable wait_guest_cv; | ||
| 93 | std::condition_variable wait_host_cv; | ||
| 94 | }; | ||
| 95 | |||
| 96 | } // namespace Host1x | ||
| 97 | |||
| 98 | } // namespace Tegra | ||
diff --git a/src/video_core/command_classes/vic.cpp b/src/video_core/host1x/vic.cpp index 7c17df353..ac0b7d20e 100644 --- a/src/video_core/command_classes/vic.cpp +++ b/src/video_core/host1x/vic.cpp | |||
| @@ -18,14 +18,17 @@ extern "C" { | |||
| 18 | #include "common/bit_field.h" | 18 | #include "common/bit_field.h" |
| 19 | #include "common/logging/log.h" | 19 | #include "common/logging/log.h" |
| 20 | 20 | ||
| 21 | #include "video_core/command_classes/nvdec.h" | ||
| 22 | #include "video_core/command_classes/vic.h" | ||
| 23 | #include "video_core/engines/maxwell_3d.h" | 21 | #include "video_core/engines/maxwell_3d.h" |
| 24 | #include "video_core/gpu.h" | 22 | #include "video_core/host1x/host1x.h" |
| 23 | #include "video_core/host1x/nvdec.h" | ||
| 24 | #include "video_core/host1x/vic.h" | ||
| 25 | #include "video_core/memory_manager.h" | 25 | #include "video_core/memory_manager.h" |
| 26 | #include "video_core/textures/decoders.h" | 26 | #include "video_core/textures/decoders.h" |
| 27 | 27 | ||
| 28 | namespace Tegra { | 28 | namespace Tegra { |
| 29 | |||
| 30 | namespace Host1x { | ||
| 31 | |||
| 29 | namespace { | 32 | namespace { |
| 30 | enum class VideoPixelFormat : u64_le { | 33 | enum class VideoPixelFormat : u64_le { |
| 31 | RGBA8 = 0x1f, | 34 | RGBA8 = 0x1f, |
| @@ -46,8 +49,8 @@ union VicConfig { | |||
| 46 | BitField<46, 14, u64_le> surface_height_minus1; | 49 | BitField<46, 14, u64_le> surface_height_minus1; |
| 47 | }; | 50 | }; |
| 48 | 51 | ||
| 49 | Vic::Vic(GPU& gpu_, std::shared_ptr<Nvdec> nvdec_processor_) | 52 | Vic::Vic(Host1x& host1x_, std::shared_ptr<Nvdec> nvdec_processor_) |
| 50 | : gpu(gpu_), | 53 | : host1x(host1x_), |
| 51 | nvdec_processor(std::move(nvdec_processor_)), converted_frame_buffer{nullptr, av_free} {} | 54 | nvdec_processor(std::move(nvdec_processor_)), converted_frame_buffer{nullptr, av_free} {} |
| 52 | 55 | ||
| 53 | Vic::~Vic() = default; | 56 | Vic::~Vic() = default; |
| @@ -78,7 +81,7 @@ void Vic::Execute() { | |||
| 78 | LOG_ERROR(Service_NVDRV, "VIC Luma address not set."); | 81 | LOG_ERROR(Service_NVDRV, "VIC Luma address not set."); |
| 79 | return; | 82 | return; |
| 80 | } | 83 | } |
| 81 | const VicConfig config{gpu.MemoryManager().Read<u64>(config_struct_address + 0x20)}; | 84 | const VicConfig config{host1x.MemoryManager().Read<u64>(config_struct_address + 0x20)}; |
| 82 | const AVFramePtr frame_ptr = nvdec_processor->GetFrame(); | 85 | const AVFramePtr frame_ptr = nvdec_processor->GetFrame(); |
| 83 | const auto* frame = frame_ptr.get(); | 86 | const auto* frame = frame_ptr.get(); |
| 84 | if (!frame) { | 87 | if (!frame) { |
| @@ -153,15 +156,16 @@ void Vic::WriteRGBFrame(const AVFrame* frame, const VicConfig& config) { | |||
| 153 | const u32 block_height = static_cast<u32>(config.block_linear_height_log2); | 156 | const u32 block_height = static_cast<u32>(config.block_linear_height_log2); |
| 154 | const auto size = Texture::CalculateSize(true, 4, width, height, 1, block_height, 0); | 157 | const auto size = Texture::CalculateSize(true, 4, width, height, 1, block_height, 0); |
| 155 | luma_buffer.resize(size); | 158 | luma_buffer.resize(size); |
| 156 | Texture::SwizzleSubrect(width, height, width * 4, width, 4, luma_buffer.data(), | 159 | std::span<const u8> frame_buff(converted_frame_buf_addr, 4 * width * height); |
| 157 | converted_frame_buf_addr, block_height, 0, 0); | 160 | Texture::SwizzleSubrect(luma_buffer, frame_buff, 4, width, height, 1, 0, 0, width, height, |
| 161 | block_height, 0, width * 4); | ||
| 158 | 162 | ||
| 159 | gpu.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), size); | 163 | host1x.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), size); |
| 160 | } else { | 164 | } else { |
| 161 | // send pitch linear frame | 165 | // send pitch linear frame |
| 162 | const size_t linear_size = width * height * 4; | 166 | const size_t linear_size = width * height * 4; |
| 163 | gpu.MemoryManager().WriteBlock(output_surface_luma_address, converted_frame_buf_addr, | 167 | host1x.MemoryManager().WriteBlock(output_surface_luma_address, converted_frame_buf_addr, |
| 164 | linear_size); | 168 | linear_size); |
| 165 | } | 169 | } |
| 166 | } | 170 | } |
| 167 | 171 | ||
| @@ -189,8 +193,8 @@ void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) { | |||
| 189 | luma_buffer[dst + x] = luma_src[src + x]; | 193 | luma_buffer[dst + x] = luma_src[src + x]; |
| 190 | } | 194 | } |
| 191 | } | 195 | } |
| 192 | gpu.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), | 196 | host1x.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), |
| 193 | luma_buffer.size()); | 197 | luma_buffer.size()); |
| 194 | 198 | ||
| 195 | // Chroma | 199 | // Chroma |
| 196 | const std::size_t half_height = frame_height / 2; | 200 | const std::size_t half_height = frame_height / 2; |
| @@ -231,8 +235,10 @@ void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) { | |||
| 231 | ASSERT(false); | 235 | ASSERT(false); |
| 232 | break; | 236 | break; |
| 233 | } | 237 | } |
| 234 | gpu.MemoryManager().WriteBlock(output_surface_chroma_address, chroma_buffer.data(), | 238 | host1x.MemoryManager().WriteBlock(output_surface_chroma_address, chroma_buffer.data(), |
| 235 | chroma_buffer.size()); | 239 | chroma_buffer.size()); |
| 236 | } | 240 | } |
| 237 | 241 | ||
| 242 | } // namespace Host1x | ||
| 243 | |||
| 238 | } // namespace Tegra | 244 | } // namespace Tegra |
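WriteYUVFrame copies the decoder's luma rows, which sit `linesize` bytes apart in the AVFrame, into a tightly packed buffer before writing it to guest memory. A minimal standalone sketch of that stride-aware plane copy (names are illustrative):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Pack a plane whose source rows are `linesize` bytes apart into width*height bytes.
std::vector<std::uint8_t> PackPlane(const std::uint8_t* src, int linesize, int width, int height) {
    std::vector<std::uint8_t> packed(static_cast<std::size_t>(width) * height);
    for (int y = 0; y < height; ++y) {
        // Source rows may carry padding; destination rows are exactly `width` bytes.
        std::memcpy(packed.data() + static_cast<std::size_t>(y) * width,
                    src + static_cast<std::size_t>(y) * linesize,
                    static_cast<std::size_t>(width));
    }
    return packed;
}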
diff --git a/src/video_core/command_classes/vic.h b/src/video_core/host1x/vic.h index 010daa6b6..2b78786e8 100644 --- a/src/video_core/command_classes/vic.h +++ b/src/video_core/host1x/vic.h | |||
| @@ -10,7 +10,10 @@ | |||
| 10 | struct SwsContext; | 10 | struct SwsContext; |
| 11 | 11 | ||
| 12 | namespace Tegra { | 12 | namespace Tegra { |
| 13 | class GPU; | 13 | |
| 14 | namespace Host1x { | ||
| 15 | |||
| 16 | class Host1x; | ||
| 14 | class Nvdec; | 17 | class Nvdec; |
| 15 | union VicConfig; | 18 | union VicConfig; |
| 16 | 19 | ||
| @@ -25,7 +28,7 @@ public: | |||
| 25 | SetOutputSurfaceChromaUnusedOffset = 0x1ca | 28 | SetOutputSurfaceChromaUnusedOffset = 0x1ca |
| 26 | }; | 29 | }; |
| 27 | 30 | ||
| 28 | explicit Vic(GPU& gpu, std::shared_ptr<Nvdec> nvdec_processor); | 31 | explicit Vic(Host1x& host1x, std::shared_ptr<Nvdec> nvdec_processor); |
| 29 | 32 | ||
| 30 | ~Vic(); | 33 | ~Vic(); |
| 31 | 34 | ||
| @@ -39,8 +42,8 @@ private: | |||
| 39 | 42 | ||
| 40 | void WriteYUVFrame(const AVFrame* frame, const VicConfig& config); | 43 | void WriteYUVFrame(const AVFrame* frame, const VicConfig& config); |
| 41 | 44 | ||
| 42 | GPU& gpu; | 45 | Host1x& host1x; |
| 43 | std::shared_ptr<Tegra::Nvdec> nvdec_processor; | 46 | std::shared_ptr<Tegra::Host1x::Nvdec> nvdec_processor; |
| 44 | 47 | ||
| 45 | /// Avoid reallocation of the following buffers every frame, as their | 48 | /// Avoid reallocation of the following buffers every frame, as their |
| 46 | /// size does not change during a stream | 49 | /// size does not change during a stream |
| @@ -58,4 +61,6 @@ private: | |||
| 58 | s32 scaler_height{}; | 61 | s32 scaler_height{}; |
| 59 | }; | 62 | }; |
| 60 | 63 | ||
| 64 | } // namespace Host1x | ||
| 65 | |||
| 61 | } // namespace Tegra | 66 | } // namespace Tegra |
diff --git a/src/video_core/macro/macro.cpp b/src/video_core/macro/macro.cpp index 43f8b5904..f61d5998e 100644 --- a/src/video_core/macro/macro.cpp +++ b/src/video_core/macro/macro.cpp | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | 8 | ||
| 9 | #include <boost/container_hash/hash.hpp> | 9 | #include <boost/container_hash/hash.hpp> |
| 10 | 10 | ||
| 11 | #include <fstream> | ||
| 11 | #include "common/assert.h" | 12 | #include "common/assert.h" |
| 12 | #include "common/fs/fs.h" | 13 | #include "common/fs/fs.h" |
| 13 | #include "common/fs/path_util.h" | 14 | #include "common/fs/path_util.h" |
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index bf9eb735d..cca401c74 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | #include "common/assert.h" | 7 | #include "common/assert.h" |
| 8 | #include "common/logging/log.h" | 8 | #include "common/logging/log.h" |
| 9 | #include "core/core.h" | 9 | #include "core/core.h" |
| 10 | #include "core/device_memory.h" | ||
| 10 | #include "core/hle/kernel/k_page_table.h" | 11 | #include "core/hle/kernel/k_page_table.h" |
| 11 | #include "core/hle/kernel/k_process.h" | 12 | #include "core/hle/kernel/k_process.h" |
| 12 | #include "core/memory.h" | 13 | #include "core/memory.h" |
| @@ -16,172 +17,198 @@ | |||
| 16 | 17 | ||
| 17 | namespace Tegra { | 18 | namespace Tegra { |
| 18 | 19 | ||
| 19 | MemoryManager::MemoryManager(Core::System& system_) | 20 | std::atomic<size_t> MemoryManager::unique_identifier_generator{}; |
| 20 | : system{system_}, page_table(page_table_size) {} | 21 | |
| 22 | MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64 big_page_bits_, | ||
| 23 | u64 page_bits_) | ||
| 24 | : system{system_}, memory{system.Memory()}, device_memory{system.DeviceMemory()}, | ||
| 25 | address_space_bits{address_space_bits_}, page_bits{page_bits_}, big_page_bits{big_page_bits_}, | ||
| 26 | entries{}, big_entries{}, page_table{address_space_bits, address_space_bits + page_bits - 38, | ||
| 27 | page_bits != big_page_bits ? page_bits : 0}, | ||
| 28 | unique_identifier{unique_identifier_generator.fetch_add(1, std::memory_order_acq_rel)} { | ||
| 29 | address_space_size = 1ULL << address_space_bits; | ||
| 30 | page_size = 1ULL << page_bits; | ||
| 31 | page_mask = page_size - 1ULL; | ||
| 32 | big_page_size = 1ULL << big_page_bits; | ||
| 33 | big_page_mask = big_page_size - 1ULL; | ||
| 34 | const u64 page_table_bits = address_space_bits - page_bits; | ||
| 35 | const u64 big_page_table_bits = address_space_bits - big_page_bits; | ||
| 36 | const u64 page_table_size = 1ULL << page_table_bits; | ||
| 37 | const u64 big_page_table_size = 1ULL << big_page_table_bits; | ||
| 38 | page_table_mask = page_table_size - 1; | ||
| 39 | big_page_table_mask = big_page_table_size - 1; | ||
| 40 | |||
| 41 | big_entries.resize(big_page_table_size / 32, 0); | ||
| 42 | big_page_table_cpu.resize(big_page_table_size); | ||
| 43 | big_page_continous.resize(big_page_table_size / continous_bits, 0); | ||
| 44 | entries.resize(page_table_size / 32, 0); | ||
| 45 | } | ||
| 21 | 46 | ||
| 22 | MemoryManager::~MemoryManager() = default; | 47 | MemoryManager::~MemoryManager() = default; |
| 23 | 48 | ||
| 24 | void MemoryManager::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) { | 49 | template <bool is_big_page> |
| 25 | rasterizer = rasterizer_; | 50 | MemoryManager::EntryType MemoryManager::GetEntry(size_t position) const { |
| 26 | } | 51 | if constexpr (is_big_page) { |
| 27 | 52 | position = position >> big_page_bits; | |
| 28 | GPUVAddr MemoryManager::UpdateRange(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size) { | 53 | const u64 entry_mask = big_entries[position / 32]; |
| 29 | u64 remaining_size{size}; | 54 | const size_t sub_index = position % 32; |
| 30 | for (u64 offset{}; offset < size; offset += page_size) { | 55 | return static_cast<EntryType>((entry_mask >> (2 * sub_index)) & 0x03ULL); |
| 31 | if (remaining_size < page_size) { | 56 | } else { |
| 32 | SetPageEntry(gpu_addr + offset, page_entry + offset, remaining_size); | 57 | position = position >> page_bits; |
| 33 | } else { | 58 | const u64 entry_mask = entries[position / 32]; |
| 34 | SetPageEntry(gpu_addr + offset, page_entry + offset); | 59 | const size_t sub_index = position % 32; |
| 35 | } | 60 | return static_cast<EntryType>((entry_mask >> (2 * sub_index)) & 0x03ULL); |
| 36 | remaining_size -= page_size; | ||
| 37 | } | 61 | } |
| 38 | return gpu_addr; | ||
| 39 | } | 62 | } |
| 40 | 63 | ||
| 41 | GPUVAddr MemoryManager::Map(VAddr cpu_addr, GPUVAddr gpu_addr, std::size_t size) { | 64 | template <bool is_big_page> |
| 42 | const auto it = std::ranges::lower_bound(map_ranges, gpu_addr, {}, &MapRange::first); | 65 | void MemoryManager::SetEntry(size_t position, MemoryManager::EntryType entry) { |
| 43 | if (it != map_ranges.end() && it->first == gpu_addr) { | 66 | if constexpr (is_big_page) { |
| 44 | it->second = size; | 67 | position = position >> big_page_bits; |
| 68 | const u64 entry_mask = big_entries[position / 32]; | ||
| 69 | const size_t sub_index = position % 32; | ||
| 70 | big_entries[position / 32] = | ||
| 71 | (~(3ULL << sub_index * 2) & entry_mask) | (static_cast<u64>(entry) << sub_index * 2); | ||
| 45 | } else { | 72 | } else { |
| 46 | map_ranges.insert(it, MapRange{gpu_addr, size}); | 73 | position = position >> page_bits; |
| 74 | const u64 entry_mask = entries[position / 32]; | ||
| 75 | const size_t sub_index = position % 32; | ||
| 76 | entries[position / 32] = | ||
| 77 | (~(3ULL << sub_index * 2) & entry_mask) | (static_cast<u64>(entry) << sub_index * 2); | ||
| 47 | } | 78 | } |
| 48 | return UpdateRange(gpu_addr, cpu_addr, size); | ||
| 49 | } | 79 | } |
| 50 | 80 | ||
| 51 | GPUVAddr MemoryManager::MapAllocate(VAddr cpu_addr, std::size_t size, std::size_t align) { | 81 | inline bool MemoryManager::IsBigPageContinous(size_t big_page_index) const { |
| 52 | return Map(cpu_addr, *FindFreeRange(size, align), size); | 82 | const u64 entry_mask = big_page_continous[big_page_index / continous_bits]; |
| 83 | const size_t sub_index = big_page_index % continous_bits; | ||
| 84 | return ((entry_mask >> sub_index) & 0x1ULL) != 0; | ||
| 53 | } | 85 | } |
| 54 | 86 | ||
| 55 | GPUVAddr MemoryManager::MapAllocate32(VAddr cpu_addr, std::size_t size) { | 87 | inline void MemoryManager::SetBigPageContinous(size_t big_page_index, bool value) { |
| 56 | const std::optional<GPUVAddr> gpu_addr = FindFreeRange(size, 1, true); | 88 | const u64 continous_mask = big_page_continous[big_page_index / continous_bits]; |
| 57 | ASSERT(gpu_addr); | 89 | const size_t sub_index = big_page_index % continous_bits; |
| 58 | return Map(cpu_addr, *gpu_addr, size); | 90 | big_page_continous[big_page_index / continous_bits] = |
| 91 | (~(1ULL << sub_index) & continous_mask) | (value ? 1ULL << sub_index : 0); | ||
| 59 | } | 92 | } |
| 60 | 93 | ||
| 61 | void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) { | 94 | template <MemoryManager::EntryType entry_type> |
| 62 | if (size == 0) { | 95 | GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, |
| 63 | return; | 96 | size_t size) { |
| 64 | } | 97 | u64 remaining_size{size}; |
| 65 | const auto it = std::ranges::lower_bound(map_ranges, gpu_addr, {}, &MapRange::first); | 98 | if constexpr (entry_type == EntryType::Mapped) { |
| 66 | if (it != map_ranges.end()) { | 99 | page_table.ReserveRange(gpu_addr, size); |
| 67 | ASSERT(it->first == gpu_addr); | ||
| 68 | map_ranges.erase(it); | ||
| 69 | } else { | ||
| 70 | ASSERT_MSG(false, "Unmapping non-existent GPU address=0x{:x}", gpu_addr); | ||
| 71 | } | ||
| 72 | const auto submapped_ranges = GetSubmappedRange(gpu_addr, size); | ||
| 73 | |||
| 74 | for (const auto& [map_addr, map_size] : submapped_ranges) { | ||
| 75 | // Flush and invalidate through the GPU interface, to be asynchronous if possible. | ||
| 76 | const std::optional<VAddr> cpu_addr = GpuToCpuAddress(map_addr); | ||
| 77 | ASSERT(cpu_addr); | ||
| 78 | |||
| 79 | rasterizer->UnmapMemory(*cpu_addr, map_size); | ||
| 80 | } | 100 | } |
| 81 | |||
| 82 | UpdateRange(gpu_addr, PageEntry::State::Unmapped, size); | ||
| 83 | } | ||
| 84 | |||
| 85 | std::optional<GPUVAddr> MemoryManager::AllocateFixed(GPUVAddr gpu_addr, std::size_t size) { | ||
| 86 | for (u64 offset{}; offset < size; offset += page_size) { | 101 | for (u64 offset{}; offset < size; offset += page_size) { |
| 87 | if (!GetPageEntry(gpu_addr + offset).IsUnmapped()) { | 102 | const GPUVAddr current_gpu_addr = gpu_addr + offset; |
| 88 | return std::nullopt; | 103 | [[maybe_unused]] const auto current_entry_type = GetEntry<false>(current_gpu_addr); |
| 104 | SetEntry<false>(current_gpu_addr, entry_type); | ||
| 105 | if (current_entry_type != entry_type) { | ||
| 106 | rasterizer->ModifyGPUMemory(unique_identifier, gpu_addr, page_size); | ||
| 107 | } | ||
| 108 | if constexpr (entry_type == EntryType::Mapped) { | ||
| 109 | const VAddr current_cpu_addr = cpu_addr + offset; | ||
| 110 | const auto index = PageEntryIndex<false>(current_gpu_addr); | ||
| 111 | const u32 sub_value = static_cast<u32>(current_cpu_addr >> cpu_page_bits); | ||
| 112 | page_table[index] = sub_value; | ||
| 89 | } | 113 | } |
| 114 | remaining_size -= page_size; | ||
| 90 | } | 115 | } |
| 91 | 116 | return gpu_addr; | |
| 92 | return UpdateRange(gpu_addr, PageEntry::State::Allocated, size); | ||
| 93 | } | ||
| 94 | |||
| 95 | GPUVAddr MemoryManager::Allocate(std::size_t size, std::size_t align) { | ||
| 96 | return *AllocateFixed(*FindFreeRange(size, align), size); | ||
| 97 | } | 117 | } |
| 98 | 118 | ||
| 99 | void MemoryManager::TryLockPage(PageEntry page_entry, std::size_t size) { | 119 | template <MemoryManager::EntryType entry_type> |
| 100 | if (!page_entry.IsValid()) { | 120 | GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, |
| 101 | return; | 121 | size_t size) { |
| 122 | u64 remaining_size{size}; | ||
| 123 | for (u64 offset{}; offset < size; offset += big_page_size) { | ||
| 124 | const GPUVAddr current_gpu_addr = gpu_addr + offset; | ||
| 125 | [[maybe_unused]] const auto current_entry_type = GetEntry<true>(current_gpu_addr); | ||
| 126 | SetEntry<true>(current_gpu_addr, entry_type); | ||
| 127 | if (current_entry_type != entry_type) { | ||
| 128 | rasterizer->ModifyGPUMemory(unique_identifier, gpu_addr, big_page_size); | ||
| 129 | } | ||
| 130 | if constexpr (entry_type == EntryType::Mapped) { | ||
| 131 | const VAddr current_cpu_addr = cpu_addr + offset; | ||
| 132 | const auto index = PageEntryIndex<true>(current_gpu_addr); | ||
| 133 | const u32 sub_value = static_cast<u32>(current_cpu_addr >> cpu_page_bits); | ||
| 134 | big_page_table_cpu[index] = sub_value; | ||
| 135 | const bool is_continous = ([&] { | ||
| 136 | uintptr_t base_ptr{ | ||
| 137 | reinterpret_cast<uintptr_t>(memory.GetPointerSilent(current_cpu_addr))}; | ||
| 138 | if (base_ptr == 0) { | ||
| 139 | return false; | ||
| 140 | } | ||
| 141 | for (VAddr start_cpu = current_cpu_addr + page_size; | ||
| 142 | start_cpu < current_cpu_addr + big_page_size; start_cpu += page_size) { | ||
| 143 | base_ptr += page_size; | ||
| 144 | auto next_ptr = reinterpret_cast<uintptr_t>(memory.GetPointerSilent(start_cpu)); | ||
| 145 | if (next_ptr == 0 || base_ptr != next_ptr) { | ||
| 146 | return false; | ||
| 147 | } | ||
| 148 | } | ||
| 149 | return true; | ||
| 150 | })(); | ||
| 151 | SetBigPageContinous(index, is_continous); | ||
| 152 | } | ||
| 153 | remaining_size -= big_page_size; | ||
| 102 | } | 154 | } |
| 103 | 155 | return gpu_addr; | |
| 104 | ASSERT(system.CurrentProcess() | ||
| 105 | ->PageTable() | ||
| 106 | .LockForDeviceAddressSpace(page_entry.ToAddress(), size) | ||
| 107 | .IsSuccess()); | ||
| 108 | } | 156 | } |
| 109 | 157 | ||
| 110 | void MemoryManager::TryUnlockPage(PageEntry page_entry, std::size_t size) { | 158 | void MemoryManager::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) { |
| 111 | if (!page_entry.IsValid()) { | 159 | rasterizer = rasterizer_; |
| 112 | return; | ||
| 113 | } | ||
| 114 | |||
| 115 | ASSERT(system.CurrentProcess() | ||
| 116 | ->PageTable() | ||
| 117 | .UnlockForDeviceAddressSpace(page_entry.ToAddress(), size) | ||
| 118 | .IsSuccess()); | ||
| 119 | } | 160 | } |
| 120 | 161 | ||
| 121 | PageEntry MemoryManager::GetPageEntry(GPUVAddr gpu_addr) const { | 162 | GPUVAddr MemoryManager::Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size, |
| 122 | return page_table[PageEntryIndex(gpu_addr)]; | 163 | bool is_big_pages) { |
| 164 | if (is_big_pages) [[likely]] { | ||
| 165 | return BigPageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size); | ||
| 166 | } | ||
| 167 | return PageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size); | ||
| 123 | } | 168 | } |
| 124 | 169 | ||
| 125 | void MemoryManager::SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size) { | 170 | GPUVAddr MemoryManager::MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages) { |
| 126 | // TODO(bunnei): We should lock/unlock device regions. This currently causes issues due to | 171 | if (is_big_pages) [[likely]] { |
| 127 | // improper tracking, but should be fixed in the future. | 172 | return BigPageTableOp<EntryType::Reserved>(gpu_addr, 0, size); |
| 128 | |||
| 129 | //// Unlock the old page | ||
| 130 | // TryUnlockPage(page_table[PageEntryIndex(gpu_addr)], size); | ||
| 131 | |||
| 132 | //// Lock the new page | ||
| 133 | // TryLockPage(page_entry, size); | ||
| 134 | auto& current_page = page_table[PageEntryIndex(gpu_addr)]; | ||
| 135 | |||
| 136 | if ((!current_page.IsValid() && page_entry.IsValid()) || | ||
| 137 | current_page.ToAddress() != page_entry.ToAddress()) { | ||
| 138 | rasterizer->ModifyGPUMemory(gpu_addr, size); | ||
| 139 | } | 173 | } |
| 140 | 174 | return PageTableOp<EntryType::Reserved>(gpu_addr, 0, size); | |
| 141 | current_page = page_entry; | ||
| 142 | } | 175 | } |
| 143 | 176 | ||
| 144 | std::optional<GPUVAddr> MemoryManager::FindFreeRange(std::size_t size, std::size_t align, | 177 | void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) { |
| 145 | bool start_32bit_address) const { | 178 | if (size == 0) { |
| 146 | if (!align) { | 179 | return; |
| 147 | align = page_size; | ||
| 148 | } else { | ||
| 149 | align = Common::AlignUp(align, page_size); | ||
| 150 | } | 180 | } |
| 181 | const auto submapped_ranges = GetSubmappedRange(gpu_addr, size); | ||
| 151 | 182 | ||
| 152 | u64 available_size{}; | 183 | for (const auto& [map_addr, map_size] : submapped_ranges) { |
| 153 | GPUVAddr gpu_addr{start_32bit_address ? address_space_start_low : address_space_start}; | 184 | // Flush and invalidate through the GPU interface, to be asynchronous if possible. |
| 154 | while (gpu_addr + available_size < address_space_size) { | 185 | const std::optional<VAddr> cpu_addr = GpuToCpuAddress(map_addr); |
| 155 | if (GetPageEntry(gpu_addr + available_size).IsUnmapped()) { | 186 | ASSERT(cpu_addr); |
| 156 | available_size += page_size; | ||
| 157 | |||
| 158 | if (available_size >= size) { | ||
| 159 | return gpu_addr; | ||
| 160 | } | ||
| 161 | } else { | ||
| 162 | gpu_addr += available_size + page_size; | ||
| 163 | available_size = 0; | ||
| 164 | 187 | ||
| 165 | const auto remainder{gpu_addr % align}; | 188 | rasterizer->UnmapMemory(*cpu_addr, map_size); |
| 166 | if (remainder) { | ||
| 167 | gpu_addr = (gpu_addr - remainder) + align; | ||
| 168 | } | ||
| 169 | } | ||
| 170 | } | 189 | } |
| 171 | 190 | ||
| 172 | return std::nullopt; | 191 | BigPageTableOp<EntryType::Free>(gpu_addr, 0, size); |
| 192 | PageTableOp<EntryType::Free>(gpu_addr, 0, size); | ||
| 173 | } | 193 | } |
| 174 | 194 | ||
| 175 | std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const { | 195 | std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const { |
| 176 | if (gpu_addr == 0) { | 196 | if (!IsWithinGPUAddressRange(gpu_addr)) [[unlikely]] { |
| 177 | return std::nullopt; | 197 | return std::nullopt; |
| 178 | } | 198 | } |
| 179 | const auto page_entry{GetPageEntry(gpu_addr)}; | 199 | if (GetEntry<true>(gpu_addr) != EntryType::Mapped) [[unlikely]] { |
| 180 | if (!page_entry.IsValid()) { | 200 | if (GetEntry<false>(gpu_addr) != EntryType::Mapped) { |
| 181 | return std::nullopt; | 201 | return std::nullopt; |
| 202 | } | ||
| 203 | |||
| 204 | const VAddr cpu_addr_base = static_cast<VAddr>(page_table[PageEntryIndex<false>(gpu_addr)]) | ||
| 205 | << cpu_page_bits; | ||
| 206 | return cpu_addr_base + (gpu_addr & page_mask); | ||
| 182 | } | 207 | } |
| 183 | 208 | ||
| 184 | return page_entry.ToAddress() + (gpu_addr & page_mask); | 209 | const VAddr cpu_addr_base = |
| 210 | static_cast<VAddr>(big_page_table_cpu[PageEntryIndex<true>(gpu_addr)]) << cpu_page_bits; | ||
| 211 | return cpu_addr_base + (gpu_addr & big_page_mask); | ||
| 185 | } | 212 | } |
| 186 | 213 | ||
| 187 | std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr, std::size_t size) const { | 214 | std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr, std::size_t size) const { |
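GetEntry and SetEntry above pack one 2-bit EntryType per page into u64 words, thirty-two entries per word, which keeps the state of the whole address space compact. A standalone sketch of that packing (the enumerator values here are illustrative; the real EntryType is declared in memory_manager.h):

#include <cstddef>
#include <cstdint>
#include <vector>

enum class EntryType : std::uint64_t { Free = 0, Reserved = 1, Mapped = 2 }; // illustrative values

class PackedEntries {
public:
    explicit PackedEntries(std::size_t num_pages) : words((num_pages + 31) / 32, 0) {}

    EntryType Get(std::size_t page) const {
        const std::size_t sub_index = page % 32;
        return static_cast<EntryType>((words[page / 32] >> (2 * sub_index)) & 0x3ULL);
    }

    void Set(std::size_t page, EntryType entry) {
        const std::size_t sub_index = page % 32;
        const std::uint64_t mask = 0x3ULL << (2 * sub_index);
        words[page / 32] = (words[page / 32] & ~mask) |
                           (static_cast<std::uint64_t>(entry) << (2 * sub_index));
    }

private:
    std::vector<std::uint64_t> words;
};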
| @@ -189,7 +216,7 @@ std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr, std::size_t s | |||
| 189 | const size_t page_last{(addr + size + page_size - 1) >> page_bits}; | 216 | const size_t page_last{(addr + size + page_size - 1) >> page_bits}; |
| 190 | while (page_index < page_last) { | 217 | while (page_index < page_last) { |
| 191 | const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; | 218 | const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; |
| 192 | if (page_addr && *page_addr != 0) { | 219 | if (page_addr) { |
| 193 | return page_addr; | 220 | return page_addr; |
| 194 | } | 221 | } |
| 195 | ++page_index; | 222 | ++page_index; |
| @@ -232,126 +259,298 @@ template void MemoryManager::Write<u32>(GPUVAddr addr, u32 data); | |||
| 232 | template void MemoryManager::Write<u64>(GPUVAddr addr, u64 data); | 259 | template void MemoryManager::Write<u64>(GPUVAddr addr, u64 data); |
| 233 | 260 | ||
| 234 | u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) { | 261 | u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) { |
| 235 | if (!GetPageEntry(gpu_addr).IsValid()) { | ||
| 236 | return {}; | ||
| 237 | } | ||
| 238 | |||
| 239 | const auto address{GpuToCpuAddress(gpu_addr)}; | 262 | const auto address{GpuToCpuAddress(gpu_addr)}; |
| 240 | if (!address) { | 263 | if (!address) { |
| 241 | return {}; | 264 | return {}; |
| 242 | } | 265 | } |
| 243 | 266 | ||
| 244 | return system.Memory().GetPointer(*address); | 267 | return memory.GetPointer(*address); |
| 245 | } | 268 | } |
| 246 | 269 | ||
| 247 | const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const { | 270 | const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const { |
| 248 | if (!GetPageEntry(gpu_addr).IsValid()) { | ||
| 249 | return {}; | ||
| 250 | } | ||
| 251 | |||
| 252 | const auto address{GpuToCpuAddress(gpu_addr)}; | 271 | const auto address{GpuToCpuAddress(gpu_addr)}; |
| 253 | if (!address) { | 272 | if (!address) { |
| 254 | return {}; | 273 | return {}; |
| 255 | } | 274 | } |
| 256 | 275 | ||
| 257 | return system.Memory().GetPointer(*address); | 276 | return memory.GetPointer(*address); |
| 258 | } | 277 | } |
| 259 | 278 | ||
| 260 | size_t MemoryManager::BytesToMapEnd(GPUVAddr gpu_addr) const noexcept { | 279 | #ifdef _MSC_VER // no need for gcc / clang but msvc's compiler is more conservative with inlining. |
| 261 | auto it = std::ranges::upper_bound(map_ranges, gpu_addr, {}, &MapRange::first); | 280 | #pragma inline_recursion(on) |
| 262 | --it; | 281 | #endif |
| 263 | return it->second - (gpu_addr - it->first); | 282 | |
| 264 | } | 283 | template <bool is_big_pages, typename FuncMapped, typename FuncReserved, typename FuncUnmapped> |
| 265 | 284 | inline void MemoryManager::MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size, | |
| 266 | void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size, | 285 | FuncMapped&& func_mapped, FuncReserved&& func_reserved, |
| 267 | bool is_safe) const { | 286 | FuncUnmapped&& func_unmapped) const { |
| 287 | static constexpr bool BOOL_BREAK_MAPPED = std::is_same_v<FuncMapped, bool>; | ||
| 288 | static constexpr bool BOOL_BREAK_RESERVED = std::is_same_v<FuncReserved, bool>; | ||
| 289 | static constexpr bool BOOL_BREAK_UNMAPPED = std::is_same_v<FuncUnmapped, bool>; | ||
| 290 | u64 used_page_size; | ||
| 291 | u64 used_page_mask; | ||
| 292 | u64 used_page_bits; | ||
| 293 | if constexpr (is_big_pages) { | ||
| 294 | used_page_size = big_page_size; | ||
| 295 | used_page_mask = big_page_mask; | ||
| 296 | used_page_bits = big_page_bits; | ||
| 297 | } else { | ||
| 298 | used_page_size = page_size; | ||
| 299 | used_page_mask = page_mask; | ||
| 300 | used_page_bits = page_bits; | ||
| 301 | } | ||
| 268 | std::size_t remaining_size{size}; | 302 | std::size_t remaining_size{size}; |
| 269 | std::size_t page_index{gpu_src_addr >> page_bits}; | 303 | std::size_t page_index{gpu_src_addr >> used_page_bits}; |
| 270 | std::size_t page_offset{gpu_src_addr & page_mask}; | 304 | std::size_t page_offset{gpu_src_addr & used_page_mask}; |
| 305 | GPUVAddr current_address = gpu_src_addr; | ||
| 271 | 306 | ||
| 272 | while (remaining_size > 0) { | 307 | while (remaining_size > 0) { |
| 273 | const std::size_t copy_amount{ | 308 | const std::size_t copy_amount{ |
| 274 | std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)}; | 309 | std::min(static_cast<std::size_t>(used_page_size) - page_offset, remaining_size)}; |
| 275 | const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; | 310 | auto entry = GetEntry<is_big_pages>(current_address); |
| 276 | if (page_addr && *page_addr != 0) { | 311 | if (entry == EntryType::Mapped) [[likely]] { |
| 277 | const auto src_addr{*page_addr + page_offset}; | 312 | if constexpr (BOOL_BREAK_MAPPED) { |
| 278 | if (is_safe) { | 313 | if (func_mapped(page_index, page_offset, copy_amount)) { |
| 279 | // Flush must happen on the rasterizer interface, such that memory is always | 314 | return; |
| 280 | // synchronous when it is read (even when in asynchronous GPU mode). | 315 | } |
| 281 | // Fixes Dead Cells title menu. | 316 | } else { |
| 282 | rasterizer->FlushRegion(src_addr, copy_amount); | 317 | func_mapped(page_index, page_offset, copy_amount); |
| 283 | } | 318 | } |
| 284 | system.Memory().ReadBlockUnsafe(src_addr, dest_buffer, copy_amount); | ||
| 285 | } else { | ||
| 286 | std::memset(dest_buffer, 0, copy_amount); | ||
| 287 | } | ||
| 288 | 319 | ||
| 320 | } else if (entry == EntryType::Reserved) { | ||
| 321 | if constexpr (BOOL_BREAK_RESERVED) { | ||
| 322 | if (func_reserved(page_index, page_offset, copy_amount)) { | ||
| 323 | return; | ||
| 324 | } | ||
| 325 | } else { | ||
| 326 | func_reserved(page_index, page_offset, copy_amount); | ||
| 327 | } | ||
| 328 | |||
| 329 | } else [[unlikely]] { | ||
| 330 | if constexpr (BOOL_BREAK_UNMAPPED) { | ||
| 331 | if (func_unmapped(page_index, page_offset, copy_amount)) { | ||
| 332 | return; | ||
| 333 | } | ||
| 334 | } else { | ||
| 335 | func_unmapped(page_index, page_offset, copy_amount); | ||
| 336 | } | ||
| 337 | } | ||
| 289 | page_index++; | 338 | page_index++; |
| 290 | page_offset = 0; | 339 | page_offset = 0; |
| 291 | dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount; | ||
| 292 | remaining_size -= copy_amount; | 340 | remaining_size -= copy_amount; |
| 341 | current_address += copy_amount; | ||
| 293 | } | 342 | } |
| 294 | } | 343 | } |
| 295 | 344 | ||
| 345 | template <bool is_safe> | ||
| 346 | void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, | ||
| 347 | std::size_t size) const { | ||
| 348 | auto set_to_zero = [&]([[maybe_unused]] std::size_t page_index, | ||
| 349 | [[maybe_unused]] std::size_t offset, std::size_t copy_amount) { | ||
| 350 | std::memset(dest_buffer, 0, copy_amount); | ||
| 351 | dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount; | ||
| 352 | }; | ||
| 353 | auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { | ||
| 354 | const VAddr cpu_addr_base = | ||
| 355 | (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset; | ||
| 356 | if constexpr (is_safe) { | ||
| 357 | rasterizer->FlushRegion(cpu_addr_base, copy_amount); | ||
| 358 | } | ||
| 359 | u8* physical = memory.GetPointer(cpu_addr_base); | ||
| 360 | std::memcpy(dest_buffer, physical, copy_amount); | ||
| 361 | dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount; | ||
| 362 | }; | ||
| 363 | auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { | ||
| 364 | const VAddr cpu_addr_base = | ||
| 365 | (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset; | ||
| 366 | if constexpr (is_safe) { | ||
| 367 | rasterizer->FlushRegion(cpu_addr_base, copy_amount); | ||
| 368 | } | ||
| 369 | if (!IsBigPageContinous(page_index)) [[unlikely]] { | ||
| 370 | memory.ReadBlockUnsafe(cpu_addr_base, dest_buffer, copy_amount); | ||
| 371 | } else { | ||
| 372 | u8* physical = memory.GetPointer(cpu_addr_base); | ||
| 373 | std::memcpy(dest_buffer, physical, copy_amount); | ||
| 374 | } | ||
| 375 | dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount; | ||
| 376 | }; | ||
| 377 | auto read_short_pages = [&](std::size_t page_index, std::size_t offset, | ||
| 378 | std::size_t copy_amount) { | ||
| 379 | GPUVAddr base = (page_index << big_page_bits) + offset; | ||
| 380 | MemoryOperation<false>(base, copy_amount, mapped_normal, set_to_zero, set_to_zero); | ||
| 381 | }; | ||
| 382 | MemoryOperation<true>(gpu_src_addr, size, mapped_big, set_to_zero, read_short_pages); | ||
| 383 | } | ||
| 384 | |||
| 296 | void MemoryManager::ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const { | 385 | void MemoryManager::ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const { |
| 297 | ReadBlockImpl(gpu_src_addr, dest_buffer, size, true); | 386 | ReadBlockImpl<true>(gpu_src_addr, dest_buffer, size); |
| 298 | } | 387 | } |
| 299 | 388 | ||
| 300 | void MemoryManager::ReadBlockUnsafe(GPUVAddr gpu_src_addr, void* dest_buffer, | 389 | void MemoryManager::ReadBlockUnsafe(GPUVAddr gpu_src_addr, void* dest_buffer, |
| 301 | const std::size_t size) const { | 390 | const std::size_t size) const { |
| 302 | ReadBlockImpl(gpu_src_addr, dest_buffer, size, false); | 391 | ReadBlockImpl<false>(gpu_src_addr, dest_buffer, size); |
| 303 | } | 392 | } |
| 304 | 393 | ||
| 305 | void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size, | 394 | template <bool is_safe> |
| 306 | bool is_safe) { | 395 | void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, |
| 307 | std::size_t remaining_size{size}; | 396 | std::size_t size) { |
| 308 | std::size_t page_index{gpu_dest_addr >> page_bits}; | 397 | auto just_advance = [&]([[maybe_unused]] std::size_t page_index, |
| 309 | std::size_t page_offset{gpu_dest_addr & page_mask}; | 398 | [[maybe_unused]] std::size_t offset, std::size_t copy_amount) { |
| 310 | 399 | src_buffer = static_cast<const u8*>(src_buffer) + copy_amount; | |
| 311 | while (remaining_size > 0) { | 400 | }; |
| 312 | const std::size_t copy_amount{ | 401 | auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { |
| 313 | std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)}; | 402 | const VAddr cpu_addr_base = |
| 314 | const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; | 403 | (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset; |
| 315 | if (page_addr && *page_addr != 0) { | 404 | if constexpr (is_safe) { |
| 316 | const auto dest_addr{*page_addr + page_offset}; | 405 | rasterizer->InvalidateRegion(cpu_addr_base, copy_amount); |
| 317 | |||
| 318 | if (is_safe) { | ||
| 319 | // Invalidate must happen on the rasterizer interface, such that memory is always | ||
| 320 | // synchronous when it is written (even when in asynchronous GPU mode). | ||
| 321 | rasterizer->InvalidateRegion(dest_addr, copy_amount); | ||
| 322 | } | ||
| 323 | system.Memory().WriteBlockUnsafe(dest_addr, src_buffer, copy_amount); | ||
| 324 | } | 406 | } |
| 325 | 407 | u8* physical = memory.GetPointer(cpu_addr_base); | |
| 326 | page_index++; | 408 | std::memcpy(physical, src_buffer, copy_amount); |
| 327 | page_offset = 0; | ||
| 328 | src_buffer = static_cast<const u8*>(src_buffer) + copy_amount; | 409 | src_buffer = static_cast<const u8*>(src_buffer) + copy_amount; |
| 329 | remaining_size -= copy_amount; | 410 | }; |
| 330 | } | 411 | auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { |
| 412 | const VAddr cpu_addr_base = | ||
| 413 | (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset; | ||
| 414 | if constexpr (is_safe) { | ||
| 415 | rasterizer->InvalidateRegion(cpu_addr_base, copy_amount); | ||
| 416 | } | ||
| 417 | if (!IsBigPageContinous(page_index)) [[unlikely]] { | ||
| 418 | memory.WriteBlockUnsafe(cpu_addr_base, src_buffer, copy_amount); | ||
| 419 | } else { | ||
| 420 | u8* physical = memory.GetPointer(cpu_addr_base); | ||
| 421 | std::memcpy(physical, src_buffer, copy_amount); | ||
| 422 | } | ||
| 423 | src_buffer = static_cast<const u8*>(src_buffer) + copy_amount; | ||
| 424 | }; | ||
| 425 | auto write_short_pages = [&](std::size_t page_index, std::size_t offset, | ||
| 426 | std::size_t copy_amount) { | ||
| 427 | GPUVAddr base = (page_index << big_page_bits) + offset; | ||
| 428 | MemoryOperation<false>(base, copy_amount, mapped_normal, just_advance, just_advance); | ||
| 429 | }; | ||
| 430 | MemoryOperation<true>(gpu_dest_addr, size, mapped_big, just_advance, write_short_pages); | ||
| 331 | } | 431 | } |
| 332 | 432 | ||
| 333 | void MemoryManager::WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size) { | 433 | void MemoryManager::WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size) { |
| 334 | WriteBlockImpl(gpu_dest_addr, src_buffer, size, true); | 434 | WriteBlockImpl<true>(gpu_dest_addr, src_buffer, size); |
| 335 | } | 435 | } |
| 336 | 436 | ||
| 337 | void MemoryManager::WriteBlockUnsafe(GPUVAddr gpu_dest_addr, const void* src_buffer, | 437 | void MemoryManager::WriteBlockUnsafe(GPUVAddr gpu_dest_addr, const void* src_buffer, |
| 338 | std::size_t size) { | 438 | std::size_t size) { |
| 339 | WriteBlockImpl(gpu_dest_addr, src_buffer, size, false); | 439 | WriteBlockImpl<false>(gpu_dest_addr, src_buffer, size); |
| 340 | } | 440 | } |
| 341 | 441 | ||
| 342 | void MemoryManager::FlushRegion(GPUVAddr gpu_addr, size_t size) const { | 442 | void MemoryManager::FlushRegion(GPUVAddr gpu_addr, size_t size) const { |
| 343 | size_t remaining_size{size}; | 443 | auto do_nothing = [&]([[maybe_unused]] std::size_t page_index, |
| 344 | size_t page_index{gpu_addr >> page_bits}; | 444 | [[maybe_unused]] std::size_t offset, |
| 345 | size_t page_offset{gpu_addr & page_mask}; | 445 | [[maybe_unused]] std::size_t copy_amount) {}; |
| 346 | while (remaining_size > 0) { | 446 | |
| 347 | const size_t num_bytes{std::min(page_size - page_offset, remaining_size)}; | 447 | auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { |
| 348 | if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) { | 448 | const VAddr cpu_addr_base = |
| 349 | rasterizer->FlushRegion(*page_addr + page_offset, num_bytes); | 449 | (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset; |
| 450 | rasterizer->FlushRegion(cpu_addr_base, copy_amount); | ||
| 451 | }; | ||
| 452 | auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { | ||
| 453 | const VAddr cpu_addr_base = | ||
| 454 | (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset; | ||
| 455 | rasterizer->FlushRegion(cpu_addr_base, copy_amount); | ||
| 456 | }; | ||
| 457 | auto flush_short_pages = [&](std::size_t page_index, std::size_t offset, | ||
| 458 | std::size_t copy_amount) { | ||
| 459 | GPUVAddr base = (page_index << big_page_bits) + offset; | ||
| 460 | MemoryOperation<false>(base, copy_amount, mapped_normal, do_nothing, do_nothing); | ||
| 461 | }; | ||
| 462 | MemoryOperation<true>(gpu_addr, size, mapped_big, do_nothing, flush_short_pages); | ||
| 463 | } | ||
| 464 | |||
| 465 | bool MemoryManager::IsMemoryDirty(GPUVAddr gpu_addr, size_t size) const { | ||
| 466 | bool result = false; | ||
| 467 | auto do_nothing = [&]([[maybe_unused]] std::size_t page_index, | ||
| 468 | [[maybe_unused]] std::size_t offset, | ||
| 469 | [[maybe_unused]] std::size_t copy_amount) { return false; }; | ||
| 470 | |||
| 471 | auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { | ||
| 472 | const VAddr cpu_addr_base = | ||
| 473 | (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset; | ||
| 474 | result |= rasterizer->MustFlushRegion(cpu_addr_base, copy_amount); | ||
| 475 | return result; | ||
| 476 | }; | ||
| 477 | auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { | ||
| 478 | const VAddr cpu_addr_base = | ||
| 479 | (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset; | ||
| 480 | result |= rasterizer->MustFlushRegion(cpu_addr_base, copy_amount); | ||
| 481 | return result; | ||
| 482 | }; | ||
| 483 | auto check_short_pages = [&](std::size_t page_index, std::size_t offset, | ||
| 484 | std::size_t copy_amount) { | ||
| 485 | GPUVAddr base = (page_index << big_page_bits) + offset; | ||
| 486 | MemoryOperation<false>(base, copy_amount, mapped_normal, do_nothing, do_nothing); | ||
| 487 | return result; | ||
| 488 | }; | ||
| 489 | MemoryOperation<true>(gpu_addr, size, mapped_big, do_nothing, check_short_pages); | ||
| 490 | return result; | ||
| 491 | } | ||
| 492 | |||
| 493 | size_t MemoryManager::MaxContinousRange(GPUVAddr gpu_addr, size_t size) const { | ||
| 494 | std::optional<VAddr> old_page_addr{}; | ||
| 495 | size_t range_so_far = 0; | ||
| 496 | bool result{false}; | ||
| 497 | auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset, | ||
| 498 | std::size_t copy_amount) { | ||
| 499 | result = true; | ||
| 500 | return true; | ||
| 501 | }; | ||
| 502 | auto short_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { | ||
| 503 | const VAddr cpu_addr_base = | ||
| 504 | (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset; | ||
| 505 | if (old_page_addr && *old_page_addr != cpu_addr_base) { | ||
| 506 | result = true; | ||
| 507 | return true; | ||
| 350 | } | 508 | } |
| 351 | ++page_index; | 509 | range_so_far += copy_amount; |
| 352 | page_offset = 0; | 510 | old_page_addr = {cpu_addr_base + copy_amount}; |
| 353 | remaining_size -= num_bytes; | 511 | return false; |
| 354 | } | 512 | }; |
| 513 | auto big_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { | ||
| 514 | const VAddr cpu_addr_base = | ||
| 515 | (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset; | ||
| 516 | if (old_page_addr && *old_page_addr != cpu_addr_base) { | ||
| 517 | return true; | ||
| 518 | } | ||
| 519 | range_so_far += copy_amount; | ||
| 520 | old_page_addr = {cpu_addr_base + copy_amount}; | ||
| 521 | return false; | ||
| 522 | }; | ||
| 523 | auto check_short_pages = [&](std::size_t page_index, std::size_t offset, | ||
| 524 | std::size_t copy_amount) { | ||
| 525 | GPUVAddr base = (page_index << big_page_bits) + offset; | ||
| 526 | MemoryOperation<false>(base, copy_amount, short_check, fail, fail); | ||
| 527 | return result; | ||
| 528 | }; | ||
| 529 | MemoryOperation<true>(gpu_addr, size, big_check, fail, check_short_pages); | ||
| 530 | return range_so_far; | ||
| 531 | } | ||
| 532 | |||
| 533 | void MemoryManager::InvalidateRegion(GPUVAddr gpu_addr, size_t size) const { | ||
| 534 | auto do_nothing = [&]([[maybe_unused]] std::size_t page_index, | ||
| 535 | [[maybe_unused]] std::size_t offset, | ||
| 536 | [[maybe_unused]] std::size_t copy_amount) {}; | ||
| 537 | |||
| 538 | auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { | ||
| 539 | const VAddr cpu_addr_base = | ||
| 540 | (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset; | ||
| 541 | rasterizer->InvalidateRegion(cpu_addr_base, copy_amount); | ||
| 542 | }; | ||
| 543 | auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { | ||
| 544 | const VAddr cpu_addr_base = | ||
| 545 | (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset; | ||
| 546 | rasterizer->InvalidateRegion(cpu_addr_base, copy_amount); | ||
| 547 | }; | ||
| 548 | auto invalidate_short_pages = [&](std::size_t page_index, std::size_t offset, | ||
| 549 | std::size_t copy_amount) { | ||
| 550 | GPUVAddr base = (page_index << big_page_bits) + offset; | ||
| 551 | MemoryOperation<false>(base, copy_amount, mapped_normal, do_nothing, do_nothing); | ||
| 552 | }; | ||
| 553 | MemoryOperation<true>(gpu_addr, size, mapped_big, do_nothing, invalidate_short_pages); | ||
| 355 | } | 554 | } |
| 356 | 555 | ||
| 357 | void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std::size_t size) { | 556 | void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std::size_t size) { |
| @@ -365,87 +564,134 @@ void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std | |||
| 365 | } | 564 | } |
| 366 | 565 | ||
| 367 | bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const { | 566 | bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const { |
| 368 | const auto cpu_addr{GpuToCpuAddress(gpu_addr)}; | 567 | if (GetEntry<true>(gpu_addr) == EntryType::Mapped) [[likely]] { |
| 369 | if (!cpu_addr) { | 568 | size_t page_index = gpu_addr >> big_page_bits; |
| 569 | if (IsBigPageContinous(page_index)) [[likely]] { | ||
| 570 | const std::size_t page{(page_index & big_page_mask) + size}; | ||
| 571 | return page <= big_page_size; | ||
| 572 | } | ||
| 573 | const std::size_t page{(gpu_addr & Core::Memory::YUZU_PAGEMASK) + size}; | ||
| 574 | return page <= Core::Memory::YUZU_PAGESIZE; | ||
| 575 | } | ||
| 576 | if (GetEntry<false>(gpu_addr) != EntryType::Mapped) { | ||
| 370 | return false; | 577 | return false; |
| 371 | } | 578 | } |
| 372 | const std::size_t page{(*cpu_addr & Core::Memory::YUZU_PAGEMASK) + size}; | 579 | const std::size_t page{(gpu_addr & Core::Memory::YUZU_PAGEMASK) + size}; |
| 373 | return page <= Core::Memory::YUZU_PAGESIZE; | 580 | return page <= Core::Memory::YUZU_PAGESIZE; |
| 374 | } | 581 | } |
| 375 | 582 | ||
| 376 | bool MemoryManager::IsContinousRange(GPUVAddr gpu_addr, std::size_t size) const { | 583 | bool MemoryManager::IsContinousRange(GPUVAddr gpu_addr, std::size_t size) const { |
| 377 | size_t page_index{gpu_addr >> page_bits}; | ||
| 378 | const size_t page_last{(gpu_addr + size + page_size - 1) >> page_bits}; | ||
| 379 | std::optional<VAddr> old_page_addr{}; | 584 | std::optional<VAddr> old_page_addr{}; |
| 380 | while (page_index != page_last) { | 585 | bool result{true}; |
| 381 | const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; | 586 | auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset, |
| 382 | if (!page_addr || *page_addr == 0) { | 587 | std::size_t copy_amount) { |
| 383 | return false; | 588 | result = false; |
| 589 | return true; | ||
| 590 | }; | ||
| 591 | auto short_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { | ||
| 592 | const VAddr cpu_addr_base = | ||
| 593 | (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset; | ||
| 594 | if (old_page_addr && *old_page_addr != cpu_addr_base) { | ||
| 595 | result = false; | ||
| 596 | return true; | ||
| 384 | } | 597 | } |
| 385 | if (old_page_addr) { | 598 | old_page_addr = {cpu_addr_base + copy_amount}; |
| 386 | if (*old_page_addr + page_size != *page_addr) { | 599 | return false; |
| 387 | return false; | 600 | }; |
| 388 | } | 601 | auto big_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { |
| 602 | const VAddr cpu_addr_base = | ||
| 603 | (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset; | ||
| 604 | if (old_page_addr && *old_page_addr != cpu_addr_base) { | ||
| 605 | result = false; | ||
| 606 | return true; | ||
| 389 | } | 607 | } |
| 390 | old_page_addr = page_addr; | 608 | old_page_addr = {cpu_addr_base + copy_amount}; |
| 391 | ++page_index; | 609 | return false; |
| 392 | } | 610 | }; |
| 393 | return true; | 611 | auto check_short_pages = [&](std::size_t page_index, std::size_t offset, |
| 612 | std::size_t copy_amount) { | ||
| 613 | GPUVAddr base = (page_index << big_page_bits) + offset; | ||
| 614 | MemoryOperation<false>(base, copy_amount, short_check, fail, fail); | ||
| 615 | return !result; | ||
| 616 | }; | ||
| 617 | MemoryOperation<true>(gpu_addr, size, big_check, fail, check_short_pages); | ||
| 618 | return result; | ||
| 394 | } | 619 | } |
| 395 | 620 | ||
| 396 | bool MemoryManager::IsFullyMappedRange(GPUVAddr gpu_addr, std::size_t size) const { | 621 | bool MemoryManager::IsFullyMappedRange(GPUVAddr gpu_addr, std::size_t size) const { |
| 397 | size_t page_index{gpu_addr >> page_bits}; | 622 | bool result{true}; |
| 398 | const size_t page_last{(gpu_addr + size + page_size - 1) >> page_bits}; | 623 | auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset, |
| 399 | while (page_index < page_last) { | 624 | [[maybe_unused]] std::size_t copy_amount) { |
| 400 | if (!page_table[page_index].IsValid() || page_table[page_index].ToAddress() == 0) { | 625 | result = false; |
| 401 | return false; | 626 | return true; |
| 402 | } | 627 | }; |
| 403 | ++page_index; | 628 | auto pass = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset, |
| 404 | } | 629 | [[maybe_unused]] std::size_t copy_amount) { return false; }; |
| 405 | return true; | 630 | auto check_short_pages = [&](std::size_t page_index, std::size_t offset, |
| 631 | std::size_t copy_amount) { | ||
| 632 | GPUVAddr base = (page_index << big_page_bits) + offset; | ||
| 633 | MemoryOperation<false>(base, copy_amount, pass, pass, fail); | ||
| 634 | return !result; | ||
| 635 | }; | ||
| 636 | MemoryOperation<true>(gpu_addr, size, pass, fail, check_short_pages); | ||
| 637 | return result; | ||
| 406 | } | 638 | } |
| 407 | 639 | ||
| 408 | std::vector<std::pair<GPUVAddr, std::size_t>> MemoryManager::GetSubmappedRange( | 640 | std::vector<std::pair<GPUVAddr, std::size_t>> MemoryManager::GetSubmappedRange( |
| 409 | GPUVAddr gpu_addr, std::size_t size) const { | 641 | GPUVAddr gpu_addr, std::size_t size) const { |
| 410 | std::vector<std::pair<GPUVAddr, std::size_t>> result{}; | 642 | std::vector<std::pair<GPUVAddr, std::size_t>> result{}; |
| 411 | size_t page_index{gpu_addr >> page_bits}; | ||
| 412 | size_t remaining_size{size}; | ||
| 413 | size_t page_offset{gpu_addr & page_mask}; | ||
| 414 | std::optional<std::pair<GPUVAddr, std::size_t>> last_segment{}; | 643 | std::optional<std::pair<GPUVAddr, std::size_t>> last_segment{}; |
| 415 | std::optional<VAddr> old_page_addr{}; | 644 | std::optional<VAddr> old_page_addr{}; |
| 416 | const auto extend_size = [&last_segment, &page_index, &page_offset](std::size_t bytes) { | 645 | const auto split = [&last_segment, &result]([[maybe_unused]] std::size_t page_index, |
| 417 | if (!last_segment) { | 646 | [[maybe_unused]] std::size_t offset, |
| 418 | const GPUVAddr new_base_addr = (page_index << page_bits) + page_offset; | 647 | [[maybe_unused]] std::size_t copy_amount) { |
| 419 | last_segment = {new_base_addr, bytes}; | ||
| 420 | } else { | ||
| 421 | last_segment->second += bytes; | ||
| 422 | } | ||
| 423 | }; | ||
| 424 | const auto split = [&last_segment, &result] { | ||
| 425 | if (last_segment) { | 648 | if (last_segment) { |
| 426 | result.push_back(*last_segment); | 649 | result.push_back(*last_segment); |
| 427 | last_segment = std::nullopt; | 650 | last_segment = std::nullopt; |
| 428 | } | 651 | } |
| 429 | }; | 652 | }; |
| 430 | while (remaining_size > 0) { | 653 | const auto extend_size_big = [this, &split, &old_page_addr, |
| 431 | const size_t num_bytes{std::min(page_size - page_offset, remaining_size)}; | 654 | &last_segment](std::size_t page_index, std::size_t offset, |
| 432 | const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; | 655 | std::size_t copy_amount) { |
| 433 | if (!page_addr || *page_addr == 0) { | 656 | const VAddr cpu_addr_base = |
| 434 | split(); | 657 | (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset; |
| 435 | } else if (old_page_addr) { | 658 | if (old_page_addr) { |
| 436 | if (*old_page_addr + page_size != *page_addr) { | 659 | if (*old_page_addr != cpu_addr_base) { |
| 437 | split(); | 660 | split(0, 0, 0); |
| 661 | } | ||
| 662 | } | ||
| 663 | old_page_addr = {cpu_addr_base + copy_amount}; | ||
| 664 | if (!last_segment) { | ||
| 665 | const GPUVAddr new_base_addr = (page_index << big_page_bits) + offset; | ||
| 666 | last_segment = {new_base_addr, copy_amount}; | ||
| 667 | } else { | ||
| 668 | last_segment->second += copy_amount; | ||
| 669 | } | ||
| 670 | }; | ||
| 671 | const auto extend_size_short = [this, &split, &old_page_addr, | ||
| 672 | &last_segment](std::size_t page_index, std::size_t offset, | ||
| 673 | std::size_t copy_amount) { | ||
| 674 | const VAddr cpu_addr_base = | ||
| 675 | (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset; | ||
| 676 | if (old_page_addr) { | ||
| 677 | if (*old_page_addr != cpu_addr_base) { | ||
| 678 | split(0, 0, 0); | ||
| 438 | } | 679 | } |
| 439 | extend_size(num_bytes); | 680 | } |
| 681 | old_page_addr = {cpu_addr_base + copy_amount}; | ||
| 682 | if (!last_segment) { | ||
| 683 | const GPUVAddr new_base_addr = (page_index << page_bits) + offset; | ||
| 684 | last_segment = {new_base_addr, copy_amount}; | ||
| 440 | } else { | 685 | } else { |
| 441 | extend_size(num_bytes); | 686 | last_segment->second += copy_amount; |
| 442 | } | 687 | } |
| 443 | ++page_index; | 688 | }; |
| 444 | page_offset = 0; | 689 | auto do_short_pages = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { |
| 445 | remaining_size -= num_bytes; | 690 | GPUVAddr base = (page_index << big_page_bits) + offset; |
| 446 | old_page_addr = page_addr; | 691 | MemoryOperation<false>(base, copy_amount, extend_size_short, split, split); |
| 447 | } | 692 | }; |
| 448 | split(); | 693 | MemoryOperation<true>(gpu_addr, size, extend_size_big, split, do_short_pages); |
| 694 | split(0, 0, 0); | ||
| 449 | return result; | 695 | return result; |
| 450 | } | 696 | } |
| 451 | 697 | ||
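The memory_manager.cpp hunks above replace the old per-page while-loops in ReadBlockImpl, WriteBlockImpl, FlushRegion, InvalidateRegion and the range queries with a single MemoryOperation<is_big_pages> traversal: the caller passes one callback per entry kind (mapped, reserved, unmapped), big pages are walked first, and ranges backed by small pages recurse with MemoryOperation<false>. The following standalone sketch is not yuzu code and its names are illustrative; it only shows the callback-driven page walk with the early-exit behaviour that the BOOL_BREAK_* branches in the diff implement.

    // Standalone sketch of a callback-driven page walk with early exit.
    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    template <typename Func>
    void WalkPages(std::uint64_t addr, std::size_t size, std::size_t page_bits, Func&& func) {
        const std::size_t page_size = std::size_t{1} << page_bits;
        const std::size_t page_mask = page_size - 1;
        std::size_t remaining = size;
        std::size_t page_index = static_cast<std::size_t>(addr >> page_bits);
        std::size_t page_offset = static_cast<std::size_t>(addr & page_mask);
        while (remaining > 0) {
            const std::size_t amount = std::min(page_size - page_offset, remaining);
            if (func(page_index, page_offset, amount)) {
                return; // early exit, like the BOOL_BREAK_* paths in the diff
            }
            ++page_index;
            page_offset = 0;
            remaining -= amount;
        }
    }

    int main() {
        // Walk 0x2800 bytes starting at 0x1F00 with 4 KiB pages.
        WalkPages(0x1F00, 0x2800, 12,
                  [](std::size_t index, std::size_t offset, std::size_t amount) {
                      std::printf("page %zu offset 0x%zx amount 0x%zx\n", index, offset, amount);
                      return false; // keep walking
                  });
    }

In the real code the three callbacks differ per operation (memcpy for reads, memset for unmapped reads, rasterizer Flush/Invalidate calls for the safe variants), but the traversal skeleton is shared.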
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h index 74f9ce175..f992e29f3 100644 --- a/src/video_core/memory_manager.h +++ b/src/video_core/memory_manager.h | |||
| @@ -3,73 +3,39 @@ | |||
| 3 | 3 | ||
| 4 | #pragma once | 4 | #pragma once |
| 5 | 5 | ||
| 6 | #include <atomic> | ||
| 6 | #include <map> | 7 | #include <map> |
| 7 | #include <optional> | 8 | #include <optional> |
| 8 | #include <vector> | 9 | #include <vector> |
| 9 | 10 | ||
| 10 | #include "common/common_types.h" | 11 | #include "common/common_types.h" |
| 12 | #include "common/multi_level_page_table.h" | ||
| 13 | #include "common/virtual_buffer.h" | ||
| 11 | 14 | ||
| 12 | namespace VideoCore { | 15 | namespace VideoCore { |
| 13 | class RasterizerInterface; | 16 | class RasterizerInterface; |
| 14 | } | 17 | } |
| 15 | 18 | ||
| 16 | namespace Core { | 19 | namespace Core { |
| 20 | class DeviceMemory; | ||
| 21 | namespace Memory { | ||
| 22 | class Memory; | ||
| 23 | } // namespace Memory | ||
| 17 | class System; | 24 | class System; |
| 18 | } | 25 | } // namespace Core |
| 19 | 26 | ||
| 20 | namespace Tegra { | 27 | namespace Tegra { |
| 21 | 28 | ||
| 22 | class PageEntry final { | ||
| 23 | public: | ||
| 24 | enum class State : u32 { | ||
| 25 | Unmapped = static_cast<u32>(-1), | ||
| 26 | Allocated = static_cast<u32>(-2), | ||
| 27 | }; | ||
| 28 | |||
| 29 | constexpr PageEntry() = default; | ||
| 30 | constexpr PageEntry(State state_) : state{state_} {} | ||
| 31 | constexpr PageEntry(VAddr addr) : state{static_cast<State>(addr >> ShiftBits)} {} | ||
| 32 | |||
| 33 | [[nodiscard]] constexpr bool IsUnmapped() const { | ||
| 34 | return state == State::Unmapped; | ||
| 35 | } | ||
| 36 | |||
| 37 | [[nodiscard]] constexpr bool IsAllocated() const { | ||
| 38 | return state == State::Allocated; | ||
| 39 | } | ||
| 40 | |||
| 41 | [[nodiscard]] constexpr bool IsValid() const { | ||
| 42 | return !IsUnmapped() && !IsAllocated(); | ||
| 43 | } | ||
| 44 | |||
| 45 | [[nodiscard]] constexpr VAddr ToAddress() const { | ||
| 46 | if (!IsValid()) { | ||
| 47 | return {}; | ||
| 48 | } | ||
| 49 | |||
| 50 | return static_cast<VAddr>(state) << ShiftBits; | ||
| 51 | } | ||
| 52 | |||
| 53 | [[nodiscard]] constexpr PageEntry operator+(u64 offset) const { | ||
| 54 | // If this is a reserved value, offsets do not apply | ||
| 55 | if (!IsValid()) { | ||
| 56 | return *this; | ||
| 57 | } | ||
| 58 | return PageEntry{(static_cast<VAddr>(state) << ShiftBits) + offset}; | ||
| 59 | } | ||
| 60 | |||
| 61 | private: | ||
| 62 | static constexpr std::size_t ShiftBits{12}; | ||
| 63 | |||
| 64 | State state{State::Unmapped}; | ||
| 65 | }; | ||
| 66 | static_assert(sizeof(PageEntry) == 4, "PageEntry is too large"); | ||
| 67 | |||
| 68 | class MemoryManager final { | 29 | class MemoryManager final { |
| 69 | public: | 30 | public: |
| 70 | explicit MemoryManager(Core::System& system_); | 31 | explicit MemoryManager(Core::System& system_, u64 address_space_bits_ = 40, |
| 32 | u64 big_page_bits_ = 16, u64 page_bits_ = 12); | ||
| 71 | ~MemoryManager(); | 33 | ~MemoryManager(); |
| 72 | 34 | ||
| 35 | size_t GetID() const { | ||
| 36 | return unique_identifier; | ||
| 37 | } | ||
| 38 | |||
| 73 | /// Binds a renderer to the memory manager. | 39 | /// Binds a renderer to the memory manager. |
| 74 | void BindRasterizer(VideoCore::RasterizerInterface* rasterizer); | 40 | void BindRasterizer(VideoCore::RasterizerInterface* rasterizer); |
| 75 | 41 | ||
| @@ -86,9 +52,6 @@ public: | |||
| 86 | [[nodiscard]] u8* GetPointer(GPUVAddr addr); | 52 | [[nodiscard]] u8* GetPointer(GPUVAddr addr); |
| 87 | [[nodiscard]] const u8* GetPointer(GPUVAddr addr) const; | 53 | [[nodiscard]] const u8* GetPointer(GPUVAddr addr) const; |
| 88 | 54 | ||
| 89 | /// Returns the number of bytes until the end of the memory map containing the given GPU address | ||
| 90 | [[nodiscard]] size_t BytesToMapEnd(GPUVAddr gpu_addr) const noexcept; | ||
| 91 | |||
| 92 | /** | 55 | /** |
| 93 | * ReadBlock and WriteBlock are full read and write operations over virtual | 56 | * ReadBlock and WriteBlock are full read and write operations over virtual |
| 94 | * GPU Memory. It's important to use these when GPU memory may not be continuous | 57 | * GPU Memory. It's important to use these when GPU memory may not be continuous |
| @@ -135,54 +98,95 @@ public: | |||
| 135 | std::vector<std::pair<GPUVAddr, std::size_t>> GetSubmappedRange(GPUVAddr gpu_addr, | 98 | std::vector<std::pair<GPUVAddr, std::size_t>> GetSubmappedRange(GPUVAddr gpu_addr, |
| 136 | std::size_t size) const; | 99 | std::size_t size) const; |
| 137 | 100 | ||
| 138 | [[nodiscard]] GPUVAddr Map(VAddr cpu_addr, GPUVAddr gpu_addr, std::size_t size); | 101 | GPUVAddr Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size, bool is_big_pages = true); |
| 139 | [[nodiscard]] GPUVAddr MapAllocate(VAddr cpu_addr, std::size_t size, std::size_t align); | 102 | GPUVAddr MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages = true); |
| 140 | [[nodiscard]] GPUVAddr MapAllocate32(VAddr cpu_addr, std::size_t size); | ||
| 141 | [[nodiscard]] std::optional<GPUVAddr> AllocateFixed(GPUVAddr gpu_addr, std::size_t size); | ||
| 142 | [[nodiscard]] GPUVAddr Allocate(std::size_t size, std::size_t align); | ||
| 143 | void Unmap(GPUVAddr gpu_addr, std::size_t size); | 103 | void Unmap(GPUVAddr gpu_addr, std::size_t size); |
| 144 | 104 | ||
| 145 | void FlushRegion(GPUVAddr gpu_addr, size_t size) const; | 105 | void FlushRegion(GPUVAddr gpu_addr, size_t size) const; |
| 146 | 106 | ||
| 107 | void InvalidateRegion(GPUVAddr gpu_addr, size_t size) const; | ||
| 108 | |||
| 109 | bool IsMemoryDirty(GPUVAddr gpu_addr, size_t size) const; | ||
| 110 | |||
| 111 | size_t MaxContinousRange(GPUVAddr gpu_addr, size_t size) const; | ||
| 112 | |||
| 113 | bool IsWithinGPUAddressRange(GPUVAddr gpu_addr) const { | ||
| 114 | return gpu_addr < address_space_size; | ||
| 115 | } | ||
| 116 | |||
| 147 | private: | 117 | private: |
| 148 | [[nodiscard]] PageEntry GetPageEntry(GPUVAddr gpu_addr) const; | 118 | template <bool is_big_pages, typename FuncMapped, typename FuncReserved, typename FuncUnmapped> |
| 149 | void SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size = page_size); | 119 | inline void MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size, FuncMapped&& func_mapped, |
| 150 | GPUVAddr UpdateRange(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size); | 120 | FuncReserved&& func_reserved, FuncUnmapped&& func_unmapped) const; |
| 151 | [[nodiscard]] std::optional<GPUVAddr> FindFreeRange(std::size_t size, std::size_t align, | 121 | |
| 152 | bool start_32bit_address = false) const; | 122 | template <bool is_safe> |
| 153 | 123 | void ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const; | |
| 154 | void TryLockPage(PageEntry page_entry, std::size_t size); | 124 | |
| 155 | void TryUnlockPage(PageEntry page_entry, std::size_t size); | 125 | template <bool is_safe> |
| 156 | 126 | void WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size); | |
| 157 | void ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size, | 127 | |
| 158 | bool is_safe) const; | 128 | template <bool is_big_page> |
| 159 | void WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size, | 129 | [[nodiscard]] std::size_t PageEntryIndex(GPUVAddr gpu_addr) const { |
| 160 | bool is_safe); | 130 | if constexpr (is_big_page) { |
| 161 | 131 | return (gpu_addr >> big_page_bits) & big_page_table_mask; | |
| 162 | [[nodiscard]] static constexpr std::size_t PageEntryIndex(GPUVAddr gpu_addr) { | 132 | } else { |
| 163 | return (gpu_addr >> page_bits) & page_table_mask; | 133 | return (gpu_addr >> page_bits) & page_table_mask; |
| 134 | } | ||
| 164 | } | 135 | } |
| 165 | 136 | ||
| 166 | static constexpr u64 address_space_size = 1ULL << 40; | 137 | inline bool IsBigPageContinous(size_t big_page_index) const; |
| 167 | static constexpr u64 address_space_start = 1ULL << 32; | 138 | inline void SetBigPageContinous(size_t big_page_index, bool value); |
| 168 | static constexpr u64 address_space_start_low = 1ULL << 16; | ||
| 169 | static constexpr u64 page_bits{16}; | ||
| 170 | static constexpr u64 page_size{1 << page_bits}; | ||
| 171 | static constexpr u64 page_mask{page_size - 1}; | ||
| 172 | static constexpr u64 page_table_bits{24}; | ||
| 173 | static constexpr u64 page_table_size{1 << page_table_bits}; | ||
| 174 | static constexpr u64 page_table_mask{page_table_size - 1}; | ||
| 175 | 139 | ||
| 176 | Core::System& system; | 140 | Core::System& system; |
| 141 | Core::Memory::Memory& memory; | ||
| 142 | Core::DeviceMemory& device_memory; | ||
| 143 | |||
| 144 | const u64 address_space_bits; | ||
| 145 | const u64 page_bits; | ||
| 146 | u64 address_space_size; | ||
| 147 | u64 page_size; | ||
| 148 | u64 page_mask; | ||
| 149 | u64 page_table_mask; | ||
| 150 | static constexpr u64 cpu_page_bits{12}; | ||
| 151 | |||
| 152 | const u64 big_page_bits; | ||
| 153 | u64 big_page_size; | ||
| 154 | u64 big_page_mask; | ||
| 155 | u64 big_page_table_mask; | ||
| 177 | 156 | ||
| 178 | VideoCore::RasterizerInterface* rasterizer = nullptr; | 157 | VideoCore::RasterizerInterface* rasterizer = nullptr; |
| 179 | 158 | ||
| 180 | std::vector<PageEntry> page_table; | 159 | enum class EntryType : u64 { |
| 160 | Free = 0, | ||
| 161 | Reserved = 1, | ||
| 162 | Mapped = 2, | ||
| 163 | }; | ||
| 164 | |||
| 165 | std::vector<u64> entries; | ||
| 166 | std::vector<u64> big_entries; | ||
| 167 | |||
| 168 | template <EntryType entry_type> | ||
| 169 | GPUVAddr PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size); | ||
| 170 | |||
| 171 | template <EntryType entry_type> | ||
| 172 | GPUVAddr BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size); | ||
| 173 | |||
| 174 | template <bool is_big_page> | ||
| 175 | inline EntryType GetEntry(size_t position) const; | ||
| 176 | |||
| 177 | template <bool is_big_page> | ||
| 178 | inline void SetEntry(size_t position, EntryType entry); | ||
| 179 | |||
| 180 | Common::MultiLevelPageTable<u32> page_table; | ||
| 181 | Common::VirtualBuffer<u32> big_page_table_cpu; | ||
| 182 | |||
| 183 | std::vector<u64> big_page_continous; | ||
| 184 | |||
| 185 | constexpr static size_t continous_bits = 64; | ||
| 181 | 186 | ||
| 182 | using MapRange = std::pair<GPUVAddr, size_t>; | 187 | const size_t unique_identifier; |
| 183 | std::vector<MapRange> map_ranges; | ||
| 184 | 188 | ||
| 185 | std::vector<std::pair<VAddr, std::size_t>> cache_invalidate_queue; | 189 | static std::atomic<size_t> unique_identifier_generator; |
| 186 | }; | 190 | }; |
| 187 | 191 | ||
| 188 | } // namespace Tegra | 192 | } // namespace Tegra |
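The memory_manager.h changes drop the old PageEntry class in favour of per-entry EntryType flags plus two translation tables (MultiLevelPageTable<u32> for small pages, VirtualBuffer<u32> big_page_table_cpu for big pages), and add a big_page_continous bitset (continous_bits = 64 bits per word) that IsBigPageContinous/SetBigPageContinous consult to decide whether a big page can be serviced with a single memcpy. The sketch below is an assumed reading of that bookkeeping, not the yuzu implementation; the class name and methods are illustrative.

    // Sketch of one-bit-per-big-page continuity tracking, packed 64 bits per u64 word.
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    class ContinuityBits {
    public:
        explicit ContinuityBits(std::size_t big_page_count)
            : bits((big_page_count + word_bits - 1) / word_bits, 0) {}

        bool Get(std::size_t big_page_index) const {
            return ((bits[big_page_index / word_bits] >> (big_page_index % word_bits)) & 1) != 0;
        }

        void Set(std::size_t big_page_index, bool value) {
            const std::uint64_t mask = std::uint64_t{1} << (big_page_index % word_bits);
            if (value) {
                bits[big_page_index / word_bits] |= mask;
            } else {
                bits[big_page_index / word_bits] &= ~mask;
            }
        }

    private:
        static constexpr std::size_t word_bits = 64; // continous_bits in the diff
        std::vector<std::uint64_t> bits;
    };

    int main() {
        ContinuityBits continuity(128); // room for 128 big pages
        continuity.Set(70, true);
        std::printf("%d %d\n", int(continuity.Get(70)), int(continuity.Get(71)));
    }

When the bit is clear, ReadBlockImpl/WriteBlockImpl in the .cpp hunks fall back to Memory::ReadBlockUnsafe/WriteBlockUnsafe instead of a direct memcpy through GetPointer.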
diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h index 889b606b3..b0ebe71b7 100644 --- a/src/video_core/query_cache.h +++ b/src/video_core/query_cache.h | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | 17 | ||
| 18 | #include "common/assert.h" | 18 | #include "common/assert.h" |
| 19 | #include "common/settings.h" | 19 | #include "common/settings.h" |
| 20 | #include "video_core/control/channel_state_cache.h" | ||
| 20 | #include "video_core/engines/maxwell_3d.h" | 21 | #include "video_core/engines/maxwell_3d.h" |
| 21 | #include "video_core/memory_manager.h" | 22 | #include "video_core/memory_manager.h" |
| 22 | #include "video_core/rasterizer_interface.h" | 23 | #include "video_core/rasterizer_interface.h" |
| @@ -90,13 +91,10 @@ private: | |||
| 90 | }; | 91 | }; |
| 91 | 92 | ||
| 92 | template <class QueryCache, class CachedQuery, class CounterStream, class HostCounter> | 93 | template <class QueryCache, class CachedQuery, class CounterStream, class HostCounter> |
| 93 | class QueryCacheBase { | 94 | class QueryCacheBase : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> { |
| 94 | public: | 95 | public: |
| 95 | explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_, | 96 | explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_) |
| 96 | Tegra::Engines::Maxwell3D& maxwell3d_, | 97 | : rasterizer{rasterizer_}, streams{{CounterStream{static_cast<QueryCache&>(*this), |
| 97 | Tegra::MemoryManager& gpu_memory_) | ||
| 98 | : rasterizer{rasterizer_}, maxwell3d{maxwell3d_}, | ||
| 99 | gpu_memory{gpu_memory_}, streams{{CounterStream{static_cast<QueryCache&>(*this), | ||
| 100 | VideoCore::QueryType::SamplesPassed}}} {} | 98 | VideoCore::QueryType::SamplesPassed}}} {} |
| 101 | 99 | ||
| 102 | void InvalidateRegion(VAddr addr, std::size_t size) { | 100 | void InvalidateRegion(VAddr addr, std::size_t size) { |
| @@ -117,13 +115,13 @@ public: | |||
| 117 | */ | 115 | */ |
| 118 | void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) { | 116 | void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) { |
| 119 | std::unique_lock lock{mutex}; | 117 | std::unique_lock lock{mutex}; |
| 120 | const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); | 118 | const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); |
| 121 | ASSERT(cpu_addr); | 119 | ASSERT(cpu_addr); |
| 122 | 120 | ||
| 123 | CachedQuery* query = TryGet(*cpu_addr); | 121 | CachedQuery* query = TryGet(*cpu_addr); |
| 124 | if (!query) { | 122 | if (!query) { |
| 125 | ASSERT_OR_EXECUTE(cpu_addr, return;); | 123 | ASSERT_OR_EXECUTE(cpu_addr, return;); |
| 126 | u8* const host_ptr = gpu_memory.GetPointer(gpu_addr); | 124 | u8* const host_ptr = gpu_memory->GetPointer(gpu_addr); |
| 127 | 125 | ||
| 128 | query = Register(type, *cpu_addr, host_ptr, timestamp.has_value()); | 126 | query = Register(type, *cpu_addr, host_ptr, timestamp.has_value()); |
| 129 | } | 127 | } |
| @@ -137,8 +135,10 @@ public: | |||
| 137 | /// Updates counters from GPU state. Expected to be called once per draw, clear or dispatch. | 135 | /// Updates counters from GPU state. Expected to be called once per draw, clear or dispatch. |
| 138 | void UpdateCounters() { | 136 | void UpdateCounters() { |
| 139 | std::unique_lock lock{mutex}; | 137 | std::unique_lock lock{mutex}; |
| 140 | const auto& regs = maxwell3d.regs; | 138 | if (maxwell3d) { |
| 141 | Stream(VideoCore::QueryType::SamplesPassed).Update(regs.samplecnt_enable); | 139 | const auto& regs = maxwell3d->regs; |
| 140 | Stream(VideoCore::QueryType::SamplesPassed).Update(regs.samplecnt_enable); | ||
| 141 | } | ||
| 142 | } | 142 | } |
| 143 | 143 | ||
| 144 | /// Resets a counter to zero. It doesn't disable the query after resetting. | 144 | /// Resets a counter to zero. It doesn't disable the query after resetting. |
| @@ -264,8 +264,6 @@ private: | |||
| 264 | static constexpr unsigned YUZU_PAGEBITS = 12; | 264 | static constexpr unsigned YUZU_PAGEBITS = 12; |
| 265 | 265 | ||
| 266 | VideoCore::RasterizerInterface& rasterizer; | 266 | VideoCore::RasterizerInterface& rasterizer; |
| 267 | Tegra::Engines::Maxwell3D& maxwell3d; | ||
| 268 | Tegra::MemoryManager& gpu_memory; | ||
| 269 | 267 | ||
| 270 | std::recursive_mutex mutex; | 268 | std::recursive_mutex mutex; |
| 271 | 269 | ||
diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h index a04a76481..d2d40884c 100644 --- a/src/video_core/rasterizer_interface.h +++ b/src/video_core/rasterizer_interface.h | |||
| @@ -16,6 +16,9 @@ class MemoryManager; | |||
| 16 | namespace Engines { | 16 | namespace Engines { |
| 17 | class AccelerateDMAInterface; | 17 | class AccelerateDMAInterface; |
| 18 | } | 18 | } |
| 19 | namespace Control { | ||
| 20 | struct ChannelState; | ||
| 21 | } | ||
| 19 | } // namespace Tegra | 22 | } // namespace Tegra |
| 20 | 23 | ||
| 21 | namespace VideoCore { | 24 | namespace VideoCore { |
| @@ -59,7 +62,10 @@ public: | |||
| 59 | virtual void DisableGraphicsUniformBuffer(size_t stage, u32 index) = 0; | 62 | virtual void DisableGraphicsUniformBuffer(size_t stage, u32 index) = 0; |
| 60 | 63 | ||
| 61 | /// Signal a GPU based semaphore as a fence | 64 | /// Signal a GPU based semaphore as a fence |
| 62 | virtual void SignalSemaphore(GPUVAddr addr, u32 value) = 0; | 65 | virtual void SignalFence(std::function<void()>&& func) = 0; |
| 66 | |||
| 67 | /// Send an operation to be done after a certain amount of flushes. | ||
| 68 | virtual void SyncOperation(std::function<void()>&& func) = 0; | ||
| 63 | 69 | ||
| 64 | /// Signal a GPU based syncpoint as a fence | 70 | /// Signal a GPU based syncpoint as a fence |
| 65 | virtual void SignalSyncPoint(u32 value) = 0; | 71 | virtual void SignalSyncPoint(u32 value) = 0; |
| @@ -86,13 +92,13 @@ public: | |||
| 86 | virtual void OnCPUWrite(VAddr addr, u64 size) = 0; | 92 | virtual void OnCPUWrite(VAddr addr, u64 size) = 0; |
| 87 | 93 | ||
| 88 | /// Sync memory between guest and host. | 94 | /// Sync memory between guest and host. |
| 89 | virtual void SyncGuestHost() = 0; | 95 | virtual void InvalidateGPUCache() = 0; |
| 90 | 96 | ||
| 91 | /// Unmap memory range | 97 | /// Unmap memory range |
| 92 | virtual void UnmapMemory(VAddr addr, u64 size) = 0; | 98 | virtual void UnmapMemory(VAddr addr, u64 size) = 0; |
| 93 | 99 | ||
| 94 | /// Remap GPU memory range. This means underneath backing memory changed | 100 | /// Remap GPU memory range. This means underneath backing memory changed |
| 95 | virtual void ModifyGPUMemory(GPUVAddr addr, u64 size) = 0; | 101 | virtual void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) = 0; |
| 96 | 102 | ||
| 97 | /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory | 103 | /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory |
| 98 | /// and invalidated | 104 | /// and invalidated |
| @@ -123,7 +129,7 @@ public: | |||
| 123 | [[nodiscard]] virtual Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() = 0; | 129 | [[nodiscard]] virtual Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() = 0; |
| 124 | 130 | ||
| 125 | virtual void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, | 131 | virtual void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, |
| 126 | std::span<u8> memory) = 0; | 132 | std::span<const u8> memory) = 0; |
| 127 | 133 | ||
| 128 | /// Attempt to use a faster method to display the framebuffer to screen | 134 | /// Attempt to use a faster method to display the framebuffer to screen |
| 129 | [[nodiscard]] virtual bool AccelerateDisplay(const Tegra::FramebufferConfig& config, | 135 | [[nodiscard]] virtual bool AccelerateDisplay(const Tegra::FramebufferConfig& config, |
| @@ -137,5 +143,11 @@ public: | |||
| 137 | /// Initialize disk cached resources for the game being emulated | 143 | /// Initialize disk cached resources for the game being emulated |
| 138 | virtual void LoadDiskResources(u64 title_id, std::stop_token stop_loading, | 144 | virtual void LoadDiskResources(u64 title_id, std::stop_token stop_loading, |
| 139 | const DiskResourceLoadCallback& callback) {} | 145 | const DiskResourceLoadCallback& callback) {} |
| 146 | |||
| 147 | virtual void InitializeChannel(Tegra::Control::ChannelState& channel) {} | ||
| 148 | |||
| 149 | virtual void BindChannel(Tegra::Control::ChannelState& channel) {} | ||
| 150 | |||
| 151 | virtual void ReleaseChannel(s32 channel_id) {} | ||
| 140 | }; | 152 | }; |
| 141 | } // namespace VideoCore | 153 | } // namespace VideoCore |
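In rasterizer_interface.h, SignalSemaphore(GPUVAddr, u32) is replaced by SignalFence(std::function<void()>&&) plus SyncOperation, so fence completion no longer carries an address/payload pair; instead the caller captures whatever write-back or syncpoint work is needed in a callback. The sketch below is a generic illustration of that idea, not the yuzu FenceManager; the class and method names are made up for the example.

    // Generic sketch: completion work captured as callbacks, run once a fence signals.
    #include <cstdio>
    #include <functional>
    #include <queue>
    #include <utility>

    class DeferredFenceQueue {
    public:
        // Queue work to run when the pending fence is observed as signaled.
        void SignalFence(std::function<void()>&& func) {
            pending.push(std::move(func));
        }

        // Called by the backend after it waits on or polls the host fence.
        void OnFenceSignaled() {
            while (!pending.empty()) {
                pending.front()();
                pending.pop();
            }
        }

    private:
        std::queue<std::function<void()>> pending;
    };

    int main() {
        DeferredFenceQueue queue;
        queue.SignalFence([] { std::printf("write back semaphore value\n"); });
        queue.SignalFence([] { std::printf("increment syncpoint\n"); });
        queue.OnFenceSignaled(); // runs both callbacks in order
    }

This also explains the fence manager changes further down: CreateFence no longer needs an address or payload, only the stubbed flag.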
diff --git a/src/video_core/renderer_opengl/gl_compute_pipeline.cpp b/src/video_core/renderer_opengl/gl_compute_pipeline.cpp index 1f0f156ed..26d066004 100644 --- a/src/video_core/renderer_opengl/gl_compute_pipeline.cpp +++ b/src/video_core/renderer_opengl/gl_compute_pipeline.cpp | |||
| @@ -28,12 +28,11 @@ bool ComputePipelineKey::operator==(const ComputePipelineKey& rhs) const noexcep | |||
| 28 | } | 28 | } |
| 29 | 29 | ||
| 30 | ComputePipeline::ComputePipeline(const Device& device, TextureCache& texture_cache_, | 30 | ComputePipeline::ComputePipeline(const Device& device, TextureCache& texture_cache_, |
| 31 | BufferCache& buffer_cache_, Tegra::MemoryManager& gpu_memory_, | 31 | BufferCache& buffer_cache_, ProgramManager& program_manager_, |
| 32 | Tegra::Engines::KeplerCompute& kepler_compute_, | 32 | const Shader::Info& info_, std::string code, |
| 33 | ProgramManager& program_manager_, const Shader::Info& info_, | 33 | std::vector<u32> code_v) |
| 34 | std::string code, std::vector<u32> code_v) | 34 | : texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, |
| 35 | : texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, gpu_memory{gpu_memory_}, | 35 | program_manager{program_manager_}, info{info_} { |
| 36 | kepler_compute{kepler_compute_}, program_manager{program_manager_}, info{info_} { | ||
| 37 | switch (device.GetShaderBackend()) { | 36 | switch (device.GetShaderBackend()) { |
| 38 | case Settings::ShaderBackend::GLSL: | 37 | case Settings::ShaderBackend::GLSL: |
| 39 | source_program = CreateProgram(code, GL_COMPUTE_SHADER); | 38 | source_program = CreateProgram(code, GL_COMPUTE_SHADER); |
| @@ -86,7 +85,7 @@ void ComputePipeline::Configure() { | |||
| 86 | GLsizei texture_binding{}; | 85 | GLsizei texture_binding{}; |
| 87 | GLsizei image_binding{}; | 86 | GLsizei image_binding{}; |
| 88 | 87 | ||
| 89 | const auto& qmd{kepler_compute.launch_description}; | 88 | const auto& qmd{kepler_compute->launch_description}; |
| 90 | const auto& cbufs{qmd.const_buffer_config}; | 89 | const auto& cbufs{qmd.const_buffer_config}; |
| 91 | const bool via_header_index{qmd.linked_tsc != 0}; | 90 | const bool via_header_index{qmd.linked_tsc != 0}; |
| 92 | const auto read_handle{[&](const auto& desc, u32 index) { | 91 | const auto read_handle{[&](const auto& desc, u32 index) { |
| @@ -101,12 +100,13 @@ void ComputePipeline::Configure() { | |||
| 101 | const u32 secondary_offset{desc.secondary_cbuf_offset + index_offset}; | 100 | const u32 secondary_offset{desc.secondary_cbuf_offset + index_offset}; |
| 102 | const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].Address() + | 101 | const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].Address() + |
| 103 | secondary_offset}; | 102 | secondary_offset}; |
| 104 | const u32 lhs_raw{gpu_memory.Read<u32>(addr)}; | 103 | const u32 lhs_raw{gpu_memory->Read<u32>(addr) << desc.shift_left}; |
| 105 | const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)}; | 104 | const u32 rhs_raw{gpu_memory->Read<u32>(separate_addr) |
| 105 | << desc.secondary_shift_left}; | ||
| 106 | return TexturePair(lhs_raw | rhs_raw, via_header_index); | 106 | return TexturePair(lhs_raw | rhs_raw, via_header_index); |
| 107 | } | 107 | } |
| 108 | } | 108 | } |
| 109 | return TexturePair(gpu_memory.Read<u32>(addr), via_header_index); | 109 | return TexturePair(gpu_memory->Read<u32>(addr), via_header_index); |
| 110 | }}; | 110 | }}; |
| 111 | const auto add_image{[&](const auto& desc, bool blacklist) { | 111 | const auto add_image{[&](const auto& desc, bool blacklist) { |
| 112 | for (u32 index = 0; index < desc.count; ++index) { | 112 | for (u32 index = 0; index < desc.count; ++index) { |
diff --git a/src/video_core/renderer_opengl/gl_compute_pipeline.h b/src/video_core/renderer_opengl/gl_compute_pipeline.h index 723f27f11..6534dec32 100644 --- a/src/video_core/renderer_opengl/gl_compute_pipeline.h +++ b/src/video_core/renderer_opengl/gl_compute_pipeline.h | |||
| @@ -49,10 +49,8 @@ static_assert(std::is_trivially_constructible_v<ComputePipelineKey>); | |||
| 49 | class ComputePipeline { | 49 | class ComputePipeline { |
| 50 | public: | 50 | public: |
| 51 | explicit ComputePipeline(const Device& device, TextureCache& texture_cache_, | 51 | explicit ComputePipeline(const Device& device, TextureCache& texture_cache_, |
| 52 | BufferCache& buffer_cache_, Tegra::MemoryManager& gpu_memory_, | 52 | BufferCache& buffer_cache_, ProgramManager& program_manager_, |
| 53 | Tegra::Engines::KeplerCompute& kepler_compute_, | 53 | const Shader::Info& info_, std::string code, std::vector<u32> code_v); |
| 54 | ProgramManager& program_manager_, const Shader::Info& info_, | ||
| 55 | std::string code, std::vector<u32> code_v); | ||
| 56 | 54 | ||
| 57 | void Configure(); | 55 | void Configure(); |
| 58 | 56 | ||
| @@ -60,11 +58,17 @@ public: | |||
| 60 | return writes_global_memory; | 58 | return writes_global_memory; |
| 61 | } | 59 | } |
| 62 | 60 | ||
| 61 | void SetEngine(Tegra::Engines::KeplerCompute* kepler_compute_, | ||
| 62 | Tegra::MemoryManager* gpu_memory_) { | ||
| 63 | kepler_compute = kepler_compute_; | ||
| 64 | gpu_memory = gpu_memory_; | ||
| 65 | } | ||
| 66 | |||
| 63 | private: | 67 | private: |
| 64 | TextureCache& texture_cache; | 68 | TextureCache& texture_cache; |
| 65 | BufferCache& buffer_cache; | 69 | BufferCache& buffer_cache; |
| 66 | Tegra::MemoryManager& gpu_memory; | 70 | Tegra::MemoryManager* gpu_memory; |
| 67 | Tegra::Engines::KeplerCompute& kepler_compute; | 71 | Tegra::Engines::KeplerCompute* kepler_compute; |
| 68 | ProgramManager& program_manager; | 72 | ProgramManager& program_manager; |
| 69 | 73 | ||
| 70 | Shader::Info info; | 74 | Shader::Info info; |
diff --git a/src/video_core/renderer_opengl/gl_fence_manager.cpp b/src/video_core/renderer_opengl/gl_fence_manager.cpp index 6e82c2e28..91463f854 100644 --- a/src/video_core/renderer_opengl/gl_fence_manager.cpp +++ b/src/video_core/renderer_opengl/gl_fence_manager.cpp | |||
| @@ -10,10 +10,7 @@ | |||
| 10 | 10 | ||
| 11 | namespace OpenGL { | 11 | namespace OpenGL { |
| 12 | 12 | ||
| 13 | GLInnerFence::GLInnerFence(u32 payload_, bool is_stubbed_) : FenceBase{payload_, is_stubbed_} {} | 13 | GLInnerFence::GLInnerFence(bool is_stubbed_) : FenceBase{is_stubbed_} {} |
| 14 | |||
| 15 | GLInnerFence::GLInnerFence(GPUVAddr address_, u32 payload_, bool is_stubbed_) | ||
| 16 | : FenceBase{address_, payload_, is_stubbed_} {} | ||
| 17 | 14 | ||
| 18 | GLInnerFence::~GLInnerFence() = default; | 15 | GLInnerFence::~GLInnerFence() = default; |
| 19 | 16 | ||
| @@ -48,12 +45,8 @@ FenceManagerOpenGL::FenceManagerOpenGL(VideoCore::RasterizerInterface& rasterize | |||
| 48 | BufferCache& buffer_cache_, QueryCache& query_cache_) | 45 | BufferCache& buffer_cache_, QueryCache& query_cache_) |
| 49 | : GenericFenceManager{rasterizer_, gpu_, texture_cache_, buffer_cache_, query_cache_} {} | 46 | : GenericFenceManager{rasterizer_, gpu_, texture_cache_, buffer_cache_, query_cache_} {} |
| 50 | 47 | ||
| 51 | Fence FenceManagerOpenGL::CreateFence(u32 value, bool is_stubbed) { | 48 | Fence FenceManagerOpenGL::CreateFence(bool is_stubbed) { |
| 52 | return std::make_shared<GLInnerFence>(value, is_stubbed); | 49 | return std::make_shared<GLInnerFence>(is_stubbed); |
| 53 | } | ||
| 54 | |||
| 55 | Fence FenceManagerOpenGL::CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) { | ||
| 56 | return std::make_shared<GLInnerFence>(addr, value, is_stubbed); | ||
| 57 | } | 50 | } |
| 58 | 51 | ||
| 59 | void FenceManagerOpenGL::QueueFence(Fence& fence) { | 52 | void FenceManagerOpenGL::QueueFence(Fence& fence) { |
diff --git a/src/video_core/renderer_opengl/gl_fence_manager.h b/src/video_core/renderer_opengl/gl_fence_manager.h index 14ff00db2..f1446e732 100644 --- a/src/video_core/renderer_opengl/gl_fence_manager.h +++ b/src/video_core/renderer_opengl/gl_fence_manager.h | |||
| @@ -16,8 +16,7 @@ namespace OpenGL { | |||
| 16 | 16 | ||
| 17 | class GLInnerFence : public VideoCommon::FenceBase { | 17 | class GLInnerFence : public VideoCommon::FenceBase { |
| 18 | public: | 18 | public: |
| 19 | explicit GLInnerFence(u32 payload_, bool is_stubbed_); | 19 | explicit GLInnerFence(bool is_stubbed_); |
| 20 | explicit GLInnerFence(GPUVAddr address_, u32 payload_, bool is_stubbed_); | ||
| 21 | ~GLInnerFence(); | 20 | ~GLInnerFence(); |
| 22 | 21 | ||
| 23 | void Queue(); | 22 | void Queue(); |
| @@ -40,8 +39,7 @@ public: | |||
| 40 | QueryCache& query_cache); | 39 | QueryCache& query_cache); |
| 41 | 40 | ||
| 42 | protected: | 41 | protected: |
| 43 | Fence CreateFence(u32 value, bool is_stubbed) override; | 42 | Fence CreateFence(bool is_stubbed) override; |
| 44 | Fence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) override; | ||
| 45 | void QueueFence(Fence& fence) override; | 43 | void QueueFence(Fence& fence) override; |
| 46 | bool IsFenceSignaled(Fence& fence) const override; | 44 | bool IsFenceSignaled(Fence& fence) const override; |
| 47 | void WaitFence(Fence& fence) override; | 45 | void WaitFence(Fence& fence) override; |
diff --git a/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp b/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp index 67eae369d..41493a7da 100644 --- a/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp +++ b/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp | |||
| @@ -169,15 +169,15 @@ ConfigureFuncPtr ConfigureFunc(const std::array<Shader::Info, 5>& infos, u32 ena | |||
| 169 | } | 169 | } |
| 170 | } // Anonymous namespace | 170 | } // Anonymous namespace |
| 171 | 171 | ||
| 172 | GraphicsPipeline::GraphicsPipeline( | 172 | GraphicsPipeline::GraphicsPipeline(const Device& device, TextureCache& texture_cache_, |
| 173 | const Device& device, TextureCache& texture_cache_, BufferCache& buffer_cache_, | 173 | BufferCache& buffer_cache_, ProgramManager& program_manager_, |
| 174 | Tegra::MemoryManager& gpu_memory_, Tegra::Engines::Maxwell3D& maxwell3d_, | 174 | StateTracker& state_tracker_, ShaderWorker* thread_worker, |
| 175 | ProgramManager& program_manager_, StateTracker& state_tracker_, ShaderWorker* thread_worker, | 175 | VideoCore::ShaderNotify* shader_notify, |
| 176 | VideoCore::ShaderNotify* shader_notify, std::array<std::string, 5> sources, | 176 | std::array<std::string, 5> sources, |
| 177 | std::array<std::vector<u32>, 5> sources_spirv, const std::array<const Shader::Info*, 5>& infos, | 177 | std::array<std::vector<u32>, 5> sources_spirv, |
| 178 | const GraphicsPipelineKey& key_) | 178 | const std::array<const Shader::Info*, 5>& infos, |
| 179 | : texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, | 179 | const GraphicsPipelineKey& key_) |
| 180 | gpu_memory{gpu_memory_}, maxwell3d{maxwell3d_}, program_manager{program_manager_}, | 180 | : texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, program_manager{program_manager_}, |
| 181 | state_tracker{state_tracker_}, key{key_} { | 181 | state_tracker{state_tracker_}, key{key_} { |
| 182 | if (shader_notify) { | 182 | if (shader_notify) { |
| 183 | shader_notify->MarkShaderBuilding(); | 183 | shader_notify->MarkShaderBuilding(); |
| @@ -285,7 +285,7 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) { | |||
| 285 | buffer_cache.runtime.SetBaseStorageBindings(base_storage_bindings); | 285 | buffer_cache.runtime.SetBaseStorageBindings(base_storage_bindings); |
| 286 | buffer_cache.runtime.SetEnableStorageBuffers(use_storage_buffers); | 286 | buffer_cache.runtime.SetEnableStorageBuffers(use_storage_buffers); |
| 287 | 287 | ||
| 288 | const auto& regs{maxwell3d.regs}; | 288 | const auto& regs{maxwell3d->regs}; |
| 289 | const bool via_header_index{regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex}; | 289 | const bool via_header_index{regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex}; |
| 290 | const auto config_stage{[&](size_t stage) LAMBDA_FORCEINLINE { | 290 | const auto config_stage{[&](size_t stage) LAMBDA_FORCEINLINE { |
| 291 | const Shader::Info& info{stage_infos[stage]}; | 291 | const Shader::Info& info{stage_infos[stage]}; |
| @@ -299,7 +299,7 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) { | |||
| 299 | ++ssbo_index; | 299 | ++ssbo_index; |
| 300 | } | 300 | } |
| 301 | } | 301 | } |
| 302 | const auto& cbufs{maxwell3d.state.shader_stages[stage].const_buffers}; | 302 | const auto& cbufs{maxwell3d->state.shader_stages[stage].const_buffers}; |
| 303 | const auto read_handle{[&](const auto& desc, u32 index) { | 303 | const auto read_handle{[&](const auto& desc, u32 index) { |
| 304 | ASSERT(cbufs[desc.cbuf_index].enabled); | 304 | ASSERT(cbufs[desc.cbuf_index].enabled); |
| 305 | const u32 index_offset{index << desc.size_shift}; | 305 | const u32 index_offset{index << desc.size_shift}; |
| @@ -312,13 +312,14 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) { | |||
| 312 | const u32 second_offset{desc.secondary_cbuf_offset + index_offset}; | 312 | const u32 second_offset{desc.secondary_cbuf_offset + index_offset}; |
| 313 | const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].address + | 313 | const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].address + |
| 314 | second_offset}; | 314 | second_offset}; |
| 315 | const u32 lhs_raw{gpu_memory.Read<u32>(addr)}; | 315 | const u32 lhs_raw{gpu_memory->Read<u32>(addr) << desc.shift_left}; |
| 316 | const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)}; | 316 | const u32 rhs_raw{gpu_memory->Read<u32>(separate_addr) |
| 317 | << desc.secondary_shift_left}; | ||
| 317 | const u32 raw{lhs_raw | rhs_raw}; | 318 | const u32 raw{lhs_raw | rhs_raw}; |
| 318 | return TexturePair(raw, via_header_index); | 319 | return TexturePair(raw, via_header_index); |
| 319 | } | 320 | } |
| 320 | } | 321 | } |
| 321 | return TexturePair(gpu_memory.Read<u32>(addr), via_header_index); | 322 | return TexturePair(gpu_memory->Read<u32>(addr), via_header_index); |
| 322 | }}; | 323 | }}; |
| 323 | const auto add_image{[&](const auto& desc, bool blacklist) LAMBDA_FORCEINLINE { | 324 | const auto add_image{[&](const auto& desc, bool blacklist) LAMBDA_FORCEINLINE { |
| 324 | for (u32 index = 0; index < desc.count; ++index) { | 325 | for (u32 index = 0; index < desc.count; ++index) { |
diff --git a/src/video_core/renderer_opengl/gl_graphics_pipeline.h b/src/video_core/renderer_opengl/gl_graphics_pipeline.h index 4ec15b966..a0f0e63cb 100644 --- a/src/video_core/renderer_opengl/gl_graphics_pipeline.h +++ b/src/video_core/renderer_opengl/gl_graphics_pipeline.h | |||
| @@ -71,10 +71,9 @@ static_assert(std::is_trivially_constructible_v<GraphicsPipelineKey>); | |||
| 71 | class GraphicsPipeline { | 71 | class GraphicsPipeline { |
| 72 | public: | 72 | public: |
| 73 | explicit GraphicsPipeline(const Device& device, TextureCache& texture_cache_, | 73 | explicit GraphicsPipeline(const Device& device, TextureCache& texture_cache_, |
| 74 | BufferCache& buffer_cache_, Tegra::MemoryManager& gpu_memory_, | 74 | BufferCache& buffer_cache_, ProgramManager& program_manager_, |
| 75 | Tegra::Engines::Maxwell3D& maxwell3d_, | 75 | StateTracker& state_tracker_, ShaderWorker* thread_worker, |
| 76 | ProgramManager& program_manager_, StateTracker& state_tracker_, | 76 | VideoCore::ShaderNotify* shader_notify, |
| 77 | ShaderWorker* thread_worker, VideoCore::ShaderNotify* shader_notify, | ||
| 78 | std::array<std::string, 5> sources, | 77 | std::array<std::string, 5> sources, |
| 79 | std::array<std::vector<u32>, 5> sources_spirv, | 78 | std::array<std::vector<u32>, 5> sources_spirv, |
| 80 | const std::array<const Shader::Info*, 5>& infos, | 79 | const std::array<const Shader::Info*, 5>& infos, |
| @@ -107,6 +106,11 @@ public: | |||
| 107 | }; | 106 | }; |
| 108 | } | 107 | } |
| 109 | 108 | ||
| 109 | void SetEngine(Tegra::Engines::Maxwell3D* maxwell3d_, Tegra::MemoryManager* gpu_memory_) { | ||
| 110 | maxwell3d = maxwell3d_; | ||
| 111 | gpu_memory = gpu_memory_; | ||
| 112 | } | ||
| 113 | |||
| 110 | private: | 114 | private: |
| 111 | template <typename Spec> | 115 | template <typename Spec> |
| 112 | void ConfigureImpl(bool is_indexed); | 116 | void ConfigureImpl(bool is_indexed); |
| @@ -119,8 +123,8 @@ private: | |||
| 119 | 123 | ||
| 120 | TextureCache& texture_cache; | 124 | TextureCache& texture_cache; |
| 121 | BufferCache& buffer_cache; | 125 | BufferCache& buffer_cache; |
| 122 | Tegra::MemoryManager& gpu_memory; | 126 | Tegra::MemoryManager* gpu_memory; |
| 123 | Tegra::Engines::Maxwell3D& maxwell3d; | 127 | Tegra::Engines::Maxwell3D* maxwell3d; |
| 124 | ProgramManager& program_manager; | 128 | ProgramManager& program_manager; |
| 125 | StateTracker& state_tracker; | 129 | StateTracker& state_tracker; |
| 126 | const GraphicsPipelineKey key; | 130 | const GraphicsPipelineKey key; |
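With gpu_memory and maxwell3d now stored as pointers, the pipeline no longer captures a single channel's engines at construction; the rasterizer rebinds them through SetEngine() immediately before Configure() on every draw (see the gl_rasterizer.cpp hunks below). A minimal, self-contained sketch of that reference-to-pointer pattern, using placeholder Engine/Memory types rather than the yuzu classes:

    struct Engine {};
    struct Memory {};

    class PipelineSketch {
    public:
        // Rebind the active channel's engine and memory before each use.
        void SetEngine(Engine* engine_, Memory* memory_) {
            engine = engine_;
            memory = memory_;
        }
        // Configure() then reads exclusively through the rebound pointers.
        void Configure() { /* use *engine and *memory for the bound channel */ }

    private:
        Engine* engine{};
        Memory* memory{};
    };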
diff --git a/src/video_core/renderer_opengl/gl_query_cache.cpp b/src/video_core/renderer_opengl/gl_query_cache.cpp index ed40f5791..5070db441 100644 --- a/src/video_core/renderer_opengl/gl_query_cache.cpp +++ b/src/video_core/renderer_opengl/gl_query_cache.cpp | |||
| @@ -26,9 +26,8 @@ constexpr GLenum GetTarget(VideoCore::QueryType type) { | |||
| 26 | 26 | ||
| 27 | } // Anonymous namespace | 27 | } // Anonymous namespace |
| 28 | 28 | ||
| 29 | QueryCache::QueryCache(RasterizerOpenGL& rasterizer_, Tegra::Engines::Maxwell3D& maxwell3d_, | 29 | QueryCache::QueryCache(RasterizerOpenGL& rasterizer_) |
| 30 | Tegra::MemoryManager& gpu_memory_) | 30 | : QueryCacheBase(rasterizer_), gl_rasterizer{rasterizer_} {} |
| 31 | : QueryCacheBase(rasterizer_, maxwell3d_, gpu_memory_), gl_rasterizer{rasterizer_} {} | ||
| 32 | 31 | ||
| 33 | QueryCache::~QueryCache() = default; | 32 | QueryCache::~QueryCache() = default; |
| 34 | 33 | ||
diff --git a/src/video_core/renderer_opengl/gl_query_cache.h b/src/video_core/renderer_opengl/gl_query_cache.h index 8a49f1ef0..14ce59990 100644 --- a/src/video_core/renderer_opengl/gl_query_cache.h +++ b/src/video_core/renderer_opengl/gl_query_cache.h | |||
| @@ -28,8 +28,7 @@ using CounterStream = VideoCommon::CounterStreamBase<QueryCache, HostCounter>; | |||
| 28 | class QueryCache final | 28 | class QueryCache final |
| 29 | : public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> { | 29 | : public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> { |
| 30 | public: | 30 | public: |
| 31 | explicit QueryCache(RasterizerOpenGL& rasterizer_, Tegra::Engines::Maxwell3D& maxwell3d_, | 31 | explicit QueryCache(RasterizerOpenGL& rasterizer_); |
| 32 | Tegra::MemoryManager& gpu_memory_); | ||
| 33 | ~QueryCache(); | 32 | ~QueryCache(); |
| 34 | 33 | ||
| 35 | OGLQuery AllocateQuery(VideoCore::QueryType type); | 34 | OGLQuery AllocateQuery(VideoCore::QueryType type); |
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index a0d048b0b..c2d80605d 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp | |||
| @@ -16,7 +16,7 @@ | |||
| 16 | #include "common/microprofile.h" | 16 | #include "common/microprofile.h" |
| 17 | #include "common/scope_exit.h" | 17 | #include "common/scope_exit.h" |
| 18 | #include "common/settings.h" | 18 | #include "common/settings.h" |
| 19 | 19 | #include "video_core/control/channel_state.h" | |
| 20 | #include "video_core/engines/kepler_compute.h" | 20 | #include "video_core/engines/kepler_compute.h" |
| 21 | #include "video_core/engines/maxwell_3d.h" | 21 | #include "video_core/engines/maxwell_3d.h" |
| 22 | #include "video_core/memory_manager.h" | 22 | #include "video_core/memory_manager.h" |
| @@ -56,22 +56,20 @@ RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra | |||
| 56 | Core::Memory::Memory& cpu_memory_, const Device& device_, | 56 | Core::Memory::Memory& cpu_memory_, const Device& device_, |
| 57 | ScreenInfo& screen_info_, ProgramManager& program_manager_, | 57 | ScreenInfo& screen_info_, ProgramManager& program_manager_, |
| 58 | StateTracker& state_tracker_) | 58 | StateTracker& state_tracker_) |
| 59 | : RasterizerAccelerated(cpu_memory_), gpu(gpu_), maxwell3d(gpu.Maxwell3D()), | 59 | : RasterizerAccelerated(cpu_memory_), gpu(gpu_), device(device_), screen_info(screen_info_), |
| 60 | kepler_compute(gpu.KeplerCompute()), gpu_memory(gpu.MemoryManager()), device(device_), | 60 | program_manager(program_manager_), state_tracker(state_tracker_), |
| 61 | screen_info(screen_info_), program_manager(program_manager_), state_tracker(state_tracker_), | ||
| 62 | texture_cache_runtime(device, program_manager, state_tracker), | 61 | texture_cache_runtime(device, program_manager, state_tracker), |
| 63 | texture_cache(texture_cache_runtime, *this, maxwell3d, kepler_compute, gpu_memory), | 62 | texture_cache(texture_cache_runtime, *this), buffer_cache_runtime(device), |
| 64 | buffer_cache_runtime(device), | 63 | buffer_cache(*this, cpu_memory_, buffer_cache_runtime), |
| 65 | buffer_cache(*this, maxwell3d, kepler_compute, gpu_memory, cpu_memory_, buffer_cache_runtime), | 64 | shader_cache(*this, emu_window_, device, texture_cache, buffer_cache, program_manager, |
| 66 | shader_cache(*this, emu_window_, maxwell3d, kepler_compute, gpu_memory, device, texture_cache, | 65 | state_tracker, gpu.ShaderNotify()), |
| 67 | buffer_cache, program_manager, state_tracker, gpu.ShaderNotify()), | 66 | query_cache(*this), accelerate_dma(buffer_cache), |
| 68 | query_cache(*this, maxwell3d, gpu_memory), accelerate_dma(buffer_cache), | ||
| 69 | fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache) {} | 67 | fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache) {} |
| 70 | 68 | ||
| 71 | RasterizerOpenGL::~RasterizerOpenGL() = default; | 69 | RasterizerOpenGL::~RasterizerOpenGL() = default; |
| 72 | 70 | ||
| 73 | void RasterizerOpenGL::SyncVertexFormats() { | 71 | void RasterizerOpenGL::SyncVertexFormats() { |
| 74 | auto& flags = maxwell3d.dirty.flags; | 72 | auto& flags = maxwell3d->dirty.flags; |
| 75 | if (!flags[Dirty::VertexFormats]) { | 73 | if (!flags[Dirty::VertexFormats]) { |
| 76 | return; | 74 | return; |
| 77 | } | 75 | } |
| @@ -89,7 +87,7 @@ void RasterizerOpenGL::SyncVertexFormats() { | |||
| 89 | } | 87 | } |
| 90 | flags[Dirty::VertexFormat0 + index] = false; | 88 | flags[Dirty::VertexFormat0 + index] = false; |
| 91 | 89 | ||
| 92 | const auto attrib = maxwell3d.regs.vertex_attrib_format[index]; | 90 | const auto attrib = maxwell3d->regs.vertex_attrib_format[index]; |
| 93 | const auto gl_index = static_cast<GLuint>(index); | 91 | const auto gl_index = static_cast<GLuint>(index); |
| 94 | 92 | ||
| 95 | // Disable constant attributes. | 93 | // Disable constant attributes. |
| @@ -113,13 +111,13 @@ void RasterizerOpenGL::SyncVertexFormats() { | |||
| 113 | } | 111 | } |
| 114 | 112 | ||
| 115 | void RasterizerOpenGL::SyncVertexInstances() { | 113 | void RasterizerOpenGL::SyncVertexInstances() { |
| 116 | auto& flags = maxwell3d.dirty.flags; | 114 | auto& flags = maxwell3d->dirty.flags; |
| 117 | if (!flags[Dirty::VertexInstances]) { | 115 | if (!flags[Dirty::VertexInstances]) { |
| 118 | return; | 116 | return; |
| 119 | } | 117 | } |
| 120 | flags[Dirty::VertexInstances] = false; | 118 | flags[Dirty::VertexInstances] = false; |
| 121 | 119 | ||
| 122 | const auto& regs = maxwell3d.regs; | 120 | const auto& regs = maxwell3d->regs; |
| 123 | for (std::size_t index = 0; index < NUM_SUPPORTED_VERTEX_ATTRIBUTES; ++index) { | 121 | for (std::size_t index = 0; index < NUM_SUPPORTED_VERTEX_ATTRIBUTES; ++index) { |
| 124 | if (!flags[Dirty::VertexInstance0 + index]) { | 122 | if (!flags[Dirty::VertexInstance0 + index]) { |
| 125 | continue; | 123 | continue; |
| @@ -140,11 +138,11 @@ void RasterizerOpenGL::LoadDiskResources(u64 title_id, std::stop_token stop_load | |||
| 140 | 138 | ||
| 141 | void RasterizerOpenGL::Clear() { | 139 | void RasterizerOpenGL::Clear() { |
| 142 | MICROPROFILE_SCOPE(OpenGL_Clears); | 140 | MICROPROFILE_SCOPE(OpenGL_Clears); |
| 143 | if (!maxwell3d.ShouldExecute()) { | 141 | if (!maxwell3d->ShouldExecute()) { |
| 144 | return; | 142 | return; |
| 145 | } | 143 | } |
| 146 | 144 | ||
| 147 | const auto& regs = maxwell3d.regs; | 145 | const auto& regs = maxwell3d->regs; |
| 148 | bool use_color{}; | 146 | bool use_color{}; |
| 149 | bool use_depth{}; | 147 | bool use_depth{}; |
| 150 | bool use_stencil{}; | 148 | bool use_stencil{}; |
| @@ -217,22 +215,26 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) { | |||
| 217 | if (!pipeline) { | 215 | if (!pipeline) { |
| 218 | return; | 216 | return; |
| 219 | } | 217 | } |
| 218 | |||
| 219 | gpu.TickWork(); | ||
| 220 | |||
| 220 | std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; | 221 | std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; |
| 222 | pipeline->SetEngine(maxwell3d, gpu_memory); | ||
| 221 | pipeline->Configure(is_indexed); | 223 | pipeline->Configure(is_indexed); |
| 222 | 224 | ||
| 223 | SyncState(); | 225 | SyncState(); |
| 224 | 226 | ||
| 225 | const GLenum primitive_mode = MaxwellToGL::PrimitiveTopology(maxwell3d.regs.draw.topology); | 227 | const GLenum primitive_mode = MaxwellToGL::PrimitiveTopology(maxwell3d->regs.draw.topology); |
| 226 | BeginTransformFeedback(pipeline, primitive_mode); | 228 | BeginTransformFeedback(pipeline, primitive_mode); |
| 227 | 229 | ||
| 228 | const GLuint base_instance = static_cast<GLuint>(maxwell3d.regs.vb_base_instance); | 230 | const GLuint base_instance = static_cast<GLuint>(maxwell3d->regs.vb_base_instance); |
| 229 | const GLsizei num_instances = | 231 | const GLsizei num_instances = |
| 230 | static_cast<GLsizei>(is_instanced ? maxwell3d.mme_draw.instance_count : 1); | 232 | static_cast<GLsizei>(is_instanced ? maxwell3d->mme_draw.instance_count : 1); |
| 231 | if (is_indexed) { | 233 | if (is_indexed) { |
| 232 | const GLint base_vertex = static_cast<GLint>(maxwell3d.regs.vb_element_base); | 234 | const GLint base_vertex = static_cast<GLint>(maxwell3d->regs.vb_element_base); |
| 233 | const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d.regs.index_array.count); | 235 | const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d->regs.index_array.count); |
| 234 | const GLvoid* const offset = buffer_cache_runtime.IndexOffset(); | 236 | const GLvoid* const offset = buffer_cache_runtime.IndexOffset(); |
| 235 | const GLenum format = MaxwellToGL::IndexFormat(maxwell3d.regs.index_array.format); | 237 | const GLenum format = MaxwellToGL::IndexFormat(maxwell3d->regs.index_array.format); |
| 236 | if (num_instances == 1 && base_instance == 0 && base_vertex == 0) { | 238 | if (num_instances == 1 && base_instance == 0 && base_vertex == 0) { |
| 237 | glDrawElements(primitive_mode, num_vertices, format, offset); | 239 | glDrawElements(primitive_mode, num_vertices, format, offset); |
| 238 | } else if (num_instances == 1 && base_instance == 0) { | 240 | } else if (num_instances == 1 && base_instance == 0) { |
| @@ -251,8 +253,8 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) { | |||
| 251 | base_instance); | 253 | base_instance); |
| 252 | } | 254 | } |
| 253 | } else { | 255 | } else { |
| 254 | const GLint base_vertex = static_cast<GLint>(maxwell3d.regs.vertex_buffer.first); | 256 | const GLint base_vertex = static_cast<GLint>(maxwell3d->regs.vertex_buffer.first); |
| 255 | const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d.regs.vertex_buffer.count); | 257 | const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d->regs.vertex_buffer.count); |
| 256 | if (num_instances == 1 && base_instance == 0) { | 258 | if (num_instances == 1 && base_instance == 0) { |
| 257 | glDrawArrays(primitive_mode, base_vertex, num_vertices); | 259 | glDrawArrays(primitive_mode, base_vertex, num_vertices); |
| 258 | } else if (base_instance == 0) { | 260 | } else if (base_instance == 0) { |
| @@ -273,8 +275,9 @@ void RasterizerOpenGL::DispatchCompute() { | |||
| 273 | if (!pipeline) { | 275 | if (!pipeline) { |
| 274 | return; | 276 | return; |
| 275 | } | 277 | } |
| 278 | pipeline->SetEngine(kepler_compute, gpu_memory); | ||
| 276 | pipeline->Configure(); | 279 | pipeline->Configure(); |
| 277 | const auto& qmd{kepler_compute.launch_description}; | 280 | const auto& qmd{kepler_compute->launch_description}; |
| 278 | glDispatchCompute(qmd.grid_dim_x, qmd.grid_dim_y, qmd.grid_dim_z); | 281 | glDispatchCompute(qmd.grid_dim_x, qmd.grid_dim_y, qmd.grid_dim_z); |
| 279 | ++num_queued_commands; | 282 | ++num_queued_commands; |
| 280 | has_written_global_memory |= pipeline->WritesGlobalMemory(); | 283 | has_written_global_memory |= pipeline->WritesGlobalMemory(); |
| @@ -359,7 +362,7 @@ void RasterizerOpenGL::OnCPUWrite(VAddr addr, u64 size) { | |||
| 359 | } | 362 | } |
| 360 | } | 363 | } |
| 361 | 364 | ||
| 362 | void RasterizerOpenGL::SyncGuestHost() { | 365 | void RasterizerOpenGL::InvalidateGPUCache() { |
| 363 | MICROPROFILE_SCOPE(OpenGL_CacheManagement); | 366 | MICROPROFILE_SCOPE(OpenGL_CacheManagement); |
| 364 | shader_cache.SyncGuestHost(); | 367 | shader_cache.SyncGuestHost(); |
| 365 | { | 368 | { |
| @@ -380,40 +383,30 @@ void RasterizerOpenGL::UnmapMemory(VAddr addr, u64 size) { | |||
| 380 | shader_cache.OnCPUWrite(addr, size); | 383 | shader_cache.OnCPUWrite(addr, size); |
| 381 | } | 384 | } |
| 382 | 385 | ||
| 383 | void RasterizerOpenGL::ModifyGPUMemory(GPUVAddr addr, u64 size) { | 386 | void RasterizerOpenGL::ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) { |
| 384 | { | 387 | { |
| 385 | std::scoped_lock lock{texture_cache.mutex}; | 388 | std::scoped_lock lock{texture_cache.mutex}; |
| 386 | texture_cache.UnmapGPUMemory(addr, size); | 389 | texture_cache.UnmapGPUMemory(as_id, addr, size); |
| 387 | } | 390 | } |
| 388 | } | 391 | } |
| 389 | 392 | ||
| 390 | void RasterizerOpenGL::SignalSemaphore(GPUVAddr addr, u32 value) { | 393 | void RasterizerOpenGL::SignalFence(std::function<void()>&& func) { |
| 391 | if (!gpu.IsAsync()) { | 394 | fence_manager.SignalFence(std::move(func)); |
| 392 | gpu_memory.Write<u32>(addr, value); | 395 | } |
| 393 | return; | 396 | |
| 394 | } | 397 | void RasterizerOpenGL::SyncOperation(std::function<void()>&& func) { |
| 395 | fence_manager.SignalSemaphore(addr, value); | 398 | fence_manager.SyncOperation(std::move(func)); |
| 396 | } | 399 | } |
| 397 | 400 | ||
| 398 | void RasterizerOpenGL::SignalSyncPoint(u32 value) { | 401 | void RasterizerOpenGL::SignalSyncPoint(u32 value) { |
| 399 | if (!gpu.IsAsync()) { | ||
| 400 | gpu.IncrementSyncPoint(value); | ||
| 401 | return; | ||
| 402 | } | ||
| 403 | fence_manager.SignalSyncPoint(value); | 402 | fence_manager.SignalSyncPoint(value); |
| 404 | } | 403 | } |
| 405 | 404 | ||
| 406 | void RasterizerOpenGL::SignalReference() { | 405 | void RasterizerOpenGL::SignalReference() { |
| 407 | if (!gpu.IsAsync()) { | ||
| 408 | return; | ||
| 409 | } | ||
| 410 | fence_manager.SignalOrdering(); | 406 | fence_manager.SignalOrdering(); |
| 411 | } | 407 | } |
| 412 | 408 | ||
| 413 | void RasterizerOpenGL::ReleaseFences() { | 409 | void RasterizerOpenGL::ReleaseFences() { |
| 414 | if (!gpu.IsAsync()) { | ||
| 415 | return; | ||
| 416 | } | ||
| 417 | fence_manager.WaitPendingFences(); | 410 | fence_manager.WaitPendingFences(); |
| 418 | } | 411 | } |
| 419 | 412 | ||
| @@ -430,6 +423,7 @@ void RasterizerOpenGL::WaitForIdle() { | |||
| 430 | } | 423 | } |
| 431 | 424 | ||
| 432 | void RasterizerOpenGL::FragmentBarrier() { | 425 | void RasterizerOpenGL::FragmentBarrier() { |
| 426 | glTextureBarrier(); | ||
| 433 | glMemoryBarrier(GL_FRAMEBUFFER_BARRIER_BIT | GL_TEXTURE_FETCH_BARRIER_BIT); | 427 | glMemoryBarrier(GL_FRAMEBUFFER_BARRIER_BIT | GL_TEXTURE_FETCH_BARRIER_BIT); |
| 434 | } | 428 | } |
| 435 | 429 | ||
| @@ -482,13 +476,13 @@ Tegra::Engines::AccelerateDMAInterface& RasterizerOpenGL::AccessAccelerateDMA() | |||
| 482 | } | 476 | } |
| 483 | 477 | ||
| 484 | void RasterizerOpenGL::AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, | 478 | void RasterizerOpenGL::AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, |
| 485 | std::span<u8> memory) { | 479 | std::span<const u8> memory) { |
| 486 | auto cpu_addr = gpu_memory.GpuToCpuAddress(address); | 480 | auto cpu_addr = gpu_memory->GpuToCpuAddress(address); |
| 487 | if (!cpu_addr) [[unlikely]] { | 481 | if (!cpu_addr) [[unlikely]] { |
| 488 | gpu_memory.WriteBlock(address, memory.data(), copy_size); | 482 | gpu_memory->WriteBlock(address, memory.data(), copy_size); |
| 489 | return; | 483 | return; |
| 490 | } | 484 | } |
| 491 | gpu_memory.WriteBlockUnsafe(address, memory.data(), copy_size); | 485 | gpu_memory->WriteBlockUnsafe(address, memory.data(), copy_size); |
| 492 | { | 486 | { |
| 493 | std::unique_lock<std::mutex> lock{buffer_cache.mutex}; | 487 | std::unique_lock<std::mutex> lock{buffer_cache.mutex}; |
| 494 | if (!buffer_cache.InlineMemory(*cpu_addr, copy_size, memory)) { | 488 | if (!buffer_cache.InlineMemory(*cpu_addr, copy_size, memory)) { |
| @@ -551,8 +545,8 @@ void RasterizerOpenGL::SyncState() { | |||
| 551 | } | 545 | } |
| 552 | 546 | ||
| 553 | void RasterizerOpenGL::SyncViewport() { | 547 | void RasterizerOpenGL::SyncViewport() { |
| 554 | auto& flags = maxwell3d.dirty.flags; | 548 | auto& flags = maxwell3d->dirty.flags; |
| 555 | const auto& regs = maxwell3d.regs; | 549 | const auto& regs = maxwell3d->regs; |
| 556 | 550 | ||
| 557 | const bool rescale_viewports = flags[VideoCommon::Dirty::RescaleViewports]; | 551 | const bool rescale_viewports = flags[VideoCommon::Dirty::RescaleViewports]; |
| 558 | const bool dirty_viewport = flags[Dirty::Viewports] || rescale_viewports; | 552 | const bool dirty_viewport = flags[Dirty::Viewports] || rescale_viewports; |
| @@ -657,23 +651,23 @@ void RasterizerOpenGL::SyncViewport() { | |||
| 657 | } | 651 | } |
| 658 | 652 | ||
| 659 | void RasterizerOpenGL::SyncDepthClamp() { | 653 | void RasterizerOpenGL::SyncDepthClamp() { |
| 660 | auto& flags = maxwell3d.dirty.flags; | 654 | auto& flags = maxwell3d->dirty.flags; |
| 661 | if (!flags[Dirty::DepthClampEnabled]) { | 655 | if (!flags[Dirty::DepthClampEnabled]) { |
| 662 | return; | 656 | return; |
| 663 | } | 657 | } |
| 664 | flags[Dirty::DepthClampEnabled] = false; | 658 | flags[Dirty::DepthClampEnabled] = false; |
| 665 | 659 | ||
| 666 | oglEnable(GL_DEPTH_CLAMP, maxwell3d.regs.view_volume_clip_control.depth_clamp_disabled == 0); | 660 | oglEnable(GL_DEPTH_CLAMP, maxwell3d->regs.view_volume_clip_control.depth_clamp_disabled == 0); |
| 667 | } | 661 | } |
| 668 | 662 | ||
| 669 | void RasterizerOpenGL::SyncClipEnabled(u32 clip_mask) { | 663 | void RasterizerOpenGL::SyncClipEnabled(u32 clip_mask) { |
| 670 | auto& flags = maxwell3d.dirty.flags; | 664 | auto& flags = maxwell3d->dirty.flags; |
| 671 | if (!flags[Dirty::ClipDistances] && !flags[VideoCommon::Dirty::Shaders]) { | 665 | if (!flags[Dirty::ClipDistances] && !flags[VideoCommon::Dirty::Shaders]) { |
| 672 | return; | 666 | return; |
| 673 | } | 667 | } |
| 674 | flags[Dirty::ClipDistances] = false; | 668 | flags[Dirty::ClipDistances] = false; |
| 675 | 669 | ||
| 676 | clip_mask &= maxwell3d.regs.clip_distance_enabled; | 670 | clip_mask &= maxwell3d->regs.clip_distance_enabled; |
| 677 | if (clip_mask == last_clip_distance_mask) { | 671 | if (clip_mask == last_clip_distance_mask) { |
| 678 | return; | 672 | return; |
| 679 | } | 673 | } |
| @@ -689,8 +683,8 @@ void RasterizerOpenGL::SyncClipCoef() { | |||
| 689 | } | 683 | } |
| 690 | 684 | ||
| 691 | void RasterizerOpenGL::SyncCullMode() { | 685 | void RasterizerOpenGL::SyncCullMode() { |
| 692 | auto& flags = maxwell3d.dirty.flags; | 686 | auto& flags = maxwell3d->dirty.flags; |
| 693 | const auto& regs = maxwell3d.regs; | 687 | const auto& regs = maxwell3d->regs; |
| 694 | 688 | ||
| 695 | if (flags[Dirty::CullTest]) { | 689 | if (flags[Dirty::CullTest]) { |
| 696 | flags[Dirty::CullTest] = false; | 690 | flags[Dirty::CullTest] = false; |
| @@ -705,23 +699,23 @@ void RasterizerOpenGL::SyncCullMode() { | |||
| 705 | } | 699 | } |
| 706 | 700 | ||
| 707 | void RasterizerOpenGL::SyncPrimitiveRestart() { | 701 | void RasterizerOpenGL::SyncPrimitiveRestart() { |
| 708 | auto& flags = maxwell3d.dirty.flags; | 702 | auto& flags = maxwell3d->dirty.flags; |
| 709 | if (!flags[Dirty::PrimitiveRestart]) { | 703 | if (!flags[Dirty::PrimitiveRestart]) { |
| 710 | return; | 704 | return; |
| 711 | } | 705 | } |
| 712 | flags[Dirty::PrimitiveRestart] = false; | 706 | flags[Dirty::PrimitiveRestart] = false; |
| 713 | 707 | ||
| 714 | if (maxwell3d.regs.primitive_restart.enabled) { | 708 | if (maxwell3d->regs.primitive_restart.enabled) { |
| 715 | glEnable(GL_PRIMITIVE_RESTART); | 709 | glEnable(GL_PRIMITIVE_RESTART); |
| 716 | glPrimitiveRestartIndex(maxwell3d.regs.primitive_restart.index); | 710 | glPrimitiveRestartIndex(maxwell3d->regs.primitive_restart.index); |
| 717 | } else { | 711 | } else { |
| 718 | glDisable(GL_PRIMITIVE_RESTART); | 712 | glDisable(GL_PRIMITIVE_RESTART); |
| 719 | } | 713 | } |
| 720 | } | 714 | } |
| 721 | 715 | ||
| 722 | void RasterizerOpenGL::SyncDepthTestState() { | 716 | void RasterizerOpenGL::SyncDepthTestState() { |
| 723 | auto& flags = maxwell3d.dirty.flags; | 717 | auto& flags = maxwell3d->dirty.flags; |
| 724 | const auto& regs = maxwell3d.regs; | 718 | const auto& regs = maxwell3d->regs; |
| 725 | 719 | ||
| 726 | if (flags[Dirty::DepthMask]) { | 720 | if (flags[Dirty::DepthMask]) { |
| 727 | flags[Dirty::DepthMask] = false; | 721 | flags[Dirty::DepthMask] = false; |
| @@ -740,13 +734,13 @@ void RasterizerOpenGL::SyncDepthTestState() { | |||
| 740 | } | 734 | } |
| 741 | 735 | ||
| 742 | void RasterizerOpenGL::SyncStencilTestState() { | 736 | void RasterizerOpenGL::SyncStencilTestState() { |
| 743 | auto& flags = maxwell3d.dirty.flags; | 737 | auto& flags = maxwell3d->dirty.flags; |
| 744 | if (!flags[Dirty::StencilTest]) { | 738 | if (!flags[Dirty::StencilTest]) { |
| 745 | return; | 739 | return; |
| 746 | } | 740 | } |
| 747 | flags[Dirty::StencilTest] = false; | 741 | flags[Dirty::StencilTest] = false; |
| 748 | 742 | ||
| 749 | const auto& regs = maxwell3d.regs; | 743 | const auto& regs = maxwell3d->regs; |
| 750 | oglEnable(GL_STENCIL_TEST, regs.stencil_enable); | 744 | oglEnable(GL_STENCIL_TEST, regs.stencil_enable); |
| 751 | 745 | ||
| 752 | glStencilFuncSeparate(GL_FRONT, MaxwellToGL::ComparisonOp(regs.stencil_front_func_func), | 746 | glStencilFuncSeparate(GL_FRONT, MaxwellToGL::ComparisonOp(regs.stencil_front_func_func), |
| @@ -771,23 +765,23 @@ void RasterizerOpenGL::SyncStencilTestState() { | |||
| 771 | } | 765 | } |
| 772 | 766 | ||
| 773 | void RasterizerOpenGL::SyncRasterizeEnable() { | 767 | void RasterizerOpenGL::SyncRasterizeEnable() { |
| 774 | auto& flags = maxwell3d.dirty.flags; | 768 | auto& flags = maxwell3d->dirty.flags; |
| 775 | if (!flags[Dirty::RasterizeEnable]) { | 769 | if (!flags[Dirty::RasterizeEnable]) { |
| 776 | return; | 770 | return; |
| 777 | } | 771 | } |
| 778 | flags[Dirty::RasterizeEnable] = false; | 772 | flags[Dirty::RasterizeEnable] = false; |
| 779 | 773 | ||
| 780 | oglEnable(GL_RASTERIZER_DISCARD, maxwell3d.regs.rasterize_enable == 0); | 774 | oglEnable(GL_RASTERIZER_DISCARD, maxwell3d->regs.rasterize_enable == 0); |
| 781 | } | 775 | } |
| 782 | 776 | ||
| 783 | void RasterizerOpenGL::SyncPolygonModes() { | 777 | void RasterizerOpenGL::SyncPolygonModes() { |
| 784 | auto& flags = maxwell3d.dirty.flags; | 778 | auto& flags = maxwell3d->dirty.flags; |
| 785 | if (!flags[Dirty::PolygonModes]) { | 779 | if (!flags[Dirty::PolygonModes]) { |
| 786 | return; | 780 | return; |
| 787 | } | 781 | } |
| 788 | flags[Dirty::PolygonModes] = false; | 782 | flags[Dirty::PolygonModes] = false; |
| 789 | 783 | ||
| 790 | const auto& regs = maxwell3d.regs; | 784 | const auto& regs = maxwell3d->regs; |
| 791 | if (regs.fill_rectangle) { | 785 | if (regs.fill_rectangle) { |
| 792 | if (!GLAD_GL_NV_fill_rectangle) { | 786 | if (!GLAD_GL_NV_fill_rectangle) { |
| 793 | LOG_ERROR(Render_OpenGL, "GL_NV_fill_rectangle used and not supported"); | 787 | LOG_ERROR(Render_OpenGL, "GL_NV_fill_rectangle used and not supported"); |
| @@ -820,7 +814,7 @@ void RasterizerOpenGL::SyncPolygonModes() { | |||
| 820 | } | 814 | } |
| 821 | 815 | ||
| 822 | void RasterizerOpenGL::SyncColorMask() { | 816 | void RasterizerOpenGL::SyncColorMask() { |
| 823 | auto& flags = maxwell3d.dirty.flags; | 817 | auto& flags = maxwell3d->dirty.flags; |
| 824 | if (!flags[Dirty::ColorMasks]) { | 818 | if (!flags[Dirty::ColorMasks]) { |
| 825 | return; | 819 | return; |
| 826 | } | 820 | } |
| @@ -829,7 +823,7 @@ void RasterizerOpenGL::SyncColorMask() { | |||
| 829 | const bool force = flags[Dirty::ColorMaskCommon]; | 823 | const bool force = flags[Dirty::ColorMaskCommon]; |
| 830 | flags[Dirty::ColorMaskCommon] = false; | 824 | flags[Dirty::ColorMaskCommon] = false; |
| 831 | 825 | ||
| 832 | const auto& regs = maxwell3d.regs; | 826 | const auto& regs = maxwell3d->regs; |
| 833 | if (regs.color_mask_common) { | 827 | if (regs.color_mask_common) { |
| 834 | if (!force && !flags[Dirty::ColorMask0]) { | 828 | if (!force && !flags[Dirty::ColorMask0]) { |
| 835 | return; | 829 | return; |
| @@ -854,30 +848,30 @@ void RasterizerOpenGL::SyncColorMask() { | |||
| 854 | } | 848 | } |
| 855 | 849 | ||
| 856 | void RasterizerOpenGL::SyncMultiSampleState() { | 850 | void RasterizerOpenGL::SyncMultiSampleState() { |
| 857 | auto& flags = maxwell3d.dirty.flags; | 851 | auto& flags = maxwell3d->dirty.flags; |
| 858 | if (!flags[Dirty::MultisampleControl]) { | 852 | if (!flags[Dirty::MultisampleControl]) { |
| 859 | return; | 853 | return; |
| 860 | } | 854 | } |
| 861 | flags[Dirty::MultisampleControl] = false; | 855 | flags[Dirty::MultisampleControl] = false; |
| 862 | 856 | ||
| 863 | const auto& regs = maxwell3d.regs; | 857 | const auto& regs = maxwell3d->regs; |
| 864 | oglEnable(GL_SAMPLE_ALPHA_TO_COVERAGE, regs.multisample_control.alpha_to_coverage); | 858 | oglEnable(GL_SAMPLE_ALPHA_TO_COVERAGE, regs.multisample_control.alpha_to_coverage); |
| 865 | oglEnable(GL_SAMPLE_ALPHA_TO_ONE, regs.multisample_control.alpha_to_one); | 859 | oglEnable(GL_SAMPLE_ALPHA_TO_ONE, regs.multisample_control.alpha_to_one); |
| 866 | } | 860 | } |
| 867 | 861 | ||
| 868 | void RasterizerOpenGL::SyncFragmentColorClampState() { | 862 | void RasterizerOpenGL::SyncFragmentColorClampState() { |
| 869 | auto& flags = maxwell3d.dirty.flags; | 863 | auto& flags = maxwell3d->dirty.flags; |
| 870 | if (!flags[Dirty::FragmentClampColor]) { | 864 | if (!flags[Dirty::FragmentClampColor]) { |
| 871 | return; | 865 | return; |
| 872 | } | 866 | } |
| 873 | flags[Dirty::FragmentClampColor] = false; | 867 | flags[Dirty::FragmentClampColor] = false; |
| 874 | 868 | ||
| 875 | glClampColor(GL_CLAMP_FRAGMENT_COLOR, maxwell3d.regs.frag_color_clamp ? GL_TRUE : GL_FALSE); | 869 | glClampColor(GL_CLAMP_FRAGMENT_COLOR, maxwell3d->regs.frag_color_clamp ? GL_TRUE : GL_FALSE); |
| 876 | } | 870 | } |
| 877 | 871 | ||
| 878 | void RasterizerOpenGL::SyncBlendState() { | 872 | void RasterizerOpenGL::SyncBlendState() { |
| 879 | auto& flags = maxwell3d.dirty.flags; | 873 | auto& flags = maxwell3d->dirty.flags; |
| 880 | const auto& regs = maxwell3d.regs; | 874 | const auto& regs = maxwell3d->regs; |
| 881 | 875 | ||
| 882 | if (flags[Dirty::BlendColor]) { | 876 | if (flags[Dirty::BlendColor]) { |
| 883 | flags[Dirty::BlendColor] = false; | 877 | flags[Dirty::BlendColor] = false; |
| @@ -934,13 +928,13 @@ void RasterizerOpenGL::SyncBlendState() { | |||
| 934 | } | 928 | } |
| 935 | 929 | ||
| 936 | void RasterizerOpenGL::SyncLogicOpState() { | 930 | void RasterizerOpenGL::SyncLogicOpState() { |
| 937 | auto& flags = maxwell3d.dirty.flags; | 931 | auto& flags = maxwell3d->dirty.flags; |
| 938 | if (!flags[Dirty::LogicOp]) { | 932 | if (!flags[Dirty::LogicOp]) { |
| 939 | return; | 933 | return; |
| 940 | } | 934 | } |
| 941 | flags[Dirty::LogicOp] = false; | 935 | flags[Dirty::LogicOp] = false; |
| 942 | 936 | ||
| 943 | const auto& regs = maxwell3d.regs; | 937 | const auto& regs = maxwell3d->regs; |
| 944 | if (regs.logic_op.enable) { | 938 | if (regs.logic_op.enable) { |
| 945 | glEnable(GL_COLOR_LOGIC_OP); | 939 | glEnable(GL_COLOR_LOGIC_OP); |
| 946 | glLogicOp(MaxwellToGL::LogicOp(regs.logic_op.operation)); | 940 | glLogicOp(MaxwellToGL::LogicOp(regs.logic_op.operation)); |
| @@ -950,7 +944,7 @@ void RasterizerOpenGL::SyncLogicOpState() { | |||
| 950 | } | 944 | } |
| 951 | 945 | ||
| 952 | void RasterizerOpenGL::SyncScissorTest() { | 946 | void RasterizerOpenGL::SyncScissorTest() { |
| 953 | auto& flags = maxwell3d.dirty.flags; | 947 | auto& flags = maxwell3d->dirty.flags; |
| 954 | if (!flags[Dirty::Scissors] && !flags[VideoCommon::Dirty::RescaleScissors]) { | 948 | if (!flags[Dirty::Scissors] && !flags[VideoCommon::Dirty::RescaleScissors]) { |
| 955 | return; | 949 | return; |
| 956 | } | 950 | } |
| @@ -959,7 +953,7 @@ void RasterizerOpenGL::SyncScissorTest() { | |||
| 959 | const bool force = flags[VideoCommon::Dirty::RescaleScissors]; | 953 | const bool force = flags[VideoCommon::Dirty::RescaleScissors]; |
| 960 | flags[VideoCommon::Dirty::RescaleScissors] = false; | 954 | flags[VideoCommon::Dirty::RescaleScissors] = false; |
| 961 | 955 | ||
| 962 | const auto& regs = maxwell3d.regs; | 956 | const auto& regs = maxwell3d->regs; |
| 963 | 957 | ||
| 964 | const auto& resolution = Settings::values.resolution_info; | 958 | const auto& resolution = Settings::values.resolution_info; |
| 965 | const bool is_rescaling{texture_cache.IsRescaling()}; | 959 | const bool is_rescaling{texture_cache.IsRescaling()}; |
| @@ -995,39 +989,39 @@ void RasterizerOpenGL::SyncScissorTest() { | |||
| 995 | } | 989 | } |
| 996 | 990 | ||
| 997 | void RasterizerOpenGL::SyncPointState() { | 991 | void RasterizerOpenGL::SyncPointState() { |
| 998 | auto& flags = maxwell3d.dirty.flags; | 992 | auto& flags = maxwell3d->dirty.flags; |
| 999 | if (!flags[Dirty::PointSize]) { | 993 | if (!flags[Dirty::PointSize]) { |
| 1000 | return; | 994 | return; |
| 1001 | } | 995 | } |
| 1002 | flags[Dirty::PointSize] = false; | 996 | flags[Dirty::PointSize] = false; |
| 1003 | 997 | ||
| 1004 | oglEnable(GL_POINT_SPRITE, maxwell3d.regs.point_sprite_enable); | 998 | oglEnable(GL_POINT_SPRITE, maxwell3d->regs.point_sprite_enable); |
| 1005 | oglEnable(GL_PROGRAM_POINT_SIZE, maxwell3d.regs.vp_point_size.enable); | 999 | oglEnable(GL_PROGRAM_POINT_SIZE, maxwell3d->regs.vp_point_size.enable); |
| 1006 | const bool is_rescaling{texture_cache.IsRescaling()}; | 1000 | const bool is_rescaling{texture_cache.IsRescaling()}; |
| 1007 | const float scale = is_rescaling ? Settings::values.resolution_info.up_factor : 1.0f; | 1001 | const float scale = is_rescaling ? Settings::values.resolution_info.up_factor : 1.0f; |
| 1008 | glPointSize(std::max(1.0f, maxwell3d.regs.point_size * scale)); | 1002 | glPointSize(std::max(1.0f, maxwell3d->regs.point_size * scale)); |
| 1009 | } | 1003 | } |
| 1010 | 1004 | ||
| 1011 | void RasterizerOpenGL::SyncLineState() { | 1005 | void RasterizerOpenGL::SyncLineState() { |
| 1012 | auto& flags = maxwell3d.dirty.flags; | 1006 | auto& flags = maxwell3d->dirty.flags; |
| 1013 | if (!flags[Dirty::LineWidth]) { | 1007 | if (!flags[Dirty::LineWidth]) { |
| 1014 | return; | 1008 | return; |
| 1015 | } | 1009 | } |
| 1016 | flags[Dirty::LineWidth] = false; | 1010 | flags[Dirty::LineWidth] = false; |
| 1017 | 1011 | ||
| 1018 | const auto& regs = maxwell3d.regs; | 1012 | const auto& regs = maxwell3d->regs; |
| 1019 | oglEnable(GL_LINE_SMOOTH, regs.line_smooth_enable); | 1013 | oglEnable(GL_LINE_SMOOTH, regs.line_smooth_enable); |
| 1020 | glLineWidth(regs.line_smooth_enable ? regs.line_width_smooth : regs.line_width_aliased); | 1014 | glLineWidth(regs.line_smooth_enable ? regs.line_width_smooth : regs.line_width_aliased); |
| 1021 | } | 1015 | } |
| 1022 | 1016 | ||
| 1023 | void RasterizerOpenGL::SyncPolygonOffset() { | 1017 | void RasterizerOpenGL::SyncPolygonOffset() { |
| 1024 | auto& flags = maxwell3d.dirty.flags; | 1018 | auto& flags = maxwell3d->dirty.flags; |
| 1025 | if (!flags[Dirty::PolygonOffset]) { | 1019 | if (!flags[Dirty::PolygonOffset]) { |
| 1026 | return; | 1020 | return; |
| 1027 | } | 1021 | } |
| 1028 | flags[Dirty::PolygonOffset] = false; | 1022 | flags[Dirty::PolygonOffset] = false; |
| 1029 | 1023 | ||
| 1030 | const auto& regs = maxwell3d.regs; | 1024 | const auto& regs = maxwell3d->regs; |
| 1031 | oglEnable(GL_POLYGON_OFFSET_FILL, regs.polygon_offset_fill_enable); | 1025 | oglEnable(GL_POLYGON_OFFSET_FILL, regs.polygon_offset_fill_enable); |
| 1032 | oglEnable(GL_POLYGON_OFFSET_LINE, regs.polygon_offset_line_enable); | 1026 | oglEnable(GL_POLYGON_OFFSET_LINE, regs.polygon_offset_line_enable); |
| 1033 | oglEnable(GL_POLYGON_OFFSET_POINT, regs.polygon_offset_point_enable); | 1027 | oglEnable(GL_POLYGON_OFFSET_POINT, regs.polygon_offset_point_enable); |
| @@ -1041,13 +1035,13 @@ void RasterizerOpenGL::SyncPolygonOffset() { | |||
| 1041 | } | 1035 | } |
| 1042 | 1036 | ||
| 1043 | void RasterizerOpenGL::SyncAlphaTest() { | 1037 | void RasterizerOpenGL::SyncAlphaTest() { |
| 1044 | auto& flags = maxwell3d.dirty.flags; | 1038 | auto& flags = maxwell3d->dirty.flags; |
| 1045 | if (!flags[Dirty::AlphaTest]) { | 1039 | if (!flags[Dirty::AlphaTest]) { |
| 1046 | return; | 1040 | return; |
| 1047 | } | 1041 | } |
| 1048 | flags[Dirty::AlphaTest] = false; | 1042 | flags[Dirty::AlphaTest] = false; |
| 1049 | 1043 | ||
| 1050 | const auto& regs = maxwell3d.regs; | 1044 | const auto& regs = maxwell3d->regs; |
| 1051 | if (regs.alpha_test_enabled) { | 1045 | if (regs.alpha_test_enabled) { |
| 1052 | glEnable(GL_ALPHA_TEST); | 1046 | glEnable(GL_ALPHA_TEST); |
| 1053 | glAlphaFunc(MaxwellToGL::ComparisonOp(regs.alpha_test_func), regs.alpha_test_ref); | 1047 | glAlphaFunc(MaxwellToGL::ComparisonOp(regs.alpha_test_func), regs.alpha_test_ref); |
| @@ -1057,17 +1051,17 @@ void RasterizerOpenGL::SyncAlphaTest() { | |||
| 1057 | } | 1051 | } |
| 1058 | 1052 | ||
| 1059 | void RasterizerOpenGL::SyncFramebufferSRGB() { | 1053 | void RasterizerOpenGL::SyncFramebufferSRGB() { |
| 1060 | auto& flags = maxwell3d.dirty.flags; | 1054 | auto& flags = maxwell3d->dirty.flags; |
| 1061 | if (!flags[Dirty::FramebufferSRGB]) { | 1055 | if (!flags[Dirty::FramebufferSRGB]) { |
| 1062 | return; | 1056 | return; |
| 1063 | } | 1057 | } |
| 1064 | flags[Dirty::FramebufferSRGB] = false; | 1058 | flags[Dirty::FramebufferSRGB] = false; |
| 1065 | 1059 | ||
| 1066 | oglEnable(GL_FRAMEBUFFER_SRGB, maxwell3d.regs.framebuffer_srgb); | 1060 | oglEnable(GL_FRAMEBUFFER_SRGB, maxwell3d->regs.framebuffer_srgb); |
| 1067 | } | 1061 | } |
| 1068 | 1062 | ||
| 1069 | void RasterizerOpenGL::BeginTransformFeedback(GraphicsPipeline* program, GLenum primitive_mode) { | 1063 | void RasterizerOpenGL::BeginTransformFeedback(GraphicsPipeline* program, GLenum primitive_mode) { |
| 1070 | const auto& regs = maxwell3d.regs; | 1064 | const auto& regs = maxwell3d->regs; |
| 1071 | if (regs.tfb_enabled == 0) { | 1065 | if (regs.tfb_enabled == 0) { |
| 1072 | return; | 1066 | return; |
| 1073 | } | 1067 | } |
| @@ -1086,11 +1080,48 @@ void RasterizerOpenGL::BeginTransformFeedback(GraphicsPipeline* program, GLenum | |||
| 1086 | } | 1080 | } |
| 1087 | 1081 | ||
| 1088 | void RasterizerOpenGL::EndTransformFeedback() { | 1082 | void RasterizerOpenGL::EndTransformFeedback() { |
| 1089 | if (maxwell3d.regs.tfb_enabled != 0) { | 1083 | if (maxwell3d->regs.tfb_enabled != 0) { |
| 1090 | glEndTransformFeedback(); | 1084 | glEndTransformFeedback(); |
| 1091 | } | 1085 | } |
| 1092 | } | 1086 | } |
| 1093 | 1087 | ||
| 1088 | void RasterizerOpenGL::InitializeChannel(Tegra::Control::ChannelState& channel) { | ||
| 1089 | CreateChannel(channel); | ||
| 1090 | { | ||
| 1091 | std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; | ||
| 1092 | texture_cache.CreateChannel(channel); | ||
| 1093 | buffer_cache.CreateChannel(channel); | ||
| 1094 | } | ||
| 1095 | shader_cache.CreateChannel(channel); | ||
| 1096 | query_cache.CreateChannel(channel); | ||
| 1097 | state_tracker.SetupTables(channel); | ||
| 1098 | } | ||
| 1099 | |||
| 1100 | void RasterizerOpenGL::BindChannel(Tegra::Control::ChannelState& channel) { | ||
| 1101 | const s32 channel_id = channel.bind_id; | ||
| 1102 | BindToChannel(channel_id); | ||
| 1103 | { | ||
| 1104 | std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; | ||
| 1105 | texture_cache.BindToChannel(channel_id); | ||
| 1106 | buffer_cache.BindToChannel(channel_id); | ||
| 1107 | } | ||
| 1108 | shader_cache.BindToChannel(channel_id); | ||
| 1109 | query_cache.BindToChannel(channel_id); | ||
| 1110 | state_tracker.ChangeChannel(channel); | ||
| 1111 | state_tracker.InvalidateState(); | ||
| 1112 | } | ||
| 1113 | |||
| 1114 | void RasterizerOpenGL::ReleaseChannel(s32 channel_id) { | ||
| 1115 | EraseChannel(channel_id); | ||
| 1116 | { | ||
| 1117 | std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; | ||
| 1118 | texture_cache.EraseChannel(channel_id); | ||
| 1119 | buffer_cache.EraseChannel(channel_id); | ||
| 1120 | } | ||
| 1121 | shader_cache.EraseChannel(channel_id); | ||
| 1122 | query_cache.EraseChannel(channel_id); | ||
| 1123 | } | ||
| 1124 | |||
| 1094 | AccelerateDMA::AccelerateDMA(BufferCache& buffer_cache_) : buffer_cache{buffer_cache_} {} | 1125 | AccelerateDMA::AccelerateDMA(BufferCache& buffer_cache_) : buffer_cache{buffer_cache_} {} |
| 1095 | 1126 | ||
| 1096 | bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) { | 1127 | bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) { |
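The new InitializeChannel/BindChannel/ReleaseChannel hooks fan a channel's lifetime out to every cache (texture, buffer, shader, query) and the state tracker. A generic, hedged sketch of the per-channel cache shape implied by those CreateChannel/BindToChannel/EraseChannel calls follows; ChannelInfo and the map are simplified stand-ins, not yuzu's ChannelSetupCaches implementation.

    #include <cstdint>
    #include <unordered_map>

    struct ChannelInfo {}; // placeholder for per-channel cache state

    class PerChannelCacheSketch {
    public:
        void CreateChannel(std::int32_t id) { channels.emplace(id, ChannelInfo{}); }
        void BindToChannel(std::int32_t id) { current = &channels.at(id); }
        void EraseChannel(std::int32_t id) {
            if (current == &channels.at(id)) {
                current = nullptr;
            }
            channels.erase(id);
        }

    private:
        std::unordered_map<std::int32_t, ChannelInfo> channels;
        ChannelInfo* current{}; // state used by the currently bound channel
    };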
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h index 31a16fcba..45131b785 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.h +++ b/src/video_core/renderer_opengl/gl_rasterizer.h | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | #include <glad/glad.h> | 12 | #include <glad/glad.h> |
| 13 | 13 | ||
| 14 | #include "common/common_types.h" | 14 | #include "common/common_types.h" |
| 15 | #include "video_core/control/channel_state_cache.h" | ||
| 15 | #include "video_core/engines/maxwell_dma.h" | 16 | #include "video_core/engines/maxwell_dma.h" |
| 16 | #include "video_core/rasterizer_accelerated.h" | 17 | #include "video_core/rasterizer_accelerated.h" |
| 17 | #include "video_core/rasterizer_interface.h" | 18 | #include "video_core/rasterizer_interface.h" |
| @@ -58,7 +59,8 @@ private: | |||
| 58 | BufferCache& buffer_cache; | 59 | BufferCache& buffer_cache; |
| 59 | }; | 60 | }; |
| 60 | 61 | ||
| 61 | class RasterizerOpenGL : public VideoCore::RasterizerAccelerated { | 62 | class RasterizerOpenGL : public VideoCore::RasterizerAccelerated, |
| 63 | protected VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> { | ||
| 62 | public: | 64 | public: |
| 63 | explicit RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, | 65 | explicit RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, |
| 64 | Core::Memory::Memory& cpu_memory_, const Device& device_, | 66 | Core::Memory::Memory& cpu_memory_, const Device& device_, |
| @@ -78,10 +80,11 @@ public: | |||
| 78 | bool MustFlushRegion(VAddr addr, u64 size) override; | 80 | bool MustFlushRegion(VAddr addr, u64 size) override; |
| 79 | void InvalidateRegion(VAddr addr, u64 size) override; | 81 | void InvalidateRegion(VAddr addr, u64 size) override; |
| 80 | void OnCPUWrite(VAddr addr, u64 size) override; | 82 | void OnCPUWrite(VAddr addr, u64 size) override; |
| 81 | void SyncGuestHost() override; | 83 | void InvalidateGPUCache() override; |
| 82 | void UnmapMemory(VAddr addr, u64 size) override; | 84 | void UnmapMemory(VAddr addr, u64 size) override; |
| 83 | void ModifyGPUMemory(GPUVAddr addr, u64 size) override; | 85 | void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) override; |
| 84 | void SignalSemaphore(GPUVAddr addr, u32 value) override; | 86 | void SignalFence(std::function<void()>&& func) override; |
| 87 | void SyncOperation(std::function<void()>&& func) override; | ||
| 85 | void SignalSyncPoint(u32 value) override; | 88 | void SignalSyncPoint(u32 value) override; |
| 86 | void SignalReference() override; | 89 | void SignalReference() override; |
| 87 | void ReleaseFences() override; | 90 | void ReleaseFences() override; |
| @@ -96,7 +99,7 @@ public: | |||
| 96 | const Tegra::Engines::Fermi2D::Config& copy_config) override; | 99 | const Tegra::Engines::Fermi2D::Config& copy_config) override; |
| 97 | Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override; | 100 | Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override; |
| 98 | void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, | 101 | void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, |
| 99 | std::span<u8> memory) override; | 102 | std::span<const u8> memory) override; |
| 100 | bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr, | 103 | bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr, |
| 101 | u32 pixel_stride) override; | 104 | u32 pixel_stride) override; |
| 102 | void LoadDiskResources(u64 title_id, std::stop_token stop_loading, | 105 | void LoadDiskResources(u64 title_id, std::stop_token stop_loading, |
| @@ -107,6 +110,12 @@ public: | |||
| 107 | return num_queued_commands > 0; | 110 | return num_queued_commands > 0; |
| 108 | } | 111 | } |
| 109 | 112 | ||
| 113 | void InitializeChannel(Tegra::Control::ChannelState& channel) override; | ||
| 114 | |||
| 115 | void BindChannel(Tegra::Control::ChannelState& channel) override; | ||
| 116 | |||
| 117 | void ReleaseChannel(s32 channel_id) override; | ||
| 118 | |||
| 110 | private: | 119 | private: |
| 111 | static constexpr size_t MAX_TEXTURES = 192; | 120 | static constexpr size_t MAX_TEXTURES = 192; |
| 112 | static constexpr size_t MAX_IMAGES = 48; | 121 | static constexpr size_t MAX_IMAGES = 48; |
| @@ -191,9 +200,6 @@ private: | |||
| 191 | void EndTransformFeedback(); | 200 | void EndTransformFeedback(); |
| 192 | 201 | ||
| 193 | Tegra::GPU& gpu; | 202 | Tegra::GPU& gpu; |
| 194 | Tegra::Engines::Maxwell3D& maxwell3d; | ||
| 195 | Tegra::Engines::KeplerCompute& kepler_compute; | ||
| 196 | Tegra::MemoryManager& gpu_memory; | ||
| 197 | 203 | ||
| 198 | const Device& device; | 204 | const Device& device; |
| 199 | ScreenInfo& screen_info; | 205 | ScreenInfo& screen_info; |
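The SignalSemaphore override is replaced by SignalFence and SyncOperation, both taking an arbitrary std::function that is forwarded to the fence manager, and the gpu.IsAsync() early-outs are removed in the same gl_rasterizer.cpp hunk above. A hedged, self-contained sketch of queueing such callables; this illustrates only the interface shape, not the FenceManager implementation.

    #include <functional>
    #include <queue>

    class FenceQueueSketch {
    public:
        // Run-in-order operation with no host fence attached.
        void SyncOperation(std::function<void()>&& func) { pending.push(std::move(func)); }
        // Operation tied to a fence; a real implementation would also queue a host fence.
        void SignalFence(std::function<void()>&& func) { pending.push(std::move(func)); }
        // Drain everything queued so far.
        void WaitPendingFences() {
            while (!pending.empty()) {
                pending.front()();
                pending.pop();
            }
        }

    private:
        std::queue<std::function<void()>> pending;
    };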
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp index 0b8d8ec92..5a29a41d2 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.cpp +++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp | |||
| @@ -151,16 +151,13 @@ void SetXfbState(VideoCommon::TransformFeedbackState& state, const Maxwell& regs | |||
| 151 | } // Anonymous namespace | 151 | } // Anonymous namespace |
| 152 | 152 | ||
| 153 | ShaderCache::ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindow& emu_window_, | 153 | ShaderCache::ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindow& emu_window_, |
| 154 | Tegra::Engines::Maxwell3D& maxwell3d_, | 154 | const Device& device_, TextureCache& texture_cache_, |
| 155 | Tegra::Engines::KeplerCompute& kepler_compute_, | 155 | BufferCache& buffer_cache_, ProgramManager& program_manager_, |
| 156 | Tegra::MemoryManager& gpu_memory_, const Device& device_, | 156 | StateTracker& state_tracker_, VideoCore::ShaderNotify& shader_notify_) |
| 157 | TextureCache& texture_cache_, BufferCache& buffer_cache_, | 157 | : VideoCommon::ShaderCache{rasterizer_}, emu_window{emu_window_}, device{device_}, |
| 158 | ProgramManager& program_manager_, StateTracker& state_tracker_, | 158 | texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, program_manager{program_manager_}, |
| 159 | VideoCore::ShaderNotify& shader_notify_) | 159 | state_tracker{state_tracker_}, shader_notify{shader_notify_}, |
| 160 | : VideoCommon::ShaderCache{rasterizer_, gpu_memory_, maxwell3d_, kepler_compute_}, | 160 | use_asynchronous_shaders{device.UseAsynchronousShaders()}, |
| 161 | emu_window{emu_window_}, device{device_}, texture_cache{texture_cache_}, | ||
| 162 | buffer_cache{buffer_cache_}, program_manager{program_manager_}, state_tracker{state_tracker_}, | ||
| 163 | shader_notify{shader_notify_}, use_asynchronous_shaders{device.UseAsynchronousShaders()}, | ||
| 164 | profile{ | 161 | profile{ |
| 165 | .supported_spirv = 0x00010000, | 162 | .supported_spirv = 0x00010000, |
| 166 | 163 | ||
| @@ -310,7 +307,7 @@ GraphicsPipeline* ShaderCache::CurrentGraphicsPipeline() { | |||
| 310 | current_pipeline = nullptr; | 307 | current_pipeline = nullptr; |
| 311 | return nullptr; | 308 | return nullptr; |
| 312 | } | 309 | } |
| 313 | const auto& regs{maxwell3d.regs}; | 310 | const auto& regs{maxwell3d->regs}; |
| 314 | graphics_key.raw = 0; | 311 | graphics_key.raw = 0; |
| 315 | graphics_key.early_z.Assign(regs.force_early_fragment_tests != 0 ? 1 : 0); | 312 | graphics_key.early_z.Assign(regs.force_early_fragment_tests != 0 ? 1 : 0); |
| 316 | graphics_key.gs_input_topology.Assign(graphics_key.unique_hashes[4] != 0 | 313 | graphics_key.gs_input_topology.Assign(graphics_key.unique_hashes[4] != 0 |
| @@ -351,13 +348,13 @@ GraphicsPipeline* ShaderCache::BuiltPipeline(GraphicsPipeline* pipeline) const n | |||
| 351 | } | 348 | } |
| 352 | // If something is using depth, we can assume that games are not rendering anything which | 349 | // If something is using depth, we can assume that games are not rendering anything which |
| 353 | // will be used one time. | 350 | // will be used one time. |
| 354 | if (maxwell3d.regs.zeta_enable) { | 351 | if (maxwell3d->regs.zeta_enable) { |
| 355 | return nullptr; | 352 | return nullptr; |
| 356 | } | 353 | } |
| 357 | // If games are using a small index count, we can assume these are full screen quads. | 354 | // If games are using a small index count, we can assume these are full screen quads. |
| 358 | // Usually these shaders are only used once for building textures so we can assume they | 355 | // Usually these shaders are only used once for building textures so we can assume they |
| 359 | // can't be built async | 356 | // can't be built async |
| 360 | if (maxwell3d.regs.index_array.count <= 6 || maxwell3d.regs.vertex_buffer.count <= 6) { | 357 | if (maxwell3d->regs.index_array.count <= 6 || maxwell3d->regs.vertex_buffer.count <= 6) { |
| 361 | return pipeline; | 358 | return pipeline; |
| 362 | } | 359 | } |
| 363 | return nullptr; | 360 | return nullptr; |
| @@ -368,7 +365,7 @@ ComputePipeline* ShaderCache::CurrentComputePipeline() { | |||
| 368 | if (!shader) { | 365 | if (!shader) { |
| 369 | return nullptr; | 366 | return nullptr; |
| 370 | } | 367 | } |
| 371 | const auto& qmd{kepler_compute.launch_description}; | 368 | const auto& qmd{kepler_compute->launch_description}; |
| 372 | const ComputePipelineKey key{ | 369 | const ComputePipelineKey key{ |
| 373 | .unique_hash = shader->unique_hash, | 370 | .unique_hash = shader->unique_hash, |
| 374 | .shared_memory_size = qmd.shared_alloc, | 371 | .shared_memory_size = qmd.shared_alloc, |
| @@ -480,9 +477,9 @@ std::unique_ptr<GraphicsPipeline> ShaderCache::CreateGraphicsPipeline( | |||
| 480 | previous_program = &program; | 477 | previous_program = &program; |
| 481 | } | 478 | } |
| 482 | auto* const thread_worker{build_in_parallel ? workers.get() : nullptr}; | 479 | auto* const thread_worker{build_in_parallel ? workers.get() : nullptr}; |
| 483 | return std::make_unique<GraphicsPipeline>( | 480 | return std::make_unique<GraphicsPipeline>(device, texture_cache, buffer_cache, program_manager, |
| 484 | device, texture_cache, buffer_cache, gpu_memory, maxwell3d, program_manager, state_tracker, | 481 | state_tracker, thread_worker, &shader_notify, sources, |
| 485 | thread_worker, &shader_notify, sources, sources_spirv, infos, key); | 482 | sources_spirv, infos, key); |
| 486 | 483 | ||
| 487 | } catch (Shader::Exception& exception) { | 484 | } catch (Shader::Exception& exception) { |
| 488 | LOG_ERROR(Render_OpenGL, "{}", exception.what()); | 485 | LOG_ERROR(Render_OpenGL, "{}", exception.what()); |
| @@ -491,9 +488,9 @@ std::unique_ptr<GraphicsPipeline> ShaderCache::CreateGraphicsPipeline( | |||
| 491 | 488 | ||
| 492 | std::unique_ptr<ComputePipeline> ShaderCache::CreateComputePipeline( | 489 | std::unique_ptr<ComputePipeline> ShaderCache::CreateComputePipeline( |
| 493 | const ComputePipelineKey& key, const VideoCommon::ShaderInfo* shader) { | 490 | const ComputePipelineKey& key, const VideoCommon::ShaderInfo* shader) { |
| 494 | const GPUVAddr program_base{kepler_compute.regs.code_loc.Address()}; | 491 | const GPUVAddr program_base{kepler_compute->regs.code_loc.Address()}; |
| 495 | const auto& qmd{kepler_compute.launch_description}; | 492 | const auto& qmd{kepler_compute->launch_description}; |
| 496 | ComputeEnvironment env{kepler_compute, gpu_memory, program_base, qmd.program_start}; | 493 | ComputeEnvironment env{*kepler_compute, *gpu_memory, program_base, qmd.program_start}; |
| 497 | env.SetCachedSize(shader->size_bytes); | 494 | env.SetCachedSize(shader->size_bytes); |
| 498 | 495 | ||
| 499 | main_pools.ReleaseContents(); | 496 | main_pools.ReleaseContents(); |
| @@ -536,9 +533,8 @@ std::unique_ptr<ComputePipeline> ShaderCache::CreateComputePipeline( | |||
| 536 | break; | 533 | break; |
| 537 | } | 534 | } |
| 538 | 535 | ||
| 539 | return std::make_unique<ComputePipeline>(device, texture_cache, buffer_cache, gpu_memory, | 536 | return std::make_unique<ComputePipeline>(device, texture_cache, buffer_cache, program_manager, |
| 540 | kepler_compute, program_manager, program.info, code, | 537 | program.info, code, code_spirv); |
| 541 | code_spirv); | ||
| 542 | } catch (Shader::Exception& exception) { | 538 | } catch (Shader::Exception& exception) { |
| 543 | LOG_ERROR(Render_OpenGL, "{}", exception.what()); | 539 | LOG_ERROR(Render_OpenGL, "{}", exception.what()); |
| 544 | return nullptr; | 540 | return nullptr; |
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h index a14269dea..89f181fe3 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.h +++ b/src/video_core/renderer_opengl/gl_shader_cache.h | |||
| @@ -30,12 +30,9 @@ using ShaderWorker = Common::StatefulThreadWorker<ShaderContext::Context>; | |||
| 30 | class ShaderCache : public VideoCommon::ShaderCache { | 30 | class ShaderCache : public VideoCommon::ShaderCache { |
| 31 | public: | 31 | public: |
| 32 | explicit ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindow& emu_window_, | 32 | explicit ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindow& emu_window_, |
| 33 | Tegra::Engines::Maxwell3D& maxwell3d_, | 33 | const Device& device_, TextureCache& texture_cache_, |
| 34 | Tegra::Engines::KeplerCompute& kepler_compute_, | 34 | BufferCache& buffer_cache_, ProgramManager& program_manager_, |
| 35 | Tegra::MemoryManager& gpu_memory_, const Device& device_, | 35 | StateTracker& state_tracker_, VideoCore::ShaderNotify& shader_notify_); |
| 36 | TextureCache& texture_cache_, BufferCache& buffer_cache_, | ||
| 37 | ProgramManager& program_manager_, StateTracker& state_tracker_, | ||
| 38 | VideoCore::ShaderNotify& shader_notify_); | ||
| 39 | ~ShaderCache(); | 36 | ~ShaderCache(); |
| 40 | 37 | ||
| 41 | void LoadDiskResources(u64 title_id, std::stop_token stop_loading, | 38 | void LoadDiskResources(u64 title_id, std::stop_token stop_loading, |
diff --git a/src/video_core/renderer_opengl/gl_state_tracker.cpp b/src/video_core/renderer_opengl/gl_state_tracker.cpp index 912725ef7..a8f3a0f57 100644 --- a/src/video_core/renderer_opengl/gl_state_tracker.cpp +++ b/src/video_core/renderer_opengl/gl_state_tracker.cpp | |||
| @@ -7,8 +7,8 @@ | |||
| 7 | 7 | ||
| 8 | #include "common/common_types.h" | 8 | #include "common/common_types.h" |
| 9 | #include "core/core.h" | 9 | #include "core/core.h" |
| 10 | #include "video_core/control/channel_state.h" | ||
| 10 | #include "video_core/engines/maxwell_3d.h" | 11 | #include "video_core/engines/maxwell_3d.h" |
| 11 | #include "video_core/gpu.h" | ||
| 12 | #include "video_core/renderer_opengl/gl_state_tracker.h" | 12 | #include "video_core/renderer_opengl/gl_state_tracker.h" |
| 13 | 13 | ||
| 14 | #define OFF(field_name) MAXWELL3D_REG_INDEX(field_name) | 14 | #define OFF(field_name) MAXWELL3D_REG_INDEX(field_name) |
| @@ -202,9 +202,8 @@ void SetupDirtyMisc(Tables& tables) { | |||
| 202 | 202 | ||
| 203 | } // Anonymous namespace | 203 | } // Anonymous namespace |
| 204 | 204 | ||
| 205 | StateTracker::StateTracker(Tegra::GPU& gpu) : flags{gpu.Maxwell3D().dirty.flags} { | 205 | void StateTracker::SetupTables(Tegra::Control::ChannelState& channel_state) { |
| 206 | auto& dirty = gpu.Maxwell3D().dirty; | 206 | auto& tables{channel_state.maxwell_3d->dirty.tables}; |
| 207 | auto& tables = dirty.tables; | ||
| 208 | SetupDirtyFlags(tables); | 207 | SetupDirtyFlags(tables); |
| 209 | SetupDirtyColorMasks(tables); | 208 | SetupDirtyColorMasks(tables); |
| 210 | SetupDirtyViewports(tables); | 209 | SetupDirtyViewports(tables); |
| @@ -230,4 +229,14 @@ StateTracker::StateTracker(Tegra::GPU& gpu) : flags{gpu.Maxwell3D().dirty.flags} | |||
| 230 | SetupDirtyMisc(tables); | 229 | SetupDirtyMisc(tables); |
| 231 | } | 230 | } |
| 232 | 231 | ||
| 232 | void StateTracker::ChangeChannel(Tegra::Control::ChannelState& channel_state) { | ||
| 233 | flags = &channel_state.maxwell_3d->dirty.flags; | ||
| 234 | } | ||
| 235 | |||
| 236 | void StateTracker::InvalidateState() { | ||
| 237 | flags->set(); | ||
| 238 | } | ||
| 239 | |||
| 240 | StateTracker::StateTracker() : flags{&default_flags} {} | ||
| 241 | |||
| 233 | } // namespace OpenGL | 242 | } // namespace OpenGL |
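StateTracker now works through a flags pointer: SetupTables() wires up a channel's dirty tables, ChangeChannel() repoints flags at the newly bound channel's bitset, InvalidateState() sets every bit after a switch, and the default constructor points at a local default_flags set. A simplified, hedged model of that indirection, with std::bitset standing in for Maxwell3D's dirty-flag type:

    #include <bitset>
    #include <cstddef>

    class StateTrackerSketch {
    public:
        using Flags = std::bitset<256>; // stand-in for the Maxwell3D dirty-flag bitset

        StateTrackerSketch() : flags{&default_flags} {}

        // Point at the newly bound channel's dirty flags.
        void ChangeChannel(Flags& channel_flags) { flags = &channel_flags; }
        // After a channel switch, treat every piece of state as dirty.
        void InvalidateState() { flags->set(); }
        // Notify* helpers now dereference the pointer instead of a reference member.
        void Notify(std::size_t index) { (*flags)[index] = true; }

    private:
        Flags default_flags;
        Flags* flags;
    };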
diff --git a/src/video_core/renderer_opengl/gl_state_tracker.h b/src/video_core/renderer_opengl/gl_state_tracker.h index 04e024f08..19bcf3f35 100644 --- a/src/video_core/renderer_opengl/gl_state_tracker.h +++ b/src/video_core/renderer_opengl/gl_state_tracker.h | |||
| @@ -12,8 +12,10 @@ | |||
| 12 | #include "video_core/engines/maxwell_3d.h" | 12 | #include "video_core/engines/maxwell_3d.h" |
| 13 | 13 | ||
| 14 | namespace Tegra { | 14 | namespace Tegra { |
| 15 | class GPU; | 15 | namespace Control { |
| 16 | struct ChannelState; | ||
| 16 | } | 17 | } |
| 18 | } // namespace Tegra | ||
| 17 | 19 | ||
| 18 | namespace OpenGL { | 20 | namespace OpenGL { |
| 19 | 21 | ||
| @@ -83,7 +85,7 @@ static_assert(Last <= std::numeric_limits<u8>::max()); | |||
| 83 | 85 | ||
| 84 | class StateTracker { | 86 | class StateTracker { |
| 85 | public: | 87 | public: |
| 86 | explicit StateTracker(Tegra::GPU& gpu); | 88 | explicit StateTracker(); |
| 87 | 89 | ||
| 88 | void BindIndexBuffer(GLuint new_index_buffer) { | 90 | void BindIndexBuffer(GLuint new_index_buffer) { |
| 89 | if (index_buffer == new_index_buffer) { | 91 | if (index_buffer == new_index_buffer) { |
| @@ -121,94 +123,107 @@ public: | |||
| 121 | } | 123 | } |
| 122 | 124 | ||
| 123 | void NotifyScreenDrawVertexArray() { | 125 | void NotifyScreenDrawVertexArray() { |
| 124 | flags[OpenGL::Dirty::VertexFormats] = true; | 126 | (*flags)[OpenGL::Dirty::VertexFormats] = true; |
| 125 | flags[OpenGL::Dirty::VertexFormat0 + 0] = true; | 127 | (*flags)[OpenGL::Dirty::VertexFormat0 + 0] = true; |
| 126 | flags[OpenGL::Dirty::VertexFormat0 + 1] = true; | 128 | (*flags)[OpenGL::Dirty::VertexFormat0 + 1] = true; |
| 127 | 129 | ||
| 128 | flags[VideoCommon::Dirty::VertexBuffers] = true; | 130 | (*flags)[VideoCommon::Dirty::VertexBuffers] = true; |
| 129 | flags[VideoCommon::Dirty::VertexBuffer0] = true; | 131 | (*flags)[VideoCommon::Dirty::VertexBuffer0] = true; |
| 130 | 132 | ||
| 131 | flags[OpenGL::Dirty::VertexInstances] = true; | 133 | (*flags)[OpenGL::Dirty::VertexInstances] = true; |
| 132 | flags[OpenGL::Dirty::VertexInstance0 + 0] = true; | 134 | (*flags)[OpenGL::Dirty::VertexInstance0 + 0] = true; |
| 133 | flags[OpenGL::Dirty::VertexInstance0 + 1] = true; | 135 | (*flags)[OpenGL::Dirty::VertexInstance0 + 1] = true; |
| 134 | } | 136 | } |
| 135 | 137 | ||
| 136 | void NotifyPolygonModes() { | 138 | void NotifyPolygonModes() { |
| 137 | flags[OpenGL::Dirty::PolygonModes] = true; | 139 | (*flags)[OpenGL::Dirty::PolygonModes] = true; |
| 138 | flags[OpenGL::Dirty::PolygonModeFront] = true; | 140 | (*flags)[OpenGL::Dirty::PolygonModeFront] = true; |
| 139 | flags[OpenGL::Dirty::PolygonModeBack] = true; | 141 | (*flags)[OpenGL::Dirty::PolygonModeBack] = true; |
| 140 | } | 142 | } |
| 141 | 143 | ||
| 142 | void NotifyViewport0() { | 144 | void NotifyViewport0() { |
| 143 | flags[OpenGL::Dirty::Viewports] = true; | 145 | (*flags)[OpenGL::Dirty::Viewports] = true; |
| 144 | flags[OpenGL::Dirty::Viewport0] = true; | 146 | (*flags)[OpenGL::Dirty::Viewport0] = true; |
| 145 | } | 147 | } |
| 146 | 148 | ||
| 147 | void NotifyScissor0() { | 149 | void NotifyScissor0() { |
| 148 | flags[OpenGL::Dirty::Scissors] = true; | 150 | (*flags)[OpenGL::Dirty::Scissors] = true; |
| 149 | flags[OpenGL::Dirty::Scissor0] = true; | 151 | (*flags)[OpenGL::Dirty::Scissor0] = true; |
| 150 | } | 152 | } |
| 151 | 153 | ||
| 152 | void NotifyColorMask(size_t index) { | 154 | void NotifyColorMask(size_t index) { |
| 153 | flags[OpenGL::Dirty::ColorMasks] = true; | 155 | (*flags)[OpenGL::Dirty::ColorMasks] = true; |
| 154 | flags[OpenGL::Dirty::ColorMask0 + index] = true; | 156 | (*flags)[OpenGL::Dirty::ColorMask0 + index] = true; |
| 155 | } | 157 | } |
| 156 | 158 | ||
| 157 | void NotifyBlend0() { | 159 | void NotifyBlend0() { |
| 158 | flags[OpenGL::Dirty::BlendStates] = true; | 160 | (*flags)[OpenGL::Dirty::BlendStates] = true; |
| 159 | flags[OpenGL::Dirty::BlendState0] = true; | 161 | (*flags)[OpenGL::Dirty::BlendState0] = true; |
| 160 | } | 162 | } |
| 161 | 163 | ||
| 162 | void NotifyFramebuffer() { | 164 | void NotifyFramebuffer() { |
| 163 | flags[VideoCommon::Dirty::RenderTargets] = true; | 165 | (*flags)[VideoCommon::Dirty::RenderTargets] = true; |
| 164 | } | 166 | } |
| 165 | 167 | ||
| 166 | void NotifyFrontFace() { | 168 | void NotifyFrontFace() { |
| 167 | flags[OpenGL::Dirty::FrontFace] = true; | 169 | (*flags)[OpenGL::Dirty::FrontFace] = true; |
| 168 | } | 170 | } |
| 169 | 171 | ||
| 170 | void NotifyCullTest() { | 172 | void NotifyCullTest() { |
| 171 | flags[OpenGL::Dirty::CullTest] = true; | 173 | (*flags)[OpenGL::Dirty::CullTest] = true; |
| 172 | } | 174 | } |
| 173 | 175 | ||
| 174 | void NotifyDepthMask() { | 176 | void NotifyDepthMask() { |
| 175 | flags[OpenGL::Dirty::DepthMask] = true; | 177 | (*flags)[OpenGL::Dirty::DepthMask] = true; |
| 176 | } | 178 | } |
| 177 | 179 | ||
| 178 | void NotifyDepthTest() { | 180 | void NotifyDepthTest() { |
| 179 | flags[OpenGL::Dirty::DepthTest] = true; | 181 | (*flags)[OpenGL::Dirty::DepthTest] = true; |
| 180 | } | 182 | } |
| 181 | 183 | ||
| 182 | void NotifyStencilTest() { | 184 | void NotifyStencilTest() { |
| 183 | flags[OpenGL::Dirty::StencilTest] = true; | 185 | (*flags)[OpenGL::Dirty::StencilTest] = true; |
| 184 | } | 186 | } |
| 185 | 187 | ||
| 186 | void NotifyPolygonOffset() { | 188 | void NotifyPolygonOffset() { |
| 187 | flags[OpenGL::Dirty::PolygonOffset] = true; | 189 | (*flags)[OpenGL::Dirty::PolygonOffset] = true; |
| 188 | } | 190 | } |
| 189 | 191 | ||
| 190 | void NotifyRasterizeEnable() { | 192 | void NotifyRasterizeEnable() { |
| 191 | flags[OpenGL::Dirty::RasterizeEnable] = true; | 193 | (*flags)[OpenGL::Dirty::RasterizeEnable] = true; |
| 192 | } | 194 | } |
| 193 | 195 | ||
| 194 | void NotifyFramebufferSRGB() { | 196 | void NotifyFramebufferSRGB() { |
| 195 | flags[OpenGL::Dirty::FramebufferSRGB] = true; | 197 | (*flags)[OpenGL::Dirty::FramebufferSRGB] = true; |
| 196 | } | 198 | } |
| 197 | 199 | ||
| 198 | void NotifyLogicOp() { | 200 | void NotifyLogicOp() { |
| 199 | flags[OpenGL::Dirty::LogicOp] = true; | 201 | (*flags)[OpenGL::Dirty::LogicOp] = true; |
| 200 | } | 202 | } |
| 201 | 203 | ||
| 202 | void NotifyClipControl() { | 204 | void NotifyClipControl() { |
| 203 | flags[OpenGL::Dirty::ClipControl] = true; | 205 | (*flags)[OpenGL::Dirty::ClipControl] = true; |
| 204 | } | 206 | } |
| 205 | 207 | ||
| 206 | void NotifyAlphaTest() { | 208 | void NotifyAlphaTest() { |
| 207 | flags[OpenGL::Dirty::AlphaTest] = true; | 209 | (*flags)[OpenGL::Dirty::AlphaTest] = true; |
| 208 | } | 210 | } |
| 209 | 211 | ||
| 212 | void NotifyRange(u8 start, u8 end) { | ||
| 213 | for (auto flag = start; flag <= end; flag++) { | ||
| 214 | (*flags)[flag] = true; | ||
| 215 | } | ||
| 216 | } | ||
| 217 | |||
| 218 | void SetupTables(Tegra::Control::ChannelState& channel_state); | ||
| 219 | |||
| 220 | void ChangeChannel(Tegra::Control::ChannelState& channel_state); | ||
| 221 | |||
| 222 | void InvalidateState(); | ||
| 223 | |||
| 210 | private: | 224 | private: |
| 211 | Tegra::Engines::Maxwell3D::DirtyState::Flags& flags; | 225 | Tegra::Engines::Maxwell3D::DirtyState::Flags* flags; |
| 226 | Tegra::Engines::Maxwell3D::DirtyState::Flags default_flags{}; | ||
| 212 | 227 | ||
| 213 | GLuint framebuffer = 0; | 228 | GLuint framebuffer = 0; |
| 214 | GLuint index_buffer = 0; | 229 | GLuint index_buffer = 0; |
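The header also gains a `NotifyRange` helper that marks an inclusive run of flag indices dirty, which is why the member changed from a reference to a pointer plus a `default_flags` fallback. A standalone sketch of what such a helper does, with invented flag indices:

```cpp
#include <bitset>
#include <cstdint>

using Flags = std::bitset<128>;

// Marks the inclusive index range [start, end] dirty, mirroring the shape of
// the helper added above (real flag indices differ and are much denser).
void NotifyRange(Flags* flags, std::uint8_t start, std::uint8_t end) {
    for (auto flag = start; flag <= end; ++flag) {
        (*flags)[flag] = true;
    }
}

int main() {
    Flags flags;
    constexpr std::uint8_t Viewport0 = 10;  // hypothetical indices
    constexpr std::uint8_t Viewport15 = 25;
    NotifyRange(&flags, Viewport0, Viewport15);
    return flags.count() == 16 ? 0 : 1;
}
```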
diff --git a/src/video_core/renderer_opengl/maxwell_to_gl.h b/src/video_core/renderer_opengl/maxwell_to_gl.h index dfe7f26ca..004421236 100644 --- a/src/video_core/renderer_opengl/maxwell_to_gl.h +++ b/src/video_core/renderer_opengl/maxwell_to_gl.h | |||
| @@ -87,7 +87,7 @@ constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> FORMAT_TAB | |||
| 87 | {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT}, // BC3_SRGB | 87 | {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT}, // BC3_SRGB |
| 88 | {GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM}, // BC7_SRGB | 88 | {GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM}, // BC7_SRGB |
| 89 | {GL_RGBA4, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4_REV}, // A4B4G4R4_UNORM | 89 | {GL_RGBA4, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4_REV}, // A4B4G4R4_UNORM |
| 90 | {GL_R8, GL_RED, GL_UNSIGNED_BYTE}, // R4G4_UNORM | 90 | {GL_R8, GL_RED, GL_UNSIGNED_BYTE}, // G4R4_UNORM |
| 91 | {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR}, // ASTC_2D_4X4_SRGB | 91 | {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR}, // ASTC_2D_4X4_SRGB |
| 92 | {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR}, // ASTC_2D_8X8_SRGB | 92 | {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR}, // ASTC_2D_8X8_SRGB |
| 93 | {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR}, // ASTC_2D_8X5_SRGB | 93 | {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR}, // ASTC_2D_8X5_SRGB |
diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp index 34f3f7a67..8bd5eba7e 100644 --- a/src/video_core/renderer_opengl/renderer_opengl.cpp +++ b/src/video_core/renderer_opengl/renderer_opengl.cpp | |||
| @@ -131,7 +131,7 @@ RendererOpenGL::RendererOpenGL(Core::TelemetrySession& telemetry_session_, | |||
| 131 | Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_, | 131 | Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_, |
| 132 | std::unique_ptr<Core::Frontend::GraphicsContext> context_) | 132 | std::unique_ptr<Core::Frontend::GraphicsContext> context_) |
| 133 | : RendererBase{emu_window_, std::move(context_)}, telemetry_session{telemetry_session_}, | 133 | : RendererBase{emu_window_, std::move(context_)}, telemetry_session{telemetry_session_}, |
| 134 | emu_window{emu_window_}, cpu_memory{cpu_memory_}, gpu{gpu_}, state_tracker{gpu}, | 134 | emu_window{emu_window_}, cpu_memory{cpu_memory_}, gpu{gpu_}, state_tracker{}, |
| 135 | program_manager{device}, | 135 | program_manager{device}, |
| 136 | rasterizer(emu_window, gpu, cpu_memory, device, screen_info, program_manager, state_tracker) { | 136 | rasterizer(emu_window, gpu, cpu_memory, device, screen_info, program_manager, state_tracker) { |
| 137 | if (Settings::values.renderer_debug && GLAD_GL_KHR_debug) { | 137 | if (Settings::values.renderer_debug && GLAD_GL_KHR_debug) { |
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp index 6703b8e68..e7104d377 100644 --- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp +++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp | |||
| @@ -184,7 +184,7 @@ struct FormatTuple { | |||
| 184 | {VK_FORMAT_BC3_SRGB_BLOCK}, // BC3_SRGB | 184 | {VK_FORMAT_BC3_SRGB_BLOCK}, // BC3_SRGB |
| 185 | {VK_FORMAT_BC7_SRGB_BLOCK}, // BC7_SRGB | 185 | {VK_FORMAT_BC7_SRGB_BLOCK}, // BC7_SRGB |
| 186 | {VK_FORMAT_R4G4B4A4_UNORM_PACK16, Attachable}, // A4B4G4R4_UNORM | 186 | {VK_FORMAT_R4G4B4A4_UNORM_PACK16, Attachable}, // A4B4G4R4_UNORM |
| 187 | {VK_FORMAT_R4G4_UNORM_PACK8}, // R4G4_UNORM | 187 | {VK_FORMAT_R4G4_UNORM_PACK8}, // G4R4_UNORM |
| 188 | {VK_FORMAT_ASTC_4x4_SRGB_BLOCK}, // ASTC_2D_4X4_SRGB | 188 | {VK_FORMAT_ASTC_4x4_SRGB_BLOCK}, // ASTC_2D_4X4_SRGB |
| 189 | {VK_FORMAT_ASTC_8x8_SRGB_BLOCK}, // ASTC_2D_8X8_SRGB | 189 | {VK_FORMAT_ASTC_8x8_SRGB_BLOCK}, // ASTC_2D_8X8_SRGB |
| 190 | {VK_FORMAT_ASTC_8x5_SRGB_BLOCK}, // ASTC_2D_8X5_SRGB | 190 | {VK_FORMAT_ASTC_8x5_SRGB_BLOCK}, // ASTC_2D_8X5_SRGB |
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp index 7c78d0299..d8131232a 100644 --- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp +++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp | |||
| @@ -102,13 +102,13 @@ RendererVulkan::RendererVulkan(Core::TelemetrySession& telemetry_session_, | |||
| 102 | debug_callback(Settings::values.renderer_debug ? CreateDebugCallback(instance) : nullptr), | 102 | debug_callback(Settings::values.renderer_debug ? CreateDebugCallback(instance) : nullptr), |
| 103 | surface(CreateSurface(instance, render_window)), | 103 | surface(CreateSurface(instance, render_window)), |
| 104 | device(CreateDevice(instance, dld, *surface)), memory_allocator(device, false), | 104 | device(CreateDevice(instance, dld, *surface)), memory_allocator(device, false), |
| 105 | state_tracker(gpu), scheduler(device, state_tracker), | 105 | state_tracker(), scheduler(device, state_tracker), |
| 106 | swapchain(*surface, device, scheduler, render_window.GetFramebufferLayout().width, | 106 | swapchain(*surface, device, scheduler, render_window.GetFramebufferLayout().width, |
| 107 | render_window.GetFramebufferLayout().height, false), | 107 | render_window.GetFramebufferLayout().height, false), |
| 108 | blit_screen(cpu_memory, render_window, device, memory_allocator, swapchain, scheduler, | 108 | blit_screen(cpu_memory, render_window, device, memory_allocator, swapchain, scheduler, |
| 109 | screen_info), | 109 | screen_info), |
| 110 | rasterizer(render_window, gpu, gpu.MemoryManager(), cpu_memory, screen_info, device, | 110 | rasterizer(render_window, gpu, cpu_memory, screen_info, device, memory_allocator, |
| 111 | memory_allocator, state_tracker, scheduler) { | 111 | state_tracker, scheduler) { |
| 112 | Report(); | 112 | Report(); |
| 113 | } catch (const vk::Exception& exception) { | 113 | } catch (const vk::Exception& exception) { |
| 114 | LOG_ERROR(Render_Vulkan, "Vulkan initialization failed with error: {}", exception.what()); | 114 | LOG_ERROR(Render_Vulkan, "Vulkan initialization failed with error: {}", exception.what()); |
| @@ -142,7 +142,7 @@ void RendererVulkan::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) { | |||
| 142 | const auto recreate_swapchain = [&] { | 142 | const auto recreate_swapchain = [&] { |
| 143 | if (!has_been_recreated) { | 143 | if (!has_been_recreated) { |
| 144 | has_been_recreated = true; | 144 | has_been_recreated = true; |
| 145 | scheduler.WaitWorker(); | 145 | scheduler.Finish(); |
| 146 | } | 146 | } |
| 147 | const Layout::FramebufferLayout layout = render_window.GetFramebufferLayout(); | 147 | const Layout::FramebufferLayout layout = render_window.GetFramebufferLayout(); |
| 148 | swapchain.Create(layout.width, layout.height, is_srgb); | 148 | swapchain.Create(layout.width, layout.height, is_srgb); |
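Swapchain recreation now calls `scheduler.Finish()` instead of `scheduler.WaitWorker()`. The practical difference, assuming the usual semantics of those names, is waiting for the GPU to drain all submitted work rather than only draining the CPU-side recording thread, which matters before destroying swapchain-dependent objects. The `Scheduler` below is a hypothetical model of that distinction, not yuzu's class.

```cpp
#include <functional>
#include <queue>

// Hypothetical scheduler illustrating the difference between draining the
// CPU-side recording queue and fully finishing GPU work.
class Scheduler {
public:
    void Record(std::function<void()> cmd) { pending.push(std::move(cmd)); }

    // Drains CPU-side recording only: commands are handed to the "GPU",
    // but nothing guarantees the GPU has executed them yet.
    void WaitWorker() {
        while (!pending.empty()) {
            submitted.push(std::move(pending.front()));
            pending.pop();
        }
    }

    // Flushes and then waits until the GPU has executed everything, which is
    // what a swapchain teardown needs before destroying in-flight resources.
    void Finish() {
        WaitWorker();
        while (!submitted.empty()) {
            submitted.front()(); // "execute" on the GPU
            submitted.pop();
        }
    }

private:
    std::queue<std::function<void()>> pending;   // recorded, not yet submitted
    std::queue<std::function<void()>> submitted; // submitted, not yet executed
};

int main() {
    Scheduler scheduler;
    int executed = 0;
    scheduler.Record([&] { ++executed; });
    scheduler.WaitWorker();  // submitted, but not necessarily executed
    scheduler.Finish();      // now guaranteed executed in this model
    return executed == 1 ? 0 : 1;
}
```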
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp index 444c29f68..cb7fa2078 100644 --- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp +++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp | |||
| @@ -145,6 +145,11 @@ VkSemaphore BlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, | |||
| 145 | // Finish any pending renderpass | 145 | // Finish any pending renderpass |
| 146 | scheduler.RequestOutsideRenderPassOperationContext(); | 146 | scheduler.RequestOutsideRenderPassOperationContext(); |
| 147 | 147 | ||
| 148 | if (const auto swapchain_images = swapchain.GetImageCount(); swapchain_images != image_count) { | ||
| 149 | image_count = swapchain_images; | ||
| 150 | Recreate(); | ||
| 151 | } | ||
| 152 | |||
| 148 | const std::size_t image_index = swapchain.GetImageIndex(); | 153 | const std::size_t image_index = swapchain.GetImageIndex(); |
| 149 | 154 | ||
| 150 | scheduler.Wait(resource_ticks[image_index]); | 155 | scheduler.Wait(resource_ticks[image_index]); |
| @@ -448,15 +453,15 @@ vk::Framebuffer BlitScreen::CreateFramebuffer(const VkImageView& image_view, VkE | |||
| 448 | 453 | ||
| 449 | void BlitScreen::CreateStaticResources() { | 454 | void BlitScreen::CreateStaticResources() { |
| 450 | CreateShaders(); | 455 | CreateShaders(); |
| 456 | CreateSampler(); | ||
| 457 | } | ||
| 458 | |||
| 459 | void BlitScreen::CreateDynamicResources() { | ||
| 451 | CreateSemaphores(); | 460 | CreateSemaphores(); |
| 452 | CreateDescriptorPool(); | 461 | CreateDescriptorPool(); |
| 453 | CreateDescriptorSetLayout(); | 462 | CreateDescriptorSetLayout(); |
| 454 | CreateDescriptorSets(); | 463 | CreateDescriptorSets(); |
| 455 | CreatePipelineLayout(); | 464 | CreatePipelineLayout(); |
| 456 | CreateSampler(); | ||
| 457 | } | ||
| 458 | |||
| 459 | void BlitScreen::CreateDynamicResources() { | ||
| 460 | CreateRenderPass(); | 465 | CreateRenderPass(); |
| 461 | CreateFramebuffers(); | 466 | CreateFramebuffers(); |
| 462 | CreateGraphicsPipeline(); | 467 | CreateGraphicsPipeline(); |
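With `image_count` no longer const (see the header hunk below), `BlitScreen::Draw` checks each frame whether the swapchain's image count changed and rebuilds its per-image resources when it did. A condensed sketch of that check; the types and the `Recreate` body are placeholders for the real semaphores, descriptor sets, and framebuffers.

```cpp
#include <cstddef>
#include <vector>

struct Swapchain {
    std::size_t GetImageCount() const { return image_count; }
    std::size_t image_count = 3;
};

class BlitScreen {
public:
    explicit BlitScreen(const Swapchain& swapchain_) : swapchain{swapchain_} {}

    void Draw() {
        // If the swapchain was recreated with a different number of images,
        // the per-image resources must be rebuilt before indexing into them.
        if (const auto count = swapchain.GetImageCount(); count != image_count) {
            image_count = count;
            Recreate();
        }
        // ... blit using resources[image_index] ...
    }

private:
    void Recreate() { per_image_resources.assign(image_count, 0); }

    const Swapchain& swapchain;
    std::size_t image_count = 0;          // no longer const: tracked per frame
    std::vector<int> per_image_resources; // stand-in for semaphores, sets, etc.
};

int main() {
    Swapchain swapchain;
    BlitScreen blit{swapchain};
    blit.Draw();                  // first frame: resources sized for 3 images
    swapchain.image_count = 2;
    blit.Draw();                  // swapchain shrank: resources rebuilt for 2
    return 0;
}
```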
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.h b/src/video_core/renderer_vulkan/vk_blit_screen.h index b8c67bef0..29e2ea925 100644 --- a/src/video_core/renderer_vulkan/vk_blit_screen.h +++ b/src/video_core/renderer_vulkan/vk_blit_screen.h | |||
| @@ -109,7 +109,7 @@ private: | |||
| 109 | MemoryAllocator& memory_allocator; | 109 | MemoryAllocator& memory_allocator; |
| 110 | Swapchain& swapchain; | 110 | Swapchain& swapchain; |
| 111 | Scheduler& scheduler; | 111 | Scheduler& scheduler; |
| 112 | const std::size_t image_count; | 112 | std::size_t image_count; |
| 113 | const ScreenInfo& screen_info; | 113 | const ScreenInfo& screen_info; |
| 114 | 114 | ||
| 115 | vk::ShaderModule vertex_shader; | 115 | vk::ShaderModule vertex_shader; |
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp index f17a5ccd6..241d7573e 100644 --- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp +++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp | |||
| @@ -26,8 +26,6 @@ | |||
| 26 | 26 | ||
| 27 | namespace Vulkan { | 27 | namespace Vulkan { |
| 28 | 28 | ||
| 29 | using Tegra::Texture::SWIZZLE_TABLE; | ||
| 30 | |||
| 31 | namespace { | 29 | namespace { |
| 32 | 30 | ||
| 33 | constexpr u32 ASTC_BINDING_INPUT_BUFFER = 0; | 31 | constexpr u32 ASTC_BINDING_INPUT_BUFFER = 0; |
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp index 6447210e2..7906e11a8 100644 --- a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp +++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp | |||
| @@ -126,8 +126,8 @@ void ComputePipeline::Configure(Tegra::Engines::KeplerCompute& kepler_compute, | |||
| 126 | const u32 secondary_offset{desc.secondary_cbuf_offset + index_offset}; | 126 | const u32 secondary_offset{desc.secondary_cbuf_offset + index_offset}; |
| 127 | const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].Address() + | 127 | const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].Address() + |
| 128 | secondary_offset}; | 128 | secondary_offset}; |
| 129 | const u32 lhs_raw{gpu_memory.Read<u32>(addr)}; | 129 | const u32 lhs_raw{gpu_memory.Read<u32>(addr) << desc.shift_left}; |
| 130 | const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)}; | 130 | const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr) << desc.secondary_shift_left}; |
| 131 | return TexturePair(lhs_raw | rhs_raw, via_header_index); | 131 | return TexturePair(lhs_raw | rhs_raw, via_header_index); |
| 132 | } | 132 | } |
| 133 | } | 133 | } |
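The bindless texture path now shifts each constant-buffer word by a per-descriptor amount before OR-ing the two halves together, rather than OR-ing the raw words. A minimal sketch of that combination; the field names mirror the descriptor in the diff, but the values and the helper are invented for illustration.

```cpp
#include <cstdint>

// Hypothetical descriptor carrying the shift amounts derived for the two
// constant-buffer words that make up a texture handle.
struct TextureDescriptor {
    std::uint32_t shift_left;
    std::uint32_t secondary_shift_left;
};

// Combine the two raw words into one handle: each half is shifted into
// position first, then the halves are merged.
std::uint32_t CombineHandle(std::uint32_t lhs_word, std::uint32_t rhs_word,
                            const TextureDescriptor& desc) {
    const std::uint32_t lhs = lhs_word << desc.shift_left;
    const std::uint32_t rhs = rhs_word << desc.secondary_shift_left;
    return lhs | rhs;
}

int main() {
    // Example: the texture index stays in the low bits while the sampler
    // index is shifted into the high bits (values are illustrative only).
    const TextureDescriptor desc{.shift_left = 0, .secondary_shift_left = 20};
    const std::uint32_t handle = CombineHandle(0x123, 0x45, desc);
    return handle == (0x123u | (0x45u << 20)) ? 0 : 1;
}
```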
diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.cpp b/src/video_core/renderer_vulkan/vk_fence_manager.cpp index c249b34d4..0214b103a 100644 --- a/src/video_core/renderer_vulkan/vk_fence_manager.cpp +++ b/src/video_core/renderer_vulkan/vk_fence_manager.cpp | |||
| @@ -11,11 +11,8 @@ | |||
| 11 | 11 | ||
| 12 | namespace Vulkan { | 12 | namespace Vulkan { |
| 13 | 13 | ||
| 14 | InnerFence::InnerFence(Scheduler& scheduler_, u32 payload_, bool is_stubbed_) | 14 | InnerFence::InnerFence(Scheduler& scheduler_, bool is_stubbed_) |
| 15 | : FenceBase{payload_, is_stubbed_}, scheduler{scheduler_} {} | 15 | : FenceBase{is_stubbed_}, scheduler{scheduler_} {} |
| 16 | |||
| 17 | InnerFence::InnerFence(Scheduler& scheduler_, GPUVAddr address_, u32 payload_, bool is_stubbed_) | ||
| 18 | : FenceBase{address_, payload_, is_stubbed_}, scheduler{scheduler_} {} | ||
| 19 | 16 | ||
| 20 | InnerFence::~InnerFence() = default; | 17 | InnerFence::~InnerFence() = default; |
| 21 | 18 | ||
| @@ -48,12 +45,8 @@ FenceManager::FenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::G | |||
| 48 | : GenericFenceManager{rasterizer_, gpu_, texture_cache_, buffer_cache_, query_cache_}, | 45 | : GenericFenceManager{rasterizer_, gpu_, texture_cache_, buffer_cache_, query_cache_}, |
| 49 | scheduler{scheduler_} {} | 46 | scheduler{scheduler_} {} |
| 50 | 47 | ||
| 51 | Fence FenceManager::CreateFence(u32 value, bool is_stubbed) { | 48 | Fence FenceManager::CreateFence(bool is_stubbed) { |
| 52 | return std::make_shared<InnerFence>(scheduler, value, is_stubbed); | 49 | return std::make_shared<InnerFence>(scheduler, is_stubbed); |
| 53 | } | ||
| 54 | |||
| 55 | Fence FenceManager::CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) { | ||
| 56 | return std::make_shared<InnerFence>(scheduler, addr, value, is_stubbed); | ||
| 57 | } | 50 | } |
| 58 | 51 | ||
| 59 | void FenceManager::QueueFence(Fence& fence) { | 52 | void FenceManager::QueueFence(Fence& fence) { |
diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.h b/src/video_core/renderer_vulkan/vk_fence_manager.h index 7c0bbd80a..7fe2afcd9 100644 --- a/src/video_core/renderer_vulkan/vk_fence_manager.h +++ b/src/video_core/renderer_vulkan/vk_fence_manager.h | |||
| @@ -25,8 +25,7 @@ class Scheduler; | |||
| 25 | 25 | ||
| 26 | class InnerFence : public VideoCommon::FenceBase { | 26 | class InnerFence : public VideoCommon::FenceBase { |
| 27 | public: | 27 | public: |
| 28 | explicit InnerFence(Scheduler& scheduler_, u32 payload_, bool is_stubbed_); | 28 | explicit InnerFence(Scheduler& scheduler_, bool is_stubbed_); |
| 29 | explicit InnerFence(Scheduler& scheduler_, GPUVAddr address_, u32 payload_, bool is_stubbed_); | ||
| 30 | ~InnerFence(); | 29 | ~InnerFence(); |
| 31 | 30 | ||
| 32 | void Queue(); | 31 | void Queue(); |
| @@ -50,8 +49,7 @@ public: | |||
| 50 | QueryCache& query_cache, const Device& device, Scheduler& scheduler); | 49 | QueryCache& query_cache, const Device& device, Scheduler& scheduler); |
| 51 | 50 | ||
| 52 | protected: | 51 | protected: |
| 53 | Fence CreateFence(u32 value, bool is_stubbed) override; | 52 | Fence CreateFence(bool is_stubbed) override; |
| 54 | Fence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) override; | ||
| 55 | void QueueFence(Fence& fence) override; | 53 | void QueueFence(Fence& fence) override; |
| 56 | bool IsFenceSignaled(Fence& fence) const override; | 54 | bool IsFenceSignaled(Fence& fence) const override; |
| 57 | void WaitFence(Fence& fence) override; | 55 | void WaitFence(Fence& fence) override; |
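Fences no longer carry a GPU address and payload; the rasterizer instead hands the fence manager an arbitrary deferred operation that runs once the fence is known to be signaled (see `SignalFence`/`SyncOperation` in the rasterizer hunks below). A simplified model of that flow, purely for illustration and not yuzu's actual FenceManager:

```cpp
#include <functional>
#include <memory>
#include <queue>
#include <utility>

struct Fence {
    bool is_stubbed = false;
    bool signaled = false;
};

class FenceManager {
public:
    // Attach a deferred operation (semaphore write, syncpoint increment, ...)
    // to a new fence; it runs when the fence is released.
    void SignalFence(std::function<void()>&& func) {
        auto fence = std::make_shared<Fence>();
        pending.push({fence, std::move(func)});
        // Real code would queue the fence on the GPU here.
        fence->signaled = true; // pretend the GPU signaled it immediately
    }

    // Run an operation in order with the fence queue without creating a fence.
    void SyncOperation(std::function<void()>&& func) { func(); }

    // Pop fences in submission order, executing their deferred operations.
    void WaitPendingFences() {
        while (!pending.empty()) {
            auto& [fence, func] = pending.front();
            if (!fence->signaled && !fence->is_stubbed) {
                break; // real code would wait on the fence here
            }
            func();
            pending.pop();
        }
    }

private:
    std::queue<std::pair<std::shared_ptr<Fence>, std::function<void()>>> pending;
};

int main() {
    FenceManager manager;
    int writes = 0;
    manager.SignalFence([&] { ++writes; }); // e.g. "write semaphore value"
    manager.WaitPendingFences();
    return writes == 1 ? 0 : 1;
}
```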
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp index 5aca8f038..f47786f48 100644 --- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp +++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp | |||
| @@ -215,15 +215,14 @@ ConfigureFuncPtr ConfigureFunc(const std::array<vk::ShaderModule, NUM_STAGES>& m | |||
| 215 | } // Anonymous namespace | 215 | } // Anonymous namespace |
| 216 | 216 | ||
| 217 | GraphicsPipeline::GraphicsPipeline( | 217 | GraphicsPipeline::GraphicsPipeline( |
| 218 | Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_, Scheduler& scheduler_, | 218 | Scheduler& scheduler_, BufferCache& buffer_cache_, TextureCache& texture_cache_, |
| 219 | BufferCache& buffer_cache_, TextureCache& texture_cache_, | ||
| 220 | VideoCore::ShaderNotify* shader_notify, const Device& device_, DescriptorPool& descriptor_pool, | 219 | VideoCore::ShaderNotify* shader_notify, const Device& device_, DescriptorPool& descriptor_pool, |
| 221 | UpdateDescriptorQueue& update_descriptor_queue_, Common::ThreadWorker* worker_thread, | 220 | UpdateDescriptorQueue& update_descriptor_queue_, Common::ThreadWorker* worker_thread, |
| 222 | PipelineStatistics* pipeline_statistics, RenderPassCache& render_pass_cache, | 221 | PipelineStatistics* pipeline_statistics, RenderPassCache& render_pass_cache, |
| 223 | const GraphicsPipelineCacheKey& key_, std::array<vk::ShaderModule, NUM_STAGES> stages, | 222 | const GraphicsPipelineCacheKey& key_, std::array<vk::ShaderModule, NUM_STAGES> stages, |
| 224 | const std::array<const Shader::Info*, NUM_STAGES>& infos) | 223 | const std::array<const Shader::Info*, NUM_STAGES>& infos) |
| 225 | : key{key_}, maxwell3d{maxwell3d_}, gpu_memory{gpu_memory_}, device{device_}, | 224 | : key{key_}, device{device_}, texture_cache{texture_cache_}, |
| 226 | texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, scheduler{scheduler_}, | 225 | buffer_cache{buffer_cache_}, scheduler{scheduler_}, |
| 227 | update_descriptor_queue{update_descriptor_queue_}, spv_modules{std::move(stages)} { | 226 | update_descriptor_queue{update_descriptor_queue_}, spv_modules{std::move(stages)} { |
| 228 | if (shader_notify) { | 227 | if (shader_notify) { |
| 229 | shader_notify->MarkShaderBuilding(); | 228 | shader_notify->MarkShaderBuilding(); |
| @@ -288,7 +287,7 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) { | |||
| 288 | 287 | ||
| 289 | buffer_cache.SetUniformBuffersState(enabled_uniform_buffer_masks, &uniform_buffer_sizes); | 288 | buffer_cache.SetUniformBuffersState(enabled_uniform_buffer_masks, &uniform_buffer_sizes); |
| 290 | 289 | ||
| 291 | const auto& regs{maxwell3d.regs}; | 290 | const auto& regs{maxwell3d->regs}; |
| 292 | const bool via_header_index{regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex}; | 291 | const bool via_header_index{regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex}; |
| 293 | const auto config_stage{[&](size_t stage) LAMBDA_FORCEINLINE { | 292 | const auto config_stage{[&](size_t stage) LAMBDA_FORCEINLINE { |
| 294 | const Shader::Info& info{stage_infos[stage]}; | 293 | const Shader::Info& info{stage_infos[stage]}; |
| @@ -302,7 +301,7 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) { | |||
| 302 | ++ssbo_index; | 301 | ++ssbo_index; |
| 303 | } | 302 | } |
| 304 | } | 303 | } |
| 305 | const auto& cbufs{maxwell3d.state.shader_stages[stage].const_buffers}; | 304 | const auto& cbufs{maxwell3d->state.shader_stages[stage].const_buffers}; |
| 306 | const auto read_handle{[&](const auto& desc, u32 index) { | 305 | const auto read_handle{[&](const auto& desc, u32 index) { |
| 307 | ASSERT(cbufs[desc.cbuf_index].enabled); | 306 | ASSERT(cbufs[desc.cbuf_index].enabled); |
| 308 | const u32 index_offset{index << desc.size_shift}; | 307 | const u32 index_offset{index << desc.size_shift}; |
| @@ -315,13 +314,14 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) { | |||
| 315 | const u32 second_offset{desc.secondary_cbuf_offset + index_offset}; | 314 | const u32 second_offset{desc.secondary_cbuf_offset + index_offset}; |
| 316 | const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].address + | 315 | const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].address + |
| 317 | second_offset}; | 316 | second_offset}; |
| 318 | const u32 lhs_raw{gpu_memory.Read<u32>(addr)}; | 317 | const u32 lhs_raw{gpu_memory->Read<u32>(addr) << desc.shift_left}; |
| 319 | const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)}; | 318 | const u32 rhs_raw{gpu_memory->Read<u32>(separate_addr) |
| 319 | << desc.secondary_shift_left}; | ||
| 320 | const u32 raw{lhs_raw | rhs_raw}; | 320 | const u32 raw{lhs_raw | rhs_raw}; |
| 321 | return TexturePair(raw, via_header_index); | 321 | return TexturePair(raw, via_header_index); |
| 322 | } | 322 | } |
| 323 | } | 323 | } |
| 324 | return TexturePair(gpu_memory.Read<u32>(addr), via_header_index); | 324 | return TexturePair(gpu_memory->Read<u32>(addr), via_header_index); |
| 325 | }}; | 325 | }}; |
| 326 | const auto add_image{[&](const auto& desc, bool blacklist) LAMBDA_FORCEINLINE { | 326 | const auto add_image{[&](const auto& desc, bool blacklist) LAMBDA_FORCEINLINE { |
| 327 | for (u32 index = 0; index < desc.count; ++index) { | 327 | for (u32 index = 0; index < desc.count; ++index) { |
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h index e8949a9ab..85602592b 100644 --- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h +++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h | |||
| @@ -69,15 +69,16 @@ class GraphicsPipeline { | |||
| 69 | static constexpr size_t NUM_STAGES = Tegra::Engines::Maxwell3D::Regs::MaxShaderStage; | 69 | static constexpr size_t NUM_STAGES = Tegra::Engines::Maxwell3D::Regs::MaxShaderStage; |
| 70 | 70 | ||
| 71 | public: | 71 | public: |
| 72 | explicit GraphicsPipeline( | 72 | explicit GraphicsPipeline(Scheduler& scheduler, BufferCache& buffer_cache, |
| 73 | Tegra::Engines::Maxwell3D& maxwell3d, Tegra::MemoryManager& gpu_memory, | 73 | TextureCache& texture_cache, VideoCore::ShaderNotify* shader_notify, |
| 74 | Scheduler& scheduler, BufferCache& buffer_cache, TextureCache& texture_cache, | 74 | const Device& device, DescriptorPool& descriptor_pool, |
| 75 | VideoCore::ShaderNotify* shader_notify, const Device& device, | 75 | UpdateDescriptorQueue& update_descriptor_queue, |
| 76 | DescriptorPool& descriptor_pool, UpdateDescriptorQueue& update_descriptor_queue, | 76 | Common::ThreadWorker* worker_thread, |
| 77 | Common::ThreadWorker* worker_thread, PipelineStatistics* pipeline_statistics, | 77 | PipelineStatistics* pipeline_statistics, |
| 78 | RenderPassCache& render_pass_cache, const GraphicsPipelineCacheKey& key, | 78 | RenderPassCache& render_pass_cache, |
| 79 | std::array<vk::ShaderModule, NUM_STAGES> stages, | 79 | const GraphicsPipelineCacheKey& key, |
| 80 | const std::array<const Shader::Info*, NUM_STAGES>& infos); | 80 | std::array<vk::ShaderModule, NUM_STAGES> stages, |
| 81 | const std::array<const Shader::Info*, NUM_STAGES>& infos); | ||
| 81 | 82 | ||
| 82 | GraphicsPipeline& operator=(GraphicsPipeline&&) noexcept = delete; | 83 | GraphicsPipeline& operator=(GraphicsPipeline&&) noexcept = delete; |
| 83 | GraphicsPipeline(GraphicsPipeline&&) noexcept = delete; | 84 | GraphicsPipeline(GraphicsPipeline&&) noexcept = delete; |
| @@ -109,6 +110,11 @@ public: | |||
| 109 | return [](GraphicsPipeline* pl, bool is_indexed) { pl->ConfigureImpl<Spec>(is_indexed); }; | 110 | return [](GraphicsPipeline* pl, bool is_indexed) { pl->ConfigureImpl<Spec>(is_indexed); }; |
| 110 | } | 111 | } |
| 111 | 112 | ||
| 113 | void SetEngine(Tegra::Engines::Maxwell3D* maxwell3d_, Tegra::MemoryManager* gpu_memory_) { | ||
| 114 | maxwell3d = maxwell3d_; | ||
| 115 | gpu_memory = gpu_memory_; | ||
| 116 | } | ||
| 117 | |||
| 112 | private: | 118 | private: |
| 113 | template <typename Spec> | 119 | template <typename Spec> |
| 114 | void ConfigureImpl(bool is_indexed); | 120 | void ConfigureImpl(bool is_indexed); |
| @@ -120,8 +126,8 @@ private: | |||
| 120 | void Validate(); | 126 | void Validate(); |
| 121 | 127 | ||
| 122 | const GraphicsPipelineCacheKey key; | 128 | const GraphicsPipelineCacheKey key; |
| 123 | Tegra::Engines::Maxwell3D& maxwell3d; | 129 | Tegra::Engines::Maxwell3D* maxwell3d; |
| 124 | Tegra::MemoryManager& gpu_memory; | 130 | Tegra::MemoryManager* gpu_memory; |
| 125 | const Device& device; | 131 | const Device& device; |
| 126 | TextureCache& texture_cache; | 132 | TextureCache& texture_cache; |
| 127 | BufferCache& buffer_cache; | 133 | BufferCache& buffer_cache; |
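Because a pipeline can now be reused across channels, it no longer captures Maxwell3D and the memory manager at construction; the rasterizer points it at the current channel's engines via `SetEngine` right before configuring a draw. A stripped-down sketch of that pattern, with placeholder engine types:

```cpp
#include <cassert>

// Hypothetical per-channel engine state.
struct Maxwell3D { int draw_count = 0; };
struct MemoryManager {};

class GraphicsPipeline {
public:
    // Called by the rasterizer before every Configure(), since the active
    // channel (and therefore the engine instances) may have changed.
    void SetEngine(Maxwell3D* maxwell3d_, MemoryManager* gpu_memory_) {
        maxwell3d = maxwell3d_;
        gpu_memory = gpu_memory_;
    }

    void Configure(bool /*is_indexed*/) {
        assert(maxwell3d && gpu_memory && "SetEngine must run before Configure");
        ++maxwell3d->draw_count; // read/write the *current* channel's state
    }

private:
    Maxwell3D* maxwell3d = nullptr;     // was a reference bound at construction
    MemoryManager* gpu_memory = nullptr;
};

int main() {
    Maxwell3D channel_a_3d, channel_b_3d;
    MemoryManager memory;
    GraphicsPipeline pipeline;

    pipeline.SetEngine(&channel_a_3d, &memory);
    pipeline.Configure(false);
    pipeline.SetEngine(&channel_b_3d, &memory); // channel switch
    pipeline.Configure(false);
    return (channel_a_3d.draw_count == 1 && channel_b_3d.draw_count == 1) ? 0 : 1;
}
```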
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp index accbfc8e1..732e7b6f2 100644 --- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp | |||
| @@ -259,17 +259,15 @@ bool GraphicsPipelineCacheKey::operator==(const GraphicsPipelineCacheKey& rhs) c | |||
| 259 | return std::memcmp(&rhs, this, Size()) == 0; | 259 | return std::memcmp(&rhs, this, Size()) == 0; |
| 260 | } | 260 | } |
| 261 | 261 | ||
| 262 | PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, Tegra::Engines::Maxwell3D& maxwell3d_, | 262 | PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, const Device& device_, |
| 263 | Tegra::Engines::KeplerCompute& kepler_compute_, | ||
| 264 | Tegra::MemoryManager& gpu_memory_, const Device& device_, | ||
| 265 | Scheduler& scheduler_, DescriptorPool& descriptor_pool_, | 263 | Scheduler& scheduler_, DescriptorPool& descriptor_pool_, |
| 266 | UpdateDescriptorQueue& update_descriptor_queue_, | 264 | UpdateDescriptorQueue& update_descriptor_queue_, |
| 267 | RenderPassCache& render_pass_cache_, BufferCache& buffer_cache_, | 265 | RenderPassCache& render_pass_cache_, BufferCache& buffer_cache_, |
| 268 | TextureCache& texture_cache_, VideoCore::ShaderNotify& shader_notify_) | 266 | TextureCache& texture_cache_, VideoCore::ShaderNotify& shader_notify_) |
| 269 | : VideoCommon::ShaderCache{rasterizer_, gpu_memory_, maxwell3d_, kepler_compute_}, | 267 | : VideoCommon::ShaderCache{rasterizer_}, device{device_}, scheduler{scheduler_}, |
| 270 | device{device_}, scheduler{scheduler_}, descriptor_pool{descriptor_pool_}, | 268 | descriptor_pool{descriptor_pool_}, update_descriptor_queue{update_descriptor_queue_}, |
| 271 | update_descriptor_queue{update_descriptor_queue_}, render_pass_cache{render_pass_cache_}, | 269 | render_pass_cache{render_pass_cache_}, buffer_cache{buffer_cache_}, |
| 272 | buffer_cache{buffer_cache_}, texture_cache{texture_cache_}, shader_notify{shader_notify_}, | 270 | texture_cache{texture_cache_}, shader_notify{shader_notify_}, |
| 273 | use_asynchronous_shaders{Settings::values.use_asynchronous_shaders.GetValue()}, | 271 | use_asynchronous_shaders{Settings::values.use_asynchronous_shaders.GetValue()}, |
| 274 | workers(std::max(std::thread::hardware_concurrency(), 2U) - 1, "VkPipelineBuilder"), | 272 | workers(std::max(std::thread::hardware_concurrency(), 2U) - 1, "VkPipelineBuilder"), |
| 275 | serialization_thread(1, "VkPipelineSerialization") { | 273 | serialization_thread(1, "VkPipelineSerialization") { |
| @@ -337,7 +335,7 @@ GraphicsPipeline* PipelineCache::CurrentGraphicsPipeline() { | |||
| 337 | current_pipeline = nullptr; | 335 | current_pipeline = nullptr; |
| 338 | return nullptr; | 336 | return nullptr; |
| 339 | } | 337 | } |
| 340 | graphics_key.state.Refresh(maxwell3d, device.IsExtExtendedDynamicStateSupported(), | 338 | graphics_key.state.Refresh(*maxwell3d, device.IsExtExtendedDynamicStateSupported(), |
| 341 | device.IsExtVertexInputDynamicStateSupported()); | 339 | device.IsExtVertexInputDynamicStateSupported()); |
| 342 | 340 | ||
| 343 | if (current_pipeline) { | 341 | if (current_pipeline) { |
| @@ -357,7 +355,7 @@ ComputePipeline* PipelineCache::CurrentComputePipeline() { | |||
| 357 | if (!shader) { | 355 | if (!shader) { |
| 358 | return nullptr; | 356 | return nullptr; |
| 359 | } | 357 | } |
| 360 | const auto& qmd{kepler_compute.launch_description}; | 358 | const auto& qmd{kepler_compute->launch_description}; |
| 361 | const ComputePipelineCacheKey key{ | 359 | const ComputePipelineCacheKey key{ |
| 362 | .unique_hash = shader->unique_hash, | 360 | .unique_hash = shader->unique_hash, |
| 363 | .shared_memory_size = qmd.shared_alloc, | 361 | .shared_memory_size = qmd.shared_alloc, |
| @@ -486,13 +484,13 @@ GraphicsPipeline* PipelineCache::BuiltPipeline(GraphicsPipeline* pipeline) const | |||
| 486 | } | 484 | } |
| 487 | // If something is using depth, we can assume that games are not rendering anything which | 485 | // If something is using depth, we can assume that games are not rendering anything which |
| 488 | // will be used one time. | 486 | // will be used one time. |
| 489 | if (maxwell3d.regs.zeta_enable) { | 487 | if (maxwell3d->regs.zeta_enable) { |
| 490 | return nullptr; | 488 | return nullptr; |
| 491 | } | 489 | } |
| 492 | // If games are using a small index count, we can assume these are full screen quads. | 490 | // If games are using a small index count, we can assume these are full screen quads. |
| 493 | // Usually these shaders are only used once for building textures so we can assume they | 491 | // Usually these shaders are only used once for building textures so we can assume they |
| 494 | // can't be built async | 492 | // can't be built async |
| 495 | if (maxwell3d.regs.index_array.count <= 6 || maxwell3d.regs.vertex_buffer.count <= 6) { | 493 | if (maxwell3d->regs.index_array.count <= 6 || maxwell3d->regs.vertex_buffer.count <= 6) { |
| 496 | return pipeline; | 494 | return pipeline; |
| 497 | } | 495 | } |
| 498 | return nullptr; | 496 | return nullptr; |
| @@ -557,10 +555,10 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline( | |||
| 557 | previous_stage = &program; | 555 | previous_stage = &program; |
| 558 | } | 556 | } |
| 559 | Common::ThreadWorker* const thread_worker{build_in_parallel ? &workers : nullptr}; | 557 | Common::ThreadWorker* const thread_worker{build_in_parallel ? &workers : nullptr}; |
| 560 | return std::make_unique<GraphicsPipeline>( | 558 | return std::make_unique<GraphicsPipeline>(scheduler, buffer_cache, texture_cache, |
| 561 | maxwell3d, gpu_memory, scheduler, buffer_cache, texture_cache, &shader_notify, device, | 559 | &shader_notify, device, descriptor_pool, |
| 562 | descriptor_pool, update_descriptor_queue, thread_worker, statistics, render_pass_cache, key, | 560 | update_descriptor_queue, thread_worker, statistics, |
| 563 | std::move(modules), infos); | 561 | render_pass_cache, key, std::move(modules), infos); |
| 564 | 562 | ||
| 565 | } catch (const Shader::Exception& exception) { | 563 | } catch (const Shader::Exception& exception) { |
| 566 | LOG_ERROR(Render_Vulkan, "{}", exception.what()); | 564 | LOG_ERROR(Render_Vulkan, "{}", exception.what()); |
| @@ -592,9 +590,9 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline() { | |||
| 592 | 590 | ||
| 593 | std::unique_ptr<ComputePipeline> PipelineCache::CreateComputePipeline( | 591 | std::unique_ptr<ComputePipeline> PipelineCache::CreateComputePipeline( |
| 594 | const ComputePipelineCacheKey& key, const ShaderInfo* shader) { | 592 | const ComputePipelineCacheKey& key, const ShaderInfo* shader) { |
| 595 | const GPUVAddr program_base{kepler_compute.regs.code_loc.Address()}; | 593 | const GPUVAddr program_base{kepler_compute->regs.code_loc.Address()}; |
| 596 | const auto& qmd{kepler_compute.launch_description}; | 594 | const auto& qmd{kepler_compute->launch_description}; |
| 597 | ComputeEnvironment env{kepler_compute, gpu_memory, program_base, qmd.program_start}; | 595 | ComputeEnvironment env{*kepler_compute, *gpu_memory, program_base, qmd.program_start}; |
| 598 | env.SetCachedSize(shader->size_bytes); | 596 | env.SetCachedSize(shader->size_bytes); |
| 599 | 597 | ||
| 600 | main_pools.ReleaseContents(); | 598 | main_pools.ReleaseContents(); |
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h index 127957dbf..61f9e9366 100644 --- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h +++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h | |||
| @@ -100,10 +100,8 @@ struct ShaderPools { | |||
| 100 | 100 | ||
| 101 | class PipelineCache : public VideoCommon::ShaderCache { | 101 | class PipelineCache : public VideoCommon::ShaderCache { |
| 102 | public: | 102 | public: |
| 103 | explicit PipelineCache(RasterizerVulkan& rasterizer, Tegra::Engines::Maxwell3D& maxwell3d, | 103 | explicit PipelineCache(RasterizerVulkan& rasterizer, const Device& device, Scheduler& scheduler, |
| 104 | Tegra::Engines::KeplerCompute& kepler_compute, | 104 | DescriptorPool& descriptor_pool, |
| 105 | Tegra::MemoryManager& gpu_memory, const Device& device, | ||
| 106 | Scheduler& scheduler, DescriptorPool& descriptor_pool, | ||
| 107 | UpdateDescriptorQueue& update_descriptor_queue, | 105 | UpdateDescriptorQueue& update_descriptor_queue, |
| 108 | RenderPassCache& render_pass_cache, BufferCache& buffer_cache, | 106 | RenderPassCache& render_pass_cache, BufferCache& buffer_cache, |
| 109 | TextureCache& texture_cache, VideoCore::ShaderNotify& shader_notify_); | 107 | TextureCache& texture_cache, VideoCore::ShaderNotify& shader_notify_); |
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp index 2b859c6b8..7cb02631c 100644 --- a/src/video_core/renderer_vulkan/vk_query_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp | |||
| @@ -65,10 +65,9 @@ void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) { | |||
| 65 | usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false; | 65 | usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false; |
| 66 | } | 66 | } |
| 67 | 67 | ||
| 68 | QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, | 68 | QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_, |
| 69 | Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_, | 69 | Scheduler& scheduler_) |
| 70 | const Device& device_, Scheduler& scheduler_) | 70 | : QueryCacheBase{rasterizer_}, device{device_}, scheduler{scheduler_}, |
| 71 | : QueryCacheBase{rasterizer_, maxwell3d_, gpu_memory_}, device{device_}, scheduler{scheduler_}, | ||
| 72 | query_pools{ | 71 | query_pools{ |
| 73 | QueryPool{device_, scheduler_, QueryType::SamplesPassed}, | 72 | QueryPool{device_, scheduler_, QueryType::SamplesPassed}, |
| 74 | } {} | 73 | } {} |
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.h b/src/video_core/renderer_vulkan/vk_query_cache.h index b0d86c4f8..26762ee09 100644 --- a/src/video_core/renderer_vulkan/vk_query_cache.h +++ b/src/video_core/renderer_vulkan/vk_query_cache.h | |||
| @@ -52,9 +52,8 @@ private: | |||
| 52 | class QueryCache final | 52 | class QueryCache final |
| 53 | : public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> { | 53 | : public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> { |
| 54 | public: | 54 | public: |
| 55 | explicit QueryCache(VideoCore::RasterizerInterface& rasterizer_, | 55 | explicit QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_, |
| 56 | Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_, | 56 | Scheduler& scheduler_); |
| 57 | const Device& device_, Scheduler& scheduler_); | ||
| 58 | ~QueryCache(); | 57 | ~QueryCache(); |
| 59 | 58 | ||
| 60 | std::pair<VkQueryPool, u32> AllocateQuery(VideoCore::QueryType type); | 59 | std::pair<VkQueryPool, u32> AllocateQuery(VideoCore::QueryType type); |
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index 7e40c2df1..acfd5da7d 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #include "common/microprofile.h" | 11 | #include "common/microprofile.h" |
| 12 | #include "common/scope_exit.h" | 12 | #include "common/scope_exit.h" |
| 13 | #include "common/settings.h" | 13 | #include "common/settings.h" |
| 14 | #include "video_core/control/channel_state.h" | ||
| 14 | #include "video_core/engines/kepler_compute.h" | 15 | #include "video_core/engines/kepler_compute.h" |
| 15 | #include "video_core/engines/maxwell_3d.h" | 16 | #include "video_core/engines/maxwell_3d.h" |
| 16 | #include "video_core/renderer_vulkan/blit_image.h" | 17 | #include "video_core/renderer_vulkan/blit_image.h" |
| @@ -148,14 +149,11 @@ DrawParams MakeDrawParams(const Maxwell& regs, u32 num_instances, bool is_instan | |||
| 148 | } // Anonymous namespace | 149 | } // Anonymous namespace |
| 149 | 150 | ||
| 150 | RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, | 151 | RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, |
| 151 | Tegra::MemoryManager& gpu_memory_, | ||
| 152 | Core::Memory::Memory& cpu_memory_, ScreenInfo& screen_info_, | 152 | Core::Memory::Memory& cpu_memory_, ScreenInfo& screen_info_, |
| 153 | const Device& device_, MemoryAllocator& memory_allocator_, | 153 | const Device& device_, MemoryAllocator& memory_allocator_, |
| 154 | StateTracker& state_tracker_, Scheduler& scheduler_) | 154 | StateTracker& state_tracker_, Scheduler& scheduler_) |
| 155 | : RasterizerAccelerated{cpu_memory_}, gpu{gpu_}, | 155 | : RasterizerAccelerated{cpu_memory_}, gpu{gpu_}, screen_info{screen_info_}, device{device_}, |
| 156 | gpu_memory{gpu_memory_}, maxwell3d{gpu.Maxwell3D()}, kepler_compute{gpu.KeplerCompute()}, | 156 | memory_allocator{memory_allocator_}, state_tracker{state_tracker_}, scheduler{scheduler_}, |
| 157 | screen_info{screen_info_}, device{device_}, memory_allocator{memory_allocator_}, | ||
| 158 | state_tracker{state_tracker_}, scheduler{scheduler_}, | ||
| 159 | staging_pool(device, memory_allocator, scheduler), descriptor_pool(device, scheduler), | 157 | staging_pool(device, memory_allocator, scheduler), descriptor_pool(device, scheduler), |
| 160 | update_descriptor_queue(device, scheduler), | 158 | update_descriptor_queue(device, scheduler), |
| 161 | blit_image(device, scheduler, state_tracker, descriptor_pool), | 159 | blit_image(device, scheduler, state_tracker, descriptor_pool), |
| @@ -165,14 +163,13 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra | |||
| 165 | memory_allocator, staging_pool, | 163 | memory_allocator, staging_pool, |
| 166 | blit_image, astc_decoder_pass, | 164 | blit_image, astc_decoder_pass, |
| 167 | render_pass_cache}, | 165 | render_pass_cache}, |
| 168 | texture_cache(texture_cache_runtime, *this, maxwell3d, kepler_compute, gpu_memory), | 166 | texture_cache(texture_cache_runtime, *this), |
| 169 | buffer_cache_runtime(device, memory_allocator, scheduler, staging_pool, | 167 | buffer_cache_runtime(device, memory_allocator, scheduler, staging_pool, |
| 170 | update_descriptor_queue, descriptor_pool), | 168 | update_descriptor_queue, descriptor_pool), |
| 171 | buffer_cache(*this, maxwell3d, kepler_compute, gpu_memory, cpu_memory_, buffer_cache_runtime), | 169 | buffer_cache(*this, cpu_memory_, buffer_cache_runtime), |
| 172 | pipeline_cache(*this, maxwell3d, kepler_compute, gpu_memory, device, scheduler, | 170 | pipeline_cache(*this, device, scheduler, descriptor_pool, update_descriptor_queue, |
| 173 | descriptor_pool, update_descriptor_queue, render_pass_cache, buffer_cache, | 171 | render_pass_cache, buffer_cache, texture_cache, gpu.ShaderNotify()), |
| 174 | texture_cache, gpu.ShaderNotify()), | 172 | query_cache{*this, device, scheduler}, accelerate_dma{buffer_cache}, |
| 175 | query_cache{*this, maxwell3d, gpu_memory, device, scheduler}, accelerate_dma{buffer_cache}, | ||
| 176 | fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache, device, scheduler), | 173 | fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache, device, scheduler), |
| 177 | wfi_event(device.GetLogical().CreateEvent()) { | 174 | wfi_event(device.GetLogical().CreateEvent()) { |
| 178 | scheduler.SetQueryCache(query_cache); | 175 | scheduler.SetQueryCache(query_cache); |
| @@ -193,14 +190,16 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) { | |||
| 193 | return; | 190 | return; |
| 194 | } | 191 | } |
| 195 | std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; | 192 | std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; |
| 193 | // update engine as channel may be different. | ||
| 194 | pipeline->SetEngine(maxwell3d, gpu_memory); | ||
| 196 | pipeline->Configure(is_indexed); | 195 | pipeline->Configure(is_indexed); |
| 197 | 196 | ||
| 198 | BeginTransformFeedback(); | 197 | BeginTransformFeedback(); |
| 199 | 198 | ||
| 200 | UpdateDynamicStates(); | 199 | UpdateDynamicStates(); |
| 201 | 200 | ||
| 202 | const auto& regs{maxwell3d.regs}; | 201 | const auto& regs{maxwell3d->regs}; |
| 203 | const u32 num_instances{maxwell3d.mme_draw.instance_count}; | 202 | const u32 num_instances{maxwell3d->mme_draw.instance_count}; |
| 204 | const DrawParams draw_params{MakeDrawParams(regs, num_instances, is_instanced, is_indexed)}; | 203 | const DrawParams draw_params{MakeDrawParams(regs, num_instances, is_instanced, is_indexed)}; |
| 205 | scheduler.Record([draw_params](vk::CommandBuffer cmdbuf) { | 204 | scheduler.Record([draw_params](vk::CommandBuffer cmdbuf) { |
| 206 | if (draw_params.is_indexed) { | 205 | if (draw_params.is_indexed) { |
| @@ -218,14 +217,14 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) { | |||
| 218 | void RasterizerVulkan::Clear() { | 217 | void RasterizerVulkan::Clear() { |
| 219 | MICROPROFILE_SCOPE(Vulkan_Clearing); | 218 | MICROPROFILE_SCOPE(Vulkan_Clearing); |
| 220 | 219 | ||
| 221 | if (!maxwell3d.ShouldExecute()) { | 220 | if (!maxwell3d->ShouldExecute()) { |
| 222 | return; | 221 | return; |
| 223 | } | 222 | } |
| 224 | FlushWork(); | 223 | FlushWork(); |
| 225 | 224 | ||
| 226 | query_cache.UpdateCounters(); | 225 | query_cache.UpdateCounters(); |
| 227 | 226 | ||
| 228 | auto& regs = maxwell3d.regs; | 227 | auto& regs = maxwell3d->regs; |
| 229 | const bool use_color = regs.clear_buffers.R || regs.clear_buffers.G || regs.clear_buffers.B || | 228 | const bool use_color = regs.clear_buffers.R || regs.clear_buffers.G || regs.clear_buffers.B || |
| 230 | regs.clear_buffers.A; | 229 | regs.clear_buffers.A; |
| 231 | const bool use_depth = regs.clear_buffers.Z; | 230 | const bool use_depth = regs.clear_buffers.Z; |
| @@ -248,8 +247,15 @@ void RasterizerVulkan::Clear() { | |||
| 248 | } | 247 | } |
| 249 | UpdateViewportsState(regs); | 248 | UpdateViewportsState(regs); |
| 250 | 249 | ||
| 250 | VkRect2D default_scissor; | ||
| 251 | default_scissor.offset.x = 0; | ||
| 252 | default_scissor.offset.y = 0; | ||
| 253 | default_scissor.extent.width = std::numeric_limits<s32>::max(); | ||
| 254 | default_scissor.extent.height = std::numeric_limits<s32>::max(); | ||
| 255 | |||
| 251 | VkClearRect clear_rect{ | 256 | VkClearRect clear_rect{ |
| 252 | .rect = GetScissorState(regs, 0, up_scale, down_shift), | 257 | .rect = regs.clear_flags.scissor ? GetScissorState(regs, 0, up_scale, down_shift) |
| 258 | : default_scissor, | ||
| 253 | .baseArrayLayer = regs.clear_buffers.layer, | 259 | .baseArrayLayer = regs.clear_buffers.layer, |
| 254 | .layerCount = 1, | 260 | .layerCount = 1, |
| 255 | }; | 261 | }; |
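The clear path previously always took scissor 0 for the clear rectangle; it now only honors the hardware scissor when `clear_flags.scissor` is set and otherwise substitutes a full-surface scissor. A hedged sketch of that selection; the struct layouts below are simplified mirrors so the example compiles without the Vulkan and Maxwell headers.

```cpp
#include <cstdint>
#include <limits>

// Minimal mirrors of the Vulkan structs used here.
struct Offset2D { std::int32_t x, y; };
struct Extent2D { std::uint32_t width, height; };
struct Rect2D { Offset2D offset; Extent2D extent; };

// Simplified register view: only the bit this sketch cares about.
struct ClearFlags { bool scissor; };
struct Regs { ClearFlags clear_flags; };

// Stand-in for GetScissorState(): returns whatever scissor 0 holds.
Rect2D GetScissorState(const Regs&) { return {{10, 10}, {100, 100}}; }

Rect2D ClearRect(const Regs& regs) {
    // When the clear is not scissored, cover the whole render target by using
    // an effectively unbounded scissor instead of scissor 0.
    Rect2D default_scissor{};
    default_scissor.extent.width = std::numeric_limits<std::int32_t>::max();
    default_scissor.extent.height = std::numeric_limits<std::int32_t>::max();
    return regs.clear_flags.scissor ? GetScissorState(regs) : default_scissor;
}

int main() {
    Regs regs{.clear_flags = {.scissor = false}};
    const Rect2D rect = ClearRect(regs);
    return rect.extent.width > 100 ? 0 : 1;
}
```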
| @@ -339,9 +345,9 @@ void RasterizerVulkan::DispatchCompute() { | |||
| 339 | return; | 345 | return; |
| 340 | } | 346 | } |
| 341 | std::scoped_lock lock{texture_cache.mutex, buffer_cache.mutex}; | 347 | std::scoped_lock lock{texture_cache.mutex, buffer_cache.mutex}; |
| 342 | pipeline->Configure(kepler_compute, gpu_memory, scheduler, buffer_cache, texture_cache); | 348 | pipeline->Configure(*kepler_compute, *gpu_memory, scheduler, buffer_cache, texture_cache); |
| 343 | 349 | ||
| 344 | const auto& qmd{kepler_compute.launch_description}; | 350 | const auto& qmd{kepler_compute->launch_description}; |
| 345 | const std::array<u32, 3> dim{qmd.grid_dim_x, qmd.grid_dim_y, qmd.grid_dim_z}; | 351 | const std::array<u32, 3> dim{qmd.grid_dim_x, qmd.grid_dim_y, qmd.grid_dim_z}; |
| 346 | scheduler.RequestOutsideRenderPassOperationContext(); | 352 | scheduler.RequestOutsideRenderPassOperationContext(); |
| 347 | scheduler.Record([dim](vk::CommandBuffer cmdbuf) { cmdbuf.Dispatch(dim[0], dim[1], dim[2]); }); | 353 | scheduler.Record([dim](vk::CommandBuffer cmdbuf) { cmdbuf.Dispatch(dim[0], dim[1], dim[2]); }); |
| @@ -422,7 +428,7 @@ void RasterizerVulkan::OnCPUWrite(VAddr addr, u64 size) { | |||
| 422 | } | 428 | } |
| 423 | } | 429 | } |
| 424 | 430 | ||
| 425 | void RasterizerVulkan::SyncGuestHost() { | 431 | void RasterizerVulkan::InvalidateGPUCache() { |
| 426 | pipeline_cache.SyncGuestHost(); | 432 | pipeline_cache.SyncGuestHost(); |
| 427 | { | 433 | { |
| 428 | std::scoped_lock lock{buffer_cache.mutex}; | 434 | std::scoped_lock lock{buffer_cache.mutex}; |
| @@ -442,40 +448,30 @@ void RasterizerVulkan::UnmapMemory(VAddr addr, u64 size) { | |||
| 442 | pipeline_cache.OnCPUWrite(addr, size); | 448 | pipeline_cache.OnCPUWrite(addr, size); |
| 443 | } | 449 | } |
| 444 | 450 | ||
| 445 | void RasterizerVulkan::ModifyGPUMemory(GPUVAddr addr, u64 size) { | 451 | void RasterizerVulkan::ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) { |
| 446 | { | 452 | { |
| 447 | std::scoped_lock lock{texture_cache.mutex}; | 453 | std::scoped_lock lock{texture_cache.mutex}; |
| 448 | texture_cache.UnmapGPUMemory(addr, size); | 454 | texture_cache.UnmapGPUMemory(as_id, addr, size); |
| 449 | } | 455 | } |
| 450 | } | 456 | } |
| 451 | 457 | ||
| 452 | void RasterizerVulkan::SignalSemaphore(GPUVAddr addr, u32 value) { | 458 | void RasterizerVulkan::SignalFence(std::function<void()>&& func) { |
| 453 | if (!gpu.IsAsync()) { | 459 | fence_manager.SignalFence(std::move(func)); |
| 454 | gpu_memory.Write<u32>(addr, value); | 460 | } |
| 455 | return; | 461 | |
| 456 | } | 462 | void RasterizerVulkan::SyncOperation(std::function<void()>&& func) { |
| 457 | fence_manager.SignalSemaphore(addr, value); | 463 | fence_manager.SyncOperation(std::move(func)); |
| 458 | } | 464 | } |
| 459 | 465 | ||
| 460 | void RasterizerVulkan::SignalSyncPoint(u32 value) { | 466 | void RasterizerVulkan::SignalSyncPoint(u32 value) { |
| 461 | if (!gpu.IsAsync()) { | ||
| 462 | gpu.IncrementSyncPoint(value); | ||
| 463 | return; | ||
| 464 | } | ||
| 465 | fence_manager.SignalSyncPoint(value); | 467 | fence_manager.SignalSyncPoint(value); |
| 466 | } | 468 | } |
| 467 | 469 | ||
| 468 | void RasterizerVulkan::SignalReference() { | 470 | void RasterizerVulkan::SignalReference() { |
| 469 | if (!gpu.IsAsync()) { | ||
| 470 | return; | ||
| 471 | } | ||
| 472 | fence_manager.SignalOrdering(); | 471 | fence_manager.SignalOrdering(); |
| 473 | } | 472 | } |
| 474 | 473 | ||
| 475 | void RasterizerVulkan::ReleaseFences() { | 474 | void RasterizerVulkan::ReleaseFences() { |
| 476 | if (!gpu.IsAsync()) { | ||
| 477 | return; | ||
| 478 | } | ||
| 479 | fence_manager.WaitPendingFences(); | 475 | fence_manager.WaitPendingFences(); |
| 480 | } | 476 | } |
| 481 | 477 | ||
| @@ -552,13 +548,13 @@ Tegra::Engines::AccelerateDMAInterface& RasterizerVulkan::AccessAccelerateDMA() | |||
| 552 | } | 548 | } |
| 553 | 549 | ||
| 554 | void RasterizerVulkan::AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, | 550 | void RasterizerVulkan::AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, |
| 555 | std::span<u8> memory) { | 551 | std::span<const u8> memory) { |
| 556 | auto cpu_addr = gpu_memory.GpuToCpuAddress(address); | 552 | auto cpu_addr = gpu_memory->GpuToCpuAddress(address); |
| 557 | if (!cpu_addr) [[unlikely]] { | 553 | if (!cpu_addr) [[unlikely]] { |
| 558 | gpu_memory.WriteBlock(address, memory.data(), copy_size); | 554 | gpu_memory->WriteBlock(address, memory.data(), copy_size); |
| 559 | return; | 555 | return; |
| 560 | } | 556 | } |
| 561 | gpu_memory.WriteBlockUnsafe(address, memory.data(), copy_size); | 557 | gpu_memory->WriteBlockUnsafe(address, memory.data(), copy_size); |
| 562 | { | 558 | { |
| 563 | std::unique_lock<std::mutex> lock{buffer_cache.mutex}; | 559 | std::unique_lock<std::mutex> lock{buffer_cache.mutex}; |
| 564 | if (!buffer_cache.InlineMemory(*cpu_addr, copy_size, memory)) { | 560 | if (!buffer_cache.InlineMemory(*cpu_addr, copy_size, memory)) { |
| @@ -627,7 +623,7 @@ bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 | |||
| 627 | } | 623 | } |
| 628 | 624 | ||
| 629 | void RasterizerVulkan::UpdateDynamicStates() { | 625 | void RasterizerVulkan::UpdateDynamicStates() { |
| 630 | auto& regs = maxwell3d.regs; | 626 | auto& regs = maxwell3d->regs; |
| 631 | UpdateViewportsState(regs); | 627 | UpdateViewportsState(regs); |
| 632 | UpdateScissorsState(regs); | 628 | UpdateScissorsState(regs); |
| 633 | UpdateDepthBias(regs); | 629 | UpdateDepthBias(regs); |
| @@ -651,7 +647,7 @@ void RasterizerVulkan::UpdateDynamicStates() { | |||
| 651 | } | 647 | } |
| 652 | 648 | ||
| 653 | void RasterizerVulkan::BeginTransformFeedback() { | 649 | void RasterizerVulkan::BeginTransformFeedback() { |
| 654 | const auto& regs = maxwell3d.regs; | 650 | const auto& regs = maxwell3d->regs; |
| 655 | if (regs.tfb_enabled == 0) { | 651 | if (regs.tfb_enabled == 0) { |
| 656 | return; | 652 | return; |
| 657 | } | 653 | } |
| @@ -667,7 +663,7 @@ void RasterizerVulkan::BeginTransformFeedback() { | |||
| 667 | } | 663 | } |
| 668 | 664 | ||
| 669 | void RasterizerVulkan::EndTransformFeedback() { | 665 | void RasterizerVulkan::EndTransformFeedback() { |
| 670 | const auto& regs = maxwell3d.regs; | 666 | const auto& regs = maxwell3d->regs; |
| 671 | if (regs.tfb_enabled == 0) { | 667 | if (regs.tfb_enabled == 0) { |
| 672 | return; | 668 | return; |
| 673 | } | 669 | } |
| @@ -917,7 +913,7 @@ void RasterizerVulkan::UpdateStencilTestEnable(Tegra::Engines::Maxwell3D::Regs& | |||
| 917 | } | 913 | } |
| 918 | 914 | ||
| 919 | void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs) { | 915 | void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs) { |
| 920 | auto& dirty{maxwell3d.dirty.flags}; | 916 | auto& dirty{maxwell3d->dirty.flags}; |
| 921 | if (!dirty[Dirty::VertexInput]) { | 917 | if (!dirty[Dirty::VertexInput]) { |
| 922 | return; | 918 | return; |
| 923 | } | 919 | } |
| @@ -974,4 +970,41 @@ void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs) | |||
| 974 | }); | 970 | }); |
| 975 | } | 971 | } |
| 976 | 972 | ||
| 973 | void RasterizerVulkan::InitializeChannel(Tegra::Control::ChannelState& channel) { | ||
| 974 | CreateChannel(channel); | ||
| 975 | { | ||
| 976 | std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; | ||
| 977 | texture_cache.CreateChannel(channel); | ||
| 978 | buffer_cache.CreateChannel(channel); | ||
| 979 | } | ||
| 980 | pipeline_cache.CreateChannel(channel); | ||
| 981 | query_cache.CreateChannel(channel); | ||
| 982 | state_tracker.SetupTables(channel); | ||
| 983 | } | ||
| 984 | |||
| 985 | void RasterizerVulkan::BindChannel(Tegra::Control::ChannelState& channel) { | ||
| 986 | const s32 channel_id = channel.bind_id; | ||
| 987 | BindToChannel(channel_id); | ||
| 988 | { | ||
| 989 | std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; | ||
| 990 | texture_cache.BindToChannel(channel_id); | ||
| 991 | buffer_cache.BindToChannel(channel_id); | ||
| 992 | } | ||
| 993 | pipeline_cache.BindToChannel(channel_id); | ||
| 994 | query_cache.BindToChannel(channel_id); | ||
| 995 | state_tracker.ChangeChannel(channel); | ||
| 996 | state_tracker.InvalidateState(); | ||
| 997 | } | ||
| 998 | |||
| 999 | void RasterizerVulkan::ReleaseChannel(s32 channel_id) { | ||
| 1000 | EraseChannel(channel_id); | ||
| 1001 | { | ||
| 1002 | std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; | ||
| 1003 | texture_cache.EraseChannel(channel_id); | ||
| 1004 | buffer_cache.EraseChannel(channel_id); | ||
| 1005 | } | ||
| 1006 | pipeline_cache.EraseChannel(channel_id); | ||
| 1007 | query_cache.EraseChannel(channel_id); | ||
| 1008 | } | ||
| 1009 | |||
| 977 | } // namespace Vulkan | 1010 | } // namespace Vulkan |
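Editor's note: the new InitializeChannel/BindChannel/ReleaseChannel entry points above fan a single channel event out to every per-channel cache, taking the buffer and texture cache mutexes in one scoped_lock so both observe the same channel. A minimal standalone sketch of that pattern follows; the ChannelState and Cache types here are simplified stand-ins, not the yuzu classes.

// Sketch of the channel fan-out pattern (simplified stand-in types).
#include <mutex>
#include <unordered_map>

struct ChannelState {
    int bind_id;
};

struct Cache {
    std::mutex mutex;
    std::unordered_map<int, int> channels;
    int current = -1;
    void CreateChannel(const ChannelState& state) { channels.emplace(state.bind_id, 0); }
    void BindToChannel(int id) { current = id; }
    void EraseChannel(int id) { channels.erase(id); }
};

struct Rasterizer {
    Cache texture_cache;
    Cache buffer_cache;

    void InitializeChannel(const ChannelState& state) {
        // Lock both caches together so they never see different channels.
        std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
        texture_cache.CreateChannel(state);
        buffer_cache.CreateChannel(state);
    }
    void BindChannel(const ChannelState& state) {
        std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
        texture_cache.BindToChannel(state.bind_id);
        buffer_cache.BindToChannel(state.bind_id);
    }
    void ReleaseChannel(int id) {
        std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
        texture_cache.EraseChannel(id);
        buffer_cache.EraseChannel(id);
    }
};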
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h index 0370ea39b..4cde3c983 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.h +++ b/src/video_core/renderer_vulkan/vk_rasterizer.h | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | #include <boost/container/static_vector.hpp> | 8 | #include <boost/container/static_vector.hpp> |
| 9 | 9 | ||
| 10 | #include "common/common_types.h" | 10 | #include "common/common_types.h" |
| 11 | #include "video_core/control/channel_state_cache.h" | ||
| 11 | #include "video_core/engines/maxwell_dma.h" | 12 | #include "video_core/engines/maxwell_dma.h" |
| 12 | #include "video_core/rasterizer_accelerated.h" | 13 | #include "video_core/rasterizer_accelerated.h" |
| 13 | #include "video_core/rasterizer_interface.h" | 14 | #include "video_core/rasterizer_interface.h" |
| @@ -54,13 +55,13 @@ private: | |||
| 54 | BufferCache& buffer_cache; | 55 | BufferCache& buffer_cache; |
| 55 | }; | 56 | }; |
| 56 | 57 | ||
| 57 | class RasterizerVulkan final : public VideoCore::RasterizerAccelerated { | 58 | class RasterizerVulkan final : public VideoCore::RasterizerAccelerated, |
| 59 | protected VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> { | ||
| 58 | public: | 60 | public: |
| 59 | explicit RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, | 61 | explicit RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, |
| 60 | Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, | 62 | Core::Memory::Memory& cpu_memory_, ScreenInfo& screen_info_, |
| 61 | ScreenInfo& screen_info_, const Device& device_, | 63 | const Device& device_, MemoryAllocator& memory_allocator_, |
| 62 | MemoryAllocator& memory_allocator_, StateTracker& state_tracker_, | 64 | StateTracker& state_tracker_, Scheduler& scheduler_); |
| 63 | Scheduler& scheduler_); | ||
| 64 | ~RasterizerVulkan() override; | 65 | ~RasterizerVulkan() override; |
| 65 | 66 | ||
| 66 | void Draw(bool is_indexed, bool is_instanced) override; | 67 | void Draw(bool is_indexed, bool is_instanced) override; |
| @@ -75,10 +76,11 @@ public: | |||
| 75 | bool MustFlushRegion(VAddr addr, u64 size) override; | 76 | bool MustFlushRegion(VAddr addr, u64 size) override; |
| 76 | void InvalidateRegion(VAddr addr, u64 size) override; | 77 | void InvalidateRegion(VAddr addr, u64 size) override; |
| 77 | void OnCPUWrite(VAddr addr, u64 size) override; | 78 | void OnCPUWrite(VAddr addr, u64 size) override; |
| 78 | void SyncGuestHost() override; | 79 | void InvalidateGPUCache() override; |
| 79 | void UnmapMemory(VAddr addr, u64 size) override; | 80 | void UnmapMemory(VAddr addr, u64 size) override; |
| 80 | void ModifyGPUMemory(GPUVAddr addr, u64 size) override; | 81 | void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) override; |
| 81 | void SignalSemaphore(GPUVAddr addr, u32 value) override; | 82 | void SignalFence(std::function<void()>&& func) override; |
| 83 | void SyncOperation(std::function<void()>&& func) override; | ||
| 82 | void SignalSyncPoint(u32 value) override; | 84 | void SignalSyncPoint(u32 value) override; |
| 83 | void SignalReference() override; | 85 | void SignalReference() override; |
| 84 | void ReleaseFences() override; | 86 | void ReleaseFences() override; |
| @@ -93,12 +95,18 @@ public: | |||
| 93 | const Tegra::Engines::Fermi2D::Config& copy_config) override; | 95 | const Tegra::Engines::Fermi2D::Config& copy_config) override; |
| 94 | Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override; | 96 | Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override; |
| 95 | void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, | 97 | void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, |
| 96 | std::span<u8> memory) override; | 98 | std::span<const u8> memory) override; |
| 97 | bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr, | 99 | bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr, |
| 98 | u32 pixel_stride) override; | 100 | u32 pixel_stride) override; |
| 99 | void LoadDiskResources(u64 title_id, std::stop_token stop_loading, | 101 | void LoadDiskResources(u64 title_id, std::stop_token stop_loading, |
| 100 | const VideoCore::DiskResourceLoadCallback& callback) override; | 102 | const VideoCore::DiskResourceLoadCallback& callback) override; |
| 101 | 103 | ||
| 104 | void InitializeChannel(Tegra::Control::ChannelState& channel) override; | ||
| 105 | |||
| 106 | void BindChannel(Tegra::Control::ChannelState& channel) override; | ||
| 107 | |||
| 108 | void ReleaseChannel(s32 channel_id) override; | ||
| 109 | |||
| 102 | private: | 110 | private: |
| 103 | static constexpr size_t MAX_TEXTURES = 192; | 111 | static constexpr size_t MAX_TEXTURES = 192; |
| 104 | static constexpr size_t MAX_IMAGES = 48; | 112 | static constexpr size_t MAX_IMAGES = 48; |
| @@ -134,9 +142,6 @@ private: | |||
| 134 | void UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs); | 142 | void UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs); |
| 135 | 143 | ||
| 136 | Tegra::GPU& gpu; | 144 | Tegra::GPU& gpu; |
| 137 | Tegra::MemoryManager& gpu_memory; | ||
| 138 | Tegra::Engines::Maxwell3D& maxwell3d; | ||
| 139 | Tegra::Engines::KeplerCompute& kepler_compute; | ||
| 140 | 145 | ||
| 141 | ScreenInfo& screen_info; | 146 | ScreenInfo& screen_info; |
| 142 | const Device& device; | 147 | const Device& device; |
diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.cpp b/src/video_core/renderer_vulkan/vk_state_tracker.cpp index 9ad096431..f234e1a31 100644 --- a/src/video_core/renderer_vulkan/vk_state_tracker.cpp +++ b/src/video_core/renderer_vulkan/vk_state_tracker.cpp | |||
| @@ -7,9 +7,9 @@ | |||
| 7 | 7 | ||
| 8 | #include "common/common_types.h" | 8 | #include "common/common_types.h" |
| 9 | #include "core/core.h" | 9 | #include "core/core.h" |
| 10 | #include "video_core/control/channel_state.h" | ||
| 10 | #include "video_core/dirty_flags.h" | 11 | #include "video_core/dirty_flags.h" |
| 11 | #include "video_core/engines/maxwell_3d.h" | 12 | #include "video_core/engines/maxwell_3d.h" |
| 12 | #include "video_core/gpu.h" | ||
| 13 | #include "video_core/renderer_vulkan/vk_state_tracker.h" | 13 | #include "video_core/renderer_vulkan/vk_state_tracker.h" |
| 14 | 14 | ||
| 15 | #define OFF(field_name) MAXWELL3D_REG_INDEX(field_name) | 15 | #define OFF(field_name) MAXWELL3D_REG_INDEX(field_name) |
| @@ -174,9 +174,8 @@ void SetupDirtyVertexBindings(Tables& tables) { | |||
| 174 | } | 174 | } |
| 175 | } // Anonymous namespace | 175 | } // Anonymous namespace |
| 176 | 176 | ||
| 177 | StateTracker::StateTracker(Tegra::GPU& gpu) | 177 | void StateTracker::SetupTables(Tegra::Control::ChannelState& channel_state) { |
| 178 | : flags{gpu.Maxwell3D().dirty.flags}, invalidation_flags{MakeInvalidationFlags()} { | 178 | auto& tables{channel_state.maxwell_3d->dirty.tables}; |
| 179 | auto& tables{gpu.Maxwell3D().dirty.tables}; | ||
| 180 | SetupDirtyFlags(tables); | 179 | SetupDirtyFlags(tables); |
| 181 | SetupDirtyViewports(tables); | 180 | SetupDirtyViewports(tables); |
| 182 | SetupDirtyScissors(tables); | 181 | SetupDirtyScissors(tables); |
| @@ -199,4 +198,15 @@ StateTracker::StateTracker(Tegra::GPU& gpu) | |||
| 199 | SetupDirtyVertexBindings(tables); | 198 | SetupDirtyVertexBindings(tables); |
| 200 | } | 199 | } |
| 201 | 200 | ||
| 201 | void StateTracker::ChangeChannel(Tegra::Control::ChannelState& channel_state) { | ||
| 202 | flags = &channel_state.maxwell_3d->dirty.flags; | ||
| 203 | } | ||
| 204 | |||
| 205 | void StateTracker::InvalidateState() { | ||
| 206 | flags->set(); | ||
| 207 | } | ||
| 208 | |||
| 209 | StateTracker::StateTracker() | ||
| 210 | : flags{&default_flags}, default_flags{}, invalidation_flags{MakeInvalidationFlags()} {} | ||
| 211 | |||
| 202 | } // namespace Vulkan | 212 | } // namespace Vulkan |
diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.h b/src/video_core/renderer_vulkan/vk_state_tracker.h index a85bc1c10..2296dea60 100644 --- a/src/video_core/renderer_vulkan/vk_state_tracker.h +++ b/src/video_core/renderer_vulkan/vk_state_tracker.h | |||
| @@ -10,6 +10,12 @@ | |||
| 10 | #include "video_core/dirty_flags.h" | 10 | #include "video_core/dirty_flags.h" |
| 11 | #include "video_core/engines/maxwell_3d.h" | 11 | #include "video_core/engines/maxwell_3d.h" |
| 12 | 12 | ||
| 13 | namespace Tegra { | ||
| 14 | namespace Control { | ||
| 15 | struct ChannelState; | ||
| 16 | } | ||
| 17 | } // namespace Tegra | ||
| 18 | |||
| 13 | namespace Vulkan { | 19 | namespace Vulkan { |
| 14 | 20 | ||
| 15 | namespace Dirty { | 21 | namespace Dirty { |
| @@ -53,19 +59,19 @@ class StateTracker { | |||
| 53 | using Maxwell = Tegra::Engines::Maxwell3D::Regs; | 59 | using Maxwell = Tegra::Engines::Maxwell3D::Regs; |
| 54 | 60 | ||
| 55 | public: | 61 | public: |
| 56 | explicit StateTracker(Tegra::GPU& gpu); | 62 | explicit StateTracker(); |
| 57 | 63 | ||
| 58 | void InvalidateCommandBufferState() { | 64 | void InvalidateCommandBufferState() { |
| 59 | flags |= invalidation_flags; | 65 | (*flags) |= invalidation_flags; |
| 60 | current_topology = INVALID_TOPOLOGY; | 66 | current_topology = INVALID_TOPOLOGY; |
| 61 | } | 67 | } |
| 62 | 68 | ||
| 63 | void InvalidateViewports() { | 69 | void InvalidateViewports() { |
| 64 | flags[Dirty::Viewports] = true; | 70 | (*flags)[Dirty::Viewports] = true; |
| 65 | } | 71 | } |
| 66 | 72 | ||
| 67 | void InvalidateScissors() { | 73 | void InvalidateScissors() { |
| 68 | flags[Dirty::Scissors] = true; | 74 | (*flags)[Dirty::Scissors] = true; |
| 69 | } | 75 | } |
| 70 | 76 | ||
| 71 | bool TouchViewports() { | 77 | bool TouchViewports() { |
| @@ -139,16 +145,23 @@ public: | |||
| 139 | return has_changed; | 145 | return has_changed; |
| 140 | } | 146 | } |
| 141 | 147 | ||
| 148 | void SetupTables(Tegra::Control::ChannelState& channel_state); | ||
| 149 | |||
| 150 | void ChangeChannel(Tegra::Control::ChannelState& channel_state); | ||
| 151 | |||
| 152 | void InvalidateState(); | ||
| 153 | |||
| 142 | private: | 154 | private: |
| 143 | static constexpr auto INVALID_TOPOLOGY = static_cast<Maxwell::PrimitiveTopology>(~0u); | 155 | static constexpr auto INVALID_TOPOLOGY = static_cast<Maxwell::PrimitiveTopology>(~0u); |
| 144 | 156 | ||
| 145 | bool Exchange(std::size_t id, bool new_value) const noexcept { | 157 | bool Exchange(std::size_t id, bool new_value) const noexcept { |
| 146 | const bool is_dirty = flags[id]; | 158 | const bool is_dirty = (*flags)[id]; |
| 147 | flags[id] = new_value; | 159 | (*flags)[id] = new_value; |
| 148 | return is_dirty; | 160 | return is_dirty; |
| 149 | } | 161 | } |
| 150 | 162 | ||
| 151 | Tegra::Engines::Maxwell3D::DirtyState::Flags& flags; | 163 | Tegra::Engines::Maxwell3D::DirtyState::Flags* flags; |
| 164 | Tegra::Engines::Maxwell3D::DirtyState::Flags default_flags; | ||
| 152 | Tegra::Engines::Maxwell3D::DirtyState::Flags invalidation_flags; | 165 | Tegra::Engines::Maxwell3D::DirtyState::Flags invalidation_flags; |
| 153 | Maxwell::PrimitiveTopology current_topology = INVALID_TOPOLOGY; | 166 | Maxwell::PrimitiveTopology current_topology = INVALID_TOPOLOGY; |
| 154 | }; | 167 | }; |
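Editor's note: the state tracker changes above replace a reference to one engine's dirty flags with a pointer that can be retargeted per channel, plus a private default_flags bitset so the tracker is usable before any channel is bound. A reduced sketch of that ownership model follows; std::bitset stands in for the Maxwell3D dirty-flag type, which is an assumption for illustration only.

// Sketch: per-channel dirty-flag pointer with a default fallback.
#include <bitset>
#include <cstddef>

using Flags = std::bitset<256>; // stand-in for Maxwell3D::DirtyState::Flags

struct ChannelState {
    Flags dirty_flags;
};

class StateTracker {
public:
    StateTracker() : flags{&default_flags} {}

    // Point at the currently bound channel's flags.
    void ChangeChannel(ChannelState& state) {
        flags = &state.dirty_flags;
    }

    // Mark everything dirty after a channel switch.
    void InvalidateState() {
        flags->set();
    }

    bool Exchange(std::size_t id, bool new_value) noexcept {
        const bool is_dirty = (*flags)[id];
        (*flags)[id] = new_value;
        return is_dirty;
    }

private:
    Flags* flags;          // never null: starts out pointing at default_flags
    Flags default_flags{}; // placeholder until a channel is bound
};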
diff --git a/src/video_core/renderer_vulkan/vk_swapchain.cpp b/src/video_core/renderer_vulkan/vk_swapchain.cpp index a69ae7725..706d9ba74 100644 --- a/src/video_core/renderer_vulkan/vk_swapchain.cpp +++ b/src/video_core/renderer_vulkan/vk_swapchain.cpp | |||
| @@ -36,7 +36,8 @@ VkPresentModeKHR ChooseSwapPresentMode(vk::Span<VkPresentModeKHR> modes) { | |||
| 36 | // Mailbox (triple buffering) doesn't lock the application like fifo (vsync), | 36 | // Mailbox (triple buffering) doesn't lock the application like fifo (vsync), |
| 37 | // prefer it if vsync option is not selected | 37 | // prefer it if vsync option is not selected |
| 38 | const auto found_mailbox = std::find(modes.begin(), modes.end(), VK_PRESENT_MODE_MAILBOX_KHR); | 38 | const auto found_mailbox = std::find(modes.begin(), modes.end(), VK_PRESENT_MODE_MAILBOX_KHR); |
| 39 | if (found_mailbox != modes.end() && !Settings::values.use_vsync.GetValue()) { | 39 | if (Settings::values.fullscreen_mode.GetValue() == Settings::FullscreenMode::Borderless && |
| 40 | found_mailbox != modes.end() && !Settings::values.use_vsync.GetValue()) { | ||
| 40 | return VK_PRESENT_MODE_MAILBOX_KHR; | 41 | return VK_PRESENT_MODE_MAILBOX_KHR; |
| 41 | } | 42 | } |
| 42 | if (!Settings::values.use_speed_limit.GetValue()) { | 43 | if (!Settings::values.use_speed_limit.GetValue()) { |
| @@ -156,8 +157,16 @@ void Swapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities, u3 | |||
| 156 | present_mode = ChooseSwapPresentMode(present_modes); | 157 | present_mode = ChooseSwapPresentMode(present_modes); |
| 157 | 158 | ||
| 158 | u32 requested_image_count{capabilities.minImageCount + 1}; | 159 | u32 requested_image_count{capabilities.minImageCount + 1}; |
| 159 | if (capabilities.maxImageCount > 0 && requested_image_count > capabilities.maxImageCount) { | 160 | // Ensure triple buffering if possible. |
| 160 | requested_image_count = capabilities.maxImageCount; | 161 | if (capabilities.maxImageCount > 0) { |
| 162 | if (requested_image_count > capabilities.maxImageCount) { | ||
| 163 | requested_image_count = capabilities.maxImageCount; | ||
| 164 | } else { | ||
| 165 | requested_image_count = | ||
| 166 | std::max(requested_image_count, std::min(3U, capabilities.maxImageCount)); | ||
| 167 | } | ||
| 168 | } else { | ||
| 169 | requested_image_count = std::max(requested_image_count, 3U); | ||
| 161 | } | 170 | } |
| 162 | VkSwapchainCreateInfoKHR swapchain_ci{ | 171 | VkSwapchainCreateInfoKHR swapchain_ci{ |
| 163 | .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR, | 172 | .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR, |
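Editor's note: restating the swapchain image-count selection above, the request starts at minImageCount + 1 and is then clamped so the swapchain ends up with at least three images whenever the surface capabilities allow it. A standalone sketch of the same arithmetic; the function name is illustrative only.

#include <algorithm>
#include <cstdint>

// Start from minImageCount + 1, never exceed maxImageCount (0 means "no
// limit"), otherwise raise the request so triple buffering is used.
std::uint32_t ChooseImageCount(std::uint32_t min_image_count, std::uint32_t max_image_count) {
    std::uint32_t requested = min_image_count + 1;
    if (max_image_count > 0) {
        if (requested > max_image_count) {
            requested = max_image_count;
        } else {
            requested = std::max(requested, std::min<std::uint32_t>(3, max_image_count));
        }
    } else {
        requested = std::max<std::uint32_t>(requested, 3);
    }
    return requested;
}

// Examples: min=1, max=2 -> 2; min=2, max=8 -> 3; min=3, max=0 (unlimited) -> 4.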
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp index caca79d79..305ad8aee 100644 --- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp | |||
| @@ -592,7 +592,7 @@ void TryTransformSwizzleIfNeeded(PixelFormat format, std::array<SwizzleSource, 4 | |||
| 592 | case PixelFormat::A5B5G5R1_UNORM: | 592 | case PixelFormat::A5B5G5R1_UNORM: |
| 593 | std::ranges::transform(swizzle, swizzle.begin(), SwapSpecial); | 593 | std::ranges::transform(swizzle, swizzle.begin(), SwapSpecial); |
| 594 | break; | 594 | break; |
| 595 | case PixelFormat::R4G4_UNORM: | 595 | case PixelFormat::G4R4_UNORM: |
| 596 | std::ranges::transform(swizzle, swizzle.begin(), SwapGreenRed); | 596 | std::ranges::transform(swizzle, swizzle.begin(), SwapGreenRed); |
| 597 | break; | 597 | break; |
| 598 | default: | 598 | default: |
| @@ -1474,13 +1474,14 @@ bool Image::BlitScaleHelper(bool scale_up) { | |||
| 1474 | }; | 1474 | }; |
| 1475 | const VkExtent2D extent{ | 1475 | const VkExtent2D extent{ |
| 1476 | .width = std::max(scaled_width, info.size.width), | 1476 | .width = std::max(scaled_width, info.size.width), |
| 1477 | .height = std::max(scaled_height, info.size.width), | 1477 | .height = std::max(scaled_height, info.size.height), |
| 1478 | }; | 1478 | }; |
| 1479 | 1479 | ||
| 1480 | auto* view_ptr = blit_view.get(); | 1480 | auto* view_ptr = blit_view.get(); |
| 1481 | if (aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT) { | 1481 | if (aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT) { |
| 1482 | if (!blit_framebuffer) { | 1482 | if (!blit_framebuffer) { |
| 1483 | blit_framebuffer = std::make_unique<Framebuffer>(*runtime, view_ptr, nullptr, extent); | 1483 | blit_framebuffer = |
| 1484 | std::make_unique<Framebuffer>(*runtime, view_ptr, nullptr, extent, scale_up); | ||
| 1484 | } | 1485 | } |
| 1485 | const auto color_view = blit_view->Handle(Shader::TextureType::Color2D); | 1486 | const auto color_view = blit_view->Handle(Shader::TextureType::Color2D); |
| 1486 | 1487 | ||
| @@ -1488,7 +1489,8 @@ bool Image::BlitScaleHelper(bool scale_up) { | |||
| 1488 | src_region, operation, BLIT_OPERATION); | 1489 | src_region, operation, BLIT_OPERATION); |
| 1489 | } else if (aspect_mask == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) { | 1490 | } else if (aspect_mask == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) { |
| 1490 | if (!blit_framebuffer) { | 1491 | if (!blit_framebuffer) { |
| 1491 | blit_framebuffer = std::make_unique<Framebuffer>(*runtime, nullptr, view_ptr, extent); | 1492 | blit_framebuffer = |
| 1493 | std::make_unique<Framebuffer>(*runtime, nullptr, view_ptr, extent, scale_up); | ||
| 1492 | } | 1494 | } |
| 1493 | runtime->blit_image_helper.BlitDepthStencil(blit_framebuffer.get(), blit_view->DepthView(), | 1495 | runtime->blit_image_helper.BlitDepthStencil(blit_framebuffer.get(), blit_view->DepthView(), |
| 1494 | blit_view->StencilView(), dst_region, | 1496 | blit_view->StencilView(), dst_region, |
| @@ -1756,34 +1758,42 @@ Framebuffer::Framebuffer(TextureCacheRuntime& runtime, std::span<ImageView*, NUM | |||
| 1756 | .width = key.size.width, | 1758 | .width = key.size.width, |
| 1757 | .height = key.size.height, | 1759 | .height = key.size.height, |
| 1758 | }} { | 1760 | }} { |
| 1759 | CreateFramebuffer(runtime, color_buffers, depth_buffer); | 1761 | CreateFramebuffer(runtime, color_buffers, depth_buffer, key.is_rescaled); |
| 1760 | if (runtime.device.HasDebuggingToolAttached()) { | 1762 | if (runtime.device.HasDebuggingToolAttached()) { |
| 1761 | framebuffer.SetObjectNameEXT(VideoCommon::Name(key).c_str()); | 1763 | framebuffer.SetObjectNameEXT(VideoCommon::Name(key).c_str()); |
| 1762 | } | 1764 | } |
| 1763 | } | 1765 | } |
| 1764 | 1766 | ||
| 1765 | Framebuffer::Framebuffer(TextureCacheRuntime& runtime, ImageView* color_buffer, | 1767 | Framebuffer::Framebuffer(TextureCacheRuntime& runtime, ImageView* color_buffer, |
| 1766 | ImageView* depth_buffer, VkExtent2D extent) | 1768 | ImageView* depth_buffer, VkExtent2D extent, bool is_rescaled) |
| 1767 | : render_area{extent} { | 1769 | : render_area{extent} { |
| 1768 | std::array<ImageView*, NUM_RT> color_buffers{color_buffer}; | 1770 | std::array<ImageView*, NUM_RT> color_buffers{color_buffer}; |
| 1769 | CreateFramebuffer(runtime, color_buffers, depth_buffer); | 1771 | CreateFramebuffer(runtime, color_buffers, depth_buffer, is_rescaled); |
| 1770 | } | 1772 | } |
| 1771 | 1773 | ||
| 1772 | Framebuffer::~Framebuffer() = default; | 1774 | Framebuffer::~Framebuffer() = default; |
| 1773 | 1775 | ||
| 1774 | void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime, | 1776 | void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime, |
| 1775 | std::span<ImageView*, NUM_RT> color_buffers, | 1777 | std::span<ImageView*, NUM_RT> color_buffers, |
| 1776 | ImageView* depth_buffer) { | 1778 | ImageView* depth_buffer, bool is_rescaled) { |
| 1777 | std::vector<VkImageView> attachments; | 1779 | std::vector<VkImageView> attachments; |
| 1778 | RenderPassKey renderpass_key{}; | 1780 | RenderPassKey renderpass_key{}; |
| 1779 | s32 num_layers = 1; | 1781 | s32 num_layers = 1; |
| 1780 | 1782 | ||
| 1783 | const auto& resolution = runtime.resolution; | ||
| 1784 | |||
| 1785 | u32 width = 0; | ||
| 1786 | u32 height = 0; | ||
| 1781 | for (size_t index = 0; index < NUM_RT; ++index) { | 1787 | for (size_t index = 0; index < NUM_RT; ++index) { |
| 1782 | const ImageView* const color_buffer = color_buffers[index]; | 1788 | const ImageView* const color_buffer = color_buffers[index]; |
| 1783 | if (!color_buffer) { | 1789 | if (!color_buffer) { |
| 1784 | renderpass_key.color_formats[index] = PixelFormat::Invalid; | 1790 | renderpass_key.color_formats[index] = PixelFormat::Invalid; |
| 1785 | continue; | 1791 | continue; |
| 1786 | } | 1792 | } |
| 1793 | width = std::max(width, is_rescaled ? resolution.ScaleUp(color_buffer->size.width) | ||
| 1794 | : color_buffer->size.width); | ||
| 1795 | height = std::max(height, is_rescaled ? resolution.ScaleUp(color_buffer->size.height) | ||
| 1796 | : color_buffer->size.height); | ||
| 1787 | attachments.push_back(color_buffer->RenderTarget()); | 1797 | attachments.push_back(color_buffer->RenderTarget()); |
| 1788 | renderpass_key.color_formats[index] = color_buffer->format; | 1798 | renderpass_key.color_formats[index] = color_buffer->format; |
| 1789 | num_layers = std::max(num_layers, color_buffer->range.extent.layers); | 1799 | num_layers = std::max(num_layers, color_buffer->range.extent.layers); |
| @@ -1794,6 +1804,10 @@ void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime, | |||
| 1794 | } | 1804 | } |
| 1795 | const size_t num_colors = attachments.size(); | 1805 | const size_t num_colors = attachments.size(); |
| 1796 | if (depth_buffer) { | 1806 | if (depth_buffer) { |
| 1807 | width = std::max(width, is_rescaled ? resolution.ScaleUp(depth_buffer->size.width) | ||
| 1808 | : depth_buffer->size.width); | ||
| 1809 | height = std::max(height, is_rescaled ? resolution.ScaleUp(depth_buffer->size.height) | ||
| 1810 | : depth_buffer->size.height); | ||
| 1797 | attachments.push_back(depth_buffer->RenderTarget()); | 1811 | attachments.push_back(depth_buffer->RenderTarget()); |
| 1798 | renderpass_key.depth_format = depth_buffer->format; | 1812 | renderpass_key.depth_format = depth_buffer->format; |
| 1799 | num_layers = std::max(num_layers, depth_buffer->range.extent.layers); | 1813 | num_layers = std::max(num_layers, depth_buffer->range.extent.layers); |
| @@ -1810,6 +1824,8 @@ void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime, | |||
| 1810 | renderpass_key.samples = samples; | 1824 | renderpass_key.samples = samples; |
| 1811 | 1825 | ||
| 1812 | renderpass = runtime.render_pass_cache.Get(renderpass_key); | 1826 | renderpass = runtime.render_pass_cache.Get(renderpass_key); |
| 1827 | render_area.width = std::min(render_area.width, width); | ||
| 1828 | render_area.height = std::min(render_area.height, height); | ||
| 1813 | 1829 | ||
| 1814 | num_color_buffers = static_cast<u32>(num_colors); | 1830 | num_color_buffers = static_cast<u32>(num_colors); |
| 1815 | framebuffer = runtime.device.GetLogical().CreateFramebuffer({ | 1831 | framebuffer = runtime.device.GetLogical().CreateFramebuffer({ |
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h index 69f06ee7b..0b7ac0df1 100644 --- a/src/video_core/renderer_vulkan/vk_texture_cache.h +++ b/src/video_core/renderer_vulkan/vk_texture_cache.h | |||
| @@ -268,7 +268,7 @@ public: | |||
| 268 | ImageView* depth_buffer, const VideoCommon::RenderTargets& key); | 268 | ImageView* depth_buffer, const VideoCommon::RenderTargets& key); |
| 269 | 269 | ||
| 270 | explicit Framebuffer(TextureCacheRuntime& runtime, ImageView* color_buffer, | 270 | explicit Framebuffer(TextureCacheRuntime& runtime, ImageView* color_buffer, |
| 271 | ImageView* depth_buffer, VkExtent2D extent); | 271 | ImageView* depth_buffer, VkExtent2D extent, bool is_rescaled); |
| 272 | 272 | ||
| 273 | ~Framebuffer(); | 273 | ~Framebuffer(); |
| 274 | 274 | ||
| @@ -279,7 +279,8 @@ public: | |||
| 279 | Framebuffer& operator=(Framebuffer&&) = default; | 279 | Framebuffer& operator=(Framebuffer&&) = default; |
| 280 | 280 | ||
| 281 | void CreateFramebuffer(TextureCacheRuntime& runtime, | 281 | void CreateFramebuffer(TextureCacheRuntime& runtime, |
| 282 | std::span<ImageView*, NUM_RT> color_buffers, ImageView* depth_buffer); | 282 | std::span<ImageView*, NUM_RT> color_buffers, ImageView* depth_buffer, |
| 283 | bool is_rescaled = false); | ||
| 283 | 284 | ||
| 284 | [[nodiscard]] VkFramebuffer Handle() const noexcept { | 285 | [[nodiscard]] VkFramebuffer Handle() const noexcept { |
| 285 | return *framebuffer; | 286 | return *framebuffer; |
diff --git a/src/video_core/shader_cache.cpp b/src/video_core/shader_cache.cpp index 164e4ee0e..f53066579 100644 --- a/src/video_core/shader_cache.cpp +++ b/src/video_core/shader_cache.cpp | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | #include "common/assert.h" | 8 | #include "common/assert.h" |
| 9 | #include "shader_recompiler/frontend/maxwell/control_flow.h" | 9 | #include "shader_recompiler/frontend/maxwell/control_flow.h" |
| 10 | #include "shader_recompiler/object_pool.h" | 10 | #include "shader_recompiler/object_pool.h" |
| 11 | #include "video_core/control/channel_state.h" | ||
| 11 | #include "video_core/dirty_flags.h" | 12 | #include "video_core/dirty_flags.h" |
| 12 | #include "video_core/engines/kepler_compute.h" | 13 | #include "video_core/engines/kepler_compute.h" |
| 13 | #include "video_core/engines/maxwell_3d.h" | 14 | #include "video_core/engines/maxwell_3d.h" |
| @@ -33,29 +34,25 @@ void ShaderCache::SyncGuestHost() { | |||
| 33 | RemovePendingShaders(); | 34 | RemovePendingShaders(); |
| 34 | } | 35 | } |
| 35 | 36 | ||
| 36 | ShaderCache::ShaderCache(VideoCore::RasterizerInterface& rasterizer_, | 37 | ShaderCache::ShaderCache(VideoCore::RasterizerInterface& rasterizer_) : rasterizer{rasterizer_} {} |
| 37 | Tegra::MemoryManager& gpu_memory_, Tegra::Engines::Maxwell3D& maxwell3d_, | ||
| 38 | Tegra::Engines::KeplerCompute& kepler_compute_) | ||
| 39 | : gpu_memory{gpu_memory_}, maxwell3d{maxwell3d_}, kepler_compute{kepler_compute_}, | ||
| 40 | rasterizer{rasterizer_} {} | ||
| 41 | 38 | ||
| 42 | bool ShaderCache::RefreshStages(std::array<u64, 6>& unique_hashes) { | 39 | bool ShaderCache::RefreshStages(std::array<u64, 6>& unique_hashes) { |
| 43 | auto& dirty{maxwell3d.dirty.flags}; | 40 | auto& dirty{maxwell3d->dirty.flags}; |
| 44 | if (!dirty[VideoCommon::Dirty::Shaders]) { | 41 | if (!dirty[VideoCommon::Dirty::Shaders]) { |
| 45 | return last_shaders_valid; | 42 | return last_shaders_valid; |
| 46 | } | 43 | } |
| 47 | dirty[VideoCommon::Dirty::Shaders] = false; | 44 | dirty[VideoCommon::Dirty::Shaders] = false; |
| 48 | 45 | ||
| 49 | const GPUVAddr base_addr{maxwell3d.regs.code_address.CodeAddress()}; | 46 | const GPUVAddr base_addr{maxwell3d->regs.code_address.CodeAddress()}; |
| 50 | for (size_t index = 0; index < Tegra::Engines::Maxwell3D::Regs::MaxShaderProgram; ++index) { | 47 | for (size_t index = 0; index < Tegra::Engines::Maxwell3D::Regs::MaxShaderProgram; ++index) { |
| 51 | if (!maxwell3d.regs.IsShaderConfigEnabled(index)) { | 48 | if (!maxwell3d->regs.IsShaderConfigEnabled(index)) { |
| 52 | unique_hashes[index] = 0; | 49 | unique_hashes[index] = 0; |
| 53 | continue; | 50 | continue; |
| 54 | } | 51 | } |
| 55 | const auto& shader_config{maxwell3d.regs.shader_config[index]}; | 52 | const auto& shader_config{maxwell3d->regs.shader_config[index]}; |
| 56 | const auto program{static_cast<Tegra::Engines::Maxwell3D::Regs::ShaderProgram>(index)}; | 53 | const auto program{static_cast<Tegra::Engines::Maxwell3D::Regs::ShaderProgram>(index)}; |
| 57 | const GPUVAddr shader_addr{base_addr + shader_config.offset}; | 54 | const GPUVAddr shader_addr{base_addr + shader_config.offset}; |
| 58 | const std::optional<VAddr> cpu_shader_addr{gpu_memory.GpuToCpuAddress(shader_addr)}; | 55 | const std::optional<VAddr> cpu_shader_addr{gpu_memory->GpuToCpuAddress(shader_addr)}; |
| 59 | if (!cpu_shader_addr) { | 56 | if (!cpu_shader_addr) { |
| 60 | LOG_ERROR(HW_GPU, "Invalid GPU address for shader 0x{:016x}", shader_addr); | 57 | LOG_ERROR(HW_GPU, "Invalid GPU address for shader 0x{:016x}", shader_addr); |
| 61 | last_shaders_valid = false; | 58 | last_shaders_valid = false; |
| @@ -64,7 +61,7 @@ bool ShaderCache::RefreshStages(std::array<u64, 6>& unique_hashes) { | |||
| 64 | const ShaderInfo* shader_info{TryGet(*cpu_shader_addr)}; | 61 | const ShaderInfo* shader_info{TryGet(*cpu_shader_addr)}; |
| 65 | if (!shader_info) { | 62 | if (!shader_info) { |
| 66 | const u32 start_address{shader_config.offset}; | 63 | const u32 start_address{shader_config.offset}; |
| 67 | GraphicsEnvironment env{maxwell3d, gpu_memory, program, base_addr, start_address}; | 64 | GraphicsEnvironment env{*maxwell3d, *gpu_memory, program, base_addr, start_address}; |
| 68 | shader_info = MakeShaderInfo(env, *cpu_shader_addr); | 65 | shader_info = MakeShaderInfo(env, *cpu_shader_addr); |
| 69 | } | 66 | } |
| 70 | shader_infos[index] = shader_info; | 67 | shader_infos[index] = shader_info; |
| @@ -75,10 +72,10 @@ bool ShaderCache::RefreshStages(std::array<u64, 6>& unique_hashes) { | |||
| 75 | } | 72 | } |
| 76 | 73 | ||
| 77 | const ShaderInfo* ShaderCache::ComputeShader() { | 74 | const ShaderInfo* ShaderCache::ComputeShader() { |
| 78 | const GPUVAddr program_base{kepler_compute.regs.code_loc.Address()}; | 75 | const GPUVAddr program_base{kepler_compute->regs.code_loc.Address()}; |
| 79 | const auto& qmd{kepler_compute.launch_description}; | 76 | const auto& qmd{kepler_compute->launch_description}; |
| 80 | const GPUVAddr shader_addr{program_base + qmd.program_start}; | 77 | const GPUVAddr shader_addr{program_base + qmd.program_start}; |
| 81 | const std::optional<VAddr> cpu_shader_addr{gpu_memory.GpuToCpuAddress(shader_addr)}; | 78 | const std::optional<VAddr> cpu_shader_addr{gpu_memory->GpuToCpuAddress(shader_addr)}; |
| 82 | if (!cpu_shader_addr) { | 79 | if (!cpu_shader_addr) { |
| 83 | LOG_ERROR(HW_GPU, "Invalid GPU address for shader 0x{:016x}", shader_addr); | 80 | LOG_ERROR(HW_GPU, "Invalid GPU address for shader 0x{:016x}", shader_addr); |
| 84 | return nullptr; | 81 | return nullptr; |
| @@ -86,22 +83,22 @@ const ShaderInfo* ShaderCache::ComputeShader() { | |||
| 86 | if (const ShaderInfo* const shader = TryGet(*cpu_shader_addr)) { | 83 | if (const ShaderInfo* const shader = TryGet(*cpu_shader_addr)) { |
| 87 | return shader; | 84 | return shader; |
| 88 | } | 85 | } |
| 89 | ComputeEnvironment env{kepler_compute, gpu_memory, program_base, qmd.program_start}; | 86 | ComputeEnvironment env{*kepler_compute, *gpu_memory, program_base, qmd.program_start}; |
| 90 | return MakeShaderInfo(env, *cpu_shader_addr); | 87 | return MakeShaderInfo(env, *cpu_shader_addr); |
| 91 | } | 88 | } |
| 92 | 89 | ||
| 93 | void ShaderCache::GetGraphicsEnvironments(GraphicsEnvironments& result, | 90 | void ShaderCache::GetGraphicsEnvironments(GraphicsEnvironments& result, |
| 94 | const std::array<u64, NUM_PROGRAMS>& unique_hashes) { | 91 | const std::array<u64, NUM_PROGRAMS>& unique_hashes) { |
| 95 | size_t env_index{}; | 92 | size_t env_index{}; |
| 96 | const GPUVAddr base_addr{maxwell3d.regs.code_address.CodeAddress()}; | 93 | const GPUVAddr base_addr{maxwell3d->regs.code_address.CodeAddress()}; |
| 97 | for (size_t index = 0; index < NUM_PROGRAMS; ++index) { | 94 | for (size_t index = 0; index < NUM_PROGRAMS; ++index) { |
| 98 | if (unique_hashes[index] == 0) { | 95 | if (unique_hashes[index] == 0) { |
| 99 | continue; | 96 | continue; |
| 100 | } | 97 | } |
| 101 | const auto program{static_cast<Tegra::Engines::Maxwell3D::Regs::ShaderProgram>(index)}; | 98 | const auto program{static_cast<Tegra::Engines::Maxwell3D::Regs::ShaderProgram>(index)}; |
| 102 | auto& env{result.envs[index]}; | 99 | auto& env{result.envs[index]}; |
| 103 | const u32 start_address{maxwell3d.regs.shader_config[index].offset}; | 100 | const u32 start_address{maxwell3d->regs.shader_config[index].offset}; |
| 104 | env = GraphicsEnvironment{maxwell3d, gpu_memory, program, base_addr, start_address}; | 101 | env = GraphicsEnvironment{*maxwell3d, *gpu_memory, program, base_addr, start_address}; |
| 105 | env.SetCachedSize(shader_infos[index]->size_bytes); | 102 | env.SetCachedSize(shader_infos[index]->size_bytes); |
| 106 | result.env_ptrs[env_index++] = &env; | 103 | result.env_ptrs[env_index++] = &env; |
| 107 | } | 104 | } |
diff --git a/src/video_core/shader_cache.h b/src/video_core/shader_cache.h index f67cea8c4..a4391202d 100644 --- a/src/video_core/shader_cache.h +++ b/src/video_core/shader_cache.h | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | #include <vector> | 12 | #include <vector> |
| 13 | 13 | ||
| 14 | #include "common/common_types.h" | 14 | #include "common/common_types.h" |
| 15 | #include "video_core/control/channel_state_cache.h" | ||
| 15 | #include "video_core/rasterizer_interface.h" | 16 | #include "video_core/rasterizer_interface.h" |
| 16 | #include "video_core/shader_environment.h" | 17 | #include "video_core/shader_environment.h" |
| 17 | 18 | ||
| @@ -19,6 +20,10 @@ namespace Tegra { | |||
| 19 | class MemoryManager; | 20 | class MemoryManager; |
| 20 | } | 21 | } |
| 21 | 22 | ||
| 23 | namespace Tegra::Control { | ||
| 24 | struct ChannelState; | ||
| 25 | } | ||
| 26 | |||
| 22 | namespace VideoCommon { | 27 | namespace VideoCommon { |
| 23 | 28 | ||
| 24 | class GenericEnvironment; | 29 | class GenericEnvironment; |
| @@ -28,7 +33,7 @@ struct ShaderInfo { | |||
| 28 | size_t size_bytes{}; | 33 | size_t size_bytes{}; |
| 29 | }; | 34 | }; |
| 30 | 35 | ||
| 31 | class ShaderCache { | 36 | class ShaderCache : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> { |
| 32 | static constexpr u64 YUZU_PAGEBITS = 14; | 37 | static constexpr u64 YUZU_PAGEBITS = 14; |
| 33 | static constexpr u64 YUZU_PAGESIZE = u64(1) << YUZU_PAGEBITS; | 38 | static constexpr u64 YUZU_PAGESIZE = u64(1) << YUZU_PAGEBITS; |
| 34 | 39 | ||
| @@ -71,9 +76,7 @@ protected: | |||
| 71 | } | 76 | } |
| 72 | }; | 77 | }; |
| 73 | 78 | ||
| 74 | explicit ShaderCache(VideoCore::RasterizerInterface& rasterizer_, | 79 | explicit ShaderCache(VideoCore::RasterizerInterface& rasterizer_); |
| 75 | Tegra::MemoryManager& gpu_memory_, Tegra::Engines::Maxwell3D& maxwell3d_, | ||
| 76 | Tegra::Engines::KeplerCompute& kepler_compute_); | ||
| 77 | 80 | ||
| 78 | /// @brief Update the hashes and information of shader stages | 81 | /// @brief Update the hashes and information of shader stages |
| 79 | /// @param unique_hashes Shader hashes to store into when a stage is enabled | 82 | /// @param unique_hashes Shader hashes to store into when a stage is enabled |
| @@ -88,10 +91,6 @@ protected: | |||
| 88 | void GetGraphicsEnvironments(GraphicsEnvironments& result, | 91 | void GetGraphicsEnvironments(GraphicsEnvironments& result, |
| 89 | const std::array<u64, NUM_PROGRAMS>& unique_hashes); | 92 | const std::array<u64, NUM_PROGRAMS>& unique_hashes); |
| 90 | 93 | ||
| 91 | Tegra::MemoryManager& gpu_memory; | ||
| 92 | Tegra::Engines::Maxwell3D& maxwell3d; | ||
| 93 | Tegra::Engines::KeplerCompute& kepler_compute; | ||
| 94 | |||
| 95 | std::array<const ShaderInfo*, NUM_PROGRAMS> shader_infos{}; | 94 | std::array<const ShaderInfo*, NUM_PROGRAMS> shader_infos{}; |
| 96 | bool last_shaders_valid = false; | 95 | bool last_shaders_valid = false; |
| 97 | 96 | ||
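Editor's note: as context for the shader-cache constructor change above, caches deriving from ChannelSetupCaches pick up their engine and memory-manager pointers from the currently bound channel instead of storing references at construction, which is why the maxwell3d, kepler_compute and gpu_memory members disappear here and reappear as pointers. The following is a simplified, hypothetical sketch of that base-class idea, not the actual VideoCommon implementation.

// Hypothetical sketch: caches resolve engine pointers from the bound channel.
#include <memory>
#include <unordered_map>

struct Maxwell3D {};
struct KeplerCompute {};
struct MemoryManager {};

struct ChannelState {
    int bind_id;
    std::shared_ptr<Maxwell3D> maxwell_3d;
    std::shared_ptr<KeplerCompute> kepler_compute;
    std::shared_ptr<MemoryManager> memory_manager;
};

class ChannelSetupCacheSketch {
public:
    void CreateChannel(ChannelState& state) {
        channels.emplace(state.bind_id, &state);
    }
    void BindToChannel(int id) {
        ChannelState* state = channels.at(id);
        maxwell3d = state->maxwell_3d.get();
        kepler_compute = state->kepler_compute.get();
        gpu_memory = state->memory_manager.get();
    }
    void EraseChannel(int id) {
        channels.erase(id);
    }

protected:
    // Derived caches dereference these instead of holding lifelong references.
    Maxwell3D* maxwell3d = nullptr;
    KeplerCompute* kepler_compute = nullptr;
    MemoryManager* gpu_memory = nullptr;

private:
    std::unordered_map<int, ChannelState*> channels;
};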
diff --git a/src/video_core/surface.h b/src/video_core/surface.h index 5fd82357c..57ca7f597 100644 --- a/src/video_core/surface.h +++ b/src/video_core/surface.h | |||
| @@ -82,7 +82,7 @@ enum class PixelFormat { | |||
| 82 | BC3_SRGB, | 82 | BC3_SRGB, |
| 83 | BC7_SRGB, | 83 | BC7_SRGB, |
| 84 | A4B4G4R4_UNORM, | 84 | A4B4G4R4_UNORM, |
| 85 | R4G4_UNORM, | 85 | G4R4_UNORM, |
| 86 | ASTC_2D_4X4_SRGB, | 86 | ASTC_2D_4X4_SRGB, |
| 87 | ASTC_2D_8X8_SRGB, | 87 | ASTC_2D_8X8_SRGB, |
| 88 | ASTC_2D_8X5_SRGB, | 88 | ASTC_2D_8X5_SRGB, |
| @@ -218,7 +218,7 @@ constexpr std::array<u8, MaxPixelFormat> BLOCK_WIDTH_TABLE = {{ | |||
| 218 | 4, // BC3_SRGB | 218 | 4, // BC3_SRGB |
| 219 | 4, // BC7_SRGB | 219 | 4, // BC7_SRGB |
| 220 | 1, // A4B4G4R4_UNORM | 220 | 1, // A4B4G4R4_UNORM |
| 221 | 1, // R4G4_UNORM | 221 | 1, // G4R4_UNORM |
| 222 | 4, // ASTC_2D_4X4_SRGB | 222 | 4, // ASTC_2D_4X4_SRGB |
| 223 | 8, // ASTC_2D_8X8_SRGB | 223 | 8, // ASTC_2D_8X8_SRGB |
| 224 | 8, // ASTC_2D_8X5_SRGB | 224 | 8, // ASTC_2D_8X5_SRGB |
| @@ -323,7 +323,7 @@ constexpr std::array<u8, MaxPixelFormat> BLOCK_HEIGHT_TABLE = {{ | |||
| 323 | 4, // BC3_SRGB | 323 | 4, // BC3_SRGB |
| 324 | 4, // BC7_SRGB | 324 | 4, // BC7_SRGB |
| 325 | 1, // A4B4G4R4_UNORM | 325 | 1, // A4B4G4R4_UNORM |
| 326 | 1, // R4G4_UNORM | 326 | 1, // G4R4_UNORM |
| 327 | 4, // ASTC_2D_4X4_SRGB | 327 | 4, // ASTC_2D_4X4_SRGB |
| 328 | 8, // ASTC_2D_8X8_SRGB | 328 | 8, // ASTC_2D_8X8_SRGB |
| 329 | 5, // ASTC_2D_8X5_SRGB | 329 | 5, // ASTC_2D_8X5_SRGB |
| @@ -428,7 +428,7 @@ constexpr std::array<u8, MaxPixelFormat> BITS_PER_BLOCK_TABLE = {{ | |||
| 428 | 128, // BC3_SRGB | 428 | 128, // BC3_SRGB |
| 429 | 128, // BC7_UNORM | 429 | 128, // BC7_UNORM |
| 430 | 16, // A4B4G4R4_UNORM | 430 | 16, // A4B4G4R4_UNORM |
| 431 | 8, // R4G4_UNORM | 431 | 8, // G4R4_UNORM |
| 432 | 128, // ASTC_2D_4X4_SRGB | 432 | 128, // ASTC_2D_4X4_SRGB |
| 433 | 128, // ASTC_2D_8X8_SRGB | 433 | 128, // ASTC_2D_8X8_SRGB |
| 434 | 128, // ASTC_2D_8X5_SRGB | 434 | 128, // ASTC_2D_8X5_SRGB |
diff --git a/src/video_core/texture_cache/format_lookup_table.cpp b/src/video_core/texture_cache/format_lookup_table.cpp index c71694d2a..ad935d386 100644 --- a/src/video_core/texture_cache/format_lookup_table.cpp +++ b/src/video_core/texture_cache/format_lookup_table.cpp | |||
| @@ -63,7 +63,7 @@ PixelFormat PixelFormatFromTextureInfo(TextureFormat format, ComponentType red, | |||
| 63 | case Hash(TextureFormat::A4B4G4R4, UNORM): | 63 | case Hash(TextureFormat::A4B4G4R4, UNORM): |
| 64 | return PixelFormat::A4B4G4R4_UNORM; | 64 | return PixelFormat::A4B4G4R4_UNORM; |
| 65 | case Hash(TextureFormat::G4R4, UNORM): | 65 | case Hash(TextureFormat::G4R4, UNORM): |
| 66 | return PixelFormat::R4G4_UNORM; | 66 | return PixelFormat::G4R4_UNORM; |
| 67 | case Hash(TextureFormat::A5B5G5R1, UNORM): | 67 | case Hash(TextureFormat::A5B5G5R1, UNORM): |
| 68 | return PixelFormat::A5B5G5R1_UNORM; | 68 | return PixelFormat::A5B5G5R1_UNORM; |
| 69 | case Hash(TextureFormat::R8, UNORM): | 69 | case Hash(TextureFormat::R8, UNORM): |
diff --git a/src/video_core/texture_cache/formatter.h b/src/video_core/texture_cache/formatter.h index 6881e4c90..acc854715 100644 --- a/src/video_core/texture_cache/formatter.h +++ b/src/video_core/texture_cache/formatter.h | |||
| @@ -153,8 +153,8 @@ struct fmt::formatter<VideoCore::Surface::PixelFormat> : fmt::formatter<fmt::str | |||
| 153 | return "BC7_SRGB"; | 153 | return "BC7_SRGB"; |
| 154 | case PixelFormat::A4B4G4R4_UNORM: | 154 | case PixelFormat::A4B4G4R4_UNORM: |
| 155 | return "A4B4G4R4_UNORM"; | 155 | return "A4B4G4R4_UNORM"; |
| 156 | case PixelFormat::R4G4_UNORM: | 156 | case PixelFormat::G4R4_UNORM: |
| 157 | return "R4G4_UNORM"; | 157 | return "G4R4_UNORM"; |
| 158 | case PixelFormat::ASTC_2D_4X4_SRGB: | 158 | case PixelFormat::ASTC_2D_4X4_SRGB: |
| 159 | return "ASTC_2D_4X4_SRGB"; | 159 | return "ASTC_2D_4X4_SRGB"; |
| 160 | case PixelFormat::ASTC_2D_8X8_SRGB: | 160 | case PixelFormat::ASTC_2D_8X8_SRGB: |
diff --git a/src/video_core/texture_cache/image_base.cpp b/src/video_core/texture_cache/image_base.cpp index f61e09ac7..91512022f 100644 --- a/src/video_core/texture_cache/image_base.cpp +++ b/src/video_core/texture_cache/image_base.cpp | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | #include <vector> | 7 | #include <vector> |
| 8 | 8 | ||
| 9 | #include "common/common_types.h" | 9 | #include "common/common_types.h" |
| 10 | #include "common/div_ceil.h" | ||
| 10 | #include "video_core/surface.h" | 11 | #include "video_core/surface.h" |
| 11 | #include "video_core/texture_cache/formatter.h" | 12 | #include "video_core/texture_cache/formatter.h" |
| 12 | #include "video_core/texture_cache/image_base.h" | 13 | #include "video_core/texture_cache/image_base.h" |
| @@ -182,10 +183,6 @@ void AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_i | |||
| 182 | }; | 183 | }; |
| 183 | const bool is_lhs_compressed = lhs_block.width > 1 || lhs_block.height > 1; | 184 | const bool is_lhs_compressed = lhs_block.width > 1 || lhs_block.height > 1; |
| 184 | const bool is_rhs_compressed = rhs_block.width > 1 || rhs_block.height > 1; | 185 | const bool is_rhs_compressed = rhs_block.width > 1 || rhs_block.height > 1; |
| 185 | if (is_lhs_compressed && is_rhs_compressed) { | ||
| 186 | LOG_ERROR(HW_GPU, "Compressed to compressed image aliasing is not implemented"); | ||
| 187 | return; | ||
| 188 | } | ||
| 189 | const s32 lhs_mips = lhs.info.resources.levels; | 186 | const s32 lhs_mips = lhs.info.resources.levels; |
| 190 | const s32 rhs_mips = rhs.info.resources.levels; | 187 | const s32 rhs_mips = rhs.info.resources.levels; |
| 191 | const s32 num_mips = std::min(lhs_mips - base->level, rhs_mips); | 188 | const s32 num_mips = std::min(lhs_mips - base->level, rhs_mips); |
| @@ -199,12 +196,12 @@ void AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_i | |||
| 199 | Extent3D lhs_size = MipSize(lhs.info.size, base->level + mip_level); | 196 | Extent3D lhs_size = MipSize(lhs.info.size, base->level + mip_level); |
| 200 | Extent3D rhs_size = MipSize(rhs.info.size, mip_level); | 197 | Extent3D rhs_size = MipSize(rhs.info.size, mip_level); |
| 201 | if (is_lhs_compressed) { | 198 | if (is_lhs_compressed) { |
| 202 | lhs_size.width /= lhs_block.width; | 199 | lhs_size.width = Common::DivCeil(lhs_size.width, lhs_block.width); |
| 203 | lhs_size.height /= lhs_block.height; | 200 | lhs_size.height = Common::DivCeil(lhs_size.height, lhs_block.height); |
| 204 | } | 201 | } |
| 205 | if (is_rhs_compressed) { | 202 | if (is_rhs_compressed) { |
| 206 | rhs_size.width /= rhs_block.width; | 203 | rhs_size.width = Common::DivCeil(rhs_size.width, rhs_block.width); |
| 207 | rhs_size.height /= rhs_block.height; | 204 | rhs_size.height = Common::DivCeil(rhs_size.height, rhs_block.height); |
| 208 | } | 205 | } |
| 209 | const Extent3D copy_size{ | 206 | const Extent3D copy_size{ |
| 210 | .width = std::min(lhs_size.width, rhs_size.width), | 207 | .width = std::min(lhs_size.width, rhs_size.width), |
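Editor's note: the aliasing fix above replaces truncating division with round-up division when converting mip extents to block counts. A 10x6 mip of a 4x4-block compressed format, for example, still occupies 3x2 blocks, which plain division would undercount as 2x1. A small sketch of the usual round-up helper; the exact Common::DivCeil declaration lives in common/div_ceil.h.

#include <cstdint>

// Round-up integer division, as used for compressed mip extents.
constexpr std::uint32_t DivCeil(std::uint32_t value, std::uint32_t divisor) {
    return (value + divisor - 1) / divisor;
}

// Example: a 10x6 mip of a 4x4-block format.
static_assert(DivCeil(10u, 4u) == 3); // plain 10 / 4 would give 2
static_assert(DivCeil(6u, 4u) == 2);  // plain 6 / 4 would give 1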
diff --git a/src/video_core/texture_cache/image_base.h b/src/video_core/texture_cache/image_base.h index 1f85ec9da..620565684 100644 --- a/src/video_core/texture_cache/image_base.h +++ b/src/video_core/texture_cache/image_base.h | |||
| @@ -88,6 +88,9 @@ struct ImageBase { | |||
| 88 | u32 scale_rating = 0; | 88 | u32 scale_rating = 0; |
| 89 | u64 scale_tick = 0; | 89 | u64 scale_tick = 0; |
| 90 | bool has_scaled = false; | 90 | bool has_scaled = false; |
| 91 | |||
| 92 | size_t channel = 0; | ||
| 93 | |||
| 91 | ImageFlagBits flags = ImageFlagBits::CpuModified; | 94 | ImageFlagBits flags = ImageFlagBits::CpuModified; |
| 92 | 95 | ||
| 93 | GPUVAddr gpu_addr = 0; | 96 | GPUVAddr gpu_addr = 0; |
diff --git a/src/video_core/texture_cache/render_targets.h b/src/video_core/texture_cache/render_targets.h index da8ffe9ec..1efbd6507 100644 --- a/src/video_core/texture_cache/render_targets.h +++ b/src/video_core/texture_cache/render_targets.h | |||
| @@ -26,6 +26,7 @@ struct RenderTargets { | |||
| 26 | ImageViewId depth_buffer_id{}; | 26 | ImageViewId depth_buffer_id{}; |
| 27 | std::array<u8, NUM_RT> draw_buffers{}; | 27 | std::array<u8, NUM_RT> draw_buffers{}; |
| 28 | Extent2D size{}; | 28 | Extent2D size{}; |
| 29 | bool is_rescaled{}; | ||
| 29 | }; | 30 | }; |
| 30 | 31 | ||
| 31 | } // namespace VideoCommon | 32 | } // namespace VideoCommon |
diff --git a/src/video_core/texture_cache/texture_cache.cpp b/src/video_core/texture_cache/texture_cache.cpp new file mode 100644 index 000000000..8a9a32f44 --- /dev/null +++ b/src/video_core/texture_cache/texture_cache.cpp | |||
| @@ -0,0 +1,15 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2021 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | |||
| 4 | #include "video_core/control/channel_state_cache.inc" | ||
| 5 | #include "video_core/texture_cache/texture_cache_base.h" | ||
| 6 | |||
| 7 | namespace VideoCommon { | ||
| 8 | |||
| 9 | TextureCacheChannelInfo::TextureCacheChannelInfo(Tegra::Control::ChannelState& state) noexcept | ||
| 10 | : ChannelInfo(state), graphics_image_table{gpu_memory}, graphics_sampler_table{gpu_memory}, | ||
| 11 | compute_image_table{gpu_memory}, compute_sampler_table{gpu_memory} {} | ||
| 12 | |||
| 13 | template class VideoCommon::ChannelSetupCaches<VideoCommon::TextureCacheChannelInfo>; | ||
| 14 | |||
| 15 | } // namespace VideoCommon | ||
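Editor's note: the new texture_cache.cpp above uses explicit template instantiation; the ChannelSetupCaches template is compiled once in a .cpp together with the .inc definitions instead of in every translation unit that includes the header. A generic, self-contained sketch of that header/.inc/.cpp split; the file names and types are illustrative, not the yuzu ones.

#include <cstddef>
#include <vector>

// widget_cache.h - declarations only; member definitions live in the .inc file.
template <typename T>
class WidgetCache {
public:
    void Insert(const T& value);
    std::size_t Size() const;
private:
    std::vector<T> values;
};

// widget_cache.inc - definitions, included only where instantiation happens.
template <typename T>
void WidgetCache<T>::Insert(const T& value) {
    values.push_back(value);
}
template <typename T>
std::size_t WidgetCache<T>::Size() const {
    return values.size();
}

// widget_cache.cpp - one explicit instantiation for the concrete type in use.
template class WidgetCache<int>;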
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h index 1dbe01bc0..eaf4a1c95 100644 --- a/src/video_core/texture_cache/texture_cache.h +++ b/src/video_core/texture_cache/texture_cache.h | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: 2021 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-License-Identifier: GPL-3.0-or-later |
| 3 | 3 | ||
| 4 | #pragma once | 4 | #pragma once |
| 5 | 5 | ||
| @@ -7,6 +7,7 @@ | |||
| 7 | 7 | ||
| 8 | #include "common/alignment.h" | 8 | #include "common/alignment.h" |
| 9 | #include "common/settings.h" | 9 | #include "common/settings.h" |
| 10 | #include "video_core/control/channel_state.h" | ||
| 10 | #include "video_core/dirty_flags.h" | 11 | #include "video_core/dirty_flags.h" |
| 11 | #include "video_core/engines/kepler_compute.h" | 12 | #include "video_core/engines/kepler_compute.h" |
| 12 | #include "video_core/texture_cache/image_view_base.h" | 13 | #include "video_core/texture_cache/image_view_base.h" |
| @@ -29,12 +30,8 @@ using VideoCore::Surface::SurfaceType; | |||
| 29 | using namespace Common::Literals; | 30 | using namespace Common::Literals; |
| 30 | 31 | ||
| 31 | template <class P> | 32 | template <class P> |
| 32 | TextureCache<P>::TextureCache(Runtime& runtime_, VideoCore::RasterizerInterface& rasterizer_, | 33 | TextureCache<P>::TextureCache(Runtime& runtime_, VideoCore::RasterizerInterface& rasterizer_) |
| 33 | Tegra::Engines::Maxwell3D& maxwell3d_, | 34 | : runtime{runtime_}, rasterizer{rasterizer_} { |
| 34 | Tegra::Engines::KeplerCompute& kepler_compute_, | ||
| 35 | Tegra::MemoryManager& gpu_memory_) | ||
| 36 | : runtime{runtime_}, rasterizer{rasterizer_}, maxwell3d{maxwell3d_}, | ||
| 37 | kepler_compute{kepler_compute_}, gpu_memory{gpu_memory_} { | ||
| 38 | // Configure null sampler | 35 | // Configure null sampler |
| 39 | TSCEntry sampler_descriptor{}; | 36 | TSCEntry sampler_descriptor{}; |
| 40 | sampler_descriptor.min_filter.Assign(Tegra::Texture::TextureFilter::Linear); | 37 | sampler_descriptor.min_filter.Assign(Tegra::Texture::TextureFilter::Linear); |
| @@ -93,7 +90,7 @@ void TextureCache<P>::RunGarbageCollector() { | |||
| 93 | const auto copies = FullDownloadCopies(image.info); | 90 | const auto copies = FullDownloadCopies(image.info); |
| 94 | image.DownloadMemory(map, copies); | 91 | image.DownloadMemory(map, copies); |
| 95 | runtime.Finish(); | 92 | runtime.Finish(); |
| 96 | SwizzleImage(gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span); | 93 | SwizzleImage(*gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span); |
| 97 | } | 94 | } |
| 98 | if (True(image.flags & ImageFlagBits::Tracked)) { | 95 | if (True(image.flags & ImageFlagBits::Tracked)) { |
| 99 | UntrackImage(image, image_id); | 96 | UntrackImage(image, image_id); |
| @@ -152,22 +149,24 @@ void TextureCache<P>::MarkModification(ImageId id) noexcept { | |||
| 152 | template <class P> | 149 | template <class P> |
| 153 | template <bool has_blacklists> | 150 | template <bool has_blacklists> |
| 154 | void TextureCache<P>::FillGraphicsImageViews(std::span<ImageViewInOut> views) { | 151 | void TextureCache<P>::FillGraphicsImageViews(std::span<ImageViewInOut> views) { |
| 155 | FillImageViews<has_blacklists>(graphics_image_table, graphics_image_view_ids, views); | 152 | FillImageViews<has_blacklists>(channel_state->graphics_image_table, |
| 153 | channel_state->graphics_image_view_ids, views); | ||
| 156 | } | 154 | } |
| 157 | 155 | ||
| 158 | template <class P> | 156 | template <class P> |
| 159 | void TextureCache<P>::FillComputeImageViews(std::span<ImageViewInOut> views) { | 157 | void TextureCache<P>::FillComputeImageViews(std::span<ImageViewInOut> views) { |
| 160 | FillImageViews<true>(compute_image_table, compute_image_view_ids, views); | 158 | FillImageViews<true>(channel_state->compute_image_table, channel_state->compute_image_view_ids, |
| 159 | views); | ||
| 161 | } | 160 | } |
| 162 | 161 | ||
| 163 | template <class P> | 162 | template <class P> |
| 164 | typename P::Sampler* TextureCache<P>::GetGraphicsSampler(u32 index) { | 163 | typename P::Sampler* TextureCache<P>::GetGraphicsSampler(u32 index) { |
| 165 | if (index > graphics_sampler_table.Limit()) { | 164 | if (index > channel_state->graphics_sampler_table.Limit()) { |
| 166 | LOG_DEBUG(HW_GPU, "Invalid sampler index={}", index); | 165 | LOG_DEBUG(HW_GPU, "Invalid sampler index={}", index); |
| 167 | return &slot_samplers[NULL_SAMPLER_ID]; | 166 | return &slot_samplers[NULL_SAMPLER_ID]; |
| 168 | } | 167 | } |
| 169 | const auto [descriptor, is_new] = graphics_sampler_table.Read(index); | 168 | const auto [descriptor, is_new] = channel_state->graphics_sampler_table.Read(index); |
| 170 | SamplerId& id = graphics_sampler_ids[index]; | 169 | SamplerId& id = channel_state->graphics_sampler_ids[index]; |
| 171 | if (is_new) { | 170 | if (is_new) { |
| 172 | id = FindSampler(descriptor); | 171 | id = FindSampler(descriptor); |
| 173 | } | 172 | } |
| @@ -176,12 +175,12 @@ typename P::Sampler* TextureCache<P>::GetGraphicsSampler(u32 index) { | |||
| 176 | 175 | ||
| 177 | template <class P> | 176 | template <class P> |
| 178 | typename P::Sampler* TextureCache<P>::GetComputeSampler(u32 index) { | 177 | typename P::Sampler* TextureCache<P>::GetComputeSampler(u32 index) { |
| 179 | if (index > compute_sampler_table.Limit()) { | 178 | if (index > channel_state->compute_sampler_table.Limit()) { |
| 180 | LOG_DEBUG(HW_GPU, "Invalid sampler index={}", index); | 179 | LOG_DEBUG(HW_GPU, "Invalid sampler index={}", index); |
| 181 | return &slot_samplers[NULL_SAMPLER_ID]; | 180 | return &slot_samplers[NULL_SAMPLER_ID]; |
| 182 | } | 181 | } |
| 183 | const auto [descriptor, is_new] = compute_sampler_table.Read(index); | 182 | const auto [descriptor, is_new] = channel_state->compute_sampler_table.Read(index); |
| 184 | SamplerId& id = compute_sampler_ids[index]; | 183 | SamplerId& id = channel_state->compute_sampler_ids[index]; |
| 185 | if (is_new) { | 184 | if (is_new) { |
| 186 | id = FindSampler(descriptor); | 185 | id = FindSampler(descriptor); |
| 187 | } | 186 | } |
| @@ -191,34 +190,36 @@ typename P::Sampler* TextureCache<P>::GetComputeSampler(u32 index) { | |||
| 191 | template <class P> | 190 | template <class P> |
| 192 | void TextureCache<P>::SynchronizeGraphicsDescriptors() { | 191 | void TextureCache<P>::SynchronizeGraphicsDescriptors() { |
| 193 | using SamplerIndex = Tegra::Engines::Maxwell3D::Regs::SamplerIndex; | 192 | using SamplerIndex = Tegra::Engines::Maxwell3D::Regs::SamplerIndex; |
| 194 | const bool linked_tsc = maxwell3d.regs.sampler_index == SamplerIndex::ViaHeaderIndex; | 193 | const bool linked_tsc = maxwell3d->regs.sampler_index == SamplerIndex::ViaHeaderIndex; |
| 195 | const u32 tic_limit = maxwell3d.regs.tic.limit; | 194 | const u32 tic_limit = maxwell3d->regs.tic.limit; |
| 196 | const u32 tsc_limit = linked_tsc ? tic_limit : maxwell3d.regs.tsc.limit; | 195 | const u32 tsc_limit = linked_tsc ? tic_limit : maxwell3d->regs.tsc.limit; |
| 197 | if (graphics_sampler_table.Synchornize(maxwell3d.regs.tsc.Address(), tsc_limit)) { | 196 | if (channel_state->graphics_sampler_table.Synchornize(maxwell3d->regs.tsc.Address(), |
| 198 | graphics_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID); | 197 | tsc_limit)) { |
| 198 | channel_state->graphics_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID); | ||
| 199 | } | 199 | } |
| 200 | if (graphics_image_table.Synchornize(maxwell3d.regs.tic.Address(), tic_limit)) { | 200 | if (channel_state->graphics_image_table.Synchornize(maxwell3d->regs.tic.Address(), tic_limit)) { |
| 201 | graphics_image_view_ids.resize(tic_limit + 1, CORRUPT_ID); | 201 | channel_state->graphics_image_view_ids.resize(tic_limit + 1, CORRUPT_ID); |
| 202 | } | 202 | } |
| 203 | } | 203 | } |
| 204 | 204 | ||
| 205 | template <class P> | 205 | template <class P> |
| 206 | void TextureCache<P>::SynchronizeComputeDescriptors() { | 206 | void TextureCache<P>::SynchronizeComputeDescriptors() { |
| 207 | const bool linked_tsc = kepler_compute.launch_description.linked_tsc; | 207 | const bool linked_tsc = kepler_compute->launch_description.linked_tsc; |
| 208 | const u32 tic_limit = kepler_compute.regs.tic.limit; | 208 | const u32 tic_limit = kepler_compute->regs.tic.limit; |
| 209 | const u32 tsc_limit = linked_tsc ? tic_limit : kepler_compute.regs.tsc.limit; | 209 | const u32 tsc_limit = linked_tsc ? tic_limit : kepler_compute->regs.tsc.limit; |
| 210 | const GPUVAddr tsc_gpu_addr = kepler_compute.regs.tsc.Address(); | 210 | const GPUVAddr tsc_gpu_addr = kepler_compute->regs.tsc.Address(); |
| 211 | if (compute_sampler_table.Synchornize(tsc_gpu_addr, tsc_limit)) { | 211 | if (channel_state->compute_sampler_table.Synchornize(tsc_gpu_addr, tsc_limit)) { |
| 212 | compute_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID); | 212 | channel_state->compute_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID); |
| 213 | } | 213 | } |
| 214 | if (compute_image_table.Synchornize(kepler_compute.regs.tic.Address(), tic_limit)) { | 214 | if (channel_state->compute_image_table.Synchornize(kepler_compute->regs.tic.Address(), |
| 215 | compute_image_view_ids.resize(tic_limit + 1, CORRUPT_ID); | 215 | tic_limit)) { |
| 216 | channel_state->compute_image_view_ids.resize(tic_limit + 1, CORRUPT_ID); | ||
| 216 | } | 217 | } |
| 217 | } | 218 | } |
| 218 | 219 | ||
| 219 | template <class P> | 220 | template <class P> |
| 220 | bool TextureCache<P>::RescaleRenderTargets(bool is_clear) { | 221 | bool TextureCache<P>::RescaleRenderTargets(bool is_clear) { |
| 221 | auto& flags = maxwell3d.dirty.flags; | 222 | auto& flags = maxwell3d->dirty.flags; |
| 222 | u32 scale_rating = 0; | 223 | u32 scale_rating = 0; |
| 223 | bool rescaled = false; | 224 | bool rescaled = false; |
| 224 | std::array<ImageId, NUM_RT> tmp_color_images{}; | 225 | std::array<ImageId, NUM_RT> tmp_color_images{}; |
| @@ -315,7 +316,7 @@ bool TextureCache<P>::RescaleRenderTargets(bool is_clear) { | |||
| 315 | template <class P> | 316 | template <class P> |
| 316 | void TextureCache<P>::UpdateRenderTargets(bool is_clear) { | 317 | void TextureCache<P>::UpdateRenderTargets(bool is_clear) { |
| 317 | using namespace VideoCommon::Dirty; | 318 | using namespace VideoCommon::Dirty; |
| 318 | auto& flags = maxwell3d.dirty.flags; | 319 | auto& flags = maxwell3d->dirty.flags; |
| 319 | if (!flags[Dirty::RenderTargets]) { | 320 | if (!flags[Dirty::RenderTargets]) { |
| 320 | for (size_t index = 0; index < NUM_RT; ++index) { | 321 | for (size_t index = 0; index < NUM_RT; ++index) { |
| 321 | ImageViewId& color_buffer_id = render_targets.color_buffer_ids[index]; | 322 | ImageViewId& color_buffer_id = render_targets.color_buffer_ids[index]; |
| @@ -342,7 +343,7 @@ void TextureCache<P>::UpdateRenderTargets(bool is_clear) { | |||
| 342 | PrepareImageView(depth_buffer_id, true, is_clear && IsFullClear(depth_buffer_id)); | 343 | PrepareImageView(depth_buffer_id, true, is_clear && IsFullClear(depth_buffer_id)); |
| 343 | 344 | ||
| 344 | for (size_t index = 0; index < NUM_RT; ++index) { | 345 | for (size_t index = 0; index < NUM_RT; ++index) { |
| 345 | render_targets.draw_buffers[index] = static_cast<u8>(maxwell3d.regs.rt_control.Map(index)); | 346 | render_targets.draw_buffers[index] = static_cast<u8>(maxwell3d->regs.rt_control.Map(index)); |
| 346 | } | 347 | } |
| 347 | u32 up_scale = 1; | 348 | u32 up_scale = 1; |
| 348 | u32 down_shift = 0; | 349 | u32 down_shift = 0; |
| @@ -351,9 +352,10 @@ void TextureCache<P>::UpdateRenderTargets(bool is_clear) { | |||
| 351 | down_shift = Settings::values.resolution_info.down_shift; | 352 | down_shift = Settings::values.resolution_info.down_shift; |
| 352 | } | 353 | } |
| 353 | render_targets.size = Extent2D{ | 354 | render_targets.size = Extent2D{ |
| 354 | (maxwell3d.regs.render_area.width * up_scale) >> down_shift, | 355 | (maxwell3d->regs.render_area.width * up_scale) >> down_shift, |
| 355 | (maxwell3d.regs.render_area.height * up_scale) >> down_shift, | 356 | (maxwell3d->regs.render_area.height * up_scale) >> down_shift, |
| 356 | }; | 357 | }; |
| 358 | render_targets.is_rescaled = is_rescaling; | ||
| 357 | 359 | ||
| 358 | flags[Dirty::DepthBiasGlobal] = true; | 360 | flags[Dirty::DepthBiasGlobal] = true; |
| 359 | } | 361 | } |
| @@ -458,7 +460,7 @@ void TextureCache<P>::DownloadMemory(VAddr cpu_addr, size_t size) { | |||
| 458 | const auto copies = FullDownloadCopies(image.info); | 460 | const auto copies = FullDownloadCopies(image.info); |
| 459 | image.DownloadMemory(map, copies); | 461 | image.DownloadMemory(map, copies); |
| 460 | runtime.Finish(); | 462 | runtime.Finish(); |
| 461 | SwizzleImage(gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span); | 463 | SwizzleImage(*gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span); |
| 462 | } | 464 | } |
| 463 | } | 465 | } |
| 464 | 466 | ||
| @@ -477,12 +479,20 @@ void TextureCache<P>::UnmapMemory(VAddr cpu_addr, size_t size) { | |||
| 477 | } | 479 | } |
| 478 | 480 | ||
| 479 | template <class P> | 481 | template <class P> |
| 480 | void TextureCache<P>::UnmapGPUMemory(GPUVAddr gpu_addr, size_t size) { | 482 | void TextureCache<P>::UnmapGPUMemory(size_t as_id, GPUVAddr gpu_addr, size_t size) { |
| 481 | std::vector<ImageId> deleted_images; | 483 | std::vector<ImageId> deleted_images; |
| 482 | ForEachImageInRegionGPU(gpu_addr, size, | 484 | ForEachImageInRegionGPU(as_id, gpu_addr, size, |
| 483 | [&](ImageId id, Image&) { deleted_images.push_back(id); }); | 485 | [&](ImageId id, Image&) { deleted_images.push_back(id); }); |
| 484 | for (const ImageId id : deleted_images) { | 486 | for (const ImageId id : deleted_images) { |
| 485 | Image& image = slot_images[id]; | 487 | Image& image = slot_images[id]; |
| 488 | if (True(image.flags & ImageFlagBits::CpuModified)) { | ||
| 489 | return; | ||
| 490 | } | ||
| 491 | image.flags |= ImageFlagBits::CpuModified; | ||
| 492 | if (True(image.flags & ImageFlagBits::Tracked)) { | ||
| 493 | UntrackImage(image, id); | ||
| 494 | } | ||
| 495 | /* | ||
| 486 | if (True(image.flags & ImageFlagBits::Remapped)) { | 496 | if (True(image.flags & ImageFlagBits::Remapped)) { |
| 487 | continue; | 497 | continue; |
| 488 | } | 498 | } |
| @@ -490,6 +500,7 @@ void TextureCache<P>::UnmapGPUMemory(GPUVAddr gpu_addr, size_t size) { | |||
| 490 | if (True(image.flags & ImageFlagBits::Tracked)) { | 500 | if (True(image.flags & ImageFlagBits::Tracked)) { |
| 491 | UntrackImage(image, id); | 501 | UntrackImage(image, id); |
| 492 | } | 502 | } |
| 503 | */ | ||
| 493 | } | 504 | } |
| 494 | } | 505 | } |
| 495 | 506 | ||
| @@ -655,7 +666,7 @@ void TextureCache<P>::PopAsyncFlushes() { | |||
| 655 | for (const ImageId image_id : download_ids) { | 666 | for (const ImageId image_id : download_ids) { |
| 656 | const ImageBase& image = slot_images[image_id]; | 667 | const ImageBase& image = slot_images[image_id]; |
| 657 | const auto copies = FullDownloadCopies(image.info); | 668 | const auto copies = FullDownloadCopies(image.info); |
| 658 | SwizzleImage(gpu_memory, image.gpu_addr, image.info, copies, download_span); | 669 | SwizzleImage(*gpu_memory, image.gpu_addr, image.info, copies, download_span); |
| 659 | download_map.offset += image.unswizzled_size_bytes; | 670 | download_map.offset += image.unswizzled_size_bytes; |
| 660 | download_span = download_span.subspan(image.unswizzled_size_bytes); | 671 | download_span = download_span.subspan(image.unswizzled_size_bytes); |
| 661 | } | 672 | } |
| @@ -714,26 +725,26 @@ void TextureCache<P>::UploadImageContents(Image& image, StagingBuffer& staging) | |||
| 714 | const GPUVAddr gpu_addr = image.gpu_addr; | 725 | const GPUVAddr gpu_addr = image.gpu_addr; |
| 715 | 726 | ||
| 716 | if (True(image.flags & ImageFlagBits::AcceleratedUpload)) { | 727 | if (True(image.flags & ImageFlagBits::AcceleratedUpload)) { |
| 717 | gpu_memory.ReadBlockUnsafe(gpu_addr, mapped_span.data(), mapped_span.size_bytes()); | 728 | gpu_memory->ReadBlockUnsafe(gpu_addr, mapped_span.data(), mapped_span.size_bytes()); |
| 718 | const auto uploads = FullUploadSwizzles(image.info); | 729 | const auto uploads = FullUploadSwizzles(image.info); |
| 719 | runtime.AccelerateImageUpload(image, staging, uploads); | 730 | runtime.AccelerateImageUpload(image, staging, uploads); |
| 720 | } else if (True(image.flags & ImageFlagBits::Converted)) { | 731 | } else if (True(image.flags & ImageFlagBits::Converted)) { |
| 721 | std::vector<u8> unswizzled_data(image.unswizzled_size_bytes); | 732 | std::vector<u8> unswizzled_data(image.unswizzled_size_bytes); |
| 722 | auto copies = UnswizzleImage(gpu_memory, gpu_addr, image.info, unswizzled_data); | 733 | auto copies = UnswizzleImage(*gpu_memory, gpu_addr, image.info, unswizzled_data); |
| 723 | ConvertImage(unswizzled_data, image.info, mapped_span, copies); | 734 | ConvertImage(unswizzled_data, image.info, mapped_span, copies); |
| 724 | image.UploadMemory(staging, copies); | 735 | image.UploadMemory(staging, copies); |
| 725 | } else { | 736 | } else { |
| 726 | const auto copies = UnswizzleImage(gpu_memory, gpu_addr, image.info, mapped_span); | 737 | const auto copies = UnswizzleImage(*gpu_memory, gpu_addr, image.info, mapped_span); |
| 727 | image.UploadMemory(staging, copies); | 738 | image.UploadMemory(staging, copies); |
| 728 | } | 739 | } |
| 729 | } | 740 | } |
| 730 | 741 | ||
| 731 | template <class P> | 742 | template <class P> |
| 732 | ImageViewId TextureCache<P>::FindImageView(const TICEntry& config) { | 743 | ImageViewId TextureCache<P>::FindImageView(const TICEntry& config) { |
| 733 | if (!IsValidEntry(gpu_memory, config)) { | 744 | if (!IsValidEntry(*gpu_memory, config)) { |
| 734 | return NULL_IMAGE_VIEW_ID; | 745 | return NULL_IMAGE_VIEW_ID; |
| 735 | } | 746 | } |
| 736 | const auto [pair, is_new] = image_views.try_emplace(config); | 747 | const auto [pair, is_new] = channel_state->image_views.try_emplace(config); |
| 737 | ImageViewId& image_view_id = pair->second; | 748 | ImageViewId& image_view_id = pair->second; |
| 738 | if (is_new) { | 749 | if (is_new) { |
| 739 | image_view_id = CreateImageView(config); | 750 | image_view_id = CreateImageView(config); |
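FindImageView is the memoization point for TIC descriptors: the lookup map now lives in the bound channel's state, so identical descriptors coming from different channels resolve against their own caches. A condensed sketch of the pattern used above, with the surrounding validity check stripped; the names are the ones from the hunk, and using TICEntry as the key assumes a matching std::hash specialization and operator== for the descriptor type, which video_core provides elsewhere:

    // Sketch: cache-or-create an image view ID keyed by the raw descriptor.
    ImageViewId FindImageViewSketch(TextureCacheChannelInfo& channel, const TICEntry& config) {
        // try_emplace default-constructs a null ImageViewId only when the key is new.
        const auto [it, is_new] = channel.image_views.try_emplace(config);
        if (is_new) {
            it->second = CreateImageView(config); // expensive path, taken once per descriptor
        }
        return it->second;
    }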
| @@ -777,9 +788,9 @@ ImageId TextureCache<P>::FindOrInsertImage(const ImageInfo& info, GPUVAddr gpu_a | |||
| 777 | template <class P> | 788 | template <class P> |
| 778 | ImageId TextureCache<P>::FindImage(const ImageInfo& info, GPUVAddr gpu_addr, | 789 | ImageId TextureCache<P>::FindImage(const ImageInfo& info, GPUVAddr gpu_addr, |
| 779 | RelaxedOptions options) { | 790 | RelaxedOptions options) { |
| 780 | std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); | 791 | std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); |
| 781 | if (!cpu_addr) { | 792 | if (!cpu_addr) { |
| 782 | cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr, CalculateGuestSizeInBytes(info)); | 793 | cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr, CalculateGuestSizeInBytes(info)); |
| 783 | if (!cpu_addr) { | 794 | if (!cpu_addr) { |
| 784 | return ImageId{}; | 795 | return ImageId{}; |
| 785 | } | 796 | } |
| @@ -860,7 +871,7 @@ void TextureCache<P>::InvalidateScale(Image& image) { | |||
| 860 | image.scale_tick = frame_tick + 1; | 871 | image.scale_tick = frame_tick + 1; |
| 861 | } | 872 | } |
| 862 | const std::span<const ImageViewId> image_view_ids = image.image_view_ids; | 873 | const std::span<const ImageViewId> image_view_ids = image.image_view_ids; |
| 863 | auto& dirty = maxwell3d.dirty.flags; | 874 | auto& dirty = maxwell3d->dirty.flags; |
| 864 | dirty[Dirty::RenderTargets] = true; | 875 | dirty[Dirty::RenderTargets] = true; |
| 865 | dirty[Dirty::ZetaBuffer] = true; | 876 | dirty[Dirty::ZetaBuffer] = true; |
| 866 | for (size_t rt = 0; rt < NUM_RT; ++rt) { | 877 | for (size_t rt = 0; rt < NUM_RT; ++rt) { |
| @@ -880,12 +891,15 @@ void TextureCache<P>::InvalidateScale(Image& image) { | |||
| 880 | } | 891 | } |
| 881 | image.image_view_ids.clear(); | 892 | image.image_view_ids.clear(); |
| 882 | image.image_view_infos.clear(); | 893 | image.image_view_infos.clear(); |
| 883 | if constexpr (ENABLE_VALIDATION) { | 894 | for (size_t c : active_channel_ids) { |
| 884 | std::ranges::fill(graphics_image_view_ids, CORRUPT_ID); | 895 | auto& channel_info = channel_storage[c]; |
| 885 | std::ranges::fill(compute_image_view_ids, CORRUPT_ID); | 896 | if constexpr (ENABLE_VALIDATION) { |
| 897 | std::ranges::fill(channel_info.graphics_image_view_ids, CORRUPT_ID); | ||
| 898 | std::ranges::fill(channel_info.compute_image_view_ids, CORRUPT_ID); | ||
| 899 | } | ||
| 900 | channel_info.graphics_image_table.Invalidate(); | ||
| 901 | channel_info.compute_image_table.Invalidate(); | ||
| 886 | } | 902 | } |
| 887 | graphics_image_table.Invalidate(); | ||
| 888 | compute_image_table.Invalidate(); | ||
| 889 | has_deleted_images = true; | 903 | has_deleted_images = true; |
| 890 | } | 904 | } |
| 891 | 905 | ||
| @@ -929,10 +943,10 @@ bool TextureCache<P>::ScaleDown(Image& image) { | |||
| 929 | template <class P> | 943 | template <class P> |
| 930 | ImageId TextureCache<P>::InsertImage(const ImageInfo& info, GPUVAddr gpu_addr, | 944 | ImageId TextureCache<P>::InsertImage(const ImageInfo& info, GPUVAddr gpu_addr, |
| 931 | RelaxedOptions options) { | 945 | RelaxedOptions options) { |
| 932 | std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); | 946 | std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); |
| 933 | if (!cpu_addr) { | 947 | if (!cpu_addr) { |
| 934 | const auto size = CalculateGuestSizeInBytes(info); | 948 | const auto size = CalculateGuestSizeInBytes(info); |
| 935 | cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr, size); | 949 | cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr, size); |
| 936 | if (!cpu_addr) { | 950 | if (!cpu_addr) { |
| 937 | const VAddr fake_addr = ~(1ULL << 40ULL) + virtual_invalid_space; | 951 | const VAddr fake_addr = ~(1ULL << 40ULL) + virtual_invalid_space; |
| 938 | virtual_invalid_space += Common::AlignUp(size, 32); | 952 | virtual_invalid_space += Common::AlignUp(size, 32); |
| @@ -1050,7 +1064,7 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA | |||
| 1050 | const ImageId new_image_id = slot_images.insert(runtime, new_info, gpu_addr, cpu_addr); | 1064 | const ImageId new_image_id = slot_images.insert(runtime, new_info, gpu_addr, cpu_addr); |
| 1051 | Image& new_image = slot_images[new_image_id]; | 1065 | Image& new_image = slot_images[new_image_id]; |
| 1052 | 1066 | ||
| 1053 | if (!gpu_memory.IsContinousRange(new_image.gpu_addr, new_image.guest_size_bytes)) { | 1067 | if (!gpu_memory->IsContinousRange(new_image.gpu_addr, new_image.guest_size_bytes)) { |
| 1054 | new_image.flags |= ImageFlagBits::Sparse; | 1068 | new_image.flags |= ImageFlagBits::Sparse; |
| 1055 | } | 1069 | } |
| 1056 | 1070 | ||
| @@ -1192,7 +1206,7 @@ SamplerId TextureCache<P>::FindSampler(const TSCEntry& config) { | |||
| 1192 | if (std::ranges::all_of(config.raw, [](u64 value) { return value == 0; })) { | 1206 | if (std::ranges::all_of(config.raw, [](u64 value) { return value == 0; })) { |
| 1193 | return NULL_SAMPLER_ID; | 1207 | return NULL_SAMPLER_ID; |
| 1194 | } | 1208 | } |
| 1195 | const auto [pair, is_new] = samplers.try_emplace(config); | 1209 | const auto [pair, is_new] = channel_state->samplers.try_emplace(config); |
| 1196 | if (is_new) { | 1210 | if (is_new) { |
| 1197 | pair->second = slot_samplers.insert(runtime, config); | 1211 | pair->second = slot_samplers.insert(runtime, config); |
| 1198 | } | 1212 | } |
| @@ -1201,7 +1215,7 @@ SamplerId TextureCache<P>::FindSampler(const TSCEntry& config) { | |||
| 1201 | 1215 | ||
| 1202 | template <class P> | 1216 | template <class P> |
| 1203 | ImageViewId TextureCache<P>::FindColorBuffer(size_t index, bool is_clear) { | 1217 | ImageViewId TextureCache<P>::FindColorBuffer(size_t index, bool is_clear) { |
| 1204 | const auto& regs = maxwell3d.regs; | 1218 | const auto& regs = maxwell3d->regs; |
| 1205 | if (index >= regs.rt_control.count) { | 1219 | if (index >= regs.rt_control.count) { |
| 1206 | return ImageViewId{}; | 1220 | return ImageViewId{}; |
| 1207 | } | 1221 | } |
| @@ -1219,7 +1233,7 @@ ImageViewId TextureCache<P>::FindColorBuffer(size_t index, bool is_clear) { | |||
| 1219 | 1233 | ||
| 1220 | template <class P> | 1234 | template <class P> |
| 1221 | ImageViewId TextureCache<P>::FindDepthBuffer(bool is_clear) { | 1235 | ImageViewId TextureCache<P>::FindDepthBuffer(bool is_clear) { |
| 1222 | const auto& regs = maxwell3d.regs; | 1236 | const auto& regs = maxwell3d->regs; |
| 1223 | if (!regs.zeta_enable) { | 1237 | if (!regs.zeta_enable) { |
| 1224 | return ImageViewId{}; | 1238 | return ImageViewId{}; |
| 1225 | } | 1239 | } |
| @@ -1316,11 +1330,17 @@ void TextureCache<P>::ForEachImageInRegion(VAddr cpu_addr, size_t size, Func&& f | |||
| 1316 | 1330 | ||
| 1317 | template <class P> | 1331 | template <class P> |
| 1318 | template <typename Func> | 1332 | template <typename Func> |
| 1319 | void TextureCache<P>::ForEachImageInRegionGPU(GPUVAddr gpu_addr, size_t size, Func&& func) { | 1333 | void TextureCache<P>::ForEachImageInRegionGPU(size_t as_id, GPUVAddr gpu_addr, size_t size, |
| 1334 | Func&& func) { | ||
| 1320 | using FuncReturn = typename std::invoke_result<Func, ImageId, Image&>::type; | 1335 | using FuncReturn = typename std::invoke_result<Func, ImageId, Image&>::type; |
| 1321 | static constexpr bool BOOL_BREAK = std::is_same_v<FuncReturn, bool>; | 1336 | static constexpr bool BOOL_BREAK = std::is_same_v<FuncReturn, bool>; |
| 1322 | boost::container::small_vector<ImageId, 8> images; | 1337 | boost::container::small_vector<ImageId, 8> images; |
| 1323 | ForEachGPUPage(gpu_addr, size, [this, &images, gpu_addr, size, func](u64 page) { | 1338 | auto storage_id = getStorageID(as_id); |
| 1339 | if (!storage_id) { | ||
| 1340 | return; | ||
| 1341 | } | ||
| 1342 | auto& gpu_page_table = gpu_page_table_storage[*storage_id]; | ||
| 1343 | ForEachGPUPage(gpu_addr, size, [this, gpu_page_table, &images, gpu_addr, size, func](u64 page) { | ||
| 1324 | const auto it = gpu_page_table.find(page); | 1344 | const auto it = gpu_page_table.find(page); |
| 1325 | if (it == gpu_page_table.end()) { | 1345 | if (it == gpu_page_table.end()) { |
| 1326 | if constexpr (BOOL_BREAK) { | 1346 | if constexpr (BOOL_BREAK) { |
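With multiple GPU address spaces, the GPU-side region walk first resolves which page table applies: the address-space id is mapped to an index into gpu_page_table_storage, and an unknown id simply aborts the walk. A condensed sketch of that resolution step, assuming (as the early return above implies) that getStorageID returns a std::optional index:

    // Sketch: pick the page table that belongs to this address space before
    // visiting its pages; bail out when the address space was never registered.
    const std::optional<size_t> storage_id = getStorageID(as_id);
    if (!storage_id) {
        return;
    }
    TextureCacheGPUMap& gpu_page_table = gpu_page_table_storage[*storage_id];
    ForEachGPUPage(gpu_addr, size, [&](u64 page) {
        if (const auto it = gpu_page_table.find(page); it != gpu_page_table.end()) {
            for (const ImageId image_id : it->second) {
                func(image_id, slot_images[image_id]); // duplicate visits are filtered in the real code
            }
        }
    });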
| @@ -1403,9 +1423,9 @@ template <typename Func> | |||
| 1403 | void TextureCache<P>::ForEachSparseSegment(ImageBase& image, Func&& func) { | 1423 | void TextureCache<P>::ForEachSparseSegment(ImageBase& image, Func&& func) { |
| 1404 | using FuncReturn = typename std::invoke_result<Func, GPUVAddr, VAddr, size_t>::type; | 1424 | using FuncReturn = typename std::invoke_result<Func, GPUVAddr, VAddr, size_t>::type; |
| 1405 | static constexpr bool RETURNS_BOOL = std::is_same_v<FuncReturn, bool>; | 1425 | static constexpr bool RETURNS_BOOL = std::is_same_v<FuncReturn, bool>; |
| 1406 | const auto segments = gpu_memory.GetSubmappedRange(image.gpu_addr, image.guest_size_bytes); | 1426 | const auto segments = gpu_memory->GetSubmappedRange(image.gpu_addr, image.guest_size_bytes); |
| 1407 | for (const auto& [gpu_addr, size] : segments) { | 1427 | for (const auto& [gpu_addr, size] : segments) { |
| 1408 | std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); | 1428 | std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); |
| 1409 | ASSERT(cpu_addr); | 1429 | ASSERT(cpu_addr); |
| 1410 | if constexpr (RETURNS_BOOL) { | 1430 | if constexpr (RETURNS_BOOL) { |
| 1411 | if (func(gpu_addr, *cpu_addr, size)) { | 1431 | if (func(gpu_addr, *cpu_addr, size)) { |
| @@ -1448,8 +1468,9 @@ void TextureCache<P>::RegisterImage(ImageId image_id) { | |||
| 1448 | } | 1468 | } |
| 1449 | image.lru_index = lru_cache.Insert(image_id, frame_tick); | 1469 | image.lru_index = lru_cache.Insert(image_id, frame_tick); |
| 1450 | 1470 | ||
| 1451 | ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, | 1471 | ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, [this, image_id](u64 page) { |
| 1452 | [this, image_id](u64 page) { gpu_page_table[page].push_back(image_id); }); | 1472 | (*channel_state->gpu_page_table)[page].push_back(image_id); |
| 1473 | }); | ||
| 1453 | if (False(image.flags & ImageFlagBits::Sparse)) { | 1474 | if (False(image.flags & ImageFlagBits::Sparse)) { |
| 1454 | auto map_id = | 1475 | auto map_id = |
| 1455 | slot_map_views.insert(image.gpu_addr, image.cpu_addr, image.guest_size_bytes, image_id); | 1476 | slot_map_views.insert(image.gpu_addr, image.cpu_addr, image.guest_size_bytes, image_id); |
| @@ -1480,9 +1501,9 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) { | |||
| 1480 | image.flags &= ~ImageFlagBits::BadOverlap; | 1501 | image.flags &= ~ImageFlagBits::BadOverlap; |
| 1481 | lru_cache.Free(image.lru_index); | 1502 | lru_cache.Free(image.lru_index); |
| 1482 | const auto& clear_page_table = | 1503 | const auto& clear_page_table = |
| 1483 | [this, image_id]( | 1504 | [this, image_id](u64 page, |
| 1484 | u64 page, | 1505 | std::unordered_map<u64, std::vector<ImageId>, Common::IdentityHash<u64>>& |
| 1485 | std::unordered_map<u64, std::vector<ImageId>, IdentityHash<u64>>& selected_page_table) { | 1506 | selected_page_table) { |
| 1486 | const auto page_it = selected_page_table.find(page); | 1507 | const auto page_it = selected_page_table.find(page); |
| 1487 | if (page_it == selected_page_table.end()) { | 1508 | if (page_it == selected_page_table.end()) { |
| 1488 | ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << YUZU_PAGEBITS); | 1509 | ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << YUZU_PAGEBITS); |
| @@ -1497,8 +1518,9 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) { | |||
| 1497 | } | 1518 | } |
| 1498 | image_ids.erase(vector_it); | 1519 | image_ids.erase(vector_it); |
| 1499 | }; | 1520 | }; |
| 1500 | ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, | 1521 | ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, [this, &clear_page_table](u64 page) { |
| 1501 | [this, &clear_page_table](u64 page) { clear_page_table(page, gpu_page_table); }); | 1522 | clear_page_table(page, (*channel_state->gpu_page_table)); |
| 1523 | }); | ||
| 1502 | if (False(image.flags & ImageFlagBits::Sparse)) { | 1524 | if (False(image.flags & ImageFlagBits::Sparse)) { |
| 1503 | const auto map_id = image.map_view_id; | 1525 | const auto map_id = image.map_view_id; |
| 1504 | ForEachCPUPage(image.cpu_addr, image.guest_size_bytes, [this, map_id](u64 page) { | 1526 | ForEachCPUPage(image.cpu_addr, image.guest_size_bytes, [this, map_id](u64 page) { |
| @@ -1631,7 +1653,7 @@ void TextureCache<P>::DeleteImage(ImageId image_id, bool immediate_delete) { | |||
| 1631 | ASSERT_MSG(False(image.flags & ImageFlagBits::Registered), "Image was not unregistered"); | 1653 | ASSERT_MSG(False(image.flags & ImageFlagBits::Registered), "Image was not unregistered"); |
| 1632 | 1654 | ||
| 1633 | // Mark render targets as dirty | 1655 | // Mark render targets as dirty |
| 1634 | auto& dirty = maxwell3d.dirty.flags; | 1656 | auto& dirty = maxwell3d->dirty.flags; |
| 1635 | dirty[Dirty::RenderTargets] = true; | 1657 | dirty[Dirty::RenderTargets] = true; |
| 1636 | dirty[Dirty::ZetaBuffer] = true; | 1658 | dirty[Dirty::ZetaBuffer] = true; |
| 1637 | for (size_t rt = 0; rt < NUM_RT; ++rt) { | 1659 | for (size_t rt = 0; rt < NUM_RT; ++rt) { |
| @@ -1681,24 +1703,30 @@ void TextureCache<P>::DeleteImage(ImageId image_id, bool immediate_delete) { | |||
| 1681 | if (alloc_images.empty()) { | 1703 | if (alloc_images.empty()) { |
| 1682 | image_allocs_table.erase(alloc_it); | 1704 | image_allocs_table.erase(alloc_it); |
| 1683 | } | 1705 | } |
| 1684 | if constexpr (ENABLE_VALIDATION) { | 1706 | for (size_t c : active_channel_ids) { |
| 1685 | std::ranges::fill(graphics_image_view_ids, CORRUPT_ID); | 1707 | auto& channel_info = channel_storage[c]; |
| 1686 | std::ranges::fill(compute_image_view_ids, CORRUPT_ID); | 1708 | if constexpr (ENABLE_VALIDATION) { |
| 1709 | std::ranges::fill(channel_info.graphics_image_view_ids, CORRUPT_ID); | ||
| 1710 | std::ranges::fill(channel_info.compute_image_view_ids, CORRUPT_ID); | ||
| 1711 | } | ||
| 1712 | channel_info.graphics_image_table.Invalidate(); | ||
| 1713 | channel_info.compute_image_table.Invalidate(); | ||
| 1687 | } | 1714 | } |
| 1688 | graphics_image_table.Invalidate(); | ||
| 1689 | compute_image_table.Invalidate(); | ||
| 1690 | has_deleted_images = true; | 1715 | has_deleted_images = true; |
| 1691 | } | 1716 | } |
| 1692 | 1717 | ||
| 1693 | template <class P> | 1718 | template <class P> |
| 1694 | void TextureCache<P>::RemoveImageViewReferences(std::span<const ImageViewId> removed_views) { | 1719 | void TextureCache<P>::RemoveImageViewReferences(std::span<const ImageViewId> removed_views) { |
| 1695 | auto it = image_views.begin(); | 1720 | for (size_t c : active_channel_ids) { |
| 1696 | while (it != image_views.end()) { | 1721 | auto& channel_info = channel_storage[c]; |
| 1697 | const auto found = std::ranges::find(removed_views, it->second); | 1722 | auto it = channel_info.image_views.begin(); |
| 1698 | if (found != removed_views.end()) { | 1723 | while (it != channel_info.image_views.end()) { |
| 1699 | it = image_views.erase(it); | 1724 | const auto found = std::ranges::find(removed_views, it->second); |
| 1700 | } else { | 1725 | if (found != removed_views.end()) { |
| 1701 | ++it; | 1726 | it = channel_info.image_views.erase(it); |
| 1727 | } else { | ||
| 1728 | ++it; | ||
| 1729 | } | ||
| 1702 | } | 1730 | } |
| 1703 | } | 1731 | } |
| 1704 | } | 1732 | } |
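The per-channel erase loop in RemoveImageViewReferences uses the iterator-based erase-while-iterating idiom because unordered_map::erase only invalidates the erased iterator. Since the codebase is already on C++20, the same body could be expressed with std::erase_if; a sketch using the member names from the hunk above:

    // Equivalent formulation with std::erase_if (sketch only).
    for (size_t c : active_channel_ids) {
        auto& channel_info = channel_storage[c];
        std::erase_if(channel_info.image_views, [&](const auto& entry) {
            // Drop any cached TIC -> view mapping whose view was just removed.
            return std::ranges::find(removed_views, entry.second) != removed_views.end();
        });
    }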
| @@ -1729,6 +1757,7 @@ void TextureCache<P>::SynchronizeAliases(ImageId image_id) { | |||
| 1729 | boost::container::small_vector<const AliasedImage*, 1> aliased_images; | 1757 | boost::container::small_vector<const AliasedImage*, 1> aliased_images; |
| 1730 | Image& image = slot_images[image_id]; | 1758 | Image& image = slot_images[image_id]; |
| 1731 | bool any_rescaled = True(image.flags & ImageFlagBits::Rescaled); | 1759 | bool any_rescaled = True(image.flags & ImageFlagBits::Rescaled); |
| 1760 | bool any_modified = True(image.flags & ImageFlagBits::GpuModified); | ||
| 1732 | u64 most_recent_tick = image.modification_tick; | 1761 | u64 most_recent_tick = image.modification_tick; |
| 1733 | for (const AliasedImage& aliased : image.aliased_images) { | 1762 | for (const AliasedImage& aliased : image.aliased_images) { |
| 1734 | ImageBase& aliased_image = slot_images[aliased.id]; | 1763 | ImageBase& aliased_image = slot_images[aliased.id]; |
| @@ -1736,9 +1765,7 @@ void TextureCache<P>::SynchronizeAliases(ImageId image_id) { | |||
| 1736 | most_recent_tick = std::max(most_recent_tick, aliased_image.modification_tick); | 1765 | most_recent_tick = std::max(most_recent_tick, aliased_image.modification_tick); |
| 1737 | aliased_images.push_back(&aliased); | 1766 | aliased_images.push_back(&aliased); |
| 1738 | any_rescaled |= True(aliased_image.flags & ImageFlagBits::Rescaled); | 1767 | any_rescaled |= True(aliased_image.flags & ImageFlagBits::Rescaled); |
| 1739 | if (True(aliased_image.flags & ImageFlagBits::GpuModified)) { | 1768 | any_modified |= True(aliased_image.flags & ImageFlagBits::GpuModified); |
| 1740 | image.flags |= ImageFlagBits::GpuModified; | ||
| 1741 | } | ||
| 1742 | } | 1769 | } |
| 1743 | } | 1770 | } |
| 1744 | if (aliased_images.empty()) { | 1771 | if (aliased_images.empty()) { |
| @@ -1753,6 +1780,9 @@ void TextureCache<P>::SynchronizeAliases(ImageId image_id) { | |||
| 1753 | } | 1780 | } |
| 1754 | } | 1781 | } |
| 1755 | image.modification_tick = most_recent_tick; | 1782 | image.modification_tick = most_recent_tick; |
| 1783 | if (any_modified) { | ||
| 1784 | image.flags |= ImageFlagBits::GpuModified; | ||
| 1785 | } | ||
| 1756 | std::ranges::sort(aliased_images, [this](const AliasedImage* lhs, const AliasedImage* rhs) { | 1786 | std::ranges::sort(aliased_images, [this](const AliasedImage* lhs, const AliasedImage* rhs) { |
| 1757 | const ImageBase& lhs_image = slot_images[lhs->id]; | 1787 | const ImageBase& lhs_image = slot_images[lhs->id]; |
| 1758 | const ImageBase& rhs_image = slot_images[rhs->id]; | 1788 | const ImageBase& rhs_image = slot_images[rhs->id]; |
| @@ -1931,6 +1961,7 @@ std::pair<FramebufferId, ImageViewId> TextureCache<P>::RenderTargetFromImage( | |||
| 1931 | .color_buffer_ids = {color_view_id}, | 1961 | .color_buffer_ids = {color_view_id}, |
| 1932 | .depth_buffer_id = depth_view_id, | 1962 | .depth_buffer_id = depth_view_id, |
| 1933 | .size = {extent.width >> samples_x, extent.height >> samples_y}, | 1963 | .size = {extent.width >> samples_x, extent.height >> samples_y}, |
| 1964 | .is_rescaled = is_rescaled, | ||
| 1934 | }); | 1965 | }); |
| 1935 | return {framebuffer_id, view_id}; | 1966 | return {framebuffer_id, view_id}; |
| 1936 | } | 1967 | } |
| @@ -1943,7 +1974,7 @@ bool TextureCache<P>::IsFullClear(ImageViewId id) { | |||
| 1943 | const ImageViewBase& image_view = slot_image_views[id]; | 1974 | const ImageViewBase& image_view = slot_image_views[id]; |
| 1944 | const ImageBase& image = slot_images[image_view.image_id]; | 1975 | const ImageBase& image = slot_images[image_view.image_id]; |
| 1945 | const Extent3D size = image_view.size; | 1976 | const Extent3D size = image_view.size; |
| 1946 | const auto& regs = maxwell3d.regs; | 1977 | const auto& regs = maxwell3d->regs; |
| 1947 | const auto& scissor = regs.scissor_test[0]; | 1978 | const auto& scissor = regs.scissor_test[0]; |
| 1948 | if (image.info.resources.levels > 1 || image.info.resources.layers > 1) { | 1979 | if (image.info.resources.levels > 1 || image.info.resources.layers > 1) { |
| 1949 | // Images with multiple resources can't be cleared in a single call | 1980 | // Images with multiple resources can't be cleared in a single call |
| @@ -1958,4 +1989,19 @@ bool TextureCache<P>::IsFullClear(ImageViewId id) { | |||
| 1958 | scissor.max_y >= size.height; | 1989 | scissor.max_y >= size.height; |
| 1959 | } | 1990 | } |
| 1960 | 1991 | ||
| 1992 | template <class P> | ||
| 1993 | void TextureCache<P>::CreateChannel(struct Tegra::Control::ChannelState& channel) { | ||
| 1994 | VideoCommon::ChannelSetupCaches<TextureCacheChannelInfo>::CreateChannel(channel); | ||
| 1995 | const auto it = channel_map.find(channel.bind_id); | ||
| 1996 | auto* this_state = &channel_storage[it->second]; | ||
| 1997 | const auto& this_as_ref = address_spaces[channel.memory_manager->GetID()]; | ||
| 1998 | this_state->gpu_page_table = &gpu_page_table_storage[this_as_ref.storage_id]; | ||
| 1999 | } | ||
| 2000 | |||
| 2001 | /// Called when a new GPU memory address space is registered. | ||
| 2002 | template <class P> | ||
| 2003 | void TextureCache<P>::OnGPUASRegister([[maybe_unused]] size_t map_id) { | ||
| 2004 | gpu_page_table_storage.emplace_back(); | ||
| 2005 | } | ||
| 2006 | |||
| 1961 | } // namespace VideoCommon | 2007 | } // namespace VideoCommon |
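OnGPUASRegister appends one TextureCacheGPUMap per registered GPU address space, and CreateChannel then stores a raw pointer to the map matching the channel's memory manager. Keeping these maps in a std::deque (see the texture_cache_base.h change below) is what keeps the cached gpu_page_table pointers safe: unlike std::vector, a deque growing at the back never relocates existing elements. A small self-contained illustration of that property:

    #include <cassert>
    #include <deque>

    int main() {
        std::deque<int> storage{1, 2, 3};
        int* first = &storage.front();
        for (int i = 0; i < 1000; ++i) {
            storage.emplace_back(i); // growth at the end never moves existing elements
        }
        assert(first == &storage.front() && *first == 1); // pointer is still valid
        // The same pattern over a std::vector would dangle as soon as the vector
        // reallocates, which is presumably why a deque was chosen for the storage.
        return 0;
    }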
diff --git a/src/video_core/texture_cache/texture_cache_base.h b/src/video_core/texture_cache/texture_cache_base.h index 7e6c6cef2..2fa8445eb 100644 --- a/src/video_core/texture_cache/texture_cache_base.h +++ b/src/video_core/texture_cache/texture_cache_base.h | |||
| @@ -1,8 +1,10 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: 2021 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-License-Identifier: GPL-3.0-or-later |
| 3 | 3 | ||
| 4 | #pragma once | 4 | #pragma once |
| 5 | 5 | ||
| 6 | #include <deque> | ||
| 7 | #include <limits> | ||
| 6 | #include <mutex> | 8 | #include <mutex> |
| 7 | #include <span> | 9 | #include <span> |
| 8 | #include <type_traits> | 10 | #include <type_traits> |
| @@ -11,9 +13,11 @@ | |||
| 11 | #include <queue> | 13 | #include <queue> |
| 12 | 14 | ||
| 13 | #include "common/common_types.h" | 15 | #include "common/common_types.h" |
| 16 | #include "common/hash.h" | ||
| 14 | #include "common/literals.h" | 17 | #include "common/literals.h" |
| 15 | #include "common/lru_cache.h" | 18 | #include "common/lru_cache.h" |
| 16 | #include "video_core/compatible_formats.h" | 19 | #include "video_core/compatible_formats.h" |
| 20 | #include "video_core/control/channel_state_cache.h" | ||
| 17 | #include "video_core/delayed_destruction_ring.h" | 21 | #include "video_core/delayed_destruction_ring.h" |
| 18 | #include "video_core/engines/fermi_2d.h" | 22 | #include "video_core/engines/fermi_2d.h" |
| 19 | #include "video_core/surface.h" | 23 | #include "video_core/surface.h" |
| @@ -26,6 +30,10 @@ | |||
| 26 | #include "video_core/texture_cache/types.h" | 30 | #include "video_core/texture_cache/types.h" |
| 27 | #include "video_core/textures/texture.h" | 31 | #include "video_core/textures/texture.h" |
| 28 | 32 | ||
| 33 | namespace Tegra::Control { | ||
| 34 | struct ChannelState; | ||
| 35 | } | ||
| 36 | |||
| 29 | namespace VideoCommon { | 37 | namespace VideoCommon { |
| 30 | 38 | ||
| 31 | using Tegra::Texture::SwizzleSource; | 39 | using Tegra::Texture::SwizzleSource; |
| @@ -44,8 +52,35 @@ struct ImageViewInOut { | |||
| 44 | ImageViewId id{}; | 52 | ImageViewId id{}; |
| 45 | }; | 53 | }; |
| 46 | 54 | ||
| 55 | using TextureCacheGPUMap = std::unordered_map<u64, std::vector<ImageId>, Common::IdentityHash<u64>>; | ||
| 56 | |||
| 57 | class TextureCacheChannelInfo : public ChannelInfo { | ||
| 58 | public: | ||
| 59 | TextureCacheChannelInfo() = delete; | ||
| 60 | TextureCacheChannelInfo(Tegra::Control::ChannelState& state) noexcept; | ||
| 61 | TextureCacheChannelInfo(const TextureCacheChannelInfo& state) = delete; | ||
| 62 | TextureCacheChannelInfo& operator=(const TextureCacheChannelInfo&) = delete; | ||
| 63 | TextureCacheChannelInfo(TextureCacheChannelInfo&& other) noexcept = default; | ||
| 64 | TextureCacheChannelInfo& operator=(TextureCacheChannelInfo&& other) noexcept = default; | ||
| 65 | |||
| 66 | DescriptorTable<TICEntry> graphics_image_table{gpu_memory}; | ||
| 67 | DescriptorTable<TSCEntry> graphics_sampler_table{gpu_memory}; | ||
| 68 | std::vector<SamplerId> graphics_sampler_ids; | ||
| 69 | std::vector<ImageViewId> graphics_image_view_ids; | ||
| 70 | |||
| 71 | DescriptorTable<TICEntry> compute_image_table{gpu_memory}; | ||
| 72 | DescriptorTable<TSCEntry> compute_sampler_table{gpu_memory}; | ||
| 73 | std::vector<SamplerId> compute_sampler_ids; | ||
| 74 | std::vector<ImageViewId> compute_image_view_ids; | ||
| 75 | |||
| 76 | std::unordered_map<TICEntry, ImageViewId> image_views; | ||
| 77 | std::unordered_map<TSCEntry, SamplerId> samplers; | ||
| 78 | |||
| 79 | TextureCacheGPUMap* gpu_page_table; | ||
| 80 | }; | ||
| 81 | |||
| 47 | template <class P> | 82 | template <class P> |
| 48 | class TextureCache { | 83 | class TextureCache : public VideoCommon::ChannelSetupCaches<TextureCacheChannelInfo> { |
| 49 | /// Address shift for caching images into a hash table | 84 | /// Address shift for caching images into a hash table |
| 50 | static constexpr u64 YUZU_PAGEBITS = 20; | 85 | static constexpr u64 YUZU_PAGEBITS = 20; |
| 51 | 86 | ||
| @@ -58,6 +93,8 @@ class TextureCache { | |||
| 58 | /// True when the API can provide info about the memory of the device. | 93 | /// True when the API can provide info about the memory of the device. |
| 59 | static constexpr bool HAS_DEVICE_MEMORY_INFO = P::HAS_DEVICE_MEMORY_INFO; | 94 | static constexpr bool HAS_DEVICE_MEMORY_INFO = P::HAS_DEVICE_MEMORY_INFO; |
| 60 | 95 | ||
| 96 | static constexpr size_t UNSET_CHANNEL{std::numeric_limits<size_t>::max()}; | ||
| 97 | |||
| 61 | static constexpr s64 TARGET_THRESHOLD = 4_GiB; | 98 | static constexpr s64 TARGET_THRESHOLD = 4_GiB; |
| 62 | static constexpr s64 DEFAULT_EXPECTED_MEMORY = 1_GiB + 125_MiB; | 99 | static constexpr s64 DEFAULT_EXPECTED_MEMORY = 1_GiB + 125_MiB; |
| 63 | static constexpr s64 DEFAULT_CRITICAL_MEMORY = 1_GiB + 625_MiB; | 100 | static constexpr s64 DEFAULT_CRITICAL_MEMORY = 1_GiB + 625_MiB; |
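For scale, with the binary literals from common/literals.h (1_MiB = 1024 * 1024 bytes, 1_GiB = 1024 MiB), DEFAULT_EXPECTED_MEMORY works out to 1149 MiB and DEFAULT_CRITICAL_MEMORY to 1649 MiB, both well below the 4_GiB TARGET_THRESHOLD.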
| @@ -77,16 +114,8 @@ class TextureCache { | |||
| 77 | PixelFormat src_format; | 114 | PixelFormat src_format; |
| 78 | }; | 115 | }; |
| 79 | 116 | ||
| 80 | template <typename T> | ||
| 81 | struct IdentityHash { | ||
| 82 | [[nodiscard]] size_t operator()(T value) const noexcept { | ||
| 83 | return static_cast<size_t>(value); | ||
| 84 | } | ||
| 85 | }; | ||
| 86 | |||
| 87 | public: | 117 | public: |
| 88 | explicit TextureCache(Runtime&, VideoCore::RasterizerInterface&, Tegra::Engines::Maxwell3D&, | 118 | explicit TextureCache(Runtime&, VideoCore::RasterizerInterface&); |
| 89 | Tegra::Engines::KeplerCompute&, Tegra::MemoryManager&); | ||
| 90 | 119 | ||
| 91 | /// Notify the cache that a new frame has been queued | 120 | /// Notify the cache that a new frame has been queued |
| 92 | void TickFrame(); | 121 | void TickFrame(); |
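The removed IdentityHash functor is not gone: the page tables now use Common::IdentityHash from common/hash.h, newly included at the top of this header. Going by the lines removed here, the functor simply forwards the key as its own hash, which is adequate because the keys are already page indices:

    // As removed above; the common/hash.h version is expected to be equivalent.
    template <typename T>
    struct IdentityHash {
        [[nodiscard]] size_t operator()(T value) const noexcept {
            return static_cast<size_t>(value); // the key itself is the hash
        }
    };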
| @@ -142,7 +171,7 @@ public: | |||
| 142 | void UnmapMemory(VAddr cpu_addr, size_t size); | 171 | void UnmapMemory(VAddr cpu_addr, size_t size); |
| 143 | 172 | ||
| 144 | /// Remove images in a region | 173 | /// Remove images in a region |
| 145 | void UnmapGPUMemory(GPUVAddr gpu_addr, size_t size); | 174 | void UnmapGPUMemory(size_t as_id, GPUVAddr gpu_addr, size_t size); |
| 146 | 175 | ||
| 147 | /// Blit an image with the given parameters | 176 | /// Blit an image with the given parameters |
| 148 | void BlitImage(const Tegra::Engines::Fermi2D::Surface& dst, | 177 | void BlitImage(const Tegra::Engines::Fermi2D::Surface& dst, |
| @@ -171,6 +200,9 @@ public: | |||
| 171 | 200 | ||
| 172 | [[nodiscard]] bool IsRescaling(const ImageViewBase& image_view) const noexcept; | 201 | [[nodiscard]] bool IsRescaling(const ImageViewBase& image_view) const noexcept; |
| 173 | 202 | ||
| 203 | /// Create channel state. | ||
| 204 | void CreateChannel(Tegra::Control::ChannelState& channel) final override; | ||
| 205 | |||
| 174 | std::mutex mutex; | 206 | std::mutex mutex; |
| 175 | 207 | ||
| 176 | private: | 208 | private: |
| @@ -205,6 +237,8 @@ private: | |||
| 205 | } | 237 | } |
| 206 | } | 238 | } |
| 207 | 239 | ||
| 240 | void OnGPUASRegister(size_t map_id) final override; | ||
| 241 | |||
| 208 | /// Runs the Garbage Collector. | 242 | /// Runs the Garbage Collector. |
| 209 | void RunGarbageCollector(); | 243 | void RunGarbageCollector(); |
| 210 | 244 | ||
| @@ -273,7 +307,7 @@ private: | |||
| 273 | void ForEachImageInRegion(VAddr cpu_addr, size_t size, Func&& func); | 307 | void ForEachImageInRegion(VAddr cpu_addr, size_t size, Func&& func); |
| 274 | 308 | ||
| 275 | template <typename Func> | 309 | template <typename Func> |
| 276 | void ForEachImageInRegionGPU(GPUVAddr gpu_addr, size_t size, Func&& func); | 310 | void ForEachImageInRegionGPU(size_t as_id, GPUVAddr gpu_addr, size_t size, Func&& func); |
| 277 | 311 | ||
| 278 | template <typename Func> | 312 | template <typename Func> |
| 279 | void ForEachSparseImageInRegion(GPUVAddr gpu_addr, size_t size, Func&& func); | 313 | void ForEachSparseImageInRegion(GPUVAddr gpu_addr, size_t size, Func&& func); |
| @@ -338,31 +372,16 @@ private: | |||
| 338 | u64 GetScaledImageSizeBytes(ImageBase& image); | 372 | u64 GetScaledImageSizeBytes(ImageBase& image); |
| 339 | 373 | ||
| 340 | Runtime& runtime; | 374 | Runtime& runtime; |
| 341 | VideoCore::RasterizerInterface& rasterizer; | ||
| 342 | Tegra::Engines::Maxwell3D& maxwell3d; | ||
| 343 | Tegra::Engines::KeplerCompute& kepler_compute; | ||
| 344 | Tegra::MemoryManager& gpu_memory; | ||
| 345 | 375 | ||
| 346 | DescriptorTable<TICEntry> graphics_image_table{gpu_memory}; | 376 | VideoCore::RasterizerInterface& rasterizer; |
| 347 | DescriptorTable<TSCEntry> graphics_sampler_table{gpu_memory}; | 377 | std::deque<TextureCacheGPUMap> gpu_page_table_storage; |
| 348 | std::vector<SamplerId> graphics_sampler_ids; | ||
| 349 | std::vector<ImageViewId> graphics_image_view_ids; | ||
| 350 | |||
| 351 | DescriptorTable<TICEntry> compute_image_table{gpu_memory}; | ||
| 352 | DescriptorTable<TSCEntry> compute_sampler_table{gpu_memory}; | ||
| 353 | std::vector<SamplerId> compute_sampler_ids; | ||
| 354 | std::vector<ImageViewId> compute_image_view_ids; | ||
| 355 | 378 | ||
| 356 | RenderTargets render_targets; | 379 | RenderTargets render_targets; |
| 357 | 380 | ||
| 358 | std::unordered_map<TICEntry, ImageViewId> image_views; | ||
| 359 | std::unordered_map<TSCEntry, SamplerId> samplers; | ||
| 360 | std::unordered_map<RenderTargets, FramebufferId> framebuffers; | 381 | std::unordered_map<RenderTargets, FramebufferId> framebuffers; |
| 361 | 382 | ||
| 362 | std::unordered_map<u64, std::vector<ImageMapId>, IdentityHash<u64>> page_table; | 383 | std::unordered_map<u64, std::vector<ImageMapId>, Common::IdentityHash<u64>> page_table; |
| 363 | std::unordered_map<u64, std::vector<ImageId>, IdentityHash<u64>> gpu_page_table; | 384 | std::unordered_map<u64, std::vector<ImageId>, Common::IdentityHash<u64>> sparse_page_table; |
| 364 | std::unordered_map<u64, std::vector<ImageId>, IdentityHash<u64>> sparse_page_table; | ||
| 365 | |||
| 366 | std::unordered_map<ImageId, std::vector<ImageViewId>> sparse_views; | 385 | std::unordered_map<ImageId, std::vector<ImageViewId>> sparse_views; |
| 367 | 386 | ||
| 368 | VAddr virtual_invalid_space{}; | 387 | VAddr virtual_invalid_space{}; |
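All of the lookup structures at the bottom of the class (page_table on the CPU side, sparse_page_table, and the per-address-space TextureCacheGPUMap entries) are keyed by a coarse page index rather than by raw addresses: YUZU_PAGEBITS is 20, so every bucket covers 1 MiB of address space, and Common::IdentityHash is enough because the page index is already a well-behaved integer key. A minimal sketch of the keying with an illustrative address:

    #include <cstdint>

    constexpr std::uint64_t YUZU_PAGEBITS = 20; // from TextureCache above: 1 MiB buckets

    constexpr std::uint64_t PageIndex(std::uint64_t addr) {
        return addr >> YUZU_PAGEBITS;
    }

    // Example: an image starting at 0x123F'FF00 with 0x200 bytes crosses a 1 MiB
    // boundary, so the page walks register it under both keys 0x123 and 0x124.
    static_assert(PageIndex(0x123FFF00) == 0x123);
    static_assert(PageIndex(0x123FFF00 + 0x200 - 1) == 0x124);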
diff --git a/src/video_core/texture_cache/util.cpp b/src/video_core/texture_cache/util.cpp index 1820823b2..1223df5a0 100644 --- a/src/video_core/texture_cache/util.cpp +++ b/src/video_core/texture_cache/util.cpp | |||
| @@ -517,7 +517,6 @@ void SwizzleBlockLinearImage(Tegra::MemoryManager& gpu_memory, GPUVAddr gpu_addr | |||
| 517 | const u32 host_bytes_per_layer = num_blocks_per_layer * bytes_per_block; | 517 | const u32 host_bytes_per_layer = num_blocks_per_layer * bytes_per_block; |
| 518 | 518 | ||
| 519 | UNIMPLEMENTED_IF(info.tile_width_spacing > 0); | 519 | UNIMPLEMENTED_IF(info.tile_width_spacing > 0); |
| 520 | |||
| 521 | UNIMPLEMENTED_IF(copy.image_offset.x != 0); | 520 | UNIMPLEMENTED_IF(copy.image_offset.x != 0); |
| 522 | UNIMPLEMENTED_IF(copy.image_offset.y != 0); | 521 | UNIMPLEMENTED_IF(copy.image_offset.y != 0); |
| 523 | UNIMPLEMENTED_IF(copy.image_offset.z != 0); | 522 | UNIMPLEMENTED_IF(copy.image_offset.z != 0); |
| @@ -755,7 +754,7 @@ bool IsValidEntry(const Tegra::MemoryManager& gpu_memory, const TICEntry& config | |||
| 755 | if (address == 0) { | 754 | if (address == 0) { |
| 756 | return false; | 755 | return false; |
| 757 | } | 756 | } |
| 758 | if (address > (1ULL << 48)) { | 757 | if (address >= (1ULL << 40)) { |
| 759 | return false; | 758 | return false; |
| 760 | } | 759 | } |
| 761 | if (gpu_memory.GpuToCpuAddress(address).has_value()) { | 760 | if (gpu_memory.GpuToCpuAddress(address).has_value()) { |
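The upper-bound check in IsValidEntry is tightened from 2^48 to 2^40 and made exclusive, matching the 40-bit GPU virtual address range this change assumes elsewhere (the fake-address base in InsertImage is likewise built around bit 40). In concrete numbers, assuming the limit is exactly 40 bits:

    // Illustrative bound only; IsValidEntry additionally requires a non-zero
    // address that actually translates to a CPU address.
    constexpr unsigned long long GPU_VA_LIMIT = 1ULL << 40; // 1 TiB of GPU virtual addresses
    static_assert(GPU_VA_LIMIT == 1'099'511'627'776ULL);

    constexpr bool IsPlausibleGpuAddress(unsigned long long address) {
        return address != 0 && address < GPU_VA_LIMIT;
    }
    static_assert(!IsPlausibleGpuAddress(1ULL << 40));      // rejected after this change
    static_assert(IsPlausibleGpuAddress((1ULL << 40) - 1)); // last valid address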
diff --git a/src/video_core/textures/decoders.cpp b/src/video_core/textures/decoders.cpp index 913f8ebcb..52d067a2d 100644 --- a/src/video_core/textures/decoders.cpp +++ b/src/video_core/textures/decoders.cpp | |||
| @@ -35,7 +35,7 @@ void incrpdep(u32& value) { | |||
| 35 | 35 | ||
| 36 | template <bool TO_LINEAR, u32 BYTES_PER_PIXEL> | 36 | template <bool TO_LINEAR, u32 BYTES_PER_PIXEL> |
| 37 | void SwizzleImpl(std::span<u8> output, std::span<const u8> input, u32 width, u32 height, u32 depth, | 37 | void SwizzleImpl(std::span<u8> output, std::span<const u8> input, u32 width, u32 height, u32 depth, |
| 38 | u32 block_height, u32 block_depth, u32 stride_alignment) { | 38 | u32 block_height, u32 block_depth, u32 stride) { |
| 39 | // The origin of the transformation can be configured here, leave it as zero as the current API | 39 | // The origin of the transformation can be configured here, leave it as zero as the current API |
| 40 | // doesn't expose it. | 40 | // doesn't expose it. |
| 41 | static constexpr u32 origin_x = 0; | 41 | static constexpr u32 origin_x = 0; |
| @@ -45,7 +45,6 @@ void SwizzleImpl(std::span<u8> output, std::span<const u8> input, u32 width, u32 | |||
| 45 | // We can configure here a custom pitch | 45 | // We can configure here a custom pitch |
| 46 | // As it's not exposed 'width * BYTES_PER_PIXEL' will be the expected pitch. | 46 | // As it's not exposed 'width * BYTES_PER_PIXEL' will be the expected pitch. |
| 47 | const u32 pitch = width * BYTES_PER_PIXEL; | 47 | const u32 pitch = width * BYTES_PER_PIXEL; |
| 48 | const u32 stride = Common::AlignUpLog2(width, stride_alignment) * BYTES_PER_PIXEL; | ||
| 49 | 48 | ||
| 50 | const u32 gobs_in_x = Common::DivCeilLog2(stride, GOB_SIZE_X_SHIFT); | 49 | const u32 gobs_in_x = Common::DivCeilLog2(stride, GOB_SIZE_X_SHIFT); |
| 51 | const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height + block_depth); | 50 | const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height + block_depth); |
| @@ -89,6 +88,69 @@ void SwizzleImpl(std::span<u8> output, std::span<const u8> input, u32 width, u32 | |||
| 89 | } | 88 | } |
| 90 | } | 89 | } |
| 91 | 90 | ||
| 91 | template <bool TO_LINEAR, u32 BYTES_PER_PIXEL> | ||
| 92 | void SwizzleSubrectImpl(std::span<u8> output, std::span<const u8> input, u32 width, u32 height, | ||
| 93 | u32 depth, u32 origin_x, u32 origin_y, u32 extent_x, u32 num_lines, | ||
| 94 | u32 block_height, u32 block_depth, u32 pitch_linear) { | ||
| 95 | // The origin of the transformation can be configured here, leave it as zero as the current API | ||
| 96 | // doesn't expose it. | ||
| 97 | static constexpr u32 origin_z = 0; | ||
| 98 | |||
| 99 | // The linear-side pitch is supplied by the caller as 'pitch_linear' | ||
| 100 | // instead of being derived from 'width * BYTES_PER_PIXEL'. | ||
| 101 | const u32 pitch = pitch_linear; | ||
| 102 | const u32 stride = Common::AlignUpLog2(width * BYTES_PER_PIXEL, GOB_SIZE_X_SHIFT); | ||
| 103 | |||
| 104 | const u32 gobs_in_x = Common::DivCeilLog2(stride, GOB_SIZE_X_SHIFT); | ||
| 105 | const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height + block_depth); | ||
| 106 | const u32 slice_size = | ||
| 107 | Common::DivCeilLog2(height, block_height + GOB_SIZE_Y_SHIFT) * block_size; | ||
| 108 | |||
| 109 | const u32 block_height_mask = (1U << block_height) - 1; | ||
| 110 | const u32 block_depth_mask = (1U << block_depth) - 1; | ||
| 111 | const u32 x_shift = GOB_SIZE_SHIFT + block_height + block_depth; | ||
| 112 | |||
| 113 | u32 unprocessed_lines = num_lines; | ||
| 114 | u32 extent_y = std::min(num_lines, height - origin_y); | ||
| 115 | |||
| 116 | for (u32 slice = 0; slice < depth; ++slice) { | ||
| 117 | const u32 z = slice + origin_z; | ||
| 118 | const u32 offset_z = (z >> block_depth) * slice_size + | ||
| 119 | ((z & block_depth_mask) << (GOB_SIZE_SHIFT + block_height)); | ||
| 120 | const u32 lines_in_y = std::min(unprocessed_lines, extent_y); | ||
| 121 | for (u32 line = 0; line < lines_in_y; ++line) { | ||
| 122 | const u32 y = line + origin_y; | ||
| 123 | const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(y); | ||
| 124 | |||
| 125 | const u32 block_y = y >> GOB_SIZE_Y_SHIFT; | ||
| 126 | const u32 offset_y = (block_y >> block_height) * block_size + | ||
| 127 | ((block_y & block_height_mask) << GOB_SIZE_SHIFT); | ||
| 128 | |||
| 129 | u32 swizzled_x = pdep<SWIZZLE_X_BITS>(origin_x * BYTES_PER_PIXEL); | ||
| 130 | for (u32 column = 0; column < extent_x; | ||
| 131 | ++column, incrpdep<SWIZZLE_X_BITS, BYTES_PER_PIXEL>(swizzled_x)) { | ||
| 132 | const u32 x = (column + origin_x) * BYTES_PER_PIXEL; | ||
| 133 | const u32 offset_x = (x >> GOB_SIZE_X_SHIFT) << x_shift; | ||
| 134 | |||
| 135 | const u32 base_swizzled_offset = offset_z + offset_y + offset_x; | ||
| 136 | const u32 swizzled_offset = base_swizzled_offset + (swizzled_x | swizzled_y); | ||
| 137 | |||
| 138 | const u32 unswizzled_offset = | ||
| 139 | slice * pitch * height + line * pitch + column * BYTES_PER_PIXEL; | ||
| 140 | |||
| 141 | u8* const dst = &output[TO_LINEAR ? swizzled_offset : unswizzled_offset]; | ||
| 142 | const u8* const src = &input[TO_LINEAR ? unswizzled_offset : swizzled_offset]; | ||
| 143 | |||
| 144 | std::memcpy(dst, src, BYTES_PER_PIXEL); | ||
| 145 | } | ||
| 146 | } | ||
| 147 | unprocessed_lines -= lines_in_y; | ||
| 148 | if (unprocessed_lines == 0) { | ||
| 149 | return; | ||
| 150 | } | ||
| 151 | } | ||
| 152 | } | ||
| 153 | |||
| 92 | template <bool TO_LINEAR> | 154 | template <bool TO_LINEAR> |
| 93 | void Swizzle(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width, | 155 | void Swizzle(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width, |
| 94 | u32 height, u32 depth, u32 block_height, u32 block_depth, u32 stride_alignment) { | 156 | u32 height, u32 depth, u32 block_height, u32 block_depth, u32 stride_alignment) { |
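SwizzleSubrectImpl builds the block-linear offset from three coarse terms (offset_z per slice, offset_y per GOB row, offset_x per GOB column) plus the intra-GOB bits produced by the pdep-driven swizzled_x | swizzled_y. For readers without the bit masks memorized, the intra-GOB term can be written in plain arithmetic; a standalone sketch of it, assuming the standard 64-byte by 8-row (512-byte) Tegra GOB layout (MakeSwizzleTable in decoders.h tabulates the same mapping):

    #include <cstdint>

    // Intra-GOB byte offset for byte column x (0..63) and row y (0..7).
    // Sketch only: the real code folds this into pdep/incrpdep over bit masks.
    constexpr std::uint32_t IntraGobOffset(std::uint32_t x, std::uint32_t y) {
        return ((x % 64) / 32) * 256   // which 32-byte half of the GOB width
             + ((y % 8) / 2) * 64      // which pair of rows
             + ((x % 32) / 16) * 32    // which 16-byte sector within the half
             + (y % 2) * 16            // odd or even row
             + (x % 16);               // byte within the 16-byte sector
    }

    static_assert(IntraGobOffset(0, 0) == 0);
    static_assert(IntraGobOffset(16, 0) == 32);  // the next 16-byte sector starts 32 bytes in
    static_assert(IntraGobOffset(0, 1) == 16);
    static_assert(IntraGobOffset(63, 7) == 511); // last byte of the 512-byte GOB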
| @@ -111,122 +173,39 @@ void Swizzle(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixe | |||
| 111 | } | 173 | } |
| 112 | } | 174 | } |
| 113 | 175 | ||
| 114 | template <u32 BYTES_PER_PIXEL> | ||
| 115 | void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width, | ||
| 116 | u8* swizzled_data, const u8* unswizzled_data, u32 block_height_bit, | ||
| 117 | u32 offset_x, u32 offset_y) { | ||
| 118 | const u32 block_height = 1U << block_height_bit; | ||
| 119 | const u32 image_width_in_gobs = | ||
| 120 | (swizzled_width * BYTES_PER_PIXEL + (GOB_SIZE_X - 1)) / GOB_SIZE_X; | ||
| 121 | for (u32 line = 0; line < subrect_height; ++line) { | ||
| 122 | const u32 dst_y = line + offset_y; | ||
| 123 | const u32 gob_address_y = | ||
| 124 | (dst_y / (GOB_SIZE_Y * block_height)) * GOB_SIZE * block_height * image_width_in_gobs + | ||
| 125 | ((dst_y % (GOB_SIZE_Y * block_height)) / GOB_SIZE_Y) * GOB_SIZE; | ||
| 126 | |||
| 127 | const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(dst_y); | ||
| 128 | u32 swizzled_x = pdep<SWIZZLE_X_BITS>(offset_x * BYTES_PER_PIXEL); | ||
| 129 | for (u32 x = 0; x < subrect_width; | ||
| 130 | ++x, incrpdep<SWIZZLE_X_BITS, BYTES_PER_PIXEL>(swizzled_x)) { | ||
| 131 | const u32 dst_x = x + offset_x; | ||
| 132 | const u32 gob_address = | ||
| 133 | gob_address_y + (dst_x * BYTES_PER_PIXEL / GOB_SIZE_X) * GOB_SIZE * block_height; | ||
| 134 | const u32 swizzled_offset = gob_address + (swizzled_x | swizzled_y); | ||
| 135 | const u32 unswizzled_offset = line * source_pitch + x * BYTES_PER_PIXEL; | ||
| 136 | |||
| 137 | const u8* const source_line = unswizzled_data + unswizzled_offset; | ||
| 138 | u8* const dest_addr = swizzled_data + swizzled_offset; | ||
| 139 | std::memcpy(dest_addr, source_line, BYTES_PER_PIXEL); | ||
| 140 | } | ||
| 141 | } | ||
| 142 | } | ||
| 143 | |||
| 144 | template <u32 BYTES_PER_PIXEL> | ||
| 145 | void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 block_height, | ||
| 146 | u32 origin_x, u32 origin_y, u8* output, const u8* input) { | ||
| 147 | const u32 stride = width * BYTES_PER_PIXEL; | ||
| 148 | const u32 gobs_in_x = (stride + GOB_SIZE_X - 1) / GOB_SIZE_X; | ||
| 149 | const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height); | ||
| 150 | |||
| 151 | const u32 block_height_mask = (1U << block_height) - 1; | ||
| 152 | const u32 x_shift = GOB_SIZE_SHIFT + block_height; | ||
| 153 | |||
| 154 | for (u32 line = 0; line < line_count; ++line) { | ||
| 155 | const u32 src_y = line + origin_y; | ||
| 156 | const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(src_y); | ||
| 157 | |||
| 158 | const u32 block_y = src_y >> GOB_SIZE_Y_SHIFT; | ||
| 159 | const u32 src_offset_y = (block_y >> block_height) * block_size + | ||
| 160 | ((block_y & block_height_mask) << GOB_SIZE_SHIFT); | ||
| 161 | |||
| 162 | u32 swizzled_x = pdep<SWIZZLE_X_BITS>(origin_x * BYTES_PER_PIXEL); | ||
| 163 | for (u32 column = 0; column < line_length_in; | ||
| 164 | ++column, incrpdep<SWIZZLE_X_BITS, BYTES_PER_PIXEL>(swizzled_x)) { | ||
| 165 | const u32 src_x = (column + origin_x) * BYTES_PER_PIXEL; | ||
| 166 | const u32 src_offset_x = (src_x >> GOB_SIZE_X_SHIFT) << x_shift; | ||
| 167 | |||
| 168 | const u32 swizzled_offset = src_offset_y + src_offset_x + (swizzled_x | swizzled_y); | ||
| 169 | const u32 unswizzled_offset = line * pitch + column * BYTES_PER_PIXEL; | ||
| 170 | |||
| 171 | std::memcpy(output + unswizzled_offset, input + swizzled_offset, BYTES_PER_PIXEL); | ||
| 172 | } | ||
| 173 | } | ||
| 174 | } | ||
| 175 | |||
| 176 | template <u32 BYTES_PER_PIXEL> | ||
| 177 | void SwizzleSliceToVoxel(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 height, | ||
| 178 | u32 block_height, u32 block_depth, u32 origin_x, u32 origin_y, u8* output, | ||
| 179 | const u8* input) { | ||
| 180 | UNIMPLEMENTED_IF(origin_x > 0); | ||
| 181 | UNIMPLEMENTED_IF(origin_y > 0); | ||
| 182 | |||
| 183 | const u32 stride = width * BYTES_PER_PIXEL; | ||
| 184 | const u32 gobs_in_x = (stride + GOB_SIZE_X - 1) / GOB_SIZE_X; | ||
| 185 | const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height + block_depth); | ||
| 186 | |||
| 187 | const u32 block_height_mask = (1U << block_height) - 1; | ||
| 188 | const u32 x_shift = static_cast<u32>(GOB_SIZE_SHIFT) + block_height + block_depth; | ||
| 189 | |||
| 190 | for (u32 line = 0; line < line_count; ++line) { | ||
| 191 | const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(line); | ||
| 192 | const u32 block_y = line / GOB_SIZE_Y; | ||
| 193 | const u32 dst_offset_y = | ||
| 194 | (block_y >> block_height) * block_size + (block_y & block_height_mask) * GOB_SIZE; | ||
| 195 | |||
| 196 | u32 swizzled_x = 0; | ||
| 197 | for (u32 x = 0; x < line_length_in; ++x, incrpdep<SWIZZLE_X_BITS, 1>(swizzled_x)) { | ||
| 198 | const u32 dst_offset = | ||
| 199 | ((x / GOB_SIZE_X) << x_shift) + dst_offset_y + (swizzled_x | swizzled_y); | ||
| 200 | const u32 src_offset = x * BYTES_PER_PIXEL + line * pitch; | ||
| 201 | std::memcpy(output + dst_offset, input + src_offset, BYTES_PER_PIXEL); | ||
| 202 | } | ||
| 203 | } | ||
| 204 | } | ||
| 205 | } // Anonymous namespace | 176 | } // Anonymous namespace |
| 206 | 177 | ||
| 207 | void UnswizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, | 178 | void UnswizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, |
| 208 | u32 width, u32 height, u32 depth, u32 block_height, u32 block_depth, | 179 | u32 width, u32 height, u32 depth, u32 block_height, u32 block_depth, |
| 209 | u32 stride_alignment) { | 180 | u32 stride_alignment) { |
| 181 | const u32 stride = Common::AlignUpLog2(width, stride_alignment) * bytes_per_pixel; | ||
| 182 | const u32 new_bpp = std::min(4U, static_cast<u32>(std::countr_zero(width * bytes_per_pixel))); | ||
| 183 | width = (width * bytes_per_pixel) >> new_bpp; | ||
| 184 | bytes_per_pixel = 1U << new_bpp; | ||
| 210 | Swizzle<false>(output, input, bytes_per_pixel, width, height, depth, block_height, block_depth, | 185 | Swizzle<false>(output, input, bytes_per_pixel, width, height, depth, block_height, block_depth, |
| 211 | stride_alignment); | 186 | stride); |
| 212 | } | 187 | } |
| 213 | 188 | ||
| 214 | void SwizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width, | 189 | void SwizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width, |
| 215 | u32 height, u32 depth, u32 block_height, u32 block_depth, | 190 | u32 height, u32 depth, u32 block_height, u32 block_depth, |
| 216 | u32 stride_alignment) { | 191 | u32 stride_alignment) { |
| 192 | const u32 stride = Common::AlignUpLog2(width, stride_alignment) * bytes_per_pixel; | ||
| 193 | const u32 new_bpp = std::min(4U, static_cast<u32>(std::countr_zero(width * bytes_per_pixel))); | ||
| 194 | width = (width * bytes_per_pixel) >> new_bpp; | ||
| 195 | bytes_per_pixel = 1U << new_bpp; | ||
| 217 | Swizzle<true>(output, input, bytes_per_pixel, width, height, depth, block_height, block_depth, | 196 | Swizzle<true>(output, input, bytes_per_pixel, width, height, depth, block_height, block_depth, |
| 218 | stride_alignment); | 197 | stride); |
| 219 | } | 198 | } |
| 220 | 199 | ||
| 221 | void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width, | 200 | void SwizzleSubrect(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width, |
| 222 | u32 bytes_per_pixel, u8* swizzled_data, const u8* unswizzled_data, | 201 | u32 height, u32 depth, u32 origin_x, u32 origin_y, u32 extent_x, u32 extent_y, |
| 223 | u32 block_height_bit, u32 offset_x, u32 offset_y) { | 202 | u32 block_height, u32 block_depth, u32 pitch_linear) { |
| 224 | switch (bytes_per_pixel) { | 203 | switch (bytes_per_pixel) { |
| 225 | #define BPP_CASE(x) \ | 204 | #define BPP_CASE(x) \ |
| 226 | case x: \ | 205 | case x: \ |
| 227 | return SwizzleSubrect<x>(subrect_width, subrect_height, source_pitch, swizzled_width, \ | 206 | return SwizzleSubrectImpl<true, x>(output, input, width, height, depth, origin_x, \ |
| 228 | swizzled_data, unswizzled_data, block_height_bit, offset_x, \ | 207 | origin_y, extent_x, extent_y, block_height, \ |
| 229 | offset_y); | 208 | block_depth, pitch_linear); |
| 230 | BPP_CASE(1) | 209 | BPP_CASE(1) |
| 231 | BPP_CASE(2) | 210 | BPP_CASE(2) |
| 232 | BPP_CASE(3) | 211 | BPP_CASE(3) |
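The new prologue in UnswizzleTexture and SwizzleTexture folds the row into wider synthetic pixels before dispatching: it takes the largest power of two (capped at 16 bytes) that divides width * bytes_per_pixel, shrinks width and grows bytes_per_pixel by that factor, and leaves the byte layout untouched, so each memcpy in SwizzleImpl moves more bytes per iteration. The stride is computed from the original width beforehand, so only the copy granularity changes. A worked example of the folding, following the expressions above:

    #include <algorithm>
    #include <bit>
    #include <cstdint>
    #include <utility>

    // Sketch of the width/bytes_per_pixel folding used by the prologue.
    constexpr std::pair<std::uint32_t, std::uint32_t> Fold(std::uint32_t width,
                                                           std::uint32_t bytes_per_pixel) {
        const std::uint32_t row_bytes = width * bytes_per_pixel;
        const std::uint32_t new_bpp =
            std::min(4U, static_cast<std::uint32_t>(std::countr_zero(row_bytes)));
        return {row_bytes >> new_bpp, 1U << new_bpp};
    }

    // 100 pixels of 8 bytes: 800 bytes per row, countr_zero(800) = 5, capped at 4,
    // so the row is handled as 50 "pixels" of 16 bytes each.
    static_assert(Fold(100, 8).first == 50 && Fold(100, 8).second == 16);
    // An odd byte count cannot be widened: 33 one-byte pixels stay as they are.
    static_assert(Fold(33, 1).first == 33 && Fold(33, 1).second == 1);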
| @@ -241,13 +220,15 @@ void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 | |||
| 241 | } | 220 | } |
| 242 | } | 221 | } |
| 243 | 222 | ||
| 244 | void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 bytes_per_pixel, | 223 | void UnswizzleSubrect(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, |
| 245 | u32 block_height, u32 origin_x, u32 origin_y, u8* output, const u8* input) { | 224 | u32 width, u32 height, u32 depth, u32 origin_x, u32 origin_y, u32 extent_x, |
| 225 | u32 extent_y, u32 block_height, u32 block_depth, u32 pitch_linear) { | ||
| 246 | switch (bytes_per_pixel) { | 226 | switch (bytes_per_pixel) { |
| 247 | #define BPP_CASE(x) \ | 227 | #define BPP_CASE(x) \ |
| 248 | case x: \ | 228 | case x: \ |
| 249 | return UnswizzleSubrect<x>(line_length_in, line_count, pitch, width, block_height, \ | 229 | return SwizzleSubrectImpl<false, x>(output, input, width, height, depth, origin_x, \ |
| 250 | origin_x, origin_y, output, input); | 230 | origin_y, extent_x, extent_y, block_height, \ |
| 231 | block_depth, pitch_linear); | ||
| 251 | BPP_CASE(1) | 232 | BPP_CASE(1) |
| 252 | BPP_CASE(2) | 233 | BPP_CASE(2) |
| 253 | BPP_CASE(3) | 234 | BPP_CASE(3) |
| @@ -262,55 +243,6 @@ void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width, | |||
| 262 | } | 243 | } |
| 263 | } | 244 | } |
| 264 | 245 | ||
| 265 | void SwizzleSliceToVoxel(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 height, | ||
| 266 | u32 bytes_per_pixel, u32 block_height, u32 block_depth, u32 origin_x, | ||
| 267 | u32 origin_y, u8* output, const u8* input) { | ||
| 268 | switch (bytes_per_pixel) { | ||
| 269 | #define BPP_CASE(x) \ | ||
| 270 | case x: \ | ||
| 271 | return SwizzleSliceToVoxel<x>(line_length_in, line_count, pitch, width, height, \ | ||
| 272 | block_height, block_depth, origin_x, origin_y, output, \ | ||
| 273 | input); | ||
| 274 | BPP_CASE(1) | ||
| 275 | BPP_CASE(2) | ||
| 276 | BPP_CASE(3) | ||
| 277 | BPP_CASE(4) | ||
| 278 | BPP_CASE(6) | ||
| 279 | BPP_CASE(8) | ||
| 280 | BPP_CASE(12) | ||
| 281 | BPP_CASE(16) | ||
| 282 | #undef BPP_CASE | ||
| 283 | default: | ||
| 284 | ASSERT_MSG(false, "Invalid bytes_per_pixel={}", bytes_per_pixel); | ||
| 285 | } | ||
| 286 | } | ||
| 287 | |||
| 288 | void SwizzleKepler(const u32 width, const u32 height, const u32 dst_x, const u32 dst_y, | ||
| 289 | const u32 block_height_bit, const std::size_t copy_size, const u8* source_data, | ||
| 290 | u8* swizzle_data) { | ||
| 291 | const u32 block_height = 1U << block_height_bit; | ||
| 292 | const u32 image_width_in_gobs{(width + GOB_SIZE_X - 1) / GOB_SIZE_X}; | ||
| 293 | std::size_t count = 0; | ||
| 294 | for (std::size_t y = dst_y; y < height && count < copy_size; ++y) { | ||
| 295 | const std::size_t gob_address_y = | ||
| 296 | (y / (GOB_SIZE_Y * block_height)) * GOB_SIZE * block_height * image_width_in_gobs + | ||
| 297 | ((y % (GOB_SIZE_Y * block_height)) / GOB_SIZE_Y) * GOB_SIZE; | ||
| 298 | const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(static_cast<u32>(y)); | ||
| 299 | u32 swizzled_x = pdep<SWIZZLE_X_BITS>(dst_x); | ||
| 300 | for (std::size_t x = dst_x; x < width && count < copy_size; | ||
| 301 | ++x, incrpdep<SWIZZLE_X_BITS, 1>(swizzled_x)) { | ||
| 302 | const std::size_t gob_address = | ||
| 303 | gob_address_y + (x / GOB_SIZE_X) * GOB_SIZE * block_height; | ||
| 304 | const std::size_t swizzled_offset = gob_address + (swizzled_x | swizzled_y); | ||
| 305 | const u8* source_line = source_data + count; | ||
| 306 | u8* dest_addr = swizzle_data + swizzled_offset; | ||
| 307 | count++; | ||
| 308 | |||
| 309 | *dest_addr = *source_line; | ||
| 310 | } | ||
| 311 | } | ||
| 312 | } | ||
| 313 | |||
| 314 | std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height, u32 depth, | 246 | std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height, u32 depth, |
| 315 | u32 block_height, u32 block_depth) { | 247 | u32 block_height, u32 block_depth) { |
| 316 | if (tiled) { | 248 | if (tiled) { |
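Everything removed here (the old SwizzleSubrect/UnswizzleSubrect overloads, SwizzleSliceToVoxel and SwizzleKepler) is subsumed by the single SwizzleSubrectImpl<TO_LINEAR, BPP> template added earlier, dispatched through the new span-based SwizzleSubrect/UnswizzleSubrect entry points declared in decoders.h below. A hypothetical call of the new SwizzleSubrect, with made-up dimensions purely for illustration (block_height and block_depth are log2 shifts, as in the implementation):

    #include <cstdint>
    #include <span>

    #include "video_core/textures/decoders.h"

    // Copy a 64x32 linear RGBA8 rectangle into the top-left corner of a 256x256
    // block-linear destination with 16-GOB-high blocks and a single slice.
    // The destination span is assumed to be CalculateSize(...) bytes long.
    void ExampleSwizzle(std::span<std::uint8_t> swizzled_dst,
                        std::span<const std::uint8_t> linear_src) {
        const std::uint32_t bytes_per_pixel = 4;
        const std::uint32_t pitch_linear = 64 * bytes_per_pixel; // source bytes per line
        Tegra::Texture::SwizzleSubrect(swizzled_dst, linear_src, bytes_per_pixel,
                                       /*width=*/256, /*height=*/256, /*depth=*/1,
                                       /*origin_x=*/0, /*origin_y=*/0,
                                       /*extent_x=*/64, /*extent_y=*/32,
                                       /*block_height=*/4, /*block_depth=*/0,
                                       pitch_linear);
    }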
diff --git a/src/video_core/textures/decoders.h b/src/video_core/textures/decoders.h index 31a11708f..e70407692 100644 --- a/src/video_core/textures/decoders.h +++ b/src/video_core/textures/decoders.h | |||
| @@ -40,7 +40,6 @@ constexpr SwizzleTable MakeSwizzleTable() { | |||
| 40 | } | 40 | } |
| 41 | return table; | 41 | return table; |
| 42 | } | 42 | } |
| 43 | constexpr SwizzleTable SWIZZLE_TABLE = MakeSwizzleTable(); | ||
| 44 | 43 | ||
| 45 | /// Unswizzles a block linear texture into linear memory. | 44 | /// Unswizzles a block linear texture into linear memory. |
| 46 | void UnswizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, | 45 | void UnswizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, |
| @@ -57,34 +56,14 @@ std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height | |||
| 57 | u32 block_height, u32 block_depth); | 56 | u32 block_height, u32 block_depth); |
| 58 | 57 | ||
| 59 | /// Copies an untiled subrectangle into a tiled surface. | 58 | /// Copies an untiled subrectangle into a tiled surface. |
| 60 | void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width, | 59 | void SwizzleSubrect(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width, |
| 61 | u32 bytes_per_pixel, u8* swizzled_data, const u8* unswizzled_data, | 60 | u32 height, u32 depth, u32 origin_x, u32 origin_y, u32 extent_x, u32 extent_y, |
| 62 | u32 block_height_bit, u32 offset_x, u32 offset_y); | 61 | u32 block_height, u32 block_depth, u32 pitch_linear); |
| 63 | 62 | ||
| 64 | /// Copies a tiled subrectangle into a linear surface. | 63 | /// Copies a tiled subrectangle into a linear surface. |
| 65 | void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 bytes_per_pixel, | 64 | void UnswizzleSubrect(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, |
| 66 | u32 block_height, u32 origin_x, u32 origin_y, u8* output, const u8* input); | 65 | u32 width, u32 height, u32 depth, u32 origin_x, u32 origin_y, u32 extent_x, |
| 67 | 66 | u32 extent_y, u32 block_height, u32 block_depth, u32 pitch_linear); | |
| 68 | /// @brief Swizzles a 2D array of pixels into a 3D texture | ||
| 69 | /// @param line_length_in Number of pixels per line | ||
| 70 | /// @param line_count Number of lines | ||
| 71 | /// @param pitch Number of bytes per line | ||
| 72 | /// @param width Width of the swizzled texture | ||
| 73 | /// @param height Height of the swizzled texture | ||
| 74 | /// @param bytes_per_pixel Number of bytes used per pixel | ||
| 75 | /// @param block_height Block height shift | ||
| 76 | /// @param block_depth Block depth shift | ||
| 77 | /// @param origin_x Column offset in pixels of the swizzled texture | ||
| 78 | /// @param origin_y Row offset in pixels of the swizzled texture | ||
| 79 | /// @param output Pointer to the pixels of the swizzled texture | ||
| 80 | /// @param input Pointer to the 2D array of pixels used as input | ||
| 81 | /// @pre input and output points to an array large enough to hold the number of bytes used | ||
| 82 | void SwizzleSliceToVoxel(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 height, | ||
| 83 | u32 bytes_per_pixel, u32 block_height, u32 block_depth, u32 origin_x, | ||
| 84 | u32 origin_y, u8* output, const u8* input); | ||
| 85 | |||
| 86 | void SwizzleKepler(u32 width, u32 height, u32 dst_x, u32 dst_y, u32 block_height, | ||
| 87 | std::size_t copy_size, const u8* source_data, u8* swizzle_data); | ||
| 88 | 67 | ||
| 89 | /// Obtains the offset of the gob for positions 'dst_x' & 'dst_y' | 68 | /// Obtains the offset of the gob for positions 'dst_x' & 'dst_y' |
| 90 | u64 GetGOBOffset(u32 width, u32 height, u32 dst_x, u32 dst_y, u32 block_height, | 69 | u64 GetGOBOffset(u32 width, u32 height, u32 dst_x, u32 dst_y, u32 block_height, |
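With SwizzleSliceToVoxel and SwizzleKepler dropped, the two remaining subrect entry points now carry depth and block_depth, so 2D and 3D copies go through the same path. A hedged usage sketch of the new SwizzleSubrect declaration, uploading a linear staging buffer into a window of a block-linear texture: the parameter order follows the declaration above and the buffer roles follow the retained doc comments, while the namespace (Tegra::Texture) and every concrete value are assumptions made for illustration.

```cpp
#include <cstddef>
#include <vector>

#include "video_core/textures/decoders.h"

void UploadSubrectExample() {
    using namespace Tegra::Texture; // assumed namespace of decoders.h

    // Hypothetical 512x512 RGBA8 block-linear destination, single slice.
    const u32 bytes_per_pixel = 4;
    const u32 width = 512, height = 512, depth = 1;
    const u32 block_height = 4, block_depth = 0; // block shifts, values arbitrary

    // Copy a 128x128 window whose top-left corner sits at (64, 32).
    const u32 origin_x = 64, origin_y = 32;
    const u32 extent_x = 128, extent_y = 128;
    const u32 pitch_linear = extent_x * bytes_per_pixel;

    std::vector<u8> linear(std::size_t{pitch_linear} * extent_y); // untiled source pixels
    std::vector<u8> tiled(
        CalculateSize(true, bytes_per_pixel, width, height, depth, block_height, block_depth));

    // output = tiled surface, input = linear pixels, per the header's doc comments.
    SwizzleSubrect(tiled, linear, bytes_per_pixel, width, height, depth, origin_x, origin_y,
                   extent_x, extent_y, block_height, block_depth, pitch_linear);
}
```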
diff --git a/src/video_core/vulkan_common/vulkan_wrapper.h b/src/video_core/vulkan_common/vulkan_wrapper.h index 795f16bfb..1b3f493bd 100644 --- a/src/video_core/vulkan_common/vulkan_wrapper.h +++ b/src/video_core/vulkan_common/vulkan_wrapper.h | |||
| @@ -519,9 +519,7 @@ public: | |||
| 519 | dld{rhs.dld} {} | 519 | dld{rhs.dld} {} |
| 520 | 520 | ||
| 521 | /// Assign an allocation transfering ownership from another allocation. | 521 | /// Assign an allocation transfering ownership from another allocation. |
| 522 | /// Releases any previously held allocation. | ||
| 523 | PoolAllocations& operator=(PoolAllocations&& rhs) noexcept { | 522 | PoolAllocations& operator=(PoolAllocations&& rhs) noexcept { |
| 524 | Release(); | ||
| 525 | allocations = std::move(rhs.allocations); | 523 | allocations = std::move(rhs.allocations); |
| 526 | num = rhs.num; | 524 | num = rhs.num; |
| 527 | device = rhs.device; | 525 | device = rhs.device; |
| @@ -530,11 +528,6 @@ public: | |||
| 530 | return *this; | 528 | return *this; |
| 531 | } | 529 | } |
| 532 | 530 | ||
| 533 | /// Destroys any held allocation. | ||
| 534 | ~PoolAllocations() { | ||
| 535 | Release(); | ||
| 536 | } | ||
| 537 | |||
| 538 | /// Returns the number of allocations. | 531 | /// Returns the number of allocations. |
| 539 | std::size_t size() const noexcept { | 532 | std::size_t size() const noexcept { |
| 540 | return num; | 533 | return num; |
| @@ -557,19 +550,6 @@ public: | |||
| 557 | } | 550 | } |
| 558 | 551 | ||
| 559 | private: | 552 | private: |
| 560 | /// Destroys the held allocations if they exist. | ||
| 561 | void Release() noexcept { | ||
| 562 | if (!allocations) { | ||
| 563 | return; | ||
| 564 | } | ||
| 565 | const Span<AllocationType> span(allocations.get(), num); | ||
| 566 | const VkResult result = Free(device, pool, span, *dld); | ||
| 567 | // There's no way to report errors from a destructor. | ||
| 568 | if (result != VK_SUCCESS) { | ||
| 569 | std::terminate(); | ||
| 570 | } | ||
| 571 | } | ||
| 572 | |||
| 573 | std::unique_ptr<AllocationType[]> allocations; | 553 | std::unique_ptr<AllocationType[]> allocations; |
| 574 | std::size_t num = 0; | 554 | std::size_t num = 0; |
| 575 | VkDevice device = nullptr; | 555 | VkDevice device = nullptr; |
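This vulkan_wrapper change removes every explicit free from PoolAllocations: the move assignment no longer calls Release(), and the destructor and Release() themselves are gone, so handles carved out of a descriptor or command pool are left to be reclaimed when the pool is reset or destroyed, presumably so the wrapper and the pool never free the same allocations twice. A minimal sketch of the resulting ownership model, with a hypothetical Handle type standing in for the Vulkan allocation types:

```cpp
#include <cstddef>
#include <memory>
#include <utility>

// Hypothetical handle standing in for VkDescriptorSet / VkCommandBuffer.
using Handle = void*;

// Mirrors the post-change PoolAllocations shape: it records which handles were
// carved out of a pool but never frees them itself.
class PoolAllocationsSketch {
public:
    PoolAllocationsSketch() = default;
    PoolAllocationsSketch(std::unique_ptr<Handle[]> allocations_, std::size_t num_)
        : allocations{std::move(allocations_)}, num{num_} {}

    PoolAllocationsSketch(PoolAllocationsSketch&&) noexcept = default;

    // No Release() before adopting the other object's handles: the previously
    // held handles are simply forgotten here and die with their pool.
    PoolAllocationsSketch& operator=(PoolAllocationsSketch&& rhs) noexcept {
        allocations = std::move(rhs.allocations);
        num = std::exchange(rhs.num, 0);
        return *this;
    }

    // Implicit destructor: nothing is freed per allocation.

    std::size_t size() const noexcept {
        return num;
    }
    Handle operator[](std::size_t index) const noexcept {
        return allocations[index];
    }

private:
    std::unique_ptr<Handle[]> allocations;
    std::size_t num = 0;
};
```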
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp index c63ce3a30..4146ebc2c 100644 --- a/src/yuzu/main.cpp +++ b/src/yuzu/main.cpp | |||
| @@ -3,6 +3,7 @@ | |||
| 3 | 3 | ||
| 4 | #include <cinttypes> | 4 | #include <cinttypes> |
| 5 | #include <clocale> | 5 | #include <clocale> |
| 6 | #include <cmath> | ||
| 6 | #include <memory> | 7 | #include <memory> |
| 7 | #include <thread> | 8 | #include <thread> |
| 8 | #ifdef __APPLE__ | 9 | #ifdef __APPLE__ |
| @@ -3451,9 +3452,10 @@ void GMainWindow::UpdateStatusBar() { | |||
| 3451 | } | 3452 | } |
| 3452 | if (!Settings::values.use_speed_limit) { | 3453 | if (!Settings::values.use_speed_limit) { |
| 3453 | game_fps_label->setText( | 3454 | game_fps_label->setText( |
| 3454 | tr("Game: %1 FPS (Unlocked)").arg(results.average_game_fps, 0, 'f', 0)); | 3455 | tr("Game: %1 FPS (Unlocked)").arg(std::round(results.average_game_fps), 0, 'f', 0)); |
| 3455 | } else { | 3456 | } else { |
| 3456 | game_fps_label->setText(tr("Game: %1 FPS").arg(results.average_game_fps, 0, 'f', 0)); | 3457 | game_fps_label->setText( |
| 3458 | tr("Game: %1 FPS").arg(std::round(results.average_game_fps), 0, 'f', 0)); | ||
| 3457 | } | 3459 | } |
| 3458 | emu_frametime_label->setText(tr("Frame: %1 ms").arg(results.frametime * 1000.0, 0, 'f', 2)); | 3460 | emu_frametime_label->setText(tr("Frame: %1 ms").arg(results.frametime * 1000.0, 0, 'f', 2)); |
| 3459 | 3461 | ||
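The status-bar change applies std::round to average_game_fps before handing it to QString::arg, so the label always shows the nearest whole frame rate rather than relying on the formatter's handling of a fractional value. A small Qt-flavoured sketch of the resulting formatting; the helper name and sample value are made up, and arg(double, fieldWidth, format, precision) is the standard QString overload:

```cpp
#include <cmath>

#include <QString>

QString FormatGameFps(double average_game_fps, bool unlocked) {
    // Round first so the displayed integer is explicitly the nearest whole FPS value.
    const double fps = std::round(average_game_fps);
    return unlocked ? QStringLiteral("Game: %1 FPS (Unlocked)").arg(fps, 0, 'f', 0)
                    : QStringLiteral("Game: %1 FPS").arg(fps, 0, 'f', 0);
}

// e.g. FormatGameFps(59.94, false) yields "Game: 60 FPS"
```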