Diffstat (limited to 'src')
47 files changed, 2491 insertions, 1846 deletions
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index aeaf8e81f..f77575a00 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
| @@ -98,7 +98,6 @@ add_library(common STATIC | |||
| 98 | algorithm.h | 98 | algorithm.h |
| 99 | alignment.h | 99 | alignment.h |
| 100 | assert.h | 100 | assert.h |
| 101 | atomic_ops.cpp | ||
| 102 | atomic_ops.h | 101 | atomic_ops.h |
| 103 | detached_tasks.cpp | 102 | detached_tasks.cpp |
| 104 | detached_tasks.h | 103 | detached_tasks.h |
| @@ -166,8 +165,6 @@ add_library(common STATIC | |||
| 166 | threadsafe_queue.h | 165 | threadsafe_queue.h |
| 167 | time_zone.cpp | 166 | time_zone.cpp |
| 168 | time_zone.h | 167 | time_zone.h |
| 169 | timer.cpp | ||
| 170 | timer.h | ||
| 171 | tree.h | 168 | tree.h |
| 172 | uint128.cpp | 169 | uint128.cpp |
| 173 | uint128.h | 170 | uint128.h |
diff --git a/src/common/alignment.h b/src/common/alignment.h
index 5040043de..fb81f10d8 100644
--- a/src/common/alignment.h
+++ b/src/common/alignment.h
| @@ -9,50 +9,45 @@ | |||
| 9 | namespace Common { | 9 | namespace Common { |
| 10 | 10 | ||
| 11 | template <typename T> | 11 | template <typename T> |
| 12 | [[nodiscard]] constexpr T AlignUp(T value, std::size_t size) { | 12 | requires std::is_unsigned_v<T>[[nodiscard]] constexpr T AlignUp(T value, size_t size) { |
| 13 | static_assert(std::is_unsigned_v<T>, "T must be an unsigned value."); | ||
| 14 | auto mod{static_cast<T>(value % size)}; | 13 | auto mod{static_cast<T>(value % size)}; |
| 15 | value -= mod; | 14 | value -= mod; |
| 16 | return static_cast<T>(mod == T{0} ? value : value + size); | 15 | return static_cast<T>(mod == T{0} ? value : value + size); |
| 17 | } | 16 | } |
| 18 | 17 | ||
| 19 | template <typename T> | 18 | template <typename T> |
| 20 | [[nodiscard]] constexpr T AlignDown(T value, std::size_t size) { | 19 | requires std::is_unsigned_v<T>[[nodiscard]] constexpr T AlignUpLog2(T value, size_t align_log2) { |
| 21 | static_assert(std::is_unsigned_v<T>, "T must be an unsigned value."); | 20 | return static_cast<T>((value + ((1ULL << align_log2) - 1)) >> align_log2 << align_log2); |
| 22 | return static_cast<T>(value - value % size); | ||
| 23 | } | 21 | } |
| 24 | 22 | ||
| 25 | template <typename T> | 23 | template <typename T> |
| 26 | [[nodiscard]] constexpr T AlignBits(T value, std::size_t align) { | 24 | requires std::is_unsigned_v<T>[[nodiscard]] constexpr T AlignDown(T value, size_t size) { |
| 27 | static_assert(std::is_unsigned_v<T>, "T must be an unsigned value."); | 25 | return static_cast<T>(value - value % size); |
| 28 | return static_cast<T>((value + ((1ULL << align) - 1)) >> align << align); | ||
| 29 | } | 26 | } |
| 30 | 27 | ||
| 31 | template <typename T> | 28 | template <typename T> |
| 32 | [[nodiscard]] constexpr bool Is4KBAligned(T value) { | 29 | requires std::is_unsigned_v<T>[[nodiscard]] constexpr bool Is4KBAligned(T value) { |
| 33 | static_assert(std::is_unsigned_v<T>, "T must be an unsigned value."); | ||
| 34 | return (value & 0xFFF) == 0; | 30 | return (value & 0xFFF) == 0; |
| 35 | } | 31 | } |
| 36 | 32 | ||
| 37 | template <typename T> | 33 | template <typename T> |
| 38 | [[nodiscard]] constexpr bool IsWordAligned(T value) { | 34 | requires std::is_unsigned_v<T>[[nodiscard]] constexpr bool IsWordAligned(T value) { |
| 39 | static_assert(std::is_unsigned_v<T>, "T must be an unsigned value."); | ||
| 40 | return (value & 0b11) == 0; | 35 | return (value & 0b11) == 0; |
| 41 | } | 36 | } |
| 42 | 37 | ||
| 43 | template <typename T> | 38 | template <typename T> |
| 44 | [[nodiscard]] constexpr bool IsAligned(T value, std::size_t alignment) { | 39 | requires std::is_integral_v<T>[[nodiscard]] constexpr bool IsAligned(T value, size_t alignment) { |
| 45 | using U = typename std::make_unsigned<T>::type; | 40 | using U = typename std::make_unsigned_t<T>; |
| 46 | const U mask = static_cast<U>(alignment - 1); | 41 | const U mask = static_cast<U>(alignment - 1); |
| 47 | return (value & mask) == 0; | 42 | return (value & mask) == 0; |
| 48 | } | 43 | } |
| 49 | 44 | ||
| 50 | template <typename T, std::size_t Align = 16> | 45 | template <typename T, size_t Align = 16> |
| 51 | class AlignmentAllocator { | 46 | class AlignmentAllocator { |
| 52 | public: | 47 | public: |
| 53 | using value_type = T; | 48 | using value_type = T; |
| 54 | using size_type = std::size_t; | 49 | using size_type = size_t; |
| 55 | using difference_type = std::ptrdiff_t; | 50 | using difference_type = ptrdiff_t; |
| 56 | 51 | ||
| 57 | using propagate_on_container_copy_assignment = std::true_type; | 52 | using propagate_on_container_copy_assignment = std::true_type; |
| 58 | using propagate_on_container_move_assignment = std::true_type; | 53 | using propagate_on_container_move_assignment = std::true_type; |
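Note: the helpers above now constrain T with C++20 requires-clauses instead of a static_assert, and AlignBits is renamed to AlignUpLog2. A minimal usage sketch of the constrained helpers (illustrative only, not part of the commit):

    #include <cstdint>
    #include "common/alignment.h"

    constexpr std::uint64_t addr = 0x1234;
    static_assert(Common::AlignUp(addr, 0x1000) == 0x2000);    // round up to 4 KiB
    static_assert(Common::AlignDown(addr, 0x1000) == 0x1000);  // round down to 4 KiB
    static_assert(Common::AlignUpLog2(addr, 12) == 0x2000);    // alignment given as log2 (2^12)
    static_assert(Common::IsAligned(std::uint64_t{0x2000}, 0x1000));
    // Calling AlignUp with a signed type is now a constraint failure instead of a static_assert error.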
diff --git a/src/common/atomic_ops.cpp b/src/common/atomic_ops.cpp
deleted file mode 100644
index 1612d0e67..000000000
--- a/src/common/atomic_ops.cpp
+++ /dev/null
| @@ -1,75 +0,0 @@ | |||
| 1 | // Copyright 2020 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include <cstring> | ||
| 6 | |||
| 7 | #include "common/atomic_ops.h" | ||
| 8 | |||
| 9 | #if _MSC_VER | ||
| 10 | #include <intrin.h> | ||
| 11 | #endif | ||
| 12 | |||
| 13 | namespace Common { | ||
| 14 | |||
| 15 | #if _MSC_VER | ||
| 16 | |||
| 17 | bool AtomicCompareAndSwap(volatile u8* pointer, u8 value, u8 expected) { | ||
| 18 | const u8 result = | ||
| 19 | _InterlockedCompareExchange8(reinterpret_cast<volatile char*>(pointer), value, expected); | ||
| 20 | return result == expected; | ||
| 21 | } | ||
| 22 | |||
| 23 | bool AtomicCompareAndSwap(volatile u16* pointer, u16 value, u16 expected) { | ||
| 24 | const u16 result = | ||
| 25 | _InterlockedCompareExchange16(reinterpret_cast<volatile short*>(pointer), value, expected); | ||
| 26 | return result == expected; | ||
| 27 | } | ||
| 28 | |||
| 29 | bool AtomicCompareAndSwap(volatile u32* pointer, u32 value, u32 expected) { | ||
| 30 | const u32 result = | ||
| 31 | _InterlockedCompareExchange(reinterpret_cast<volatile long*>(pointer), value, expected); | ||
| 32 | return result == expected; | ||
| 33 | } | ||
| 34 | |||
| 35 | bool AtomicCompareAndSwap(volatile u64* pointer, u64 value, u64 expected) { | ||
| 36 | const u64 result = _InterlockedCompareExchange64(reinterpret_cast<volatile __int64*>(pointer), | ||
| 37 | value, expected); | ||
| 38 | return result == expected; | ||
| 39 | } | ||
| 40 | |||
| 41 | bool AtomicCompareAndSwap(volatile u64* pointer, u128 value, u128 expected) { | ||
| 42 | return _InterlockedCompareExchange128(reinterpret_cast<volatile __int64*>(pointer), value[1], | ||
| 43 | value[0], | ||
| 44 | reinterpret_cast<__int64*>(expected.data())) != 0; | ||
| 45 | } | ||
| 46 | |||
| 47 | #else | ||
| 48 | |||
| 49 | bool AtomicCompareAndSwap(volatile u8* pointer, u8 value, u8 expected) { | ||
| 50 | return __sync_bool_compare_and_swap(pointer, expected, value); | ||
| 51 | } | ||
| 52 | |||
| 53 | bool AtomicCompareAndSwap(volatile u16* pointer, u16 value, u16 expected) { | ||
| 54 | return __sync_bool_compare_and_swap(pointer, expected, value); | ||
| 55 | } | ||
| 56 | |||
| 57 | bool AtomicCompareAndSwap(volatile u32* pointer, u32 value, u32 expected) { | ||
| 58 | return __sync_bool_compare_and_swap(pointer, expected, value); | ||
| 59 | } | ||
| 60 | |||
| 61 | bool AtomicCompareAndSwap(volatile u64* pointer, u64 value, u64 expected) { | ||
| 62 | return __sync_bool_compare_and_swap(pointer, expected, value); | ||
| 63 | } | ||
| 64 | |||
| 65 | bool AtomicCompareAndSwap(volatile u64* pointer, u128 value, u128 expected) { | ||
| 66 | unsigned __int128 value_a; | ||
| 67 | unsigned __int128 expected_a; | ||
| 68 | std::memcpy(&value_a, value.data(), sizeof(u128)); | ||
| 69 | std::memcpy(&expected_a, expected.data(), sizeof(u128)); | ||
| 70 | return __sync_bool_compare_and_swap((unsigned __int128*)pointer, expected_a, value_a); | ||
| 71 | } | ||
| 72 | |||
| 73 | #endif | ||
| 74 | |||
| 75 | } // namespace Common | ||
diff --git a/src/common/atomic_ops.h b/src/common/atomic_ops.h
index b46888589..2b1f515e8 100644
--- a/src/common/atomic_ops.h
+++ b/src/common/atomic_ops.h
| @@ -4,14 +4,75 @@ | |||
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include <cstring> | ||
| 8 | #include <memory> | ||
| 9 | |||
| 7 | #include "common/common_types.h" | 10 | #include "common/common_types.h" |
| 8 | 11 | ||
| 12 | #if _MSC_VER | ||
| 13 | #include <intrin.h> | ||
| 14 | #endif | ||
| 15 | |||
| 9 | namespace Common { | 16 | namespace Common { |
| 10 | 17 | ||
| 11 | [[nodiscard]] bool AtomicCompareAndSwap(volatile u8* pointer, u8 value, u8 expected); | 18 | #if _MSC_VER |
| 12 | [[nodiscard]] bool AtomicCompareAndSwap(volatile u16* pointer, u16 value, u16 expected); | 19 | |
| 13 | [[nodiscard]] bool AtomicCompareAndSwap(volatile u32* pointer, u32 value, u32 expected); | 20 | [[nodiscard]] inline bool AtomicCompareAndSwap(volatile u8* pointer, u8 value, u8 expected) { |
| 14 | [[nodiscard]] bool AtomicCompareAndSwap(volatile u64* pointer, u64 value, u64 expected); | 21 | const u8 result = |
| 15 | [[nodiscard]] bool AtomicCompareAndSwap(volatile u64* pointer, u128 value, u128 expected); | 22 | _InterlockedCompareExchange8(reinterpret_cast<volatile char*>(pointer), value, expected); |
| 23 | return result == expected; | ||
| 24 | } | ||
| 25 | |||
| 26 | [[nodiscard]] inline bool AtomicCompareAndSwap(volatile u16* pointer, u16 value, u16 expected) { | ||
| 27 | const u16 result = | ||
| 28 | _InterlockedCompareExchange16(reinterpret_cast<volatile short*>(pointer), value, expected); | ||
| 29 | return result == expected; | ||
| 30 | } | ||
| 31 | |||
| 32 | [[nodiscard]] inline bool AtomicCompareAndSwap(volatile u32* pointer, u32 value, u32 expected) { | ||
| 33 | const u32 result = | ||
| 34 | _InterlockedCompareExchange(reinterpret_cast<volatile long*>(pointer), value, expected); | ||
| 35 | return result == expected; | ||
| 36 | } | ||
| 37 | |||
| 38 | [[nodiscard]] inline bool AtomicCompareAndSwap(volatile u64* pointer, u64 value, u64 expected) { | ||
| 39 | const u64 result = _InterlockedCompareExchange64(reinterpret_cast<volatile __int64*>(pointer), | ||
| 40 | value, expected); | ||
| 41 | return result == expected; | ||
| 42 | } | ||
| 43 | |||
| 44 | [[nodiscard]] inline bool AtomicCompareAndSwap(volatile u64* pointer, u128 value, u128 expected) { | ||
| 45 | return _InterlockedCompareExchange128(reinterpret_cast<volatile __int64*>(pointer), value[1], | ||
| 46 | value[0], | ||
| 47 | reinterpret_cast<__int64*>(expected.data())) != 0; | ||
| 48 | } | ||
| 49 | |||
| 50 | #else | ||
| 51 | |||
| 52 | [[nodiscard]] inline bool AtomicCompareAndSwap(volatile u8* pointer, u8 value, u8 expected) { | ||
| 53 | return __sync_bool_compare_and_swap(pointer, expected, value); | ||
| 54 | } | ||
| 55 | |||
| 56 | [[nodiscard]] inline bool AtomicCompareAndSwap(volatile u16* pointer, u16 value, u16 expected) { | ||
| 57 | return __sync_bool_compare_and_swap(pointer, expected, value); | ||
| 58 | } | ||
| 59 | |||
| 60 | [[nodiscard]] inline bool AtomicCompareAndSwap(volatile u32* pointer, u32 value, u32 expected) { | ||
| 61 | return __sync_bool_compare_and_swap(pointer, expected, value); | ||
| 62 | } | ||
| 63 | |||
| 64 | [[nodiscard]] inline bool AtomicCompareAndSwap(volatile u64* pointer, u64 value, u64 expected) { | ||
| 65 | return __sync_bool_compare_and_swap(pointer, expected, value); | ||
| 66 | } | ||
| 67 | |||
| 68 | [[nodiscard]] inline bool AtomicCompareAndSwap(volatile u64* pointer, u128 value, u128 expected) { | ||
| 69 | unsigned __int128 value_a; | ||
| 70 | unsigned __int128 expected_a; | ||
| 71 | std::memcpy(&value_a, value.data(), sizeof(u128)); | ||
| 72 | std::memcpy(&expected_a, expected.data(), sizeof(u128)); | ||
| 73 | return __sync_bool_compare_and_swap((unsigned __int128*)pointer, expected_a, value_a); | ||
| 74 | } | ||
| 75 | |||
| 76 | #endif | ||
| 16 | 77 | ||
| 17 | } // namespace Common | 78 | } // namespace Common |
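The compare-and-swap wrappers previously defined in atomic_ops.cpp are now header-only inline functions, so the MSVC intrinsics and GCC builtins can be inlined at call sites. A small usage sketch (the spin-acquire helper below is illustrative only, not from this change):

    #include "common/atomic_ops.h"
    #include "common/common_types.h"

    // Sets *flag from 0 to 1, spinning until the CAS succeeds.
    inline void SpinAcquire(volatile u32* flag) {
        // AtomicCompareAndSwap returns true when *flag matched `expected` (0)
        // and was replaced with `value` (1).
        while (!Common::AtomicCompareAndSwap(flag, 1, 0)) {
        }
    }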
diff --git a/src/common/bit_util.h b/src/common/bit_util.h
index 29f59a9a3..685e7fc9b 100644
--- a/src/common/bit_util.h
+++ b/src/common/bit_util.h
| @@ -22,82 +22,6 @@ template <typename T> | |||
| 22 | } | 22 | } |
| 23 | 23 | ||
| 24 | #ifdef _MSC_VER | 24 | #ifdef _MSC_VER |
| 25 | [[nodiscard]] inline u32 CountLeadingZeroes32(u32 value) { | ||
| 26 | unsigned long leading_zero = 0; | ||
| 27 | |||
| 28 | if (_BitScanReverse(&leading_zero, value) != 0) { | ||
| 29 | return 31 - leading_zero; | ||
| 30 | } | ||
| 31 | |||
| 32 | return 32; | ||
| 33 | } | ||
| 34 | |||
| 35 | [[nodiscard]] inline u32 CountLeadingZeroes64(u64 value) { | ||
| 36 | unsigned long leading_zero = 0; | ||
| 37 | |||
| 38 | if (_BitScanReverse64(&leading_zero, value) != 0) { | ||
| 39 | return 63 - leading_zero; | ||
| 40 | } | ||
| 41 | |||
| 42 | return 64; | ||
| 43 | } | ||
| 44 | #else | ||
| 45 | [[nodiscard]] inline u32 CountLeadingZeroes32(u32 value) { | ||
| 46 | if (value == 0) { | ||
| 47 | return 32; | ||
| 48 | } | ||
| 49 | |||
| 50 | return static_cast<u32>(__builtin_clz(value)); | ||
| 51 | } | ||
| 52 | |||
| 53 | [[nodiscard]] inline u32 CountLeadingZeroes64(u64 value) { | ||
| 54 | if (value == 0) { | ||
| 55 | return 64; | ||
| 56 | } | ||
| 57 | |||
| 58 | return static_cast<u32>(__builtin_clzll(value)); | ||
| 59 | } | ||
| 60 | #endif | ||
| 61 | |||
| 62 | #ifdef _MSC_VER | ||
| 63 | [[nodiscard]] inline u32 CountTrailingZeroes32(u32 value) { | ||
| 64 | unsigned long trailing_zero = 0; | ||
| 65 | |||
| 66 | if (_BitScanForward(&trailing_zero, value) != 0) { | ||
| 67 | return trailing_zero; | ||
| 68 | } | ||
| 69 | |||
| 70 | return 32; | ||
| 71 | } | ||
| 72 | |||
| 73 | [[nodiscard]] inline u32 CountTrailingZeroes64(u64 value) { | ||
| 74 | unsigned long trailing_zero = 0; | ||
| 75 | |||
| 76 | if (_BitScanForward64(&trailing_zero, value) != 0) { | ||
| 77 | return trailing_zero; | ||
| 78 | } | ||
| 79 | |||
| 80 | return 64; | ||
| 81 | } | ||
| 82 | #else | ||
| 83 | [[nodiscard]] inline u32 CountTrailingZeroes32(u32 value) { | ||
| 84 | if (value == 0) { | ||
| 85 | return 32; | ||
| 86 | } | ||
| 87 | |||
| 88 | return static_cast<u32>(__builtin_ctz(value)); | ||
| 89 | } | ||
| 90 | |||
| 91 | [[nodiscard]] inline u32 CountTrailingZeroes64(u64 value) { | ||
| 92 | if (value == 0) { | ||
| 93 | return 64; | ||
| 94 | } | ||
| 95 | |||
| 96 | return static_cast<u32>(__builtin_ctzll(value)); | ||
| 97 | } | ||
| 98 | #endif | ||
| 99 | |||
| 100 | #ifdef _MSC_VER | ||
| 101 | 25 | ||
| 102 | [[nodiscard]] inline u32 MostSignificantBit32(const u32 value) { | 26 | [[nodiscard]] inline u32 MostSignificantBit32(const u32 value) { |
| 103 | unsigned long result; | 27 | unsigned long result; |
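The CountLeadingZeroes32/64 and CountTrailingZeroes32/64 wrappers are dropped from bit_util.h; callers presumably switch to the C++20 <bit> facilities, which handle the zero case the same way. The replacement call sites are not shown in this hunk, so treat this mapping as an assumption:

    #include <bit>
    #include <cstdint>

    // std::countl_zero / std::countr_zero cover the removed helpers, including zero input.
    static_assert(std::countl_zero(std::uint32_t{0}) == 32);  // CountLeadingZeroes32(0)
    static_assert(std::countl_zero(std::uint64_t{1}) == 63);  // CountLeadingZeroes64(1)
    static_assert(std::countr_zero(std::uint32_t{8}) == 3);   // CountTrailingZeroes32(8)
    static_assert(std::countr_zero(std::uint64_t{0}) == 64);  // CountTrailingZeroes64(0)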
diff --git a/src/common/intrusive_red_black_tree.h b/src/common/intrusive_red_black_tree.h
index fb55de94e..c0bbcd457 100644
--- a/src/common/intrusive_red_black_tree.h
+++ b/src/common/intrusive_red_black_tree.h
| @@ -16,17 +16,30 @@ class IntrusiveRedBlackTreeImpl; | |||
| 16 | } | 16 | } |
| 17 | 17 | ||
| 18 | struct IntrusiveRedBlackTreeNode { | 18 | struct IntrusiveRedBlackTreeNode { |
| 19 | public: | ||
| 20 | using EntryType = RBEntry<IntrusiveRedBlackTreeNode>; | ||
| 21 | |||
| 22 | constexpr IntrusiveRedBlackTreeNode() = default; | ||
| 23 | |||
| 24 | void SetEntry(const EntryType& new_entry) { | ||
| 25 | entry = new_entry; | ||
| 26 | } | ||
| 27 | |||
| 28 | [[nodiscard]] EntryType& GetEntry() { | ||
| 29 | return entry; | ||
| 30 | } | ||
| 31 | |||
| 32 | [[nodiscard]] const EntryType& GetEntry() const { | ||
| 33 | return entry; | ||
| 34 | } | ||
| 19 | 35 | ||
| 20 | private: | 36 | private: |
| 21 | RB_ENTRY(IntrusiveRedBlackTreeNode) entry{}; | 37 | EntryType entry{}; |
| 22 | 38 | ||
| 23 | friend class impl::IntrusiveRedBlackTreeImpl; | 39 | friend class impl::IntrusiveRedBlackTreeImpl; |
| 24 | 40 | ||
| 25 | template <class, class, class> | 41 | template <class, class, class> |
| 26 | friend class IntrusiveRedBlackTree; | 42 | friend class IntrusiveRedBlackTree; |
| 27 | |||
| 28 | public: | ||
| 29 | constexpr IntrusiveRedBlackTreeNode() = default; | ||
| 30 | }; | 43 | }; |
| 31 | 44 | ||
| 32 | template <class T, class Traits, class Comparator> | 45 | template <class T, class Traits, class Comparator> |
| @@ -35,17 +48,12 @@ class IntrusiveRedBlackTree; | |||
| 35 | namespace impl { | 48 | namespace impl { |
| 36 | 49 | ||
| 37 | class IntrusiveRedBlackTreeImpl { | 50 | class IntrusiveRedBlackTreeImpl { |
| 38 | |||
| 39 | private: | 51 | private: |
| 40 | template <class, class, class> | 52 | template <class, class, class> |
| 41 | friend class ::Common::IntrusiveRedBlackTree; | 53 | friend class ::Common::IntrusiveRedBlackTree; |
| 42 | 54 | ||
| 43 | private: | 55 | using RootType = RBHead<IntrusiveRedBlackTreeNode>; |
| 44 | RB_HEAD(IntrusiveRedBlackTreeRoot, IntrusiveRedBlackTreeNode); | 56 | RootType root; |
| 45 | using RootType = IntrusiveRedBlackTreeRoot; | ||
| 46 | |||
| 47 | private: | ||
| 48 | IntrusiveRedBlackTreeRoot root; | ||
| 49 | 57 | ||
| 50 | public: | 58 | public: |
| 51 | template <bool Const> | 59 | template <bool Const> |
| @@ -121,57 +129,45 @@ public: | |||
| 121 | } | 129 | } |
| 122 | }; | 130 | }; |
| 123 | 131 | ||
| 124 | protected: | ||
| 125 | // Generate static implementations for non-comparison operations for IntrusiveRedBlackTreeRoot. | ||
| 126 | RB_GENERATE_WITHOUT_COMPARE_STATIC(IntrusiveRedBlackTreeRoot, IntrusiveRedBlackTreeNode, entry); | ||
| 127 | |||
| 128 | private: | 132 | private: |
| 129 | // Define accessors using RB_* functions. | 133 | // Define accessors using RB_* functions. |
| 130 | constexpr void InitializeImpl() { | ||
| 131 | RB_INIT(&this->root); | ||
| 132 | } | ||
| 133 | |||
| 134 | bool EmptyImpl() const { | 134 | bool EmptyImpl() const { |
| 135 | return RB_EMPTY(&this->root); | 135 | return root.IsEmpty(); |
| 136 | } | 136 | } |
| 137 | 137 | ||
| 138 | IntrusiveRedBlackTreeNode* GetMinImpl() const { | 138 | IntrusiveRedBlackTreeNode* GetMinImpl() const { |
| 139 | return RB_MIN(IntrusiveRedBlackTreeRoot, | 139 | return RB_MIN(const_cast<RootType*>(&root)); |
| 140 | const_cast<IntrusiveRedBlackTreeRoot*>(&this->root)); | ||
| 141 | } | 140 | } |
| 142 | 141 | ||
| 143 | IntrusiveRedBlackTreeNode* GetMaxImpl() const { | 142 | IntrusiveRedBlackTreeNode* GetMaxImpl() const { |
| 144 | return RB_MAX(IntrusiveRedBlackTreeRoot, | 143 | return RB_MAX(const_cast<RootType*>(&root)); |
| 145 | const_cast<IntrusiveRedBlackTreeRoot*>(&this->root)); | ||
| 146 | } | 144 | } |
| 147 | 145 | ||
| 148 | IntrusiveRedBlackTreeNode* RemoveImpl(IntrusiveRedBlackTreeNode* node) { | 146 | IntrusiveRedBlackTreeNode* RemoveImpl(IntrusiveRedBlackTreeNode* node) { |
| 149 | return RB_REMOVE(IntrusiveRedBlackTreeRoot, &this->root, node); | 147 | return RB_REMOVE(&root, node); |
| 150 | } | 148 | } |
| 151 | 149 | ||
| 152 | public: | 150 | public: |
| 153 | static IntrusiveRedBlackTreeNode* GetNext(IntrusiveRedBlackTreeNode* node) { | 151 | static IntrusiveRedBlackTreeNode* GetNext(IntrusiveRedBlackTreeNode* node) { |
| 154 | return RB_NEXT(IntrusiveRedBlackTreeRoot, nullptr, node); | 152 | return RB_NEXT(node); |
| 155 | } | 153 | } |
| 156 | 154 | ||
| 157 | static IntrusiveRedBlackTreeNode* GetPrev(IntrusiveRedBlackTreeNode* node) { | 155 | static IntrusiveRedBlackTreeNode* GetPrev(IntrusiveRedBlackTreeNode* node) { |
| 158 | return RB_PREV(IntrusiveRedBlackTreeRoot, nullptr, node); | 156 | return RB_PREV(node); |
| 159 | } | 157 | } |
| 160 | 158 | ||
| 161 | static IntrusiveRedBlackTreeNode const* GetNext(const IntrusiveRedBlackTreeNode* node) { | 159 | static const IntrusiveRedBlackTreeNode* GetNext(const IntrusiveRedBlackTreeNode* node) { |
| 162 | return static_cast<const IntrusiveRedBlackTreeNode*>( | 160 | return static_cast<const IntrusiveRedBlackTreeNode*>( |
| 163 | GetNext(const_cast<IntrusiveRedBlackTreeNode*>(node))); | 161 | GetNext(const_cast<IntrusiveRedBlackTreeNode*>(node))); |
| 164 | } | 162 | } |
| 165 | 163 | ||
| 166 | static IntrusiveRedBlackTreeNode const* GetPrev(const IntrusiveRedBlackTreeNode* node) { | 164 | static const IntrusiveRedBlackTreeNode* GetPrev(const IntrusiveRedBlackTreeNode* node) { |
| 167 | return static_cast<const IntrusiveRedBlackTreeNode*>( | 165 | return static_cast<const IntrusiveRedBlackTreeNode*>( |
| 168 | GetPrev(const_cast<IntrusiveRedBlackTreeNode*>(node))); | 166 | GetPrev(const_cast<IntrusiveRedBlackTreeNode*>(node))); |
| 169 | } | 167 | } |
| 170 | 168 | ||
| 171 | public: | 169 | public: |
| 172 | constexpr IntrusiveRedBlackTreeImpl() : root() { | 170 | constexpr IntrusiveRedBlackTreeImpl() {} |
| 173 | this->InitializeImpl(); | ||
| 174 | } | ||
| 175 | 171 | ||
| 176 | // Iterator accessors. | 172 | // Iterator accessors. |
| 177 | iterator begin() { | 173 | iterator begin() { |
| @@ -269,8 +265,6 @@ private: | |||
| 269 | ImplType impl{}; | 265 | ImplType impl{}; |
| 270 | 266 | ||
| 271 | public: | 267 | public: |
| 272 | struct IntrusiveRedBlackTreeRootWithCompare : ImplType::IntrusiveRedBlackTreeRoot {}; | ||
| 273 | |||
| 274 | template <bool Const> | 268 | template <bool Const> |
| 275 | class Iterator; | 269 | class Iterator; |
| 276 | 270 | ||
| @@ -363,11 +357,6 @@ public: | |||
| 363 | }; | 357 | }; |
| 364 | 358 | ||
| 365 | private: | 359 | private: |
| 366 | // Generate static implementations for comparison operations for IntrusiveRedBlackTreeRoot. | ||
| 367 | RB_GENERATE_WITH_COMPARE_STATIC(IntrusiveRedBlackTreeRootWithCompare, IntrusiveRedBlackTreeNode, | ||
| 368 | entry, CompareImpl, LightCompareImpl); | ||
| 369 | |||
| 370 | private: | ||
| 371 | static int CompareImpl(const IntrusiveRedBlackTreeNode* lhs, | 360 | static int CompareImpl(const IntrusiveRedBlackTreeNode* lhs, |
| 372 | const IntrusiveRedBlackTreeNode* rhs) { | 361 | const IntrusiveRedBlackTreeNode* rhs) { |
| 373 | return Comparator::Compare(*Traits::GetParent(lhs), *Traits::GetParent(rhs)); | 362 | return Comparator::Compare(*Traits::GetParent(lhs), *Traits::GetParent(rhs)); |
| @@ -379,41 +368,27 @@ private: | |||
| 379 | 368 | ||
| 380 | // Define accessors using RB_* functions. | 369 | // Define accessors using RB_* functions. |
| 381 | IntrusiveRedBlackTreeNode* InsertImpl(IntrusiveRedBlackTreeNode* node) { | 370 | IntrusiveRedBlackTreeNode* InsertImpl(IntrusiveRedBlackTreeNode* node) { |
| 382 | return RB_INSERT(IntrusiveRedBlackTreeRootWithCompare, | 371 | return RB_INSERT(&impl.root, node, CompareImpl); |
| 383 | static_cast<IntrusiveRedBlackTreeRootWithCompare*>(&this->impl.root), | ||
| 384 | node); | ||
| 385 | } | 372 | } |
| 386 | 373 | ||
| 387 | IntrusiveRedBlackTreeNode* FindImpl(const IntrusiveRedBlackTreeNode* node) const { | 374 | IntrusiveRedBlackTreeNode* FindImpl(const IntrusiveRedBlackTreeNode* node) const { |
| 388 | return RB_FIND( | 375 | return RB_FIND(const_cast<ImplType::RootType*>(&impl.root), |
| 389 | IntrusiveRedBlackTreeRootWithCompare, | 376 | const_cast<IntrusiveRedBlackTreeNode*>(node), CompareImpl); |
| 390 | const_cast<IntrusiveRedBlackTreeRootWithCompare*>( | ||
| 391 | static_cast<const IntrusiveRedBlackTreeRootWithCompare*>(&this->impl.root)), | ||
| 392 | const_cast<IntrusiveRedBlackTreeNode*>(node)); | ||
| 393 | } | 377 | } |
| 394 | 378 | ||
| 395 | IntrusiveRedBlackTreeNode* NFindImpl(const IntrusiveRedBlackTreeNode* node) const { | 379 | IntrusiveRedBlackTreeNode* NFindImpl(const IntrusiveRedBlackTreeNode* node) const { |
| 396 | return RB_NFIND( | 380 | return RB_NFIND(const_cast<ImplType::RootType*>(&impl.root), |
| 397 | IntrusiveRedBlackTreeRootWithCompare, | 381 | const_cast<IntrusiveRedBlackTreeNode*>(node), CompareImpl); |
| 398 | const_cast<IntrusiveRedBlackTreeRootWithCompare*>( | ||
| 399 | static_cast<const IntrusiveRedBlackTreeRootWithCompare*>(&this->impl.root)), | ||
| 400 | const_cast<IntrusiveRedBlackTreeNode*>(node)); | ||
| 401 | } | 382 | } |
| 402 | 383 | ||
| 403 | IntrusiveRedBlackTreeNode* FindLightImpl(const_light_pointer lelm) const { | 384 | IntrusiveRedBlackTreeNode* FindLightImpl(const_light_pointer lelm) const { |
| 404 | return RB_FIND_LIGHT( | 385 | return RB_FIND_LIGHT(const_cast<ImplType::RootType*>(&impl.root), |
| 405 | IntrusiveRedBlackTreeRootWithCompare, | 386 | static_cast<const void*>(lelm), LightCompareImpl); |
| 406 | const_cast<IntrusiveRedBlackTreeRootWithCompare*>( | ||
| 407 | static_cast<const IntrusiveRedBlackTreeRootWithCompare*>(&this->impl.root)), | ||
| 408 | static_cast<const void*>(lelm)); | ||
| 409 | } | 387 | } |
| 410 | 388 | ||
| 411 | IntrusiveRedBlackTreeNode* NFindLightImpl(const_light_pointer lelm) const { | 389 | IntrusiveRedBlackTreeNode* NFindLightImpl(const_light_pointer lelm) const { |
| 412 | return RB_NFIND_LIGHT( | 390 | return RB_NFIND_LIGHT(const_cast<ImplType::RootType*>(&impl.root), |
| 413 | IntrusiveRedBlackTreeRootWithCompare, | 391 | static_cast<const void*>(lelm), LightCompareImpl); |
| 414 | const_cast<IntrusiveRedBlackTreeRootWithCompare*>( | ||
| 415 | static_cast<const IntrusiveRedBlackTreeRootWithCompare*>(&this->impl.root)), | ||
| 416 | static_cast<const void*>(lelm)); | ||
| 417 | } | 392 | } |
| 418 | 393 | ||
| 419 | public: | 394 | public: |
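With this rewrite, the node stores a typed RBEntry exposed through GetEntry(), and the tree calls the template helpers from tree.h (RB_INSERT, RB_FIND, RB_REMOVE, ...) with an explicit comparator instead of macro-generated per-root functions. Those helpers only require a node type with a GetEntry() accessor; a minimal sketch of such a node (MyNode and CompareMyNodes are hypothetical, not part of the commit):

    #include "common/tree.h"

    struct MyNode {
        // RB_ENTRY(node) in tree.h calls node->GetEntry(), so any type with this
        // accessor pair can be linked into a Common::RBHead<MyNode>.
        Common::RBEntry<MyNode>& GetEntry() { return entry; }
        const Common::RBEntry<MyNode>& GetEntry() const { return entry; }

        int key{};
        Common::RBEntry<MyNode> entry{};
    };

    // Three-way comparator in the style of CompareImpl above.
    inline int CompareMyNodes(const MyNode* lhs, const MyNode* rhs) {
        if (lhs->key < rhs->key) return -1;
        if (lhs->key > rhs->key) return 1;
        return 0;
    }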
diff --git a/src/common/timer.cpp b/src/common/timer.cpp
deleted file mode 100644
index d17dc2a50..000000000
--- a/src/common/timer.cpp
+++ /dev/null
| @@ -1,159 +0,0 @@ | |||
| 1 | // Copyright 2013 Dolphin Emulator Project / 2014 Citra Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include <ctime> | ||
| 6 | #include <fmt/format.h> | ||
| 7 | #include "common/common_types.h" | ||
| 8 | #include "common/string_util.h" | ||
| 9 | #include "common/timer.h" | ||
| 10 | |||
| 11 | namespace Common { | ||
| 12 | |||
| 13 | std::chrono::milliseconds Timer::GetTimeMs() { | ||
| 14 | return std::chrono::duration_cast<std::chrono::milliseconds>( | ||
| 15 | std::chrono::system_clock::now().time_since_epoch()); | ||
| 16 | } | ||
| 17 | |||
| 18 | // -------------------------------------------- | ||
| 19 | // Initiate, Start, Stop, and Update the time | ||
| 20 | // -------------------------------------------- | ||
| 21 | |||
| 22 | // Set initial values for the class | ||
| 23 | Timer::Timer() : m_LastTime(0), m_StartTime(0), m_Running(false) { | ||
| 24 | Update(); | ||
| 25 | } | ||
| 26 | |||
| 27 | // Write the starting time | ||
| 28 | void Timer::Start() { | ||
| 29 | m_StartTime = GetTimeMs(); | ||
| 30 | m_Running = true; | ||
| 31 | } | ||
| 32 | |||
| 33 | // Stop the timer | ||
| 34 | void Timer::Stop() { | ||
| 35 | // Write the final time | ||
| 36 | m_LastTime = GetTimeMs(); | ||
| 37 | m_Running = false; | ||
| 38 | } | ||
| 39 | |||
| 40 | // Update the last time variable | ||
| 41 | void Timer::Update() { | ||
| 42 | m_LastTime = GetTimeMs(); | ||
| 43 | // TODO(ector) - QPF | ||
| 44 | } | ||
| 45 | |||
| 46 | // ------------------------------------- | ||
| 47 | // Get time difference and elapsed time | ||
| 48 | // ------------------------------------- | ||
| 49 | |||
| 50 | // Get the number of milliseconds since the last Update() | ||
| 51 | std::chrono::milliseconds Timer::GetTimeDifference() { | ||
| 52 | return GetTimeMs() - m_LastTime; | ||
| 53 | } | ||
| 54 | |||
| 55 | // Add the time difference since the last Update() to the starting time. | ||
| 56 | // This is used to compensate for a paused game. | ||
| 57 | void Timer::AddTimeDifference() { | ||
| 58 | m_StartTime += GetTimeDifference(); | ||
| 59 | } | ||
| 60 | |||
| 61 | // Get the time elapsed since the Start() | ||
| 62 | std::chrono::milliseconds Timer::GetTimeElapsed() { | ||
| 63 | // If we have not started yet, return 1 (because then I don't | ||
| 64 | // have to change the FPS calculation in CoreRerecording.cpp . | ||
| 65 | if (m_StartTime.count() == 0) | ||
| 66 | return std::chrono::milliseconds(1); | ||
| 67 | |||
| 68 | // Return the final timer time if the timer is stopped | ||
| 69 | if (!m_Running) | ||
| 70 | return (m_LastTime - m_StartTime); | ||
| 71 | |||
| 72 | return (GetTimeMs() - m_StartTime); | ||
| 73 | } | ||
| 74 | |||
| 75 | // Get the formatted time elapsed since the Start() | ||
| 76 | std::string Timer::GetTimeElapsedFormatted() const { | ||
| 77 | // If we have not started yet, return zero | ||
| 78 | if (m_StartTime.count() == 0) | ||
| 79 | return "00:00:00:000"; | ||
| 80 | |||
| 81 | // The number of milliseconds since the start. | ||
| 82 | // Use a different value if the timer is stopped. | ||
| 83 | std::chrono::milliseconds Milliseconds; | ||
| 84 | if (m_Running) | ||
| 85 | Milliseconds = GetTimeMs() - m_StartTime; | ||
| 86 | else | ||
| 87 | Milliseconds = m_LastTime - m_StartTime; | ||
| 88 | // Seconds | ||
| 89 | std::chrono::seconds Seconds = std::chrono::duration_cast<std::chrono::seconds>(Milliseconds); | ||
| 90 | // Minutes | ||
| 91 | std::chrono::minutes Minutes = std::chrono::duration_cast<std::chrono::minutes>(Milliseconds); | ||
| 92 | // Hours | ||
| 93 | std::chrono::hours Hours = std::chrono::duration_cast<std::chrono::hours>(Milliseconds); | ||
| 94 | |||
| 95 | std::string TmpStr = fmt::format("{:02}:{:02}:{:02}:{:03}", Hours.count(), Minutes.count() % 60, | ||
| 96 | Seconds.count() % 60, Milliseconds.count() % 1000); | ||
| 97 | return TmpStr; | ||
| 98 | } | ||
| 99 | |||
| 100 | // Get the number of seconds since January 1 1970 | ||
| 101 | std::chrono::seconds Timer::GetTimeSinceJan1970() { | ||
| 102 | return std::chrono::duration_cast<std::chrono::seconds>(GetTimeMs()); | ||
| 103 | } | ||
| 104 | |||
| 105 | std::chrono::seconds Timer::GetLocalTimeSinceJan1970() { | ||
| 106 | time_t sysTime, tzDiff, tzDST; | ||
| 107 | struct tm* gmTime; | ||
| 108 | |||
| 109 | time(&sysTime); | ||
| 110 | |||
| 111 | // Account for DST where needed | ||
| 112 | gmTime = localtime(&sysTime); | ||
| 113 | if (gmTime->tm_isdst == 1) | ||
| 114 | tzDST = 3600; | ||
| 115 | else | ||
| 116 | tzDST = 0; | ||
| 117 | |||
| 118 | // Lazy way to get local time in sec | ||
| 119 | gmTime = gmtime(&sysTime); | ||
| 120 | tzDiff = sysTime - mktime(gmTime); | ||
| 121 | |||
| 122 | return std::chrono::seconds(sysTime + tzDiff + tzDST); | ||
| 123 | } | ||
| 124 | |||
| 125 | // Return the current time formatted as Minutes:Seconds:Milliseconds | ||
| 126 | // in the form 00:00:000. | ||
| 127 | std::string Timer::GetTimeFormatted() { | ||
| 128 | time_t sysTime; | ||
| 129 | struct tm* gmTime; | ||
| 130 | char tmp[13]; | ||
| 131 | |||
| 132 | time(&sysTime); | ||
| 133 | gmTime = localtime(&sysTime); | ||
| 134 | |||
| 135 | strftime(tmp, 6, "%M:%S", gmTime); | ||
| 136 | |||
| 137 | u64 milliseconds = static_cast<u64>(GetTimeMs().count()) % 1000; | ||
| 138 | return fmt::format("{}:{:03}", tmp, milliseconds); | ||
| 139 | } | ||
| 140 | |||
| 141 | // Returns a timestamp with decimals for precise time comparisons | ||
| 142 | // ---------------- | ||
| 143 | double Timer::GetDoubleTime() { | ||
| 144 | // Get continuous timestamp | ||
| 145 | auto tmp_seconds = static_cast<u64>(GetTimeSinceJan1970().count()); | ||
| 146 | const auto ms = static_cast<double>(static_cast<u64>(GetTimeMs().count()) % 1000); | ||
| 147 | |||
| 148 | // Remove a few years. We only really want enough seconds to make | ||
| 149 | // sure that we are detecting actual actions, perhaps 60 seconds is | ||
| 150 | // enough really, but I leave a year of seconds anyway, in case the | ||
| 151 | // user's clock is incorrect or something like that. | ||
| 152 | tmp_seconds = tmp_seconds - (38 * 365 * 24 * 60 * 60); | ||
| 153 | |||
| 154 | // Make a smaller integer that fits in the double | ||
| 155 | const auto seconds = static_cast<u32>(tmp_seconds); | ||
| 156 | return seconds + ms; | ||
| 157 | } | ||
| 158 | |||
| 159 | } // Namespace Common | ||
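Common::Timer is deleted here (and removed from the build above) with no direct replacement shown in this diff; code that still needs simple elapsed-time measurement can use <chrono> directly. A minimal sketch of an equivalent, offered as an assumption about how former call sites can be rewritten rather than as what this commit does:

    #include <chrono>

    class ElapsedTimer {
    public:
        void Start() {
            start = std::chrono::steady_clock::now();
        }

        // Milliseconds since Start(), roughly matching Timer::GetTimeElapsed().
        [[nodiscard]] std::chrono::milliseconds Elapsed() const {
            return std::chrono::duration_cast<std::chrono::milliseconds>(
                std::chrono::steady_clock::now() - start);
        }

    private:
        std::chrono::steady_clock::time_point start{};
    };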
diff --git a/src/common/timer.h b/src/common/timer.h
deleted file mode 100644
index 8894a143d..000000000
--- a/src/common/timer.h
+++ /dev/null
| @@ -1,41 +0,0 @@ | |||
| 1 | // Copyright 2013 Dolphin Emulator Project / 2014 Citra Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include <chrono> | ||
| 8 | #include <string> | ||
| 9 | #include "common/common_types.h" | ||
| 10 | |||
| 11 | namespace Common { | ||
| 12 | class Timer { | ||
| 13 | public: | ||
| 14 | Timer(); | ||
| 15 | |||
| 16 | void Start(); | ||
| 17 | void Stop(); | ||
| 18 | void Update(); | ||
| 19 | |||
| 20 | // The time difference is always returned in milliseconds, regardless of alternative internal | ||
| 21 | // representation | ||
| 22 | [[nodiscard]] std::chrono::milliseconds GetTimeDifference(); | ||
| 23 | void AddTimeDifference(); | ||
| 24 | |||
| 25 | [[nodiscard]] static std::chrono::seconds GetTimeSinceJan1970(); | ||
| 26 | [[nodiscard]] static std::chrono::seconds GetLocalTimeSinceJan1970(); | ||
| 27 | [[nodiscard]] static double GetDoubleTime(); | ||
| 28 | |||
| 29 | [[nodiscard]] static std::string GetTimeFormatted(); | ||
| 30 | [[nodiscard]] std::string GetTimeElapsedFormatted() const; | ||
| 31 | [[nodiscard]] std::chrono::milliseconds GetTimeElapsed(); | ||
| 32 | |||
| 33 | [[nodiscard]] static std::chrono::milliseconds GetTimeMs(); | ||
| 34 | |||
| 35 | private: | ||
| 36 | std::chrono::milliseconds m_LastTime; | ||
| 37 | std::chrono::milliseconds m_StartTime; | ||
| 38 | bool m_Running; | ||
| 39 | }; | ||
| 40 | |||
| 41 | } // Namespace Common | ||
diff --git a/src/common/tree.h b/src/common/tree.h
index a6b636646..3da49e422 100644
--- a/src/common/tree.h
+++ b/src/common/tree.h
| @@ -27,33 +27,10 @@ | |||
| 27 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 27 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 28 | */ | 28 | */ |
| 29 | 29 | ||
| 30 | #ifndef _SYS_TREE_H_ | 30 | #pragma once |
| 31 | #define _SYS_TREE_H_ | ||
| 32 | |||
| 33 | /* FreeBSD <sys/cdefs.h> has a lot of defines we don't really want. */ | ||
| 34 | /* tree.h only actually uses __inline and __unused, so we'll just define those. */ | ||
| 35 | |||
| 36 | /* #include <sys/cdefs.h> */ | ||
| 37 | |||
| 38 | #ifndef __inline | ||
| 39 | #define __inline inline | ||
| 40 | #endif | ||
| 41 | 31 | ||
| 42 | /* | 32 | /* |
| 43 | * This file defines data structures for different types of trees: | 33 | * This file defines data structures for red-black trees. |
| 44 | * splay trees and red-black trees. | ||
| 45 | * | ||
| 46 | * A splay tree is a self-organizing data structure. Every operation | ||
| 47 | * on the tree causes a splay to happen. The splay moves the requested | ||
| 48 | * node to the root of the tree and partly rebalances it. | ||
| 49 | * | ||
| 50 | * This has the benefit that request locality causes faster lookups as | ||
| 51 | * the requested nodes move to the top of the tree. On the other hand, | ||
| 52 | * every lookup causes memory writes. | ||
| 53 | * | ||
| 54 | * The Balance Theorem bounds the total access time for m operations | ||
| 55 | * and n inserts on an initially empty tree as O((m + n)lg n). The | ||
| 56 | * amortized cost for a sequence of m accesses to a splay tree is O(lg n); | ||
| 57 | * | 34 | * |
| 58 | * A red-black tree is a binary search tree with the node color as an | 35 | * A red-black tree is a binary search tree with the node color as an |
| 59 | * extra attribute. It fulfills a set of conditions: | 36 | * extra attribute. It fulfills a set of conditions: |
| @@ -66,757 +43,632 @@ | |||
| 66 | * The maximum height of a red-black tree is 2lg (n+1). | 43 | * The maximum height of a red-black tree is 2lg (n+1). |
| 67 | */ | 44 | */ |
| 68 | 45 | ||
| 69 | #define SPLAY_HEAD(name, type) \ | 46 | namespace Common { |
| 70 | struct name { \ | 47 | template <typename T> |
| 71 | struct type* sph_root; /* root of the tree */ \ | 48 | class RBHead { |
| 72 | } | 49 | public: |
| 73 | 50 | [[nodiscard]] T* Root() { | |
| 74 | #define SPLAY_INITIALIZER(root) \ | 51 | return rbh_root; |
| 75 | { NULL } | 52 | } |
| 76 | 53 | ||
| 77 | #define SPLAY_INIT(root) \ | 54 | [[nodiscard]] const T* Root() const { |
| 78 | do { \ | 55 | return rbh_root; |
| 79 | (root)->sph_root = NULL; \ | 56 | } |
| 80 | } while (/*CONSTCOND*/ 0) | 57 | |
| 81 | 58 | void SetRoot(T* root) { | |
| 82 | #define SPLAY_ENTRY(type) \ | 59 | rbh_root = root; |
| 83 | struct { \ | 60 | } |
| 84 | struct type* spe_left; /* left element */ \ | 61 | |
| 85 | struct type* spe_right; /* right element */ \ | 62 | [[nodiscard]] bool IsEmpty() const { |
| 86 | } | 63 | return Root() == nullptr; |
| 87 | 64 | } | |
| 88 | #define SPLAY_LEFT(elm, field) (elm)->field.spe_left | 65 | |
| 89 | #define SPLAY_RIGHT(elm, field) (elm)->field.spe_right | 66 | private: |
| 90 | #define SPLAY_ROOT(head) (head)->sph_root | 67 | T* rbh_root = nullptr; |
| 91 | #define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL) | 68 | }; |
| 92 | 69 | ||
| 93 | /* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */ | 70 | enum class EntryColor { |
| 94 | #define SPLAY_ROTATE_RIGHT(head, tmp, field) \ | 71 | Black, |
| 95 | do { \ | 72 | Red, |
| 96 | SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \ | 73 | }; |
| 97 | SPLAY_RIGHT(tmp, field) = (head)->sph_root; \ | 74 | |
| 98 | (head)->sph_root = tmp; \ | 75 | template <typename T> |
| 99 | } while (/*CONSTCOND*/ 0) | 76 | class RBEntry { |
| 100 | 77 | public: | |
| 101 | #define SPLAY_ROTATE_LEFT(head, tmp, field) \ | 78 | [[nodiscard]] T* Left() { |
| 102 | do { \ | 79 | return rbe_left; |
| 103 | SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \ | 80 | } |
| 104 | SPLAY_LEFT(tmp, field) = (head)->sph_root; \ | 81 | |
| 105 | (head)->sph_root = tmp; \ | 82 | [[nodiscard]] const T* Left() const { |
| 106 | } while (/*CONSTCOND*/ 0) | 83 | return rbe_left; |
| 107 | 84 | } | |
| 108 | #define SPLAY_LINKLEFT(head, tmp, field) \ | 85 | |
| 109 | do { \ | 86 | void SetLeft(T* left) { |
| 110 | SPLAY_LEFT(tmp, field) = (head)->sph_root; \ | 87 | rbe_left = left; |
| 111 | tmp = (head)->sph_root; \ | 88 | } |
| 112 | (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \ | 89 | |
| 113 | } while (/*CONSTCOND*/ 0) | 90 | [[nodiscard]] T* Right() { |
| 114 | 91 | return rbe_right; | |
| 115 | #define SPLAY_LINKRIGHT(head, tmp, field) \ | 92 | } |
| 116 | do { \ | 93 | |
| 117 | SPLAY_RIGHT(tmp, field) = (head)->sph_root; \ | 94 | [[nodiscard]] const T* Right() const { |
| 118 | tmp = (head)->sph_root; \ | 95 | return rbe_right; |
| 119 | (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \ | 96 | } |
| 120 | } while (/*CONSTCOND*/ 0) | 97 | |
| 121 | 98 | void SetRight(T* right) { | |
| 122 | #define SPLAY_ASSEMBLE(head, node, left, right, field) \ | 99 | rbe_right = right; |
| 123 | do { \ | 100 | } |
| 124 | SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \ | 101 | |
| 125 | SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field); \ | 102 | [[nodiscard]] T* Parent() { |
| 126 | SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \ | 103 | return rbe_parent; |
| 127 | SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \ | 104 | } |
| 128 | } while (/*CONSTCOND*/ 0) | 105 | |
| 129 | 106 | [[nodiscard]] const T* Parent() const { | |
| 130 | /* Generates prototypes and inline functions */ | 107 | return rbe_parent; |
| 131 | 108 | } | |
| 132 | #define SPLAY_PROTOTYPE(name, type, field, cmp) \ | 109 | |
| 133 | void name##_SPLAY(struct name*, struct type*); \ | 110 | void SetParent(T* parent) { |
| 134 | void name##_SPLAY_MINMAX(struct name*, int); \ | 111 | rbe_parent = parent; |
| 135 | struct type* name##_SPLAY_INSERT(struct name*, struct type*); \ | 112 | } |
| 136 | struct type* name##_SPLAY_REMOVE(struct name*, struct type*); \ | 113 | |
| 137 | \ | 114 | [[nodiscard]] bool IsBlack() const { |
| 138 | /* Finds the node with the same key as elm */ \ | 115 | return rbe_color == EntryColor::Black; |
| 139 | static __inline struct type* name##_SPLAY_FIND(struct name* head, struct type* elm) { \ | 116 | } |
| 140 | if (SPLAY_EMPTY(head)) \ | 117 | |
| 141 | return (NULL); \ | 118 | [[nodiscard]] bool IsRed() const { |
| 142 | name##_SPLAY(head, elm); \ | 119 | return rbe_color == EntryColor::Red; |
| 143 | if ((cmp)(elm, (head)->sph_root) == 0) \ | 120 | } |
| 144 | return (head->sph_root); \ | 121 | |
| 145 | return (NULL); \ | 122 | [[nodiscard]] EntryColor Color() const { |
| 146 | } \ | 123 | return rbe_color; |
| 147 | \ | 124 | } |
| 148 | static __inline struct type* name##_SPLAY_NEXT(struct name* head, struct type* elm) { \ | 125 | |
| 149 | name##_SPLAY(head, elm); \ | 126 | void SetColor(EntryColor color) { |
| 150 | if (SPLAY_RIGHT(elm, field) != NULL) { \ | 127 | rbe_color = color; |
| 151 | elm = SPLAY_RIGHT(elm, field); \ | 128 | } |
| 152 | while (SPLAY_LEFT(elm, field) != NULL) { \ | 129 | |
| 153 | elm = SPLAY_LEFT(elm, field); \ | 130 | private: |
| 154 | } \ | 131 | T* rbe_left = nullptr; |
| 155 | } else \ | 132 | T* rbe_right = nullptr; |
| 156 | elm = NULL; \ | 133 | T* rbe_parent = nullptr; |
| 157 | return (elm); \ | 134 | EntryColor rbe_color{}; |
| 158 | } \ | 135 | }; |
| 159 | \ | 136 | |
| 160 | static __inline struct type* name##_SPLAY_MIN_MAX(struct name* head, int val) { \ | 137 | template <typename Node> |
| 161 | name##_SPLAY_MINMAX(head, val); \ | 138 | [[nodiscard]] RBEntry<Node>& RB_ENTRY(Node* node) { |
| 162 | return (SPLAY_ROOT(head)); \ | 139 | return node->GetEntry(); |
| 163 | } | 140 | } |
| 164 | 141 | ||
| 165 | /* Main splay operation. | 142 | template <typename Node> |
| 166 | * Moves node close to the key of elm to top | 143 | [[nodiscard]] const RBEntry<Node>& RB_ENTRY(const Node* node) { |
| 167 | */ | 144 | return node->GetEntry(); |
| 168 | #define SPLAY_GENERATE(name, type, field, cmp) \ | 145 | } |
| 169 | struct type* name##_SPLAY_INSERT(struct name* head, struct type* elm) { \ | 146 | |
| 170 | if (SPLAY_EMPTY(head)) { \ | 147 | template <typename Node> |
| 171 | SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \ | 148 | [[nodiscard]] Node* RB_PARENT(Node* node) { |
| 172 | } else { \ | 149 | return RB_ENTRY(node).Parent(); |
| 173 | int __comp; \ | 150 | } |
| 174 | name##_SPLAY(head, elm); \ | 151 | |
| 175 | __comp = (cmp)(elm, (head)->sph_root); \ | 152 | template <typename Node> |
| 176 | if (__comp < 0) { \ | 153 | [[nodiscard]] const Node* RB_PARENT(const Node* node) { |
| 177 | SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field); \ | 154 | return RB_ENTRY(node).Parent(); |
| 178 | SPLAY_RIGHT(elm, field) = (head)->sph_root; \ | 155 | } |
| 179 | SPLAY_LEFT((head)->sph_root, field) = NULL; \ | 156 | |
| 180 | } else if (__comp > 0) { \ | 157 | template <typename Node> |
| 181 | SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field); \ | 158 | void RB_SET_PARENT(Node* node, Node* parent) { |
| 182 | SPLAY_LEFT(elm, field) = (head)->sph_root; \ | 159 | return RB_ENTRY(node).SetParent(parent); |
| 183 | SPLAY_RIGHT((head)->sph_root, field) = NULL; \ | 160 | } |
| 184 | } else \ | 161 | |
| 185 | return ((head)->sph_root); \ | 162 | template <typename Node> |
| 186 | } \ | 163 | [[nodiscard]] Node* RB_LEFT(Node* node) { |
| 187 | (head)->sph_root = (elm); \ | 164 | return RB_ENTRY(node).Left(); |
| 188 | return (NULL); \ | 165 | } |
| 189 | } \ | 166 | |
| 190 | \ | 167 | template <typename Node> |
| 191 | struct type* name##_SPLAY_REMOVE(struct name* head, struct type* elm) { \ | 168 | [[nodiscard]] const Node* RB_LEFT(const Node* node) { |
| 192 | struct type* __tmp; \ | 169 | return RB_ENTRY(node).Left(); |
| 193 | if (SPLAY_EMPTY(head)) \ | 170 | } |
| 194 | return (NULL); \ | 171 | |
| 195 | name##_SPLAY(head, elm); \ | 172 | template <typename Node> |
| 196 | if ((cmp)(elm, (head)->sph_root) == 0) { \ | 173 | void RB_SET_LEFT(Node* node, Node* left) { |
| 197 | if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \ | 174 | return RB_ENTRY(node).SetLeft(left); |
| 198 | (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \ | 175 | } |
| 199 | } else { \ | 176 | |
| 200 | __tmp = SPLAY_RIGHT((head)->sph_root, field); \ | 177 | template <typename Node> |
| 201 | (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \ | 178 | [[nodiscard]] Node* RB_RIGHT(Node* node) { |
| 202 | name##_SPLAY(head, elm); \ | 179 | return RB_ENTRY(node).Right(); |
| 203 | SPLAY_RIGHT((head)->sph_root, field) = __tmp; \ | 180 | } |
| 204 | } \ | 181 | |
| 205 | return (elm); \ | 182 | template <typename Node> |
| 206 | } \ | 183 | [[nodiscard]] const Node* RB_RIGHT(const Node* node) { |
| 207 | return (NULL); \ | 184 | return RB_ENTRY(node).Right(); |
| 208 | } \ | 185 | } |
| 209 | \ | 186 | |
| 210 | void name##_SPLAY(struct name* head, struct type* elm) { \ | 187 | template <typename Node> |
| 211 | struct type __node, *__left, *__right, *__tmp; \ | 188 | void RB_SET_RIGHT(Node* node, Node* right) { |
| 212 | int __comp; \ | 189 | return RB_ENTRY(node).SetRight(right); |
| 213 | \ | 190 | } |
| 214 | SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL; \ | 191 | |
| 215 | __left = __right = &__node; \ | 192 | template <typename Node> |
| 216 | \ | 193 | [[nodiscard]] bool RB_IS_BLACK(const Node* node) { |
| 217 | while ((__comp = (cmp)(elm, (head)->sph_root)) != 0) { \ | 194 | return RB_ENTRY(node).IsBlack(); |
| 218 | if (__comp < 0) { \ | 195 | } |
| 219 | __tmp = SPLAY_LEFT((head)->sph_root, field); \ | 196 | |
| 220 | if (__tmp == NULL) \ | 197 | template <typename Node> |
| 221 | break; \ | 198 | [[nodiscard]] bool RB_IS_RED(const Node* node) { |
| 222 | if ((cmp)(elm, __tmp) < 0) { \ | 199 | return RB_ENTRY(node).IsRed(); |
| 223 | SPLAY_ROTATE_RIGHT(head, __tmp, field); \ | 200 | } |
| 224 | if (SPLAY_LEFT((head)->sph_root, field) == NULL) \ | 201 | |
| 225 | break; \ | 202 | template <typename Node> |
| 226 | } \ | 203 | [[nodiscard]] EntryColor RB_COLOR(const Node* node) { |
| 227 | SPLAY_LINKLEFT(head, __right, field); \ | 204 | return RB_ENTRY(node).Color(); |
| 228 | } else if (__comp > 0) { \ | 205 | } |
| 229 | __tmp = SPLAY_RIGHT((head)->sph_root, field); \ | 206 | |
| 230 | if (__tmp == NULL) \ | 207 | template <typename Node> |
| 231 | break; \ | 208 | void RB_SET_COLOR(Node* node, EntryColor color) { |
| 232 | if ((cmp)(elm, __tmp) > 0) { \ | 209 | return RB_ENTRY(node).SetColor(color); |
| 233 | SPLAY_ROTATE_LEFT(head, __tmp, field); \ | 210 | } |
| 234 | if (SPLAY_RIGHT((head)->sph_root, field) == NULL) \ | 211 | |
| 235 | break; \ | 212 | template <typename Node> |
| 236 | } \ | 213 | void RB_SET(Node* node, Node* parent) { |
| 237 | SPLAY_LINKRIGHT(head, __left, field); \ | 214 | auto& entry = RB_ENTRY(node); |
| 238 | } \ | 215 | entry.SetParent(parent); |
| 239 | } \ | 216 | entry.SetLeft(nullptr); |
| 240 | SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \ | 217 | entry.SetRight(nullptr); |
| 241 | } \ | 218 | entry.SetColor(EntryColor::Red); |
| 242 | \ | 219 | } |
| 243 | /* Splay with either the minimum or the maximum element \ | 220 | |
| 244 | * Used to find minimum or maximum element in tree. \ | 221 | template <typename Node> |
| 245 | */ \ | 222 | void RB_SET_BLACKRED(Node* black, Node* red) { |
| 246 | void name##_SPLAY_MINMAX(struct name* head, int __comp) { \ | 223 | RB_SET_COLOR(black, EntryColor::Black); |
| 247 | struct type __node, *__left, *__right, *__tmp; \ | 224 | RB_SET_COLOR(red, EntryColor::Red); |
| 248 | \ | 225 | } |
| 249 | SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL; \ | 226 | |
| 250 | __left = __right = &__node; \ | 227 | template <typename Node> |
| 251 | \ | 228 | void RB_ROTATE_LEFT(RBHead<Node>* head, Node* elm, Node*& tmp) { |
| 252 | while (1) { \ | 229 | tmp = RB_RIGHT(elm); |
| 253 | if (__comp < 0) { \ | 230 | RB_SET_RIGHT(elm, RB_LEFT(tmp)); |
| 254 | __tmp = SPLAY_LEFT((head)->sph_root, field); \ | 231 | if (RB_RIGHT(elm) != nullptr) { |
| 255 | if (__tmp == NULL) \ | 232 | RB_SET_PARENT(RB_LEFT(tmp), elm); |
| 256 | break; \ | 233 | } |
| 257 | if (__comp < 0) { \ | 234 | |
| 258 | SPLAY_ROTATE_RIGHT(head, __tmp, field); \ | 235 | RB_SET_PARENT(tmp, RB_PARENT(elm)); |
| 259 | if (SPLAY_LEFT((head)->sph_root, field) == NULL) \ | 236 | if (RB_PARENT(tmp) != nullptr) { |
| 260 | break; \ | 237 | if (elm == RB_LEFT(RB_PARENT(elm))) { |
| 261 | } \ | 238 | RB_SET_LEFT(RB_PARENT(elm), tmp); |
| 262 | SPLAY_LINKLEFT(head, __right, field); \ | 239 | } else { |
| 263 | } else if (__comp > 0) { \ | 240 | RB_SET_RIGHT(RB_PARENT(elm), tmp); |
| 264 | __tmp = SPLAY_RIGHT((head)->sph_root, field); \ | 241 | } |
| 265 | if (__tmp == NULL) \ | 242 | } else { |
| 266 | break; \ | 243 | head->SetRoot(tmp); |
| 267 | if (__comp > 0) { \ | 244 | } |
| 268 | SPLAY_ROTATE_LEFT(head, __tmp, field); \ | 245 | |
| 269 | if (SPLAY_RIGHT((head)->sph_root, field) == NULL) \ | 246 | RB_SET_LEFT(tmp, elm); |
| 270 | break; \ | 247 | RB_SET_PARENT(elm, tmp); |
| 271 | } \ | 248 | } |
| 272 | SPLAY_LINKRIGHT(head, __left, field); \ | 249 | |
| 273 | } \ | 250 | template <typename Node> |
| 274 | } \ | 251 | void RB_ROTATE_RIGHT(RBHead<Node>* head, Node* elm, Node*& tmp) { |
| 275 | SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \ | 252 | tmp = RB_LEFT(elm); |
| 276 | } | 253 | RB_SET_LEFT(elm, RB_RIGHT(tmp)); |
| 277 | 254 | if (RB_LEFT(elm) != nullptr) { | |
| 278 | #define SPLAY_NEGINF -1 | 255 | RB_SET_PARENT(RB_RIGHT(tmp), elm); |
| 279 | #define SPLAY_INF 1 | 256 | } |
| 280 | 257 | ||
| 281 | #define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y) | 258 | RB_SET_PARENT(tmp, RB_PARENT(elm)); |
| 282 | #define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y) | 259 | if (RB_PARENT(tmp) != nullptr) { |
| 283 | #define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y) | 260 | if (elm == RB_LEFT(RB_PARENT(elm))) { |
| 284 | #define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y) | 261 | RB_SET_LEFT(RB_PARENT(elm), tmp); |
| 285 | #define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL : name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF)) | 262 | } else { |
| 286 | #define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL : name##_SPLAY_MIN_MAX(x, SPLAY_INF)) | 263 | RB_SET_RIGHT(RB_PARENT(elm), tmp); |
| 287 | 264 | } | |
| 288 | #define SPLAY_FOREACH(x, name, head) \ | 265 | } else { |
| 289 | for ((x) = SPLAY_MIN(name, head); (x) != NULL; (x) = SPLAY_NEXT(name, head, x)) | 266 | head->SetRoot(tmp); |
| 290 | 267 | } | |
| 291 | /* Macros that define a red-black tree */ | 268 | |
| 292 | #define RB_HEAD(name, type) \ | 269 | RB_SET_RIGHT(tmp, elm); |
| 293 | struct name { \ | 270 | RB_SET_PARENT(elm, tmp); |
| 294 | struct type* rbh_root; /* root of the tree */ \ | 271 | } |
| 295 | } | 272 | |
| 296 | 273 | template <typename Node> | |
| 297 | #define RB_INITIALIZER(root) \ | 274 | void RB_INSERT_COLOR(RBHead<Node>* head, Node* elm) { |
| 298 | { NULL } | 275 | Node* parent = nullptr; |
| 299 | 276 | Node* tmp = nullptr; | |
| 300 | #define RB_INIT(root) \ | 277 | |
| 301 | do { \ | 278 | while ((parent = RB_PARENT(elm)) != nullptr && RB_IS_RED(parent)) { |
| 302 | (root)->rbh_root = NULL; \ | 279 | Node* gparent = RB_PARENT(parent); |
| 303 | } while (/*CONSTCOND*/ 0) | 280 | if (parent == RB_LEFT(gparent)) { |
| 304 | 281 | tmp = RB_RIGHT(gparent); | |
| 305 | #define RB_BLACK 0 | 282 | if (tmp && RB_IS_RED(tmp)) { |
| 306 | #define RB_RED 1 | 283 | RB_SET_COLOR(tmp, EntryColor::Black); |
| 307 | #define RB_ENTRY(type) \ | 284 | RB_SET_BLACKRED(parent, gparent); |
| 308 | struct { \ | 285 | elm = gparent; |
| 309 | struct type* rbe_left; /* left element */ \ | 286 | continue; |
| 310 | struct type* rbe_right; /* right element */ \ | 287 | } |
| 311 | struct type* rbe_parent; /* parent element */ \ | 288 | |
| 312 | int rbe_color; /* node color */ \ | 289 | if (RB_RIGHT(parent) == elm) { |
| 313 | } | 290 | RB_ROTATE_LEFT(head, parent, tmp); |
| 314 | 291 | tmp = parent; | |
| 315 | #define RB_LEFT(elm, field) (elm)->field.rbe_left | 292 | parent = elm; |
| 316 | #define RB_RIGHT(elm, field) (elm)->field.rbe_right | 293 | elm = tmp; |
| 317 | #define RB_PARENT(elm, field) (elm)->field.rbe_parent | 294 | } |
| 318 | #define RB_COLOR(elm, field) (elm)->field.rbe_color | 295 | |
| 319 | #define RB_ROOT(head) (head)->rbh_root | 296 | RB_SET_BLACKRED(parent, gparent); |
| 320 | #define RB_EMPTY(head) (RB_ROOT(head) == NULL) | 297 | RB_ROTATE_RIGHT(head, gparent, tmp); |
| 321 | 298 | } else { | |
| 322 | #define RB_SET(elm, parent, field) \ | 299 | tmp = RB_LEFT(gparent); |
| 323 | do { \ | 300 | if (tmp && RB_IS_RED(tmp)) { |
| 324 | RB_PARENT(elm, field) = parent; \ | 301 | RB_SET_COLOR(tmp, EntryColor::Black); |
| 325 | RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \ | 302 | RB_SET_BLACKRED(parent, gparent); |
| 326 | RB_COLOR(elm, field) = RB_RED; \ | 303 | elm = gparent; |
| 327 | } while (/*CONSTCOND*/ 0) | 304 | continue; |
| 328 | 305 | } | |
| 329 | #define RB_SET_BLACKRED(black, red, field) \ | 306 | |
| 330 | do { \ | 307 | if (RB_LEFT(parent) == elm) { |
| 331 | RB_COLOR(black, field) = RB_BLACK; \ | 308 | RB_ROTATE_RIGHT(head, parent, tmp); |
| 332 | RB_COLOR(red, field) = RB_RED; \ | 309 | tmp = parent; |
| 333 | } while (/*CONSTCOND*/ 0) | 310 | parent = elm; |
| 334 | 311 | elm = tmp; | |
| 335 | #ifndef RB_AUGMENT | 312 | } |
| 336 | #define RB_AUGMENT(x) \ | 313 | |
| 337 | do { \ | 314 | RB_SET_BLACKRED(parent, gparent); |
| 338 | } while (0) | 315 | RB_ROTATE_LEFT(head, gparent, tmp); |
| 339 | #endif | 316 | } |
| 340 | 317 | } | |
| 341 | #define RB_ROTATE_LEFT(head, elm, tmp, field) \ | 318 | |
| 342 | do { \ | 319 | RB_SET_COLOR(head->Root(), EntryColor::Black); |
| 343 | (tmp) = RB_RIGHT(elm, field); \ | 320 | } |
| 344 | if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field)) != NULL) { \ | 321 | |
| 345 | RB_PARENT(RB_LEFT(tmp, field), field) = (elm); \ | 322 | template <typename Node> |
| 346 | } \ | 323 | void RB_REMOVE_COLOR(RBHead<Node>* head, Node* parent, Node* elm) { |
| 347 | RB_AUGMENT(elm); \ | 324 | Node* tmp; |
| 348 | if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \ | 325 | while ((elm == nullptr || RB_IS_BLACK(elm)) && elm != head->Root()) { |
| 349 | if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \ | 326 | if (RB_LEFT(parent) == elm) { |
| 350 | RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \ | 327 | tmp = RB_RIGHT(parent); |
| 351 | else \ | 328 | if (RB_IS_RED(tmp)) { |
| 352 | RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \ | 329 | RB_SET_BLACKRED(tmp, parent); |
| 353 | } else \ | 330 | RB_ROTATE_LEFT(head, parent, tmp); |
| 354 | (head)->rbh_root = (tmp); \ | 331 | tmp = RB_RIGHT(parent); |
| 355 | RB_LEFT(tmp, field) = (elm); \ | 332 | } |
| 356 | RB_PARENT(elm, field) = (tmp); \ | 333 | |
| 357 | RB_AUGMENT(tmp); \ | 334 | if ((RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) && |
| 358 | if ((RB_PARENT(tmp, field))) \ | 335 | (RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp)))) { |
| 359 | RB_AUGMENT(RB_PARENT(tmp, field)); \ | 336 | RB_SET_COLOR(tmp, EntryColor::Red); |
| 360 | } while (/*CONSTCOND*/ 0) | 337 | elm = parent; |
| 361 | 338 | parent = RB_PARENT(elm); | |
| 362 | #define RB_ROTATE_RIGHT(head, elm, tmp, field) \ | 339 | } else { |
| 363 | do { \ | 340 | if (RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp))) { |
| 364 | (tmp) = RB_LEFT(elm, field); \ | 341 | Node* oleft; |
| 365 | if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field)) != NULL) { \ | 342 | if ((oleft = RB_LEFT(tmp)) != nullptr) { |
| 366 | RB_PARENT(RB_RIGHT(tmp, field), field) = (elm); \ | 343 | RB_SET_COLOR(oleft, EntryColor::Black); |
| 367 | } \ | 344 | } |
| 368 | RB_AUGMENT(elm); \ | 345 | |
| 369 | if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \ | 346 | RB_SET_COLOR(tmp, EntryColor::Red); |
| 370 | if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \ | 347 | RB_ROTATE_RIGHT(head, tmp, oleft); |
| 371 | RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \ | 348 | tmp = RB_RIGHT(parent); |
| 372 | else \ | 349 | } |
| 373 | RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \ | 350 | |
| 374 | } else \ | 351 | RB_SET_COLOR(tmp, RB_COLOR(parent)); |
| 375 | (head)->rbh_root = (tmp); \ | 352 | RB_SET_COLOR(parent, EntryColor::Black); |
| 376 | RB_RIGHT(tmp, field) = (elm); \ | 353 | if (RB_RIGHT(tmp)) { |
| 377 | RB_PARENT(elm, field) = (tmp); \ | 354 | RB_SET_COLOR(RB_RIGHT(tmp), EntryColor::Black); |
| 378 | RB_AUGMENT(tmp); \ | 355 | } |
| 379 | if ((RB_PARENT(tmp, field))) \ | 356 | |
| 380 | RB_AUGMENT(RB_PARENT(tmp, field)); \ | 357 | RB_ROTATE_LEFT(head, parent, tmp); |
| 381 | } while (/*CONSTCOND*/ 0) | 358 | elm = head->Root(); |
| 382 | 359 | break; | |
| 383 | /* Generates prototypes and inline functions */ | 360 | } |
| 384 | #define RB_PROTOTYPE(name, type, field, cmp) RB_PROTOTYPE_INTERNAL(name, type, field, cmp, ) | 361 | } else { |
| 385 | #define RB_PROTOTYPE_STATIC(name, type, field, cmp) \ | 362 | tmp = RB_LEFT(parent); |
| 386 | RB_PROTOTYPE_INTERNAL(name, type, field, cmp, static) | 363 | if (RB_IS_RED(tmp)) { |
| 387 | #define RB_PROTOTYPE_INTERNAL(name, type, field, cmp, attr) \ | 364 | RB_SET_BLACKRED(tmp, parent); |
| 388 | RB_PROTOTYPE_INSERT_COLOR(name, type, attr); \ | 365 | RB_ROTATE_RIGHT(head, parent, tmp); |
| 389 | RB_PROTOTYPE_REMOVE_COLOR(name, type, attr); \ | 366 | tmp = RB_LEFT(parent); |
| 390 | RB_PROTOTYPE_INSERT(name, type, attr); \ | 367 | } |
| 391 | RB_PROTOTYPE_REMOVE(name, type, attr); \ | 368 | |
| 392 | RB_PROTOTYPE_FIND(name, type, attr); \ | 369 | if ((RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) && |
| 393 | RB_PROTOTYPE_NFIND(name, type, attr); \ | 370 | (RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp)))) { |
| 394 | RB_PROTOTYPE_FIND_LIGHT(name, type, attr); \ | 371 | RB_SET_COLOR(tmp, EntryColor::Red); |
| 395 | RB_PROTOTYPE_NFIND_LIGHT(name, type, attr); \ | 372 | elm = parent; |
| 396 | RB_PROTOTYPE_NEXT(name, type, attr); \ | 373 | parent = RB_PARENT(elm); |
| 397 | RB_PROTOTYPE_PREV(name, type, attr); \ | 374 | } else { |
| 398 | RB_PROTOTYPE_MINMAX(name, type, attr); | 375 | if (RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) { |
| 399 | #define RB_PROTOTYPE_INSERT_COLOR(name, type, attr) \ | 376 | Node* oright; |
| 400 | attr void name##_RB_INSERT_COLOR(struct name*, struct type*) | 377 | if ((oright = RB_RIGHT(tmp)) != nullptr) { |
| 401 | #define RB_PROTOTYPE_REMOVE_COLOR(name, type, attr) \ | 378 | RB_SET_COLOR(oright, EntryColor::Black); |
| 402 | attr void name##_RB_REMOVE_COLOR(struct name*, struct type*, struct type*) | 379 | } |
| 403 | #define RB_PROTOTYPE_REMOVE(name, type, attr) \ | 380 | |
| 404 | attr struct type* name##_RB_REMOVE(struct name*, struct type*) | 381 | RB_SET_COLOR(tmp, EntryColor::Red); |
| 405 | #define RB_PROTOTYPE_INSERT(name, type, attr) \ | 382 | RB_ROTATE_LEFT(head, tmp, oright); |
| 406 | attr struct type* name##_RB_INSERT(struct name*, struct type*) | 383 | tmp = RB_LEFT(parent); |
| 407 | #define RB_PROTOTYPE_FIND(name, type, attr) \ | 384 | } |
| 408 | attr struct type* name##_RB_FIND(struct name*, struct type*) | 385 | |
| 409 | #define RB_PROTOTYPE_NFIND(name, type, attr) \ | 386 | RB_SET_COLOR(tmp, RB_COLOR(parent)); |
| 410 | attr struct type* name##_RB_NFIND(struct name*, struct type*) | 387 | RB_SET_COLOR(parent, EntryColor::Black); |
| 411 | #define RB_PROTOTYPE_FIND_LIGHT(name, type, attr) \ | 388 | |
| 412 | attr struct type* name##_RB_FIND_LIGHT(struct name*, const void*) | 389 | if (RB_LEFT(tmp)) { |
| 413 | #define RB_PROTOTYPE_NFIND_LIGHT(name, type, attr) \ | 390 | RB_SET_COLOR(RB_LEFT(tmp), EntryColor::Black); |
| 414 | attr struct type* name##_RB_NFIND_LIGHT(struct name*, const void*) | 391 | } |
| 415 | #define RB_PROTOTYPE_NEXT(name, type, attr) attr struct type* name##_RB_NEXT(struct type*) | 392 | |
| 416 | #define RB_PROTOTYPE_PREV(name, type, attr) attr struct type* name##_RB_PREV(struct type*) | 393 | RB_ROTATE_RIGHT(head, parent, tmp); |
| 417 | #define RB_PROTOTYPE_MINMAX(name, type, attr) attr struct type* name##_RB_MINMAX(struct name*, int) | 394 | elm = head->Root(); |
| 418 | 395 | break; | |
| 419 | /* Main rb operation. | 396 | } |
| 420 | * Moves node close to the key of elm to top | 397 | } |
| 421 | */ | 398 | } |
| 422 | #define RB_GENERATE_WITHOUT_COMPARE(name, type, field) \ | 399 | |
| 423 | RB_GENERATE_WITHOUT_COMPARE_INTERNAL(name, type, field, ) | 400 | if (elm) { |
| 424 | #define RB_GENERATE_WITHOUT_COMPARE_STATIC(name, type, field) \ | 401 | RB_SET_COLOR(elm, EntryColor::Black); |
| 425 | RB_GENERATE_WITHOUT_COMPARE_INTERNAL(name, type, field, static) | 402 | } |
| 426 | #define RB_GENERATE_WITHOUT_COMPARE_INTERNAL(name, type, field, attr) \ | 403 | } |
| 427 | RB_GENERATE_REMOVE_COLOR(name, type, field, attr) \ | 404 | |
| 428 | RB_GENERATE_REMOVE(name, type, field, attr) \ | 405 | template <typename Node> |
| 429 | RB_GENERATE_NEXT(name, type, field, attr) \ | 406 | Node* RB_REMOVE(RBHead<Node>* head, Node* elm) { |
| 430 | RB_GENERATE_PREV(name, type, field, attr) \ | 407 | Node* child = nullptr; |
| 431 | RB_GENERATE_MINMAX(name, type, field, attr) | 408 | Node* parent = nullptr; |
| 432 | 409 | Node* old = elm; | |
| 433 | #define RB_GENERATE_WITH_COMPARE(name, type, field, cmp, lcmp) \ | 410 | EntryColor color{}; |
| 434 | RB_GENERATE_WITH_COMPARE_INTERNAL(name, type, field, cmp, lcmp, ) | 411 | |
| 435 | #define RB_GENERATE_WITH_COMPARE_STATIC(name, type, field, cmp, lcmp) \ | 412 | const auto finalize = [&] { |
| 436 | RB_GENERATE_WITH_COMPARE_INTERNAL(name, type, field, cmp, lcmp, static) | 413 | if (color == EntryColor::Black) { |
| 437 | #define RB_GENERATE_WITH_COMPARE_INTERNAL(name, type, field, cmp, lcmp, attr) \ | 414 | RB_REMOVE_COLOR(head, parent, child); |
| 438 | RB_GENERATE_INSERT_COLOR(name, type, field, attr) \ | 415 | } |
| 439 | RB_GENERATE_INSERT(name, type, field, cmp, attr) \ | 416 | |
| 440 | RB_GENERATE_FIND(name, type, field, cmp, attr) \ | 417 | return old; |
| 441 | RB_GENERATE_NFIND(name, type, field, cmp, attr) \ | 418 | }; |
| 442 | RB_GENERATE_FIND_LIGHT(name, type, field, lcmp, attr) \ | 419 | |
| 443 | RB_GENERATE_NFIND_LIGHT(name, type, field, lcmp, attr) | 420 | if (RB_LEFT(elm) == nullptr) { |
| 444 | 421 | child = RB_RIGHT(elm); | |
| 445 | #define RB_GENERATE_ALL(name, type, field, cmp) RB_GENERATE_ALL_INTERNAL(name, type, field, cmp, ) | 422 | } else if (RB_RIGHT(elm) == nullptr) { |
| 446 | #define RB_GENERATE_ALL_STATIC(name, type, field, cmp) \ | 423 | child = RB_LEFT(elm); |
| 447 | RB_GENERATE_ALL_INTERNAL(name, type, field, cmp, static) | 424 | } else { |
| 448 | #define RB_GENERATE_ALL_INTERNAL(name, type, field, cmp, attr) \ | 425 | Node* left; |
| 449 | RB_GENERATE_WITHOUT_COMPARE_INTERNAL(name, type, field, attr) \ | 426 | elm = RB_RIGHT(elm); |
| 450 | RB_GENERATE_WITH_COMPARE_INTERNAL(name, type, field, cmp, attr) | 427 | while ((left = RB_LEFT(elm)) != nullptr) { |
| 451 | 428 | elm = left; | |
| 452 | #define RB_GENERATE_INSERT_COLOR(name, type, field, attr) \ | 429 | } |
| 453 | attr void name##_RB_INSERT_COLOR(struct name* head, struct type* elm) { \ | 430 | |
| 454 | struct type *parent, *gparent, *tmp; \ | 431 | child = RB_RIGHT(elm); |
| 455 | while ((parent = RB_PARENT(elm, field)) != NULL && RB_COLOR(parent, field) == RB_RED) { \ | 432 | parent = RB_PARENT(elm); |
| 456 | gparent = RB_PARENT(parent, field); \ | 433 | color = RB_COLOR(elm); |
| 457 | if (parent == RB_LEFT(gparent, field)) { \ | 434 | |
| 458 | tmp = RB_RIGHT(gparent, field); \ | 435 | if (child) { |
| 459 | if (tmp && RB_COLOR(tmp, field) == RB_RED) { \ | 436 | RB_SET_PARENT(child, parent); |
| 460 | RB_COLOR(tmp, field) = RB_BLACK; \ | 437 | } |
| 461 | RB_SET_BLACKRED(parent, gparent, field); \ | 438 | if (parent) { |
| 462 | elm = gparent; \ | 439 | if (RB_LEFT(parent) == elm) { |
| 463 | continue; \ | 440 | RB_SET_LEFT(parent, child); |
| 464 | } \ | 441 | } else { |
| 465 | if (RB_RIGHT(parent, field) == elm) { \ | 442 | RB_SET_RIGHT(parent, child); |
| 466 | RB_ROTATE_LEFT(head, parent, tmp, field); \ | 443 | } |
| 467 | tmp = parent; \ | 444 | } else { |
| 468 | parent = elm; \ | 445 | head->SetRoot(child); |
| 469 | elm = tmp; \ | 446 | } |
| 470 | } \ | 447 | |
| 471 | RB_SET_BLACKRED(parent, gparent, field); \ | 448 | if (RB_PARENT(elm) == old) { |
| 472 | RB_ROTATE_RIGHT(head, gparent, tmp, field); \ | 449 | parent = elm; |
| 473 | } else { \ | 450 | } |
| 474 | tmp = RB_LEFT(gparent, field); \ | 451 | |
| 475 | if (tmp && RB_COLOR(tmp, field) == RB_RED) { \ | 452 | elm->SetEntry(old->GetEntry()); |
| 476 | RB_COLOR(tmp, field) = RB_BLACK; \ | 453 | |
| 477 | RB_SET_BLACKRED(parent, gparent, field); \ | 454 | if (RB_PARENT(old)) { |
| 478 | elm = gparent; \ | 455 | if (RB_LEFT(RB_PARENT(old)) == old) { |
| 479 | continue; \ | 456 | RB_SET_LEFT(RB_PARENT(old), elm); |
| 480 | } \ | 457 | } else { |
| 481 | if (RB_LEFT(parent, field) == elm) { \ | 458 | RB_SET_RIGHT(RB_PARENT(old), elm); |
| 482 | RB_ROTATE_RIGHT(head, parent, tmp, field); \ | 459 | } |
| 483 | tmp = parent; \ | 460 | } else { |
| 484 | parent = elm; \ | 461 | head->SetRoot(elm); |
| 485 | elm = tmp; \ | 462 | } |
| 486 | } \ | 463 | RB_SET_PARENT(RB_LEFT(old), elm); |
| 487 | RB_SET_BLACKRED(parent, gparent, field); \ | 464 | if (RB_RIGHT(old)) { |
| 488 | RB_ROTATE_LEFT(head, gparent, tmp, field); \ | 465 | RB_SET_PARENT(RB_RIGHT(old), elm); |
| 489 | } \ | 466 | } |
| 490 | } \ | 467 | if (parent) { |
| 491 | RB_COLOR(head->rbh_root, field) = RB_BLACK; \ | 468 | left = parent; |
| 492 | } | 469 | } |
| 493 | 470 | ||
| 494 | #define RB_GENERATE_REMOVE_COLOR(name, type, field, attr) \ | 471 | return finalize(); |
| 495 | attr void name##_RB_REMOVE_COLOR(struct name* head, struct type* parent, struct type* elm) { \ | 472 | } |
| 496 | struct type* tmp; \ | 473 | |
| 497 | while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) && elm != RB_ROOT(head)) { \ | 474 | parent = RB_PARENT(elm); |
| 498 | if (RB_LEFT(parent, field) == elm) { \ | 475 | color = RB_COLOR(elm); |
| 499 | tmp = RB_RIGHT(parent, field); \ | 476 | |
| 500 | if (RB_COLOR(tmp, field) == RB_RED) { \ | 477 | if (child) { |
| 501 | RB_SET_BLACKRED(tmp, parent, field); \ | 478 | RB_SET_PARENT(child, parent); |
| 502 | RB_ROTATE_LEFT(head, parent, tmp, field); \ | 479 | } |
| 503 | tmp = RB_RIGHT(parent, field); \ | 480 | if (parent) { |
| 504 | } \ | 481 | if (RB_LEFT(parent) == elm) { |
| 505 | if ((RB_LEFT(tmp, field) == NULL || \ | 482 | RB_SET_LEFT(parent, child); |
| 506 | RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) && \ | 483 | } else { |
| 507 | (RB_RIGHT(tmp, field) == NULL || \ | 484 | RB_SET_RIGHT(parent, child); |
| 508 | RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) { \ | 485 | } |
| 509 | RB_COLOR(tmp, field) = RB_RED; \ | 486 | } else { |
| 510 | elm = parent; \ | 487 | head->SetRoot(child); |
| 511 | parent = RB_PARENT(elm, field); \ | 488 | } |
| 512 | } else { \ | 489 | |
| 513 | if (RB_RIGHT(tmp, field) == NULL || \ | 490 | return finalize(); |
| 514 | RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) { \ | 491 | } |
| 515 | struct type* oleft; \ | 492 | |
| 516 | if ((oleft = RB_LEFT(tmp, field)) != NULL) \ | 493 | // Inserts a node into the RB tree |
| 517 | RB_COLOR(oleft, field) = RB_BLACK; \ | 494 | template <typename Node, typename CompareFunction> |
| 518 | RB_COLOR(tmp, field) = RB_RED; \ | 495 | Node* RB_INSERT(RBHead<Node>* head, Node* elm, CompareFunction cmp) { |
| 519 | RB_ROTATE_RIGHT(head, tmp, oleft, field); \ | 496 | Node* parent = nullptr; |
| 520 | tmp = RB_RIGHT(parent, field); \ | 497 | Node* tmp = head->Root(); |
| 521 | } \ | 498 | int comp = 0; |
| 522 | RB_COLOR(tmp, field) = RB_COLOR(parent, field); \ | 499 | |
| 523 | RB_COLOR(parent, field) = RB_BLACK; \ | 500 | while (tmp) { |
| 524 | if (RB_RIGHT(tmp, field)) \ | 501 | parent = tmp; |
| 525 | RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK; \ | 502 | comp = cmp(elm, parent); |
| 526 | RB_ROTATE_LEFT(head, parent, tmp, field); \ | 503 | if (comp < 0) { |
| 527 | elm = RB_ROOT(head); \ | 504 | tmp = RB_LEFT(tmp); |
| 528 | break; \ | 505 | } else if (comp > 0) { |
| 529 | } \ | 506 | tmp = RB_RIGHT(tmp); |
| 530 | } else { \ | 507 | } else { |
| 531 | tmp = RB_LEFT(parent, field); \ | 508 | return tmp; |
| 532 | if (RB_COLOR(tmp, field) == RB_RED) { \ | 509 | } |
| 533 | RB_SET_BLACKRED(tmp, parent, field); \ | 510 | } |
| 534 | RB_ROTATE_RIGHT(head, parent, tmp, field); \ | 511 | |
| 535 | tmp = RB_LEFT(parent, field); \ | 512 | RB_SET(elm, parent); |
| 536 | } \ | 513 | |
| 537 | if ((RB_LEFT(tmp, field) == NULL || \ | 514 | if (parent != nullptr) { |
| 538 | RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) && \ | 515 | if (comp < 0) { |
| 539 | (RB_RIGHT(tmp, field) == NULL || \ | 516 | RB_SET_LEFT(parent, elm); |
| 540 | RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) { \ | 517 | } else { |
| 541 | RB_COLOR(tmp, field) = RB_RED; \ | 518 | RB_SET_RIGHT(parent, elm); |
| 542 | elm = parent; \ | 519 | } |
| 543 | parent = RB_PARENT(elm, field); \ | 520 | } else { |
| 544 | } else { \ | 521 | head->SetRoot(elm); |
| 545 | if (RB_LEFT(tmp, field) == NULL || \ | 522 | } |
| 546 | RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) { \ | 523 | |
| 547 | struct type* oright; \ | 524 | RB_INSERT_COLOR(head, elm); |
| 548 | if ((oright = RB_RIGHT(tmp, field)) != NULL) \ | 525 | return nullptr; |
| 549 | RB_COLOR(oright, field) = RB_BLACK; \ | 526 | } |
| 550 | RB_COLOR(tmp, field) = RB_RED; \ | 527 | |
| 551 | RB_ROTATE_LEFT(head, tmp, oright, field); \ | 528 | // Finds the node with the same key as elm |
| 552 | tmp = RB_LEFT(parent, field); \ | 529 | template <typename Node, typename CompareFunction> |
| 553 | } \ | 530 | Node* RB_FIND(RBHead<Node>* head, Node* elm, CompareFunction cmp) { |
| 554 | RB_COLOR(tmp, field) = RB_COLOR(parent, field); \ | 531 | Node* tmp = head->Root(); |
| 555 | RB_COLOR(parent, field) = RB_BLACK; \ | 532 | |
| 556 | if (RB_LEFT(tmp, field)) \ | 533 | while (tmp) { |
| 557 | RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK; \ | 534 | const int comp = cmp(elm, tmp); |
| 558 | RB_ROTATE_RIGHT(head, parent, tmp, field); \ | 535 | if (comp < 0) { |
| 559 | elm = RB_ROOT(head); \ | 536 | tmp = RB_LEFT(tmp); |
| 560 | break; \ | 537 | } else if (comp > 0) { |
| 561 | } \ | 538 | tmp = RB_RIGHT(tmp); |
| 562 | } \ | 539 | } else { |
| 563 | } \ | 540 | return tmp; |
| 564 | if (elm) \ | 541 | } |
| 565 | RB_COLOR(elm, field) = RB_BLACK; \ | 542 | } |
| 566 | } | 543 | |
| 567 | 544 | return nullptr; | |
| 568 | #define RB_GENERATE_REMOVE(name, type, field, attr) \ | 545 | } |
| 569 | attr struct type* name##_RB_REMOVE(struct name* head, struct type* elm) { \ | 546 | |
| 570 | struct type *child, *parent, *old = elm; \ | 547 | // Finds the first node greater than or equal to the search key |
| 571 | int color; \ | 548 | template <typename Node, typename CompareFunction> |
| 572 | if (RB_LEFT(elm, field) == NULL) \ | 549 | Node* RB_NFIND(RBHead<Node>* head, Node* elm, CompareFunction cmp) { |
| 573 | child = RB_RIGHT(elm, field); \ | 550 | Node* tmp = head->Root(); |
| 574 | else if (RB_RIGHT(elm, field) == NULL) \ | 551 | Node* res = nullptr; |
| 575 | child = RB_LEFT(elm, field); \ | 552 | |
| 576 | else { \ | 553 | while (tmp) { |
| 577 | struct type* left; \ | 554 | const int comp = cmp(elm, tmp); |
| 578 | elm = RB_RIGHT(elm, field); \ | 555 | if (comp < 0) { |
| 579 | while ((left = RB_LEFT(elm, field)) != NULL) \ | 556 | res = tmp; |
| 580 | elm = left; \ | 557 | tmp = RB_LEFT(tmp); |
| 581 | child = RB_RIGHT(elm, field); \ | 558 | } else if (comp > 0) { |
| 582 | parent = RB_PARENT(elm, field); \ | 559 | tmp = RB_RIGHT(tmp); |
| 583 | color = RB_COLOR(elm, field); \ | 560 | } else { |
| 584 | if (child) \ | 561 | return tmp; |
| 585 | RB_PARENT(child, field) = parent; \ | 562 | } |
| 586 | if (parent) { \ | 563 | } |
| 587 | if (RB_LEFT(parent, field) == elm) \ | 564 | |
| 588 | RB_LEFT(parent, field) = child; \ | 565 | return res; |
| 589 | else \ | 566 | } |
| 590 | RB_RIGHT(parent, field) = child; \ | 567 | |
| 591 | RB_AUGMENT(parent); \ | 568 | // Finds the node with the same key as lelm |
| 592 | } else \ | 569 | template <typename Node, typename CompareFunction> |
| 593 | RB_ROOT(head) = child; \ | 570 | Node* RB_FIND_LIGHT(RBHead<Node>* head, const void* lelm, CompareFunction lcmp) { |
| 594 | if (RB_PARENT(elm, field) == old) \ | 571 | Node* tmp = head->Root(); |
| 595 | parent = elm; \ | 572 | |
| 596 | (elm)->field = (old)->field; \ | 573 | while (tmp) { |
| 597 | if (RB_PARENT(old, field)) { \ | 574 | const int comp = lcmp(lelm, tmp); |
| 598 | if (RB_LEFT(RB_PARENT(old, field), field) == old) \ | 575 | if (comp < 0) { |
| 599 | RB_LEFT(RB_PARENT(old, field), field) = elm; \ | 576 | tmp = RB_LEFT(tmp); |
| 600 | else \ | 577 | } else if (comp > 0) { |
| 601 | RB_RIGHT(RB_PARENT(old, field), field) = elm; \ | 578 | tmp = RB_RIGHT(tmp); |
| 602 | RB_AUGMENT(RB_PARENT(old, field)); \ | 579 | } else { |
| 603 | } else \ | 580 | return tmp; |
| 604 | RB_ROOT(head) = elm; \ | 581 | } |
| 605 | RB_PARENT(RB_LEFT(old, field), field) = elm; \ | 582 | } |
| 606 | if (RB_RIGHT(old, field)) \ | 583 | |
| 607 | RB_PARENT(RB_RIGHT(old, field), field) = elm; \ | 584 | return nullptr; |
| 608 | if (parent) { \ | 585 | } |
| 609 | left = parent; \ | 586 | |
| 610 | do { \ | 587 | // Finds the first node greater than or equal to the search key |
| 611 | RB_AUGMENT(left); \ | 588 | template <typename Node, typename CompareFunction> |
| 612 | } while ((left = RB_PARENT(left, field)) != NULL); \ | 589 | Node* RB_NFIND_LIGHT(RBHead<Node>* head, const void* lelm, CompareFunction lcmp) { |
| 613 | } \ | 590 | Node* tmp = head->Root(); |
| 614 | goto color; \ | 591 | Node* res = nullptr; |
| 615 | } \ | 592 | |
| 616 | parent = RB_PARENT(elm, field); \ | 593 | while (tmp) { |
| 617 | color = RB_COLOR(elm, field); \ | 594 | const int comp = lcmp(lelm, tmp); |
| 618 | if (child) \ | 595 | if (comp < 0) { |
| 619 | RB_PARENT(child, field) = parent; \ | 596 | res = tmp; |
| 620 | if (parent) { \ | 597 | tmp = RB_LEFT(tmp); |
| 621 | if (RB_LEFT(parent, field) == elm) \ | 598 | } else if (comp > 0) { |
| 622 | RB_LEFT(parent, field) = child; \ | 599 | tmp = RB_RIGHT(tmp); |
| 623 | else \ | 600 | } else { |
| 624 | RB_RIGHT(parent, field) = child; \ | 601 | return tmp; |
| 625 | RB_AUGMENT(parent); \ | 602 | } |
| 626 | } else \ | 603 | } |
| 627 | RB_ROOT(head) = child; \ | 604 | |
| 628 | color: \ | 605 | return res; |
| 629 | if (color == RB_BLACK) \ | 606 | } |
| 630 | name##_RB_REMOVE_COLOR(head, parent, child); \ | 607 | |
| 631 | return (old); \ | 608 | template <typename Node> |
| 632 | } | 609 | Node* RB_NEXT(Node* elm) { |
| 633 | 610 | if (RB_RIGHT(elm)) { | |
| 634 | #define RB_GENERATE_INSERT(name, type, field, cmp, attr) \ | 611 | elm = RB_RIGHT(elm); |
| 635 | /* Inserts a node into the RB tree */ \ | 612 | while (RB_LEFT(elm)) { |
| 636 | attr struct type* name##_RB_INSERT(struct name* head, struct type* elm) { \ | 613 | elm = RB_LEFT(elm); |
| 637 | struct type* tmp; \ | 614 | } |
| 638 | struct type* parent = NULL; \ | 615 | } else { |
| 639 | int comp = 0; \ | 616 | if (RB_PARENT(elm) && (elm == RB_LEFT(RB_PARENT(elm)))) { |
| 640 | tmp = RB_ROOT(head); \ | 617 | elm = RB_PARENT(elm); |
| 641 | while (tmp) { \ | 618 | } else { |
| 642 | parent = tmp; \ | 619 | while (RB_PARENT(elm) && (elm == RB_RIGHT(RB_PARENT(elm)))) { |
| 643 | comp = (cmp)(elm, parent); \ | 620 | elm = RB_PARENT(elm); |
| 644 | if (comp < 0) \ | 621 | } |
| 645 | tmp = RB_LEFT(tmp, field); \ | 622 | elm = RB_PARENT(elm); |
| 646 | else if (comp > 0) \ | 623 | } |
| 647 | tmp = RB_RIGHT(tmp, field); \ | 624 | } |
| 648 | else \ | 625 | return elm; |
| 649 | return (tmp); \ | 626 | } |
| 650 | } \ | 627 | |
| 651 | RB_SET(elm, parent, field); \ | 628 | template <typename Node> |
| 652 | if (parent != NULL) { \ | 629 | Node* RB_PREV(Node* elm) { |
| 653 | if (comp < 0) \ | 630 | if (RB_LEFT(elm)) { |
| 654 | RB_LEFT(parent, field) = elm; \ | 631 | elm = RB_LEFT(elm); |
| 655 | else \ | 632 | while (RB_RIGHT(elm)) { |
| 656 | RB_RIGHT(parent, field) = elm; \ | 633 | elm = RB_RIGHT(elm); |
| 657 | RB_AUGMENT(parent); \ | 634 | } |
| 658 | } else \ | 635 | } else { |
| 659 | RB_ROOT(head) = elm; \ | 636 | if (RB_PARENT(elm) && (elm == RB_RIGHT(RB_PARENT(elm)))) { |
| 660 | name##_RB_INSERT_COLOR(head, elm); \ | 637 | elm = RB_PARENT(elm); |
| 661 | return (NULL); \ | 638 | } else { |
| 662 | } | 639 | while (RB_PARENT(elm) && (elm == RB_LEFT(RB_PARENT(elm)))) { |
| 663 | 640 | elm = RB_PARENT(elm); | |
| 664 | #define RB_GENERATE_FIND(name, type, field, cmp, attr) \ | 641 | } |
| 665 | /* Finds the node with the same key as elm */ \ | 642 | elm = RB_PARENT(elm); |
| 666 | attr struct type* name##_RB_FIND(struct name* head, struct type* elm) { \ | 643 | } |
| 667 | struct type* tmp = RB_ROOT(head); \ | 644 | } |
| 668 | int comp; \ | 645 | return elm; |
| 669 | while (tmp) { \ | 646 | } |
| 670 | comp = cmp(elm, tmp); \ | 647 | |
| 671 | if (comp < 0) \ | 648 | template <typename Node> |
| 672 | tmp = RB_LEFT(tmp, field); \ | 649 | Node* RB_MINMAX(RBHead<Node>* head, bool is_min) { |
| 673 | else if (comp > 0) \ | 650 | Node* tmp = head->Root(); |
| 674 | tmp = RB_RIGHT(tmp, field); \ | 651 | Node* parent = nullptr; |
| 675 | else \ | 652 | |
| 676 | return (tmp); \ | 653 | while (tmp) { |
| 677 | } \ | 654 | parent = tmp; |
| 678 | return (NULL); \ | 655 | if (is_min) { |
| 679 | } | 656 | tmp = RB_LEFT(tmp); |
| 680 | 657 | } else { | |
| 681 | #define RB_GENERATE_NFIND(name, type, field, cmp, attr) \ | 658 | tmp = RB_RIGHT(tmp); |
| 682 | /* Finds the first node greater than or equal to the search key */ \ | 659 | } |
| 683 | attr struct type* name##_RB_NFIND(struct name* head, struct type* elm) { \ | 660 | } |
| 684 | struct type* tmp = RB_ROOT(head); \ | 661 | |
| 685 | struct type* res = NULL; \ | 662 | return parent; |
| 686 | int comp; \ | 663 | } |
| 687 | while (tmp) { \ | 664 | |
| 688 | comp = cmp(elm, tmp); \ | 665 | template <typename Node> |
| 689 | if (comp < 0) { \ | 666 | Node* RB_MIN(RBHead<Node>* head) { |
| 690 | res = tmp; \ | 667 | return RB_MINMAX(head, true); |
| 691 | tmp = RB_LEFT(tmp, field); \ | 668 | } |
| 692 | } else if (comp > 0) \ | 669 | |
| 693 | tmp = RB_RIGHT(tmp, field); \ | 670 | template <typename Node> |
| 694 | else \ | 671 | Node* RB_MAX(RBHead<Node>* head) { |
| 695 | return (tmp); \ | 672 | return RB_MINMAX(head, false); |
| 696 | } \ | 673 | } |
| 697 | return (res); \ | 674 | } // namespace Common |
| 698 | } | ||
| 699 | |||
| 700 | #define RB_GENERATE_FIND_LIGHT(name, type, field, lcmp, attr) \ | ||
| 701 | /* Finds the node with the same key as elm */ \ | ||
| 702 | attr struct type* name##_RB_FIND_LIGHT(struct name* head, const void* lelm) { \ | ||
| 703 | struct type* tmp = RB_ROOT(head); \ | ||
| 704 | int comp; \ | ||
| 705 | while (tmp) { \ | ||
| 706 | comp = lcmp(lelm, tmp); \ | ||
| 707 | if (comp < 0) \ | ||
| 708 | tmp = RB_LEFT(tmp, field); \ | ||
| 709 | else if (comp > 0) \ | ||
| 710 | tmp = RB_RIGHT(tmp, field); \ | ||
| 711 | else \ | ||
| 712 | return (tmp); \ | ||
| 713 | } \ | ||
| 714 | return (NULL); \ | ||
| 715 | } | ||
| 716 | |||
| 717 | #define RB_GENERATE_NFIND_LIGHT(name, type, field, lcmp, attr) \ | ||
| 718 | /* Finds the first node greater than or equal to the search key */ \ | ||
| 719 | attr struct type* name##_RB_NFIND_LIGHT(struct name* head, const void* lelm) { \ | ||
| 720 | struct type* tmp = RB_ROOT(head); \ | ||
| 721 | struct type* res = NULL; \ | ||
| 722 | int comp; \ | ||
| 723 | while (tmp) { \ | ||
| 724 | comp = lcmp(lelm, tmp); \ | ||
| 725 | if (comp < 0) { \ | ||
| 726 | res = tmp; \ | ||
| 727 | tmp = RB_LEFT(tmp, field); \ | ||
| 728 | } else if (comp > 0) \ | ||
| 729 | tmp = RB_RIGHT(tmp, field); \ | ||
| 730 | else \ | ||
| 731 | return (tmp); \ | ||
| 732 | } \ | ||
| 733 | return (res); \ | ||
| 734 | } | ||
| 735 | |||
| 736 | #define RB_GENERATE_NEXT(name, type, field, attr) \ | ||
| 737 | /* ARGSUSED */ \ | ||
| 738 | attr struct type* name##_RB_NEXT(struct type* elm) { \ | ||
| 739 | if (RB_RIGHT(elm, field)) { \ | ||
| 740 | elm = RB_RIGHT(elm, field); \ | ||
| 741 | while (RB_LEFT(elm, field)) \ | ||
| 742 | elm = RB_LEFT(elm, field); \ | ||
| 743 | } else { \ | ||
| 744 | if (RB_PARENT(elm, field) && (elm == RB_LEFT(RB_PARENT(elm, field), field))) \ | ||
| 745 | elm = RB_PARENT(elm, field); \ | ||
| 746 | else { \ | ||
| 747 | while (RB_PARENT(elm, field) && (elm == RB_RIGHT(RB_PARENT(elm, field), field))) \ | ||
| 748 | elm = RB_PARENT(elm, field); \ | ||
| 749 | elm = RB_PARENT(elm, field); \ | ||
| 750 | } \ | ||
| 751 | } \ | ||
| 752 | return (elm); \ | ||
| 753 | } | ||
| 754 | |||
| 755 | #define RB_GENERATE_PREV(name, type, field, attr) \ | ||
| 756 | /* ARGSUSED */ \ | ||
| 757 | attr struct type* name##_RB_PREV(struct type* elm) { \ | ||
| 758 | if (RB_LEFT(elm, field)) { \ | ||
| 759 | elm = RB_LEFT(elm, field); \ | ||
| 760 | while (RB_RIGHT(elm, field)) \ | ||
| 761 | elm = RB_RIGHT(elm, field); \ | ||
| 762 | } else { \ | ||
| 763 | if (RB_PARENT(elm, field) && (elm == RB_RIGHT(RB_PARENT(elm, field), field))) \ | ||
| 764 | elm = RB_PARENT(elm, field); \ | ||
| 765 | else { \ | ||
| 766 | while (RB_PARENT(elm, field) && (elm == RB_LEFT(RB_PARENT(elm, field), field))) \ | ||
| 767 | elm = RB_PARENT(elm, field); \ | ||
| 768 | elm = RB_PARENT(elm, field); \ | ||
| 769 | } \ | ||
| 770 | } \ | ||
| 771 | return (elm); \ | ||
| 772 | } | ||
| 773 | |||
| 774 | #define RB_GENERATE_MINMAX(name, type, field, attr) \ | ||
| 775 | attr struct type* name##_RB_MINMAX(struct name* head, int val) { \ | ||
| 776 | struct type* tmp = RB_ROOT(head); \ | ||
| 777 | struct type* parent = NULL; \ | ||
| 778 | while (tmp) { \ | ||
| 779 | parent = tmp; \ | ||
| 780 | if (val < 0) \ | ||
| 781 | tmp = RB_LEFT(tmp, field); \ | ||
| 782 | else \ | ||
| 783 | tmp = RB_RIGHT(tmp, field); \ | ||
| 784 | } \ | ||
| 785 | return (parent); \ | ||
| 786 | } | ||
| 787 | |||
| 788 | #define RB_NEGINF -1 | ||
| 789 | #define RB_INF 1 | ||
| 790 | |||
| 791 | #define RB_INSERT(name, x, y) name##_RB_INSERT(x, y) | ||
| 792 | #define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y) | ||
| 793 | #define RB_FIND(name, x, y) name##_RB_FIND(x, y) | ||
| 794 | #define RB_NFIND(name, x, y) name##_RB_NFIND(x, y) | ||
| 795 | #define RB_FIND_LIGHT(name, x, y) name##_RB_FIND_LIGHT(x, y) | ||
| 796 | #define RB_NFIND_LIGHT(name, x, y) name##_RB_NFIND_LIGHT(x, y) | ||
| 797 | #define RB_NEXT(name, x, y) name##_RB_NEXT(y) | ||
| 798 | #define RB_PREV(name, x, y) name##_RB_PREV(y) | ||
| 799 | #define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF) | ||
| 800 | #define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF) | ||
| 801 | |||
| 802 | #define RB_FOREACH(x, name, head) \ | ||
| 803 | for ((x) = RB_MIN(name, head); (x) != NULL; (x) = name##_RB_NEXT(x)) | ||
| 804 | |||
| 805 | #define RB_FOREACH_FROM(x, name, y) \ | ||
| 806 | for ((x) = (y); ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); (x) = (y)) | ||
| 807 | |||
| 808 | #define RB_FOREACH_SAFE(x, name, head, y) \ | ||
| 809 | for ((x) = RB_MIN(name, head); ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \ | ||
| 810 | (x) = (y)) | ||
| 811 | |||
| 812 | #define RB_FOREACH_REVERSE(x, name, head) \ | ||
| 813 | for ((x) = RB_MAX(name, head); (x) != NULL; (x) = name##_RB_PREV(x)) | ||
| 814 | |||
| 815 | #define RB_FOREACH_REVERSE_FROM(x, name, y) \ | ||
| 816 | for ((x) = (y); ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); (x) = (y)) | ||
| 817 | |||
| 818 | #define RB_FOREACH_REVERSE_SAFE(x, name, head, y) \ | ||
| 819 | for ((x) = RB_MAX(name, head); ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \ | ||
| 820 | (x) = (y)) | ||
| 821 | |||
| 822 | #endif /* _SYS_TREE_H_ */ | ||
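The tree.h change above replaces the BSD sys/tree.h macro generators (RB_GENERATE_*, RB_PROTOTYPE_*) with ordinary function templates such as RB_FIND and RB_INSERT that take an RBHead<Node>* and a comparison functor. A minimal standalone sketch of the same macro-to-template idea, using an illustrative TreeNode type and FindNode name rather than the real header's API:

    // Toy binary-tree find: what the old RB_GENERATE_FIND macro stamped out once per
    // tree type, a single template now expresses for any node with left/right links.
    struct TreeNode {
        int key{};
        TreeNode* left{};
        TreeNode* right{};
    };

    template <typename Node, typename Compare>
    Node* FindNode(Node* root, const Node& elm, Compare cmp) {
        Node* tmp = root;
        while (tmp != nullptr) {
            const int comp = cmp(elm, *tmp); // negative: go left, positive: go right
            if (comp < 0) {
                tmp = tmp->left;
            } else if (comp > 0) {
                tmp = tmp->right;
            } else {
                return tmp; // exact match
            }
        }
        return nullptr; // not found
    }

Callers instantiate the template with a lambda comparator instead of passing a cmp token to a macro, so there is no per-tree code generation step and errors surface as normal template diagnostics.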
diff --git a/src/common/x64/native_clock.cpp b/src/common/x64/native_clock.cpp index eb8a7782f..a65f6b832 100644 --- a/src/common/x64/native_clock.cpp +++ b/src/common/x64/native_clock.cpp | |||
| @@ -2,19 +2,74 @@ | |||
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| 5 | #include <array> | ||
| 5 | #include <chrono> | 6 | #include <chrono> |
| 7 | #include <limits> | ||
| 6 | #include <mutex> | 8 | #include <mutex> |
| 7 | #include <thread> | 9 | #include <thread> |
| 8 | 10 | ||
| 9 | #ifdef _MSC_VER | 11 | #ifdef _MSC_VER |
| 10 | #include <intrin.h> | 12 | #include <intrin.h> |
| 13 | |||
| 14 | #pragma intrinsic(__umulh) | ||
| 15 | #pragma intrinsic(_udiv128) | ||
| 11 | #else | 16 | #else |
| 12 | #include <x86intrin.h> | 17 | #include <x86intrin.h> |
| 13 | #endif | 18 | #endif |
| 14 | 19 | ||
| 20 | #include "common/atomic_ops.h" | ||
| 15 | #include "common/uint128.h" | 21 | #include "common/uint128.h" |
| 16 | #include "common/x64/native_clock.h" | 22 | #include "common/x64/native_clock.h" |
| 17 | 23 | ||
| 24 | namespace { | ||
| 25 | |||
| 26 | [[nodiscard]] u64 GetFixedPoint64Factor(u64 numerator, u64 divisor) { | ||
| 27 | #ifdef __SIZEOF_INT128__ | ||
| 28 | const auto base = static_cast<unsigned __int128>(numerator) << 64ULL; | ||
| 29 | return static_cast<u64>(base / divisor); | ||
| 30 | #elif defined(_M_X64) || defined(_M_ARM64) | ||
| 31 | std::array<u64, 2> r = {0, numerator}; | ||
| 32 | u64 remainder; | ||
| 33 | #if _MSC_VER < 1923 | ||
| 34 | return udiv128(r[1], r[0], divisor, &remainder); | ||
| 35 | #else | ||
| 36 | return _udiv128(r[1], r[0], divisor, &remainder); | ||
| 37 | #endif | ||
| 38 | #else | ||
| 39 | // This one is a bit more inaccurate. | ||
| 40 | return MultiplyAndDivide64(std::numeric_limits<u64>::max(), numerator, divisor); | ||
| 41 | #endif | ||
| 42 | } | ||
| 43 | |||
| 44 | [[nodiscard]] u64 MultiplyHigh(u64 a, u64 b) { | ||
| 45 | #ifdef __SIZEOF_INT128__ | ||
| 46 | return (static_cast<unsigned __int128>(a) * static_cast<unsigned __int128>(b)) >> 64; | ||
| 47 | #elif defined(_M_X64) || defined(_M_ARM64) | ||
| 48 | return __umulh(a, b); // MSVC | ||
| 49 | #else | ||
| 50 | // Generic fallback | ||
| 51 | const u64 a_lo = u32(a); | ||
| 52 | const u64 a_hi = a >> 32; | ||
| 53 | const u64 b_lo = u32(b); | ||
| 54 | const u64 b_hi = b >> 32; | ||
| 55 | |||
| 56 | const u64 a_x_b_hi = a_hi * b_hi; | ||
| 57 | const u64 a_x_b_mid = a_hi * b_lo; | ||
| 58 | const u64 b_x_a_mid = b_hi * a_lo; | ||
| 59 | const u64 a_x_b_lo = a_lo * b_lo; | ||
| 60 | |||
| 61 | const u64 carry_bit = (static_cast<u64>(static_cast<u32>(a_x_b_mid)) + | ||
| 62 | static_cast<u64>(static_cast<u32>(b_x_a_mid)) + (a_x_b_lo >> 32)) >> | ||
| 63 | 32; | ||
| 64 | |||
| 65 | const u64 multhi = a_x_b_hi + (a_x_b_mid >> 32) + (b_x_a_mid >> 32) + carry_bit; | ||
| 66 | |||
| 67 | return multhi; | ||
| 68 | #endif | ||
| 69 | } | ||
| 70 | |||
| 71 | } // namespace | ||
| 72 | |||
| 18 | namespace Common { | 73 | namespace Common { |
| 19 | 74 | ||
| 20 | u64 EstimateRDTSCFrequency() { | 75 | u64 EstimateRDTSCFrequency() { |
| @@ -48,54 +103,71 @@ NativeClock::NativeClock(u64 emulated_cpu_frequency_, u64 emulated_clock_frequen | |||
| 48 | : WallClock(emulated_cpu_frequency_, emulated_clock_frequency_, true), rtsc_frequency{ | 103 | : WallClock(emulated_cpu_frequency_, emulated_clock_frequency_, true), rtsc_frequency{ |
| 49 | rtsc_frequency_} { | 104 | rtsc_frequency_} { |
| 50 | _mm_mfence(); | 105 | _mm_mfence(); |
| 51 | last_measure = __rdtsc(); | 106 | time_point.inner.last_measure = __rdtsc(); |
| 52 | accumulated_ticks = 0U; | 107 | time_point.inner.accumulated_ticks = 0U; |
| 108 | ns_rtsc_factor = GetFixedPoint64Factor(1000000000, rtsc_frequency); | ||
| 109 | us_rtsc_factor = GetFixedPoint64Factor(1000000, rtsc_frequency); | ||
| 110 | ms_rtsc_factor = GetFixedPoint64Factor(1000, rtsc_frequency); | ||
| 111 | clock_rtsc_factor = GetFixedPoint64Factor(emulated_clock_frequency, rtsc_frequency); | ||
| 112 | cpu_rtsc_factor = GetFixedPoint64Factor(emulated_cpu_frequency, rtsc_frequency); | ||
| 53 | } | 113 | } |
| 54 | 114 | ||
| 55 | u64 NativeClock::GetRTSC() { | 115 | u64 NativeClock::GetRTSC() { |
| 56 | std::scoped_lock scope{rtsc_serialize}; | 116 | TimePoint new_time_point{}; |
| 57 | _mm_mfence(); | 117 | TimePoint current_time_point{}; |
| 58 | const u64 current_measure = __rdtsc(); | 118 | do { |
| 59 | u64 diff = current_measure - last_measure; | 119 | current_time_point.pack = time_point.pack; |
| 60 | diff = diff & ~static_cast<u64>(static_cast<s64>(diff) >> 63); // max(diff, 0) | 120 | _mm_mfence(); |
| 61 | if (current_measure > last_measure) { | 121 | const u64 current_measure = __rdtsc(); |
| 62 | last_measure = current_measure; | 122 | u64 diff = current_measure - current_time_point.inner.last_measure; |
| 63 | } | 123 | diff = diff & ~static_cast<u64>(static_cast<s64>(diff) >> 63); // max(diff, 0) |
| 64 | accumulated_ticks += diff; | 124 | new_time_point.inner.last_measure = current_measure > current_time_point.inner.last_measure |
| 125 | ? current_measure | ||
| 126 | : current_time_point.inner.last_measure; | ||
| 127 | new_time_point.inner.accumulated_ticks = current_time_point.inner.accumulated_ticks + diff; | ||
| 128 | } while (!Common::AtomicCompareAndSwap(time_point.pack.data(), new_time_point.pack, | ||
| 129 | current_time_point.pack)); | ||
| 65 | /// The clock cannot be more precise than the guest timer, remove the lower bits | 130 | /// The clock cannot be more precise than the guest timer, remove the lower bits |
| 66 | return accumulated_ticks & inaccuracy_mask; | 131 | return new_time_point.inner.accumulated_ticks & inaccuracy_mask; |
| 67 | } | 132 | } |
| 68 | 133 | ||
| 69 | void NativeClock::Pause(bool is_paused) { | 134 | void NativeClock::Pause(bool is_paused) { |
| 70 | if (!is_paused) { | 135 | if (!is_paused) { |
| 71 | _mm_mfence(); | 136 | TimePoint current_time_point{}; |
| 72 | last_measure = __rdtsc(); | 137 | TimePoint new_time_point{}; |
| 138 | do { | ||
| 139 | current_time_point.pack = time_point.pack; | ||
| 140 | new_time_point.pack = current_time_point.pack; | ||
| 141 | _mm_mfence(); | ||
| 142 | new_time_point.inner.last_measure = __rdtsc(); | ||
| 143 | } while (!Common::AtomicCompareAndSwap(time_point.pack.data(), new_time_point.pack, | ||
| 144 | current_time_point.pack)); | ||
| 73 | } | 145 | } |
| 74 | } | 146 | } |
| 75 | 147 | ||
| 76 | std::chrono::nanoseconds NativeClock::GetTimeNS() { | 148 | std::chrono::nanoseconds NativeClock::GetTimeNS() { |
| 77 | const u64 rtsc_value = GetRTSC(); | 149 | const u64 rtsc_value = GetRTSC(); |
| 78 | return std::chrono::nanoseconds{MultiplyAndDivide64(rtsc_value, 1000000000, rtsc_frequency)}; | 150 | return std::chrono::nanoseconds{MultiplyHigh(rtsc_value, ns_rtsc_factor)}; |
| 79 | } | 151 | } |
| 80 | 152 | ||
| 81 | std::chrono::microseconds NativeClock::GetTimeUS() { | 153 | std::chrono::microseconds NativeClock::GetTimeUS() { |
| 82 | const u64 rtsc_value = GetRTSC(); | 154 | const u64 rtsc_value = GetRTSC(); |
| 83 | return std::chrono::microseconds{MultiplyAndDivide64(rtsc_value, 1000000, rtsc_frequency)}; | 155 | return std::chrono::microseconds{MultiplyHigh(rtsc_value, us_rtsc_factor)}; |
| 84 | } | 156 | } |
| 85 | 157 | ||
| 86 | std::chrono::milliseconds NativeClock::GetTimeMS() { | 158 | std::chrono::milliseconds NativeClock::GetTimeMS() { |
| 87 | const u64 rtsc_value = GetRTSC(); | 159 | const u64 rtsc_value = GetRTSC(); |
| 88 | return std::chrono::milliseconds{MultiplyAndDivide64(rtsc_value, 1000, rtsc_frequency)}; | 160 | return std::chrono::milliseconds{MultiplyHigh(rtsc_value, ms_rtsc_factor)}; |
| 89 | } | 161 | } |
| 90 | 162 | ||
| 91 | u64 NativeClock::GetClockCycles() { | 163 | u64 NativeClock::GetClockCycles() { |
| 92 | const u64 rtsc_value = GetRTSC(); | 164 | const u64 rtsc_value = GetRTSC(); |
| 93 | return MultiplyAndDivide64(rtsc_value, emulated_clock_frequency, rtsc_frequency); | 165 | return MultiplyHigh(rtsc_value, clock_rtsc_factor); |
| 94 | } | 166 | } |
| 95 | 167 | ||
| 96 | u64 NativeClock::GetCPUCycles() { | 168 | u64 NativeClock::GetCPUCycles() { |
| 97 | const u64 rtsc_value = GetRTSC(); | 169 | const u64 rtsc_value = GetRTSC(); |
| 98 | return MultiplyAndDivide64(rtsc_value, emulated_cpu_frequency, rtsc_frequency); | 170 | return MultiplyHigh(rtsc_value, cpu_rtsc_factor); |
| 99 | } | 171 | } |
| 100 | 172 | ||
| 101 | } // namespace X64 | 173 | } // namespace X64 |
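The rewritten clock above trades a MultiplyAndDivide64 at every query for factors precomputed once in the constructor: factor = (numerator << 64) / rtsc_frequency, so each conversion becomes a single high-half multiply via MultiplyHigh. A sketch of that fixed-point idea, using GCC/Clang's unsigned __int128 and illustrative names (FixedPointFactor and Rescale are not the project's functions):

    #include <cstdint>

    // Precompute once; assumes numerator < divisor so the 0.64 fixed-point factor fits in 64 bits.
    std::uint64_t FixedPointFactor(std::uint64_t numerator, std::uint64_t divisor) {
        return static_cast<std::uint64_t>(
            (static_cast<unsigned __int128>(numerator) << 64) / divisor);
    }

    // Per-query conversion: high 64 bits of ticks * factor, roughly ticks * numerator / divisor.
    std::uint64_t Rescale(std::uint64_t ticks, std::uint64_t factor) {
        return static_cast<std::uint64_t>(
            (static_cast<unsigned __int128>(ticks) * factor) >> 64);
    }

    // e.g. with a 3 GHz invariant TSC:
    //   const auto ns_factor = FixedPointFactor(1'000'000'000, 3'000'000'000);
    //   elapsed_ns = Rescale(tsc_ticks, ns_factor);

The result can be one unit smaller than the exact quotient because the factor is truncated, which is why the constructor computes one factor per target unit rather than chaining conversions.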
diff --git a/src/common/x64/native_clock.h b/src/common/x64/native_clock.h index 6d1e32ac8..7cbd400d2 100644 --- a/src/common/x64/native_clock.h +++ b/src/common/x64/native_clock.h | |||
| @@ -6,7 +6,6 @@ | |||
| 6 | 6 | ||
| 7 | #include <optional> | 7 | #include <optional> |
| 8 | 8 | ||
| 9 | #include "common/spin_lock.h" | ||
| 10 | #include "common/wall_clock.h" | 9 | #include "common/wall_clock.h" |
| 11 | 10 | ||
| 12 | namespace Common { | 11 | namespace Common { |
| @@ -32,14 +31,28 @@ public: | |||
| 32 | private: | 31 | private: |
| 33 | u64 GetRTSC(); | 32 | u64 GetRTSC(); |
| 34 | 33 | ||
| 34 | union alignas(16) TimePoint { | ||
| 35 | TimePoint() : pack{} {} | ||
| 36 | u128 pack{}; | ||
| 37 | struct Inner { | ||
| 38 | u64 last_measure{}; | ||
| 39 | u64 accumulated_ticks{}; | ||
| 40 | } inner; | ||
| 41 | }; | ||
| 42 | |||
| 35 | /// value used to reduce the native clock's accuracy as some apps rely on | 43 | /// value used to reduce the native clock's accuracy as some apps rely on |
| 36 | /// undefined behavior where the level of accuracy in the clock shouldn't | 44 | /// undefined behavior where the level of accuracy in the clock shouldn't |
| 37 | /// be higher. | 45 | /// be higher. |
| 38 | static constexpr u64 inaccuracy_mask = ~(UINT64_C(0x400) - 1); | 46 | static constexpr u64 inaccuracy_mask = ~(UINT64_C(0x400) - 1); |
| 39 | 47 | ||
| 40 | SpinLock rtsc_serialize{}; | 48 | TimePoint time_point; |
| 41 | u64 last_measure{}; | 49 | // factors |
| 42 | u64 accumulated_ticks{}; | 50 | u64 clock_rtsc_factor{}; |
| 51 | u64 cpu_rtsc_factor{}; | ||
| 52 | u64 ns_rtsc_factor{}; | ||
| 53 | u64 us_rtsc_factor{}; | ||
| 54 | u64 ms_rtsc_factor{}; | ||
| 55 | |||
| 43 | u64 rtsc_frequency; | 56 | u64 rtsc_frequency; |
| 44 | }; | 57 | }; |
| 45 | } // namespace X64 | 58 | } // namespace X64 |
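The TimePoint union added above packs last_measure and accumulated_ticks into one 16-byte, 16-byte-aligned value so GetRTSC and Pause can update both fields with a single 128-bit compare-and-swap instead of taking the old SpinLock. A rough equivalent of that pattern with std::atomic (illustrative only; the real code uses Common::AtomicCompareAndSwap on the u128 pack, and std::atomic may fall back to a lock unless the target supports cmpxchg16b, e.g. x86-64 built with -mcx16):

    #include <atomic>
    #include <cstdint>

    struct alignas(16) TimePoint {
        std::uint64_t last_measure;
        std::uint64_t accumulated_ticks;
    };

    std::atomic<TimePoint> time_point{TimePoint{}};

    void AccumulateTicks(std::uint64_t now) {
        TimePoint expected = time_point.load();
        TimePoint desired{};
        do {
            // rdtsc can appear to run backwards across cores; clamp the delta at zero.
            const std::uint64_t diff =
                now > expected.last_measure ? now - expected.last_measure : 0;
            desired.last_measure = now > expected.last_measure ? now : expected.last_measure;
            desired.accumulated_ticks = expected.accumulated_ticks + diff;
        } while (!time_point.compare_exchange_weak(expected, desired));
    }

compare_exchange_weak reloads expected on failure, so a concurrent updater simply forces one more iteration rather than blocking the reader.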
diff --git a/src/core/frontend/input_interpreter.cpp b/src/core/frontend/input_interpreter.cpp index 66ae506cd..ec5fe660e 100644 --- a/src/core/frontend/input_interpreter.cpp +++ b/src/core/frontend/input_interpreter.cpp | |||
| @@ -25,6 +25,10 @@ void InputInterpreter::PollInput() { | |||
| 25 | button_states[current_index] = button_state; | 25 | button_states[current_index] = button_state; |
| 26 | } | 26 | } |
| 27 | 27 | ||
| 28 | bool InputInterpreter::IsButtonPressed(HIDButton button) const { | ||
| 29 | return (button_states[current_index] & (1U << static_cast<u8>(button))) != 0; | ||
| 30 | } | ||
| 31 | |||
| 28 | bool InputInterpreter::IsButtonPressedOnce(HIDButton button) const { | 32 | bool InputInterpreter::IsButtonPressedOnce(HIDButton button) const { |
| 29 | const bool current_press = | 33 | const bool current_press = |
| 30 | (button_states[current_index] & (1U << static_cast<u8>(button))) != 0; | 34 | (button_states[current_index] & (1U << static_cast<u8>(button))) != 0; |
diff --git a/src/core/frontend/input_interpreter.h b/src/core/frontend/input_interpreter.h index fea9aebe6..73fc47ffb 100644 --- a/src/core/frontend/input_interpreter.h +++ b/src/core/frontend/input_interpreter.h | |||
| @@ -67,6 +67,27 @@ public: | |||
| 67 | void PollInput(); | 67 | void PollInput(); |
| 68 | 68 | ||
| 69 | /** | 69 | /** |
| 70 | * Checks whether the button is pressed. | ||
| 71 | * | ||
| 72 | * @param button The button to check. | ||
| 73 | * | ||
| 74 | * @returns True when the button is pressed. | ||
| 75 | */ | ||
| 76 | [[nodiscard]] bool IsButtonPressed(HIDButton button) const; | ||
| 77 | |||
| 78 | /** | ||
| 79 | * Checks whether any of the buttons in the parameter list is pressed. | ||
| 80 | * | ||
| 81 | * @tparam HIDButton The buttons to check. | ||
| 82 | * | ||
| 83 | * @returns True when at least one of the buttons is pressed. | ||
| 84 | */ | ||
| 85 | template <HIDButton... T> | ||
| 86 | [[nodiscard]] bool IsAnyButtonPressed() { | ||
| 87 | return (IsButtonPressed(T) || ...); | ||
| 88 | } | ||
| 89 | |||
| 90 | /** | ||
| 70 | * The specified button is considered to be pressed once | 91 | * The specified button is considered to be pressed once |
| 71 | * if it is currently pressed and not pressed previously. | 92 | * if it is currently pressed and not pressed previously. |
| 72 | * | 93 | * |
| @@ -79,12 +100,12 @@ public: | |||
| 79 | /** | 100 | /** |
| 80 | * Checks whether any of the buttons in the parameter list is pressed once. | 101 | * Checks whether any of the buttons in the parameter list is pressed once. |
| 81 | * | 102 | * |
| 82 | * @tparam HIDButton The buttons to check. | 103 | * @tparam T The buttons to check. |
| 83 | * | 104 | * |
| 84 | * @returns True when at least one of the buttons is pressed once. | 105 | * @returns True when at least one of the buttons is pressed once. |
| 85 | */ | 106 | */ |
| 86 | template <HIDButton... T> | 107 | template <HIDButton... T> |
| 87 | [[nodiscard]] bool IsAnyButtonPressedOnce() { | 108 | [[nodiscard]] bool IsAnyButtonPressedOnce() const { |
| 88 | return (IsButtonPressedOnce(T) || ...); | 109 | return (IsButtonPressedOnce(T) || ...); |
| 89 | } | 110 | } |
| 90 | 111 | ||
| @@ -100,12 +121,12 @@ public: | |||
| 100 | /** | 121 | /** |
| 101 | * Checks whether any of the buttons in the parameter list is held down. | 122 | * Checks whether any of the buttons in the parameter list is held down. |
| 102 | * | 123 | * |
| 103 | * @tparam HIDButton The buttons to check. | 124 | * @tparam T The buttons to check. |
| 104 | * | 125 | * |
| 105 | * @returns True when at least one of the buttons is held down. | 126 | * @returns True when at least one of the buttons is held down. |
| 106 | */ | 127 | */ |
| 107 | template <HIDButton... T> | 128 | template <HIDButton... T> |
| 108 | [[nodiscard]] bool IsAnyButtonHeld() { | 129 | [[nodiscard]] bool IsAnyButtonHeld() const { |
| 109 | return (IsButtonHeld(T) || ...); | 130 | return (IsButtonHeld(T) || ...); |
| 110 | } | 131 | } |
| 111 | 132 | ||
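IsAnyButtonPressed above relies on a C++17 fold expression over a non-type template parameter pack, expanding the pack of buttons into a chain of || tests. A tiny standalone sketch of the same construct with stand-in names (Button and IsPressed are not the real HID types):

    #include <cstdint>

    enum class Button : std::uint8_t { A, B, X, Y };

    // Stand-in for reading the controller bitmask.
    bool IsPressed(Button button) {
        return button == Button::A;
    }

    template <Button... T>
    bool IsAnyPressed() {
        return (IsPressed(T) || ...); // fold: IsPressed(A) || IsPressed(B) || ...
    }

    // IsAnyPressed<Button::A, Button::B>() == IsPressed(Button::A) || IsPressed(Button::B)

An empty pack folds to false, and evaluation short-circuits left to right just like a hand-written || chain.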
diff --git a/src/core/hle/kernel/k_priority_queue.h b/src/core/hle/kernel/k_priority_queue.h index 99fb8fe93..0dc929040 100644 --- a/src/core/hle/kernel/k_priority_queue.h +++ b/src/core/hle/kernel/k_priority_queue.h | |||
| @@ -8,11 +8,11 @@ | |||
| 8 | #pragma once | 8 | #pragma once |
| 9 | 9 | ||
| 10 | #include <array> | 10 | #include <array> |
| 11 | #include <bit> | ||
| 11 | #include <concepts> | 12 | #include <concepts> |
| 12 | 13 | ||
| 13 | #include "common/assert.h" | 14 | #include "common/assert.h" |
| 14 | #include "common/bit_set.h" | 15 | #include "common/bit_set.h" |
| 15 | #include "common/bit_util.h" | ||
| 16 | #include "common/common_types.h" | 16 | #include "common/common_types.h" |
| 17 | #include "common/concepts.h" | 17 | #include "common/concepts.h" |
| 18 | 18 | ||
| @@ -268,7 +268,7 @@ private: | |||
| 268 | } | 268 | } |
| 269 | 269 | ||
| 270 | constexpr s32 GetNextCore(u64& affinity) { | 270 | constexpr s32 GetNextCore(u64& affinity) { |
| 271 | const s32 core = Common::CountTrailingZeroes64(affinity); | 271 | const s32 core = std::countr_zero(affinity); |
| 272 | ClearAffinityBit(affinity, core); | 272 | ClearAffinityBit(affinity, core); |
| 273 | return core; | 273 | return core; |
| 274 | } | 274 | } |
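GetNextCore above now uses std::countr_zero from <bit> (C++20) instead of the project's Common::CountTrailingZeroes64. A short sketch of the same iterate-the-set-bits pattern on an affinity mask (ForEachCore is an illustrative name):

    #include <bit>
    #include <cstdint>
    #include <cstdio>

    void ForEachCore(std::uint64_t affinity) {
        while (affinity != 0) {
            const int core = std::countr_zero(affinity); // index of lowest set bit
            affinity &= affinity - 1;                    // clear that bit
            std::printf("core %d\n", core);
        }
    }

std::countr_zero is constexpr and well defined for a zero argument (it returns the bit width, 64 here), matching the behaviour of the removed helpers.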
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index 42f0ea483..12b5619fb 100644 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp | |||
| @@ -5,6 +5,8 @@ | |||
| 5 | // This file references various implementation details from Atmosphere, an open-source firmware for | 5 | // This file references various implementation details from Atmosphere, an open-source firmware for |
| 6 | // the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX. | 6 | // the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX. |
| 7 | 7 | ||
| 8 | #include <bit> | ||
| 9 | |||
| 8 | #include "common/assert.h" | 10 | #include "common/assert.h" |
| 9 | #include "common/bit_util.h" | 11 | #include "common/bit_util.h" |
| 10 | #include "common/fiber.h" | 12 | #include "common/fiber.h" |
| @@ -31,12 +33,12 @@ static void IncrementScheduledCount(Kernel::Thread* thread) { | |||
| 31 | 33 | ||
| 32 | void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule, | 34 | void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule, |
| 33 | Core::EmuThreadHandle global_thread) { | 35 | Core::EmuThreadHandle global_thread) { |
| 34 | u32 current_core = global_thread.host_handle; | 36 | const u32 current_core = global_thread.host_handle; |
| 35 | bool must_context_switch = global_thread.guest_handle != InvalidHandle && | 37 | bool must_context_switch = global_thread.guest_handle != InvalidHandle && |
| 36 | (current_core < Core::Hardware::NUM_CPU_CORES); | 38 | (current_core < Core::Hardware::NUM_CPU_CORES); |
| 37 | 39 | ||
| 38 | while (cores_pending_reschedule != 0) { | 40 | while (cores_pending_reschedule != 0) { |
| 39 | u32 core = Common::CountTrailingZeroes64(cores_pending_reschedule); | 41 | const auto core = static_cast<u32>(std::countr_zero(cores_pending_reschedule)); |
| 40 | ASSERT(core < Core::Hardware::NUM_CPU_CORES); | 42 | ASSERT(core < Core::Hardware::NUM_CPU_CORES); |
| 41 | if (!must_context_switch || core != current_core) { | 43 | if (!must_context_switch || core != current_core) { |
| 42 | auto& phys_core = kernel.PhysicalCore(core); | 44 | auto& phys_core = kernel.PhysicalCore(core); |
| @@ -109,7 +111,7 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { | |||
| 109 | 111 | ||
| 110 | // Idle cores are bad. We're going to try to migrate threads to each idle core in turn. | 112 | // Idle cores are bad. We're going to try to migrate threads to each idle core in turn. |
| 111 | while (idle_cores != 0) { | 113 | while (idle_cores != 0) { |
| 112 | u32 core_id = Common::CountTrailingZeroes64(idle_cores); | 114 | const auto core_id = static_cast<u32>(std::countr_zero(idle_cores)); |
| 113 | if (Thread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) { | 115 | if (Thread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) { |
| 114 | s32 migration_candidates[Core::Hardware::NUM_CPU_CORES]; | 116 | s32 migration_candidates[Core::Hardware::NUM_CPU_CORES]; |
| 115 | size_t num_candidates = 0; | 117 | size_t num_candidates = 0; |
diff --git a/src/core/hle/kernel/memory/page_heap.h b/src/core/hle/kernel/memory/page_heap.h index 22b0de860..131093284 100644 --- a/src/core/hle/kernel/memory/page_heap.h +++ b/src/core/hle/kernel/memory/page_heap.h | |||
| @@ -8,11 +8,11 @@ | |||
| 8 | #pragma once | 8 | #pragma once |
| 9 | 9 | ||
| 10 | #include <array> | 10 | #include <array> |
| 11 | #include <bit> | ||
| 11 | #include <vector> | 12 | #include <vector> |
| 12 | 13 | ||
| 13 | #include "common/alignment.h" | 14 | #include "common/alignment.h" |
| 14 | #include "common/assert.h" | 15 | #include "common/assert.h" |
| 15 | #include "common/bit_util.h" | ||
| 16 | #include "common/common_funcs.h" | 16 | #include "common/common_funcs.h" |
| 17 | #include "common/common_types.h" | 17 | #include "common/common_types.h" |
| 18 | #include "core/hle/kernel/memory/memory_types.h" | 18 | #include "core/hle/kernel/memory/memory_types.h" |
| @@ -105,7 +105,7 @@ private: | |||
| 105 | ASSERT(depth == 0); | 105 | ASSERT(depth == 0); |
| 106 | return -1; | 106 | return -1; |
| 107 | } | 107 | } |
| 108 | offset = offset * 64 + Common::CountTrailingZeroes64(v); | 108 | offset = offset * 64 + static_cast<u32>(std::countr_zero(v)); |
| 109 | ++depth; | 109 | ++depth; |
| 110 | } while (depth < static_cast<s32>(used_depths)); | 110 | } while (depth < static_cast<s32>(used_depths)); |
| 111 | 111 | ||
diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp index 0f128c586..0566311b6 100644 --- a/src/core/hle/kernel/process_capability.cpp +++ b/src/core/hle/kernel/process_capability.cpp | |||
| @@ -2,6 +2,8 @@ | |||
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| 5 | #include <bit> | ||
| 6 | |||
| 5 | #include "common/bit_util.h" | 7 | #include "common/bit_util.h" |
| 6 | #include "common/logging/log.h" | 8 | #include "common/logging/log.h" |
| 7 | #include "core/hle/kernel/errors.h" | 9 | #include "core/hle/kernel/errors.h" |
| @@ -60,7 +62,7 @@ constexpr CapabilityType GetCapabilityType(u32 value) { | |||
| 60 | 62 | ||
| 61 | u32 GetFlagBitOffset(CapabilityType type) { | 63 | u32 GetFlagBitOffset(CapabilityType type) { |
| 62 | const auto value = static_cast<u32>(type); | 64 | const auto value = static_cast<u32>(type); |
| 63 | return static_cast<u32>(Common::BitSize<u32>() - Common::CountLeadingZeroes32(value)); | 65 | return static_cast<u32>(Common::BitSize<u32>() - static_cast<u32>(std::countl_zero(value))); |
| 64 | } | 66 | } |
| 65 | 67 | ||
| 66 | } // Anonymous namespace | 68 | } // Anonymous namespace |
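GetFlagBitOffset above swaps Common::CountLeadingZeroes32 for std::countl_zero; 32 minus the leading-zero count is the bit width of the value, i.e. one past the index of its highest set bit. A minimal illustration (FlagBitOffset is a stand-in name):

    #include <bit>
    #include <cstdint>

    std::uint32_t FlagBitOffset(std::uint32_t value) {
        return 32u - static_cast<std::uint32_t>(std::countl_zero(value));
    }

    // FlagBitOffset(0b0111)  == 3
    // FlagBitOffset(0b10000) == 5

For value == 0, std::countl_zero is well defined (it returns 32), so the result is 0.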
diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt index 8a606b448..6a5c18945 100644 --- a/src/tests/CMakeLists.txt +++ b/src/tests/CMakeLists.txt | |||
| @@ -1,11 +1,11 @@ | |||
| 1 | add_executable(tests | 1 | add_executable(tests |
| 2 | common/bit_field.cpp | 2 | common/bit_field.cpp |
| 3 | common/bit_utils.cpp | ||
| 4 | common/fibers.cpp | 3 | common/fibers.cpp |
| 5 | common/param_package.cpp | 4 | common/param_package.cpp |
| 6 | common/ring_buffer.cpp | 5 | common/ring_buffer.cpp |
| 7 | core/core_timing.cpp | 6 | core/core_timing.cpp |
| 8 | tests.cpp | 7 | tests.cpp |
| 8 | video_core/buffer_base.cpp | ||
| 9 | ) | 9 | ) |
| 10 | 10 | ||
| 11 | create_target_directory_groups(tests) | 11 | create_target_directory_groups(tests) |
diff --git a/src/tests/common/bit_utils.cpp b/src/tests/common/bit_utils.cpp deleted file mode 100644 index 479b5995a..000000000 --- a/src/tests/common/bit_utils.cpp +++ /dev/null | |||
| @@ -1,23 +0,0 @@ | |||
| 1 | // Copyright 2017 Citra Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include <catch2/catch.hpp> | ||
| 6 | #include <math.h> | ||
| 7 | #include "common/bit_util.h" | ||
| 8 | |||
| 9 | namespace Common { | ||
| 10 | |||
| 11 | TEST_CASE("BitUtils::CountTrailingZeroes", "[common]") { | ||
| 12 | REQUIRE(Common::CountTrailingZeroes32(0) == 32); | ||
| 13 | REQUIRE(Common::CountTrailingZeroes64(0) == 64); | ||
| 14 | REQUIRE(Common::CountTrailingZeroes32(9) == 0); | ||
| 15 | REQUIRE(Common::CountTrailingZeroes32(8) == 3); | ||
| 16 | REQUIRE(Common::CountTrailingZeroes32(0x801000) == 12); | ||
| 17 | REQUIRE(Common::CountTrailingZeroes64(9) == 0); | ||
| 18 | REQUIRE(Common::CountTrailingZeroes64(8) == 3); | ||
| 19 | REQUIRE(Common::CountTrailingZeroes64(0x801000) == 12); | ||
| 20 | REQUIRE(Common::CountTrailingZeroes64(0x801000000000UL) == 36); | ||
| 21 | } | ||
| 22 | |||
| 23 | } // namespace Common | ||
diff --git a/src/tests/video_core/buffer_base.cpp b/src/tests/video_core/buffer_base.cpp new file mode 100644 index 000000000..651633e9e --- /dev/null +++ b/src/tests/video_core/buffer_base.cpp | |||
| @@ -0,0 +1,473 @@ | |||
| 1 | // Copyright 2020 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include <stdexcept> | ||
| 6 | #include <unordered_map> | ||
| 7 | |||
| 8 | #include <catch2/catch.hpp> | ||
| 9 | |||
| 10 | #include "common/alignment.h" | ||
| 11 | #include "common/common_types.h" | ||
| 12 | #include "video_core/buffer_cache/buffer_base.h" | ||
| 13 | |||
| 14 | namespace { | ||
| 15 | using VideoCommon::BufferBase; | ||
| 16 | using Range = std::pair<u64, u64>; | ||
| 17 | |||
| 18 | constexpr u64 PAGE = 4096; | ||
| 19 | constexpr u64 WORD = 4096 * 64; | ||
| 20 | |||
| 21 | constexpr VAddr c = 0x1328914000; | ||
| 22 | |||
| 23 | class RasterizerInterface { | ||
| 24 | public: | ||
| 25 | void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) { | ||
| 26 | const u64 page_start{addr >> Core::Memory::PAGE_BITS}; | ||
| 27 | const u64 page_end{(addr + size + Core::Memory::PAGE_SIZE - 1) >> Core::Memory::PAGE_BITS}; | ||
| 28 | for (u64 page = page_start; page < page_end; ++page) { | ||
| 29 | int& value = page_table[page]; | ||
| 30 | value += delta; | ||
| 31 | if (value < 0) { | ||
| 32 | throw std::logic_error{"negative page"}; | ||
| 33 | } | ||
| 34 | if (value == 0) { | ||
| 35 | page_table.erase(page); | ||
| 36 | } | ||
| 37 | } | ||
| 38 | } | ||
| 39 | |||
| 40 | [[nodiscard]] int Count(VAddr addr) const noexcept { | ||
| 41 | const auto it = page_table.find(addr >> Core::Memory::PAGE_BITS); | ||
| 42 | return it == page_table.end() ? 0 : it->second; | ||
| 43 | } | ||
| 44 | |||
| 45 | [[nodiscard]] unsigned Count() const noexcept { | ||
| 46 | unsigned count = 0; | ||
| 47 | for (const auto [index, value] : page_table) { | ||
| 48 | count += value; | ||
| 49 | } | ||
| 50 | return count; | ||
| 51 | } | ||
| 52 | |||
| 53 | private: | ||
| 54 | std::unordered_map<u64, int> page_table; | ||
| 55 | }; | ||
| 56 | } // Anonymous namespace | ||
| 57 | |||
| 58 | TEST_CASE("BufferBase: Small buffer", "[video_core]") { | ||
| 59 | RasterizerInterface rasterizer; | ||
| 60 | BufferBase buffer(rasterizer, c, WORD); | ||
| 61 | REQUIRE(rasterizer.Count() == 0); | ||
| 62 | buffer.UnmarkRegionAsCpuModified(c, WORD); | ||
| 63 | REQUIRE(rasterizer.Count() == WORD / PAGE); | ||
| 64 | REQUIRE(buffer.ModifiedCpuRegion(c, WORD) == Range{0, 0}); | ||
| 65 | |||
| 66 | buffer.MarkRegionAsCpuModified(c + PAGE, 1); | ||
| 67 | REQUIRE(buffer.ModifiedCpuRegion(c, WORD) == Range{PAGE * 1, PAGE * 2}); | ||
| 68 | } | ||
| 69 | |||
| 70 | TEST_CASE("BufferBase: Large buffer", "[video_core]") { | ||
| 71 | RasterizerInterface rasterizer; | ||
| 72 | BufferBase buffer(rasterizer, c, WORD * 32); | ||
| 73 | buffer.UnmarkRegionAsCpuModified(c, WORD * 32); | ||
| 74 | buffer.MarkRegionAsCpuModified(c + 4096, WORD * 4); | ||
| 75 | REQUIRE(buffer.ModifiedCpuRegion(c, WORD + PAGE * 2) == Range{PAGE, WORD + PAGE * 2}); | ||
| 76 | REQUIRE(buffer.ModifiedCpuRegion(c + PAGE * 2, PAGE * 6) == Range{PAGE * 2, PAGE * 8}); | ||
| 77 | REQUIRE(buffer.ModifiedCpuRegion(c, WORD * 32) == Range{PAGE, WORD * 4 + PAGE}); | ||
| 78 | REQUIRE(buffer.ModifiedCpuRegion(c + WORD * 4, PAGE) == Range{WORD * 4, WORD * 4 + PAGE}); | ||
| 79 | REQUIRE(buffer.ModifiedCpuRegion(c + WORD * 3 + PAGE * 63, PAGE) == | ||
| 80 | Range{WORD * 3 + PAGE * 63, WORD * 4}); | ||
| 81 | |||
| 82 | buffer.MarkRegionAsCpuModified(c + WORD * 5 + PAGE * 6, PAGE); | ||
| 83 | buffer.MarkRegionAsCpuModified(c + WORD * 5 + PAGE * 8, PAGE); | ||
| 84 | REQUIRE(buffer.ModifiedCpuRegion(c + WORD * 5, WORD) == | ||
| 85 | Range{WORD * 5 + PAGE * 6, WORD * 5 + PAGE * 9}); | ||
| 86 | |||
| 87 | buffer.UnmarkRegionAsCpuModified(c + WORD * 5 + PAGE * 8, PAGE); | ||
| 88 | REQUIRE(buffer.ModifiedCpuRegion(c + WORD * 5, WORD) == | ||
| 89 | Range{WORD * 5 + PAGE * 6, WORD * 5 + PAGE * 7}); | ||
| 90 | |||
| 91 | buffer.MarkRegionAsCpuModified(c + PAGE, WORD * 31 + PAGE * 63); | ||
| 92 | REQUIRE(buffer.ModifiedCpuRegion(c, WORD * 32) == Range{PAGE, WORD * 32}); | ||
| 93 | |||
| 94 | buffer.UnmarkRegionAsCpuModified(c + PAGE * 4, PAGE); | ||
| 95 | buffer.UnmarkRegionAsCpuModified(c + PAGE * 6, PAGE); | ||
| 96 | |||
| 97 | buffer.UnmarkRegionAsCpuModified(c, WORD * 32); | ||
| 98 | REQUIRE(buffer.ModifiedCpuRegion(c, WORD * 32) == Range{0, 0}); | ||
| 99 | } | ||
| 100 | |||
| 101 | TEST_CASE("BufferBase: Rasterizer counting", "[video_core]") { | ||
| 102 | RasterizerInterface rasterizer; | ||
| 103 | BufferBase buffer(rasterizer, c, PAGE * 2); | ||
| 104 | REQUIRE(rasterizer.Count() == 0); | ||
| 105 | buffer.UnmarkRegionAsCpuModified(c, PAGE); | ||
| 106 | REQUIRE(rasterizer.Count() == 1); | ||
| 107 | buffer.MarkRegionAsCpuModified(c, PAGE * 2); | ||
| 108 | REQUIRE(rasterizer.Count() == 0); | ||
| 109 | buffer.UnmarkRegionAsCpuModified(c, PAGE); | ||
| 110 | buffer.UnmarkRegionAsCpuModified(c + PAGE, PAGE); | ||
| 111 | REQUIRE(rasterizer.Count() == 2); | ||
| 112 | buffer.MarkRegionAsCpuModified(c, PAGE * 2); | ||
| 113 | REQUIRE(rasterizer.Count() == 0); | ||
| 114 | } | ||
| 115 | |||
| 116 | TEST_CASE("BufferBase: Basic range", "[video_core]") { | ||
| 117 | RasterizerInterface rasterizer; | ||
| 118 | BufferBase buffer(rasterizer, c, WORD); | ||
| 119 | buffer.UnmarkRegionAsCpuModified(c, WORD); | ||
| 120 | buffer.MarkRegionAsCpuModified(c, PAGE); | ||
| 121 | int num = 0; | ||
| 122 | buffer.ForEachUploadRange(c, WORD, [&](u64 offset, u64 size) { | ||
| 123 | REQUIRE(offset == 0U); | ||
| 124 | REQUIRE(size == PAGE); | ||
| 125 | ++num; | ||
| 126 | }); | ||
| 127 | REQUIRE(num == 1); | ||
| 128 | } | ||
| 129 | |||
| 130 | TEST_CASE("BufferBase: Border upload", "[video_core]") { | ||
| 131 | RasterizerInterface rasterizer; | ||
| 132 | BufferBase buffer(rasterizer, c, WORD * 2); | ||
| 133 | buffer.UnmarkRegionAsCpuModified(c, WORD * 2); | ||
| 134 | buffer.MarkRegionAsCpuModified(c + WORD - PAGE, PAGE * 2); | ||
| 135 | buffer.ForEachUploadRange(c, WORD * 2, [](u64 offset, u64 size) { | ||
| 136 | REQUIRE(offset == WORD - PAGE); | ||
| 137 | REQUIRE(size == PAGE * 2); | ||
| 138 | }); | ||
| 139 | } | ||
| 140 | |||
| 141 | TEST_CASE("BufferBase: Border upload range", "[video_core]") { | ||
| 142 | RasterizerInterface rasterizer; | ||
| 143 | BufferBase buffer(rasterizer, c, WORD * 2); | ||
| 144 | buffer.UnmarkRegionAsCpuModified(c, WORD * 2); | ||
| 145 | buffer.MarkRegionAsCpuModified(c + WORD - PAGE, PAGE * 2); | ||
| 146 | buffer.ForEachUploadRange(c + WORD - PAGE, PAGE * 2, [](u64 offset, u64 size) { | ||
| 147 | REQUIRE(offset == WORD - PAGE); | ||
| 148 | REQUIRE(size == PAGE * 2); | ||
| 149 | }); | ||
| 150 | buffer.MarkRegionAsCpuModified(c + WORD - PAGE, PAGE * 2); | ||
| 151 | buffer.ForEachUploadRange(c + WORD - PAGE, PAGE, [](u64 offset, u64 size) { | ||
| 152 | REQUIRE(offset == WORD - PAGE); | ||
| 153 | REQUIRE(size == PAGE); | ||
| 154 | }); | ||
| 155 | buffer.ForEachUploadRange(c + WORD, PAGE, [](u64 offset, u64 size) { | ||
| 156 | REQUIRE(offset == WORD); | ||
| 157 | REQUIRE(size == PAGE); | ||
| 158 | }); | ||
| 159 | } | ||
| 160 | |||
| 161 | TEST_CASE("BufferBase: Border upload partial range", "[video_core]") { | ||
| 162 | RasterizerInterface rasterizer; | ||
| 163 | BufferBase buffer(rasterizer, c, WORD * 2); | ||
| 164 | buffer.UnmarkRegionAsCpuModified(c, WORD * 2); | ||
| 165 | buffer.MarkRegionAsCpuModified(c + WORD - PAGE, PAGE * 2); | ||
| 166 | buffer.ForEachUploadRange(c + WORD - 1, 2, [](u64 offset, u64 size) { | ||
| 167 | REQUIRE(offset == WORD - PAGE); | ||
| 168 | REQUIRE(size == PAGE * 2); | ||
| 169 | }); | ||
| 170 | buffer.MarkRegionAsCpuModified(c + WORD - PAGE, PAGE * 2); | ||
| 171 | buffer.ForEachUploadRange(c + WORD - 1, 1, [](u64 offset, u64 size) { | ||
| 172 | REQUIRE(offset == WORD - PAGE); | ||
| 173 | REQUIRE(size == PAGE); | ||
| 174 | }); | ||
| 175 | buffer.ForEachUploadRange(c + WORD + 50, 1, [](u64 offset, u64 size) { | ||
| 176 | REQUIRE(offset == WORD); | ||
| 177 | REQUIRE(size == PAGE); | ||
| 178 | }); | ||
| 179 | } | ||
| 180 | |||
| 181 | TEST_CASE("BufferBase: Partial word uploads", "[video_core]") { | ||
| 182 | RasterizerInterface rasterizer; | ||
| 183 | BufferBase buffer(rasterizer, c, 0x9d000); | ||
| 184 | int num = 0; | ||
| 185 | buffer.ForEachUploadRange(c, WORD, [&](u64 offset, u64 size) { | ||
| 186 | REQUIRE(offset == 0U); | ||
| 187 | REQUIRE(size == WORD); | ||
| 188 | ++num; | ||
| 189 | }); | ||
| 190 | REQUIRE(num == 1); | ||
| 191 | buffer.ForEachUploadRange(c + WORD, WORD, [&](u64 offset, u64 size) { | ||
| 192 | REQUIRE(offset == WORD); | ||
| 193 | REQUIRE(size == WORD); | ||
| 194 | ++num; | ||
| 195 | }); | ||
| 196 | REQUIRE(num == 2); | ||
| 197 | buffer.ForEachUploadRange(c + 0x79000, 0x24000, [&](u64 offset, u64 size) { | ||
| 198 | REQUIRE(offset == WORD * 2); | ||
| 199 | REQUIRE(size == PAGE * 0x1d); | ||
| 200 | ++num; | ||
| 201 | }); | ||
| 202 | REQUIRE(num == 3); | ||
| 203 | } | ||
| 204 | |||
| 205 | TEST_CASE("BufferBase: Partial page upload", "[video_core]") { | ||
| 206 | RasterizerInterface rasterizer; | ||
| 207 | BufferBase buffer(rasterizer, c, WORD); | ||
| 208 | buffer.UnmarkRegionAsCpuModified(c, WORD); | ||
| 209 | int num = 0; | ||
| 210 | buffer.MarkRegionAsCpuModified(c + PAGE * 2, PAGE); | ||
| 211 | buffer.MarkRegionAsCpuModified(c + PAGE * 9, PAGE); | ||
| 212 | buffer.ForEachUploadRange(c, PAGE * 3, [&](u64 offset, u64 size) { | ||
| 213 | REQUIRE(offset == PAGE * 2); | ||
| 214 | REQUIRE(size == PAGE); | ||
| 215 | ++num; | ||
| 216 | }); | ||
| 217 | REQUIRE(num == 1); | ||
| 218 | buffer.ForEachUploadRange(c + PAGE * 7, PAGE * 3, [&](u64 offset, u64 size) { | ||
| 219 | REQUIRE(offset == PAGE * 9); | ||
| 220 | REQUIRE(size == PAGE); | ||
| 221 | ++num; | ||
| 222 | }); | ||
| 223 | REQUIRE(num == 2); | ||
| 224 | } | ||
| 225 | |||
| 226 | TEST_CASE("BufferBase: Partial page upload with multiple words on the right", "[video_core]") { | ||
| 227 | RasterizerInterface rasterizer; | ||
| 228 | BufferBase buffer(rasterizer, c, WORD * 8); | ||
| 229 | buffer.UnmarkRegionAsCpuModified(c, WORD * 8); | ||
| 230 | buffer.MarkRegionAsCpuModified(c + PAGE * 13, WORD * 7); | ||
| 231 | int num = 0; | ||
| 232 | buffer.ForEachUploadRange(c + PAGE * 10, WORD * 7, [&](u64 offset, u64 size) { | ||
| 233 | REQUIRE(offset == PAGE * 13); | ||
| 234 | REQUIRE(size == WORD * 7 - PAGE * 3); | ||
| 235 | ++num; | ||
| 236 | }); | ||
| 237 | REQUIRE(num == 1); | ||
| 238 | buffer.ForEachUploadRange(c + PAGE, WORD * 8, [&](u64 offset, u64 size) { | ||
| 239 | REQUIRE(offset == WORD * 7 + PAGE * 10); | ||
| 240 | REQUIRE(size == PAGE * 3); | ||
| 241 | ++num; | ||
| 242 | }); | ||
| 243 | REQUIRE(num == 2); | ||
| 244 | } | ||
| 245 | |||
| 246 | TEST_CASE("BufferBase: Partial page upload with multiple words on the left", "[video_core]") { | ||
| 247 | RasterizerInterface rasterizer; | ||
| 248 | BufferBase buffer(rasterizer, c, WORD * 8); | ||
| 249 | buffer.UnmarkRegionAsCpuModified(c, WORD * 8); | ||
| 250 | buffer.MarkRegionAsCpuModified(c + PAGE * 13, WORD * 7); | ||
| 251 | int num = 0; | ||
| 252 | buffer.ForEachUploadRange(c + PAGE * 16, WORD * 7, [&](u64 offset, u64 size) { | ||
| 253 | REQUIRE(offset == PAGE * 16); | ||
| 254 | REQUIRE(size == WORD * 7 - PAGE * 3); | ||
| 255 | ++num; | ||
| 256 | }); | ||
| 257 | REQUIRE(num == 1); | ||
| 258 | buffer.ForEachUploadRange(c + PAGE, WORD, [&](u64 offset, u64 size) { | ||
| 259 | REQUIRE(offset == PAGE * 13); | ||
| 260 | REQUIRE(size == PAGE * 3); | ||
| 261 | ++num; | ||
| 262 | }); | ||
| 263 | REQUIRE(num == 2); | ||
| 264 | } | ||
| 265 | |||
| 266 | TEST_CASE("BufferBase: Partial page upload with multiple words in the middle", "[video_core]") { | ||
| 267 | RasterizerInterface rasterizer; | ||
| 268 | BufferBase buffer(rasterizer, c, WORD * 8); | ||
| 269 | buffer.UnmarkRegionAsCpuModified(c, WORD * 8); | ||
| 270 | buffer.MarkRegionAsCpuModified(c + PAGE * 13, PAGE * 140); | ||
| 271 | int num = 0; | ||
| 272 | buffer.ForEachUploadRange(c + PAGE * 16, WORD, [&](u64 offset, u64 size) { | ||
| 273 | REQUIRE(offset == PAGE * 16); | ||
| 274 | REQUIRE(size == WORD); | ||
| 275 | ++num; | ||
| 276 | }); | ||
| 277 | REQUIRE(num == 1); | ||
| 278 | buffer.ForEachUploadRange(c, WORD, [&](u64 offset, u64 size) { | ||
| 279 | REQUIRE(offset == PAGE * 13); | ||
| 280 | REQUIRE(size == PAGE * 3); | ||
| 281 | ++num; | ||
| 282 | }); | ||
| 283 | REQUIRE(num == 2); | ||
| 284 | buffer.ForEachUploadRange(c, WORD * 8, [&](u64 offset, u64 size) { | ||
| 285 | REQUIRE(offset == WORD + PAGE * 16); | ||
| 286 | REQUIRE(size == PAGE * 73); | ||
| 287 | ++num; | ||
| 288 | }); | ||
| 289 | REQUIRE(num == 3); | ||
| 290 | } | ||
| 291 | |||
| 292 | TEST_CASE("BufferBase: Empty right bits", "[video_core]") { | ||
| 293 | RasterizerInterface rasterizer; | ||
| 294 | BufferBase buffer(rasterizer, c, WORD * 2048); | ||
| 295 | buffer.UnmarkRegionAsCpuModified(c, WORD * 2048); | ||
| 296 | buffer.MarkRegionAsCpuModified(c + WORD - PAGE, PAGE * 2); | ||
| 297 | buffer.ForEachUploadRange(c, WORD * 2048, [](u64 offset, u64 size) { | ||
| 298 | REQUIRE(offset == WORD - PAGE); | ||
| 299 | REQUIRE(size == PAGE * 2); | ||
| 300 | }); | ||
| 301 | } | ||
| 302 | |||
| 303 | TEST_CASE("BufferBase: Out of bound ranges 1", "[video_core]") { | ||
| 304 | RasterizerInterface rasterizer; | ||
| 305 | BufferBase buffer(rasterizer, c, WORD); | ||
| 306 | buffer.UnmarkRegionAsCpuModified(c, WORD); | ||
| 307 | buffer.MarkRegionAsCpuModified(c, PAGE); | ||
| 308 | int num = 0; | ||
| 309 | buffer.ForEachUploadRange(c - WORD, WORD, [&](u64, u64) { ++num; }); | ||
| 310 | buffer.ForEachUploadRange(c + WORD, WORD, [&](u64, u64) { ++num; }); | ||
| 311 | buffer.ForEachUploadRange(c - PAGE, PAGE, [&](u64, u64) { ++num; }); | ||
| 312 | REQUIRE(num == 0); | ||
| 313 | buffer.ForEachUploadRange(c - PAGE, PAGE * 2, [&](u64, u64) { ++num; }); | ||
| 314 | REQUIRE(num == 1); | ||
| 315 | buffer.MarkRegionAsCpuModified(c, WORD); | ||
| 316 | REQUIRE(rasterizer.Count() == 0); | ||
| 317 | } | ||
| 318 | |||
| 319 | TEST_CASE("BufferBase: Out of bound ranges 2", "[video_core]") { | ||
| 320 | RasterizerInterface rasterizer; | ||
| 321 | BufferBase buffer(rasterizer, c, 0x22000); | ||
| 322 | REQUIRE_NOTHROW(buffer.UnmarkRegionAsCpuModified(c + 0x22000, PAGE)); | ||
| 323 | REQUIRE_NOTHROW(buffer.UnmarkRegionAsCpuModified(c + 0x28000, PAGE)); | ||
| 324 | REQUIRE(rasterizer.Count() == 0); | ||
| 325 | REQUIRE_NOTHROW(buffer.UnmarkRegionAsCpuModified(c + 0x21100, PAGE - 0x100)); | ||
| 326 | REQUIRE(rasterizer.Count() == 1); | ||
| 327 | REQUIRE_NOTHROW(buffer.UnmarkRegionAsCpuModified(c - 0x1000, PAGE * 2)); | ||
| 328 | buffer.UnmarkRegionAsCpuModified(c - 0x3000, PAGE * 2); | ||
| 329 | buffer.UnmarkRegionAsCpuModified(c - 0x2000, PAGE * 2); | ||
| 330 | REQUIRE(rasterizer.Count() == 2); | ||
| 331 | } | ||
| 332 | |||
| 333 | TEST_CASE("BufferBase: Out of bound ranges 3", "[video_core]") { | ||
| 334 | RasterizerInterface rasterizer; | ||
| 335 | BufferBase buffer(rasterizer, c, 0x310720); | ||
| 336 | buffer.UnmarkRegionAsCpuModified(c, 0x310720); | ||
| 337 | REQUIRE(rasterizer.Count(c) == 1); | ||
| 338 | REQUIRE(rasterizer.Count(c + PAGE) == 1); | ||
| 339 | REQUIRE(rasterizer.Count(c + WORD) == 1); | ||
| 340 | REQUIRE(rasterizer.Count(c + WORD + PAGE) == 1); | ||
| 341 | } | ||
| 342 | |||
| 343 | TEST_CASE("BufferBase: Sparse regions 1", "[video_core]") { | ||
| 344 | RasterizerInterface rasterizer; | ||
| 345 | BufferBase buffer(rasterizer, c, WORD); | ||
| 346 | buffer.UnmarkRegionAsCpuModified(c, WORD); | ||
| 347 | buffer.MarkRegionAsCpuModified(c + PAGE * 1, PAGE); | ||
| 348 | buffer.MarkRegionAsCpuModified(c + PAGE * 3, PAGE * 4); | ||
| 349 | buffer.ForEachUploadRange(c, WORD, [i = 0](u64 offset, u64 size) mutable { | ||
| 350 | static constexpr std::array<u64, 2> offsets{PAGE, PAGE * 3}; | ||
| 351 | static constexpr std::array<u64, 2> sizes{PAGE, PAGE * 4}; | ||
| 352 | REQUIRE(offset == offsets.at(i)); | ||
| 353 | REQUIRE(size == sizes.at(i)); | ||
| 354 | ++i; | ||
| 355 | }); | ||
| 356 | } | ||
| 357 | |||
| 358 | TEST_CASE("BufferBase: Sparse regions 2", "[video_core]") { | ||
| 359 | RasterizerInterface rasterizer; | ||
| 360 | BufferBase buffer(rasterizer, c, 0x22000); | ||
| 361 | buffer.UnmarkRegionAsCpuModified(c, 0x22000); | ||
| 362 | REQUIRE(rasterizer.Count() == 0x22); | ||
| 363 | buffer.MarkRegionAsCpuModified(c + PAGE * 0x1B, PAGE); | ||
| 364 | buffer.MarkRegionAsCpuModified(c + PAGE * 0x21, PAGE); | ||
| 365 | buffer.ForEachUploadRange(c, WORD, [i = 0](u64 offset, u64 size) mutable { | ||
| 366 | static constexpr std::array<u64, 2> offsets{PAGE * 0x1B, PAGE * 0x21}; | ||
| 367 | static constexpr std::array<u64, 2> sizes{PAGE, PAGE}; | ||
| 368 | REQUIRE(offset == offsets.at(i)); | ||
| 369 | REQUIRE(size == sizes.at(i)); | ||
| 370 | ++i; | ||
| 371 | }); | ||
| 372 | } | ||
| 373 | |||
| 374 | TEST_CASE("BufferBase: Single page modified range", "[video_core]") { | ||
| 375 | RasterizerInterface rasterizer; | ||
| 376 | BufferBase buffer(rasterizer, c, PAGE); | ||
| 377 | REQUIRE(buffer.IsRegionCpuModified(c, PAGE)); | ||
| 378 | buffer.UnmarkRegionAsCpuModified(c, PAGE); | ||
| 379 | REQUIRE(!buffer.IsRegionCpuModified(c, PAGE)); | ||
| 380 | } | ||
| 381 | |||
| 382 | TEST_CASE("BufferBase: Two page modified range", "[video_core]") { | ||
| 383 | RasterizerInterface rasterizer; | ||
| 384 | BufferBase buffer(rasterizer, c, PAGE * 2); | ||
| 385 | REQUIRE(buffer.IsRegionCpuModified(c, PAGE)); | ||
| 386 | REQUIRE(buffer.IsRegionCpuModified(c + PAGE, PAGE)); | ||
| 387 | REQUIRE(buffer.IsRegionCpuModified(c, PAGE * 2)); | ||
| 388 | buffer.UnmarkRegionAsCpuModified(c, PAGE); | ||
| 389 | REQUIRE(!buffer.IsRegionCpuModified(c, PAGE)); | ||
| 390 | } | ||
| 391 | |||
| 392 | TEST_CASE("BufferBase: Multi word modified ranges", "[video_core]") { | ||
| 393 | for (int offset = 0; offset < 4; ++offset) { | ||
| 394 | const VAddr address = c + WORD * offset; | ||
| 395 | RasterizerInterface rasterizer; | ||
| 396 | BufferBase buffer(rasterizer, address, WORD * 4); | ||
| 397 | REQUIRE(buffer.IsRegionCpuModified(address, PAGE)); | ||
| 398 | REQUIRE(buffer.IsRegionCpuModified(address + PAGE * 48, PAGE)); | ||
| 399 | REQUIRE(buffer.IsRegionCpuModified(address + PAGE * 56, PAGE)); | ||
| 400 | |||
| 401 | buffer.UnmarkRegionAsCpuModified(address + PAGE * 32, PAGE); | ||
| 402 | REQUIRE(buffer.IsRegionCpuModified(address + PAGE, WORD)); | ||
| 403 | REQUIRE(buffer.IsRegionCpuModified(address + PAGE * 31, PAGE)); | ||
| 404 | REQUIRE(!buffer.IsRegionCpuModified(address + PAGE * 32, PAGE)); | ||
| 405 | REQUIRE(buffer.IsRegionCpuModified(address + PAGE * 33, PAGE)); | ||
| 406 | REQUIRE(buffer.IsRegionCpuModified(address + PAGE * 31, PAGE * 2)); | ||
| 407 | REQUIRE(buffer.IsRegionCpuModified(address + PAGE * 32, PAGE * 2)); | ||
| 408 | |||
| 409 | buffer.UnmarkRegionAsCpuModified(address + PAGE * 33, PAGE); | ||
| 410 | REQUIRE(!buffer.IsRegionCpuModified(address + PAGE * 32, PAGE * 2)); | ||
| 411 | } | ||
| 412 | } | ||
| 413 | |||
| 414 | TEST_CASE("BufferBase: Single page in large buffer", "[video_core]") { | ||
| 415 | RasterizerInterface rasterizer; | ||
| 416 | BufferBase buffer(rasterizer, c, WORD * 16); | ||
| 417 | buffer.UnmarkRegionAsCpuModified(c, WORD * 16); | ||
| 418 | REQUIRE(!buffer.IsRegionCpuModified(c, WORD * 16)); | ||
| 419 | |||
| 420 | buffer.MarkRegionAsCpuModified(c + WORD * 12 + PAGE * 8, PAGE); | ||
| 421 | REQUIRE(buffer.IsRegionCpuModified(c, WORD * 16)); | ||
| 422 | REQUIRE(buffer.IsRegionCpuModified(c + WORD * 10, WORD * 2)); | ||
| 423 | REQUIRE(buffer.IsRegionCpuModified(c + WORD * 11, WORD * 2)); | ||
| 424 | REQUIRE(buffer.IsRegionCpuModified(c + WORD * 12, WORD * 2)); | ||
| 425 | REQUIRE(buffer.IsRegionCpuModified(c + WORD * 12 + PAGE * 4, PAGE * 8)); | ||
| 426 | REQUIRE(buffer.IsRegionCpuModified(c + WORD * 12 + PAGE * 6, PAGE * 8)); | ||
| 427 | REQUIRE(!buffer.IsRegionCpuModified(c + WORD * 12 + PAGE * 6, PAGE)); | ||
| 428 | REQUIRE(buffer.IsRegionCpuModified(c + WORD * 12 + PAGE * 7, PAGE * 2)); | ||
| 429 | REQUIRE(buffer.IsRegionCpuModified(c + WORD * 12 + PAGE * 8, PAGE * 2)); | ||
| 430 | } | ||
| 431 | |||
| 432 | TEST_CASE("BufferBase: Out of bounds region query", "[video_core]") { | ||
| 433 | RasterizerInterface rasterizer; | ||
| 434 | BufferBase buffer(rasterizer, c, WORD * 16); | ||
| 435 | REQUIRE(!buffer.IsRegionCpuModified(c - PAGE, PAGE)); | ||
| 436 | REQUIRE(!buffer.IsRegionCpuModified(c - PAGE * 2, PAGE)); | ||
| 437 | REQUIRE(!buffer.IsRegionCpuModified(c + WORD * 16, PAGE)); | ||
| 438 | REQUIRE(buffer.IsRegionCpuModified(c + WORD * 16 - PAGE, WORD * 64)); | ||
| 439 | REQUIRE(!buffer.IsRegionCpuModified(c + WORD * 16, WORD * 64)); | ||
| 440 | } | ||
| 441 | |||
| 442 | TEST_CASE("BufferBase: Wrap word regions", "[video_core]") { | ||
| 443 | RasterizerInterface rasterizer; | ||
| 444 | BufferBase buffer(rasterizer, c, WORD * 2); | ||
| 445 | buffer.UnmarkRegionAsCpuModified(c, WORD * 2); | ||
| 446 | buffer.MarkRegionAsCpuModified(c + PAGE * 63, PAGE * 2); | ||
| 447 | REQUIRE(buffer.IsRegionCpuModified(c, WORD * 2)); | ||
| 448 | REQUIRE(!buffer.IsRegionCpuModified(c + PAGE * 62, PAGE)); | ||
| 449 | REQUIRE(buffer.IsRegionCpuModified(c + PAGE * 63, PAGE)); | ||
| 450 | REQUIRE(buffer.IsRegionCpuModified(c + PAGE * 64, PAGE)); | ||
| 451 | REQUIRE(buffer.IsRegionCpuModified(c + PAGE * 63, PAGE * 2)); | ||
| 452 | REQUIRE(buffer.IsRegionCpuModified(c + PAGE * 63, PAGE * 8)); | ||
| 453 | REQUIRE(buffer.IsRegionCpuModified(c + PAGE * 60, PAGE * 8)); | ||
| 454 | |||
| 455 | REQUIRE(!buffer.IsRegionCpuModified(c + PAGE * 127, WORD * 16)); | ||
| 456 | buffer.MarkRegionAsCpuModified(c + PAGE * 127, PAGE); | ||
| 457 | REQUIRE(buffer.IsRegionCpuModified(c + PAGE * 127, WORD * 16)); | ||
| 458 | REQUIRE(buffer.IsRegionCpuModified(c + PAGE * 127, PAGE)); | ||
| 459 | REQUIRE(!buffer.IsRegionCpuModified(c + PAGE * 126, PAGE)); | ||
| 460 | REQUIRE(buffer.IsRegionCpuModified(c + PAGE * 126, PAGE * 2)); | ||
| 461 | REQUIRE(!buffer.IsRegionCpuModified(c + PAGE * 128, WORD * 16)); | ||
| 462 | } | ||
| 463 | |||
| 464 | TEST_CASE("BufferBase: Unaligned page region query", "[video_core]") { | ||
| 465 | RasterizerInterface rasterizer; | ||
| 466 | BufferBase buffer(rasterizer, c, WORD); | ||
| 467 | buffer.UnmarkRegionAsCpuModified(c, WORD); | ||
| 468 | buffer.MarkRegionAsCpuModified(c + 4000, 1000); | ||
| 469 | REQUIRE(buffer.IsRegionCpuModified(c, PAGE)); | ||
| 470 | REQUIRE(buffer.IsRegionCpuModified(c + PAGE, PAGE)); | ||
| 471 | REQUIRE(buffer.IsRegionCpuModified(c + 4000, 1000)); | ||
| 472 | REQUIRE(buffer.IsRegionCpuModified(c + 4000, 1)); | ||
| 473 | } | ||
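Note: the tests above use constants, a Range alias and a stub RasterizerInterface declared in the anonymous namespace at the top of the file (only its closing brace is visible in this view). A minimal sketch of what those declarations are assumed to look like — the counter implementation and the exact base address are illustrative, not the file's verbatim code:

    // Illustrative sketch; assumes "common/common_types.h" for u64/VAddr plus <unordered_map> and <utility>.
    constexpr u64 PAGE = 4096;            // assumed to match Core::Memory::PAGE_SIZE
    constexpr u64 WORD = 64 * PAGE;       // one 64-bit tracking word covers 64 pages
    constexpr VAddr c = 0x1'0000'0000ULL; // arbitrary page-aligned base address
    using Range = std::pair<u64, u64>;    // byte offsets returned by ModifiedCpuRegion

    class RasterizerInterface {
    public:
        void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {
            const u64 page_begin = addr / PAGE;
            const u64 page_end = (addr + size + PAGE - 1) / PAGE;
            for (u64 page = page_begin; page < page_end; ++page) {
                page_counts[page] += delta; // +1 when tracking starts, -1 when it stops
            }
        }
        [[nodiscard]] int Count() const noexcept {
            int count = 0;
            for (const auto& entry : page_counts) {
                count += entry.second; // total number of currently tracked pages
            }
            return count;
        }
        [[nodiscard]] int Count(VAddr addr) const noexcept {
            const auto it = page_counts.find(addr / PAGE);
            return it == page_counts.end() ? 0 : it->second;
        }
    private:
        std::unordered_map<u64, int> page_counts;
    };

    using BufferBase = VideoCommon::BufferBase<RasterizerInterface>;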
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt index 25a4b1c5b..e01ea55ab 100644 --- a/src/video_core/CMakeLists.txt +++ b/src/video_core/CMakeLists.txt | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | add_subdirectory(host_shaders) | 1 | add_subdirectory(host_shaders) |
| 2 | 2 | ||
| 3 | add_library(video_core STATIC | 3 | add_library(video_core STATIC |
| 4 | buffer_cache/buffer_base.h | ||
| 4 | buffer_cache/buffer_block.h | 5 | buffer_cache/buffer_block.h |
| 5 | buffer_cache/buffer_cache.h | 6 | buffer_cache/buffer_cache.h |
| 6 | buffer_cache/map_interval.cpp | 7 | buffer_cache/map_interval.cpp |
| @@ -135,8 +136,6 @@ add_library(video_core STATIC | |||
| 135 | renderer_vulkan/vk_graphics_pipeline.h | 136 | renderer_vulkan/vk_graphics_pipeline.h |
| 136 | renderer_vulkan/vk_master_semaphore.cpp | 137 | renderer_vulkan/vk_master_semaphore.cpp |
| 137 | renderer_vulkan/vk_master_semaphore.h | 138 | renderer_vulkan/vk_master_semaphore.h |
| 138 | renderer_vulkan/vk_memory_manager.cpp | ||
| 139 | renderer_vulkan/vk_memory_manager.h | ||
| 140 | renderer_vulkan/vk_pipeline_cache.cpp | 139 | renderer_vulkan/vk_pipeline_cache.cpp |
| 141 | renderer_vulkan/vk_pipeline_cache.h | 140 | renderer_vulkan/vk_pipeline_cache.h |
| 142 | renderer_vulkan/vk_query_cache.cpp | 141 | renderer_vulkan/vk_query_cache.cpp |
| @@ -259,6 +258,8 @@ add_library(video_core STATIC | |||
| 259 | vulkan_common/vulkan_instance.h | 258 | vulkan_common/vulkan_instance.h |
| 260 | vulkan_common/vulkan_library.cpp | 259 | vulkan_common/vulkan_library.cpp |
| 261 | vulkan_common/vulkan_library.h | 260 | vulkan_common/vulkan_library.h |
| 261 | vulkan_common/vulkan_memory_allocator.cpp | ||
| 262 | vulkan_common/vulkan_memory_allocator.h | ||
| 262 | vulkan_common/vulkan_surface.cpp | 263 | vulkan_common/vulkan_surface.cpp |
| 263 | vulkan_common/vulkan_surface.h | 264 | vulkan_common/vulkan_surface.h |
| 264 | vulkan_common/vulkan_wrapper.cpp | 265 | vulkan_common/vulkan_wrapper.cpp |
diff --git a/src/video_core/buffer_cache/buffer_base.h b/src/video_core/buffer_cache/buffer_base.h new file mode 100644 index 000000000..ee8602ce9 --- /dev/null +++ b/src/video_core/buffer_cache/buffer_base.h | |||
| @@ -0,0 +1,495 @@ | |||
| 1 | // Copyright 2020 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include <algorithm> | ||
| 8 | #include <bit> | ||
| 9 | #include <limits> | ||
| 10 | #include <utility> | ||
| 11 | |||
| 12 | #include "common/alignment.h" | ||
| 13 | #include "common/common_funcs.h" | ||
| 14 | #include "common/common_types.h" | ||
| 15 | #include "common/div_ceil.h" | ||
| 16 | #include "core/memory.h" | ||
| 17 | |||
| 18 | namespace VideoCommon { | ||
| 19 | |||
| 20 | enum class BufferFlagBits { | ||
| 21 | Picked = 1 << 0, | ||
| 22 | }; | ||
| 23 | DECLARE_ENUM_FLAG_OPERATORS(BufferFlagBits) | ||
| 24 | |||
| 25 | /// Tag for creating null buffers with no storage or size | ||
| 26 | struct NullBufferParams {}; | ||
| 27 | |||
| 28 | /** | ||
| 29 | * Range tracking buffer container. | ||
| 30 | * | ||
| 31 | * It keeps track of the modified CPU and GPU ranges at CPU page granularity, notifying the given | ||
| 32 | * rasterizer about state changes in the tracking behavior of the buffer. | ||
| 33 | * | ||
| 34 | * The buffer size and address are forcibly aligned to CPU page boundaries. | ||
| 35 | */ | ||
| 36 | template <class RasterizerInterface> | ||
| 37 | class BufferBase { | ||
| 38 | static constexpr u64 PAGES_PER_WORD = 64; | ||
| 39 | static constexpr u64 BYTES_PER_PAGE = Core::Memory::PAGE_SIZE; | ||
| 40 | static constexpr u64 BYTES_PER_WORD = PAGES_PER_WORD * BYTES_PER_PAGE; | ||
| 41 | |||
| 42 | /// Tightly packed bit vector tracking modified pages, with a small-vector optimization | ||
| 43 | union WrittenWords { | ||
| 44 | /// Returns the pointer to the words state | ||
| 45 | [[nodiscard]] const u64* Pointer(bool is_short) const noexcept { | ||
| 46 | return is_short ? &stack : heap; | ||
| 47 | } | ||
| 48 | |||
| 49 | /// Returns the pointer to the words state | ||
| 50 | [[nodiscard]] u64* Pointer(bool is_short) noexcept { | ||
| 51 | return is_short ? &stack : heap; | ||
| 52 | } | ||
| 53 | |||
| 54 | u64 stack = 0; ///< Inline storage for small buffers | ||
| 55 | u64* heap; ///< Pointer to heap storage for larger buffers | ||
| 56 | }; | ||
| 57 | |||
| 58 | struct GpuCpuWords { | ||
| 59 | explicit GpuCpuWords() = default; | ||
| 60 | explicit GpuCpuWords(u64 size_bytes_) : size_bytes{size_bytes_} { | ||
| 61 | if (IsShort()) { | ||
| 62 | cpu.stack = ~u64{0}; | ||
| 63 | gpu.stack = 0; | ||
| 64 | } else { | ||
| 65 | // Share allocation between CPU and GPU pages and set their default values | ||
| 66 | const size_t num_words = NumWords(); | ||
| 67 | u64* const alloc = new u64[num_words * 2]; | ||
| 68 | cpu.heap = alloc; | ||
| 69 | gpu.heap = alloc + num_words; | ||
| 70 | std::fill_n(cpu.heap, num_words, ~u64{0}); | ||
| 71 | std::fill_n(gpu.heap, num_words, 0); | ||
| 72 | } | ||
| 73 | // Clean up trailing bits | ||
| 74 | const u64 last_local_page = | ||
| 75 | Common::DivCeil(size_bytes % BYTES_PER_WORD, BYTES_PER_PAGE); | ||
| 76 | const u64 shift = (PAGES_PER_WORD - last_local_page) % PAGES_PER_WORD; | ||
| 77 | u64& last_word = cpu.Pointer(IsShort())[NumWords() - 1]; | ||
| 78 | last_word = (last_word << shift) >> shift; | ||
| 79 | } | ||
| 80 | |||
| 81 | ~GpuCpuWords() { | ||
| 82 | Release(); | ||
| 83 | } | ||
| 84 | |||
| 85 | GpuCpuWords& operator=(GpuCpuWords&& rhs) noexcept { | ||
| 86 | Release(); | ||
| 87 | size_bytes = rhs.size_bytes; | ||
| 88 | cpu = rhs.cpu; | ||
| 89 | gpu = rhs.gpu; | ||
| 90 | rhs.cpu.heap = nullptr; | ||
| 91 | return *this; | ||
| 92 | } | ||
| 93 | |||
| 94 | GpuCpuWords(GpuCpuWords&& rhs) noexcept | ||
| 95 | : size_bytes{rhs.size_bytes}, cpu{rhs.cpu}, gpu{rhs.gpu} { | ||
| 96 | rhs.cpu.heap = nullptr; | ||
| 97 | } | ||
| 98 | |||
| 99 | GpuCpuWords& operator=(const GpuCpuWords&) = delete; | ||
| 100 | GpuCpuWords(const GpuCpuWords&) = delete; | ||
| 101 | |||
| 102 | /// Returns true when the buffer fits in the small vector optimization | ||
| 103 | [[nodiscard]] bool IsShort() const noexcept { | ||
| 104 | return size_bytes <= BYTES_PER_WORD; | ||
| 105 | } | ||
| 106 | |||
| 107 | /// Returns the number of words of the buffer | ||
| 108 | [[nodiscard]] size_t NumWords() const noexcept { | ||
| 109 | return Common::DivCeil(size_bytes, BYTES_PER_WORD); | ||
| 110 | } | ||
| 111 | |||
| 112 | /// Release buffer resources | ||
| 113 | void Release() { | ||
| 114 | if (!IsShort()) { | ||
| 115 | // The CPU words pointer is the base of the shared heap allocation | ||
| 116 | delete[] cpu.heap; | ||
| 117 | } | ||
| 118 | } | ||
| 119 | |||
| 120 | u64 size_bytes = 0; | ||
| 121 | WrittenWords cpu; | ||
| 122 | WrittenWords gpu; | ||
| 123 | }; | ||
| 124 | |||
| 125 | public: | ||
| 126 | explicit BufferBase(RasterizerInterface& rasterizer_, VAddr cpu_addr_, u64 size_bytes) | ||
| 127 | : rasterizer{&rasterizer_}, cpu_addr{Common::AlignDown(cpu_addr_, BYTES_PER_PAGE)}, | ||
| 128 | words(Common::AlignUp(size_bytes + (cpu_addr_ - cpu_addr), BYTES_PER_PAGE)) {} | ||
| 129 | |||
| 130 | explicit BufferBase(NullBufferParams) {} | ||
| 131 | |||
| 132 | BufferBase& operator=(const BufferBase&) = delete; | ||
| 133 | BufferBase(const BufferBase&) = delete; | ||
| 134 | |||
| 135 | /// Returns the inclusive CPU modified range in a begin-end pair | ||
| 136 | [[nodiscard]] std::pair<u64, u64> ModifiedCpuRegion(VAddr query_cpu_addr, | ||
| 137 | u64 query_size) const noexcept { | ||
| 138 | const u64 offset = query_cpu_addr - cpu_addr; | ||
| 139 | return ModifiedRegion<false>(offset, query_size); | ||
| 140 | } | ||
| 141 | |||
| 142 | /// Returns the inclusive GPU modified range in a begin-end pair | ||
| 143 | [[nodiscard]] std::pair<u64, u64> ModifiedGpuRegion(VAddr query_cpu_addr, | ||
| 144 | u64 query_size) const noexcept { | ||
| 145 | const u64 offset = query_cpu_addr - cpu_addr; | ||
| 146 | return ModifiedRegion<true>(offset, query_size); | ||
| 147 | } | ||
| 148 | |||
| 149 | /// Returns true if a region has been modified from the CPU | ||
| 150 | [[nodiscard]] bool IsRegionCpuModified(VAddr query_cpu_addr, u64 query_size) const noexcept { | ||
| 151 | const u64 offset = query_cpu_addr - cpu_addr; | ||
| 152 | return IsRegionModified<false>(offset, query_size); | ||
| 153 | } | ||
| 154 | |||
| 155 | /// Returns true if a region has been modified from the GPU | ||
| 156 | [[nodiscard]] bool IsRegionGpuModified(VAddr query_cpu_addr, u64 query_size) const noexcept { | ||
| 157 | const u64 offset = query_cpu_addr - cpu_addr; | ||
| 158 | return IsRegionModified<true>(offset, query_size); | ||
| 159 | } | ||
| 160 | |||
| 161 | /// Mark region as CPU modified, notifying the rasterizer about this change | ||
| 162 | void MarkRegionAsCpuModified(VAddr dirty_cpu_addr, u64 size) { | ||
| 163 | ChangeRegionState<true, true>(words.cpu, dirty_cpu_addr, size); | ||
| 164 | } | ||
| 165 | |||
| 166 | /// Unmark region as CPU modified, notifying the rasterizer about this change | ||
| 167 | void UnmarkRegionAsCpuModified(VAddr dirty_cpu_addr, u64 size) { | ||
| 168 | ChangeRegionState<false, true>(words.cpu, dirty_cpu_addr, size); | ||
| 169 | } | ||
| 170 | |||
| 171 | /// Mark region as modified from the host GPU | ||
| 172 | void MarkRegionAsGpuModified(VAddr dirty_cpu_addr, u64 size) noexcept { | ||
| 173 | ChangeRegionState<true, false>(words.gpu, dirty_cpu_addr, size); | ||
| 174 | } | ||
| 175 | |||
| 176 | /// Unmark region as modified from the host GPU | ||
| 177 | void UnmarkRegionAsGpuModified(VAddr dirty_cpu_addr, u64 size) noexcept { | ||
| 178 | ChangeRegionState<false, false>(words.gpu, dirty_cpu_addr, size); | ||
| 179 | } | ||
| 180 | |||
| 181 | /// Call 'func' for each CPU modified range and unmark those pages as CPU modified | ||
| 182 | template <typename Func> | ||
| 183 | void ForEachUploadRange(VAddr query_cpu_range, u64 size, Func&& func) { | ||
| 184 | ForEachModifiedRange<false, true>(query_cpu_range, size, func); | ||
| 185 | } | ||
| 186 | |||
| 187 | /// Call 'func' for each GPU modified range and unmark those pages as GPU modified | ||
| 188 | template <typename Func> | ||
| 189 | void ForEachDownloadRange(VAddr query_cpu_range, u64 size, Func&& func) { | ||
| 190 | ForEachModifiedRange<true, false>(query_cpu_range, size, func); | ||
| 191 | } | ||
| 192 | |||
| 193 | /// Call 'func' for each GPU modified range in the whole buffer and unmark those pages as GPU modified | ||
| 194 | template <typename Func> | ||
| 195 | void ForEachDownloadRange(Func&& func) { | ||
| 196 | ForEachModifiedRange<true, false>(cpu_addr, SizeBytes(), func); | ||
| 197 | } | ||
| 198 | |||
| 199 | /// Mark buffer as picked | ||
| 200 | void Pick() noexcept { | ||
| 201 | flags |= BufferFlagBits::Picked; | ||
| 202 | } | ||
| 203 | |||
| 204 | /// Unmark buffer as picked | ||
| 205 | void Unpick() noexcept { | ||
| 206 | flags &= ~BufferFlagBits::Picked; | ||
| 207 | } | ||
| 208 | |||
| 209 | /// Returns true when addr -> addr+size is fully contained in the buffer | ||
| 210 | [[nodiscard]] bool IsInBounds(VAddr addr, u64 size) const noexcept { | ||
| 211 | return addr >= cpu_addr && addr + size <= cpu_addr + SizeBytes(); | ||
| 212 | } | ||
| 213 | |||
| 214 | /// Returns true if the buffer has been marked as picked | ||
| 215 | [[nodiscard]] bool IsPicked() const noexcept { | ||
| 216 | return True(flags & BufferFlagBits::Picked); | ||
| 217 | } | ||
| 218 | |||
| 219 | /// Returns the base CPU address of the buffer | ||
| 220 | [[nodiscard]] VAddr CpuAddr() const noexcept { | ||
| 221 | return cpu_addr; | ||
| 222 | } | ||
| 223 | |||
| 224 | /// Returns the offset relative to the given CPU address | ||
| 225 | /// @pre IsInBounds returns true | ||
| 226 | [[nodiscard]] u32 Offset(VAddr other_cpu_addr) const noexcept { | ||
| 227 | return static_cast<u32>(other_cpu_addr - cpu_addr); | ||
| 228 | } | ||
| 229 | |||
| 230 | /// Returns the size in bytes of the buffer | ||
| 231 | [[nodiscard]] u64 SizeBytes() const noexcept { | ||
| 232 | return words.size_bytes; | ||
| 233 | } | ||
| 234 | |||
| 235 | private: | ||
| 236 | /** | ||
| 237 | * Change the state of a range of pages | ||
| 238 | * | ||
| 239 | * @param written_words Pages to be marked or unmarked as modified | ||
| 240 | * @param dirty_addr Base address to mark or unmark as modified | ||
| 241 | * @param size Size in bytes to mark or unmark as modified | ||
| 242 | * | ||
| 243 | * @tparam enable True when the bits will be set to one, false for zero | ||
| 244 | * @tparam notify_rasterizer True when the rasterizer has to be notified about the changes | ||
| 245 | */ | ||
| 246 | template <bool enable, bool notify_rasterizer> | ||
| 247 | void ChangeRegionState(WrittenWords& written_words, u64 dirty_addr, | ||
| 248 | s64 size) noexcept(!notify_rasterizer) { | ||
| 249 | const s64 difference = dirty_addr - cpu_addr; | ||
| 250 | const u64 offset = std::max<s64>(difference, 0); | ||
| 251 | size += std::min<s64>(difference, 0); | ||
| 252 | if (offset >= SizeBytes() || size < 0) { | ||
| 253 | return; | ||
| 254 | } | ||
| 255 | u64* const state_words = written_words.Pointer(IsShort()); | ||
| 256 | const u64 offset_end = std::min(offset + size, SizeBytes()); | ||
| 257 | const u64 begin_page_index = offset / BYTES_PER_PAGE; | ||
| 258 | const u64 begin_word_index = begin_page_index / PAGES_PER_WORD; | ||
| 259 | const u64 end_page_index = Common::DivCeil(offset_end, BYTES_PER_PAGE); | ||
| 260 | const u64 end_word_index = Common::DivCeil(end_page_index, PAGES_PER_WORD); | ||
| 261 | u64 page_index = begin_page_index % PAGES_PER_WORD; | ||
| 262 | u64 word_index = begin_word_index; | ||
| 263 | while (word_index < end_word_index) { | ||
| 264 | const u64 next_word_first_page = (word_index + 1) * PAGES_PER_WORD; | ||
| 265 | const u64 left_offset = | ||
| 266 | std::min(next_word_first_page - end_page_index, PAGES_PER_WORD) % PAGES_PER_WORD; | ||
| 267 | const u64 right_offset = page_index; | ||
| 268 | u64 bits = ~u64{0}; | ||
| 269 | bits = (bits >> right_offset) << right_offset; | ||
| 270 | bits = (bits << left_offset) >> left_offset; | ||
| 271 | if constexpr (notify_rasterizer) { | ||
| 272 | NotifyRasterizer<!enable>(word_index, state_words[word_index], bits); | ||
| 273 | } | ||
| 274 | if constexpr (enable) { | ||
| 275 | state_words[word_index] |= bits; | ||
| 276 | } else { | ||
| 277 | state_words[word_index] &= ~bits; | ||
| 278 | } | ||
| 279 | page_index = 0; | ||
| 280 | ++word_index; | ||
| 281 | } | ||
| 282 | } | ||
| 283 | |||
| 284 | /** | ||
| 285 | * Notify rasterizer about changes in the CPU tracking state of a word in the buffer | ||
| 286 | * | ||
| 287 | * @param word_index Index to the word to notify to the rasterizer | ||
| 288 | * @param current_bits Current state of the word | ||
| 289 | * @param new_bits New state of the word | ||
| 290 | * | ||
| 291 | * @tparam add_to_rasterizer True when the rasterizer should start tracking the new pages | ||
| 292 | */ | ||
| 293 | template <bool add_to_rasterizer> | ||
| 294 | void NotifyRasterizer(u64 word_index, u64 current_bits, u64 new_bits) { | ||
| 295 | u64 changed_bits = (add_to_rasterizer ? current_bits : ~current_bits) & new_bits; | ||
| 296 | VAddr addr = cpu_addr + word_index * BYTES_PER_WORD; | ||
| 297 | while (changed_bits != 0) { | ||
| 298 | const int empty_bits = std::countr_zero(changed_bits); | ||
| 299 | addr += empty_bits * BYTES_PER_PAGE; | ||
| 300 | changed_bits >>= empty_bits; | ||
| 301 | |||
| 302 | const u32 continuous_bits = std::countr_one(changed_bits); | ||
| 303 | const u64 size = continuous_bits * BYTES_PER_PAGE; | ||
| 304 | const VAddr begin_addr = addr; | ||
| 305 | addr += size; | ||
| 306 | changed_bits = continuous_bits < PAGES_PER_WORD ? (changed_bits >> continuous_bits) : 0; | ||
| 307 | rasterizer->UpdatePagesCachedCount(begin_addr, size, add_to_rasterizer ? 1 : -1); | ||
| 308 | } | ||
| 309 | } | ||
| 310 | |||
| 311 | /** | ||
| 312 | * Loop over each page in the given range, turn off those bits and notify the rasterizer if | ||
| 313 | * needed. Call the given function on each turned off range. | ||
| 314 | * | ||
| 315 | * @param query_cpu_range Base CPU address to loop over | ||
| 316 | * @param size Size in bytes of the CPU range to loop over | ||
| 317 | * @param func Function to call for each turned off region | ||
| 318 | * | ||
| 319 | * @tparam gpu True for host GPU pages, false for CPU pages | ||
| 320 | * @tparam notify_rasterizer True when the rasterizer should be notified about state changes | ||
| 321 | */ | ||
| 322 | template <bool gpu, bool notify_rasterizer, typename Func> | ||
| 323 | void ForEachModifiedRange(VAddr query_cpu_range, s64 size, Func&& func) { | ||
| 324 | const s64 difference = query_cpu_range - cpu_addr; | ||
| 325 | const u64 query_begin = std::max<s64>(difference, 0); | ||
| 326 | size += std::min<s64>(difference, 0); | ||
| 327 | if (query_begin >= SizeBytes() || size < 0) { | ||
| 328 | return; | ||
| 329 | } | ||
| 330 | const u64* const cpu_words = words.cpu.Pointer(IsShort()); | ||
| 331 | const u64 query_end = query_begin + std::min(static_cast<u64>(size), SizeBytes()); | ||
| 332 | u64* const state_words = (gpu ? words.gpu : words.cpu).Pointer(IsShort()); | ||
| 333 | u64* const words_begin = state_words + query_begin / BYTES_PER_WORD; | ||
| 334 | u64* const words_end = state_words + Common::DivCeil(query_end, BYTES_PER_WORD); | ||
| 335 | |||
| 336 | const auto modified = [](u64 word) { return word != 0; }; | ||
| 337 | const auto first_modified_word = std::find_if(words_begin, words_end, modified); | ||
| 338 | if (first_modified_word == words_end) { | ||
| 339 | // Exit early when the buffer is not modified | ||
| 340 | return; | ||
| 341 | } | ||
| 342 | const auto last_modified_word = std::find_if_not(first_modified_word, words_end, modified); | ||
| 343 | |||
| 344 | const u64 word_index_begin = std::distance(state_words, first_modified_word); | ||
| 345 | const u64 word_index_end = std::distance(state_words, last_modified_word); | ||
| 346 | |||
| 347 | const unsigned local_page_begin = std::countr_zero(*first_modified_word); | ||
| 348 | const unsigned local_page_end = PAGES_PER_WORD - std::countl_zero(last_modified_word[-1]); | ||
| 349 | const u64 word_page_begin = word_index_begin * PAGES_PER_WORD; | ||
| 350 | const u64 word_page_end = (word_index_end - 1) * PAGES_PER_WORD; | ||
| 351 | const u64 query_page_begin = query_begin / BYTES_PER_PAGE; | ||
| 352 | const u64 query_page_end = Common::DivCeil(query_end, BYTES_PER_PAGE); | ||
| 353 | const u64 page_index_begin = std::max(word_page_begin + local_page_begin, query_page_begin); | ||
| 354 | const u64 page_index_end = std::min(word_page_end + local_page_end, query_page_end); | ||
| 355 | const u64 first_word_page_begin = page_index_begin % PAGES_PER_WORD; | ||
| 356 | const u64 last_word_page_end = (page_index_end - 1) % PAGES_PER_WORD + 1; | ||
| 357 | |||
| 358 | u64 page_begin = first_word_page_begin; | ||
| 359 | u64 current_base = 0; | ||
| 360 | u64 current_size = 0; | ||
| 361 | bool on_going = false; | ||
| 362 | for (u64 word_index = word_index_begin; word_index < word_index_end; ++word_index) { | ||
| 363 | const bool is_last_word = word_index + 1 == word_index_end; | ||
| 364 | const u64 page_end = is_last_word ? last_word_page_end : PAGES_PER_WORD; | ||
| 365 | const u64 right_offset = page_begin; | ||
| 366 | const u64 left_offset = PAGES_PER_WORD - page_end; | ||
| 367 | u64 bits = ~u64{0}; | ||
| 368 | bits = (bits >> right_offset) << right_offset; | ||
| 369 | bits = (bits << left_offset) >> left_offset; | ||
| 370 | |||
| 371 | const u64 current_word = state_words[word_index] & bits; | ||
| 372 | state_words[word_index] &= ~bits; | ||
| 373 | |||
| 374 | // Exclude CPU modified pages when visiting GPU pages | ||
| 375 | const u64 word = current_word & ~(gpu ? cpu_words[word_index] : 0); | ||
| 376 | if constexpr (notify_rasterizer) { | ||
| 377 | NotifyRasterizer<true>(word_index, word, ~u64{0}); | ||
| 378 | } | ||
| 379 | u64 page = page_begin; | ||
| 380 | page_begin = 0; | ||
| 381 | |||
| 382 | while (page < page_end) { | ||
| 383 | const int empty_bits = std::countr_zero(word >> page); | ||
| 384 | if (on_going && empty_bits != 0) { | ||
| 385 | InvokeModifiedRange(func, current_size, current_base); | ||
| 386 | current_size = 0; | ||
| 387 | on_going = false; | ||
| 388 | } | ||
| 389 | page += empty_bits; | ||
| 390 | |||
| 391 | const int continuous_bits = std::countr_one(word >> page); | ||
| 392 | if (!on_going && continuous_bits != 0) { | ||
| 393 | current_base = word_index * PAGES_PER_WORD + page; | ||
| 394 | on_going = true; | ||
| 395 | } | ||
| 396 | current_size += continuous_bits; | ||
| 397 | page += continuous_bits; | ||
| 398 | } | ||
| 399 | } | ||
| 400 | if (on_going && current_size > 0) { | ||
| 401 | InvokeModifiedRange(func, current_size, current_base); | ||
| 402 | } | ||
| 403 | } | ||
| 404 | |||
| 405 | template <typename Func> | ||
| 406 | void InvokeModifiedRange(Func&& func, u64 current_size, u64 current_base) { | ||
| 407 | const u64 current_size_bytes = current_size * BYTES_PER_PAGE; | ||
| 408 | const u64 offset_begin = current_base * BYTES_PER_PAGE; | ||
| 409 | const u64 offset_end = std::min(offset_begin + current_size_bytes, SizeBytes()); | ||
| 410 | func(offset_begin, offset_end - offset_begin); | ||
| 411 | } | ||
| 412 | |||
| 413 | /** | ||
| 414 | * Returns true when a region has been modified | ||
| 415 | * | ||
| 416 | * @param offset Offset in bytes from the start of the buffer | ||
| 417 | * @param size Size in bytes of the region to query for modifications | ||
| 418 | */ | ||
| 419 | template <bool gpu> | ||
| 420 | [[nodiscard]] bool IsRegionModified(u64 offset, u64 size) const noexcept { | ||
| 421 | const u64* const cpu_words = words.cpu.Pointer(IsShort()); | ||
| 422 | const u64* const state_words = (gpu ? words.gpu : words.cpu).Pointer(IsShort()); | ||
| 423 | const u64 num_query_words = size / BYTES_PER_WORD + 1; | ||
| 424 | const u64 word_begin = offset / BYTES_PER_WORD; | ||
| 425 | const u64 word_end = std::min(word_begin + num_query_words, NumWords()); | ||
| 426 | const u64 page_limit = Common::DivCeil(offset + size, BYTES_PER_PAGE); | ||
| 427 | u64 page_index = (offset / BYTES_PER_PAGE) % PAGES_PER_WORD; | ||
| 428 | for (u64 word_index = word_begin; word_index < word_end; ++word_index, page_index = 0) { | ||
| 429 | const u64 word = state_words[word_index] & ~(gpu ? cpu_words[word_index] : 0); | ||
| 430 | if (word == 0) { | ||
| 431 | continue; | ||
| 432 | } | ||
| 433 | const u64 page_end = std::min((word_index + 1) * PAGES_PER_WORD, page_limit); | ||
| 434 | const u64 local_page_end = page_end % PAGES_PER_WORD; | ||
| 435 | const u64 page_end_shift = (PAGES_PER_WORD - local_page_end) % PAGES_PER_WORD; | ||
| 436 | if (((word >> page_index) << page_index) << page_end_shift != 0) { | ||
| 437 | return true; | ||
| 438 | } | ||
| 439 | } | ||
| 440 | return false; | ||
| 441 | } | ||
| 442 | |||
| 443 | /** | ||
| 444 | * Returns a begin end pair with the inclusive modified region | ||
| 445 | * | ||
| 446 | * @param offset Offset in bytes from the start of the buffer | ||
| 447 | * @param size Size in bytes of the region to query for modifications | ||
| 448 | * | ||
| 449 | * @tparam gpu True to query GPU modified pages, false for CPU pages | ||
| 450 | */ | ||
| 451 | template <bool gpu> | ||
| 452 | [[nodiscard]] std::pair<u64, u64> ModifiedRegion(u64 offset, u64 size) const noexcept { | ||
| 453 | const u64* const cpu_words = words.cpu.Pointer(IsShort()); | ||
| 454 | const u64* const state_words = (gpu ? words.gpu : words.cpu).Pointer(IsShort()); | ||
| 455 | const u64 num_query_words = size / BYTES_PER_WORD + 1; | ||
| 456 | const u64 word_begin = offset / BYTES_PER_WORD; | ||
| 457 | const u64 word_end = std::min(word_begin + num_query_words, NumWords()); | ||
| 458 | const u64 page_base = offset / BYTES_PER_PAGE; | ||
| 459 | const u64 page_limit = Common::DivCeil(offset + size, BYTES_PER_PAGE); | ||
| 460 | u64 begin = std::numeric_limits<u64>::max(); | ||
| 461 | u64 end = 0; | ||
| 462 | for (u64 word_index = word_begin; word_index < word_end; ++word_index) { | ||
| 463 | const u64 word = state_words[word_index] & ~(gpu ? cpu_words[word_index] : 0); | ||
| 464 | if (word == 0) { | ||
| 465 | continue; | ||
| 466 | } | ||
| 467 | const u64 local_page_begin = std::countr_zero(word); | ||
| 468 | const u64 local_page_end = PAGES_PER_WORD - std::countl_zero(word); | ||
| 469 | const u64 page_index = word_index * PAGES_PER_WORD; | ||
| 470 | const u64 page_begin = std::max(page_index + local_page_begin, page_base); | ||
| 471 | const u64 page_end = std::min(page_index + local_page_end, page_limit); | ||
| 472 | begin = std::min(begin, page_begin); | ||
| 473 | end = std::max(end, page_end); | ||
| 474 | } | ||
| 475 | static constexpr std::pair<u64, u64> EMPTY{0, 0}; | ||
| 476 | return begin < end ? std::make_pair(begin * BYTES_PER_PAGE, end * BYTES_PER_PAGE) : EMPTY; | ||
| 477 | } | ||
| 478 | |||
| 479 | /// Returns the number of words of the buffer | ||
| 480 | [[nodiscard]] size_t NumWords() const noexcept { | ||
| 481 | return words.NumWords(); | ||
| 482 | } | ||
| 483 | |||
| 484 | /// Returns true when the buffer fits in the small vector optimization | ||
| 485 | [[nodiscard]] bool IsShort() const noexcept { | ||
| 486 | return words.IsShort(); | ||
| 487 | } | ||
| 488 | |||
| 489 | RasterizerInterface* rasterizer = nullptr; | ||
| 490 | VAddr cpu_addr = 0; | ||
| 491 | GpuCpuWords words; | ||
| 492 | BufferFlagBits flags{}; | ||
| 493 | }; | ||
| 494 | |||
| 495 | } // namespace VideoCommon | ||
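For orientation, a hedged usage sketch of the container introduced above. CacheRasterizer and the copy comments are placeholders; any type exposing UpdatePagesCachedCount(VAddr, u64, int) satisfies the template parameter:

    using Buffer = VideoCommon::BufferBase<CacheRasterizer>;

    void SyncBuffer(CacheRasterizer& rasterizer, VAddr cpu_addr, u64 size_bytes) {
        Buffer buffer(rasterizer, cpu_addr, size_bytes);
        // A freshly created buffer reports every page as CPU modified, so the first pass
        // uploads the whole range, clears the dirty bits and starts page tracking.
        buffer.ForEachUploadRange(cpu_addr, size_bytes, [](u64 offset, u64 size) {
            // Copy size bytes of guest memory starting at cpu_addr + offset into the GPU buffer.
        });
        // When a guest write is detected later, only the touched pages are visited again.
        buffer.MarkRegionAsCpuModified(cpu_addr, Core::Memory::PAGE_SIZE);
        buffer.ForEachUploadRange(cpu_addr, size_bytes, [](u64 offset, u64 size) {
            // This second pass only sees the first page.
        });
    }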
diff --git a/src/video_core/cdma_pusher.cpp b/src/video_core/cdma_pusher.cpp index 94679d5d1..33b3c060b 100644 --- a/src/video_core/cdma_pusher.cpp +++ b/src/video_core/cdma_pusher.cpp | |||
| @@ -18,10 +18,10 @@ | |||
| 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
| 19 | // | 19 | // |
| 20 | 20 | ||
| 21 | #include <bit> | ||
| 21 | #include "command_classes/host1x.h" | 22 | #include "command_classes/host1x.h" |
| 22 | #include "command_classes/nvdec.h" | 23 | #include "command_classes/nvdec.h" |
| 23 | #include "command_classes/vic.h" | 24 | #include "command_classes/vic.h" |
| 24 | #include "common/bit_util.h" | ||
| 25 | #include "video_core/cdma_pusher.h" | 25 | #include "video_core/cdma_pusher.h" |
| 26 | #include "video_core/command_classes/nvdec_common.h" | 26 | #include "video_core/command_classes/nvdec_common.h" |
| 27 | #include "video_core/engines/maxwell_3d.h" | 27 | #include "video_core/engines/maxwell_3d.h" |
| @@ -56,7 +56,7 @@ void CDmaPusher::Step() { | |||
| 56 | 56 | ||
| 57 | for (const u32 value : values) { | 57 | for (const u32 value : values) { |
| 58 | if (mask != 0) { | 58 | if (mask != 0) { |
| 59 | const u32 lbs = Common::CountTrailingZeroes32(mask); | 59 | const auto lbs = static_cast<u32>(std::countr_zero(mask)); |
| 60 | mask &= ~(1U << lbs); | 60 | mask &= ~(1U << lbs); |
| 61 | ExecuteCommand(static_cast<u32>(offset + lbs), value); | 61 | ExecuteCommand(static_cast<u32>(offset + lbs), value); |
| 62 | continue; | 62 | continue; |
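The hunk above replaces Common::CountTrailingZeroes32 with std::countr_zero from <bit>; the lowest-set-bit walk over the method mask is otherwise unchanged. A standalone sketch with a made-up mask value, showing the traversal order:

    #include <bit>
    #include <cstdint>

    inline void WalkMask(std::uint32_t mask) { // e.g. 0b0001'0110 selects offsets 1, 2 and 4
        while (mask != 0) {
            const auto lbs = static_cast<std::uint32_t>(std::countr_zero(mask)); // 1, then 2, then 4
            mask &= ~(1U << lbs); // clear the lowest set bit before handling the next one
            // The real pusher calls ExecuteCommand(offset + lbs, value) at this point.
        }
    }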
diff --git a/src/video_core/cdma_pusher.h b/src/video_core/cdma_pusher.h index 8ca70b6dd..e5f212c1a 100644 --- a/src/video_core/cdma_pusher.h +++ b/src/video_core/cdma_pusher.h | |||
| @@ -126,7 +126,7 @@ private: | |||
| 126 | 126 | ||
| 127 | s32 count{}; | 127 | s32 count{}; |
| 128 | s32 offset{}; | 128 | s32 offset{}; |
| 129 | s32 mask{}; | 129 | u32 mask{}; |
| 130 | bool incrementing{}; | 130 | bool incrementing{}; |
| 131 | 131 | ||
| 132 | // Queue of command lists to be processed | 132 | // Queue of command lists to be processed |
diff --git a/src/video_core/command_classes/codecs/h264.cpp b/src/video_core/command_classes/codecs/h264.cpp index 65bbeac78..fea6aed98 100644 --- a/src/video_core/command_classes/codecs/h264.cpp +++ b/src/video_core/command_classes/codecs/h264.cpp | |||
| @@ -19,7 +19,7 @@ | |||
| 19 | // | 19 | // |
| 20 | 20 | ||
| 21 | #include <array> | 21 | #include <array> |
| 22 | #include "common/bit_util.h" | 22 | #include <bit> |
| 23 | #include "video_core/command_classes/codecs/h264.h" | 23 | #include "video_core/command_classes/codecs/h264.h" |
| 24 | #include "video_core/gpu.h" | 24 | #include "video_core/gpu.h" |
| 25 | #include "video_core/memory_manager.h" | 25 | #include "video_core/memory_manager.h" |
| @@ -266,7 +266,7 @@ void H264BitWriter::WriteExpGolombCodedInt(s32 value) { | |||
| 266 | } | 266 | } |
| 267 | 267 | ||
| 268 | void H264BitWriter::WriteExpGolombCodedUInt(u32 value) { | 268 | void H264BitWriter::WriteExpGolombCodedUInt(u32 value) { |
| 269 | const s32 size = 32 - Common::CountLeadingZeroes32(static_cast<s32>(value + 1)); | 269 | const s32 size = 32 - std::countl_zero(value + 1); |
| 270 | WriteBits(1, size); | 270 | WriteBits(1, size); |
| 271 | 271 | ||
| 272 | value -= (1U << (size - 1)) - 1; | 272 | value -= (1U << (size - 1)) - 1; |
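The rewritten size expression derives the Exp-Golomb codeword length from the bit width of value + 1. A small compile-time check with an illustrative value:

    #include <bit>
    #include <cstdint>

    // For codeNum = 4, codeNum + 1 = 5 = 0b101, so three significant bits are needed.
    constexpr std::uint32_t code_num = 4;
    constexpr int size = 32 - std::countl_zero(code_num + 1); // countl_zero(5u) == 29, size == 3
    static_assert(size == 3);
    // WriteBits(1, size) emits 001, and the adjusted value (4 - ((1 << 2) - 1) == 1)
    // supplies the trailing bits, giving the expected codeword 00101.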
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp index d7437e185..61796e33a 100644 --- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp +++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp | |||
| @@ -23,7 +23,6 @@ | |||
| 23 | #include "video_core/renderer_vulkan/renderer_vulkan.h" | 23 | #include "video_core/renderer_vulkan/renderer_vulkan.h" |
| 24 | #include "video_core/renderer_vulkan/vk_blit_screen.h" | 24 | #include "video_core/renderer_vulkan/vk_blit_screen.h" |
| 25 | #include "video_core/renderer_vulkan/vk_master_semaphore.h" | 25 | #include "video_core/renderer_vulkan/vk_master_semaphore.h" |
| 26 | #include "video_core/renderer_vulkan/vk_memory_manager.h" | ||
| 27 | #include "video_core/renderer_vulkan/vk_rasterizer.h" | 26 | #include "video_core/renderer_vulkan/vk_rasterizer.h" |
| 28 | #include "video_core/renderer_vulkan/vk_scheduler.h" | 27 | #include "video_core/renderer_vulkan/vk_scheduler.h" |
| 29 | #include "video_core/renderer_vulkan/vk_state_tracker.h" | 28 | #include "video_core/renderer_vulkan/vk_state_tracker.h" |
| @@ -32,6 +31,7 @@ | |||
| 32 | #include "video_core/vulkan_common/vulkan_device.h" | 31 | #include "video_core/vulkan_common/vulkan_device.h" |
| 33 | #include "video_core/vulkan_common/vulkan_instance.h" | 32 | #include "video_core/vulkan_common/vulkan_instance.h" |
| 34 | #include "video_core/vulkan_common/vulkan_library.h" | 33 | #include "video_core/vulkan_common/vulkan_library.h" |
| 34 | #include "video_core/vulkan_common/vulkan_memory_allocator.h" | ||
| 35 | #include "video_core/vulkan_common/vulkan_surface.h" | 35 | #include "video_core/vulkan_common/vulkan_surface.h" |
| 36 | #include "video_core/vulkan_common/vulkan_wrapper.h" | 36 | #include "video_core/vulkan_common/vulkan_wrapper.h" |
| 37 | 37 | ||
| @@ -137,7 +137,7 @@ bool RendererVulkan::Init() try { | |||
| 137 | InitializeDevice(); | 137 | InitializeDevice(); |
| 138 | Report(); | 138 | Report(); |
| 139 | 139 | ||
| 140 | memory_manager = std::make_unique<VKMemoryManager>(*device); | 140 | memory_allocator = std::make_unique<MemoryAllocator>(*device); |
| 141 | 141 | ||
| 142 | state_tracker = std::make_unique<StateTracker>(gpu); | 142 | state_tracker = std::make_unique<StateTracker>(gpu); |
| 143 | 143 | ||
| @@ -149,11 +149,11 @@ bool RendererVulkan::Init() try { | |||
| 149 | 149 | ||
| 150 | rasterizer = std::make_unique<RasterizerVulkan>(render_window, gpu, gpu.MemoryManager(), | 150 | rasterizer = std::make_unique<RasterizerVulkan>(render_window, gpu, gpu.MemoryManager(), |
| 151 | cpu_memory, screen_info, *device, | 151 | cpu_memory, screen_info, *device, |
| 152 | *memory_manager, *state_tracker, *scheduler); | 152 | *memory_allocator, *state_tracker, *scheduler); |
| 153 | 153 | ||
| 154 | blit_screen = | 154 | blit_screen = |
| 155 | std::make_unique<VKBlitScreen>(cpu_memory, render_window, *rasterizer, *device, | 155 | std::make_unique<VKBlitScreen>(cpu_memory, render_window, *rasterizer, *device, |
| 156 | *memory_manager, *swapchain, *scheduler, screen_info); | 156 | *memory_allocator, *swapchain, *scheduler, screen_info); |
| 157 | return true; | 157 | return true; |
| 158 | 158 | ||
| 159 | } catch (const vk::Exception& exception) { | 159 | } catch (const vk::Exception& exception) { |
| @@ -172,7 +172,7 @@ void RendererVulkan::ShutDown() { | |||
| 172 | blit_screen.reset(); | 172 | blit_screen.reset(); |
| 173 | scheduler.reset(); | 173 | scheduler.reset(); |
| 174 | swapchain.reset(); | 174 | swapchain.reset(); |
| 175 | memory_manager.reset(); | 175 | memory_allocator.reset(); |
| 176 | device.reset(); | 176 | device.reset(); |
| 177 | } | 177 | } |
| 178 | 178 | ||
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.h b/src/video_core/renderer_vulkan/renderer_vulkan.h index 5575ffc54..daf55b9b4 100644 --- a/src/video_core/renderer_vulkan/renderer_vulkan.h +++ b/src/video_core/renderer_vulkan/renderer_vulkan.h | |||
| @@ -29,8 +29,8 @@ namespace Vulkan { | |||
| 29 | 29 | ||
| 30 | class Device; | 30 | class Device; |
| 31 | class StateTracker; | 31 | class StateTracker; |
| 32 | class MemoryAllocator; | ||
| 32 | class VKBlitScreen; | 33 | class VKBlitScreen; |
| 33 | class VKMemoryManager; | ||
| 34 | class VKSwapchain; | 34 | class VKSwapchain; |
| 35 | class VKScheduler; | 35 | class VKScheduler; |
| 36 | 36 | ||
| @@ -75,7 +75,7 @@ private: | |||
| 75 | 75 | ||
| 76 | vk::DebugUtilsMessenger debug_callback; | 76 | vk::DebugUtilsMessenger debug_callback; |
| 77 | std::unique_ptr<Device> device; | 77 | std::unique_ptr<Device> device; |
| 78 | std::unique_ptr<VKMemoryManager> memory_manager; | 78 | std::unique_ptr<MemoryAllocator> memory_allocator; |
| 79 | std::unique_ptr<StateTracker> state_tracker; | 79 | std::unique_ptr<StateTracker> state_tracker; |
| 80 | std::unique_ptr<VKScheduler> scheduler; | 80 | std::unique_ptr<VKScheduler> scheduler; |
| 81 | std::unique_ptr<VKSwapchain> swapchain; | 81 | std::unique_ptr<VKSwapchain> swapchain; |
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp index 5e184eb42..3e3b895e0 100644 --- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp +++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp | |||
| @@ -22,13 +22,13 @@ | |||
| 22 | #include "video_core/renderer_vulkan/renderer_vulkan.h" | 22 | #include "video_core/renderer_vulkan/renderer_vulkan.h" |
| 23 | #include "video_core/renderer_vulkan/vk_blit_screen.h" | 23 | #include "video_core/renderer_vulkan/vk_blit_screen.h" |
| 24 | #include "video_core/renderer_vulkan/vk_master_semaphore.h" | 24 | #include "video_core/renderer_vulkan/vk_master_semaphore.h" |
| 25 | #include "video_core/renderer_vulkan/vk_memory_manager.h" | ||
| 26 | #include "video_core/renderer_vulkan/vk_scheduler.h" | 25 | #include "video_core/renderer_vulkan/vk_scheduler.h" |
| 27 | #include "video_core/renderer_vulkan/vk_shader_util.h" | 26 | #include "video_core/renderer_vulkan/vk_shader_util.h" |
| 28 | #include "video_core/renderer_vulkan/vk_swapchain.h" | 27 | #include "video_core/renderer_vulkan/vk_swapchain.h" |
| 29 | #include "video_core/surface.h" | 28 | #include "video_core/surface.h" |
| 30 | #include "video_core/textures/decoders.h" | 29 | #include "video_core/textures/decoders.h" |
| 31 | #include "video_core/vulkan_common/vulkan_device.h" | 30 | #include "video_core/vulkan_common/vulkan_device.h" |
| 31 | #include "video_core/vulkan_common/vulkan_memory_allocator.h" | ||
| 32 | #include "video_core/vulkan_common/vulkan_wrapper.h" | 32 | #include "video_core/vulkan_common/vulkan_wrapper.h" |
| 33 | 33 | ||
| 34 | namespace Vulkan { | 34 | namespace Vulkan { |
| @@ -115,10 +115,10 @@ struct VKBlitScreen::BufferData { | |||
| 115 | VKBlitScreen::VKBlitScreen(Core::Memory::Memory& cpu_memory_, | 115 | VKBlitScreen::VKBlitScreen(Core::Memory::Memory& cpu_memory_, |
| 116 | Core::Frontend::EmuWindow& render_window_, | 116 | Core::Frontend::EmuWindow& render_window_, |
| 117 | VideoCore::RasterizerInterface& rasterizer_, const Device& device_, | 117 | VideoCore::RasterizerInterface& rasterizer_, const Device& device_, |
| 118 | VKMemoryManager& memory_manager_, VKSwapchain& swapchain_, | 118 | MemoryAllocator& memory_allocator_, VKSwapchain& swapchain_, |
| 119 | VKScheduler& scheduler_, const VKScreenInfo& screen_info_) | 119 | VKScheduler& scheduler_, const VKScreenInfo& screen_info_) |
| 120 | : cpu_memory{cpu_memory_}, render_window{render_window_}, rasterizer{rasterizer_}, | 120 | : cpu_memory{cpu_memory_}, render_window{render_window_}, rasterizer{rasterizer_}, |
| 121 | device{device_}, memory_manager{memory_manager_}, swapchain{swapchain_}, | 121 | device{device_}, memory_allocator{memory_allocator_}, swapchain{swapchain_}, |
| 122 | scheduler{scheduler_}, image_count{swapchain.GetImageCount()}, screen_info{screen_info_} { | 122 | scheduler{scheduler_}, image_count{swapchain.GetImageCount()}, screen_info{screen_info_} { |
| 123 | resource_ticks.resize(image_count); | 123 | resource_ticks.resize(image_count); |
| 124 | 124 | ||
| @@ -150,8 +150,8 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool | |||
| 150 | SetUniformData(data, framebuffer); | 150 | SetUniformData(data, framebuffer); |
| 151 | SetVertexData(data, framebuffer); | 151 | SetVertexData(data, framebuffer); |
| 152 | 152 | ||
| 153 | auto map = buffer_commit->Map(); | 153 | const std::span<u8> map = buffer_commit.Map(); |
| 154 | std::memcpy(map.Address(), &data, sizeof(data)); | 154 | std::memcpy(map.data(), &data, sizeof(data)); |
| 155 | 155 | ||
| 156 | if (!use_accelerated) { | 156 | if (!use_accelerated) { |
| 157 | const u64 image_offset = GetRawImageOffset(framebuffer, image_index); | 157 | const u64 image_offset = GetRawImageOffset(framebuffer, image_index); |
| @@ -165,8 +165,8 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool | |||
| 165 | constexpr u32 block_height_log2 = 4; | 165 | constexpr u32 block_height_log2 = 4; |
| 166 | const u32 bytes_per_pixel = GetBytesPerPixel(framebuffer); | 166 | const u32 bytes_per_pixel = GetBytesPerPixel(framebuffer); |
| 167 | Tegra::Texture::UnswizzleTexture( | 167 | Tegra::Texture::UnswizzleTexture( |
| 168 | std::span(map.Address() + image_offset, size_bytes), std::span(host_ptr, size_bytes), | 168 | map.subspan(image_offset, size_bytes), std::span(host_ptr, size_bytes), bytes_per_pixel, |
| 169 | bytes_per_pixel, framebuffer.width, framebuffer.height, 1, block_height_log2, 0); | 169 | framebuffer.width, framebuffer.height, 1, block_height_log2, 0); |
| 170 | 170 | ||
| 171 | const VkBufferImageCopy copy{ | 171 | const VkBufferImageCopy copy{ |
| 172 | .bufferOffset = image_offset, | 172 | .bufferOffset = image_offset, |
| @@ -224,8 +224,6 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool | |||
| 224 | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, write_barrier); | 224 | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, write_barrier); |
| 225 | }); | 225 | }); |
| 226 | } | 226 | } |
| 227 | map.Release(); | ||
| 228 | |||
| 229 | scheduler.Record([renderpass = *renderpass, framebuffer = *framebuffers[image_index], | 227 | scheduler.Record([renderpass = *renderpass, framebuffer = *framebuffers[image_index], |
| 230 | descriptor_set = descriptor_sets[image_index], buffer = *buffer, | 228 | descriptor_set = descriptor_sets[image_index], buffer = *buffer, |
| 231 | size = swapchain.GetSize(), pipeline = *pipeline, | 229 | size = swapchain.GetSize(), pipeline = *pipeline, |
| @@ -642,7 +640,7 @@ void VKBlitScreen::ReleaseRawImages() { | |||
| 642 | raw_images.clear(); | 640 | raw_images.clear(); |
| 643 | raw_buffer_commits.clear(); | 641 | raw_buffer_commits.clear(); |
| 644 | buffer.reset(); | 642 | buffer.reset(); |
| 645 | buffer_commit.reset(); | 643 | buffer_commit = MemoryCommit{}; |
| 646 | } | 644 | } |
| 647 | 645 | ||
| 648 | void VKBlitScreen::CreateStagingBuffer(const Tegra::FramebufferConfig& framebuffer) { | 646 | void VKBlitScreen::CreateStagingBuffer(const Tegra::FramebufferConfig& framebuffer) { |
| @@ -659,7 +657,7 @@ void VKBlitScreen::CreateStagingBuffer(const Tegra::FramebufferConfig& framebuff | |||
| 659 | }; | 657 | }; |
| 660 | 658 | ||
| 661 | buffer = device.GetLogical().CreateBuffer(ci); | 659 | buffer = device.GetLogical().CreateBuffer(ci); |
| 662 | buffer_commit = memory_manager.Commit(buffer, true); | 660 | buffer_commit = memory_allocator.Commit(buffer, MemoryUsage::Upload); |
| 663 | } | 661 | } |
| 664 | 662 | ||
| 665 | void VKBlitScreen::CreateRawImages(const Tegra::FramebufferConfig& framebuffer) { | 663 | void VKBlitScreen::CreateRawImages(const Tegra::FramebufferConfig& framebuffer) { |
| @@ -690,7 +688,7 @@ void VKBlitScreen::CreateRawImages(const Tegra::FramebufferConfig& framebuffer) | |||
| 690 | .pQueueFamilyIndices = nullptr, | 688 | .pQueueFamilyIndices = nullptr, |
| 691 | .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED, | 689 | .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED, |
| 692 | }); | 690 | }); |
| 693 | raw_buffer_commits[i] = memory_manager.Commit(raw_images[i], false); | 691 | raw_buffer_commits[i] = memory_allocator.Commit(raw_images[i], MemoryUsage::DeviceLocal); |
| 694 | raw_image_views[i] = device.GetLogical().CreateImageView(VkImageViewCreateInfo{ | 692 | raw_image_views[i] = device.GetLogical().CreateImageView(VkImageViewCreateInfo{ |
| 695 | .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, | 693 | .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, |
| 696 | .pNext = nullptr, | 694 | .pNext = nullptr, |
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.h b/src/video_core/renderer_vulkan/vk_blit_screen.h index 69ed61770..b52576957 100644 --- a/src/video_core/renderer_vulkan/vk_blit_screen.h +++ b/src/video_core/renderer_vulkan/vk_blit_screen.h | |||
| @@ -6,7 +6,7 @@ | |||
| 6 | 6 | ||
| 7 | #include <memory> | 7 | #include <memory> |
| 8 | 8 | ||
| 9 | #include "video_core/renderer_vulkan/vk_memory_manager.h" | 9 | #include "video_core/vulkan_common/vulkan_memory_allocator.h" |
| 10 | #include "video_core/vulkan_common/vulkan_wrapper.h" | 10 | #include "video_core/vulkan_common/vulkan_wrapper.h" |
| 11 | 11 | ||
| 12 | namespace Core { | 12 | namespace Core { |
| @@ -43,7 +43,7 @@ public: | |||
| 43 | explicit VKBlitScreen(Core::Memory::Memory& cpu_memory, | 43 | explicit VKBlitScreen(Core::Memory::Memory& cpu_memory, |
| 44 | Core::Frontend::EmuWindow& render_window, | 44 | Core::Frontend::EmuWindow& render_window, |
| 45 | VideoCore::RasterizerInterface& rasterizer, const Device& device, | 45 | VideoCore::RasterizerInterface& rasterizer, const Device& device, |
| 46 | VKMemoryManager& memory_manager, VKSwapchain& swapchain, | 46 | MemoryAllocator& memory_allocator, VKSwapchain& swapchain, |
| 47 | VKScheduler& scheduler, const VKScreenInfo& screen_info); | 47 | VKScheduler& scheduler, const VKScreenInfo& screen_info); |
| 48 | ~VKBlitScreen(); | 48 | ~VKBlitScreen(); |
| 49 | 49 | ||
| @@ -86,7 +86,7 @@ private: | |||
| 86 | Core::Frontend::EmuWindow& render_window; | 86 | Core::Frontend::EmuWindow& render_window; |
| 87 | VideoCore::RasterizerInterface& rasterizer; | 87 | VideoCore::RasterizerInterface& rasterizer; |
| 88 | const Device& device; | 88 | const Device& device; |
| 89 | VKMemoryManager& memory_manager; | 89 | MemoryAllocator& memory_allocator; |
| 90 | VKSwapchain& swapchain; | 90 | VKSwapchain& swapchain; |
| 91 | VKScheduler& scheduler; | 91 | VKScheduler& scheduler; |
| 92 | const std::size_t image_count; | 92 | const std::size_t image_count; |
| @@ -104,14 +104,14 @@ private: | |||
| 104 | vk::Sampler sampler; | 104 | vk::Sampler sampler; |
| 105 | 105 | ||
| 106 | vk::Buffer buffer; | 106 | vk::Buffer buffer; |
| 107 | VKMemoryCommit buffer_commit; | 107 | MemoryCommit buffer_commit; |
| 108 | 108 | ||
| 109 | std::vector<u64> resource_ticks; | 109 | std::vector<u64> resource_ticks; |
| 110 | 110 | ||
| 111 | std::vector<vk::Semaphore> semaphores; | 111 | std::vector<vk::Semaphore> semaphores; |
| 112 | std::vector<vk::Image> raw_images; | 112 | std::vector<vk::Image> raw_images; |
| 113 | std::vector<vk::ImageView> raw_image_views; | 113 | std::vector<vk::ImageView> raw_image_views; |
| 114 | std::vector<VKMemoryCommit> raw_buffer_commits; | 114 | std::vector<MemoryCommit> raw_buffer_commits; |
| 115 | u32 raw_width = 0; | 115 | u32 raw_width = 0; |
| 116 | u32 raw_height = 0; | 116 | u32 raw_height = 0; |
| 117 | }; | 117 | }; |
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp index 58c710344..d8ad40a0f 100644 --- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp | |||
| @@ -36,11 +36,11 @@ constexpr VkAccessFlags TRANSFORM_FEEDBACK_WRITE_ACCESS = | |||
| 36 | 36 | ||
| 37 | } // Anonymous namespace | 37 | } // Anonymous namespace |
| 38 | 38 | ||
| 39 | Buffer::Buffer(const Device& device_, VKMemoryManager& memory_manager, VKScheduler& scheduler_, | 39 | Buffer::Buffer(const Device& device_, MemoryAllocator& memory_allocator, VKScheduler& scheduler_, |
| 40 | VKStagingBufferPool& staging_pool_, VAddr cpu_addr_, std::size_t size_) | 40 | StagingBufferPool& staging_pool_, VAddr cpu_addr_, std::size_t size_) |
| 41 | : BufferBlock{cpu_addr_, size_}, device{device_}, scheduler{scheduler_}, staging_pool{ | 41 | : BufferBlock{cpu_addr_, size_}, device{device_}, scheduler{scheduler_}, staging_pool{ |
| 42 | staging_pool_} { | 42 | staging_pool_} { |
| 43 | const VkBufferCreateInfo ci{ | 43 | buffer = device.GetLogical().CreateBuffer(VkBufferCreateInfo{ |
| 44 | .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, | 44 | .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, |
| 45 | .pNext = nullptr, | 45 | .pNext = nullptr, |
| 46 | .flags = 0, | 46 | .flags = 0, |
| @@ -49,22 +49,20 @@ Buffer::Buffer(const Device& device_, VKMemoryManager& memory_manager, VKSchedul | |||
| 49 | .sharingMode = VK_SHARING_MODE_EXCLUSIVE, | 49 | .sharingMode = VK_SHARING_MODE_EXCLUSIVE, |
| 50 | .queueFamilyIndexCount = 0, | 50 | .queueFamilyIndexCount = 0, |
| 51 | .pQueueFamilyIndices = nullptr, | 51 | .pQueueFamilyIndices = nullptr, |
| 52 | }; | 52 | }); |
| 53 | 53 | commit = memory_allocator.Commit(buffer, MemoryUsage::DeviceLocal); | |
| 54 | buffer.handle = device.GetLogical().CreateBuffer(ci); | ||
| 55 | buffer.commit = memory_manager.Commit(buffer.handle, false); | ||
| 56 | } | 54 | } |
| 57 | 55 | ||
| 58 | Buffer::~Buffer() = default; | 56 | Buffer::~Buffer() = default; |
| 59 | 57 | ||
| 60 | void Buffer::Upload(std::size_t offset, std::size_t data_size, const u8* data) { | 58 | void Buffer::Upload(std::size_t offset, std::size_t data_size, const u8* data) { |
| 61 | const auto& staging = staging_pool.GetUnusedBuffer(data_size, true); | 59 | const auto& staging = staging_pool.Request(data_size, MemoryUsage::Upload); |
| 62 | std::memcpy(staging.commit->Map(data_size), data, data_size); | 60 | std::memcpy(staging.mapped_span.data(), data, data_size); |
| 63 | 61 | ||
| 64 | scheduler.RequestOutsideRenderPassOperationContext(); | 62 | scheduler.RequestOutsideRenderPassOperationContext(); |
| 65 | 63 | ||
| 66 | const VkBuffer handle = Handle(); | 64 | const VkBuffer handle = Handle(); |
| 67 | scheduler.Record([staging = *staging.handle, handle, offset, data_size, | 65 | scheduler.Record([staging = staging.buffer, handle, offset, data_size, |
| 68 | &device = device](vk::CommandBuffer cmdbuf) { | 66 | &device = device](vk::CommandBuffer cmdbuf) { |
| 69 | const VkBufferMemoryBarrier read_barrier{ | 67 | const VkBufferMemoryBarrier read_barrier{ |
| 70 | .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, | 68 | .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, |
| @@ -100,12 +98,12 @@ void Buffer::Upload(std::size_t offset, std::size_t data_size, const u8* data) { | |||
| 100 | } | 98 | } |
| 101 | 99 | ||
| 102 | void Buffer::Download(std::size_t offset, std::size_t data_size, u8* data) { | 100 | void Buffer::Download(std::size_t offset, std::size_t data_size, u8* data) { |
| 103 | const auto& staging = staging_pool.GetUnusedBuffer(data_size, true); | 101 | auto staging = staging_pool.Request(data_size, MemoryUsage::Download); |
| 104 | scheduler.RequestOutsideRenderPassOperationContext(); | 102 | scheduler.RequestOutsideRenderPassOperationContext(); |
| 105 | 103 | ||
| 106 | const VkBuffer handle = Handle(); | 104 | const VkBuffer handle = Handle(); |
| 107 | scheduler.Record( | 105 | scheduler.Record( |
| 108 | [staging = *staging.handle, handle, offset, data_size](vk::CommandBuffer cmdbuf) { | 106 | [staging = staging.buffer, handle, offset, data_size](vk::CommandBuffer cmdbuf) { |
| 109 | const VkBufferMemoryBarrier barrier{ | 107 | const VkBufferMemoryBarrier barrier{ |
| 110 | .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, | 108 | .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, |
| 111 | .pNext = nullptr, | 109 | .pNext = nullptr, |
| @@ -126,7 +124,7 @@ void Buffer::Download(std::size_t offset, std::size_t data_size, u8* data) { | |||
| 126 | }); | 124 | }); |
| 127 | scheduler.Finish(); | 125 | scheduler.Finish(); |
| 128 | 126 | ||
| 129 | std::memcpy(data, staging.commit->Map(data_size), data_size); | 127 | std::memcpy(data, staging.mapped_span.data(), data_size); |
| 130 | } | 128 | } |
| 131 | 129 | ||
| 132 | void Buffer::CopyFrom(const Buffer& src, std::size_t src_offset, std::size_t dst_offset, | 130 | void Buffer::CopyFrom(const Buffer& src, std::size_t src_offset, std::size_t dst_offset, |
| @@ -164,29 +162,29 @@ void Buffer::CopyFrom(const Buffer& src, std::size_t src_offset, std::size_t dst | |||
| 164 | 162 | ||
| 165 | VKBufferCache::VKBufferCache(VideoCore::RasterizerInterface& rasterizer_, | 163 | VKBufferCache::VKBufferCache(VideoCore::RasterizerInterface& rasterizer_, |
| 166 | Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, | 164 | Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, |
| 167 | const Device& device_, VKMemoryManager& memory_manager_, | 165 | const Device& device_, MemoryAllocator& memory_allocator_, |
| 168 | VKScheduler& scheduler_, VKStreamBuffer& stream_buffer_, | 166 | VKScheduler& scheduler_, VKStreamBuffer& stream_buffer_, |
| 169 | VKStagingBufferPool& staging_pool_) | 167 | StagingBufferPool& staging_pool_) |
| 170 | : VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer>{rasterizer_, gpu_memory_, | 168 | : VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer>{rasterizer_, gpu_memory_, |
| 171 | cpu_memory_, stream_buffer_}, | 169 | cpu_memory_, stream_buffer_}, |
| 172 | device{device_}, memory_manager{memory_manager_}, scheduler{scheduler_}, staging_pool{ | 170 | device{device_}, memory_allocator{memory_allocator_}, scheduler{scheduler_}, |
| 173 | staging_pool_} {} | 171 | staging_pool{staging_pool_} {} |
| 174 | 172 | ||
| 175 | VKBufferCache::~VKBufferCache() = default; | 173 | VKBufferCache::~VKBufferCache() = default; |
| 176 | 174 | ||
| 177 | std::shared_ptr<Buffer> VKBufferCache::CreateBlock(VAddr cpu_addr, std::size_t size) { | 175 | std::shared_ptr<Buffer> VKBufferCache::CreateBlock(VAddr cpu_addr, std::size_t size) { |
| 178 | return std::make_shared<Buffer>(device, memory_manager, scheduler, staging_pool, cpu_addr, | 176 | return std::make_shared<Buffer>(device, memory_allocator, scheduler, staging_pool, cpu_addr, |
| 179 | size); | 177 | size); |
| 180 | } | 178 | } |
| 181 | 179 | ||
| 182 | VKBufferCache::BufferInfo VKBufferCache::GetEmptyBuffer(std::size_t size) { | 180 | VKBufferCache::BufferInfo VKBufferCache::GetEmptyBuffer(std::size_t size) { |
| 183 | size = std::max(size, std::size_t(4)); | 181 | size = std::max(size, std::size_t(4)); |
| 184 | const auto& empty = staging_pool.GetUnusedBuffer(size, false); | 182 | const auto& empty = staging_pool.Request(size, MemoryUsage::DeviceLocal); |
| 185 | scheduler.RequestOutsideRenderPassOperationContext(); | 183 | scheduler.RequestOutsideRenderPassOperationContext(); |
| 186 | scheduler.Record([size, buffer = *empty.handle](vk::CommandBuffer cmdbuf) { | 184 | scheduler.Record([size, buffer = empty.buffer](vk::CommandBuffer cmdbuf) { |
| 187 | cmdbuf.FillBuffer(buffer, 0, size, 0); | 185 | cmdbuf.FillBuffer(buffer, 0, size, 0); |
| 188 | }); | 186 | }); |
| 189 | return {*empty.handle, 0, 0}; | 187 | return {empty.buffer, 0, 0}; |
| 190 | } | 188 | } |
| 191 | 189 | ||
| 192 | } // namespace Vulkan | 190 | } // namespace Vulkan |
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h index 1c39aed34..41d577510 100644 --- a/src/video_core/renderer_vulkan/vk_buffer_cache.h +++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h | |||
| @@ -8,21 +8,20 @@ | |||
| 8 | 8 | ||
| 9 | #include "common/common_types.h" | 9 | #include "common/common_types.h" |
| 10 | #include "video_core/buffer_cache/buffer_cache.h" | 10 | #include "video_core/buffer_cache/buffer_cache.h" |
| 11 | #include "video_core/renderer_vulkan/vk_memory_manager.h" | ||
| 12 | #include "video_core/renderer_vulkan/vk_staging_buffer_pool.h" | 11 | #include "video_core/renderer_vulkan/vk_staging_buffer_pool.h" |
| 13 | #include "video_core/renderer_vulkan/vk_stream_buffer.h" | 12 | #include "video_core/renderer_vulkan/vk_stream_buffer.h" |
| 13 | #include "video_core/vulkan_common/vulkan_memory_allocator.h" | ||
| 14 | #include "video_core/vulkan_common/vulkan_wrapper.h" | 14 | #include "video_core/vulkan_common/vulkan_wrapper.h" |
| 15 | 15 | ||
| 16 | namespace Vulkan { | 16 | namespace Vulkan { |
| 17 | 17 | ||
| 18 | class Device; | 18 | class Device; |
| 19 | class VKMemoryManager; | ||
| 20 | class VKScheduler; | 19 | class VKScheduler; |
| 21 | 20 | ||
| 22 | class Buffer final : public VideoCommon::BufferBlock { | 21 | class Buffer final : public VideoCommon::BufferBlock { |
| 23 | public: | 22 | public: |
| 24 | explicit Buffer(const Device& device, VKMemoryManager& memory_manager, VKScheduler& scheduler, | 23 | explicit Buffer(const Device& device, MemoryAllocator& memory_allocator, VKScheduler& scheduler, |
| 25 | VKStagingBufferPool& staging_pool, VAddr cpu_addr_, std::size_t size_); | 24 | StagingBufferPool& staging_pool, VAddr cpu_addr_, std::size_t size_); |
| 26 | ~Buffer(); | 25 | ~Buffer(); |
| 27 | 26 | ||
| 28 | void Upload(std::size_t offset, std::size_t data_size, const u8* data); | 27 | void Upload(std::size_t offset, std::size_t data_size, const u8* data); |
| @@ -33,7 +32,7 @@ public: | |||
| 33 | std::size_t copy_size); | 32 | std::size_t copy_size); |
| 34 | 33 | ||
| 35 | VkBuffer Handle() const { | 34 | VkBuffer Handle() const { |
| 36 | return *buffer.handle; | 35 | return *buffer; |
| 37 | } | 36 | } |
| 38 | 37 | ||
| 39 | u64 Address() const { | 38 | u64 Address() const { |
| @@ -43,18 +42,19 @@ public: | |||
| 43 | private: | 42 | private: |
| 44 | const Device& device; | 43 | const Device& device; |
| 45 | VKScheduler& scheduler; | 44 | VKScheduler& scheduler; |
| 46 | VKStagingBufferPool& staging_pool; | 45 | StagingBufferPool& staging_pool; |
| 47 | 46 | ||
| 48 | VKBuffer buffer; | 47 | vk::Buffer buffer; |
| 48 | MemoryCommit commit; | ||
| 49 | }; | 49 | }; |
| 50 | 50 | ||
| 51 | class VKBufferCache final : public VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer> { | 51 | class VKBufferCache final : public VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer> { |
| 52 | public: | 52 | public: |
| 53 | explicit VKBufferCache(VideoCore::RasterizerInterface& rasterizer, | 53 | explicit VKBufferCache(VideoCore::RasterizerInterface& rasterizer, |
| 54 | Tegra::MemoryManager& gpu_memory, Core::Memory::Memory& cpu_memory, | 54 | Tegra::MemoryManager& gpu_memory, Core::Memory::Memory& cpu_memory, |
| 55 | const Device& device, VKMemoryManager& memory_manager, | 55 | const Device& device, MemoryAllocator& memory_allocator, |
| 56 | VKScheduler& scheduler, VKStreamBuffer& stream_buffer, | 56 | VKScheduler& scheduler, VKStreamBuffer& stream_buffer, |
| 57 | VKStagingBufferPool& staging_pool); | 57 | StagingBufferPool& staging_pool); |
| 58 | ~VKBufferCache(); | 58 | ~VKBufferCache(); |
| 59 | 59 | ||
| 60 | BufferInfo GetEmptyBuffer(std::size_t size) override; | 60 | BufferInfo GetEmptyBuffer(std::size_t size) override; |
| @@ -64,9 +64,9 @@ protected: | |||
| 64 | 64 | ||
| 65 | private: | 65 | private: |
| 66 | const Device& device; | 66 | const Device& device; |
| 67 | VKMemoryManager& memory_manager; | 67 | MemoryAllocator& memory_allocator; |
| 68 | VKScheduler& scheduler; | 68 | VKScheduler& scheduler; |
| 69 | VKStagingBufferPool& staging_pool; | 69 | StagingBufferPool& staging_pool; |
| 70 | }; | 70 | }; |
| 71 | 71 | ||
| 72 | } // namespace Vulkan | 72 | } // namespace Vulkan |
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp index 02a6d54b7..5eb6a54be 100644 --- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp +++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp | |||
| @@ -164,7 +164,7 @@ VkDescriptorSet VKComputePass::CommitDescriptorSet( | |||
| 164 | 164 | ||
| 165 | QuadArrayPass::QuadArrayPass(const Device& device_, VKScheduler& scheduler_, | 165 | QuadArrayPass::QuadArrayPass(const Device& device_, VKScheduler& scheduler_, |
| 166 | VKDescriptorPool& descriptor_pool_, | 166 | VKDescriptorPool& descriptor_pool_, |
| 167 | VKStagingBufferPool& staging_buffer_pool_, | 167 | StagingBufferPool& staging_buffer_pool_, |
| 168 | VKUpdateDescriptorQueue& update_descriptor_queue_) | 168 | VKUpdateDescriptorQueue& update_descriptor_queue_) |
| 169 | : VKComputePass(device_, descriptor_pool_, BuildQuadArrayPassDescriptorSetLayoutBinding(), | 169 | : VKComputePass(device_, descriptor_pool_, BuildQuadArrayPassDescriptorSetLayoutBinding(), |
| 170 | BuildQuadArrayPassDescriptorUpdateTemplateEntry(), | 170 | BuildQuadArrayPassDescriptorUpdateTemplateEntry(), |
| @@ -177,18 +177,18 @@ QuadArrayPass::~QuadArrayPass() = default; | |||
| 177 | std::pair<VkBuffer, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertices, u32 first) { | 177 | std::pair<VkBuffer, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertices, u32 first) { |
| 178 | const u32 num_triangle_vertices = (num_vertices / 4) * 6; | 178 | const u32 num_triangle_vertices = (num_vertices / 4) * 6; |
| 179 | const std::size_t staging_size = num_triangle_vertices * sizeof(u32); | 179 | const std::size_t staging_size = num_triangle_vertices * sizeof(u32); |
| 180 | auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false); | 180 | const auto staging_ref = staging_buffer_pool.Request(staging_size, MemoryUsage::DeviceLocal); |
| 181 | 181 | ||
| 182 | update_descriptor_queue.Acquire(); | 182 | update_descriptor_queue.Acquire(); |
| 183 | update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size); | 183 | update_descriptor_queue.AddBuffer(staging_ref.buffer, 0, staging_size); |
| 184 | const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue); | 184 | const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue); |
| 185 | 185 | ||
| 186 | scheduler.RequestOutsideRenderPassOperationContext(); | 186 | scheduler.RequestOutsideRenderPassOperationContext(); |
| 187 | 187 | ||
| 188 | ASSERT(num_vertices % 4 == 0); | 188 | ASSERT(num_vertices % 4 == 0); |
| 189 | const u32 num_quads = num_vertices / 4; | 189 | const u32 num_quads = num_vertices / 4; |
| 190 | scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, num_quads, | 190 | scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = staging_ref.buffer, |
| 191 | first, set](vk::CommandBuffer cmdbuf) { | 191 | num_quads, first, set](vk::CommandBuffer cmdbuf) { |
| 192 | constexpr u32 dispatch_size = 1024; | 192 | constexpr u32 dispatch_size = 1024; |
| 193 | cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline); | 193 | cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline); |
| 194 | cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, layout, 0, set, {}); | 194 | cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, layout, 0, set, {}); |
| @@ -208,11 +208,11 @@ std::pair<VkBuffer, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertices, u32 | |||
| 208 | cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, | 208 | cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, |
| 209 | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, {barrier}, {}); | 209 | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, {barrier}, {}); |
| 210 | }); | 210 | }); |
| 211 | return {*buffer.handle, 0}; | 211 | return {staging_ref.buffer, 0}; |
| 212 | } | 212 | } |
| 213 | 213 | ||
| 214 | Uint8Pass::Uint8Pass(const Device& device, VKScheduler& scheduler_, | 214 | Uint8Pass::Uint8Pass(const Device& device, VKScheduler& scheduler_, |
| 215 | VKDescriptorPool& descriptor_pool, VKStagingBufferPool& staging_buffer_pool_, | 215 | VKDescriptorPool& descriptor_pool, StagingBufferPool& staging_buffer_pool_, |
| 216 | VKUpdateDescriptorQueue& update_descriptor_queue_) | 216 | VKUpdateDescriptorQueue& update_descriptor_queue_) |
| 217 | : VKComputePass(device, descriptor_pool, BuildInputOutputDescriptorSetBindings(), | 217 | : VKComputePass(device, descriptor_pool, BuildInputOutputDescriptorSetBindings(), |
| 218 | BuildInputOutputDescriptorUpdateTemplate(), {}, VULKAN_UINT8_COMP_SPV), | 218 | BuildInputOutputDescriptorUpdateTemplate(), {}, VULKAN_UINT8_COMP_SPV), |
| @@ -224,15 +224,15 @@ Uint8Pass::~Uint8Pass() = default; | |||
| 224 | std::pair<VkBuffer, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer src_buffer, | 224 | std::pair<VkBuffer, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer src_buffer, |
| 225 | u64 src_offset) { | 225 | u64 src_offset) { |
| 226 | const u32 staging_size = static_cast<u32>(num_vertices * sizeof(u16)); | 226 | const u32 staging_size = static_cast<u32>(num_vertices * sizeof(u16)); |
| 227 | auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false); | 227 | const auto staging_ref = staging_buffer_pool.Request(staging_size, MemoryUsage::DeviceLocal); |
| 228 | 228 | ||
| 229 | update_descriptor_queue.Acquire(); | 229 | update_descriptor_queue.Acquire(); |
| 230 | update_descriptor_queue.AddBuffer(src_buffer, src_offset, num_vertices); | 230 | update_descriptor_queue.AddBuffer(src_buffer, src_offset, num_vertices); |
| 231 | update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size); | 231 | update_descriptor_queue.AddBuffer(staging_ref.buffer, 0, staging_size); |
| 232 | const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue); | 232 | const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue); |
| 233 | 233 | ||
| 234 | scheduler.RequestOutsideRenderPassOperationContext(); | 234 | scheduler.RequestOutsideRenderPassOperationContext(); |
| 235 | scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, set, | 235 | scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = staging_ref.buffer, set, |
| 236 | num_vertices](vk::CommandBuffer cmdbuf) { | 236 | num_vertices](vk::CommandBuffer cmdbuf) { |
| 237 | constexpr u32 dispatch_size = 1024; | 237 | constexpr u32 dispatch_size = 1024; |
| 238 | cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline); | 238 | cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline); |
| @@ -252,12 +252,12 @@ std::pair<VkBuffer, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer src_buff | |||
| 252 | cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, | 252 | cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, |
| 253 | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, barrier, {}); | 253 | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, barrier, {}); |
| 254 | }); | 254 | }); |
| 255 | return {*buffer.handle, 0}; | 255 | return {staging_ref.buffer, 0}; |
| 256 | } | 256 | } |
| 257 | 257 | ||
| 258 | QuadIndexedPass::QuadIndexedPass(const Device& device_, VKScheduler& scheduler_, | 258 | QuadIndexedPass::QuadIndexedPass(const Device& device_, VKScheduler& scheduler_, |
| 259 | VKDescriptorPool& descriptor_pool_, | 259 | VKDescriptorPool& descriptor_pool_, |
| 260 | VKStagingBufferPool& staging_buffer_pool_, | 260 | StagingBufferPool& staging_buffer_pool_, |
| 261 | VKUpdateDescriptorQueue& update_descriptor_queue_) | 261 | VKUpdateDescriptorQueue& update_descriptor_queue_) |
| 262 | : VKComputePass(device_, descriptor_pool_, BuildInputOutputDescriptorSetBindings(), | 262 | : VKComputePass(device_, descriptor_pool_, BuildInputOutputDescriptorSetBindings(), |
| 263 | BuildInputOutputDescriptorUpdateTemplate(), | 263 | BuildInputOutputDescriptorUpdateTemplate(), |
| @@ -286,15 +286,15 @@ std::pair<VkBuffer, u64> QuadIndexedPass::Assemble( | |||
| 286 | const u32 num_tri_vertices = (num_vertices / 4) * 6; | 286 | const u32 num_tri_vertices = (num_vertices / 4) * 6; |
| 287 | 287 | ||
| 288 | const std::size_t staging_size = num_tri_vertices * sizeof(u32); | 288 | const std::size_t staging_size = num_tri_vertices * sizeof(u32); |
| 289 | auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false); | 289 | const auto staging_ref = staging_buffer_pool.Request(staging_size, MemoryUsage::DeviceLocal); |
| 290 | 290 | ||
| 291 | update_descriptor_queue.Acquire(); | 291 | update_descriptor_queue.Acquire(); |
| 292 | update_descriptor_queue.AddBuffer(src_buffer, src_offset, input_size); | 292 | update_descriptor_queue.AddBuffer(src_buffer, src_offset, input_size); |
| 293 | update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size); | 293 | update_descriptor_queue.AddBuffer(staging_ref.buffer, 0, staging_size); |
| 294 | const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue); | 294 | const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue); |
| 295 | 295 | ||
| 296 | scheduler.RequestOutsideRenderPassOperationContext(); | 296 | scheduler.RequestOutsideRenderPassOperationContext(); |
| 297 | scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, set, | 297 | scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = staging_ref.buffer, set, |
| 298 | num_tri_vertices, base_vertex, index_shift](vk::CommandBuffer cmdbuf) { | 298 | num_tri_vertices, base_vertex, index_shift](vk::CommandBuffer cmdbuf) { |
| 299 | static constexpr u32 dispatch_size = 1024; | 299 | static constexpr u32 dispatch_size = 1024; |
| 300 | const std::array push_constants = {base_vertex, index_shift}; | 300 | const std::array push_constants = {base_vertex, index_shift}; |
| @@ -317,7 +317,7 @@ std::pair<VkBuffer, u64> QuadIndexedPass::Assemble( | |||
| 317 | cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, | 317 | cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, |
| 318 | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, barrier, {}); | 318 | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, barrier, {}); |
| 319 | }); | 319 | }); |
| 320 | return {*buffer.handle, 0}; | 320 | return {staging_ref.buffer, 0}; |
| 321 | } | 321 | } |
| 322 | 322 | ||
| 323 | } // namespace Vulkan | 323 | } // namespace Vulkan |
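The compute passes above now obtain scratch space through StagingBufferPool::Request instead of GetUnusedBuffer, and work with a StagingBufferRef value rather than a VKBuffer reference. A minimal sketch of the new call pattern, assuming only the StagingBufferRef members exercised elsewhere in this diff (buffer and mapped_span); staging_pool, update_descriptor_queue, src and size_bytes are stand-ins taken from the surrounding code, not new API:

    // Request an upload-visible staging buffer, fill it from the CPU, then bind its raw VkBuffer handle.
    const StagingBufferRef staging = staging_pool.Request(size_bytes, MemoryUsage::Upload);
    std::memcpy(staging.mapped_span.data(), src, size_bytes);         // persistent host mapping, no Map()/Release() pair
    update_descriptor_queue.AddBuffer(staging.buffer, 0, size_bytes); // plain VkBuffer, no *handle dereference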
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.h b/src/video_core/renderer_vulkan/vk_compute_pass.h index 7ddb09afb..f5c6f5f17 100644 --- a/src/video_core/renderer_vulkan/vk_compute_pass.h +++ b/src/video_core/renderer_vulkan/vk_compute_pass.h | |||
| @@ -16,8 +16,8 @@ | |||
| 16 | namespace Vulkan { | 16 | namespace Vulkan { |
| 17 | 17 | ||
| 18 | class Device; | 18 | class Device; |
| 19 | class StagingBufferPool; | ||
| 19 | class VKScheduler; | 20 | class VKScheduler; |
| 20 | class VKStagingBufferPool; | ||
| 21 | class VKUpdateDescriptorQueue; | 21 | class VKUpdateDescriptorQueue; |
| 22 | 22 | ||
| 23 | class VKComputePass { | 23 | class VKComputePass { |
| @@ -45,7 +45,7 @@ class QuadArrayPass final : public VKComputePass { | |||
| 45 | public: | 45 | public: |
| 46 | explicit QuadArrayPass(const Device& device_, VKScheduler& scheduler_, | 46 | explicit QuadArrayPass(const Device& device_, VKScheduler& scheduler_, |
| 47 | VKDescriptorPool& descriptor_pool_, | 47 | VKDescriptorPool& descriptor_pool_, |
| 48 | VKStagingBufferPool& staging_buffer_pool_, | 48 | StagingBufferPool& staging_buffer_pool_, |
| 49 | VKUpdateDescriptorQueue& update_descriptor_queue_); | 49 | VKUpdateDescriptorQueue& update_descriptor_queue_); |
| 50 | ~QuadArrayPass(); | 50 | ~QuadArrayPass(); |
| 51 | 51 | ||
| @@ -53,15 +53,14 @@ public: | |||
| 53 | 53 | ||
| 54 | private: | 54 | private: |
| 55 | VKScheduler& scheduler; | 55 | VKScheduler& scheduler; |
| 56 | VKStagingBufferPool& staging_buffer_pool; | 56 | StagingBufferPool& staging_buffer_pool; |
| 57 | VKUpdateDescriptorQueue& update_descriptor_queue; | 57 | VKUpdateDescriptorQueue& update_descriptor_queue; |
| 58 | }; | 58 | }; |
| 59 | 59 | ||
| 60 | class Uint8Pass final : public VKComputePass { | 60 | class Uint8Pass final : public VKComputePass { |
| 61 | public: | 61 | public: |
| 62 | explicit Uint8Pass(const Device& device_, VKScheduler& scheduler_, | 62 | explicit Uint8Pass(const Device& device_, VKScheduler& scheduler_, |
| 63 | VKDescriptorPool& descriptor_pool_, | 63 | VKDescriptorPool& descriptor_pool_, StagingBufferPool& staging_buffer_pool_, |
| 64 | VKStagingBufferPool& staging_buffer_pool_, | ||
| 65 | VKUpdateDescriptorQueue& update_descriptor_queue_); | 64 | VKUpdateDescriptorQueue& update_descriptor_queue_); |
| 66 | ~Uint8Pass(); | 65 | ~Uint8Pass(); |
| 67 | 66 | ||
| @@ -69,7 +68,7 @@ public: | |||
| 69 | 68 | ||
| 70 | private: | 69 | private: |
| 71 | VKScheduler& scheduler; | 70 | VKScheduler& scheduler; |
| 72 | VKStagingBufferPool& staging_buffer_pool; | 71 | StagingBufferPool& staging_buffer_pool; |
| 73 | VKUpdateDescriptorQueue& update_descriptor_queue; | 72 | VKUpdateDescriptorQueue& update_descriptor_queue; |
| 74 | }; | 73 | }; |
| 75 | 74 | ||
| @@ -77,7 +76,7 @@ class QuadIndexedPass final : public VKComputePass { | |||
| 77 | public: | 76 | public: |
| 78 | explicit QuadIndexedPass(const Device& device_, VKScheduler& scheduler_, | 77 | explicit QuadIndexedPass(const Device& device_, VKScheduler& scheduler_, |
| 79 | VKDescriptorPool& descriptor_pool_, | 78 | VKDescriptorPool& descriptor_pool_, |
| 80 | VKStagingBufferPool& staging_buffer_pool_, | 79 | StagingBufferPool& staging_buffer_pool_, |
| 81 | VKUpdateDescriptorQueue& update_descriptor_queue_); | 80 | VKUpdateDescriptorQueue& update_descriptor_queue_); |
| 82 | ~QuadIndexedPass(); | 81 | ~QuadIndexedPass(); |
| 83 | 82 | ||
| @@ -87,7 +86,7 @@ public: | |||
| 87 | 86 | ||
| 88 | private: | 87 | private: |
| 89 | VKScheduler& scheduler; | 88 | VKScheduler& scheduler; |
| 90 | VKStagingBufferPool& staging_buffer_pool; | 89 | StagingBufferPool& staging_buffer_pool; |
| 91 | VKUpdateDescriptorQueue& update_descriptor_queue; | 90 | VKUpdateDescriptorQueue& update_descriptor_queue; |
| 92 | }; | 91 | }; |
| 93 | 92 | ||
diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.cpp b/src/video_core/renderer_vulkan/vk_memory_manager.cpp deleted file mode 100644 index a6abd0eee..000000000 --- a/src/video_core/renderer_vulkan/vk_memory_manager.cpp +++ /dev/null | |||
| @@ -1,230 +0,0 @@ | |||
| 1 | // Copyright 2018 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include <algorithm> | ||
| 6 | #include <optional> | ||
| 7 | #include <tuple> | ||
| 8 | #include <vector> | ||
| 9 | |||
| 10 | #include "common/alignment.h" | ||
| 11 | #include "common/assert.h" | ||
| 12 | #include "common/common_types.h" | ||
| 13 | #include "common/logging/log.h" | ||
| 14 | #include "video_core/renderer_vulkan/vk_memory_manager.h" | ||
| 15 | #include "video_core/vulkan_common/vulkan_device.h" | ||
| 16 | #include "video_core/vulkan_common/vulkan_wrapper.h" | ||
| 17 | |||
| 18 | namespace Vulkan { | ||
| 19 | |||
| 20 | namespace { | ||
| 21 | |||
| 22 | u64 GetAllocationChunkSize(u64 required_size) { | ||
| 23 | static constexpr u64 sizes[] = {16ULL << 20, 32ULL << 20, 64ULL << 20, 128ULL << 20}; | ||
| 24 | auto it = std::lower_bound(std::begin(sizes), std::end(sizes), required_size); | ||
| 25 | return it != std::end(sizes) ? *it : Common::AlignUp(required_size, 256ULL << 20); | ||
| 26 | } | ||
| 27 | |||
| 28 | } // Anonymous namespace | ||
| 29 | |||
| 30 | class VKMemoryAllocation final { | ||
| 31 | public: | ||
| 32 | explicit VKMemoryAllocation(const Device& device_, vk::DeviceMemory memory_, | ||
| 33 | VkMemoryPropertyFlags properties_, u64 allocation_size_, u32 type_) | ||
| 34 | : device{device_}, memory{std::move(memory_)}, properties{properties_}, | ||
| 35 | allocation_size{allocation_size_}, shifted_type{ShiftType(type_)} {} | ||
| 36 | |||
| 37 | VKMemoryCommit Commit(VkDeviceSize commit_size, VkDeviceSize alignment) { | ||
| 38 | auto found = TryFindFreeSection(free_iterator, allocation_size, | ||
| 39 | static_cast<u64>(commit_size), static_cast<u64>(alignment)); | ||
| 40 | if (!found) { | ||
| 41 | found = TryFindFreeSection(0, free_iterator, static_cast<u64>(commit_size), | ||
| 42 | static_cast<u64>(alignment)); | ||
| 43 | if (!found) { | ||
| 44 | // Signal out of memory; the caller will try to allocate more memory. | ||
| 45 | return nullptr; | ||
| 46 | } | ||
| 47 | } | ||
| 48 | auto commit = std::make_unique<VKMemoryCommitImpl>(device, this, memory, *found, | ||
| 49 | *found + commit_size); | ||
| 50 | commits.push_back(commit.get()); | ||
| 51 | |||
| 52 | // The address right after the last commit is very likely to be free. | ||
| 53 | free_iterator = *found + commit_size; | ||
| 54 | |||
| 55 | return commit; | ||
| 56 | } | ||
| 57 | |||
| 58 | void Free(const VKMemoryCommitImpl* commit) { | ||
| 59 | ASSERT(commit); | ||
| 60 | |||
| 61 | const auto it = std::find(std::begin(commits), std::end(commits), commit); | ||
| 62 | if (it == commits.end()) { | ||
| 63 | UNREACHABLE_MSG("Freeing unallocated commit!"); | ||
| 64 | return; | ||
| 65 | } | ||
| 66 | commits.erase(it); | ||
| 67 | } | ||
| 68 | |||
| 69 | /// Returns whether this allocation is compatible with the arguments. | ||
| 70 | bool IsCompatible(VkMemoryPropertyFlags wanted_properties, u32 type_mask) const { | ||
| 71 | return (wanted_properties & properties) && (type_mask & shifted_type) != 0; | ||
| 72 | } | ||
| 73 | |||
| 74 | private: | ||
| 75 | static constexpr u32 ShiftType(u32 type) { | ||
| 76 | return 1U << type; | ||
| 77 | } | ||
| 78 | |||
| 79 | /// Searches for a free region between "start" and "end" that satisfies the requested size and | ||
| 80 | /// alignment; returns std::nullopt when no such region exists. | ||
| 81 | std::optional<u64> TryFindFreeSection(u64 start, u64 end, u64 size, u64 alignment) const { | ||
| 82 | u64 iterator = Common::AlignUp(start, alignment); | ||
| 83 | while (iterator + size <= end) { | ||
| 84 | const u64 try_left = iterator; | ||
| 85 | const u64 try_right = try_left + size; | ||
| 86 | |||
| 87 | bool overlap = false; | ||
| 88 | for (const auto& commit : commits) { | ||
| 89 | const auto [commit_left, commit_right] = commit->interval; | ||
| 90 | if (try_left < commit_right && commit_left < try_right) { | ||
| 91 | // There's an overlap; continue the search where the overlapping commit ends. | ||
| 92 | iterator = Common::AlignUp(commit_right, alignment); | ||
| 93 | overlap = true; | ||
| 94 | break; | ||
| 95 | } | ||
| 96 | } | ||
| 97 | if (!overlap) { | ||
| 98 | // A free address has been found. | ||
| 99 | return try_left; | ||
| 100 | } | ||
| 101 | } | ||
| 102 | |||
| 103 | // No free regions were found; return an empty optional. | ||
| 104 | return std::nullopt; | ||
| 105 | } | ||
| 106 | |||
| 107 | const Device& device; ///< Vulkan device. | ||
| 108 | const vk::DeviceMemory memory; ///< Vulkan memory allocation handler. | ||
| 109 | const VkMemoryPropertyFlags properties; ///< Vulkan properties. | ||
| 110 | const u64 allocation_size; ///< Size of this allocation. | ||
| 111 | const u32 shifted_type; ///< Stored Vulkan type of this allocation, shifted. | ||
| 112 | |||
| 113 | /// Hints where the next free region is likely going to be. | ||
| 114 | u64 free_iterator{}; | ||
| 115 | |||
| 116 | /// Stores all commits done from this allocation. | ||
| 117 | std::vector<const VKMemoryCommitImpl*> commits; | ||
| 118 | }; | ||
| 119 | |||
| 120 | VKMemoryManager::VKMemoryManager(const Device& device_) | ||
| 121 | : device{device_}, properties{device_.GetPhysical().GetMemoryProperties()} {} | ||
| 122 | |||
| 123 | VKMemoryManager::~VKMemoryManager() = default; | ||
| 124 | |||
| 125 | VKMemoryCommit VKMemoryManager::Commit(const VkMemoryRequirements& requirements, | ||
| 126 | bool host_visible) { | ||
| 127 | const u64 chunk_size = GetAllocationChunkSize(requirements.size); | ||
| 128 | |||
| 129 | // When a host-visible commit is requested, search for host-visible and coherent memory; | ||
| 130 | // otherwise, search for a fast device-local type. | ||
| 131 | const VkMemoryPropertyFlags wanted_properties = | ||
| 132 | host_visible ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | ||
| 133 | : VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; | ||
| 134 | |||
| 135 | if (auto commit = TryAllocCommit(requirements, wanted_properties)) { | ||
| 136 | return commit; | ||
| 137 | } | ||
| 138 | |||
| 139 | // Commit has failed, allocate more memory. | ||
| 140 | if (!AllocMemory(wanted_properties, requirements.memoryTypeBits, chunk_size)) { | ||
| 141 | // TODO(Rodrigo): Handle these situations in some way like flushing to guest memory. | ||
| 142 | // Allocation has failed, panic. | ||
| 143 | UNREACHABLE_MSG("Ran out of VRAM!"); | ||
| 144 | return {}; | ||
| 145 | } | ||
| 146 | |||
| 147 | // Commit again; this time it won't fail, since there's a fresh allocation above. If it does, | ||
| 148 | // there's a bug. | ||
| 149 | auto commit = TryAllocCommit(requirements, wanted_properties); | ||
| 150 | ASSERT(commit); | ||
| 151 | return commit; | ||
| 152 | } | ||
| 153 | |||
| 154 | VKMemoryCommit VKMemoryManager::Commit(const vk::Buffer& buffer, bool host_visible) { | ||
| 155 | auto commit = Commit(device.GetLogical().GetBufferMemoryRequirements(*buffer), host_visible); | ||
| 156 | buffer.BindMemory(commit->GetMemory(), commit->GetOffset()); | ||
| 157 | return commit; | ||
| 158 | } | ||
| 159 | |||
| 160 | VKMemoryCommit VKMemoryManager::Commit(const vk::Image& image, bool host_visible) { | ||
| 161 | auto commit = Commit(device.GetLogical().GetImageMemoryRequirements(*image), host_visible); | ||
| 162 | image.BindMemory(commit->GetMemory(), commit->GetOffset()); | ||
| 163 | return commit; | ||
| 164 | } | ||
| 165 | |||
| 166 | bool VKMemoryManager::AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask, | ||
| 167 | u64 size) { | ||
| 168 | const u32 type = [&] { | ||
| 169 | for (u32 type_index = 0; type_index < properties.memoryTypeCount; ++type_index) { | ||
| 170 | const auto flags = properties.memoryTypes[type_index].propertyFlags; | ||
| 171 | if ((type_mask & (1U << type_index)) && (flags & wanted_properties)) { | ||
| 172 | // This memory type is allowed by the mask and has the wanted properties. | ||
| 173 | return type_index; | ||
| 174 | } | ||
| 175 | } | ||
| 176 | UNREACHABLE_MSG("Couldn't find a compatible memory type!"); | ||
| 177 | return 0U; | ||
| 178 | }(); | ||
| 179 | |||
| 180 | // Try to allocate found type. | ||
| 181 | vk::DeviceMemory memory = device.GetLogical().TryAllocateMemory({ | ||
| 182 | .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, | ||
| 183 | .pNext = nullptr, | ||
| 184 | .allocationSize = size, | ||
| 185 | .memoryTypeIndex = type, | ||
| 186 | }); | ||
| 187 | if (!memory) { | ||
| 188 | LOG_CRITICAL(Render_Vulkan, "Device allocation failed!"); | ||
| 189 | return false; | ||
| 190 | } | ||
| 191 | |||
| 192 | allocations.push_back(std::make_unique<VKMemoryAllocation>(device, std::move(memory), | ||
| 193 | wanted_properties, size, type)); | ||
| 194 | return true; | ||
| 195 | } | ||
| 196 | |||
| 197 | VKMemoryCommit VKMemoryManager::TryAllocCommit(const VkMemoryRequirements& requirements, | ||
| 198 | VkMemoryPropertyFlags wanted_properties) { | ||
| 199 | for (auto& allocation : allocations) { | ||
| 200 | if (!allocation->IsCompatible(wanted_properties, requirements.memoryTypeBits)) { | ||
| 201 | continue; | ||
| 202 | } | ||
| 203 | if (auto commit = allocation->Commit(requirements.size, requirements.alignment)) { | ||
| 204 | return commit; | ||
| 205 | } | ||
| 206 | } | ||
| 207 | return {}; | ||
| 208 | } | ||
| 209 | |||
| 210 | VKMemoryCommitImpl::VKMemoryCommitImpl(const Device& device_, VKMemoryAllocation* allocation_, | ||
| 211 | const vk::DeviceMemory& memory_, u64 begin_, u64 end_) | ||
| 212 | : device{device_}, memory{memory_}, interval{begin_, end_}, allocation{allocation_} {} | ||
| 213 | |||
| 214 | VKMemoryCommitImpl::~VKMemoryCommitImpl() { | ||
| 215 | allocation->Free(this); | ||
| 216 | } | ||
| 217 | |||
| 218 | MemoryMap VKMemoryCommitImpl::Map(u64 size, u64 offset_) const { | ||
| 219 | return MemoryMap(this, std::span<u8>(memory.Map(interval.first + offset_, size), size)); | ||
| 220 | } | ||
| 221 | |||
| 222 | void VKMemoryCommitImpl::Unmap() const { | ||
| 223 | memory.Unmap(); | ||
| 224 | } | ||
| 225 | |||
| 226 | MemoryMap VKMemoryCommitImpl::Map() const { | ||
| 227 | return Map(interval.second - interval.first); | ||
| 228 | } | ||
| 229 | |||
| 230 | } // namespace Vulkan | ||
diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.h b/src/video_core/renderer_vulkan/vk_memory_manager.h deleted file mode 100644 index 2452bca4e..000000000 --- a/src/video_core/renderer_vulkan/vk_memory_manager.h +++ /dev/null | |||
| @@ -1,132 +0,0 @@ | |||
| 1 | // Copyright 2019 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include <memory> | ||
| 8 | #include <span> | ||
| 9 | #include <utility> | ||
| 10 | #include <vector> | ||
| 11 | #include "common/common_types.h" | ||
| 12 | #include "video_core/vulkan_common/vulkan_wrapper.h" | ||
| 13 | |||
| 14 | namespace Vulkan { | ||
| 15 | |||
| 16 | class Device; | ||
| 17 | class MemoryMap; | ||
| 18 | class VKMemoryAllocation; | ||
| 19 | class VKMemoryCommitImpl; | ||
| 20 | |||
| 21 | using VKMemoryCommit = std::unique_ptr<VKMemoryCommitImpl>; | ||
| 22 | |||
| 23 | class VKMemoryManager final { | ||
| 24 | public: | ||
| 25 | explicit VKMemoryManager(const Device& device_); | ||
| 26 | VKMemoryManager(const VKMemoryManager&) = delete; | ||
| 27 | ~VKMemoryManager(); | ||
| 28 | |||
| 29 | /** | ||
| 30 | * Commits memory with the specified requirements. | ||
| 31 | * @param requirements Requirements returned from a Vulkan call. | ||
| 32 | * @param host_visible Signals the allocator that it *must* use host visible and coherent | ||
| 33 | * memory. When passing false, it will try to allocate device local memory. | ||
| 34 | * @returns A memory commit. | ||
| 35 | */ | ||
| 36 | VKMemoryCommit Commit(const VkMemoryRequirements& requirements, bool host_visible); | ||
| 37 | |||
| 38 | /// Commits memory required by the buffer and binds it. | ||
| 39 | VKMemoryCommit Commit(const vk::Buffer& buffer, bool host_visible); | ||
| 40 | |||
| 41 | /// Commits memory required by the image and binds it. | ||
| 42 | VKMemoryCommit Commit(const vk::Image& image, bool host_visible); | ||
| 43 | |||
| 44 | private: | ||
| 45 | /// Allocates a chunk of memory. | ||
| 46 | bool AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask, u64 size); | ||
| 47 | |||
| 48 | /// Tries to allocate a memory commit. | ||
| 49 | VKMemoryCommit TryAllocCommit(const VkMemoryRequirements& requirements, | ||
| 50 | VkMemoryPropertyFlags wanted_properties); | ||
| 51 | |||
| 52 | const Device& device; ///< Device handler. | ||
| 53 | const VkPhysicalDeviceMemoryProperties properties; ///< Physical device properties. | ||
| 54 | std::vector<std::unique_ptr<VKMemoryAllocation>> allocations; ///< Current allocations. | ||
| 55 | }; | ||
| 56 | |||
| 57 | class VKMemoryCommitImpl final { | ||
| 58 | friend VKMemoryAllocation; | ||
| 59 | friend MemoryMap; | ||
| 60 | |||
| 61 | public: | ||
| 62 | explicit VKMemoryCommitImpl(const Device& device_, VKMemoryAllocation* allocation_, | ||
| 63 | const vk::DeviceMemory& memory_, u64 begin_, u64 end_); | ||
| 64 | ~VKMemoryCommitImpl(); | ||
| 65 | |||
| 66 | /// Maps a memory region and returns a pointer to it. | ||
| 67 | /// It's illegal to have more than one memory map at the same time. | ||
| 68 | MemoryMap Map(u64 size, u64 offset = 0) const; | ||
| 69 | |||
| 70 | /// Maps the whole commit and returns a pointer to it. | ||
| 71 | /// It's illegal to have more than one memory map at the same time. | ||
| 72 | MemoryMap Map() const; | ||
| 73 | |||
| 74 | /// Returns the Vulkan memory handler. | ||
| 75 | VkDeviceMemory GetMemory() const { | ||
| 76 | return *memory; | ||
| 77 | } | ||
| 78 | |||
| 79 | /// Returns the start position of the commit relative to the allocation. | ||
| 80 | VkDeviceSize GetOffset() const { | ||
| 81 | return static_cast<VkDeviceSize>(interval.first); | ||
| 82 | } | ||
| 83 | |||
| 84 | private: | ||
| 85 | /// Unmaps memory. | ||
| 86 | void Unmap() const; | ||
| 87 | |||
| 88 | const Device& device; ///< Vulkan device. | ||
| 89 | const vk::DeviceMemory& memory; ///< Vulkan device memory handler. | ||
| 90 | std::pair<u64, u64> interval{}; ///< Interval where the commit exists. | ||
| 91 | VKMemoryAllocation* allocation{}; ///< Pointer to the large memory allocation. | ||
| 92 | }; | ||
| 93 | |||
| 94 | /// Holds ownership of a memory map. | ||
| 95 | class MemoryMap final { | ||
| 96 | public: | ||
| 97 | explicit MemoryMap(const VKMemoryCommitImpl* commit_, std::span<u8> span_) | ||
| 98 | : commit{commit_}, span{span_} {} | ||
| 99 | |||
| 100 | ~MemoryMap() { | ||
| 101 | if (commit) { | ||
| 102 | commit->Unmap(); | ||
| 103 | } | ||
| 104 | } | ||
| 105 | |||
| 106 | /// Prematurely releases the memory map. | ||
| 107 | void Release() { | ||
| 108 | commit->Unmap(); | ||
| 109 | commit = nullptr; | ||
| 110 | } | ||
| 111 | |||
| 112 | /// Returns a span to the memory map. | ||
| 113 | [[nodiscard]] std::span<u8> Span() const noexcept { | ||
| 114 | return span; | ||
| 115 | } | ||
| 116 | |||
| 117 | /// Returns the address of the memory map. | ||
| 118 | [[nodiscard]] u8* Address() const noexcept { | ||
| 119 | return span.data(); | ||
| 120 | } | ||
| 121 | |||
| 122 | /// Returns the address of the memory map. | ||
| 123 | [[nodiscard]] operator u8*() const noexcept { | ||
| 124 | return span.data(); | ||
| 125 | } | ||
| 126 | |||
| 127 | private: | ||
| 128 | const VKMemoryCommitImpl* commit{}; ///< Mapped memory commit. | ||
| 129 | std::span<u8> span; ///< Address to the mapped memory. | ||
| 130 | }; | ||
| 131 | |||
| 132 | } // namespace Vulkan | ||
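The two files removed above are superseded by video_core/vulkan_common/vulkan_memory_allocator.h. A declaration-only sketch of the replacement surface as implied by the call sites in this diff (Commit on buffers and images taking a MemoryUsage, a movable value-type MemoryCommit that is released by assigning MemoryCommit{}, and Map() returning std::span<u8>); any member beyond what those call sites use is an assumption, not the actual header:

    #include <span>

    #include "common/common_types.h"
    #include "video_core/vulkan_common/vulkan_wrapper.h"

    namespace Vulkan {

    enum class MemoryUsage { DeviceLocal, Upload, Download };

    class MemoryCommit {
    public:
        MemoryCommit() = default;  // empty commit; assigning one over an existing commit releases it
        std::span<u8> Map();       // host mapping of the committed range (replaces MemoryMap/Release)
        // Move-only value semantics replace the old std::unique_ptr<VKMemoryCommitImpl> handle.
    };

    class MemoryAllocator {
    public:
        MemoryCommit Commit(const vk::Buffer& buffer, MemoryUsage usage); // commits memory and binds it
        MemoryCommit Commit(const vk::Image& image, MemoryUsage usage);   // commits memory and binds it
    };

    } // namespace Vulkan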
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index ce3db49bd..f0a111829 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp | |||
| @@ -409,24 +409,24 @@ void RasterizerVulkan::DrawParameters::Draw(vk::CommandBuffer cmdbuf) const { | |||
| 409 | RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, | 409 | RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, |
| 410 | Tegra::MemoryManager& gpu_memory_, | 410 | Tegra::MemoryManager& gpu_memory_, |
| 411 | Core::Memory::Memory& cpu_memory_, VKScreenInfo& screen_info_, | 411 | Core::Memory::Memory& cpu_memory_, VKScreenInfo& screen_info_, |
| 412 | const Device& device_, VKMemoryManager& memory_manager_, | 412 | const Device& device_, MemoryAllocator& memory_allocator_, |
| 413 | StateTracker& state_tracker_, VKScheduler& scheduler_) | 413 | StateTracker& state_tracker_, VKScheduler& scheduler_) |
| 414 | : RasterizerAccelerated{cpu_memory_}, gpu{gpu_}, | 414 | : RasterizerAccelerated{cpu_memory_}, gpu{gpu_}, |
| 415 | gpu_memory{gpu_memory_}, maxwell3d{gpu.Maxwell3D()}, kepler_compute{gpu.KeplerCompute()}, | 415 | gpu_memory{gpu_memory_}, maxwell3d{gpu.Maxwell3D()}, kepler_compute{gpu.KeplerCompute()}, |
| 416 | screen_info{screen_info_}, device{device_}, memory_manager{memory_manager_}, | 416 | screen_info{screen_info_}, device{device_}, memory_allocator{memory_allocator_}, |
| 417 | state_tracker{state_tracker_}, scheduler{scheduler_}, stream_buffer(device, scheduler), | 417 | state_tracker{state_tracker_}, scheduler{scheduler_}, stream_buffer(device, scheduler), |
| 418 | staging_pool(device, memory_manager, scheduler), descriptor_pool(device, scheduler), | 418 | staging_pool(device, memory_allocator, scheduler), descriptor_pool(device, scheduler), |
| 419 | update_descriptor_queue(device, scheduler), | 419 | update_descriptor_queue(device, scheduler), |
| 420 | blit_image(device, scheduler, state_tracker, descriptor_pool), | 420 | blit_image(device, scheduler, state_tracker, descriptor_pool), |
| 421 | quad_array_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue), | 421 | quad_array_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue), |
| 422 | quad_indexed_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue), | 422 | quad_indexed_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue), |
| 423 | uint8_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue), | 423 | uint8_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue), |
| 424 | texture_cache_runtime{device, scheduler, memory_manager, staging_pool, blit_image}, | 424 | texture_cache_runtime{device, scheduler, memory_allocator, staging_pool, blit_image}, |
| 425 | texture_cache(texture_cache_runtime, *this, maxwell3d, kepler_compute, gpu_memory), | 425 | texture_cache(texture_cache_runtime, *this, maxwell3d, kepler_compute, gpu_memory), |
| 426 | pipeline_cache(*this, gpu, maxwell3d, kepler_compute, gpu_memory, device, scheduler, | 426 | pipeline_cache(*this, gpu, maxwell3d, kepler_compute, gpu_memory, device, scheduler, |
| 427 | descriptor_pool, update_descriptor_queue), | 427 | descriptor_pool, update_descriptor_queue), |
| 428 | buffer_cache(*this, gpu_memory, cpu_memory_, device, memory_manager, scheduler, stream_buffer, | 428 | buffer_cache(*this, gpu_memory, cpu_memory_, device, memory_allocator, scheduler, |
| 429 | staging_pool), | 429 | stream_buffer, staging_pool), |
| 430 | query_cache{*this, maxwell3d, gpu_memory, device, scheduler}, | 430 | query_cache{*this, maxwell3d, gpu_memory, device, scheduler}, |
| 431 | fence_manager(*this, gpu, gpu_memory, texture_cache, buffer_cache, query_cache, scheduler), | 431 | fence_manager(*this, gpu, gpu_memory, texture_cache, buffer_cache, query_cache, scheduler), |
| 432 | wfi_event(device.GetLogical().CreateEvent()), async_shaders(emu_window_) { | 432 | wfi_event(device.GetLogical().CreateEvent()), async_shaders(emu_window_) { |
| @@ -1445,7 +1445,7 @@ VkBuffer RasterizerVulkan::DefaultBuffer() { | |||
| 1445 | .queueFamilyIndexCount = 0, | 1445 | .queueFamilyIndexCount = 0, |
| 1446 | .pQueueFamilyIndices = nullptr, | 1446 | .pQueueFamilyIndices = nullptr, |
| 1447 | }); | 1447 | }); |
| 1448 | default_buffer_commit = memory_manager.Commit(default_buffer, false); | 1448 | default_buffer_commit = memory_allocator.Commit(default_buffer, MemoryUsage::DeviceLocal); |
| 1449 | 1449 | ||
| 1450 | scheduler.RequestOutsideRenderPassOperationContext(); | 1450 | scheduler.RequestOutsideRenderPassOperationContext(); |
| 1451 | scheduler.Record([buffer = *default_buffer](vk::CommandBuffer cmdbuf) { | 1451 | scheduler.Record([buffer = *default_buffer](vk::CommandBuffer cmdbuf) { |
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h index 4695718e9..8e261b9bd 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.h +++ b/src/video_core/renderer_vulkan/vk_rasterizer.h | |||
| @@ -21,7 +21,6 @@ | |||
| 21 | #include "video_core/renderer_vulkan/vk_compute_pass.h" | 21 | #include "video_core/renderer_vulkan/vk_compute_pass.h" |
| 22 | #include "video_core/renderer_vulkan/vk_descriptor_pool.h" | 22 | #include "video_core/renderer_vulkan/vk_descriptor_pool.h" |
| 23 | #include "video_core/renderer_vulkan/vk_fence_manager.h" | 23 | #include "video_core/renderer_vulkan/vk_fence_manager.h" |
| 24 | #include "video_core/renderer_vulkan/vk_memory_manager.h" | ||
| 25 | #include "video_core/renderer_vulkan/vk_pipeline_cache.h" | 24 | #include "video_core/renderer_vulkan/vk_pipeline_cache.h" |
| 26 | #include "video_core/renderer_vulkan/vk_query_cache.h" | 25 | #include "video_core/renderer_vulkan/vk_query_cache.h" |
| 27 | #include "video_core/renderer_vulkan/vk_scheduler.h" | 26 | #include "video_core/renderer_vulkan/vk_scheduler.h" |
| @@ -30,6 +29,7 @@ | |||
| 30 | #include "video_core/renderer_vulkan/vk_texture_cache.h" | 29 | #include "video_core/renderer_vulkan/vk_texture_cache.h" |
| 31 | #include "video_core/renderer_vulkan/vk_update_descriptor.h" | 30 | #include "video_core/renderer_vulkan/vk_update_descriptor.h" |
| 32 | #include "video_core/shader/async_shaders.h" | 31 | #include "video_core/shader/async_shaders.h" |
| 32 | #include "video_core/vulkan_common/vulkan_memory_allocator.h" | ||
| 33 | #include "video_core/vulkan_common/vulkan_wrapper.h" | 33 | #include "video_core/vulkan_common/vulkan_wrapper.h" |
| 34 | 34 | ||
| 35 | namespace Core { | 35 | namespace Core { |
| @@ -56,7 +56,7 @@ public: | |||
| 56 | explicit RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, | 56 | explicit RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, |
| 57 | Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, | 57 | Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, |
| 58 | VKScreenInfo& screen_info_, const Device& device_, | 58 | VKScreenInfo& screen_info_, const Device& device_, |
| 59 | VKMemoryManager& memory_manager_, StateTracker& state_tracker_, | 59 | MemoryAllocator& memory_allocator_, StateTracker& state_tracker_, |
| 60 | VKScheduler& scheduler_); | 60 | VKScheduler& scheduler_); |
| 61 | ~RasterizerVulkan() override; | 61 | ~RasterizerVulkan() override; |
| 62 | 62 | ||
| @@ -213,12 +213,12 @@ private: | |||
| 213 | 213 | ||
| 214 | VKScreenInfo& screen_info; | 214 | VKScreenInfo& screen_info; |
| 215 | const Device& device; | 215 | const Device& device; |
| 216 | VKMemoryManager& memory_manager; | 216 | MemoryAllocator& memory_allocator; |
| 217 | StateTracker& state_tracker; | 217 | StateTracker& state_tracker; |
| 218 | VKScheduler& scheduler; | 218 | VKScheduler& scheduler; |
| 219 | 219 | ||
| 220 | VKStreamBuffer stream_buffer; | 220 | VKStreamBuffer stream_buffer; |
| 221 | VKStagingBufferPool staging_pool; | 221 | StagingBufferPool staging_pool; |
| 222 | VKDescriptorPool descriptor_pool; | 222 | VKDescriptorPool descriptor_pool; |
| 223 | VKUpdateDescriptorQueue update_descriptor_queue; | 223 | VKUpdateDescriptorQueue update_descriptor_queue; |
| 224 | BlitImageHelper blit_image; | 224 | BlitImageHelper blit_image; |
| @@ -234,7 +234,7 @@ private: | |||
| 234 | VKFenceManager fence_manager; | 234 | VKFenceManager fence_manager; |
| 235 | 235 | ||
| 236 | vk::Buffer default_buffer; | 236 | vk::Buffer default_buffer; |
| 237 | VKMemoryCommit default_buffer_commit; | 237 | MemoryCommit default_buffer_commit; |
| 238 | vk::Event wfi_event; | 238 | vk::Event wfi_event; |
| 239 | VideoCommon::Shader::AsyncShaders async_shaders; | 239 | VideoCommon::Shader::AsyncShaders async_shaders; |
| 240 | 240 | ||
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp index 1e0b8b922..97fd41cc1 100644 --- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp +++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp | |||
| @@ -3,10 +3,12 @@ | |||
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| 5 | #include <algorithm> | 5 | #include <algorithm> |
| 6 | #include <unordered_map> | ||
| 7 | #include <utility> | 6 | #include <utility> |
| 8 | #include <vector> | 7 | #include <vector> |
| 9 | 8 | ||
| 9 | #include <fmt/format.h> | ||
| 10 | |||
| 11 | #include "common/assert.h" | ||
| 10 | #include "common/bit_util.h" | 12 | #include "common/bit_util.h" |
| 11 | #include "common/common_types.h" | 13 | #include "common/common_types.h" |
| 12 | #include "video_core/renderer_vulkan/vk_scheduler.h" | 14 | #include "video_core/renderer_vulkan/vk_scheduler.h" |
| @@ -16,45 +18,51 @@ | |||
| 16 | 18 | ||
| 17 | namespace Vulkan { | 19 | namespace Vulkan { |
| 18 | 20 | ||
| 19 | VKStagingBufferPool::StagingBuffer::StagingBuffer(std::unique_ptr<VKBuffer> buffer_) | 21 | StagingBufferPool::StagingBufferPool(const Device& device_, MemoryAllocator& memory_allocator_, |
| 20 | : buffer{std::move(buffer_)} {} | 22 | VKScheduler& scheduler_) |
| 21 | 23 | : device{device_}, memory_allocator{memory_allocator_}, scheduler{scheduler_} {} | |
| 22 | VKStagingBufferPool::VKStagingBufferPool(const Device& device_, VKMemoryManager& memory_manager_, | ||
| 23 | VKScheduler& scheduler_) | ||
| 24 | : device{device_}, memory_manager{memory_manager_}, scheduler{scheduler_} {} | ||
| 25 | 24 | ||
| 26 | VKStagingBufferPool::~VKStagingBufferPool() = default; | 25 | StagingBufferPool::~StagingBufferPool() = default; |
| 27 | 26 | ||
| 28 | VKBuffer& VKStagingBufferPool::GetUnusedBuffer(std::size_t size, bool host_visible) { | 27 | StagingBufferRef StagingBufferPool::Request(size_t size, MemoryUsage usage) { |
| 29 | if (const auto buffer = TryGetReservedBuffer(size, host_visible)) { | 28 | if (const std::optional<StagingBufferRef> ref = TryGetReservedBuffer(size, usage)) { |
| 30 | return *buffer; | 29 | return *ref; |
| 31 | } | 30 | } |
| 32 | return CreateStagingBuffer(size, host_visible); | 31 | return CreateStagingBuffer(size, usage); |
| 33 | } | 32 | } |
| 34 | 33 | ||
| 35 | void VKStagingBufferPool::TickFrame() { | 34 | void StagingBufferPool::TickFrame() { |
| 36 | current_delete_level = (current_delete_level + 1) % NumLevels; | 35 | current_delete_level = (current_delete_level + 1) % NUM_LEVELS; |
| 37 | 36 | ||
| 38 | ReleaseCache(true); | 37 | ReleaseCache(MemoryUsage::DeviceLocal); |
| 39 | ReleaseCache(false); | 38 | ReleaseCache(MemoryUsage::Upload); |
| 39 | ReleaseCache(MemoryUsage::Download); | ||
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | VKBuffer* VKStagingBufferPool::TryGetReservedBuffer(std::size_t size, bool host_visible) { | 42 | std::optional<StagingBufferRef> StagingBufferPool::TryGetReservedBuffer(size_t size, |
| 43 | for (StagingBuffer& entry : GetCache(host_visible)[Common::Log2Ceil64(size)].entries) { | 43 | MemoryUsage usage) { |
| 44 | if (!scheduler.IsFree(entry.tick)) { | 44 | StagingBuffers& cache_level = GetCache(usage)[Common::Log2Ceil64(size)]; |
| 45 | continue; | 45 | |
| 46 | const auto is_free = [this](const StagingBuffer& entry) { | ||
| 47 | return scheduler.IsFree(entry.tick); | ||
| 48 | }; | ||
| 49 | auto& entries = cache_level.entries; | ||
| 50 | const auto hint_it = entries.begin() + cache_level.iterate_index; | ||
| 51 | auto it = std::find_if(entries.begin() + cache_level.iterate_index, entries.end(), is_free); | ||
| 52 | if (it == entries.end()) { | ||
| 53 | it = std::find_if(entries.begin(), hint_it, is_free); | ||
| 54 | if (it == hint_it) { | ||
| 55 | return std::nullopt; | ||
| 46 | } | 56 | } |
| 47 | entry.tick = scheduler.CurrentTick(); | ||
| 48 | return &*entry.buffer; | ||
| 49 | } | 57 | } |
| 50 | return nullptr; | 58 | cache_level.iterate_index = std::distance(entries.begin(), it) + 1; |
| 59 | it->tick = scheduler.CurrentTick(); | ||
| 60 | return it->Ref(); | ||
| 51 | } | 61 | } |
| 52 | 62 | ||
| 53 | VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_visible) { | 63 | StagingBufferRef StagingBufferPool::CreateStagingBuffer(size_t size, MemoryUsage usage) { |
| 54 | const u32 log2 = Common::Log2Ceil64(size); | 64 | const u32 log2 = Common::Log2Ceil64(size); |
| 55 | 65 | vk::Buffer buffer = device.GetLogical().CreateBuffer({ | |
| 56 | auto buffer = std::make_unique<VKBuffer>(); | ||
| 57 | buffer->handle = device.GetLogical().CreateBuffer({ | ||
| 58 | .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, | 66 | .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, |
| 59 | .pNext = nullptr, | 67 | .pNext = nullptr, |
| 60 | .flags = 0, | 68 | .flags = 0, |
| @@ -66,49 +74,63 @@ VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_v | |||
| 66 | .queueFamilyIndexCount = 0, | 74 | .queueFamilyIndexCount = 0, |
| 67 | .pQueueFamilyIndices = nullptr, | 75 | .pQueueFamilyIndices = nullptr, |
| 68 | }); | 76 | }); |
| 69 | buffer->commit = memory_manager.Commit(buffer->handle, host_visible); | 77 | if (device.HasDebuggingToolAttached()) { |
| 70 | 78 | ++buffer_index; | |
| 71 | std::vector<StagingBuffer>& entries = GetCache(host_visible)[log2].entries; | 79 | buffer.SetObjectNameEXT(fmt::format("Staging Buffer {}", buffer_index).c_str()); |
| 72 | StagingBuffer& entry = entries.emplace_back(std::move(buffer)); | 80 | } |
| 73 | entry.tick = scheduler.CurrentTick(); | 81 | MemoryCommit commit = memory_allocator.Commit(buffer, usage); |
| 74 | return *entry.buffer; | 82 | const std::span<u8> mapped_span = IsHostVisible(usage) ? commit.Map() : std::span<u8>{}; |
| 75 | } | 83 | |
| 76 | 84 | StagingBuffer& entry = GetCache(usage)[log2].entries.emplace_back(StagingBuffer{ | |
| 77 | VKStagingBufferPool::StagingBuffersCache& VKStagingBufferPool::GetCache(bool host_visible) { | 85 | .buffer = std::move(buffer), |
| 78 | return host_visible ? host_staging_buffers : device_staging_buffers; | 86 | .commit = std::move(commit), |
| 87 | .mapped_span = mapped_span, | ||
| 88 | .tick = scheduler.CurrentTick(), | ||
| 89 | }); | ||
| 90 | return entry.Ref(); | ||
| 79 | } | 91 | } |
| 80 | 92 | ||
| 81 | void VKStagingBufferPool::ReleaseCache(bool host_visible) { | 93 | StagingBufferPool::StagingBuffersCache& StagingBufferPool::GetCache(MemoryUsage usage) { |
| 82 | auto& cache = GetCache(host_visible); | 94 | switch (usage) { |
| 83 | const u64 size = ReleaseLevel(cache, current_delete_level); | 95 | case MemoryUsage::DeviceLocal: |
| 84 | if (size == 0) { | 96 | return device_local_cache; |
| 85 | return; | 97 | case MemoryUsage::Upload: |
| 98 | return upload_cache; | ||
| 99 | case MemoryUsage::Download: | ||
| 100 | return download_cache; | ||
| 101 | default: | ||
| 102 | UNREACHABLE_MSG("Invalid memory usage={}", usage); | ||
| 103 | return upload_cache; | ||
| 86 | } | 104 | } |
| 87 | } | 105 | } |
| 88 | 106 | ||
| 89 | u64 VKStagingBufferPool::ReleaseLevel(StagingBuffersCache& cache, std::size_t log2) { | 107 | void StagingBufferPool::ReleaseCache(MemoryUsage usage) { |
| 90 | static constexpr std::size_t deletions_per_tick = 16; | 108 | ReleaseLevel(GetCache(usage), current_delete_level); |
| 109 | } | ||
| 91 | 110 | ||
| 111 | void StagingBufferPool::ReleaseLevel(StagingBuffersCache& cache, size_t log2) { | ||
| 112 | constexpr size_t deletions_per_tick = 16; | ||
| 92 | auto& staging = cache[log2]; | 113 | auto& staging = cache[log2]; |
| 93 | auto& entries = staging.entries; | 114 | auto& entries = staging.entries; |
| 94 | const std::size_t old_size = entries.size(); | 115 | const size_t old_size = entries.size(); |
| 95 | 116 | ||
| 96 | const auto is_deleteable = [this](const StagingBuffer& entry) { | 117 | const auto is_deleteable = [this](const StagingBuffer& entry) { |
| 97 | return scheduler.IsFree(entry.tick); | 118 | return scheduler.IsFree(entry.tick); |
| 98 | }; | 119 | }; |
| 99 | const std::size_t begin_offset = staging.delete_index; | 120 | const size_t begin_offset = staging.delete_index; |
| 100 | const std::size_t end_offset = std::min(begin_offset + deletions_per_tick, old_size); | 121 | const size_t end_offset = std::min(begin_offset + deletions_per_tick, old_size); |
| 101 | const auto begin = std::begin(entries) + begin_offset; | 122 | const auto begin = entries.begin() + begin_offset; |
| 102 | const auto end = std::begin(entries) + end_offset; | 123 | const auto end = entries.begin() + end_offset; |
| 103 | entries.erase(std::remove_if(begin, end, is_deleteable), end); | 124 | entries.erase(std::remove_if(begin, end, is_deleteable), end); |
| 104 | 125 | ||
| 105 | const std::size_t new_size = entries.size(); | 126 | const size_t new_size = entries.size(); |
| 106 | staging.delete_index += deletions_per_tick; | 127 | staging.delete_index += deletions_per_tick; |
| 107 | if (staging.delete_index >= new_size) { | 128 | if (staging.delete_index >= new_size) { |
| 108 | staging.delete_index = 0; | 129 | staging.delete_index = 0; |
| 109 | } | 130 | } |
| 110 | 131 | if (staging.iterate_index > new_size) { | |
| 111 | return (1ULL << log2) * (old_size - new_size); | 132 | staging.iterate_index = 0; |
| 133 | } | ||
| 112 | } | 134 | } |
| 113 | 135 | ||
| 114 | } // namespace Vulkan | 136 | } // namespace Vulkan |
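The reuse path above now resumes its scan from a per-level iterate_index instead of always starting at the front, so a request skips entries that were just found busy; if the tail yields nothing it wraps around to the start and gives up only when it reaches the original hint (ReleaseLevel resets the hint whenever deletions shrink the vector past it). A standalone sketch of that wrap-around search, with a simplified Entry and a plain tick comparison standing in for VKScheduler::IsFree:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

// Simplified stand-in for a pooled StagingBuffer entry.
struct Entry {
    uint64_t tick = 0; // fence tick of the entry's last use
};

// Wrap-around search starting from a hint, mirroring StagingBufferPool::TryGetReservedBuffer.
// 'gpu_tick' stands in for the scheduler: an entry is reusable once its tick has passed.
std::optional<size_t> FindReusable(std::vector<Entry>& entries, size_t& iterate_index,
                                   uint64_t gpu_tick) {
    const auto is_free = [gpu_tick](const Entry& e) { return e.tick <= gpu_tick; };
    const auto hint_it = entries.begin() + iterate_index;
    auto it = std::find_if(hint_it, entries.end(), is_free);
    if (it == entries.end()) {
        it = std::find_if(entries.begin(), hint_it, is_free);
        if (it == hint_it) {
            return std::nullopt; // every buffer in this level is still in flight
        }
    }
    iterate_index = static_cast<size_t>(std::distance(entries.begin(), it)) + 1;
    return static_cast<size_t>(std::distance(entries.begin(), it));
}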
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h index 90dadcbbe..d42918a47 100644 --- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h +++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h | |||
| @@ -9,7 +9,7 @@ | |||
| 9 | 9 | ||
| 10 | #include "common/common_types.h" | 10 | #include "common/common_types.h" |
| 11 | 11 | ||
| 12 | #include "video_core/renderer_vulkan/vk_memory_manager.h" | 12 | #include "video_core/vulkan_common/vulkan_memory_allocator.h" |
| 13 | #include "video_core/vulkan_common/vulkan_wrapper.h" | 13 | #include "video_core/vulkan_common/vulkan_wrapper.h" |
| 14 | 14 | ||
| 15 | namespace Vulkan { | 15 | namespace Vulkan { |
| @@ -17,55 +17,65 @@ namespace Vulkan { | |||
| 17 | class Device; | 17 | class Device; |
| 18 | class VKScheduler; | 18 | class VKScheduler; |
| 19 | 19 | ||
| 20 | struct VKBuffer final { | 20 | struct StagingBufferRef { |
| 21 | vk::Buffer handle; | 21 | VkBuffer buffer; |
| 22 | VKMemoryCommit commit; | 22 | std::span<u8> mapped_span; |
| 23 | }; | 23 | }; |
| 24 | 24 | ||
| 25 | class VKStagingBufferPool final { | 25 | class StagingBufferPool { |
| 26 | public: | 26 | public: |
| 27 | explicit VKStagingBufferPool(const Device& device, VKMemoryManager& memory_manager, | 27 | explicit StagingBufferPool(const Device& device, MemoryAllocator& memory_allocator, |
| 28 | VKScheduler& scheduler); | 28 | VKScheduler& scheduler); |
| 29 | ~VKStagingBufferPool(); | 29 | ~StagingBufferPool(); |
| 30 | 30 | ||
| 31 | VKBuffer& GetUnusedBuffer(std::size_t size, bool host_visible); | 31 | StagingBufferRef Request(size_t size, MemoryUsage usage); |
| 32 | 32 | ||
| 33 | void TickFrame(); | 33 | void TickFrame(); |
| 34 | 34 | ||
| 35 | private: | 35 | private: |
| 36 | struct StagingBuffer final { | 36 | struct StagingBuffer { |
| 37 | explicit StagingBuffer(std::unique_ptr<VKBuffer> buffer); | 37 | vk::Buffer buffer; |
| 38 | 38 | MemoryCommit commit; | |
| 39 | std::unique_ptr<VKBuffer> buffer; | 39 | std::span<u8> mapped_span; |
| 40 | u64 tick = 0; | 40 | u64 tick = 0; |
| 41 | |||
| 42 | StagingBufferRef Ref() const noexcept { | ||
| 43 | return { | ||
| 44 | .buffer = *buffer, | ||
| 45 | .mapped_span = mapped_span, | ||
| 46 | }; | ||
| 47 | } | ||
| 41 | }; | 48 | }; |
| 42 | 49 | ||
| 43 | struct StagingBuffers final { | 50 | struct StagingBuffers { |
| 44 | std::vector<StagingBuffer> entries; | 51 | std::vector<StagingBuffer> entries; |
| 45 | std::size_t delete_index = 0; | 52 | size_t delete_index = 0; |
| 53 | size_t iterate_index = 0; | ||
| 46 | }; | 54 | }; |
| 47 | 55 | ||
| 48 | static constexpr std::size_t NumLevels = sizeof(std::size_t) * CHAR_BIT; | 56 | static constexpr size_t NUM_LEVELS = sizeof(size_t) * CHAR_BIT; |
| 49 | using StagingBuffersCache = std::array<StagingBuffers, NumLevels>; | 57 | using StagingBuffersCache = std::array<StagingBuffers, NUM_LEVELS>; |
| 50 | 58 | ||
| 51 | VKBuffer* TryGetReservedBuffer(std::size_t size, bool host_visible); | 59 | std::optional<StagingBufferRef> TryGetReservedBuffer(size_t size, MemoryUsage usage); |
| 52 | 60 | ||
| 53 | VKBuffer& CreateStagingBuffer(std::size_t size, bool host_visible); | 61 | StagingBufferRef CreateStagingBuffer(size_t size, MemoryUsage usage); |
| 54 | 62 | ||
| 55 | StagingBuffersCache& GetCache(bool host_visible); | 63 | StagingBuffersCache& GetCache(MemoryUsage usage); |
| 56 | 64 | ||
| 57 | void ReleaseCache(bool host_visible); | 65 | void ReleaseCache(MemoryUsage usage); |
| 58 | 66 | ||
| 59 | u64 ReleaseLevel(StagingBuffersCache& cache, std::size_t log2); | 67 | void ReleaseLevel(StagingBuffersCache& cache, size_t log2); |
| 60 | 68 | ||
| 61 | const Device& device; | 69 | const Device& device; |
| 62 | VKMemoryManager& memory_manager; | 70 | MemoryAllocator& memory_allocator; |
| 63 | VKScheduler& scheduler; | 71 | VKScheduler& scheduler; |
| 64 | 72 | ||
| 65 | StagingBuffersCache host_staging_buffers; | 73 | StagingBuffersCache device_local_cache; |
| 66 | StagingBuffersCache device_staging_buffers; | 74 | StagingBuffersCache upload_cache; |
| 75 | StagingBuffersCache download_cache; | ||
| 67 | 76 | ||
| 68 | std::size_t current_delete_level = 0; | 77 | size_t current_delete_level = 0; |
| 78 | u64 buffer_index = 0; | ||
| 69 | }; | 79 | }; |
| 70 | 80 | ||
| 71 | } // namespace Vulkan | 81 | } // namespace Vulkan |
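Requests are grouped into power-of-two size classes: Request rounds the size up with Common::Log2Ceil64 and indexes one of NUM_LEVELS levels (one per bit of size_t) in the cache for the given usage, so a 3000-byte request is served from the 4096-byte level. A minimal sketch of that bucket selection, using std::bit_width as an assumed stand-in for Common::Log2Ceil64:

#include <array>
#include <bit>
#include <climits>
#include <cstdint>
#include <vector>

// One level per power-of-two size class, as in StagingBufferPool::NUM_LEVELS.
constexpr size_t NUM_LEVELS = sizeof(size_t) * CHAR_BIT;

// Stand-in for Common::Log2Ceil64: exponent of the smallest power of two >= size.
constexpr uint32_t Log2Ceil(uint64_t size) {
    return size <= 1 ? 0 : static_cast<uint32_t>(std::bit_width(size - 1));
}

struct Level {
    std::vector<int> entries; // placeholder for the real StagingBuffer entries
};

std::array<Level, NUM_LEVELS> cache;

Level& LevelFor(uint64_t size) {
    return cache[Log2Ceil(size)]; // e.g. a 3000-byte request maps to the 4096-byte level
}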
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp index bd11de012..ab14922d7 100644 --- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp | |||
| @@ -10,12 +10,12 @@ | |||
| 10 | #include "video_core/engines/fermi_2d.h" | 10 | #include "video_core/engines/fermi_2d.h" |
| 11 | #include "video_core/renderer_vulkan/blit_image.h" | 11 | #include "video_core/renderer_vulkan/blit_image.h" |
| 12 | #include "video_core/renderer_vulkan/maxwell_to_vk.h" | 12 | #include "video_core/renderer_vulkan/maxwell_to_vk.h" |
| 13 | #include "video_core/renderer_vulkan/vk_memory_manager.h" | ||
| 14 | #include "video_core/renderer_vulkan/vk_rasterizer.h" | 13 | #include "video_core/renderer_vulkan/vk_rasterizer.h" |
| 15 | #include "video_core/renderer_vulkan/vk_scheduler.h" | 14 | #include "video_core/renderer_vulkan/vk_scheduler.h" |
| 16 | #include "video_core/renderer_vulkan/vk_staging_buffer_pool.h" | 15 | #include "video_core/renderer_vulkan/vk_staging_buffer_pool.h" |
| 17 | #include "video_core/renderer_vulkan/vk_texture_cache.h" | 16 | #include "video_core/renderer_vulkan/vk_texture_cache.h" |
| 18 | #include "video_core/vulkan_common/vulkan_device.h" | 17 | #include "video_core/vulkan_common/vulkan_device.h" |
| 18 | #include "video_core/vulkan_common/vulkan_memory_allocator.h" | ||
| 19 | #include "video_core/vulkan_common/vulkan_wrapper.h" | 19 | #include "video_core/vulkan_common/vulkan_wrapper.h" |
| 20 | 20 | ||
| 21 | namespace Vulkan { | 21 | namespace Vulkan { |
| @@ -554,10 +554,18 @@ void TextureCacheRuntime::Finish() { | |||
| 554 | } | 554 | } |
| 555 | 555 | ||
| 556 | ImageBufferMap TextureCacheRuntime::MapUploadBuffer(size_t size) { | 556 | ImageBufferMap TextureCacheRuntime::MapUploadBuffer(size_t size) { |
| 557 | const auto& buffer = staging_buffer_pool.GetUnusedBuffer(size, true); | 557 | const auto staging_ref = staging_buffer_pool.Request(size, MemoryUsage::Upload); |
| 558 | return ImageBufferMap{ | 558 | return { |
| 559 | .handle = *buffer.handle, | 559 | .handle = staging_ref.buffer, |
| 560 | .map = buffer.commit->Map(size), | 560 | .span = staging_ref.mapped_span, |
| 561 | }; | ||
| 562 | } | ||
| 563 | |||
| 564 | ImageBufferMap TextureCacheRuntime::MapDownloadBuffer(size_t size) { | ||
| 565 | const auto staging_ref = staging_buffer_pool.Request(size, MemoryUsage::Download); | ||
| 566 | return { | ||
| 567 | .handle = staging_ref.buffer, | ||
| 568 | .span = staging_ref.mapped_span, | ||
| 561 | }; | 569 | }; |
| 562 | } | 570 | } |
| 563 | 571 | ||
| @@ -788,9 +796,9 @@ Image::Image(TextureCacheRuntime& runtime, const ImageInfo& info_, GPUVAddr gpu_ | |||
| 788 | image(MakeImage(runtime.device, info)), buffer(MakeBuffer(runtime.device, info)), | 796 | image(MakeImage(runtime.device, info)), buffer(MakeBuffer(runtime.device, info)), |
| 789 | aspect_mask(ImageAspectMask(info.format)) { | 797 | aspect_mask(ImageAspectMask(info.format)) { |
| 790 | if (image) { | 798 | if (image) { |
| 791 | commit = runtime.memory_manager.Commit(image, false); | 799 | commit = runtime.memory_allocator.Commit(image, MemoryUsage::DeviceLocal); |
| 792 | } else { | 800 | } else { |
| 793 | commit = runtime.memory_manager.Commit(buffer, false); | 801 | commit = runtime.memory_allocator.Commit(buffer, MemoryUsage::DeviceLocal); |
| 794 | } | 802 | } |
| 795 | if (IsPixelFormatASTC(info.format) && !runtime.device.IsOptimalAstcSupported()) { | 803 | if (IsPixelFormatASTC(info.format) && !runtime.device.IsOptimalAstcSupported()) { |
| 796 | flags |= VideoCommon::ImageFlagBits::Converted; | 804 | flags |= VideoCommon::ImageFlagBits::Converted; |
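With the upload and download paths split, the texture cache asks the staging pool for MemoryUsage::Upload when the CPU writes and the GPU reads, MemoryUsage::Download for GPU-to-CPU readbacks, and keeps image and buffer backing stores on MemoryUsage::DeviceLocal. A small sketch of that selection; TransferDirection and StagingUsageFor are illustrative names, not part of the codebase:

// Mirrors the MemoryUsage hints declared in vulkan_common/vulkan_memory_allocator.h.
enum class MemoryUsage { DeviceLocal, Upload, Download };

// Illustrative only: not a type from the codebase.
enum class TransferDirection { CpuToGpu, GpuToCpu };

// Coherent host-visible memory for uploads, cached host-visible memory for readbacks.
constexpr MemoryUsage StagingUsageFor(TransferDirection dir) {
    return dir == TransferDirection::CpuToGpu ? MemoryUsage::Upload : MemoryUsage::Download;
}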
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h index 92a7aad8b..a55d405d1 100644 --- a/src/video_core/renderer_vulkan/vk_texture_cache.h +++ b/src/video_core/renderer_vulkan/vk_texture_cache.h | |||
| @@ -7,8 +7,8 @@ | |||
| 7 | #include <compare> | 7 | #include <compare> |
| 8 | #include <span> | 8 | #include <span> |
| 9 | 9 | ||
| 10 | #include "video_core/renderer_vulkan/vk_memory_manager.h" | ||
| 11 | #include "video_core/texture_cache/texture_cache.h" | 10 | #include "video_core/texture_cache/texture_cache.h" |
| 11 | #include "video_core/vulkan_common/vulkan_memory_allocator.h" | ||
| 12 | #include "video_core/vulkan_common/vulkan_wrapper.h" | 12 | #include "video_core/vulkan_common/vulkan_wrapper.h" |
| 13 | 13 | ||
| 14 | namespace Vulkan { | 14 | namespace Vulkan { |
| @@ -19,14 +19,13 @@ using VideoCommon::Offset2D; | |||
| 19 | using VideoCommon::RenderTargets; | 19 | using VideoCommon::RenderTargets; |
| 20 | using VideoCore::Surface::PixelFormat; | 20 | using VideoCore::Surface::PixelFormat; |
| 21 | 21 | ||
| 22 | class VKScheduler; | ||
| 23 | class VKStagingBufferPool; | ||
| 24 | |||
| 25 | class BlitImageHelper; | 22 | class BlitImageHelper; |
| 26 | class Device; | 23 | class Device; |
| 27 | class Image; | 24 | class Image; |
| 28 | class ImageView; | 25 | class ImageView; |
| 29 | class Framebuffer; | 26 | class Framebuffer; |
| 27 | class StagingBufferPool; | ||
| 28 | class VKScheduler; | ||
| 30 | 29 | ||
| 31 | struct RenderPassKey { | 30 | struct RenderPassKey { |
| 32 | constexpr auto operator<=>(const RenderPassKey&) const noexcept = default; | 31 | constexpr auto operator<=>(const RenderPassKey&) const noexcept = default; |
| @@ -60,18 +59,18 @@ struct ImageBufferMap { | |||
| 60 | } | 59 | } |
| 61 | 60 | ||
| 62 | [[nodiscard]] std::span<u8> Span() const noexcept { | 61 | [[nodiscard]] std::span<u8> Span() const noexcept { |
| 63 | return map.Span(); | 62 | return span; |
| 64 | } | 63 | } |
| 65 | 64 | ||
| 66 | VkBuffer handle; | 65 | VkBuffer handle; |
| 67 | MemoryMap map; | 66 | std::span<u8> span; |
| 68 | }; | 67 | }; |
| 69 | 68 | ||
| 70 | struct TextureCacheRuntime { | 69 | struct TextureCacheRuntime { |
| 71 | const Device& device; | 70 | const Device& device; |
| 72 | VKScheduler& scheduler; | 71 | VKScheduler& scheduler; |
| 73 | VKMemoryManager& memory_manager; | 72 | MemoryAllocator& memory_allocator; |
| 74 | VKStagingBufferPool& staging_buffer_pool; | 73 | StagingBufferPool& staging_buffer_pool; |
| 75 | BlitImageHelper& blit_image_helper; | 74 | BlitImageHelper& blit_image_helper; |
| 76 | std::unordered_map<RenderPassKey, vk::RenderPass> renderpass_cache; | 75 | std::unordered_map<RenderPassKey, vk::RenderPass> renderpass_cache; |
| 77 | 76 | ||
| @@ -79,10 +78,7 @@ struct TextureCacheRuntime { | |||
| 79 | 78 | ||
| 80 | [[nodiscard]] ImageBufferMap MapUploadBuffer(size_t size); | 79 | [[nodiscard]] ImageBufferMap MapUploadBuffer(size_t size); |
| 81 | 80 | ||
| 82 | [[nodiscard]] ImageBufferMap MapDownloadBuffer(size_t size) { | 81 | [[nodiscard]] ImageBufferMap MapDownloadBuffer(size_t size); |
| 83 | // TODO: Have a special function for this | ||
| 84 | return MapUploadBuffer(size); | ||
| 85 | } | ||
| 86 | 82 | ||
| 87 | void BlitImage(Framebuffer* dst_framebuffer, ImageView& dst, ImageView& src, | 83 | void BlitImage(Framebuffer* dst_framebuffer, ImageView& dst, ImageView& src, |
| 88 | const std::array<Offset2D, 2>& dst_region, | 84 | const std::array<Offset2D, 2>& dst_region, |
| @@ -141,7 +137,7 @@ private: | |||
| 141 | VKScheduler* scheduler; | 137 | VKScheduler* scheduler; |
| 142 | vk::Image image; | 138 | vk::Image image; |
| 143 | vk::Buffer buffer; | 139 | vk::Buffer buffer; |
| 144 | VKMemoryCommit commit; | 140 | MemoryCommit commit; |
| 145 | VkImageAspectFlags aspect_mask = 0; | 141 | VkImageAspectFlags aspect_mask = 0; |
| 146 | bool initialized = false; | 142 | bool initialized = false; |
| 147 | }; | 143 | }; |
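ImageBufferMap now carries the staging buffer's mapped std::span<u8> directly instead of a MemoryMap object, so callers copy texel data into Span() and then record the VkBuffer handle in a transfer command. A hedged caller-side sketch with a simplified mirror of the struct (the Vulkan command recording itself is omitted):

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <span>

// Simplified mirror of ImageBufferMap: a buffer handle plus its host-visible mapping.
struct ImageBufferMapSketch {
    void* handle = nullptr;  // stands in for VkBuffer
    std::span<uint8_t> span; // mapped staging memory
};

void FillStaging(const ImageBufferMapSketch& map, std::span<const uint8_t> pixels) {
    // Copy the source texels into the mapped staging memory; the handle is then used
    // as the source of a buffer-to-image copy recorded on the GPU timeline.
    std::memcpy(map.span.data(), pixels.data(), std::min(map.span.size(), pixels.size()));
}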
diff --git a/src/video_core/texture_cache/accelerated_swizzle.cpp b/src/video_core/texture_cache/accelerated_swizzle.cpp index a4fc1184b..15585caeb 100644 --- a/src/video_core/texture_cache/accelerated_swizzle.cpp +++ b/src/video_core/texture_cache/accelerated_swizzle.cpp | |||
| @@ -27,7 +27,7 @@ BlockLinearSwizzle2DParams MakeBlockLinearSwizzle2DParams(const SwizzleParameter | |||
| 27 | const Extent3D num_tiles = swizzle.num_tiles; | 27 | const Extent3D num_tiles = swizzle.num_tiles; |
| 28 | const u32 bytes_per_block = BytesPerBlock(info.format); | 28 | const u32 bytes_per_block = BytesPerBlock(info.format); |
| 29 | const u32 stride_alignment = CalculateLevelStrideAlignment(info, swizzle.level); | 29 | const u32 stride_alignment = CalculateLevelStrideAlignment(info, swizzle.level); |
| 30 | const u32 stride = Common::AlignBits(num_tiles.width, stride_alignment) * bytes_per_block; | 30 | const u32 stride = Common::AlignUpLog2(num_tiles.width, stride_alignment) * bytes_per_block; |
| 31 | const u32 gobs_in_x = Common::DivCeilLog2(stride, GOB_SIZE_X_SHIFT); | 31 | const u32 gobs_in_x = Common::DivCeilLog2(stride, GOB_SIZE_X_SHIFT); |
| 32 | return BlockLinearSwizzle2DParams{ | 32 | return BlockLinearSwizzle2DParams{ |
| 33 | .origin{0, 0, 0}, | 33 | .origin{0, 0, 0}, |
| @@ -47,7 +47,7 @@ BlockLinearSwizzle3DParams MakeBlockLinearSwizzle3DParams(const SwizzleParameter | |||
| 47 | const Extent3D num_tiles = swizzle.num_tiles; | 47 | const Extent3D num_tiles = swizzle.num_tiles; |
| 48 | const u32 bytes_per_block = BytesPerBlock(info.format); | 48 | const u32 bytes_per_block = BytesPerBlock(info.format); |
| 49 | const u32 stride_alignment = CalculateLevelStrideAlignment(info, swizzle.level); | 49 | const u32 stride_alignment = CalculateLevelStrideAlignment(info, swizzle.level); |
| 50 | const u32 stride = Common::AlignBits(num_tiles.width, stride_alignment) * bytes_per_block; | 50 | const u32 stride = Common::AlignUpLog2(num_tiles.width, stride_alignment) * bytes_per_block; |
| 51 | 51 | ||
| 52 | const u32 gobs_in_x = (stride + GOB_SIZE_X - 1) >> GOB_SIZE_X_SHIFT; | 52 | const u32 gobs_in_x = (stride + GOB_SIZE_X - 1) >> GOB_SIZE_X_SHIFT; |
| 53 | const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block.height + block.depth); | 53 | const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block.height + block.depth); |
diff --git a/src/video_core/texture_cache/util.cpp b/src/video_core/texture_cache/util.cpp index 279932778..ce8fcfe0a 100644 --- a/src/video_core/texture_cache/util.cpp +++ b/src/video_core/texture_cache/util.cpp | |||
| @@ -279,7 +279,7 @@ template <u32 GOB_EXTENT> | |||
| 279 | const bool is_small = IsSmallerThanGobSize(blocks, gob, info.block.depth); | 279 | const bool is_small = IsSmallerThanGobSize(blocks, gob, info.block.depth); |
| 280 | const u32 alignment = is_small ? 0 : info.tile_width_spacing; | 280 | const u32 alignment = is_small ? 0 : info.tile_width_spacing; |
| 281 | return Extent2D{ | 281 | return Extent2D{ |
| 282 | .width = Common::AlignBits(gobs.width, alignment), | 282 | .width = Common::AlignUpLog2(gobs.width, alignment), |
| 283 | .height = gobs.height, | 283 | .height = gobs.height, |
| 284 | }; | 284 | }; |
| 285 | } | 285 | } |
| @@ -352,7 +352,7 @@ template <u32 GOB_EXTENT> | |||
| 352 | // https://github.com/Ryujinx/Ryujinx/blob/1c9aba6de1520aea5480c032e0ff5664ac1bb36f/Ryujinx.Graphics.Texture/SizeCalculator.cs#L134 | 352 | // https://github.com/Ryujinx/Ryujinx/blob/1c9aba6de1520aea5480c032e0ff5664ac1bb36f/Ryujinx.Graphics.Texture/SizeCalculator.cs#L134 |
| 353 | if (tile_width_spacing > 0) { | 353 | if (tile_width_spacing > 0) { |
| 354 | const u32 alignment_log2 = GOB_SIZE_SHIFT + tile_width_spacing + block.height + block.depth; | 354 | const u32 alignment_log2 = GOB_SIZE_SHIFT + tile_width_spacing + block.height + block.depth; |
| 355 | return Common::AlignBits(size_bytes, alignment_log2); | 355 | return Common::AlignUpLog2(size_bytes, alignment_log2); |
| 356 | } | 356 | } |
| 357 | const u32 aligned_height = Common::AlignUp(size.height, tile_size_y); | 357 | const u32 aligned_height = Common::AlignUp(size.height, tile_size_y); |
| 358 | while (block.height != 0 && aligned_height <= (1U << (block.height - 1)) * GOB_SIZE_Y) { | 358 | while (block.height != 0 && aligned_height <= (1U << (block.height - 1)) * GOB_SIZE_Y) { |
| @@ -528,9 +528,9 @@ template <u32 GOB_EXTENT> | |||
| 528 | const u32 alignment = StrideAlignment(num_tiles, info.block, bpp_log2, info.tile_width_spacing); | 528 | const u32 alignment = StrideAlignment(num_tiles, info.block, bpp_log2, info.tile_width_spacing); |
| 529 | const Extent3D mip_block = AdjustMipBlockSize(num_tiles, info.block, 0); | 529 | const Extent3D mip_block = AdjustMipBlockSize(num_tiles, info.block, 0); |
| 530 | return Extent3D{ | 530 | return Extent3D{ |
| 531 | .width = Common::AlignBits(num_tiles.width, alignment), | 531 | .width = Common::AlignUpLog2(num_tiles.width, alignment), |
| 532 | .height = Common::AlignBits(num_tiles.height, GOB_SIZE_Y_SHIFT + mip_block.height), | 532 | .height = Common::AlignUpLog2(num_tiles.height, GOB_SIZE_Y_SHIFT + mip_block.height), |
| 533 | .depth = Common::AlignBits(num_tiles.depth, GOB_SIZE_Z_SHIFT + mip_block.depth), | 533 | .depth = Common::AlignUpLog2(num_tiles.depth, GOB_SIZE_Z_SHIFT + mip_block.depth), |
| 534 | }; | 534 | }; |
| 535 | } | 535 | } |
| 536 | 536 | ||
diff --git a/src/video_core/textures/decoders.cpp b/src/video_core/textures/decoders.cpp index 9f5181318..62685a183 100644 --- a/src/video_core/textures/decoders.cpp +++ b/src/video_core/textures/decoders.cpp | |||
| @@ -49,7 +49,7 @@ void Swizzle(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixe | |||
| 49 | // We can configure here a custom pitch | 49 | // We can configure here a custom pitch |
| 50 | // As it's not exposed 'width * bpp' will be the expected pitch. | 50 | // As it's not exposed 'width * bpp' will be the expected pitch. |
| 51 | const u32 pitch = width * bytes_per_pixel; | 51 | const u32 pitch = width * bytes_per_pixel; |
| 52 | const u32 stride = Common::AlignBits(width, stride_alignment) * bytes_per_pixel; | 52 | const u32 stride = Common::AlignUpLog2(width, stride_alignment) * bytes_per_pixel; |
| 53 | 53 | ||
| 54 | const u32 gobs_in_x = Common::DivCeilLog2(stride, GOB_SIZE_X_SHIFT); | 54 | const u32 gobs_in_x = Common::DivCeilLog2(stride, GOB_SIZE_X_SHIFT); |
| 55 | const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height + block_depth); | 55 | const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height + block_depth); |
| @@ -217,9 +217,9 @@ void SwizzleKepler(const u32 width, const u32 height, const u32 dst_x, const u32 | |||
| 217 | std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height, u32 depth, | 217 | std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height, u32 depth, |
| 218 | u32 block_height, u32 block_depth) { | 218 | u32 block_height, u32 block_depth) { |
| 219 | if (tiled) { | 219 | if (tiled) { |
| 220 | const u32 aligned_width = Common::AlignBits(width * bytes_per_pixel, GOB_SIZE_X_SHIFT); | 220 | const u32 aligned_width = Common::AlignUpLog2(width * bytes_per_pixel, GOB_SIZE_X_SHIFT); |
| 221 | const u32 aligned_height = Common::AlignBits(height, GOB_SIZE_Y_SHIFT + block_height); | 221 | const u32 aligned_height = Common::AlignUpLog2(height, GOB_SIZE_Y_SHIFT + block_height); |
| 222 | const u32 aligned_depth = Common::AlignBits(depth, GOB_SIZE_Z_SHIFT + block_depth); | 222 | const u32 aligned_depth = Common::AlignUpLog2(depth, GOB_SIZE_Z_SHIFT + block_depth); |
| 223 | return aligned_width * aligned_height * aligned_depth; | 223 | return aligned_width * aligned_height * aligned_depth; |
| 224 | } else { | 224 | } else { |
| 225 | return width * height * depth * bytes_per_pixel; | 225 | return width * height * depth * bytes_per_pixel; |
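The swizzle and size calculations above moved from Common::AlignBits to the clearer Common::AlignUpLog2 name; the behavior is unchanged: round a value up to a multiple of 1 << align_log2. A worked example of the tiled stride computation in Swizzle/CalculateSize, assuming the usual 64-byte GOB row (GOB_SIZE_X_SHIFT of 6) and an illustrative stride_alignment of 6; align_up_log2 here is a local helper, not the Common implementation:

#include <cstdint>

// Illustrative helper with the same contract as Common::AlignUpLog2.
constexpr uint32_t align_up_log2(uint32_t value, uint32_t log2) {
    return ((value + (1u << log2) - 1) >> log2) << log2;
}

constexpr uint32_t GOB_SIZE_X_SHIFT = 6; // 64-byte GOB rows (assumption for this example)

// Width of 100 texels, 4 bytes per pixel, 64-texel stride alignment:
constexpr uint32_t stride = align_up_log2(100, 6) * 4; // 128 * 4 = 512 bytes

// Mirrors Common::DivCeilLog2(stride, GOB_SIZE_X_SHIFT): number of GOBs per row.
constexpr uint32_t gobs_in_x = (stride + (1u << GOB_SIZE_X_SHIFT) - 1) >> GOB_SIZE_X_SHIFT;

static_assert(stride == 512 && gobs_in_x == 8);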
diff --git a/src/video_core/vulkan_common/vulkan_debug_callback.h b/src/video_core/vulkan_common/vulkan_debug_callback.h index 2efcd244c..b0519f132 100644 --- a/src/video_core/vulkan_common/vulkan_debug_callback.h +++ b/src/video_core/vulkan_common/vulkan_debug_callback.h | |||
| @@ -2,6 +2,8 @@ | |||
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| 5 | #pragma once | ||
| 6 | |||
| 5 | #include "video_core/vulkan_common/vulkan_wrapper.h" | 7 | #include "video_core/vulkan_common/vulkan_wrapper.h" |
| 6 | 8 | ||
| 7 | namespace Vulkan { | 9 | namespace Vulkan { |
diff --git a/src/video_core/vulkan_common/vulkan_memory_allocator.cpp b/src/video_core/vulkan_common/vulkan_memory_allocator.cpp new file mode 100644 index 000000000..d6eb3af31 --- /dev/null +++ b/src/video_core/vulkan_common/vulkan_memory_allocator.cpp | |||
| @@ -0,0 +1,268 @@ | |||
| 1 | // Copyright 2018 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include <algorithm> | ||
| 6 | #include <bit> | ||
| 7 | #include <optional> | ||
| 8 | #include <vector> | ||
| 9 | |||
| 10 | #include "common/alignment.h" | ||
| 11 | #include "common/assert.h" | ||
| 12 | #include "common/common_types.h" | ||
| 13 | #include "common/logging/log.h" | ||
| 14 | #include "video_core/vulkan_common/vulkan_device.h" | ||
| 15 | #include "video_core/vulkan_common/vulkan_memory_allocator.h" | ||
| 16 | #include "video_core/vulkan_common/vulkan_wrapper.h" | ||
| 17 | |||
| 18 | namespace Vulkan { | ||
| 19 | namespace { | ||
| 20 | struct Range { | ||
| 21 | u64 begin; | ||
| 22 | u64 end; | ||
| 23 | |||
| 24 | [[nodiscard]] bool Contains(u64 iterator, u64 size) const noexcept { | ||
| 25 | return iterator < end && begin < iterator + size; | ||
| 26 | } | ||
| 27 | }; | ||
| 28 | |||
| 29 | [[nodiscard]] u64 AllocationChunkSize(u64 required_size) { | ||
| 30 | static constexpr std::array sizes{ | ||
| 31 | 0x1000ULL << 10, 0x1400ULL << 10, 0x1800ULL << 10, 0x1c00ULL << 10, 0x2000ULL << 10, | ||
| 32 | 0x3200ULL << 10, 0x4000ULL << 10, 0x6000ULL << 10, 0x8000ULL << 10, 0xA000ULL << 10, | ||
| 33 | 0x10000ULL << 10, 0x18000ULL << 10, 0x20000ULL << 10, | ||
| 34 | }; | ||
| 35 | static_assert(std::is_sorted(sizes.begin(), sizes.end())); | ||
| 36 | |||
| 37 | const auto it = std::ranges::lower_bound(sizes, required_size); | ||
| 38 | return it != sizes.end() ? *it : Common::AlignUp(required_size, 4ULL << 20); | ||
| 39 | } | ||
| 40 | |||
| 41 | [[nodiscard]] VkMemoryPropertyFlags MemoryUsagePropertyFlags(MemoryUsage usage) { | ||
| 42 | switch (usage) { | ||
| 43 | case MemoryUsage::DeviceLocal: | ||
| 44 | return VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; | ||
| 45 | case MemoryUsage::Upload: | ||
| 46 | return VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; | ||
| 47 | case MemoryUsage::Download: | ||
| 48 | return VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | | ||
| 49 | VK_MEMORY_PROPERTY_HOST_CACHED_BIT; | ||
| 50 | } | ||
| 51 | UNREACHABLE_MSG("Invalid memory usage={}", usage); | ||
| 52 | return VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; | ||
| 53 | } | ||
| 54 | } // Anonymous namespace | ||
| 55 | |||
| 56 | class MemoryAllocation { | ||
| 57 | public: | ||
| 58 | explicit MemoryAllocation(const Device& device_, vk::DeviceMemory memory_, | ||
| 59 | VkMemoryPropertyFlags properties, u64 allocation_size_, u32 type) | ||
| 60 | : device{device_}, memory{std::move(memory_)}, allocation_size{allocation_size_}, | ||
| 61 | property_flags{properties}, shifted_memory_type{1U << type} {} | ||
| 62 | |||
| 63 | [[nodiscard]] std::optional<MemoryCommit> Commit(VkDeviceSize size, VkDeviceSize alignment) { | ||
| 64 | const std::optional<u64> alloc = FindFreeRegion(size, alignment); | ||
| 65 | if (!alloc) { | ||
| 66 | // Signal out of memory, it'll try to do more allocations. | ||
| 67 | return std::nullopt; | ||
| 68 | } | ||
| 69 | const Range range{ | ||
| 70 | .begin = *alloc, | ||
| 71 | .end = *alloc + size, | ||
| 72 | }; | ||
| 73 | commits.insert(std::ranges::upper_bound(commits, *alloc, {}, &Range::begin), range); | ||
| 74 | return std::make_optional<MemoryCommit>(this, *memory, *alloc, *alloc + size); | ||
| 75 | } | ||
| 76 | |||
| 77 | void Free(u64 begin) { | ||
| 78 | const auto it = std::ranges::find(commits, begin, &Range::begin); | ||
| 79 | ASSERT_MSG(it != commits.end(), "Invalid commit"); | ||
| 80 | commits.erase(it); | ||
| 81 | } | ||
| 82 | |||
| 83 | [[nodiscard]] std::span<u8> Map() { | ||
| 84 | if (memory_mapped_span.empty()) { | ||
| 85 | u8* const raw_pointer = memory.Map(0, allocation_size); | ||
| 86 | memory_mapped_span = std::span<u8>(raw_pointer, allocation_size); | ||
| 87 | } | ||
| 88 | return memory_mapped_span; | ||
| 89 | } | ||
| 90 | |||
| 91 | /// Returns whether this allocation is compatible with the arguments. | ||
| 92 | [[nodiscard]] bool IsCompatible(VkMemoryPropertyFlags flags, u32 type_mask) const { | ||
| 93 | return (flags & property_flags) && (type_mask & shifted_memory_type) != 0; | ||
| 94 | } | ||
| 95 | |||
| 96 | private: | ||
| 97 | [[nodiscard]] static constexpr u32 ShiftType(u32 type) { | ||
| 98 | return 1U << type; | ||
| 99 | } | ||
| 100 | |||
| 101 | [[nodiscard]] std::optional<u64> FindFreeRegion(u64 size, u64 alignment) noexcept { | ||
| 102 | ASSERT(std::has_single_bit(alignment)); | ||
| 103 | const u64 alignment_log2 = std::countr_zero(alignment); | ||
| 104 | std::optional<u64> candidate; | ||
| 105 | u64 iterator = 0; | ||
| 106 | auto commit = commits.begin(); | ||
| 107 | while (iterator + size <= allocation_size) { | ||
| 108 | candidate = candidate.value_or(iterator); | ||
| 109 | if (commit == commits.end()) { | ||
| 110 | break; | ||
| 111 | } | ||
| 112 | if (commit->Contains(*candidate, size)) { | ||
| 113 | candidate = std::nullopt; | ||
| 114 | } | ||
| 115 | iterator = Common::AlignUpLog2(commit->end, alignment_log2); | ||
| 116 | ++commit; | ||
| 117 | } | ||
| 118 | return candidate; | ||
| 119 | } | ||
| 120 | |||
| 121 | const Device& device; ///< Vulkan device. | ||
| 122 | const vk::DeviceMemory memory; ///< Vulkan memory allocation handler. | ||
| 123 | const u64 allocation_size; ///< Size of this allocation. | ||
| 124 | const VkMemoryPropertyFlags property_flags; ///< Vulkan memory property flags. | ||
| 125 | const u32 shifted_memory_type; ///< Shifted Vulkan memory type. | ||
| 126 | std::vector<Range> commits; ///< All commit ranges done from this allocation. | ||
| 127 | std::span<u8> memory_mapped_span; ///< Memory mapped span. Empty if not queried before. | ||
| 128 | }; | ||
| 129 | |||
| 130 | MemoryCommit::MemoryCommit(MemoryAllocation* allocation_, VkDeviceMemory memory_, u64 begin_, | ||
| 131 | u64 end_) noexcept | ||
| 132 | : allocation{allocation_}, memory{memory_}, begin{begin_}, end{end_} {} | ||
| 133 | |||
| 134 | MemoryCommit::~MemoryCommit() { | ||
| 135 | Release(); | ||
| 136 | } | ||
| 137 | |||
| 138 | MemoryCommit& MemoryCommit::operator=(MemoryCommit&& rhs) noexcept { | ||
| 139 | Release(); | ||
| 140 | allocation = std::exchange(rhs.allocation, nullptr); | ||
| 141 | memory = rhs.memory; | ||
| 142 | begin = rhs.begin; | ||
| 143 | end = rhs.end; | ||
| 144 | span = std::exchange(rhs.span, std::span<u8>{}); | ||
| 145 | return *this; | ||
| 146 | } | ||
| 147 | |||
| 148 | MemoryCommit::MemoryCommit(MemoryCommit&& rhs) noexcept | ||
| 149 | : allocation{std::exchange(rhs.allocation, nullptr)}, memory{rhs.memory}, begin{rhs.begin}, | ||
| 150 | end{rhs.end}, span{std::exchange(rhs.span, std::span<u8>{})} {} | ||
| 151 | |||
| 152 | std::span<u8> MemoryCommit::Map() { | ||
| 153 | if (span.empty()) { | ||
| 154 | span = allocation->Map().subspan(begin, end - begin); | ||
| 155 | } | ||
| 156 | return span; | ||
| 157 | } | ||
| 158 | |||
| 159 | void MemoryCommit::Release() { | ||
| 160 | if (allocation) { | ||
| 161 | allocation->Free(begin); | ||
| 162 | } | ||
| 163 | } | ||
| 164 | |||
| 165 | MemoryAllocator::MemoryAllocator(const Device& device_) | ||
| 166 | : device{device_}, properties{device_.GetPhysical().GetMemoryProperties()} {} | ||
| 167 | |||
| 168 | MemoryAllocator::~MemoryAllocator() = default; | ||
| 169 | |||
| 170 | MemoryCommit MemoryAllocator::Commit(const VkMemoryRequirements& requirements, MemoryUsage usage) { | ||
| 171 | // Find the fastest memory flags we can afford with the current requirements | ||
| 172 | const VkMemoryPropertyFlags flags = MemoryPropertyFlags(requirements.memoryTypeBits, usage); | ||
| 173 | if (std::optional<MemoryCommit> commit = TryCommit(requirements, flags)) { | ||
| 174 | return std::move(*commit); | ||
| 175 | } | ||
| 176 | // Commit has failed, allocate more memory. | ||
| 177 | // TODO(Rodrigo): Handle out of memory situations in some way like flushing to guest memory. | ||
| 178 | AllocMemory(flags, requirements.memoryTypeBits, AllocationChunkSize(requirements.size)); | ||
| 179 | |||
| 180 | // Commit again, this time it won't fail since there's a fresh allocation above. | ||
| 181 | // If it does, there's a bug. | ||
| 182 | return TryCommit(requirements, flags).value(); | ||
| 183 | } | ||
| 184 | |||
| 185 | MemoryCommit MemoryAllocator::Commit(const vk::Buffer& buffer, MemoryUsage usage) { | ||
| 186 | auto commit = Commit(device.GetLogical().GetBufferMemoryRequirements(*buffer), usage); | ||
| 187 | buffer.BindMemory(commit.Memory(), commit.Offset()); | ||
| 188 | return commit; | ||
| 189 | } | ||
| 190 | |||
| 191 | MemoryCommit MemoryAllocator::Commit(const vk::Image& image, MemoryUsage usage) { | ||
| 192 | auto commit = Commit(device.GetLogical().GetImageMemoryRequirements(*image), usage); | ||
| 193 | image.BindMemory(commit.Memory(), commit.Offset()); | ||
| 194 | return commit; | ||
| 195 | } | ||
| 196 | |||
| 197 | void MemoryAllocator::AllocMemory(VkMemoryPropertyFlags flags, u32 type_mask, u64 size) { | ||
| 198 | const u32 type = FindType(flags, type_mask).value(); | ||
| 199 | vk::DeviceMemory memory = device.GetLogical().AllocateMemory({ | ||
| 200 | .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, | ||
| 201 | .pNext = nullptr, | ||
| 202 | .allocationSize = size, | ||
| 203 | .memoryTypeIndex = type, | ||
| 204 | }); | ||
| 205 | allocations.push_back( | ||
| 206 | std::make_unique<MemoryAllocation>(device, std::move(memory), flags, size, type)); | ||
| 207 | } | ||
| 208 | |||
| 209 | std::optional<MemoryCommit> MemoryAllocator::TryCommit(const VkMemoryRequirements& requirements, | ||
| 210 | VkMemoryPropertyFlags flags) { | ||
| 211 | for (auto& allocation : allocations) { | ||
| 212 | if (!allocation->IsCompatible(flags, requirements.memoryTypeBits)) { | ||
| 213 | continue; | ||
| 214 | } | ||
| 215 | if (auto commit = allocation->Commit(requirements.size, requirements.alignment)) { | ||
| 216 | return commit; | ||
| 217 | } | ||
| 218 | } | ||
| 219 | return std::nullopt; | ||
| 220 | } | ||
| 221 | |||
| 222 | VkMemoryPropertyFlags MemoryAllocator::MemoryPropertyFlags(u32 type_mask, MemoryUsage usage) const { | ||
| 223 | return MemoryPropertyFlags(type_mask, MemoryUsagePropertyFlags(usage)); | ||
| 224 | } | ||
| 225 | |||
| 226 | VkMemoryPropertyFlags MemoryAllocator::MemoryPropertyFlags(u32 type_mask, | ||
| 227 | VkMemoryPropertyFlags flags) const { | ||
| 228 | if (FindType(flags, type_mask)) { | ||
| 229 | // Found a memory type with those requirements | ||
| 230 | return flags; | ||
| 231 | } | ||
| 232 | if (flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) { | ||
| 233 | // Remove host cached bit in case it's not supported | ||
| 234 | return MemoryPropertyFlags(type_mask, flags & ~VK_MEMORY_PROPERTY_HOST_CACHED_BIT); | ||
| 235 | } | ||
| 236 | if (flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) { | ||
| 237 | // Remove device local, if it's not supported by the requested resource | ||
| 238 | return MemoryPropertyFlags(type_mask, flags & ~VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); | ||
| 239 | } | ||
| 240 | UNREACHABLE_MSG("No compatible memory types found"); | ||
| 241 | return 0; | ||
| 242 | } | ||
| 243 | |||
| 244 | std::optional<u32> MemoryAllocator::FindType(VkMemoryPropertyFlags flags, u32 type_mask) const { | ||
| 245 | for (u32 type_index = 0; type_index < properties.memoryTypeCount; ++type_index) { | ||
| 246 | const VkMemoryPropertyFlags type_flags = properties.memoryTypes[type_index].propertyFlags; | ||
| 247 | if ((type_mask & (1U << type_index)) && (type_flags & flags)) { | ||
| 248 | // The type matches in type and in the wanted properties. | ||
| 249 | return type_index; | ||
| 250 | } | ||
| 251 | } | ||
| 252 | // Failed to find index | ||
| 253 | return std::nullopt; | ||
| 254 | } | ||
| 255 | |||
| 256 | bool IsHostVisible(MemoryUsage usage) noexcept { | ||
| 257 | switch (usage) { | ||
| 258 | case MemoryUsage::DeviceLocal: | ||
| 259 | return false; | ||
| 260 | case MemoryUsage::Upload: | ||
| 261 | case MemoryUsage::Download: | ||
| 262 | return true; | ||
| 263 | } | ||
| 264 | UNREACHABLE_MSG("Invalid memory usage={}", usage); | ||
| 265 | return false; | ||
| 266 | } | ||
| 267 | |||
| 268 | } // namespace Vulkan | ||
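The core of the new allocator is MemoryAllocation::FindFreeRegion: commits are kept as ranges sorted by begin offset, and a candidate offset is advanced past each occupied range (aligned up) until a gap large enough for the request appears or the chunk is exhausted, at which point the caller allocates a fresh chunk sized by AllocationChunkSize. A standalone sketch of that first-fit scan, independent of the Vulkan types:

#include <bit>
#include <cstdint>
#include <optional>
#include <vector>

struct Range {
    uint64_t begin;
    uint64_t end;
    bool Contains(uint64_t iterator, uint64_t size) const {
        return iterator < end && begin < iterator + size;
    }
};

// First-fit search over commits sorted by begin, as in MemoryAllocation::FindFreeRegion.
// 'alignment' must be a non-zero power of two.
std::optional<uint64_t> FindFreeRegion(const std::vector<Range>& commits, uint64_t allocation_size,
                                       uint64_t size, uint64_t alignment) {
    const auto alignment_log2 = static_cast<uint64_t>(std::countr_zero(alignment));
    std::optional<uint64_t> candidate;
    uint64_t iterator = 0;
    auto commit = commits.begin();
    while (iterator + size <= allocation_size) {
        candidate = candidate.value_or(iterator);
        if (commit == commits.end()) {
            break; // no more occupied ranges; the current candidate fits
        }
        if (commit->Contains(*candidate, size)) {
            candidate = std::nullopt; // overlaps an existing commit, keep scanning
        }
        // Skip past the occupied range, rounded up to the requested alignment.
        iterator = ((commit->end + alignment - 1) >> alignment_log2) << alignment_log2;
        ++commit;
    }
    return candidate;
}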
diff --git a/src/video_core/vulkan_common/vulkan_memory_allocator.h b/src/video_core/vulkan_common/vulkan_memory_allocator.h new file mode 100644 index 000000000..9e6cfabf9 --- /dev/null +++ b/src/video_core/vulkan_common/vulkan_memory_allocator.h | |||
| @@ -0,0 +1,117 @@ | |||
| 1 | // Copyright 2019 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include <memory> | ||
| 8 | #include <span> | ||
| 9 | #include <utility> | ||
| 10 | #include <vector> | ||
| 11 | #include "common/common_types.h" | ||
| 12 | #include "video_core/vulkan_common/vulkan_wrapper.h" | ||
| 13 | |||
| 14 | namespace Vulkan { | ||
| 15 | |||
| 16 | class Device; | ||
| 17 | class MemoryMap; | ||
| 18 | class MemoryAllocation; | ||
| 19 | |||
| 20 | /// Hints and requirements for the backing memory type of a commit | ||
| 21 | enum class MemoryUsage { | ||
| 22 | DeviceLocal, ///< Hints device local usages, fastest memory type to read and write from the GPU | ||
| 23 | Upload, ///< Requires a host visible memory type optimized for CPU to GPU uploads | ||
| 24 | Download, ///< Requires a host visible memory type optimized for GPU to CPU readbacks | ||
| 25 | }; | ||
| 26 | |||
| 27 | /// Ownership handle of a memory commitment. | ||
| 28 | /// Points to a subregion of a memory allocation. | ||
| 29 | class MemoryCommit { | ||
| 30 | public: | ||
| 31 | explicit MemoryCommit() noexcept = default; | ||
| 32 | explicit MemoryCommit(MemoryAllocation* allocation_, VkDeviceMemory memory_, u64 begin_, | ||
| 33 | u64 end_) noexcept; | ||
| 34 | ~MemoryCommit(); | ||
| 35 | |||
| 36 | MemoryCommit& operator=(MemoryCommit&&) noexcept; | ||
| 37 | MemoryCommit(MemoryCommit&&) noexcept; | ||
| 38 | |||
| 39 | MemoryCommit& operator=(const MemoryCommit&) = delete; | ||
| 40 | MemoryCommit(const MemoryCommit&) = delete; | ||
| 41 | |||
| 42 | /// Returns a host visible memory map. | ||
| 43 | /// It will map the backing allocation if it hasn't been mapped before. | ||
| 44 | std::span<u8> Map(); | ||
| 45 | |||
| 46 | /// Returns the Vulkan memory handler. | ||
| 47 | VkDeviceMemory Memory() const { | ||
| 48 | return memory; | ||
| 49 | } | ||
| 50 | |||
| 51 | /// Returns the start position of the commit relative to the allocation. | ||
| 52 | VkDeviceSize Offset() const { | ||
| 53 | return static_cast<VkDeviceSize>(begin); | ||
| 54 | } | ||
| 55 | |||
| 56 | private: | ||
| 57 | void Release(); | ||
| 58 | |||
| 59 | MemoryAllocation* allocation{}; ///< Pointer to the large memory allocation. | ||
| 60 | VkDeviceMemory memory{}; ///< Vulkan device memory handler. | ||
| 61 | u64 begin{}; ///< Beginning offset in bytes to where the commit exists. | ||
| 62 | u64 end{}; ///< Offset in bytes where the commit ends. | ||
| 63 | std::span<u8> span; ///< Host visible memory span. Empty if not queried before. | ||
| 64 | }; | ||
| 65 | |||
| 66 | /// Memory allocator container. | ||
| 67 | /// Allocates and releases memory allocations on demand. | ||
| 68 | class MemoryAllocator { | ||
| 69 | public: | ||
| 70 | explicit MemoryAllocator(const Device& device_); | ||
| 71 | ~MemoryAllocator(); | ||
| 72 | |||
| 73 | MemoryAllocator& operator=(const MemoryAllocator&) = delete; | ||
| 74 | MemoryAllocator(const MemoryAllocator&) = delete; | ||
| 75 | |||
| 76 | /** | ||
| 77 | * Commits a memory with the specified requirements. | ||
| 78 | * | ||
| 79 | * @param requirements Requirements returned from a Vulkan call. | ||
| 80 | * @param usage Indicates how the memory will be used. | ||
| 81 | * | ||
| 82 | * @returns A memory commit. | ||
| 83 | */ | ||
| 84 | MemoryCommit Commit(const VkMemoryRequirements& requirements, MemoryUsage usage); | ||
| 85 | |||
| 86 | /// Commits memory required by the buffer and binds it. | ||
| 87 | MemoryCommit Commit(const vk::Buffer& buffer, MemoryUsage usage); | ||
| 88 | |||
| 89 | /// Commits memory required by the image and binds it. | ||
| 90 | MemoryCommit Commit(const vk::Image& image, MemoryUsage usage); | ||
| 91 | |||
| 92 | private: | ||
| 93 | /// Allocates a chunk of memory. | ||
| 94 | void AllocMemory(VkMemoryPropertyFlags flags, u32 type_mask, u64 size); | ||
| 95 | |||
| 96 | /// Tries to allocate a memory commit. | ||
| 97 | std::optional<MemoryCommit> TryCommit(const VkMemoryRequirements& requirements, | ||
| 98 | VkMemoryPropertyFlags flags); | ||
| 99 | |||
| 100 | /// Returns the fastest compatible memory property flags from a wanted usage. | ||
| 101 | VkMemoryPropertyFlags MemoryPropertyFlags(u32 type_mask, MemoryUsage usage) const; | ||
| 102 | |||
| 103 | /// Returns the fastest compatible memory property flags from the wanted flags. | ||
| 104 | VkMemoryPropertyFlags MemoryPropertyFlags(u32 type_mask, VkMemoryPropertyFlags flags) const; | ||
| 105 | |||
| 106 | /// Returns index to the fastest memory type compatible with the passed requirements. | ||
| 107 | std::optional<u32> FindType(VkMemoryPropertyFlags flags, u32 type_mask) const; | ||
| 108 | |||
| 109 | const Device& device; ///< Device handle. | ||
| 110 | const VkPhysicalDeviceMemoryProperties properties; ///< Physical device properties. | ||
| 111 | std::vector<std::unique_ptr<MemoryAllocation>> allocations; ///< Current allocations. | ||
| 112 | }; | ||
| 113 | |||
| 114 | /// Returns true when a memory usage is guaranteed to be host visible. | ||
| 115 | bool IsHostVisible(MemoryUsage usage) noexcept; | ||
| 116 | |||
| 117 | } // namespace Vulkan | ||
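MemoryCommit is a move-only RAII handle: its destructor calls back into the owning MemoryAllocation to free its [begin, end) range, and move assignment releases the currently held range before stealing the other's state, so commits can sit in caches such as StagingBuffer::commit or Image::commit and be dropped without explicit cleanup. A standalone sketch of that ownership pattern, with a simplified pool standing in for MemoryAllocation:

#include <cstdint>
#include <utility>
#include <vector>

// Simplified owner standing in for MemoryAllocation.
struct PoolSketch {
    void Free(uint64_t begin) { freed.push_back(begin); }
    std::vector<uint64_t> freed;
};

// Move-only commit handle mirroring the ownership rules of MemoryCommit.
class CommitSketch {
public:
    CommitSketch() noexcept = default;
    CommitSketch(PoolSketch* pool_, uint64_t begin_, uint64_t end_) noexcept
        : pool{pool_}, begin{begin_}, end{end_} {}
    ~CommitSketch() { Release(); }

    CommitSketch(CommitSketch&& rhs) noexcept
        : pool{std::exchange(rhs.pool, nullptr)}, begin{rhs.begin}, end{rhs.end} {}

    CommitSketch& operator=(CommitSketch&& rhs) noexcept {
        Release(); // give back the currently held range before taking over rhs
        pool = std::exchange(rhs.pool, nullptr);
        begin = rhs.begin;
        end = rhs.end;
        return *this;
    }

    CommitSketch(const CommitSketch&) = delete;
    CommitSketch& operator=(const CommitSketch&) = delete;

private:
    void Release() {
        if (pool != nullptr) {
            pool->Free(begin); // moved-from handles hold a null pool and free nothing
        }
    }

    PoolSketch* pool = nullptr;
    uint64_t begin = 0;
    uint64_t end = 0;
};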