author    liamwhite 2023-03-30 10:14:11 -0400
committer GitHub    2023-03-30 10:14:11 -0400
commit    fbf0a9c9762bf2a7ea4a4da3ea8419e32e15beba (patch)
tree      35641c900fa8bec31e87963fd7f90708f14c625a
parent    Merge pull request #9505 from liamwhite/request-exit (diff)
parent    Fixes 'Continous' typo (diff)
download  yuzu-fbf0a9c9762bf2a7ea4a4da3ea8419e32e15beba.tar.gz
          yuzu-fbf0a9c9762bf2a7ea4a4da3ea8419e32e15beba.tar.xz
          yuzu-fbf0a9c9762bf2a7ea4a4da3ea8419e32e15beba.zip
Merge pull request #10010 from maxdunbar/typo
Fix: 'Continous' typo
Diffstat
-rw-r--r--  src/common/range_map.h                         6
-rw-r--r--  src/tests/common/range_map.cpp                20
-rw-r--r--  src/video_core/buffer_cache/buffer_cache.h     2
-rw-r--r--  src/video_core/memory_manager.cpp             34
-rw-r--r--  src/video_core/memory_manager.h               12
-rw-r--r--  src/video_core/texture_cache/texture_cache.h   2
6 files changed, 38 insertions, 38 deletions
diff --git a/src/common/range_map.h b/src/common/range_map.h
index 79c7ef547..ab73993e3 100644
--- a/src/common/range_map.h
+++ b/src/common/range_map.h
@@ -38,12 +38,12 @@ public:
         Map(address, address_end, null_value);
     }

-    [[nodiscard]] size_t GetContinousSizeFrom(KeyTBase address) const {
+    [[nodiscard]] size_t GetContinuousSizeFrom(KeyTBase address) const {
         const KeyT new_address = static_cast<KeyT>(address);
         if (new_address < 0) {
             return 0;
         }
-        return ContinousSizeInternal(new_address);
+        return ContinuousSizeInternal(new_address);
     }

     [[nodiscard]] ValueT GetValueAt(KeyT address) const {
@@ -59,7 +59,7 @@ private:
     using IteratorType = typename MapType::iterator;
     using ConstIteratorType = typename MapType::const_iterator;

-    size_t ContinousSizeInternal(KeyT address) const {
+    size_t ContinuousSizeInternal(KeyT address) const {
         const auto it = GetFirstElementBeforeOrOn(address);
         if (it == container.end() || it->second == null_value) {
             return 0;
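
A note on the semantics being renamed here: GetContinuousSizeFrom(address) reports how many bytes stay mapped with the same value starting at address, and 0 if the address is unmapped. What follows is a minimal sketch of that idea over a plain std::map, for illustration only; ToyRangeMap, Kind, and the free function are hypothetical stand-ins, not yuzu's Common::RangeMap, which also performs the signed/unsigned key conversion visible in the hunk above.

// Minimal sketch only: a std::map-based analogue of GetContinuousSizeFrom.
// ToyRangeMap, Kind, and main are hypothetical illustration names.
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <map>

enum class Kind : std::uint32_t { Invalid, Valid_1, Valid_2 };

// Each entry maps a start address to the value held until the next entry.
using ToyRangeMap = std::map<std::uint64_t, Kind>;

std::size_t GetContinuousSizeFrom(const ToyRangeMap& map, std::uint64_t address) {
    const auto next = map.upper_bound(address); // first boundary after address
    if (next == map.begin() || next == map.end()) {
        return 0; // before the first mapping, or no boundary after it (unmapped in this toy)
    }
    const auto current = std::prev(next); // entry whose range covers address
    if (current->second == Kind::Invalid) {
        return 0; // address falls in an unmapped hole
    }
    return static_cast<std::size_t>(next->first - address);
}

int main() {
    // 0x1000-0x2000 is Valid_1, 0x2000-0x3000 is Valid_2, everything else unmapped.
    const ToyRangeMap map{
        {0x0000, Kind::Invalid},
        {0x1000, Kind::Valid_1},
        {0x2000, Kind::Valid_2},
        {0x3000, Kind::Invalid},
    };
    // 0x1800 sits 0x800 bytes before the Valid_1/Valid_2 boundary.
    return GetContinuousSizeFrom(map, 0x1800) == 0x800 ? 0 : 1;
}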
diff --git a/src/tests/common/range_map.cpp b/src/tests/common/range_map.cpp
index d301ac5f6..faaefd49f 100644
--- a/src/tests/common/range_map.cpp
+++ b/src/tests/common/range_map.cpp
@@ -21,9 +21,9 @@ TEST_CASE("Range Map: Setup", "[video_core]") {
     my_map.Map(4000, 4500, MappedEnum::Valid_2);
     my_map.Map(4200, 4400, MappedEnum::Valid_2);
     my_map.Map(4200, 4400, MappedEnum::Valid_1);
-    REQUIRE(my_map.GetContinousSizeFrom(4200) == 200);
-    REQUIRE(my_map.GetContinousSizeFrom(3000) == 200);
-    REQUIRE(my_map.GetContinousSizeFrom(2900) == 0);
+    REQUIRE(my_map.GetContinuousSizeFrom(4200) == 200);
+    REQUIRE(my_map.GetContinuousSizeFrom(3000) == 200);
+    REQUIRE(my_map.GetContinuousSizeFrom(2900) == 0);

     REQUIRE(my_map.GetValueAt(2900) == MappedEnum::Invalid);
     REQUIRE(my_map.GetValueAt(3100) == MappedEnum::Valid_1);
@@ -38,20 +38,20 @@ TEST_CASE("Range Map: Setup", "[video_core]") {

     my_map.Unmap(0, 6000);
     for (u64 address = 0; address < 10000; address += 1000) {
-        REQUIRE(my_map.GetContinousSizeFrom(address) == 0);
+        REQUIRE(my_map.GetContinuousSizeFrom(address) == 0);
     }

     my_map.Map(1000, 3000, MappedEnum::Valid_1);
     my_map.Map(4000, 5000, MappedEnum::Valid_1);
     my_map.Map(2500, 4100, MappedEnum::Valid_1);
-    REQUIRE(my_map.GetContinousSizeFrom(1000) == 4000);
+    REQUIRE(my_map.GetContinuousSizeFrom(1000) == 4000);

     my_map.Map(1000, 3000, MappedEnum::Valid_1);
     my_map.Map(4000, 5000, MappedEnum::Valid_2);
     my_map.Map(2500, 4100, MappedEnum::Valid_3);
-    REQUIRE(my_map.GetContinousSizeFrom(1000) == 1500);
-    REQUIRE(my_map.GetContinousSizeFrom(2500) == 1600);
-    REQUIRE(my_map.GetContinousSizeFrom(4100) == 900);
+    REQUIRE(my_map.GetContinuousSizeFrom(1000) == 1500);
+    REQUIRE(my_map.GetContinuousSizeFrom(2500) == 1600);
+    REQUIRE(my_map.GetContinuousSizeFrom(4100) == 900);
     REQUIRE(my_map.GetValueAt(900) == MappedEnum::Invalid);
     REQUIRE(my_map.GetValueAt(1000) == MappedEnum::Valid_1);
     REQUIRE(my_map.GetValueAt(2500) == MappedEnum::Valid_3);
@@ -59,8 +59,8 @@ TEST_CASE("Range Map: Setup", "[video_core]") {
     REQUIRE(my_map.GetValueAt(5000) == MappedEnum::Invalid);

     my_map.Map(2000, 6000, MappedEnum::Valid_3);
-    REQUIRE(my_map.GetContinousSizeFrom(1000) == 1000);
-    REQUIRE(my_map.GetContinousSizeFrom(3000) == 3000);
+    REQUIRE(my_map.GetContinuousSizeFrom(1000) == 1000);
+    REQUIRE(my_map.GetContinuousSizeFrom(3000) == 3000);
     REQUIRE(my_map.GetValueAt(1000) == MappedEnum::Valid_1);
     REQUIRE(my_map.GetValueAt(1999) == MappedEnum::Valid_1);
     REQUIRE(my_map.GetValueAt(1500) == MappedEnum::Valid_1);
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 1f656ffa8..abdc593df 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -1442,7 +1442,7 @@ void BufferCache<P>::UpdateVertexBuffer(u32 index) {
     }
     if (!gpu_memory->IsWithinGPUAddressRange(gpu_addr_end)) {
         address_size =
-            static_cast<u32>(gpu_memory->MaxContinousRange(gpu_addr_begin, address_size));
+            static_cast<u32>(gpu_memory->MaxContinuousRange(gpu_addr_begin, address_size));
     }
     const u32 size = address_size; // TODO: Analyze stride and number of vertices
     vertex_buffers[index] = Binding{
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index 015a7d3c1..01fb5b546 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -43,7 +43,7 @@ MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64

     big_entries.resize(big_page_table_size / 32, 0);
     big_page_table_cpu.resize(big_page_table_size);
-    big_page_continous.resize(big_page_table_size / continous_bits, 0);
+    big_page_continuous.resize(big_page_table_size / continuous_bits, 0);
     entries.resize(page_table_size / 32, 0);
 }

@@ -85,17 +85,17 @@ PTEKind MemoryManager::GetPageKind(GPUVAddr gpu_addr) const {
     return kind_map.GetValueAt(gpu_addr);
 }

-inline bool MemoryManager::IsBigPageContinous(size_t big_page_index) const {
-    const u64 entry_mask = big_page_continous[big_page_index / continous_bits];
-    const size_t sub_index = big_page_index % continous_bits;
+inline bool MemoryManager::IsBigPageContinuous(size_t big_page_index) const {
+    const u64 entry_mask = big_page_continuous[big_page_index / continuous_bits];
+    const size_t sub_index = big_page_index % continuous_bits;
     return ((entry_mask >> sub_index) & 0x1ULL) != 0;
 }

-inline void MemoryManager::SetBigPageContinous(size_t big_page_index, bool value) {
-    const u64 continous_mask = big_page_continous[big_page_index / continous_bits];
-    const size_t sub_index = big_page_index % continous_bits;
-    big_page_continous[big_page_index / continous_bits] =
-        (~(1ULL << sub_index) & continous_mask) | (value ? 1ULL << sub_index : 0);
+inline void MemoryManager::SetBigPageContinuous(size_t big_page_index, bool value) {
+    const u64 continuous_mask = big_page_continuous[big_page_index / continuous_bits];
+    const size_t sub_index = big_page_index % continuous_bits;
+    big_page_continuous[big_page_index / continuous_bits] =
+        (~(1ULL << sub_index) & continuous_mask) | (value ? 1ULL << sub_index : 0);
 }

 template <MemoryManager::EntryType entry_type>
@@ -140,7 +140,7 @@ GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr
             const auto index = PageEntryIndex<true>(current_gpu_addr);
             const u32 sub_value = static_cast<u32>(current_cpu_addr >> cpu_page_bits);
             big_page_table_cpu[index] = sub_value;
-            const bool is_continous = ([&] {
+            const bool is_continuous = ([&] {
                 uintptr_t base_ptr{
                     reinterpret_cast<uintptr_t>(memory.GetPointerSilent(current_cpu_addr))};
                 if (base_ptr == 0) {
@@ -156,7 +156,7 @@ GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr
                 }
                 return true;
             })();
-            SetBigPageContinous(index, is_continous);
+            SetBigPageContinuous(index, is_continuous);
         }
         remaining_size -= big_page_size;
     }
@@ -378,7 +378,7 @@ void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std:
         if constexpr (is_safe) {
             rasterizer->FlushRegion(cpu_addr_base, copy_amount, which);
         }
-        if (!IsBigPageContinous(page_index)) [[unlikely]] {
+        if (!IsBigPageContinuous(page_index)) [[unlikely]] {
             memory.ReadBlockUnsafe(cpu_addr_base, dest_buffer, copy_amount);
         } else {
             u8* physical = memory.GetPointer(cpu_addr_base);
@@ -427,7 +427,7 @@ void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffe
         if constexpr (is_safe) {
             rasterizer->InvalidateRegion(cpu_addr_base, copy_amount, which);
         }
-        if (!IsBigPageContinous(page_index)) [[unlikely]] {
+        if (!IsBigPageContinuous(page_index)) [[unlikely]] {
             memory.WriteBlockUnsafe(cpu_addr_base, src_buffer, copy_amount);
         } else {
             u8* physical = memory.GetPointer(cpu_addr_base);
@@ -512,7 +512,7 @@ bool MemoryManager::IsMemoryDirty(GPUVAddr gpu_addr, size_t size,
     return result;
 }

-size_t MemoryManager::MaxContinousRange(GPUVAddr gpu_addr, size_t size) const {
+size_t MemoryManager::MaxContinuousRange(GPUVAddr gpu_addr, size_t size) const {
     std::optional<VAddr> old_page_addr{};
     size_t range_so_far = 0;
     bool result{false};
@@ -553,7 +553,7 @@ size_t MemoryManager::MaxContinousRange(GPUVAddr gpu_addr, size_t size) const {
553} 553}
554 554
555size_t MemoryManager::GetMemoryLayoutSize(GPUVAddr gpu_addr, size_t max_size) const { 555size_t MemoryManager::GetMemoryLayoutSize(GPUVAddr gpu_addr, size_t max_size) const {
556 return kind_map.GetContinousSizeFrom(gpu_addr); 556 return kind_map.GetContinuousSizeFrom(gpu_addr);
557} 557}
558 558
559void MemoryManager::InvalidateRegion(GPUVAddr gpu_addr, size_t size, 559void MemoryManager::InvalidateRegion(GPUVAddr gpu_addr, size_t size,
@@ -594,7 +594,7 @@ void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std
 bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const {
     if (GetEntry<true>(gpu_addr) == EntryType::Mapped) [[likely]] {
         size_t page_index = gpu_addr >> big_page_bits;
-        if (IsBigPageContinous(page_index)) [[likely]] {
+        if (IsBigPageContinuous(page_index)) [[likely]] {
             const std::size_t page{(page_index & big_page_mask) + size};
             return page <= big_page_size;
         }
@@ -608,7 +608,7 @@ bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const {
     return page <= Core::Memory::YUZU_PAGESIZE;
 }

-bool MemoryManager::IsContinousRange(GPUVAddr gpu_addr, std::size_t size) const {
+bool MemoryManager::IsContinuousRange(GPUVAddr gpu_addr, std::size_t size) const {
     std::optional<VAddr> old_page_addr{};
     bool result{true};
     auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset,
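
For context on the memory_manager.cpp rename: IsBigPageContinuous and SetBigPageContinuous implement a plain bit set with one bit per big page, packed 64 to a u64 word (continuous_bits in memory_manager.h). Below is a self-contained sketch of the same clear-then-set pattern; the field name, word width, and bit arithmetic mirror the diff, while BigPageContinuityBits itself is a hypothetical wrapper invented for illustration.

// Standalone sketch of the bookkeeping this diff renames; not a yuzu class.
#include <cstddef>
#include <cstdint>
#include <vector>

class BigPageContinuityBits {
public:
    explicit BigPageContinuityBits(std::size_t num_big_pages)
        : big_page_continuous((num_big_pages + continuous_bits - 1) / continuous_bits, 0) {}

    // Read the bit for one big page: word index, then bit index inside the word.
    bool IsBigPageContinuous(std::size_t big_page_index) const {
        const std::uint64_t entry_mask = big_page_continuous[big_page_index / continuous_bits];
        const std::size_t sub_index = big_page_index % continuous_bits;
        return ((entry_mask >> sub_index) & 0x1ULL) != 0;
    }

    // Clear the bit unconditionally, then OR it back in only if value is true.
    void SetBigPageContinuous(std::size_t big_page_index, bool value) {
        const std::uint64_t continuous_mask = big_page_continuous[big_page_index / continuous_bits];
        const std::size_t sub_index = big_page_index % continuous_bits;
        big_page_continuous[big_page_index / continuous_bits] =
            (~(1ULL << sub_index) & continuous_mask) | (value ? 1ULL << sub_index : 0);
    }

private:
    static constexpr std::size_t continuous_bits = 64;
    std::vector<std::uint64_t> big_page_continuous;
};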
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index 51ae2de68..fbbe856c4 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -94,7 +94,7 @@ public:
     /**
      * Checks if a gpu region is mapped by a single range of cpu addresses.
      */
-    [[nodiscard]] bool IsContinousRange(GPUVAddr gpu_addr, std::size_t size) const;
+    [[nodiscard]] bool IsContinuousRange(GPUVAddr gpu_addr, std::size_t size) const;

     /**
      * Checks if a gpu region is mapped entirely.
@@ -123,7 +123,7 @@ public:
     bool IsMemoryDirty(GPUVAddr gpu_addr, size_t size,
                        VideoCommon::CacheType which = VideoCommon::CacheType::All) const;

-    size_t MaxContinousRange(GPUVAddr gpu_addr, size_t size) const;
+    size_t MaxContinuousRange(GPUVAddr gpu_addr, size_t size) const;

     bool IsWithinGPUAddressRange(GPUVAddr gpu_addr) const {
         return gpu_addr < address_space_size;
@@ -158,8 +158,8 @@ private:
         }
     }

-    inline bool IsBigPageContinous(size_t big_page_index) const;
-    inline void SetBigPageContinous(size_t big_page_index, bool value);
+    inline bool IsBigPageContinuous(size_t big_page_index) const;
+    inline void SetBigPageContinuous(size_t big_page_index, bool value);

     template <bool is_gpu_address>
     void GetSubmappedRangeImpl(
@@ -213,10 +213,10 @@ private:
     Common::RangeMap<GPUVAddr, PTEKind> kind_map;
     Common::VirtualBuffer<u32> big_page_table_cpu;

-    std::vector<u64> big_page_continous;
+    std::vector<u64> big_page_continuous;
     std::vector<std::pair<VAddr, std::size_t>> page_stash{};

-    static constexpr size_t continous_bits = 64;
+    static constexpr size_t continuous_bits = 64;

     const size_t unique_identifier;
     std::unique_ptr<VideoCommon::InvalidationAccumulator> accumulator;
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 858449af8..63821d496 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -1269,7 +1269,7 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
     const ImageId new_image_id = slot_images.insert(runtime, new_info, gpu_addr, cpu_addr);
     Image& new_image = slot_images[new_image_id];

-    if (!gpu_memory->IsContinousRange(new_image.gpu_addr, new_image.guest_size_bytes)) {
+    if (!gpu_memory->IsContinuousRange(new_image.gpu_addr, new_image.guest_size_bytes)) {
         new_image.flags |= ImageFlagBits::Sparse;
     }
