Diffstat (limited to 'src')

 src/CMakeLists.txt                                    |   5
 src/audio_core/CMakeLists.txt                         |  19
 src/core/CMakeLists.txt                               |   1
 src/core/hid/emulated_console.cpp                     |   5
 src/core/hle/kernel/k_code_memory.cpp                 |  29
 src/core/hle/kernel/k_code_memory.h                   |   6
 src/core/hle/kernel/k_memory_manager.cpp              |   8
 src/core/hle/kernel/k_page_group.cpp                  | 121
 src/core/hle/kernel/k_page_group.h                    | 163
 src/core/hle/kernel/k_page_table.cpp                  | 142
 src/core/hle/kernel/k_page_table.h                    |   9
 src/core/hle/kernel/k_shared_memory.cpp               |  19
 src/core/hle/kernel/memory_types.h                    |   3
 src/core/hle/kernel/svc.cpp                           |   2
 src/input_common/CMakeLists.txt                       |  15
 src/input_common/main.cpp                             |  24
 src/video_core/buffer_cache/buffer_cache.h            |   7
 src/video_core/host_shaders/vulkan_quad_indexed.comp  |   6
 src/video_core/renderer_vulkan/maxwell_to_vk.cpp      |  23
 src/video_core/renderer_vulkan/vk_buffer_cache.cpp    | 299
 src/video_core/renderer_vulkan/vk_buffer_cache.h      |  13
 src/video_core/renderer_vulkan/vk_compute_pass.cpp    |  12
 src/video_core/renderer_vulkan/vk_compute_pass.h      |   2
 src/video_core/renderer_vulkan/vk_rasterizer.cpp      |   8
 src/yuzu/configuration/config.cpp                     |   2

 25 files changed, 621 insertions(+), 322 deletions(-)
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 140415474..c7283e82c 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -161,7 +161,10 @@ add_subdirectory(video_core)
 add_subdirectory(network)
 add_subdirectory(input_common)
 add_subdirectory(shader_recompiler)
-add_subdirectory(dedicated_room)
+
+if (YUZU_ROOM)
+    add_subdirectory(dedicated_room)
+endif()
 
 if (YUZU_TESTS)
     add_subdirectory(tests)
diff --git a/src/audio_core/CMakeLists.txt b/src/audio_core/CMakeLists.txt
index 420ba62e0..e7b595459 100644
--- a/src/audio_core/CMakeLists.txt
+++ b/src/audio_core/CMakeLists.txt
@@ -187,11 +187,7 @@ add_library(audio_core STATIC
     renderer/voice/voice_info.cpp
     renderer/voice/voice_info.h
     renderer/voice/voice_state.h
-    sink/cubeb_sink.cpp
-    sink/cubeb_sink.h
     sink/null_sink.h
-    sink/sdl2_sink.cpp
-    sink/sdl2_sink.h
     sink/sink.h
     sink/sink_details.cpp
     sink/sink_details.h
@@ -222,11 +218,22 @@ if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64)
     target_link_libraries(audio_core PRIVATE dynarmic::dynarmic)
 endif()
 
-if(ENABLE_CUBEB)
+if (ENABLE_CUBEB)
+    target_sources(audio_core PRIVATE
+        sink/cubeb_sink.cpp
+        sink/cubeb_sink.h
+    )
+
     target_link_libraries(audio_core PRIVATE cubeb::cubeb)
     target_compile_definitions(audio_core PRIVATE -DHAVE_CUBEB=1)
 endif()
-if(ENABLE_SDL2)
+
+if (ENABLE_SDL2)
+    target_sources(audio_core PRIVATE
+        sink/sdl2_sink.cpp
+        sink/sdl2_sink.h
+    )
+
     target_link_libraries(audio_core PRIVATE SDL2::SDL2)
     target_compile_definitions(audio_core PRIVATE HAVE_SDL2)
 endif()
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 0252c8c31..5afdeb5ff 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -226,6 +226,7 @@ add_library(core STATIC
     hle/kernel/k_page_buffer.h
     hle/kernel/k_page_heap.cpp
     hle/kernel/k_page_heap.h
+    hle/kernel/k_page_group.cpp
     hle/kernel/k_page_group.h
     hle/kernel/k_page_table.cpp
     hle/kernel/k_page_table.h
diff --git a/src/core/hid/emulated_console.cpp b/src/core/hid/emulated_console.cpp
index 30c2e9d17..1c91bbe40 100644
--- a/src/core/hid/emulated_console.cpp
+++ b/src/core/hid/emulated_console.cpp
@@ -40,6 +40,11 @@ void EmulatedConsole::SetTouchParams() {
         touch_params[index++] = std::move(touchscreen_param);
     }
 
+    if (Settings::values.touch_from_button_maps.empty()) {
+        LOG_WARNING(Input, "touch_from_button_maps is unset by frontend config");
+        return;
+    }
+
     const auto button_index =
         static_cast<u64>(Settings::values.touch_from_button_map_index.GetValue());
     const auto& touch_buttons = Settings::values.touch_from_button_maps[button_index].buttons;
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index 4b1c134d4..d9da1e600 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -27,13 +27,13 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
     auto& page_table = m_owner->PageTable();
 
     // Construct the page group.
-    m_page_group = {};
+    m_page_group.emplace(kernel, page_table.GetBlockInfoManager());
 
     // Lock the memory.
-    R_TRY(page_table.LockForCodeMemory(&m_page_group, addr, size))
+    R_TRY(page_table.LockForCodeMemory(std::addressof(*m_page_group), addr, size))
 
     // Clear the memory.
-    for (const auto& block : m_page_group.Nodes()) {
+    for (const auto& block : *m_page_group) {
         std::memset(device_memory.GetPointer<void>(block.GetAddress()), 0xFF, block.GetSize());
     }
 
@@ -51,12 +51,13 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
 void KCodeMemory::Finalize() {
     // Unlock.
     if (!m_is_mapped && !m_is_owner_mapped) {
-        const size_t size = m_page_group.GetNumPages() * PageSize;
-        m_owner->PageTable().UnlockForCodeMemory(m_address, size, m_page_group);
+        const size_t size = m_page_group->GetNumPages() * PageSize;
+        m_owner->PageTable().UnlockForCodeMemory(m_address, size, *m_page_group);
     }
 
     // Close the page group.
-    m_page_group = {};
+    m_page_group->Close();
+    m_page_group->Finalize();
 
     // Close our reference to our owner.
     m_owner->Close();
@@ -64,7 +65,7 @@ void KCodeMemory::Finalize() {
 
 Result KCodeMemory::Map(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
     // Lock ourselves.
     KScopedLightLock lk(m_lock);
@@ -74,7 +75,7 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
 
     // Map the memory.
     R_TRY(kernel.CurrentProcess()->PageTable().MapPages(
-        address, m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));
+        address, *m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));
 
     // Mark ourselves as mapped.
     m_is_mapped = true;
@@ -84,13 +85,13 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
 
 Result KCodeMemory::Unmap(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
     // Lock ourselves.
     KScopedLightLock lk(m_lock);
 
     // Unmap the memory.
-    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, m_page_group,
+    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, *m_page_group,
                                                           KMemoryState::CodeOut));
 
     // Mark ourselves as unmapped.
@@ -101,7 +102,7 @@ Result KCodeMemory::Unmap(VAddr address, size_t size) {
 
 Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
     // Lock ourselves.
     KScopedLightLock lk(m_lock);
@@ -125,7 +126,7 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission
 
     // Map the memory.
     R_TRY(
-        m_owner->PageTable().MapPages(address, m_page_group, KMemoryState::GeneratedCode, k_perm));
+        m_owner->PageTable().MapPages(address, *m_page_group, KMemoryState::GeneratedCode, k_perm));
 
     // Mark ourselves as mapped.
     m_is_owner_mapped = true;
@@ -135,13 +136,13 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission
 
 Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
     // Lock ourselves.
     KScopedLightLock lk(m_lock);
 
     // Unmap the memory.
-    R_TRY(m_owner->PageTable().UnmapPages(address, m_page_group, KMemoryState::GeneratedCode));
+    R_TRY(m_owner->PageTable().UnmapPages(address, *m_page_group, KMemoryState::GeneratedCode));
 
     // Mark ourselves as unmapped.
     m_is_owner_mapped = false;
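The changes above follow from KPageGroup no longer being default-constructible: it now needs a KernelCore reference and a KBlockInfoManager, so KCodeMemory holds it in a std::optional and emplaces it during Initialize(). A minimal standalone sketch of that deferred-construction pattern, using simplified stand-in types rather than yuzu's real classes:

    #include <cassert>
    #include <optional>

    // Stand-in types for the sketch; the real KPageGroup constructor takes
    // a KernelCore& and a KBlockInfoManager*.
    struct Manager {};

    struct PageGroup {
        explicit PageGroup(Manager& m) : manager{&m} {}
        Manager* manager;
    };

    struct Holder {
        std::optional<PageGroup> group; // stays empty until Initialize() runs

        void Initialize(Manager& m) {
            group.emplace(m); // construct in place, as m_page_group.emplace(...) does
        }
    };

    int main() {
        Manager mgr;
        Holder h;
        assert(!h.group.has_value());
        h.Initialize(mgr);
        assert(h.group.has_value());
    }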
diff --git a/src/core/hle/kernel/k_code_memory.h b/src/core/hle/kernel/k_code_memory.h
index 2e7e1436a..5b260b385 100644
--- a/src/core/hle/kernel/k_code_memory.h
+++ b/src/core/hle/kernel/k_code_memory.h
@@ -3,6 +3,8 @@
 
 #pragma once
 
+#include <optional>
+
 #include "common/common_types.h"
 #include "core/device_memory.h"
 #include "core/hle/kernel/k_auto_object.h"
@@ -49,11 +51,11 @@ public:
         return m_address;
     }
     size_t GetSize() const {
-        return m_is_initialized ? m_page_group.GetNumPages() * PageSize : 0;
+        return m_is_initialized ? m_page_group->GetNumPages() * PageSize : 0;
     }
 
 private:
-    KPageGroup m_page_group{};
+    std::optional<KPageGroup> m_page_group{};
     KProcess* m_owner{};
     VAddr m_address{};
     KLightLock m_lock;
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index bd33571da..cd6ea388e 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -223,7 +223,7 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,
 
     // Ensure that we don't leave anything un-freed.
     ON_RESULT_FAILURE {
-        for (const auto& it : out->Nodes()) {
+        for (const auto& it : *out) {
             auto& manager = this->GetManager(it.GetAddress());
             const size_t node_num_pages = std::min<u64>(
                 it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize);
@@ -285,7 +285,7 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
         m_has_optimized_process[static_cast<size_t>(pool)], true));
 
     // Open the first reference to the pages.
-    for (const auto& block : out->Nodes()) {
+    for (const auto& block : *out) {
         PAddr cur_address = block.GetAddress();
         size_t remaining_pages = block.GetNumPages();
         while (remaining_pages > 0) {
@@ -335,7 +335,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
     // Perform optimized memory tracking, if we should.
     if (optimized) {
         // Iterate over the allocated blocks.
-        for (const auto& block : out->Nodes()) {
+        for (const auto& block : *out) {
             // Get the block extents.
             const PAddr block_address = block.GetAddress();
             const size_t block_pages = block.GetNumPages();
@@ -391,7 +391,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
         }
     } else {
         // Set all the allocated memory.
-        for (const auto& block : out->Nodes()) {
+        for (const auto& block : *out) {
             std::memset(m_system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
                         block.GetSize());
         }
diff --git a/src/core/hle/kernel/k_page_group.cpp b/src/core/hle/kernel/k_page_group.cpp
new file mode 100644
index 000000000..d8c644a33
--- /dev/null
+++ b/src/core/hle/kernel/k_page_group.cpp
@@ -0,0 +1,121 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/k_dynamic_resource_manager.h"
+#include "core/hle/kernel/k_memory_manager.h"
+#include "core/hle/kernel/k_page_group.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel {
+
+void KPageGroup::Finalize() {
+    KBlockInfo* cur = m_first_block;
+    while (cur != nullptr) {
+        KBlockInfo* next = cur->GetNext();
+        m_manager->Free(cur);
+        cur = next;
+    }
+
+    m_first_block = nullptr;
+    m_last_block = nullptr;
+}
+
+void KPageGroup::CloseAndReset() {
+    auto& mm = m_kernel.MemoryManager();
+
+    KBlockInfo* cur = m_first_block;
+    while (cur != nullptr) {
+        KBlockInfo* next = cur->GetNext();
+        mm.Close(cur->GetAddress(), cur->GetNumPages());
+        m_manager->Free(cur);
+        cur = next;
+    }
+
+    m_first_block = nullptr;
+    m_last_block = nullptr;
+}
+
+size_t KPageGroup::GetNumPages() const {
+    size_t num_pages = 0;
+
+    for (const auto& it : *this) {
+        num_pages += it.GetNumPages();
+    }
+
+    return num_pages;
+}
+
+Result KPageGroup::AddBlock(KPhysicalAddress addr, size_t num_pages) {
+    // Succeed immediately if we're adding no pages.
+    R_SUCCEED_IF(num_pages == 0);
+
+    // Check for overflow.
+    ASSERT(addr < addr + num_pages * PageSize);
+
+    // Try to just append to the last block.
+    if (m_last_block != nullptr) {
+        R_SUCCEED_IF(m_last_block->TryConcatenate(addr, num_pages));
+    }
+
+    // Allocate a new block.
+    KBlockInfo* new_block = m_manager->Allocate();
+    R_UNLESS(new_block != nullptr, ResultOutOfResource);
+
+    // Initialize the block.
+    new_block->Initialize(addr, num_pages);
+
+    // Add the block to our list.
+    if (m_last_block != nullptr) {
+        m_last_block->SetNext(new_block);
+    } else {
+        m_first_block = new_block;
+    }
+    m_last_block = new_block;
+
+    R_SUCCEED();
+}
+
+void KPageGroup::Open() const {
+    auto& mm = m_kernel.MemoryManager();
+
+    for (const auto& it : *this) {
+        mm.Open(it.GetAddress(), it.GetNumPages());
+    }
+}
+
+void KPageGroup::OpenFirst() const {
+    auto& mm = m_kernel.MemoryManager();
+
+    for (const auto& it : *this) {
+        mm.OpenFirst(it.GetAddress(), it.GetNumPages());
+    }
+}
+
+void KPageGroup::Close() const {
+    auto& mm = m_kernel.MemoryManager();
+
+    for (const auto& it : *this) {
+        mm.Close(it.GetAddress(), it.GetNumPages());
+    }
+}
+
+bool KPageGroup::IsEquivalentTo(const KPageGroup& rhs) const {
+    auto lit = this->begin();
+    auto rit = rhs.begin();
+    auto lend = this->end();
+    auto rend = rhs.end();
+
+    while (lit != lend && rit != rend) {
+        if (*lit != *rit) {
+            return false;
+        }
+
+        ++lit;
+        ++rit;
+    }
+
+    return lit == lend && rit == rend;
+}
+
+} // namespace Kernel
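AddBlock above coalesces physically contiguous ranges: when the incoming range starts exactly at the end of the last block, TryConcatenate extends that block instead of allocating a new KBlockInfo. A standalone model of the same rule, using hypothetical addresses and a std::vector in place of the intrusive list:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    constexpr std::size_t PageSize = 0x1000;

    // Simplified model of KPageGroup::AddBlock's coalescing rule.
    struct Block {
        std::uint64_t addr;
        std::size_t num_pages;
        std::uint64_t End() const { return addr + num_pages * PageSize; }
    };

    struct Group {
        std::vector<Block> blocks;
        void AddBlock(std::uint64_t addr, std::size_t np) {
            if (np == 0) {
                return;
            }
            if (!blocks.empty() && addr != 0 && addr == blocks.back().End()) {
                blocks.back().num_pages += np; // TryConcatenate succeeded
                return;
            }
            blocks.push_back({addr, np});
        }
    };

    int main() {
        Group g;
        g.AddBlock(0x10000, 2);
        g.AddBlock(0x12000, 3); // contiguous: folds into the first block
        g.AddBlock(0x40000, 1); // gap: appends a new block
        assert(g.blocks.size() == 2 && g.blocks[0].num_pages == 5);
    }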
diff --git a/src/core/hle/kernel/k_page_group.h b/src/core/hle/kernel/k_page_group.h
index 316f172f2..c07f17663 100644
--- a/src/core/hle/kernel/k_page_group.h
+++ b/src/core/hle/kernel/k_page_group.h
@@ -1,4 +1,4 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later
 
 #pragma once
@@ -13,24 +13,23 @@
 
 namespace Kernel {
 
+class KBlockInfoManager;
+class KernelCore;
 class KPageGroup;
 
 class KBlockInfo {
-private:
-    friend class KPageGroup;
-
 public:
-    constexpr KBlockInfo() = default;
+    constexpr explicit KBlockInfo() : m_next(nullptr) {}
 
-    constexpr void Initialize(PAddr addr, size_t np) {
+    constexpr void Initialize(KPhysicalAddress addr, size_t np) {
         ASSERT(Common::IsAligned(addr, PageSize));
         ASSERT(static_cast<u32>(np) == np);
 
-        m_page_index = static_cast<u32>(addr) / PageSize;
+        m_page_index = static_cast<u32>(addr / PageSize);
         m_num_pages = static_cast<u32>(np);
     }
 
-    constexpr PAddr GetAddress() const {
+    constexpr KPhysicalAddress GetAddress() const {
         return m_page_index * PageSize;
     }
     constexpr size_t GetNumPages() const {
@@ -39,10 +38,10 @@ public:
     constexpr size_t GetSize() const {
         return this->GetNumPages() * PageSize;
     }
-    constexpr PAddr GetEndAddress() const {
+    constexpr KPhysicalAddress GetEndAddress() const {
         return (m_page_index + m_num_pages) * PageSize;
     }
-    constexpr PAddr GetLastAddress() const {
+    constexpr KPhysicalAddress GetLastAddress() const {
         return this->GetEndAddress() - 1;
     }
 
@@ -62,8 +61,8 @@ public:
         return !(*this == rhs);
     }
 
-    constexpr bool IsStrictlyBefore(PAddr addr) const {
-        const PAddr end = this->GetEndAddress();
+    constexpr bool IsStrictlyBefore(KPhysicalAddress addr) const {
+        const KPhysicalAddress end = this->GetEndAddress();
 
         if (m_page_index != 0 && end == 0) {
             return false;
@@ -72,11 +71,11 @@ public:
         return end < addr;
     }
 
-    constexpr bool operator<(PAddr addr) const {
+    constexpr bool operator<(KPhysicalAddress addr) const {
         return this->IsStrictlyBefore(addr);
     }
 
-    constexpr bool TryConcatenate(PAddr addr, size_t np) {
+    constexpr bool TryConcatenate(KPhysicalAddress addr, size_t np) {
         if (addr != 0 && addr == this->GetEndAddress()) {
             m_num_pages += static_cast<u32>(np);
             return true;
@@ -90,96 +89,118 @@ private:
     }
 
 private:
+    friend class KPageGroup;
+
     KBlockInfo* m_next{};
     u32 m_page_index{};
     u32 m_num_pages{};
 };
 static_assert(sizeof(KBlockInfo) <= 0x10);
 
-class KPageGroup final {
+class KPageGroup {
 public:
-    class Node final {
-    public:
-        constexpr Node(u64 addr_, std::size_t num_pages_) : addr{addr_}, num_pages{num_pages_} {}
-
-        constexpr u64 GetAddress() const {
-            return addr;
-        }
-
-        constexpr std::size_t GetNumPages() const {
-            return num_pages;
-        }
-
-        constexpr std::size_t GetSize() const {
-            return GetNumPages() * PageSize;
-        }
-
-    private:
-        u64 addr{};
-        std::size_t num_pages{};
-    };
-
-public:
-    KPageGroup() = default;
-    KPageGroup(u64 address, u64 num_pages) {
-        ASSERT(AddBlock(address, num_pages).IsSuccess());
-    }
-
-    constexpr std::list<Node>& Nodes() {
-        return nodes;
-    }
-
-    constexpr const std::list<Node>& Nodes() const {
-        return nodes;
-    }
-
-    std::size_t GetNumPages() const {
-        std::size_t num_pages = 0;
-        for (const Node& node : nodes) {
-            num_pages += node.GetNumPages();
-        }
-        return num_pages;
-    }
-
-    bool IsEqual(KPageGroup& other) const {
-        auto this_node = nodes.begin();
-        auto other_node = other.nodes.begin();
-        while (this_node != nodes.end() && other_node != other.nodes.end()) {
-            if (this_node->GetAddress() != other_node->GetAddress() ||
-                this_node->GetNumPages() != other_node->GetNumPages()) {
-                return false;
-            }
-            this_node = std::next(this_node);
-            other_node = std::next(other_node);
-        }
-
-        return this_node == nodes.end() && other_node == other.nodes.end();
-    }
-
-    Result AddBlock(u64 address, u64 num_pages) {
-        if (!num_pages) {
-            return ResultSuccess;
-        }
-        if (!nodes.empty()) {
-            const auto node = nodes.back();
-            if (node.GetAddress() + node.GetNumPages() * PageSize == address) {
-                address = node.GetAddress();
-                num_pages += node.GetNumPages();
-                nodes.pop_back();
-            }
-        }
-        nodes.push_back({address, num_pages});
-        return ResultSuccess;
-    }
-
-    bool Empty() const {
-        return nodes.empty();
-    }
-
-    void Finalize() {}
-
-private:
-    std::list<Node> nodes;
-};
+    class Iterator {
+    public:
+        using iterator_category = std::forward_iterator_tag;
+        using value_type = const KBlockInfo;
+        using difference_type = std::ptrdiff_t;
+        using pointer = value_type*;
+        using reference = value_type&;
+
+        constexpr explicit Iterator(pointer n) : m_node(n) {}
+
+        constexpr bool operator==(const Iterator& rhs) const {
+            return m_node == rhs.m_node;
+        }
+        constexpr bool operator!=(const Iterator& rhs) const {
+            return !(*this == rhs);
+        }
+
+        constexpr pointer operator->() const {
+            return m_node;
+        }
+        constexpr reference operator*() const {
+            return *m_node;
+        }
+
+        constexpr Iterator& operator++() {
+            m_node = m_node->GetNext();
+            return *this;
+        }
+
+        constexpr Iterator operator++(int) {
+            const Iterator it{*this};
+            ++(*this);
+            return it;
+        }
+
+    private:
+        pointer m_node{};
+    };
+
+    explicit KPageGroup(KernelCore& kernel, KBlockInfoManager* m)
+        : m_kernel{kernel}, m_manager{m} {}
+    ~KPageGroup() {
+        this->Finalize();
+    }
+
+    void CloseAndReset();
+    void Finalize();
+
+    Iterator begin() const {
+        return Iterator{m_first_block};
+    }
+    Iterator end() const {
+        return Iterator{nullptr};
+    }
+    bool empty() const {
+        return m_first_block == nullptr;
+    }
+
+    Result AddBlock(KPhysicalAddress addr, size_t num_pages);
+    void Open() const;
+    void OpenFirst() const;
+    void Close() const;
+
+    size_t GetNumPages() const;
+
+    bool IsEquivalentTo(const KPageGroup& rhs) const;
+
+    bool operator==(const KPageGroup& rhs) const {
+        return this->IsEquivalentTo(rhs);
+    }
+
+    bool operator!=(const KPageGroup& rhs) const {
+        return !(*this == rhs);
+    }
+
+private:
+    KernelCore& m_kernel;
+    KBlockInfo* m_first_block{};
+    KBlockInfo* m_last_block{};
+    KBlockInfoManager* m_manager{};
+};
+
+class KScopedPageGroup {
+public:
+    explicit KScopedPageGroup(const KPageGroup* gp) : m_pg(gp) {
+        if (m_pg) {
+            m_pg->Open();
+        }
+    }
+    explicit KScopedPageGroup(const KPageGroup& gp) : KScopedPageGroup(std::addressof(gp)) {}
+    ~KScopedPageGroup() {
+        if (m_pg) {
+            m_pg->Close();
+        }
+    }
+
+    void CancelClose() {
+        m_pg = nullptr;
+    }
+
+private:
+    const KPageGroup* m_pg{};
+};
 
 } // namespace Kernel
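Call sites now iterate a KPageGroup directly with range-for through the Iterator above, instead of walking a std::list via .Nodes(). A self-contained model of this intrusive-list iteration, with simplified types rather than the kernel's:

    #include <cstddef>
    #include <iostream>

    // Minimal model: a node exposes GetNext(), and the group exposes
    // begin()/end() so range-for works directly.
    struct BlockInfo {
        std::size_t num_pages{};
        BlockInfo* next{};
        const BlockInfo* GetNext() const { return next; }
    };

    struct Group {
        const BlockInfo* first{};
        struct Iterator {
            const BlockInfo* node;
            const BlockInfo& operator*() const { return *node; }
            Iterator& operator++() { node = node->GetNext(); return *this; }
            bool operator!=(const Iterator& rhs) const { return node != rhs.node; }
        };
        Iterator begin() const { return {first}; }
        Iterator end() const { return {nullptr}; }
    };

    int main() {
        BlockInfo b2{3, nullptr};
        BlockInfo b1{2, &b2};
        Group g{&b1};
        std::size_t total = 0;
        for (const auto& block : g) { // as in `for (const auto& it : *this)`
            total += block.num_pages;
        }
        std::cout << total << '\n'; // prints 5
    }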
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 612fc76fa..9c7ac22dc 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -100,7 +100,7 @@ constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType a
 
 KPageTable::KPageTable(Core::System& system_)
     : m_general_lock{system_.Kernel()},
-      m_map_physical_memory_lock{system_.Kernel()}, m_system{system_} {}
+      m_map_physical_memory_lock{system_.Kernel()}, m_system{system_}, m_kernel{system_.Kernel()} {}
 
 KPageTable::~KPageTable() = default;
 
@@ -373,7 +373,7 @@ Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState sta
                                              m_memory_block_slab_manager);
 
     // Allocate and open.
-    KPageGroup pg;
+    KPageGroup pg{m_kernel, m_block_info_manager};
     R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
         &pg, num_pages,
         KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocation_option)));
@@ -432,7 +432,7 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si
     const size_t num_pages = size / PageSize;
 
     // Create page groups for the memory being mapped.
-    KPageGroup pg;
+    KPageGroup pg{m_kernel, m_block_info_manager};
     AddRegionToPages(src_address, num_pages, pg);
 
     // Reprotect the source as kernel-read/not mapped.
@@ -593,7 +593,7 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     const size_t size = num_pages * PageSize;
 
     // We're making a new group, not adding to an existing one.
-    R_UNLESS(pg.Empty(), ResultInvalidCurrentMemory);
+    R_UNLESS(pg.empty(), ResultInvalidCurrentMemory);
 
     // Begin traversal.
     Common::PageTable::TraversalContext context;
@@ -640,11 +640,10 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     R_SUCCEED();
 }
 
-bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) {
+bool KPageTable::IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages) {
     ASSERT(this->IsLockedByCurrentThread());
 
     const size_t size = num_pages * PageSize;
-    const auto& pg = pg_ll.Nodes();
     const auto& memory_layout = m_system.Kernel().MemoryLayout();
 
     // Empty groups are necessarily invalid.
@@ -942,9 +941,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
 
     ON_RESULT_FAILURE {
         if (cur_mapped_addr != dst_addr) {
-            // HACK: Manually close the pages.
-            HACK_ClosePages(dst_addr, (cur_mapped_addr - dst_addr) / PageSize);
-
             ASSERT(Operate(dst_addr, (cur_mapped_addr - dst_addr) / PageSize,
                            KMemoryPermission::None, OperationType::Unmap)
                        .IsSuccess());
@@ -1020,9 +1016,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
         // Map the page.
         R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, start_partial_page));
 
-        // HACK: Manually open the pages.
-        HACK_OpenPages(start_partial_page, 1);
-
         // Update tracking extents.
         cur_mapped_addr += PageSize;
         cur_block_addr += PageSize;
@@ -1051,9 +1044,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
         R_TRY(Operate(cur_mapped_addr, cur_block_size / PageSize, test_perm, OperationType::Map,
                       cur_block_addr));
 
-        // HACK: Manually open the pages.
-        HACK_OpenPages(cur_block_addr, cur_block_size / PageSize);
-
         // Update tracking extents.
         cur_mapped_addr += cur_block_size;
         cur_block_addr = next_entry.phys_addr;
@@ -1073,9 +1063,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
         R_TRY(Operate(cur_mapped_addr, last_block_size / PageSize, test_perm, OperationType::Map,
                       cur_block_addr));
 
-        // HACK: Manually open the pages.
-        HACK_OpenPages(cur_block_addr, last_block_size / PageSize);
-
         // Update tracking extents.
         cur_mapped_addr += last_block_size;
         cur_block_addr += last_block_size;
@@ -1107,9 +1094,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
 
         // Map the page.
         R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, end_partial_page));
-
-        // HACK: Manually open the pages.
-        HACK_OpenPages(end_partial_page, 1);
     }
 
     // Update memory blocks to reflect our changes
@@ -1211,9 +1195,6 @@ Result KPageTable::CleanupForIpcServer(VAddr address, size_t size, KMemoryState
     const size_t aligned_size = aligned_end - aligned_start;
     const size_t aligned_num_pages = aligned_size / PageSize;
 
-    // HACK: Manually close the pages.
-    HACK_ClosePages(aligned_start, aligned_num_pages);
-
     // Unmap the pages.
     R_TRY(Operate(aligned_start, aligned_num_pages, KMemoryPermission::None, OperationType::Unmap));
 
@@ -1501,17 +1482,6 @@ void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLi
     }
 }
 
-void KPageTable::HACK_OpenPages(PAddr phys_addr, size_t num_pages) {
-    m_system.Kernel().MemoryManager().OpenFirst(phys_addr, num_pages);
-}
-
-void KPageTable::HACK_ClosePages(VAddr virt_addr, size_t num_pages) {
-    for (size_t index = 0; index < num_pages; ++index) {
-        const auto paddr = GetPhysicalAddr(virt_addr + (index * PageSize));
-        m_system.Kernel().MemoryManager().Close(paddr, 1);
-    }
-}
-
 Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
     // Lock the physical memory lock.
     KScopedLightLock phys_lk(m_map_physical_memory_lock);
@@ -1572,7 +1542,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
     R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
 
     // Allocate pages for the new memory.
-    KPageGroup pg;
+    KPageGroup pg{m_kernel, m_block_info_manager};
     R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
         &pg, (size - mapped_size) / PageSize, m_allocate_option, 0, 0));
 
@@ -1650,7 +1620,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
     KScopedPageTableUpdater updater(this);
 
     // Prepare to iterate over the memory.
-    auto pg_it = pg.Nodes().begin();
+    auto pg_it = pg.begin();
     PAddr pg_phys_addr = pg_it->GetAddress();
     size_t pg_pages = pg_it->GetNumPages();
 
@@ -1680,9 +1650,6 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                                          last_unmap_address + 1 - cur_address) /
                             PageSize;
 
-                    // HACK: Manually close the pages.
-                    HACK_ClosePages(cur_address, cur_pages);
-
                     // Unmap.
                     ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None,
                                    OperationType::Unmap)
@@ -1703,7 +1670,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
             // Release any remaining unmapped memory.
             m_system.Kernel().MemoryManager().OpenFirst(pg_phys_addr, pg_pages);
             m_system.Kernel().MemoryManager().Close(pg_phys_addr, pg_pages);
-            for (++pg_it; pg_it != pg.Nodes().end(); ++pg_it) {
+            for (++pg_it; pg_it != pg.end(); ++pg_it) {
                 m_system.Kernel().MemoryManager().OpenFirst(pg_it->GetAddress(),
                                                             pg_it->GetNumPages());
                 m_system.Kernel().MemoryManager().Close(pg_it->GetAddress(),
@@ -1731,7 +1698,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
             // Check if we're at the end of the physical block.
             if (pg_pages == 0) {
                 // Ensure there are more pages to map.
-                ASSERT(pg_it != pg.Nodes().end());
+                ASSERT(pg_it != pg.end());
 
                 // Advance our physical block.
                 ++pg_it;
@@ -1742,10 +1709,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
             // Map whatever we can.
             const size_t cur_pages = std::min(pg_pages, map_pages);
             R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite,
-                          OperationType::Map, pg_phys_addr));
-
-            // HACK: Manually open the pages.
-            HACK_OpenPages(pg_phys_addr, cur_pages);
+                          OperationType::MapFirst, pg_phys_addr));
 
             // Advance.
             cur_address += cur_pages * PageSize;
@@ -1888,9 +1852,6 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
                                  last_address + 1 - cur_address) /
                     PageSize;
 
-                // HACK: Manually close the pages.
-                HACK_ClosePages(cur_address, cur_pages);
-
                 // Unmap.
                 ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap)
                            .IsSuccess());
@@ -1955,7 +1916,7 @@ Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size)
     R_TRY(dst_allocator_result);
 
     // Map the memory.
-    KPageGroup page_linked_list;
+    KPageGroup page_linked_list{m_kernel, m_block_info_manager};
     const size_t num_pages{size / PageSize};
     const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
         KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
@@ -2022,14 +1983,14 @@ Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size
                                              num_dst_allocator_blocks);
     R_TRY(dst_allocator_result);
 
-    KPageGroup src_pages;
-    KPageGroup dst_pages;
+    KPageGroup src_pages{m_kernel, m_block_info_manager};
+    KPageGroup dst_pages{m_kernel, m_block_info_manager};
     const size_t num_pages{size / PageSize};
 
     AddRegionToPages(src_address, num_pages, src_pages);
     AddRegionToPages(dst_address, num_pages, dst_pages);
 
-    R_UNLESS(dst_pages.IsEqual(src_pages), ResultInvalidMemoryRegion);
+    R_UNLESS(dst_pages.IsEquivalentTo(src_pages), ResultInvalidMemoryRegion);
 
     {
         auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); });
@@ -2060,7 +2021,7 @@ Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
 
     VAddr cur_addr{addr};
 
-    for (const auto& node : page_linked_list.Nodes()) {
+    for (const auto& node : page_linked_list) {
         if (const auto result{
                 Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())};
             result.IsError()) {
@@ -2160,7 +2121,7 @@ Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
 
     VAddr cur_addr{addr};
 
-    for (const auto& node : page_linked_list.Nodes()) {
+    for (const auto& node : page_linked_list) {
         if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None,
                                       OperationType::Unmap)};
             result.IsError()) {
@@ -2527,13 +2488,13 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
     R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
 
     // Allocate pages for the heap extension.
-    KPageGroup pg;
+    KPageGroup pg{m_kernel, m_block_info_manager};
     R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
         &pg, allocation_size / PageSize,
         KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
 
     // Clear all the newly allocated pages.
-    for (const auto& it : pg.Nodes()) {
+    for (const auto& it : pg) {
         std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
                     it.GetSize());
     }
@@ -2610,11 +2571,23 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_
     if (is_map_only) {
         R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
     } else {
-        KPageGroup page_group;
-        R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
-            &page_group, needed_num_pages,
-            KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0));
-        R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup));
+        // Create a page group to hold the pages we allocate.
+        KPageGroup pg{m_kernel, m_block_info_manager};
+
+        R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
+            &pg, needed_num_pages,
+            KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
+
+        // Ensure that the page group is closed when we're done working with it.
+        SCOPE_EXIT({ pg.Close(); });
+
+        // Clear all pages.
+        for (const auto& it : pg) {
+            std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()),
+                        m_heap_fill_value, it.GetSize());
+        }
+
+        R_TRY(Operate(addr, needed_num_pages, pg, OperationType::MapGroup));
     }
 
     // Update the blocks.
@@ -2795,19 +2768,28 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_
     ASSERT(num_pages > 0);
     ASSERT(num_pages == page_group.GetNumPages());
 
-    for (const auto& node : page_group.Nodes()) {
-        const size_t size{node.GetNumPages() * PageSize};
+    switch (operation) {
+    case OperationType::MapGroup: {
+        // We want to maintain a new reference to every page in the group.
+        KScopedPageGroup spg(page_group);
 
-        switch (operation) {
-        case OperationType::MapGroup:
-            m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress());
-            break;
-        default:
-            ASSERT(false);
-            break;
-        }
+        for (const auto& node : page_group) {
+            const size_t size{node.GetNumPages() * PageSize};
 
-        addr += size;
+            // Map the pages.
+            m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress());
+
+            addr += size;
+        }
+
+        // We succeeded! We want to persist the reference to the pages.
+        spg.CancelClose();
+
+        break;
+    }
+    default:
+        ASSERT(false);
+        break;
     }
 
     R_SUCCEED();
@@ -2822,13 +2804,29 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm,
     ASSERT(ContainsPages(addr, num_pages));
 
     switch (operation) {
-    case OperationType::Unmap:
+    case OperationType::Unmap: {
+        // Ensure that any pages we track close on exit.
+        KPageGroup pages_to_close{m_kernel, this->GetBlockInfoManager()};
+        SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
+
+        this->AddRegionToPages(addr, num_pages, pages_to_close);
         m_system.Memory().UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize);
         break;
+    }
+    case OperationType::MapFirst:
     case OperationType::Map: {
         ASSERT(map_addr);
         ASSERT(Common::IsAligned(map_addr, PageSize));
         m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr);
+
+        // Open references to pages, if we should.
+        if (IsHeapPhysicalAddress(m_kernel.MemoryLayout(), map_addr)) {
+            if (operation == OperationType::MapFirst) {
+                m_kernel.MemoryManager().OpenFirst(map_addr, num_pages);
+            } else {
+                m_kernel.MemoryManager().Open(map_addr, num_pages);
+            }
+        }
         break;
     }
     case OperationType::Separate: {
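The MapFirst/Map split above encodes a reference-counting rule: OpenFirst establishes the initial reference on freshly allocated heap pages, Open adds a reference to pages that are already owned, and Unmap drops references via CloseAndReset. A toy standalone model of that rule, not yuzu's real KMemoryManager:

    #include <cassert>
    #include <cstdint>
    #include <map>

    // OpenFirst: first reference on new pages; Open: extra reference on
    // already-owned pages; Close: drop one reference, free at zero.
    class RefTracker {
    public:
        void OpenFirst(std::uint64_t page) {
            assert(refs.count(page) == 0); // must be the first reference
            refs[page] = 1;
        }
        void Open(std::uint64_t page) {
            assert(refs.count(page) != 0); // must already be referenced
            ++refs[page];
        }
        void Close(std::uint64_t page) {
            if (--refs.at(page) == 0) {
                refs.erase(page); // last reference gone: page can be freed
            }
        }

    private:
        std::map<std::uint64_t, int> refs;
    };

    int main() {
        RefTracker t;
        t.OpenFirst(0x1000); // MapFirst path: fresh allocation
        t.Open(0x1000);      // Map path: aliasing an owned page
        t.Close(0x1000);
        t.Close(0x1000);     // page released here
    }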
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index f1ca785d7..0a454b05b 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -107,6 +107,10 @@ public:
         return *m_page_table_impl;
     }
 
+    KBlockInfoManager* GetBlockInfoManager() {
+        return m_block_info_manager;
+    }
+
     bool CanContain(VAddr addr, size_t size, KMemoryState state) const;
 
 protected:
@@ -261,10 +265,6 @@ private:
     void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address,
                                                  size_t size, KMemoryPermission prot_perm);
 
-    // HACK: These will be removed once we automatically manage page reference counts.
-    void HACK_OpenPages(PAddr phys_addr, size_t num_pages);
-    void HACK_ClosePages(VAddr virt_addr, size_t num_pages);
-
     mutable KLightLock m_general_lock;
     mutable KLightLock m_map_physical_memory_lock;
 
@@ -488,6 +488,7 @@ private:
     std::unique_ptr<Common::PageTable> m_page_table_impl;
 
     Core::System& m_system;
+    KernelCore& m_kernel;
 };
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp
index 0aa68103c..3cf2b5d91 100644
--- a/src/core/hle/kernel/k_shared_memory.cpp
+++ b/src/core/hle/kernel/k_shared_memory.cpp
@@ -13,10 +13,7 @@
 namespace Kernel {
 
 KSharedMemory::KSharedMemory(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
-
-KSharedMemory::~KSharedMemory() {
-    kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemoryMax, size);
-}
+KSharedMemory::~KSharedMemory() = default;
 
 Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
                                  Svc::MemoryPermission owner_permission_,
@@ -49,7 +46,8 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
     R_UNLESS(physical_address != 0, ResultOutOfMemory);
 
     //! Insert the result into our page group.
-    page_group.emplace(physical_address, num_pages);
+    page_group.emplace(kernel, &kernel.GetSystemSystemResource().GetBlockInfoManager());
+    page_group->AddBlock(physical_address, num_pages);
 
     // Commit our reservation.
     memory_reservation.Commit();
@@ -62,7 +60,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
     is_initialized = true;
 
     // Clear all pages in the memory.
-    for (const auto& block : page_group->Nodes()) {
+    for (const auto& block : *page_group) {
         std::memset(device_memory_.GetPointer<void>(block.GetAddress()), 0, block.GetSize());
     }
 
@@ -71,13 +69,8 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
 
 void KSharedMemory::Finalize() {
     // Close and finalize the page group.
-    // page_group->Close();
-    // page_group->Finalize();
-
-    //! HACK: Manually close.
-    for (const auto& block : page_group->Nodes()) {
-        kernel.MemoryManager().Close(block.GetAddress(), block.GetNumPages());
-    }
+    page_group->Close();
+    page_group->Finalize();
 
     // Release the memory reservation.
     resource_limit->Release(LimitableResource::PhysicalMemoryMax, size);
diff --git a/src/core/hle/kernel/memory_types.h b/src/core/hle/kernel/memory_types.h
index 3975507bd..92b8b37ac 100644
--- a/src/core/hle/kernel/memory_types.h
+++ b/src/core/hle/kernel/memory_types.h
@@ -14,4 +14,7 @@ constexpr std::size_t PageSize{1 << PageBits};
 
 using Page = std::array<u8, PageSize>;
 
+using KPhysicalAddress = PAddr;
+using KProcessAddress = VAddr;
+
 } // namespace Kernel
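These aliases are plain typedefs of PAddr and VAddr, so they document which address space a value belongs to without enforcing it. An illustrative sketch of how a signature reads with them (hypothetical function; integer typedefs assumed for the sketch):

    #include <cstdint>

    // Illustrative only: the aliases carry intent, not type safety, since
    // both resolve to the same underlying integer type.
    using PAddr = std::uint64_t;
    using VAddr = std::uint64_t;
    using KPhysicalAddress = PAddr;
    using KProcessAddress = VAddr;

    // The signature now states which address space each parameter uses.
    bool MapPage(KProcessAddress dst, KPhysicalAddress src) {
        return dst != 0 && src != 0; // stub body for the sketch
    }

    int main() {
        return MapPage(0x10000, 0x80000000) ? 0 : 1;
    }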
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 788ee2160..aca442196 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -1485,7 +1485,7 @@ static Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle p
              ResultInvalidMemoryRegion);
 
     // Create a new page group.
-    KPageGroup pg;
+    KPageGroup pg{system.Kernel(), dst_pt.GetBlockInfoManager()};
     R_TRY(src_pt.MakeAndOpenPageGroup(
         std::addressof(pg), src_address, size / PageSize, KMemoryState::FlagCanMapProcess,
         KMemoryState::FlagCanMapProcess, KMemoryPermission::None, KMemoryPermission::None,
diff --git a/src/input_common/CMakeLists.txt b/src/input_common/CMakeLists.txt
index f24c89b04..cef2c4d52 100644
--- a/src/input_common/CMakeLists.txt
+++ b/src/input_common/CMakeLists.txt
@@ -4,14 +4,10 @@
 add_library(input_common STATIC
     drivers/camera.cpp
     drivers/camera.h
-    drivers/gc_adapter.cpp
-    drivers/gc_adapter.h
     drivers/keyboard.cpp
     drivers/keyboard.h
     drivers/mouse.cpp
     drivers/mouse.h
-    drivers/sdl_driver.cpp
-    drivers/sdl_driver.h
     drivers/tas_input.cpp
     drivers/tas_input.h
     drivers/touch_screen.cpp
@@ -62,8 +58,17 @@ if (ENABLE_SDL2)
     target_compile_definitions(input_common PRIVATE HAVE_SDL2)
 endif()
 
+if (ENABLE_LIBUSB)
+    target_sources(input_common PRIVATE
+        drivers/gc_adapter.cpp
+        drivers/gc_adapter.h
+    )
+    target_link_libraries(input_common PRIVATE libusb::usb)
+    target_compile_definitions(input_common PRIVATE HAVE_LIBUSB)
+endif()
+
 create_target_directory_groups(input_common)
-target_link_libraries(input_common PUBLIC core PRIVATE common Boost::boost libusb::usb)
+target_link_libraries(input_common PUBLIC core PRIVATE common Boost::boost)
 
 if (YUZU_USE_PRECOMPILED_HEADERS)
     target_precompile_headers(input_common PRIVATE precompiled_headers.h)
diff --git a/src/input_common/main.cpp b/src/input_common/main.cpp
index 86deb4c7c..4dc92f482 100644
--- a/src/input_common/main.cpp
+++ b/src/input_common/main.cpp
@@ -5,7 +5,6 @@
 #include "common/input.h"
 #include "common/param_package.h"
 #include "input_common/drivers/camera.h"
-#include "input_common/drivers/gc_adapter.h"
 #include "input_common/drivers/keyboard.h"
 #include "input_common/drivers/mouse.h"
 #include "input_common/drivers/tas_input.h"
@@ -19,6 +18,10 @@
 #include "input_common/input_mapping.h"
 #include "input_common/input_poller.h"
 #include "input_common/main.h"
+
+#ifdef HAVE_LIBUSB
+#include "input_common/drivers/gc_adapter.h"
+#endif
 #ifdef HAVE_SDL2
 #include "input_common/drivers/sdl_driver.h"
 #endif
@@ -45,7 +48,9 @@ struct InputSubsystem::Impl {
         RegisterEngine("keyboard", keyboard);
         RegisterEngine("mouse", mouse);
         RegisterEngine("touch", touch_screen);
+#ifdef HAVE_LIBUSB
         RegisterEngine("gcpad", gcadapter);
+#endif
         RegisterEngine("cemuhookudp", udp_client);
         RegisterEngine("tas", tas_input);
         RegisterEngine("camera", camera);
@@ -72,7 +77,9 @@ struct InputSubsystem::Impl {
         UnregisterEngine(keyboard);
         UnregisterEngine(mouse);
         UnregisterEngine(touch_screen);
+#ifdef HAVE_LIBUSB
         UnregisterEngine(gcadapter);
+#endif
         UnregisterEngine(udp_client);
         UnregisterEngine(tas_input);
         UnregisterEngine(camera);
@@ -95,8 +102,10 @@ struct InputSubsystem::Impl {
95 devices.insert(devices.end(), keyboard_devices.begin(), keyboard_devices.end()); 102 devices.insert(devices.end(), keyboard_devices.begin(), keyboard_devices.end());
96 auto mouse_devices = mouse->GetInputDevices(); 103 auto mouse_devices = mouse->GetInputDevices();
97 devices.insert(devices.end(), mouse_devices.begin(), mouse_devices.end()); 104 devices.insert(devices.end(), mouse_devices.begin(), mouse_devices.end());
105#ifdef HAVE_LIBUSB
98 auto gcadapter_devices = gcadapter->GetInputDevices(); 106 auto gcadapter_devices = gcadapter->GetInputDevices();
99 devices.insert(devices.end(), gcadapter_devices.begin(), gcadapter_devices.end()); 107 devices.insert(devices.end(), gcadapter_devices.begin(), gcadapter_devices.end());
108#endif
100 auto udp_devices = udp_client->GetInputDevices(); 109 auto udp_devices = udp_client->GetInputDevices();
101 devices.insert(devices.end(), udp_devices.begin(), udp_devices.end()); 110 devices.insert(devices.end(), udp_devices.begin(), udp_devices.end());
102#ifdef HAVE_SDL2 111#ifdef HAVE_SDL2
@@ -119,9 +128,11 @@ struct InputSubsystem::Impl {
119 if (engine == mouse->GetEngineName()) { 128 if (engine == mouse->GetEngineName()) {
120 return mouse; 129 return mouse;
121 } 130 }
131#ifdef HAVE_LIBUSB
122 if (engine == gcadapter->GetEngineName()) { 132 if (engine == gcadapter->GetEngineName()) {
123 return gcadapter; 133 return gcadapter;
124 } 134 }
135#endif
125 if (engine == udp_client->GetEngineName()) { 136 if (engine == udp_client->GetEngineName()) {
126 return udp_client; 137 return udp_client;
127 } 138 }
@@ -194,9 +205,11 @@ struct InputSubsystem::Impl {
194 if (engine == mouse->GetEngineName()) { 205 if (engine == mouse->GetEngineName()) {
195 return true; 206 return true;
196 } 207 }
208#ifdef HAVE_LIBUSB
197 if (engine == gcadapter->GetEngineName()) { 209 if (engine == gcadapter->GetEngineName()) {
198 return true; 210 return true;
199 } 211 }
212#endif
200 if (engine == udp_client->GetEngineName()) { 213 if (engine == udp_client->GetEngineName()) {
201 return true; 214 return true;
202 } 215 }
@@ -217,7 +230,9 @@ struct InputSubsystem::Impl {
217 void BeginConfiguration() { 230 void BeginConfiguration() {
218 keyboard->BeginConfiguration(); 231 keyboard->BeginConfiguration();
219 mouse->BeginConfiguration(); 232 mouse->BeginConfiguration();
233#ifdef HAVE_LIBUSB
220 gcadapter->BeginConfiguration(); 234 gcadapter->BeginConfiguration();
235#endif
221 udp_client->BeginConfiguration(); 236 udp_client->BeginConfiguration();
222#ifdef HAVE_SDL2 237#ifdef HAVE_SDL2
223 sdl->BeginConfiguration(); 238 sdl->BeginConfiguration();
@@ -227,7 +242,9 @@ struct InputSubsystem::Impl {
227 void EndConfiguration() { 242 void EndConfiguration() {
228 keyboard->EndConfiguration(); 243 keyboard->EndConfiguration();
229 mouse->EndConfiguration(); 244 mouse->EndConfiguration();
245#ifdef HAVE_LIBUSB
230 gcadapter->EndConfiguration(); 246 gcadapter->EndConfiguration();
247#endif
231 udp_client->EndConfiguration(); 248 udp_client->EndConfiguration();
232#ifdef HAVE_SDL2 249#ifdef HAVE_SDL2
233 sdl->EndConfiguration(); 250 sdl->EndConfiguration();
@@ -248,7 +265,6 @@ struct InputSubsystem::Impl {
248 265
249 std::shared_ptr<Keyboard> keyboard; 266 std::shared_ptr<Keyboard> keyboard;
250 std::shared_ptr<Mouse> mouse; 267 std::shared_ptr<Mouse> mouse;
251 std::shared_ptr<GCAdapter> gcadapter;
252 std::shared_ptr<TouchScreen> touch_screen; 268 std::shared_ptr<TouchScreen> touch_screen;
253 std::shared_ptr<TasInput::Tas> tas_input; 269 std::shared_ptr<TasInput::Tas> tas_input;
254 std::shared_ptr<CemuhookUDP::UDPClient> udp_client; 270 std::shared_ptr<CemuhookUDP::UDPClient> udp_client;
@@ -256,6 +272,10 @@ struct InputSubsystem::Impl {
256 std::shared_ptr<VirtualAmiibo> virtual_amiibo; 272 std::shared_ptr<VirtualAmiibo> virtual_amiibo;
257 std::shared_ptr<VirtualGamepad> virtual_gamepad; 273 std::shared_ptr<VirtualGamepad> virtual_gamepad;
258 274
275#ifdef HAVE_LIBUSB
276 std::shared_ptr<GCAdapter> gcadapter;
277#endif
278
259#ifdef HAVE_SDL2 279#ifdef HAVE_SDL2
260 std::shared_ptr<SDLDriver> sdl; 280 std::shared_ptr<SDLDriver> sdl;
261#endif 281#endif
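
Note: taken together, the CMakeLists.txt and main.cpp hunks above make the GC adapter driver fully optional. The sources are compiled only when ENABLE_LIBUSB is set, and the matching HAVE_LIBUSB define guards every use of the gcadapter member. A minimal sketch of the idiom, with a hypothetical Driver type standing in for the real engine classes:

    // Sketch: compile-time optional driver registration (hypothetical names).
    #include <memory>
    #include <vector>

    struct Driver {
        virtual ~Driver() = default;
    };
    #ifdef HAVE_LIBUSB
    struct GCAdapter : Driver {};
    #endif

    struct InputImpl {
        std::vector<std::shared_ptr<Driver>> engines;
        void Register() {
            // Unconditional drivers are always registered...
            engines.push_back(std::make_shared<Driver>());
    #ifdef HAVE_LIBUSB
            // ...the optional driver only exists when the build enables it.
            engines.push_back(std::make_shared<GCAdapter>());
    #endif
        }
    };

The cost of the pattern is that every member access must repeat the guard, which is why the diff touches each RegisterEngine/UnregisterEngine/GetInputDevices call site individually.
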
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 6c8d98946..f1c60d1f3 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -666,9 +666,10 @@ void BufferCache<P>::BindHostGeometryBuffers(bool is_indexed) {
666 BindHostIndexBuffer(); 666 BindHostIndexBuffer();
667 } else if constexpr (!HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) { 667 } else if constexpr (!HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) {
668 const auto& draw_state = maxwell3d->draw_manager->GetDrawState(); 668 const auto& draw_state = maxwell3d->draw_manager->GetDrawState();
669 if (draw_state.topology == Maxwell::PrimitiveTopology::Quads) { 669 if (draw_state.topology == Maxwell::PrimitiveTopology::Quads ||
670 runtime.BindQuadArrayIndexBuffer(draw_state.vertex_buffer.first, 670 draw_state.topology == Maxwell::PrimitiveTopology::QuadStrip) {
671 draw_state.vertex_buffer.count); 671 runtime.BindQuadIndexBuffer(draw_state.topology, draw_state.vertex_buffer.first,
672 draw_state.vertex_buffer.count);
672 } 673 }
673 } 674 }
674 BindHostVertexBuffers(); 675 BindHostVertexBuffers();
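
Background for the hunk above: Vulkan has no native quad topologies, so on backends without full index/primitive support the cache redirects both Quads and QuadStrip draws to a synthesized triangle index buffer. A quad list of n vertices holds n/4 quads; a quad strip holds (n-2)/2, since each quad after the first reuses two vertices, and every quad expands to 6 triangle indices. A small sketch of that arithmetic, using an assumed helper name:

    #include <cstdint>

    // Assumed helper: triangle-index count for an emulated quad draw.
    constexpr std::uint32_t NumTriangleIndices(bool is_strip, std::uint32_t num_vertices) {
        const std::uint32_t num_quads =
            is_strip ? (num_vertices >= 4 ? (num_vertices - 2) / 2 : 0) : num_vertices / 4;
        return num_quads * 6; // two triangles per quad
    }

    static_assert(NumTriangleIndices(false, 8) == 12); // 2 quads in a list
    static_assert(NumTriangleIndices(true, 8) == 18);  // 3 quads in a strip
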
diff --git a/src/video_core/host_shaders/vulkan_quad_indexed.comp b/src/video_core/host_shaders/vulkan_quad_indexed.comp
index a412f30ff..066fe4a9c 100644
--- a/src/video_core/host_shaders/vulkan_quad_indexed.comp
+++ b/src/video_core/host_shaders/vulkan_quad_indexed.comp
@@ -16,6 +16,7 @@ layout (std430, set = 0, binding = 1) writeonly buffer OutputBuffer {
16layout (push_constant) uniform PushConstants { 16layout (push_constant) uniform PushConstants {
17 uint base_vertex; 17 uint base_vertex;
18 int index_shift; // 0: uint8, 1: uint16, 2: uint32 18 int index_shift; // 0: uint8, 1: uint16, 2: uint32
19 int is_strip; // 0: quads, 1: quad strip
19}; 20};
20 21
21void main() { 22void main() {
@@ -28,9 +29,10 @@ void main() {
28 int flipped_shift = 2 - index_shift; 29 int flipped_shift = 2 - index_shift;
29 int mask = (1 << flipped_shift) - 1; 30 int mask = (1 << flipped_shift) - 1;
30 31
31 const int quad_swizzle[6] = int[](0, 1, 2, 0, 2, 3); 32 const int quads_swizzle[6] = int[](0, 1, 2, 0, 2, 3);
33 const int quad_strip_swizzle[6] = int[](0, 3, 1, 0, 2, 3);
32 for (uint vertex = 0; vertex < 6; ++vertex) { 34 for (uint vertex = 0; vertex < 6; ++vertex) {
33 int offset = primitive * 4 + quad_swizzle[vertex]; 35 int offset = (is_strip == 0 ? primitive * 4 + quads_swizzle[vertex] : primitive * 2 + quad_strip_swizzle[vertex]);
34 int int_offset = offset >> flipped_shift; 36 int int_offset = offset >> flipped_shift;
35 int bit_offset = (offset & mask) * index_size; 37 int bit_offset = (offset & mask) * index_size;
36 uint packed_input = input_indexes[int_offset]; 38 uint packed_input = input_indexes[int_offset];
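
The two swizzle tables encode the different vertex layouts: a quad list consumes four fresh vertices per primitive (winding 0,1,2 / 0,2,3), while a strip advances by only two and reuses the previous pair, so its table is 0,3,1 / 0,2,3 to keep the orientation consistent. A CPU-side reference of the shader's offset selection, for illustration only:

    #include <array>
    #include <cstdint>

    // Reference re-implementation of the shader's per-primitive index selection.
    std::array<std::uint32_t, 6> QuadToTriangleOffsets(std::uint32_t primitive, bool is_strip) {
        constexpr std::uint32_t quads_swizzle[6] = {0, 1, 2, 0, 2, 3};
        constexpr std::uint32_t quad_strip_swizzle[6] = {0, 3, 1, 0, 2, 3};
        std::array<std::uint32_t, 6> offsets{};
        for (int v = 0; v < 6; ++v) {
            offsets[v] = is_strip ? primitive * 2 + quad_strip_swizzle[v]
                                  : primitive * 4 + quads_swizzle[v];
        }
        return offsets;
    }
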
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
index 3e03c5cd6..ca52e2389 100644
--- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
@@ -301,6 +301,8 @@ VkPrimitiveTopology PrimitiveTopology([[maybe_unused]] const Device& device,
301 return VK_PRIMITIVE_TOPOLOGY_POINT_LIST; 301 return VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
302 case Maxwell::PrimitiveTopology::Lines: 302 case Maxwell::PrimitiveTopology::Lines:
303 return VK_PRIMITIVE_TOPOLOGY_LINE_LIST; 303 return VK_PRIMITIVE_TOPOLOGY_LINE_LIST;
304 case Maxwell::PrimitiveTopology::LineLoop:
305 return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
304 case Maxwell::PrimitiveTopology::LineStrip: 306 case Maxwell::PrimitiveTopology::LineStrip:
305 return VK_PRIMITIVE_TOPOLOGY_LINE_STRIP; 307 return VK_PRIMITIVE_TOPOLOGY_LINE_STRIP;
306 case Maxwell::PrimitiveTopology::Triangles: 308 case Maxwell::PrimitiveTopology::Triangles:
@@ -309,15 +311,28 @@ VkPrimitiveTopology PrimitiveTopology([[maybe_unused]] const Device& device,
309 return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; 311 return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
310 case Maxwell::PrimitiveTopology::TriangleFan: 312 case Maxwell::PrimitiveTopology::TriangleFan:
311 return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN; 313 return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN;
314 case Maxwell::PrimitiveTopology::LinesAdjacency:
315 return VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY;
316 case Maxwell::PrimitiveTopology::LineStripAdjacency:
317 return VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY;
318 case Maxwell::PrimitiveTopology::TrianglesAdjacency:
319 return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY;
320 case Maxwell::PrimitiveTopology::TriangleStripAdjacency:
321 return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY;
312 case Maxwell::PrimitiveTopology::Quads: 322 case Maxwell::PrimitiveTopology::Quads:
313 // TODO(Rodrigo): Use VK_PRIMITIVE_TOPOLOGY_QUAD_LIST_EXT whenever it releases 323 case Maxwell::PrimitiveTopology::QuadStrip:
324 // TODO: Use VK_PRIMITIVE_TOPOLOGY_QUAD_LIST_EXT/VK_PRIMITIVE_TOPOLOGY_QUAD_STRIP_EXT
 325 // when the extension is released
314 return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; 326 return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
315 case Maxwell::PrimitiveTopology::Patches: 327 case Maxwell::PrimitiveTopology::Patches:
316 return VK_PRIMITIVE_TOPOLOGY_PATCH_LIST; 328 return VK_PRIMITIVE_TOPOLOGY_PATCH_LIST;
317 default: 329 case Maxwell::PrimitiveTopology::Polygon:
318 UNIMPLEMENTED_MSG("Unimplemented topology={}", topology); 330 LOG_WARNING(Render_Vulkan, "Draw mode is Polygon; with a polygon mode of lines it "
319 return {}; 331 "should be drawn as a single outline, not as a set of triangles.");
332 return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN;
320 } 333 }
334 UNIMPLEMENTED_MSG("Unimplemented topology={}", topology);
335 return {};
321} 336}
322 337
323VkFormat VertexFormat(const Device& device, Maxwell::VertexAttribute::Type type, 338VkFormat VertexFormat(const Device& device, Maxwell::VertexAttribute::Type type,
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
index 84d36fea6..6b54d7111 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
@@ -51,15 +51,6 @@ size_t BytesPerIndex(VkIndexType index_type) {
51 } 51 }
52} 52}
53 53
54template <typename T>
55std::array<T, 6> MakeQuadIndices(u32 quad, u32 first) {
56 std::array<T, 6> indices{0, 1, 2, 0, 2, 3};
57 for (T& index : indices) {
58 index = static_cast<T>(first + index + quad * 4);
59 }
60 return indices;
61}
62
63vk::Buffer CreateBuffer(const Device& device, u64 size) { 54vk::Buffer CreateBuffer(const Device& device, u64 size) {
64 VkBufferUsageFlags flags = 55 VkBufferUsageFlags flags =
65 VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | 56 VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
@@ -123,6 +114,187 @@ VkBufferView Buffer::View(u32 offset, u32 size, VideoCore::Surface::PixelFormat
123 return *views.back().handle; 114 return *views.back().handle;
124} 115}
125 116
117class QuadIndexBuffer {
118public:
119 QuadIndexBuffer(const Device& device_, MemoryAllocator& memory_allocator_,
120 Scheduler& scheduler_, StagingBufferPool& staging_pool_)
121 : device{device_}, memory_allocator{memory_allocator_}, scheduler{scheduler_},
122 staging_pool{staging_pool_} {}
123
124 virtual ~QuadIndexBuffer() = default;
125
126 void UpdateBuffer(u32 num_indices_) {
127 if (num_indices_ <= num_indices) {
128 return;
129 }
130
131 scheduler.Finish();
132
133 num_indices = num_indices_;
134 index_type = IndexTypeFromNumElements(device, num_indices);
135
136 const u32 num_quads = GetQuadsNum(num_indices);
137 const u32 num_triangle_indices = num_quads * 6;
138 const u32 num_first_offset_copies = 4;
139 const size_t bytes_per_index = BytesPerIndex(index_type);
140 const size_t size_bytes = num_triangle_indices * bytes_per_index * num_first_offset_copies;
141 buffer = device.GetLogical().CreateBuffer(VkBufferCreateInfo{
142 .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
143 .pNext = nullptr,
144 .flags = 0,
145 .size = size_bytes,
146 .usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
147 .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
148 .queueFamilyIndexCount = 0,
149 .pQueueFamilyIndices = nullptr,
150 });
151 if (device.HasDebuggingToolAttached()) {
152 buffer.SetObjectNameEXT("Quad LUT");
153 }
154 memory_commit = memory_allocator.Commit(buffer, MemoryUsage::DeviceLocal);
155
156 const StagingBufferRef staging = staging_pool.Request(size_bytes, MemoryUsage::Upload);
157 u8* staging_data = staging.mapped_span.data();
158 const size_t quad_size = bytes_per_index * 6;
159
160 for (u32 first = 0; first < num_first_offset_copies; ++first) {
161 for (u32 quad = 0; quad < num_quads; ++quad) {
162 MakeAndUpdateIndices(staging_data, quad_size, quad, first);
163 staging_data += quad_size;
164 }
165 }
166
167 scheduler.RequestOutsideRenderPassOperationContext();
168 scheduler.Record([src_buffer = staging.buffer, src_offset = staging.offset,
169 dst_buffer = *buffer, size_bytes](vk::CommandBuffer cmdbuf) {
170 const VkBufferCopy copy{
171 .srcOffset = src_offset,
172 .dstOffset = 0,
173 .size = size_bytes,
174 };
175 const VkBufferMemoryBarrier write_barrier{
176 .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
177 .pNext = nullptr,
178 .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
179 .dstAccessMask = VK_ACCESS_INDEX_READ_BIT,
180 .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
181 .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
182 .buffer = dst_buffer,
183 .offset = 0,
184 .size = size_bytes,
185 };
186 cmdbuf.CopyBuffer(src_buffer, dst_buffer, copy);
187 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT,
188 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, write_barrier);
189 });
190 }
191
192 void BindBuffer(u32 first) {
193 const VkIndexType index_type_ = index_type;
194 const size_t sub_first_offset = static_cast<size_t>(first % 4) * GetQuadsNum(num_indices);
195 const size_t offset =
196 (sub_first_offset + GetQuadsNum(first)) * 6ULL * BytesPerIndex(index_type);
197 scheduler.Record([buffer = *buffer, index_type_, offset](vk::CommandBuffer cmdbuf) {
198 cmdbuf.BindIndexBuffer(buffer, offset, index_type_);
199 });
200 }
201
202protected:
203 virtual u32 GetQuadsNum(u32 num_indices) const = 0;
204
205 virtual void MakeAndUpdateIndices(u8* staging_data, size_t quad_size, u32 quad, u32 first) = 0;
206
207 const Device& device;
208 MemoryAllocator& memory_allocator;
209 Scheduler& scheduler;
210 StagingBufferPool& staging_pool;
211
212 vk::Buffer buffer{};
213 MemoryCommit memory_commit{};
214 VkIndexType index_type{};
215 u32 num_indices = 0;
216};
217
218class QuadArrayIndexBuffer : public QuadIndexBuffer {
219public:
220 QuadArrayIndexBuffer(const Device& device_, MemoryAllocator& memory_allocator_,
221 Scheduler& scheduler_, StagingBufferPool& staging_pool_)
222 : QuadIndexBuffer(device_, memory_allocator_, scheduler_, staging_pool_) {}
223
224 ~QuadArrayIndexBuffer() = default;
225
226private:
227 u32 GetQuadsNum(u32 num_indices_) const override {
228 return num_indices_ / 4;
229 }
230
231 template <typename T>
232 static std::array<T, 6> MakeIndices(u32 quad, u32 first) {
233 std::array<T, 6> indices{0, 1, 2, 0, 2, 3};
234 for (T& index : indices) {
235 index = static_cast<T>(first + index + quad * 4);
236 }
237 return indices;
238 }
239
240 void MakeAndUpdateIndices(u8* staging_data, size_t quad_size, u32 quad, u32 first) override {
241 switch (index_type) {
242 case VK_INDEX_TYPE_UINT8_EXT:
243 std::memcpy(staging_data, MakeIndices<u8>(quad, first).data(), quad_size);
244 break;
245 case VK_INDEX_TYPE_UINT16:
246 std::memcpy(staging_data, MakeIndices<u16>(quad, first).data(), quad_size);
247 break;
248 case VK_INDEX_TYPE_UINT32:
249 std::memcpy(staging_data, MakeIndices<u32>(quad, first).data(), quad_size);
250 break;
251 default:
252 ASSERT(false);
253 break;
254 }
255 }
256};
257
258class QuadStripIndexBuffer : public QuadIndexBuffer {
259public:
260 QuadStripIndexBuffer(const Device& device_, MemoryAllocator& memory_allocator_,
261 Scheduler& scheduler_, StagingBufferPool& staging_pool_)
262 : QuadIndexBuffer(device_, memory_allocator_, scheduler_, staging_pool_) {}
263
264 ~QuadStripIndexBuffer() = default;
265
266private:
267 u32 GetQuadsNum(u32 num_indices_) const override {
268 return num_indices_ >= 4 ? (num_indices_ - 2) / 2 : 0;
269 }
270
271 template <typename T>
272 static std::array<T, 6> MakeIndices(u32 quad, u32 first) {
273 std::array<T, 6> indices{0, 3, 1, 0, 2, 3};
274 for (T& index : indices) {
275 index = static_cast<T>(first + index + quad * 2);
276 }
277 return indices;
278 }
279
280 void MakeAndUpdateIndices(u8* staging_data, size_t quad_size, u32 quad, u32 first) override {
281 switch (index_type) {
282 case VK_INDEX_TYPE_UINT8_EXT:
283 std::memcpy(staging_data, MakeIndices<u8>(quad, first).data(), quad_size);
284 break;
285 case VK_INDEX_TYPE_UINT16:
286 std::memcpy(staging_data, MakeIndices<u16>(quad, first).data(), quad_size);
287 break;
288 case VK_INDEX_TYPE_UINT32:
289 std::memcpy(staging_data, MakeIndices<u32>(quad, first).data(), quad_size);
290 break;
291 default:
292 ASSERT(false);
293 break;
294 }
295 }
296};
297
126BufferCacheRuntime::BufferCacheRuntime(const Device& device_, MemoryAllocator& memory_allocator_, 298BufferCacheRuntime::BufferCacheRuntime(const Device& device_, MemoryAllocator& memory_allocator_,
127 Scheduler& scheduler_, StagingBufferPool& staging_pool_, 299 Scheduler& scheduler_, StagingBufferPool& staging_pool_,
128 UpdateDescriptorQueue& update_descriptor_queue_, 300 UpdateDescriptorQueue& update_descriptor_queue_,
@@ -130,7 +302,12 @@ BufferCacheRuntime::BufferCacheRuntime(const Device& device_, MemoryAllocator& m
130 : device{device_}, memory_allocator{memory_allocator_}, scheduler{scheduler_}, 302 : device{device_}, memory_allocator{memory_allocator_}, scheduler{scheduler_},
131 staging_pool{staging_pool_}, update_descriptor_queue{update_descriptor_queue_}, 303 staging_pool{staging_pool_}, update_descriptor_queue{update_descriptor_queue_},
132 uint8_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue), 304 uint8_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),
133 quad_index_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue) {} 305 quad_index_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue) {
306 quad_array_index_buffer = std::make_shared<QuadArrayIndexBuffer>(device_, memory_allocator_,
307 scheduler_, staging_pool_);
308 quad_strip_index_buffer = std::make_shared<QuadStripIndexBuffer>(device_, memory_allocator_,
309 scheduler_, staging_pool_);
310}
134 311
135StagingBufferRef BufferCacheRuntime::UploadStagingBuffer(size_t size) { 312StagingBufferRef BufferCacheRuntime::UploadStagingBuffer(size_t size) {
136 return staging_pool.Request(size, MemoryUsage::Upload); 313 return staging_pool.Request(size, MemoryUsage::Upload);
@@ -245,10 +422,11 @@ void BufferCacheRuntime::BindIndexBuffer(PrimitiveTopology topology, IndexFormat
245 VkIndexType vk_index_type = MaxwellToVK::IndexFormat(index_format); 422 VkIndexType vk_index_type = MaxwellToVK::IndexFormat(index_format);
246 VkDeviceSize vk_offset = offset; 423 VkDeviceSize vk_offset = offset;
247 VkBuffer vk_buffer = buffer; 424 VkBuffer vk_buffer = buffer;
248 if (topology == PrimitiveTopology::Quads) { 425 if (topology == PrimitiveTopology::Quads || topology == PrimitiveTopology::QuadStrip) {
249 vk_index_type = VK_INDEX_TYPE_UINT32; 426 vk_index_type = VK_INDEX_TYPE_UINT32;
250 std::tie(vk_buffer, vk_offset) = 427 std::tie(vk_buffer, vk_offset) =
251 quad_index_pass.Assemble(index_format, num_indices, base_vertex, buffer, offset); 428 quad_index_pass.Assemble(index_format, num_indices, base_vertex, buffer, offset,
429 topology == PrimitiveTopology::QuadStrip);
252 } else if (vk_index_type == VK_INDEX_TYPE_UINT8_EXT && !device.IsExtIndexTypeUint8Supported()) { 430 } else if (vk_index_type == VK_INDEX_TYPE_UINT8_EXT && !device.IsExtIndexTypeUint8Supported()) {
253 vk_index_type = VK_INDEX_TYPE_UINT16; 431 vk_index_type = VK_INDEX_TYPE_UINT16;
254 std::tie(vk_buffer, vk_offset) = uint8_pass.Assemble(num_indices, buffer, offset); 432 std::tie(vk_buffer, vk_offset) = uint8_pass.Assemble(num_indices, buffer, offset);
@@ -263,7 +441,7 @@ void BufferCacheRuntime::BindIndexBuffer(PrimitiveTopology topology, IndexFormat
263 }); 441 });
264} 442}
265 443
266void BufferCacheRuntime::BindQuadArrayIndexBuffer(u32 first, u32 count) { 444void BufferCacheRuntime::BindQuadIndexBuffer(PrimitiveTopology topology, u32 first, u32 count) {
267 if (count == 0) { 445 if (count == 0) {
268 ReserveNullBuffer(); 446 ReserveNullBuffer();
269 scheduler.Record([this](vk::CommandBuffer cmdbuf) { 447 scheduler.Record([this](vk::CommandBuffer cmdbuf) {
@@ -271,16 +449,14 @@ void BufferCacheRuntime::BindQuadArrayIndexBuffer(u32 first, u32 count) {
271 }); 449 });
272 return; 450 return;
273 } 451 }
274 ReserveQuadArrayLUT(first + count, true); 452
275 453 if (topology == PrimitiveTopology::Quads) {
276 // The LUT has the indices 0, 1, 2, and 3 copied as an array 454 quad_array_index_buffer->UpdateBuffer(first + count);
277 // To apply these 'first' offsets we can apply an offset based on the modulus. 455 quad_array_index_buffer->BindBuffer(first);
278 const VkIndexType index_type = quad_array_lut_index_type; 456 } else if (topology == PrimitiveTopology::QuadStrip) {
279 const size_t sub_first_offset = static_cast<size_t>(first % 4) * (current_num_indices / 4); 457 quad_strip_index_buffer->UpdateBuffer(first + count);
280 const size_t offset = (sub_first_offset + first / 4) * 6ULL * BytesPerIndex(index_type); 458 quad_strip_index_buffer->BindBuffer(first);
281 scheduler.Record([buffer = *quad_array_lut, index_type, offset](vk::CommandBuffer cmdbuf) { 459 }
282 cmdbuf.BindIndexBuffer(buffer, offset, index_type);
283 });
284} 460}
285 461
286void BufferCacheRuntime::BindVertexBuffer(u32 index, VkBuffer buffer, u32 offset, u32 size, 462void BufferCacheRuntime::BindVertexBuffer(u32 index, VkBuffer buffer, u32 offset, u32 size,
@@ -323,83 +499,6 @@ void BufferCacheRuntime::BindTransformFeedbackBuffer(u32 index, VkBuffer buffer,
323 }); 499 });
324} 500}
325 501
326void BufferCacheRuntime::ReserveQuadArrayLUT(u32 num_indices, bool wait_for_idle) {
327 if (num_indices <= current_num_indices) {
328 return;
329 }
330 if (wait_for_idle) {
331 scheduler.Finish();
332 }
333 current_num_indices = num_indices;
334 quad_array_lut_index_type = IndexTypeFromNumElements(device, num_indices);
335
336 const u32 num_quads = num_indices / 4;
337 const u32 num_triangle_indices = num_quads * 6;
338 const u32 num_first_offset_copies = 4;
339 const size_t bytes_per_index = BytesPerIndex(quad_array_lut_index_type);
340 const size_t size_bytes = num_triangle_indices * bytes_per_index * num_first_offset_copies;
341 quad_array_lut = device.GetLogical().CreateBuffer(VkBufferCreateInfo{
342 .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
343 .pNext = nullptr,
344 .flags = 0,
345 .size = size_bytes,
346 .usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
347 .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
348 .queueFamilyIndexCount = 0,
349 .pQueueFamilyIndices = nullptr,
350 });
351 if (device.HasDebuggingToolAttached()) {
352 quad_array_lut.SetObjectNameEXT("Quad LUT");
353 }
354 quad_array_lut_commit = memory_allocator.Commit(quad_array_lut, MemoryUsage::DeviceLocal);
355
356 const StagingBufferRef staging = staging_pool.Request(size_bytes, MemoryUsage::Upload);
357 u8* staging_data = staging.mapped_span.data();
358 const size_t quad_size = bytes_per_index * 6;
359 for (u32 first = 0; first < num_first_offset_copies; ++first) {
360 for (u32 quad = 0; quad < num_quads; ++quad) {
361 switch (quad_array_lut_index_type) {
362 case VK_INDEX_TYPE_UINT8_EXT:
363 std::memcpy(staging_data, MakeQuadIndices<u8>(quad, first).data(), quad_size);
364 break;
365 case VK_INDEX_TYPE_UINT16:
366 std::memcpy(staging_data, MakeQuadIndices<u16>(quad, first).data(), quad_size);
367 break;
368 case VK_INDEX_TYPE_UINT32:
369 std::memcpy(staging_data, MakeQuadIndices<u32>(quad, first).data(), quad_size);
370 break;
371 default:
372 ASSERT(false);
373 break;
374 }
375 staging_data += quad_size;
376 }
377 }
378 scheduler.RequestOutsideRenderPassOperationContext();
379 scheduler.Record([src_buffer = staging.buffer, src_offset = staging.offset,
380 dst_buffer = *quad_array_lut, size_bytes](vk::CommandBuffer cmdbuf) {
381 const VkBufferCopy copy{
382 .srcOffset = src_offset,
383 .dstOffset = 0,
384 .size = size_bytes,
385 };
386 const VkBufferMemoryBarrier write_barrier{
387 .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
388 .pNext = nullptr,
389 .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
390 .dstAccessMask = VK_ACCESS_INDEX_READ_BIT,
391 .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
392 .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
393 .buffer = dst_buffer,
394 .offset = 0,
395 .size = size_bytes,
396 };
397 cmdbuf.CopyBuffer(src_buffer, dst_buffer, copy);
398 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
399 0, write_barrier);
400 });
401}
402
403void BufferCacheRuntime::ReserveNullBuffer() { 502void BufferCacheRuntime::ReserveNullBuffer() {
404 if (null_buffer) { 503 if (null_buffer) {
405 return; 504 return;
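
The QuadIndexBuffer hierarchy above is the old ReserveQuadArrayLUT generalized: UpdateBuffer still builds one device-local LUT containing num_first_offset_copies = 4 full copies of the expanded indices, one per possible first % 4 phase, regenerating only when the requested index count grows (after scheduler.Finish(), so a LUT still in flight is never clobbered). BindBuffer then turns first into a byte offset into that LUT. A sketch of the offset math, with assumed names:

    #include <cstddef>
    #include <cstdint>

    // Assumed helper mirroring QuadIndexBuffer::BindBuffer's offset computation.
    std::size_t QuadLutBindOffset(std::uint32_t first, std::uint32_t quads_in_lut,
                                  std::uint32_t quads_before_first,
                                  std::size_t bytes_per_index) {
        // Skip whole copies to reach the (first % 4) phase, then skip the quads
        // that precede `first` inside that copy; each quad is 6 indices.
        const std::size_t copy_base = static_cast<std::size_t>(first % 4) * quads_in_lut;
        return (copy_base + quads_before_first) * 6 * bytes_per_index;
    }

For a plain quad list quads_before_first is first / 4; the strip subclass supplies its own GetQuadsNum.
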
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h
index a15c8b39b..183b33632 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.h
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h
@@ -50,6 +50,9 @@ private:
50 std::vector<BufferView> views; 50 std::vector<BufferView> views;
51}; 51};
52 52
53class QuadArrayIndexBuffer;
54class QuadStripIndexBuffer;
55
53class BufferCacheRuntime { 56class BufferCacheRuntime {
54 friend Buffer; 57 friend Buffer;
55 58
@@ -86,7 +89,7 @@ public:
86 void BindIndexBuffer(PrimitiveTopology topology, IndexFormat index_format, u32 num_indices, 89 void BindIndexBuffer(PrimitiveTopology topology, IndexFormat index_format, u32 num_indices,
87 u32 base_vertex, VkBuffer buffer, u32 offset, u32 size); 90 u32 base_vertex, VkBuffer buffer, u32 offset, u32 size);
88 91
89 void BindQuadArrayIndexBuffer(u32 first, u32 count); 92 void BindQuadIndexBuffer(PrimitiveTopology topology, u32 first, u32 count);
90 93
91 void BindVertexBuffer(u32 index, VkBuffer buffer, u32 offset, u32 size, u32 stride); 94 void BindVertexBuffer(u32 index, VkBuffer buffer, u32 offset, u32 size, u32 stride);
92 95
@@ -118,8 +121,6 @@ private:
118 update_descriptor_queue.AddBuffer(buffer, offset, size); 121 update_descriptor_queue.AddBuffer(buffer, offset, size);
119 } 122 }
120 123
121 void ReserveQuadArrayLUT(u32 num_indices, bool wait_for_idle);
122
123 void ReserveNullBuffer(); 124 void ReserveNullBuffer();
124 125
125 const Device& device; 126 const Device& device;
@@ -128,10 +129,8 @@ private:
128 StagingBufferPool& staging_pool; 129 StagingBufferPool& staging_pool;
129 UpdateDescriptorQueue& update_descriptor_queue; 130 UpdateDescriptorQueue& update_descriptor_queue;
130 131
131 vk::Buffer quad_array_lut; 132 std::shared_ptr<QuadArrayIndexBuffer> quad_array_index_buffer;
132 MemoryCommit quad_array_lut_commit; 133 std::shared_ptr<QuadStripIndexBuffer> quad_strip_index_buffer;
133 VkIndexType quad_array_lut_index_type{};
134 u32 current_num_indices = 0;
135 134
136 vk::Buffer null_buffer; 135 vk::Buffer null_buffer;
137 MemoryCommit null_buffer_commit; 136 MemoryCommit null_buffer_commit;
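
A side note on the header change: QuadArrayIndexBuffer and QuadStripIndexBuffer are only forward-declared here and owned through std::shared_ptr, which keeps their definitions in the .cpp. Unlike std::unique_ptr, a shared_ptr member can be destroyed while its pointee is still an incomplete type, because the deleter is captured at construction. A minimal sketch of the pattern:

    #include <memory>

    class QuadArrayIndexBuffer; // defined only in the .cpp

    class Runtime {
    public:
        Runtime(); // defined where QuadArrayIndexBuffer is complete
    private:
        std::shared_ptr<QuadArrayIndexBuffer> quad_array_index_buffer;
    };
    // Runtime's implicit destructor compiles here despite the incomplete pointee.
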
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
index 2c00979d7..1a316b6eb 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
@@ -245,7 +245,7 @@ QuadIndexedPass::QuadIndexedPass(const Device& device_, Scheduler& scheduler_,
245 UpdateDescriptorQueue& update_descriptor_queue_) 245 UpdateDescriptorQueue& update_descriptor_queue_)
246 : ComputePass(device_, descriptor_pool_, INPUT_OUTPUT_DESCRIPTOR_SET_BINDINGS, 246 : ComputePass(device_, descriptor_pool_, INPUT_OUTPUT_DESCRIPTOR_SET_BINDINGS,
247 INPUT_OUTPUT_DESCRIPTOR_UPDATE_TEMPLATE, INPUT_OUTPUT_BANK_INFO, 247 INPUT_OUTPUT_DESCRIPTOR_UPDATE_TEMPLATE, INPUT_OUTPUT_BANK_INFO,
248 COMPUTE_PUSH_CONSTANT_RANGE<sizeof(u32) * 2>, VULKAN_QUAD_INDEXED_COMP_SPV), 248 COMPUTE_PUSH_CONSTANT_RANGE<sizeof(u32) * 3>, VULKAN_QUAD_INDEXED_COMP_SPV),
249 scheduler{scheduler_}, staging_buffer_pool{staging_buffer_pool_}, 249 scheduler{scheduler_}, staging_buffer_pool{staging_buffer_pool_},
250 update_descriptor_queue{update_descriptor_queue_} {} 250 update_descriptor_queue{update_descriptor_queue_} {}
251 251
@@ -253,7 +253,7 @@ QuadIndexedPass::~QuadIndexedPass() = default;
253 253
254std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble( 254std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
255 Tegra::Engines::Maxwell3D::Regs::IndexFormat index_format, u32 num_vertices, u32 base_vertex, 255 Tegra::Engines::Maxwell3D::Regs::IndexFormat index_format, u32 num_vertices, u32 base_vertex,
256 VkBuffer src_buffer, u32 src_offset) { 256 VkBuffer src_buffer, u32 src_offset, bool is_strip) {
257 const u32 index_shift = [index_format] { 257 const u32 index_shift = [index_format] {
258 switch (index_format) { 258 switch (index_format) {
259 case Tegra::Engines::Maxwell3D::Regs::IndexFormat::UnsignedByte: 259 case Tegra::Engines::Maxwell3D::Regs::IndexFormat::UnsignedByte:
@@ -267,7 +267,7 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
267 return 2; 267 return 2;
268 }(); 268 }();
269 const u32 input_size = num_vertices << index_shift; 269 const u32 input_size = num_vertices << index_shift;
270 const u32 num_tri_vertices = (num_vertices / 4) * 6; 270 const u32 num_tri_vertices = (is_strip ? (num_vertices - 2) / 2 : num_vertices / 4) * 6;
271 271
272 const std::size_t staging_size = num_tri_vertices * sizeof(u32); 272 const std::size_t staging_size = num_tri_vertices * sizeof(u32);
273 const auto staging = staging_buffer_pool.Request(staging_size, MemoryUsage::DeviceLocal); 273 const auto staging = staging_buffer_pool.Request(staging_size, MemoryUsage::DeviceLocal);
@@ -278,8 +278,8 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
278 const void* const descriptor_data{update_descriptor_queue.UpdateData()}; 278 const void* const descriptor_data{update_descriptor_queue.UpdateData()};
279 279
280 scheduler.RequestOutsideRenderPassOperationContext(); 280 scheduler.RequestOutsideRenderPassOperationContext();
281 scheduler.Record([this, descriptor_data, num_tri_vertices, base_vertex, 281 scheduler.Record([this, descriptor_data, num_tri_vertices, base_vertex, index_shift,
282 index_shift](vk::CommandBuffer cmdbuf) { 282 is_strip](vk::CommandBuffer cmdbuf) {
283 static constexpr u32 DISPATCH_SIZE = 1024; 283 static constexpr u32 DISPATCH_SIZE = 1024;
284 static constexpr VkMemoryBarrier WRITE_BARRIER{ 284 static constexpr VkMemoryBarrier WRITE_BARRIER{
285 .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER, 285 .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
@@ -287,7 +287,7 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
287 .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT, 287 .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT,
288 .dstAccessMask = VK_ACCESS_INDEX_READ_BIT, 288 .dstAccessMask = VK_ACCESS_INDEX_READ_BIT,
289 }; 289 };
290 const std::array<u32, 2> push_constants{base_vertex, index_shift}; 290 const std::array<u32, 3> push_constants{base_vertex, index_shift, is_strip ? 1u : 0u};
291 const VkDescriptorSet set = descriptor_allocator.Commit(); 291 const VkDescriptorSet set = descriptor_allocator.Commit();
292 device.GetLogical().UpdateDescriptorSet(set, *descriptor_template, descriptor_data); 292 device.GetLogical().UpdateDescriptorSet(set, *descriptor_template, descriptor_data);
293 cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline); 293 cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
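
The push-constant range grows from two to three 32-bit values so the compute pass can tell the shader whether it is expanding a list or a strip. A host-side mirror of the GLSL block, shown here only to make the layout explicit:

    #include <cstdint>

    // Mirrors the shader's push constants: { base_vertex, index_shift, is_strip }.
    struct QuadIndexedPushConstants {
        std::uint32_t base_vertex;
        std::int32_t index_shift; // 0: uint8, 1: uint16, 2: uint32
        std::int32_t is_strip;    // 0: quads, 1: quad strip
    };
    static_assert(sizeof(QuadIndexedPushConstants) == sizeof(std::uint32_t) * 3,
                  "must match COMPUTE_PUSH_CONSTANT_RANGE<sizeof(u32) * 3>");
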
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.h b/src/video_core/renderer_vulkan/vk_compute_pass.h
index 5d32e3caf..c4c8fa081 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.h
@@ -74,7 +74,7 @@ public:
74 74
75 std::pair<VkBuffer, VkDeviceSize> Assemble( 75 std::pair<VkBuffer, VkDeviceSize> Assemble(
76 Tegra::Engines::Maxwell3D::Regs::IndexFormat index_format, u32 num_vertices, 76 Tegra::Engines::Maxwell3D::Regs::IndexFormat index_format, u32 num_vertices,
77 u32 base_vertex, VkBuffer src_buffer, u32 src_offset); 77 u32 base_vertex, VkBuffer src_buffer, u32 src_offset, bool is_strip);
78 78
79private: 79private:
80 Scheduler& scheduler; 80 Scheduler& scheduler;
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 4b7126c30..ac1eb9895 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -138,12 +138,16 @@ DrawParams MakeDrawParams(const MaxwellDrawState& draw_state, u32 num_instances,
138 .first_index = is_indexed ? draw_state.index_buffer.first : 0, 138 .first_index = is_indexed ? draw_state.index_buffer.first : 0,
139 .is_indexed = is_indexed, 139 .is_indexed = is_indexed,
140 }; 140 };
141 // 6 triangle vertices per quad, base vertex is part of the index
142 // See BindQuadIndexBuffer for more details
141 if (draw_state.topology == Maxwell::PrimitiveTopology::Quads) { 143 if (draw_state.topology == Maxwell::PrimitiveTopology::Quads) {
142 // 6 triangle vertices per quad, base vertex is part of the index
143 // See BindQuadArrayIndexBuffer for more details
144 params.num_vertices = (params.num_vertices / 4) * 6; 144 params.num_vertices = (params.num_vertices / 4) * 6;
145 params.base_vertex = 0; 145 params.base_vertex = 0;
146 params.is_indexed = true; 146 params.is_indexed = true;
147 } else if (draw_state.topology == Maxwell::PrimitiveTopology::QuadStrip) {
148 params.num_vertices = (params.num_vertices - 2) / 2 * 6;
149 params.base_vertex = 0;
150 params.is_indexed = true;
147 } 151 }
148 return params; 152 return params;
149} 153}
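
A quick worked example of the new QuadStrip branch: a non-indexed strip draw of 10 vertices contains (10 - 2) / 2 = 4 quads, so the rewritten draw issues 4 * 6 = 24 indexed vertices, with base_vertex zeroed because the prebuilt index buffer already bakes the offset in. In sketch form:

    #include <cassert>

    int main() {
        unsigned num_vertices = 10;                  // quad strip as submitted
        unsigned num_quads = (num_vertices - 2) / 2; // 4 quads
        unsigned emulated_vertices = num_quads * 6;  // 24 triangle-list indices
        assert(emulated_vertices == 24);
    }
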
diff --git a/src/yuzu/configuration/config.cpp b/src/yuzu/configuration/config.cpp
index 2ea4f367b..3e51426c8 100644
--- a/src/yuzu/configuration/config.cpp
+++ b/src/yuzu/configuration/config.cpp
@@ -941,7 +941,6 @@ void Config::ReadValues() {
941 ReadRendererValues(); 941 ReadRendererValues();
942 ReadAudioValues(); 942 ReadAudioValues();
943 ReadSystemValues(); 943 ReadSystemValues();
944 ReadMultiplayerValues();
945} 944}
946 945
947void Config::SavePlayerValue(std::size_t player_index) { 946void Config::SavePlayerValue(std::size_t player_index) {
@@ -1099,7 +1098,6 @@ void Config::SaveValues() {
1099 SaveRendererValues(); 1098 SaveRendererValues();
1100 SaveAudioValues(); 1099 SaveAudioValues();
1101 SaveSystemValues(); 1100 SaveSystemValues();
1102 SaveMultiplayerValues();
1103} 1101}
1104 1102
1105void Config::SaveAudioValues() { 1103void Config::SaveAudioValues() {