-rw-r--r--  src/common/CMakeLists.txt                        |   2
-rw-r--r--  src/common/make_unique_for_overwrite.h           |  25
-rw-r--r--  src/common/scratch_buffer.h                      |  95
-rw-r--r--  src/core/hle/kernel/k_shared_memory.cpp          |  69
-rw-r--r--  src/core/hle/kernel/k_shared_memory.h            |   8
-rw-r--r--  src/core/hle/kernel/kernel.cpp                   |  43
-rw-r--r--  src/core/hle/service/time/clock_types.h          |   1
-rw-r--r--  src/core/hle/service/time/time_sharedmemory.cpp  |  17
-rw-r--r--  src/core/hle/service/time/time_sharedmemory.h    |  87
-rw-r--r--  src/tests/CMakeLists.txt                         |   1
-rw-r--r--  src/tests/common/scratch_buffer.cpp              | 199
-rw-r--r--  src/video_core/buffer_cache/buffer_cache.h       |  11
-rw-r--r--  src/video_core/dma_pusher.cpp                    |  19
-rw-r--r--  src/video_core/dma_pusher.h                      |   8
-rw-r--r--  src/video_core/engines/engine_upload.cpp         |   4
-rw-r--r--  src/video_core/engines/engine_upload.h           |   7
-rw-r--r--  src/video_core/engines/maxwell_dma.cpp           |  34
-rw-r--r--  src/video_core/engines/maxwell_dma.h             |   8
-rw-r--r--  src/video_core/host1x/vic.cpp                    |   6
-rw-r--r--  src/video_core/host1x/vic.h                      |   7
20 files changed, 497 insertions, 154 deletions
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index 25b22a281..eb05e46a8 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -78,6 +78,7 @@ add_library(common STATIC
     logging/types.h
     lz4_compression.cpp
     lz4_compression.h
+    make_unique_for_overwrite.h
     math_util.h
     memory_detect.cpp
     memory_detect.h
@@ -101,6 +102,7 @@ add_library(common STATIC
     ${CMAKE_CURRENT_BINARY_DIR}/scm_rev.cpp
     scm_rev.h
     scope_exit.h
+    scratch_buffer.h
     settings.cpp
     settings.h
     settings_input.cpp
diff --git a/src/common/make_unique_for_overwrite.h b/src/common/make_unique_for_overwrite.h
new file mode 100644
index 000000000..c7413cf51
--- /dev/null
+++ b/src/common/make_unique_for_overwrite.h
@@ -0,0 +1,25 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <memory>
+#include <type_traits>
+
+namespace Common {
+
+template <class T>
+requires(!std::is_array_v<T>) std::unique_ptr<T> make_unique_for_overwrite() {
+    return std::unique_ptr<T>(new T);
+}
+
+template <class T>
+requires std::is_unbounded_array_v<T> std::unique_ptr<T> make_unique_for_overwrite(std::size_t n) {
+    return std::unique_ptr<T>(new std::remove_extent_t<T>[n]);
+}
+
+template <class T, class... Args>
+requires std::is_bounded_array_v<T>
+void make_unique_for_overwrite(Args&&...) = delete;
+
+} // namespace Common
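
For reference, a minimal usage sketch of the new helper (commentary, not part of the patch; FillScratch and its parameters are illustrative). Unlike std::make_unique<T[]>, which value-initializes (zero-fills) the array, make_unique_for_overwrite default-initializes it, so a caller that immediately overwrites the contents skips a redundant clearing pass:

    #include <cstring>

    #include "common/common_types.h"
    #include "common/make_unique_for_overwrite.h"

    // Hypothetical caller: the buffer is fully overwritten before any read,
    // so leaving its elements uninitialized is safe.
    void FillScratch(const u8* src, std::size_t len) {
        auto dst = Common::make_unique_for_overwrite<u8[]>(len);
        std::memcpy(dst.get(), src, len);
        // ... consume dst.get() ...
    }
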
diff --git a/src/common/scratch_buffer.h b/src/common/scratch_buffer.h
new file mode 100644
index 000000000..1245a5086
--- /dev/null
+++ b/src/common/scratch_buffer.h
@@ -0,0 +1,95 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/make_unique_for_overwrite.h"
+
+namespace Common {
+
+/**
+ * ScratchBuffer class
+ * This class creates a default initialized heap allocated buffer for cases such as intermediate
+ * buffers being copied into entirely, where value initializing members during allocation or resize
+ * is redundant.
+ */
+template <typename T>
+class ScratchBuffer {
+public:
+    ScratchBuffer() = default;
+
+    explicit ScratchBuffer(size_t initial_capacity)
+        : last_requested_size{initial_capacity}, buffer_capacity{initial_capacity},
+          buffer{Common::make_unique_for_overwrite<T[]>(initial_capacity)} {}
+
+    ~ScratchBuffer() = default;
+
+    /// This will only grow the buffer's capacity if size is greater than the current capacity.
+    /// The previously held data will remain intact.
+    void resize(size_t size) {
+        if (size > buffer_capacity) {
+            auto new_buffer = Common::make_unique_for_overwrite<T[]>(size);
+            std::move(buffer.get(), buffer.get() + buffer_capacity, new_buffer.get());
+            buffer = std::move(new_buffer);
+            buffer_capacity = size;
+        }
+        last_requested_size = size;
+    }
+
+    /// This will only grow the buffer's capacity if size is greater than the current capacity.
+    /// The previously held data will be destroyed if a reallocation occurs.
+    void resize_destructive(size_t size) {
+        if (size > buffer_capacity) {
+            buffer_capacity = size;
+            buffer = Common::make_unique_for_overwrite<T[]>(buffer_capacity);
+        }
+        last_requested_size = size;
+    }
+
+    [[nodiscard]] T* data() noexcept {
+        return buffer.get();
+    }
+
+    [[nodiscard]] const T* data() const noexcept {
+        return buffer.get();
+    }
+
+    [[nodiscard]] T* begin() noexcept {
+        return data();
+    }
+
+    [[nodiscard]] const T* begin() const noexcept {
+        return data();
+    }
+
+    [[nodiscard]] T* end() noexcept {
+        return data() + last_requested_size;
+    }
+
+    [[nodiscard]] const T* end() const noexcept {
+        return data() + last_requested_size;
+    }
+
+    [[nodiscard]] T& operator[](size_t i) {
+        return buffer[i];
+    }
+
+    [[nodiscard]] const T& operator[](size_t i) const {
+        return buffer[i];
+    }
+
+    [[nodiscard]] size_t size() const noexcept {
+        return last_requested_size;
+    }
+
+    [[nodiscard]] size_t capacity() const noexcept {
+        return buffer_capacity;
+    }
+
+private:
+    size_t last_requested_size{};
+    size_t buffer_capacity{};
+    std::unique_ptr<T[]> buffer{};
+};
+
+} // namespace Common
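
A short usage sketch of the class above (commentary, not part of the patch; Packet and Consume are placeholders). The buffer grows to the largest size ever requested and is then reused, so a hot loop stops allocating once it has warmed up; resize_destructive is the right call when the old contents are about to be overwritten anyway, while resize preserves them:

    #include <cstring>
    #include <span>

    #include "common/common_types.h"
    #include "common/scratch_buffer.h"

    struct Packet {
        const u8* data;
        size_t size;
    };

    void Consume(const u8* data, size_t size); // placeholder sink

    void DecodeAll(std::span<const Packet> packets) {
        Common::ScratchBuffer<u8> scratch;
        for (const auto& packet : packets) {
            // Old contents may be discarded on growth; they are overwritten below anyway.
            scratch.resize_destructive(packet.size);
            std::memcpy(scratch.data(), packet.data, packet.size);
            Consume(scratch.data(), scratch.size());
        }
    }
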
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp
index 10cd4c43d..0aa68103c 100644
--- a/src/core/hle/kernel/k_shared_memory.cpp
+++ b/src/core/hle/kernel/k_shared_memory.cpp
@@ -6,6 +6,7 @@
6#include "core/hle/kernel/k_page_table.h" 6#include "core/hle/kernel/k_page_table.h"
7#include "core/hle/kernel/k_scoped_resource_reservation.h" 7#include "core/hle/kernel/k_scoped_resource_reservation.h"
8#include "core/hle/kernel/k_shared_memory.h" 8#include "core/hle/kernel/k_shared_memory.h"
9#include "core/hle/kernel/k_system_resource.h"
9#include "core/hle/kernel/kernel.h" 10#include "core/hle/kernel/kernel.h"
10#include "core/hle/kernel/svc_results.h" 11#include "core/hle/kernel/svc_results.h"
11 12
@@ -18,19 +19,19 @@ KSharedMemory::~KSharedMemory() {
 }
 
 Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
-                                 KPageGroup&& page_list_, Svc::MemoryPermission owner_permission_,
-                                 Svc::MemoryPermission user_permission_, PAddr physical_address_,
-                                 std::size_t size_, std::string name_) {
+                                 Svc::MemoryPermission owner_permission_,
+                                 Svc::MemoryPermission user_permission_, std::size_t size_,
+                                 std::string name_) {
     // Set members.
     owner_process = owner_process_;
     device_memory = &device_memory_;
-    page_list = std::move(page_list_);
     owner_permission = owner_permission_;
     user_permission = user_permission_;
-    physical_address = physical_address_;
-    size = size_;
+    size = Common::AlignUp(size_, PageSize);
     name = std::move(name_);
 
+    const size_t num_pages = Common::DivideUp(size, PageSize);
+
     // Get the resource limit.
     KResourceLimit* reslimit = kernel.GetSystemResourceLimit();
 
@@ -39,6 +40,17 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
                                                   size_);
     R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
 
+    // Allocate the memory.
+
+    //! HACK: Open continuous mapping from sysmodule pool.
+    auto option = KMemoryManager::EncodeOption(KMemoryManager::Pool::Secure,
+                                               KMemoryManager::Direction::FromBack);
+    physical_address = kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, option);
+    R_UNLESS(physical_address != 0, ResultOutOfMemory);
+
+    //! Insert the result into our page group.
+    page_group.emplace(physical_address, num_pages);
+
     // Commit our reservation.
     memory_reservation.Commit();
 
@@ -50,12 +62,23 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
     is_initialized = true;
 
     // Clear all pages in the memory.
-    std::memset(device_memory_.GetPointer<void>(physical_address_), 0, size_);
+    for (const auto& block : page_group->Nodes()) {
+        std::memset(device_memory_.GetPointer<void>(block.GetAddress()), 0, block.GetSize());
+    }
 
     return ResultSuccess;
 }
 
 void KSharedMemory::Finalize() {
+    // Close and finalize the page group.
+    // page_group->Close();
+    // page_group->Finalize();
+
+    //! HACK: Manually close.
+    for (const auto& block : page_group->Nodes()) {
+        kernel.MemoryManager().Close(block.GetAddress(), block.GetNumPages());
+    }
+
     // Release the memory reservation.
     resource_limit->Release(LimitableResource::PhysicalMemoryMax, size);
     resource_limit->Close();
@@ -65,32 +88,28 @@ void KSharedMemory::Finalize() {
 }
 
 Result KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t map_size,
-                          Svc::MemoryPermission permissions) {
-    const u64 page_count{(map_size + PageSize - 1) / PageSize};
-
-    if (page_list.GetNumPages() != page_count) {
-        UNIMPLEMENTED_MSG("Page count does not match");
-    }
+                          Svc::MemoryPermission map_perm) {
+    // Validate the size.
+    R_UNLESS(size == map_size, ResultInvalidSize);
 
-    const Svc::MemoryPermission expected =
+    // Validate the permission.
+    const Svc::MemoryPermission test_perm =
         &target_process == owner_process ? owner_permission : user_permission;
-
-    if (permissions != expected) {
-        UNIMPLEMENTED_MSG("Permission does not match");
+    if (test_perm == Svc::MemoryPermission::DontCare) {
+        ASSERT(map_perm == Svc::MemoryPermission::Read || map_perm == Svc::MemoryPermission::Write);
+    } else {
+        R_UNLESS(map_perm == test_perm, ResultInvalidNewMemoryPermission);
     }
 
-    return target_process.PageTable().MapPages(address, page_list, KMemoryState::Shared,
-                                               ConvertToKMemoryPermission(permissions));
+    return target_process.PageTable().MapPages(address, *page_group, KMemoryState::Shared,
+                                               ConvertToKMemoryPermission(map_perm));
 }
 
 Result KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size) {
-    const u64 page_count{(unmap_size + PageSize - 1) / PageSize};
-
-    if (page_list.GetNumPages() != page_count) {
-        UNIMPLEMENTED_MSG("Page count does not match");
-    }
+    // Validate the size.
+    R_UNLESS(size == unmap_size, ResultInvalidSize);
 
-    return target_process.PageTable().UnmapPages(address, page_list, KMemoryState::Shared);
+    return target_process.PageTable().UnmapPages(address, *page_group, KMemoryState::Shared);
 }
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_shared_memory.h b/src/core/hle/kernel/k_shared_memory.h
index a96c55a3e..8b29f0b4a 100644
--- a/src/core/hle/kernel/k_shared_memory.h
+++ b/src/core/hle/kernel/k_shared_memory.h
@@ -3,6 +3,7 @@
 
 #pragma once
 
+#include <optional>
 #include <string>
 
 #include "common/common_types.h"
@@ -26,9 +27,8 @@ public:
     ~KSharedMemory() override;
 
     Result Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
-                      KPageGroup&& page_list_, Svc::MemoryPermission owner_permission_,
-                      Svc::MemoryPermission user_permission_, PAddr physical_address_,
-                      std::size_t size_, std::string name_);
+                      Svc::MemoryPermission owner_permission_,
+                      Svc::MemoryPermission user_permission_, std::size_t size_, std::string name_);
 
     /**
      * Maps a shared memory block to an address in the target process' address space
@@ -76,7 +76,7 @@ public:
 private:
     Core::DeviceMemory* device_memory{};
     KProcess* owner_process{};
-    KPageGroup page_list;
+    std::optional<KPageGroup> page_group{};
     Svc::MemoryPermission owner_permission{};
     Svc::MemoryPermission user_permission{};
     PAddr physical_address{};
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index b75bac5df..1fb25f221 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -94,6 +94,7 @@ struct KernelCore::Impl {
                                        pt_heap_region.GetSize());
         }
 
+        InitializeHackSharedMemory();
         RegisterHostThread(nullptr);
 
         default_service_thread = &CreateServiceThread(kernel, "DefaultServiceThread");
@@ -726,14 +727,14 @@ struct KernelCore::Impl {
     }
 
     void InitializeMemoryLayout() {
-        const auto system_pool = memory_layout->GetKernelSystemPoolRegionPhysicalExtents();
-
         // Initialize the memory manager.
         memory_manager = std::make_unique<KMemoryManager>(system);
         const auto& management_region = memory_layout->GetPoolManagementRegion();
         ASSERT(management_region.GetEndAddress() != 0);
         memory_manager->Initialize(management_region.GetAddress(), management_region.GetSize());
+    }
 
+    void InitializeHackSharedMemory() {
         // Setup memory regions for emulated processes
         // TODO(bunnei): These should not be hardcoded regions initialized within the kernel
         constexpr std::size_t hid_size{0x40000};
@@ -742,39 +743,23 @@ struct KernelCore::Impl {
         constexpr std::size_t time_size{0x1000};
         constexpr std::size_t hidbus_size{0x1000};
 
-        const PAddr hid_phys_addr{system_pool.GetAddress()};
-        const PAddr font_phys_addr{system_pool.GetAddress() + hid_size};
-        const PAddr irs_phys_addr{system_pool.GetAddress() + hid_size + font_size};
-        const PAddr time_phys_addr{system_pool.GetAddress() + hid_size + font_size + irs_size};
-        const PAddr hidbus_phys_addr{system_pool.GetAddress() + hid_size + font_size + irs_size +
-                                     time_size};
-
         hid_shared_mem = KSharedMemory::Create(system.Kernel());
         font_shared_mem = KSharedMemory::Create(system.Kernel());
         irs_shared_mem = KSharedMemory::Create(system.Kernel());
         time_shared_mem = KSharedMemory::Create(system.Kernel());
         hidbus_shared_mem = KSharedMemory::Create(system.Kernel());
 
-        hid_shared_mem->Initialize(system.DeviceMemory(), nullptr,
-                                   {hid_phys_addr, hid_size / PageSize},
-                                   Svc::MemoryPermission::None, Svc::MemoryPermission::Read,
-                                   hid_phys_addr, hid_size, "HID:SharedMemory");
-        font_shared_mem->Initialize(system.DeviceMemory(), nullptr,
-                                    {font_phys_addr, font_size / PageSize},
-                                    Svc::MemoryPermission::None, Svc::MemoryPermission::Read,
-                                    font_phys_addr, font_size, "Font:SharedMemory");
-        irs_shared_mem->Initialize(system.DeviceMemory(), nullptr,
-                                   {irs_phys_addr, irs_size / PageSize},
-                                   Svc::MemoryPermission::None, Svc::MemoryPermission::Read,
-                                   irs_phys_addr, irs_size, "IRS:SharedMemory");
-        time_shared_mem->Initialize(system.DeviceMemory(), nullptr,
-                                    {time_phys_addr, time_size / PageSize},
-                                    Svc::MemoryPermission::None, Svc::MemoryPermission::Read,
-                                    time_phys_addr, time_size, "Time:SharedMemory");
-        hidbus_shared_mem->Initialize(system.DeviceMemory(), nullptr,
-                                      {hidbus_phys_addr, hidbus_size / PageSize},
-                                      Svc::MemoryPermission::None, Svc::MemoryPermission::Read,
-                                      hidbus_phys_addr, hidbus_size, "HidBus:SharedMemory");
+        hid_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None,
+                                   Svc::MemoryPermission::Read, hid_size, "HID:SharedMemory");
+        font_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None,
+                                    Svc::MemoryPermission::Read, font_size, "Font:SharedMemory");
+        irs_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None,
+                                   Svc::MemoryPermission::Read, irs_size, "IRS:SharedMemory");
+        time_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None,
+                                    Svc::MemoryPermission::Read, time_size, "Time:SharedMemory");
+        hidbus_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None,
+                                      Svc::MemoryPermission::Read, hidbus_size,
+                                      "HidBus:SharedMemory");
     }
 
     KClientPort* CreateNamedServicePort(std::string name) {
diff --git a/src/core/hle/service/time/clock_types.h b/src/core/hle/service/time/clock_types.h
index ef070f32f..ed1eb5b2d 100644
--- a/src/core/hle/service/time/clock_types.h
+++ b/src/core/hle/service/time/clock_types.h
@@ -49,6 +49,7 @@ struct SteadyClockContext {
 static_assert(sizeof(SteadyClockContext) == 0x18, "SteadyClockContext is incorrect size");
 static_assert(std::is_trivially_copyable_v<SteadyClockContext>,
               "SteadyClockContext must be trivially copyable");
+using StandardSteadyClockTimePointType = SteadyClockContext;
 
 struct SystemClockContext {
     s64 offset;
diff --git a/src/core/hle/service/time/time_sharedmemory.cpp b/src/core/hle/service/time/time_sharedmemory.cpp
index a3aa0e77f..ff53a7d6f 100644
--- a/src/core/hle/service/time/time_sharedmemory.cpp
+++ b/src/core/hle/service/time/time_sharedmemory.cpp
@@ -26,23 +26,24 @@ void SharedMemory::SetupStandardSteadyClock(const Common::UUID& clock_source_id,
     const Clock::SteadyClockContext context{
         static_cast<u64>(current_time_point.nanoseconds - ticks_time_span.nanoseconds),
         clock_source_id};
-    shared_memory_format.standard_steady_clock_timepoint.StoreData(
-        system.Kernel().GetTimeSharedMem().GetPointer(), context);
+    StoreToLockFreeAtomicType(&GetFormat()->standard_steady_clock_timepoint, context);
 }
 
 void SharedMemory::UpdateLocalSystemClockContext(const Clock::SystemClockContext& context) {
-    shared_memory_format.standard_local_system_clock_context.StoreData(
-        system.Kernel().GetTimeSharedMem().GetPointer(), context);
+    StoreToLockFreeAtomicType(&GetFormat()->standard_local_system_clock_context, context);
 }
 
 void SharedMemory::UpdateNetworkSystemClockContext(const Clock::SystemClockContext& context) {
-    shared_memory_format.standard_network_system_clock_context.StoreData(
-        system.Kernel().GetTimeSharedMem().GetPointer(), context);
+    StoreToLockFreeAtomicType(&GetFormat()->standard_network_system_clock_context, context);
 }
 
 void SharedMemory::SetAutomaticCorrectionEnabled(bool is_enabled) {
-    shared_memory_format.standard_user_system_clock_automatic_correction.StoreData(
-        system.Kernel().GetTimeSharedMem().GetPointer(), is_enabled);
+    StoreToLockFreeAtomicType(
+        &GetFormat()->is_standard_user_system_clock_automatic_correction_enabled, is_enabled);
+}
+
+SharedMemory::Format* SharedMemory::GetFormat() {
+    return reinterpret_cast<SharedMemory::Format*>(system.Kernel().GetTimeSharedMem().GetPointer());
 }
 
 } // namespace Service::Time
diff --git a/src/core/hle/service/time/time_sharedmemory.h b/src/core/hle/service/time/time_sharedmemory.h
index 561685acd..044a4d24e 100644
--- a/src/core/hle/service/time/time_sharedmemory.h
+++ b/src/core/hle/service/time/time_sharedmemory.h
@@ -10,45 +10,68 @@
 
 namespace Service::Time {
 
+// Note: this type is not safe for concurrent writes.
+template <typename T>
+struct LockFreeAtomicType {
+    u32 counter_;
+    std::array<T, 2> value_;
+};
+
+template <typename T>
+static inline void StoreToLockFreeAtomicType(LockFreeAtomicType<T>* p, const T& value) {
+    // Get the current counter.
+    auto counter = p->counter_;
+
+    // Increment the counter.
+    ++counter;
+
+    // Store the updated value.
+    p->value_[counter % 2] = value;
+
+    // Fence memory.
+    std::atomic_thread_fence(std::memory_order_release);
+
+    // Set the updated counter.
+    p->counter_ = counter;
+}
+
+template <typename T>
+static inline T LoadFromLockFreeAtomicType(const LockFreeAtomicType<T>* p) {
+    while (true) {
+        // Get the counter.
+        auto counter = p->counter_;
+
+        // Get the value.
+        auto value = p->value_[counter % 2];
+
+        // Fence memory.
+        std::atomic_thread_fence(std::memory_order_acquire);
+
+        // Check that the counter matches.
+        if (counter == p->counter_) {
+            return value;
+        }
+    }
+}
+
 class SharedMemory final {
 public:
     explicit SharedMemory(Core::System& system_);
     ~SharedMemory();
 
-    // TODO(ogniK): We have to properly simulate memory barriers, how are we going to do this?
-    template <typename T, std::size_t Offset>
-    struct MemoryBarrier {
-        static_assert(std::is_trivially_copyable_v<T>, "T must be trivially copyable");
-        u32_le read_attempt{};
-        std::array<T, 2> data{};
-
-        // These are not actually memory barriers at the moment as we don't have multicore and all
-        // HLE is mutexed. This will need to properly be implemented when we start updating the time
-        // points on threads. As of right now, we'll be updated both values synchronously and just
-        // incrementing the read_attempt to indicate that we waited.
-        void StoreData(u8* shared_memory, T data_to_store) {
-            std::memcpy(this, shared_memory + Offset, sizeof(*this));
-            read_attempt++;
-            data[read_attempt & 1] = data_to_store;
-            std::memcpy(shared_memory + Offset, this, sizeof(*this));
-        }
-
-        // For reading we're just going to read the last stored value. If there was no value stored
-        // it will just end up reading an empty value as intended.
-        T ReadData(u8* shared_memory) {
-            std::memcpy(this, shared_memory + Offset, sizeof(*this));
-            return data[(read_attempt - 1) & 1];
-        }
-    };
-
     // Shared memory format
     struct Format {
-        MemoryBarrier<Clock::SteadyClockContext, 0x0> standard_steady_clock_timepoint;
-        MemoryBarrier<Clock::SystemClockContext, 0x38> standard_local_system_clock_context;
-        MemoryBarrier<Clock::SystemClockContext, 0x80> standard_network_system_clock_context;
-        MemoryBarrier<bool, 0xc8> standard_user_system_clock_automatic_correction;
-        u32_le format_version;
+        LockFreeAtomicType<Clock::StandardSteadyClockTimePointType> standard_steady_clock_timepoint;
+        LockFreeAtomicType<Clock::SystemClockContext> standard_local_system_clock_context;
+        LockFreeAtomicType<Clock::SystemClockContext> standard_network_system_clock_context;
+        LockFreeAtomicType<bool> is_standard_user_system_clock_automatic_correction_enabled;
+        u32 format_version;
     };
+    static_assert(offsetof(Format, standard_steady_clock_timepoint) == 0x0);
+    static_assert(offsetof(Format, standard_local_system_clock_context) == 0x38);
+    static_assert(offsetof(Format, standard_network_system_clock_context) == 0x80);
+    static_assert(offsetof(Format, is_standard_user_system_clock_automatic_correction_enabled) ==
+                  0xc8);
     static_assert(sizeof(Format) == 0xd8, "Format is an invalid size");
 
     void SetupStandardSteadyClock(const Common::UUID& clock_source_id,
@@ -56,10 +79,10 @@ public:
     void UpdateLocalSystemClockContext(const Clock::SystemClockContext& context);
     void UpdateNetworkSystemClockContext(const Clock::SystemClockContext& context);
     void SetAutomaticCorrectionEnabled(bool is_enabled);
+    Format* GetFormat();
 
 private:
     Core::System& system;
-    Format shared_memory_format{};
 };
 
 } // namespace Service::Time
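
The LockFreeAtomicType pair above implements a small single-writer seqlock: the writer bumps the counter, writes into the ping-pong slot, issues a release fence, then publishes the counter; a reader retries until the counter is unchanged across its read. A minimal reader sketch (commentary, not part of the patch; ReadLocalClockContext is illustrative):

    #include "core/hle/service/time/clock_types.h"
    #include "core/hle/service/time/time_sharedmemory.h"

    namespace Service::Time {

    // Hypothetical reader of the shared-memory region: returns the most recently
    // published local system clock context, retrying if a write races the read.
    Clock::SystemClockContext ReadLocalClockContext(SharedMemory& shmem) {
        return LoadFromLockFreeAtomicType(&shmem.GetFormat()->standard_local_system_clock_context);
    }

    } // namespace Service::Time
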
diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt
index 348d1edf4..6a4022e45 100644
--- a/src/tests/CMakeLists.txt
+++ b/src/tests/CMakeLists.txt
@@ -8,6 +8,7 @@ add_executable(tests
     common/host_memory.cpp
     common/param_package.cpp
     common/ring_buffer.cpp
+    common/scratch_buffer.cpp
     common/unique_function.cpp
     core/core_timing.cpp
     core/internal_network/network.cpp
diff --git a/src/tests/common/scratch_buffer.cpp b/src/tests/common/scratch_buffer.cpp
new file mode 100644
index 000000000..b602c8d0a
--- /dev/null
+++ b/src/tests/common/scratch_buffer.cpp
@@ -0,0 +1,199 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <algorithm>
+#include <array>
+#include <span>
+#include <catch2/catch.hpp>
+#include "common/common_types.h"
+#include "common/scratch_buffer.h"
+
+namespace Common {
+
+TEST_CASE("ScratchBuffer: Basic Test", "[common]") {
+    ScratchBuffer<u8> buf;
+
+    REQUIRE(buf.size() == 0U);
+    REQUIRE(buf.capacity() == 0U);
+
+    std::array<u8, 10> payload;
+    payload.fill(66);
+
+    buf.resize(payload.size());
+    REQUIRE(buf.size() == payload.size());
+    REQUIRE(buf.capacity() == payload.size());
+
+    std::memcpy(buf.data(), payload.data(), payload.size());
+    for (size_t i = 0; i < payload.size(); ++i) {
+        REQUIRE(buf[i] == payload[i]);
+    }
+}
+
+TEST_CASE("ScratchBuffer: resize_destructive Grow", "[common]") {
+    std::array<u8, 10> payload;
+    payload.fill(66);
+
+    ScratchBuffer<u8> buf(payload.size());
+    REQUIRE(buf.size() == payload.size());
+    REQUIRE(buf.capacity() == payload.size());
+
+    // Increasing the size should reallocate the buffer
+    buf.resize_destructive(payload.size() * 2);
+    REQUIRE(buf.size() == payload.size() * 2);
+    REQUIRE(buf.capacity() == payload.size() * 2);
+
+    // Since the buffer is not value initialized, reading its data will be garbage
+}
+
+TEST_CASE("ScratchBuffer: resize_destructive Shrink", "[common]") {
+    std::array<u8, 10> payload;
+    payload.fill(66);
+
+    ScratchBuffer<u8> buf(payload.size());
+    REQUIRE(buf.size() == payload.size());
+    REQUIRE(buf.capacity() == payload.size());
+
+    std::memcpy(buf.data(), payload.data(), payload.size());
+    for (size_t i = 0; i < payload.size(); ++i) {
+        REQUIRE(buf[i] == payload[i]);
+    }
+
+    // Decreasing the size should not cause a buffer reallocation
+    // This can be tested by ensuring the buffer capacity and data has not changed,
+    buf.resize_destructive(1U);
+    REQUIRE(buf.size() == 1U);
+    REQUIRE(buf.capacity() == payload.size());
+
+    for (size_t i = 0; i < payload.size(); ++i) {
+        REQUIRE(buf[i] == payload[i]);
+    }
+}
+
+TEST_CASE("ScratchBuffer: resize Grow u8", "[common]") {
+    std::array<u8, 10> payload;
+    payload.fill(66);
+
+    ScratchBuffer<u8> buf(payload.size());
+    REQUIRE(buf.size() == payload.size());
+    REQUIRE(buf.capacity() == payload.size());
+
+    std::memcpy(buf.data(), payload.data(), payload.size());
+    for (size_t i = 0; i < payload.size(); ++i) {
+        REQUIRE(buf[i] == payload[i]);
+    }
+
+    // Increasing the size should reallocate the buffer
+    buf.resize(payload.size() * 2);
+    REQUIRE(buf.size() == payload.size() * 2);
+    REQUIRE(buf.capacity() == payload.size() * 2);
+
+    // resize() keeps the previous data intact
+    for (size_t i = 0; i < payload.size(); ++i) {
+        REQUIRE(buf[i] == payload[i]);
+    }
+}
+
+TEST_CASE("ScratchBuffer: resize Grow u64", "[common]") {
+    std::array<u64, 10> payload;
+    payload.fill(6666);
+
+    ScratchBuffer<u64> buf(payload.size());
+    REQUIRE(buf.size() == payload.size());
+    REQUIRE(buf.capacity() == payload.size());
+
+    std::memcpy(buf.data(), payload.data(), payload.size() * sizeof(u64));
+    for (size_t i = 0; i < payload.size(); ++i) {
+        REQUIRE(buf[i] == payload[i]);
+    }
+
+    // Increasing the size should reallocate the buffer
+    buf.resize(payload.size() * 2);
+    REQUIRE(buf.size() == payload.size() * 2);
+    REQUIRE(buf.capacity() == payload.size() * 2);
+
+    // resize() keeps the previous data intact
+    for (size_t i = 0; i < payload.size(); ++i) {
+        REQUIRE(buf[i] == payload[i]);
+    }
+}
+
+TEST_CASE("ScratchBuffer: resize Shrink", "[common]") {
+    std::array<u8, 10> payload;
+    payload.fill(66);
+
+    ScratchBuffer<u8> buf(payload.size());
+    REQUIRE(buf.size() == payload.size());
+    REQUIRE(buf.capacity() == payload.size());
+
+    std::memcpy(buf.data(), payload.data(), payload.size());
+    for (size_t i = 0; i < payload.size(); ++i) {
+        REQUIRE(buf[i] == payload[i]);
+    }
+
+    // Decreasing the size should not cause a buffer reallocation
+    // This can be tested by ensuring the buffer capacity and data has not changed,
+    buf.resize(1U);
+    REQUIRE(buf.size() == 1U);
+    REQUIRE(buf.capacity() == payload.size());
+
+    for (size_t i = 0; i < payload.size(); ++i) {
+        REQUIRE(buf[i] == payload[i]);
+    }
+}
+
+TEST_CASE("ScratchBuffer: Span Size", "[common]") {
+    std::array<u8, 10> payload;
+    payload.fill(66);
+
+    ScratchBuffer<u8> buf(payload.size());
+    REQUIRE(buf.size() == payload.size());
+    REQUIRE(buf.capacity() == payload.size());
+
+    std::memcpy(buf.data(), payload.data(), payload.size());
+    for (size_t i = 0; i < payload.size(); ++i) {
+        REQUIRE(buf[i] == payload[i]);
+    }
+
+    buf.resize(3U);
+    REQUIRE(buf.size() == 3U);
+    REQUIRE(buf.capacity() == payload.size());
+
+    const auto buf_span = std::span<u8>(buf);
+    // The span size is the last requested size of the buffer, not its capacity
+    REQUIRE(buf_span.size() == buf.size());
+
+    for (size_t i = 0; i < buf_span.size(); ++i) {
+        REQUIRE(buf_span[i] == buf[i]);
+        REQUIRE(buf_span[i] == payload[i]);
+    }
+}
+
+TEST_CASE("ScratchBuffer: Span Writes", "[common]") {
+    std::array<u8, 10> payload;
+    payload.fill(66);
+
+    ScratchBuffer<u8> buf(payload.size());
+    REQUIRE(buf.size() == payload.size());
+    REQUIRE(buf.capacity() == payload.size());
+
+    std::memcpy(buf.data(), payload.data(), payload.size());
+    for (size_t i = 0; i < payload.size(); ++i) {
+        REQUIRE(buf[i] == payload[i]);
+    }
+
+    buf.resize(3U);
+    REQUIRE(buf.size() == 3U);
+    REQUIRE(buf.capacity() == payload.size());
+
+    const auto buf_span = std::span<u8>(buf);
+    REQUIRE(buf_span.size() == buf.size());
+
+    for (size_t i = 0; i < buf_span.size(); ++i) {
+        const auto new_value = static_cast<u8>(i + 1U);
+        // Writes to a span of the scratch buffer will propogate to the buffer itself
+        buf_span[i] = new_value;
+        REQUIRE(buf[i] == new_value);
+    }
+}
+
+} // namespace Common
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 502b4d90a..6c8d98946 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -20,6 +20,7 @@
20#include "common/lru_cache.h" 20#include "common/lru_cache.h"
21#include "common/microprofile.h" 21#include "common/microprofile.h"
22#include "common/polyfill_ranges.h" 22#include "common/polyfill_ranges.h"
23#include "common/scratch_buffer.h"
23#include "common/settings.h" 24#include "common/settings.h"
24#include "core/memory.h" 25#include "core/memory.h"
25#include "video_core/buffer_cache/buffer_base.h" 26#include "video_core/buffer_cache/buffer_base.h"
@@ -422,8 +423,7 @@ private:
     IntervalSet common_ranges;
     std::deque<IntervalSet> committed_ranges;
 
-    size_t immediate_buffer_capacity = 0;
-    std::unique_ptr<u8[]> immediate_buffer_alloc;
+    Common::ScratchBuffer<u8> immediate_buffer_alloc;
 
     struct LRUItemParams {
         using ObjectType = BufferId;
@@ -1926,11 +1926,8 @@ std::span<const u8> BufferCache<P>::ImmediateBufferWithData(VAddr cpu_addr, size
 
 template <class P>
 std::span<u8> BufferCache<P>::ImmediateBuffer(size_t wanted_capacity) {
-    if (wanted_capacity > immediate_buffer_capacity) {
-        immediate_buffer_capacity = wanted_capacity;
-        immediate_buffer_alloc = std::make_unique<u8[]>(wanted_capacity);
-    }
-    return std::span<u8>(immediate_buffer_alloc.get(), wanted_capacity);
+    immediate_buffer_alloc.resize_destructive(wanted_capacity);
+    return std::span<u8>(immediate_buffer_alloc.data(), wanted_capacity);
 }
 
 template <class P>
diff --git a/src/video_core/dma_pusher.cpp b/src/video_core/dma_pusher.cpp
index 9835e3ac1..322de2606 100644
--- a/src/video_core/dma_pusher.cpp
+++ b/src/video_core/dma_pusher.cpp
@@ -56,7 +56,7 @@ bool DmaPusher::Step() {
 
     if (command_list.prefetch_command_list.size()) {
         // Prefetched command list from nvdrv, used for things like synchronization
-        command_headers = std::move(command_list.prefetch_command_list);
+        ProcessCommands(command_list.prefetch_command_list);
         dma_pushbuffer.pop();
     } else {
         const CommandListHeader command_list_header{
@@ -74,7 +74,7 @@ bool DmaPusher::Step() {
         }
 
         // Push buffer non-empty, read a word
-        command_headers.resize(command_list_header.size);
+        command_headers.resize_destructive(command_list_header.size);
         if (Settings::IsGPULevelHigh()) {
             memory_manager.ReadBlock(dma_get, command_headers.data(),
                                      command_list_header.size * sizeof(u32));
@@ -82,16 +82,21 @@ bool DmaPusher::Step() {
             memory_manager.ReadBlockUnsafe(dma_get, command_headers.data(),
                                            command_list_header.size * sizeof(u32));
         }
+        ProcessCommands(command_headers);
     }
-    for (std::size_t index = 0; index < command_headers.size();) {
-        const CommandHeader& command_header = command_headers[index];
+
+    return true;
+}
+
+void DmaPusher::ProcessCommands(std::span<const CommandHeader> commands) {
+    for (std::size_t index = 0; index < commands.size();) {
+        const CommandHeader& command_header = commands[index];
 
         if (dma_state.method_count) {
             // Data word of methods command
             if (dma_state.non_incrementing) {
                 const u32 max_write = static_cast<u32>(
-                    std::min<std::size_t>(index + dma_state.method_count, command_headers.size()) -
-                    index);
+                    std::min<std::size_t>(index + dma_state.method_count, commands.size()) - index);
                 CallMultiMethod(&command_header.argument, max_write);
                 dma_state.method_count -= max_write;
                 dma_state.is_last_call = true;
@@ -142,8 +147,6 @@ bool DmaPusher::Step() {
         }
         index++;
     }
-
-    return true;
 }
 
 void DmaPusher::SetState(const CommandHeader& command_header) {
diff --git a/src/video_core/dma_pusher.h b/src/video_core/dma_pusher.h
index 938f0f11c..6f00de937 100644
--- a/src/video_core/dma_pusher.h
+++ b/src/video_core/dma_pusher.h
@@ -4,11 +4,13 @@
 #pragma once
 
 #include <array>
+#include <span>
 #include <vector>
 #include <queue>
 
 #include "common/bit_field.h"
 #include "common/common_types.h"
+#include "common/scratch_buffer.h"
 #include "video_core/engines/engine_interface.h"
 #include "video_core/engines/puller.h"
 
@@ -136,13 +138,15 @@ private:
     static constexpr u32 non_puller_methods = 0x40;
     static constexpr u32 max_subchannels = 8;
     bool Step();
+    void ProcessCommands(std::span<const CommandHeader> commands);
 
     void SetState(const CommandHeader& command_header);
 
     void CallMethod(u32 argument) const;
     void CallMultiMethod(const u32* base_start, u32 num_methods) const;
 
-    std::vector<CommandHeader> command_headers; ///< Buffer for list of commands fetched at once
+    Common::ScratchBuffer<CommandHeader>
+        command_headers; ///< Buffer for list of commands fetched at once
 
     std::queue<CommandList> dma_pushbuffer; ///< Queue of command lists to be processed
     std::size_t dma_pushbuffer_subindex{}; ///< Index within a command list within the pushbuffer
@@ -159,7 +163,7 @@ private:
     DmaState dma_state{};
     bool dma_increment_once{};
 
-    bool ib_enable{true}; ///< IB mode enabled
+    const bool ib_enable{true}; ///< IB mode enabled
 
     std::array<Engines::EngineInterface*, max_subchannels> subchannels{};
 
diff --git a/src/video_core/engines/engine_upload.cpp b/src/video_core/engines/engine_upload.cpp
index e4f8331ab..cea1dd8b0 100644
--- a/src/video_core/engines/engine_upload.cpp
+++ b/src/video_core/engines/engine_upload.cpp
@@ -24,7 +24,7 @@ void State::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) {
 void State::ProcessExec(const bool is_linear_) {
     write_offset = 0;
     copy_size = regs.line_length_in * regs.line_count;
-    inner_buffer.resize(copy_size);
+    inner_buffer.resize_destructive(copy_size);
     is_linear = is_linear_;
 }
 
@@ -70,7 +70,7 @@ void State::ProcessData(std::span<const u8> read_buffer) {
         const std::size_t dst_size = Tegra::Texture::CalculateSize(
             true, bytes_per_pixel, width, regs.dest.height, regs.dest.depth,
             regs.dest.BlockHeight(), regs.dest.BlockDepth());
-        tmp_buffer.resize(dst_size);
+        tmp_buffer.resize_destructive(dst_size);
         memory_manager.ReadBlock(address, tmp_buffer.data(), dst_size);
         Tegra::Texture::SwizzleSubrect(tmp_buffer, read_buffer, bytes_per_pixel, width,
                                        regs.dest.height, regs.dest.depth, x_offset, regs.dest.y,
diff --git a/src/video_core/engines/engine_upload.h b/src/video_core/engines/engine_upload.h
index 94fafd9dc..7242d2529 100644
--- a/src/video_core/engines/engine_upload.h
+++ b/src/video_core/engines/engine_upload.h
@@ -4,9 +4,10 @@
 #pragma once
 
 #include <span>
-#include <vector>
+
 #include "common/bit_field.h"
 #include "common/common_types.h"
+#include "common/scratch_buffer.h"
 
 namespace Tegra {
 class MemoryManager;
@@ -73,8 +74,8 @@ private:
 
     u32 write_offset = 0;
     u32 copy_size = 0;
-    std::vector<u8> inner_buffer;
-    std::vector<u8> tmp_buffer;
+    Common::ScratchBuffer<u8> inner_buffer;
+    Common::ScratchBuffer<u8> tmp_buffer;
     bool is_linear = false;
     Registers& regs;
     MemoryManager& memory_manager;
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index a189e60ae..f73d7bf0f 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -184,12 +184,8 @@ void MaxwellDMA::CopyBlockLinearToPitch() {
     const size_t src_size =
         CalculateSize(true, bytes_per_pixel, width, height, depth, block_height, block_depth);
 
-    if (read_buffer.size() < src_size) {
-        read_buffer.resize(src_size);
-    }
-    if (write_buffer.size() < dst_size) {
-        write_buffer.resize(dst_size);
-    }
+    read_buffer.resize_destructive(src_size);
+    write_buffer.resize_destructive(dst_size);
 
     memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
     memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
@@ -235,12 +231,8 @@ void MaxwellDMA::CopyPitchToBlockLinear() {
         CalculateSize(true, bytes_per_pixel, width, height, depth, block_height, block_depth);
     const size_t src_size = static_cast<size_t>(regs.pitch_in) * regs.line_count;
 
-    if (read_buffer.size() < src_size) {
-        read_buffer.resize(src_size);
-    }
-    if (write_buffer.size() < dst_size) {
-        write_buffer.resize(dst_size);
-    }
+    read_buffer.resize_destructive(src_size);
+    write_buffer.resize_destructive(dst_size);
 
     memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
     if (Settings::IsGPULevelExtreme()) {
@@ -269,12 +261,8 @@ void MaxwellDMA::FastCopyBlockLinearToPitch() {
     pos_x = pos_x % x_in_gob;
     pos_y = pos_y % 8;
 
-    if (read_buffer.size() < src_size) {
-        read_buffer.resize(src_size);
-    }
-    if (write_buffer.size() < dst_size) {
-        write_buffer.resize(dst_size);
-    }
+    read_buffer.resize_destructive(src_size);
+    write_buffer.resize_destructive(dst_size);
 
     if (Settings::IsGPULevelExtreme()) {
         memory_manager.ReadBlock(regs.offset_in + offset, read_buffer.data(), src_size);
@@ -333,14 +321,10 @@ void MaxwellDMA::CopyBlockLinearToBlockLinear() {
     const u32 pitch = x_elements * bytes_per_pixel;
     const size_t mid_buffer_size = pitch * regs.line_count;
 
-    if (read_buffer.size() < src_size) {
-        read_buffer.resize(src_size);
-    }
-    if (write_buffer.size() < dst_size) {
-        write_buffer.resize(dst_size);
-    }
+    read_buffer.resize_destructive(src_size);
+    write_buffer.resize_destructive(dst_size);
 
-    intermediate_buffer.resize(mid_buffer_size);
+    intermediate_buffer.resize_destructive(mid_buffer_size);
 
     memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
     memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h
index d40d3d302..c88191a61 100644
--- a/src/video_core/engines/maxwell_dma.h
+++ b/src/video_core/engines/maxwell_dma.h
@@ -6,8 +6,10 @@
 #include <array>
 #include <cstddef>
 #include <vector>
+
 #include "common/bit_field.h"
 #include "common/common_types.h"
+#include "common/scratch_buffer.h"
 #include "video_core/engines/engine_interface.h"
 
 namespace Core {
@@ -234,9 +236,9 @@ private:
     MemoryManager& memory_manager;
     VideoCore::RasterizerInterface* rasterizer = nullptr;
 
-    std::vector<u8> read_buffer;
-    std::vector<u8> write_buffer;
-    std::vector<u8> intermediate_buffer;
+    Common::ScratchBuffer<u8> read_buffer;
+    Common::ScratchBuffer<u8> write_buffer;
+    Common::ScratchBuffer<u8> intermediate_buffer;
 
     static constexpr std::size_t NUM_REGS = 0x800;
     struct Regs {
diff --git a/src/video_core/host1x/vic.cpp b/src/video_core/host1x/vic.cpp
index ac0b7d20e..36a04e4e0 100644
--- a/src/video_core/host1x/vic.cpp
+++ b/src/video_core/host1x/vic.cpp
@@ -155,7 +155,7 @@ void Vic::WriteRGBFrame(const AVFrame* frame, const VicConfig& config) {
     // swizzle pitch linear to block linear
     const u32 block_height = static_cast<u32>(config.block_linear_height_log2);
     const auto size = Texture::CalculateSize(true, 4, width, height, 1, block_height, 0);
-    luma_buffer.resize(size);
+    luma_buffer.resize_destructive(size);
     std::span<const u8> frame_buff(converted_frame_buf_addr, 4 * width * height);
     Texture::SwizzleSubrect(luma_buffer, frame_buff, 4, width, height, 1, 0, 0, width, height,
                             block_height, 0, width * 4);
@@ -181,8 +181,8 @@ void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) {
 
     const auto stride = static_cast<size_t>(frame->linesize[0]);
 
-    luma_buffer.resize(aligned_width * surface_height);
-    chroma_buffer.resize(aligned_width * surface_height / 2);
+    luma_buffer.resize_destructive(aligned_width * surface_height);
+    chroma_buffer.resize_destructive(aligned_width * surface_height / 2);
 
     // Populate luma buffer
     const u8* luma_src = frame->data[0];
diff --git a/src/video_core/host1x/vic.h b/src/video_core/host1x/vic.h
index 2b78786e8..3d9753047 100644
--- a/src/video_core/host1x/vic.h
+++ b/src/video_core/host1x/vic.h
@@ -4,8 +4,9 @@
 #pragma once
 
 #include <memory>
-#include <vector>
+
 #include "common/common_types.h"
+#include "common/scratch_buffer.h"
 
 struct SwsContext;
 
@@ -49,8 +50,8 @@ private:
     /// size does not change during a stream
     using AVMallocPtr = std::unique_ptr<u8, decltype(&av_free)>;
     AVMallocPtr converted_frame_buffer;
-    std::vector<u8> luma_buffer;
-    std::vector<u8> chroma_buffer;
+    Common::ScratchBuffer<u8> luma_buffer;
+    Common::ScratchBuffer<u8> chroma_buffer;
 
     GPUVAddr config_struct_address{};
56 GPUVAddr output_surface_luma_address{}; 57 GPUVAddr output_surface_luma_address{};