| author | 2022-03-27 13:09:59 +0200 |
|---|---|
| committer | 2022-03-27 13:09:59 +0200 |
| commit | 99654721eb0cdf974dddb0b17aae471f38d8cdc9 (patch) |
| tree | 754355084577a760adfa2cee37a9ffdfe474eae5 |
| parent | Merge pull request #8092 from yuzu-emu/revert-8080-yo-momma-so-fat-that (diff) |
| parent | hle: service: nvflinger: buffer_queue: Remove AutoLock and fix free buffer tr... (diff) |
Merge pull request #8088 from bunnei/fixup-nvflinger
Follow-up fixes for NVFlinger rewrite
Diffstat
| -rw-r--r-- | src/core/hle/service/nvflinger/buffer_item_consumer.cpp | 4 |
| -rw-r--r-- | src/core/hle/service/nvflinger/buffer_queue.cpp | 206 |
| -rw-r--r-- | src/core/hle/service/nvflinger/buffer_queue.h | 154 |
| -rw-r--r-- | src/core/hle/service/nvflinger/buffer_queue_consumer.cpp | 182 |
| -rw-r--r-- | src/core/hle/service/nvflinger/buffer_queue_core.cpp | 26 |
| -rw-r--r-- | src/core/hle/service/nvflinger/buffer_queue_core.h | 22 |
| -rw-r--r-- | src/core/hle/service/nvflinger/buffer_queue_producer.cpp | 79 |
| -rw-r--r-- | src/core/hle/service/nvflinger/buffer_queue_producer.h | 2 |
| -rw-r--r-- | src/core/hle/service/nvflinger/consumer_base.cpp | 8 |
9 files changed, 136 insertions, 547 deletions
diff --git a/src/core/hle/service/nvflinger/buffer_item_consumer.cpp b/src/core/hle/service/nvflinger/buffer_item_consumer.cpp
index 7f32c0775..93fa1ec10 100644
--- a/src/core/hle/service/nvflinger/buffer_item_consumer.cpp
+++ b/src/core/hle/service/nvflinger/buffer_item_consumer.cpp
| @@ -21,7 +21,7 @@ Status BufferItemConsumer::AcquireBuffer(BufferItem* item, std::chrono::nanoseco | |||
| 21 | return Status::BadValue; | 21 | return Status::BadValue; |
| 22 | } | 22 | } |
| 23 | 23 | ||
| 24 | std::unique_lock lock(mutex); | 24 | std::scoped_lock lock(mutex); |
| 25 | 25 | ||
| 26 | if (const auto status = AcquireBufferLocked(item, present_when); status != Status::NoError) { | 26 | if (const auto status = AcquireBufferLocked(item, present_when); status != Status::NoError) { |
| 27 | if (status != Status::NoBufferAvailable) { | 27 | if (status != Status::NoBufferAvailable) { |
| @@ -40,7 +40,7 @@ Status BufferItemConsumer::AcquireBuffer(BufferItem* item, std::chrono::nanoseco | |||
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | Status BufferItemConsumer::ReleaseBuffer(const BufferItem& item, Fence& release_fence) { | 42 | Status BufferItemConsumer::ReleaseBuffer(const BufferItem& item, Fence& release_fence) { |
| 43 | std::unique_lock lock(mutex); | 43 | std::scoped_lock lock(mutex); |
| 44 | 44 | ||
| 45 | if (const auto status = AddReleaseFenceLocked(item.buf, item.graphic_buffer, release_fence); | 45 | if (const auto status = AddReleaseFenceLocked(item.buf, item.graphic_buffer, release_fence); |
| 46 | status != Status::NoError) { | 46 | status != Status::NoError) { |
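Throughout this patch, `std::unique_lock` guards that never need to be unlocked early are replaced with `std::scoped_lock`. A minimal sketch of the difference, with an illustrative mutex and function name:

```cpp
#include <mutex>

std::mutex mutex;

void LockedSection() {
    // std::scoped_lock locks on construction and unlocks on scope exit, exactly like the
    // std::unique_lock it replaces here, but it cannot be unlocked early or handed to a
    // std::condition_variable, so the simpler intent is visible at the call site.
    std::scoped_lock lock(mutex);
    // ... critical section ...
}
```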
diff --git a/src/core/hle/service/nvflinger/buffer_queue.cpp b/src/core/hle/service/nvflinger/buffer_queue.cpp
deleted file mode 100644
index 5fead6d1b..000000000
--- a/src/core/hle/service/nvflinger/buffer_queue.cpp
+++ /dev/null
| @@ -1,206 +0,0 @@ | |||
| 1 | // Copyright 2018 yuzu emulator team | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include <algorithm> | ||
| 6 | |||
| 7 | #include "common/assert.h" | ||
| 8 | #include "common/logging/log.h" | ||
| 9 | #include "core/core.h" | ||
| 10 | #include "core/hle/kernel/k_writable_event.h" | ||
| 11 | #include "core/hle/kernel/kernel.h" | ||
| 12 | #include "core/hle/service/kernel_helpers.h" | ||
| 13 | #include "core/hle/service/nvflinger/buffer_queue.h" | ||
| 14 | |||
| 15 | namespace Service::NVFlinger { | ||
| 16 | |||
| 17 | BufferQueue::BufferQueue(Kernel::KernelCore& kernel, u32 id_, u64 layer_id_, | ||
| 18 | KernelHelpers::ServiceContext& service_context_) | ||
| 19 | : id(id_), layer_id(layer_id_), service_context{service_context_} { | ||
| 20 | buffer_wait_event = service_context.CreateEvent("BufferQueue:WaitEvent"); | ||
| 21 | } | ||
| 22 | |||
| 23 | BufferQueue::~BufferQueue() { | ||
| 24 | service_context.CloseEvent(buffer_wait_event); | ||
| 25 | } | ||
| 26 | |||
| 27 | void BufferQueue::SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer) { | ||
| 28 | ASSERT(slot < buffer_slots); | ||
| 29 | LOG_WARNING(Service, "Adding graphics buffer {}", slot); | ||
| 30 | |||
| 31 | { | ||
| 32 | std::unique_lock lock{free_buffers_mutex}; | ||
| 33 | free_buffers.push_back(slot); | ||
| 34 | } | ||
| 35 | free_buffers_condition.notify_one(); | ||
| 36 | |||
| 37 | buffers[slot] = { | ||
| 38 | .slot = slot, | ||
| 39 | .status = Buffer::Status::Free, | ||
| 40 | .igbp_buffer = igbp_buffer, | ||
| 41 | .transform = {}, | ||
| 42 | .crop_rect = {}, | ||
| 43 | .swap_interval = 0, | ||
| 44 | .multi_fence = {}, | ||
| 45 | }; | ||
| 46 | |||
| 47 | buffer_wait_event->GetWritableEvent().Signal(); | ||
| 48 | } | ||
| 49 | |||
| 50 | std::optional<std::pair<u32, Service::Nvidia::MultiFence*>> BufferQueue::DequeueBuffer(u32 width, | ||
| 51 | u32 height) { | ||
| 52 | // Wait for first request before trying to dequeue | ||
| 53 | { | ||
| 54 | std::unique_lock lock{free_buffers_mutex}; | ||
| 55 | free_buffers_condition.wait(lock, [this] { return !free_buffers.empty() || !is_connect; }); | ||
| 56 | } | ||
| 57 | |||
| 58 | if (!is_connect) { | ||
| 59 | // Buffer was disconnected while the thread was blocked, this is most likely due to | ||
| 60 | // emulation being stopped | ||
| 61 | return std::nullopt; | ||
| 62 | } | ||
| 63 | |||
| 64 | std::unique_lock lock{free_buffers_mutex}; | ||
| 65 | |||
| 66 | auto f_itr = free_buffers.begin(); | ||
| 67 | auto slot = buffers.size(); | ||
| 68 | |||
| 69 | while (f_itr != free_buffers.end()) { | ||
| 70 | const Buffer& buffer = buffers[*f_itr]; | ||
| 71 | if (buffer.status == Buffer::Status::Free && buffer.igbp_buffer.width == width && | ||
| 72 | buffer.igbp_buffer.height == height) { | ||
| 73 | slot = *f_itr; | ||
| 74 | free_buffers.erase(f_itr); | ||
| 75 | break; | ||
| 76 | } | ||
| 77 | ++f_itr; | ||
| 78 | } | ||
| 79 | if (slot == buffers.size()) { | ||
| 80 | return std::nullopt; | ||
| 81 | } | ||
| 82 | buffers[slot].status = Buffer::Status::Dequeued; | ||
| 83 | return {{buffers[slot].slot, &buffers[slot].multi_fence}}; | ||
| 84 | } | ||
| 85 | |||
| 86 | const IGBPBuffer& BufferQueue::RequestBuffer(u32 slot) const { | ||
| 87 | ASSERT(slot < buffers.size()); | ||
| 88 | ASSERT(buffers[slot].status == Buffer::Status::Dequeued); | ||
| 89 | ASSERT(buffers[slot].slot == slot); | ||
| 90 | |||
| 91 | return buffers[slot].igbp_buffer; | ||
| 92 | } | ||
| 93 | |||
| 94 | void BufferQueue::QueueBuffer(u32 slot, BufferTransformFlags transform, | ||
| 95 | const Common::Rectangle<int>& crop_rect, u32 swap_interval, | ||
| 96 | Service::Nvidia::MultiFence& multi_fence) { | ||
| 97 | ASSERT(slot < buffers.size()); | ||
| 98 | ASSERT(buffers[slot].status == Buffer::Status::Dequeued); | ||
| 99 | ASSERT(buffers[slot].slot == slot); | ||
| 100 | |||
| 101 | buffers[slot].status = Buffer::Status::Queued; | ||
| 102 | buffers[slot].transform = transform; | ||
| 103 | buffers[slot].crop_rect = crop_rect; | ||
| 104 | buffers[slot].swap_interval = swap_interval; | ||
| 105 | buffers[slot].multi_fence = multi_fence; | ||
| 106 | std::unique_lock lock{queue_sequence_mutex}; | ||
| 107 | queue_sequence.push_back(slot); | ||
| 108 | } | ||
| 109 | |||
| 110 | void BufferQueue::CancelBuffer(u32 slot, const Service::Nvidia::MultiFence& multi_fence) { | ||
| 111 | ASSERT(slot < buffers.size()); | ||
| 112 | ASSERT(buffers[slot].status != Buffer::Status::Free); | ||
| 113 | ASSERT(buffers[slot].slot == slot); | ||
| 114 | |||
| 115 | buffers[slot].status = Buffer::Status::Free; | ||
| 116 | buffers[slot].multi_fence = multi_fence; | ||
| 117 | buffers[slot].swap_interval = 0; | ||
| 118 | |||
| 119 | { | ||
| 120 | std::unique_lock lock{free_buffers_mutex}; | ||
| 121 | free_buffers.push_back(slot); | ||
| 122 | } | ||
| 123 | free_buffers_condition.notify_one(); | ||
| 124 | |||
| 125 | buffer_wait_event->GetWritableEvent().Signal(); | ||
| 126 | } | ||
| 127 | |||
| 128 | std::optional<std::reference_wrapper<const BufferQueue::Buffer>> BufferQueue::AcquireBuffer() { | ||
| 129 | std::unique_lock lock{queue_sequence_mutex}; | ||
| 130 | std::size_t buffer_slot = buffers.size(); | ||
| 131 | // Iterate to find a queued buffer matching the requested slot. | ||
| 132 | while (buffer_slot == buffers.size() && !queue_sequence.empty()) { | ||
| 133 | const auto slot = static_cast<std::size_t>(queue_sequence.front()); | ||
| 134 | ASSERT(slot < buffers.size()); | ||
| 135 | if (buffers[slot].status == Buffer::Status::Queued) { | ||
| 136 | ASSERT(buffers[slot].slot == slot); | ||
| 137 | buffer_slot = slot; | ||
| 138 | } | ||
| 139 | queue_sequence.pop_front(); | ||
| 140 | } | ||
| 141 | if (buffer_slot == buffers.size()) { | ||
| 142 | return std::nullopt; | ||
| 143 | } | ||
| 144 | buffers[buffer_slot].status = Buffer::Status::Acquired; | ||
| 145 | return {{buffers[buffer_slot]}}; | ||
| 146 | } | ||
| 147 | |||
| 148 | void BufferQueue::ReleaseBuffer(u32 slot) { | ||
| 149 | ASSERT(slot < buffers.size()); | ||
| 150 | ASSERT(buffers[slot].status == Buffer::Status::Acquired); | ||
| 151 | ASSERT(buffers[slot].slot == slot); | ||
| 152 | |||
| 153 | buffers[slot].status = Buffer::Status::Free; | ||
| 154 | { | ||
| 155 | std::unique_lock lock{free_buffers_mutex}; | ||
| 156 | free_buffers.push_back(slot); | ||
| 157 | } | ||
| 158 | free_buffers_condition.notify_one(); | ||
| 159 | |||
| 160 | buffer_wait_event->GetWritableEvent().Signal(); | ||
| 161 | } | ||
| 162 | |||
| 163 | void BufferQueue::Connect() { | ||
| 164 | std::unique_lock lock{queue_sequence_mutex}; | ||
| 165 | queue_sequence.clear(); | ||
| 166 | is_connect = true; | ||
| 167 | } | ||
| 168 | |||
| 169 | void BufferQueue::Disconnect() { | ||
| 170 | buffers.fill({}); | ||
| 171 | { | ||
| 172 | std::unique_lock lock{queue_sequence_mutex}; | ||
| 173 | queue_sequence.clear(); | ||
| 174 | } | ||
| 175 | buffer_wait_event->GetWritableEvent().Signal(); | ||
| 176 | is_connect = false; | ||
| 177 | free_buffers_condition.notify_one(); | ||
| 178 | } | ||
| 179 | |||
| 180 | u32 BufferQueue::Query(QueryType type) { | ||
| 181 | LOG_WARNING(Service, "(STUBBED) called type={}", type); | ||
| 182 | |||
| 183 | switch (type) { | ||
| 184 | case QueryType::NativeWindowFormat: | ||
| 185 | return static_cast<u32>(PixelFormat::RGBA8888); | ||
| 186 | case QueryType::NativeWindowWidth: | ||
| 187 | case QueryType::NativeWindowHeight: | ||
| 188 | break; | ||
| 189 | case QueryType::NativeWindowMinUndequeuedBuffers: | ||
| 190 | return 0; | ||
| 191 | case QueryType::NativeWindowConsumerUsageBits: | ||
| 192 | return 0; | ||
| 193 | } | ||
| 194 | UNIMPLEMENTED_MSG("Unimplemented query type={}", type); | ||
| 195 | return 0; | ||
| 196 | } | ||
| 197 | |||
| 198 | Kernel::KWritableEvent& BufferQueue::GetWritableBufferWaitEvent() { | ||
| 199 | return buffer_wait_event->GetWritableEvent(); | ||
| 200 | } | ||
| 201 | |||
| 202 | Kernel::KReadableEvent& BufferQueue::GetBufferWaitEvent() { | ||
| 203 | return buffer_wait_event->GetReadableEvent(); | ||
| 204 | } | ||
| 205 | |||
| 206 | } // namespace Service::NVFlinger | ||
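The deleted `DequeueBuffer` above blocked on a condition variable until either a free buffer appeared or the queue was disconnected. A self-contained sketch of that wait pattern, simplified so that `is_connect` is a plain bool guarded by the mutex rather than the original `std::atomic_bool`:

```cpp
#include <condition_variable>
#include <list>
#include <mutex>

std::mutex free_buffers_mutex;
std::condition_variable free_buffers_condition;
std::list<unsigned> free_buffers;
bool is_connect = true;

// Block until a buffer slot is free or the queue has been disconnected; the predicate
// form of wait() rechecks the condition after every wakeup, so spurious wakeups are safe.
void WaitForFreeBuffer() {
    std::unique_lock lock{free_buffers_mutex};
    free_buffers_condition.wait(lock, [] { return !free_buffers.empty() || !is_connect; });
}
```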
diff --git a/src/core/hle/service/nvflinger/buffer_queue.h b/src/core/hle/service/nvflinger/buffer_queue.h
deleted file mode 100644
index f2a579133..000000000
--- a/src/core/hle/service/nvflinger/buffer_queue.h
+++ /dev/null
| @@ -1,154 +0,0 @@ | |||
| 1 | // Copyright 2018 yuzu emulator team | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include <condition_variable> | ||
| 8 | #include <list> | ||
| 9 | #include <mutex> | ||
| 10 | #include <optional> | ||
| 11 | |||
| 12 | #include "common/common_funcs.h" | ||
| 13 | #include "common/math_util.h" | ||
| 14 | #include "common/swap.h" | ||
| 15 | #include "core/hle/kernel/k_event.h" | ||
| 16 | #include "core/hle/kernel/k_readable_event.h" | ||
| 17 | #include "core/hle/service/nvdrv/nvdata.h" | ||
| 18 | |||
| 19 | namespace Kernel { | ||
| 20 | class KernelCore; | ||
| 21 | class KEvent; | ||
| 22 | class KReadableEvent; | ||
| 23 | class KWritableEvent; | ||
| 24 | } // namespace Kernel | ||
| 25 | |||
| 26 | namespace Service::KernelHelpers { | ||
| 27 | class ServiceContext; | ||
| 28 | } // namespace Service::KernelHelpers | ||
| 29 | |||
| 30 | namespace Service::NVFlinger { | ||
| 31 | |||
| 32 | constexpr u32 buffer_slots = 0x40; | ||
| 33 | struct IGBPBuffer { | ||
| 34 | u32_le magic; | ||
| 35 | u32_le width; | ||
| 36 | u32_le height; | ||
| 37 | u32_le stride; | ||
| 38 | u32_le format; | ||
| 39 | u32_le usage; | ||
| 40 | INSERT_PADDING_WORDS(1); | ||
| 41 | u32_le index; | ||
| 42 | INSERT_PADDING_WORDS(3); | ||
| 43 | u32_le gpu_buffer_id; | ||
| 44 | INSERT_PADDING_WORDS(6); | ||
| 45 | u32_le external_format; | ||
| 46 | INSERT_PADDING_WORDS(10); | ||
| 47 | u32_le nvmap_handle; | ||
| 48 | u32_le offset; | ||
| 49 | INSERT_PADDING_WORDS(60); | ||
| 50 | }; | ||
| 51 | |||
| 52 | static_assert(sizeof(IGBPBuffer) == 0x16C, "IGBPBuffer has wrong size"); | ||
| 53 | |||
| 54 | class BufferQueue final { | ||
| 55 | public: | ||
| 56 | enum class QueryType { | ||
| 57 | NativeWindowWidth = 0, | ||
| 58 | NativeWindowHeight = 1, | ||
| 59 | NativeWindowFormat = 2, | ||
| 60 | /// The minimum number of buffers that must remain un-dequeued after a buffer has been | ||
| 61 | /// queued | ||
| 62 | NativeWindowMinUndequeuedBuffers = 3, | ||
| 63 | /// The consumer gralloc usage bits currently set by the consumer | ||
| 64 | NativeWindowConsumerUsageBits = 10, | ||
| 65 | }; | ||
| 66 | |||
| 67 | explicit BufferQueue(Kernel::KernelCore& kernel, u32 id_, u64 layer_id_, | ||
| 68 | KernelHelpers::ServiceContext& service_context_); | ||
| 69 | ~BufferQueue(); | ||
| 70 | |||
| 71 | enum class BufferTransformFlags : u32 { | ||
| 72 | /// No transform flags are set | ||
| 73 | Unset = 0x00, | ||
| 74 | /// Flip source image horizontally (around the vertical axis) | ||
| 75 | FlipH = 0x01, | ||
| 76 | /// Flip source image vertically (around the horizontal axis) | ||
| 77 | FlipV = 0x02, | ||
| 78 | /// Rotate source image 90 degrees clockwise | ||
| 79 | Rotate90 = 0x04, | ||
| 80 | /// Rotate source image 180 degrees | ||
| 81 | Rotate180 = 0x03, | ||
| 82 | /// Rotate source image 270 degrees clockwise | ||
| 83 | Rotate270 = 0x07, | ||
| 84 | }; | ||
| 85 | |||
| 86 | enum class PixelFormat : u32 { | ||
| 87 | RGBA8888 = 1, | ||
| 88 | RGBX8888 = 2, | ||
| 89 | RGB888 = 3, | ||
| 90 | RGB565 = 4, | ||
| 91 | BGRA8888 = 5, | ||
| 92 | RGBA5551 = 6, | ||
| 93 | RRGBA4444 = 7, | ||
| 94 | }; | ||
| 95 | |||
| 96 | struct Buffer { | ||
| 97 | enum class Status { Free = 0, Queued = 1, Dequeued = 2, Acquired = 3 }; | ||
| 98 | |||
| 99 | u32 slot; | ||
| 100 | Status status = Status::Free; | ||
| 101 | IGBPBuffer igbp_buffer; | ||
| 102 | BufferTransformFlags transform; | ||
| 103 | Common::Rectangle<int> crop_rect; | ||
| 104 | u32 swap_interval; | ||
| 105 | Service::Nvidia::MultiFence multi_fence; | ||
| 106 | }; | ||
| 107 | |||
| 108 | void SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer); | ||
| 109 | std::optional<std::pair<u32, Service::Nvidia::MultiFence*>> DequeueBuffer(u32 width, | ||
| 110 | u32 height); | ||
| 111 | const IGBPBuffer& RequestBuffer(u32 slot) const; | ||
| 112 | void QueueBuffer(u32 slot, BufferTransformFlags transform, | ||
| 113 | const Common::Rectangle<int>& crop_rect, u32 swap_interval, | ||
| 114 | Service::Nvidia::MultiFence& multi_fence); | ||
| 115 | void CancelBuffer(u32 slot, const Service::Nvidia::MultiFence& multi_fence); | ||
| 116 | std::optional<std::reference_wrapper<const Buffer>> AcquireBuffer(); | ||
| 117 | void ReleaseBuffer(u32 slot); | ||
| 118 | void Connect(); | ||
| 119 | void Disconnect(); | ||
| 120 | u32 Query(QueryType type); | ||
| 121 | |||
| 122 | u32 GetId() const { | ||
| 123 | return id; | ||
| 124 | } | ||
| 125 | |||
| 126 | bool IsConnected() const { | ||
| 127 | return is_connect; | ||
| 128 | } | ||
| 129 | |||
| 130 | Kernel::KWritableEvent& GetWritableBufferWaitEvent(); | ||
| 131 | |||
| 132 | Kernel::KReadableEvent& GetBufferWaitEvent(); | ||
| 133 | |||
| 134 | private: | ||
| 135 | BufferQueue(const BufferQueue&) = delete; | ||
| 136 | |||
| 137 | u32 id{}; | ||
| 138 | u64 layer_id{}; | ||
| 139 | std::atomic_bool is_connect{}; | ||
| 140 | |||
| 141 | std::list<u32> free_buffers; | ||
| 142 | std::array<Buffer, buffer_slots> buffers; | ||
| 143 | std::list<u32> queue_sequence; | ||
| 144 | Kernel::KEvent* buffer_wait_event{}; | ||
| 145 | |||
| 146 | std::mutex free_buffers_mutex; | ||
| 147 | std::condition_variable free_buffers_condition; | ||
| 148 | |||
| 149 | std::mutex queue_sequence_mutex; | ||
| 150 | |||
| 151 | KernelHelpers::ServiceContext& service_context; | ||
| 152 | }; | ||
| 153 | |||
| 154 | } // namespace Service::NVFlinger | ||
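As a quick check on the `static_assert` in the deleted header, the field layout does add up to 0x16C bytes: the six leading fields, the 1 + 3 + 6 + 10 + 60 padding words, and `index`, `gpu_buffer_id`, `external_format`, `nvmap_handle`, and `offset` make 91 32-bit words in total.

```cpp
#include <cstdint>

// 6 named fields + 1 pad + index + 3 pad + gpu_buffer_id + 6 pad + external_format
// + 10 pad + nvmap_handle + offset + 60 pad = 91 words of 4 bytes each.
static_assert(91 * sizeof(std::uint32_t) == 0x16C, "IGBPBuffer word count matches 0x16C");
```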
diff --git a/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp b/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp
index 677bec932..41fbba219 100644
--- a/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp
+++ b/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp
| @@ -20,122 +20,102 @@ BufferQueueConsumer::~BufferQueueConsumer() = default; | |||
| 20 | Status BufferQueueConsumer::AcquireBuffer(BufferItem* out_buffer, | 20 | Status BufferQueueConsumer::AcquireBuffer(BufferItem* out_buffer, |
| 21 | std::chrono::nanoseconds expected_present, | 21 | std::chrono::nanoseconds expected_present, |
| 22 | u64 max_frame_number) { | 22 | u64 max_frame_number) { |
| 23 | s32 num_dropped_buffers{}; | 23 | std::scoped_lock lock(core->mutex); |
| 24 | |||
| 25 | // Check that the consumer doesn't currently have the maximum number of buffers acquired. | ||
| 26 | const s32 num_acquired_buffers{ | ||
| 27 | static_cast<s32>(std::count_if(slots.begin(), slots.end(), [](const auto& slot) { | ||
| 28 | return slot.buffer_state == BufferState::Acquired; | ||
| 29 | }))}; | ||
| 30 | |||
| 31 | if (num_acquired_buffers >= core->max_acquired_buffer_count + 1) { | ||
| 32 | LOG_ERROR(Service_NVFlinger, "max acquired buffer count reached: {} (max {})", | ||
| 33 | num_acquired_buffers, core->max_acquired_buffer_count); | ||
| 34 | return Status::InvalidOperation; | ||
| 35 | } | ||
| 24 | 36 | ||
| 25 | std::shared_ptr<IProducerListener> listener; | 37 | // Check if the queue is empty. |
| 26 | { | 38 | if (core->queue.empty()) { |
| 27 | std::unique_lock lock(core->mutex); | 39 | return Status::NoBufferAvailable; |
| 28 | 40 | } | |
| 29 | // Check that the consumer doesn't currently have the maximum number of buffers acquired. | ||
| 30 | const s32 num_acquired_buffers{ | ||
| 31 | static_cast<s32>(std::count_if(slots.begin(), slots.end(), [](const auto& slot) { | ||
| 32 | return slot.buffer_state == BufferState::Acquired; | ||
| 33 | }))}; | ||
| 34 | |||
| 35 | if (num_acquired_buffers >= core->max_acquired_buffer_count + 1) { | ||
| 36 | LOG_ERROR(Service_NVFlinger, "max acquired buffer count reached: {} (max {})", | ||
| 37 | num_acquired_buffers, core->max_acquired_buffer_count); | ||
| 38 | return Status::InvalidOperation; | ||
| 39 | } | ||
| 40 | 41 | ||
| 41 | // Check if the queue is empty. | 42 | auto front(core->queue.begin()); |
| 42 | if (core->queue.empty()) { | ||
| 43 | return Status::NoBufferAvailable; | ||
| 44 | } | ||
| 45 | 43 | ||
| 46 | auto front(core->queue.begin()); | 44 | // If expected_present is specified, we may not want to return a buffer yet. |
| 47 | 45 | if (expected_present.count() != 0) { | |
| 48 | // If expected_present is specified, we may not want to return a buffer yet. | 46 | constexpr auto MAX_REASONABLE_NSEC = 1000000000LL; // 1 second |
| 49 | if (expected_present.count() != 0) { | ||
| 50 | constexpr auto MAX_REASONABLE_NSEC = 1000000000LL; // 1 second | ||
| 51 | |||
| 52 | // The expected_present argument indicates when the buffer is expected to be | ||
| 53 | // presented on-screen. | ||
| 54 | while (core->queue.size() > 1 && !core->queue[0].is_auto_timestamp) { | ||
| 55 | const auto& buffer_item{core->queue[1]}; | ||
| 56 | |||
| 57 | // If dropping entry[0] would leave us with a buffer that the consumer is not yet | ||
| 58 | // ready for, don't drop it. | ||
| 59 | if (max_frame_number && buffer_item.frame_number > max_frame_number) { | ||
| 60 | break; | ||
| 61 | } | ||
| 62 | |||
| 63 | // If entry[1] is timely, drop entry[0] (and repeat). | ||
| 64 | const auto desired_present = buffer_item.timestamp; | ||
| 65 | if (desired_present < expected_present.count() - MAX_REASONABLE_NSEC || | ||
| 66 | desired_present > expected_present.count()) { | ||
| 67 | // This buffer is set to display in the near future, or desired_present is | ||
| 68 | // garbage. | ||
| 69 | LOG_DEBUG(Service_NVFlinger, "nodrop desire={} expect={}", desired_present, | ||
| 70 | expected_present.count()); | ||
| 71 | break; | ||
| 72 | } | ||
| 73 | |||
| 74 | LOG_DEBUG(Service_NVFlinger, "drop desire={} expect={} size={}", desired_present, | ||
| 75 | expected_present.count(), core->queue.size()); | ||
| 76 | |||
| 77 | if (core->StillTracking(*front)) { | ||
| 78 | // Front buffer is still in mSlots, so mark the slot as free | ||
| 79 | slots[front->slot].buffer_state = BufferState::Free; | ||
| 80 | core->free_buffers.push_back(front->slot); | ||
| 81 | listener = core->connected_producer_listener; | ||
| 82 | ++num_dropped_buffers; | ||
| 83 | } | ||
| 84 | |||
| 85 | core->queue.erase(front); | ||
| 86 | front = core->queue.begin(); | ||
| 87 | } | ||
| 88 | 47 | ||
| 89 | // See if the front buffer is ready to be acquired. | 48 | // The expected_present argument indicates when the buffer is expected to be presented |
| 90 | const auto desired_present = front->timestamp; | 49 | // on-screen. |
| 91 | const auto buffer_is_due = | 50 | while (core->queue.size() > 1 && !core->queue[0].is_auto_timestamp) { |
| 92 | desired_present <= expected_present.count() || | 51 | const auto& buffer_item{core->queue[1]}; |
| 93 | desired_present > expected_present.count() + MAX_REASONABLE_NSEC; | ||
| 94 | const auto consumer_is_ready = | ||
| 95 | max_frame_number > 0 ? front->frame_number <= max_frame_number : true; | ||
| 96 | 52 | ||
| 97 | if (!buffer_is_due || !consumer_is_ready) { | 53 | // If dropping entry[0] would leave us with a buffer that the consumer is not yet ready |
| 98 | LOG_DEBUG(Service_NVFlinger, "defer desire={} expect={}", desired_present, | 54 | // for, don't drop it. |
| 99 | expected_present.count()); | 55 | if (max_frame_number && buffer_item.frame_number > max_frame_number) { |
| 100 | return Status::PresentLater; | 56 | break; |
| 101 | } | 57 | } |
| 102 | 58 | ||
| 103 | LOG_DEBUG(Service_NVFlinger, "accept desire={} expect={}", desired_present, | 59 | // If entry[1] is timely, drop entry[0] (and repeat). |
| 104 | expected_present.count()); | 60 | const auto desired_present = buffer_item.timestamp; |
| 105 | } | 61 | if (desired_present < expected_present.count() - MAX_REASONABLE_NSEC || |
| 62 | desired_present > expected_present.count()) { | ||
| 63 | // This buffer is set to display in the near future, or desired_present is garbage. | ||
| 64 | LOG_DEBUG(Service_NVFlinger, "nodrop desire={} expect={}", desired_present, | ||
| 65 | expected_present.count()); | ||
| 66 | break; | ||
| 67 | } | ||
| 106 | 68 | ||
| 107 | const auto slot = front->slot; | 69 | LOG_DEBUG(Service_NVFlinger, "drop desire={} expect={} size={}", desired_present, |
| 108 | *out_buffer = *front; | 70 | expected_present.count(), core->queue.size()); |
| 109 | 71 | ||
| 110 | LOG_DEBUG(Service_NVFlinger, "acquiring slot={}", slot); | 72 | if (core->StillTracking(*front)) { |
| 73 | // Front buffer is still in mSlots, so mark the slot as free | ||
| 74 | slots[front->slot].buffer_state = BufferState::Free; | ||
| 75 | } | ||
| 111 | 76 | ||
| 112 | // If the front buffer is still being tracked, update its slot state | 77 | core->queue.erase(front); |
| 113 | if (core->StillTracking(*front)) { | 78 | front = core->queue.begin(); |
| 114 | slots[slot].acquire_called = true; | ||
| 115 | slots[slot].needs_cleanup_on_release = false; | ||
| 116 | slots[slot].buffer_state = BufferState::Acquired; | ||
| 117 | slots[slot].fence = Fence::NoFence(); | ||
| 118 | } | 79 | } |
| 119 | 80 | ||
| 120 | // If the buffer has previously been acquired by the consumer, set graphic_buffer to nullptr | 81 | // See if the front buffer is ready to be acquired. |
| 121 | // to avoid unnecessarily remapping this buffer on the consumer side. | 82 | const auto desired_present = front->timestamp; |
| 122 | if (out_buffer->acquire_called) { | 83 | if (desired_present > expected_present.count() && |
| 123 | out_buffer->graphic_buffer = nullptr; | 84 | desired_present < expected_present.count() + MAX_REASONABLE_NSEC) { |
| 85 | LOG_DEBUG(Service_NVFlinger, "defer desire={} expect={}", desired_present, | ||
| 86 | expected_present.count()); | ||
| 87 | return Status::PresentLater; | ||
| 124 | } | 88 | } |
| 125 | 89 | ||
| 126 | core->queue.erase(front); | 90 | LOG_DEBUG(Service_NVFlinger, "accept desire={} expect={}", desired_present, |
| 91 | expected_present.count()); | ||
| 92 | } | ||
| 93 | |||
| 94 | const auto slot = front->slot; | ||
| 95 | *out_buffer = *front; | ||
| 127 | 96 | ||
| 128 | // We might have freed a slot while dropping old buffers, or the producer may be blocked | 97 | LOG_DEBUG(Service_NVFlinger, "acquiring slot={}", slot); |
| 129 | // waiting for the number of buffers in the queue to decrease. | 98 | |
| 130 | core->SignalDequeueCondition(); | 99 | // If the front buffer is still being tracked, update its slot state |
| 100 | if (core->StillTracking(*front)) { | ||
| 101 | slots[slot].acquire_called = true; | ||
| 102 | slots[slot].needs_cleanup_on_release = false; | ||
| 103 | slots[slot].buffer_state = BufferState::Acquired; | ||
| 104 | slots[slot].fence = Fence::NoFence(); | ||
| 131 | } | 105 | } |
| 132 | 106 | ||
| 133 | if (listener != nullptr) { | 107 | // If the buffer has previously been acquired by the consumer, set graphic_buffer to nullptr to |
| 134 | for (s32 i = 0; i < num_dropped_buffers; ++i) { | 108 | // avoid unnecessarily remapping this buffer on the consumer side. |
| 135 | listener->OnBufferReleased(); | 109 | if (out_buffer->acquire_called) { |
| 136 | } | 110 | out_buffer->graphic_buffer = nullptr; |
| 137 | } | 111 | } |
| 138 | 112 | ||
| 113 | core->queue.erase(front); | ||
| 114 | |||
| 115 | // We might have freed a slot while dropping old buffers, or the producer may be blocked | ||
| 116 | // waiting for the number of buffers in the queue to decrease. | ||
| 117 | core->SignalDequeueCondition(); | ||
| 118 | |||
| 139 | return Status::NoError; | 119 | return Status::NoError; |
| 140 | } | 120 | } |
| 141 | 121 | ||
| @@ -147,7 +127,7 @@ Status BufferQueueConsumer::ReleaseBuffer(s32 slot, u64 frame_number, const Fenc | |||
| 147 | 127 | ||
| 148 | std::shared_ptr<IProducerListener> listener; | 128 | std::shared_ptr<IProducerListener> listener; |
| 149 | { | 129 | { |
| 150 | std::unique_lock lock(core->mutex); | 130 | std::scoped_lock lock(core->mutex); |
| 151 | 131 | ||
| 152 | // If the frame number has changed because the buffer has been reallocated, we can ignore | 132 | // If the frame number has changed because the buffer has been reallocated, we can ignore |
| 153 | // this ReleaseBuffer for the old buffer. | 133 | // this ReleaseBuffer for the old buffer. |
| @@ -170,8 +150,6 @@ Status BufferQueueConsumer::ReleaseBuffer(s32 slot, u64 frame_number, const Fenc | |||
| 170 | slots[slot].fence = release_fence; | 150 | slots[slot].fence = release_fence; |
| 171 | slots[slot].buffer_state = BufferState::Free; | 151 | slots[slot].buffer_state = BufferState::Free; |
| 172 | 152 | ||
| 173 | core->free_buffers.push_back(slot); | ||
| 174 | |||
| 175 | listener = core->connected_producer_listener; | 153 | listener = core->connected_producer_listener; |
| 176 | 154 | ||
| 177 | LOG_DEBUG(Service_NVFlinger, "releasing slot {}", slot); | 155 | LOG_DEBUG(Service_NVFlinger, "releasing slot {}", slot); |
| @@ -189,7 +167,7 @@ Status BufferQueueConsumer::ReleaseBuffer(s32 slot, u64 frame_number, const Fenc | |||
| 189 | return Status::BadValue; | 167 | return Status::BadValue; |
| 190 | } | 168 | } |
| 191 | 169 | ||
| 192 | core->dequeue_condition.notify_all(); | 170 | core->SignalDequeueCondition(); |
| 193 | } | 171 | } |
| 194 | 172 | ||
| 195 | // Call back without lock held | 173 | // Call back without lock held |
| @@ -209,7 +187,7 @@ Status BufferQueueConsumer::Connect(std::shared_ptr<IConsumerListener> consumer_ | |||
| 209 | 187 | ||
| 210 | LOG_DEBUG(Service_NVFlinger, "controlled_by_app={}", controlled_by_app); | 188 | LOG_DEBUG(Service_NVFlinger, "controlled_by_app={}", controlled_by_app); |
| 211 | 189 | ||
| 212 | BufferQueueCore::AutoLock lock(core); | 190 | std::scoped_lock lock(core->mutex); |
| 213 | 191 | ||
| 214 | if (core->is_abandoned) { | 192 | if (core->is_abandoned) { |
| 215 | LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned"); | 193 | LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned"); |
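The rewritten `AcquireBuffer` reduces the old `buffer_is_due` / `consumer_is_ready` pair to a single check on the front buffer's timestamp. A sketch of that check as a hypothetical helper (the function name is not part of the patch):

```cpp
#include <cstdint>

constexpr std::int64_t MAX_REASONABLE_NSEC = 1'000'000'000LL; // 1 second

// Return true when the front buffer targets a presentation time that is still in the
// future but within a sane window; such a buffer is deferred with Status::PresentLater.
// Timestamps further out than the window are treated as garbage and accepted immediately.
bool ShouldDeferAcquire(std::int64_t desired_present_ns, std::int64_t expected_present_ns) {
    return desired_present_ns > expected_present_ns &&
           desired_present_ns < expected_present_ns + MAX_REASONABLE_NSEC;
}
```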
diff --git a/src/core/hle/service/nvflinger/buffer_queue_core.cpp b/src/core/hle/service/nvflinger/buffer_queue_core.cpp
index eb93b43ee..6082610e0 100644
--- a/src/core/hle/service/nvflinger/buffer_queue_core.cpp
+++ b/src/core/hle/service/nvflinger/buffer_queue_core.cpp
| @@ -10,16 +10,12 @@ | |||
| 10 | 10 | ||
| 11 | namespace Service::android { | 11 | namespace Service::android { |
| 12 | 12 | ||
| 13 | BufferQueueCore::BufferQueueCore() : lock{mutex, std::defer_lock} { | 13 | BufferQueueCore::BufferQueueCore() = default; |
| 14 | for (s32 slot = 0; slot < BufferQueueDefs::NUM_BUFFER_SLOTS; ++slot) { | ||
| 15 | free_slots.insert(slot); | ||
| 16 | } | ||
| 17 | } | ||
| 18 | 14 | ||
| 19 | BufferQueueCore::~BufferQueueCore() = default; | 15 | BufferQueueCore::~BufferQueueCore() = default; |
| 20 | 16 | ||
| 21 | void BufferQueueCore::NotifyShutdown() { | 17 | void BufferQueueCore::NotifyShutdown() { |
| 22 | std::unique_lock lk(mutex); | 18 | std::scoped_lock lock(mutex); |
| 23 | 19 | ||
| 24 | is_shutting_down = true; | 20 | is_shutting_down = true; |
| 25 | 21 | ||
| @@ -35,7 +31,7 @@ bool BufferQueueCore::WaitForDequeueCondition() { | |||
| 35 | return false; | 31 | return false; |
| 36 | } | 32 | } |
| 37 | 33 | ||
| 38 | dequeue_condition.wait(lock); | 34 | dequeue_condition.wait(mutex); |
| 39 | 35 | ||
| 40 | return true; | 36 | return true; |
| 41 | } | 37 | } |
| @@ -86,26 +82,15 @@ s32 BufferQueueCore::GetPreallocatedBufferCountLocked() const { | |||
| 86 | void BufferQueueCore::FreeBufferLocked(s32 slot) { | 82 | void BufferQueueCore::FreeBufferLocked(s32 slot) { |
| 87 | LOG_DEBUG(Service_NVFlinger, "slot {}", slot); | 83 | LOG_DEBUG(Service_NVFlinger, "slot {}", slot); |
| 88 | 84 | ||
| 89 | const auto had_buffer = slots[slot].graphic_buffer != nullptr; | ||
| 90 | |||
| 91 | slots[slot].graphic_buffer.reset(); | 85 | slots[slot].graphic_buffer.reset(); |
| 92 | 86 | ||
| 93 | if (slots[slot].buffer_state == BufferState::Acquired) { | 87 | if (slots[slot].buffer_state == BufferState::Acquired) { |
| 94 | slots[slot].needs_cleanup_on_release = true; | 88 | slots[slot].needs_cleanup_on_release = true; |
| 95 | } | 89 | } |
| 96 | 90 | ||
| 97 | if (slots[slot].buffer_state != BufferState::Free) { | ||
| 98 | free_slots.insert(slot); | ||
| 99 | } else if (had_buffer) { | ||
| 100 | // If the slot was FREE, but we had a buffer, we need to move this slot from the free | ||
| 101 | // buffers list to the the free slots list. | ||
| 102 | free_buffers.remove(slot); | ||
| 103 | free_slots.insert(slot); | ||
| 104 | } | ||
| 105 | |||
| 106 | slots[slot].buffer_state = BufferState::Free; | 91 | slots[slot].buffer_state = BufferState::Free; |
| 92 | slots[slot].frame_number = UINT32_MAX; | ||
| 107 | slots[slot].acquire_called = false; | 93 | slots[slot].acquire_called = false; |
| 108 | slots[slot].frame_number = 0; | ||
| 109 | slots[slot].fence = Fence::NoFence(); | 94 | slots[slot].fence = Fence::NoFence(); |
| 110 | } | 95 | } |
| 111 | 96 | ||
| @@ -126,8 +111,7 @@ bool BufferQueueCore::StillTracking(const BufferItem& item) const { | |||
| 126 | 111 | ||
| 127 | void BufferQueueCore::WaitWhileAllocatingLocked() const { | 112 | void BufferQueueCore::WaitWhileAllocatingLocked() const { |
| 128 | while (is_allocating) { | 113 | while (is_allocating) { |
| 129 | std::unique_lock lk(mutex); | 114 | is_allocating_condition.wait(mutex); |
| 130 | is_allocating_condition.wait(lk); | ||
| 131 | } | 115 | } |
| 132 | } | 116 | } |
| 133 | 117 | ||
diff --git a/src/core/hle/service/nvflinger/buffer_queue_core.h b/src/core/hle/service/nvflinger/buffer_queue_core.h
index a3cd89f1c..4dfd53387 100644
--- a/src/core/hle/service/nvflinger/buffer_queue_core.h
+++ b/src/core/hle/service/nvflinger/buffer_queue_core.h
| @@ -50,23 +50,7 @@ private: | |||
| 50 | void WaitWhileAllocatingLocked() const; | 50 | void WaitWhileAllocatingLocked() const; |
| 51 | 51 | ||
| 52 | private: | 52 | private: |
| 53 | class AutoLock final { | ||
| 54 | public: | ||
| 55 | AutoLock(std::shared_ptr<BufferQueueCore>& core_) : core{core_} { | ||
| 56 | core->lock.lock(); | ||
| 57 | } | ||
| 58 | |||
| 59 | ~AutoLock() { | ||
| 60 | core->lock.unlock(); | ||
| 61 | } | ||
| 62 | |||
| 63 | private: | ||
| 64 | std::shared_ptr<BufferQueueCore>& core; | ||
| 65 | }; | ||
| 66 | |||
| 67 | private: | ||
| 68 | mutable std::mutex mutex; | 53 | mutable std::mutex mutex; |
| 69 | mutable std::unique_lock<std::mutex> lock; | ||
| 70 | bool is_abandoned{}; | 54 | bool is_abandoned{}; |
| 71 | bool consumer_controlled_by_app{}; | 55 | bool consumer_controlled_by_app{}; |
| 72 | std::shared_ptr<IConsumerListener> consumer_listener; | 56 | std::shared_ptr<IConsumerListener> consumer_listener; |
| @@ -75,10 +59,8 @@ private: | |||
| 75 | std::shared_ptr<IProducerListener> connected_producer_listener; | 59 | std::shared_ptr<IProducerListener> connected_producer_listener; |
| 76 | BufferQueueDefs::SlotsType slots{}; | 60 | BufferQueueDefs::SlotsType slots{}; |
| 77 | std::vector<BufferItem> queue; | 61 | std::vector<BufferItem> queue; |
| 78 | std::set<s32> free_slots; | ||
| 79 | std::list<s32> free_buffers; | ||
| 80 | s32 override_max_buffer_count{}; | 62 | s32 override_max_buffer_count{}; |
| 81 | mutable std::condition_variable dequeue_condition; | 63 | mutable std::condition_variable_any dequeue_condition; |
| 82 | const bool use_async_buffer{}; // This is always disabled on HOS | 64 | const bool use_async_buffer{}; // This is always disabled on HOS |
| 83 | bool dequeue_buffer_cannot_block{}; | 65 | bool dequeue_buffer_cannot_block{}; |
| 84 | PixelFormat default_buffer_format{PixelFormat::Rgba8888}; | 66 | PixelFormat default_buffer_format{PixelFormat::Rgba8888}; |
| @@ -90,7 +72,7 @@ private: | |||
| 90 | u64 frame_counter{}; | 72 | u64 frame_counter{}; |
| 91 | u32 transform_hint{}; | 73 | u32 transform_hint{}; |
| 92 | bool is_allocating{}; | 74 | bool is_allocating{}; |
| 93 | mutable std::condition_variable is_allocating_condition; | 75 | mutable std::condition_variable_any is_allocating_condition; |
| 94 | bool allow_allocation{true}; | 76 | bool allow_allocation{true}; |
| 95 | u64 buffer_age{}; | 77 | u64 buffer_age{}; |
| 96 | bool is_shutting_down{}; | 78 | bool is_shutting_down{}; |
diff --git a/src/core/hle/service/nvflinger/buffer_queue_producer.cpp b/src/core/hle/service/nvflinger/buffer_queue_producer.cpp
index 078091904..0833be57a 100644
--- a/src/core/hle/service/nvflinger/buffer_queue_producer.cpp
+++ b/src/core/hle/service/nvflinger/buffer_queue_producer.cpp
| @@ -38,7 +38,7 @@ BufferQueueProducer::~BufferQueueProducer() { | |||
| 38 | Status BufferQueueProducer::RequestBuffer(s32 slot, std::shared_ptr<GraphicBuffer>* buf) { | 38 | Status BufferQueueProducer::RequestBuffer(s32 slot, std::shared_ptr<GraphicBuffer>* buf) { |
| 39 | LOG_DEBUG(Service_NVFlinger, "slot {}", slot); | 39 | LOG_DEBUG(Service_NVFlinger, "slot {}", slot); |
| 40 | 40 | ||
| 41 | BufferQueueCore::AutoLock lock(core); | 41 | std::scoped_lock lock(core->mutex); |
| 42 | 42 | ||
| 43 | if (core->is_abandoned) { | 43 | if (core->is_abandoned) { |
| 44 | LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned"); | 44 | LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned"); |
| @@ -65,7 +65,7 @@ Status BufferQueueProducer::SetBufferCount(s32 buffer_count) { | |||
| 65 | std::shared_ptr<IConsumerListener> listener; | 65 | std::shared_ptr<IConsumerListener> listener; |
| 66 | 66 | ||
| 67 | { | 67 | { |
| 68 | BufferQueueCore::AutoLock lock(core); | 68 | std::scoped_lock lock(core->mutex); |
| 69 | core->WaitWhileAllocatingLocked(); | 69 | core->WaitWhileAllocatingLocked(); |
| 70 | if (core->is_abandoned) { | 70 | if (core->is_abandoned) { |
| 71 | LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned"); | 71 | LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned"); |
| @@ -156,6 +156,14 @@ Status BufferQueueProducer::WaitForFreeSlotThenRelock(bool async, s32* found, | |||
| 156 | case BufferState::Acquired: | 156 | case BufferState::Acquired: |
| 157 | ++acquired_count; | 157 | ++acquired_count; |
| 158 | break; | 158 | break; |
| 159 | case BufferState::Free: | ||
| 160 | // We return the oldest of the free buffers to avoid stalling the producer if | ||
| 161 | // possible, since the consumer may still have pending reads of in-flight buffers | ||
| 162 | if (*found == BufferQueueCore::INVALID_BUFFER_SLOT || | ||
| 163 | slots[s].frame_number < slots[*found].frame_number) { | ||
| 164 | *found = s; | ||
| 165 | } | ||
| 166 | break; | ||
| 159 | default: | 167 | default: |
| 160 | break; | 168 | break; |
| 161 | } | 169 | } |
| @@ -183,27 +191,12 @@ Status BufferQueueProducer::WaitForFreeSlotThenRelock(bool async, s32* found, | |||
| 183 | } | 191 | } |
| 184 | } | 192 | } |
| 185 | 193 | ||
| 186 | *found = BufferQueueCore::INVALID_BUFFER_SLOT; | ||
| 187 | |||
| 188 | // If we disconnect and reconnect quickly, we can be in a state where our slots are empty | 194 | // If we disconnect and reconnect quickly, we can be in a state where our slots are empty |
| 189 | // but we have many buffers in the queue. This can cause us to run out of memory if we | 195 | // but we have many buffers in the queue. This can cause us to run out of memory if we |
| 190 | // outrun the consumer. Wait here if it looks like we have too many buffers queued up. | 196 | // outrun the consumer. Wait here if it looks like we have too many buffers queued up. |
| 191 | const bool too_many_buffers = core->queue.size() > static_cast<size_t>(max_buffer_count); | 197 | const bool too_many_buffers = core->queue.size() > static_cast<size_t>(max_buffer_count); |
| 192 | if (too_many_buffers) { | 198 | if (too_many_buffers) { |
| 193 | LOG_ERROR(Service_NVFlinger, "queue size is {}, waiting", core->queue.size()); | 199 | LOG_ERROR(Service_NVFlinger, "queue size is {}, waiting", core->queue.size()); |
| 194 | } else { | ||
| 195 | if (!core->free_buffers.empty()) { | ||
| 196 | auto slot = core->free_buffers.begin(); | ||
| 197 | *found = *slot; | ||
| 198 | core->free_buffers.erase(slot); | ||
| 199 | } else if (core->allow_allocation && !core->free_slots.empty()) { | ||
| 200 | auto slot = core->free_slots.begin(); | ||
| 201 | // Only return free slots up to the max buffer count | ||
| 202 | if (*slot < max_buffer_count) { | ||
| 203 | *found = *slot; | ||
| 204 | core->free_slots.erase(slot); | ||
| 205 | } | ||
| 206 | } | ||
| 207 | } | 200 | } |
| 208 | 201 | ||
| 209 | // If no buffer is found, or if the queue has too many buffers outstanding, wait for a | 202 | // If no buffer is found, or if the queue has too many buffers outstanding, wait for a |
| @@ -240,7 +233,7 @@ Status BufferQueueProducer::DequeueBuffer(s32* out_slot, Fence* out_fence, bool | |||
| 240 | Status return_flags = Status::NoError; | 233 | Status return_flags = Status::NoError; |
| 241 | bool attached_by_consumer = false; | 234 | bool attached_by_consumer = false; |
| 242 | { | 235 | { |
| 243 | BufferQueueCore::AutoLock lock(core); | 236 | std::scoped_lock lock(core->mutex); |
| 244 | core->WaitWhileAllocatingLocked(); | 237 | core->WaitWhileAllocatingLocked(); |
| 245 | if (format == PixelFormat::NoFormat) { | 238 | if (format == PixelFormat::NoFormat) { |
| 246 | format = core->default_buffer_format; | 239 | format = core->default_buffer_format; |
| @@ -317,12 +310,13 @@ Status BufferQueueProducer::DequeueBuffer(s32* out_slot, Fence* out_fence, bool | |||
| 317 | } | 310 | } |
| 318 | 311 | ||
| 319 | { | 312 | { |
| 320 | BufferQueueCore::AutoLock lock(core); | 313 | std::scoped_lock lock(core->mutex); |
| 321 | if (core->is_abandoned) { | 314 | if (core->is_abandoned) { |
| 322 | LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned"); | 315 | LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned"); |
| 323 | return Status::NoInit; | 316 | return Status::NoInit; |
| 324 | } | 317 | } |
| 325 | 318 | ||
| 319 | slots[*out_slot].frame_number = UINT32_MAX; | ||
| 326 | slots[*out_slot].graphic_buffer = graphic_buffer; | 320 | slots[*out_slot].graphic_buffer = graphic_buffer; |
| 327 | } | 321 | } |
| 328 | } | 322 | } |
| @@ -339,7 +333,7 @@ Status BufferQueueProducer::DequeueBuffer(s32* out_slot, Fence* out_fence, bool | |||
| 339 | Status BufferQueueProducer::DetachBuffer(s32 slot) { | 333 | Status BufferQueueProducer::DetachBuffer(s32 slot) { |
| 340 | LOG_DEBUG(Service_NVFlinger, "slot {}", slot); | 334 | LOG_DEBUG(Service_NVFlinger, "slot {}", slot); |
| 341 | 335 | ||
| 342 | BufferQueueCore::AutoLock lock(core); | 336 | std::scoped_lock lock(core->mutex); |
| 343 | if (core->is_abandoned) { | 337 | if (core->is_abandoned) { |
| 344 | LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned"); | 338 | LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned"); |
| 345 | return Status::NoInit; | 339 | return Status::NoInit; |
| @@ -374,7 +368,7 @@ Status BufferQueueProducer::DetachNextBuffer(std::shared_ptr<GraphicBuffer>* out | |||
| 374 | return Status::BadValue; | 368 | return Status::BadValue; |
| 375 | } | 369 | } |
| 376 | 370 | ||
| 377 | BufferQueueCore::AutoLock lock(core); | 371 | std::scoped_lock lock(core->mutex); |
| 378 | 372 | ||
| 379 | core->WaitWhileAllocatingLocked(); | 373 | core->WaitWhileAllocatingLocked(); |
| 380 | 374 | ||
| @@ -382,12 +376,21 @@ Status BufferQueueProducer::DetachNextBuffer(std::shared_ptr<GraphicBuffer>* out | |||
| 382 | LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned"); | 376 | LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned"); |
| 383 | return Status::NoInit; | 377 | return Status::NoInit; |
| 384 | } | 378 | } |
| 385 | if (core->free_buffers.empty()) { | 379 | |
| 386 | return Status::NoMemory; | 380 | // Find the oldest valid slot |
| 381 | int found = BufferQueueCore::INVALID_BUFFER_SLOT; | ||
| 382 | for (int s = 0; s < BufferQueueDefs::NUM_BUFFER_SLOTS; ++s) { | ||
| 383 | if (slots[s].buffer_state == BufferState::Free && slots[s].graphic_buffer != nullptr) { | ||
| 384 | if (found == BufferQueueCore::INVALID_BUFFER_SLOT || | ||
| 385 | slots[s].frame_number < slots[found].frame_number) { | ||
| 386 | found = s; | ||
| 387 | } | ||
| 388 | } | ||
| 387 | } | 389 | } |
| 388 | 390 | ||
| 389 | const s32 found = core->free_buffers.front(); | 391 | if (found == BufferQueueCore::INVALID_BUFFER_SLOT) { |
| 390 | core->free_buffers.remove(found); | 392 | return Status::NoMemory; |
| 393 | } | ||
| 391 | 394 | ||
| 392 | LOG_DEBUG(Service_NVFlinger, "Detached slot {}", found); | 395 | LOG_DEBUG(Service_NVFlinger, "Detached slot {}", found); |
| 393 | 396 | ||
| @@ -409,7 +412,7 @@ Status BufferQueueProducer::AttachBuffer(s32* out_slot, | |||
| 409 | return Status::BadValue; | 412 | return Status::BadValue; |
| 410 | } | 413 | } |
| 411 | 414 | ||
| 412 | BufferQueueCore::AutoLock lock(core); | 415 | std::scoped_lock lock(core->mutex); |
| 413 | core->WaitWhileAllocatingLocked(); | 416 | core->WaitWhileAllocatingLocked(); |
| 414 | 417 | ||
| 415 | Status return_flags = Status::NoError; | 418 | Status return_flags = Status::NoError; |
| @@ -469,7 +472,7 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input, | |||
| 469 | BufferItem item; | 472 | BufferItem item; |
| 470 | 473 | ||
| 471 | { | 474 | { |
| 472 | BufferQueueCore::AutoLock lock(core); | 475 | std::scoped_lock lock(core->mutex); |
| 473 | 476 | ||
| 474 | if (core->is_abandoned) { | 477 | if (core->is_abandoned) { |
| 475 | LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned"); | 478 | LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned"); |
| @@ -554,7 +557,9 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input, | |||
| 554 | // mark it as freed | 557 | // mark it as freed |
| 555 | if (core->StillTracking(*front)) { | 558 | if (core->StillTracking(*front)) { |
| 556 | slots[front->slot].buffer_state = BufferState::Free; | 559 | slots[front->slot].buffer_state = BufferState::Free; |
| 557 | core->free_buffers.push_front(front->slot); | 560 | // Reset the frame number of the freed buffer so that it is the first in line to |
| 561 | // be dequeued again | ||
| 562 | slots[front->slot].frame_number = 0; | ||
| 558 | } | 563 | } |
| 559 | // Overwrite the droppable buffer with the incoming one | 564 | // Overwrite the droppable buffer with the incoming one |
| 560 | *front = item; | 565 | *front = item; |
| @@ -582,10 +587,9 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input, | |||
| 582 | // Call back without the main BufferQueue lock held, but with the callback lock held so we can | 587 | // Call back without the main BufferQueue lock held, but with the callback lock held so we can |
| 583 | // ensure that callbacks occur in order | 588 | // ensure that callbacks occur in order |
| 584 | { | 589 | { |
| 585 | std::unique_lock lock(callback_mutex); | 590 | std::scoped_lock lock(callback_mutex); |
| 586 | while (callback_ticket != current_callback_ticket) { | 591 | while (callback_ticket != current_callback_ticket) { |
| 587 | std::unique_lock<std::mutex> lk(callback_mutex); | 592 | callback_condition.wait(callback_mutex); |
| 588 | callback_condition.wait(lk); | ||
| 589 | } | 593 | } |
| 590 | 594 | ||
| 591 | if (frameAvailableListener != nullptr) { | 595 | if (frameAvailableListener != nullptr) { |
| @@ -604,7 +608,7 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input, | |||
| 604 | void BufferQueueProducer::CancelBuffer(s32 slot, const Fence& fence) { | 608 | void BufferQueueProducer::CancelBuffer(s32 slot, const Fence& fence) { |
| 605 | LOG_DEBUG(Service_NVFlinger, "slot {}", slot); | 609 | LOG_DEBUG(Service_NVFlinger, "slot {}", slot); |
| 606 | 610 | ||
| 607 | BufferQueueCore::AutoLock lock(core); | 611 | std::scoped_lock lock(core->mutex); |
| 608 | 612 | ||
| 609 | if (core->is_abandoned) { | 613 | if (core->is_abandoned) { |
| 610 | LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned"); | 614 | LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned"); |
| @@ -621,8 +625,8 @@ void BufferQueueProducer::CancelBuffer(s32 slot, const Fence& fence) { | |||
| 621 | return; | 625 | return; |
| 622 | } | 626 | } |
| 623 | 627 | ||
| 624 | core->free_buffers.push_front(slot); | ||
| 625 | slots[slot].buffer_state = BufferState::Free; | 628 | slots[slot].buffer_state = BufferState::Free; |
| 629 | slots[slot].frame_number = 0; | ||
| 626 | slots[slot].fence = fence; | 630 | slots[slot].fence = fence; |
| 627 | 631 | ||
| 628 | core->SignalDequeueCondition(); | 632 | core->SignalDequeueCondition(); |
| @@ -630,7 +634,7 @@ void BufferQueueProducer::CancelBuffer(s32 slot, const Fence& fence) { | |||
| 630 | } | 634 | } |
| 631 | 635 | ||
| 632 | Status BufferQueueProducer::Query(NativeWindow what, s32* out_value) { | 636 | Status BufferQueueProducer::Query(NativeWindow what, s32* out_value) { |
| 633 | BufferQueueCore::AutoLock lock(core); | 637 | std::scoped_lock lock(core->mutex); |
| 634 | 638 | ||
| 635 | if (out_value == nullptr) { | 639 | if (out_value == nullptr) { |
| 636 | LOG_ERROR(Service_NVFlinger, "outValue was nullptr"); | 640 | LOG_ERROR(Service_NVFlinger, "outValue was nullptr"); |
| @@ -687,7 +691,7 @@ Status BufferQueueProducer::Query(NativeWindow what, s32* out_value) { | |||
| 687 | Status BufferQueueProducer::Connect(const std::shared_ptr<IProducerListener>& listener, | 691 | Status BufferQueueProducer::Connect(const std::shared_ptr<IProducerListener>& listener, |
| 688 | NativeWindowApi api, bool producer_controlled_by_app, | 692 | NativeWindowApi api, bool producer_controlled_by_app, |
| 689 | QueueBufferOutput* output) { | 693 | QueueBufferOutput* output) { |
| 690 | BufferQueueCore::AutoLock lock(core); | 694 | std::scoped_lock lock(core->mutex); |
| 691 | 695 | ||
| 692 | LOG_DEBUG(Service_NVFlinger, "api = {} producer_controlled_by_app = {}", api, | 696 | LOG_DEBUG(Service_NVFlinger, "api = {} producer_controlled_by_app = {}", api, |
| 693 | producer_controlled_by_app); | 697 | producer_controlled_by_app); |
| @@ -745,7 +749,7 @@ Status BufferQueueProducer::Disconnect(NativeWindowApi api) { | |||
| 745 | std::shared_ptr<IConsumerListener> listener; | 749 | std::shared_ptr<IConsumerListener> listener; |
| 746 | 750 | ||
| 747 | { | 751 | { |
| 748 | BufferQueueCore::AutoLock lock(core); | 752 | std::scoped_lock lock(core->mutex); |
| 749 | 753 | ||
| 750 | core->WaitWhileAllocatingLocked(); | 754 | core->WaitWhileAllocatingLocked(); |
| 751 | 755 | ||
| @@ -795,10 +799,11 @@ Status BufferQueueProducer::SetPreallocatedBuffer(s32 slot, | |||
| 795 | return Status::BadValue; | 799 | return Status::BadValue; |
| 796 | } | 800 | } |
| 797 | 801 | ||
| 798 | BufferQueueCore::AutoLock lock(core); | 802 | std::scoped_lock lock(core->mutex); |
| 799 | 803 | ||
| 800 | slots[slot] = {}; | 804 | slots[slot] = {}; |
| 801 | slots[slot].graphic_buffer = buffer; | 805 | slots[slot].graphic_buffer = buffer; |
| 806 | slots[slot].frame_number = 0; | ||
| 802 | 807 | ||
| 803 | // Most games preallocate a buffer and pass a valid buffer here. However, it is possible for | 808 | // Most games preallocate a buffer and pass a valid buffer here. However, it is possible for |
| 804 | // this to be called with an empty buffer, Naruto Ultimate Ninja Storm is a game that does this. | 809 | // this to be called with an empty buffer, Naruto Ultimate Ninja Storm is a game that does this. |
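The free-buffer bookkeeping above replaces the `free_buffers` / `free_slots` containers with a scan over the slot array: both `WaitForFreeSlotThenRelock` and `DetachNextBuffer` now pick the free slot with the smallest `frame_number`, i.e. the buffer unused for the longest time, so the producer is less likely to reuse a buffer the consumer may still be reading. A self-contained sketch with stand-in types (the real ones live in the BufferQueue headers):

```cpp
#include <array>
#include <cstdint>

// Stand-ins for illustration only.
enum class BufferState { Free, Dequeued, Queued, Acquired };
struct Slot {
    BufferState buffer_state{BufferState::Free};
    std::uint64_t frame_number{};
};
constexpr int kNumSlots = 64; // stand-in for BufferQueueDefs::NUM_BUFFER_SLOTS
constexpr int kInvalidSlot = -1; // stand-in for BufferQueueCore::INVALID_BUFFER_SLOT

// Return the free slot whose buffer was queued longest ago (smallest frame number),
// or kInvalidSlot if no slot is free.
int FindOldestFreeSlot(const std::array<Slot, kNumSlots>& slots) {
    int found = kInvalidSlot;
    for (int s = 0; s < kNumSlots; ++s) {
        if (slots[s].buffer_state != BufferState::Free) {
            continue;
        }
        if (found == kInvalidSlot || slots[s].frame_number < slots[found].frame_number) {
            found = s;
        }
    }
    return found;
}
```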
diff --git a/src/core/hle/service/nvflinger/buffer_queue_producer.h b/src/core/hle/service/nvflinger/buffer_queue_producer.h
index 5ddeebe0c..77fdcae8e 100644
--- a/src/core/hle/service/nvflinger/buffer_queue_producer.h
+++ b/src/core/hle/service/nvflinger/buffer_queue_producer.h
| @@ -77,7 +77,7 @@ private: | |||
| 77 | std::mutex callback_mutex; | 77 | std::mutex callback_mutex; |
| 78 | s32 next_callback_ticket{}; | 78 | s32 next_callback_ticket{}; |
| 79 | s32 current_callback_ticket{}; | 79 | s32 current_callback_ticket{}; |
| 80 | std::condition_variable callback_condition; | 80 | std::condition_variable_any callback_condition; |
| 81 | }; | 81 | }; |
| 82 | 82 | ||
| 83 | } // namespace Service::android | 83 | } // namespace Service::android |
diff --git a/src/core/hle/service/nvflinger/consumer_base.cpp b/src/core/hle/service/nvflinger/consumer_base.cpp
index 3ccbb7fb8..be65a3f88 100644
--- a/src/core/hle/service/nvflinger/consumer_base.cpp
+++ b/src/core/hle/service/nvflinger/consumer_base.cpp
| @@ -18,7 +18,7 @@ ConsumerBase::ConsumerBase(std::unique_ptr<BufferQueueConsumer> consumer_) | |||
| 18 | : consumer{std::move(consumer_)} {} | 18 | : consumer{std::move(consumer_)} {} |
| 19 | 19 | ||
| 20 | ConsumerBase::~ConsumerBase() { | 20 | ConsumerBase::~ConsumerBase() { |
| 21 | std::unique_lock lock(mutex); | 21 | std::scoped_lock lock(mutex); |
| 22 | 22 | ||
| 23 | ASSERT_MSG(is_abandoned, "consumer is not abandoned!"); | 23 | ASSERT_MSG(is_abandoned, "consumer is not abandoned!"); |
| 24 | } | 24 | } |
| @@ -36,17 +36,17 @@ void ConsumerBase::FreeBufferLocked(s32 slot_index) { | |||
| 36 | } | 36 | } |
| 37 | 37 | ||
| 38 | void ConsumerBase::OnFrameAvailable(const BufferItem& item) { | 38 | void ConsumerBase::OnFrameAvailable(const BufferItem& item) { |
| 39 | std::unique_lock lock(mutex); | 39 | std::scoped_lock lock(mutex); |
| 40 | LOG_DEBUG(Service_NVFlinger, "called"); | 40 | LOG_DEBUG(Service_NVFlinger, "called"); |
| 41 | } | 41 | } |
| 42 | 42 | ||
| 43 | void ConsumerBase::OnFrameReplaced(const BufferItem& item) { | 43 | void ConsumerBase::OnFrameReplaced(const BufferItem& item) { |
| 44 | std::unique_lock lock(mutex); | 44 | std::scoped_lock lock(mutex); |
| 45 | LOG_DEBUG(Service_NVFlinger, "called"); | 45 | LOG_DEBUG(Service_NVFlinger, "called"); |
| 46 | } | 46 | } |
| 47 | 47 | ||
| 48 | void ConsumerBase::OnBuffersReleased() { | 48 | void ConsumerBase::OnBuffersReleased() { |
| 49 | std::unique_lock lock(mutex); | 49 | std::scoped_lock lock(mutex); |
| 50 | LOG_DEBUG(Service_NVFlinger, "called"); | 50 | LOG_DEBUG(Service_NVFlinger, "called"); |
| 51 | } | 51 | } |
| 52 | 52 | ||