author     bunnei  2020-12-16 21:09:06 -0800
committer  bunnei  2020-12-28 21:33:34 -0800
commit     6433b1dfd67f4c4f0c4b2e3742dc437a0d1e906e (patch)
tree       d2a9a23be1a80d9351f99d7feb7978f3c2237882 /src
parent     hle: service: Ensure system is powered on before writing IPC result. (diff)
service: nvflinger: Improve synchronization for BufferQueue.
- Use proper mechanisms for blocking on DequeueBuffer.
- Ensure service thread terminates on emulation Shutdown.
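At its core, the change replaces polling in DequeueBuffer with a standard condition-variable handoff: consumers sleep until a free slot is published or the queue is disconnected. A minimal, self-contained sketch of that pattern (illustrative names only, not yuzu's API):

#include <atomic>
#include <condition_variable>
#include <list>
#include <mutex>
#include <optional>
#include <thread>

// Sketch of the pattern adopted below: consumers block until a buffer
// slot is free or the queue is disconnected (e.g. emulation shutdown).
class SketchQueue {
public:
    std::optional<unsigned> Dequeue() {
        std::unique_lock lock{mutex};
        condition.wait(lock, [this] { return !free_slots.empty() || !connected; });
        if (!connected) {
            return std::nullopt;  // woken by Disconnect(), give up cleanly
        }
        const unsigned slot = free_slots.front();
        free_slots.pop_front();
        return slot;
    }

    void Release(unsigned slot) {
        {
            std::unique_lock lock{mutex};
            free_slots.push_back(slot);
        }
        condition.notify_one();  // wake one blocked consumer
    }

    void Disconnect() {
        {
            std::unique_lock lock{mutex};
            connected = false;   // written under the lock to avoid a lost wakeup
        }
        condition.notify_all();  // release every waiter
    }

private:
    std::mutex mutex;
    std::condition_variable condition;
    std::list<unsigned> free_slots;
    std::atomic_bool connected{true};
};

int main() {
    SketchQueue queue;
    std::thread consumer{[&] {
        while (const auto slot = queue.Dequeue()) {
            // ... draw into *slot ...
        }
    }};
    queue.Release(0);    // producer publishes a free buffer
    queue.Disconnect();  // shutdown: unblocks the consumer so join() returns
    consumer.join();
}

In this sketch, Disconnect() doubles as the shutdown path: it flips the flag and wakes every waiter, so a blocked Dequeue() observes the disconnect and returns instead of sleeping forever.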
Diffstat (limited to 'src')
-rw-r--r--  src/core/hle/service/nvflinger/buffer_queue.cpp  40
-rw-r--r--  src/core/hle/service/nvflinger/buffer_queue.h    17
-rw-r--r--  src/core/hle/service/nvflinger/nvflinger.cpp     13
-rw-r--r--  src/core/hle/service/nvflinger/nvflinger.h        2
-rw-r--r--  src/core/hle/service/vi/vi.cpp                   19
5 files changed, 72 insertions(+), 19 deletions(-)
diff --git a/src/core/hle/service/nvflinger/buffer_queue.cpp b/src/core/hle/service/nvflinger/buffer_queue.cpp
index 377f47e8e..c8c6a4d64 100644
--- a/src/core/hle/service/nvflinger/buffer_queue.cpp
+++ b/src/core/hle/service/nvflinger/buffer_queue.cpp
@@ -25,7 +25,12 @@ void BufferQueue::SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer)
     ASSERT(slot < buffer_slots);
     LOG_WARNING(Service, "Adding graphics buffer {}", slot);
 
-    free_buffers.push_back(slot);
+    {
+        std::unique_lock lock{queue_mutex};
+        free_buffers.push_back(slot);
+    }
+    condition.notify_one();
+
     buffers[slot] = {
         .slot = slot,
         .status = Buffer::Status::Free,
@@ -41,10 +46,20 @@ void BufferQueue::SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer)
 
 std::optional<std::pair<u32, Service::Nvidia::MultiFence*>> BufferQueue::DequeueBuffer(u32 width,
                                                                                        u32 height) {
+    // Wait for first request before trying to dequeue
+    {
+        std::unique_lock lock{queue_mutex};
+        condition.wait(lock, [this] { return !free_buffers.empty() || !is_connect; });
+    }
 
-    if (free_buffers.empty()) {
+    if (!is_connect) {
+        // Buffer was disconnected while the thread was blocked, this is most likely due to
+        // emulation being stopped
         return std::nullopt;
     }
+
+    std::unique_lock lock{queue_mutex};
+
     auto f_itr = free_buffers.begin();
     auto slot = buffers.size();
 
@@ -97,7 +112,11 @@ void BufferQueue::CancelBuffer(u32 slot, const Service::Nvidia::MultiFence& multi_fence)
     buffers[slot].multi_fence = multi_fence;
     buffers[slot].swap_interval = 0;
 
-    free_buffers.push_back(slot);
+    {
+        std::unique_lock lock{queue_mutex};
+        free_buffers.push_back(slot);
+    }
+    condition.notify_one();
 
     buffer_wait_event.writable->Signal();
 }
@@ -127,15 +146,28 @@ void BufferQueue::ReleaseBuffer(u32 slot) {
     ASSERT(buffers[slot].slot == slot);
 
     buffers[slot].status = Buffer::Status::Free;
-    free_buffers.push_back(slot);
+    {
+        std::unique_lock lock{queue_mutex};
+        free_buffers.push_back(slot);
+    }
+    condition.notify_one();
 
     buffer_wait_event.writable->Signal();
 }
 
+void BufferQueue::Connect() {
+    queue_sequence.clear();
+    id = 1;
+    layer_id = 1;
+    is_connect = true;
+}
+
 void BufferQueue::Disconnect() {
     buffers.fill({});
     queue_sequence.clear();
     buffer_wait_event.writable->Signal();
+    is_connect = false;
+    condition.notify_one();
 }
 
 u32 BufferQueue::Query(QueryType type) {
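Two details of the hunks above are worth noting. Each free_buffers.push_back happens inside a scope that releases queue_mutex before condition.notify_one(), so the woken consumer does not immediately stall on a still-held lock. And DequeueBuffer uses the predicate overload of wait, which re-checks the condition on every wakeup and therefore tolerates spurious wakeups; note that the lock is dropped and re-acquired between the wait block and the actual dequeue, so another consumer can race in, which the caller's retry loop in vi.cpp (below) absorbs. For reference, the predicate overload is shorthand for a checked loop, roughly:

#include <condition_variable>
#include <mutex>

// What condition.wait(lock, pred) expands to, per the standard:
template <typename Pred>
void wait_until_true(std::condition_variable& condition,
                     std::unique_lock<std::mutex>& lock, Pred pred) {
    while (!pred()) {
        condition.wait(lock);  // atomically unlocks, sleeps, re-locks on wakeup
    }
}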
diff --git a/src/core/hle/service/nvflinger/buffer_queue.h b/src/core/hle/service/nvflinger/buffer_queue.h
index e610923cb..a2f60d9eb 100644
--- a/src/core/hle/service/nvflinger/buffer_queue.h
+++ b/src/core/hle/service/nvflinger/buffer_queue.h
@@ -4,7 +4,9 @@
 
 #pragma once
 
+#include <condition_variable>
 #include <list>
+#include <mutex>
 #include <optional>
 #include <vector>
 
@@ -99,6 +101,7 @@ public:
     void CancelBuffer(u32 slot, const Service::Nvidia::MultiFence& multi_fence);
     std::optional<std::reference_wrapper<const Buffer>> AcquireBuffer();
     void ReleaseBuffer(u32 slot);
+    void Connect();
     void Disconnect();
     u32 Query(QueryType type);
 
@@ -106,18 +109,28 @@ public:
         return id;
     }
 
+    bool IsConnected() const {
+        return is_connect;
+    }
+
     std::shared_ptr<Kernel::WritableEvent> GetWritableBufferWaitEvent() const;
 
     std::shared_ptr<Kernel::ReadableEvent> GetBufferWaitEvent() const;
 
private:
-    u32 id;
-    u64 layer_id;
+    BufferQueue(const BufferQueue&) = delete;
+
+    u32 id{};
+    u64 layer_id{};
+    std::atomic_bool is_connect{};
 
     std::list<u32> free_buffers;
     std::array<Buffer, buffer_slots> buffers;
     std::list<u32> queue_sequence;
     Kernel::EventPair buffer_wait_event;
+
+    std::mutex queue_mutex;
+    std::condition_variable condition;
 };
 
 } // namespace Service::NVFlinger
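is_connect is declared std::atomic_bool rather than plain bool because it is read without holding queue_mutex: IsConnected() is polled from the vi service's dequeue loop while Disconnect() may store to the flag from another thread, and with a plain bool that concurrent read/write would be a data race. A minimal illustration of the access pattern (standalone sketch, not the yuzu types):

#include <atomic>

std::atomic_bool is_connect{true};

// Reader: polled from a service thread without taking any lock.
bool IsConnected() {
    return is_connect;   // atomic load, well-defined under concurrency
}

// Writer: runs on a different thread during shutdown.
void Disconnect() {
    is_connect = false;  // atomic store; a plain bool here would race
}

int main() {
    Disconnect();
    return IsConnected() ? 1 : 0;
}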
diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp
index a7a679df1..4b3581949 100644
--- a/src/core/hle/service/nvflinger/nvflinger.cpp
+++ b/src/core/hle/service/nvflinger/nvflinger.cpp
@@ -88,6 +88,10 @@ NVFlinger::NVFlinger(Core::System& system) : system(system) {
 }
 
 NVFlinger::~NVFlinger() {
+    for (auto& buffer_queue : buffer_queues) {
+        buffer_queue->Disconnect();
+    }
+
     if (system.IsMulticore()) {
         is_running = false;
         wait_event->Set();
@@ -132,8 +136,9 @@ std::optional<u64> NVFlinger::CreateLayer(u64 display_id) {
 
     const u64 layer_id = next_layer_id++;
     const u32 buffer_queue_id = next_buffer_queue_id++;
-    buffer_queues.emplace_back(system.Kernel(), buffer_queue_id, layer_id);
-    display->CreateLayer(layer_id, buffer_queues.back());
+    buffer_queues.emplace_back(
+        std::make_unique<BufferQueue>(system.Kernel(), buffer_queue_id, layer_id));
+    display->CreateLayer(layer_id, *buffer_queues.back());
     return layer_id;
 }
 
@@ -170,13 +175,13 @@ std::shared_ptr<Kernel::ReadableEvent> NVFlinger::FindVsyncEvent(u64 display_id)
 BufferQueue* NVFlinger::FindBufferQueue(u32 id) {
     const auto guard = Lock();
     const auto itr = std::find_if(buffer_queues.begin(), buffer_queues.end(),
-                                  [id](const auto& queue) { return queue.GetId() == id; });
+                                  [id](const auto& queue) { return queue->GetId() == id; });
 
     if (itr == buffer_queues.end()) {
         return nullptr;
     }
 
-    return &*itr;
+    return itr->get();
 }
 
 VI::Display* NVFlinger::FindDisplay(u64 display_id) {
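The new destructor loop disconnects every queue before the existing multicore teardown runs. Ordering matters: a thread parked inside DequeueBuffer would otherwise sleep forever and stall shutdown, whereas Disconnect() wakes it and the !is_connect check turns the wakeup into a clean std::nullopt return. A sketch of that shutdown ordering, reusing SketchQueue from the sketch above:

#include <thread>

struct SketchService {
    SketchQueue queue;  // declared first, so it is constructed before the worker

    std::thread worker{[this] {
        // Blocks in Dequeue() until a slot is free or the queue disconnects.
        while (queue.Dequeue()) { /* ... present a frame ... */ }
    }};

    ~SketchService() {
        queue.Disconnect();  // wake the worker *before* joining it...
        worker.join();       // ...otherwise join() could wait forever
    }
};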
diff --git a/src/core/hle/service/nvflinger/nvflinger.h b/src/core/hle/service/nvflinger/nvflinger.h
index ce1347d6d..c6765259f 100644
--- a/src/core/hle/service/nvflinger/nvflinger.h
+++ b/src/core/hle/service/nvflinger/nvflinger.h
@@ -107,7 +107,7 @@ private:
     std::shared_ptr<Nvidia::Module> nvdrv;
 
     std::vector<VI::Display> displays;
-    std::vector<BufferQueue> buffer_queues;
+    std::vector<std::unique_ptr<BufferQueue>> buffer_queues;
 
     /// Id to use for the next layer that is created, this counter is shared among all displays.
     u64 next_layer_id = 1;
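Switching to std::vector<std::unique_ptr<BufferQueue>> is forced by the new members: std::mutex and std::condition_variable are neither copyable nor movable, so a std::vector<BufferQueue> could no longer grow, and the indirection also keeps each queue's address stable across reallocation, which matters because Display::CreateLayer is handed a long-lived reference. A hedged illustration:

#include <memory>
#include <mutex>
#include <vector>

struct Queue {
    std::mutex m;  // non-movable member: Queue is neither movable nor copyable
};

int main() {
    // std::vector<Queue> direct;
    // direct.emplace_back();  // ill-formed: vector growth requires move or copy

    std::vector<std::unique_ptr<Queue>> queues;
    queues.emplace_back(std::make_unique<Queue>());
    Queue& stable = *queues.front();                 // remains valid even after...
    queues.emplace_back(std::make_unique<Queue>());  // ...the vector reallocates
    (void)stable;
}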
diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp
index ce0272e59..1051000f8 100644
--- a/src/core/hle/service/vi/vi.cpp
+++ b/src/core/hle/service/vi/vi.cpp
@@ -544,6 +544,12 @@ private:
                                 Settings::values.resolution_factor.GetValue()),
             static_cast<u32>(static_cast<u32>(DisplayResolution::UndockedHeight) *
                              Settings::values.resolution_factor.GetValue())};
+
+        {
+            auto& buffer_queue = *nv_flinger.FindBufferQueue(id);
+            buffer_queue.Connect();
+        }
+
         ctx.WriteBuffer(response.Serialize());
         break;
     }
@@ -565,18 +571,15 @@ private:
         const u32 width{request.data.width};
         const u32 height{request.data.height};
 
-        std::optional<std::pair<u32, Service::Nvidia::MultiFence*>> result;
-
-        while (!result) {
-            auto& buffer_queue = *nv_flinger.FindBufferQueue(id);
-            result = buffer_queue.DequeueBuffer(width, height);
-
-            if (result) {
+        auto& buffer_queue = *nv_flinger.FindBufferQueue(id);
+        do {
+            if (auto result = buffer_queue.DequeueBuffer(width, height); result) {
                 // Buffer is available
                 IGBPDequeueBufferResponseParcel response{result->first, *result->second};
                 ctx.WriteBuffer(response.Serialize());
+                break;
             }
-        }
+        } while (buffer_queue.IsConnected());
 
         break;
     }
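The rewritten handler hoists the FindBufferQueue lookup out of the loop, scopes result to the success branch with a C++17 if-statement initializer, and makes IsConnected() the loop condition so a disconnect (emulation shutdown) ends the retry loop instead of spinning on a dead queue. The control-flow idiom in isolation (placeholder names, not the vi service API):

#include <atomic>
#include <optional>

std::atomic_bool connected{true};

// Stand-in for BufferQueue::DequeueBuffer: here it never produces a value
// and disconnects after the first poll, so the loop exits cleanly.
std::optional<int> try_dequeue() {
    connected = false;
    return std::nullopt;
}

int main() {
    do {
        if (auto result = try_dequeue(); result) {
            // ... serialize and write the response ...
            break;              // success: stop retrying
        }
    } while (connected);        // disconnect ends the loop without a result
}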