Diffstat:
-rw-r--r--  src/core/hle/service/nvflinger/nvflinger.cpp   | 76
-rw-r--r--  src/core/hle/service/nvflinger/nvflinger.h     | 11
-rw-r--r--  src/core/hle/service/vi/display/vi_display.cpp | 49
-rw-r--r--  src/core/hle/service/vi/display/vi_display.h   | 74
-rw-r--r--  src/core/hle/service/vi/layer/vi_layer.cpp     |  3
-rw-r--r--  src/core/hle/service/vi/layer/vi_layer.h       | 37
-rw-r--r--  src/core/hle/service/vi/vi.cpp                 | 26
7 files changed, 209 insertions(+), 67 deletions(-)
diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp
index b5d452db1..56f31e2ac 100644
--- a/src/core/hle/service/nvflinger/nvflinger.cpp
+++ b/src/core/hle/service/nvflinger/nvflinger.cpp
@@ -28,9 +28,13 @@ namespace Service::NVFlinger {
 constexpr std::size_t SCREEN_REFRESH_RATE = 60;
 constexpr u64 frame_ticks = static_cast<u64>(Core::Timing::BASE_CLOCK_RATE / SCREEN_REFRESH_RATE);
 
-NVFlinger::NVFlinger(Core::Timing::CoreTiming& core_timing)
-    : displays{{0, "Default"}, {1, "External"}, {2, "Edid"}, {3, "Internal"}, {4, "Null"}},
-      core_timing{core_timing} {
+NVFlinger::NVFlinger(Core::Timing::CoreTiming& core_timing) : core_timing{core_timing} {
+    displays.emplace_back(0, "Default");
+    displays.emplace_back(1, "External");
+    displays.emplace_back(2, "Edid");
+    displays.emplace_back(3, "Internal");
+    displays.emplace_back(4, "Null");
+
     // Schedule the screen composition events
     composition_event =
         core_timing.RegisterEvent("ScreenComposition", [this](u64 userdata, int cycles_late) {
@@ -55,13 +59,14 @@ std::optional<u64> NVFlinger::OpenDisplay(std::string_view name) {
     // TODO(Subv): Currently we only support the Default display.
     ASSERT(name == "Default");
 
-    const auto itr = std::find_if(displays.begin(), displays.end(),
-                                  [&](const VI::Display& display) { return display.name == name; });
+    const auto itr =
+        std::find_if(displays.begin(), displays.end(),
+                     [&](const VI::Display& display) { return display.GetName() == name; });
     if (itr == displays.end()) {
         return {};
     }
 
-    return itr->id;
+    return itr->GetID();
 }
 
 std::optional<u64> NVFlinger::CreateLayer(u64 display_id) {
@@ -71,13 +76,10 @@ std::optional<u64> NVFlinger::CreateLayer(u64 display_id) {
         return {};
     }
 
-    ASSERT_MSG(display->layers.empty(), "Only one layer is supported per display at the moment");
-
     const u64 layer_id = next_layer_id++;
     const u32 buffer_queue_id = next_buffer_queue_id++;
-    auto buffer_queue = std::make_shared<BufferQueue>(buffer_queue_id, layer_id);
-    display->layers.emplace_back(layer_id, buffer_queue);
-    buffer_queues.emplace_back(std::move(buffer_queue));
+    buffer_queues.emplace_back(buffer_queue_id, layer_id);
+    display->CreateLayer(layer_id, buffer_queues.back());
     return layer_id;
 }
 
@@ -88,7 +90,7 @@ std::optional<u32> NVFlinger::FindBufferQueueId(u64 display_id, u64 layer_id) co
         return {};
     }
 
-    return layer->buffer_queue->GetId();
+    return layer->GetBufferQueue().GetId();
 }
 
 Kernel::SharedPtr<Kernel::ReadableEvent> NVFlinger::FindVsyncEvent(u64 display_id) const {
@@ -98,12 +100,20 @@ Kernel::SharedPtr<Kernel::ReadableEvent> NVFlinger::FindVsyncEvent(u64 display_i
         return nullptr;
     }
 
-    return display->vsync_event.readable;
+    return display->GetVSyncEvent();
 }
 
-std::shared_ptr<BufferQueue> NVFlinger::FindBufferQueue(u32 id) const {
+BufferQueue& NVFlinger::FindBufferQueue(u32 id) {
     const auto itr = std::find_if(buffer_queues.begin(), buffer_queues.end(),
-                                  [&](const auto& queue) { return queue->GetId() == id; });
+                                  [id](const auto& queue) { return queue.GetId() == id; });
+
+    ASSERT(itr != buffer_queues.end());
+    return *itr;
+}
+
+const BufferQueue& NVFlinger::FindBufferQueue(u32 id) const {
+    const auto itr = std::find_if(buffer_queues.begin(), buffer_queues.end(),
+                                  [id](const auto& queue) { return queue.GetId() == id; });
 
     ASSERT(itr != buffer_queues.end());
     return *itr;
@@ -112,7 +122,7 @@ std::shared_ptr<BufferQueue> NVFlinger::FindBufferQueue(u32 id) const {
 VI::Display* NVFlinger::FindDisplay(u64 display_id) {
     const auto itr =
         std::find_if(displays.begin(), displays.end(),
-                     [&](const VI::Display& display) { return display.id == display_id; });
+                     [&](const VI::Display& display) { return display.GetID() == display_id; });
 
     if (itr == displays.end()) {
         return nullptr;
@@ -124,7 +134,7 @@ VI::Display* NVFlinger::FindDisplay(u64 display_id) {
 const VI::Display* NVFlinger::FindDisplay(u64 display_id) const {
     const auto itr =
         std::find_if(displays.begin(), displays.end(),
-                     [&](const VI::Display& display) { return display.id == display_id; });
+                     [&](const VI::Display& display) { return display.GetID() == display_id; });
 
     if (itr == displays.end()) {
         return nullptr;
@@ -140,14 +150,7 @@ VI::Layer* NVFlinger::FindLayer(u64 display_id, u64 layer_id) {
         return nullptr;
     }
 
-    const auto itr = std::find_if(display->layers.begin(), display->layers.end(),
-                                  [&](const VI::Layer& layer) { return layer.id == layer_id; });
-
-    if (itr == display->layers.end()) {
-        return nullptr;
-    }
-
-    return &*itr;
+    return display->FindLayer(layer_id);
 }
 
 const VI::Layer* NVFlinger::FindLayer(u64 display_id, u64 layer_id) const {
@@ -157,33 +160,24 @@ const VI::Layer* NVFlinger::FindLayer(u64 display_id, u64 layer_id) const {
         return nullptr;
     }
 
-    const auto itr = std::find_if(display->layers.begin(), display->layers.end(),
-                                  [&](const VI::Layer& layer) { return layer.id == layer_id; });
-
-    if (itr == display->layers.end()) {
-        return nullptr;
-    }
-
-    return &*itr;
+    return display->FindLayer(layer_id);
 }
 
 void NVFlinger::Compose() {
     for (auto& display : displays) {
         // Trigger vsync for this display at the end of drawing
-        SCOPE_EXIT({ display.vsync_event.writable->Signal(); });
+        SCOPE_EXIT({ display.SignalVSyncEvent(); });
 
         // Don't do anything for displays without layers.
-        if (display.layers.empty())
+        if (!display.HasLayers())
             continue;
 
         // TODO(Subv): Support more than 1 layer.
-        ASSERT_MSG(display.layers.size() == 1, "Max 1 layer per display is supported");
-
-        VI::Layer& layer = display.layers[0];
-        auto& buffer_queue = layer.buffer_queue;
+        VI::Layer& layer = display.GetLayer(0);
+        auto& buffer_queue = layer.GetBufferQueue();
 
         // Search for a queued buffer and acquire it
-        auto buffer = buffer_queue->AcquireBuffer();
+        auto buffer = buffer_queue.AcquireBuffer();
 
         MicroProfileFlip();
 
@@ -208,7 +202,7 @@ void NVFlinger::Compose() {
                      igbp_buffer.width, igbp_buffer.height, igbp_buffer.stride,
                      buffer->get().transform, buffer->get().crop_rect);
 
-        buffer_queue->ReleaseBuffer(buffer->get().slot);
+        buffer_queue.ReleaseBuffer(buffer->get().slot);
     }
 }
 
diff --git a/src/core/hle/service/nvflinger/nvflinger.h b/src/core/hle/service/nvflinger/nvflinger.h
index 2e000af91..c0a83fffb 100644
--- a/src/core/hle/service/nvflinger/nvflinger.h
+++ b/src/core/hle/service/nvflinger/nvflinger.h
@@ -28,8 +28,8 @@ class Module;
 } // namespace Service::Nvidia
 
 namespace Service::VI {
-struct Display;
-struct Layer;
+class Display;
+class Layer;
 } // namespace Service::VI
 
 namespace Service::NVFlinger {
@@ -65,7 +65,10 @@ public:
     Kernel::SharedPtr<Kernel::ReadableEvent> FindVsyncEvent(u64 display_id) const;
 
     /// Obtains a buffer queue identified by the ID.
-    std::shared_ptr<BufferQueue> FindBufferQueue(u32 id) const;
+    BufferQueue& FindBufferQueue(u32 id);
+
+    /// Obtains a buffer queue identified by the ID.
+    const BufferQueue& FindBufferQueue(u32 id) const;
 
     /// Performs a composition request to the emulated nvidia GPU and triggers the vsync events when
     /// finished.
@@ -87,7 +90,7 @@ private:
     std::shared_ptr<Nvidia::Module> nvdrv;
 
     std::vector<VI::Display> displays;
-    std::vector<std::shared_ptr<BufferQueue>> buffer_queues;
+    std::vector<BufferQueue> buffer_queues;
 
     /// Id to use for the next layer that is created, this counter is shared among all displays.
     u64 next_layer_id = 1;
diff --git a/src/core/hle/service/vi/display/vi_display.cpp b/src/core/hle/service/vi/display/vi_display.cpp
index a108e468f..01d80311b 100644
--- a/src/core/hle/service/vi/display/vi_display.cpp
+++ b/src/core/hle/service/vi/display/vi_display.cpp
@@ -2,8 +2,12 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
+#include <algorithm>
+#include <utility>
+
 #include <fmt/format.h>
 
+#include "common/assert.h"
 #include "core/core.h"
 #include "core/hle/kernel/readable_event.h"
 #include "core/hle/service/vi/display/vi_display.h"
@@ -19,4 +23,49 @@ Display::Display(u64 id, std::string name) : id{id}, name{std::move(name)} {
 
 Display::~Display() = default;
 
+Layer& Display::GetLayer(std::size_t index) {
+    return layers.at(index);
+}
+
+const Layer& Display::GetLayer(std::size_t index) const {
+    return layers.at(index);
+}
+
+Kernel::SharedPtr<Kernel::ReadableEvent> Display::GetVSyncEvent() const {
+    return vsync_event.readable;
+}
+
+void Display::SignalVSyncEvent() {
+    vsync_event.writable->Signal();
+}
+
+void Display::CreateLayer(u64 id, NVFlinger::BufferQueue& buffer_queue) {
+    // TODO(Subv): Support more than 1 layer.
+    ASSERT_MSG(layers.empty(), "Only one layer is supported per display at the moment");
+
+    layers.emplace_back(id, buffer_queue);
+}
+
+Layer* Display::FindLayer(u64 id) {
+    const auto itr = std::find_if(layers.begin(), layers.end(),
+                                  [id](const VI::Layer& layer) { return layer.GetID() == id; });
+
+    if (itr == layers.end()) {
+        return nullptr;
+    }
+
+    return &*itr;
+}
+
+const Layer* Display::FindLayer(u64 id) const {
+    const auto itr = std::find_if(layers.begin(), layers.end(),
+                                  [id](const VI::Layer& layer) { return layer.GetID() == id; });
+
+    if (itr == layers.end()) {
+        return nullptr;
+    }
+
+    return &*itr;
+}
+
 } // namespace Service::VI
diff --git a/src/core/hle/service/vi/display/vi_display.h b/src/core/hle/service/vi/display/vi_display.h
index df44db306..2acd46ff8 100644
--- a/src/core/hle/service/vi/display/vi_display.h
+++ b/src/core/hle/service/vi/display/vi_display.h
@@ -10,14 +10,84 @@
 #include "common/common_types.h"
 #include "core/hle/kernel/writable_event.h"
 
+namespace Service::NVFlinger {
+class BufferQueue;
+}
+
 namespace Service::VI {
 
-struct Layer;
+class Layer;
 
-struct Display {
+/// Represents a single display type
+class Display {
+public:
+    /// Constructs a display with a given unique ID and name.
+    ///
+    /// @param id The unique ID for this display.
+    /// @param name The name for this display.
+    ///
     Display(u64 id, std::string name);
     ~Display();
 
+    Display(const Display&) = delete;
+    Display& operator=(const Display&) = delete;
+
+    Display(Display&&) = default;
+    Display& operator=(Display&&) = default;
+
+    /// Gets the unique ID assigned to this display.
+    u64 GetID() const {
+        return id;
+    }
+
+    /// Gets the name of this display
+    const std::string& GetName() const {
+        return name;
+    }
+
+    /// Whether or not this display has any layers added to it.
+    bool HasLayers() const {
+        return !layers.empty();
+    }
+
+    /// Gets a layer for this display based off an index.
+    Layer& GetLayer(std::size_t index);
+
+    /// Gets a layer for this display based off an index.
+    const Layer& GetLayer(std::size_t index) const;
+
+    /// Gets the readable vsync event.
+    Kernel::SharedPtr<Kernel::ReadableEvent> GetVSyncEvent() const;
+
+    /// Signals the internal vsync event.
+    void SignalVSyncEvent();
+
+    /// Creates and adds a layer to this display with the given ID.
+    ///
+    /// @param id The ID to assign to the created layer.
+    /// @param buffer_queue The buffer queue for the layer instance to use.
+    ///
+    void CreateLayer(u64 id, NVFlinger::BufferQueue& buffer_queue);
+
+    /// Attempts to find a layer with the given ID.
+    ///
+    /// @param id The layer ID.
+    ///
+    /// @returns If found, the Layer instance with the given ID.
+    ///          If not found, then nullptr is returned.
+    ///
+    Layer* FindLayer(u64 id);
+
+    /// Attempts to find a layer with the given ID.
+    ///
+    /// @param id The layer ID.
+    ///
+    /// @returns If found, the Layer instance with the given ID.
+    ///          If not found, then nullptr is returned.
+    ///
+    const Layer* FindLayer(u64 id) const;
+
+private:
     u64 id;
     std::string name;
 
diff --git a/src/core/hle/service/vi/layer/vi_layer.cpp b/src/core/hle/service/vi/layer/vi_layer.cpp
index 3a83e5b95..954225c26 100644
--- a/src/core/hle/service/vi/layer/vi_layer.cpp
+++ b/src/core/hle/service/vi/layer/vi_layer.cpp
@@ -6,8 +6,7 @@
 
 namespace Service::VI {
 
-Layer::Layer(u64 id, std::shared_ptr<NVFlinger::BufferQueue> queue)
-    : id{id}, buffer_queue{std::move(queue)} {}
+Layer::Layer(u64 id, NVFlinger::BufferQueue& queue) : id{id}, buffer_queue{queue} {}
 
 Layer::~Layer() = default;
 
diff --git a/src/core/hle/service/vi/layer/vi_layer.h b/src/core/hle/service/vi/layer/vi_layer.h
index df328e09f..c6bfd01f6 100644
--- a/src/core/hle/service/vi/layer/vi_layer.h
+++ b/src/core/hle/service/vi/layer/vi_layer.h
@@ -4,8 +4,6 @@
 
 #pragma once
 
-#include <memory>
-
 #include "common/common_types.h"
 
 namespace Service::NVFlinger {
@@ -14,12 +12,41 @@ class BufferQueue;
 
 namespace Service::VI {
 
-struct Layer {
-    Layer(u64 id, std::shared_ptr<NVFlinger::BufferQueue> queue);
+/// Represents a single display layer.
+class Layer {
+public:
+    /// Constructs a layer with a given ID and buffer queue.
+    ///
+    /// @param id The ID to assign to this layer.
+    /// @param queue The buffer queue for this layer to use.
+    ///
+    Layer(u64 id, NVFlinger::BufferQueue& queue);
     ~Layer();
 
+    Layer(const Layer&) = delete;
+    Layer& operator=(const Layer&) = delete;
+
+    Layer(Layer&&) = default;
+    Layer& operator=(Layer&&) = delete;
+
+    /// Gets the ID for this layer.
+    u64 GetID() const {
+        return id;
+    }
+
+    /// Gets a reference to the buffer queue this layer is using.
+    NVFlinger::BufferQueue& GetBufferQueue() {
+        return buffer_queue;
+    }
+
+    /// Gets a const reference to the buffer queue this layer is using.
+    const NVFlinger::BufferQueue& GetBufferQueue() const {
+        return buffer_queue;
+    }
+
+private:
     u64 id;
-    std::shared_ptr<NVFlinger::BufferQueue> buffer_queue;
+    NVFlinger::BufferQueue& buffer_queue;
 };
 
 } // namespace Service::VI
diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp
index a317a2885..7369a09ec 100644
--- a/src/core/hle/service/vi/vi.cpp
+++ b/src/core/hle/service/vi/vi.cpp
@@ -525,7 +525,7 @@ private:
         LOG_DEBUG(Service_VI, "called. id=0x{:08X} transaction={:X}, flags=0x{:08X}", id,
                   static_cast<u32>(transaction), flags);
 
-        auto buffer_queue = nv_flinger->FindBufferQueue(id);
+        auto& buffer_queue = nv_flinger->FindBufferQueue(id);
 
         if (transaction == TransactionId::Connect) {
             IGBPConnectRequestParcel request{ctx.ReadBuffer()};
@@ -538,7 +538,7 @@ private:
         } else if (transaction == TransactionId::SetPreallocatedBuffer) {
             IGBPSetPreallocatedBufferRequestParcel request{ctx.ReadBuffer()};
 
-            buffer_queue->SetPreallocatedBuffer(request.data.slot, request.buffer);
+            buffer_queue.SetPreallocatedBuffer(request.data.slot, request.buffer);
 
             IGBPSetPreallocatedBufferResponseParcel response{};
             ctx.WriteBuffer(response.Serialize());
@@ -546,7 +546,7 @@ private:
             IGBPDequeueBufferRequestParcel request{ctx.ReadBuffer()};
             const u32 width{request.data.width};
             const u32 height{request.data.height};
-            std::optional<u32> slot = buffer_queue->DequeueBuffer(width, height);
+            std::optional<u32> slot = buffer_queue.DequeueBuffer(width, height);
 
             if (slot) {
                 // Buffer is available
@@ -559,8 +559,8 @@ private:
                    [=](Kernel::SharedPtr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx,
                        Kernel::ThreadWakeupReason reason) {
                        // Repeat TransactParcel DequeueBuffer when a buffer is available
-                       auto buffer_queue = nv_flinger->FindBufferQueue(id);
-                       std::optional<u32> slot = buffer_queue->DequeueBuffer(width, height);
+                       auto& buffer_queue = nv_flinger->FindBufferQueue(id);
+                       std::optional<u32> slot = buffer_queue.DequeueBuffer(width, height);
                        ASSERT_MSG(slot != std::nullopt, "Could not dequeue buffer.");
 
                        IGBPDequeueBufferResponseParcel response{*slot};
@@ -568,28 +568,28 @@ private:
                        IPC::ResponseBuilder rb{ctx, 2};
                        rb.Push(RESULT_SUCCESS);
                    },
-                   buffer_queue->GetWritableBufferWaitEvent());
+                   buffer_queue.GetWritableBufferWaitEvent());
            }
        } else if (transaction == TransactionId::RequestBuffer) {
            IGBPRequestBufferRequestParcel request{ctx.ReadBuffer()};
 
-            auto& buffer = buffer_queue->RequestBuffer(request.slot);
+            auto& buffer = buffer_queue.RequestBuffer(request.slot);
 
            IGBPRequestBufferResponseParcel response{buffer};
            ctx.WriteBuffer(response.Serialize());
        } else if (transaction == TransactionId::QueueBuffer) {
            IGBPQueueBufferRequestParcel request{ctx.ReadBuffer()};
 
-            buffer_queue->QueueBuffer(request.data.slot, request.data.transform,
-                                      request.data.GetCropRect());
+            buffer_queue.QueueBuffer(request.data.slot, request.data.transform,
+                                     request.data.GetCropRect());
 
            IGBPQueueBufferResponseParcel response{1280, 720};
            ctx.WriteBuffer(response.Serialize());
        } else if (transaction == TransactionId::Query) {
            IGBPQueryRequestParcel request{ctx.ReadBuffer()};
 
-            u32 value =
-                buffer_queue->Query(static_cast<NVFlinger::BufferQueue::QueryType>(request.type));
+            const u32 value =
+                buffer_queue.Query(static_cast<NVFlinger::BufferQueue::QueryType>(request.type));
 
            IGBPQueryResponseParcel response{value};
            ctx.WriteBuffer(response.Serialize());
@@ -629,12 +629,12 @@ private:
 
        LOG_WARNING(Service_VI, "(STUBBED) called id={}, unknown={:08X}", id, unknown);
 
-        const auto buffer_queue = nv_flinger->FindBufferQueue(id);
+        const auto& buffer_queue = nv_flinger->FindBufferQueue(id);
 
        // TODO(Subv): Find out what this actually is.
        IPC::ResponseBuilder rb{ctx, 2, 1};
        rb.Push(RESULT_SUCCESS);
-        rb.PushCopyObjects(buffer_queue->GetBufferWaitEvent());
+        rb.PushCopyObjects(buffer_queue.GetBufferWaitEvent());
    }
 
    std::shared_ptr<NVFlinger::NVFlinger> nv_flinger;