Diffstat (limited to 'src/video_core/host1x/vic.cpp')
-rw-r--r--  src/video_core/host1x/vic.cpp  62
1 file changed, 32 insertions(+), 30 deletions(-)
diff --git a/src/video_core/host1x/vic.cpp b/src/video_core/host1x/vic.cpp
index 10d7ef884..2a5eba415 100644
--- a/src/video_core/host1x/vic.cpp
+++ b/src/video_core/host1x/vic.cpp
@@ -82,27 +82,26 @@ void Vic::Execute() {
         return;
     }
     const VicConfig config{host1x.MemoryManager().Read<u64>(config_struct_address + 0x20)};
-    const AVFramePtr frame_ptr = nvdec_processor->GetFrame();
-    const auto* frame = frame_ptr.get();
+    auto frame = nvdec_processor->GetFrame();
     if (!frame) {
         return;
     }
     const u64 surface_width = config.surface_width_minus1 + 1;
     const u64 surface_height = config.surface_height_minus1 + 1;
-    if (static_cast<u64>(frame->width) != surface_width ||
-        static_cast<u64>(frame->height) != surface_height) {
+    if (static_cast<u64>(frame->GetWidth()) != surface_width ||
+        static_cast<u64>(frame->GetHeight()) != surface_height) {
         // TODO: Properly support multiple video streams with differing frame dimensions
         LOG_WARNING(Service_NVDRV, "Frame dimensions {}x{} don't match surface dimensions {}x{}",
-                    frame->width, frame->height, surface_width, surface_height);
+                    frame->GetWidth(), frame->GetHeight(), surface_width, surface_height);
     }
     switch (config.pixel_format) {
     case VideoPixelFormat::RGBA8:
     case VideoPixelFormat::BGRA8:
     case VideoPixelFormat::RGBX8:
-        WriteRGBFrame(frame, config);
+        WriteRGBFrame(std::move(frame), config);
         break;
     case VideoPixelFormat::YUV420:
-        WriteYUVFrame(frame, config);
+        WriteYUVFrame(std::move(frame), config);
         break;
     default:
         UNIMPLEMENTED_MSG("Unknown video pixel format {:X}", config.pixel_format.Value());
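Note: the GetWidth/GetHeight/GetPixelFormat/GetData/GetStride/GetPlanes/GetStrides calls introduced above come from the FFmpeg::Frame wrapper that replaces raw AVFrame pointers in this change. The wrapper is defined elsewhere in the tree (under src/video_core/host1x/ffmpeg/); the sketch below only illustrates the interface this diff assumes and is not the actual yuzu implementation.

// Rough sketch of the wrapper interface assumed by this diff; the real
// FFmpeg::Frame may differ in detail.
#include <cstdint>

extern "C" {
#include <libavutil/frame.h>
}

namespace FFmpeg {

class Frame {
public:
    Frame() : frame{av_frame_alloc()} {}
    ~Frame() {
        av_frame_free(&frame);
    }

    Frame(const Frame&) = delete;
    Frame& operator=(const Frame&) = delete;

    int GetWidth() const { return frame->width; }
    int GetHeight() const { return frame->height; }
    AVPixelFormat GetPixelFormat() const { return static_cast<AVPixelFormat>(frame->format); }
    int GetStride(int plane) const { return frame->linesize[plane]; }
    std::uint8_t* GetData(int plane) const { return frame->data[plane]; }
    const std::uint8_t* const* GetPlanes() const { return frame->data; }
    const int* GetStrides() const { return frame->linesize; }

private:
    AVFrame* frame{};
};

} // namespace FFmpeg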
@@ -110,10 +109,14 @@ void Vic::Execute() {
     }
 }
 
-void Vic::WriteRGBFrame(const AVFrame* frame, const VicConfig& config) {
+void Vic::WriteRGBFrame(std::unique_ptr<FFmpeg::Frame> frame, const VicConfig& config) {
     LOG_TRACE(Service_NVDRV, "Writing RGB Frame");
 
-    if (!scaler_ctx || frame->width != scaler_width || frame->height != scaler_height) {
+    const auto frame_width = frame->GetWidth();
+    const auto frame_height = frame->GetHeight();
+    const auto frame_format = frame->GetPixelFormat();
+
+    if (!scaler_ctx || frame_width != scaler_width || frame_height != scaler_height) {
         const AVPixelFormat target_format = [pixel_format = config.pixel_format]() {
             switch (pixel_format) {
             case VideoPixelFormat::RGBA8:
@@ -129,27 +132,26 @@ void Vic::WriteRGBFrame(const AVFrame* frame, const VicConfig& config) {
 
         sws_freeContext(scaler_ctx);
         // Frames are decoded into either YUV420 or NV12 formats. Convert to desired RGB format
-        scaler_ctx = sws_getContext(frame->width, frame->height,
-                                    static_cast<AVPixelFormat>(frame->format), frame->width,
-                                    frame->height, target_format, 0, nullptr, nullptr, nullptr);
-        scaler_width = frame->width;
-        scaler_height = frame->height;
+        scaler_ctx = sws_getContext(frame_width, frame_height, frame_format, frame_width,
+                                    frame_height, target_format, 0, nullptr, nullptr, nullptr);
+        scaler_width = frame_width;
+        scaler_height = frame_height;
         converted_frame_buffer.reset();
     }
     if (!converted_frame_buffer) {
-        const size_t frame_size = frame->width * frame->height * 4;
+        const size_t frame_size = frame_width * frame_height * 4;
         converted_frame_buffer = AVMallocPtr{static_cast<u8*>(av_malloc(frame_size)), av_free};
     }
-    const std::array<int, 4> converted_stride{frame->width * 4, frame->height * 4, 0, 0};
+    const std::array<int, 4> converted_stride{frame_width * 4, frame_height * 4, 0, 0};
     u8* const converted_frame_buf_addr{converted_frame_buffer.get()};
-    sws_scale(scaler_ctx, frame->data, frame->linesize, 0, frame->height, &converted_frame_buf_addr,
-              converted_stride.data());
+    sws_scale(scaler_ctx, frame->GetPlanes(), frame->GetStrides(), 0, frame_height,
+              &converted_frame_buf_addr, converted_stride.data());
 
     // Use the minimum of surface/frame dimensions to avoid buffer overflow.
     const u32 surface_width = static_cast<u32>(config.surface_width_minus1) + 1;
     const u32 surface_height = static_cast<u32>(config.surface_height_minus1) + 1;
-    const u32 width = std::min(surface_width, static_cast<u32>(frame->width));
-    const u32 height = std::min(surface_height, static_cast<u32>(frame->height));
+    const u32 width = std::min(surface_width, static_cast<u32>(frame_width));
+    const u32 height = std::min(surface_height, static_cast<u32>(frame_height));
     const u32 blk_kind = static_cast<u32>(config.block_linear_kind);
     if (blk_kind != 0) {
         // swizzle pitch linear to block linear
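Note: WriteRGBFrame follows the usual libswscale pattern: build an SwsContext for the source format and dimensions (cached here and rebuilt only when the frame size changes), then run sws_scale into a packed RGBA buffer. A minimal, uncached sketch of that pattern as a standalone illustration (not yuzu code):

// Illustration only: yuzu caches scaler_ctx and converted_frame_buffer instead of
// recreating them per frame, but the call sequence is the same.
#include <cstddef>
#include <cstdint>
#include <vector>

extern "C" {
#include <libavutil/frame.h>
#include <libswscale/swscale.h>
}

std::vector<std::uint8_t> ConvertToRGBA(const AVFrame* frame) {
    SwsContext* ctx =
        sws_getContext(frame->width, frame->height, static_cast<AVPixelFormat>(frame->format),
                       frame->width, frame->height, AV_PIX_FMT_RGBA, 0, nullptr, nullptr, nullptr);
    if (!ctx) {
        return {};
    }
    std::vector<std::uint8_t> rgba(static_cast<std::size_t>(frame->width) * frame->height * 4);
    std::uint8_t* dst_planes[1]{rgba.data()};
    const int dst_strides[1]{frame->width * 4};
    // Convert the full frame height in one call; the destination is a single packed plane.
    sws_scale(ctx, frame->data, frame->linesize, 0, frame->height, dst_planes, dst_strides);
    sws_freeContext(ctx);
    return rgba;
}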
@@ -169,23 +171,23 @@ void Vic::WriteRGBFrame(const AVFrame* frame, const VicConfig& config) {
     }
 }
 
-void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) {
+void Vic::WriteYUVFrame(std::unique_ptr<FFmpeg::Frame> frame, const VicConfig& config) {
     LOG_TRACE(Service_NVDRV, "Writing YUV420 Frame");
 
     const std::size_t surface_width = config.surface_width_minus1 + 1;
     const std::size_t surface_height = config.surface_height_minus1 + 1;
     const std::size_t aligned_width = (surface_width + 0xff) & ~0xffUL;
     // Use the minimum of surface/frame dimensions to avoid buffer overflow.
-    const auto frame_width = std::min(surface_width, static_cast<size_t>(frame->width));
-    const auto frame_height = std::min(surface_height, static_cast<size_t>(frame->height));
+    const auto frame_width = std::min(surface_width, static_cast<size_t>(frame->GetWidth()));
+    const auto frame_height = std::min(surface_height, static_cast<size_t>(frame->GetHeight()));
 
-    const auto stride = static_cast<size_t>(frame->linesize[0]);
+    const auto stride = static_cast<size_t>(frame->GetStride(0));
 
     luma_buffer.resize_destructive(aligned_width * surface_height);
     chroma_buffer.resize_destructive(aligned_width * surface_height / 2);
 
     // Populate luma buffer
-    const u8* luma_src = frame->data[0];
+    const u8* luma_src = frame->GetData(0);
     for (std::size_t y = 0; y < frame_height; ++y) {
         const std::size_t src = y * stride;
         const std::size_t dst = y * aligned_width;
@@ -196,16 +198,16 @@ void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) {
 
     // Chroma
     const std::size_t half_height = frame_height / 2;
-    const auto half_stride = static_cast<size_t>(frame->linesize[1]);
+    const auto half_stride = static_cast<size_t>(frame->GetStride(1));
 
-    switch (frame->format) {
+    switch (frame->GetPixelFormat()) {
     case AV_PIX_FMT_YUV420P: {
         // Frame from FFmpeg software
         // Populate chroma buffer from both channels with interleaving.
         const std::size_t half_width = frame_width / 2;
         u8* chroma_buffer_data = chroma_buffer.data();
-        const u8* chroma_b_src = frame->data[1];
-        const u8* chroma_r_src = frame->data[2];
+        const u8* chroma_b_src = frame->GetData(1);
+        const u8* chroma_r_src = frame->GetData(2);
         for (std::size_t y = 0; y < half_height; ++y) {
             const std::size_t src = y * half_stride;
             const std::size_t dst = y * aligned_width;
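Note: the interleaving loop body referenced by the comment above is unchanged and therefore falls outside this hunk's context. Its assumed shape, reconstructed from the surrounding names (chroma_b_src holding the U plane, chroma_r_src the V plane), is roughly:

// Assumed shape of the elided body: even bytes take U, odd bytes take V,
// producing the UV-interleaved chroma layout the surface expects.
for (std::size_t x = 0; x < half_width; ++x) {
    chroma_buffer_data[dst + x * 2 + 0] = chroma_b_src[src + x];
    chroma_buffer_data[dst + x * 2 + 1] = chroma_r_src[src + x];
}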
@@ -219,7 +221,7 @@ void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) {
     case AV_PIX_FMT_NV12: {
         // Frame from VA-API hardware
         // This is already interleaved so just copy
-        const u8* chroma_src = frame->data[1];
+        const u8* chroma_src = frame->GetData(1);
         for (std::size_t y = 0; y < half_height; ++y) {
             const std::size_t src = y * stride;
             const std::size_t dst = y * aligned_width;
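Note: in the NV12 case the hardware-decoded chroma plane is already UV-interleaved, so the elided loop body only needs a straight per-row copy into the aligned destination, presumably something like:

// Assumed per-row copy (std::memcpy from <cstring>); the actual body lies
// outside this hunk's context.
std::memcpy(chroma_buffer.data() + dst, chroma_src + src, frame_width);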