Diffstat (limited to 'src/audio_core/audio_renderer.cpp')
-rw-r--r--  src/audio_core/audio_renderer.cpp | 343
1 file changed, 0 insertions(+), 343 deletions(-)
diff --git a/src/audio_core/audio_renderer.cpp b/src/audio_core/audio_renderer.cpp
deleted file mode 100644
index 9191ca093..000000000
--- a/src/audio_core/audio_renderer.cpp
+++ /dev/null
@@ -1,343 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include <algorithm>
#include <array>
#include <limits>
#include <optional>
#include <span>
#include <tuple>
#include <vector>

#include "audio_core/audio_out.h"
#include "audio_core/audio_renderer.h"
#include "audio_core/common.h"
#include "audio_core/info_updater.h"
#include "audio_core/voice_context.h"
#include "common/logging/log.h"
#include "common/settings.h"
#include "core/core_timing.h"
#include "core/memory.h"

namespace {
[[nodiscard]] static constexpr s16 ClampToS16(s32 value) {
    return static_cast<s16>(std::clamp(value, s32{std::numeric_limits<s16>::min()},
                                       s32{std::numeric_limits<s16>::max()}));
}
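// For reference: ClampToS16(40000) == 32767 and ClampToS16(-40000) == -32768, i.e. out-of-range
// values saturate at the signed 16-bit limits rather than wrapping.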

[[nodiscard]] static constexpr s16 Mix2To1(s16 l_channel, s16 r_channel) {
    // Mix 50% from left and 50% from right channel
    constexpr float l_mix_amount = 50.0f / 100.0f;
    constexpr float r_mix_amount = 50.0f / 100.0f;
    return ClampToS16(static_cast<s32>((static_cast<float>(l_channel) * l_mix_amount) +
                                       (static_cast<float>(r_channel) * r_mix_amount)));
}
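// For reference: Mix2To1(1000, -500) == 250, the equal-weight average of the two input samples.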

[[maybe_unused, nodiscard]] static constexpr std::tuple<s16, s16> Mix6To2(
    s16 fl_channel, s16 fr_channel, s16 fc_channel, [[maybe_unused]] s16 lf_channel, s16 bl_channel,
    s16 br_channel) {
    // The front and back channels are mixed in at 36.94% each, the center channel at 26.12%, and
    // the LFE channel is dropped

    constexpr float front_mix_amount = 36.94f / 100.0f;
    constexpr float center_mix_amount = 26.12f / 100.0f;
    constexpr float back_mix_amount = 36.94f / 100.0f;

    // Each output channel is the weighted sum of its front channel, the center channel and its
    // back channel
    const auto left = front_mix_amount * static_cast<float>(fl_channel) +
                      center_mix_amount * static_cast<float>(fc_channel) +
                      back_mix_amount * static_cast<float>(bl_channel);

    const auto right = front_mix_amount * static_cast<float>(fr_channel) +
                       center_mix_amount * static_cast<float>(fc_channel) +
                       back_mix_amount * static_cast<float>(br_channel);

    return {ClampToS16(static_cast<s32>(left)), ClampToS16(static_cast<s32>(right))};
}
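// Note that 0.3694 + 0.2612 + 0.3694 sums to exactly 1.0, so a full-scale signal present on the
// front, center and back channels downmixes without clipping.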

[[nodiscard]] static constexpr std::tuple<s16, s16> Mix6To2WithCoefficients(
    s16 fl_channel, s16 fr_channel, s16 fc_channel, s16 lf_channel, s16 bl_channel, s16 br_channel,
    const std::array<float_le, 4>& coeff) {
    const auto left =
        static_cast<float>(fl_channel) * coeff[0] + static_cast<float>(fc_channel) * coeff[1] +
        static_cast<float>(lf_channel) * coeff[2] + static_cast<float>(bl_channel) * coeff[3];

    const auto right =
        static_cast<float>(fr_channel) * coeff[0] + static_cast<float>(fc_channel) * coeff[1] +
        static_cast<float>(lf_channel) * coeff[2] + static_cast<float>(br_channel) * coeff[3];

    return {ClampToS16(static_cast<s32>(left)), ClampToS16(static_cast<s32>(right))};
}
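// The coefficients are the game-supplied downmix parameters obtained from the sink (see
// sink_context.GetDownmixCoefficients() below): coeff[0] scales the front channels, coeff[1] the
// center, coeff[2] the LFE and coeff[3] the back channels. With a hypothetical
// coeff == {0.3694f, 0.2612f, 0.0f, 0.3694f} this reproduces the fixed Mix6To2() weights.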

} // namespace

namespace AudioCore {
constexpr s32 NUM_BUFFERS = 2;

AudioRenderer::AudioRenderer(Core::Timing::CoreTiming& core_timing_, Core::Memory::Memory& memory_,
                             AudioCommon::AudioRendererParameter params,
                             Stream::ReleaseCallback&& release_callback,
                             std::size_t instance_number)
    : worker_params{params}, memory_pool_info(params.effect_count + params.voice_count * 4),
      voice_context(params.voice_count), effect_context(params.effect_count), mix_context(),
      sink_context(params.sink_count), splitter_context(),
      voices(params.voice_count), memory{memory_},
      command_generator(worker_params, voice_context, mix_context, splitter_context, effect_context,
                        memory),
      core_timing{core_timing_} {
    behavior_info.SetUserRevision(params.revision);
    splitter_context.Initialize(behavior_info, params.splitter_count,
                                params.num_splitter_send_channels);
    mix_context.Initialize(behavior_info, params.submix_count + 1, params.effect_count);
    audio_out = std::make_unique<AudioCore::AudioOut>();
    stream = audio_out->OpenStream(
        core_timing, params.sample_rate, AudioCommon::STREAM_NUM_CHANNELS,
        fmt::format("AudioRenderer-Instance{}", instance_number), std::move(release_callback));
    process_event =
        Core::Timing::CreateEvent(fmt::format("AudioRenderer-Instance{}-Process", instance_number),
                                  [this](std::uintptr_t, s64, std::chrono::nanoseconds) {
                                      ReleaseAndQueueBuffers();
                                      return std::nullopt;
                                  });
    for (s32 i = 0; i < NUM_BUFFERS; ++i) {
        QueueMixedBuffer(i);
    }
}
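// Note: the constructor above queues NUM_BUFFERS mixed buffers up front so the stream already has
// data to hand out as soon as Start() is called; after that, the process_event scheduled by
// ReleaseAndQueueBuffers() keeps the queue topped up.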

AudioRenderer::~AudioRenderer() = default;

Result AudioRenderer::Start() {
    audio_out->StartStream(stream);
    ReleaseAndQueueBuffers();
    return ResultSuccess;
}

Result AudioRenderer::Stop() {
    audio_out->StopStream(stream);
    return ResultSuccess;
}

u32 AudioRenderer::GetSampleRate() const {
    return worker_params.sample_rate;
}

u32 AudioRenderer::GetSampleCount() const {
    return worker_params.sample_count;
}

u32 AudioRenderer::GetMixBufferCount() const {
    return worker_params.mix_buffer_count;
}

Stream::State AudioRenderer::GetStreamState() const {
    return stream->GetState();
}

Result AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_params,
                                          std::vector<u8>& output_params) {
    std::scoped_lock lock{mutex};
    InfoUpdater info_updater{input_params, output_params, behavior_info};

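    // The request blob is parsed section by section in the fixed order below; WriteOutputHeader()
    // finalizes the response and CheckConsumedSize() verifies that the entire input was consumed.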
    if (!info_updater.UpdateBehaviorInfo(behavior_info)) {
        LOG_ERROR(Audio, "Failed to update behavior info input parameters");
        return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
    }

    if (!info_updater.UpdateMemoryPools(memory_pool_info)) {
        LOG_ERROR(Audio, "Failed to update memory pool parameters");
        return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
    }

    if (!info_updater.UpdateVoiceChannelResources(voice_context)) {
        LOG_ERROR(Audio, "Failed to update voice channel resource parameters");
        return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
    }

    if (!info_updater.UpdateVoices(voice_context, memory_pool_info, 0)) {
        LOG_ERROR(Audio, "Failed to update voice parameters");
        return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
    }

    // TODO(ogniK): Deal with a stopped audio renderer while updates are still taking place
    if (!info_updater.UpdateEffects(effect_context, true)) {
        LOG_ERROR(Audio, "Failed to update effect parameters");
        return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
    }

    if (behavior_info.IsSplitterSupported()) {
        if (!info_updater.UpdateSplitterInfo(splitter_context)) {
            LOG_ERROR(Audio, "Failed to update splitter parameters");
            return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
        }
    }

    const auto mix_result = info_updater.UpdateMixes(mix_context, worker_params.mix_buffer_count,
                                                     splitter_context, effect_context);

    if (mix_result.IsError()) {
        LOG_ERROR(Audio, "Failed to update mix parameters");
        return mix_result;
    }

    // TODO(ogniK): Sinks
    if (!info_updater.UpdateSinks(sink_context)) {
        LOG_ERROR(Audio, "Failed to update sink parameters");
        return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
    }

    // TODO(ogniK): Performance buffer
    if (!info_updater.UpdatePerformanceBuffer()) {
        LOG_ERROR(Audio, "Failed to update performance buffer parameters");
        return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
    }

    if (!info_updater.UpdateErrorInfo(behavior_info)) {
        LOG_ERROR(Audio, "Failed to update error info");
        return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
    }

    if (behavior_info.IsElapsedFrameCountSupported()) {
        if (!info_updater.UpdateRendererInfo(elapsed_frame_count)) {
            LOG_ERROR(Audio, "Failed to update renderer info");
            return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
        }
    }
    // TODO(ogniK): Statistics

    if (!info_updater.WriteOutputHeader()) {
        LOG_ERROR(Audio, "Failed to write output header");
        return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
    }

    // TODO(ogniK): Check when all sections are implemented

    if (!info_updater.CheckConsumedSize()) {
        LOG_ERROR(Audio, "Audio buffers were not consumed!");
        return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
    }
    return ResultSuccess;
}

void AudioRenderer::QueueMixedBuffer(Buffer::Tag tag) {
    command_generator.PreCommand();
    // Clear mix buffers before our next operation
    command_generator.ClearMixBuffers();

    // If the splitter is not in use, sort our mixes
    if (!splitter_context.UsingSplitter()) {
        mix_context.SortInfo();
    }
    // Sort our voices
    voice_context.SortInfo();

    // Handle samples
    command_generator.GenerateVoiceCommands();
    command_generator.GenerateSubMixCommands();
    command_generator.GenerateFinalMixCommands();

    command_generator.PostCommand();
    // Number of samples per channel in one output buffer
    std::size_t BUFFER_SIZE{worker_params.sample_count};
    // Interleaved output samples, zero-initialized
    std::vector<s16> buffer(BUFFER_SIZE * stream->GetNumChannels(), 0);

    if (sink_context.InUse()) {
        const auto stream_channel_count = stream->GetNumChannels();
        const auto buffer_offsets = sink_context.OutputBuffers();
        const auto channel_count = buffer_offsets.size();
        const auto& final_mix = mix_context.GetFinalMixInfo();
        const auto& in_params = final_mix.GetInParams();
        std::vector<std::span<s32>> mix_buffers(channel_count);
        for (std::size_t i = 0; i < channel_count; i++) {
            mix_buffers[i] =
                command_generator.GetMixBuffer(in_params.buffer_offset + buffer_offsets[i]);
        }

        for (std::size_t i = 0; i < BUFFER_SIZE; i++) {
            if (channel_count == 1) {
                const auto sample = ClampToS16(mix_buffers[0][i]);

                // Place the sample in all output channels
                for (u32 channel = 0; channel < stream_channel_count; channel++) {
                    buffer[i * stream_channel_count + channel] = sample;
                }

                if (stream_channel_count == 6) {
                    // The output stream has an LFE channel; mute it
                    buffer[i * stream_channel_count + 3] = 0;
                }

            } else if (channel_count == 2) {
                const auto l_sample = ClampToS16(mix_buffers[0][i]);
                const auto r_sample = ClampToS16(mix_buffers[1][i]);
                if (stream_channel_count == 1) {
                    buffer[i * stream_channel_count + 0] = Mix2To1(l_sample, r_sample);
                } else if (stream_channel_count == 2) {
                    buffer[i * stream_channel_count + 0] = l_sample;
                    buffer[i * stream_channel_count + 1] = r_sample;
                } else if (stream_channel_count == 6) {
                    buffer[i * stream_channel_count + 0] = l_sample;
                    buffer[i * stream_channel_count + 1] = r_sample;

                    // Combine the left and right channels into the center channel
                    buffer[i * stream_channel_count + 2] = Mix2To1(l_sample, r_sample);

                    buffer[i * stream_channel_count + 4] = l_sample;
                    buffer[i * stream_channel_count + 5] = r_sample;
                }

            } else if (channel_count == 6) {
                const auto fl_sample = ClampToS16(mix_buffers[0][i]);
                const auto fr_sample = ClampToS16(mix_buffers[1][i]);
                const auto fc_sample = ClampToS16(mix_buffers[2][i]);
                const auto lf_sample = ClampToS16(mix_buffers[3][i]);
                const auto bl_sample = ClampToS16(mix_buffers[4][i]);
                const auto br_sample = ClampToS16(mix_buffers[5][i]);

                if (stream_channel_count == 1) {
                    // Games seem to ignore the center channel half the time, so mix down from the
                    // front left and front right channels, which is where the majority of the
                    // audio goes
                    buffer[i * stream_channel_count + 0] = Mix2To1(fl_sample, fr_sample);
                } else if (stream_channel_count == 2) {
                    // Mix all channels down into 2 channels
                    const auto [left, right] = Mix6To2WithCoefficients(
                        fl_sample, fr_sample, fc_sample, lf_sample, bl_sample, br_sample,
                        sink_context.GetDownmixCoefficients());
                    buffer[i * stream_channel_count + 0] = left;
                    buffer[i * stream_channel_count + 1] = right;
                } else if (stream_channel_count == 6) {
                    // Pass through
                    buffer[i * stream_channel_count + 0] = fl_sample;
                    buffer[i * stream_channel_count + 1] = fr_sample;
                    buffer[i * stream_channel_count + 2] = fc_sample;
                    buffer[i * stream_channel_count + 3] = lf_sample;
                    buffer[i * stream_channel_count + 4] = bl_sample;
                    buffer[i * stream_channel_count + 5] = br_sample;
                }
            }
        }
    }

    audio_out->QueueBuffer(stream, tag, std::move(buffer));
    elapsed_frame_count++;
    voice_context.UpdateStateByDspShared();
}

void AudioRenderer::ReleaseAndQueueBuffers() {
    if (!stream->IsPlaying()) {
        return;
    }

    {
        std::scoped_lock lock{mutex};
        const auto released_buffers{audio_out->GetTagsAndReleaseBuffers(stream)};
        for (const auto& tag : released_buffers) {
            QueueMixedBuffer(tag);
        }
    }

    const f32 sample_rate = static_cast<f32>(GetSampleRate());
    const f32 sample_count = static_cast<f32>(GetSampleCount());
    const f32 consume_rate = sample_rate / (sample_count * (sample_count / 240));
    const s32 ms = (1000 / static_cast<s32>(consume_rate)) - 1;
    const std::chrono::milliseconds next_event_time(std::max(ms / NUM_BUFFERS, 1));
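    // Worked example (hypothetical values): at 48000 Hz with 240 samples per buffer,
    // consume_rate = 48000 / (240 * 1) = 200 buffers per second, ms = (1000 / 200) - 1 = 4, and
    // the next event is scheduled max(4 / 2, 1) = 2 ms from now.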
    core_timing.ScheduleEvent(next_event_time, process_event, {});
}

} // namespace AudioCore