path: root/src/audio_core/voice_context.cpp
author Kelebek1 2022-07-16 23:48:45 +0100
committer Kelebek1 2022-07-22 01:11:32 +0100
commit 458da8a94877677f086f06cdeecf959ec4283a33 (patch)
tree 583166d77602ad90a0d552f37de8729ad80fd6c1 /src/audio_core/voice_context.cpp
parent Merge pull request #8598 from Link4565/recv-dontwait (diff)
download yuzu-458da8a94877677f086f06cdeecf959ec4283a33.tar.gz
yuzu-458da8a94877677f086f06cdeecf959ec4283a33.tar.xz
yuzu-458da8a94877677f086f06cdeecf959ec4283a33.zip
Project Andio
Diffstat (limited to 'src/audio_core/voice_context.cpp')
-rw-r--r-- src/audio_core/voice_context.cpp | 579
1 file changed, 0 insertions, 579 deletions
diff --git a/src/audio_core/voice_context.cpp b/src/audio_core/voice_context.cpp
deleted file mode 100644
index f58a5c754..000000000
--- a/src/audio_core/voice_context.cpp
+++ /dev/null
@@ -1,579 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include <algorithm>

#include "audio_core/behavior_info.h"
#include "audio_core/voice_context.h"
#include "core/memory.h"

namespace AudioCore {

ServerVoiceChannelResource::ServerVoiceChannelResource(s32 id_) : id(id_) {}
ServerVoiceChannelResource::~ServerVoiceChannelResource() = default;

bool ServerVoiceChannelResource::InUse() const {
    return in_use;
}

float ServerVoiceChannelResource::GetCurrentMixVolumeAt(std::size_t i) const {
    ASSERT(i < AudioCommon::MAX_MIX_BUFFERS);
    return mix_volume.at(i);
}

float ServerVoiceChannelResource::GetLastMixVolumeAt(std::size_t i) const {
    ASSERT(i < AudioCommon::MAX_MIX_BUFFERS);
    return last_mix_volume.at(i);
}

void ServerVoiceChannelResource::Update(VoiceChannelResource::InParams& in_params) {
    in_use = in_params.in_use;
    // Update our mix volumes only if it's in use
    if (in_params.in_use) {
        mix_volume = in_params.mix_volume;
    }
}

void ServerVoiceChannelResource::UpdateLastMixVolumes() {
    last_mix_volume = mix_volume;
}

const std::array<float, AudioCommon::MAX_MIX_BUFFERS>&
ServerVoiceChannelResource::GetCurrentMixVolume() const {
    return mix_volume;
}

const std::array<float, AudioCommon::MAX_MIX_BUFFERS>&
ServerVoiceChannelResource::GetLastMixVolume() const {
    return last_mix_volume;
}

ServerVoiceInfo::ServerVoiceInfo() {
    Initialize();
}
ServerVoiceInfo::~ServerVoiceInfo() = default;

void ServerVoiceInfo::Initialize() {
    in_params.in_use = false;
    in_params.node_id = 0;
    in_params.id = 0;
    in_params.current_playstate = ServerPlayState::Stop;
    in_params.priority = 255;
    in_params.sample_rate = 0;
    in_params.sample_format = SampleFormat::Invalid;
    in_params.channel_count = 0;
    in_params.pitch = 0.0f;
    in_params.volume = 0.0f;
    in_params.last_volume = 0.0f;
    in_params.biquad_filter.fill({});
    in_params.wave_buffer_count = 0;
    in_params.wave_buffer_head = 0;
    in_params.mix_id = AudioCommon::NO_MIX;
    in_params.splitter_info_id = AudioCommon::NO_SPLITTER;
    in_params.additional_params_address = 0;
    in_params.additional_params_size = 0;
    in_params.is_new = false;
    out_params.played_sample_count = 0;
    out_params.wave_buffer_consumed = 0;
    in_params.voice_drop_flag = false;
    in_params.buffer_mapped = true;
    in_params.wave_buffer_flush_request_count = 0;
    in_params.was_biquad_filter_enabled.fill(false);

    for (auto& wave_buffer : in_params.wave_buffer) {
        wave_buffer.start_sample_offset = 0;
        wave_buffer.end_sample_offset = 0;
        wave_buffer.is_looping = false;
        wave_buffer.end_of_stream = false;
        wave_buffer.buffer_address = 0;
        wave_buffer.buffer_size = 0;
        wave_buffer.context_address = 0;
        wave_buffer.context_size = 0;
        wave_buffer.sent_to_dsp = true;
    }

    stored_samples.clear();
}

void ServerVoiceInfo::UpdateParameters(const VoiceInfo::InParams& voice_in,
                                       BehaviorInfo& behavior_info) {
    in_params.in_use = voice_in.is_in_use;
    in_params.id = voice_in.id;
    in_params.node_id = voice_in.node_id;
    in_params.last_playstate = in_params.current_playstate;
    switch (voice_in.play_state) {
    case PlayState::Paused:
        in_params.current_playstate = ServerPlayState::Paused;
        break;
    case PlayState::Stopped:
        if (in_params.current_playstate != ServerPlayState::Stop) {
            in_params.current_playstate = ServerPlayState::RequestStop;
        }
        break;
    case PlayState::Started:
        in_params.current_playstate = ServerPlayState::Play;
        break;
    default:
        ASSERT_MSG(false, "Unknown playstate {}", voice_in.play_state);
        break;
    }

    in_params.priority = voice_in.priority;
    in_params.sorting_order = voice_in.sorting_order;
    in_params.sample_rate = voice_in.sample_rate;
    in_params.sample_format = voice_in.sample_format;
    in_params.channel_count = voice_in.channel_count;
    in_params.pitch = voice_in.pitch;
    in_params.volume = voice_in.volume;
    in_params.biquad_filter = voice_in.biquad_filter;
    in_params.wave_buffer_count = voice_in.wave_buffer_count;
    in_params.wave_buffer_head = voice_in.wave_buffer_head;
    if (behavior_info.IsFlushVoiceWaveBuffersSupported()) {
        const auto in_request_count = in_params.wave_buffer_flush_request_count;
        const auto voice_request_count = voice_in.wave_buffer_flush_request_count;
        in_params.wave_buffer_flush_request_count =
            static_cast<u8>(in_request_count + voice_request_count);
    }
    in_params.mix_id = voice_in.mix_id;
    if (behavior_info.IsSplitterSupported()) {
        in_params.splitter_info_id = voice_in.splitter_info_id;
    } else {
        in_params.splitter_info_id = AudioCommon::NO_SPLITTER;
    }

    std::memcpy(in_params.voice_channel_resource_id.data(),
                voice_in.voice_channel_resource_ids.data(),
                sizeof(s32) * in_params.voice_channel_resource_id.size());

    if (behavior_info.IsVoicePlayedSampleCountResetAtLoopPointSupported()) {
        in_params.behavior_flags.is_played_samples_reset_at_loop_point =
            voice_in.behavior_flags.is_played_samples_reset_at_loop_point;
    } else {
        in_params.behavior_flags.is_played_samples_reset_at_loop_point.Assign(0);
    }
    if (behavior_info.IsVoicePitchAndSrcSkippedSupported()) {
        in_params.behavior_flags.is_pitch_and_src_skipped =
            voice_in.behavior_flags.is_pitch_and_src_skipped;
    } else {
        in_params.behavior_flags.is_pitch_and_src_skipped.Assign(0);
    }

    if (voice_in.is_voice_drop_flag_clear_requested) {
        in_params.voice_drop_flag = false;
    }

    if (in_params.additional_params_address != voice_in.additional_params_address ||
        in_params.additional_params_size != voice_in.additional_params_size) {
        in_params.additional_params_address = voice_in.additional_params_address;
        in_params.additional_params_size = voice_in.additional_params_size;
        // TODO(ogniK): Reattach buffer, do we actually need to? Maybe just signal to the DSP that
        // our context is new
    }
}

void ServerVoiceInfo::UpdateWaveBuffers(
    const VoiceInfo::InParams& voice_in,
    std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& voice_states,
    BehaviorInfo& behavior_info) {
    if (voice_in.is_new) {
        // Initialize our wave buffers
        for (auto& wave_buffer : in_params.wave_buffer) {
            wave_buffer.start_sample_offset = 0;
            wave_buffer.end_sample_offset = 0;
            wave_buffer.is_looping = false;
            wave_buffer.end_of_stream = false;
            wave_buffer.buffer_address = 0;
            wave_buffer.buffer_size = 0;
            wave_buffer.context_address = 0;
            wave_buffer.context_size = 0;
            wave_buffer.loop_start_sample = 0;
            wave_buffer.loop_end_sample = 0;
            wave_buffer.sent_to_dsp = true;
        }

        // Mark all our wave buffers as invalid
        for (std::size_t channel = 0; channel < static_cast<std::size_t>(in_params.channel_count);
             channel++) {
            for (std::size_t i = 0; i < AudioCommon::MAX_WAVE_BUFFERS; ++i) {
                voice_states[channel]->is_wave_buffer_valid[i] = false;
            }
        }
    }

    // Update our wave buffers
    for (std::size_t i = 0; i < AudioCommon::MAX_WAVE_BUFFERS; i++) {
        // Assume that we have at least 1 channel voice state
        const auto have_valid_wave_buffer = voice_states[0]->is_wave_buffer_valid[i];

        UpdateWaveBuffer(in_params.wave_buffer[i], voice_in.wave_buffer[i], in_params.sample_format,
                         have_valid_wave_buffer, behavior_info);
    }
}

void ServerVoiceInfo::UpdateWaveBuffer(ServerWaveBuffer& out_wavebuffer,
                                       const WaveBuffer& in_wave_buffer, SampleFormat sample_format,
                                       bool is_buffer_valid,
                                       [[maybe_unused]] BehaviorInfo& behavior_info) {
    if (!is_buffer_valid && out_wavebuffer.sent_to_dsp && out_wavebuffer.buffer_address != 0) {
        out_wavebuffer.buffer_address = 0;
        out_wavebuffer.buffer_size = 0;
    }

    if (!in_wave_buffer.sent_to_server || !in_params.buffer_mapped) {
        // Validate that the sample offsets fit within the buffer
        if (sample_format == SampleFormat::Pcm16) {
            const s64 buffer_size = static_cast<s64>(in_wave_buffer.buffer_size);
            const s64 start = sizeof(s16) * in_wave_buffer.start_sample_offset;
            const s64 end = sizeof(s16) * in_wave_buffer.end_sample_offset;
            if (0 > start || start > buffer_size || 0 > end || end > buffer_size) {
                // TODO(ogniK): Write error info
                LOG_ERROR(Audio,
                          "PCM16 wavebuffer has an invalid size. Buffer has size 0x{:08X}, but "
                          "offsets were 0x{:08X} - 0x{:08X}",
                          buffer_size, sizeof(s16) * in_wave_buffer.start_sample_offset,
                          sizeof(s16) * in_wave_buffer.end_sample_offset);
                return;
            }
        } else if (sample_format == SampleFormat::Adpcm) {
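            // Each 8-byte ADPCM frame holds a 1-byte header plus 7 data bytes (2 samples per
            // byte, 14 samples per frame), so convert the sample offsets to byte offsets
            // before checking them against the buffer size.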
            const s64 buffer_size = static_cast<s64>(in_wave_buffer.buffer_size);
            const s64 start_frames = in_wave_buffer.start_sample_offset / 14;
            const s64 start_extra = in_wave_buffer.start_sample_offset % 14 == 0
                                        ? 0
                                        : (in_wave_buffer.start_sample_offset % 14) / 2 + 1 +
                                              (in_wave_buffer.start_sample_offset % 2);
            const s64 start = start_frames * 8 + start_extra;
            const s64 end_frames = in_wave_buffer.end_sample_offset / 14;
            const s64 end_extra = in_wave_buffer.end_sample_offset % 14 == 0
                                      ? 0
                                      : (in_wave_buffer.end_sample_offset % 14) / 2 + 1 +
                                            (in_wave_buffer.end_sample_offset % 2);
            const s64 end = end_frames * 8 + end_extra;
            if (in_wave_buffer.start_sample_offset < 0 || start > buffer_size ||
                in_wave_buffer.end_sample_offset < 0 || end > buffer_size) {
                LOG_ERROR(Audio,
                          "ADPCM wavebuffer has an invalid size. Buffer has size 0x{:08X}, but "
                          "offsets were 0x{:08X} - 0x{:08X}",
                          in_wave_buffer.buffer_size, start, end);
                return;
            }
        }
        // TODO(ogniK): ADPCM Size error

        out_wavebuffer.sent_to_dsp = false;
        out_wavebuffer.start_sample_offset = in_wave_buffer.start_sample_offset;
        out_wavebuffer.end_sample_offset = in_wave_buffer.end_sample_offset;
        out_wavebuffer.is_looping = in_wave_buffer.is_looping;
        out_wavebuffer.end_of_stream = in_wave_buffer.end_of_stream;

        out_wavebuffer.buffer_address = in_wave_buffer.buffer_address;
        out_wavebuffer.buffer_size = in_wave_buffer.buffer_size;
        out_wavebuffer.context_address = in_wave_buffer.context_address;
        out_wavebuffer.context_size = in_wave_buffer.context_size;
        out_wavebuffer.loop_start_sample = in_wave_buffer.loop_start_sample;
        out_wavebuffer.loop_end_sample = in_wave_buffer.loop_end_sample;
        in_params.buffer_mapped =
            in_wave_buffer.buffer_address != 0 && in_wave_buffer.buffer_size != 0;
        // TODO(ogniK): Pool mapper attachment
        // TODO(ogniK): IsAdpcmLoopContextBugFixed
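        // Keep the supplied ADPCM loop context only when a context is provided and the
        // loop-context bug fix is reported by the behavior info; otherwise clear it.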
        if (sample_format == SampleFormat::Adpcm && in_wave_buffer.context_address != 0 &&
            in_wave_buffer.context_size != 0 && behavior_info.IsAdpcmLoopContextBugFixed()) {
        } else {
            out_wavebuffer.context_address = 0;
            out_wavebuffer.context_size = 0;
        }
    }
}

void ServerVoiceInfo::WriteOutStatus(
    VoiceInfo::OutParams& voice_out, VoiceInfo::InParams& voice_in,
    std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& voice_states) {
    if (voice_in.is_new || in_params.is_new) {
        in_params.is_new = true;
        voice_out.wave_buffer_consumed = 0;
        voice_out.played_sample_count = 0;
        voice_out.voice_dropped = false;
    } else {
        const auto& state = voice_states[0];
        voice_out.wave_buffer_consumed = state->wave_buffer_consumed;
        voice_out.played_sample_count = state->played_sample_count;
        voice_out.voice_dropped = state->voice_dropped;
    }
}

const ServerVoiceInfo::InParams& ServerVoiceInfo::GetInParams() const {
    return in_params;
}

ServerVoiceInfo::InParams& ServerVoiceInfo::GetInParams() {
    return in_params;
}

const ServerVoiceInfo::OutParams& ServerVoiceInfo::GetOutParams() const {
    return out_params;
}

ServerVoiceInfo::OutParams& ServerVoiceInfo::GetOutParams() {
    return out_params;
}

bool ServerVoiceInfo::ShouldSkip() const {
    // TODO(ogniK): Handle unmapped wave buffers or parameters
    return !in_params.in_use || in_params.wave_buffer_count == 0 || !in_params.buffer_mapped ||
           in_params.voice_drop_flag;
}

bool ServerVoiceInfo::UpdateForCommandGeneration(VoiceContext& voice_context) {
    std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT> dsp_voice_states{};
    if (in_params.is_new) {
        ResetResources(voice_context);
        in_params.last_volume = in_params.volume;
        in_params.is_new = false;
    }

    const s32 channel_count = in_params.channel_count;
    for (s32 i = 0; i < channel_count; i++) {
        const auto channel_resource = in_params.voice_channel_resource_id[i];
        dsp_voice_states[i] =
            &voice_context.GetDspSharedState(static_cast<std::size_t>(channel_resource));
    }
    return UpdateParametersForCommandGeneration(dsp_voice_states);
}

void ServerVoiceInfo::ResetResources(VoiceContext& voice_context) {
    const s32 channel_count = in_params.channel_count;
    for (s32 i = 0; i < channel_count; i++) {
        const auto channel_resource = in_params.voice_channel_resource_id[i];
        auto& dsp_state =
            voice_context.GetDspSharedState(static_cast<std::size_t>(channel_resource));
        dsp_state = {};
        voice_context.GetChannelResource(static_cast<std::size_t>(channel_resource))
            .UpdateLastMixVolumes();
    }
}

bool ServerVoiceInfo::UpdateParametersForCommandGeneration(
    std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& dsp_voice_states) {
    const s32 channel_count = in_params.channel_count;
    if (in_params.wave_buffer_flush_request_count > 0) {
        FlushWaveBuffers(in_params.wave_buffer_flush_request_count, dsp_voice_states,
                         channel_count);
        in_params.wave_buffer_flush_request_count = 0;
    }

    switch (in_params.current_playstate) {
    case ServerPlayState::Play: {
        for (std::size_t i = 0; i < AudioCommon::MAX_WAVE_BUFFERS; i++) {
            if (!in_params.wave_buffer[i].sent_to_dsp) {
                for (s32 channel = 0; channel < channel_count; channel++) {
                    dsp_voice_states[channel]->is_wave_buffer_valid[i] = true;
                }
                in_params.wave_buffer[i].sent_to_dsp = true;
            }
        }
        in_params.should_depop = false;
        return HasValidWaveBuffer(dsp_voice_states[0]);
    }
    case ServerPlayState::Paused:
    case ServerPlayState::Stop: {
        in_params.should_depop = in_params.last_playstate == ServerPlayState::Play;
        return in_params.should_depop;
    }
    case ServerPlayState::RequestStop: {
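        // A stop request consumes every pending wave buffer and clears the per-channel DSP
        // state before the voice settles into the stopped state.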
        for (std::size_t i = 0; i < AudioCommon::MAX_WAVE_BUFFERS; i++) {
            in_params.wave_buffer[i].sent_to_dsp = true;
            for (s32 channel = 0; channel < channel_count; channel++) {
                auto* dsp_state = dsp_voice_states[channel];

                if (dsp_state->is_wave_buffer_valid[i]) {
                    dsp_state->wave_buffer_index =
                        (dsp_state->wave_buffer_index + 1) % AudioCommon::MAX_WAVE_BUFFERS;
                    dsp_state->wave_buffer_consumed++;
                }

                dsp_state->is_wave_buffer_valid[i] = false;
            }
        }

        for (s32 channel = 0; channel < channel_count; channel++) {
            auto* dsp_state = dsp_voice_states[channel];
            dsp_state->offset = 0;
            dsp_state->played_sample_count = 0;
            dsp_state->fraction = 0;
            dsp_state->sample_history.fill(0);
            dsp_state->context = {};
        }

        in_params.current_playstate = ServerPlayState::Stop;
        in_params.should_depop = in_params.last_playstate == ServerPlayState::Play;
        return in_params.should_depop;
    }
    default:
        ASSERT_MSG(false, "Invalid playstate {}", in_params.current_playstate);
    }

    return false;
}

void ServerVoiceInfo::FlushWaveBuffers(
    u8 flush_count, std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& dsp_voice_states,
    s32 channel_count) {
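    // Force-consume flush_count wave buffers starting at the wave buffer head, advancing
    // each channel's buffer index and consumed count as if the DSP had finished them.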
    auto wave_head = in_params.wave_buffer_head;

    for (u8 i = 0; i < flush_count; i++) {
        in_params.wave_buffer[wave_head].sent_to_dsp = true;
        for (s32 channel = 0; channel < channel_count; channel++) {
            auto* dsp_state = dsp_voice_states[channel];
            dsp_state->wave_buffer_consumed++;
            dsp_state->is_wave_buffer_valid[wave_head] = false;
            dsp_state->wave_buffer_index =
                (dsp_state->wave_buffer_index + 1) % AudioCommon::MAX_WAVE_BUFFERS;
        }
        wave_head = (wave_head + 1) % AudioCommon::MAX_WAVE_BUFFERS;
    }
}

bool ServerVoiceInfo::HasValidWaveBuffer(const VoiceState* state) const {
    const auto& valid_wb = state->is_wave_buffer_valid;
    return std::find(valid_wb.begin(), valid_wb.end(), true) != valid_wb.end();
}

void ServerVoiceInfo::SetWaveBufferCompleted(VoiceState& dsp_state,
                                             const ServerWaveBuffer& wave_buffer) {
    dsp_state.is_wave_buffer_valid[dsp_state.wave_buffer_index] = false;
    dsp_state.wave_buffer_consumed++;
    dsp_state.wave_buffer_index = (dsp_state.wave_buffer_index + 1) % AudioCommon::MAX_WAVE_BUFFERS;
    dsp_state.loop_count = 0;
    if (wave_buffer.end_of_stream) {
        dsp_state.played_sample_count = 0;
    }
}

VoiceContext::VoiceContext(std::size_t voice_count_) : voice_count{voice_count_} {
    for (std::size_t i = 0; i < voice_count; i++) {
        voice_channel_resources.emplace_back(static_cast<s32>(i));
        sorted_voice_info.push_back(&voice_info.emplace_back());
        voice_states.emplace_back();
        dsp_voice_states.emplace_back();
    }
}

VoiceContext::~VoiceContext() {
    sorted_voice_info.clear();
}

std::size_t VoiceContext::GetVoiceCount() const {
    return voice_count;
}

ServerVoiceChannelResource& VoiceContext::GetChannelResource(std::size_t i) {
    ASSERT(i < voice_count);
    return voice_channel_resources.at(i);
}

const ServerVoiceChannelResource& VoiceContext::GetChannelResource(std::size_t i) const {
    ASSERT(i < voice_count);
    return voice_channel_resources.at(i);
}

VoiceState& VoiceContext::GetState(std::size_t i) {
    ASSERT(i < voice_count);
    return voice_states.at(i);
}

const VoiceState& VoiceContext::GetState(std::size_t i) const {
    ASSERT(i < voice_count);
    return voice_states.at(i);
}

VoiceState& VoiceContext::GetDspSharedState(std::size_t i) {
    ASSERT(i < voice_count);
    return dsp_voice_states.at(i);
}

const VoiceState& VoiceContext::GetDspSharedState(std::size_t i) const {
    ASSERT(i < voice_count);
    return dsp_voice_states.at(i);
}

ServerVoiceInfo& VoiceContext::GetInfo(std::size_t i) {
    ASSERT(i < voice_count);
    return voice_info.at(i);
}

const ServerVoiceInfo& VoiceContext::GetInfo(std::size_t i) const {
    ASSERT(i < voice_count);
    return voice_info.at(i);
}

ServerVoiceInfo& VoiceContext::GetSortedInfo(std::size_t i) {
    ASSERT(i < voice_count);
    return *sorted_voice_info.at(i);
}

const ServerVoiceInfo& VoiceContext::GetSortedInfo(std::size_t i) const {
    ASSERT(i < voice_count);
    return *sorted_voice_info.at(i);
}

s32 VoiceContext::DecodePcm16(s32* output_buffer, ServerWaveBuffer* wave_buffer, s32 channel,
                              s32 channel_count, s32 buffer_offset, s32 sample_count,
                              Core::Memory::Memory& memory) {
    if (wave_buffer->buffer_address == 0) {
        return 0;
    }
    if (wave_buffer->buffer_size == 0) {
        return 0;
    }
    if (wave_buffer->end_sample_offset < wave_buffer->start_sample_offset) {
        return 0;
    }

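    // Samples are stored interleaved by channel; determine how many samples remain in this
    // wavebuffer and the interleaved offset at which to start reading.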
    const auto samples_remaining =
        (wave_buffer->end_sample_offset - wave_buffer->start_sample_offset) - buffer_offset;
    const auto start_offset = (wave_buffer->start_sample_offset + buffer_offset) * channel_count;
    const auto buffer_pos = wave_buffer->buffer_address + start_offset;

    s16* buffer_data = reinterpret_cast<s16*>(memory.GetPointer(buffer_pos));

    const auto samples_processed = std::min(sample_count, samples_remaining);

    // Fast path
    if (channel_count == 1) {
        for (std::ptrdiff_t i = 0; i < samples_processed; i++) {
            output_buffer[i] = buffer_data[i];
        }
    } else {
        for (std::ptrdiff_t i = 0; i < samples_processed; i++) {
            output_buffer[i] = buffer_data[i * channel_count + channel];
        }
    }

    return samples_processed;
}

void VoiceContext::SortInfo() {
    for (std::size_t i = 0; i < voice_count; i++) {
        sorted_voice_info[i] = &voice_info[i];
    }

    std::sort(sorted_voice_info.begin(), sorted_voice_info.end(),
              [](const ServerVoiceInfo* lhs, const ServerVoiceInfo* rhs) {
                  const auto& lhs_in = lhs->GetInParams();
                  const auto& rhs_in = rhs->GetInParams();
                  // Sort by priority
                  if (lhs_in.priority != rhs_in.priority) {
                      return lhs_in.priority > rhs_in.priority;
                  } else {
                      // If the priorities match, sort by sorting order
                      return lhs_in.sorting_order > rhs_in.sorting_order;
                  }
              });
}

void VoiceContext::UpdateStateByDspShared() {
    voice_states = dsp_voice_states;
}

} // namespace AudioCore