Diffstat (limited to 'src')
-rw-r--r--  src/audio_core/command_generator.cpp | 53
-rw-r--r--  src/audio_core/command_generator.h   |  1
-rw-r--r--  src/audio_core/voice_context.cpp     |  3
-rw-r--r--  src/audio_core/voice_context.h       |  2
4 files changed, 32 insertions, 27 deletions
diff --git a/src/audio_core/command_generator.cpp b/src/audio_core/command_generator.cpp
index 440bfc140..0c3b278ea 100644
--- a/src/audio_core/command_generator.cpp
+++ b/src/audio_core/command_generator.cpp
@@ -470,6 +470,7 @@ s32 CommandGenerator::DecodePcm16(ServerVoiceInfo& voice_info, VoiceState& dsp_s
 
     return samples_processed;
 }
+
 s32 CommandGenerator::DecodeAdpcm(ServerVoiceInfo& voice_info, VoiceState& dsp_state,
                                   s32 sample_count, s32 channel, std::size_t mix_offset) {
     auto& in_params = voice_info.GetInParams();
@@ -486,33 +487,45 @@ s32 CommandGenerator::DecodeAdpcm(ServerVoiceInfo& voice_info, VoiceState& dsp_s
 
     const auto samples_remaining =
         (wave_buffer.end_sample_offset - wave_buffer.start_sample_offset) - dsp_state.offset;
+    const auto samples_processed = std::min(sample_count, samples_remaining);
     const auto start_offset =
         ((wave_buffer.start_sample_offset + dsp_state.offset) * in_params.channel_count);
-    const auto buffer_pos = wave_buffer.buffer_address + start_offset;
+    const auto end_offset = start_offset + samples_processed;
 
-    const auto samples_processed = std::min(sample_count, samples_remaining);
+    constexpr std::size_t FRAME_LEN = 8;
+    constexpr std::size_t SAMPLES_PER_FRAME = 14;
 
-    if (start_offset > dsp_state.adpcm_samples.size()) {
-        dsp_state.adpcm_samples.clear();
-    }
+    // Base buffer position
+    const auto start_frame_index = start_offset / SAMPLES_PER_FRAME;
+    const auto start_frame_buffer = start_frame_index * FRAME_LEN;
 
-    // TODO(ogniK): Proper ADPCM streaming
-    if (dsp_state.adpcm_samples.empty()) {
-        Codec::ADPCM_Coeff coeffs;
-        memory.ReadBlock(in_params.additional_params_address, coeffs.data(),
-                         sizeof(Codec::ADPCM_Coeff));
-        std::vector<u8> buffer(wave_buffer.buffer_size);
-        memory.ReadBlock(wave_buffer.buffer_address, buffer.data(), buffer.size());
-        dsp_state.adpcm_samples =
-            std::move(Codec::DecodeADPCM(buffer.data(), buffer.size(), coeffs, dsp_state.context));
-    }
+    const auto end_frame_index = end_offset / SAMPLES_PER_FRAME;
+    const auto end_frame_buffer = end_frame_index * FRAME_LEN;
+
+    const auto position_in_frame = start_offset % SAMPLES_PER_FRAME;
+
+    const auto buffer_size = (1 + (end_frame_index - start_frame_index)) * FRAME_LEN;
+
+    Codec::ADPCM_Coeff coeffs;
+    memory.ReadBlock(in_params.additional_params_address, coeffs.data(),
+                     sizeof(Codec::ADPCM_Coeff));
+    std::vector<u8> buffer(buffer_size);
+    memory.ReadBlock(wave_buffer.buffer_address + start_frame_buffer, buffer.data(), buffer.size());
+    const auto adpcm_samples =
+        std::move(Codec::DecodeADPCM(buffer.data(), buffer.size(), coeffs, dsp_state.context));
 
     for (std::size_t i = 0; i < samples_processed; i++) {
-        const auto sample_offset = i + start_offset;
-        sample_buffer[mix_offset + i] =
-            dsp_state.adpcm_samples[sample_offset * in_params.channel_count + channel];
+        const auto sample_offset = position_in_frame + i * in_params.channel_count + channel;
+        const auto sample = adpcm_samples[sample_offset];
+        sample_buffer[mix_offset + i] = sample;
     }
 
+    // Manually set our context
+    const auto frame_before_final = (end_frame_index - start_frame_index) - 1;
+    const auto frame_before_final_off = frame_before_final * SAMPLES_PER_FRAME;
+    dsp_state.context.yn2 = adpcm_samples[frame_before_final_off + 12];
+    dsp_state.context.yn1 = adpcm_samples[frame_before_final_off + 13];
+
     return samples_processed;
 }
 
@@ -628,10 +641,6 @@ void CommandGenerator::DecodeFromWaveBuffers(ServerVoiceInfo& voice_info, s32* o
                 dsp_state.played_sample_count = 0;
             }
         } else {
-            if (in_params.sample_format == SampleFormat::Adpcm) {
-                // TODO(ogniK): Remove this when ADPCM streaming implemented
-                dsp_state.adpcm_samples.clear();
-            }
 
             // Update our wave buffer states
             dsp_state.is_wave_buffer_valid[dsp_state.wave_buffer_index] = false;
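Note on the offset math introduced above: this ADPCM format packs audio into 8-byte frames that each decode to 14 PCM samples, which is why DecodeAdpcm now converts sample offsets into frame indices before reading guest memory, decodes only the frames covering the request, and restores the predictor history (yn1/yn2) from the frame before the final one. The following is an illustrative standalone sketch of that arithmetic, not yuzu code; the names FrameSpan and ComputeFrameSpan are hypothetical, and it assumes a single channel so sample offsets equal sample indices.

#include <cstddef>

// Hypothetical helper mirroring the frame math in CommandGenerator::DecodeAdpcm.
constexpr std::size_t FRAME_LEN = 8;          // one encoded ADPCM frame is 8 bytes...
constexpr std::size_t SAMPLES_PER_FRAME = 14; // ...and decodes to 14 PCM samples

struct FrameSpan {
    std::size_t first_frame;       // index of the first frame that must be fetched
    std::size_t byte_offset;       // byte offset of that frame inside the wave buffer
    std::size_t bytes_to_read;     // encoded bytes needed to cover the request
    std::size_t position_in_frame; // where the first requested sample sits in the decoded output
};

constexpr FrameSpan ComputeFrameSpan(std::size_t start_sample, std::size_t sample_count) {
    const std::size_t end_sample = start_sample + sample_count;
    const std::size_t start_frame = start_sample / SAMPLES_PER_FRAME;
    const std::size_t end_frame = end_sample / SAMPLES_PER_FRAME;
    // Read one frame beyond the span of whole frames so a request that ends
    // mid-frame is still fully covered, matching buffer_size in the diff.
    return {start_frame, start_frame * FRAME_LEN,
            (1 + (end_frame - start_frame)) * FRAME_LEN, start_sample % SAMPLES_PER_FRAME};
}

// Requesting 32 samples starting at sample 20 touches frames 1..3 (24 bytes),
// and sample 20 is the 7th sample (index 6) of frame 1.
static_assert(ComputeFrameSpan(20, 32).bytes_to_read == 3 * FRAME_LEN);
static_assert(ComputeFrameSpan(20, 32).position_in_frame == 6);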
diff --git a/src/audio_core/command_generator.h b/src/audio_core/command_generator.h
index 3ad8973b5..3f49c1303 100644
--- a/src/audio_core/command_generator.h
+++ b/src/audio_core/command_generator.h
@@ -71,7 +71,6 @@ private:
                     s32 channel, std::size_t mix_offset);
     void DecodeFromWaveBuffers(ServerVoiceInfo& voice_info, s32* output, VoiceState& dsp_state,
                                s32 channel, s32 target_sample_rate, s32 sample_count, s32 node_id);
-    void Resample(s32* output, s32* input, s32 pitch, s32& fraction, s32 sample_count);
 
     AudioCommon::AudioRendererParameter& worker_params;
     VoiceContext& voice_context;
diff --git a/src/audio_core/voice_context.cpp b/src/audio_core/voice_context.cpp
index 16ae1afe8..1d8f69844 100644
--- a/src/audio_core/voice_context.cpp
+++ b/src/audio_core/voice_context.cpp
@@ -520,8 +520,7 @@ void VoiceContext::SortInfo() {
 }
 
 void VoiceContext::UpdateStateByDspShared() {
-    std::memcpy(voice_states.data(), dsp_voice_states.data(),
-                sizeof(VoiceState) * dsp_voice_states.size());
+    voice_states = dsp_voice_states;
 }
 
 } // namespace AudioCore
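Side note on the UpdateStateByDspShared change: std::memcpy is only well-defined for trivially copyable types, and VoiceState stopped being one while it carried the std::vector member removed below, whereas plain container assignment copies element by element and stays correct either way. A minimal sketch of the distinction follows, using toy types rather than the real yuzu declarations (the actual voice_states containers may be std::array or std::vector):

#include <type_traits>
#include <vector>

// Toy stand-ins, not the real yuzu types.
struct TrivialVoiceState {
    int yn1{};
    int yn2{};
};

struct NonTrivialVoiceState {
    int yn1{};
    std::vector<short> adpcm_samples{}; // like the removed hack member
};

// std::memcpy over whole objects is only well-defined for trivially copyable types.
static_assert(std::is_trivially_copyable_v<TrivialVoiceState>);
static_assert(!std::is_trivially_copyable_v<NonTrivialVoiceState>);

// Container assignment works for either case and keeps the sizes in sync.
void UpdateShared(std::vector<NonTrivialVoiceState>& voice_states,
                  const std::vector<NonTrivialVoiceState>& dsp_voice_states) {
    voice_states = dsp_voice_states;
}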
diff --git a/src/audio_core/voice_context.h b/src/audio_core/voice_context.h
index b1d554766..13b0a7f0f 100644
--- a/src/audio_core/voice_context.h
+++ b/src/audio_core/voice_context.h
@@ -101,8 +101,6 @@ struct VoiceState {
     u32 external_context_size{};
     bool is_external_context_used{};
     bool voice_dropped{};
-    // TODO(ogniK): Hack until ADPCM streaming is implemented
-    std::vector<s16> adpcm_samples{};
 };
 
 class VoiceChannelResource {