| 1 | /* |
| 2 | Simple DirectMedia Layer |
| 3 | Copyright (C) 1997-2025 Sam Lantinga <slouken@libsdl.org> |
| 4 | |
| 5 | This software is provided 'as-is', without any express or implied |
| 6 | warranty. In no event will the authors be held liable for any damages |
| 7 | arising from the use of this software. |
| 8 | |
| 9 | Permission is granted to anyone to use this software for any purpose, |
| 10 | including commercial applications, and to alter it and redistribute it |
| 11 | freely, subject to the following restrictions: |
| 12 | |
| 13 | 1. The origin of this software must not be misrepresented; you must not |
| 14 | claim that you wrote the original software. If you use this software |
| 15 | in a product, an acknowledgment in the product documentation would be |
| 16 | appreciated but is not required. |
| 17 | 2. Altered source versions must be plainly marked as such, and must not be |
| 18 | misrepresented as being the original software. |
| 19 | 3. This notice may not be removed or altered from any source distribution. |
| 20 | */ |
| 21 | #include "SDL_internal.h" |
| 22 | |
| 23 | #include "SDL_sysaudio.h" |
| 24 | |
| 25 | #include "SDL_audioqueue.h" |
| 26 | #include "SDL_audioresample.h" |
| 27 | |
| 28 | #ifndef SDL_INT_MAX |
| 29 | #define SDL_INT_MAX ((int)(~0u>>1)) |
| 30 | #endif |
| 31 | |
| 32 | #ifdef SDL_SSE3_INTRINSICS |
| 33 | // Convert from stereo to mono. Average left and right. |
static void SDL_TARGETING("sse3") SDL_ConvertStereoToMono_SSE3(float *dst, const float *src, int num_frames)
{
    LOG_DEBUG_AUDIO_CONVERT("stereo", "mono (using SSE3)");
| 37 | |
| 38 | const __m128 divby2 = _mm_set1_ps(0.5f); |
| 39 | int i = num_frames; |
| 40 | |
| 41 | /* Do SSE blocks as long as we have 16 bytes available. |
| 42 | Just use unaligned load/stores, if the memory at runtime is |
| 43 | aligned it'll be just as fast on modern processors */ |
| 44 | while (i >= 4) { // 4 * float32 |
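        // _mm_hadd_ps(a, b) yields (a0+a1, a2+a3, b0+b1, b2+b3), which for interleaved
        // stereo input is (L0+R0, L1+R1, L2+R2, L3+R3); multiplying by 0.5 averages each pair.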
| 45 | _mm_storeu_ps(dst, _mm_mul_ps(_mm_hadd_ps(_mm_loadu_ps(src), _mm_loadu_ps(src + 4)), divby2)); |
| 46 | i -= 4; |
| 47 | src += 8; |
| 48 | dst += 4; |
| 49 | } |
| 50 | |
| 51 | // Finish off any leftovers with scalar operations. |
| 52 | while (i) { |
| 53 | *dst = (src[0] + src[1]) * 0.5f; |
| 54 | dst++; |
| 55 | i--; |
| 56 | src += 2; |
| 57 | } |
| 58 | } |
| 59 | #endif |
| 60 | |
| 61 | #ifdef SDL_SSE_INTRINSICS |
| 62 | // Convert from mono to stereo. Duplicate to stereo left and right. |
static void SDL_TARGETING("sse") SDL_ConvertMonoToStereo_SSE(float *dst, const float *src, int num_frames)
{
    LOG_DEBUG_AUDIO_CONVERT("mono", "stereo (using SSE)");
| 66 | |
| 67 | // convert backwards, since output is growing in-place. |
| 68 | src += (num_frames-4) * 1; |
| 69 | dst += (num_frames-4) * 2; |
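    // (When converting in place, each input frame expands to two output frames, so walking
    // forward would overwrite source samples that haven't been read yet; iterating from the
    // end toward the start is always safe.)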
| 70 | |
| 71 | /* Do SSE blocks as long as we have 16 bytes available. |
| 72 | Just use unaligned load/stores, if the memory at runtime is |
| 73 | aligned it'll be just as fast on modern processors */ |
| 74 | // convert backwards, since output is growing in-place. |
| 75 | int i = num_frames; |
| 76 | while (i >= 4) { // 4 * float32 |
| 77 | const __m128 input = _mm_loadu_ps(src); // A B C D |
| 78 | _mm_storeu_ps(dst, _mm_unpacklo_ps(input, input)); // A A B B |
| 79 | _mm_storeu_ps(dst + 4, _mm_unpackhi_ps(input, input)); // C C D D |
| 80 | i -= 4; |
| 81 | src -= 4; |
| 82 | dst -= 8; |
| 83 | } |
| 84 | |
| 85 | // Finish off any leftovers with scalar operations. |
| 86 | src += 3; |
| 87 | dst += 6; // adjust for smaller buffers. |
| 88 | while (i) { // convert backwards, since output is growing in-place. |
| 89 | const float srcFC = src[0]; |
| 90 | dst[1] /* FR */ = srcFC; |
| 91 | dst[0] /* FL */ = srcFC; |
| 92 | i--; |
| 93 | src--; |
| 94 | dst -= 2; |
| 95 | } |
| 96 | } |
| 97 | #endif |
| 98 | |
| 99 | // Include the autogenerated channel converters... |
| 100 | #include "SDL_audio_channel_converters.h" |
| 101 | |
| 102 | static bool SDL_IsSupportedAudioFormat(const SDL_AudioFormat fmt) |
| 103 | { |
| 104 | switch (fmt) { |
| 105 | case SDL_AUDIO_U8: |
| 106 | case SDL_AUDIO_S8: |
| 107 | case SDL_AUDIO_S16LE: |
| 108 | case SDL_AUDIO_S16BE: |
| 109 | case SDL_AUDIO_S32LE: |
| 110 | case SDL_AUDIO_S32BE: |
| 111 | case SDL_AUDIO_F32LE: |
| 112 | case SDL_AUDIO_F32BE: |
| 113 | return true; // supported. |
| 114 | |
| 115 | default: |
| 116 | break; |
| 117 | } |
| 118 | |
| 119 | return false; // unsupported. |
| 120 | } |
| 121 | |
| 122 | static bool SDL_IsSupportedChannelCount(const int channels) |
| 123 | { |
| 124 | return ((channels >= 1) && (channels <= 8)); |
| 125 | } |
| 126 | |
| 127 | bool SDL_ChannelMapIsBogus(const int *chmap, int channels) |
| 128 | { |
| 129 | if (chmap) { |
| 130 | for (int i = 0; i < channels; i++) { |
| 131 | const int mapping = chmap[i]; |
| 132 | if ((mapping < -1) || (mapping >= channels)) { |
| 133 | return true; |
| 134 | } |
| 135 | } |
| 136 | } |
| 137 | return false; |
| 138 | } |
| 139 | |
| 140 | bool SDL_ChannelMapIsDefault(const int *chmap, int channels) |
| 141 | { |
| 142 | if (chmap) { |
| 143 | for (int i = 0; i < channels; i++) { |
| 144 | if (chmap[i] != i) { |
| 145 | return false; |
| 146 | } |
| 147 | } |
| 148 | } |
| 149 | return true; |
| 150 | } |
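
/* Channel map semantics, by example: for a stereo stream, the map {1, 0} swaps left and
   right, {0, 0} copies the left channel into both outputs, and {0, -1} keeps the left
   channel while filling the right with silence. A NULL map is the identity mapping
   ({0, 1} for stereo). */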
| 151 | |
| 152 | // Swizzle audio channels. src and dst can be the same pointer. It does not change the buffer size. |
| 153 | static void SwizzleAudio(const int num_frames, void *dst, const void *src, int channels, const int *map, SDL_AudioFormat fmt) |
| 154 | { |
| 155 | const int bitsize = (int) SDL_AUDIO_BITSIZE(fmt); |
| 156 | |
| 157 | bool has_null_mappings = false; // !!! FIXME: calculate this when setting the channel map instead. |
| 158 | for (int i = 0; i < channels; i++) { |
| 159 | if (map[i] == -1) { |
| 160 | has_null_mappings = true; |
| 161 | break; |
| 162 | } |
| 163 | } |
| 164 | |
| 165 | #define CHANNEL_SWIZZLE(bits) { \ |
| 166 | Uint##bits *tdst = (Uint##bits *) dst; /* treat as UintX; we only care about moving bits and not the type here. */ \ |
| 167 | const Uint##bits *tsrc = (const Uint##bits *) src; \ |
| 168 | if (src != dst) { /* don't need to copy to a temporary frame first. */ \ |
| 169 | if (has_null_mappings) { \ |
| 170 | const Uint##bits silence = (Uint##bits) SDL_GetSilenceValueForFormat(fmt); \ |
| 171 | for (int i = 0; i < num_frames; i++, tsrc += channels, tdst += channels) { \ |
| 172 | for (int ch = 0; ch < channels; ch++) { \ |
| 173 | const int m = map[ch]; \ |
| 174 | tdst[ch] = (m == -1) ? silence : tsrc[m]; \ |
| 175 | } \ |
| 176 | } \ |
| 177 | } else { \ |
| 178 | for (int i = 0; i < num_frames; i++, tsrc += channels, tdst += channels) { \ |
| 179 | for (int ch = 0; ch < channels; ch++) { \ |
| 180 | tdst[ch] = tsrc[map[ch]]; \ |
| 181 | } \ |
| 182 | } \ |
| 183 | } \ |
| 184 | } else { \ |
| 185 | bool isstack; \ |
| 186 | Uint##bits *tmp = (Uint##bits *) SDL_small_alloc(int, channels, &isstack); /* !!! FIXME: allocate this when setting the channel map instead. */ \ |
| 187 | if (tmp) { \ |
| 188 | if (has_null_mappings) { \ |
| 189 | const Uint##bits silence = (Uint##bits) SDL_GetSilenceValueForFormat(fmt); \ |
| 190 | for (int i = 0; i < num_frames; i++, tsrc += channels, tdst += channels) { \ |
| 191 | for (int ch = 0; ch < channels; ch++) { \ |
| 192 | const int m = map[ch]; \ |
| 193 | tmp[ch] = (m == -1) ? silence : tsrc[m]; \ |
| 194 | } \ |
| 195 | for (int ch = 0; ch < channels; ch++) { \ |
| 196 | tdst[ch] = tmp[ch]; \ |
| 197 | } \ |
| 198 | } \ |
| 199 | } else { \ |
| 200 | for (int i = 0; i < num_frames; i++, tsrc += channels, tdst += channels) { \ |
| 201 | for (int ch = 0; ch < channels; ch++) { \ |
| 202 | tmp[ch] = tsrc[map[ch]]; \ |
| 203 | } \ |
| 204 | for (int ch = 0; ch < channels; ch++) { \ |
| 205 | tdst[ch] = tmp[ch]; \ |
| 206 | } \ |
| 207 | } \ |
| 208 | } \ |
| 209 | SDL_small_free(tmp, isstack); \ |
| 210 | } \ |
| 211 | } \ |
| 212 | } |
| 213 | |
| 214 | switch (bitsize) { |
| 215 | case 8: CHANNEL_SWIZZLE(8); break; |
| 216 | case 16: CHANNEL_SWIZZLE(16); break; |
| 217 | case 32: CHANNEL_SWIZZLE(32); break; |
| 218 | // we don't currently have int64 or double audio datatypes, so no `case 64` for now. |
        default: SDL_assert(!"Unsupported audio datatype size"); break;
| 220 | } |
| 221 | |
| 222 | #undef CHANNEL_SWIZZLE |
| 223 | } |
| 224 | |
| 225 | |
| 226 | // This does type and channel conversions _but not resampling_ (resampling happens in SDL_AudioStream). |
// This does not check parameter validity (beyond asserts); it expects you did that already!
| 228 | // All of this has to function as if src==dst==scratch (conversion in-place), but as a convenience |
| 229 | // if you're just going to copy the final output elsewhere, you can specify a different output pointer. |
| 230 | // |
// The scratch buffer must be able to store `num_frames * CalculateMaxFrameSize(src_format, src_channels, dst_format, dst_channels)` bytes.
| 232 | // If the scratch buffer is NULL, this restriction applies to the output buffer instead. |
| 233 | // |
| 234 | // Since this is a convenient point that audio goes through even if it doesn't need format conversion, |
| 235 | // we also handle gain adjustment here, so we don't have to make another pass over the data later. |
| 236 | // Strictly speaking, this is also a "conversion". :) |
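/* Illustrative sketch (not part of the build): converting 128 sample frames of S16 stereo
   to F32 5.1 in place, with unity gain. The scratch buffer uses the worst-case frame size,
   here max(sizeof(Sint16), sizeof(float)) * 6 = 24 bytes per frame:

       Uint8 scratch[128 * 24];
       ConvertAudio(128, buffer, SDL_AUDIO_S16, 2, NULL,
                    buffer, SDL_AUDIO_F32, 6, NULL,
                    scratch, 1.0f);

   Here `buffer` must already be large enough to hold the (bigger) output. */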
| 237 | void ConvertAudio(int num_frames, |
| 238 | const void *src, SDL_AudioFormat src_format, int src_channels, const int *src_map, |
| 239 | void *dst, SDL_AudioFormat dst_format, int dst_channels, const int *dst_map, |
| 240 | void *scratch, float gain) |
| 241 | { |
| 242 | SDL_assert(src != NULL); |
| 243 | SDL_assert(dst != NULL); |
| 244 | SDL_assert(SDL_IsSupportedAudioFormat(src_format)); |
| 245 | SDL_assert(SDL_IsSupportedAudioFormat(dst_format)); |
| 246 | SDL_assert(SDL_IsSupportedChannelCount(src_channels)); |
| 247 | SDL_assert(SDL_IsSupportedChannelCount(dst_channels)); |
| 248 | |
| 249 | if (!num_frames) { |
| 250 | return; // no data to convert, quit. |
| 251 | } |
| 252 | |
| 253 | #if DEBUG_AUDIO_CONVERT |
| 254 | SDL_Log("SDL_AUDIO_CONVERT: Convert format %04x->%04x, channels %u->%u" , src_format, dst_format, src_channels, dst_channels); |
| 255 | #endif |
| 256 | |
| 257 | const int dst_bitsize = (int) SDL_AUDIO_BITSIZE(dst_format); |
| 258 | const int dst_sample_frame_size = (dst_bitsize / 8) * dst_channels; |
| 259 | |
| 260 | const bool chmaps_match = (src_channels == dst_channels) && SDL_AudioChannelMapsEqual(src_channels, src_map, dst_map); |
| 261 | if (chmaps_match) { |
| 262 | src_map = dst_map = NULL; // NULL both these out so we don't do any unnecessary swizzling. |
| 263 | } |
| 264 | |
| 265 | /* Type conversion goes like this now: |
| 266 | - swizzle through source channel map to "standard" layout. |
| 267 | - byteswap to CPU native format first if necessary. |
| 268 | - convert to native Float32 if necessary. |
| 269 | - change channel count if necessary. |
| 270 | - convert to final data format. |
| 271 | - byteswap back to foreign format if necessary. |
| 272 | - swizzle through dest channel map from "standard" layout. |
| 273 | |
| 274 | The expectation is we can process data faster in float32 |
| 275 | (possibly with SIMD), and making several passes over the same |
| 276 | buffer is likely to be CPU cache-friendly, avoiding the |
| 277 | biggest performance hit in modern times. Previously we had |
| 278 | (script-generated) custom converters for every data type and |
| 279 | it was a bloat on SDL compile times and final library size. */ |
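    /* For example, on a little-endian CPU, S16LE stereo -> F32BE mono needs no input swizzle
       or byteswap on the way in: it converts S16 to float, mixes stereo down to mono, writes
       the data back out as F32, and byteswaps it to big-endian last. */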
| 280 | |
| 281 | // swizzle input to "standard" format if necessary. |
| 282 | if (src_map) { |
| 283 | void* buf = scratch ? scratch : dst; // use scratch if available, since it has to be big enough to hold src, unless it's NULL, then dst has to be. |
| 284 | SwizzleAudio(num_frames, buf, src, src_channels, src_map, src_format); |
| 285 | src = buf; |
| 286 | } |
| 287 | |
| 288 | // see if we can skip float conversion entirely. |
| 289 | if ((src_channels == dst_channels) && (gain == 1.0f)) { |
| 290 | if (src_format == dst_format) { |
| 291 | // nothing to do, we're already in the right format, just copy it over if necessary. |
| 292 | if (dst_map) { |
| 293 | SwizzleAudio(num_frames, dst, src, dst_channels, dst_map, dst_format); |
| 294 | } else if (src != dst) { |
| 295 | SDL_memcpy(dst, src, num_frames * dst_sample_frame_size); |
| 296 | } |
| 297 | return; |
| 298 | } |
| 299 | |
| 300 | // just a byteswap needed? |
| 301 | if ((src_format ^ dst_format) == SDL_AUDIO_MASK_BIG_ENDIAN) { |
| 302 | if (dst_map) { // do this first, in case we duplicate channels, we can avoid an extra copy if src != dst. |
| 303 | SwizzleAudio(num_frames, dst, src, dst_channels, dst_map, dst_format); |
| 304 | src = dst; |
| 305 | } |
| 306 | ConvertAudioSwapEndian(dst, src, num_frames * dst_channels, dst_bitsize); |
| 307 | return; // all done. |
| 308 | } |
| 309 | } |
| 310 | |
| 311 | if (!scratch) { |
| 312 | scratch = dst; |
| 313 | } |
| 314 | |
| 315 | const bool srcconvert = src_format != SDL_AUDIO_F32; |
| 316 | const bool channelconvert = src_channels != dst_channels; |
| 317 | const bool dstconvert = dst_format != SDL_AUDIO_F32; |
| 318 | |
| 319 | // get us to float format. |
| 320 | if (srcconvert) { |
| 321 | void* buf = (channelconvert || dstconvert) ? scratch : dst; |
| 322 | ConvertAudioToFloat((float *) buf, src, num_frames * src_channels, src_format); |
| 323 | src = buf; |
| 324 | } |
| 325 | |
| 326 | // Gain adjustment |
| 327 | if (gain != 1.0f) { |
| 328 | float *buf = (float *)((channelconvert || dstconvert) ? scratch : dst); |
| 329 | const int total_samples = num_frames * src_channels; |
| 330 | if (src == buf) { |
| 331 | for (int i = 0; i < total_samples; i++) { |
| 332 | buf[i] *= gain; |
| 333 | } |
| 334 | } else { |
| 335 | float *fsrc = (float *)src; |
| 336 | for (int i = 0; i < total_samples; i++) { |
| 337 | buf[i] = fsrc[i] * gain; |
| 338 | } |
| 339 | } |
| 340 | src = buf; |
| 341 | } |
| 342 | |
| 343 | // Channel conversion |
| 344 | |
| 345 | if (channelconvert) { |
| 346 | SDL_AudioChannelConverter channel_converter; |
| 347 | SDL_AudioChannelConverter override = NULL; |
| 348 | |
        // If these asserts fail, either SDL_IsSupportedChannelCount is wrong or we added a new channel count and forgot to update the table.
| 350 | SDL_assert(src_channels <= SDL_arraysize(channel_converters)); |
| 351 | SDL_assert(dst_channels <= SDL_arraysize(channel_converters[0])); |
| 352 | |
| 353 | channel_converter = channel_converters[src_channels - 1][dst_channels - 1]; |
| 354 | SDL_assert(channel_converter != NULL); |
| 355 | |
| 356 | // swap in some SIMD versions for a few of these. |
| 357 | if (channel_converter == SDL_ConvertStereoToMono) { |
| 358 | #ifdef SDL_SSE3_INTRINSICS |
| 359 | if (!override && SDL_HasSSE3()) { override = SDL_ConvertStereoToMono_SSE3; } |
| 360 | #endif |
| 361 | } else if (channel_converter == SDL_ConvertMonoToStereo) { |
| 362 | #ifdef SDL_SSE_INTRINSICS |
| 363 | if (!override && SDL_HasSSE()) { override = SDL_ConvertMonoToStereo_SSE; } |
| 364 | #endif |
| 365 | } |
| 366 | |
| 367 | if (override) { |
| 368 | channel_converter = override; |
| 369 | } |
| 370 | |
| 371 | void* buf = dstconvert ? scratch : dst; |
| 372 | channel_converter((float *) buf, (const float *) src, num_frames); |
| 373 | src = buf; |
| 374 | } |
| 375 | |
| 376 | // Resampling is not done in here. SDL_AudioStream handles that. |
| 377 | |
| 378 | // Move to final data type. |
| 379 | if (dstconvert) { |
| 380 | ConvertAudioFromFloat(dst, (const float *) src, num_frames * dst_channels, dst_format); |
| 381 | src = dst; |
| 382 | } |
| 383 | |
| 384 | SDL_assert(src == dst); // if we got here, we _had_ to have done _something_. Otherwise, we should have memcpy'd! |
| 385 | |
| 386 | if (dst_map) { |
| 387 | SwizzleAudio(num_frames, dst, src, dst_channels, dst_map, dst_format); |
| 388 | } |
| 389 | } |
| 390 | |
| 391 | // Calculate the largest frame size needed to convert between the two formats. |
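// For example, converting S16 stereo to/from F32 5.1 needs max(2, 4, sizeof(float)) * max(2, 6) = 24 bytes per frame.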
| 392 | static int CalculateMaxFrameSize(SDL_AudioFormat src_format, int src_channels, SDL_AudioFormat dst_format, int dst_channels) |
| 393 | { |
| 394 | const int src_format_size = SDL_AUDIO_BYTESIZE(src_format); |
| 395 | const int dst_format_size = SDL_AUDIO_BYTESIZE(dst_format); |
| 396 | const int max_app_format_size = SDL_max(src_format_size, dst_format_size); |
| 397 | const int max_format_size = SDL_max(max_app_format_size, sizeof (float)); // ConvertAudio and ResampleAudio use floats. |
| 398 | const int max_channels = SDL_max(src_channels, dst_channels); |
| 399 | return max_format_size * max_channels; |
| 400 | } |
| 401 | |
| 402 | static Sint64 GetAudioStreamResampleRate(SDL_AudioStream* stream, int src_freq, Sint64 resample_offset) |
| 403 | { |
| 404 | src_freq = (int)((float)src_freq * stream->freq_ratio); |
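    // e.g. a 44100Hz source with freq_ratio 2.0 is treated as an 88200Hz source, so it is consumed (and heard) at double speed.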
| 405 | |
| 406 | Sint64 resample_rate = SDL_GetResampleRate(src_freq, stream->dst_spec.freq); |
| 407 | |
| 408 | // If src_freq == dst_freq, and we aren't between frames, don't resample |
| 409 | if ((resample_rate == 0x100000000) && (resample_offset == 0)) { |
| 410 | resample_rate = 0; |
| 411 | } |
| 412 | |
| 413 | return resample_rate; |
| 414 | } |
| 415 | |
| 416 | static bool UpdateAudioStreamInputSpec(SDL_AudioStream *stream, const SDL_AudioSpec *spec, const int *chmap) |
| 417 | { |
| 418 | if (SDL_AudioSpecsEqual(&stream->input_spec, spec, stream->input_chmap, chmap)) { |
| 419 | return true; |
| 420 | } |
| 421 | |
| 422 | if (!SDL_ResetAudioQueueHistory(stream->queue, SDL_GetResamplerHistoryFrames())) { |
| 423 | return false; |
| 424 | } |
| 425 | |
| 426 | if (!chmap) { |
| 427 | stream->input_chmap = NULL; |
| 428 | } else { |
| 429 | const size_t chmaplen = sizeof (*chmap) * spec->channels; |
| 430 | stream->input_chmap = stream->input_chmap_storage; |
| 431 | SDL_memcpy(stream->input_chmap, chmap, chmaplen); |
| 432 | } |
| 433 | |
| 434 | SDL_copyp(&stream->input_spec, spec); |
| 435 | |
| 436 | return true; |
| 437 | } |
| 438 | |
| 439 | SDL_AudioStream *SDL_CreateAudioStream(const SDL_AudioSpec *src_spec, const SDL_AudioSpec *dst_spec) |
| 440 | { |
| 441 | SDL_ChooseAudioConverters(); |
| 442 | SDL_SetupAudioResampler(); |
| 443 | |
| 444 | SDL_AudioStream *result = (SDL_AudioStream *)SDL_calloc(1, sizeof(SDL_AudioStream)); |
| 445 | if (!result) { |
| 446 | return NULL; |
| 447 | } |
| 448 | |
| 449 | result->freq_ratio = 1.0f; |
| 450 | result->gain = 1.0f; |
| 451 | result->queue = SDL_CreateAudioQueue(8192); |
| 452 | |
| 453 | if (!result->queue) { |
| 454 | SDL_free(result); |
| 455 | return NULL; |
| 456 | } |
| 457 | |
| 458 | result->lock = SDL_CreateMutex(); |
| 459 | if (!result->lock) { |
| 460 | SDL_free(result->queue); |
| 461 | SDL_free(result); |
| 462 | return NULL; |
| 463 | } |
| 464 | |
| 465 | OnAudioStreamCreated(result); |
| 466 | |
| 467 | if (!SDL_SetAudioStreamFormat(result, src_spec, dst_spec)) { |
| 468 | SDL_DestroyAudioStream(result); |
| 469 | return NULL; |
| 470 | } |
| 471 | |
| 472 | return result; |
| 473 | } |
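
/* Typical usage sketch (public API; `samples` and `num_bytes` are caller-provided):

       const SDL_AudioSpec srcspec = { SDL_AUDIO_S16, 2, 44100 };
       const SDL_AudioSpec dstspec = { SDL_AUDIO_F32, 2, 48000 };
       SDL_AudioStream *stream = SDL_CreateAudioStream(&srcspec, &dstspec);
       if (stream) {
           SDL_PutAudioStreamData(stream, samples, num_bytes);
           SDL_FlushAudioStream(stream);
           Uint8 converted[4096];
           const int got = SDL_GetAudioStreamData(stream, converted, sizeof (converted));  // byte count, or -1 on error.
           SDL_DestroyAudioStream(stream);
       }
*/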
| 474 | |
| 475 | SDL_PropertiesID SDL_GetAudioStreamProperties(SDL_AudioStream *stream) |
| 476 | { |
    if (!stream) {
        SDL_InvalidParamError("stream");
        return 0;
    }
| 481 | SDL_LockMutex(stream->lock); |
| 482 | if (stream->props == 0) { |
| 483 | stream->props = SDL_CreateProperties(); |
| 484 | } |
| 485 | SDL_UnlockMutex(stream->lock); |
| 486 | return stream->props; |
| 487 | } |
| 488 | |
| 489 | bool SDL_SetAudioStreamGetCallback(SDL_AudioStream *stream, SDL_AudioStreamCallback callback, void *userdata) |
| 490 | { |
    if (!stream) {
        return SDL_InvalidParamError("stream");
    }
| 494 | SDL_LockMutex(stream->lock); |
| 495 | stream->get_callback = callback; |
| 496 | stream->get_callback_userdata = userdata; |
| 497 | SDL_UnlockMutex(stream->lock); |
| 498 | return true; |
| 499 | } |
| 500 | |
| 501 | bool SDL_SetAudioStreamPutCallback(SDL_AudioStream *stream, SDL_AudioStreamCallback callback, void *userdata) |
| 502 | { |
    if (!stream) {
        return SDL_InvalidParamError("stream");
    }
| 506 | SDL_LockMutex(stream->lock); |
| 507 | stream->put_callback = callback; |
| 508 | stream->put_callback_userdata = userdata; |
| 509 | SDL_UnlockMutex(stream->lock); |
| 510 | return true; |
| 511 | } |
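
/* Callback sketch (the `FeedMoreAudio` callback here is hypothetical, supplied by the app).
   The get callback runs with the stream lock held, just before data is read:

       static void SDLCALL FeedMoreAudio(void *userdata, SDL_AudioStream *stream,
                                         int additional_amount, int total_amount)
       {
           // additional_amount: roughly how many more bytes of input the stream still
           // needs for the pending read; total_amount: bytes needed for the whole request.
       }

       SDL_SetAudioStreamGetCallback(stream, FeedMoreAudio, NULL);
*/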
| 512 | |
| 513 | bool SDL_LockAudioStream(SDL_AudioStream *stream) |
| 514 | { |
    if (!stream) {
        return SDL_InvalidParamError("stream");
    }
| 518 | SDL_LockMutex(stream->lock); |
| 519 | return true; |
| 520 | } |
| 521 | |
| 522 | bool SDL_UnlockAudioStream(SDL_AudioStream *stream) |
| 523 | { |
    if (!stream) {
        return SDL_InvalidParamError("stream");
    }
| 527 | SDL_UnlockMutex(stream->lock); |
| 528 | return true; |
| 529 | } |
| 530 | |
| 531 | bool SDL_GetAudioStreamFormat(SDL_AudioStream *stream, SDL_AudioSpec *src_spec, SDL_AudioSpec *dst_spec) |
| 532 | { |
    if (!stream) {
        return SDL_InvalidParamError("stream");
    }
| 536 | |
| 537 | SDL_LockMutex(stream->lock); |
| 538 | if (src_spec) { |
| 539 | SDL_copyp(src_spec, &stream->src_spec); |
| 540 | } |
| 541 | if (dst_spec) { |
| 542 | SDL_copyp(dst_spec, &stream->dst_spec); |
| 543 | } |
| 544 | SDL_UnlockMutex(stream->lock); |
| 545 | |
    if (src_spec && src_spec->format == 0) {
        return SDL_SetError("Stream has no source format");
    } else if (dst_spec && dst_spec->format == 0) {
        return SDL_SetError("Stream has no destination format");
    }
| 551 | |
| 552 | return true; |
| 553 | } |
| 554 | |
| 555 | bool SDL_SetAudioStreamFormat(SDL_AudioStream *stream, const SDL_AudioSpec *src_spec, const SDL_AudioSpec *dst_spec) |
| 556 | { |
    if (!stream) {
        return SDL_InvalidParamError("stream");
    }
| 560 | |
| 561 | // note that while we've removed the maximum frequency checks, SDL _will_ |
| 562 | // fail to resample to extremely high sample rates correctly. Really high, |
| 563 | // like 196608000Hz. File a bug. :P |
| 564 | |
| 565 | if (src_spec) { |
        if (!SDL_IsSupportedAudioFormat(src_spec->format)) {
            return SDL_InvalidParamError("src_spec->format");
        } else if (!SDL_IsSupportedChannelCount(src_spec->channels)) {
            return SDL_InvalidParamError("src_spec->channels");
        } else if (src_spec->freq <= 0) {
            return SDL_InvalidParamError("src_spec->freq");
        }
| 573 | } |
| 574 | |
| 575 | if (dst_spec) { |
        if (!SDL_IsSupportedAudioFormat(dst_spec->format)) {
            return SDL_InvalidParamError("dst_spec->format");
        } else if (!SDL_IsSupportedChannelCount(dst_spec->channels)) {
            return SDL_InvalidParamError("dst_spec->channels");
        } else if (dst_spec->freq <= 0) {
            return SDL_InvalidParamError("dst_spec->freq");
        }
| 583 | } |
| 584 | |
| 585 | SDL_LockMutex(stream->lock); |
| 586 | |
| 587 | // quietly refuse to change the format of the end currently bound to a device. |
| 588 | if (stream->bound_device) { |
| 589 | if (stream->bound_device->physical_device->recording) { |
| 590 | src_spec = NULL; |
| 591 | } else { |
| 592 | dst_spec = NULL; |
| 593 | } |
| 594 | } |
| 595 | |
| 596 | if (src_spec) { |
| 597 | if (src_spec->channels != stream->src_spec.channels) { |
| 598 | SDL_free(stream->src_chmap); |
| 599 | stream->src_chmap = NULL; |
| 600 | } |
| 601 | SDL_copyp(&stream->src_spec, src_spec); |
| 602 | } |
| 603 | |
| 604 | if (dst_spec) { |
| 605 | if (dst_spec->channels != stream->dst_spec.channels) { |
| 606 | SDL_free(stream->dst_chmap); |
| 607 | stream->dst_chmap = NULL; |
| 608 | } |
| 609 | SDL_copyp(&stream->dst_spec, dst_spec); |
| 610 | } |
| 611 | |
| 612 | SDL_UnlockMutex(stream->lock); |
| 613 | |
| 614 | return true; |
| 615 | } |
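
/* Sketch: retarget only the output side of an existing stream (pass NULL for the side you
   want to leave unchanged):

       const SDL_AudioSpec newdst = { SDL_AUDIO_F32, 2, 48000 };
       SDL_SetAudioStreamFormat(stream, NULL, &newdst);
*/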
| 616 | |
| 617 | bool SetAudioStreamChannelMap(SDL_AudioStream *stream, const SDL_AudioSpec *spec, int **stream_chmap, const int *chmap, int channels, int isinput) |
| 618 | { |
    if (!stream) {
        return SDL_InvalidParamError("stream");
    }
| 622 | |
| 623 | bool result = true; |
| 624 | |
| 625 | SDL_LockMutex(stream->lock); |
| 626 | |
    if (channels != spec->channels) {
        result = SDL_SetError("Wrong number of channels");
    } else if (!*stream_chmap && !chmap) {
        // already at default, we're good.
    } else if (*stream_chmap && chmap && (SDL_memcmp(*stream_chmap, chmap, sizeof (*chmap) * channels) == 0)) {
        // already have this map, don't allocate/copy it again.
    } else if (SDL_ChannelMapIsBogus(chmap, channels)) {
        result = SDL_SetError("Invalid channel mapping");
| 635 | } else { |
| 636 | if (SDL_ChannelMapIsDefault(chmap, channels)) { |
| 637 | chmap = NULL; // just apply a default mapping. |
| 638 | } |
| 639 | if (chmap) { |
| 640 | int *dupmap = SDL_ChannelMapDup(chmap, channels); |
| 641 | if (!dupmap) { |
                result = SDL_SetError("Invalid channel mapping");
| 643 | } else { |
| 644 | SDL_free(*stream_chmap); |
| 645 | *stream_chmap = dupmap; |
| 646 | } |
| 647 | } else { |
| 648 | SDL_free(*stream_chmap); |
| 649 | *stream_chmap = NULL; |
| 650 | } |
| 651 | } |
| 652 | |
| 653 | SDL_UnlockMutex(stream->lock); |
| 654 | return result; |
| 655 | } |
| 656 | |
| 657 | bool SDL_SetAudioStreamInputChannelMap(SDL_AudioStream *stream, const int *chmap, int channels) |
| 658 | { |
| 659 | return SetAudioStreamChannelMap(stream, &stream->src_spec, &stream->src_chmap, chmap, channels, 1); |
| 660 | } |
| 661 | |
| 662 | bool SDL_SetAudioStreamOutputChannelMap(SDL_AudioStream *stream, const int *chmap, int channels) |
| 663 | { |
| 664 | return SetAudioStreamChannelMap(stream, &stream->dst_spec, &stream->dst_chmap, chmap, channels, 0); |
| 665 | } |
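
/* Sketch: swap left and right on the input side of a stereo stream.

       static const int swap_lr[2] = { 1, 0 };
       SDL_SetAudioStreamInputChannelMap(stream, swap_lr, 2);
*/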
| 666 | |
| 667 | int *SDL_GetAudioStreamInputChannelMap(SDL_AudioStream *stream, int *count) |
| 668 | { |
| 669 | int *result = NULL; |
| 670 | int channels = 0; |
| 671 | if (stream) { |
| 672 | SDL_LockMutex(stream->lock); |
| 673 | channels = stream->src_spec.channels; |
| 674 | result = SDL_ChannelMapDup(stream->src_chmap, channels); |
| 675 | SDL_UnlockMutex(stream->lock); |
| 676 | } |
| 677 | |
| 678 | if (count) { |
| 679 | *count = channels; |
| 680 | } |
| 681 | |
| 682 | return result; |
| 683 | } |
| 684 | |
| 685 | int *SDL_GetAudioStreamOutputChannelMap(SDL_AudioStream *stream, int *count) |
| 686 | { |
| 687 | int *result = NULL; |
| 688 | int channels = 0; |
| 689 | if (stream) { |
| 690 | SDL_LockMutex(stream->lock); |
| 691 | channels = stream->dst_spec.channels; |
| 692 | result = SDL_ChannelMapDup(stream->dst_chmap, channels); |
| 693 | SDL_UnlockMutex(stream->lock); |
| 694 | } |
| 695 | |
| 696 | if (count) { |
| 697 | *count = channels; |
| 698 | } |
| 699 | |
| 700 | return result; |
| 701 | } |
| 702 | |
| 703 | float SDL_GetAudioStreamFrequencyRatio(SDL_AudioStream *stream) |
| 704 | { |
    if (!stream) {
        SDL_InvalidParamError("stream");
        return 0.0f;
    }
| 709 | |
| 710 | SDL_LockMutex(stream->lock); |
| 711 | const float freq_ratio = stream->freq_ratio; |
| 712 | SDL_UnlockMutex(stream->lock); |
| 713 | |
| 714 | return freq_ratio; |
| 715 | } |
| 716 | |
| 717 | bool SDL_SetAudioStreamFrequencyRatio(SDL_AudioStream *stream, float freq_ratio) |
| 718 | { |
    if (!stream) {
        return SDL_InvalidParamError("stream");
    }
| 722 | |
| 723 | // Picked mostly arbitrarily. |
| 724 | const float min_freq_ratio = 0.01f; |
| 725 | const float max_freq_ratio = 100.0f; |
| 726 | |
    if (freq_ratio < min_freq_ratio) {
        return SDL_SetError("Frequency ratio is too low");
    } else if (freq_ratio > max_freq_ratio) {
        return SDL_SetError("Frequency ratio is too high");
    }
| 732 | |
| 733 | SDL_LockMutex(stream->lock); |
| 734 | stream->freq_ratio = freq_ratio; |
| 735 | SDL_UnlockMutex(stream->lock); |
| 736 | |
| 737 | return true; |
| 738 | } |
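
/* Sketch: play at double speed (and a corresponding octave up in pitch); 1.0f restores
   normal playback.

       SDL_SetAudioStreamFrequencyRatio(stream, 2.0f);
*/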
| 739 | |
| 740 | float SDL_GetAudioStreamGain(SDL_AudioStream *stream) |
| 741 | { |
    if (!stream) {
        SDL_InvalidParamError("stream");
        return -1.0f;
    }
| 746 | |
| 747 | SDL_LockMutex(stream->lock); |
| 748 | const float gain = stream->gain; |
| 749 | SDL_UnlockMutex(stream->lock); |
| 750 | |
| 751 | return gain; |
| 752 | } |
| 753 | |
| 754 | bool SDL_SetAudioStreamGain(SDL_AudioStream *stream, float gain) |
| 755 | { |
    if (!stream) {
        return SDL_InvalidParamError("stream");
    } else if (gain < 0.0f) {
        return SDL_InvalidParamError("gain");
    }
| 761 | |
| 762 | SDL_LockMutex(stream->lock); |
| 763 | stream->gain = gain; |
| 764 | SDL_UnlockMutex(stream->lock); |
| 765 | |
| 766 | return true; |
| 767 | } |
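
/* Sketch: halve the amplitude (about -6 dB); gain is a linear multiplier and 1.0f means
   no change.

       SDL_SetAudioStreamGain(stream, 0.5f);
*/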
| 768 | |
| 769 | static bool CheckAudioStreamIsFullySetup(SDL_AudioStream *stream) |
| 770 | { |
    if (stream->src_spec.format == 0) {
        return SDL_SetError("Stream has no source format");
    } else if (stream->dst_spec.format == 0) {
        return SDL_SetError("Stream has no destination format");
    }
| 776 | |
| 777 | return true; |
| 778 | } |
| 779 | |
| 780 | static bool PutAudioStreamBuffer(SDL_AudioStream *stream, const void *buf, int len, SDL_ReleaseAudioBufferCallback callback, void* userdata) |
| 781 | { |
| 782 | #if DEBUG_AUDIOSTREAM |
| 783 | SDL_Log("AUDIOSTREAM: wants to put %d bytes" , len); |
| 784 | #endif |
| 785 | |
| 786 | SDL_LockMutex(stream->lock); |
| 787 | |
| 788 | if (!CheckAudioStreamIsFullySetup(stream)) { |
| 789 | SDL_UnlockMutex(stream->lock); |
| 790 | return false; |
| 791 | } |
| 792 | |
    if ((len % SDL_AUDIO_FRAMESIZE(stream->src_spec)) != 0) {
        SDL_UnlockMutex(stream->lock);
        return SDL_SetError("Can't add partial sample frames");
    }
| 797 | |
| 798 | SDL_AudioTrack* track = NULL; |
| 799 | |
| 800 | if (callback) { |
| 801 | track = SDL_CreateAudioTrack(stream->queue, &stream->src_spec, stream->src_chmap, (Uint8 *)buf, len, len, callback, userdata); |
| 802 | |
| 803 | if (!track) { |
| 804 | SDL_UnlockMutex(stream->lock); |
| 805 | return false; |
| 806 | } |
| 807 | } |
| 808 | |
| 809 | const int prev_available = stream->put_callback ? SDL_GetAudioStreamAvailable(stream) : 0; |
| 810 | |
| 811 | bool result = true; |
| 812 | |
| 813 | if (track) { |
| 814 | SDL_AddTrackToAudioQueue(stream->queue, track); |
| 815 | } else { |
| 816 | result = SDL_WriteToAudioQueue(stream->queue, &stream->src_spec, stream->src_chmap, (const Uint8 *)buf, len); |
| 817 | } |
| 818 | |
| 819 | if (result) { |
| 820 | if (stream->put_callback) { |
| 821 | const int newavail = SDL_GetAudioStreamAvailable(stream) - prev_available; |
| 822 | stream->put_callback(stream->put_callback_userdata, stream, newavail, newavail); |
| 823 | } |
| 824 | } |
| 825 | |
| 826 | SDL_UnlockMutex(stream->lock); |
| 827 | |
| 828 | return result; |
| 829 | } |
| 830 | |
| 831 | static void SDLCALL FreeAllocatedAudioBuffer(void *userdata, const void *buf, int len) |
| 832 | { |
| 833 | SDL_free((void*) buf); |
| 834 | } |
| 835 | |
| 836 | bool SDL_PutAudioStreamData(SDL_AudioStream *stream, const void *buf, int len) |
| 837 | { |
    if (!stream) {
        return SDL_InvalidParamError("stream");
    } else if (!buf) {
        return SDL_InvalidParamError("buf");
    } else if (len < 0) {
        return SDL_InvalidParamError("len");
    } else if (len == 0) {
        return true; // nothing to do.
    }
| 847 | |
| 848 | // When copying in large amounts of data, try and do as much work as possible |
| 849 | // outside of the stream lock, otherwise the output device is likely to be starved. |
| 850 | const int large_input_thresh = 64 * 1024; |
| 851 | |
| 852 | if (len >= large_input_thresh) { |
| 853 | void *data = SDL_malloc(len); |
| 854 | |
| 855 | if (!data) { |
| 856 | return false; |
| 857 | } |
| 858 | |
| 859 | SDL_memcpy(data, buf, len); |
| 860 | buf = data; |
| 861 | |
| 862 | bool ret = PutAudioStreamBuffer(stream, buf, len, FreeAllocatedAudioBuffer, NULL); |
| 863 | if (!ret) { |
| 864 | SDL_free(data); |
| 865 | } |
| 866 | return ret; |
| 867 | } |
| 868 | |
| 869 | return PutAudioStreamBuffer(stream, buf, len, NULL, NULL); |
| 870 | } |
| 871 | |
| 872 | bool SDL_FlushAudioStream(SDL_AudioStream *stream) |
| 873 | { |
    if (!stream) {
        return SDL_InvalidParamError("stream");
    }
| 877 | |
| 878 | SDL_LockMutex(stream->lock); |
| 879 | SDL_FlushAudioQueue(stream->queue); |
| 880 | SDL_UnlockMutex(stream->lock); |
| 881 | |
| 882 | return true; |
| 883 | } |
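
/* Flushing marks everything queued so far as a complete track: the tail of the data, which
   the resampler would otherwise hold back as right-side padding, becomes readable, and any
   data put in afterwards starts a fresh track. */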
| 884 | |
| 885 | /* this does not save the previous contents of stream->work_buffer. It's a work buffer!! |
| 886 | The returned buffer is aligned/padded for use with SIMD instructions. */ |
| 887 | static Uint8 *EnsureAudioStreamWorkBufferSize(SDL_AudioStream *stream, size_t newlen) |
| 888 | { |
| 889 | if (stream->work_buffer_allocation >= newlen) { |
| 890 | return stream->work_buffer; |
| 891 | } |
| 892 | |
| 893 | Uint8 *ptr = (Uint8 *) SDL_aligned_alloc(SDL_GetSIMDAlignment(), newlen); |
| 894 | if (!ptr) { |
| 895 | return NULL; // previous work buffer is still valid! |
| 896 | } |
| 897 | |
| 898 | SDL_aligned_free(stream->work_buffer); |
| 899 | stream->work_buffer = ptr; |
| 900 | stream->work_buffer_allocation = newlen; |
| 901 | return ptr; |
| 902 | } |
| 903 | |
| 904 | static Sint64 NextAudioStreamIter(SDL_AudioStream* stream, void** inout_iter, |
| 905 | Sint64* inout_resample_offset, SDL_AudioSpec* out_spec, int **out_chmap, bool* out_flushed) |
| 906 | { |
| 907 | SDL_AudioSpec spec; |
| 908 | bool flushed; |
| 909 | int *chmap; |
| 910 | size_t queued_bytes = SDL_NextAudioQueueIter(stream->queue, inout_iter, &spec, &chmap, &flushed); |
| 911 | |
| 912 | if (out_spec) { |
| 913 | SDL_copyp(out_spec, &spec); |
| 914 | } |
| 915 | |
| 916 | if (out_chmap) { |
| 917 | *out_chmap = chmap; |
| 918 | } |
| 919 | |
| 920 | // There is infinite audio available, whether or not we are resampling |
| 921 | if (queued_bytes == SDL_SIZE_MAX) { |
| 922 | *inout_resample_offset = 0; |
| 923 | |
| 924 | if (out_flushed) { |
| 925 | *out_flushed = false; |
| 926 | } |
| 927 | |
| 928 | return SDL_MAX_SINT32; |
| 929 | } |
| 930 | |
| 931 | Sint64 resample_offset = *inout_resample_offset; |
| 932 | Sint64 resample_rate = GetAudioStreamResampleRate(stream, spec.freq, resample_offset); |
| 933 | Sint64 output_frames = (Sint64)(queued_bytes / SDL_AUDIO_FRAMESIZE(spec)); |
| 934 | |
| 935 | if (resample_rate) { |
| 936 | // Resampling requires padding frames to the left and right of the current position. |
| 937 | // Past the end of the track, the right padding is filled with silence. |
| 938 | // But we only want to do that if the track is actually finished (flushed). |
| 939 | if (!flushed) { |
| 940 | output_frames -= SDL_GetResamplerPaddingFrames(resample_rate); |
| 941 | } |
| 942 | |
| 943 | output_frames = SDL_GetResamplerOutputFrames(output_frames, resample_rate, &resample_offset); |
| 944 | } |
| 945 | |
| 946 | if (flushed) { |
| 947 | resample_offset = 0; |
| 948 | } |
| 949 | |
| 950 | *inout_resample_offset = resample_offset; |
| 951 | |
| 952 | if (out_flushed) { |
| 953 | *out_flushed = flushed; |
| 954 | } |
| 955 | |
| 956 | return output_frames; |
| 957 | } |
| 958 | |
| 959 | static Sint64 GetAudioStreamAvailableFrames(SDL_AudioStream* stream, Sint64* out_resample_offset) |
| 960 | { |
| 961 | void* iter = SDL_BeginAudioQueueIter(stream->queue); |
| 962 | |
| 963 | Sint64 resample_offset = stream->resample_offset; |
| 964 | Sint64 output_frames = 0; |
| 965 | |
| 966 | while (iter) { |
| 967 | output_frames += NextAudioStreamIter(stream, &iter, &resample_offset, NULL, NULL, NULL); |
| 968 | |
| 969 | // Already got loads of frames. Just clamp it to something reasonable |
| 970 | if (output_frames >= SDL_MAX_SINT32) { |
| 971 | output_frames = SDL_MAX_SINT32; |
| 972 | break; |
| 973 | } |
| 974 | } |
| 975 | |
| 976 | if (out_resample_offset) { |
| 977 | *out_resample_offset = resample_offset; |
| 978 | } |
| 979 | |
| 980 | return output_frames; |
| 981 | } |
| 982 | |
| 983 | static Sint64 GetAudioStreamHead(SDL_AudioStream* stream, SDL_AudioSpec* out_spec, int **out_chmap, bool* out_flushed) |
| 984 | { |
| 985 | void* iter = SDL_BeginAudioQueueIter(stream->queue); |
| 986 | |
| 987 | if (!iter) { |
| 988 | SDL_zerop(out_spec); |
| 989 | *out_flushed = false; |
| 990 | return 0; |
| 991 | } |
| 992 | |
| 993 | Sint64 resample_offset = stream->resample_offset; |
| 994 | return NextAudioStreamIter(stream, &iter, &resample_offset, out_spec, out_chmap, out_flushed); |
| 995 | } |
| 996 | |
| 997 | // You must hold stream->lock and validate your parameters before calling this! |
| 998 | // Enough input data MUST be available! |
| 999 | static bool GetAudioStreamDataInternal(SDL_AudioStream *stream, void *buf, int output_frames, float gain) |
| 1000 | { |
| 1001 | const SDL_AudioSpec* src_spec = &stream->input_spec; |
| 1002 | const SDL_AudioSpec* dst_spec = &stream->dst_spec; |
| 1003 | |
| 1004 | const SDL_AudioFormat src_format = src_spec->format; |
| 1005 | const int src_channels = src_spec->channels; |
| 1006 | |
| 1007 | const SDL_AudioFormat dst_format = dst_spec->format; |
| 1008 | const int dst_channels = dst_spec->channels; |
| 1009 | const int *dst_map = stream->dst_chmap; |
| 1010 | |
| 1011 | const int max_frame_size = CalculateMaxFrameSize(src_format, src_channels, dst_format, dst_channels); |
| 1012 | const Sint64 resample_rate = GetAudioStreamResampleRate(stream, src_spec->freq, stream->resample_offset); |
| 1013 | |
| 1014 | #if DEBUG_AUDIOSTREAM |
| 1015 | SDL_Log("AUDIOSTREAM: asking for %d frames." , output_frames); |
| 1016 | #endif |
| 1017 | |
| 1018 | SDL_assert(output_frames > 0); |
| 1019 | |
| 1020 | // Not resampling? It's an easy conversion (and maybe not even that!) |
| 1021 | if (resample_rate == 0) { |
| 1022 | Uint8* work_buffer = NULL; |
| 1023 | |
| 1024 | // Ensure we have enough scratch space for any conversions |
| 1025 | if ((src_format != dst_format) || (src_channels != dst_channels) || (gain != 1.0f)) { |
| 1026 | work_buffer = EnsureAudioStreamWorkBufferSize(stream, output_frames * max_frame_size); |
| 1027 | |
| 1028 | if (!work_buffer) { |
| 1029 | return false; |
| 1030 | } |
| 1031 | } |
| 1032 | |
        if (SDL_ReadFromAudioQueue(stream->queue, (Uint8 *)buf, dst_format, dst_channels, dst_map, 0, output_frames, 0, work_buffer, gain) != buf) {
            return SDL_SetError("Not enough data in queue");
        }
| 1036 | |
| 1037 | return true; |
| 1038 | } |
| 1039 | |
| 1040 | // Time to do some resampling! |
| 1041 | // Calculate the number of input frames necessary for this request. |
    // Because resampling happens "between" frames, the same number of output_frames
| 1043 | // can require a different number of input_frames, depending on the resample_offset. |
| 1044 | // In fact, input_frames can sometimes even be zero when upsampling. |
| 1045 | const int input_frames = (int) SDL_GetResamplerInputFrames(output_frames, resample_rate, stream->resample_offset); |
| 1046 | |
| 1047 | const int padding_frames = SDL_GetResamplerPaddingFrames(resample_rate); |
| 1048 | |
| 1049 | const SDL_AudioFormat resample_format = SDL_AUDIO_F32; |
| 1050 | |
| 1051 | // If increasing channels, do it after resampling, since we'd just |
| 1052 | // do more work to resample duplicate channels. If we're decreasing, do |
| 1053 | // it first so we resample the interpolated data instead of interpolating |
| 1054 | // the resampled data. |
| 1055 | const int resample_channels = SDL_min(src_channels, dst_channels); |
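    // e.g. stereo 96000Hz -> mono 48000Hz mixes down to mono before resampling (half the
    // samples to resample); mono 48000Hz -> stereo 96000Hz resamples the mono data and only
    // then duplicates it to two channels.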
| 1056 | |
| 1057 | // The size of the frame used when resampling |
| 1058 | const int resample_frame_size = SDL_AUDIO_BYTESIZE(resample_format) * resample_channels; |
| 1059 | |
| 1060 | // The main portion of the work_buffer can be used to store 3 things: |
| 1061 | // src_sample_frame_size * (left_padding+input_buffer+right_padding) |
| 1062 | // resample_frame_size * (left_padding+input_buffer+right_padding) |
| 1063 | // dst_sample_frame_size * output_frames |
| 1064 | // |
| 1065 | // ResampleAudio also requires an additional buffer if it can't write straight to the output: |
| 1066 | // resample_frame_size * output_frames |
| 1067 | // |
| 1068 | // Note, ConvertAudio requires (num_frames * max_sample_frame_size) of scratch space |
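    // Illustrative sizing (hypothetical numbers): 100 input frames with 10 padding frames
    // on each side and an 8-byte max frame size need (100 + 2*10) * 8 = 960 bytes for the
    // main region; the separate resample output region below is only added when the
    // resampler can't write straight into the caller's buffer.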
| 1069 | const int work_buffer_frames = input_frames + (padding_frames * 2); |
| 1070 | int work_buffer_capacity = work_buffer_frames * max_frame_size; |
| 1071 | int resample_buffer_offset = -1; |
| 1072 | |
| 1073 | // Check if we can resample directly into the output buffer. |
| 1074 | // Note, this is just to avoid extra copies. |
    // Some other formats may fit directly into the output buffer, but I'd rather process data in a SIMD-aligned buffer.
| 1076 | if ((dst_format != resample_format) || (dst_channels != resample_channels)) { |
| 1077 | // Allocate space for converting the resampled output to the destination format |
| 1078 | int resample_convert_bytes = output_frames * max_frame_size; |
| 1079 | work_buffer_capacity = SDL_max(work_buffer_capacity, resample_convert_bytes); |
| 1080 | |
| 1081 | // SIMD-align the buffer |
| 1082 | int simd_alignment = (int) SDL_GetSIMDAlignment(); |
| 1083 | work_buffer_capacity += simd_alignment - 1; |
| 1084 | work_buffer_capacity -= work_buffer_capacity % simd_alignment; |
| 1085 | |
| 1086 | // Allocate space for the resampled output |
| 1087 | int resample_bytes = output_frames * resample_frame_size; |
| 1088 | resample_buffer_offset = work_buffer_capacity; |
| 1089 | work_buffer_capacity += resample_bytes; |
| 1090 | } |
| 1091 | |
| 1092 | Uint8* work_buffer = EnsureAudioStreamWorkBufferSize(stream, work_buffer_capacity); |
| 1093 | |
| 1094 | if (!work_buffer) { |
| 1095 | return false; |
| 1096 | } |
| 1097 | |
| 1098 | // adjust gain either before resampling or after, depending on which point has less |
| 1099 | // samples to process. |
| 1100 | const float preresample_gain = (input_frames > output_frames) ? 1.0f : gain; |
| 1101 | const float postresample_gain = (input_frames > output_frames) ? gain : 1.0f; |
| 1102 | |
| 1103 | // (dst channel map is NULL because we'll do the final swizzle on ConvertAudio after resample.) |
| 1104 | const Uint8* input_buffer = SDL_ReadFromAudioQueue(stream->queue, |
| 1105 | NULL, resample_format, resample_channels, NULL, |
| 1106 | padding_frames, input_frames, padding_frames, work_buffer, preresample_gain); |
| 1107 | |
    if (!input_buffer) {
        return SDL_SetError("Not enough data in queue (resample)");
    }
| 1111 | |
| 1112 | input_buffer += padding_frames * resample_frame_size; |
| 1113 | |
| 1114 | // Decide where the resampled output goes |
| 1115 | void* resample_buffer = (resample_buffer_offset != -1) ? (work_buffer + resample_buffer_offset) : buf; |
| 1116 | |
| 1117 | SDL_ResampleAudio(resample_channels, |
| 1118 | (const float *) input_buffer, input_frames, |
| 1119 | (float*) resample_buffer, output_frames, |
| 1120 | resample_rate, &stream->resample_offset); |
| 1121 | |
| 1122 | // Convert to the final format, if necessary (src channel map is NULL because SDL_ReadFromAudioQueue already handled this). |
| 1123 | ConvertAudio(output_frames, resample_buffer, resample_format, resample_channels, NULL, buf, dst_format, dst_channels, dst_map, work_buffer, postresample_gain); |
| 1124 | |
| 1125 | return true; |
| 1126 | } |
| 1127 | |
| 1128 | // get converted/resampled data from the stream |
int SDL_GetAudioStreamDataAdjustGain(SDL_AudioStream *stream, void *voidbuf, int len, float extra_gain)
| 1130 | { |
| 1131 | Uint8 *buf = (Uint8 *) voidbuf; |
| 1132 | |
| 1133 | #if DEBUG_AUDIOSTREAM |
| 1134 | SDL_Log("AUDIOSTREAM: want to get %d converted bytes" , len); |
| 1135 | #endif |
| 1136 | |
    if (!stream) {
        SDL_InvalidParamError("stream");
        return -1;
    } else if (!buf) {
        SDL_InvalidParamError("buf");
        return -1;
    } else if (len < 0) {
        SDL_InvalidParamError("len");
        return -1;
    } else if (len == 0) {
        return 0; // nothing to do.
    }
| 1149 | |
| 1150 | SDL_LockMutex(stream->lock); |
| 1151 | |
| 1152 | if (!CheckAudioStreamIsFullySetup(stream)) { |
| 1153 | SDL_UnlockMutex(stream->lock); |
| 1154 | return -1; |
| 1155 | } |
| 1156 | |
| 1157 | const float gain = stream->gain * extra_gain; |
| 1158 | const int dst_frame_size = SDL_AUDIO_FRAMESIZE(stream->dst_spec); |
| 1159 | |
| 1160 | len -= len % dst_frame_size; // chop off any fractional sample frame. |
| 1161 | |
| 1162 | // give the callback a chance to fill in more stream data if it wants. |
| 1163 | if (stream->get_callback) { |
| 1164 | Sint64 total_request = len / dst_frame_size; // start with sample frames desired |
| 1165 | Sint64 additional_request = total_request; |
| 1166 | |
| 1167 | Sint64 resample_offset = 0; |
| 1168 | Sint64 available_frames = GetAudioStreamAvailableFrames(stream, &resample_offset); |
| 1169 | |
| 1170 | additional_request -= SDL_min(additional_request, available_frames); |
| 1171 | |
| 1172 | Sint64 resample_rate = GetAudioStreamResampleRate(stream, stream->src_spec.freq, resample_offset); |
| 1173 | |
| 1174 | if (resample_rate) { |
| 1175 | total_request = SDL_GetResamplerInputFrames(total_request, resample_rate, resample_offset); |
| 1176 | additional_request = SDL_GetResamplerInputFrames(additional_request, resample_rate, resample_offset); |
| 1177 | } |
| 1178 | |
| 1179 | total_request *= SDL_AUDIO_FRAMESIZE(stream->src_spec); // convert sample frames to bytes. |
| 1180 | additional_request *= SDL_AUDIO_FRAMESIZE(stream->src_spec); // convert sample frames to bytes. |
| 1181 | stream->get_callback(stream->get_callback_userdata, stream, (int) SDL_min(additional_request, SDL_INT_MAX), (int) SDL_min(total_request, SDL_INT_MAX)); |
| 1182 | } |
| 1183 | |
| 1184 | // Process the data in chunks to avoid allocating too much memory (and potential integer overflows) |
| 1185 | const int chunk_size = 4096; |
| 1186 | |
| 1187 | int total = 0; |
| 1188 | |
| 1189 | while (total < len) { |
| 1190 | // Audio is processed a track at a time. |
| 1191 | SDL_AudioSpec input_spec; |
| 1192 | int *input_chmap; |
| 1193 | bool flushed; |
| 1194 | const Sint64 available_frames = GetAudioStreamHead(stream, &input_spec, &input_chmap, &flushed); |
| 1195 | |
| 1196 | if (available_frames == 0) { |
| 1197 | if (flushed) { |
| 1198 | SDL_PopAudioQueueHead(stream->queue); |
| 1199 | SDL_zero(stream->input_spec); |
| 1200 | stream->resample_offset = 0; |
| 1201 | stream->input_chmap = NULL; |
| 1202 | continue; |
| 1203 | } |
| 1204 | // There are no frames available, but the track hasn't been flushed, so more might be added later. |
| 1205 | break; |
| 1206 | } |
| 1207 | |
| 1208 | if (!UpdateAudioStreamInputSpec(stream, &input_spec, input_chmap)) { |
| 1209 | total = total ? total : -1; |
| 1210 | break; |
| 1211 | } |
| 1212 | |
| 1213 | // Clamp the output length to the maximum currently available. |
| 1214 | // GetAudioStreamDataInternal requires enough input data is available. |
| 1215 | int output_frames = (len - total) / dst_frame_size; |
| 1216 | output_frames = SDL_min(output_frames, chunk_size); |
| 1217 | output_frames = (int) SDL_min(output_frames, available_frames); |
| 1218 | |
| 1219 | if (!GetAudioStreamDataInternal(stream, &buf[total], output_frames, gain)) { |
| 1220 | total = total ? total : -1; |
| 1221 | break; |
| 1222 | } |
| 1223 | |
| 1224 | total += output_frames * dst_frame_size; |
| 1225 | } |
| 1226 | |
| 1227 | SDL_UnlockMutex(stream->lock); |
| 1228 | |
| 1229 | #if DEBUG_AUDIOSTREAM |
| 1230 | SDL_Log("AUDIOSTREAM: Final result was %d" , total); |
| 1231 | #endif |
| 1232 | |
| 1233 | return total; |
| 1234 | } |
| 1235 | |
| 1236 | int SDL_GetAudioStreamData(SDL_AudioStream *stream, void *voidbuf, int len) |
| 1237 | { |
| 1238 | return SDL_GetAudioStreamDataAdjustGain(stream, voidbuf, len, 1.0f); |
| 1239 | } |
| 1240 | |
| 1241 | // number of converted/resampled bytes available for output |
| 1242 | int SDL_GetAudioStreamAvailable(SDL_AudioStream *stream) |
| 1243 | { |
    if (!stream) {
        SDL_InvalidParamError("stream");
        return -1;
    }
| 1248 | |
| 1249 | SDL_LockMutex(stream->lock); |
| 1250 | |
| 1251 | if (!CheckAudioStreamIsFullySetup(stream)) { |
| 1252 | SDL_UnlockMutex(stream->lock); |
| 1253 | return 0; |
| 1254 | } |
| 1255 | |
| 1256 | Sint64 count = GetAudioStreamAvailableFrames(stream, NULL); |
| 1257 | |
| 1258 | // convert from sample frames to bytes in destination format. |
| 1259 | count *= SDL_AUDIO_FRAMESIZE(stream->dst_spec); |
| 1260 | |
| 1261 | SDL_UnlockMutex(stream->lock); |
| 1262 | |
| 1263 | // if this overflows an int, just clamp it to a maximum. |
| 1264 | return (int) SDL_min(count, SDL_INT_MAX); |
| 1265 | } |
| 1266 | |
// number of bytes of unconverted input data currently queued.
| 1268 | int SDL_GetAudioStreamQueued(SDL_AudioStream *stream) |
| 1269 | { |
    if (!stream) {
        SDL_InvalidParamError("stream");
        return -1;
    }
| 1274 | |
| 1275 | SDL_LockMutex(stream->lock); |
| 1276 | |
| 1277 | size_t total = SDL_GetAudioQueueQueued(stream->queue); |
| 1278 | |
| 1279 | SDL_UnlockMutex(stream->lock); |
| 1280 | |
| 1281 | // if this overflows an int, just clamp it to a maximum. |
| 1282 | return (int) SDL_min(total, SDL_INT_MAX); |
| 1283 | } |
| 1284 | |
| 1285 | bool SDL_ClearAudioStream(SDL_AudioStream *stream) |
| 1286 | { |
    if (!stream) {
        return SDL_InvalidParamError("stream");
    }
| 1290 | |
| 1291 | SDL_LockMutex(stream->lock); |
| 1292 | |
| 1293 | SDL_ClearAudioQueue(stream->queue); |
| 1294 | SDL_zero(stream->input_spec); |
| 1295 | stream->input_chmap = NULL; |
| 1296 | stream->resample_offset = 0; |
| 1297 | |
| 1298 | SDL_UnlockMutex(stream->lock); |
| 1299 | return true; |
| 1300 | } |
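
// Note the difference from SDL_FlushAudioStream: clearing throws away any queued input,
// while flushing keeps it and marks it as complete so it can be fully drained.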
| 1301 | |
| 1302 | void SDL_DestroyAudioStream(SDL_AudioStream *stream) |
| 1303 | { |
| 1304 | if (!stream) { |
| 1305 | return; |
| 1306 | } |
| 1307 | |
| 1308 | SDL_DestroyProperties(stream->props); |
| 1309 | |
| 1310 | OnAudioStreamDestroy(stream); |
| 1311 | |
| 1312 | const bool simplified = stream->simplified; |
| 1313 | if (simplified) { |
| 1314 | if (stream->bound_device) { |
| 1315 | SDL_assert(stream->bound_device->simplified); |
| 1316 | SDL_CloseAudioDevice(stream->bound_device->instance_id); // this will unbind the stream. |
| 1317 | } |
| 1318 | } else { |
| 1319 | SDL_UnbindAudioStream(stream); |
| 1320 | } |
| 1321 | |
| 1322 | SDL_aligned_free(stream->work_buffer); |
| 1323 | SDL_DestroyAudioQueue(stream->queue); |
| 1324 | SDL_DestroyMutex(stream->lock); |
| 1325 | |
| 1326 | SDL_free(stream); |
| 1327 | } |
| 1328 | |
| 1329 | static void SDLCALL DontFreeThisAudioBuffer(void *userdata, const void *buf, int len) |
| 1330 | { |
| 1331 | // We don't own the buffer, but know it will outlive the stream |
| 1332 | } |
| 1333 | |
| 1334 | bool SDL_ConvertAudioSamples(const SDL_AudioSpec *src_spec, const Uint8 *src_data, int src_len, const SDL_AudioSpec *dst_spec, Uint8 **dst_data, int *dst_len) |
| 1335 | { |
| 1336 | if (dst_data) { |
| 1337 | *dst_data = NULL; |
| 1338 | } |
| 1339 | |
| 1340 | if (dst_len) { |
| 1341 | *dst_len = 0; |
| 1342 | } |
| 1343 | |
    if (!src_data) {
        return SDL_InvalidParamError("src_data");
    } else if (src_len < 0) {
        return SDL_InvalidParamError("src_len");
    } else if (!dst_data) {
        return SDL_InvalidParamError("dst_data");
    } else if (!dst_len) {
        return SDL_InvalidParamError("dst_len");
    }
| 1353 | |
| 1354 | bool result = false; |
| 1355 | Uint8 *dst = NULL; |
| 1356 | int dstlen = 0; |
| 1357 | |
| 1358 | SDL_AudioStream *stream = SDL_CreateAudioStream(src_spec, dst_spec); |
| 1359 | if (stream) { |
| 1360 | if (PutAudioStreamBuffer(stream, src_data, src_len, DontFreeThisAudioBuffer, NULL) && |
| 1361 | SDL_FlushAudioStream(stream)) { |
| 1362 | dstlen = SDL_GetAudioStreamAvailable(stream); |
| 1363 | if (dstlen >= 0) { |
| 1364 | dst = (Uint8 *)SDL_malloc(dstlen); |
| 1365 | if (dst) { |
| 1366 | result = (SDL_GetAudioStreamData(stream, dst, dstlen) == dstlen); |
| 1367 | } |
| 1368 | } |
| 1369 | } |
| 1370 | } |
| 1371 | |
| 1372 | if (result) { |
| 1373 | *dst_data = dst; |
| 1374 | *dst_len = dstlen; |
| 1375 | } else { |
| 1376 | SDL_free(dst); |
| 1377 | } |
| 1378 | |
| 1379 | SDL_DestroyAudioStream(stream); |
| 1380 | return result; |
| 1381 | } |
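
/* Sketch: one-shot conversion of a whole buffer, e.g. audio loaded with SDL_LoadWAV
   (the wav_spec/wav_data/wav_len and device_spec names here are placeholders). The caller
   owns and frees the returned buffer:

       Uint8 *converted = NULL;
       int converted_len = 0;
       if (SDL_ConvertAudioSamples(&wav_spec, wav_data, wav_len,
                                   &device_spec, &converted, &converted_len)) {
           // ... use converted / converted_len ...
           SDL_free(converted);
       }
*/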
| 1382 | |