1 | //************************************ bs::framework - Copyright 2018 Marko Pintera **************************************// |
2 | //*********** Licensed under the MIT license. See LICENSE.md for full terms. This notice is not to be removed. ***********// |
3 | #include "BsOAAudioSource.h" |
4 | #include "BsOAAudio.h" |
5 | #include "BsOAAudioClip.h" |
6 | #include "AL/al.h" |
7 | |
8 | namespace bs |
9 | { |
10 | OAAudioSource::OAAudioSource() |
11 | :mStreamBuffers(), mBusyBuffers() |
12 | { |
13 | gOAAudio()._registerSource(this); |
14 | rebuild(); |
15 | } |
16 | |
17 | OAAudioSource::~OAAudioSource() |
18 | { |
19 | clear(); |
20 | gOAAudio()._unregisterSource(this); |
21 | } |
22 | |
23 | void OAAudioSource::setClip(const HAudioClip& clip) |
24 | { |
25 | stop(); |
26 | |
27 | Lock lock(mMutex); |
28 | AudioSource::setClip(clip); |
29 | |
30 | applyClip(); |
31 | } |
32 | |
33 | void OAAudioSource::setTransform(const Transform& transform) |
34 | { |
35 | AudioSource::setTransform(transform); |
36 | |
37 | auto& contexts = gOAAudio()._getContexts(); |
38 | UINT32 numContexts = (UINT32)contexts.size(); |
39 | for (UINT32 i = 0; i < numContexts; i++) |
40 | { |
41 | if (contexts.size() > 1) |
42 | alcMakeContextCurrent(contexts[i]); |
43 | |
44 | if (is3D()) |
45 | { |
46 | Vector3 position = transform.getPosition(); |
47 | alSource3f(mSourceIDs[i], AL_POSITION, position.x, position.y, position.z); |
48 | } |
49 | else |
50 | alSource3f(mSourceIDs[i], AL_POSITION, 0.0f, 0.0f, 0.0f); |
51 | } |
52 | } |
53 | |
54 | void OAAudioSource::setVelocity(const Vector3& velocity) |
55 | { |
56 | AudioSource::setVelocity(velocity); |
57 | |
58 | auto& contexts = gOAAudio()._getContexts(); |
59 | UINT32 numContexts = (UINT32)contexts.size(); |
60 | for (UINT32 i = 0; i < numContexts; i++) |
61 | { |
62 | if (contexts.size() > 1) |
63 | alcMakeContextCurrent(contexts[i]); |
64 | |
65 | if (is3D()) |
66 | alSource3f(mSourceIDs[i], AL_VELOCITY, velocity.x, velocity.y, velocity.z); |
67 | else |
68 | alSource3f(mSourceIDs[i], AL_VELOCITY, 0.0f, 0.0f, 0.0f); |
69 | } |
70 | } |
71 | |
72 | void OAAudioSource::setVolume(float volume) |
73 | { |
74 | AudioSource::setVolume(volume); |
75 | |
76 | auto& contexts = gOAAudio()._getContexts(); |
77 | UINT32 numContexts = (UINT32)contexts.size(); |
78 | for (UINT32 i = 0; i < numContexts; i++) |
79 | { |
80 | if (contexts.size() > 1) |
81 | alcMakeContextCurrent(contexts[i]); |
82 | |
83 | alSourcef(mSourceIDs[i], AL_GAIN, mVolume); |
84 | } |
85 | } |
86 | |
87 | void OAAudioSource::setPitch(float pitch) |
88 | { |
89 | AudioSource::setPitch(pitch); |
90 | |
91 | auto& contexts = gOAAudio()._getContexts(); |
92 | UINT32 numContexts = (UINT32)contexts.size(); |
93 | for (UINT32 i = 0; i < numContexts; i++) |
94 | { |
95 | if (contexts.size() > 1) |
96 | alcMakeContextCurrent(contexts[i]); |
97 | |
98 | alSourcef(mSourceIDs[i], AL_PITCH, pitch); |
99 | } |
100 | } |
101 | |
102 | void OAAudioSource::setIsLooping(bool loop) |
103 | { |
104 | AudioSource::setIsLooping(loop); |
105 | |
106 | // When streaming we handle looping manually |
107 | if (requiresStreaming()) |
108 | loop = false; |
109 | |
110 | auto& contexts = gOAAudio()._getContexts(); |
111 | UINT32 numContexts = (UINT32)contexts.size(); |
112 | for (UINT32 i = 0; i < numContexts; i++) |
113 | { |
114 | if (contexts.size() > 1) |
115 | alcMakeContextCurrent(contexts[i]); |
116 | |
117 | alSourcei(mSourceIDs[i], AL_LOOPING, loop); |
118 | } |
119 | } |
120 | |
121 | void OAAudioSource::setPriority(INT32 priority) |
122 | { |
123 | AudioSource::setPriority(priority); |
124 | |
125 | // Do nothing, OpenAL doesn't support priorities (perhaps emulate the behaviour by manually disabling sources?) |
126 | } |
127 | |
128 | void OAAudioSource::setMinDistance(float distance) |
129 | { |
130 | AudioSource::setMinDistance(distance); |
131 | |
132 | auto& contexts = gOAAudio()._getContexts(); |
133 | UINT32 numContexts = (UINT32)contexts.size(); |
134 | for (UINT32 i = 0; i < numContexts; i++) |
135 | { |
136 | if (contexts.size() > 1) |
137 | alcMakeContextCurrent(contexts[i]); |
138 | |
139 | alSourcef(mSourceIDs[i], AL_REFERENCE_DISTANCE, distance); |
140 | } |
141 | } |
142 | |
143 | void OAAudioSource::setAttenuation(float attenuation) |
144 | { |
145 | AudioSource::setAttenuation(attenuation); |
146 | |
147 | auto& contexts = gOAAudio()._getContexts(); |
148 | UINT32 numContexts = (UINT32)contexts.size(); |
149 | for (UINT32 i = 0; i < numContexts; i++) |
150 | { |
151 | if (contexts.size() > 1) |
152 | alcMakeContextCurrent(contexts[i]); |
153 | |
154 | alSourcef(mSourceIDs[i], AL_ROLLOFF_FACTOR, attenuation); |
155 | } |
156 | } |
157 | |
158 | void OAAudioSource::play() |
159 | { |
160 | if (mGloballyPaused) |
161 | return; |
162 | |
163 | if(requiresStreaming()) |
164 | { |
165 | Lock lock(mMutex); |
166 | |
167 | if (!mIsStreaming) |
168 | { |
169 | startStreaming(); |
170 | streamUnlocked(); // Stream first block on this thread to ensure something can play right away |
171 | } |
172 | } |
173 | |
174 | auto& contexts = gOAAudio()._getContexts(); |
175 | UINT32 numContexts = (UINT32)contexts.size(); |
176 | for (UINT32 i = 0; i < numContexts; i++) |
177 | { |
178 | if (contexts.size() > 1) |
179 | alcMakeContextCurrent(contexts[i]); |
180 | |
181 | alSourcePlay(mSourceIDs[i]); |
182 | |
183 | // Non-3D clips need to play only on a single source |
184 | // Note: I'm still creating sourcs objects (and possibly queuing streaming buffers) for these non-playing |
185 | // sources. It would be possible to optimize them out at cost of more complexity. At this time it doesn't feel |
186 | // worth it. |
187 | if(!is3D()) |
188 | break; |
189 | } |
190 | } |
191 | |
192 | void OAAudioSource::pause() |
193 | { |
194 | auto& contexts = gOAAudio()._getContexts(); |
195 | UINT32 numContexts = (UINT32)contexts.size(); |
196 | for (UINT32 i = 0; i < numContexts; i++) |
197 | { |
198 | if (contexts.size() > 1) |
199 | alcMakeContextCurrent(contexts[i]); |
200 | |
201 | alSourcePause(mSourceIDs[i]); |
202 | } |
203 | } |
204 | |
205 | void OAAudioSource::stop() |
206 | { |
207 | auto& contexts = gOAAudio()._getContexts(); |
208 | UINT32 numContexts = (UINT32)contexts.size(); |
209 | for (UINT32 i = 0; i < numContexts; i++) |
210 | { |
211 | if (contexts.size() > 1) |
212 | alcMakeContextCurrent(contexts[i]); |
213 | |
214 | alSourceStop(mSourceIDs[i]); |
215 | alSourcef(mSourceIDs[i], AL_SEC_OFFSET, 0.0f); |
216 | } |
217 | |
218 | { |
219 | Lock lock(mMutex); |
220 | |
221 | mStreamProcessedPosition = 0; |
222 | mStreamQueuedPosition = 0; |
223 | |
224 | if (mIsStreaming) |
225 | stopStreaming(); |
226 | } |
227 | } |
228 | |
229 | void OAAudioSource::setGlobalPause(bool pause) |
230 | { |
231 | if (mGloballyPaused == pause) |
232 | return; |
233 | |
234 | mGloballyPaused = pause; |
235 | |
236 | if (getState() == AudioSourceState::Playing) |
237 | { |
238 | if (pause) |
239 | { |
240 | auto& contexts = gOAAudio()._getContexts(); |
241 | UINT32 numContexts = (UINT32)contexts.size(); |
242 | for (UINT32 i = 0; i < numContexts; i++) |
243 | { |
244 | if (contexts.size() > 1) |
245 | alcMakeContextCurrent(contexts[i]); |
246 | |
247 | alSourcePause(mSourceIDs[i]); |
248 | } |
249 | } |
250 | else |
251 | { |
252 | play(); |
253 | } |
254 | } |
255 | } |
256 | |
	void OAAudioSource::setTime(float time)
	{
		if (!mAudioClip.isLoaded())
			return;

		// Remember the playback state so it can be restored after the seek
		// (stop() below resets it, and also rewinds/clears streaming state)
		AudioSourceState state = getState();
		stop();

		bool needsStreaming = requiresStreaming();
		float clipTime;
		{
			Lock lock(mMutex);

			if (!needsStreaming)
				clipTime = time;
			else
			{
				// When streaming, seeking is done by moving the stream sample
				// positions; the OpenAL source offset then stays at zero since
				// it is relative to the queued buffers.
				if (mAudioClip.isLoaded())
					mStreamProcessedPosition = (UINT32)(time * mAudioClip->getFrequency() * mAudioClip->getNumChannels());
				else
					mStreamProcessedPosition = 0;

				mStreamQueuedPosition = mStreamProcessedPosition;
				clipTime = 0.0f;
			}
		}

		auto& contexts = gOAAudio()._getContexts();
		UINT32 numContexts = (UINT32)contexts.size();
		for (UINT32 i = 0; i < numContexts; i++)
		{
			if (contexts.size() > 1)
				alcMakeContextCurrent(contexts[i]);

			alSourcef(mSourceIDs[i], AL_SEC_OFFSET, clipTime);
		}

		// Restore the pre-seek state: resume playback if it wasn't stopped,
		// and immediately re-pause if it was paused
		if (state != AudioSourceState::Stopped)
			play();

		if (state == AudioSourceState::Paused)
			pause();
	}
300 | |
	float OAAudioSource::getTime() const
	{
		Lock lock(mMutex);

		auto& contexts = gOAAudio()._getContexts();

		// Only the first context's source is queried; all sources play the
		// same data so their offsets are equivalent
		if (contexts.size() > 1)
			alcMakeContextCurrent(contexts[0]);

		bool needsStreaming = requiresStreaming();
		float time;
		if (!needsStreaming)
		{
			alGetSourcef(mSourceIDs[0], AL_SEC_OFFSET, &time);
			return time;
		}
		else
		{
			// Convert the processed sample position back to seconds
			// (samples / frequency / channels)
			float timeOffset = 0.0f;
			if (mAudioClip.isLoaded())
				timeOffset = (float)mStreamProcessedPosition / mAudioClip->getFrequency() / mAudioClip->getNumChannels();

			// When streaming, the returned offset is relative to the last queued buffer
			alGetSourcef(mSourceIDs[0], AL_SEC_OFFSET, &time);
			return timeOffset + time;
		}
	}
328 | |
329 | AudioSourceState OAAudioSource::getState() const |
330 | { |
331 | ALint state; |
332 | alGetSourcei(mSourceIDs[0], AL_SOURCE_STATE, &state); |
333 | |
334 | switch(state) |
335 | { |
336 | case AL_PLAYING: |
337 | return AudioSourceState::Playing; |
338 | case AL_PAUSED: |
339 | return AudioSourceState::Paused; |
340 | case AL_INITIAL: |
341 | case AL_STOPPED: |
342 | default: |
343 | return AudioSourceState::Stopped; |
344 | } |
345 | } |
346 | |
	void OAAudioSource::clear()
	{
		// Save state and time first, so rebuild() can restore playback after
		// the sources have been recreated (e.g. on a context change)
		mSavedState = getState();
		mSavedTime = getTime();
		stop();

		auto& contexts = gOAAudio()._getContexts();
		UINT32 numContexts = (UINT32)contexts.size();

		Lock lock(mMutex);
		for (UINT32 i = 0; i < numContexts; i++)
		{
			if (contexts.size() > 1)
				alcMakeContextCurrent(contexts[i]);

			// Detach any attached buffer before deleting the source object
			alSourcei(mSourceIDs[i], AL_BUFFER, 0);
			alDeleteSources(1, &mSourceIDs[i]);
		}

		mSourceIDs.clear();
	}
368 | |
	void OAAudioSource::rebuild()
	{
		// Creates one OpenAL source per context and re-applies all cached
		// properties, then restores the playback state saved by clear().
		auto& contexts = gOAAudio()._getContexts();
		UINT32 numContexts = (UINT32)contexts.size();

		{
			Lock lock(mMutex);

			for (UINT32 i = 0; i < numContexts; i++)
			{
				if (contexts.size() > 1)
					alcMakeContextCurrent(contexts[i]);

				UINT32 source = 0;
				alGenSources(1, &source);

				mSourceIDs.push_back(source);
			}
		}

		for (UINT32 i = 0; i < numContexts; i++)
		{
			if (contexts.size() > 1)
				alcMakeContextCurrent(contexts[i]);

			// Re-apply cached source properties
			alSourcef(mSourceIDs[i], AL_PITCH, mPitch);
			alSourcef(mSourceIDs[i], AL_REFERENCE_DISTANCE, mMinDistance);
			alSourcef(mSourceIDs[i], AL_ROLLOFF_FACTOR, mAttenuation);

			// Streamed playback handles looping manually, so keep the OpenAL
			// loop flag off in that case
			if(requiresStreaming())
				alSourcei(mSourceIDs[i], AL_LOOPING, false);
			else
				alSourcei(mSourceIDs[i], AL_LOOPING, mLoop);

			if (is3D())
			{
				Vector3 position = mTransform.getPosition();

				alSourcei(mSourceIDs[i], AL_SOURCE_RELATIVE, false);
				alSource3f(mSourceIDs[i], AL_POSITION, position.x, position.y, position.z);
				alSource3f(mSourceIDs[i], AL_VELOCITY, mVelocity.x, mVelocity.y, mVelocity.z);
			}
			else
			{
				// Non-3D sounds are fixed at the listener position
				alSourcei(mSourceIDs[i], AL_SOURCE_RELATIVE, true);
				alSource3f(mSourceIDs[i], AL_POSITION, 0.0f, 0.0f, 0.0f);
				alSource3f(mSourceIDs[i], AL_VELOCITY, 0.0f, 0.0f, 0.0f);
			}

			{
				Lock lock(mMutex);

				// For non-streamed playback, attach the clip's static buffer
				// (streamed buffers are queued by the streaming thread instead)
				if (!mIsStreaming)
				{
					UINT32 oaBuffer = 0;
					if (mAudioClip.isLoaded())
					{
						OAAudioClip* oaClip = static_cast<OAAudioClip*>(mAudioClip.get());
						oaBuffer = oaClip->_getOpenALBuffer();
					}

					alSourcei(mSourceIDs[i], AL_BUFFER, oaBuffer);
				}
			}
		}

		// Restore the playback position and state saved by clear()
		setTime(mSavedTime);

		if (mSavedState != AudioSourceState::Stopped)
			play();

		if (mSavedState == AudioSourceState::Paused)
			pause();
	}
443 | |
444 | void OAAudioSource::startStreaming() |
445 | { |
446 | assert(!mIsStreaming); |
447 | |
448 | alGenBuffers(StreamBufferCount, mStreamBuffers); |
449 | gOAAudio().startStreaming(this); |
450 | |
451 | memset(&mBusyBuffers, 0, sizeof(mBusyBuffers)); |
452 | mIsStreaming = true; |
453 | } |
454 | |
455 | void OAAudioSource::stopStreaming() |
456 | { |
457 | assert(mIsStreaming); |
458 | |
459 | mIsStreaming = false; |
460 | gOAAudio().stopStreaming(this); |
461 | |
462 | auto& contexts = gOAAudio()._getContexts(); |
463 | UINT32 numContexts = (UINT32)contexts.size(); |
464 | for (UINT32 i = 0; i < numContexts; i++) |
465 | { |
466 | if (contexts.size() > 1) |
467 | alcMakeContextCurrent(contexts[i]); |
468 | |
469 | INT32 numQueuedBuffers; |
470 | alGetSourcei(mSourceIDs[i], AL_BUFFERS_QUEUED, &numQueuedBuffers); |
471 | |
472 | UINT32 buffer; |
473 | for (INT32 j = 0; j < numQueuedBuffers; j++) |
474 | alSourceUnqueueBuffers(mSourceIDs[i], 1, &buffer); |
475 | } |
476 | |
477 | alDeleteBuffers(StreamBufferCount, mStreamBuffers); |
478 | } |
479 | |
480 | void OAAudioSource::stream() |
481 | { |
482 | Lock lock(mMutex); |
483 | |
484 | streamUnlocked(); |
485 | } |
486 | |
487 | void OAAudioSource::streamUnlocked() |
488 | { |
489 | AudioDataInfo info; |
490 | info.bitDepth = mAudioClip->getBitDepth(); |
491 | info.numChannels = mAudioClip->getNumChannels(); |
492 | info.sampleRate = mAudioClip->getFrequency(); |
493 | info.numSamples = 0; |
494 | |
495 | UINT32 totalNumSamples = mAudioClip->getNumSamples(); |
496 | |
497 | // Note: It is safe to access contexts here only because it is guaranteed by the OAAudio manager that it will always |
498 | // stop all streaming before changing contexts. Otherwise a mutex lock would be needed for every context access. |
499 | auto& contexts = gOAAudio()._getContexts(); |
500 | UINT32 numContexts = (UINT32)contexts.size(); |
501 | for (UINT32 i = 0; i < numContexts; i++) |
502 | { |
503 | if (contexts.size() > 1) |
504 | alcMakeContextCurrent(contexts[i]); |
505 | |
506 | INT32 numProcessedBuffers = 0; |
507 | alGetSourcei(mSourceIDs[i], AL_BUFFERS_PROCESSED, &numProcessedBuffers); |
508 | |
509 | for (INT32 j = numProcessedBuffers; j > 0; j--) |
510 | { |
511 | UINT32 buffer; |
512 | alSourceUnqueueBuffers(mSourceIDs[i], 1, &buffer); |
513 | |
514 | INT32 bufferIdx = -1; |
515 | for (UINT32 k = 0; k < StreamBufferCount; k++) |
516 | { |
517 | if (buffer == mStreamBuffers[k]) |
518 | { |
519 | bufferIdx = k; |
520 | break; |
521 | } |
522 | } |
523 | |
524 | // Possibly some buffer from previous playback remained unqueued, in which case ignore it |
525 | if (bufferIdx == -1) |
526 | continue; |
527 | |
528 | mBusyBuffers[bufferIdx] &= ~(1 << bufferIdx); |
529 | |
530 | // Check if all sources are done with this buffer |
531 | if (mBusyBuffers[bufferIdx] != 0) |
532 | break; |
533 | |
534 | INT32 bufferSize; |
535 | INT32 bufferBits; |
536 | |
537 | alGetBufferi(buffer, AL_SIZE, &bufferSize); |
538 | alGetBufferi(buffer, AL_BITS, &bufferBits); |
539 | |
540 | if (bufferBits == 0) |
541 | { |
542 | LOGERR("Error decoding stream." ); |
543 | return; |
544 | } |
545 | else |
546 | { |
547 | UINT32 bytesPerSample = bufferBits / 8; |
548 | mStreamProcessedPosition += bufferSize / bytesPerSample; |
549 | } |
550 | |
551 | if (mStreamProcessedPosition == totalNumSamples) // Reached the end |
552 | { |
553 | mStreamProcessedPosition = 0; |
554 | |
555 | if (!mLoop) // Variable used on both threads and not thread safe, but it doesn't matter |
556 | { |
557 | stopStreaming(); |
558 | return; |
559 | } |
560 | } |
561 | } |
562 | } |
563 | |
564 | for(UINT32 i = 0; i < StreamBufferCount; i++) |
565 | { |
566 | if (mBusyBuffers[i] != 0) |
567 | continue; |
568 | |
569 | if (fillBuffer(mStreamBuffers[i], info, totalNumSamples)) |
570 | { |
571 | for (auto& source : mSourceIDs) |
572 | alSourceQueueBuffers(source, 1, &mStreamBuffers[i]); |
573 | |
574 | mBusyBuffers[i] |= 1 << i; |
575 | } |
576 | else |
577 | break; |
578 | } |
579 | } |
580 | |
581 | bool OAAudioSource::fillBuffer(UINT32 buffer, AudioDataInfo& info, UINT32 maxNumSamples) |
582 | { |
583 | UINT32 numRemainingSamples = maxNumSamples - mStreamQueuedPosition; |
584 | if (numRemainingSamples == 0) // Reached the end |
585 | { |
586 | if (mLoop) |
587 | { |
588 | mStreamQueuedPosition = 0; |
589 | numRemainingSamples = maxNumSamples; |
590 | } |
591 | else // If not looping, don't queue any more buffers, we're done |
592 | return false; |
593 | } |
594 | |
595 | // Read audio data |
596 | UINT32 numSamples = std::min(numRemainingSamples, info.sampleRate * info.numChannels); // 1 second of data |
597 | UINT32 sampleBufferSize = numSamples * (info.bitDepth / 8); |
598 | |
599 | UINT8* samples = (UINT8*)bs_stack_alloc(sampleBufferSize); |
600 | |
601 | OAAudioClip* audioClip = static_cast<OAAudioClip*>(mAudioClip.get()); |
602 | |
603 | audioClip->getSamples(samples, mStreamQueuedPosition, numSamples); |
604 | mStreamQueuedPosition += numSamples; |
605 | |
606 | info.numSamples = numSamples; |
607 | gOAAudio()._writeToOpenALBuffer(buffer, samples, info); |
608 | |
609 | bs_stack_free(samples); |
610 | |
611 | return true; |
612 | } |
613 | |
614 | void OAAudioSource::applyClip() |
615 | { |
616 | auto& contexts = gOAAudio()._getContexts(); |
617 | UINT32 numContexts = (UINT32)contexts.size(); |
618 | for (UINT32 i = 0; i < numContexts; i++) |
619 | { |
620 | if (contexts.size() > 1) |
621 | alcMakeContextCurrent(contexts[i]); |
622 | |
623 | alSourcei(mSourceIDs[i], AL_SOURCE_RELATIVE, !is3D()); |
624 | |
625 | if (!requiresStreaming()) |
626 | { |
627 | UINT32 oaBuffer = 0; |
628 | if (mAudioClip.isLoaded()) |
629 | { |
630 | OAAudioClip* oaClip = static_cast<OAAudioClip*>(mAudioClip.get()); |
631 | oaBuffer = oaClip->_getOpenALBuffer(); |
632 | } |
633 | |
634 | alSourcei(mSourceIDs[i], AL_BUFFER, oaBuffer); |
635 | } |
636 | } |
637 | |
638 | // Looping is influenced by streaming mode, so re-apply it in case it changed |
639 | setIsLooping(mLoop); |
640 | } |
641 | |
642 | void OAAudioSource::onClipChanged() |
643 | { |
644 | AudioSourceState state = getState(); |
645 | float savedTime = getTime(); |
646 | |
647 | stop(); |
648 | |
649 | { |
650 | Lock lock(mMutex); |
651 | applyClip(); |
652 | } |
653 | |
654 | setTime(savedTime); |
655 | |
656 | if (state != AudioSourceState::Stopped) |
657 | play(); |
658 | |
659 | if (state == AudioSourceState::Paused) |
660 | pause(); |
661 | } |
662 | |
663 | bool OAAudioSource::is3D() const |
664 | { |
665 | if (!mAudioClip.isLoaded()) |
666 | return true; |
667 | |
668 | return mAudioClip->is3D(); |
669 | } |
670 | |
671 | bool OAAudioSource::requiresStreaming() const |
672 | { |
673 | if (!mAudioClip.isLoaded()) |
674 | return false; |
675 | |
676 | AudioReadMode readMode = mAudioClip->getReadMode(); |
677 | bool isCompressed = readMode == AudioReadMode::LoadCompressed && mAudioClip->getFormat() != AudioFormat::PCM; |
678 | |
679 | return (readMode == AudioReadMode::Stream) || isCompressed; |
680 | } |
681 | } |
682 | |