/**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for
more details.

You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA
**********/
// "liveMedia"
// Copyright (c) 1996-2020 Live Networks, Inc.  All rights reserved.
// AMR Audio RTP Sources (RFC 4867)
// Implementation

#include "AMRAudioRTPSource.hh"
#include "MultiFramedRTPSource.hh"
#include "BitVector.hh"
#include <string.h>
#include <stdlib.h>

// This source is implemented internally by two separate sources:
// (i) an RTP source for the raw (and possibly interleaved) AMR frames, and
// (ii) a deinterleaving filter that reads from this.
// Define these two new classes here:

class RawAMRRTPSource: public MultiFramedRTPSource {
public:
  static RawAMRRTPSource*
  createNew(UsageEnvironment& env,
            Groupsock* RTPgs, unsigned char rtpPayloadFormat,
            Boolean isWideband, Boolean isOctetAligned,
            Boolean isInterleaved, Boolean CRCsArePresent);

  Boolean isWideband() const { return fIsWideband; }
  unsigned char ILL() const { return fILL; }
  unsigned char ILP() const { return fILP; }
  unsigned TOCSize() const { return fTOCSize; } // total # of frames in the last pkt
  unsigned char* TOC() const { return fTOC; } // FT+Q value for each TOC entry
  unsigned& frameIndex() { return fFrameIndex; } // index of frame-block within pkt
  Boolean& isSynchronized() { return fIsSynchronized; }

private:
  RawAMRRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
                  unsigned char rtpPayloadFormat,
                  Boolean isWideband, Boolean isOctetAligned,
                  Boolean isInterleaved, Boolean CRCsArePresent);
      // called only by createNew()

  virtual ~RawAMRRTPSource();

private:
  // redefined virtual functions:
  virtual Boolean hasBeenSynchronizedUsingRTCP();

  virtual Boolean processSpecialHeader(BufferedPacket* packet,
                                       unsigned& resultSpecialHeaderSize);
  virtual char const* MIMEtype() const;

private:
  Boolean fIsWideband, fIsOctetAligned, fIsInterleaved, fCRCsArePresent;
  unsigned char fILL, fILP;
  unsigned fTOCSize;
  unsigned char* fTOC;
  unsigned fFrameIndex;
  Boolean fIsSynchronized;
};

class AMRDeinterleaver: public AMRAudioSource {
public:
  static AMRDeinterleaver*
  createNew(UsageEnvironment& env,
            Boolean isWideband, unsigned numChannels, unsigned maxInterleaveGroupSize,
            RawAMRRTPSource* inputSource);

private:
  AMRDeinterleaver(UsageEnvironment& env,
                   Boolean isWideband, unsigned numChannels,
                   unsigned maxInterleaveGroupSize, RawAMRRTPSource* inputSource);
      // called only by "createNew()"

  virtual ~AMRDeinterleaver();

  static void afterGettingFrame(void* clientData, unsigned frameSize,
                                unsigned numTruncatedBytes,
                                struct timeval presentationTime,
                                unsigned durationInMicroseconds);
  void afterGettingFrame1(unsigned frameSize, struct timeval presentationTime);

private:
  // Redefined virtual functions:
  void doGetNextFrame();
  virtual void doStopGettingFrames();

private:
  RawAMRRTPSource* fInputSource;
  class AMRDeinterleavingBuffer* fDeinterleavingBuffer;
  Boolean fNeedAFrame;
};


////////// AMRAudioRTPSource implementation //////////

#define MAX_NUM_CHANNELS 20 // far larger than ever expected...
#define MAX_INTERLEAVING_GROUP_SIZE 1000 // far larger than ever expected...

AMRAudioSource*
AMRAudioRTPSource::createNew(UsageEnvironment& env,
                             Groupsock* RTPgs,
                             RTPSource*& resultRTPSource,
                             unsigned char rtpPayloadFormat,
                             Boolean isWideband,
                             unsigned numChannels,
                             Boolean isOctetAligned,
                             unsigned interleaving,
                             Boolean robustSortingOrder,
                             Boolean CRCsArePresent) {
  // Perform sanity checks on the input parameters:
  if (robustSortingOrder) {
    env << "AMRAudioRTPSource::createNew(): 'Robust sorting order' was specified, but we don't yet support this!\n";
    return NULL;
  } else if (numChannels > MAX_NUM_CHANNELS) {
    env << "AMRAudioRTPSource::createNew(): The \"number of channels\" parameter ("
        << numChannels << ") is much too large!\n";
    return NULL;
  } else if (interleaving > MAX_INTERLEAVING_GROUP_SIZE) {
    env << "AMRAudioRTPSource::createNew(): The \"interleaving\" parameter ("
        << interleaving << ") is much too large!\n";
    return NULL;
  }

  // 'Bandwidth-efficient mode' precludes some other options:
  if (!isOctetAligned) {
    if (interleaving > 0 || robustSortingOrder || CRCsArePresent) {
      env << "AMRAudioRTPSource::createNew(): 'Bandwidth-efficient mode' was specified, along with interleaving, 'robust sorting order', and/or CRCs, so we assume 'octet-aligned mode' instead.\n";
      isOctetAligned = True;
    }
  }

  Boolean isInterleaved;
  unsigned maxInterleaveGroupSize; // in frames (not frame-blocks)
  if (interleaving > 0) {
    isInterleaved = True;
    maxInterleaveGroupSize = interleaving*numChannels;
  } else {
    isInterleaved = False;
    maxInterleaveGroupSize = numChannels;
  }

  RawAMRRTPSource* rawRTPSource;
  resultRTPSource = rawRTPSource
    = RawAMRRTPSource::createNew(env, RTPgs, rtpPayloadFormat,
                                 isWideband, isOctetAligned,
                                 isInterleaved, CRCsArePresent);
  if (resultRTPSource == NULL) return NULL;

  AMRDeinterleaver* deinterleaver
    = AMRDeinterleaver::createNew(env, isWideband, numChannels,
                                  maxInterleaveGroupSize, rawRTPSource);
  if (deinterleaver == NULL) {
    Medium::close(resultRTPSource);
    resultRTPSource = NULL;
  }

  return deinterleaver;
}
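
// Illustrative usage sketch (not part of the library): an application would
// typically derive these parameters from a session's SDP description (the
// "AMR"/"AMR-WB" MIME type and the "octet-align", "interleaving", and "crc"
// "fmtp" attributes defined in RFC 4867), then feed the returned AMRAudioSource
// into its own sink.  The variable names below are hypothetical:
//
//   RTPSource* rtpSource; // also returned, e.g. for RTCP handling
//   AMRAudioSource* amrSource
//     = AMRAudioRTPSource::createNew(env, &rtpGroupsock, rtpSource,
//                                    96 /*dynamic payload type*/,
//                                    False /*narrowband*/, 1 /*channel*/,
//                                    True /*octet-aligned*/, 0 /*no interleaving*/,
//                                    False /*no robust sorting*/, False /*no CRCs*/);
//   if (amrSource != NULL) { /* read frames from "amrSource", e.g. via a MediaSink */ }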


////////// AMRBufferedPacket and AMRBufferedPacketFactory //////////

// A subclass of BufferedPacket, used to separate out AMR frames.

class AMRBufferedPacket: public BufferedPacket {
public:
  AMRBufferedPacket(RawAMRRTPSource& ourSource);
  virtual ~AMRBufferedPacket();

private: // redefined virtual functions
  virtual unsigned nextEnclosedFrameSize(unsigned char*& framePtr,
                                         unsigned dataSize);
private:
  RawAMRRTPSource& fOurSource;
};

class AMRBufferedPacketFactory: public BufferedPacketFactory {
private: // redefined virtual functions
  virtual BufferedPacket* createNewPacket(MultiFramedRTPSource* ourSource);
};


///////// RawAMRRTPSource implementation ////////

RawAMRRTPSource*
RawAMRRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs,
                           unsigned char rtpPayloadFormat,
                           Boolean isWideband, Boolean isOctetAligned,
                           Boolean isInterleaved, Boolean CRCsArePresent) {
  return new RawAMRRTPSource(env, RTPgs, rtpPayloadFormat,
                             isWideband, isOctetAligned,
                             isInterleaved, CRCsArePresent);
}

RawAMRRTPSource
::RawAMRRTPSource(UsageEnvironment& env,
                  Groupsock* RTPgs, unsigned char rtpPayloadFormat,
                  Boolean isWideband, Boolean isOctetAligned,
                  Boolean isInterleaved, Boolean CRCsArePresent)
  : MultiFramedRTPSource(env, RTPgs, rtpPayloadFormat,
                         isWideband ? 16000 : 8000,
                         new AMRBufferedPacketFactory),
  fIsWideband(isWideband), fIsOctetAligned(isOctetAligned),
  fIsInterleaved(isInterleaved), fCRCsArePresent(CRCsArePresent),
  fILL(0), fILP(0), fTOCSize(0), fTOC(NULL), fFrameIndex(0), fIsSynchronized(False) {
}

RawAMRRTPSource::~RawAMRRTPSource() {
  delete[] fTOC;
}

#define FT_SPEECH_LOST 14
#define FT_NO_DATA 15

static void unpackBandwidthEfficientData(BufferedPacket* packet,
                                         Boolean isWideband); // forward

Boolean RawAMRRTPSource
::processSpecialHeader(BufferedPacket* packet,
                       unsigned& resultSpecialHeaderSize) {
  // If the data is 'bandwidth-efficient', first unpack it so that it's
  // 'octet-aligned':
  if (!fIsOctetAligned) unpackBandwidthEfficientData(packet, fIsWideband);

  unsigned char* headerStart = packet->data();
  unsigned packetSize = packet->dataSize();

  // There's at least a 1-byte header, containing the CMR:
  if (packetSize < 1) return False;
  resultSpecialHeaderSize = 1;

  if (fIsInterleaved) {
    // There's an extra byte, containing the interleave parameters:
    if (packetSize < 2) return False;

    // Get the interleaving parameters, and check them for validity:
    unsigned char const secondByte = headerStart[1];
    fILL = (secondByte&0xF0)>>4;
    fILP = secondByte&0x0F;
    if (fILP > fILL) return False; // invalid
    ++resultSpecialHeaderSize;
  }
#ifdef DEBUG
  fprintf(stderr, "packetSize: %d, ILL: %d, ILP: %d\n", packetSize, fILL, fILP);
#endif
  fFrameIndex = 0; // initially

  // Next, there's a "Payload Table of Contents" (one byte per entry):
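  // (In octet-aligned mode, each TOC entry occupies one byte, laid out per
  //  RFC 4867: F (1 bit, "more entries follow"), FT (4 bits, frame type),
  //  Q (1 bit, frame quality indicator), then 2 padding bits.)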
  unsigned numFramesPresent = 0, numNonEmptyFramesPresent = 0;
  unsigned tocStartIndex = resultSpecialHeaderSize;
  Boolean F;
  do {
    if (resultSpecialHeaderSize >= packetSize) return False;
    unsigned char const tocByte = headerStart[resultSpecialHeaderSize++];
    F = (tocByte&0x80) != 0;
    unsigned char const FT = (tocByte&0x78) >> 3;
#ifdef DEBUG
    unsigned char Q = (tocByte&0x04)>>2;
    fprintf(stderr, "\tTOC entry: F %d, FT %d, Q %d\n", F, FT, Q);
#endif
    ++numFramesPresent;
    if (FT != FT_SPEECH_LOST && FT != FT_NO_DATA) ++numNonEmptyFramesPresent;
  } while (F);
#ifdef DEBUG
  fprintf(stderr, "TOC contains %d entries (%d non-empty)\n", numFramesPresent, numNonEmptyFramesPresent);
#endif

  // Now that we know the size of the TOC, fill in our copy:
  if (numFramesPresent > fTOCSize) {
    delete[] fTOC;
    fTOC = new unsigned char[numFramesPresent];
  }
  fTOCSize = numFramesPresent;
  for (unsigned i = 0; i < fTOCSize; ++i) {
    unsigned char const tocByte = headerStart[tocStartIndex + i];
    fTOC[i] = tocByte&0x7C; // clear everything except the FT and Q fields
  }

  if (fCRCsArePresent) {
    // 'numNonEmptyFramesPresent' CRC bytes will follow.
    // Note: we currently don't check the CRCs for validity #####
    resultSpecialHeaderSize += numNonEmptyFramesPresent;
#ifdef DEBUG
    fprintf(stderr, "Ignoring %d following CRC bytes\n", numNonEmptyFramesPresent);
#endif
    if (resultSpecialHeaderSize > packetSize) return False;
  }
#ifdef DEBUG
  fprintf(stderr, "Total special header size: %d\n", resultSpecialHeaderSize);
#endif

  return True;
}
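
// Worked example (illustrative): an octet-aligned, non-interleaved packet with
// no CRCs, carrying two good 12.2 kbps AMR frames, would begin with a CMR byte,
// then the TOC bytes 0xBC (F=1, FT=7, Q=1) and 0x3C (F=0, FT=7, Q=1), giving a
// special header size of 3; the two 31-byte speech frames follow.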

char const* RawAMRRTPSource::MIMEtype() const {
  return fIsWideband ? "audio/AMR-WB" : "audio/AMR";
}

Boolean RawAMRRTPSource::hasBeenSynchronizedUsingRTCP() {
  return fIsSynchronized;
}


///// AMRBufferedPacket and AMRBufferedPacketFactory implementation

AMRBufferedPacket::AMRBufferedPacket(RawAMRRTPSource& ourSource)
  : fOurSource(ourSource) {
}

AMRBufferedPacket::~AMRBufferedPacket() {
}

// The mapping from the "FT" field to frame size.
// Values of 65535 are invalid.
#define FT_INVALID 65535
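// (These are the octet-aligned speech payload sizes per frame type ("FT")
//  from RFC 4867: for narrowband, FT 0-7 are the 4.75-12.2 kbps modes and
//  FT 8 is SID; for wideband, FT 0-8 are the 6.60-23.85 kbps modes and FT 9
//  is SID.  FT 15 ("no data") - and, for wideband, FT 14 ("speech lost") -
//  carry no payload bytes; the remaining values are not valid frame types.)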
static unsigned short const frameBytesFromFT[16] = {
  12, 13, 15, 17,
  19, 20, 26, 31,
  5, FT_INVALID, FT_INVALID, FT_INVALID,
  FT_INVALID, FT_INVALID, FT_INVALID, 0
};
static unsigned short const frameBytesFromFTWideband[16] = {
  17, 23, 32, 36,
  40, 46, 50, 58,
  60, 5, FT_INVALID, FT_INVALID,
  FT_INVALID, FT_INVALID, 0, 0
};

unsigned AMRBufferedPacket::
  nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize) {
  if (dataSize == 0) return 0; // sanity check

  // The size of the AMR frame is determined by the corresponding 'FT' value
  // in the packet's Table of Contents.
  unsigned const tocIndex = fOurSource.frameIndex();
  if (tocIndex >= fOurSource.TOCSize()) return 0; // sanity check

  unsigned char const tocByte = fOurSource.TOC()[tocIndex];
  unsigned char const FT = (tocByte&0x78) >> 3;
  // ASSERT: FT < 16
  unsigned short frameSize
    = fOurSource.isWideband() ? frameBytesFromFTWideband[FT] : frameBytesFromFT[FT];
  if (frameSize == FT_INVALID) {
    // Strange TOC entry!
    fOurSource.envir() << "AMRBufferedPacket::nextEnclosedFrameSize(): invalid FT: " << FT << "\n";
    frameSize = 0; // This probably messes up the rest of this packet, but...
  }
#ifdef DEBUG
  fprintf(stderr, "AMRBufferedPacket::nextEnclosedFrameSize(): frame #: %d, FT: %d, isWideband: %d => frameSize: %d (dataSize: %d)\n", tocIndex, FT, fOurSource.isWideband(), frameSize, dataSize);
#endif
  ++fOurSource.frameIndex();

  if (dataSize < frameSize) return 0;
  return frameSize;
}

BufferedPacket* AMRBufferedPacketFactory
::createNewPacket(MultiFramedRTPSource* ourSource) {
  return new AMRBufferedPacket((RawAMRRTPSource&)(*ourSource));
}

///////// AMRDeinterleavingBuffer /////////
// (used to implement AMRDeinterleaver)

#define AMR_MAX_FRAME_SIZE 60
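
// This buffer works with two 'banks' of frame descriptors: frames from the
// interleave group currently being received are filed into the 'incoming'
// bank (indexed by their position within the group), while frames from the
// previously completed group are read out of the 'outgoing' bank in order.
// When a packet from a new interleave group arrives, the two banks swap roles.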

class AMRDeinterleavingBuffer {
public:
  AMRDeinterleavingBuffer(unsigned numChannels, unsigned maxInterleaveGroupSize);
  virtual ~AMRDeinterleavingBuffer();

  void deliverIncomingFrame(unsigned frameSize, RawAMRRTPSource* source,
                            struct timeval presentationTime);
  Boolean retrieveFrame(unsigned char* to, unsigned maxSize,
                        unsigned& resultFrameSize, unsigned& resultNumTruncatedBytes,
                        u_int8_t& resultFrameHeader,
                        struct timeval& resultPresentationTime,
                        Boolean& resultIsSynchronized);

  unsigned char* inputBuffer() { return fInputBuffer; }
  unsigned inputBufferSize() const { return AMR_MAX_FRAME_SIZE; }

private:
  unsigned char* createNewBuffer();

  class FrameDescriptor {
  public:
    FrameDescriptor();
    virtual ~FrameDescriptor();

    unsigned frameSize;
    unsigned char* frameData;
    u_int8_t frameHeader;
    struct timeval presentationTime;
    Boolean fIsSynchronized;
  };

  unsigned fNumChannels, fMaxInterleaveGroupSize;
  FrameDescriptor* fFrames[2];
  unsigned char fIncomingBankId; // toggles between 0 and 1
  unsigned char fIncomingBinMax; // in the incoming bank
  unsigned char fOutgoingBinMax; // in the outgoing bank
  unsigned char fNextOutgoingBin;
  Boolean fHaveSeenPackets;
  u_int16_t fLastPacketSeqNumForGroup;
  unsigned char* fInputBuffer;
  struct timeval fLastRetrievedPresentationTime;
  unsigned fNumSuccessiveSyncedFrames;
  unsigned char fILL;
};


////////// AMRDeinterleaver implementation /////////

AMRDeinterleaver* AMRDeinterleaver
::createNew(UsageEnvironment& env,
            Boolean isWideband, unsigned numChannels, unsigned maxInterleaveGroupSize,
            RawAMRRTPSource* inputSource) {
  return new AMRDeinterleaver(env, isWideband, numChannels, maxInterleaveGroupSize, inputSource);
}

AMRDeinterleaver::AMRDeinterleaver(UsageEnvironment& env,
                                   Boolean isWideband, unsigned numChannels,
                                   unsigned maxInterleaveGroupSize,
                                   RawAMRRTPSource* inputSource)
  : AMRAudioSource(env, isWideband, numChannels),
    fInputSource(inputSource), fNeedAFrame(False) {
  fDeinterleavingBuffer
    = new AMRDeinterleavingBuffer(numChannels, maxInterleaveGroupSize);
}

AMRDeinterleaver::~AMRDeinterleaver() {
  delete fDeinterleavingBuffer;
  Medium::close(fInputSource);
}

static unsigned const uSecsPerFrame = 20000; // 20 ms

void AMRDeinterleaver::doGetNextFrame() {
  // First, try getting a frame from the deinterleaving buffer:
  if (fDeinterleavingBuffer->retrieveFrame(fTo, fMaxSize,
                                           fFrameSize, fNumTruncatedBytes,
                                           fLastFrameHeader, fPresentationTime,
                                           fInputSource->isSynchronized())) {

    // Success!
    fNeedAFrame = False;

    fDurationInMicroseconds = uSecsPerFrame;

    // Call our own 'after getting' function.  Because we're not a 'leaf'
    // source, we can call this directly, without risking
    // infinite recursion
    afterGetting(this);
    return;
  }

  // No luck, so ask our source for help:
  fNeedAFrame = True;
  if (!fInputSource->isCurrentlyAwaitingData()) {
    fInputSource->getNextFrame(fDeinterleavingBuffer->inputBuffer(),
                               fDeinterleavingBuffer->inputBufferSize(),
                               afterGettingFrame, this,
                               FramedSource::handleClosure, this);
  }
}

void AMRDeinterleaver::doStopGettingFrames() {
  fNeedAFrame = False;
  fInputSource->stopGettingFrames();
}

void AMRDeinterleaver
::afterGettingFrame(void* clientData, unsigned frameSize,
                    unsigned /*numTruncatedBytes*/,
                    struct timeval presentationTime,
                    unsigned /*durationInMicroseconds*/) {
  AMRDeinterleaver* deinterleaver = (AMRDeinterleaver*)clientData;
  deinterleaver->afterGettingFrame1(frameSize, presentationTime);
}

void AMRDeinterleaver
::afterGettingFrame1(unsigned frameSize, struct timeval presentationTime) {
  RawAMRRTPSource* source = (RawAMRRTPSource*)fInputSource;

  // First, put the frame into our deinterleaving buffer:
  fDeinterleavingBuffer->deliverIncomingFrame(frameSize, source, presentationTime);

  // Then, try delivering a frame to the client (if he wants one):
  if (fNeedAFrame) doGetNextFrame();
}


////////// AMRDeinterleavingBuffer implementation /////////

AMRDeinterleavingBuffer
::AMRDeinterleavingBuffer(unsigned numChannels, unsigned maxInterleaveGroupSize)
  : fNumChannels(numChannels), fMaxInterleaveGroupSize(maxInterleaveGroupSize),
    fIncomingBankId(0), fIncomingBinMax(0),
    fOutgoingBinMax(0), fNextOutgoingBin(0),
    fHaveSeenPackets(False), fNumSuccessiveSyncedFrames(0), fILL(0) {
  // Use two banks of descriptors - one for incoming, one for outgoing
  fFrames[0] = new FrameDescriptor[fMaxInterleaveGroupSize];
  fFrames[1] = new FrameDescriptor[fMaxInterleaveGroupSize];
  fInputBuffer = createNewBuffer();
}

AMRDeinterleavingBuffer::~AMRDeinterleavingBuffer() {
  delete[] fInputBuffer;
  delete[] fFrames[0]; delete[] fFrames[1];
}

void AMRDeinterleavingBuffer
::deliverIncomingFrame(unsigned frameSize, RawAMRRTPSource* source,
                       struct timeval presentationTime) {
  fILL = source->ILL();
  unsigned char const ILP = source->ILP();
  unsigned frameIndex = source->frameIndex();
  unsigned short packetSeqNum = source->curPacketRTPSeqNum();

  // First perform a sanity check on the parameters:
  // (This is overkill, as the source should have already done this.)
  if (ILP > fILL || frameIndex == 0) {
#ifdef DEBUG
    fprintf(stderr, "AMRDeinterleavingBuffer::deliverIncomingFrame() param sanity check failed (%d,%d,%d,%d)\n", frameSize, fILL, ILP, frameIndex);
#endif
    source->envir().internalError();
  }

  --frameIndex; // because it was incremented by the source when this frame was read
  u_int8_t frameHeader;
  if (frameIndex >= source->TOCSize()) { // sanity check
    frameHeader = FT_NO_DATA<<3;
  } else {
    frameHeader = source->TOC()[frameIndex];
  }

  unsigned frameBlockIndex = frameIndex/fNumChannels;
  unsigned frameWithinFrameBlock = frameIndex%fNumChannels;

  // The input "presentationTime" was that of the first frame-block in this
  // packet.  Update it for the current frame:
  unsigned uSecIncrement = frameBlockIndex*(fILL+1)*uSecsPerFrame;
  presentationTime.tv_usec += uSecIncrement;
  presentationTime.tv_sec += presentationTime.tv_usec/1000000;
  presentationTime.tv_usec = presentationTime.tv_usec%1000000;

  // Next, check whether this packet is part of a new interleave group
  if (!fHaveSeenPackets
      || seqNumLT(fLastPacketSeqNumForGroup, packetSeqNum + frameBlockIndex)) {
    // We've moved to a new interleave group
#ifdef DEBUG
    fprintf(stderr, "AMRDeinterleavingBuffer::deliverIncomingFrame(): new interleave group\n");
#endif
    fHaveSeenPackets = True;
    fLastPacketSeqNumForGroup = packetSeqNum + fILL - ILP;

    // Switch the incoming and outgoing banks:
    fIncomingBankId ^= 1;
    unsigned char tmp = fIncomingBinMax;
    fIncomingBinMax = fOutgoingBinMax;
    fOutgoingBinMax = tmp;
    fNextOutgoingBin = 0;
  }

  // Now move the incoming frame into the appropriate bin:
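  // (Per RFC 4867's interleaving scheme, the packet with interleave index ILP
  //  carries the frame-blocks at positions ILP, ILP+(ILL+1), ILP+2*(ILL+1), ...
  //  within the interleave group, so the j'th frame-block in this packet
  //  belongs at group position ILP + j*(ILL+1).  Scaling by the number of
  //  channels, and adding the frame's offset within its frame-block, gives the
  //  frame's bin index in the incoming bank:)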
  unsigned const binNumber
    = ((ILP + frameBlockIndex*(fILL+1))*fNumChannels + frameWithinFrameBlock)
      % fMaxInterleaveGroupSize; // the % is for sanity
#ifdef DEBUG
  fprintf(stderr, "AMRDeinterleavingBuffer::deliverIncomingFrame(): frameIndex %d (%d,%d) put in bank %d, bin %d (%d): size %d, header 0x%02x, presentationTime %lu.%06ld\n", frameIndex, frameBlockIndex, frameWithinFrameBlock, fIncomingBankId, binNumber, fMaxInterleaveGroupSize, frameSize, frameHeader, presentationTime.tv_sec, presentationTime.tv_usec);
#endif
  FrameDescriptor& inBin = fFrames[fIncomingBankId][binNumber];
  unsigned char* curBuffer = inBin.frameData;
  inBin.frameData = fInputBuffer;
  inBin.frameSize = frameSize;
  inBin.frameHeader = frameHeader;
  inBin.presentationTime = presentationTime;
  inBin.fIsSynchronized = ((RTPSource*)source)->RTPSource::hasBeenSynchronizedUsingRTCP();

  if (curBuffer == NULL) curBuffer = createNewBuffer();
  fInputBuffer = curBuffer;

  if (binNumber >= fIncomingBinMax) {
    fIncomingBinMax = binNumber + 1;
  }
}

Boolean AMRDeinterleavingBuffer
::retrieveFrame(unsigned char* to, unsigned maxSize,
                unsigned& resultFrameSize, unsigned& resultNumTruncatedBytes,
                u_int8_t& resultFrameHeader,
                struct timeval& resultPresentationTime,
                Boolean& resultIsSynchronized) {

  if (fNextOutgoingBin >= fOutgoingBinMax) return False; // none left

  FrameDescriptor& outBin = fFrames[fIncomingBankId^1][fNextOutgoingBin];
  unsigned char* fromPtr = outBin.frameData;
  unsigned char fromSize = outBin.frameSize;
  outBin.frameSize = 0; // for the next time this bin is used
  resultIsSynchronized = False; // by default; can be changed by:
  if (outBin.fIsSynchronized) {
    // Don't consider the outgoing frame to be synchronized until we've received at least a complete interleave cycle of
    // synchronized frames.  This ensures that the receiver will be getting all synchronized frames from now on.
    if (++fNumSuccessiveSyncedFrames > fILL) {
      resultIsSynchronized = True;
      fNumSuccessiveSyncedFrames = fILL+1; // prevents overflow
    }
  } else {
    fNumSuccessiveSyncedFrames = 0;
  }

  // Check whether this frame is missing; if so, return a FT_NO_DATA frame:
  if (fromSize == 0) {
    resultFrameHeader = FT_NO_DATA<<3;

    // Compute this erasure frame's presentation time via extrapolation:
    resultPresentationTime = fLastRetrievedPresentationTime;
    resultPresentationTime.tv_usec += uSecsPerFrame;
    if (resultPresentationTime.tv_usec >= 1000000) {
      ++resultPresentationTime.tv_sec;
      resultPresentationTime.tv_usec -= 1000000;
    }
  } else {
    // Normal case - a frame exists:
    resultFrameHeader = outBin.frameHeader;
    resultPresentationTime = outBin.presentationTime;
  }

  fLastRetrievedPresentationTime = resultPresentationTime;

  if (fromSize > maxSize) {
    resultNumTruncatedBytes = fromSize - maxSize;
    resultFrameSize = maxSize;
  } else {
    resultNumTruncatedBytes = 0;
    resultFrameSize = fromSize;
  }
  memmove(to, fromPtr, resultFrameSize);
#ifdef DEBUG
  fprintf(stderr, "AMRDeinterleavingBuffer::retrieveFrame(): from bank %d, bin %d: size %d, header 0x%02x, presentationTime %lu.%06ld\n", fIncomingBankId^1, fNextOutgoingBin, resultFrameSize, resultFrameHeader, resultPresentationTime.tv_sec, resultPresentationTime.tv_usec);
#endif

  ++fNextOutgoingBin;
  return True;
}

unsigned char* AMRDeinterleavingBuffer::createNewBuffer() {
  return new unsigned char[inputBufferSize()];
}

AMRDeinterleavingBuffer::FrameDescriptor::FrameDescriptor()
  : frameSize(0), frameData(NULL) {
}

AMRDeinterleavingBuffer::FrameDescriptor::~FrameDescriptor() {
  delete[] frameData;
}

// Unpack 'bandwidth-efficient' data to 'octet-aligned':
static unsigned short const frameBitsFromFT[16] = {
  95, 103, 118, 134,
  148, 159, 204, 244,
  39, 0, 0, 0,
  0, 0, 0, 0
};
static unsigned short const frameBitsFromFTWideband[16] = {
  132, 177, 253, 285,
  317, 365, 397, 461,
  477, 40, 0, 0,
  0, 0, 0, 0
};

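// (In bandwidth-efficient mode, the payload is a bit string with no octet
//  alignment: a 4-bit CMR, then 6-bit TOC entries (F, FT, Q only), then the
//  speech bits of each frame, concatenated.  The function below re-expands
//  this into the octet-aligned layout - one byte for the header and for each
//  TOC entry, and each frame padded out to a whole number of bytes - so that
//  the rest of this code only ever has to parse octet-aligned packets.)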
static void unpackBandwidthEfficientData(BufferedPacket* packet,
                                         Boolean isWideband) {
#ifdef DEBUG
  fprintf(stderr, "Unpacking 'bandwidth-efficient' payload (%d bytes):\n", packet->dataSize());
  for (unsigned j = 0; j < packet->dataSize(); ++j) {
    fprintf(stderr, "%02x:", (packet->data())[j]);
  }
  fprintf(stderr, "\n");
#endif
  BitVector fromBV(packet->data(), 0, 8*packet->dataSize());

  unsigned const toBufferSize = 2*packet->dataSize(); // conservatively large
  unsigned char* toBuffer = new unsigned char[toBufferSize];
  unsigned toCount = 0;

  // Begin with the payload header:
  unsigned CMR = fromBV.getBits(4);
  toBuffer[toCount++] = CMR << 4;

  // Then, run through and unpack the TOC entries:
  while (1) {
    unsigned toc = fromBV.getBits(6);
    toBuffer[toCount++] = toc << 2;

    if ((toc&0x20) == 0) break; // the F bit is 0
  }

  // Then, using the TOC data, unpack each frame payload:
  unsigned const tocSize = toCount - 1;
  for (unsigned i = 1; i <= tocSize; ++i) {
    unsigned char tocByte = toBuffer[i];
    unsigned char const FT = (tocByte&0x78) >> 3;
    unsigned short frameSizeBits
      = isWideband ? frameBitsFromFTWideband[FT] : frameBitsFromFT[FT];
    unsigned short frameSizeBytes = (frameSizeBits+7)/8;

    if (frameSizeBits > fromBV.numBitsRemaining()) {
#ifdef DEBUG
      fprintf(stderr, "\tWarning: Unpacking frame %d of %d: want %d bits, but only %d are available!\n", i, tocSize, frameSizeBits, fromBV.numBitsRemaining());
#endif
      break;
    }

    shiftBits(&toBuffer[toCount], 0, // to
              packet->data(), fromBV.curBitIndex(), // from
              frameSizeBits // num bits
              );
    fromBV.skipBits(frameSizeBits);
    toCount += frameSizeBytes;
  }

#ifdef DEBUG
  if (fromBV.numBitsRemaining() > 7) {
    fprintf(stderr, "\tWarning: %d bits remain unused!\n", fromBV.numBitsRemaining());
  }
#endif

  // Finally, replace the current packet data with the unpacked data:
  packet->removePadding(packet->dataSize()); // throws away current packet data
  packet->appendData(toBuffer, toCount);
  delete[] toBuffer;
}