1/**********
2This library is free software; you can redistribute it and/or modify it under
3the terms of the GNU Lesser General Public License as published by the
4Free Software Foundation; either version 3 of the License, or (at your
5option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
6
7This library is distributed in the hope that it will be useful, but WITHOUT
8ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
9FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
10more details.
11
12You should have received a copy of the GNU Lesser General Public License
13along with this library; if not, write to the Free Software Foundation, Inc.,
1451 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
15**********/
16// "liveMedia"
17// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
18// A sink that generates a QuickTime file from a composite media session
19// Implementation
20
21#include "QuickTimeFileSink.hh"
22#include "QuickTimeGenericRTPSource.hh"
23#include "GroupsockHelper.hh"
24#include "InputFile.hh"
25#include "OutputFile.hh"
26#include "H263plusVideoRTPSource.hh" // for the special header
27#include "MPEG4GenericRTPSource.hh" //for "samplingFrequencyFromAudioSpecificConfig()"
28#include "MPEG4LATMAudioRTPSource.hh" // for "parseGeneralConfigStr()"
29#include "Base64.hh"
30
31#include <ctype.h>
32
33#define fourChar(x,y,z,w) ( ((x)<<24)|((y)<<16)|((z)<<8)|(w) )
34
35#define H264_IDR_FRAME 0x65 //bit 8 == 0, bits 7-6 (ref) == 3, bits 5-0 (type) == 5
36
37////////// SubsessionIOState, ChunkDescriptor ///////////
38// A structure used to represent the I/O state of each input 'subsession':
39
40class ChunkDescriptor {
41public:
42 ChunkDescriptor(int64_t offsetInFile, unsigned size,
43 unsigned frameSize, unsigned frameDuration,
44 struct timeval presentationTime);
45
46 ChunkDescriptor* extendChunk(int64_t newOffsetInFile, unsigned newSize,
47 unsigned newFrameSize,
48 unsigned newFrameDuration,
49 struct timeval newPresentationTime);
50 // this may end up allocating a new chunk instead
51public:
52 ChunkDescriptor* fNextChunk;
53 int64_t fOffsetInFile;
54 unsigned fNumFrames;
55 unsigned fFrameSize;
56 unsigned fFrameDuration;
57 struct timeval fPresentationTime; // of the start of the data
58};
59
60class SubsessionBuffer {
61public:
62 SubsessionBuffer(unsigned bufferSize)
63 : fBufferSize(bufferSize) {
64 reset();
65 fData = new unsigned char[bufferSize];
66 }
67 virtual ~SubsessionBuffer() { delete[] fData; }
68 void reset() { fBytesInUse = 0; }
69 void addBytes(unsigned numBytes) { fBytesInUse += numBytes; }
70
71 unsigned char* dataStart() { return &fData[0]; }
72 unsigned char* dataEnd() { return &fData[fBytesInUse]; }
73 unsigned bytesInUse() const { return fBytesInUse; }
74 unsigned bytesAvailable() const { return fBufferSize - fBytesInUse; }
75
76 void setPresentationTime(struct timeval const& presentationTime) {
77 fPresentationTime = presentationTime;
78 }
79 struct timeval const& presentationTime() const {return fPresentationTime;}
80
81private:
82 unsigned fBufferSize;
83 struct timeval fPresentationTime;
84 unsigned char* fData;
85 unsigned fBytesInUse;
86};
87
88class SyncFrame {
89public:
90 SyncFrame(unsigned frameNum);
91
92public:
93 class SyncFrame *nextSyncFrame;
94 unsigned sfFrameNum;
95};
96
97// A 64-bit counter, used below:
98class Count64 {
99public:
100 Count64()
101 : hi(0), lo(0) {
102 }
103
104 void operator+=(unsigned arg);
105
106 u_int32_t hi, lo;
107};
108
109class SubsessionIOState {
110public:
111 SubsessionIOState(QuickTimeFileSink& sink, MediaSubsession& subsession);
112 virtual ~SubsessionIOState();
113
114 Boolean setQTstate();
115 void setFinalQTstate();
116
117 void afterGettingFrame(unsigned packetDataSize,
118 struct timeval presentationTime);
119 void onSourceClosure();
120
121 Boolean syncOK(struct timeval presentationTime);
122 // returns true iff data is usable despite a sync check
123
124 static void setHintTrack(SubsessionIOState* hintedTrack,
125 SubsessionIOState* hintTrack);
126 Boolean isHintTrack() const { return fTrackHintedByUs != NULL; }
127 Boolean hasHintTrack() const { return fHintTrackForUs != NULL; }
128
129 UsageEnvironment& envir() const { return fOurSink.envir(); }
130
131public:
132 static unsigned fCurrentTrackNumber;
133 unsigned fTrackID;
134 SubsessionIOState* fHintTrackForUs; SubsessionIOState* fTrackHintedByUs;
135
136 SubsessionBuffer *fBuffer, *fPrevBuffer;
137 QuickTimeFileSink& fOurSink;
138 MediaSubsession& fOurSubsession;
139
140 unsigned short fLastPacketRTPSeqNum;
141 Boolean fOurSourceIsActive;
142
143 Boolean fHaveBeenSynced; // used in synchronizing with other streams
144 struct timeval fSyncTime;
145
146 Boolean fQTEnableTrack;
147 unsigned fQTcomponentSubtype;
148 char const* fQTcomponentName;
149 typedef unsigned (QuickTimeFileSink::*atomCreationFunc)();
150 atomCreationFunc fQTMediaInformationAtomCreator;
151 atomCreationFunc fQTMediaDataAtomCreator;
152 char const* fQTAudioDataType;
153 unsigned short fQTSoundSampleVersion;
154 unsigned fQTTimeScale;
155 unsigned fQTTimeUnitsPerSample;
156 unsigned fQTBytesPerFrame;
157 unsigned fQTSamplesPerFrame;
158 // These next fields are derived from the ones above,
159 // plus the information from each chunk:
160 unsigned fQTTotNumSamples;
161 unsigned fQTDurationM; // in media time units
162 unsigned fQTDurationT; // in track time units
163 int64_t fTKHD_durationPosn;
164 // position of the duration in the output 'tkhd' atom
165 unsigned fQTInitialOffsetDuration;
166 // if there's a pause at the beginning
167
168 ChunkDescriptor *fHeadChunk, *fTailChunk;
169 unsigned fNumChunks;
170 SyncFrame *fHeadSyncFrame, *fTailSyncFrame;
171
172 // Counters to be used in the hint track's 'udta'/'hinf' atom;
173 struct hinf {
174 Count64 trpy;
175 Count64 nump;
176 Count64 tpyl;
177 // Is 'maxr' needed? Computing this would be a PITA. #####
178 Count64 dmed;
179 Count64 dimm;
180 // 'drep' is always 0
181 // 'tmin' and 'tmax' are always 0
182 unsigned pmax;
183 unsigned dmax;
184 } fHINF;
185
186private:
187 void useFrame(SubsessionBuffer& buffer);
188 void useFrameForHinting(unsigned frameSize,
189 struct timeval presentationTime,
190 unsigned startSampleNumber);
191
192 // used by the above two routines:
193 unsigned useFrame1(unsigned sourceDataSize,
194 struct timeval presentationTime,
195 unsigned frameDuration, int64_t destFileOffset);
196 // returns the number of samples in this data
197
198private:
199 // A structure used for temporarily storing frame state:
200 struct {
201 unsigned frameSize;
202 struct timeval presentationTime;
203 int64_t destFileOffset; // used for non-hint tracks only
204
205 // The remaining fields are used for hint tracks only:
206 unsigned startSampleNumber;
207 unsigned short seqNum;
208 unsigned rtpHeader;
209 unsigned char numSpecialHeaders; // used when our RTP source has special headers
210 unsigned specialHeaderBytesLength; // ditto
211 unsigned char specialHeaderBytes[SPECIAL_HEADER_BUFFER_SIZE]; // ditto
212 unsigned packetSizes[256];
213 } fPrevFrameState;
214};
215
216
217////////// QuickTimeFileSink implementation //////////
218
219QuickTimeFileSink::QuickTimeFileSink(UsageEnvironment& env,
220 MediaSession& inputSession,
221 char const* outputFileName,
222 unsigned bufferSize,
223 unsigned short movieWidth,
224 unsigned short movieHeight,
225 unsigned movieFPS,
226 Boolean packetLossCompensate,
227 Boolean syncStreams,
228 Boolean generateHintTracks,
229 Boolean generateMP4Format)
230 : Medium(env), fInputSession(inputSession),
231 fBufferSize(bufferSize), fPacketLossCompensate(packetLossCompensate),
232 fSyncStreams(syncStreams), fGenerateMP4Format(generateMP4Format),
233 fAreCurrentlyBeingPlayed(False),
234 fLargestRTPtimestampFrequency(0),
235 fNumSubsessions(0), fNumSyncedSubsessions(0),
236 fHaveCompletedOutputFile(False),
237 fMovieWidth(movieWidth), fMovieHeight(movieHeight),
238 fMovieFPS(movieFPS), fMaxTrackDurationM(0) {
239 fOutFid = OpenOutputFile(env, outputFileName);
240 if (fOutFid == NULL) return;
241
242 fNewestSyncTime.tv_sec = fNewestSyncTime.tv_usec = 0;
243 fFirstDataTime.tv_sec = fFirstDataTime.tv_usec = (unsigned)(~0);
244
245 // Set up I/O state for each input subsession:
246 MediaSubsessionIterator iter(fInputSession);
247 MediaSubsession* subsession;
248 while ((subsession = iter.next()) != NULL) {
249 // Ignore subsessions without a data source:
250 FramedSource* subsessionSource = subsession->readSource();
251 if (subsessionSource == NULL) continue;
252
253 // If "subsession's" SDP description specified screen dimension
254 // or frame rate parameters, then use these. (Note that this must
255 // be done before the call to "setQTState()" below.)
256 if (subsession->videoWidth() != 0) {
257 fMovieWidth = subsession->videoWidth();
258 }
259 if (subsession->videoHeight() != 0) {
260 fMovieHeight = subsession->videoHeight();
261 }
262 if (subsession->videoFPS() != 0) {
263 fMovieFPS = subsession->videoFPS();
264 }
265
266 SubsessionIOState* ioState
267 = new SubsessionIOState(*this, *subsession);
268 if (ioState == NULL || !ioState->setQTstate()) {
269 // We're not able to output a QuickTime track for this subsession
270 delete ioState; ioState = NULL;
271 continue;
272 }
273 subsession->miscPtr = (void*)ioState;
274
275 if (generateHintTracks) {
276 // Also create a hint track for this track:
277 SubsessionIOState* hintTrack
278 = new SubsessionIOState(*this, *subsession);
279 SubsessionIOState::setHintTrack(ioState, hintTrack);
280 if (!hintTrack->setQTstate()) {
281 delete hintTrack;
282 SubsessionIOState::setHintTrack(ioState, NULL);
283 }
284 }
285
286 // Also set a 'BYE' handler for this subsession's RTCP instance:
287 if (subsession->rtcpInstance() != NULL) {
288 subsession->rtcpInstance()->setByeHandler(onRTCPBye, ioState);
289 }
290
291 unsigned rtpTimestampFrequency = subsession->rtpTimestampFrequency();
292 if (rtpTimestampFrequency > fLargestRTPtimestampFrequency) {
293 fLargestRTPtimestampFrequency = rtpTimestampFrequency;
294 }
295
296 ++fNumSubsessions;
297 }
298
299 // Use the current time as the file's creation and modification
300 // time. Use Apple's time format: seconds (UTC) since January 1, 1904
301
302 gettimeofday(&fStartTime, NULL);
303 fAppleCreationTime = fStartTime.tv_sec - 0x83da4f80;
304
305 // Begin by writing a "mdat" atom at the start of the file.
306 // (Later, when we've finished copying data to the file, we'll come
307 // back and fill in its size.)
308 fMDATposition = TellFile64(fOutFid);
309 addAtomHeader64("mdat");
310 // add 64Bit offset
311 fMDATposition += 8;
312}
313
314QuickTimeFileSink::~QuickTimeFileSink() {
315 completeOutputFile();
316
317 // Then, stop streaming and delete each active "SubsessionIOState":
318 MediaSubsessionIterator iter(fInputSession);
319 MediaSubsession* subsession;
320 while ((subsession = iter.next()) != NULL) {
321 if (subsession->readSource() != NULL) subsession->readSource()->stopGettingFrames();
322
323 SubsessionIOState* ioState
324 = (SubsessionIOState*)(subsession->miscPtr);
325 if (ioState == NULL) continue;
326
327 delete ioState->fHintTrackForUs; // if any
328 delete ioState;
329 }
330
331 // Finally, close our output file:
332 CloseOutputFile(fOutFid);
333}
334
335QuickTimeFileSink*
336QuickTimeFileSink::createNew(UsageEnvironment& env,
337 MediaSession& inputSession,
338 char const* outputFileName,
339 unsigned bufferSize,
340 unsigned short movieWidth,
341 unsigned short movieHeight,
342 unsigned movieFPS,
343 Boolean packetLossCompensate,
344 Boolean syncStreams,
345 Boolean generateHintTracks,
346 Boolean generateMP4Format) {
347 QuickTimeFileSink* newSink =
348 new QuickTimeFileSink(env, inputSession, outputFileName, bufferSize, movieWidth, movieHeight, movieFPS,
349 packetLossCompensate, syncStreams, generateHintTracks, generateMP4Format);
350 if (newSink == NULL || newSink->fOutFid == NULL) {
351 Medium::close(newSink);
352 return NULL;
353 }
354
355 return newSink;
356}
357
358void QuickTimeFileSink
359::noteRecordedFrame(MediaSubsession& /*inputSubsession*/,
360 unsigned /*packetDataSize*/, struct timeval const& /*presentationTime*/) {
361 // Default implementation: Do nothing
362}
363
364Boolean QuickTimeFileSink::startPlaying(afterPlayingFunc* afterFunc,
365 void* afterClientData) {
366 // Make sure we're not already being played:
367 if (fAreCurrentlyBeingPlayed) {
368 envir().setResultMsg("This sink has already been played");
369 return False;
370 }
371
372 fAreCurrentlyBeingPlayed = True;
373 fAfterFunc = afterFunc;
374 fAfterClientData = afterClientData;
375
376 return continuePlaying();
377}
378
379Boolean QuickTimeFileSink::continuePlaying() {
380 // Run through each of our input session's 'subsessions',
381 // asking for a frame from each one:
382 Boolean haveActiveSubsessions = False;
383 MediaSubsessionIterator iter(fInputSession);
384 MediaSubsession* subsession;
385 while ((subsession = iter.next()) != NULL) {
386 FramedSource* subsessionSource = subsession->readSource();
387 if (subsessionSource == NULL) continue;
388
389 if (subsessionSource->isCurrentlyAwaitingData()) continue;
390
391 SubsessionIOState* ioState
392 = (SubsessionIOState*)(subsession->miscPtr);
393 if (ioState == NULL) continue;
394
395 haveActiveSubsessions = True;
396 unsigned char* toPtr = ioState->fBuffer->dataEnd();
397 unsigned toSize = ioState->fBuffer->bytesAvailable();
398 subsessionSource->getNextFrame(toPtr, toSize,
399 afterGettingFrame, ioState,
400 onSourceClosure, ioState);
401 }
402 if (!haveActiveSubsessions) {
403 envir().setResultMsg("No subsessions are currently active");
404 return False;
405 }
406
407 return True;
408}
409
410void QuickTimeFileSink
411::afterGettingFrame(void* clientData, unsigned packetDataSize,
412 unsigned numTruncatedBytes,
413 struct timeval presentationTime,
414 unsigned /*durationInMicroseconds*/) {
415 SubsessionIOState* ioState = (SubsessionIOState*)clientData;
416 if (!ioState->syncOK(presentationTime)) {
417 // Ignore this data:
418 ioState->fOurSink.continuePlaying();
419 return;
420 }
421 if (numTruncatedBytes > 0) {
422 ioState->envir() << "QuickTimeFileSink::afterGettingFrame(): The input frame data was too large for our buffer. "
423 << numTruncatedBytes
424 << " bytes of trailing data was dropped! Correct this by increasing the \"bufferSize\" parameter in the \"createNew()\" call.\n";
425 }
426 ioState->afterGettingFrame(packetDataSize, presentationTime);
427}
428
429void QuickTimeFileSink::onSourceClosure(void* clientData) {
430 SubsessionIOState* ioState = (SubsessionIOState*)clientData;
431 ioState->onSourceClosure();
432}
433
434void QuickTimeFileSink::onSourceClosure1() {
435 // Check whether *all* of the subsession sources have closed.
436 // If not, do nothing for now:
437 MediaSubsessionIterator iter(fInputSession);
438 MediaSubsession* subsession;
439 while ((subsession = iter.next()) != NULL) {
440 SubsessionIOState* ioState
441 = (SubsessionIOState*)(subsession->miscPtr);
442 if (ioState == NULL) continue;
443
444 if (ioState->fOurSourceIsActive) return; // this source hasn't closed
445 }
446
447 completeOutputFile();
448
449 // Call our specified 'after' function:
450 if (fAfterFunc != NULL) {
451 (*fAfterFunc)(fAfterClientData);
452 }
453}
454
455void QuickTimeFileSink::onRTCPBye(void* clientData) {
456 SubsessionIOState* ioState = (SubsessionIOState*)clientData;
457
458 struct timeval timeNow;
459 gettimeofday(&timeNow, NULL);
460 unsigned secsDiff
461 = timeNow.tv_sec - ioState->fOurSink.fStartTime.tv_sec;
462
463 MediaSubsession& subsession = ioState->fOurSubsession;
464 ioState->envir() << "Received RTCP \"BYE\" on \""
465 << subsession.mediumName()
466 << "/" << subsession.codecName()
467 << "\" subsession (after "
468 << secsDiff << " seconds)\n";
469
470 // Handle the reception of a RTCP "BYE" as if the source had closed:
471 ioState->onSourceClosure();
472}
473
474static Boolean timevalGE(struct timeval const& tv1,
475 struct timeval const& tv2) {
476 return (unsigned)tv1.tv_sec > (unsigned)tv2.tv_sec
477 || (tv1.tv_sec == tv2.tv_sec
478 && (unsigned)tv1.tv_usec >= (unsigned)tv2.tv_usec);
479}
480
481void QuickTimeFileSink::completeOutputFile() {
482 if (fHaveCompletedOutputFile || fOutFid == NULL) return;
483
484 // Begin by filling in the initial "mdat" atom with the current
485 // file size:
486 int64_t curFileSize = TellFile64(fOutFid);
487 setWord64(fMDATposition, (u_int64_t)curFileSize);
488
489 // Then, note the time of the first received data:
490 MediaSubsessionIterator iter(fInputSession);
491 MediaSubsession* subsession;
492 while ((subsession = iter.next()) != NULL) {
493 SubsessionIOState* ioState
494 = (SubsessionIOState*)(subsession->miscPtr);
495 if (ioState == NULL) continue;
496
497 ChunkDescriptor* const headChunk = ioState->fHeadChunk;
498 if (headChunk != NULL
499 && timevalGE(fFirstDataTime, headChunk->fPresentationTime)) {
500 fFirstDataTime = headChunk->fPresentationTime;
501 }
502 }
503
504 // Then, update the QuickTime-specific state for each active track:
505 iter.reset();
506 while ((subsession = iter.next()) != NULL) {
507 SubsessionIOState* ioState
508 = (SubsessionIOState*)(subsession->miscPtr);
509 if (ioState == NULL) continue;
510
511 ioState->setFinalQTstate();
512 // Do the same for a hint track (if any):
513 if (ioState->hasHintTrack()) {
514 ioState->fHintTrackForUs->setFinalQTstate();
515 }
516 }
517
518 if (fGenerateMP4Format) {
519 // Begin with a "ftyp" atom:
520 addAtom_ftyp();
521 }
522
523 // Then, add a "moov" atom for the file metadata:
524 addAtom_moov();
525
526 // We're done:
527 fHaveCompletedOutputFile = True;
528}
529
530
531////////// SubsessionIOState, ChunkDescriptor implementation ///////////
532
533unsigned SubsessionIOState::fCurrentTrackNumber = 0;
534
535SubsessionIOState::SubsessionIOState(QuickTimeFileSink& sink,
536 MediaSubsession& subsession)
537 : fHintTrackForUs(NULL), fTrackHintedByUs(NULL),
538 fOurSink(sink), fOurSubsession(subsession),
539 fLastPacketRTPSeqNum(0), fHaveBeenSynced(False), fQTTotNumSamples(0),
540 fHeadChunk(NULL), fTailChunk(NULL), fNumChunks(0),
541 fHeadSyncFrame(NULL), fTailSyncFrame(NULL) {
542 fTrackID = ++fCurrentTrackNumber;
543
544 fBuffer = new SubsessionBuffer(fOurSink.fBufferSize);
545 fPrevBuffer = sink.fPacketLossCompensate
546 ? new SubsessionBuffer(fOurSink.fBufferSize) : NULL;
547
548 FramedSource* subsessionSource = subsession.readSource();
549 fOurSourceIsActive = subsessionSource != NULL;
550
551 fPrevFrameState.presentationTime.tv_sec = 0;
552 fPrevFrameState.presentationTime.tv_usec = 0;
553 fPrevFrameState.seqNum = 0;
554}
555
556SubsessionIOState::~SubsessionIOState() {
557 delete fBuffer; delete fPrevBuffer;
558
559 // Delete the list of chunk descriptors:
560 ChunkDescriptor* chunk = fHeadChunk;
561 while (chunk != NULL) {
562 ChunkDescriptor* next = chunk->fNextChunk;
563 delete chunk;
564 chunk = next;
565 }
566
567 // Delete the list of sync frames:
568 SyncFrame* syncFrame = fHeadSyncFrame;
569 while (syncFrame != NULL) {
570 SyncFrame* next = syncFrame->nextSyncFrame;
571 delete syncFrame;
572 syncFrame = next;
573 }
574}
575
576Boolean SubsessionIOState::setQTstate() {
577 char const* noCodecWarning1 = "Warning: We don't implement a QuickTime ";
578 char const* noCodecWarning2 = " Media Data Type for the \"";
579 char const* noCodecWarning3 = "\" track, so we'll insert a dummy \"????\" Media Data Atom instead. A separate, codec-specific editing pass will be needed before this track can be played.\n";
580
581 do {
582 fQTEnableTrack = True; // enable this track in the movie by default
583 fQTTimeScale = fOurSubsession.rtpTimestampFrequency(); // by default
584 fQTTimeUnitsPerSample = 1; // by default
585 fQTBytesPerFrame = 0;
586 // by default - indicates that the whole packet data is a frame
587 fQTSamplesPerFrame = 1; // by default
588
589 // Make sure our subsession's medium is one that we know how to
590 // represent in a QuickTime file:
591 if (isHintTrack()) {
592 // Hint tracks are treated specially
593 fQTEnableTrack = False; // hint tracks are marked as inactive
594 fQTcomponentSubtype = fourChar('h','i','n','t');
595 fQTcomponentName = "hint media handler";
596 fQTMediaInformationAtomCreator = &QuickTimeFileSink::addAtom_gmhd;
597 fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_rtp;
598 } else if (strcmp(fOurSubsession.mediumName(), "audio") == 0) {
599 fQTcomponentSubtype = fourChar('s','o','u','n');
600 fQTcomponentName = "Apple Sound Media Handler";
601 fQTMediaInformationAtomCreator = &QuickTimeFileSink::addAtom_smhd;
602 fQTMediaDataAtomCreator
603 = &QuickTimeFileSink::addAtom_soundMediaGeneral; // by default
604 fQTSoundSampleVersion = 0; // by default
605
606 // Make sure that our subsession's codec is one that we can handle:
607 if (strcmp(fOurSubsession.codecName(), "X-QT") == 0 ||
608 strcmp(fOurSubsession.codecName(), "X-QUICKTIME") == 0) {
609 fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_genericMedia;
610 } else if (strcmp(fOurSubsession.codecName(), "PCMU") == 0) {
611 fQTAudioDataType = "ulaw";
612 fQTBytesPerFrame = 1;
613 } else if (strcmp(fOurSubsession.codecName(), "GSM") == 0) {
614 fQTAudioDataType = "agsm";
615 fQTBytesPerFrame = 33;
616 fQTSamplesPerFrame = 160;
617 } else if (strcmp(fOurSubsession.codecName(), "PCMA") == 0) {
618 fQTAudioDataType = "alaw";
619 fQTBytesPerFrame = 1;
620 } else if (strcmp(fOurSubsession.codecName(), "QCELP") == 0) {
621 fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_Qclp;
622 fQTSamplesPerFrame = 160;
623 } else if (strcmp(fOurSubsession.codecName(), "MPEG4-GENERIC") == 0 ||
624 strcmp(fOurSubsession.codecName(), "MP4A-LATM") == 0) {
625 fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_mp4a;
626 fQTTimeUnitsPerSample = 1024; // QT considers each frame to be a 'sample'
627 // The time scale (frequency) comes from the 'config' information.
628 // It might be different from the RTP timestamp frequency (e.g., aacPlus).
629 unsigned frequencyFromConfig
630 = samplingFrequencyFromAudioSpecificConfig(fOurSubsession.fmtp_config());
631 if (frequencyFromConfig != 0) fQTTimeScale = frequencyFromConfig;
632 } else {
633 envir() << noCodecWarning1 << "Audio" << noCodecWarning2
634 << fOurSubsession.codecName() << noCodecWarning3;
635 fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_dummy;
636 fQTEnableTrack = False; // disable this track in the movie
637 }
638 } else if (strcmp(fOurSubsession.mediumName(), "video") == 0) {
639 fQTcomponentSubtype = fourChar('v','i','d','e');
640 fQTcomponentName = "Apple Video Media Handler";
641 fQTMediaInformationAtomCreator = &QuickTimeFileSink::addAtom_vmhd;
642
643 // Make sure that our subsession's codec is one that we can handle:
644 if (strcmp(fOurSubsession.codecName(), "X-QT") == 0 ||
645 strcmp(fOurSubsession.codecName(), "X-QUICKTIME") == 0) {
646 fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_genericMedia;
647 } else if (strcmp(fOurSubsession.codecName(), "H263-1998") == 0 ||
648 strcmp(fOurSubsession.codecName(), "H263-2000") == 0) {
649 fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_h263;
650 fQTTimeScale = 600;
651 fQTTimeUnitsPerSample = fQTTimeScale/fOurSink.fMovieFPS;
652 } else if (strcmp(fOurSubsession.codecName(), "H264") == 0) {
653 fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_avc1;
654 fQTTimeScale = 600;
655 fQTTimeUnitsPerSample = fQTTimeScale/fOurSink.fMovieFPS;
656 } else if (strcmp(fOurSubsession.codecName(), "MP4V-ES") == 0) {
657 fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_mp4v;
658 fQTTimeScale = 600;
659 fQTTimeUnitsPerSample = fQTTimeScale/fOurSink.fMovieFPS;
660 } else {
661 envir() << noCodecWarning1 << "Video" << noCodecWarning2
662 << fOurSubsession.codecName() << noCodecWarning3;
663 fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_dummy;
664 fQTEnableTrack = False; // disable this track in the movie
665 }
666 } else {
667 envir() << "Warning: We don't implement a QuickTime Media Handler for media type \""
668 << fOurSubsession.mediumName() << "\"";
669 break;
670 }
671
672#ifdef QT_SUPPORT_PARTIALLY_ONLY
673 envir() << "Warning: We don't have sufficient codec-specific information (e.g., sample sizes) to fully generate the \""
674 << fOurSubsession.mediumName() << "/" << fOurSubsession.codecName()
675 << "\" track, so we'll disable this track in the movie. A separate, codec-specific editing pass will be needed before this track can be played\n";
676 fQTEnableTrack = False; // disable this track in the movie
677#endif
678
679 return True;
680 } while (0);
681
682 envir() << ", so a track for the \"" << fOurSubsession.mediumName()
683 << "/" << fOurSubsession.codecName()
684 << "\" subsession will not be included in the output QuickTime file\n";
685 return False;
686}
687
688void SubsessionIOState::setFinalQTstate() {
689 // Compute derived parameters, by running through the list of chunks:
690 fQTDurationT = 0;
691
692 ChunkDescriptor* chunk = fHeadChunk;
693 while (chunk != NULL) {
694 unsigned const numFrames = chunk->fNumFrames;
695 unsigned const dur = numFrames*chunk->fFrameDuration;
696 fQTDurationT += dur;
697
698 chunk = chunk->fNextChunk;
699 }
700
701 // Convert this duration from track to movie time scale:
702 double scaleFactor = fOurSink.movieTimeScale()/(double)fQTTimeScale;
703 fQTDurationM = (unsigned)(fQTDurationT*scaleFactor);
704
705 if (fQTDurationM > fOurSink.fMaxTrackDurationM) {
706 fOurSink.fMaxTrackDurationM = fQTDurationM;
707 }
708}
709
710void SubsessionIOState::afterGettingFrame(unsigned packetDataSize,
711 struct timeval presentationTime) {
712 // Begin by checking whether there was a gap in the RTP stream.
713 // If so, try to compensate for this (if desired):
714 if (fOurSubsession.rtpSource() != NULL) { // we have a RTP stream
715 unsigned short rtpSeqNum
716 = fOurSubsession.rtpSource()->curPacketRTPSeqNum();
717 if (fOurSink.fPacketLossCompensate && fPrevBuffer->bytesInUse() > 0) {
718 short seqNumGap = rtpSeqNum - fLastPacketRTPSeqNum;
719 for (short i = 1; i < seqNumGap; ++i) {
720 // Insert a copy of the previous frame, to compensate for the loss:
721 useFrame(*fPrevBuffer);
722 }
723 }
724 fLastPacketRTPSeqNum = rtpSeqNum;
725 }
726
727 // Now, continue working with the frame that we just got
728 fOurSink.noteRecordedFrame(fOurSubsession, packetDataSize, presentationTime);
729
730 if (fBuffer->bytesInUse() == 0) {
731 fBuffer->setPresentationTime(presentationTime);
732 }
733 fBuffer->addBytes(packetDataSize);
734
735 // If our RTP source is a "QuickTimeGenericRTPSource", then
736 // use its 'qtState' to set some parameters that we need:
737 if (fOurSubsession.rtpSource() != NULL // we have a RTP stream
738 && fQTMediaDataAtomCreator == &QuickTimeFileSink::addAtom_genericMedia) {
739 QuickTimeGenericRTPSource* rtpSource
740 = (QuickTimeGenericRTPSource*)fOurSubsession.rtpSource();
741 QuickTimeGenericRTPSource::QTState& qtState = rtpSource->qtState;
742 fQTTimeScale = qtState.timescale;
743 if (qtState.width != 0) {
744 fOurSink.fMovieWidth = qtState.width;
745 }
746 if (qtState.height != 0) {
747 fOurSink.fMovieHeight = qtState.height;
748 }
749
750 // Also, if the media type in the "sdAtom" is one that we recognize
751 // to have a special parameters, then fix this here:
752 if (qtState.sdAtomSize >= 8) {
753 char const* atom = qtState.sdAtom;
754 unsigned mediaType = fourChar(atom[4],atom[5],atom[6],atom[7]);
755 switch (mediaType) {
756 case fourChar('a','g','s','m'): {
757 fQTBytesPerFrame = 33;
758 fQTSamplesPerFrame = 160;
759 break;
760 }
761 case fourChar('Q','c','l','p'): {
762 fQTBytesPerFrame = 35;
763 fQTSamplesPerFrame = 160;
764 break;
765 }
766 case fourChar('H','c','l','p'): {
767 fQTBytesPerFrame = 17;
768 fQTSamplesPerFrame = 160;
769 break;
770 }
771 case fourChar('h','2','6','3'): {
772 fQTTimeUnitsPerSample = fQTTimeScale/fOurSink.fMovieFPS;
773 break;
774 }
775 }
776 }
777 } else if (fQTMediaDataAtomCreator == &QuickTimeFileSink::addAtom_Qclp) {
778 // For QCELP data, make a note of the frame size (even though it's the
779 // same as the packet data size), because it varies depending on the
780 // 'rate' of the stream, and this size gets used later when setting up
781 // the 'Qclp' QuickTime atom:
782 fQTBytesPerFrame = packetDataSize;
783 }
784
785 useFrame(*fBuffer);
786 if (fOurSink.fPacketLossCompensate) {
787 // Save this frame, in case we need it for recovery:
788 SubsessionBuffer* tmp = fPrevBuffer; // assert: != NULL
789 fPrevBuffer = fBuffer;
790 fBuffer = tmp;
791 }
792 fBuffer->reset(); // for the next input
793
794 // Now, try getting more frames:
795 fOurSink.continuePlaying();
796}
797
798void SubsessionIOState::useFrame(SubsessionBuffer& buffer) {
799 unsigned char* const frameSource = buffer.dataStart();
800 unsigned const frameSize = buffer.bytesInUse();
801 struct timeval const& presentationTime = buffer.presentationTime();
802 int64_t const destFileOffset = TellFile64(fOurSink.fOutFid);
803 unsigned sampleNumberOfFrameStart = fQTTotNumSamples + 1;
804 Boolean avcHack = fQTMediaDataAtomCreator == &QuickTimeFileSink::addAtom_avc1;
805
806 // If we're not syncing streams, or this subsession is not video, then
807 // just give this frame a fixed duration:
808 if (!fOurSink.fSyncStreams
809 || fQTcomponentSubtype != fourChar('v','i','d','e')) {
810 unsigned const frameDuration = fQTTimeUnitsPerSample*fQTSamplesPerFrame;
811 unsigned frameSizeToUse = frameSize;
812 if (avcHack) frameSizeToUse += 4; // H.264/AVC gets the frame size prefix
813
814 fQTTotNumSamples += useFrame1(frameSizeToUse, presentationTime, frameDuration, destFileOffset);
815 } else {
816 // For synced video streams, we use the difference between successive
817 // frames' presentation times as the 'frame duration'. So, record
818 // information about the *previous* frame:
819 struct timeval const& ppt = fPrevFrameState.presentationTime; //abbrev
820 if (ppt.tv_sec != 0 || ppt.tv_usec != 0) {
821 // There has been a previous frame.
822 double duration = (presentationTime.tv_sec - ppt.tv_sec)
823 + (presentationTime.tv_usec - ppt.tv_usec)/1000000.0;
824 if (duration < 0.0) duration = 0.0;
825 unsigned frameDuration
826 = (unsigned)((2*duration*fQTTimeScale+1)/2); // round
827 unsigned frameSizeToUse = fPrevFrameState.frameSize;
828 if (avcHack) frameSizeToUse += 4; // H.264/AVC gets the frame size prefix
829
830 unsigned numSamples
831 = useFrame1(frameSizeToUse, ppt, frameDuration, fPrevFrameState.destFileOffset);
832 fQTTotNumSamples += numSamples;
833 sampleNumberOfFrameStart = fQTTotNumSamples + 1;
834 }
835
836 if (avcHack && (*frameSource == H264_IDR_FRAME)) {
837 SyncFrame* newSyncFrame = new SyncFrame(fQTTotNumSamples + 1);
838 if (fTailSyncFrame == NULL) {
839 fHeadSyncFrame = newSyncFrame;
840 } else {
841 fTailSyncFrame->nextSyncFrame = newSyncFrame;
842 }
843 fTailSyncFrame = newSyncFrame;
844 }
845
846 // Remember the current frame for next time:
847 fPrevFrameState.frameSize = frameSize;
848 fPrevFrameState.presentationTime = presentationTime;
849 fPrevFrameState.destFileOffset = destFileOffset;
850 }
851
852 if (avcHack) fOurSink.addWord(frameSize);
853
854 // Write the data into the file:
855 fwrite(frameSource, 1, frameSize, fOurSink.fOutFid);
856
857 // If we have a hint track, then write to it also (only if we have a RTP stream):
858 if (hasHintTrack() && fOurSubsession.rtpSource() != NULL) {
859 // Because presentation times are used for RTP packet timestamps,
860 // we don't starting writing to the hint track until we've been synced:
861 if (!fHaveBeenSynced) {
862 fHaveBeenSynced = fOurSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP();
863 }
864 if (fHaveBeenSynced) {
865 fHintTrackForUs->useFrameForHinting(frameSize, presentationTime,
866 sampleNumberOfFrameStart);
867 }
868 }
869}
870
871void SubsessionIOState::useFrameForHinting(unsigned frameSize,
872 struct timeval presentationTime,
873 unsigned startSampleNumber) {
874 // At this point, we have a single, combined frame - not individual packets.
875 // For the hint track, we need to split the frame back up into separate packets.
876 // However, for some RTP sources, then we also need to reuse the special
877 // header bytes that were at the start of each of the RTP packets.
878 Boolean hack263 = strcmp(fOurSubsession.codecName(), "H263-1998") == 0;
879 Boolean hackm4a_generic = strcmp(fOurSubsession.mediumName(), "audio") == 0
880 && strcmp(fOurSubsession.codecName(), "MPEG4-GENERIC") == 0;
881 Boolean hackm4a_latm = strcmp(fOurSubsession.mediumName(), "audio") == 0
882 && strcmp(fOurSubsession.codecName(), "MP4A-LATM") == 0;
883 Boolean hackm4a = hackm4a_generic || hackm4a_latm;
884 Boolean haveSpecialHeaders = (hack263 || hackm4a_generic);
885
886 // If there has been a previous frame, then output a 'hint sample' for it.
887 // (We use the current frame's presentation time to compute the previous
888 // hint sample's duration.)
889 RTPSource* const rs = fOurSubsession.rtpSource(); // abbrev (ASSERT: != NULL)
890 struct timeval const& ppt = fPrevFrameState.presentationTime; //abbrev
891 if (ppt.tv_sec != 0 || ppt.tv_usec != 0) {
892 double duration = (presentationTime.tv_sec - ppt.tv_sec)
893 + (presentationTime.tv_usec - ppt.tv_usec)/1000000.0;
894 if (duration < 0.0) duration = 0.0;
895 unsigned msDuration = (unsigned)(duration*1000); // milliseconds
896 if (msDuration > fHINF.dmax) fHINF.dmax = msDuration;
897 unsigned hintSampleDuration
898 = (unsigned)((2*duration*fQTTimeScale+1)/2); // round
899 if (hackm4a) {
900 // Because multiple AAC frames can appear in a RTP packet, the presentation
901 // times of the second and subsequent frames will not be accurate.
902 // So, use the known "hintSampleDuration" instead:
903 hintSampleDuration = fTrackHintedByUs->fQTTimeUnitsPerSample;
904
905 // Also, if the 'time scale' was different from the RTP timestamp frequency,
906 // (as can happen with aacPlus), then we need to scale "hintSampleDuration"
907 // accordingly:
908 if (fTrackHintedByUs->fQTTimeScale != fOurSubsession.rtpTimestampFrequency()) {
909 unsigned const scalingFactor
910 = fOurSubsession.rtpTimestampFrequency()/fTrackHintedByUs->fQTTimeScale ;
911 hintSampleDuration *= scalingFactor;
912 }
913 }
914
915 int64_t const hintSampleDestFileOffset = TellFile64(fOurSink.fOutFid);
916
917 unsigned const maxPacketSize = 1450;
918 unsigned short numPTEntries
919 = (fPrevFrameState.frameSize + (maxPacketSize-1))/maxPacketSize; // normal case
920 unsigned char* immediateDataPtr = NULL;
921 unsigned immediateDataBytesRemaining = 0;
922 if (haveSpecialHeaders) { // special case
923 numPTEntries = fPrevFrameState.numSpecialHeaders;
924 immediateDataPtr = fPrevFrameState.specialHeaderBytes;
925 immediateDataBytesRemaining
926 = fPrevFrameState.specialHeaderBytesLength;
927 }
928 unsigned hintSampleSize
929 = fOurSink.addHalfWord(numPTEntries);// Entry count
930 hintSampleSize += fOurSink.addHalfWord(0x0000); // Reserved
931
932 unsigned offsetWithinSample = 0;
933 for (unsigned i = 0; i < numPTEntries; ++i) {
934 // Output a Packet Table entry (representing a single RTP packet):
935 unsigned short numDTEntries = 1;
936 unsigned short seqNum = fPrevFrameState.seqNum++;
937 // Note: This assumes that the input stream had no packets lost #####
938 unsigned rtpHeader = fPrevFrameState.rtpHeader;
939 if (i+1 < numPTEntries) {
940 // This is not the last RTP packet, so clear the marker bit:
941 rtpHeader &=~ (1<<23);
942 }
943 unsigned dataFrameSize = (i+1 < numPTEntries)
944 ? maxPacketSize : fPrevFrameState.frameSize - i*maxPacketSize; // normal case
945 unsigned sampleNumber = fPrevFrameState.startSampleNumber;
946
947 unsigned char immediateDataLen = 0;
948 if (haveSpecialHeaders) { // special case
949 ++numDTEntries; // to include a Data Table entry for the special hdr
950 if (immediateDataBytesRemaining > 0) {
951 if (hack263) {
952 immediateDataLen = *immediateDataPtr++;
953 --immediateDataBytesRemaining;
954 if (immediateDataLen > immediateDataBytesRemaining) {
955 // shouldn't happen (length byte was bad)
956 immediateDataLen = immediateDataBytesRemaining;
957 }
958 } else {
959 immediateDataLen = fPrevFrameState.specialHeaderBytesLength;
960 }
961 }
962 dataFrameSize = fPrevFrameState.packetSizes[i] - immediateDataLen;
963
964 if (hack263) {
965 Boolean PbitSet
966 = immediateDataLen >= 1 && (immediateDataPtr[0]&0x4) != 0;
967 if (PbitSet) {
968 offsetWithinSample += 2; // to omit the two leading 0 bytes
969 }
970 }
971 }
972
973 // Output the Packet Table:
974 hintSampleSize += fOurSink.addWord(0); // Relative transmission time
975 hintSampleSize += fOurSink.addWord(rtpHeader|seqNum);
976 // RTP header info + RTP sequence number
977 hintSampleSize += fOurSink.addHalfWord(0x0000); // Flags
978 hintSampleSize += fOurSink.addHalfWord(numDTEntries); // Entry count
979 unsigned totalPacketSize = 0;
980
981 // Output the Data Table:
982 if (haveSpecialHeaders) {
983 // use the "Immediate Data" format (1):
984 hintSampleSize += fOurSink.addByte(1); // Source
985 unsigned char len = immediateDataLen > 14 ? 14 : immediateDataLen;
986 hintSampleSize += fOurSink.addByte(len); // Length
987 totalPacketSize += len; fHINF.dimm += len;
988 unsigned char j;
989 for (j = 0; j < len; ++j) {
990 hintSampleSize += fOurSink.addByte(immediateDataPtr[j]); // Data
991 }
992 for (j = len; j < 14; ++j) {
993 hintSampleSize += fOurSink.addByte(0); // Data (padding)
994 }
995
996 immediateDataPtr += immediateDataLen;
997 immediateDataBytesRemaining -= immediateDataLen;
998 }
999 // use the "Sample Data" format (2):
1000 hintSampleSize += fOurSink.addByte(2); // Source
1001 hintSampleSize += fOurSink.addByte(0); // Track ref index
1002 hintSampleSize += fOurSink.addHalfWord(dataFrameSize); // Length
1003 totalPacketSize += dataFrameSize; fHINF.dmed += dataFrameSize;
1004 hintSampleSize += fOurSink.addWord(sampleNumber); // Sample number
1005 hintSampleSize += fOurSink.addWord(offsetWithinSample); // Offset
1006 // Get "bytes|samples per compression block" from the hinted track:
1007 unsigned short const bytesPerCompressionBlock
1008 = fTrackHintedByUs->fQTBytesPerFrame;
1009 unsigned short const samplesPerCompressionBlock
1010 = fTrackHintedByUs->fQTSamplesPerFrame;
1011 hintSampleSize += fOurSink.addHalfWord(bytesPerCompressionBlock);
1012 hintSampleSize += fOurSink.addHalfWord(samplesPerCompressionBlock);
1013
1014 offsetWithinSample += dataFrameSize;// for the next iteration (if any)
1015
1016 // Tally statistics for this packet:
1017 fHINF.nump += 1;
1018 fHINF.tpyl += totalPacketSize;
1019 totalPacketSize += 12; // add in the size of the RTP header
1020 fHINF.trpy += totalPacketSize;
1021 if (totalPacketSize > fHINF.pmax) fHINF.pmax = totalPacketSize;
1022 }
1023
1024 // Make note of this completed hint sample frame:
1025 fQTTotNumSamples += useFrame1(hintSampleSize, ppt, hintSampleDuration,
1026 hintSampleDestFileOffset);
1027 }
1028
1029 // Remember this frame for next time:
1030 fPrevFrameState.frameSize = frameSize;
1031 fPrevFrameState.presentationTime = presentationTime;
1032 fPrevFrameState.startSampleNumber = startSampleNumber;
1033 fPrevFrameState.rtpHeader
1034 = rs->curPacketMarkerBit()<<23
1035 | (rs->rtpPayloadFormat()&0x7F)<<16;
1036 if (hack263) {
1037 H263plusVideoRTPSource* rs_263 = (H263plusVideoRTPSource*)rs;
1038 fPrevFrameState.numSpecialHeaders = rs_263->fNumSpecialHeaders;
1039 fPrevFrameState.specialHeaderBytesLength = rs_263->fSpecialHeaderBytesLength;
1040 unsigned i;
1041 for (i = 0; i < rs_263->fSpecialHeaderBytesLength; ++i) {
1042 fPrevFrameState.specialHeaderBytes[i] = rs_263->fSpecialHeaderBytes[i];
1043 }
1044 for (i = 0; i < rs_263->fNumSpecialHeaders; ++i) {
1045 fPrevFrameState.packetSizes[i] = rs_263->fPacketSizes[i];
1046 }
1047 } else if (hackm4a_generic) {
1048 // Synthesize a special header, so that this frame can be in its own RTP packet.
1049 unsigned const sizeLength = fOurSubsession.attrVal_unsigned("sizelength");
1050 unsigned const indexLength = fOurSubsession.attrVal_unsigned("indexlength");
1051 if (sizeLength + indexLength != 16) {
1052 envir() << "Warning: unexpected 'sizeLength' " << sizeLength
1053 << " and 'indexLength' " << indexLength
1054 << "seen when creating hint track\n";
1055 }
1056 fPrevFrameState.numSpecialHeaders = 1;
1057 fPrevFrameState.specialHeaderBytesLength = 4;
1058 fPrevFrameState.specialHeaderBytes[0] = 0; // AU_headers_length (high byte)
1059 fPrevFrameState.specialHeaderBytes[1] = 16; // AU_headers_length (low byte)
1060 fPrevFrameState.specialHeaderBytes[2] = ((frameSize<<indexLength)&0xFF00)>>8;
1061 fPrevFrameState.specialHeaderBytes[3] = (frameSize<<indexLength);
1062 fPrevFrameState.packetSizes[0]
1063 = fPrevFrameState.specialHeaderBytesLength + frameSize;
1064 }
1065}
1066
1067unsigned SubsessionIOState::useFrame1(unsigned sourceDataSize,
1068 struct timeval presentationTime,
1069 unsigned frameDuration,
1070 int64_t destFileOffset) {
1071 // Figure out the actual frame size for this data:
1072 unsigned frameSize = fQTBytesPerFrame;
1073 if (frameSize == 0) {
1074 // The entire packet data is assumed to be a frame:
1075 frameSize = sourceDataSize;
1076 }
1077 unsigned const numFrames = sourceDataSize/frameSize;
1078 unsigned const numSamples = numFrames*fQTSamplesPerFrame;
1079
1080 // Record the information about which 'chunk' this data belongs to:
1081 ChunkDescriptor* newTailChunk;
1082 if (fTailChunk == NULL) {
1083 newTailChunk = fHeadChunk
1084 = new ChunkDescriptor(destFileOffset, sourceDataSize,
1085 frameSize, frameDuration, presentationTime);
1086 } else {
1087 newTailChunk = fTailChunk->extendChunk(destFileOffset, sourceDataSize,
1088 frameSize, frameDuration,
1089 presentationTime);
1090 }
1091 if (newTailChunk != fTailChunk) {
1092 // This data created a new chunk, rather than extending the old one
1093 ++fNumChunks;
1094 fTailChunk = newTailChunk;
1095 }
1096
1097 return numSamples;
1098}
1099
1100void SubsessionIOState::onSourceClosure() {
1101 fOurSourceIsActive = False;
1102 fOurSink.onSourceClosure1();
1103}
1104
1105Boolean SubsessionIOState::syncOK(struct timeval presentationTime) {
1106 QuickTimeFileSink& s = fOurSink; // abbreviation
1107 if (!s.fSyncStreams || fOurSubsession.rtpSource() == NULL) return True; // we don't care
1108
1109 if (s.fNumSyncedSubsessions < s.fNumSubsessions) {
1110 // Not all subsessions have yet been synced. Check whether ours was
1111 // one of the unsynced ones, and, if so, whether it is now synced:
1112 if (!fHaveBeenSynced) {
1113 // We weren't synchronized before
1114 if (fOurSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
1115 // H264 ?
1116 if (fQTMediaDataAtomCreator == &QuickTimeFileSink::addAtom_avc1) {
1117 // special case: audio + H264 video: wait until audio is in sync
1118 if ((s.fNumSubsessions == 2) && (s.fNumSyncedSubsessions < (s.fNumSubsessions - 1))) return False;
1119
1120 // if audio is in sync, wait for the next IDR frame to start
1121 unsigned char* const frameSource = fBuffer->dataStart();
1122 if (*frameSource != H264_IDR_FRAME) return False;
1123 }
1124 // But now we are
1125 fHaveBeenSynced = True;
1126 fSyncTime = presentationTime;
1127 ++s.fNumSyncedSubsessions;
1128
1129 if (timevalGE(fSyncTime, s.fNewestSyncTime)) {
1130 s.fNewestSyncTime = fSyncTime;
1131 }
1132 }
1133 }
1134 }
1135
1136 // Check again whether all subsessions have been synced:
1137 if (s.fNumSyncedSubsessions < s.fNumSubsessions) return False;
1138
1139 // Allow this data if it is more recent than the newest sync time:
1140 return timevalGE(presentationTime, s.fNewestSyncTime);
1141}
1142
1143void SubsessionIOState::setHintTrack(SubsessionIOState* hintedTrack,
1144 SubsessionIOState* hintTrack) {
1145 if (hintedTrack != NULL) hintedTrack->fHintTrackForUs = hintTrack;
1146 if (hintTrack != NULL) hintTrack->fTrackHintedByUs = hintedTrack;
1147}
1148
1149SyncFrame::SyncFrame(unsigned frameNum)
1150 : nextSyncFrame(NULL), sfFrameNum(frameNum) {
1151}
1152
1153void Count64::operator+=(unsigned arg) {
1154 unsigned newLo = lo + arg;
1155 if (newLo < lo) { // lo has overflowed
1156 ++hi;
1157 }
1158 lo = newLo;
1159}
1160
1161ChunkDescriptor
1162::ChunkDescriptor(int64_t offsetInFile, unsigned size,
1163 unsigned frameSize, unsigned frameDuration,
1164 struct timeval presentationTime)
1165 : fNextChunk(NULL), fOffsetInFile(offsetInFile),
1166 fNumFrames(size/frameSize),
1167 fFrameSize(frameSize), fFrameDuration(frameDuration),
1168 fPresentationTime(presentationTime) {
1169}
1170
1171ChunkDescriptor* ChunkDescriptor
1172::extendChunk(int64_t newOffsetInFile, unsigned newSize,
1173 unsigned newFrameSize, unsigned newFrameDuration,
1174 struct timeval newPresentationTime) {
1175 // First, check whether the new space is just at the end of this
1176 // existing chunk:
1177 if (newOffsetInFile == fOffsetInFile + fNumFrames*fFrameSize) {
1178 // We can extend this existing chunk, provided that the frame size
1179 // and frame duration have not changed:
1180 if (newFrameSize == fFrameSize && newFrameDuration == fFrameDuration) {
1181 fNumFrames += newSize/fFrameSize;
1182 return this;
1183 }
1184 }
1185
1186 // We'll allocate a new ChunkDescriptor, and link it to the end of us:
1187 ChunkDescriptor* newDescriptor
1188 = new ChunkDescriptor(newOffsetInFile, newSize,
1189 newFrameSize, newFrameDuration,
1190 newPresentationTime);
1191
1192 fNextChunk = newDescriptor;
1193
1194 return newDescriptor;
1195}
1196
1197
1198////////// QuickTime-specific implementation //////////
1199
1200unsigned QuickTimeFileSink::addWord64(u_int64_t word) {
1201 addByte((unsigned char)(word>>56)); addByte((unsigned char)(word>>48));
1202 addByte((unsigned char)(word>>40)); addByte((unsigned char)(word>>32));
1203 addByte((unsigned char)(word>>24)); addByte((unsigned char)(word>>16));
1204 addByte((unsigned char)(word>>8)); addByte((unsigned char)(word));
1205
1206 return 8;
1207}
1208
1209unsigned QuickTimeFileSink::addWord(unsigned word) {
1210 addByte(word>>24); addByte(word>>16);
1211 addByte(word>>8); addByte(word);
1212
1213 return 4;
1214}
1215
1216unsigned QuickTimeFileSink::addHalfWord(unsigned short halfWord) {
1217 addByte((unsigned char)(halfWord>>8)); addByte((unsigned char)halfWord);
1218
1219 return 2;
1220}
1221
1222unsigned QuickTimeFileSink::addZeroWords(unsigned numWords) {
1223 for (unsigned i = 0; i < numWords; ++i) {
1224 addWord(0);
1225 }
1226
1227 return numWords*4;
1228}
1229
1230unsigned QuickTimeFileSink::add4ByteString(char const* str) {
1231 addByte(str[0]); addByte(str[1]); addByte(str[2]); addByte(str[3]);
1232
1233 return 4;
1234}
1235
1236unsigned QuickTimeFileSink::addArbitraryString(char const* str,
1237 Boolean oneByteLength) {
1238 unsigned size = 0;
1239 if (oneByteLength) {
1240 // Begin with a byte containing the string length:
1241 unsigned strLength = strlen(str);
1242 if (strLength >= 256) {
1243 envir() << "QuickTimeFileSink::addArbitraryString(\""
1244 << str << "\") saw string longer than we know how to handle ("
1245 << strLength << "\n";
1246 }
1247 size += addByte((unsigned char)strLength);
1248 }
1249
1250 while (*str != '\0') {
1251 size += addByte(*str++);
1252 }
1253
1254 return size;
1255}
1256
1257unsigned QuickTimeFileSink::addAtomHeader(char const* atomName) {
1258 // Output a placeholder for the 4-byte size:
1259 addWord(0);
1260
1261 // Output the 4-byte atom name:
1262 add4ByteString(atomName);
1263
1264 return 8;
1265}
1266
1267unsigned QuickTimeFileSink::addAtomHeader64(char const* atomName) {
1268 // Output 64Bit size marker
1269 addWord(1);
1270
1271 // Output the 4-byte atom name:
1272 add4ByteString(atomName);
1273
1274 addWord64(0);
1275
1276 return 16;
1277}
1278
1279void QuickTimeFileSink::setWord(int64_t filePosn, unsigned size) {
1280 do {
1281 if (SeekFile64(fOutFid, filePosn, SEEK_SET) < 0) break;
1282 addWord(size);
1283 if (SeekFile64(fOutFid, 0, SEEK_END) < 0) break; // go back to where we were
1284
1285 return;
1286 } while (0);
1287
1288 // One of the SeekFile64()s failed, probable because we're not a seekable file
1289 envir() << "QuickTimeFileSink::setWord(): SeekFile64 failed (err "
1290 << envir().getErrno() << ")\n";
1291}
1292
1293void QuickTimeFileSink::setWord64(int64_t filePosn, u_int64_t size) {
1294 do {
1295 if (SeekFile64(fOutFid, filePosn, SEEK_SET) < 0) break;
1296 addWord64(size);
1297 if (SeekFile64(fOutFid, 0, SEEK_END) < 0) break; // go back to where we were
1298
1299 return;
1300 } while (0);
1301
1302 // One of the SeekFile64()s failed, probable because we're not a seekable file
1303 envir() << "QuickTimeFileSink::setWord64(): SeekFile64 failed (err "
1304 << envir().getErrno() << ")\n";
1305}
1306
1307// Methods for writing particular atoms. Note the following macros:
1308
1309#define addAtom(name) \
1310 unsigned QuickTimeFileSink::addAtom_##name() { \
1311 int64_t initFilePosn = TellFile64(fOutFid); \
1312 unsigned size = addAtomHeader("" #name "")
1313
1314#define addAtomEnd \
1315 setWord(initFilePosn, size); \
1316 return size; \
1317}
1318
1319addAtom(ftyp);
1320 size += add4ByteString("mp42");
1321 size += addWord(0x00000000);
1322 size += add4ByteString("mp42");
1323 size += add4ByteString("isom");
1324addAtomEnd;
1325
1326addAtom(moov);
1327 size += addAtom_mvhd();
1328
1329 if (fGenerateMP4Format) {
1330 size += addAtom_iods();
1331 }
1332
1333 // Add a 'trak' atom for each subsession:
1334 // (For some unknown reason, QuickTime Player (5.0 at least)
1335 // doesn't display the movie correctly unless the audio track
1336 // (if present) appears before the video track. So ensure this here.)
1337 MediaSubsessionIterator iter(fInputSession);
1338 MediaSubsession* subsession;
1339 while ((subsession = iter.next()) != NULL) {
1340 fCurrentIOState = (SubsessionIOState*)(subsession->miscPtr);
1341 if (fCurrentIOState == NULL) continue;
1342 if (strcmp(subsession->mediumName(), "audio") != 0) continue;
1343
1344 size += addAtom_trak();
1345
1346 if (fCurrentIOState->hasHintTrack()) {
1347 // This track has a hint track; output it also:
1348 fCurrentIOState = fCurrentIOState->fHintTrackForUs;
1349 size += addAtom_trak();
1350 }
1351 }
1352 iter.reset();
1353 while ((subsession = iter.next()) != NULL) {
1354 fCurrentIOState = (SubsessionIOState*)(subsession->miscPtr);
1355 if (fCurrentIOState == NULL) continue;
1356 if (strcmp(subsession->mediumName(), "audio") == 0) continue;
1357
1358 size += addAtom_trak();
1359
1360 if (fCurrentIOState->hasHintTrack()) {
1361 // This track has a hint track; output it also:
1362 fCurrentIOState = fCurrentIOState->fHintTrackForUs;
1363 size += addAtom_trak();
1364 }
1365 }
1366addAtomEnd;
1367
1368addAtom(mvhd);
1369 size += addWord(0x00000000); // Version + Flags
1370 size += addWord(fAppleCreationTime); // Creation time
1371 size += addWord(fAppleCreationTime); // Modification time
1372
1373 // For the "Time scale" field, use the largest RTP timestamp frequency
1374 // that we saw in any of the subsessions.
1375 size += addWord(movieTimeScale()); // Time scale
1376
1377 unsigned const duration = fMaxTrackDurationM;
1378 fMVHD_durationPosn = TellFile64(fOutFid);
1379 size += addWord(duration); // Duration
1380
1381 size += addWord(0x00010000); // Preferred rate
1382 size += addWord(0x01000000); // Preferred volume + Reserved[0]
1383 size += addZeroWords(2); // Reserved[1-2]
1384 size += addWord(0x00010000); // matrix top left corner
1385 size += addZeroWords(3); // matrix
1386 size += addWord(0x00010000); // matrix center
1387 size += addZeroWords(3); // matrix
1388 size += addWord(0x40000000); // matrix bottom right corner
1389 size += addZeroWords(6); // various time fields
1390 size += addWord(SubsessionIOState::fCurrentTrackNumber+1);// Next track ID
1391addAtomEnd;
1392
1393addAtom(iods);
1394 size += addWord(0x00000000); // Version + Flags
1395 size += addWord(0x10808080);
1396 size += addWord(0x07004FFF);
1397 size += addWord(0xFF0FFFFF);
1398addAtomEnd;
1399
1400addAtom(trak);
1401 size += addAtom_tkhd();
1402
1403 // If we're synchronizing the media streams (or are a hint track),
1404 // add an edit list that helps do this:
1405 if (fCurrentIOState->fHeadChunk != NULL
1406 && (fSyncStreams || fCurrentIOState->isHintTrack())) {
1407 size += addAtom_edts();
1408 }
1409
1410 // If we're generating a hint track, add a 'tref' atom:
1411 if (fCurrentIOState->isHintTrack()) size += addAtom_tref();
1412
1413 size += addAtom_mdia();
1414
1415 // If we're generating a hint track, add a 'udta' atom:
1416 if (fCurrentIOState->isHintTrack()) size += addAtom_udta();
1417addAtomEnd;
1418
1419addAtom(tkhd);
1420 if (fCurrentIOState->fQTEnableTrack) {
1421 size += addWord(0x0000000F); // Version + Flags
1422 } else {
1423 // Disable this track in the movie:
1424 size += addWord(0x00000000); // Version + Flags
1425 }
1426 size += addWord(fAppleCreationTime); // Creation time
1427 size += addWord(fAppleCreationTime); // Modification time
1428 size += addWord(fCurrentIOState->fTrackID); // Track ID
1429 size += addWord(0x00000000); // Reserved
1430
1431 unsigned const duration = fCurrentIOState->fQTDurationM; // movie units
1432 fCurrentIOState->fTKHD_durationPosn = TellFile64(fOutFid);
1433 size += addWord(duration); // Duration
1434 size += addZeroWords(3); // Reserved+Layer+Alternate grp
1435 size += addWord(0x01000000); // Volume + Reserved
1436 size += addWord(0x00010000); // matrix top left corner
1437 size += addZeroWords(3); // matrix
1438 size += addWord(0x00010000); // matrix center
1439 size += addZeroWords(3); // matrix
1440 size += addWord(0x40000000); // matrix bottom right corner
1441 if (strcmp(fCurrentIOState->fOurSubsession.mediumName(), "video") == 0) {
1442 size += addWord(fMovieWidth<<16); // Track width
1443 size += addWord(fMovieHeight<<16); // Track height
1444 } else {
1445 size += addZeroWords(2); // not video: leave width and height fields zero
1446 }
1447addAtomEnd;
1448
1449addAtom(edts);
1450 size += addAtom_elst();
1451addAtomEnd;
1452
1453#define addEdit1(duration,trackPosition) do { \
1454 unsigned trackDuration \
1455 = (unsigned) ((2*(duration)*movieTimeScale()+1)/2); \
1456 /* in movie time units */ \
1457 size += addWord(trackDuration); /* Track duration */ \
1458 totalDurationOfEdits += trackDuration; \
1459 size += addWord(trackPosition); /* Media time */ \
1460 size += addWord(0x00010000); /* Media rate (1x) */ \
1461 ++numEdits; \
1462} while (0)
1463#define addEdit(duration) addEdit1((duration),editTrackPosition)
1464#define addEmptyEdit(duration) addEdit1((duration),(~0))
1465
1466addAtom(elst);
1467 size += addWord(0x00000000); // Version + Flags
1468
1469 // Add a dummy "Number of entries" field
1470 // (and remember its position). We'll fill this field in later:
1471 int64_t numEntriesPosition = TellFile64(fOutFid);
1472 size += addWord(0); // dummy for "Number of entries"
1473 unsigned numEdits = 0;
1474 unsigned totalDurationOfEdits = 0; // in movie time units
1475
1476 // Run through our chunks, looking at their presentation times.
1477 // From these, figure out the edits that need to be made to keep
1478 // the track media data in sync with the presentation times.
1479
1480 double const syncThreshold = 0.1; // 100 ms
1481 // don't allow the track to get out of sync by more than this

  struct timeval editStartTime = fFirstDataTime;
  unsigned editTrackPosition = 0;
  unsigned currentTrackPosition = 0;
  double trackDurationOfEdit = 0.0;
  unsigned chunkDuration = 0;

  ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
  while (chunk != NULL) {
    struct timeval const& chunkStartTime = chunk->fPresentationTime;
    double movieDurationOfEdit
      = (chunkStartTime.tv_sec - editStartTime.tv_sec)
      + (chunkStartTime.tv_usec - editStartTime.tv_usec)/1000000.0;
    trackDurationOfEdit = (currentTrackPosition-editTrackPosition)
      / (double)(fCurrentIOState->fQTTimeScale);

    double outOfSync = movieDurationOfEdit - trackDurationOfEdit;

    if (outOfSync > syncThreshold) {
      // The track's data is too short, so end this edit, add a new
      // 'empty' edit after it, and start a new edit
      // (at the current track posn.):
      if (trackDurationOfEdit > 0.0) addEdit(trackDurationOfEdit);
      addEmptyEdit(outOfSync);

      editStartTime = chunkStartTime;
      editTrackPosition = currentTrackPosition;
    } else if (outOfSync < -syncThreshold) {
      // The track's data is too long, so end this edit, and start
      // a new edit (pointing at the current track posn.):
      if (movieDurationOfEdit > 0.0) addEdit(movieDurationOfEdit);

      editStartTime = chunkStartTime;
      editTrackPosition = currentTrackPosition;
    }

    // Note the duration of this chunk:
    unsigned numChannels = fCurrentIOState->fOurSubsession.numChannels();
    chunkDuration = chunk->fNumFrames*chunk->fFrameDuration/numChannels;
    currentTrackPosition += chunkDuration;

    chunk = chunk->fNextChunk;
  }

  // Write out the final edit
  trackDurationOfEdit
    += (double)chunkDuration/fCurrentIOState->fQTTimeScale;
  if (trackDurationOfEdit > 0.0) addEdit(trackDurationOfEdit);

  // Now go back and fill in the "Number of entries" field:
  setWord(numEntriesPosition, numEdits);

  // Also, if the sum of all of the edit durations exceeds the
  // track duration that we already computed (from sample durations),
  // then reset the track duration to this new value:
  if (totalDurationOfEdits > fCurrentIOState->fQTDurationM) {
    fCurrentIOState->fQTDurationM = totalDurationOfEdits;
    setWord(fCurrentIOState->fTKHD_durationPosn, totalDurationOfEdits);

    // Also, check whether the overall movie duration needs to change:
    if (totalDurationOfEdits > fMaxTrackDurationM) {
      fMaxTrackDurationM = totalDurationOfEdits;
      setWord(fMVHD_durationPosn, totalDurationOfEdits);
    }

    // Also, convert to track time scale:
    double scaleFactor
      = fCurrentIOState->fQTTimeScale/(double)movieTimeScale();
    fCurrentIOState->fQTDurationT
      = (unsigned)(totalDurationOfEdits*scaleFactor);
  }
addAtomEnd;

addAtom(tref);
  size += addAtom_hint();
addAtomEnd;

addAtom(hint);
  SubsessionIOState* hintedTrack = fCurrentIOState->fTrackHintedByUs;
    // Assert: hintedTrack != NULL
  size += addWord(hintedTrack->fTrackID);
addAtomEnd;

addAtom(mdia);
  size += addAtom_mdhd();
  size += addAtom_hdlr();
  size += addAtom_minf();
addAtomEnd;

addAtom(mdhd);
  size += addWord(0x00000000); // Version + Flags
  size += addWord(fAppleCreationTime); // Creation time
  size += addWord(fAppleCreationTime); // Modification time

  unsigned const timeScale = fCurrentIOState->fQTTimeScale;
  size += addWord(timeScale); // Time scale

  unsigned const duration = fCurrentIOState->fQTDurationT; // track units
  size += addWord(duration); // Duration

  size += addWord(0x00000000); // Language+Quality
addAtomEnd;

addAtom(hdlr);
  size += addWord(0x00000000); // Version + Flags
  size += add4ByteString("mhlr"); // Component type
  size += addWord(fCurrentIOState->fQTcomponentSubtype);
    // Component subtype
  size += add4ByteString("appl"); // Component manufacturer
  size += addWord(0x00000000); // Component flags
  size += addWord(0x00000000); // Component flags mask
  size += addArbitraryString(fCurrentIOState->fQTcomponentName);
    // Component name
addAtomEnd;

addAtom(minf);
  SubsessionIOState::atomCreationFunc mediaInformationAtomCreator
    = fCurrentIOState->fQTMediaInformationAtomCreator;
  size += (this->*mediaInformationAtomCreator)();
  size += addAtom_hdlr2();
  size += addAtom_dinf();
  size += addAtom_stbl();
addAtomEnd;

addAtom(smhd);
  size += addZeroWords(2); // Version+Flags+Balance+Reserved
addAtomEnd;

addAtom(vmhd);
  size += addWord(0x00000001); // Version + Flags
  size += addWord(0x00408000); // Graphics mode + Opcolor[red]
  size += addWord(0x80008000); // Opcolor[green] + Opcolor[blue]
addAtomEnd;

addAtom(gmhd);
  size += addAtom_gmin();
addAtomEnd;

addAtom(gmin);
  size += addWord(0x00000000); // Version + Flags
  // The following fields probably aren't used for hint tracks, so just
  // use values that I've seen in other files:
  size += addWord(0x00408000); // Graphics mode + Opcolor (1st 2 bytes)
  size += addWord(0x80008000); // Opcolor (last 4 bytes)
  size += addWord(0x00000000); // Balance + Reserved
addAtomEnd;

unsigned QuickTimeFileSink::addAtom_hdlr2() {
  int64_t initFilePosn = TellFile64(fOutFid);
  unsigned size = addAtomHeader("hdlr");
  size += addWord(0x00000000); // Version + Flags
  size += add4ByteString("dhlr"); // Component type
  size += add4ByteString("alis"); // Component subtype
  size += add4ByteString("appl"); // Component manufacturer
  size += addZeroWords(2); // Component flags+Component flags mask
  size += addArbitraryString("Apple Alias Data Handler"); // Component name
addAtomEnd;

addAtom(dinf);
  size += addAtom_dref();
addAtomEnd;

addAtom(dref);
  size += addWord(0x00000000); // Version + Flags
  size += addWord(0x00000001); // Number of entries
  size += addAtom_alis();
addAtomEnd;

addAtom(alis);
  size += addWord(0x00000001); // Version + Flags
addAtomEnd;

addAtom(stbl);
  size += addAtom_stsd();
  size += addAtom_stts();
  if (fCurrentIOState->fQTcomponentSubtype == fourChar('v','i','d','e')) {
    size += addAtom_stss(); // only for video streams
  }
  size += addAtom_stsc();
  size += addAtom_stsz();
  size += addAtom_co64();
addAtomEnd;

addAtom(stsd);
  size += addWord(0x00000000); // Version+Flags
  size += addWord(0x00000001); // Number of entries
  SubsessionIOState::atomCreationFunc mediaDataAtomCreator
    = fCurrentIOState->fQTMediaDataAtomCreator;
  size += (this->*mediaDataAtomCreator)();
addAtomEnd;

unsigned QuickTimeFileSink::addAtom_genericMedia() {
  int64_t initFilePosn = TellFile64(fOutFid);

  // Our source is assumed to be a "QuickTimeGenericRTPSource"
  // Use its "sdAtom" state for our contents:
  QuickTimeGenericRTPSource* rtpSource = (QuickTimeGenericRTPSource*)
    fCurrentIOState->fOurSubsession.rtpSource();
  unsigned size = 0;
  if (rtpSource != NULL) {
    QuickTimeGenericRTPSource::QTState& qtState = rtpSource->qtState;
    char const* from = qtState.sdAtom;
    size = qtState.sdAtomSize;
    for (unsigned i = 0; i < size; ++i) addByte(from[i]);
  }
addAtomEnd;

unsigned QuickTimeFileSink::addAtom_soundMediaGeneral() {
  int64_t initFilePosn = TellFile64(fOutFid);
  unsigned size = addAtomHeader(fCurrentIOState->fQTAudioDataType);

// General sample description fields:
  size += addWord(0x00000000); // Reserved
  size += addWord(0x00000001); // Reserved+Data reference index
// Sound sample description fields:
  unsigned short const version = fCurrentIOState->fQTSoundSampleVersion;
  size += addWord(version<<16); // Version+Revision level
  size += addWord(0x00000000); // Vendor
  unsigned short numChannels
    = (unsigned short)(fCurrentIOState->fOurSubsession.numChannels());
  size += addHalfWord(numChannels); // Number of channels
  size += addHalfWord(0x0010); // Sample size
  // size += addWord(0x00000000); // Compression ID+Packet size
  size += addWord(0xfffe0000); // Compression ID+Packet size #####

  unsigned const sampleRateFixedPoint = fCurrentIOState->fQTTimeScale << 16;
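    // (16.16 fixed-point; e.g. a time scale of 44100 Hz is written as 0xAC440000.)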
  size += addWord(sampleRateFixedPoint); // Sample rate
addAtomEnd;

unsigned QuickTimeFileSink::addAtom_Qclp() {
  // The beginning of this atom looks just like a general Sound Media atom,
  // except with a version field of 1:
  int64_t initFilePosn = TellFile64(fOutFid);
  fCurrentIOState->fQTAudioDataType = "Qclp";
  fCurrentIOState->fQTSoundSampleVersion = 1;
  unsigned size = addAtom_soundMediaGeneral();

  // Next, add the four fields that are particular to version 1:
  // (Later, parameterize these #####)
  size += addWord(0x000000a0); // samples per packet
  size += addWord(0x00000000); // ???
  size += addWord(0x00000000); // ???
  size += addWord(0x00000002); // bytes per sample (uncompressed)

  // Other special fields are in a 'wave' atom that follows:
  size += addAtom_wave();
addAtomEnd;

addAtom(wave);
  size += addAtom_frma();
  if (strcmp(fCurrentIOState->fQTAudioDataType, "Qclp") == 0) {
    size += addWord(0x00000014); // ???
    size += add4ByteString("Qclp"); // ???
    if (fCurrentIOState->fQTBytesPerFrame == 35) {
      size += addAtom_Fclp(); // full-rate QCELP
    } else {
      size += addAtom_Hclp(); // half-rate QCELP
    } // what about other QCELP 'rates'??? #####
    size += addWord(0x00000008); // ???
    size += addWord(0x00000000); // ???
    size += addWord(0x00000000); // ???
    size += addWord(0x00000008); // ???
  } else if (strcmp(fCurrentIOState->fQTAudioDataType, "mp4a") == 0) {
    size += addWord(0x0000000c); // ???
    size += add4ByteString("mp4a"); // ???
    size += addWord(0x00000000); // ???
    size += addAtom_esds(); // ESDescriptor
    size += addWord(0x00000008); // ???
    size += addWord(0x00000000); // ???
  }
addAtomEnd;

addAtom(frma);
  size += add4ByteString(fCurrentIOState->fQTAudioDataType); // ???
addAtomEnd;

addAtom(Fclp);
  size += addWord(0x00000000); // ???
addAtomEnd;

addAtom(Hclp);
  size += addWord(0x00000000); // ???
addAtomEnd;

unsigned QuickTimeFileSink::addAtom_mp4a() {
  unsigned size = 0;
  // The beginning of this atom looks just like a general Sound Media atom,
  // except with a version field of 1:
  int64_t initFilePosn = TellFile64(fOutFid);
  fCurrentIOState->fQTAudioDataType = "mp4a";

  if (fGenerateMP4Format) {
    fCurrentIOState->fQTSoundSampleVersion = 0;
    size = addAtom_soundMediaGeneral();
    size += addAtom_esds();
  } else {
    fCurrentIOState->fQTSoundSampleVersion = 1;
    size = addAtom_soundMediaGeneral();

    // Next, add the four fields that are particular to version 1:
    // (Later, parameterize these #####)
    size += addWord(fCurrentIOState->fQTTimeUnitsPerSample);
    size += addWord(0x00000001); // ???
    size += addWord(0x00000001); // ???
    size += addWord(0x00000002); // bytes per sample (uncompressed)

    // Other special fields are in a 'wave' atom that follows:
    size += addAtom_wave();
  }
addAtomEnd;

addAtom(esds);
  //#####
  MediaSubsession& subsession = fCurrentIOState->fOurSubsession;
  if (strcmp(subsession.mediumName(), "audio") == 0) {
    // MPEG-4 audio
    size += addWord(0x00000000); // ???
    size += addWord(0x03808080); // ???
    size += addWord(0x2a000000); // ???
    size += addWord(0x04808080); // ???
    size += addWord(0x1c401500); // ???
    size += addWord(0x18000000); // ???
    size += addWord(0x6d600000); // ???
    size += addWord(0x6d600580); // ???
    size += addByte(0x80); size += addByte(0x80); // ???
  } else if (strcmp(subsession.mediumName(), "video") == 0) {
    // MPEG-4 video
    size += addWord(0x00000000); // ???
    size += addWord(0x03330000); // ???
    size += addWord(0x1f042b20); // ???
    size += addWord(0x1104fd46); // ???
    size += addWord(0x000d4e10); // ???
    size += addWord(0x000d4e10); // ???
    size += addByte(0x05); // ???
  }

  // Add the source's 'config' information:
  unsigned configSize;
  unsigned char* config
    = parseGeneralConfigStr(subsession.fmtp_config(), configSize);
  size += addByte(configSize);
  for (unsigned i = 0; i < configSize; ++i) {
    size += addByte(config[i]);
  }
  delete[] config;

  if (strcmp(subsession.mediumName(), "audio") == 0) {
    // MPEG-4 audio
    size += addWord(0x06808080); // ???
    size += addHalfWord(0x0102); // ???
  } else {
    // MPEG-4 video
    size += addHalfWord(0x0601); // ???
    size += addByte(0x02); // ???
  }
  //#####
addAtomEnd;

addAtom(srcq);
  //#####
  size += addWord(0x00000040); // ???
  //#####
addAtomEnd;

addAtom(h263);
// General sample description fields:
  size += addWord(0x00000000); // Reserved
  size += addWord(0x00000001); // Reserved+Data reference index
// Video sample description fields:
  size += addWord(0x00020001); // Version+Revision level
  size += add4ByteString("appl"); // Vendor
  size += addWord(0x00000000); // Temporal quality
  size += addWord(0x000002fc); // Spatial quality
  unsigned const widthAndHeight = (fMovieWidth<<16)|fMovieHeight;
  size += addWord(widthAndHeight); // Width+height
  size += addWord(0x00480000); // Horizontal resolution
  size += addWord(0x00480000); // Vertical resolution
  size += addWord(0x00000000); // Data size
  size += addWord(0x00010548); // Frame count+Compressor name (start)
    // "H.263"
  size += addWord(0x2e323633); // Compressor name (continued)
  size += addZeroWords(6); // Compressor name (continued - zero)
  size += addWord(0x00000018); // Compressor name (final)+Depth
  size += addHalfWord(0xffff); // Color table id
addAtomEnd;

addAtom(avc1);
// General sample description fields:
  size += addWord(0x00000000); // Reserved
  size += addWord(0x00000001); // Reserved+Data reference index
// Video sample description fields:
  size += addWord(0x00000000); // Version+Revision level
  size += add4ByteString("appl"); // Vendor
  size += addWord(0x00000000); // Temporal quality
  size += addWord(0x00000000); // Spatial quality
  unsigned const widthAndHeight = (fMovieWidth<<16)|fMovieHeight;
  size += addWord(widthAndHeight); // Width+height
  size += addWord(0x00480000); // Horizontal resolution
  size += addWord(0x00480000); // Vertical resolution
  size += addWord(0x00000000); // Data size
  size += addWord(0x00010548); // Frame count+Compressor name (start)
    // "H.264"
  size += addWord(0x2e323634); // Compressor name (continued)
  size += addZeroWords(6); // Compressor name (continued - zero)
  size += addWord(0x00000018); // Compressor name (final)+Depth
  size += addHalfWord(0xffff); // Color table id
  size += addAtom_avcC();
addAtomEnd;

addAtom(avcC);
// Begin by Base-64 decoding the "sprop" parameter sets strings:
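// ("sprop-parameter-sets" is a comma-separated list of Base-64-encoded parameter-set
// NAL units, typically the SPS followed by the PPS,
// e.g. "Z0IACpZTBYmI,aMljiA==" (hypothetical values).)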
  char* psets = strDup(fCurrentIOState->fOurSubsession.fmtp_spropparametersets());
  if (psets == NULL) return 0;

  size_t comma_pos = strcspn(psets, ",");
  psets[comma_pos] = '\0';
  char const* sps_b64 = psets;
  char const* pps_b64 = &psets[comma_pos+1];
  unsigned sps_count;
  unsigned char* sps_data = base64Decode(sps_b64, sps_count, false);
  unsigned pps_count;
  unsigned char* pps_data = base64Decode(pps_b64, pps_count, false);

// Then add the decoded data:
  size += addByte(0x01); // configuration version
  size += addByte(sps_data[1]); // profile
  size += addByte(sps_data[2]); // profile compat
  size += addByte(sps_data[3]); // level
  size += addByte(0xff); /* 0b11111100 | lengthSizeMinusOne (0b11: 4-byte NAL unit lengths) */
  size += addByte(0xe0 | (sps_count > 0 ? 1 : 0) );
  if (sps_count > 0) {
    size += addHalfWord(sps_count);
    for (unsigned i = 0; i < sps_count; i++) {
      size += addByte(sps_data[i]);
    }
  }
  size += addByte(pps_count > 0 ? 1 : 0);
  if (pps_count > 0) {
    size += addHalfWord(pps_count);
    for (unsigned i = 0; i < pps_count; i++) {
      size += addByte(pps_data[i]);
    }
  }

// Finally, delete the data that we allocated:
  delete[] pps_data; delete[] sps_data;
  delete[] psets;
addAtomEnd;

addAtom(mp4v);
// General sample description fields:
  size += addWord(0x00000000); // Reserved
  size += addWord(0x00000001); // Reserved+Data reference index
// Video sample description fields:
  size += addWord(0x00020001); // Version+Revision level
  size += add4ByteString("appl"); // Vendor
  size += addWord(0x00000200); // Temporal quality
  size += addWord(0x00000400); // Spatial quality
  unsigned const widthAndHeight = (fMovieWidth<<16)|fMovieHeight;
  size += addWord(widthAndHeight); // Width+height
  size += addWord(0x00480000); // Horizontal resolution
  size += addWord(0x00480000); // Vertical resolution
  size += addWord(0x00000000); // Data size
  size += addWord(0x00010c4d); // Frame count+Compressor name (start)
    // "MPEG-4 Video"
  size += addWord(0x5045472d); // Compressor name (continued)
  size += addWord(0x34205669); // Compressor name (continued)
  size += addWord(0x64656f00); // Compressor name (continued)
  size += addZeroWords(4); // Compressor name (continued - zero)
  size += addWord(0x00000018); // Compressor name (final)+Depth
  size += addHalfWord(0xffff); // Color table id
  size += addAtom_esds(); // ESDescriptor
  size += addWord(0x00000000); // ???
addAtomEnd;

unsigned QuickTimeFileSink::addAtom_rtp() {
  int64_t initFilePosn = TellFile64(fOutFid);
  unsigned size = addAtomHeader("rtp ");

  size += addWord(0x00000000); // Reserved (1st 4 bytes)
  size += addWord(0x00000001); // Reserved (last 2 bytes) + Data ref index
  size += addWord(0x00010001); // Hint track version + Last compat htv
  size += addWord(1450); // Max packet size

  size += addAtom_tims();
addAtomEnd;

addAtom(tims);
  size += addWord(fCurrentIOState->fOurSubsession.rtpTimestampFrequency());
addAtomEnd;

addAtom(stts); // Time-to-Sample
  size += addWord(0x00000000); // Version+flags

  // First, add a dummy "Number of entries" field
  // (and remember its position). We'll fill this field in later:
  int64_t numEntriesPosition = TellFile64(fOutFid);
  size += addWord(0); // dummy for "Number of entries"

  // Then, run through the chunk descriptors, and enter the entries
  // in this (compressed) Time-to-Sample table:
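  // (E.g., two chunks of 3 samples each at duration 1024, followed by one chunk of
  // 2 samples at duration 960, compress to just two entries: (6,1024) and (2,960).)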
  unsigned numEntries = 0, numSamplesSoFar = 0;
  unsigned prevSampleDuration = 0;
  unsigned const samplesPerFrame = fCurrentIOState->fQTSamplesPerFrame;
  ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
  while (chunk != NULL) {
    unsigned const sampleDuration = chunk->fFrameDuration/samplesPerFrame;
    if (sampleDuration != prevSampleDuration) {
      // This chunk will start a new table entry,
      // so write out the old one (if any):
      if (chunk != fCurrentIOState->fHeadChunk) {
        ++numEntries;
        size += addWord(numSamplesSoFar); // Sample count
        size += addWord(prevSampleDuration); // Sample duration
        numSamplesSoFar = 0;
      }
    }

    unsigned const numSamples = chunk->fNumFrames*samplesPerFrame;
    numSamplesSoFar += numSamples;
    prevSampleDuration = sampleDuration;
    chunk = chunk->fNextChunk;
  }

  // Then, write out the last entry:
  ++numEntries;
  size += addWord(numSamplesSoFar); // Sample count
  size += addWord(prevSampleDuration); // Sample duration

  // Now go back and fill in the "Number of entries" field:
  setWord(numEntriesPosition, numEntries);
addAtomEnd;
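
// The following is a minimal, self-contained sketch - not part of the sink itself, and
// all names here are hypothetical - of the run-length compression performed by the
// Time-to-Sample writer above (and, in the same spirit, by the Sample-to-Chunk writer
// below): consecutive samples that share a duration collapse into a single
// (sample count, sample duration) entry.
struct ExampleTimeToSampleEntry {
  unsigned sampleCount;
  unsigned sampleDuration;
};

static unsigned exampleCompressTimeToSample(unsigned const* durations, unsigned numSamples,
                                            ExampleTimeToSampleEntry* entries /* assumed large enough */) {
  unsigned numEntries = 0;
  for (unsigned i = 0; i < numSamples; ++i) {
    if (numEntries > 0 && entries[numEntries-1].sampleDuration == durations[i]) {
      ++entries[numEntries-1].sampleCount; // same duration as the previous sample: extend the current run
    } else {
      entries[numEntries].sampleCount = 1; // the duration changed: start a new table entry
      entries[numEntries].sampleDuration = durations[i];
      ++numEntries;
    }
  }
  return numEntries;
}
// E.g., per-sample durations {1024,1024,1024,1024,1024,1024,960,960} would yield the
// two entries {6,1024} and {2,960}.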

addAtom(stss); // Sync-Sample
  size += addWord(0x00000000); // Version+flags

  // First, add a dummy "Number of entries" field
  // (and remember its position). We'll fill this field in later:
  int64_t numEntriesPosition = TellFile64(fOutFid);
  size += addWord(0); // dummy for "Number of entries"

  unsigned numEntries = 0, numSamplesSoFar = 0;
  if (fCurrentIOState->fHeadSyncFrame != NULL) {
    SyncFrame* currentSyncFrame = fCurrentIOState->fHeadSyncFrame;

    // First, count the number of frames (to use as a sanity check; see below):
    unsigned totNumFrames = 0;
    for (ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk; chunk != NULL; chunk = chunk->fNextChunk) totNumFrames += chunk->fNumFrames;

    while (currentSyncFrame != NULL) {
      if (currentSyncFrame->sfFrameNum >= totNumFrames) break; // sanity check

      ++numEntries;
      size += addWord(currentSyncFrame->sfFrameNum);
      currentSyncFrame = currentSyncFrame->nextSyncFrame;
    }
  } else {
    // First, run through the chunk descriptors, counting up the total number of samples:
    unsigned const samplesPerFrame = fCurrentIOState->fQTSamplesPerFrame;
    ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
    while (chunk != NULL) {
      unsigned const numSamples = chunk->fNumFrames*samplesPerFrame;
      numSamplesSoFar += numSamples;
      chunk = chunk->fNextChunk;
    }

    // Then, write out the sample numbers that we deem correspond to 'sync samples':
    unsigned i;
    for (i = 0; i < numSamplesSoFar; i += 12) {
      // For an explanation of the constant "12", see http://lists.live555.com/pipermail/live-devel/2009-July/010969.html
      // (Perhaps we should really try to keep track of which 'samples' ('frames' for video) really are 'key frames'?)
      size += addWord(i+1);
      ++numEntries;
    }
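    // (E.g., if numSamplesSoFar were 30, the loop above would mark samples 1, 13 and 25
    // as sync samples, and the code below would then also mark the last sample, 30.)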

    // Then, write out the last entry (if we haven't already done so):
    if (i != (numSamplesSoFar - 1)) {
      size += addWord(numSamplesSoFar);
      ++numEntries;
    }
  }

  // Now go back and fill in the "Number of entries" field:
  setWord(numEntriesPosition, numEntries);
addAtomEnd;

addAtom(stsc); // Sample-to-Chunk
  size += addWord(0x00000000); // Version+flags

  // First, add a dummy "Number of entries" field
  // (and remember its position). We'll fill this field in later:
  int64_t numEntriesPosition = TellFile64(fOutFid);
  size += addWord(0); // dummy for "Number of entries"

  // Then, run through the chunk descriptors, and enter the entries
  // in this (compressed) Sample-to-Chunk table:
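  // (A new entry is written only when the samples-per-chunk value changes; e.g. chunks
  // containing 5, 5, 5 and then 3 samples produce just two entries:
  // (chunk 1, 5 samples) and (chunk 4, 3 samples).)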
  unsigned numEntries = 0, chunkNumber = 0;
  unsigned prevSamplesPerChunk = ~0;
  unsigned const samplesPerFrame = fCurrentIOState->fQTSamplesPerFrame;
  ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
  while (chunk != NULL) {
    ++chunkNumber;
    unsigned const samplesPerChunk = chunk->fNumFrames*samplesPerFrame;
    if (samplesPerChunk != prevSamplesPerChunk) {
      // This chunk will be a new table entry:
      ++numEntries;
      size += addWord(chunkNumber); // Chunk number
      size += addWord(samplesPerChunk); // Samples per chunk
      size += addWord(0x00000001); // Sample description ID

      prevSamplesPerChunk = samplesPerChunk;
    }
    chunk = chunk->fNextChunk;
  }

  // Now go back and fill in the "Number of entries" field:
  setWord(numEntriesPosition, numEntries);
addAtomEnd;

addAtom(stsz); // Sample Size
  size += addWord(0x00000000); // Version+flags

  // Begin by checking whether our chunks all have the same
  // 'bytes-per-sample'. This determines whether this atom's table
  // has just a single entry, or multiple entries.
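  // (If they do - as with fixed-size audio samples - a single "Sample size" value below
  // covers every sample; otherwise the sample size is written as 0 and a per-sample
  // table follows.)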
  Boolean haveSingleEntryTable = True;
  double firstBPS = 0.0;
  ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
  while (chunk != NULL) {
    double bps
      = (double)(chunk->fFrameSize)/(fCurrentIOState->fQTSamplesPerFrame);
    if (bps < 1.0) {
      // I don't think a multiple-entry table would make sense in
      // this case, so assume a single entry table ??? #####
      break;
    }

    if (firstBPS == 0.0) {
      firstBPS = bps;
    } else if (bps != firstBPS) {
      haveSingleEntryTable = False;
      break;
    }

    chunk = chunk->fNextChunk;
  }

  unsigned sampleSize;
  if (haveSingleEntryTable) {
    if (fCurrentIOState->isHintTrack()
        && fCurrentIOState->fHeadChunk != NULL) {
      sampleSize = fCurrentIOState->fHeadChunk->fFrameSize
        / fCurrentIOState->fQTSamplesPerFrame;
    } else {
      // The following doesn't seem right, but seems to do the right thing:
      sampleSize = fCurrentIOState->fQTTimeUnitsPerSample; //???
    }
  } else {
    sampleSize = 0; // indicates a multiple-entry table
  }
  size += addWord(sampleSize); // Sample size
  unsigned const totNumSamples = fCurrentIOState->fQTTotNumSamples;
  size += addWord(totNumSamples); // Number of entries

  if (!haveSingleEntryTable) {
    // Multiple-entry table:
    // Run through the chunk descriptors, entering the sample sizes:
    ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
    while (chunk != NULL) {
      unsigned numSamples
        = chunk->fNumFrames*(fCurrentIOState->fQTSamplesPerFrame);
      unsigned sampleSize
        = chunk->fFrameSize/(fCurrentIOState->fQTSamplesPerFrame);
      for (unsigned i = 0; i < numSamples; ++i) {
        size += addWord(sampleSize);
      }

      chunk = chunk->fNextChunk;
    }
  }
addAtomEnd;

addAtom(co64); // Chunk Offset
  size += addWord(0x00000000); // Version+flags
  size += addWord(fCurrentIOState->fNumChunks); // Number of entries

  // Run through the chunk descriptors, entering the file offsets:
  ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
  while (chunk != NULL) {
    size += addWord64(chunk->fOffsetInFile);

    chunk = chunk->fNextChunk;
  }
addAtomEnd;

addAtom(udta);
  size += addAtom_name();
  size += addAtom_hnti();
  size += addAtom_hinf();
addAtomEnd;

addAtom(name);
  char description[100];
  sprintf(description, "Hinted %s track",
          fCurrentIOState->fOurSubsession.mediumName());
  size += addArbitraryString(description, False); // name of object
addAtomEnd;

addAtom(hnti);
  size += addAtom_sdp();
addAtomEnd;

unsigned QuickTimeFileSink::addAtom_sdp() {
  int64_t initFilePosn = TellFile64(fOutFid);
  unsigned size = addAtomHeader("sdp ");

  // Add this subsession's SDP lines:
  char const* sdpLines = fCurrentIOState->fOurSubsession.savedSDPLines();
  // We need to change any "a=control:trackID=" values to be this
  // track's actual track id:
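  // (E.g., an SDP line "a=control:trackID=1" would be rewritten as "a=control:trackID=7"
  // if this track's ID is 7; the search below is case-insensitive.  The values here are
  // hypothetical.)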
  char* newSDPLines = new char[strlen(sdpLines)+100/*overkill*/];
  char const* searchStr = "a=control:trackid=";
  Boolean foundSearchString = False;
  char const *p1, *p2, *p3;
  for (p1 = sdpLines; *p1 != '\0'; ++p1) {
    for (p2 = p1,p3 = searchStr; tolower(*p2) == *p3; ++p2,++p3) {}
    if (*p3 == '\0') {
      // We found the end of the search string, at p2.
      int beforeTrackNumPosn = p2-sdpLines;
      // Look for the subsequent track number, and skip over it:
      int trackNumLength;
      if (sscanf(p2, " %*d%n", &trackNumLength) < 0) break;
      int afterTrackNumPosn = beforeTrackNumPosn + trackNumLength;

      // Replace the old track number with the correct one:
      int i;
      for (i = 0; i < beforeTrackNumPosn; ++i) newSDPLines[i] = sdpLines[i];
      sprintf(&newSDPLines[i], "%d", fCurrentIOState->fTrackID);
      i = afterTrackNumPosn;
      int j = i + strlen(&newSDPLines[i]);
      while (1) {
        if ((newSDPLines[j] = sdpLines[i]) == '\0') break;
        ++i; ++j;
      }

      foundSearchString = True;
      break;
    }
  }

  if (!foundSearchString) {
    // Because we didn't find a "a=control:trackID=<trackId>" line,
    // add one of our own:
    sprintf(newSDPLines, "%s%s%d\r\n",
            sdpLines, searchStr, fCurrentIOState->fTrackID);
  }

  size += addArbitraryString(newSDPLines, False);
  delete[] newSDPLines;
addAtomEnd;

addAtom(hinf);
  size += addAtom_totl();
  size += addAtom_npck();
  size += addAtom_tpay();
  size += addAtom_trpy();
  size += addAtom_nump();
  size += addAtom_tpyl();
  // Is 'maxr' required? #####
  size += addAtom_dmed();
  size += addAtom_dimm();
  size += addAtom_drep();
  size += addAtom_tmin();
  size += addAtom_tmax();
  size += addAtom_pmax();
  size += addAtom_dmax();
  size += addAtom_payt();
addAtomEnd;

addAtom(totl);
  size += addWord(fCurrentIOState->fHINF.trpy.lo);
addAtomEnd;

addAtom(npck);
  size += addWord(fCurrentIOState->fHINF.nump.lo);
addAtomEnd;

addAtom(tpay);
  size += addWord(fCurrentIOState->fHINF.tpyl.lo);
addAtomEnd;

addAtom(trpy);
  size += addWord(fCurrentIOState->fHINF.trpy.hi);
  size += addWord(fCurrentIOState->fHINF.trpy.lo);
addAtomEnd;

addAtom(nump);
  size += addWord(fCurrentIOState->fHINF.nump.hi);
  size += addWord(fCurrentIOState->fHINF.nump.lo);
addAtomEnd;

addAtom(tpyl);
  size += addWord(fCurrentIOState->fHINF.tpyl.hi);
  size += addWord(fCurrentIOState->fHINF.tpyl.lo);
addAtomEnd;

addAtom(dmed);
  size += addWord(fCurrentIOState->fHINF.dmed.hi);
  size += addWord(fCurrentIOState->fHINF.dmed.lo);
addAtomEnd;

addAtom(dimm);
  size += addWord(fCurrentIOState->fHINF.dimm.hi);
  size += addWord(fCurrentIOState->fHINF.dimm.lo);
addAtomEnd;

addAtom(drep);
  size += addWord(0);
  size += addWord(0);
addAtomEnd;

addAtom(tmin);
  size += addWord(0);
addAtomEnd;

addAtom(tmax);
  size += addWord(0);
addAtomEnd;

addAtom(pmax);
  size += addWord(fCurrentIOState->fHINF.pmax);
addAtomEnd;

addAtom(dmax);
  size += addWord(fCurrentIOState->fHINF.dmax);
addAtomEnd;

addAtom(payt);
  MediaSubsession& ourSubsession = fCurrentIOState->fOurSubsession;
  RTPSource* rtpSource = ourSubsession.rtpSource();
  if (rtpSource != NULL) {
    size += addWord(rtpSource->rtpPayloadFormat());

    // Also, add a 'rtpmap' string: <mime-subtype>/<rtp-frequency>
    unsigned rtpmapStringLength = strlen(ourSubsession.codecName()) + 20;
    char* rtpmapString = new char[rtpmapStringLength];
    sprintf(rtpmapString, "%s/%d",
            ourSubsession.codecName(), rtpSource->timestampFrequency());
    size += addArbitraryString(rtpmapString);
    delete[] rtpmapString;
  }
addAtomEnd;

// A dummy atom (with name "????"):
unsigned QuickTimeFileSink::addAtom_dummy() {
  int64_t initFilePosn = TellFile64(fOutFid);
  unsigned size = addAtomHeader("????");
addAtomEnd;
