| 1 | /********** |
| 2 | This library is free software; you can redistribute it and/or modify it under |
| 3 | the terms of the GNU Lesser General Public License as published by the |
| 4 | Free Software Foundation; either version 3 of the License, or (at your |
| 5 | option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.) |
| 6 | |
| 7 | This library is distributed in the hope that it will be useful, but WITHOUT |
| 8 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS |
| 9 | FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for |
| 10 | more details. |
| 11 | |
| 12 | You should have received a copy of the GNU Lesser General Public License |
| 13 | along with this library; if not, write to the Free Software Foundation, Inc., |
| 14 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
| 15 | **********/ |
| 16 | // "liveMedia" |
| 17 | // Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved. |
| 18 | // A sink that generates an AVI file from a composite media session |
| 19 | // Implementation |
| 20 | |
| 21 | #include "AVIFileSink.hh" |
| 22 | #include "InputFile.hh" |
| 23 | #include "OutputFile.hh" |
| 24 | #include "GroupsockHelper.hh" |
| 25 | |
| 26 | #define fourChar(x,y,z,w) ( ((w)<<24)|((z)<<16)|((y)<<8)|(x) )/*little-endian*/ |
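// Illustration: fourChar('0','0','d','c') evaluates to 0x63643030; because
// addWord() (defined below) writes words least-significant byte first, this
// value appears in the file as the characters '0','0','d','c' in that order.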
| 27 | |
| 28 | #define AVIIF_LIST 0x00000001 |
| 29 | #define AVIIF_KEYFRAME 0x00000010 |
| 30 | #define AVIIF_NO_TIME 0x00000100 |
| 31 | #define AVIIF_COMPRESSOR 0x0FFF0000 |
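// (Of these flags, only AVIIF_KEYFRAME is used below: every frame's index
// record is written with it set - see "AVISubsessionIOState::useFrame()".)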
| 32 | |
| 33 | ////////// AVISubsessionIOState /////////// |
| 34 | // A structure used to represent the I/O state of each input 'subsession': |
| 35 | |
| 36 | class SubsessionBuffer { |
| 37 | public: |
| 38 | SubsessionBuffer(unsigned bufferSize) |
| 39 | : fBufferSize(bufferSize) { |
| 40 | reset(); |
| 41 | fData = new unsigned char[bufferSize]; |
| 42 | } |
| 43 | virtual ~SubsessionBuffer() { delete[] fData; } |
| 44 | void reset() { fBytesInUse = 0; } |
| 45 | void addBytes(unsigned numBytes) { fBytesInUse += numBytes; } |
| 46 | |
| 47 | unsigned char* dataStart() { return &fData[0]; } |
| 48 | unsigned char* dataEnd() { return &fData[fBytesInUse]; } |
| 49 | unsigned bytesInUse() const { return fBytesInUse; } |
| 50 | unsigned bytesAvailable() const { return fBufferSize - fBytesInUse; } |
| 51 | |
| 52 | void setPresentationTime(struct timeval const& presentationTime) { |
| 53 | fPresentationTime = presentationTime; |
| 54 | } |
| 55 | struct timeval const& presentationTime() const {return fPresentationTime;} |
| 56 | |
| 57 | private: |
| 58 | unsigned fBufferSize; |
| 59 | struct timeval fPresentationTime; |
| 60 | unsigned char* fData; |
| 61 | unsigned fBytesInUse; |
| 62 | }; |
| 63 | |
| 64 | class AVISubsessionIOState { |
| 65 | public: |
| 66 | AVISubsessionIOState(AVIFileSink& sink, MediaSubsession& subsession); |
| 67 | virtual ~AVISubsessionIOState(); |
| 68 | |
| 69 | void setAVIstate(unsigned subsessionIndex); |
| 70 | void setFinalAVIstate(); |
| 71 | |
| 72 | void afterGettingFrame(unsigned packetDataSize, |
| 73 | struct timeval presentationTime); |
| 74 | void onSourceClosure(); |
| 75 | |
| 76 | UsageEnvironment& envir() const { return fOurSink.envir(); } |
| 77 | |
| 78 | public: |
| 79 | SubsessionBuffer *fBuffer, *fPrevBuffer; |
| 80 | AVIFileSink& fOurSink; |
| 81 | MediaSubsession& fOurSubsession; |
| 82 | |
| 83 | unsigned short fLastPacketRTPSeqNum; |
| 84 | Boolean fOurSourceIsActive; |
| 85 | struct timeval fPrevPresentationTime; |
| 86 | unsigned fMaxBytesPerSecond; |
| 87 | Boolean fIsVideo, fIsAudio, fIsByteSwappedAudio; |
| 88 | unsigned fAVISubsessionTag; |
| 89 | unsigned fAVICodecHandlerType; |
| 90 | unsigned fAVISamplingFrequency; // for audio |
| 91 | u_int16_t fWAVCodecTag; // for audio |
| 92 | unsigned fAVIScale; |
| 93 | unsigned fAVIRate; |
| 94 | unsigned fAVISize; |
| 95 | unsigned fNumFrames; |
| 96 | unsigned fSTRHFrameCountPosition; |
| 97 | |
| 98 | private: |
| 99 | void useFrame(SubsessionBuffer& buffer); |
| 100 | }; |
| 101 | |
| 102 | |
| 103 | ///////// AVIIndexRecord definition & implementation ////////// |
| 104 | |
| 105 | class AVIIndexRecord { |
| 106 | public: |
| 107 | AVIIndexRecord(unsigned chunkId, unsigned flags, unsigned offset, unsigned size) |
| 108 | : fNext(NULL), fChunkId(chunkId), fFlags(flags), fOffset(offset), fSize(size) { |
| 109 | } |
| 110 | |
| 111 | AVIIndexRecord*& next() { return fNext; } |
| 112 | unsigned chunkId() const { return fChunkId; } |
| 113 | unsigned flags() const { return fFlags; } |
| 114 | unsigned offset() const { return fOffset; } |
| 115 | unsigned size() const { return fSize; } |
| 116 | |
| 117 | private: |
| 118 | AVIIndexRecord* fNext; |
| 119 | unsigned fChunkId; |
| 120 | unsigned fFlags; |
| 121 | unsigned fOffset; |
| 122 | unsigned fSize; |
| 123 | }; |
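// Note: each record is later written to the "idx1" chunk as four 32-bit words
// (chunk id, flags, offset, size) - see "completeOutputFile()" - which is why
// that chunk's payload size is computed there as fNumIndexRecords*4*4 bytes.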
| 124 | |
| 125 | |
| 126 | ////////// AVIFileSink implementation ////////// |
| 127 | |
| 128 | AVIFileSink::AVIFileSink(UsageEnvironment& env, |
| 129 | MediaSession& inputSession, |
| 130 | char const* outputFileName, |
| 131 | unsigned bufferSize, |
| 132 | unsigned short movieWidth, unsigned short movieHeight, |
| 133 | unsigned movieFPS, Boolean packetLossCompensate) |
| 134 | : Medium(env), fInputSession(inputSession), |
| 135 | fIndexRecordsHead(NULL), fIndexRecordsTail(NULL), fNumIndexRecords(0), |
| 136 | fBufferSize(bufferSize), fPacketLossCompensate(packetLossCompensate), |
| 137 | fAreCurrentlyBeingPlayed(False), fNumSubsessions(0), fNumBytesWritten(0), |
| 138 | fHaveCompletedOutputFile(False), |
| 139 | fMovieWidth(movieWidth), fMovieHeight(movieHeight), fMovieFPS(movieFPS) { |
| 140 | fOutFid = OpenOutputFile(env, outputFileName); |
| 141 | if (fOutFid == NULL) return; |
| 142 | |
| 143 | // Set up I/O state for each input subsession: |
| 144 | MediaSubsessionIterator iter(fInputSession); |
| 145 | MediaSubsession* subsession; |
| 146 | while ((subsession = iter.next()) != NULL) { |
| 147 | // Ignore subsessions without a data source: |
| 148 | FramedSource* subsessionSource = subsession->readSource(); |
| 149 | if (subsessionSource == NULL) continue; |
| 150 | |
| 151 | // If "subsession's" SDP description specified screen dimension |
| 152 | // or frame rate parameters, then use these. |
| 153 | if (subsession->videoWidth() != 0) { |
| 154 | fMovieWidth = subsession->videoWidth(); |
| 155 | } |
| 156 | if (subsession->videoHeight() != 0) { |
| 157 | fMovieHeight = subsession->videoHeight(); |
| 158 | } |
| 159 | if (subsession->videoFPS() != 0) { |
| 160 | fMovieFPS = subsession->videoFPS(); |
| 161 | } |
| 162 | |
| 163 | AVISubsessionIOState* ioState |
| 164 | = new AVISubsessionIOState(*this, *subsession); |
| 165 | subsession->miscPtr = (void*)ioState; |
| 166 | |
| 167 | // Also set a 'BYE' handler for this subsession's RTCP instance: |
| 168 | if (subsession->rtcpInstance() != NULL) { |
| 169 | subsession->rtcpInstance()->setByeHandler(onRTCPBye, ioState); |
| 170 | } |
| 171 | |
| 172 | ++fNumSubsessions; |
| 173 | } |
| 174 | |
| 175 | // Begin by writing an AVI header: |
| 176 | addFileHeader_AVI(); |
| 177 | } |
| 178 | |
| 179 | AVIFileSink::~AVIFileSink() { |
| 180 | completeOutputFile(); |
| 181 | |
| 182 | // Then, stop streaming and delete each active "AVISubsessionIOState": |
| 183 | MediaSubsessionIterator iter(fInputSession); |
| 184 | MediaSubsession* subsession; |
| 185 | while ((subsession = iter.next()) != NULL) { |
| 186 | if (subsession->readSource() != NULL) subsession->readSource()->stopGettingFrames(); |
| 187 | |
| 188 | AVISubsessionIOState* ioState |
| 189 | = (AVISubsessionIOState*)(subsession->miscPtr); |
| 190 | if (ioState == NULL) continue; |
| 191 | |
| 192 | delete ioState; |
| 193 | } |
| 194 | |
| 195 | // Then, delete the index records: |
| 196 | AVIIndexRecord* cur = fIndexRecordsHead; |
| 197 | while (cur != NULL) { |
| 198 | AVIIndexRecord* next = cur->next(); |
| 199 | delete cur; |
| 200 | cur = next; |
| 201 | } |
| 202 | |
| 203 | // Finally, close our output file: |
| 204 | CloseOutputFile(fOutFid); |
| 205 | } |
| 206 | |
| 207 | AVIFileSink* AVIFileSink |
| 208 | ::createNew(UsageEnvironment& env, MediaSession& inputSession, |
| 209 | char const* outputFileName, |
| 210 | unsigned bufferSize, |
| 211 | unsigned short movieWidth, unsigned short movieHeight, |
| 212 | unsigned movieFPS, Boolean packetLossCompensate) { |
| 213 | AVIFileSink* newSink = |
| 214 | new AVIFileSink(env, inputSession, outputFileName, bufferSize, |
| 215 | movieWidth, movieHeight, movieFPS, packetLossCompensate); |
| 216 | if (newSink == NULL || newSink->fOutFid == NULL) { |
| 217 | Medium::close(newSink); |
| 218 | return NULL; |
| 219 | } |
| 220 | |
| 221 | return newSink; |
| 222 | } |
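// Usage sketch (illustrative only; "session" and "afterPlaying" are assumed to
// be provided by the application, with "session" being a set-up "MediaSession"):
//   AVIFileSink* aviSink
//     = AVIFileSink::createNew(env, *session, "output.avi", 100000,
//                              640, 480, 25, False);
//   if (aviSink != NULL) aviSink->startPlaying(afterPlaying, NULL);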
| 223 | |
| 224 | Boolean AVIFileSink::startPlaying(afterPlayingFunc* afterFunc, |
| 225 | void* afterClientData) { |
| 226 | // Make sure we're not already being played: |
| 227 | if (fAreCurrentlyBeingPlayed) { |
| 228 | envir().setResultMsg("This sink has already been played" ); |
| 229 | return False; |
| 230 | } |
| 231 | |
| 232 | fAreCurrentlyBeingPlayed = True; |
| 233 | fAfterFunc = afterFunc; |
| 234 | fAfterClientData = afterClientData; |
| 235 | |
| 236 | return continuePlaying(); |
| 237 | } |
| 238 | |
| 239 | Boolean AVIFileSink::continuePlaying() { |
| 240 | // Run through each of our input session's 'subsessions', |
| 241 | // asking for a frame from each one: |
| 242 | Boolean haveActiveSubsessions = False; |
| 243 | MediaSubsessionIterator iter(fInputSession); |
| 244 | MediaSubsession* subsession; |
| 245 | while ((subsession = iter.next()) != NULL) { |
| 246 | FramedSource* subsessionSource = subsession->readSource(); |
| 247 | if (subsessionSource == NULL) continue; |
| 248 | |
| 249 | if (subsessionSource->isCurrentlyAwaitingData()) continue; |
| 250 | |
| 251 | AVISubsessionIOState* ioState |
| 252 | = (AVISubsessionIOState*)(subsession->miscPtr); |
| 253 | if (ioState == NULL) continue; |
| 254 | |
| 255 | haveActiveSubsessions = True; |
| 256 | unsigned char* toPtr = ioState->fBuffer->dataEnd(); |
| 257 | unsigned toSize = ioState->fBuffer->bytesAvailable(); |
| 258 | subsessionSource->getNextFrame(toPtr, toSize, |
| 259 | afterGettingFrame, ioState, |
| 260 | onSourceClosure, ioState); |
| 261 | } |
| 262 | if (!haveActiveSubsessions) { |
| 263 | envir().setResultMsg("No subsessions are currently active" ); |
| 264 | return False; |
| 265 | } |
| 266 | |
| 267 | return True; |
| 268 | } |
| 269 | |
| 270 | void AVIFileSink |
| 271 | ::afterGettingFrame(void* clientData, unsigned packetDataSize, |
| 272 | unsigned numTruncatedBytes, |
| 273 | struct timeval presentationTime, |
| 274 | unsigned /*durationInMicroseconds*/) { |
| 275 | AVISubsessionIOState* ioState = (AVISubsessionIOState*)clientData; |
| 276 | if (numTruncatedBytes > 0) { |
    ioState->envir() << "AVIFileSink::afterGettingFrame(): The input frame data was too large for our buffer. "
                     << numTruncatedBytes
                     << " bytes of trailing data were dropped! Correct this by increasing the \"bufferSize\" parameter in the \"createNew()\" call.\n";
| 280 | } |
| 281 | ioState->afterGettingFrame(packetDataSize, presentationTime); |
| 282 | } |
| 283 | |
| 284 | void AVIFileSink::onSourceClosure(void* clientData) { |
| 285 | AVISubsessionIOState* ioState = (AVISubsessionIOState*)clientData; |
| 286 | ioState->onSourceClosure(); |
| 287 | } |
| 288 | |
| 289 | void AVIFileSink::onSourceClosure1() { |
| 290 | // Check whether *all* of the subsession sources have closed. |
| 291 | // If not, do nothing for now: |
| 292 | MediaSubsessionIterator iter(fInputSession); |
| 293 | MediaSubsession* subsession; |
| 294 | while ((subsession = iter.next()) != NULL) { |
| 295 | AVISubsessionIOState* ioState |
| 296 | = (AVISubsessionIOState*)(subsession->miscPtr); |
| 297 | if (ioState == NULL) continue; |
| 298 | |
| 299 | if (ioState->fOurSourceIsActive) return; // this source hasn't closed |
| 300 | } |
| 301 | |
| 302 | completeOutputFile(); |
| 303 | |
| 304 | // Call our specified 'after' function: |
| 305 | if (fAfterFunc != NULL) { |
| 306 | (*fAfterFunc)(fAfterClientData); |
| 307 | } |
| 308 | } |
| 309 | |
| 310 | void AVIFileSink::onRTCPBye(void* clientData) { |
| 311 | AVISubsessionIOState* ioState = (AVISubsessionIOState*)clientData; |
| 312 | |
| 313 | struct timeval timeNow; |
| 314 | gettimeofday(&timeNow, NULL); |
| 315 | unsigned secsDiff |
| 316 | = timeNow.tv_sec - ioState->fOurSink.fStartTime.tv_sec; |
| 317 | |
| 318 | MediaSubsession& subsession = ioState->fOurSubsession; |
| 319 | ioState->envir() << "Received RTCP \"BYE\" on \"" |
| 320 | << subsession.mediumName() |
| 321 | << "/" << subsession.codecName() |
| 322 | << "\" subsession (after " |
| 323 | << secsDiff << " seconds)\n" ; |
| 324 | |
  // Handle the reception of an RTCP "BYE" as if the source had closed:
| 326 | ioState->onSourceClosure(); |
| 327 | } |
| 328 | |
| 329 | void AVIFileSink::addIndexRecord(AVIIndexRecord* newIndexRecord) { |
| 330 | if (fIndexRecordsHead == NULL) { |
| 331 | fIndexRecordsHead = newIndexRecord; |
| 332 | } else { |
| 333 | fIndexRecordsTail->next() = newIndexRecord; |
| 334 | } |
| 335 | fIndexRecordsTail = newIndexRecord; |
| 336 | ++fNumIndexRecords; |
| 337 | } |
| 338 | |
| 339 | void AVIFileSink::completeOutputFile() { |
| 340 | if (fHaveCompletedOutputFile || fOutFid == NULL) return; |
| 341 | |
| 342 | // Update various AVI 'size' fields to take account of the codec data that |
| 343 | // we've now written to the file: |
| 344 | unsigned maxBytesPerSecond = 0; |
| 345 | unsigned numVideoFrames = 0; |
| 346 | unsigned numAudioFrames = 0; |
| 347 | |
| 348 | //// Subsession-specific fields: |
| 349 | MediaSubsessionIterator iter(fInputSession); |
| 350 | MediaSubsession* subsession; |
| 351 | while ((subsession = iter.next()) != NULL) { |
| 352 | AVISubsessionIOState* ioState |
| 353 | = (AVISubsessionIOState*)(subsession->miscPtr); |
| 354 | if (ioState == NULL) continue; |
| 355 | |
| 356 | maxBytesPerSecond += ioState->fMaxBytesPerSecond; |
| 357 | |
| 358 | setWord(ioState->fSTRHFrameCountPosition, ioState->fNumFrames); |
| 359 | if (ioState->fIsVideo) numVideoFrames = ioState->fNumFrames; |
| 360 | else if (ioState->fIsAudio) numAudioFrames = ioState->fNumFrames; |
| 361 | } |
| 362 | |
| 363 | //// Global fields: |
| 364 | add4ByteString("idx1" ); |
| 365 | addWord(fNumIndexRecords*4*4); // the size of all of the index records, which come next: |
| 366 | for (AVIIndexRecord* indexRecord = fIndexRecordsHead; indexRecord != NULL; indexRecord = indexRecord->next()) { |
| 367 | addWord(indexRecord->chunkId()); |
| 368 | addWord(indexRecord->flags()); |
| 369 | addWord(indexRecord->offset()); |
| 370 | addWord(indexRecord->size()); |
| 371 | } |
| 372 | |
| 373 | fRIFFSizeValue += fNumBytesWritten + fNumIndexRecords*4*4 - 4; |
| 374 | setWord(fRIFFSizePosition, fRIFFSizeValue); |
| 375 | |
| 376 | setWord(fAVIHMaxBytesPerSecondPosition, maxBytesPerSecond); |
| 377 | setWord(fAVIHFrameCountPosition, |
| 378 | numVideoFrames > 0 ? numVideoFrames : numAudioFrames); |
| 379 | |
| 380 | fMoviSizeValue += fNumBytesWritten; |
| 381 | setWord(fMoviSizePosition, fMoviSizeValue); |
| 382 | |
| 383 | // We're done: |
| 384 | fHaveCompletedOutputFile = True; |
| 385 | } |
| 386 | |
| 387 | |
| 388 | ////////// AVISubsessionIOState implementation /////////// |
| 389 | |
| 390 | AVISubsessionIOState::AVISubsessionIOState(AVIFileSink& sink, |
| 391 | MediaSubsession& subsession) |
| 392 | : fOurSink(sink), fOurSubsession(subsession), |
| 393 | fMaxBytesPerSecond(0), fIsVideo(False), fIsAudio(False), fIsByteSwappedAudio(False), fNumFrames(0) { |
| 394 | fBuffer = new SubsessionBuffer(fOurSink.fBufferSize); |
| 395 | fPrevBuffer = sink.fPacketLossCompensate |
| 396 | ? new SubsessionBuffer(fOurSink.fBufferSize) : NULL; |
| 397 | |
| 398 | FramedSource* subsessionSource = subsession.readSource(); |
| 399 | fOurSourceIsActive = subsessionSource != NULL; |
| 400 | |
| 401 | fPrevPresentationTime.tv_sec = 0; |
| 402 | fPrevPresentationTime.tv_usec = 0; |
| 403 | } |
| 404 | |
| 405 | AVISubsessionIOState::~AVISubsessionIOState() { |
| 406 | delete fBuffer; delete fPrevBuffer; |
| 407 | } |
| 408 | |
| 409 | void AVISubsessionIOState::setAVIstate(unsigned subsessionIndex) { |
| 410 | fIsVideo = strcmp(fOurSubsession.mediumName(), "video" ) == 0; |
| 411 | fIsAudio = strcmp(fOurSubsession.mediumName(), "audio" ) == 0; |
| 412 | |
| 413 | if (fIsVideo) { |
| 414 | fAVISubsessionTag |
| 415 | = fourChar('0'+subsessionIndex/10,'0'+subsessionIndex%10,'d','c'); |
| 416 | if (strcmp(fOurSubsession.codecName(), "JPEG" ) == 0) { |
| 417 | fAVICodecHandlerType = fourChar('m','j','p','g'); |
| 418 | } else if (strcmp(fOurSubsession.codecName(), "MP4V-ES" ) == 0) { |
| 419 | fAVICodecHandlerType = fourChar('D','I','V','X'); |
| 420 | } else if (strcmp(fOurSubsession.codecName(), "MPV" ) == 0) { |
| 421 | fAVICodecHandlerType = fourChar('m','p','g','1'); // what about MPEG-2? |
| 422 | } else if (strcmp(fOurSubsession.codecName(), "H263-1998" ) == 0 || |
| 423 | strcmp(fOurSubsession.codecName(), "H263-2000" ) == 0) { |
| 424 | fAVICodecHandlerType = fourChar('H','2','6','3'); |
| 425 | } else if (strcmp(fOurSubsession.codecName(), "H264" ) == 0) { |
| 426 | fAVICodecHandlerType = fourChar('H','2','6','4'); |
| 427 | } else { |
| 428 | fAVICodecHandlerType = fourChar('?','?','?','?'); |
| 429 | } |
| 430 | fAVIScale = 1; // ??? ##### |
| 431 | fAVIRate = fOurSink.fMovieFPS; // ??? ##### |
| 432 | fAVISize = fOurSink.fMovieWidth*fOurSink.fMovieHeight*3; // ??? ##### |
| 433 | } else if (fIsAudio) { |
| 434 | fIsByteSwappedAudio = False; // by default |
| 435 | fAVISubsessionTag |
| 436 | = fourChar('0'+subsessionIndex/10,'0'+subsessionIndex%10,'w','b'); |
| 437 | fAVICodecHandlerType = 1; // ??? #### |
| 438 | unsigned numChannels = fOurSubsession.numChannels(); |
| 439 | fAVISamplingFrequency = fOurSubsession.rtpTimestampFrequency(); // default |
| 440 | if (strcmp(fOurSubsession.codecName(), "L16" ) == 0) { |
| 441 | fIsByteSwappedAudio = True; // need to byte-swap data before writing it |
| 442 | fWAVCodecTag = 0x0001; |
| 443 | fAVIScale = fAVISize = 2*numChannels; // 2 bytes/sample |
| 444 | fAVIRate = fAVISize*fAVISamplingFrequency; |
| 445 | } else if (strcmp(fOurSubsession.codecName(), "L8" ) == 0) { |
| 446 | fWAVCodecTag = 0x0001; |
| 447 | fAVIScale = fAVISize = numChannels; // 1 byte/sample |
| 448 | fAVIRate = fAVISize*fAVISamplingFrequency; |
| 449 | } else if (strcmp(fOurSubsession.codecName(), "PCMA" ) == 0) { |
| 450 | fWAVCodecTag = 0x0006; |
| 451 | fAVIScale = fAVISize = numChannels; // 1 byte/sample |
| 452 | fAVIRate = fAVISize*fAVISamplingFrequency; |
| 453 | } else if (strcmp(fOurSubsession.codecName(), "PCMU" ) == 0) { |
| 454 | fWAVCodecTag = 0x0007; |
| 455 | fAVIScale = fAVISize = numChannels; // 1 byte/sample |
| 456 | fAVIRate = fAVISize*fAVISamplingFrequency; |
| 457 | } else if (strcmp(fOurSubsession.codecName(), "MPA" ) == 0) { |
| 458 | fWAVCodecTag = 0x0050; |
| 459 | fAVIScale = fAVISize = 1; |
| 460 | fAVIRate = 0; // ??? ##### |
| 461 | } else { |
| 462 | fWAVCodecTag = 0x0001; // ??? ##### |
| 463 | fAVIScale = fAVISize = 1; |
| 464 | fAVIRate = 0; // ??? ##### |
| 465 | } |
| 466 | } else { // unknown medium |
| 467 | fAVISubsessionTag |
| 468 | = fourChar('0'+subsessionIndex/10,'0'+subsessionIndex%10,'?','?'); |
| 469 | fAVICodecHandlerType = 0; |
| 470 | fAVIScale = fAVISize = 1; |
| 471 | fAVIRate = 0; // ??? ##### |
| 472 | } |
| 473 | } |
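// Note: the tags built above follow AVI's stream-chunk naming convention:
// stream N's data chunks are tagged "NNdc" (compressed video) or "NNwb"
// (audio waveform bytes), where NN is the two-digit stream index - e.g.,
// video data for stream 0 is written in "00dc" chunks.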
| 474 | |
| 475 | void AVISubsessionIOState::afterGettingFrame(unsigned packetDataSize, |
| 476 | struct timeval presentationTime) { |
| 477 | // Begin by checking whether there was a gap in the RTP stream. |
| 478 | // If so, try to compensate for this (if desired): |
| 479 | unsigned short rtpSeqNum |
| 480 | = fOurSubsession.rtpSource()->curPacketRTPSeqNum(); |
| 481 | if (fOurSink.fPacketLossCompensate && fPrevBuffer->bytesInUse() > 0) { |
| 482 | short seqNumGap = rtpSeqNum - fLastPacketRTPSeqNum; |
| 483 | for (short i = 1; i < seqNumGap; ++i) { |
| 484 | // Insert a copy of the previous frame, to compensate for the loss: |
| 485 | useFrame(*fPrevBuffer); |
| 486 | } |
| 487 | } |
| 488 | fLastPacketRTPSeqNum = rtpSeqNum; |
| 489 | |
| 490 | // Now, continue working with the frame that we just got |
| 491 | if (fBuffer->bytesInUse() == 0) { |
| 492 | fBuffer->setPresentationTime(presentationTime); |
| 493 | } |
| 494 | fBuffer->addBytes(packetDataSize); |
| 495 | |
| 496 | useFrame(*fBuffer); |
| 497 | if (fOurSink.fPacketLossCompensate) { |
| 498 | // Save this frame, in case we need it for recovery: |
| 499 | SubsessionBuffer* tmp = fPrevBuffer; // assert: != NULL |
| 500 | fPrevBuffer = fBuffer; |
| 501 | fBuffer = tmp; |
| 502 | } |
| 503 | fBuffer->reset(); // for the next input |
| 504 | |
| 505 | // Now, try getting more frames: |
| 506 | fOurSink.continuePlaying(); |
| 507 | } |
| 508 | |
| 509 | void AVISubsessionIOState::useFrame(SubsessionBuffer& buffer) { |
| 510 | unsigned char* const frameSource = buffer.dataStart(); |
| 511 | unsigned const frameSize = buffer.bytesInUse(); |
| 512 | struct timeval const& presentationTime = buffer.presentationTime(); |
| 513 | if (fPrevPresentationTime.tv_usec != 0||fPrevPresentationTime.tv_sec != 0) { |
| 514 | int uSecondsDiff |
| 515 | = (presentationTime.tv_sec - fPrevPresentationTime.tv_sec)*1000000 |
| 516 | + (presentationTime.tv_usec - fPrevPresentationTime.tv_usec); |
| 517 | if (uSecondsDiff > 0) { |
| 518 | unsigned bytesPerSecond = (unsigned)((frameSize*1000000.0)/uSecondsDiff); |
| 519 | if (bytesPerSecond > fMaxBytesPerSecond) { |
| 520 | fMaxBytesPerSecond = bytesPerSecond; |
| 521 | } |
| 522 | } |
| 523 | } |
| 524 | fPrevPresentationTime = presentationTime; |
| 525 | |
| 526 | if (fIsByteSwappedAudio) { |
| 527 | // We need to swap the 16-bit audio samples from big-endian |
| 528 | // to little-endian order, before writing them to a file: |
| 529 | for (unsigned i = 0; i < frameSize; i += 2) { |
| 530 | unsigned char tmp = frameSource[i]; |
| 531 | frameSource[i] = frameSource[i+1]; |
| 532 | frameSource[i+1] = tmp; |
| 533 | } |
| 534 | } |
| 535 | |
| 536 | // Add an index record for this frame: |
| 537 | AVIIndexRecord* newIndexRecord |
| 538 | = new AVIIndexRecord(fAVISubsessionTag, // chunk id |
| 539 | AVIIF_KEYFRAME, // flags |
| 540 | 4 + fOurSink.fNumBytesWritten, // offset (note: 4 == 'movi') |
| 541 | frameSize); // size |
| 542 | fOurSink.addIndexRecord(newIndexRecord); |
| 543 | |
| 544 | // Write the data into the file: |
| 545 | fOurSink.fNumBytesWritten += fOurSink.addWord(fAVISubsessionTag); |
| 546 | if (strcmp(fOurSubsession.codecName(), "H264" ) == 0) { |
| 547 | // Insert a 'start code' (0x00 0x00 0x00 0x01) in front of the frame: |
| 548 | fOurSink.fNumBytesWritten += fOurSink.addWord(4+frameSize); |
| 549 | fOurSink.fNumBytesWritten += fOurSink.addWord(fourChar(0x00, 0x00, 0x00, 0x01));//add start code |
| 550 | } else { |
| 551 | fOurSink.fNumBytesWritten += fOurSink.addWord(frameSize); |
| 552 | } |
| 553 | fwrite(frameSource, 1, frameSize, fOurSink.fOutFid); |
| 554 | fOurSink.fNumBytesWritten += frameSize; |
| 555 | // Pad to an even length: |
| 556 | if (frameSize%2 != 0) fOurSink.fNumBytesWritten += fOurSink.addByte(0); |
| 557 | |
| 558 | ++fNumFrames; |
| 559 | } |
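// Each frame is thus written as a standard RIFF chunk: the 4-byte stream tag
// ("NNdc"/"NNwb"), a 4-byte little-endian size, the frame data (preceded by a
// 0x00000001 'start code' for H.264), plus a padding byte if needed to keep
// the chunk length even.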
| 560 | |
| 561 | void AVISubsessionIOState::onSourceClosure() { |
| 562 | fOurSourceIsActive = False; |
| 563 | fOurSink.onSourceClosure1(); |
| 564 | } |
| 565 | |
| 566 | |
| 567 | ////////// AVI-specific implementation ////////// |
| 568 | |
| 569 | unsigned AVIFileSink::addWord(unsigned word) { |
| 570 | // Add "word" to the file in little-endian order: |
| 571 | addByte(word); addByte(word>>8); |
| 572 | addByte(word>>16); addByte(word>>24); |
| 573 | |
| 574 | return 4; |
| 575 | } |
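// For example, addWord(0x12345678) appends the bytes 0x78 0x56 0x34 0x12.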
| 576 | |
| 577 | unsigned AVIFileSink::addHalfWord(unsigned short halfWord) { |
| 578 | // Add "halfWord" to the file in little-endian order: |
| 579 | addByte((unsigned char)halfWord); addByte((unsigned char)(halfWord>>8)); |
| 580 | |
| 581 | return 2; |
| 582 | } |
| 583 | |
| 584 | unsigned AVIFileSink::addZeroWords(unsigned numWords) { |
| 585 | for (unsigned i = 0; i < numWords; ++i) { |
| 586 | addWord(0); |
| 587 | } |
| 588 | |
| 589 | return numWords*4; |
| 590 | } |
| 591 | |
| 592 | unsigned AVIFileSink::add4ByteString(char const* str) { |
| 593 | addByte(str[0]); addByte(str[1]); addByte(str[2]); |
| 594 | addByte(str[3] == '\0' ? ' ' : str[3]); // e.g., for "AVI " |
| 595 | |
| 596 | return 4; |
| 597 | } |
| 598 | |
| 599 | void AVIFileSink::setWord(unsigned filePosn, unsigned size) { |
| 600 | do { |
| 601 | if (SeekFile64(fOutFid, filePosn, SEEK_SET) < 0) break; |
| 602 | addWord(size); |
| 603 | if (SeekFile64(fOutFid, 0, SEEK_END) < 0) break; // go back to where we were |
| 604 | |
| 605 | return; |
| 606 | } while (0); |
| 607 | |
  // One of the SeekFile64()s failed, probably because the file is not seekable
| 609 | envir() << "AVIFileSink::setWord(): SeekFile64 failed (err " |
| 610 | << envir().getErrno() << ")\n" ; |
| 611 | } |
| 612 | |
| 613 | // Methods for writing particular file headers. Note the following macros: |
| 614 | |
#define addFileHeader(tag,name) \
unsigned AVIFileSink::addFileHeader_##name() { \
  add4ByteString("" #tag ""); \
  unsigned headerSizePosn = (unsigned)TellFile64(fOutFid); addWord(0); \
  add4ByteString("" #name ""); \
  unsigned ignoredSize = 8;/*don't include size of tag or size fields*/ \
  unsigned size = 12
| 622 | |
#define addFileHeader1(name) \
unsigned AVIFileSink::addFileHeader_##name() { \
  add4ByteString("" #name ""); \
  unsigned headerSizePosn = (unsigned)TellFile64(fOutFid); addWord(0); \
  unsigned ignoredSize = 8;/*don't include size of name or size fields*/ \
  unsigned size = 8
| 629 | |
#define addFileHeaderEnd \
  setWord(headerSizePosn, size-ignoredSize); \
  return size; \
}
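// For illustration only, "addFileHeader1(avih); ... addFileHeaderEnd;" expands
// to (roughly) the following function:
//
//   unsigned AVIFileSink::addFileHeader_avih() {
//     add4ByteString("avih");
//     unsigned headerSizePosn = (unsigned)TellFile64(fOutFid); addWord(0);
//     unsigned ignoredSize = 8;
//     unsigned size = 8;
//     ... // body: "size += addWord(...);" etc.
//     setWord(headerSizePosn, size-ignoredSize); // back-patch the chunk size
//     return size;
//   }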
| 634 | |
| 635 | addFileHeader(RIFF,AVI); |
| 636 | size += addFileHeader_hdrl(); |
| 637 | size += addFileHeader_movi(); |
| 638 | fRIFFSizePosition = headerSizePosn; |
| 639 | fRIFFSizeValue = size-ignoredSize; |
| 640 | addFileHeaderEnd; |
| 641 | |
| 642 | addFileHeader(LIST,hdrl); |
| 643 | size += addFileHeader_avih(); |
| 644 | |
| 645 | // Then, add a "strl" header for each subsession (stream): |
| 646 | // (Make the video subsession (if any) come before the audio subsession.) |
| 647 | unsigned subsessionCount = 0; |
| 648 | MediaSubsessionIterator iter(fInputSession); |
| 649 | MediaSubsession* subsession; |
| 650 | while ((subsession = iter.next()) != NULL) { |
| 651 | fCurrentIOState = (AVISubsessionIOState*)(subsession->miscPtr); |
| 652 | if (fCurrentIOState == NULL) continue; |
| 653 | if (strcmp(subsession->mediumName(), "video" ) != 0) continue; |
| 654 | |
| 655 | fCurrentIOState->setAVIstate(subsessionCount++); |
| 656 | size += addFileHeader_strl(); |
| 657 | } |
| 658 | iter.reset(); |
| 659 | while ((subsession = iter.next()) != NULL) { |
| 660 | fCurrentIOState = (AVISubsessionIOState*)(subsession->miscPtr); |
| 661 | if (fCurrentIOState == NULL) continue; |
| 662 | if (strcmp(subsession->mediumName(), "video" ) == 0) continue; |
| 663 | |
| 664 | fCurrentIOState->setAVIstate(subsessionCount++); |
| 665 | size += addFileHeader_strl(); |
| 666 | } |
| 667 | |
| 668 | // Then add another JUNK entry |
| 669 | ++fJunkNumber; |
| 670 | size += addFileHeader_JUNK(); |
| 671 | addFileHeaderEnd; |
| 672 | |
| 673 | #define AVIF_HASINDEX 0x00000010 // Index at end of file? |
| 674 | #define AVIF_MUSTUSEINDEX 0x00000020 |
| 675 | #define AVIF_ISINTERLEAVED 0x00000100 |
| 676 | #define AVIF_TRUSTCKTYPE 0x00000800 // Use CKType to find key frames? |
| 677 | #define AVIF_WASCAPTUREFILE 0x00010000 |
| 678 | #define AVIF_COPYRIGHTED 0x00020000 |
| 679 | |
| 680 | addFileHeader1(avih); |
| 681 | unsigned usecPerFrame = fMovieFPS == 0 ? 0 : 1000000/fMovieFPS; |
| 682 | size += addWord(usecPerFrame); // dwMicroSecPerFrame |
| 683 | fAVIHMaxBytesPerSecondPosition = (unsigned)TellFile64(fOutFid); |
| 684 | size += addWord(0); // dwMaxBytesPerSec (fill in later) |
| 685 | size += addWord(0); // dwPaddingGranularity |
| 686 | size += addWord(AVIF_TRUSTCKTYPE|AVIF_HASINDEX|AVIF_ISINTERLEAVED); // dwFlags |
| 687 | fAVIHFrameCountPosition = (unsigned)TellFile64(fOutFid); |
| 688 | size += addWord(0); // dwTotalFrames (fill in later) |
| 689 | size += addWord(0); // dwInitialFrame |
| 690 | size += addWord(fNumSubsessions); // dwStreams |
| 691 | size += addWord(fBufferSize); // dwSuggestedBufferSize |
| 692 | size += addWord(fMovieWidth); // dwWidth |
| 693 | size += addWord(fMovieHeight); // dwHeight |
| 694 | size += addZeroWords(4); // dwReserved |
| 695 | addFileHeaderEnd; |
| 696 | |
| 697 | addFileHeader(LIST,strl); |
| 698 | size += addFileHeader_strh(); |
| 699 | size += addFileHeader_strf(); |
| 700 | fJunkNumber = 0; |
| 701 | size += addFileHeader_JUNK(); |
| 702 | addFileHeaderEnd; |
| 703 | |
| 704 | addFileHeader1(strh); |
| 705 | size += add4ByteString(fCurrentIOState->fIsVideo ? "vids" : |
| 706 | fCurrentIOState->fIsAudio ? "auds" : |
| 707 | "????" ); // fccType |
| 708 | size += addWord(fCurrentIOState->fAVICodecHandlerType); // fccHandler |
| 709 | size += addWord(0); // dwFlags |
| 710 | size += addWord(0); // wPriority + wLanguage |
| 711 | size += addWord(0); // dwInitialFrames |
| 712 | size += addWord(fCurrentIOState->fAVIScale); // dwScale |
| 713 | size += addWord(fCurrentIOState->fAVIRate); // dwRate |
| 714 | size += addWord(0); // dwStart |
| 715 | fCurrentIOState->fSTRHFrameCountPosition = (unsigned)TellFile64(fOutFid); |
| 716 | size += addWord(0); // dwLength (fill in later) |
| 717 | size += addWord(fBufferSize); // dwSuggestedBufferSize |
| 718 | size += addWord((unsigned)-1); // dwQuality |
| 719 | size += addWord(fCurrentIOState->fAVISize); // dwSampleSize |
| 720 | size += addWord(0); // rcFrame (start) |
| 721 | if (fCurrentIOState->fIsVideo) { |
| 722 | size += addHalfWord(fMovieWidth); |
| 723 | size += addHalfWord(fMovieHeight); |
| 724 | } else { |
| 725 | size += addWord(0); |
| 726 | } |
| 727 | addFileHeaderEnd; |
| 728 | |
| 729 | addFileHeader1(strf); |
| 730 | if (fCurrentIOState->fIsVideo) { |
| 731 | // Add a BITMAPINFO header: |
    unsigned extraDataSize = 0;
| 733 | size += addWord(10*4 + extraDataSize); // size |
| 734 | size += addWord(fMovieWidth); |
| 735 | size += addWord(fMovieHeight); |
| 736 | size += addHalfWord(1); // planes |
| 737 | size += addHalfWord(24); // bits-per-sample ##### |
| 738 | size += addWord(fCurrentIOState->fAVICodecHandlerType); // compr. type |
| 739 | size += addWord(fCurrentIOState->fAVISize); |
| 740 | size += addZeroWords(4); // ??? ##### |
| 741 | // Later, add extra data here (if any) ##### |
| 742 | } else if (fCurrentIOState->fIsAudio) { |
| 743 | // Add a WAVFORMATEX header: |
| 744 | size += addHalfWord(fCurrentIOState->fWAVCodecTag); |
| 745 | unsigned numChannels = fCurrentIOState->fOurSubsession.numChannels(); |
| 746 | size += addHalfWord(numChannels); |
| 747 | size += addWord(fCurrentIOState->fAVISamplingFrequency); |
| 748 | size += addWord(fCurrentIOState->fAVIRate); // bytes per second |
| 749 | size += addHalfWord(fCurrentIOState->fAVISize); // block alignment |
| 750 | unsigned bitsPerSample = (fCurrentIOState->fAVISize*8)/numChannels; |
| 751 | size += addHalfWord(bitsPerSample); |
| 752 | if (strcmp(fCurrentIOState->fOurSubsession.codecName(), "MPA" ) == 0) { |
| 753 | // Assume MPEG layer II audio (not MP3): ##### |
| 754 | size += addHalfWord(22); // wav_extra_size |
| 755 | size += addHalfWord(2); // fwHeadLayer |
| 756 | size += addWord(8*fCurrentIOState->fAVIRate); // dwHeadBitrate ##### |
| 757 | size += addHalfWord(numChannels == 2 ? 1: 8); // fwHeadMode |
| 758 | size += addHalfWord(0); // fwHeadModeExt |
| 759 | size += addHalfWord(1); // wHeadEmphasis |
| 760 | size += addHalfWord(16); // fwHeadFlags |
| 761 | size += addWord(0); // dwPTSLow |
| 762 | size += addWord(0); // dwPTSHigh |
| 763 | } |
| 764 | } |
| 765 | addFileHeaderEnd; |
| 766 | |
| 767 | #define AVI_MASTER_INDEX_SIZE 256 |
| 768 | |
| 769 | addFileHeader1(JUNK); |
| 770 | if (fJunkNumber == 0) { |
| 771 | size += addHalfWord(4); // wLongsPerEntry |
| 772 | size += addHalfWord(0); // bIndexSubType + bIndexType |
| 773 | size += addWord(0); // nEntriesInUse ##### |
| 774 | size += addWord(fCurrentIOState->fAVISubsessionTag); // dwChunkId |
| 775 | size += addZeroWords(2); // dwReserved |
| 776 | size += addZeroWords(AVI_MASTER_INDEX_SIZE*4); |
| 777 | } else { |
| 778 | size += add4ByteString("odml" ); |
| 779 | size += add4ByteString("dmlh" ); |
| 780 | unsigned wtfCount = 248; |
| 781 | size += addWord(wtfCount); // ??? ##### |
| 782 | size += addZeroWords(wtfCount/4); |
| 783 | } |
| 784 | addFileHeaderEnd; |
| 785 | |
| 786 | addFileHeader(LIST,movi); |
| 787 | fMoviSizePosition = headerSizePosn; |
| 788 | fMoviSizeValue = size-ignoredSize; |
| 789 | addFileHeaderEnd; |
| 790 | |