/**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.

You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
// RTP sink for H.264 video (RFC 3984)
// Implementation

#include "H264VideoRTPSink.hh"
#include "H264VideoStreamFramer.hh"
#include "Base64.hh"
#include "H264VideoRTPSource.hh" // for "parseSPropParameterSets()"

////////// H264VideoRTPSink implementation //////////

H264VideoRTPSink
::H264VideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat,
                   u_int8_t const* sps, unsigned spsSize, u_int8_t const* pps, unsigned ppsSize)
  : H264or5VideoRTPSink(264, env, RTPgs, rtpPayloadFormat,
                        NULL, 0, sps, spsSize, pps, ppsSize) {
}

H264VideoRTPSink::~H264VideoRTPSink() {
}

H264VideoRTPSink* H264VideoRTPSink
::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat) {
  return new H264VideoRTPSink(env, RTPgs, rtpPayloadFormat);
}

H264VideoRTPSink* H264VideoRTPSink
::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat,
            u_int8_t const* sps, unsigned spsSize, u_int8_t const* pps, unsigned ppsSize) {
  return new H264VideoRTPSink(env, RTPgs, rtpPayloadFormat, sps, spsSize, pps, ppsSize);
}

H264VideoRTPSink* H264VideoRTPSink
::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat,
            char const* sPropParameterSetsStr) {
  u_int8_t* sps = NULL; unsigned spsSize = 0;
  u_int8_t* pps = NULL; unsigned ppsSize = 0;

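  // "sPropParameterSetsStr" is a comma-separated list of Base64-encoded parameter sets
  // (the same form as a "sprop-parameter-sets" SDP attribute value).  Decode it, then
  // pick out the SPS and PPS records by NAL unit type: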
  unsigned numSPropRecords;
  SPropRecord* sPropRecords = parseSPropParameterSets(sPropParameterSetsStr, numSPropRecords);
  for (unsigned i = 0; i < numSPropRecords; ++i) {
    if (sPropRecords[i].sPropLength == 0) continue; // bad data
    u_int8_t nal_unit_type = (sPropRecords[i].sPropBytes[0])&0x1F;
    if (nal_unit_type == 7/*SPS*/) {
      sps = sPropRecords[i].sPropBytes;
      spsSize = sPropRecords[i].sPropLength;
    } else if (nal_unit_type == 8/*PPS*/) {
      pps = sPropRecords[i].sPropBytes;
      ppsSize = sPropRecords[i].sPropLength;
    }
  }

  H264VideoRTPSink* result
    = new H264VideoRTPSink(env, RTPgs, rtpPayloadFormat, sps, spsSize, pps, ppsSize);
  delete[] sPropRecords;
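  // (Deleting the records here is safe, because the "H264or5VideoRTPSink" base-class
  // constructor makes its own copies of the SPS and PPS data rather than keeping these pointers.)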

  return result;
}

Boolean H264VideoRTPSink::sourceIsCompatibleWithUs(MediaSource& source) {
  // Our source must be an appropriate framer:
  return source.isH264VideoStreamFramer();
}

char const* H264VideoRTPSink::auxSDPLine() {
  // Generate a new "a=fmtp:" line each time, using our SPS and PPS (if we have them),
  // otherwise parameters from our framer source (in case they've changed since the last time that
  // we were called):
  H264or5VideoStreamFramer* framerSource = NULL;
  u_int8_t* vpsDummy = NULL; unsigned vpsDummySize = 0;
  u_int8_t* sps = fSPS; unsigned spsSize = fSPSSize;
  u_int8_t* pps = fPPS; unsigned ppsSize = fPPSSize;
  if (sps == NULL || pps == NULL) {
    // We need to get SPS and PPS from our framer source:
    if (fOurFragmenter == NULL) return NULL; // we don't yet have a fragmenter (and therefore no source)
    framerSource = (H264or5VideoStreamFramer*)(fOurFragmenter->inputSource());
    if (framerSource == NULL) return NULL; // we don't yet have a source

    framerSource->getVPSandSPSandPPS(vpsDummy, vpsDummySize, sps, spsSize, pps, ppsSize);
    if (sps == NULL || pps == NULL) return NULL; // our source isn't ready
  }

  // Set up the "a=fmtp:" SDP line for this stream:
  u_int8_t* spsWEB = new u_int8_t[spsSize]; // "WEB" means "Without Emulation Bytes"
  unsigned spsWEBSize = removeH264or5EmulationBytes(spsWEB, spsSize, sps, spsSize);
  if (spsWEBSize < 4) { // Bad SPS size => assume our source isn't ready
    delete[] spsWEB;
    return NULL;
  }
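  // The "profile-level-id" is the hex representation of the three bytes that immediately
  // follow the SPS NAL unit header: profile_idc, the constraint/compatibility flags byte,
  // and level_idc: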
  u_int32_t profileLevelId = (spsWEB[1]<<16) | (spsWEB[2]<<8) | spsWEB[3];
  delete[] spsWEB;

  char* sps_base64 = base64Encode((char*)sps, spsSize);
  char* pps_base64 = base64Encode((char*)pps, ppsSize);

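  // "packetization-mode=1" declares RFC 3984's non-interleaved mode: single NAL unit
  // packets, STAP-As, and FU-As are allowed, and NAL units are sent in decoding order: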
  char const* fmtpFmt =
    "a=fmtp:%d packetization-mode=1"
    ";profile-level-id=%06X"
    ";sprop-parameter-sets=%s,%s\r\n";
  unsigned fmtpFmtSize = strlen(fmtpFmt)
    + 3 /* max number of digits in the payload type number */
    + 6 /* 6 hex digits for the 3-byte "profile-level-id" */
    + strlen(sps_base64) + strlen(pps_base64);
  char* fmtp = new char[fmtpFmtSize];
  sprintf(fmtp, fmtpFmt,
          rtpPayloadType(),
          profileLevelId,
          sps_base64, pps_base64);

  delete[] sps_base64;
  delete[] pps_base64;

  delete[] fFmtpSDPLine; fFmtpSDPLine = fmtp;
  return fFmtpSDPLine;
}