1 | /********** |
2 | This library is free software; you can redistribute it and/or modify it under |
3 | the terms of the GNU Lesser General Public License as published by the |
4 | Free Software Foundation; either version 3 of the License, or (at your |
5 | option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.) |
6 | |
7 | This library is distributed in the hope that it will be useful, but WITHOUT |
8 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS |
9 | FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for |
10 | more details. |
11 | |
12 | You should have received a copy of the GNU Lesser General Public License |
13 | along with this library; if not, write to the Free Software Foundation, Inc., |
14 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
15 | **********/ |
16 | // "liveMedia" |
17 | // Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved. |
// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
// on demand, from a WAV audio file.
20 | // Implementation |
21 | |
22 | #include "WAVAudioFileServerMediaSubsession.hh" |
23 | #include "WAVAudioFileSource.hh" |
24 | #include "uLawAudioFilter.hh" |
25 | #include "SimpleRTPSink.hh" |
26 | |
27 | WAVAudioFileServerMediaSubsession* WAVAudioFileServerMediaSubsession |
28 | ::createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource, |
29 | Boolean convertToULaw) { |
30 | return new WAVAudioFileServerMediaSubsession(env, fileName, |
31 | reuseFirstSource, convertToULaw); |
32 | } |
33 | |
34 | WAVAudioFileServerMediaSubsession |
35 | ::WAVAudioFileServerMediaSubsession(UsageEnvironment& env, char const* fileName, |
36 | Boolean reuseFirstSource, Boolean convertToULaw) |
37 | : FileServerMediaSubsession(env, fileName, reuseFirstSource), |
38 | fConvertToULaw(convertToULaw) { |
39 | } |
40 | |
41 | WAVAudioFileServerMediaSubsession |
42 | ::~WAVAudioFileServerMediaSubsession() { |
43 | } |
44 | |
45 | void WAVAudioFileServerMediaSubsession |
46 | ::seekStreamSource(FramedSource* inputSource, double& seekNPT, double streamDuration, u_int64_t& numBytes) { |
47 | WAVAudioFileSource* wavSource; |
48 | if (fBitsPerSample > 8) { |
49 | // "inputSource" is a filter; its input source is the original WAV file source: |
50 | wavSource = (WAVAudioFileSource*)(((FramedFilter*)inputSource)->inputSource()); |
51 | } else { |
52 | // "inputSource" is the original WAV file source: |
53 | wavSource = (WAVAudioFileSource*)inputSource; |
54 | } |
55 | |
56 | unsigned seekSampleNumber = (unsigned)(seekNPT*fSamplingFrequency); |
57 | unsigned seekByteNumber = seekSampleNumber*((fNumChannels*fBitsPerSample)/8); |
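  // For illustration (hypothetical figures): with 16-bit stereo PCM at 44100 Hz,
  // a seek to NPT 2.0s gives seekSampleNumber = 2.0*44100 = 88200 and
  // seekByteNumber = 88200*((2*16)/8) = 352800.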
58 | |
59 | wavSource->seekToPCMByte(seekByteNumber); |
60 | |
61 | setStreamSourceDuration(inputSource, streamDuration, numBytes); |
62 | } |
63 | |
64 | void WAVAudioFileServerMediaSubsession |
65 | ::setStreamSourceDuration(FramedSource* inputSource, double streamDuration, u_int64_t& numBytes) { |
66 | WAVAudioFileSource* wavSource; |
67 | if (fBitsPerSample > 8) { |
68 | // "inputSource" is a filter; its input source is the original WAV file source: |
69 | wavSource = (WAVAudioFileSource*)(((FramedFilter*)inputSource)->inputSource()); |
70 | } else { |
71 | // "inputSource" is the original WAV file source: |
72 | wavSource = (WAVAudioFileSource*)inputSource; |
73 | } |
74 | |
75 | unsigned numDurationSamples = (unsigned)(streamDuration*fSamplingFrequency); |
76 | unsigned numDurationBytes = numDurationSamples*((fNumChannels*fBitsPerSample)/8); |
77 | numBytes = (u_int64_t)numDurationBytes; |
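  // For illustration (hypothetical figures): a 3.0s duration of 8-bit mono audio
  // at 8000 Hz gives numDurationSamples = 24000 and
  // numDurationBytes = 24000*((1*8)/8) = 24000, i.e. "numBytes" = 24000.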
78 | |
79 | wavSource->limitNumBytesToStream(numDurationBytes); |
80 | } |
81 | |
82 | void WAVAudioFileServerMediaSubsession |
83 | ::setStreamSourceScale(FramedSource* inputSource, float scale) { |
84 | int iScale = (int)scale; |
85 | WAVAudioFileSource* wavSource; |
86 | if (fBitsPerSample > 8) { |
87 | // "inputSource" is a filter; its input source is the original WAV file source: |
88 | wavSource = (WAVAudioFileSource*)(((FramedFilter*)inputSource)->inputSource()); |
89 | } else { |
90 | // "inputSource" is the original WAV file source: |
91 | wavSource = (WAVAudioFileSource*)inputSource; |
92 | } |
93 | |
94 | wavSource->setScaleFactor(iScale); |
95 | } |
96 | |
97 | FramedSource* WAVAudioFileServerMediaSubsession |
98 | ::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) { |
99 | FramedSource* resultSource = NULL; |
100 | do { |
101 | WAVAudioFileSource* wavSource = WAVAudioFileSource::createNew(envir(), fFileName); |
102 | if (wavSource == NULL) break; |
103 | |
104 | // Get attributes of the audio source: |
105 | |
106 | fAudioFormat = wavSource->getAudioFormat(); |
107 | fBitsPerSample = wavSource->bitsPerSample(); |
    // We handle only 4, 8, 16, 20, 24 bits-per-sample audio:
109 | if (fBitsPerSample%4 != 0 || fBitsPerSample < 4 || fBitsPerSample > 24 || fBitsPerSample == 12) { |
110 | envir() << "The input file contains " << fBitsPerSample << " bit-per-sample audio, which we don't handle\n" ; |
111 | break; |
112 | } |
113 | fSamplingFrequency = wavSource->samplingFrequency(); |
114 | fNumChannels = wavSource->numChannels(); |
115 | unsigned bitsPerSecond = fSamplingFrequency*fBitsPerSample*fNumChannels; |
116 | |
117 | fFileDuration = (float)((8.0*wavSource->numPCMBytes())/(fSamplingFrequency*fNumChannels*fBitsPerSample)); |
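    // For illustration (hypothetical figures): 1411200 PCM bytes of 16-bit stereo
    // audio at 44100 Hz give a duration of (8.0*1411200)/(44100*2*16) = 8.0 seconds.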
118 | |
119 | // Add in any filter necessary to transform the data prior to streaming: |
120 | resultSource = wavSource; // by default |
121 | if (fAudioFormat == WA_PCM) { |
122 | if (fBitsPerSample == 16) { |
123 | // Note that samples in the WAV audio file are in little-endian order. |
124 | if (fConvertToULaw) { |
125 | // Add a filter that converts from raw 16-bit PCM audio to 8-bit u-law audio: |
126 | resultSource = uLawFromPCMAudioSource::createNew(envir(), wavSource, 1/*little-endian*/); |
127 | bitsPerSecond /= 2; |
128 | } else { |
129 | // Add a filter that converts from little-endian to network (big-endian) order: |
130 | resultSource = EndianSwap16::createNew(envir(), wavSource); |
131 | } |
132 | } else if (fBitsPerSample == 20 || fBitsPerSample == 24) { |
133 | // Add a filter that converts from little-endian to network (big-endian) order: |
134 | resultSource = EndianSwap24::createNew(envir(), wavSource); |
135 | } |
136 | } |
137 | |
138 | estBitrate = (bitsPerSecond+500)/1000; // kbps |
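    // (For illustration, with hypothetical figures: 16-bit stereo PCM at 44100 Hz gives
    // bitsPerSecond = 44100*16*2 = 1411200, so estBitrate = (1411200+500)/1000 = 1411 kbps.)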
139 | return resultSource; |
140 | } while (0); |
141 | |
142 | // An error occurred: |
143 | Medium::close(resultSource); |
144 | return NULL; |
145 | } |
146 | |
147 | RTPSink* WAVAudioFileServerMediaSubsession |
148 | ::createNewRTPSink(Groupsock* rtpGroupsock, |
149 | unsigned char rtpPayloadTypeIfDynamic, |
150 | FramedSource* /*inputSource*/) { |
151 | do { |
152 | char const* mimeType; |
153 | unsigned char payloadFormatCode = rtpPayloadTypeIfDynamic; // by default, unless a static RTP payload type can be used |
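    // Note: the static payload type codes assigned below (e.g. 0 for PCMU/8000/1,
    // 8 for PCMA/8000/1, 10 and 11 for L16/44100) follow the assignments in RFC 3551.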
154 | if (fAudioFormat == WA_PCM) { |
155 | if (fBitsPerSample == 16) { |
156 | if (fConvertToULaw) { |
157 | mimeType = "PCMU" ; |
158 | if (fSamplingFrequency == 8000 && fNumChannels == 1) { |
159 | payloadFormatCode = 0; // a static RTP payload type |
160 | } |
161 | } else { |
162 | mimeType = "L16" ; |
163 | if (fSamplingFrequency == 44100 && fNumChannels == 2) { |
164 | payloadFormatCode = 10; // a static RTP payload type |
165 | } else if (fSamplingFrequency == 44100 && fNumChannels == 1) { |
166 | payloadFormatCode = 11; // a static RTP payload type |
167 | } |
168 | } |
169 | } else if (fBitsPerSample == 20) { |
170 | mimeType = "L20" ; |
171 | } else if (fBitsPerSample == 24) { |
172 | mimeType = "L24" ; |
173 | } else { // fBitsPerSample == 8 (we assume that fBitsPerSample == 4 is only for WA_IMA_ADPCM) |
174 | mimeType = "L8" ; |
175 | } |
176 | } else if (fAudioFormat == WA_PCMU) { |
177 | mimeType = "PCMU" ; |
178 | if (fSamplingFrequency == 8000 && fNumChannels == 1) { |
179 | payloadFormatCode = 0; // a static RTP payload type |
180 | } |
181 | } else if (fAudioFormat == WA_PCMA) { |
182 | mimeType = "PCMA" ; |
183 | if (fSamplingFrequency == 8000 && fNumChannels == 1) { |
184 | payloadFormatCode = 8; // a static RTP payload type |
185 | } |
186 | } else if (fAudioFormat == WA_IMA_ADPCM) { |
187 | mimeType = "DVI4" ; |
188 | // Use a static payload type, if one is defined: |
189 | if (fNumChannels == 1) { |
190 | if (fSamplingFrequency == 8000) { |
191 | payloadFormatCode = 5; // a static RTP payload type |
192 | } else if (fSamplingFrequency == 16000) { |
193 | payloadFormatCode = 6; // a static RTP payload type |
194 | } else if (fSamplingFrequency == 11025) { |
195 | payloadFormatCode = 16; // a static RTP payload type |
196 | } else if (fSamplingFrequency == 22050) { |
197 | payloadFormatCode = 17; // a static RTP payload type |
198 | } |
199 | } |
200 | } else { //unknown format |
201 | break; |
202 | } |
203 | |
    return SimpleRTPSink::createNew(envir(), rtpGroupsock,
                                    payloadFormatCode, fSamplingFrequency,
                                    "audio", mimeType, fNumChannels);
207 | } while (0); |
208 | |
209 | // An error occurred: |
210 | return NULL; |
211 | } |
212 | |
213 | void WAVAudioFileServerMediaSubsession::testScaleFactor(float& scale) { |
214 | if (fFileDuration <= 0.0) { |
    // The file is non-seekable, so it is probably a live input source.
    // We don't support scale factors other than 1.
217 | scale = 1; |
218 | } else { |
219 | // We support any integral scale, other than 0 |
220 | int iScale = scale < 0.0 ? (int)(scale - 0.5) : (int)(scale + 0.5); // round |
221 | if (iScale == 0) iScale = 1; |
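    // (For example, a requested scale of 2.4 rounds to 2, -1.7 rounds to -2,
    // and 0.3, which would round to 0, is instead forced to 1.)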
222 | scale = (float)iScale; |
223 | } |
224 | } |
225 | |
226 | float WAVAudioFileServerMediaSubsession::duration() const { |
227 | return fFileDuration; |
228 | } |
229 | |