1 | /********** |
2 | This library is free software; you can redistribute it and/or modify it under |
3 | the terms of the GNU Lesser General Public License as published by the |
4 | Free Software Foundation; either version 3 of the License, or (at your |
5 | option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.) |
6 | |
7 | This library is distributed in the hope that it will be useful, but WITHOUT |
8 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS |
9 | FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for |
10 | more details. |
11 | |
12 | You should have received a copy of the GNU Lesser General Public License |
13 | along with this library; if not, write to the Free Software Foundation, Inc., |
14 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
15 | **********/ |
16 | // "liveMedia" |
17 | // Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved. |
18 | // A filter that breaks up an MPEG (1,2) audio elementary stream into frames |
19 | // Implementation |
20 | |
21 | #include "MPEG1or2AudioStreamFramer.hh" |
22 | #include "StreamParser.hh" |
23 | #include "MP3Internals.hh" |
24 | #include <GroupsockHelper.hh> |
25 | |
26 | ////////// MPEG1or2AudioStreamParser definition ////////// |
27 | |
28 | class MPEG1or2AudioStreamParser: public StreamParser { |
29 | public: |
30 | MPEG1or2AudioStreamParser(MPEG1or2AudioStreamFramer* usingSource, |
31 | FramedSource* inputSource); |
32 | virtual ~MPEG1or2AudioStreamParser(); |
33 | |
34 | public: |
35 | unsigned parse(unsigned& numTruncatedBytes); |
      // returns the size of the frame that was acquired, or 0 if none was acquired
37 | |
38 | void registerReadInterest(unsigned char* to, unsigned maxSize); |
39 | |
40 | MP3FrameParams const& currentFrame() const { return fCurrentFrame; } |
41 | |
42 | private: |
43 | unsigned char* fTo; |
44 | unsigned fMaxSize; |
45 | |
46 | // Parameters of the most recently read frame: |
47 | MP3FrameParams fCurrentFrame; // also works for layer I or II |
48 | }; |
49 | |
50 | |
51 | ////////// MPEG1or2AudioStreamFramer implementation ////////// |
52 | |
53 | MPEG1or2AudioStreamFramer |
54 | ::MPEG1or2AudioStreamFramer(UsageEnvironment& env, FramedSource* inputSource, |
55 | Boolean syncWithInputSource) |
56 | : FramedFilter(env, inputSource), |
57 | fSyncWithInputSource(syncWithInputSource) { |
58 | reset(); |
59 | |
60 | fParser = new MPEG1or2AudioStreamParser(this, inputSource); |
61 | } |
62 | |
63 | MPEG1or2AudioStreamFramer::~MPEG1or2AudioStreamFramer() { |
64 | delete fParser; |
65 | } |
66 | |
67 | MPEG1or2AudioStreamFramer* |
68 | MPEG1or2AudioStreamFramer::createNew(UsageEnvironment& env, |
69 | FramedSource* inputSource, |
70 | Boolean syncWithInputSource) { |
71 | // Need to add source type checking here??? ##### |
72 | return new MPEG1or2AudioStreamFramer(env, inputSource, syncWithInputSource); |
73 | } |
74 | |
75 | void MPEG1or2AudioStreamFramer::flushInput() { |
76 | reset(); |
77 | fParser->flushInput(); |
78 | } |
79 | |
80 | void MPEG1or2AudioStreamFramer::reset() { |
81 | // Use the current wallclock time as the initial 'presentation time': |
82 | struct timeval timeNow; |
83 | gettimeofday(&timeNow, NULL); |
84 | resetPresentationTime(timeNow); |
85 | } |
86 | |
87 | void MPEG1or2AudioStreamFramer |
88 | ::resetPresentationTime(struct timeval newPresentationTime) { |
89 | fNextFramePresentationTime = newPresentationTime; |
90 | } |
91 | |
92 | void MPEG1or2AudioStreamFramer::doGetNextFrame() { |
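  // Tell the parser where the caller wants the next frame delivered, then
  // (re)start parsing.  If the parser runs out of buffered input, it arranges
  // for more data to be read, and our static continueReadProcessing()
  // callback is invoked once that data arrives.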
93 | fParser->registerReadInterest(fTo, fMaxSize); |
94 | continueReadProcessing(); |
95 | } |
96 | |
97 | #define MILLION 1000000 |
98 | |
99 | static unsigned const numSamplesByLayer[4] = {0, 384, 1152, 1152}; |
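// (Samples per frame, indexed by the parsed 'layer' value: 384 for Layer I,
//  1152 for Layers II and III -- the MPEG-1 per-frame counts.  Index 0
//  corresponds to the reserved/invalid layer code.)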
100 | |
101 | struct timeval MPEG1or2AudioStreamFramer::currentFramePlayTime() const { |
102 | MP3FrameParams const& fr = fParser->currentFrame(); |
103 | unsigned const numSamples = numSamplesByLayer[fr.layer]; |
104 | |
105 | struct timeval result; |
106 | unsigned const freq = fr.samplingFreq*(1 + fr.isMPEG2); |
107 | if (freq == 0) { |
108 | result.tv_sec = 0; |
109 | result.tv_usec = 0; |
110 | return result; |
111 | } |
112 | |
  // The frame's play time is numSamples/freq seconds:
114 | unsigned const uSeconds |
115 | = ((numSamples*2*MILLION)/freq + 1)/2; // rounds to nearest integer |
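  // (For example, an MPEG-1 Layer III frame at 44100 Hz: 1152 samples/44100 Hz
  //  is about 26122 microseconds, and ((1152*2*MILLION)/44100 + 1)/2 == 26122.)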
116 | |
117 | result.tv_sec = uSeconds/MILLION; |
118 | result.tv_usec = uSeconds%MILLION; |
119 | return result; |
120 | } |
121 | |
122 | void MPEG1or2AudioStreamFramer |
123 | ::continueReadProcessing(void* clientData, |
124 | unsigned char* /*ptr*/, unsigned /*size*/, |
125 | struct timeval presentationTime) { |
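  // This static member function is the callback that the StreamParser invokes
  // (it is registered in the MPEG1or2AudioStreamParser constructor below)
  // after more data has been read from the input source.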
126 | MPEG1or2AudioStreamFramer* framer = (MPEG1or2AudioStreamFramer*)clientData; |
127 | if (framer->fSyncWithInputSource) { |
128 | framer->resetPresentationTime(presentationTime); |
129 | } |
130 | framer->continueReadProcessing(); |
131 | } |
132 | |
133 | void MPEG1or2AudioStreamFramer::continueReadProcessing() { |
134 | unsigned acquiredFrameSize = fParser->parse(fNumTruncatedBytes); |
135 | if (acquiredFrameSize > 0) { |
136 | // We were able to acquire a frame from the input. |
137 | // It has already been copied to the reader's space. |
138 | fFrameSize = acquiredFrameSize; |
139 | |
140 | // Also set the presentation time, and increment it for next time, |
141 | // based on the length of this frame: |
142 | fPresentationTime = fNextFramePresentationTime; |
143 | struct timeval framePlayTime = currentFramePlayTime(); |
144 | fDurationInMicroseconds = framePlayTime.tv_sec*MILLION + framePlayTime.tv_usec; |
145 | fNextFramePresentationTime.tv_usec += framePlayTime.tv_usec; |
146 | fNextFramePresentationTime.tv_sec |
147 | += framePlayTime.tv_sec + fNextFramePresentationTime.tv_usec/MILLION; |
148 | fNextFramePresentationTime.tv_usec %= MILLION; |
149 | |
150 | // Call our own 'after getting' function. Because we're not a 'leaf' |
151 | // source, we can call this directly, without risking infinite recursion. |
152 | afterGetting(this); |
153 | } else { |
154 | // We were unable to parse a complete frame from the input, because: |
155 | // - we had to read more data from the source stream, or |
156 | // - the source stream has ended. |
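    // In either case the appropriate follow-up (another read, or a call to
    // the closure handler) has already been arranged, so there is nothing
    // more to do here.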
157 | } |
158 | } |
159 | |
160 | |
161 | ////////// MPEG1or2AudioStreamParser implementation ////////// |
162 | |
163 | MPEG1or2AudioStreamParser |
164 | ::MPEG1or2AudioStreamParser(MPEG1or2AudioStreamFramer* usingSource, |
165 | FramedSource* inputSource) |
166 | : StreamParser(inputSource, FramedSource::handleClosure, usingSource, |
167 | &MPEG1or2AudioStreamFramer::continueReadProcessing, usingSource) { |
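  // The base-class StreamParser is told to call FramedSource::handleClosure()
  // on "usingSource" if the input source closes, and to call the static
  // MPEG1or2AudioStreamFramer::continueReadProcessing() (with "usingSource"
  // as client data) whenever newly-read input data is available for parsing.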
168 | } |
169 | |
170 | MPEG1or2AudioStreamParser::~MPEG1or2AudioStreamParser() { |
171 | } |
172 | |
173 | void MPEG1or2AudioStreamParser::registerReadInterest(unsigned char* to, |
174 | unsigned maxSize) { |
175 | fTo = to; |
176 | fMaxSize = maxSize; |
177 | } |
178 | |
179 | unsigned MPEG1or2AudioStreamParser::parse(unsigned& numTruncatedBytes) { |
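  // Note: the StreamParser routines used below (test4Bytes(), getBytes(),
  // skipBytes()) throw an (int) exception if they run out of buffered input
  // data; the "catch" clause below turns that into a return value of 0, and
  // parsing later resumes from the state saved by saveParserState().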
180 | try { |
181 | saveParserState(); |
182 | |
    // We expect an MPEG audio header (first 11 bits set to 1) at the start:
184 | while (((fCurrentFrame.hdr = test4Bytes())&0xFFE00000) != 0xFFE00000) { |
185 | skipBytes(1); |
186 | saveParserState(); |
187 | } |
188 | |
189 | fCurrentFrame.setParamsFromHeader(); |
190 | |
191 | // Copy the frame to the requested destination: |
192 | unsigned frameSize = fCurrentFrame.frameSize + 4; // include header |
193 | if (frameSize > fMaxSize) { |
194 | numTruncatedBytes = frameSize - fMaxSize; |
195 | frameSize = fMaxSize; |
196 | } else { |
197 | numTruncatedBytes = 0; |
198 | } |
199 | |
200 | getBytes(fTo, frameSize); |
201 | skipBytes(numTruncatedBytes); |
202 | |
203 | return frameSize; |
204 | } catch (int /*e*/) { |
205 | #ifdef DEBUG |
    fprintf(stderr, "MPEG1or2AudioStreamParser::parse() EXCEPTION (This is normal behavior - *not* an error)\n");
207 | #endif |
208 | return 0; // the parsing got interrupted |
209 | } |
210 | } |
211 | |