quicktimefilesink.cpp
/**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.

You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2004 Live Networks, Inc. All rights reserved.
// A sink that generates a QuickTime file from a composite media session
// Implementation

#include "QuickTimeFileSink.hh"
#include "QuickTimeGenericRTPSource.hh"
#include "GroupsockHelper.hh"
#include "OutputFile.hh"
#include "H263plusVideoRTPSource.hh" // for the special header
#include "MPEG4GenericRTPSource.hh" // for "samplingFrequencyFromAudioSpecificConfig()"
#include "MPEG4LATMAudioRTPSource.hh" // for "parseGeneralConfigStr()"
#include <ctype.h>

#define fourChar(x,y,z,w) ( ((x)<<24)|((y)<<16)|((z)<<8)|(w) )

////////// SubsessionIOState, ChunkDescriptor //////////

// A structure used to represent the I/O state of each input 'subsession':

class ChunkDescriptor {
public:
  ChunkDescriptor(unsigned offsetInFile, unsigned size,
                  unsigned frameSize, unsigned frameDuration,
                  struct timeval presentationTime);
  virtual ~ChunkDescriptor();

  ChunkDescriptor* extendChunk(unsigned newOffsetInFile, unsigned newSize,
                               unsigned newFrameSize, unsigned newFrameDuration,
                               struct timeval newPresentationTime);
      // this may end up allocating a new chunk instead

public:
  ChunkDescriptor* fNextChunk;
  unsigned fOffsetInFile;
  unsigned fNumFrames;
  unsigned fFrameSize;
  unsigned fFrameDuration;
  struct timeval fPresentationTime; // of the start of the data
};

class SubsessionBuffer {
public:
  SubsessionBuffer(unsigned bufferSize)
    : fBufferSize(bufferSize) {
    reset();
    fData = new unsigned char[bufferSize];
  }
  virtual ~SubsessionBuffer() { delete[] fData; } // fData was allocated with new[]
  void reset() { fBytesInUse = 0; }
  void addBytes(unsigned numBytes) { fBytesInUse += numBytes; }

  unsigned char* dataStart() { return &fData[0]; }
  unsigned char* dataEnd() { return &fData[fBytesInUse]; }
  unsigned bytesInUse() const { return fBytesInUse; }
  unsigned bytesAvailable() const { return fBufferSize - fBytesInUse; }

  void setPresentationTime(struct timeval const& presentationTime) {
    fPresentationTime = presentationTime;
  }
  struct timeval const& presentationTime() const { return fPresentationTime; }

private:
  unsigned fBufferSize;
  struct timeval fPresentationTime;
  unsigned char* fData;
  unsigned fBytesInUse;
};

// A 64-bit counter, used below:
class Count64 {
public:
  Count64() { hi = lo = 0; }

  void operator+=(unsigned arg);

  unsigned hi, lo; // each 32 bits
};

class SubsessionIOState {
public:
  SubsessionIOState(QuickTimeFileSink& sink, MediaSubsession& subsession);
  virtual ~SubsessionIOState();

  Boolean setQTstate();
  void setFinalQTstate();

  void afterGettingFrame(unsigned packetDataSize, struct timeval presentationTime);
  void onSourceClosure();

  Boolean syncOK(struct timeval presentationTime);
      // returns true iff data is usable despite a sync check

  static void setHintTrack(SubsessionIOState* hintedTrack, SubsessionIOState* hintTrack);
  Boolean isHintTrack() const { return fTrackHintedByUs != NULL; }
  Boolean hasHintTrack() const { return fHintTrackForUs != NULL; }

  UsageEnvironment& envir() const { return fOurSink.envir(); }

public:
  static unsigned fCurrentTrackNumber;
  unsigned fTrackID;
  SubsessionIOState* fHintTrackForUs;
  SubsessionIOState* fTrackHintedByUs;

  SubsessionBuffer *fBuffer, *fPrevBuffer;
  QuickTimeFileSink& fOurSink;
  MediaSubsession& fOurSubsession;

  unsigned short fLastPacketRTPSeqNum;
  Boolean fOurSourceIsActive;

  Boolean fHaveBeenSynced; // used in synchronizing with other streams
  struct timeval fSyncTime;

  Boolean fQTEnableTrack;
  unsigned fQTcomponentSubtype;
  char const* fQTcomponentName;
  typedef unsigned (QuickTimeFileSink::*atomCreationFunc)();
  atomCreationFunc fQTMediaInformationAtomCreator;
  atomCreationFunc fQTMediaDataAtomCreator;
  char const* fQTAudioDataType;
  unsigned short fQTSoundSampleVersion;
  unsigned fQTTimeScale;
  unsigned fQTTimeUnitsPerSample;
  unsigned fQTBytesPerFrame;
  unsigned fQTSamplesPerFrame;

  // These next fields are derived from the ones above,
  // plus the information from each chunk:
  unsigned fQTTotNumSamples;
  unsigned fQTDurationM; // in media time units
  unsigned fQTDurationT; // in track time units
  unsigned fTKHD_durationPosn;
      // position of the duration in the output 'tkhd' atom
  unsigned fQTInitialOffsetDuration;
      // if there's a pause at the beginning

  ChunkDescriptor *fHeadChunk, *fTailChunk;
  unsigned fNumChunks;

  // Counters to be used in the hint track's 'udta'/'hinf' atom;
  struct hinf {
    Count64 trpy;
    Count64 nump;
    Count64 tpyl;
    // Is 'maxr' needed? Computing this would be a PITA. #####
    Count64 dmed;
    Count64 dimm;
    // 'drep' is always 0
    // 'tmin' and 'tmax' are always 0
    unsigned pmax;
    unsigned dmax;
  } fHINF;

private:
  void useFrame(SubsessionBuffer& buffer);
  void useFrameForHinting(unsigned frameSize,
                          struct timeval presentationTime,
                          unsigned startSampleNumber);

  // used by the above two routines:
  unsigned useFrame1(unsigned sourceDataSize,
                     struct timeval presentationTime,
                     unsigned frameDuration, unsigned destFileOffset);
      // returns the number of samples in this data

private:
  // A structure used for temporarily storing frame state:
  struct {
    unsigned frameSize;
    struct timeval presentationTime;
    unsigned destFileOffset; // used for non-hint tracks only

    // The remaining fields are used for hint tracks only:
    unsigned startSampleNumber;
    unsigned short seqNum;
    unsigned rtpHeader;
    unsigned char numSpecialHeaders; // used when our RTP source has special headers
    unsigned specialHeaderBytesLength; // ditto
    unsigned char specialHeaderBytes[SPECIAL_HEADER_BUFFER_SIZE]; // ditto
    unsigned packetSizes[256];
  } fPrevFrameState;
};


////////// QuickTimeFileSink implementation //////////

QuickTimeFileSink::QuickTimeFileSink(UsageEnvironment& env,
                                     MediaSession& inputSession,
                                     FILE* outFid,
                                     unsigned bufferSize,
                                     unsigned short movieWidth,
                                     unsigned short movieHeight,
                                     unsigned movieFPS,
                                     Boolean packetLossCompensate,
                                     Boolean syncStreams,
                                     Boolean generateHintTracks,
                                     Boolean generateMP4Format)
  : Medium(env), fInputSession(inputSession),
    fOutFid(outFid), fBufferSize(bufferSize),
    fPacketLossCompensate(packetLossCompensate),
    fSyncStreams(syncStreams), fGenerateMP4Format(generateMP4Format),
    fAreCurrentlyBeingPlayed(False),
    fLargestRTPtimestampFrequency(0),
    fNumSubsessions(0), fNumSyncedSubsessions(0),
    fHaveCompletedOutputFile(False),
    fMovieWidth(movieWidth), fMovieHeight(movieHeight),
    fMovieFPS(movieFPS), fMaxTrackDurationM(0) {
  fNewestSyncTime.tv_sec = fNewestSyncTime.tv_usec = 0;
  fFirstDataTime.tv_sec = fFirstDataTime.tv_usec = (unsigned)(~0);

  // Set up I/O state for each input subsession:
  MediaSubsessionIterator iter(fInputSession);
  MediaSubsession* subsession;
  while ((subsession = iter.next()) != NULL) {
    // Ignore subsessions without a data source:
    FramedSource* subsessionSource = subsession->readSource();
    if (subsessionSource == NULL) continue;

    // If "subsession's" SDP description specified screen dimension
    // or frame rate parameters, then use these.  (Note that this must
    // be done before the call to "setQTState()" below.)
    if (subsession->videoWidth() != 0) {
      fMovieWidth = subsession->videoWidth();
    }
    if (subsession->videoHeight() != 0) {
      fMovieHeight = subsession->videoHeight();
    }
    if (subsession->videoFPS() != 0) {
      fMovieFPS = subsession->videoFPS();
    }

    SubsessionIOState* ioState
      = new SubsessionIOState(*this, *subsession);
    if (ioState == NULL || !ioState->setQTstate()) {
      // We're not able to output a QuickTime track for this subsession
      delete ioState; ioState = NULL;
      continue;
    }
    subsession->miscPtr = (void*)ioState;

    if (generateHintTracks) {
      // Also create a hint track for this track:
      SubsessionIOState* hintTrack
        = new SubsessionIOState(*this, *subsession);
      SubsessionIOState::setHintTrack(ioState, hintTrack);
      if (!hintTrack->setQTstate()) {
        delete hintTrack;
        SubsessionIOState::setHintTrack(ioState, NULL);
      }
    }

    // Also set a 'BYE' handler for this subsession's RTCP instance:
    if (subsession->rtcpInstance() != NULL) {
      subsession->rtcpInstance()->setByeHandler(onRTCPBye, ioState);
    }

    unsigned rtpTimestampFrequency = subsession->rtpTimestampFrequency();
    if (rtpTimestampFrequency > fLargestRTPtimestampFrequency) {
      fLargestRTPtimestampFrequency = rtpTimestampFrequency;
    }

    ++fNumSubsessions;
  }

  // Use the current time as the file's creation and modification
  // time.  Use Apple's time format: seconds since January 1, 1904
  gettimeofday(&fStartTime, NULL);
  fAppleCreationTime = fStartTime.tv_sec - 0x83dac000;

  // Begin by writing a "mdat" atom at the start of the file.
  // (Later, when we've finished copying data to the file, we'll come
  // back and fill in its size.)
  fMDATposition = ftell(fOutFid);
  addAtomHeader("mdat");
}

QuickTimeFileSink::~QuickTimeFileSink() {
  completeOutputFile();

  // Then, delete each active "SubsessionIOState":
  MediaSubsessionIterator iter(fInputSession);
  MediaSubsession* subsession;
  while ((subsession = iter.next()) != NULL) {
    SubsessionIOState* ioState
      = (SubsessionIOState*)(subsession->miscPtr);
    if (ioState == NULL) continue;

    delete ioState->fHintTrackForUs; // if any
    delete ioState;
  }
}

QuickTimeFileSink*
QuickTimeFileSink::createNew(UsageEnvironment& env,
                             MediaSession& inputSession,
                             char const* outputFileName,
                             unsigned bufferSize,
                             unsigned short movieWidth,
                             unsigned short movieHeight,
                             unsigned movieFPS,
                             Boolean packetLossCompensate,
                             Boolean syncStreams,
                             Boolean generateHintTracks,
                             Boolean generateMP4Format) {
  QuickTimeFileSink* newSink = NULL;

  do {
    FILE* fid = OpenOutputFile(env, outputFileName);
    if (fid == NULL) break;

    return new QuickTimeFileSink(env, inputSession, fid, bufferSize,
                                 movieWidth, movieHeight, movieFPS,
                                 packetLossCompensate, syncStreams,
                                 generateHintTracks, generateMP4Format);
  } while (0);

  delete newSink;
  return NULL;
}

Boolean QuickTimeFileSink::startPlaying(afterPlayingFunc* afterFunc,
                                        void* afterClientData) {
  // Make sure we're not already being played:
  if (fAreCurrentlyBeingPlayed) {
    envir().setResultMsg("This sink has already been played");
    return False;
  }

  fAreCurrentlyBeingPlayed = True;
  fAfterFunc = afterFunc;
  fAfterClientData = afterClientData;

  return continuePlaying();
}

Boolean QuickTimeFileSink::continuePlaying() {
  // Run through each of our input session's 'subsessions',
  // asking for a frame from each one:
  Boolean haveActiveSubsessions = False;
  MediaSubsessionIterator iter(fInputSession);
  MediaSubsession* subsession;
  while ((subsession = iter.next()) != NULL) {
    FramedSource* subsessionSource = subsession->readSource();
    if (subsessionSource == NULL) continue;

    if (subsessionSource->isCurrentlyAwaitingData()) continue;

    SubsessionIOState* ioState
      = (SubsessionIOState*)(subsession->miscPtr);
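
// --------------------------------------------------------------------------
// Usage sketch (an addition, not part of the liveMedia source): one plausible
// way an application could drive "QuickTimeFileSink" -- create the sink over
// an already-established "MediaSession", start it, run the event loop until
// the sources close, then close the sink so its destructor calls
// "completeOutputFile()" to fill in the "mdat" size and append the "moov"
// atom.  The function name "recordSessionToQuickTimeFile", the output file
// name "out.mov", and the buffer/movie parameters are illustrative
// assumptions only; building the MediaSession and hooking up its RTP sources
// (e.g. via an RTSP client, as "openRTSP" does) is outside the scope of this
// file.

#include "liveMedia.hh"

static char stopRecordingNow = 0; // watch variable for the event loop

static void afterPlaying(void* /*clientData*/) {
  // Called once all subsession sources have closed:
  stopRecordingNow = 1;
}

void recordSessionToQuickTimeFile(UsageEnvironment& env, MediaSession& session) {
  QuickTimeFileSink* qtSink
    = QuickTimeFileSink::createNew(env, session, "out.mov",
                                   100000 /*bufferSize*/,
                                   640, 480 /*movie width, height*/,
                                   30 /*movie FPS*/,
                                   False /*packetLossCompensate*/,
                                   True /*syncStreams*/,
                                   False /*generateHintTracks*/,
                                   False /*generateMP4Format*/);
  if (qtSink == NULL) return; // e.g., the output file could not be opened

  qtSink->startPlaying(afterPlaying, NULL);
  env.taskScheduler().doEventLoop(&stopRecordingNow);

  // Closing the sink finalizes the QuickTime file (via its destructor):
  Medium::close(qtSink);
}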