⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 avifilesink.cpp

📁 liveMedia 流媒体库中 AVIFileSink 的实现代码:把一个复合媒体会话(例如经 RTSP 接收的音视频子会话)中收到的帧数据写成 AVI 文件,非常有用
💻 CPP
📖 第 1 页 / 共 2 页
字号:
/**********This library is free software; you can redistribute it and/or modify it underthe terms of the GNU Lesser General Public License as published by theFree Software Foundation; either version 2.1 of the License, or (at youroption) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)This library is distributed in the hope that it will be useful, but WITHOUTANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESSFOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License formore details.You should have received a copy of the GNU Lesser General Public Licensealong with this library; if not, write to the Free Software Foundation, Inc.,59 Temple Place, Suite 330, Boston, MA  02111-1307  USA**********/// "liveMedia"// Copyright (c) 1996-2004 Live Networks, Inc.  All rights reserved.// A sink that generates an AVI file from a composite media session// Implementation#include "AVIFileSink.hh"#include "OutputFile.hh"#include "GroupsockHelper.hh"#define fourChar(x,y,z,w) ( ((w)<<24)|((z)<<16)|((y)<<8)|(x) )/*little-endian*/////////// AVISubsessionIOState ///////////// A structure used to represent the I/O state of each input 'subsession':class SubsessionBuffer {public:  SubsessionBuffer(unsigned bufferSize)    : fBufferSize(bufferSize) {    reset();    fData = new unsigned char[bufferSize];  }  virtual ~SubsessionBuffer() { delete fData; }  void reset() { fBytesInUse = 0; }  void addBytes(unsigned numBytes) { fBytesInUse += numBytes; }  unsigned char* dataStart() { return &fData[0]; }  unsigned char* dataEnd() { return &fData[fBytesInUse]; }  unsigned bytesInUse() const { return fBytesInUse; }  unsigned bytesAvailable() const { return fBufferSize - fBytesInUse; }    void setPresentationTime(struct timeval const& presentationTime) {    fPresentationTime = presentationTime;  }  struct timeval const& presentationTime() const {return fPresentationTime;}private:  unsigned fBufferSize;  struct timeval fPresentationTime;  unsigned 
char* fData;  unsigned fBytesInUse;};class AVISubsessionIOState {public:  AVISubsessionIOState(AVIFileSink& sink, MediaSubsession& subsession);  virtual ~AVISubsessionIOState();  void setAVIstate(unsigned subsessionIndex);  void setFinalAVIstate();  void afterGettingFrame(unsigned packetDataSize,			 struct timeval presentationTime);  void onSourceClosure();  UsageEnvironment& envir() const { return fOurSink.envir(); }public:  SubsessionBuffer *fBuffer, *fPrevBuffer;  AVIFileSink& fOurSink;  MediaSubsession& fOurSubsession;  unsigned short fLastPacketRTPSeqNum;  Boolean fOurSourceIsActive;  struct timeval fPrevPresentationTime;  unsigned fMaxBytesPerSecond;  Boolean fIsVideo, fIsAudio, fIsByteSwappedAudio;  unsigned fAVISubsessionTag;  unsigned fAVICodecHandlerType;  unsigned fAVISamplingFrequency; // for audio  u_int16_t fWAVCodecTag; // for audio  unsigned fAVIScale;  unsigned fAVIRate;  unsigned fAVISize;  unsigned fNumFrames;  unsigned fSTRHFrameCountPosition;private:  void useFrame(SubsessionBuffer& buffer);};////////// AVIFileSink implementation //////////AVIFileSink::AVIFileSink(UsageEnvironment& env,			 MediaSession& inputSession,			 FILE* outFid,			 unsigned bufferSize,			 unsigned short movieWidth, unsigned short movieHeight,			 unsigned movieFPS, Boolean packetLossCompensate)  : Medium(env), fInputSession(inputSession), fOutFid(outFid),    fBufferSize(bufferSize), fPacketLossCompensate(packetLossCompensate),    fAreCurrentlyBeingPlayed(False), fNumSubsessions(0), fNumBytesWritten(0),    fHaveCompletedOutputFile(False),    fMovieWidth(movieWidth), fMovieHeight(movieHeight), fMovieFPS(movieFPS) {  // Set up I/O state for each input subsession:  MediaSubsessionIterator iter(fInputSession);  MediaSubsession* subsession;  while ((subsession = iter.next()) != NULL) {    // Ignore subsessions without a data source:    FramedSource* subsessionSource = subsession->readSource();    if (subsessionSource == NULL) continue;    // If "subsession's" SDP description 
specified screen dimension    // or frame rate parameters, then use these.    if (subsession->videoWidth() != 0) {      fMovieWidth = subsession->videoWidth();    }    if (subsession->videoHeight() != 0) {      fMovieHeight = subsession->videoHeight();    }    if (subsession->videoFPS() != 0) {      fMovieFPS = subsession->videoFPS();    }    AVISubsessionIOState* ioState      = new AVISubsessionIOState(*this, *subsession);    subsession->miscPtr = (void*)ioState;    // Also set a 'BYE' handler for this subsession's RTCP instance:    if (subsession->rtcpInstance() != NULL) {      subsession->rtcpInstance()->setByeHandler(onRTCPBye, ioState);    }    ++fNumSubsessions;  }  // Begin by writing an AVI header:  addFileHeader_AVI();}AVIFileSink::~AVIFileSink() {  completeOutputFile();  // Then, delete each active "AVISubsessionIOState":  MediaSubsessionIterator iter(fInputSession);  MediaSubsession* subsession;  while ((subsession = iter.next()) != NULL) {    AVISubsessionIOState* ioState      = (AVISubsessionIOState*)(subsession->miscPtr);     if (ioState == NULL) continue;    delete ioState;  }}AVIFileSink* AVIFileSink::createNew(UsageEnvironment& env, MediaSession& inputSession,	    char const* outputFileName,	    unsigned bufferSize,	    unsigned short movieWidth, unsigned short movieHeight,	    unsigned movieFPS, Boolean packetLossCompensate) {  AVIFileSink* newSink = NULL;  do {    FILE* fid = OpenOutputFile(env, outputFileName);    if (fid == NULL) break;    return new AVIFileSink(env, inputSession, fid, bufferSize,			   movieWidth, movieHeight, movieFPS,			   packetLossCompensate);  } while (0);  delete newSink;  return NULL;}Boolean AVIFileSink::startPlaying(afterPlayingFunc* afterFunc,				  void* afterClientData) {  // Make sure we're not already being played:  if (fAreCurrentlyBeingPlayed) {    envir().setResultMsg("This sink has already been played");    return False;  }  fAreCurrentlyBeingPlayed = True;  fAfterFunc = afterFunc;  fAfterClientData = 
afterClientData;  return continuePlaying();}Boolean AVIFileSink::continuePlaying() {  // Run through each of our input session's 'subsessions',  // asking for a frame from each one:  Boolean haveActiveSubsessions = False;   MediaSubsessionIterator iter(fInputSession);  MediaSubsession* subsession;  while ((subsession = iter.next()) != NULL) {    FramedSource* subsessionSource = subsession->readSource();    if (subsessionSource == NULL) continue;    if (subsessionSource->isCurrentlyAwaitingData()) continue;    AVISubsessionIOState* ioState      = (AVISubsessionIOState*)(subsession->miscPtr);     if (ioState == NULL) continue;    haveActiveSubsessions = True;    unsigned char* toPtr = ioState->fBuffer->dataEnd();    unsigned toSize = ioState->fBuffer->bytesAvailable();    subsessionSource->getNextFrame(toPtr, toSize,				   afterGettingFrame, ioState,				   onSourceClosure, ioState);  }  if (!haveActiveSubsessions) {    envir().setResultMsg("No subsessions are currently active");    return False;  }  return True;}void AVIFileSink::afterGettingFrame(void* clientData, unsigned packetDataSize,		    unsigned /*numTruncatedBytes*/,		    struct timeval presentationTime,		    unsigned /*durationInMicroseconds*/) {  AVISubsessionIOState* ioState = (AVISubsessionIOState*)clientData;  ioState->afterGettingFrame(packetDataSize, presentationTime);}void AVIFileSink::onSourceClosure(void* clientData) {  AVISubsessionIOState* ioState = (AVISubsessionIOState*)clientData;  ioState->onSourceClosure();}void AVIFileSink::onSourceClosure1() {  // Check whether *all* of the subsession sources have closed.  
// If not, do nothing for now:  MediaSubsessionIterator iter(fInputSession);  MediaSubsession* subsession;  while ((subsession = iter.next()) != NULL) {    AVISubsessionIOState* ioState      = (AVISubsessionIOState*)(subsession->miscPtr);     if (ioState == NULL) continue;    if (ioState->fOurSourceIsActive) return; // this source hasn't closed  }  completeOutputFile();  // Call our specified 'after' function:  if (fAfterFunc != NULL) {    (*fAfterFunc)(fAfterClientData);  }}void AVIFileSink::onRTCPBye(void* clientData) {  AVISubsessionIOState* ioState = (AVISubsessionIOState*)clientData;  struct timeval timeNow;  gettimeofday(&timeNow, NULL);  unsigned secsDiff    = timeNow.tv_sec - ioState->fOurSink.fStartTime.tv_sec;  MediaSubsession& subsession = ioState->fOurSubsession;  ioState->envir() << "Received RTCP \"BYE\" on \""		   << subsession.mediumName()		   << "/" << subsession.codecName()		   << "\" subsession (after "		   << secsDiff << " seconds)\n";  // Handle the reception of a RTCP "BYE" as if the source had closed:  ioState->onSourceClosure();}void AVIFileSink::completeOutputFile() {  if (fHaveCompletedOutputFile || fOutFid == NULL) return;  // Update various AVI 'size' fields to take account of the codec data that  // we've now written to the file:  unsigned maxBytesPerSecond = 0;  unsigned numVideoFrames = 0;  unsigned numAudioFrames = 0;  //// Subsession-specific fields:  MediaSubsessionIterator iter(fInputSession);  MediaSubsession* subsession;  while ((subsession = iter.next()) != NULL) {    AVISubsessionIOState* ioState      = (AVISubsessionIOState*)(subsession->miscPtr);     if (ioState == NULL) continue;    maxBytesPerSecond += ioState->fMaxBytesPerSecond;    setWord(ioState->fSTRHFrameCountPosition, ioState->fNumFrames);    if (ioState->fIsVideo) numVideoFrames = ioState->fNumFrames;    else if (ioState->fIsAudio) numAudioFrames = ioState->fNumFrames;  }  //// Global fields:  fRIFFSizeValue += fNumBytesWritten;  setWord(fRIFFSizePosition, 
fRIFFSizeValue);  setWord(fAVIHMaxBytesPerSecondPosition, maxBytesPerSecond);  setWord(fAVIHFrameCountPosition,	  numVideoFrames > 0 ? numVideoFrames : numAudioFrames);  fMoviSizeValue += fNumBytesWritten;  setWord(fMoviSizePosition, fMoviSizeValue);  // We're done:  fHaveCompletedOutputFile = True;}////////// AVISubsessionIOState implementation ///////////AVISubsessionIOState::AVISubsessionIOState(AVIFileSink& sink,				     MediaSubsession& subsession)  : fOurSink(sink), fOurSubsession(subsession),    fMaxBytesPerSecond(0), fNumFrames(0) {  fBuffer = new SubsessionBuffer(fOurSink.fBufferSize);  fPrevBuffer = sink.fPacketLossCompensate    ? new SubsessionBuffer(fOurSink.fBufferSize) : NULL;  FramedSource* subsessionSource = subsession.readSource();  fOurSourceIsActive = subsessionSource != NULL;  fPrevPresentationTime.tv_sec = 0;  fPrevPresentationTime.tv_usec = 0;}AVISubsessionIOState::~AVISubsessionIOState() {  delete fBuffer; delete fPrevBuffer;}void AVISubsessionIOState::setAVIstate(unsigned subsessionIndex) {  fIsVideo = strcmp(fOurSubsession.mediumName(), "video") == 0;  fIsAudio = strcmp(fOurSubsession.mediumName(), "audio") == 0;  if (fIsVideo) {    fAVISubsessionTag      = fourChar('0'+subsessionIndex/10,'0'+subsessionIndex%10,'d','c');    if (strcmp(fOurSubsession.codecName(), "JPEG") == 0) {      fAVICodecHandlerType = fourChar('m','j','p','g');    } else if (strcmp(fOurSubsession.codecName(), "MP4V-ES") == 0) {      fAVICodecHandlerType = fourChar('D','I','V','X');    } else if (strcmp(fOurSubsession.codecName(), "MPV") == 0) {      fAVICodecHandlerType = fourChar('m','p','g','1'); // what about MPEG-2?

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -