📄 quicktimefilesink.cpp
    if (ioState == NULL) continue;

    haveActiveSubsessions = True;
    unsigned char* toPtr = ioState->fBuffer->dataEnd();
    unsigned toSize = ioState->fBuffer->bytesAvailable();
    subsessionSource->getNextFrame(toPtr, toSize,
                                   afterGettingFrame, ioState,
                                   onSourceClosure, ioState);
  }
  if (!haveActiveSubsessions) {
    envir().setResultMsg("No subsessions are currently active");
    return False;
  }

  return True;
}

void QuickTimeFileSink::afterGettingFrame(void* clientData,
                                          unsigned packetDataSize,
                                          unsigned /*numTruncatedBytes*/,
                                          struct timeval presentationTime,
                                          unsigned /*durationInMicroseconds*/) {
  SubsessionIOState* ioState = (SubsessionIOState*)clientData;
  if (!ioState->syncOK(presentationTime)) {
    // Ignore this data:
    ioState->fOurSink.continuePlaying();
    return;
  }
  ioState->afterGettingFrame(packetDataSize, presentationTime);
}

void QuickTimeFileSink::onSourceClosure(void* clientData) {
  SubsessionIOState* ioState = (SubsessionIOState*)clientData;
  ioState->onSourceClosure();
}

void QuickTimeFileSink::onSourceClosure1() {
  // Check whether *all* of the subsession sources have closed.
  // If not, do nothing for now:
  MediaSubsessionIterator iter(fInputSession);
  MediaSubsession* subsession;
  while ((subsession = iter.next()) != NULL) {
    SubsessionIOState* ioState = (SubsessionIOState*)(subsession->miscPtr);
    if (ioState == NULL) continue;

    if (ioState->fOurSourceIsActive) return; // this source hasn't closed
  }

  completeOutputFile();

  // Call our specified 'after' function:
  if (fAfterFunc != NULL) {
    (*fAfterFunc)(fAfterClientData);
  }
}

void QuickTimeFileSink::onRTCPBye(void* clientData) {
  SubsessionIOState* ioState = (SubsessionIOState*)clientData;

  struct timeval timeNow;
  gettimeofday(&timeNow, NULL);
  unsigned secsDiff = timeNow.tv_sec - ioState->fOurSink.fStartTime.tv_sec;

  MediaSubsession& subsession = ioState->fOurSubsession;
  ioState->envir() << "Received RTCP \"BYE\" on \""
                   << subsession.mediumName() << "/"
                   << subsession.codecName()
                   << "\" subsession (after " << secsDiff
                   << " seconds)\n";

  // Handle the reception of a RTCP "BYE" as if the source had closed:
  ioState->onSourceClosure();
}

static Boolean timevalGE(struct timeval const& tv1,
                         struct timeval const& tv2) {
  return (unsigned)tv1.tv_sec > (unsigned)tv2.tv_sec
    || (tv1.tv_sec == tv2.tv_sec
        && (unsigned)tv1.tv_usec >= (unsigned)tv2.tv_usec);
}

void QuickTimeFileSink::completeOutputFile() {
  if (fHaveCompletedOutputFile || fOutFid == NULL) return;

  // Begin by filling in the initial "mdat" atom with the current
  // file size:
  unsigned curFileSize = ftell(fOutFid);
  setWord(fMDATposition, curFileSize);

  // Then, note the time of the first received data:
  MediaSubsessionIterator iter(fInputSession);
  MediaSubsession* subsession;
  while ((subsession = iter.next()) != NULL) {
    SubsessionIOState* ioState = (SubsessionIOState*)(subsession->miscPtr);
    if (ioState == NULL) continue;

    ChunkDescriptor* const headChunk = ioState->fHeadChunk;
    if (headChunk != NULL
        && timevalGE(fFirstDataTime, headChunk->fPresentationTime)) {
      fFirstDataTime = headChunk->fPresentationTime;
    }
  }

  // Then, update the QuickTime-specific state for each active track:
  iter.reset();
  while ((subsession = iter.next()) != NULL) {
    SubsessionIOState* ioState = (SubsessionIOState*)(subsession->miscPtr);
    if (ioState == NULL) continue;

    ioState->setFinalQTstate();
    // Do the same for a hint track (if any):
    if (ioState->hasHintTrack()) {
      ioState->fHintTrackForUs->setFinalQTstate();
    }
  }

  if (fGenerateMP4Format) {
    // Begin with a "ftyp" atom:
    addAtom_ftyp();
  }

  // Then, add a "moov" atom for the file metadata:
  addAtom_moov();

  // We're done:
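// Aside (not part of the original file): completeOutputFile() above
// back-patches the size of the initial "mdat" atom once the total data
// length is finally known. A minimal stdio sketch of that same pattern,
// assuming a big-endian 32-bit size field; writeWord32()/patchWord32()
// are hypothetical stand-ins for this file's addWord()/setWord():
#if 0
#include <stdio.h>

static void writeWord32(FILE* f, unsigned w) {
  fputc((w>>24)&0xFF, f); fputc((w>>16)&0xFF, f);
  fputc((w>>8)&0xFF, f);  fputc(w&0xFF, f);
}

static void patchWord32(FILE* f, long pos, unsigned w) {
  long saved = ftell(f);     // remember the current write position
  fseek(f, pos, SEEK_SET);
  writeWord32(f, w);         // overwrite the placeholder
  fseek(f, saved, SEEK_SET); // restore the write position
}

static void mdatSketch(FILE* f) {
  long mdatPos = ftell(f);   // where the atom (and its size field) begins
  writeWord32(f, 0);         // placeholder size, patched at completion
  fwrite("mdat", 1, 4, f);
  // ... stream media data of a priori unknown length ...
  patchWord32(f, mdatPos, (unsigned)(ftell(f) - mdatPos));
}
#endif
// QuickTimeFileSink itself patches with the whole current file size
// because its "mdat" atom starts at the beginning of the output file.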
  fHaveCompletedOutputFile = True;
}


////////// SubsessionIOState, ChunkDescriptor implementation //////////

unsigned SubsessionIOState::fCurrentTrackNumber = 0;

SubsessionIOState::SubsessionIOState(QuickTimeFileSink& sink,
                                     MediaSubsession& subsession)
  : fHintTrackForUs(NULL), fTrackHintedByUs(NULL),
    fOurSink(sink), fOurSubsession(subsession),
    fLastPacketRTPSeqNum(0), fHaveBeenSynced(False),
    fQTTotNumSamples(0),
    fHeadChunk(NULL), fTailChunk(NULL), fNumChunks(0) {
  fTrackID = ++fCurrentTrackNumber;

  fBuffer = new SubsessionBuffer(fOurSink.fBufferSize);
  fPrevBuffer = sink.fPacketLossCompensate
    ? new SubsessionBuffer(fOurSink.fBufferSize) : NULL;

  FramedSource* subsessionSource = subsession.readSource();
  fOurSourceIsActive = subsessionSource != NULL;

  fPrevFrameState.presentationTime.tv_sec = 0;
  fPrevFrameState.presentationTime.tv_usec = 0;
  fPrevFrameState.seqNum = 0;
}

SubsessionIOState::~SubsessionIOState() {
  delete fBuffer;
  delete fPrevBuffer;
  delete fHeadChunk;
}

Boolean SubsessionIOState::setQTstate() {
  char const* noCodecWarning1 = "Warning: We don't implement a QuickTime ";
  char const* noCodecWarning2 = " Media Data Type for the \"";
  char const* noCodecWarning3 = "\" track, so we'll insert a dummy \"????\" Media Data Atom instead. A separate, codec-specific editing pass will be needed before this track can be played.\n";
  Boolean supportPartiallyOnly = False;

  do {
    fQTEnableTrack = True; // enable this track in the movie by default
    fQTTimeScale = fOurSubsession.rtpTimestampFrequency(); // by default
    fQTTimeUnitsPerSample = 1; // by default
    fQTBytesPerFrame = 0;
        // by default - indicates that the whole packet data is a frame
    fQTSamplesPerFrame = 1; // by default

    // Make sure our subsession's medium is one that we know how to
    // represent in a QuickTime file:
    if (isHintTrack()) {
      // Hint tracks are treated specially
      fQTEnableTrack = False; // hint tracks are marked as inactive
      fQTcomponentSubtype = fourChar('h','i','n','t');
      fQTcomponentName = "hint media handler";
      fQTMediaInformationAtomCreator = &QuickTimeFileSink::addAtom_gmhd;
      fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_rtp;
    } else if (strcmp(fOurSubsession.mediumName(), "audio") == 0) {
      fQTcomponentSubtype = fourChar('s','o','u','n');
      fQTcomponentName = "Apple Sound Media Handler";
      fQTMediaInformationAtomCreator = &QuickTimeFileSink::addAtom_smhd;
      fQTMediaDataAtomCreator
        = &QuickTimeFileSink::addAtom_soundMediaGeneral; // by default
      fQTSoundSampleVersion = 0; // by default

      // Make sure that our subsession's codec is one that we can handle:
      if (strcmp(fOurSubsession.codecName(), "X-QT") == 0 ||
          strcmp(fOurSubsession.codecName(), "X-QUICKTIME") == 0) {
        fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_genericMedia;
      } else if (strcmp(fOurSubsession.codecName(), "PCMU") == 0) {
        fQTAudioDataType = "ulaw";
        fQTBytesPerFrame = 1;
      } else if (strcmp(fOurSubsession.codecName(), "GSM") == 0) {
        fQTAudioDataType = "agsm";
        fQTBytesPerFrame = 33;
        fQTSamplesPerFrame = 160;
      } else if (strcmp(fOurSubsession.codecName(), "PCMA") == 0) {
        fQTAudioDataType = "alaw";
        fQTBytesPerFrame = 1;
      } else if (strcmp(fOurSubsession.codecName(), "QCELP") == 0) {
        fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_Qclp;
        fQTSamplesPerFrame = 160;
      } else if (strcmp(fOurSubsession.codecName(), "MPEG4-GENERIC") == 0 ||
                 strcmp(fOurSubsession.codecName(), "MP4A-LATM") == 0) {
        fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_mp4a;
        fQTTimeUnitsPerSample = 1024; // QT considers each frame to be a 'sample'
        // The time scale (frequency) comes from the 'config' information.
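// Aside (not part of the original file): fourChar() packs four ASCII
// characters into one 32-bit value, which is how QuickTime spells its
// atom, handler, and codec types. A sketch of the packing, assuming the
// conventional big-endian layout (fourCharSketch() is a hypothetical
// stand-in for this file's fourChar()):
#if 0
static unsigned fourCharSketch(char a, char b, char c, char d) {
  return ((unsigned)a<<24) | ((unsigned)b<<16) | ((unsigned)c<<8) | (unsigned)d;
}
// fourCharSketch('s','o','u','n') == 0x736F756E -> the 'soun' sound handler
// fourCharSketch('v','i','d','e') == 0x76696465 -> the 'vide' video handler
#endif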
        // It might be different from the RTP timestamp frequency
        // (e.g., aacPlus).
        unsigned frequencyFromConfig
          = samplingFrequencyFromAudioSpecificConfig(fOurSubsession.fmtp_config());
        if (frequencyFromConfig != 0) fQTTimeScale = frequencyFromConfig;
      } else {
        envir() << noCodecWarning1 << "Audio" << noCodecWarning2
                << fOurSubsession.codecName() << noCodecWarning3;
        fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_dummy;
        fQTEnableTrack = False; // disable this track in the movie
      }
    } else if (strcmp(fOurSubsession.mediumName(), "video") == 0) {
      fQTcomponentSubtype = fourChar('v','i','d','e');
      fQTcomponentName = "Apple Video Media Handler";
      fQTMediaInformationAtomCreator = &QuickTimeFileSink::addAtom_vmhd;

      // Make sure that our subsession's codec is one that we can handle:
      if (strcmp(fOurSubsession.codecName(), "X-QT") == 0 ||
          strcmp(fOurSubsession.codecName(), "X-QUICKTIME") == 0) {
        fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_genericMedia;
      } else if (strcmp(fOurSubsession.codecName(), "H263-1998") == 0 ||
                 strcmp(fOurSubsession.codecName(), "H263-2000") == 0) {
        fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_h263;
        fQTTimeScale = 600;
        fQTTimeUnitsPerSample = fQTTimeScale/fOurSink.fMovieFPS;
      } else if (strcmp(fOurSubsession.codecName(), "MP4V-ES") == 0) {
        fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_mp4v;
        fQTTimeScale = 600;
        fQTTimeUnitsPerSample = fQTTimeScale/fOurSink.fMovieFPS;
      } else {
        envir() << noCodecWarning1 << "Video" << noCodecWarning2
                << fOurSubsession.codecName() << noCodecWarning3;
        fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_dummy;
        fQTEnableTrack = False; // disable this track in the movie
      }
    } else {
      envir() << "Warning: We don't implement a QuickTime Media Handler for media type \""
              << fOurSubsession.mediumName() << "\"";
      break;
    }

    if (supportPartiallyOnly) {
      envir() << "Warning: We don't have sufficient codec-specific information (e.g., sample sizes) to fully generate the \""
              << fOurSubsession.mediumName() << "/"
              << fOurSubsession.codecName()
              << "\" track, so we'll disable this track in the movie. A separate, codec-specific editing pass will be needed before this track can be played\n";
      fQTEnableTrack = False; // disable this track in the movie
    }

    return True;
  } while (0);

  envir() << ", so a track for the \"" << fOurSubsession.mediumName()
          << "/" << fOurSubsession.codecName()
          << "\" subsession will not be included in the output QuickTime file\n";
  return False;
}

void SubsessionIOState::setFinalQTstate() {
  // Compute derived parameters, by running through the list of chunks:
  fQTDurationT = 0;

  ChunkDescriptor* chunk = fHeadChunk;
  while (chunk != NULL) {
    unsigned const numFrames = chunk->fNumFrames;
    unsigned const dur = numFrames*chunk->fFrameDuration;
    fQTDurationT += dur;

    chunk = chunk->fNextChunk;
  }

  // Convert this duration from track to movie time scale:
  double scaleFactor = fOurSink.movieTimeScale()/(double)fQTTimeScale;
  fQTDurationM = (unsigned)(fQTDurationT*scaleFactor);

  if (fQTDurationM > fOurSink.fMaxTrackDurationM) {
    fOurSink.fMaxTrackDurationM = fQTDurationM;
  }
}

void SubsessionIOState::afterGettingFrame(unsigned packetDataSize,
                                          struct timeval presentationTime) {
  // Begin by checking whether there was a gap in the RTP stream.
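// Aside (not part of the original file): a worked instance of the
// track-to-movie time-scale conversion in setFinalQTstate(), with assumed
// values. If a video track uses the 90000 Hz RTP clock (fQTTimeScale =
// 90000), the movie time scale is 600, and the summed chunk durations give
// fQTDurationT = 900000 track units (i.e. 10 seconds), then:
//   scaleFactor  = 600 / 90000.0    = 1/150
//   fQTDurationM = 900000 * (1/150) = 6000 movie units (= 10 s at 600/s)
// Doing the division in double (as the code does) avoids the truncation to
// zero that the integer quotient 600/90000 would produce.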
  // If so, try to compensate for this (if desired):
  unsigned short rtpSeqNum
    = fOurSubsession.rtpSource()->curPacketRTPSeqNum();
  if (fOurSink.fPacketLossCompensate && fPrevBuffer->bytesInUse() > 0) {
    short seqNumGap = rtpSeqNum - fLastPacketRTPSeqNum;
    for (short i = 1; i < seqNumGap; ++i) {
      // Insert a copy of the previous frame, to compensate for the loss:
      useFrame(*fPrevBuffer);
    }
  }
  fLastPacketRTPSeqNum = rtpSeqNum;

  // Now, continue working with the frame that we just got
  if (fBuffer->bytesInUse() == 0) {
    fBuffer->setPresentationTime(presentationTime);
  }
  fBuffer->addBytes(packetDataSize);

  // If our RTP source is a "QuickTimeGenericRTPSource", then
  // use its 'qtState' to set some parameters that we need:
  if (fQTMediaDataAtomCreator == &QuickTimeFileSink::addAtom_genericMedia) {
    QuickTimeGenericRTPSource* rtpSource
      = (QuickTimeGenericRTPSource*)fOurSubsession.rtpSource();
    QuickTimeGenericRTPSource::QTState& qtState = rtpSource->qtState;
    fQTTimeScale = qtState.timescale;
    if (qtState.width != 0) {
      fOurSink.fMovieWidth = qtState.width;
    }
    if (qtState.height != 0) {
      fOurSink.fMovieHeight = qtState.height;
    }

    // Also, if the media type in the "sdAtom" is one that we recognize
    // to have special parameters, then fix this here:
    if (qtState.sdAtomSize >= 8) {
      char const* atom = qtState.sdAtom;
      unsigned mediaType = fourChar(atom[4],atom[5],atom[6],atom[7]);

      switch (mediaType) {
      case fourChar('a','g','s','m'): {
        fQTBytesPerFrame = 33;
        fQTSamplesPerFrame = 160;
        break;
      }
      case fourChar('Q','c','l','p'): {
        fQTBytesPerFrame = 35;
        fQTSamplesPerFrame = 160;
        break;
      }
      case fourChar('H','c','l','p'): {
        fQTBytesPerFrame = 17;
        fQTSamplesPerFrame = 160;
        break;
      }
      case fourChar('h','2','6','3'): {
        fQTTimeUnitsPerSample = fQTTimeScale/fOurSink.fMovieFPS;
        break;
      }
      }
    }
  } else if (fQTMediaDataAtomCreator == &QuickTimeFileSink::addAtom_Qclp) {
    // For QCELP data, make a note of the frame size (even though it's the
    // same as the packet data size), because it varies depending on the
    // 'rate' of the stream, and this size gets used later when setting up
    // the 'Qclp' QuickTime atom:
    fQTBytesPerFrame = packetDataSize;
  }

  useFrame(*fBuffer);
  if (fOurSink.fPacketLossCompensate) {
    // Save this frame, in case we need it for recovery:
    // (The page dump was truncated at this point; the remainder below is a
    // hedged reconstruction of the evident cleanup: swap in the spare
    // buffer and ask the sink for the next frame.)
    SubsessionBuffer* tmp = fPrevBuffer;
    fPrevBuffer = fBuffer;
    fBuffer = tmp;
  }
  fBuffer->reset(); // ready for the next frame

  fOurSink.continuePlaying();
}
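// Aside (not part of the original file): the loss-compensation loop above
// relies on 16-bit modular arithmetic: subtracting two unsigned shorts and
// narrowing to a signed short yields the true gap even across the RTP
// sequence-number wrap. A sketch of the behavior, with assumed values:
#if 0
#include <assert.h>

static short seqGap(unsigned short cur, unsigned short prev) {
  return (short)(cur - prev); // same expression as in afterGettingFrame()
}

static void sketchGap() {
  assert(seqGap(101, 100) == 1); // consecutive packets: no loss
  assert(seqGap(105, 100) == 5); // 4 packets lost -> 4 duplicated frames
  assert(seqGap(2, 65534) == 4); // wraps past 65535 correctly
}
#endif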