
media_source.cpp

From the collection: MPEG4IP network streaming media development source code (C++)
Page 1 of 3
      Duration diff = m_videoSrcElapsedDuration - m_videoDstElapsedDuration;
#ifdef DEBUG_SYNC_DROPS
      debug_message("video source vs dest is %lld", diff);
#endif
      int removed = 0;
      do {
        m_videoEncodingDrift -= m_videoDstFrameDuration;
        if (m_videoEncodingDrift < 0) {
          m_videoEncodingDrift = 0;
        }
        m_videoDstFrameNumber++;
        m_videoDstElapsedDuration = VideoDstFramesToDuration();
        diff -= m_videoDstFrameDuration;
        removed++;
      } while (m_videoEncodingDrift > m_videoDstFrameDuration && diff > 0);
#ifdef DEBUG_SYNC_DROPS
      debug_message("dest duration is now %llu - skipped %d frames",
                    m_videoDstElapsedDuration, removed);
#endif
      if (diff > 0) {
        // src is still ahead - we can use it.
        m_videoEncodingDrift = 0;
      } else {
        return;
      }
    }
  }

  Timestamp encodingStartTimestamp = GetTimestamp();

  // this will either never happen (live capture)
  // or just happen once at startup when we discover
  // the stride used by the video decoder
  if (yStride != m_videoSrcYStride) {
    SetVideoSrcSize(m_videoSrcWidth, m_videoSrcHeight,
                    yStride, m_videoMatchAspectRatios);
  }

  u_int8_t* mallocedYuvImage = NULL;

  // crop to desired aspect ratio (may be a no-op)
  u_int8_t* yImage = pY + m_videoSrcYCrop;
  u_int8_t* uImage = pU + m_videoSrcUVCrop;
  u_int8_t* vImage = pV + m_videoSrcUVCrop;

  // resize image if necessary
  if (m_videoYResizer) {
    u_int8_t* resizedYUV = (u_int8_t*)Malloc(m_videoDstYUVSize);
    u_int8_t* resizedY = resizedYUV;
    u_int8_t* resizedU = resizedYUV + m_videoDstYSize;
    u_int8_t* resizedV = resizedYUV + m_videoDstYSize + m_videoDstUVSize;

    m_videoSrcYImage->data = yImage;
    m_videoDstYImage->data = resizedY;
    scale_image_process(m_videoYResizer);

    m_videoSrcUVImage->data = uImage;
    m_videoDstUVImage->data = resizedU;
    scale_image_process(m_videoUVResizer);

    m_videoSrcUVImage->data = vImage;
    m_videoDstUVImage->data = resizedV;
    scale_image_process(m_videoUVResizer);

    // done with the original source image
    // this may be NULL
    free(mallocedYuvImage);

    // switch over to resized version
    mallocedYuvImage = resizedYUV;
    yImage = resizedY;
    uImage = resizedU;
    vImage = resizedV;
    yStride = m_videoDstWidth;
    uvStride = yStride / 2;
  }

  // if we want encoded video frames
  if (m_pConfig->m_videoEncode) {
    // call video encoder
    bool rc = m_videoEncoder->EncodeImage(yImage, uImage, vImage,
                                          yStride, uvStride,
                                          m_videoWantKeyFrame);
    if (!rc) {
      debug_message("Can't encode image!");
      free(mallocedYuvImage);
      return;
    }
#ifdef DEBUG_VCODEC_SHADOW
    m_videoEncoderShadow->EncodeImage(yImage, uImage, vImage,
                                      yStride, uvStride,
                                      m_videoWantKeyFrame);
    // Note: we don't retrieve encoded frame from shadow
#endif
    // clear want key frame flag
    m_videoWantKeyFrame = false;
  }

#ifdef DEBUG_SYNC
  // Display this before we recalculate elapsed duration
  debug_message("video src frame duration %llu dst %llu prev len %d",
                m_videoSrcElapsedDuration, m_videoDstElapsedDuration,
                m_videoDstPrevFrameLength);
#endif

  // calculate previous frame duration
  Timestamp dstPrevFrameTimestamp =
    m_videoStartTimestamp + m_videoDstPrevFrameElapsedDuration;
  Duration dstPrevFrameDuration =
    m_videoDstElapsedDuration - m_videoDstPrevFrameElapsedDuration;
  m_videoDstPrevFrameElapsedDuration = m_videoDstElapsedDuration;

  // calculate the end of this frame
  m_videoEncodedFrames++;
  m_videoDstFrameNumber++;
  m_videoDstElapsedDuration = VideoDstFramesToDuration();
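
  // Illustrative note (values assumed, not from this file): if timestamps
  // are in microseconds, a 30 fps target gives m_videoDstFrameDuration of
  // roughly 33333 ticks. A source lag of 100000 ticks then advances
  // m_videoDstFrameNumber by (100000 / 33333) + 1 = 4 frames in the
  // catch-up branch below, pushing the destination clock back past the
  // source.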
  if (m_sourceRealTime && m_videoSrcFrameNumber > 0) {
    // next check our duration against real elapsed time
    Duration lag = m_videoSrcElapsedDuration - m_videoDstElapsedDuration;

    if (lag >= m_videoDstFrameDuration) {
      // adjust by integral number of target duration units
#if 1
      debug_message("video Lag dst %llu src %llu frames %llu",
                    m_videoDstElapsedDuration,
                    m_videoSrcElapsedDuration,
                    lag / m_videoDstFrameDuration);
#endif
      m_videoDstFrameNumber += (lag / m_videoDstFrameDuration) + 1;
      m_videoDstElapsedDuration = VideoDstFramesToDuration();
#ifdef DEBUG_SYNC_LAG
    } else {
      debug_message("video lag is %lld %lld", lag, m_videoDstFrameDuration);
#endif
    }
  }

  // forward encoded video to sinks
  if (m_pConfig->m_videoEncode) {
    if (m_videoDstPrevFrame) {
      CMediaFrame* pFrame = new CMediaFrame(MPEG4VIDEOFRAME,
                                            m_videoDstPrevFrame,
                                            m_videoDstPrevFrameLength,
                                            dstPrevFrameTimestamp,
                                            dstPrevFrameDuration);
      ForwardFrame(pFrame);
    }

    // hold onto this encoded vop until next one is ready
    m_videoEncoder->GetEncodedImage(&m_videoDstPrevFrame,
                                    &m_videoDstPrevFrameLength);
  }

  // forward raw video to sinks
  if (m_pConfig->SourceRawVideo()) {
    if (m_videoDstPrevImage) {
      CMediaFrame* pFrame = new CMediaFrame(YUVVIDEOFRAME,
                                            m_videoDstPrevImage,
                                            m_videoDstYUVSize,
                                            dstPrevFrameTimestamp,
                                            dstPrevFrameDuration);
      ForwardFrame(pFrame);
    }

    m_videoDstPrevImage = (u_int8_t*)Malloc(m_videoDstYUVSize);

    imgcpy(m_videoDstPrevImage,
           yImage,
           m_videoDstWidth,
           m_videoDstHeight,
           yStride);
    imgcpy(m_videoDstPrevImage + m_videoDstYSize,
           uImage,
           m_videoDstWidth / 2,
           m_videoDstHeight / 2,
           uvStride);
    imgcpy(m_videoDstPrevImage + m_videoDstYSize + m_videoDstUVSize,
           vImage,
           m_videoDstWidth / 2,
           m_videoDstHeight / 2,
           uvStride);
  }

  // forward reconstructed video to sinks
  if (m_pConfig->m_videoEncode
      && m_pConfig->GetBoolValue(CONFIG_VIDEO_ENCODED_PREVIEW)) {
    if (m_videoDstPrevReconstructImage) {
      CMediaFrame* pFrame = new CMediaFrame(RECONSTRUCTYUVVIDEOFRAME,
                                            m_videoDstPrevReconstructImage,
                                            m_videoDstYUVSize,
                                            dstPrevFrameTimestamp,
                                            dstPrevFrameDuration);
      ForwardFrame(pFrame);
    }

    m_videoDstPrevReconstructImage = (u_int8_t*)Malloc(m_videoDstYUVSize);

    m_videoEncoder->GetReconstructedImage(m_videoDstPrevReconstructImage,
                                          m_videoDstPrevReconstructImage
                                          + m_videoDstYSize,
                                          m_videoDstPrevReconstructImage
                                          + m_videoDstYSize + m_videoDstUVSize);
  }

  // calculate how we're doing versus target frame rate
  // this is used to decide if we need to drop frames
  if (m_sourceRealTime) {
    // reset skipped frames
    Duration drift = GetTimestamp() - encodingStartTimestamp;
    if (drift > m_videoDstFrameDuration) {
      m_videoEncodingDrift += drift - m_videoDstFrameDuration;
    } else {
      drift = m_videoDstFrameDuration - drift;
      if (m_videoEncodingDrift > drift) {
        m_videoEncodingDrift -= drift;
      } else {
        m_videoEncodingDrift = 0;
      }
    }
#if DEBUG_SYNC_DRIFT
    if (m_videoEncodingDrift > 0)
      debug_message("drift is %lld - dst duration is %llu total %llu",
                    drift, m_videoDstFrameDuration, m_videoEncodingDrift);
#endif
  }

  free(mallocedYuvImage);
  return;
}

void CMediaSource::DoStopVideo()
{
  DestroyVideoResizer();

  if (m_videoEncoder) {
    m_videoEncoder->Stop();
    delete m_videoEncoder;
    m_videoEncoder = NULL;
  }

  m_sourceVideo = false;
}

void
CMediaSource::DestroyVideoResizer()
{
  if (m_videoSrcYImage) {
    scale_free_image(m_videoSrcYImage);
    m_videoSrcYImage = NULL;
  }
  if (m_videoDstYImage) {
    scale_free_image(m_videoDstYImage);
    m_videoDstYImage = NULL;
  }
  if (m_videoYResizer) {
    scale_image_done(m_videoYResizer);
    m_videoYResizer = NULL;
  }
  if (m_videoSrcUVImage) {
    scale_free_image(m_videoSrcUVImage);
    m_videoSrcUVImage = NULL;
  }
  if (m_videoDstUVImage) {
    scale_free_image(m_videoDstUVImage);
    m_videoDstUVImage = NULL;
  }
  if (m_videoUVResizer) {
    scale_image_done(m_videoUVResizer);
    m_videoUVResizer = NULL;
  }
}

bool CMediaSource::InitAudio(bool realTime)
{
  m_sourceRealTime = realTime;
  m_sinkRealTime = m_pConfig->GetBoolValue(CONFIG_RTP_ENABLE);
  m_audioSrcSampleNumber = 0;
  m_audioSrcFrameNumber = 0;
  //m_videoSrcFrameNumber = 0;	// ensure video is also at zero

  // audio destination info
  m_audioDstChannels =
    m_pConfig->GetIntegerValue(CONFIG_AUDIO_CHANNELS);
  m_audioDstSampleRate =
    m_pConfig->GetIntegerValue(CONFIG_AUDIO_SAMPLE_RATE);
  m_audioDstSampleNumber = 0;
  m_audioDstFrameNumber = 0;
  m_audioDstRawSampleNumber = 0;
  m_audioDstRawFrameNumber = 0;

  m_audioSrcElapsedDuration = 0;
  m_audioDstElapsedDuration = 0;

  return true;
}

bool CMediaSource::SetAudioSrc(MediaType srcType,
                               u_int8_t srcChannels,
                               u_int32_t srcSampleRate)
{
  // audio source info
  m_audioSrcType = srcType;
  m_audioSrcChannels = srcChannels;
  m_audioSrcSampleRate = srcSampleRate;
  m_audioSrcSamplesPerFrame = 0;	// unknown, presumed variable

  // init audio encoder
  delete m_audioEncoder;

  m_audioEncoder = AudioEncoderCreate(
    m_pConfig->GetStringValue(CONFIG_AUDIO_ENCODER));
  if (m_audioEncoder == NULL) {
    return false;
  }
  // encoder exists; safe to query its output frame type
  m_audioDstType = m_audioEncoder->GetFrameType();
  if (!m_audioEncoder->Init(m_pConfig, m_sourceRealTime)) {
    delete m_audioEncoder;
    m_audioEncoder = NULL;
    return false;
  }

  m_audioDstSamplesPerFrame =
    m_audioEncoder->GetSamplesPerFrame();

  // if we need to resample
  if (m_audioDstSampleRate != m_audioSrcSampleRate) {
    // create a resampler for each audio destination channel -
    // we will combine the channels before resampling
    m_audioResample = (resample_t *)malloc(sizeof(resample_t) *
                                           m_audioDstChannels);
    for (int ix = 0; ix < m_audioDstChannels; ix++) {
      m_audioResample[ix] = st_resample_start(m_audioSrcSampleRate,
                                              m_audioDstSampleRate);
    }
  }

  // this calculation doesn't take into consideration the resampling
  // size of the src.  4 times might not be enough - we need most likely
  // 2 times the max of the src samples and the dest samples
  m_audioPreEncodingBufferLength = 0;
  m_audioPreEncodingBufferMaxLength =
    4 * DstSamplesToBytes(m_audioDstSamplesPerFrame);

  m_audioPreEncodingBuffer = (u_int8_t*)realloc(
    m_audioPreEncodingBuffer,
    m_audioPreEncodingBufferMaxLength);
  if (m_audioPreEncodingBuffer == NULL) {
    delete m_audioEncoder;
    m_audioEncoder = NULL;
    return false;
  }

  return true;
}

void CMediaSource::AddGapToAudio(Timestamp startTimestamp,
                                 Duration silenceDuration)
{
  uint32_t samples = SrcTicksToSamples(silenceDuration);
  uint32_t bytes = SrcSamplesToBytes(samples);

  if (bytes > 0) {
    error_message("Adding audio gap of %lld duration %u bytes",
                  silenceDuration, bytes);
    uint8_t *pSilence = (uint8_t *)Malloc(bytes);
    memset(pSilence, 0, bytes);
    ProcessAudioFrame(pSilence, bytes, startTimestamp, false);
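
The listing breaks off mid-function here at the page boundary (this is page 1 of 3). One note on the buffer sizing in SetAudioSrc above: the in-code comment flags that allocating four destination frames for m_audioPreEncodingBuffer may under-allocate once resampling inflates a source read, and suggests twice the larger of the source and destination frame sizes instead. A minimal standalone sketch of that suggested rule follows; the function name and the bytes-per-sample parameter are illustrative assumptions, not mpeg4ip API.

#include <algorithm>
#include <cstdint>

// Sketch of the sizing rule the SetAudioSrc comment proposes: twice the
// larger of the source and destination frame sizes in bytes, so a
// resampled source frame cannot overflow the pre-encoding buffer.
static uint32_t SuggestedPreEncodingBufferSize(uint32_t srcSamplesPerFrame,
                                               uint32_t dstSamplesPerFrame,
                                               uint32_t channels,
                                               uint32_t bytesPerSample = 2)
{
  const uint32_t srcBytes = srcSamplesPerFrame * channels * bytesPerSample;
  const uint32_t dstBytes = dstSamplesPerFrame * channels * bytesPerSample;
  return 2 * std::max(srcBytes, dstBytes);
}

For example, with 16-bit stereo and a 1024-sample destination frame, the existing rule allocates 4 * 4096 = 16384 bytes; if a single source read delivers 4096 samples (16384 bytes), the suggested rule allocates 2 * 16384 = 32768 bytes, which is where the two approaches diverge.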
