
video_encoder_class.cpp
From: Complete RTP/RTSP code library (C++)
Page 1 of 2
/*
 * The contents of this file are subject to the Mozilla Public
 * License Version 1.1 (the "License"); you may not use this file
 * except in compliance with the License. You may obtain a copy of
 * the License at http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS
 * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
 * implied. See the License for the specific language governing
 * rights and limitations under the License.
 *
 * The Original Code is MPEG4IP.
 *
 * The Initial Developer of the Original Code is Cisco Systems Inc.
 * Portions created by Cisco Systems Inc. are
 * Copyright (C) Cisco Systems Inc. 2005.  All Rights Reserved.
 *
 * Contributor(s):
 *		Dave Mackie		dmackie@cisco.com
 *		Bill May		wmay@cisco.com
 */

#include "mp4live.h"
#include "video_encoder.h"
#include "video_encoder_base.h"
#include "video_util_filter.h"

#ifdef HAVE_FFMPEG
extern "C" {
#ifdef HAVE_FFMPEG_INSTALLED
#include <ffmpeg/avcodec.h>
#else
#include <avcodec.h>
#endif
}
#endif

// Video encoder initialization
CVideoEncoder::CVideoEncoder(CVideoProfile *vp,
                             uint16_t mtu,
                             CVideoEncoder *next,
                             bool realTime) :
  CMediaCodec(vp, mtu, next, realTime)
{
  m_videoSrcYImage = NULL;
  m_videoDstYImage = NULL;
  m_videoYResizer = NULL;
  m_videoSrcUVImage = NULL;
  m_videoDstUVImage = NULL;
  m_videoUVResizer = NULL;
  m_videoDstPrevImage = NULL;
  m_videoDstPrevReconstructImage = NULL;
  m_videoSrcWidth = 0;
  m_videoSrcHeight = 0;
  m_videoSrcYStride = 0;
  m_preview = false;
}

int CVideoEncoder::ThreadMain(void)
{
  CMsg* pMsg;
  bool stop = false;

  debug_message("video encoder %s start", Profile()->GetName());

  m_videoSrcFrameNumber = 0;
  //  debug_message("audio source frame is %d", m_audioSrcFrameNumber);
  //  m_audioSrcFrameNumber = 0;	// ensure audio is also at zero

  const char *videoFilter;
  videoFilter = Profile()->GetStringValue(CFG_VIDEO_FILTER);
  m_videoFilter = VF_NONE;
  if (strcasecmp(videoFilter, VIDEO_FILTER_DEINTERLACE) == 0) {
    m_videoFilter = VF_DEINTERLACE;
#ifdef HAVE_FFMPEG
  } else if (strcasecmp(videoFilter, VIDEO_FILTER_FFMPEG_DEINTERLACE_INPLACE) == 0) {
    m_videoFilter = VF_FFMPEG_DEINTERLACE_INPLACE;
#endif
  }

  m_videoDstFrameRate = Profile()->GetFloatValue(CFG_VIDEO_FRAME_RATE);
  m_videoDstFrameDuration =
    (Duration)(((float)TimestampTicks / m_videoDstFrameRate) + 0.5);
  m_videoDstFrameNumber = 0;
  m_videoDstWidth = Profile()->m_videoWidth;
  m_videoDstHeight = Profile()->m_videoHeight;
  m_videoDstAspectRatio =
    (float)Profile()->m_videoWidth / (float)Profile()->m_videoHeight;
  m_videoDstYSize = m_videoDstWidth * m_videoDstHeight;
  m_videoDstUVSize = m_videoDstYSize / 4;
  m_videoDstYUVSize = (m_videoDstYSize * 3) / 2;

  Init();
  m_videoDstType = GetFrameType();

  m_videoWantKeyFrame = true;
  //m_videoEncodingDrift = 0;
  //m_videoEncodingMaxDrift = m_videoDstFrameDuration;
  m_videoSrcElapsedDuration = 0;
  m_videoDstElapsedDuration = 0;
  m_videoDstPrevImage = NULL;
  m_videoDstPrevReconstructImage = NULL;

  while (stop == false && SDL_SemWait(m_myMsgQueueSemaphore) == 0) {
    pMsg = m_myMsgQueue.get_message();
    if (pMsg != NULL) {
      switch (pMsg->get_value()) {
      case MSG_NODE_STOP_THREAD:
        debug_message("video %s stop received",
                      Profile()->GetName());
        DoStopVideo();
        stop = true;
        break;
      case MSG_NODE_START:
        // DoStartTransmit();  Anything ?
        break;
      case MSG_NODE_STOP:
        DoStopVideo();
        break;
      case MSG_SINK_FRAME: {
        uint32_t dontcare;
        CMediaFrame *mf = (CMediaFrame*)pMsg->get_message(dontcare);
        if (m_stop_thread == false)
          ProcessVideoYUVFrame(mf);
        if (mf->RemoveReference()) {
          delete mf;
        }
        break;
      }
      }

      delete pMsg;
    }
  }

  while ((pMsg = m_myMsgQueue.get_message()) != NULL) {
    if (pMsg->get_value() == MSG_SINK_FRAME) {
      uint32_t dontcare;
      CMediaFrame *mf = (CMediaFrame*)pMsg->get_message(dontcare);
      if (mf->RemoveReference()) {
        delete mf;
      }
    }
    delete pMsg;
  }

  debug_message("video encoder %s exit", Profile()->GetName());
  return 0;
}

static void c_ReleaseReconstruct(void *f)
{
  yuv_media_frame_t *yuv = (yuv_media_frame_t *)f;
  if (yuv->free_y) {
    CHECK_AND_FREE(yuv->y);
  }
  free(yuv);
}

// Called from ProcessYUVVideoFrame when we get the first frame -
// it will have the source information.
void CVideoEncoder::SetVideoSrcSize(u_int16_t srcWidth,
                                    u_int16_t srcHeight,
                                    u_int16_t srcStride,
                                    bool matchAspectRatios)
{
  // N.B. InitVideo() must be called first
  m_videoSrcWidth = srcWidth;
  m_videoSrcHeight = srcHeight;
  m_videoSrcAspectRatio = (float)srcWidth / (float)srcHeight;
  m_videoMatchAspectRatios = matchAspectRatios;

  // N.B. SetVideoSrcSize() should be called once before
  m_videoSrcYStride = srcStride;
  m_videoSrcUVStride = srcStride / 2;

  // these next three may change below
  m_videoSrcAdjustedHeight = m_videoSrcHeight;
  m_videoSrcYCrop = 0;
  m_videoSrcUVCrop = 0;

  // match aspect ratios
  if (m_videoMatchAspectRatios
      && fabs(m_videoSrcAspectRatio - m_videoDstAspectRatio) > 0.01) {

    m_videoSrcAdjustedHeight =
      (u_int16_t)(m_videoSrcWidth / m_videoDstAspectRatio);
    if ((m_videoSrcAdjustedHeight % 16) != 0) {
      m_videoSrcAdjustedHeight += 16 - (m_videoSrcAdjustedHeight % 16);
    }

    if (m_videoSrcAspectRatio < m_videoDstAspectRatio) {
      // crop src
      m_videoSrcYCrop = m_videoSrcYStride *
        ((m_videoSrcHeight - m_videoSrcAdjustedHeight) / 2);
      m_videoSrcUVCrop = m_videoSrcYCrop / 4;
    }
  }

  m_videoSrcYSize = m_videoSrcYStride
    * MAX(m_videoSrcHeight, m_videoSrcAdjustedHeight);
  m_videoSrcUVSize = m_videoSrcYSize / 4;
  m_videoSrcYUVSize = (m_videoSrcYSize * 3) / 2;

  // resizing
  DestroyVideoResizer();
  if (m_videoSrcWidth != m_videoDstWidth
      || m_videoSrcAdjustedHeight != m_videoDstHeight) {

    m_videoSrcYImage =
      scale_new_image(m_videoSrcWidth,
                      m_videoSrcAdjustedHeight, 1);
    m_videoSrcYImage->span = m_videoSrcYStride;
    m_videoDstYImage =
      scale_new_image(m_videoDstWidth,
                      m_videoDstHeight, 1);
    m_videoYResizer =
      scale_image_init(m_videoDstYImage, m_videoSrcYImage,
                       Bell_filter, Bell_support);

    m_videoSrcUVImage =
      scale_new_image(m_videoSrcWidth / 2,
                      m_videoSrcAdjustedHeight / 2, 1);
    m_videoSrcUVImage->span = m_videoSrcUVStride;
    m_videoDstUVImage =
      scale_new_image(m_videoDstWidth / 2,
                      m_videoDstHeight / 2, 1);
    m_videoUVResizer =
      scale_image_init(m_videoDstUVImage, m_videoSrcUVImage,
                       Bell_filter, Bell_support);
  }
}

void CVideoEncoder::ProcessVideoYUVFrame(CMediaFrame *pFrame)
{
  yuv_media_frame_t *pYUV = (yuv_media_frame_t *)pFrame->GetData();

  const u_int8_t* pY = pYUV->y;
  const u_int8_t* pU = pYUV->u;
  const u_int8_t* pV = pYUV->v;
  u_int16_t yStride = pYUV->y_stride;
  u_int16_t uvStride = pYUV->uv_stride;

  Timestamp srcFrameTimestamp = pFrame->GetTimestamp();

  if (m_videoSrcFrameNumber == 0) {
    m_videoStartTimestamp = srcFrameTimestamp;
    SetVideoSrcSize(pYUV->w, pYUV->h, yStride, false);
  }
  // if we want to be able to handle different input sizes, we
  // can check pYUV->w, pYUV->h, ystride against the stored values

  m_videoSrcFrameNumber++;
  m_videoSrcElapsedDuration = srcFrameTimestamp - m_videoStartTimestamp;

#ifdef DEBUG_VIDEO_SYNC
  debug_message("vsrc# %d srcDuration="U64" dst# %d dstDuration "U64,
                m_videoSrcFrameNumber, m_videoSrcElapsedDuration,
                m_videoDstFrameNumber, m_videoDstElapsedDuration);
#endif

  // destination gets ahead of source
  // drop src frames as needed to match target frame rate
  if (m_videoSrcElapsedDuration + m_videoDstFrameDuration < m_videoDstElapsedDuration) {
#ifdef DEBUG_VIDEO_SYNC
    debug_message("video: dropping frame, SrcElapsedDuration="U64" DstElapsedDuration="U64,
                  m_videoSrcElapsedDuration, m_videoDstElapsedDuration);
#endif
    return;
  }
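
SetVideoSrcSize() above adjusts the source height to the destination aspect ratio (rounded up to a multiple of 16) and, when the source is taller than the target, centers the picture by starting the Y and UV reads at a byte offset into each plane. Below is a minimal, self-contained sketch of that arithmetic; the struct and function names are invented for illustration, and only the formulas mirror the listing above.

// Hypothetical, self-contained illustration -- not part of mp4live.
// CropPlan and plan_crop() are invented names; the formulas follow
// SetVideoSrcSize() in the listing above.
#include <cstdint>
#include <cstdio>
#include <cmath>

struct CropPlan {
  uint16_t adjustedHeight;  // source height actually fed to the scaler
  uint32_t yCropOffset;     // byte offset into the Y plane
  uint32_t uvCropOffset;    // byte offset into each chroma plane
};

static CropPlan plan_crop(uint16_t srcWidth, uint16_t srcHeight,
                          uint16_t srcYStride,
                          uint16_t dstWidth, uint16_t dstHeight) {
  float srcAspect = (float)srcWidth / (float)srcHeight;
  float dstAspect = (float)dstWidth / (float)dstHeight;

  CropPlan p = { srcHeight, 0, 0 };
  if (std::fabs(srcAspect - dstAspect) <= 0.01f)
    return p;  // aspect ratios already match closely enough

  // Height the source would need for the destination aspect ratio,
  // rounded up to a multiple of 16 (macroblock alignment).
  uint16_t adjusted = (uint16_t)(srcWidth / dstAspect);
  if (adjusted % 16 != 0)
    adjusted += 16 - (adjusted % 16);
  p.adjustedHeight = adjusted;

  if (srcAspect < dstAspect) {
    // Source is "taller" than the target: skip equal bands top and bottom.
    p.yCropOffset  = (uint32_t)srcYStride * ((srcHeight - adjusted) / 2);
    p.uvCropOffset = p.yCropOffset / 4;   // 4:2:0 chroma planes are quarter-size
  }
  return p;
}

int main() {
  // e.g. a 720x576 capture encoded to a 320x240 (4:3) target:
  CropPlan p = plan_crop(720, 576, 720, 320, 240);
  printf("adjusted height %u, Y crop %u bytes, UV crop %u bytes\n",
         (unsigned)p.adjustedHeight, (unsigned)p.yCropOffset,
         (unsigned)p.uvCropOffset);
  // -> adjusted height 544, Y crop 11520 bytes, UV crop 2880 bytes
}

With a 720x576 source and a 320x240 target, the adjusted height rounds from 540 up to 544, so 16 lines are trimmed from the top (and 16 implicitly from the bottom), which at a 720-byte stride is an 11520-byte offset into the Y plane.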
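The truncated tail of the listing is the encoder's frame-rate matching step: ThreadMain() precomputes one output-frame duration in timestamp ticks, and ProcessVideoYUVFrame() drops any source frame that already lags the output timeline by more than one frame period. A small self-contained sketch of that test follows, assuming a hypothetical 90 kHz timestamp clock (the real TimestampTicks constant comes from the mp4live headers); the names are invented for illustration.

// Hypothetical, self-contained illustration -- not part of mp4live.
// kTimestampTicks, dst_frame_duration() and should_drop_source_frame()
// are invented names; only the arithmetic mirrors the listing above.
#include <cstdint>
#include <cstdio>

typedef uint64_t Duration;   // elapsed time in timestamp ticks

// Assume a 90 kHz timestamp clock for illustration only.
static const double kTimestampTicks = 90000.0;

// Same rounding as ThreadMain(): ticks per output frame at the target rate.
static Duration dst_frame_duration(float dstFrameRate) {
  return (Duration)((kTimestampTicks / dstFrameRate) + 0.5);
}

// Same test as ProcessVideoYUVFrame(): drop the source frame when the
// output timeline is already more than one frame period ahead of it.
static bool should_drop_source_frame(Duration srcElapsed,
                                     Duration dstElapsed,
                                     Duration frameDuration) {
  return srcElapsed + frameDuration < dstElapsed;
}

int main() {
  Duration d = dst_frame_duration(25.0f);      // 90000 / 25 = 3600 ticks
  printf("frame duration = %llu ticks\n", (unsigned long long)d);
  // Source frame at 10000 ticks while the output timeline sits at 14000:
  printf("drop? %d\n", should_drop_source_frame(10000, 14000, d));  // 1 (drop)
  // Source frame at 12000 ticks, output at 14000: within one frame period.
  printf("drop? %d\n", should_drop_source_frame(12000, 14000, d));  // 0 (keep)
}

At 25 fps the frame period is 3600 ticks, so a source frame timestamped 10000 ticks is dropped once the output timeline has reached 14000 ticks, while one at 12000 ticks is still encoded.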
