📄 video_encoder_class.cpp
  Duration lag = m_videoSrcElapsedDuration - m_videoDstElapsedDuration;

  // source gets ahead of destination
  if (lag > 3 * m_videoDstFrameDuration) {
    debug_message("lag "D64" src "U64" dst "U64,
                  lag, m_videoSrcElapsedDuration, m_videoDstElapsedDuration);
    int j = (lag - (2 * m_videoDstFrameDuration)) / m_videoDstFrameDuration;
    m_videoDstFrameNumber += j;
    m_videoDstElapsedDuration = VideoDstFramesToDuration();
    debug_message("video: advancing dst by %d frames", j);
  }

  // Disabled since we are not taking into account audio drift anymore
  // and the new algorithm automatically factors in any drift due
  // to video encoding
  /*
  // add any external drift (i.e. audio encoding drift)
  // to our drift measurement
  m_videoEncodingDrift += m_otherTotalDrift - m_otherLastTotalDrift;
  m_otherLastTotalDrift = m_otherTotalDrift;

  // check if the video encoding drift exceeds the max limit
  if (m_videoEncodingDrift >= m_videoEncodingMaxDrift) {
    // we skip multiple destination frames to give audio
    // a better chance to keep up
    // on subsequent calls, we will return immediately until
    // m_videoSrcElapsedDuration catches up with m_videoDstElapsedDuration
    int framesToSkip = m_videoEncodingDrift / m_videoDstFrameDuration;
    m_videoEncodingDrift -= framesToSkip * m_videoDstFrameDuration;
    m_videoDstFrameNumber += framesToSkip;
    m_videoDstElapsedDuration = VideoDstFramesToDuration();
    debug_message("video: will skip %d frames due to encoding drift",
                  framesToSkip);
    return;
  }
  */

  m_videoDstFrameNumber++;
  m_videoDstElapsedDuration = VideoDstFramesToDuration();

  //Timestamp encodingStartTimestamp = GetTimestamp();
  u_int8_t* mallocedYuvImage = NULL;

  // crop to desired aspect ratio (may be a no-op)
  const u_int8_t* yImage = pY + m_videoSrcYCrop;
  const u_int8_t* uImage = pU + m_videoSrcUVCrop;
  const u_int8_t* vImage = pV + m_videoSrcUVCrop;

  // resize image if necessary
  if (m_videoYResizer) {
    u_int8_t* resizedYUV = (u_int8_t*)Malloc(m_videoDstYUVSize);
    u_int8_t* resizedY = resizedYUV;
    u_int8_t* resizedU = resizedYUV + m_videoDstYSize;
    u_int8_t* resizedV = resizedYUV + m_videoDstYSize + m_videoDstUVSize;

    m_videoSrcYImage->data = (pixel_t *)yImage;
    m_videoDstYImage->data = resizedY;
    scale_image_process(m_videoYResizer);

    m_videoSrcUVImage->data = (pixel_t *)uImage;
    m_videoDstUVImage->data = resizedU;
    scale_image_process(m_videoUVResizer);

    m_videoSrcUVImage->data = (pixel_t *)vImage;
    m_videoDstUVImage->data = resizedV;
    scale_image_process(m_videoUVResizer);

    // done with the original source image
    if (mallocedYuvImage) free(mallocedYuvImage);

    // switch over to resized version
    mallocedYuvImage = resizedYUV;
    yImage = resizedY;
    uImage = resizedU;
    vImage = resizedV;
    yStride = m_videoDstWidth;
    uvStride = yStride / 2;
  }
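  // Two illustrative notes on the code above; the concrete numbers are
  // assumptions for the sake of example, not values taken from this file:
  //
  // 1) Catch-up arithmetic: at a 90 kHz timescale and a 30 fps destination,
  //    m_videoDstFrameDuration would be 3000 ticks. A lag of 12000 ticks
  //    (four frame times) exceeds 3 * 3000, so
  //    j = (12000 - 2 * 3000) / 3000 = 2, and the destination frame counter
  //    jumps ahead two frames, leaving two frame times of slack (below the
  //    three-frame trigger) rather than snapping fully to the source.
  //
  // 2) Plane carving: the resize path packs the three 4:2:0 planes back to
  //    back in one Malloc'd block. For a 320x240 destination that would be
  //    m_videoDstYSize = 320 * 240 = 76800 bytes, m_videoDstUVSize =
  //    160 * 120 = 19200 bytes, and m_videoDstYUVSize =
  //    76800 + 2 * 19200 = 115200 bytes.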
  // this has to be rewritten to not mess with the original YUV,
  // since it has to be done after the resizer.
  if (m_videoFilter != VF_NONE) {
    if (mallocedYuvImage == NULL) {
      // need to copy
      u_int8_t* YUV = (u_int8_t*)Malloc(m_videoDstYUVSize);
      u_int8_t* pY = YUV;
      u_int8_t* pU = YUV + m_videoDstYSize;
      u_int8_t* pV = YUV + m_videoDstYSize + m_videoDstUVSize;
      CopyYuv(yImage, uImage, vImage,
              yStride, uvStride, uvStride,
              pY, pU, pV,
              m_videoDstWidth, m_videoDstWidth / 2, m_videoDstWidth / 2,
              m_videoDstWidth, m_videoDstHeight);
      mallocedYuvImage = YUV;
      yImage = pY;
      uImage = pU;
      vImage = pV;
      yStride = m_videoDstWidth;
      uvStride = yStride / 2;
    }
    switch (m_videoFilter) {
    case VF_DEINTERLACE:
      video_filter_interlace((uint8_t *)yImage,
                             (uint8_t *)yImage + m_videoDstYSize, yStride);
      break;
#ifdef HAVE_FFMPEG
    case VF_FFMPEG_DEINTERLACE_INPLACE: {
      AVPicture src;
      avpicture_fill(&src, (uint8_t *)yImage, PIX_FMT_YUV420P,
                     m_videoDstWidth, m_videoDstHeight);
      avpicture_deinterlace(&src, &src, PIX_FMT_YUV420P,
                            m_videoDstWidth, m_videoDstHeight);
      //debug_message("ffmpeg_deinterlace");
      break;
    }
#else
    case VF_FFMPEG_DEINTERLACE_INPLACE:
#endif
    case VF_NONE:
    default:
      break;
    }
  }

  // if we want encoded video frames
  // this check really doesn't need to be here
  bool rc = EncodeImage(yImage, uImage, vImage,
                        yStride, uvStride,
                        m_videoWantKeyFrame || pYUV->force_iframe,
                        m_videoDstElapsedDuration,
                        srcFrameTimestamp);

  if (!rc) {
    debug_message("Can't encode image!");
    if (mallocedYuvImage) free(mallocedYuvImage);
    return;
  }

  m_videoWantKeyFrame = false;

  uint8_t *frame;
  uint32_t frame_len;
  bool got_image;
  Timestamp pts, dts;
  got_image = GetEncodedImage(&frame, &frame_len, &dts, &pts);
  if (got_image) {
    //error_message("frame len %d time %llu", frame_len, dts);
    CMediaFrame* pFrame = new CMediaFrame(GetFrameType(),
                                          frame,
                                          frame_len,
                                          dts,
                                          m_videoDstFrameDuration,
                                          TimestampTicks,
                                          pts);
    pFrame->SetMediaFreeFunction(GetMediaFreeFunction());
    ForwardFrame(pFrame);
  }

  // forward reconstructed video to sinks
  if (m_preview) {
    yuv_media_frame_t *mf = MALLOC_STRUCTURE(yuv_media_frame_t);
    uint8_t *alloced;
    mf->y_stride = m_videoDstWidth;
    mf->uv_stride = m_videoDstWidth / 2;
    mf->y = alloced = (u_int8_t*)Malloc(m_videoDstYUVSize);
    mf->u = mf->y + m_videoDstYSize;
    mf->v = mf->u + m_videoDstUVSize;
    mf->w = m_videoDstWidth;
    mf->h = m_videoDstHeight;
    mf->free_y = true;
    if (GetReconstructedImage(alloced,
                              alloced + m_videoDstYSize,
                              alloced + m_videoDstYSize + m_videoDstUVSize)) {
      //debug_message("forwarding encoded yuv");
      CMediaFrame* pFrame = new CMediaFrame(YUVVIDEOFRAME,
                                            mf,
                                            0,
                                            srcFrameTimestamp,
                                            m_videoDstFrameDuration);
      pFrame->SetMediaFreeFunction(c_ReleaseReconstruct);
      ForwardFrame(pFrame);
    } else {
      CHECK_AND_FREE(mf->y);
      free(mf);
    }
  }

  if (mallocedYuvImage) free(mallocedYuvImage);
}

void CVideoEncoder::DoStopVideo()
{
  DestroyVideoResizer();
  StopEncoder();
  debug_message("Video encoding profile %s stats", GetProfileName());
  debug_message("Encoded frames: %u", m_videoDstFrameNumber);
}

void CVideoEncoder::DestroyVideoResizer()
{
  if (m_videoSrcYImage) {
    scale_free_image(m_videoSrcYImage);
    m_videoSrcYImage = NULL;
  }
  if (m_videoDstYImage) {
    scale_free_image(m_videoDstYImage);
    m_videoDstYImage = NULL;
  }
  if (m_videoYResizer) {
    scale_image_done(m_videoYResizer);
    m_videoYResizer = NULL;
  }
  if (m_videoSrcUVImage) {
    scale_free_image(m_videoSrcUVImage);
    m_videoSrcUVImage = NULL;
  }
  if (m_videoDstUVImage) {
    scale_free_image(m_videoDstUVImage);
    m_videoDstUVImage = NULL;
  }
  if (m_videoUVResizer) {
    scale_image_done(m_videoUVResizer);
    m_videoUVResizer = NULL;
  }
}

void CVideoEncoder::AddRtpDestination(CMediaStream *stream,
                                      bool disable_ts_offset,
                                      uint16_t max_ttl,
                                      in_port_t srcPort)
{
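  // If the stream already has a video RTP session, just attach this
  // encoder to it; otherwise fill in a full parameter block for a new
  // (S)RTP session below.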
  mp4live_rtp_params_t *mrtp;

  if (stream->m_video_rtp_session != NULL) {
    AddRtpDestInt(disable_ts_offset, stream->m_video_rtp_session);
    return;
  }

  mrtp = MALLOC_STRUCTURE(mp4live_rtp_params_t);
  rtp_default_params(&mrtp->rtp_params);

  mrtp->rtp_params.rtp_addr = stream->GetStringValue(STREAM_VIDEO_DEST_ADDR);
  mrtp->rtp_params.rtp_rx_port = srcPort;
  mrtp->rtp_params.rtp_tx_port = stream->GetIntegerValue(STREAM_VIDEO_DEST_PORT);
  mrtp->rtp_params.rtp_ttl = max_ttl;
  mrtp->rtp_params.transmit_initial_rtcp = 1;
  mrtp->rtp_params.rtcp_addr = stream->GetStringValue(STREAM_VIDEO_RTCP_DEST_ADDR);
  mrtp->rtp_params.rtcp_tx_port = stream->GetIntegerValue(STREAM_VIDEO_RTCP_DEST_PORT);

  mrtp->use_srtp = stream->GetBoolValue(STREAM_VIDEO_USE_SRTP);
  mrtp->srtp_params.enc_algo =
    (srtp_enc_algos_t)stream->GetIntegerValue(STREAM_VIDEO_SRTP_ENC_ALGO);
  mrtp->srtp_params.auth_algo =
    (srtp_auth_algos_t)stream->GetIntegerValue(STREAM_VIDEO_SRTP_AUTH_ALGO);
  mrtp->srtp_params.tx_key = stream->m_video_key;
  mrtp->srtp_params.tx_salt = stream->m_video_salt;
  mrtp->srtp_params.rx_key = stream->m_video_key;
  mrtp->srtp_params.rx_salt = stream->m_video_salt;
  mrtp->srtp_params.rtp_enc = stream->GetBoolValue(STREAM_VIDEO_SRTP_RTP_ENC);
  mrtp->srtp_params.rtp_auth = stream->GetBoolValue(STREAM_VIDEO_SRTP_RTP_AUTH);
  mrtp->srtp_params.rtcp_enc = stream->GetBoolValue(STREAM_VIDEO_SRTP_RTCP_ENC);

  AddRtpDestInt(disable_ts_offset, mrtp);
}
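// Note on the SRTP settings above: the same key/salt pair
// (stream->m_video_key / stream->m_video_salt) is wired into both the tx
// and rx directions, so the far end must use matching key material.
//
// A hypothetical call site, sketched for illustration (the TTL and
// source-port values are placeholders, not taken from this file):
//
//   encoder->AddRtpDestination(stream,
//                              false,   // disable_ts_offset
//                              15,      // max_ttl
//                              5000);   // srcPort -> rtp_params.rtp_rx_port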