⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 media_source.cpp

📁 MPEG-4编解码的实现(包括MPEG4视音频编解码)
💻 CPP
📖 第 1 页 / 共 2 页
字号:
/*
 * The contents of this file are subject to the Mozilla Public
 * License Version 1.1 (the "License"); you may not use this file
 * except in compliance with the License. You may obtain a copy of
 * the License at http://www.mozilla.org/MPL/
 * 
 * Software distributed under the License is distributed on an "AS
 * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
 * implied. See the License for the specific language governing
 * rights and limitations under the License.
 * 
 * The Original Code is MPEG4IP.
 * 
 * The Initial Developer of the Original Code is Cisco Systems Inc.
 * Portions created by Cisco Systems Inc. are
 * Copyright (C) Cisco Systems Inc. 2000-2002.  All Rights Reserved.
 * 
 * Contributor(s): 
 *		Dave Mackie		dmackie@cisco.com
 */

#include "mp4live.h"
#include "media_source.h"
#include "audio_encoder.h"
#include "video_encoder.h"
#include "video_util_rgb.h"
#include <mp4av.h>


// Construct an idle media source: empty sink list, no audio or
// video pipeline allocated yet (those are set up by InitVideo()
// and the audio init path).
CMediaSource::CMediaSource() 
{
	// mutex guarding the sink array against concurrent add/remove
	m_pSinksMutex = SDL_CreateMutex();
	if (m_pSinksMutex == NULL) {
		debug_message("CreateMutex error");
	}

	// start with an empty sink list
	for (int slot = MAX_SINKS - 1; slot >= 0; slot--) {
		m_sinks[slot] = NULL;
	}

	// nothing is being sourced yet
	m_source = m_sourceVideo = m_sourceAudio = false;

	// allow non-realtime sources to run at most 500 msec ahead
	// of wall-clock before PaceSource() throttles them
	m_maxAheadDuration = TimestampTicks / 2;

	// video pipeline state; images and resizers are created
	// lazily in SetVideoSrcStride() when geometry is known
	m_videoSource = this;
	m_videoSrcYImage = m_videoDstYImage = NULL;
	m_videoYResizer = NULL;
	m_videoSrcUVImage = m_videoDstUVImage = NULL;
	m_videoUVResizer = NULL;
	m_videoEncoder = NULL;
	m_videoDstPrevImage = NULL;
	m_videoDstPrevReconstructImage = NULL;
	m_videoDstPrevFrame = NULL;

	// audio pipeline state
	m_audioResampleInputBuffer = NULL;
	m_audioPreEncodingBuffer = NULL;
	m_audioEncoder = NULL;
}

// Destructor: releases the sink-list mutex.
// NOTE(review): encoder/resizer/buffer members are not freed here —
// presumably DoStopSource()/DoStopVideo()/DoStopAudio() are expected
// to have run before destruction; confirm against callers.
CMediaSource::~CMediaSource() 
{
	SDL_DestroyMutex(m_pSinksMutex);
	m_pSinksMutex = NULL;
}

// Attach a sink that will receive frames via ForwardFrame().
// Returns true if the sink was stored in a free slot, false if
// the table is full or the mutex could not be taken.
bool CMediaSource::AddSink(CMediaSink* pSink) 
{
	if (SDL_LockMutex(m_pSinksMutex) == -1) {
		debug_message("AddSink LockMutex error");
		return false;
	}

	// place the sink in the first empty slot, if any
	bool added = false;
	int slot = 0;
	while (slot < MAX_SINKS) {
		if (m_sinks[slot] == NULL) {
			m_sinks[slot] = pSink;
			added = true;
			break;
		}
		slot++;
	}

	if (SDL_UnlockMutex(m_pSinksMutex) == -1) {
		debug_message("UnlockMutex error");
	}
	return added;
}

// Detach a sink; remaining sinks are shifted down so the active
// entries stay contiguous at the front of the array (ForwardFrame()
// and RemoveAllSinks() rely on that).
void CMediaSource::RemoveSink(CMediaSink* pSink) 
{
	if (SDL_LockMutex(m_pSinksMutex) == -1) {
		debug_message("RemoveSink LockMutex error");
		return;
	}

	for (int slot = 0; slot < MAX_SINKS; slot++) {
		if (m_sinks[slot] != pSink) {
			continue;
		}
		// found it: compact the tail down one position
		int last = slot;
		while (last < MAX_SINKS - 1) {
			m_sinks[last] = m_sinks[last + 1];
			last++;
		}
		m_sinks[last] = NULL;
		break;
	}

	if (SDL_UnlockMutex(m_pSinksMutex) == -1) {
		debug_message("UnlockMutex error");
	}
}

// Detach every sink. The sink array is kept contiguous, so the
// scan can stop at the first empty slot.
void CMediaSource::RemoveAllSinks(void) 
{
	if (SDL_LockMutex(m_pSinksMutex) == -1) {
		debug_message("RemoveAllSinks LockMutex error");
		return;
	}

	int slot = 0;
	while (slot < MAX_SINKS && m_sinks[slot] != NULL) {
		m_sinks[slot] = NULL;
		slot++;
	}

	if (SDL_UnlockMutex(m_pSinksMutex) == -1) {
		debug_message("UnlockMutex error");
	}
}

// Pump roughly one second's worth of media, then return so the
// caller can check for pending control commands.
//
// When both audio and video are active, always advance whichever
// stream has the smaller elapsed duration so the two stay
// interleaved; when one hits end-of-stream the other drains, and
// when both are done the source is stopped.
//
// Fix: if neither audio nor video is being sourced,
// GetElapsedDuration() stays at 0 and the original loop would
// busy-spin forever; return immediately in that case instead.
void CMediaSource::ProcessMedia()
{
	Duration start = GetElapsedDuration();

	// process ~1 second before returning to check for commands
	while (GetElapsedDuration() - start < (Duration)TimestampTicks) {

		if (m_sourceVideo && m_sourceAudio) {
			bool endOfVideo = IsEndOfVideo();
			bool endOfAudio = IsEndOfAudio();

			if (endOfVideo && endOfAudio) {
				DoStopSource();
				break;
			} else if (endOfVideo) {
				ProcessAudio();
			} else if (endOfAudio) {
				ProcessVideo();
			} else if (m_videoSrcElapsedDuration <= m_audioSrcElapsedDuration) {
				// video is at or behind audio; advance it first
				ProcessVideo();
			} else {
				ProcessAudio();
			}

		} else if (m_sourceVideo) {
			if (IsEndOfVideo()) {
				DoStopSource();
				break;
			}
			ProcessVideo();

		} else if (m_sourceAudio) {
			if (IsEndOfAudio()) {
				DoStopSource();
				break;
			}
			ProcessAudio();

		} else {
			// nothing is being sourced; avoid a busy-wait hang
			break;
		}
	}
}

// Elapsed source duration: the lagging (smaller) of the two stream
// durations when both are active, the single active stream's
// duration otherwise, or 0 when nothing is being sourced.
Duration CMediaSource::GetElapsedDuration()
{
	if (m_sourceVideo && m_sourceAudio) {
		return MIN(m_videoSrcElapsedDuration, m_audioSrcElapsedDuration);
	}
	if (m_sourceVideo) {
		return m_videoSrcElapsedDuration;
	}
	return m_sourceAudio ? m_audioSrcElapsedDuration : 0;
}

// Throttle non-realtime sources (i.e. files) when any sink needs
// realtime semantics (i.e. RTP/UDP): once media time gets too far
// ahead of wall-clock time, sleep off part of the lead.
void CMediaSource::PaceSource()
{
	// realtime capture paces itself; with no realtime sink there
	// is no reason to slow down either
	if (m_sourceRealTime || !m_sinkRealTime) {
		return;
	}

	// how far media processing has run ahead of wall-clock
	Duration realDuration = GetTimestamp() - m_startTimestamp;
	Duration aheadDuration = GetElapsedDuration() - realDuration;

	if (aheadDuration >= m_maxAheadDuration) {
		// sleep until we are only maxAhead/2 in front; the /1000
		// converts timestamp ticks (usec, per the 500-msec note
		// where m_maxAheadDuration is set) to SDL_Delay's msec
		SDL_Delay((aheadDuration - (m_maxAheadDuration / 2)) / 1000);
	}
}

// Hand a finished frame to every attached sink. The sink list is
// kept contiguous, so iteration stops at the first empty slot.
void CMediaSource::ForwardFrame(CMediaFrame* pFrame)
{
	if (SDL_LockMutex(m_pSinksMutex) == -1) {
		debug_message("ForwardFrame LockMutex error");
		return;
	}

	int slot = 0;
	while (slot < MAX_SINKS && m_sinks[slot] != NULL) {
		m_sinks[slot]->EnqueueFrame(pFrame);
		slot++;
	}

	if (SDL_UnlockMutex(m_pSinksMutex) == -1) {
		debug_message("UnlockMutex error");
	}
}

// Stop both media streams and mark the source inactive.
// Safe to call repeatedly; a no-op once already stopped.
void CMediaSource::DoStopSource()
{
	if (!m_source) {
		return;		// already stopped
	}
	DoStopVideo();
	DoStopAudio();
	m_source = false;
}

// Configure the video half of the pipeline: destination geometry,
// target frame rate/duration, and the encoder instance named in
// the configuration. Returns false if the encoder cannot be
// created or fails to initialize (the encoder is cleaned up in
// that case). Source geometry comes later via SetVideoSrcSize().
bool CMediaSource::InitVideo(
	MediaType srcType,
	bool realTime)
{
	m_sourceRealTime = realTime;
	m_sinkRealTime = m_pConfig->GetBoolValue(CONFIG_RTP_ENABLE);

	// reset both frame counters so A/V start in sync at frame zero
	m_videoSrcType = srcType;
	m_videoSrcFrameNumber = 0;
	m_audioSrcFrameNumber = 0;

	// destination (encoded) frame type, timing, and geometry
	m_videoDstType = CMediaFrame::Mpeg4VideoFrame;
	m_videoDstFrameRate = m_pConfig->GetFloatValue(CONFIG_VIDEO_FRAME_RATE);
	// rounded to the nearest whole timestamp tick
	m_videoDstFrameDuration =
		(Duration)(((float)TimestampTicks / m_videoDstFrameRate) + 0.5);
	m_videoDstFrameNumber = 0;
	m_videoDstWidth = m_pConfig->m_videoWidth;
	m_videoDstHeight = m_pConfig->m_videoHeight;
	m_videoDstAspectRatio =
		(float)m_pConfig->m_videoWidth / (float)m_pConfig->m_videoHeight;
	// YUV 4:2:0 plane sizes: U and V are each a quarter of Y
	m_videoDstYSize = m_videoDstWidth * m_videoDstHeight;
	m_videoDstUVSize = m_videoDstYSize / 4;
	m_videoDstYUVSize = (m_videoDstYSize * 3) / 2;

	// create and initialize the configured encoder
	m_videoEncoder = VideoEncoderCreate(
		m_pConfig->GetStringValue(CONFIG_VIDEO_ENCODER));
	if (m_videoEncoder == NULL) {
		return false;
	}
	if (!m_videoEncoder->Init(m_pConfig, realTime)) {
		delete m_videoEncoder;
		m_videoEncoder = NULL;
		return false;
	}

	// encoding state
	m_videoWantKeyFrame = true;	// first encoded frame must be a key frame
	m_videoSkippedFrames = 0;
	m_videoEncodingDrift = 0;
	m_videoEncodingMaxDrift = m_videoDstFrameDuration;
	m_videoSrcElapsedDuration = 0;
	m_videoDstElapsedDuration = 0;
	m_otherTotalDrift = 0;
	m_otherLastTotalDrift = 0;

	// previous-frame bookkeeping used to pipeline encoded output
	m_videoDstPrevImage = NULL;
	m_videoDstPrevReconstructImage = NULL;
	m_videoDstPrevFrame = NULL;
	m_videoDstPrevFrameLength = 0;

	return true;
}

// Record the source picture geometry and delegate the
// stride-dependent setup (cropping, resizer creation) to
// SetVideoSrcStride(). N.B. InitVideo() must be called first.
void CMediaSource::SetVideoSrcSize(
	u_int16_t srcWidth,
	u_int16_t srcHeight,
	u_int16_t srcStride,
	bool matchAspectRatios)
{
	m_videoMatchAspectRatios = matchAspectRatios;
	m_videoSrcWidth = srcWidth;
	m_videoSrcHeight = srcHeight;
	m_videoSrcAspectRatio = (float)srcWidth / (float)srcHeight;

	SetVideoSrcStride(srcStride);
}

// Recompute everything that depends on the source stride: plane
// sizes, aspect-ratio crop offsets, and (when src and dst geometry
// differ) the Y and UV scaler instances.
// N.B. SetVideoSrcSize() should be called once before.
void CMediaSource::SetVideoSrcStride(
	u_int16_t srcStride)
{
	// N.B. SetVideoSrcSize() should be called once before 

	// 4:2:0: chroma planes are half-width, so half the stride
	m_videoSrcYStride = srcStride;
	m_videoSrcUVStride = srcStride / 2;

	// these next three may change below
	m_videoSrcAdjustedHeight = m_videoSrcHeight;
	m_videoSrcYCrop = 0;
	m_videoSrcUVCrop = 0;

	// match aspect ratios
	if (m_videoMatchAspectRatios 
	  && fabs(m_videoSrcAspectRatio - m_videoDstAspectRatio) > 0.01) {

		// height the source would need at its own width to match
		// the destination aspect ratio, rounded UP to a multiple
		// of 16 (MPEG-4 macroblock size)
		m_videoSrcAdjustedHeight =
			(u_int16_t)(m_videoSrcWidth / m_videoDstAspectRatio);
		if ((m_videoSrcAdjustedHeight % 16) != 0) {
			m_videoSrcAdjustedHeight += 16 - (m_videoSrcAdjustedHeight % 16);
		}

		if (m_videoSrcAspectRatio < m_videoDstAspectRatio) {
			// crop src: byte offsets that skip half the excess rows,
			// centering the retained band vertically
			m_videoSrcYCrop = m_videoSrcYStride * 
				((m_videoSrcHeight - m_videoSrcAdjustedHeight) / 2);
			m_videoSrcUVCrop = m_videoSrcYCrop / 4;
		}
		// NOTE(review): when src is wider than dst (aspect > dst),
		// adjustedHeight exceeds srcHeight and no crop is applied —
		// presumably the caller pads; confirm against
		// ProcessVideoYUVFrame()'s "caller is responsible for
		// adding any padding" note.
	}

	// plane sizes use the larger of real and adjusted height so
	// buffers cover the padded case as well
	m_videoSrcYSize = m_videoSrcYStride 
		* MAX(m_videoSrcHeight, m_videoSrcAdjustedHeight);
	m_videoSrcUVSize = m_videoSrcYSize / 4;
	m_videoSrcYUVSize = (m_videoSrcYSize * 3) / 2;

	// resizing

	// drop any scalers built for a previous geometry before
	// (possibly) building new ones
	DestroyVideoResizer();

	// only scale when src and dst differ; otherwise the resizer
	// members stay NULL and ProcessVideoYUVFrame() skips scaling
	if (m_videoSrcWidth != m_videoDstWidth 
	  || m_videoSrcAdjustedHeight != m_videoDstHeight) {

		// luma scaler: src (cropped/adjusted) -> dst geometry;
		// span carries the source stride so rows are read correctly
		m_videoSrcYImage = 
			scale_new_image(m_videoSrcWidth, 
				m_videoSrcAdjustedHeight, 1);
		m_videoSrcYImage->span = m_videoSrcYStride;
		m_videoDstYImage = 
			scale_new_image(m_videoDstWidth, 
				m_videoDstHeight, 1);
		m_videoYResizer = 
			scale_image_init(m_videoDstYImage, m_videoSrcYImage, 
				Bell_filter, Bell_support);

		// chroma scaler: half-size planes, same arrangement;
		// shared by both U and V (data pointer swapped per plane)
		m_videoSrcUVImage = 
			scale_new_image(m_videoSrcWidth / 2, 
				m_videoSrcAdjustedHeight / 2, 1);
		m_videoSrcUVImage->span = m_videoSrcUVStride;
		m_videoDstUVImage = 
			scale_new_image(m_videoDstWidth / 2, 
				m_videoDstHeight / 2, 1);
		m_videoUVResizer = 
			scale_image_init(m_videoDstUVImage, m_videoSrcUVImage, 
				Bell_filter, Bell_support);
	}
}

void CMediaSource::ProcessVideoYUVFrame(
	u_int8_t* pY,
	u_int8_t* pU,
	u_int8_t* pV,
	u_int16_t yStride,
	u_int16_t uvStride,
	Timestamp srcFrameTimestamp)
{
	if (m_videoSrcFrameNumber == 0 && m_audioSrcFrameNumber == 0) {
		m_startTimestamp = srcFrameTimestamp;
	}

	m_videoSrcFrameNumber++;
	m_videoSrcElapsedDuration = srcFrameTimestamp - m_startTimestamp;

	// drop src frames as needed to match target frame rate
	if (m_videoDstElapsedDuration > m_videoSrcElapsedDuration) {
		return;
	}

	// if we're running in real-time mode
	if (m_sourceRealTime) {

		// add any external drift (i.e. audio encoding drift)
		// to our drift measurement
		m_videoEncodingDrift += 
			m_otherTotalDrift - m_otherLastTotalDrift;
		m_otherLastTotalDrift = m_otherTotalDrift;

		// check if we are falling behind
		if (m_videoEncodingDrift >= m_videoEncodingMaxDrift) {
			m_videoEncodingDrift -= m_videoDstFrameDuration;

			if (m_videoEncodingDrift < 0) {
				m_videoEncodingDrift = 0;
			}

			// skip this frame			
			m_videoSkippedFrames++;
			return;
		}
	}

	Timestamp encodingStartTimestamp = GetTimestamp();

	// this will either never happen (live capture)
	// or just happen once at startup when we discover
	// the stride used by the video decoder
	if (yStride != m_videoSrcYStride) {
		SetVideoSrcSize(m_videoSrcWidth, m_videoSrcHeight, 
			yStride, m_videoMatchAspectRatios);
	}

	u_int8_t* mallocedYuvImage = NULL;

	// crop to desired aspect ratio (may be a no-op)
	u_int8_t* yImage = pY + m_videoSrcYCrop;
	u_int8_t* uImage = pU + m_videoSrcUVCrop;
	u_int8_t* vImage = pV + m_videoSrcUVCrop;

	// Note: caller is responsible for adding any padding that is needed

	// resize image if necessary
	if (m_videoYResizer) {
		u_int8_t* resizedYUV = 
			(u_int8_t*)Malloc(m_videoDstYUVSize);
		
		u_int8_t* resizedY = 
			resizedYUV;
		u_int8_t* resizedU = 
			resizedYUV + m_videoDstYSize;
		u_int8_t* resizedV = 
			resizedYUV + m_videoDstYSize + m_videoDstUVSize;

		m_videoSrcYImage->data = yImage;
		m_videoDstYImage->data = resizedY;
		scale_image_process(m_videoYResizer);

		m_videoSrcUVImage->data = uImage;
		m_videoDstUVImage->data = resizedU;
		scale_image_process(m_videoUVResizer);

		m_videoSrcUVImage->data = vImage;
		m_videoDstUVImage->data = resizedV;
		scale_image_process(m_videoUVResizer);

		// done with the original source image
		// this may be NULL
		free(mallocedYuvImage);

		// switch over to resized version
		mallocedYuvImage = resizedYUV;
		yImage = resizedY;
		uImage = resizedU;
		vImage = resizedV;
		yStride = m_videoDstWidth;
		uvStride = yStride / 2;
	}

	// if we want encoded video frames
	if (m_pConfig->m_videoEncode) {

		// call video encoder
		bool rc = m_videoEncoder->EncodeImage(
			yImage, uImage, vImage, 
			yStride, uvStride,
			m_videoWantKeyFrame);

		if (!rc) {
			debug_message("Can't encode image!");
			free(mallocedYuvImage);
			return;
		}

		// clear want key frame flag
		m_videoWantKeyFrame = false;
	}

	Timestamp dstPrevFrameTimestamp =
		m_startTimestamp + m_videoDstElapsedDuration;

	// calculate previous frame duration
	Duration dstPrevFrameDuration = m_videoDstFrameDuration;
	m_videoDstElapsedDuration += m_videoDstFrameDuration;

	if (m_sourceRealTime && m_videoSrcFrameNumber > 0) {

		// first adjust due to skipped frames
		Duration dstPrevFrameAdjustment = 
			m_videoSkippedFrames * m_videoDstFrameDuration;

		dstPrevFrameDuration += dstPrevFrameAdjustment;
		m_videoDstElapsedDuration += dstPrevFrameAdjustment;

		// next check our duration against real elasped time
		Duration lag = m_videoSrcElapsedDuration - m_videoDstElapsedDuration;

		if (lag > 0) {
			// adjust by integral number of target duration units
			dstPrevFrameAdjustment = 
				(lag / m_videoDstFrameDuration) * m_videoDstFrameDuration;

			dstPrevFrameDuration += dstPrevFrameAdjustment;
			m_videoDstElapsedDuration += dstPrevFrameAdjustment;
		}
	}

	// forward encoded video to sinks
	if (m_pConfig->m_videoEncode) {
		if (m_videoDstPrevFrame) {
			CMediaFrame* pFrame = new CMediaFrame(
				CMediaFrame::Mpeg4VideoFrame, 
				m_videoDstPrevFrame, 
				m_videoDstPrevFrameLength,
				dstPrevFrameTimestamp, 
				dstPrevFrameDuration);
			ForwardFrame(pFrame);
			delete pFrame;
		}

		// hold onto this encoded vop until next one is ready
		m_videoEncoder->GetEncodedImage(
			&m_videoDstPrevFrame, &m_videoDstPrevFrameLength);
	}

	// forward raw video to sinks
	if (m_pConfig->SourceRawVideo()) {

		if (m_videoDstPrevImage) {
			CMediaFrame* pFrame =
				new CMediaFrame(
					CMediaFrame::YuvVideoFrame, 
					m_videoDstPrevImage, 
					m_videoDstYUVSize,
					dstPrevFrameTimestamp, 
					dstPrevFrameDuration);
			ForwardFrame(pFrame);
			delete pFrame;
		}

		m_videoDstPrevImage = (u_int8_t*)Malloc(m_videoDstYUVSize);

		imgcpy(m_videoDstPrevImage, 
			yImage, 
			m_videoDstWidth,
			m_videoDstHeight,
			yStride);

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -