⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 media_source.cpp

📁 网络MPEG4IP流媒体开发源代码
💻 CPP
📖 第 1 页 / 共 3 页
字号:
    free(pSilence);
  }
}
// NOTE(review): the two closing braces above are the tail of the preceding
// function (a gap/silence helper whose start is outside this view).

// Process one raw PCM audio frame delivered by the capture source.
//
// Pipeline (in order):
//   1. Establish/track the audio timeline: on the first frame, record the
//      encoding start and audio start timestamps; afterwards compare the
//      source timestamp against the expected elapsed duration and, when the
//      source is ahead by >= 2 ms, report a missing frame and call
//      AddGapToAudio() to compensate.
//   2. Convert channel count to the encoder's layout (mono->stereo by
//      duplication, stereo->mono by averaging with saturation) — the casts
//      to int16_t show the samples are interleaved 16-bit PCM.
//   3. Resample (ResampleAudio) or re-frame (buffer accumulate) into
//      m_audioPreEncodingBuffer when source/destination sample rates or
//      samples-per-frame differ.
//   4. Encode full destination frames and forward them to sinks, skipping
//      ahead if encoding has fallen more than 5 frame-times behind
//      real time, and feeding measured encoding drift to the video source.
//   5. Optionally forward a big-endian raw PCM copy to sinks.
//
// frameData / frameDataLength: interleaved 16-bit PCM from the source.
// srcFrameTimestamp: capture timestamp of the first sample of this frame.
// resync: caller-detected discontinuity; flushes the pre-encoding buffer
//         and realigns the destination sample counters.
void CMediaSource::ProcessAudioFrame(
				     u_int8_t* frameData,
				     u_int32_t frameDataLength,
				     Timestamp srcFrameTimestamp,
				     bool resync)
{
  Duration srcStartDuration;

  //debug_message("audio - ts %llu bytes %d", srcFrameTimestamp, frameDataLength);
  if (m_audioSrcFrameNumber == 0) {
    // First audio frame: anchor the global encoding start timestamp unless
    // video already did so.
    if (!m_sourceVideo || m_videoSrcFrameNumber == 0) {
      m_encodingStartTimestamp = GetTimestamp();
      //  debug_message("Setting encoding start ts to %llu", m_encodingStartTimestamp);
    }
    m_audioStartTimestamp = srcFrameTimestamp;
    m_audioSrcElapsedDuration = 0;
  } else {
    Duration diff;
    // How far into the stream the source claims this frame starts, vs. how
    // far we have accounted for; diff is the absolute deviation.
    srcStartDuration = srcFrameTimestamp - m_audioStartTimestamp;
    if (srcStartDuration >= m_audioSrcElapsedDuration) {
      diff = srcStartDuration - m_audioSrcElapsedDuration;
    } else {
      diff = m_audioSrcElapsedDuration - srcStartDuration;
    }
#ifdef DEBUG_SYNC
    if (diff < -1 || diff > 1) {
    debug_message("audio dur should be %llu is %llu - diff %lld",
		  m_audioSrcElapsedDuration, srcStartDuration, diff);
    }
#endif
    if (diff >= 2000LL) {
      // we have a time differential between audio frames of more
      // than 2 milliseconds.  This is most likely an error
      error_message("audio - missing audio frame found - duration %llu should be about %llu diff %llu",
		    srcStartDuration, m_audioSrcElapsedDuration, diff);
      // Only compensate when the source jumped FORWARD (samples missing).
      if (srcStartDuration > m_audioSrcElapsedDuration) {
	AddGapToAudio(m_audioStartTimestamp + m_audioSrcElapsedDuration, diff);
      }
    }
    // Trust the source timestamp from here on.
    m_audioSrcElapsedDuration = srcStartDuration;
  }
  m_audioSrcFrameNumber++;
  m_audioSrcSampleNumber += SrcBytesToSamples(frameDataLength);

  if (resync) {
    // flush preEncodingBuffer
    m_audioPreEncodingBufferLength = 0;
    // change dst sample numbers to account for gap
    m_audioDstSampleNumber =
      m_audioDstRawSampleNumber =
      DstTicksToSamples(m_audioSrcElapsedDuration);
    error_message("Received resync");
  }

  // calculate m_audioSrcElapsedDuration for end of frame - we're only looking for
  // a plus or minus 2 msec here.
  m_audioSrcElapsedDuration += SrcSamplesToTicks(SrcBytesToSamples(frameDataLength));

  // pcmData/pcmDataLength track the "current" PCM buffer through the
  // conversion stages below; pcmMalloced marks a heap copy we must free.
  bool pcmMalloced = false;
  bool pcmBuffered;
  u_int8_t* pcmData = frameData;
  u_int32_t pcmDataLength = frameDataLength;

  if (m_audioSrcChannels != m_audioDstChannels) {
    // Convert the channels if they don't match
    // we either double the channel info, or combine
    // the left and right
    uint32_t samples = SrcBytesToSamples(frameDataLength);
    uint32_t dstLength = DstSamplesToBytes(samples);
    pcmData = (u_int8_t *)Malloc(dstLength);
    pcmDataLength = dstLength;
    pcmMalloced = true;

    int16_t *src = (int16_t *)frameData;
    int16_t *dst = (int16_t *)pcmData;
    if (m_audioSrcChannels == 1) {
      // 1 channel to 2 - duplicate each sample into left and right
      for (uint32_t ix = 0; ix < samples; ix++) {
	*dst++ = *src;
	*dst++ = *src++;
      }
    } else {
      // 2 channels to 1 - average left and right, clamped to int16 range
      for (uint32_t ix = 0; ix < samples; ix++) {
	int32_t sum = *src++;
	sum += *src++;
	sum /= 2;
	if (sum < -32768) sum = -32768;
	else if (sum > 32767) sum = 32767;
	*dst++ = sum & 0xffff;
      }
    }
  }

  // resample audio, if necessary
  if (m_audioSrcSampleRate != m_audioDstSampleRate) {
    ResampleAudio(pcmData, pcmDataLength);
    // resampled data is now available in m_audioPreEncodingBuffer
    pcmBuffered = true;
  } else if (m_audioSrcSamplesPerFrame != m_audioDstSamplesPerFrame) {
    // reframe audio, if necessary
    // e.g. MP3 is 1152 samples/frame, AAC is 1024 samples/frame
    // add samples to end of m_audioBuffer
    // InitAudio() ensures that buffer is large enough
    memcpy(
	   &m_audioPreEncodingBuffer[m_audioPreEncodingBufferLength],
	   pcmData,
	   pcmDataLength);
    m_audioPreEncodingBufferLength += pcmDataLength;
    pcmBuffered = true;
  } else {
    pcmBuffered = false;
  }

  // LATER restructure so as get rid of this label, and goto below
  // The goto at the bottom loops back here to drain the pre-encoding buffer
  // one destination-sized frame at a time.
 pcmBufferCheck:

  if (pcmBuffered) {
    u_int32_t samplesAvailable =
      DstBytesToSamples(m_audioPreEncodingBufferLength);

    // not enough samples collected yet to call encode or forward
    if (samplesAvailable < m_audioDstSamplesPerFrame) {
      return;
    }
    if (pcmMalloced) {
      free(pcmData);
      pcmMalloced = false;
    }

    // setup for encode/forward - consume exactly one dst frame from the buffer
    pcmData =
      &m_audioPreEncodingBuffer[0];
    pcmDataLength =
      DstSamplesToBytes(m_audioDstSamplesPerFrame);
  }

  // encode audio frame
  if (m_pConfig->m_audioEncode) {
    Timestamp encodingStartTimestamp = GetTimestamp();
    Duration frametime =
      DstSamplesToTicks(DstBytesToSamples(pcmDataLength));
    Duration startOutput = DstSamplesToTicks(m_audioDstSampleNumber);

    if (m_audioDstFrameNumber == 0) {
      m_audioEncodingStartTimestamp = encodingStartTimestamp;
    }
    // NOTE: the #if 1 / #else below selects the real lateness test; the
    // disabled branch (commented "if (false)") would never skip.  Brace
    // pairing depends on exactly one of the two `if (...) {` lines being live.
#if 1
    Duration diff = encodingStartTimestamp - m_audioEncodingStartTimestamp;
    if (diff >= (5 * frametime) + startOutput) {
#else
      //      if (false) {
#endif
      // Fallen more than 5 frame-times behind wall clock: drop the buffered
      // samples and jump the destination counters forward.
      error_message("audio - fell behind %llu - skipping %llu", diff,
		    startOutput + (5 * frametime));
      m_audioPreEncodingBufferLength = 0;
      pcmDataLength = 0;

      // change dst sample numbers to account for gap
      m_audioDstSampleNumber =
	m_audioDstRawSampleNumber =
	DstTicksToSamples(m_audioSrcElapsedDuration);
      m_audioDstFrameNumber = m_audioDstSampleNumber / m_audioDstSamplesPerFrame;
      // we're going to skip these frames...
    } else {
      bool rc = m_audioEncoder->EncodeSamples(
						(int16_t*)pcmData,
						m_audioDstSamplesPerFrame,
						m_audioDstChannels);
	if (!rc) {
	  debug_message("failed to encode audio");
	  return;
	}

	// Report how long encoding took relative to the frame's real-time
	// duration, so the video source can compensate for a/v drift.
	Duration encodingTime =
	  (GetTimestamp() - encodingStartTimestamp);
	if (m_sourceRealTime && m_videoSource) {
	  Duration drift;
	  if (frametime <= encodingTime) {
	    drift = encodingTime - frametime;
#ifdef DEBUG_SYNC_DRIFT
	    debug_message("Adding %llu audio drift", drift);
#endif
	    m_videoSource->AddEncodingDrift(drift);
	  } else {
	    // Subtracting drift back is intentionally compiled out (#if 0).
#if 0
	    drift = frametime - encodingTime;
#ifdef DEBUG_SYNC_DRIFT
	    debug_message("Subtracting %llu audio drift", drift);
#endif
	    m_videoSource->SubtractEncodingDrift(drift);
#endif
	  }
	}
	ForwardEncodedAudioFrames();
      }
#ifdef DEBUG_SYNC
    debug_message("audio src duration %llu dst %llu diff %lld",
		  m_audioSrcElapsedDuration, startOutput, m_audioSrcElapsedDuration - startOutput);
#endif
  }

  // if desired, forward raw audio to sinks
  if (m_pConfig->SourceRawAudio() && pcmDataLength > 0) {

    // make a copy of the pcm data if needed
    u_int8_t* pcmForwardedData;
    if (!pcmMalloced) {
      pcmForwardedData = (u_int8_t*)Malloc(pcmDataLength);
      memcpy(pcmForwardedData, pcmData, pcmDataLength);
    } else {
      // Hand our heap copy to the frame; ownership transfers with it.
      pcmForwardedData = pcmData;
      pcmMalloced = false;
    }
#ifndef WORDS_BIGENDIAN
    // swap byte ordering so we have big endian to write into
    // the file.
    uint16_t *pdata = (uint16_t *)pcmForwardedData;
    for (uint32_t ix = 0;
	 ix < pcmDataLength;
	 ix += sizeof(uint16_t),pdata++) {
      uint16_t swap = *pdata;
      *pdata = B2N_16(swap);
    }
#endif
    CMediaFrame* pFrame =
      new CMediaFrame(
		      PCMAUDIOFRAME,
		      pcmForwardedData,
		      pcmDataLength,
		      m_audioStartTimestamp
		      + DstSamplesToTicks(m_audioDstRawSampleNumber),
		      DstBytesToSamples(pcmDataLength),
		      m_audioDstSampleRate);
    ForwardFrame(pFrame);

    // NOTE(review): this uses SrcBytesToSamples on a buffer that is in the
    // DESTINATION channel layout at this point, while the frame duration two
    // lines above uses DstBytesToSamples on the same length.  When src and
    // dst channel counts differ these disagree — looks like it should be
    // DstBytesToSamples; confirm against upstream before changing.
    m_audioDstRawSampleNumber += SrcBytesToSamples(pcmDataLength);
    m_audioDstRawFrameNumber++;
  }

  if (pcmMalloced) {
    free(pcmData);
  }

  if (pcmBuffered) {
    // Compact the pre-encoding buffer by the amount just consumed and loop
    // back in case another full destination frame is already buffered.
    m_audioPreEncodingBufferLength -= pcmDataLength;
    memcpy(
	   &m_audioPreEncodingBuffer[0],
	   &m_audioPreEncodingBuffer[pcmDataLength],
	   m_audioPreEncodingBufferLength);
    goto pcmBufferCheck;
  }
}

// Resample 16-bit PCM from the source rate to the destination rate,
// appending the output to m_audioPreEncodingBuffer (and growing that buffer
// by doubling when it fills).  Each channel is resampled independently via
// st_resample_flow() on its own m_audioResample[chan] state, using
// interleaved-stride access (offset by one dst sample per channel).
// frameData / frameDataLength: interleaved PCM input.
void CMediaSource::ResampleAudio(
				 u_int8_t* frameData,
				 u_int32_t frameDataLength)
{
  uint32_t samplesIn;
  uint32_t samplesInConsumed;
  uint32_t outBufferSamplesLeft;
  uint32_t outBufferSamplesWritten;
  uint32_t chan_offset;

  samplesIn = DstBytesToSamples(frameDataLength);

  // so far, record the pre length
  while (samplesIn > 0) {
    outBufferSamplesLeft =
      DstBytesToSamples(m_audioPreEncodingBufferMaxLength -
			m_audioPreEncodingBufferLength);
    for (uint8_t chan_ix = 0; chan_ix < m_audioDstChannels; chan_ix++) {
      // st_resample_flow updates these in/out counts per call, so reset
      // them from the loop-level totals for every channel.
      samplesInConsumed = samplesIn;
      outBufferSamplesWritten = outBufferSamplesLeft;
      chan_offset = chan_ix * (DstSamplesToBytes(1));
#ifdef DEBUG_AUDIO_RESAMPLER
      error_message("resample - chans %d %d, samples %d left %d",
		    m_audioDstChannels, chan_ix,
		    samplesIn, outBufferSamplesLeft);
#endif
      if (st_resample_flow(m_audioResample[chan_ix],
			   (int16_t *)(frameData + chan_offset),
			   (int16_t *)(&m_audioPreEncodingBuffer[m_audioPreEncodingBufferLength + chan_offset]),
			   &samplesInConsumed,
			   &outBufferSamplesWritten,
			   m_audioDstChannels) < 0) {
	error_message("resample failed");
      }
#ifdef DEBUG_AUDIO_RESAMPLER
      debug_message("Chan %d consumed %d wrote %d",
		    chan_ix, samplesInConsumed, outBufferSamplesWritten);
#endif
    }
    // Post-hoc overrun check: the resampler wrote more than the room we had.
    if (outBufferSamplesLeft < outBufferSamplesWritten) {
      error_message("Written past end of buffer");
    }
    samplesIn -= samplesInConsumed;
    outBufferSamplesLeft -= outBufferSamplesWritten;
    m_audioPreEncodingBufferLength += DstSamplesToBytes(outBufferSamplesWritten);
    // If we have no room for new output data, and more to process,
    // give us a bunch more room...
    // NOTE(review): realloc return value is not checked; on failure this
    // would null the buffer pointer and leak the old block.
    if (outBufferSamplesLeft == 0 && samplesIn > 0) {
      m_audioPreEncodingBufferMaxLength *= 2;
      m_audioPreEncodingBuffer =
	(u_int8_t*)realloc(m_audioPreEncodingBuffer,
			   m_audioPreEncodingBufferMaxLength);
    }
  } // end while we still have input samples
}

// Drain every finished frame from the audio encoder and forward each to the
// sinks as a CMediaFrame stamped at
// m_audioStartTimestamp + ticks(m_audioDstSampleNumber), advancing the
// destination sample and frame counters as it goes.  Stops on a NULL/empty
// frame as a sanity check.
void CMediaSource::ForwardEncodedAudioFrames(void)
{
  u_int8_t* pFrame;
  u_int32_t frameLength;
  u_int32_t frameNumSamples;

  while (m_audioEncoder->GetEncodedFrame(&pFrame,
					 &frameLength,
					 &frameNumSamples)) {

    // sanity check
    if (pFrame == NULL || frameLength == 0) {
      break;
    }

    // output has frame start timestamp
    Timestamp output = DstSamplesToTicks(m_audioDstSampleNumber);

    m_audioDstSampleNumber += frameNumSamples;
    m_audioDstFrameNumber++;

    // forward the encoded frame to sinks
#ifdef DEBUG_SYNC
    debug_message("audio forwarding %llu", output);
#endif
    CMediaFrame* pMediaFrame =
      new CMediaFrame(
		      m_audioEncoder->GetFrameType(),
		      pFrame,
		      frameLength,
		      m_audioStartTimestamp + output,
		      frameNumSamples,
		      m_audioDstSampleRate);
    ForwardFrame(pMediaFrame);
  }
}

// Shut down the audio path: flush the encoder (EncodeSamples(NULL, 0, ...)),
// forward any frames that flush produced, stop and delete the encoder, free
// the pre-encoding buffer, and mark audio sourcing inactive.
void CMediaSource::DoStopAudio()
{
  if (m_audioEncoder) {
    // flush remaining output from audio encoder
    // and forward it to sinks
    m_audioEncoder->EncodeSamples(NULL, 0, m_audioSrcChannels);

    ForwardEncodedAudioFrames();

    m_audioEncoder->Stop();
    delete m_audioEncoder;
    m_audioEncoder = NULL;
  }

  free(m_audioPreEncodingBuffer);
  m_audioPreEncodingBuffer = NULL;

  m_sourceAudio = false;
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -