⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 audio_encoder_class.cpp

📁 完整的RTP RTSP代码库
💻 CPP
📖 第 1 页 / 共 2 页
字号:
    // the audioStartTimestamp).
    // this way, we just need to compare audioSrcElapsedDuration with
    // audioDstElapsedDuration (which should match in the ideal case),
    // and we don't have to compensate for the lag introduced by the initial
    // buffering of source frames in the encoder, which may vary from
    // one encoder to another
    m_audioSrcElapsedDuration =
      srcFrameTimestamp - m_audioEncodingStartTimestamp;
    m_audioSrcFrameNumber++;

    if (pcmBuffered) {
      // Samples accumulated so far in the pre-encoding buffer (resampled
      // and/or re-blocked source data).
      u_int32_t samplesAvailable =
	DstBytesToSamples(m_audioPreEncodingBufferLength);

      // The caller's malloc'ed copy is no longer needed once the data has
      // been moved into the pre-encoding buffer; release it here.
      if (pcmMalloced) {
	free((void *)pcmData);
	pcmMalloced = false;
      }
#ifdef DEBUG_AUDIO_SYNC
      if (Profile()->GetBoolValue(CFG_AUDIO_DEBUG))
      debug_message("%s: samples %u need %u",
		    Profile()->GetName(),
		    samplesAvailable, m_audioDstSamplesPerFrame);
#endif
      // not enough samples collected yet to call encode or forward
      // we moved the data above.
      if (samplesAvailable < m_audioDstSamplesPerFrame) {
	return;
      }
      // setup for encode/forward
      // Point pcmData at the head of the pre-encoding buffer; exactly one
      // destination frame's worth of bytes will be consumed per pass.
      pcmData = &m_audioPreEncodingBuffer[0];
      pcmDataLength = DstSamplesToBytes(m_audioDstSamplesPerFrame);
    }
#ifdef DEBUG_AUDIO_SYNC
      if (Profile()->GetBoolValue(CFG_AUDIO_DEBUG))
	debug_message("%s:srcDuration="U64" dstDuration "U64" "D64,
		  Profile()->GetName(),
		  m_audioSrcElapsedDuration,
		  m_audioDstElapsedDuration,
		      m_audioDstElapsedDuration - m_audioSrcElapsedDuration);
#endif
    /*
     * Check if we can encode, or if we have to add/drop frames
     * First check is to see if the source frequency is greater than the
     * theory frequency.
     */
    if (m_audioSrcElapsedDuration + frametime >= m_audioDstElapsedDuration) {

      // source gets ahead of destination
      // We tolerate a difference of 3 frames since A/V sync is usually
      // noticeable after that.
      // This way we give the encoder a chance to pick
      // up
      if (m_audioSrcElapsedDuration >
	  (3 * frametime) + m_audioDstElapsedDuration) {
	// Number of silence frames needed to bring the destination to
	// within one frame of the source (the "+ 2 * frametime" aims at
	// the middle of the 3-frame tolerance window).
	int j = (int) (DstTicksToSamples(m_audioSrcElapsedDuration
					 + (2 * frametime)
					 - m_audioDstElapsedDuration)
		       / m_audioDstSamplesPerFrame);
	debug_message("%s: Adding %d silence frames",
		      Profile()->GetName(), j);
	for (int k=0; k<j; k++)
	  AddSilenceFrame();
      }

#ifdef DEBUG_SYNC
      debug_message("%s:encoding", Profile()->GetName());
#endif
      /*
       * Actually encode and forward the frames
       */
      bool rc = EncodeSamples(
			      (int16_t*)pcmData,
			      m_audioDstSamplesPerFrame,
			      m_audioDstChannels);

      if (!rc) {
	debug_message("failed to encode audio");
      }

      ForwardEncodedAudioFrames();
    } else {
      // destination gets ahead of source
      // This has been observed as a result of clock frequency drift between
      // the sound card oscillator and the system mainbord oscillator
      // Example: If the sound card oscillator has a 'real' frequency that
      // is slightly larger than the 'rated' frequency, and we are sampling
      // at 32kHz, then the 32000 samples acquired from the sound card
      // 'actually' occupy a duration of slightly less than a second.
      //
      // The clock drift is usually fraction of a Hz and takes a long
      // time (~ 20-30 minutes) before we are off by one frame duration

      debug_message("%s:audio: dropping frame, SrcElapsedDuration="U64" DstElapsedDuration="U64" "U64,
		    Profile()->GetName(),
		    m_audioSrcElapsedDuration, m_audioDstElapsedDuration,
		    frametime);
      // don't return - drop through to remove frame
    }

    // Free the caller's buffer if it was malloc'ed and not already released
    // in the pcmBuffered branch above.
    if (pcmMalloced) {
      free((void *)pcmData);
    }
    if (pcmBuffered) {
      /*
       * This means we're storing data, either from resampling, or if the
       * sample numbers do not match.
       * We will remove the encoded samples,
       * and increment the srcFrameTimestamp
       */
      // Shift the unconsumed tail of the pre-encoding buffer to the front.
      m_audioPreEncodingBufferLength -= pcmDataLength;
      memmove(
	     &m_audioPreEncodingBuffer[0],
	     &m_audioPreEncodingBuffer[pcmDataLength],
	     m_audioPreEncodingBufferLength);
      subtractDuration = 0;
      srcFrameTimestamp += frametime;
      // Loop again: more buffered samples may form another complete frame.
    } else {
      // no data in buffer (default case).
      return;
    }
  }
}

/*
 * ResampleAudio - push frameData (interleaved destination-format samples)
 * through the per-channel st_resample_flow converters, appending the
 * resampled output to m_audioPreEncodingBuffer (growing it by doubling
 * when it looks too small for the pending input).
 *
 * frameData       - interleaved PCM input, one int16 stream per channel
 * frameDataLength - input length in bytes (converted to samples below)
 *
 * NOTE(review): samplesInConsumed / outBufferSamplesWritten are
 * overwritten on every channel iteration, so the accounting after the
 * channel loop uses the LAST channel's values only — this assumes every
 * channel consumes and produces identical counts; confirm against
 * st_resample_flow's contract.
 */
void CAudioEncoder::ResampleAudio(
				 const u_int8_t* frameData,
				 u_int32_t frameDataLength)
{
  uint32_t samplesIn;
  uint32_t samplesInConsumed;
  uint32_t outBufferSamplesLeft;
  uint32_t outBufferSamplesWritten;
  uint32_t chan_offset;

  samplesIn = DstBytesToSamples(frameDataLength);

  // so far, record the pre length
  while (samplesIn > 0) {
    // Free space (in samples) remaining in the pre-encoding buffer.
    outBufferSamplesLeft =
      DstBytesToSamples(m_audioPreEncodingBufferMaxLength -
			m_audioPreEncodingBufferLength);
    // Grow the buffer pre-emptively when less than half the pending input
    // would fit.  NOTE(review): outBufferSamplesLeft is not recomputed
    // after the realloc, so this pass still writes into the old (smaller)
    // budget; the extra room is only used on the next loop iteration.
    // realloc failure (NULL return) is also unchecked here.
    if (outBufferSamplesLeft * 2 <= samplesIn && samplesIn > 0) {
      m_audioPreEncodingBufferMaxLength *= 2;
      m_audioPreEncodingBuffer =
	(u_int8_t*)realloc(m_audioPreEncodingBuffer,
			   m_audioPreEncodingBufferMaxLength);
    }
    // Resample each channel independently; chan_offset selects the
    // channel's interleaved lane within the int16 sample frames.
    for (uint8_t chan_ix = 0; chan_ix < m_audioDstChannels; chan_ix++) {
      // in/out parameters: on входе capacity, on return actual counts.
      samplesInConsumed = samplesIn;
      outBufferSamplesWritten = outBufferSamplesLeft;
      chan_offset = chan_ix * (DstSamplesToBytes(1));
#ifdef DEBUG_AUDIO_RESAMPLER
      error_message("%s:resample - chans %d %d, samples %d left %d",
		    Profile()->GetName(),
		    m_audioDstChannels, chan_ix,
		    samplesIn, outBufferSamplesLeft);
#endif
      if (st_resample_flow(m_audioResample[chan_ix],
			   (int16_t *)(frameData + chan_offset),
			   (int16_t *)(&m_audioPreEncodingBuffer[m_audioPreEncodingBufferLength + chan_offset]),
			   &samplesInConsumed,
			   &outBufferSamplesWritten,
			   m_audioDstChannels) < 0) {
	error_message("%s:resample failed", Profile()->GetName());
      }
#ifdef DEBUG_AUDIO_RESAMPLER
      debug_message("%s:Chan %d consumed %d wrote %d",
		    Profile()->GetName(),
		    chan_ix, samplesInConsumed, outBufferSamplesWritten);
#endif
    }
    // NOTE(review): this overflow check runs AFTER the writes above, so it
    // can only report (not prevent) a buffer overrun.
    if (outBufferSamplesLeft < outBufferSamplesWritten) {
      error_message("%s:Written past end of buffer",
		    Profile()->GetName());
    }
    samplesIn -= samplesInConsumed;
    // NOTE(review): this decrement is effectively dead — the value is
    // recomputed at the top of the while loop before it is read again.
    outBufferSamplesLeft -= outBufferSamplesWritten;
    m_audioPreEncodingBufferLength += DstSamplesToBytes(outBufferSamplesWritten);
    // If we have no room for new output data, and more to process,
    // give us a bunch more room...
  } // end while we still have input samples
}

/*
 * ForwardEncodedAudioFrames - drain all complete frames from the encoder
 * and forward each to the attached sinks, advancing the destination
 * frame/sample counters and m_audioDstElapsedDuration as it goes.
 */
void CAudioEncoder::ForwardEncodedAudioFrames(void)
{
  u_int8_t* pFrame;
  u_int32_t frameLength;
  u_int32_t frameNumSamples;

  while (GetEncodedFrame(&pFrame,
			 &frameLength,
			 &frameNumSamples)) {

    // sanity check
    if (pFrame == NULL || frameLength == 0) {
#ifdef DEBUG_SYNC
      debug_message("%s:No frame", Profile()->GetName());
#endif
      break;
    }

    //debug_message("Got encoded frame");

    // output has frame start timestamp
    // (ticks elapsed BEFORE this frame's samples are added below)
    Timestamp output = DstSamplesToTicks(m_audioDstSampleNumber);

    m_audioDstFrameNumber++;
    m_audioDstSampleNumber += frameNumSamples;
    m_audioDstElapsedDuration = DstSamplesToTicks(m_audioDstSampleNumber);

    //debug_message("m_audioDstSampleNumber = %llu", m_audioDstSampleNumber);

    // forward the encoded frame to sinks
#ifdef DEBUG_SYNC
    debug_message("%s:audio forwarding "U64,
		  Profile()->GetName(), output);
#endif
    // NOTE(review): ownership of pFrame and pMediaFrame presumably passes
    // to the sinks via ForwardFrame — verify CMediaFrame's release
    // semantics; nothing is freed here.
    CMediaFrame* pMediaFrame =
      new CMediaFrame(
		      GetFrameType(),
		      pFrame,
		      frameLength,
		      m_audioStartTimestamp + output,
		      frameNumSamples,
		      m_audioDstSampleRate);
    ForwardFrame(pMediaFrame);
  }
}

/*
 * DoStopAudio - flush the encoder (NULL/0 sample call), forward whatever
 * it produces, stop it, release the pre-encoding buffer, and log stats.
 */
void CAudioEncoder::DoStopAudio()
{
  // flush remaining output from audio encoder
  // and forward it to sinks

  EncodeSamples(NULL, 0, m_audioSrcChannels);
  ForwardEncodedAudioFrames();

  StopEncoder();

  CHECK_AND_FREE(m_audioPreEncodingBuffer);
  debug_message("Audio profile %s stats", GetProfileName());
  debug_message(" encoded samples: "U64,
		m_audioDstSampleNumber);
  debug_message(" encoded frames: %u", m_audioDstFrameNumber);
}

/*
 * AddRtpDestination - attach this encoder to a stream's audio RTP
 * destination.  Reuses the stream's existing RTP session when present;
 * otherwise builds a fresh mp4live_rtp_params_t from the stream's
 * configured address/port/TTL and (optionally) SRTP settings.
 *
 * stream            - media stream carrying the destination configuration
 * disable_ts_offset - passed through to AddRtpDestInt
 * max_ttl           - RTP TTL to use
 * srcPort           - local receive port
 *
 * NOTE(review): mrtp is heap-allocated here and handed to AddRtpDestInt —
 * presumably that callee takes ownership; confirm to rule out a leak.
 */
void CAudioEncoder::AddRtpDestination (CMediaStream *stream,
				       bool disable_ts_offset,
				       uint16_t max_ttl,
				       in_port_t srcPort)
{
  mp4live_rtp_params_t *mrtp;

  // Fast path: session already established for this stream.
  if (stream->m_audio_rtp_session != NULL) {
    AddRtpDestInt(disable_ts_offset, stream->m_audio_rtp_session);
    return;
  }
  mrtp = MALLOC_STRUCTURE(mp4live_rtp_params_t);
  rtp_default_params(&mrtp->rtp_params);

  // Basic RTP/RTCP transport parameters from the stream configuration.
  mrtp->rtp_params.rtp_addr = stream->GetStringValue(STREAM_AUDIO_DEST_ADDR);
  mrtp->rtp_params.rtp_rx_port = srcPort;
  mrtp->rtp_params.rtp_tx_port = stream->GetIntegerValue(STREAM_AUDIO_DEST_PORT);
  mrtp->rtp_params.rtp_ttl = max_ttl;
  mrtp->rtp_params.transmit_initial_rtcp = 1;
  mrtp->rtp_params.rtcp_addr = stream->GetStringValue(STREAM_AUDIO_RTCP_DEST_ADDR);
  mrtp->rtp_params.rtcp_tx_port = stream->GetIntegerValue(STREAM_AUDIO_RTCP_DEST_PORT);

  // SRTP parameters; note tx and rx use the same key/salt pair.
  mrtp->use_srtp = stream->GetBoolValue(STREAM_AUDIO_USE_SRTP);
  mrtp->srtp_params.enc_algo =
    (srtp_enc_algos_t)stream->GetIntegerValue(STREAM_AUDIO_SRTP_ENC_ALGO);
  mrtp->srtp_params.auth_algo =
    (srtp_auth_algos_t)stream->GetIntegerValue(STREAM_AUDIO_SRTP_AUTH_ALGO);
  mrtp->srtp_params.tx_key = stream->m_audio_key;
  mrtp->srtp_params.tx_salt = stream->m_audio_salt;
  mrtp->srtp_params.rx_key = stream->m_audio_key;
  mrtp->srtp_params.rx_salt = stream->m_audio_salt;
  mrtp->srtp_params.rtp_enc = stream->GetBoolValue(STREAM_AUDIO_SRTP_RTP_ENC);
  mrtp->srtp_params.rtp_auth = stream->GetBoolValue(STREAM_AUDIO_SRTP_RTP_AUTH);
  mrtp->srtp_params.rtcp_enc = stream->GetBoolValue(STREAM_AUDIO_SRTP_RTCP_ENC);
  AddRtpDestInt(disable_ts_offset, mrtp);
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -