mp4afmt.cpp
if (SUCCEEDED(retVal))
{
// Compute and save the codec delay in ms
m_ulCodecDelayMs = CAudioFormat::ConvertSamplesToMs(ulCodecDelaySamples);
}
}
// Allocate Decoder Buffer
if (SUCCEEDED(retVal))
{
// Did the size change?
if (m_ulDecoderBufferSize != m_ulMaxDecoderOutputBytes)
{
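// Assume allocation failure up front; retVal is restored to HXR_OK only
// if the replacement decoder buffer is successfully obtained below.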
retVal = HXR_OUTOFMEMORY;
HX_VECTOR_DELETE(m_pDecoderBuffer);
m_pDecoderBuffer = new UINT8 [m_ulMaxDecoderOutputBytes];
if (m_pDecoderBuffer)
{
retVal = HXR_OK;
m_ulDecoderBufferSize = m_ulMaxDecoderOutputBytes;
}
}
}
}
return retVal;
}
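/****************************************************************************
* Method:
* CMP4AudioFormat::ConfigureRssm
*
* Configures the sampling rate and time anchor of the raw-stream segment
* manager (RSSM) and anchors the input timestamp converter to match.
*/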
HX_RESULT CMP4AudioFormat::ConfigureRssm(ULONG32 ulAnchorInMs)
{
HX_RESULT retVal = HXR_UNEXPECTED;
if (m_pRssm)
{
CTSConverter tempConverter;
ULONG32 ulRssmTimeBase;
ULONG32 ulRssmAnchor;
ULONG32 ulDecoderAnchor;
ULONG32 ulSamplingRate = m_pAudioFmt->ulSamplesPerSec;
retVal = HXR_OK;
if (ulSamplingRate == 0)
{
ulSamplingRate = 1000;
}
m_pRssm->SetSamplingRate(ulSamplingRate);
m_pRssm->SetTimeAnchor(ulAnchorInMs);
ulRssmTimeBase = m_pRssm->GetTimeBase();
if (ulRssmTimeBase == 0)
{
ulRssmTimeBase = m_pAudioFmt->ulSamplesPerSec;
}
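// Map RSSM time-base units to decoder sample units, then anchor both time
// lines at the same instant (ulAnchorInMs) so converted packet timestamps
// line up with the decoder's sample clock.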
m_InputTSConverter.SetBase(ulRssmTimeBase,
m_pAudioFmt->ulSamplesPerSec);
tempConverter.SetBase(1000, ulRssmTimeBase);
ulRssmAnchor = tempConverter.ConvertVector(ulAnchorInMs);
tempConverter.SetBase(1000, m_pAudioFmt->ulSamplesPerSec);
ulDecoderAnchor = tempConverter.ConvertVector(ulAnchorInMs);
m_InputTSConverter.SetAnchor(ulRssmAnchor, ulDecoderAnchor);
}
return retVal;
}
/****************************************************************************
* Method:
* CMP4AudioFormat::CreateAssembledPacket
*
*/
CMediaPacket* CMP4AudioFormat::CreateAssembledPacket(IHXPacket* pPacket)
{
CMediaPacket* pLastFramePacket = NULL;
CMediaPacket* pFramePacket = NULL;
if (m_pRssm)
{
if (pPacket)
{
m_pRssm->SetPacket(pPacket);
}
else
{
m_pRssm->Flush();
}
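// Drain the assembler: every completed frame except the most recently
// created one is queued for decoding; the last one is returned to the caller.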
if (m_pRssm->CreateMediaPacket(pLastFramePacket) == HXR_OK)
{
HX_ASSERT(pLastFramePacket);
do
{
pFramePacket = NULL;
if (m_pRssm->CreateMediaPacket(pFramePacket) == HXR_OK)
{
HX_ASSERT(pFramePacket);
if (pLastFramePacket)
{
CAudioFormat::PutAudioPacket(pLastFramePacket);
}
pLastFramePacket = pFramePacket;
}
} while (pFramePacket);
}
}
return pLastFramePacket;
}
/****************************************************************************
* Method:
* CMP4AudioFormat::DecodeAudioData
*
*/
HX_RESULT CMP4AudioFormat::DecodeAudioData(HXAudioData& audioData,
BOOL bFlushCodec)
{
ULONG32 ulBytesDecoded;
ULONG32 ulSamplesProduced;
ULONG32 ulBytesProduced;
ULONG32 ulNextDecodeStartTime;
ULONG32 ulPacketBasedNextDecodeStartTime;
HX_RESULT retVal = HXR_NO_DATA;
HX_RESULT checkStatus;
CMediaPacket* pAssembledFrame = NULL;
if (bFlushCodec)
{
pAssembledFrame = CreateAssembledPacket(NULL);
if (pAssembledFrame)
{
CAudioFormat::PutAudioPacket(pAssembledFrame);
}
}
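// Pull assembled frames off the queue until one of them yields decoded
// samples or the queue runs dry.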
while ((pAssembledFrame = CAudioFormat::PeekAudioPacket()) &&
(retVal == HXR_NO_DATA))
{
ulBytesDecoded = 0;
ulSamplesProduced = 0;
ulBytesProduced = 0;
ulNextDecodeStartTime = m_ulLastDecodedEndTime;
if (m_bNewAssembledFrame)
{
LONG32 lDecodeStartOffset;
m_bNewAssembledFrame = FALSE;
// Filter out fluctuations in time-stamps present in some poorly generated audio streams.
ulPacketBasedNextDecodeStartTime = m_InputTSConverter.Convert(pAssembledFrame->m_ulTime);
lDecodeStartOffset = ulPacketBasedNextDecodeStartTime - ulNextDecodeStartTime;
if (lDecodeStartOffset >= ((LONG32) m_ulLastFrameTime))
{
ulNextDecodeStartTime = ulPacketBasedNextDecodeStartTime;
}
// Estimate audio frame time if unknown
if (pAssembledFrame->m_ulFlags & MDPCKT_HAS_UKNOWN_TIME_FLAG)
{
if (CAudioRenderer::CmpTime(m_ulLastDecodedEndTime, ulNextDecodeStartTime) >= 0)
{
ulNextDecodeStartTime = m_ulLastDecodedEndTime;
if (pAssembledFrame->m_ulFlags & MDPCKT_FOLLOWS_LOSS_FLAG)
{
ulNextDecodeStartTime += m_ulLastFrameTime;
}
}
}
// See if there was loss we need to conceal
if (pAssembledFrame->m_ulFlags & MDPCKT_FOLLOWS_LOSS_FLAG)
{
if (CAudioRenderer::CmpTime(ulNextDecodeStartTime, m_ulLastDecodedEndTime) > 0)
{
ULONG32 ulTimeLost = ulNextDecodeStartTime -
m_ulLastDecodedEndTime;
if (CAudioFormat::ConvertTimeToMs(ulTimeLost) > MAX_TOLERABLE_TIME_GAP)
{
ULONG32 ulSamplesLost = CAudioFormat::ConvertTimeToSamples(ulTimeLost);
#ifdef ENABLE_FRAME_TRACE
if (ulFrameTraceIdx < MAX_FRAME_TRACE_ENTRIES)
{
frameTraceArray[ulFrameTraceIdx][2] = m_ulLastDecodedEndTime;
frameTraceArray[ulFrameTraceIdx][3] = CAudioFormat::ConvertTimeToMs(frameTraceArray[ulFrameTraceIdx][2]);
frameTraceArray[ulFrameTraceIdx][0] =
(LONG32) ulTimeLost;
frameTraceArray[ulFrameTraceIdx++][1] = 'G';
}
#endif // ENABLE_FRAME_TRACE
// Conceal the lost samples
if (m_pDecoderInstance)
{
m_pDecoderInstance->Conceal(ulSamplesLost);
ulNextDecodeStartTime = m_ulLastDecodedEndTime;
#ifdef ENABLE_FRAME_TRACE
if (ulFrameTraceIdx < MAX_FRAME_TRACE_ENTRIES)
{
frameTraceArray[ulFrameTraceIdx][2] = ulNextDecodeStartTime;
frameTraceArray[ulFrameTraceIdx][3] = CAudioFormat::ConvertTimeToMs(frameTraceArray[ulFrameTraceIdx][2]);
frameTraceArray[ulFrameTraceIdx][0] =
(LONG32) ulSamplesLost;
frameTraceArray[ulFrameTraceIdx++][1] = 'C';
}
#endif // ENABLE_FRAME_TRACE
}
}
}
}
}
checkStatus = CheckDecoderInstance(m_pDecoderInstance);
if (FAILED(checkStatus))
{
return checkStatus;
}
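// An assembled frame may take several Decode() calls to consume; once only
// an unusably small remainder is left, discard it instead of decoding it.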
// Decode some samples
if (((CMediaPacket::GetBufferSize(pAssembledFrame) !=
pAssembledFrame->m_ulDataSize) &&
(pAssembledFrame->m_ulDataSize < MIN_ACCEPTBALE_DATA_REMAINDER))
#ifdef _IGNORE_UNSUPPORTED
|| (!m_pDecoderInstance)
#endif // _IGNORE_UNSUPPORTED
)
{
ulSamplesProduced = 0;
ulBytesDecoded = pAssembledFrame->m_ulDataSize;
}
else
{
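// ulSamplesProduced carries the output buffer capacity into Decode() and
// the number of samples actually written on return.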
ulSamplesProduced = m_ulMaxDecoderOutputSamples;
ProcessAssembledFrame(pAssembledFrame);
retVal = m_pDecoderInstance->Decode(pAssembledFrame->m_pData,
pAssembledFrame->m_ulDataSize,
ulBytesDecoded,
(INT16*) m_pDecoderBuffer,
ulSamplesProduced,
bFlushCodec);
if (retVal != HXR_OK)
{
ulBytesDecoded = 0;
ulSamplesProduced = 0;
}
#ifdef ENABLE_FRAME_TRACE
if (ulFrameTraceIdx < MAX_FRAME_TRACE_ENTRIES)
{
frameTraceArray[ulFrameTraceIdx][2] = ulNextDecodeStartTime;
frameTraceArray[ulFrameTraceIdx][3] = CAudioFormat::ConvertTimeToMs(frameTraceArray[ulFrameTraceIdx][2]);
frameTraceArray[ulFrameTraceIdx][0] =
(LONG32) ulBytesDecoded;
frameTraceArray[ulFrameTraceIdx++][1] = 'B';
}
if (ulFrameTraceIdx < MAX_FRAME_TRACE_ENTRIES)
{
frameTraceArray[ulFrameTraceIdx][2] = ulNextDecodeStartTime;
frameTraceArray[ulFrameTraceIdx][3] = CAudioFormat::ConvertTimeToMs(frameTraceArray[ulFrameTraceIdx][2]);
frameTraceArray[ulFrameTraceIdx][0] =
(LONG32) ulSamplesProduced;
frameTraceArray[ulFrameTraceIdx++][1] = 'D';
}
#endif // ENABLE_FRAME_TRACE
}
// Adjust remaining data pointers based on bytes consumed
HX_ASSERT(ulBytesDecoded <= pAssembledFrame->m_ulDataSize);
if (ulBytesDecoded <= pAssembledFrame->m_ulDataSize)
{
pAssembledFrame->m_ulDataSize -= ulBytesDecoded;
}
else
{
pAssembledFrame->m_ulDataSize = 0;
}
pAssembledFrame->m_pData += ulBytesDecoded;
// Place decoded data into the audio buffer
if (ulSamplesProduced > 0)
{
if (m_bCanChangeAudioStream && m_pDecoderInstance)
{
retVal = UpdateAudioFormat(ulNextDecodeStartTime);
if (FAILED(checkStatus))
{
retVal = checkStatus;
}
}
#ifndef _SILENT_PLAY
if (SUCCEEDED(retVal))
{
retVal = HXR_OK;
ulBytesProduced = CAudioFormat::ConvertSamplesToBytes(ulSamplesProduced);
if (retVal == HXR_OK)
{
retVal = m_pCommonClassFactory->CreateInstance(
CLSID_IHXBuffer,
(void**) &audioData.pData);
}
if (retVal == HXR_OK)
{
retVal = audioData.pData->Set(
m_pDecoderBuffer,
ulBytesProduced);
}
if (retVal == HXR_OK)
{
audioData.ulAudioTime = m_TSConverter.Convert(ulNextDecodeStartTime) -
m_ulCodecDelayMs;
}
}
#endif // _SILENT_PLAY
}
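// Remember how far decoding has progressed so the next iteration can
// detect timestamp gaps and overlaps.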
m_ulLastFrameTime = CAudioFormat::ConvertSamplesToTime(ulSamplesProduced);
m_ulLastDecodedEndTime = ulNextDecodeStartTime + m_ulLastFrameTime;
if ((pAssembledFrame->m_ulDataSize == 0) ||
((ulSamplesProduced == 0) && (ulBytesDecoded == 0)))
{
CMediaPacket* pDeadAssembledFrame = CAudioFormat::GetAudioPacket();
HX_ASSERT(pAssembledFrame == pDeadAssembledFrame);
CMediaPacket::DeletePacket(pDeadAssembledFrame);
m_bNewAssembledFrame = TRUE;
}
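// Hand back at most one decoded block per call; any queued frames are
// picked up on the next invocation.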
if (ulSamplesProduced > 0)
{
break;
}
}
return retVal;
}
/****************************************************************************
* Method:
* CMP4AudioFormat::Reset
*
*/
void CMP4AudioFormat::Reset()
{
_Reset();
CAudioFormat::Reset();
}
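/****************************************************************************
* Method:
* CMP4AudioFormat::CanChangeAudioStream
*
* Reports whether the loaded decoder module supports mid-stream audio
* format changes.
*/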
BOOL CMP4AudioFormat::CanChangeAudioStream()
{
BOOL bRet = FALSE;
if (m_pDecoderModule)
{
bRet = m_pDecoderModule->CanChangeAudioStream();
}
return bRet;
}
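/****************************************************************************
* Method:
* CMP4AudioFormat::RegisterPayloadFormats
*
* Registers the payload format builders compiled into this renderer with
* the payload format factory.
*/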
void CMP4AudioFormat::RegisterPayloadFormats()
{
// Register the various payload format builder functions
// with the payload format factory
#if defined(HELIX_FEATURE_AUDIO_CODEC_AAC) || defined(HELIX_FEATURE_AUDIO_CODEC_RAAC)
// MPEG4 audio formats
m_fmtFactory.RegisterBuilder(&MP4APayloadFormat::Build);
#endif // defined(HELIX_FEATURE_AUDIO_CODEC_AAC) || defined(HELIX_FEATURE_AUDIO_CODEC_RAAC)
#if defined(HELIX_FEATURE_ISMA) || defined(HELIX_FEATURE_AUDIO_RALF)
m_fmtFactory.RegisterBuilder(&MP4GPayloadFormat::Build);
#endif // defined(HELIX_FEATURE_ISMA) || defined(HELIX_FEATURE_AUDIO_RALF)
// AMR formats
#if defined(HELIX_FEATURE_AUDIO_CODEC_AMRNB) || defined(HELIX_FEATURE_AUDIO_CODEC_AMRWB)
m_fmtFactory.RegisterBuilder(&CAMRPayloadFormat::Build);
m_fmtFactory.RegisterBuilder(&CHXAMRPayloadFormat::Build);
#endif // defined(HELIX_FEATURE_AUDIO_CODEC_AMRNB) || defined(HELIX_FEATURE_AUDIO_CODEC_AMRWB)
#if defined(HELIX_FEATURE_AUDIO_CODEC_MP3)
// MP3 format
m_fmtFactory.RegisterBuilder(&CMP3DraftPayloadFormat::Build);
#endif /* #if defined(HELIX_FEATURE_AUDIO_CODEC_MP3) */
}
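/****************************************************************************
* Method:
* CMP4AudioFormat::_Reset
*
* Resets the packet assembler, the decoder, and the timestamp state back
* to the stream start time.
*/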
void CMP4AudioFormat::_Reset(void)
{
if (m_pRssm)
{
m_pRssm->Reset();
m_pRssm->SetTimeAnchor(GetStartTime());
ConfigureRssm(GetStartTime());
}
if (m_pDecoderInstance)
{
m_pDecoderInstance->Reset();
}
m_ulLastDecodedEndTime = 0;
m_ulLastFrameTime = 0;
m_bNewAssembledFrame = TRUE;
m_TSConverter.Reset();
m_TSConverter.SetOffset(GetStartTime());
}