raformat.cpp
if (uPropSize)
{
// The original comment in here suggested that for G2 stereo content,
// param.uChannels would wrongly indicate "1" as the number of channels.
// This cannot be reproduced anymore, but to be sure, we never allocate
// memory for less than 2 channels. The downside is that
// if we're playing a mono stream we could waste as much as ~10kb. Until
// the codec interface is changed to allow us to get the exact size of
// the output buffer, however, this will have to suffice.
int nchannels = param.uChannels < 2 ? 2 : param.uChannels;
// Fixes PR 114676: also multiply by bytes-per-sample, as is done
// in the other calculation of m_ulAudioBufSize above, to fix
// overflow of the buffer in the RAAC codec's Decode() when there are
// samples to conceal. m_ulAudioBufSize should be the total bytes
// per block (total meaning across all channels):
// (samples / block) * (bytes / sample) * channels => total bytes per block.
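// For example (hypothetical stream values): 1024 samples per block at a
// 16-bit sample size with stereo output gives
// 1024 * 16 / 8 * 2 = 4096 bytes per decoded block.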
m_ulAudioBufSize = (*pulSamplesPerBlock) * m_StreamParam.uSampleSize / 8 * nchannels;
}
else
{
// There is no information on how large the decode buffer should be
m_ulAudioBufSize = DFLT_AUDIO_BUF_SIZE;
}
}
return theError;
}
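// Report whether this format still holds undelivered audio: either
// superblock contains data, or the packet feeder is still active.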
BOOL
CRaFormat::IsActive()
{
BOOL bRetVal = FALSE;
if ((!m_IBufs.IsDSuperBlockEmpty()) ||
(!m_IBufs.IsISuperBlockEmpty()))
{
bRetVal = TRUE;
}
else
{
if( m_pPacketFeeder )
{
bRetVal = m_pPacketFeeder->IsActive();
}
}
return bRetVal;
}
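// Forward the packet-loss notification to the packet feeder, if one exists.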
void
CRaFormat::LossOccured()
{
if( m_pPacketFeeder )
{
m_pPacketFeeder->LossOccured();
}
}
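// Hand an incoming packet to the packet feeder and, once the interleaved
// superblock is full and the deinterleaved one is empty, deinterleave the
// queued data so it is ready for decoding.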
HX_RESULT
CRaFormat::OnPacket(IHXPacket* pPacket, LONG32 lTimeOffset, UINT16* rule_to_flag_map)
{
HX_RESULT retVal = HXR_OK;
m_lTimeOffset = lTimeOffset;
/*
DEBUG_OUTF_IDX(m_uStreamNumber, RA_FLOW_FILE, (s, "PKT: TS=%d, Rule=%d L=%s\n",
pPacket->GetTime() % MAX_TS_DBG_RANGE,
pPacket->GetASMRuleNumber(),
pPacket->IsLost() ? "YES" : "NO"));
*/
retVal = m_pPacketFeeder->OnPacket(pPacket,
lTimeOffset,
rule_to_flag_map);
// we only deinterleave data when the input buffer
// is filled and output buffer is empty
if (m_IBufs.IsISuperBlockFilled() && m_IBufs.IsDSuperBlockEmpty())
{
// deinterleave the data and fill up the interleave buffer from
// the packet queue
m_IBufs.DeInterleaveData();
}
return retVal;
}
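// Produce the next block of decoded audio. The loop below keeps pulling
// packets into the interleaved superblock, deinterleaving, and decoding
// until a block is produced, the data runs out, or MAX_AUDIO_DATA_ATTEMPTS
// iterations have been made.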
HX_RESULT
CRaFormat::GetAudioData(HXAudioData& audioData,
UINT32& ulActualTimestamp,
AUDIO_STATE audioState,
UINT32 ulSpliceToActualTime,
UINT32 ulSpliceToStreamTime)
{
UINT32 ulIterationCounter = 0;
HX_RESULT pnr = HXR_NO_DATA;
if (ulSpliceToActualTime != NO_TIME_SET)
{
ulSpliceToActualTime = UnAdjustTimestamp(ulSpliceToActualTime, m_lTimeOffset);
ulSpliceToStreamTime = UnAdjustTimestamp(ulSpliceToStreamTime, m_lTimeOffset);
}
// If we have not yet determined whether the timestamps need adjusting,
// then we do not have any data available.
do
{
HX_RELEASE(audioData.pData);
audioData.ulAudioTime = 0;
// move any data from the time range queue into the IBufs
m_pPacketFeeder->FillISuperBlock();
switch (audioState)
{
case AUDIO_CROSSFADE:
case AUDIO_DRYNOTIFICATION:
case AUDIO_END_OF_PACKETS:
if (!m_IBufs.DataAvailable(audioState))
{
// this is an "emergency" state so do this even
// if the IBlock isn't quite full
m_IBufs.DeInterleaveData();
}
break;
default:
break;
}
if (m_IBufs.DataAvailable(audioState))
{
pnr = DecodeAudioData(audioData,
ulActualTimestamp,
ulSpliceToActualTime,
ulSpliceToStreamTime);
if (HXR_OK == pnr)
{
// if we've fulfilled the discard, reset state
if (m_ulForceDiscardUntilTime != NO_TIME_SET &&
IsTimeGreater(audioData.ulAudioTime + (UINT32)GetMSPerBlock(),
m_ulForceDiscardUntilTime))
{
m_ulForceDiscardUntilTime = NO_TIME_SET;
}
break;
}
else if (FAILED(pnr))
{
DEBUG_OUTF(DOAUDIO_FILE, (s, "GetAudioData\t%lu\t0x%X\n",
(UINT32)GetMSPerBlock(), pnr));
if( pnr == HXR_OUTOFMEMORY )
{
return pnr;
}
break;
}
DEBUG_OUTF(DOAUDIO_FILE, (s, "DataAvailable - no decode\t%lu\t0x%X\n",
(UINT32)GetMSPerBlock(), pnr));
}
else
{
BOOL bTimeRangeEnded = m_pPacketFeeder->IsCurrentTimeRangeEnded();
BOOL bDataInTimeRange = !m_pPacketFeeder->IsCurrentTimeRangeEmpty();
// if the D superblock is empty and there's more data to decode
// we give it a try to move the data into the D superblock to
// decode it.
if (m_IBufs.IsDSuperBlockEmpty() &&
(!m_IBufs.IsISuperBlockEmpty() || bDataInTimeRange))
{
if (m_IBufs.IsISuperBlockFilled())
{
m_IBufs.DeInterleaveData();
}
// if we don't have any data in the De-interleaved superblock
// and we failed to fill the Interleaved superblock, we
// must be out of data and we should break out of the while loop
if (m_IBufs.IsDSuperBlockEmpty() &&
(!m_pPacketFeeder->FillISuperBlock()))
{
DEBUG_OUTF(DOAUDIO_FILE, (s, "GetAudioData\t%d\t%d\t%d\t%d\t%lu\n",
m_IBufs.IsDSuperBlockEmpty(), m_IBufs.IsISuperBlockEmpty(), m_IBufs.IsISuperBlockFilled(), bTimeRangeEnded, (UINT32)GetMSPerBlock()));
break;
}
}
else
{
HX_ASSERT(!bDataInTimeRange);
// we've drained the IBufs of data from the previous TR, now
// we can start the next TR if there is one.
if (bTimeRangeEnded)
{
if (HXR_OK == DecodeAudioData(audioData,
ulActualTimestamp,
ulSpliceToActualTime,
ulSpliceToStreamTime,
TRUE))
{
// if we've fulfilled the discard, reset state
if (m_ulForceDiscardUntilTime != NO_TIME_SET &&
IsTimeGreater(audioData.ulAudioTime + (UINT32)GetMSPerBlock(),
m_ulForceDiscardUntilTime))
{
m_ulForceDiscardUntilTime = NO_TIME_SET;
}
pnr = HXR_OK;
}
else
{
// stream done result here tells the renderer not to expect
// any more data on this format for this particular time range
// so it can give up on a cross fade if it is pending one here
pnr = HXR_STREAM_DONE;
}
SetupForNextTimeRange();
}
else
{
DEBUG_OUTF(DOAUDIO_FILE, (s, "GetNextAudioDataTime\t%d\t%d\t%d\t%d\t%lu\n",
m_IBufs.IsDSuperBlockEmpty(), m_IBufs.IsISuperBlockEmpty(), m_IBufs.IsISuperBlockFilled(), bTimeRangeEnded, (UINT32)GetMSPerBlock()));
}
break;
}
}
ulIterationCounter++;
} while (ulIterationCounter < MAX_AUDIO_DATA_ATTEMPTS);
return pnr;
}
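// Synthesize audio for a lost region: feed the decoder one interleave-block
// worth of input so it produces replacement output, then splice that output
// in at ulSpliceToStreamTime. Returns HXR_NO_DATA if nothing usable comes out.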
HX_RESULT
CRaFormat::GenerateLostAudioData (UINT32 ulForceEndTime,
HXAudioData& audioData,
UINT32& ulActualTimestamp,
UINT32 ulSpliceToActualTime,
UINT32 ulSpliceToStreamTime)
{
HX_RESULT theError = HXR_FAILED;
Byte* pData = new UCHAR[m_StreamParam.uInterleaveBlockSize];
UINT32 ulInSize = m_StreamParam.uInterleaveBlockSize;
UINT32 ulDataFlags = 0;
UINT32 ulOutSize;
ulForceEndTime = UnAdjustTimestamp(ulForceEndTime, m_lTimeOffset);
if( !m_pCodec )
{
theError = LoadDecoderAndInitDeInterleaver();
}
if (m_pCodec != NULL)
{
theError = m_pCachingClassFactory->CreateInstance(CLSID_IHXBuffer,
(void**) &audioData.pData);
// These buffers will initially be size 0, but since we're using
// the caching class factory, they will generally be the right size
// on reuse. We can't assume this, but let's not call SetSize
// unnecessarily.
if( audioData.pData && audioData.pData->GetSize() != m_ulAudioBufSize )
{
theError = audioData.pData->SetSize( m_ulAudioBufSize );
}
if( theError != HXR_OK )
{
return theError;
}
theError = m_pCodec->Decode(pData,
ulInSize,
audioData.pData->GetBuffer(),
&ulOutSize,
ulDataFlags);
// Make the buffer size smaller if necessary, since we don't always
// pass the decoder the max size of encoded data. Note that this does
// not alter the heap (based on current buffer implementation).
if( ulOutSize < m_ulAudioBufSize )
{
theError = audioData.pData->SetSize( ulOutSize );
}
}
HX_VECTOR_DELETE(pData);
if (theError == HXR_OK && ulOutSize == 0)
{
theError = HXR_NO_DATA;
}
if (theError == HXR_OK)
{
audioData.ulAudioTime = ulSpliceToStreamTime;
audioData.uAudioStreamType = STREAMING_AUDIO;
ulActualTimestamp = ulSpliceToActualTime;
// don't forget to adjust...
if (!AdjustAudioData(audioData, ulActualTimestamp, ulForceEndTime))
{
HX_RELEASE(audioData.pData);
audioData.ulAudioTime = 0;
theError = HXR_FAILED;
}
}
else
{
HX_RELEASE( audioData.pData );
audioData.pData = NULL;
audioData.ulAudioTime = 0;
}
return (HXR_OK == theError) ? (HXR_OK) : (HXR_NO_DATA);
}
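// Thin wrapper around the codec's Decode() call for a single block.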
HX_RESULT CRaFormat::DecodeAudioBlock(Byte* pData,
UINT32 ulInSize,
Byte* pAudioBuf,
UINT32* pOutSize,
UINT32 ulDataFlags)
{
HX_RESULT retVal = HXR_OK;
retVal = m_pCodec->Decode(pData, ulInSize,
pAudioBuf, pOutSize,
ulDataFlags);
return retVal;
}
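// Decode the next block from the deinterleaved superblock into a freshly
// allocated IHXBuffer, adjust its timestamp for any accumulated codec delay,
// and splice and clip it against the requested time range.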
HX_RESULT
CRaFormat::DecodeAudioData (HXAudioData& audioData,
UINT32& ulActualTimestamp,
ULONG32 ulSpliceToActualTime,
ULONG32 ulSpliceToStreamTime,
BOOL bFlushCodec)
{
HX_RESULT theError = HXR_FAILED;
Byte* pData;
UINT32 ulInSize;
UINT32 ulOutSize;
UINT32 ulDataFlags;
UINT32 ulTimestamp;
// To optimize heap, we don't use the m_pAudioBuf as a
// temporary pcm buffer; we just decode straight into the buffer
// that we're going to dump it in anyway.
theError = m_pCachingClassFactory->CreateInstance(CLSID_IHXBuffer,
(void**) &audioData.pData);
if( theError != HXR_OK )
{
HX_RELEASE(audioData.pData);
theError = HXR_OUTOFMEMORY;
}
if (theError == HXR_OK && m_pCodec != NULL)
{
theError = m_IBufs.GetBlock(&pData,
&ulInSize,
&ulDataFlags,
&ulTimestamp,
&ulActualTimestamp);
}
if (theError == HXR_OK)
{
// increment the IBuf for next block
m_IBufs.NextBlock();
if (!GetDropBlock(ulTimestamp))
{
// These buffers will initially be size 0, but since we're using
// the caching class factory, they will generally be the right
// on reuse. We can't assume this, but let's not call SetSize
// unnecessarily.
if( audioData.pData->GetSize() != m_ulAudioBufSize )
{
theError = audioData.pData->SetSize( m_ulAudioBufSize );
}
if( theError == HXR_OUTOFMEMORY )
{
return theError;
}
theError = DecodeAudioBlock(pData, ulInSize, audioData.pData->GetBuffer(),
&ulOutSize, ulDataFlags);
}
else
{
DEBUG_OUTF(DOAUDIO_FILE, (s, "DropBlock\t%lu\t%lu\n",
ulTimestamp, (UINT32)GetMSPerBlock()));
// assume entire block decoded?
m_IBufs.NumDecodedBytes(ConvertMsToBytes((UINT32)GetMSPerBlock()));
m_bPCMStreamStart = TRUE;
HX_RELEASE( audioData.pData );
theError = HXR_NO_DATA;
}
}
else if (theError == HXR_OK && bFlushCodec && m_pCodec != NULL)
{
ulOutSize = m_ulAudioBufSize;
theError = m_pCodec->Flush(audioData.pData->GetBuffer(), &ulOutSize);
HX_RELEASE( audioData.pData );
}
if (theError == HXR_OK && ulOutSize == 0)
{
m_fCodecDelay += GetMSPerBlock();
m_ulCodecDelay = (UINT32) m_fCodecDelay;
theError = HXR_NO_DATA;
HX_RELEASE( audioData.pData );
}
if (theError == HXR_OK)
{
// Make the buffer size smaller if necessary, since we don't always
// pass the decoder the max size of encoded data. Note that this does
// not alter the heap (based on current buffer implementation).
if( audioData.pData->GetSize() > ulOutSize )
{
theError = audioData.pData->SetSize( ulOutSize );
if( theError == HXR_OUTOFMEMORY )
{
return theError;
}
}
audioData.ulAudioTime = ulTimestamp;
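// If the codec produced less than a full block of PCM, treat the missing
// duration as codec (startup) delay and fold it into the running delay so
// subsequent timestamps stay continuous.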
if (ulOutSize < m_ulMinExpectedDecodedBlockSize)
{
double fCodecDelay = (GetMSPerBlock() - m_IBufs.CalcMs(ulOutSize));
if (fCodecDelay >= MIN_CODEC_BLOCK_DELAY)
{
LONG32 lCodecDelay = (UINT32) fCodecDelay;
audioData.ulAudioTime += lCodecDelay;
m_ulCodecDelay += lCodecDelay;
m_fCodecDelay += fCodecDelay;
}
}
audioData.ulAudioTime -= m_ulCodecDelay;
m_IBufs.NumDecodedBytes(audioData.ulAudioTime, ulOutSize);
SpliceAudioData(audioData,
ulActualTimestamp,
ulSpliceToActualTime,
ulSpliceToStreamTime);
m_bPCMStreamStart = FALSE;
if (AdjustAudioData(audioData, ulActualTimestamp))
{
m_ulNextAudioTime = audioData.ulAudioTime +
(UINT32) m_IBufs.CalcMs(audioData.pData->GetSize());
m_ulNextActualAudioTime = ulActualTimestamp +
(UINT32) m_IBufs.CalcMs(audioData.pData->GetSize());
}
else
{
m_bPCMStreamStart = TRUE;
HX_RELEASE(audioData.pData);
audioData.ulAudioTime = 0;
theError = HXR_NO_DATA;
}
}
else
{
HX_RELEASE( audioData.pData );
audioData.pData = NULL;
audioData.ulAudioTime = 0;
}
return theError;
}
void
CRaFormat::SetCrossFadeEndTime(UINT32 ulTimestamp)
{
DEBUG_OUTF(XFADE_FILE, (s, "SetXFadeEndTime\t%lu\t%lu\n", ulTimestamp, (UINT32)GetMSPerBlock()));
/*
* The ulTimestamp can be in one of these 5 positions.
* We start from 4 and go to 0 looking for some data that's
* less than the ulTimestamp time.
*
* [DDD][III] {_,_,_,} {_,_,_,_,}
* ^ ^ ^ ^ ^
* | | | | |