// rarender.cpp
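/////////////////////////////////////////////////////////////////////////////
// Method:
// CRealAudioRenderer::QueueUnregisterSync
//
// Queues the UnRegister() of a stream's audio sync. A concrete time queues
// the unregister for later; NO_TIME_SET performs it immediately. Any
// previously queued stream is flushed (unregistered) first.
//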
void CRealAudioRenderer::QueueUnregisterSync(UINT16 uStream, UINT32 ulTime)
{
if ((m_uSyncUnregisterStream != NO_STREAM_SET) &&
(uStream != m_uSyncUnregisterStream))
{
CRealAudioRenderer::QueueUnregisterSync(m_uSyncUnregisterStream,
NO_TIME_SET);
}
m_ulSyncUnregisterTime = ulTime;
if (ulTime == NO_TIME_SET)
{
if (m_pRaFormats &&
m_pRaFormats[uStream]->m_pAudioSync &&
m_pRaFormats[uStream]->m_bRegistered)
{
m_pRaFormats[uStream]->m_bRegistered = FALSE;
m_pRaFormats[uStream]->m_pAudioSync->UnRegister();
m_uSyncUnregisterStream = NO_STREAM_SET;
DEBUG_OUTF_IDX(uStream, RA_FLOW_FILE,
(s, "Sync Stop\n"));
}
}
else
{
// remember which stream should be unregistered when the queued time arrives
m_uSyncUnregisterStream = uStream;
}
}
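/////////////////////////////////////////////////////////////////////////////
// Method:
// CRealAudioRenderer::FlushUnregisterQueue
//
// Performs any queued sync unregister immediately. The bDestroy argument
// is not used here.
//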
void CRealAudioRenderer::FlushUnregisterQueue(BOOL bDestroy)
{
if (m_uSyncUnregisterStream != NO_STREAM_SET)
{
QueueUnregisterSync(m_uSyncUnregisterStream, NO_TIME_SET);
}
}
/////////////////////////////////////////////////////////////////////////////
// Method:
// CRealAudioRenderer::WriteToAudioServices
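//
// Writes one block of decoded audio to Audio Services, retrying (up to
// three attempts) when the stream rejects the write because of a timing
// mismatch: the block may be re-marked as timed audio to skip ahead,
// clipped if it is only partly late, or dropped if we are far behind.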
HX_RESULT
CRealAudioRenderer::WriteToAudioServices(UINT16 uStreamNumber,
HXAudioData* pAudioData,
UINT32 ulActualTimestamp)
{
HX_RESULT pnr = HXR_OK;
BOOL bTryWrite = TRUE;
ULONG32 ulAttemptCount = 0;
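// attempt the write a bounded number of times; each failure path below
// either adjusts the data/timing and retries or gives up on this block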
while (bTryWrite && (ulAttemptCount < 3))
{
// Write to AS
pnr = m_pAudioStreams[uStreamNumber]->Write(pAudioData);
LogAudioWrite(uStreamNumber, pAudioData, ulActualTimestamp, pnr);
ulAttemptCount++;
if (SUCCEEDED(pnr))
{
if (m_pRaFormats[uStreamNumber]->m_bRegistered)
{
// FudgeTimestamp() no longer needs its first parameter, so pass 0
m_pRaFormats[uStreamNumber]->m_pAudioSync->FudgeTimestamp(0,
ulActualTimestamp);
}
CalculateMaxTimeStamp(uStreamNumber,
pAudioData,
ulActualTimestamp);
bTryWrite = FALSE;
}
#ifdef HELIX_FEATURE_RAREND_PREREDSTONE_SUPPORT
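// Legacy (pre-Redstone) players: the core cannot be asked what time the
// audio stream expects, so on failure we either skip ahead or wait for a
// dry notification to learn the time.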
else if (m_bPreRedstonePlayer)
{
if (HXR_NONCONTIGUOUS_PACKET == pnr)
{
// we are skipping ahead and should just mark this packet
// as timed and write it again
pAudioData->uAudioStreamType = TIMED_AUDIO;
DEBUG_OUT(m_pErrorMessages, DOL_REALAUDIO_EXTENDED,
(s, "skipping AS stream %u's time ahead to %lu\t0x%X", uStreamNumber,
pAudioData->ulAudioTime, pnr));
DEBUG_OUTF(PREREDSTONE_FILE,
(s, "skipping AS stream %u's time ahead to %lu\t0x%X\n", uStreamNumber,
pAudioData->ulAudioTime, pnr));
}
else
{
HX_ASSERT(m_bPreRedstonePlayer);
// we've likely written a late packet, but it's not safe to ask
// every rmacore what time the stream is at, so we wait for a dry
// notification, which tells us the time
m_usPreviousDryNotificationStream = m_usCurrentDryNotificationStream;
if (FAILED(AddDryNotification(uStreamNumber)))
{
// I guess we just jump ahead a second?? and try again
DEBUG_OUT(m_pErrorMessages, DOL_REALAUDIO_EXTENDED,
(s, "Behind Unknown ms on %u's jumping ahead to %lu\t0x%X", uStreamNumber,
pAudioData->ulAudioTime + 1000, pnr));
DEBUG_OUTF(PREREDSTONE_FILE,
(s, "Behind Unknown ms on %u's jumping ahead to %lu\t0x%X\n", uStreamNumber,
pAudioData->ulAudioTime + 1000, pnr));
m_pRaFormats[uStreamNumber]->
DiscardTillEndOfCrossFade(pAudioData->ulAudioTime + 1000);
// reset dry notification
AddDryNotification(m_usPreviousDryNotificationStream);
m_usPreviousDryNotificationStream = NO_STREAM_SET;
}
// get out of the loop...
bTryWrite = FALSE;
}
}
#endif // HELIX_FEATURE_RAREND_PREREDSTONE_SUPPORT
else
{
// we got an error on write, check what time the audio stream
// expects data for
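// (a Write() with NULL pData is used here as a query; the stream
// reports the time it expects next in audioData.ulAudioTime)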
HXAudioData audioData;
INT32 lTimeDiff;
audioData.pData = NULL;
m_pAudioStreams[uStreamNumber]->Write(&audioData);
if (m_ulLatestStreamTime != NO_TIME_SET)
{
lTimeDiff = (LONG32) (audioData.ulAudioTime - m_ulLatestStreamTime);
OffsetLatestTime(uStreamNumber, lTimeDiff);
}
DEBUG_OUTF_IDX(uStreamNumber, RA_FLOW_FILE,
(s, "Audio Stream Report: Time=%lu\n",
audioData.ulAudioTime));
DEBUG_OUT(m_pErrorMessages, DOL_REALAUDIO_EXTENDED,
(s, "Failed AS->Write\t%u\t%lu\t%lu\t%d\t0x%X", uStreamNumber,
pAudioData->ulAudioTime, audioData.ulAudioTime,
pAudioData->uAudioStreamType, pnr));
DEBUG_OUTF(AUDIOSERVICES_FILE,
(s, "Failed AS->Write\t%u\t%lu\t%lu\t%d\t0x%X\n", uStreamNumber,
pAudioData->ulAudioTime, audioData.ulAudioTime,
pAudioData->uAudioStreamType, pnr));
if (IsTimeLess(audioData.ulAudioTime, pAudioData->ulAudioTime))
{
// we are skipping ahead and should just mark this packet
// as timed and write it again
pAudioData->uAudioStreamType = TIMED_AUDIO;
DEBUG_OUT(m_pErrorMessages, DOL_REALAUDIO_EXTENDED,
(s, "skipping AS stream %u's time ahead to %lu\t0x%X", uStreamNumber,
pAudioData->ulAudioTime, pnr));
DEBUG_OUTF(AUDIOSERVICES_FILE,
(s, "skipping AS stream %u's time ahead to %lu\t0x%X\n", uStreamNumber,
pAudioData->ulAudioTime, pnr));
}
else if (IsTimeGreater(audioData.ulAudioTime, pAudioData->ulAudioTime) &&
IsTimeLessOrEqual(audioData.ulAudioTime, pAudioData->ulAudioTime +
((UINT32) (m_pRaFormats[uStreamNumber]->ConvertBytesToMs(pAudioData->pData->GetSize())))))
{
// we are a little behind, but at least part of this buffer is
// on time; clip the buffer to the time the audio stream wants
// and try again.
bTryWrite = m_pRaFormats[uStreamNumber]->ClipAudioBuffer(pAudioData,
ulActualTimestamp, audioData.ulAudioTime, TRUE);
DEBUG_OUT(m_pErrorMessages, DOL_REALAUDIO_EXTENDED,
(s, "A little bit behind on stream %u's clip buffer to %lu\t0x%X", uStreamNumber,
audioData.ulAudioTime, pnr));
DEBUG_OUTF(AUDIOSERVICES_FILE,
(s, "A little bit behind on stream %u's clip buffer to %lu\t0x%X\n", uStreamNumber,
audioData.ulAudioTime, pnr));
}
else
{
DEBUG_OUT(m_pErrorMessages, DOL_REALAUDIO_EXTENDED,
(s, "A lot behind on stream %u's skipping ahead to %lu\t0x%X", uStreamNumber,
audioData.ulAudioTime, pnr));
DEBUG_OUTF(AUDIOSERVICES_FILE,
(s, "A lot behind on stream %u's skipping ahead to %lu\t0x%X\n", uStreamNumber,
audioData.ulAudioTime, pnr));
UINT32 ulLatestActualTime = NO_TIME_SET;
UINT32 ulLatestStreamTime = NO_TIME_SET;
// we are a lot behind and should tell this format to discard
// data until the audio stream time.
GetLatestTimesForStream(uStreamNumber,
ulLatestActualTime,
ulLatestStreamTime);
if (ulLatestActualTime == NO_TIME_SET)
{
ulLatestActualTime = m_ulLatestActualTime;
}
if (ulLatestActualTime != NO_TIME_SET)
{
m_pRaFormats[uStreamNumber]->
DiscardTillEndOfCrossFade(ulLatestActualTime);
}
// we don't want to try again with this data
bTryWrite = FALSE;
}
}
}
if (SUCCEEDED(pnr))
{
DEBUG_OUT(m_pErrorMessages, DOL_REALAUDIO_EXTENDED,
(s, "w\t%u\t%lu\t%lu\t%lu\t0x%X\t%d", uStreamNumber,
pAudioData->ulAudioTime, m_ulCurrentTimelineTime, ulActualTimestamp,
pnr, pAudioData->uAudioStreamType));
DEBUG_OUTF(AUDIOSERVICES_FILE,
(s, "w\t%u\t%lu\t%lu\t%lu\t0x%lX\t%d\n", uStreamNumber,
pAudioData->ulAudioTime, m_ulCurrentTimelineTime, ulActualTimestamp,
pnr, pAudioData->uAudioStreamType));
}
return pnr;
}
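/////////////////////////////////////////////////////////////////////////////
// Method:
// CRealAudioRenderer::GetLatestTimesForStream
//
// Reports the latest actual and stream times written so far. This
// implementation ignores uStream and returns the renderer-wide values.
//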
void
CRealAudioRenderer::GetLatestTimesForStream(UINT16 uStream,
UINT32& ulLatestActualTime,
UINT32& ulLatestStreamTime)
{
ulLatestActualTime = m_ulLatestActualTime;
ulLatestStreamTime = m_ulLatestStreamTime;
}
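/////////////////////////////////////////////////////////////////////////////
// Method:
// CRealAudioRenderer::IsCrossfadeInProgress
//
// Always FALSE here; a renderer that supports crossfading would report an
// active fade through this query.
//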
BOOL CRealAudioRenderer::IsCrossfadeInProgress(void)
{
return FALSE;
}
/////////////////////////////////////////////////////////////////////////////
// Method:
// CRealAudioRenderer::DoAudio
//
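// Pulls the next block of decoded audio from the lowest-time active
// format, fills gaps in the timeline with generated loss data, sets up a
// crossfade when two formats overlap, and writes the result to Audio
// Services.
//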
// Note: See Switchsod.txt for how this is supposed to work, please keep
// the sod up to date with changes here too.
//
HX_RESULT
CRealAudioRenderer::DoAudio(UINT32& ulAudioTime, AUDIO_STATE audioState = AUDIO_NORMAL)
{
#ifdef HELIX_CONFIG_ONLY_DECODE_IF_DRY
// Only decode if our audio buffers are 'dry' -- unless we're not
// playing yet, because in that case we're not going to go dry.
// The second condition totally screws up seeking.
//if( audioState != AUDIO_DRYNOTIFICATION && m_PlayState != buffering )
if( audioState != AUDIO_DRYNOTIFICATION )
{
return HXR_OK;
}
// if we could ask the audio stream how many PCM buffers are currently
// in its write list, we could make sure we don't write any more if the
// number is greater than n.
#endif // HELIX_CONFIG_ONLY_DECODE_IF_DRY
HX_RESULT pnr = HXR_NO_DATA;
UINT32 ulActualTimestamp = 0;
UINT16 nActive = 0;
UINT16 uLowest = NO_STREAM_SET;
UINT16 uLongestOverlap = NO_STREAM_SET;
UINT32 ulLowestStartTime = NO_TIME_SET;
UINT32 ulLowestEndTime = NO_TIME_SET;
UINT32 ulLatestActualTime;
UINT32 ulLatestStreamTime;
HXAudioData audioData;
audioData.pData = NULL;
audioData.ulAudioTime = ulAudioTime = 0;
// if we are waiting for dry notification, return immediately.
if (m_usPreviousDryNotificationStream != NO_STREAM_SET)
{
HX_ASSERT(m_bPreRedstonePlayer);
return HXR_OK;
}
// if we are cross fading, set the audio state right.
if (IsCrossfadeInProgress() && (audioState == AUDIO_NORMAL))
{
audioState = AUDIO_CROSSFADE;
}
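// find the active format whose pending audio starts earliest; it is the
// candidate to write next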
FindLowestStartTime(uLowest, ulLowestStartTime, ulLowestEndTime, nActive);
DEBUG_OUTF(DOAUDIO_FILE, (s, "Lowest: Stream=%u Start=%lu End=%lu Active=%u\n",
uLowest, ulLowestStartTime, ulLowestEndTime, nActive));
// Identify latest packet time for purposes of gap filling
GetLatestTimesForStream(uLowest, ulLatestActualTime, ulLatestStreamTime);
// if ulLowestStartTime is more than RA_TIME_FUDGE past the latest
// time we have written, there is a gap in the timeline -- fill it
// with loss data generated from the last known stream.
if ((ulLatestActualTime != NO_TIME_SET) &&
((ulLowestStartTime != NO_TIME_SET) &&
IsTimeGreater(ulLowestStartTime, ulLatestActualTime + RA_TIME_FUDGE)) &&
((uLowest == m_usCurrentDryNotificationStream) ||
IsCrossfadeInProgress()))
{
DEBUG_OUT(m_pErrorMessages, DOL_REALAUDIO,
(s, "Gap from %lu on %d(%d) to %lu on %d -- write loss",
ulLowestStartTime, uLowest, audioState,
ulLatestActualTime, m_usCurrentDryNotificationStream));
pnr = m_pRaFormats[uLowest]->
GenerateLostAudioData(ulLowestStartTime,
audioData,
ulActualTimestamp,
ulLatestActualTime,
ulLatestStreamTime);
DEBUG_OUTF(LOSS_FILE,
(s, "Gap from %lu on %d(%d) to %lu on %d -- write loss\t%lu\t%lu\n",
ulLowestStartTime, uLowest, audioState,
ulLatestActualTime, uLowest,
audioData.ulAudioTime, ulLatestActualTime));
if (SUCCEEDED(pnr) && (pnr != HXR_NO_DATA))
{
pnr = WriteToAudioServices(uLowest, &audioData, ulActualTimestamp);
DEBUG_OUTF(DOAUDIO_FILE, (s, "Write Loss:\t%u\t%lu\t%lu\t%c\t%d\t0x%X\n",
uLowest, audioData.ulAudioTime, ulActualTimestamp,
(!IsCrossfadeInProgress())?('F'):('T'), audioState, pnr));
// Set uLowest to NO_STREAM_SET and nActive to 0 to prevent
// further processing.
uLowest = NO_STREAM_SET;
nActive = 0;
}
HX_RELEASE(audioData.pData);
}
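// no stream has a pending start time: if the source has delivered all of
// its packets, we are done writing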
else if (ulLowestStartTime == NO_TIME_SET)
{
m_bDoneWritingPackets = m_bEndOfPackets;
}
// if more than one format is active and we're not currently cross-
// fading, loop through the formats looking for overlap. If we find
// one, remember that stream and its current time-range end time; if
// another overlapping stream has a greater end time, flush the
// current time range on the first and make the longer one the
// overlap candidate.
if ((nActive > 1) &&
(uLowest != NO_STREAM_SET) &&
(!IsCrossfadeInProgress()))
{
FindLongestOverlap(uLowest, ulLowestEndTime,
nActive, uLongestOverlap,
audioState);
}
if (ulLatestActualTime == NO_TIME_SET)
{
ulLatestActualTime = m_ulLatestActualTime;
ulLatestStreamTime = m_ulLatestStreamTime;
}
// if there's an overlap candidate, setup crossfade on the two streams
if (uLongestOverlap != NO_STREAM_SET)
{
pnr = AttemptCrossfade(uLowest, // From stream
uLongestOverlap, // To stream
ulLatestActualTime,
ulLatestStreamTime,
audioData.ulAudioTime, // Out from stream start time
ulActualTimestamp,
audioState);
}
else if (uLowest != NO_STREAM_SET)
{
// write the lowest stream to audio services
pnr = m_pRaFormats[uLowest]->GetAudioData(
audioData,
ulActualTimestamp,
(m_bEndOfPackets) ? (AUDIO_END_OF_PACKETS) : (audioState),
ulLatestActualTime,
ulLatestStreamTime);
if (HXR_OK == pnr)
{
pnr = WriteToAudioServices(uLowest,
&audioData,
ulActualTimestamp);
}
else if (HXR_OUTOFMEMORY == pnr)
{
return pnr;
}
DEBUG_OUTF(DOAUDIO_FILE, (s, "Write Lowest:\t%u\t%lu\t%lu\t%c\t%d\t0x%X\n",
uLowest, audioData.ulAudioTime, ulActualTimestamp,
(!IsCrossfadeInProgress())?('T'):('F'), audioState, pnr));
}
// release the data buffer if we got one
HX_RELEASE(audioData.pData);
// if we are cross fading, check to see if the cross fade is over
if ((uLowest != NO_STREAM_SET) &&
SUCCEEDED(pnr) &&
(HXR_NO_DATA != pnr))