// hxaudstr_new.cpp
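// NOTE (added): this listing is an excerpt from the Helix CHXAudioStream
// implementation. It begins mid-function, inside the audio data write/AddData
// path (the debug traces below refer to "AddData"), then continues through
// CHXAudioStream::ProcessInfo(), GetFormat() and Setup(), and is truncated
// inside CHXAudioStream::ResetStream(). Variables such as theErr, pAinfo and
// bInTSRollOver are declared earlier in the function, outside this excerpt.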
    {
        HXAudioInfo* pInfo = NULL;
        if (!m_pDataList->IsEmpty() &&
            NULL != (pInfo = (HXAudioInfo*) m_pDataList->GetTail()))
        {
            pAudioData->ulAudioTime = pInfo->ulStartTime + CalcMs(pInfo->pBuffer->GetSize());
        }
        else
        {
            pAudioData->ulAudioTime =
                INT64_TO_UINT32(m_llLastWriteTime -
                                CAST_TO_INT64(m_ulTSRollOver) * CAST_TO_INT64(MAX_UINT32));
        }

        return HXR_OK;
    }

    // make sure the renderer does not pass NULL data!!
    HX_ASSERT(pAudioData->pData->GetBuffer() != NULL &&
              pAudioData->pData->GetSize() != 0);
    if (pAudioData->pData->GetBuffer() == NULL ||
        pAudioData->pData->GetSize() == 0)
    {
        return HXR_INVALID_PARAMETER;
    }

    if (m_bIsFirstPacket)
    {
        m_bIsFirstPacket = FALSE;

        IHXErrorMessages* pErrMsg = NULL;
        if (HXR_OK == m_Owner->m_pContext->QueryInterface(IID_IHXErrorMessages,
                                                          (void**)&pErrMsg))
        {
            DEBUG_OUT(pErrMsg, DOL_GENERIC,
                      (s, "AudioFormatIn: %lu channels %lu SamplesPerSec",
                       m_AudioFmt.uChannels, m_AudioFmt.ulSamplesPerSec));
            DEBUG_OUT(pErrMsg, DOL_GENERIC,
                      (s, "AudioFormatOut: %lu channels %lu SamplesPerSec",
                       m_DeviceFmt.uChannels, m_DeviceFmt.ulSamplesPerSec));
        }
        HX_RELEASE(pErrMsg);

        if (m_bIsLive)
        {
            // XXX wschildbach why do we update a last write time *here*?
            m_Owner->UpdateStreamLastWriteTime();
            UpdateStreamLastWriteTime(TRUE);
        }
    }

//  {FILE* f1 = ::fopen("c:\\temp\\audio.txt", "a+"); ::fprintf(f1, "%lu\tAddData\t%p\t%lu\n", HX_GET_BETTERTICKCOUNT(), this, pAudioData->ulAudioTime); ::fclose(f1);}
//  ::fwrite(pAudioData->pData->GetBuffer(), pAudioData->pData->GetSize(), 1, fdin);

    UINT32 ulDataTime = CalcMs(pAudioData->pData->GetSize());
    UINT32 ulEndTime  = pAudioData->ulAudioTime + ulDataTime;

    if (m_pAvailableBuffers && !m_bDeterminedInitialCacheSize && ulDataTime > 0)
    {
        m_bDeterminedInitialCacheSize = TRUE;
        m_uCacheSize = (UINT16) (m_ulGranularity * 2 / ulDataTime) + 1;
        /* make sure it is at least CACHE_INCREMENT_SIZE to begin with */
        m_uCacheSize = m_uCacheSize < CACHE_INCREMENT_SIZE ? CACHE_INCREMENT_SIZE : m_uCacheSize;
    }

    if (m_ulLastInputStartTime > pAudioData->ulAudioTime &&
        ((m_ulLastInputStartTime - pAudioData->ulAudioTime) > MAX_TIMESTAMP_GAP))
    {
        bInTSRollOver = TRUE;
        m_ulTSRollOver++;
    }

    m_ulLastInputStartTime = pAudioData->ulAudioTime;
    m_ulLastInputEndTime   = ulEndTime;

    /* even in the STREAMING_AUDIO case it might happen that the packets
     * written are late, e.g. packets received late on the network
     */
    // XXX wschildbach : Why do we even care? Just insert the packet into the queue
    // and let the reaper take care of it!
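    // NOTE (added): pAudioData->ulAudioTime is a 32-bit millisecond timestamp
    // that wraps around; m_ulTSRollOver counts how many wraps have been seen
    // (incremented just above when a large backwards jump is detected). The
    // 64-bit values computed next extend the wrapped timestamp onto one
    // monotonic timeline: ulAudioTime + m_ulTSRollOver * MAX_UINT32.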
    INT64 llActualTimestamp = CAST_TO_INT64(pAudioData->ulAudioTime) +
                              CAST_TO_INT64(m_ulTSRollOver) * CAST_TO_INT64(MAX_UINT32);
    INT64 llActualEndTime   = CAST_TO_INT64(pAudioData->ulAudioTime) +
                              CAST_TO_INT64(ulDataTime) +
                              CAST_TO_INT64(m_ulTSRollOver) * CAST_TO_INT64(MAX_UINT32);

#if 0 // testing sampling frequency estimation for "inaccurate resampling"
    if (pAudioData->uAudioStreamType == TIMED_AUDIO)
    {
        m_startMeasureTime = llActualTimestamp;
        m_totSamples = 0;
    }
    else if (pAudioData->uAudioStreamType == STREAMING_AUDIO)
    {
        float diffTime = (llActualTimestamp - m_startMeasureTime) / 1000.0f;
        if (diffTime)
        {
            float frEstimate = m_totSamples / m_AudioFmt.uChannels / diffTime;
            float frEstErr   = frEstimate * 0.001f / diffTime; // 1 ms inaccuracy
            {
                FILE* f1 = fopen("c:\\temp\\estimatesr.txt", "a+");
                fprintf(f1, "%I64d\t%I64d\t%f\t%f\n",
                        llActualTimestamp, llActualEndTime, frEstimate, frEstErr);
                fclose(f1);
            }
        }
    }
    m_totSamples += CalcSamples(pAudioData->pData->GetSize());
#endif

    if ((pAudioData->uAudioStreamType == STREAMING_AUDIO ||
         pAudioData->uAudioStreamType == TIMED_AUDIO) &&
        !(llActualTimestamp >= m_llLastWriteTime || llActualEndTime > m_llLastWriteTime))
    {
        /* Too late */
        m_bTobeTimed = TRUE;
//      {FILE* f1 = ::fopen("e:\\audio.txt", "a+"); ::fprintf(f1, "%lu\t%p\t%d\t%lu\t%lu\tLATE\n", HX_GET_BETTERTICKCOUNT(), this, m_pDataList->GetCount(), pAudioData->ulAudioTime, (INT32)m_llLastWriteTime); ::fclose(f1);}
/*      {FILE *f1 = fopen("c:\\temp\\mix.txt","a+"); fprintf(f1,"LATE packet\n"); fclose(f1); } */
        return HXR_LATE_PACKET;
    }

    pAinfo = new HXAudioInfo;
    if (!pAinfo)
    {
        return HXR_OUTOFMEMORY;
    }

    pAudioData->pData->AddRef();
    pAinfo->pBuffer          = pAudioData->pData;
    pAinfo->ulStartTime      = pAudioData->ulAudioTime;
    pAinfo->pOffset          = pAudioData->pData->GetBuffer();
    pAinfo->ulBytesLeft      = pAudioData->pData->GetSize();
    pAinfo->uAudioStreamType = pAudioData->uAudioStreamType;

    if (m_bTobeTimed && pAudioData->uAudioStreamType == STREAMING_AUDIO)
    {
        pAinfo->uAudioStreamType = TIMED_AUDIO;
        m_bTobeTimed = FALSE;
    }
    else if (m_bTobeTimed && pAudioData->uAudioStreamType == TIMED_AUDIO)
    {
        m_bTobeTimed = FALSE;
    }

//  {FILE* f1 = ::fopen("c:\\temp\\audio.txt", "a+"); ::fprintf(f1, "AddData ulAudioTime: %lu\n", pAudioData->ulAudioTime); ::fclose(f1);}

    //////////////////////////////////////////////////////////////////////
    // XXX wschildbach
    // the start time of this packet in samples. This may be corrected later on.
    pAinfo->llStartTimeInSamples = CAST_TO_INT64(llActualTimestamp) *
                                   m_AudioFmt.ulSamplesPerSec / 1000 * m_AudioFmt.uChannels;
    pAinfo->llEndTimeInSamples   = pAinfo->llStartTimeInSamples +
                                   Bytes2Samples(pAinfo->pBuffer->GetSize(), &m_AudioFmt);
    //////////////////////////////////////////////////////////////////////

    if (pAinfo->uAudioStreamType == INSTANTANEOUS_AUDIO)
    {
        CHXSimpleList* pList = new CHXSimpleList;
        if (!pList)
        {
            HX_RELEASE(pAudioData->pData);
            HX_DELETE(pAinfo);
            return HXR_OUTOFMEMORY;
        }
        pList->AddHead((void*) pAinfo);
        m_pInstantaneousList->AddTail((void*) pList);
        m_Owner->m_Owner->ToBeRewound();
    }
    else if (pAinfo->uAudioStreamType == STREAMING_INSTANTANEOUS_AUDIO)
    {
        HX_ASSERT(m_pInstantaneousList && m_pInstantaneousList->GetCount() > 0);
        CHXSimpleList* pList = NULL;
        if (m_pInstantaneousList->GetCount() == 0)
        {
            pList = new CHXSimpleList;
            if (!pList)
            {
                HX_RELEASE(pAudioData->pData);
                HX_DELETE(pAinfo);
                return HXR_OUTOFMEMORY;
            }
            m_pInstantaneousList->AddTail((void*) pList);
            // fix for naive users!
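            // NOTE (added): a STREAMING_INSTANTANEOUS packet arrived before any
            // INSTANTANEOUS packet had created a sub-list, so a new sub-list is
            // created here and the packet is promoted to INSTANTANEOUS, which
            // also schedules a player rewind via ToBeRewound().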
            pAinfo->uAudioStreamType = INSTANTANEOUS_AUDIO;
            m_Owner->m_Owner->ToBeRewound();
        }

        pList = (CHXSimpleList*) m_pInstantaneousList->GetTail();
        pList->AddTail(pAinfo);
    }
    else if (m_pDataList->IsEmpty())
    {
/*      {FILE *f1 = fopen("c:\\temp\\mix.txt","a+"); fprintf(f1,"adding to empty list...\n"); fclose(f1); } */
        m_pDataList->AddTail((void*) pAinfo);
    }
    else
    {
/*      {FILE *f1 = fopen("c:\\temp\\mix.txt","a+"); fprintf(f1,"adding to non-empty list...\n"); fclose(f1); } */
        HXAudioInfo* pInfo = (HXAudioInfo*) m_pDataList->GetTail();

        UINT32 ulActualTSRollOver = m_ulTSRollOver;
        if (bInTSRollOver && ulActualTSRollOver)
        {
            ulActualTSRollOver--;
        }

        INT64 llActualLastEndTime   = CAST_TO_INT64(pInfo->ulStartTime) +
                                      CAST_TO_INT64(CalcMs(pInfo->pBuffer->GetSize())) +
                                      CAST_TO_INT64(ulActualTSRollOver) * CAST_TO_INT64(MAX_UINT32);
        INT64 llActualLastStartTime = CAST_TO_INT64(pInfo->ulStartTime) +
                                      CAST_TO_INT64(ulActualTSRollOver) * CAST_TO_INT64(MAX_UINT32);

        if (llActualTimestamp < llActualLastStartTime)
        {
            /* Not allowed */
            theErr = HXR_OUTOFORDER_PACKET;
            /* something is fu*#$#up... figure out what? */
            HX_ASSERT(!("Packets written out of order"));
            goto exit;
        }

        if (pAinfo->uAudioStreamType == STREAMING_AUDIO)
        {
            /* is it a reasonable packet to add to the list? */
            if ((llActualTimestamp <= llActualLastEndTime &&
                 llActualLastEndTime - llActualTimestamp <= m_ulFudge) ||
                (llActualTimestamp >= llActualLastEndTime &&
                 llActualTimestamp - llActualLastEndTime <= m_ulFudge))
            {
                // XXX wschildbach
                // make 64-bit timestamps contiguous before adding into the queue.
                pAinfo->llEndTimeInSamples  += pInfo->llEndTimeInSamples - pAinfo->llStartTimeInSamples;
                pAinfo->llStartTimeInSamples = pInfo->llEndTimeInSamples;

                m_pDataList->AddTail((void*) pAinfo);
            }
            else
            {
                theErr = HXR_NONCONTIGUOUS_PACKET; // HX_LATE_PACKET;
                /* something is fu*#$#up... figure out what? */
                HX_ASSERT(!("Streaming Audio: Non-Contiguous Write"));
                m_bTobeTimed = TRUE;
                goto exit;
            }
        }
        else
        {
            /* see if there is any overlap.. we do not allow any overlap */
            if (llActualTimestamp < llActualLastEndTime &&
                llActualLastEndTime - llActualTimestamp > m_ulFudge)
            {
                /* hmmm an overlapped packet */
                theErr = HXR_OVERLAPPED_PACKET;
                /* something is fu*#$#up... figure out what? */
                HX_ASSERT(!("Timed Audio: Overlapping write"));
                m_bTobeTimed = TRUE;
                goto exit;
            }
            else
            {
                m_pDataList->AddTail((void*) pAinfo);
            }
        }
    }

exit:

    if (theErr != HXR_OK && pAinfo)
    {
        pAinfo->pBuffer->Release();
        delete pAinfo;
    }

//  {FILE* f1 = ::fopen("e:\\audio.txt", "a+"); ::fprintf(f1, "%lu\t%p\t%d\t%lu\t%lu\n", HX_GET_BETTERTICKCOUNT(), this, m_pDataList->GetCount(), pAudioData->ulAudioTime, (UINT32)m_llLastWriteTime); ::fclose(f1);}

    return theErr;
}

HX_RESULT CHXAudioStream::ProcessInfo(void)
{
    HX_RESULT theErr = HXR_OK;

    m_ulSampleFrameSize   = m_AudioFmt.uChannels * (m_AudioFmt.uBitsPerSample >> 3);
    m_ulPendingAudioBytes = 0;

    // Calculate the number of bytes per granularity.
    // XXX wschildbach: These formulas are suspect. There is no feedback to the player
    // or audio session that this is what we assume for a size. I believe this is either
    // not needed or too complex.
    m_ulInputBytesPerGran  = (ULONG32) (((m_AudioFmt.uChannels *
                                          (m_AudioFmt.uBitsPerSample >> 3) *
                                          m_AudioFmt.ulSamplesPerSec) / 1000.0) * m_ulGranularity);
    m_ulOutputBytesPerGran = (ULONG32) (((m_DeviceFmt.uChannels *
                                          (m_DeviceFmt.uBitsPerSample >> 3) *
                                          m_DeviceFmt.ulSamplesPerSec) / 1000.0) * m_ulGranularity);

    // Make sure that number of bytes per granularity is an even number.
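    // NOTE (added): more precisely, the adjustment below rounds each value down
    // to a whole sample frame (bytes per sample * channels), so a granule of
    // data never ends in the middle of a frame.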
    m_ulInputBytesPerGran  -= m_ulInputBytesPerGran  % ((m_AudioFmt.uBitsPerSample >> 3) * m_AudioFmt.uChannels);
    m_ulOutputBytesPerGran -= m_ulOutputBytesPerGran % ((m_DeviceFmt.uBitsPerSample >> 3) * m_DeviceFmt.uChannels);

    if (!theErr)
    {
        // set up the mixing engine
        // XXX wschildbach
        theErr = m_pMixEngine->Init(m_AudioFmt.ulSamplesPerSec, m_DeviceFmt.ulSamplesPerSec,
                                    m_AudioFmt.uChannels, m_DeviceFmt.uChannels);
        if (SUCCEEDED(theErr))
            theErr = m_pMixEngine->SetSampleConverter(this);
        if (SUCCEEDED(theErr))
            theErr = m_pMixEngine->SetOutputBytesPerSample(m_DeviceFmt.uBitsPerSample / 8);

        // set the volume (somebody might have set it when we did not have an engine)
#ifdef HELIX_FEATURE_GAINTOOL
        if (SUCCEEDED(theErr))
            m_pMixEngine->SetVolume(m_pMixEngine->HXVolume2TenthOfDB(m_bMute ? HX_MIN_VOLUME : m_uVolume));
#endif
    }

    if (!theErr)
    {
        m_bInited = TRUE;
        if (m_eState == E_STOPPED)
        {
            m_eState = E_INITIALIZED;
        }
    }

    /* Get the current player time to set the last write audio time.
     * If someone creates a stream mid presentation, we ask the player
     * object for the current write time.
     */
    // set last write time to be the current playback time since
    // this is what other system components (i.e. renderers) are based on
    // fixed b#69847 - loss of push-down-worth of data =
    //   m_Owner->GetLastAudioWriteTime() - m_Owner->GetCurrentPlayBackTime()
//  m_llLastWriteTime = m_Owner->GetCurrentPlayBackTime();
    // XXXRA: It is necessary to use last audio write time for any delayed
    // audio streams to work that do not involve any Pause/Rewind logic.
    // To cover the case where a source (and an audio stream) has been added
    // mid-playback by SMIL renderer which has a delay equivalent to the
    // current playback time, it should result in a player rewind which should
    // reset the lastaudiowrite time accordingly...so we should be able
    // to use m_Owner->GetLastAudioWriteTime() value in such a use case as well.
    // this change is required to fix PR 79161 and PR 69780.
    // Henry, PR 69847 (the reason for the earlier change) is still busted.
    // so I am reverting this code to the original code. you will have
    // to come up with a different fix for PR 69847 since this was clearly not
    // the correct fix.
    m_llLastWriteTime = m_Owner->GetLastAudioWriteTime();
    m_pMixEngine->ResetTimeLineInMillis(m_llLastWriteTime);

    if (!theErr && m_bInited)
    {
        m_Owner->StreamInitialized(this);
    }

    return theErr;
}

/************************************************************************
 *  Method:
 *      IHXAudioStream::GetFormat
 *  Purpose:
 *      Return the stream's audio format.
 */
HX_RESULT CHXAudioStream::GetFormat(HXAudioFormat* pAudioFormat)
{
    if (!m_bAudioFormatKnown)
    {
        return HXR_NOT_INITIALIZED;
    }

    pAudioFormat->uChannels       = m_AudioFmt.uChannels;
    pAudioFormat->uBitsPerSample  = m_AudioFmt.uBitsPerSample;
    pAudioFormat->ulSamplesPerSec = m_AudioFmt.ulSamplesPerSec;
    pAudioFormat->uMaxBlockSize   = m_AudioFmt.uMaxBlockSize;

    return HXR_OK;
}

/************************************************************************
 *  Method:
 *      IHXAudioStream::Setup
 *  Purpose:
 *      This is called by the player's Setup method. At this
 *      time the audio device format is set and we can now
 *      set up the stream's pre-mixing buffer. This buffer
 *      contains data that has been resampled to match the
 *      audio device format.
 */
HX_RESULT CHXAudioStream::Setup(HXAudioFormat* pFormat, ULONG32 ulGranularity)
{
    HX_RESULT theErr = HXR_OK;

    memcpy(&m_DeviceFmt, pFormat, sizeof(HXAudioFormat));
    m_ulGranularity = ulGranularity;
    m_bSetupDone    = TRUE;

    /* we have all the info now.. so setup the resampler */
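    // NOTE (added): Setup() itself only records the device format and the
    // granularity; the mixing engine is actually initialized in ProcessInfo(),
    // and only once the stream's own input format is also known
    // (m_bAudioFormatKnown).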
    if (m_bAudioFormatKnown && !m_bInited)
    {
        theErr = ProcessInfo();
    }

    return theErr;
}

/************************************************************************
 *  Method:
 *      IHXAudioStream::ResetStream
 *  Purpose:
 */
void CHXAudioStream::ResetStream()
{
    m_bInited           = FALSE;
    m_bCanBeRewound     = FALSE;
    m_bSetupDone        = FALSE;
    m_bAudioFormatKnown = FALSE;
    m_bIsResumed        = FALSE;

    UnRegister();

    while (m_pAvailableBuffers && m_pAvailableBuffers->GetCount() > 0)
    {
        IHXBuffer* pBuffer = (IHXBuffer*) m_pAvailableBuffers->RemoveHead();
        HX_RELEASE(pBuffer);
    }