macros.h
    else return (I32) (f + 0.5f);
}

//---------------------------------------------------------------------------
#ifdef HITACHI
static I32 ROUNDD(double f) {
#else
static INLINE I32 ROUNDD(double f) {
#endif
    if (f < 0.0f)
        return (I32) (f - 0.5f);
    else
        return (I32) (f + 0.5f);
}
#endif // PLATFORM_SPECIFIC_ROUND

//---------------------------------------------------------------------------
#ifndef PLATFORM_SPECIFIC_BITCOPY
// assuming bsrc is zeroed out
#ifdef HITACHI
static void bitCpy (const U8* pbSrc, IntW iBitStartSrc, IntW cBits, U8* pbDst)
#else
static INLINE void bitCpy (const U8* pbSrc, IntW iBitStartSrc, IntW cBits, U8* pbDst)
#endif
{
    const U8* pbSrcEnd;
    IntW iShiftDown;
    U8 b2, b1;

    assert (pbSrc != NULL && pbDst != NULL);
    assert (iBitStartSrc < BITS_PER_DWORD && iBitStartSrc >= 0);
    assert (cBits >= 0);

    // Caller assumes we're copying DWORDs at a time (hangover from Intel)
    // Normalize pointers
    pbSrc += iBitStartSrc / BITS_PER_BYTE;
    iBitStartSrc %= BITS_PER_BYTE;
    pbSrcEnd = pbSrc + (iBitStartSrc > 0) + (((cBits - iBitStartSrc) + 7) & ~7) / 8; // open (exclusive) end
    iShiftDown = (BITS_PER_BYTE - iBitStartSrc);

    // Stitch each destination byte together from two adjacent source bytes
    b1 = *pbSrc;
    while (pbSrc < pbSrcEnd)
    {
        pbSrc++;
        b2 = *pbSrc;
        *pbDst = (b1 << iBitStartSrc) | (b2 >> iShiftDown);
        b1 = b2;
        pbDst++;
    }
}
#endif // PLATFORM_SPECIFIC_BITCOPY

#ifndef PLATFORM_SPECIFIC_FNPTR
#define PLATFORM_SPECIFIC_FNPTR // nothing for non-x86
#endif // PLATFORM_SPECIFIC_FNPTR

//**********************************************************************
// Support for FastFloat
//**********************************************************************
#if defined(BUILD_INTEGER) // FastFloat

// FastFloat is a quick way of handling values that exceed I32 range without incurring
// the expense of floating point emulation on integer-only platforms.
// real value = iFraction * pow( 2, -iFracBits )
// In debugger, iFraction*1.0F/(1<<iFracBits) works if 0<=iFracBits<31

// Normalize a FastFloat
#ifdef HITACHI
#pragma inline(Norm4FastFloat)
#else
INLINE
#endif
static void Norm4FastFloat( FastFloat* pfflt )
{
    // use the faster Norm4FastFloatU when you know the value is positive
    register UIntW uiF = abs(pfflt->iFraction);
    register IntW  iFB = 0;

    if ( uiF == 0 )
    {
        pfflt->iFracBits = 0;
        return;
    }
    // shift the magnitude up toward full precision; the represented value is
    // unchanged because iFracBits grows by the same amount
    while ( uiF < 0x1FFFFFFF ) { uiF <<= 2; iFB += 2; }
    if ( uiF < 0x3FFFFFFF )    { iFB += 1; }
    pfflt->iFraction <<= iFB;
    pfflt->iFracBits += iFB;
}

#ifdef HITACHI
#pragma inline(Norm4FastFloatU)
#else
INLINE
#endif
static void Norm4FastFloatU( FastFloat* pfflt )
{
    // same as above when we know the value is positive (which we often do)
    register UIntW uiF = pfflt->iFraction;
    register IntW  iFB = 0;

    assert( uiF > 0 );
    while ( uiF < 0x1FFFFFFF ) { uiF <<= 2; iFB += 2; }
    if ( uiF < 0x3FFFFFFF )    { uiF <<= 1; iFB += 1; }
    pfflt->iFraction = uiF;
    pfflt->iFracBits += iFB;
}

#ifdef HITACHI
#pragma inline(ffltMultiply)
#else
INLINE
#endif
static FastFloat ffltMultiply( FastFloat ffltA, FastFloat ffltB )
{
    FastFloat ffltP;
    ffltP.iFraction = MULT_HI( ffltA.iFraction, ffltB.iFraction );
    ffltP.iFracBits = (ffltA.iFracBits + ffltB.iFracBits - 31);
    Norm4FastFloat( &ffltP );
    return ffltP;
}
# define FASTFLOAT_MULT(a,b) ffltMultiply((a),(b))

#ifdef HITACHI
#pragma inline(ffltfltMultiply)
#else
INLINE
#endif
static FastFloat ffltfltMultiply( FastFloat ffltA, IntW B, IntW bits )
{
    FastFloat ffltP;
    ffltP.iFracBits = ffltA.iFracBits;
    ffltP.iFraction = MULT_HI32_SHIFT(ffltA.iFraction, B, bits);
    Norm4FastFloat(&ffltP);
    return ffltP;
}
# define FASTFLOAT_FLOAT_MULT(a,b,bits) ffltfltMultiply((a), (b), (bits))
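The FastFloat representation above stores real value = iFraction * 2^-iFracBits, so multiplying two values multiplies the fractions and adds the iFracBits exponents; ffltMultiply additionally subtracts 31 for the high-part shift MULT_HI applies and then renormalizes. Below is a minimal standalone sketch of the representation itself; FastFloatSketch and sketchToDouble are hypothetical stand-ins built on stdint types and are not part of macros.h or the codec.

/* Illustrative sketch only; FastFloatSketch/sketchToDouble are stand-ins, not codec API. */
#include <stdint.h>
#include <stdio.h>

typedef struct { int32_t iFraction; int iFracBits; } FastFloatSketch;

static double sketchToDouble(FastFloatSketch f)
{
    /* real value = iFraction * pow(2, -iFracBits) */
    return (double)f.iFraction / (double)((int64_t)1 << f.iFracBits);
}

int main(void)
{
    FastFloatSketch a = { 3, 2 };            /* 3 * 2^-2 = 0.75              */
    FastFloatSketch b = { 0x30000000, 30 };  /* same value, normalized form  */
    printf("%f %f\n", sketchToDouble(a), sketchToDouble(b));  /* 0.750000 0.750000 */
    return 0;
}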
#ifdef HITACHI
#pragma inline(ffltAdd)
#else
INLINE
#endif
static FastFloat ffltAdd( FastFloat ffltA, FastFloat ffltB )
{
    FastFloat ffltP;

    // drop one bit where needed so the aligned addition cannot overflow I32
    if (ffltA.iFraction > 0x3FFFFFFF) { ffltA.iFraction >>= 1; ffltA.iFracBits--; }
    if (ffltB.iFraction > 0x3FFFFFFF) { ffltB.iFraction >>= 1; ffltB.iFracBits--; }

    // align to the smaller iFracBits (coarser scale) and add
    if (ffltA.iFracBits >= ffltB.iFracBits)
    {
        ffltP.iFracBits = ffltB.iFracBits;
        ffltP.iFraction = ffltB.iFraction + (ffltA.iFraction >> (ffltA.iFracBits - ffltB.iFracBits));
    }
    else
    {
        ffltP.iFracBits = ffltA.iFracBits;
        ffltP.iFraction = ffltA.iFraction + (ffltB.iFraction >> (ffltB.iFracBits - ffltA.iFracBits));
    }
    Norm4FastFloat(&ffltP);
    return ffltP;
}
# define FASTFLOAT_ADD(a,b) ffltAdd((a),(b))

#ifdef HITACHI
#pragma inline(FastFloatFromFloat)
#else
INLINE
#endif
static FastFloat FastFloatFromFloat(Float flt)
{
    FastFloat fflt;
    Float fltScale = (Float)(1<<(31-24));

    fflt.iFracBits = 24;
    while( flt < -fltScale || fltScale < flt )
    {
        fflt.iFracBits -= 1;
        fltScale *= 2;
    }
    fflt.iFraction = (I32)(flt*(1<<fflt.iFracBits));
    Norm4FastFloat( &fflt );
    return fflt;
}

// floor of log2(flt)
#ifdef HITACHI
#pragma inline(Log2FromFloat)
#else
INLINE
#endif
static IntW Log2FromFloat(Float flt)
{
    IntW  i = 0;
    Float fltScale = 2.0;

    assert(flt >= 0.0);
    while (fltScale <= flt)
    {
        i++;
        fltScale *= 2;
    }
    return i;
}

// floor of log2(fflt.iFraction*2^-fflt.iFracBits)
// = floor(log2(fflt.iFraction)) - fflt.iFracBits
#ifdef HITACHI
#pragma inline(Log2FromFastFloat)
#else
INLINE
#endif
static IntW Log2FromFastFloat(FastFloat fflt)
{
    return LOG2(fflt.iFraction) - fflt.iFracBits;
}

#ifdef HITACHI
#pragma inline(FloatFromFastFloat)
#else
INLINE
#endif
static Float FloatFromFastFloat( FastFloat fflt )
{
    assert( 0 <= fflt.iFracBits && fflt.iFracBits <= 50 );
    if ( fflt.iFracBits > 30 )
        // 1048576 = 2^20; splitting the divisor keeps the 32-bit shift in range
        return fflt.iFraction/(1048576.0F*(1<<(fflt.iFracBits-20)));
    else
        return fflt.iFraction/((Float)(1<<fflt.iFracBits));
}
# define FASTFLOAT_FROM_FLOAT(flt)   FastFloatFromFloat(flt)
# define FLOAT_FROM_FASTFLOAT(fflt)  FloatFromFastFloat(fflt)
# define DOUBLE_FROM_FASTFLOAT(fflt) ((double)fflt.iFraction/(1<<fflt.iFracBits))
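FloatFromFastFloat above splits its divisor when iFracBits exceeds 30 because a 32-bit shift of 1 << iFracBits would overflow; dividing by 2^20 (1048576.0F) and then by 2^(iFracBits-20) reaches the same scale with an in-range shift. Below is a minimal standalone sketch of that branch, using assumed stdint stand-ins rather than the codec's Float/I32 types.

/* Sketch only; mirrors the iFracBits > 30 branch above, not part of macros.h. */
#include <stdint.h>
#include <stdio.h>

static float sketchFloatFromFastFloat(int32_t iFraction, int iFracBits)
{
    if (iFracBits > 30)
        return iFraction / (1048576.0f * (float)(1 << (iFracBits - 20)));  /* 2^20 * 2^(iFracBits-20) */
    return iFraction / (float)(1 << iFracBits);
}

int main(void)
{
    /* 0x40000000 * 2^-31 = 0.5, which exercises the large-exponent branch */
    printf("%f\n", sketchFloatFromFastFloat(0x40000000, 31));
    return 0;
}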
#ifdef HITACHI
#pragma inline(FastFloatFromU64)
#else
INLINE
#endif
static FastFloat FastFloatFromU64(U64 u64, IntW cExp)
{
    FastFloat fflt;
    U32  uiMSF = (U32)(u64>>32);
    IntW iExp  = 0;

    if ( uiMSF==0 ) { iExp = 32; uiMSF = (U32)u64; }
    if ( uiMSF==0 ) { fflt.iFracBits = 0; fflt.iFraction = 0; return fflt; }

    // normalize the most significant fractional part
    while( (uiMSF & 0xF0000000)==0 ) { iExp += 3; uiMSF <<= 3; }
    while( (uiMSF & 0xC0000000)==0 ) { iExp++;    uiMSF <<= 1; }

    // number of fractional bits
    fflt.iFracBits = iExp+cExp-32;
#if defined(PLATFORM_OPTIMIZE_MINIMIZE_BRANCHING)
    fflt.iFraction = (U32)((u64<<iExp)>>32);
#else
    fflt.iFraction = (iExp>32) ? (U32)(u64<<(iExp-32)) : (U32)(u64>>(32-iExp));
#endif
    return fflt;
}
#define FASTFLOAT_FROM_U64(u64,exp) FastFloatFromU64(u64,exp)

typedef FastFloat QuantStepType;
#define DOUBLE_FROM_QUANTSTEPTYPE(qst)    DOUBLE_FROM_FASTFLOAT(qst)
#define FLOAT_FROM_QUANTSTEPTYPE(qst)     FLOAT_FROM_FASTFLOAT(qst)
#define FASTFLOAT_FROM_QUANTSTEPTYPE(qst) (qst)

#else // must be BUILD_INT_FLOAT

# define FASTFLOAT_FROM_FLOAT(flt)       (flt)
# define FLOAT_FROM_FASTFLOAT(fflt)      (fflt)
# define FASTFLOAT_MULT(a,b)             ((a)*(b))
# define FASTFLOAT_FLOAT_MULT(a,b,bits)  ((a)*(b))
# define FASTFLOAT_ADD(a,b)              ((a)+(b))
# define DOUBLE_FROM_FASTFLOAT(fflt)     ((double)fflt)
# define FASTFLOAT_FROM_U64(u64,exp)     (((Float)(u64))/(((U64)1)<<exp))

typedef Float QuantStepType;
#define DOUBLE_FROM_QUANTSTEPTYPE(qst)    ((Double)(qst))
#define FLOAT_FROM_QUANTSTEPTYPE(qst)     (qst)
#define FASTFLOAT_FROM_QUANTSTEPTYPE(qst) ((FastFloat)(qst))
#endif

// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
// Define Macros to switch auReconMono and auSaveHistoryMono between Integer and Float

#if defined(BUILD_INTEGER)

#define ROUND_SATURATE_STORE16(piDst,cf,mn,mx,iTmp) \
    assert(pau->m_nBytePerSample == 2); \
    *((I16*)piDst) = (I16) checkRange (cf, mn, mx); \
    iTmp = cf;

#define ROUND_SATURATE_STORE24(piDst,cf,mn,mx,iTmp) \
    assert(pau->m_nBytePerSample == 3); \
    prvSetSample24INLINE(checkRange (cf, mn, mx), piDst, pau, 0); \
    iTmp = cf;

#define ROUND_SATURATE_STORE2024(piDst,cf,mn,mx,iTmp) \
    assert(pau->m_nBytePerSample == 3); \
    prvSetSample2024INLINE(checkRange (cf, mn, mx), piDst, pau, 0); \
    iTmp = cf;

#define ROUND_SATURATE_STORE(piDst,cf,mn,mx,iTmp) \
    pau->m_pfnSetSample((PCMSAMPLE) checkRange (cf, mn, mx), piDst, pau, 0); \
    iTmp = cf;

#define ROUND_AND_CHECK_RANGE(it,flt,mn,mx) \
    if (flt < mn) \
        it = mn; \
    else if (flt > mx) \
        it = mx; \
    else \
        it = flt;

#else // BUILD_INT_FLOAT

#ifdef PLATFORM_SPECIFIC_ROUND
// Combined Round and Saturate is faster in floating point,
// but if the platform has a special Round function, we must use it.
#define ROUND_AND_CHECK_RANGE(it,flt,mn,mx) \
    it = (I32)ROUNDF_V4V5COMPARE(flt); \
    it = checkRange(it,mn,mx);
#else
// Combined Round and Saturate is faster in floating point
#define ROUND_AND_CHECK_RANGE(it,flt,mn,mx) \
    if ( flt < (V4V5COMPARE)0.0 ) { \
        it = (I32)( flt - ((V4V5COMPARE)0.5) ); \
        if ( it < mn ) it = mn; \
    } else { \
        it = (I32)( flt + ((V4V5COMPARE)0.5) ); \
        if ( it > mx ) it = mx; \
    }
#endif

#define ROUND_SATURATE_STORE16(piDst,cf,mn,mx,iTmp) \
    assert(pau->m_nBytePerSample == 2); \
    ROUND_AND_CHECK_RANGE( iTmp, cf, mn, mx ); \
    *((I16*)piDst) = (I16)iTmp;

#define ROUND_SATURATE_STORE24(piDst,cf,mn,mx,iTmp) \
    assert(pau->m_nBytePerSample == 3); \
    ROUND_AND_CHECK_RANGE( iTmp, cf, mn, mx ); \
    prvSetSample24INLINE(iTmp, piDst, pau, 0);

#define ROUND_SATURATE_STORE2024(piDst,cf,mn,mx,iTmp) \
    assert(pau->m_nBytePerSample == 3); \
    ROUND_AND_CHECK_RANGE( iTmp, cf, mn, mx ); \
    prvSetSample2024INLINE(iTmp, piDst, pau, 0);

#define ROUND_SATURATE_STORE(piDst,cf,mn,mx,iTmp) \
    ROUND_AND_CHECK_RANGE( iTmp, cf, mn, mx ); \
    pau->m_pfnSetSample((PCMSAMPLE)iTmp, piDst, pau, 0);

#endif // Both BUILD_INTEGER and BUILD_INT_FLOAT

// For lossless mode, reconstructed values are I32.
#define ROUND_SATURATE_STORE16_LLM(piDst,iResult,mn,mx) \
    assert(pau->m_nBytePerSample == 2); \
    iResult = checkRange(iResult,mn,mx); \
    *((I16*)piDst) = (I16)iResult;

#define ROUND_SATURATE_STORE24_LLM(piDst,iResult,mn,mx) \
    assert(
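The floating-point ROUND_AND_CHECK_RANGE above combines rounding and saturation: it rounds half away from zero by adding or subtracting 0.5 before the integer cast, and then clamps only against the bound the sign can actually hit (negative inputs against mn, positive inputs against mx). Below is a minimal standalone sketch of that behavior; sketchRoundAndClamp is a hypothetical stand-in, not the codec macro.

/* Sketch only; stand-in for the ROUND_AND_CHECK_RANGE macro in the float build. */
#include <stdint.h>
#include <stdio.h>

static int32_t sketchRoundAndClamp(float flt, int32_t mn, int32_t mx)
{
    int32_t it;
    if (flt < 0.0f) {
        it = (int32_t)(flt - 0.5f);   /* round half away from zero */
        if (it < mn) it = mn;         /* negative inputs can only underflow */
    } else {
        it = (int32_t)(flt + 0.5f);
        if (it > mx) it = mx;         /* positive inputs can only overflow */
    }
    return it;
}

int main(void)
{
    /* 16-bit PCM range */
    printf("%d\n", sketchRoundAndClamp(12345.6f,  -32768, 32767));  /* 12346  */
    printf("%d\n", sketchRoundAndClamp(40000.0f,  -32768, 32767));  /* 32767  */
    printf("%d\n", sketchRoundAndClamp(-40000.0f, -32768, 32767));  /* -32768 */
    return 0;
}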