
📄 g723_func.c

📁 An implementation of G.723 on ARM. The target platform is Linux 2.4.8 + Intel XScale. Source code included.
💻 C
📖 Page 1 of 5
    nTemp1 = *hpfZiir;
    if (enableHighpassFilter)
    {
        /* high pass the speech signal */
        for ( i = 0; i < IPP_G723_FRAME_LEN; i ++ )
        {
            iTemp2 = pSrcSpch[i];
            nTemp2 = ((Ipp32s)iTemp2 - iTemp1) << 15;
            nTemp3 = appsMul16by32_G723_32s(nTemp1, 32512);
            nTemp1 = nTemp2 + nTemp3;
            pDstLpcAnalysisBuf[i+IPP_G723_LPCWIN_LEN-IPP_G723_SUBFRAME_LEN] = (Ipp16s)((nTemp1 + _G723_Q15) >> 16);
            iTemp1 = iTemp2;
        }
    }
    else
    {
        /* highpass filter disabled: just scale the input by 1/2 */
        for ( i = 0; i < IPP_G723_FRAME_LEN; i ++ )
        {
            iTemp2 = pSrcSpch[i];
            pDstLpcAnalysisBuf[i+IPP_G723_LPCWIN_LEN-IPP_G723_SUBFRAME_LEN] = iTemp2 >> 1;
        }
    }
    /* update filter memory */
    *hpfZfir = iTemp1;
    *hpfZiir = nTemp1;
    /* Prepare input speech buffer for LPC autocorrelation analysis (Levinson-Durbin) */
    for ( i = 0; i < IPP_G723_LPCWIN_LEN-IPP_G723_SUBFRAME_LEN; i ++ )
        pDstLpcAnalysisBuf[i] = SpeechHistoryBuf[i];
    return(IPP_STATUS_OK);
} /* appsPreprocess_G723_I */

IppStatus appsAutoCorrSum_G723_16s(Ipp16s *pSrcRss, Ipp16s *pSrcDstFrameAutoCorr, Ipp16s *pSrcDstFrameAutoCorrExp)
{
    int i, j;
    Ipp16s iTemp1, iTemp2;
    Ipp32s nTemp1, nAutoCorr[IPP_G723_LPC_ORDER+1];

    /* update the old elements of the summed autocorrelation coefficients */
    for ( i = (IPP_G723_LPC_ORDER+1)*3-1; i >= 0; i -- ) {
        pSrcDstFrameAutoCorr[i+(IPP_G723_LPC_ORDER+1)] = pSrcDstFrameAutoCorr[i];
    }
    for ( i = 2; i >= 0; i -- ) {
        pSrcDstFrameAutoCorrExp[i+1] = pSrcDstFrameAutoCorrExp[i];
    }

    /* calculate the new elements of the summed autocorrelation coefficients */
    iTemp1 = pSrcRss[(IPP_G723_LPC_ORDER+1)];
    for ( i = 1; i < 4; i ++ ) {
        if ( iTemp1 > pSrcRss[i*((IPP_G723_LPC_ORDER+1)+1)+(IPP_G723_LPC_ORDER+1)] ) {
            iTemp1 = pSrcRss[i*((IPP_G723_LPC_ORDER+1)+1)+(IPP_G723_LPC_ORDER+1)];
        }
    }
    iTemp1 += 14;
    for ( i = 0; i < (IPP_G723_LPC_ORDER+1); i ++ ) {
        nAutoCorr[i] = 0;
    }
    for ( i = 0; i < 4; i ++ ) {
        iTemp2 = iTemp1 - pSrcRss[i*((IPP_G723_LPC_ORDER+1)+1)+(IPP_G723_LPC_ORDER+1)];
        if ( iTemp2 > 0 ) {
            for ( j = 0; j < (IPP_G723_LPC_ORDER+1); j ++ ) {
                nAutoCorr[j] += ((Ipp32s)pSrcRss[i*((IPP_G723_LPC_ORDER+1)+1)+j] << iTemp2);
            }
        } else {
            for ( j = 0; j < (IPP_G723_LPC_ORDER+1); j ++ ) {
                nAutoCorr[j] += ((Ipp32s)pSrcRss[i*((IPP_G723_LPC_ORDER+1)+1)+j] >> (-iTemp2));
            }
        }
    }
    iTemp2 = 16 - appsNorm32_G723_32s(nAutoCorr[0], &nTemp1);
    if ( iTemp2 < 0 ) {
        iTemp2 = 0;
    }
    for ( i = 0; i < (IPP_G723_LPC_ORDER+1); i ++ ) {
        nAutoCorr[i] >>= iTemp2;
        pSrcDstFrameAutoCorr[i] = (Ipp16s)(nAutoCorr[i]);
    }
    pSrcDstFrameAutoCorrExp[0] = iTemp1 - iTemp2;
    return(IPP_STATUS_OK);
} /* appsAutoCorrSum_G723_16s */

IppStatus appsSinDetect_G723_I(Ipp16s *SineDetect)
{
    int i;
    Ipp16s iTemp1, iTemp2;

    /* Update sine detector */
    *SineDetect &= IPP_MAX_16S;
    iTemp1 = *SineDetect;
    iTemp2 = 0;
    for ( i = 0; i < 15; i ++ )
    {
        iTemp2 += (iTemp1 & 1);
        iTemp1 >>= 1;
    }
    if ( iTemp2 >= 14 )
        *SineDetect |= IPP_MIN_16S;
    return(IPP_STATUS_OK);
} /* appsSinDetect_G723_I */

IppStatus appsLSFInterp_G723_16s(Ipp16s *pSrcLsf, Ipp16s *pDstInterpLsf,
                                 Ipp16s pDstQuantLpc[IPP_G723_NUM_SUBFRAME][IPP_G723_LPC_ORDER], Ipp16s *prevLsf)
{
    int i, j;
    Ipp16s iTemp1, iTemp2;
    Ipp32s nTemp1;

    for ( i = 0; i < IPP_G723_NUM_SUBFRAME-1; i ++ )
    {
        iTemp1 = _G723_Q15_025 * (i + 1);
        iTemp2 = _G723_Q15_025 * (IPP_G723_NUM_SUBFRAME - i - 1);
        for ( j = 0; j < IPP_G723_LPC_ORDER; j ++ )
        {
            nTemp1 = (Ipp32s)prevLsf[j] * iTemp2 + (Ipp32s)pSrcLsf[j] * iTemp1;
            pDstInterpLsf[j] = (Ipp16s)((nTemp1 + _G723_Q14) >> 15);
        }
        ippsLSFToLPC_G723_16s(pDstInterpLsf, pDstQuantLpc[i]);
    }
    ippsLSFToLPC_G723_16s(pSrcLsf, pDstQuantLpc[IPP_G723_NUM_SUBFRAME-1]);
    /* Maintain LSF history in the encoder state */
    for ( i = 0; i < IPP_G723_LPC_ORDER; i ++ )
        prevLsf[i] = pSrcLsf[i];
    return(IPP_STATUS_OK);
}

IppStatus appsPerceptualWeightingFilter_G723_16s( const Ipp16s *pSrcSpch,
                                                  Ipp16s pSrcLpc[IPP_G723_NUM_SUBFRAME][IPP_G723_LPC_ORDER],
                                                  Ipp16s pDstPrcptWgtLpc[IPP_G723_NUM_SUBFRAME<<1][IPP_G723_LPC_ORDER],
                                                  Ipp16s *pDstPrcptWgtSpch,
                                                  Ipp16s *pwfZfir,
                                                  Ipp16s *pwfZiir,
                                                  Ipp16s *prevWgtSpch)
{
    int i, j, k;

    /* construct the perceptual weighting filter (PWF) coefficients */
    for ( i = 0; i < IPP_G723_NUM_SUBFRAME; i ++ )
    {
        k = i << 1;
        for ( j = 0; j < IPP_G723_LPC_ORDER; j ++ )
        {
            pDstPrcptWgtLpc[k][j]   = (Ipp16s)(((Ipp32s)pSrcLpc[i][j] * gamma1[j] + _G723_Q14) >> 15);
            pDstPrcptWgtLpc[k+1][j] = (Ipp16s)(((Ipp32s)pSrcLpc[i][j] * gamma2[j] + _G723_Q14) >> 15);
        }
    }
    for ( i = 0; i < IPP_G723_NUM_SUBFRAME; i ++ )
        _ippsPerceptualWeighting_G723_16s(pSrcSpch+i*IPP_G723_SUBFRAME_LEN,
                                          pDstPrcptWgtLpc[i<<1],
                                          pwfZfir,
                                          pwfZiir,
                                          pDstPrcptWgtSpch+i*IPP_G723_SUBFRAME_LEN+IPP_G723_MAXLAG);
    for ( i = 0; i < IPP_G723_MAXLAG; i ++ )
        pDstPrcptWgtSpch[i] = prevWgtSpch[i];
    return(IPP_STATUS_OK);
} /* appsPerceptualWeightingFilter_G723_16s */

IppStatus appsOpenLoopPitchSearchPreprocess_G723_16s(Ipp16s *pSrcPrcptWgtSpch,
                                                     Ipp16s *pDstOLPSAnalysisBuf,
                                                     Ipp16s *PrcptWgtSpchHist)
{
    Ipp16s iTemp1, iTemp2;
    int i;

    /* Prepare an analysis buffer for the open loop pitch search (OLPS):
       1. Load the last IPP_G723_MAXLAG (145) weighted samples of the previous frame into
          the first IPP_G723_MAXLAG samples of the current frame's OLPS analysis buffer.
       2. The remaining 240 samples of the OLPS analysis buffer were already generated by the PWF, above.
       3. Identify the element of largest magnitude in the entire OLPS buffer and normalize.
       4. Update the perceptually weighted speech history buffer for the next frame's OLPS analysis.
    */
    /* Load the first IPP_G723_MAXLAG samples from the last frame, tracking the largest magnitude */
    iTemp1 = 0;
    for ( i = 0; i < IPP_G723_MAXLAG; i ++ )
    {
        iTemp2 = PrcptWgtSpchHist[i];
        pDstOLPSAnalysisBuf[i] = iTemp2;
        if ( iTemp2 == IPP_MIN_16S )
            iTemp2 = -IPP_MAX_16S;
        if ( iTemp2 > 0 && iTemp2 > iTemp1 )
            iTemp1 = iTemp2;
        else if ( iTemp2 < 0 && (-iTemp2) > iTemp1 )
            iTemp1 = -iTemp2;
    }
    /* Identify the largest magnitude in the last 240 samples of the OLPS buffer */
    for ( i = 0; i < IPP_G723_FRAME_LEN; i ++ )
    {
        iTemp2 = pSrcPrcptWgtSpch[IPP_G723_MAXLAG+i];
        if ( iTemp2 == IPP_MIN_16S )
            iTemp2 = -IPP_MAX_16S;
        if ( iTemp2 > 0 && iTemp2 > iTemp1 )
            iTemp1 = iTemp2;
        else if ( iTemp2 < 0 && (-iTemp2) > iTemp1 )
            iTemp1 = -iTemp2;
    }
    /* Normalize the OLPS buffer given the largest element identified during the scan above */
    iTemp2 = appsNorm16_G723_16s(iTemp1, &iTemp1) - 3;
    if ( iTemp2 > 0 )
    {
        for ( i = 0; i < IPP_G723_MAXLAG; i ++ )
            pDstOLPSAnalysisBuf[i] <<= iTemp2;
        for ( i = IPP_G723_MAXLAG; i < IPP_G723_MAXLAG+IPP_G723_FRAME_LEN; i ++ )
            pDstOLPSAnalysisBuf[i] = pSrcPrcptWgtSpch[i] << iTemp2;
    }
    else
    {
        for ( i = 0; i < IPP_G723_MAXLAG; i ++ )
            pDstOLPSAnalysisBuf[i] >>= (-iTemp2);
        for ( i = IPP_G723_MAXLAG; i < IPP_G723_MAXLAG+IPP_G723_FRAME_LEN; i ++ )
            pDstOLPSAnalysisBuf[i] = pSrcPrcptWgtSpch[i] >> (-iTemp2);
    }
    /* Maintain a history of the perceptually weighted speech in the encoder state,
       to be used for the OLPS during the next frame */
    for ( i = 0; i < IPP_G723_MAXLAG; i ++ )
        PrcptWgtSpchHist[i] = pSrcPrcptWgtSpch[IPP_G723_FRAME_LEN+i];
    return(IPP_STATUS_OK);
} /* appsOpenLoopPitchSearchPreprocess_G723_16s */

IppStatus appsApplyHarmonicNoiseShaping_G723_16s(Ipp16s *pSrcPrcptWgtSpch,
                                                 Ipp16s *pDstHarmonicWgtSpch,
                                                 Ipp16s *HarmonicDelay,
                                                 Ipp16s *HarmonicGain)
{
    /* Apply the harmonic noise shaping (HNS) filter on all subframes.
       Generate the harmonically weighted speech sequence, w(n), by applying
       the HNS filter (Eq. 18 of [1], sec. 2.11, p. 8) to the perceptually
       weighted speech sequence, f(n).
    */
    Ipp16s iTemp2, iTemp3;
    Ipp32s nTemp1;
    int i, j;

    for ( i = 0; i < IPP_G723_NUM_SUBFRAME; i ++ )
    {
        iTemp2 = (Ipp16s)(i * IPP_G723_SUBFRAME_LEN);
        iTemp3 = iTemp2 + IPP_G723_MAXLAG - HarmonicDelay[i];
        for ( j = 0; j < IPP_G723_SUBFRAME_LEN; j ++ )
        {
            nTemp1 = (Ipp32s)pSrcPrcptWgtSpch[IPP_G723_MAXLAG+iTemp2+j] << 15;
            nTemp1 -= (Ipp32s)pSrcPrcptWgtSpch[iTemp3+j] * HarmonicGain[i];
            nTemp1 = (nTemp1 + _G723_Q14) >> 15;
            if ( nTemp1 > IPP_MAX_16S )
                pDstHarmonicWgtSpch[iTemp2+j] = IPP_MAX_16S;
            else if ( nTemp1 < IPP_MIN_16S )
                pDstHarmonicWgtSpch[iTemp2+j] = IPP_MIN_16S;
            else
                pDstHarmonicWgtSpch[iTemp2+j] = (Ipp16s)nTemp1;
        }
    }
    return(IPP_STATUS_OK);
} /* appsApplyHarmonicNoiseShaping_G723_16s */

IppStatus appsComputeFixedTargetVector_G723_16s_I(Ipp16s *pSrcDstFixedCbTarget,
                                                  Ipp16s *AdaptCbVect,
                                                  Ipp16s *ImpulseResp)
{
    /* Generate the fixed codebook target, r(n) = t(n) - p(n), by filtering the
       adaptive codebook vector through Si(z) and subtracting the output from
       the sequence t(n) (stored in iSpch[]).
    */
    int j, k;
    Ipp32s nTemp1;

    for ( j = 0; j < IPP_G723_SUBFRAME_LEN; j ++ )
    {
        nTemp1 = (Ipp32s)pSrcDstFixedCbTarget[j] << 15;
        for ( k = 0; k <= j; k ++ )
            nTemp1 -= ((Ipp32s)AdaptCbVect[k] * ImpulseResp[j-k]) << 1;
        nTemp1 = (nTemp1 + _G723_Q14) >> 15;
        if ( nTemp1 > IPP_MAX_16S )
            pSrcDstFixedCbTarget[j] = IPP_MAX_16S;
        else if ( nTemp1 < IPP_MIN_16S )
            pSrcDstFixedCbTarget[j] = IPP_MIN_16S;
        else
            pSrcDstFixedCbTarget[j] = (Ipp16s)nTemp1;
    }
    return(IPP_STATUS_OK);
} /* appsComputeFixedTargetVector_G723_16s_I */

IppStatus appsPitchSyncFilter_G723_16s_I(Ipp16s *pSrcDstImpulseResp,
                                         Ipp16s OpenLoopPitchLag,
                                         Ipp16s ClosedLoopPitchOffset,
                                         Ipp16s AdaptGainIndex,
                                         Ipp16s *pitchSyncIndex,
                                         Ipp16s *pitchSyncGain)
{
    /* For closed-loop pitch lags less than 60, modify the Si(z) impulse response by
       applying a pitch-synchronous filter ([1], p. 13, paragraph 3).
       The pitch-synchronous filter lag and gain are tabulated in [2] and duplicated here
       in pitchSyncFiltLagTable[] (pitch lag modifier) and pitchSyncFiltGainTable[] (gain modifier).
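As a sanity check on the interpolation arithmetic in appsLSFInterp_G723_16s, here is a minimal standalone sketch of the same Q15 blend for a single LSF coefficient. The constant values used below are assumptions, not definitions from this page: _G723_Q15_025 is taken to be 8192 (0.25 in Q15) and _G723_Q14 to be 16384 (1.0 in Q14, used as the rounding offset before the >> 15), both inferred from their names.

/* Standalone sketch (not part of g723_func.c): Q15 LSF interpolation for one
 * coefficient, mirroring the subframe loop in appsLSFInterp_G723_16s.
 * NUM_SUBFRAME, Q15_025 and Q14 are assumed stand-ins for the codec's
 * IPP_G723_NUM_SUBFRAME, _G723_Q15_025 and _G723_Q14. */
#include <stdio.h>
#include <stdint.h>

#define NUM_SUBFRAME 4
#define Q15_025      8192      /* 0.25 in Q15 (assumed value of _G723_Q15_025) */
#define Q14          16384     /* rounding offset before >> 15 (assumed value of _G723_Q14) */

/* Blend the previous and current frame's LSF value for subframes 0..2;
 * subframe 3 uses the current LSF directly, as in the original loop. */
static int16_t interp_lsf(int16_t prevLsf, int16_t curLsf, int subframe)
{
    int16_t wCur  = (int16_t)(Q15_025 * (subframe + 1));                 /* 0.25, 0.50, 0.75 in Q15 */
    int16_t wPrev = (int16_t)(Q15_025 * (NUM_SUBFRAME - subframe - 1));  /* weights sum to 1.0 in Q15 */
    int32_t acc   = (int32_t)prevLsf * wPrev + (int32_t)curLsf * wCur;
    return (int16_t)((acc + Q14) >> 15);                                 /* round and remove the Q15 scaling */
}

int main(void)
{
    int16_t prevLsf = 12000, curLsf = 16000;   /* arbitrary example values */
    for (int i = 0; i < NUM_SUBFRAME - 1; i++)
        printf("subframe %d: %d\n", i, interp_lsf(prevLsf, curLsf, i));
    printf("subframe 3: %d (current LSF used directly)\n", curLsf);
    return 0;
}

With these inputs the three interpolated subframes come out to 13000, 14000 and 15000, i.e. 75/25, 50/50 and 25/75 blends of the previous and current values, which is the progression the encoder loop produces before each ippsLSFToLPC_G723_16s conversion.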
