/*
 * encodevop.c — MPEG-4 VOP encoder (I-VOP / P-VOP macroblock encoding).
 * NOTE: this copy was recovered from a web paste; hosting-site chrome
 * has been stripped and the file may be truncated.
 */
mp4_PredictIntraDCAC1(MBDC[0], refYDC[j2], refYDC[j2+1], &MBDC[1],
MBAC_A[0], refYAC[j2+1], MBAC_A[1], MBAC_C[1],
coeffMB+1*64, dcScalerLuma, &predDir[1], ac_pred_flag, &acPredSum0, &acPredSum1);
refYDC[j2] = MBDC[2];
mp4_PredictIntraDCAC1(MBDC[2], MBDC[0], MBDC[1], &MBDC[3],
MBAC_A[2], MBAC_C[1], MBAC_A[3], MBAC_C[3],
coeffMB+3*64, dcScalerLuma, &predDir[3], ac_pred_flag, &acPredSum0, &acPredSum1);
// memcpy(refYAC[j2+1], MBY_AC_C[3], 8*sizeof(short));
mp4_PredictIntraDCAC1(leftUDC, refUDC[j-1], refUDC[j], &MBDC[4],
leftUAC, refUAC[j], MBAC_A[4], MBAC_C[4],
coeffMB+4*64, dcScalerChroma, &predDir[4], ac_pred_flag, &acPredSum0, &acPredSum1);
refUDC[j-1] = leftUDC;
leftUDC = MBDC[4];
mp4_PredictIntraDCAC1(leftVDC, refVDC[j-1], refVDC[j], &MBDC[5],
leftVAC, refVAC[j], MBAC_A[5], MBAC_C[5],
coeffMB+5*64, dcScalerChroma, &predDir[5], ac_pred_flag, &acPredSum0, &acPredSum1);
refVDC[j-1] = leftVDC;
leftVDC = MBDC[5];
if (ac_pred_flag) {
if (acPredSum0 <= acPredSum1) {
ac_pred_flag = 0;
// mp4_RestoreAC(MBcurr, coeffMB+0*64, 0, predDir[0]);
// mp4_RestoreAC(MBcurr, coeffMB+1*64, 1, predDir[1]);
// mp4_RestoreAC(MBcurr, coeffMB+2*64, 2, predDir[2]);
// mp4_RestoreAC(MBcurr, coeffMB+3*64, 3, predDir[3]);
// mp4_RestoreAC(MBcurr, coeffMB+4*64, 4, predDir[4]);
// mp4_RestoreAC(MBcurr, coeffMB+5*64, 5, predDir[5]);
mp4_RestoreAC(leftYAC[0], refYAC[j2], coeffMB+0*64, predDir[0]);
mp4_RestoreAC(MBAC_A[0], refYAC[j2+1], coeffMB+1*64, predDir[1]);
mp4_RestoreAC(leftYAC[1], MBAC_C[0], coeffMB+2*64, predDir[2]);
mp4_RestoreAC(MBAC_A[2], MBAC_C[1], coeffMB+3*64, predDir[3]);
mp4_RestoreAC(leftUAC, refUAC[j], coeffMB+4*64, predDir[4]);
mp4_RestoreAC(leftVAC, refVAC[j], coeffMB+5*64, predDir[5]);
predDir[0] = predDir[1] = predDir[2] = predDir[3] = predDir[4] = predDir[5] = IPP_VIDEO_NONE;
} else {
// check pattern after predict AC
// if (ac_pred_flag) __070613_2__no_use;
mp4_CheckPattern(coeffMB, &pattern);
}
}
cbpc = pattern & 3;
cbpy = pattern >> 2;
// encode mcbpc
// EncodeMCBPC_I(MBcurr->type, cbpc);
EncodeMCBPC_I(type, cbpc);
// encode ac_pred_flag
PutBits(ac_pred_flag, 1);
// encode cbpy
EncodeCBPY_I(cbpy);
// Encode blocks
EncodeBlockIntra_MPEG4(coeffMB + 0*64, use_intra_dc_vlc, 0, cbpy & 8, predDir[0]);
EncodeBlockIntra_MPEG4(coeffMB + 1*64, use_intra_dc_vlc, 1, cbpy & 4, predDir[1]);
EncodeBlockIntra_MPEG4(coeffMB + 2*64, use_intra_dc_vlc, 2, cbpy & 2, predDir[2]);
EncodeBlockIntra_MPEG4(coeffMB + 3*64, use_intra_dc_vlc, 3, cbpy & 1, predDir[3]);
EncodeBlockIntra_MPEG4(coeffMB + 4*64, use_intra_dc_vlc, 4, cbpc & 2, predDir[4]);
EncodeBlockIntra_MPEG4(coeffMB + 5*64, use_intra_dc_vlc, 5, cbpc & 1, predDir[5]);
// restore VOP
// if (mIVOPdist != 1 || mCalcPSNR) {
if (ac_pred_flag) {
// mp4_RestoreAC(MBcurr, coeffMB+0*64, 0, predDir[0]);
// mp4_RestoreAC(MBcurr, coeffMB+1*64, 1, predDir[1]);
// mp4_RestoreAC(MBcurr, coeffMB+2*64, 2, predDir[2]);
// mp4_RestoreAC(MBcurr, coeffMB+3*64, 3, predDir[3]);
// mp4_RestoreAC(MBcurr, coeffMB+4*64, 4, predDir[4]);
// mp4_RestoreAC(MBcurr, coeffMB+5*64, 5, predDir[5]);
mp4_RestoreAC(leftYAC[0], refYAC[j2], coeffMB+0*64, predDir[0]);
mp4_RestoreAC(MBAC_A[0], refYAC[j2+1], coeffMB+1*64, predDir[1]);
mp4_RestoreAC(leftYAC[1], MBAC_C[0], coeffMB+2*64, predDir[2]);
mp4_RestoreAC(MBAC_A[2], MBAC_C[1], coeffMB+3*64, predDir[3]);
mp4_RestoreAC(leftUAC, refUAC[j], coeffMB+4*64, predDir[4]);
mp4_RestoreAC(leftVAC, refVAC[j], coeffMB+5*64, predDir[5]);
mp4_CheckPattern(coeffMB, &pattern);
}
memcpy(leftYAC[0], MBAC_A[1], 8*sizeof(short));
memcpy(leftYAC[1], MBAC_A[3], 8*sizeof(short));
memcpy(refYAC[j2], MBAC_C[2], 8*sizeof(short));
memcpy(refYAC[j2+1], MBAC_C[3], 8*sizeof(short));
memcpy(leftUAC, MBAC_A[4], 8*sizeof(short));
memcpy(refUAC[j], MBAC_C[4], 8*sizeof(short));
memcpy(leftVAC, MBAC_A[5], 8*sizeof(short));
memcpy(refVAC[j], MBAC_C[5], 8*sizeof(short));
// mp4_Inv_Quant_DCT_Intra_MPEG4(pY, mStepLuma, 0);
// mp4_Inv_Quant_DCT_Intra_MPEG4(pY+8, mStepLuma, 1);
// mp4_Inv_Quant_DCT_Intra_MPEG4(pY+8*mStepLuma, mStepLuma, 2);
// mp4_Inv_Quant_DCT_Intra_MPEG4(pY+8*mStepLuma+8, mStepLuma, 3);
// mp4_Inv_Quant_DCT_Intra_MPEG4(pU, mStepChroma, 4);
// mp4_Inv_Quant_DCT_Intra_MPEG4(pV, mStepChroma, 5);
mp4_Inv_Quant_DCT_Intra_MPEG4(pCur_MB, 16, 0);
mp4_Inv_Quant_DCT_Intra_MPEG4(pCur_MB+8, 16, 1);
mp4_Inv_Quant_DCT_Intra_MPEG4(pCur_MB+16*8, 16, 2);
mp4_Inv_Quant_DCT_Intra_MPEG4(pCur_MB+16*8+8, 16, 3);
mp4_Inv_Quant_DCT_Intra_MPEG4(pCur_MB+24*18, 8, 4);
mp4_Inv_Quant_DCT_Intra_MPEG4(pCur_MB+24*18+64, 8, 5);
pY += 16; pU += 8; pV += 8; p656 += MB_byte_step;
// MBcurr ++;
// mp4_StatisticInc(&pInfo->VisualObject.Statistic.nMB);
}
refYDC[j2+1] = MBDC[3];
refUDC[j-1] = MBDC[4];
refVDC[j-1] = MBDC[5];
}
while(!dma_done);
restore_MB_get_MB_refwin(
pCur_MB, pPreY, pPreU, pPreV,
NULL, NULL,
NULL, NULL, NULL,
NULL, NULL, NULL,
0x00
);
while(!dma_done);
}
//added by bxd Apr.23
static void xhComputeChroma4MV_MPEG4( struct IppMotionVector pLumaMV[4],
struct IppMotionVector* pChromaMV,
Ipp8u pTranspMB[4]//这个参数暂时不知道怎么用
)
{
const Ipp32u roundtab_76[16] = { 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1 };
Ipp16s uv_dx, uv_dy;
uv_dx = pLumaMV[0].dx + pLumaMV[1].dx + pLumaMV[2].dx + pLumaMV[3].dx;
uv_dy = pLumaMV[0].dy + pLumaMV[1].dy + pLumaMV[2].dy + pLumaMV[3].dy;
uv_dx = (uv_dx >> 3) + roundtab_76[uv_dx & 0xf];
uv_dy = (uv_dy >> 3) + roundtab_76[uv_dy & 0xf];
pChromaMV->dx = uv_dx;
pChromaMV->dy = uv_dy;
}
//added by bxd Apr.23
/*
 * Derive the chroma motion vector for a macroblock coded with a single
 * (1MV) luma vector: halve each luma component with MPEG-4's rounding
 * table on the low 2 bits.
 *
 * pLumaMV   - the macroblock's luma motion vector (half-pel units)
 * pChromaMV - out: derived chroma motion vector
 *
 * NOTE(review): `v >> 1` on a negative Ipp16s relies on arithmetic
 * right shift (implementation-defined in C); matches reference encoders.
 */
static void xhComputeChromaMV_MPEG4( struct IppMotionVector* pLumaMV,
                                     struct IppMotionVector* pChromaMV
                                   )
{
    /* MPEG-4 rounding table for the /2 scaling (indexed by v & 0x3).
       static const: built once at program start, not on every call. */
    static const Ipp32u roundtab_79[4] = { 0, 1, 0, 0 };

    pChromaMV->dx = (pLumaMV->dx >> 1) + roundtab_79[pLumaMV->dx & 0x3];
    pChromaMV->dy = (pLumaMV->dy >> 1) + roundtab_79[pLumaMV->dy & 0x3];
}
void EncodePVOP()
{
Ipp8u *pYc, *pUc, *pVc, *pYf, *pUf, *pVf, *p656;
int i,k, j, cbpy, cbpc, quant, pattern, mctype, xL, xR, yT, yB;
Ipp16s _coeffMB[64*6+31], *coeffMB = (Ipp16s*)_PTR_ALIGN32(_coeffMB);
// Ipp8u _pDec[64*6+31], *pDec = (Ipp8u*)_PTR_ALIGN32(_pDec);
Ipp32s nzCount, dev;
Ipp32s bestSAD;
// Ipp32s bestSAD16x16;
// Ipp32s bestSAD8x8;
// struct mp4_MacroBlock *MBcurr = MBinfo;
int type;
int not_coded;
// int dma_flag=0;
struct IppMotionVector mvLuma;
// struct IppMotionVector mvLuma4[4]; __070613_2__no_4mv
struct IppMotionVector mvChroma;
// struct IppMotionVector mvPred[4]; __070613_2__no_4mv
struct IppMotionVector mvPred;
Ipp8u *pr,*pCur_MB;
int fRangeMin = -(16 << VOP.vop_fcode_forward), fRangeMax = (16 << VOP.vop_fcode_forward) - 1, fRange = fRangeMax - fRangeMin + 1;
int predDir[6], ac_pred_flag, dcScalerLuma, dcScalerChroma, use_intra_dc_vlc, acPredSum0, acPredSum1;
int mvIsInRange;
// Ipp8u opaqueMB[4] = {IPP_VIDEO_OPAQUE, IPP_VIDEO_OPAQUE, IPP_VIDEO_OPAQUE, IPP_VIDEO_OPAQUE}; __070613_2__no_4mv
short MBDC[6];
int j2;
short leftUDC;
short leftVDC;
int iCur_MB = 0;
Ipp8u *pPreY, *pPreU, *pPreV;
Ipp8u *pDMARefwinY = refwin_Y;
Ipp8u *pDMARefwinU = refwin_U;
Ipp8u *pDMARefwinV = refwin_V;
Ipp8u *pCurRefwinY;
Ipp8u *pCurRefwinU;
Ipp8u *pCurRefwinV;
int dma_flags;
int refwin_index = REFWIN_NUM;
//Initiating refmv array;
memset(_refmv, 0, REFMV_NUM * sizeof(struct IppMotionVector));
// get_MB_refwin(mCurrPtr656, mForwPtrY-16-16*mStepLuma, MB_buffer[iCur_MB], win_buffer[iCur_MB],
// mForwPtrU-8-8*mStepChroma, mForwPtrV-8-8*mStepChroma);
init_refDC(_refYDC, mDefDC, REFYDC_NUM);
init_refDC(_refUDC, mDefDC, REFUVDC_NUM);
init_refDC(_refVDC, mDefDC, REFUVDC_NUM);
quant = VOP.vop_quant;
use_intra_dc_vlc = quant < mDC_VLC_Threshold[VOP.intra_dc_vlc_thr];
//qp在编码整个VOP的过程中保持不变,所以可以在这里初始化 dcScalerLuma 和dcScalerChroma;
dcScalerLuma = mp4_GetDCscaler(quant, 0);
dcScalerChroma = mp4_GetDCscaler(quant, 4);
// MatchBlock_to_Cur_frame_dma_ini();
// _memdma_refwin_ini(refwin,Current_MB,Current_MB+256,Current_MB+320);
// mdma_refwin_en(mForwPtrY-16-16*mStepLuma, mStepLuma, refwin, 48, 48);
pPreY = mCurrPtrY-16;
pPreU = mCurrPtrU-8;
pPreV = mCurrPtrV-8;
for (i = 0; i < mNumMacroBlockPerCol; i ++) {
pYc = mCurrPtrY + i * 16 * mStepLuma;
pUc = mCurrPtrU + i * 8 * mStepChroma;
pVc = mCurrPtrV + i * 8 * mStepChroma;
pYf = mForwPtrY + i * 16 * mStepLuma;
pUf = mForwPtrU + i * 8 * mStepChroma;
pVf = mForwPtrV + i * 8 * mStepChroma;
p656 = mCurrPtr656 + i * MB_row_step * YUV656_BUF_WIDTH;
// memdma1_finish_test();
// _memdma_refwin_en(pYf-16*mStepLuma-16,pYc,pUc,pVc,dma_flag);
// dma_flag=dma_flag+1;
MBDC[1]=MBDC[3] = mDefDC;
leftUDC = mDefDC;
leftVDC = mDefDC;
for (j = 0; j < mNumMacroBlockPerRow; j ++) {
j2 = j<<1;
// mdma_refwin_en(pYf-16*mStepLuma-16,
// mStepLuma, refwin, 48,48);
// MBcurr->quant = quant;
// Predict1MV(MBcurr, i, j, &mvPred[0]);
// Predict1MV(MBcurr, i, j, &mvPred);
// Predict1MV(&refmv[j], i, j, &mvPred);
Predict1MV(&refmv[j], i, &mvPred);
mvLuma = mvPred;
xL = -IPP_MIN(j * 16 + 16, mPVOPsearchHor);
yT = -IPP_MIN(i * 16 + 16, mPVOPsearchVer);
xR = (j == mNumMacroBlockPerRow - 1) ? IPP_MIN(16, mPVOPsearchHor) : IPP_MIN(VOL.video_object_layer_width + 16 - (j + 1) * 16, mPVOPsearchHor);
yB = (i == mNumMacroBlockPerCol - 1) ? IPP_MIN(16, mPVOPsearchVer) : IPP_MIN(VOL.video_object_layer_height + 16 - (i + 1) * 16, mPVOPsearchVer);
/*
if(j==0)
get_refwin_copy_asm(refwin,pYf-16 * mStepLuma-16,mStepLuma);
else
//利用左边已获得的参考窗来加速数据传递
get_refwin_copy_asm1(refwin,pYf-16 * mStepLuma-16,mStepLuma);
*/
// get_CurMB(Current_MB,pYc,pUc,pVc,mStepLuma,dma_flag);
pCur_MB = MB_buffer[iCur_MB];
// refwin = win_buffer[iCur_MB];
iCur_MB ^= 0x01;
pCurRefwinY = pDMARefwinY;
pCurRefwinU = pDMARefwinU;
pCurRefwinV = pDMARefwinV;
dma_flags = 0x07;
refwin_index --;
pDMARefwinY += 16;
pDMARefwinU += 8;
pDMARefwinV += 8;
if(j==mNumMacroBlockPerRow-1 || refwin_index==0 ) {
refwin_index = REFWIN_NUM;
dma_flags = 0x03;
pDMARefwinY = refwin_Y;
pDMARefwinU = refwin_U;
pDMARefwinV = refwin_V;
}
while(!dma_done);
// while(!((*pMDMA_D0_IRQ_STATUS)&0x01));
// *pMDMA_D0_IRQ_STATUS = 0x01;
/* (hosting-site keyboard-shortcut help text removed here; the remainder
   of EncodePVOP is truncated in this copy of the file) */