📄 mp4decvopb.c
字号:
/*
 * Decode one macroblock row (slice) of a B-VOP.
 *
 * Parses, for each macroblock in row `curRow`: modb/mb_type/cbpb, the
 * optional quantizer update, interlaced field flags, the motion vectors
 * (forward / backward / interpolate / direct, frame or field), and the
 * residual DCT coefficients. Results are stored into pMBinfoMT[] for the
 * reconstruction pass; pMBinfo[] carries the co-located P/I-frame data
 * consumed by direct mode.
 *
 * Returns MP4_STATUS_OK, or MP4_STATUS_ERROR on any bitstream error.
 *
 * Review fixes vs. original:
 *  - `pMVField += 2` was executed even in progressive mode, where pMVField
 *    is NULL; arithmetic on a null pointer is undefined behavior (C11
 *    6.5.6). The increment is now done only when `interlaced` is set.
 *  - `sts` is initialized to MP4_STATUS_OK so the common status check can
 *    never read an indeterminate value.
 */
static mp4_Status mp4_DecodeVOP_B_DecodeSlice(mp4_Info* pInfo, int curRow, mp4_MacroBlockMT* pMBinfoMT)
{
    Ipp32u code;
    int mb_type, cbpb, modb, quant, TRB, TRD, j, mbPerRow, scan, fcode_forward, fcode_backward, sts;
    int interlaced, field_prediction, dct_type, mb_ftfr, mb_fbfr, mb_btfr, mb_bbfr;
    mp4_MacroBlock *pMBinfo;
    IppMotionVector mvForwT, mvBackT, mvForwB, mvBackB, *pMVField;

    mbPerRow = pInfo->VisualObject.VideoObject.MacroBlockPerRow;
    pMBinfo = pInfo->VisualObject.VideoObject.MBinfo + curRow * mbPerRow;
    quant = pInfo->VisualObject.VideoObject.VideoObjectPlane.quant;
    /* TRD/TRB: temporal distances used to scale direct-mode vectors */
    TRD = pInfo->VisualObject.VideoObject.TRD;
    TRB = pInfo->VisualObject.VideoObject.TRB;
    scan = pInfo->VisualObject.VideoObject.VideoObjectPlane.alternate_vertical_scan_flag ? IPPVC_SCAN_VERTICAL : IPPVC_SCAN_ZIGZAG;
    fcode_forward = pInfo->VisualObject.VideoObject.VideoObjectPlane.fcode_forward;
    fcode_backward = pInfo->VisualObject.VideoObject.VideoObjectPlane.fcode_backward;
    interlaced = pInfo->VisualObject.VideoObject.interlaced;
    /* Field MV table exists only for interlaced content; NULL otherwise and
       must then never be touched (not even via pointer arithmetic). */
    pMVField = interlaced ? pInfo->VisualObject.VideoObject.FieldMV + curRow * mbPerRow * 2 : 0;
    // init for non-interlaced
    field_prediction = dct_type = mb_ftfr = mb_fbfr = mb_btfr = mb_bbfr = 0;
    // reset MV predictors at new Row
    mvForwT.dx = mvForwT.dy = mvBackT.dx = mvBackT.dy = mvForwB.dx = mvForwB.dy = mvBackB.dx = mvBackB.dy = 0;
    sts = MP4_STATUS_OK;  /* defensive init; every coded path below reassigns it */
    for (j = 0; j < mbPerRow; j ++) {
        if (pMBinfo->not_coded) {
            /* skipped MB: nothing in the bitstream for it */
            mp4_StatisticInc_(&pInfo->VisualObject.Statistic.nMB_NOTCODED);
        } else {
            cbpb = 0;
            if (mp4_GetBit(pInfo)) {
                /* modb == '1': no mb_type, no cbpb -> direct mode, no residual */
                modb = 2;
                mb_type = IPPVC_MBTYPE_DIRECT;
            } else {
                modb = mp4_GetBit(pInfo);
                // decode mb_type
                code = mp4_ShowBits9(pInfo, 4);
                if (code != 0) {
                    mb_type = mp4_BVOPmb_type[code].code;
                    mp4_FlushBits(pInfo, mp4_BVOPmb_type[code].len);
                } else {
                    mp4_Error("Error when decode mb_type of B-VOP macroblock");
                    return MP4_STATUS_ERROR;
                }
                if (modb == 0)
                    cbpb = mp4_GetBits9(pInfo, 6);
                /* dquant is present only for non-direct MBs with coded blocks */
                if (mb_type != IPPVC_MBTYPE_DIRECT && cbpb != 0)
                    mp4_UpdateQuant_B(pInfo, quant);
            }
            if (!interlaced) {
                /* ---- progressive: one frame MV per direction ---- */
                if (mb_type == IPPVC_MBTYPE_FORWARD) {
                    mp4_StatisticInc_(&pInfo->VisualObject.Statistic.nMB_FORWARD);
                    sts = mp4_DecodeMV(pInfo, &mvForwT, fcode_forward);
                    pMBinfoMT->mvF[0] = mvForwT;
                } else if (mb_type == IPPVC_MBTYPE_BACKWARD) {
                    mp4_StatisticInc_(&pInfo->VisualObject.Statistic.nMB_BACKWARD);
                    sts = mp4_DecodeMV(pInfo, &mvBackT, fcode_backward);
                    pMBinfoMT->mvB[0] = mvBackT;
                } else if (mb_type == IPPVC_MBTYPE_INTERPOLATE) {
                    mp4_StatisticInc_(&pInfo->VisualObject.Statistic.nMB_INTERPOLATE);
                    sts = mp4_DecodeMV(pInfo, &mvForwT, fcode_forward);
                    if (sts == MP4_STATUS_OK)
                        sts = mp4_DecodeMV(pInfo, &mvBackT, fcode_backward);
                    pMBinfoMT->mvF[0] = mvForwT;
                    pMBinfoMT->mvB[0] = mvBackT;
                } else { // IPPVC_MBTYPE_DIRECT
                    mp4_StatisticInc_(&pInfo->VisualObject.Statistic.nMB_DIRECT);
                    //f MVs of collocated block of recently decoded I or P frame used in Direct mode
                    sts = mp4_DecodeMV_Direct(pInfo, pMBinfo->mv, pMBinfoMT->mvF, pMBinfoMT->mvB, TRB, TRD, modb, pMBinfo->type);
                }
            } else {
                /* ---- interlaced: optional field prediction, two MVs per direction ---- */
                dct_type = 0;
                field_prediction = 0;
                if (cbpb != 0)
                    dct_type = mp4_GetBit(pInfo);
                if (mb_type != IPPVC_MBTYPE_DIRECT) {
                    field_prediction = mp4_GetBit(pInfo);
                    if (field_prediction) {
                        /* per-direction field reference selectors (top/bottom) */
                        if (mb_type != IPPVC_MBTYPE_BACKWARD) {
                            mb_ftfr = mp4_GetBit(pInfo);
                            mb_fbfr = mp4_GetBit(pInfo);
                        }
                        if (mb_type != IPPVC_MBTYPE_FORWARD) {
                            mb_btfr = mp4_GetBit(pInfo);
                            mb_bbfr = mp4_GetBit(pInfo);
                        }
                    }
                }
                if (mb_type == IPPVC_MBTYPE_FORWARD) {
                    mp4_StatisticInc_(&pInfo->VisualObject.Statistic.nMB_FORWARD);
                    if (!field_prediction) {
                        sts = mp4_DecodeMV(pInfo, &mvForwT, fcode_forward);
                        mvForwB = mvForwT;
                        pMBinfoMT->mvF[0] = mvForwT;
                    } else {
                        /* field MVs are predicted/stored at field resolution:
                           halve dy before decode, double it back afterwards */
                        mvForwT.dy = (Ipp16s)mp4_Div2(mvForwT.dy);
                        mvForwB.dy = (Ipp16s)mp4_Div2(mvForwB.dy);
                        sts = mp4_DecodeMV(pInfo, &mvForwT, fcode_forward);
                        if (sts == MP4_STATUS_OK)
                            sts = mp4_DecodeMV(pInfo, &mvForwB, fcode_forward);
                        pMBinfoMT->mvF[0] = mvForwT;
                        pMBinfoMT->mvF[2] = mvForwB;
                        mvForwT.dy <<= 1;
                        mvForwB.dy <<= 1;
                    }
                } else if (mb_type == IPPVC_MBTYPE_BACKWARD) {
                    mp4_StatisticInc_(&pInfo->VisualObject.Statistic.nMB_BACKWARD);
                    if (!field_prediction) {
                        sts = mp4_DecodeMV(pInfo, &mvBackT, fcode_backward);
                        mvBackB = mvBackT;
                        pMBinfoMT->mvB[0] = mvBackT;
                    } else {
                        mvBackT.dy = (Ipp16s)mp4_Div2(mvBackT.dy);
                        mvBackB.dy = (Ipp16s)mp4_Div2(mvBackB.dy);
                        sts = mp4_DecodeMV(pInfo, &mvBackT, fcode_backward);
                        if (sts == MP4_STATUS_OK)
                            sts = mp4_DecodeMV(pInfo, &mvBackB, fcode_backward);
                        pMBinfoMT->mvB[0] = mvBackT;
                        pMBinfoMT->mvB[2] = mvBackB;
                        mvBackT.dy <<= 1;
                        mvBackB.dy <<= 1;
                    }
                } else if (mb_type == IPPVC_MBTYPE_INTERPOLATE) {
                    mp4_StatisticInc_(&pInfo->VisualObject.Statistic.nMB_INTERPOLATE);
                    if (!field_prediction) {
                        sts = mp4_DecodeMV(pInfo, &mvForwT, fcode_forward);
                        if (sts == MP4_STATUS_OK)
                            sts = mp4_DecodeMV(pInfo, &mvBackT, fcode_backward);
                        mvBackB = mvBackT;
                        pMBinfoMT->mvB[0] = mvBackT;
                        mvForwB = mvForwT;
                        pMBinfoMT->mvF[0] = mvForwT;
                    } else {
                        mvForwT.dy = (Ipp16s)mp4_Div2(mvForwT.dy);
                        mvForwB.dy = (Ipp16s)mp4_Div2(mvForwB.dy);
                        mvBackT.dy = (Ipp16s)mp4_Div2(mvBackT.dy);
                        mvBackB.dy = (Ipp16s)mp4_Div2(mvBackB.dy);
                        sts = mp4_DecodeMV(pInfo, &mvForwT, fcode_forward);
                        if (sts == MP4_STATUS_OK)
                            sts = mp4_DecodeMV(pInfo, &mvForwB, fcode_forward);
                        if (sts == MP4_STATUS_OK)
                            sts = mp4_DecodeMV(pInfo, &mvBackT, fcode_backward);
                        if (sts == MP4_STATUS_OK)
                            sts = mp4_DecodeMV(pInfo, &mvBackB, fcode_backward);
                        pMBinfoMT->mvF[0] = mvForwT;
                        pMBinfoMT->mvF[2] = mvForwB;
                        mvForwT.dy <<= 1;
                        mvForwB.dy <<= 1;
                        pMBinfoMT->mvB[0] = mvBackT;
                        pMBinfoMT->mvB[2] = mvBackB;
                        mvBackT.dy <<= 1;
                        mvBackB.dy <<= 1;
                    }
                } else { // IPPVC_MBTYPE_DIRECT
                    mp4_StatisticInc_(&pInfo->VisualObject.Statistic.nMB_DIRECT);
                    //f MVs of collocated block of recently decoded I or P frame used in Direct mode
                    if (!(pMBinfo->field_info & 1))
                        sts = mp4_DecodeMV_Direct(pInfo, pMBinfo->mv, pMBinfoMT->mvF, pMBinfoMT->mvB, TRB, TRD, modb, pMBinfo->type);
                    else
                        sts = mp4_DecodeMV_DirectField(pInfo, (pMBinfo->field_info >> 1) & 1, (pMBinfo->field_info >> 2) & 1, &pMVField[0], &pMVField[1], &pMBinfoMT->mvF[0], &pMBinfoMT->mvF[2], &pMBinfoMT->mvB[0], &pMBinfoMT->mvB[2], TRB, TRD, modb);
                }
            }
            if (sts != MP4_STATUS_OK) {
                mp4_Error("Error when decode B-VOP motion vector");
                return MP4_STATUS_ERROR;
            }
            mp4_ReconstructCoeffsInterMB(pInfo, pMBinfoMT->dctCoeffs, pMBinfoMT->lnz, cbpb, 0, scan, quant);
            pMBinfoMT->mb_type = (Ipp8u)mb_type;
            pMBinfoMT->pat = (Ipp8u)cbpb;
            if (interlaced) {
                pMBinfoMT->dct_type = (Ipp8u)dct_type;
                /* pack field flags: bit0 field_prediction, bits1..4 ref selectors */
                if (mb_type != IPPVC_MBTYPE_DIRECT)
                    pMBinfoMT->field_info = (Ipp8u)(field_prediction + (mb_ftfr << 1) + (mb_fbfr << 2) + (mb_btfr << 3) + (mb_bbfr << 4));
            }
        }
        mp4_StatisticInc_(&pInfo->VisualObject.Statistic.nMB);
        pMBinfo ++;
        pMBinfoMT ++;
        /* FIX: only advance in interlaced mode; pMVField is NULL otherwise
           and arithmetic on a null pointer is undefined behavior. */
        if (interlaced)
            pMVField += 2;
        if (!pInfo->VisualObject.VideoObject.resync_marker_disable) {
            int found;
            if (mp4_DecodeVideoPacket(pInfo, &quant, &found) == MP4_STATUS_OK) {
                if (found) {
                    // reset MV predictors at new VideoPacket
                    mvForwT.dx = mvForwT.dy = mvBackT.dx = mvBackT.dy = mvForwB.dx = mvForwB.dy = mvBackB.dx = mvBackB.dy = 0;
                }
            } else
                return MP4_STATUS_ERROR;
        }
    }
    /* propagate the (possibly updated) quantizer back to the VOP state */
    pInfo->VisualObject.VideoObject.VideoObjectPlane.quant = quant;
    return MP4_STATUS_OK;
}

/* NOTE(review): the following definition is truncated in this capture of the
   file; preserved verbatim — the remainder lives past the visible chunk. */
static void mp4_DecodeVOP_B_ReconSlice(mp4_Info* pInfo, int curRow, mp4_MacroBlockMT* pMBinfoMT){ __ALIGN16(Ipp8u, tmpMB, 64*4); Ipp8u *pYc, *pCbc, *pCrc, *pYp, *pCbp, *pCrp, *pYn, *pCbn, *pCrn; int stepYp, stepYc, stepYn, stepCbp, stepCbc, stepCbn, stepCrp, stepCrc, stepCrn; int mb_type, quarter_sample, cbpb, scan, j, dx, dy, mbPerRow; int interlaced, field_prediction, dct_type, mb_ftfr, mb_fbfr, mb_btfr, mb_bbfr; IppiRect limitRectL, limitRectC; mp4_MacroBlock *pMBin
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -