// umc_h264_segment_decoder_mt.cpp
case MBTYPE_INTER_8x8_REF0:
{
Ipp8s *pSBDir = m_cur_mb.LocalMacroblockInfo->sbdir;
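// Each RECONSTRUCT_MOTION_VECTORS invocation handles one 8x8 partition.
// The second argument appears to be the raster-order index of the
// partition's top-left 4x4 block in the macroblock's 4x4 grid
// (0, 2, 8, 10), and the third names the neighbor-availability context
// used for MV prediction.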
RECONSTRUCT_MOTION_VECTORS(0, 0, External);
RECONSTRUCT_MOTION_VECTORS(1, 2, Top);
RECONSTRUCT_MOTION_VECTORS(2, 8, Left);
RECONSTRUCT_MOTION_VECTORS(3, 10, Internal);
}
break;
// all other cases
default:
return;
}
return;
}
// initialize blocks coding pattern maps
Ipp32s type = m_cur_mb.GlobalMacroblockInfo->mbtype - MBTYPE_FORWARD;
if (type >= 0)
{
if (m_cur_mb.LocalMacroblockInfo->sbdir[0] > D_DIR_BIDIR)
return;
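// pCodFBD appears to hold precomputed per-4x4 coding-pattern tables
// indexed by (mbtype - MBTYPE_FORWARD); rows 2 and 3 select the L0
// and L1 motion-vector-difference maps respectively.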
// get all mvd_L0
ReconstructMotionVectors4x4(pCodFBD[type][2], 0);
// get all mvd_L1
ReconstructMotionVectors4x4(pCodFBD[type][3], 1);
}
else
{
Ipp32u i, j;
const Ipp8u *pRIxL0, *pRIxL1, *pMVdL0, *pMVdL1;
#ifdef __ICL
__declspec(align(16))Ipp8u pCodRIxL0[16];
__declspec(align(16))Ipp8u pCodRIxL1[16];
__declspec(align(16))Ipp8u pCodMVdL0[16];
__declspec(align(16))Ipp8u pCodMVdL1[16];
#else
Ipp8u pCodRIxL0[16];
Ipp8u pCodRIxL1[16];
Ipp8u pCodMVdL0[16];
Ipp8u pCodMVdL1[16];
#endif
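// 16-byte alignment under the Intel compiler presumably lets the
// pattern tables be read with aligned SIMD loads.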
pRIxL0 = pCodRIxL0;
pRIxL1 = pCodRIxL1;
pMVdL0 = pCodMVdL0;
pMVdL1 = pCodMVdL1;
Ipp8u cL0;
Ipp8u cL1;
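// The four 16-entry maps carry one code per 4x4 block in raster order:
// CodInBS - the value is present in the bitstream, CodNone - absent
// (that list is not used), CodAbov - copied from the block above,
// CodSkip - supplied by the direct-prediction process (interpretation
// inferred from the cases below).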
switch (m_cur_mb.GlobalMacroblockInfo->mbtype)
{
case MBTYPE_INTER_16x8:
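// 16x8: two partitions, flags at 4x4 raster indices 0 (top half)
// and 8 (bottom half)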
cL0 = (m_cur_mb.LocalMacroblockInfo->sbdir[0] == D_DIR_BWD) ? CodNone : CodInBS;
cL1 = (m_cur_mb.LocalMacroblockInfo->sbdir[0] == D_DIR_FWD) ? CodNone : CodInBS;
pCodRIxL0[0] = cL0;
pCodRIxL1[0] = cL1;
cL0 = (m_cur_mb.LocalMacroblockInfo->sbdir[1] == D_DIR_BWD) ? CodNone : CodInBS;
cL1 = (m_cur_mb.LocalMacroblockInfo->sbdir[1] == D_DIR_FWD) ? CodNone : CodInBS;
pCodRIxL0[8] = cL0;
pCodRIxL1[8] = cL1;
// get all mvd_L0
ReconstructMotionVectors16x8(pRIxL0, 0);
// get all mvd_L1
ReconstructMotionVectors16x8(pRIxL1, 1);
break;
case MBTYPE_INTER_8x16:
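// 8x16: two partitions, flags at 4x4 raster indices 0 (left half)
// and 2 (right half)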
cL0 = (m_cur_mb.LocalMacroblockInfo->sbdir[0] == D_DIR_BWD) ? CodNone : CodInBS;
cL1 = (m_cur_mb.LocalMacroblockInfo->sbdir[0] == D_DIR_FWD) ? CodNone : CodInBS;
pCodRIxL0[0] = cL0;
pCodRIxL1[0] = cL1;
cL0 = (m_cur_mb.LocalMacroblockInfo->sbdir[1] == D_DIR_BWD) ? CodNone : CodInBS;
cL1 = (m_cur_mb.LocalMacroblockInfo->sbdir[1] == D_DIR_FWD) ? CodNone : CodInBS;
pCodRIxL0[2] = cL0;
pCodRIxL1[2] = cL1;
// get all mvd_L0
ReconstructMotionVectors8x16(pRIxL0, 0);
// get all mvd_L1
ReconstructMotionVectors8x16(pRIxL1, 1);
break;
case MBTYPE_INTER_8x8:
case MBTYPE_INTER_8x8_REF0:
memcpy(pCodRIxL0, pCodTemplate, sizeof(pCodTemplate[0])*16);
memcpy(pCodRIxL1, pCodTemplate, sizeof(pCodTemplate[0])*16);
memcpy(pCodMVdL0, pCodTemplate, sizeof(pCodTemplate[0])*16);
memcpy(pCodMVdL1, pCodTemplate, sizeof(pCodTemplate[0])*16);
{
for (i = 0; i < 4; i ++)
{
j = subblock_block_mapping[i];
Ipp32s sbtype = (m_cur_mb.LocalMacroblockInfo->sbdir[i] < D_DIR_DIRECT) ?
m_cur_mb.GlobalMacroblockInfo->sbtype[i] : SBTYPE_DIRECT;
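// j is the raster-order 4x4 index of the i-th 8x8's top-left block
// (entries not written below keep the pCodTemplate defaults). The
// reference index is coded at most once per 8x8, while the MVd
// pattern follows the sub-partition shape.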
switch (sbtype)
{
case SBTYPE_8x8:
cL0 = (m_cur_mb.LocalMacroblockInfo->sbdir[i] == D_DIR_BWD) ? CodNone : CodInBS;
cL1 = (m_cur_mb.LocalMacroblockInfo->sbdir[i] == D_DIR_FWD) ? CodNone : CodInBS;
pCodRIxL0[j] = cL0;
pCodRIxL1[j] = cL1;
pCodMVdL0[j] = cL0;
pCodMVdL1[j] = cL1;
pCodRIxL0[j + 4] = CodAbov;
pCodRIxL1[j + 4] = CodAbov;
pCodMVdL0[j + 4] = CodAbov;
pCodMVdL1[j + 4] = CodAbov;
break;
case SBTYPE_8x4:
cL0 = (m_cur_mb.LocalMacroblockInfo->sbdir[i] == D_DIR_BWD) ? CodNone : CodInBS;
cL1 = (m_cur_mb.LocalMacroblockInfo->sbdir[i] == D_DIR_FWD) ? CodNone : CodInBS;
pCodRIxL0[j] = cL0;
pCodRIxL1[j] = cL1;
pCodMVdL0[j] = cL0;
pCodMVdL1[j] = cL1;
pCodRIxL0[j + 4] = CodAbov;
pCodRIxL1[j + 4] = CodAbov;
pCodMVdL0[j + 4] = cL0;
pCodMVdL1[j + 4] = cL1;
break;
case SBTYPE_4x8:
cL0 = (m_cur_mb.LocalMacroblockInfo->sbdir[i] == D_DIR_BWD) ? CodNone : CodInBS;
cL1 = (m_cur_mb.LocalMacroblockInfo->sbdir[i] == D_DIR_FWD) ? CodNone : CodInBS;
pCodRIxL0[j] = cL0;
pCodRIxL1[j] = cL1;
pCodMVdL0[j] = cL0;
pCodMVdL1[j] = cL1;
pCodMVdL0[j + 1] = cL0;
pCodMVdL1[j + 1] = cL1;
pCodRIxL0[j + 4] = CodAbov;
pCodRIxL1[j + 4] = CodAbov;
pCodMVdL0[j + 4] = pCodMVdL0[j + 5] = CodAbov;
pCodMVdL1[j + 4] = pCodMVdL1[j + 5] = CodAbov;
break;
case SBTYPE_4x4:
cL0 = (m_cur_mb.LocalMacroblockInfo->sbdir[i] == D_DIR_BWD) ? CodNone : CodInBS;
cL1 = (m_cur_mb.LocalMacroblockInfo->sbdir[i] == D_DIR_FWD) ? CodNone : CodInBS;
pCodRIxL0[j] = cL0;
pCodRIxL1[j] = cL1;
pCodRIxL0[j + 4] = CodAbov;
pCodRIxL1[j + 4] = CodAbov;
pCodMVdL0[j] = pCodMVdL0[j + 1] = pCodMVdL0[j + 4] = pCodMVdL0[j + 5] = cL0;
pCodMVdL1[j] = pCodMVdL1[j + 1] = pCodMVdL1[j + 4] = pCodMVdL1[j + 5] = cL1;
break;
case SBTYPE_DIRECT:
cL0 = (m_cur_mb.LocalMacroblockInfo->sbdir[i] == D_DIR_DIRECT_SPATIAL_BWD) ? CodNone : CodSkip;
cL1 = (m_cur_mb.LocalMacroblockInfo->sbdir[i] == D_DIR_DIRECT_SPATIAL_FWD) ? CodNone : CodSkip;
pCodRIxL0[j] = pCodRIxL0[j + 1] = pCodRIxL0[j + 4] = pCodRIxL0[j + 5] = cL0;
pCodRIxL1[j] = pCodRIxL1[j + 1] = pCodRIxL1[j + 4] = pCodRIxL1[j + 5] = cL1;
pCodMVdL0[j] = pCodMVdL0[j + 1] = pCodMVdL0[j + 4] = pCodMVdL0[j + 5] = cL0;
pCodMVdL1[j] = pCodMVdL1[j + 1] = pCodMVdL1[j + 4] = pCodMVdL1[j + 5] = cL1;
break;
default:
throw h264_exception(UMC_ERR_INVALID_STREAM);
}
}
// get all mvd_L0
ReconstructMotionVectors4x4(BlkOrder,
pMVdL0,
0);
// get all mvd_L1
ReconstructMotionVectors4x4(BlkOrder,
pMVdL1,
1);
break;
}
default:
VM_ASSERT(false);
throw h264_exception(UMC_ERR_INVALID_STREAM);
} // switch
}
} // void H264SegmentDecoderMultiThreaded::ReconstructMotionVectors(void)
void H264SegmentDecoderMultiThreaded::DecodeDirectMotionVectorsSpatial(void)
{
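// NOTE: the body below is commented out; the same reference-index and
// prediction-direction set-up is performed inline at the top of
// ReconstructDirectMotionVectorsSpatial() below.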
/*Ipp32s refIdxL0, refIdxL1;
// find the reference indexes
ComputeDirectSpatialRefIdx(&refIdxL0, &refIdxL1);
// set up reference index array
{
Ipp8s *pFwdRefInd;
Ipp8s *pBwdRefInd;
pFwdRefInd = m_cur_mb.RefIdxs[0]->RefIdxs;
pBwdRefInd = m_cur_mb.RefIdxs[1]->RefIdxs;
if (0 <= (refIdxL0 & refIdxL1))
{
memset(pFwdRefInd, refIdxL0, 16);
memset(pBwdRefInd, refIdxL1, 16);
}
else
{
memset(pFwdRefInd, 0, 16);
memset(pBwdRefInd, 0, 16);
}
}
// set direction for MB
{
Ipp32u uPredDir;
if ((0 <= refIdxL0) && (0 > refIdxL1))
uPredDir = D_DIR_DIRECT_SPATIAL_FWD;
else if ((0 > refIdxL0) && (0 <= refIdxL1))
uPredDir = D_DIR_DIRECT_SPATIAL_BWD;
else
uPredDir = D_DIR_DIRECT_SPATIAL_BIDIR;
m_cur_mb.LocalMacroblockInfo->spatial_prediction_dir = (Ipp8u) uPredDir;
}
// save original spatial ref indexes
m_cur_mb.LocalMacroblockInfo->spatial_ref_idx_l0 = (Ipp8s) refIdxL0;
m_cur_mb.LocalMacroblockInfo->spatial_ref_idx_l1 = (Ipp8s) refIdxL1;*/
} // void H264SegmentDecoderMultiThreaded::DecodeDirectMotionVectorsSpatial(void)
void H264SegmentDecoderMultiThreaded::ReconstructDirectMotionVectorsSpatial()
{
Ipp32u xpos, ypos;
Ipp32s mvxL0, mvyL0;
Ipp32s mvxL1, mvyL1;
Ipp8s RefIndexL0, RefIndexL1;
Ipp32u uSaveMBType = m_cur_mb.GlobalMacroblockInfo->mbtype;
Ipp32u uPredDir; // prediction direction of macroblock
VM_ASSERT(m_pRefPicList[1][0]);
Ipp32s field = m_pFields[1][0].field;
bool bL1RefPicisShortTerm = m_pFields[1][0].isShortReference;
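// Whether the first L1 reference is short-term gates the zero-prediction
// shortcut described before the subblock loops below.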
// set up pointers to where MV and RefIndex will be stored
H264DecoderMacroblockMVs *pFwdMV = m_cur_mb.MVs[0];
H264DecoderMacroblockMVs *pBwdMV = m_cur_mb.MVs[1];
Ipp32s bAll4x4AreSame;
Ipp32s bAll8x8AreSame = 0;
Ipp32s refIdxL0, refIdxL1;
// find the reference indexes
ComputeDirectSpatialRefIdx(&refIdxL0, &refIdxL1);
RefIndexL0 = (Ipp8s)refIdxL0;
RefIndexL1 = (Ipp8s)refIdxL1;
// set up reference index array
{
Ipp8s *pFwdRefInd;
Ipp8s *pBwdRefInd;
pFwdRefInd = m_cur_mb.RefIdxs[0]->RefIdxs;
pBwdRefInd = m_cur_mb.RefIdxs[1]->RefIdxs;
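// The sign bit of (RefIndexL0 & RefIndexL1) is set only when both
// indexes are negative, so this test reads "not both lists are unused".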
if (0 <= (RefIndexL0 & RefIndexL1))
{
memset(pFwdRefInd, RefIndexL0, 16);
memset(pBwdRefInd, RefIndexL1, 16);
}
else
{
memset(pFwdRefInd, 0, 16);
memset(pBwdRefInd, 0, 16);
}
}
// set direction for MB
{
//Ipp32u uPredDir;
if ((0 <= RefIndexL0) && (0 > RefIndexL1))
uPredDir = D_DIR_DIRECT_SPATIAL_FWD;
else if ((0 > RefIndexL0) && (0 <= RefIndexL1))
uPredDir = D_DIR_DIRECT_SPATIAL_BWD;
else
uPredDir = D_DIR_DIRECT_SPATIAL_BIDIR;
//m_cur_mb.LocalMacroblockInfo->spatial_prediction_dir = (Ipp8u) uPredDir;
}
// Because predicted MV is computed using 16x16 block it is likely
// that all 4x4 blocks will use the same MV and reference frame.
// It is possible, however, for the MV for any 4x4 block to be set
// to 0,0 instead of the computed MV. This possibility by default
// forces motion compensation to be performed for each 4x4, the slowest
// possible option. These booleans are used to detect when all of the
// 4x4 blocks in an 8x8 can be combined for motion comp, and even better,
// when all of the 8x8 blocks in the macroblock can be combined.
// Change mbtype to an INTER 16x16 type for the computeMV function;
// this is required in the 8x8 DIRECT case to force computeMV to
// derive the MV using a 16x16 type instead.
m_cur_mb.GlobalMacroblockInfo->mbtype = MBTYPE_FORWARD;
// Copy to local vars to avoid lots of pointer derefs
//RefIndexL0 = m_cur_mb.LocalMacroblockInfo->spatial_ref_idx_l0;
//RefIndexL1 = m_cur_mb.LocalMacroblockInfo->spatial_ref_idx_l1;
// Forward MV (L0)
if (RefIndexL0 != -1)
{
// L0 ref idx exists, use it to obtain the predicted L0 motion vector
// for the macroblock
ComputeMotionVectorPredictors(0,
RefIndexL0,
0, // block
&mvxL0, &mvyL0);
}
else
{
mvxL0 = mvyL0 = 0;
}
// Backward MV (L1)
if (RefIndexL1 != -1)
{
// L1 ref idx exists, use it to obtain the predicted L1 motion vector
// for the macroblock
ComputeMotionVectorPredictors(1,
RefIndexL1,
0, // block
&mvxL1, &mvyL1);
}
else
{
mvxL1 = mvyL1 = 0;
}
if (mvyL0 > m_MVDistortion[0])
m_MVDistortion[0] = mvyL0;
if (mvyL1 > m_MVDistortion[1])
m_MVDistortion[1] = mvyL1;
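// Track the largest vertical MV component seen per list; in this
// multi-threaded decoder that bound presumably tells dependent threads
// how far into the reference rows decoding must have progressed.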
// restore mbtype
m_cur_mb.GlobalMacroblockInfo->mbtype = (Ipp8u)uSaveMBType;
//uPredDir = m_cur_mb.LocalMacroblockInfo->spatial_prediction_dir;
// In the loops below, set the MV and RefIdx for all subblocks.
// Conditionally change the MV to 0,0 and the RefIndex to 0 (doing so
// is called UseZeroPred here). UseZeroPred is selected for a part when:
//   (RefIndexLx < 0) ||
//   (bL1RefPicisShortTerm && RefIndexLx == 0 && ColocatedRefIndex == 0 &&
//    (colocated motion vectors in range -1..+1))
// When both RefIndexLx are -1, ZeroPred is used and both RefIndexLx
// are changed to zero.
// It is desirable to avoid checking the colocated motion vectors and
// the colocated ref index; the bools and the order of the conditional
// tests are set up to do so.
// At the MB level, the colocated MB does not need to be checked if:
//   - both RefIndexLx < 0
//   - the colocated MB is INTRA (all its RefIndex are known to be -1)
//   - the L1 ref pic is not short term
// bMaybeUseZeroPred is set to true if any of the above are false.