/* macroblock.c */
b8x8 = h->sps->b_direct8x8_inference ||
(type_col != P_8x8 && type_col != B_SKIP && type_col != B_DIRECT && type_col != B_8x8);
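/* With direct_8x8_inference, or when the co-located MB is not split below
 * 8x8, one co-located MV per 8x8 block (taken from the MB's corner 4x4
 * blocks) is enough to decide col_zero_flag; otherwise every 4x4 sub-block
 * is checked individually. */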
/* col_zero_flag */
for( i8=0; i8<4; i8++ )
{
const int x8 = i8%2;
const int y8 = i8/2;
const int o8 = x8 + y8 * h->mb.i_b8_stride;
if( l1ref0[o8] == 0 || ( l1ref0[o8] < 0 && l1ref1[o8] == 0 ) )
{
const int16_t (*l1mv)[2] = (l1ref0[o8] == 0) ? l1mv0 : l1mv1;
if( b8x8 )
{
const int16_t *mvcol = l1mv[3*x8 + 3*y8 * h->mb.i_b4_stride];
if( abs( mvcol[0] ) <= 1 && abs( mvcol[1] ) <= 1 )
{
if( ref[0] == 0 )
x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 0, 0, 0 );
if( ref[1] == 0 )
x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 1, 0, 0 );
}
}
else
{
for( i4=0; i4<4; i4++ )
{
const int x4 = i4%2 + 2*x8;
const int y4 = i4/2 + 2*y8;
const int16_t *mvcol = l1mv[x4 + y4 * h->mb.i_b4_stride];
if( abs( mvcol[0] ) <= 1 && abs( mvcol[1] ) <= 1 )
{
if( ref[0] == 0 )
x264_macroblock_cache_mv( h, x4, y4, 1, 1, 0, 0, 0 );
if( ref[1] == 0 )
x264_macroblock_cache_mv( h, x4, y4, 1, 1, 1, 0, 0 );
}
}
}
}
}
return 1;
}
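/* Compute the direct-mode refs/MVs for the current MB (spatial or temporal,
 * per the slice header) and leave them in the mv/ref cache.
 * Returns 0 if direct prediction is unavailable, nonzero otherwise.
 * If b_changed is non-NULL, it is set to whether the result differs from the
 * direct refs/MVs cached by the previous call on this MB. */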
int x264_mb_predict_mv_direct16x16( x264_t *h, int *b_changed )
{
int b_available;
if( h->param.analyse.i_direct_mv_pred == X264_DIRECT_PRED_NONE )
return 0;
else if( h->sh.b_direct_spatial_mv_pred )
b_available = x264_mb_predict_mv_direct16x16_spatial( h );
else
b_available = x264_mb_predict_mv_direct16x16_temporal( h );
if( b_changed != NULL && b_available )
{
int type_col = h->fref1[0]->mb_type[ h->mb.i_mb_xy ];
if( IS_INTRA(type_col) || type_col == P_SKIP )
{
*b_changed = h->mb.cache.direct_ref[0][0] != h->mb.cache.ref[0][X264_SCAN8_0]
|| h->mb.cache.direct_ref[1][0] != h->mb.cache.ref[1][X264_SCAN8_0]
|| *(uint32_t*)h->mb.cache.direct_mv[0][X264_SCAN8_0] != *(uint32_t*)h->mb.cache.mv[0][X264_SCAN8_0]
|| *(uint32_t*)h->mb.cache.direct_mv[1][X264_SCAN8_0] != *(uint32_t*)h->mb.cache.mv[1][X264_SCAN8_0];
}
else
{
int i, l;
*b_changed = 0;
for( l = 0; l < 2; l++ )
for( i = 0; i < 4; i++ )
*b_changed |= h->mb.cache.direct_ref[l][i] != h->mb.cache.ref[l][x264_scan8[i*4]];
*b_changed = *b_changed || memcmp(h->mb.cache.direct_mv, h->mb.cache.mv, sizeof(h->mb.cache.mv));
}
if( !*b_changed )
return b_available;
}
/* cache ref & mv */
if( b_available )
{
int i, l;
for( l = 0; l < 2; l++ )
for( i = 0; i < 4; i++ )
h->mb.cache.direct_ref[l][i] = h->mb.cache.ref[l][x264_scan8[i*4]];
memcpy(h->mb.cache.direct_mv, h->mb.cache.mv, sizeof(h->mb.cache.mv));
}
return b_available;
}
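/* Restore the cached direct refs/MVs for 8x8 block 'idx' (0..3) back into
 * the mv/ref cache, e.g. when re-testing a D_DIRECT_8x8 sub-partition. */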
void x264_mb_load_mv_direct8x8( x264_t *h, int idx )
{
const int x = 2*(idx%2);
const int y = 2*(idx/2);
int l;
x264_macroblock_cache_ref( h, x, y, 2, 2, 0, h->mb.cache.direct_ref[0][idx] );
x264_macroblock_cache_ref( h, x, y, 2, 2, 1, h->mb.cache.direct_ref[1][idx] );
for( l = 0; l < 2; l++ )
{
*(uint64_t*)h->mb.cache.mv[l][x264_scan8[idx*4]] =
*(uint64_t*)h->mb.cache.direct_mv[l][x264_scan8[idx*4]];
*(uint64_t*)h->mb.cache.mv[l][x264_scan8[idx*4]+8] =
*(uint64_t*)h->mb.cache.direct_mv[l][x264_scan8[idx*4]+8];
}
}
/* This just improves encoder performance, it's not part of the spec */
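/* Collect up to 8 motion vector candidates (mvc) for 16x16 motion search
 * against reference i_ref in list i_list: the B-direct MV of the centre
 * block, the MVs the left/top/topleft/topright neighbour MBs found for the
 * same reference, and temporally scaled MVs from the co-located blocks in
 * the first list-0 reference frame. */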
void x264_mb_predict_mv_ref16x16( x264_t *h, int i_list, int i_ref, int mvc[8][2], int *i_mvc )
{
int16_t (*mvr)[2] = h->mb.mvr[i_list][i_ref];
int i = 0;
#define SET_MVP(mvp) { \
mvc[i][0] = mvp[0]; \
mvc[i][1] = mvp[1]; \
i++; \
}
/* b_direct */
if( h->sh.i_type == SLICE_TYPE_B
&& h->mb.cache.ref[i_list][x264_scan8[12]] == i_ref )
{
SET_MVP( h->mb.cache.mv[i_list][x264_scan8[12]] );
}
/* spatial predictors */
if( h->mb.i_neighbour & MB_LEFT )
{
int i_mb_l = h->mb.i_mb_xy - 1;
/* skip MBs didn't go through the whole search process, so mvr is undefined */
if( !IS_SKIP( h->mb.type[i_mb_l] ) )
SET_MVP( mvr[i_mb_l] );
}
if( h->mb.i_neighbour & MB_TOP )
{
int i_mb_t = h->mb.i_mb_xy - h->mb.i_mb_stride;
if( !IS_SKIP( h->mb.type[i_mb_t] ) )
SET_MVP( mvr[i_mb_t] );
if( h->mb.i_neighbour & MB_TOPLEFT && !IS_SKIP( h->mb.type[i_mb_t - 1] ) )
SET_MVP( mvr[i_mb_t-1] );
if( h->mb.i_mb_x < h->mb.i_mb_stride - 1 && !IS_SKIP( h->mb.type[i_mb_t + 1] ) )
SET_MVP( mvr[i_mb_t+1] );
}
#undef SET_MVP
/* temporal predictors */
if( h->fref0[0]->i_ref[0] > 0 )
{
x264_frame_t *l0 = h->fref0[0];
int ref_col_cur, ref_col_prev = -1;
int scale = 0;
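/* SET_TMVP takes the co-located list-0 MV at the given 8x8 offset and
 * scales it by the ratio of POC distances:
 *   scale = 256 * (poc_cur - poc_ref) / (poc_col - poc_col_ref)
 * Illustrative numbers only: poc_cur=8, poc_ref=4, co-located frame at
 * poc 6 referencing poc 0 gives scale = 256*4/6 = 170, i.e. the co-located
 * MV is multiplied by roughly 2/3. */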
#define SET_TMVP(dx, dy) { \
int i_b4 = h->mb.i_b4_xy + dx*4 + dy*4*h->mb.i_b4_stride; \
int i_b8 = h->mb.i_b8_xy + dx*2 + dy*2*h->mb.i_b8_stride; \
ref_col_cur = l0->ref[0][i_b8]; \
if( ref_col_cur >= 0 ) \
{ \
/* TODO: calc once per frame and tablize? */\
if( ref_col_cur != ref_col_prev ) \
scale = 256 * (h->fenc->i_poc - h->fref0[i_ref]->i_poc) \
/ (l0->i_poc - l0->ref_poc[0][ref_col_cur]); \
mvc[i][0] = l0->mv[0][i_b4][0] * scale / 256; \
mvc[i][1] = l0->mv[0][i_b4][1] * scale / 256; \
i++; \
ref_col_prev = ref_col_cur; \
} \
}
SET_TMVP(0,0);
if( h->mb.i_mb_x < h->sps->i_mb_width-1 )
SET_TMVP(1,0);
if( h->mb.i_mb_y < h->sps->i_mb_height-1 )
SET_TMVP(0,1);
#undef SET_TMVP
}
*i_mvc = i;
}
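/* Motion compensation helpers: (x,y) and width/height are in 4x4-block
 * units within the MB, MVs are in quarter-pel. 4*4*x / 4*4*y convert the
 * block offset to quarter-pel so mc_luma can fold it into the MV; chroma is
 * compensated at half resolution, where the same quarter-pel luma MV acts as
 * an eighth-pel chroma MV. The _0/_1/_01 variants use list 0, list 1 and
 * bi-prediction respectively. */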
static inline void x264_mb_mc_0xywh( x264_t *h, int x, int y, int width, int height )
{
const int i8 = x264_scan8[0]+x+8*y;
const int i_ref = h->mb.cache.ref[0][i8];
const int mvx = x264_clip3( h->mb.cache.mv[0][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] );
const int mvy = x264_clip3( h->mb.cache.mv[0][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] );
h->mc.mc_luma( h->mb.pic.p_fref[0][i_ref], h->mb.pic.i_stride[0],
&h->mb.pic.p_fdec[0][4*y*FDEC_STRIDE+4*x], FDEC_STRIDE,
mvx + 4*4*x, mvy + 4*4*y, 4*width, 4*height );
h->mc.mc_chroma( &h->mb.pic.p_fref[0][i_ref][4][2*y*h->mb.pic.i_stride[1]+2*x], h->mb.pic.i_stride[1],
&h->mb.pic.p_fdec[1][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE,
mvx, mvy, 2*width, 2*height );
h->mc.mc_chroma( &h->mb.pic.p_fref[0][i_ref][5][2*y*h->mb.pic.i_stride[2]+2*x], h->mb.pic.i_stride[2],
&h->mb.pic.p_fdec[2][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE,
mvx, mvy, 2*width, 2*height );
}
static inline void x264_mb_mc_1xywh( x264_t *h, int x, int y, int width, int height )
{
const int i8 = x264_scan8[0]+x+8*y;
const int i_ref = h->mb.cache.ref[1][i8];
const int mvx = x264_clip3( h->mb.cache.mv[1][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] );
const int mvy = x264_clip3( h->mb.cache.mv[1][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] );
h->mc.mc_luma( h->mb.pic.p_fref[1][i_ref], h->mb.pic.i_stride[0],
&h->mb.pic.p_fdec[0][4*y*FDEC_STRIDE+4*x], FDEC_STRIDE,
mvx + 4*4*x, mvy + 4*4*y, 4*width, 4*height );
h->mc.mc_chroma( &h->mb.pic.p_fref[1][i_ref][4][2*y*h->mb.pic.i_stride[1]+2*x], h->mb.pic.i_stride[1],
&h->mb.pic.p_fdec[1][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE,
mvx, mvy, 2*width, 2*height );
h->mc.mc_chroma( &h->mb.pic.p_fref[1][i_ref][5][2*y*h->mb.pic.i_stride[2]+2*x], h->mb.pic.i_stride[2],
&h->mb.pic.p_fdec[2][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE,
mvx, mvy, 2*width, 2*height );
}
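/* Bi-prediction: the list-0 prediction is written to fdec first, the list-1
 * prediction goes to a temporary buffer, and the two are combined either
 * with the bipred_weight table (when b_weighted_bipred is set) or with a
 * plain average. i_mode selects the pixel-size-specific avg function;
 * i_mode+3 is the corresponding half-size variant used for chroma. */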
static inline void x264_mb_mc_01xywh( x264_t *h, int x, int y, int width, int height )
{
const int i8 = x264_scan8[0]+x+8*y;
const int i_ref1 = h->mb.cache.ref[1][i8];
const int mvx1 = x264_clip3( h->mb.cache.mv[1][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] );
const int mvy1 = x264_clip3( h->mb.cache.mv[1][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] );
DECLARE_ALIGNED( uint8_t, tmp[16*16], 16 );
int i_mode = x264_size2pixel[height][width];
x264_mb_mc_0xywh( h, x, y, width, height );
h->mc.mc_luma( h->mb.pic.p_fref[1][i_ref1], h->mb.pic.i_stride[0],
tmp, 16, mvx1 + 4*4*x, mvy1 + 4*4*y, 4*width, 4*height );
if( h->param.analyse.b_weighted_bipred )
{
const int i_ref0 = h->mb.cache.ref[0][i8];
const int weight = h->mb.bipred_weight[i_ref0][i_ref1];
h->mc.avg_weight[i_mode]( &h->mb.pic.p_fdec[0][4*y*FDEC_STRIDE+4*x], FDEC_STRIDE, tmp, 16, weight );
h->mc.mc_chroma( &h->mb.pic.p_fref[1][i_ref1][4][2*y*h->mb.pic.i_stride[1]+2*x], h->mb.pic.i_stride[1],
tmp, 16, mvx1, mvy1, 2*width, 2*height );
h->mc.avg_weight[i_mode+3]( &h->mb.pic.p_fdec[1][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE, tmp, 16, weight );
h->mc.mc_chroma( &h->mb.pic.p_fref[1][i_ref1][5][2*y*h->mb.pic.i_stride[2]+2*x], h->mb.pic.i_stride[2],
tmp, 16, mvx1, mvy1, 2*width, 2*height );
h->mc.avg_weight[i_mode+3]( &h->mb.pic.p_fdec[2][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE, tmp, 16, weight );
}
else
{
h->mc.avg[i_mode]( &h->mb.pic.p_fdec[0][4*y*FDEC_STRIDE+4*x], FDEC_STRIDE, tmp, 16 );
h->mc.mc_chroma( &h->mb.pic.p_fref[1][i_ref1][4][2*y*h->mb.pic.i_stride[1]+2*x], h->mb.pic.i_stride[1],
tmp, 16, mvx1, mvy1, 2*width, 2*height );
h->mc.avg[i_mode+3]( &h->mb.pic.p_fdec[1][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE, tmp, 16 );
h->mc.mc_chroma( &h->mb.pic.p_fref[1][i_ref1][5][2*y*h->mb.pic.i_stride[2]+2*x], h->mb.pic.i_stride[2],
tmp, 16, mvx1, mvy1, 2*width, 2*height );
h->mc.avg[i_mode+3]( &h->mb.pic.p_fdec[2][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE, tmp, 16 );
}
}
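/* MC for a direct-predicted 8x8 block at 4x4-block coords (x,y): a single
 * 8x8 call when direct_8x8_inference gives one MV per 8x8, otherwise four
 * 4x4 calls. Which lists are used depends on which cached refs are >= 0. */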
static void x264_mb_mc_direct8x8( x264_t *h, int x, int y )
{
const int i8 = x264_scan8[0] + x + 8*y;
/* FIXME: optimize based on current block size, not global settings? */
if( h->sps->b_direct8x8_inference )
{
if( h->mb.cache.ref[0][i8] >= 0 )
if( h->mb.cache.ref[1][i8] >= 0 )
x264_mb_mc_01xywh( h, x, y, 2, 2 );
else
x264_mb_mc_0xywh( h, x, y, 2, 2 );
else
x264_mb_mc_1xywh( h, x, y, 2, 2 );
}
else
{
if( h->mb.cache.ref[0][i8] >= 0 )
{
if( h->mb.cache.ref[1][i8] >= 0 )
{
x264_mb_mc_01xywh( h, x+0, y+0, 1, 1 );
x264_mb_mc_01xywh( h, x+1, y+0, 1, 1 );
x264_mb_mc_01xywh( h, x+0, y+1, 1, 1 );
x264_mb_mc_01xywh( h, x+1, y+1, 1, 1 );
}
else
{
x264_mb_mc_0xywh( h, x+0, y+0, 1, 1 );
x264_mb_mc_0xywh( h, x+1, y+0, 1, 1 );
x264_mb_mc_0xywh( h, x+0, y+1, 1, 1 );
x264_mb_mc_0xywh( h, x+1, y+1, 1, 1 );
}
}
else
{
x264_mb_mc_1xywh( h, x+0, y+0, 1, 1 );
x264_mb_mc_1xywh( h, x+1, y+0, 1, 1 );
x264_mb_mc_1xywh( h, x+0, y+1, 1, 1 );
x264_mb_mc_1xywh( h, x+1, y+1, 1, 1 );
}
}
}
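/* MC for the i8-th 8x8 partition (0..3), dispatching on its sub-partition
 * type (L0/L1/BI x 8x8/8x4/4x8/4x4, or direct). */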
void x264_mb_mc_8x8( x264_t *h, int i8 )
{
const int x = 2*(i8&1);
const int y = 2*(i8>>1);
switch( h->mb.i_sub_partition[i8] )
{
case D_L0_8x8:
x264_mb_mc_0xywh( h, x, y, 2, 2 );
break;
case D_L0_8x4:
x264_mb_mc_0xywh( h, x, y+0, 2, 1 );
x264_mb_mc_0xywh( h, x, y+1, 2, 1 );
break;
case D_L0_4x8:
x264_mb_mc_0xywh( h, x+0, y, 1, 2 );
x264_mb_mc_0xywh( h, x+1, y, 1, 2 );
break;
case D_L0_4x4:
x264_mb_mc_0xywh( h, x+0, y+0, 1, 1 );
x264_mb_mc_0xywh( h, x+1, y+0, 1, 1 );
x264_mb_mc_0xywh( h, x+0, y+1, 1, 1 );
x264_mb_mc_0xywh( h, x+1, y+1, 1, 1 );
break;
case D_L1_8x8:
x264_mb_mc_1xywh( h, x, y, 2, 2 );
break;
case D_L1_8x4:
x264_mb_mc_1xywh( h, x, y+0, 2, 1 );
x264_mb_mc_1xywh( h, x, y+1, 2, 1 );
break;
case D_L1_4x8:
x264_mb_mc_1xywh( h, x+0, y, 1, 2 );
x264_mb_mc_1xywh( h, x+1, y, 1, 2 );
break;
case D_L1_4x4:
x264_mb_mc_1xywh( h, x+0, y+0, 1, 1 );
x264_mb_mc_1xywh( h, x+1, y+0, 1, 1 );
x264_mb_mc_1xywh( h, x+0, y+1, 1, 1 );
x264_mb_mc_1xywh( h, x+1, y+1, 1, 1 );
break;
case D_BI_8x8:
x264_mb_mc_01xywh( h, x, y, 2, 2 );
break;
case D_BI_8x4:
x264_mb_mc_01xywh( h, x, y+0, 2, 1 );
x264_mb_mc_01xywh( h, x, y+1, 2, 1 );
break;
case D_BI_4x8:
x264_mb_mc_01xywh( h, x+0, y, 1, 2 );
x264_mb_mc_01xywh( h, x+1, y, 1, 2 );
break;
case D_BI_4x4:
x264_mb_mc_01xywh( h, x+0, y+0, 1, 1 );
x264_mb_mc_01xywh( h, x+1, y+0, 1, 1 );
x264_mb_mc_01xywh( h, x+0, y+1, 1, 1 );
x264_mb_mc_01xywh( h, x+1, y+1, 1, 1 );
break;
case D_DIRECT_8x8:
x264_mb_mc_direct8x8( h, x, y );
break;
}
}
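/* MC for the whole macroblock, dispatching on h->mb.i_type and
 * h->mb.i_partition. */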
void x264_mb_mc( x264_t *h )
{
if( h->mb.i_type == P_L0 ) /* P macroblock predicted from list 0 only */
{
/* handled as three partition cases: 16x16, 16x8 and 8x16 */
if( h->mb.i_partition == D_16x16 )
{
x264_mb_mc_0xywh( h, 0, 0, 4, 4 );