/*
 * analyse.c -- x264 macroblock analysis (excerpt)
 * (non-source page-header text removed)
 */
/* NOTE(review): this chunk begins mid-function -- the enclosing definition
 * starts before the visible excerpt, so only its tail is documented here.
 * It appears to be an intra RD-refinement pass dispatching on h->mb.i_type
 * (I_4x4 branch below, I_8x8 branch in the else-if) -- confirm against the
 * full file. */
/* I_4x4 path: for each of the 16 4x4 luma blocks, try every available
 * prediction mode and keep the one with the lowest RD cost. */
uint32_t pels[4] = {0}; // doesn't need initting, just shuts up a gcc warning
int i_nnz = 0;
for( idx = 0; idx < 16; idx++ )
{
uint8_t *p_src_by;
uint8_t *p_dst_by;
i_best = COST_MAX;
i_pred_mode = x264_mb_predict_intra4x4_mode( h, idx );
/* top-left pixel offset of this 4x4 block inside the mb planes */
x = block_idx_x[idx];
y = block_idx_y[idx];
p_src_by = p_src + 4*x + 4*y*FENC_STRIDE;
p_dst_by = p_dst + 4*x + 4*y*FDEC_STRIDE;
predict_4x4_mode_available( h->mb.i_neighbour4[idx], predict_mode, &i_max );
if( (h->mb.i_neighbour4[idx] & (MB_TOPRIGHT|MB_TOP)) == MB_TOP )
/* emulate missing topright samples: replicate the rightmost available
 * top pixel into the four top-right positions */
*(uint32_t*) &p_dst_by[4 - FDEC_STRIDE] = p_dst_by[3 - FDEC_STRIDE] * 0x01010101U;
for( i = 0; i < i_max; i++ )
{
i_mode = predict_mode[i];
/* predict into the decode buffer, then measure the true RD cost */
h->predict_4x4[i_mode]( p_dst_by );
i_satd = x264_rd_cost_i4x4( h, a->i_lambda2, idx, i_mode );
if( i_best > i_satd )
{
a->i_predict4x4[idx] = i_mode;
i_best = i_satd;
/* snapshot this (currently best) mode's reconstruction -- one 32-bit
 * word per row -- and its nonzero count, since later, worse trials
 * will overwrite the decode buffer and the cache */
pels[0] = *(uint32_t*)(p_dst_by+0*FDEC_STRIDE);
pels[1] = *(uint32_t*)(p_dst_by+1*FDEC_STRIDE);
pels[2] = *(uint32_t*)(p_dst_by+2*FDEC_STRIDE);
pels[3] = *(uint32_t*)(p_dst_by+3*FDEC_STRIDE);
i_nnz = h->mb.cache.non_zero_count[x264_scan8[idx]];
}
}
/* restore the winning mode's reconstruction and nnz, then record the mode */
*(uint32_t*)(p_dst_by+0*FDEC_STRIDE) = pels[0];
*(uint32_t*)(p_dst_by+1*FDEC_STRIDE) = pels[1];
*(uint32_t*)(p_dst_by+2*FDEC_STRIDE) = pels[2];
*(uint32_t*)(p_dst_by+3*FDEC_STRIDE) = pels[3];
h->mb.cache.non_zero_count[x264_scan8[idx]] = i_nnz;
h->mb.cache.intra4x4_pred_mode[x264_scan8[idx]] = a->i_predict4x4[idx];
}
}
else if( h->mb.i_type == I_8x8 )
{
/* I_8x8 path: same refinement per 8x8 block, with SATD-based mode pruning */
DECLARE_ALIGNED( uint8_t, edge[33], 8 );
for( idx = 0; idx < 4; idx++ )
{
uint64_t pels_h = 0;
uint8_t pels_v[7];
int i_nnz[3];
uint8_t *p_src_by;
uint8_t *p_dst_by;
int j;
/* pruning threshold: skip modes whose directional SATD exceeds 11/8 of
 * the currently selected mode's SATD */
int i_thresh = a->i_satd_i8x8_dir[a->i_predict8x8[idx]][idx] * 11/8;
i_best = COST_MAX;
i_pred_mode = x264_mb_predict_intra4x4_mode( h, 4*idx );
x = idx&1;
y = idx>>1;
p_src_by = p_src + 8*x + 8*y*FENC_STRIDE;
p_dst_by = p_dst + 8*x + 8*y*FDEC_STRIDE;
predict_4x4_mode_available( h->mb.i_neighbour8[idx], predict_mode, &i_max );
x264_predict_8x8_filter( p_dst_by, edge, h->mb.i_neighbour8[idx], ALL_NEIGHBORS );
for( i = 0; i < i_max; i++ )
{
i_mode = predict_mode[i];
if( a->i_satd_i8x8_dir[i_mode][idx] > i_thresh )
continue;
h->predict_8x8[i_mode]( p_dst_by, edge );
i_satd = x264_rd_cost_i8x8( h, a->i_lambda2, idx, i_mode );
if( i_best > i_satd )
{
a->i_predict8x8[idx] = i_mode;
i_best = i_satd;
/* save only the bottom row and (for left-column blocks, idx 0 and 2)
 * the rightmost column -- presumably the only pels that neighbouring
 * blocks later predict from */
pels_h = *(uint64_t*)(p_dst_by+7*FDEC_STRIDE);
if( !(idx&1) )
for( j=0; j<7; j++ )
pels_v[j] = p_dst_by[7+j*FDEC_STRIDE];
/* NOTE(review): only the nnz of sub-blocks 4*idx+1..4*idx+3 are
 * saved/restored; presumably 4*idx is handled elsewhere -- confirm */
for( j=0; j<3; j++ )
i_nnz[j] = h->mb.cache.non_zero_count[x264_scan8[4*idx+j+1]];
}
}
/* restore the best candidate's edge pels and nnz, then record the mode */
*(uint64_t*)(p_dst_by+7*FDEC_STRIDE) = pels_h;
if( !(idx&1) )
for( j=0; j<7; j++ )
p_dst_by[7+j*FDEC_STRIDE] = pels_v[j];
for( j=0; j<3; j++ )
h->mb.cache.non_zero_count[x264_scan8[4*idx+j+1]] = i_nnz[j];
x264_macroblock_cache_intra8x8_pred( h, 2*x, 2*y, a->i_predict8x8[idx] );
}
}
}
/* LOAD_FENC( m, src, xoff, yoff ):
 * Point the x264_me_t's source-frame strides and plane pointers (1 luma +
 * 2 chroma) at pixel offset (xoff,yoff).  Chroma offsets are halved --
 * presumably 4:2:0 subsampling; all three planes share FENC_STRIDE.
 * Note: implicitly reads `h` from the caller's scope. */
#define LOAD_FENC( m, src, xoff, yoff) \
(m)->i_stride[0] = h->mb.pic.i_stride[0]; \
(m)->i_stride[1] = h->mb.pic.i_stride[1]; \
(m)->p_fenc[0] = &(src)[0][(xoff)+(yoff)*FENC_STRIDE]; \
(m)->p_fenc[1] = &(src)[1][((xoff)>>1)+((yoff)>>1)*FENC_STRIDE]; \
(m)->p_fenc[2] = &(src)[2][((xoff)>>1)+((yoff)>>1)*FENC_STRIDE];
/* LOAD_HPELS( m, src, list, ref, xoff, yoff ):
 * Point the x264_me_t's reference pointers at pixel offset (xoff,yoff):
 * src[0..3] use the luma stride (presumably the four half-pel interpolated
 * planes, per the macro name -- confirm against the fref plane layout),
 * src[4..5] use the chroma stride with halved offsets, and m->integral is
 * aimed at the integral image for this list/ref.
 * Note: implicitly reads `h` from the caller's scope. */
#define LOAD_HPELS(m, src, list, ref, xoff, yoff) \
(m)->p_fref[0] = &(src)[0][(xoff)+(yoff)*(m)->i_stride[0]]; \
(m)->p_fref[1] = &(src)[1][(xoff)+(yoff)*(m)->i_stride[0]]; \
(m)->p_fref[2] = &(src)[2][(xoff)+(yoff)*(m)->i_stride[0]]; \
(m)->p_fref[3] = &(src)[3][(xoff)+(yoff)*(m)->i_stride[0]]; \
(m)->p_fref[4] = &(src)[4][((xoff)>>1)+((yoff)>>1)*(m)->i_stride[1]]; \
(m)->p_fref[5] = &(src)[5][((xoff)>>1)+((yoff)>>1)*(m)->i_stride[1]]; \
(m)->integral = &h->mb.pic.p_integral[list][ref][(xoff)+(yoff)*(m)->i_stride[0]];
/* REF_COST( list, ref ):
 * Lambda-scaled bit cost of coding reference index `ref`, sized by
 * bs_size_te() against the number of active refs in the given list.
 * Note: implicitly reads `a` and `h` from the caller's scope. */
#define REF_COST(list, ref) \
(a->i_lambda * bs_size_te( h->sh.i_num_ref_idx_l##list##_active - 1, ref ))
/* Analyse a P macroblock as a single 16x16 partition: motion-search every
 * list-0 reference frame and keep the cheapest result (motion cost plus the
 * cost of coding the reference index).  May decide the whole macroblock is
 * P_SKIP and return early without finishing the ref loop. */
static void x264_mb_analyse_inter_p16x16( x264_t *h, x264_mb_analysis_t *a )
{
    x264_me_t m;
    int i_ref;
    int mvc[7][2], i_mvc;   /* mv candidate list fed to the search */
    int i_halfpel_thresh = INT_MAX;
    /* the halfpel threshold is only useful with more than one reference */
    int *p_halfpel_thresh = h->i_ref0>1 ? &i_halfpel_thresh : NULL;

    /* 16x16 Search on all ref frame */
    m.i_pixel = PIXEL_16x16;
    m.p_cost_mv = a->p_cost_mv;
    LOAD_FENC( &m, h->mb.pic.p_fenc, 0, 0 );

    a->l0.me16x16.cost = INT_MAX;
    for( i_ref = 0; i_ref < h->i_ref0; i_ref++ )
    {
        const int i_ref_cost = REF_COST( 0, i_ref );
        /* bias the threshold by this ref's coding cost so the comparison is
         * on total cost; the bias is undone after the search */
        i_halfpel_thresh -= i_ref_cost;
        m.i_ref_cost = i_ref_cost;
        m.i_ref = i_ref;

        /* search with ref */
        LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 0, 0 );
        x264_mb_predict_mv_16x16( h, 0, i_ref, m.mvp );
        x264_mb_predict_mv_ref16x16( h, 0, i_ref, mvc, &i_mvc );
        x264_me_search_ref( h, &m, mvc, i_mvc, p_halfpel_thresh );

        /* early termination
         * SSD threshold would probably be better than SATD */
        if( i_ref == 0 && a->b_try_pskip && m.cost-m.cost_mv < 300*a->i_lambda )
        {
            int mvskip[2];
            x264_mb_predict_mv_pskip( h, mvskip );
            /* if the found mv is within 1 of the pskip predictor, probe pskip */
            if( abs(m.mv[0]-mvskip[0]) + abs(m.mv[1]-mvskip[1]) <= 1
                && x264_macroblock_probe_pskip( h ) )
            {
                h->mb.i_type = P_SKIP;
                x264_analyse_update_cache( h, a );
                return;
            }
        }

        m.cost += i_ref_cost;
        i_halfpel_thresh += i_ref_cost;

        if( m.cost < a->l0.me16x16.cost )
            a->l0.me16x16 = m;

        /* save mv for predicting neighbors */
        a->l0.mvc[i_ref][0][0] =
        h->mb.mvr[0][i_ref][h->mb.i_mb_xy][0] = m.mv[0];
        a->l0.mvc[i_ref][0][1] =
        h->mb.mvr[0][i_ref][h->mb.i_mb_xy][1] = m.mv[1];
    }

    x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, a->l0.me16x16.i_ref );

    h->mb.i_type = P_L0;
    /* NOTE(review): verify a->l0.i_ref is the intended field here (as opposed
     * to a->l0.me16x16.i_ref, which is what the rest of this function uses) */
    if( a->b_mbrd && a->l0.i_ref == 0 )
    {
        /* when the best mv coincides with the pskip predictor, compute the
         * exact RD cost of the 16x16 mode now -- presumably so a later
         * skip-vs-P_L0 decision can use it; confirm against i_rd16x16 users */
        int mvskip[2];
        x264_mb_predict_mv_pskip( h, mvskip );
        if( a->l0.me16x16.mv[0] == mvskip[0] && a->l0.me16x16.mv[1] == mvskip[1] )
        {
            h->mb.i_partition = D_16x16;
            x264_macroblock_cache_mv( h, 0, 0, 4, 4, 0, a->l0.me16x16.mv[0], a->l0.me16x16.mv[1] );
            a->l0.i_rd16x16 = x264_rd_cost_mb( h, a->i_lambda2 );
        }
    }
}
/* Analyse the four 8x8 partitions allowing an independent list-0 reference
 * frame per partition ("mixed ref").  Each partition is searched over refs
 * 0..i_maxref and the cheapest (ref, mv) pair is kept and cached. */
static void x264_mb_analyse_inter_p8x8_mixed_ref( x264_t *h, x264_mb_analysis_t *a )
{
    x264_me_t m;
    int i_ref;
    uint8_t **p_fenc = h->mb.pic.p_fenc;
    int i_halfpel_thresh = INT_MAX;
    /* halfpel-threshold pruning deliberately disabled here (commented out);
     * NOTE(review): the -=/+= adjustments of i_halfpel_thresh below are
     * therefore dead arithmetic */
    int *p_halfpel_thresh = /*h->i_ref0>1 ? &i_halfpel_thresh : */NULL;
    int i;
    int i_maxref = h->i_ref0-1;

    h->mb.i_partition = D_8x8;

    /* early termination: if 16x16 chose ref 0, then evalute no refs older
     * than those used by the neighbors */
    if( i_maxref > 0 && a->l0.me16x16.i_ref == 0 &&
        h->mb.i_mb_type_top && h->mb.i_mb_type_left )
    {
        /* scan8 offsets: the row above (-8 ...) and the column to the left
         * (... -1) of this macroblock in the ref cache */
        i_maxref = 0;
        i_maxref = X264_MAX( i_maxref, h->mb.cache.ref[0][ X264_SCAN8_0 - 8 - 1 ] );
        i_maxref = X264_MAX( i_maxref, h->mb.cache.ref[0][ X264_SCAN8_0 - 8 + 0 ] );
        i_maxref = X264_MAX( i_maxref, h->mb.cache.ref[0][ X264_SCAN8_0 - 8 + 2 ] );
        i_maxref = X264_MAX( i_maxref, h->mb.cache.ref[0][ X264_SCAN8_0 - 8 + 4 ] );
        i_maxref = X264_MAX( i_maxref, h->mb.cache.ref[0][ X264_SCAN8_0 + 0 - 1 ] );
        i_maxref = X264_MAX( i_maxref, h->mb.cache.ref[0][ X264_SCAN8_0 + 2*8 - 1 ] );
    }

    /* seed each ref's candidate list with the mv recorded for this mb
     * position in mvr (written during the 16x16 analysis) */
    for( i_ref = 0; i_ref <= i_maxref; i_ref++ )
    {
        a->l0.mvc[i_ref][0][0] = h->mb.mvr[0][i_ref][h->mb.i_mb_xy][0];
        a->l0.mvc[i_ref][0][1] = h->mb.mvr[0][i_ref][h->mb.i_mb_xy][1];
    }

    for( i = 0; i < 4; i++ )
    {
        x264_me_t *l0m = &a->l0.me8x8[i];
        const int x8 = i%2;
        const int y8 = i/2;

        m.i_pixel = PIXEL_8x8;
        m.p_cost_mv = a->p_cost_mv;

        LOAD_FENC( &m, p_fenc, 8*x8, 8*y8 );
        l0m->cost = INT_MAX;
        for( i_ref = 0; i_ref <= i_maxref; i_ref++ )
        {
            const int i_ref_cost = REF_COST( 0, i_ref );
            i_halfpel_thresh -= i_ref_cost;
            m.i_ref_cost = i_ref_cost;
            m.i_ref = i_ref;

            LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 8*x8, 8*y8 );
            x264_macroblock_cache_ref( h, 2*x8, 2*y8, 2, 2, 0, i_ref );
            x264_mb_predict_mv( h, 0, 4*i, 2, m.mvp );
            /* candidates: the seed mv plus the mvs of already-searched
             * partitions for this ref (i+1 entries) */
            x264_me_search_ref( h, &m, a->l0.mvc[i_ref], i+1, p_halfpel_thresh );

            m.cost += i_ref_cost;
            i_halfpel_thresh += i_ref_cost;
            /* record this partition's mv as a candidate for the next one */
            *(uint64_t*)a->l0.mvc[i_ref][i+1] = *(uint64_t*)m.mv;

            if( m.cost < l0m->cost )
                *l0m = m;
        }
        x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 0, l0m->mv[0], l0m->mv[1] );
        x264_macroblock_cache_ref( h, 2*x8, 2*y8, 2, 2, 0, l0m->i_ref );

        /* mb type cost */
        l0m->cost += a->i_lambda * i_sub_mb_p_cost_table[D_L0_8x8];
    }

    a->l0.i_cost8x8 = a->l0.me8x8[0].cost + a->l0.me8x8[1].cost +
                      a->l0.me8x8[2].cost + a->l0.me8x8[3].cost;
    h->mb.i_sub_partition[0] = h->mb.i_sub_partition[1] =
    h->mb.i_sub_partition[2] = h->mb.i_sub_partition[3] = D_L0_8x8;
}
/* Analyse the four 8x8 partitions of a P macroblock, all constrained to the
 * reference frame already chosen by the 16x16 search.  Fills a->l0.me8x8[],
 * a->l0.i_cost8x8 and marks every sub-partition as D_L0_8x8. */
static void x264_mb_analyse_inter_p8x8( x264_t *h, x264_mb_analysis_t *a )
{
    /* one shared ref => the ref coding cost is a per-partition constant */
    const int i_ref = a->l0.me16x16.i_ref;
    const int i_ref_cost = REF_COST( 0, i_ref );
    uint8_t **p_fref = h->mb.pic.p_fref[0][i_ref];
    uint8_t **p_fenc = h->mb.pic.p_fenc;
    int (*mvc)[2] = a->l0.mvc[i_ref];
    int i_mvc = 1;
    int i8;

    /* XXX Needed for x264_mb_predict_mv */
    h->mb.i_partition = D_8x8;

    /* seed the candidate list with the 16x16 motion vector */
    *(uint64_t*)mvc[0] = *(uint64_t*)a->l0.me16x16.mv;

    for( i8 = 0; i8 < 4; i8++ )
    {
        x264_me_t *m = &a->l0.me8x8[i8];
        const int x8 = i8 & 1;
        const int y8 = i8 >> 1;

        m->i_pixel    = PIXEL_8x8;
        m->p_cost_mv  = a->p_cost_mv;
        m->i_ref_cost = i_ref_cost;
        m->i_ref      = i_ref;

        LOAD_FENC( m, p_fenc, 8*x8, 8*y8 );
        LOAD_HPELS( m, p_fref, 0, i_ref, 8*x8, 8*y8 );
        x264_mb_predict_mv( h, 0, 4*i8, 2, m->mvp );
        x264_me_search( h, m, mvc, i_mvc );

        x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 0, m->mv[0], m->mv[1] );

        /* each searched partition's mv becomes a candidate for the next */
        *(uint64_t*)mvc[i_mvc++] = *(uint64_t*)m->mv;

        /* mb type cost */
        m->cost += i_ref_cost + a->i_lambda * i_sub_mb_p_cost_table[D_L0_8x8];
    }

    /* theoretically this should include 4*ref_cost,
     * but 3 seems a better approximation of cabac. */
    a->l0.i_cost8x8 = a->l0.me8x8[0].cost + a->l0.me8x8[1].cost
                    + a->l0.me8x8[2].cost + a->l0.me8x8[3].cost
                    - REF_COST( 0, a->l0.me16x16.i_ref );

    h->mb.i_sub_partition[0] = h->mb.i_sub_partition[1] =
    h->mb.i_sub_partition[2] = h->mb.i_sub_partition[3] = D_L0_8x8;
}
/* Analyse the two 16x8 partitions of a P macroblock.  Each partition is
 * searched using the reference(s) chosen by its two underlying 8x8
 * partitions; results land in a->l0.me16x8[] and a->l0.i_cost16x8. */
static void x264_mb_analyse_inter_p16x8( x264_t *h, x264_mb_analysis_t *a )
{
    x264_me_t m;
    uint8_t **p_fenc = h->mb.pic.p_fenc;
    int mvc[3][2];
    int part, k;

    /* XXX Needed for x264_mb_predict_mv */
    h->mb.i_partition = D_16x8;

    for( part = 0; part < 2; part++ )
    {
        x264_me_t *l0m = &a->l0.me16x8[part];
        /* candidate refs come from the two 8x8 halves; if they agree,
         * a single search suffices */
        const int ref8[2] = { a->l0.me8x8[2*part].i_ref, a->l0.me8x8[2*part+1].i_ref };
        const int i_ref8s = ref8[0] == ref8[1] ? 1 : 2;

        m.i_pixel = PIXEL_16x8;
        m.p_cost_mv = a->p_cost_mv;
        LOAD_FENC( &m, p_fenc, 0, 8*part );

        l0m->cost = INT_MAX;
        for( k = 0; k < i_ref8s; k++ )
        {
            const int i_ref = ref8[k];
            const int i_ref_cost = REF_COST( 0, i_ref );
            m.i_ref_cost = i_ref_cost;
            m.i_ref = i_ref;

            /* if we skipped the 16x16 predictor, we wouldn't have to copy anything... */
            *(uint64_t*)mvc[0] = *(uint64_t*)a->l0.mvc[i_ref][0];
            *(uint64_t*)mvc[1] = *(uint64_t*)a->l0.mvc[i_ref][2*part+1];
            *(uint64_t*)mvc[2] = *(uint64_t*)a->l0.mvc[i_ref][2*part+2];

            LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 0, 8*part );
            x264_macroblock_cache_ref( h, 0, 2*part, 4, 2, 0, i_ref );
            x264_mb_predict_mv( h, 0, 8*part, 4, m.mvp );
            x264_me_search( h, &m, mvc, 3 );

            m.cost += i_ref_cost;
            if( m.cost < l0m->cost )
                *l0m = m;
        }
        x264_macroblock_cache_mv( h, 0, 2*part, 4, 2, 0, l0m->mv[0], l0m->mv[1] );
        x264_macroblock_cache_ref( h, 0, 2*part, 4, 2, 0, l0m->i_ref );
    }

    a->l0.i_cost16x8 = a->l0.me16x8[0].cost + a->l0.me16x8[1].cost;
}
static void x264_mb_analyse_inter_p8x16( x264_t *h, x264_mb_analysis_t *a )
{
/* NOTE: excerpt truncated here -- the body of x264_mb_analyse_inter_p8x16 and
 * the remainder of the file are not included; trailing non-source page text
 * (keyboard-shortcut help) removed. */