/* analyse.c — x264 macroblock analysis (extract) */
x264_me_t m;
uint8_t **p_fenc = h->mb.pic.p_fenc;
int mvc[3][2];
int i, j;
/* XXX Needed for x264_mb_predict_mv */
h->mb.i_partition = D_8x16;
for( i = 0; i < 2; i++ )
{
x264_me_t *l0m = &a->l0.me8x16[i];
const int ref8[2] = { a->l0.me8x8[i].i_ref, a->l0.me8x8[i+2].i_ref };
const int i_ref8s = ( ref8[0] == ref8[1] ) ? 1 : 2;
m.i_pixel = PIXEL_8x16;
m.p_cost_mv = a->p_cost_mv;
LOAD_FENC( &m, p_fenc, 8*i, 0 );
l0m->cost = INT_MAX;
for( j = 0; j < i_ref8s; j++ )
{
const int i_ref = ref8[j];
const int i_ref_cost = REF_COST( 0, i_ref );
m.i_ref_cost = i_ref_cost;
m.i_ref = i_ref;
*(uint64_t*)mvc[0] = *(uint64_t*)a->l0.mvc[i_ref][0];
*(uint64_t*)mvc[1] = *(uint64_t*)a->l0.mvc[i_ref][i+1];
*(uint64_t*)mvc[2] = *(uint64_t*)a->l0.mvc[i_ref][i+3];
LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 8*i, 0 );
x264_macroblock_cache_ref( h, 2*i, 0, 2, 4, 0, i_ref );
x264_mb_predict_mv( h, 0, 4*i, 2, m.mvp );
x264_me_search( h, &m, mvc, 3 );
m.cost += i_ref_cost;
if( m.cost < l0m->cost )
*l0m = m;
}
x264_macroblock_cache_mv( h, 2*i, 0, 2, 4, 0, l0m->mv[0], l0m->mv[1] );
x264_macroblock_cache_ref( h, 2*i, 0, 2, 4, 0, l0m->i_ref );
}
a->l0.i_cost8x16 = a->l0.me8x16[0].cost + a->l0.me8x16[1].cost;
}
/* Cost the chroma prediction for an 8x8 luma block that was split into
 * 4x4 / 8x4 / 4x8 sub-partitions: motion-compensate each sub-block's chroma
 * (half-resolution: 2x2 / 4x2 / 2x4) from the U (plane 4) and V (plane 5)
 * reference planes using that sub-block's ME result, then compare against
 * the encoded chroma.
 * Returns the summed U+V comparison cost for the whole 8x8 region. */
static int x264_mb_analyse_inter_p4x4_chroma( x264_t *h, x264_mb_analysis_t *a, uint8_t **p_fref, int i8x8, int pixel )
{
    DECLARE_ALIGNED( uint8_t, pix1[8*8], 8 );
    DECLARE_ALIGNED( uint8_t, pix2[8*8], 8 );
    const int i_stride = h->mb.pic.i_stride[1];
    /* chroma offsets of this 8x8 block in the reference and encode planes */
    const int or = 4*(i8x8&1) + 2*(i8x8&2)*i_stride;
    const int oe = 4*(i8x8&1) + 2*(i8x8&2)*FENC_STRIDE;

/* MC one chroma sub-block from both U and V reference planes.
 * do{}while(0) keeps the two calls a single statement at the call site. */
#define CHROMA4x4MC( width, height, me, x, y ) \
    do \
    { \
        h->mc.mc_chroma( &p_fref[4][or+x+y*i_stride], i_stride, &pix1[x+y*8], 8, (me).mv[0], (me).mv[1], width, height ); \
        h->mc.mc_chroma( &p_fref[5][or+x+y*i_stride], i_stride, &pix2[x+y*8], 8, (me).mv[0], (me).mv[1], width, height ); \
    } while( 0 )

    if( pixel == PIXEL_4x4 )
    {
        CHROMA4x4MC( 2,2, a->l0.me4x4[i8x8][0], 0,0 );
        CHROMA4x4MC( 2,2, a->l0.me4x4[i8x8][1], 0,2 );
        CHROMA4x4MC( 2,2, a->l0.me4x4[i8x8][2], 2,0 );
        CHROMA4x4MC( 2,2, a->l0.me4x4[i8x8][3], 2,2 );
    }
    else if( pixel == PIXEL_8x4 )
    {
        CHROMA4x4MC( 4,2, a->l0.me8x4[i8x8][0], 0,0 );
        CHROMA4x4MC( 4,2, a->l0.me8x4[i8x8][1], 0,2 );
    }
    else
    {
        CHROMA4x4MC( 2,4, a->l0.me4x8[i8x8][0], 0,0 );
        CHROMA4x4MC( 2,4, a->l0.me4x8[i8x8][1], 2,0 );
    }
#undef CHROMA4x4MC

    return h->pixf.mbcmp[PIXEL_4x4]( &h->mb.pic.p_fenc[1][oe], FENC_STRIDE, pix1, 8 )
         + h->pixf.mbcmp[PIXEL_4x4]( &h->mb.pic.p_fenc[2][oe], FENC_STRIDE, pix2, 8 );
}
/* Motion estimation for the four 4x4 luma sub-partitions of 8x8 block i8x8,
 * reusing the reference frame chosen by the 8x8 analysis and seeding the
 * search with the 8x8 MV.  Writes the total cost (distortion + ref cost +
 * sub-mb-type lambda cost, plus an optional chroma term) into
 * a->l0.i_cost4x4[i8x8]. */
static void x264_mb_analyse_inter_p4x4( x264_t *h, x264_mb_analysis_t *a, int i8x8 )
{
    uint8_t **p_fref = h->mb.pic.p_fref[0][a->l0.me8x8[i8x8].i_ref];
    uint8_t **p_fenc = h->mb.pic.p_fenc;
    const int i_ref = a->l0.me8x8[i8x8].i_ref;
    int i4x4;

    /* XXX Needed for x264_mb_predict_mv */
    h->mb.i_partition = D_8x8;

    for( i4x4 = 0; i4x4 < 4; i4x4++ )
    {
        const int idx = 4*i8x8 + i4x4;      /* 4x4 block index within the MB */
        const int x4 = block_idx_x[idx];
        const int y4 = block_idx_y[idx];
        /* only the first sub-block uses the 8x8 MV as an extra candidate */
        const int i_mvc = (i4x4 == 0);
        x264_me_t *m = &a->l0.me4x4[i8x8][i4x4];

        m->i_pixel = PIXEL_4x4;
        m->p_cost_mv = a->p_cost_mv;

        LOAD_FENC( m, p_fenc, 4*x4, 4*y4 );
        LOAD_HPELS( m, p_fref, 0, i_ref, 4*x4, 4*y4 );

        x264_mb_predict_mv( h, 0, idx, 1, m->mvp );
        x264_me_search( h, m, &a->l0.me8x8[i8x8].mv, i_mvc );

        /* cache the MV so the next sub-block's predictor can see it */
        x264_macroblock_cache_mv( h, x4, y4, 1, 1, 0, m->mv[0], m->mv[1] );
    }

    a->l0.i_cost4x4[i8x8] = a->l0.me4x4[i8x8][0].cost +
                            a->l0.me4x4[i8x8][1].cost +
                            a->l0.me4x4[i8x8][2].cost +
                            a->l0.me4x4[i8x8][3].cost +
                            REF_COST( 0, i_ref ) +
                            a->i_lambda * i_sub_mb_p_cost_table[D_L0_4x4];
    if( h->mb.b_chroma_me )
        a->l0.i_cost4x4[i8x8] += x264_mb_analyse_inter_p4x4_chroma( h, a, p_fref, i8x8, PIXEL_4x4 );
}
/* Motion estimation for the two 8x4 sub-partitions (top and bottom rows)
 * of 8x8 block i8x8, reusing the 8x8 analysis' reference frame.  Seeds the
 * first search with the 4x4 result — presumably x264_mb_analyse_inter_p4x4
 * has already run for this block; confirm against the caller.  Writes the
 * total cost into a->l0.i_cost8x4[i8x8]. */
static void x264_mb_analyse_inter_p8x4( x264_t *h, x264_mb_analysis_t *a, int i8x8 )
{
    uint8_t **p_fref = h->mb.pic.p_fref[0][a->l0.me8x8[i8x8].i_ref];
    uint8_t **p_fenc = h->mb.pic.p_fenc;
    const int i_ref = a->l0.me8x8[i8x8].i_ref;
    int i8x4;

    /* XXX Needed for x264_mb_predict_mv */
    h->mb.i_partition = D_8x8;

    for( i8x4 = 0; i8x4 < 2; i8x4++ )
    {
        const int idx = 4*i8x8 + 2*i8x4;    /* top-left 4x4 index of this 8x4 row */
        const int x4 = block_idx_x[idx];
        const int y4 = block_idx_y[idx];
        /* only the first sub-block gets the extra MV candidate */
        const int i_mvc = (i8x4 == 0);
        x264_me_t *m = &a->l0.me8x4[i8x8][i8x4];

        m->i_pixel = PIXEL_8x4;
        m->p_cost_mv = a->p_cost_mv;

        LOAD_FENC( m, p_fenc, 4*x4, 4*y4 );
        LOAD_HPELS( m, p_fref, 0, i_ref, 4*x4, 4*y4 );

        x264_mb_predict_mv( h, 0, idx, 2, m->mvp );
        x264_me_search( h, m, &a->l0.me4x4[i8x8][0].mv, i_mvc );

        /* cache the MV (2x1 blocks wide) for subsequent MV prediction */
        x264_macroblock_cache_mv( h, x4, y4, 2, 1, 0, m->mv[0], m->mv[1] );
    }

    a->l0.i_cost8x4[i8x8] = a->l0.me8x4[i8x8][0].cost + a->l0.me8x4[i8x8][1].cost +
                            REF_COST( 0, i_ref ) +
                            a->i_lambda * i_sub_mb_p_cost_table[D_L0_8x4];
    if( h->mb.b_chroma_me )
        a->l0.i_cost8x4[i8x8] += x264_mb_analyse_inter_p4x4_chroma( h, a, p_fref, i8x8, PIXEL_8x4 );
}
/* Motion estimation for the two 4x8 sub-partitions (left and right columns)
 * of 8x8 block i8x8, reusing the 8x8 analysis' reference frame.  Seeds the
 * first search with the 4x4 result — presumably x264_mb_analyse_inter_p4x4
 * has already run for this block; confirm against the caller.  Writes the
 * total cost into a->l0.i_cost4x8[i8x8]. */
static void x264_mb_analyse_inter_p4x8( x264_t *h, x264_mb_analysis_t *a, int i8x8 )
{
    uint8_t **p_fref = h->mb.pic.p_fref[0][a->l0.me8x8[i8x8].i_ref];
    uint8_t **p_fenc = h->mb.pic.p_fenc;
    const int i_ref = a->l0.me8x8[i8x8].i_ref;
    int i4x8;

    /* XXX Needed for x264_mb_predict_mv */
    h->mb.i_partition = D_8x8;

    for( i4x8 = 0; i4x8 < 2; i4x8++ )
    {
        const int idx = 4*i8x8 + i4x8;      /* top 4x4 index of this 4x8 column */
        const int x4 = block_idx_x[idx];
        const int y4 = block_idx_y[idx];
        /* only the first sub-block gets the extra MV candidate */
        const int i_mvc = (i4x8 == 0);
        x264_me_t *m = &a->l0.me4x8[i8x8][i4x8];

        m->i_pixel = PIXEL_4x8;
        m->p_cost_mv = a->p_cost_mv;

        LOAD_FENC( m, p_fenc, 4*x4, 4*y4 );
        LOAD_HPELS( m, p_fref, 0, i_ref, 4*x4, 4*y4 );

        x264_mb_predict_mv( h, 0, idx, 1, m->mvp );
        x264_me_search( h, m, &a->l0.me4x4[i8x8][0].mv, i_mvc );

        /* cache the MV (1x2 blocks tall) for subsequent MV prediction */
        x264_macroblock_cache_mv( h, x4, y4, 1, 2, 0, m->mv[0], m->mv[1] );
    }

    a->l0.i_cost4x8[i8x8] = a->l0.me4x8[i8x8][0].cost + a->l0.me4x8[i8x8][1].cost +
                            REF_COST( 0, i_ref ) +
                            a->i_lambda * i_sub_mb_p_cost_table[D_L0_4x8];
    if( h->mb.b_chroma_me )
        a->l0.i_cost4x8[i8x8] += x264_mb_analyse_inter_p4x4_chroma( h, a, p_fref, i8x8, PIXEL_4x8 );
}
/* Cost the B-DIRECT mode by comparing the already-reconstructed direct
 * prediction (in fdec) against the source, one 8x8 block at a time.
 * Fills a->i_cost8x8direct[0..3] (each including the DIRECT_8x8 sub-mb
 * type bits) and a->i_cost16x16direct (mb-type bits + raw distortion only,
 * without the per-8x8 sub-mb bits). */
static void x264_mb_analyse_inter_direct( x264_t *h, x264_mb_analysis_t *a )
{
    /* Assumes that fdec still contains the results of
     * x264_mb_predict_mv_direct16x16 and x264_mb_mc */
    uint8_t **p_fenc = h->mb.pic.p_fenc;
    uint8_t **p_fdec = h->mb.pic.p_fdec;
    int i8;

    a->i_cost16x16direct = a->i_lambda * i_mb_b_cost_table[B_DIRECT];
    for( i8 = 0; i8 < 4; i8++ )
    {
        const int x8 = 8 * (i8 & 1);
        const int y8 = 8 * (i8 >> 1);
        int i_cost = h->pixf.mbcmp[PIXEL_8x8]( &p_fenc[0][x8+y8*FENC_STRIDE], FENC_STRIDE,
                                               &p_fdec[0][x8+y8*FDEC_STRIDE], FDEC_STRIDE );
        a->i_cost16x16direct += i_cost;
        /* the per-8x8 cost additionally pays the sub-mb type bits;
         * the 16x16 total deliberately does not */
        a->i_cost8x8direct[i8] = i_cost + a->i_lambda * i_sub_mb_b_cost_table[D_DIRECT_8x8];
    }
}
/* Average src2 into pix1 (in place), using the precomputed bipred weight
 * for the current L0/L1 reference pair when weighted biprediction is
 * enabled, or a plain average otherwise.  Wrapped in do{}while(0) so a
 * call followed by ';' forms exactly one statement and is safe inside an
 * unbraced if/else. */
#define WEIGHTED_AVG( size, pix1, stride1, src2, stride2 ) \
do \
{ \
    if( h->param.analyse.b_weighted_bipred ) \
        h->mc.avg_weight[size]( pix1, stride1, src2, stride2, \
                h->mb.bipred_weight[a->l0.i_ref][a->l1.i_ref] ); \
    else \
        h->mc.avg[size]( pix1, stride1, src2, stride2 ); \
} while( 0 )
/* 16x16 B-frame analysis: motion-search every reference in list 0 and
 * list 1, keep the best candidate from each list, then build and cost the
 * bidirectional (BI) prediction from the two winners.
 * Outputs: a->l0.me16x16 / a->l1.me16x16 (with their ref cost removed
 * again, and the L0_L0 / L1_L1 mb-type bits added) and a->i_cost16x16bi. */
static void x264_mb_analyse_inter_b16x16( x264_t *h, x264_mb_analysis_t *a )
{
    uint8_t pix1[16*16], pix2[16*16];
    uint8_t *src2;
    int stride2 = 16;
    int weight;

    x264_me_t m;                      /* scratch ME state, reused for both lists */
    int i_ref;
    int mvc[8][2], i_mvc;
    /* early-termination threshold for the subpel search; only worth
     * tracking when there is more than one reference to compare */
    int i_halfpel_thresh = INT_MAX;
    int *p_halfpel_thresh = h->i_ref0>1 ? &i_halfpel_thresh : NULL;

    /* 16x16 Search on all ref frame */
    m.i_pixel = PIXEL_16x16;
    m.p_cost_mv = a->p_cost_mv;
    LOAD_FENC( &m, h->mb.pic.p_fenc, 0, 0 );

    /* ME for List 0 */
    a->l0.me16x16.cost = INT_MAX;
    for( i_ref = 0; i_ref < h->i_ref0; i_ref++ )
    {
        /* search with ref */
        LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 0, 0 );
        x264_mb_predict_mv_16x16( h, 0, i_ref, m.mvp );
        x264_mb_predict_mv_ref16x16( h, 0, i_ref, mvc, &i_mvc );
        x264_me_search_ref( h, &m, mvc, i_mvc, p_halfpel_thresh );

        /* add ref cost */
        m.cost += REF_COST( 0, i_ref );

        if( m.cost < a->l0.me16x16.cost )
        {
            a->l0.i_ref = i_ref;
            a->l0.me16x16 = m;
        }

        /* save mv for predicting neighbors */
        h->mb.mvr[0][i_ref][h->mb.i_mb_xy][0] = m.mv[0];
        h->mb.mvr[0][i_ref][h->mb.i_mb_xy][1] = m.mv[1];
    }
    /* subtract ref cost, so we don't have to add it for the other MB types */
    a->l0.me16x16.cost -= REF_COST( 0, a->l0.i_ref );

    /* ME for list 1 (same scheme; threshold state reset first) */
    i_halfpel_thresh = INT_MAX;
    p_halfpel_thresh = h->i_ref1>1 ? &i_halfpel_thresh : NULL;
    a->l1.me16x16.cost = INT_MAX;
    for( i_ref = 0; i_ref < h->i_ref1; i_ref++ )
    {
        /* search with ref */
        LOAD_HPELS( &m, h->mb.pic.p_fref[1][i_ref], 1, i_ref, 0, 0 );
        x264_mb_predict_mv_16x16( h, 1, i_ref, m.mvp );
        x264_mb_predict_mv_ref16x16( h, 1, i_ref, mvc, &i_mvc );
        x264_me_search_ref( h, &m, mvc, i_mvc, p_halfpel_thresh );

        /* add ref cost */
        m.cost += REF_COST( 1, i_ref );

        if( m.cost < a->l1.me16x16.cost )
        {
            a->l1.i_ref = i_ref;
            a->l1.me16x16 = m;
        }

        /* save mv for predicting neighbors */
        h->mb.mvr[1][i_ref][h->mb.i_mb_xy][0] = m.mv[0];
        h->mb.mvr[1][i_ref][h->mb.i_mb_xy][1] = m.mv[1];
    }
    /* subtract ref cost, so we don't have to add it for the other MB types */
    a->l1.me16x16.cost -= REF_COST( 1, a->l1.i_ref );

    /* Set global ref, needed for other modes? */
    x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, a->l0.i_ref );
    x264_macroblock_cache_ref( h, 0, 0, 4, 4, 1, a->l1.i_ref );

    /* get cost of BI mode: pick which prediction goes through get_ref
     * (fast path) based on whether the L0 MV is halfpel-aligned, i.e.
     * both qpel components are even */
    weight = h->mb.bipred_weight[a->l0.i_ref][a->l1.i_ref];
    if ( ((a->l0.me16x16.mv[0] | a->l0.me16x16.mv[1]) & 1) == 0 )
    {
        /* l0 reference is halfpel, so get_ref on it will make it faster */
        src2 = h->mc.get_ref( h->mb.pic.p_fref[0][a->l0.i_ref], h->mb.pic.i_stride[0],
                              pix2, &stride2,
                              a->l0.me16x16.mv[0], a->l0.me16x16.mv[1],
                              16, 16 );
        h->mc.mc_luma( h->mb.pic.p_fref[1][a->l1.i_ref], h->mb.pic.i_stride[0],
                       pix1, 16,
                       a->l1.me16x16.mv[0], a->l1.me16x16.mv[1],
                       16, 16 );
        /* L0/L1 operand roles are swapped relative to the else-branch,
         * so the complementary weight is used */
        weight = 64 - weight;
    }
    else
    {
        /* if l0 was qpel, we'll use get_ref on l1 instead */
        h->mc.mc_luma( h->mb.pic.p_fref[0][a->l0.i_ref], h->mb.pic.i_stride[0],
                       pix1, 16,
                       a->l0.me16x16.mv[0], a->l0.me16x16.mv[1],
                       16, 16 );
        src2 = h->mc.get_ref( h->mb.pic.p_fref[1][a->l1.i_ref], h->mb.pic.i_stride[0],
                              pix2, &stride2,
                              a->l1.me16x16.mv[0], a->l1.me16x16.mv[1],
                              16, 16 );
    }

    /* average the two predictions into pix1, weighted if enabled */
    if( h->param.analyse.b_weighted_bipred )
        h->mc.avg_weight[PIXEL_16x16]( pix1, 16, src2, stride2, weight );
    else
        h->mc.avg[PIXEL_16x16]( pix1, 16, src2, stride2 );

    /* BI cost = distortion of the averaged prediction
     *         + both ref costs + both MV costs */
    a->i_cost16x16bi = h->pixf.mbcmp[PIXEL_16x16]( h->mb.pic.p_fenc[0], FENC_STRIDE, pix1, 16 )
                     + REF_COST( 0, a->l0.i_ref )
                     + REF_COST( 1, a->l1.i_ref )
                     + a->l0.me16x16.cost_mv
                     + a->l1.me16x16.cost_mv;

    /* mb type cost */
    a->i_cost16x16bi += a->i_lambda * i_mb_b_cost_table[B_BI_BI];
    a->l0.me16x16.cost += a->i_lambda * i_mb_b_cost_table[B_L0_L0];
    a->l1.me16x16.cost += a->i_lambda * i_mb_b_cost_table[B_L1_L1];
}
static inline void x264_mb_cache_mv_p8x8( x264_t *h, x264_mb_analysis_t *a, int i )
{
const int x = 2*(i%2);
const int y = 2*(i/2);
switch( h->mb.i_sub_partition[i] )
{
case D_L0_8x8:
x264_macroblock_cache_mv( h, x, y, 2, 2, 0, a->l0.me8x8[i].mv[0], a->l0.me8x8[i].mv[1] );
break;
case D_L0_8x4:
x264_macroblock_cache_mv( h, x, y+0, 2, 1, 0, a->l0.me8x4[i][0].mv[0], a->l0.me8x4[i][0].mv[1] );
x264_macroblock_cache_mv( h, x, y+1, 2, 1, 0, a->l0.me8x4[i][1].mv[0], a->l0.me8x4[i][1].mv[1] );
break;
case D_L0_4x8:
x264_macroblock_cache_mv( h, x+0, y, 1, 2, 0, a->l0.me4x8[i][0].mv[0], a->l0.me4x8[i][0].mv[1] );
x264_macroblock_cache_mv( h, x+1, y, 1, 2, 0, a->l0.me4x8[i][1].mv[0], a->l0.me4x8[i][1].mv[1] );
break;
case D_L0_4x4:
x264_macroblock_cache_mv( h, x+0, y+0, 1, 1, 0, a->l0.me4x4[i][0].mv[0], a->l0.me4x4[i][0].mv[1] );
x264_macroblock_cache_mv( h, x+1, y+0, 1, 1, 0, a->l0.me4x4[i][1].mv[0], a->l0.me4x4[i][1].mv[1] );
x264_macroblock_cache_mv( h, x+0, y+1, 1, 1, 0, a->l0.me4x4[i][2].mv[0], a->l0.me4x4[i][2].mv[1] );
x264_macroblock_cache_mv( h, x+1, y+1, 1, 1, 0, a->l0.me4x4[i][3].mv[0], a->l0.me4x4[i][3].mv[1] );
break;
/* NOTE: extract truncated here — the remainder of x264_mb_cache_mv_p8x8
 * (any further sub-partition cases and the closing braces) is not part of
 * this chunk. */