analyse.c.svn-base
static void x264_mb_analyse_inter_b16x16( x264_t *h, x264_mb_analysis_t *a )
{
    uint8_t pix1[16*16], pix2[16*16];
    uint8_t *src2;
    int stride2 = 16;
    int src2_ref, pix1_ref;

    x264_me_t m;
    int i_ref;
    int mvc[8][2], i_mvc;
    int i_halfpel_thresh = INT_MAX;
    int *p_halfpel_thresh = h->i_ref0>1 ? &i_halfpel_thresh : NULL;

    /* 16x16 Search on all ref frame */
    m.i_pixel = PIXEL_16x16;
    m.p_cost_mv = a->p_cost_mv;
    LOAD_FENC( &m, h->mb.pic.p_fenc, 0, 0 );

    /* ME for List 0 */
    a->l0.me16x16.cost = INT_MAX;
    for( i_ref = 0; i_ref < h->i_ref0; i_ref++ )
    {
        /* search with ref */
        LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, 0 );
        x264_mb_predict_mv_16x16( h, 0, i_ref, m.mvp );
        x264_mb_predict_mv_ref16x16( h, 0, i_ref, mvc, &i_mvc );
        x264_me_search_ref( h, &m, mvc, i_mvc, p_halfpel_thresh );

        /* add ref cost */
        m.cost += REF_COST( 0, i_ref );

        if( m.cost < a->l0.me16x16.cost )
        {
            a->l0.i_ref = i_ref;
            a->l0.me16x16 = m;
        }

        /* save mv for predicting neighbors */
        h->mb.mvr[0][i_ref][h->mb.i_mb_xy][0] = m.mv[0];
        h->mb.mvr[0][i_ref][h->mb.i_mb_xy][1] = m.mv[1];
    }
    /* subtract ref cost, so we don't have to add it for the other MB types */
    a->l0.me16x16.cost -= REF_COST( 0, a->l0.i_ref );

    /* ME for list 1 */
    i_halfpel_thresh = INT_MAX;
    p_halfpel_thresh = h->i_ref1>1 ? &i_halfpel_thresh : NULL;
    a->l1.me16x16.cost = INT_MAX;
    for( i_ref = 0; i_ref < h->i_ref1; i_ref++ )
    {
        /* search with ref */
        LOAD_HPELS( &m, h->mb.pic.p_fref[1][i_ref], 0, 0 );
        x264_mb_predict_mv_16x16( h, 1, i_ref, m.mvp );
        x264_mb_predict_mv_ref16x16( h, 1, i_ref, mvc, &i_mvc );
        x264_me_search_ref( h, &m, mvc, i_mvc, p_halfpel_thresh );

        /* add ref cost */
        m.cost += REF_COST( 1, i_ref );

        if( m.cost < a->l1.me16x16.cost )
        {
            a->l1.i_ref = i_ref;
            a->l1.me16x16 = m;
        }

        /* save mv for predicting neighbors */
        h->mb.mvr[1][i_ref][h->mb.i_mb_xy][0] = m.mv[0];
        h->mb.mvr[1][i_ref][h->mb.i_mb_xy][1] = m.mv[1];
    }
    /* subtract ref cost, so we don't have to add it for the other MB types */
    a->l1.me16x16.cost -= REF_COST( 1, a->l1.i_ref );

    /* Set global ref, needed for other modes? */
    x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, a->l0.i_ref );
    x264_macroblock_cache_ref( h, 0, 0, 4, 4, 1, a->l1.i_ref );

    /* get cost of BI mode */
    if( ((a->l0.me16x16.mv[0] | a->l0.me16x16.mv[1]) & 1) == 0 )
    {
        /* l0 reference is halfpel, so get_ref on it will make it faster */
        src2 = h->mc.get_ref( h->mb.pic.p_fref[0][a->l0.i_ref], h->mb.pic.i_stride[0],
                              pix2, &stride2,
                              a->l0.me16x16.mv[0], a->l0.me16x16.mv[1],
                              16, 16 );
        h->mc.mc_luma( h->mb.pic.p_fref[1][a->l1.i_ref], h->mb.pic.i_stride[0],
                       pix1, 16,
                       a->l1.me16x16.mv[0], a->l1.me16x16.mv[1],
                       16, 16 );
        src2_ref = a->l0.i_ref;
        pix1_ref = a->l1.i_ref;
    }
    else
    {
        /* if l0 was qpel, we'll use get_ref on l1 instead */
        h->mc.mc_luma( h->mb.pic.p_fref[0][a->l0.i_ref], h->mb.pic.i_stride[0],
                       pix1, 16,
                       a->l0.me16x16.mv[0], a->l0.me16x16.mv[1],
                       16, 16 );
        src2 = h->mc.get_ref( h->mb.pic.p_fref[1][a->l1.i_ref], h->mb.pic.i_stride[0],
                              pix2, &stride2,
                              a->l1.me16x16.mv[0], a->l1.me16x16.mv[1],
                              16, 16 );
        src2_ref = a->l1.i_ref;
        pix1_ref = a->l0.i_ref;
    }

    if( h->param.analyse.b_weighted_bipred )
        h->mc.avg_weight[PIXEL_16x16]( pix1, 16, src2, stride2,
                                       h->mb.bipred_weight[pix1_ref][src2_ref] );
    else
        h->mc.avg[PIXEL_16x16]( pix1, 16, src2, stride2 );

    a->i_cost16x16bi = h->pixf.mbcmp[PIXEL_16x16]( h->mb.pic.p_fenc[0], h->mb.pic.i_stride[0], pix1, 16 )
                     + REF_COST( 0, a->l0.i_ref )
                     + REF_COST( 1, a->l1.i_ref )
                     + a->l0.me16x16.cost_mv
                     + a->l1.me16x16.cost_mv;

    /* mb type cost */
    a->i_cost16x16bi   += a->i_lambda * i_mb_b_cost_table[B_BI_BI];
    a->l0.me16x16.cost += a->i_lambda * i_mb_b_cost_table[B_L0_L0];
    a->l1.me16x16.cost += a->i_lambda * i_mb_b_cost_table[B_L1_L1];

    if( a->b_mbrd )
    {
        int i_satd_thresh;

        if( a->l0.me16x16.cost < a->i_best_satd )
            a->i_best_satd = a->l0.me16x16.cost;
        if( a->l1.me16x16.cost < a->i_best_satd )
            a->i_best_satd = a->l1.me16x16.cost;
        if( a->i_cost16x16bi < a->i_best_satd )
            a->i_best_satd = a->i_cost16x16bi;
        i_satd_thresh = a->i_best_satd * 3/2;

        h->mb.i_partition = D_16x16;

        /* L0 */
        if( a->l0.me16x16.cost < i_satd_thresh )
        {
            h->mb.i_type = B_L0_L0;
            x264_macroblock_cache_mv( h, 0, 0, 4, 4, 0, a->l0.me16x16.mv[0], a->l0.me16x16.mv[1] );
            a->l0.me16x16.cost = x264_rd_cost_mb( h, a->i_lambda2 );
        }
        else
            a->l0.me16x16.cost = COST_MAX;

        /* L1 */
        if( a->l1.me16x16.cost < i_satd_thresh )
        {
            h->mb.i_type = B_L1_L1;
            x264_macroblock_cache_mv( h, 0, 0, 4, 4, 1, a->l1.me16x16.mv[0], a->l1.me16x16.mv[1] );
            a->l1.me16x16.cost = x264_rd_cost_mb( h, a->i_lambda2 );
        }
        else
            a->l1.me16x16.cost = COST_MAX;

        /* BI */
        if( a->i_cost16x16bi < i_satd_thresh )
        {
            h->mb.i_type = B_BI_BI;
            a->i_cost16x16bi = x264_rd_cost_mb( h, a->i_lambda2 );
        }
        else
            a->i_cost16x16bi = COST_MAX;
    }
}

static inline void x264_mb_cache_mv_p8x8( x264_t *h, x264_mb_analysis_t *a, int i )
{
    const int x = 2*(i%2);
    const int y = 2*(i/2);

    switch( h->mb.i_sub_partition[i] )
    {
        case D_L0_8x8:
            x264_macroblock_cache_mv( h, x, y, 2, 2, 0, a->l0.me8x8[i].mv[0], a->l0.me8x8[i].mv[1] );
            break;
        case D_L0_8x4:
            x264_macroblock_cache_mv( h, x, y+0, 2, 1, 0, a->l0.me8x4[i][0].mv[0], a->l0.me8x4[i][0].mv[1] );
            x264_macroblock_cache_mv( h, x, y+1, 2, 1, 0, a->l0.me8x4[i][1].mv[0], a->l0.me8x4[i][1].mv[1] );
            break;
        case D_L0_4x8:
            x264_macroblock_cache_mv( h, x+0, y, 1, 2, 0, a->l0.me4x8[i][0].mv[0], a->l0.me4x8[i][0].mv[1] );
            x264_macroblock_cache_mv( h, x+1, y, 1, 2, 0, a->l0.me4x8[i][1].mv[0], a->l0.me4x8[i][1].mv[1] );
            break;
        case D_L0_4x4:
            x264_macroblock_cache_mv( h, x+0, y+0, 1, 1, 0, a->l0.me4x4[i][0].mv[0], a->l0.me4x4[i][0].mv[1] );
            x264_macroblock_cache_mv( h, x+1, y+0, 1, 1, 0, a->l0.me4x4[i][1].mv[0], a->l0.me4x4[i][1].mv[1] );
            x264_macroblock_cache_mv( h, x+0, y+1, 1, 1, 0, a->l0.me4x4[i][2].mv[0], a->l0.me4x4[i][2].mv[1] );
            x264_macroblock_cache_mv( h, x+1, y+1, 1, 1, 0, a->l0.me4x4[i][3].mv[0], a->l0.me4x4[i][3].mv[1] );
            break;
        default:
            x264_log( h, X264_LOG_ERROR, "internal error\n" );
            break;
    }
}

#define CACHE_MV_BI(x,y,dx,dy,me0,me1,part) \
    if( x264_mb_partition_listX_table[0][part] ) \
    { \
        x264_macroblock_cache_ref( h, x,y,dx,dy, 0, a->l0.i_ref ); \
        x264_macroblock_cache_mv(  h, x,y,dx,dy, 0, me0.mv[0], me0.mv[1] ); \
    } \
    else \
    { \
        x264_macroblock_cache_ref( h, x,y,dx,dy, 0, -1 ); \
        x264_macroblock_cache_mv(  h, x,y,dx,dy, 0, 0, 0 ); \
        if( b_mvd ) \
            x264_macroblock_cache_mvd( h, x,y,dx,dy, 0, 0, 0 ); \
    } \
    if( x264_mb_partition_listX_table[1][part] ) \
    { \
        x264_macroblock_cache_ref( h, x,y,dx,dy, 1, a->l1.i_ref ); \
        x264_macroblock_cache_mv(  h, x,y,dx,dy, 1, me1.mv[0], me1.mv[1] ); \
    } \
    else \
    { \
        x264_macroblock_cache_ref( h, x,y,dx,dy, 1, -1 ); \
        x264_macroblock_cache_mv(  h, x,y,dx,dy, 1, 0, 0 ); \
        if( b_mvd ) \
            x264_macroblock_cache_mvd( h, x,y,dx,dy, 1, 0, 0 ); \
    }

static inline void x264_mb_cache_mv_b8x8( x264_t *h, x264_mb_analysis_t *a, int i, int b_mvd )
{
    int x = (i%2)*2;
    int y = (i/2)*2;

    if( h->mb.i_sub_partition[i] == D_DIRECT_8x8 )
    {
        x264_mb_load_mv_direct8x8( h, i );
        if( b_mvd )
        {
            x264_macroblock_cache_mvd(  h, x, y, 2, 2, 0, 0, 0 );
            x264_macroblock_cache_mvd(  h, x, y, 2, 2, 1, 0, 0 );
            x264_macroblock_cache_skip( h, x, y, 2, 2, 1 );
        }
    }
    else
    {
        CACHE_MV_BI( x, y, 2, 2, a->l0.me8x8[i], a->l1.me8x8[i], h->mb.i_sub_partition[i] );
    }
}

static inline void x264_mb_cache_mv_b16x8( x264_t *h, x264_mb_analysis_t *a, int i, int b_mvd )
{
    CACHE_MV_BI( 0, 2*i, 4, 2, a->l0.me16x8[i], a->l1.me16x8[i], a->i_mb_partition16x8[i] );
}

static inline void x264_mb_cache_mv_b8x16( x264_t *h, x264_mb_analysis_t *a, int i, int b_mvd )
{
    CACHE_MV_BI( 2*i, 0, 2, 4, a->l0.me8x16[i], a->l1.me8x16[i], a->i_mb_partition8x16[i] );
}
#undef CACHE_MV_BI

static void x264_mb_analyse_inter_b8x8( x264_t *h, x264_mb_analysis_t *a )
{
    uint8_t **p_fref[2] =
        { h->mb.pic.p_fref[0][a->l0.i_ref],
          h->mb.pic.p_fref[1][a->l1.i_ref] };
    uint8_t pix[2][8*8];
    int i, l;

    /* XXX Needed for x264_mb_predict_mv */
    h->mb.i_partition = D_8x8;

    a->i_cost8x8bi = 0;

    for( i = 0; i < 4; i++ )
    {
        const int x8 = i%2;
        const int y8 = i/2;
        int i_part_cost;
        int i_part_cost_bi = 0;

        for( l = 0; l < 2; l++ )
        {
            x264_mb_analysis_list_t *lX = l ? &a->l1 : &a->l0;
            x264_me_t *m = &lX->me8x8[i];

            m->i_pixel = PIXEL_8x8;
            m->p_cost_mv = a->p_cost_mv;

            LOAD_FENC( m, h->mb.pic.p_fenc, 8*x8, 8*y8 );
            LOAD_HPELS( m, p_fref[l], 8*x8, 8*y8 );

            x264_mb_predict_mv( h, l, 4*i, 2, m->mvp );
            x264_me_search( h, m, &lX->me16x16.mv, 1 );

            x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, l, m->mv[0], m->mv[1] );

            /* BI mode */
            h->mc.mc_luma( m->p_fref, m->i_stride[0], pix[l], 8,
                           m->mv[0], m->mv[1], 8, 8 );
            i_part_cost_bi += m->cost_mv;
            /* FIXME: ref cost */
        }

        WEIGHTED_AVG( PIXEL_8x8, pix[0], 8, pix[1], 8 );
        i_part_cost_bi += h->pixf.mbcmp[PIXEL_8x8]( a->l0.me8x8[i].p_fenc[0], h->mb.pic.i_stride[0], pix[0], 8 )
                        + a->i_lambda * i_sub_mb_b_cost_table[D_BI_8x8];
        a->l0.me8x8[i].cost += a->i_lambda * i_sub_mb_b_cost_table[D_L0_8x8];
        a->l1.me8x8[i].cost += a->i_lambda * i_sub_mb_b_cost_table[D_L1_8x8];

        i_part_cost = a->l0.me8x8[i].cost;
        h->mb.i_sub_partition[i] = D_L0_8x8;
        if( a->l1.me8x8[i].cost < i_part_cost )
        {
            i_part_cost = a->l1.me8x8[i].cost;
            h->mb.i_sub_partition[i] = D_L1_8x8;
        }
        if( i_part_cost_bi < i_part_cost )
        {
            i_part_cost = i_part_cost_bi;
            h->mb.i_sub_partition[i] = D_BI_8x8;
        }
        if( a->i_cost8x8direct[i] < i_part_cost )
        {
            i_part_cost = a->i_cost8x8direct[i];
            h->mb.i_sub_partition[i] = D_DIRECT_8x8;
        }
        a->i_cost8x8bi += i_part_cost;

        /* XXX Needed for x264_mb_predict_mv */
        x264_mb_cache_mv_b8x8( h, a, i, 0 );
    }

    /* mb type cost */
    a->i_cost8x8bi += a->i_lambda * i_mb_b_cost_table[B_8x8];

    if( a->b_mbrd )
    {
        if( a->i_cost8x8bi < a->i_best_satd )
            a->i_best_satd = a->i_cost8x8bi;

        if( a->i_cost8x8bi < a->i_best_satd * 3/2 )
        {
            h->mb.i_type = B_8x8;
            h->mb.i_partition = D_8x8;
            a->i_cost8x8bi = x264_rd_cost_mb( h, a->i_lambda2 );
        }
        else
            a->i_cost8x8bi = COST_MAX;
    }
}

static void x264_mb_analyse_inter_b16x8( x264_t *h, x264_mb_analysis_t *a )
{
    uint8_t **p_fref[2] =
        { h->mb.pic.p_fref[0][a->l0.i_ref],
          h->mb.pic.p_fref[1][a->l1.i_ref] };
    uint8_t pix[2][16*8];
    int mvc[2][2];
    int i, l;

    h->mb.i_partition = D_16x8;
    a->i_cost16x8bi = 0;

    for( i = 0; i < 2; i++ )
    {
        int i_part_cost;
        int i_part_cost_bi = 0;

        /* TODO: check only the list(s) that were used in b8x8? */
        for( l = 0; l < 2; l++ )
        {
            x264_mb_analysis_list_t *lX = l ? &a->l1 : &a->l0;
            x264_me_t *m = &lX->me16x8[i];

            m->i_pixel = PIXEL_16x8;
            m->p_cost_mv = a->p_cost_mv;

            LOAD_FENC( m, h->mb.pic.p_fenc, 0, 8*i );
            LOAD_HPELS( m, p_fref[l], 0, 8*i );

            mvc[0][0] = lX->me8x8[2*i].mv[0];
            mvc[0][1] = lX->me8x8[2*i].mv[1];
            mvc[1][0] = lX->me8x8[2*i+1].mv[0];
            mvc[1][1] = lX->me8x8[2*i+1].mv[1];

            x264_mb_predict_mv( h, 0, 8*i, 2, m->mvp );
            x264_me_search( h, m, mvc, 2 );

            /* BI mode */
            h->mc.mc_luma( m->p_fref, m->i_stride[0], pix[l], 16,
                           m->mv[0], m->mv[1], 16, 8 );
            /* FIXME: ref cost */
            i_part_cost_bi += m->cost_mv;
        }

        WEIGHTED_AVG( PIXEL_16x8, pix[0], 16, pix[1], 16 );