/* x264 analyse.c (.svn-base copy) — inter partition analysis, excerpt.
 * (Code-viewer banner removed; it was not part of the source.) */
/* ------------------------------------------------------------------ *
 * Tail of the preceding function — its opening (signature, loop head,
 * declarations of m, l0m, i, i_ref, x8, y8, p_halfpel_thresh, ...) is
 * outside this excerpt.  From the shape (per-reference search loop,
 * halfpel threshold, best-of-refs kept in *l0m) this appears to be the
 * mixed-reference 8x8 analysis — TODO confirm against the full file.
 * ------------------------------------------------------------------ */
             x264_mb_predict_mv( h, 0, 4*i, 2, m.mvp );
             x264_me_search_ref( h, &m, a->l0.mvc[i_ref], i+1, p_halfpel_thresh );

             /* Add the cost of coding this reference index. */
             m.cost += i_ref_cost;
             i_halfpel_thresh += i_ref_cost;

             /* NOTE(review): copies both MV components with one 64-bit
              * move via type punning (mvc entries are int[2]);
              * relies on alignment/aliasing leniency — confirm the
              * project's compiler flags allow this. */
             *(uint64_t*)a->l0.mvc[i_ref][i+1] = *(uint64_t*)m.mv;

             if( m.cost < l0m->cost )
                 *l0m = m;
         }

         /* Commit the winning MV/ref for this 8x8 block to the MB cache. */
         x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 0, l0m->mv[0], l0m->mv[1] );
         x264_macroblock_cache_ref( h, 2*x8, 2*y8, 2, 2, 0, l0m->i_ref );

         /* mb type cost */
         l0m->cost += a->i_lambda * i_sub_mb_p_cost_table[D_L0_8x8];
     }

     a->l0.i_cost8x8 = a->l0.me8x8[0].cost + a->l0.me8x8[1].cost
                     + a->l0.me8x8[2].cost + a->l0.me8x8[3].cost;

     /* With macroblock RD enabled, replace the SATD-based estimate with
      * the true rate-distortion cost of coding this MB as P_8x8. */
     if( a->b_mbrd )
     {
         if( a->i_best_satd > a->l0.i_cost8x8 )
             a->i_best_satd = a->l0.i_cost8x8;
         h->mb.i_type = P_8x8;
         h->mb.i_sub_partition[0] = h->mb.i_sub_partition[1] =
         h->mb.i_sub_partition[2] = h->mb.i_sub_partition[3] = D_L0_8x8;
         a->l0.i_cost8x8 = x264_rd_cost_mb( h, a->i_lambda2 );
     }
}

/* Analyse P_8x8 with a single reference frame: one L0 motion search per
 * 8x8 sub-block, all four sub-blocks reusing the reference chosen by the
 * best 16x16 search (a->l0.me16x16.i_ref).  Each search is seeded with
 * the 16x16 MV plus the MVs of previously searched sub-blocks.
 * Result: a->l0.me8x8[0..3] and the total a->l0.i_cost8x8. */
static void x264_mb_analyse_inter_p8x8( x264_t *h, x264_mb_analysis_t *a )
{
    const int i_ref = a->l0.me16x16.i_ref;
    const int i_ref_cost = REF_COST( 0, i_ref );
    uint8_t **p_fref = h->mb.pic.p_fref[0][i_ref];
    uint8_t **p_fenc = h->mb.pic.p_fenc;
    int i_mvc;
    int (*mvc)[2] = a->l0.mvc[i_ref];
    int i;

    /* XXX Needed for x264_mb_predict_mv */
    h->mb.i_partition = D_8x8;

    /* Seed the MV-candidate list with the 16x16 result. */
    i_mvc = 1;
    *(uint64_t*)mvc[0] = *(uint64_t*)a->l0.me16x16.mv;

    for( i = 0; i < 4; i++ )
    {
        x264_me_t *m = &a->l0.me8x8[i];
        const int x8 = i%2;
        const int y8 = i/2;

        m->i_pixel = PIXEL_8x8;
        m->p_cost_mv = a->p_cost_mv;
        m->i_ref_cost = i_ref_cost;
        m->i_ref = i_ref;

        LOAD_FENC( m, p_fenc, 8*x8, 8*y8 );
        LOAD_HPELS( m, p_fref, 8*x8, 8*y8 );
        x264_mb_predict_mv( h, 0, 4*i, 2, m->mvp );
        x264_me_search( h, m, mvc, i_mvc );

        /* Cache immediately so the next sub-block's MV prediction sees it. */
        x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 0, m->mv[0], m->mv[1] );

        /* Append this MV to the candidate list for later sub-blocks. */
        *(uint64_t*)mvc[i_mvc] = *(uint64_t*)m->mv;
        i_mvc++;

        /* mb type cost */
        m->cost += i_ref_cost;
        m->cost += a->i_lambda * i_sub_mb_p_cost_table[D_L0_8x8];
    }

    /* theoretically this should include 4*ref_cost,
     * but 3 seems a better approximation of cabac. */
    a->l0.i_cost8x8 = a->l0.me8x8[0].cost + a->l0.me8x8[1].cost
                    + a->l0.me8x8[2].cost + a->l0.me8x8[3].cost
                    - REF_COST( 0, a->l0.me16x16.i_ref );

    /* With MB RD enabled, re-evaluate as a true RD cost. */
    if( a->b_mbrd )
    {
        if( a->i_best_satd > a->l0.i_cost8x8 )
            a->i_best_satd = a->l0.i_cost8x8;
        h->mb.i_type = P_8x8;
        h->mb.i_sub_partition[0] = h->mb.i_sub_partition[1] =
        h->mb.i_sub_partition[2] = h->mb.i_sub_partition[3] = D_L0_8x8;
        a->l0.i_cost8x8 = x264_rd_cost_mb( h, a->i_lambda2 );
    }
}

/* Analyse the P 16x8 partition: two 16x8 motion searches.  Each half
 * tries the reference(s) already chosen by its two underlying 8x8
 * blocks — one search if they agree, two if they differ — keeping the
 * cheaper result in a->l0.me16x8[i].  Total goes to a->l0.i_cost16x8. */
static void x264_mb_analyse_inter_p16x8( x264_t *h, x264_mb_analysis_t *a )
{
    x264_me_t m;
    uint8_t **p_fenc = h->mb.pic.p_fenc;
    int mvc[3][2];
    int i, j;

    /* XXX Needed for x264_mb_predict_mv */
    h->mb.i_partition = D_16x8;

    for( i = 0; i < 2; i++ )
    {
        x264_me_t *l0m = &a->l0.me16x8[i];
        /* Candidate refs: those picked by the two 8x8 blocks in this half. */
        const int ref8[2] = { a->l0.me8x8[2*i].i_ref, a->l0.me8x8[2*i+1].i_ref };
        const int i_ref8s = ( ref8[0] == ref8[1] ) ? 1 : 2;

        m.i_pixel = PIXEL_16x8;
        m.p_cost_mv = a->p_cost_mv;

        LOAD_FENC( &m, p_fenc, 0, 8*i );
        l0m->cost = INT_MAX;
        for( j = 0; j < i_ref8s; j++ )
        {
            const int i_ref = ref8[j];
            const int i_ref_cost = REF_COST( 0, i_ref );
            m.i_ref_cost = i_ref_cost;
            m.i_ref = i_ref;

            /* if we skipped the 16x16 predictor, we wouldn't have to copy anything... */
            *(uint64_t*)mvc[0] = *(uint64_t*)a->l0.mvc[i_ref][0];
            *(uint64_t*)mvc[1] = *(uint64_t*)a->l0.mvc[i_ref][2*i+1];
            *(uint64_t*)mvc[2] = *(uint64_t*)a->l0.mvc[i_ref][2*i+2];

            LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, 8*i );
            x264_macroblock_cache_ref( h, 0, 2*i, 4, 2, 0, i_ref );
            x264_mb_predict_mv( h, 0, 8*i, 4, m.mvp );
            x264_me_search( h, &m, mvc, 3 );

            m.cost += i_ref_cost;

            if( m.cost < l0m->cost )
                *l0m = m;
        }
        /* Commit the winner for this half. */
        x264_macroblock_cache_mv( h, 0, 2*i, 4, 2, 0, l0m->mv[0], l0m->mv[1] );
        x264_macroblock_cache_ref( h, 0, 2*i, 4, 2, 0, l0m->i_ref );
    }

    a->l0.i_cost16x8 = a->l0.me16x8[0].cost + a->l0.me16x8[1].cost;

    if( a->b_mbrd )
    {
        if( a->i_best_satd > a->l0.i_cost16x8 )
            a->i_best_satd = a->l0.i_cost16x8;
        h->mb.i_type = P_L0;
        a->l0.i_cost16x8 = x264_rd_cost_mb( h, a->i_lambda2 );
    }
}

/* Analyse the P 8x16 partition — mirror image of the 16x8 case above:
 * two vertical halves, each trying the refs of its two underlying 8x8
 * blocks.  Total goes to a->l0.i_cost8x16. */
static void x264_mb_analyse_inter_p8x16( x264_t *h, x264_mb_analysis_t *a )
{
    x264_me_t m;
    uint8_t **p_fenc = h->mb.pic.p_fenc;
    int mvc[3][2];
    int i, j;

    /* XXX Needed for x264_mb_predict_mv */
    h->mb.i_partition = D_8x16;

    for( i = 0; i < 2; i++ )
    {
        x264_me_t *l0m = &a->l0.me8x16[i];
        /* 8x8 blocks i and i+2 are the top/bottom of this vertical half. */
        const int ref8[2] = { a->l0.me8x8[i].i_ref, a->l0.me8x8[i+2].i_ref };
        const int i_ref8s = ( ref8[0] == ref8[1] ) ? 1 : 2;

        m.i_pixel = PIXEL_8x16;
        m.p_cost_mv = a->p_cost_mv;

        LOAD_FENC( &m, p_fenc, 8*i, 0 );
        l0m->cost = INT_MAX;
        for( j = 0; j < i_ref8s; j++ )
        {
            const int i_ref = ref8[j];
            const int i_ref_cost = REF_COST( 0, i_ref );
            m.i_ref_cost = i_ref_cost;
            m.i_ref = i_ref;

            *(uint64_t*)mvc[0] = *(uint64_t*)a->l0.mvc[i_ref][0];
            *(uint64_t*)mvc[1] = *(uint64_t*)a->l0.mvc[i_ref][i+1];
            *(uint64_t*)mvc[2] = *(uint64_t*)a->l0.mvc[i_ref][i+3];

            LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 8*i, 0 );
            x264_macroblock_cache_ref( h, 2*i, 0, 2, 4, 0, i_ref );
            x264_mb_predict_mv( h, 0, 4*i, 2, m.mvp );
            x264_me_search( h, &m, mvc, 3 );

            m.cost += i_ref_cost;

            if( m.cost < l0m->cost )
                *l0m = m;
        }
        x264_macroblock_cache_mv( h, 2*i, 0, 2, 4, 0, l0m->mv[0], l0m->mv[1] );
        x264_macroblock_cache_ref( h, 2*i, 0, 2, 4, 0, l0m->i_ref );
    }

    a->l0.i_cost8x16 = a->l0.me8x16[0].cost + a->l0.me8x16[1].cost;

    if( a->b_mbrd )
    {
        if( a->i_best_satd > a->l0.i_cost8x16 )
            a->i_best_satd = a->l0.i_cost8x16;
        h->mb.i_type = P_L0;
        a->l0.i_cost8x16 = x264_rd_cost_mb( h, a->i_lambda2 );
    }
}

/* Chroma cost of one 8x8 block's sub-partition MVs: motion-compensate
 * both chroma planes into 8x8 scratch buffers using the already-found
 * luma MVs, then compare against the encoded picture with mbcmp.
 * `pixel` selects which sub-partition set (4x4 / 8x4 / 4x8) supplied
 * the MVs.  Returns the summed U+V comparison cost.
 * NOTE(review): offsets 4*(i8x8&1) + 2*(i8x8&2)*i_stride and the
 * half-size MC dimensions indicate 4:2:0 chroma subsampling — confirm
 * against the rest of the file. */
static int x264_mb_analyse_inter_p4x4_chroma( x264_t *h, x264_mb_analysis_t *a, uint8_t **p_fref, int i8x8, int pixel )
{
    uint8_t pix1[8*8], pix2[8*8];
    const int i_stride = h->mb.pic.i_stride[1];
    const int off = 4*(i8x8&1) + 2*(i8x8&2)*i_stride;

/* MC one chroma sub-block on both planes (p_fref[4]=U, p_fref[5]=V)
 * into pix1/pix2 at local offset (x,y). */
#define CHROMA4x4MC( width, height, me, x, y ) \
    h->mc.mc_chroma( &p_fref[4][off+x+y*i_stride], i_stride, &pix1[x+y*8], 8, (me).mv[0], (me).mv[1], width, height ); \
    h->mc.mc_chroma( &p_fref[5][off+x+y*i_stride], i_stride, &pix2[x+y*8], 8, (me).mv[0], (me).mv[1], width, height );

    if( pixel == PIXEL_4x4 )
    {
        CHROMA4x4MC( 2,2, a->l0.me4x4[i8x8][0], 0,0 );
        CHROMA4x4MC( 2,2, a->l0.me4x4[i8x8][1], 0,2 );
        CHROMA4x4MC( 2,2, a->l0.me4x4[i8x8][2], 2,0 );
        CHROMA4x4MC( 2,2, a->l0.me4x4[i8x8][3], 2,2 );
    }
    else if( pixel == PIXEL_8x4 )
    {
        CHROMA4x4MC( 4,2, a->l0.me8x4[i8x8][0], 0,0 );
        CHROMA4x4MC( 4,2, a->l0.me8x4[i8x8][1], 0,2 );
    }
    else
    {
        CHROMA4x4MC( 2,4, a->l0.me4x8[i8x8][0], 0,0 );
        CHROMA4x4MC( 2,4, a->l0.me4x8[i8x8][1], 2,0 );
    }

    return h->pixf.mbcmp[PIXEL_4x4]( &h->mb.pic.p_fenc[1][off], i_stride, pix1, 8 )
         + h->pixf.mbcmp[PIXEL_4x4]( &h->mb.pic.p_fenc[2][off], i_stride, pix2, 8 );
}

/* Analyse the four 4x4 sub-partitions of 8x8 block i8x8, reusing that
 * block's reference.  The first search is seeded with the 8x8 MV.
 * Fills a->l0.me4x4[i8x8][0..3] and a->l0.i_cost4x4[i8x8] (including
 * ref cost, sub-partition type cost, and optionally chroma cost). */
static void x264_mb_analyse_inter_p4x4( x264_t *h, x264_mb_analysis_t *a, int i8x8 )
{
    uint8_t **p_fref = h->mb.pic.p_fref[0][a->l0.me8x8[i8x8].i_ref];
    uint8_t **p_fenc = h->mb.pic.p_fenc;
    int i4x4;

    /* XXX Needed for x264_mb_predict_mv */
    h->mb.i_partition = D_8x8;

    for( i4x4 = 0; i4x4 < 4; i4x4++ )
    {
        const int idx = 4*i8x8 + i4x4;
        const int x4 = block_idx_x[idx];
        const int y4 = block_idx_y[idx];
        /* Only the first sub-block uses the 8x8 MV as a candidate. */
        const int i_mvc = (i4x4 == 0);

        x264_me_t *m = &a->l0.me4x4[i8x8][i4x4];

        m->i_pixel = PIXEL_4x4;
        m->p_cost_mv = a->p_cost_mv;

        LOAD_FENC( m, p_fenc, 4*x4, 4*y4 );
        LOAD_HPELS( m, p_fref, 4*x4, 4*y4 );

        x264_mb_predict_mv( h, 0, idx, 1, m->mvp );
        x264_me_search( h, m, &a->l0.me8x8[i8x8].mv, i_mvc );

        x264_macroblock_cache_mv( h, x4, y4, 1, 1, 0, m->mv[0], m->mv[1] );
    }

    a->l0.i_cost4x4[i8x8] = a->l0.me4x4[i8x8][0].cost
                          + a->l0.me4x4[i8x8][1].cost
                          + a->l0.me4x4[i8x8][2].cost
                          + a->l0.me4x4[i8x8][3].cost
                          + REF_COST( 0, a->l0.me8x8[i8x8].i_ref )
                          + a->i_lambda * i_sub_mb_p_cost_table[D_L0_4x4];
    if( h->mb.b_chroma_me )
        a->l0.i_cost4x4[i8x8] += x264_mb_analyse_inter_p4x4_chroma( h, a, p_fref, i8x8, PIXEL_4x4 );
}

/* Analyse the two 8x4 sub-partitions of 8x8 block i8x8; first search is
 * seeded with the top-left 4x4 MV.  Fills a->l0.me8x4[i8x8][0..1] and
 * a->l0.i_cost8x4[i8x8]. */
static void x264_mb_analyse_inter_p8x4( x264_t *h, x264_mb_analysis_t *a, int i8x8 )
{
    uint8_t **p_fref = h->mb.pic.p_fref[0][a->l0.me8x8[i8x8].i_ref];
    uint8_t **p_fenc = h->mb.pic.p_fenc;
    int i8x4;

    /* XXX Needed for x264_mb_predict_mv */
    h->mb.i_partition = D_8x8;

    for( i8x4 = 0; i8x4 < 2; i8x4++ )
    {
        const int idx = 4*i8x8 + 2*i8x4;
        const int x4 = block_idx_x[idx];
        const int y4 = block_idx_y[idx];
        const int i_mvc = (i8x4 == 0);

        x264_me_t *m = &a->l0.me8x4[i8x8][i8x4];

        m->i_pixel = PIXEL_8x4;
        m->p_cost_mv = a->p_cost_mv;

        LOAD_FENC( m, p_fenc, 4*x4, 4*y4 );
        LOAD_HPELS( m, p_fref, 4*x4, 4*y4 );

        x264_mb_predict_mv( h, 0, idx, 2, m->mvp );
        x264_me_search( h, m, &a->l0.me4x4[i8x8][0].mv, i_mvc );

        x264_macroblock_cache_mv( h, x4, y4, 2, 1, 0, m->mv[0], m->mv[1] );
    }

    a->l0.i_cost8x4[i8x8] = a->l0.me8x4[i8x8][0].cost
                          + a->l0.me8x4[i8x8][1].cost
                          + REF_COST( 0, a->l0.me8x8[i8x8].i_ref )
                          + a->i_lambda * i_sub_mb_p_cost_table[D_L0_8x4];
    if( h->mb.b_chroma_me )
        a->l0.i_cost8x4[i8x8] += x264_mb_analyse_inter_p4x4_chroma( h, a, p_fref, i8x8, PIXEL_8x4 );
}

/* Analyse the two 4x8 sub-partitions of 8x8 block i8x8 — mirror of the
 * 8x4 case.  Fills a->l0.me4x8[i8x8][0..1] and a->l0.i_cost4x8[i8x8]. */
static void x264_mb_analyse_inter_p4x8( x264_t *h, x264_mb_analysis_t *a, int i8x8 )
{
    uint8_t **p_fref = h->mb.pic.p_fref[0][a->l0.me8x8[i8x8].i_ref];
    uint8_t **p_fenc = h->mb.pic.p_fenc;
    int i4x8;

    /* XXX Needed for x264_mb_predict_mv */
    h->mb.i_partition = D_8x8;

    for( i4x8 = 0; i4x8 < 2; i4x8++ )
    {
        const int idx = 4*i8x8 + i4x8;
        const int x4 = block_idx_x[idx];
        const int y4 = block_idx_y[idx];
        const int i_mvc = (i4x8 == 0);

        x264_me_t *m = &a->l0.me4x8[i8x8][i4x8];

        m->i_pixel = PIXEL_4x8;
        m->p_cost_mv = a->p_cost_mv;

        LOAD_FENC( m, p_fenc, 4*x4, 4*y4 );
        LOAD_HPELS( m, p_fref, 4*x4, 4*y4 );

        x264_mb_predict_mv( h, 0, idx, 1, m->mvp );
        x264_me_search( h, m, &a->l0.me4x4[i8x8][0].mv, i_mvc );

        x264_macroblock_cache_mv( h, x4, y4, 1, 2, 0, m->mv[0], m->mv[1] );
    }

    a->l0.i_cost4x8[i8x8] = a->l0.me4x8[i8x8][0].cost
                          + a->l0.me4x8[i8x8][1].cost
                          + REF_COST( 0, a->l0.me8x8[i8x8].i_ref )
                          + a->i_lambda * i_sub_mb_p_cost_table[D_L0_4x8];
    if( h->mb.b_chroma_me )
        a->l0.i_cost4x8[i8x8] += x264_mb_analyse_inter_p4x4_chroma( h, a, p_fref, i8x8, PIXEL_4x8 );
}

/* Cost the B_DIRECT mode: compare each 8x8 quadrant of the encoded
 * picture against the already motion-compensated decode buffer (fdec),
 * adding the per-sub-block and MB-type lambda costs. */
static void x264_mb_analyse_inter_direct( x264_t *h, x264_mb_analysis_t *a )
{
    /* Assumes that fdec still contains the results of
     * x264_mb_predict_mv_direct16x16 and x264_mb_mc */
    uint8_t **p_fenc = h->mb.pic.p_fenc;
    uint8_t **p_fdec = h->mb.pic.p_fdec;
    int i_stride = h->mb.pic.i_stride[0];
    int i;

    a->i_cost16x16direct = 0;
    for( i = 0; i < 4; i++ )
    {
        const int x8 = i%2;
        const int y8 = i/2;
        const int off = 8 * x8 + 8 * i_stride * y8;

        /* Per-quadrant cost also accumulates into the 16x16 total. */
        a->i_cost16x16direct +=
        a->i_cost8x8direct[i] =
            h->pixf.mbcmp[PIXEL_8x8]( &p_fenc[0][off], i_stride, &p_fdec[0][off], i_stride );

        /* mb type cost */
        a->i_cost8x8direct[i] += a->i_lambda * i_sub_mb_b_cost_table[D_DIRECT_8x8];
    }
    a->i_cost16x16direct += a->i_lambda * i_mb_b_cost_table[B_DIRECT];

    if( a->b_mbrd )
    {
        if( a->i_cost16x16direct < a->i_best_satd )
            a->i_best_satd = a->i_cost16x16direct;
        h->mb.i_type = B_DIRECT;
        a->i_cost16x16direct = x264_rd_cost_mb( h, a->i_lambda2 );
    }
}

/* Bi-prediction average of two pixel blocks into pix1: weighted average
 * when weighted bipred is enabled (weight looked up from the current
 * L0/L1 refs), plain average otherwise.  Expects `h` and `a` in scope
 * at the expansion site.
 * NOTE(review): macro body is a bare { } block, not do { } while(0) —
 * kept as-is; beware use directly under an if/else without braces. */
#define WEIGHTED_AVG( size, pix1, stride1, src2, stride2 ) \
{ \
    if( h->param.analyse.b_weighted_bipred ) \
        h->mc.avg_weight[size]( pix1, stride1, src2, stride2, \
                h->mb.bipred_weight[a->l0.i_ref][a->l1.i_ref] ); \
    else \
        h->mc.avg[size]( pix1, stride1, src2, stride2 ); \
}
/* (Code-viewer keyboard-shortcut help removed; it was not part of the source.) */