/*****************************************************************************
 * macroblock.c: motion vector prediction (x264; some helpers adapted from FFmpeg)
 *****************************************************************************/
/* ...inside x264_mb_predict_mv_temporal(): temporal direct prediction of each 8x8 block i */
        const int x8 = 2*(i%2);
        const int y8 = 2*(i/2);
        const int i_part_8x8 = i_mb_8x8 + x8/2 + y8 * h->mb.i_mb_stride;
        const int i_ref = h->mb.map_col_to_list0[ h->fref1[0]->ref[0][ i_part_8x8 ] ];

        if( i_ref >= 0 )
        {
            const int dist_scale_factor = h->mb.dist_scale_factor[i_ref][0];
            int x4, y4;

            x264_macroblock_cache_ref( h, x8, y8, 2, 2, 0, i_ref );

            if( h->sps->b_direct8x8_inference )
            {
                /* one scaled MV pair covers the whole 8x8 block */
                const int16_t *mv_col = h->fref1[0]->mv[0][ i_mb_4x4 + (x8/2)*3 + (y8/2)*3 * 4 * h->mb.i_mb_stride ];
                int mv_l0[2];
                mv_l0[0] = ( dist_scale_factor * mv_col[0] + 128 ) >> 8;
                mv_l0[1] = ( dist_scale_factor * mv_col[1] + 128 ) >> 8;
                x264_fill_rectangle( &h->mb.cache.mv[0][x264_scan8[i*4]], 2, 2, 8,
                                     pack16to32( mv_l0[0], mv_l0[1] ), 4 );
                x264_fill_rectangle( &h->mb.cache.mv[1][x264_scan8[i*4]], 2, 2, 8,
                                     pack16to32( mv_l0[0] - mv_col[0], mv_l0[1] - mv_col[1] ), 4 );
            }
            else
            {
                for( y4 = y8; y4 < y8+2; y4++ )
                    for( x4 = x8; x4 < x8+2; x4++ )
                    {
                        const int16_t *mv_col = h->fref1[0]->mv[0][ i_mb_4x4 + x4 + y4 * 4 * h->mb.i_mb_stride ];
                        int mv_l0[2];
                        mv_l0[0] = ( dist_scale_factor * mv_col[0] + 128 ) >> 8;
                        mv_l0[1] = ( dist_scale_factor * mv_col[1] + 128 ) >> 8;
                        x264_macroblock_cache_mv( h, x4, y4, 1, 1, 0, mv_l0[0], mv_l0[1] );
                        x264_macroblock_cache_mv( h, x4, y4, 1, 1, 1, mv_l0[0] - mv_col[0], mv_l0[1] - mv_col[1] );
                    }
            }
        }
        else
        {
            /* the colocated ref isn't in the current list0 */
            /* FIXME: we might still be able to use direct_8x8 on some partitions */
            return 0;
        }
    }

    return 1;
}

static int x264_mb_predict_mv_spatial( x264_t *h )
{
    /* only works for 16x16 */
    int ref[2];
    int mv[2][2];
    int i_list;
    int i8, i4;
    const int8_t *l1ref = &h->fref1[0]->ref[0][ h->mb.i_b8_xy ];
    const int16_t (*l1mv)[2] = (const int16_t (*)[2]) &h->fref1[0]->mv[0][ h->mb.i_b4_xy ];

    for( i_list = 0; i_list < 2; i_list++ )
    {
        int i_refa = h->mb.cache.ref[i_list][X264_SCAN8_0 - 1];
        int i_refb = h->mb.cache.ref[i_list][X264_SCAN8_0 - 8];
        int i_refc = h->mb.cache.ref[i_list][X264_SCAN8_0 - 8 + 4];
        if( i_refc == -2 )
            i_refc = h->mb.cache.ref[i_list][X264_SCAN8_0 - 8 - 1];

        /* the direct ref is the smallest valid reference used by the neighbours */
        ref[i_list] = i_refa;
        if( ref[i_list] < 0 || ( i_refb < ref[i_list] && i_refb >= 0 ) )
            ref[i_list] = i_refb;
        if( ref[i_list] < 0 || ( i_refc < ref[i_list] && i_refc >= 0 ) )
            ref[i_list] = i_refc;
        if( ref[i_list] < 0 )
            ref[i_list] = -1;
    }

    if( ref[0] < 0 && ref[1] < 0 )
    {
        ref[0] = ref[1] = 0;
        mv[0][0] = mv[0][1] = mv[1][0] = mv[1][1] = 0;
    }
    else
    {
        for( i_list = 0; i_list < 2; i_list++ )
        {
            if( ref[i_list] >= 0 )
                pred_motion( h, 0, 4, i_list, ref[i_list], &mv[i_list][0], &mv[i_list][1] );
                // x264_mb_predict_mv_16x16( h, i_list, ref[i_list], mv[i_list] );
            else
                mv[i_list][0] = mv[i_list][1] = 0;
        }
    }

#if 0
    x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, ref[0] );
    x264_macroblock_cache_ref( h, 0, 0, 4, 4, 1, ref[1] );
    x264_macroblock_cache_mv( h, 0, 0, 4, 4, 0, mv[0][0], mv[0][1] );
    x264_macroblock_cache_mv( h, 0, 0, 4, 4, 1, mv[1][0], mv[1][1] );
#else
    x264_fill_rectangle( &h->mb.cache.ref[0][X264_SCAN8_0], 4, 4, 8, ref[0], 1 );
    x264_fill_rectangle( &h->mb.cache.ref[1][X264_SCAN8_0], 4, 4, 8, ref[1], 1 );
    x264_fill_rectangle( h->mb.cache.mv[0][X264_SCAN8_0], 4, 4, 8, pack16to32( mv[0][0], mv[0][1] ), 4 );
    x264_fill_rectangle( h->mb.cache.mv[1][X264_SCAN8_0], 4, 4, 8, pack16to32( mv[1][0], mv[1][1] ), 4 );
#endif

    /* col_zero_flag */
    for( i8 = 0; i8 < 4; i8++ )
    {
        const int x8 = i8%2;
        const int y8 = i8/2;
        if( l1ref[ x8 + y8 * h->mb.i_b8_stride ] == 0 )
        {
            for( i4 = 0; i4 < 4; i4++ )
            {
                const int x4 = i4%2 + 2*x8;
                const int y4 = i4/2 + 2*y8;
                const int16_t *mvcol = l1mv[ x4 + y4 * h->mb.i_b4_stride ];
                if( abs( mvcol[0] ) <= 1 && abs( mvcol[1] ) <= 1 )
                {
                    if( ref[0] == 0 )
                        x264_macroblock_cache_mv( h, x4, y4, 1, 1, 0, 0, 0 );
                    if( ref[1] == 0 )
                        x264_macroblock_cache_mv( h, x4, y4, 1, 1, 1, 0, 0 );
                }
            }
        }
    }

    return 1;
}
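/* Illustrative sketch (not part of x264): the temporal direct path at the top of this excerpt
 * maps the co-located list1 vector into list0 with mv_l0 = (dist_scale_factor * mv_col + 128) >> 8,
 * i.e. a rounded fixed-point multiply by dist_scale_factor/256, and derives the list1 vector as
 * mv_l1 = mv_l0 - mv_col. The standalone example below (guarded out so it does not affect this
 * translation unit) reproduces just that arithmetic; scale_temporal_mv() and the values are made up. */
#if 0
#include <stdio.h>

/* hypothetical standalone helper mirroring the scaling done in the temporal direct loop */
static void scale_temporal_mv( int dist_scale_factor, const int mv_col[2],
                               int mv_l0[2], int mv_l1[2] )
{
    /* like the code above, this relies on arithmetic right shift for negative values */
    mv_l0[0] = ( dist_scale_factor * mv_col[0] + 128 ) >> 8;
    mv_l0[1] = ( dist_scale_factor * mv_col[1] + 128 ) >> 8;
    mv_l1[0] = mv_l0[0] - mv_col[0];
    mv_l1[1] = mv_l0[1] - mv_col[1];
}

int main( void )
{
    /* e.g. dist_scale_factor = 128, roughly "current picture halfway between the two references" */
    const int mv_col[2] = { 6, -3 };
    int mv_l0[2], mv_l1[2];
    scale_temporal_mv( 128, mv_col, mv_l0, mv_l1 );
    printf( "mv_l0 = (%d,%d), mv_l1 = (%d,%d)\n", mv_l0[0], mv_l0[1], mv_l1[0], mv_l1[1] );
    /* prints: mv_l0 = (3,-1), mv_l1 = (-3,2) */
    return 0;
}
#endif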
int x264_mb_predict_mv_direct( x264_t *h )
{
    int b_available;

    if( h->sh.b_direct_spatial_mv_pred )
        b_available = x264_mb_predict_mv_spatial( h );
    else
        b_available = x264_mb_predict_mv_temporal( h );

    /* cache ref & mv */
    if( b_available )
    {
        int i, l;
        for( l = 0; l < 2; l++ )
            for( i = 0; i < 4; i++ )
                h->mb.cache.direct_ref[l][i] = h->mb.cache.ref[l][x264_scan8[i*4]];
        memcpy( h->mb.cache.direct_mv, h->mb.cache.mv, sizeof(h->mb.cache.mv) );
    }

    return b_available;
}

// From FFmpeg
/* returns the reference index of the top-right neighbour and points *C at its MV;
 * falls back to the top-left neighbour when the top-right block is unavailable */
int fetch_diagonal_mv( x264_t *h, const int16_t **C, int i, int list, int part_width )
{
    const int topright_ref = h->mb.cache.ref[list][ i - 8 + part_width ];
    if( topright_ref != -2 ){
        *C = h->mb.cache.mv[list][ i - 8 + part_width ];
        return topright_ref;
    }else{
        // tprintf("topright MV not available\n");
        *C = h->mb.cache.mv[list][ i - 8 - 1 ];
        return h->mb.cache.ref[list][ i - 8 - 1 ];
    }
}

/**
 * gets the predicted MV.
 * @param n the block index
 * @param part_width the width of the partition (4, 8, 16) -> (1, 2, 4)
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
void pred_motion( x264_t * const h, int n, int part_width, int list, int ref,
                  int * const mx, int * const my )
{
    const int index8   = x264_scan8[n];
    const int top_ref  = h->mb.cache.ref[list][ index8 - 8 ];
    const int left_ref = h->mb.cache.ref[list][ index8 - 1 ];
    const int16_t * const A = h->mb.cache.mv[list][ index8 - 1 ];
    const int16_t * const B = h->mb.cache.mv[list][ index8 - 8 ];
    const int16_t * C;
    int diagonal_ref, match_count;

    assert( part_width==1 || part_width==2 || part_width==4 );

/* mv_cache
  B . . A T T T T
  U . . L . . , .
  U . . L . . . .
  U . . L . . , .
  . . . L . . . .
*/

    diagonal_ref = fetch_diagonal_mv( h, &C, index8, list, part_width );
    match_count  = (diagonal_ref==ref) + (top_ref==ref) + (left_ref==ref);
    // tprintf("pred_motion match_count=%d\n", match_count);
    if( match_count > 1 ){ // most common
        *mx = x264_median( A[0], B[0], C[0] );
        *my = x264_median( A[1], B[1], C[1] );
    }else if( match_count == 1 ){
        if( left_ref == ref ){
            *mx = A[0];
            *my = A[1];
        }else if( top_ref == ref ){
            *mx = B[0];
            *my = B[1];
        }else{
            *mx = C[0];
            *my = C[1];
        }
    }else{
        if( top_ref == -2 && diagonal_ref == -2 && left_ref != -2 ){
            *mx = A[0];
            *my = A[1];
        }else{
            *mx = x264_median( A[0], B[0], C[0] );
            *my = x264_median( A[1], B[1], C[1] );
        }
    }
    // tprintf("pred_motion (%2d %2d %2d) (%2d %2d %2d) (%2d %2d %2d) -> (%2d %2d %2d) at %2d %2d %d list %d\n", top_ref, B[0], B[1], diagonal_ref, C[0], C[1], left_ref, A[0], A[1], ref, *mx, *my, h->s.mb_x, h->s.mb_y, n, list);
}
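/* Illustrative sketch (not part of x264): the common case in pred_motion() above is the
 * component-wise median of the left (A), top (B) and diagonal (C) neighbour vectors.
 * median3() below is a local stand-in for x264_median, and all values are made up.
 * Guarded out so it does not affect this translation unit. */
#if 0
#include <stdio.h>

static int median3( int a, int b, int c )
{
    /* median = a + b + c - min - max */
    int min = a < b ? (a < c ? a : c) : (b < c ? b : c);
    int max = a > b ? (a > c ? a : c) : (b > c ? b : c);
    return a + b + c - min - max;
}

int main( void )
{
    const int A[2] = { 4, -2 };   /* left neighbour MV     */
    const int B[2] = { 9,  0 };   /* top neighbour MV      */
    const int C[2] = { 5, -6 };   /* diagonal neighbour MV */
    printf( "pred = (%d,%d)\n", median3( A[0], B[0], C[0] ), median3( A[1], B[1], C[1] ) );
    /* prints: pred = (5,-2), each component predicted independently */
    return 0;
}
#endif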
/**
 * gets the directionally predicted 16x8 MV.
 * @param n the block index
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
void pred_16x8_motion( x264_t * const h, int n, int list, int ref, int * const mx, int * const my )
{
    if( n == 0 ){
        const int top_ref = h->mb.cache.ref[list][ x264_scan8[0] - 8 ];
        const int16_t * const B = h->mb.cache.mv[list][ x264_scan8[0] - 8 ];
        // tprintf("pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n", top_ref, B[0], B[1], h->s.mb_x, h->s.mb_y, n, list);
        if( top_ref == ref ){
            *mx = B[0];
            *my = B[1];
            return;
        }
    }else{
        const int left_ref = h->mb.cache.ref[list][ x264_scan8[8] - 1 ];
        const int16_t * const A = h->mb.cache.mv[list][ x264_scan8[8] - 1 ];
        // tprintf("pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n", left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list);
        if( left_ref == ref ){
            *mx = A[0];
            *my = A[1];
            return;
        }
    }

    // RARE
    pred_motion( h, n, 4, list, ref, mx, my );
}

/**
 * gets the directionally predicted 8x16 MV.
 * @param n the block index
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
void pred_8x16_motion( x264_t * const h, int n, int list, int ref, int * const mx, int * const my )
{
    if( n == 0 ){
        const int left_ref = h->mb.cache.ref[list][ x264_scan8[0] - 1 ];
        const int16_t * const A = h->mb.cache.mv[list][ x264_scan8[0] - 1 ];
        // tprintf("pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n", left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list);
        if( left_ref == ref ){
            *mx = A[0];
            *my = A[1];
            return;
        }
    }else{
        const int16_t * C;
        int diagonal_ref;

        diagonal_ref = fetch_diagonal_mv( h, &C, x264_scan8[4], list, 2 );
        // tprintf("pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n", diagonal_ref, C[0], C[1], h->s.mb_x, h->s.mb_y, n, list);
        if( diagonal_ref == ref ){
            *mx = C[0];
            *my = C[1];
            return;
        }
    }

    // RARE
    pred_motion( h, n, 2, list, ref, mx, my );
}

/* MV prediction for P_SKIP macroblocks: the zero vector if a neighbour is missing
 * or uses ref 0 with a zero MV, otherwise the usual median prediction */
void pred_pskip_motion( x264_t * const h, int * const mx, int * const my )
{
    const int top_ref  = h->mb.cache.ref[0][ x264_scan8[0] - 8 ];
    const int left_ref = h->mb.cache.ref[0][ x264_scan8[0] - 1 ];

    // tprintf("pred_pskip: (%d) (%d) at %2d %2d\n", top_ref, left_ref, h->s.mb_x, h->s.mb_y);

    if( top_ref == -2 || left_ref == -2
        || ( top_ref  == 0 && *(uint32_t*)h->mb.cache.mv[0][ x264_scan8[0] - 8 ] == 0 )
        || ( left_ref == 0 && *(uint32_t*)h->mb.cache.mv[0][ x264_scan8[0] - 1 ] == 0 ) ){
        *mx = *my = 0;
        return;
    }

    pred_motion( h, 0, 4, 0, 0, mx, my );
    return;
}
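/* Illustrative sketch (not part of x264): pred_pskip_motion() above forces the P-skip
 * predictor to (0,0) when the top or left neighbour is outside the picture (ref == -2),
 * or when either of them is reference 0 with a zero motion vector; only otherwise does it
 * fall back to the usual median prediction. The standalone decision function below mirrors
 * that rule with plain ints; names and values are made up. Guarded out so it does not
 * affect this translation unit. */
#if 0
#include <stdio.h>

/* returns 1 when the skip predictor must be the zero vector */
static int pskip_mv_is_forced_zero( int top_ref, int top_mvx, int top_mvy,
                                    int left_ref, int left_mvx, int left_mvy )
{
    return top_ref == -2 || left_ref == -2
        || ( top_ref  == 0 && top_mvx  == 0 && top_mvy  == 0 )
        || ( left_ref == 0 && left_mvx == 0 && left_mvy == 0 );
}

int main( void )
{
    /* left neighbour uses ref 0 with a zero MV -> predictor forced to (0,0) */
    printf( "%d\n", pskip_mv_is_forced_zero( 1, 3, -1, 0, 0, 0 ) );  /* prints 1 */
    /* both neighbours available with non-zero MVs -> median prediction is used */
    printf( "%d\n", pskip_mv_is_forced_zero( 0, 2,  0, 1, -4, 1 ) ); /* prints 0 */
    return 0;
}
#endif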
/* reloads the cached direct-mode ref/MV of 8x8 block idx into the main MB cache */
void x264_mb_load_mv_direct8x8( x264_t *h, int idx )
{
    const int x = 2*(idx%2);
    const int y = 2*(idx/2);
    int l;

    x264_macroblock_cache_ref( h, x, y, 2, 2, 0, h->mb.cache.direct_ref[0][idx] );
    x264_macroblock_cache_ref( h, x, y, 2, 2, 1, h->mb.cache.direct_ref[1][idx] );
    for( l = 0; l < 2; l++ )
    {
        /* each uint64_t copies one row of two MVs (4 x int16_t) at once */
        *(uint64_t*)h->mb.cache.mv[l][x264_scan8[idx*4]] =
            *(uint64_t*)h->mb.cache.direct_mv[l][x264_scan8[idx*4]];
        *(uint64_t*)h->mb.cache.mv[l][x264_scan8[idx*4]+8] =
            *(uint64_t*)h->mb.cache.direct_mv[l][x264_scan8[idx*4]+8];
    }
}

/* This just improves encoder performance, it's not part of the spec */
void x264_mb_predict_mv_ref16x16( x264_t *h, int i_list, int i_ref, int mvc[5][2], int *i_mvc )
{
    int16_t (*mvr)[2] = h->mb.mvr[i_list][i_ref];
    int i = 0;

    /* temporal */
    if( h->sh.i_type == SLICE_TYPE_B )
    {
        if( h->mb.cache.ref[i_list][x264_scan8[12]] == i_ref )
        {
            /* FIXME: use direct_mv to be clearer? */
            int16_t *mvp = h->mb.cache.mv[i_list][x264_scan8[12]];
            mvc[i][0] = mvp[0];
            mvc[i][1] = mvp[1];
            i++;
        }
    }

    /* spatial */
    if( h->mb.i_mb_x > 0 )
    {
        int i_mb_l = h->mb.i_mb_xy - 1;
        /* skip MBs didn't go through the whole search process, so mvr is undefined */
        if( !IS_SKIP( h->mb.type[i_mb_l] ) )
        {
            mvc[i][0] = mvr[i_mb_l][0];
            mvc[i][1] = mvr[i_mb_l][1];
            i++;
        }
    }
    if( h->mb.i_mb_y > 0 )
    {
        int i_mb_t = h->mb.i_mb_xy - h->mb.i_mb_stride;
        /* top neighbour, mirroring the left-MB case above */
        if( !IS_SKIP( h->mb.type[i_mb_t] ) )
        {
            mvc[i][0] = mvr[i_mb_t][0];
            mvc[i][1] = mvr[i_mb_t][1];
            i++;
        }
    }

    *i_mvc = i;
}
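/* Illustrative sketch (not part of x264): x264_mb_predict_mv_ref16x16() above only gathers a
 * few extra starting points for the 16x16 motion search (the MV already cached for block 12 in
 * B slices, plus the left and top neighbours' stored 16x16 MVs) into mvc[] and reports the count
 * via *i_mvc. The guarded fragment below shows the same append pattern on made-up data; the
 * availability flags and candidate values are hypothetical. */
#if 0
#include <stdio.h>

int main( void )
{
    int mvc[5][2];
    int i_mvc = 0;

    /* pretend the left neighbour was fully searched with MV (7,-2), while the top neighbour
     * was a skip MB whose stored MV is undefined and must not be appended (same guard as above) */
    const int left_available = 1, left_is_skip = 0, left_mv[2] = { 7, -2 };
    const int top_available  = 1, top_is_skip  = 1;

    if( left_available && !left_is_skip )
    {
        mvc[i_mvc][0] = left_mv[0];
        mvc[i_mvc][1] = left_mv[1];
        i_mvc++;
    }
    if( top_available && !top_is_skip )
    {
        /* the top neighbour's MV would be appended here */
    }

    printf( "%d candidate(s), first = (%d,%d)\n", i_mvc, mvc[0][0], mvc[0][1] );
    /* prints: 1 candidate(s), first = (7,-2) */
    return 0;
}
#endif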