
errdo_mc_prediction.c
H.264 encoding implementation (C)
Page 1 of 4
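/* The listing below starts inside a function body: this first block derives the
   reference indices of the left, up, up-left and up-right neighbours that are
   later used for spatial direct-mode prediction (names and behaviour follow the
   JM H.264 reference software). */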
    l0_rFrameL  = (char) (mb_left.available    ? dec_picture->motion.ref_idx[LIST_0][mb_left.pos_y][mb_left.pos_x]       : -1);
    l0_rFrameU  = (char) (mb_up.available      ? dec_picture->motion.ref_idx[LIST_0][mb_up.pos_y][mb_up.pos_x]           : -1);
    l0_rFrameUL = (char) (mb_upleft.available  ? dec_picture->motion.ref_idx[LIST_0][mb_upleft.pos_y][mb_upleft.pos_x]   : -1);
    l0_rFrameUR = (char) (mb_upright.available ? dec_picture->motion.ref_idx[LIST_0][mb_upright.pos_y][mb_upright.pos_x] : l0_rFrameUL);

    l1_rFrameL  = (char) (mb_left.available    ? dec_picture->motion.ref_idx[LIST_1][mb_left.pos_y][mb_left.pos_x]       : -1);
    l1_rFrameU  = (char) (mb_up.available      ? dec_picture->motion.ref_idx[LIST_1][mb_up.pos_y][mb_up.pos_x]           : -1);
    l1_rFrameUL = (char) (mb_upleft.available  ? dec_picture->motion.ref_idx[LIST_1][mb_upleft.pos_y][mb_upleft.pos_x]   : -1);
    l1_rFrameUR = (char) (mb_upright.available ? dec_picture->motion.ref_idx[LIST_1][mb_upright.pos_y][mb_upright.pos_x] : l1_rFrameUL);
  }
  else
  {
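    /* MBAFF: field and frame macroblocks count reference pictures differently
       (one frame corresponds to two fields), so neighbouring reference indices
       are rescaled to the field/frame coding mode of the current macroblock. */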
    if (currMB->mb_field)
    {
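      /* Current MB is field coded: double the reference index of a frame-coded
         neighbour; invalid indices (< 0) are left unchanged. */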
      l0_rFrameL  = (char) (mb_left.available
        ? ((img->mb_data[mb_left.mb_addr].mb_field || dec_picture->motion.ref_idx[LIST_0][mb_left.pos_y][mb_left.pos_x] < 0)
          ? dec_picture->motion.ref_idx[LIST_0][mb_left.pos_y][mb_left.pos_x]
          : dec_picture->motion.ref_idx[LIST_0][mb_left.pos_y][mb_left.pos_x] * 2)
        : -1);

      l0_rFrameU  = (char) (mb_up.available
        ? ((img->mb_data[mb_up.mb_addr].mb_field || dec_picture->motion.ref_idx[LIST_0][mb_up.pos_y][mb_up.pos_x] < 0)
          ? dec_picture->motion.ref_idx[LIST_0][mb_up.pos_y][mb_up.pos_x]
          : dec_picture->motion.ref_idx[LIST_0][mb_up.pos_y][mb_up.pos_x] * 2)
        : -1);

      l0_rFrameUL = (char) (mb_upleft.available
        ? ((img->mb_data[mb_upleft.mb_addr].mb_field || dec_picture->motion.ref_idx[LIST_0][mb_upleft.pos_y][mb_upleft.pos_x] < 0)
          ? dec_picture->motion.ref_idx[LIST_0][mb_upleft.pos_y][mb_upleft.pos_x]
          : dec_picture->motion.ref_idx[LIST_0][mb_upleft.pos_y][mb_upleft.pos_x] * 2)
        : -1);

      l0_rFrameUR = (char) (mb_upright.available
        ? ((img->mb_data[mb_upright.mb_addr].mb_field || dec_picture->motion.ref_idx[LIST_0][mb_upright.pos_y][mb_upright.pos_x] < 0)
          ? dec_picture->motion.ref_idx[LIST_0][mb_upright.pos_y][mb_upright.pos_x]
          : dec_picture->motion.ref_idx[LIST_0][mb_upright.pos_y][mb_upright.pos_x] * 2)
        : l0_rFrameUL);

      l1_rFrameL  = (char) (mb_left.available
        ? ((img->mb_data[mb_left.mb_addr].mb_field || dec_picture->motion.ref_idx[LIST_1][mb_left.pos_y][mb_left.pos_x] < 0)
          ? dec_picture->motion.ref_idx[LIST_1][mb_left.pos_y][mb_left.pos_x]
          : dec_picture->motion.ref_idx[LIST_1][mb_left.pos_y][mb_left.pos_x] * 2)
        : -1);

      l1_rFrameU  = (char) (mb_up.available
        ? ((img->mb_data[mb_up.mb_addr].mb_field || dec_picture->motion.ref_idx[LIST_1][mb_up.pos_y][mb_up.pos_x] < 0)
          ? dec_picture->motion.ref_idx[LIST_1][mb_up.pos_y][mb_up.pos_x]
          : dec_picture->motion.ref_idx[LIST_1][mb_up.pos_y][mb_up.pos_x] * 2)
        : -1);

      l1_rFrameUL = (char) (mb_upleft.available
        ? ((img->mb_data[mb_upleft.mb_addr].mb_field || dec_picture->motion.ref_idx[LIST_1][mb_upleft.pos_y][mb_upleft.pos_x] < 0)
          ? dec_picture->motion.ref_idx[LIST_1][mb_upleft.pos_y][mb_upleft.pos_x]
          : dec_picture->motion.ref_idx[LIST_1][mb_upleft.pos_y][mb_upleft.pos_x] * 2)
        : -1);

      l1_rFrameUR = (char) (mb_upright.available
        ? ((img->mb_data[mb_upright.mb_addr].mb_field || dec_picture->motion.ref_idx[LIST_1][mb_upright.pos_y][mb_upright.pos_x] < 0)
          ? dec_picture->motion.ref_idx[LIST_1][mb_upright.pos_y][mb_upright.pos_x]
          : dec_picture->motion.ref_idx[LIST_1][mb_upright.pos_y][mb_upright.pos_x] * 2)
        : l1_rFrameUL);
    }
    else
    {
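      /* Current MB is frame coded: halve the reference index of a field-coded
         neighbour; invalid indices (< 0) are left unchanged. */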
      l0_rFrameL  = (char) (mb_left.available
        ? ((img->mb_data[mb_left.mb_addr].mb_field || dec_picture->motion.ref_idx[LIST_0][mb_left.pos_y][mb_left.pos_x] < 0)
          ? dec_picture->motion.ref_idx[LIST_0][mb_left.pos_y][mb_left.pos_x] >> 1
          : dec_picture->motion.ref_idx[LIST_0][mb_left.pos_y][mb_left.pos_x])
        : -1);

      l0_rFrameU  = (char) (mb_up.available
        ? ((img->mb_data[mb_up.mb_addr].mb_field || dec_picture->motion.ref_idx[LIST_0][mb_up.pos_y][mb_up.pos_x] < 0)
          ? dec_picture->motion.ref_idx[LIST_0][mb_up.pos_y][mb_up.pos_x] >> 1
          : dec_picture->motion.ref_idx[LIST_0][mb_up.pos_y][mb_up.pos_x])
        : -1);

      l0_rFrameUL = (char) (mb_upleft.available
        ? ((img->mb_data[mb_upleft.mb_addr].mb_field || dec_picture->motion.ref_idx[LIST_0][mb_upleft.pos_y][mb_upleft.pos_x] < 0)
          ? dec_picture->motion.ref_idx[LIST_0][mb_upleft.pos_y][mb_upleft.pos_x] >> 1
          : dec_picture->motion.ref_idx[LIST_0][mb_upleft.pos_y][mb_upleft.pos_x])
        : -1);

      l0_rFrameUR = (char) (mb_upright.available
        ? ((img->mb_data[mb_upright.mb_addr].mb_field || dec_picture->motion.ref_idx[LIST_0][mb_upright.pos_y][mb_upright.pos_x] < 0)
          ? dec_picture->motion.ref_idx[LIST_0][mb_upright.pos_y][mb_upright.pos_x] >> 1
          : dec_picture->motion.ref_idx[LIST_0][mb_upright.pos_y][mb_upright.pos_x])
        : l0_rFrameUL);

      l1_rFrameL  = (char) (mb_left.available
        ? ((img->mb_data[mb_left.mb_addr].mb_field || dec_picture->motion.ref_idx[LIST_1][mb_left.pos_y][mb_left.pos_x] < 0)
          ? dec_picture->motion.ref_idx[LIST_1][mb_left.pos_y][mb_left.pos_x] >> 1
          : dec_picture->motion.ref_idx[LIST_1][mb_left.pos_y][mb_left.pos_x])
        : -1);

      l1_rFrameU  = (char) (mb_up.available
        ? ((img->mb_data[mb_up.mb_addr].mb_field || dec_picture->motion.ref_idx[LIST_1][mb_up.pos_y][mb_up.pos_x] < 0)
          ? dec_picture->motion.ref_idx[LIST_1][mb_up.pos_y][mb_up.pos_x] >> 1
          : dec_picture->motion.ref_idx[LIST_1][mb_up.pos_y][mb_up.pos_x])
        : -1);

      l1_rFrameUL = (char) (mb_upleft.available
        ? ((img->mb_data[mb_upleft.mb_addr].mb_field || dec_picture->motion.ref_idx[LIST_1][mb_upleft.pos_y][mb_upleft.pos_x] < 0)
          ? dec_picture->motion.ref_idx[LIST_1][mb_upleft.pos_y][mb_upleft.pos_x] >> 1
          : dec_picture->motion.ref_idx[LIST_1][mb_upleft.pos_y][mb_upleft.pos_x])
        : -1);

      l1_rFrameUR = (char) (mb_upright.available
        ? ((img->mb_data[mb_upright.mb_addr].mb_field || dec_picture->motion.ref_idx[LIST_1][mb_upright.pos_y][mb_upright.pos_x] < 0)
          ? dec_picture->motion.ref_idx[LIST_1][mb_upright.pos_y][mb_upright.pos_x] >> 1
          : dec_picture->motion.ref_idx[LIST_1][mb_upright.pos_y][mb_upright.pos_x])
        : l1_rFrameUL);
    }
  }

  /* Spatial direct mode: take the smallest non-negative reference index among
     the left, up and up-right neighbours; -1 means no valid reference. */
  *l0_rFrame = (char) ((l0_rFrameL >= 0 && l0_rFrameU >= 0)  ? imin(l0_rFrameL,l0_rFrameU) : imax(l0_rFrameL,l0_rFrameU));
  *l0_rFrame = (char) ((*l0_rFrame >= 0 && l0_rFrameUR >= 0) ? imin(*l0_rFrame,l0_rFrameUR): imax(*l0_rFrame,l0_rFrameUR));

  *l1_rFrame = (char) ((l1_rFrameL >= 0 && l1_rFrameU >= 0)  ? imin(l1_rFrameL,l1_rFrameU) : imax(l1_rFrameL,l1_rFrameU));
  *l1_rFrame = (char) ((*l1_rFrame >= 0 && l1_rFrameUR >= 0) ? imin(*l1_rFrame,l1_rFrameUR): imax(*l1_rFrame,l1_rFrameUR));

  /* Derive the 16x16 motion vector predictor for each list that has a valid
     reference index. */
  if (*l0_rFrame >= 0)
    SetMotionVectorPredictor (img, currMB, pmvl0, *l0_rFrame, LIST_0, dec_picture->motion.ref_idx, dec_picture->motion.mv, 0, 0, 16, 16);

  if (*l1_rFrame >= 0)
    SetMotionVectorPredictor (img, currMB, pmvl1, *l1_rFrame, LIST_1, dec_picture->motion.ref_idx, dec_picture->motion.mv, 0, 0, 16, 16);
}

/* Verify that a decoded motion vector lies within the allowed ranges: the
   horizontal component is limited to [-8192, 8191] quarter-pel units, the
   vertical component by the level-dependent limit max_mb_vmv_r. */
void check_motion_vector_range(ImageParameters *img, short mv_x, short mv_y)
{
  if (mv_x > 8191 || mv_x < -8192)
  {
    fprintf(stderr,"ERROR! Horizontal motion vector %d is out of allowed range {-8192, 8191} in picture %d, macroblock %d\n", mv_x, img->number, img->current_mb_nr);
    error("invalid stream: too big horizontal motion vector", 500);
  }

  if (mv_y > (img->max_mb_vmv_r - 1) || mv_y < (-img->max_mb_vmv_r))
  {
    fprintf(stderr,"ERROR! Vertical motion vector %d is out of allowed range {%d, %d} in picture %d, macroblock %d\n", mv_y, (-img->max_mb_vmv_r), (img->max_mb_vmv_r - 1), img->number, img->current_mb_nr);
    error("invalid stream: too big vertical motion vector", 500);
  }
}
#endif

/* Motion-compensated prediction of one block for the given decoder instance
   (used by the error-resilient mode decision). Only the single-list luma path
   is active; the weighted-prediction, chroma and bi-prediction paths below are
   compiled out with #if 0. */
void perform_mc(int decoder, ColorPlane pl, StorablePicture *dec_picture, ImageParameters *img, int pred_dir, int l0_mode, int l1_mode, int i, int j, int list_offset, int block_size_x, int block_size_y, int curr_mb_field)
{
  static int vec1_x=0, vec1_y=0;
#if 0
  static int vec2_x=0, vec2_y=0;
  static int vec1_y_cr = 0, vec2_y_cr = 0;
  static int alpha_l0, alpha_l1, wp_offset;
  int        max_imgpel_value = img->max_imgpel_value_comp[pl];
#endif
  static const int mv_mul = 16; // 4 * 4

  
  int i4   = img->block_x + i;  // 4x4 block column in the picture
  int j4   = img->block_y + j;  // 4x4 block row in the picture
  int ioff = (i << 2);          // pixel x-offset of the block inside the macroblock
  int joff = (j << 2);          // pixel y-offset of the block inside the macroblock
  
  /* pred_dir: 0 = list 0 prediction, 1 = list 1 prediction, 2 = bi-prediction */
  assert (pred_dir <= 2);

  if (pred_dir != 2)
  {
    //===== Single List Prediction =====
    short       ref_idx = dec_picture->motion.ref_idx[pred_dir][j4][i4];
    //short       ref_idx_wp = ref_idx;
    short      ***mv_array = img->all_mv[pred_dir][ref_idx][l0_mode];
    StorablePicture **list = listX[list_offset + pred_dir];

    //check_motion_vector_range(img, mv_array[j4][i4][0], mv_array[j4][i4][1]);

    /* Note: all_mv is addressed with the in-macroblock block coordinates (j, i),
       whereas ref_idx above uses picture coordinates (j4, i4). */
    vec1_x = i4 * mv_mul + mv_array[j][i][0];
    //vec1_y = (img->block_y_aff + j) * mv_mul + mv_array[j4][i4][1];
    vec1_y = j4 * mv_mul + mv_array[j][i][1];

    get_block_luma (decoder, pl, dec_picture, list[ref_idx], vec1_x, vec1_y, block_size_x, block_size_y, img, tmp_block_l0); 

#if 0 //Currently not used
    if (img->apply_weights)
    {
      if (curr_mb_field && ((active_pps->weighted_pred_flag&&(img->type==P_SLICE|| img->type == SP_SLICE))||
         (active_pps->weighted_bipred_idc==1 && (img->type==B_SLICE))))
      {
        ref_idx_wp >>=1;
      }

      alpha_l0  = img->wp_weight[pred_dir][ref_idx_wp][0];
      wp_offset = img->wp_offset[pred_dir][ref_idx_wp][0];

      weighted_mc_prediction(&img->mb_pred[pl][joff], block_size_y, block_size_x, ioff, tmp_block_l0, alpha_l0, wp_offset, img->luma_log2_weight_denom, max_imgpel_value);
    }
    else
#endif
    {
      mc_prediction(&img->mb_pred[pl][joff], block_size_y, block_size_x, ioff, tmp_block_l0); 
    }

#if 0
    if ((dec_picture->chroma_format_idc != YUV400) && (dec_picture->chroma_format_idc != YUV444) ) 
    {
      int uv;

      int ioff_cr = (img->mb_cr_size_x == MB_BLOCK_SIZE) ? ioff : ioff >> 1;
      int joff_cr = (img->mb_cr_size_y == MB_BLOCK_SIZE) ? joff : joff >> 1;
      int block_size_x_cr = img->mb_cr_size_x == MB_BLOCK_SIZE ? block_size_x : block_size_x >> 1;
      int block_size_y_cr = img->mb_cr_size_y == MB_BLOCK_SIZE ? block_size_y : block_size_y >> 1;

      vec1_y_cr = vec1_y + ((active_sps->chroma_format_idc == 1)? list[ref_idx]->chroma_vector_adjustment : 0);

      for(uv=0;uv<2;uv++)
      {
        get_block_chroma (uv, list[ref_idx], vec1_x, vec1_y_cr, block_size_x_cr, block_size_y_cr, img, tmp_block_l0);

        if (img->apply_weights)
        {
          alpha_l0  = img->wp_weight[pred_dir][ref_idx_wp][uv + 1];
          wp_offset = img->wp_offset[pred_dir][ref_idx_wp][uv + 1];

          weighted_mc_prediction(&img->mb_pred[uv + 1][joff_cr], block_size_y_cr, block_size_x_cr, ioff_cr, tmp_block_l0, alpha_l0, wp_offset, img->chroma_log2_weight_denom, img->max_imgpel_value_comp[uv + 1]);
        }
        else
        {
          mc_prediction(&img->mb_pred[uv + 1][joff_cr], block_size_y_cr, block_size_x_cr, ioff_cr, tmp_block_l0);
        }
      }
    }
#endif
  }
  else
  {
#if 0
    //===== BI-PREDICTION =====
    short ***l0_mv_array = dec_picture->motion.mv[LIST_0];
    short ***l1_mv_array = dec_picture->motion.mv[LIST_1];

    short l0_refframe = dec_picture->motion.ref_idx[LIST_0][j4][i4];
    short l1_refframe = dec_picture->motion.ref_idx[LIST_1][j4][i4];
    short l0_ref_idx  = l0_refframe;
    short l1_ref_idx  = l1_refframe;

    check_motion_vector_range(img, l0_mv_array[j4][i4][0], l0_mv_array[j4][i4][1]);
    check_motion_vector_range(img, l1_mv_array[j4][i4][0], l1_mv_array[j4][i4][1]);
    vec1_x = i4 * mv_mul + l0_mv_array[j4][i4][0];
    vec2_x = i4 * mv_mul + l1_mv_array[j4][i4][0];

    vec1_y = (img->block_y_aff + j) * mv_mul + l0_mv_array[j4][i4][1];
    vec2_y = (img->block_y_aff + j) * mv_mul + l1_mv_array[j4][i4][1];

    get_block_luma(pl, listX[LIST_0 + list_offset][l0_refframe], vec1_x, vec1_y, block_size_x, block_size_y, img, tmp_block_l0);  
    get_block_luma(pl, listX[LIST_1 + list_offset][l1_refframe], vec2_x, vec2_y, block_size_x, block_size_y, img, tmp_block_l1);  

    if(img->apply_weights)
    {
      int wt_list_offset = (active_pps->weighted_bipred_idc==2)? list_offset : 0;

      // This code existed in the original. Seems pointless but copying it here for reference and in case temporal direct breaks.
      // if (mv_mode==0 && img->direct_spatial_mv_pred_flag==0 ) l1_ref_idx=0;    
      if (((active_pps->weighted_pred_flag&&(img->type==P_SLICE|| img->type == SP_SLICE))||
        (active_pps->weighted_bipred_idc==1 && (img->type==B_SLICE))) && curr_mb_field)
      {
        l0_ref_idx >>=1;
        l1_ref_idx >>=1;
      }

      alpha_l0  =   img->wbp_weight[LIST_0 + wt_list_offset][l0_ref_idx][l1_ref_idx][0];
      alpha_l1  =   img->wbp_weight[LIST_1 + wt_list_offset][l0_ref_idx][l1_ref_idx][0];
      wp_offset = ((img->wp_offset [LIST_0 + wt_list_offset][l0_ref_idx][0] + img->wp_offset[LIST_1 + wt_list_offset][l1_ref_idx][0] + 1) >>1);

      weighted_bi_prediction(&img->mb_pred[pl][joff], tmp_block_l0, tmp_block_l1, block_size_y, block_size_x, ioff, alpha_l0, alpha_l1, wp_offset, (img->luma_log2_weight_denom + 1), max_imgpel_value);
    }
    else
    { 
      bi_prediction(&img->mb_pred[pl][joff], tmp_block_l0, tmp_block_l1, block_size_y, block_size_x, ioff); 
    }

    if ((dec_picture->chroma_format_idc != YUV400) && (dec_picture->chroma_format_idc != YUV444) ) 
    {
      int uv;

      int ioff_cr = img->mb_cr_size_x == MB_BLOCK_SIZE ? ioff : ioff >> 1;
      int joff_cr = img->mb_cr_size_y == MB_BLOCK_SIZE ? joff : joff >> 1;
      int block_size_x_cr = img->mb_cr_size_x == MB_BLOCK_SIZE ? block_size_x : block_size_x >> 1;
      int block_size_y_cr = img->mb_cr_size_y == MB_BLOCK_SIZE ? block_size_y : block_size_y >> 1;

      vec1_y_cr = vec1_y + ((active_sps->chroma_format_idc == 1)? listX[LIST_0 + list_offset][l0_refframe]->chroma_vector_adjustment : 0);
      vec2_y_cr = vec2_y + ((active_sps->chroma_format_idc == 1)? listX[LIST_1 + list_offset][l1_refframe]->chroma_vector_adjustment : 0);

      for(uv=0;uv<2;uv++)
      {
        get_block_chroma (uv, listX[LIST_0 + list_offset][l0_refframe], vec1_x, vec1_y_cr, block_size_x_cr, block_size_y_cr, img, tmp_block_l0);
        get_block_chroma (uv, listX[LIST_1 + list_offset][l1_refframe], vec2_x, vec2_y_cr, block_size_x_cr, block_size_y_cr, img, tmp_block_l1);

        if(img->apply_weights)
        {
          int wt_list_offset = (active_pps->weighted_bipred_idc==2)? list_offset : 0;

          alpha_l0  =   img->wbp_weight[LIST_0 + wt_list_offset][l0_ref_idx][l1_ref_idx][uv + 1];
          alpha_l1  =   img->wbp_weight[LIST_1 + wt_list_offset][l0_ref_idx][l1_ref_idx][uv + 1];
          wp_offset = ((img->wp_offset [LIST_0 + wt_list_offset][l0_ref_idx][uv + 1] + img->wp_offset[LIST_1 + wt_list_offset][l1_ref_idx][uv + 1] + 1) >>1);

          weighted_bi_prediction(&img->mb_pred[uv+1][joff_cr], tmp_block_l0, tmp_block_l1, block_size_y_cr, block_size_x_cr, ioff_cr, alpha_l0, alpha_l1, wp_offset, (img->chroma_log2_weight_denom + 1), img->max_imgpel_value_comp[uv + 1]);
        }
        else
        {
          bi_prediction(&img->mb_pred[uv + 1][joff_cr], tmp_block_l0, tmp_block_l1, block_size_y_cr, block_size_x_cr, ioff_cr);
        }
      }
    }
#endif
  }
}

/* Simplified motion compensation used for error concealment: single-list luma
   prediction only, with motion data taken directly from the decoded picture. */
void perform_mc_concealment(int decoder, ColorPlane pl, StorablePicture *dec_picture, ImageParameters *img, int pred_dir, int i, int j, int block_size_x, int block_size_y)
{
  static int vec1_x=0, vec1_y=0;
  static const int mv_mul = 16; // 4 * 4
  
  int i4   = img->block_x + i;
  int j4   = img->block_y + j;
  int ioff = (i << 2);
  int joff = (j << 2);         
  
  assert (pred_dir<=2);

  if (pred_dir != 2)
  {
    //===== Single List Prediction =====
    short       ref_idx = dec_picture->motion.ref_idx[pred_dir][j4][i4];
    short      ***mv_array = dec_picture->motion.mv[pred_dir];
    StorablePicture **list = listX[pred_dir];

    vec1_x = i4 * mv_mul + mv_array[j4][i4][0];
    vec1_y = j4 * mv_mul + mv_array[j4][i4][1];

    get_block_luma (decoder, pl, dec_picture, list[ref_idx], vec1_x, vec1_y, block_size_x, block_size_y, img, tmp_block_l0); 
    mc_prediction(&img->mb_pred[pl][joff], block_size_y, block_size_x, ioff, tmp_block_l0); 
  }
}
