/* mc_prediction.c -- H.264 baseline-profile decoder, C implementation (page 1 of 3) */

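/* Tail of the chroma fractional-sample (bilinear) interpolation routine -- most likely
   the body of get_block_chroma(), which perform_mc() below calls.  Its opening, where
   the fractional offsets dx, dy and the bilinear weights w00, w01, w10, w11 are derived
   from the chroma motion vector, is not part of this excerpt; the four weights sum to
   1 << total_scale, so rshift_rnd_sf(result, total_scale) renormalizes the blend.
   The branch below is the boundary-safe ("unsafe") path: every sample address is
   clamped to the picture with iClip3(), so blocks that reach past the border reuse the
   edge pixels. */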
  else // unsafe positions
  {
    if (dx == 0 && dy == 0)
    {  /* fullpel position */
      for (j = 0; j < ver_block_size; j++)
      {
        cur_line = cur_img[iClip3(0, maxold_y, y_pos + j)];
        blk_line = block[j];
        for (i = 0; i < hor_block_size; i++)
        {
          *(blk_line++) = cur_line[iClip3(0, maxold_x, x_pos + i )];
        }
      }
    }
    else if (dx == 0)
    { /* vertical fractional position: blend each sample with the one on the line below (w00/w01) */
      for (j = 0; j < ver_block_size; j++)
      {
        cur_line    = cur_img[iClip3(0, maxold_y, y_pos + j)];
        cur_line_p1 = cur_img[iClip3(0, maxold_y, y_pos + j + 1)];
        tmp_pos = x_pos;
        blk_line = block[j];

        for (i = 0; i < hor_block_size; i++)
        {
          ipos    = iClip3(0, maxold_x, tmp_pos++);

          result = (w00 * cur_line[ipos] + w01 * cur_line_p1[ipos]);
          *(blk_line++) = (imgpel) iClip1(max_imgpel_value, rshift_rnd_sf(result, total_scale));
        }
      }      
    }
    else if (dy == 0)
    { /* horizontal fractional position: blend each sample with its right neighbour (w00/w10) */
      for (j = 0; j < ver_block_size; j++)
      {
        cur_line    = cur_img[iClip3(0, maxold_y, y_pos + j)];
        tmp_pos = x_pos;
        blk_line = block[j];

        for (i = 0; i < hor_block_size; i++)
        {
          ipos    = iClip3(0, maxold_x, tmp_pos++);
          ipos_p1 = iClip3(0, maxold_x, tmp_pos  );

          result = (w00 * cur_line[ipos   ] + w10 * cur_line[ipos_p1]);
          *(blk_line++) = (imgpel)iClip1(max_imgpel_value, rshift_rnd_sf(result, total_scale));
        }
      }      
    }
    else
    { /* both dx and dy fractional: full bilinear blend of the four surrounding samples */
      for (j = 0; j < ver_block_size; j++)
      {
        cur_line    = cur_img[iClip3(0, maxold_y, y_pos + j)];
        cur_line_p1 = cur_img[iClip3(0, maxold_y, y_pos + j + 1)];
        tmp_pos = x_pos;
        blk_line = block[j];

        for (i = 0; i < hor_block_size; i++)
        {
          ipos    = iClip3(0, maxold_x, tmp_pos++);
          ipos_p1 = iClip3(0, maxold_x, tmp_pos  );

          result = (
            w00 * cur_line   [ipos   ] + 
            w10 * cur_line   [ipos_p1] +
            w01 * cur_line_p1[ipos   ] +
            w11 * cur_line_p1[ipos_p1]);
          *(blk_line++) = (imgpel) iClip1(max_imgpel_value, rshift_rnd_sf(result, total_scale));
        }
      }      
    }
  }
}
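
/* For reference: sketches of the JM-style utility helpers used above.  These are
   illustrative only (hence the #if 0); the real definitions live in the project's
   headers and may differ in detail. */
#if 0
static inline int iClip3 (int low, int high, int x)  /* clamp x to [low, high] */
{ return (x < low) ? low : ((x > high) ? high : x); }

static inline int iClip1 (int high, int x)           /* clamp x to [0, high] */
{ return (x < 0) ? 0 : ((x > high) ? high : x); }

static inline int rshift_rnd_sf (int x, int a)       /* right shift with rounding */
{ return (x + (1 << (a - 1))) >> a; }
#endif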


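/* Reconstructs both chroma components of an intra-coded macroblock: chroma intra
   prediction first, then one of three paths depending on the chroma CBP bits and the
   SP/SI flag 'smb' -- add the inverse-transformed 4x4 residual blocks, copy the
   prediction unchanged (no coded chroma residual), or run the SP/SI chroma path
   (itrans_sp_cr) before reconstruction. */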
void intra_cr_decoding(Macroblock *currMB, int yuv, ImageParameters *img, int smb)
{
  imgpel **curUV;
  int uv;
  int b8,b4;
  int ioff, joff, ii, jj;

  for(uv = 0; uv < 2; uv++)
  {
    itrans_4x4 = (currMB->is_lossless == FALSE) ? itrans4x4 : itrans4x4_ls;

    curUV = dec_picture->imgUV[uv];
    intrapred_chroma(img, currMB, uv);

    if (!smb && (currMB->cbp >> 4))
    {
      for (b8 = 0; b8 < (img->num_uv_blocks); b8++)
      {
        for(b4 = 0; b4 < 4; b4++)
        {
          joff = subblk_offset_y[yuv][b8][b4];          
          ioff = subblk_offset_x[yuv][b8][b4];          

          itrans_4x4(img, (ColorPlane) (uv + 1), ioff, joff);

          for(jj = joff; jj < 4 + joff;jj++)
            memcpy(&(curUV[img->pix_c_y + jj][img->pix_c_x + ioff]), &(img->mb_rec[uv + 1][jj][ioff]), BLOCK_SIZE * sizeof(imgpel));
        }
      }
    }
    else if ((currMB->cbp >> 4) == 0)
    {
      for (b8 = 0; b8 < (img->num_uv_blocks); b8++)
      {
        for(b4 = 0; b4 < 4; b4++)
        {
          joff = subblk_offset_y[yuv][b8][b4];
          ioff = subblk_offset_x[yuv][b8][b4];          

          for(jj = joff; jj < 4 + joff;jj++)
            memcpy(&(curUV[img->pix_c_y + jj][img->pix_c_x + ioff]), &(img->mb_pred[uv + 1][jj][ioff]), BLOCK_SIZE * sizeof(imgpel));
        }
      }
    }
    else
    {
      itrans_sp_cr(img, uv);

      for (joff  = 0; joff < 8; joff += 4)
      {
        for(ioff = 0; ioff < 8;ioff+=4)
        {          
          itrans_4x4(img, (ColorPlane) (uv + 1), ioff, joff);

          for(jj = joff; jj < joff + 4; jj++)
          {
            for(ii = ioff; ii < ioff + 4; ii++)
            {
              curUV[img->pix_c_y+jj][ii + img->pix_c_x] = (imgpel) img->mb_rres[uv+1][jj][ii];
            }
          }
        }
      }
    }
  }
}

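/* For a B-slice macroblock coded in spatial direct mode, derives the list-0 and list-1
   reference indices (*l0_rFrame, *l1_rFrame) and the corresponding motion-vector
   predictors (pmvl0, pmvl1) from the neighbouring macroblocks A (left), B (above) and
   C (above-right), following the spatial direct rules of the H.264 standard. */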
void prepare_direct_params(Macroblock *currMB, StorablePicture *dec_picture, ImageParameters *img, short pmvl0[2], short pmvl1[2],char *l0_rFrame, char *l1_rFrame)
{
  char l0_rFrameL, l0_rFrameU, l0_rFrameUR;
  char l1_rFrameL, l1_rFrameU, l1_rFrameUR;
  PicMotionParams *motion = &dec_picture->motion;
  
  PixelPos mb_a, mb_b, mb_d, mb_c;

  get_neighbors(currMB, &mb_a, &mb_b, &mb_c, &mb_d, 0, 0, 16);

  if (!img->MbaffFrameFlag)
  {
    l0_rFrameL  = (char) (mb_a.available ? motion->ref_idx[LIST_0][mb_a.pos_y][mb_a.pos_x] : -1);
    l0_rFrameU  = (char) (mb_b.available ? motion->ref_idx[LIST_0][mb_b.pos_y][mb_b.pos_x] : -1);
    l0_rFrameUR = (char) (mb_c.available ? motion->ref_idx[LIST_0][mb_c.pos_y][mb_c.pos_x] : -1);

    l1_rFrameL  = (char) (mb_a.available ? motion->ref_idx[LIST_1][mb_a.pos_y][mb_a.pos_x] : -1);
    l1_rFrameU  = (char) (mb_b.available ? motion->ref_idx[LIST_1][mb_b.pos_y][mb_b.pos_x] : -1);
    l1_rFrameUR = (char) (mb_c.available ? motion->ref_idx[LIST_1][mb_c.pos_y][mb_c.pos_x] : -1);
  }
  else
  {
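    /* MBAFF: a neighbour's reference index is stored according to the neighbour's own
       frame/field coding.  When the current MB is a field MB, indices taken from
       frame-coded neighbours are doubled (each frame yields two reference fields);
       when the current MB is frame-coded, indices from field-coded neighbours are
       halved.  Unavailable/intra indices (-1) pass through unchanged. */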
    if (currMB->mb_field)
    {
      l0_rFrameL = (char) (mb_a.available 
        ? img->mb_data[mb_a.mb_addr].mb_field  || motion->ref_idx[LIST_0][mb_a.pos_y][mb_a.pos_x] < 0
        ? motion->ref_idx[LIST_0][mb_a.pos_y][mb_a.pos_x] 
        : motion->ref_idx[LIST_0][mb_a.pos_y][mb_a.pos_x] * 2: -1);

      l0_rFrameU = (char) (mb_b.available 
        ? img->mb_data[mb_b.mb_addr].mb_field || motion->ref_idx[LIST_0][mb_b.pos_y][mb_b.pos_x] < 0
        ? motion->ref_idx[LIST_0][mb_b.pos_y][mb_b.pos_x] 
        : motion->ref_idx[LIST_0][mb_b.pos_y][mb_b.pos_x] * 2: -1);

       l0_rFrameUR = (char) (mb_c.available 
         ? img->mb_data[mb_c.mb_addr].mb_field || motion->ref_idx[LIST_0][mb_c.pos_y][mb_c.pos_x] < 0 
         ? motion->ref_idx[LIST_0][mb_c.pos_y][mb_c.pos_x] 
         : motion->ref_idx[LIST_0][mb_c.pos_y][mb_c.pos_x] * 2: -1);

       l1_rFrameL = (char) (mb_a.available 
         ? img->mb_data[mb_a.mb_addr].mb_field || motion->ref_idx[LIST_1][mb_a.pos_y][mb_a.pos_x]  < 0 
         ? motion->ref_idx[LIST_1][mb_a.pos_y][mb_a.pos_x] 
         : motion->ref_idx[LIST_1][mb_a.pos_y][mb_a.pos_x] * 2: -1);

       l1_rFrameU = (char) (mb_b.available 
         ? img->mb_data[mb_b.mb_addr].mb_field || motion->ref_idx[LIST_1][mb_b.pos_y][mb_b.pos_x]  < 0 
         ? motion->ref_idx[LIST_1][mb_b.pos_y][mb_b.pos_x] 
         : motion->ref_idx[LIST_1][mb_b.pos_y][mb_b.pos_x] * 2: -1);

       l1_rFrameUR = (char) (mb_c.available 
         ? img->mb_data[mb_c.mb_addr].mb_field || motion->ref_idx[LIST_1][mb_c.pos_y][mb_c.pos_x] < 0
         ? motion->ref_idx[LIST_1][mb_c.pos_y][mb_c.pos_x] 
         : motion->ref_idx[LIST_1][mb_c.pos_y][mb_c.pos_x] * 2: -1);
    }
    else
    {
      l0_rFrameL = (char) (mb_a.available 
        ? img->mb_data[mb_a.mb_addr].mb_field || motion->ref_idx[LIST_0][mb_a.pos_y][mb_a.pos_x]  < 0 
        ? motion->ref_idx[LIST_0][mb_a.pos_y][mb_a.pos_x] >> 1 
        : motion->ref_idx[LIST_0][mb_a.pos_y][mb_a.pos_x]: -1);

      l0_rFrameU = (char) (mb_b.available 
        ? img->mb_data[mb_b.mb_addr].mb_field || motion->ref_idx[LIST_0][mb_b.pos_y][mb_b.pos_x] < 0 
        ? motion->ref_idx[LIST_0][mb_b.pos_y][mb_b.pos_x] >> 1 
        : motion->ref_idx[LIST_0][mb_b.pos_y][mb_b.pos_x] : -1);

      l0_rFrameUR = (char) (mb_c.available 
        ? img->mb_data[mb_c.mb_addr].mb_field || motion->ref_idx[LIST_0][mb_c.pos_y][mb_c.pos_x] < 0 
        ? motion->ref_idx[LIST_0][mb_c.pos_y][mb_c.pos_x] >> 1 
        : motion->ref_idx[LIST_0][mb_c.pos_y][mb_c.pos_x] : -1);

      l1_rFrameL = (char) (mb_a.available 
        ? img->mb_data[mb_a.mb_addr].mb_field || motion->ref_idx[LIST_1][mb_a.pos_y][mb_a.pos_x] < 0 
        ? motion->ref_idx[LIST_1][mb_a.pos_y][mb_a.pos_x] >> 1 
        : motion->ref_idx[LIST_1][mb_a.pos_y][mb_a.pos_x] : -1);

      l1_rFrameU = (char) (mb_b.available 
        ? img->mb_data[mb_b.mb_addr].mb_field || motion->ref_idx[LIST_1][mb_b.pos_y][mb_b.pos_x] < 0 
        ? motion->ref_idx[LIST_1][mb_b.pos_y][mb_b.pos_x] >> 1 
        : motion->ref_idx[LIST_1][mb_b.pos_y][mb_b.pos_x] : -1);

      l1_rFrameUR = (char) (mb_c.available 
        ? img->mb_data[mb_c.mb_addr].mb_field || motion->ref_idx[LIST_1][mb_c.pos_y][mb_c.pos_x] < 0 
        ? motion->ref_idx[LIST_1][mb_c.pos_y][mb_c.pos_x] >> 1
        : motion->ref_idx[LIST_1][mb_c.pos_y][mb_c.pos_x] : -1);
    }
  }

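  /* Spatial direct reference index: the minimum non-negative reference index among the
     A, B and C neighbours for each list; it stays -1 only if no neighbour supplies a
     valid reference for that list. */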
  *l0_rFrame = (char) ((l0_rFrameL >= 0 && l0_rFrameU >= 0)  ? imin(l0_rFrameL,l0_rFrameU) : imax(l0_rFrameL,l0_rFrameU));
  *l0_rFrame = (char) ((*l0_rFrame >= 0 && l0_rFrameUR >= 0) ? imin(*l0_rFrame,l0_rFrameUR): imax(*l0_rFrame,l0_rFrameUR));

  *l1_rFrame = (char) ((l1_rFrameL >= 0 && l1_rFrameU >= 0)  ? imin(l1_rFrameL,l1_rFrameU) : imax(l1_rFrameL,l1_rFrameU));
  *l1_rFrame = (char) ((*l1_rFrame >= 0 && l1_rFrameUR >= 0) ? imin(*l1_rFrame,l1_rFrameUR): imax(*l1_rFrame,l1_rFrameUR));

  if (*l0_rFrame >=0)
    GetMotionVectorPredictor (currMB, &mb_a, &mb_b, &mb_c, pmvl0, *l0_rFrame, motion->ref_idx[LIST_0], motion->mv[LIST_0], 0, 0, 16, 16);

  if (*l1_rFrame >=0)
    GetMotionVectorPredictor (currMB, &mb_a, &mb_b, &mb_c, pmvl1, *l1_rFrame, motion->ref_idx[LIST_1], motion->mv[LIST_1], 0, 0, 16, 16);
}

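/* Validates a decoded motion vector: the horizontal component must lie in
   [-8192, 8191] (quarter-sample units) and the vertical component in the
   level-dependent range [-img->max_mb_vmv_r, img->max_mb_vmv_r - 1]; a violation
   aborts decoding via error(). */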
void check_motion_vector_range(ImageParameters *img, short mv_x, short mv_y)
{
  if (mv_x > 8191 || mv_x < -8192)
  {
    fprintf(stderr,"ERROR! Horizontal motion vector %d is out of allowed range {-8192, 8191} in picture %d, macroblock %d\n", mv_x, img->number, img->current_mb_nr);
    error("invalid stream: too big horizontal motion vector", 500);
  }

  if (mv_y > (img->max_mb_vmv_r - 1) || mv_y < (-img->max_mb_vmv_r))
  {
    fprintf(stderr,"ERROR! Vertical motion vector %d is out of allowed range {%d, %d} in picture %d, macroblock %d\n", mv_y, (-img->max_mb_vmv_r), (img->max_mb_vmv_r - 1), img->number, img->current_mb_nr);
    error("invalid stream: too big vertical motion vector", 500);
  }
}

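/* Motion-compensated prediction for one partition of the current macroblock.
   pred_dir selects list 0 (0), list 1 (1) or bi-prediction (2).  Luma is predicted
   first via get_block_luma(); for 4:2:0/4:2:2 content both chroma blocks follow via
   get_block_chroma().  Weighted (uni- or bi-directional) prediction is applied when
   img->apply_weights is set. */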
void perform_mc(ColorPlane pl, StorablePicture *dec_picture, ImageParameters *img, int pred_dir, int i, int j, int list_offset, int block_size_x, int block_size_y, int curr_mb_field)
{
  static int vec1_x=0, vec1_y=0, vec2_x=0, vec2_y=0;
  static int vec1_y_cr = 0, vec2_y_cr = 0;
  static int alpha_l0, alpha_l1, wp_offset;
  static const int mv_mul = 16; // 4 * 4
  int        max_imgpel_value = img->max_imgpel_value_comp[pl];
  
  int i4   = img->block_x + i;
  int j4   = img->block_y + j;
  int ioff = (i << 2);
  int joff = (j << 2);         
  
  assert (pred_dir<=2);

  if (pred_dir != 2)
  {
    //===== Single List Prediction =====
    short       ref_idx = dec_picture->motion.ref_idx[pred_dir][j4][i4];
    short       ref_idx_wp = ref_idx;
    short      *mv_array = dec_picture->motion.mv[pred_dir][j4][i4];
    StorablePicture *list = listX[list_offset + pred_dir][ref_idx];

    check_motion_vector_range(img, mv_array[0], mv_array[1]);

    vec1_x = i4 * mv_mul + mv_array[0];
    vec1_y = (img->block_y_aff + j) * mv_mul + mv_array[1];

    get_block_luma (pl, list, vec1_x, vec1_y, block_size_x, block_size_y, img, tmp_block_l0); 

    if (img->apply_weights)
    {
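      /* Explicit weighted prediction with MBAFF field coding: the weight/offset tables
         are indexed per frame reference, so a field reference index is mapped back to
         its frame index (>> 1) before the lookup. */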
      if (curr_mb_field && ((active_pps->weighted_pred_flag&&(img->type==P_SLICE|| img->type == SP_SLICE))||
         (active_pps->weighted_bipred_idc==1 && (img->type==B_SLICE))))
      {
        ref_idx_wp >>=1;
      }

      alpha_l0  = img->wp_weight[pred_dir][ref_idx_wp][0];
      wp_offset = img->wp_offset[pred_dir][ref_idx_wp][0];

      weighted_mc_prediction(&img->mb_pred[pl][joff], block_size_y, block_size_x, ioff, tmp_block_l0, alpha_l0, wp_offset, img->luma_log2_weight_denom, max_imgpel_value);
    }
    else
    {
      mc_prediction(&img->mb_pred[pl][joff], block_size_y, block_size_x, ioff, tmp_block_l0); 
    }

    if ((dec_picture->chroma_format_idc != YUV400) && (dec_picture->chroma_format_idc != YUV444) ) 
    {
      int uv;

      int ioff_cr = (img->mb_cr_size_x == MB_BLOCK_SIZE) ? ioff : ioff >> 1;
      int joff_cr = (img->mb_cr_size_y == MB_BLOCK_SIZE) ? joff : joff >> 1;
      int block_size_x_cr = img->mb_cr_size_x == MB_BLOCK_SIZE ? block_size_x : block_size_x >> 1;
      int block_size_y_cr = img->mb_cr_size_y == MB_BLOCK_SIZE ? block_size_y : block_size_y >> 1;

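      /* 4:2:0 only: add the per-reference vertical chroma adjustment
         (chroma_vector_adjustment, used for field/frame handling) to the chroma MV. */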
      vec1_y_cr = vec1_y + ((active_sps->chroma_format_idc == 1)? list->chroma_vector_adjustment : 0);

      for(uv=0;uv<2;uv++)
      {
        get_block_chroma (uv, list, vec1_x, vec1_y_cr, block_size_x_cr, block_size_y_cr, img, tmp_block_l0);

        if (img->apply_weights)
        {
          alpha_l0  = img->wp_weight[pred_dir][ref_idx_wp][uv + 1];
          wp_offset = img->wp_offset[pred_dir][ref_idx_wp][uv + 1];

          weighted_mc_prediction(&img->mb_pred[uv + 1][joff_cr], block_size_y_cr, block_size_x_cr, ioff_cr, tmp_block_l0, alpha_l0, wp_offset, img->chroma_log2_weight_denom, img->max_imgpel_value_comp[uv + 1]);
        }
        else
        {
          mc_prediction(&img->mb_pred[uv + 1][joff_cr], block_size_y_cr, block_size_x_cr, ioff_cr, tmp_block_l0);
        }
      }
    }
  }
  else
  {
    //===== BI-PREDICTION =====
    short *l0_mv_array = dec_picture->motion.mv[LIST_0][j4][i4];
    short *l1_mv_array = dec_picture->motion.mv[LIST_1][j4][i4];

    short l0_refframe = dec_picture->motion.ref_idx[LIST_0][j4][i4];
    short l0_ref_idx  = l0_refframe;
    short l1_refframe = dec_picture->motion.ref_idx[LIST_1][j4][i4];
    short l1_ref_idx  = l1_refframe;

    check_motion_vector_range(img, l0_mv_array[0], l0_mv_array[1]);
    check_motion_vector_range(img, l1_mv_array[0], l1_mv_array[1]);
    vec1_x = i4 * mv_mul + l0_mv_array[0];
    vec2_x = i4 * mv_mul + l1_mv_array[0];

    vec1_y = (img->block_y_aff + j) * mv_mul + l0_mv_array[1];
    vec2_y = (img->block_y_aff + j) * mv_mul + l1_mv_array[1];

    get_block_luma(pl, listX[LIST_0 + list_offset][l0_refframe], vec1_x, vec1_y, block_size_x, block_size_y, img, tmp_block_l0);  
    get_block_luma(pl, listX[LIST_1 + list_offset][l1_refframe], vec2_x, vec2_y, block_size_x, block_size_y, img, tmp_block_l1);  

    if(img->apply_weights)
    {
      int wt_list_offset = (active_pps->weighted_bipred_idc==2)? list_offset : 0;

      // This code existed in the original. Seems pointless but copying it here for reference and in case temporal direct breaks.
      // if (mv_mode==0 && img->direct_spatial_mv_pred_flag==0 ) l1_ref_idx=0;    
      if (((active_pps->weighted_pred_flag&&(img->type==P_SLICE|| img->type == SP_SLICE))||
        (active_pps->weighted_bipred_idc==1 && (img->type==B_SLICE))) && curr_mb_field)
      {
        l0_ref_idx >>=1;
        l1_ref_idx >>=1;
      }

      alpha_l0  =   img->wbp_weight[LIST_0 + wt_list_offset][l0_ref_idx][l1_ref_idx][0];
      alpha_l1  =   img->wbp_weight[LIST_1 + wt_list_offset][l0_ref_idx][l1_ref_idx][0];
      wp_offset = ((img->wp_offset [LIST_0 + wt_list_offset][l0_ref_idx][0] + img->wp_offset[LIST_1 + wt_list_offset][l1_ref_idx][0] + 1) >>1);

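      /* Weighted bi-prediction: both weighted blocks are combined with a rounding
         right shift by (luma_log2_weight_denom + 1), the averaged offset computed
         above is added, and the result is clipped to [0, max_imgpel_value]. */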
      weighted_bi_prediction(&img->mb_pred[pl][joff], tmp_block_l0, tmp_block_l1, block_size_y, block_size_x, ioff, alpha_l0, alpha_l1, wp_offset, (img->luma_log2_weight_denom + 1), max_imgpel_value);
    }
    else
    { 
      bi_prediction(&img->mb_pred[pl][joff], tmp_block_l0, tmp_block_l1, block_size_y, block_size_x, ioff); 
    }

    if ((dec_picture->chroma_format_idc != YUV400) && (dec_picture->chroma_format_idc != YUV444) ) 
    {
      int uv;

      int ioff_cr = img->mb_cr_size_x == MB_BLOCK_SIZE ? ioff : ioff >> 1;
      int joff_cr = img->mb_cr_size_y == MB_BLOCK_SIZE ? joff : joff >> 1;
      int block_size_x_cr = img->mb_cr_size_x == MB_BLOCK_SIZE ? block_size_x : block_size_x >> 1;
      int block_size_y_cr = img->mb_cr_size_y == MB_BLOCK_SIZE ? block_size_y : block_size_y >> 1;

      vec1_y_cr = vec1_y + ((active_sps->chroma_format_idc == 1)? listX[LIST_0 + list_offset][l0_refframe]->chroma_vector_adjustment : 0);
      vec2_y_cr = vec2_y + ((active_sps->chroma_format_idc == 1)? listX[LIST_1 + list_offset][l1_refframe]->chroma_vector_adjustment : 0);

      for(uv=0;uv<2;uv++)
      {
        get_block_chroma (uv, listX[LIST_0 + list_offset][l0_refframe], vec1_x, vec1_y_cr, block_size_x_cr, block_size_y_cr, img, tmp_block_l0);
        get_block_chroma (uv, listX[LIST_1 + list_offset][l1_refframe], vec2_x, vec2_y_cr, block_size_x_cr, block_size_y_cr, img, tmp_block_l1);

        if(img->apply_weights)
        {
          int wt_list_offset = (active_pps->weighted_bipred_idc==2)? list_offset : 0;

          alpha_l0  =   img->wbp_weight[LIST_0 + wt_list_offset][l0_ref_idx][l1_ref_idx][uv + 1];
          alpha_l1  =   img->wbp_weight[LIST_1 + wt_list_offset][l0_ref_idx][l1_ref_idx][uv + 1];
          wp_offset = ((img->wp_offset [LIST_0 + wt_list_offset][l0_ref_idx][uv + 1] + img->wp_offset[LIST_1 + wt_list_offset][l1_ref_idx][uv + 1] + 1) >>1);

          weighted_bi_prediction(&img->mb_pred[uv+1][joff_cr], tmp_block_l0, tmp_block_l1, block_size_y_cr, block_size_x_cr, ioff_cr, alpha_l0, alpha_l1, wp_offset, (img->chroma_log2_weight_denom + 1), img->max_imgpel_value_comp[uv + 1]);
        }
        else
        {
          bi_prediction(&img->mb_pred[uv + 1][joff_cr], tmp_block_l0, tmp_block_l1, block_size_y_cr, block_size_x_cr, ioff_cr);
        }
      }
    }      
  }
}
