
📄 mv-search.c

📁 MPEG4IP network streaming media development source code
💻 C
📖 Page 1 of 4
    iy0=pic4_pix_x+s_pos_x;
    jy0=pic4_pix_y+s_pos_y;

    if (lambda)
      current_inter_sad = MVCostLambda (mv_mul==1?0:1, lambda,
                                        predicted_x, predicted_y, s_pos_x, s_pos_y);
    else
      current_inter_sad = MVCost (4, (mv_mul==1)?4:8, blocktype, img->qp, predicted_x, predicted_y, s_pos_x, s_pos_y);

    for (vec1_x=0; vec1_x < blockshape_x; vec1_x += 4)
    {
      for (vec1_y=0; vec1_y < blockshape_y; vec1_y += 4)
      {
        for (i=0; i < BLOCK_SIZE; i++)
        {
          vec2_x=i+vec1_x;
          i22=iy0+vec2_x*4;
          for (j=0; j < BLOCK_SIZE; j++)
          {
            vec2_y=j+vec1_y;
            m7[i][j]=mo[vec2_y][vec2_x]-PelY_14 (CurrRefPic, jy0+vec2_y*4, i22);
          }
        }
        current_inter_sad += find_sad(input->hadamard, m7);
      }
    }

    if (current_inter_sad < best_inter_sad)
    {
      // Vectors are saved in all_mv[] to be able to assign correct vectors to each block after mode selection.
      // tmp_mv[] is a 'temporary' assignment of vectors to be used to estimate 'bit cost' in vector prediction.
      *s_pos_x1=s_pos_x;
      *s_pos_y1=s_pos_y;

      for (i=0; i < blockshape_x/BLOCK_SIZE; i++)
      {
        for (j=0; j < blockshape_y/BLOCK_SIZE; j++)
        {
          all_mv[block_x+i][block_y+j][ref_frame][blocktype][0]=mv_mul * (*s_pos_x1);
          all_mv[block_x+i][block_y+j][ref_frame][blocktype][1]=mv_mul * (*s_pos_y1);
          tmp_mv[0][pic_block_y+j][pic_block_x+i+BLOCK_SIZE]=mv_mul * (*s_pos_x1);
          tmp_mv[1][pic_block_y+j][pic_block_x+i+BLOCK_SIZE]=mv_mul * (*s_pos_y1);
        }
      }
      best_inter_sad=current_inter_sad;
    }
  }

  // Double the best position for use as the centre of the subsequent eighth-pel refinement.
  *s_pos_x2 = 2 * (*s_pos_x1);
  *s_pos_y2 = 2 * (*s_pos_y1);

  return best_inter_sad;
}


/*!
 ***********************************************************************
 * \brief
 *    Eighth Pel Search
 ***********************************************************************
 */
int EighthPelSearch(int pic8_pix_x, int pic8_pix_y, int mv_mul, int blockshape_x, int blockshape_y,
      int *s_pos_x1, int *s_pos_y1, int *s_pos_x2, int *s_pos_y2, int center_h2, int center_v2,
      int predicted_x, int predicted_y, int blocktype, pel_t mo[16][16], pel_t **CurrRefPic, int ***tmp_mv,
      int *****all_mv, int block_x, int block_y, int ref_frame,
      int pic_block_x, int pic_block_y, int best_inter_sad,
      double lambda
      )
{
  int current_inter_sad;
  int lv, s_pos_x, s_pos_y, iy0, jy0, vec1_x, vec1_y, vec2_x, vec2_y, i, i22, j;
  int m7[16][16];

  // Test the eight spiral offsets (indices 1..8) around the refinement centre (*s_pos_x2, *s_pos_y2).
  for (lv=1; lv < 9; lv++)
  {
    s_pos_x=*s_pos_x2+SpiralX[lv];
    s_pos_y=*s_pos_y2+SpiralY[lv];
    iy0=pic8_pix_x+s_pos_x;
    jy0=pic8_pix_y+s_pos_y;

    if (lambda)
      current_inter_sad = MVCostLambda (0, lambda,
                                        predicted_x, predicted_y, s_pos_x, s_pos_y);
    else
      current_inter_sad = MVCost (8, 8, blocktype, img->qp, predicted_x, predicted_y, s_pos_x, s_pos_y);

    for (vec1_x=0; vec1_x < blockshape_x; vec1_x += 4)
    {
      for (vec1_y=0; vec1_y < blockshape_y; vec1_y += 4)
      {
        for (i=0; i < BLOCK_SIZE; i++)
        {
          vec2_x=i+vec1_x;
          i22=iy0+vec2_x*8;
          for (j=0; j < BLOCK_SIZE; j++)
          {
            vec2_y=j+vec1_y;
            m7[i][j]=mo[vec2_y][vec2_x]-PelY_18 (CurrRefPic, jy0+vec2_y*8, i22);
          }
        }
        current_inter_sad += find_sad(input->hadamard, m7);
      }
    }

    if (current_inter_sad < best_inter_sad)
    {
      // Vectors are saved in all_mv[] to be able to assign correct vectors to each block after mode selection.
      // tmp_mv[] is a 'temporary' assignment of vectors to be used to estimate 'bit cost' in vector prediction.
      *s_pos_x1=s_pos_x;
      *s_pos_y1=s_pos_y;

      for (i=0; i < blockshape_x/BLOCK_SIZE; i++)
      {
        for (j=0; j < blockshape_y/BLOCK_SIZE; j++)
        {
          all_mv[block_x+i][block_y+j][ref_frame][blocktype][0]=*s_pos_x1;
          all_mv[block_x+i][block_y+j][ref_frame][blocktype][1]=*s_pos_y1;
          tmp_mv[0][pic_block_y+j][pic_block_x+i+BLOCK_SIZE]=*s_pos_x1;
          tmp_mv[1][pic_block_y+j][pic_block_x+i+BLOCK_SIZE]=*s_pos_y1;
        }
      }
      best_inter_sad=current_inter_sad;
    }
  }

  return best_inter_sad;
}

#define PFRAME      0
#define B_FORWARD   1
#define B_BACKWARD  2
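/*
 * Illustrative sketch (editor's addition, not part of mv-search.c): the eighth-pel
 * refinement loop above indexes SpiralX[lv] / SpiralY[lv] for lv = 1..8, i.e. eight
 * candidate offsets around the refinement centre (*s_pos_x2, *s_pos_y2), and the
 * preceding quarter-pel refinement appears to use the same convention.  The real
 * tables are defined elsewhere in the encoder and are not shown on this page; the
 * hypothetical, commented-out arrays below only assume a centre-plus-eight-neighbours
 * layout to make the indexing convention concrete.
 */
/*
static const int SpiralX_example[9] = { 0,  -1,  0,  1, -1,  1, -1,  0,  1 };
static const int SpiralY_example[9] = { 0,  -1, -1, -1,  0,  0,  1,  1,  1 };
*/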
/*!
 ************************************************************************
 * \brief
 *    In this routine, motion search (integer pel + 1/2 and 1/4 pel) and mode selection
 *    are performed. Since we treat all 4x4 blocks before coding/decoding, the
 *    prediction may not be based on decoded pixels (except for some of the blocks).
 *    This will result in too good a prediction.  To compensate for this, the SAD for
 *    intra (tot_intra_sad) is given a 'handicap' depending on QP.
 * \par
 *    Note that a couple of speed improvements were implemented by StW, which
 *    slightly change the use of some variables.  In particular, the dimensions
 *    of the img->m7 and mo[] variables were changed in order to allow better
 *    cache access.
 * \par
 *    Depending on the MV resolution, 1/4-pel or 1/8-pel motion vectors are estimated.
 * \par
 * \par Input:
 *    Best intra SAD value.
 *
 * \par Output:
 *    Reference image.
 *
 * \par Side Effects:
 *    writes refFrArr[ ][ ] to set the reference frame ID for each block
 *
 * \par Bugs and Limitations:
 *    Currently the B-frame module does not support UMV in its residual coding.
 *    Hence, the old search range limitation is implemented here for all B frames.
 *    The if()s in question all start in column 1 of the file and are thus
 *    easily identifiable.
 *
 ************************************************************************
 */
int
UnifiedMotionSearch (int tot_intra_sad, int **refFrArr, int ***tmp_mv,
         int *****img_mv,
         int *img_mb_mode, int *img_blc_size_h, int *img_blc_size_v,
         int *img_multframe_no, int BFrame, int *min_inter_sad, int *****all_mv)
{
  int predframe_no=0;
  int ref_frame,blocktype;
  int tot_inter_sad;
  int ref_inx;

  *min_inter_sad = MAX_VALUE;

  // Loop through all reference frames under consideration
  // P-frames, B-forward: all reference frames
  // B-backward: use only the last reference frame, stored in mref_P
#ifdef _ADDITIONAL_REFERENCE_FRAME_
  for (ref_frame=0; ref_frame < img->nb_references; ref_frame++)
    if ((ref_frame <  input->no_multpred) || (ref_frame == input->add_ref_frame))
#else
  for (ref_frame=0; ref_frame < img->nb_references; ref_frame++)
#endif
  {
    ref_inx = (img->number-ref_frame-1)%img->buf_cycle;

    //  Looping through all the chosen block sizes:
    blocktype=1;
    while (blocktype<=7 && input->blc_size[blocktype][0]==0) // skip blocksizes not chosen
      blocktype++;

    for (;blocktype <= 7;)
    {
      // start 'handicap': older reference frames get a small QP-dependent penalty,
      // so the most recent frame wins when the SADs are (nearly) equal
      tot_inter_sad  = QP2QUANT[img->qp] * min(ref_frame,1) * 2;
      tot_inter_sad += SingleUnifiedMotionSearch (ref_frame, blocktype,
        refFrArr, tmp_mv, img_mv,
        BFrame, all_mv, 0.0);

      /*
        Here we update which is the best inter mode. At the end we have the best inter mode.
        predframe_no:        which reference frame to use for prediction
        img->multframe_no:   index in the mref[] matrix used for prediction
      */
      if (tot_inter_sad <= *min_inter_sad)
      {
        // Map blocktype and prediction direction to the macroblock mode number:
        //   P frame:     mode = blocktype
        //   B forward:   mode = blocktype (16x16) or 2*blocktype
        //   B backward:  mode = blocktype+1 (16x16) or 2*blocktype+1
        if (BFrame==PFRAME)
          *img_mb_mode=blocktype;
        if (BFrame == B_FORWARD)
        {
          if (blocktype == 1)
            *img_mb_mode = blocktype;
          else
            *img_mb_mode = blocktype * 2;
        }
        if (BFrame == B_BACKWARD)
        {
          if (blocktype == 1)
            *img_mb_mode = blocktype+1;
          else
            *img_mb_mode = blocktype * 2+1;
        }

        *img_blc_size_h=input->blc_size[blocktype][0];
        *img_blc_size_v=input->blc_size[blocktype][1];
        predframe_no=ref_frame;
        *img_multframe_no=ref_inx;
        *min_inter_sad=tot_inter_sad;
      }

      while (++blocktype<=7 && input->blc_size[blocktype][0]==0); // only go through chosen blocksizes
    }

    if (BFrame == B_BACKWARD)
      ref_frame = MAX_VALUE;    // jump out of the loop
  }

  return predframe_no;
}
/*!
 ************************************************************************
 * \brief
 *    Unified motion search for a single combination of reference frame and block size
 ************************************************************************
 */
int
SingleUnifiedMotionSearch (int    ref_frame,
                           int    blocktype,
                           int  **refFrArr,
                           int ***tmp_mv,
                           int    *****img_mv,
                           int    BFrame,
                           int    *****all_mv,
                           double lambda)
{
  int   j,i;
  int   pic_block_x,pic_block_y,block_x,ref_inx,block_y,pic_pix_y;
  int   pic4_pix_y,pic8_pix_y,pic_pix_x,pic4_pix_x,pic8_pix_x;
  int   ip0,ip1;
  int   center_h2,curr_search_range,center_v2;
  int   s_pos_x1,s_pos_y1,s_pos_x2,s_pos_y2;
  int   mb_y,mb_x;
  int   best_inter_sad, tot_inter_sad = 0;
  pel_t mo[16][16];
  int   blockshape_x,blockshape_y;
  int   mv_mul;
  pel_t **CurrRefPic;
  pel_t *CurrRefPic11;
#ifdef _FAST_FULL_ME_
  int   refindex, full_search_range;
#else
  int   center_x, center_y;
#endif

  /*
    curr_search_range is the actual search range used, depending on block size and reference frame.
    It may be reduced by 1/2 or 1/4 for smaller blocks and prediction from older frames due to complexity
    (e.g. with search_range 16, an older reference frame and a block smaller than 16x16, the range drops to 16/4 = 4).
  */
#ifdef _FULL_SEARCH_RANGE_
  if (input->full_search == 2) // no restrictions at all
  {
    curr_search_range = input->search_range;
  }
  else if (input->full_search == 1) // smaller search range for older reference frames
  {
    curr_search_range = input->search_range /  (min(ref_frame,1)+1);
  }
  else // smaller search range for older reference frames and smaller blocks
#endif
  {
    curr_search_range = input->search_range / ((min(ref_frame,1)+1) * min(2,blocktype));
  }

#ifdef _FAST_FULL_ME_
#ifdef _FULL_SEARCH_RANGE_
  if (input->full_search == 2)
    full_search_range   = input->search_range;
  else
#endif
    full_search_range   = input->search_range / (min(ref_frame,1)+1);
#endif

  // Motion vector scale factor for 1/8-pel MV resolution
  if (input->mv_res)
    mv_mul=2;
  else
    mv_mul=1;

  // set reference frame array
  for (j = 0;j < 4;j++)
  {
    for (i = 0;i < 4;i++)
    {
      refFrArr[img->block_y+j][img->block_x+i] = ref_frame;
    }
  }

  // find reference image
  if (BFrame != B_BACKWARD)
  {
    ref_inx=(img->number-ref_frame-1)%img->buf_cycle;
    CurrRefPic   = mref[ref_inx];
    CurrRefPic11 = Refbuf11[ref_inx];
  }
  else
  {
    CurrRefPic   = mref_P;
    CurrRefPic11 = Refbuf11_P;
  }

#ifdef _FAST_FULL_ME_
  //===== setup fast full integer search: get SADs for all motion vectors and 4x4 blocks =====
  //      (this is done just once for a macroblock)
  refindex = (BFrame != B_BACKWARD ? ref_frame : img->buf_cycle);
  if (!search_setup_done[refindex])
  {
    SetupFastFullIntegerSearch (ref_frame, refFrArr, tmp_mv, img_mv, CurrRefPic11,
      full_search_range, (BFrame==B_BACKWARD), (lambda != 0));
  }
#endif

  blockshape_x=input->blc_size[blocktype][0];  // input->blc_size has information of the 7 blocksizes
  blockshape_y=input->blc_size[blocktype][1];  // array iz stores offset inside one MB

  //  Loop through the whole MB with all block sizes
  for (mb_y=0; mb_y < MB_BLOCK_SIZE; mb_y += blockshape_y)
  {
    block_y=mb_y/BLOCK_SIZE;
    pic_block_y=img->block_y+block_y;
    pic_pix_y=img->pix_y+mb_y;
    pic4_pix_y=pic_pix_y*4;

    for (mb_x=0; mb_x < MB_BLOCK_SIZE; mb_x += blockshape_x)
    {
      block_x=mb_x/BLOCK_SIZE;
      pic_block_x=img->block_x+block_x;
      pic_pix_x=img->pix_x+mb_x;
      pic4_pix_x=pic_pix_x*4;

      SetMotionVectorPredictor (img_mv[block_x][block_y][ref_frame][blocktype], refFrArr, tmp_mv,
        ref_frame, mb_x, mb_y, blockshape_x, blockshape_y);
      ip0=img_mv[block_x][block_y][ref_frame][blocktype][0];
      ip1=img_mv[block_x][block_y][ref_frame][blocktype][1];

      //  mo[] is the original block to be used in the search process
      //  Note: dimensions changed to speed up access
      for (i=0; i < blockshape_x; i++)
        for (j=0; j < blockshape_y; j++)
          mo[j][i]=imgY_org[pic_pix_y+j][pic_pix_x+i];

      //  Integer pel search.  center_x,center_y is the 'search center'
#ifndef _FAST_FULL_ME_

      // Note that the predictor in mv[][][][][] is in 1/4 or 1/8-pel resolution.
      // The following rounding enforces a "true" integer pel search.
      center_x=ip0/(mv_mul*4);
      center_y=ip1/(mv_mul*4);

      // Limitation of center_x,center_y so that the search window contains the (0,0) vector
      center_x=max(-(input->search_range),min(input->search_range,center_x));
      center_y=max(-(input->search_range),min(input->search_range,center_y));

      // The following was deleted to support UMV.  See the remark regarding UMV performance for B
      // in the Readme file (to be provided).
      // Search center corrected to prevent vectors outside the frame; this is not permitted in this model.

      if ((pic_pix_x + center_x > curr_search_range) &&
          (pic_pix_y + center_y > curr_search_range) &&
          (pic_pix_x + center_x < img->width  - 1 - curr_search_range - blockshape_x) &&
          (pic_pix_y + center_y < img->height - 1 - curr_search_range - blockshape_y))
      {
        PelYline_11 = FastLine16Y_11;
      }
      else
      {
        PelYline_11 = UMVLine16Y_11;
      }

      best_inter_sad = IntegerSpiralSearch (mv_mul, center_x, center_y, ip0, ip1,
                                            blockshape_x, blockshape_y, curr_search_range,
                                            blocktype, mo, CurrRefPic11, &center_h2, &center_v2,
                                            pic_pix_x, pic_pix_y,
                                            lambda);
#else // _FAST_FULL_ME_
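The refinement loops above accumulate distortion one 4x4 block at a time via find_sad(input->hadamard, m7), whose definition is not shown on this page. As a rough, self-contained illustration of the non-Hadamard case only, a plain sum of absolute differences over a 4x4 difference block, here is a minimal sketch; the helper name sad_4x4 and the test values are invented for this example, and the real function presumably also provides a Hadamard-transformed (SATD) measure when its first argument is non-zero.

#include <stdio.h>
#include <stdlib.h>

#define BLOCK_SIZE 4

/* Plain SAD over one 4x4 difference block stored in the top-left corner of a
 * 16x16 array, mirroring how m7[][] is filled in the loops above.  The SATD
 * (Hadamard) path of the real find_sad() is deliberately omitted here.      */
static int sad_4x4(int diff[16][16])
{
  int i, j, sad = 0;

  for (i = 0; i < BLOCK_SIZE; i++)
    for (j = 0; j < BLOCK_SIZE; j++)
      sad += abs(diff[i][j]);

  return sad;
}

int main(void)
{
  /* Invented residual values, only to exercise the helper. */
  int diff[16][16] = { {  3, -2,  0, 1 },
                       { -4,  5,  0, 0 },
                       {  1,  1, -1, 2 },
                       {  0, -3,  2, 0 } };

  printf("SAD = %d\n", sad_4x4(diff));  /* prints: SAD = 25 */
  return 0;
}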
