mvmb.c
Optimized xvid 1.1.2 source code
C

  validate_vector(mv, x_pos, y_pos, dec);

  if ((pMB->mode != MODE_INTER4V) || (bvop)) { /* INTER, INTER_Q, NOT_CODED, FORWARD, BACKWARD */
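    /* Single motion vector for the whole macroblock: the chroma vector is the
       luma vector divided by two (after reducing quarterpel vectors to halfpel
       precision) and rounded with roundtab_79 to halfpel chroma accuracy. */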

    uv_dx = mv[0].x;
    uv_dy = mv[0].y;
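    /* For quarterpel streams written by old XviD versions
       (bs_version <= BS_VERSION_BUGGY_CHROMA_ROUNDING) the (v>>1)|(v&1) form
       reproduces the rounding those encoders used, so such streams still
       decode as encoded; newer streams use the plain /2. */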
    if (dec->quarterpel) {
      if (dec->bs_version <= BS_VERSION_BUGGY_CHROMA_ROUNDING) {
        uv_dx = (uv_dx>>1) | (uv_dx&1);
        uv_dy = (uv_dy>>1) | (uv_dy&1);
      } else {
        uv_dx /= 2;
        uv_dy /= 2;
      }
    }
    uv_dx = (uv_dx >> 1) + roundtab_79[uv_dx & 0x3];
    uv_dy = (uv_dy >> 1) + roundtab_79[uv_dy & 0x3];
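    /* Luma prediction: the quarterpel path uses dec->qtmp as scratch space for
       intermediate interpolation results, while the halfpel path reads the
       reference frame directly. */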

    if (dec->quarterpel)
      interpolate16x16_quarterpel(dec->cur.y, dec->refn[ref].y, dec->qtmp.y, dec->qtmp.y + 64,
                  dec->qtmp.y + 128, 16*x_pos, 16*y_pos,
                      mv[0].x, mv[0].y, stride, rounding);
    else
      interpolate16x16_switch(dec->cur.y, dec->refn[ref].y, 16*x_pos, 16*y_pos,
                  mv[0].x, mv[0].y, stride, rounding);

  } else {  /* MODE_INTER4V */

    if (dec->quarterpel) {
      if (dec->bs_version <= BS_VERSION_BUGGY_CHROMA_ROUNDING) {
        int z;
        uv_dx = 0; uv_dy = 0;
        for (z = 0; z < 4; z++) {
          uv_dx += ((mv[z].x>>1) | (mv[z].x&1));
          uv_dy += ((mv[z].y>>1) | (mv[z].y&1));
        }
      } else {
        uv_dx = (mv[0].x / 2) + (mv[1].x / 2) + (mv[2].x / 2) + (mv[3].x / 2);
        uv_dy = (mv[0].y / 2) + (mv[1].y / 2) + (mv[2].y / 2) + (mv[3].y / 2);
      }
    } else {
      uv_dx = mv[0].x + mv[1].x + mv[2].x + mv[3].x;
      uv_dy = mv[0].y + mv[1].y + mv[2].y + mv[3].y;
    }
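    /* 4MV: the chroma vector is derived from the sum of the four luma block
       vectors, divided by eight and rounded with roundtab_76 as specified by
       MPEG-4. */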

    uv_dx = (uv_dx >> 3) + roundtab_76[uv_dx & 0xf];
    uv_dy = (uv_dy >> 3) + roundtab_76[uv_dy & 0xf];

    if (dec->quarterpel) {
      interpolate8x8_quarterpel(dec->cur.y, dec->refn[0].y , dec->qtmp.y, dec->qtmp.y + 64,
                  dec->qtmp.y + 128, 16*x_pos, 16*y_pos,
                  mv[0].x, mv[0].y, stride, rounding);
      interpolate8x8_quarterpel(dec->cur.y, dec->refn[0].y , dec->qtmp.y, dec->qtmp.y + 64,
                  dec->qtmp.y + 128, 16*x_pos + 8, 16*y_pos,
                  mv[1].x, mv[1].y, stride, rounding);
      interpolate8x8_quarterpel(dec->cur.y, dec->refn[0].y , dec->qtmp.y, dec->qtmp.y + 64,
                  dec->qtmp.y + 128, 16*x_pos, 16*y_pos + 8,
                  mv[2].x, mv[2].y, stride, rounding);
      interpolate8x8_quarterpel(dec->cur.y, dec->refn[0].y , dec->qtmp.y, dec->qtmp.y + 64,
                  dec->qtmp.y + 128, 16*x_pos + 8, 16*y_pos + 8,
                  mv[3].x, mv[3].y, stride, rounding);
    } else {
      interpolate8x8_switch(dec->cur.y, dec->refn[0].y , 16*x_pos, 16*y_pos,
                mv[0].x, mv[0].y, stride, rounding);
      interpolate8x8_switch(dec->cur.y, dec->refn[0].y , 16*x_pos + 8, 16*y_pos,
                mv[1].x, mv[1].y, stride, rounding);
      interpolate8x8_switch(dec->cur.y, dec->refn[0].y , 16*x_pos, 16*y_pos + 8,
                mv[2].x, mv[2].y, stride, rounding);
      interpolate8x8_switch(dec->cur.y, dec->refn[0].y , 16*x_pos + 8, 16*y_pos + 8,
                mv[3].x, mv[3].y, stride, rounding);
    }
  }

  /* Chroma: both planes always use a single halfpel 8x8 interpolation;
     quarterpel precision applies to luma only. */
  interpolate8x8_switch(dec->cur.u, dec->refn[ref].u, 8 * x_pos, 8 * y_pos,
              uv_dx, uv_dy, stride2, rounding);
  interpolate8x8_switch(dec->cur.v, dec->refn[ref].v, 8 * x_pos, 8 * y_pos,
              uv_dx, uv_dy, stride2, rounding);

  
  if (cbp)
    decoder_mb_decode(dec, cbp, bs, pY_Cur, pU_Cur, pV_Cur, pMB);
}


void  decoder_mb_decode(DECODER * dec,
								        const uint32_t cbp,
								        Bitstream * bs,
								        uint8_t * pY_Cur,
								        uint8_t * pU_Cur,
								        uint8_t * pV_Cur,
								        const MACROBLOCK * pMB)
{
  uint8_t data_header[1024];
  /* Align the coefficient buffer on a cache line; uintptr_t keeps the pointer
     arithmetic valid on 64-bit targets as well. */
  int16_t *data = (int16_t *)(((uintptr_t)(data_header + CACHE_LINE - 1))
                              & ~(uintptr_t)(CACHE_LINE - 1));
  /* DECLARE_ALIGNED_MATRIX(data, 1, 64, int16_t, CACHE_LINE); */

  int stride = dec->edged_width;
  int i;
  const uint32_t iQuant = pMB->quant;
  const int direction = dec->alternate_vertical_scan ? 2 : 0;
  typedef void (*get_inter_block_function_t)(
      Bitstream * bs,
      int16_t * block,
      int direction,
      const int quant,
      const uint16_t *matrix);
  typedef void (*add_residual_function_t)(
      uint8_t *predicted_block,
      const int16_t *residual,
      int stride);

  const get_inter_block_function_t get_inter_block = (dec->quant_type == 0)
    ? (get_inter_block_function_t)get_inter_block_h263
    : (get_inter_block_function_t)get_inter_block_mpeg;
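  /* dec->quant_type selects the dequantisation method: 0 selects H.263-style
     dequantisation, anything else selects MPEG-style dequantisation with an
     explicit inter quantisation matrix. */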

  uint8_t *dst[6];
  int strides[6];
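  /* dst[]/strides[] describe the six 8x8 blocks of the macroblock (four luma,
     then U, then V).  With interlaced field DCT the luma blocks cover every
     second line, so their strides are doubled and the two lower blocks start
     one line down instead of eight lines down. */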


  if (dec->interlacing && pMB->field_dct) {
    dst[0] = pY_Cur;
    dst[1] = pY_Cur + 8;
    dst[2] = pY_Cur + stride;
    dst[3] = dst[2] + 8;
    dst[4] = pU_Cur;
    dst[5] = pV_Cur;
    strides[0] = strides[1] = strides[2] = strides[3] = stride*2;
    strides[4] = stride/2;
    strides[5] = stride/2;
  } else {
    dst[0] = pY_Cur;
    dst[1] = pY_Cur + 8;
    dst[2] = pY_Cur + 8*stride;
    dst[3] = dst[2] + 8;
    dst[4] = pU_Cur;
    dst[5] = pV_Cur;
    strides[0] = strides[1] = strides[2] = strides[3] = stride;
    strides[4] = stride/2;
    strides[5] = stride/2;
  }
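  /* cbp carries one coded-block flag per 8x8 block, MSB first:
     bits 5..2 are the four luma blocks, bit 1 is U, bit 0 is V. */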

  for (i = 0; i < 6; i++) {
    /* Process only coded blocks */
    if (cbp & (1 << (5 - i))) {

      /* Clear the block */
      memset(&data[0], 0, 64*sizeof(int16_t));

      /* Decode coeffs and dequantize on the fly */
      get_inter_block(bs, &data[0], direction, iQuant, get_inter_matrix(dec->mpeg_quant_matrices));
      
      /* iDCT */
      idct_int32((short * const)&data[0]);
      
      /* Add this residual to the predicted block */
      transfer_16to8_add(dst[i], &data[0], strides[i]);
    }
  }
}


void decoder_mbinter_field(DECODER * dec,
										        const MACROBLOCK * pMB,
										        const uint32_t x_pos,
										        const uint32_t y_pos,
										        const uint32_t cbp,
										        Bitstream * bs,
										        const uint32_t rounding,
										        const int ref,
														const int bvop)
{
  uint32_t stride = dec->edged_width;
  uint32_t stride2 = stride / 2;

  uint8_t *pY_Cur, *pU_Cur, *pV_Cur;

  int uvtop_dx, uvtop_dy;
  int uvbot_dx, uvbot_dy;
  VECTOR mv[4]; /* local copy of mvs */

  /* Get pointer to memory areas */
  pY_Cur = dec->cur.y + (y_pos << 4) * stride + (x_pos << 4);
  pU_Cur = dec->cur.u + (y_pos << 3) * stride2 + (x_pos << 3);
  pV_Cur = dec->cur.v + (y_pos << 3) * stride2 + (x_pos << 3);
  
  mv[0] = pMB->mvs[0];
  mv[1] = pMB->mvs[1];
  memset(&mv[2],0,2*sizeof(VECTOR));

  validate_vector(mv, x_pos, y_pos, dec);

  if((pMB->mode!=MODE_INTER4V) || (bvop))   /* INTER, INTER_Q, NOT_CODED, FORWARD, BACKWARD */
  { 
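    /* Field prediction: mv[0] predicts the top field and mv[1] the bottom
       field; field_for_top/field_for_bot select the reference field each one
       reads from (an offset of one line picks the bottom reference field). */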
    /* Prepare top field vector */
    uvtop_dx = DIV2ROUND(mv[0].x);
    uvtop_dy = DIV2ROUND(mv[0].y);

    /* Prepare bottom field vector */
    uvbot_dx = DIV2ROUND(mv[1].x);
    uvbot_dy = DIV2ROUND(mv[1].y);

    if(dec->quarterpel)
    {
      /* Quarterpel field prediction is not supported */
    }
    else
    {
      /* Interpolate top field left part (we use double stride for every 2nd line) */
      interpolate8x8_switch(dec->cur.y,dec->refn[ref].y+pMB->field_for_top*stride,
                            16*x_pos,8*y_pos,mv[0].x, mv[0].y>>1,2*stride, rounding);
      /* top field right part */
      interpolate8x8_switch(dec->cur.y,dec->refn[ref].y+pMB->field_for_top*stride,
                            16*x_pos+8,8*y_pos,mv[0].x, mv[0].y>>1,2*stride, rounding);

      /* Interpolate bottom field left part (we use double stride for every 2nd line) */
      interpolate8x8_switch(dec->cur.y+stride,dec->refn[ref].y+pMB->field_for_bot*stride,
                            16*x_pos,8*y_pos,mv[1].x, mv[1].y>>1,2*stride, rounding);
      /* Bottom field right part */
      interpolate8x8_switch(dec->cur.y+stride,dec->refn[ref].y+pMB->field_for_bot*stride,
                            16*x_pos+8,8*y_pos,mv[1].x, mv[1].y>>1,2*stride, rounding);

      /* Interpolate field1 U */
      interpolate8x4_switch(dec->cur.u,dec->refn[ref].u+pMB->field_for_top*stride2,
                            8*x_pos,4*y_pos,uvtop_dx,DIV2ROUND(uvtop_dy),stride,rounding);
      
      /* Interpolate field1 V */
      interpolate8x4_switch(dec->cur.v,dec->refn[ref].v+pMB->field_for_top*stride2,
                            8*x_pos,4*y_pos,uvtop_dx,DIV2ROUND(uvtop_dy),stride,rounding);
    
      /* Interpolate field2 U */
      interpolate8x4_switch(dec->cur.u+stride2,dec->refn[ref].u+pMB->field_for_bot*stride2,
                            8*x_pos,4*y_pos,uvbot_dx,DIV2ROUND(uvbot_dy),stride,rounding);
    
      /* Interpolate field2 V */
      interpolate8x4_switch(dec->cur.v+stride2,dec->refn[ref].v+pMB->field_for_bot*stride2,
                            8*x_pos,4*y_pos,uvbot_dx,DIV2ROUND(uvbot_dy),stride,rounding);
    }
  } 
  else 
  {
    /* We don't expect 4 motion vectors in interlaced mode */
  }

  /* Add the texture (prediction error) for the coded blocks */
  if(cbp)
    decoder_mb_decode(dec, cbp, bs, pY_Cur, pU_Cur, pV_Cur, pMB);
}

void
decoder_bf_interpolate_mbinter(DECODER * dec,
                IMAGE forward,
                IMAGE backward,
                MACROBLOCK * pMB,
                const uint32_t x_pos,
                const uint32_t y_pos,
                Bitstream * bs,
                const int direct)
{
  uint32_t stride = dec->edged_width;
  uint32_t stride2 = stride / 2;
  int uv_dx, uv_dy;
  int b_uv_dx, b_uv_dy;
  uint8_t *pY_Cur, *pU_Cur, *pV_Cur;
  const uint32_t cbp = pMB->cbp;

  pY_Cur = dec->cur.y + (y_pos << 4) * stride + (x_pos << 4);
  pU_Cur = dec->cur.u + (y_pos << 3) * stride2 + (x_pos << 3);
  pV_Cur = dec->cur.v + (y_pos << 3) * stride2 + (x_pos << 3);

  validate_vector(pMB->mvs, x_pos, y_pos, dec);
  validate_vector(pMB->b_mvs, x_pos, y_pos, dec);
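  /* B-frame interpolated prediction: the forward prediction is written to
     dec->cur first, then the *_add interpolation calls below blend in the
     backward prediction by averaging with what is already there. */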

  if (!direct) {
    uv_dx = pMB->mvs[0].x;
    uv_dy = pMB->mvs[0].y;
    b_uv_dx = pMB->b_mvs[0].x;
    b_uv_dy = pMB->b_mvs[0].y;

    if (dec->quarterpel) {
      if (dec->bs_version <= BS_VERSION_BUGGY_CHROMA_ROUNDING) {
        uv_dx = (uv_dx>>1) | (uv_dx&1);
        uv_dy = (uv_dy>>1) | (uv_dy&1);
        b_uv_dx = (b_uv_dx>>1) | (b_uv_dx&1);
        b_uv_dy = (b_uv_dy>>1) | (b_uv_dy&1);
      } else {
        uv_dx /= 2;
        uv_dy /= 2;
        b_uv_dx /= 2;
        b_uv_dy /= 2;
      }
    }

    uv_dx = (uv_dx >> 1) + roundtab_79[uv_dx & 0x3];
    uv_dy = (uv_dy >> 1) + roundtab_79[uv_dy & 0x3];
    b_uv_dx = (b_uv_dx >> 1) + roundtab_79[b_uv_dx & 0x3];
    b_uv_dy = (b_uv_dy >> 1) + roundtab_79[b_uv_dy & 0x3];

  } else {
    if (dec->quarterpel) { /* for qpel the /2 shall be done before summation. We've done it right in the encoder in the past. */
                           /* TODO: figure out if we ever did it wrong on the encoder side. If yes, add some workaround */
      if (dec->bs_version <= BS_VERSION_BUGGY_CHROMA_ROUNDING) {
        int z;
        uv_dx = 0; uv_dy = 0;
        b_uv_dx = 0; b_uv_dy = 0;
        for (z = 0; z < 4; z++) {
          uv_dx += ((pMB->mvs[z].x>>1) | (pMB->mvs[z].x&1));
          uv_dy += ((pMB->mvs[z].y>>1) | (pMB->mvs[z].y&1));
          b_uv_dx += ((pMB->b_mvs[z].x>>1) | (pMB->b_mvs[z].x&1));
          b_uv_dy += ((pMB->b_mvs[z].y>>1) | (pMB->b_mvs[z].y&1));
        }
      } else {
        uv_dx = (pMB->mvs[0].x / 2) + (pMB->mvs[1].x / 2) + (pMB->mvs[2].x / 2) + (pMB->mvs[3].x / 2);
        uv_dy = (pMB->mvs[0].y / 2) + (pMB->mvs[1].y / 2) + (pMB->mvs[2].y / 2) + (pMB->mvs[3].y / 2);
        b_uv_dx = (pMB->b_mvs[0].x / 2) + (pMB->b_mvs[1].x / 2) + (pMB->b_mvs[2].x / 2) + (pMB->b_mvs[3].x / 2);
        b_uv_dy = (pMB->b_mvs[0].y / 2) + (pMB->b_mvs[1].y / 2) + (pMB->b_mvs[2].y / 2) + (pMB->b_mvs[3].y / 2);
      }
    } else {
      uv_dx = pMB->mvs[0].x + pMB->mvs[1].x + pMB->mvs[2].x + pMB->mvs[3].x;
      uv_dy = pMB->mvs[0].y + pMB->mvs[1].y + pMB->mvs[2].y + pMB->mvs[3].y;
      b_uv_dx = pMB->b_mvs[0].x + pMB->b_mvs[1].x + pMB->b_mvs[2].x + pMB->b_mvs[3].x;
      b_uv_dy = pMB->b_mvs[0].y + pMB->b_mvs[1].y + pMB->b_mvs[2].y + pMB->b_mvs[3].y;
    }

    uv_dx = (uv_dx >> 3) + roundtab_76[uv_dx & 0xf];
    uv_dy = (uv_dy >> 3) + roundtab_76[uv_dy & 0xf];
    b_uv_dx = (b_uv_dx >> 3) + roundtab_76[b_uv_dx & 0xf];
    b_uv_dy = (b_uv_dy >> 3) + roundtab_76[b_uv_dy & 0xf];
  }

   if(dec->quarterpel) {
    if(!direct) {
      interpolate16x16_quarterpel(dec->cur.y, forward.y, dec->qtmp.y, dec->qtmp.y + 64,
                    dec->qtmp.y + 128, 16*x_pos, 16*y_pos,
                    pMB->mvs[0].x, pMB->mvs[0].y, stride, 0);
    } else {
      interpolate8x8_quarterpel(dec->cur.y, forward.y, dec->qtmp.y, dec->qtmp.y + 64,
                    dec->qtmp.y + 128, 16*x_pos, 16*y_pos,
                    pMB->mvs[0].x, pMB->mvs[0].y, stride, 0);
      interpolate8x8_quarterpel(dec->cur.y, forward.y, dec->qtmp.y, dec->qtmp.y + 64,
                    dec->qtmp.y + 128, 16*x_pos + 8, 16*y_pos,
                    pMB->mvs[1].x, pMB->mvs[1].y, stride, 0);
      interpolate8x8_quarterpel(dec->cur.y, forward.y, dec->qtmp.y, dec->qtmp.y + 64,
                    dec->qtmp.y + 128, 16*x_pos, 16*y_pos + 8,
                    pMB->mvs[2].x, pMB->mvs[2].y, stride, 0);
      interpolate8x8_quarterpel(dec->cur.y, forward.y, dec->qtmp.y, dec->qtmp.y + 64,
                    dec->qtmp.y + 128, 16*x_pos + 8, 16*y_pos + 8,
                    pMB->mvs[3].x, pMB->mvs[3].y, stride, 0);
    }
  } else {
    interpolate8x8_switch(dec->cur.y, forward.y, 16 * x_pos, 16 * y_pos,
              pMB->mvs[0].x, pMB->mvs[0].y, stride, 0);
    interpolate8x8_switch(dec->cur.y, forward.y, 16 * x_pos + 8, 16 * y_pos,
              pMB->mvs[1].x, pMB->mvs[1].y, stride, 0);
    interpolate8x8_switch(dec->cur.y, forward.y, 16 * x_pos, 16 * y_pos + 8,
              pMB->mvs[2].x, pMB->mvs[2].y, stride, 0);
    interpolate8x8_switch(dec->cur.y, forward.y, 16 * x_pos + 8, 16 * y_pos + 8,
              pMB->mvs[3].x, pMB->mvs[3].y, stride, 0);
  }

  interpolate8x8_switch(dec->cur.u, forward.u, 8 * x_pos, 8 * y_pos, uv_dx,
            uv_dy, stride2, 0);
  interpolate8x8_switch(dec->cur.v, forward.v, 8 * x_pos, 8 * y_pos, uv_dx,
            uv_dy, stride2, 0);


  if(dec->quarterpel) {
    if(!direct) {
      interpolate16x16_quarterpel_add(dec->cur.y, backward.y, dec->qtmp.y, dec->qtmp.y + 64,
          dec->qtmp.y + 128, 16*x_pos, 16*y_pos,
          pMB->b_mvs[0].x, pMB->b_mvs[0].y, stride, 0);
    } else {
      interpolate8x8_quarterpel_add(dec->cur.y, backward.y, dec->qtmp.y, dec->qtmp.y + 64,
          dec->qtmp.y + 128, 16*x_pos, 16*y_pos,
          pMB->b_mvs[0].x, pMB->b_mvs[0].y, stride, 0);
      interpolate8x8_quarterpel_add(dec->cur.y, backward.y, dec->qtmp.y, dec->qtmp.y + 64,
          dec->qtmp.y + 128, 16*x_pos + 8, 16*y_pos,
          pMB->b_mvs[1].x, pMB->b_mvs[1].y, stride, 0);
      interpolate8x8_quarterpel_add(dec->cur.y, backward.y, dec->qtmp.y, dec->qtmp.y + 64,
          dec->qtmp.y + 128, 16*x_pos, 16*y_pos + 8,
          pMB->b_mvs[2].x, pMB->b_mvs[2].y, stride, 0);
      interpolate8x8_quarterpel_add(dec->cur.y, backward.y, dec->qtmp.y, dec->qtmp.y + 64,
          dec->qtmp.y + 128, 16*x_pos + 8, 16*y_pos + 8,
          pMB->b_mvs[3].x, pMB->b_mvs[3].y, stride, 0);
    }
  } else {
    interpolate8x8_switch_add(dec->cur.y, backward.y, 16 * x_pos, 16 * y_pos,
        pMB->b_mvs[0].x, pMB->b_mvs[0].y, stride, 0);
    interpolate8x8_switch_add(dec->cur.y, backward.y, 16 * x_pos + 8,
        16 * y_pos, pMB->b_mvs[1].x, pMB->b_mvs[1].y, stride, 0);
    interpolate8x8_switch_add(dec->cur.y, backward.y, 16 * x_pos,
        16 * y_pos + 8, pMB->b_mvs[2].x, pMB->b_mvs[2].y, stride, 0);
    interpolate8x8_switch_add(dec->cur.y, backward.y, 16 * x_pos + 8,
        16 * y_pos + 8, pMB->b_mvs[3].x, pMB->b_mvs[3].y, stride, 0);
  }

  interpolate8x8_switch_add(dec->cur.u, backward.u, 8 * x_pos, 8 * y_pos,
      b_uv_dx, b_uv_dy, stride2, 0);
  interpolate8x8_switch_add(dec->cur.v, backward.v, 8 * x_pos, 8 * y_pos,
      b_uv_dx, b_uv_dy, stride2, 0);

  if (cbp)
    decoder_mb_decode(dec, cbp, bs, pY_Cur, pU_Cur, pV_Cur, pMB);
}

void decoder_mbgmc(DECODER * dec,
						        MACROBLOCK * const pMB,
						        const uint32_t x_pos,
						        const uint32_t y_pos,
						        const uint32_t fcode,
						        const uint32_t cbp,
						        Bitstream * bs,
						        const uint32_t rounding)
{
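  /* Global motion compensation: luma and chroma are predicted by warping the
     reference frame with the decoded sprite trajectory, and pMB->amv is then
     set to the (sanitised) average motion vector of the warped block. */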
  const uint32_t stride = dec->edged_width;
  const uint32_t stride2 = stride / 2;

  uint8_t *const pY_Cur = dec->cur.y + (y_pos << 4) * stride + (x_pos << 4);
  uint8_t *const pU_Cur = dec->cur.u + (y_pos << 3) * stride2 + (x_pos << 3);
  uint8_t *const pV_Cur = dec->cur.v + (y_pos << 3) * stride2 + (x_pos << 3);

  NEW_GMC_DATA * gmc_data = &dec->new_gmc_data;

  pMB->mvs[0] = pMB->mvs[1] = pMB->mvs[2] = pMB->mvs[3] = pMB->amv;

  gmc_data->predict_16x16(gmc_data,
      dec->cur.y + y_pos*16*stride + x_pos*16, dec->refn[0].y,
      stride, stride, x_pos, y_pos, rounding);

  gmc_data->predict_8x8(gmc_data,
      dec->cur.u + y_pos*8*stride2 + x_pos*8, dec->refn[0].u,
      dec->cur.v + y_pos*8*stride2 + x_pos*8, dec->refn[0].v,
      stride2, stride2, x_pos, y_pos, rounding);

  gmc_data->get_average_mv(gmc_data, &pMB->amv, x_pos, y_pos, dec->quarterpel);

  pMB->amv.x = gmc_sanitize(pMB->amv.x, dec->quarterpel, fcode);
  pMB->amv.y = gmc_sanitize(pMB->amv.y, dec->quarterpel, fcode);

  pMB->mvs[0] = pMB->mvs[1] = pMB->mvs[2] = pMB->mvs[3] = pMB->amv;

  if (cbp)
    decoder_mb_decode(dec, cbp, bs, pY_Cur, pU_Cur, pV_Cur, pMB);
}
