decoder.c
if ((mv).y > yborder_high) { \
DPRINTF(XVID_DEBUG_MV, "mv.y > max -- %d > %d, MB %d, %d", (mv).y, yborder_high, x_pos, y_pos); \
(mv).y = yborder_high; \
} else if ((mv).y < yborder_low) { \
DPRINTF(XVID_DEBUG_MV, "mv.y < min -- %d < %d, MB %d, %d", (mv).y, yborder_low, x_pos, y_pos); \
(mv).y = yborder_low; \
} \
} while (0)
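/* Clamp each of the four candidate vectors to the legal border range */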
CHECK_MV(mv[0]);
CHECK_MV(mv[1]);
CHECK_MV(mv[2]);
CHECK_MV(mv[3]);
}
/* In bitstream versions up to BS_VERSION_BUGGY_CHROMA_ROUNDING, chroma rounding
 * was wrong with qpel, so we stay backward compatible to avoid artifacts. */
#define BS_VERSION_BUGGY_CHROMA_ROUNDING 1
/* decode an inter macroblock */
static void
decoder_mbinter(DECODER * dec,
const MACROBLOCK * pMB,
const uint32_t x_pos,
const uint32_t y_pos,
const uint32_t cbp,
Bitstream * bs,
const uint32_t rounding,
const int ref,
const int bvop)
{
uint32_t stride = dec->edged_width;
uint32_t stride2 = stride / 2;
uint32_t i;
uint8_t *pY_Cur, *pU_Cur, *pV_Cur;
int uv_dx, uv_dy;
VECTOR mv[4]; /* local copy of mvs */
pY_Cur = dec->cur.y + (y_pos << 4) * stride + (x_pos << 4);
pU_Cur = dec->cur.u + (y_pos << 3) * stride2 + (x_pos << 3);
pV_Cur = dec->cur.v + (y_pos << 3) * stride2 + (x_pos << 3);
for (i = 0; i < 4; i++)
mv[i] = pMB->mvs[i];
validate_vector(mv, x_pos, y_pos, dec);
start_timer();
if ((pMB->mode != MODE_INTER4V) || (bvop)) { /* INTER, INTER_Q, NOT_CODED, FORWARD, BACKWARD */
uv_dx = mv[0].x;
uv_dy = mv[0].y;
if (dec->quarterpel) {
if (dec->bs_version <= BS_VERSION_BUGGY_CHROMA_ROUNDING) {
uv_dx = (uv_dx>>1) | (uv_dx&1);
uv_dy = (uv_dy>>1) | (uv_dy&1);
}
else {
uv_dx /= 2;
uv_dy /= 2;
}
}
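/* Derive the chroma vector: halve the luma vector, with roundtab_79 supplying the rounding term */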
uv_dx = (uv_dx >> 1) + roundtab_79[uv_dx & 0x3];
uv_dy = (uv_dy >> 1) + roundtab_79[uv_dy & 0x3];
if (dec->quarterpel)
interpolate16x16_quarterpel(dec->cur.y, dec->refn[ref].y, dec->qtmp.y, dec->qtmp.y + 64,
dec->qtmp.y + 128, 16*x_pos, 16*y_pos,
mv[0].x, mv[0].y, stride, rounding);
else
interpolate16x16_switch(dec->cur.y, dec->refn[ref].y, 16*x_pos, 16*y_pos,
mv[0].x, mv[0].y, stride, rounding);
} else { /* MODE_INTER4V */
if(dec->quarterpel) {
if (dec->bs_version <= BS_VERSION_BUGGY_CHROMA_ROUNDING) {
int z;
uv_dx = 0; uv_dy = 0;
for (z = 0; z < 4; z++) {
uv_dx += ((mv[z].x>>1) | (mv[z].x&1));
uv_dy += ((mv[z].y>>1) | (mv[z].y&1));
}
}
else {
uv_dx = (mv[0].x / 2) + (mv[1].x / 2) + (mv[2].x / 2) + (mv[3].x / 2);
uv_dy = (mv[0].y / 2) + (mv[1].y / 2) + (mv[2].y / 2) + (mv[3].y / 2);
}
} else {
uv_dx = mv[0].x + mv[1].x + mv[2].x + mv[3].x;
uv_dy = mv[0].y + mv[1].y + mv[2].y + mv[3].y;
}
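/* 4MV chroma vector: divide the summed luma vectors by 8, with roundtab_76 supplying the rounding term */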
uv_dx = (uv_dx >> 3) + roundtab_76[uv_dx & 0xf];
uv_dy = (uv_dy >> 3) + roundtab_76[uv_dy & 0xf];
if (dec->quarterpel) {
interpolate8x8_quarterpel(dec->cur.y, dec->refn[0].y , dec->qtmp.y, dec->qtmp.y + 64,
dec->qtmp.y + 128, 16*x_pos, 16*y_pos,
mv[0].x, mv[0].y, stride, rounding);
interpolate8x8_quarterpel(dec->cur.y, dec->refn[0].y , dec->qtmp.y, dec->qtmp.y + 64,
dec->qtmp.y + 128, 16*x_pos + 8, 16*y_pos,
mv[1].x, mv[1].y, stride, rounding);
interpolate8x8_quarterpel(dec->cur.y, dec->refn[0].y , dec->qtmp.y, dec->qtmp.y + 64,
dec->qtmp.y + 128, 16*x_pos, 16*y_pos + 8,
mv[2].x, mv[2].y, stride, rounding);
interpolate8x8_quarterpel(dec->cur.y, dec->refn[0].y , dec->qtmp.y, dec->qtmp.y + 64,
dec->qtmp.y + 128, 16*x_pos + 8, 16*y_pos + 8,
mv[3].x, mv[3].y, stride, rounding);
} else {
interpolate8x8_switch(dec->cur.y, dec->refn[0].y , 16*x_pos, 16*y_pos,
mv[0].x, mv[0].y, stride, rounding);
interpolate8x8_switch(dec->cur.y, dec->refn[0].y , 16*x_pos + 8, 16*y_pos,
mv[1].x, mv[1].y, stride, rounding);
interpolate8x8_switch(dec->cur.y, dec->refn[0].y , 16*x_pos, 16*y_pos + 8,
mv[2].x, mv[2].y, stride, rounding);
interpolate8x8_switch(dec->cur.y, dec->refn[0].y , 16*x_pos + 8, 16*y_pos + 8,
mv[3].x, mv[3].y, stride, rounding);
}
}
/* chroma */
interpolate8x8_switch(dec->cur.u, dec->refn[ref].u, 8 * x_pos, 8 * y_pos,
uv_dx, uv_dy, stride2, rounding);
interpolate8x8_switch(dec->cur.v, dec->refn[ref].v, 8 * x_pos, 8 * y_pos,
uv_dx, uv_dy, stride2, rounding);
stop_comp_timer();
if (cbp)
decoder_mb_decode(dec, cbp, bs, pY_Cur, pU_Cur, pV_Cur, pMB);
}
/* decode an inter macroblock in field mode */
static void
decoder_mbinter_field(DECODER * dec,
const MACROBLOCK * pMB,
const uint32_t x_pos,
const uint32_t y_pos,
const uint32_t cbp,
Bitstream * bs,
const uint32_t rounding,
const int ref,
const int bvop)
{
uint32_t stride = dec->edged_width;
uint32_t stride2 = stride / 2;
uint8_t *pY_Cur, *pU_Cur, *pV_Cur;
int uvtop_dx, uvtop_dy;
int uvbot_dx, uvbot_dy;
VECTOR mv[4]; /* local copy of mvs */
/* Get pointer to memory areas */
pY_Cur = dec->cur.y + (y_pos << 4) * stride + (x_pos << 4);
pU_Cur = dec->cur.u + (y_pos << 3) * stride2 + (x_pos << 3);
pV_Cur = dec->cur.v + (y_pos << 3) * stride2 + (x_pos << 3);
mv[0] = pMB->mvs[0];
mv[1] = pMB->mvs[1];
memset(&mv[2],0,2*sizeof(VECTOR));
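/* Only the two field vectors are used; clear mv[2]/mv[3] before validation */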
validate_vector(mv, x_pos, y_pos, dec);
start_timer();
if((pMB->mode!=MODE_INTER4V) || (bvop)) /* INTER, INTER_Q, NOT_CODED, FORWARD, BACKWARD */
{
/* Prepare top field vector */
uvtop_dx = DIV2ROUND(mv[0].x);
uvtop_dy = DIV2ROUND(mv[0].y);
/* Prepare bottom field vector */
uvbot_dx = DIV2ROUND(mv[1].x);
uvbot_dy = DIV2ROUND(mv[1].y);
if(dec->quarterpel)
{
/* Quarterpel is not supported in field (interlaced) mode */
}
else
{
/* Interpolate top field left part (we use double stride for every 2nd line) */
interpolate8x8_switch(dec->cur.y,dec->refn[ref].y+pMB->field_for_top*stride,
16*x_pos,8*y_pos,mv[0].x, mv[0].y>>1,2*stride, rounding);
/* Top field right part */
interpolate8x8_switch(dec->cur.y,dec->refn[ref].y+pMB->field_for_top*stride,
16*x_pos+8,8*y_pos,mv[0].x, mv[0].y>>1,2*stride, rounding);
/* Interpolate bottom field left part (we use double stride for every 2nd line) */
interpolate8x8_switch(dec->cur.y+stride,dec->refn[ref].y+pMB->field_for_bot*stride,
16*x_pos,8*y_pos,mv[1].x, mv[1].y>>1,2*stride, rounding);
/* Bottom field right part */
interpolate8x8_switch(dec->cur.y+stride,dec->refn[ref].y+pMB->field_for_bot*stride,
16*x_pos+8,8*y_pos,mv[1].x, mv[1].y>>1,2*stride, rounding);
/* Interpolate top field U */
interpolate8x4_switch(dec->cur.u,dec->refn[ref].u+pMB->field_for_top*stride2,
8*x_pos,4*y_pos,uvtop_dx,DIV2ROUND(uvtop_dy),stride,rounding);
/* Interpolate top field V */
interpolate8x4_switch(dec->cur.v,dec->refn[ref].v+pMB->field_for_top*stride2,
8*x_pos,4*y_pos,uvtop_dx,DIV2ROUND(uvtop_dy),stride,rounding);
/* Interpolate bottom field U */
interpolate8x4_switch(dec->cur.u+stride2,dec->refn[ref].u+pMB->field_for_bot*stride2,
8*x_pos,4*y_pos,uvbot_dx,DIV2ROUND(uvbot_dy),stride,rounding);
/* Interpolate bottom field V */
interpolate8x4_switch(dec->cur.v+stride2,dec->refn[ref].v+pMB->field_for_bot*stride2,
8*x_pos,4*y_pos,uvbot_dx,DIV2ROUND(uvbot_dy),stride,rounding);
}
}
else
{
/* We don't expect 4 motion vectors in interlaced mode */
}
stop_comp_timer();
/* Must add error correction? */
if(cbp)
decoder_mb_decode(dec, cbp, bs, pY_Cur, pU_Cur, pV_Cur, pMB);
}
static void
decoder_mbgmc(DECODER * dec,
MACROBLOCK * const pMB,
const uint32_t x_pos,
const uint32_t y_pos,
const uint32_t fcode,
const uint32_t cbp,
Bitstream * bs,
const uint32_t rounding)
{
const uint32_t stride = dec->edged_width;
const uint32_t stride2 = stride / 2;
uint8_t *const pY_Cur=dec->cur.y + (y_pos << 4) * stride + (x_pos << 4);
uint8_t *const pU_Cur=dec->cur.u + (y_pos << 3) * stride2 + (x_pos << 3);
uint8_t *const pV_Cur=dec->cur.v + (y_pos << 3) * stride2 + (x_pos << 3);
NEW_GMC_DATA * gmc_data = &dec->new_gmc_data;
pMB->mvs[0] = pMB->mvs[1] = pMB->mvs[2] = pMB->mvs[3] = pMB->amv;
start_timer();
/* This is where the GMC warping is actually computed */
gmc_data->predict_16x16(gmc_data,
dec->cur.y + y_pos*16*stride + x_pos*16, dec->refn[0].y,
stride, stride, x_pos, y_pos, rounding);
gmc_data->predict_8x8(gmc_data,
dec->cur.u + y_pos*8*stride2 + x_pos*8, dec->refn[0].u,
dec->cur.v + y_pos*8*stride2 + x_pos*8, dec->refn[0].v,
stride2, stride2, x_pos, y_pos, rounding);
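/* Store the averaged warp vector, clamped to the fcode limits, as this MB's motion vectors */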
gmc_data->get_average_mv(gmc_data, &pMB->amv, x_pos, y_pos, dec->quarterpel);
pMB->amv.x = gmc_sanitize(pMB->amv.x, dec->quarterpel, fcode);
pMB->amv.y = gmc_sanitize(pMB->amv.y, dec->quarterpel, fcode);
pMB->mvs[0] = pMB->mvs[1] = pMB->mvs[2] = pMB->mvs[3] = pMB->amv;
stop_transfer_timer();
if (cbp)
decoder_mb_decode(dec, cbp, bs, pY_Cur, pU_Cur, pV_Cur, pMB);
}
static void
decoder_iframe(DECODER * dec,
Bitstream * bs,
int quant,
int intra_dc_threshold)
{
uint32_t bound;
uint32_t x, y;
const uint32_t mb_width = dec->mb_width;
const uint32_t mb_height = dec->mb_height;
bound = 0;
for (y = 0; y < mb_height; y++) {
for (x = 0; x < mb_width; x++) {
MACROBLOCK *mb;
uint32_t mcbpc;
uint32_t cbpc;
uint32_t acpred_flag;
uint32_t cbpy;
uint32_t cbp;
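/* Skip macroblock stuffing codes (nine bits equal to '000000001') */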
while (BitstreamShowBits(bs, 9) == 1)
BitstreamSkip(bs, 9);
if (check_resync_marker(bs, 0))
{
bound = read_video_packet_header(bs, dec, 0,
&quant, NULL, NULL, &intra_dc_threshold);
x = bound % mb_width;
y = bound / mb_width;
}
mb = &dec->mbs[y * dec->mb_width + x];
DPRINTF(XVID_DEBUG_MB, "macroblock (%i,%i) %08x\n", x, y, BitstreamShowBits(bs, 32));
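/* mcbpc packs the MB mode (low 3 bits) and the chroma coded-block pattern; cbpy supplies the luma part */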
mcbpc = get_mcbpc_intra(bs);
mb->mode = mcbpc & 7;
cbpc = (mcbpc >> 4);
acpred_flag = BitstreamGetBit(bs);
cbpy = get_cbpy(bs, 1);
cbp = (cbpy << 2) | cbpc;
if (mb->mode == MODE_INTRA_Q) {
quant += dquant_table[BitstreamGetBits(bs, 2)];
if (quant > 31) {
quant = 31;
} else if (quant < 1) {
quant = 1;
}
}
mb->quant = quant;
mb->mvs[0].x = mb->mvs[0].y =
mb->mvs[1].x = mb->mvs[1].y =
mb->mvs[2].x = mb->mvs[2].y =
mb->mvs[3].x = mb->mvs[3].y =0;
if (dec->interlacing) {
mb->field_dct = BitstreamGetBit(bs);
DPRINTF(XVID_DEBUG_MB,"deci: field_dct: %i\n", mb->field_dct);
}
decoder_mbintra(dec, mb, x, y, acpred_flag, cbp, bs, quant,
intra_dc_threshold, bound);
}
if(dec->out_frm)
output_slice(&dec->cur, dec->edged_width,dec->width,dec->out_frm,0,y,mb_width);
}
}
static void
get_motion_vector(DECODER * dec,
Bitstream * bs,
int x,
int y,
int k,
VECTOR * ret_mv,
int fcode,
const int bound)
{
const int scale_fac = 1 << (fcode - 1);
const int high = (32 * scale_fac) - 1;
const int low = ((-32) * scale_fac);
const int range = (64 * scale_fac);
const VECTOR pmv = get_pmv2(dec->mbs, dec->mb_width, bound, x, y, k);
VECTOR mv;
mv.x = get_mv(bs, fcode);
mv.y = get_mv(bs, fcode);
DPRINTF(XVID_DEBUG_MV,"mv_diff (%i,%i) pred (%i,%i) result (%i,%i)\n", mv.x, mv.y, pmv.x, pmv.y, mv.x+pmv.x, mv.y+pmv.y);
mv.x += pmv.x;
mv.y += pmv.y;
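/* Wrap the reconstructed vector back into the fcode range [low, high] */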
if (mv.x < low) {
mv.x += range;
} else if (mv.x > high) {
mv.x -= range;
}
if (mv.y < low) {
mv.y += range;
} else if (mv.y > high) {
mv.y -= range;
}
ret_mv->x = mv.x;
ret_mv->y = mv.y;
}
/* Used when the decoder runs interlaced: the MV prediction differs in that case */
static void get_motion_vector_interlaced(DECODER * dec,
Bitstream * bs,
int x,
int y,
int k,
MACROBLOCK *pMB,
int fcode,
const int bound)
{
const int scale_fac = 1 << (fcode - 1);
const int high = (32 * scale_fac) - 1;
const int low = ((-32) * scale_fac);
const int range = (64 * scale_fac);
/* Get interlaced prediction */
const VECTOR pmv=get_pmv2_interlaced(dec->mbs,dec->mb_width,bound,x,y,k);
VECTOR mv,mvf1,mvf2;
if(!pMB->field_pred)
{
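/* Frame prediction: a single vector is decoded and shared by all four blocks */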
mv.x = get_mv(bs,fcode);
mv.y = get_mv(bs,fcode);
mv.x += pmv.x;
mv.y += pmv.y;
if(mv.x<low) {
mv.x += range;
} else if (mv.x>high) {
mv.x-=range;
}
if (mv.y < low) {
mv.y += range;
} else if (mv.y > high) {
mv.y -= range;
}
pMB->mvs[0]=pMB->mvs[1]=pMB->mvs[2]=pMB->mvs[3]=mv;
}
else
{
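/* Field prediction: a separate vector is decoded for each field */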
mvf1.x = get_mv(bs, fcode);
mvf1.y = get_mv(bs, fcode);
mvf1.x += pmv.x;