⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 vc1.c

📁 mediastreamer2是开源的网络传输媒体流的库
💻 C
📖 第 1 页 / 共 5 页
字号:
    /* NOTE(review): this chunk starts mid-function — the lines below are the tail
     * of an enclosing function whose signature is on the previous page.  From the
     * visible code it writes six 8x8 sample blocks (4 luma + 2 chroma) of one
     * macroblock into the current picture — presumably a vc1_put_block()-style
     * output routine; confirm against the preceding page. */
    DSPContext *dsp = &v->s.dsp;

    /* Range-reduced frame: expand samples back to full range around 128
     * (inverse of the >>1 scaling applied elsewhere in this file). */
    if(v->rangeredfrm) {
        int i, j, k;
        for(k = 0; k < 6; k++)
            for(j = 0; j < 8; j++)
                for(i = 0; i < 8; i++)
                    block[k][i + j*8] = ((block[k][i + j*8] - 128) << 1) + 128;
    }

    /* Strides for the three planes of the current picture. */
    ys = v->s.current_picture.linesize[0];
    us = v->s.current_picture.linesize[1];
    vs = v->s.current_picture.linesize[2];

    /* Store the four 8x8 luma blocks in 2x2 layout, then the two chroma blocks
     * (skipped entirely in grayscale-only decoding). */
    Y = v->s.dest[0];
    dsp->put_pixels_clamped(block[0], Y, ys);
    dsp->put_pixels_clamped(block[1], Y + 8, ys);
    Y += ys * 8;
    dsp->put_pixels_clamped(block[2], Y, ys);
    dsp->put_pixels_clamped(block[3], Y + 8, ys);

    if(!(v->s.flags & CODEC_FLAG_GRAY)) {
        dsp->put_pixels_clamped(block[4], v->s.dest[1], us);
        dsp->put_pixels_clamped(block[5], v->s.dest[2], vs);
    }
}

/** Do motion compensation over 1 macroblock
 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
 *
 * @param v   VC-1 decoder context
 * @param dir prediction direction: 0 = predict from s->last_picture,
 *            nonzero = predict from s->next_picture (B-frame backward MC)
 */
static void vc1_mc_1mv(VC1Context *v, int dir)
{
    MpegEncContext *s = &v->s;
    DSPContext *dsp = &v->s.dsp;
    uint8_t *srcY, *srcU, *srcV;
    int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;

    if(!v->s.last_picture.data[0])return;

    /* Luma MV; the low 2 bits are the quarter-sample fractional part
     * (src_x/src_y below add mx>>2 / my>>2 to the MB position). */
    mx = s->mv[dir][0][0];
    my = s->mv[dir][0][1];
    // store motion vectors for further use in B frames
    if(s->pict_type == P_TYPE) {
        s->current_picture.motion_val[1][s->block_index[0]][0] = mx;
        s->current_picture.motion_val[1][s->block_index[0]][1] = my;
    }

    /* Derive the chroma MV by halving the luma MV; the "+ ((mx & 3) == 3)"
     * term rounds the 3/4-sample case up before the shift. */
    uvmx = (mx + ((mx & 3) == 3)) >> 1;
    uvmy = (my + ((my & 3) == 3)) >> 1;
    /* FASTUVMC mode: snap chroma MVs away from odd quarter-pel positions
     * (rounds toward zero for positive, away for negative). */
    if(v->fastuvmc) {
        uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
        uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
    }

    /* Select the reference picture according to the prediction direction. */
    if(!dir) {
        srcY = s->last_picture.data[0];
        srcU = s->last_picture.data[1];
        srcV = s->last_picture.data[2];
    } else {
        srcY = s->next_picture.data[0];
        srcU = s->next_picture.data[1];
        srcV = s->next_picture.data[2];
    }

    /* Integer source position of the reference block (full-pel part of MV). */
    src_x = s->mb_x * 16 + (mx >> 2);
    src_y = s->mb_y * 16 + (my >> 2);
    uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
    uvsrc_y = s->mb_y * 8 + (uvmy >> 2);

    /* Clip source coordinates; the advanced profile uses the coded dimensions
     * and slightly wider negative margins than simple/main. */
    if(v->profile != PROFILE_ADVANCED){
        src_x   = av_clip(  src_x, -16, s->mb_width  * 16);
        src_y   = av_clip(  src_y, -16, s->mb_height * 16);
        uvsrc_x = av_clip(uvsrc_x,  -8, s->mb_width  *  8);
        uvsrc_y = av_clip(uvsrc_y,  -8, s->mb_height *  8);
    }else{
        src_x   = av_clip(  src_x, -17, s->avctx->coded_width);
        src_y   = av_clip(  src_y, -18, s->avctx->coded_height + 1);
        uvsrc_x = av_clip(uvsrc_x,  -8, s->avctx->coded_width  >> 1);
        uvsrc_y = av_clip(uvsrc_y,  -8, s->avctx->coded_height >> 1);
    }

    srcY += src_y * s->linesize + src_x;
    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;

    /* for grayscale we should not try to read from unknown area */
    if(s->flags & CODEC_FLAG_GRAY) {
        srcU = s->edge_emu_buffer + 18 * s->linesize;
        srcV = s->edge_emu_buffer + 18 * s->linesize;
    }

    /* Take the slow path (copy through edge_emu_buffer) when the reference
     * block may cross the picture border, or when the source samples must be
     * rewritten first (range reduction / intensity compensation). */
    if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
       || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel*3
       || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 16 - s->mspel*3){
        uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;

        /* Back up by the mspel margin so the emulated copy includes the
         * extra border rows/columns the quarter-pel filter needs. */
        srcY -= s->mspel * (1 + s->linesize);
        ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
                            src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
        srcY = s->edge_emu_buffer;
        ff_emulated_edge_mc(uvbuf     , srcU, s->uvlinesize, 8+1, 8+1,
                            uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
                            uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        srcU = uvbuf;
        srcV = uvbuf + 16;
        /* if we deal with range reduction we need to scale source blocks */
        if(v->rangeredfrm) {
            int i, j;
            uint8_t *src, *src2;

            src = srcY;
            for(j = 0; j < 17 + s->mspel*2; j++) {
                for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
                src += s->linesize;
            }
            src = srcU; src2 = srcV;
            for(j = 0; j < 9; j++) {
                for(i = 0; i < 9; i++) {
                    src[i] = ((src[i] - 128) >> 1) + 128;
                    src2[i] = ((src2[i] - 128) >> 1) + 128;
                }
                src += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
        /* if we deal with intensity compensation we need to scale source blocks */
        if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
            int i, j;
            uint8_t *src, *src2;

            /* Remap samples through the luma/chroma intensity-compensation LUTs. */
            src = srcY;
            for(j = 0; j < 17 + s->mspel*2; j++) {
                for(i = 0; i < 17 + s->mspel*2; i++) src[i] = v->luty[src[i]];
                src += s->linesize;
            }
            src = srcU; src2 = srcV;
            for(j = 0; j < 9; j++) {
                for(i = 0; i < 9; i++) {
                    src[i] = v->lutuv[src[i]];
                    src2[i] = v->lutuv[src2[i]];
                }
                src += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
        /* Undo the margin offset so srcY again points at the block origin. */
        srcY += s->mspel * (1 + s->linesize);
    }

    if(s->mspel) {
        /* Quarter-pel MC: dxy packs the fractional parts as (fy<<2)|fx,
         * done as four 8x8 calls over the 16x16 luma block. */
        dxy = ((my & 3) << 2) | (mx & 3);
        dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0]    , srcY    , s->linesize, v->rnd);
        dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
        srcY += s->linesize * 8;
        dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize    , srcY    , s->linesize, v->rnd);
        dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
    } else { // hpel mc - always used for luma
        dxy = (my & 2) | ((mx & 2) >> 1);
        if(!v->rnd)
            dsp->put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
        else
            dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
    }

    if(s->flags & CODEC_FLAG_GRAY) return;
    /* Chroma MC always uses qpel bilinear */
    /* NOTE(review): uvdxy is computed here but never used below — the H.264
     * chroma helpers take uvmx/uvmy directly.  Dead store; candidate for
     * removal in a behavior-changing pass. */
    uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
    uvmx = (uvmx&3)<<1;
    uvmy = (uvmy&3)<<1;
    if(!v->rnd){
        dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    }else{
        dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    }
}

/** Do motion compensation for 4-MV macroblock - luminance block
 *
 * @param v VC-1 decoder context
 * @param n index of the 8x8 luma block within the macroblock (0..3;
 *          bit 0 selects the right column, bit 1 the bottom row)
 *
 * Always predicts from s->last_picture (4-MV is a P-frame mode here).
 */
static void vc1_mc_4mv_luma(VC1Context *v, int n)
{
    MpegEncContext *s = &v->s;
    DSPContext *dsp = &v->s.dsp;
    uint8_t *srcY;
    int dxy, mx, my, src_x, src_y;
    int off;

    if(!v->s.last_picture.data[0])return;
    mx = s->mv[0][n][0];
    my = s->mv[0][n][1];
    srcY = s->last_picture.data[0];

    /* Destination offset of this 8x8 block inside the 16x16 macroblock. */
    off = s->linesize * 4 * (n&2) + (n&1) * 8;

    /* Full-pel source position: MB origin + block offset + integer MV part. */
    src_x = s->mb_x * 16 + (n&1) * 8 + (mx >> 2);
    src_y = s->mb_y * 16 + (n&2) * 4 + (my >> 2);

    if(v->profile != PROFILE_ADVANCED){
        src_x   = av_clip(  src_x, -16, s->mb_width  * 16);
        src_y   = av_clip(  src_y, -16, s->mb_height * 16);
    }else{
        src_x   = av_clip(  src_x, -17, s->avctx->coded_width);
        src_y   = av_clip(  src_y, -18, s->avctx->coded_height + 1);
    }

    srcY += src_y * s->linesize + src_x;

    /* Edge-emulation / sample-rewrite slow path; same structure as
     * vc1_mc_1mv but for an 8x8 block (8 + 1 + mspel margin). */
    if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
       || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 8 - s->mspel*2
       || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 8 - s->mspel*2){
        srcY -= s->mspel * (1 + s->linesize);
        ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 9+s->mspel*2, 9+s->mspel*2,
                            src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
        srcY = s->edge_emu_buffer;
        /* if we deal with range reduction we need to scale source blocks */
        if(v->rangeredfrm) {
            int i, j;
            uint8_t *src;

            src = srcY;
            for(j = 0; j < 9 + s->mspel*2; j++) {
                for(i = 0; i < 9 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
                src += s->linesize;
            }
        }
        /* if we deal with intensity compensation we need to scale source blocks */
        if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
            int i, j;
            uint8_t *src;

            src = srcY;
            for(j = 0; j < 9 + s->mspel*2; j++) {
                for(i = 0; i < 9 + s->mspel*2; i++) src[i] = v->luty[src[i]];
                src += s->linesize;
            }
        }
        srcY += s->mspel * (1 + s->linesize);
    }

    if(s->mspel) {
        /* Quarter-pel MC, single 8x8 call. */
        dxy = ((my & 3) << 2) | (mx & 3);
        dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, v->rnd);
    } else { // hpel mc - always used for luma
        dxy = (my & 2) | ((mx & 2) >> 1);
        if(!v->rnd)
            dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
        else
            dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
    }
}

/** Median of four values, computed as the mean of the two middle values
 * (each branch averages the smaller of the two larger elements with the
 * larger of the two smaller elements). Used for chroma MV derivation.
 */
static inline int median4(int a, int b, int c, int d)
{
    if(a < b) {
        if(c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
        else      return (FFMIN(b, c) + FFMAX(a, d)) / 2;
    } else {
        if(c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
        else      return (FFMIN(a, c) + FFMAX(b, d)) / 2;
    }
}

/** Do motion compensation for 4-MV macroblock - both chroma blocks
 *
 * Derives a single chroma MV from the four luma MVs, with the derivation
 * rule depending on how many of the four luma blocks are intra-coded.
 *
 * NOTE(review): this function is cut off at the end of this page — the body
 * below is incomplete and continues on the next page of the listing.
 */
static void vc1_mc_4mv_chroma(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    DSPContext *dsp = &v->s.dsp;
    uint8_t *srcU, *srcV;
    int uvdxy, uvmx, uvmy, uvsrc_x, uvsrc_y;
    int i, idx, tx = 0, ty = 0;
    int mvx[4], mvy[4], intra[4];
    /* count[idx] = number of set bits in idx, i.e. the number of intra-coded
     * luma blocks flagged in the bitmask built below. */
    static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};

    if(!v->s.last_picture.data[0])return;
    if(s->flags & CODEC_FLAG_GRAY) return;

    for(i = 0; i < 4; i++) {
        mvx[i] = s->mv[0][i][0];
        mvy[i] = s->mv[0][i][1];
        intra[i] = v->mb_type[0][s->block_index[i]];
    }

    /* calculate chroma MV vector from four luma MVs */
    idx = (intra[3] << 3) | (intra[2] << 2) | (intra[1] << 1) | intra[0];
    if(!idx) { // all blocks are inter
        tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
        ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
    } else if(count[idx] == 1) { // 3 inter blocks
        /* One intra block: take the component-wise median of the other three. */
        switch(idx) {
        case 0x1:
            tx = mid_pred(mvx[1], mvx[2], mvx[3]);
            ty = mid_pred(mvy[1], mvy[2], mvy[3]);
            break;
        case 0x2:
            tx = mid_pred(mvx[0], mvx[2], mvx[3]);
            ty = mid_pred(mvy[0], mvy[2], mvy[3]);
            break;
        case 0x4:
            tx = mid_pred(mvx[0], mvx[1], mvx[3]);
            ty = mid_pred(mvy[0], mvy[1], mvy[3]);
            break;
        case 0x8:
            tx = mid_pred(mvx[0], mvx[1], mvx[2]);
            ty = mid_pred(mvy[0], mvy[1], mvy[2]);
            break;
        }
    } else if(count[idx] == 2) {
        /* Two intra blocks: average the two remaining inter MVs. */
        int t1 = 0, t2 = 0;
        for(i=0; i<3;i++) if(!intra[i]) {t1 = i; break;}
        for(i= t1+1; i<4; i++)if(!intra[i]) {t2 = i; break;}
        tx = (mvx[t1] + mvx[t2]) / 2;
        ty = (mvy[t1] + mvy[t2]) / 2;
    } else {
        /* 3 or 4 intra blocks: store a zero MV and skip chroma MC entirely. */
        s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
        s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
        return; //no need to do MC for inter blocks
    }

    s->current_picture.motion_val[1][s->block_index[0]][0] = tx;
    s->current_picture.motion_val[1][s->block_index[0]][1] = ty;
    /* Halve to chroma resolution, rounding the 3/4-sample case (same rule
     * as in vc1_mc_1mv), then apply the FASTUVMC snap if enabled. */
    uvmx = (tx + ((tx&3) == 3)) >> 1;
    uvmy = (ty + ((ty&3) == 3)) >> 1;
    if(v->fastuvmc) {
        uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
        uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
    }
    uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
    uvsrc_y = s->mb_y * 8 + (uvmy >> 2);

    if(v->profile != PROFILE_ADVANCED){
        uvsrc_x = av_clip(uvsrc_x,  -8, s->mb_width  *  8);
        uvsrc_y = av_clip(uvsrc_y,  -8, s->mb_height *  8);
    }else{
        uvsrc_x = av_clip(uvsrc_x,  -8, s->avctx->coded_width  >> 1);
        uvsrc_y = av_clip(uvsrc_y,  -8, s->avctx->coded_height >> 1);
    }
    srcU = s->last_picture.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV = s->last_picture.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
    /* Edge-emulation / sample-rewrite slow path for the two 8x8 chroma blocks. */
    if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
       || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
       || (unsigned)uvsrc_y > (s->v_edge_pos >> 1) - 9){
        ff_emulated_edge_mc(s->edge_emu_buffer     , srcU, s->uvlinesize, 8+1, 8+1,
                            uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ff_emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize, 8+1, 8+1,
                            uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        srcU = s->edge_emu_buffer;
        srcV = s->edge_emu_buffer + 16;
        /* if we deal with range reduction we need to scale source blocks */
        if(v->rangeredfrm) {
            int i, j;
            uint8_t *src, *src2;

            src = srcU; src2 = srcV;
            /* ...listing truncated here (page break); the range-reduction loop
             * and the remainder of vc1_mc_4mv_chroma continue on page 2. */

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -