
📄 truemotion2.c

📁 ffmpeg source code analysis
💻 C
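Note: this page of the listing picks up in the middle of the file. The fragment just below is the tail of tm2_med_res_block(); its opening lines (and the helpers it calls, such as tm2_low_chroma() and tm2_apply_deltas()) appear on the previous page.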
    int i;
    int deltas[16];
    TM2_INIT_POINTERS();

    /* low-res chroma */
    deltas[0] = GET_TOK(ctx, TM2_C_LO);
    deltas[1] = deltas[2] = deltas[3] = 0;
    tm2_low_chroma(U, Ustride, clast, ctx->CD, deltas, bx);

    deltas[0] = GET_TOK(ctx, TM2_C_LO);
    deltas[1] = deltas[2] = deltas[3] = 0;
    tm2_low_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas, bx);

    /* hi-res luma */
    for(i = 0; i < 16; i++)
        deltas[i] = GET_TOK(ctx, TM2_L_HI);

    tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
}

static inline void tm2_low_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
{
    int i;
    int t1, t2;
    int deltas[16];
    TM2_INIT_POINTERS();

    /* low-res chroma */
    deltas[0] = GET_TOK(ctx, TM2_C_LO);
    deltas[1] = deltas[2] = deltas[3] = 0;
    tm2_low_chroma(U, Ustride, clast, ctx->CD, deltas, bx);

    deltas[0] = GET_TOK(ctx, TM2_C_LO);
    deltas[1] = deltas[2] = deltas[3] = 0;
    tm2_low_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas, bx);

    /* low-res luma */
    for(i = 0; i < 16; i++)
        deltas[i] = 0;

    deltas[ 0] = GET_TOK(ctx, TM2_L_LO);
    deltas[ 2] = GET_TOK(ctx, TM2_L_LO);
    deltas[ 8] = GET_TOK(ctx, TM2_L_LO);
    deltas[10] = GET_TOK(ctx, TM2_L_LO);

    if(bx > 0)
        last[0] = (last[-1] - ctx->D[0] - ctx->D[1] - ctx->D[2] - ctx->D[3] + last[1]) >> 1;
    else
        last[0] = (last[1] - ctx->D[0] - ctx->D[1] - ctx->D[2] - ctx->D[3]) >> 1;
    last[2] = (last[1] + last[3]) >> 1;

    t1 = ctx->D[0] + ctx->D[1];
    ctx->D[0] = t1 >> 1;
    ctx->D[1] = t1 - (t1 >> 1);
    t2 = ctx->D[2] + ctx->D[3];
    ctx->D[2] = t2 >> 1;
    ctx->D[3] = t2 - (t2 >> 1);

    tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
}

static inline void tm2_null_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
{
    int i;
    int ct;
    int left, right, diff;
    int deltas[16];
    TM2_INIT_POINTERS();

    /* null chroma */
    deltas[0] = deltas[1] = deltas[2] = deltas[3] = 0;
    tm2_low_chroma(U, Ustride, clast, ctx->CD, deltas, bx);

    deltas[0] = deltas[1] = deltas[2] = deltas[3] = 0;
    tm2_low_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas, bx);

    /* null luma */
    for(i = 0; i < 16; i++)
        deltas[i] = 0;

    ct = ctx->D[0] + ctx->D[1] + ctx->D[2] + ctx->D[3];

    if(bx > 0)
        left = last[-1] - ct;
    else
        left = 0;

    right = last[3];
    diff = right - left;
    last[0] = left + (diff >> 2);
    last[1] = left + (diff >> 1);
    last[2] = right - (diff >> 2);
    last[3] = right;
    {
        int tp = left;

        ctx->D[0] = (tp + (ct >> 2)) - left;
        left += ctx->D[0];
        ctx->D[1] = (tp + (ct >> 1)) - left;
        left += ctx->D[1];
        ctx->D[2] = ((tp + ct) - (ct >> 2)) - left;
        left += ctx->D[2];
        ctx->D[3] = (tp + ct) - left;
    }

    tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
}

static inline void tm2_still_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
{
    int i, j;
    TM2_INIT_POINTERS_2();

    /* update chroma */
    for(j = 0; j < 2; j++){
        for(i = 0; i < 2; i++){
            U[i] = Uo[i];
            V[i] = Vo[i];
        }
        U += Ustride; V += Vstride;
        Uo += oUstride; Vo += oVstride;
    }
    U -= Ustride * 2;
    V -= Vstride * 2;
    TM2_RECALC_BLOCK(U, Ustride, clast, ctx->CD);
    TM2_RECALC_BLOCK(V, Vstride, (clast + 2), (ctx->CD + 2));

    /* update deltas */
    ctx->D[0] = Yo[3] - last[3];
    ctx->D[1] = Yo[3 + oYstride] - Yo[3];
    ctx->D[2] = Yo[3 + oYstride * 2] - Yo[3 + oYstride];
    ctx->D[3] = Yo[3 + oYstride * 3] - Yo[3 + oYstride * 2];

    for(j = 0; j < 4; j++){
        for(i = 0; i < 4; i++){
            Y[i] = Yo[i];
            last[i] = Yo[i];
        }
        Y += Ystride;
        Yo += oYstride;
    }
}

static inline void tm2_update_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
{
    int i, j;
    int d;
    TM2_INIT_POINTERS_2();

    /* update chroma */
    for(j = 0; j < 2; j++){
        for(i = 0; i < 2; i++){
            U[i] = Uo[i] + GET_TOK(ctx, TM2_UPD);
            V[i] = Vo[i] + GET_TOK(ctx, TM2_UPD);
        }
        U += Ustride; V += Vstride;
        Uo += oUstride; Vo += oVstride;
    }
    U -= Ustride * 2;
    V -= Vstride * 2;
    TM2_RECALC_BLOCK(U, Ustride, clast, ctx->CD);
    TM2_RECALC_BLOCK(V, Vstride, (clast + 2), (ctx->CD + 2));

    /* update deltas */
    ctx->D[0] = Yo[3] - last[3];
    ctx->D[1] = Yo[3 + oYstride] - Yo[3];
    ctx->D[2] = Yo[3 + oYstride * 2] - Yo[3 + oYstride];
    ctx->D[3] = Yo[3 + oYstride * 3] - Yo[3 + oYstride * 2];

    for(j = 0; j < 4; j++){
        d = last[3];
        for(i = 0; i < 4; i++){
            Y[i] = Yo[i] + GET_TOK(ctx, TM2_UPD);
            last[i] = Y[i];
        }
        ctx->D[j] = last[3] - d;
        Y += Ystride;
        Yo += oYstride;
    }
}

static inline void tm2_motion_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
{
    int i, j;
    int mx, my;
    TM2_INIT_POINTERS_2();

    mx = GET_TOK(ctx, TM2_MOT);
    my = GET_TOK(ctx, TM2_MOT);

    Yo += my * oYstride + mx;
    Uo += (my >> 1) * oUstride + (mx >> 1);
    Vo += (my >> 1) * oVstride + (mx >> 1);

    /* copy chroma */
    for(j = 0; j < 2; j++){
        for(i = 0; i < 2; i++){
            U[i] = Uo[i];
            V[i] = Vo[i];
        }
        U += Ustride; V += Vstride;
        Uo += oUstride; Vo += oVstride;
    }
    U -= Ustride * 2;
    V -= Vstride * 2;
    TM2_RECALC_BLOCK(U, Ustride, clast, ctx->CD);
    TM2_RECALC_BLOCK(V, Vstride, (clast + 2), (ctx->CD + 2));

    /* copy luma */
    for(j = 0; j < 4; j++){
        for(i = 0; i < 4; i++){
            Y[i] = Yo[i];
        }
        Y += Ystride;
        Yo += oYstride;
    }
    /* calculate deltas */
    Y -= Ystride * 4;
    ctx->D[0] = Y[3] - last[3];
    ctx->D[1] = Y[3 + Ystride] - Y[3];
    ctx->D[2] = Y[3 + Ystride * 2] - Y[3 + Ystride];
    ctx->D[3] = Y[3 + Ystride * 3] - Y[3 + Ystride * 2];
    for(i = 0; i < 4; i++)
        last[i] = Y[i + Ystride * 3];
}

static int tm2_decode_blocks(TM2Context *ctx, AVFrame *p)
{
    int i, j;
    int bw, bh;
    int type;
    int keyframe = 1;
    uint8_t *Y, *U, *V;
    int *src;

    bw = ctx->avctx->width >> 2;
    bh = ctx->avctx->height >> 2;

    for(i = 0; i < TM2_NUM_STREAMS; i++)
        ctx->tok_ptrs[i] = 0;

    if (ctx->tok_lens[TM2_TYPE] < bw * bh){
        av_log(ctx->avctx, AV_LOG_ERROR, "Got %i tokens for %i blocks\n", ctx->tok_lens[TM2_TYPE], bw * bh);
        return -1;
    }

    memset(ctx->last, 0, 4 * bw * sizeof(int));
    memset(ctx->clast, 0, 4 * bw * sizeof(int));

    for(j = 0; j < bh; j++) {
        memset(ctx->D, 0, 4 * sizeof(int));
        memset(ctx->CD, 0, 4 * sizeof(int));
        for(i = 0; i < bw; i++) {
            type = GET_TOK(ctx, TM2_TYPE);
            switch(type) {
            case TM2_HI_RES:
                tm2_hi_res_block(ctx, p, i, j);
                break;
            case TM2_MED_RES:
                tm2_med_res_block(ctx, p, i, j);
                break;
            case TM2_LOW_RES:
                tm2_low_res_block(ctx, p, i, j);
                break;
            case TM2_NULL_RES:
                tm2_null_res_block(ctx, p, i, j);
                break;
            case TM2_UPDATE:
                tm2_update_block(ctx, p, i, j);
                keyframe = 0;
                break;
            case TM2_STILL:
                tm2_still_block(ctx, p, i, j);
                keyframe = 0;
                break;
            case TM2_MOTION:
                tm2_motion_block(ctx, p, i, j);
                keyframe = 0;
                break;
            default:
                av_log(ctx->avctx, AV_LOG_ERROR, "Skipping unknown block type %i\n", type);
            }
        }
    }

    /* copy data from our buffer to AVFrame */
    Y = p->data[0];
    src = (ctx->cur ? ctx->Y2 : ctx->Y1);
    for(j = 0; j < ctx->avctx->height; j++){
        for(i = 0; i < ctx->avctx->width; i++){
            Y[i] = clip_uint8(*src++);
        }
        Y += p->linesize[0];
    }
    U = p->data[2];
    src = (ctx->cur ? ctx->U2 : ctx->U1);
    for(j = 0; j < (ctx->avctx->height + 1) >> 1; j++){
        for(i = 0; i < (ctx->avctx->width + 1) >> 1; i++){
            U[i] = clip_uint8(*src++);
        }
        U += p->linesize[2];
    }
    V = p->data[1];
    src = (ctx->cur ? ctx->V2 : ctx->V1);
    for(j = 0; j < (ctx->avctx->height + 1) >> 1; j++){
        for(i = 0; i < (ctx->avctx->width + 1) >> 1; i++){
            V[i] = clip_uint8(*src++);
        }
        V += p->linesize[1];
    }

    return keyframe;
}

static int decode_frame(AVCodecContext *avctx,
                        void *data, int *data_size,
                        uint8_t *buf, int buf_size)
{
    TM2Context * const l = avctx->priv_data;
    AVFrame * const p = (AVFrame*)&l->pic;
    int skip, t;

    p->reference = 1;
    p->buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
    if(avctx->reget_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    l->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, buf_size >> 2);
    skip = tm2_read_header(l, buf);
    if(skip == -1)
        return -1;

    t = tm2_read_stream(l, buf + skip, TM2_C_HI);
    if(t == -1)
        return -1;
    skip += t;
    t = tm2_read_stream(l, buf + skip, TM2_C_LO);
    if(t == -1)
        return -1;
    skip += t;
    t = tm2_read_stream(l, buf + skip, TM2_L_HI);
    if(t == -1)
        return -1;
    skip += t;
    t = tm2_read_stream(l, buf + skip, TM2_L_LO);
    if(t == -1)
        return -1;
    skip += t;
    t = tm2_read_stream(l, buf + skip, TM2_UPD);
    if(t == -1)
        return -1;
    skip += t;
    t = tm2_read_stream(l, buf + skip, TM2_MOT);
    if(t == -1)
        return -1;
    skip += t;
    t = tm2_read_stream(l, buf + skip, TM2_TYPE);
    if(t == -1)
        return -1;

    p->key_frame = tm2_decode_blocks(l, p);
    if(p->key_frame)
        p->pict_type = FF_I_TYPE;
    else
        p->pict_type = FF_P_TYPE;

    l->cur = !l->cur;
    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = l->pic;

    return buf_size;
}

static int decode_init(AVCodecContext *avctx)
{
    TM2Context * const l = avctx->priv_data;
    int i;

    if (avcodec_check_dimensions(avctx, avctx->height, avctx->width) < 0) {
        return -1;
    }
    if((avctx->width & 3) || (avctx->height & 3)){
        av_log(avctx, AV_LOG_ERROR, "Width and height must be multiple of 4\n");
        return -1;
    }

    l->avctx = avctx;
    l->pic.data[0] = NULL;
    avctx->has_b_frames = 0;
    avctx->pix_fmt = PIX_FMT_YUV420P;

    dsputil_init(&l->dsp, avctx);

    l->last = av_malloc(4 * sizeof(int) * (avctx->width >> 2));
    l->clast = av_malloc(4 * sizeof(int) * (avctx->width >> 2));

    for(i = 0; i < TM2_NUM_STREAMS; i++) {
        l->tokens[i] = NULL;
        l->tok_lens[i] = 0;
    }

    l->Y1 = av_malloc(sizeof(int) * avctx->width * avctx->height);
    l->U1 = av_malloc(sizeof(int) * ((avctx->width + 1) >> 1) * ((avctx->height + 1) >> 1));
    l->V1 = av_malloc(sizeof(int) * ((avctx->width + 1) >> 1) * ((avctx->height + 1) >> 1));
    l->Y2 = av_malloc(sizeof(int) * avctx->width * avctx->height);
    l->U2 = av_malloc(sizeof(int) * ((avctx->width + 1) >> 1) * ((avctx->height + 1) >> 1));
    l->V2 = av_malloc(sizeof(int) * ((avctx->width + 1) >> 1) * ((avctx->height + 1) >> 1));
    l->cur = 0;

    return 0;
}

static int decode_end(AVCodecContext *avctx)
{
    TM2Context * const l = avctx->priv_data;
    int i;

    if(l->last)
        av_free(l->last);
    if(l->clast)
        av_free(l->clast);
    for(i = 0; i < TM2_NUM_STREAMS; i++)
        if(l->tokens[i])
            av_free(l->tokens[i]);
    if(l->Y1){
        av_free(l->Y1);
        av_free(l->U1);
        av_free(l->V1);
        av_free(l->Y2);
        av_free(l->U2);
        av_free(l->V2);
    }
    return 0;
}

AVCodec truemotion2_decoder = {
    "truemotion2",
    CODEC_TYPE_VIDEO,
    CODEC_ID_TRUEMOTION2,
    sizeof(TM2Context),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1,
};
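A note on the bookkeeping shared by the block decoders above: last[] carries the reconstructed bottom row of the block row above, and ctx->D[] carries the vertical deltas along the right edge of the previously decoded block. The block types differ only in how they fill the 4x4 deltas[] array (the med-res fragment at the top of this page reads all sixteen TM2_L_HI tokens, while tm2_low_res_block() and tm2_null_res_block() fill only a few entries, or none, and interpolate the rest), after which tm2_apply_deltas() turns the deltas back into samples. That function sits on the earlier page of this listing and is not shown here; the following is only a minimal sketch of the reconstruction idea, with hypothetical names, inferred from how the functions above maintain D[] and last[]:

/*
 * Sketch only -- not the actual tm2_apply_deltas(). Illustrates delta
 * reconstruction of one 4x4 luma block under the conventions visible above:
 * 'top' stands in for last[] (bottom row of the block row above) and
 * 'left_d' stands in for ctx->D[] (vertical deltas of the left neighbour).
 */
static void sketch_apply_deltas(int *dst, int stride, const int deltas[16],
                                int top[4], int left_d[4])
{
    int i, j;

    for (j = 0; j < 4; j++) {
        int acc = left_d[j];               /* inherit the left neighbour's delta for this row */
        for (i = 0; i < 4; i++) {
            acc    += deltas[i + j * 4];   /* accumulate horizontal deltas across the row */
            top[i] += acc;                 /* predict from the sample directly above */
            dst[i]  = top[i];              /* write the reconstructed sample */
        }
        dst += stride;
        left_d[j] = acc;                   /* becomes the right-edge delta seen by the next block */
    }
}

Read this way, the bookkeeping at the end of tm2_still_block(), tm2_update_block() and tm2_motion_block() falls into place: they refresh ctx->D[] from the right-hand column and last[] from the bottom row precisely because the block to the right and the block row below predict from those values.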
