
📄 vp56.c

📁 mediastreamer2 is an open-source library for streaming media over the network
💻 C
📖 Page 1 of 2
    if (x<0 || x+12>=s->plane_width[plane] ||
        y<0 || y+12>=s->plane_height[plane]) {
        ff_emulated_edge_mc(s->edge_emu_buffer,
                            src + s->block_offset[b] + (dy-2)*stride + (dx-2),
                            stride, 12, 12, x, y,
                            s->plane_width[plane],
                            s->plane_height[plane]);
        src_block = s->edge_emu_buffer;
        src_offset = 2 + 2*stride;
    } else if (deblock_filtering) {
        /* only need a 12x12 block, but there is no such dsp function, */
        /* so copy a 16x12 block */
        s->dsp.put_pixels_tab[0][0](s->edge_emu_buffer,
                                    src + s->block_offset[b] + (dy-2)*stride + (dx-2),
                                    stride, 12);
        src_block = s->edge_emu_buffer;
        src_offset = 2 + 2*stride;
    } else {
        src_block = src;
        src_offset = s->block_offset[b] + dy*stride + dx;
    }

    if (deblock_filtering)
        vp56_deblock_filter(s, src_block, stride, dx&7, dy&7);

    if (s->mv[b].x & mask)
        overlap_offset += (s->mv[b].x > 0) ? 1 : -1;
    if (s->mv[b].y & mask)
        overlap_offset += (s->mv[b].y > 0) ? stride : -stride;

    if (overlap_offset) {
        if (s->filter)
            s->filter(s, dst, src_block, src_offset, src_offset+overlap_offset,
                      stride, s->mv[b], mask, s->filter_selection, b<4);
        else
            s->dsp.put_no_rnd_pixels_l2[1](dst, src_block+src_offset,
                                           src_block+src_offset+overlap_offset,
                                           stride, 8);
    } else {
        s->dsp.put_pixels_tab[1][0](dst, src_block+src_offset, stride, 8);
    }
}

static void vp56_decode_mb(vp56_context_t *s, int row, int col, int is_alpha)
{
    AVFrame *frame_current, *frame_ref;
    vp56_mb_t mb_type;
    vp56_frame_t ref_frame;
    int b, ab, b_max, plane, off;

    if (s->framep[VP56_FRAME_CURRENT]->key_frame)
        mb_type = VP56_MB_INTRA;
    else
        mb_type = vp56_decode_mv(s, row, col);
    ref_frame = vp56_reference_frame[mb_type];

    memset(s->block_coeff, 0, sizeof(s->block_coeff));

    s->parse_coeff(s);

    vp56_add_predictors_dc(s, ref_frame);

    frame_current = s->framep[VP56_FRAME_CURRENT];
    frame_ref = s->framep[ref_frame];

    ab = 6*is_alpha;
    b_max = 6 - 2*is_alpha;

    switch (mb_type) {
        case VP56_MB_INTRA:
            for (b=0; b<b_max; b++) {
                plane = vp56_b2p[b+ab];
                s->dsp.idct_put(frame_current->data[plane] + s->block_offset[b],
                                s->stride[plane], s->block_coeff[b]);
            }
            break;

        case VP56_MB_INTER_NOVEC_PF:
        case VP56_MB_INTER_NOVEC_GF:
            for (b=0; b<b_max; b++) {
                plane = vp56_b2p[b+ab];
                off = s->block_offset[b];
                s->dsp.put_pixels_tab[1][0](frame_current->data[plane] + off,
                                            frame_ref->data[plane] + off,
                                            s->stride[plane], 8);
                s->dsp.idct_add(frame_current->data[plane] + off,
                                s->stride[plane], s->block_coeff[b]);
            }
            break;

        case VP56_MB_INTER_DELTA_PF:
        case VP56_MB_INTER_V1_PF:
        case VP56_MB_INTER_V2_PF:
        case VP56_MB_INTER_DELTA_GF:
        case VP56_MB_INTER_4V:
        case VP56_MB_INTER_V1_GF:
        case VP56_MB_INTER_V2_GF:
            for (b=0; b<b_max; b++) {
                int x_off = b==1 || b==3 ? 8 : 0;
                int y_off = b==2 || b==3 ? 8 : 0;
                plane = vp56_b2p[b+ab];
                vp56_mc(s, b, plane, frame_ref->data[plane], s->stride[plane],
                        16*col+x_off, 16*row+y_off);
                s->dsp.idct_add(frame_current->data[plane] + s->block_offset[b],
                                s->stride[plane], s->block_coeff[b]);
            }
            break;
    }
}

static int vp56_size_changed(AVCodecContext *avctx)
{
    vp56_context_t *s = avctx->priv_data;
    int stride = s->framep[VP56_FRAME_CURRENT]->linesize[0];
    int i;

    s->plane_width[0]  = s->plane_width[3]  = avctx->coded_width;
    s->plane_width[1]  = s->plane_width[2]  = avctx->coded_width/2;
    s->plane_height[0] = s->plane_height[3] = avctx->coded_height;
    s->plane_height[1] = s->plane_height[2] = avctx->coded_height/2;

    for (i=0; i<4; i++)
        s->stride[i] = s->flip * s->framep[VP56_FRAME_CURRENT]->linesize[i];

    s->mb_width  = (avctx->coded_width +15) / 16;
    s->mb_height = (avctx->coded_height+15) / 16;

    if (s->mb_width > 1000 || s->mb_height > 1000) {
        av_log(avctx, AV_LOG_ERROR, "picture too big\n");
        return -1;
    }

    s->above_blocks = av_realloc(s->above_blocks,
                                 (4*s->mb_width+6) * sizeof(*s->above_blocks));
    s->macroblocks = av_realloc(s->macroblocks,
                                s->mb_width*s->mb_height*sizeof(*s->macroblocks));
    av_free(s->edge_emu_buffer_alloc);
    s->edge_emu_buffer_alloc = av_malloc(16*stride);
    s->edge_emu_buffer = s->edge_emu_buffer_alloc;
    if (s->flip < 0)
        s->edge_emu_buffer += 15 * stride;

    return 0;
}

int vp56_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                      const uint8_t *buf, int buf_size)
{
    vp56_context_t *s = avctx->priv_data;
    AVFrame *const p = s->framep[VP56_FRAME_CURRENT];
    int is_alpha, alpha_offset;

    if (s->has_alpha) {
        alpha_offset = bytestream_get_be24(&buf);
        buf_size -= 3;
    }

    for (is_alpha=0; is_alpha < 1+s->has_alpha; is_alpha++) {
        int mb_row, mb_col, mb_row_flip, mb_offset = 0;
        int block, y, uv, stride_y, stride_uv;
        int golden_frame = 0;
        int res;

        s->modelp = &s->models[is_alpha];

        res = s->parse_header(s, buf, buf_size, &golden_frame);
        if (!res)
            return -1;

        if (!is_alpha) {
            p->reference = 1;
            if (avctx->get_buffer(avctx, p) < 0) {
                av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
                return -1;
            }

            if (res == 2)
                if (vp56_size_changed(avctx)) {
                    avctx->release_buffer(avctx, p);
                    return -1;
                }
        }

        if (p->key_frame) {
            p->pict_type = FF_I_TYPE;
            s->default_models_init(s);
            for (block=0; block<s->mb_height*s->mb_width; block++)
                s->macroblocks[block].type = VP56_MB_INTRA;
        } else {
            p->pict_type = FF_P_TYPE;
            vp56_parse_mb_type_models(s);
            s->parse_vector_models(s);
            s->mb_type = VP56_MB_INTER_NOVEC_PF;
        }

        s->parse_coeff_models(s);

        memset(s->prev_dc, 0, sizeof(s->prev_dc));
        s->prev_dc[1][VP56_FRAME_CURRENT] = 128;
        s->prev_dc[2][VP56_FRAME_CURRENT] = 128;

        for (block=0; block < 4*s->mb_width+6; block++) {
            s->above_blocks[block].ref_frame = -1;
            s->above_blocks[block].dc_coeff = 0;
            s->above_blocks[block].not_null_dc = 0;
        }
        s->above_blocks[2*s->mb_width + 2].ref_frame = 0;
        s->above_blocks[3*s->mb_width + 4].ref_frame = 0;

        stride_y  = p->linesize[0];
        stride_uv = p->linesize[1];

        if (s->flip < 0)
            mb_offset = 7;

        /* main macroblocks loop */
        for (mb_row=0; mb_row<s->mb_height; mb_row++) {
            if (s->flip < 0)
                mb_row_flip = s->mb_height - mb_row - 1;
            else
                mb_row_flip = mb_row;

            for (block=0; block<4; block++) {
                s->left_block[block].ref_frame = -1;
                s->left_block[block].dc_coeff = 0;
                s->left_block[block].not_null_dc = 0;
            }
            memset(s->coeff_ctx, 0, sizeof(s->coeff_ctx));
            memset(s->coeff_ctx_last, 24, sizeof(s->coeff_ctx_last));

            s->above_block_idx[0] = 1;
            s->above_block_idx[1] = 2;
            s->above_block_idx[2] = 1;
            s->above_block_idx[3] = 2;
            s->above_block_idx[4] = 2*s->mb_width + 2 + 1;
            s->above_block_idx[5] = 3*s->mb_width + 4 + 1;

            s->block_offset[s->frbi] = (mb_row_flip*16 + mb_offset) * stride_y;
            s->block_offset[s->srbi] = s->block_offset[s->frbi] + 8*stride_y;
            s->block_offset[1] = s->block_offset[0] + 8;
            s->block_offset[3] = s->block_offset[2] + 8;
            s->block_offset[4] = (mb_row_flip*8 + mb_offset) * stride_uv;
            s->block_offset[5] = s->block_offset[4];

            for (mb_col=0; mb_col<s->mb_width; mb_col++) {
                vp56_decode_mb(s, mb_row, mb_col, is_alpha);

                for (y=0; y<4; y++) {
                    s->above_block_idx[y] += 2;
                    s->block_offset[y] += 16;
                }
                for (uv=4; uv<6; uv++) {
                    s->above_block_idx[uv] += 1;
                    s->block_offset[uv] += 8;
                }
            }
        }

        if (p->key_frame || golden_frame) {
            if (s->framep[VP56_FRAME_GOLDEN]->data[0] &&
                s->framep[VP56_FRAME_GOLDEN] != s->framep[VP56_FRAME_GOLDEN2])
                avctx->release_buffer(avctx, s->framep[VP56_FRAME_GOLDEN]);
            s->framep[VP56_FRAME_GOLDEN] = p;
        }

        if (s->has_alpha) {
            FFSWAP(AVFrame *, s->framep[VP56_FRAME_GOLDEN],
                              s->framep[VP56_FRAME_GOLDEN2]);
            buf += alpha_offset;
            buf_size -= alpha_offset;
        }
    }

    if (s->framep[VP56_FRAME_PREVIOUS] == s->framep[VP56_FRAME_GOLDEN] ||
        s->framep[VP56_FRAME_PREVIOUS] == s->framep[VP56_FRAME_GOLDEN2]) {
        if (s->framep[VP56_FRAME_UNUSED] != s->framep[VP56_FRAME_GOLDEN] &&
            s->framep[VP56_FRAME_UNUSED] != s->framep[VP56_FRAME_GOLDEN2])
            FFSWAP(AVFrame *, s->framep[VP56_FRAME_PREVIOUS],
                              s->framep[VP56_FRAME_UNUSED]);
        else
            FFSWAP(AVFrame *, s->framep[VP56_FRAME_PREVIOUS],
                              s->framep[VP56_FRAME_UNUSED2]);
    } else if (s->framep[VP56_FRAME_PREVIOUS]->data[0])
        avctx->release_buffer(avctx, s->framep[VP56_FRAME_PREVIOUS]);

    FFSWAP(AVFrame *, s->framep[VP56_FRAME_CURRENT],
                      s->framep[VP56_FRAME_PREVIOUS]);

    *(AVFrame*)data = *p;
    *data_size = sizeof(AVFrame);

    return buf_size;
}

void vp56_init(AVCodecContext *avctx, int flip, int has_alpha)
{
    vp56_context_t *s = avctx->priv_data;
    int i;

    s->avctx = avctx;
    avctx->pix_fmt = has_alpha ? PIX_FMT_YUVA420P : PIX_FMT_YUV420P;

    if (avctx->idct_algo == FF_IDCT_AUTO)
        avctx->idct_algo = FF_IDCT_VP3;
    dsputil_init(&s->dsp, avctx);
    ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);

    avcodec_set_dimensions(avctx, 0, 0);

    for (i=0; i<4; i++)
        s->framep[i] = &s->frames[i];
    s->framep[VP56_FRAME_UNUSED] = s->framep[VP56_FRAME_GOLDEN];
    s->framep[VP56_FRAME_UNUSED2] = s->framep[VP56_FRAME_GOLDEN2];
    s->edge_emu_buffer_alloc = NULL;

    s->above_blocks = NULL;
    s->macroblocks = NULL;
    s->quantizer = -1;
    s->deblock_filtering = 1;

    s->filter = NULL;

    s->has_alpha = has_alpha;
    if (flip) {
        s->flip = -1;
        s->frbi = 2;
        s->srbi = 0;
    } else {
        s->flip = 1;
        s->frbi = 0;
        s->srbi = 2;
    }
}

int vp56_free(AVCodecContext *avctx)
{
    vp56_context_t *s = avctx->priv_data;

    av_free(s->above_blocks);
    av_free(s->macroblocks);
    av_free(s->edge_emu_buffer_alloc);
    if (s->framep[VP56_FRAME_GOLDEN]->data[0])
        avctx->release_buffer(avctx, s->framep[VP56_FRAME_GOLDEN]);
    if (s->framep[VP56_FRAME_GOLDEN2]->data[0])
        avctx->release_buffer(avctx, s->framep[VP56_FRAME_GOLDEN2]);
    if (s->framep[VP56_FRAME_PREVIOUS]->data[0])
        avctx->release_buffer(avctx, s->framep[VP56_FRAME_PREVIOUS]);

    return 0;
}
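The exported entry points above (vp56_init(), vp56_decode_frame(), vp56_free()) form a shared VP5/VP6 decoding core: a format-specific front end is expected to fill in the parse_* and filter hooks on the context and register itself with libavcodec. The following is a minimal, hypothetical sketch of such a front end, assuming the same libavcodec era as this file (old-style AVCodec initializer, CODEC_TYPE_VIDEO, CODEC_ID_VP6); the vp6_decode_init name and the parameter choices are illustrative, not the actual vp5.c/vp6.c.

/* Hypothetical sketch: wiring the shared vp56 core above into an AVCodec
 * entry. Not the actual vp6.c; names and parameters are illustrative. */
#include "avcodec.h"
#include "vp56.h"

static int vp6_decode_init(AVCodecContext *avctx)
{
    /* Illustrative parameters: no vertical flip, no alpha plane.
     * A real front end would also point the codec-specific hooks that the
     * core calls through vp56_context_t at its own parsers/filters:
     * parse_header, parse_coeff, parse_vector_models, parse_coeff_models,
     * default_models_init, filter, ... */
    vp56_init(avctx, 0, 0);
    return 0;
}

AVCodec vp6_decoder = {
    "vp6",
    CODEC_TYPE_VIDEO,
    CODEC_ID_VP6,
    sizeof(vp56_context_t),
    vp6_decode_init,
    NULL,              /* no encoder */
    vp56_free,         /* close */
    vp56_decode_frame, /* decode */
};

In libavcodec itself, the per-format files vp5.c and vp6.c play this role and supply real implementations of those hooks.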
