⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 rv34.c

📁 ffmpeg移植到symbian的全部源代码
💻 C
📖 第 1 页 / 共 4 页
字号:
        code = get_vlc2(gb, rvlc->third_pattern[sc].table, 9, 2);
        decode_subblock(dst + 8*2+2, code, 0, gb, &rvlc->coefficient);
    }
}

/**
 * Dequantize ordinary 4x4 block.
 *
 * The DC coefficient (block[0]) is scaled with Qdc, all other coefficients
 * with Q; every product is rounded with +8 and shifted down by 4 bits.
 * The block is stored with a row stride of 8 elements.
 * @todo optimize
 */
static inline void rv34_dequant4x4(DCTELEM *block, int Qdc, int Q)
{
    int i, j;

    block[0] = (block[0] * Qdc + 8) >> 4;
    for(i = 0; i < 4; i++)
        /* j starts at !i so block[0] is not rescaled a second time */
        for(j = !i; j < 4; j++)
            block[j + i*8] = (block[j + i*8] * Q + 8) >> 4;
}

/**
 * Dequantize 4x4 block of DC values for 16x16 macroblock.
 *
 * The first three coefficients in dezigzag order are scaled with Qdc,
 * the remaining thirteen with Q, using the same (x*q + 8) >> 4 rounding
 * as rv34_dequant4x4().
 * @todo optimize
 */
static inline void rv34_dequant4x4_16x16(DCTELEM *block, int Qdc, int Q)
{
    int i;

    for(i = 0; i < 3; i++)
         block[rv34_dezigzag[i]] = (block[rv34_dezigzag[i]] * Qdc + 8) >> 4;
    for(; i < 16; i++)
         block[rv34_dezigzag[i]] = (block[rv34_dezigzag[i]] * Q + 8) >> 4;
}

/** @} */ //block functions

/**
 * @defgroup bitstream RV30/40 bitstream parsing
 * @{
 */

/**
 * Decode starting slice position.
 *
 * Scans rv34_mb_max_sizes[] for the first entry greater than mb_size and
 * returns the corresponding bit width from rv34_mb_bits_sizes[].
 * NOTE(review): when no entry matches, index 5 is used — this assumes both
 * tables have at least six entries; confirm against their definitions.
 * @todo Maybe replace with ff_h263_decode_mba() ?
 */
int ff_rv34_get_start_offset(GetBitContext *gb, int mb_size)
{
    int i;
    for(i = 0; i < 5; i++)
        if(rv34_mb_max_sizes[i] > mb_size)
            break;
    return rv34_mb_bits_sizes[i];
}

/**
 * Select VLC set for decoding from current quantizer, modifier and frame type.
 *
 * The modifier bumps the effective quantizer (+10 for mod == 2, +5 for any
 * other non-zero mod) before it is clipped to [0, 30] and mapped through
 * rv34_quant_to_vlc_set[]; inter and intra coding use separate VLC tables.
 */
static inline RV34VLC* choose_vlc_set(int quant, int mod, int type)
{
    if(mod == 2 && quant < 19) quant += 10;
    else if(mod && quant < 26) quant += 5;
    return type ? &inter_vlcs[rv34_quant_to_vlc_set[1][av_clip(quant, 0, 30)]]
                : &intra_vlcs[rv34_quant_to_vlc_set[0][av_clip(quant, 0, 30)]];
}

/**
 * Decode quantizer difference and return modified quantizer.
*/static inline int rv34_decode_dquant(GetBitContext *gb, int quant){    if(get_bits1(gb))        return rv34_dquant_tab[get_bits1(gb)][quant];    else        return get_bits(gb, 5);}/** @} */ //bitstream functions/** * @defgroup mv motion vector related code (prediction, reconstruction, motion compensation) * @{ *//** macroblock partition width in 8x8 blocks */static const uint8_t part_sizes_w[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2 };/** macroblock partition height in 8x8 blocks */static const uint8_t part_sizes_h[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2 };/** availability index for subblocks */static const uint8_t avail_indexes[4] = { 5, 6, 9, 10 };/** * motion vector prediction * * Motion prediction performed for the block by using median prediction of * motion vectors from the left, top and right top blocks but in corner cases * some other vectors may be used instead. */static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int dmv_no){    MpegEncContext *s = &r->s;    int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;    int A[2] = {0}, B[2], C[2];    int i, j;    int mx, my;    int avail_index = avail_indexes[subblock_no];    int c_off = part_sizes_w[block_type];    mv_pos += (subblock_no & 1) + (subblock_no >> 1)*s->b8_stride;    if(subblock_no == 3)        c_off = -1;    if(r->avail_cache[avail_index - 1]){        A[0] = s->current_picture_ptr->motion_val[0][mv_pos-1][0];        A[1] = s->current_picture_ptr->motion_val[0][mv_pos-1][1];    }    if(r->avail_cache[avail_index - 4]){        B[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][0];        B[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][1];    }else{        B[0] = A[0];        B[1] = A[1];    }    if(!r->avail_cache[avail_index - 4 + c_off]){        if(r->avail_cache[avail_index - 4] && (r->avail_cache[avail_index - 1] || r->rv30)){            C[0] = 
s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][0];            C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][1];        }else{            C[0] = A[0];            C[1] = A[1];        }    }else{        C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][0];        C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][1];    }    mx = mid_pred(A[0], B[0], C[0]);    my = mid_pred(A[1], B[1], C[1]);    mx += r->dmv[dmv_no][0];    my += r->dmv[dmv_no][1];    for(j = 0; j < part_sizes_h[block_type]; j++){        for(i = 0; i < part_sizes_w[block_type]; i++){            s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][0] = mx;            s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][1] = my;        }    }}#define GET_PTS_DIFF(a, b) ((a - b + 8192) & 0x1FFF)/** * Calculate motion vector component that should be added for direct blocks. */static int calc_add_mv(RV34DecContext *r, int dir, int val){    int refdist = GET_PTS_DIFF(r->next_pts, r->last_pts);    int dist = dir ? GET_PTS_DIFF(r->next_pts, r->cur_pts) : GET_PTS_DIFF(r->cur_pts, r->last_pts);    if(!refdist) return 0;    if(!dir)        return (val * dist + refdist - 1) / refdist;    else        return -(val * dist / refdist);}/** * Predict motion vector for B-frame macroblock. 
*/
static inline void rv34_pred_b_vector(int A[2], int B[2], int C[2],
                                      int A_avail, int B_avail, int C_avail,
                                      int *mx, int *my)
{
    /* With all three candidates available take the component-wise median;
     * otherwise sum the candidates (unavailable ones are zero) and halve
     * the sum when exactly two are available, i.e. average them. */
    if(A_avail + B_avail + C_avail != 3){
        *mx = A[0] + B[0] + C[0];
        *my = A[1] + B[1] + C[1];
        if(A_avail + B_avail + C_avail == 2){
            *mx /= 2;
            *my /= 2;
        }
    }else{
        *mx = mid_pred(A[0], B[0], C[0]);
        *my = mid_pred(A[1], B[1], C[1]);
    }
}

/**
 * motion vector prediction for B-frames
 */
static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
{
    MpegEncContext *s = &r->s;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
    int A[2], B[2], C[2];  /* left, top and top-right (or top-left) candidates */
    int has_A = 0, has_B = 0, has_C = 0;
    int mx, my;
    int i, j;
    Picture *cur_pic = s->current_picture_ptr;
    const int mask = dir ? MB_TYPE_L1 : MB_TYPE_L0; /* prediction list to match */
    int type = cur_pic->mb_type[mb_pos];

    memset(A, 0, sizeof(A));
    memset(B, 0, sizeof(B));
    memset(C, 0, sizeof(C));
    /* NOTE(review): each neighbour test ANDs the availability cache with the
     * current MB type and the list mask — presumably avail_cache entries
     * carry MB-type bits; verify against the code that fills avail_cache. */
    if((r->avail_cache[5-1] & type) & mask){
        A[0] = cur_pic->motion_val[dir][mv_pos - 1][0];
        A[1] = cur_pic->motion_val[dir][mv_pos - 1][1];
        has_A = 1;
    }
    if((r->avail_cache[5-4] & type) & mask){
        B[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][0];
        B[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][1];
        has_B = 1;
    }
    if((r->avail_cache[5-2] & type) & mask){
        C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][0];
        C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][1];
        has_C = 1;
    }else if((s->mb_x+1) == s->mb_width && (r->avail_cache[5-5] & type) & mask){
        /* in the last macroblock column fall back to the top-left neighbour */
        C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][0];
        C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][1];
        has_C = 1;
    }

    rv34_pred_b_vector(A, B, C, has_A, has_B, has_C, &mx, &my);

    /* add the decoded motion vector difference for this direction */
    mx += r->dmv[dir][0];
    my += r->dmv[dir][1];

    /* propagate the result to all four 8x8 positions of the macroblock */
    for(j = 0; j < 2; j++){
        for(i = 0; i < 2; i++){
            cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][0] = mx;
            cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][1] = my;
        }
    }
    /* single-direction MB types get zero vectors for the unused direction */
    if(block_type == RV34_MB_B_BACKWARD || block_type == RV34_MB_B_FORWARD)
        fill_rectangle(cur_pic->motion_val[!dir][mv_pos], 2, 2, s->b8_stride, 0, 4);
}

/** maps third-pel luma phase to the chroma MC argument (third-pel path only) */
static const int chroma_coeffs[3] = { 8, 5, 3 };

/**
 * generic motion compensation function
 *
 * @param r decoder context
 * @param block_type type of the current block
 * @param xoff horizontal offset from the start of the current block
 * @param yoff vertical offset from the start of the current block
 * @param mv_off offset to the motion vector information
 * @param width width of the current partition in 8x8 blocks
 * @param height height of the current partition in 8x8 blocks
 * @param dir 0 - predict from last frame, nonzero - predict from next frame
 * @param thirdpel nonzero when motion vectors are third-pel instead of quarter-pel
 * @param qpel_mc luma subpel interpolation functions ([0] full-size, [1] 8-pixel blocks)
 * @param chroma_mc chroma subpel interpolation functions
 */
static inline void rv34_mc(RV34DecContext *r, const int block_type,
                          const int xoff, const int yoff, int mv_off,
                          const int width, const int height, int dir,
                          const int thirdpel,
                          qpel_mc_func (*qpel_mc)[16],
                          h264_chroma_mc_func (*chroma_mc))
{
    MpegEncContext *s = &r->s;
    uint8_t *Y, *U, *V, *srcY, *srcU, *srcV;
    int dxy, mx, my, lx, ly, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
    int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride + mv_off;
    int is16x16 = 1;

    if(thirdpel){
        /* split third-pel vectors into integer (mx/my) and fractional (lx/ly)
         * parts; the (3 << 24) bias keeps the dividend non-negative so that
         * division and modulo round consistently for negative vectors */
        mx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) / 3 - (1 << 24);
        my = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) / 3 - (1 << 24);
        lx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) % 3;
        ly = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) % 3;
        uvmx = chroma_coeffs[(3*(mx&1) + lx) >> 1];
        uvmy = chroma_coeffs[(3*(my&1) + ly) >> 1];
    }else{
        /* quarter-pel vectors: integer part plus a 2-bit fraction */
        mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] >> 2;
        my = s->current_picture_ptr->motion_val[dir][mv_pos][1] >> 2;
        lx = s->current_picture_ptr->motion_val[dir][mv_pos][0] & 3;
        ly = s->current_picture_ptr->motion_val[dir][mv_pos][1] & 3;
        uvmx = mx & 6;
        uvmy = my & 6;
    }
    dxy = ly*4 + lx; /* subpel position index into the qpel function table */
    srcY = dir ? s->next_picture_ptr->data[0] : s->last_picture_ptr->data[0];
    srcU = dir ? s->next_picture_ptr->data[1] : s->last_picture_ptr->data[1];
    srcV = dir ? s->next_picture_ptr->data[2] : s->last_picture_ptr->data[2];
    src_x = s->mb_x * 16 + xoff + mx;
    src_y = s->mb_y * 16 + yoff + my;
    uvsrc_x = s->mb_x * 8 + (xoff >> 1) + (mx >> 1);
    uvsrc_y = s->mb_y * 8 + (yoff >> 1) + (my >> 1);
    srcY += src_y * s->linesize + src_x;
    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
    /* if the source block (plus a 2-pixel interpolation margin when the
     * vector has a fractional part) crosses the picture border, interpolate
     * from an edge-emulated copy instead */
    if(   (unsigned)(src_x - !!lx*2) > s->h_edge_pos - !!lx*2 - (width <<3) - 3
       || (unsigned)(src_y - !!ly*2) > s->v_edge_pos - !!ly*2 - (height<<3) - 3){
        uint8_t *uvbuf= s->edge_emu_buffer + 20 * s->linesize;

        srcY -= 2 + 2*s->linesize;
        ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, (width<<3)+4, (height<<3)+4,
                            src_x - 2, src_y - 2, s->h_edge_pos, s->v_edge_pos);
        srcY = s->edge_emu_buffer + 2 + 2*s->linesize;
        ff_emulated_edge_mc(uvbuf     , srcU, s->uvlinesize, (width<<2)+1, (height<<2)+1,
                            uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, (width<<2)+1, (height<<2)+1,
                            uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        srcU = uvbuf;
        srcV = uvbuf + 16;
    }
    Y = s->dest[0] + xoff      + yoff     *s->linesize;
    U = s->dest[1] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
    V = s->dest[2] + (xoff>>1) + (yoff>>1)*s->uvlinesize;

    /* 16x8 and 8x16 partitions are interpolated as two 8-pixel halves:
     * the first half here, the second by the common call below */
    if(block_type == RV34_MB_P_16x8){
        qpel_mc[1][dxy](Y, srcY, s->linesize);
        Y    += 8;
        srcY += 8;
    }else if(block_type == RV34_MB_P_8x16){
        qpel_mc[1][dxy](Y, srcY, s->linesize);
        Y    += 8 * s->linesize;
        srcY += 8 * s->linesize;
    }
    is16x16 = (block_type != RV34_MB_P_8x8) && (block_type != RV34_MB_P_16x8) && (block_type != RV34_MB_P_8x16);
    qpel_mc[!is16x16][dxy](Y, srcY, s->linesize);
    /* chroma block is width*4 pixels wide, so index 2-width selects the
     * matching chroma put function; height*4 is the chroma height in pixels */
    chroma_mc[2-width]   (U, srcU, s->uvlinesize, height*4, uvmx, uvmy);
    chroma_mc[2-width]   (V, srcV, s->uvlinesize, height*4, uvmx, uvmy);
}

/**
 * Motion compensation with a single vector, choosing third-pel (RV30) or
 * quarter-pel (RV40) luma interpolation from r->rv30.
 */
static void rv34_mc_1mv(RV34DecContext *r, const int block_type,
                        const int xoff, const int yoff, int mv_off,
                        const int width, const int height, int dir)
{
    rv34_mc(r, block_type, xoff, yoff, mv_off, width, height, dir, r->rv30,
            r->rv30 ? r->s.dsp.put_rv30_tpel_pixels_tab
                    : r->s.dsp.put_h264_qpel_pixels_tab,
            r->s.dsp.put_h264_chroma_pixels_tab);
}

static void rv34_mc_2mv(RV34DecContext *r, const int block_type)
{
    rv34_mc(r, block_type, 0, 0, 0, 2, 2, 0, r->rv30,
            r->rv30 ? r->s.dsp.put_rv30_tpel_pixels_tab

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -