/*
 * rv34.c
 * (The original viewer header "📄 rv34.c / 字号:" — file icon and "font size"
 *  label — was UI chrome captured during extraction, not source code.)
 */
/**
 * Add the decoded residual differences on top of the already-predicted
 * macroblock pixels (inter reconstruction path).
 *
 * @param r    decoder context
 * @param cbp  coded block pattern: low 16 bits select luma 4x4 blocks
 *             (tested per 8x8 via LUMA_CBP_BLOCK_MASK shifted by 0/2/8/10),
 *             U_CBP_MASK / V_CBP_MASK select the chroma blocks
 */
static void rv34_apply_differences(RV34DecContext *r, int cbp)
{
    // bit offsets of the four luma 8x8 sub-blocks inside cbp
    static const int shifts[4] = { 0, 2, 8, 10 };
    MpegEncContext *s = &r->s;
    int i;

    for(i = 0; i < 4; i++)
        if(cbp & (LUMA_CBP_BLOCK_MASK << shifts[i]))
            // (i & 1)*8 selects the horizontal half, (i&2)*4*linesize the vertical half
            s->dsp.add_pixels_clamped(s->block[i], s->dest[0] + (i & 1)*8 + (i&2)*4*s->linesize, s->linesize);
    if(cbp & U_CBP_MASK)
        s->dsp.add_pixels_clamped(s->block[4], s->dest[1], s->uvlinesize);
    if(cbp & V_CBP_MASK)
        s->dsp.add_pixels_clamped(s->block[5], s->dest[2], s->uvlinesize);
}

/**
 * Decode one macroblock: header, residual coefficients, dequantization and
 * inverse transform, then either intra reconstruction or adding differences
 * to the motion-compensated prediction.
 *
 * @param r            decoder context
 * @param intra_types  pointer into the intra prediction type cache for this MB row
 * @return 0 on success, -1 on header decoding failure
 */
static int rv34_decode_macroblock(RV34DecContext *r, int8_t *intra_types)
{
    MpegEncContext *s = &r->s;
    GetBitContext *gb = &s->gb;
    int cbp, cbp2;
    int i, blknum, blkoff;
    DCTELEM block16[64];      // separate buffer for the 16x16 luma DC plane
    int luma_dc_quant;
    int dist;                 // distance (in MBs) from the slice resync point
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;

    // Calculate which neighbours are available. Maybe it's worth optimizing too.
    // Neighbours are only usable if they belong to the same slice
    // (i.e. lie between the resync point and the current MB).
    memset(r->avail_cache, 0, sizeof(r->avail_cache));
    fill_rectangle(r->avail_cache + 5, 2, 2, 4, 1, 4);
    dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
    if(s->mb_x && dist)
        r->avail_cache[4] =
        r->avail_cache[8] = s->current_picture_ptr->mb_type[mb_pos - 1];          // left
    if(dist >= s->mb_width)
        r->avail_cache[1] =
        r->avail_cache[2] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride]; // top
    if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
        r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1]; // top-right
    if(s->mb_x && dist > s->mb_width)
        r->avail_cache[0] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1]; // top-left

    s->qscale = r->si.quant;
    cbp = cbp2 = rv34_decode_mb_header(r, intra_types);
    r->cbp_luma [s->mb_x + s->mb_y * s->mb_stride] = cbp;
    r->cbp_chroma[s->mb_x + s->mb_y * s->mb_stride] = cbp >> 16;
    s->current_picture.qscale_table[s->mb_x + s->mb_y * s->mb_stride] = s->qscale;
    if(cbp == -1)
        return -1;

    // DC quantizer differs between P and I slices
    luma_dc_quant = r->si.type ? r->luma_dc_quant_p[s->qscale] : r->luma_dc_quant_i[s->qscale];
    if(r->is16){
        // 16x16 intra mode: decode the separate 4x4 DC block first
        memset(block16, 0, sizeof(block16));
        rv34_decode_block(block16, gb, r->cur_vlcs, 3, 0);
        rv34_dequant4x4_16x16(block16, rv34_qscale_tab[luma_dc_quant],rv34_qscale_tab[s->qscale]);
        rv34_inv_transform_noround(block16);
    }

    // luma: sixteen 4x4 blocks, one cbp bit each
    for(i = 0; i < 16; i++, cbp >>= 1){
        if(!r->is16 && !(cbp & 1)) continue;
        // map 4x4 block index i to (8x8 block number, offset within it)
        blknum = ((i & 2) >> 1) + ((i & 8) >> 2);
        blkoff = ((i & 1) << 2) + ((i & 4) << 3);
        if(cbp & 1)
            rv34_decode_block(s->block[blknum] + blkoff, gb, r->cur_vlcs, r->luma_vlc, 0);
        rv34_dequant4x4(s->block[blknum] + blkoff, rv34_qscale_tab[luma_dc_quant],rv34_qscale_tab[s->qscale]);
        if(r->is16) //FIXME: optimize
            // substitute the DC coefficient decoded in the separate 16x16 DC pass
            s->block[blknum][blkoff] = block16[(i & 3) | ((i & 0xC) << 1)];
        rv34_inv_transform(s->block[blknum] + blkoff);
    }
    // mixed-mode P macroblocks switch to intra VLC tables for chroma
    if(r->block_type == RV34_MB_P_MIX16x16)
        r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1);
    // chroma: eight 4x4 blocks (U then V)
    for(; i < 24; i++, cbp >>= 1){
        if(!(cbp & 1)) continue;
        blknum = ((i & 4) >> 2) + 4;
        blkoff = ((i & 1) << 2) + ((i & 2) << 4);
        rv34_decode_block(s->block[blknum] + blkoff, gb, r->cur_vlcs, r->chroma_vlc, 1);
        rv34_dequant4x4(s->block[blknum] + blkoff, rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]],rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]]);
        rv34_inv_transform(s->block[blknum] + blkoff);
    }

    if(IS_INTRA(s->current_picture_ptr->mb_type[s->mb_x + s->mb_y*s->mb_stride]))
        rv34_output_macroblock(r, intra_types, cbp2, r->is16);
    else
        rv34_apply_differences(r, cbp2);

    return 0;
}

/**
 * Check whether the current slice is finished.
 *
 * @return 1 when the slice end is reached (frame done, no MBs left, or the
 *         remaining bits are only zero padding), 0 otherwise
 */
static int check_slice_end(RV34DecContext *r, MpegEncContext *s)
{
    int bits;
    if(s->mb_y >= s->mb_height)
        return 1;
    if(!s->mb_num_left)
        return 1;
    // a pending skip run means more MBs follow even without more bits
    if(r->s.mb_skip_run > 1)
        return 0;
    bits = r->bits - get_bits_count(&s->gb);
    // fewer than 8 bits left and all of them zero => only padding remains
    if(bits < 0 || (bits < 8 && !show_bits(&s->gb, bits)))
        return 1;
    return 0;
}

/**
 * Compare two slice headers; nonzero means they belong to different frames
 * (type/dimension/pts mismatch, or MB start offset did not advance).
 */
static inline int slice_compare(SliceInfo *si1, SliceInfo *si2)
{
    return si1->type != si2->type ||
           si1->start >= si2->start ||
           si1->width != si2->width ||
           si1->height != si2->height||
           si1->pts != si2->pts;
}

static int
rv34_decode_slice(RV34DecContext *r, int end, uint8_t* buf, int buf_size)
{
    // Decode one slice: parse its header, (re)initialize the frame on the
    // first slice, then decode macroblocks until the slice ends.
    // Returns 1 when the frame is complete, 0 if more slices are expected,
    // -1 on a bad slice header or macroblock error.
    MpegEncContext *s = &r->s;
    GetBitContext *gb = &s->gb;
    int mb_pos;
    int res;

    init_get_bits(&r->s.gb, buf, buf_size*8);
    res = r->parse_slice_header(r, gb, &r->si);
    if(res < 0){
        av_log(s->avctx, AV_LOG_ERROR, "Incorrect or unknown slice header\n");
        return -1;
    }

    // first slice of a frame (or no current picture yet): start a new frame
    if ((s->mb_x == 0 && s->mb_y == 0) || s->current_picture_ptr==NULL) {
        if(s->width != r->si.width || s->height != r->si.height){
            // mid-stream resolution change: reinit MPV and resize side buffers
            av_log(s->avctx, AV_LOG_DEBUG, "Changing dimensions to %dx%d\n", r->si.width,r->si.height);
            MPV_common_end(s);
            s->width = r->si.width;
            s->height = r->si.height;
            if(MPV_common_init(s) < 0)
                return -1;
            r->intra_types_hist = av_realloc(r->intra_types_hist, s->b4_stride * 4 * 2 * sizeof(*r->intra_types_hist));
            r->intra_types = r->intra_types_hist + s->b4_stride * 4;
            r->mb_type = av_realloc(r->mb_type, r->s.mb_stride * r->s.mb_height * sizeof(*r->mb_type));
            r->cbp_luma = av_realloc(r->cbp_luma, r->s.mb_stride * r->s.mb_height * sizeof(*r->cbp_luma));
            r->cbp_chroma = av_realloc(r->cbp_chroma, r->s.mb_stride * r->s.mb_height * sizeof(*r->cbp_chroma));
        }
        s->pict_type = r->si.type ? r->si.type : FF_I_TYPE;
        if(MPV_frame_start(s, s->avctx) < 0)
            return -1;
        ff_er_frame_start(s);
        s->current_picture_ptr = &s->current_picture;
        r->cur_pts = r->si.pts;
        if(s->pict_type != FF_B_TYPE){
            // non-B frames shift the reference timestamp window
            r->last_pts = r->next_pts;
            r->next_pts = r->cur_pts;
        }
        s->mb_x = s->mb_y = 0;
    }

    r->si.end = end;
    s->qscale = r->si.quant;
    r->bits = buf_size*8;
    s->mb_num_left = r->si.end - r->si.start;
    r->s.mb_skip_run = 0;

    mb_pos = s->mb_x + s->mb_y * s->mb_width;
    if(r->si.start != mb_pos){
        // slice header disagrees with our position — trust the header and resync
        av_log(s->avctx, AV_LOG_ERROR, "Slice indicates MB offset %d, got %d\n", r->si.start, mb_pos);
        s->mb_x = r->si.start % s->mb_width;
        s->mb_y = r->si.start / s->mb_width;
    }
    // invalidate the intra type history at the slice boundary
    memset(r->intra_types_hist, -1, s->b4_stride * 4 * 2 * sizeof(*r->intra_types_hist));
    s->first_slice_line = 1;
    s->resync_mb_x= s->mb_x;
    s->resync_mb_y= s->mb_y;

    ff_init_block_index(s);
    while(!check_slice_end(r, s)) {
        ff_update_block_index(s);
        s->dsp.clear_blocks(s->block[0]);

        if(rv34_decode_macroblock(r, r->intra_types + s->mb_x * 4 + 1) < 0){
            ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, AC_ERROR|DC_ERROR|MV_ERROR);
            return -1;
        }
        if (++s->mb_x == s->mb_width) {
            // row finished: roll the intra type cache (current row becomes history)
            s->mb_x = 0;
            s->mb_y++;
            ff_init_block_index(s);

            memmove(r->intra_types_hist, r->intra_types, s->b4_stride * 4 * sizeof(*r->intra_types_hist));
            memset(r->intra_types, -1, s->b4_stride * 4 * sizeof(*r->intra_types_hist));
        }
        if(s->mb_x == s->resync_mb_x)
            s->first_slice_line=0;
        s->mb_num_left--;
    }
    ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, AC_END|DC_END|MV_END);

    return s->mb_y == s->mb_height;
}
/** @} */ // recons group end

/**
 * Initialize decoder.
*/av_cold int ff_rv34_decode_init(AVCodecContext *avctx){ RV34DecContext *r = avctx->priv_data; MpegEncContext *s = &r->s; MPV_decode_defaults(s); s->avctx= avctx; s->out_format = FMT_H263; s->codec_id= avctx->codec_id; s->width = avctx->width; s->height = avctx->height; r->s.avctx = avctx; avctx->flags |= CODEC_FLAG_EMU_EDGE; r->s.flags |= CODEC_FLAG_EMU_EDGE; avctx->pix_fmt = PIX_FMT_YUV420P; avctx->has_b_frames = 1; s->low_delay = 0; if (MPV_common_init(s) < 0) return -1; ff_h264_pred_init(&r->h, CODEC_ID_RV40); r->intra_types_hist = av_malloc(s->b4_stride * 4 * 2 * sizeof(*r->intra_types_hist)); r->intra_types = r->intra_types_hist + s->b4_stride * 4; r->mb_type = av_mallocz(r->s.mb_stride * r->s.mb_height * sizeof(*r->mb_type)); r->cbp_luma = av_malloc(r->s.mb_stride * r->s.mb_height * sizeof(*r->cbp_luma)); r->cbp_chroma = av_malloc(r->s.mb_stride * r->s.mb_height * sizeof(*r->cbp_chroma)); if(!intra_vlcs[0].cbppattern[0].bits) rv34_init_tables(); return 0;}static int get_slice_offset(AVCodecContext *avctx, uint8_t *buf, int n){ if(avctx->slice_count) return avctx->slice_offset[n]; else return AV_RL32(buf + n*8 - 4) == 1 ? 
               AV_RL32(buf + n*8) : AV_RB32(buf + n*8);
}

/**
 * Decode one packet: split it into slices, decode each slice, and output a
 * finished frame when the last slice completes the picture.
 *
 * @return buf_size on success (0-byte input flushes the delayed frame)
 */
int ff_rv34_decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size)
{
    RV34DecContext *r = avctx->priv_data;
    MpegEncContext *s = &r->s;
    AVFrame *pict = data;
    SliceInfo si;
    int i;
    int slice_count;
    uint8_t *slices_hdr = NULL;
    int last = 0;

    /* no supplementary picture */
    if (buf_size == 0) {
        /* special case for last picture */
        if (s->low_delay==0 && s->next_picture_ptr) {
            *pict= *(AVFrame*)s->next_picture_ptr;
            s->next_picture_ptr= NULL;
            *data_size = sizeof(AVFrame);
        }
        return 0;
    }

    if(!avctx->slice_count){
        // in-band slice table: 1 count byte, then 8 bytes per slice
        slice_count = (*buf++) + 1;
        slices_hdr = buf + 4;
        buf += 8 * slice_count;
    }else
        slice_count = avctx->slice_count;

    for(i=0; i<slice_count; i++){
        int offset= get_slice_offset(avctx, slices_hdr, i);
        int size;
        if(i+1 == slice_count)
            size= buf_size - offset;
        else
            size= get_slice_offset(avctx, slices_hdr, i+1) - offset;

        r->si.end = s->mb_width * s->mb_height;
        if(i+1 < slice_count){
            // peek at the next slice header to learn where this slice ends;
            // if it is broken, merge the next slice's bytes into this one
            init_get_bits(&s->gb, buf+get_slice_offset(avctx, slices_hdr, i+1), (buf_size-get_slice_offset(avctx, slices_hdr, i+1))*8);
            if(r->parse_slice_header(r, &r->s.gb, &si) < 0){
                if(i+2 < slice_count)
                    size = get_slice_offset(avctx, slices_hdr, i+2) - offset;
                else
                    size = buf_size - offset;
            }else
                r->si.end = si.start;
        }
        last = rv34_decode_slice(r, r->si.end, buf + offset, size);
        s->mb_num_left = r->s.mb_x + r->s.mb_y*r->s.mb_width - r->si.start;
        if(last)
            break;
    }

    if(last){
        // frame complete: post-process and select the picture to output
        if(r->loop_filter)
            r->loop_filter(r);
        ff_er_frame_end(s);
        MPV_frame_end(s);
        if (s->pict_type == FF_B_TYPE || s->low_delay) {
            *pict= *(AVFrame*)s->current_picture_ptr;
        } else if (s->last_picture_ptr != NULL) {
            // reference frames are output with one frame of delay
            *pict= *(AVFrame*)s->last_picture_ptr;
        }
        if(s->last_picture_ptr || s->low_delay){
            *data_size = sizeof(AVFrame);
            ff_print_debug_info(s, pict);
        }
        s->current_picture_ptr= NULL; // so we can detect if frame_end wasn't called (find some nicer solution...)
    }
    return buf_size;
}

/**
 * Free decoder resources. intra_types points into intra_types_hist, so only
 * the latter is freed.
 */
av_cold int ff_rv34_decode_end(AVCodecContext *avctx)
{
    RV34DecContext *r = avctx->priv_data;

    MPV_common_end(&r->s);

    av_freep(&r->intra_types_hist);
    r->intra_types = NULL;
    av_freep(&r->mb_type);

    return 0;
}
/*
 * (The following text is code-viewer UI chrome captured during extraction,
 *  not part of the source file — a keyboard-shortcut help panel:
 *  copy code Ctrl+C, search Ctrl+F, fullscreen F11, theme Ctrl+Shift+D,
 *  show shortcuts ?, increase font Ctrl+=, decrease font Ctrl+-.)
 */