📄 cavs.c
    int align;
    align = (-get_bits_count(gb)) & 7;
    if((show_bits_long(gb,24+align) & 0xFFFFFF) == 0x000001) {
        get_bits_long(gb,24+align);
        h->stc = get_bits(gb,8);
        decode_slice_header(h,gb);
    }
}

/*****************************************************************************
 *
 * frame level
 *
 ****************************************************************************/

static void init_pic(AVSContext *h) {
    int i;

    /* clear some predictors */
    for(i=0;i<=20;i+=4)
        h->mv[i] = un_mv;
    h->mv[MV_BWD_X0] = dir_mv;
    set_mvs(&h->mv[MV_BWD_X0], BLK_16X16);
    h->mv[MV_FWD_X0] = dir_mv;
    set_mvs(&h->mv[MV_FWD_X0], BLK_16X16);
    h->pred_mode_Y[3] = h->pred_mode_Y[6] = NOT_AVAIL;
    h->cy = h->picture.data[0];
    h->cu = h->picture.data[1];
    h->cv = h->picture.data[2];
    h->l_stride = h->picture.linesize[0];
    h->c_stride = h->picture.linesize[1];
    h->luma_scan[2] = 8*h->l_stride;
    h->luma_scan[3] = 8*h->l_stride+8;
    h->mbx = h->mby = 0;
    h->flags = 0;
}

static int decode_pic(AVSContext *h) {
    MpegEncContext *s = &h->s;
    int skip_count;
    enum mb_t mb_type;

    if (!s->context_initialized) {
        s->avctx->idct_algo = FF_IDCT_CAVS;
        if (MPV_common_init(s) < 0)
            return -1;
        ff_init_scantable(s->dsp.idct_permutation,&h->scantable,ff_zigzag_direct);
    }
    get_bits(&s->gb,16);//bbv_dwlay
    if(h->stc == PIC_PB_START_CODE) {
        h->pic_type = get_bits(&s->gb,2) + FF_I_TYPE;
        if(h->pic_type > FF_B_TYPE) {
            av_log(s->avctx, AV_LOG_ERROR, "illegal picture type\n");
            return -1;
        }
        /* make sure we have the reference frames we need */
        if(!h->DPB[0].data[0] ||
           (!h->DPB[1].data[0] && h->pic_type == FF_B_TYPE))
            return -1;
    } else {
        h->pic_type = FF_I_TYPE;
        if(get_bits1(&s->gb))
            get_bits(&s->gb,16);//time_code
    }
    /* release last B frame */
    if(h->picture.data[0])
        s->avctx->release_buffer(s->avctx, (AVFrame *)&h->picture);

    s->avctx->get_buffer(s->avctx, (AVFrame *)&h->picture);
    init_pic(h);
    h->picture.poc = get_bits(&s->gb,8)*2;

    /* get temporal distances and MV scaling factors */
    if(h->pic_type != FF_B_TYPE) {
        h->dist[0] = (h->picture.poc - h->DPB[0].poc + 512) % 512;
    } else {
        h->dist[0] = (h->DPB[0].poc - h->picture.poc + 512) % 512;
    }
    h->dist[1] = (h->picture.poc - h->DPB[1].poc + 512) % 512;
    h->scale_den[0] = h->dist[0] ? 512/h->dist[0] : 0;
    h->scale_den[1] = h->dist[1] ? 512/h->dist[1] : 0;
    if(h->pic_type == FF_B_TYPE) {
        h->sym_factor = h->dist[0]*h->scale_den[1];
    } else {
        h->direct_den[0] = h->dist[0] ? 16384/h->dist[0] : 0;
        h->direct_den[1] = h->dist[1] ? 16384/h->dist[1] : 0;
    }
    if(s->low_delay)
        get_ue_golomb(&s->gb); //bbv_check_times
    h->progressive = get_bits1(&s->gb);
    if(h->progressive)
        h->pic_structure = 1;
    else if(!(h->pic_structure = get_bits1(&s->gb) && (h->stc == PIC_PB_START_CODE)) )
        get_bits1(&s->gb);     //advanced_pred_mode_disable
    skip_bits1(&s->gb);        //top_field_first
    skip_bits1(&s->gb);        //repeat_first_field
    h->qp_fixed = get_bits1(&s->gb);
    h->qp       = get_bits(&s->gb,6);
    if(h->pic_type == FF_I_TYPE) {
        if(!h->progressive && !h->pic_structure)
            skip_bits1(&s->gb);//what is this?
        skip_bits(&s->gb,4);   //reserved bits
    } else {
        if(!(h->pic_type == FF_B_TYPE && h->pic_structure == 1))
            h->ref_flag = get_bits1(&s->gb);
        skip_bits(&s->gb,4);   //reserved bits
        h->skip_mode_flag = get_bits1(&s->gb);
    }
    h->loop_filter_disable = get_bits1(&s->gb);
    if(!h->loop_filter_disable && get_bits1(&s->gb)) {
        h->alpha_offset = get_se_golomb(&s->gb);
        h->beta_offset  = get_se_golomb(&s->gb);
    } else {
        h->alpha_offset = h->beta_offset = 0;
    }
    check_for_slice(h);
    if(h->pic_type == FF_I_TYPE) {
        do {
            decode_mb_i(h, 0);
        } while(next_mb(h));
    } else if(h->pic_type == FF_P_TYPE) {
        do {
            if(h->skip_mode_flag) {
                skip_count = get_ue_golomb(&s->gb);
                while(skip_count--) {
                    decode_mb_p(h,P_SKIP);
                    if(!next_mb(h))
                        goto done;
                }
                mb_type = get_ue_golomb(&s->gb) + P_16X16;
            } else
                mb_type = get_ue_golomb(&s->gb) + P_SKIP;
            if(mb_type > P_8X8) {
                decode_mb_i(h, mb_type - P_8X8 - 1);
            } else
                decode_mb_p(h,mb_type);
        } while(next_mb(h));
    } else { /* FF_B_TYPE */
        do {
            if(h->skip_mode_flag) {
                skip_count = get_ue_golomb(&s->gb);
                while(skip_count--) {
                    decode_mb_b(h,B_SKIP);
                    if(!next_mb(h))
                        goto done;
                }
                mb_type = get_ue_golomb(&s->gb) + B_DIRECT;
            } else
                mb_type = get_ue_golomb(&s->gb) + B_SKIP;
            if(mb_type > B_8X8) {
                decode_mb_i(h, mb_type - B_8X8 - 1);
            } else
                decode_mb_b(h,mb_type);
        } while(next_mb(h));
    }
 done:
    if(h->pic_type != FF_B_TYPE) {
        if(h->DPB[1].data[0])
            s->avctx->release_buffer(s->avctx, (AVFrame *)&h->DPB[1]);
        memcpy(&h->DPB[1], &h->DPB[0], sizeof(Picture));
        memcpy(&h->DPB[0], &h->picture, sizeof(Picture));
        memset(&h->picture, 0, sizeof(Picture));
    }
    return 0;
}

/*****************************************************************************
 *
 * headers and interface
 *
 ****************************************************************************/

/**
 * some predictions require data from the top-neighbouring macroblock.
 * this data has to be stored for one complete row of macroblocks
 * and this storage space is allocated here
 */
static void init_top_lines(AVSContext *h) {
    /* alloc top line of predictors */
    h->top_qp       = av_malloc( h->mb_width);
    h->top_mv[0]    = av_malloc((h->mb_width*2+1)*sizeof(vector_t));
    h->top_mv[1]    = av_malloc((h->mb_width*2+1)*sizeof(vector_t));
    h->top_pred_Y   = av_malloc( h->mb_width*2*sizeof(*h->top_pred_Y));
    h->top_border_y = av_malloc((h->mb_width+1)*16);
    h->top_border_u = av_malloc((h->mb_width)*10);
    h->top_border_v = av_malloc((h->mb_width)*10);

    /* alloc space for co-located MVs and types */
    h->col_mv        = av_malloc( h->mb_width*h->mb_height*4*sizeof(vector_t));
    h->col_type_base = av_malloc(h->mb_width*h->mb_height);
    h->block         = av_mallocz(64*sizeof(DCTELEM));
}

static int decode_seq_header(AVSContext *h) {
    MpegEncContext *s = &h->s;
    int frame_rate_code;

    h->profile = get_bits(&s->gb,8);
    h->level   = get_bits(&s->gb,8);
    skip_bits1(&s->gb); //progressive sequence
    s->width  = get_bits(&s->gb,14);
    s->height = get_bits(&s->gb,14);
    skip_bits(&s->gb,2); //chroma format
    skip_bits(&s->gb,3); //sample_precision
    h->aspect_ratio = get_bits(&s->gb,4);
    frame_rate_code = get_bits(&s->gb,4);
    skip_bits(&s->gb,18);//bit_rate_lower
    skip_bits1(&s->gb);  //marker_bit
    skip_bits(&s->gb,12);//bit_rate_upper
    s->low_delay = get_bits1(&s->gb);
    h->mb_width  = (s->width  + 15) >> 4;
    h->mb_height = (s->height + 15) >> 4;
    h->s.avctx->time_base.den = ff_frame_rate_tab[frame_rate_code].num;
    h->s.avctx->time_base.num = ff_frame_rate_tab[frame_rate_code].den;
    h->s.avctx->width  = s->width;
    h->s.avctx->height = s->height;
    if(!h->top_qp)
        init_top_lines(h);
    return 0;
}

static void cavs_flush(AVCodecContext * avctx) {
    AVSContext *h = avctx->priv_data;
    h->got_keyframe = 0;
}

static int cavs_decode_frame(AVCodecContext * avctx, void *data, int *data_size,
                             uint8_t * buf, int buf_size) {
    AVSContext *h = avctx->priv_data;
    MpegEncContext *s = &h->s;
    int input_size;
    const uint8_t *buf_end;
    const uint8_t *buf_ptr;
    AVFrame *picture = data;
    uint32_t stc;

    s->avctx = avctx;

    if (buf_size == 0) {
        if(!s->low_delay && h->DPB[0].data[0]) {
            *data_size = sizeof(AVPicture);
            *picture = *(AVFrame *) &h->DPB[0];
        }
        return 0;
    }

    buf_ptr = buf;
    buf_end = buf + buf_size;
    for(;;) {
        buf_ptr = ff_find_start_code(buf_ptr, buf_end, &stc);
        if(stc & 0xFFFFFE00)
            return FFMAX(0, buf_ptr - buf - s->parse_context.last_index);
        input_size = (buf_end - buf_ptr)*8;
        switch(stc) {
        case CAVS_START_CODE:
            init_get_bits(&s->gb, buf_ptr, input_size);
            decode_seq_header(h);
            break;
        case PIC_I_START_CODE:
            if(!h->got_keyframe) {
                if(h->DPB[0].data[0])
                    avctx->release_buffer(avctx, (AVFrame *)&h->DPB[0]);
                if(h->DPB[1].data[0])
                    avctx->release_buffer(avctx, (AVFrame *)&h->DPB[1]);
                h->got_keyframe = 1;
            }
        case PIC_PB_START_CODE:
            *data_size = 0;
            if(!h->got_keyframe)
                break;
            init_get_bits(&s->gb, buf_ptr, input_size);
            h->stc = stc;
            if(decode_pic(h))
                break;
            *data_size = sizeof(AVPicture);
            if(h->pic_type != FF_B_TYPE) {
                if(h->DPB[1].data[0]) {
                    *picture = *(AVFrame *) &h->DPB[1];
                } else {
                    *data_size = 0;
                }
            } else
                *picture = *(AVFrame *) &h->picture;
            break;
        case EXT_START_CODE:
            //mpeg_decode_extension(avctx, buf_ptr, input_size);
            break;
        case USER_START_CODE:
            //mpeg_decode_user_data(avctx, buf_ptr, input_size);
            break;
        default:
            if (stc >= SLICE_MIN_START_CODE && stc <= SLICE_MAX_START_CODE) {
                init_get_bits(&s->gb, buf_ptr, input_size);
                decode_slice_header(h, &s->gb);
            }
            break;
        }
    }
}

static int cavs_decode_init(AVCodecContext * avctx) {
    AVSContext *h = avctx->priv_data;
    MpegEncContext * const s = &h->s;
    MPV_decode_defaults(s);
    s->avctx = avctx;

    avctx->pix_fmt = PIX_FMT_YUV420P;

    h->luma_scan[0] = 0;
    h->luma_scan[1] = 8;
    h->intra_pred_l[      INTRA_L_VERT] = intra_pred_vert;
    h->intra_pred_l[     INTRA_L_HORIZ] = intra_pred_horiz;
    h->intra_pred_l[        INTRA_L_LP] = intra_pred_lp;
    h->intra_pred_l[ INTRA_L_DOWN_LEFT] = intra_pred_down_left;
    h->intra_pred_l[INTRA_L_DOWN_RIGHT] = intra_pred_down_right;
    h->intra_pred_l[   INTRA_L_LP_LEFT] = intra_pred_lp_left;
    h->intra_pred_l[    INTRA_L_LP_TOP] = intra_pred_lp_top;
    h->intra_pred_l[    INTRA_L_DC_128] = intra_pred_dc_128;
    h->intra_pred_c[        INTRA_C_LP] = intra_pred_lp;
    h->intra_pred_c[     INTRA_C_HORIZ] = intra_pred_horiz;
    h->intra_pred_c[      INTRA_C_VERT] = intra_pred_vert;
    h->intra_pred_c[     INTRA_C_PLANE] = intra_pred_plane;
    h->intra_pred_c[   INTRA_C_LP_LEFT] = intra_pred_lp_left;
    h->intra_pred_c[    INTRA_C_LP_TOP] = intra_pred_lp_top;
    h->intra_pred_c[    INTRA_C_DC_128] = intra_pred_dc_128;
    h->mv[ 7] = un_mv;
    h->mv[19] = un_mv;
    return 0;
}

static int cavs_decode_end(AVCodecContext * avctx) {
    AVSContext *h = avctx->priv_data;

    av_free(h->top_qp);
    av_free(h->top_mv[0]);
    av_free(h->top_mv[1]);
    av_free(h->top_pred_Y);
    av_free(h->top_border_y);
    av_free(h->top_border_u);
    av_free(h->top_border_v);
    av_free(h->col_mv);
    av_free(h->col_type_base);
    av_free(h->block);
    return 0;
}

AVCodec cavs_decoder = {
    "cavs",
    CODEC_TYPE_VIDEO,
    CODEC_ID_CAVS,
    sizeof(AVSContext),
    cavs_decode_init,
    NULL,
    cavs_decode_end,
    cavs_decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DELAY,
    NULL,
    /*.flush =*/ cavs_flush,
};
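
The decoder is reached only through the cavs_decoder table: libavcodec calls cavs_decode_init() on open, cavs_decode_frame() per input buffer, and cavs_decode_end() on close, and because CODEC_CAP_DELAY is set each picture is returned one call late and has to be flushed with an empty buffer (the buf_size == 0 path in cavs_decode_frame()). Below is a minimal caller sketch using the libavcodec API of the same era; it is not part of cavs.c, and CAVS_BUF_SIZE and decode_cavs_file() are hypothetical names introduced here for illustration.

/*
 * Illustrative caller sketch (assumption: old libavcodec API of this era;
 * CAVS_BUF_SIZE and decode_cavs_file() are hypothetical).
 */
#include <stdio.h>
#include <ffmpeg/avcodec.h>      /* header path depends on the installation */

#define CAVS_BUF_SIZE 65536      /* hypothetical raw elementary-stream chunk size */

static int decode_cavs_file(FILE *in)
{
    AVCodec *codec;
    AVCodecContext *ctx;
    AVFrame *frame;
    uint8_t buf[CAVS_BUF_SIZE];
    int size, got_picture;

    avcodec_register_all();                        /* registers cavs_decoder among others */
    codec = avcodec_find_decoder(CODEC_ID_CAVS);   /* the id declared in cavs_decoder */
    if (!codec)
        return -1;
    ctx   = avcodec_alloc_context();
    frame = avcodec_alloc_frame();
    if (avcodec_open(ctx, codec) < 0)              /* runs cavs_decode_init() */
        return -1;

    while ((size = fread(buf, 1, CAVS_BUF_SIZE, in)) > 0) {
        /* cavs_decode_frame() scans for start codes itself, so raw ES bytes are fed
         * here; a real player would use a parser to pass one picture per call */
        if (avcodec_decode_video(ctx, frame, &got_picture, buf, size) < 0)
            break;
        if (got_picture) {
            /* frame->data[0..2] now hold the decoded YUV420P planes */
        }
    }
    /* CODEC_CAP_DELAY: one picture is still buffered; flush it with an empty buffer */
    avcodec_decode_video(ctx, frame, &got_picture, NULL, 0);

    avcodec_close(ctx);
    av_free(ctx);
    av_free(frame);
    return 0;
}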