📄 mpegvideo.c
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    if(s->picture){
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            free_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);
    s->context_initialized = 0;
    s->last_picture_ptr=
    s->next_picture_ptr=
    s->current_picture_ptr= NULL;
    s->linesize= s->uvlinesize= 0;

    for(i=0; i<3; i++)
        av_freep(&s->visualization_buffer[i]);

    avcodec_default_free_buffers(s->avctx);
}

void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
{
    int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
    uint8_t index_run[MAX_RUN+1];
    int last, run, level, start, end, i;

    /* If table is static, we can quit if rl->max_level[0] is not NULL */
    if(static_store && rl->max_level[0])
        return;

    /* compute max_level[], max_run[] and index_run[] */
    for(last=0;last<2;last++) {
        if (last == 0) {
            start = 0;
            end = rl->last;
        } else {
            start = rl->last;
            end = rl->n;
        }

        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1);
        for(i=start;i<end;i++) {
            run = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n)
                index_run[run] = i;
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        }
        if(static_store)
            rl->max_level[last] = static_store[last];
        else
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        if(static_store)
            rl->max_run[last] = static_store[last] + MAX_RUN + 1;
        else
            rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        if(static_store)
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        else
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
    }
}

void init_vlc_rl(RLTable *rl, int use_static)
{
    int i, q;

    /* Return if static table is already initialized */
    if(use_static && rl->rl_vlc[0])
        return;

    init_vlc(&rl->vlc, 9, rl->n + 1,
             &rl->table_vlc[0][1], 4, 2,
             &rl->table_vlc[0][0], 4, 2, use_static);

    for(q=0; q<32; q++){
        int qmul= q*2;
        int qadd= (q-1)|1;

        if(q==0){
            qmul=1;
            qadd=0;
        }
        if(use_static)
            rl->rl_vlc[q]= av_mallocz_static(rl->vlc.table_size*sizeof(RL_VLC_ELEM));
        else
            rl->rl_vlc[q]= av_malloc(rl->vlc.table_size*sizeof(RL_VLC_ELEM));
        for(i=0; i<rl->vlc.table_size; i++){
            int code= rl->vlc.table[i][0];
            int len = rl->vlc.table[i][1];
            int level, run;

            if(len==0){ // illegal code
                run= 66;
                level= MAX_LEVEL;
            }else if(len<0){ //more bits needed
                run= 0;
                level= code;
            }else{
                if(code==rl->n){ //esc
                    run= 66;
                    level= 0;
                }else{
                    run=   rl->table_run  [code] + 1;
                    level= rl->table_level[code] * qmul + qadd;
                    if(code >= rl->last) run+=192;
                }
            }
            rl->rl_vlc[q][i].len= len;
            rl->rl_vlc[q][i].level= level;
            rl->rl_vlc[q][i].run= run;
        }
    }
}

/* draw the edges of width 'w' of an image of size width, height */
//FIXME check that this is ok for mpeg4 interlaced
static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    for(i=0;i<w;i++) {
        /* top and bottom */
        memcpy(buf - (i + 1) * wrap, buf, width);
        memcpy(last_line + (i + 1) * wrap, last_line, width);
    }
    /* left and right */
    ptr = buf;
    for(i=0;i<height;i++) {
        memset(ptr - w, ptr[0], w);
        memset(ptr + width, ptr[width-1], w);
        ptr += wrap;
    }
    /* corners */
    for(i=0;i<w;i++) {
        memset(buf - (i + 1) * wrap - w, buf[0], w); /* top left */
        memset(buf - (i + 1) * wrap + width, buf[width-1], w); /* top right */
        memset(last_line + (i + 1) * wrap - w, last_line[0], w); /* bottom left */
        memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* bottom right */
    }
}

int ff_find_unused_picture(MpegEncContext *s, int shared){
    int i;

    if(shared){
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
        }
    }else{
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
        }
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0]==NULL) return i;
        }
    }

    assert(0);
    return -1;
}

static void update_noise_reduction(MpegEncContext *s){
    int intra, i;

    for(intra=0; intra<2; intra++){
        if(s->dct_count[intra] > (1<<16)){
            for(i=0; i<64; i++){
                s->dct_error_sum[intra][i] >>=1;
            }
            s->dct_count[intra] >>= 1;
        }

        for(i=0; i<64; i++){
            s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
        }
    }
}

/**
 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
 */
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i;
    AVFrame *pic;
    s->mb_skipped = 0;

    assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);

    /* mark&release old frames */
    if (s->pict_type != B_TYPE && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
      if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
        avctx->release_buffer(avctx, (AVFrame*)s->last_picture_ptr);

        /* release forgotten pictures */
        /* if(mpeg124/h263) */
        if(!s->encoding){
            for(i=0; i<MAX_PICTURE_COUNT; i++){
                if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
                    av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
                    avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
                }
            }
        }
      }
    }
alloc:
    if(!s->encoding){
        /* release non reference frames */
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
                s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
            }
        }

        if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
            pic= (AVFrame*)s->current_picture_ptr; //we already have an unused image (maybe it was set before reading the header)
        else{
            i= ff_find_unused_picture(s, 0);
            pic= (AVFrame*)&s->picture[i];
        }

        pic->reference= 0;
        if (!s->dropable){
            if (s->codec_id == CODEC_ID_H264)
                pic->reference = s->picture_structure;
            else if (s->pict_type != B_TYPE)
                pic->reference = 3;
        }

        pic->coded_picture_number= s->coded_picture_number++;

        if( alloc_picture(s, (Picture*)pic, 0) < 0)
            return -1;

        s->current_picture_ptr= (Picture*)pic;
        s->current_picture_ptr->top_field_first= s->top_field_first; //FIXME use only the vars from current_pic
        s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
    }

    s->current_picture_ptr->pict_type= s->pict_type;
//    if(s->flags && CODEC_FLAG_QSCALE)
//        s->current_picture_ptr->quality= s->new_picture_ptr->quality;
    s->current_picture_ptr->key_frame= s->pict_type == I_TYPE;

    copy_picture(&s->current_picture, s->current_picture_ptr);

    if (s->pict_type != B_TYPE) {
        s->last_picture_ptr= s->next_picture_ptr;
        if(!s->dropable)
            s->next_picture_ptr= s->current_picture_ptr;
    }
/*    av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
        s->last_picture_ptr    ? s->last_picture_ptr->data[0] : NULL,
        s->next_picture_ptr    ? s->next_picture_ptr->data[0] : NULL,
        s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
        s->pict_type, s->dropable);*/

    if(s->last_picture_ptr) copy_picture(&s->last_picture, s->last_picture_ptr);
    if(s->next_picture_ptr) copy_picture(&s->next_picture, s->next_picture_ptr);

    if(s->pict_type != I_TYPE && (s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) && !s->dropable){
        av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
        assert(s->pict_type != B_TYPE); //these should have been dropped if we don't have a reference
        goto alloc;
    }

    assert(s->pict_type == I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));

    if(s->picture_structure!=PICT_FRAME){
        int i;
        for(i=0; i<4; i++){
            if(s->picture_structure == PICT_BOTTOM_FIELD){
                s->current_picture.data[i] += s->current_picture.linesize[i];
            }
            s->current_picture.linesize[i] *= 2;
            s->last_picture.linesize[i] *=2;
            s->next_picture.linesize[i] *=2;
        }
    }

    s->hurry_up= s->avctx->hurry_up;
    s->error_resilience= avctx->error_resilience;

    /* set dequantizer, we can't do it during init as it might change for mpeg4
       and we can't do it in the header decode as init is not called for mpeg4 there yet */
    if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    }else{
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if(s->dct_error_sum){
        assert(s->avctx->noise_reduction && s->encoding);

        update_noise_reduction(s);
    }

#ifdef HAVE_XVMC
    if(s->avctx->xvmc_acceleration)
        return XVMC_field_start(s, avctx);
#endif
    return 0;
}

/* generic function for encode/decode called after a frame has been coded/decoded */
void MPV_frame_end(MpegEncContext *s)
{
    int i;
    /* draw edge for correct motion prediction if outside */
#ifdef HAVE_XVMC
//just to make sure that all data is rendered.
    if(s->avctx->xvmc_acceleration){
        XVMC_field_end(s);
    }else
#endif
    if(s->unrestricted_mv && s->current_picture.reference && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
            draw_edges(s->current_picture.data[0], s->linesize  , s->h_edge_pos   , s->v_edge_pos   , EDGE_WIDTH  );
            draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
            draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
    }
    emms_c();

    s->last_pict_type    = s->pict_type;
    s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
    if(s->pict_type!=B_TYPE){
        s->last_non_b_pict_type= s->pict_type;
    }
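
A small aside on the rl_vlc[q] tables built by init_vlc_rl() above: the H.263/MPEG-4 style level scaling is folded directly into the per-qscale tables as level*qmul + qadd, with qmul = 2*qscale and qadd = (qscale-1)|1 (qscale 0 keeps the raw level). The standalone sketch below only prints that mapping for a few invented table_level values; it is not part of mpegvideo.c and the values are assumptions for illustration.

/* Illustrative sketch only: reproduces the qmul/qadd scaling used when
 * init_vlc_rl() fills rl->rl_vlc[q]; the table_level values are made up. */
#include <stdio.h>

int main(void)
{
    int table_level[3] = {1, 2, 3}; /* hypothetical rl->table_level entries */
    int q, i;

    for (q = 0; q < 4; q++) {
        int qmul = q * 2;
        int qadd = (q - 1) | 1;    /* always an odd offset for q > 0 */

        if (q == 0) {              /* qscale 0: store the unscaled level */
            qmul = 1;
            qadd = 0;
        }
        for (i = 0; i < 3; i++)
            printf("q=%d level=%d -> %d\n",
                   q, table_level[i], table_level[i] * qmul + qadd);
    }
    return 0;
}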