mpegvideo_enc.c
ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
if (ENABLE_H261_ENCODER && s->out_format == FMT_H261)
ff_h261_encode_init(s);
if (ENABLE_ANY_H263_ENCODER && s->out_format == FMT_H263)
h263_encode_init(s);
if (ENABLE_MSMPEG4_ENCODER && s->msmpeg4_version)
ff_msmpeg4_encode_init(s);
if ((ENABLE_MPEG1VIDEO_ENCODER || ENABLE_MPEG2VIDEO_ENCODER)
&& s->out_format == FMT_MPEG1)
ff_mpeg1_encode_init(s);
/* init q matrix: the default matrices are stored in idct_permutation (coefficient) order */
for(i=0;i<64;i++) {
int j= s->dsp.idct_permutation[i];
if(ENABLE_MPEG4_ENCODER && s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){
s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
}else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
s->intra_matrix[j] =
s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
}else
{ /* mpeg1/2 */
s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
}
if(s->avctx->intra_matrix)
s->intra_matrix[j] = s->avctx->intra_matrix[i];
if(s->avctx->inter_matrix)
s->inter_matrix[j] = s->avctx->inter_matrix[i];
}
/* precompute matrix */
/* for MJPEG, qscale is included in the matrix itself, so the precomputation is skipped */
if (s->out_format != FMT_MJPEG) {
convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
s->intra_matrix, s->intra_quant_bias, avctx->qmin, 31, 1);
convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
s->inter_matrix, s->inter_quant_bias, avctx->qmin, 31, 0);
}
if(ff_rate_control_init(s) < 0)
return -1;
return 0;
}
int MPV_encode_end(AVCodecContext *avctx)
{
MpegEncContext *s = avctx->priv_data;
ff_rate_control_uninit(s);
MPV_common_end(s);
if ((ENABLE_MJPEG_ENCODER || ENABLE_LJPEG_ENCODER) && s->out_format == FMT_MJPEG)
ff_mjpeg_encode_close(s);
av_freep(&avctx->extradata);
return 0;
}
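/**
 * Returns the sum of absolute differences between the pixels of a 16x16
 * block and a constant reference value (typically the block mean).
 */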
static int get_sae(uint8_t *src, int ref, int stride){
int x,y;
int acc=0;
for(y=0; y<16; y++){
for(x=0; x<16; x++){
acc+= FFABS(src[x+y*stride] - ref);
}
}
return acc;
}
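/**
 * Counts the 16x16 blocks for which intra coding looks cheaper than inter
 * coding: the SAE against the block mean (a rough intra cost, plus a bias
 * of 500) is compared with the SAD against the co-located reference block
 * (a rough inter cost).
 */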
static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride){
int x, y, w, h;
int acc=0;
w= s->width &~15;
h= s->height&~15;
for(y=0; y<h; y+=16){
for(x=0; x<w; x+=16){
int offset= x + y*stride;
int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride, 16);
int mean= (s->dsp.pix_sum(src + offset, stride) + 128)>>8;
int sae = get_sae(src + offset, mean, stride);
acc+= sae + 500 < sad;
}
}
return acc;
}
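/**
 * Queues one input frame for encoding: validates or guesses its pts, wraps
 * or copies the frame data into an internal Picture, and stores it in the
 * input_picture buffer delayed by max_b_frames entries so that B-frame
 * reordering can look ahead.
 */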
static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
AVFrame *pic=NULL;
int64_t pts;
int i;
const int encoding_delay= s->max_b_frames;
int direct=1;
if(pic_arg){
pts= pic_arg->pts;
pic_arg->display_picture_number= s->input_picture_number++;
if(pts != AV_NOPTS_VALUE){
if(s->user_specified_pts != AV_NOPTS_VALUE){
int64_t time= pts;
int64_t last= s->user_specified_pts;
if(time <= last){
av_log(s->avctx, AV_LOG_ERROR, "Error, Invalid timestamp=%"PRId64", last=%"PRId64"\n", pts, s->user_specified_pts);
return -1;
}
}
s->user_specified_pts= pts;
}else{
if(s->user_specified_pts != AV_NOPTS_VALUE){
s->user_specified_pts=
pts= s->user_specified_pts + 1;
av_log(s->avctx, AV_LOG_INFO, "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n", pts);
}else{
pts= pic_arg->display_picture_number;
}
}
}
if(pic_arg){
if(encoding_delay && !(s->flags&CODEC_FLAG_INPUT_PRESERVED)) direct=0;
if(pic_arg->linesize[0] != s->linesize) direct=0;
if(pic_arg->linesize[1] != s->uvlinesize) direct=0;
if(pic_arg->linesize[2] != s->uvlinesize) direct=0;
// av_log(AV_LOG_DEBUG, "%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize);
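        /* if the frame data can be referenced directly (matching strides, and either no
           B-frame delay or CODEC_FLAG_INPUT_PRESERVED set), wrap it in a shared Picture;
           otherwise copy it into one of our own picture buffers */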
if(direct){
i= ff_find_unused_picture(s, 1);
pic= (AVFrame*)&s->picture[i];
pic->reference= 3;
for(i=0; i<4; i++){
pic->data[i]= pic_arg->data[i];
pic->linesize[i]= pic_arg->linesize[i];
}
alloc_picture(s, (Picture*)pic, 1);
}else{
i= ff_find_unused_picture(s, 0);
pic= (AVFrame*)&s->picture[i];
pic->reference= 3;
alloc_picture(s, (Picture*)pic, 0);
if( pic->data[0] + INPLACE_OFFSET == pic_arg->data[0]
&& pic->data[1] + INPLACE_OFFSET == pic_arg->data[1]
&& pic->data[2] + INPLACE_OFFSET == pic_arg->data[2]){
// empty
}else{
int h_chroma_shift, v_chroma_shift;
avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
for(i=0; i<3; i++){
int src_stride= pic_arg->linesize[i];
int dst_stride= i ? s->uvlinesize : s->linesize;
int h_shift= i ? h_chroma_shift : 0;
int v_shift= i ? v_chroma_shift : 0;
int w= s->width >>h_shift;
int h= s->height>>v_shift;
uint8_t *src= pic_arg->data[i];
uint8_t *dst= pic->data[i];
if(!s->avctx->rc_buffer_size)
dst +=INPLACE_OFFSET;
if(src_stride==dst_stride)
memcpy(dst, src, src_stride*h);
else{
while(h--){
memcpy(dst, src, w);
dst += dst_stride;
src += src_stride;
}
}
}
}
}
copy_picture_attributes(s, pic, pic_arg);
pic->pts= pts; //we set this here to avoid modifying pic_arg
}
/* shift buffer entries */
for(i=1; i<MAX_PICTURE_COUNT /*s->encoding_delay+1*/; i++)
s->input_picture[i-1]= s->input_picture[i];
s->input_picture[encoding_delay]= (Picture*)pic;
return 0;
}
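/**
 * Decides whether the current frame may be skipped: every 8x8 block of p is
 * compared against ref with frame_skip_cmp, the per-block results are
 * accumulated according to frame_skip_exp (max, sum of abs, squares, ...),
 * and 1 is returned if the total stays below frame_skip_threshold or below
 * the lambda-scaled frame_skip_factor.
 */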
static int skip_check(MpegEncContext *s, Picture *p, Picture *ref){
int x, y, plane;
int score=0;
int64_t score64=0;
for(plane=0; plane<3; plane++){
const int stride= p->linesize[plane];
const int bw= plane ? 1 : 2;
for(y=0; y<s->mb_height*bw; y++){
for(x=0; x<s->mb_width*bw; x++){
int off= p->type == FF_BUFFER_TYPE_SHARED ? 0: 16;
int v= s->dsp.frame_skip_cmp[1](s, p->data[plane] + 8*(x + y*stride)+off, ref->data[plane] + 8*(x + y*stride), stride, 8);
switch(s->avctx->frame_skip_exp){
case 0: score= FFMAX(score, v); break;
case 1: score+= FFABS(v);break;
case 2: score+= v*v;break;
case 3: score64+= FFABS(v*v*(int64_t)v);break;
case 4: score64+= v*v*(int64_t)(v*v);break;
}
}
}
}
if(score) score64= score;
if(score64 < s->avctx->frame_skip_threshold)
return 1;
if(score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda)>>8))
return 1;
return 0;
}
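/**
 * Estimates the best number of consecutive B-frames by brute force: the next
 * input pictures are downscaled by 2^brd_scale and re-encoded with
 * 0..max_b_frames B-frames per P-frame; the candidate with the lowest
 * rate-distortion cost (bits weighted by lambda2, plus the SSE reported in
 * c->error) wins.
 */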
static int estimate_best_b_count(MpegEncContext *s){
AVCodec *codec= avcodec_find_encoder(s->avctx->codec_id);
AVCodecContext *c= avcodec_alloc_context();
AVFrame input[FF_MAX_B_FRAMES+2];
const int scale= s->avctx->brd_scale;
int i, j, out_size, p_lambda, b_lambda, lambda2;
int outbuf_size= s->width * s->height; //FIXME
uint8_t *outbuf= av_malloc(outbuf_size);
int64_t best_rd= INT64_MAX;
int best_b_count= -1;
assert(scale>=0 && scale <=3);
// emms_c();
p_lambda= s->last_lambda_for[P_TYPE]; //s->next_picture_ptr->quality;
b_lambda= s->last_lambda_for[B_TYPE]; //p_lambda *FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
if(!b_lambda) b_lambda= p_lambda; //FIXME we should do this somewhere else
lambda2= (b_lambda*b_lambda + (1<<FF_LAMBDA_SHIFT)/2 ) >> FF_LAMBDA_SHIFT;
c->width = s->width >> scale;
c->height= s->height>> scale;
c->flags= CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR | CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
c->flags|= s->avctx->flags & CODEC_FLAG_QPEL;
c->mb_decision= s->avctx->mb_decision;
c->me_cmp= s->avctx->me_cmp;
c->mb_cmp= s->avctx->mb_cmp;
c->me_sub_cmp= s->avctx->me_sub_cmp;
c->pix_fmt = PIX_FMT_YUV420P;
c->time_base= s->avctx->time_base;
c->max_b_frames= s->max_b_frames;
if (avcodec_open(c, codec) < 0)
return -1;
for(i=0; i<s->max_b_frames+2; i++){
int ysize= c->width*c->height;
int csize= (c->width/2)*(c->height/2);
Picture pre_input, *pre_input_ptr= i ? s->input_picture[i-1] : s->next_picture_ptr;
avcodec_get_frame_defaults(&input[i]);
input[i].data[0]= av_malloc(ysize + 2*csize);
input[i].data[1]= input[i].data[0] + ysize;
input[i].data[2]= input[i].data[1] + csize;
input[i].linesize[0]= c->width;
input[i].linesize[1]=
input[i].linesize[2]= c->width/2;
if(pre_input_ptr && (!i || s->input_picture[i-1])) {
pre_input= *pre_input_ptr;
if(pre_input.type != FF_BUFFER_TYPE_SHARED && i) {
pre_input.data[0]+=INPLACE_OFFSET;
pre_input.data[1]+=INPLACE_OFFSET;
pre_input.data[2]+=INPLACE_OFFSET;
}
s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0], pre_input.data[0], pre_input.linesize[0], c->width, c->height);
s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1], pre_input.data[1], pre_input.linesize[1], c->width>>1, c->height>>1);
s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2], pre_input.data[2], pre_input.linesize[2], c->width>>1, c->height>>1);
}
}
for(j=0; j<s->max_b_frames+1; j++){
int64_t rd=0;
if(!s->input_picture[j])
break;
c->error[0]= c->error[1]= c->error[2]= 0;
input[0].pict_type= I_TYPE;
input[0].quality= 1 * FF_QP2LAMBDA;
out_size = avcodec_encode_video(c, outbuf, outbuf_size, &input[0]);
// rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
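        /* encode the candidate GOP: with j B-frames per P-frame, picture i becomes a
           P-frame when i % (j+1) == j (or when it is the last one), otherwise a B-frame */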
for(i=0; i<s->max_b_frames+1; i++){
int is_p= i % (j+1) == j || i==s->max_b_frames;
input[i+1].pict_type= is_p ? P_TYPE : B_TYPE;
input[i+1].quality= is_p ? p_lambda : b_lambda;
out_size = avcodec_encode_video(c, outbuf, outbuf_size, &input[i+1]);
rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
}
/* get the delayed frames */
while(out_size){
out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
}
rd += c->error[0] + c->error[1] + c->error[2];
if(rd < best_rd){
best_rd= rd;
best_b_count= j;
}
}
av_freep(&outbuf);
avcodec_close(c);
av_freep(&c);
for(i=0; i<s->max_b_frames+2; i++){
av_freep(&input[i].data[0]);
    }

    return best_b_count;
}