📄 ffmpeg.c
            /* create temporary picture */
            size = avpicture_get_size(enc->pix_fmt, enc->width, enc->height);
            buf = av_malloc(size);
            if (!buf)
                return;
            final_picture = &picture_format_temp;
            avpicture_fill((AVPicture*)final_picture, buf, enc->pix_fmt, enc->width, enc->height);

            if (img_convert((AVPicture*)final_picture, enc->pix_fmt,
                            (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
                            enc->width, enc->height) < 0) {
                if (verbose >= 0)
                    fprintf(stderr, "pixel format conversion not handled\n");
                goto the_end;
            }
        }
    } else if (ost->video_crop) {
        picture_crop_temp.data[0] = formatted_picture->data[0] +
                (ost->topBand * formatted_picture->linesize[0]) + ost->leftBand;
        picture_crop_temp.data[1] = formatted_picture->data[1] +
                ((ost->topBand >> 1) * formatted_picture->linesize[1]) +
                (ost->leftBand >> 1);
        picture_crop_temp.data[2] = formatted_picture->data[2] +
                ((ost->topBand >> 1) * formatted_picture->linesize[2]) +
                (ost->leftBand >> 1);

        picture_crop_temp.linesize[0] = formatted_picture->linesize[0];
        picture_crop_temp.linesize[1] = formatted_picture->linesize[1];
        picture_crop_temp.linesize[2] = formatted_picture->linesize[2];
        final_picture = &picture_crop_temp;
    } else if (ost->video_pad) {
        final_picture = &ost->pict_tmp;

        for (i = 0; i < 3; i++) {
            uint8_t *optr, *iptr;
            int shift = (i == 0) ? 0 : 1;
            int y, yheight;

            /* set offset to start writing image into */
            optr = final_picture->data[i] +
                    (((final_picture->linesize[i] * ost->padtop) + ost->padleft) >> shift);
            iptr = formatted_picture->data[i];

            yheight = (enc->height - ost->padtop - ost->padbottom) >> shift;
            for (y = 0; y < yheight; y++) {
                /* copy unpadded image row into padded image row */
                memcpy(optr, iptr, formatted_picture->linesize[i]);
                optr += final_picture->linesize[i];
                iptr += formatted_picture->linesize[i];
            }
        }

        fill_pad_region((AVPicture*)final_picture, enc->height, enc->width,
                ost->padtop, ost->padbottom, ost->padleft, ost->padright,
                padcolor);

        if (enc->pix_fmt != PIX_FMT_YUV420P) {
            int size;

            av_free(buf);
            /* create temporary picture */
            size = avpicture_get_size(enc->pix_fmt, enc->width, enc->height);
            buf = av_malloc(size);
            if (!buf)
                return;
            final_picture = &picture_format_temp;
            avpicture_fill((AVPicture*)final_picture, buf, enc->pix_fmt, enc->width, enc->height);

            if (img_convert((AVPicture*)final_picture, enc->pix_fmt,
                            (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
                            enc->width, enc->height) < 0) {
                if (verbose >= 0)
                    fprintf(stderr, "pixel format conversion not handled\n");
                goto the_end;
            }
        }
    } else {
        final_picture = formatted_picture;
    }

    /* duplicates frame if needed */
    /* XXX: pb because no interleaving */
    for(i=0;i<nb_frames;i++) {
        AVPacket pkt;
        av_init_packet(&pkt);
        pkt.stream_index= ost->index;

        if (s->oformat->flags & AVFMT_RAWPICTURE) {
            /* raw pictures are written as AVPicture structure to
               avoid any copies. We support temporarily the older
               method. */
            AVFrame* old_frame = enc->coded_frame;
            enc->coded_frame = dec->coded_frame; //FIXME/XXX remove this hack

            pkt.data= (uint8_t *)final_picture;
            pkt.size= sizeof(AVPicture);
            if(dec->coded_frame)
                pkt.pts= dec->coded_frame->pts;
            if(dec->coded_frame && dec->coded_frame->key_frame)
                pkt.flags |= PKT_FLAG_KEY;

            av_interleaved_write_frame(s, &pkt);
            enc->coded_frame = old_frame;
        } else {
            AVFrame big_picture;

            big_picture= *final_picture;
            /* better than nothing: use input picture interlaced settings */
            big_picture.interlaced_frame = in_picture->interlaced_frame;
            if(do_interlace_me || do_interlace_dct){
                if(top_field_first == -1)
                    big_picture.top_field_first = in_picture->top_field_first;
                else
                    big_picture.top_field_first = top_field_first;
            }

            /* handles sameq here. This is not correct because it may
               not be a global option */
            if (same_quality) {
                big_picture.quality = ist->st->quality;
            }else
                big_picture.quality = ost->st->quality;
            if(!me_threshold)
                big_picture.pict_type = 0;
//            big_picture.pts = AV_NOPTS_VALUE;
            big_picture.pts= av_rescale(ost->sync_opts, AV_TIME_BASE*(int64_t)enc->frame_rate_base, enc->frame_rate);
//av_log(NULL, AV_LOG_DEBUG, "%lld -> encoder\n", ost->sync_opts);
            ret = avcodec_encode_video(enc,
                                       bit_buffer, VIDEO_BUFFER_SIZE,
                                       &big_picture);
            //enc->frame_number = enc->real_pict_num;
            if(ret){
                pkt.data= bit_buffer;
                pkt.size= ret;
                if(enc->coded_frame)
                    pkt.pts= enc->coded_frame->pts;
/*av_log(NULL, AV_LOG_DEBUG, "encoder -> %lld/%lld\n",
   pkt.pts != AV_NOPTS_VALUE ? av_rescale(pkt.pts, enc->frame_rate, AV_TIME_BASE*(int64_t)enc->frame_rate_base) : -1,
   pkt.dts != AV_NOPTS_VALUE ? av_rescale(pkt.dts, enc->frame_rate, AV_TIME_BASE*(int64_t)enc->frame_rate_base) : -1);*/

                if(enc->coded_frame && enc->coded_frame->key_frame)
                    pkt.flags |= PKT_FLAG_KEY;
                av_interleaved_write_frame(s, &pkt);
                *frame_size = ret;
                //fprintf(stderr,"\nFrame: %3d %3d size: %5d type: %d",
                //        enc->frame_number-1, enc->real_pict_num, ret,
                //        enc->pict_type);
                /* if two pass, output log */
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);
                }
            }
        }
        ost->sync_opts++;
        ost->frame_number++;
    }
 the_end:
    av_free(buf);
    av_free(buf1);
}

static double psnr(double d){
    if(d==0) return INFINITY;
    return -10.0*log(d)/log(10.0);
}

static void do_video_stats(AVFormatContext *os, AVOutputStream *ost,
                           int frame_size)
{
    static FILE *fvstats=NULL;
    char filename[40];
    time_t today2;
    struct tm *today;
    AVCodecContext *enc;
    int frame_number;
    int64_t ti;
    double ti1, bitrate, avg_bitrate;

    if (!fvstats) {
        today2 = time(NULL);
        today = localtime(&today2);
        sprintf(filename, "vstats_%02d%02d%02d.log", today->tm_hour,
                today->tm_min, today->tm_sec);
        fvstats = fopen(filename,"w");
        if (!fvstats) {
            perror("fopen");
            exit(1);
        }
    }

    ti = MAXINT64;
    enc = &ost->st->codec;
    if (enc->codec_type == CODEC_TYPE_VIDEO) {
        frame_number = ost->frame_number;
        fprintf(fvstats, "frame= %5d q= %2.1f ", frame_number,
                enc->coded_frame->quality/(float)FF_QP2LAMBDA);
        if (enc->flags&CODEC_FLAG_PSNR)
            fprintf(fvstats, "PSNR= %6.2f ",
                    psnr(enc->coded_frame->error[0]/(enc->width*enc->height*255.0*255.0)));

        fprintf(fvstats,"f_size= %6d ", frame_size);
        /* compute pts value */
        ti1 = (double)ost->sync_opts * enc->frame_rate_base / enc->frame_rate;
        if (ti1 < 0.01)
            ti1 = 0.01;

        bitrate = (double)(frame_size * 8) * enc->frame_rate / enc->frame_rate_base / 1000.0;
        avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
        fprintf(fvstats, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
                (double)video_size / 1024, ti1, bitrate, avg_bitrate);
        fprintf(fvstats, "type= %c\n",
                av_get_pict_type_char(enc->coded_frame->pict_type));
    }
}

static void print_report(AVFormatContext **output_files,
                         AVOutputStream **ost_table, int nb_ostreams,
                         int is_last_report)
{
    char buf[1024];
    AVOutputStream *ost;
    AVFormatContext *oc, *os;
    int64_t total_size;
    AVCodecContext *enc;
    int frame_number, vid, i;
    double bitrate, ti1, pts;
    static int64_t last_time = -1;

    if (!is_last_report) {
        int64_t cur_time;
        /* display the report every 0.5 seconds */
        cur_time = av_gettime();
        if (last_time == -1) {
            last_time = cur_time;
            return;
        }
        if ((cur_time - last_time) < 500000)
            return;
        last_time = cur_time;
    }

    oc = output_files[0];

    total_size = url_ftell(&oc->pb);

    buf[0] = '\0';
    ti1 = 1e10;
    vid = 0;
    for(i=0;i<nb_ostreams;i++) {
        ost = ost_table[i];
        os = output_files[ost->file_index];
        enc = &ost->st->codec;
        if (vid && enc->codec_type == CODEC_TYPE_VIDEO) {
            sprintf(buf + strlen(buf), "q=%2.1f ",
                    enc->coded_frame->quality/(float)FF_QP2LAMBDA);
        }
        if (!vid && enc->codec_type == CODEC_TYPE_VIDEO) {
            frame_number = ost->frame_number;
            sprintf(buf + strlen(buf), "frame=%5d q=%2.1f ", frame_number,
                    enc->coded_frame ? enc->coded_frame->quality/(float)FF_QP2LAMBDA : 0);
            if(is_last_report)
                sprintf(buf + strlen(buf), "L");
            if (enc->flags&CODEC_FLAG_PSNR){
                int j;
                double error, error_sum=0;
                double scale, scale_sum=0;
                char type[3]= {'Y','U','V'};
                sprintf(buf + strlen(buf), "PSNR=");
                for(j=0; j<3; j++){
                    if(is_last_report){
                        error= enc->error[j];
                        scale= enc->width*enc->height*255.0*255.0*frame_number;
                    }else{
                        error= enc->coded_frame->error[j];
                        scale= enc->width*enc->height*255.0*255.0;
                    }
                    if(j) scale/=4;
                    error_sum += error;
                    scale_sum += scale;
                    sprintf(buf + strlen(buf), "%c:%2.2f ", type[j], psnr(error/scale));
                }
                sprintf(buf + strlen(buf), "*:%2.2f ", psnr(error_sum/scale_sum));
            }
            vid = 1;
        }
        /* compute min output value */
        pts = (double)ost->st->pts.val * ost->st->time_base.num / ost->st->time_base.den;
        if ((pts < ti1) && (pts > 0))
            ti1 = pts;
    }
    if (ti1 < 0.01)
        ti1 = 0.01;

    if (verbose || is_last_report) {
        bitrate = (double)(total_size * 8) / ti1 / 1000.0;

        sprintf(buf + strlen(buf),
                "size=%8.0fkB time=%0.1f bitrate=%6.1fkbits/s",
                (double)total_size / 1024, ti1, bitrate);

        if (verbose > 1)
            sprintf(buf + strlen(buf), " dup=%d drop=%d",
                    nb_frames_dup, nb_frames_drop);

        if (verbose >= 0)
            fprintf(stderr, "%s \r", buf);

        fflush(stderr);
    }

    if (is_last_report && verbose >= 0){
        int64_t raw= audio_size + video_size + extra_size;
        fprintf(stderr, "\n");
        fprintf(stderr, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
                video_size/1024.0,
                audio_size/1024.0,
                extra_size/1024.0,
                100.0*(total_size - raw)/raw);
    }
}

/* pkt = NULL means EOF (needed to flush decoder buffers) */
static int output_packet(AVInputStream *ist, int ist_index,
                         AVOutputStream **ost_table, int nb_ostreams,
                         const AVPacket *pkt)
{
    AVFormatContext *os;
    AVOutputStream *ost;
    uint8_t *ptr;
    int len, ret, i;
    uint8_t *data_buf;
    int data_size, got_picture;
    AVFrame picture;
    short samples[AVCODEC_MAX_AUDIO_FRAME_SIZE / 2];
    void *buffer_to_free;

    if (pkt && pkt->dts != AV_NOPTS_VALUE) { //FIXME seems redundant, as libavformat does this too
        ist->next_pts = ist->pts = pkt->dts;
    } else {
        assert(ist->pts == ist->next_pts);
    }

    if (pkt == NULL) {
        /* EOF handling */
        ptr = NULL;
        len = 0;
        goto handle_eof;
    }

    len = pkt->size;
    ptr = pkt->data;
    while (len > 0) {
    handle_eof:
        /* decode the packet if needed */
        data_buf = NULL; /* fail safe */
        data_size = 0;
        if (ist->decoding_needed) {
            switch(ist->st->codec.codec_type) {
            case CODEC_TYPE_AUDIO:
                /* XXX: could avoid copy if PCM 16 bits with same
                   endianness as CPU */
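For reference, the PSNR values printed by do_video_stats() and print_report() come from the psnr() helper above, which maps a normalized mean squared error d to decibels as -10*log10(d). The stand-alone sketch below reproduces that conversion; the name psnr_db, the 720x576 frame size and the per-sample error are illustrative assumptions, not values taken from ffmpeg.c.

#include <math.h>
#include <stdio.h>

/* same conversion as the psnr() helper in ffmpeg.c: dB from a normalized MSE */
static double psnr_db(double d)
{
    if (d == 0)
        return INFINITY;
    return -10.0 * log(d) / log(10.0);
}

int main(void)
{
    /* hypothetical 720x576 luma plane with an average squared error of
       16 per sample, i.e. an RMSE of 4 on the 0..255 scale */
    int    width  = 720, height = 576;
    double sse    = 16.0 * width * height;                   /* summed squared error */
    double mse    = sse / (width * height * 255.0 * 255.0);  /* normalized, as in do_video_stats() */

    printf("PSNR = %6.2f dB\n", psnr_db(mse));               /* prints about 36.09 dB */
    return 0;
}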