ffmpeg.c

From the post "There is now a lot of H.264 source code available" · C code · 1,787 lines total · page 1 of 5

C
1,787
Font size
        /* NOTE(review): this excerpt begins mid-function. The lines below are
         * the tail of a subtitle-output routine (it references `sub`, `ist`
         * and DVB subtitle timing); its definition starts before this chunk,
         * so only formatting and comments were touched here. */
        pkt.pts = av_rescale_q(av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q) + input_files_ts_offset[ist->file_index], AV_TIME_BASE_Q,  ost->st->time_base);
        if (enc->codec_id == CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            /* presumably display times are in ms and the PTS clock is 90 kHz,
               hence the factor 90 — TODO confirm */
            if (i == 0)
                pkt.pts += 90 * sub->start_display_time;
            else
                pkt.pts += 90 * sub->end_display_time;
        }
        av_interleaved_write_frame(s, &pkt);
    }
}

/* Size, in bytes, of the scratch buffer handed to the video encoder, and the
 * buffer itself (NULL until allocated elsewhere in the file). */
static int bit_buffer_size= 1024*256;
static uint8_t *bit_buffer= NULL;

/**
 * Convert, scale/crop/pad, encode and mux one decoded video frame.
 *
 * @param s          output (muxer) context the packet is written to
 * @param ost        output stream state (sync counters, resample/pad/crop setup)
 * @param ist        input stream the frame was decoded from
 * @param in_picture decoded picture to emit
 * @param frame_size out: size in bytes of the encoded packet (0 if none)
 *
 * Depending on `video_sync_method`, the frame may be dropped (nb_frames == 0)
 * or duplicated (nb_frames > 1) to keep output timestamps in sync.
 */
static void do_video_out(AVFormatContext *s,
                         AVOutputStream *ost,
                         AVInputStream *ist,
                         AVFrame *in_picture,
                         int *frame_size)
{
    int nb_frames, i, ret;
    AVFrame *final_picture, *formatted_picture;
    AVFrame picture_format_temp, picture_crop_temp;
    uint8_t *buf = NULL, *buf1 = NULL;
    AVCodecContext *enc, *dec;
    enum PixelFormat target_pixfmt;

    avcodec_get_frame_defaults(&picture_format_temp);
    avcodec_get_frame_defaults(&picture_crop_temp);

    enc = ost->st->codec;
    dec = ist->st->codec;

    /* by default, we output a single frame */
    nb_frames = 1;

    *frame_size = 0;

    if(video_sync_method){
        /* A/V sync: compare the frame's ideal output time against the number
         * of frames emitted so far, and drop or duplicate to compensate. */
        double vdelta;
        vdelta = get_sync_ipts(ost) / av_q2d(enc->time_base) - ost->sync_opts;
        //FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
        if (vdelta < -1.1)
            nb_frames = 0;
        else if (vdelta > 1.1)
            nb_frames = lrintf(vdelta);
//fprintf(stderr, "vdelta:%f, ost->sync_opts:%lld, ost->sync_ipts:%f nb_frames:%d\n", vdelta, ost->sync_opts, ost->sync_ipts, nb_frames);
        if (nb_frames == 0){
            ++nb_frames_drop;
            if (verbose>2)
                fprintf(stderr, "*** drop!\n");
        }else if (nb_frames > 1) {
            nb_frames_dup += nb_frames;
            if (verbose>2)
                fprintf(stderr, "*** %d dup!\n", nb_frames-1);
        }
    }else
        /* no sync: just track the ideal output frame counter */
        ost->sync_opts= lrintf(get_sync_ipts(ost) / av_q2d(enc->time_base));

    /* honour the per-stream -vframes limit */
    nb_frames= FFMIN(nb_frames, max_frames[CODEC_TYPE_VIDEO] - ost->frame_number);
    if (nb_frames <= 0)
        return;

    /* convert pixel format if needed */
    /* resampling and padding below only operate on YUV420P, so force that
       as the intermediate format when either is enabled */
    target_pixfmt = ost->video_resample || ost->video_pad
        ? PIX_FMT_YUV420P : enc->pix_fmt;
    if (dec->pix_fmt != target_pixfmt) {
        int size;

        /* create temporary picture */
        size = avpicture_get_size(target_pixfmt, dec->width, dec->height);
        buf = av_malloc(size);
        if (!buf)
            return;
        formatted_picture = &picture_format_temp;
        avpicture_fill((AVPicture*)formatted_picture, buf, target_pixfmt, dec->width, dec->height);

        if (img_convert((AVPicture*)formatted_picture, target_pixfmt,
                        (AVPicture *)in_picture, dec->pix_fmt,
                        dec->width, dec->height) < 0) {

            if (verbose >= 0)
                fprintf(stderr, "pixel format conversion not handled\n");

            goto the_end;
        }
    } else {
        formatted_picture = in_picture;
    }

    /* XXX: resampling could be done before raw format conversion in
       some cases to go faster */
    /* XXX: only works for YUV420P */
    if (ost->video_resample) {
        final_picture = &ost->pict_tmp;
        img_resample(ost->img_resample_ctx, (AVPicture*)final_picture, (AVPicture*)formatted_picture);

        if (ost->padtop || ost->padbottom || ost->padleft || ost->padright) {
            fill_pad_region((AVPicture*)final_picture, enc->height, enc->width,
                    ost->padtop, ost->padbottom, ost->padleft, ost->padright,
                    padcolor);
        }

        if (enc->pix_fmt != PIX_FMT_YUV420P) {
            int size;

            /* the intermediate conversion buffer is no longer needed;
               reuse `buf` for the final-format picture */
            av_free(buf);
            /* create temporary picture */
            size = avpicture_get_size(enc->pix_fmt, enc->width, enc->height);
            buf = av_malloc(size);
            if (!buf)
                return;
            final_picture = &picture_format_temp;
            avpicture_fill((AVPicture*)final_picture, buf, enc->pix_fmt, enc->width, enc->height);

            if (img_convert((AVPicture*)final_picture, enc->pix_fmt,
                            (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
                            enc->width, enc->height) < 0) {

                if (verbose >= 0)
                    fprintf(stderr, "pixel format conversion not handled\n");

                goto the_end;
            }
        }
    } else if (ost->video_crop) {
        if (img_crop((AVPicture *)&picture_crop_temp, (AVPicture *)formatted_picture, enc->pix_fmt, ost->topBand, ost->leftBand) < 0) {
            av_log(NULL, AV_LOG_ERROR, "error cropping picture\n");
            goto the_end;
        }
        final_picture = &picture_crop_temp;
    } else if (ost->video_pad) {
        final_picture = &ost->pict_tmp;
        /* copy the three YUV420P planes into the padded destination;
           chroma planes (i > 0) are subsampled by 2 in each dimension,
           hence the shift */
        for (i = 0; i < 3; i++) {
            uint8_t *optr, *iptr;
            int shift = (i == 0) ? 0 : 1;
            int y, yheight;

            /* set offset to start writing image into */
            optr = final_picture->data[i] + (((final_picture->linesize[i] *
                            ost->padtop) + ost->padleft) >> shift);
            iptr = formatted_picture->data[i];

            yheight = (enc->height - ost->padtop - ost->padbottom) >> shift;
            for (y = 0; y < yheight; y++) {
                /* copy unpadded image row into padded image row */
                memcpy(optr, iptr, formatted_picture->linesize[i]);
                optr += final_picture->linesize[i];
                iptr += formatted_picture->linesize[i];
            }
        }

        fill_pad_region((AVPicture*)final_picture, enc->height, enc->width,
                ost->padtop, ost->padbottom, ost->padleft, ost->padright,
                padcolor);

        if (enc->pix_fmt != PIX_FMT_YUV420P) {
            int size;

            av_free(buf);
            /* create temporary picture */
            size = avpicture_get_size(enc->pix_fmt, enc->width, enc->height);
            buf = av_malloc(size);
            if (!buf)
                return;
            final_picture = &picture_format_temp;
            avpicture_fill((AVPicture*)final_picture, buf, enc->pix_fmt, enc->width, enc->height);

            if (img_convert((AVPicture*)final_picture, enc->pix_fmt,
                        (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
                        enc->width, enc->height) < 0) {

                if (verbose >= 0)
                    fprintf(stderr, "pixel format conversion not handled\n");

                goto the_end;
            }
        }
    } else {
        final_picture = formatted_picture;
    }

    /* duplicates frame if needed */
    for(i=0;i<nb_frames;i++) {
        AVPacket pkt;
        av_init_packet(&pkt);
        pkt.stream_index= ost->index;

        if (s->oformat->flags & AVFMT_RAWPICTURE) {
            /* raw pictures are written as AVPicture structure to
               avoid any copies. We support temporarily the older
               method. */
            AVFrame* old_frame = enc->coded_frame;
            enc->coded_frame = dec->coded_frame; //FIXME/XXX remove this hack
            pkt.data= (uint8_t *)final_picture;
            pkt.size=  sizeof(AVPicture);
            /* enc->coded_frame aliases dec->coded_frame here (hack above),
               so the dereference is guarded by the dec->coded_frame check */
            if(dec->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
                pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
            if(dec->coded_frame && dec->coded_frame->key_frame)
                pkt.flags |= PKT_FLAG_KEY;

            av_interleaved_write_frame(s, &pkt);
            enc->coded_frame = old_frame;
        } else {
            AVFrame big_picture;

            big_picture= *final_picture;
            /* better than nothing: use input picture interlaced
               settings */
            big_picture.interlaced_frame = in_picture->interlaced_frame;
            if(avctx_opts->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)){
                if(top_field_first == -1)
                    big_picture.top_field_first = in_picture->top_field_first;
                else
                    big_picture.top_field_first = top_field_first;
            }

            /* handles sameq here. This is not correct because it may
               not be a global option */
            if (same_quality) {
                big_picture.quality = ist->st->quality;
            }else
                big_picture.quality = ost->st->quality;
            if(!me_threshold)
                /* let the encoder choose the picture type itself */
                big_picture.pict_type = 0;
//            big_picture.pts = AV_NOPTS_VALUE;
            big_picture.pts= ost->sync_opts;
//            big_picture.pts= av_rescale(ost->sync_opts, AV_TIME_BASE*(int64_t)enc->time_base.num, enc->time_base.den);
//av_log(NULL, AV_LOG_DEBUG, "%lld -> encoder\n", ost->sync_opts);
            ret = avcodec_encode_video(enc,
                                       bit_buffer, bit_buffer_size,
                                       &big_picture);
            //enc->frame_number = enc->real_pict_num;
            /* ret > 0: `ret` bytes were produced in bit_buffer; the encoder
               may also legitimately return 0 while buffering (B-frame delay) */
            if(ret>0){
                pkt.data= bit_buffer;
                pkt.size= ret;
                if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
                    pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
/*av_log(NULL, AV_LOG_DEBUG, "encoder -> %lld/%lld\n",
   pkt.pts != AV_NOPTS_VALUE ? av_rescale(pkt.pts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1,
   pkt.dts != AV_NOPTS_VALUE ? av_rescale(pkt.dts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1);*/

                if(enc->coded_frame && enc->coded_frame->key_frame)
                    pkt.flags |= PKT_FLAG_KEY;
                av_interleaved_write_frame(s, &pkt);
                *frame_size = ret;

                //fprintf(stderr,"\nFrame: %3d %3d size: %5d type: %d",
                //        enc->frame_number-1, enc->real_pict_num, ret,
                //        enc->pict_type);
                /* if two pass, output log */
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);
                }
            }
        }
        ost->sync_opts++;
        ost->frame_number++;
    }
 the_end:
    av_free(buf);
    av_free(buf1);
}

/* Convert a normalized mean-squared error into a PSNR value in dB;
 * a zero error maps to INFINITY. */
static double psnr(double d){
    if(d==0) return INFINITY;
    return -10.0*log(d)/log(10.0);
}

/**
 * Append one line of per-frame video statistics (quality, PSNR, frame and
 * total sizes, bitrates, picture type) for `ost` to a "vstats_HHMMSS.log"
 * file, opened lazily on first call and kept open for the process lifetime.
 * Exits the process if the log file cannot be created.
 */
static void do_video_stats(AVFormatContext *os, AVOutputStream *ost,
                           int frame_size)
{
    static FILE *fvstats=NULL;
    char filename[40];
    time_t today2;
    struct tm *today;
    AVCodecContext *enc;
    int frame_number;
    int64_t ti;
    double ti1, bitrate, avg_bitrate;

    if (!fvstats) {
        /* first call: derive the log file name from the current time */
        today2 = time(NULL);
        today = localtime(&today2);
        snprintf(filename, sizeof(filename), "vstats_%02d%02d%02d.log", today->tm_hour,
                                               today->tm_min,
                                               today->tm_sec);
        fvstats = fopen(filename,"w");
        if (!fvstats) {
            perror("fopen");
            exit(1);
        }
    }

    /* NOTE(review): ti is initialized but never read in this function's
       visible body */
    ti = MAXINT64;
    enc = ost->st->codec;
    if (enc->codec_type == CODEC_TYPE_VIDEO) {
        frame_number = ost->frame_number;
        fprintf(fvstats, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality/(float)FF_QP2LAMBDA);
        if (enc->flags&CODEC_FLAG_PSNR)
            fprintf(fvstats, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0]/(enc->width*enc->height*255.0*255.0)));

        fprintf(fvstats,"f_size= %6d ", frame_size);
        /* compute pts value */
        ti1 = ost->sync_opts * av_q2d(enc->time_base);
        if (ti1 < 0.01)
            ti1 = 0.01;

        bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
        fprintf(fvstats, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
            (double)video_size / 1024, ti1, bitrate, avg_bitrate);
        fprintf(fvstats,"type= %c\n", av_get_pict_type_char(enc->coded_frame->pict_type));
    }
}

/**
 * Print a periodic progress/status report covering all output streams.
 * Unless `is_last_report` is set, output is throttled to one report per
 * 0.5 seconds of wall-clock time.
 *
 * NOTE(review): this definition is cut off at the end of this excerpt
 * (page 1 of 5); the remainder of the body is not visible here.
 */
static void print_report(AVFormatContext **output_files,
                         AVOutputStream **ost_table, int nb_ostreams,
                         int is_last_report)
{
    char buf[1024];
    AVOutputStream *ost;
    AVFormatContext *oc, *os;
    int64_t total_size;
    AVCodecContext *enc;
    int frame_number, vid, i;
    double bitrate, ti1, pts;
    static int64_t last_time = -1;
    static int qp_histogram[52];

    if (!is_last_report) {
        int64_t cur_time;
        /* display the report every 0.5 seconds */
        cur_time = av_gettime();
        if (last_time == -1) {
            last_time = cur_time;
            return;
        }
        if ((cur_time - last_time) < 500000)
            return;
        last_time = cur_time;
    }

    oc = output_files[0];

    total_size = url_ftell(&oc->pb);

    buf[0] = '\0';
    ti1 = 1e10;
    vid = 0;
    for(i=0;i<nb_ostreams;i++) {
        ost = ost_table[i];
        os = output_files[ost->file_index];
        enc = ost->st->codec;
        if (vid && enc->codec_type == CODEC_TYPE_VIDEO) {
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ",

⌨️ Keyboard shortcuts

Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcut list: ?