⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 utils.c

📁 F:图像处理资料264264书籍ffmpeg-0.4.9-pre1VideoStream.rar 一个视频解压缩源代码
💻 C
📖 第 1 页 / 共 5 页
字号:
            av_parser_close(st->parser);
        }
        av_free(st->index_entries);
        av_free(st);
    }

    flush_packet_queue(s);

    /* close the underlying byte stream only if this format owns one;
       AVFMT_NOFILE formats manage their own I/O */
    must_open_file = 1;
    if (s->iformat->flags & AVFMT_NOFILE) {
        must_open_file = 0;
    }
    if (must_open_file) {
        url_fclose(&s->pb);
    }
    av_freep(&s->priv_data);
    av_free(s);
}

/**
 * Add a new stream to a media file. Can only be called in the
 * read_header function. If the flag AVFMTCTX_NOHEADER is in the
 * format context, then new streams can be added in read_packet too.
 *
 * @param s media file handle
 * @param id file format dependent stream id
 * @return the new stream, or NULL if MAX_STREAMS is reached or
 *         allocation fails
 */
AVStream *av_new_stream(AVFormatContext *s, int id)
{
    AVStream *st;

    if (s->nb_streams >= MAX_STREAMS)
        return NULL;

    st = av_mallocz(sizeof(AVStream));
    if (!st)
        return NULL;

    avcodec_get_context_defaults(&st->codec);
    if (s->iformat) {
        /* no default bitrate if decoding */
        st->codec.bit_rate = 0;
    }
    st->index = s->nb_streams;
    st->id = id;
    st->start_time = AV_NOPTS_VALUE;
    st->duration = AV_NOPTS_VALUE;
    st->cur_dts = AV_NOPTS_VALUE;

    /* default pts settings is MPEG like: 33 bits, 1/90000 second units */
    av_set_pts_info(st, 33, 1, 90000);
    st->last_IP_pts = AV_NOPTS_VALUE;

    s->streams[s->nb_streams++] = st;
    return st;
}

/************************************************************/
/* output media file */

/**
 * Allocate the muxer's private data and let the output format
 * validate/apply the given parameters.
 *
 * @param s  media file handle (oformat must be set)
 * @param ap format parameters, passed through to the muxer
 * @return 0 if OK, AVERROR_NOMEM or a muxer-specific negative error code
 */
int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
{
    int ret;

    if (s->oformat->priv_data_size > 0) {
        s->priv_data = av_mallocz(s->oformat->priv_data_size);
        if (!s->priv_data)
            return AVERROR_NOMEM;
    } else
        s->priv_data = NULL;

    if (s->oformat->set_parameters) {
        ret = s->oformat->set_parameters(s, ap);
        if (ret < 0)
            return ret;
    }
    return 0;
}

/**
 * allocate the stream private data and write the stream header to an
 * output media file
 *
 * @param s media file handle
 * @return 0 if OK. AVERROR_xxx if error.
 */
int av_write_header(AVFormatContext *s)
{
    int ret, i;
    AVStream *st;

    ret = s->oformat->write_header(s);
    if (ret < 0)
        return ret;

    /* init PTS generation: seed each stream's fractional pts counter with
       the appropriate ticks-per-second for its media type */
    for(i=0;i<s->nb_streams;i++) {
        st = s->streams[i];

        switch (st->codec.codec_type) {
        case CODEC_TYPE_AUDIO:
            av_frac_init(&st->pts, 0, 0,
                         (int64_t)st->time_base.num * st->codec.sample_rate);
            break;
        case CODEC_TYPE_VIDEO:
            av_frac_init(&st->pts, 0, 0,
                         (int64_t)st->time_base.num * st->codec.frame_rate);
            break;
        default:
            break;
        }
    }
    return 0;
}

//FIXME merge with compute_pkt_fields

/**
 * Fill in missing pts/dts/duration fields of an outgoing packet and
 * rescale the caller-supplied timestamps (in AV_TIME_BASE units) into
 * the stream's own time base. Also advances the stream's pts counter.
 */
static void compute_pkt_fields2(AVStream *st, AVPacket *pkt){
    int b_frames = FFMAX(st->codec.has_b_frames, st->codec.max_b_frames);
    int num, den, frame_size;

//    av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts:%lld dts:%lld cur_dts:%lld b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, b_frames, pkt->size, pkt->stream_index);

/*    if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
        return -1;*/

    /* convert pts/dts from AV_TIME_BASE units to stream time base */
    if(pkt->pts != AV_NOPTS_VALUE)
        pkt->pts = av_rescale(pkt->pts, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
    if(pkt->dts != AV_NOPTS_VALUE)
        pkt->dts = av_rescale(pkt->dts, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);

    /* duration field: rescale, or derive from the codec frame duration
       when the caller left it at 0 */
    pkt->duration = av_rescale(pkt->duration, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
    if (pkt->duration == 0) {
        compute_frame_duration(&num, &den, st, NULL, pkt);
        if (den && num) {
            pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
        }
    }

    //XXX/FIXME this is a temporary hack until all encoders output pts
    if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !b_frames){
        pkt->dts=
//        pkt->pts=
            st->cur_dts;
        pkt->pts= st->pts.val;
    }

    //calculate dts from pts
    if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
        if(b_frames){
            /* with B-frames the dts of this packet is the pts of the
               previous I/P frame; remember the current pts for next time */
            if(st->last_IP_pts == AV_NOPTS_VALUE){
                st->last_IP_pts= -pkt->duration;
            }
            if(st->last_IP_pts < pkt->pts){
                pkt->dts= st->last_IP_pts;
                st->last_IP_pts= pkt->pts;
            }else
                pkt->dts= pkt->pts;
        }else
            pkt->dts= pkt->pts;
    }

//    av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%lld dts2:%lld\n", pkt->pts, pkt->dts);
    st->cur_dts= pkt->dts;
    st->pts.val= pkt->dts;

    /* update pts */
    switch (st->codec.codec_type) {
    case CODEC_TYPE_AUDIO:
        frame_size = get_audio_frame_size(&st->codec, pkt->size);

        /* HACK/FIXME, we skip the initial 0-size packets as they are most
           likely equal to the encoder delay, but it would be better if we
           had the real timestamps from the encoder */
        if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
            av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
        }
        break;
    case CODEC_TYPE_VIDEO:
        av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec.frame_rate_base);
        break;
    default:
        break;
    }
}

/**
 * Clamp a packet's timestamps to the stream's pts_wrap_bits range by
 * masking off the high bits (wrap-around), after forcing negative dts
 * up to 0.
 */
static void truncate_ts(AVStream *st, AVPacket *pkt){
    int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;

    if(pkt->dts < 0)
        pkt->dts= 0;  //this happens for low_delay=0 and b frames, FIXME, needs further investigation about what we should do here

    pkt->pts &= pts_mask;
    pkt->dts &= pts_mask;
}

/**
 * Write a packet to an output media file. The packet shall contain
 * one audio or video frame.
 *
 * @param s media file handle
 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
 */
int av_write_frame(AVFormatContext *s, AVPacket *pkt)
{
    /* fill/rescale timestamps, then clamp them to the stream's wrap range
       before handing the packet to the muxer */
    compute_pkt_fields2(s->streams[pkt->stream_index], pkt);

    truncate_ts(s->streams[pkt->stream_index], pkt);

    return s->oformat->write_packet(s, pkt);
}

/**
 * Writes a packet to an output media file ensuring correct interleaving.
 * The packet shall contain one audio or video frame.
 * If the packets are already correctly interleaved the application should
 * call av_write_frame() instead as it is slightly faster. It is also
 * important to keep in mind that non-interleaved input will need huge
 * amounts of memory to interleave with this, so it is preferable to
 * interleave at the demuxer level.
 *
 * @param s media file handle
 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
 */
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
    AVPacketList *pktl, **next_point, *this_pktl;
    int stream_count=0;
    int streams[MAX_STREAMS];
    AVStream *st= s->streams[ pkt->stream_index];

    compute_pkt_fields2(st, pkt);

    //FIXME/XXX/HACK drop zero sized packets
    if(st->codec.codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
        return 0;

    if(pkt->dts == AV_NOPTS_VALUE)
        return -1;

    assert(pkt->destruct != av_destruct_packet); //FIXME

    /* take a private copy of the packet and insert it into the buffer
       list sorted by dts; dts values of different streams are compared
       in a common time base by cross-multiplying */
    this_pktl = av_mallocz(sizeof(AVPacketList));
    this_pktl->pkt= *pkt;
    av_dup_packet(&this_pktl->pkt);

    next_point = &s->packet_buffer;
    while(*next_point){
        AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
        int64_t left=  st2->time_base.num * (int64_t)st ->time_base.den;
        int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
        if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
            break;
        next_point= &(*next_point)->next;
    }

    this_pktl->next= *next_point;
    *next_point= this_pktl;

    memset(streams, 0, sizeof(streams));
    pktl= s->packet_buffer;
    /* count buffered packets per stream and how many distinct streams
       currently have at least one packet buffered */
    while(pktl){
//av_log(s, AV_LOG_DEBUG, "show st:%d dts:%lld\n", pktl->pkt.stream_index, pktl->pkt.dts);
        if(streams[ pktl->pkt.stream_index ] == 0)
            stream_count++;
        streams[ pktl->pkt.stream_index ]++;
        pktl= pktl->next;
    }

    /* while every stream has at least one buffered packet, the head of the
       buffer (lowest dts) is safe to write out */
    while(s->nb_streams == stream_count){
        int ret;

        pktl= s->packet_buffer;
//av_log(s, AV_LOG_DEBUG, "write st:%d dts:%lld\n", pktl->pkt.stream_index, pktl->pkt.dts);
        truncate_ts(s->streams[pktl->pkt.stream_index], &pktl->pkt);
        ret= s->oformat->write_packet(s, &pktl->pkt);

        s->packet_buffer= pktl->next;

        if((--streams[ pktl->pkt.stream_index ]) == 0)
            stream_count--;

        av_free_packet(&pktl->pkt);
        av_freep(&pktl);

        if(ret<0)
            return ret;
    }
    return 0;
}

/**
 * write the stream trailer to an output media file and free the
 * file private data.
 *
 * @param s media file handle
 * @return 0 if OK. AVERROR_xxx if error.
 */
int av_write_trailer(AVFormatContext *s)
{
    int ret;

    /* flush any packets still buffered for interleaving */
    while(s->packet_buffer){
        int ret;
        AVPacketList *pktl= s->packet_buffer;
//av_log(s, AV_LOG_DEBUG, "write_trailer st:%d dts:%lld\n", pktl->pkt.stream_index, pktl->pkt.dts);
        truncate_ts(s->streams[pktl->pkt.stream_index], &pktl->pkt);
        ret= s->oformat->write_packet(s, &pktl->pkt);

        s->packet_buffer= pktl->next;

        av_free_packet(&pktl->pkt);
        av_freep(&pktl);

        if(ret<0)
            return ret;
    }

    ret = s->oformat->write_trailer(s);
    av_freep(&s->priv_data);
    return ret;
}

/* "user interface" functions */

/**
 * Dump a human-readable description of the format context (duration,
 * bitrate, per-stream codec info) to the log.
 *
 * @param ic        format context to describe
 * @param index     file index used in the printed "#N" labels
 * @param url       file name/URL printed in the header line
 * @param is_output non-zero for a muxing context, zero for demuxing
 */
void dump_format(AVFormatContext *ic,
                 int index,
                 const char *url,
                 int is_output)
{
    int i, flags;
    char buf[256];

    av_log(NULL, AV_LOG_DEBUG, "%s #%d, %s, %s '%s':\n",
            is_output ? "Output" : "Input",
            index,
            is_output ? ic->oformat->name : ic->iformat->name,
            is_output ?
            "to" : "from", url);
    if (!is_output) {
        /* break the microsecond duration down into h:m:s.tenths */
        av_log(NULL, AV_LOG_DEBUG, "  Duration: ");
        if (ic->duration != AV_NOPTS_VALUE) {
            int hours, mins, secs, us;
            secs = ic->duration / AV_TIME_BASE;
            us = ic->duration % AV_TIME_BASE;
            mins = secs / 60;
            secs %= 60;
            hours = mins / 60;
            mins %= 60;
            av_log(NULL, AV_LOG_DEBUG, "%02d:%02d:%02d.%01d", hours, mins, secs,
                   (10 * us) / AV_TIME_BASE);
        } else {
            av_log(NULL, AV_LOG_DEBUG, "N/A");
        }
        av_log(NULL, AV_LOG_DEBUG, ", bitrate: ");
        if (ic->bit_rate) {
            av_log(NULL, AV_LOG_DEBUG,"%d kb/s", ic->bit_rate / 1000);
        } else {
            av_log(NULL, AV_LOG_DEBUG, "N/A");
        }
        av_log(NULL, AV_LOG_DEBUG, "\n");
    }
    for(i=0;i<ic->nb_streams;i++) {
        AVStream *st = ic->streams[i];
        avcodec_string(buf, sizeof(buf), &st->codec, is_output);
        av_log(NULL, AV_LOG_DEBUG, "  Stream #%d.%d", index, i);
        /* the pid is an important information, so we display it */
        /* XXX: add a generic system */
        if (is_output)
            flags = ic->oformat->flags;
        else
            flags = ic->iformat->flags;
        if (flags & AVFMT_SHOW_IDS) {
            av_log(NULL, AV_LOG_DEBUG, "[0x%x]", st->id);
        }
        av_log(NULL, AV_LOG_DEBUG, ": %s\n", buf);
    }
}

/* one entry of the frame-size / frame-rate abbreviation table below */
typedef struct {
    const char *abv;
    int width, height;
    int frame_rate, frame_rate_base;
} AbvEntry;

static AbvEntry frame_abvs[] = {
    { "ntsc",      720, 480, 30000, 1001 },
    { "pal",       720, 576,    25,    1 },
    { "qntsc",     352, 240, 30000, 1001 }, /* VCD compliant ntsc */
    { "qpal",      352, 288,    25,    1 }, /* VCD compliant pal */
    { "sntsc",     640, 480, 30000, 1001 }, /* square pixel ntsc */
    { "spal",      768, 576,    25,    1 }, /* square pixel pal */
    { "film",      352, 240,    24,    1 },
    { "ntsc-film", 352, 240, 24000, 1001 },
    {
      "sqcif",     128,  96,     0,    0 },
    { "qcif",      176, 144,     0,    0 },
    { "cif",       352, 288,     0,    0 },
    { "4cif",      704, 576,     0,    0 },
};

/**
 * Parse a picture size: either an abbreviation from frame_abvs[]
 * ("pal", "cif", ...) or an explicit "WIDTHxHEIGHT" string.
 *
 * @param width_ptr  receives the parsed width
 * @param height_ptr receives the parsed height
 * @param str        string to parse
 * @return 0 if OK, -1 if the result is not a positive size
 */
int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
{
    int i;
    int n = sizeof(frame_abvs) / sizeof(AbvEntry);
    const char *p;
    int frame_width = 0, frame_height = 0;

    for(i=0;i<n;i++) {
        if (!strcmp(frame_abvs[i].abv, str)) {
            frame_width = frame_abvs[i].width;
            frame_height = frame_abvs[i].height;
            break;
        }
    }
    if (i == n) {
        /* not an abbreviation: expect "<width><sep><height>", e.g. 640x480 */
        p = str;
        frame_width = strtol(p, (char **)&p, 10);
        if (*p)
            p++;
        frame_height = strtol(p, (char **)&p, 10);
    }
    if (frame_width <= 0 || frame_height <= 0)
        return -1;
    *width_ptr = frame_width;
    *height_ptr = frame_height;
    return 0;
}

/**
 * Parse a frame rate: either an abbreviation from frame_abvs[] or a
 * numeric value/fraction.
 *
 * @param frame_rate      receives the numerator
 * @param frame_rate_base receives the denominator
 * @param arg             string to parse
 * @return 0 if OK (visible portion; function is truncated in this chunk)
 */
int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg)
{
    int i;
    char* cp;

    /* First, we check our abbreviation table */
    for (i = 0; i < sizeof(frame_abvs)/sizeof(*frame_abvs); ++i)
         if (!strcmp(frame_abvs[i].abv, arg)) {
	     *frame_rate = frame_abvs[i].frame_rate;
	     *frame_rate_base = frame_abvs[i].frame_rate_base;
	     return 0;
	 }

    /* Then, we try to parse it as fraction */

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -