
utils.c

mediastreamer2 is an open-source library for streaming media over a network.

Language: C
Page 1 of 5
#define PROBE_BUF_MIN 2048
#define PROBE_BUF_MAX (1<<20)

int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
                       AVInputFormat *fmt,
                       int buf_size,
                       AVFormatParameters *ap)
{
    int err, probe_size;
    AVProbeData probe_data, *pd = &probe_data;
    ByteIOContext *pb = NULL;

    pd->filename = "";
    if (filename)
        pd->filename = filename;
    pd->buf = NULL;
    pd->buf_size = 0;

    if (!fmt) {
        /* guess format if no file can be opened */
        fmt = av_probe_input_format(pd, 0);
    }

    /* Do not open file if the format does not need it. XXX: specific
       hack needed to handle RTSP/TCP */
    if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
        /* if no file needed do not try to open one */
        if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
            goto fail;
        }
        if (buf_size > 0) {
            url_setbufsize(pb, buf_size);
        }

        for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
            int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
            /* read probe data */
            pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
            pd->buf_size = get_buffer(pb, pd->buf, probe_size);
            memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
            if (url_fseek(pb, 0, SEEK_SET) < 0) {
                url_fclose(pb);
                if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
                    pb = NULL;
                    err = AVERROR(EIO);
                    goto fail;
                }
            }
            /* guess file format */
            fmt = av_probe_input_format2(pd, 1, &score);
        }
        av_freep(&pd->buf);
    }

    /* if still no format found, error */
    if (!fmt) {
        err = AVERROR_NOFMT;
        goto fail;
    }

    /* check filename in case an image number is expected */
    if (fmt->flags & AVFMT_NEEDNUMBER) {
        if (!av_filename_number_test(filename)) {
            err = AVERROR_NUMEXPECTED;
            goto fail;
        }
    }
    err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
    if (err)
        goto fail;
    return 0;
 fail:
    av_freep(&pd->buf);
    if (pb)
        url_fclose(pb);
    *ic_ptr = NULL;
    return err;
}

/*******************************************************/

int av_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret;
    AVStream *st;

    av_init_packet(pkt);
    ret= s->iformat->read_packet(s, pkt);
    if (ret < 0)
        return ret;
    st= s->streams[pkt->stream_index];

    switch(st->codec->codec_type){
    case CODEC_TYPE_VIDEO:
        if(s->video_codec_id)   st->codec->codec_id= s->video_codec_id;
        break;
    case CODEC_TYPE_AUDIO:
        if(s->audio_codec_id)   st->codec->codec_id= s->audio_codec_id;
        break;
    case CODEC_TYPE_SUBTITLE:
        if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
        break;
    }

    return ret;
}

/**********************************************************/

/**
 * Get the number of samples of an audio frame. Return -1 on error.
 */
static int get_audio_frame_size(AVCodecContext *enc, int size)
{
    int frame_size;

    if (enc->frame_size <= 1) {
        int bits_per_sample = av_get_bits_per_sample(enc->codec_id);

        if (bits_per_sample) {
            if (enc->channels == 0)
                return -1;
            frame_size = (size << 3) / (bits_per_sample * enc->channels);
        } else {
            /* used for example by ADPCM codecs */
            if (enc->bit_rate == 0)
                return -1;
            frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
        }
    } else {
        frame_size = enc->frame_size;
    }
    return frame_size;
}

/**
 * Return the frame duration in seconds. Return 0 if not available.
 */
static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
                                   AVCodecParserContext *pc, AVPacket *pkt)
{
    int frame_size;

    *pnum = 0;
    *pden = 0;
    switch(st->codec->codec_type) {
    case CODEC_TYPE_VIDEO:
        if(st->time_base.num*1000LL > st->time_base.den){
            *pnum = st->time_base.num;
            *pden = st->time_base.den;
        }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
            *pnum = st->codec->time_base.num;
            *pden = st->codec->time_base.den;
            if (pc && pc->repeat_pict) {
                *pden *= 2;
                *pnum = (*pnum) * (2 + pc->repeat_pict);
            }
        }
        break;
    case CODEC_TYPE_AUDIO:
        frame_size = get_audio_frame_size(st->codec, pkt->size);
        if (frame_size < 0)
            break;
        *pnum = frame_size;
        *pden = st->codec->sample_rate;
        break;
    default:
        break;
    }
}

static int is_intra_only(AVCodecContext *enc){
    if(enc->codec_type == CODEC_TYPE_AUDIO){
        return 1;
    }else if(enc->codec_type == CODEC_TYPE_VIDEO){
        switch(enc->codec_id){
        case CODEC_ID_MJPEG:
        case CODEC_ID_MJPEGB:
        case CODEC_ID_LJPEG:
        case CODEC_ID_RAWVIDEO:
        case CODEC_ID_DVVIDEO:
        case CODEC_ID_HUFFYUV:
        case CODEC_ID_FFVHUFF:
        case CODEC_ID_ASV1:
        case CODEC_ID_ASV2:
        case CODEC_ID_VCR1:
            return 1;
        default: break;
        }
    }
    return 0;
}

static void update_initial_timestamps(AVFormatContext *s, int stream_index,
                                      int64_t dts, int64_t pts)
{
    AVStream *st= s->streams[stream_index];
    AVPacketList *pktl= s->packet_buffer;

    if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE)
        return;

    st->first_dts= dts - st->cur_dts;
    st->cur_dts= dts;

    for(; pktl; pktl= pktl->next){
        if(pktl->pkt.stream_index != stream_index)
            continue;
        //FIXME think more about this check
        if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
            pktl->pkt.pts += st->first_dts;

        if(pktl->pkt.dts != AV_NOPTS_VALUE)
            pktl->pkt.dts += st->first_dts;

        if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
            st->start_time= pktl->pkt.pts;
    }
    if (st->start_time == AV_NOPTS_VALUE)
        st->start_time = pts;
}

static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
                               AVCodecParserContext *pc, AVPacket *pkt)
{
    int num, den, presentation_delayed, delay, i;
    int64_t offset;

    if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
       /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
        pkt->dts -= 1LL<<st->pts_wrap_bits;
    }

    if (pkt->duration == 0) {
        compute_frame_duration(&num, &den, st, pc, pkt);
        if (den && num) {
            pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
        }
    }

    /* correct timestamps with byte offset if demuxers only have timestamps
       on packet boundaries */
    if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
        /* this will estimate bitrate based on this frame's duration and size */
        offset = av_rescale(pc->offset, pkt->duration, pkt->size);
        if(pkt->pts != AV_NOPTS_VALUE)
            pkt->pts += offset;
        if(pkt->dts != AV_NOPTS_VALUE)
            pkt->dts += offset;
    }

    /* do we have a video B-frame ? */
    delay= st->codec->has_b_frames;
    presentation_delayed = 0;
    /* XXX: need has_b_frame, but cannot get it if the codec is
       not initialized */
    if (delay &&
        pc && pc->pict_type != FF_B_TYPE)
        presentation_delayed = 1;
    /* This may be redundant, but it should not hurt. */
    if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
        presentation_delayed = 1;

    if(st->cur_dts == AV_NOPTS_VALUE){
        st->cur_dts = 0; //FIXME maybe set it to 0 during init
    }

//    av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
    /* interpolate PTS and DTS if they are not present */
    if(delay <=1){
        if (presentation_delayed) {
            /* DTS = decompression timestamp */
            /* PTS = presentation timestamp */
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->last_IP_pts;
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->cur_dts;

            /* this is tricky: the dts must be incremented by the duration
               of the frame we are displaying, i.e. the last I- or P-frame */
            if (st->last_IP_duration == 0)
                st->last_IP_duration = pkt->duration;
            st->cur_dts = pkt->dts + st->last_IP_duration;
            st->last_IP_duration  = pkt->duration;
            st->last_IP_pts= pkt->pts;
            /* cannot compute PTS if not present (we can compute it only
               by knowing the future */
        } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
            if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
                int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
                int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
                if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
                    pkt->pts += pkt->duration;
//                    av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
                }
            }

            /* presentation is not delayed : PTS and DTS are the same */
            if(pkt->pts == AV_NOPTS_VALUE)
                pkt->pts = pkt->dts;
            update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
            if(pkt->pts == AV_NOPTS_VALUE)
                pkt->pts = st->cur_dts;
            pkt->dts = pkt->pts;
            st->cur_dts = pkt->pts + pkt->duration;
        }
    }

    if(pkt->pts != AV_NOPTS_VALUE){
        st->pts_buffer[0]= pkt->pts;
        for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
            st->pts_buffer[i]= (i-delay-1) * pkt->duration;
        for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
            FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
        if(pkt->dts == AV_NOPTS_VALUE)
            pkt->dts= st->pts_buffer[0];
        if(delay>1){
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
        }
        if(pkt->dts > st->cur_dts)
            st->cur_dts = pkt->dts;
    }

//    av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);

    /* update flags */
    if(is_intra_only(st->codec))
        pkt->flags |= PKT_FLAG_KEY;
    else if (pc) {
        pkt->flags = 0;
        /* keyframe computation */
        if (pc->pict_type == FF_I_TYPE)
            pkt->flags |= PKT_FLAG_KEY;
    }
}

void av_destruct_packet_nofree(AVPacket *pkt)
{
    pkt->data = NULL; pkt->size = 0;
}

static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
{
    AVStream *st;
    int len, ret, i;

    av_init_packet(pkt);

    for(;;) {
        /* select current input stream component */
        st = s->cur_st;
        if (st) {
            if (!st->need_parsing || !st->parser) {
                /* no parsing needed: we just output the packet as is */
                /* raw data support */
                *pkt = s->cur_pkt;
                compute_pkt_fields(s, st, NULL, pkt);
                s->cur_st = NULL;
                break;
            } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
                len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
                                      s->cur_ptr, s->cur_len,
                                      s->cur_pkt.pts, s->cur_pkt.dts);
                s->cur_pkt.pts = AV_NOPTS_VALUE;
                s->cur_pkt.dts = AV_NOPTS_VALUE;
                /* increment read pointer */
                s->cur_ptr += len;
                s->cur_len -= len;

                /* return packet if any */
                if (pkt->size) {
                got_packet:
                    pkt->pos = s->cur_pkt.pos;              // Isn't quite accurate but close.
                    pkt->duration = 0;
                    pkt->stream_index = st->index;
                    pkt->pts = st->parser->pts;
                    pkt->dts = st->parser->dts;
                    pkt->destruct = av_destruct_packet_nofree;
                    compute_pkt_fields(s, st, st->parser, pkt);

                    if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
                        ff_reduce_index(s, st->index);
                        av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
                                           0, 0, AVINDEX_KEYFRAME);
                    }

                    break;
                }
            } else {
                /* free packet */
                av_free_packet(&s->cur_pkt);
                s->cur_st = NULL;
            }
        } else {
            /* read next packet */
            ret = av_read_packet(s, &s->cur_pkt);
            if (ret < 0) {
                if (ret == AVERROR(EAGAIN))
                    return ret;
                /* return the last frames, if any */
                for(i = 0; i < s->nb_streams; i++) {
                    st = s->streams[i];
                    if (st->parser && st->need_parsing) {
                        av_parser_parse(st->parser, st->codec,
                                        &pkt->data, &pkt->size,
                                        NULL, 0,
                                        AV_NOPTS_VALUE, AV_NOPTS_VALUE);
                        if (pkt->size)
                            goto got_packet;
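
The listing above is only the first page of the file and breaks off inside av_read_frame_internal(); the remainder continues on the following pages. For orientation, here is a minimal sketch of how an application of this era typically drives the legacy libavformat demuxing API, which funnels into the functions shown above. The header path, file name, and error handling are illustrative assumptions, not part of utils.c.

#include <stdio.h>
#include "avformat.h"   /* legacy libavformat header; the exact include path depends on the build */

/* Sketch: open an input, read demuxed packets, and print their basic fields. */
static int dump_packets(const char *path)
{
    AVFormatContext *ic = NULL;
    AVPacket pkt;
    int err;

    av_register_all();                 /* register all demuxers and protocols */

    /* av_open_input_file() probes the container format (see the probe loop above). */
    if ((err = av_open_input_file(&ic, path, NULL, 0, NULL)) < 0)
        return err;

    /* Fill in codec/stream parameters before reading frames. */
    if ((err = av_find_stream_info(ic)) < 0) {
        av_close_input_file(ic);
        return err;
    }

    /* av_read_frame() ultimately relies on av_read_frame_internal()/av_read_packet(). */
    while (av_read_frame(ic, &pkt) >= 0) {
        printf("stream %d  pts %lld  size %d\n",
               pkt.stream_index, (long long)pkt.pts, pkt.size);
        av_free_packet(&pkt);
    }

    av_close_input_file(ic);
    return 0;
}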
