
utils.c
Source: F:图像处理资料264264书籍ffmpeg-0.4.9-pre1VideoStream.rar (video decompression source code)
Language: C
Page 1 of 5
    if (!fmt) {
        err = AVERROR_NOFMT;
        goto fail;
    }

    /* XXX: suppress this hack for redirectors */
#ifdef CONFIG_NETWORK
    if (fmt == &redir_demux) {
        err = redir_open(ic_ptr, pb);
        url_fclose(pb);
        return err;
    }
#endif

    /* check filename in case an image number is expected */
    if (fmt->flags & AVFMT_NEEDNUMBER) {
        if (filename_number_test(filename) < 0) {
            err = AVERROR_NUMEXPECTED;
            goto fail;
        }
    }
    err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
    if (err)
        goto fail;
    return 0;
 fail:
    if (file_opened)
        url_fclose(pb);
    *ic_ptr = NULL;
    return err;
}

/*******************************************************/

/**
 * Read a transport packet from a media file. This function is
 * obsolete and should never be used. Use av_read_frame() instead.
 *
 * @param s media file handle
 * @param pkt is filled
 * @return 0 if OK. AVERROR_xxx if error.
 */
int av_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    return s->iformat->read_packet(s, pkt);
}

/**********************************************************/

/* get the number of samples of an audio frame. Return -1 if error */
static int get_audio_frame_size(AVCodecContext *enc, int size)
{
    int frame_size;

    if (enc->frame_size <= 1) {
        /* specific hack for pcm codecs because no frame size is
           provided */
        switch(enc->codec_id) {
        case CODEC_ID_PCM_S16LE:
        case CODEC_ID_PCM_S16BE:
        case CODEC_ID_PCM_U16LE:
        case CODEC_ID_PCM_U16BE:
            if (enc->channels == 0)
                return -1;
            frame_size = size / (2 * enc->channels);
            break;
        case CODEC_ID_PCM_S8:
        case CODEC_ID_PCM_U8:
        case CODEC_ID_PCM_MULAW:
        case CODEC_ID_PCM_ALAW:
            if (enc->channels == 0)
                return -1;
            frame_size = size / (enc->channels);
            break;
        default:
            /* used for example by ADPCM codecs */
            if (enc->bit_rate == 0)
                return -1;
            frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
            break;
        }
    } else {
        frame_size = enc->frame_size;
    }
    return frame_size;
}

/* return the frame duration in seconds (as the fraction pnum/pden), or 0 if not available */
static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
                                   AVCodecParserContext *pc, AVPacket *pkt)
{
    int frame_size;

    *pnum = 0;
    *pden = 0;
    switch(st->codec.codec_type) {
    case CODEC_TYPE_VIDEO:
        *pnum = st->codec.frame_rate_base;
        *pden = st->codec.frame_rate;
        if (pc && pc->repeat_pict) {
            *pden *= 2;
            *pnum = (*pnum) * (2 + pc->repeat_pict);
        }
        break;
    case CODEC_TYPE_AUDIO:
        frame_size = get_audio_frame_size(&st->codec, pkt->size);
        if (frame_size < 0)
            break;
        *pnum = frame_size;
        *pden = st->codec.sample_rate;
        break;
    default:
        break;
    }
}

static int64_t lsb2full(int64_t lsb, int64_t last_ts, int lsb_bits)
{
    int64_t mask = lsb_bits < 64 ? (1LL<<lsb_bits)-1 : -1LL;
    int64_t delta = last_ts - mask/2;
    return ((lsb - delta)&mask) + delta;
}
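lsb2full() undoes timestamp wrap-around: containers store PTS/DTS in a fixed number of bits (pts_wrap_bits, 33 bits for the MPEG demuxers in this tree), so the stored value periodically wraps back to zero, and the helper picks the full-range value closest to the last known timestamp. A small standalone check, duplicating the helper only so the snippet compiles on its own:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* copy of the lsb2full() helper above, kept standalone for illustration */
static int64_t lsb2full(int64_t lsb, int64_t last_ts, int lsb_bits)
{
    int64_t mask = lsb_bits < 64 ? (1LL<<lsb_bits)-1 : -1LL;
    int64_t delta = last_ts - mask/2;
    return ((lsb - delta)&mask) + delta;
}

int main(void)
{
    /* a 33-bit PTS that has just wrapped: the stored value is 100,
       but the stream position (last_ts) is close to 2^33 = 8589934592 */
    int64_t last_ts = 8589934000LL;
    int64_t wrapped = 100;              /* value as read from the bitstream */
    printf("%" PRId64 "\n", lsb2full(wrapped, last_ts, 33));
    /* prints 8589934692, i.e. 2^33 + 100: the wrap has been undone */
    return 0;
}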
static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
                               AVCodecParserContext *pc, AVPacket *pkt)
{
    int num, den, presentation_delayed;

    /* handle wrapping */
    if(st->cur_dts != AV_NOPTS_VALUE){
        if(pkt->pts != AV_NOPTS_VALUE)
            pkt->pts = lsb2full(pkt->pts, st->cur_dts, st->pts_wrap_bits);
        if(pkt->dts != AV_NOPTS_VALUE)
            pkt->dts = lsb2full(pkt->dts, st->cur_dts, st->pts_wrap_bits);
    }

    if (pkt->duration == 0) {
        compute_frame_duration(&num, &den, st, pc, pkt);
        if (den && num) {
            pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
        }
    }

    /* do we have a video B frame ? */
    presentation_delayed = 0;
    if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
        /* XXX: need has_b_frame, but cannot get it if the codec is
           not initialized */
        if ((st->codec.codec_id == CODEC_ID_MPEG1VIDEO ||
             st->codec.codec_id == CODEC_ID_MPEG2VIDEO ||
             st->codec.codec_id == CODEC_ID_MPEG4 ||
             st->codec.codec_id == CODEC_ID_H264) &&
            pc && pc->pict_type != FF_B_TYPE)
            presentation_delayed = 1;
        /* this may be redundant, but it shouldn't hurt */
        if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
            presentation_delayed = 1;
    }

    if(st->cur_dts == AV_NOPTS_VALUE){
        if(presentation_delayed) st->cur_dts = -pkt->duration;
        else                     st->cur_dts = 0;
    }

//    av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%lld, dts:%lld cur_dts:%lld st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);

    /* interpolate PTS and DTS if they are not present */
    if (presentation_delayed) {
        /* DTS = decompression time stamp */
        /* PTS = presentation time stamp */
        if (pkt->dts == AV_NOPTS_VALUE) {
            /* if we know the last pts, use it */
            if(st->last_IP_pts != AV_NOPTS_VALUE)
                st->cur_dts = pkt->dts = st->last_IP_pts;
            else
                pkt->dts = st->cur_dts;
        } else {
            st->cur_dts = pkt->dts;
        }
        /* this is tricky: the dts must be incremented by the duration
           of the frame we are displaying, i.e. the last I or P frame */
        if (st->last_IP_duration == 0)
            st->cur_dts += pkt->duration;
        else
            st->cur_dts += st->last_IP_duration;
        st->last_IP_duration = pkt->duration;
        st->last_IP_pts = pkt->pts;
        /* cannot compute PTS if not present (we can compute it only
           by knowing the future) */
    } else {
        /* presentation is not delayed: PTS and DTS are the same */
        if (pkt->pts == AV_NOPTS_VALUE) {
            if (pkt->dts == AV_NOPTS_VALUE) {
                pkt->pts = st->cur_dts;
                pkt->dts = st->cur_dts;
            } else {
                st->cur_dts = pkt->dts;
                pkt->pts = pkt->dts;
            }
        } else {
            st->cur_dts = pkt->pts;
            pkt->dts = pkt->pts;
        }
        st->cur_dts += pkt->duration;
    }

//    av_log(NULL, AV_LOG_DEBUG, "OUTdelayed:%d pts:%lld, dts:%lld cur_dts:%lld\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts);

    /* update flags */
    if (pc) {
        pkt->flags = 0;
        /* key frame computation */
        switch(st->codec.codec_type) {
        case CODEC_TYPE_VIDEO:
            if (pc->pict_type == FF_I_TYPE)
                pkt->flags |= PKT_FLAG_KEY;
            break;
        case CODEC_TYPE_AUDIO:
            pkt->flags |= PKT_FLAG_KEY;
            break;
        default:
            break;
        }
    }

    /* convert the packet time stamp units */
    if(pkt->pts != AV_NOPTS_VALUE)
        pkt->pts = av_rescale(pkt->pts, AV_TIME_BASE * (int64_t)st->time_base.num, st->time_base.den);
    if(pkt->dts != AV_NOPTS_VALUE)
        pkt->dts = av_rescale(pkt->dts, AV_TIME_BASE * (int64_t)st->time_base.num, st->time_base.den);

    /* duration field */
    pkt->duration = av_rescale(pkt->duration, AV_TIME_BASE * (int64_t)st->time_base.num, st->time_base.den);
}

static void av_destruct_packet_nofree(AVPacket *pkt)
{
    pkt->data = NULL;
    pkt->size = 0;
}
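The tail end of compute_pkt_fields() rescales the demuxer's native tick values into AV_TIME_BASE units (microseconds in this tree), using the same a*b/c shape as the duration computation earlier in the function. A standalone sketch with hand-checkable numbers, assuming a common 90 kHz MPEG time base and a 25 fps stream; the rescale() helper below stands in for av_rescale() purely for illustration:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define AV_TIME_BASE 1000000          /* microseconds, as in this FFmpeg version */

/* plain a*b/c, standing in for av_rescale() in this illustration only */
static int64_t rescale(int64_t a, int64_t b, int64_t c)
{
    return a * b / c;
}

int main(void)
{
    int64_t tb_num = 1, tb_den = 90000;   /* stream time_base = 1/90000 */

    /* one 25 fps frame expressed in 90 kHz ticks, mirroring
       av_rescale(1, num * time_base.den, den * time_base.num) above */
    int64_t frame_rate_base = 1, frame_rate = 25;
    int64_t duration_ticks = rescale(1, frame_rate_base * tb_den, frame_rate * tb_num);
    printf("duration = %" PRId64 " ticks\n", duration_ticks);       /* prints 3600 */

    /* final conversion to AV_TIME_BASE units, as done for pts/dts/duration */
    int64_t duration_us = rescale(duration_ticks, AV_TIME_BASE * tb_num, tb_den);
    printf("duration = %" PRId64 " us\n", duration_us);             /* prints 40000, i.e. 40 ms */
    return 0;
}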
static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
{
    AVStream *st;
    int len, ret, i;

    for(;;) {
        /* select current input stream component */
        st = s->cur_st;
        if (st) {
            if (!st->parser) {
                /* no parsing needed: we just output the packet as is */
                /* raw data support */
                *pkt = s->cur_pkt;
                compute_pkt_fields(s, st, NULL, pkt);
                s->cur_st = NULL;
                return 0;
            } else if (s->cur_len > 0) {
                len = av_parser_parse(st->parser, &st->codec, &pkt->data, &pkt->size,
                                      s->cur_ptr, s->cur_len,
                                      s->cur_pkt.pts, s->cur_pkt.dts);
                s->cur_pkt.pts = AV_NOPTS_VALUE;
                s->cur_pkt.dts = AV_NOPTS_VALUE;
                /* increment read pointer */
                s->cur_ptr += len;
                s->cur_len -= len;

                /* return packet if any */
                if (pkt->size) {
                got_packet:
                    pkt->duration = 0;
                    pkt->stream_index = st->index;
                    pkt->pts = st->parser->pts;
                    pkt->dts = st->parser->dts;
                    pkt->destruct = av_destruct_packet_nofree;
                    compute_pkt_fields(s, st, st->parser, pkt);
                    return 0;
                }
            } else {
                /* free packet */
                av_free_packet(&s->cur_pkt);
                s->cur_st = NULL;
            }
        } else {
            /* read next packet */
            ret = av_read_packet(s, &s->cur_pkt);
            if (ret < 0) {
                if (ret == -EAGAIN)
                    return ret;
                /* return the last frames, if any */
                for(i = 0; i < s->nb_streams; i++) {
                    st = s->streams[i];
                    if (st->parser) {
                        av_parser_parse(st->parser, &st->codec,
                                        &pkt->data, &pkt->size,
                                        NULL, 0,
                                        AV_NOPTS_VALUE, AV_NOPTS_VALUE);
                        if (pkt->size)
                            goto got_packet;
                    }
                }
                /* no more packets: really terminates parsing */
                return ret;
            }

            st = s->streams[s->cur_pkt.stream_index];
            s->cur_st = st;
            s->cur_ptr = s->cur_pkt.data;
            s->cur_len = s->cur_pkt.size;
            if (st->need_parsing && !st->parser) {
                st->parser = av_parser_init(st->codec.codec_id);
                if (!st->parser) {
                    /* no parser available: just output the raw packets */
                    st->need_parsing = 0;
                }
            }
        }
    }
}

/**
 * Return the next frame of a stream. The returned packet is valid
 * until the next av_read_frame() or until av_close_input_file() and
 * must be freed with av_free_packet. For video, the packet contains
 * exactly one frame. For audio, it contains an integer number of
 * frames if each frame has a known fixed size (e.g. PCM or ADPCM
 * data). If the audio frames have a variable size (e.g. MPEG audio),
 * then it contains one frame.
 *
 * pkt->pts, pkt->dts and pkt->duration are always set to correct
 * values in AV_TIME_BASE units (and guessed if the format cannot
 * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format
 * has B frames, so it is better to rely on pkt->dts if you do not
 * decompress the payload.
 *
 * Return 0 if OK, < 0 if error or end of file.
 */
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
{
    AVPacketList *pktl;

    pktl = s->packet_buffer;
    if (pktl) {
        /* read packet from packet buffer, if there is data */
        *pkt = pktl->pkt;
        s->packet_buffer = pktl->next;
        av_free(pktl);
        return 0;
    } else {
        return av_read_frame_internal(s, pkt);
    }
}
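The documentation block above spells out the contract of av_read_frame(): the packet stays valid only until the next call, timestamps come back in AV_TIME_BASE units, and pkt->dts is the safer field when the format may contain B frames. A minimal hedged sketch of the usual demuxing loop against this 0.4.9-pre1 API (include path and error handling simplified):

#include <stdio.h>
#include "avformat.h"   /* adjust the include path for your ffmpeg-0.4.9-pre1 build */

int main(int argc, char **argv)
{
    AVFormatContext *ic;
    AVPacket pkt;

    if (argc < 2)
        return 1;
    av_register_all();

    /* open and probe the input file */
    if (av_open_input_file(&ic, argv[1], NULL, 0, NULL) < 0)
        return 1;
    if (av_find_stream_info(ic) < 0)
        return 1;

    /* pull demuxed packets until EOF; timestamps are in AV_TIME_BASE units */
    while (av_read_frame(ic, &pkt) >= 0) {
        printf("stream %d  dts %lld  size %d\n",
               pkt.stream_index, (long long)pkt.dts, pkt.size);
        av_free_packet(&pkt);   /* the packet is only valid until the next read */
    }

    av_close_input_file(ic);
    return 0;
}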
/* XXX: suppress the packet queue */
static void flush_packet_queue(AVFormatContext *s)
{
    AVPacketList *pktl;

    for(;;) {
        pktl = s->packet_buffer;
        if (!pktl)
            break;
        s->packet_buffer = pktl->next;
        av_free_packet(&pktl->pkt);
        av_free(pktl);
    }
}

/*******************************************************/
/* seek support */

int av_find_default_stream_index(AVFormatContext *s)
{
    int i;
    AVStream *st;

    if (s->nb_streams <= 0)
        return -1;
    for(i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];
        if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
            return i;
        }
    }
    return 0;
}

/* flush the frame reader */
static void av_read_frame_flush(AVFormatContext *s)
{
    AVStream *st;
    int i;

    flush_packet_queue(s);

    /* free previous packet */
    if (s->cur_st) {
        if (s->cur_st->parser)
            av_free_packet(&s->cur_pkt);
        s->cur_st = NULL;
    }
    /* fail safe */
    s->cur_ptr = NULL;
    s->cur_len = 0;

    /* for each stream, reset read state */
    for(i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];

        if (st->parser) {
            av_parser_close(st->parser);
            st->parser = NULL;
        }
        st->last_IP_pts = AV_NOPTS_VALUE;
        st->cur_dts = 0; /* we set the current DTS to an unspecified origin */
    }
}

/**
 * Add an index entry into a sorted list, updating it if it is already there.
 * @param timestamp timestamp in the timebase of the given stream
 */
int av_add_index_entry(AVStream *st,
                       int64_t pos, int64_t timestamp, int distance, int flags)
{
    AVIndexEntry *entries, *ie;
    int index;

    entries = av_fast_realloc(st->index_entries,
                              &st->index_entries_allocated_size,
                              (st->nb_index_entries + 1) *
                              sizeof(AVIndexEntry));
    st->index_entries = entries;

    if(st->nb_index_entries){
        index = av_index_search_timestamp(st, timestamp);
        ie = &entries[index];
        if(ie->timestamp != timestamp){
            if(ie->timestamp < timestamp){
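The listing is cut off here at page 1 of 5, midway through av_add_index_entry(). For orientation, the technique it is starting to apply, searching a timestamp-sorted array and then either updating an existing entry or shifting the tail to insert a new one, looks roughly like the standalone sketch below. The names in it (idx_entry, idx_search, idx_add) are invented for this illustration and are not FFmpeg API.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

typedef struct { int64_t timestamp; int64_t pos; } idx_entry;

/* return index of the last entry with entry.timestamp <= wanted, or -1 */
static int idx_search(const idx_entry *e, int n, int64_t wanted)
{
    int lo = 0, hi = n - 1, found = -1;
    while (lo <= hi) {
        int mid = (lo + hi) / 2;
        if (e[mid].timestamp <= wanted) { found = mid; lo = mid + 1; }
        else                            hi = mid - 1;
    }
    return found;
}

static int idx_add(idx_entry *e, int *n, int cap, int64_t ts, int64_t pos)
{
    int i = idx_search(e, *n, ts);
    if (i >= 0 && e[i].timestamp == ts) {   /* already there: update in place */
        e[i].pos = pos;
        return i;
    }
    if (*n >= cap)
        return -1;
    i++;                                    /* insertion point keeps the array sorted */
    memmove(&e[i + 1], &e[i], (*n - i) * sizeof(*e));
    e[i].timestamp = ts;
    e[i].pos = pos;
    (*n)++;
    return i;
}

int main(void)
{
    idx_entry idx[8];
    int n = 0;
    idx_add(idx, &n, 8, 1000, 0);
    idx_add(idx, &n, 8, 3000, 4096);
    idx_add(idx, &n, 8, 2000, 2048);        /* lands between the other two */
    for (int i = 0; i < n; i++)
        printf("ts=%lld pos=%lld\n", (long long)idx[i].timestamp, (long long)idx[i].pos);
    return 0;
}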
