📄 utils.c
字号:
            break;
        }
    }
}

/*
 * Packet destructor for packets whose payload is owned elsewhere:
 * detach the data pointer without freeing it.  Used for packets that
 * point into the parser's internal buffer (see av_read_frame_internal).
 */
void av_destruct_packet_nofree(AVPacket *pkt)
{
    pkt->data = NULL;
    pkt->size = 0;
}

/*
 * Read the next frame, running raw demuxer packets through the stream's
 * AVCodecParser when st->need_parsing is set.  State machine driven by
 * s->cur_st / s->cur_pkt / s->cur_ptr / s->cur_len: a demuxed packet is
 * held in s->cur_pkt and consumed incrementally by the parser until
 * exhausted, then the next packet is read.
 *
 * Returns 0 on success with *pkt filled, or a negative error code
 * (propagates -EAGAIN from av_read_packet).
 */
static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
{
    AVStream *st;
    int len, ret, i;

    for(;;) {
        /* select current input stream component */
        st = s->cur_st;
        if (st) {
            if (!st->need_parsing || !st->parser) {
                /* no parsing needed: we just output the packet as is */
                /* raw data support */
                *pkt = s->cur_pkt;
                compute_pkt_fields(s, st, NULL, pkt);
                s->cur_st = NULL;
                return 0;
            } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
                /* feed the remaining bytes of the current packet to the
                 * parser; it returns the number of bytes consumed and may
                 * or may not emit a complete frame in pkt->data/pkt->size */
                len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
                                      s->cur_ptr, s->cur_len,
                                      s->cur_pkt.pts, s->cur_pkt.dts);
                /* timestamps are handed to the parser only once per packet */
                s->cur_pkt.pts = AV_NOPTS_VALUE;
                s->cur_pkt.dts = AV_NOPTS_VALUE;
                /* increment read pointer */
                s->cur_ptr += len;
                s->cur_len -= len;

                /* return packet if any */
                if (pkt->size) {
                got_packet:
                    pkt->duration = 0;
                    pkt->stream_index = st->index;
                    pkt->pts = st->parser->pts;
                    pkt->dts = st->parser->dts;
                    /* data points into the parser's buffer, so the packet
                     * must not free it */
                    pkt->destruct = av_destruct_packet_nofree;
                    compute_pkt_fields(s, st, st->parser, pkt);
                    return 0;
                }
            } else {
                /* free packet */
                av_free_packet(&s->cur_pkt);
                s->cur_st = NULL;
            }
        } else {
            /* read next packet */
            ret = av_read_packet(s, &s->cur_pkt);
            if (ret < 0) {
                if (ret == -EAGAIN)
                    return ret;
                /* return the last frames, if any: flush every parser with a
                 * NULL/0 input so buffered data is emitted */
                for(i = 0; i < s->nb_streams; i++) {
                    st = s->streams[i];
                    if (st->parser && st->need_parsing) {
                        av_parser_parse(st->parser, st->codec,
                                        &pkt->data, &pkt->size,
                                        NULL, 0,
                                        AV_NOPTS_VALUE, AV_NOPTS_VALUE);
                        if (pkt->size)
                            goto got_packet;
                    }
                }
                /* no more packets: really terminates parsing */
                return ret;
            }

            st = s->streams[s->cur_pkt.stream_index];

            s->cur_st = st;
            s->cur_ptr = s->cur_pkt.data;
            s->cur_len = s->cur_pkt.size;
            if (st->need_parsing && !st->parser) {
                /* lazily create the parser on first use */
                st->parser = av_parser_init(st->codec->codec_id);
                if (!st->parser) {
                    /* no parser available : just output the raw packets */
                    st->need_parsing = 0;
                }else if(st->need_parsing == 2){
                    /* demuxer guarantees complete frames, tell the parser */
                    st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
                }
            }
        }
    }
}

/**
 * Return the
 * next frame of a stream.
 *
 * The returned packet is valid
 * until the next av_read_frame() or until av_close_input_file() and
 * must be freed with av_free_packet. For video, the packet contains
 * exactly one frame. For audio, it contains an integer number of
 * frames if each frame has a known fixed size (e.g. PCM or ADPCM
 * data). If the audio frames have a variable size (e.g. MPEG audio),
 * then it contains one frame.
 *
 * pkt->pts, pkt->dts and pkt->duration are always set to correct
 * values in AV_TIME_BASE unit (and guessed if the format cannot
 * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format
 * has B frames, so it is better to rely on pkt->dts if you do not
 * decompress the payload.
 *
 * @return 0 if OK, < 0 if error or end of file.
 */
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
{
    AVPacketList *pktl;
    int eof=0;
    const int genpts= s->flags & AVFMT_FLAG_GENPTS;

    for(;;){
        pktl = s->packet_buffer;
        if (pktl) {
            AVPacket *next_pkt= &pktl->pkt;

            if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
                /* try to fill in a missing pts for the head packet from the
                 * dts of a later buffered packet of the same stream */
                while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
                    if(   pktl->pkt.stream_index == next_pkt->stream_index
                       && next_pkt->dts < pktl->pkt.dts
                       && pktl->pkt.pts != pktl->pkt.dts //not b frame
                       /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
                        next_pkt->pts= pktl->pkt.dts;
                    }
                    pktl= pktl->next;
                }
                pktl = s->packet_buffer;
            }

            /* hand out the head packet once its pts is known (or cannot be
             * derived: no dts, genpts disabled, or input exhausted) */
            if(   next_pkt->pts != AV_NOPTS_VALUE
               || next_pkt->dts == AV_NOPTS_VALUE
               || !genpts || eof){
                /* read packet from packet buffer, if there is data */
                *pkt = *next_pkt;
                s->packet_buffer = pktl->next;
                av_free(pktl);
                return 0;
            }
        }
        if(genpts){
            AVPacketList **plast_pktl= &s->packet_buffer;
            int ret= av_read_frame_internal(s, pkt);
            if(ret<0){
                /* at EOF, keep draining the buffered packets before
                 * reporting the error to the caller */
                if(pktl && ret != -EAGAIN){
                    eof=1;
                    continue;
                }else
                    return ret;
            }

            /* duplicate the packet: the buffered copy must own its data */
            if (av_dup_packet(pkt) < 0)
                return AVERROR_NOMEM;

            while(*plast_pktl) plast_pktl= &(*plast_pktl)->next; //FIXME maybe maintain pointer to the last?

            pktl = av_mallocz(sizeof(AVPacketList));
            if (!pktl)
                return AVERROR_NOMEM;

            /* add the packet in the buffered packet list */
            *plast_pktl = pktl;
            pktl->pkt= *pkt;
        }else{
            /* without pts generation no buffering is ever done */
            assert(!s->packet_buffer);
            return av_read_frame_internal(s, pkt);
        }
    }
}

/* XXX: suppress the packet queue */
/* Free every packet buffered in s->packet_buffer and the list nodes. */
static void flush_packet_queue(AVFormatContext *s)
{
    AVPacketList *pktl;

    for(;;) {
        pktl = s->packet_buffer;
        if (!pktl)
            break;
        s->packet_buffer = pktl->next;
        av_free_packet(&pktl->pkt);
        av_free(pktl);
    }
}

/*******************************************************/
/* seek support */

/*
 * Pick a default stream for seeking: the first video stream if one
 * exists, otherwise stream 0.  Returns -1 if there are no streams.
 */
int av_find_default_stream_index(AVFormatContext *s)
{
    int i;
    AVStream *st;

    if (s->nb_streams <= 0)
        return -1;
    for(i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];
        if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
            return i;
        }
    }
    return 0;
}

/**
 * Flush the frame reader: drops all buffered packets, the pending
 * current packet, and resets per-stream parser/timestamp state.
 * Called before seeking so no stale data survives the jump.
 */
static void av_read_frame_flush(AVFormatContext *s)
{
    AVStream *st;
    int i;

    flush_packet_queue(s);

    /* free previous packet */
    if (s->cur_st) {
        /* only parser-fed packets are still owned here; raw packets were
         * handed out to the caller */
        if (s->cur_st->parser)
            av_free_packet(&s->cur_pkt);
        s->cur_st = NULL;
    }
    /* fail safe */
    s->cur_ptr = NULL;
    s->cur_len = 0;

    /* for each stream, reset read state */
    for(i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];

        if (st->parser) {
            av_parser_close(st->parser);
            st->parser = NULL;
        }
        st->last_IP_pts = AV_NOPTS_VALUE;
        st->cur_dts = 0; /* we set the current DTS to an unspecified origin */
    }
}

/**
 * Updates cur_dts of all streams based on given timestamp and AVStream.
 *
 * Stream ref_st unchanged, others set cur_dts in their native timebase
 * only needed for timestamp wrapping or if (dts not set and pts!=dts)
 * @param timestamp new dts expressed in time_base of param ref_st
 * @param ref_st reference stream giving time_base of param timestamp
 */
static void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
{
    int i;

    for(i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];

        /* rescale from ref_st's time base into each stream's own time base;
         * the int64_t casts keep the num/den products from overflowing int */
        st->cur_dts = av_rescale(timestamp,
                                 st->time_base.den * (int64_t)ref_st->time_base.num,
                                 st->time_base.num * (int64_t)ref_st->time_base.den);
    }
}

/**
 * Add an index entry into a sorted list, updating it if it is already there.
 *
 * @param timestamp timestamp in the timebase of the given stream
 * @return index of the entry, or -1 on allocation failure or if an entry
 *         with a smaller timestamp already occupies the insertion slot
 */
int av_add_index_entry(AVStream *st,
                       int64_t pos, int64_t timestamp, int size, int distance, int flags)
{
    AVIndexEntry *entries, *ie;
    int index;

    /* refuse to grow past what (count+1)*sizeof can address */
    if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
        return -1;

    entries = av_fast_realloc(st->index_entries,
                              &st->index_entries_allocated_size,
                              (st->nb_index_entries + 1) *
                              sizeof(AVIndexEntry));
    if(!entries)
        return -1;

    st->index_entries= entries;

    /* find the insertion point that keeps the list sorted by timestamp */
    index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);

    if(index<0){
        /* timestamp is larger than every existing entry: append */
        index= st->nb_index_entries++;
        ie= &entries[index];
        assert(index==0 || ie[-1].timestamp < timestamp);
    }else{
        ie= &entries[index];
        if(ie->timestamp != timestamp){
            if(ie->timestamp <= timestamp)
                return -1;
            /* shift the tail up one slot to make room at 'index' */
            memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
            st->nb_index_entries++;
        }else if(ie->pos == pos && distance < ie->min_distance) //dont reduce the distance
            distance= ie->min_distance;
    }

    ie->pos = pos;
    ie->timestamp = timestamp;
    ie->min_distance= distance;
    ie->size= size;
    ie->flags = flags;

    return index;
}

/**
 * build an index for raw streams using a parser.
 */
static void av_build_index_raw(AVFormatContext *s)
{
    AVPacket pkt1, *pkt = &pkt1;
    int ret;
    AVStream *st;

    st = s->streams[0];
    av_read_frame_flush(s);
    url_fseek(&s->pb, s->data_offset, SEEK_SET);

    /* read every frame of stream 0 and record the byte offset of each
     * keyframe as an index entry */
    for(;;) {
        ret = av_read_frame(s, pkt);
        if (ret < 0)
            break;
        if (pkt->stream_index == 0 && st->parser &&
            (pkt->flags & PKT_FLAG_KEY)) {
            av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
                            0, 0, AVINDEX_KEYFRAME);
        }
        av_free_packet(pkt);
    }
}

/**
 * Returns TRUE if we deal with a raw stream.
 *
 * Raw codec data and parsing needed.
 */
static int is_raw_stream(AVFormatContext *s)
{
    AVStream *st;

    /* raw means exactly one stream whose packets must be parsed */
    if (s->nb_streams != 1)
        return 0;
    st = s->streams[0];
    if (!st->need_parsing)
        return 0;
    return 1;
}

/**
 * Gets the index for a specific timestamp.
 * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond to
 *              the timestamp which is <= the requested one, if backward is 0
 *              then it will be >=
 *              if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise
 * @return < 0 if no such timestamp could be found
 */
int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
                              int flags)
{
    AVIndexEntry *entries= st->index_entries;
    int nb_entries= st->nb_index_entries;
    int a, b, m;
    int64_t timestamp;

    /* binary search: a converges on the last entry <= wanted,
     * b on the first entry >= wanted */
    a = - 1;
    b = nb_entries;

    while (b - a > 1) {
        m = (a + b) >> 1;
        timestamp = entries[m].timestamp;
        if(timestamp >= wanted_timestamp)
            b = m;
        if(timestamp <= wanted_timestamp)
            a = m;
    }
    m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;

    if(!(flags & AVSEEK_FLAG_ANY)){
        /* walk away from the hit until a keyframe entry is found */
        while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
            m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
        }
    }

    if(m == nb_entries)
        return -1;
    return m;
}

/* NOTE(review): DEBUG_SEEK is defined unconditionally here, which compiles
 * in the verbose seek logging below — confirm this is intentional and not
 * a leftover from debugging. */
#define DEBUG_SEEK

/**
 * Does a binary search using av_index_search_timestamp() and AVCodec.read_timestamp().
 * this isn't supposed to be called directly by a user application, but by demuxers
 * @param target_ts target timestamp in the time base of the given stream
 * @param stream_index stream number
 */
int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
{
    AVInputFormat *avif= s->iformat;
    int64_t pos_min, pos_max, pos, pos_limit;
    int64_t ts_min, ts_max, ts;
    int64_t start_pos, filesize;
    int index, no_change;
    AVStream *st;

    if (stream_index < 0)
        return -1;

#ifdef DEBUG_SEEK
    av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
#endif

    ts_max=
    ts_min= AV_NOPTS_VALUE;
    pos_limit= -1; //gcc falsely says it may be uninitialized

    st= s->streams[stream_index];
    if(st->index_entries){
        AVIndexEntry *e;

        /* seed the bisection bounds from the cached index when possible */
        index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non keyframe entries in index case, especially read_timestamp()
        index= FFMAX(index, 0);
        e= &st->index_entries[index];

        if(e->timestamp <= target_ts || e->pos == e->min_distance){
            pos_min= e->pos;
            ts_min= e->timestamp;
#ifdef DEBUG_SEEK
            av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -