📄 utils.c
字号:
} /* presentation is not delayed : PTS and DTS are the same */
    /* NOTE(review): this is the tail of a packet-timestamp helper whose
     * signature and opening lines lie above this chunk — presumably the
     * compute_pkt_fields() called below; confirm against the full file. */
    if(pkt->pts == AV_NOPTS_VALUE)
        pkt->pts = pkt->dts;
    update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
    if(pkt->pts == AV_NOPTS_VALUE)
        pkt->pts = st->cur_dts;
    pkt->dts = pkt->pts;
    /* advance the stream clock past this packet */
    if(pkt->pts != AV_NOPTS_VALUE)
        st->cur_dts = pkt->pts + pkt->duration;
    }
    }

    if(pkt->pts != AV_NOPTS_VALUE){
        /* keep a small rolling buffer of recent PTS values so a missing
         * DTS can be derived for streams with decode delay (B-frames) */
        st->pts_buffer[0]= pkt->pts;
        for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
            st->pts_buffer[i]= (i-delay-1) * pkt->duration;
        /* insertion-sort the newest PTS into the buffer */
        for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
            FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);

        /* the smallest buffered PTS is the best guess for this DTS */
        if(pkt->dts == AV_NOPTS_VALUE)
            pkt->dts= st->pts_buffer[0];

        if(delay>1){
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
        }

        if(pkt->dts > st->cur_dts)
            st->cur_dts = pkt->dts;
    }
//    av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);

    /* update flags */
    if(is_intra_only(st->codec))
        pkt->flags |= PKT_FLAG_KEY;
    else if (pc) {
        pkt->flags = 0;
        /* keyframe computation */
        if (pc->pict_type == FF_I_TYPE)
            pkt->flags |= PKT_FLAG_KEY;
    }
}

/**
 * Packet destructor that does NOT free pkt->data: it only clears the data
 * pointer and size.  Used for packets whose payload is owned elsewhere
 * (here: the parser's internal buffer — see av_read_frame_internal()).
 */
void av_destruct_packet_nofree(AVPacket *pkt)
{
    pkt->data = NULL;
    pkt->size = 0;
}

/*
 * Read and, when required, parse raw packets from the demuxer into
 * frame-sized packets.  Returns 0 on success or a negative error code.
 * (Function continues on the following physical lines of this chunk.)
 */
static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
{
    AVStream *st;
    int len, ret, i;

    av_init_packet(pkt);

    for(;;) {
        /* select current input stream component */
        st = s->cur_st;
        if (st) {
            if (!st->need_parsing || !st->parser) {
                /* no parsing needed: we just output the packet as is */
                /* raw data support */
                *pkt = s->cur_pkt;
                compute_pkt_fields(s, st, NULL, pkt);
                s->cur_st = NULL;
                break;
            } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
                /* feed the buffered packet bytes through the parser; it
                 * returns complete frames in pkt->data/pkt->size */
                len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
                                      s->cur_ptr, s->cur_len,
                                      s->cur_pkt.pts, s->cur_pkt.dts);
                /* timestamps may only be attached to the first frame cut
                 * from this packet, so consume them here */
                s->cur_pkt.pts = AV_NOPTS_VALUE;
                s->cur_pkt.dts = AV_NOPTS_VALUE;
                /* increment read pointer */
s->cur_ptr += len;
                s->cur_len -= len;

                /* return packet if any */
                if (pkt->size) {
                got_packet:
                    pkt->pos = s->cur_pkt.pos; // Isn't quite accurate but close.
                    pkt->duration = 0;
                    pkt->stream_index = st->index;
                    pkt->pts = st->parser->pts;
                    pkt->dts = st->parser->dts;
                    /* payload belongs to the parser: do not free it */
                    pkt->destruct = av_destruct_packet_nofree;
                    compute_pkt_fields(s, st, st->parser, pkt);

                    if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
                        /* keep the generic index bounded, then record this keyframe */
                        ff_reduce_index(s, st->index);
                        av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
                                           0, 0, AVINDEX_KEYFRAME);
                    }

                    break;
                }
            } else {
                /* free packet */
                av_free_packet(&s->cur_pkt);
                s->cur_st = NULL;
            }
        } else {
            /* read next packet */
            ret = av_read_packet(s, &s->cur_pkt);
            if (ret < 0) {
                if (ret == AVERROR(EAGAIN))
                    return ret;
                /* return the last frames, if any */
                for(i = 0; i < s->nb_streams; i++) {
                    st = s->streams[i];
                    if (st->parser && st->need_parsing) {
                        /* NULL input flushes frames still buffered in the parser */
                        av_parser_parse(st->parser, st->codec,
                                        &pkt->data, &pkt->size,
                                        NULL, 0,
                                        AV_NOPTS_VALUE, AV_NOPTS_VALUE);
                        if (pkt->size)
                            goto got_packet;
                    }
                }
                /* no more packets: really terminate parsing */
                return ret;
            }

            /* a PTS earlier than the DTS is impossible: warn but keep going */
            if(s->cur_pkt.pts != AV_NOPTS_VALUE &&
               s->cur_pkt.dts != AV_NOPTS_VALUE &&
               s->cur_pkt.pts < s->cur_pkt.dts){
                av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
                       s->cur_pkt.stream_index,
                       s->cur_pkt.pts,
                       s->cur_pkt.dts,
                       s->cur_pkt.size);
//                av_free_packet(&s->cur_pkt);
//                return -1;
            }

            st = s->streams[s->cur_pkt.stream_index];

            if(s->debug & FF_FDEBUG_TS)
                av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
                       s->cur_pkt.stream_index,
                       s->cur_pkt.pts,
                       s->cur_pkt.dts,
                       s->cur_pkt.size,
                       s->cur_pkt.flags);

            s->cur_st = st;
            s->cur_ptr = s->cur_pkt.data;
            s->cur_len = s->cur_pkt.size;
            if (st->need_parsing && !st->parser) {
                /* lazily create the parser on the first packet of the stream */
                st->parser = av_parser_init(st->codec->codec_id);
                if (!st->parser) {
                    /* no parser available: just output the raw packets */
                    st->need_parsing = AVSTREAM_PARSE_NONE;
                }else if(st->need_parsing ==
AVSTREAM_PARSE_HEADERS){
                    /* headers-only parsing: input packets are already whole frames */
                    st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
                }
                if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
                    /* seed the parser offsets so index entries get real file positions */
                    st->parser->next_frame_offset=
                    st->parser->cur_offset= s->cur_pkt.pos;
                }
            }
        }
    }
    if(s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
               pkt->stream_index,
               pkt->pts,
               pkt->dts,
               pkt->size,
               pkt->flags);

    return 0;
}

/*
 * Return the next frame of a stream.  When AVFMT_FLAG_GENPTS is set,
 * packets are buffered so that a missing PTS can be filled in from the
 * DTS of a later packet of the same stream.
 */
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
{
    AVPacketList *pktl;
    int eof=0;
    const int genpts= s->flags & AVFMT_FLAG_GENPTS;

    for(;;){
        pktl = s->packet_buffer;
        if (pktl) {
            AVPacket *next_pkt= &pktl->pkt;

            if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
                /* missing PTS: scan ahead for a later packet of the same
                 * stream whose DTS can stand in for this packet's PTS */
                while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
                    if(   pktl->pkt.stream_index == next_pkt->stream_index
                       && next_pkt->dts < pktl->pkt.dts
                       && pktl->pkt.pts != pktl->pkt.dts //not b frame
                       /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
                        next_pkt->pts= pktl->pkt.dts;
                    }
                    pktl= pktl->next;
                }
                pktl = s->packet_buffer;
            }

            if(   next_pkt->pts != AV_NOPTS_VALUE
               || next_pkt->dts == AV_NOPTS_VALUE
               || !genpts
               || eof){
                /* read packet from packet buffer, if there is data */
                *pkt = *next_pkt;
                s->packet_buffer = pktl->next;
                av_free(pktl);
                return 0;
            }
        }
        if(genpts){
            int ret= av_read_frame_internal(s, pkt);
            if(ret<0){
                if(pktl && ret != AVERROR(EAGAIN)){
                    /* input exhausted: flush what is buffered before failing */
                    eof=1;
                    continue;
                }else
                    return ret;
            }
            /* buffered packets must own their payload */
            if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt)) < 0)
                return AVERROR(ENOMEM);
        }else{
            assert(!s->packet_buffer);
            return av_read_frame_internal(s, pkt);
        }
    }
}

/* XXX: suppress the packet queue */
static void flush_packet_queue(AVFormatContext *s)
{
    AVPacketList *pktl;

    /* drain and free every queued packet and its list node */
    for(;;) {
        pktl = s->packet_buffer;
        if (!pktl)
            break;
        s->packet_buffer = pktl->next;
        av_free_packet(&pktl->pkt);
        av_free(pktl);
    }
}

/*******************************************************/
/* seek support */

/*
 * Pick a default stream: the first video stream if any, otherwise the
 * first audio stream, otherwise stream 0.
 * (Function continues on the next physical line of this chunk.)
 */
int av_find_default_stream_index(AVFormatContext *s)
{
    int first_audio_index = -1;
    int i;
    AVStream *st;

    if (s->nb_streams <= 0)
        return -1;
    for(i = 0; i < s->nb_streams; i++) {
st = s->streams[i];
        if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
            /* first video stream wins */
            return i;
        }
        if (first_audio_index < 0 && st->codec->codec_type == CODEC_TYPE_AUDIO)
            first_audio_index = i;
    }
    /* no video: fall back to the first audio stream, else stream 0 */
    return first_audio_index >= 0 ? first_audio_index : 0;
}

/**
 * Flush the frame reader.
 */
static void av_read_frame_flush(AVFormatContext *s)
{
    AVStream *st;
    int i;

    flush_packet_queue(s);

    /* free previous packet */
    if (s->cur_st) {
        if (s->cur_st->parser)
            av_free_packet(&s->cur_pkt);
        s->cur_st = NULL;
    }
    /* fail safe */
    s->cur_ptr = NULL;
    s->cur_len = 0;

    /* for each stream, reset read state */
    for(i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];

        if (st->parser) {
            av_parser_close(st->parser);
            st->parser = NULL;
        }
        st->last_IP_pts = AV_NOPTS_VALUE;
        st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
    }
}

/*
 * Rescale 'timestamp' (expressed in ref_st's time base) into every
 * stream's own time base and store it as that stream's current DTS.
 */
void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
{
    int i;

    for(i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];

        /* (int64_t) casts force 64-bit products inside the rescale */
        st->cur_dts = av_rescale(timestamp,
                                 st->time_base.den * (int64_t)ref_st->time_base.num,
                                 st->time_base.num * (int64_t)ref_st->time_base.den);
    }
}

/*
 * Keep the stream's index within s->max_index_size bytes by dropping
 * every second entry once the limit is reached.
 */
void ff_reduce_index(AVFormatContext *s, int stream_index)
{
    AVStream *st= s->streams[stream_index];
    unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);

    if((unsigned)st->nb_index_entries >= max_entries){
        int i;
        for(i=0; 2*i<st->nb_index_entries; i++)
            st->index_entries[i]= st->index_entries[2*i];
        st->nb_index_entries= i;
    }
}

/*
 * Add (or update) an entry in the stream's timestamp index, keeping the
 * entries sorted by timestamp.  Returns the entry's position, or -1 on
 * allocation failure.  (Function continues on the next physical line.)
 */
int av_add_index_entry(AVStream *st,
                       int64_t pos, int64_t timestamp, int size, int distance, int flags)
{
    AVIndexEntry *entries, *ie;
    int index;

    /* reject growth that would overflow the allocation-size computation */
    if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
        return -1;

    entries = av_fast_realloc(st->index_entries,
                              &st->index_entries_allocated_size,
                              (st->nb_index_entries + 1) * sizeof(AVIndexEntry));
    if(!entries)
        return -1;

    st->index_entries= entries;

    index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);

    if(index<0){
        /* timestamp sorts after every existing entry: append */
        index= st->nb_index_entries++;
        ie= &entries[index];
assert(index==0 || ie[-1].timestamp < timestamp);
    }else{
        ie= &entries[index];
        if(ie->timestamp != timestamp){
            /* an entry with a larger timestamp sits here: shift the tail
             * up by one slot and insert in the middle */
            if(ie->timestamp <= timestamp)
                return -1;
            memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
            st->nb_index_entries++;
        }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
            distance= ie->min_distance;
    }

    ie->pos = pos;
    ie->timestamp = timestamp;
    ie->min_distance= distance;
    ie->size= size;
    ie->flags = flags;

    return index;
}

/*
 * Binary-search the stream's index for wanted_timestamp, honoring
 * AVSEEK_FLAG_BACKWARD (pick the entry at or before the target) and
 * AVSEEK_FLAG_ANY (allow non-keyframe entries).  Returns the entry
 * index or -1 when no suitable entry exists.
 */
int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
                              int flags)
{
    AVIndexEntry *entries= st->index_entries;
    int nb_entries= st->nb_index_entries;
    int a, b, m;
    int64_t timestamp;

    /* invariant: entries[a].timestamp <= wanted <= entries[b].timestamp */
    a = - 1;
    b = nb_entries;

    while (b - a > 1) {
        m = (a + b) >> 1;
        timestamp = entries[m].timestamp;
        if(timestamp >= wanted_timestamp)
            b = m;
        if(timestamp <= wanted_timestamp)
            a = m;
    }
    m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;

    if(!(flags & AVSEEK_FLAG_ANY)){
        /* step away from the target until a keyframe entry is found */
        while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
            m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
        }
    }

    if(m == nb_entries)
        return -1;
    return m;
}

#define DEBUG_SEEK

/* NOTE(review): av_seek_frame_binary() continues past the end of this
 * chunk; only its prologue is visible here. */
int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
{
    AVInputFormat *avif= s->iformat;
    int64_t pos_min, pos_max, pos, pos_limit;
    int64_t ts_min, ts_max, ts;
    int index;
    AVStream *st;

    if (stream_index < 0)
        return -1;

#ifdef DEBUG_SEEK
    av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
#endif

    ts_max= ts_min= AV_NOPTS_VALUE;
    pos_limit= -1; //gcc falsely says it may be uninitialized
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -