utils.c
        }
    }
 fail:
    return ret;
}

/* absolute maximum size we read until we abort */
#define MAX_READ_SIZE 5000000

/* maximum duration until we stop analysing the stream */
#define MAX_STREAM_DURATION  ((int)(AV_TIME_BASE * 2.0))

/*******************************************************/

/**
 * start playing a network based stream (e.g. RTSP stream) at the
 * current position
 */
int av_read_play(AVFormatContext *s)
{
    if (!s->iformat->read_play)
        return AVERROR_NOTSUPP;
    return s->iformat->read_play(s);
}

/**
 * Pause a network based stream (e.g. RTSP stream).
 *
 * Use av_read_play() to resume it.
 */
int av_read_pause(AVFormatContext *s)
{
    if (!s->iformat->read_pause)
        return AVERROR_NOTSUPP;
    return s->iformat->read_pause(s);
}

/**
 * Close a media file (but not its codecs).
 *
 * @param s media file handle
 */
void av_close_input_file(AVFormatContext *s)
{
    int i, must_open_file;
    AVStream *st;

    /* free previous packet */
    if (s->cur_st && s->cur_st->parser)
        av_free_packet(&s->cur_pkt);

    if (s->iformat->read_close)
        s->iformat->read_close(s);
    for(i=0;i<s->nb_streams;i++) {
        /* free all data in a stream component */
        st = s->streams[i];
        if (st->parser) {
            av_parser_close(st->parser);
        }
        av_free(st->index_entries);
        av_free(st->codec);
        av_free(st);
    }
    flush_packet_queue(s);
    must_open_file = 1;
    if (s->iformat->flags & AVFMT_NOFILE) {
        must_open_file = 0;
    }
    if (must_open_file) {
        url_fclose(&s->pb);
    }
    av_freep(&s->priv_data);
    av_free(s);
}

/**
 * Add a new stream to a media file.
 *
 * Can only be called in the read_header() function. If the flag
 * AVFMTCTX_NOHEADER is in the format context, then new streams
 * can be added in read_packet too.
 *
 * @param s media file handle
 * @param id file format dependent stream id
 */
AVStream *av_new_stream(AVFormatContext *s, int id)
{
    AVStream *st;

    if (s->nb_streams >= MAX_STREAMS)
        return NULL;

    st = av_mallocz(sizeof(AVStream));
    if (!st)
        return NULL;

    st->codec= avcodec_alloc_context();
    if (s->iformat) {
        /* no default bitrate if decoding */
        st->codec->bit_rate = 0;
    }
    st->index = s->nb_streams;
    st->id = id;
    st->start_time = AV_NOPTS_VALUE;
    st->duration = AV_NOPTS_VALUE;
    st->cur_dts = AV_NOPTS_VALUE;

    /* default pts setting is MPEG-like */
    av_set_pts_info(st, 33, 1, 90000);
    st->last_IP_pts = AV_NOPTS_VALUE;

    s->streams[s->nb_streams++] = st;
    return st;
}

/************************************************************/
/* output media file */

int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
{
    int ret;

    if (s->oformat->priv_data_size > 0) {
        s->priv_data = av_mallocz(s->oformat->priv_data_size);
        if (!s->priv_data)
            return AVERROR_NOMEM;
    } else
        s->priv_data = NULL;

    if (s->oformat->set_parameters) {
        ret = s->oformat->set_parameters(s, ap);
        if (ret < 0)
            return ret;
    }
    return 0;
}

/**
 * allocate the stream private data and write the stream header to an
 * output media file
 *
 * @param s media file handle
 * @return 0 if OK. AVERROR_xxx if error.
 */
int av_write_header(AVFormatContext *s)
{
    int ret, i;
    AVStream *st;

    // some sanity checks
    for(i=0;i<s->nb_streams;i++) {
        st = s->streams[i];

        switch (st->codec->codec_type) {
        case CODEC_TYPE_AUDIO:
            if(st->codec->sample_rate<=0){
                av_log(s, AV_LOG_ERROR, "sample rate not set\n");
                return -1;
            }
            break;
        case CODEC_TYPE_VIDEO:
            if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
                av_log(s, AV_LOG_ERROR, "time base not set\n");
                return -1;
            }
            if(st->codec->width<=0 || st->codec->height<=0){
                av_log(s, AV_LOG_ERROR, "dimensions not set\n");
                return -1;
            }
            break;
        }
    }

    ret = s->oformat->write_header(s);
    if (ret < 0)
        return ret;

    /* init PTS generation */
    for(i=0;i<s->nb_streams;i++) {
        int64_t den = AV_NOPTS_VALUE;
        st = s->streams[i];

        switch (st->codec->codec_type) {
        case CODEC_TYPE_AUDIO:
            den = (int64_t)st->time_base.num * st->codec->sample_rate;
            break;
        case CODEC_TYPE_VIDEO:
            den = (int64_t)st->time_base.num * st->codec->time_base.den;
            break;
        default:
            break;
        }
        if (den != AV_NOPTS_VALUE) {
            if (den <= 0)
                return AVERROR_INVALIDDATA;
            av_frac_init(&st->pts, 0, 0, den);
        }
    }
    return 0;
}

//FIXME merge with compute_pkt_fields
static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
    int b_frames = FFMAX(st->codec->has_b_frames, st->codec->max_b_frames);
    int num, den, frame_size;

//    av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts:%lld dts:%lld cur_dts:%lld b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, b_frames, pkt->size, pkt->stream_index);

/*    if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
        return -1;*/

    /* duration field */
    if (pkt->duration == 0) {
        compute_frame_duration(&num, &den, st, NULL, pkt);
        if (den && num) {
            pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
        }
    }

    //XXX/FIXME this is a temporary hack until all encoders output pts
    if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !b_frames){
        pkt->dts=
//        pkt->pts= st->cur_dts;
        pkt->pts= st->pts.val;
    }

    //calculate dts from pts
    if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
        if(b_frames){
            if(st->last_IP_pts == AV_NOPTS_VALUE){
                st->last_IP_pts= -pkt->duration;
            }
            if(st->last_IP_pts < pkt->pts){
                pkt->dts= st->last_IP_pts;
                st->last_IP_pts= pkt->pts;
            }else
                pkt->dts= pkt->pts;
        }else
            pkt->dts= pkt->pts;
    }

    if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
        av_log(NULL, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
        return -1;
    }
    if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
        av_log(NULL, AV_LOG_ERROR, "error, pts < dts\n");
        return -1;
    }

//    av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%lld dts2:%lld\n", pkt->pts, pkt->dts);
    st->cur_dts= pkt->dts;
    st->pts.val= pkt->dts;

    /* update pts */
    switch (st->codec->codec_type) {
    case CODEC_TYPE_AUDIO:
        frame_size = get_audio_frame_size(st->codec, pkt->size);

        /* HACK/FIXME, we skip the initial 0-size packets as they are most likely equal to the encoder delay,
           but it would be better if we had the real timestamps from the encoder */
        if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
            av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
        }
        break;
    case CODEC_TYPE_VIDEO:
        av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
        break;
    default:
        break;
    }
    return 0;
}

static void truncate_ts(AVStream *st, AVPacket *pkt){
    int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;

//    if(pkt->dts < 0)
//        pkt->dts= 0;  //this happens for low_delay=0 and b frames, FIXME, needs further investigation about what we should do here

    pkt->pts &= pts_mask;
    pkt->dts &= pts_mask;
}

/**
 * Write a packet to an output media file.
 *
 * The packet shall contain one audio or video frame.
 *
 * @param s media file handle
 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
 */
int av_write_frame(AVFormatContext *s, AVPacket *pkt)
{
    int ret;

    ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
    if (ret < 0)
        return ret;

    truncate_ts(s->streams[pkt->stream_index], pkt);

    ret= s->oformat->write_packet(s, pkt);
    if(!ret)
        ret= url_ferror(&s->pb);
    return ret;
}

/**
 * interleave_packet implementation which will interleave per DTS.
 * Packets with pkt->destruct == av_destruct_packet will be freed inside this
 * function, so they cannot be used after it. Note that calling
 * av_free_packet() on them is still safe.
 */
static int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
    AVPacketList *pktl, **next_point, *this_pktl;
    int stream_count=0;
    int streams[MAX_STREAMS];

    if(pkt){
        AVStream *st= s->streams[ pkt->stream_index];

//        assert(pkt->destruct != av_destruct_packet); //FIXME

        this_pktl = av_mallocz(sizeof(AVPacketList));
        this_pktl->pkt= *pkt;
        if(pkt->destruct == av_destruct_packet)
            pkt->destruct= NULL; // non shared -> must keep original from being freed
        else
            av_dup_packet(&this_pktl->pkt);  //shared -> must dup

        next_point = &s->packet_buffer;
        while(*next_point){
            AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
            int64_t left=  st2->time_base.num * (int64_t)st ->time_base.den;
            int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
            if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
                break;
            next_point= &(*next_point)->next;
        }
        this_pktl->next= *next_point;
        *next_point= this_pktl;
    }

    memset(streams, 0, sizeof(streams));
    pktl= s->packet_buffer;
    while(pktl){
//av_log(s, AV_LOG_DEBUG, "show st:%d dts:%lld\n", pktl->pkt.stream_index, pktl->pkt.dts);
        if(streams[ pktl->pkt.stream_index ] == 0)
            stream_count++;
        streams[ pktl->pkt.stream_index ]++;
        pktl= pktl->next;
    }

    if(s->nb_streams == stream_count || (flush && stream_count)){
        pktl= s->packet_buffer;
        *out= pktl->pkt;

        s->packet_buffer= pktl->next;
        av_freep(&pktl);
        return 1;
    }else{
        av_init_packet(out);
        return 0;
    }
}

/**
 * Interleaves an AVPacket correctly so it can be muxed.
 * @param out the interleaved packet will be output here
 * @param in the input packet
 * @param flush 1 if no further packets are available as input and all
 *              remaining packets should be output
 * @return 1 if a packet was output, 0 if no packet could be output,
 *         < 0 if an error occurred
 */
static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
    if(s->oformat->interleave_packet)
        return s->oformat->interleave_packet(s, out, in, flush);
    else
        return av_interleave_packet_per_dts(s, out, in, flush);
}

/**
 * Writes a packet to an output media file ensuring correct interleaving.
 *
 * The packet must contain one audio or video frame.
 * If the packets are already correctly interleaved, the application should
 * call av_write_frame() instead, as it is slightly faster. It is also
 * important to keep in mind that completely non-interleaved input will need
 * huge amounts of memory to interleave with this, so it is preferable to
 * interleave at the demuxer level.
 *
 * @param s media file handle
 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
 */
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
    AVStream *st= s->streams[ pkt->stream_index];

    //FIXME/XXX/HACK drop zero sized packets
    if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
        return 0;

//av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %Ld %Ld\n", pkt->size, pkt->dts, pkt->pts);
    if(compute_pkt_fields2(st, pkt) < 0)
        return -1;

    if(pkt->dts == AV_NOPTS_VALUE)
        return -1;

    for(;;){
        AVPacket opkt;
        int ret= av_interleave_packet(s, &opkt, pkt, 0);
        if(ret<=0) //FIXME cleanup needed for ret<0 ?
            return ret;

        truncate_ts(s->streams[opkt.stream_index], &opkt);
        ret= s->oformat->write_packet(s, &opkt);

        av_free_packet(&opkt);
        pkt= NULL;

        if(ret<0)
            return ret;
        if(url_ferror(&s->pb))
            return url_ferror(&s->pb);
    }
}

/**
 * @brief Write the stream trailer to an output media file and
 *        free the file private data.
 *
 * @param s media file handle
 * @return 0 if OK. AVERROR_xxx if error.
 */
int av_write_trailer(AVFormatContext *s)
{
    int ret, i;

    for(;;){
        AVPacket pkt;
        ret= av_interleave_pac
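
The functions above are the muxing half of this libavformat version: av_set_parameters() prepares the muxer, av_write_header() validates the streams and initialises PTS generation, av_interleaved_write_frame() buffers and reorders packets by dts before handing them to write_packet(), and av_write_trailer() flushes and finalises the file. The following is a minimal usage sketch, not part of utils.c; it assumes the companion API of this era (av_register_all(), guess_format(), url_fopen()/url_fclose(), the ByteIOContext embedded in AVFormatContext) and omits most error handling and cleanup.

/* Hypothetical helper, for illustration only: mux one pre-encoded video
 * packet into a file chosen by its extension. */
#include "avformat.h"

int mux_single_packet(const char *filename, AVPacket *pkt)
{
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *st;

    av_register_all();
    fmt = guess_format(NULL, filename, NULL);      /* pick a muxer from the file name */
    if (!fmt)
        return -1;

    oc = av_mallocz(sizeof(AVFormatContext));
    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

    st = av_new_stream(oc, 0);                     /* one video stream */
    st->codec->codec_type    = CODEC_TYPE_VIDEO;
    st->codec->codec_id      = fmt->video_codec;
    st->codec->time_base.num = 1;                  /* av_write_header() rejects streams */
    st->codec->time_base.den = 25;                 /* without a time base ... */
    st->codec->width         = 640;                /* ... or without dimensions */
    st->codec->height        = 480;

    if (av_set_parameters(oc, NULL) < 0)           /* allocates the muxer's priv_data */
        return -1;
    if (!(fmt->flags & AVFMT_NOFILE) &&
        url_fopen(&oc->pb, filename, URL_WRONLY) < 0)
        return -1;
    if (av_write_header(oc) < 0)                   /* sanity checks + per-stream PTS init */
        return -1;

    pkt->stream_index = st->index;
    av_interleaved_write_frame(oc, pkt);           /* buffers/reorders by dts, then writes */

    av_write_trailer(oc);                          /* flush interleaving buffer, write trailer */
    if (!(fmt->flags & AVFMT_NOFILE))
        url_fclose(&oc->pb);
    av_free(oc);                                   /* per-stream cleanup elided for brevity */
    return 0;
}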