* then it contains one frame.
*
* pkt->pts, pkt->dts and pkt->duration are always set to correct
* values in AVStream.time_base units (and guessed if the format cannot
* provide them). pkt->pts can be AV_NOPTS_VALUE if the video format
* has B-frames, so it is better to rely on pkt->dts if you do not
* decompress the payload.
*
* @return 0 if OK, < 0 if error or end of file.
*/
int av_read_frame(AVFormatContext *s, AVPacket *pkt);
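/* Illustrative sketch (not part of the original header): a minimal demuxing
* loop built on av_read_frame(). It assumes av_open_input_file() and
* av_find_stream_info() declared earlier in this header; process_packet()
* is a hypothetical consumer.
* @code
* AVFormatContext *ic;
* AVPacket pkt;
*
* if (av_open_input_file(&ic, "input.mpg", NULL, 0, NULL) < 0)
*     return -1;
* if (av_find_stream_info(ic) < 0)
*     return -1;
* while (av_read_frame(ic, &pkt) >= 0) {
*     // pkt.dts/pkt.pts are in ic->streams[pkt.stream_index]->time_base units
*     process_packet(&pkt);
*     av_free_packet(&pkt);
* }
* av_close_input_file(ic);
* @endcode
*/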
/**
* Seek to the keyframe at 'timestamp' in 'stream_index'.
* @param stream_index If stream_index is (-1), a default
* stream is selected, and timestamp is automatically converted
* from AV_TIME_BASE units to the stream specific time_base.
* @param timestamp timestamp in AVStream.time_base units
* or if there is no stream specified then in AV_TIME_BASE units
* @param flags flags which select direction and seeking mode
* @return >= 0 on success
*/
int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags);
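/* Illustrative sketch (not part of the original header): seeking to an
* absolute position given in seconds. With stream_index == -1 the timestamp
* is expressed in AV_TIME_BASE units; ic and seconds are assumed to exist.
* @code
* int64_t ts = (int64_t)(seconds * AV_TIME_BASE);
* if (av_seek_frame(ic, -1, ts, AVSEEK_FLAG_BACKWARD) < 0)
*     fprintf(stderr, "seek to %f s failed\n", seconds);
* @endcode
*/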
/**
* Start playing a network-based stream (e.g. RTSP stream) at the
* current position.
*/
int av_read_play(AVFormatContext *s);
/**
* Pause a network-based stream (e.g. RTSP stream).
*
* Use av_read_play() to resume it.
*/
int av_read_pause(AVFormatContext *s);
/**
* Free an AVFormatContext allocated by av_open_input_stream().
* @param s context to free
*/
void av_close_input_stream(AVFormatContext *s);
/**
* Close a media file (but not its codecs).
*
* @param s media file handle
*/
void av_close_input_file(AVFormatContext *s);
/**
* Add a new stream to a media file.
*
* Can only be called in the read_header() function. If the flag
* AVFMTCTX_NOHEADER is set in the format context, then new streams
* can be added in read_packet() too.
*
* @param s media file handle
* @param id file format dependent stream id
*/
AVStream *av_new_stream(AVFormatContext *s, int id);
AVProgram *av_new_program(AVFormatContext *s, int id);
/**
* Set the pts for a given stream.
*
* @param s stream
* @param pts_wrap_bits number of bits effectively used by the pts
* (used for wrap control, 33 is the value for MPEG)
* @param pts_num numerator to convert to seconds (MPEG: 1)
* @param pts_den denominator to convert to seconds (MPEG: 90000)
*/
void av_set_pts_info(AVStream *s, int pts_wrap_bits,
int pts_num, int pts_den);
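/* Illustrative sketch (not part of the original header): how a demuxer's
* read_header() callback typically creates a stream and declares its
* timestamp units, here with the MPEG values mentioned above.
* @code
* static int my_read_header(AVFormatContext *s, AVFormatParameters *ap)
* {
*     AVStream *st = av_new_stream(s, 0);     // id 0 is format dependent
*     if (!st)
*         return -1;
*     av_set_pts_info(st, 33, 1, 90000);      // 33-bit pts in 1/90000 s units
*     st->codec->codec_type = CODEC_TYPE_VIDEO;
*     return 0;
* }
* @endcode
*/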
#define AVSEEK_FLAG_BACKWARD 1 ///< seek backward
#define AVSEEK_FLAG_BYTE 2 ///< seeking based on position in bytes
#define AVSEEK_FLAG_ANY 4 ///< seek to any frame, even non keyframes
int av_find_default_stream_index(AVFormatContext *s);
/**
* Gets the index for a specific timestamp.
* @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond to
* the timestamp which is <= the requested one; if backward is 0
* then it will be >=.
* If AVSEEK_FLAG_ANY is set, any frame is acceptable, otherwise only keyframes.
* @return < 0 if no such timestamp could be found
*/
int av_index_search_timestamp(AVStream *st, int64_t timestamp, int flags);
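/* Illustrative sketch (not part of the original header): locating the index
* entry at or before a timestamp and reading back its file position.
* @code
* int idx = av_index_search_timestamp(st, ts, AVSEEK_FLAG_BACKWARD);
* if (idx >= 0) {
*     int64_t pos = st->index_entries[idx].pos;        // byte offset in the file
*     int64_t ts2 = st->index_entries[idx].timestamp;  // in st->time_base units
* }
* @endcode
*/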
/**
* Ensures the index uses less memory than the maximum specified in
* AVFormatContext.max_index_size, by discarding entries if it grows
* too large.
* This function is not part of the public API and should only be called
* by demuxers.
*/
void ff_reduce_index(AVFormatContext *s, int stream_index);
/**
* Add an index entry into a sorted list, updating it if it is already there.
*
* @param timestamp timestamp in the timebase of the given stream
*/
int av_add_index_entry(AVStream *st,
int64_t pos, int64_t timestamp, int size, int distance, int flags);
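/* Illustrative sketch (not part of the original header): a demuxer recording
* a keyframe it has just parsed; pkt_pos, pkt_dts and pkt_size are
* hypothetical values taken from the parsed packet.
* @code
* av_add_index_entry(st, pkt_pos, pkt_dts, pkt_size,
*                    0, AVINDEX_KEYFRAME);   // 0 = unknown distance to the previous keyframe
* @endcode
*/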
/**
* Does a binary search using av_index_search_timestamp() and AVInputFormat.read_timestamp().
* This is not supposed to be called directly by a user application, but by demuxers.
* @param target_ts target timestamp in the time base of the given stream
* @param stream_index stream number
*/
int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags);
/**
* Updates cur_dts of all streams based on the given timestamp and AVStream.
*
* Stream ref_st is left unchanged; all other streams get their cur_dts set
* in their native time base. This is only needed for timestamp wrapping
* or if (dts not set and pts!=dts).
* @param timestamp new dts expressed in time_base of param ref_st
* @param ref_st reference stream giving time_base of param timestamp
*/
void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp);
/**
* Does a binary search using read_timestamp().
* This is not supposed to be called directly by a user application, but by demuxers.
* @param target_ts target timestamp in the time base of the given stream
* @param stream_index stream number
*/
int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ));
/** media file output */
int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap);
/**
* Allocate the stream private data and write the stream header to an
* output media file.
*
* @param s media file handle
* @return 0 if OK. AVERROR_xxx if error.
*/
int av_write_header(AVFormatContext *s);
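/* Illustrative sketch (not part of the original header): typical muxing
* setup with this API. guess_format(), av_alloc_format_context(), url_fopen()
* and URL_WRONLY come from other FFmpeg headers of this vintage and are
* assumptions here.
* @code
* AVOutputFormat *fmt = guess_format(NULL, "out.mpg", NULL);
* AVFormatContext *oc = av_alloc_format_context();
* oc->oformat = fmt;
* snprintf(oc->filename, sizeof(oc->filename), "out.mpg");
* // ... create streams with av_new_stream() and fill their codec contexts ...
* if (av_set_parameters(oc, NULL) < 0)
*     return -1;
* if (url_fopen(&oc->pb, oc->filename, URL_WRONLY) < 0)
*     return -1;
* if (av_write_header(oc) < 0)
*     return -1;
* @endcode
*/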
/**
* Write a packet to an output media file.
*
* The packet shall contain one audio or video frame.
* The packet must be correctly interleaved according to the container
* specification; if it is not, av_interleaved_write_frame() must be used instead.
*
* @param s media file handle
* @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
* @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
*/
int av_write_frame(AVFormatContext *s, AVPacket *pkt);
/**
* Writes a packet to an output media file ensuring correct interleaving.
*
* The packet must contain one audio or video frame.
* If the packets are already correctly interleaved the application should
* call av_write_frame() instead as it is slightly faster. It is also important
* to keep in mind that completely non-interleaved input will need huge amounts
* of memory to interleave with this, so it is preferable to interleave at the
* demuxer level.
*
* @param s media file handle
* @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
* @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
*/
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt);
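/* Illustrative sketch (not part of the original header): writing one encoded
* packet, rescaling its timestamp from the codec time base to the stream
* time base. oc, st, enc, outbuf and out_size are assumed to come from the
* encoding loop; av_rescale_q() is from libavutil.
* @code
* AVPacket pkt;
* av_init_packet(&pkt);
* pkt.stream_index = st->index;
* pkt.data = outbuf;
* pkt.size = out_size;
* pkt.pts  = av_rescale_q(enc->coded_frame->pts, enc->time_base, st->time_base);
* if (enc->coded_frame->key_frame)
*     pkt.flags |= PKT_FLAG_KEY;
* if (av_interleaved_write_frame(oc, &pkt) < 0)
*     return -1;
* @endcode
*/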
/**
* Interleave a packet per DTS in an output media file.
*
* Packets with pkt->destruct == av_destruct_packet will be freed inside this
* function, so they cannot be used afterwards; note that calling
* av_free_packet() on them is still safe.
*
* @param s media file handle
* @param out the interleaved packet will be output here
* @param pkt the input packet
* @param flush 1 if no further packets are available as input and all
* remaining packets should be output
* @return 1 if a packet was output, 0 if no packet could be output,
* < 0 if an error occurred
*/
int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush);
/**
* @brief Write the stream trailer to an output media file and
* free the file private data.
*
* @param s media file handle
* @return 0 if OK. AVERROR_xxx if error.
*/
int av_write_trailer(AVFormatContext *s);
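/* Illustrative sketch (not part of the original header): finishing an output
* file once every packet has been written, assuming it was opened with
* url_fopen() as in the muxing sketch above.
* @code
* av_write_trailer(oc);
* url_fclose(oc->pb);   // use &oc->pb on versions where pb is not a pointer
* @endcode
*/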
void dump_format(AVFormatContext *ic,
int index,
const char *url,
int is_output);
/**
* Parses width and height out of string str.
* @deprecated Use av_parse_video_frame_size instead.
*/
attribute_deprecated int parse_image_size(int *width_ptr, int *height_ptr, const char *str);
/**
* Converts frame rate from string to a fraction.
* @deprecated Use av_parse_video_frame_rate instead.
*/
attribute_deprecated int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg);
/**
* Parses \p datestr and returns a corresponding number of microseconds.
* @param datestr String representing a date or a duration.
* - If a date the syntax is:
* @code
* [{YYYY-MM-DD|YYYYMMDD}]{T| }{HH[:MM[:SS[.m...]]][Z]|HH[MM[SS[.m...]]][Z]}
* @endcode
* Time is localtime unless Z is appended, in which case it is
* interpreted as UTC.
* If the year-month-day part isn't specified it takes the current
* year-month-day.
* Returns the number of microseconds since 1st of January, 1970 up to
* the time of the parsed date or INT64_MIN if \p datestr cannot be
* successfully parsed.
* - If a duration the syntax is:
* @code
* [-]HH[:MM[:SS[.m...]]]
* [-]S+[.m...]
* @endcode
* Returns the number of microseconds contained in a time interval
* with the specified duration or INT64_MIN if \p datestr cannot be
* successfully parsed.
* @param duration Flag which tells how to interpret \p datestr, if
* not zero \p datestr is interpreted as a duration, otherwise as a
* date.
*/
int64_t parse_date(const char *datestr, int duration);
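/* Illustrative sketch (not part of the original header): the two ways
* parse_date() is used.
* @code
* int64_t when = parse_date("2008-05-01T12:00:00Z", 0); // date -> us since 1970 (UTC)
* int64_t dur  = parse_date("00:01:30.5", 1);           // duration -> 90500000 us
* @endcode
*/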
int64_t av_gettime(void);
/* ffm specific for ffserver */
#define FFM_PACKET_SIZE 4096
offset_t ffm_read_write_index(int fd);
void ffm_write_write_index(int fd, offset_t pos);
void ffm_set_write_index(AVFormatContext *s, offset_t pos, offset_t file_size);
/**
* Attempts to find a specific tag in a URL.
*
* Syntax: '?tag1=val1&tag2=val2...'. Little URL decoding is done.
* Returns 1 if found.
*/
int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info);
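/* Illustrative sketch (not part of the original header): extracting one tag
* from a query string.
* @code
* char val[64];
* if (find_info_tag(val, sizeof(val), "user", "?user=joe&pass=x"))
*     printf("user=%s\n", val);   // prints "user=joe"
* @endcode
*/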
/**
* Returns in 'buf' the path with '%d' replaced by number.
* Also handles the '%0nd' format where 'n' is the total number
* of digits and '%%'.
*
* @param buf destination buffer
* @param buf_size destination buffer size
* @param path numbered sequence string
* @param number frame number
* @return 0 if OK, -1 if format error.
*/
int av_get_frame_filename(char *buf, int buf_size,
const char *path, int number);
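/* Illustrative sketch (not part of the original header): expanding a
* numbered-sequence pattern.
* @code
* char name[1024];
* if (av_get_frame_filename(name, sizeof(name), "img%03d.jpg", 7) == 0)
*     printf("%s\n", name);   // prints "img007.jpg"
* @endcode
*/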
/**
* Check whether filename actually is a numbered sequence generator.
*
* @param filename possible numbered sequence string
* @return 1 if a valid numbered sequence string, 0 otherwise.
*/
int av_filename_number_test(const char *filename);
/**
* Generate an SDP for an RTP session.
*
* @param ac array of AVFormatContexts describing the RTP streams. If the
* array is composed of only one context, such context can contain
* multiple AVStreams (one AVStream per RTP stream). Otherwise,
* all the contexts in the array (an AVFormatContext per RTP stream)
* must contain only one AVStream.
* @param n_files number of AVFormatContexts contained in ac
* @param buff buffer where the SDP will be stored (must be allocated by
* the caller)
* @param size the size of the buffer
* @return 0 if OK. AVERROR_xxx if error.
*/
int avf_sdp_create(AVFormatContext *ac[], int n_files, char *buff, int size);
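/* Illustrative sketch (not part of the original header): producing the SDP
* for a single RTP output context; rtp_ctx is an assumed AVFormatContext*.
* @code
* char sdp[2048];
* if (avf_sdp_create(&rtp_ctx, 1, sdp, sizeof(sdp)) == 0)
*     printf("%s\n", sdp);
* @endcode
*/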
#ifdef HAVE_AV_CONFIG_H
void __dynarray_add(unsigned long **tab_ptr, int *nb_ptr, unsigned long elem);
#ifdef __GNUC__
#define dynarray_add(tab, nb_ptr, elem)\
do {\
typeof(tab) _tab = (tab);\
typeof(elem) _elem = (elem);\
(void)sizeof(**_tab == _elem); /* check that types are compatible */\
__dynarray_add((unsigned long **)_tab, nb_ptr, (unsigned long)_elem);\
} while(0)
#else
#define dynarray_add(tab, nb_ptr, elem)\
do {\
__dynarray_add((unsigned long **)(tab), nb_ptr, (unsigned long)(elem));\
} while(0)
#endif
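/* Illustrative sketch (not part of the original header): growing an array of
* strings with dynarray_add(); av_strdup() is from libavutil. The element
* type must fit in an unsigned long.
* @code
* char **names = NULL;
* int nb_names = 0;
* dynarray_add(&names, &nb_names, av_strdup("first"));
* dynarray_add(&names, &nb_names, av_strdup("second"));
* // names[0] == "first", names[1] == "second", nb_names == 2
* @endcode
*/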
time_t mktimegm(struct tm *tm);
struct tm *brktimegm(time_t secs, struct tm *tm);
const char *small_strptime(const char *p, const char *fmt,
struct tm *dt);
struct in_addr;
int resolve_host(struct in_addr *sin_addr, const char *hostname);
void url_split(char *proto, int proto_size,
char *authorization, int authorization_size,
char *hostname, int hostname_size,
int *port_ptr,
char *path, int path_size,
const char *url);
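/* Illustrative sketch (not part of the original header): splitting an RTSP
* URL into its components with url_split().
* @code
* char proto[16], auth[128], host[256], path[256];
* int port;
* url_split(proto, sizeof(proto), auth, sizeof(auth),
*           host, sizeof(host), &port, path, sizeof(path),
*           "rtsp://user:pass@example.com:554/stream");
* // proto="rtsp", auth="user:pass", host="example.com", port=554, path="/stream"
* @endcode
*/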
int match_ext(const char *filename, const char *extensions);
#endif /* HAVE_AV_CONFIG_H */
#endif /* FFMPEG_AVFORMAT_H */