nutenc.c
        break;
    case CODEC_TYPE_VIDEO:
        put_v(bc, codec->width);
        put_v(bc, codec->height);

        if(codec->sample_aspect_ratio.num<=0 || codec->sample_aspect_ratio.den<=0){
            /* unknown or invalid sample aspect ratio -> store 0/0 */
            put_v(bc, 0);
            put_v(bc, 0);
        }else{
            put_v(bc, codec->sample_aspect_ratio.num);
            put_v(bc, codec->sample_aspect_ratio.den);
        }
        put_v(bc, 0); /* csp type -- unknown */
        break;
    default:
        break;
    }
    return 0;
}
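
/*
 * A note on the coding used below (a sketch only; put_v/put_s/put_str are
 * defined elsewhere in this muxer, not in this excerpt): NUT stores unsigned
 * values in a variable-length form, 7 bits per byte, most significant bits
 * first, with the high bit set on every byte except the last. Assuming that
 * coding, 300 would be written as the two bytes 0x82 0x2C (2*128 + 44).
 * Signed values ("s") are mapped onto that unsigned form, so small negative
 * numbers stay one byte.
 *
 * add_info() writes one info item: the item name, the signed value -1 (which,
 * per the NUT info-packet convention, marks the value that follows as a
 * UTF-8 string), and the string itself. It returns 1 so callers can simply
 * sum the return values to count the items they emitted.
 */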
static int add_info(ByteIOContext *bc, char *type, char *value){
    put_str(bc, type);
    put_s(bc, -1);      /* -1: the value that follows is a UTF-8 string */
    put_str(bc, value);

    return 1;
}
static void write_globalinfo(NUTContext *nut, ByteIOContext *bc){
    AVFormatContext *s= nut->avf;
    ByteIOContext dyn_bc;
    uint8_t *dyn_buf=NULL;
    int count=0, dyn_size;

    url_open_dyn_buf(&dyn_bc);
    if(s->title    [0]) count+= add_info(&dyn_bc, "Title"    , s->title);
    if(s->author   [0]) count+= add_info(&dyn_bc, "Author"   , s->author);
    if(s->copyright[0]) count+= add_info(&dyn_bc, "Copyright", s->copyright);
    if(!(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT))
        count+= add_info(&dyn_bc, "Encoder"  , LIBAVFORMAT_IDENT);

    put_v(bc, 0); //stream_id_plus1
    put_v(bc, 0); //chapter_id
    put_v(bc, 0); //timestamp_start
    put_v(bc, 0); //length

    put_v(bc, count);

    dyn_size= url_close_dyn_buf(&dyn_bc, &dyn_buf);
    put_buffer(bc, dyn_buf, dyn_size);
    av_free(dyn_buf);
}
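
/*
 * write_headers() below emits the main header, one stream header per stream,
 * and the global info packet. Each payload is first assembled into a dynamic
 * buffer (url_open_dyn_buf) and then handed to put_packet(), which -- as a
 * sketch of the framing defined elsewhere in this file -- prefixes the
 * startcode and packet size and appends the packet checksum.
 * last_syncpoint_pos is reset and header_count incremented so that
 * write_packet() knows when to repeat the headers later in the file.
 */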
static void write_headers(NUTContext *nut, ByteIOContext *bc){
    ByteIOContext dyn_bc;
    int i;

    url_open_dyn_buf(&dyn_bc);
    write_mainheader(nut, &dyn_bc);
    put_packet(nut, bc, &dyn_bc, 1, MAIN_STARTCODE);

    for (i=0; i < nut->avf->nb_streams; i++){
        AVCodecContext *codec = nut->avf->streams[i]->codec;
        url_open_dyn_buf(&dyn_bc);
        write_streamheader(nut, &dyn_bc, codec, i);
        put_packet(nut, bc, &dyn_bc, 1, STREAM_STARTCODE);
    }

    url_open_dyn_buf(&dyn_bc);
    write_globalinfo(nut, &dyn_bc);
    put_packet(nut, bc, &dyn_bc, 1, INFO_STARTCODE);

    nut->last_syncpoint_pos= INT_MIN;
    nut->header_count++;
}
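
/*
 * write_header() below chooses the per-stream timing parameters. A worked
 * example, under the assumption that ff_parse_specific_params() yields the
 * codec time base: a 25 fps video stream gets time_base 1/25 (0.04 s per
 * tick >= 0.001), so msb_pts_shift = 7 and max_pts_distance = 25 ticks
 * (about one second); 48 kHz PCM audio gets 1/48000 (< 0.001), so
 * msb_pts_shift = 14 and max_pts_distance = 48000. msb_pts_shift bounds how
 * many low pts bits a frame header may carry, and max_pts_distance bounds
 * how far pts may move before a checksum becomes mandatory (see
 * get_needed_flags()).
 */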
static int write_header(AVFormatContext *s){
    NUTContext *nut = s->priv_data;
    ByteIOContext *bc = &s->pb;
    int i, j;

    nut->avf= s;

    nut->stream   = av_mallocz(sizeof(StreamContext)*s->nb_streams);
    nut->time_base= av_mallocz(sizeof(AVRational   )*s->nb_streams);

    for(i=0; i<s->nb_streams; i++){
        AVStream *st= s->streams[i];
        int ssize;
        AVRational time_base;
        ff_parse_specific_params(st->codec, &time_base.den, &ssize, &time_base.num);

        av_set_pts_info(st, 64, time_base.num, time_base.den);

        /* reuse an identical time base if one is already registered, otherwise append it */
        for(j=0; j<nut->time_base_count; j++){
            if(!memcmp(&time_base, &nut->time_base[j], sizeof(AVRational))){
                break;
            }
        }
        nut->time_base[j]= time_base;
        nut->stream[i].time_base= &nut->time_base[j];
        if(j==nut->time_base_count)
            nut->time_base_count++;

        /* coarse time bases (>= 1 ms per tick) get a small msb_pts_shift, fine ones a larger one */
        if(av_q2d(time_base) >= 0.001)
            nut->stream[i].msb_pts_shift = 7;
        else
            nut->stream[i].msb_pts_shift = 14;
        nut->stream[i].max_pts_distance= FFMAX(1/av_q2d(time_base), 1);
    }

    build_frame_code(s);
    assert(nut->frame_code['N'].flags == FLAG_INVALID);

    put_buffer(bc, ID_STRING, strlen(ID_STRING));
    put_byte(bc, 0);

    write_headers(nut, bc);

    put_flush_packet(bc);

    //FIXME index

    return 0;
}
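
/*
 * get_needed_flags() below computes which optional fields a given frame code
 * would have to carry for this packet: the key flag, an explicit stream id if
 * it differs from the code's default, the size MSB when the size reaches the
 * code's size_mul, an explicitly coded pts when the delta from the previous
 * pts is not the code's fixed pts_delta, and a checksum for oversized packets
 * or large pts jumps. FLAG_CODED is passed through from the table so codes
 * that store their flags explicitly stay marked as such.
 */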
static int get_needed_flags(NUTContext *nut, StreamContext *nus, FrameCode *fc, AVPacket *pkt){
    int flags= 0;

    if(pkt->flags & PKT_FLAG_KEY                ) flags |= FLAG_KEY;
    if(pkt->stream_index != fc->stream_id       ) flags |= FLAG_STREAM_ID;
    if(pkt->size / fc->size_mul                 ) flags |= FLAG_SIZE_MSB;
    if(pkt->pts - nus->last_pts != fc->pts_delta) flags |= FLAG_CODED_PTS;
    if(pkt->size > 2*nut->max_distance          ) flags |= FLAG_CHECKSUM;
    if(FFABS(pkt->pts - nus->last_pts) > nus->max_pts_distance)
                                                  flags |= FLAG_CHECKSUM;

    return flags | (fc->flags & FLAG_CODED);
}
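
/*
 * write_packet() below, in outline: repeat the headers once the file position
 * passes 2^(20+3*header_count) bytes; start a syncpoint at the first key
 * frame after non-key frames, or after max_distance bytes; then scan all 256
 * entries of the frame-code table and pick the code that represents this
 * packet with the fewest extra header bytes (the *4 plus the two small terms
 * appears to act as a tie-breaker that, among equally sized choices, prefers
 * codes carrying an explicit pts and checksum). The syncpoint's back pointer
 * is stored in units of 16 bytes ((position difference)>>4), which appears to
 * follow the NUT back_ptr convention.
 */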
static int write_packet(AVFormatContext *s, AVPacket *pkt){
    NUTContext *nut = s->priv_data;
    StreamContext *nus= &nut->stream[pkt->stream_index];
    ByteIOContext *bc = &s->pb, dyn_bc;
    FrameCode *fc;
    int64_t coded_pts;
    int best_length, frame_code, flags, needed_flags, i;
    int key_frame = !!(pkt->flags & PKT_FLAG_KEY);
    int store_sp=0;

    /* repeat the headers at exponentially growing file positions */
    if(1LL<<(20+3*nut->header_count) <= url_ftell(bc))
        write_headers(nut, bc);

    /* start a new syncpoint at the first key frame after non-key frames ... */
    if(key_frame && !(nus->last_flags & FLAG_KEY))
        store_sp= 1;

    /* ... or when max_distance bytes have passed since the last syncpoint */
    if(pkt->size + 30/*FIXME check*/ + url_ftell(bc) >= nut->last_syncpoint_pos + nut->max_distance)
        store_sp= 1;

//FIXME: Ensure store_sp is 1 in the first place.

    if(store_sp){
        syncpoint_t *sp, dummy= {.pos= INT64_MAX};

        ff_nut_reset_ts(nut, *nus->time_base, pkt->dts);
        for(i=0; i<s->nb_streams; i++){
            AVStream *st= s->streams[i];
            int index= av_index_search_timestamp(st, pkt->dts, AVSEEK_FLAG_BACKWARD);
            if(index<0) dummy.pos=0;
            else        dummy.pos= FFMIN(dummy.pos, st->index_entries[index].pos);
        }

        sp= av_tree_find(nut->syncpoints, &dummy, ff_nut_sp_pos_cmp, NULL);

        nut->last_syncpoint_pos= url_ftell(bc);
        url_open_dyn_buf(&dyn_bc);
        put_t(nut, nus, &dyn_bc, pkt->dts);
        put_v(&dyn_bc, sp ? (nut->last_syncpoint_pos - sp->pos)>>4 : 0);
        put_packet(nut, bc, &dyn_bc, 1, SYNCPOINT_STARTCODE);

        ff_nut_add_sp(nut, nut->last_syncpoint_pos, 0/*unused*/, pkt->dts);
    }
    assert(nus->last_pts != AV_NOPTS_VALUE);

    coded_pts = pkt->pts & ((1<<nus->msb_pts_shift)-1);
    if(ff_lsb2full(nus, coded_pts) != pkt->pts)
        coded_pts= pkt->pts + (1<<nus->msb_pts_shift);

    /* pick the frame code that stores this packet with the fewest extra bytes */
    best_length=INT_MAX;
    frame_code= -1;
    for(i=0; i<256; i++){
        int length= 0;
        FrameCode *fc= &nut->frame_code[i];
        int flags= fc->flags;

        if(flags & FLAG_INVALID)
            continue;
        needed_flags= get_needed_flags(nut, nus, fc, pkt);

        if(flags & FLAG_CODED){
            length++;
            flags = needed_flags;
        }

        if((flags & needed_flags) != needed_flags)
            continue;

        if((flags ^ needed_flags) & FLAG_KEY)
            continue;

        if(flags & FLAG_STREAM_ID)
            length+= get_length(pkt->stream_index);

        if(pkt->size % fc->size_mul != fc->size_lsb)
            continue;
        if(flags & FLAG_SIZE_MSB)
            length += get_length(pkt->size / fc->size_mul);

        if(flags & FLAG_CHECKSUM)
            length+=4;

        if(flags & FLAG_CODED_PTS)
            length += get_length(coded_pts);

        length*=4;
        length+= !(flags & FLAG_CODED_PTS);
        length+= !(flags & FLAG_CHECKSUM);
        if(length < best_length){
            best_length= length;
            frame_code= i;
        }
    }
    assert(frame_code != -1);
    fc= &nut->frame_code[frame_code];
    flags= fc->flags;
    needed_flags= get_needed_flags(nut, nus, fc, pkt);

    init_checksum(bc, av_crc04C11DB7_update, 0);
    put_byte(bc, frame_code);

    if(flags & FLAG_CODED){
        put_v(bc, (flags^needed_flags) & ~(FLAG_CODED));
        flags = needed_flags;
    }
    if(flags & FLAG_STREAM_ID)  put_v(bc, pkt->stream_index);
    if(flags & FLAG_CODED_PTS)  put_v(bc, coded_pts);
    if(flags & FLAG_SIZE_MSB)   put_v(bc, pkt->size / fc->size_mul);

    if(flags & FLAG_CHECKSUM)   put_le32(bc, get_checksum(bc));
    else                        get_checksum(bc);

    put_buffer(bc, pkt->data, pkt->size);

    nus->last_flags= flags;

    //FIXME just store one per syncpoint
    if(flags & FLAG_KEY)
        av_add_index_entry(
            s->streams[pkt->stream_index],
            nut->last_syncpoint_pos,
            pkt->pts,
            0,
            0,
            AVINDEX_KEYFRAME);
    return 0;
}
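
/*
 * write_trailer() below repeats the headers until at least three copies have
 * been written, so that even short files contain several copies of the main
 * and stream headers (NUT relies on header repetition for resilience).
 */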
static int write_trailer(AVFormatContext *s){
    NUTContext *nut= s->priv_data;
    ByteIOContext *bc= &s->pb;

    while(nut->header_count<3)
        write_headers(nut, bc);

    put_flush_packet(bc);

    return 0;
}
AVOutputFormat nut_muxer = {
    "nut",                  /* name */
    "nut format",           /* long_name */
    "video/x-nut",          /* mime_type */
    "nut",                  /* extensions */
    sizeof(NUTContext),     /* priv_data_size */
#ifdef CONFIG_LIBVORBIS
    CODEC_ID_VORBIS,        /* default audio codec */
#elif defined(CONFIG_LIBMP3LAME)
    CODEC_ID_MP3,
#else
    CODEC_ID_MP2,           /* AC3 needs liba52 decoder */
#endif
    CODEC_ID_MPEG4,         /* default video codec */
    write_header,
    write_packet,
    write_trailer,
    .flags = AVFMT_GLOBALHEADER,
    .codec_tag= (const AVCodecTag*[]){codec_bmp_tags, codec_wav_tags, 0},
};