📄 video.c
字号:
/*
 * Decode one video frame via libavcodec into row_pointers.
 *
 * Returns 1 when a decoded frame is available but not copied (row_pointers
 * == NULL, "peek" mode), 0 on end of stream, -1 if the decoder could not be
 * opened, and `result` (0) after a successful copy/convert.
 *
 * Lazy-initializes the AVCodecContext on first call: extradata is wired up
 * per-codec (SVQ3 / H.264 avcC / MPEG-4 esds / generic "glbl" atom), and a
 * palette is installed for paletted codecs. The stream colormodel is probed
 * from the first decoded picture's pix_fmt.
 */
static int lqt_ffmpeg_decode_video(quicktime_t *file, unsigned char **row_pointers, int track)
  {
  uint8_t * user_atom;
  uint32_t user_atom_len;
  int i, imax;
  int result = 0;
  int buffer_size;
  quicktime_video_map_t *vtrack = &(file->vtracks[track]);
  quicktime_trak_t *trak = vtrack->track;
  int height;
  int width;
  quicktime_ffmpeg_video_codec_t *codec = ((quicktime_codec_t*)vtrack->codec)->priv;
  int got_pic;
  //  int do_cmodel_transfer;
  quicktime_ctab_t * ctab;
  int exact = 0;
  uint8_t * extradata = (uint8_t*)0;
  int extradata_size = 0;
  uint8_t * cpy_rows[3];

  height = quicktime_video_height(file, track);
  width = quicktime_video_width(file, track);

  /* Initialize decoder */
  if(!codec->initialized)
    {
    codec->avctx->width = width;
    codec->avctx->height = height;
/* Field was renamed in libavcodec 52 (old FFmpeg versioning scheme). */
#if LIBAVCODEC_VERSION_INT < ((52<<16)+(0<<8)+0)
    codec->avctx->bits_per_sample = quicktime_video_depth(file, track);
#else
    codec->avctx->bits_per_coded_sample = quicktime_video_depth(file, track);
#endif
    /* Set extradata: It's done differently for each codec */
    if(codec->decoder->id == CODEC_ID_SVQ3)
      {
      /* SVQ3: pass the raw stsd sample description, minus a 4-byte prefix
         (presumably a size/version field — TODO confirm against stsd layout). */
      extradata = trak->mdia.minf.stbl.stsd.table[0].table_raw + 4;
      extradata_size = trak->mdia.minf.stbl.stsd.table[0].table_raw_size - 4;
      }
    else if(codec->decoder->id == CODEC_ID_H264)
      {
      /* H.264: AVC decoder configuration lives in the 'avcC' user atom.
         The +8 / -8 skips what looks like the atom's size+type header —
         TODO confirm against quicktime_stsd_get_user_atom's contract. */
      user_atom = quicktime_stsd_get_user_atom(trak, "avcC", &user_atom_len);
      if(!user_atom)
        lqt_log(file, LQT_LOG_ERROR, LOG_DOMAIN, "No avcC atom present, decoding is likely to fail");
      else
        {
        extradata = user_atom + 8;
        extradata_size = user_atom_len - 8;
        }
      }
    else if(codec->decoder->id == CODEC_ID_MPEG4)
      {
      /* MPEG-4 ASP: decoder config comes from the esds atom, if present. */
      if(trak->mdia.minf.stbl.stsd.table[0].has_esds)
        {
        extradata = trak->mdia.minf.stbl.stsd.table[0].esds.decoderConfig;
        extradata_size = trak->mdia.minf.stbl.stsd.table[0].esds.decoderConfigLen;
        }
      }
    else if((user_atom = quicktime_stsd_get_user_atom(trak, "glbl", &user_atom_len)))
      {
      /* Fallback: generic 'glbl' atom carries extradata for other codecs. */
      extradata = user_atom + 8;
      extradata_size = user_atom_len - 8;
      }

    if(extradata)
      {
      /* Copy into a padded buffer; lavc requires extradata to be padded by
         FF_INPUT_BUFFER_PADDING_SIZE zero bytes (calloc provides the zeros).
         NOTE(review): calloc return value is not checked — OOM here would
         crash in the memcpy below. */
      codec->extradata = calloc(1, extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
      memcpy(codec->extradata, extradata, extradata_size);
      codec->avctx->extradata_size = extradata_size;
      codec->avctx->extradata = codec->extradata;
      }

    /* Add palette info */
    ctab = &(trak->mdia.minf.stbl.stsd.table->ctab);
    if(ctab->size)
      {
      codec->avctx->palctrl = &(codec->palette);
      codec->palette.palette_changed = 1;
      imax = (ctab->size > AVPALETTE_COUNT) ? AVPALETTE_COUNT : ctab->size;
      /* QuickTime color table entries are 16 bit per channel; lavc wants
         packed 8-bit ARGB, hence the >> 8 on every component. */
      for(i = 0; i < imax; i++)
        {
        codec->palette.palette[i] =
          ((ctab->alpha[i] >> 8) << 24) |
          ((ctab->red[i]   >> 8) << 16) |
          ((ctab->green[i] >> 8) <<  8) |
          ((ctab->blue[i]  >> 8));
        }
      }
    //      codec->avctx->get_buffer = avcodec_default_get_buffer;
    //      codec->avctx->release_buffer = avcodec_default_release_buffer;
    if(avcodec_open(codec->avctx, codec->decoder) != 0)
      return -1;
    codec->frame = avcodec_alloc_frame();
    /* Colormodel is unknown until the first picture comes out of the decoder. */
    vtrack->stream_cmodel = LQT_COLORMODEL_NONE;
    codec->initialized = 1;
    }

#if 0
  /* Check if we must seek */
  if((quicktime_has_keyframes(file, track)) &&
     (vtrack->current_position != codec->last_frame + 1))
    {
    int64_t frame1, frame2 = vtrack->current_position;
    /* Forget about previously decoded frame */
    codec->have_frame = 0;
    frame1 = quicktime_get_keyframe_before(file, vtrack->current_position, track);
    if((frame1 < codec->last_frame) && (frame2 > codec->last_frame))
      frame1 = codec->last_frame + 1;
    while(frame1 < frame2)
      {
      buffer_size = lqt_read_video_frame(file, &codec->buffer, &codec->buffer_alloc, frame1, track);
      if(buffer_size > 0)
        {
        avcodec_decode_video(codec->avctx, codec->frame, &got_pic, codec->buffer, buffer_size);
        }
      frame1++;
      }
    vtrack->current_position = frame2;
    }
  codec->last_frame = vtrack->current_position;
#endif

  /* Read the frame from file and decode it.
     decoding_delay tracks how many packets have been fed in ahead of the
     pictures that came out (codecs with B-frames emit pictures late). */
  got_pic = 0;
  if(!codec->have_frame)
    {
    while(!got_pic)
      {
      buffer_size = lqt_read_video_frame(file, &codec->buffer, &codec->buffer_alloc, vtrack->current_position + codec->decoding_delay, NULL, track);
      codec->decoding_delay++;
      if(avcodec_decode_video(codec->avctx, codec->frame, &got_pic, codec->buffer, buffer_size) < 0)
        {
        /* NOTE(review): if the decoder keeps failing after the file is
           exhausted (buffer_size <= 0), this `continue` skips the EOF check
           below and can loop forever — worth confirming upstream. */
        lqt_log(file, LQT_LOG_ERROR, LOG_DOMAIN, "Skipping corrupted frame");
        continue;
        }
      if(got_pic)
        codec->decoding_delay--;
      /* No more input and nothing buffered inside lavc: end of stream. */
      if((buffer_size <= 0) && !got_pic)
        return 0;
      }
    }

  /* First decoded picture: derive the libquicktime colormodel from the
     decoder's pix_fmt and set up conversion if there is no exact match. */
  if(vtrack->stream_cmodel == LQT_COLORMODEL_NONE)
    {
    vtrack->stream_cmodel = lqt_ffmpeg_get_lqt_colormodel(codec->avctx->pix_fmt, &exact);
    if(!exact)
      {
      codec->do_imgconvert = 1;
#ifdef HAVE_LIBSWSCALE
      /* RGBA32 -> RGBA8888 is handled elsewhere without swscale,
         presumably by a byte-order tweak — TODO confirm in
         convert_image_decode. */
      if(!((codec->avctx->pix_fmt == PIX_FMT_RGBA32) && (vtrack->stream_cmodel == BC_RGBA8888)))
        {
        codec->swsContext = sws_getContext(width, height, codec->avctx->pix_fmt,
                                           width, height,
                                           lqt_ffmpeg_get_ffmpeg_colormodel(vtrack->stream_cmodel),
                                           0, (SwsFilter*)0, (SwsFilter*)0, (double*)0);
        }
#endif
      }
    /* Codec-specific stream properties. */
    if(codec->decoder->id == CODEC_ID_DVVIDEO)
      {
      if(vtrack->stream_cmodel == BC_YUV420P)
        vtrack->chroma_placement = LQT_CHROMA_PLACEMENT_DVPAL;
      vtrack->interlace_mode = LQT_INTERLACE_BOTTOM_FIRST;
      }
    else if(codec->decoder->id == CODEC_ID_MPEG4)
      {
      if(vtrack->stream_cmodel == BC_YUV420P)
        vtrack->chroma_placement = LQT_CHROMA_PLACEMENT_MPEG2;
      }
    /* Propagate the decoder's pixel aspect ratio into the pasp atom. */
    if(codec->avctx->sample_aspect_ratio.num)
      {
      trak->mdia.minf.stbl.stsd.table[0].pasp.hSpacing = codec->avctx->sample_aspect_ratio.num;
      trak->mdia.minf.stbl.stsd.table[0].pasp.vSpacing = codec->avctx->sample_aspect_ratio.den;
      }
    }

  /* Caller only wanted to advance/probe: keep the picture for the next call. */
  if(!row_pointers)
    {
    codec->have_frame = 1;
    return 1;
    }

  /*
   *  Check for the colormodel
   *  There are 2 possible cases:
   *
   *  1. The decoded frame can be memcopied directly to row_pointers
   *     this is the (most likely) case, when the colormodel of
   *     the decoder is supported by lqt
   *
   *  2. The decoder colormodel is not supported by libquicktime,
   *     (e.g. YUV410P for sorenson). We must then use avcodec's
   *     image conversion routines to convert to row_pointers.
   */
  if(!codec->do_imgconvert)
    {
    cpy_rows[0] = codec->frame->data[0];
    cpy_rows[1] = codec->frame->data[1];
    cpy_rows[2] = codec->frame->data[2];
    lqt_rows_copy(row_pointers, cpy_rows, width, height,
                  codec->frame->linesize[0], codec->frame->linesize[1],
                  vtrack->stream_row_span, vtrack->stream_row_span_uv,
                  vtrack->stream_cmodel);
    }
  else
    {
    convert_image_decode(codec, codec->frame, codec->avctx->pix_fmt, row_pointers,
                         vtrack->stream_cmodel, width, height,
                         vtrack->stream_row_span, vtrack->stream_row_span_uv);
    }
  codec->have_frame = 0;
  return result;
  }

/*
 * Re-synchronize the decoder after a seek: flush lavc's internal buffers,
 * then decode (and discard) every frame from the preceding keyframe up to,
 * but not including, the current position so the decoder state matches it.
 */
static void resync_ffmpeg(quicktime_t *file, int track)
  {
  int64_t keyframe, frame;
  int buffer_size, got_pic;
  quicktime_video_map_t *vtrack = &(file->vtracks[track]);
  quicktime_ffmpeg_video_codec_t *codec = ((quicktime_codec_t*)vtrack->codec)->priv;

  /* Forget about previously decoded frame */
  codec->have_frame = 0;
  codec->decoding_delay = 0;

  /* Reset lavc */
  avcodec_flush_buffers(codec->avctx);

  if(quicktime_has_keyframes(file, track))
    {
    keyframe = quicktime_get_keyframe_before(file, vtrack->current_position, track);
    frame = keyframe;
    /* Feed every frame from the keyframe to current_position - 1; the
       decoded pictures are intentionally ignored. */
    while(frame < vtrack->current_position)
      {
      buffer_size = lqt_read_video_frame(file, &codec->buffer, &codec->buffer_alloc, frame, NULL, track);
      if(buffer_size > 0)
        {
        avcodec_decode_video(codec->avctx, codec->frame, &got_pic, codec->buffer, buffer_size);
        }
      frame++;
      }
    }
  }

/*
 * Configure multipass encoding: record the current pass, the total number of
 * passes, and a copy of the stats file path. Always returns 1.
 * NOTE(review): malloc return is not checked, and a previously set
 * stats_filename would leak if this is called more than once — confirm
 * whether callers guarantee a single call per track.
 */
static int set_pass_ffmpeg(quicktime_t *file, int track, int pass, int total_passes, const char * stats_file)
  {
  quicktime_video_map_t *vtrack = &(file->vtracks[track]);
  quicktime_ffmpeg_video_codec_t *codec = ((quicktime_codec_t*)vtrack->codec)->priv;

  codec->total_passes = total_passes;
  codec->pass = pass;
  codec->stats_filename = malloc(strlen(stats_file)+1);
  strcpy(codec->stats_filename, stats_file);
  return 1;
  }

/*
 * Encode one video frame (or, with row_pointers == NULL, perform the
 * stream-format negotiation pass).
 * NOTE(review): this function is truncated in the excerpt under review —
 * the body continues beyond this point in the original file.
 */
static int lqt_ffmpeg_encode_video(quicktime_t *file, unsigned char **row_pointers, int track)
  {
  quicktime_esds_t * esds;
  int result = 0;
  int pixel_width, pixel_height;
  int bytes_encoded;
  quicktime_video_map_t *vtrack = &(file->vtracks[track]);
  quicktime_trak_t *trak = vtrack->track;
  quicktime_ffmpeg_video_codec_t *codec = ((quicktime_codec_t*)vtrack->codec)->priv;
  int height = trak->tkhd.track_height;
  int width = trak->tkhd.track_width;
  quicktime_atom_t chunk_atom;
  int stats_len;

  /* NULL row_pointers means: report/negotiate the colormodel only. */
  if(!row_pointers)
    {
    vtrack->stream_cmodel = codec->encode_colormodel;
    if(codec->encode_colormodel == BC_YUV420P)
      {
      if(codec->encoder->id == CODEC_ID_MPEG4)
        {
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -