/*******************************************************************************
 video.c

 libquicktime - A library for reading and writing quicktime/avi/mp4 files.
 http://libquicktime.sourceforge.net

 Copyright (C) 2002 Heroine Virtual Ltd.
 Copyright (C) 2002-2007 Members of the libquicktime project.

 This library is free software; you can redistribute it and/or modify it under
 the terms of the GNU Lesser General Public License as published by the Free
 Software Foundation; either version 2.1 of the License, or (at your option)
 any later version.

 This library is distributed in the hope that it will be useful, but WITHOUT
 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
 details.

 You should have received a copy of the GNU Lesser General Public License along
 with this library; if not, write to the Free Software Foundation, Inc.,
 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*******************************************************************************/

#include "lqt_private.h"
#include "ffmpeg.h"
#include <quicktime/colormodels.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#define LOG_DOMAIN "ffmpeg_video"

#ifdef HAVE_LIBSWSCALE
#include SWSCALE_HEADER
#endif

// Enable interlaced encoding (experimental)
// #define DO_INTERLACE

typedef struct
  {
  AVCodecContext * avctx;
  AVCodec * encoder;
  AVCodec * decoder;
  int initialized;

  int decoding_delay;

  uint8_t * buffer;
  int buffer_alloc;

  AVFrame * frame;
  uint8_t * frame_buffer;

  /* Colormodel */
  int do_imgconvert;

#ifdef HAVE_LIBSWSCALE
  struct SwsContext *swsContext;
#endif

  //  unsigned char ** tmp_buffer;
  unsigned char ** row_pointers;

  /* Quality must be passed to the individual frame */
  int qscale;

  AVPaletteControl palette;

  /* We decode the first frame during the init() function to obtain
     the stream colormodel */
  int have_frame;

  int encode_colormodel;

  int write_global_header;
  int global_header_written;

  uint8_t * extradata; /* For decoding only, for encoding extradata is owned by lavc */

  /* Multipass control */
  int total_passes;
  int pass;
  char * stats_filename;
  FILE * stats_file;

  } quicktime_ffmpeg_video_codec_t;

/* ffmpeg <-> libquicktime colormodels */

/* Exact entries MUST come first */

static struct
  {
  enum PixelFormat ffmpeg_id;
  int lqt_id;
  int exact;
  }
colormodels[] =
  {
    { PIX_FMT_YUV420P,   BC_YUV420P,  1 }, ///< Planar YUV 4:2:0 (1 Cr & Cb sample per 2x2 Y samples)
    { PIX_FMT_YUV422,    BC_YUV422,   1 },
    { PIX_FMT_RGB24,     BC_RGB888,   1 }, ///< Packed pixel, 3 bytes per pixel, RGBRGB...
    { PIX_FMT_BGR24,     BC_BGR888,   1 }, ///< Packed pixel, 3 bytes per pixel, BGRBGR...
    { PIX_FMT_YUV422P,   BC_YUV422P,  1 }, ///< Planar YUV 4:2:2 (1 Cr & Cb sample per 2x1 Y samples)
    { PIX_FMT_YUV444P,   BC_YUV444P,  1 }, ///< Planar YUV 4:4:4 (1 Cr & Cb sample per 1x1 Y samples)
    { PIX_FMT_YUV411P,   BC_YUV411P,  1 }, ///< Planar YUV 4:1:1 (1 Cr & Cb sample per 4x1 Y samples)
    { PIX_FMT_RGB565,    BC_RGB565,   1 }, ///< always stored in cpu endianness
    { PIX_FMT_YUVJ420P,  BC_YUVJ420P, 1 }, ///< Planar YUV 4:2:0 full scale (jpeg)
    { PIX_FMT_YUVJ422P,  BC_YUVJ422P, 1 }, ///< Planar YUV 4:2:2 full scale (jpeg)
    { PIX_FMT_YUVJ444P,  BC_YUVJ444P, 1 }, ///< Planar YUV 4:4:4 full scale (jpeg)
    { PIX_FMT_RGBA32,    BC_RGBA8888, 0 }, ///< Packed pixel, 4 bytes per pixel, BGRABGRA...
    { PIX_FMT_RGB555,    BC_RGB888,   0 }, ///< always stored in cpu endianness, most significant bit to 1
    { PIX_FMT_GRAY8,     BC_RGB888,   0 },
    { PIX_FMT_MONOWHITE, BC_RGB888,   0 }, ///< 0 is white
    { PIX_FMT_MONOBLACK, BC_RGB888,   0 }, ///< 0 is black
    { PIX_FMT_PAL8,      BC_RGB888,   0 }, ///< 8 bit with RGBA palette
    { PIX_FMT_YUV410P,   BC_YUV420P,  0 }, ///< Planar YUV 4:1:0 (1 Cr & Cb sample per 4x4 Y samples)
  };

static int lqt_ffmpeg_delete_video(quicktime_video_map_t *vtrack)
  {
  quicktime_ffmpeg_video_codec_t *codec = ((quicktime_codec_t*)vtrack->codec)->priv;

  if(codec->extradata)
    free(codec->extradata);
  if(codec->stats_filename)
    free(codec->stats_filename);
  if(codec->stats_file)
    fclose(codec->stats_file);

  if(codec->initialized)
    {
    if(codec->avctx->stats_in)
      av_free(codec->avctx->stats_in);
    avcodec_close(codec->avctx);
    }
  av_free(codec->avctx);

  if(codec->frame_buffer)
    free(codec->frame_buffer);
  if(codec->buffer)
    free(codec->buffer);
  if(codec->row_pointers)
    free(codec->row_pointers);
  if(codec->frame)
    av_free(codec->frame);
#ifdef HAVE_LIBSWSCALE
  if(codec->swsContext)
    sws_freeContext(codec->swsContext);
#endif
  free(codec);
  return 0;
  }

#ifndef HAVE_LIBSWSCALE
static void fill_avpicture(AVPicture * ret, unsigned char ** rows,
                           int lqt_colormodel, int row_span, int row_span_uv)
  {
  switch(lqt_colormodel)
    {
    case BC_YUV420P:
    case BC_YUV422P:
      ret->data[0] = rows[0];
      ret->data[1] = rows[1];
      ret->data[2] = rows[2];
      ret->linesize[0] = row_span;
      ret->linesize[1] = row_span_uv ? row_span_uv : row_span/2;
      ret->linesize[2] = row_span_uv ? row_span_uv : row_span/2;
      break;
    case BC_YUV444P:
      ret->data[0] = rows[0];
      ret->data[1] = rows[1];
      ret->data[2] = rows[2];
      ret->linesize[0] = row_span;
      ret->linesize[1] = row_span_uv ? row_span_uv : row_span;
      ret->linesize[2] = row_span_uv ? row_span_uv : row_span;
      break;
    case BC_YUV411P:
      ret->data[0] = rows[0];
      ret->data[1] = rows[1];
      ret->data[2] = rows[2];
      ret->linesize[0] = row_span;
      ret->linesize[1] = row_span_uv ? row_span_uv : row_span/4;
      ret->linesize[2] = row_span_uv ? row_span_uv : row_span/4;
      break;
    case BC_YUV422:
    case BC_RGB888:   ///< Packed pixel, 3 bytes per pixel, RGBRGB...
    case BC_BGR888:   ///< Packed pixel, 3 bytes per pixel, BGRBGR...
    case BC_RGBA8888: ///< Packed pixel, 4 bytes per pixel, BGRABGRA...
    case BC_RGB565:   ///< always stored in cpu endianness
      ret->data[0] = rows[0];
      ret->linesize[0] = (int)(rows[1] - rows[0]);
      break;
    default:
      break;
    }
  }
#endif

static enum PixelFormat lqt_ffmpeg_get_ffmpeg_colormodel(int id)
  {
  int i;

  for(i = 0; i < sizeof(colormodels)/sizeof(colormodels[0]); i++)
    {
    if(colormodels[i].lqt_id == id)
      return colormodels[i].ffmpeg_id;
    }
  return PIX_FMT_NB;
  }

static int lqt_ffmpeg_get_lqt_colormodel(enum PixelFormat id, int * exact)
  {
  int i;

  for(i = 0; i < sizeof(colormodels)/sizeof(colormodels[0]); i++)
    {
    if(colormodels[i].ffmpeg_id == id)
      {
      *exact = colormodels[i].exact;
      return colormodels[i].lqt_id;
      }
    }
  return LQT_COLORMODEL_NONE;
  }
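
/* Editor's note: an illustrative sketch (not part of the original file) of how
 * the lookup above is typically used once libavcodec has reported its pix_fmt,
 * usually after the first frame was decoded.  The names avctx and stream_cmodel
 * are placeholders; the real logic lives in the decode path further down in
 * this file and may differ in detail.
 *
 *   int exact = 0;
 *   int stream_cmodel = lqt_ffmpeg_get_lqt_colormodel(avctx->pix_fmt, &exact);
 *
 *   if(stream_cmodel == LQT_COLORMODEL_NONE)
 *     stream_cmodel = BC_RGB888;       // hypothetical fallback
 *   codec->do_imgconvert = !exact;     // non-exact matches go through convert_image_decode()
 *
 * Because several ffmpeg formats map onto the same libquicktime colormodel
 * with exact == 0, the exact entries must be listed first so they are found
 * before the lossy fallbacks.
 */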

/* Convert ffmpeg RGBA32 to BC_RGBA8888 */

/* From avcodec.h:
 *
 * PIX_FMT_RGBA32 is handled in an endian-specific manner. A RGBA
 * color is put together as:
 *  (A << 24) | (R << 16) | (G << 8) | B
 * This is stored as BGRA on little endian CPU architectures and ARGB on
 * big endian CPUs.
 */

/* The only question is: WHY? */

static void convert_image_decode_rgba(AVFrame * in_frame,
                                      unsigned char ** out_frame,
                                      int width, int height)
  {
  uint32_t r, g, b; // , a;
  uint32_t * src_ptr;
  uint8_t * dst_ptr;
  int i, j;

  for(i = 0; i < height; i++)
    {
    src_ptr = (uint32_t*)(in_frame->data[0] + i * in_frame->linesize[0]);
    dst_ptr = out_frame[i];
    for(j = 0; j < width; j++)
      {
      //      a = ((*src_ptr) & 0xff000000) >> 24;
      r = ((*src_ptr) & 0x00ff0000) >> 16;
      g = ((*src_ptr) & 0x0000ff00) >> 8;
      b = ((*src_ptr) & 0x000000ff);
      dst_ptr[0] = r;
      dst_ptr[1] = g;
      dst_ptr[2] = b;
      //      dst_ptr[3] = a;
      dst_ptr[3] = 0xff;
      dst_ptr += 4;
      src_ptr++;
      }
    }
  }

/*
 * Do a conversion from a ffmpeg special colorspace
 * to a libquicktime special one
 */

static void convert_image_decode(quicktime_ffmpeg_video_codec_t *codec,
                                 AVFrame * in_frame, enum PixelFormat in_format,
                                 unsigned char ** out_frame, int out_format,
                                 int width, int height,
                                 int row_span, int row_span_uv)
  {
#ifdef HAVE_LIBSWSCALE
  uint8_t * out_planes[4];
  int out_strides[4];
#else
  AVPicture in_pic;
  AVPicture out_pic;
#endif

  /*
   * Could someone please tell me how people can make such a brain dead
   * RGBA format like in ffmpeg??
   */

  if((in_format == PIX_FMT_RGBA32) && (out_format == BC_RGBA8888))
    {
    convert_image_decode_rgba(in_frame, out_frame, width, height);
    return;
    }
#ifdef HAVE_LIBSWSCALE
  /* Plane pointers for planar output formats; packed formats only use plane 0 */
  out_planes[0] = out_frame[0];
  out_planes[1] = out_frame[1];
  out_planes[2] = out_frame[2];
  out_planes[3] = (uint8_t*)0;

  out_strides[0] = row_span;
  out_strides[1] = row_span_uv;
  out_strides[2] = row_span_uv;
  out_strides[3] = 0;

  sws_scale(codec->swsContext, in_frame->data, in_frame->linesize,
            0, height, out_planes, out_strides);
#else
  memset(&in_pic,  0, sizeof(in_pic));
  memset(&out_pic, 0, sizeof(out_pic));

  in_pic.data[0]     = in_frame->data[0];
  in_pic.data[1]     = in_frame->data[1];
  in_pic.data[2]     = in_frame->data[2];
  in_pic.linesize[0] = in_frame->linesize[0];
  in_pic.linesize[1] = in_frame->linesize[1];
  in_pic.linesize[2] = in_frame->linesize[2];

  fill_avpicture(&out_pic, out_frame, out_format, row_span, row_span_uv);

  img_convert(&out_pic, lqt_ffmpeg_get_ffmpeg_colormodel(out_format),
              &in_pic, in_format, width, height);
#endif
  }

/* Just for the curious: This function can be called with NULL as row_pointers.
   In this case, have_frame is set to 1 and a subsequent call will take the
   already decoded frame. This madness is necessary because sometimes ffmpeg
   doesn't tell us the true colormodel before decoding the first frame */
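
/* Editor's note: an illustrative sketch (not part of the original file) of the
 * two-step protocol described above.  decode() stands for the codec's decode
 * function that follows in the original file; the name and argument order are
 * placeholders.
 *
 *   // First call with NULL row_pointers: a frame is decoded internally,
 *   // codec->have_frame is set to 1 and the true stream colormodel is known.
 *   decode(file, (unsigned char **)0, track);
 *
 *   // Second call with real row_pointers: no new frame is decoded; the
 *   // frame kept from the probe call is copied/converted out instead.
 *   decode(file, row_pointers, track);
 */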