/*
 * vf_zrmjpeg.c — MJPEG encoder for the zrmjpeg filter and the zr video
 * driver (from an early Ingenic uCOS MPlayer port).
 * Web paste, page 1 of 3, 1,069 lines in total; viewer chrome removed.
 */
/**
 * \file vf_zrmjpeg.c
 *
 * \brief Does mjpeg encoding as required by the zrmjpeg filter as well
 * as by the zr video driver.
 */
/*
 * Copyright (C) 2005 Rik Snel <rsnel@cube.dyndns.org>, license GPL v2
 * - based on vd_lavc.c by A'rpi (C) 2002-2003
 * - parts from ffmpeg Copyright (c) 2000-2003 Fabrice Bellard
 *
 * This file includes a straightforward (to be) optimized JPEG encoder for
 * the YUV422 format, based on mjpeg code from ffmpeg.
 *
 * For an excellent introduction to the JPEG format, see:
 * http://www.ece.purdue.edu/~bouman/grad-labs/lab8/pdf/lab.pdf
 */

/* NOTE(review): <uclib.h> was included three times in the pasted source;
 * a single include suffices. */
#include <uclib.h>
#include <inttypes.h>

#include "config.h"
#include "mp_msg.h"
#include "img_format.h"
#include "mp_image.h"
#include "vf.h"

/* We need this #define because we need ../libavcodec/common.h to #define
 * be2me_32, otherwise the linker will complain that it doesn't exist */
#define HAVE_AV_CONFIG_H
#include "libavcodec/avcodec.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/mpegvideo.h"
//#include "jpeg_enc.h" /* this file is not present yet */

/* undo memory-debugging macros possibly set up by the includes above */
#undef malloc
#undef free
#undef realloc

// "local" flag in vd_ffmpeg.c. If not set, avcodec_init() et al.
// need to be called; set when init is done, so that initialization
// is not done twice.
extern int avcodec_inited;

/* some convenient #define's, is this portable enough? */
/// Printout with vf_zrmjpeg: prefix at VERBOSE level
#define VERBOSE(...) mp_msg(MSGT_DECVIDEO, MSGL_V, "vf_zrmjpeg: " __VA_ARGS__)
/// Printout with vf_zrmjpeg: prefix at ERROR level
#define ERROR(...) mp_msg(MSGT_DECVIDEO, MSGL_ERR, "vf_zrmjpeg: " __VA_ARGS__)
/// Printout with vf_zrmjpeg: prefix at WARNING level
#define WARNING(...) mp_msg(MSGT_DECVIDEO, MSGL_WARN, \
		"vf_zrmjpeg: " __VA_ARGS__)

// "local" flag in vd_ffmpeg.c. If not set, avcodec_init() et al.
need to be called// set when init is done, so that initialization is not done twice.extern int avcodec_inited;/// structure copied from mjpeg.c/* zrmjpeg_encode_mb needs access to these tables for the black & white * option */typedef struct MJpegContext {	uint8_t huff_size_dc_luminance[12];	uint16_t huff_code_dc_luminance[12];	uint8_t huff_size_dc_chrominance[12];	uint16_t huff_code_dc_chrominance[12];	uint8_t huff_size_ac_luminance[256];	uint16_t huff_code_ac_luminance[256];	uint8_t huff_size_ac_chrominance[256];	uint16_t huff_code_ac_chrominance[256];} MJpegContext;/// The get_pixels() routine to use. The real routine comes from dsputilstatic void (*get_pixels)(DCTELEM *restrict block, const uint8_t *pixels, int line_size);/* Begin excessive code duplication ************************************//* Code coming from mpegvideo.c and mjpeg.c in ../libavcodec ***********//// copy of the table in mpegvideo.cstatic const unsigned short aanscales[64] = {	/**< precomputed values scaled up by 14 bits */	16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,	22725, 31521, 29692, 26722, 22725, 17855, 12299,  6270,	21407, 29692, 27969, 25172, 21407, 16819, 11585,  5906,	19266, 26722, 25172, 22654, 19266, 15137, 10426,  5315,	16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,	12873, 17855, 16819, 15137, 12873, 10114,  6967,  3552,	8867,  12299, 11585, 10426,  8867,  6967,  4799,  2446,	4520,   6270,  5906,  5315,  4520,  3552,  2446,  1247};/// Precompute DCT quantizing matrix/** * This routine will precompute the combined DCT matrix with qscale * and DCT renorm needed by the MPEG encoder here. It is basically the * same as the routine with the same name in mpegvideo.c, except for * some coefficient changes. The matrix will be computed in two variations, * depending on the DCT version used. The second used by the MMX version of DCT. 
 *
 * \param s MpegEncContext pointer
 * \param qmat[OUT] pointer to where the matrix is stored
 * \param qmat16[OUT] pointer to where matrix for MMX is stored.
 *                This matrix is not permutated
 *                and second 64 entries are bias
 * \param quant_matrix[IN] the quantization matrix to use
 * \param bias bias for the quantizer
 * \param qmin minimum qscale value to set up for
 * \param qmax maximum qscale value to set up for
 *
 * Only rows between qmin and qmax will be populated in the matrix.
 * In this MJPEG encoder, only the value 8 for qscale is used.
 */
static void convert_matrix(MpegEncContext *s, int (*qmat)[64],
		uint16_t (*qmat16)[2][64], const uint16_t *quant_matrix,
		int bias, int qmin, int qmax)
{
	int qscale;

	for(qscale = qmin; qscale <= qmax; qscale++) {
		int i;
		/* pick the variant matching the fdct implementation in use */
		if (s->dsp.fdct == ff_jpeg_fdct_islow) {
			for (i = 0; i < 64; i++) {
				const int j = s->dsp.idct_permutation[i];
/* 16 <= qscale * quant_matrix[i] <= 7905
 * 19952         <= aanscales[i] * qscale * quant_matrix[i]      <= 249205026
 * (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i])
 *                                                       >= (1<<36)/249205026
 * 3444240       >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i])  >= 275
 */
				qmat[qscale][i] = (int)((UINT64_C(1) <<
					(QMAT_SHIFT-3))/
					(qscale*quant_matrix[j]));
			}
		} else if (s->dsp.fdct == fdct_ifast) {
			for (i = 0; i < 64; i++) {
				const int j = s->dsp.idct_permutation[i];
/* same value-range analysis as in the branch above; the AAN fast DCT
 * leaves the aanscales renormalization to the quantizer, hence the
 * aanscales[i] factor in the divisor here */
				qmat[qscale][i] = (int)((UINT64_C(1) <<
					(QMAT_SHIFT + 11))/(aanscales[i]
					*qscale * quant_matrix[j]));
			}
		} else {
			for (i = 0; i < 64; i++) {
				const int j = s->dsp.idct_permutation[i];
/* We can safely assume that 16 <= quant_matrix[i] <= 255
 * So 16           <= qscale * quant_matrix[i]             <= 7905
 * so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905
 * so 32768        >= (1<<19) / (qscale * quant_matrix[i]) >= 67
 */
				qmat[qscale][i] = (int)((UINT64_C(1) <<
						QMAT_SHIFT_MMX) / (qscale
							*quant_matrix[j]));
				qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX)
						/(qscale * quant_matrix[j]);
				/* keep the 16-bit MMX reciprocal in a safe,
				 * nonzero range */
				if (qmat16[qscale][0][i] == 0 ||
						qmat16[qscale][0][i] == 128*256)
					qmat16[qscale][0][i]=128*256-1;
				qmat16[qscale][1][i]=ROUNDED_DIV(bias
						<<(16-QUANT_BIAS_SHIFT),
						qmat16[qscale][0][i]);
			}
		}
	}
}

/// Emit the DC value into a MJPEG code stream
/**
 * This routine is only intended to be used from encode_block
 *
 * \param s pointer to MpegEncContext structure
 * \param val the DC value to emit
 * \param huff_size pointer to huffman code size array
 * \param huff_code pointer to the code array corresponding to \a huff_size
 *
 * This routine is a clone of mjpeg_encode_dc
 */
static inline void encode_dc(MpegEncContext *s, int val,
		uint8_t *huff_size, uint16_t *huff_code)
{
	int mant, nbits;

	if (val == 0) {
		/* category 0: value unchanged */
		put_bits(&s->pb, huff_size[0], huff_code[0]);
	} else {
		mant = val;
		if (val < 0) {
			val = -val;
			mant--;
		}
		/* nbits is the magnitude category; for negative values mant
		 * is val-1, so its low nbits hold the one's-complement
		 * encoding JPEG expects */
		nbits= av_log2_16bit(val) + 1;
		put_bits(&s->pb, huff_size[nbits], huff_code[nbits]);
		put_bits(&s->pb, nbits, mant & ((1 << nbits) - 1));
	}
}

/// Huffman encode and emit one DCT block into the MJPEG code stream
/**
 * \param s pointer to MpegEncContext structure
 * \param block pointer to the DCT block to emit
 * \param n block number; n < 4 selects the luminance tables, n >= 4 the
 *          chrominance ones (see the component computation below)
 *
 * This routine is a duplicate of encode_block in mjpeg.c
 */
static void encode_block(MpegEncContext *s, DCTELEM *block, int n)
{
	int mant, nbits, code, i, j;
	int component, dc, run, last_index, val;
	MJpegContext *m = s->mjpeg_ctx;
	uint8_t *huff_size_ac;
	uint16_t *huff_code_ac;

	/* DC coef: differential against the previous block's DC of the
	 * same component */
	component = (n <= 3 ? 0 : n - 4 + 1);
	dc = block[0]; /* overflow is impossible */
	val = dc - s->last_dc[component];
	if (n < 4) {
		encode_dc(s, val, m->huff_size_dc_luminance,
				m->huff_code_dc_luminance);
		huff_size_ac = m->huff_size_ac_luminance;
		huff_code_ac = m->huff_code_ac_luminance;
	} else {
		encode_dc(s, val, m->huff_size_dc_chrominance,
				m->huff_code_dc_chrominance);
		huff_size_ac = m->huff_size_ac_chrominance;
		huff_code_ac = m->huff_code_ac_chrominance;
	}
	s->last_dc[component] = dc;

	/* AC coefs: (run,size) Huffman pairs in zigzag order */
	run = 0;
	last_index = s->block_last_index[n];
	for (i = 1; i <= last_index; i++) {
		j = s->intra_scantable.permutated[i];
		val = block[j];
		if (val == 0) run++;
		else {
			/* a run of 16+ zeros is emitted as ZRL (0xf0) codes */
			while (run >= 16) {
				put_bits(&s->pb, huff_size_ac[0xf0],
						huff_code_ac[0xf0]);
				run -= 16;
			}
			mant = val;
			if (val < 0) {
				val = -val;
				mant--;
			}
			nbits= av_log2_16bit(val) + 1;
			code = (run << 4) | nbits;
			put_bits(&s->pb, huff_size_ac[code],
					huff_code_ac[code]);
			put_bits(&s->pb, nbits, mant & ((1 << nbits) - 1));
			run = 0;
		}
	}

	/* output EOB only if not already 64 values */
	if (last_index < 63 || run != 0)
		put_bits(&s->pb, huff_size_ac[0], huff_code_ac[0]);
}

/// clip overflowing DCT coefficients
/**
 * If the computed DCT coefficients in a block overflow, this routine
 * will go through them and clip them to be in the valid range.
 *
 * \param s pointer to MpegEncContext
 * \param block pointer to DCT block to process
 * \param last_index index of the last non-zero coefficient in block
 *
 * The max and min level, which are clipped to, are stored in
 * s->min_qcoeff and s->max_qcoeff respectively.
*/static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block,		int last_index) {	int i;	const int maxlevel= s->max_qcoeff;	const int minlevel= s->min_qcoeff;	for (i = 0; i <= last_index; i++) {		const int j = s->intra_scantable.permutated[i];		int level = block[j];		if (level > maxlevel) level=maxlevel;		else if(level < minlevel) level=minlevel;		block[j]= level;	}}/* End excessive code duplication **************************************/typedef struct {	struct MpegEncContext *s;	int cheap_upsample;	int bw;	int y_rs;	int u_rs;	int v_rs;} jpeg_enc_t;// Huffman encode and emit one MCU of MJPEG code/** * \param j pointer to jpeg_enc_t structure * * This function huffman encodes one MCU, and emits the * resulting bitstream into the MJPEG code that is currently worked on. * * this function is a reproduction of the one in mjpeg, it includes two * changes, it allows for black&white encoding (it skips the U and V * macroblocks and it outputs the huffman code for 'no change' (dc) and * 'all zero' (ac)) and it takes 4 macroblocks (422) instead of 6 (420) */static av_always_inline void zr_mjpeg_encode_mb(jpeg_enc_t *j) {	MJpegContext *m = j->s->mjpeg_ctx;	encode_block(j->s, j->s->block[0], 0);	encode_block(j->s, j->s->block[1], 1);	if (j->bw) {		/* U */		put_bits(&j->s->pb, m->huff_size_dc_chrominance[0],				m->huff_code_dc_chrominance[0]);		put_bits(&j->s->pb, m->huff_size_ac_chrominance[0],				m->huff_code_ac_chrominance[0]);		/* V */		put_bits(&j->s->pb, m->huff_size_dc_chrominance[0],				m->huff_code_dc_chrominance[0]);		put_bits(&j->s->pb, m->huff_size_ac_chrominance[0],				m->huff_code_ac_chrominance[0]);	} else {		/* we trick encode_block here so that it uses		 * chrominance huffman tables instead of luminance ones		 * (see the effect of second argument of encode_block) */		encode_block(j->s, j->s->block[2], 4);		encode_block(j->s, j->s->block[3], 5);	}}/// Fill one DCT MCU from planar storage/** * This routine will convert one MCU from YUYV planar storage into 4 
* DCT macro blocks, converting from 8-bit format in the planar * storage to 16-bit format used in the DCT. * * \param j pointer to jpeg_enc structure, and also storage for DCT macro blocks * \param x pixel x-coordinate for the first pixel * \param y pixel y-coordinate for the first pixel * \param y_data pointer to the Y plane * \param u_data pointer to the U plane * \param v_data pointer to the V plane */static av_always_inline void fill_block(jpeg_enc_t *j, int x, int y,		unsigned char *y_data, unsigned char *u_data,		unsigned char *v_data){	int i, k;

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?