svq1dec.c
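/* SVQ1 (Sorenson Video 1) decoder from libavcodec.  This listing appears to
 * start partway through svq1_motion_inter_4v_block(), which decodes the four
 * motion vectors of an INTER_4V macroblock and forms the half-pel prediction
 * for each 8x8 quadrant. */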
    if (result != 0)
        return result;

    /* predict and decode motion vector (1) */
    pmv[0] = &mv;
    if (y == 0) {
        pmv[1] =
        pmv[2] = pmv[0];
    }
    else {
        pmv[1] = &motion[(x / 8) + 3];
    }

    result = svq1_decode_motion_vector (bitbuf, &motion[0], pmv);

    if (result != 0)
        return result;

    /* predict and decode motion vector (2) */
    pmv[1] = &motion[0];
    pmv[2] = &motion[(x / 8) + 1];

    result = svq1_decode_motion_vector (bitbuf, &motion[(x / 8) + 2], pmv);

    if (result != 0)
        return result;

    /* predict and decode motion vector (3) */
    pmv[2] = &motion[(x / 8) + 2];
    pmv[3] = &motion[(x / 8) + 3];

    result = svq1_decode_motion_vector (bitbuf, pmv[3], pmv);

    if (result != 0)
        return result;

    /* form predictions */
    for (i=0; i < 4; i++) {
        int mvx= pmv[i]->x + (i&1)*16;
        int mvy= pmv[i]->y + (i>>1)*16;

        ///XXX /FIXME clipping or padding?
        if(y + (mvy >> 1)<0)
            mvy= 0;
        if(x + (mvx >> 1)<0)
            mvx= 0;

#if 0
        int w= (s->width+15)&~15;
        int h= (s->height+15)&~15;
        if(x + (mvx >> 1)<0 || y + (mvy >> 1)<0 || x + (mvx >> 1) + 8 > w || y + (mvy >> 1) + 8> h)
            av_log(s->avctx, AV_LOG_INFO, "%d %d %d %d\n", x, y, x + (mvx >> 1), y + (mvy >> 1));
#endif

        src = &previous[(x + (mvx >> 1)) + (y + (mvy >> 1))*pitch];
        dst = current;

        s->dsp.put_pixels_tab[1][((mvy & 1) << 1) | (mvx & 1)](dst,src,pitch,8);

        /* select next block */
        if (i & 1) {
            current += 8*(pitch - 1);
        } else {
            current += 8;
        }
    }

    return 0;
}
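/* Decode one 16x16 block of a non-keyframe: read the block-type VLC, then
 * dispatch to SKIP / INTER / INTER_4V / INTRA handling.  The motion vector
 * predictors for this column are reset to zero for SKIP and INTRA blocks. */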
static int svq1_decode_delta_block (MpegEncContext *s, GetBitContext *bitbuf,
                                    uint8_t *current, uint8_t *previous, int pitch,
                                    svq1_pmv_t *motion, int x, int y) {
    uint32_t block_type;
    int result = 0;

    /* get block type */
    block_type = get_vlc2(bitbuf, svq1_block_type.table, 2, 2);

    /* reset motion vectors */
    if (block_type == SVQ1_BLOCK_SKIP || block_type == SVQ1_BLOCK_INTRA) {
        motion[0].x =
        motion[0].y =
        motion[(x / 8) + 2].x =
        motion[(x / 8) + 2].y =
        motion[(x / 8) + 3].x =
        motion[(x / 8) + 3].y = 0;
    }

    switch (block_type) {
    case SVQ1_BLOCK_SKIP:
        svq1_skip_block (current, previous, pitch, x, y);
        break;

    case SVQ1_BLOCK_INTER:
        result = svq1_motion_inter_block (s, bitbuf, current, previous, pitch, motion, x, y);

        if (result != 0)
        {
#ifdef DEBUG_SVQ1
            av_log(s->avctx, AV_LOG_INFO, "Error in svq1_motion_inter_block %i\n",result);
#endif
            break;
        }
        result = svq1_decode_block_non_intra (bitbuf, current, pitch);
        break;

    case SVQ1_BLOCK_INTER_4V:
        result = svq1_motion_inter_4v_block (s, bitbuf, current, previous, pitch, motion, x, y);

        if (result != 0)
        {
#ifdef DEBUG_SVQ1
            av_log(s->avctx, AV_LOG_INFO, "Error in svq1_motion_inter_4v_block %i\n",result);
#endif
            break;
        }
        result = svq1_decode_block_non_intra (bitbuf, current, pitch);
        break;

    case SVQ1_BLOCK_INTRA:
        result = svq1_decode_block_intra (bitbuf, current, pitch);
        break;
    }

    return result;
}
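/* Table-driven 16-bit checksum over a byte buffer, seeded with 'value';
 * used to verify SVQ1 packet data against the checksum read from the
 * frame header. */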
static uint16_t svq1_packet_checksum (uint8_t *data, int length, int value) {
    int i;

    for (i=0; i < length; i++) {
        value = checksum_table[data[i] ^ (value >> 8)] ^ ((value & 0xFF) << 8);
    }

    return value;
}
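/* Read an obfuscated, length-prefixed string from the bitstream: the first
 * byte is the length, each following byte is XORed with a running seed taken
 * from string_table.  Used for the embedded message in the frame header.
 * Note that this function does not NUL-terminate the output. */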
static void svq1_parse_string (GetBitContext *bitbuf, uint8_t *out) {
    uint8_t seed;
    int i;

    out[0] = get_bits (bitbuf, 8);
    seed = string_table[out[0]];

    for (i=1; i <= out[0]; i++) {
        out[i] = get_bits (bitbuf, 8) ^ seed;
        seed = string_table[out[i] ^ seed];
    }
}
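/* Parse the SVQ1 frame header: temporal reference and picture type, then
 * (for I-frames only, depending on s->f_code) an optional packet checksum,
 * an optional embedded message, and the frame size, either from a 3-bit
 * code into ff_svq1_frame_size_table or as explicit 12-bit width/height.
 * Trailing unknown fields are skipped.  Returns 0 on success, -1 on error. */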
static int svq1_decode_frame_header (GetBitContext *bitbuf,MpegEncContext *s) {
    int frame_size_code;
    int temporal_reference;

    temporal_reference = get_bits (bitbuf, 8);

    /* frame type */
    s->pict_type= get_bits (bitbuf, 2)+1;
    if(s->pict_type==4)
        return -1;

    if (s->pict_type == I_TYPE) {
        /* unknown fields */
        if (s->f_code == 0x50 || s->f_code == 0x60) {
            int csum = get_bits (bitbuf, 16);

            csum = svq1_packet_checksum ((uint8_t *)bitbuf->buffer, bitbuf->size_in_bits>>3, csum);

//            av_log(s->avctx, AV_LOG_INFO, "%s checksum (%02x) for packet data\n",
//                   (csum == 0) ? "correct" : "incorrect", csum);
        }

        if ((s->f_code ^ 0x10) >= 0x50) {
            uint8_t msg[256];

            svq1_parse_string (bitbuf, msg);

            av_log(s->avctx, AV_LOG_INFO, "embedded message: \"%s\"\n", (char *) msg);
        }

        skip_bits (bitbuf, 2);
        skip_bits (bitbuf, 2);
        skip_bits1 (bitbuf);

        /* load frame size */
        frame_size_code = get_bits (bitbuf, 3);

        if (frame_size_code == 7) {
            /* load width, height (12 bits each) */
            s->width = get_bits (bitbuf, 12);
            s->height = get_bits (bitbuf, 12);

            if (!s->width || !s->height)
                return -1;
        } else {
            /* get width, height from table */
            s->width = ff_svq1_frame_size_table[frame_size_code].width;
            s->height = ff_svq1_frame_size_table[frame_size_code].height;
        }
    }

    /* unknown fields */
    if (get_bits1 (bitbuf) == 1) {
        skip_bits1 (bitbuf); /* use packet checksum if (1) */
        skip_bits1 (bitbuf); /* component checksums after image data if (1) */

        if (get_bits (bitbuf, 2) != 0)
            return -1;
    }

    if (get_bits1 (bitbuf) == 1) {
        skip_bits1 (bitbuf);
        skip_bits (bitbuf, 4);
        skip_bits1 (bitbuf);
        skip_bits (bitbuf, 2);

        while (get_bits1 (bitbuf) == 1) {
            skip_bits (bitbuf, 8);
        }
    }

    return 0;
}
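/* Top-level frame decoding: read the 22-bit frame code, unscramble some
 * header bytes when f_code != 0x20, parse the frame header, decide whether
 * the frame can be skipped, then decode the Y, U and V planes as a grid of
 * 16x16 blocks (intra blocks for keyframes, delta blocks otherwise). */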
static int svq1_decode_frame(AVCodecContext *avctx,
                             void *data, int *data_size,
                             uint8_t *buf, int buf_size)
{
    MpegEncContext *s=avctx->priv_data;
    uint8_t *current, *previous;
    int result, i, x, y, width, height;
    AVFrame *pict = data;

    /* initialize bit buffer */
    init_get_bits(&s->gb,buf,buf_size*8);

    /* decode frame header */
    s->f_code = get_bits (&s->gb, 22);

    if ((s->f_code & ~0x70) || !(s->f_code & 0x60))
        return -1;

    /* swap some header bytes (why?) */
    if (s->f_code != 0x20) {
        uint32_t *src = (uint32_t *) (buf + 4);

        for (i=0; i < 4; i++) {
            src[i] = ((src[i] << 16) | (src[i] >> 16)) ^ src[7 - i];
        }
    }

    result = svq1_decode_frame_header (&s->gb, s);

    if (result != 0)
    {
#ifdef DEBUG_SVQ1
        av_log(s->avctx, AV_LOG_INFO, "Error in svq1_decode_frame_header %i\n",result);
#endif
        return result;
    }

    //FIXME this avoids some confusion for "B frames" without 2 references
    //this should be removed after libavcodec can handle more flexible picture types & ordering
    if(s->pict_type==B_TYPE && s->last_picture_ptr==NULL) return buf_size;

    if(avctx->hurry_up && s->pict_type==B_TYPE) return buf_size;
    if(  (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==B_TYPE)
       ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=I_TYPE)
       ||  avctx->skip_frame >= AVDISCARD_ALL)
        return buf_size;

    if(MPV_frame_start(s, avctx) < 0)
        return -1;

    /* decode y, u and v components */
    for (i=0; i < 3; i++) {
        int linesize;
        if (i == 0) {
            width  = (s->width+15)&~15;
            height = (s->height+15)&~15;
            linesize= s->linesize;
        } else {
            if(s->flags&CODEC_FLAG_GRAY) break;
            width  = (s->width/4+15)&~15;
            height = (s->height/4+15)&~15;
            linesize= s->uvlinesize;
        }

        current = s->current_picture.data[i];

        if(s->pict_type==B_TYPE){
            previous = s->next_picture.data[i];
        }else{
            previous = s->last_picture.data[i];
        }

        if (s->pict_type == I_TYPE) {
            /* keyframe */
            for (y=0; y < height; y+=16) {
                for (x=0; x < width; x+=16) {
                    result = svq1_decode_block_intra (&s->gb, &current[x], linesize);
                    if (result != 0)
                    {
//#ifdef DEBUG_SVQ1
                        av_log(s->avctx, AV_LOG_INFO, "Error in svq1_decode_block %i (keyframe)\n",result);
//#endif
                        return result;
                    }
                }
                current += 16*linesize;
            }
        } else {
            svq1_pmv_t pmv[width/8+3];
            /* delta frame */
            memset (pmv, 0, ((width / 8) + 3) * sizeof(svq1_pmv_t));

            for (y=0; y < height; y+=16) {
                for (x=0; x < width; x+=16) {
                    result = svq1_decode_delta_block (s, &s->gb, &current[x], previous,
                                                      linesize, pmv, x, y);
                    if (result != 0)
                    {
#ifdef DEBUG_SVQ1
                        av_log(s->avctx, AV_LOG_INFO, "Error in svq1_decode_delta_block %i\n",result);
#endif
                        return result;
                    }
                }

                pmv[0].x =
                pmv[0].y = 0;

                current += 16*linesize;
            }
        }
    }

    *pict = *(AVFrame*)&s->current_picture;

    MPV_frame_end(s);

    *data_size=sizeof(AVFrame);

    return buf_size;
}
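/* Decoder init: round the coded size up to a multiple of 4, select the
 * YUV410P pixel format, initialize the MpegEncContext, and build the VLC
 * tables for block types, motion components, multistage vectors and means. */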
static int svq1_decode_init(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    int i;

    MPV_decode_defaults(s);

    s->avctx = avctx;
    s->width = (avctx->width+3)&~3;
    s->height = (avctx->height+3)&~3;
    s->codec_id= avctx->codec->id;
    avctx->pix_fmt = PIX_FMT_YUV410P;
    avctx->has_b_frames= 1; // not true, but DP frames and these behave like unidirectional b frames
    s->flags= avctx->flags;
    if (MPV_common_init(s) < 0) return -1;

    init_vlc(&svq1_block_type, 2, 4,
             &ff_svq1_block_type_vlc[0][1], 2, 1,
             &ff_svq1_block_type_vlc[0][0], 2, 1, 1);

    init_vlc(&svq1_motion_component, 7, 33,
             &mvtab[0][1], 2, 1,
             &mvtab[0][0], 2, 1, 1);

    for (i = 0; i < 6; i++) {
        init_vlc(&svq1_intra_multistage[i], 3, 8,
                 &ff_svq1_intra_multistage_vlc[i][0][1], 2, 1,
                 &ff_svq1_intra_multistage_vlc[i][0][0], 2, 1, 1);
        init_vlc(&svq1_inter_multistage[i], 3, 8,
                 &ff_svq1_inter_multistage_vlc[i][0][1], 2, 1,
                 &ff_svq1_inter_multistage_vlc[i][0][0], 2, 1, 1);
    }

    init_vlc(&svq1_intra_mean, 8, 256,
             &ff_svq1_intra_mean_vlc[0][1], 4, 2,
             &ff_svq1_intra_mean_vlc[0][0], 4, 2, 1);

    init_vlc(&svq1_inter_mean, 9, 512,
             &ff_svq1_inter_mean_vlc[0][1], 4, 2,
             &ff_svq1_inter_mean_vlc[0][0], 4, 2, 1);

    return 0;
}
static int svq1_decode_end(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;

    MPV_common_end(s);
    return 0;
}
AVCodec svq1_decoder = {
    "svq1",
    CODEC_TYPE_VIDEO,
    CODEC_ID_SVQ1,
    sizeof(MpegEncContext),
    svq1_decode_init,
    NULL,
    svq1_decode_end,
    svq1_decode_frame,
    CODEC_CAP_DR1,
    .flush= ff_mpeg_flush,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV410P, -1},
};