/* dv.c */
/* The excerpt begins mid-function; the signature and the first locals below are
   reconstructed from the call in dv_encode_mt() and from how they are used. */
static inline void dv_encode_video_segment(DVVideoContext *s, uint8_t *dif,
                                           const uint16_t *mb_pos_ptr)
{
    int       mb_index, i, j, v;
    int       mb_x, mb_y, c_offset, linesize, do_edge_wrap;
    uint8_t  *y_ptr, *data, *ptr;
    DECLARE_ALIGNED_16(DCTELEM, block[64]);
    EncBlockInfo  enc_blks[5*6];
    PutBitContext pbs[5*6];
    PutBitContext *pb;
    EncBlockInfo  *enc_blk;
    int vs_bit_size = 0;
    int qnos[5];

    assert((((int)block) & 15) == 0);

    enc_blk = &enc_blks[0];
    pb = &pbs[0];
    for (mb_index = 0; mb_index < 5; mb_index++) {
        v = *mb_pos_ptr++;
        mb_x = v & 0xff;
        mb_y = v >> 8;
        if (s->sys->pix_fmt == PIX_FMT_YUV422P) {
            y_ptr = s->picture.data[0] + (mb_y * s->picture.linesize[0] * 8) + (mb_x * 4);
        } else { /* 4:1:1 */
            y_ptr = s->picture.data[0] + (mb_y * s->picture.linesize[0] * 8) + (mb_x * 8);
        }
        if (s->sys->pix_fmt == PIX_FMT_YUV420P) {
            c_offset = (((mb_y >> 1) * s->picture.linesize[1] * 8) + ((mb_x >> 1) * 8));
        } else { /* 4:2:2 or 4:1:1 */
            c_offset = ((mb_y * s->picture.linesize[1] * 8) + ((mb_x >> 2) * 8));
        }
        do_edge_wrap = 0;
        qnos[mb_index] = 15; /* No quantization */
        ptr = dif + mb_index*80 + 4;
        for (j = 0; j < 6; j++) {
            int dummy = 0;
            if (s->sys->pix_fmt == PIX_FMT_YUV422P) { /* 4:2:2 */
                if (j == 0 || j == 2) {
                    /* Y0 Y1 */
                    data = y_ptr + ((j >> 1) * 8);
                    linesize = s->picture.linesize[0];
                } else if (j > 3) {
                    /* Cr Cb */
                    data = s->picture.data[6 - j] + c_offset;
                    linesize = s->picture.linesize[6 - j];
                } else {
                    /* j == 1 and j == 3 are "dummy" blocks, used for AC data only */
                    data = 0;
                    linesize = 0;
                    dummy = 1;
                }
            } else { /* 4:1:1 or 4:2:0 */
                if (j < 4) { /* Four Y blocks */
                    /* NOTE: at the end of a line, the macroblock is handled as 4:2:0 */
                    if (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x < (704 / 8)) {
                        data = y_ptr + (j * 8);
                    } else {
                        data = y_ptr + ((j & 1) * 8) + ((j >> 1) * 8 * s->picture.linesize[0]);
                    }
                    linesize = s->picture.linesize[0];
                } else { /* Cr and Cb blocks */
                    /* don't ask Fabrice why they inverted Cb and Cr ! */
                    data = s->picture.data[6 - j] + c_offset;
                    linesize = s->picture.linesize[6 - j];
                    if (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x >= (704 / 8))
                        do_edge_wrap = 1;
                }
            }
            /* Everything is set up -- now just copy data -> DCT block */
            if (do_edge_wrap) { /* Edge wrap copy: 4x16 -> 8x8 */
                uint8_t *d;
                DCTELEM *b = block;
                for (i = 0; i < 8; i++) {
                    d = data + 8 * linesize;
                    b[0] = data[0]; b[1] = data[1]; b[2] = data[2]; b[3] = data[3];
                    b[4] =    d[0]; b[5] =    d[1]; b[6] =    d[2]; b[7] =    d[3];
                    data += linesize;
                    b += 8;
                }
            } else { /* Simple copy: 8x8 -> 8x8 */
                if (!dummy)
                    s->get_pixels(block, data, linesize);
            }
            if (s->avctx->flags & CODEC_FLAG_INTERLACED_DCT)
                enc_blk->dct_mode = dv_guess_dct_mode(block);
            else
                enc_blk->dct_mode = 0;
            enc_blk->area_q[0] = enc_blk->area_q[1] = enc_blk->area_q[2] = enc_blk->area_q[3] = 0;
            enc_blk->partial_bit_count = 0;
            enc_blk->partial_bit_buffer = 0;
            enc_blk->cur_ac = 0;

            if (dummy) {
                /* We rely on the fact that encoding all zeros leads to an immediate EOB,
                   which is precisely what the spec calls for in the "dummy" blocks. */
                memset(block, 0, sizeof(block));
            } else {
                s->fdct[enc_blk->dct_mode](block);
            }

            dv_set_class_number(block, enc_blk,
                                enc_blk->dct_mode ? ff_zigzag248_direct : ff_zigzag_direct,
                                enc_blk->dct_mode ? dv_weight_248 : dv_weight_88,
                                j/4);

            init_put_bits(pb, ptr, block_sizes[j]/8);
            /* per-block header: 9-bit DC term, 1-bit DCT mode, 2-bit class number */
            put_bits(pb, 9, (uint16_t)(((enc_blk->mb[0] >> 3) - 1024 + 2) >> 2));
            put_bits(pb, 1, enc_blk->dct_mode);
            put_bits(pb, 2, enc_blk->cno);

            vs_bit_size += enc_blk->bit_size[0] + enc_blk->bit_size[1] +
                           enc_blk->bit_size[2] + enc_blk->bit_size[3];
            ++enc_blk;
            ++pb;
            ptr += block_sizes[j]/8;
        }
    }
    if (vs_total_ac_bits < vs_bit_size)
        dv_guess_qnos(&enc_blks[0], &qnos[0]);

    for (i = 0; i < 5; i++) {
        dif[i*80 + 3] = qnos[i];
    }
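
    /* AC coefficients are distributed in three passes: first into each block's
       own area, then any leftover bits spill into the other blocks of the same
       macroblock, and finally into the remaining space of the whole video
       segment. dv_encode_ac() carries the unfinished state between passes in
       partial_bit_count / partial_bit_buffer. */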
    /* First pass over individual cells only */
    for (j = 0; j < 5*6; j++)
        dv_encode_ac(&enc_blks[j], &pbs[j], &pbs[j+1]);

    /* Second pass over each MB space */
    for (j = 0; j < 5*6; j += 6) {
        pb = &pbs[j];
        for (i = 0; i < 6; i++) {
            if (enc_blks[i+j].partial_bit_count)
                pb = dv_encode_ac(&enc_blks[i+j], pb, &pbs[j+6]);
        }
    }

    /* Third and final pass over the whole video segment space */
    pb = &pbs[0];
    for (j = 0; j < 5*6; j++) {
        if (enc_blks[j].partial_bit_count)
            pb = dv_encode_ac(&enc_blks[j], pb, &pbs[6*5]);
        if (enc_blks[j].partial_bit_count)
            av_log(NULL, AV_LOG_ERROR, "ac bitstream overflow\n");
    }

    for (j = 0; j < 5*6; j++)
        flush_put_bits(&pbs[j]);
}
static int dv_decode_mt(AVCodecContext *avctx, void* sl)
{
    DVVideoContext *s = avctx->priv_data;
    int slice = (size_t)sl;

    /* which DIF channel is this? */
    int chan = slice / (s->sys->difseg_size * 27);
    /* slice within the DIF channel */
    int chan_slice = slice % (s->sys->difseg_size * 27);
    /* byte offset of this channel's data */
    int chan_offset = chan * s->sys->difseg_size * 150 * 80;
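
    /* A DIF sequence is 150 blocks of 80 bytes: 1 header, 2 subcode and 3 VAUX
       blocks, followed by 9 audio blocks interleaved with 135 video blocks
       (one audio block before every 15 video blocks) -- see dv_format_frame()
       below. The index expression here maps the video segment number onto the
       position of its first video DIF block within that layout. */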
    dv_decode_video_segment(s, &s->buf[((chan_slice/27)*6 + (chan_slice/3) + chan_slice*5 + 7)*80 + chan_offset],
                            &s->sys->video_place[slice*5]);
    return 0;
}
#ifdef CONFIG_ENCODERS
static int dv_encode_mt(AVCodecContext *avctx, void* sl)
{
    DVVideoContext *s = avctx->priv_data;
    int slice = (size_t)sl;

    /* which DIF channel is this? */
    int chan = slice / (s->sys->difseg_size * 27);
    /* slice within the DIF channel */
    int chan_slice = slice % (s->sys->difseg_size * 27);
    /* byte offset of this channel's data */
    int chan_offset = chan * s->sys->difseg_size * 150 * 80;
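
    /* same video segment -> DIF block mapping as in dv_decode_mt() above */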
    dv_encode_video_segment(s, &s->buf[((chan_slice/27)*6 + (chan_slice/3) + chan_slice*5 + 7)*80 + chan_offset],
                            &s->sys->video_place[slice*5]);
    return 0;
}
#endif
#ifdef CONFIG_DECODERS
/* NOTE: exactly one frame must be given (120000 bytes for NTSC,
   144000 bytes for PAL - or twice those for 50Mbps) */
static int dvvideo_decode_frame(AVCodecContext *avctx,
                                void *data, int *data_size,
                                uint8_t *buf, int buf_size)
{
    DVVideoContext *s = avctx->priv_data;

    s->sys = dv_frame_profile(buf);
    if (!s->sys || buf_size < s->sys->frame_size)
        return -1; /* NOTE: we only accept whole frames */

    if (s->picture.data[0])
        avctx->release_buffer(avctx, &s->picture);

    s->picture.reference = 0;
    s->picture.key_frame = 1;
    s->picture.pict_type = FF_I_TYPE;
    avctx->pix_fmt = s->sys->pix_fmt;
    avcodec_set_dimensions(avctx, s->sys->width, s->sys->height);
    if (avctx->get_buffer(avctx, &s->picture) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    s->picture.interlaced_frame = 1;
    s->picture.top_field_first = 0;

    s->buf = buf;
    avctx->execute(avctx, dv_decode_mt, (void**)&dv_anchor[0], NULL,
                   s->sys->n_difchan * s->sys->difseg_size * 27);

    emms_c();

    /* return image */
    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->picture;

    return s->sys->frame_size;
}
#endif
static inline int dv_write_pack(enum dv_pack_type pack_id, DVVideoContext *c, uint8_t* buf)
{
    /*
     * Here's what SMPTE314M says about these two:
     *    (page 6) APTn, AP1n, AP2n, AP3n: These data shall be identical
     *             as track application IDs (APTn = 001, AP1n =
     *             001, AP2n = 001, AP3n = 001), if the source signal
     *             comes from a digital VCR. If the signal source is
     *             unknown, all bits for these data shall be set to 1.
     *    (page 12) STYPE: STYPE defines a signal type of video signal
     *                     00000b = 4:1:1 compression
     *                     00100b = 4:2:2 compression
     *                     XXXXXX = Reserved
     * Now, I've got two problems with these statements:
     *   1. it looks like APT == 111b should be a safe bet, but it isn't.
     *      It seems that for PAL as defined in IEC 61834 we have to set
     *      APT to 000 and for SMPTE314M to 001.
     *   2. It is not at all clear what STYPE is used for the 4:2:0 PAL
     *      compression scheme (if any).
     */
    int apt   = (c->sys->pix_fmt == PIX_FMT_YUV420P ? 0 : 1);
    int stype = (c->sys->pix_fmt == PIX_FMT_YUV422P ? 4 : 0);
    uint8_t aspect = 0;

    if ((int)(av_q2d(c->avctx->sample_aspect_ratio) * c->avctx->width / c->avctx->height * 10) == 17) /* 16:9 */
        aspect = 0x02;

    buf[0] = (uint8_t)pack_id;
    switch (pack_id) {
    case dv_header525: /* I can't imagine why these two weren't defined as real */
    case dv_header625: /* packs in SMPTE314M -- they definitely look like ones */
        buf[1] = 0xf8 |        /* reserved -- always 1 */
                 (apt & 0x07); /* APT: Track application ID */
        buf[2] = (0 << 7)    | /* TF1: audio data is 0 - valid; 1 - invalid */
                 (0x0f << 3) | /* reserved -- always 1 */
                 (apt & 0x07); /* AP1: Audio application ID */
        buf[3] = (0 << 7)    | /* TF2: video data is 0 - valid; 1 - invalid */
                 (0x0f << 3) | /* reserved -- always 1 */
                 (apt & 0x07); /* AP2: Video application ID */
        buf[4] = (0 << 7)    | /* TF3: subcode(SSYB) is 0 - valid; 1 - invalid */
                 (0x0f << 3) | /* reserved -- always 1 */
                 (apt & 0x07); /* AP3: Subcode application ID */
        break;
    case dv_video_source:
        buf[1] = 0xff;      /* reserved -- always 1 */
        buf[2] = (1 << 7) | /* B/W: 0 - b/w, 1 - color */
                 (1 << 6) | /* following CLF is valid - 0, invalid - 1 */
                 (3 << 4) | /* CLF: color frames id (see ITU-R BT.470-4) */
                 0xf;       /* reserved -- always 1 */
        buf[3] = (3 << 6)           | /* reserved -- always 1 */
                 (c->sys->dsf << 5) | /* system: 60fields/50fields */
                 stype;               /* signal type video compression */
        buf[4] = 0xff;      /* VISC: 0xff -- no information */
        break;
    case dv_video_control:
        buf[1] = (0 << 6) | /* Copy generation management (CGMS) 0 -- free */
                 0x3f;      /* reserved -- always 1 */
        buf[2] = 0xc8 |     /* reserved -- always b11001xxx */
                 aspect;
        buf[3] = (1 << 7) | /* Frame/field flag 1 -- frame, 0 -- field */
                 (1 << 6) | /* First/second field flag 0 -- field 2, 1 -- field 1 */
                 (1 << 5) | /* Frame change flag 0 -- same picture as before, 1 -- different */
                 (1 << 4) | /* 1 - interlaced, 0 - noninterlaced */
                 0xc;       /* reserved -- always b1100 */
        buf[4] = 0xff;      /* reserved -- always 1 */
        break;
    default:
        buf[1] = buf[2] = buf[3] = buf[4] = 0xff;
    }
    return 5;
}
static void dv_format_frame(DVVideoContext* c, uint8_t* buf)
{
    int chan, i, j, k;

    for (chan = 0; chan < c->sys->n_difchan; chan++) {
        for (i = 0; i < c->sys->difseg_size; i++) {
            memset(buf, 0xff, 80 * 6); /* first 6 DIF blocks are for control data */

            /* DV header: 1 DIF */
            buf += dv_write_dif_id(dv_sect_header, chan, i, 0, buf);
            buf += dv_write_pack((c->sys->dsf ? dv_header625 : dv_header525), c, buf);
            buf += 72; /* unused bytes */

            /* DV subcode: 2 DIFs */
            for (j = 0; j < 2; j++) {
                buf += dv_write_dif_id(dv_sect_subcode, chan, i, j, buf);
                for (k = 0; k < 6; k++)
                    buf += dv_write_ssyb_id(k, (i < c->sys->difseg_size/2), buf) + 5;
                buf += 29; /* unused bytes */
            }

            /* DV VAUX: 3 DIFs */
            for (j = 0; j < 3; j++) {
                buf += dv_write_dif_id(dv_sect_vaux, chan, i, j, buf);
                buf += dv_write_pack(dv_video_source, c, buf);
                buf += dv_write_pack(dv_video_control, c, buf);
                buf += 7*5;
                buf += dv_write_pack(dv_video_source, c, buf);
                buf += dv_write_pack(dv_video_control, c, buf);
                buf += 4*5 + 2; /* unused bytes */
            }

            /* DV Audio/Video: 135 video DIFs + 9 audio DIFs */
            for (j = 0; j < 135; j++) {
                if (j%15 == 0) {
                    memset(buf, 0xff, 80);
                    buf += dv_write_dif_id(dv_sect_audio, chan, i, j/15, buf);
                    buf += 77; /* audio control & shuffled PCM audio */
                }
                buf += dv_write_dif_id(dv_sect_video, chan, i, j, buf);
                buf += 77; /* 1 video macroblock: 1 byte control
                              4 * 14 bytes Y 8x8 data
                              10 bytes Cr 8x8 data
                              10 bytes Cb 8x8 data */
            }
        }
    }
}
#ifdef CONFIG_ENCODERS
static int dvvideo_encode_frame(AVCodecContext *c, uint8_t *buf, int buf_size,
                                void *data)
{
    DVVideoContext *s = c->priv_data;

    s->sys = dv_codec_profile(c);
    if (!s->sys)
        return -1;
    if (buf_size < s->sys->frame_size)
        return -1;

    c->pix_fmt = s->sys->pix_fmt;
    s->picture = *((AVFrame *)data);
    s->picture.key_frame = 1;
    s->picture.pict_type = FF_I_TYPE;

    s->buf = buf;
    c->execute(c, dv_encode_mt, (void**)&dv_anchor[0], NULL,
               s->sys->n_difchan * s->sys->difseg_size * 27);

    emms_c();

    dv_format_frame(s, buf);

    return s->sys->frame_size;
}
#endif
static int dvvideo_close(AVCodecContext *c)
{
    DVVideoContext *s = c->priv_data;

    if (s->picture.data[0])
        c->release_buffer(c, &s->picture);

    return 0;
}
#ifdef CONFIG_DVVIDEO_ENCODER
AVCodec dvvideo_encoder = {
    "dvvideo",
    CODEC_TYPE_VIDEO,
    CODEC_ID_DVVIDEO,
    sizeof(DVVideoContext),
    dvvideo_init,
    dvvideo_encode_frame,
    .pix_fmts = (enum PixelFormat[]) {PIX_FMT_YUV411P, PIX_FMT_YUV422P, PIX_FMT_YUV420P, -1},
};
#endif // CONFIG_DVVIDEO_ENCODER

#ifdef CONFIG_DVVIDEO_DECODER
AVCodec dvvideo_decoder = {
    "dvvideo",
    CODEC_TYPE_VIDEO,
    CODEC_ID_DVVIDEO,
    sizeof(DVVideoContext),
    dvvideo_init,
    NULL,
    dvvideo_close,
    dvvideo_decode_frame,
    CODEC_CAP_DR1,
    NULL
};
#endif