📄 dv.c
return (score88 - score248 > -10);
}

static inline void dv_guess_qnos(EncBlockInfo* blks, int* qnos)
{
    int size[5];
    int i, j, k, a, prev, a2;
    EncBlockInfo* b;

    size[0] = size[1] = size[2] = size[3] = size[4] = 1 << 24;
    do {
        b = blks;
        for (i = 0; i < 5; i++) {
            if (!qnos[i])
                continue;

            qnos[i]--;
            size[i] = 0;
            for (j = 0; j < 6; j++, b++) {
                for (a = 0; a < 4; a++) {
                    if (b->area_q[a] != dv_quant_shifts[qnos[i] + dv_quant_offset[b->cno]][a]) {
                        b->bit_size[a] = 1; // 4 areas 4 bits for EOB :)
                        b->area_q[a]++;
                        prev = b->prev[a];
                        assert(b->next[prev] >= mb_area_start[a+1] || b->mb[prev]);
                        for (k = b->next[prev]; k < mb_area_start[a+1]; k = b->next[k]) {
                            b->mb[k] >>= 1;
                            if (b->mb[k]) {
                                b->bit_size[a] += dv_rl2vlc_size(k - prev - 1, b->mb[k]);
                                prev = k;
                            } else {
                                if (b->next[k] >= mb_area_start[a+1] && b->next[k] < 64) {
                                    for (a2 = a + 1; b->next[k] >= mb_area_start[a2+1]; a2++)
                                        b->prev[a2] = prev;
                                    assert(a2 < 4);
                                    assert(b->mb[b->next[k]]);
                                    b->bit_size[a2] += dv_rl2vlc_size(b->next[k] - prev - 1, b->mb[b->next[k]])
                                                      -dv_rl2vlc_size(b->next[k] - k    - 1, b->mb[b->next[k]]);
                                    assert(b->prev[a2] == k && (a2+1 >= 4 || b->prev[a2+1] != k));
                                    b->prev[a2] = prev;
                                }
                                b->next[prev] = b->next[k];
                            }
                        }
                        b->prev[a+1] = prev;
                    }
                    size[i] += b->bit_size[a];
                }
            }
            if (vs_total_ac_bits >= size[0] + size[1] + size[2] + size[3] + size[4])
                return;
        }
    } while (qnos[0]|qnos[1]|qnos[2]|qnos[3]|qnos[4]);

    for (a = 2; a == 2 || vs_total_ac_bits < size[0]; a += a) {
        b = blks;
        size[0] = 5*6*4; //EOB
        for (j = 0; j < 6*5; j++, b++) {
            prev = b->prev[0];
            for (k = b->next[prev]; k < 64; k = b->next[k]) {
                if (b->mb[k] < a && b->mb[k] > -a) {
                    b->next[prev] = b->next[k];
                } else {
                    size[0] += dv_rl2vlc_size(k - prev - 1, b->mb[k]);
                    prev = k;
                }
            }
        }
    }
}

static inline void dv_encode_video_segment(DVVideoContext *s,
                                           uint8_t *dif,
                                           const uint16_t *mb_pos_ptr)
{
    int mb_index, i, j, v;
    int mb_x, mb_y, c_offset, linesize;
    uint8_t* y_ptr;
    uint8_t* data;
    uint8_t* ptr;
    int do_edge_wrap;
    DECLARE_ALIGNED_8(DCTELEM, block[64]);
    EncBlockInfo  enc_blks[5*6];
    PutBitContext pbs[5*6];
    PutBitContext* pb;
    EncBlockInfo* enc_blk;
    int vs_bit_size = 0;
    int qnos[5];

    assert((((int)block) & 7) == 0);

    enc_blk = &enc_blks[0];
    pb = &pbs[0];
    for (mb_index = 0; mb_index < 5; mb_index++) {
        v = *mb_pos_ptr++;
        mb_x = v & 0xff;
        mb_y = v >> 8;
        if (s->sys->pix_fmt == PIX_FMT_YUV422P) {
            y_ptr = s->picture.data[0] + (mb_y * s->picture.linesize[0] * 8) + (mb_x * 4);
        } else { /* 4:1:1 */
            y_ptr = s->picture.data[0] + (mb_y * s->picture.linesize[0] * 8) + (mb_x * 8);
        }
        if (s->sys->pix_fmt == PIX_FMT_YUV420P) {
            c_offset = (((mb_y >> 1) * s->picture.linesize[1] * 8) + ((mb_x >> 1) * 8));
        } else { /* 4:2:2 or 4:1:1 */
            c_offset = ((mb_y * s->picture.linesize[1] * 8) + ((mb_x >> 2) * 8));
        }
        do_edge_wrap = 0;
        qnos[mb_index] = 15; /* No quantization */
        ptr = dif + mb_index*80 + 4;
        for (j = 0; j < 6; j++) {
            int dummy = 0;
            if (s->sys->pix_fmt == PIX_FMT_YUV422P) { /* 4:2:2 */
                if (j == 0 || j == 2) {
                    /* Y0 Y1 */
                    data = y_ptr + ((j>>1) * 8);
                    linesize = s->picture.linesize[0];
                } else if (j > 3) {
                    /* Cr Cb */
                    data = s->picture.data[6 - j] + c_offset;
                    linesize = s->picture.linesize[6 - j];
                } else {
                    /* j=1 and j=3 are "dummy" blocks, used for AC data only */
                    data = 0;
                    linesize = 0;
                    dummy = 1;
                }
            } else { /* 4:1:1 or 4:2:0 */
                if (j < 4) {  /* Four Y blocks */
                    /* NOTE: at end of line, the macroblock is handled as 420 */
                    if (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x < (704 / 8)) {
                        data = y_ptr + (j * 8);
                    } else {
                        data = y_ptr + ((j & 1) * 8) + ((j >> 1) * 8 * s->picture.linesize[0]);
                    }
                    linesize = s->picture.linesize[0];
                } else {      /* Cr and Cb blocks */
                    /* don't ask Fabrice why they inverted Cb and Cr ! */
                    data = s->picture.data[6 - j] + c_offset;
                    linesize = s->picture.linesize[6 - j];
                    if (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x >= (704 / 8))
                        do_edge_wrap = 1;
                }
            }

            /* Everything is set up -- now just copy data -> DCT block */
            if (do_edge_wrap) {  /* Edge wrap copy: 4x16 -> 8x8 */
                uint8_t* d;
                DCTELEM *b = block;
                for (i = 0; i < 8; i++) {
                    d = data + 8 * linesize;
                    b[0] = data[0]; b[1] = data[1]; b[2] = data[2]; b[3] = data[3];
                    b[4] =    d[0]; b[5] =    d[1]; b[6] =    d[2]; b[7] =    d[3];
                    data += linesize;
                    b += 8;
                }
            } else {             /* Simple copy: 8x8 -> 8x8 */
                if (!dummy)
                    s->get_pixels(block, data, linesize);
            }

            if (s->avctx->flags & CODEC_FLAG_INTERLACED_DCT)
                enc_blk->dct_mode = dv_guess_dct_mode(block);
            else
                enc_blk->dct_mode = 0;
            enc_blk->area_q[0] = enc_blk->area_q[1] = enc_blk->area_q[2] = enc_blk->area_q[3] = 0;
            enc_blk->partial_bit_count = 0;
            enc_blk->partial_bit_buffer = 0;
            enc_blk->cur_ac = 0;

            if (dummy) {
                /* We rely on the fact that encoding all zeros leads to an immediate
                   EOB, which is precisely what the spec calls for in the "dummy"
                   blocks. */
                memset(block, 0, sizeof(block));
            } else {
                s->fdct[enc_blk->dct_mode](block);
            }

            dv_set_class_number(block, enc_blk,
                                enc_blk->dct_mode ? ff_zigzag248_direct : ff_zigzag_direct,
                                enc_blk->dct_mode ? dv_weight_248 : dv_weight_88,
                                j/4);

            init_put_bits(pb, ptr, block_sizes[j]/8);
            put_bits(pb, 9, (uint16_t)(((enc_blk->mb[0] >> 3) - 1024 + 2) >> 2));
            put_bits(pb, 1, enc_blk->dct_mode);
            put_bits(pb, 2, enc_blk->cno);

            vs_bit_size += enc_blk->bit_size[0] + enc_blk->bit_size[1] +
                           enc_blk->bit_size[2] + enc_blk->bit_size[3];
            ++enc_blk;
            ++pb;
            ptr += block_sizes[j]/8;
        }
    }

    if (vs_total_ac_bits < vs_bit_size)
        dv_guess_qnos(&enc_blks[0], &qnos[0]);

    for (i = 0; i < 5; i++) {
        dif[i*80 + 3] = qnos[i];
    }

    /* First pass over individual cells only */
    for (j = 0; j < 5*6; j++)
        dv_encode_ac(&enc_blks[j], &pbs[j], &pbs[j+1]);

    /* Second pass over each MB space */
    for (j = 0; j < 5*6; j += 6) {
        pb = &pbs[j];
        for (i = 0; i < 6; i++) {
            if (enc_blks[i+j].partial_bit_count)
                pb = dv_encode_ac(&enc_blks[i+j], pb, &pbs[j+6]);
        }
    }

    /* Third and final pass over the whole video segment space */
    pb = &pbs[0];
    for (j = 0; j < 5*6; j++) {
        if (enc_blks[j].partial_bit_count)
            pb = dv_encode_ac(&enc_blks[j], pb, &pbs[6*5]);
        if (enc_blks[j].partial_bit_count)
            av_log(NULL, AV_LOG_ERROR, "ac bitstream overflow\n");
    }

    for (j = 0; j < 5*6; j++)
        flush_put_bits(&pbs[j]);
}

static int dv_decode_mt(AVCodecContext *avctx, void* sl)
{
    DVVideoContext *s = avctx->priv_data;
    int slice = (size_t)sl;

    /* which DIF channel is this? */
    int chan = slice / (s->sys->difseg_size * 27);

    /* slice within the DIF channel */
    int chan_slice = slice % (s->sys->difseg_size * 27);

    /* byte offset of this channel's data */
    int chan_offset = chan * s->sys->difseg_size * 150 * 80;

    dv_decode_video_segment(s, &s->buf[((chan_slice/27)*6+(chan_slice/3)+chan_slice*5+7)*80 + chan_offset],
                            &s->sys->video_place[slice*5]);
    return 0;
}

static int dv_encode_mt(AVCodecContext *avctx, void* sl)
{
    DVVideoContext *s = avctx->priv_data;
    int slice = (size_t)sl;

    /* which DIF channel is this? */
    int chan = slice / (s->sys->difseg_size * 27);

    /* slice within the DIF channel */
    int chan_slice = slice % (s->sys->difseg_size * 27);

    /* byte offset of this channel's data */
    int chan_offset = chan * s->sys->difseg_size * 150 * 80;

    dv_encode_video_segment(s, &s->buf[((chan_slice/27)*6+(chan_slice/3)+chan_slice*5+7)*80 + chan_offset],
                            &s->sys->video_place[slice*5]);
    return 0;
}

/* NOTE: exactly one frame must be given (120000 bytes for NTSC,
   144000 bytes for PAL - or twice those for 50Mbps) */
static int dvvideo_decode_frame(AVCodecContext *avctx,
                                void *data, int *data_size,
                                uint8_t *buf, int buf_size)
{
    DVVideoContext *s = avctx->priv_data;

    s->sys = dv_frame_profile(buf);
    if (!s->sys || buf_size < s->sys->frame_size)
        return -1; /* NOTE: we only accept several full frames */

    if (s->picture.data[0])
        avctx->release_buffer(avctx, &s->picture);

    s->picture.reference = 0;
    s->picture.key_frame = 1;
    s->picture.pict_type = FF_I_TYPE;
    avctx->pix_fmt = s->sys->pix_fmt;
    avcodec_set_dimensions(avctx, s->sys->width, s->sys->height);
    if (avctx->get_buffer(avctx, &s->picture) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    s->picture.interlaced_frame = 1;
    s->picture.top_field_first = 0;

    s->buf = buf;
    avctx->execute(avctx, dv_decode_mt, (void**)&dv_anchor[0], NULL,
                   s->sys->n_difchan * s->sys->difseg_size * 27);

    emms_c();

    /* return image */
    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->picture;

    return s->sys->frame_size;
}

static int dvvideo_encode_frame(AVCodecContext *c, uint8_t *buf, int buf_size,
                                void *data)
{
    DVVideoContext *s = c->priv_data;

    s->sys = dv_codec_profile(c);
    if (!s->sys)
        return -1;
    if (buf_size < s->sys->frame_size)
        return -1;

    c->pix_fmt = s->sys->pix_fmt;
    s->picture = *((AVFrame *)data);
    s->picture.key_frame = 1;
    s->picture.pict_type = FF_I_TYPE;

    s->buf = buf;
    c->execute(c, dv_encode_mt, (void**)&dv_anchor[0], NULL,
               s->sys->n_difchan * s->sys->difseg_size * 27);

    emms_c();

    /* Fill in just enough of the header for dv_frame_profile() to return
       the correct result, so that the frame can be decoded correctly. The
       rest of the metadata is filled in by the dvvideo avformat. (this
       should probably change so that encode_frame() fills in ALL of the
       metadata - e.g. for Quicktime-wrapped DV streams) */

    /* NTSC/PAL format */
    buf[3] = s->sys->dsf ? 0x80 : 0x00;

    /* 25Mbps or 50Mbps */
    buf[80*5 + 48 + 3] = (s->sys->pix_fmt == PIX_FMT_YUV422P) ? 0x4 : 0x0;

    return s->sys->frame_size;
}

static int dvvideo_close(AVCodecContext *c)
{
    return 0;
}

#ifdef CONFIG_DVVIDEO_ENCODER
AVCodec dvvideo_encoder = {
    "dvvideo",
    CODEC_TYPE_VIDEO,
    CODEC_ID_DVVIDEO,
    sizeof(DVVideoContext),
    dvvideo_init,
    dvvideo_encode_frame,
    dvvideo_close,
    NULL,
    CODEC_CAP_DR1,
    NULL
};
#endif // CONFIG_DVVIDEO_ENCODER

AVCodec dvvideo_decoder = {
    "dvvideo",
    CODEC_TYPE_VIDEO,
    CODEC_ID_DVVIDEO,
    sizeof(DVVideoContext),
    dvvideo_init,
    NULL,
    dvvideo_close,
    dvvideo_decode_frame,
    CODEC_CAP_DR1,
    NULL
};
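A minimal standalone sketch (not part of dv.c) of the DIF-block offset arithmetic that dv_decode_mt() and dv_encode_mt() use to locate a video segment inside one DIF channel. The helper name segment_byte_offset and the example values are illustrative assumptions based on the standard 25Mbps DIF layout (80-byte DIF blocks, 150 blocks per DIF sequence, 27 video segments of 5 blocks each, interleaved with header/subcode/VAUX and audio blocks).

#include <assert.h>
#include <stdio.h>

/* Byte offset of video segment 'chan_slice' within one DIF channel.
   Same arithmetic as in dv_decode_mt()/dv_encode_mt(): count the 80-byte
   DIF blocks that precede the segment -- header/subcode/VAUX blocks
   (6 per sequence), interleaved audio blocks (9 per sequence, one per
   3 video segments) and the 5 video blocks of every earlier segment. */
static int segment_byte_offset(int chan_slice)
{
    return ((chan_slice / 27) * 6 + (chan_slice / 3) + chan_slice * 5 + 7) * 80;
}

int main(void)
{
    /* Segment 0 starts at DIF block 7, i.e. after H0, SC0-SC1, VA0-VA2 and A0. */
    assert(segment_byte_offset(0) == 7 * 80);
    /* Segment 27 is the first video block of the second DIF sequence (block 150 + 7). */
    assert(segment_byte_offset(27) == 157 * 80);
    printf("offsets ok\n");
    return 0;
}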