/* asv1.c */
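/* Tail of encode_mb(): bail out if fewer than 30*16*16*3/2/8 bytes (roughly one
 * worst-case macroblock) remain in the output buffer, then entropy-code the six
 * 8x8 blocks of the macroblock with the ASV1 or ASV2 block coder. */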
    if(a->pb.buf_end - a->pb.buf - (put_bits_count(&a->pb)>>3) < 30*16*16*3/2/8){
        av_log(a->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

    if(a->avctx->codec_id == CODEC_ID_ASV1){
        for(i=0; i<6; i++)
            asv1_encode_block(a, block[i]);
    }else{
        for(i=0; i<6; i++)
            asv2_encode_block(a, block[i]);
    }
    return 0;
}
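/* Reconstruct one macroblock: run the IDCT on the six 8x8 blocks in a->block and
 * write the pixels into the picture at macroblock position (mb_x, mb_y); the
 * chroma blocks are skipped in grayscale (CODEC_FLAG_GRAY) mode. */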
static inline void idct_put(ASV1Context *a, int mb_x, int mb_y){
    DCTELEM (*block)[64]= a->block;
    int linesize= a->picture.linesize[0];

    uint8_t *dest_y  = a->picture.data[0] + (mb_y * 16* linesize              ) + mb_x * 16;
    uint8_t *dest_cb = a->picture.data[1] + (mb_y * 8 * a->picture.linesize[1]) + mb_x * 8;
    uint8_t *dest_cr = a->picture.data[2] + (mb_y * 8 * a->picture.linesize[2]) + mb_x * 8;

    a->dsp.idct_put(dest_y                 , linesize, block[0]);
    a->dsp.idct_put(dest_y              + 8, linesize, block[1]);
    a->dsp.idct_put(dest_y + 8*linesize    , linesize, block[2]);
    a->dsp.idct_put(dest_y + 8*linesize + 8, linesize, block[3]);

    if(!(a->avctx->flags&CODEC_FLAG_GRAY)){
        a->dsp.idct_put(dest_cb, a->picture.linesize[1], block[4]);
        a->dsp.idct_put(dest_cr, a->picture.linesize[2], block[5]);
    }
}
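/* Grab the source pixels of macroblock (mb_x, mb_y) (four 8x8 luma blocks plus
 * one Cb and one Cr block) into a->block and forward-DCT them; chroma is skipped
 * in grayscale mode. */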
static inline void dct_get(ASV1Context *a, int mb_x, int mb_y){
    DCTELEM (*block)[64]= a->block;
    int linesize= a->picture.linesize[0];
    int i;

    uint8_t *ptr_y  = a->picture.data[0] + (mb_y * 16* linesize              ) + mb_x * 16;
    uint8_t *ptr_cb = a->picture.data[1] + (mb_y * 8 * a->picture.linesize[1]) + mb_x * 8;
    uint8_t *ptr_cr = a->picture.data[2] + (mb_y * 8 * a->picture.linesize[2]) + mb_x * 8;

    a->dsp.get_pixels(block[0], ptr_y                 , linesize);
    a->dsp.get_pixels(block[1], ptr_y              + 8, linesize);
    a->dsp.get_pixels(block[2], ptr_y + 8*linesize    , linesize);
    a->dsp.get_pixels(block[3], ptr_y + 8*linesize + 8, linesize);
    for(i=0; i<4; i++)
        a->dsp.fdct(block[i]);

    if(!(a->avctx->flags&CODEC_FLAG_GRAY)){
        a->dsp.get_pixels(block[4], ptr_cb, a->picture.linesize[1]);
        a->dsp.get_pixels(block[5], ptr_cr, a->picture.linesize[2]);
        for(i=4; i<6; i++)
            a->dsp.fdct(block[i]);
    }
}
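/* Decode one frame. The payload is first reordered into a->bitstream_buffer
 * (32-bit byte swap for ASV1, per-byte bit reversal via ff_reverse for ASV2),
 * then every macroblock is decoded and IDCT'd; the partial right column and
 * bottom row of macroblocks (mb_width2/mb_height2 count only complete ones)
 * are handled by the two trailing loops. */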
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *data_size,
                        uint8_t *buf, int buf_size)
{
    ASV1Context * const a = avctx->priv_data;
    AVFrame *picture = data;
    AVFrame * const p= (AVFrame*)&a->picture;
    int mb_x, mb_y;

    if(p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference= 0;
    if(avctx->get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    p->pict_type= I_TYPE;
    p->key_frame= 1;

    a->bitstream_buffer= av_fast_realloc(a->bitstream_buffer, &a->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);

    if(avctx->codec_id == CODEC_ID_ASV1)
        a->dsp.bswap_buf((uint32_t*)a->bitstream_buffer, (uint32_t*)buf, buf_size/4);
    else{
        int i;
        for(i=0; i<buf_size; i++)
            a->bitstream_buffer[i]= ff_reverse[ buf[i] ];
    }

    init_get_bits(&a->gb, a->bitstream_buffer, buf_size*8);

    for(mb_y=0; mb_y<a->mb_height2; mb_y++){
        for(mb_x=0; mb_x<a->mb_width2; mb_x++){
            if( decode_mb(a, a->block) <0)
                return -1;

            idct_put(a, mb_x, mb_y);
        }
    }

    if(a->mb_width2 != a->mb_width){
        mb_x= a->mb_width2;
        for(mb_y=0; mb_y<a->mb_height2; mb_y++){
            if( decode_mb(a, a->block) <0)
                return -1;

            idct_put(a, mb_x, mb_y);
        }
    }

    if(a->mb_height2 != a->mb_height){
        mb_y= a->mb_height2;
        for(mb_x=0; mb_x<a->mb_width; mb_x++){
            if( decode_mb(a, a->block) <0)
                return -1;

            idct_put(a, mb_x, mb_y);
        }
    }
#if 0
    /* debug dump of trailing bits and extradata (disabled) */
    int i;
    printf("%d %d\n", 8*buf_size, get_bits_count(&a->gb));
    for(i=get_bits_count(&a->gb); i<8*buf_size; i++){
        printf("%d", get_bits1(&a->gb));
    }

    for(i=0; i<avctx->extradata_size; i++){
        printf("%c\n", ((uint8_t*)avctx->extradata)[i]);
    }
#endif
    *picture= *(AVFrame*)&a->picture;
    *data_size = sizeof(AVPicture);

    emms_c();

    return (get_bits_count(&a->gb)+31)/32*4;
}
#ifdef CONFIG_ENCODERS
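/* Encode one frame: DCT and entropy-code all macroblocks (complete ones first,
 * then the partial right column and bottom row), pad the bitstream to a 32-bit
 * boundary and reorder the output bytes (32-bit byte swap for ASV1, per-byte
 * bit reversal for ASV2). */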
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
    ASV1Context * const a = avctx->priv_data;
    AVFrame *pict = data;
    AVFrame * const p= (AVFrame*)&a->picture;
    int size;
    int mb_x, mb_y;

    init_put_bits(&a->pb, buf, buf_size);

    *p = *pict;
    p->pict_type= I_TYPE;
    p->key_frame= 1;

    for(mb_y=0; mb_y<a->mb_height2; mb_y++){
        for(mb_x=0; mb_x<a->mb_width2; mb_x++){
            dct_get(a, mb_x, mb_y);
            encode_mb(a, a->block);
        }
    }

    if(a->mb_width2 != a->mb_width){
        mb_x= a->mb_width2;
        for(mb_y=0; mb_y<a->mb_height2; mb_y++){
            dct_get(a, mb_x, mb_y);
            encode_mb(a, a->block);
        }
    }

    if(a->mb_height2 != a->mb_height){
        mb_y= a->mb_height2;
        for(mb_x=0; mb_x<a->mb_width; mb_x++){
            dct_get(a, mb_x, mb_y);
            encode_mb(a, a->block);
        }
    }
    emms_c();

    align_put_bits(&a->pb);
    while(put_bits_count(&a->pb)&31)
        put_bits(&a->pb, 8, 0);

    size= put_bits_count(&a->pb)/32;

    if(avctx->codec_id == CODEC_ID_ASV1)
        a->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
    else{
        int i;
        for(i=0; i<4*size; i++)
            buf[i]= ff_reverse[ buf[i] ];
    }

    return size*4;
}
#endif /* CONFIG_ENCODERS */
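/* Setup shared by decoder and encoder: DSP context, macroblock counts
 * (mb_width/mb_height round up to cover the whole picture, mb_width2/mb_height2
 * count only complete macroblocks) and the coded_frame pointer. */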
static void common_init(AVCodecContext *avctx){
    ASV1Context * const a = avctx->priv_data;

    dsputil_init(&a->dsp, avctx);

    a->mb_width   = (avctx->width  + 15) / 16;
    a->mb_height  = (avctx->height + 15) / 16;
    a->mb_width2  = (avctx->width  + 0) / 16;
    a->mb_height2 = (avctx->height + 0) / 16;

    avctx->coded_frame= (AVFrame*)&a->picture;
    a->avctx= avctx;
}
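/* Decoder init: VLC tables, scantable and pixel format, the inverse quantizer
 * scale taken from the first extradata byte (with a codec-dependent fallback
 * when it is 0), and the dequantization matrix derived from the MPEG-1 default
 * intra matrix. */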
static int decode_init(AVCodecContext *avctx){
    ASV1Context * const a = avctx->priv_data;
    AVFrame *p= (AVFrame*)&a->picture;
    int i;
    const int scale= avctx->codec_id == CODEC_ID_ASV1 ? 1 : 2;

    common_init(avctx);
    init_vlcs(a);
    ff_init_scantable(a->dsp.idct_permutation, &a->scantable, scantab);
    avctx->pix_fmt= PIX_FMT_YUV420P;

    a->inv_qscale= ((uint8_t*)avctx->extradata)[0];
    if(a->inv_qscale == 0){
        av_log(avctx, AV_LOG_ERROR, "illegal qscale 0\n");
        if(avctx->codec_id == CODEC_ID_ASV1)
            a->inv_qscale= 6;
        else
            a->inv_qscale= 10;
    }

    for(i=0; i<64; i++){
        int index= scantab[i];

        a->intra_matrix[i]= 64*scale*ff_mpeg1_default_intra_matrix[index] / a->inv_qscale;
    }

    p->qstride= a->mb_width;
    p->qscale_table= av_malloc( p->qstride * a->mb_height);
    p->quality= (32*scale + a->inv_qscale/2)/a->inv_qscale;
    memset(p->qscale_table, p->quality, p->qstride*a->mb_height);

    return 0;
}
#ifdef CONFIG_ENCODERS
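/* Encoder init: derive inv_qscale from avctx->global_quality, write it and the
 * "ASUS" tag into 8 bytes of extradata, and precompute the quantization matrix
 * as rounded 16.16 fixed-point multipliers. */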
static int encode_init(AVCodecContext *avctx){
    ASV1Context * const a = avctx->priv_data;
    int i;
    const int scale= avctx->codec_id == CODEC_ID_ASV1 ? 1 : 2;

    common_init(avctx);

    if(avctx->global_quality == 0) avctx->global_quality= 4*FF_QUALITY_SCALE;

    a->inv_qscale= (32*scale*FF_QUALITY_SCALE + avctx->global_quality/2) / avctx->global_quality;

    avctx->extradata= av_mallocz(8);
    avctx->extradata_size=8;
    ((uint32_t*)avctx->extradata)[0]= le2me_32(a->inv_qscale);
    ((uint32_t*)avctx->extradata)[1]= le2me_32(ff_get_fourcc("ASUS"));

    for(i=0; i<64; i++){
        int q= 32*scale*ff_mpeg1_default_intra_matrix[i];
        a->q_intra_matrix[i]= ((a->inv_qscale<<16) + q/2) / q;
    }

    return 0;
}
#endif
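/* Free the buffers owned by the decoder context (bitstream reorder buffer and
 * the qscale table). */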
static int decode_end(AVCodecContext *avctx){
    ASV1Context * const a = avctx->priv_data;

    av_freep(&a->bitstream_buffer);
    av_freep(&a->picture.qscale_table);
    a->bitstream_buffer_size=0;

    return 0;
}
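/* Codec table entries: ASV1 and ASV2 share the same context, init and frame
 * functions and differ only in codec id (the codec_id checks above select the
 * matching bitstream handling). */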
AVCodec asv1_decoder = {
    "asv1",
    CODEC_TYPE_VIDEO,
    CODEC_ID_ASV1,
    sizeof(ASV1Context),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1,
};

AVCodec asv2_decoder = {
    "asv2",
    CODEC_TYPE_VIDEO,
    CODEC_ID_ASV2,
    sizeof(ASV1Context),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1,
};
#ifdef CONFIG_ENCODERS
AVCodec asv1_encoder = {
    "asv1",
    CODEC_TYPE_VIDEO,
    CODEC_ID_ASV1,
    sizeof(ASV1Context),
    encode_init,
    encode_frame,
    //encode_end,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
};

AVCodec asv2_encoder = {
    "asv2",
    CODEC_TYPE_VIDEO,
    CODEC_ID_ASV2,
    sizeof(ASV1Context),
    encode_init,
    encode_frame,
    //encode_end,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
};
#endif //CONFIG_ENCODERS