📄 ffv1.c
/* Encoder close callback: releases the shared FFV1 state. */
static int encode_end(AVCodecContext *avctx){
    FFV1Context *s = avctx->priv_data;

    common_end(s);

    return 0;
}

/* Decode one line of samples. Uses the range coder when s->ac is set,
 * otherwise the Golomb-Rice/VLC path with run-length coding for runs of
 * zero differences in context 0. */
static inline void decode_line(FFV1Context *s, int w, int_fast16_t *sample[2], int plane_index, int bits){
    PlaneContext * const p= &s->plane[plane_index];
    CABACContext * const c= &s->c;
    int x;
    int run_count=0;
    int run_mode=0;
    int run_index= s->run_index;

    for(x=0; x<w; x++){
        int diff, context, sign;

        context= get_context(s, sample[1] + x, sample[0] + x, sample[1] + x);
        if(context < 0){
            context= -context;
            sign=1;
        }else
            sign=0;

        if(s->ac)
            diff= get_symbol(c, p->state[context], 1, bits-1);
        else{
            if(context == 0 && run_mode==0) run_mode=1;

            if(run_mode){
                if(run_count==0 && run_mode==1){
                    if(get_bits1(&s->gb)){
                        run_count = 1<<log2_run[run_index];
                        if(x + run_count <= w) run_index++;
                    }else{
                        if(log2_run[run_index]) run_count = get_bits(&s->gb, log2_run[run_index]);
                        else run_count=0;
                        if(run_index) run_index--;
                        run_mode=2;
                    }
                }
                run_count--;
                if(run_count < 0){
                    run_mode=0;
                    run_count=0;
                    diff= get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);
                    if(diff>=0) diff++;
                }else
                    diff=0;
            }else
                diff= get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);

//            printf("count:%d index:%d, mode:%d, x:%d y:%d pos:%d\n", run_count, run_index, run_mode, x, y, get_bits_count(&s->gb));
        }

        if(sign) diff= -diff;

        sample[1][x]= (predict(sample[1] + x, sample[0] + x) + diff) & ((1<<bits)-1);
    }
    s->run_index= run_index;
}

/* Decode one 8-bit plane line by line, keeping the previously decoded line
 * in sample[0] for prediction. */
static void decode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
    int x, y;
    int_fast16_t sample_buffer[2][w+6];
    int_fast16_t *sample[2]= {sample_buffer[0]+3, sample_buffer[1]+3};

    s->run_index=0;

    memset(sample_buffer, 0, sizeof(sample_buffer));

    for(y=0; y<h; y++){
        int_fast16_t *temp= sample[0]; //FIXME try a normal buffer

        sample[0]= sample[1];
        sample[1]= temp;

        sample[1][-1]= sample[0][0  ];
        sample[0][ w]= sample[0][w-1];
//{START_TIMER
        decode_line(s, w, sample, plane_index, 8);

        for(x=0; x<w; x++){
            src[x + stride*y]= sample[1][x];
        }
//STOP_TIMER("decode-line")}
    }
}

/* Decode the three 9-bit RGB planes (G, then B and R differences) and undo
 * the reversible colour transform into packed 32-bit pixels. */
static void decode_rgb_frame(FFV1Context *s, uint32_t *src, int w, int h, int stride){
    int x, y, p;
    int_fast16_t sample_buffer[3][2][w+6];
    int_fast16_t *sample[3][2]= {
        {sample_buffer[0][0]+3, sample_buffer[0][1]+3},
        {sample_buffer[1][0]+3, sample_buffer[1][1]+3},
        {sample_buffer[2][0]+3, sample_buffer[2][1]+3}};

    s->run_index=0;

    memset(sample_buffer, 0, sizeof(sample_buffer));

    for(y=0; y<h; y++){
        for(p=0; p<3; p++){
            int_fast16_t *temp= sample[p][0]; //FIXME try a normal buffer

            sample[p][0]= sample[p][1];
            sample[p][1]= temp;

            sample[p][1][-1]= sample[p][0][0  ];
            sample[p][0][ w]= sample[p][0][w-1];
            decode_line(s, w, sample[p], FFMIN(p, 1), 9);
        }
        for(x=0; x<w; x++){
            int g= sample[0][1][x];
            int b= sample[1][1][x];
            int r= sample[2][1][x];

//            assert(g>=0 && b>=0 && r>=0);
//            assert(g<256 && b<512 && r<512);

            b -= 0x100;
            r -= 0x100;
            g -= (b + r)>>2;
            b += g;
            r += g;

            src[x + stride*y]= b + (g<<8) + (r<<16);
        }
    }
}

/* Read one run-length coded quantization table; the second half is the
 * mirrored negative of the first. Returns the number of distinct levels. */
static int read_quant_table(CABACContext *c, int16_t *quant_table, int scale){
    int v;
    int i=0;
    uint8_t state[CONTEXT_SIZE]={0};

    for(v=0; i<128 ; v++){
        int len= get_symbol(c, state, 0, 7) + 1;

        if(len + i > 128) return -1;

        while(len--){
            quant_table[i] = scale*v;
            i++;
//printf("%2d ",v);
//if(i%16==0) printf("\n");
        }
    }

    for(i=1; i<128; i++){
        quant_table[256-i]= -quant_table[i];
    }
    quant_table[128]= -quant_table[127];

    return 2*v - 1;
}

/* Parse the stream header: version, coder type, colorspace, chroma shifts
 * and the five quantization tables; allocate per-plane context state. */
static int read_header(FFV1Context *f){
    uint8_t state[CONTEXT_SIZE]={0};
    int i, context_count;
    CABACContext * const c= &f->c;

    f->version= get_symbol(c, state, 0, 7);
    f->ac= f->avctx->coder_type= get_symbol(c, state, 0, 7);
    f->colorspace= get_symbol(c, state, 0, 7); //YUV cs type
    get_cabac(c, state); //no chroma = false
    f->chroma_h_shift= get_symbol(c, state, 0, 7);
    f->chroma_v_shift= get_symbol(c, state, 0, 7);
    get_cabac(c, state); //transparency plane
    f->plane_count= 2;

    if(f->colorspace==0){
        switch(16*f->chroma_h_shift + f->chroma_v_shift){
        case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P; break;
        case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P; break;
        case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P; break;
        case 0x20: f->avctx->pix_fmt= PIX_FMT_YUV411P; break;
        case 0x33: f->avctx->pix_fmt= PIX_FMT_YUV410P; break;
        default:
            av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
            return -1;
        }
    }else if(f->colorspace==1){
        if(f->chroma_h_shift || f->chroma_v_shift){
            av_log(f->avctx, AV_LOG_ERROR, "chroma subsampling not supported in this colorspace\n");
            return -1;
        }
        f->avctx->pix_fmt= PIX_FMT_RGBA32;
    }else{
        av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
        return -1;
    }

//printf("%d %d %d\n", f->chroma_h_shift, f->chroma_v_shift,f->avctx->pix_fmt);

    context_count=1;
    for(i=0; i<5; i++){
        context_count*= read_quant_table(c, f->quant_table[i], context_count);
        if(context_count < 0){
            av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
            return -1;
        }
    }
    context_count= (context_count+1)/2;

    for(i=0; i<f->plane_count; i++){
        PlaneContext * const p= &f->plane[i];

        p->context_count= context_count;

        if(f->ac){
            if(!p->state) p->state= av_malloc(CONTEXT_SIZE*p->context_count*sizeof(uint8_t));
        }else{
            if(!p->vlc_state) p->vlc_state= av_malloc(p->context_count*sizeof(VlcState));
        }
    }

    return 0;
}

/* Decoder init callback. */
static int decode_init(AVCodecContext *avctx){
//    FFV1Context *s = avctx->priv_data;

    common_init(avctx);

    return 0;
}

/* Decode a single frame; keyframes carry a header and reset the contexts. */
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size){
    FFV1Context *f = avctx->priv_data;
    CABACContext * const c= &f->c;
    const int width= f->width;
    const int height= f->height;
    AVFrame * const p= &f->picture;
    int bytes_read;
    AVFrame *picture = data;

    *data_size = 0;

    /* no supplementary picture */
    if (buf_size == 0)
        return 0;

    ff_init_cabac_decoder(c, buf, buf_size);
    ff_init_cabac_states(c, ff_h264_lps_range, ff_h264_mps_state, ff_h264_lps_state, 64);

    p->pict_type= FF_I_TYPE; //FIXME I vs. P
    if(get_cabac_bypass(c)){
        p->key_frame= 1;
        read_header(f);
        clear_state(f);
    }else{
        p->key_frame= 0;
    }

    p->reference= 0;
    if(avctx->get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    if(avctx->debug&FF_DEBUG_PICT_INFO)
        av_log(avctx, AV_LOG_ERROR, "keyframe:%d coder:%d\n", p->key_frame, f->ac);

    if(!f->ac){
        bytes_read = get_cabac_terminate(c);
        if(bytes_read ==0) av_log(avctx, AV_LOG_ERROR, "error at end of AC stream\n");
//printf("pos=%d\n", bytes_read);
        init_get_bits(&f->gb, buf + bytes_read, buf_size - bytes_read);
    } else {
        bytes_read = 0; /* avoid warning */
    }

    if(f->colorspace==0){
        const int chroma_width = -((-width )>>f->chroma_h_shift);
        const int chroma_height= -((-height)>>f->chroma_v_shift);
        decode_plane(f, p->data[0], width, height, p->linesize[0], 0);

        decode_plane(f, p->data[1], chroma_width, chroma_height, p->linesize[1], 1);
        decode_plane(f, p->data[2], chroma_width, chroma_height, p->linesize[2], 1);
    }else{
        decode_rgb_frame(f, (uint32_t*)p->data[0], width, height, p->linesize[0]/4);
    }

    emms_c();

    f->picture_number++;

    *picture= *p;

    avctx->release_buffer(avctx, p); //FIXME

    *data_size = sizeof(AVFrame);

    if(f->ac){
        bytes_read= get_cabac_terminate(c);
        if(bytes_read ==0) av_log(f->avctx, AV_LOG_ERROR, "error at end of frame\n");
    }else{
        bytes_read+= (get_bits_count(&f->gb)+7)/8;
    }

    return bytes_read;
}

/* Decoder close callback: free the internally allocated picture buffers. */
static int decode_end(AVCodecContext *avctx){
    FFV1Context *s = avctx->priv_data;
    int i;

    if(avctx->get_buffer == avcodec_default_get_buffer){
        for(i=0; i<4; i++){
            av_freep(&s->picture.base[i]);
            s->picture.data[i]= NULL;
        }
        av_freep(&s->picture.opaque);
    }

    return 0;
}

AVCodec ffv1_decoder = {
    "ffv1",
    CODEC_TYPE_VIDEO,
    CODEC_ID_FFV1,
    sizeof(FFV1Context),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/,
    NULL
};

#ifdef CONFIG_ENCODERS
AVCodec ffv1_encoder = {
    "ffv1",
    CODEC_TYPE_VIDEO,
    CODEC_ID_FFV1,
    sizeof(FFV1Context),
    encode_init,
    encode_frame,
    encode_end,
};
#endif
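Note: predict() and the encoder-side colour transform are defined elsewhere in ffv1.c and are not part of this excerpt. The standalone sketch below is an assumption based on the calls above: a median-of-three predictor of the kind decode_line() relies on, and a round trip of the reversible colour transform whose inverse appears verbatim in decode_rgb_frame().

/* Standalone sketch (assumption, not part of ffv1.c). */
#include <stdio.h>

/* Median of three values. */
static int median3(int a, int b, int c){
    if(a > b){ int t = a; a = b; b = t; }   /* ensure a <= b    */
    if(b > c)  b = c;                       /* b = min(b, c)    */
    return a > b ? a : b;                   /* the middle value */
}

/* Median predictor assumed by decode_line(): predicts the current sample
 * from the left (L), top (T) and top-left (LT) neighbours.              */
static int predict_sketch(int L, int T, int LT){
    return median3(L, L + T - LT, T);
}

int main(void){
    int r = 200, g = 120, b = 40;

    /* Forward transform (encoder side, assumed): B and R become differences
     * against G, offset by 0x100 to stay non-negative; G gets a correction. */
    int B = b - g, R = r - g;
    int G = g + ((B + R) >> 2);
    B += 0x100; R += 0x100;

    /* Inverse transform, exactly as written in decode_rgb_frame(). */
    B -= 0x100; R -= 0x100;
    G -= (B + R) >> 2;
    B += G; R += G;

    printf("predict_sketch(L=3, T=5, LT=4) = %d\n", predict_sketch(3, 5, 4)); /* 4 */
    printf("round trip: r=%d g=%d b=%d\n", R, G, B);   /* 200 120 40 */
    return 0;
}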