📄 ffv1.c
/* ffv1.c -- FFV1 encoder (excerpt).  The listing starts mid-function, in the
 * tail of the adaptive Golomb-Rice symbol routine: the mapped residual's sign
 * is conditionally flipped from the running drift/count state, the bias is
 * added and folded back into the coded range, and the per-context VLC state
 * is updated. */

#if 0 // JPEG LS
    if(k==0 && 2*state->drift <= - state->count) v ^= (-1);
#else
    v ^= ((2*state->drift + state->count)>>31);
#endif

    ret= fold(v + state->bias, bits);

    update_vlc_state(state, v);
//printf("final: %d\n", ret);
    return ret;
}

/* Encode one line of residuals: with the range coder (s->ac) each residual is
 * coded with its per-context adaptive state; otherwise residuals are
 * Golomb-Rice coded, switching to run-length coding of zero runs whenever the
 * quantized context is 0. */
static inline void encode_line(FFV1Context *s, int w, int_fast16_t *sample[2], int plane_index, int bits){
    PlaneContext * const p= &s->plane[plane_index];
    CABACContext * const c= &s->c;
    int x;
    int run_index= s->run_index;
    int run_count=0;
    int run_mode=0;

    for(x=0; x<w; x++){
        int diff, context;

        context= get_context(s, sample[1]+x, sample[0]+x, sample[2]+x);
        diff= sample[1][x] - predict(sample[1]+x, sample[0]+x);

        if(context < 0){
            context = -context;
            diff= -diff;
        }

        diff= fold(diff, bits);

        if(s->ac){
            put_symbol(c, p->state[context], diff, 1, bits-1);
        }else{
            if(context == 0) run_mode=1;

            if(run_mode){
                if(diff){
                    while(run_count >= 1<<log2_run[run_index]){
                        run_count -= 1<<log2_run[run_index];
                        run_index++;
                        put_bits(&s->pb, 1, 1);
                    }

                    put_bits(&s->pb, 1 + log2_run[run_index], run_count);
                    if(run_index) run_index--;
                    run_count=0;
                    run_mode=0;
                    if(diff>0) diff--;
                }else{
                    run_count++;
                }
            }

//            printf("count:%d index:%d, mode:%d, x:%d y:%d pos:%d\n", run_count, run_index, run_mode, x, y, (int)get_bit_count(&s->pb));

            if(run_mode == 0)
                put_vlc_symbol(&s->pb, &p->vlc_state[context], diff, bits);
        }
    }
    if(run_mode){
        while(run_count >= 1<<log2_run[run_index]){
            run_count -= 1<<log2_run[run_index];
            run_index++;
            put_bits(&s->pb, 1, 1);
        }

        if(run_count)
            put_bits(&s->pb, 1, 1);
    }
    s->run_index= run_index;
}

/* Encode one 8-bit plane, keeping a sliding window of the previous sample
 * lines so encode_line() can form its prediction context. */
static void encode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
    int x,y;
    int_fast16_t sample_buffer[3][w+6];
    int_fast16_t *sample[3]= {sample_buffer[0]+3, sample_buffer[1]+3, sample_buffer[2]+3};

    s->run_index=0;

    memset(sample_buffer, 0, sizeof(sample_buffer));

    for(y=0; y<h; y++){
        int_fast16_t *temp= sample[0]; //FIXME try a normal buffer

        sample[0]= sample[1];
        sample[1]= sample[2];
        sample[2]= temp;

        sample[1][-1]= sample[0][0 ];
        sample[0][ w]= sample[0][w-1];
//{START_TIMER
        for(x=0; x<w; x++){
            sample[1][x]= src[x + stride*y];
        }
        encode_line(s, w, sample, plane_index, 8);
//STOP_TIMER("encode line")}
    }
}

/* Convert packed RGB to the reversible G, B-G, R-G representation (with a
 * JPEG2000-RCT style >>2 correction on G) and encode the three channels as
 * 9-bit planes. */
static void encode_rgb_frame(FFV1Context *s, uint32_t *src, int w, int h, int stride){
    int x, y, p;
    int_fast16_t sample_buffer[3][2][w+6];
    int_fast16_t *sample[3][2]= {
        {sample_buffer[0][0]+3, sample_buffer[0][1]+3},
        {sample_buffer[1][0]+3, sample_buffer[1][1]+3},
        {sample_buffer[2][0]+3, sample_buffer[2][1]+3}};

    s->run_index=0;

    memset(sample_buffer, 0, sizeof(sample_buffer));

    for(y=0; y<h; y++){
        for(x=0; x<w; x++){
            int v= src[x + stride*y];
            int b= v&0xFF;
            int g= (v>>8)&0xFF;
            int r= (v>>16)&0xFF;

            b -= g;
            r -= g;
            g += (b + r)>>2;

            b += 0x100;
            r += 0x100;

//            assert(g>=0 && b>=0 && r>=0);
//            assert(g<256 && b<512 && r<512);

            sample[0][0][x]= g;
            sample[1][0][x]= b;
            sample[2][0][x]= r;
        }
        for(p=0; p<3; p++){
            int_fast16_t *temp= sample[p][0]; //FIXME try a normal buffer

            sample[p][0]= sample[p][1];
            sample[p][1]= temp;

            sample[p][1][-1]= sample[p][0][0 ];
            sample[p][0][ w]= sample[p][0][w-1];
            encode_line(s, w, sample[p], FFMIN(p, 1), 9);
        }
    }
}

/* Write a context quantization table as the distances between the points
 * where its value changes. */
static void write_quant_table(CABACContext *c, int16_t *quant_table){
    int last=0;
    int i;
    uint8_t state[CONTEXT_SIZE]={0};

    for(i=1; i<128 ; i++){
        if(quant_table[i] != quant_table[i-1]){
            put_symbol(c, state, i-last-1, 0, 7);
            last= i;
        }
    }
    put_symbol(c, state, i-last-1, 0, 7);
}

/* Write the global stream header: version, coder type, colorspace, chroma
 * subsampling and the quantization tables. */
static void write_header(FFV1Context *f){
    uint8_t state[CONTEXT_SIZE]={0};
    int i;
    CABACContext * const c= &f->c;

    put_symbol(c, state, f->version, 0, 7);
    put_symbol(c, state, f->avctx->coder_type, 0, 7);
    put_symbol(c, state, f->colorspace, 0, 7); //YUV cs type
    put_cabac(c, state, 1); //chroma planes
        put_symbol(c, state, f->chroma_h_shift, 0, 7);
        put_symbol(c, state, f->chroma_v_shift, 0, 7);
    put_cabac(c, state, 0); //no transparency plane

    for(i=0; i<5; i++)
        write_quant_table(c, f->quant_table[i]);
}

static int common_init(AVCodecContext *avctx){
    FFV1Context *s = avctx->priv_data;
    int width, height;

    s->avctx= avctx;
    s->flags= avctx->flags;

    dsputil_init(&s->dsp, avctx);

    width= s->width= avctx->width;
    height= s->height= avctx->height;

    assert(width && height);

    return 0;
}

/* Build the context quantization tables and allocate per-plane coder state
 * (range-coder contexts or VLC states, depending on coder_type). */
static int encode_init(AVCodecContext *avctx){
    FFV1Context *s = avctx->priv_data;
    int i;

    common_init(avctx);

    s->version=0;
    s->ac= avctx->coder_type;

    s->plane_count=2;
    for(i=0; i<256; i++){
        s->quant_table[0][i]=           quant11[i];
        s->quant_table[1][i]=        11*quant11[i];
        if(avctx->context_model==0){
            s->quant_table[2][i]=     11*11*quant11[i];
            s->quant_table[3][i]= s->quant_table[4][i]= 0;
        }else{
            s->quant_table[2][i]=     11*11*quant5 [i];
            s->quant_table[3][i]=   5*11*11*quant5 [i];
            s->quant_table[4][i]= 5*5*11*11*quant5 [i];
        }
    }

    for(i=0; i<s->plane_count; i++){
        PlaneContext * const p= &s->plane[i];

        if(avctx->context_model==0){
            p->context_count= (11*11*11+1)/2;
        }else{
            p->context_count= (11*11*5*5*5+1)/2;
        }

        if(s->ac){
            if(!p->state) p->state= av_malloc(CONTEXT_SIZE*p->context_count*sizeof(uint8_t));
        }else{
            if(!p->vlc_state) p->vlc_state= av_malloc(p->context_count*sizeof(VlcState));
        }
    }

    avctx->coded_frame= &s->picture;
    switch(avctx->pix_fmt){
    case PIX_FMT_YUV444P:
    case PIX_FMT_YUV422P:
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUV411P:
    case PIX_FMT_YUV410P:
        s->colorspace= 0;
        break;
    case PIX_FMT_RGBA32:
        s->colorspace= 1;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
        return -1;
    }
    avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift, &s->chroma_v_shift);

    s->picture_number=0;

    return 0;
}

/* Reset every per-context coder state to its initial value (done on key
 * frames, right after the header is written). */
static void clear_state(FFV1Context *f){
    int i, j;

    for(i=0; i<f->plane_count; i++){
        PlaneContext *p= &f->plane[i];

        p->interlace_bit_state[0]= 0;
        p->interlace_bit_state[1]= 0;

        for(j=0; j<p->context_count; j++){
            if(f->ac){
                memset(p->state[j], 0, sizeof(uint8_t)*CONTEXT_SIZE);
                p->state[j][7] = 2*62;
            }else{
                p->vlc_state[j].drift= 0;
                p->vlc_state[j].error_sum= 4; //FFMAX((RANGE + 32)/64, 2);
                p->vlc_state[j].bias= 0;
                p->vlc_state[j].count= 1;
            }
        }
    }
}

/* Encode one intra frame.  The header is written and the contexts are reset
 * only on key frames; a single bypass bit tells the decoder which case it is. */
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
    FFV1Context *f = avctx->priv_data;
    CABACContext * const c= &f->c;
    AVFrame *pict = data;
    const int width= f->width;
    const int height= f->height;
    AVFrame * const p= &f->picture;
    int used_count= 0;

    if(avctx->strict_std_compliance >= 0){
        av_log(avctx, AV_LOG_ERROR, "this codec is under development, files encoded with it won't be decodable with future versions!!!\n"
               "use vstrict=-1 to use it anyway\n");
        return -1;
    }

    ff_init_cabac_encoder(c, buf, buf_size);
    ff_init_cabac_states(c, ff_h264_lps_range, ff_h264_mps_state, ff_h264_lps_state, 64);

    *p = *pict;
    p->pict_type= FF_I_TYPE;

    if(avctx->gop_size==0 || f->picture_number % avctx->gop_size == 0){
        put_cabac_bypass(c, 1);
        p->key_frame= 1;
        write_header(f);
        clear_state(f);
    }else{
        put_cabac_bypass(c, 0);
        p->key_frame= 0;
    }

    if(!f->ac){
        used_count += put_cabac_terminate(c, 1);
//printf("pos=%d\n", used_count);
        init_put_bits(&f->pb, buf + used_count, buf_size - used_count);
    }

    if(f->colorspace==0){
        const int chroma_width = -((-width )>>f->chroma_h_shift);  // ceiling division by the subsampling factor
        const int chroma_height= -((-height)>>f->chroma_v_shift);

        encode_plane(f, p->data[0], width, height, p->linesize[0], 0);
        encode_plane(f, p->data[1], chroma_width, chroma_height, p->linesize[1], 1);
        encode_plane(f, p->data[2], chroma_width, chroma_height, p->linesize[2], 1);
    }else{
        encode_rgb_frame(f, (uint32_t*)(p->data[0]), width, height, p->linesize[0]/4);
    }
    emms_c();

    f->picture_number++;

    if(f->ac){
        return put_cabac_terminate(c, 1);
    }else{
        flush_put_bits(&f->pb); //nicer padding FIXME
        return used_count + (get_bit_count(&f->pb)+7)/8;
    }
}

static void common_end(FFV1Context *s){
    int i;

    for(i=0; i<s->plane_count; i++){
        PlaneContext *p= &s->plane[i];
        av_freep(&p->state);
    }
}
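
For reference, the per-pixel colour transform in encode_rgb_frame() above is exactly reversible: the >>2 correction added to G is computed from the B-G and R-G differences that are themselves stored, and the same shift is applied to the same value in both directions, so a decoder can undo it bit-exactly. Below is a minimal standalone sketch of that round trip; the helper names rct_forward() and rct_inverse() are invented for this illustration and are not part of ffv1.c.

/* Standalone sketch (not part of ffv1.c): forward and inverse of the
 * reversible colour transform used in encode_rgb_frame(). */
#include <assert.h>
#include <stdio.h>

static void rct_forward(int r, int g, int b, int *cg, int *cb, int *cr){
    b -= g;                 /* store B and R as differences from G       */
    r -= g;
    g += (b + r)>>2;        /* luma-style correction, exactly invertible */
    *cg = g;
    *cb = b + 0x100;        /* same +256 offset as the encoder           */
    *cr = r + 0x100;
}

static void rct_inverse(int cg, int cb, int cr, int *r, int *g, int *b){
    int bd= cb - 0x100;
    int rd= cr - 0x100;
    int gg= cg - ((bd + rd)>>2);    /* undo the correction first */
    *g= gg;
    *b= bd + gg;
    *r= rd + gg;
}

int main(void){
    int r, g, b;
    for(r=0; r<256; r+=5)
        for(g=0; g<256; g+=5)
            for(b=0; b<256; b+=5){
                int cg, cb, cr, r2, g2, b2;
                rct_forward(r, g, b, &cg, &cb, &cr);
                rct_inverse(cg, cb, cr, &r2, &g2, &b2);
                assert(r==r2 && g==g2 && b==b2);
            }
    printf("RGB <-> G, B-G, R-G round trip OK\n");
    return 0;
}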