context_ini.c
JM 11.0 KTA 2.1 Source Code (C)
/*
 * init_contexts: set the starting state of every CABAC context model of the
 * current slice (motion and texture contexts) from the initialization tables
 * selected by img->model_number.
 */
void init_contexts (void)
{
  MotionInfoContexts*  mc = img->currentSlice->mot_ctx;
  TextureInfoContexts* tc = img->currentSlice->tex_ctx;
  int i, j;
  
  //--- motion coding contexts ---
  BIARI_CTX_INIT2 (3, NUM_MB_TYPE_CTX,   mc->mb_type_contexts,     INIT_MB_TYPE,    img->model_number);
  BIARI_CTX_INIT2 (2, NUM_B8_TYPE_CTX,   mc->b8_type_contexts,     INIT_B8_TYPE,    img->model_number);
  BIARI_CTX_INIT2 (2, NUM_MV_RES_CTX,    mc->mv_res_contexts,       INIT_MV_RES,      img->model_number);
#ifdef MV_COMPETITION
  if (input->mv_competition > 0)
  {  
    BIARI_CTX_INIT1 (mv_comp.nb_mode_for_skip,     mc->mv_predictor_skip_contexts,INIT_PRED,        img->model_number);
    BIARI_CTX_INIT1 (mv_comp.nb_mode_for_mvp ,     mc->mv_predictor_mvp_contexts ,INIT_PRED,        img->model_number);
    BIARI_CTX_INIT1 (mv_comp.nb_mode_for_mvb ,     mc->mv_predictor_mvb_contexts ,INIT_PRED,        img->model_number);
  }
#endif  
  BIARI_CTX_INIT2 (2, NUM_REF_NO_CTX,    mc->ref_no_contexts,      INIT_REF_NO,     img->model_number);
  BIARI_CTX_INIT1 (   NUM_DELTA_QP_CTX,  mc->delta_qp_contexts,    INIT_DELTA_QP,   img->model_number);
  BIARI_CTX_INIT1 (   NUM_MB_AFF_CTX,    mc->mb_aff_contexts,      INIT_MB_AFF,     img->model_number);
  BIARI_CTX_INIT1 (   NUM_TRANSFORM_SIZE_CTX,  mc->transform_size_contexts,    INIT_TRANSFORM_SIZE,   img->model_number);
#ifdef ADAPTIVE_QUANTIZATION
  BIARI_CTX_INIT1 (   NUM_MODULATED_QUANTIZATION_CTX,  mc->modulated_quantization_contexts,    INIT_MODULATED_QUANTIZATION,   img->model_number);
#endif

  //--- texture coding contexts ---
  BIARI_CTX_INIT1 (                 NUM_IPR_CTX,  tc->ipr_contexts,     INIT_IPR,       img->model_number);
  BIARI_CTX_INIT1 (                 NUM_CIPR_CTX, tc->cipr_contexts,    INIT_CIPR,      img->model_number);
  BIARI_CTX_INIT2 (3,               NUM_CBP_CTX,  tc->cbp_contexts,     INIT_CBP,       img->model_number);
  BIARI_CTX_INIT2 (8,               NUM_BCBP_CTX, tc->bcbp_contexts,    INIT_BCBP,      img->model_number);
  BIARI_CTX_INIT2 (NUM_BLOCK_TYPES, NUM_MAP_CTX,  tc->map_contexts,     INIT_MAP,       img->model_number);
  BIARI_CTX_INIT2 (NUM_BLOCK_TYPES, NUM_LAST_CTX, tc->last_contexts,    INIT_LAST,      img->model_number);
  BIARI_CTX_INIT2 (NUM_BLOCK_TYPES, NUM_ONE_CTX,  tc->one_contexts,     INIT_ONE,       img->model_number);
  BIARI_CTX_INIT2 (NUM_BLOCK_TYPES, NUM_ABS_CTX,  tc->abs_contexts,     INIT_ABS,       img->model_number);
  BIARI_CTX_INIT2 (NUM_BLOCK_TYPES, NUM_MAP_CTX,  tc->fld_map_contexts, INIT_FLD_MAP,   img->model_number);
  BIARI_CTX_INIT2 (NUM_BLOCK_TYPES, NUM_LAST_CTX, tc->fld_last_contexts,INIT_FLD_LAST,  img->model_number);
  
#ifdef ADAPTIVE_FD_SD_CODING
  BIARI_CTX_INIT1 (  NUM_BCBP_FD_SD_CTX,  tc->bcbp_contexts_FD_SD   ,    INIT_BCBP_FD_SD  ,      img->model_number);
  BIARI_CTX_INIT1 (  NUM_BCBP_FD_SD_CTX,  tc->bcbp8_contexts_FD_SD  ,    INIT_BCBP_FD_SD8 ,      img->model_number);
  BIARI_CTX_INIT1 (  NUM_MAP_CTX       ,  tc->map_contexts_SD       ,    INIT_MAP_SD      ,      img->model_number);
  BIARI_CTX_INIT1 (  NUM_MAP8_CTX      ,  tc->map8_contexts_SD      ,    INIT_MAP8_SD     ,      img->model_number);
  BIARI_CTX_INIT1 (  NUM_LAST_CTX      ,  tc->last_contexts_SD      ,    INIT_LAST_SD     ,      img->model_number);
  BIARI_CTX_INIT1 (  NUM_LAST8_CTX     ,  tc->last8_contexts_SD     ,    INIT_LAST8_SD    ,      img->model_number);
  BIARI_CTX_INIT1 (  NUM_ONE_CTX       ,  tc->one_contexts_SD       ,    INIT_ONE_SD      ,      img->model_number);
  BIARI_CTX_INIT1 (  NUM_ONE_CTX       ,  tc->one8_contexts_SD      ,    INIT_ONE_SD      ,      img->model_number);
  BIARI_CTX_INIT1 (  NUM_ABS_CTX       ,  tc->abs_contexts_SD       ,    INIT_ABS_SD      ,      img->model_number);
  BIARI_CTX_INIT1 (  NUM_ABS_CTX       ,  tc->abs8_contexts_SD      ,    INIT_ABS_SD      ,      img->model_number);
  BIARI_CTX_INIT1 (  NUM_MB_adap_CTX   ,  tc->MB_adaptive_SD_context,    INIT_MB_ADAP     ,      img->model_number);
#endif
}



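/*
 * XRate: estimate the cost of re-initializing one context with the given
 * model.  The model's starting state is derived from the slice QP and
 * clipped to [0,127]; its code length (entropy[]) is weighted by the
 * probability implied by the context's adapted state, and the whole term
 * is scaled by how reliable the context statistics are (ctx->count,
 * saturated at RELIABLE_COUNT).
 */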
double XRate (BiContextTypePtr ctx, const int* model)
{
  int     ctx_state, mod_state;
  double  weight, xr = 0.0;
  int     qp = max(0,img->qp);
  
  weight    = min (1.0, (double)ctx->count/(double)RELIABLE_COUNT);
  
  mod_state = ((model[0]*qp)>>4)+model[1];
  mod_state = min (max (0, mod_state), 127);
  ctx_state = (ctx->MPS ? 64+ctx->state : 63-ctx->state);
  
  xr -= weight * probability[    ctx_state] * entropy[    mod_state];
  xr -= weight * probability[127-ctx_state] * entropy[127-mod_state];
  
  return xr;
}

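/*
 * ADD_XRATE2 / ADD_XRATE1 accumulate XRate over a 2-D (or 1-D) array of
 * contexts into xr, selecting the I-slice or P-slice initialization table
 * via token pasting (tab ## _I vs. tab ## _P).
 */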
#define ADD_XRATE2(ii,jj,ctx,tab,num) \
{ \
  for (i=0; i<ii; i++) \
  for (j=0; j<jj; j++) \
  { \
    if (img->type==I_SLICE)  xr += XRate (&(ctx[i][j]), &(tab ## _I[num][i][j][0])); \
    else                     xr += XRate (&(ctx[i][j]), &(tab ## _P[num][i][j][0])); \
  } \
}
#define ADD_XRATE1(jj,ctx,tab,num) \
{ \
  for (j=0; j<jj; j++) \
  { \
    if (img->type==I_SLICE)  xr += XRate (&(ctx[j]), &(tab ## _I[num][0][j][0])); \
    else                     xr += XRate (&(ctx[j]), &(tab ## _P[num][0][j][0])); \
  } \
}


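/*
 * GetCtxModelNumber: evaluate every available context initialization model
 * (NUM_CTX_MODELS_I or NUM_CTX_MODELS_P of them) against the contexts
 * adapted in the current slice and return, through *mnumber, the model
 * with the smallest estimated rate.
 */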
void GetCtxModelNumber (int* mnumber, MotionInfoContexts* mc, TextureInfoContexts* tc)
{
  int     model, j, i;
  int     num_models = (img->type==I_SLICE ? NUM_CTX_MODELS_I : NUM_CTX_MODELS_P);
  double  xr, min_xr = 1e30;
  
  for (model=0; model<num_models; model++)
  {
    xr = 0.0;
    //--- motion coding contexts ---
    ADD_XRATE2 (3, NUM_MB_TYPE_CTX,   mc->mb_type_contexts,     INIT_MB_TYPE,   model);
    ADD_XRATE2 (2, NUM_B8_TYPE_CTX,   mc->b8_type_contexts,     INIT_B8_TYPE,   model);
    ADD_XRATE2 (2, NUM_MV_RES_CTX,    mc->mv_res_contexts,      INIT_MV_RES,    model);
#ifdef MV_COMPETITION
    if (input->mv_competition > 0)
    {
      ADD_XRATE1 (mv_comp.nb_mode_for_skip,  mc->mv_predictor_skip_contexts,    INIT_PRED,      model);
      ADD_XRATE1 (mv_comp.nb_mode_for_mvp ,  mc->mv_predictor_mvp_contexts ,    INIT_PRED,      model);
      ADD_XRATE1 (mv_comp.nb_mode_for_mvb ,  mc->mv_predictor_mvb_contexts ,    INIT_PRED,      model);
    }
#endif    
    ADD_XRATE2 (2, NUM_REF_NO_CTX,    mc->ref_no_contexts,      INIT_REF_NO,    model);
    ADD_XRATE1 (   NUM_DELTA_QP_CTX,  mc->delta_qp_contexts,    INIT_DELTA_QP,  model);
    ADD_XRATE1 (   NUM_MB_AFF_CTX,    mc->mb_aff_contexts,      INIT_MB_AFF,    model);
    ADD_XRATE1 (   NUM_TRANSFORM_SIZE_CTX,  mc->transform_size_contexts, INIT_TRANSFORM_SIZE,  model);
#ifdef ADAPTIVE_QUANTIZATION
    ADD_XRATE1 (   NUM_MODULATED_QUANTIZATION_CTX,  mc->modulated_quantization_contexts, INIT_MODULATED_QUANTIZATION,  model);
#endif
    
#ifdef ADAPTIVE_FD_SD_CODING
    ADD_XRATE1 (  NUM_BCBP_FD_SD_CTX,  tc->bcbp_contexts_FD_SD   ,    INIT_BCBP_FD_SD  ,      model);
    ADD_XRATE1 (  NUM_BCBP_FD_SD_CTX,  tc->bcbp8_contexts_FD_SD  ,    INIT_BCBP_FD_SD8 ,      model);
    ADD_XRATE1 (  NUM_MAP_CTX       ,  tc->map_contexts_SD       ,    INIT_MAP_SD      ,      model);
    ADD_XRATE1 (  NUM_MAP8_CTX      ,  tc->map8_contexts_SD      ,    INIT_MAP8_SD     ,      model);
    ADD_XRATE1 (  NUM_LAST_CTX      ,  tc->last_contexts_SD      ,    INIT_LAST_SD     ,      model);
    ADD_XRATE1 (  NUM_LAST8_CTX     ,  tc->last8_contexts_SD     ,    INIT_LAST8_SD    ,      model);
    ADD_XRATE1 (  NUM_ONE_CTX       ,  tc->one_contexts_SD       ,    INIT_ONE_SD      ,      model);
    ADD_XRATE1 (  NUM_ONE_CTX       ,  tc->one8_contexts_SD      ,    INIT_ONE_SD      ,      model);
    ADD_XRATE1 (  NUM_ABS_CTX       ,  tc->abs_contexts_SD       ,    INIT_ABS_SD      ,      model);
    ADD_XRATE1 (  NUM_ABS_CTX       ,  tc->abs8_contexts_SD      ,    INIT_ABS_SD      ,      model);
    ADD_XRATE1 (  NUM_MB_adap_CTX   ,  tc->MB_adaptive_SD_context,    INIT_MB_ADAP     ,      model);
#endif
    
    
    //--- texture coding contexts ---
    ADD_XRATE1 (                  NUM_IPR_CTX,  tc->ipr_contexts,       INIT_IPR,       model);
    ADD_XRATE1 (                  NUM_CIPR_CTX, tc->cipr_contexts,      INIT_CIPR,      model);
    ADD_XRATE2 (3,                NUM_CBP_CTX,  tc->cbp_contexts,       INIT_CBP,       model);
    ADD_XRATE2 (NUM_BLOCK_TYPES,  NUM_BCBP_CTX, tc->bcbp_contexts,      INIT_BCBP,      model);
    ADD_XRATE2 (NUM_BLOCK_TYPES,  NUM_MAP_CTX,  tc->map_contexts,       INIT_MAP,       model);
    ADD_XRATE2 (NUM_BLOCK_TYPES,  NUM_LAST_CTX, tc->last_contexts,      INIT_LAST,      model);
    ADD_XRATE2 (NUM_BLOCK_TYPES,  NUM_ONE_CTX,  tc->one_contexts,       INIT_ONE,       model);
    ADD_XRATE2 (NUM_BLOCK_TYPES,  NUM_ABS_CTX,  tc->abs_contexts,       INIT_ABS,       model);
    ADD_XRATE2 (NUM_BLOCK_TYPES,  NUM_MAP_CTX,  tc->fld_map_contexts,   INIT_FLD_MAP,   model);
    ADD_XRATE2 (NUM_BLOCK_TYPES,  NUM_LAST_CTX, tc->fld_last_contexts,  INIT_FLD_LAST,  model);
    
    if (xr<min_xr)
    {
      min_xr    = xr;
      *mnumber  = model;
    }
  }
}

#undef ADD_XRATE2
#undef ADD_XRATE1






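/*
 * store_contexts: when adaptive context initialization is enabled
 * (input->context_init_method != 0), record the cheapest initialization
 * model for this slice position so it can be used to initialize the
 * contexts of later pictures.
 */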
void store_contexts ()
{
  int frame_field = img->field_picture;
  int img_type    = img->type;
  int ctx_number  = img->currentSlice->start_mb_nr / num_mb_per_slice;
  
  if( input->context_init_method )
  {
    initialized [frame_field][img_type][ctx_number] = 1;
    GetCtxModelNumber (model_number[frame_field][img_type]+ctx_number, img->currentSlice->mot_ctx, img->currentSlice->tex_ctx);
  }
  else
  {
    // do nothing
  }
}


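/*
 * update_field_frame_contexts: keep the frame and field model tables
 * consistent when the picture structure changes.  After a field picture,
 * each frame-slice entry is copied from the field-slice entry at half its
 * index; after a frame picture, each field-slice entry is copied from the
 * frame-slice entry at twice its index.
 */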
void update_field_frame_contexts (int field)
{
  int i, j;
  
  if (field)
  {
    // set frame contexts
    for (j=0; j<FRAME_TYPES; j++)
    {
      for (i=0; i<number_of_slices; i++)
      {
        initialized [0][j][i] = initialized [1][j][i>>1];
        model_number[0][j][i] = model_number[1][j][i>>1];
      }
    }
  }
  else
  {
    // set field contexts
    for (j=0; j<FRAME_TYPES; j++)
    {
      for (i=0; i<((number_of_slices+1)>>1); i++)
      {
        initialized [1][j][i] = initialized [0][j][i<<1];
        model_number[1][j][i] = model_number[0][j][i<<1];
      }
    }
  }
}
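
For orientation, a minimal sketch of how these routines would typically be driven from the encoder's slice loop. The wrapper name code_one_slice_sketch and the exact call sites are illustrative assumptions, not part of context_ini.c:

/* Hypothetical driver (assumed call sites, not part of this file). */
static void code_one_slice_sketch (void)
{
  init_contexts ();    /* reset mot_ctx / tex_ctx before coding the slice    */

  /* ... CABAC-encode all macroblocks of the slice ... */

  store_contexts ();   /* no-op unless input->context_init_method is enabled */
}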
