⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 svm_common.c

📁 SVMcfg: Learns a weighted context free grammar from examples. Training examples (e.g. for natural la
💻 C
📖 第 1 页 / 共 2 页
字号:
     /* Note: SVECTOR lists are not followed, but only the first
	SVECTOR is used */
{
  return(multadd_ss(a,b,-1.0));
}

SVECTOR* add_ss(SVECTOR *a, SVECTOR *b) 
     /* Returns a newly allocated sparse vector holding the sum a+b. */
     /* Note: only the head of each SVECTOR list is used; the 'next'
	chain is ignored (this delegates to multadd_ss). */
{
  /* a + b is just a + 1.0*b */
  return multadd_ss(a,b,1.0);
}

SVECTOR* add_list_ss(SVECTOR *a) 
     /* Returns a newly allocated SVECTOR holding the linear
	combination of the list 'a', with each element scaled by its
	own 'factor'. An empty (NULL) list yields an empty sparse
	vector. */
{
  SVECTOR *accum,*next_sum,*elem;
  WORD    terminator[2];

  if(!a) {
    /* empty list: build a vector containing no features */
    terminator[0].wnum=0;          /* wnum==0 ends the word array */
    return(create_svector(terminator,"",1.0));
  }

  accum=smult_s(a,a->factor);      /* start with the scaled head */
  for(elem=a->next;elem;elem=elem->next) {
    next_sum=multadd_ss(accum,elem,elem->factor);
    free_svector(accum);           /* drop the superseded partial sum */
    accum=next_sum;
  }
  return(accum);
}

SVECTOR* add_list_ns(SVECTOR *a) 
     /* Sums the SVECTOR list 'a' (each element weighted by its
	'factor') into a dense buffer and converts the result back
	into one sparse vector. Intended for the case where the
	feature dimension is small compared to the list length. */
{
    SVECTOR *result,*elem;
    register WORD *w;
    long maxfeat;
    double *dense;

    /* determine the highest feature number occurring in the list */
    maxfeat=0;
    for(elem=a;elem;elem=elem->next) {
      for(w=elem->words;w->wnum;w++) {
	if(w->wnum>maxfeat)
	  maxfeat=w->wnum;
      }
    }

    dense=create_nvector(maxfeat);
    clear_nvector(dense,maxfeat);

    /* accumulate every list element into the dense buffer */
    for(elem=a;elem;elem=elem->next)
      add_vector_ns(dense,elem,elem->factor);

    result=create_svector_n(dense,maxfeat,"",1.0);
    free(dense);

    return(result);
}

void add_list_n_ns(double *vec_n, SVECTOR *vec_s, double faktor)
     /* Adds faktor times the whole SVECTOR list vec_s into the dense
	vector vec_n (each list element is additionally scaled by its
	own 'factor'). */
{
  SVECTOR *elem;
  for(elem=vec_s;elem!=NULL;elem=elem->next)
    add_vector_ns(vec_n,elem,faktor*elem->factor);
}

void append_svector_list(SVECTOR *a, SVECTOR *b) 
     /* Destructively appends list b to the end of list a.
	Precondition: a is non-NULL (its final 'next' pointer is
	overwritten). */
{
    SVECTOR *tail=a;

    while(tail->next)            /* walk to the last element of a */
      tail=tail->next;
    tail->next=b;                /* splice b onto the end */
}

SVECTOR* smult_s(SVECTOR *a, double factor) 
     /* Returns a new sparse vector equal to a scaled by 'factor'.
	Entries whose scaled weight is exactly zero are dropped (so
	factor==0 produces an empty vector). */
{
    SVECTOR *scaled;
    register WORD *buf,*out;
    register WORD *in;
    long n;

    /* count the words of a, plus one slot for the terminator */
    n=1;
    for(in=a->words;in->wnum;in++)
      n++;

    buf=(WORD *)my_malloc(sizeof(WORD)*n);
    out=buf;
    for(in=a->words;in->wnum;in++) {
      (*out)=(*in);
      out->weight*=factor;
      if(out->weight != 0)      /* keep only entries that stay nonzero */
	out++;
    }
    out->wnum=0;                /* terminate the word array */

    scaled=create_svector(buf,a->userdefined,1.0);
    free(buf);                  /* create_svector made its own copy */

    return(scaled);
}

int featvec_eq(SVECTOR *a, SVECTOR *b)
     /* Tests two sparse vectors for equality of feature values,
	treating features missing from one vector as zero. Returns 1
	if equal, 0 otherwise.
	Fix: the original loop stopped as soon as EITHER word array
	ended, so nonzero trailing entries of the longer vector were
	never inspected — e.g. (1:1) and (1:1,2:5) compared equal.
	The leftover tail is now checked for zero weights. */
{
    register WORD *ai,*bj;
    ai=a->words;
    bj=b->words;
    while (ai->wnum && bj->wnum) {
      if(ai->wnum > bj->wnum) {
	if((CFLOAT)(bj->weight) != 0)   /* feature only in b must be 0 */
	  return(0);
	bj++;
      }
      else if (ai->wnum < bj->wnum) {
	if((CFLOAT)(ai->weight) != 0)   /* feature only in a must be 0 */
	  return(0);
	ai++;
      }
      else {
	if((CFLOAT)(ai->weight) != (CFLOAT)(bj->weight)) 
	  return(0);
	ai++;
	bj++;
      }
    }
    /* any leftover entries of the longer vector must be zero */
    for(;ai->wnum;ai++)
      if((CFLOAT)(ai->weight) != 0)
	return(0);
    for(;bj->wnum;bj++)
      if((CFLOAT)(bj->weight) != 0)
	return(0);
    return(1);
}

double model_length_s(MODEL *model, KERNEL_PARM *kernel_parm) 
     /* Returns the length ||w|| of the weight vector, computed in
	kernel space as sqrt(sum_ij alpha_i*alpha_j*K(x_i,x_j)).
	Costs O(sv_num^2) kernel evaluations. */
{
  register long i,j;
  register double norm2=0;

  for(i=1;i<model->sv_num;i++) {      /* sv arrays are indexed from 1 here */
    for(j=1;j<model->sv_num;j++) {
      norm2+=model->alpha[i]*model->alpha[j]
	     *kernel(kernel_parm,model->supvec[i],model->supvec[j]);
    }
  }
  return(sqrt(norm2));
}

void mult_vector_ns(double *vec_n, SVECTOR *vec_s, double faktor)
     /* In place, multiplies each component of the dense vector vec_n
	that occurs in vec_s by faktor times the corresponding sparse
	weight. Only the head of the SVECTOR list is used. */
{
  register WORD *w;
  for(w=vec_s->words;w->wnum;w++)
    vec_n[w->wnum]*=(faktor*w->weight);
}

void add_vector_ns(double *vec_n, SVECTOR *vec_s, double faktor)
     /* Adds faktor times the sparse vector vec_s into the dense
	vector vec_n (in place). Note: only the head of the SVECTOR
	list is used; the 'next' chain is ignored. */
{
  register WORD *w;
  for(w=vec_s->words;w->wnum;w++)
    vec_n[w->wnum]+=(faktor*w->weight);
}

double sprod_ns(double *vec_n, SVECTOR *vec_s)
     /* Returns the inner product of the dense vector vec_n with the
	sparse vector vec_s (head of the list only). */
{
  register double dot=0;
  register WORD *w;
  for(w=vec_s->words;w->wnum;w++)
    dot+=vec_n[w->wnum]*w->weight;
  return(dot);
}

void add_weight_vector_to_linear_model(MODEL *model)
     /* For the linear case, materializes the explicit weight vector
	w = sum_i alpha_i * x_i and stores it in model->lin_weights
	(newly allocated, zero-initialized before accumulation). */
{
  long sv;
  SVECTOR *part;

  model->lin_weights=create_nvector(model->totwords);
  clear_nvector(model->lin_weights,model->totwords);
  for(sv=1;sv<model->sv_num;sv++) {   /* support vectors start at index 1 */
    for(part=(model->supvec[sv])->fvec;part;part=part->next)
      add_vector_ns(model->lin_weights,part,part->factor*model->alpha[sv]);
  }
}


DOC *create_example(long docnum, long queryid, long slackid, 
		    double costfactor, SVECTOR *fvec)
     /* Allocates and fills a DOC. Ownership of fvec passes to the
	returned DOC (freed by free_example when deep!=0). */
{
  DOC *doc=(DOC *)my_malloc(sizeof(DOC));

  doc->docnum=docnum;
  doc->kernelid=docnum;      /* kernel id defaults to the doc number */
  doc->queryid=queryid;
  doc->slackid=slackid;
  doc->costfactor=costfactor;
  doc->fvec=fvec;
  return(doc);
}

void free_example(DOC *example, long deep)
     /* Releases a DOC. When deep is nonzero, the attached feature
	vector list is freed as well; otherwise the caller keeps
	ownership of example->fvec. A NULL example is a no-op. */
{
  if(!example)
    return;
  if(deep && example->fvec)
    free_svector(example->fvec);
  free(example);
}

/************ Some useful dense vector and matrix routines ****************/

MATRIX *create_matrix(int n, int m)
/* Allocates an n-by-m matrix in row-pointer representation. The
   elements are left uninitialized. */
{
  int row;
  MATRIX *mat;

  mat=(MATRIX *)my_malloc(sizeof(MATRIX));
  mat->n=n;
  mat->m=m;
  mat->element=(double **)my_malloc(sizeof(double *)*n);
  for(row=0;row<n;row++)
    mat->element[row]=(double *)my_malloc(sizeof(double)*m);
  return(mat);
}

MATRIX *realloc_matrix(MATRIX *matrix, int n, int m)
/* Extends/shrinks matrix to n rows and m columns; a NULL matrix is
   treated as a fresh allocation. Rows added by growing (and any
   columns added by widening) are NOT initialized. Existing values in
   the surviving top-left MIN(n,matrix->n) x MIN(m,matrix->m) region
   are preserved by realloc.
   NOTE(review): the realloc return values are used unchecked, so an
   out-of-memory realloc would lose/deref NULL — pre-existing risk,
   consider routing through the same abort-on-failure policy as
   my_malloc. */
{
  int i;

  if(!matrix) 
    return(create_matrix(n,m));

  /* shrinking: release the rows that fall off the end first */
  for(i=n;i<matrix->n;i++) 
    free(matrix->element[i]);
  /* resize the row-pointer array to the new row count */
  matrix->element=(double **)realloc(matrix->element,sizeof(double *)*n);
  /* growing: allocate brand-new rows (uninitialized) */
  for(i=matrix->n;i<n;i++) 
    matrix->element[i]=(double *)my_malloc(sizeof(double)*m);
  /* rows that survive the resize get their column count adjusted */
  for(i=0;i<MIN(n,matrix->n);i++) {
    matrix->element[i]=(double *)realloc(matrix->element[i],sizeof(double)*m);
  }
  matrix->n=n;
  matrix->m=m;
  return(matrix);
}

double *create_nvector(int n)
/* Allocates a dense vector with n+1 entries (valid indices 0..n):
   sparse feature numbers are 1-based, while some code also uses
   index 0, so one extra slot is always reserved. Entries are
   uninitialized; see clear_nvector. */
{
  return((double *)my_malloc(sizeof(double)*(n+1)));
}

void clear_nvector(double *vec, long int n)
/* Zeroes entries 0..n (inclusive) of a dense vector allocated with
   create_nvector. */
{
  register long idx;
  for(idx=n;idx>=0;idx--)
    vec[idx]=0;
}

MATRIX *copy_matrix(MATRIX *matrix)
/* Returns a newly allocated deep copy of matrix (all elements
   duplicated, no shared storage). */
{
  int row,col;
  MATRIX *dup=create_matrix(matrix->n,matrix->m);

  for(row=0;row<matrix->n;row++)
    for(col=0;col<matrix->m;col++)
      dup->element[row][col]=matrix->element[row][col];
  return(dup);
}

void free_matrix(MATRIX *matrix) 
/* Frees every row, the row-pointer array, and the MATRIX struct
   itself. Precondition: matrix is non-NULL. */
{
  int row;

  for(row=0;row<matrix->n;row++)
    free(matrix->element[row]);
  free(matrix->element);
  free(matrix);
}

void free_nvector(double *vector) 
/* deallocates a dense vector created with create_nvector;
   free(NULL) is a no-op, so a NULL argument is safe */
{
  free(vector);
}

MATRIX *transpose_matrix(MATRIX *matrix)
/* Returns a newly allocated m-by-n transpose of the given n-by-m
   matrix; the input is left unmodified. */
{
  int row,col;
  MATRIX *trans=create_matrix(matrix->m,matrix->n);

  for(row=0;row<matrix->n;row++)
    for(col=0;col<matrix->m;col++)
      trans->element[col][row]=matrix->element[row][col];
  return(trans);
}


MATRIX *cholesky_matrix(MATRIX *A)
/* Given a positive-definite symmetric matrix A[0..n-1][0..n-1], this routine constructs its Cholesky decomposition, A = L 

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -